-rw-r--r-- .clang-format | 13
-rw-r--r-- .mailmap | 6
-rw-r--r-- Documentation/ABI/obsolete/sysfs-cpuidle | 2
-rw-r--r-- Documentation/ABI/removed/sysfs-kernel-uids | 2
-rw-r--r-- Documentation/ABI/stable/sysfs-bus-vmbus | 2
-rw-r--r-- Documentation/ABI/stable/sysfs-bus-xen-backend | 2
-rw-r--r-- Documentation/ABI/stable/sysfs-devices-system-cpu | 83
-rw-r--r-- Documentation/ABI/stable/sysfs-driver-dma-idxd | 2
-rw-r--r-- Documentation/ABI/stable/sysfs-driver-mlxreg-io | 4
-rw-r--r-- Documentation/ABI/testing/configfs-iio | 2
-rw-r--r-- Documentation/ABI/testing/configfs-most | 8
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget | 2
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget-uvc | 4
-rw-r--r-- Documentation/ABI/testing/debugfs-driver-genwqe | 2
-rw-r--r-- Documentation/ABI/testing/debugfs-driver-habanalabs | 2
-rw-r--r-- Documentation/ABI/testing/evm | 36
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-fsi | 2
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio | 6
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-pci | 4
-rw-r--r-- Documentation/ABI/testing/sysfs-class-backlight | 100
-rw-r--r-- Documentation/ABI/testing/sysfs-class-backlight-adp5520 | 31
-rw-r--r-- Documentation/ABI/testing/sysfs-class-backlight-adp8860 | 37
-rw-r--r-- Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 | 32
-rw-r--r-- Documentation/ABI/testing/sysfs-class-firmware-attributes | 18
-rw-r--r-- Documentation/ABI/testing/sysfs-class-led-driver-el15203000 | 9
-rw-r--r-- Documentation/ABI/testing/sysfs-class-led-trigger-pattern | 3
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-platform-soc-ipa | 78
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 10
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-ufs | 4
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-f2fs | 2
-rw-r--r-- Documentation/ABI/testing/sysfs-kernel-iommu_groups | 12
-rw-r--r-- Documentation/ABI/testing/sysfs-platform-dell-privacy-wmi | 55
-rw-r--r-- Documentation/Makefile | 2
-rw-r--r-- Documentation/PCI/acpi-info.rst | 18
-rw-r--r-- Documentation/PCI/endpoint/pci-endpoint-cfs.rst | 2
-rw-r--r-- Documentation/PCI/pci.rst | 6
-rw-r--r-- Documentation/RCU/checklist.rst | 55
-rw-r--r-- Documentation/accounting/delay-accounting.rst | 12
-rw-r--r-- Documentation/admin-guide/cgroup-v1/blkio-controller.rst | 155
-rw-r--r-- Documentation/admin-guide/cgroup-v2.rst | 55
-rw-r--r-- Documentation/admin-guide/cputopology.rst | 85
-rw-r--r-- Documentation/admin-guide/ext4.rst | 2
-rw-r--r-- Documentation/admin-guide/hw-vuln/core-scheduling.rst | 223
-rw-r--r-- Documentation/admin-guide/hw-vuln/index.rst | 1
-rw-r--r-- Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst | 3
-rw-r--r-- Documentation/admin-guide/kdump/kdump.rst | 170
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 48
-rw-r--r-- Documentation/admin-guide/laptops/laptop-mode.rst | 11
-rw-r--r-- Documentation/admin-guide/lockup-watchdogs.rst | 4
-rw-r--r-- Documentation/admin-guide/media/bt8xx.rst | 15
-rw-r--r-- Documentation/admin-guide/media/bttv.rst | 21
-rw-r--r-- Documentation/admin-guide/media/index.rst | 12
-rw-r--r-- Documentation/admin-guide/media/ipu3.rst | 35
-rw-r--r-- Documentation/admin-guide/media/saa7134.rst | 3
-rw-r--r-- Documentation/admin-guide/pm/cpuidle.rst | 77
-rw-r--r-- Documentation/admin-guide/pm/intel_idle.rst | 16
-rw-r--r-- Documentation/admin-guide/pm/intel_pstate.rst | 15
-rw-r--r-- Documentation/admin-guide/pstore-blk.rst | 14
-rw-r--r-- Documentation/admin-guide/reporting-issues.rst | 2
-rw-r--r-- Documentation/admin-guide/sysctl/abi.rst | 2
-rw-r--r-- Documentation/admin-guide/sysctl/kernel.rst | 78
-rw-r--r-- Documentation/admin-guide/sysctl/vm.rst | 50
-rw-r--r-- Documentation/arm/marvell.rst | 2
-rw-r--r-- Documentation/arm64/booting.rst | 6
-rw-r--r-- Documentation/block/bfq-iosched.rst | 38
-rw-r--r-- Documentation/block/biodoc.rst | 2
-rw-r--r-- Documentation/block/blk-mq.rst | 4
-rw-r--r-- Documentation/block/stat.rst | 2
-rw-r--r-- Documentation/bpf/bpf_lsm.rst | 13
-rw-r--r-- Documentation/bpf/index.rst | 14
-rw-r--r-- Documentation/bpf/libbpf/libbpf.rst | 14
-rw-r--r-- Documentation/bpf/libbpf/libbpf_api.rst | 27
-rw-r--r-- Documentation/bpf/libbpf/libbpf_build.rst | 37
-rw-r--r-- Documentation/bpf/libbpf/libbpf_naming_convention.rst (renamed from tools/lib/bpf/README.rst) | 30
-rw-r--r-- Documentation/bpf/llvm_reloc.rst | 240
-rw-r--r-- Documentation/conf.py | 24
-rw-r--r-- Documentation/core-api/bus-virt-phys-mapping.rst | 2
-rw-r--r-- Documentation/core-api/dma-api.rst | 5
-rw-r--r-- Documentation/core-api/dma-isa-lpc.rst | 2
-rw-r--r-- Documentation/core-api/index.rst | 4
-rw-r--r-- Documentation/core-api/irq/irq-domain.rst | 1
-rw-r--r-- Documentation/core-api/printk-formats.rst | 16
-rw-r--r-- Documentation/dev-tools/checkpatch.rst | 509
-rw-r--r-- Documentation/dev-tools/kasan.rst | 9
-rw-r--r-- Documentation/dev-tools/kunit/api/index.rst | 8
-rw-r--r-- Documentation/dev-tools/kunit/faq.rst | 2
-rw-r--r-- Documentation/dev-tools/kunit/index.rst | 14
-rw-r--r-- Documentation/dev-tools/kunit/start.rst | 4
-rw-r--r-- Documentation/dev-tools/kunit/tips.rst | 5
-rw-r--r-- Documentation/dev-tools/kunit/usage.rst | 8
-rw-r--r-- Documentation/dev-tools/testing-overview.rst | 18
-rw-r--r-- Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt | 57
-rw-r--r-- Documentation/devicetree/bindings/connector/usb-connector.yaml | 15
-rw-r--r-- Documentation/devicetree/bindings/crypto/cortina,sl3516-crypto.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/crypto/intel,ixp4xx-crypto.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/devfreq/nvidia,tegra30-actmon.yaml | 126
-rw-r--r-- Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml | 22
-rw-r--r-- Documentation/devicetree/bindings/hwmon/lm75.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-mpc.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml | 106
-rw-r--r-- Documentation/devicetree/bindings/ipmi/aspeed-kcs-bmc.txt | 33
-rw-r--r-- Documentation/devicetree/bindings/mailbox/fsl,mu.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/mailbox/microchip,polarfire-soc-mailbox.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/media/atmel,isc.yaml | 114
-rw-r--r-- Documentation/devicetree/bindings/media/atmel-isc.txt | 65
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/rda,rda5807.yaml | 67
-rw-r--r-- Documentation/devicetree/bindings/media/mediatek-vcodec.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/media/microchip,xisc.yaml | 129
-rw-r--r-- Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml | 109
-rw-r--r-- Documentation/devicetree/bindings/media/rc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,csi2.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,drif.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,isp.yaml | 196
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vin.yaml | 27
-rw-r--r-- Documentation/devicetree/bindings/media/rockchip,vdec.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/media/rockchip-vpu.yaml | 33
-rw-r--r-- Documentation/devicetree/bindings/media/tango-ir.txt | 21
-rw-r--r-- Documentation/devicetree/bindings/mfd/mt6397.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/mmc/mmc-controller.yaml | 25
-rw-r--r-- Documentation/devicetree/bindings/mmc/renesas,mmcif.txt | 53
-rw-r--r-- Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml | 135
-rw-r--r-- Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/mmc/sdhci-am654.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/net/brcm,iproc-mdio.yaml | 38
-rw-r--r-- Documentation/devicetree/bindings/net/can/rcar_can.txt | 80
-rw-r--r-- Documentation/devicetree/bindings/net/can/rcar_canfd.txt | 107
-rw-r--r-- Documentation/devicetree/bindings/net/can/renesas,rcar-can.yaml | 139
-rw-r--r-- Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml | 122
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/mt7530.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml | 132
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/qca8k.txt | 40
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/sja1105.txt | 156
-rw-r--r-- Documentation/devicetree/bindings/net/ethernet-controller.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/net/ingenic,mac.yaml | 76
-rw-r--r-- Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml | 226
-rw-r--r-- Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/net/qcom,ipa.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt | 69
-rw-r--r-- Documentation/devicetree/bindings/net/qualcomm-bluetooth.yaml | 183
-rw-r--r-- Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/net/renesas,ether.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/net/rockchip-dwmac.yaml | 30
-rw-r--r-- Documentation/devicetree/bindings/net/snps,dwmac.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/regulator/max8893.yaml | 88
-rw-r--r-- Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml | 385
-rw-r--r-- Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml | 17
-rw-r--r-- Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/regulator/regulator.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml | 89
-rw-r--r-- Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/soc/microchip/microchip,polarfire-soc-sys-controller.yaml | 35
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-cadence.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-cadence.yaml | 66
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-controller.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-mux.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-rockchip.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-xilinx.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-xilinx.yaml | 57
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml | 51
-rw-r--r-- Documentation/devicetree/bindings/submitting-patches.rst | 11
-rw-r--r-- Documentation/devicetree/bindings/trivial-devices.yaml | 4
-rw-r--r-- Documentation/doc-guide/contributing.rst | 8
-rw-r--r-- Documentation/driver-api/acpi/linuxized-acpica.rst | 2
-rw-r--r-- Documentation/driver-api/gpio/using-gpio.rst | 4
-rw-r--r-- Documentation/driver-api/ioctl.rst | 10
-rw-r--r-- Documentation/driver-api/media/drivers/bttv-devel.rst | 2
-rw-r--r-- Documentation/driver-api/media/drivers/ccs/ccs-regs.asc | 2
-rwxr-xr-x Documentation/driver-api/media/drivers/ccs/mk-ccs-regs | 5
-rw-r--r-- Documentation/driver-api/media/drivers/zoran.rst | 2
-rw-r--r-- Documentation/driver-api/media/index.rst | 10
-rw-r--r-- Documentation/driver-api/pm/devices.rst | 8
-rw-r--r-- Documentation/driver-api/surface_aggregator/clients/cdev.rst | 127
-rw-r--r-- Documentation/driver-api/surface_aggregator/clients/index.rst | 3
-rw-r--r-- Documentation/driver-api/surface_aggregator/internal.rst | 15
-rw-r--r-- Documentation/driver-api/surface_aggregator/overview.rst | 6
-rw-r--r-- Documentation/driver-api/thermal/sysfs-api.rst | 24
-rw-r--r-- Documentation/driver-api/usb/dma.rst | 6
-rw-r--r-- Documentation/driver-api/usb/usb.rst | 2
-rw-r--r-- Documentation/fault-injection/fault-injection.rst | 24
-rw-r--r-- Documentation/filesystems/dax.rst | 291
-rw-r--r-- Documentation/filesystems/dax.txt | 257
-rw-r--r-- Documentation/filesystems/ext2.rst | 2
-rw-r--r-- Documentation/filesystems/ext4/blockgroup.rst | 2
-rw-r--r-- Documentation/filesystems/index.rst | 1
-rw-r--r-- Documentation/filesystems/locking.rst | 2
-rw-r--r-- Documentation/filesystems/path-lookup.rst | 194
-rw-r--r-- Documentation/firmware-guide/acpi/dsd/data-node-references.rst | 3
-rw-r--r-- Documentation/firmware-guide/acpi/dsd/graph.rst | 2
-rw-r--r-- Documentation/firmware-guide/acpi/dsd/phy.rst | 199
-rw-r--r-- Documentation/firmware-guide/acpi/enumeration.rst | 7
-rw-r--r-- Documentation/firmware-guide/acpi/index.rst | 1
-rw-r--r-- Documentation/hwmon/adm1177.rst | 3
-rw-r--r-- Documentation/hwmon/dps920ab.rst | 73
-rw-r--r-- Documentation/hwmon/index.rst | 4
-rw-r--r-- Documentation/hwmon/ir36021.rst | 2
-rw-r--r-- Documentation/hwmon/lm75.rst | 6
-rw-r--r-- Documentation/hwmon/ltc2992.rst | 2
-rw-r--r-- Documentation/hwmon/max31790.rst | 5
-rw-r--r-- Documentation/hwmon/mp2888.rst | 113
-rw-r--r-- Documentation/hwmon/pim4328.rst | 105
-rw-r--r-- Documentation/hwmon/pm6764tr.rst | 2
-rw-r--r-- Documentation/hwmon/pmbus-core.rst | 42
-rw-r--r-- Documentation/hwmon/pmbus.rst | 11
-rw-r--r-- Documentation/hwmon/sht4x.rst | 45
-rw-r--r-- Documentation/hwmon/zl6100.rst | 132
-rw-r--r-- Documentation/i2c/instantiating-devices.rst | 2
-rw-r--r-- Documentation/i2c/old-module-parameters.rst | 3
-rw-r--r-- Documentation/i2c/smbus-protocol.rst | 4
-rw-r--r-- Documentation/input/joydev/joystick-api.rst | 2
-rw-r--r-- Documentation/kernel-hacking/hacking.rst | 4
-rw-r--r-- Documentation/locking/lockdep-design.rst | 4
-rw-r--r-- Documentation/networking/af_xdp.rst | 32
-rw-r--r-- Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst | 126
-rw-r--r-- Documentation/networking/device_drivers/ethernet/amazon/ena.rst | 164
-rw-r--r-- Documentation/networking/device_drivers/ethernet/google/gve.rst | 53
-rw-r--r-- Documentation/networking/device_drivers/ethernet/intel/i40e.rst | 6
-rw-r--r-- Documentation/networking/device_drivers/ethernet/intel/iavf.rst | 2
-rw-r--r-- Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst | 88
-rw-r--r-- Documentation/networking/device_drivers/index.rst | 1
-rw-r--r-- Documentation/networking/device_drivers/wwan/index.rst | 18
-rw-r--r-- Documentation/networking/device_drivers/wwan/iosm.rst | 96
-rw-r--r-- Documentation/networking/devlink/devlink-port.rst | 35
-rw-r--r-- Documentation/networking/devlink/devlink-region.rst | 2
-rw-r--r-- Documentation/networking/devlink/devlink-trap.rst | 5
-rw-r--r-- Documentation/networking/devlink/index.rst | 1
-rw-r--r-- Documentation/networking/devlink/netdevsim.rst | 26
-rw-r--r-- Documentation/networking/devlink/prestera.rst | 141
-rw-r--r-- Documentation/networking/dsa/configuration.rst | 68
-rw-r--r-- Documentation/networking/dsa/dsa.rst | 21
-rw-r--r-- Documentation/networking/dsa/sja1105.rst | 61
-rw-r--r-- Documentation/networking/ethtool-netlink.rst | 8
-rw-r--r-- Documentation/networking/ip-sysctl.rst | 95
-rw-r--r-- Documentation/networking/mptcp-sysctl.rst | 29
-rw-r--r-- Documentation/networking/nf_conntrack-sysctl.rst | 24
-rw-r--r-- Documentation/networking/packet_mmap.rst | 2
-rw-r--r-- Documentation/networking/phy.rst | 6
-rw-r--r-- Documentation/networking/tuntap.rst | 2
-rw-r--r-- Documentation/power/runtime_pm.rst | 15
-rw-r--r-- Documentation/process/submitting-patches.rst | 32
-rw-r--r-- Documentation/riscv/vm-layout.rst | 4
-rw-r--r-- Documentation/scheduler/sched-bwc.rst | 2
-rw-r--r-- Documentation/scheduler/sched-capacity.rst | 6
-rw-r--r-- Documentation/scheduler/sched-energy.rst | 2
-rw-r--r-- Documentation/scheduler/sched-nice-design.rst | 2
-rw-r--r-- Documentation/security/IMA-templates.rst | 12
-rw-r--r-- Documentation/security/landlock.rst | 3
-rw-r--r-- Documentation/spi/pxa2xx.rst | 58
-rw-r--r-- Documentation/trace/coresight/coresight-etm4x-reference.rst | 2
-rw-r--r-- Documentation/trace/coresight/coresight.rst | 8
-rw-r--r-- Documentation/trace/ftrace.rst | 6
-rw-r--r-- Documentation/trace/kprobes.rst | 24
-rw-r--r-- Documentation/translations/index.rst | 4
-rw-r--r-- Documentation/translations/it_IT/index.rst | 4
-rw-r--r-- Documentation/translations/it_IT/process/coding-style.rst | 2
-rw-r--r-- Documentation/translations/ja_JP/index.rst | 5
-rw-r--r-- Documentation/translations/ko_KR/index.rst | 5
-rw-r--r-- Documentation/translations/zh_CN/admin-guide/index.rst | 2
-rw-r--r-- Documentation/translations/zh_CN/admin-guide/lockup-watchdogs.rst | 66
-rw-r--r-- Documentation/translations/zh_CN/core-api/cachetlb.rst | 336
-rw-r--r-- Documentation/translations/zh_CN/core-api/index.rst | 24
-rw-r--r-- Documentation/translations/zh_CN/core-api/kernel-api.rst | 369
-rw-r--r-- Documentation/translations/zh_CN/core-api/kobject.rst | 378
-rw-r--r-- Documentation/translations/zh_CN/core-api/local_ops.rst | 194
-rw-r--r-- Documentation/translations/zh_CN/core-api/padata.rst | 158
-rw-r--r-- Documentation/translations/zh_CN/core-api/printk-basics.rst | 110
-rw-r--r-- Documentation/translations/zh_CN/core-api/printk-formats.rst | 595
-rw-r--r-- Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst | 154
-rw-r--r-- Documentation/translations/zh_CN/core-api/symbol-namespaces.rst | 142
-rw-r--r-- Documentation/translations/zh_CN/core-api/workqueue.rst | 337
-rw-r--r-- Documentation/translations/zh_CN/dev-tools/index.rst | 2
-rw-r--r-- Documentation/translations/zh_CN/dev-tools/kasan.rst | 417
-rw-r--r-- Documentation/translations/zh_CN/index.rst | 5
-rw-r--r-- Documentation/translations/zh_CN/maintainer/configure-git.rst | 62
-rw-r--r-- Documentation/translations/zh_CN/maintainer/index.rst | 21
-rw-r--r-- Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst | 92
-rw-r--r-- Documentation/translations/zh_CN/maintainer/modifying-patches.rst | 51
-rw-r--r-- Documentation/translations/zh_CN/maintainer/pull-requests.rst | 148
-rw-r--r-- Documentation/translations/zh_CN/maintainer/rebasing-and-merging.rst | 165
-rw-r--r-- Documentation/translations/zh_CN/parisc/debugging.rst | 42
-rw-r--r-- Documentation/translations/zh_CN/parisc/index.rst | 28
-rw-r--r-- Documentation/translations/zh_CN/parisc/registers.rst | 153
-rw-r--r-- Documentation/translations/zh_CN/process/8.Conclusion.rst | 2
-rw-r--r-- Documentation/translations/zh_CN/process/coding-style.rst | 2
-rw-r--r-- Documentation/usb/ehci.rst | 2
-rw-r--r-- Documentation/usb/gadget_printer.rst | 2
-rw-r--r-- Documentation/userspace-api/ioctl/hdio.rst | 799
-rw-r--r-- Documentation/userspace-api/ioctl/ioctl-number.rst | 2
-rw-r--r-- Documentation/userspace-api/landlock.rst | 11
-rw-r--r-- Documentation/userspace-api/media/Makefile | 10
-rw-r--r-- Documentation/userspace-api/media/audio.h.rst.exceptions | 19
-rw-r--r-- Documentation/userspace-api/media/drivers/hantro.rst | 19
-rw-r--r-- Documentation/userspace-api/media/drivers/index.rst | 1
-rw-r--r-- Documentation/userspace-api/media/dvb/dmx-fopen.rst | 2
-rw-r--r-- Documentation/userspace-api/media/dvb/dmx-fread.rst | 2
-rw-r--r-- Documentation/userspace-api/media/dvb/dmx-set-filter.rst | 2
-rw-r--r-- Documentation/userspace-api/media/dvb/headers.rst | 7
-rw-r--r-- Documentation/userspace-api/media/dvb/intro.rst | 6
-rw-r--r-- Documentation/userspace-api/media/dvb/legacy_dvb_apis.rst | 7
-rw-r--r-- Documentation/userspace-api/media/fdl-appendix.rst | 64
-rw-r--r-- Documentation/userspace-api/media/glossary.rst | 2
-rw-r--r-- Documentation/userspace-api/media/index.rst | 12
-rw-r--r-- Documentation/userspace-api/media/v4l/biblio.rst | 8
-rw-r--r-- Documentation/userspace-api/media/v4l/dev-decoder.rst | 6
-rw-r--r-- Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst | 214
-rw-r--r-- Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst | 333
-rw-r--r-- Documentation/userspace-api/media/v4l/pixfmt-compressed.rst | 11
-rw-r--r-- Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst | 2
-rw-r--r-- Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst | 12
-rw-r--r-- Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst | 24
-rw-r--r-- Documentation/userspace-api/media/video.h.rst.exceptions | 39
-rw-r--r-- Documentation/userspace-api/media/videodev2.h.rst.exceptions | 5
-rw-r--r-- Documentation/userspace-api/seccomp_filter.rst | 28
-rw-r--r-- Documentation/virt/kvm/api.rst | 358
-rw-r--r-- Documentation/virt/kvm/cpuid.rst | 7
-rw-r--r-- Documentation/virt/kvm/hypercalls.rst | 21
-rw-r--r-- Documentation/virt/kvm/locking.rst | 5
-rw-r--r-- Documentation/virt/kvm/mmu.rst | 11
-rw-r--r-- Documentation/virt/kvm/msr.rst | 13
-rw-r--r-- Documentation/virt/kvm/s390-pv-boot.rst | 2
-rw-r--r-- Documentation/virt/kvm/vcpu-requests.rst | 10
-rw-r--r-- Documentation/vm/memory-model.rst | 45
-rw-r--r-- Documentation/vm/slub.rst | 10
-rw-r--r-- Documentation/vm/zswap.rst | 4
-rw-r--r-- Documentation/x86/boot.rst | 4
-rw-r--r-- Documentation/x86/buslock.rst | 126
-rw-r--r-- Documentation/x86/index.rst | 1
-rw-r--r-- Documentation/x86/mtrr.rst | 2
-rw-r--r-- Documentation/x86/x86_64/boot-options.rst | 31
-rw-r--r-- MAINTAINERS | 224
-rw-r--r-- Makefile | 14
-rw-r--r-- arch/Kconfig | 7
-rw-r--r-- arch/alpha/Kconfig | 22
-rw-r--r-- arch/alpha/configs/defconfig | 13
-rw-r--r-- arch/alpha/include/asm/atomic.h | 88
-rw-r--r-- arch/alpha/include/asm/cmpxchg.h | 12
-rw-r--r-- arch/alpha/include/asm/machvec.h | 6
-rw-r--r-- arch/alpha/include/asm/mmzone.h | 100
-rw-r--r-- arch/alpha/include/asm/page.h | 6
-rw-r--r-- arch/alpha/include/asm/pgtable.h | 4
-rw-r--r-- arch/alpha/include/asm/topology.h | 39
-rw-r--r-- arch/alpha/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/alpha/kernel/core_marvel.c | 53
-rw-r--r-- arch/alpha/kernel/core_wildfire.c | 29
-rw-r--r-- arch/alpha/kernel/pci_iommu.c | 29
-rw-r--r-- arch/alpha/kernel/process.c | 2
-rw-r--r-- arch/alpha/kernel/proto.h | 8
-rw-r--r-- arch/alpha/kernel/setup.c | 16
-rw-r--r-- arch/alpha/kernel/smp.c | 1
-rw-r--r-- arch/alpha/kernel/sys_marvel.c | 5
-rw-r--r-- arch/alpha/kernel/sys_wildfire.c | 5
-rw-r--r-- arch/alpha/mm/Makefile | 2
-rw-r--r-- arch/alpha/mm/init.c | 3
-rw-r--r-- arch/alpha/mm/numa.c | 223
-rw-r--r-- arch/arc/Kconfig | 13
-rw-r--r-- arch/arc/include/asm/atomic.h | 60
-rw-r--r-- arch/arc/include/asm/cmpxchg.h | 10
-rw-r--r-- arch/arc/include/asm/mmzone.h | 40
-rw-r--r-- arch/arc/include/uapi/asm/sigcontext.h | 1
-rw-r--r-- arch/arc/kernel/kprobes.c | 16
-rw-r--r-- arch/arc/kernel/signal.c | 43
-rw-r--r-- arch/arc/kernel/smp.c | 1
-rw-r--r-- arch/arc/kernel/stacktrace.c | 2
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 8
-rw-r--r-- arch/arc/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/arc/mm/init.c | 21
-rw-r--r-- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx6q-dhcom-som.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-meerkat96.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-pico.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d4.dtsi | 7
-rw-r--r-- arch/arm/configs/footbridge_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa_defconfig | 2
-rw-r--r-- arch/arm/crypto/Makefile | 10
-rw-r--r-- arch/arm/crypto/poly1305-core.S_shipped | 1158
-rw-r--r-- arch/arm/crypto/sha256-core.S_shipped | 2816
-rw-r--r-- arch/arm/crypto/sha512-core.S_shipped | 1869
-rw-r--r-- arch/arm/include/asm/atomic.h | 96
-rw-r--r-- arch/arm/include/asm/cmpxchg.h | 20
-rw-r--r-- arch/arm/include/asm/cpuidle.h | 10
-rw-r--r-- arch/arm/include/asm/sync_bitops.h | 2
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 13
-rw-r--r-- arch/arm/kernel/irq.c | 22
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 4
-rw-r--r-- arch/arm/kernel/process.c | 2
-rw-r--r-- arch/arm/kernel/setup.c | 16
-rw-r--r-- arch/arm/kernel/smp.c | 1
-rw-r--r-- arch/arm/mach-imx/pm-imx27.c | 1
-rw-r--r-- arch/arm/mach-ixp4xx/common.c | 26
-rw-r--r-- arch/arm/mach-omap1/board-ams-delta.c | 14
-rw-r--r-- arch/arm/mach-omap1/board-h2.c | 4
-rw-r--r-- arch/arm/mach-omap1/pm.c | 10
-rw-r--r-- arch/arm/mach-omap2/board-n8x0.c | 2
-rw-r--r-- arch/arm/mach-zynq/Kconfig | 2
-rw-r--r-- arch/arm/mm/tlb-v6.S | 2
-rw-r--r-- arch/arm/mm/tlb-v7.S | 2
-rw-r--r-- arch/arm/probes/kprobes/core.c | 23
-rw-r--r-- arch/arm64/Kbuild | 3
-rw-r--r-- arch/arm64/Kconfig | 36
-rw-r--r-- arch/arm64/Kconfig.platforms | 1
-rw-r--r-- arch/arm64/Makefile | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts | 3
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts | 5
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts | 10
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi | 23
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5.dtsi | 94
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi | 481
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi | 621
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3308.dtsi | 22
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am64-main.dtsi | 11
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 10
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | 13
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 31
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-main.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi | 7
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 10
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi | 11
-rw-r--r-- arch/arm64/crypto/Makefile | 10
-rw-r--r-- arch/arm64/crypto/poly1305-core.S_shipped | 835
-rw-r--r-- arch/arm64/crypto/sha256-core.S_shipped | 2069
-rw-r--r-- arch/arm64/crypto/sha512-core.S_shipped | 1093
-rw-r--r-- arch/arm64/include/asm/alternative-macros.h | 9
-rw-r--r-- arch/arm64/include/asm/arch_gicv3.h | 3
-rw-r--r-- arch/arm64/include/asm/asm-prototypes.h | 6
-rw-r--r-- arch/arm64/include/asm/asm_pointer_auth.h | 49
-rw-r--r-- arch/arm64/include/asm/assembler.h | 98
-rw-r--r-- arch/arm64/include/asm/atomic.h | 2
-rw-r--r-- arch/arm64/include/asm/barrier.h | 2
-rw-r--r-- arch/arm64/include/asm/cache.h | 2
-rw-r--r-- arch/arm64/include/asm/cacheflush.h | 71
-rw-r--r-- arch/arm64/include/asm/compiler.h | 16
-rw-r--r-- arch/arm64/include/asm/cpu.h | 45
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 15
-rw-r--r-- arch/arm64/include/asm/cpuidle.h | 35
-rw-r--r-- arch/arm64/include/asm/efi.h | 2
-rw-r--r-- arch/arm64/include/asm/exception.h | 34
-rw-r--r-- arch/arm64/include/asm/fpsimd.h | 2
-rw-r--r-- arch/arm64/include/asm/fpsimdmacros.h | 4
-rw-r--r-- arch/arm64/include/asm/insn-def.h | 9
-rw-r--r-- arch/arm64/include/asm/insn.h | 67
-rw-r--r-- arch/arm64/include/asm/kernel-pgtable.h | 19
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 4
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 8
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 23
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 17
-rw-r--r-- arch/arm64/include/asm/kvm_mte.h | 66
-rw-r--r-- arch/arm64/include/asm/kvm_pgtable.h | 42
-rw-r--r-- arch/arm64/include/asm/linkage.h | 8
-rw-r--r-- arch/arm64/include/asm/memory.h | 22
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 4
-rw-r--r-- arch/arm64/include/asm/module.lds.h | 17
-rw-r--r-- arch/arm64/include/asm/mte-def.h | 1
-rw-r--r-- arch/arm64/include/asm/mte-kasan.h | 93
-rw-r--r-- arch/arm64/include/asm/mte.h | 8
-rw-r--r-- arch/arm64/include/asm/page.h | 10
-rw-r--r-- arch/arm64/include/asm/patching.h | 13
-rw-r--r-- arch/arm64/include/asm/perf_event.h | 5
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 7
-rw-r--r-- arch/arm64/include/asm/pgtable-prot.h | 1
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 27
-rw-r--r-- arch/arm64/include/asm/pointer_auth.h | 59
-rw-r--r-- arch/arm64/include/asm/preempt.h | 2
-rw-r--r-- arch/arm64/include/asm/processor.h | 16
-rw-r--r-- arch/arm64/include/asm/scs.h | 8
-rw-r--r-- arch/arm64/include/asm/sdei.h | 10
-rw-r--r-- arch/arm64/include/asm/smp.h | 2
-rw-r--r-- arch/arm64/include/asm/stacktrace.h | 32
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 5
-rw-r--r-- arch/arm64/include/asm/tlb.h | 4
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 11
-rw-r--r-- arch/arm64/kernel/Makefile | 11
-rw-r--r-- arch/arm64/kernel/acpi.c | 22
-rw-r--r-- arch/arm64/kernel/alternative.c | 2
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 17
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 210
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 58
-rw-r--r-- arch/arm64/kernel/efi-entry.S | 9
-rw-r--r-- arch/arm64/kernel/entry-common.c | 256
-rw-r--r-- arch/arm64/kernel/entry-fpsimd.S | 22
-rw-r--r-- arch/arm64/kernel/entry.S | 369
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 6
-rw-r--r-- arch/arm64/kernel/ftrace.c | 1
-rw-r--r-- arch/arm64/kernel/head.S | 76
-rw-r--r-- arch/arm64/kernel/hibernate-asm.S | 7
-rw-r--r-- arch/arm64/kernel/hibernate.c | 20
-rw-r--r-- arch/arm64/kernel/idle.c | 46
-rw-r--r-- arch/arm64/kernel/idreg-override.c | 3
-rw-r--r-- arch/arm64/kernel/image-vars.h | 2
-rw-r--r-- arch/arm64/kernel/jump_label.c | 1
-rw-r--r-- arch/arm64/kernel/kaslr.c | 12
-rw-r--r-- arch/arm64/kernel/kgdb.c | 1
-rw-r--r-- arch/arm64/kernel/machine_kexec.c | 30
-rw-r--r-- arch/arm64/kernel/mte.c | 18
-rw-r--r-- arch/arm64/kernel/patching.c | 150
-rw-r--r-- arch/arm64/kernel/perf_callchain.c | 2
-rw-r--r-- arch/arm64/kernel/perf_event.c | 40
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 35
-rw-r--r-- arch/arm64/kernel/probes/simulate-insn.c | 1
-rw-r--r-- arch/arm64/kernel/probes/uprobes.c | 2
-rw-r--r-- arch/arm64/kernel/process.c | 101
-rw-r--r-- arch/arm64/kernel/ptrace.c | 2
-rw-r--r-- arch/arm64/kernel/sdei.c | 64
-rw-r--r-- arch/arm64/kernel/setup.c | 8
-rw-r--r-- arch/arm64/kernel/signal.c | 26
-rw-r--r-- arch/arm64/kernel/smccc-call.S | 83
-rw-r--r-- arch/arm64/kernel/smp.c | 17
-rw-r--r-- arch/arm64/kernel/smp_spin_table.c | 7
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 18
-rw-r--r-- arch/arm64/kernel/suspend.c | 12
-rw-r--r-- arch/arm64/kernel/sys_compat.c | 2
-rw-r--r-- arch/arm64/kernel/traps.c | 135
-rw-r--r-- arch/arm64/kvm/Kconfig | 5
-rw-r--r-- arch/arm64/kvm/Makefile | 2
-rw-r--r-- arch/arm64/kvm/arch_timer.c | 162
-rw-r--r-- arch/arm64/kvm/arm.c | 53
-rw-r--r-- arch/arm64/kvm/guest.c | 134
-rw-r--r-- arch/arm64/kvm/hyp/entry.S | 7
-rw-r--r-- arch/arm64/kvm/hyp/exception.c | 21
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 6
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 18
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 21
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/gfp.h | 45
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/memory.h | 7
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/mm.h | 13
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/cache.S | 4
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 8
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 60
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/page_alloc.c | 112
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/setup.c | 35
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/switch.c | 3
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/tlb.c | 2
-rw-r--r-- arch/arm64/kvm/hyp/pgtable.c | 61
-rw-r--r-- arch/arm64/kvm/hyp/reserved_mem.c | 3
-rw-r--r-- arch/arm64/kvm/hyp/vhe/switch.c | 3
-rw-r--r-- arch/arm64/kvm/mmu.c | 210
-rw-r--r-- arch/arm64/kvm/pmu-emul.c | 4
-rw-r--r-- arch/arm64/kvm/reset.c | 32
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 74
-rw-r--r-- arch/arm64/kvm/vgic/vgic-init.c | 36
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v2.c | 19
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v3.c | 19
-rw-r--r-- arch/arm64/kvm/vgic/vgic.c | 14
-rw-r--r-- arch/arm64/lib/Makefile | 4
-rw-r--r-- arch/arm64/lib/clear_user.S | 47
-rw-r--r-- arch/arm64/lib/insn.c (renamed from arch/arm64/kernel/insn.c) | 249
-rw-r--r-- arch/arm64/lib/kasan_sw_tags.S | 76
-rw-r--r-- arch/arm64/lib/memchr.S | 65
-rw-r--r-- arch/arm64/lib/memcmp.S | 346
-rw-r--r-- arch/arm64/lib/memcpy.S | 272
-rw-r--r-- arch/arm64/lib/memmove.S | 189
-rw-r--r-- arch/arm64/lib/mte.S | 20
-rw-r--r-- arch/arm64/lib/strcmp.S | 289
-rw-r--r-- arch/arm64/lib/strlen.S | 258
-rw-r--r-- arch/arm64/lib/strncmp.S | 406
-rw-r--r-- arch/arm64/lib/uaccess_flushcache.c | 4
-rw-r--r-- arch/arm64/mm/cache.S | 158
-rw-r--r-- arch/arm64/mm/context.c | 6
-rw-r--r-- arch/arm64/mm/fault.c | 50
-rw-r--r-- arch/arm64/mm/flush.c | 29
-rw-r--r-- arch/arm64/mm/init.c | 7
-rw-r--r-- arch/arm64/mm/mmu.c | 16
-rw-r--r-- arch/arm64/mm/proc.S | 24
-rw-r--r-- arch/arm64/mm/ptdump.c | 4
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 20
-rw-r--r-- arch/arm64/tools/cpucaps | 3
-rw-r--r-- arch/csky/include/asm/cmpxchg.h | 8
-rw-r--r-- arch/csky/kernel/asm-offsets.c | 1
-rw-r--r-- arch/csky/kernel/probes/kprobes.c | 17
-rw-r--r-- arch/csky/kernel/smp.c | 1
-rw-r--r-- arch/csky/kernel/stacktrace.c | 2
-rw-r--r-- arch/h8300/include/asm/Kbuild | 1
-rw-r--r-- arch/h8300/include/asm/atomic.h | 97
-rw-r--r-- arch/h8300/include/asm/cmpxchg.h | 66
-rw-r--r-- arch/h8300/kernel/asm-offsets.c | 1
-rw-r--r-- arch/h8300/kernel/process.c | 2
-rw-r--r-- arch/h8300/kernel/setup.c | 2
-rw-r--r-- arch/hexagon/include/asm/atomic.h | 28
-rw-r--r-- arch/hexagon/include/asm/cmpxchg.h | 4
-rw-r--r-- arch/hexagon/kernel/process.c | 2
-rw-r--r-- arch/ia64/Kconfig | 2
-rw-r--r-- arch/ia64/include/asm/atomic.h | 74
-rw-r--r-- arch/ia64/include/asm/cmpxchg.h | 16
-rw-r--r-- arch/ia64/include/asm/page.h | 6
-rw-r--r-- arch/ia64/include/asm/pal.h | 2
-rw-r--r-- arch/ia64/include/asm/spinlock.h | 2
-rw-r--r-- arch/ia64/include/asm/uv/uv_hub.h | 2
-rw-r--r-- arch/ia64/include/uapi/asm/cmpxchg.h | 10
-rw-r--r-- arch/ia64/kernel/efi_stub.S | 2
-rw-r--r-- arch/ia64/kernel/kprobes.c | 16
-rw-r--r-- arch/ia64/kernel/mca.c | 2
-rw-r--r-- arch/ia64/kernel/mca_drv.c | 2
-rw-r--r-- arch/ia64/kernel/process.c | 4
-rw-r--r-- arch/ia64/kernel/ptrace.c | 8
-rw-r--r-- arch/ia64/kernel/smpboot.c | 1
-rw-r--r-- arch/ia64/kernel/topology.c | 5
-rw-r--r-- arch/ia64/mm/numa.c | 5
-rw-r--r-- arch/m68k/Kconfig.cpu | 10
-rw-r--r-- arch/m68k/Kconfig.machine | 3
-rw-r--r-- arch/m68k/Makefile | 3
-rw-r--r-- arch/m68k/atari/config.c | 12
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 15
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 5
-rw-r--r-- arch/m68k/configs/atari_defconfig | 13
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 5
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 5
-rw-r--r-- arch/m68k/configs/mac_defconfig | 14
-rw-r--r-- arch/m68k/configs/multi_defconfig | 21
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 5
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 5
-rw-r--r-- arch/m68k/configs/q40_defconfig | 13
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 5
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 5
-rw-r--r-- arch/m68k/emu/nfblock.c | 20
-rw-r--r-- arch/m68k/include/asm/atomic.h | 60
-rw-r--r-- arch/m68k/include/asm/cmpxchg.h | 10
-rw-r--r-- arch/m68k/include/asm/mmu_context.h | 2
-rw-r--r-- arch/m68k/include/asm/mmzone.h | 10
-rw-r--r-- arch/m68k/include/asm/page.h | 2
-rw-r--r-- arch/m68k/include/asm/page_mm.h | 35
-rw-r--r-- arch/m68k/include/asm/page_no.h | 6
-rw-r--r-- arch/m68k/include/asm/tlbflush.h | 2
-rw-r--r-- arch/m68k/kernel/dma.c | 3
-rw-r--r-- arch/m68k/kernel/process.c | 2
-rw-r--r-- arch/m68k/kernel/sys_m68k.c | 4
-rw-r--r-- arch/m68k/mac/config.c | 24
-rw-r--r-- arch/m68k/mm/init.c | 20
-rw-r--r-- arch/m68k/q40/config.c | 37
-rw-r--r-- arch/microblaze/Makefile | 2
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 1
-rw-r--r-- arch/microblaze/include/asm/atomic.h | 28
-rw-r--r-- arch/microblaze/include/asm/cmpxchg.h | 9
-rw-r--r-- arch/microblaze/include/asm/page.h | 3
-rw-r--r-- arch/microblaze/kernel/asm-offsets.c | 1
-rw-r--r-- arch/mips/Kconfig | 2
-rw-r--r-- arch/mips/alchemy/board-xxs1500.c | 1
-rw-r--r-- arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi | 46
-rw-r--r-- arch/mips/boot/dts/loongson/ls7a-pch.dtsi | 6
-rw-r--r-- arch/mips/include/asm/atomic.h | 55
-rw-r--r-- arch/mips/include/asm/cmpxchg.h | 22
-rw-r--r-- arch/mips/include/asm/irq.h | 1
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 9
-rw-r--r-- arch/mips/include/asm/mips-boards/launch.h | 5
-rw-r--r-- arch/mips/include/asm/mmzone.h | 8
-rw-r--r-- arch/mips/include/asm/page.h | 2
-rw-r--r-- arch/mips/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/mips/kernel/asm-offsets.c | 1
-rw-r--r-- arch/mips/kernel/cmpxchg.c | 4
-rw-r--r-- arch/mips/kernel/kprobes.c | 3
-rw-r--r-- arch/mips/kernel/process.c | 2
-rw-r--r-- arch/mips/kernel/smp.c | 1
-rw-r--r-- arch/mips/kernel/traps.c | 4
-rw-r--r-- arch/mips/kvm/Makefile | 2
-rw-r--r-- arch/mips/kvm/mips.c | 90
-rw-r--r-- arch/mips/lantiq/xway/dma.c | 1
-rw-r--r-- arch/mips/lib/mips-atomic.c | 12
-rw-r--r-- arch/mips/mm/cache.c | 30
-rw-r--r-- arch/mips/mm/init.c | 7
-rw-r--r-- arch/mips/pci/pci-rt3883.c | 1
-rw-r--r-- arch/mips/pci/pci-xtalk-bridge.c | 1
-rw-r--r-- arch/mips/ralink/of.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 1
-rw-r--r-- arch/mips/sgi-ip30/ip30-irq.c | 1
-rw-r--r-- arch/nds32/include/asm/memory.h | 6
-rw-r--r-- arch/nds32/kernel/process.c | 2
-rw-r--r-- arch/nios2/include/asm/irq.h | 1
-rw-r--r-- arch/nios2/kernel/irq.c | 1
-rw-r--r-- arch/nios2/kernel/process.c | 2
-rw-r--r-- arch/openrisc/include/asm/atomic.h | 42
-rw-r--r-- arch/openrisc/include/asm/cmpxchg.h | 4
-rw-r--r-- arch/openrisc/include/asm/tlbflush.h | 2
-rw-r--r-- arch/openrisc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/openrisc/kernel/smp.c | 2
-rw-r--r-- arch/parisc/include/asm/atomic.h | 34
-rw-r--r-- arch/parisc/include/asm/cmpxchg.h | 14
-rw-r--r-- arch/parisc/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/parisc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/parisc/kernel/process.c | 4
-rw-r--r-- arch/parisc/kernel/smp.c | 1
-rw-r--r-- arch/powerpc/Kconfig | 2
-rw-r--r-- arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 8
-rw-r--r-- arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 16
-rw-r--r-- arch/powerpc/include/asm/asm-prototypes.h | 3
-rw-r--r-- arch/powerpc/include/asm/atomic.h | 140
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 4
-rw-r--r-- arch/powerpc/include/asm/cmpxchg.h | 30
-rw-r--r-- arch/powerpc/include/asm/cputhreads.h | 30
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 13
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 4
-rw-r--r-- arch/powerpc/include/asm/irq.h | 5
-rw-r--r-- arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 3
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 8
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 22
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 18
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 18
-rw-r--r-- arch/powerpc/include/asm/mmzone.h | 4
-rw-r--r-- arch/powerpc/include/asm/pte-walk.h | 29
-rw-r--r-- arch/powerpc/include/asm/qspinlock.h | 2
-rw-r--r-- arch/powerpc/include/asm/time.h | 12
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/powerpc/kernel/eeh.c | 23
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 250
-rw-r--r-- arch/powerpc/kernel/io-workarounds.c | 16
-rw-r--r-- arch/powerpc/kernel/iommu.c | 11
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 21
-rw-r--r-- arch/powerpc/kernel/mce.c | 1
-rw-r--r-- arch/powerpc/kernel/process.c | 4
-rw-r--r-- arch/powerpc/kernel/security.c | 5
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 2
-rw-r--r-- arch/powerpc/kernel/signal_64.c | 9
-rw-r--r-- arch/powerpc/kernel/smp.c | 3
-rw-r--r-- arch/powerpc/kernel/time.c | 10
-rw-r--r-- arch/powerpc/kexec/core.c | 4
-rw-r--r-- arch/powerpc/kvm/Makefile | 4
-rw-r--r-- arch/powerpc/kvm/book3s.c | 108
-rw-r--r-- arch/powerpc/kvm/book3s_64_entry.S | 416
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 27
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 12
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 817
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 137
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 9
-rw-r--r-- arch/powerpc/kvm/book3s_hv_nested.c | 122
-rw-r--r-- arch/powerpc/kvm/book3s_hv_p9_entry.c | 508
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 29
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xics.c | 15
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 689
-rw-r--r-- arch/powerpc/kvm/book3s_hv_uvmem.c | 3
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_pr_papr.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_segment.S | 3
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 114
-rw-r--r-- arch/powerpc/kvm/book3s_xive.h | 7
-rw-r--r-- arch/powerpc/kvm/book3s_xive_native.c | 11
-rw-r--r-- arch/powerpc/kvm/booke.c | 76
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 3
-rw-r--r-- arch/powerpc/mm/Makefile | 2
-rw-r--r-- arch/powerpc/mm/book3s64/radix_pgtable.c | 33
-rw-r--r-- arch/powerpc/mm/book3s64/radix_tlb.c | 202
-rw-r--r-- arch/powerpc/mm/mem.c | 5
-rw-r--r-- arch/powerpc/mm/mmu_context.c | 4
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/cell/pmu.c | 1
-rw-r--r-- arch/powerpc/platforms/embedded6xx/flipper-pic.c | 1
-rw-r--r-- arch/powerpc/platforms/powermac/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/powernv/idle.c | 52
-rw-r--r-- arch/powerpc/platforms/ps3/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/ps3/interrupt.c | 5
-rw-r--r-- arch/powerpc/platforms/pseries/ibmebus.c | 1
-rw-r--r-- arch/powerpc/sysdev/ehv_pic.c | 1
-rw-r--r-- arch/powerpc/sysdev/fsl_mpic_err.c | 1
-rw-r--r-- arch/powerpc/sysdev/i8259.c | 3
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 2
-rw-r--r-- arch/powerpc/sysdev/tsi108_pci.c | 3
-rw-r--r-- arch/powerpc/sysdev/xics/icp-hv.c | 1
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 1
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 2
-rw-r--r-- arch/powerpc/sysdev/xive/Kconfig | 1
-rw-r--r-- arch/powerpc/xmon/xmon.c | 13
-rw-r--r-- arch/riscv/Kconfig | 21
-rw-r--r-- arch/riscv/Kconfig.socs | 1
-rw-r--r-- arch/riscv/Makefile | 11
-rw-r--r-- arch/riscv/boot/dts/microchip/Makefile | 1
-rw-r--r-- arch/riscv/boot/dts/sifive/Makefile | 1
-rw-r--r-- arch/riscv/boot/dts/sifive/fu740-c000.dtsi | 2
-rw-r--r-- arch/riscv/errata/sifive/Makefile | 2
-rw-r--r-- arch/riscv/include/asm/alternative-macros.h | 4
-rw-r--r-- arch/riscv/include/asm/atomic.h | 128
-rw-r--r-- arch/riscv/include/asm/cmpxchg.h | 34
-rw-r--r-- arch/riscv/include/asm/kexec.h | 4
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 5
-rw-r--r-- arch/riscv/kernel/machine_kexec.c | 11
-rw-r--r-- arch/riscv/kernel/probes/kprobes.c | 19
-rw-r--r-- arch/riscv/kernel/setup.c | 4
-rw-r--r-- arch/riscv/kernel/smpboot.c | 1
-rw-r--r-- arch/riscv/kernel/stacktrace.c | 16
-rw-r--r-- arch/riscv/kernel/traps.c | 13
-rw-r--r-- arch/riscv/kernel/vmlinux-xip.lds.S | 15
-rw-r--r-- arch/riscv/mm/init.c | 8
-rw-r--r-- arch/riscv/mm/kasan_init.c | 10
-rw-r--r-- arch/s390/Kconfig | 3
-rw-r--r-- arch/s390/include/asm/atomic.h | 2
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 9
-rw-r--r-- arch/s390/include/asm/page.h | 6
-rw-r--r-- arch/s390/include/asm/pgtable.h | 2
-rw-r--r-- arch/s390/include/asm/preempt.h | 4
-rw-r--r-- arch/s390/include/asm/qdio.h | 4
-rw-r--r-- arch/s390/include/asm/stacktrace.h | 18
-rw-r--r-- arch/s390/kernel/entry.S | 5
-rw-r--r-- arch/s390/kernel/kprobes.c | 17
-rw-r--r-- arch/s390/kernel/process.c | 2
-rw-r--r-- arch/s390/kernel/signal.c | 1
-rw-r--r-- arch/s390/kernel/smp.c | 1
-rw-r--r-- arch/s390/kernel/topology.c | 12
-rw-r--r-- arch/s390/kvm/Makefile | 3
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 254
-rw-r--r-- arch/s390/kvm/pv.c | 7
-rw-r--r-- arch/s390/mm/fault.c | 2
-rw-r--r-- arch/s390/tools/gen_facilities.c | 4
-rw-r--r-- arch/sh/include/asm/atomic-grb.h | 6
-rw-r--r-- arch/sh/include/asm/atomic-irq.h | 6
-rw-r--r-- arch/sh/include/asm/atomic-llsc.h | 6
-rw-r--r-- arch/sh/include/asm/atomic.h | 8
-rw-r--r-- arch/sh/include/asm/cmpxchg.h | 4
-rw-r--r-- arch/sh/include/asm/mmzone.h | 4
-rw-r--r-- arch/sh/kernel/kprobes.c | 17
-rw-r--r-- arch/sh/kernel/process_32.c | 2
-rw-r--r-- arch/sh/kernel/smp.c | 2
-rw-r--r-- arch/sh/kernel/topology.c | 2
-rw-r--r-- arch/sh/mm/Kconfig | 2
-rw-r--r-- arch/sh/mm/init.c | 2
-rw-r--r-- arch/sparc/Kconfig | 2
-rw-r--r-- arch/sparc/include/asm/atomic_32.h | 38
-rw-r--r-- arch/sparc/include/asm/atomic_64.h | 36
-rw-r--r-- arch/sparc/include/asm/cmpxchg_32.h | 12
-rw-r--r-- arch/sparc/include/asm/cmpxchg_64.h | 12
-rw-r--r-- arch/sparc/include/asm/mmzone.h | 4
-rw-r--r-- arch/sparc/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/sparc/kernel/kprobes.c | 17
-rw-r--r-- arch/sparc/kernel/process_32.c | 3
-rw-r--r-- arch/sparc/kernel/process_64.c | 3
-rw-r--r-- arch/sparc/kernel/smp_32.c | 1
-rw-r--r-- arch/sparc/kernel/smp_64.c | 5
-rw-r--r-- arch/sparc/lib/atomic32.c | 24
-rw-r--r-- arch/sparc/lib/atomic_64.S | 42
-rw-r--r-- arch/sparc/mm/init_64.c | 12
-rw-r--r-- arch/um/kernel/process.c | 2
-rw-r--r-- arch/um/kernel/skas/syscall.c | 2
-rw-r--r-- arch/x86/Kconfig | 36
-rw-r--r-- arch/x86/Makefile | 10
-rw-r--r-- arch/x86/boot/.gitignore | 1
-rw-r--r-- arch/x86/boot/Makefile | 44
-rw-r--r-- arch/x86/boot/genimage.sh | 303
-rw-r--r-- arch/x86/boot/mtools.conf.in | 3
-rw-r--r-- arch/x86/crypto/curve25519-x86_64.c | 2
-rw-r--r-- arch/x86/entry/Makefile | 10
-rw-r--r-- arch/x86/entry/calling.h | 45
-rw-r--r-- arch/x86/entry/common.c | 92
-rw-r--r-- arch/x86/entry/entry_64.S | 9
-rw-r--r-- arch/x86/entry/syscall_32.c | 20
-rw-r--r-- arch/x86/entry/syscall_64.c | 17
-rw-r--r-- arch/x86/entry/syscall_x32.c | 35
-rw-r--r-- arch/x86/entry/syscalls/Makefile | 38
-rw-r--r-- arch/x86/entry/syscalls/syscallhdr.sh | 35
-rw-r--r-- arch/x86/entry/syscalls/syscalltbl.sh | 46
-rw-r--r-- arch/x86/events/core.c | 28
-rw-r--r-- arch/x86/events/intel/core.c | 23
-rw-r--r-- arch/x86/events/intel/ds.c | 20
-rw-r--r-- arch/x86/events/intel/lbr.c | 3
-rw-r--r-- arch/x86/events/intel/uncore.c | 4
-rw-r--r-- arch/x86/events/intel/uncore.h | 1
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c | 182
-rw-r--r-- arch/x86/events/perf_event.h | 1
-rw-r--r-- arch/x86/events/rapl.c | 6
-rw-r--r-- arch/x86/hyperv/hv_init.c | 47
-rw-r--r-- arch/x86/ia32/ia32_aout.c | 4
-rw-r--r-- arch/x86/include/asm/Kbuild | 1
-rw-r--r-- arch/x86/include/asm/apic.h | 1
-rw-r--r-- arch/x86/include/asm/asm.h | 27
-rw-r--r-- arch/x86/include/asm/atomic.h | 2
-rw-r--r-- arch/x86/include/asm/barrier.h | 7
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r-- arch/x86/include/asm/crash.h | 6
-rw-r--r-- arch/x86/include/asm/desc.h | 24
-rw-r--r-- arch/x86/include/asm/disabled-features.h | 7
-rw-r--r-- arch/x86/include/asm/fpu/api.h | 6
-rw-r--r-- arch/x86/include/asm/fpu/internal.h | 50
-rw-r--r-- arch/x86/include/asm/hyperv-tlfs.h | 19
-rw-r--r-- arch/x86/include/asm/idtentry.h | 33
-rw-r--r-- arch/x86/include/asm/intel-family.h | 3
-rw-r--r-- arch/x86/include/asm/irq_vectors.h | 7
-rw-r--r-- arch/x86/include/asm/jump_label.h | 79
-rw-r--r-- arch/x86/include/asm/kvm-x86-ops.h | 10
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 170
-rw-r--r-- arch/x86/include/asm/mce.h | 13
-rw-r--r-- arch/x86/include/asm/msr-index.h | 4
-rw-r--r-- arch/x86/include/asm/nops.h | 24
-rw-r--r-- arch/x86/include/asm/page.h | 6
-rw-r--r-- arch/x86/include/asm/page_64.h | 2
-rw-r--r-- arch/x86/include/asm/perf_event.h | 1
-rw-r--r-- arch/x86/include/asm/preempt.h | 2
-rw-r--r-- arch/x86/include/asm/processor.h | 1
-rw-r--r-- arch/x86/include/asm/sev-common.h | 16
-rw-r--r-- arch/x86/include/asm/sgx.h | 2
-rw-r--r-- arch/x86/include/asm/stackprotector.h | 2
-rw-r--r-- arch/x86/include/asm/svm.h | 9
-rw-r--r-- arch/x86/include/asm/syscall.h | 13
-rw-r--r-- arch/x86/include/asm/syscall_wrapper.h | 10
-rw-r--r-- arch/x86/include/asm/thermal.h | 4
-rw-r--r-- arch/x86/include/asm/unistd.h | 8
-rw-r--r-- arch/x86/include/uapi/asm/hwcap2.h | 6
-rw-r--r-- arch/x86/include/uapi/asm/kvm.h | 13
-rw-r--r-- arch/x86/include/uapi/asm/kvm_para.h | 13
-rw-r--r-- arch/x86/include/uapi/asm/svm.h | 3
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 118
-rw-r--r-- arch/x86/kernel/acpi/cstate.c | 3
-rw-r--r-- arch/x86/kernel/alternative.c | 70
-rw-r--r-- arch/x86/kernel/amd_nb.c | 3
-rw-r--r-- arch/x86/kernel/apic/apic.c | 1
-rw-r--r-- arch/x86/kernel/apic/vector.c | 20
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 4
-rw-r--r-- arch/x86/kernel/cpu/common.c | 40
-rw-r--r-- arch/x86/kernel/cpu/cpu.h | 2
-rw-r--r-- arch/x86/kernel/cpu/hygon.c | 4
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 46
-rw-r--r-- arch/x86/kernel/cpu/mce/amd.c | 55
-rw-r--r-- arch/x86/kernel/cpu/mce/apei.c | 3
-rw-r--r-- arch/x86/kernel/cpu/mce/core.c | 13
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 12
-rw-r--r-- arch/x86/kernel/cpu/perfctr-watchdog.c | 4
-rw-r--r-- arch/x86/kernel/cpu/resctrl/internal.h | 21
-rw-r--r-- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 10
-rw-r--r-- arch/x86/kernel/cpu/sgx/encl.c | 2
-rw-r--r-- arch/x86/kernel/cpu/sgx/encl.h | 4
-rw-r--r-- arch/x86/kernel/cpu/sgx/virt.c | 1
-rw-r--r-- arch/x86/kernel/cpu/tsx.c | 37
-rw-r--r-- arch/x86/kernel/crash.c | 13
-rw-r--r-- arch/x86/kernel/fpu/signal.c | 80
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 98
-rw-r--r-- arch/x86/kernel/head_64.S | 6
-rw-r--r-- arch/x86/kernel/idt.c | 45
-rw-r--r-- arch/x86/kernel/jump_label.c | 81
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 20
-rw-r--r-- arch/x86/kernel/machine_kexec_32.c | 15
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 33
-rw-r--r-- arch/x86/kernel/process.c | 4
-rw-r--r-- arch/x86/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/kernel/reboot.c | 2
-rw-r--r-- arch/x86/kernel/setup.c | 69
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 6
-rw-r--r-- arch/x86/kernel/sev.c | 201
-rw-r--r-- arch/x86/kernel/signal.c | 4
-rw-r--r-- arch/x86/kernel/smpboot.c | 4
-rw-r--r-- arch/x86/kernel/traps.c | 9
-rw-r--r-- arch/x86/kernel/tsc.c | 4
-rw-r--r-- arch/x86/kernel/umip.c | 10
-rw-r--r-- arch/x86/kvm/Kconfig | 6
-rw-r--r-- arch/x86/kvm/Makefile | 11
-rw-r--r-- arch/x86/kvm/cpuid.c | 7
-rw-r--r-- arch/x86/kvm/debugfs.c | 11
-rw-r--r-- arch/x86/kvm/emulate.c | 171
-rw-r--r-- arch/x86/kvm/fpu.h | 140
-rw-r--r-- arch/x86/kvm/hyperv.c | 448
-rw-r--r-- arch/x86/kvm/hyperv.h | 1
-rw-r--r-- arch/x86/kvm/kvm_cache_regs.h | 15
-rw-r--r-- arch/x86/kvm/kvm_emulate.h | 13
-rw-r--r-- arch/x86/kvm/kvm_onhyperv.c | 93
-rw-r--r-- arch/x86/kvm/kvm_onhyperv.h | 32
-rw-r--r-- arch/x86/kvm/lapic.c | 48
-rw-r--r-- arch/x86/kvm/lapic.h | 2
-rw-r--r-- arch/x86/kvm/mmu.h | 30
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 894
-rw-r--r-- arch/x86/kvm/mmu/mmu_internal.h | 15
-rw-r--r-- arch/x86/kvm/mmu/mmutrace.h | 2
-rw-r--r-- arch/x86/kvm/mmu/page_track.c | 2
-rw-r--r-- arch/x86/kvm/mmu/paging_tmpl.h | 82
-rw-r--r-- arch/x86/kvm/mmu/spte.c | 22
-rw-r--r-- arch/x86/kvm/mmu/spte.h | 32
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.c | 51
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.h | 35
-rw-r--r-- arch/x86/kvm/svm/avic.c | 26
-rw-r--r-- arch/x86/kvm/svm/nested.c | 91
-rw-r--r-- arch/x86/kvm/svm/sev.c | 26
-rw-r--r-- arch/x86/kvm/svm/svm.c | 108
-rw-r--r-- arch/x86/kvm/svm/svm.h | 24
-rw-r--r-- arch/x86/kvm/svm/svm_onhyperv.c | 41
-rw-r--r-- arch/x86/kvm/svm/svm_onhyperv.h | 130
-rw-r--r-- arch/x86/kvm/trace.h | 8
-rw-r--r-- arch/x86/kvm/vmx/capabilities.h | 4
-rw-r--r-- arch/x86/kvm/vmx/evmcs.c | 3
-rw-r--r-- arch/x86/kvm/vmx/evmcs.h | 8
-rw-r--r-- arch/x86/kvm/vmx/nested.c | 418
-rw-r--r-- arch/x86/kvm/vmx/nested.h | 11
-rw-r--r-- arch/x86/kvm/vmx/posted_intr.c | 14
-rw-r--r-- arch/x86/kvm/vmx/posted_intr.h | 1
-rw-r--r-- arch/x86/kvm/vmx/vmcs.h | 13
-rw-r--r-- arch/x86/kvm/vmx/vmcs12.c | 1
-rw-r--r-- arch/x86/kvm/vmx/vmcs12.h | 10
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 232
-rw-r--r-- arch/x86/kvm/vmx/vmx.h | 21
-rw-r--r-- arch/x86/kvm/x86.c | 957
-rw-r--r-- arch/x86/kvm/x86.h | 10
-rw-r--r-- arch/x86/lib/insn-eval.c | 30
-rw-r--r-- arch/x86/lib/retpoline.S | 4
-rw-r--r-- arch/x86/mm/fault.c | 8
-rw-r--r-- arch/x86/mm/init_32.c | 4
-rw-r--r-- arch/x86/mm/ioremap.c | 4
-rw-r--r-- arch/x86/mm/mem_encrypt_identity.c | 11
-rw-r--r-- arch/x86/mm/numa.c | 8
-rw-r--r-- arch/x86/mm/pkeys.c | 4
-rw-r--r-- arch/x86/mm/tlb.c | 10
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 46
-rw-r--r-- arch/x86/pci/fixup.c | 44
-rw-r--r-- arch/x86/platform/efi/efi.c | 2
-rw-r--r-- arch/x86/platform/efi/quirks.c | 12
-rw-r--r-- arch/x86/realmode/Makefile | 1
-rw-r--r-- arch/x86/realmode/init.c | 14
-rw-r--r-- arch/x86/um/sys_call_table_32.c | 14
-rw-r--r-- arch/x86/um/sys_call_table_64.c | 15
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 2
-rw-r--r-- arch/xtensa/include/asm/atomic.h | 26
-rw-r--r-- arch/xtensa/include/asm/cmpxchg.h | 14
-rw-r--r-- arch/xtensa/include/asm/page.h | 4
-rw-r--r-- arch/xtensa/include/asm/tlbflush.h | 4
-rw-r--r-- arch/xtensa/kernel/process.c | 2
-rw-r--r-- arch/xtensa/kernel/smp.c | 1
-rw-r--r-- arch/xtensa/platforms/iss/simdisk.c | 29
-rw-r--r-- block/Kconfig | 19
-rw-r--r-- block/Kconfig.iosched | 6
-rw-r--r-- block/Makefile | 6
-rw-r--r-- block/bfq-iosched.c | 115
-rw-r--r-- block/bio.c | 13
-rw-r--r-- block/blk-cgroup.c | 41
-rw-r--r-- block/blk-core.c | 22
-rw-r--r-- block/blk-flush.c | 3
-rw-r--r-- block/blk-ioprio.c | 262
-rw-r--r-- block/blk-ioprio.h | 19
-rw-r--r-- block/blk-lib.c | 1
-rw-r--r-- block/blk-merge.c | 27
-rw-r--r-- block/blk-mq-debugfs.c | 15
-rw-r--r-- block/blk-mq-sched.c | 99
-rw-r--r-- block/blk-mq-sched.h | 5
-rw-r--r-- block/blk-mq-tag.c | 114
-rw-r--r-- block/blk-mq-tag.h | 15
-rw-r--r-- block/blk-mq.c | 212
-rw-r--r-- block/blk-mq.h | 14
-rw-r--r-- block/blk-rq-qos.c | 4
-rw-r--r-- block/blk-rq-qos.h | 38
-rw-r--r-- block/blk-sysfs.c | 45
-rw-r--r-- block/blk-wbt.c | 12
-rw-r--r-- block/blk-wbt.h | 1
-rw-r--r-- block/blk.h | 17
-rw-r--r-- block/disk-events.c | 469
-rw-r--r-- block/elevator.c | 17
-rw-r--r-- block/genhd.c | 701
-rw-r--r-- block/ioctl.c | 2
-rw-r--r-- block/mq-deadline-cgroup.c | 126
-rw-r--r-- block/mq-deadline-cgroup.h | 114
-rw-r--r-- block/mq-deadline-main.c | 1175
-rw-r--r-- block/mq-deadline.c | 815
-rw-r--r-- block/partitions/core.c | 129
-rw-r--r-- block/partitions/msdos.c | 2
-rw-r--r-- crypto/af_alg.c | 2
-rw-r--r-- crypto/algapi.c | 18
-rw-r--r-- crypto/algboss.c | 31
-rw-r--r-- crypto/async_tx/async_xor.c | 3
-rw-r--r-- crypto/drbg.c | 12
-rw-r--r-- crypto/ecdh.c | 49
-rw-r--r-- crypto/internal.h | 12
-rw-r--r-- crypto/khazad.c | 2
-rw-r--r-- crypto/shash.c | 18
-rw-r--r-- crypto/sm2.c | 24
-rw-r--r-- crypto/tcrypt.c | 36
-rw-r--r-- crypto/testmgr.c | 10
-rw-r--r-- crypto/testmgr.h | 71
-rw-r--r-- crypto/wp512.c | 40
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 3
-rw-r--r-- drivers/accessibility/braille/braille_console.c | 1
-rw-r--r-- drivers/acpi/Kconfig | 5
-rw-r--r-- drivers/acpi/Makefile | 6
-rw-r--r-- drivers/acpi/acpi_apd.c | 1
-rw-r--r-- drivers/acpi/acpi_cmos_rtc.c | 6
-rw-r--r-- drivers/acpi/acpi_configfs.c | 5
-rw-r--r-- drivers/acpi/acpi_fpdt.c | 4
-rw-r--r-- drivers/acpi/acpi_ipmi.c | 7
-rw-r--r-- drivers/acpi/acpi_lpss.c | 13
-rw-r--r-- drivers/acpi/acpi_video.c | 6
-rw-r--r-- drivers/acpi/acpica/acutils.h | 2
-rw-r--r-- drivers/acpi/acpica/exfield.c | 8
-rw-r--r-- drivers/acpi/acpica/exserial.c | 12
-rw-r--r-- drivers/acpi/acpica/nsrepair2.c | 7
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 8
-rw-r--r-- drivers/acpi/acpica/utprint.c | 2
-rw-r--r-- drivers/acpi/acpica/utuuid.c | 41
-rw-r--r-- drivers/acpi/apei/einj.c | 2
-rw-r--r-- drivers/acpi/apei/ghes.c | 81
-rw-r--r-- drivers/acpi/arm64/iort.c | 2
-rw-r--r-- drivers/acpi/bgrt.c | 57
-rw-r--r-- drivers/acpi/blacklist.c | 9
-rw-r--r-- drivers/acpi/bus.c | 44
-rw-r--r-- drivers/acpi/device_pm.c | 70
-rw-r--r-- drivers/acpi/device_sysfs.c | 7
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c | 2
-rw-r--r-- drivers/acpi/ec.c | 39
-rw-r--r-- drivers/acpi/event.c | 8
-rw-r--r-- drivers/acpi/fan.c | 7
-rw-r--r-- drivers/acpi/fan.h | 13
-rw-r--r-- drivers/acpi/glue.c | 29
-rw-r--r-- drivers/acpi/internal.h | 15
-rw-r--r-- drivers/acpi/nvs.c | 32
-rw-r--r-- drivers/acpi/osl.c | 11
-rw-r--r-- drivers/acpi/pci_root.c | 4
-rw-r--r-- drivers/acpi/pmic/Kconfig | 2
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 2
-rw-r--r-- drivers/acpi/power.c | 103
-rw-r--r-- drivers/acpi/pptt.c | 18
-rw-r--r-- drivers/acpi/prmt.c | 303
-rw-r--r-- drivers/acpi/processor_idle.c | 40
-rw-r--r-- drivers/acpi/processor_perflib.c | 38
-rw-r--r-- drivers/acpi/processor_thermal.c | 2
-rw-r--r-- drivers/acpi/processor_throttling.c | 75
-rw-r--r-- drivers/acpi/reboot.c | 4
-rw-r--r-- drivers/acpi/resource.c | 9
-rw-r--r-- drivers/acpi/sbs.c | 12
-rw-r--r-- drivers/acpi/sbshc.c | 9
-rw-r--r-- drivers/acpi/scan.c | 316
-rw-r--r-- drivers/acpi/sleep.c | 24
-rw-r--r-- drivers/acpi/sysfs.c | 85
-rw-r--r-- drivers/acpi/tables.c | 9
-rw-r--r-- drivers/acpi/utils.c | 14
-rw-r--r-- drivers/acpi/x86/s2idle.c | 145
-rw-r--r-- drivers/acpi/x86/utils.c | 25
-rw-r--r-- drivers/ata/Kconfig | 6
-rw-r--r-- drivers/ata/ahci.c | 4
-rw-r--r-- drivers/ata/ahci.h | 7
-rw-r--r-- drivers/ata/ahci_sunxi.c | 2
-rw-r--r-- drivers/ata/pata_atiixp.c | 3
-rw-r--r-- drivers/ata/pata_cs5520.c | 3
-rw-r--r-- drivers/ata/pata_cs5530.c | 3
-rw-r--r-- drivers/ata/pata_cypress.c | 10
-rw-r--r-- drivers/ata/pata_ep93xx.c | 2
-rw-r--r-- drivers/ata/pata_falcon.c | 62
-rw-r--r-- drivers/ata/pata_macio.c | 5
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 5
-rw-r--r-- drivers/ata/pata_rb532_cf.c | 8
-rw-r--r-- drivers/ata/pata_sc1200.c | 3
-rw-r--r-- drivers/ata/pata_serverworks.c | 3
-rw-r--r-- drivers/ata/sata_fsl.c | 2
-rw-r--r-- drivers/ata/sata_highbank.c | 6
-rw-r--r-- drivers/ata/sata_mv.c | 6
-rw-r--r-- drivers/ata/sata_nv.c | 12
-rw-r--r-- drivers/ata/sata_sil24.c | 5
-rw-r--r-- drivers/atm/fore200e.c | 1
-rw-r--r-- drivers/atm/iphase.c | 13
-rw-r--r-- drivers/atm/iphase.h | 1
-rw-r--r-- drivers/atm/nicstar.c | 26
-rw-r--r-- drivers/atm/zeprom.h | 2
-rw-r--r-- drivers/base/core.c | 75
-rw-r--r-- drivers/base/memory.c | 6
-rw-r--r-- drivers/base/node.c | 18
-rw-r--r-- drivers/base/power/domain.c | 64
-rw-r--r-- drivers/base/power/domain_governor.c | 1
-rw-r--r-- drivers/base/power/runtime.c | 18
-rw-r--r-- drivers/base/power/wakeirq.c | 4
-rw-r--r-- drivers/base/property.c | 31
-rw-r--r-- drivers/base/regmap/Kconfig | 6
-rw-r--r-- drivers/base/regmap/Makefile | 1
-rw-r--r-- drivers/base/regmap/regmap-i2c.c | 45
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 7
-rw-r--r-- drivers/base/regmap/regmap-mdio.c | 116
-rw-r--r-- drivers/base/regmap/regmap.c | 15
-rw-r--r-- drivers/base/swnode.c | 16
-rw-r--r-- drivers/block/amiflop.c | 16
-rw-r--r-- drivers/block/aoe/aoeblk.c | 33
-rw-r--r-- drivers/block/aoe/aoechr.c | 4
-rw-r--r-- drivers/block/aoe/aoecmd.c | 2
-rw-r--r-- drivers/block/aoe/aoedev.c | 3
-rw-r--r-- drivers/block/ataflop.c | 16
-rw-r--r-- drivers/block/brd.c | 94
-rw-r--r-- drivers/block/drbd/drbd_main.c | 23
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 22
-rw-r--r-- drivers/block/floppy.c | 22
-rw-r--r-- drivers/block/loop.c | 322
-rw-r--r-- drivers/block/loop.h | 16
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 26
-rw-r--r-- drivers/block/n64cart.c | 8
-rw-r--r-- drivers/block/nbd.c | 53
-rw-r--r-- drivers/block/null_blk/main.c | 37
-rw-r--r-- drivers/block/paride/pcd.c | 19
-rw-r--r-- drivers/block/paride/pd.c | 30
-rw-r--r-- drivers/block/paride/pf.c | 18
-rw-r--r-- drivers/block/pktcdvd.c | 11
-rw-r--r-- drivers/block/ps3disk.c | 36
-rw-r--r-- drivers/block/ps3vram.c | 31
-rw-r--r-- drivers/block/rbd.c | 52
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 35
-rw-r--r-- drivers/block/rsxx/dev.c | 39
-rw-r--r-- drivers/block/rsxx/dma.c | 6
-rw-r--r-- drivers/block/rsxx/rsxx_priv.h | 1
-rw-r--r-- drivers/block/sunvdc.c | 50
-rw-r--r-- drivers/block/swim.c | 34
-rw-r--r-- drivers/block/swim3.c | 33
-rw-r--r-- drivers/block/sx8.c | 25
-rw-r--r-- drivers/block/virtio_blk.c | 26
-rw-r--r-- drivers/block/xen-blkfront.c | 104
-rw-r--r-- drivers/block/z2ram.c | 25
-rw-r--r-- drivers/block/zram/zram_drv.c | 37
-rw-r--r-- drivers/block/zram/zram_drv.h | 2
-rw-r--r-- drivers/bluetooth/btbcm.c | 1
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 4
-rw-r--r-- drivers/bluetooth/btmtkuart.c | 6
-rw-r--r-- drivers/bluetooth/btqca.c | 113
-rw-r--r-- drivers/bluetooth/btqca.h | 14
-rw-r--r-- drivers/bluetooth/btrtl.c | 35
-rw-r--r-- drivers/bluetooth/btrtl.h | 7
-rw-r--r-- drivers/bluetooth/btusb.c | 70
-rw-r--r-- drivers/bluetooth/hci_ag6xx.c | 1
-rw-r--r-- drivers/bluetooth/hci_h5.c | 5
-rw-r--r-- drivers/bluetooth/hci_qca.c | 118
-rw-r--r-- drivers/bluetooth/virtio_bt.c | 3
-rw-r--r-- drivers/bus/mhi/pci_generic.c | 42
-rw-r--r-- drivers/bus/ti-sysc.c | 60
-rw-r--r-- drivers/cdrom/gdrom.c | 45
-rw-r--r-- drivers/char/hw_random/Kconfig | 10
-rw-r--r-- drivers/char/hw_random/amd-rng.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 38
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 7
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 3
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 6
-rw-r--r-- drivers/char/ipmi/Kconfig | 27
-rw-r--r-- drivers/char/ipmi/Makefile | 2
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 22
-rw-r--r-- drivers/char/ipmi/kcs_bmc.c | 505
-rw-r--r-- drivers/char/ipmi/kcs_bmc.h | 92
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 633
-rw-r--r-- drivers/char/ipmi/kcs_bmc_cdev_ipmi.c | 568
-rw-r--r-- drivers/char/ipmi/kcs_bmc_client.h | 45
-rw-r--r-- drivers/char/ipmi/kcs_bmc_device.h | 22
-rw-r--r-- drivers/char/ipmi/kcs_bmc_npcm7xx.c | 92
-rw-r--r-- drivers/char/ipmi/kcs_bmc_serio.c | 157
-rw-r--r-- drivers/char/tpm/tpm1-cmd.c | 4
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 2
-rw-r--r-- drivers/char/tpm/tpm_crb.c | 2
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 6
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 25
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 3
-rw-r--r-- drivers/char/tpm/tpm_tis_i2c_cr50.c | 4
-rw-r--r-- drivers/char/tpm/tpm_tis_spi_main.c | 14
-rw-r--r-- drivers/clocksource/Kconfig | 14
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 3
-rw-r--r-- drivers/clocksource/arm_global_timer.c | 122
-rw-r--r-- drivers/clocksource/ingenic-sysost.c | 10
-rw-r--r-- drivers/clocksource/samsung_pwm_timer.c | 41
-rw-r--r-- drivers/clocksource/timer-mediatek.c | 24
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 9
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 10
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 245
-rw-r--r-- drivers/cpufreq/cpufreq.c | 11
-rw-r--r-- drivers/cpufreq/cpufreq_stats.c | 5
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 263
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 1
-rw-r--r-- drivers/cpufreq/sc520_freq.c | 1
-rw-r--r-- drivers/cpufreq/sh-cpufreq.c | 1
-rw-r--r-- drivers/cpuidle/governors/menu.c | 6
-rw-r--r-- drivers/cpuidle/governors/teo.c | 476
-rw-r--r-- drivers/crypto/Kconfig | 41
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 2
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 10
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_isr.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 21
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 16
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dmaengine.c | 3
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 4
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 6
-rw-r--r-- drivers/crypto/gemini/Makefile | 2
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-cipher.c | 387
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-core.c | 535
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-rng.c | 61
-rw-r--r-- drivers/crypto/gemini/sl3516-ce.h | 347
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 185
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 256
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 1843
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 17
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 23
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 1036
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 193
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 100
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 99
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 413
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.h | 2
-rw-r--r-- drivers/crypto/marvell/octeontx2/Makefile | 13
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 93
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.h | 36
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 23
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h | 16
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 9
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 10
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 160
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 32
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 8
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf.h3
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c49
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c43
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c17
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c31
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c4
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c4
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c2
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c2
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c4
-rw-r--r--drivers/crypto/nx/nx-sha256.c19
-rw-r--r--drivers/crypto/nx/nx-sha512.c19
-rw-r--r--drivers/crypto/nx/nx_csbcpb.h4
-rw-r--r--drivers/crypto/omap-des.c9
-rw-r--r--drivers/crypto/omap-sham.c4
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c14
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c12
-rw-r--r--drivers/crypto/qce/Makefile1
-rw-r--r--drivers/crypto/qce/aead.c847
-rw-r--r--drivers/crypto/qce/aead.h56
-rw-r--r--drivers/crypto/qce/common.c196
-rw-r--r--drivers/crypto/qce/common.h9
-rw-r--r--drivers/crypto/qce/core.c4
-rw-r--r--drivers/crypto/qce/skcipher.c19
-rw-r--r--drivers/crypto/sa2ul.c50
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c1
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/devfreq/Kconfig1
-rw-r--r--drivers/devfreq/devfreq.c1
-rw-r--r--drivers/devfreq/governor_passive.c3
-rw-r--r--drivers/devfreq/governor_userspace.c10
-rw-r--r--drivers/devfreq/imx-bus.c14
-rw-r--r--drivers/devfreq/tegra30-devfreq.c1
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c3
-rw-r--r--drivers/dma/idxd/cdev.c1
-rw-r--r--drivers/dma/idxd/init.c67
-rw-r--r--drivers/dma/ipu/ipu_irq.c2
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c27
-rw-r--r--drivers/dma/pl330.c6
-rw-r--r--drivers/dma/qcom/Kconfig1
-rw-r--r--drivers/dma/sf-pdma/Kconfig1
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/ste_dma40.c3
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c31
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c2
-rw-r--r--drivers/edac/Kconfig3
-rw-r--r--drivers/edac/aspeed_edac.c4
-rw-r--r--drivers/edac/i10nm_base.c174
-rw-r--r--drivers/edac/igen6_edac.c374
-rw-r--r--drivers/edac/mce_amd.c70
-rw-r--r--drivers/edac/pnd2_edac.c3
-rw-r--r--drivers/edac/sb_edac.c3
-rw-r--r--drivers/edac/skx_base.c3
-rw-r--r--drivers/edac/skx_common.c82
-rw-r--r--drivers/edac/skx_common.h34
-rw-r--r--drivers/edac/thunderx_edac.c4
-rw-r--r--drivers/edac/ti_edac.c1
-rw-r--r--drivers/extcon/extcon-max14577.c16
-rw-r--r--drivers/extcon/extcon-max77693.c17
-rw-r--r--drivers/extcon/extcon-max8997.c45
-rw-r--r--drivers/firewire/core-topology.c1
-rw-r--r--drivers/firmware/efi/apple-properties.c2
-rw-r--r--drivers/firmware/efi/cper.c4
-rw-r--r--drivers/firmware/efi/dev-path-parser.c49
-rw-r--r--drivers/firmware/efi/fdtparams.c3
-rw-r--r--drivers/firmware/efi/libstub/file.c2
-rw-r--r--drivers/firmware/efi/memattr.c5
-rw-r--r--drivers/firmware/psci/psci.c9
-rw-r--r--drivers/firmware/qemu_fw_cfg.c8
-rw-r--r--drivers/firmware/smccc/smccc.c4
-rw-r--r--drivers/gpio/Kconfig4
-rw-r--r--drivers/gpio/gpio-crystalcove.c10
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c2
-rw-r--r--drivers/gpio/gpio-wcove.c39
-rw-r--r--drivers/gpio/gpiolib-acpi.c61
-rw-r--r--drivers/gpio/gpiolib-cdev.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c10
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c17
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c1
-rw-r--r--drivers/gpu/drm/drm_auth.c3
-rw-r--r--drivers/gpu/drm/drm_ioctl.c9
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c71
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c9
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c44
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c155
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h5
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c41
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c70
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c30
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c54
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h12
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c89
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h43
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c48
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h11
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h112
-rw-r--r--drivers/hid/hid-a4tech.c2
-rw-r--r--drivers/hid/hid-asus.c32
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-debug.c14
-rw-r--r--drivers/hid/hid-ft260.c29
-rw-r--r--drivers/hid/hid-google-hammer.c10
-rw-r--r--drivers/hid/hid-gt683r.c1
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-input.c33
-rw-r--r--drivers/hid/hid-ite.c1
-rw-r--r--drivers/hid/hid-lg-g15.c141
-rw-r--r--drivers/hid/hid-logitech-dj.c16
-rw-r--r--drivers/hid/hid-logitech-hidpp.c13
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c47
-rw-r--r--drivers/hid/hid-quirks.c4
-rw-r--r--drivers/hid/hid-semitek.c40
-rw-r--r--drivers/hid/hid-sensor-custom.c8
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-sony.c98
-rw-r--r--drivers/hid/hid-thrustmaster.c7
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c13
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c28
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c51
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c15
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c23
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c10
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h5
-rw-r--r--drivers/hid/surface-hid/surface_hid.c2
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c10
-rw-r--r--drivers/hid/usbhid/hid-pidff.c1
-rw-r--r--drivers/hid/usbhid/usbkbd.c4
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hid/wacom_wac.h2
-rw-r--r--drivers/hv/Makefile3
-rw-r--r--drivers/hv/channel.c23
-rw-r--r--drivers/hv/connection.c4
-rw-r--r--drivers/hv/hv_balloon.c1
-rw-r--r--drivers/hv/hv_common.c66
-rw-r--r--drivers/hv/hv_fcopy.c1
-rw-r--r--drivers/hv/hv_kvp.c1
-rw-r--r--drivers/hv/hv_util.c4
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hv/ring_buffer.c95
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/bt1-pvt.c4
-rw-r--r--drivers/hwmon/corsair-cpro.c1
-rw-r--r--drivers/hwmon/corsair-psu.c14
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/hwmon.c36
-rw-r--r--drivers/hwmon/ina3221.c22
-rw-r--r--drivers/hwmon/lm70.c28
-rw-r--r--drivers/hwmon/lm75.c13
-rw-r--r--drivers/hwmon/lm90.c92
-rw-r--r--drivers/hwmon/max31722.c9
-rw-r--r--drivers/hwmon/max31790.c72
-rw-r--r--drivers/hwmon/max6621.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig34
-rw-r--r--drivers/hwmon/pmbus/Makefile3
-rw-r--r--drivers/hwmon/pmbus/adm1275.c14
-rw-r--r--drivers/hwmon/pmbus/bpa-rs600.c29
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c206
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c32
-rw-r--r--drivers/hwmon/pmbus/isl68137.c4
-rw-r--r--drivers/hwmon/pmbus/mp2888.c408
-rw-r--r--drivers/hwmon/pmbus/pim4328.c233
-rw-r--r--drivers/hwmon/pmbus/pmbus.c19
-rw-r--r--drivers/hwmon/pmbus/pmbus.h2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c151
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c94
-rw-r--r--drivers/hwmon/sch5627.c18
-rw-r--r--drivers/hwmon/sch5636.c9
-rw-r--r--drivers/hwmon/sch56xx-common.c65
-rw-r--r--drivers/hwmon/sch56xx-common.h4
-rw-r--r--drivers/hwmon/scpi-hwmon.c9
-rw-r--r--drivers/hwmon/sht4x.c296
-rw-r--r--drivers/hwmon/tps23861.c17
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-altera.c9
-rw-r--r--drivers/i2c/busses/i2c-cadence.c2
-rw-r--r--drivers/i2c/busses/i2c-cp2615.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c9
-rw-r--r--drivers/i2c/busses/i2c-icy.c1
-rw-r--r--drivers/i2c/busses/i2c-mpc.c81
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c5
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c8
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c23
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c4
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c4
-rw-r--r--drivers/i2c/i2c-core-acpi.c8
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c4
-rw-r--r--drivers/ide/Kconfig849
-rw-r--r--drivers/ide/Makefile111
-rw-r--r--drivers/ide/aec62xx.c331
-rw-r--r--drivers/ide/ali14xx.c250
-rw-r--r--drivers/ide/alim15x3.c602
-rw-r--r--drivers/ide/amd74xx.c343
-rw-r--r--drivers/ide/atiixp.c212
-rw-r--r--drivers/ide/buddha.c238
-rw-r--r--drivers/ide/cmd640.c848
-rw-r--r--drivers/ide/cmd64x.c452
-rw-r--r--drivers/ide/cs5520.c168
-rw-r--r--drivers/ide/cs5530.c295
-rw-r--r--drivers/ide/cs5535.c216
-rw-r--r--drivers/ide/cs5536.c294
-rw-r--r--drivers/ide/cy82c693.c234
-rw-r--r--drivers/ide/delkin_cb.c181
-rw-r--r--drivers/ide/dtc2278.c155
-rw-r--r--drivers/ide/falconide.c197
-rw-r--r--drivers/ide/gayle.c188
-rw-r--r--drivers/ide/hpt366.c1545
-rw-r--r--drivers/ide/ht6560b.c383
-rw-r--r--drivers/ide/icside.c692
-rw-r--r--drivers/ide/ide-4drives.c65
-rw-r--r--drivers/ide/ide-acpi.c622
-rw-r--r--drivers/ide/ide-atapi.c756
-rw-r--r--drivers/ide/ide-cd.c1858
-rw-r--r--drivers/ide/ide-cd.h123
-rw-r--r--drivers/ide/ide-cd_ioctl.c468
-rw-r--r--drivers/ide/ide-cd_verbose.c362
-rw-r--r--drivers/ide/ide-cs.c364
-rw-r--r--drivers/ide/ide-devsets.c192
-rw-r--r--drivers/ide/ide-disk.c795
-rw-r--r--drivers/ide/ide-disk.h30
-rw-r--r--drivers/ide/ide-disk_ioctl.c33
-rw-r--r--drivers/ide/ide-disk_proc.c125
-rw-r--r--drivers/ide/ide-dma-sff.c336
-rw-r--r--drivers/ide/ide-dma.c551
-rw-r--r--drivers/ide/ide-eh.c443
-rw-r--r--drivers/ide/ide-floppy.c551
-rw-r--r--drivers/ide/ide-floppy.h42
-rw-r--r--drivers/ide/ide-floppy_ioctl.c339
-rw-r--r--drivers/ide/ide-floppy_proc.c34
-rw-r--r--drivers/ide/ide-gd.c432
-rw-r--r--drivers/ide/ide-gd.h43
-rw-r--r--drivers/ide/ide-generic.c139
-rw-r--r--drivers/ide/ide-io-std.c262
-rw-r--r--drivers/ide/ide-io.c904
-rw-r--r--drivers/ide/ide-ioctls.c306
-rw-r--r--drivers/ide/ide-iops.c536
-rw-r--r--drivers/ide/ide-legacy.c59
-rw-r--r--drivers/ide/ide-lib.c146
-rw-r--r--drivers/ide/ide-park.c155
-rw-r--r--drivers/ide/ide-pci-generic.c203
-rw-r--r--drivers/ide/ide-pio-blacklist.c96
-rw-r--r--drivers/ide/ide-pm.c261
-rw-r--r--drivers/ide/ide-pnp.c92
-rw-r--r--drivers/ide/ide-probe.c1623
-rw-r--r--drivers/ide/ide-proc.c633
-rw-r--r--drivers/ide/ide-scan-pci.c113
-rw-r--r--drivers/ide/ide-sysfs.c143
-rw-r--r--drivers/ide/ide-tape.c2083
-rw-r--r--drivers/ide/ide-taskfile.c668
-rw-r--r--drivers/ide/ide-timings.c198
-rw-r--r--drivers/ide/ide-xfer-mode.c267
-rw-r--r--drivers/ide/ide.c415
-rw-r--r--drivers/ide/ide_platform.c133
-rw-r--r--drivers/ide/it8172.c165
-rw-r--r--drivers/ide/it8213.c217
-rw-r--r--drivers/ide/it821x.c715
-rw-r--r--drivers/ide/jmicron.c176
-rw-r--r--drivers/ide/macide.c161
-rw-r--r--drivers/ide/ns87415.c350
-rw-r--r--drivers/ide/opti621.c179
-rw-r--r--drivers/ide/palm_bk3710.c387
-rw-r--r--drivers/ide/pdc202xx_new.c557
-rw-r--r--drivers/ide/pdc202xx_old.c362
-rw-r--r--drivers/ide/piix.c476
-rw-r--r--drivers/ide/pmac.c1703
-rw-r--r--drivers/ide/q40ide.c168
-rw-r--r--drivers/ide/qd65xx.c446
-rw-r--r--drivers/ide/qd65xx.h145
-rw-r--r--drivers/ide/rapide.c106
-rw-r--r--drivers/ide/rz1000.c100
-rw-r--r--drivers/ide/sc1200.c355
-rw-r--r--drivers/ide/serverworks.c456
-rw-r--r--drivers/ide/setup-pci.c682
-rw-r--r--drivers/ide/siimage.c843
-rw-r--r--drivers/ide/sis5513.c637
-rw-r--r--drivers/ide/sl82c105.c367
-rw-r--r--drivers/ide/slc90e66.c182
-rw-r--r--drivers/ide/tc86c001.c270
-rw-r--r--drivers/ide/triflex.c143
-rw-r--r--drivers/ide/trm290.c374
-rw-r--r--drivers/ide/tx4938ide.c209
-rw-r--r--drivers/ide/tx4939ide.c628
-rw-r--r--drivers/ide/umc8672.c184
-rw-r--r--drivers/ide/via82cxxx.c532
-rw-r--r--drivers/idle/intel_idle.c33
-rw-r--r--drivers/iio/adc/ad7124.c36
-rw-r--r--drivers/iio/adc/ad7192.c19
-rw-r--r--drivers/iio/adc/ad7768-1.c8
-rw-r--r--drivers/iio/adc/ad7793.c1
-rw-r--r--drivers/iio/adc/ad7923.c4
-rw-r--r--drivers/iio/dac/ad5770r.c16
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c7
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c20
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c1
-rw-r--r--drivers/input/keyboard/Kconfig3
-rw-r--r--drivers/input/touchscreen/goodix.c52
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c4
-rw-r--r--drivers/iommu/amd/iommu.c4
-rw-r--r--drivers/iommu/intel/dmar.c4
-rw-r--r--drivers/iommu/intel/iommu.c9
-rw-r--r--drivers/iommu/intel/pasid.c3
-rw-r--r--drivers/iommu/virtio-iommu.c1
-rw-r--r--drivers/irqchip/Kconfig2
-rw-r--r--drivers/irqchip/exynos-combiner.c14
-rw-r--r--drivers/irqchip/irq-al-fic.c7
-rw-r--r--drivers/irqchip/irq-apple-aic.c9
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c19
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c8
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c6
-rw-r--r--drivers/irqchip/irq-ath79-misc.c2
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c6
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c6
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c3
-rw-r--r--drivers/irqchip/irq-gic-common.c13
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-pm.c4
-rw-r--r--drivers/irqchip/irq-gic-v2m.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c8
-rw-r--r--drivers/irqchip/irq-gic-v3.c42
-rw-r--r--drivers/irqchip/irq-gic.c19
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c5
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c6
-rw-r--r--drivers/irqchip/irq-imgpdc.c19
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-imx-intmux.c9
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c9
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c2
-rw-r--r--drivers/irqchip/irq-ingenic.c3
-rw-r--r--drivers/irqchip/irq-keystone.c14
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c2
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c4
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c6
-rw-r--r--drivers/irqchip/irq-ls1x.c2
-rw-r--r--drivers/irqchip/irq-mbigen.c12
-rw-r--r--drivers/irqchip/irq-mips-gic.c21
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c2
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c7
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c13
-rw-r--r--drivers/irqchip/irq-nvic.c4
-rw-r--r--drivers/irqchip/irq-orion.c2
-rw-r--r--drivers/irqchip/irq-partition-percpu.c9
-rw-r--r--drivers/irqchip/irq-pruss-intc.c9
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c2
-rw-r--r--drivers/irqchip/irq-sifive-plic.c8
-rw-r--r--drivers/irqchip/irq-stm32-exti.c10
-rw-r--r--drivers/irqchip/irq-sun4i.c8
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c3
-rw-r--r--drivers/irqchip/irq-tb10x.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c9
-rw-r--r--drivers/irqchip/irq-ts4800.c3
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c23
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c6
-rw-r--r--drivers/irqchip/qcom-pdc.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c46
-rw-r--r--drivers/lightnvm/core.c24
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/arm_mhu.c4
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c1
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c1
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c1
-rw-r--r--drivers/mailbox/hi3660-mailbox.c2
-rw-r--r--drivers/mailbox/hi6220-mailbox.c2
-rw-r--r--drivers/mailbox/imx-mailbox.c200
-rw-r--r--drivers/mailbox/mailbox-mpfs.c251
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c28
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c8
-rw-r--r--drivers/mailbox/qcom-ipcc.c6
-rw-r--r--drivers/md/Kconfig6
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/request.c20
-rw-r--r--drivers/md/bcache/stats.c14
-rw-r--r--drivers/md/bcache/stats.h1
-rw-r--r--drivers/md/bcache/super.c15
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/dm-rq.c9
-rw-r--r--drivers/md/dm.c22
-rw-r--r--drivers/md/md-bitmap.c2
-rw-r--r--drivers/md/md-faulty.c2
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md-multipath.c2
-rw-r--r--drivers/md/md.c141
-rw-r--r--drivers/md/md.h19
-rw-r--r--drivers/md/raid0.c3
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid10.h1
-rw-r--r--drivers/md/raid5.c65
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c7
-rw-r--r--drivers/media/common/Kconfig4
-rw-r--r--drivers/media/common/Makefile1
-rw-r--r--drivers/media/common/siano/smscoreapi.c22
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsdvb-main.c11
-rw-r--r--drivers/media/common/ttpci-eeprom.c (renamed from drivers/media/pci/ttpci/ttpci-eeprom.c)0
-rw-r--r--drivers/media/common/ttpci-eeprom.h (renamed from drivers/media/pci/ttpci/ttpci-eeprom.h)0
-rw-r--r--drivers/media/common/videobuf2/frame_vector.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c14
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c222
-rw-r--r--drivers/media/dvb-core/dvb_net.c25
-rw-r--r--drivers/media/dvb-core/dvbdev.c3
-rw-r--r--drivers/media/dvb-frontends/Kconfig12
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.h35
-rw-r--r--drivers/media/dvb-frontends/mxl692.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c4
-rw-r--r--drivers/media/i2c/Kconfig22
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7170.c6
-rw-r--r--drivers/media/i2c/adv7175.c6
-rw-r--r--drivers/media/i2c/adv7180.c18
-rw-r--r--drivers/media/i2c/adv7183.c8
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c13
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c14
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c13
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c10
-rw-r--r--drivers/media/i2c/adv7604.c12
-rw-r--r--drivers/media/i2c/adv7842.c53
-rw-r--r--drivers/media/i2c/ak7375.c10
-rw-r--r--drivers/media/i2c/ak881x.c6
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c125
-rw-r--r--drivers/media/i2c/ccs/ccs-limits.c4
-rw-r--r--drivers/media/i2c/ccs/ccs-limits.h4
-rw-r--r--drivers/media/i2c/ccs/ccs-regs.h6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/dw9714.c10
-rw-r--r--drivers/media/i2c/dw9768.c10
-rw-r--r--drivers/media/i2c/dw9807-vcm.c10
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c23
-rw-r--r--drivers/media/i2c/hi556.c18
-rw-r--r--drivers/media/i2c/imx208.c1088
-rw-r--r--drivers/media/i2c/imx214.c43
-rw-r--r--drivers/media/i2c/imx219.c36
-rw-r--r--drivers/media/i2c/imx258.c25
-rw-r--r--drivers/media/i2c/imx274.c41
-rw-r--r--drivers/media/i2c/imx290.c26
-rw-r--r--drivers/media/i2c/imx319.c24
-rw-r--r--drivers/media/i2c/imx334.c35
-rw-r--r--drivers/media/i2c/imx355.c24
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c4
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c21
-rw-r--r--drivers/media/i2c/max9271.c42
-rw-r--r--drivers/media/i2c/max9271.h9
-rw-r--r--drivers/media/i2c/max9286.c58
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/mt9m001.c27
-rw-r--r--drivers/media/i2c/mt9m032.c38
-rw-r--r--drivers/media/i2c/mt9m111.c18
-rw-r--r--drivers/media/i2c/mt9p031.c45
-rw-r--r--drivers/media/i2c/mt9t001.c44
-rw-r--r--drivers/media/i2c/mt9t112.c14
-rw-r--r--drivers/media/i2c/mt9v011.c6
-rw-r--r--drivers/media/i2c/mt9v032.c44
-rw-r--r--drivers/media/i2c/mt9v111.c25
-rw-r--r--drivers/media/i2c/noon010pc30.c19
-rw-r--r--drivers/media/i2c/ov02a10.c23
-rw-r--r--drivers/media/i2c/ov13858.c24
-rw-r--r--drivers/media/i2c/ov2640.c16
-rw-r--r--drivers/media/i2c/ov2659.c47
-rw-r--r--drivers/media/i2c/ov2680.c23
-rw-r--r--drivers/media/i2c/ov2685.c17
-rw-r--r--drivers/media/i2c/ov2740.c21
-rw-r--r--drivers/media/i2c/ov5640.c14
-rw-r--r--drivers/media/i2c/ov5645.c38
-rw-r--r--drivers/media/i2c/ov5647.c35
-rw-r--r--drivers/media/i2c/ov5648.c20
-rw-r--r--drivers/media/i2c/ov5670.c25
-rw-r--r--drivers/media/i2c/ov5675.c18
-rw-r--r--drivers/media/i2c/ov5695.c21
-rw-r--r--drivers/media/i2c/ov6650.c28
-rw-r--r--drivers/media/i2c/ov7251.c39
-rw-r--r--drivers/media/i2c/ov7670.c17
-rw-r--r--drivers/media/i2c/ov772x.c12
-rw-r--r--drivers/media/i2c/ov7740.c23
-rw-r--r--drivers/media/i2c/ov8856.c2467
-rw-r--r--drivers/media/i2c/ov8865.c22
-rw-r--r--drivers/media/i2c/ov9640.c8
-rw-r--r--drivers/media/i2c/ov9650.c21
-rw-r--r--drivers/media/i2c/ov9734.c18
-rw-r--r--drivers/media/i2c/rdacm20.c88
-rw-r--r--drivers/media/i2c/rdacm21.c71
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c12
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c61
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k4ecgx.c32
-rw-r--r--drivers/media/i2c/s5k5baf.c55
-rw-r--r--drivers/media/i2c/s5k6a3.c19
-rw-r--r--drivers/media/i2c/s5k6aa.c49
-rw-r--r--drivers/media/i2c/saa6588.c4
-rw-r--r--drivers/media/i2c/saa6752hs.c6
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/sr030pc30.c8
-rw-r--r--drivers/media/i2c/st-mipid02.c21
-rw-r--r--drivers/media/i2c/tc358743.c9
-rw-r--r--drivers/media/i2c/tda1997x.c14
-rw-r--r--drivers/media/i2c/tvp514x.c12
-rw-r--r--drivers/media/i2c/tvp5150.c36
-rw-r--r--drivers/media/i2c/tvp7002.c11
-rw-r--r--drivers/media/i2c/tw9910.c10
-rw-r--r--drivers/media/i2c/video-i2c.c12
-rw-r--r--drivers/media/i2c/vs6624.c8
-rw-r--r--drivers/media/mc/Makefile2
-rw-r--r--drivers/media/mc/mc-entity.c2
-rw-r--r--drivers/media/mc/mc-request.c3
-rw-r--r--drivers/media/pci/bt8xx/bt878.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c1
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h7
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c6
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c3
-rw-r--r--drivers/media/pci/cx88/cx88-core.c6
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c3
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c6
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c3
-rw-r--r--drivers/media/pci/cx88/cx88-video.c5
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c10
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c20
-rw-r--r--drivers/media/pci/ivtv/Kconfig12
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c221
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c40
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c6
-rw-r--r--drivers/media/pci/ttpci/Kconfig74
-rw-r--r--drivers/media/pci/ttpci/Makefile11
-rw-r--r--drivers/media/pci/ttpci/budget-core.c3
-rw-r--r--drivers/media/pci/ttpci/budget.h2
-rw-r--r--drivers/media/pci/tw5864/tw5864-reg.h62
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c2
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c15
-rw-r--r--drivers/media/platform/atmel/Kconfig11
-rw-r--r--drivers/media/platform/atmel/Makefile2
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c427
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h133
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h122
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c38
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c300
-rw-r--r--drivers/media/platform/atmel/atmel-sama7g5-isc.c630
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c8
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c22
-rw-r--r--drivers/media/platform/coda/coda-common.c11
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c6
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c11
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c28
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c10
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c44
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c44
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c5
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c10
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c27
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.c18
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.h18
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c12
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c14
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h26
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c92
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c17
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_ipi_msg.h4
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c12
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c85
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c49
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c41
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c69
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c70
-rw-r--r--drivers/media/platform/pxa_camera.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c49
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c59
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c48
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c92
-rw-r--r--drivers/media/platform/qcom/venus/core.c60
-rw-r--r--drivers/media/platform/qcom/venus/core.h7
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c5
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c31
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h10
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c16
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c16
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c28
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c28
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c153
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c6
-rw-r--r--drivers/media/platform/qcom/venus/venc.c5
-rw-r--r--drivers/media/platform/rcar-fcp.c10
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c34
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c6
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c16
-rw-r--r--drivers/media/platform/rcar_fdp1.c28
-rw-r--r--drivers/media/platform/rcar_jpu.c6
-rw-r--r--drivers/media/platform/renesas-ceu.c11
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c3
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c19
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c112
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c95
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c20
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c5
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c3
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c6
-rw-r--r--drivers/media/platform/sh_vou.c6
-rw-r--r--drivers/media/platform/sti/bdisp/Makefile2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c8
-rw-r--r--drivers/media/platform/sti/delta/Makefile2
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c9
-rw-r--r--drivers/media/platform/sti/hva/Makefile2
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c20
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c19
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c22
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c4
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c2
-rw-r--r--drivers/media/platform/ti-vpe/cal-camerarx.c35
-rw-r--r--drivers/media/platform/ti-vpe/cal-video.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c8
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c8
-rw-r--r--drivers/media/platform/via-camera.c5
-rw-r--r--drivers/media/platform/video-mux.c32
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c34
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c59
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h20
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c51
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c32
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.c27
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c26
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c5
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c25
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h4
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c2
-rw-r--r--drivers/media/rc/Kconfig83
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/bpf-lirc.c3
-rw-r--r--drivers/media/rc/imon.c15
-rw-r--r--drivers/media/rc/ite-cir.h2
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-ct-90405.c86
-rw-r--r--drivers/media/rc/keymaps/rc-tango.c89
-rw-r--r--drivers/media/rc/st_rc.c22
-rw-r--r--drivers/media/rc/tango-ir.c267
-rw-r--r--drivers/media/spi/cxd2880-spi.c12
-rw-r--r--drivers/media/test-drivers/vim2m.c5
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c20
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c36
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c16
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c44
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h1
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c8
-rw-r--r--drivers/media/usb/Kconfig5
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/au0828/au0828-core.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2.h1
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c12
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c13
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c149
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c9
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c5
-rw-r--r--drivers/media/usb/dvb-usb/Makefile2
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c13
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c8
-rw-r--r--drivers/media/usb/go7007/s2250-board.c2
-rw-r--r--drivers/media/usb/gspca/cpia1.c5
-rw-r--r--drivers/media/usb/gspca/gl860/gl860.c4
-rw-r--r--drivers/media/usb/gspca/ov519.c2
-rw-r--r--drivers/media/usb/gspca/sq905.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c8
-rw-r--r--drivers/media/usb/hackrf/hackrf.c3
-rw-r--r--drivers/media/usb/msi2500/msi2500.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c4
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c23
-rw-r--r--drivers/media/usb/uvc/uvc_video.c27
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c1
-rw-r--r--drivers/media/v4l2-core/Kconfig5
-rw-r--r--drivers/media/v4l2-core/Makefile8
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c23
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c1225
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c1946
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c1579
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h96
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c496
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c5035
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c166
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c1
-rw-r--r--drivers/memstick/core/ms_block.c63
-rw-r--r--drivers/memstick/core/mspro_block.c26
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c10
-rw-r--r--drivers/mfd/Kconfig18
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ioc3.c1
-rw-r--r--drivers/mfd/mt6358-irq.c89
-rw-r--r--drivers/mfd/mt6397-core.c24
-rw-r--r--drivers/mfd/tps68470.c97
-rw-r--r--drivers/misc/cardreader/rtl8411.c1
-rw-r--r--drivers/misc/cardreader/rts5209.c1
-rw-r--r--drivers/misc/cardreader/rts5227.c2
-rw-r--r--drivers/misc/cardreader/rts5228.c1
-rw-r--r--drivers/misc/cardreader/rts5229.c1
-rw-r--r--drivers/misc/cardreader/rts5249.c3
-rw-r--r--drivers/misc/cardreader/rts5260.c1
-rw-r--r--drivers/misc/cardreader/rts5261.c1
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c44
-rw-r--r--drivers/misc/kgdbts.c3
-rw-r--r--drivers/misc/lkdtm/bugs.c6
-rw-r--r--drivers/misc/mei/interrupt.c3
-rw-r--r--drivers/misc/sgi-gru/grufault.c4
-rw-r--r--drivers/mmc/core/block.c11
-rw-r--r--drivers/mmc/core/core.c22
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/debugfs.c1
-rw-r--r--drivers/mmc/core/host.c3
-rw-r--r--drivers/mmc/core/mmc.c68
-rw-r--r--drivers/mmc/core/mmc_ops.c163
-rw-r--r--drivers/mmc/core/mmc_ops.h12
-rw-r--r--drivers/mmc/core/sd.c481
-rw-r--r--drivers/mmc/core/sd_ops.c38
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/cqhci-core.c21
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c1
-rw-r--r--drivers/mmc/host/jz4740_mmc.c6
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c50
-rw-r--r--drivers/mmc/host/mmc_spi.c12
-rw-r--r--drivers/mmc/host/mtk-sd.c25
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c9
-rw-r--r--drivers/mmc/host/s3cmci.c7
-rw-r--r--drivers/mmc/host/sdhci-acpi.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c18
-rw-r--r--drivers/mmc/host/sdhci-iproc.c30
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c50
-rw-r--r--drivers/mmc/host/sdhci-omap.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c4
-rw-r--r--drivers/mmc/host/sdhci-sprd.c1
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c1
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c49
-rw-r--r--drivers/mtd/mtdpstore.c10
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c12
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c12
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c15
-rw-r--r--drivers/mtd/nand/raw/ndfc.c12
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c12
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c5
-rw-r--r--drivers/mtd/nand/spi/core.c45
-rw-r--r--drivers/mtd/parsers/ofpart_core.c26
-rw-r--r--drivers/mtd/ubi/block.c68
-rw-r--r--drivers/net/Kconfig23
-rw-r--r--drivers/net/appletalk/cops.c34
-rw-r--r--drivers/net/appletalk/ltpc.c16
-rw-r--r--drivers/net/bareudp.c1
-rw-r--r--drivers/net/bonding/bond_alb.c13
-rw-r--r--drivers/net/bonding/bond_debugfs.c3
-rw-r--r--drivers/net/bonding/bond_main.c41
-rw-r--r--drivers/net/bonding/bond_netlink.c2
-rw-r--r--drivers/net/bonding/bond_options.c5
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_virtio.c6
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/Makefile5
-rw-r--r--drivers/net/can/c_can/c_can.h3
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c43
-rw-r--r--drivers/net/can/c_can/c_can_main.c (renamed from drivers/net/can/c_can/c_can.c)2
-rw-r--r--drivers/net/can/m_can/m_can.c244
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c4
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/spi/hi311x.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c2
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c14
-rw-r--r--drivers/net/can/usb/mcba_usb.c17
-rw-r--r--drivers/net/dsa/b53/b53_common.c30
-rw-r--r--drivers/net/dsa/b53/b53_srab.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c5
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c3
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c214
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h67
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c272
-rw-r--r--drivers/net/dsa/mt7530.h20
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/dsa/ocelot/felix.c2
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c15
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c5
-rw-r--r--drivers/net/dsa/qca8k.c803
-rw-r--r--drivers/net/dsa/qca8k.h58
-rw-r--r--drivers/net/dsa/sja1105/Kconfig9
-rw-r--r--drivers/net/dsa/sja1105/Makefile1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h128
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c170
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c383
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c1089
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c13
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c839
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c543
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c97
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h13
-rw-r--r--drivers/net/dsa/sja1105/sja1105_sgmii.h53
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c518
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c500
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h109
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c14
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c78
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c14
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c6
-rw-r--r--drivers/net/ethernet/8390/stnic.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/alteon/acenic.c26
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c30
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c18
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c274
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h23
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c4
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h6
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c12
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c12
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c12
-rw-r--r--drivers/net/ethernet/apple/bmac.c30
-rw-r--r--drivers/net/ethernet/apple/mace.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.h4
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h2
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c21
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c85
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h28
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c35
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h42
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c587
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c20
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c281
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h667
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c473
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h81
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c6
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c80
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c6
-rw-r--r--drivers/net/ethernet/cortina/gemini.c34
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c18
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c12
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c7
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/fealnx.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c103
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c31
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c67
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c76
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h74
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c3
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c30
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c6
-rw-r--r--drivers/net/ethernet/google/Kconfig2
-rw-r--r--drivers/net/ethernet/google/gve/Makefile2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h332
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c334
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h112
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h256
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h81
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c21
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c316
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c54
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c763
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c35
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c1030
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c81
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h28
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c76
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c1471
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1335
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h99
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c150
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c2604
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h47
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c414
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h89
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c621
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h60
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c119
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c542
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h134
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c76
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c27
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c54
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c280
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e100.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c26
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c132
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c124
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c124
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c134
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c465
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c62
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h91
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c334
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h151
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c150
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c330
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1558
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h204
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c651
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c93
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h232
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c246
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c68
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h42
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h34
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c41
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c738
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c109
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/korina.c12
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c26
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c20
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c32
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h25
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c177
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h107
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h3915
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c168
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c923
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c617
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h85
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c323
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c192
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c143
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c303
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c58
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h39
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c376
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h124
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c530
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c194
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h14
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c359
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h18
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c661
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h51
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c301
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c104
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c7
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c239
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.h20
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c186
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.h7
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.h2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c32
-rw-r--r--drivers/net/ethernet/marvell/sky2.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c77
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h24
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c140
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c1299
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c295
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c630
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c242
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig13
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/Makefile11
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h190
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c137
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c452
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c187
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c320
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c284
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h126
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c595
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1691
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c15
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c111
-rw-r--r--drivers/net/ethernet/microchip/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c596
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c1227
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c500
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c853
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h375
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h4642
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c264
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c320
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c210
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c1146
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h93
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c510
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c224
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c9
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c5
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c29
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c36
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile3
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm_mbox.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c1180
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h231
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c129
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c102
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c140
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c829
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h103
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c376
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h39
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c238
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c43
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h11
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c434
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c20
-rw-r--r--drivers/net/ethernet/rdc/r6040.c9
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c6
-rw-r--r--drivers/net/ethernet/realtek/8139too.c6
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c12
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c7
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/seeq/ether3.c10
-rw-r--r--drivers/net/ethernet/sfc/ef10.c20
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c36
-rw-r--r--drivers/net/ethernet/sfc/efx.c19
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon_boards.c10
-rw-r--r--drivers/net/ethernet/sfc/farch.c13
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/rx.c9
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sis/sis900.c22
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c42
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c14
-rw-r--r--drivers/net/ethernet/socionext/netsec.c3
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c398
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c105
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c219
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c207
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c131
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c20
-rw-r--r--drivers/net/ethernet/sun/sunhme.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c18
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c7
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c25
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c5
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c10
-rw-r--r--drivers/net/fddi/skfp/ess.c6
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h2
-rw-r--r--drivers/net/fjes/fjes_main.c16
-rw-r--r--drivers/net/gtp.c3
-rw-r--r--drivers/net/hamradio/6pack.c10
-rw-r--r--drivers/net/hamradio/baycom_epp.c4
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/hamradio/scc.c20
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h8
-rw-r--r--drivers/net/hyperv/netvsc.c10
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/hyperv/rndis_filter.c10
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c11
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ifb.c4
-rw-r--r--drivers/net/ipa/Makefile9
-rw-r--r--drivers/net/ipa/gsi.c90
-rw-r--r--drivers/net/ipa/gsi.h2
-rw-r--r--drivers/net/ipa/gsi_reg.h3
-rw-r--r--drivers/net/ipa/ipa.h2
-rw-r--r--drivers/net/ipa/ipa_cmd.c40
-rw-r--r--drivers/net/ipa/ipa_data-v3.1.c533
-rw-r--r--drivers/net/ipa/ipa_data-v3.5.1.c45
-rw-r--r--drivers/net/ipa/ipa_data-v4.11.c66
-rw-r--r--drivers/net/ipa/ipa_data-v4.2.c54
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c69
-rw-r--r--drivers/net/ipa/ipa_data-v4.9.c70
-rw-r--r--drivers/net/ipa/ipa_data.h1
-rw-r--r--drivers/net/ipa/ipa_endpoint.c90
-rw-r--r--drivers/net/ipa/ipa_main.c55
-rw-r--r--drivers/net/ipa/ipa_mem.c263
-rw-r--r--drivers/net/ipa/ipa_mem.h26
-rw-r--r--drivers/net/ipa/ipa_qmi.c32
-rw-r--r--drivers/net/ipa/ipa_reg.h1
-rw-r--r--drivers/net/ipa/ipa_smp2p.c5
-rw-r--r--drivers/net/ipa/ipa_sysfs.c136
-rw-r--r--drivers/net/ipa/ipa_sysfs.h15
-rw-r--r--drivers/net/ipa/ipa_table.c94
-rw-r--r--drivers/net/ipa/ipa_uc.c3
-rw-r--r--drivers/net/ipa/ipa_version.h2
-rw-r--r--drivers/net/macsec.c4
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/Kconfig14
-rw-r--r--drivers/net/mdio/Makefile4
-rw-r--r--drivers/net/mdio/acpi_mdio.c58
-rw-r--r--drivers/net/mdio/fwnode_mdio.c144
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c70
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c6
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c9
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c2
-rw-r--r--drivers/net/mdio/mdio-octeon.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c1
-rw-r--r--drivers/net/mdio/of_mdio.c149
-rw-r--r--drivers/net/mhi/net.c135
-rw-r--r--drivers/net/mhi/proto_mbim.c5
-rw-r--r--drivers/net/mii.c2
-rw-r--r--drivers/net/netdevsim/bus.c129
-rw-r--r--drivers/net/netdevsim/dev.c404
-rw-r--r--drivers/net/netdevsim/netdev.c95
-rw-r--r--drivers/net/netdevsim/netdevsim.h49
-rw-r--r--drivers/net/pcs/Makefile4
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c185
-rw-r--r--drivers/net/pcs/pcs-xpcs.c659
-rw-r--r--drivers/net/pcs/pcs-xpcs.h115
-rw-r--r--drivers/net/phy/Kconfig16
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/adin.c2
-rw-r--r--drivers/net/phy/at803x.c192
-rw-r--r--drivers/net/phy/ax88796b.c74
-rw-r--r--drivers/net/phy/bcm87xx.c4
-rw-r--r--drivers/net/phy/davicom.c6
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/dp83867.c6
-rw-r--r--drivers/net/phy/et1011c.c15
-rw-r--r--drivers/net/phy/fixed_phy.c4
-rw-r--r--drivers/net/phy/lxt.c4
-rw-r--r--drivers/net/phy/marvell.c40
-rw-r--r--drivers/net/phy/mdio_bus.c7
-rw-r--r--drivers/net/phy/mdio_device.c4
-rw-r--r--drivers/net/phy/mediatek-ge.c112
-rw-r--r--drivers/net/phy/micrel.c410
-rw-r--r--drivers/net/phy/mii_timestamper.c3
-rw-r--r--drivers/net/phy/motorcomm.c137
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c2
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h2
-rw-r--r--drivers/net/phy/national.c6
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c537
-rw-r--r--drivers/net/phy/phy-c45.c2
-rw-r--r--drivers/net/phy/phy-core.c3
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/phy_device.c132
-rw-r--r--drivers/net/phy/phylink.c60
-rw-r--r--drivers/net/phy/qsemi.c1
-rw-r--r--drivers/net/phy/realtek.c76
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/sfp-bus.c33
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c10
-rw-r--r--drivers/net/phy/ste10Xp.c6
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/ppp/bsd_comp.c2
-rw-r--r--drivers/net/slip/slhc.c2
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/Kconfig12
-rw-r--r--drivers/net/usb/asix.h13
-rw-r--r--drivers/net/usb/asix_common.c106
-rw-r--r--drivers/net/usb/asix_devices.c202
-rw-r--r--drivers/net/usb/ax88172a.c21
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c42
-rw-r--r--drivers/net/usb/hso.c52
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c1
-rw-r--r--drivers/net/usb/int51x1.c2
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/lg-vl600.c4
-rw-r--r--drivers/net/usb/mcs7830.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/r8152.c141
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/usbnet.c27
-rw-r--r--drivers/net/virtio_net.c49
-rw-r--r--drivers/net/vrf.c22
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/net/wan/c101.c46
-rw-r--r--drivers/net/wan/cosa.c493
-rw-r--r--drivers/net/wan/farsync.c487
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c3
-rw-r--r--drivers/net/wan/hd64570.c124
-rw-r--r--drivers/net/wan/hd64572.c95
-rw-r--r--drivers/net/wan/hdlc.c63
-rw-r--r--drivers/net/wan/hdlc_cisco.c49
-rw-r--r--drivers/net/wan/hdlc_fr.c101
-rw-r--r--drivers/net/wan/hdlc_ppp.c38
-rw-r--r--drivers/net/wan/hdlc_x25.c77
-rw-r--r--drivers/net/wan/hostess_sv11.c113
-rw-r--r--drivers/net/wan/ixp4xx_hss.c144
-rw-r--r--drivers/net/wan/lapbether.c65
-rw-r--r--drivers/net/wan/lmc/lmc.h2
-rw-r--r--drivers/net/wan/n2.c56
-rw-r--r--drivers/net/wan/pc300too.c52
-rw-r--r--drivers/net/wan/pci200syn.c51
-rw-r--r--drivers/net/wan/sealevel.c126
-rw-r--r--drivers/net/wan/wanxl.c190
-rw-r--r--drivers/net/wan/z85230.c995
-rw-r--r--drivers/net/wireguard/Makefile3
-rw-r--r--drivers/net/wireguard/allowedips.c189
-rw-r--r--drivers/net/wireguard/allowedips.h14
-rw-r--r--drivers/net/wireguard/main.c17
-rw-r--r--drivers/net/wireguard/peer.c27
-rw-r--r--drivers/net/wireguard/peer.h3
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c165
-rw-r--r--drivers/net/wireguard/socket.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c199
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c47
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c34
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c391
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h87
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig8
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h20
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c131
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c288
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h17
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c47
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c70
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c57
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c418
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c357
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c39
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c18
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.h5
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c60
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c149
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c11
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c232
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c384
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c103
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c673
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h80
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c103
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c253
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c200
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h11
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c59
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c45
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c114
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h55
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c196
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h57
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c81
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c113
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c1008
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c20
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h1
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c1
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c17
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c17
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c67
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c24
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c4
-rw-r--r--drivers/net/wwan/Kconfig53
-rw-r--r--drivers/net/wwan/Makefile6
-rw-r--r--drivers/net/wwan/iosm/Makefile23
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c88
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h59
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c1363
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h579
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c346
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h101
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_irq.c90
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_irq.h33
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c223
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h183
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c455
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h343
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c910
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h193
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c580
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h209
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pm.c333
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pm.h207
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c85
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.h50
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol.c283
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol.h237
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c552
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h444
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_task_queue.c202
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_task_queue.h97
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_uevent.c44
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_uevent.h41
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c340
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.h55
-rw-r--r--drivers/net/wwan/rpmsg_wwan_ctrl.c166
-rw-r--r--drivers/net/wwan/wwan_core.c638
-rw-r--r--drivers/net/wwan/wwan_hwsim.c547
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/nfc/fdp/fdp.c42
-rw-r--r--drivers/nfc/fdp/fdp.h1
-rw-r--r--drivers/nfc/fdp/i2c.c14
-rw-r--r--drivers/nfc/mei_phy.c8
-rw-r--r--drivers/nfc/microread/microread.c1
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c25
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.h17
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c24
-rw-r--r--drivers/nfc/nfcmrvl/main.c13
-rw-r--r--drivers/nfc/nfcmrvl/nfcmrvl.h29
-rw-r--r--drivers/nfc/nfcmrvl/spi.c19
-rw-r--r--drivers/nfc/nfcmrvl/uart.c49
-rw-r--r--drivers/nfc/nfcmrvl/usb.c31
-rw-r--r--drivers/nfc/nxp-nci/core.c39
-rw-r--r--drivers/nfc/nxp-nci/firmware.c7
-rw-r--r--drivers/nfc/pn533/i2c.c10
-rw-r--r--drivers/nfc/pn533/pn533.c46
-rw-r--r--drivers/nfc/pn533/uart.c2
-rw-r--r--drivers/nfc/pn533/usb.c4
-rw-r--r--drivers/nfc/pn544/i2c.c11
-rw-r--r--drivers/nfc/port100.c4
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c32
-rw-r--r--drivers/nfc/st-nci/i2c.c9
-rw-r--r--drivers/nfc/st-nci/se.c14
-rw-r--r--drivers/nfc/st-nci/spi.c9
-rw-r--r--drivers/nfc/st-nci/vendor_cmds.c15
-rw-r--r--drivers/nfc/st21nfca/dep.c59
-rw-r--r--drivers/nfc/st21nfca/i2c.c9
-rw-r--r--drivers/nfc/st95hf/core.c13
-rw-r--r--drivers/nvdimm/blk.c27
-rw-r--r--drivers/nvdimm/btt.c25
-rw-r--r--drivers/nvdimm/btt.h2
-rw-r--r--drivers/nvdimm/pmem.c21
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c197
-rw-r--r--drivers/nvme/host/fabrics.c61
-rw-r--r--drivers/nvme/host/fabrics.h6
-rw-r--r--drivers/nvme/host/fc.c27
-rw-r--r--drivers/nvme/host/ioctl.c61
-rw-r--r--drivers/nvme/host/multipath.c79
-rw-r--r--drivers/nvme/host/nvme.h17
-rw-r--r--drivers/nvme/host/pci.c82
-rw-r--r--drivers/nvme/host/rdma.c7
-rw-r--r--drivers/nvme/host/tcp.c31
-rw-r--r--drivers/nvme/host/zns.c27
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c155
-rw-r--r--drivers/nvme/target/configfs.c102
-rw-r--r--drivers/nvme/target/core.c148
-rw-r--r--drivers/nvme/target/discovery.c8
-rw-r--r--drivers/nvme/target/fc.c10
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c36
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/loop.c11
-rw-r--r--drivers/nvme/target/nvmet.h43
-rw-r--r--drivers/nvme/target/passthru.c3
-rw-r--r--drivers/nvme/target/rdma.c3
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/nvme/target/zns.c615
-rw-r--r--drivers/opp/core.c10
-rw-r--r--drivers/opp/of.c27
-rw-r--r--drivers/pci/controller/dwc/Makefile3
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194-acpi.c108
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c138
-rw-r--r--drivers/pci/controller/pci-aardvark.c49
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/of.c9
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pci/quirks.c93
-rw-r--r--drivers/perf/arm-cci.c4
-rw-r--r--drivers/perf/arm-ccn.c6
-rw-r--r--drivers/perf/arm-cmn.c13
-rw-r--r--drivers/perf/arm_dmc620_pmu.c5
-rw-r--r--drivers/perf/arm_dsu_pmu.c8
-rw-r--r--drivers/perf/arm_pmu.c16
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c35
-rw-r--r--drivers/perf/arm_spe_pmu.c12
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c18
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c9
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c7
-rw-r--r--drivers/perf/qcom_l2_pmu.c11
-rw-r--r--drivers/perf/qcom_l3_pmu.c15
-rw-r--r--drivers/perf/thunderx2_pmu.c4
-rw-r--r--drivers/perf/xgene_pmu.c17
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h4
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c4
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c4
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c4
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c3
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.c3
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c18
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c9
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c4
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c4
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/bus.c2
-rw-r--r--drivers/platform/surface/aggregator/bus.h2
-rw-r--r--drivers/platform/surface/aggregator/controller.c334
-rw-r--r--drivers/platform/surface/aggregator/controller.h2
-rw-r--r--drivers/platform/surface/aggregator/core.c8
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c12
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c12
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/trace.h2
-rw-r--r--drivers/platform/surface/surface3_power.c22
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c7
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c534
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c54
-rw-r--r--drivers/platform/surface/surface_dtx.c1
-rw-r--r--drivers/platform/x86/Kconfig49
-rw-r--r--drivers/platform/x86/Makefile10
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c77
-rw-r--r--drivers/platform/x86/dell/Kconfig11
-rw-r--r--drivers/platform/x86/dell/Makefile2
-rw-r--r--drivers/platform/x86/dell/dcdbas.c3
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c13
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c (renamed from drivers/platform/x86/dell/dell-wmi.c)14
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c391
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.h36
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c39
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c16
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c6
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c4
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c16
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c21
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c52
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h11
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wireless.c102
-rw-r--r--drivers/platform/x86/ideapad-laptop.c12
-rw-r--r--drivers/platform/x86/intel/Kconfig22
-rw-r--r--drivers/platform/x86/intel/Makefile8
-rw-r--r--drivers/platform/x86/intel/int33fe/Kconfig24
-rw-r--r--drivers/platform/x86/intel/int33fe/Makefile5
-rw-r--r--drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c (renamed from drivers/platform/x86/intel_cht_int33fe_common.c)0
-rw-r--r--drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.h (renamed from drivers/platform/x86/intel_cht_int33fe_common.h)0
-rw-r--r--drivers/platform/x86/intel/int33fe/intel_cht_int33fe_microb.c (renamed from drivers/platform/x86/intel_cht_int33fe_microb.c)0
-rw-r--r--drivers/platform/x86/intel/int33fe/intel_cht_int33fe_typec.c (renamed from drivers/platform/x86/intel_cht_int33fe_typec.c)4
-rw-r--r--drivers/platform/x86/intel/int3472/Kconfig30
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile5
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c207
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c106
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h122
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c413
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c137
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/intel_pmt_crashlog.c2
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.c73
-rw-r--r--drivers/platform/x86/samsung-laptop.c35
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/think-lmi.c904
-rw-r--r--drivers/platform/x86/think-lmi.h72
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c3
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/platform/x86/toshiba_haps.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c87
-rw-r--r--drivers/platform/x86/uv_sysfs.c4
-rw-r--r--drivers/platform/x86/wireless-hotkey.c103
-rw-r--r--drivers/pnp/base.h1
-rw-r--r--drivers/pnp/card.c14
-rw-r--r--drivers/pnp/core.c17
-rw-r--r--drivers/pnp/interface.c4
-rw-r--r--drivers/pnp/isapnp/proc.c13
-rw-r--r--drivers/pnp/pnpbios/core.c4
-rw-r--r--drivers/pnp/resource.c2
-rw-r--r--drivers/ptp/ptp_clock.c24
-rw-r--r--drivers/ptp/ptp_ocp.c4
-rw-r--r--drivers/regulator/Kconfig57
-rw-r--r--drivers/regulator/Makefile8
-rw-r--r--drivers/regulator/atc260x-regulator.c19
-rw-r--r--drivers/regulator/bd70528-regulator.c283
-rw-r--r--drivers/regulator/bd71815-regulator.c60
-rw-r--r--drivers/regulator/bd718x7-regulator.c2
-rw-r--r--drivers/regulator/bd9576-regulator.c1084
-rw-r--r--drivers/regulator/core.c322
-rw-r--r--drivers/regulator/cros-ec-regulator.c3
-rw-r--r--drivers/regulator/da9052-regulator.c3
-rw-r--r--drivers/regulator/da9121-regulator.c10
-rw-r--r--drivers/regulator/devres.c52
-rw-r--r--drivers/regulator/fan53555.c119
-rw-r--r--drivers/regulator/fan53880.c10
-rw-r--r--drivers/regulator/fixed.c10
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/hi6421-regulator.c8
-rw-r--r--drivers/regulator/hi6421v600-regulator.c63
-rw-r--r--drivers/regulator/hi655x-regulator.c18
-rw-r--r--drivers/regulator/internal.h11
-rw-r--r--drivers/regulator/irq_helpers.c397
-rw-r--r--drivers/regulator/lp8755.c55
-rw-r--r--drivers/regulator/ltc3589.c73
-rw-r--r--drivers/regulator/max77620-regulator.c17
-rw-r--r--drivers/regulator/max77686-regulator.c42
-rw-r--r--drivers/regulator/max77802-regulator.c70
-rw-r--r--drivers/regulator/max8893.c183
-rw-r--r--drivers/regulator/max8973-regulator.c37
-rw-r--r--drivers/regulator/mcp16502.c79
-rw-r--r--drivers/regulator/mp5416.c44
-rw-r--r--drivers/regulator/mp886x.c32
-rw-r--r--drivers/regulator/mt6315-regulator.c23
-rw-r--r--drivers/regulator/mt6358-regulator.c24
-rw-r--r--drivers/regulator/mt6359-regulator.c997
-rw-r--r--drivers/regulator/of_regulator.c58
-rw-r--r--drivers/regulator/pca9450-regulator.c51
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c10
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c62
-rw-r--r--drivers/regulator/qcom_smd-regulator.c85
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c6
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c12
-rw-r--r--drivers/regulator/rk808-regulator.c116
-rw-r--r--drivers/regulator/rt4801-regulator.c4
-rw-r--r--drivers/regulator/rt4831-regulator.c3
-rw-r--r--drivers/regulator/rt6160-regulator.c319
-rw-r--r--drivers/regulator/rt6245-regulator.c254
-rw-r--r--drivers/regulator/rtmv20-regulator.c44
-rw-r--r--drivers/regulator/scmi-regulator.c2
-rw-r--r--drivers/regulator/stpmic1_regulator.c20
-rw-r--r--drivers/regulator/sy7636a-regulator.c128
-rw-r--r--drivers/regulator/uniphier-regulator.c1
-rw-r--r--drivers/regulator/userspace-consumer.c14
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/s390/block/dasd_diag.c8
-rw-r--r--drivers/s390/block/dasd_fba.c8
-rw-r--r--drivers/s390/block/dasd_genhd.c12
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/dcssblk.c26
-rw-r--r--drivers/s390/block/scm_blk.c21
-rw-r--r--drivers/s390/block/xpram.c26
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c12
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c2
-rw-r--r--drivers/s390/crypto/ap_queue.c11
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c10
-rw-r--r--drivers/s390/net/netiucv.c28
-rw-r--r--drivers/s390/net/qeth_core.h42
-rw-r--r--drivers/s390/net/qeth_core_main.c349
-rw-r--r--drivers/s390/net/qeth_ethtool.c7
-rw-r--r--drivers/s390/net/qeth_l2_main.c12
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h2
-rw-r--r--drivers/scsi/aic7xxx/scsi_message.h11
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c8
-rw-r--r--drivers/scsi/hosts.c47
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c1
-rw-r--r--drivers/scsi/libsas/sas_port.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/sd.c26
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/storvsc_drv.c104
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c15
-rw-r--r--drivers/scsi/vmw_pvscsi.c8
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c4
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c7
-rw-r--r--drivers/soundwire/qcom.c12
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-altera-dfl.c4
-rw-r--r--drivers/spi/spi-ath79.c9
-rw-r--r--drivers/spi/spi-atmel.c139
-rw-r--r--drivers/spi/spi-bcm2835.c204
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bitbang.c18
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c4
-rw-r--r--drivers/spi/spi-fsl-spi.c4
-rw-r--r--drivers/spi/spi-geni-qcom.c4
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c51
-rw-r--r--drivers/spi/spi-lm70llp.c2
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-mem.c88
-rw-r--r--drivers/spi/spi-meson-spicc.c8
-rw-r--r--drivers/spi/spi-mpc512x-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c2
-rw-r--r--drivers/spi/spi-npcm-pspi.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c11
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-omap-100k.c6
-rw-r--r--drivers/spi/spi-omap-uwire.c13
-rw-r--r--drivers/spi/spi-omap2-mcspi.c37
-rw-r--r--drivers/spi/spi-pl022.c4
-rw-r--r--drivers/spi/spi-ppc4xx.c10
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c41
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c11
-rw-r--r--drivers/spi/spi-pxa2xx.c392
-rw-r--r--drivers/spi/spi-pxa2xx.h68
-rw-r--r--drivers/spi/spi-rockchip.c55
-rw-r--r--drivers/spi/spi-rspi.c6
-rw-r--r--drivers/spi/spi-sc18is602.c9
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/spi/spi-sprd.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c93
-rw-r--r--drivers/spi/spi-sun6i.c6
-rw-r--r--drivers/spi/spi-tegra114.c3
-rw-r--r--drivers/spi/spi-tegra20-slink.c5
-rw-r--r--drivers/spi/spi-tegra210-quad.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-uniphier.c2
-rw-r--r--drivers/spi/spi-zynq-qspi.c16
-rw-r--r--drivers/spi/spi.c312
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/driver_gpio.c6
-rw-r--r--drivers/ssb/driver_pcicore.c18
-rw-r--r--drivers/ssb/main.c36
-rw-r--r--drivers/ssb/pci.c16
-rw-r--r--drivers/ssb/pcmcia.c16
-rw-r--r--drivers/ssb/scan.c1
-rw-r--r--drivers/ssb/sdio.c1
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c4
-rw-r--r--drivers/staging/iio/cdc/ad7746.c1
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/TODO5
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c57
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c37
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c6
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c120
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c36
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c28
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h6
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h10
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h161
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c1202
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.c14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c6
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c2089
-rw-r--r--drivers/staging/media/av7110/Kconfig94
-rw-r--r--drivers/staging/media/av7110/Makefile22
-rw-r--r--drivers/staging/media/av7110/TODO3
-rw-r--r--drivers/staging/media/av7110/audio-bilingual-channel-select.rst (renamed from Documentation/userspace-api/media/dvb/audio-bilingual-channel-select.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-channel-select.rst (renamed from Documentation/userspace-api/media/dvb/audio-channel-select.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-clear-buffer.rst (renamed from Documentation/userspace-api/media/dvb/audio-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-continue.rst (renamed from Documentation/userspace-api/media/dvb/audio-continue.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fclose.rst (renamed from Documentation/userspace-api/media/dvb/audio-fclose.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fopen.rst (renamed from Documentation/userspace-api/media/dvb/audio-fopen.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-fwrite.rst (renamed from Documentation/userspace-api/media/dvb/audio-fwrite.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-get-capabilities.rst (renamed from Documentation/userspace-api/media/dvb/audio-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-get-status.rst (renamed from Documentation/userspace-api/media/dvb/audio-get-status.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-pause.rst (renamed from Documentation/userspace-api/media/dvb/audio-pause.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-play.rst (renamed from Documentation/userspace-api/media/dvb/audio-play.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-select-source.rst (renamed from Documentation/userspace-api/media/dvb/audio-select-source.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-av-sync.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-av-sync.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-bypass-mode.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-bypass-mode.rst)2
-rw-r--r--drivers/staging/media/av7110/audio-set-id.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-id.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-mixer.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-mixer.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-mute.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-mute.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-set-streamtype.rst (renamed from Documentation/userspace-api/media/dvb/audio-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/av7110/audio-stop.rst (renamed from Documentation/userspace-api/media/dvb/audio-stop.rst)0
-rw-r--r--drivers/staging/media/av7110/audio.h (renamed from include/uapi/linux/dvb/audio.h)0
-rw-r--r--drivers/staging/media/av7110/audio.rst (renamed from Documentation/userspace-api/media/dvb/audio.rst)2
-rw-r--r--drivers/staging/media/av7110/audio_data_types.rst (renamed from Documentation/userspace-api/media/dvb/audio_data_types.rst)0
-rw-r--r--drivers/staging/media/av7110/audio_function_calls.rst (renamed from Documentation/userspace-api/media/dvb/audio_function_calls.rst)0
-rw-r--r--drivers/staging/media/av7110/av7110.c (renamed from drivers/media/pci/ttpci/av7110.c)0
-rw-r--r--drivers/staging/media/av7110/av7110.h (renamed from drivers/media/pci/ttpci/av7110.h)7
-rw-r--r--drivers/staging/media/av7110/av7110_av.c (renamed from drivers/media/pci/ttpci/av7110_av.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_av.h (renamed from drivers/media/pci/ttpci/av7110_av.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c (renamed from drivers/media/pci/ttpci/av7110_ca.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_ca.h (renamed from drivers/media/pci/ttpci/av7110_ca.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_hw.c (renamed from drivers/media/pci/ttpci/av7110_hw.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_hw.h (renamed from drivers/media/pci/ttpci/av7110_hw.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.c (renamed from drivers/media/pci/ttpci/av7110_ipack.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.h (renamed from drivers/media/pci/ttpci/av7110_ipack.h)0
-rw-r--r--drivers/staging/media/av7110/av7110_ir.c (renamed from drivers/media/pci/ttpci/av7110_ir.c)0
-rw-r--r--drivers/staging/media/av7110/av7110_v4l.c (renamed from drivers/media/pci/ttpci/av7110_v4l.c)0
-rw-r--r--drivers/staging/media/av7110/budget-patch.c (renamed from drivers/media/pci/ttpci/budget-patch.c)0
-rw-r--r--drivers/staging/media/av7110/dvb_filter.c (renamed from drivers/media/pci/ttpci/dvb_filter.c)0
-rw-r--r--drivers/staging/media/av7110/dvb_filter.h (renamed from drivers/media/pci/ttpci/dvb_filter.h)0
-rw-r--r--drivers/staging/media/av7110/osd.h (renamed from include/uapi/linux/dvb/osd.h)0
-rw-r--r--drivers/staging/media/av7110/sp8870.c (renamed from drivers/media/dvb-frontends/sp8870.c)0
-rw-r--r--drivers/staging/media/av7110/sp8870.h (renamed from drivers/media/dvb-frontends/sp8870.h)0
-rw-r--r--drivers/staging/media/av7110/video-clear-buffer.rst (renamed from Documentation/userspace-api/media/dvb/video-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/av7110/video-command.rst (renamed from Documentation/userspace-api/media/dvb/video-command.rst)0
-rw-r--r--drivers/staging/media/av7110/video-continue.rst (renamed from Documentation/userspace-api/media/dvb/video-continue.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fast-forward.rst (renamed from Documentation/userspace-api/media/dvb/video-fast-forward.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fclose.rst (renamed from Documentation/userspace-api/media/dvb/video-fclose.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fopen.rst (renamed from Documentation/userspace-api/media/dvb/video-fopen.rst)0
-rw-r--r--drivers/staging/media/av7110/video-freeze.rst (renamed from Documentation/userspace-api/media/dvb/video-freeze.rst)0
-rw-r--r--drivers/staging/media/av7110/video-fwrite.rst (renamed from Documentation/userspace-api/media/dvb/video-fwrite.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-capabilities.rst (renamed from Documentation/userspace-api/media/dvb/video-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-event.rst (renamed from Documentation/userspace-api/media/dvb/video-get-event.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-frame-count.rst (renamed from Documentation/userspace-api/media/dvb/video-get-frame-count.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-pts.rst (renamed from Documentation/userspace-api/media/dvb/video-get-pts.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-size.rst (renamed from Documentation/userspace-api/media/dvb/video-get-size.rst)0
-rw-r--r--drivers/staging/media/av7110/video-get-status.rst (renamed from Documentation/userspace-api/media/dvb/video-get-status.rst)0
-rw-r--r--drivers/staging/media/av7110/video-play.rst (renamed from Documentation/userspace-api/media/dvb/video-play.rst)0
-rw-r--r--drivers/staging/media/av7110/video-select-source.rst (renamed from Documentation/userspace-api/media/dvb/video-select-source.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-blank.rst (renamed from Documentation/userspace-api/media/dvb/video-set-blank.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-display-format.rst (renamed from Documentation/userspace-api/media/dvb/video-set-display-format.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-format.rst (renamed from Documentation/userspace-api/media/dvb/video-set-format.rst)0
-rw-r--r--drivers/staging/media/av7110/video-set-streamtype.rst (renamed from Documentation/userspace-api/media/dvb/video-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/av7110/video-slowmotion.rst (renamed from Documentation/userspace-api/media/dvb/video-slowmotion.rst)0
-rw-r--r--drivers/staging/media/av7110/video-stillpicture.rst (renamed from Documentation/userspace-api/media/dvb/video-stillpicture.rst)0
-rw-r--r--drivers/staging/media/av7110/video-stop.rst (renamed from Documentation/userspace-api/media/dvb/video-stop.rst)0
-rw-r--r--drivers/staging/media/av7110/video-try-command.rst (renamed from Documentation/userspace-api/media/dvb/video-try-command.rst)0
-rw-r--r--drivers/staging/media/av7110/video.h (renamed from include/uapi/linux/dvb/video.h)0
-rw-r--r--drivers/staging/media/av7110/video.rst (renamed from Documentation/userspace-api/media/dvb/video.rst)2
-rw-r--r--drivers/staging/media/av7110/video_function_calls.rst (renamed from Documentation/userspace-api/media/dvb/video_function_calls.rst)0
-rw-r--r--drivers/staging/media/av7110/video_types.rst (renamed from Documentation/userspace-api/media/dvb/video_types.rst)0
-rw-r--r--drivers/staging/media/hantro/Kconfig10
-rw-r--r--drivers/staging/media/hantro/Makefile15
-rw-r--r--drivers/staging/media/hantro/hantro.h13
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c185
-rw-r--r--drivers/staging/media/hantro/hantro_g1.c39
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c10
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c119
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c6
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c586
-rw-r--r--drivers/staging/media/hantro/hantro_g2_regs.h198
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c333
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h101
-rw-r--r--drivers/staging/media/hantro/hantro_mpeg2.c2
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c14
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c14
-rw-r--r--drivers/staging/media/hantro/imx8m_vpu_hw.c79
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c236
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c222
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c (renamed from drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c)32
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c (renamed from drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c)123
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c (renamed from drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c)6
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_regs.h (renamed from drivers/staging/media/hantro/rk3399_vpu_regs.h)6
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu_hw.c526
-rw-r--r--drivers/staging/media/hantro/sama5d4_vdec_hw.c117
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c19
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c31
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c96
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c4
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c24
-rw-r--r--drivers/staging/media/imx/imx-media.h2
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c12
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c33
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c1020
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h (renamed from drivers/staging/media/ipu3/include/intel-ipu3.h)13
-rw-r--r--drivers/staging/media/ipu3/ipu3-abi.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h1
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c26
-rw-r--r--drivers/staging/media/ipu3/ipu3.c3
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.c2
-rw-r--r--drivers/staging/media/omap4iss/iss.h3
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c37
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c37
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c47
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c39
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c4
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c12
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c16
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h6
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c12
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c16
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c97
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c14
-rw-r--r--drivers/staging/media/tegra-vde/vde.c22
-rw-r--r--drivers/staging/media/tegra-video/csi.c13
-rw-r--r--drivers/staging/media/tegra-video/vi.c31
-rw-r--r--drivers/staging/media/zoran/zoran.h1
-rw-r--r--drivers/staging/media/zoran/zoran_card.c7
-rw-r--r--drivers/staging/media/zoran/zoran_device.c65
-rw-r--r--drivers/staging/media/zoran/zoran_device.h2
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c6
-rw-r--r--drivers/staging/media/zoran/zr36016.c3
-rw-r--r--drivers/staging/media/zoran/zr36050.c5
-rw-r--r--drivers/staging/media/zoran/zr36057.h14
-rw-r--r--drivers/staging/media/zoran/zr36060.c3
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi4
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c2
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c6
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/target_core_user.c12
-rw-r--r--drivers/tee/optee/call.c6
-rw-r--r--drivers/tee/optee/optee_msg.h6
-rw-r--r--drivers/tee/tee_core.c1
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c4
-rw-r--r--drivers/thermal/intel/therm_throt.c15
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c2
-rw-r--r--drivers/thermal/thermal_core.c63
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thunderbolt/dma_port.c11
-rw-r--r--drivers/thunderbolt/usb4.c9
-rw-r--r--drivers/tty/serial/8250/8250.h32
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c1
-rw-r--r--drivers/tty/serial/8250/8250_exar.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c47
-rw-r--r--drivers/tty/serial/8250/8250_port.c12
-rw-r--r--drivers/tty/serial/rp2.c52
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/sh-sci.c4
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c14
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c7
-rw-r--r--drivers/usb/chipidea/udc.c1
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c16
-rw-r--r--drivers/usb/class/cdc-wdm.c181
-rw-r--r--drivers/usb/core/devio.c11
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/debug.h3
-rw-r--r--drivers/usb/dwc3/debugfs.c21
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c13
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c31
-rw-r--r--drivers/usb/gadget/config.c8
-rw-r--r--drivers/usb/gadget/function/f_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c6
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/function/f_hid.c3
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c10
-rw-r--r--drivers/usb/gadget/function/f_printer.c3
-rw-r--r--drivers/usb/gadget/function/f_rndis.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c3
-rw-r--r--drivers/usb/gadget/function/f_subset.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c3
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c15
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/host/max3421-hcd.c3
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c14
-rw-r--r--drivers/usb/host/xhci-tegra.c6
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/misc/brcmstb-usb-pinmap.c2
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/misc/uss720.c1
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/serial/cp210x.c84
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/omninet.c8
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/typec/mux.c9
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c15
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c150
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c3
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c19
-rw-r--r--drivers/vfio/pci/Kconfig1
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c4
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/vsock.c58
-rw-r--r--drivers/video/fbdev/core/fb_defio.c35
-rw-r--r--drivers/video/fbdev/core/fbmem.c4
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/virtio/virtio_balloon.c17
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/octeon-wdt-main.c1
-rw-r--r--drivers/xen/events/events_base.c11
-rw-r--r--fs/adfs/inode.c1
-rw-r--r--fs/affs/file.c2
-rw-r--r--fs/afs/cmservice.c5
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/main.c4
-rw-r--r--fs/afs/vlclient.c1
-rw-r--r--fs/afs/write.c26
-rw-r--r--fs/bfs/file.c1
-rw-r--r--fs/binfmt_aout.c4
-rw-r--r--fs/binfmt_elf.c10
-rw-r--r--fs/binfmt_elf_fdpic.c15
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/block_dev.c245
-rw-r--r--fs/btrfs/Kconfig2
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/block-group.c39
-rw-r--r--fs/btrfs/compression.c74
-rw-r--r--fs/btrfs/compression.h26
-rw-r--r--fs/btrfs/ctree.c5
-rw-r--r--fs/btrfs/ctree.h120
-rw-r--r--fs/btrfs/delalloc-space.c2
-rw-r--r--fs/btrfs/delayed-inode.c41
-rw-r--r--fs/btrfs/delayed-ref.c26
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/discard.c2
-rw-r--r--fs/btrfs/disk-io.c81
-rw-r--r--fs/btrfs/extent-tree.c20
-rw-r--r--fs/btrfs/extent_io.c973
-rw-r--r--fs/btrfs/extent_io.h29
-rw-r--r--fs/btrfs/file-item.c110
-rw-r--r--fs/btrfs/file.c48
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/inode.c536
-rw-r--r--fs/btrfs/ioctl.c184
-rw-r--r--fs/btrfs/locking.c4
-rw-r--r--fs/btrfs/ordered-data.c253
-rw-r--r--fs/btrfs/ordered-data.h10
-rw-r--r--fs/btrfs/props.c16
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/reflink.c52
-rw-r--r--fs/btrfs/relocation.c75
-rw-r--r--fs/btrfs/scrub.c159
-rw-r--r--fs/btrfs/send.c47
-rw-r--r--fs/btrfs/space-info.c233
-rw-r--r--fs/btrfs/space-info.h30
-rw-r--r--fs/btrfs/subpage.c155
-rw-r--r--fs/btrfs/subpage.h33
-rw-r--r--fs/btrfs/super.c16
-rw-r--r--fs/btrfs/sysfs.c74
-rw-r--r--fs/btrfs/tests/extent-map-tests.c2
-rw-r--r--fs/btrfs/transaction.c61
-rw-r--r--fs/btrfs/transaction.h6
-rw-r--r--fs/btrfs/tree-log.c59
-rw-r--r--fs/btrfs/volumes.c26
-rw-r--r--fs/btrfs/volumes.h5
-rw-r--r--fs/btrfs/zoned.c66
-rw-r--r--fs/btrfs/zoned.h9
-rw-r--r--fs/buffer.c25
-rw-r--r--fs/ceph/dir.c22
-rw-r--r--fs/ceph/file.c17
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/Kconfig6
-rw-r--r--fs/cifs/Makefile8
-rw-r--r--fs/cifs/asn1.c623
-rw-r--r--fs/cifs/cache.c14
-rw-r--r--fs/cifs/cifs_debug.c24
-rw-r--r--fs/cifs/cifs_debug.h2
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifs_fs_sb.h18
-rw-r--r--fs/cifs/cifs_ioctl.h36
-rw-r--r--fs/cifs/cifs_spnego.c14
-rw-r--r--fs/cifs/cifs_spnego.h14
-rw-r--r--fs/cifs/cifs_spnego_negtokeninit.asn140
-rw-r--r--fs/cifs/cifs_swn.c10
-rw-r--r--fs/cifs/cifsacl.c18
-rw-r--r--fs/cifs/cifsacl.h15
-rw-r--r--fs/cifs/cifsencrypt.c14
-rw-r--r--fs/cifs/cifsfs.c14
-rw-r--r--fs/cifs/cifsfs.h14
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifspdu.h17
-rw-r--r--fs/cifs/cifsproto.h14
-rw-r--r--fs/cifs/cifssmb.c16
-rw-r--r--fs/cifs/connect.c159
-rw-r--r--fs/cifs/dfs_cache.c1079
-rw-r--r--fs/cifs/dfs_cache.h45
-rw-r--r--fs/cifs/dir.c27
-rw-r--r--fs/cifs/dns_resolve.c14
-rw-r--r--fs/cifs/dns_resolve.h14
-rw-r--r--fs/cifs/export.c14
-rw-r--r--fs/cifs/file.c14
-rw-r--r--fs/cifs/fscache.c14
-rw-r--r--fs/cifs/fscache.h14
-rw-r--r--fs/cifs/inode.c19
-rw-r--r--fs/cifs/ioctl.c157
-rw-r--r--fs/cifs/link.c14
-rw-r--r--fs/cifs/misc.c14
-rw-r--r--fs/cifs/netlink.c2
-rw-r--r--fs/cifs/ntlmssp.h14
-rw-r--r--fs/cifs/readdir.c16
-rw-r--r--fs/cifs/rfc1002pdu.h14
-rw-r--r--fs/cifs/sess.c16
-rw-r--r--fs/cifs/smb2file.c14
-rw-r--r--fs/cifs/smb2glob.h11
-rw-r--r--fs/cifs/smb2inode.c14
-rw-r--r--fs/cifs/smb2maperror.c14
-rw-r--r--fs/cifs/smb2misc.c52
-rw-r--r--fs/cifs/smb2ops.c140
-rw-r--r--fs/cifs/smb2pdu.c56
-rw-r--r--fs/cifs/smb2pdu.h53
-rw-r--r--fs/cifs/smb2proto.h16
-rw-r--r--fs/cifs/smb2status.h14
-rw-r--r--fs/cifs/smb2transport.c26
-rw-r--r--fs/cifs/smbdirect.c14
-rw-r--r--fs/cifs/smberr.h14
-rw-r--r--fs/cifs/smbfsctl.h14
-rw-r--r--fs/cifs/trace.h29
-rw-r--r--fs/cifs/transport.c14
-rw-r--r--fs/cifs/xattr.c14
-rw-r--r--fs/configfs/inode.c8
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/crypto/fname.c10
-rw-r--r--fs/crypto/keysetup.c40
-rw-r--r--fs/dax.c3
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/debugfs/inode.c9
-rw-r--r--fs/dlm/config.c18
-rw-r--r--fs/dlm/config.h5
-rw-r--r--fs/dlm/debug_fs.c54
-rw-r--r--fs/dlm/dlm_internal.h42
-rw-r--r--fs/dlm/lock.c16
-rw-r--r--fs/dlm/lockspace.c14
-rw-r--r--fs/dlm/lowcomms.c411
-rw-r--r--fs/dlm/lowcomms.h25
-rw-r--r--fs/dlm/member.c37
-rw-r--r--fs/dlm/midcomms.c1343
-rw-r--r--fs/dlm/midcomms.h15
-rw-r--r--fs/dlm/rcom.c123
-rw-r--r--fs/dlm/util.c10
-rw-r--r--fs/dlm/util.h2
-rw-r--r--fs/ecryptfs/mmap.c13
-rw-r--r--fs/erofs/Kconfig1
-rw-r--r--fs/erofs/compress.h2
-rw-r--r--fs/erofs/data.c2
-rw-r--r--fs/erofs/decompressor.c2
-rw-r--r--fs/erofs/dir.c2
-rw-r--r--fs/erofs/erofs_fs.h2
-rw-r--r--fs/erofs/inode.c2
-rw-r--r--fs/erofs/internal.h2
-rw-r--r--fs/erofs/namei.c2
-rw-r--r--fs/erofs/super.c3
-rw-r--r--fs/erofs/tagptr.h3
-rw-r--r--fs/erofs/utils.c2
-rw-r--r--fs/erofs/xattr.c2
-rw-r--r--fs/erofs/xattr.h1
-rw-r--r--fs/erofs/zdata.c6
-rw-r--r--fs/erofs/zdata.h1
-rw-r--r--fs/erofs/zmap.c2
-rw-r--r--fs/erofs/zpvec.h7
-rw-r--r--fs/exec.c6
-rw-r--r--fs/exfat/inode.c1
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/ext4/extents.c43
-rw-r--r--fs/ext4/fast_commit.c170
-rw-r--r--fs/ext4/fast_commit.h19
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/ext4/super.c11
-rw-r--r--fs/ext4/sysfs.c4
-rw-r--r--fs/fat/inode.c1
-rw-r--r--fs/fs-writeback.c357
-rw-r--r--fs/fuse/dax.c3
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/bmap.c62
-rw-r--r--fs/gfs2/bmap.h2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/file.c84
-rw-r--r--fs/gfs2/glock.c31
-rw-r--r--fs/gfs2/glops.c2
-rw-r--r--fs/gfs2/log.c6
-rw-r--r--fs/gfs2/log.h1
-rw-r--r--fs/gfs2/lops.c7
-rw-r--r--fs/gfs2/lops.h1
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/gfs2/util.c1
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hpfs/file.c1
-rw-r--r--fs/hugetlbfs/inode.c17
-rw-r--r--fs/io-wq.c29
-rw-r--r--fs/io-wq.h2
-rw-r--r--fs/io_uring.c58
-rw-r--r--fs/iomap/buffered-io.c27
-rw-r--r--fs/jfs/inode.c1
-rw-r--r--fs/kernfs/inode.c8
-rw-r--r--fs/libfs.c44
-rw-r--r--fs/minix/inode.c1
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/netfs/Kconfig2
-rw-r--r--fs/netfs/read_helper.c51
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/filelayout/filelayout.c2
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/nfs4_fs.h1
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4proc.c41
-rw-r--r--fs/nfs/nfstrace.h4
-rw-r--r--fs/nfs/pagelist.c20
-rw-r--r--fs/nfs/pnfs.c17
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nilfs2/mdt.c1
-rw-r--r--fs/nilfs2/sysfs.c20
-rw-r--r--fs/notify/fanotify/fanotify_user.c34
-rw-r--r--fs/notify/fdinfo.c2
-rw-r--r--fs/ntfs/inode.c2
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c7
-rw-r--r--fs/ocfs2/cluster/nodemanager.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/file.c55
-rw-r--r--fs/ocfs2/filecheck.c6
-rw-r--r--fs/ocfs2/stackglue.c8
-rw-r--r--fs/omfs/file.c1
-rw-r--r--fs/open.c14
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/proc/loadavg.c2
-rw-r--r--fs/proc/stat.c4
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/pstore/Kconfig1
-rw-r--r--fs/pstore/blk.c403
-rw-r--r--fs/ramfs/inode.c9
-rw-r--r--fs/reiserfs/namei.c1
-rw-r--r--fs/squashfs/block.c5
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c86
-rw-r--r--fs/super.c8
-rw-r--r--fs/sysv/itree.c1
-rw-r--r--fs/udf/file.c1
-rw-r--r--fs/udf/inode.c1
-rw-r--r--fs/ufs/inode.c1
-rw-r--r--fs/userfaultfd.c4
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c22
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c12
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c46
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c17
-rw-r--r--fs/xfs/scrub/agheader.c1
-rw-r--r--fs/xfs/scrub/bmap.c2
-rw-r--r--fs/xfs/scrub/btree.c2
-rw-r--r--fs/xfs/scrub/common.c6
-rw-r--r--fs/xfs/scrub/dabtree.c2
-rw-r--r--fs/xfs/scrub/repair.c2
-rw-r--r--fs/xfs/xfs_aops.c4
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_export.c4
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_inode.c31
-rw-r--r--fs/xfs/xfs_ioctl.c105
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_log.c1
-rw-r--r--fs/xfs/xfs_message.h15
-rw-r--r--fs/xfs/xfs_trans_buf.c2
-rw-r--r--fs/zonefs/super.c4
-rw-r--r--include/acpi/acbuffer.h10
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpi_bus.h19
-rw-r--r--include/acpi/acpi_numa.h2
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl1.h41
-rw-r--r--include/acpi/actbl2.h137
-rw-r--r--include/asm-generic/atomic-instrumented.h498
-rw-r--r--include/asm-generic/atomic.h118
-rw-r--r--include/asm-generic/atomic64.h45
-rw-r--r--include/asm-generic/bug.h37
-rw-r--r--include/asm-generic/cmpxchg-local.h4
-rw-r--r--include/asm-generic/cmpxchg.h42
-rw-r--r--include/asm-generic/hyperv-tlfs.h1
-rw-r--r--include/asm-generic/memory_model.h37
-rw-r--r--include/asm-generic/pgtable-nop4d.h1
-rw-r--r--include/asm-generic/preempt.h2
-rw-r--r--include/asm-generic/topology.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/clocksource/samsung_pwm.h3
-rw-r--r--include/clocksource/timer-ti-dm.h1
-rw-r--r--include/crypto/aead.h2
-rw-r--r--include/crypto/algapi.h10
-rw-r--r--include/crypto/engine.h2
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/internal/hash.h8
-rw-r--r--include/dt-bindings/usb/pd.h89
-rw-r--r--include/kunit/test.h5
-rw-r--r--include/kvm/arm_vgic.h41
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/acpi_mdio.h26
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/arm-smccc.h88
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/atomic-fallback.h2595
-rw-r--r--include/linux/atomic.h4
-rw-r--r--include/linux/avf/virtchnl.h30
-rw-r--r--include/linux/backing-dev-defs.h20
-rw-r--r--include/linux/bio.h14
-rw-r--r--include/linux/blk-mq.h24
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpf.h42
-rw-r--r--include/linux/bpf_local_storage.h4
-rw-r--r--include/linux/bpf_types.h2
-rw-r--r--include/linux/bpf_verifier.h9
-rw-r--r--include/linux/bpfptr.h75
-rw-r--r--include/linux/btf.h2
-rw-r--r--include/linux/ceph/auth.h4
-rw-r--r--include/linux/cgroup-defs.h6
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clocksource.h8
-rw-r--r--include/linux/compiler.h32
-rw-r--r--include/linux/compiler_attributes.h20
-rw-r--r--include/linux/compiler_types.h2
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cred.h4
-rw-r--r--include/linux/crypto.h26
-rw-r--r--include/linux/debug_locks.h2
-rw-r--r--include/linux/delayacct.h22
-rw-r--r--include/linux/dev_printk.h2
-rw-r--r--include/linux/device.h9
-rw-r--r--include/linux/devm-helpers.h25
-rw-r--r--include/linux/dsa/8021q.h79
-rw-r--r--include/linux/dsa/sja1105.h26
-rw-r--r--include/linux/elevator.h4
-rw-r--r--include/linux/energy_model.h16
-rw-r--r--include/linux/entry-kvm.h3
-rw-r--r--include/linux/ethtool.h12
-rw-r--r--include/linux/evm.h34
-rw-r--r--include/linux/fanotify.h4
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fcntl.h4
-rw-r--r--include/linux/filter.h29
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/fwnode_mdio.h35
-rw-r--r--include/linux/genhd.h32
-rw-r--r--include/linux/gfp.h31
-rw-r--r--include/linux/gpio/consumer.h2
-rw-r--r--include/linux/hid.h22
-rw-r--r--include/linux/highmem.h43
-rw-r--r--include/linux/host1x.h30
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/hugetlb.h28
-rw-r--r--include/linux/hyperv.h61
-rw-r--r--include/linux/ide.h1623
-rw-r--r--include/linux/ieee80211.h10
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_bridge.h38
-rw-r--r--include/linux/if_rmnet.h32
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/instrumentation.h20
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/intel-ish-client-if.h10
-rw-r--r--include/linux/interrupt.h38
-rw-r--r--include/linux/iomap.h1
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic-common.h25
-rw-r--r--include/linux/irqchip/arm-vgic-info.h45
-rw-r--r--include/linux/irqdesc.h18
-rw-r--r--include/linux/irqdomain.h65
-rw-r--r--include/linux/jump_label.h16
-rw-r--r--include/linux/kasan.h71
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kprobes.h8
-rw-r--r--include/linux/kthread.h4
-rw-r--r--include/linux/kvm_host.h146
-rw-r--r--include/linux/kvm_types.h14
-rw-r--r--include/linux/libata.h13
-rw-r--r--include/linux/lockdep_types.h2
-rw-r--r--include/linux/lsm_audit.h8
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h8
-rw-r--r--include/linux/math64.h19
-rw-r--r--include/linux/memblock.h6
-rw-r--r--include/linux/memcontrol.h54
-rw-r--r--include/linux/mfd/madera/pdata.h2
-rw-r--r--include/linux/mfd/mt6358/core.h8
-rw-r--r--include/linux/mfd/mt6359/core.h133
-rw-r--r--include/linux/mfd/mt6359/registers.h529
-rw-r--r--include/linux/mfd/mt6359p/registers.h249
-rw-r--r--include/linux/mfd/mt6397/core.h1
-rw-r--r--include/linux/mfd/mt6397/rtc.h1
-rw-r--r--include/linux/mfd/rohm-bd70528.h4
-rw-r--r--include/linux/mfd/rohm-bd71828.h10
-rw-r--r--include/linux/micrel_phy.h16
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mlx5/driver.h50
-rw-r--r--include/linux/mlx5/eq.h1
-rw-r--r--include/linux/mlx5/eswitch.h17
-rw-r--r--include/linux/mlx5/fs.h14
-rw-r--r--include/linux/mlx5/mlx5_ifc.h51
-rw-r--r--include/linux/mlx5/mpfs.h18
-rw-r--r--include/linux/mlx5/transobj.h1
-rw-r--r--include/linux/mm.h81
-rw-r--r--include/linux/mm_types.h44
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/card.h23
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sd.h4
-rw-r--r--include/linux/mmdebug.h3
-rw-r--r--include/linux/mmzone.h90
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/mtd/spinand.h22
-rw-r--r--include/linux/net/intel/i40e_client.h12
-rw-r--r--include/linux/net/intel/iidc.h100
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h4
-rw-r--r--include/linux/netfilter.h12
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/nvme.h12
-rw-r--r--include/linux/of_mdio.h7
-rw-r--r--include/linux/oid_registry.h8
-rw-r--r--include/linux/once_lite.h24
-rw-r--r--include/linux/page-flags.h19
-rw-r--r--include/linux/page_owner.h6
-rw-r--r--include/linux/page_ref.h4
-rw-r--r--include/linux/page_reporting.h3
-rw-r--r--include/linux/pageblock-flags.h2
-rw-r--r--include/linux/pagemap.h17
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/pcs/pcs-xpcs.h46
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/pgtable.h30
-rw-r--r--include/linux/phy.h45
-rw-r--r--include/linux/phylink.h3
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/platform_data/spi-ath79.h16
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/platform_profile.h2
-rw-r--r--include/linux/pm_domain.h1
-rw-r--r--include/linux/pm_runtime.h3
-rw-r--r--include/linux/pmbus.h30
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/prandom.h2
-rw-r--r--include/linux/printk.h69
-rw-r--r--include/linux/prmt.h7
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/pstore_blk.h27
-rw-r--r--include/linux/ptp_clock_kernel.h34
-rw-r--r--include/linux/pxa2xx_ssp.h51
-rw-r--r--include/linux/qed/common_hsi.h2
-rw-r--r--include/linux/qed/nvmetcp_common.h531
-rw-r--r--include/linux/qed/qed_if.h18
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h240
-rw-r--r--include/linux/qed/qed_nvmetcp_ip_services_if.h29
-rw-r--r--include/linux/rcupdate.h14
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/regmap.h40
-rw-r--r--include/linux/regulator/consumer.h14
-rw-r--r--include/linux/regulator/coupler.h5
-rw-r--r--include/linux/regulator/driver.h187
-rw-r--r--include/linux/regulator/machine.h26
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/rtsx_pci.h2
-rw-r--r--include/linux/sched.h61
-rw-r--r--include/linux/sched/coredump.h8
-rw-r--r--include/linux/sched/cpufreq.h2
-rw-r--r--include/linux/sched/debug.h2
-rw-r--r--include/linux/sched/sd_flags.h10
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/sched/stat.h16
-rw-r--r--include/linux/sched/user.h7
-rw-r--r--include/linux/sched_clock.h2
-rw-r--r--include/linux/sctp.h7
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/seqlock.h6
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/signal_types.h4
-rw-r--r--include/linux/skbuff.h39
-rw-r--r--include/linux/skmsg.h4
-rw-r--r--include/linux/slab.h59
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/pxa2xx_spi.h21
-rw-r--r--include/linux/spi/spi-mem.h16
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/stmmac.h17
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/surface_aggregator/controller.h27
-rw-r--r--include/linux/surface_aggregator/device.h2
-rw-r--r--include/linux/surface_aggregator/serial_hub.h2
-rw-r--r--include/linux/swap.h19
-rw-r--r--include/linux/swapops.h20
-rw-r--r--include/linux/tick.h33
-rw-r--r--include/linux/usb/cdc-wdm.h3
-rw-r--r--include/linux/usb/pd.h2
-rw-r--r--include/linux/usb/pd_ext_sdb.h4
-rw-r--r--include/linux/user_namespace.h31
-rw-r--r--include/linux/virtio_vsock.h10
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/vmstat.h65
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/linux/wwan.h71
-rw-r--r--include/media/hevc-ctrls.h52
-rw-r--r--include/media/media-dev-allocator.h2
-rw-r--r--include/media/mpeg2-ctrls.h82
-rw-r--r--include/media/rc-map.h2
-rw-r--r--include/media/v4l2-ctrls.h11
-rw-r--r--include/media/v4l2-subdev.h78
-rw-r--r--include/media/videobuf2-v4l2.h16
-rw-r--r--include/net/af_vsock.h8
-rw-r--r--include/net/bluetooth/hci.h99
-rw-r--r--include/net/bluetooth/hci_core.h29
-rw-r--r--include/net/bluetooth/mgmt.h3
-rw-r--r--include/net/bonding.h2
-rw-r--r--include/net/caif/caif_dev.h2
-rw-r--r--include/net/caif/cfcnfg.h2
-rw-r--r--include/net/caif/cfserl.h1
-rw-r--r--include/net/cfg80211.h55
-rw-r--r--include/net/devlink.h58
-rw-r--r--include/net/dsa.h62
-rw-r--r--include/net/flow_offload.h12
-rw-r--r--include/net/icmp.h1
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_route.h16
-rw-r--r--include/net/ip_fib.h43
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/mac80211.h81
-rw-r--r--include/net/macsec.h2
-rw-r--r--include/net/mptcp.h10
-rw-r--r--include/net/net_namespace.h18
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h20
-rw-r--r--include/net/netfilter/nf_flow_table.h3
-rw-r--r--include/net/netfilter/nf_tables.h40
-rw-r--r--include/net/netfilter/nf_tables_core.h32
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h40
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h42
-rw-r--r--include/net/netns/conntrack.h8
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/netns/sctp.h3
-rw-r--r--include/net/netns/smc.h16
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/nfc/nci_core.h1
-rw-r--r--include/net/page_pool.h9
-rw-r--r--include/net/pkt_cls.h11
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/protocol.h1
-rw-r--r--include/net/rtnetlink.h8
-rw-r--r--include/net/sch_generic.h64
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/constants.h20
-rw-r--r--include/net/sctp/sctp.h57
-rw-r--r--include/net/sctp/sm.h6
-rw-r--r--include/net/sctp/structs.h22
-rw-r--r--include/net/sock.h26
-rw-r--r--include/net/sock_reuseport.h9
-rw-r--r--include/net/switchdev.h13
-rw-r--r--include/net/tc_act/tc_vlan.h1
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/tls.h14
-rw-r--r--include/net/xdp.h1
-rw-r--r--include/net/xdp_sock.h2
-rw-r--r--include/net/xfrm.h40
-rw-r--r--include/net/xsk_buff_pool.h9
-rw-r--r--include/soc/microchip/mpfs.h43
-rw-r--r--include/sound/soc-dai.h2
-rw-r--r--include/trace/events/btrfs.h23
-rw-r--r--include/trace/events/cma.h4
-rw-r--r--include/trace/events/filemap.h2
-rw-r--r--include/trace/events/kmem.h12
-rw-r--r--include/trace/events/mmflags.h9
-rw-r--r--include/trace/events/mptcp.h17
-rw-r--r--include/trace/events/page_pool.h4
-rw-r--r--include/trace/events/pagemap.h4
-rw-r--r--include/trace/events/sock.h60
-rw-r--r--include/trace/events/spi.h57
-rw-r--r--include/trace/events/tcp.h76
-rw-r--r--include/trace/events/vmscan.h2
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h5
-rw-r--r--include/trace/events/xdp.h6
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/asm-generic/unistd.h3
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/bpf.h82
-rw-r--r--include/uapi/linux/btrfs.h4
-rw-r--r--include/uapi/linux/btrfs_tree.h4
-rw-r--r--include/uapi/linux/can.h13
-rw-r--r--include/uapi/linux/cec-funcs.h2
-rw-r--r--include/uapi/linux/devlink.h17
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/ethtool_netlink.h2
-rw-r--r--include/uapi/linux/futex.h2
-rw-r--r--include/uapi/linux/icmp.h3
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h9
-rw-r--r--include/uapi/linux/in.h3
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/io_uring.h19
-rw-r--r--include/uapi/linux/kvm.h110
-rw-r--r--include/uapi/linux/kvm_para.h1
-rw-r--r--include/uapi/linux/lirc.h1
-rw-r--r--include/uapi/linux/mount.h1
-rw-r--r--include/uapi/linux/mptcp.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h17
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h55
-rw-r--r--include/uapi/linux/netlink.h5
-rw-r--r--include/uapi/linux/nl80211.h9
-rw-r--r--include/uapi/linux/prctl.h8
-rw-r--r--include/uapi/linux/sctp.h8
-rw-r--r--include/uapi/linux/seccomp.h1
-rw-r--r--include/uapi/linux/seg6_local.h2
-rw-r--r--include/uapi/linux/smc.h83
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/surface_aggregator/cdev.h73
-rw-r--r--include/uapi/linux/userfaultfd.h4
-rw-r--r--include/uapi/linux/v4l2-controls.h135
-rw-r--r--include/uapi/linux/videodev2.h7
-rw-r--r--include/uapi/linux/virtio_ids.h2
-rw-r--r--include/uapi/linux/virtio_vsock.h9
-rw-r--r--include/uapi/linux/wwan.h16
-rw-r--r--init/Kconfig44
-rw-r--r--init/do_mounts.c10
-rw-r--r--init/init_task.c2
-rw-r--r--init/main.c19
-rw-r--r--ipc/mqueue.c40
-rw-r--r--ipc/shm.c26
-rw-r--r--kernel/Kconfig.preempt20
-rw-r--r--kernel/audit.h10
-rw-r--r--kernel/audit_tree.c12
-rw-r--r--kernel/auditsc.c63
-rw-r--r--kernel/bpf/Kconfig89
-rw-r--r--kernel/bpf/bpf_inode_storage.c2
-rw-r--r--kernel/bpf/bpf_iter.c13
-rw-r--r--kernel/bpf/bpf_lsm.c4
-rw-r--r--kernel/bpf/btf.c88
-rw-r--r--kernel/bpf/core.c61
-rw-r--r--kernel/bpf/cpumap.c16
-rw-r--r--kernel/bpf/devmap.c358
-rw-r--r--kernel/bpf/hashtab.c123
-rw-r--r--kernel/bpf/helpers.c48
-rw-r--r--kernel/bpf/inode.c2
-rw-r--r--kernel/bpf/lpm_trie.c6
-rw-r--r--kernel/bpf/preload/iterators/iterators.bpf.c1
-rw-r--r--kernel/bpf/reuseport_array.c2
-rw-r--r--kernel/bpf/ringbuf.c26
-rw-r--r--kernel/bpf/syscall.c244
-rw-r--r--kernel/bpf/tnum.c41
-rw-r--r--kernel/bpf/trampoline.c2
-rw-r--r--kernel/bpf/verifier.c525
-rw-r--r--kernel/cgroup/cgroup-v1.c8
-rw-r--r--kernel/cgroup/cgroup.c22
-rw-r--r--kernel/cgroup/cpuset.c2
-rw-r--r--kernel/cgroup/rdma.c2
-rw-r--r--kernel/cgroup/rstat.c2
-rw-r--r--kernel/cpu.c53
-rw-r--r--kernel/crash_core.c5
-rw-r--r--kernel/cred.c51
-rw-r--r--kernel/debug/kdb/kdb_main.c9
-rw-r--r--kernel/debug/kdb/kdb_support.c18
-rw-r--r--kernel/delayacct.c71
-rw-r--r--kernel/dma/swiotlb.c23
-rw-r--r--kernel/entry/common.c5
-rw-r--r--kernel/events/core.c32
-rw-r--r--kernel/events/hw_breakpoint.c2
-rw-r--r--kernel/events/uprobes.c5
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/fork.c32
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/futex.c32
-rw-r--r--kernel/gcov/Kconfig1
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/irq/Kconfig5
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/irq/irqdesc.c73
-rw-r--r--kernel/irq/irqdomain.c120
-rw-r--r--kernel/irq/manage.c38
-rw-r--r--kernel/irq/settings.h12
-rw-r--r--kernel/irq_work.c3
-rw-r--r--kernel/jump_label.c12
-rw-r--r--kernel/kcsan/report.c2
-rw-r--r--kernel/kprobes.c19
-rw-r--r--kernel/kthread.c130
-rw-r--r--kernel/locking/lockdep.c133
-rw-r--r--kernel/locking/mutex.c6
-rw-r--r--kernel/locking/rtmutex.c4
-rw-r--r--kernel/locking/rwsem.c2
-rw-r--r--kernel/module.c14
-rw-r--r--kernel/power/Kconfig12
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/snapshot.c10
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/printk/printk.c116
-rw-r--r--kernel/printk/printk_safe.c2
-rw-r--r--kernel/ptrace.c12
-rw-r--r--kernel/rcu/rcutorture.c4
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/tree_stall.h12
-rw-r--r--kernel/reboot.c79
-rw-r--r--kernel/sched/Makefile1
-rw-r--r--kernel/sched/core.c1146
-rw-r--r--kernel/sched/core_sched.c229
-rw-r--r--kernel/sched/cpuacct.c12
-rw-r--r--kernel/sched/cpufreq_schedutil.c1
-rw-r--r--kernel/sched/deadline.c50
-rw-r--r--kernel/sched/debug.c7
-rw-r--r--kernel/sched/fair.c546
-rw-r--r--kernel/sched/idle.c13
-rw-r--r--kernel/sched/isolation.c4
-rw-r--r--kernel/sched/loadavg.c2
-rw-r--r--kernel/sched/pelt.h13
-rw-r--r--kernel/sched/psi.c12
-rw-r--r--kernel/sched/rt.c48
-rw-r--r--kernel/sched/sched.h437
-rw-r--r--kernel/sched/stats.h68
-rw-r--r--kernel/sched/stop_task.c14
-rw-r--r--kernel/sched/topology.c213
-rw-r--r--kernel/sched/wait.c9
-rw-r--r--kernel/seccomp.c81
-rw-r--r--kernel/signal.c71
-rw-r--r--kernel/smpboot.c1
-rw-r--r--kernel/softirq.c5
-rw-r--r--kernel/sys.c19
-rw-r--r--kernel/sysctl.c57
-rw-r--r--kernel/time/Kconfig20
-rw-r--r--kernel/time/Makefile2
-rw-r--r--kernel/time/clockevents.c23
-rw-r--r--kernel/time/clocksource-wdtest.c202
-rw-r--r--kernel/time/clocksource.c227
-rw-r--r--kernel/time/jiffies.c15
-rw-r--r--kernel/time/posix-cpu-timers.c4
-rw-r--r--kernel/time/tick-broadcast.c143
-rw-r--r--kernel/time/tick-common.c2
-rw-r--r--kernel/time/tick-internal.h5
-rw-r--r--kernel/time/tick-sched.c130
-rw-r--r--kernel/time/time_test.c99
-rw-r--r--kernel/time/timeconv.c128
-rw-r--r--kernel/time/timer.c2
-rw-r--r--kernel/time/timer_list.c10
-rw-r--r--kernel/trace/bpf_trace.c34
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/trace.c13
-rw-r--r--kernel/trace/trace.h13
-rw-r--r--kernel/trace/trace_clock.c6
-rw-r--r--kernel/ucount.c116
-rw-r--r--kernel/user.c3
-rw-r--r--kernel/user_namespace.c9
-rw-r--r--kernel/watchdog.c12
-rw-r--r--kernel/workqueue.c12
-rw-r--r--lib/Kconfig.debug31
-rw-r--r--lib/Kconfig.kasan16
-rw-r--r--lib/Makefile2
-rw-r--r--lib/atomic64.c36
-rw-r--r--lib/crc64.c2
-rw-r--r--lib/debug_locks.c2
-rw-r--r--lib/dump_stack.c48
-rw-r--r--lib/kstrtox.c13
-rw-r--r--lib/kstrtox.h2
-rw-r--r--lib/kunit/test.c18
-rw-r--r--lib/locking-selftest.c83
-rw-r--r--lib/percpu-refcount.c6
-rw-r--r--lib/slub_kunit.c152
-rw-r--r--lib/smp_processor_id.c6
-rw-r--r--lib/syscall.c4
-rw-r--r--lib/test_hmm.c5
-rw-r--r--lib/test_kasan.c11
-rw-r--r--lib/test_printf.c5
-rw-r--r--lib/test_scanf.c750
-rw-r--r--lib/vsprintf.c112
-rw-r--r--mm/Kconfig36
-rw-r--r--mm/backing-dev.c66
-rw-r--r--mm/compaction.c4
-rw-r--r--mm/debug.c25
-rw-r--r--mm/debug_vm_pgtable.c67
-rw-r--r--mm/dmapool.c5
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/gup.c73
-rw-r--r--mm/huge_memory.c56
-rw-r--r--mm/hugetlb.c158
-rw-r--r--mm/internal.h62
-rw-r--r--mm/kasan/Makefile4
-rw-r--r--mm/kasan/common.c10
-rw-r--r--mm/kasan/generic.c3
-rw-r--r--mm/kasan/hw_tags.c38
-rw-r--r--mm/kasan/init.c10
-rw-r--r--mm/kasan/kasan.h10
-rw-r--r--mm/kasan/report.c6
-rw-r--r--mm/kasan/report_hw_tags.c5
-rw-r--r--mm/kasan/report_sw_tags.c43
-rw-r--r--mm/kasan/report_tags.c51
-rw-r--r--mm/kasan/shadow.c6
-rw-r--r--mm/kasan/sw_tags.c42
-rw-r--r--mm/kasan/tags.c59
-rw-r--r--mm/kfence/core.c6
-rw-r--r--mm/kfence/kfence_test.c5
-rw-r--r--mm/kmemleak.c18
-rw-r--r--mm/ksm.c6
-rw-r--r--mm/memblock.c8
-rw-r--r--mm/memcontrol.c363
-rw-r--r--mm/memfd.c4
-rw-r--r--mm/memory-failure.c453
-rw-r--r--mm/memory.c67
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/mempolicy.c4
-rw-r--r--mm/mempool.c6
-rw-r--r--mm/migrate.c5
-rw-r--r--mm/mlock.c22
-rw-r--r--mm/mmap.c58
-rw-r--r--mm/mmap_lock.c33
-rw-r--r--mm/mremap.c5
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page-writeback.c94
-rw-r--r--mm/page_alloc.c880
-rw-r--r--mm/page_ext.c2
-rw-r--r--mm/page_owner.c2
-rw-r--r--mm/page_reporting.c19
-rw-r--r--mm/page_reporting.h5
-rw-r--r--mm/page_vma_mapped.c160
-rw-r--r--mm/pagewalk.c58
-rw-r--r--mm/pgtable-generic.c5
-rw-r--r--mm/rmap.c39
-rw-r--r--mm/shmem.c28
-rw-r--r--mm/slab.h24
-rw-r--r--mm/slab_common.c63
-rw-r--r--mm/slub.c455
-rw-r--r--mm/sparse.c13
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_slots.c2
-rw-r--r--mm/swap_state.c20
-rw-r--r--mm/swapfile.c179
-rw-r--r--mm/truncate.c43
-rw-r--r--mm/vmalloc.c162
-rw-r--r--mm/vmscan.c43
-rw-r--r--mm/vmstat.c246
-rw-r--r--mm/workingset.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan.h6
-rw-r--r--net/8021q/vlan_dev.c6
-rw-r--r--net/9p/trans_virtio.c6
-rw-r--r--net/Kconfig27
-rw-r--r--net/appletalk/aarp.c2
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/atm/atm_sysfs.c24
-rw-r--r--net/atm/br2684.c4
-rw-r--r--net/atm/resources.c7
-rw-r--r--net/batman-adv/bat_iv_ogm.c10
-rw-r--r--net/batman-adv/bat_v.c10
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c4
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h1
-rw-r--r--net/batman-adv/hard-interface.c65
-rw-r--r--net/batman-adv/hard-interface.h3
-rw-r--r--net/batman-adv/hash.h2
-rw-r--r--net/batman-adv/main.h3
-rw-r--r--net/batman-adv/multicast.c41
-rw-r--r--net/batman-adv/netlink.c8
-rw-r--r--net/batman-adv/routing.c9
-rw-r--r--net/batman-adv/send.c374
-rw-r--r--net/batman-adv/send.h12
-rw-r--r--net/batman-adv/soft-interface.c49
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/bluetooth/6lowpan.c54
-rw-r--r--net/bluetooth/a2mp.c24
-rw-r--r--net/bluetooth/amp.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/cmtp/capi.c22
-rw-r--r--net/bluetooth/cmtp/core.c5
-rw-r--r--net/bluetooth/hci_conn.c12
-rw-r--r--net/bluetooth/hci_core.c93
-rw-r--r--net/bluetooth/hci_debugfs.c8
-rw-r--r--net/bluetooth/hci_event.c189
-rw-r--r--net/bluetooth/hci_request.c203
-rw-r--r--net/bluetooth/hci_sock.c22
-rw-r--r--net/bluetooth/hidp/core.c8
-rw-r--r--net/bluetooth/l2cap_core.c16
-rw-r--r--net/bluetooth/mgmt.c60
-rw-r--r--net/bluetooth/mgmt_config.c4
-rw-r--r--net/bluetooth/msft.c8
-rw-r--r--net/bluetooth/rfcomm/tty.c10
-rw-r--r--net/bluetooth/sco.c8
-rw-r--r--net/bluetooth/smp.c90
-rw-r--r--net/bluetooth/smp.h6
-rw-r--r--net/bpf/test_run.c45
-rw-r--r--net/bpfilter/main.c2
-rw-r--r--net/bridge/br_cfm.c2
-rw-r--r--net/bridge/br_fdb.c60
-rw-r--r--net/bridge/br_forward.c5
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mdb.c80
-rw-r--r--net/bridge/br_mrp.c33
-rw-r--r--net/bridge/br_multicast.c445
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h81
-rw-r--r--net/bridge/br_private_mrp.h11
-rw-r--r--net/bridge/br_stp.c4
-rw-r--r--net/bridge/br_switchdev.c12
-rw-r--r--net/bridge/br_vlan.c19
-rw-r--r--net/bridge/br_vlan_tunnel.c38
-rw-r--r--net/caif/caif_dev.c13
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/caif/caif_usb.c14
-rw-r--r--net/caif/cfcnfg.c18
-rw-r--r--net/caif/cfserl.c5
-rw-r--r--net/caif/chnl_net.c2
-rw-r--r--net/can/bcm.c73
-rw-r--r--net/can/gw.c3
-rw-r--r--net/can/isotp.c157
-rw-r--r--net/can/j1939/main.c4
-rw-r--r--net/can/j1939/socket.c9
-rw-r--r--net/can/j1939/transport.c54
-rw-r--r--net/can/proc.c6
-rw-r--r--net/can/raw.c68
-rw-r--r--net/ceph/auth.c20
-rw-r--r--net/ceph/auth_none.c5
-rw-r--r--net/ceph/auth_x.c15
-rw-r--r--net/ceph/auth_x_protocol.h2
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/ceph/osdmap.c4
-rw-r--r--net/compat.c2
-rw-r--r--net/core/bpf_sk_storage.c3
-rw-r--r--net/core/dev.c81
-rw-r--r--net/core/devlink.c714
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c117
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-traces.c1
-rw-r--r--net/core/net_namespace.c20
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/page_pool.c28
-rw-r--r--net/core/pktgen.c38
-rw-r--r--net/core/rtnetlink.c74
-rw-r--r--net/core/skbuff.c30
-rw-r--r--net/core/skmsg.c82
-rw-r--r--net/core/sock.c144
-rw-r--r--net/core/sock_map.c2
-rw-r--r--net/core/sock_reuseport.c366
-rw-r--r--net/core/xdp.c39
-rw-r--r--net/dcb/dcbnl.c6
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c1
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/decnet/dn_nsp_out.c2
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/devres.c2
-rw-r--r--net/dsa/dsa2.c36
-rw-r--r--net/dsa/dsa_priv.h27
-rw-r--r--net/dsa/master.c11
-rw-r--r--net/dsa/port.c148
-rw-r--r--net/dsa/slave.c263
-rw-r--r--net/dsa/switch.c338
-rw-r--r--net/dsa/tag_8021q.c25
-rw-r--r--net/dsa/tag_ar9331.c2
-rw-r--r--net/dsa/tag_brcm.c6
-rw-r--r--net/dsa/tag_dsa.c4
-rw-r--r--net/dsa/tag_gswip.c2
-rw-r--r--net/dsa/tag_hellcreek.c3
-rw-r--r--net/dsa/tag_ksz.c9
-rw-r--r--net/dsa/tag_lan9303.c2
-rw-r--r--net/dsa/tag_mtk.c2
-rw-r--r--net/dsa/tag_ocelot.c4
-rw-r--r--net/dsa/tag_ocelot_8021q.c20
-rw-r--r--net/dsa/tag_qca.c2
-rw-r--r--net/dsa/tag_rtl4_a.c2
-rw-r--r--net/dsa/tag_sja1105.c312
-rw-r--r--net/dsa/tag_trailer.c3
-rw-r--r--net/dsa/tag_xrs700x.c3
-rw-r--r--net/ethtool/eeprom.c15
-rw-r--r--net/ethtool/ioctl.c10
-rw-r--r--net/ethtool/netlink.c11
-rw-r--r--net/ethtool/netlink.h4
-rw-r--r--net/ethtool/stats.c2
-rw-r--r--net/ethtool/strset.c2
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/hsr/hsr_forward.c30
-rw-r--r--net/hsr/hsr_forward.h8
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/hsr/hsr_main.h4
-rw-r--r--net/hsr/hsr_slave.c11
-rw-r--r--net/ieee802154/nl-mac.c10
-rw-r--r--net/ieee802154/nl-phy.c4
-rw-r--r--net/ieee802154/nl802154.c9
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/bpf_tcp_ca.c2
-rw-r--r--net/ipv4/cipso_ipv4.c4
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/esp4.c4
-rw-r--r--net/ipv4/esp4_offload.c4
-rw-r--r--net/ipv4/fib_frontend.c12
-rw-r--r--net/ipv4/fib_lookup.h2
-rw-r--r--net/ipv4/fib_semantics.c1
-rw-r--r--net/ipv4/gre_demux.c1
-rw-r--r--net/ipv4/icmp.c72
-rw-r--r--net/ipv4/igmp.c1
-rw-r--r--net/ipv4/inet_connection_sock.c202
-rw-r--r--net/ipv4/inet_diag.c12
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_output.c32
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c5
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c2
-rw-r--r--net/ipv4/ping.c14
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/protocol.c6
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c145
-rw-r--r--net/ipv4/sysctl_net_ipv4.c40
-rw-r--r--net/ipv4/tcp.c18
-rw-r--r--net/ipv4/tcp_bpf.c31
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv4/tcp_input.c48
-rw-r--r--net/ipv4/tcp_ipv4.c27
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv4/tunnel4.c3
-rw-r--r--net/ipv4/udp.c16
-rw-r--r--net/ipv4/udp_bpf.c53
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv4/xfrm4_protocol.c3
-rw-r--r--net/ipv4/xfrm4_tunnel.c1
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/esp6_offload.c1
-rw-r--r--net/ipv6/exthdrs.c31
-rw-r--r--net/ipv6/fib6_rules.c2
-rw-r--r--net/ipv6/icmp.c21
-rw-r--r--net/ipv6/ip6_fib.c9
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c5
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/mcast.c28
-rw-r--r--net/ipv6/mip6.c99
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c22
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c2
-rw-r--r--net/ipv6/output_core.c28
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c139
-rw-r--r--net/ipv6/seg6_local.c94
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/ipv6/sysctl_net_ipv6.c31
-rw-r--r--net/ipv6/tcp_ipv6.c20
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/ipv6/xfrm6_output.c7
-rw-r--r--net/ipv6/xfrm6_tunnel.c1
-rw-r--r--net/iucv/af_iucv.c27
-rw-r--r--net/kcm/kcmsock.c2
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l2tp/l2tp_ip.c3
-rw-r--r--net/l2tp/l2tp_ppp.c2
-rw-r--r--net/lapb/lapb_iface.c4
-rw-r--r--net/mac80211/cfg.c45
-rw-r--r--net/mac80211/chan.c108
-rw-r--r--net/mac80211/debugfs.c81
-rw-r--r--net/mac80211/debugfs_netdev.c33
-rw-r--r--net/mac80211/debugfs_sta.c24
-rw-r--r--net/mac80211/driver-ops.h26
-rw-r--r--net/mac80211/he.c8
-rw-r--r--net/mac80211/ht.c18
-rw-r--r--net/mac80211/ieee80211_i.h232
-rw-r--r--net/mac80211/iface.c264
-rw-r--r--net/mac80211/key.c7
-rw-r--r--net/mac80211/key.h2
-rw-r--r--net/mac80211/led.c12
-rw-r--r--net/mac80211/main.c39
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c256
-rw-r--r--net/mac80211/rate.c13
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c36
-rw-r--r--net/mac80211/rx.c211
-rw-r--r--net/mac80211/scan.c21
-rw-r--r--net/mac80211/sta_info.c89
-rw-r--r--net/mac80211/sta_info.h44
-rw-r--r--net/mac80211/status.c26
-rw-r--r--net/mac80211/tdls.c28
-rw-r--r--net/mac80211/trace.h33
-rw-r--r--net/mac80211/tx.c518
-rw-r--r--net/mac80211/util.c59
-rw-r--r--net/mac80211/wpa.c13
-rw-r--r--net/mptcp/ctrl.c68
-rw-r--r--net/mptcp/mib.c2
-rw-r--r--net/mptcp/mib.h2
-rw-r--r--net/mptcp/mptcp_diag.c1
-rw-r--r--net/mptcp/options.c201
-rw-r--r--net/mptcp/pm.c1
-rw-r--r--net/mptcp/pm_netlink.c36
-rw-r--r--net/mptcp/protocol.c317
-rw-r--r--net/mptcp/protocol.h54
-rw-r--r--net/mptcp/sockopt.c153
-rw-r--r--net/mptcp/subflow.c366
-rw-r--r--net/mptcp/token.c9
-rw-r--r--net/ncsi/internal.h4
-rw-r--r--net/ncsi/ncsi-manage.c2
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/ipset/ip_set_core.c50
-rw-r--r--net/netfilter/ipvs/Kconfig2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c22
-rw-r--r--net/netfilter/nf_conntrack_ecache.c8
-rw-r--r--net/netfilter/nf_conntrack_expect.c12
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c2
-rw-r--r--net/netfilter/nf_conntrack_helper.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c88
-rw-r--r--net/netfilter/nf_conntrack_proto.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c14
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c7
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c28
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c11
-rw-r--r--net/netfilter/nf_conntrack_standalone.c54
-rw-r--r--net/netfilter/nf_flow_table_core.c50
-rw-r--r--net/netfilter/nf_flow_table_offload.c11
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c436
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nf_tables_offload.c34
-rw-r--r--net/netfilter/nf_tables_trace.c6
-rw-r--r--net/netfilter/nfnetlink.c3
-rw-r--r--net/netfilter/nfnetlink_acct.c9
-rw-r--r--net/netfilter/nfnetlink_cthelper.c18
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c34
-rw-r--r--net/netfilter/nfnetlink_hook.c377
-rw-r--r--net/netfilter/nfnetlink_log.c5
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/netfilter/nft_chain_filter.c26
-rw-r--r--net/netfilter/nft_chain_nat.c4
-rw-r--r--net/netfilter/nft_chain_route.c4
-rw-r--r--net/netfilter/nft_compat.c45
-rw-r--r--net/netfilter/nft_ct.c2
-rw-r--r--net/netfilter/nft_exthdr.c67
-rw-r--r--net/netfilter/nft_flow_offload.c2
-rw-r--r--net/netfilter/nft_last.c87
-rw-r--r--net/netfilter/nft_lookup.c35
-rw-r--r--net/netfilter/nft_objref.c4
-rw-r--r--net/netfilter/nft_osf.c5
-rw-r--r--net/netfilter/nft_payload.c10
-rw-r--r--net/netfilter/nft_reject_inet.c4
-rw-r--r--net/netfilter/nft_set_bitmap.c5
-rw-r--r--net/netfilter/nft_set_hash.c17
-rw-r--r--net/netfilter/nft_set_pipapo.c4
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c15
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.h2
-rw-r--r--net/netfilter/nft_set_rbtree.c5
-rw-r--r--net/netfilter/nft_synproxy.c4
-rw-r--r--net/netfilter/nft_tproxy.c13
-rw-r--r--net/netfilter/xt_AUDIT.c2
-rw-r--r--net/netfilter/xt_CT.c1
-rw-r--r--net/netfilter/xt_limit.c46
-rw-r--r--net/netlabel/netlabel_calipso.c4
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netlabel/netlabel_domainhash.c2
-rw-r--r--net/netlabel/netlabel_kapi.c2
-rw-r--r--net/netlabel/netlabel_mgmt.c27
-rw-r--r--net/netlabel/netlabel_unlabeled.c10
-rw-r--r--net/netlabel/netlabel_user.h4
-rw-r--r--net/netlink/af_netlink.c14
-rw-r--r--net/netrom/nr_route.c4
-rw-r--r--net/nfc/hci/command.c2
-rw-r--r--net/nfc/hci/core.c2
-rw-r--r--net/nfc/hci/llc_shdlc.c2
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/nfc/nci/core.c1
-rw-r--r--net/nfc/nci/hci.c7
-rw-r--r--net/nfc/rawsock.c4
-rw-r--r--net/openvswitch/Makefile3
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/conntrack.c11
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/openvswitch/meter.c8
-rw-r--r--net/openvswitch/openvswitch_trace.c10
-rw-r--r--net/openvswitch/openvswitch_trace.h158
-rw-r--r--net/packet/af_packet.c63
-rw-r--r--net/qrtr/ns.c4
-rw-r--r--net/qrtr/qrtr.c4
-rw-r--r--net/rds/connection.c23
-rw-r--r--net/rds/ib_ring.c2
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/tcp.c4
-rw-r--r--net/rds/tcp.h3
-rw-r--r--net/rds/tcp_connect.c1
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_recv.c2
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rxrpc/af_rxrpc.c1
-rw-r--r--net/rxrpc/local_event.c2
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_bpf.c2
-rw-r--r--net/sched/act_ct.c31
-rw-r--r--net/sched/act_vlan.c11
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/cls_bpf.c3
-rw-r--r--net/sched/cls_rsvp.h2
-rw-r--r--net/sched/cls_tcindex.c2
-rw-r--r--net/sched/ematch.c2
-rw-r--r--net/sched/sch_cake.c18
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_fq_pie.c19
-rw-r--r--net/sched/sch_generic.c89
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_htb.c47
-rw-r--r--net/sched/sch_qfq.c8
-rw-r--r--net/sched/sch_taprio.c88
-rw-r--r--net/sctp/associola.c6
-rw-r--r--net/sctp/bind_addr.c19
-rw-r--r--net/sctp/debug.c1
-rw-r--r--net/sctp/input.c145
-rw-r--r--net/sctp/ipv6.c121
-rw-r--r--net/sctp/output.c33
-rw-r--r--net/sctp/outqueue.c13
-rw-r--r--net/sctp/protocol.c29
-rw-r--r--net/sctp/sm_make_chunk.c73
-rw-r--r--net/sctp/sm_sideeffect.c37
-rw-r--r--net/sctp/sm_statefuns.c70
-rw-r--r--net/sctp/sm_statetable.c43
-rw-r--r--net/sctp/socket.c124
-rw-r--r--net/sctp/sysctl.c37
-rw-r--r--net/sctp/transport.c150
-rw-r--r--net/smc/Makefile2
-rw-r--r--net/smc/af_smc.c104
-rw-r--r--net/smc/smc_core.c28
-rw-r--r--net/smc/smc_ism.c12
-rw-r--r--net/smc/smc_netlink.c11
-rw-r--r--net/smc/smc_netlink.h2
-rw-r--r--net/smc/smc_rx.c8
-rw-r--r--net/smc/smc_stats.c413
-rw-r--r--net/smc/smc_stats.h266
-rw-r--r--net/smc/smc_tx.c23
-rw-r--r--net/socket.c65
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/sunrpc/clnt.c7
-rw-r--r--net/sunrpc/xprt.c40
-rw-r--r--net/sunrpc/xprtmultipath.c2
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c27
-rw-r--r--net/sunrpc/xprtrdma/transport.c12
-rw-r--r--net/sunrpc/xprtrdma/verbs.c18
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/switchdev/switchdev.c25
-rw-r--r--net/tipc/bcast.c2
-rw-r--r--net/tipc/core.c6
-rw-r--r--net/tipc/core.h10
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/link.c12
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/msg.c36
-rw-r--r--net/tipc/msg.h3
-rw-r--r--net/tipc/name_table.c6
-rw-r--r--net/tipc/name_table.h4
-rw-r--r--net/tipc/net.c15
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/socket.c157
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/tipc/udp_media.c2
-rw-r--r--net/tls/tls_device.c62
-rw-r--r--net/tls/tls_device_fallback.c7
-rw-r--r--net/tls/tls_main.c1
-rw-r--r--net/tls/tls_sw.c16
-rw-r--r--net/unix/af_unix.c199
-rw-r--r--net/vmw_vsock/af_vsock.c470
-rw-r--r--net/vmw_vsock/hyperv_transport.c4
-rw-r--r--net/vmw_vsock/virtio_transport.c30
-rw-r--r--net/vmw_vsock/virtio_transport_common.c178
-rw-r--r--net/vmw_vsock/vmci_transport.c6
-rw-r--r--net/vmw_vsock/vsock_loopback.c12
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/chan.c43
-rw-r--r--net/wireless/core.c63
-rw-r--r--net/wireless/core.h3
-rw-r--r--net/wireless/nl80211.c22
-rw-r--r--net/wireless/pmsr.c28
-rw-r--r--net/wireless/rdev-ops.h12
-rw-r--r--net/wireless/reg.c5
-rw-r--r--net/wireless/scan.c22
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/trace.h36
-rw-r--r--net/wireless/util.c10
-rw-r--r--net/wireless/wext-compat.c8
-rw-r--r--net/wireless/wext-spy.c14
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/x25/x25_forward.c8
-rw-r--r--net/x25/x25_link.c5
-rw-r--r--net/x25/x25_route.c15
-rw-r--r--net/xdp/xdp_umem.c7
-rw-r--r--net/xdp/xsk.c6
-rw-r--r--net/xdp/xsk.h4
-rw-r--r--net/xdp/xsk_queue.h11
-rw-r--r--net/xdp/xskmap.c32
-rw-r--r--net/xfrm/xfrm_device.c1
-rw-r--r--net/xfrm/xfrm_hash.h7
-rw-r--r--net/xfrm/xfrm_input.c6
-rw-r--r--net/xfrm/xfrm_interface.c1
-rw-r--r--net/xfrm/xfrm_output.c131
-rw-r--r--net/xfrm/xfrm_policy.c29
-rw-r--r--net/xfrm/xfrm_replay.c171
-rw-r--r--net/xfrm/xfrm_state.c81
-rw-r--r--net/xfrm/xfrm_user.c28
-rw-r--r--samples/bpf/Makefile3
-rw-r--r--samples/bpf/ibumad_kern.c2
-rw-r--r--samples/bpf/ibumad_user.c2
-rw-r--r--samples/bpf/task_fd_query_user.c2
-rw-r--r--samples/bpf/xdp_fwd_user.c2
-rw-r--r--samples/bpf/xdp_redirect_map_multi_kern.c88
-rw-r--r--samples/bpf/xdp_redirect_map_multi_user.c302
-rw-r--r--samples/bpf/xdp_redirect_user.c4
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c3
-rw-r--r--samples/bpf/xdpsock_user.c2
-rw-r--r--samples/kprobes/kprobe_example.c46
-rw-r--r--samples/pktgen/parameters.sh7
-rwxr-xr-xsamples/pktgen/pktgen_sample01_simple.sh2
-rwxr-xr-xsamples/pktgen/pktgen_sample02_multiqueue.sh2
-rwxr-xr-xsamples/pktgen/pktgen_sample03_burst_single_flow.sh2
-rwxr-xr-xsamples/pktgen/pktgen_sample04_many_flows.sh2
-rwxr-xr-xsamples/pktgen/pktgen_sample05_flow_per_thread.sh2
-rwxr-xr-xsamples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh2
-rw-r--r--samples/vfio-mdev/mdpy-fb.c13
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.kasan1
-rw-r--r--scripts/Makefile.modfinal2
-rwxr-xr-xscripts/atomic/check-atomics.sh1
-rwxr-xr-xscripts/atomic/gen-atomic-instrumented.sh51
-rwxr-xr-xscripts/atomic/gen-atomics.sh1
-rwxr-xr-xscripts/documentation-file-ref-check2
-rwxr-xr-xscripts/kconfig/streamline_config.pl80
-rwxr-xr-xscripts/kernel-doc71
-rwxr-xr-xscripts/link-vmlinux.sh6
-rw-r--r--scripts/recordmcount.h15
-rw-r--r--scripts/spelling.txt16
-rwxr-xr-xscripts/sphinx-pre-install262
-rwxr-xr-xscripts/tools-support-relr.sh3
-rw-r--r--security/integrity/evm/evm.h1
-rw-r--r--security/integrity/evm/evm_crypto.c58
-rw-r--r--security/integrity/evm/evm_main.c376
-rw-r--r--security/integrity/evm/evm_secfs.c31
-rw-r--r--security/integrity/iint.c4
-rw-r--r--security/integrity/ima/Kconfig7
-rw-r--r--security/integrity/ima/ima_appraise.c44
-rw-r--r--security/integrity/ima/ima_asymmetric_keys.c1
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/ima/ima_fs.c6
-rw-r--r--security/integrity/ima/ima_init.c4
-rw-r--r--security/integrity/ima/ima_kexec.c1
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--security/integrity/ima/ima_queue.c5
-rw-r--r--security/integrity/ima/ima_template.c30
-rw-r--r--security/integrity/ima/ima_template_lib.c211
-rw-r--r--security/integrity/ima/ima_template_lib.h16
-rw-r--r--security/lsm_audit.c1
-rw-r--r--security/safesetid/lsm.c2
-rw-r--r--security/safesetid/lsm.h2
-rw-r--r--security/security.c8
-rw-r--r--security/selinux/avc.c61
-rw-r--r--security/selinux/hooks.c22
-rw-r--r--security/selinux/include/avc.h13
-rw-r--r--security/selinux/include/xfrm.h2
-rw-r--r--security/selinux/ss/avtab.c28
-rw-r--r--security/selinux/ss/avtab.h16
-rw-r--r--security/selinux/ss/conditional.c14
-rw-r--r--security/selinux/ss/policydb.c1
-rw-r--r--security/selinux/ss/services.c27
-rw-r--r--security/selinux/xfrm.c2
-rw-r--r--security/smack/smack_access.c10
-rw-r--r--security/smack/smackfs.c4
-rw-r--r--security/tomoyo/audit.c1
-rw-r--r--security/tomoyo/securityfs_if.c1
-rw-r--r--security/tomoyo/tomoyo.c12
-rw-r--r--security/tomoyo/util.c4
-rw-r--r--sound/core/control_led.c33
-rw-r--r--sound/core/seq/seq_timer.c10
-rw-r--r--sound/core/timer.c3
-rw-r--r--sound/firewire/amdtp-stream.c2
-rw-r--r--sound/hda/intel-dsp-config.c4
-rw-r--r--sound/pci/hda/hda_codec.c5
-rw-r--r--sound/pci/hda/hda_generic.c1
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_cirrus.c7
-rw-r--r--sound/pci/hda/patch_realtek.c63
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c10
-rw-r--r--sound/soc/amd/raven/acp3x.h1
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c15
-rw-r--r--sound/soc/codecs/ak5558.c2
-rw-r--r--sound/soc/codecs/cs35l32.c3
-rw-r--r--sound/soc/codecs/cs35l33.c1
-rw-r--r--sound/soc/codecs/cs35l34.c3
-rw-r--r--sound/soc/codecs/cs42l42.c3
-rw-r--r--sound/soc/codecs/cs42l56.c7
-rw-r--r--sound/soc/codecs/cs42l73.c3
-rw-r--r--sound/soc/codecs/cs53l30.c3
-rw-r--r--sound/soc/codecs/da7219.c5
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c1
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c1
-rw-r--r--sound/soc/codecs/max98088.c13
-rw-r--r--sound/soc/codecs/rt5645.c49
-rw-r--r--sound/soc/codecs/rt5659.c26
-rw-r--r--sound/soc/codecs/rt5682-sdw.c3
-rw-r--r--sound/soc/codecs/rt711-sdca.c4
-rw-r--r--sound/soc/codecs/sti-sas.c1
-rw-r--r--sound/soc/codecs/tas2562.h14
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c1
-rw-r--r--sound/soc/generic/audio-graph-card.c57
-rw-r--r--sound/soc/generic/simple-card.c168
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c25
-rw-r--r--sound/soc/pxa/pxa-ssp.c16
-rw-r--r--sound/soc/qcom/lpass-cpu.c91
-rw-r--r--sound/soc/qcom/lpass.h4
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/soc/soc-topology.c6
-rw-r--r--sound/soc/sof/intel/hda-dai.c5
-rw-r--r--sound/soc/sof/pm.c1
-rw-r--r--sound/soc/stm/stm32_sai_sub.c5
-rw-r--r--sound/usb/format.c2
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/mixer_scarlett_gen2.c81
-rw-r--r--sound/usb/mixer_scarlett_gen2.h2
-rw-r--r--tools/arch/mips/include/uapi/asm/perf_regs.h40
-rw-r--r--tools/arch/x86/include/asm/asm.h193
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h7
-rw-r--r--tools/arch/x86/include/asm/irq_vectors.h7
-rw-r--r--tools/arch/x86/include/asm/nops.h24
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h2
-rw-r--r--tools/bootconfig/include/linux/bootconfig.h4
-rw-r--r--tools/bootconfig/main.c1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst2
-rw-r--r--tools/bpf/bpftool/Makefile5
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool6
-rw-r--r--tools/bpf/bpftool/cgroup.c3
-rw-r--r--tools/bpf/bpftool/gen.c421
-rw-r--r--tools/bpf/bpftool/main.c11
-rw-r--r--tools/bpf/bpftool/main.h1
-rw-r--r--tools/bpf/bpftool/prog.c109
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c3
-rw-r--r--tools/bpf/resolve_btfids/main.c3
-rwxr-xr-xtools/debugging/kernel-chktaint2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h3
-rw-r--r--tools/include/uapi/linux/bpf.h82
-rw-r--r--tools/include/uapi/linux/fs.h2
-rw-r--r--tools/include/uapi/linux/in.h3
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/include/uapi/linux/prctl.h8
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile18
-rw-r--r--tools/lib/bpf/bpf.c179
-rw-r--r--tools/lib/bpf/bpf.h2
-rw-r--r--tools/lib/bpf/bpf_gen_internal.h41
-rw-r--r--tools/lib/bpf/bpf_helpers.h66
-rw-r--r--tools/lib/bpf/bpf_prog_linfo.c18
-rw-r--r--tools/lib/bpf/bpf_tracing.h108
-rw-r--r--tools/lib/bpf/btf.c302
-rw-r--r--tools/lib/bpf/btf_dump.c14
-rw-r--r--tools/lib/bpf/gen_loader.c729
-rw-r--r--tools/lib/bpf/libbpf.c965
-rw-r--r--tools/lib/bpf/libbpf.h68
-rw-r--r--tools/lib/bpf/libbpf.map13
-rw-r--r--tools/lib/bpf/libbpf_errno.c7
-rw-r--r--tools/lib/bpf/libbpf_internal.h66
-rw-r--r--tools/lib/bpf/libbpf_legacy.h59
-rw-r--r--tools/lib/bpf/linker.c41
-rw-r--r--tools/lib/bpf/netlink.c572
-rw-r--r--tools/lib/bpf/nlattr.c2
-rw-r--r--tools/lib/bpf/nlattr.h60
-rw-r--r--tools/lib/bpf/ringbuf.c26
-rw-r--r--tools/lib/bpf/skel_internal.h123
-rw-r--r--tools/lib/bpf/xsk.c2
-rw-r--r--tools/lib/traceevent/plugins/plugin_kvm.c4
-rw-r--r--tools/objtool/arch/x86/decode.c6
-rw-r--r--tools/objtool/arch/x86/include/arch/special.h1
-rw-r--r--tools/objtool/check.c38
-rw-r--r--tools/objtool/elf.c135
-rw-r--r--tools/objtool/include/objtool/elf.h18
-rw-r--r--tools/objtool/include/objtool/objtool.h3
-rw-r--r--tools/objtool/include/objtool/special.h1
-rw-r--r--tools/objtool/special.c14
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt6
-rw-r--r--tools/perf/Documentation/perf-script.txt7
-rw-r--r--tools/perf/Makefile.config1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/builtin-buildid-list.c3
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-stat.c13
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf.c4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/floating_point.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json124
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/marked.json61
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json79
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json133
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json135
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json8
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json22
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py12
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/pfm.c4
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh4
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h2
-rw-r--r--tools/perf/util/bpf_counter.c10
-rw-r--r--tools/perf/util/dwarf-aux.c8
-rw-r--r--tools/perf/util/env.c1
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c3
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c6
-rw-r--r--tools/perf/util/intel-pt.c6
-rw-r--r--tools/perf/util/machine.c3
-rw-r--r--tools/perf/util/metricgroup.c14
-rw-r--r--tools/perf/util/parse-events.c13
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/perf_api_probe.c10
-rw-r--r--tools/perf/util/perf_api_probe.h1
-rw-r--r--tools/perf/util/pfm.c11
-rw-r--r--tools/perf/util/probe-finder.c3
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/stat-display.c8
-rw-r--r--tools/perf/util/symbol-elf.c1
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c18
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c15
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst.h2
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/arm64/fp/sve-probe-vls.c2
-rw-r--r--tools/testing/selftests/bpf/.gitignore4
-rw-r--r--tools/testing/selftests/bpf/Makefile19
-rw-r--r--tools/testing/selftests/bpf/Makefile.docs3
-rw-r--r--tools/testing/selftests/bpf/README.rst19
-rw-r--r--tools/testing/selftests/bpf/bench.c1
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_rename.c2
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c6
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomics.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c93
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_write.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c84
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_link.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/check_mtu.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_fexit.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_sleep.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/hashmap.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_btf.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_module.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/link_pinning.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c288
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c559
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_branches.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_buffer.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_user.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rdonly_maps.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c53
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skeleton.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_fields.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/static_linked.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/syscall.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_bpf.c395
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c785
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_overhead.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/udp_limit.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_link.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_netlink.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp4.c1
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp6.c1
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c4
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps1.c2
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c121
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c2
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c2
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c2
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c2
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_args.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_and_delete.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_migrate_reuseport.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_skeleton.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf_single.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked1.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked2.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_bpf.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c33
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c56
-rw-r--r--tools/testing/selftests/bpf/progs/trace_printk.c6
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c94
-rwxr-xr-xtools/testing/selftests/bpf/test_doc_build.sh1
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c8
-rw-r--r--tools/testing/selftests/bpf/test_maps.c185
-rw-r--r--tools/testing/selftests/bpf/test_progs.c3
-rw-r--r--tools/testing/selftests/bpf/test_progs.h9
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_redirect.sh216
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c7
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect_multi.sh204
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c10
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c15
-rw-r--r--tools/testing/selftests/bpf/xdp_redirect_multi.c226
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh3
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_scale.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh69
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh24
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_sample.sh12
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh167
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/nexthop.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/psample.sh4
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore2
-rw-r--r--tools/testing/selftests/futex/functional/Makefile7
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue.c136
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait.c171
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c126
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh6
-rw-r--r--tools/testing/selftests/kvm/.gitignore8
-rw-r--r--tools/testing/selftests/kvm/Makefile16
-rw-r--r--tools/testing/selftests/kvm/aarch64/debug-exceptions.c250
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c3
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c439
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c174
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c5
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c34
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h83
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h58
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h12
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/apic.h91
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/evmcs.h (renamed from tools/testing/selftests/kvm/include/evmcs.h)2
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/hyperv.h185
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h66
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h11
-rw-r--r--tools/testing/selftests/kvm/kvm_binary_stats_test.c237
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/handlers.S126
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c131
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c2
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c6
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c404
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h17
-rw-r--r--tools/testing/selftests/kvm/lib/perf_test_util.c8
-rw-r--r--tools/testing/selftests/kvm/lib/rbtree.c1
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c17
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c51
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/apic.c45
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c368
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c9
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c52
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c18
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c1037
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c6
-rw-r--r--tools/testing/selftests/kvm/steal_time.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/emulator_error_test.c219
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c79
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_cpuid_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_msr_index_features.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_clock.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c649
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_pv_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmu_role_test.c147
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c7
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c242
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c65
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c2
-rw-r--r--tools/testing/selftests/lib/Makefile2
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/lib/scanf.sh4
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c88
-rw-r--r--tools/testing/selftests/nci/.gitignore1
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/devlink_port_split.py8
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh12
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh25
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh364
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh32
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh456
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh458
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_dsfield.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_l4port.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/skbedit_priority.sh2
-rwxr-xr-xtools/testing/selftests/net/icmp.sh74
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh8
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c125
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh83
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh180
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh4
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh3
-rw-r--r--tools/testing/selftests/net/so_netns_cookie.c61
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh573
-rw-r--r--tools/testing/selftests/net/tls.c87
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh2
-rwxr-xr-xtools/testing/selftests/net/unicast_extensions.sh17
-rwxr-xr-xtools/testing/selftests/net/veth.sh5
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh221
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c7
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/rlimits/.gitignore2
-rw-r--r--tools/testing/selftests/rlimits/Makefile6
-rw-r--r--tools/testing/selftests/rlimits/config1
-rw-r--r--tools/testing/selftests/rlimits/rlimits-per-userns.c161
-rw-r--r--tools/testing/selftests/sched/.gitignore1
-rw-r--r--tools/testing/selftests/sched/Makefile14
-rw-r--r--tools/testing/selftests/sched/config1
-rw-r--r--tools/testing/selftests/sched/cs_prctl_test.c338
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c10
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c51
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py42
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ct.json45
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json28
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json8
-rw-r--r--tools/testing/selftests/vm/gup_test.c96
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh1
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config1
-rw-r--r--tools/testing/selftests/x86/syscall_numbering.c491
-rw-r--r--tools/testing/vsock/util.c32
-rw-r--r--tools/testing/vsock/util.h3
-rw-r--r--tools/testing/vsock/vsock_test.c116
-rw-r--r--tools/vm/page_owner_sort.c4
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/binary_stats.c146
-rw-r--r--virt/kvm/kvm_main.c352
-rw-r--r--virt/lib/irqbypass.c16
5707 files changed, 213905 insertions, 131343 deletions
diff --git a/.clang-format b/.clang-format
index c24b147cac01..15d4eaabc6b5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -109,8 +109,8 @@ ForEachMacros:
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- - 'cxl_for_each_cmd'
- 'device_for_each_child_node'
+ - 'displayid_iter_for_each'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
+ - 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- - 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
+ - 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
+ - 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
+ - 'for_each_prop_codec_conf'
+ - 'for_each_prop_dai_codec'
+ - 'for_each_prop_dai_cpu'
+ - 'for_each_prop_dlc_codecs'
+ - 'for_each_prop_dlc_cpus'
+ - 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
+ - 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
diff --git a/.mailmap b/.mailmap
index ce6c497767e2..a35ae244dfda 100644
--- a/.mailmap
+++ b/.mailmap
@@ -102,6 +102,7 @@ Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
+Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
Franck Bui-Huu <vagabon.xyz@gmail.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
@@ -212,6 +213,8 @@ Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
+Marek Behún <kabel@kernel.org> <marek.behun@nic.cz>
+Marek Behún <kabel@kernel.org> Marek Behun <marek.behun@nic.cz>
Mark Brown <broonie@sirena.org.uk>
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
@@ -243,6 +246,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
diff --git a/Documentation/ABI/obsolete/sysfs-cpuidle b/Documentation/ABI/obsolete/sysfs-cpuidle
index e398fb5e542f..972cc11d3434 100644
--- a/Documentation/ABI/obsolete/sysfs-cpuidle
+++ b/Documentation/ABI/obsolete/sysfs-cpuidle
@@ -6,4 +6,4 @@ Description:
with the update that cpuidle governor can be changed at runtime in default,
both current_governor and current_governor_ro co-exist under
/sys/devices/system/cpu/cpuidle/ file, it's duplicate so make
- current_governor_ro obselete.
+ current_governor_ro obsolete.
diff --git a/Documentation/ABI/removed/sysfs-kernel-uids b/Documentation/ABI/removed/sysfs-kernel-uids
index dc4463f190a7..85a90b86ce1e 100644
--- a/Documentation/ABI/removed/sysfs-kernel-uids
+++ b/Documentation/ABI/removed/sysfs-kernel-uids
@@ -5,7 +5,7 @@ Contact: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Description:
The /sys/kernel/uids/<uid>/cpu_shares tunable is used
to set the cpu bandwidth a user is allowed. This is a
- propotional value. What that means is that if there
+ proportional value. What that means is that if there
are two users logged in, each with an equal number of
shares, then they will get equal CPU bandwidth. Another
example would be, if User A has shares = 1024 and user
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 42599d9fa161..3066feae1d8d 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -61,7 +61,7 @@ Date: September. 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Directory for per-channel information
- NN is the VMBUS relid associtated with the channel.
+ NN is the VMBUS relid associated with the channel.
What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu
Date: September. 2017
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
index e8b60bd766f7..480a89edfa05 100644
--- a/Documentation/ABI/stable/sysfs-bus-xen-backend
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -19,7 +19,7 @@ Date: April 2011
KernelVersion: 3.0
Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Description:
- The major:minor number (in hexidecimal) of the
+ The major:minor number (in hexadecimal) of the
physical device providing the storage for this backend
block device.
diff --git a/Documentation/ABI/stable/sysfs-devices-system-cpu b/Documentation/ABI/stable/sysfs-devices-system-cpu
index 33c133e2a631..516dafea03eb 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-cpu
+++ b/Documentation/ABI/stable/sysfs-devices-system-cpu
@@ -23,3 +23,86 @@ Description: Default value for the Data Stream Control Register (DSCR) on
here).
If set by a process it will be inherited by child processes.
Values: 64 bit unsigned integer (bit field)
+
+What: /sys/devices/system/cpu/cpuX/topology/physical_package_id
+Description: physical package id of cpuX. Typically corresponds to a physical
+ socket number, but the actual value is architecture and platform
+ dependent.
+Values: integer
+
+What: /sys/devices/system/cpu/cpuX/topology/die_id
+Description: the CPU die ID of cpuX. Typically it is the hardware platform's
+ identifier (rather than the kernel's). The actual value is
+ architecture and platform dependent.
+Values: integer
+
+What: /sys/devices/system/cpu/cpuX/topology/core_id
+Description: the CPU core ID of cpuX. Typically it is the hardware platform's
+ identifier (rather than the kernel's). The actual value is
+ architecture and platform dependent.
+Values: integer
+
+What: /sys/devices/system/cpu/cpuX/topology/book_id
+Description: the book ID of cpuX. Typically it is the hardware platform's
+ identifier (rather than the kernel's). The actual value is
+ architecture and platform dependent. It's only used on s390.
+Values: integer
+
+What: /sys/devices/system/cpu/cpuX/topology/drawer_id
+Description: the drawer ID of cpuX. Typically it is the hardware platform's
+ identifier (rather than the kernel's). The actual value is
+ architecture and platform dependent. It's only used on s390.
+Values: integer
+
+What: /sys/devices/system/cpu/cpuX/topology/core_cpus
+Description: internal kernel map of CPUs within the same core.
+ (deprecated name: "thread_siblings")
+Values: hexadecimal bitmask.
+
+What: /sys/devices/system/cpu/cpuX/topology/core_cpus_list
+Description: human-readable list of CPUs within the same core.
+ The format is like 0-3, 8-11, 14,17.
+ (deprecated name: "thread_siblings_list").
+Values: decimal list.
+
+What: /sys/devices/system/cpu/cpuX/topology/package_cpus
+Description: internal kernel map of the CPUs sharing the same physical_package_id.
+ (deprecated name: "core_siblings").
+Values: hexadecimal bitmask.
+
+What: /sys/devices/system/cpu/cpuX/topology/package_cpus_list
+Description: human-readable list of CPUs sharing the same physical_package_id.
+ The format is like 0-3, 8-11, 14,17.
+ (deprecated name: "core_siblings_list")
+Values: decimal list.
+
+What: /sys/devices/system/cpu/cpuX/topology/die_cpus
+Description: internal kernel map of CPUs within the same die.
+Values: hexadecimal bitmask.
+
+What: /sys/devices/system/cpu/cpuX/topology/die_cpus_list
+Description: human-readable list of CPUs within the same die.
+ The format is like 0-3, 8-11, 14,17.
+Values: decimal list.
+
+What: /sys/devices/system/cpu/cpuX/topology/book_siblings
+Description: internal kernel map of cpuX's hardware threads within the same
+ book_id. It's only used on s390.
+Values: hexadecimal bitmask.
+
+What: /sys/devices/system/cpu/cpuX/topology/book_siblings_list
+Description: human-readable list of cpuX's hardware threads within the same
+ book_id.
+ The format is like 0-3, 8-11, 14,17. It's only used on s390.
+Values: decimal list.
+
+What: /sys/devices/system/cpu/cpuX/topology/drawer_siblings
+Description: internal kernel map of cpuX's hardware threads within the same
+ drawer_id. It's only used on s390.
+Values: hexadecimal bitmask.
+
+What: /sys/devices/system/cpu/cpuX/topology/drawer_siblings_list
+Description: human-readable list of cpuX's hardware threads within the same
+ drawer_id.
+ The format is like 0-3, 8-11, 14,17. It's only used on s390.
+Values: decimal list.
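
A quick usage sketch for the topology attributes added above (paths assume sysfs mounted at /sys; which attributes exist is architecture and platform dependent, and the book_*/drawer_* files appear only on s390)::

	# Package, die and core placement of CPU 0:
	cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
	cat /sys/devices/system/cpu/cpu0/topology/die_id
	cat /sys/devices/system/cpu/cpu0/topology/core_id
	# Siblings in the same core, as a readable list and as a hex bitmask:
	cat /sys/devices/system/cpu/cpu0/topology/core_cpus_list   # e.g. 0,4
	cat /sys/devices/system/cpu/cpu0/topology/core_cpus        # e.g. 11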
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index 55285c136cf0..d431e2d00472 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -173,7 +173,7 @@ What: /sys/bus/dsa/devices/wq<m>.<n>/priority
Date: Oct 25, 2019
KernelVersion: 5.6.0
Contact: dmaengine@vger.kernel.org
-Description: The priority value of this work queue, it is a vlue relative to
+Description: The priority value of this work queue, it is a value relative to
other work queue in the same group to control quality of service
for dispatching work from multiple workqueues in the same group.
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index fd9a8045bb0c..b2553df2e786 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files show the system reset cause, as following:
COMEX thermal shutdown; watchdog power off or reset was derived
by one of the next components: COMEX, switch board or by Small Form
- Factor mezzanine, reset requested from ASIC, reset cuased by BIOS
+ Factor mezzanine, reset requested from ASIC, reset caused by BIOS
reload. Value 1 in file means this is reset cause, 0 - otherwise.
Only one of the above causes could be 1 at the same time, representing
only last reset cause.
@@ -183,7 +183,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/vpd_wp
Date: January 2020
KernelVersion: 5.6
Contact: Vadim Pasternak <vadimp@mellanox.com>
-Description: This file allows to overwrite system VPD hardware wrtie
+Description: This file allows to overwrite system VPD hardware write
protection when attribute is set 1.
The file is read/write.
diff --git a/Documentation/ABI/testing/configfs-iio b/Documentation/ABI/testing/configfs-iio
index aebda53ec0f7..1637fcb50f56 100644
--- a/Documentation/ABI/testing/configfs-iio
+++ b/Documentation/ABI/testing/configfs-iio
@@ -31,4 +31,4 @@ Date: April 2016
KernelVersion: 4.7
Description:
Dummy IIO devices directory. Creating a directory here will result
- in creating a dummy IIO device in the IIO subystem.
+ in creating a dummy IIO device in the IIO subsystem.
diff --git a/Documentation/ABI/testing/configfs-most b/Documentation/ABI/testing/configfs-most
index bc6b8bd18da4..0a4b8649aa5a 100644
--- a/Documentation/ABI/testing/configfs-most
+++ b/Documentation/ABI/testing/configfs-most
@@ -20,7 +20,7 @@ Description:
subbuffer_size
configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
+ (needed for synchronous and isochronous data)
num_buffers
@@ -75,7 +75,7 @@ Description:
subbuffer_size
configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
+ (needed for synchronous and isochronous data)
num_buffers
@@ -130,7 +130,7 @@ Description:
subbuffer_size
configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
+ (needed for synchronous and isochronous data)
num_buffers
@@ -196,7 +196,7 @@ Description:
subbuffer_size
configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
+ (needed for synchronous and isochronous data)
num_buffers
diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget
index dc351e9af80a..b7943aa7e997 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget
+++ b/Documentation/ABI/testing/configfs-usb-gadget
@@ -137,7 +137,7 @@ Description:
This group contains "OS String" extension handling attributes.
============= ===============================================
- use flag turning "OS Desctiptors" support on/off
+ use flag turning "OS Descriptors" support on/off
b_vendor_code one-byte value used for custom per-device and
per-interface requests
qw_sign an identifier to be reported as "OS String"
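
To illustrate the attributes above, a minimal sketch, assuming a gadget named "g1" has already been created under a configfs mount at /sys/kernel/config (the gadget name and vendor code here are illustrative)::

	cd /sys/kernel/config/usb_gadget/g1/os_desc
	echo 1 > use              # turn "OS Descriptors" support on
	echo 0xcd > b_vendor_code # one-byte code for custom requests
	echo MSFT100 > qw_sign    # identifier reported as "OS String"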
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index ac5e11af79a8..889ed45be4ca 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -170,7 +170,7 @@ Description: Default color matching descriptors
bMatrixCoefficients matrix used to compute luma and
chroma values from the color primaries
bTransferCharacteristics optoelectronic transfer
- characteristic of the source picutre,
+ characteristic of the source picture,
also called the gamma function
bColorPrimaries color primaries and the reference
white
@@ -311,7 +311,7 @@ Description: Specific streaming header descriptors
a hardware trigger interrupt event
bTriggerSupport flag specifying if hardware
triggering is supported
- bStillCaptureMethod method of still image caputre
+ bStillCaptureMethod method of still image capture
supported
bTerminalLink id of the output terminal to which
the video endpoint of this interface
diff --git a/Documentation/ABI/testing/debugfs-driver-genwqe b/Documentation/ABI/testing/debugfs-driver-genwqe
index 1c2f25674e8c..b45b016545d8 100644
--- a/Documentation/ABI/testing/debugfs-driver-genwqe
+++ b/Documentation/ABI/testing/debugfs-driver-genwqe
@@ -31,7 +31,7 @@ What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_regs
Date: Oct 2013
Contact: haver@linux.vnet.ibm.com
Description: Dump of the error registers before the last reset of
- the card occured.
+ the card occurred.
Only available for PF.
What: /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid0
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index c78fc9282876..e89c6351503c 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -153,7 +153,7 @@ KernelVersion: 5.1
Contact: ogabbay@kernel.org
Description: Triggers an I2C transaction that is generated by the device's
CPU. Writing to this file generates a write transaction while
- reading from the file generates a read transcation
+ reading from the file generates a read transaction
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
Date: Jan 2019
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 3c477ba48a31..553fd8a33e56 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -24,7 +24,7 @@ Description:
1 Enable digital signature validation
2 Permit modification of EVM-protected metadata at
runtime. Not supported if HMAC validation and
- creation is enabled.
+ creation is enabled (deprecated).
31 Disable further runtime modification of EVM policy
=== ==================================================
@@ -47,10 +47,38 @@ Description:
will enable digital signature validation, permit
modification of EVM-protected metadata and
- disable all further modification of policy
+ disable all further modification of policy. This option is now
+ deprecated in favor of::
- Note that once a key has been loaded, it will no longer be
- possible to enable metadata modification.
+ echo 0x80000002 ><securityfs>/evm
+
+ as the outstanding issues that prevent the usage of EVM portable
+ signatures have been solved.
+
+ Echoing a value is additive: the new value is added to the
+ existing initialization flags.
+
+ For example, after::
+
+ echo 2 ><securityfs>/evm
+
+ another echo can be performed::
+
+ echo 1 ><securityfs>/evm
+
+ and the resulting value will be 3.
+
+ Note that once an HMAC key has been loaded, it will no longer
+ be possible to enable metadata modification. Signaling that an
+ HMAC key has been loaded will clear the corresponding flag.
+ For example, if the current value is 6 (2 and 4 set)::
+
+ echo 1 ><securityfs>/evm
+
+ will set the new value to 3 (4 cleared).
+
+ Loading an HMAC key is the only way to disable metadata
+ modification.
Until key loading has been signaled EVM can not create
or validate the 'security.evm' xattr, but returns
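
A short sketch of the additive flag behaviour described above, assuming securityfs is mounted at /sys/kernel/security (the mount point is a convention; the text uses the <securityfs> placeholder)::

	echo 2 > /sys/kernel/security/evm   # permit metadata modification
	echo 1 > /sys/kernel/security/evm   # enable signature validation
	cat /sys/kernel/security/evm        # flags accumulate: prints 3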
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi b/Documentation/ABI/testing/sysfs-bus-fsi
index d148214181a1..76e0caa0c2b3 100644
--- a/Documentation/ABI/testing/sysfs-bus-fsi
+++ b/Documentation/ABI/testing/sysfs-bus-fsi
@@ -12,7 +12,7 @@ KernelVersion: 4.12
Contact: linux-fsi@lists.ozlabs.org
Description:
Sends an FSI BREAK command on a master's communication
- link to any connnected slaves. A BREAK resets connected
+ link to any connected slaves. A BREAK resets connected
device's logic and preps it to receive further commands
from the master.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 267973541e72..6f98b6a9b785 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -786,7 +786,7 @@ What: /sys/.../events/in_capacitanceY_adaptive_thresh_rising_en
What: /sys/.../events/in_capacitanceY_adaptive_thresh_falling_en
KernelVersion: 5.13
Contact: linux-iio@vger.kernel.org
-Descrption:
+Description:
Adaptive thresholds are similar to normal fixed thresholds
but the value is expressed as an offset from a value which
provides a low frequency approximation of the channel itself.
@@ -798,10 +798,10 @@ What: /sys/.../in_capacitanceY_adaptive_thresh_rising_timeout
What: /sys/.../in_capacitanceY_adaptive_thresh_falling_timeout
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
-Descrption:
+Description:
When adaptive thresholds are used, the tracking signal
may adjust too slowly to step changes in the raw signal.
- *_timeout (in seconds) specifies a time for which the
+		Thus, these attributes specify the time in seconds for which the
difference between the slow tracking signal and the raw
signal is allowed to remain out-of-range before a reset
event occurs in which the tracking signal is made equal
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index ef00fada2efb..793cbb76cd25 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -139,8 +139,8 @@ Description:
binary file containing the Vital Product Data for the
device. It should follow the VPD format defined in
PCI Specification 2.1 or 2.2, but users should consider
- that some devices may have malformatted data. If the
- underlying VPD has a writable section then the
+ that some devices may have incorrectly formatted data.
+ If the underlying VPD has a writable section then the
corresponding section of this file will be writable.
What: /sys/bus/pci/devices/.../virtfnN
diff --git a/Documentation/ABI/testing/sysfs-class-backlight b/Documentation/ABI/testing/sysfs-class-backlight
index 1fc86401bf95..c453646b06e2 100644
--- a/Documentation/ABI/testing/sysfs-class-backlight
+++ b/Documentation/ABI/testing/sysfs-class-backlight
@@ -84,3 +84,103 @@ Description:
It can be enabled by writing the value stored in
/sys/class/backlight/<backlight>/max_brightness to
/sys/class/backlight/<backlight>/brightness.
+
+What: /sys/class/backlight/<backlight>/<ambient light zone>_max
+Date: Sep, 2009
+KernelVersion: v2.6.32
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Control the maximum brightness for <ambient light zone>
+ on this <backlight>. Values are between 0 and 127. This file
+ will also show the brightness level stored for this
+ <ambient light zone>.
+
+ The <ambient light zone> is device-driver specific:
+
+ For ADP5520 and ADP5501, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ daylight /sys/class/backlight/<backlight>/daylight_max
+ office /sys/class/backlight/<backlight>/office_max
+ dark /sys/class/backlight/<backlight>/dark_max
+ =========== ================================================
+
+ For ADP8860, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ l1_daylight /sys/class/backlight/<backlight>/l1_daylight_max
+ l2_office /sys/class/backlight/<backlight>/l2_office_max
+ l3_dark /sys/class/backlight/<backlight>/l3_dark_max
+ =========== ================================================
+
+ For ADP8870, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ l1_daylight /sys/class/backlight/<backlight>/l1_daylight_max
+ l2_bright /sys/class/backlight/<backlight>/l2_bright_max
+ l3_office /sys/class/backlight/<backlight>/l3_office_max
+ l4_indoor /sys/class/backlight/<backlight>/l4_indoor_max
+ l5_dark /sys/class/backlight/<backlight>/l5_dark_max
+ =========== ================================================
+
+ See also: /sys/class/backlight/<backlight>/ambient_light_zone.
+
+What: /sys/class/backlight/<backlight>/<ambient light zone>_dim
+Date: Sep, 2009
+KernelVersion: v2.6.32
+Contact: device-drivers-devel@blackfin.uclinux.org
+Description:
+ Control the dim brightness for <ambient light zone>
+	on this <backlight>. Values are between 0 and 127, typically
+	set to 0 (full off when the backlight is disabled).
+ This file will also show the dim brightness level stored for
+ this <ambient light zone>.
+
+ The <ambient light zone> is device-driver specific:
+
+ For ADP5520 and ADP5501, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ daylight /sys/class/backlight/<backlight>/daylight_dim
+ office /sys/class/backlight/<backlight>/office_dim
+ dark /sys/class/backlight/<backlight>/dark_dim
+ =========== ================================================
+
+ For ADP8860, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ l1_daylight /sys/class/backlight/<backlight>/l1_daylight_dim
+ l2_office /sys/class/backlight/<backlight>/l2_office_dim
+ l3_dark /sys/class/backlight/<backlight>/l3_dark_dim
+ =========== ================================================
+
+ For ADP8870, <ambient light zone> can be:
+
+ =========== ================================================
+ Ambient sysfs entry
+ light zone
+ =========== ================================================
+ l1_daylight /sys/class/backlight/<backlight>/l1_daylight_dim
+ l2_bright /sys/class/backlight/<backlight>/l2_bright_dim
+ l3_office /sys/class/backlight/<backlight>/l3_office_dim
+ l4_indoor /sys/class/backlight/<backlight>/l4_indoor_dim
+ l5_dark /sys/class/backlight/<backlight>/l5_dark_dim
+ =========== ================================================
+
+ See also: /sys/class/backlight/<backlight>/ambient_light_zone.
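+
+	For example, on an ADP8860-based board one might cap the office
+	zone and zero its dim level (a sketch; the backlight name and
+	values are illustrative only)::
+
+	  # echo 80 > /sys/class/backlight/adp8860/l2_office_max
+	  # echo 0 > /sys/class/backlight/adp8860/l2_office_dim
+	  # cat /sys/class/backlight/adp8860/l2_office_max
+	  80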
+
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-adp5520 b/Documentation/ABI/testing/sysfs-class-backlight-adp5520
deleted file mode 100644
index 34b6ebafa210..000000000000
--- a/Documentation/ABI/testing/sysfs-class-backlight-adp5520
+++ /dev/null
@@ -1,31 +0,0 @@
-sysfs interface for analog devices adp5520(01) backlight driver
----------------------------------------------------------------
-
-The backlight brightness control operates at three different levels for the
-adp5520 and adp5501 devices: daylight (level 1), office (level 2) and dark
-(level 3). By default the brightness operates at the daylight brightness level.
-
-What: /sys/class/backlight/<backlight>/daylight_max
-What: /sys/class/backlight/<backlight>/office_max
-What: /sys/class/backlight/<backlight>/dark_max
-Date: Sep, 2009
-KernelVersion: v2.6.32
-Contact: Michael Hennerich <michael.hennerich@analog.com>
-Description:
- (RW) Maximum current setting for the backlight when brightness
- is at one of the three levels (daylight, office or dark). This
- is an input code between 0 and 127, which is transformed to a
- value between 0 mA and 30 mA using linear or non-linear
- algorithms.
-
-What: /sys/class/backlight/<backlight>/daylight_dim
-What: /sys/class/backlight/<backlight>/office_dim
-What: /sys/class/backlight/<backlight>/dark_dim
-Date: Sep, 2009
-KernelVersion: v2.6.32
-Contact: Michael Hennerich <michael.hennerich@analog.com>
-Description:
- (RW) Dim current setting for the backlight when brightness is at
- one of the three levels (daylight, office or dark). This is an
- input code between 0 and 127, which is transformed to a value
- between 0 mA and 30 mA using linear or non-linear algorithms.
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-adp8860 b/Documentation/ABI/testing/sysfs-class-backlight-adp8860
deleted file mode 100644
index 6610ac73f9ba..000000000000
--- a/Documentation/ABI/testing/sysfs-class-backlight-adp8860
+++ /dev/null
@@ -1,37 +0,0 @@
-sysfs interface for analog devices adp8860 backlight driver
------------------------------------------------------------
-
-The backlight brightness control operates at three different levels for the
-adp8860, adp8861 and adp8863 devices: daylight (level 1), office (level 2) and
-dark (level 3). By default the brightness operates at the daylight brightness
-level.
-
-See also /sys/class/backlight/<backlight>/ambient_light_level and
-/sys/class/backlight/<backlight>/ambient_light_zone.
-
-
-What: /sys/class/backlight/<backlight>/l1_daylight_max
-What: /sys/class/backlight/<backlight>/l2_office_max
-What: /sys/class/backlight/<backlight>/l3_dark_max
-Date: Apr, 2010
-KernelVersion: v2.6.35
-Contact: Michael Hennerich <michael.hennerich@analog.com>
-Description:
- (RW) Maximum current setting for the backlight when brightness
- is at one of the three levels (daylight, office or dark). This
- is an input code between 0 and 127, which is transformed to a
- value between 0 mA and 30 mA using linear or non-linear
- algorithms.
-
-
-What: /sys/class/backlight/<backlight>/l1_daylight_dim
-What: /sys/class/backlight/<backlight>/l2_office_dim
-What: /sys/class/backlight/<backlight>/l3_dark_dim
-Date: Apr, 2010
-KernelVersion: v2.6.35
-Contact: Michael Hennerich <michael.hennerich@analog.com>
-Description:
- (RW) Dim current setting for the backlight when brightness is at
- one of the three levels (daylight, office or dark). This is an
- input code between 0 and 127, which is transformed to a value
- between 0 mA and 30 mA using linear or non-linear algorithms.
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
deleted file mode 100644
index b08ca912cad4..000000000000
--- a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
+++ /dev/null
@@ -1,32 +0,0 @@
-See also /sys/class/backlight/<backlight>/ambient_light_level and
-/sys/class/backlight/<backlight>/ambient_light_zone.
-
-What: /sys/class/backlight/<backlight>/<ambient light zone>_max
-What: /sys/class/backlight/<backlight>/l1_daylight_max
-What: /sys/class/backlight/<backlight>/l2_bright_max
-What: /sys/class/backlight/<backlight>/l3_office_max
-What: /sys/class/backlight/<backlight>/l4_indoor_max
-What: /sys/class/backlight/<backlight>/l5_dark_max
-Date: May 2011
-KernelVersion: 3.0
-Contact: device-drivers-devel@blackfin.uclinux.org
-Description:
- Control the maximum brightness for <ambient light zone>
- on this <backlight>. Values are between 0 and 127. This file
- will also show the brightness level stored for this
- <ambient light zone>.
-
-What: /sys/class/backlight/<backlight>/<ambient light zone>_dim
-What: /sys/class/backlight/<backlight>/l2_bright_dim
-What: /sys/class/backlight/<backlight>/l3_office_dim
-What: /sys/class/backlight/<backlight>/l4_indoor_dim
-What: /sys/class/backlight/<backlight>/l5_dark_dim
-Date: May 2011
-KernelVersion: 3.0
-Contact: device-drivers-devel@blackfin.uclinux.org
-Description:
- Control the dim brightness for <ambient light zone>
- on this <backlight>. Values are between 0 and 127, typically
- set to 0. Full off when the backlight is disabled.
- This file will also show the dim brightness level stored for
- this <ambient light zone>.
diff --git a/Documentation/ABI/testing/sysfs-class-firmware-attributes b/Documentation/ABI/testing/sysfs-class-firmware-attributes
index 8ea59fea4709..3348bf80a37c 100644
--- a/Documentation/ABI/testing/sysfs-class-firmware-attributes
+++ b/Documentation/ABI/testing/sysfs-class-firmware-attributes
@@ -197,8 +197,24 @@ Description:
Drivers may emit a CHANGE uevent when a password is set or unset
userspace may check it again.
- On Dell systems, if Admin password is set, then all BIOS attributes
+	On Dell and Lenovo systems, if the Admin password is set, then all BIOS attributes
require password validation.
+	On Lenovo systems, if you change the Admin password, the new password is not active until
+ the next boot.
+
+ Lenovo specific class extensions
+	--------------------------------
+
+ On Lenovo systems the following additional settings are available:
+
+ lenovo_encoding:
+ The encoding method that is used. This can be either "ascii"
+ or "scancode". Default is set to "ascii"
+
+ lenovo_kbdlang:
+ The keyboard language method that is used. This is generally a
+ two char code (e.g. "us", "fr", "gr") and may vary per platform.
+ Default is set to "us"
What: /sys/class/firmware-attributes/*/attributes/pending_reboot
Date: February 2021
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-el15203000 b/Documentation/ABI/testing/sysfs-class-led-driver-el15203000
deleted file mode 100644
index 04f3ffdc5936..000000000000
--- a/Documentation/ABI/testing/sysfs-class-led-driver-el15203000
+++ /dev/null
@@ -1,9 +0,0 @@
-What: /sys/class/leds/<led>/repeat
-Date: September 2019
-KernelVersion: 5.5
-Description:
- EL15203000 supports only indefinitely patterns,
- so this file should always store -1.
-
- For more info, please see:
- Documentation/ABI/testing/sysfs-class-led-trigger-pattern
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
index d91a07767adf..8c57d2780554 100644
--- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -35,3 +35,6 @@ Description:
This file will always return the originally written repeat
number.
+
+	Note that some LEDs, like the EL15203000, may only support
+	indefinite patterns, so they always store -1.
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-soc-ipa b/Documentation/ABI/testing/sysfs-devices-platform-soc-ipa
new file mode 100644
index 000000000000..c56dcf15bf29
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-soc-ipa
@@ -0,0 +1,78 @@
+What: /sys/devices/platform/soc@X/XXXXXXX.ipa/
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The /sys/devices/platform/soc@X/XXXXXXX.ipa/ directory
+ contains read-only attributes exposing information about
+ an IPA device. The X values could vary, but are typically
+ "soc@0/1e40000.ipa".
+
+What: .../XXXXXXX.ipa/version
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The .../XXXXXXX.ipa/version file contains the IPA hardware
+ version, as a period-separated set of two or three integers
+ (e.g., "3.5.1" or "4.2").
+
+What: .../XXXXXXX.ipa/feature/
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The .../XXXXXXX.ipa/feature/ directory contains a set of
+ attributes describing features implemented by the IPA
+ hardware.
+
+What: .../XXXXXXX.ipa/feature/rx_offload
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The .../XXXXXXX.ipa/feature/rx_offload file contains a
+ string indicating the type of receive checksum offload
+ that is supported by the hardware. The possible values
+ are "MAPv4" or "MAPv5".
+
+What: .../XXXXXXX.ipa/feature/tx_offload
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The .../XXXXXXX.ipa/feature/tx_offload file contains a
+ string indicating the type of transmit checksum offload
+ that is supported by the hardware. The possible values
+ are "MAPv4" or "MAPv5".
+
+What: .../XXXXXXX.ipa/modem/
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+ The .../XXXXXXX.ipa/modem/ directory contains a set of
+ attributes describing properties of the modem execution
+ environment reachable by the IPA hardware.
+
+What: .../XXXXXXX.ipa/modem/rx_endpoint_id
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+		The .../XXXXXXX.ipa/modem/rx_endpoint_id file contains
+ the AP endpoint ID that receives packets originating from
+ the modem execution environment. The "rx" is from the
+ perspective of the AP; this endpoint is considered an "IPA
+ producer". An endpoint ID is a small unsigned integer.
+
+What: .../XXXXXXX.ipa/modem/tx_endpoint_id
+Date: June 2021
+KernelVersion: v5.14
+Contact: Alex Elder <elder@kernel.org>
+Description:
+		The .../XXXXXXX.ipa/modem/tx_endpoint_id file contains
+ the AP endpoint ID used to transmit packets destined for
+ the modem execution environment. The "tx" is from the
+ perspective of the AP; this endpoint is considered an "IPA
+ consumer". An endpoint ID is a small unsigned integer.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index fe13baa53c59..160b10c029c0 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -50,7 +50,7 @@ Description: Dynamic addition and removal of CPU's. This is not hotplug
architecture specific.
release: writes to this file dynamically remove a CPU from
- the system. Information writtento the file to remove CPU's
+		the system. Information written to the file to remove CPUs
is architecture specific.
What: /sys/devices/system/cpu/cpu#/node
@@ -97,7 +97,7 @@ Description: CPU topology files that describe a logical CPU's relationship
corresponds to a physical socket number, but the actual value
is architecture and platform dependent.
- thread_siblings: internel kernel map of cpu#'s hardware
+ thread_siblings: internal kernel map of cpu#'s hardware
threads within the same core as cpu#
thread_siblings_list: human-readable list of cpu#'s hardware
@@ -280,7 +280,7 @@ Description: Disable L3 cache indices
on a processor with this functionality will return the currently
disabled index for that node. There is one L3 structure per
node, or per internal node on MCM machines. Writing a valid
- index to one of these files will cause the specificed cache
+ index to one of these files will cause the specified cache
index to be disabled.
All AMD processors with L3 caches provide this functionality.
@@ -295,7 +295,7 @@ Description: Processor frequency boosting control
This switch controls the boost setting for the whole system.
Boosting allows the CPU and the firmware to run at a frequency
- beyound it's nominal limit.
+		beyond its nominal limit.
More details can be found in
Documentation/admin-guide/pm/cpufreq.rst
@@ -532,7 +532,7 @@ What: /sys/devices/system/cpu/smt
/sys/devices/system/cpu/smt/control
Date: June 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
-Description: Control Symetric Multi Threading (SMT)
+Description: Control Symmetric Multi Threading (SMT)
active: Tells whether SMT is active (enabled and siblings online)
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index d1bc23cb6a9d..eaac6898f0c0 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -168,7 +168,7 @@ Description: This file shows the manufacturing date in BCD format.
What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/manufacturer_id
Date: February 2018
Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
-Description: This file shows the manufacturee ID. This is one of the
+Description: This file shows the manufacturer ID. This is one of the
UFS device descriptor parameters. The full information about
the descriptor could be found at UFS specifications 2.1.
@@ -521,7 +521,7 @@ Description: This file shows maximum VCC, VCCQ and VCCQ2 value for
What: /sys/bus/platform/drivers/ufshcd/*/string_descriptors/manufacturer_name
Date: February 2018
Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
-Description: This file contains a device manufactureer name string.
+Description: This file contains a device manufacturer name string.
The full information about the descriptor could be found at
UFS specifications 2.1.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 4849b8e84e42..5d9ae27bd462 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -238,7 +238,7 @@ Description: Shows current reserved blocks in system, it may be temporarily
What: /sys/fs/f2fs/<disk>/gc_urgent
Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
-Description: Do background GC agressively when set. When gc_urgent = 1,
+Description: Do background GC aggressively when set. When gc_urgent = 1,
background thread starts to do GC by given gc_urgent_sleep_time
interval. When gc_urgent = 2, F2FS will lower the bar of
checking idle in order to process outstanding discard commands
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index 0fedbb0f94e4..eae2f1c1e11e 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -25,14 +25,10 @@ Description: /sys/kernel/iommu_groups/reserved_regions list IOVA
the base IOVA, the second is the end IOVA and the third
field describes the type of the region.
-What: /sys/kernel/iommu_groups/reserved_regions
-Date: June 2019
-KernelVersion: v5.3
-Contact: Eric Auger <eric.auger@redhat.com>
-Description: In case an RMRR is used only by graphics or USB devices
- it is now exposed as "direct-relaxable" instead of "direct".
- In device assignment use case, for instance, those RMRR
- are considered to be relaxable and safe.
+		Since kernel 5.3, if an RMRR is used only by graphics or
+		USB devices, it is exposed as "direct-relaxable" instead
+		of "direct". In the device assignment use case, for instance,
+		those RMRRs are considered to be relaxable and safe.
What: /sys/kernel/iommu_groups/<grp_id>/type
Date: November 2020
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-privacy-wmi b/Documentation/ABI/testing/sysfs-platform-dell-privacy-wmi
new file mode 100644
index 000000000000..7f9e18705861
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dell-privacy-wmi
@@ -0,0 +1,55 @@
+What: /sys/bus/wmi/devices/6932965F-1671-4CEB-B988-D3AB0A901919/dell_privacy_supported_type
+Date: Apr 2021
+KernelVersion: 5.13
+Contact: "perry.yuan@dell.com>"
+Description:
+		Display which Dell hardware-level privacy devices are supported.
+ “Dell Privacy” is a set of HW, FW, and SW features to enhance
+ Dell’s commitment to platform privacy for MIC, Camera, and
+ ePrivacy screens.
+ The supported hardware privacy devices are:
+Attributes:
+ Microphone Mute:
+		Identifies whether the local microphone can be muted by
+		hardware; when muted, no application can capture the system
+		mic sound
+
+ Camera Shutter:
+		Identifies a camera shutter controlled by hardware, a
+		micromechanical shutter assembly built onto the camera module
+		to block capturing images from outside the laptop
+
+ supported:
+ The privacy device is supported by this system
+
+ unsupported:
+ The privacy device is not supported on this system
+
+		For example, to check which privacy devices are supported::
+
+ # cat /sys/bus/wmi/drivers/dell-privacy/6932965F-1671-4CEB-B988-D3AB0A901919/dell_privacy_supported_type
+ [Microphone Mute] [supported]
+ [Camera Shutter] [supported]
+ [ePrivacy Screen] [unsupported]
+
+What: /sys/bus/wmi/devices/6932965F-1671-4CEB-B988-D3AB0A901919/dell_privacy_current_state
+Date: Apr 2021
+KernelVersion: 5.13
+Contact: "perry.yuan@dell.com>"
+Description:
+		Allow user space to check the current Dell privacy device state.
+		Describes the Device State class exposed by the BIOS, which can
+		be consumed by various applications interested in knowing the
+		Privacy feature capabilities.
+Attributes:
+ muted:
+		Identifies that the privacy device is turned off and cannot send a stream to OS applications
+
+ unmuted:
+		Identifies that the privacy device is turned on; the audio or
+		camera driver can get a stream from the mic or camera module to
+		OS applications
+
+		For example, to check all supported current privacy device states::
+
+ # cat /sys/bus/wmi/drivers/dell-privacy/6932965F-1671-4CEB-B988-D3AB0A901919/dell_privacy_current_state
+ [Microphone] [unmuted]
+ [Camera Shutter] [unmuted]
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 9c42dde97671..c3feb657b654 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -76,7 +76,7 @@ quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
PYTHONDONTWRITEBYTECODE=1 \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
$(PYTHON3) $(srctree)/scripts/jobserver-exec \
- $(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
+ $(CONFIG_SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \
-c $(abspath $(srctree)/$(src)) \
diff --git a/Documentation/PCI/acpi-info.rst b/Documentation/PCI/acpi-info.rst
index 060217081c79..34c64a5a66ec 100644
--- a/Documentation/PCI/acpi-info.rst
+++ b/Documentation/PCI/acpi-info.rst
@@ -22,9 +22,9 @@ or if the device has INTx interrupts connected by platform interrupt
controllers and a _PRT is needed to describe those connections.
ACPI resource description is done via _CRS objects of devices in the ACPI
-namespace [2].   The _CRS is like a generalized PCI BAR: the OS can read
+namespace [2]. The _CRS is like a generalized PCI BAR: the OS can read
_CRS and figure out what resource is being consumed even if it doesn't have
-a driver for the device [3].  That's important because it means an old OS
+a driver for the device [3]. That's important because it means an old OS
can work correctly even on a system with new devices unknown to the OS.
The new devices might not do anything, but the OS can at least make sure no
resources conflict with them.
@@ -41,15 +41,15 @@ ACPI, that device will have a specific _HID/_CID that tells the OS what
driver to bind to it, and the _CRS tells the OS and the driver where the
device's registers are.
-PCI host bridges are PNP0A03 or PNP0A08 devices.  Their _CRS should
-describe all the address space they consume.  This includes all the windows
+PCI host bridges are PNP0A03 or PNP0A08 devices. Their _CRS should
+describe all the address space they consume. This includes all the windows
they forward down to the PCI bus, as well as registers of the host bridge
-itself that are not forwarded to PCI.  The host bridge registers include
+itself that are not forwarded to PCI. The host bridge registers include
things like secondary/subordinate bus registers that determine the bus
range below the bridge, window registers that describe the apertures, etc.
These are all device-specific, non-architected things, so the only way a
PNP0A03/PNP0A08 driver can manage them is via _PRS/_CRS/_SRS, which contain
-the device-specific details.  The host bridge registers also include ECAM
+the device-specific details. The host bridge registers also include ECAM
space, since it is consumed by the host bridge.
ACPI defines a Consumer/Producer bit to distinguish the bridge registers
@@ -66,7 +66,7 @@ the PNP0A03/PNP0A08 device itself. The workaround was to describe the
bridge registers (including ECAM space) in PNP0C02 catch-all devices [6].
With the exception of ECAM, the bridge register space is device-specific
anyway, so the generic PNP0A03/PNP0A08 driver (pci_root.c) has no need to
-know about it.  
+know about it.
New architectures should be able to use "Consumer" Extended Address Space
descriptors in the PNP0A03 device for bridge registers, including ECAM,
@@ -75,9 +75,9 @@ ia64 kernels assume all address space descriptors, including "Consumer"
Extended Address Space ones, are windows, so it would not be safe to
describe bridge registers this way on those architectures.
-PNP0C02 "motherboard" devices are basically a catch-all.  There's no
+PNP0C02 "motherboard" devices are basically a catch-all. There's no
programming model for them other than "don't use these resources for
-anything else."  So a PNP0C02 _CRS should claim any address space that is
+anything else." So a PNP0C02 _CRS should claim any address space that is
(1) not claimed by _CRS under any other device object in the ACPI namespace
and (2) should not be assigned by the OS to something else.
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
index 696f8eeb4738..db609b97ad58 100644
--- a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
@@ -125,4 +125,4 @@ all the EPF devices are created and linked with the EPC device.
| interrupt_pin
| function
-[1] :doc:`pci-endpoint`
+[1] Documentation/PCI/endpoint/pci-endpoint.rst
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst
index 814b40f8360b..fa651e25d98c 100644
--- a/Documentation/PCI/pci.rst
+++ b/Documentation/PCI/pci.rst
@@ -265,7 +265,7 @@ Set the DMA mask size
---------------------
.. note::
If anything below doesn't make sense, please refer to
- :doc:`/core-api/dma-api`. This section is just a reminder that
+ Documentation/core-api/dma-api.rst. This section is just a reminder that
drivers need to indicate DMA capabilities of the device and is not
an authoritative source for DMA interfaces.
@@ -291,7 +291,7 @@ Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are
Setup shared control data
-------------------------
Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
-memory. See :doc:`/core-api/dma-api` for a full description of
+memory. See Documentation/core-api/dma-api.rst for a full description of
the DMA APIs. This section is just a reminder that it needs to be done
before enabling DMA on the device.
@@ -421,7 +421,7 @@ owners if there is one.
Then clean up "consistent" buffers which contain the control data.
-See :doc:`/core-api/dma-api` for details on unmapping interfaces.
+See Documentation/core-api/dma-api.rst for details on unmapping interfaces.
Unregister from other subsystems
diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst
index 1030119294d0..01cc21f17f7b 100644
--- a/Documentation/RCU/checklist.rst
+++ b/Documentation/RCU/checklist.rst
@@ -211,27 +211,40 @@ over a rather long period of time, but improvements are always welcome!
of the system, especially to real-time workloads running on
the rest of the system.
-7. As of v4.20, a given kernel implements only one RCU flavor,
- which is RCU-sched for PREEMPTION=n and RCU-preempt for PREEMPTION=y.
- If the updater uses call_rcu() or synchronize_rcu(),
- then the corresponding readers may use rcu_read_lock() and
- rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
- or any pair of primitives that disables and re-enables preemption,
- for example, rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu() or call_srcu(),
- then the corresponding readers must use srcu_read_lock() and
- srcu_read_unlock(), and with the same srcu_struct. The rules for
- the expedited primitives are the same as for their non-expedited
- counterparts. Mixing things up will result in confusion and
- broken kernels, and has even resulted in an exploitable security
- issue.
-
- One exception to this rule: rcu_read_lock() and rcu_read_unlock()
- may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
- in cases where local bottom halves are already known to be
- disabled, for example, in irq or softirq context. Commenting
- such cases is a must, of course! And the jury is still out on
- whether the increased speed is worth it.
+7. As of v4.20, a given kernel implements only one RCU flavor, which
+ is RCU-sched for PREEMPTION=n and RCU-preempt for PREEMPTION=y.
+ If the updater uses call_rcu() or synchronize_rcu(), then
+ the corresponding readers may use: (1) rcu_read_lock() and
+ rcu_read_unlock(), (2) any pair of primitives that disables
+ and re-enables softirq, for example, rcu_read_lock_bh() and
+ rcu_read_unlock_bh(), or (3) any pair of primitives that disables
+ and re-enables preemption, for example, rcu_read_lock_sched() and
+ rcu_read_unlock_sched(). If the updater uses synchronize_srcu()
+ or call_srcu(), then the corresponding readers must use
+ srcu_read_lock() and srcu_read_unlock(), and with the same
+ srcu_struct. The rules for the expedited RCU grace-period-wait
+ primitives are the same as for their non-expedited counterparts.
+
+ If the updater uses call_rcu_tasks() or synchronize_rcu_tasks(),
+ then the readers must refrain from executing voluntary
+ context switches, that is, from blocking. If the updater uses
+ call_rcu_tasks_trace() or synchronize_rcu_tasks_trace(), then
+ the corresponding readers must use rcu_read_lock_trace() and
+ rcu_read_unlock_trace(). If an updater uses call_rcu_tasks_rude()
+ or synchronize_rcu_tasks_rude(), then the corresponding readers
+ must use anything that disables interrupts.
+
+ Mixing things up will result in confusion and broken kernels, and
+ has even resulted in an exploitable security issue. Therefore,
+ when using non-obvious pairs of primitives, commenting is
+ of course a must. One example of non-obvious pairing is
+ the XDP feature in networking, which calls BPF programs from
+ network-driver NAPI (softirq) context. BPF relies heavily on RCU
+ protection for its data structures, but because the BPF program
+ invocation happens entirely within a single local_bh_disable()
+ section in a NAPI poll cycle, this usage is safe. The reason
+ that this usage is safe is that readers can use anything that
+ disables BH when updaters use call_rcu() or synchronize_rcu().
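+
+	As an illustrative sketch of the most common pairing (the
+	pointer gp, do_something(), and the structure layout are made
+	up for this example; update-side locking is omitted)::
+
+		/* Reader. */
+		rcu_read_lock();
+		p = rcu_dereference(gp);
+		if (p)
+			do_something(p->a);
+		rcu_read_unlock();
+
+		/* Updater: publish a new version, then defer freeing the old. */
+		q = kmalloc(sizeof(*q), GFP_KERNEL);
+		*q = *p;
+		q->a = new_a;
+		rcu_assign_pointer(gp, q);
+		synchronize_rcu();	/* Wait for pre-existing readers. */
+		kfree(p);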
8. Although synchronize_rcu() is slower than is call_rcu(), it
usually results in simpler code. So, unless update performance is
diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst
index 7cc7f5852da0..1b8b46deeb29 100644
--- a/Documentation/accounting/delay-accounting.rst
+++ b/Documentation/accounting/delay-accounting.rst
@@ -69,13 +69,15 @@ Compile the kernel with::
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASKSTATS=y
-Delay accounting is enabled by default at boot up.
-To disable, add::
+Delay accounting is disabled by default at boot up.
+To enable, add::
- nodelayacct
+ delayacct
-to the kernel boot options. The rest of the instructions
-below assume this has not been done.
+to the kernel boot options. The rest of the instructions below assume this has
+been done. Alternatively, use sysctl kernel.task_delayacct to switch the state
+at runtime. Note however that only tasks started after enabling it will have
+delayacct information.
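+
+For example, to enable delay accounting at runtime (a sketch using the
+standard sysctl tool)::
+
+	# sysctl -w kernel.task_delayacct=1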
After the system has booted up, use a utility
similar to getdelays.c to access the delays
diff --git a/Documentation/admin-guide/cgroup-v1/blkio-controller.rst b/Documentation/admin-guide/cgroup-v1/blkio-controller.rst
index 36d43ae7dc13..16253eda192e 100644
--- a/Documentation/admin-guide/cgroup-v1/blkio-controller.rst
+++ b/Documentation/admin-guide/cgroup-v1/blkio-controller.rst
@@ -17,36 +17,37 @@ level logical devices like device mapper.
HOWTO
=====
+
Throttling/Upper Limit policy
-----------------------------
-- Enable Block IO controller::
+Enable Block IO controller::
CONFIG_BLK_CGROUP=y
-- Enable throttling in block layer::
+Enable throttling in block layer::
CONFIG_BLK_DEV_THROTTLING=y
-- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)::
+Mount blkio controller (see cgroups.txt, Why are cgroups needed?)::
mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
-- Specify a bandwidth rate on particular device for root group. The format
- for policy is "<major>:<minor> <bytes_per_second>"::
+Specify a bandwidth rate on particular device for root group. The format
+for policy is "<major>:<minor> <bytes_per_second>"::
echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
- Above will put a limit of 1MB/second on reads happening for root group
- on device having major/minor number 8:16.
+This will put a limit of 1MB/second on reads for the root group on the
+device having major/minor number 8:16.
-- Run dd to read a file and see if rate is throttled to 1MB/s or not::
+Run dd to read a file and see if rate is throttled to 1MB/s or not::
# dd iflag=direct if=/mnt/common/zerofile of=/dev/null bs=4K count=1024
1024+0 records in
1024+0 records out
4194304 bytes (4.2 MB) copied, 4.0001 s, 1.0 MB/s
- Limits for writes can be put using blkio.throttle.write_bps_device file.
+Limits for writes can be set using the blkio.throttle.write_bps_device file.
Hierarchical Cgroups
====================
@@ -79,85 +80,89 @@ following::
Various user visible config options
===================================
-CONFIG_BLK_CGROUP
- - Block IO controller.
-CONFIG_BFQ_CGROUP_DEBUG
- - Debug help. Right now some additional stats file show up in cgroup
+ CONFIG_BLK_CGROUP
+ Block IO controller.
+
+ CONFIG_BFQ_CGROUP_DEBUG
+	Debug help. Right now some additional stats files show up in cgroup
if this option is enabled.
-CONFIG_BLK_DEV_THROTTLING
- - Enable block device throttling support in block layer.
+ CONFIG_BLK_DEV_THROTTLING
+ Enable block device throttling support in block layer.
Details of cgroup files
=======================
+
Proportional weight policy files
--------------------------------
-- blkio.weight
- - Specifies per cgroup weight. This is default weight of the group
- on all the devices until and unless overridden by per device rule.
- (See blkio.weight_device).
- Currently allowed range of weights is from 10 to 1000.
-- blkio.weight_device
- - One can specify per cgroup per device rules using this interface.
- These rules override the default value of group weight as specified
- by blkio.weight.
+ blkio.bfq.weight
+	Specifies per cgroup weight. This is the default weight of the group
+	on all the devices until and unless overridden by a per-device rule
+	(see `blkio.bfq.weight_device` below).
+
+ Currently allowed range of weights is from 1 to 1000. For more details,
+ see Documentation/block/bfq-iosched.rst.
+
+ blkio.bfq.weight_device
+	Specifies per cgroup per device weights, overriding the default group
+ weight. For more details, see Documentation/block/bfq-iosched.rst.
Following is the format::
- # echo dev_maj:dev_minor weight > blkio.weight_device
+ # echo dev_maj:dev_minor weight > blkio.bfq.weight_device
Configure weight=300 on /dev/sdb (8:16) in this cgroup::
- # echo 8:16 300 > blkio.weight_device
- # cat blkio.weight_device
+ # echo 8:16 300 > blkio.bfq.weight_device
+ # cat blkio.bfq.weight_device
dev weight
8:16 300
Configure weight=500 on /dev/sda (8:0) in this cgroup::
- # echo 8:0 500 > blkio.weight_device
- # cat blkio.weight_device
+ # echo 8:0 500 > blkio.bfq.weight_device
+ # cat blkio.bfq.weight_device
dev weight
8:0 500
8:16 300
Remove specific weight for /dev/sda in this cgroup::
- # echo 8:0 0 > blkio.weight_device
- # cat blkio.weight_device
+ # echo 8:0 0 > blkio.bfq.weight_device
+ # cat blkio.bfq.weight_device
dev weight
8:16 300
-- blkio.time
- - disk time allocated to cgroup per device in milliseconds. First
+ blkio.time
+ Disk time allocated to cgroup per device in milliseconds. First
two fields specify the major and minor number of the device and
third field specifies the disk time allocated to group in
milliseconds.
-- blkio.sectors
- - number of sectors transferred to/from disk by the group. First
+ blkio.sectors
+ Number of sectors transferred to/from disk by the group. First
two fields specify the major and minor number of the device and
third field specifies the number of sectors transferred by the
group to/from the device.
-- blkio.io_service_bytes
- - Number of bytes transferred to/from the disk by the group. These
+ blkio.io_service_bytes
+ Number of bytes transferred to/from the disk by the group. These
are further divided by the type of operation - read or write, sync
or async. First two fields specify the major and minor number of the
device, third field specifies the operation type and the fourth field
specifies the number of bytes.
-- blkio.io_serviced
- - Number of IOs (bio) issued to the disk by the group. These
+ blkio.io_serviced
+ Number of IOs (bio) issued to the disk by the group. These
are further divided by the type of operation - read or write, sync
or async. First two fields specify the major and minor number of the
device, third field specifies the operation type and the fourth field
specifies the number of IOs.
-- blkio.io_service_time
- - Total amount of time between request dispatch and request completion
+ blkio.io_service_time
+ Total amount of time between request dispatch and request completion
for the IOs done by this cgroup. This is in nanoseconds to make it
meaningful for flash devices too. For devices with queue depth of 1,
this time represents the actual service time. When queue_depth > 1,
@@ -170,8 +175,8 @@ Proportional weight policy files
specifies the operation type and the fourth field specifies the
io_service_time in ns.
-- blkio.io_wait_time
- - Total amount of time the IOs for this cgroup spent waiting in the
+ blkio.io_wait_time
+ Total amount of time the IOs for this cgroup spent waiting in the
scheduler queues for service. This can be greater than the total time
elapsed since it is cumulative io_wait_time for all IOs. It is not a
measure of total time the cgroup spent waiting but rather a measure of
@@ -185,24 +190,24 @@ Proportional weight policy files
minor number of the device, third field specifies the operation type
and the fourth field specifies the io_wait_time in ns.
-- blkio.io_merged
- - Total number of bios/requests merged into requests belonging to this
+ blkio.io_merged
+ Total number of bios/requests merged into requests belonging to this
cgroup. This is further divided by the type of operation - read or
write, sync or async.
-- blkio.io_queued
- - Total number of requests queued up at any given instant for this
+ blkio.io_queued
+ Total number of requests queued up at any given instant for this
cgroup. This is further divided by the type of operation - read or
write, sync or async.
-- blkio.avg_queue_size
- - Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
+ blkio.avg_queue_size
+ Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
The average queue size for this cgroup over the entire time of this
cgroup's existence. Queue size samples are taken each time one of the
queues of this cgroup gets a timeslice.
-- blkio.group_wait_time
- - Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
+ blkio.group_wait_time
+ Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
This is the amount of time the cgroup had to wait since it became busy
(i.e., went from 0 to 1 request queued) to get a timeslice for one of
its queues. This is different from the io_wait_time which is the
@@ -212,8 +217,8 @@ Proportional weight policy files
will only report the group_wait_time accumulated till the last time it
got a timeslice and will not include the current delta.
-- blkio.empty_time
- - Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
+ blkio.empty_time
+ Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
This is the amount of time a cgroup spends without any pending
requests when not being served, i.e., it does not include any time
spent idling for one of the queues of the cgroup. This is in
@@ -221,8 +226,8 @@ Proportional weight policy files
the stat will only report the empty_time accumulated till the last
time it had a pending request and will not include the current delta.
-- blkio.idle_time
- - Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
+ blkio.idle_time
+ Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y.
This is the amount of time spent by the IO scheduler idling for a
given cgroup in anticipation of a better request than the existing ones
from other queues/cgroups. This is in nanoseconds. If this is read
@@ -230,60 +235,60 @@ Proportional weight policy files
idle_time accumulated till the last idle period and will not include
the current delta.
-- blkio.dequeue
- - Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y. This
+ blkio.dequeue
+ Debugging aid only enabled if CONFIG_BFQ_CGROUP_DEBUG=y. This
gives the statistics about how many a times a group was dequeued
from service tree of the device. First two fields specify the major
and minor number of the device and third field specifies the number
of times a group was dequeued from a particular device.
-- blkio.*_recursive
- - Recursive version of various stats. These files show the
+ blkio.*_recursive
+ Recursive version of various stats. These files show the
same information as their non-recursive counterparts but
include stats from all the descendant cgroups.
Throttling/Upper limit policy files
-----------------------------------
-- blkio.throttle.read_bps_device
- - Specifies upper limit on READ rate from the device. IO rate is
+ blkio.throttle.read_bps_device
+ Specifies upper limit on READ rate from the device. IO rate is
specified in bytes per second. Rules are per device. Following is
the format::
echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.throttle.read_bps_device
-- blkio.throttle.write_bps_device
- - Specifies upper limit on WRITE rate to the device. IO rate is
+ blkio.throttle.write_bps_device
+ Specifies upper limit on WRITE rate to the device. IO rate is
specified in bytes per second. Rules are per device. Following is
the format::
echo "<major>:<minor> <rate_bytes_per_second>" > /cgrp/blkio.throttle.write_bps_device
-- blkio.throttle.read_iops_device
- - Specifies upper limit on READ rate from the device. IO rate is
+ blkio.throttle.read_iops_device
+ Specifies upper limit on READ rate from the device. IO rate is
specified in IO per second. Rules are per device. Following is
the format::
echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.throttle.read_iops_device
-- blkio.throttle.write_iops_device
- - Specifies upper limit on WRITE rate to the device. IO rate is
+ blkio.throttle.write_iops_device
+ Specifies upper limit on WRITE rate to the device. IO rate is
specified in io per second. Rules are per device. Following is
the format::
echo "<major>:<minor> <rate_io_per_second>" > /cgrp/blkio.throttle.write_iops_device
-Note: If both BW and IOPS rules are specified for a device, then IO is
- subjected to both the constraints.
+ Note: If both BW and IOPS rules are specified for a device, then IO is
+ subjected to both the constraints.
-- blkio.throttle.io_serviced
- - Number of IOs (bio) issued to the disk by the group. These
+ blkio.throttle.io_serviced
+ Number of IOs (bio) issued to the disk by the group. These
are further divided by the type of operation - read or write, sync
or async. First two fields specify the major and minor number of the
device, third field specifies the operation type and the fourth field
specifies the number of IOs.
-- blkio.throttle.io_service_bytes
- - Number of bytes transferred to/from the disk by the group. These
+ blkio.throttle.io_service_bytes
+ Number of bytes transferred to/from the disk by the group. These
are further divided by the type of operation - read or write, sync
or async. First two fields specify the major and minor number of the
device, third field specifies the operation type and the fourth field
@@ -291,6 +296,6 @@ Note: If both BW and IOPS rules are specified for a device, then IO is
Common files among various policies
-----------------------------------
-- blkio.reset_stats
- - Writing an int to this file will result in resetting all the stats
+ blkio.reset_stats
+ Writing an int to this file will result in resetting all the stats
for that cgroup.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index b1e81aa8598a..4e59925e6583 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -56,6 +56,7 @@ v1 is available under :ref:`Documentation/admin-guide/cgroup-v1/index.rst <cgrou
5-3-3. IO Latency
5-3-3-1. How IO Latency Throttling Works
5-3-3-2. IO Latency Interface Files
+ 5-3-4. IO Priority
5-4. PID
5-4-1. PID Interface Files
5-5. Cpuset
@@ -1866,6 +1867,60 @@ IO Latency Interface Files
duration of time between evaluation events. Windows only elapse
with IO activity. Idle periods extend the most recent window.
+IO Priority
+~~~~~~~~~~~
+
+A single attribute controls the behavior of the I/O priority cgroup policy,
+namely the blkio.prio.class attribute. The following values are accepted for
+that attribute:
+
+ no-change
+ Do not modify the I/O priority class.
+
+ none-to-rt
+ For requests that do not have an I/O priority class (NONE),
+ change the I/O priority class into RT. Do not modify
+ the I/O priority class of other requests.
+
+ restrict-to-be
+ For requests that do not have an I/O priority class or that have I/O
+ priority class RT, change it into BE. Do not modify the I/O priority
+ class of requests that have priority class IDLE.
+
+ idle
+ Change the I/O priority class of all requests into IDLE, the lowest
+ I/O priority class.
+
+The following numerical values are associated with the I/O priority policies:
+
++----------------+---+
+| no-change      | 0 |
++----------------+---+
+| none-to-rt     | 1 |
++----------------+---+
+| restrict-to-be | 2 |
++----------------+---+
+| idle           | 3 |
++----------------+---+
+
+The numerical value that corresponds to each I/O priority class is as follows:
+
++-------------------------------+---+
+| IOPRIO_CLASS_NONE | 0 |
++-------------------------------+---+
+| IOPRIO_CLASS_RT (real-time) | 1 |
++-------------------------------+---+
+| IOPRIO_CLASS_BE (best effort) | 2 |
++-------------------------------+---+
+| IOPRIO_CLASS_IDLE | 3 |
++-------------------------------+---+
+
+The algorithm to set the I/O priority class for a request is as follows:
+
+- Translate the I/O priority class policy into a number.
+- Change the request I/O priority class into the maximum of the I/O priority
+ class policy number and the numerical I/O priority class.
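+
+For example, to apply the restrict-to-be policy to a cgroup (a sketch;
+the cgroup path is hypothetical)::
+
+	# echo restrict-to-be > /sys/fs/cgroup/foo/blkio.prio.class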
+
PID
---
diff --git a/Documentation/admin-guide/cputopology.rst b/Documentation/admin-guide/cputopology.rst
index b90dafcc8237..8632a1db36e4 100644
--- a/Documentation/admin-guide/cputopology.rst
+++ b/Documentation/admin-guide/cputopology.rst
@@ -2,87 +2,10 @@
How CPU topology info is exported via sysfs
===========================================
-Export CPU topology info via sysfs. Items (attributes) are similar
-to /proc/cpuinfo output of some architectures. They reside in
-/sys/devices/system/cpu/cpuX/topology/:
-
-physical_package_id:
-
- physical package id of cpuX. Typically corresponds to a physical
- socket number, but the actual value is architecture and platform
- dependent.
-
-die_id:
-
- the CPU die ID of cpuX. Typically it is the hardware platform's
- identifier (rather than the kernel's). The actual value is
- architecture and platform dependent.
-
-core_id:
-
- the CPU core ID of cpuX. Typically it is the hardware platform's
- identifier (rather than the kernel's). The actual value is
- architecture and platform dependent.
-
-book_id:
-
- the book ID of cpuX. Typically it is the hardware platform's
- identifier (rather than the kernel's). The actual value is
- architecture and platform dependent.
-
-drawer_id:
-
- the drawer ID of cpuX. Typically it is the hardware platform's
- identifier (rather than the kernel's). The actual value is
- architecture and platform dependent.
-
-core_cpus:
-
- internal kernel map of CPUs within the same core.
- (deprecated name: "thread_siblings")
-
-core_cpus_list:
-
- human-readable list of CPUs within the same core.
- (deprecated name: "thread_siblings_list");
-
-package_cpus:
-
- internal kernel map of the CPUs sharing the same physical_package_id.
- (deprecated name: "core_siblings")
-
-package_cpus_list:
-
- human-readable list of CPUs sharing the same physical_package_id.
- (deprecated name: "core_siblings_list")
-
-die_cpus:
-
- internal kernel map of CPUs within the same die.
-
-die_cpus_list:
-
- human-readable list of CPUs within the same die.
-
-book_siblings:
-
- internal kernel map of cpuX's hardware threads within the same
- book_id.
-
-book_siblings_list:
-
- human-readable list of cpuX's hardware threads within the same
- book_id.
-
-drawer_siblings:
-
- internal kernel map of cpuX's hardware threads within the same
- drawer_id.
-
-drawer_siblings_list:
-
- human-readable list of cpuX's hardware threads within the same
- drawer_id.
+CPU topology info is exported via sysfs. Items (attributes) are similar
+to /proc/cpuinfo output of some architectures. They reside in
+/sys/devices/system/cpu/cpuX/topology/. Please refer to the ABI file:
+Documentation/ABI/stable/sysfs-devices-system-cpu.
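+
+For example (a sketch; the exact set of files varies by architecture
+and kernel version, output abridged)::
+
+  $ ls /sys/devices/system/cpu/cpu0/topology/
+  core_cpus  core_cpus_list  core_id  die_cpus  die_cpus_list  die_id
+  package_cpus  package_cpus_list  physical_package_id  ...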
Architecture-neutral, drivers/base/topology.c, exports these attributes.
However, the book and drawer related sysfs files will only be created if
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index d2795ca6821e..4c559e08d11e 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -392,7 +392,7 @@ When mounting an ext4 filesystem, the following option are accepted:
dax
Use direct access (no page cache). See
- Documentation/filesystems/dax.txt. Note that this option is
+ Documentation/filesystems/dax.rst. Note that this option is
incompatible with data=journal.
inlinecrypt
diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
new file mode 100644
index 000000000000..7b410aef9c5c
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
@@ -0,0 +1,223 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Core Scheduling
+===============
+Core scheduling support allows userspace to define groups of tasks that can
+share a core. These groups can be specified either for security usecases (one
+group of tasks doesn't trust another), or for performance usecases (some
+workloads may benefit from running on the same core as they don't need the same
+hardware resources of the shared core, or may prefer different cores if they
+do share hardware resource needs). This document only describes the security
+usecase.
+
+Security usecase
+----------------
+A cross-HT attack involves the attacker and victim running on different Hyper
+Threads of the same core. MDS and L1TF are examples of such attacks. The only
+full mitigation of cross-HT attacks is to disable Hyper Threading (HT). Core
+scheduling is a scheduler feature that can mitigate some (not all) cross-HT
+attacks. It allows HT to be turned on safely by ensuring that only tasks in a
+user-designated trusted group can share a core. This increase in core sharing
+can also improve performance; however, it is not guaranteed that performance
+will always improve, though that is seen to be the case with a number of real
+world workloads. In theory, core scheduling aims to perform at least as well as
+when Hyper Threading is disabled. In practice, this is mostly the case, though
+not always, since synchronizing scheduling decisions across two or more CPUs in
+a core involves additional overhead, especially when the system is lightly
+loaded. When ``total_threads <= N_CPUS/2``, where N_CPUS is the total number of
+CPUs, the extra overhead may cause core scheduling to perform worse than with
+SMT disabled. Always measure the performance of your workloads.
+
+Usage
+-----
+Core scheduling support is enabled via the ``CONFIG_SCHED_CORE`` config option.
+Using this feature, userspace defines groups of tasks that can be co-scheduled
+on the same core. The core scheduler uses this information to make sure that
+tasks that are not in the same group never run simultaneously on a core, while
+doing its best to satisfy the system's scheduling requirements.
+
+Core scheduling can be enabled via the ``PR_SCHED_CORE`` prctl interface.
+This interface provides support for the creation of core scheduling groups, as
+well as admission and removal of tasks from created groups::
+
+ #include <sys/prctl.h>
+
+ int prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+
+option:
+ ``PR_SCHED_CORE``
+
+arg2:
+    Command for operation, must be one of:
+
+ - ``PR_SCHED_CORE_GET`` -- get core_sched cookie of ``pid``.
+ - ``PR_SCHED_CORE_CREATE`` -- create a new unique cookie for ``pid``.
+ - ``PR_SCHED_CORE_SHARE_TO`` -- push core_sched cookie to ``pid``.
+ - ``PR_SCHED_CORE_SHARE_FROM`` -- pull core_sched cookie from ``pid``.
+
+arg3:
+ ``pid`` of the task for which the operation applies.
+
+arg4:
+ ``pid_type`` for which the operation applies. It is of type ``enum pid_type``.
+ For example, if arg4 is ``PIDTYPE_TGID``, then the operation of this command
+ will be performed for all tasks in the task group of ``pid``.
+
+arg5:
+ userspace pointer to an unsigned long for storing the cookie returned by
+ ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+
+In order for a process to push a cookie to, or pull a cookie from, another
+process, it is required to have the ``PTRACE_MODE_READ_REALCREDS`` ptrace
+access mode to that process.
+
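+As a minimal illustrative sketch (not part of the kernel sources; error
+handling trimmed), a process could place its whole thread group into a new
+core scheduling group and read back the resulting cookie like this::
+
+  #include <stdio.h>
+  #include <unistd.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_SCHED_CORE
+  #define PR_SCHED_CORE            62  /* values from include/uapi/linux/prctl.h */
+  #define PR_SCHED_CORE_GET        0
+  #define PR_SCHED_CORE_CREATE     1
+  #define PR_SCHED_CORE_SHARE_TO   2
+  #define PR_SCHED_CORE_SHARE_FROM 3
+  #endif
+
+  int main(void)
+  {
+          unsigned long cookie = 0;
+
+          /* Create a new cookie for every task in this thread group.
+           * pid_type 1 is the numeric value of the kernel-internal
+           * PIDTYPE_TGID. */
+          if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, getpid(), 1, 0))
+                  perror("PR_SCHED_CORE_CREATE");
+
+          /* Read the cookie back; arg5 is the userspace pointer. */
+          if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, getpid(), 0,
+                    (unsigned long)&cookie))
+                  perror("PR_SCHED_CORE_GET");
+
+          printf("core-sched cookie: %#lx\n", cookie);
+          return 0;
+  }
+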
+Building hierarchies of tasks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The simplest way to build hierarchies of threads/processes which share a
+cookie and thus a core is to rely on the fact that the core-sched cookie is
+inherited across forks/clones and execs, thus setting a cookie for the
+'initial' script/executable/daemon will place every spawned child in the
+same core-sched group.
+
+Cookie Transferral
+~~~~~~~~~~~~~~~~~~
+Transferring a cookie between the current and other tasks is possible using
+PR_SCHED_CORE_SHARE_FROM and PR_SCHED_CORE_SHARE_TO to inherit a cookie from a
+specified task or to share a cookie with a task. In combination, this allows a
+simple helper program to pull a cookie from a task in an existing core
+scheduling group and share it with already running tasks, as sketched below.
+
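+A sketch of such a helper, reusing the ``PR_SCHED_CORE_*`` constants from the
+previous example (pid_type 0 is the numeric value of the kernel-internal
+``PIDTYPE_PID``)::
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <sys/types.h>
+  #include <sys/prctl.h>
+
+  /* cookie_copy <from-pid> <to-pid>: pull the core-sched cookie of
+   * <from-pid> onto ourselves, then push it to <to-pid>. */
+  int main(int argc, char **argv)
+  {
+          pid_t from, to;
+
+          if (argc != 3) {
+                  fprintf(stderr, "usage: %s <from-pid> <to-pid>\n", argv[0]);
+                  return 1;
+          }
+          from = atoi(argv[1]);
+          to = atoi(argv[2]);
+
+          if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_FROM, from, 0, 0) ||
+              prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, to, 0, 0)) {
+                  perror("PR_SCHED_CORE_SHARE");
+                  return 1;
+          }
+          return 0;
+  }
+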
+Design/Implementation
+---------------------
+Each task that is tagged is assigned a cookie internally in the kernel. As
+mentioned in `Usage`_, tasks with the same cookie value are assumed to trust
+each other and share a core.
+
+The basic idea is that every schedule event tries to select tasks for all the
+siblings of a core such that all the selected tasks running on a core are
+trusted (same cookie) at any point in time. Kernel threads are assumed trusted.
+The idle task is considered special, as it trusts everything and everything
+trusts it.
+
+During a schedule() event on any sibling of a core, the highest priority task on
+the sibling's core is picked and assigned to the sibling calling schedule(), if
+the sibling has the task enqueued. For the rest of the siblings in the core,
+the highest priority task with the same cookie is selected if there is one
+runnable in their individual run queues. If a task with the same cookie is not
+available, the idle task is selected. The idle task is globally trusted.
+
+Once a task has been selected for all the siblings in the core, an IPI is sent to
+siblings for whom a new task was selected. Siblings on receiving the IPI will
+switch to the new task immediately. If an idle task is selected for a sibling,
+then the sibling is considered to be in a `forced idle` state, i.e., it may
+have tasks on its own runqueue to run, however it will still have to run the
+idle task.
+More on this in the next section.
+
+Forced-idling of hyperthreads
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The scheduler tries its best to find tasks that trust each other such that all
+tasks selected to be scheduled are of the highest priority in a core. However,
+it is possible that some runqueues had tasks that were incompatible with the
+highest priority ones in the core. Favoring security over fairness, one or more
+siblings could be forced to select a lower priority task if the highest
+priority task is not trusted with respect to the core wide highest priority
+task. If a sibling does not have a trusted task to run, it will be forced idle
+by the scheduler (idle thread is scheduled to run).
+
+When the highest priority task is selected to run, a reschedule-IPI is sent to
+the sibling to force it into idle. This results in 4 cases which need to be
+considered depending on whether a VM or a regular usermode process was running
+on either HT::
+
+ HT1 (attack) HT2 (victim)
+ A idle -> user space user space -> idle
+ B idle -> user space guest -> idle
+ C idle -> guest user space -> idle
+ D idle -> guest guest -> idle
+
+Note that for better performance, we do not wait for the destination CPU
+(victim) to enter idle mode. This is because the sending of the IPI would bring
+the destination CPU immediately into kernel mode from user space, or cause
+a VMEXIT in the case of guests. At best, this would only leak some scheduler
+metadata
+which may not be worth protecting. It is also possible that the IPI is received
+too late on some architectures, but this has not been observed in the case of
+x86.
+
+Trust model
+~~~~~~~~~~~
+Core scheduling maintains trust relationships amongst groups of tasks by
+assigning them a tag that is the same cookie value.
+When a system with core scheduling boots, all tasks are considered to trust
+each other. This is because the core scheduler does not have information about
+trust relationships until userspace uses the above mentioned interfaces to
+communicate them. In other words, all tasks have a default cookie value of 0
+and are considered system-wide trusted. The forced-idling of siblings running
+cookie-0 tasks is also avoided.
+
+Once userspace uses the above mentioned interfaces to group sets of tasks, tasks
+within such groups are considered to trust each other, but do not trust those
+outside. Tasks outside the group also don't trust tasks within.
+
+Limitations of core-scheduling
+------------------------------
+Core scheduling tries to guarantee that only trusted tasks run concurrently on a
+core. But there could be a small window of time during which untrusted tasks
+run concurrently, or the kernel could be running concurrently with a task not
+trusted by the kernel.
+
+IPI processing delays
+~~~~~~~~~~~~~~~~~~~~~
+Core scheduling selects only trusted tasks to run together. An IPI is used to
+notify the siblings to switch to the new task. But there could be hardware
+delays in receiving the IPI on some architectures (on x86, this has not been
+observed). This may cause an attacker task to start running on a CPU before
+its siblings receive the IPI. Even though the cache is flushed on entry to
+user mode, victim tasks on siblings may populate data in the cache and
+microarchitectural buffers after the attacker starts to run, and this is a
+possibility for data leak.
+
+Open cross-HT issues that core scheduling does not solve
+--------------------------------------------------------
+1. For MDS
+~~~~~~~~~~
+Core scheduling cannot protect against MDS attacks between an HT running in
+user mode and another running in kernel mode. Even though both HTs run tasks
+which trust each other, kernel memory is still considered untrusted. Such
+attacks are possible for any combination of sibling CPU modes (host or guest mode).
+
+2. For L1TF
+~~~~~~~~~~~
+Core scheduling cannot protect against an L1TF guest attacker exploiting a
+guest or host victim. This is because the guest attacker can craft invalid
+PTEs which are not inverted due to a vulnerable guest kernel. The only
+solution is to disable EPT (Extended Page Tables).
+
+For both MDS and L1TF, if the guest vCPUs are configured to not trust each
+other (by tagging separately), then guest-to-guest attacks would go away.
+Alternatively, a system admin policy could consider guest-to-guest attacks
+a guest problem.
+
+Another approach to resolve these would be to make every untrusted task on the
+system not trust every other untrusted task. While this could reduce
+parallelism of the untrusted tasks, it would still solve the above issues while
+allowing system processes (trusted tasks) to share a core.
+
+3. Protecting the kernel (IRQ, syscall, VMEXIT)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Unfortunately, core scheduling does not protect kernel contexts running on
+sibling hyperthreads from one another. Prototypes of mitigations have been posted
+to LKML to solve this, but it is debatable whether such windows are practically
+exploitable, and whether the performance overhead of the prototypes is worth
+it (not to mention, the added code complexity).
+
+Other Use cases
+---------------
+The main use case for core scheduling is mitigating cross-HT vulnerabilities
+with SMT enabled. There are other use cases where this feature could be used:
+
+- Isolating tasks that need a whole core: Examples include realtime tasks, tasks
+ that use SIMD instructions, etc.
+- Gang scheduling: Requirements for a group of tasks that need to be scheduled
+ together could also be realized using core scheduling. One example is vCPUs of
+ a VM.
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index ca4dbdd9016d..f12cda55538b 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -15,3 +15,4 @@ are configurable at compile, boot or run time.
tsx_async_abort
multihit.rst
special-register-buffer-data-sampling.rst
+ core-scheduling.rst
diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
index 3b1ce68d2456..966c9b3296ea 100644
--- a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
+++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
@@ -3,7 +3,8 @@
SRBDS - Special Register Buffer Data Sampling
=============================================
-SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to
+SRBDS is a hardware vulnerability that allows MDS
+Documentation/admin-guide/hw-vuln/mds.rst techniques to
infer values returned from special register accesses. Special register
accesses are accesses to off core registers. According to Intel's evaluation,
the special register reads that have a security expectation of privacy are
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 75a9dd98e76e..cb30ca3df27c 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -2,7 +2,7 @@
Documentation for Kdump - The kexec-based Crash Dumping Solution
================================================================
-This document includes overview, setup and installation, and analysis
+This document includes overview, setup, installation, and analysis
information.
Overview
@@ -13,9 +13,9 @@ dump of the system kernel's memory needs to be taken (for example, when
the system panics). The system kernel's memory image is preserved across
the reboot and is accessible to the dump-capture kernel.
-You can use common commands, such as cp and scp, to copy the
-memory image to a dump file on the local disk, or across the network to
-a remote system.
+You can use common commands, such as cp, scp or makedumpfile, to copy
+the memory image to a dump file on the local disk, or across the network
+to a remote system.
Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
s390x, arm and arm64 architectures.
@@ -26,13 +26,15 @@ the dump-capture kernel. This ensures that ongoing Direct Memory Access
The kexec -p command loads the dump-capture kernel into this reserved
memory.
-On x86 machines, the first 640 KB of physical memory is needed to boot,
-regardless of where the kernel loads. Therefore, kexec backs up this
-region just before rebooting into the dump-capture kernel.
+On x86 machines, the first 640 KB of physical memory is needed for boot,
+regardless of where the kernel loads. For simpler handling, the whole
+low 1M is reserved to avoid any later kernel or device driver writing
+data into this area. This way, the low 1M can be reused as system RAM
+by the kdump kernel without extra handling.
-Similarly on PPC64 machines first 32KB of physical memory is needed for
-booting regardless of where the kernel is loaded and to support 64K page
-size kexec backs up the first 64KB memory.
+On PPC64 machines, the first 32KB of physical memory is needed for booting
+regardless of where the kernel is loaded; to support a 64K page size,
+kexec backs up the first 64KB of memory.
For s390x, when kdump is triggered, the crashkernel region is exchanged
with the region [0, crashkernel region size] and then the kdump kernel
@@ -46,14 +48,14 @@ passed to the dump-capture kernel through the elfcorehdr= boot
parameter. Optionally the size of the ELF header can also be passed
when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax.
-
With the dump-capture kernel, you can access the memory image through
/proc/vmcore. This exports the dump as an ELF-format file that you can
-write out using file copy commands such as cp or scp. Further, you can
-use analysis tools such as the GNU Debugger (GDB) and the Crash tool to
-debug the dump file. This method ensures that the dump pages are correctly
-ordered.
-
+write out using file copy commands such as cp or scp. You can also use the
+makedumpfile utility to analyze and write out filtered contents with
+options; e.g., with '-d 31' it will only write out kernel data. Further,
+you can use analysis tools such as the GNU Debugger (GDB) and the Crash
+tool to debug the dump file. This method ensures that the dump pages are
+correctly ordered.
Setup and Installation
======================
@@ -125,9 +127,18 @@ dump-capture kernels for enabling kdump support.
System kernel config options
----------------------------
-1) Enable "kexec system call" in "Processor type and features."::
+1) Enable "kexec system call" or "kexec file based system call" in
+ "Processor type and features."::
+
+ CONFIG_KEXEC=y or CONFIG_KEXEC_FILE=y
+
+ And both of them will select KEXEC_CORE::
- CONFIG_KEXEC=y
+ CONFIG_KEXEC_CORE=y
+
+ Subsequently, CRASH_CORE is selected by KEXEC_CORE::
+
+ CONFIG_CRASH_CORE=y
2) Enable "sysfs file system support" in "Filesystem" -> "Pseudo
filesystems." This is usually enabled by default::
@@ -175,17 +186,19 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64)
CONFIG_HIGHMEM4G
-2) On i386 and x86_64, disable symmetric multi-processing support
- under "Processor type and features"::
+2) With CONFIG_SMP=y, nr_cpus=1 usually needs to be specified on the kernel
+ command line when loading the dump-capture kernel, because one
+ CPU is enough for the kdump kernel to dump the vmcore on most systems.
- CONFIG_SMP=n
+ However, you can also specify nr_cpus=X to enable multiple processors
+ in the kdump kernel. In this case, "disable_cpu_apicid=" is needed to
+ tell the kdump kernel which CPU is the first kernel's BSP. Please refer
+ to admin-guide/kernel-parameters.txt for more details.
- (If CONFIG_SMP=y, then specify maxcpus=1 on the kernel command line
- when loading the dump-capture kernel, see section "Load the Dump-capture
- Kernel".)
+ With CONFIG_SMP=n, the above does not apply.
-3) If one wants to build and use a relocatable kernel,
- Enable "Build a relocatable kernel" support under "Processor type and
+3) A relocatable kernel is suggested, and is usually built by default. If not,
+ enable "Build a relocatable kernel" support under "Processor type and
features"::
CONFIG_RELOCATABLE=y
@@ -232,7 +245,7 @@ Dump-capture kernel config options (Arch Dependent, ia64)
as a dump-capture kernel if desired.
The crashkernel region can be automatically placed by the system
- kernel at run time. This is done by specifying the base address as 0,
+ kernel at runtime. This is done by specifying the base address as 0,
or omitting it all together::
crashkernel=256M@0
@@ -241,10 +254,6 @@ Dump-capture kernel config options (Arch Dependent, ia64)
crashkernel=256M
- If the start address is specified, note that the start address of the
- kernel will be aligned to 64Mb, so if the start address is not then
- any space below the alignment point will be wasted.
-
Dump-capture kernel config options (Arch Dependent, arm)
----------------------------------------------------------
@@ -260,46 +269,82 @@ Dump-capture kernel config options (Arch Dependent, arm64)
on non-VHE systems even if it is configured. This is because the CPU
will not be reset to EL2 on panic.
-Extended crashkernel syntax
+crashkernel syntax
===========================
+1) crashkernel=size@offset
-While the "crashkernel=size[@offset]" syntax is sufficient for most
-configurations, sometimes it's handy to have the reserved memory dependent
-on the value of System RAM -- that's mostly for distributors that pre-setup
-the kernel command line to avoid a unbootable system after some memory has
-been removed from the machine.
+ Here 'size' specifies how much memory to reserve for the dump-capture kernel
+ and 'offset' specifies the beginning of this reserved memory. For example,
+ "crashkernel=64M@16M" tells the system kernel to reserve 64 MB of memory
+ starting at physical address 0x01000000 (16MB) for the dump-capture kernel.
-The syntax is::
+ The crashkernel region can be automatically placed by the system
+ kernel at run time. This is done by specifying the base address as 0,
+ or omitting it all together::
- crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
- range=start-[end]
+ crashkernel=256M@0
-For example::
+ or::
- crashkernel=512M-2G:64M,2G-:128M
+ crashkernel=256M
-This would mean:
+ If the start address is specified, note that the start address of the
+ kernel will be aligned to a value (which is arch dependent), so if the
+ start address is not, then any space below the alignment point will be
+ wasted.
- 1) if the RAM is smaller than 512M, then don't reserve anything
- (this is the "rescue" case)
- 2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
- 3) if the RAM size is larger than 2G, then reserve 128M
+2) range1:size1[,range2:size2,...][@offset]
+ While the "crashkernel=size[@offset]" syntax is sufficient for most
+ configurations, sometimes it's handy to have the reserved memory dependent
+ on the value of System RAM -- that's mostly for distributors that pre-setup
+ the kernel command line to avoid an unbootable system after some memory has
+ been removed from the machine.
+ The syntax is::
-Boot into System Kernel
-=======================
+ crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
+ range=start-[end]
+
+ For example::
+
+ crashkernel=512M-2G:64M,2G-:128M
+ This would mean:
+
+ 1) if the RAM is smaller than 512M, then don't reserve anything
+ (this is the "rescue" case)
+ 2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
+ 3) if the RAM size is larger than 2G, then reserve 128M
+
+3) crashkernel=size,high and crashkernel=size,low
+
+ If memory above 4G is preferred, crashkernel=size,high can be used to
+ fulfill that. With it, physical memory is allowed to be allocated from the
+ top, so it could be above 4G if the system has more than 4G of RAM
+ installed. Otherwise, the memory region will be allocated below 4G, if
+ available.
+
+ When crashkernel=X,high is passed, the kernel could allocate the physical
+ memory region above 4G; low memory under 4G is still needed in this case.
+ There are three ways to get low memory:
+
+ 1) The kernel will allocate at least 256M of memory below 4G automatically
+ if crashkernel=Y,low is not specified.
+ 2) Let the user specify the low memory size instead.
+ 3) A specified value of 0 will disable low memory allocation::
+
+ crashkernel=0,low
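+
+ For example, the two options might be combined on the kernel command line
+ (an illustrative sketch; the exact sizes depend on the system)::
+
+   crashkernel=512M,high crashkernel=128M,low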
+
+Boot into System Kernel
+-----------------------
1) Update the boot loader (such as grub, yaboot, or lilo) configuration
files as necessary.
-2) Boot the system kernel with the boot parameter "crashkernel=Y@X",
- where Y specifies how much memory to reserve for the dump-capture kernel
- and X specifies the beginning of this reserved memory. For example,
- "crashkernel=64M@16M" tells the system kernel to reserve 64 MB of memory
- starting at physical address 0x01000000 (16MB) for the dump-capture kernel.
+2) Boot the system kernel with the boot parameter "crashkernel=Y@X".
- On x86 and x86_64, use "crashkernel=64M@16M".
+ On x86 and x86_64, use "crashkernel=Y[@X]". Most of the time, the
+ start address 'X' is not necessary; the kernel will search for a
+ suitable area unless an explicit start address is expected.
On ppc64, use "crashkernel=128M@32M".
@@ -331,8 +376,8 @@ of dump-capture kernel. Following is the summary.
For i386 and x86_64:
- - Use vmlinux if kernel is not relocatable.
- Use bzImage/vmlinuz if kernel is relocatable.
+ - Use vmlinux if kernel is not relocatable.
For ppc64:
@@ -392,7 +437,7 @@ loading dump-capture kernel.
For i386, x86_64 and ia64:
- "1 irqpoll maxcpus=1 reset_devices"
+ "1 irqpoll nr_cpus=1 reset_devices"
For ppc64:
@@ -400,7 +445,7 @@ For ppc64:
For s390x:
- "1 maxcpus=1 cgroup_disable=memory"
+ "1 nr_cpus=1 cgroup_disable=memory"
For arm:
@@ -408,7 +453,7 @@ For arm:
For arm64:
- "1 maxcpus=1 reset_devices"
+ "1 nr_cpus=1 reset_devices"
Notes on loading the dump-capture kernel:
@@ -488,6 +533,10 @@ the following command::
cp /proc/vmcore <dump-file>
+You can also use makedumpfile utility to write out the dump file
+with specified options to filter out unwanted contents, e.g::
+
+ makedumpfile -l --message-level 1 -d 31 /proc/vmcore <dump-file>
Analysis
========
@@ -535,8 +584,7 @@ This will cause a kdump to occur at the add_taint()->panic() call.
Contact
=======
-- Vivek Goyal (vgoyal@redhat.com)
-- Maneesh Soni (maneesh@in.ibm.com)
+- kexec@lists.infradead.org
GDB macros
==========
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cb89dbdedc46..2991f6e692bd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -113,7 +113,7 @@
the GPE dispatcher.
This facility can be used to prevent such uncontrolled
GPE floodings.
- Format: <byte>
+ Format: <byte> or <bitmap-list>
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
@@ -581,6 +581,28 @@
loops can be debugged more effectively on production
systems.
+ clocksource.max_cswd_read_retries= [KNL]
+ Number of clocksource_watchdog() retries due to
+ external delays before the clock will be marked
+ unstable. Defaults to three retries, that is,
+ four attempts to read the clock under test.
+
+ clocksource.verify_n_cpus= [KNL]
+ Limit the number of CPUs checked for clocksources
+ marked with CLOCK_SOURCE_VERIFY_PERCPU that
+ are marked unstable due to excessive skew.
+ A negative value says to check all CPUs, while
+ zero says not to check any. Values larger than
+ nr_cpu_ids are silently truncated to nr_cpu_ids.
+ The actual CPUs are chosen randomly, with
+ no replacement if the same CPU is chosen twice.
+
+ clocksource-wdtest.holdoff= [KNL]
+ Set the time in seconds that the clocksource
+ watchdog test waits before commencing its tests.
+ Defaults to zero when built as a module and to
+ 10 seconds when built into the kernel.
+
clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
@@ -3244,7 +3266,7 @@
noclflush [BUGS=X86] Don't use the CLFLUSH instruction
- nodelayacct [KNL] Disable per-task delay accounting
+ delayacct [KNL] Enable per-task delay accounting
nodsp [SH] Disable hardware DSP at boot time.
@@ -3513,6 +3535,9 @@
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
+ numa=off [KNL, ARM64, PPC, RISCV, SPARC, X86] Disable NUMA; only
+ set up a single NUMA node spanning all memory.
+
numa_balancing= [KNL,ARM64,PPC,RISCV,S390,X86] Enable or disable automatic
NUMA balancing.
Allowed values are enable and disable
@@ -3566,6 +3591,12 @@
off: turn off poisoning (default)
on: turn on poisoning
+ page_reporting.page_reporting_order=
+ [KNL] Minimal page reporting order
+ Format: <integer>
+ Adjust the minimal page reporting order. The page
+ reporting is disabled when it exceeds (MAX_ORDER-1).
+
panic= [KNL] Kernel behaviour on panic: delay <timeout>
timeout > 0: seconds before rebooting
timeout = 0: wait forever
@@ -4775,11 +4806,6 @@
Reserves a hole at the top of the kernel virtual
address space.
- reservelow= [X86]
- Format: nn[K]
- Set the amount of memory to reserve for BIOS at
- the bottom of the address space.
-
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
@@ -5283,6 +5309,14 @@
exception. Default behavior is by #AC if
both features are enabled in hardware.
+ ratelimit:N -
+ Set system wide rate limit to N bus locks
+ per second for bus lock detection.
+ 0 < N <= 1000.
+
+ N/A for split lock detection.
+
If an #AC exception is hit in the kernel or in
firmware (i.e. not while executing in user mode)
the kernel will oops in either "warn" or "fatal"
diff --git a/Documentation/admin-guide/laptops/laptop-mode.rst b/Documentation/admin-guide/laptops/laptop-mode.rst
index c984c4262f2e..b61cc601d298 100644
--- a/Documentation/admin-guide/laptops/laptop-mode.rst
+++ b/Documentation/admin-guide/laptops/laptop-mode.rst
@@ -101,17 +101,6 @@ this results in concentration of disk activity in a small time interval which
occurs only once every 10 minutes, or whenever the disk is forced to spin up by
a cache miss. The disk can then be spun down in the periods of inactivity.
-If you want to find out which process caused the disk to spin up, you can
-gather information by setting the flag /proc/sys/vm/block_dump. When this flag
-is set, Linux reports all disk read and write operations that take place, and
-all block dirtyings done to files. This makes it possible to debug why a disk
-needs to spin up, and to increase battery life even more. The output of
-block_dump is written to the kernel output, and it can be retrieved using
-"dmesg". When you use block_dump and your kernel logging level also includes
-kernel debugging messages, you probably want to turn off klogd, otherwise
-the output of block_dump will be logged, causing disk activity that is not
-normally there.
-
Configuration
-------------
diff --git a/Documentation/admin-guide/lockup-watchdogs.rst b/Documentation/admin-guide/lockup-watchdogs.rst
index 290840c160af..3e09284a8b9b 100644
--- a/Documentation/admin-guide/lockup-watchdogs.rst
+++ b/Documentation/admin-guide/lockup-watchdogs.rst
@@ -39,7 +39,7 @@ in principle, they should work in any architecture where these
subsystems are present.
A periodic hrtimer runs to generate interrupts and kick the watchdog
-task. An NMI perf event is generated every "watchdog_thresh"
+job. An NMI perf event is generated every "watchdog_thresh"
(compile-time initialized to 10 and configurable through sysctl of the
same name) seconds to check for hardlockups. If any CPU in the system
does not receive any hrtimer interrupt during that time the
@@ -47,7 +47,7 @@ does not receive any hrtimer interrupt during that time the
generate a kernel warning or call panic, depending on the
configuration.
-The watchdog task is a high priority kernel thread that updates a
+The watchdog job runs in a stop scheduling thread that updates a
timestamp every time it is scheduled. If that timestamp is not updated
for 2*watchdog_thresh seconds (the softlockup threshold) the
'softlockup detector' (coded inside the hrtimer callback function)
diff --git a/Documentation/admin-guide/media/bt8xx.rst b/Documentation/admin-guide/media/bt8xx.rst
index 1382ada1e38e..3589f6ab7e46 100644
--- a/Documentation/admin-guide/media/bt8xx.rst
+++ b/Documentation/admin-guide/media/bt8xx.rst
@@ -15,11 +15,12 @@ Authors:
General information
-------------------
-This class of cards has a bt878a as the PCI interface, and require the bttv driver
-for accessing the i2c bus and the gpio pins of the bt8xx chipset.
+This class of cards has a bt878a as the PCI interface, and require the bttv
+driver for accessing the i2c bus and the gpio pins of the bt8xx chipset.
-Please see :doc:`bttv-cardlist` for a complete list of Cards based on the
-Conexant Bt8xx PCI bridge supported by the Linux Kernel.
+Please see Documentation/admin-guide/media/bttv-cardlist.rst for a complete
+list of Cards based on the Conexant Bt8xx PCI bridge supported by the
+Linux Kernel.
In order to be able to compile the kernel, some config options should be
enabled::
@@ -80,7 +81,7 @@ for dvb-bt8xx drivers by passing modprobe parameters may be necessary.
Running TwinHan and Clones
~~~~~~~~~~~~~~~~~~~~~~~~~~
-As shown at :doc:`bttv-cardlist`, TwinHan and
+As shown at Documentation/admin-guide/media/bttv-cardlist.rst, TwinHan and
clones use ``card=113`` modprobe parameter. So, in order to properly
detect it for devices without EEPROM, you should use::
@@ -105,12 +106,12 @@ The autodetected values are determined by the cards' "response string".
In your logs see f. ex.: dst_get_device_id: Recognize [DSTMCI].
For bug reports please send in a complete log with verbose=4 activated.
-Please also see :doc:`ci`.
+Please also see Documentation/admin-guide/media/ci.rst.
Running multiple cards
~~~~~~~~~~~~~~~~~~~~~~
-See :doc:`bttv-cardlist` for a complete list of
+See Documentation/admin-guide/media/bttv-cardlist.rst for a complete list of
Card ID. Some examples:
=========================== ===
diff --git a/Documentation/admin-guide/media/bttv.rst b/Documentation/admin-guide/media/bttv.rst
index 0ef1f203104d..125f6f47123d 100644
--- a/Documentation/admin-guide/media/bttv.rst
+++ b/Documentation/admin-guide/media/bttv.rst
@@ -24,7 +24,8 @@ If your board has digital TV, you'll also need::
./scripts/config -m DVB_BT8XX
-In this case, please see :doc:`bt8xx` for additional notes.
+In this case, please see Documentation/admin-guide/media/bt8xx.rst
+for additional notes.
Make bttv work with your card
-----------------------------
@@ -39,7 +40,7 @@ If it doesn't bttv likely could not autodetect your card and needs some
insmod options. The most important insmod option for bttv is "card=n"
to select the correct card type. If you get video but no sound you've
very likely specified the wrong (or no) card type. A list of supported
-cards is in :doc:`bttv-cardlist`.
+cards is in Documentation/admin-guide/media/bttv-cardlist.rst.
If bttv takes very long to load (happens sometimes with the cheap
cards which have no tuner), try adding this to your modules configuration
@@ -57,8 +58,8 @@ directory should be enough for it to be autoload during the driver's
probing mode (e. g. when the Kernel boots or when the driver is
manually loaded via ``modprobe`` command).
-If your card isn't listed in :doc:`bttv-cardlist` or if you have
-trouble making audio work, please read :ref:`still_doesnt_work`.
+If your card isn't listed in Documentation/admin-guide/media/bttv-cardlist.rst
+or if you have trouble making audio work, please read :ref:`still_doesnt_work`.
Autodetecting cards
@@ -77,8 +78,8 @@ the Subsystem ID in the second line, looks like this:
only bt878-based cards can have a subsystem ID (which does not mean
that every card really has one). bt848 cards can't have a Subsystem
ID and therefore can't be autodetected. There is a list with the ID's
-at :doc:`bttv-cardlist` (in case you are interested or want to mail
-patches with updates).
+at Documentation/admin-guide/media/bttv-cardlist.rst
+(in case you are interested or want to mail patches with updates).
.. _still_doesnt_work:
@@ -259,15 +260,15 @@ bug. It is very helpful if you can tell where exactly it broke
With a hard freeze you probably doesn't find anything in the logfiles.
The only way to capture any kernel messages is to hook up a serial
console and let some terminal application log the messages. /me uses
-screen. See :doc:`/admin-guide/serial-console` for details on setting
-up a serial console.
+screen. See Documentation/admin-guide/serial-console.rst for details on
+setting up a serial console.
-Read :doc:`/admin-guide/bug-hunting` to learn how to get any useful
+Read Documentation/admin-guide/bug-hunting.rst to learn how to get any useful
information out of a register+stack dump printed by the kernel on
protection faults (so-called "kernel oops").
If you run into some kind of deadlock, you can try to dump a call trace
-for each process using sysrq-t (see :doc:`/admin-guide/sysrq`).
+for each process using sysrq-t (see Documentation/admin-guide/sysrq.rst).
This way it is possible to figure where *exactly* some process in "D"
state is stuck.
diff --git a/Documentation/admin-guide/media/index.rst b/Documentation/admin-guide/media/index.rst
index 6e0d2bae7154..c676af665111 100644
--- a/Documentation/admin-guide/media/index.rst
+++ b/Documentation/admin-guide/media/index.rst
@@ -11,12 +11,14 @@ its supported drivers.
Please see:
-- :doc:`/userspace-api/media/index`
- for the userspace APIs used on media devices.
+Documentation/userspace-api/media/index.rst
-- :doc:`/driver-api/media/index`
- for driver development information and Kernel APIs used by
- media devices;
+ - for the userspace APIs used on media devices.
+
+Documentation/driver-api/media/index.rst
+
+ - for driver development information and Kernel APIs used by
+ media devices;
The media subsystem
===================
diff --git a/Documentation/admin-guide/media/ipu3.rst b/Documentation/admin-guide/media/ipu3.rst
index f59697c7b374..52c1c04173da 100644
--- a/Documentation/admin-guide/media/ipu3.rst
+++ b/Documentation/admin-guide/media/ipu3.rst
@@ -234,22 +234,23 @@ The IPU3 ImgU pipelines can be configured using the Media Controller, defined at
Running mode and firmware binary selection
------------------------------------------
-ImgU works based on firmware, currently the ImgU firmware support run 2 pipes in
-time-sharing with single input frame data. Each pipe can run at certain mode -
-"VIDEO" or "STILL", "VIDEO" mode is commonly used for video frames capture, and
-"STILL" is used for still frame capture. However, you can also select "VIDEO" to
-capture still frames if you want to capture images with less system load and
-power. For "STILL" mode, ImgU will try to use smaller BDS factor and output
-larger bayer frame for further YUV processing than "VIDEO" mode to get high
-quality images. Besides, "STILL" mode need XNR3 to do noise reduction, hence
-"STILL" mode will need more power and memory bandwidth than "VIDEO" mode. TNR
-will be enabled in "VIDEO" mode and bypassed by "STILL" mode. ImgU is running at
-“VIDEO” mode by default, the user can use v4l2 control V4L2_CID_INTEL_IPU3_MODE
-(currently defined in drivers/staging/media/ipu3/include/intel-ipu3.h) to query
-and set the running mode. For user, there is no difference for buffer queueing
-between the "VIDEO" and "STILL" mode, mandatory input and main output node
-should be enabled and buffers need be queued, the statistics and the view-finder
-queues are optional.
+ImgU is firmware-based; currently, the ImgU firmware supports running 2 pipes
+in time-sharing with a single input frame's data. Each pipe can run in a
+certain mode - "VIDEO" or "STILL". "VIDEO" mode is commonly used for video
+frame capture, and "STILL" is used for still frame capture. However, you can
+also select "VIDEO" to capture still frames if you want to capture images with
+less system load and power. For "STILL" mode, ImgU will try to use a smaller
+BDS factor and output a larger bayer frame for further YUV processing than in
+"VIDEO" mode, to get high quality images. Besides, "STILL" mode needs XNR3 for
+noise reduction, hence "STILL" mode will need more power and memory bandwidth
+than "VIDEO" mode. TNR will be enabled in "VIDEO" mode and bypassed in "STILL"
+mode. ImgU runs in "VIDEO" mode by default; the user can use the v4l2 control
+V4L2_CID_INTEL_IPU3_MODE (currently defined in
+drivers/staging/media/ipu3/include/uapi/intel-ipu3.h) to query and set the
+running mode. For the user, there is no difference in buffer queueing between
+the "VIDEO" and "STILL" modes: the mandatory input and main output nodes
+should be enabled and buffers queued; the statistics and the view-finder
+queues are optional.
The firmware binary will be selected according to current running mode, such log
"using binary if_to_osys_striped " or "using binary if_to_osys_primary_striped"
@@ -586,7 +587,7 @@ preserved.
References
==========
-.. [#f5] drivers/staging/media/ipu3/include/intel-ipu3.h
+.. [#f5] drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
.. [#f1] https://github.com/intel/nvt
diff --git a/Documentation/admin-guide/media/saa7134.rst b/Documentation/admin-guide/media/saa7134.rst
index 7ab9c70b9abe..51eae7eb5ab7 100644
--- a/Documentation/admin-guide/media/saa7134.rst
+++ b/Documentation/admin-guide/media/saa7134.rst
@@ -50,7 +50,8 @@ To build and install, you should run::
Once the new Kernel is booted, saa7134 driver should be loaded automatically.
Depending on the card you might have to pass ``card=<nr>`` as insmod option.
-If so, please check :doc:`saa7134-cardlist` for valid choices.
+If so, please check Documentation/admin-guide/media/saa7134-cardlist.rst
+for valid choices.
Once you have your card type number, you can pass a modules configuration
via a file (usually, it is either ``/etc/modules.conf`` or some file at
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 10fde58d0869..aec2cd2aaea7 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -347,81 +347,8 @@ for tickless systems. It follows the same basic strategy as the ``menu`` `one
<menu-gov_>`_: it always tries to find the deepest idle state suitable for the
given conditions. However, it applies a different approach to that problem.
-First, it does not use sleep length correction factors, but instead it attempts
-to correlate the observed idle duration values with the available idle states
-and use that information to pick up the idle state that is most likely to
-"match" the upcoming CPU idle interval. Second, it does not take the tasks
-that were running on the given CPU in the past and are waiting on some I/O
-operations to complete now at all (there is no guarantee that they will run on
-the same CPU when they become runnable again) and the pattern detection code in
-it avoids taking timer wakeups into account. It also only uses idle duration
-values less than the current time till the closest timer (with the scheduler
-tick excluded) for that purpose.
-
-Like in the ``menu`` governor `case <menu-gov_>`_, the first step is to obtain
-the *sleep length*, which is the time until the closest timer event with the
-assumption that the scheduler tick will be stopped (that also is the upper bound
-on the time until the next CPU wakeup). That value is then used to preselect an
-idle state on the basis of three metrics maintained for each idle state provided
-by the ``CPUIdle`` driver: ``hits``, ``misses`` and ``early_hits``.
-
-The ``hits`` and ``misses`` metrics measure the likelihood that a given idle
-state will "match" the observed (post-wakeup) idle duration if it "matches" the
-sleep length. They both are subject to decay (after a CPU wakeup) every time
-the target residency of the idle state corresponding to them is less than or
-equal to the sleep length and the target residency of the next idle state is
-greater than the sleep length (that is, when the idle state corresponding to
-them "matches" the sleep length). The ``hits`` metric is increased if the
-former condition is satisfied and the target residency of the given idle state
-is less than or equal to the observed idle duration and the target residency of
-the next idle state is greater than the observed idle duration at the same time
-(that is, it is increased when the given idle state "matches" both the sleep
-length and the observed idle duration). In turn, the ``misses`` metric is
-increased when the given idle state "matches" the sleep length only and the
-observed idle duration is too short for its target residency.
-
-The ``early_hits`` metric measures the likelihood that a given idle state will
-"match" the observed (post-wakeup) idle duration if it does not "match" the
-sleep length. It is subject to decay on every CPU wakeup and it is increased
-when the idle state corresponding to it "matches" the observed (post-wakeup)
-idle duration and the target residency of the next idle state is less than or
-equal to the sleep length (i.e. the idle state "matching" the sleep length is
-deeper than the given one).
-
-The governor walks the list of idle states provided by the ``CPUIdle`` driver
-and finds the last (deepest) one with the target residency less than or equal
-to the sleep length. Then, the ``hits`` and ``misses`` metrics of that idle
-state are compared with each other and it is preselected if the ``hits`` one is
-greater (which means that that idle state is likely to "match" the observed idle
-duration after CPU wakeup). If the ``misses`` one is greater, the governor
-preselects the shallower idle state with the maximum ``early_hits`` metric
-(or if there are multiple shallower idle states with equal ``early_hits``
-metric which also is the maximum, the shallowest of them will be preselected).
-[If there is a wakeup latency constraint coming from the `PM QoS framework
-<cpu-pm-qos_>`_ which is hit before reaching the deepest idle state with the
-target residency within the sleep length, the deepest idle state with the exit
-latency within the constraint is preselected without consulting the ``hits``,
-``misses`` and ``early_hits`` metrics.]
-
-Next, the governor takes several idle duration values observed most recently
-into consideration and if at least a half of them are greater than or equal to
-the target residency of the preselected idle state, that idle state becomes the
-final candidate to ask for. Otherwise, the average of the most recent idle
-duration values below the target residency of the preselected idle state is
-computed and the governor walks the idle states shallower than the preselected
-one and finds the deepest of them with the target residency within that average.
-That idle state is then taken as the final candidate to ask for.
-
-Still, at this point the governor may need to refine the idle state selection if
-it has not decided to `stop the scheduler tick <idle-cpus-and-tick_>`_. That
-generally happens if the target residency of the idle state selected so far is
-less than the tick period and the tick has not been stopped already (in a
-previous iteration of the idle loop). Then, like in the ``menu`` governor
-`case <menu-gov_>`_, the sleep length used in the previous computations may not
-reflect the real time until the closest timer event and if it really is greater
-than that time, a shallower state with a suitable target residency may need to
-be selected.
-
+.. kernel-doc:: drivers/cpuidle/governors/teo.c
+ :doc: teo-description
.. _idle-states-representation:
diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst
index 89309e1b0e48..b799a43da62e 100644
--- a/Documentation/admin-guide/pm/intel_idle.rst
+++ b/Documentation/admin-guide/pm/intel_idle.rst
@@ -20,8 +20,8 @@ Nehalem and later generations of Intel processors, but the level of support for
a particular processor model in it depends on whether or not it recognizes that
processor model and may also depend on information coming from the platform
firmware. [To understand ``intel_idle`` it is necessary to know how ``CPUIdle``
-works in general, so this is the time to get familiar with :doc:`cpuidle` if you
-have not done that yet.]
+works in general, so this is the time to get familiar with
+Documentation/admin-guide/pm/cpuidle.rst if you have not done that yet.]
``intel_idle`` uses the ``MWAIT`` instruction to inform the processor that the
logical CPU executing it is idle and so it may be possible to put some of the
@@ -53,7 +53,8 @@ processor) corresponding to them depends on the processor model and it may also
depend on the configuration of the platform.
In order to create a list of available idle states required by the ``CPUIdle``
-subsystem (see :ref:`idle-states-representation` in :doc:`cpuidle`),
+subsystem (see :ref:`idle-states-representation` in
+Documentation/admin-guide/pm/cpuidle.rst),
``intel_idle`` can use two sources of information: static tables of idle states
for different processor models included in the driver itself and the ACPI tables
of the system. The former are always used if the processor model at hand is
@@ -98,7 +99,8 @@ states may not be enabled by default if there are no matching entries in the
preliminary list of idle states coming from the ACPI tables. In that case user
space still can enable them later (on a per-CPU basis) with the help of
the ``disable`` idle state attribute in ``sysfs`` (see
-:ref:`idle-states-representation` in :doc:`cpuidle`). This basically means that
+:ref:`idle-states-representation` in
+Documentation/admin-guide/pm/cpuidle.rst). This basically means that
the idle states "known" to the driver may not be enabled by default if they have
not been exposed by the platform firmware (through the ACPI tables).
@@ -186,7 +188,8 @@ be desirable. In practice, it is only really necessary to do that if the idle
states in question cannot be enabled during system startup, because in the
working state of the system the CPU power management quality of service (PM
QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states
-even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`).
+even if they have been enumerated (see :ref:`cpu-pm-qos` in
+Documentation/admin-guide/pm/cpuidle.rst).
Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
The ``no_acpi`` and ``use_acpi`` module parameters (recognized by ``intel_idle``
@@ -202,7 +205,8 @@ Namely, the positions of the bits that are set in the ``states_off`` value are
the indices of idle states to be disabled by default (as reflected by the names
of the corresponding idle state directories in ``sysfs``, :file:`state0`,
:file:`state1` ... :file:`state<i>` ..., where ``<i>`` is the index of the given
-idle state; see :ref:`idle-states-representation` in :doc:`cpuidle`).
+idle state; see :ref:`idle-states-representation` in
+Documentation/admin-guide/pm/cpuidle.rst).
For example, if ``states_off`` is equal to 3, the driver will disable idle
states 0 and 1 by default, and if it is equal to 8, idle state 3 will be
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index df29b4f1f219..d5043cd8d2f5 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -18,8 +18,8 @@ General Information
(``CPUFreq``). It is a scaling driver for the Sandy Bridge and later
generations of Intel processors. Note, however, that some of those processors
may not be supported. [To understand ``intel_pstate`` it is necessary to know
-how ``CPUFreq`` works in general, so this is the time to read :doc:`cpufreq` if
-you have not done that yet.]
+how ``CPUFreq`` works in general, so this is the time to read
+Documentation/admin-guide/pm/cpufreq.rst if you have not done that yet.]
For the processors supported by ``intel_pstate``, the P-state concept is broader
than just an operating frequency or an operating performance point (see the
@@ -365,6 +365,9 @@ argument is passed to the kernel in the command line.
inclusive) including both turbo and non-turbo P-states (see
`Turbo P-states Support`_).
+ This attribute is present only if the value exposed by it is the same
+ for all of the CPUs in the system.
+
The value of this attribute is not affected by the ``no_turbo``
setting described `below <no_turbo_attr_>`_.
@@ -374,6 +377,9 @@ argument is passed to the kernel in the command line.
Ratio of the `turbo range <turbo_>`_ size to the size of the entire
range of supported P-states, in percent.
+ This attribute is present only if the value exposed by it is the same
+ for all of the CPUs in the system.
+
This attribute is read-only.
.. _no_turbo_attr:
@@ -445,8 +451,9 @@ Interpretation of Policy Attributes
-----------------------------------
The interpretation of some ``CPUFreq`` policy attributes described in
-:doc:`cpufreq` is special with ``intel_pstate`` as the current scaling driver
-and it generally depends on the driver's `operation mode <Operation Modes_>`_.
+Documentation/admin-guide/pm/cpufreq.rst is special with ``intel_pstate``
+as the current scaling driver and it generally depends on the driver's
+`operation mode <Operation Modes_>`_.
First of all, the values of the ``cpuinfo_max_freq``, ``cpuinfo_min_freq`` and
``scaling_cur_freq`` attributes are produced by applying a processor-specific
diff --git a/Documentation/admin-guide/pstore-blk.rst b/Documentation/admin-guide/pstore-blk.rst
index 49d8149f8d32..2d22ead9520e 100644
--- a/Documentation/admin-guide/pstore-blk.rst
+++ b/Documentation/admin-guide/pstore-blk.rst
@@ -45,15 +45,18 @@ blkdev
The block device to use. Most of the time, it is a partition of block device.
It's required for pstore/blk. It is also used for MTD device.
-It accepts the following variants for block device:
+When pstore/blk is built as a module, "blkdev" accepts the following variants:
-1. <hex_major><hex_minor> device number in hexadecimal represents itself; no
- leading 0x, for example b302.
-#. /dev/<disk_name> represents the device number of disk
+1. /dev/<disk_name> represents the device number of disk
#. /dev/<disk_name><decimal> represents the device number of partition - device
number of disk plus the partition number
#. /dev/<disk_name>p<decimal> - same as the above; this form is used when disk
name of partitioned disk ends with a digit.
+
+When pstore/blk is built into the kernel, "blkdev" accepts the following variants:
+
+#. <hex_major><hex_minor> device number in hexadecimal representation,
+ with no leading 0x, for example b302.
#. PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF represents the unique id of
a partition if the partition table provides it. The UUID may be either an
EFI/GPT UUID, or refer to an MSDOS partition using the format SSSSSSSS-PP,
@@ -227,8 +230,5 @@ For developer reference, here are all the important structures and APIs:
.. kernel-doc:: include/linux/pstore_zone.h
:internal:
-.. kernel-doc:: fs/pstore/blk.c
- :internal:
-
.. kernel-doc:: include/linux/pstore_blk.h
:internal:
diff --git a/Documentation/admin-guide/reporting-issues.rst b/Documentation/admin-guide/reporting-issues.rst
index 18d8e25ba9df..d7ac13f789cc 100644
--- a/Documentation/admin-guide/reporting-issues.rst
+++ b/Documentation/admin-guide/reporting-issues.rst
@@ -1248,7 +1248,7 @@ paragraph makes the severeness obvious.
In case you performed a successful bisection, use the title of the change that
introduced the regression as the second part of your subject. Make the report
-also mention the commit id of the culprit. In case of an unsuccessful bisection,
+also mention the commit id of the culprit. In case of an unsuccessful bisection,
make your report mention the latest tested version that's working fine (say 5.7)
and the oldest where the issue occurs (say 5.8-rc1).
diff --git a/Documentation/admin-guide/sysctl/abi.rst b/Documentation/admin-guide/sysctl/abi.rst
index 77b1d1b2ad42..4e6db0a2a4c0 100644
--- a/Documentation/admin-guide/sysctl/abi.rst
+++ b/Documentation/admin-guide/sysctl/abi.rst
@@ -11,7 +11,7 @@ Documentation for /proc/sys/abi/
Copyright (c) 2020, Stephen Kitt
-For general info, see :doc:`index`.
+For general info, see Documentation/admin-guide/sysctl/index.rst.
------------------------------------------------------------------------------
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 7ca8df5451d4..426162009ce9 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -9,7 +9,8 @@ Copyright (c) 1998, 1999, Rik van Riel <riel@nl.linux.org>
Copyright (c) 2009, Shen Feng<shen@cn.fujitsu.com>
-For general info and legal blurb, please look in :doc:`index`.
+For general info and legal blurb, please look in
+Documentation/admin-guide/sysctl/index.rst.
------------------------------------------------------------------------------
@@ -54,7 +55,7 @@ free space valid for 30 seconds.
acpi_video_flags
================
-See :doc:`/power/video`. This allows the video resume mode to be set,
+See Documentation/power/video.rst. This allows the video resume mode to be set,
in a similar fashion to the ``acpi_sleep`` kernel parameter, by
combining the following values:
@@ -89,7 +90,7 @@ is 0x15 and the full version number is 0x234, this file will contain
the value 340 = 0x154.
See the ``type_of_loader`` and ``ext_loader_type`` fields in
-:doc:`/x86/boot` for additional information.
+Documentation/x86/boot.rst for additional information.
bootloader_version (x86 only)
@@ -99,7 +100,7 @@ The complete bootloader version number. In the example above, this
file will contain the value 564 = 0x234.
See the ``type_of_loader`` and ``ext_loader_ver`` fields in
-:doc:`/x86/boot` for additional information.
+Documentation/x86/boot.rst for additional information.
bpf_stats_enabled
@@ -269,7 +270,7 @@ see the ``hostname(1)`` man page.
firmware_config
===============
-See :doc:`/driver-api/firmware/fallback-mechanisms`.
+See Documentation/driver-api/firmware/fallback-mechanisms.rst.
The entries in this directory allow the firmware loader helper
fallback to be controlled:
@@ -297,7 +298,7 @@ crashes and outputting them to a serial console.
ftrace_enabled, stack_tracer_enabled
====================================
-See :doc:`/trace/ftrace`.
+See Documentation/trace/ftrace.rst.
hardlockup_all_cpu_backtrace
@@ -325,7 +326,7 @@ when a hard lockup is detected.
1 Panic on hard lockup.
= ===========================
-See :doc:`/admin-guide/lockup-watchdogs` for more information.
+See Documentation/admin-guide/lockup-watchdogs.rst for more information.
This can also be set using the nmi_watchdog kernel parameter.
@@ -333,7 +334,12 @@ hotplug
=======
Path for the hotplug policy agent.
-Default value is "``/sbin/hotplug``".
+Default value is ``CONFIG_UEVENT_HELPER_PATH``, which in turn defaults
+to the empty string.
+
+This file only exists when ``CONFIG_UEVENT_HELPER`` is enabled. Most
+modern systems rely exclusively on the netlink-based uevent source and
+don't need this.
hung_task_all_cpu_backtrace
@@ -582,7 +588,8 @@ in a KVM virtual machine. This default can be overridden by adding::
nmi_watchdog=1
-to the guest kernel command line (see :doc:`/admin-guide/kernel-parameters`).
+to the guest kernel command line (see
+Documentation/admin-guide/kernel-parameters.rst).
numa_balancing
@@ -1067,7 +1074,7 @@ that support this feature.
real-root-dev
=============
-See :doc:`/admin-guide/initrd`.
+See Documentation/admin-guide/initrd.rst.
reboot-cmd (SPARC only)
@@ -1088,6 +1095,13 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
+task_delayacct
+==============
+
+Enables/disables task delay accounting (see
+Documentation/accounting/delay-accounting.rst). Enabling this feature incurs
+a small amount of overhead in the scheduler but is useful for debugging
+and performance tuning. It is required by some tools such as iotop.
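+
+For example, delay accounting can be toggled at runtime (a sketch; requires
+root)::
+
+    sysctl kernel.task_delayacct=1
+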
sched_schedstats
================
@@ -1154,7 +1168,7 @@ will take effect.
seccomp
=======
-See :doc:`/userspace-api/seccomp_filter`.
+See Documentation/userspace-api/seccomp_filter.rst.
sg-big-buff
@@ -1283,11 +1297,11 @@ This parameter can be used to control the soft lockup detector.
= =================================
The soft lockup detector monitors CPUs for threads that are hogging the CPUs
-without rescheduling voluntarily, and thus prevent the 'watchdog/N' threads
-from running. The mechanism depends on the CPUs ability to respond to timer
-interrupts which are needed for the 'watchdog/N' threads to be woken up by
-the watchdog timer function, otherwise the NMI watchdog — if enabled — can
-detect a hard lockup condition.
+without rescheduling voluntarily, and thus prevent the 'migration/N' threads
+from running, causing the watchdog work to fail to execute. The mechanism
+depends on the CPUs' ability to respond to timer interrupts, which are needed
+for the watchdog work to be queued by the watchdog timer function; otherwise
+the NMI watchdog — if enabled — can detect a hard lockup condition.
stack_erasing
@@ -1325,7 +1339,7 @@ the boot PROM.
sysrq
=====
-See :doc:`/admin-guide/sysrq`.
+See Documentation/admin-guide/sysrq.rst.
tainted
@@ -1355,15 +1369,16 @@ ORed together. The letters are seen in "Tainted" line of Oops reports.
131072 `(T)` The kernel was built with the struct randomization plugin
====== ===== ==============================================================
-See :doc:`/admin-guide/tainted-kernels` for more information.
+See Documentation/admin-guide/tainted-kernels.rst for more information.
Note:
writes to this sysctl interface will fail with ``EINVAL`` if the kernel is
booted with the command line option ``panic_on_taint=<bitmask>,nousertaint``
and any of the ORed together values being written to ``tainted`` match with
the bitmask declared on panic_on_taint.
- See :doc:`/admin-guide/kernel-parameters` for more details on that particular
- kernel command line option and its optional ``nousertaint`` switch.
+ See Documentation/admin-guide/kernel-parameters.rst for more details on
+ that particular kernel command line option and its optional
+ ``nousertaint`` switch.
threads-max
===========
@@ -1387,7 +1402,7 @@ If a value outside of this range is written to ``threads-max`` an
traceoff_on_warning
===================
-When set, disables tracing (see :doc:`/trace/ftrace`) when a
+When set, disables tracing (see Documentation/trace/ftrace.rst) when a
``WARN()`` is hit.
@@ -1407,8 +1422,8 @@ will send them to printk() again.
This only works if the kernel was booted with ``tp_printk`` enabled.
-See :doc:`/admin-guide/kernel-parameters` and
-:doc:`/trace/boottime-trace`.
+See Documentation/admin-guide/kernel-parameters.rst and
+Documentation/trace/boottime-trace.rst.
.. _unaligned-dump-stack:
@@ -1458,11 +1473,22 @@ unprivileged_bpf_disabled
=========================
Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
-once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
-``-EPERM``.
+once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
+will return ``-EPERM``. Once set to 1, this can't be cleared from the
+running kernel anymore.
-Once set, this can't be cleared.
+Writing 2 to this entry will also disable unprivileged calls to ``bpf()``;
+however, an admin can still change this setting later on, if needed, by
+writing 0 or 1 to this entry.
+If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
+entry will default to 2 instead of 0.
+
+= =============================================================
+0 Unprivileged calls to ``bpf()`` are enabled
+1 Unprivileged calls to ``bpf()`` are disabled without recovery
+2 Unprivileged calls to ``bpf()`` are disabled
+= =============================================================
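+
+For example, a privileged process could lock out unprivileged ``bpf()``
+until the next reboot along the lines of the following sketch (error
+handling omitted; writing "2" instead would keep the setting adjustable)::
+
+  #include <fcntl.h>
+  #include <unistd.h>
+
+  static void disable_unpriv_bpf(void)
+  {
+          int fd = open("/proc/sys/kernel/unprivileged_bpf_disabled",
+                        O_WRONLY);
+
+          write(fd, "1", 1);   /* 1: disabled without recovery */
+          close(fd);
+  }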
watchdog
========
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 586cd4b86428..003d5cc3751b 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -25,7 +25,6 @@ files can be found in mm/swap.c.
Currently, these files are in /proc/sys/vm:
- admin_reserve_kbytes
-- block_dump
- compact_memory
- compaction_proactiveness
- compact_unevictable_allowed
@@ -64,7 +63,7 @@ Currently, these files are in /proc/sys/vm:
- overcommit_ratio
- page-cluster
- panic_on_oom
-- percpu_pagelist_fraction
+- percpu_pagelist_high_fraction
- stat_interval
- stat_refresh
- numa_stat
@@ -106,13 +105,6 @@ On x86_64 this is about 128MB.
Changing this takes effect whenever an application requests memory.
-block_dump
-==========
-
-block_dump enables block I/O debugging when set to a nonzero value. More
-information on block I/O debugging is in Documentation/admin-guide/laptops/laptop-mode.rst.
-
-
compact_memory
==============
@@ -790,22 +782,24 @@ panic_on_oom=2+kdump gives you very strong tool to investigate
why oom happens. You can get snapshot.
-percpu_pagelist_fraction
-========================
+percpu_pagelist_high_fraction
+=============================
-This is the fraction of pages at most (high mark pcp->high) in each zone that
-are allocated for each per cpu page list. The min value for this is 8. It
-means that we don't allow more than 1/8th of pages in each zone to be
-allocated in any single per_cpu_pagelist. This entry only changes the value
-of hot per cpu pagelists. User can specify a number like 100 to allocate
-1/100th of each zone to each per cpu page list.
+This is the fraction of pages in each zone that can be stored on
+per-cpu page lists. It is an upper boundary that is divided depending
+on the number of online CPUs. The min value for this is 8 which means
+that we do not allow more than 1/8th of pages in each zone to be stored
+on per-cpu page lists. This entry only changes the value of hot per-cpu
+page lists. A user can specify a number like 100 to allocate 1/100th of
+each zone between per-cpu lists.
-The batch value of each per cpu pagelist is also updated as a result. It is
-set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
+The batch value of each per-cpu page list remains the same regardless of
+the value of the high fraction so allocation latencies are unaffected.
-The initial value is zero. Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list. If the user writes '0' to this
-sysctl, it will revert to this default behavior.
+The initial value is zero. The kernel uses this value to set the pcp->high
+mark based on the low watermark for the zone and the number of local
+online CPUs. If the user writes '0' to this sysctl, it will revert to
+this default behavior.
stat_interval
@@ -936,12 +930,12 @@ allocations, THP and hugetlbfs pages.
To make it sensible with respect to the watermark_scale_factor
parameter, the unit is in fractions of 10,000. The default value of
-15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
-watermark will be reclaimed in the event of a pageblock being mixed due
-to fragmentation. The level of reclaim is determined by the number of
-fragmentation events that occurred in the recent past. If this value is
-smaller than a pageblock then a pageblocks worth of pages will be reclaimed
-(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
+15,000 means that up to 150% of the high watermark will be reclaimed in the
+event of a pageblock being mixed due to fragmentation. The level of reclaim
+is determined by the number of fragmentation events that occurred in the
+recent past. If this value is smaller than a pageblock then a pageblocks
+worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
+of 0 will disable the feature.
watermark_scale_factor
diff --git a/Documentation/arm/marvell.rst b/Documentation/arm/marvell.rst
index c50be711ec72..db2246493d18 100644
--- a/Documentation/arm/marvell.rst
+++ b/Documentation/arm/marvell.rst
@@ -259,7 +259,7 @@ Storage family
https://web.archive.org/web/20191129073953/http://www.marvell.com/storage/armada-sp/
Core:
- Sheeva ARMv7 comatible Quad-core PJ4C
+ Sheeva ARMv7 compatible Quad-core PJ4C
(not supported in upstream Linux kernel)
diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index 18b8cc1bf32c..a9192e7a231b 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -277,6 +277,12 @@ Before jumping into the kernel, the following conditions must be met:
- SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
+ For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
+
+ - If EL3 is present and the kernel is entered at EL2:
+
+ - SCR_EL3.HXEn (bit 38) must be initialised to 0b1.
+
For CPUs with Advanced SIMD and floating point support:
- If EL3 is present:
diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst
index 66c5a4e54130..df3a8a47f58c 100644
--- a/Documentation/block/bfq-iosched.rst
+++ b/Documentation/block/bfq-iosched.rst
@@ -553,20 +553,36 @@ throughput sustainable with bfq, because updating the blkio.bfq.*
stats is rather costly, especially for some of the stats enabled by
CONFIG_BFQ_CGROUP_DEBUG.
-Parameters to set
------------------
+Parameters
+----------
-For each group, there is only the following parameter to set.
+For each group, the following parameters can be set:
-weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
-group inside its parent. Available values: 1..1000 (default 100). The
-linear mapping between ioprio and weights, described at the beginning
-of the tunable section, is still valid, but all weights higher than
-IOPRIO_BE_NR*10 are mapped to ioprio 0.
+ weight
+ This specifies the default weight for the cgroup inside its parent.
+ Available values: 1..1000 (default: 100).
-Recall that, if low-latency is set, then BFQ automatically raises the
-weight of the queues associated with interactive and soft real-time
-applications. Unset this tunable if you need/want to control weights.
+ For cgroup v1, it is set by writing the value to `blkio.bfq.weight`.
+
+    For cgroup v2, it is set by writing the value to `io.bfq.weight`
+    (with an optional prefix of `default` and a space).
+
+ The linear mapping between ioprio and weights, described at the beginning
+ of the tunable section, is still valid, but all weights higher than
+ IOPRIO_BE_NR*10 are mapped to ioprio 0.
+
+ Recall that, if low-latency is set, then BFQ automatically raises the
+ weight of the queues associated with interactive and soft real-time
+ applications. Unset this tunable if you need/want to control weights.
+
+ weight_device
+    This specifies a per-device weight for the cgroup. The syntax is
+    `major:minor weight`. A weight of `0` may be used to reset to the
+    default weight.
+
+ For cgroup v1, it is set by writing the value to `blkio.bfq.weight_device`.
+
+ For cgroup v2, the file name is `io.bfq.weight`.
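+
+    As an illustration (the cgroup path and the `8:0` device number are
+    placeholders), a per-device weight can be set from C by writing the
+    `major:minor weight` string to the cgroup v1 file::
+
+      #include <fcntl.h>
+      #include <string.h>
+      #include <unistd.h>
+
+      static int set_bfq_device_weight(void)
+      {
+              /* give device 8:0 a BFQ weight of 300 in cgroup "mygroup" */
+              int fd = open("/sys/fs/cgroup/blkio/mygroup/blkio.bfq.weight_device",
+                            O_WRONLY);
+
+              if (fd < 0)
+                      return -1;
+              write(fd, "8:0 300", strlen("8:0 300"));
+              return close(fd);
+      }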
[1]
diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst
index 1d4d71e391af..2098477851a4 100644
--- a/Documentation/block/biodoc.rst
+++ b/Documentation/block/biodoc.rst
@@ -196,7 +196,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
do not have a corresponding kernel virtual address space mapping) and
low-memory pages.
-Note: Please refer to :doc:`/core-api/dma-api-howto` for a discussion
+Note: Please refer to Documentation/core-api/dma-api-howto.rst for a discussion
on PCI high mem DMA aspects and mapping of scatter gather lists, and support
for 64 bit PCI.
diff --git a/Documentation/block/blk-mq.rst b/Documentation/block/blk-mq.rst
index a980d23af48c..d96118c73954 100644
--- a/Documentation/block/blk-mq.rst
+++ b/Documentation/block/blk-mq.rst
@@ -62,7 +62,7 @@ queue, to be sent in the future, when the hardware is able.
Software staging queues
~~~~~~~~~~~~~~~~~~~~~~~
-The block IO subsystem adds requests in the software staging queues
+The block IO subsystem adds requests in the software staging queues
(represented by struct blk_mq_ctx) in case that they weren't sent
directly to the driver. A request is one or more BIOs. They arrived at the
block layer through the data structure struct bio. The block layer
@@ -132,7 +132,7 @@ In order to indicate which request has been completed, every request is
identified by an integer, ranging from 0 to the dispatch queue size. This tag
is generated by the block layer and later reused by the device driver, removing
the need to create a redundant identifier. When a request is completed in the
-drive, the tag is sent back to the block layer to notify it of the finalization.
+driver, the tag is sent back to the block layer to notify it of the finalization.
This removes the need to do a linear search to find out which IO has been
completed.
diff --git a/Documentation/block/stat.rst b/Documentation/block/stat.rst
index 77311335c08b..a1cd9db2058f 100644
--- a/Documentation/block/stat.rst
+++ b/Documentation/block/stat.rst
@@ -18,7 +18,7 @@ A.
each, it would be impossible to guarantee that a set of readings
represent a single point in time.
-The stat file consists of a single line of text containing 11 decimal
+The stat file consists of a single line of text containing 17 decimal
values separated by whitespace. The fields are summarized in the
following table, and described in more detail below.
diff --git a/Documentation/bpf/bpf_lsm.rst b/Documentation/bpf/bpf_lsm.rst
index 1c0a75a51d79..0dc3fb0d9544 100644
--- a/Documentation/bpf/bpf_lsm.rst
+++ b/Documentation/bpf/bpf_lsm.rst
@@ -20,10 +20,10 @@ LSM hook:
Other LSM hooks which can be instrumented can be found in
``include/linux/lsm_hooks.h``.
-eBPF programs that use :doc:`/bpf/btf` do not need to include kernel headers
-for accessing information from the attached eBPF program's context. They can
-simply declare the structures in the eBPF program and only specify the fields
-that need to be accessed.
+eBPF programs that use Documentation/bpf/btf.rst do not need to include kernel
+headers for accessing information from the attached eBPF program's context.
+They can simply declare the structures in the eBPF program and only specify
+the fields that need to be accessed.
.. code-block:: c
@@ -88,8 +88,9 @@ example:
The ``__attribute__((preserve_access_index))`` is a clang feature that allows
the BPF verifier to update the offsets for the access at runtime using the
-:doc:`/bpf/btf` information. Since the BPF verifier is aware of the types, it
-also validates all the accesses made to the various types in the eBPF program.
+Documentation/bpf/btf.rst information. Since the BPF verifier is aware of the
+types, it also validates all the accesses made to the various types in the
+eBPF program.
Loading
-------
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index a702f67dd45f..baea6c2abba5 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -12,6 +12,19 @@ BPF instruction-set.
The Cilium project also maintains a `BPF and XDP Reference Guide`_
that goes into great technical depth about the BPF Architecture.
+libbpf
+======
+
+Libbpf is a userspace library for loading and interacting with bpf programs.
+
+.. toctree::
+ :maxdepth: 1
+
+ libbpf/libbpf
+ libbpf/libbpf_api
+ libbpf/libbpf_build
+ libbpf/libbpf_naming_convention
+
BPF Type Format (BTF)
=====================
@@ -84,6 +97,7 @@ Other
:maxdepth: 1
ringbuf
+ llvm_reloc
.. Links:
.. _networking-filter: ../networking/filter.rst
diff --git a/Documentation/bpf/libbpf/libbpf.rst b/Documentation/bpf/libbpf/libbpf.rst
new file mode 100644
index 000000000000..1b1e61d5ead1
--- /dev/null
+++ b/Documentation/bpf/libbpf/libbpf.rst
@@ -0,0 +1,14 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+libbpf
+======
+
+This is documentation for libbpf, a userspace library for loading and
+interacting with bpf programs.
+
+All general BPF questions, including kernel functionality, libbpf APIs and
+their application, should be sent to bpf@vger.kernel.org mailing list.
+You can `subscribe <http://vger.kernel.org/vger-lists.html#bpf>`_ to the
+mailing list and search its `archive <https://lore.kernel.org/bpf/>`_.
+Please search the archive before asking new questions. It may well be
+that the question has already been addressed or answered before.
diff --git a/Documentation/bpf/libbpf/libbpf_api.rst b/Documentation/bpf/libbpf/libbpf_api.rst
new file mode 100644
index 000000000000..f07eecd054da
--- /dev/null
+++ b/Documentation/bpf/libbpf/libbpf_api.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+API
+===
+
+This documentation is autogenerated from header files in libbpf, tools/lib/bpf
+
+.. kernel-doc:: tools/lib/bpf/libbpf.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/bpf.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/btf.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/xsk.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/bpf_tracing.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/bpf_core_read.h
+ :internal:
+
+.. kernel-doc:: tools/lib/bpf/bpf_endian.h
+ :internal: \ No newline at end of file
diff --git a/Documentation/bpf/libbpf/libbpf_build.rst b/Documentation/bpf/libbpf/libbpf_build.rst
new file mode 100644
index 000000000000..8e8c23e8093d
--- /dev/null
+++ b/Documentation/bpf/libbpf/libbpf_build.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+Building libbpf
+===============
+
+libelf and zlib are internal dependencies of libbpf; applications must
+link against them, and they must be installed on the system for
+applications to work.
+pkg-config is used by default to find libelf, and the program called
+can be overridden with PKG_CONFIG.
+
+If using pkg-config at build time is not desired, it can be disabled by
+setting NO_PKG_CONFIG=1 when calling make.
+
+To build both static libbpf.a and shared libbpf.so:
+
+.. code-block:: bash
+
+ $ cd src
+ $ make
+
+To build only the static libbpf.a library in the directory build/ and
+install it together with libbpf headers in a staging directory root/:
+
+.. code-block:: bash
+
+ $ cd src
+ $ mkdir build root
+ $ BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=root make install
+
+To build both static libbpf.a and shared libbpf.so against a custom libelf
+dependency installed in /build/root/ and install them together with libbpf
+headers in a build directory /build/root/:
+
+.. code-block:: bash
+
+ $ cd src
+ $ PKG_CONFIG_PATH=/build/root/lib64/pkgconfig DESTDIR=/build/root make \ No newline at end of file
diff --git a/tools/lib/bpf/README.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
index 8928f7787f2d..3de1d51e41da 100644
--- a/tools/lib/bpf/README.rst
+++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-libbpf API naming convention
-============================
+API naming convention
+=====================
libbpf API provides access to a few logically separated groups of
functions and types. Every group has its own naming convention
@@ -10,14 +10,14 @@ new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``,
-``perf_buffer_``.
+``btf_dump_``, ``ring_buffer_``, ``perf_buffer_``.
System call wrappers
--------------------
System call wrappers are simple wrappers for commands supported by
sys_bpf system call. These wrappers should go to ``bpf.h`` header file
-and map one-on-one to corresponding commands.
+and map one to one to corresponding commands.
For example ``bpf_map_lookup_elem`` wraps ``BPF_MAP_LOOKUP_ELEM``
command of sys_bpf, ``bpf_prog_attach`` wraps ``BPF_PROG_ATTACH``, etc.
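+
+A simplified sketch of such a wrapper (the real implementation in
+``tools/lib/bpf/bpf.c`` handles more details) might look like::
+
+  #include <linux/bpf.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  int bpf_map_lookup_elem(int fd, const void *key, void *value)
+  {
+          union bpf_attr attr = {};
+
+          attr.map_fd = fd;
+          attr.key = (__u64)(unsigned long)key;
+          attr.value = (__u64)(unsigned long)value;
+
+          /* one-to-one mapping to the BPF_MAP_LOOKUP_ELEM command */
+          return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+  }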
@@ -49,10 +49,6 @@ object, ``bpf_object``, double underscore and ``open`` that defines the
purpose of the function to open ELF file and create ``bpf_object`` from
it.
-Another example: ``bpf_program__load`` is named for corresponding
-object, ``bpf_program``, that is separated from other part of the name
-by double underscore.
-
All objects and corresponding functions other than BTF related should go
to ``libbpf.h``. BTF types and functions should go to ``btf.h``.
@@ -72,11 +68,7 @@ of both low-level ring access functions and high-level configuration
functions. These can be mixed and matched. Note that these functions
are not reentrant for performance reasons.
-Please take a look at Documentation/networking/af_xdp.rst in the Linux
-kernel source tree on how to use XDP sockets and for some common
-mistakes in case you do not get any traffic up to user space.
-
-libbpf ABI
+ABI
==========
libbpf can be both linked statically or used as DSO. To avoid possible
@@ -116,7 +108,8 @@ This bump in ABI version is at most once per kernel development cycle.
For example, if current state of ``libbpf.map`` is:
-.. code-block::
+.. code-block:: c
+
LIBBPF_0.0.1 {
global:
bpf_func_a;
@@ -128,7 +121,8 @@ For example, if current state of ``libbpf.map`` is:
, and a new symbol ``bpf_func_c`` is being introduced, then
``libbpf.map`` should be changed like this:
-.. code-block::
+.. code-block:: c
+
LIBBPF_0.0.1 {
global:
bpf_func_a;
@@ -148,7 +142,7 @@ Format of version script and ways to handle ABI changes, including
incompatible ones, described in details in [1].
Stand-alone build
-=================
+-------------------
Under https://github.com/libbpf/libbpf there is a (semi-)automated
mirror of the mainline's version of libbpf for a stand-alone build.
@@ -157,12 +151,12 @@ However, all changes to libbpf's code base must be upstreamed through
the mainline kernel tree.
License
-=======
+-------------------
libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
Links
-=====
+-------------------
[1] https://www.akkadia.org/drepper/dsohowto.pdf
(Chapter 3. Maintaining APIs and ABIs).
diff --git a/Documentation/bpf/llvm_reloc.rst b/Documentation/bpf/llvm_reloc.rst
new file mode 100644
index 000000000000..ca8957d5b671
--- /dev/null
+++ b/Documentation/bpf/llvm_reloc.rst
@@ -0,0 +1,240 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+====================
+BPF LLVM Relocations
+====================
+
+This document describes LLVM BPF backend relocation types.
+
+Relocation Record
+=================
+
+LLVM BPF backend records each relocation with the following 16-byte
+ELF structure::
+
+ typedef struct
+ {
+ Elf64_Addr r_offset; // Offset from the beginning of section.
+ Elf64_Xword r_info; // Relocation type and symbol index.
+ } Elf64_Rel;
+
+For example, for the following code::
+
+ int g1 __attribute__((section("sec")));
+ int g2 __attribute__((section("sec")));
+ static volatile int l1 __attribute__((section("sec")));
+ static volatile int l2 __attribute__((section("sec")));
+ int test() {
+ return g1 + g2 + l1 + l2;
+ }
+
+Compiled with ``clang -target bpf -O2 -c test.c``, the following is
+the code with ``llvm-objdump -dr test.o``::
+
+ 0: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll
+ 0000000000000000: R_BPF_64_64 g1
+ 2: 61 11 00 00 00 00 00 00 r1 = *(u32 *)(r1 + 0)
+ 3: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0 ll
+ 0000000000000018: R_BPF_64_64 g2
+ 5: 61 20 00 00 00 00 00 00 r0 = *(u32 *)(r2 + 0)
+ 6: 0f 10 00 00 00 00 00 00 r0 += r1
+ 7: 18 01 00 00 08 00 00 00 00 00 00 00 00 00 00 00 r1 = 8 ll
+ 0000000000000038: R_BPF_64_64 sec
+ 9: 61 11 00 00 00 00 00 00 r1 = *(u32 *)(r1 + 0)
+ 10: 0f 10 00 00 00 00 00 00 r0 += r1
+ 11: 18 01 00 00 0c 00 00 00 00 00 00 00 00 00 00 00 r1 = 12 ll
+ 0000000000000058: R_BPF_64_64 sec
+ 13: 61 11 00 00 00 00 00 00 r1 = *(u32 *)(r1 + 0)
+ 14: 0f 10 00 00 00 00 00 00 r0 += r1
+ 15: 95 00 00 00 00 00 00 00 exit
+
+There are four relocations in the above for the four ``LD_imm64`` instructions.
+The following ``llvm-readelf -r test.o`` shows the binary values of the four
+relocations::
+
+ Relocation section '.rel.text' at offset 0x190 contains 4 entries:
+ Offset Info Type Symbol's Value Symbol's Name
+ 0000000000000000 0000000600000001 R_BPF_64_64 0000000000000000 g1
+ 0000000000000018 0000000700000001 R_BPF_64_64 0000000000000004 g2
+ 0000000000000038 0000000400000001 R_BPF_64_64 0000000000000000 sec
+ 0000000000000058 0000000400000001 R_BPF_64_64 0000000000000000 sec
+
+Each relocation is represented by ``Offset`` (8 bytes) and ``Info`` (8 bytes).
+For example, the first relocation corresponds to the first instruction
+(Offset 0x0) and the corresponding ``Info`` indicates the relocation type
+of ``R_BPF_64_64`` (type 1) and the entry in the symbol table (entry 6).
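+
+The split of ``Info`` into a symbol index (upper 32 bits) and a
+relocation type (lower 32 bits) follows the standard ELF64 convention,
+so it can be decoded, for instance, with the macros from ``<elf.h>``::
+
+  #include <elf.h>
+  #include <stdio.h>
+
+  static void print_reloc(Elf64_Xword r_info)
+  {
+          /* 0x0000000600000001 -> symbol 6, type 1 (R_BPF_64_64) */
+          printf("symbol %llu, type %llu\n",
+                 (unsigned long long)ELF64_R_SYM(r_info),
+                 (unsigned long long)ELF64_R_TYPE(r_info));
+  }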
+The following is the symbol table with ``llvm-readelf -s test.o``::
+
+ Symbol table '.symtab' contains 8 entries:
+ Num: Value Size Type Bind Vis Ndx Name
+ 0: 0000000000000000 0 NOTYPE LOCAL DEFAULT UND
+ 1: 0000000000000000 0 FILE LOCAL DEFAULT ABS test.c
+ 2: 0000000000000008 4 OBJECT LOCAL DEFAULT 4 l1
+ 3: 000000000000000c 4 OBJECT LOCAL DEFAULT 4 l2
+ 4: 0000000000000000 0 SECTION LOCAL DEFAULT 4 sec
+ 5: 0000000000000000 128 FUNC GLOBAL DEFAULT 2 test
+ 6: 0000000000000000 4 OBJECT GLOBAL DEFAULT 4 g1
+ 7: 0000000000000004 4 OBJECT GLOBAL DEFAULT 4 g2
+
+The 6th entry is global variable ``g1`` with value 0.
+
+Similarly, the second relocation is at ``.text`` offset ``0x18``, instruction 3,
+for global variable ``g2`` which has a symbol value 4, the offset
+from the start of the ``sec`` section.
+
+The third and fourth relocations refer to the static variables ``l1``
+and ``l2``. From the ``.rel.text`` section above, it is not clear
+which symbols they really refer to, as they both refer to
+symbol table entry 4, symbol ``sec``, which has ``STT_SECTION`` type
+and represents a section. So for a static variable or function,
+the section offset is written to the original insn
+buffer; this value is called ``A`` (the addend). Looking at
+insns ``7`` and ``11`` above, they have section offsets ``8`` and ``12``.
+From the symbol table, we can find that they correspond to entries ``2``
+and ``3`` for ``l1`` and ``l2``.
+
+In general, ``A`` is 0 for global variables and functions,
+and is the section offset or some computation result based on the
+section offset for static variables/functions. The non-section-offset
+case refers to function calls. See below for more details.
+
+Different Relocation Types
+==========================
+
+Six relocation types are supported. The following is an overview, where
+``S`` represents the value of the symbol in the symbol table::
+
+ Enum ELF Reloc Type Description BitSize Offset Calculation
+ 0 R_BPF_NONE None
+ 1 R_BPF_64_64 ld_imm64 insn 32 r_offset + 4 S + A
+ 2 R_BPF_64_ABS64 normal data 64 r_offset S + A
+ 3 R_BPF_64_ABS32 normal data 32 r_offset S + A
+ 4 R_BPF_64_NODYLD32 .BTF[.ext] data 32 r_offset S + A
+ 10 R_BPF_64_32 call insn 32 r_offset + 4 (S + A) / 8 - 1
+
+For example, ``R_BPF_64_64`` relocation type is used for ``ld_imm64`` instruction.
+The actual to-be-relocated data (0 or section offset)
+is stored at ``r_offset + 4`` and the read/write
+data bitsize is 32 (4 bytes). The relocation can be resolved with
+the symbol value plus implicit addend. Note that the ``BitSize`` is 32 which
+means the section offset must be less than or equal to ``UINT32_MAX`` and this
+is enforced by LLVM BPF backend.
+
+In another case, ``R_BPF_64_ABS64`` relocation type is used for normal 64-bit data.
+The actual to-be-relocated data is stored at ``r_offset`` and the read/write data
+bitsize is 64 (8 bytes). The relocation can be resolved with
+the symbol value plus implicit addend.
+
+Both ``R_BPF_64_ABS32`` and ``R_BPF_64_NODYLD32`` types are for 32-bit data.
+But ``R_BPF_64_NODYLD32`` specifically refers to relocations in ``.BTF`` and
+``.BTF.ext`` sections. For cases like bcc where llvm ``ExecutionEngine RuntimeDyld``
+is involved, ``R_BPF_64_NODYLD32`` types of relocations should not be resolved
+to actual function/variable address. Otherwise, ``.BTF`` and ``.BTF.ext``
+become unusable by bcc and kernel.
+
+Type ``R_BPF_64_32`` is used for the call instruction. The call target
+section offset is stored at ``r_offset + 4`` (32-bit) and calculated as
+``(S + A) / 8 - 1``.
+
+Examples
+========
+
+Types ``R_BPF_64_64`` and ``R_BPF_64_32`` are used to resolve ``ld_imm64``
+and ``call`` instructions. For example::
+
+ __attribute__((noinline)) __attribute__((section("sec1")))
+ int gfunc(int a, int b) {
+ return a * b;
+ }
+ static __attribute__((noinline)) __attribute__((section("sec1")))
+ int lfunc(int a, int b) {
+ return a + b;
+ }
+ int global __attribute__((section("sec2")));
+ int test(int a, int b) {
+ return gfunc(a, b) + lfunc(a, b) + global;
+ }
+
+Compiled with ``clang -target bpf -O2 -c test.c``, we will have the
+following code with ``llvm-objdump -dr test.o``::
+
+ Disassembly of section .text:
+
+ 0000000000000000 <test>:
+ 0: bf 26 00 00 00 00 00 00 r6 = r2
+ 1: bf 17 00 00 00 00 00 00 r7 = r1
+ 2: 85 10 00 00 ff ff ff ff call -1
+ 0000000000000010: R_BPF_64_32 gfunc
+ 3: bf 08 00 00 00 00 00 00 r8 = r0
+ 4: bf 71 00 00 00 00 00 00 r1 = r7
+ 5: bf 62 00 00 00 00 00 00 r2 = r6
+ 6: 85 10 00 00 02 00 00 00 call 2
+ 0000000000000030: R_BPF_64_32 sec1
+ 7: 0f 80 00 00 00 00 00 00 r0 += r8
+ 8: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0 ll
+ 0000000000000040: R_BPF_64_64 global
+ 10: 61 11 00 00 00 00 00 00 r1 = *(u32 *)(r1 + 0)
+ 11: 0f 10 00 00 00 00 00 00 r0 += r1
+ 12: 95 00 00 00 00 00 00 00 exit
+
+ Disassembly of section sec1:
+
+ 0000000000000000 <gfunc>:
+ 0: bf 20 00 00 00 00 00 00 r0 = r2
+ 1: 2f 10 00 00 00 00 00 00 r0 *= r1
+ 2: 95 00 00 00 00 00 00 00 exit
+
+ 0000000000000018 <lfunc>:
+ 3: bf 20 00 00 00 00 00 00 r0 = r2
+ 4: 0f 10 00 00 00 00 00 00 r0 += r1
+ 5: 95 00 00 00 00 00 00 00 exit
+
+The first relocation corresponds to ``gfunc(a, b)`` where ``gfunc`` has a value of 0,
+so the ``call`` instruction offset is ``(0 + 0)/8 - 1 = -1``.
+The second relocation corresponds to ``lfunc(a, b)`` where ``lfunc`` has a section
+offset ``0x18``, so the ``call`` instruction offset is ``(0 + 0x18)/8 - 1 = 2``.
+The third relocation corresponds to ld_imm64 of ``global``, which has a section
+offset ``0``.
+
+The following example shows how ``R_BPF_64_ABS64`` could be generated::
+
+ int global() { return 0; }
+ struct t { void *g; } gbl = { global };
+
+Compiled with ``clang -target bpf -O2 -g -c test.c``, we will see a
+relocation below in the ``.data`` section with the command
+``llvm-readelf -r test.o``::
+
+ Relocation section '.rel.data' at offset 0x458 contains 1 entries:
+ Offset Info Type Symbol's Value Symbol's Name
+ 0000000000000000 0000000700000002 R_BPF_64_ABS64 0000000000000000 global
+
+The relocation says that the first 8 bytes of the ``.data`` section
+should be filled with the address of the ``global`` function.
+
+With the ``llvm-readelf`` output, we can see that DWARF sections have a
+bunch of ``R_BPF_64_ABS32`` and ``R_BPF_64_ABS64`` relocations::
+
+ Relocation section '.rel.debug_info' at offset 0x468 contains 13 entries:
+ Offset Info Type Symbol's Value Symbol's Name
+ 0000000000000006 0000000300000003 R_BPF_64_ABS32 0000000000000000 .debug_abbrev
+ 000000000000000c 0000000400000003 R_BPF_64_ABS32 0000000000000000 .debug_str
+ 0000000000000012 0000000400000003 R_BPF_64_ABS32 0000000000000000 .debug_str
+ 0000000000000016 0000000600000003 R_BPF_64_ABS32 0000000000000000 .debug_line
+ 000000000000001a 0000000400000003 R_BPF_64_ABS32 0000000000000000 .debug_str
+ 000000000000001e 0000000200000002 R_BPF_64_ABS64 0000000000000000 .text
+ 000000000000002b 0000000400000003 R_BPF_64_ABS32 0000000000000000 .debug_str
+ 0000000000000037 0000000800000002 R_BPF_64_ABS64 0000000000000000 gbl
+ 0000000000000040 0000000400000003 R_BPF_64_ABS32 0000000000000000 .debug_str
+ ......
+
+The ``.BTF``/``.BTF.ext`` sections have ``R_BPF_64_NODYLD32`` relocations::
+
+ Relocation section '.rel.BTF' at offset 0x538 contains 1 entries:
+ Offset Info Type Symbol's Value Symbol's Name
+ 0000000000000084 0000000800000004 R_BPF_64_NODYLD32 0000000000000000 gbl
+
+ Relocation section '.rel.BTF.ext' at offset 0x548 contains 2 entries:
+ Offset Info Type Symbol's Value Symbol's Name
+ 000000000000002c 0000000200000004 R_BPF_64_NODYLD32 0000000000000000 .text
+ 0000000000000040 0000000200000004 R_BPF_64_NODYLD32 0000000000000000 .text
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 879e86dbea66..7d92ec3e5b6e 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -41,15 +41,7 @@ extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
'maintainers_include', 'sphinx.ext.autosectionlabel',
'kernel_abi', 'kernel_feat']
-#
-# cdomain is badly broken in Sphinx 3+. Leaving it out generates *most*
-# of the docs correctly, but not all. Scream bloody murder but allow
-# the process to proceed; hopefully somebody will fix this properly soon.
-#
if major >= 3:
- sys.stderr.write('''WARNING: The kernel documentation build process
- support for Sphinx v3.0 and above is brand new. Be prepared for
- possible issues in the generated output.\n''')
if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
@@ -353,6 +345,8 @@ latex_elements = {
# Additional stuff for the LaTeX preamble.
'preamble': '''
+ % Prevent column squeezing of tabulary.
+ \\setlength{\\tymin}{20em}
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
@@ -366,11 +360,23 @@ latex_elements = {
cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore')
if cjk_cmd.find("Noto Sans CJK SC") >= 0:
- print ("enabling CJK for LaTeX builder")
latex_elements['preamble'] += '''
% This is needed for translations
\\usepackage{xeCJK}
\\setCJKmainfont{Noto Sans CJK SC}
+ % Define custom macros to on/off CJK
+ \\newcommand{\\kerneldocCJKon}{\\makexeCJKactive}
+ \\newcommand{\\kerneldocCJKoff}{\\makexeCJKinactive}
+ % To customize \sphinxtableofcontents
+ \\usepackage{etoolbox}
+ % Inactivate CJK after tableofcontents
+ \\apptocmd{\\sphinxtableofcontents}{\\kerneldocCJKoff}{}{}
+ '''
+else:
+ latex_elements['preamble'] += '''
+ % Custom macros to on/off CJK (Dummy)
+ \\newcommand{\\kerneldocCJKon}{}
+ \\newcommand{\\kerneldocCJKoff}{}
'''
# Fix reference escape troubles with Sphinx 1.4.x
diff --git a/Documentation/core-api/bus-virt-phys-mapping.rst b/Documentation/core-api/bus-virt-phys-mapping.rst
index c7bc99cd2e21..c72b24a7d52c 100644
--- a/Documentation/core-api/bus-virt-phys-mapping.rst
+++ b/Documentation/core-api/bus-virt-phys-mapping.rst
@@ -8,7 +8,7 @@ How to access I/O mapped memory from within device drivers
The virt_to_bus() and bus_to_virt() functions have been
superseded by the functionality provided by the PCI DMA interface
- (see :doc:`/core-api/dma-api-howto`). They continue
+ (see Documentation/core-api/dma-api-howto.rst). They continue
to be documented below for historical purposes, but new code
must not use them. --davidm 00/12/12
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 00a1d4fa3f9e..6d6d0edd2d27 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -5,7 +5,7 @@ Dynamic DMA mapping using the generic device
:Author: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
This document describes the DMA API. For a more gentle introduction
-of the API (and actual examples), see :doc:`/core-api/dma-api-howto`.
+of the API (and actual examples), see Documentation/core-api/dma-api-howto.rst.
This API is split into two pieces. Part I describes the basic API.
Part II describes extensions for supporting non-consistent memory
@@ -479,7 +479,8 @@ without the _attrs suffixes, except that they pass an optional
dma_attrs.
The interpretation of DMA attributes is architecture-specific, and
-each attribute should be documented in :doc:`/core-api/dma-attributes`.
+each attribute should be documented in
+Documentation/core-api/dma-attributes.rst.
If dma_attrs are 0, the semantics of each of these functions
is identical to those of the corresponding function
diff --git a/Documentation/core-api/dma-isa-lpc.rst b/Documentation/core-api/dma-isa-lpc.rst
index e59a3d35a93d..17b193603f0a 100644
--- a/Documentation/core-api/dma-isa-lpc.rst
+++ b/Documentation/core-api/dma-isa-lpc.rst
@@ -17,7 +17,7 @@ To do ISA style DMA you need to include two headers::
#include <asm/dma.h>
The first is the generic DMA API used to convert virtual addresses to
-bus addresses (see :doc:`/core-api/dma-api` for details).
+bus addresses (see Documentation/core-api/dma-api.rst for details).
The second contains the routines specific to ISA DMA transfers. Since
this is not present on all platforms make sure you construct your
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index f1c9d20bd42d..5de2c7a4b1b3 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -48,7 +48,7 @@ Concurrency primitives
======================
How Linux keeps everything from happening at the same time. See
-:doc:`/locking/index` for more related documentation.
+Documentation/locking/index.rst for more related documentation.
.. toctree::
:maxdepth: 1
@@ -77,7 +77,7 @@ Memory management
=================
How to allocate and use memory in the kernel. Note that there is a lot
-more memory-management documentation in :doc:`/vm/index`.
+more memory-management documentation in Documentation/vm/index.rst.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index 8214e215a8bf..53283b3729a1 100644
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -146,7 +146,6 @@ Legacy
irq_domain_add_simple()
irq_domain_add_legacy()
- irq_domain_add_legacy_isa()
irq_domain_create_simple()
irq_domain_create_legacy()
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index f063a384c7c8..4346ae17a72c 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -37,14 +37,13 @@ Integer types
u64 %llu or %llx
-If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
-format specifier of its largest possible type and explicitly cast to it.
+If <type> is architecture-dependent for its size (e.g., cycles_t, tcflag_t) or
+is dependent on a config option for its size (e.g., blk_status_t), use a format
+specifier of its largest possible type and explicitly cast to it.
Example::
- printk("test: sector number/total blocks: %llu/%llu\n",
- (unsigned long long)sector, (unsigned long long)blockcount);
+ printk("test: latency: %llu cycles\n", (unsigned long long)time);
Reminder: sizeof() returns type size_t.
@@ -514,9 +513,10 @@ Time and date
::
%pt[RT] YYYY-mm-ddTHH:MM:SS
+ %pt[RT]s YYYY-mm-dd HH:MM:SS
%pt[RT]d YYYY-mm-dd
%pt[RT]t HH:MM:SS
- %pt[RT][dt][r]
+ %pt[RT][dt][r][s]
For printing date and time as represented by::
@@ -528,6 +528,10 @@ in human readable format.
By default year will be incremented by 1900 and month by 1.
Use %pt[RT]r (raw) to suppress this behaviour.
+The %pt[RT]s (space) will override the ISO 8601 separator by using ' '
+(space) instead of 'T' (capital T) between date and time. It won't have
+any effect when date or time is omitted.
+
Passed by reference.
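+
+For example, a ``time64_t`` value can be printed with the space separator
+like this (an illustrative snippet, not taken from in-tree code)::
+
+	time64_t ts = ktime_get_real_seconds();
+
+	/* prints e.g. "2021-06-29 10:15:30" rather than "2021-06-29T10:15:30" */
+	pr_info("event time: %ptTs\n", &ts);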
struct clk
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index 51fed1bd72ec..f0956e9ea2d8 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -246,6 +246,7 @@ Allocation style
The first argument for kcalloc or kmalloc_array should be the
number of elements. sizeof() as the first argument is generally
wrong.
+
See: https://www.kernel.org/doc/html/latest/core-api/memory-allocation.html
**ALLOC_SIZEOF_STRUCT**
@@ -264,6 +265,7 @@ Allocation style
**ALLOC_WITH_MULTIPLY**
Prefer kmalloc_array/kcalloc over kmalloc/kzalloc with a
sizeof multiply.
+
See: https://www.kernel.org/doc/html/latest/core-api/memory-allocation.html
@@ -284,6 +286,7 @@ API usage
BUG() or BUG_ON() should be avoided totally.
Use WARN() and WARN_ON() instead, and handle the "impossible"
error condition as gracefully as possible.
+
See: https://www.kernel.org/doc/html/latest/process/deprecated.html#bug-and-bug-on
**CONSIDER_KSTRTO**
@@ -292,12 +295,161 @@ API usage
may lead to unexpected results in callers. The respective kstrtol(),
kstrtoll(), kstrtoul(), and kstrtoull() functions tend to be the
correct replacements.
+
See: https://www.kernel.org/doc/html/latest/process/deprecated.html#simple-strtol-simple-strtoll-simple-strtoul-simple-strtoull
+ **CONSTANT_CONVERSION**
+ Use of __constant_<foo> form is discouraged for the following functions::
+
+ __constant_cpu_to_be[x]
+ __constant_cpu_to_le[x]
+ __constant_be[x]_to_cpu
+ __constant_le[x]_to_cpu
+ __constant_htons
+ __constant_ntohs
+
+    Using any of these outside of include/uapi/ is not preferred, as the
+    form without the __constant_ prefix behaves identically when the
+    argument is a constant.
+
+ In big endian systems, the macros like __constant_cpu_to_be32(x) and
+ cpu_to_be32(x) expand to the same expression::
+
+ #define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+ #define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+
+ In little endian systems, the macros __constant_cpu_to_be32(x) and
+ cpu_to_be32(x) expand to __constant_swab32 and __swab32. __swab32
+ has a __builtin_constant_p check::
+
+ #define __swab32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ ___constant_swab32(x) : \
+ __fswab32(x))
+
+ So ultimately they have a special case for constants.
+    The same holds for all of the macros in the list, so using the
+    __constant_... forms is unnecessarily verbose and not preferred
+    outside of include/uapi.
+
+ See: https://lore.kernel.org/lkml/1400106425.12666.6.camel@joe-AO725/
+
+ **DEPRECATED_API**
+    Usage of a deprecated RCU API is detected. It is recommended to replace
+    old flavourful RCU APIs with their new vanilla-RCU counterparts.
+
+ The full list of available RCU APIs can be viewed from the kernel docs.
+
+ See: https://www.kernel.org/doc/html/latest/RCU/whatisRCU.html#full-list-of-rcu-apis
+
+ **DEPRECATED_VARIABLE**
+ EXTRA_{A,C,CPP,LD}FLAGS are deprecated and should be replaced by the new
+ flags added via commit f77bf01425b1 ("kbuild: introduce ccflags-y,
+ asflags-y and ldflags-y").
+
+    The following conversion scheme may be used::
+
+ EXTRA_AFLAGS -> asflags-y
+ EXTRA_CFLAGS -> ccflags-y
+ EXTRA_CPPFLAGS -> cppflags-y
+ EXTRA_LDFLAGS -> ldflags-y
+
+ See:
+
+ 1. https://lore.kernel.org/lkml/20070930191054.GA15876@uranus.ravnborg.org/
+ 2. https://lore.kernel.org/lkml/1313384834-24433-12-git-send-email-lacombar@gmail.com/
+ 3. https://www.kernel.org/doc/html/latest/kbuild/makefiles.html#compilation-flags
+
+ **DEVICE_ATTR_FUNCTIONS**
+    The function names used in DEVICE_ATTR are unusual.
+ Typically, the store and show functions are used with <attr>_store and
+ <attr>_show, where <attr> is a named attribute variable of the device.
+
+ Consider the following examples::
+
+ static DEVICE_ATTR(type, 0444, type_show, NULL);
+ static DEVICE_ATTR(power, 0644, power_show, power_store);
+
+ The function names should preferably follow the above pattern.
+
+ See: https://www.kernel.org/doc/html/latest/driver-api/driver-model/device.html#attributes
+
+ **DEVICE_ATTR_RO**
+ The DEVICE_ATTR_RO(name) helper macro can be used instead of
+ DEVICE_ATTR(name, 0444, name_show, NULL);
+
+ Note that the macro automatically appends _show to the named
+ attribute variable of the device for the show method.
+
+ See: https://www.kernel.org/doc/html/latest/driver-api/driver-model/device.html#attributes
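+
+    For example (``to_foo()`` being a driver-private helper)::
+
+      static ssize_t type_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+      {
+              return sysfs_emit(buf, "%d\n", to_foo(dev)->type);
+      }
+      static DEVICE_ATTR_RO(type);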
+
+ **DEVICE_ATTR_RW**
+ The DEVICE_ATTR_RW(name) helper macro can be used instead of
+ DEVICE_ATTR(name, 0644, name_show, name_store);
+
+ Note that the macro automatically appends _show and _store to the
+ named attribute variable of the device for the show and store methods.
+
+ See: https://www.kernel.org/doc/html/latest/driver-api/driver-model/device.html#attributes
+
+ **DEVICE_ATTR_WO**
+    The DEVICE_ATTR_WO(name) helper macro can be used instead of
+ DEVICE_ATTR(name, 0200, NULL, name_store);
+
+ Note that the macro automatically appends _store to the
+ named attribute variable of the device for the store method.
+
+ See: https://www.kernel.org/doc/html/latest/driver-api/driver-model/device.html#attributes
+
+ **DUPLICATED_SYSCTL_CONST**
+ Commit d91bff3011cf ("proc/sysctl: add shared variables for range
+ check") added some shared const variables to be used instead of a local
+ copy in each source file.
+
+ Consider replacing the sysctl range checking value with the shared
+ one in include/linux/sysctl.h. The following conversion scheme may
+ be used::
+
+ &zero -> SYSCTL_ZERO
+ &one -> SYSCTL_ONE
+ &int_max -> SYSCTL_INT_MAX
+
+ See:
+
+ 1. https://lore.kernel.org/lkml/20190430180111.10688-1-mcroce@redhat.com/
+ 2. https://lore.kernel.org/lkml/20190531131422.14970-1-mcroce@redhat.com/
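+
+    A converted ctl_table entry then looks like this (``some_knob`` is a
+    placeholder)::
+
+      {
+              .procname       = "some_knob",
+              .data           = &some_knob,
+              .maxlen         = sizeof(int),
+              .mode           = 0644,
+              .proc_handler   = proc_dointvec_minmax,
+              .extra1         = SYSCTL_ZERO,
+              .extra2         = SYSCTL_ONE,
+      },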
+
+ **ENOSYS**
+ ENOSYS means that a nonexistent system call was called.
+ Earlier, it was wrongly used for things like invalid operations on
+ otherwise valid syscalls. This should be avoided in new code.
+
+ See: https://lore.kernel.org/lkml/5eb299021dec23c1a48fa7d9f2c8b794e967766d.1408730669.git.luto@amacapital.net/
+
+ **ENOTSUPP**
+ ENOTSUPP is not a standard error code and should be avoided in new patches.
+ EOPNOTSUPP should be used instead.
+
+ See: https://lore.kernel.org/netdev/20200510182252.GA411829@lunn.ch/
+
+ **EXPORT_SYMBOL**
+ EXPORT_SYMBOL should immediately follow the symbol to be exported.
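+
+    For example (``do_frob`` being any symbol to be exported)::
+
+      int do_frob(struct foo *f)
+      {
+              ...
+      }
+      EXPORT_SYMBOL(do_frob);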
+
+ **IN_ATOMIC**
+ in_atomic() is not for driver use so any such use is reported as an ERROR.
+ Also in_atomic() is often used to determine if sleeping is permitted,
+ but it is not reliable in this use model. Therefore its use is
+ strongly discouraged.
+
+ However, in_atomic() is ok for core kernel use.
+
+ See: https://lore.kernel.org/lkml/20080320201723.b87b3732.akpm@linux-foundation.org/
+
**LOCKDEP**
The lockdep_no_validate class was added as a temporary measure to
prevent warnings on conversion of device->sem to device->mutex.
It should not be used for any other purpose.
+
See: https://lore.kernel.org/lkml/1268959062.9440.467.camel@laptop/
**MALFORMED_INCLUDE**
@@ -308,14 +460,21 @@ API usage
**USE_LOCKDEP**
lockdep_assert_held() annotations should be preferred over
assertions based on spin_is_locked()
+
See: https://www.kernel.org/doc/html/latest/locking/lockdep-design.html#annotations
**UAPI_INCLUDE**
No #include statements in include/uapi should use a uapi/ path.
+ **USLEEP_RANGE**
+ usleep_range() should be preferred over udelay(). The proper way of
+ using usleep_range() is mentioned in the kernel docs.
-Comment style
--------------
+ See: https://www.kernel.org/doc/html/latest/timers/timers-howto.html#delays-information-on-the-various-kernel-delay-sleep-mechanisms
+
+
+Comments
+--------
**BLOCK_COMMENT_STYLE**
The comment style is incorrect. The preferred style for multi-
@@ -338,8 +497,24 @@ Comment style
**C99_COMMENTS**
C99 style single line comments (//) should not be used.
Prefer the block comment style instead.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#commenting
+ **DATA_RACE**
+ Applications of data_race() should have a comment so as to document the
+ reasoning behind why it was deemed safe.
+
+ See: https://lore.kernel.org/lkml/20200401101714.44781-1-elver@google.com/
+
+ **FSF_MAILING_ADDRESS**
+ Kernel maintainers reject new instances of the GPL boilerplate paragraph
+ directing people to write to the FSF for a copy of the GPL, since the
+ FSF has moved in the past and may do so again.
+ So do not write paragraphs about writing to the Free Software Foundation's
+ mailing address.
+
+ See: https://lore.kernel.org/lkml/20131006222342.GT19510@leaf/
+
Commit message
--------------
@@ -347,6 +522,7 @@ Commit message
**BAD_SIGN_OFF**
The signed-off-by line does not fall in line with the standards
specified by the community.
+
See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#developer-s-certificate-of-origin-1-1
**BAD_STABLE_ADDRESS_STYLE**
@@ -368,12 +544,33 @@ Commit message
**COMMIT_MESSAGE**
The patch is missing a commit description. A brief
description of the changes made by the patch should be added.
+
+ See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes
+
+ **EMAIL_SUBJECT**
+ Naming the tool that found the issue is not very useful in the
+ subject line. A good subject line summarizes the change that
+ the patch brings.
+
See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes
+ **FROM_SIGN_OFF_MISMATCH**
+    The author's email does not match that in the Signed-off-by:
+    line(s). This can sometimes be caused by an improperly configured
+    email client.
+
+    This message is emitted for any of the following reasons:
+
+ - The email names do not match.
+ - The email addresses do not match.
+ - The email subaddresses do not match.
+ - The email comments do not match.
+
**MISSING_SIGN_OFF**
The patch is missing a Signed-off-by line. A signed-off-by
line should be added according to Developer's certificate of
Origin.
+
See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin
**NO_AUTHOR_SIGN_OFF**
@@ -382,6 +579,7 @@ Commit message
end of explanation of the patch to denote that the author has
written it or otherwise has the rights to pass it on as an open
source patch.
+
See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin
**DIFF_IN_COMMIT_MSG**
@@ -389,6 +587,7 @@ Commit message
This causes problems when one tries to apply a file containing both
the changelog and the diff because patch(1) tries to apply the diff
which it found in the changelog.
+
See: https://lore.kernel.org/lkml/20150611134006.9df79a893e3636019ad2759e@linux-foundation.org/
**GERRIT_CHANGE_ID**
@@ -431,6 +630,7 @@ Comparison style
**BOOL_COMPARISON**
Comparisons of A to true and false are better written
as A and !A.
+
See: https://lore.kernel.org/lkml/1365563834.27174.12.camel@joe-AO722/
**COMPARISON_TO_NULL**
@@ -442,6 +642,87 @@ Comparison style
side of the test should be avoided.
+Indentation and Line Breaks
+---------------------------
+
+ **CODE_INDENT**
+ Code indent should use tabs instead of spaces.
+ Outside of comments, documentation and Kconfig,
+ spaces are never used for indentation.
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#indentation
+
+ **DEEP_INDENTATION**
+    Indentation with 6 or more tabs usually indicates overly indented
+    code.
+
+ It is suggested to refactor excessive indentation of
+ if/else/for/do/while/switch statements.
+
+ See: https://lore.kernel.org/lkml/1328311239.21255.24.camel@joe2Laptop/
+
+ **SWITCH_CASE_INDENT_LEVEL**
+ switch should be at the same indent as case.
+ Example::
+
+ switch (suffix) {
+ case 'G':
+ case 'g':
+ mem <<= 30;
+ break;
+ case 'M':
+ case 'm':
+ mem <<= 20;
+ break;
+ case 'K':
+ case 'k':
+ mem <<= 10;
+ fallthrough;
+ default:
+ break;
+ }
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#indentation
+
+ **LONG_LINE**
+ The line has exceeded the specified maximum length.
+ To use a different maximum line length, the --max-line-length=n option
+ may be added while invoking checkpatch.
+
+ Earlier, the default line length was 80 columns. Commit bdc48fa11e46
+ ("checkpatch/coding-style: deprecate 80-column warning") increased the
+ limit to 100 columns. This is not a hard limit either and it's
+ preferable to stay within 80 columns whenever possible.
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#breaking-long-lines-and-strings
+
+ **LONG_LINE_STRING**
+ A string starts before but extends beyond the maximum line length.
+ To use a different maximum line length, the --max-line-length=n option
+ may be added while invoking checkpatch.
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#breaking-long-lines-and-strings
+
+ **LONG_LINE_COMMENT**
+ A comment starts before but extends beyond the maximum line length.
+ To use a different maximum line length, the --max-line-length=n option
+ may be added while invoking checkpatch.
+
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#breaking-long-lines-and-strings
+
+ **TRAILING_STATEMENTS**
+ Trailing statements (for example after any conditional) should be
+ on the next line.
+ Statements, such as::
+
+ if (x == y) break;
+
+ should be::
+
+ if (x == y)
+ break;
+
+
Macros, Attributes and Symbols
------------------------------
@@ -472,7 +753,7 @@ Macros, Attributes and Symbols
**BIT_MACRO**
Defines like: 1 << <digit> could be BIT(digit).
- The BIT() macro is defined in include/linux/bitops.h::
+ The BIT() macro is defined via include/linux/bits.h::
#define BIT(nr) (1UL << (nr))
@@ -492,6 +773,7 @@ Macros, Attributes and Symbols
The kernel does *not* use the ``__DATE__`` and ``__TIME__`` macros,
and enables warnings if they are used as they can lead to
non-deterministic builds.
+
See: https://www.kernel.org/doc/html/latest/kbuild/reproducible-builds.html#timestamps
**DEFINE_ARCH_HAS**
@@ -502,8 +784,12 @@ Macros, Attributes and Symbols
want architectures able to override them with optimized ones, we
should either use weak functions (appropriate for some cases), or
the symbol that protects them should be the same symbol we use.
+
See: https://lore.kernel.org/lkml/CA+55aFycQ9XJvEOsiM3txHL5bjUc8CeKWJNR_H+MiicaddB42Q@mail.gmail.com/
+ **DO_WHILE_MACRO_WITH_TRAILING_SEMICOLON**
+ do {} while(0) macros should not have a trailing semicolon.
+
**INIT_ATTRIBUTE**
Const init definitions should use __initconst instead of
__initdata.
@@ -528,6 +814,20 @@ Macros, Attributes and Symbols
...
}
+ **MISPLACED_INIT**
+ It is possible to use section markers on variables in a way
+ which gcc doesn't understand (or at least not the way the
+ developer intended)::
+
+ static struct __initdata samsung_pll_clock exynos4_plls[nr_plls] = {
+
+ does not put exynos4_plls in the .initdata section. The __initdata
+ marker can be virtually anywhere on the line, except right after
+ "struct". The preferred location is before the "=" sign if there is
+ one, or before the trailing ";" otherwise.
+
+ See: https://lore.kernel.org/lkml/1377655732.3619.19.camel@joe-AO722/
+
**MULTISTATEMENT_MACRO_USE_DO_WHILE**
Macros with multiple statements should be enclosed in a
do - while block. Same should also be the case for macros
@@ -541,6 +841,10 @@ Macros, Attributes and Symbols
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#macros-enums-and-rtl
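+
+    A hypothetical multi-statement macro following this rule::
+
+      #define frob_and_count(x)                 \
+              do {                              \
+                      frob(x);                  \
+                      frob_count++;             \
+              } while (0)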
+ **PREFER_FALLTHROUGH**
+ Use the `fallthrough;` pseudo keyword instead of
+ `/* fallthrough */` like comments.
+
**WEAK_DECLARATION**
Using weak declarations like __attribute__((weak)) or __weak
can have unintended link defects. Avoid using them.
@@ -551,8 +855,51 @@ Functions and Variables
**CAMELCASE**
Avoid CamelCase Identifiers.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#naming
+ **CONST_CONST**
+ Using `const <type> const *` is generally meant to be
+ written `const <type> * const`.
+
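+    For example::
+
+      const char const *s;    /* flagged; usually meant to be: */
+      const char * const s;
+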
+ **CONST_STRUCT**
+ Using const is generally a good idea. Checkpatch reads
+ a list of frequently used structs that are always or
+ almost always constant.
+
+ The existing structs list can be viewed from
+ `scripts/const_structs.checkpatch`.
+
+ See: https://lore.kernel.org/lkml/alpine.DEB.2.10.1608281509480.3321@hadrien/
+
+ **EMBEDDED_FUNCTION_NAME**
+    Embedded function names are less appropriate to use, as refactoring
+    can cause function renaming. Prefer the use of
+ "%s", __func__ to embedded function names.
+
+ Note that this does not work with -f (--file) checkpatch option
+ as it depends on patch context providing the function name.
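+
+    For example (``frob_init`` being just a sample name), prefer::
+
+      pr_err("%s: invalid argument\n", __func__);
+
+    over::
+
+      pr_err("frob_init: invalid argument\n");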
+
+ **FUNCTION_ARGUMENTS**
+ This warning is emitted due to any of the following reasons:
+
+ 1. Arguments for the function declaration do not follow
+ the identifier name. Example::
+
+ void foo
+ (int bar, int baz)
+
+ This should be corrected to::
+
+ void foo(int bar, int baz)
+
+ 2. Some arguments for the function definition do not
+ have an identifier name. Example::
+
+ void foo(int)
+
+ All arguments should have identifier names.
+
**FUNCTION_WITHOUT_ARGS**
Function declarations without arguments like::
@@ -583,6 +930,34 @@ Functions and Variables
return bar;
+Permissions
+-----------
+
+ **DEVICE_ATTR_PERMS**
+ The permissions used in DEVICE_ATTR are unusual.
+ Typically only three permissions are used - 0644 (RW), 0444 (RO)
+ and 0200 (WO).
+
+ See: https://www.kernel.org/doc/html/latest/filesystems/sysfs.html#attributes
+
+ **EXECUTE_PERMISSIONS**
+ There is no reason for source files to be executable. The executable
+ bit can be removed safely.
+
+ **EXPORTED_WORLD_WRITABLE**
+ Exporting world writable sysfs/debugfs files is usually a bad thing.
+ When done arbitrarily they can introduce serious security bugs.
+ In the past, some of the debugfs vulnerabilities would seemingly allow
+ any local user to write arbitrary values into device registers - a
+ situation from which little good can be expected to emerge.
+
+ See: https://lore.kernel.org/linux-arm-kernel/cover.1296818921.git.segoon@openwall.com/
+
+ **NON_OCTAL_PERMISSIONS**
+ Permission bits should use 4 digit octal permissions (like 0700 or 0444).
+ Avoid using any other base like decimal.
+
+
Spacing and Brackets
--------------------
@@ -616,7 +991,7 @@ Spacing and Brackets
1. With a type on the left::
- ;int [] a;
+ int [] a;
2. At the beginning of a line for slice initialisers::
@@ -626,12 +1001,6 @@ Spacing and Brackets
= { [0...10] = 5 }
- **CODE_INDENT**
- Code indent should use tabs instead of spaces.
- Outside of comments, documentation and Kconfig,
- spaces are never used for indentation.
- See: https://www.kernel.org/doc/html/latest/process/coding-style.html#indentation
-
**CONCATENATED_STRING**
Concatenated elements should have a space in between.
Example::
@@ -644,17 +1013,20 @@ Spacing and Brackets
**ELSE_AFTER_BRACE**
`else {` should follow the closing block `}` on the same line.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#placing-braces-and-spaces
**LINE_SPACING**
Vertical space is wasted given the limited number of lines an
editor window can display when multiple blank lines are used.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#spaces
**OPEN_BRACE**
The opening brace should be following the function definitions on the
next line. For any non-functional block it should be on the same line
as the last construct.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#placing-braces-and-spaces
**POINTER_LOCATION**
@@ -671,37 +1043,47 @@ Spacing and Brackets
**SPACING**
Whitespace style used in the kernel sources is described in kernel docs.
- See: https://www.kernel.org/doc/html/latest/process/coding-style.html#spaces
- **SWITCH_CASE_INDENT_LEVEL**
- switch should be at the same indent as case.
- Example::
-
- switch (suffix) {
- case 'G':
- case 'g':
- mem <<= 30;
- break;
- case 'M':
- case 'm':
- mem <<= 20;
- break;
- case 'K':
- case 'k':
- mem <<= 10;
- /* fall through */
- default:
- break;
- }
-
- See: https://www.kernel.org/doc/html/latest/process/coding-style.html#indentation
+ See: https://www.kernel.org/doc/html/latest/process/coding-style.html#spaces
**TRAILING_WHITESPACE**
Trailing whitespace should always be removed.
Some editors highlight trailing whitespace, which causes visual
distraction when editing files.
+
See: https://www.kernel.org/doc/html/latest/process/coding-style.html#spaces
+ **UNNECESSARY_PARENTHESES**
+ Parentheses are not required in the following cases:
+
+ 1. Function pointer uses::
+
+ (foo->bar)();
+
+ could be::
+
+ foo->bar();
+
+ 2. Comparisons in if::
+
+ if ((foo->bar) && (foo->baz))
+ if ((foo == bar))
+
+ could be::
+
+ if (foo->bar && foo->baz)
+ if (foo == bar)
+
+ 3. Address-of/dereference of single lvalues::
+
+ &(foo->bar)
+ *(foo->bar)
+
+ could be::
+
+ &foo->bar
+ *foo->bar
+
**WHILE_AFTER_BRACE**
while should follow the closing brace on the same line::
@@ -723,17 +1105,50 @@ Others
The patch seems to be corrupted or lines are wrapped.
Please regenerate the patch file before sending it to the maintainer.
+ **CVS_KEYWORD**
+ Since Linux moved to git, the CVS markers are no longer used.
+ So, CVS-style keywords ($Id$, $Revision$, $Log$) should not
+ be added.
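+
+ For example, a header comment like::
+
+     /* $Id: foo.c,v 1.1 2001/01/01 00:00:00 user Exp $ */
+
+ should be dropped.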
+
+ **DEFAULT_NO_BREAK**
+ A switch default case is sometimes written as "default:;". This
+ can cause new cases added below the default to be defective.
+
+ A "break;" should be added after an empty default statement to
+ avoid unwanted fallthrough.
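+
+ For example, a sketch of the recommended form::
+
+     switch (foo) {
+     case 1:
+             bar();
+             break;
+     default:
+             break;
+     }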
+
**DOS_LINE_ENDINGS**
For DOS-formatted patches, there are extra ^M symbols at the end of
the line. These should be removed.
- **EXECUTE_PERMISSIONS**
- There is no reason for source files to be executable. The executable
- bit can be removed safely.
+ **DT_SCHEMA_BINDING_PATCH**
+ DT bindings moved to a json-schema based format instead of
+ freeform text.
- **NON_OCTAL_PERMISSIONS**
- Permission bits should use 4 digit octal permissions (like 0700 or 0444).
- Avoid using any other base like decimal.
+ See: https://www.kernel.org/doc/html/latest/devicetree/bindings/writing-schema.html
+
+ **DT_SPLIT_BINDING_PATCH**
+ Devicetree bindings should be their own patch. This is because
+ bindings are logically independent from a driver implementation,
+ they have a different maintainer (even though they often
+ are applied via the same tree), and it makes for a cleaner history in the
+ DT only tree created with git-filter-branch.
+
+ See: https://www.kernel.org/doc/html/latest/devicetree/bindings/submitting-patches.html#i-for-patch-submitters
+
+ **EMBEDDED_FILENAME**
+ Embedding the complete filename path inside the file isn't
+ particularly useful as the file is often moved around and the
+ embedded path becomes incorrect.
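+
+ For example, a comment like::
+
+     /* drivers/net/foo.c */
+
+ at the top of drivers/net/foo.c would be flagged.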
+
+ **FILE_PATH_CHANGES**
+ Whenever files are added, moved, or deleted, the MAINTAINERS file
+ patterns can be out of sync or outdated.
+
+ So MAINTAINERS might need updating in these cases.
+
+ **MEMSET**
+ The memset use appears to be incorrect. This may be caused by
+ badly ordered parameters. Please recheck the usage.
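+
+ Example::
+
+     memset(foo, sizeof(foo), 0);
+
+ most likely intends::
+
+     memset(foo, 0, sizeof(foo));
+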
**NOT_UNIFIED_DIFF**
The patch file does not appear to be in unified-diff format. Please
@@ -742,14 +1157,12 @@ Others
**PRINTF_0XDECIMAL**
Prefixing 0x with decimal output is defective and should be corrected.
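
For example::

    pr_info("base: 0x%d\n", base);

should use "0x%x" (or "%#x") instead of "0x%d".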
- **TRAILING_STATEMENTS**
- Trailing statements (for example after any conditional) should be
- on the next line.
- Like::
-
- if (x == y) break;
+ **SPDX_LICENSE_TAG**
+ The source file is missing or has an improper SPDX identifier tag.
+ The Linux kernel requires the precise SPDX identifier in all source files,
+ and it is thoroughly documented in the kernel docs.
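+
+ For example, the first line of a C source file::
+
+     // SPDX-License-Identifier: GPL-2.0
+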
- should be::
+ See: https://www.kernel.org/doc/html/latest/process/license-rules.html
- if (x == y)
- break;
+ **TYPO_SPELLING**
+ Some words may have been misspelled. Consider reviewing them.
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index d3f335ffc751..83ec4a556c19 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -447,11 +447,10 @@ When a test fails due to a failed ``kmalloc``::
When a test fails due to a missing KASAN report::
- # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629
- Expected kasan_data->report_expected == kasan_data->report_found, but
- kasan_data->report_expected == 1
- kasan_data->report_found == 0
- not ok 28 - kmalloc_double_kzfree
+ # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:974
+ KASAN failure expected in "kfree_sensitive(ptr)", but none occurred
+ not ok 44 - kmalloc_double_kzfree
+
At the end the cumulative status of all KASAN tests is printed. On success::
diff --git a/Documentation/dev-tools/kunit/api/index.rst b/Documentation/dev-tools/kunit/api/index.rst
index 9b9bffe5d41a..b33ad72bcf0b 100644
--- a/Documentation/dev-tools/kunit/api/index.rst
+++ b/Documentation/dev-tools/kunit/api/index.rst
@@ -10,7 +10,7 @@ API Reference
This section documents the KUnit kernel testing API. It is divided into the
following sections:
-================================= ==============================================
-:doc:`test` documents all of the standard testing API
- excluding mocking or mocking related features.
-================================= ==============================================
+Documentation/dev-tools/kunit/api/test.rst
+
+ - documents all of the standard testing API excluding mocking
+ or mocking related features.
diff --git a/Documentation/dev-tools/kunit/faq.rst b/Documentation/dev-tools/kunit/faq.rst
index 8d5029ad210a..5c6555d020f3 100644
--- a/Documentation/dev-tools/kunit/faq.rst
+++ b/Documentation/dev-tools/kunit/faq.rst
@@ -97,7 +97,7 @@ things to try.
modules will automatically execute associated tests when loaded. Test results
can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and
can be parsed with ``kunit.py parse``. For more details, see "KUnit on
- non-UML architectures" in :doc:`usage`.
+ non-UML architectures" in Documentation/dev-tools/kunit/usage.rst.
If none of the above tricks help, you are always welcome to email any issues to
kunit-dev@googlegroups.com.
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index 848478838347..25d92a9a05ea 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -36,7 +36,7 @@ To make running these tests (and reading the results) easier, KUnit offers
results. This provides a quick way of running KUnit tests during development,
without requiring a virtual machine or separate hardware.
-Get started now: :doc:`start`
+Get started now: Documentation/dev-tools/kunit/start.rst
Why KUnit?
==========
@@ -88,9 +88,9 @@ it takes to read their test log?
How do I use it?
================
-* :doc:`start` - for new users of KUnit
-* :doc:`tips` - for short examples of best practices
-* :doc:`usage` - for a more detailed explanation of KUnit features
-* :doc:`api/index` - for the list of KUnit APIs used for testing
-* :doc:`kunit-tool` - for more information on the kunit_tool helper script
-* :doc:`faq` - for answers to some common questions about KUnit
+* Documentation/dev-tools/kunit/start.rst - for new users of KUnit
+* Documentation/dev-tools/kunit/tips.rst - for short examples of best practices
+* Documentation/dev-tools/kunit/usage.rst - for a more detailed explanation of KUnit features
+* Documentation/dev-tools/kunit/api/index.rst - for the list of KUnit APIs used for testing
+* Documentation/dev-tools/kunit/kunit-tool.rst - for more information on the kunit_tool helper script
+* Documentation/dev-tools/kunit/faq.rst - for answers to some common questions about KUnit
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 0e65cabe08eb..63ef7b625c13 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -21,7 +21,7 @@ The wrapper can be run with:
./tools/testing/kunit/kunit.py run
For more information on this wrapper (also called kunit_tool) check out the
-:doc:`kunit-tool` page.
+Documentation/dev-tools/kunit/kunit-tool.rst page.
Creating a .kunitconfig
-----------------------
@@ -234,7 +234,7 @@ Congrats! You just wrote your first KUnit test!
Next Steps
==========
-* Check out the :doc:`tips` page for tips on
+* Check out the Documentation/dev-tools/kunit/tips.rst page for tips on
writing idiomatic KUnit tests.
* Optional: see the :doc:`usage` page for a more
in-depth explanation of KUnit.
diff --git a/Documentation/dev-tools/kunit/tips.rst b/Documentation/dev-tools/kunit/tips.rst
index 8d8c238f7f79..492d2ded2f5a 100644
--- a/Documentation/dev-tools/kunit/tips.rst
+++ b/Documentation/dev-tools/kunit/tips.rst
@@ -125,7 +125,8 @@ Here's a slightly in-depth example of how one could implement "mocking":
Note: here we're able to get away with using ``test->priv``, but if you wanted
-something more flexible you could use a named ``kunit_resource``, see :doc:`api/test`.
+something more flexible you could use a named ``kunit_resource``, see
+Documentation/dev-tools/kunit/api/test.rst.
Failing the current test
------------------------
@@ -185,5 +186,5 @@ Alternatively, one can take full control over the error message by using ``KUNIT
Next Steps
==========
-* Optional: see the :doc:`usage` page for a more
+* Optional: see the Documentation/dev-tools/kunit/usage.rst page for a more
in-depth explanation of KUnit.
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 650f99590df5..3ee7ab91f712 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -10,7 +10,7 @@ understand it. This guide assumes a working knowledge of the Linux kernel and
some basic knowledge of testing.
For a high level introduction to KUnit, including setting up KUnit for your
-project, see :doc:`start`.
+project, see Documentation/dev-tools/kunit/start.rst.
Organization of this document
=============================
@@ -99,7 +99,8 @@ violated; however, the test will continue running, potentially trying other
expectations until the test case ends or is otherwise terminated. This is as
opposed to *assertions* which are discussed later.
-To learn about more expectations supported by KUnit, see :doc:`api/test`.
+To learn about more expectations supported by KUnit, see
+Documentation/dev-tools/kunit/api/test.rst.
.. note::
A single test case should be pretty short, pretty easy to understand,
@@ -216,7 +217,8 @@ test suite in a special linker section so that it can be run by KUnit either
after late_init, or when the test module is loaded (depending on whether the
test was built in or not).
-For more information on these types of things see the :doc:`api/test`.
+For more information on these types of things see the
+Documentation/dev-tools/kunit/api/test.rst.
Common Patterns
===============
diff --git a/Documentation/dev-tools/testing-overview.rst b/Documentation/dev-tools/testing-overview.rst
index b5b46709969c..65feb81edb14 100644
--- a/Documentation/dev-tools/testing-overview.rst
+++ b/Documentation/dev-tools/testing-overview.rst
@@ -71,15 +71,15 @@ can be used to verify that a test is executing particular functions or lines
of code. This is useful for determining how much of the kernel is being tested,
and for finding corner-cases which are not covered by the appropriate test.
-:doc:`gcov` is GCC's coverage testing tool, which can be used with the kernel
-to get global or per-module coverage. Unlike KCOV, it does not record per-task
-coverage. Coverage data can be read from debugfs, and interpreted using the
-usual gcov tooling.
-
-:doc:`kcov` is a feature which can be built in to the kernel to allow
-capturing coverage on a per-task level. It's therefore useful for fuzzing and
-other situations where information about code executed during, for example, a
-single syscall is useful.
+Documentation/dev-tools/gcov.rst is GCC's coverage testing tool, which can be
+used with the kernel to get global or per-module coverage. Unlike KCOV, it
+does not record per-task coverage. Coverage data can be read from debugfs,
+and interpreted using the usual gcov tooling.
+
+Documentation/dev-tools/kcov.rst is a feature which can be built in to the
+kernel to allow capturing coverage on a per-task level. It's therefore useful
+for fuzzing and other situations where information about code executed during,
+for example, a single syscall is useful.
Dynamic Analysis Tools
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt
deleted file mode 100644
index 897eedfa2bc8..000000000000
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-actmon.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-NVIDIA Tegra Activity Monitor
-
-The activity monitor block collects statistics about the behaviour of other
-components in the system. This information can be used to derive the rate at
-which the external memory needs to be clocked in order to serve all requests
-from the monitored clients.
-
-Required properties:
-- compatible: should be "nvidia,tegra<chip>-actmon"
-- reg: offset and length of the register set for the device
-- interrupts: standard interrupt property
-- clocks: Must contain a phandle and clock specifier pair for each entry in
-clock-names. See ../../clock/clock-bindings.txt for details.
-- clock-names: Must include the following entries:
- - actmon
- - emc
-- resets: Must contain an entry for each entry in reset-names. See
-../../reset/reset.txt for details.
-- reset-names: Must include the following entries:
- - actmon
-- operating-points-v2: See ../bindings/opp/opp.txt for details.
-- interconnects: Should contain entries for memory clients sitting on
- MC->EMC memory interconnect path.
-- interconnect-names: Should include name of the interconnect path for each
- interconnect entry. Consult TRM documentation for
- information about available memory clients, see MEMORY
- CONTROLLER section.
-
-For each opp entry in 'operating-points-v2' table:
-- opp-supported-hw: bitfield indicating SoC speedo ID mask
-- opp-peak-kBps: peak bandwidth of the memory channel
-
-Example:
- dfs_opp_table: opp-table {
- compatible = "operating-points-v2";
-
- opp@12750000 {
- opp-hz = /bits/ 64 <12750000>;
- opp-supported-hw = <0x000F>;
- opp-peak-kBps = <51000>;
- };
- ...
- };
-
- actmon@6000c800 {
- compatible = "nvidia,tegra124-actmon";
- reg = <0x0 0x6000c800 0x0 0x400>;
- interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA124_CLK_ACTMON>,
- <&tegra_car TEGRA124_CLK_EMC>;
- clock-names = "actmon", "emc";
- resets = <&tegra_car 119>;
- reset-names = "actmon";
- operating-points-v2 = <&dfs_opp_table>;
- interconnects = <&mc TEGRA124_MC_MPCORER &emc>;
- interconnect-names = "cpu";
- };
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 32509b98142e..92b49bc37939 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -149,6 +149,17 @@ properties:
maxItems: 6
$ref: /schemas/types.yaml#/definitions/uint32-array
+ sink-vdos-v1:
+ description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+ providing additional information corresponding to the product, the detailed bit
+ definitions and the order of each VDO can be found in
+ "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+ Identity. The user can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+ dt-bindings/usb/pd.h.
+ minItems: 3
+ maxItems: 6
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
op-sink-microwatt:
description: Sink required operating power in microwatt, if source can't
offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
SNK_READY for non-pd link.
type: boolean
+dependencies:
+ sink-vdos-v1: [ 'sink-vdos' ]
+ sink-vdos: [ 'sink-vdos-v1' ]
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/crypto/cortina,sl3516-crypto.yaml b/Documentation/devicetree/bindings/crypto/cortina,sl3516-crypto.yaml
new file mode 100644
index 000000000000..b633b8d0e6f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/cortina,sl3516-crypto.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/cortina,sl3516-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SL3516 cryptographic offloader driver
+
+maintainers:
+ - Corentin Labbe <clabbe@baylibre.com>
+
+properties:
+ compatible:
+ enum:
+ - cortina,sl3516-crypto
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/cortina,gemini-clock.h>
+ #include <dt-bindings/reset/cortina,gemini-reset.h>
+
+ crypto@62000000 {
+ compatible = "cortina,sl3516-crypto";
+ reg = <0x62000000 0x10000>;
+ interrupts = <7 IRQ_TYPE_EDGE_RISING>;
+ resets = <&syscon GEMINI_RESET_SECURITY>;
+ clocks = <&syscon GEMINI_CLK_GATE_SECURITY>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/intel,ixp4xx-crypto.yaml b/Documentation/devicetree/bindings/crypto/intel,ixp4xx-crypto.yaml
new file mode 100644
index 000000000000..9c53c27bd20a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/intel,ixp4xx-crypto.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2018 Linaro Ltd.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/crypto/intel,ixp4xx-crypto.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel IXP4xx cryptographic engine
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The Intel IXP4xx cryptographic engine makes use of the IXP4xx NPE
+ (Network Processing Engine). Since it is not a device on its own
+ it is defined as a subnode of the NPE, if crypto support is
+ available on the platform.
+
+properties:
+ compatible:
+ const: intel,ixp4xx-crypto
+
+ intel,npe-handle:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ maxItems: 1
+ description: phandle to the NPE this crypto engine is using, the cell
+ describing the NPE instance to be used.
+
+ queue-rx:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description: phandle to the RX queue on the NPE, the cell describing
+ the queue instance to be used.
+
+ queue-txready:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description: phandle to the TX READY queue on the NPE, the cell describing
+ the queue instance to be used.
+
+required:
+ - compatible
+ - intel,npe-handle
+ - queue-rx
+ - queue-txready
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/devfreq/nvidia,tegra30-actmon.yaml b/Documentation/devicetree/bindings/devfreq/nvidia,tegra30-actmon.yaml
new file mode 100644
index 000000000000..e3379d106728
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/nvidia,tegra30-actmon.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/devfreq/nvidia,tegra30-actmon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra30 Activity Monitor
+
+maintainers:
+ - Dmitry Osipenko <digetx@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ The activity monitor block collects statistics about the behaviour of other
+ components in the system. This information can be used to derive the rate at
+ which the external memory needs to be clocked in order to serve all requests
+ from the monitored clients.
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra30-actmon
+ - nvidia,tegra114-actmon
+ - nvidia,tegra124-actmon
+ - nvidia,tegra210-actmon
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: actmon
+ - const: emc
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: actmon
+
+ interrupts:
+ maxItems: 1
+
+ interconnects:
+ minItems: 1
+ maxItems: 12
+
+ interconnect-names:
+ minItems: 1
+ maxItems: 12
+ description:
+ Should include name of the interconnect path for each interconnect
+ entry. Consult TRM documentation for information about available
+ memory clients, see MEMORY CONTROLLER and ACTIVITY MONITOR sections.
+
+ operating-points-v2:
+ description:
+ Should contain freqs and voltages and opp-supported-hw property, which
+ is a bitfield indicating SoC speedo ID mask.
+
+ "#cooling-cells":
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - interrupts
+ - interconnects
+ - interconnect-names
+ - operating-points-v2
+ - "#cooling-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/memory/tegra30-mc.h>
+
+ mc: memory-controller@7000f000 {
+ compatible = "nvidia,tegra30-mc";
+ reg = <0x7000f000 0x400>;
+ clocks = <&clk 32>;
+ clock-names = "mc";
+
+ interrupts = <0 77 4>;
+
+ #iommu-cells = <1>;
+ #reset-cells = <1>;
+ #interconnect-cells = <1>;
+ };
+
+ emc: external-memory-controller@7000f400 {
+ compatible = "nvidia,tegra30-emc";
+ reg = <0x7000f400 0x400>;
+ interrupts = <0 78 4>;
+ clocks = <&clk 57>;
+
+ nvidia,memory-controller = <&mc>;
+ operating-points-v2 = <&dvfs_opp_table>;
+ power-domains = <&domain>;
+
+ #interconnect-cells = <0>;
+ };
+
+ actmon@6000c800 {
+ compatible = "nvidia,tegra30-actmon";
+ reg = <0x6000c800 0x400>;
+ interrupts = <0 45 4>;
+ clocks = <&clk 119>, <&clk 57>;
+ clock-names = "actmon", "emc";
+ resets = <&rst 119>;
+ reset-names = "actmon";
+ operating-points-v2 = <&dvfs_opp_table>;
+ interconnects = <&mc TEGRA30_MC_MPCORER &emc>;
+ interconnect-names = "cpu-read";
+ #cooling-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
index 1bd2870c3a9c..c435c9f369a4 100644
--- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -26,9 +26,16 @@ properties:
reg:
items:
- - description: NPE0 register range
- - description: NPE1 register range
- - description: NPE2 register range
+ - description: NPE0 (NPE-A) register range
+ - description: NPE1 (NPE-B) register range
+ - description: NPE2 (NPE-C) register range
+
+ crypto:
+ $ref: /schemas/crypto/intel,ixp4xx-crypto.yaml#
+ type: object
+ description: Optional node for the embedded crypto engine; the node
+ should be named with the instance number of the NPE engine used for
+ the crypto engine.
required:
- compatible
@@ -38,8 +45,15 @@ additionalProperties: false
examples:
- |
- npe@c8006000 {
+ npe: npe@c8006000 {
compatible = "intel,ixp4xx-network-processing-engine";
reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>;
+
+ crypto {
+ compatible = "intel,ixp4xx-crypto";
+ intel,npe-handle = <&npe 2>;
+ queue-rx = <&qmgr 30>;
+ queue-txready = <&qmgr 29>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/hwmon/lm75.yaml b/Documentation/devicetree/bindings/hwmon/lm75.yaml
index 96eed5cc7841..72980d083c21 100644
--- a/Documentation/devicetree/bindings/hwmon/lm75.yaml
+++ b/Documentation/devicetree/bindings/hwmon/lm75.yaml
@@ -30,6 +30,7 @@ properties:
- st,stds75
- st,stlm75
- microchip,tcn75
+ - ti,tmp1075
- ti,tmp100
- ti,tmp101
- ti,tmp105
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
index 33ee575bb09d..926be9a29044 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
@@ -49,7 +49,7 @@ examples:
#size-cells = <0>;
adc@48 {
- comatible = "ti,ads7828";
+ compatible = "ti,ads7828";
reg = <0x48>;
vref-supply = <&vref>;
ti,differential-input;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
index 7b553d559c83..98c6fcf7bf26 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
@@ -46,6 +46,13 @@ properties:
description: |
I2C bus timeout in microseconds
+ fsl,i2c-erratum-a004447:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ Indicates the presence of QorIQ erratum A-004447, which
+ says that the standard i2c recovery mechanism does
+ not work and an alternate implementation is needed.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 1ecd1831cf02..c84f9fe7f254 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -145,6 +145,19 @@ properties:
required:
- affinity
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: aclk
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
dependencies:
mbi-ranges: [ msi-controller ]
msi-controller: [ mbi-ranges ]
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
index b67b8cbd33fc..abb22db3bb28 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
@@ -29,6 +29,7 @@ properties:
- renesas,intc-ex-r8a774c0 # RZ/G2E
- renesas,intc-ex-r8a7795 # R-Car H3
- renesas,intc-ex-r8a7796 # R-Car M3-W
+ - renesas,intc-ex-r8a77961 # R-Car M3-W+
- renesas,intc-ex-r8a77965 # R-Car M3-N
- renesas,intc-ex-r8a77970 # R-Car V3M
- renesas,intc-ex-r8a77980 # R-Car V3H
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml
new file mode 100644
index 000000000000..4ff6fabfcb30
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ipmi/aspeed,ast2400-kcs-bmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASPEED BMC KCS Devices
+
+maintainers:
+ - Andrew Jeffery <andrew@aj.id.au>
+
+description: |
+ The Aspeed BMC SoCs typically use the Keyboard-Controller-Style (KCS)
+ interfaces on the LPC bus for in-band IPMI communication with their host.
+
+properties:
+ compatible:
+ oneOf:
+ - description: Channel ID derived from reg
+ items:
+ enum:
+ - aspeed,ast2400-kcs-bmc-v2
+ - aspeed,ast2500-kcs-bmc-v2
+ - aspeed,ast2600-kcs-bmc
+
+ - description: Old-style with explicit channel ID, no reg
+ deprecated: true
+ items:
+ enum:
+ - aspeed,ast2400-kcs-bmc
+ - aspeed,ast2500-kcs-bmc
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ # maxItems: 3
+ items:
+ - description: IDR register
+ - description: ODR register
+ - description: STR register
+
+ aspeed,lpc-io-reg:
+ $ref: '/schemas/types.yaml#/definitions/uint32-array'
+ minItems: 1
+ maxItems: 2
+ description: |
+ The host CPU LPC IO data and status addresses for the device. For most
+ channels the status address is derived from the data address, but the
+ status address may be optionally provided.
+
+ aspeed,lpc-interrupts:
+ $ref: "/schemas/types.yaml#/definitions/uint32-array"
+ minItems: 2
+ maxItems: 2
+ description: |
+ A 2-cell property expressing the LPC SerIRQ number and the interrupt
+ level/sense encoding (specified in the standard fashion).
+
+ Note that the generated interrupt is issued from the BMC to the host, and
+ thus the target interrupt controller is not captured by the BMC's
+ devicetree.
+
+ kcs_chan:
+ deprecated: true
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: The LPC channel number in the controller
+
+ kcs_addr:
+ deprecated: true
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: The host CPU IO map address
+
+required:
+ - compatible
+ - interrupts
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - aspeed,ast2400-kcs-bmc
+ - aspeed,ast2500-kcs-bmc
+ then:
+ required:
+ - kcs_chan
+ - kcs_addr
+ else:
+ required:
+ - reg
+ - aspeed,lpc-io-reg
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ kcs3: kcs@24 {
+ compatible = "aspeed,ast2600-kcs-bmc";
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
+ aspeed,lpc-io-reg = <0xca2>;
+ aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed-kcs-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed-kcs-bmc.txt
deleted file mode 100644
index 193e71ca96b0..000000000000
--- a/Documentation/devicetree/bindings/ipmi/aspeed-kcs-bmc.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# Aspeed KCS (Keyboard Controller Style) IPMI interface
-
-The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
-(Baseboard Management Controllers) and the KCS interface can be
-used to perform in-band IPMI communication with their host.
-
-## v1
-Required properties:
-- compatible : should be one of
- "aspeed,ast2400-kcs-bmc"
- "aspeed,ast2500-kcs-bmc"
-- interrupts : interrupt generated by the controller
-- kcs_chan : The LPC channel number in the controller
-- kcs_addr : The host CPU IO map address
-
-## v2
-Required properties:
-- compatible : should be one of
- "aspeed,ast2400-kcs-bmc-v2"
- "aspeed,ast2500-kcs-bmc-v2"
-- reg : The address and size of the IDR, ODR and STR registers
-- interrupts : interrupt generated by the controller
-- aspeed,lpc-io-reg : The host CPU LPC IO address for the device
-
-Example:
-
- kcs3: kcs@24 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
- aspeed,lpc-reg = <0xca2>;
- interrupts = <8>;
- status = "okay";
- };
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
index 1a3dff277e2b..675ad9de15bb 100644
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
@@ -26,6 +26,7 @@ properties:
oneOf:
- const: fsl,imx6sx-mu
- const: fsl,imx7ulp-mu
+ - const: fsl,imx8ulp-mu
- const: fsl,imx8-mu-scu
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/mailbox/microchip,polarfire-soc-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/microchip,polarfire-soc-mailbox.yaml
new file mode 100644
index 000000000000..bbb173ea483c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/microchip,polarfire-soc-mailbox.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/microchip,polarfire-soc-mailbox.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Microchip PolarFire SoC (MPFS) MSS (microprocessor subsystem) mailbox controller
+
+maintainers:
+ - Conor Dooley <conor.dooley@microchip.com>
+
+properties:
+ compatible:
+ const: microchip,polarfire-soc-mailbox
+
+ reg:
+ items:
+ - description: mailbox data registers
+ - description: mailbox interrupt registers
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ mbox: mailbox@37020000 {
+ compatible = "microchip,polarfire-soc-mailbox";
+ reg = <0x0 0x37020000 0x0 0x1000>, <0x0 0x2000318c 0x0 0x40>;
+ interrupt-parent = <&L1>;
+ interrupts = <96>;
+ #mbox-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index 5dc1173d03fd..8878ec00820e 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,ipq6018-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,msm8916-apcs-kpss-global
+ - qcom,msm8939-apcs-kpss-global
- qcom,msm8994-apcs-kpss-global
- qcom,msm8996-apcs-hmss-global
- qcom,msm8998-apcs-hmss-global
@@ -27,6 +28,7 @@ properties:
- qcom,sc8180x-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
+ - qcom,sm6125-apcs-hmss-global
- qcom,sm8150-apss-shared
reg:
@@ -75,6 +77,7 @@ allOf:
- qcom,sc7180-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
+ - qcom,sm6125-apcs-hmss-global
- qcom,sm8150-apss-shared
then:
properties:
diff --git a/Documentation/devicetree/bindings/media/atmel,isc.yaml b/Documentation/devicetree/bindings/media/atmel,isc.yaml
new file mode 100644
index 000000000000..3e4bb8892d94
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/atmel,isc.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2016-2021 Microchip Technology, Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/atmel,isc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel Image Sensor Controller (ISC)
+
+maintainers:
+ - Eugen Hristev <eugen.hristev@microchip.com>
+
+description: |
+ The Image Sensor Controller (ISC) device provides the video input capabilities for the
+ Atmel/Microchip AT91 SAMA family of devices.
+
+ The ISC has a single parallel input that supports RAW Bayer, RGB or YUV video,
+ with both external synchronization and BT.656 synchronization for the latter.
+
+properties:
+ compatible:
+ const: atmel,sama5d2-isc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 3
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: hclock
+ - const: iscck
+ - const: gck
+
+ '#clock-cells':
+ const: 0
+
+ clock-output-names:
+ const: isc-mck
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input port node, single endpoint describing the input pad.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+
+ properties:
+ remote-endpoint: true
+
+ bus-width:
+ enum: [8, 9, 10, 11, 12]
+ default: 12
+
+ hsync-active:
+ enum: [0, 1]
+ default: 1
+
+ vsync-active:
+ enum: [0, 1]
+ default: 1
+
+ pclk-sample:
+ enum: [0, 1]
+ default: 1
+
+ required:
+ - remote-endpoint
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - clock-output-names
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ isc: isc@f0008000 {
+ compatible = "atmel,sama5d2-isc";
+ reg = <0xf0008000 0x4000>;
+ interrupts = <46 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&isc_clk>, <&iscck>, <&isc_gclk>;
+ clock-names = "hclock", "iscck", "gck";
+ #clock-cells = <0>;
+ clock-output-names = "isc-mck";
+
+ port {
+ isc_0: endpoint {
+ remote-endpoint = <&ov7740_0>;
+ hsync-active = <1>;
+ vsync-active = <0>;
+ pclk-sample = <1>;
+ bus-width = <8>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/atmel-isc.txt b/Documentation/devicetree/bindings/media/atmel-isc.txt
deleted file mode 100644
index bbe0e87c6188..000000000000
--- a/Documentation/devicetree/bindings/media/atmel-isc.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-Atmel Image Sensor Controller (ISC)
-----------------------------------------------
-
-Required properties for ISC:
-- compatible
- Must be "atmel,sama5d2-isc".
-- reg
- Physical base address and length of the registers set for the device.
-- interrupts
- Should contain IRQ line for the ISC.
-- clocks
- List of clock specifiers, corresponding to entries in
- the clock-names property;
- Please refer to clock-bindings.txt.
-- clock-names
- Required elements: "hclock", "iscck", "gck".
-- #clock-cells
- Should be 0.
-- clock-output-names
- Should be "isc-mck".
-- pinctrl-names, pinctrl-0
- Please refer to pinctrl-bindings.txt.
-
-ISC supports a single port node with parallel bus. It should contain one
-'port' child node with child 'endpoint' node. Please refer to the bindings
-defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-isc: isc@f0008000 {
- compatible = "atmel,sama5d2-isc";
- reg = <0xf0008000 0x4000>;
- interrupts = <46 IRQ_TYPE_LEVEL_HIGH 5>;
- clocks = <&isc_clk>, <&iscck>, <&isc_gclk>;
- clock-names = "hclock", "iscck", "gck";
- #clock-cells = <0>;
- clock-output-names = "isc-mck";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
-
- port {
- isc_0: endpoint {
- remote-endpoint = <&ov7740_0>;
- hsync-active = <1>;
- vsync-active = <0>;
- pclk-sample = <1>;
- };
- };
-};
-
-i2c1: i2c@fc028000 {
- ov7740: camera@21 {
- compatible = "ovti,ov7740";
- reg = <0x21>;
- clocks = <&isc>;
- clock-names = "xvclk";
- assigned-clocks = <&isc>;
- assigned-clock-rates = <24000000>;
-
- port {
- ov7740_0: endpoint {
- remote-endpoint = <&isc_0>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/media/i2c/rda,rda5807.yaml b/Documentation/devicetree/bindings/media/i2c/rda,rda5807.yaml
new file mode 100644
index 000000000000..f50e54a722eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/rda,rda5807.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/rda,rda5807.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc Communications RDA5807 FM radio receiver
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ compatible:
+ enum:
+ - rda,rda5807
+
+ reg:
+ description: I2C address.
+ maxItems: 1
+
+ power-supply: true
+
+ rda,lnan:
+ description: Use LNAN input port.
+ type: boolean
+
+ rda,lnap:
+ description: Use LNAP input port.
+ type: boolean
+
+ rda,analog-out:
+ description: Enable analog audio output.
+ type: boolean
+
+ rda,i2s-out:
+ description: Enable I2S digital audio output.
+ type: boolean
+
+ rda,lna-microamp:
+ description: LNA working current, in micro-amperes.
+ default: 2500
+ enum: [1800, 2100, 2500, 3000]
+
+required:
+ - compatible
+ - reg
+ - power-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ radio@11 {
+ compatible = "rda,rda5807";
+ reg = <0x11>;
+
+ power-supply = <&ldo6>;
+
+ rda,lnan;
+ rda,lnap;
+ rda,analog-out;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
index 06db6837cefd..ad1321e5a22d 100644
--- a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
@@ -9,6 +9,7 @@ Required properties:
"mediatek,mt8173-vcodec-enc" for mt8173 avc encoder.
"mediatek,mt8183-vcodec-enc" for MT8183 encoder.
"mediatek,mt8173-vcodec-dec" for MT8173 decoder.
+ "mediatek,mt8192-vcodec-enc" for MT8192 encoder.
- reg : Physical base address of the video codec registers and length of
memory mapped region.
- interrupts : interrupt number to the cpu.
@@ -22,6 +23,7 @@ Required properties:
- iommus : should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
+- dma-ranges : describes the dma address range space that the codec hw can access.
One of the two following nodes:
- mediatek,vpu : the node of the video processor unit, if using VPU.
- mediatek,scp : the node of the SCP unit, if using SCP.
diff --git a/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml b/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml
new file mode 100644
index 000000000000..4b77103ca913
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/media/microchip,sama5d4-vdec.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Hantro G1 VPU codec implemented on Microchip SAMA5D4 SoCs
+
+maintainers:
+ - Emil Velikov <emil.velikov@collabora.com>
+
+description:
+ Hantro G1 video decode accelerator present on Microchip SAMA5D4 SoCs.
+
+properties:
+ compatible:
+ const: microchip,sama5d4-vdec
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ vdec0: vdec@300000 {
+ compatible = "microchip,sama5d4-vdec";
+ reg = <0x00300000 0x100000>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ };
diff --git a/Documentation/devicetree/bindings/media/microchip,xisc.yaml b/Documentation/devicetree/bindings/media/microchip,xisc.yaml
new file mode 100644
index 000000000000..41afe2e5f133
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/microchip,xisc.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2021 Microchip Technology, Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/microchip,xisc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip eXtended Image Sensor Controller (XISC)
+
+maintainers:
+ - Eugen Hristev <eugen.hristev@microchip.com>
+
+description: |
+ The eXtended Image Sensor Controller (XISC) device provides the video input capabilities for the
+ Microchip AT91 SAM family of devices.
+
+ The XISC has a single internal parallel input that supports RAW Bayer, RGB or YUV video.
+ The source can be either a demuxer from a CSI2 type of bus, or a simple direct bridge to a
+ parallel sensor.
+
+ The XISC provides one clock output that is used to clock the demuxer/bridge.
+
+properties:
+ compatible:
+ const: microchip,sama7g5-isc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: hclock
+
+ '#clock-cells':
+ const: 0
+
+ clock-output-names:
+ const: isc-mck
+
+ microchip,mipi-mode:
+ type: boolean
+ description:
+ As the XISC is usually connected to a demux/bridge, the XISC receives
+ the same type of input; however, it should be aware of the type of
+ signals received. The mipi-mode enables different internal handling
+ of the data and clock lines.
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input port node, single endpoint describing the input pad.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+
+ properties:
+ bus-type:
+ enum: [5, 6]
+
+ remote-endpoint: true
+
+ bus-width:
+ enum: [8, 9, 10, 11, 12]
+ default: 12
+
+ hsync-active:
+ enum: [0, 1]
+ default: 1
+
+ vsync-active:
+ enum: [0, 1]
+ default: 1
+
+ pclk-sample:
+ enum: [0, 1]
+ default: 1
+
+ required:
+ - remote-endpoint
+ - bus-type
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - clock-output-names
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ xisc: xisc@e1408000 {
+ compatible = "microchip,sama7g5-isc";
+ reg = <0xe1408000 0x2000>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 56>;
+ clock-names = "hclock";
+ #clock-cells = <0>;
+ clock-output-names = "isc-mck";
+
+ port {
+ xisc_in: endpoint {
+ bus-type = <5>; /* Parallel */
+ remote-endpoint = <&csi2dc_out>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ bus-width = <12>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
index d8ed480482b9..7c09eec78ce5 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
@@ -4,15 +4,17 @@
$id: http://devicetree.org/schemas/media/nxp,imx7-mipi-csi2.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: NXP i.MX7 MIPI CSI-2 receiver
+title: NXP i.MX7 and i.MX8 MIPI CSI-2 receiver
maintainers:
- Rui Miguel Silva <rmfrfs@gmail.com>
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
description: |-
- The NXP i.MX7 SoC family includes a MIPI CSI-2 receiver IP core, documented
- as "CSIS V3.3". The IP core seems to originate from Samsung, and may be
- compatible with some of the Exynos4 ad S5P SoCs.
+ The NXP i.MX7 and i.MX8 families contain SoCs that include a MIPI CSI-2
+ receiver IP core named CSIS. The IP core originates from Samsung, and may be
+ compatible with some of the Exynos4 and S5P SoCs. i.MX7 SoCs use CSIS version
+ 3.3, and i.MX8 SoCs use CSIS version 3.6.3.
While the CSI-2 receiver is separate from the MIPI D-PHY IP core, the PHY is
completely wrapped by the CSIS and doesn't expose a control interface of its
@@ -20,7 +22,9 @@ description: |-
properties:
compatible:
- const: fsl,imx7-mipi-csi2
+ enum:
+ - fsl,imx7-mipi-csi2
+ - fsl,imx8mm-mipi-csi2
reg:
maxItems: 1
@@ -29,16 +33,20 @@ properties:
maxItems: 1
clocks:
+ minItems: 3
items:
- description: The peripheral clock (a.k.a. APB clock)
- description: The external clock (optionally used as the pixel clock)
- description: The MIPI D-PHY clock
+ - description: The AXI clock
clock-names:
+ minItems: 3
items:
- const: pclk
- const: wrap
- const: phy
+ - const: axi
power-domains:
maxItems: 1
@@ -71,16 +79,30 @@ properties:
properties:
data-lanes:
- oneOf:
- - items:
- - const: 1
- - items:
- - const: 1
- - const: 2
+ items:
+ minItems: 1
+ maxItems: 4
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
required:
- data-lanes
+ allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx7-mipi-csi2
+ then:
+ properties:
+ data-lanes:
+ items:
+ maxItems: 2
+
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
@@ -93,12 +115,29 @@ required:
- clocks
- clock-names
- power-domains
- - phy-supply
- - resets
- ports
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx7-mipi-csi2
+ then:
+ required:
+ - phy-supply
+ - resets
+ else:
+ properties:
+ clocks:
+ minItems: 4
+ clock-names:
+ minItems: 4
+ phy-supply: false
+ resets: false
+
examples:
- |
#include <dt-bindings/clock/imx7d-clock.h>
@@ -106,7 +145,7 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/reset/imx7-reset.h>
- mipi_csi: mipi-csi@30750000 {
+ mipi-csi@30750000 {
compatible = "fsl,imx7-mipi-csi2";
reg = <0x30750000 0x10000>;
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
@@ -144,4 +183,46 @@ examples:
};
};
+ - |
+ #include <dt-bindings/clock/imx8mm-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mipi-csi@32e30000 {
+ compatible = "fsl,imx8mm-mipi-csi2";
+ reg = <0x32e30000 0x1000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <333000000>;
+ clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>,
+ <&clk IMX8MM_CLK_CSI1_ROOT>,
+ <&clk IMX8MM_CLK_CSI1_PHY_REF>,
+ <&clk IMX8MM_CLK_DISP_AXI_ROOT>;
+ clock-names = "pclk", "wrap", "phy", "axi";
+ power-domains = <&mipi_pd>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ imx8mm_mipi_csi_in: endpoint {
+ remote-endpoint = <&imx477_out>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ imx8mm_mipi_csi_out: endpoint {
+ remote-endpoint = <&csi_in>;
+ };
+ };
+ };
+ };
+
...
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index af9e7e59e5a1..d4c541c4b164 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -45,6 +45,7 @@ properties:
- rc-cec
- rc-cinergy
- rc-cinergy-1400
+ - rc-ct-90405
- rc-d680-dmb
- rc-delock-61959
- rc-dib0700-nec
@@ -125,7 +126,6 @@ properties:
- rc-snapstream-firefly
- rc-streamzap
- rc-su3000
- - rc-tango
- rc-tanix-tx3mini
- rc-tanix-tx5max
- rc-tbs-nec
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.yaml b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
index 20396f1be999..23703b767f5b 100644
--- a/Documentation/devicetree/bindings/media/renesas,csi2.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
@@ -25,6 +25,7 @@ properties:
- renesas,r8a774e1-csi2 # RZ/G2H
- renesas,r8a7795-csi2 # R-Car H3
- renesas,r8a7796-csi2 # R-Car M3-W
+ - renesas,r8a77961-csi2 # R-Car M3-W+
- renesas,r8a77965-csi2 # R-Car M3-N
- renesas,r8a77970-csi2 # R-Car V3M
- renesas,r8a77980-csi2 # R-Car V3H
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.yaml b/Documentation/devicetree/bindings/media/renesas,drif.yaml
index ce505a7c006a..9cd56ff2c316 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -67,9 +67,7 @@ properties:
maxItems: 1
clock-names:
- maxItems: 1
- items:
- - const: fck
+ const: fck
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/renesas,isp.yaml b/Documentation/devicetree/bindings/media/renesas,isp.yaml
new file mode 100644
index 000000000000..514857d36f6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,isp.yaml
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+# Copyright (C) 2021 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/renesas,isp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car ISP Channel Selector
+
+maintainers:
+ - Niklas Söderlund <niklas.soderlund@ragnatech.se>
+
+description:
+ The R-Car ISP Channel Selector provides MIPI CSI-2 VC and DT filtering
+ capabilities for the Renesas R-Car family of devices. It is used in
+ conjunction with the R-Car VIN and CSI-2 modules, which provide the video
+ capture capabilities.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a779a0-isp # V3U
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input port node, multiple endpoints describing the connected R-Car
+ CSI-2 receivers.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 0.
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 1.
+
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 2.
+
+ port@4:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 3.
+
+ port@5:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 4.
+
+ port@6:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 5.
+
+ port@7:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 6.
+
+ port@8:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Single endpoint describing the R-Car VIN connected to output port 7.
+
+ required:
+ - port@0
+ - port@1
+ - port@2
+ - port@3
+ - port@4
+ - port@5
+ - port@6
+ - port@7
+ - port@8
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a779a0-sysc.h>
+
+ isp1: isp@fed20000 {
+ compatible = "renesas,r8a779a0-isp";
+ reg = <0xfed20000 0x10000>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 613>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP01>;
+ resets = <&cpg 613>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+ isp1csi41: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&csi41isp1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ isp1vin08: endpoint {
+ remote-endpoint = <&vin08isp1>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ isp1vin09: endpoint {
+ remote-endpoint = <&vin09isp1>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ isp1vin10: endpoint {
+ remote-endpoint = <&vin10isp1>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ isp1vin11: endpoint {
+ remote-endpoint = <&vin11isp1>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ isp1vin12: endpoint {
+ remote-endpoint = <&vin12isp1>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ isp1vin13: endpoint {
+ remote-endpoint = <&vin13isp1>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ isp1vin14: endpoint {
+ remote-endpoint = <&vin14isp1>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+ isp1vin15: endpoint {
+ remote-endpoint = <&vin15isp1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index dd1a5ce5896c..39bb6db2fb32 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -46,11 +46,13 @@ properties:
- renesas,vin-r8a7779 # R-Car H1
- renesas,vin-r8a7795 # R-Car H3
- renesas,vin-r8a7796 # R-Car M3-W
+ - renesas,vin-r8a77961 # R-Car M3-W+
- renesas,vin-r8a77965 # R-Car M3-N
- renesas,vin-r8a77970 # R-Car V3M
- renesas,vin-r8a77980 # R-Car V3H
- renesas,vin-r8a77990 # R-Car E3
- renesas,vin-r8a77995 # R-Car D3
+ - renesas,vin-r8a779a0 # R-Car V3U
reg:
maxItems: 1
@@ -111,7 +113,7 @@ properties:
description: VIN channel number
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
- maximum: 15
+ maximum: 31
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -187,6 +189,29 @@ properties:
- required:
- endpoint@3
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input port node, multiple endpoints describing all the R-Car ISP
+ modules connected to the VIN.
+
+ properties:
+ endpoint@0:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description: Endpoint connected to ISP0.
+
+ endpoint@1:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description: Endpoint connected to ISP1.
+
+ endpoint@2:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description: Endpoint connected to ISP2.
+
+ endpoint@3:
+ $ref: /schemas/graph.yaml#/properties/endpoint
+ description: Endpoint connected to ISP3.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/rockchip,vdec.yaml b/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
index 8d35c327018b..089f11d21b25 100644
--- a/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip,vdec.yaml
@@ -15,7 +15,11 @@ description: |-
properties:
compatible:
- const: rockchip,rk3399-vdec
+ oneOf:
+ - const: rockchip,rk3399-vdec
+ - items:
+ - const: rockchip,rk3228-vdec
+ - const: rockchip,rk3399-vdec
reg:
maxItems: 1
@@ -37,6 +41,10 @@ properties:
- const: cabac
- const: core
+ assigned-clocks: true
+
+ assigned-clock-rates: true
+
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index c81dbc3e8960..b88172a59de7 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -15,10 +15,19 @@ description:
properties:
compatible:
- enum:
- - rockchip,rk3288-vpu
- - rockchip,rk3328-vpu
- - rockchip,rk3399-vpu
+ oneOf:
+ - enum:
+ - rockchip,rk3036-vpu
+ - rockchip,rk3066-vpu
+ - rockchip,rk3288-vpu
+ - rockchip,rk3328-vpu
+ - rockchip,rk3399-vpu
+ - items:
+ - const: rockchip,rk3188-vpu
+ - const: rockchip,rk3066-vpu
+ - items:
+ - const: rockchip,rk3228-vpu
+ - const: rockchip,rk3399-vpu
reg:
maxItems: 1
@@ -35,12 +44,20 @@ properties:
- const: vdpu
clocks:
- maxItems: 2
+ oneOf:
+ - maxItems: 2
+ - maxItems: 4
clock-names:
- items:
- - const: aclk
- - const: hclk
+ oneOf:
+ - items:
+ - const: aclk
+ - const: hclk
+ - items:
+ - const: aclk_vdpu
+ - const: hclk_vdpu
+ - const: aclk_vepu
+ - const: hclk_vepu
power-domains:
maxItems: 1
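A sketch of the new four-clock variant on RK3066; the unit address, clock phandles, and clock ids are illustrative assumptions:

    vpu: video-codec@10104000 {
            compatible = "rockchip,rk3066-vpu";
            reg = <0x10104000 0x800>;
            /* interrupts and power-domains omitted */
            clocks = <&cru ACLK_VDPU>, <&cru HCLK_VDPU>,
                     <&cru ACLK_VEPU>, <&cru HCLK_VEPU>;
            clock-names = "aclk_vdpu", "hclk_vdpu",
                          "aclk_vepu", "hclk_vepu";
    };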
diff --git a/Documentation/devicetree/bindings/media/tango-ir.txt b/Documentation/devicetree/bindings/media/tango-ir.txt
deleted file mode 100644
index a9f00c2bf897..000000000000
--- a/Documentation/devicetree/bindings/media/tango-ir.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Sigma Designs Tango IR NEC/RC-5/RC-6 decoder (SMP86xx and SMP87xx)
-
-Required properties:
-
-- compatible: "sigma,smp8642-ir"
-- reg: address/size of NEC+RC5 area, address/size of RC6 area
-- interrupts: spec for IR IRQ
-- clocks: spec for IR clock (typically the crystal oscillator)
-
-Optional properties:
-
-- linux,rc-map-name: see Documentation/devicetree/bindings/media/rc.txt
-
-Example:
-
- ir@10518 {
- compatible = "sigma,smp8642-ir";
- reg = <0x10518 0x18>, <0x105e0 0x1c>;
- interrupts = <21 IRQ_TYPE_EDGE_RISING>;
- clocks = <&xtal>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 2661775a3825..99a84b69a29f 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -21,6 +21,7 @@ Required properties:
compatible:
"mediatek,mt6323" for PMIC MT6323
"mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6359" for PMIC MT6359
"mediatek,mt6397" for PMIC MT6397
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
index 6f569fbfa134..2f63f2cdeb71 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
@@ -21,6 +21,7 @@ properties:
- brcm,bcm2711-emmc2
- brcm,sdhci-iproc-cygnus
- brcm,sdhci-iproc
+ - brcm,bcm7211a0-sdhci
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
index 04ba8b7fc054..546480f41141 100644
--- a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
@@ -19,6 +19,7 @@ properties:
- ingenic,jz4740-mmc
- ingenic,jz4725b-mmc
- ingenic,jz4760-mmc
+ - ingenic,jz4775-mmc
- ingenic,jz4780-mmc
- ingenic,x1000-mmc
- items:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index e141330c1114..25ac8e200970 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -220,6 +220,11 @@ properties:
description:
eMMC HS400 enhanced strobe mode is supported
+ no-mmc-hs400:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ None of the eMMC HS400 modes are supported.
+
dsr:
description:
Value the card Driver Stage Register (DSR) should be programmed
@@ -358,22 +363,6 @@ additionalProperties: true
examples:
- |
- mmc@ab000000 {
- compatible = "sdhci";
- reg = <0xab000000 0x200>;
- interrupts = <23>;
- bus-width = <4>;
- cd-gpios = <&gpio 69 0>;
- cd-inverted;
- wp-gpios = <&gpio 70 0>;
- max-frequency = <50000000>;
- keep-power-in-suspend;
- wakeup-source;
- mmc-pwrseq = <&sdhci0_pwrseq>;
- clk-phase-sd-hs = <63>, <72>;
- };
-
- - |
mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -385,9 +374,9 @@ examples:
non-removable;
mmc-pwrseq = <&sdhci0_pwrseq>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm43xx-fmac";
+ compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
interrupts = <10 8>;
interrupt-names = "host-wake";
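The new no-mmc-hs400 flag is typically a board-level quirk; a sketch of a board overlay masking HS400 (the label and the stated reason are illustrative):

    &emmc {
            /* HS400 signal timing cannot be met on this board */
            no-mmc-hs400;
    };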
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
deleted file mode 100644
index 291532ac0446..000000000000
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Renesas Multi Media Card Interface (MMCIF) Controller
-
-This file documents differences between the core properties in mmc.txt
-and the properties used by the MMCIF device.
-
-
-Required properties:
-
-- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
- fallback. Examples with <soctype> are:
- - "renesas,mmcif-r7s72100" for the MMCIF found in r7s72100 SoCs
- - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
- - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
- - "renesas,mmcif-r8a7742" for the MMCIF found in r8a7742 SoCs
- - "renesas,mmcif-r8a7743" for the MMCIF found in r8a7743 SoCs
- - "renesas,mmcif-r8a7744" for the MMCIF found in r8a7744 SoCs
- - "renesas,mmcif-r8a7745" for the MMCIF found in r8a7745 SoCs
- - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
- - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
- - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
- - "renesas,mmcif-r8a7793" for the MMCIF found in r8a7793 SoCs
- - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
- - "renesas,mmcif-sh73a0" for the MMCIF found in sh73a0 SoCs
-
-- interrupts: Some SoCs have only 1 shared interrupt, while others have either
- 2 or 3 individual interrupts (error, int, card detect). Below is the number
- of interrupts for each SoC:
- 1: r8a73a4, r8a7742, r8a7743, r8a7744, r8a7745, r8a7778, r8a7790, r8a7791,
- r8a7793, r8a7794
- 2: r8a7740, sh73a0
- 3: r7s72100
-
-- clocks: reference to the functional clock
-
-- dmas: reference to the DMA channels, one per channel name listed in the
- dma-names property.
-- dma-names: must contain "tx" for the transmit DMA channel and "rx" for the
- receive DMA channel.
-- max-frequency: Maximum operating clock frequency, driver uses default clock
- frequency if it is not set.
-
-
-Example: R8A7790 (R-Car H2) MMCIF0
-
- mmcif0: mmc@ee200000 {
- compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
- reg = <0 0xee200000 0 0x80>;
- interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
- dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
- dma-names = "tx", "rx";
- max-frequency = <97500000>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml b/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml
new file mode 100644
index 000000000000..c36ba561c387
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/renesas,mmcif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Multi Media Card Interface (MMCIF) Controller
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+allOf:
+ - $ref: "mmc-controller.yaml"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,mmcif-r7s72100 # RZ/A1H
+ - renesas,mmcif-r8a73a4 # R-Mobile APE6
+ - renesas,mmcif-r8a7740 # R-Mobile A1
+ - renesas,mmcif-r8a7742 # RZ/G1H
+ - renesas,mmcif-r8a7743 # RZ/G1M
+ - renesas,mmcif-r8a7744 # RZ/G1N
+ - renesas,mmcif-r8a7745 # RZ/G1E
+ - renesas,mmcif-r8a7778 # R-Car M1A
+ - renesas,mmcif-r8a7790 # R-Car H2
+ - renesas,mmcif-r8a7791 # R-Car M2-W
+ - renesas,mmcif-r8a7793 # R-Car M2-N
+ - renesas,mmcif-r8a7794 # R-Car E2
+ - renesas,mmcif-sh73a0 # SH-Mobile AG5
+ - const: renesas,sh-mmcif
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+ max-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,mmcif-r7s72100
+then:
+ properties:
+ interrupts:
+ items:
+ - description: Error interrupt
+ - description: Normal operation interrupt
+ - description: Card detection interrupt
+else:
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,mmcif-r8a7740
+ - renesas,mmcif-sh73a0
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Error interrupt
+ - description: Normal operation interrupt
+ else:
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,mmcif-r8a73a4
+ - renesas,mmcif-r8a7778
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ mmcif0: mmc@ee200000 {
+ compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
+ reg = <0xee200000 0x80>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 315>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 315>;
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>, <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 3762f1c8de96..54fb59820d2b 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -29,21 +29,15 @@ properties:
- const: rockchip,rk3288-dw-mshc
- items:
- enum:
- # for Rockchip PX30
- rockchip,px30-dw-mshc
- # for Rockchip RK3036
+ - rockchip,rk1808-dw-mshc
- rockchip,rk3036-dw-mshc
- # for Rockchip RK322x
- rockchip,rk3228-dw-mshc
- # for Rockchip RK3308
- rockchip,rk3308-dw-mshc
- # for Rockchip RK3328
- rockchip,rk3328-dw-mshc
- # for Rockchip RK3368
- rockchip,rk3368-dw-mshc
- # for Rockchip RK3399
- rockchip,rk3399-dw-mshc
- # for Rockchip RV1108
+ - rockchip,rk3568-dw-mshc
- rockchip,rv1108-dw-mshc
- const: rockchip,rk3288-dw-mshc
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
index 3a79e39253d2..29399e88ac53 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
@@ -19,7 +19,6 @@ properties:
- const: ti,am654-sdhci-5.1
- const: ti,j721e-sdhci-8bit
- const: ti,j721e-sdhci-4bit
- - const: ti,j721e-sdhci-4bit
- const: ti,am64-sdhci-8bit
- const: ti,am64-sdhci-4bit
- items:
diff --git a/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
deleted file mode 100644
index 8ba9ed11d716..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Broadcom iProc MDIO bus controller
-
-Required properties:
-- compatible: should be "brcm,iproc-mdio"
-- reg: address and length of the register set for the MDIO interface
-- #size-cells: must be 1
-- #address-cells: must be 0
-
-Child nodes of this MDIO bus controller node are standard Ethernet PHY device
-nodes as described in Documentation/devicetree/bindings/net/phy.txt
-
-Example:
-
-mdio@18002000 {
- compatible = "brcm,iproc-mdio";
- reg = <0x18002000 0x8>;
- #size-cells = <1>;
- #address-cells = <0>;
-
- enet-gphy@0 {
- reg = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/brcm,iproc-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.yaml
new file mode 100644
index 000000000000..3031395f7e6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,iproc-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom iProc MDIO bus controller
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+allOf:
+ - $ref: mdio.yaml#
+
+properties:
+ compatible:
+ const: brcm,iproc-mdio
+
+ reg:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - reg
+
+examples:
+ - |
+ mdio@18002000 {
+ compatible = "brcm,iproc-mdio";
+ reg = <0x18002000 0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
deleted file mode 100644
index 90ac4fef23f5..000000000000
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Renesas R-Car CAN controller Device Tree Bindings
--------------------------------------------------
-
-Required properties:
-- compatible: "renesas,can-r8a7742" if CAN controller is a part of R8A7742 SoC.
- "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
- "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
- "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
- "renesas,can-r8a77470" if CAN controller is a part of R8A77470 SoC.
- "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
- "renesas,can-r8a774b1" if CAN controller is a part of R8A774B1 SoC.
- "renesas,can-r8a774c0" if CAN controller is a part of R8A774C0 SoC.
- "renesas,can-r8a774e1" if CAN controller is a part of R8A774E1 SoC.
- "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
- "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
- "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
- "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
- "renesas,can-r8a7792" if CAN controller is a part of R8A7792 SoC.
- "renesas,can-r8a7793" if CAN controller is a part of R8A7793 SoC.
- "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
- "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
- "renesas,can-r8a7796" if CAN controller is a part of R8A77960 SoC.
- "renesas,can-r8a77961" if CAN controller is a part of R8A77961 SoC.
- "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
- "renesas,can-r8a77990" if CAN controller is a part of R8A77990 SoC.
- "renesas,can-r8a77995" if CAN controller is a part of R8A77995 SoC.
- "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
- "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
- compatible device.
- "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
- compatible device.
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
-- reg: physical base address and size of the R-Car CAN register map.
-- interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", and "can_clk".
-- pinctrl-0: pin control group to be used for this controller.
-- pinctrl-names: must be "default".
-
-Required properties for R8A774A1, R8A774B1, R8A774C0, R8A774E1, R8A7795,
-R8A77960, R8A77961, R8A77965, R8A77990, and R8A77995:
-For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
-be used by both CAN and CAN FD controller at the same time. It needs to be
-scaled to maximum frequency if any of these controllers use it. This is done
-using the below properties:
-
-- assigned-clocks: phandle of clkp2(CANFD) clock.
-- assigned-clock-rates: maximum frequency of this clock.
-
-Optional properties:
-- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
- <0x0> (default) : Peripheral clock (clkp1)
- <0x1> : Peripheral clock (clkp2)
- <0x3> : External input clock
-
-Example
--------
-
-SoC common .dtsi file:
-
- can0: can@e6e80000 {
- compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can";
- reg = <0 0xe6e80000 0 0x1000>;
- interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
- <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
- clock-names = "clkp1", "clkp2", "can_clk";
- status = "disabled";
- };
-
-Board specific .dts file:
-
-&can0 {
- pinctrl-0 = <&can0_pins>;
- pinctrl-names = "default";
- status = "okay";
-};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
deleted file mode 100644
index 248c4ed97a0a..000000000000
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-Renesas R-Car CAN FD controller Device Tree Bindings
-----------------------------------------------------
-
-Required properties:
-- compatible: Must contain one or more of the following:
- - "renesas,rcar-gen3-canfd" for R-Car Gen3 and RZ/G2 compatible controllers.
- - "renesas,r8a774a1-canfd" for R8A774A1 (RZ/G2M) compatible controller.
- - "renesas,r8a774b1-canfd" for R8A774B1 (RZ/G2N) compatible controller.
- - "renesas,r8a774c0-canfd" for R8A774C0 (RZ/G2E) compatible controller.
- - "renesas,r8a774e1-canfd" for R8A774E1 (RZ/G2H) compatible controller.
- - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
- - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
- - "renesas,r8a77965-canfd" for R8A77965 (R-Car M3-N) compatible controller.
- - "renesas,r8a77970-canfd" for R8A77970 (R-Car V3M) compatible controller.
- - "renesas,r8a77980-canfd" for R8A77980 (R-Car V3H) compatible controller.
- - "renesas,r8a77990-canfd" for R8A77990 (R-Car E3) compatible controller.
- - "renesas,r8a77995-canfd" for R8A77995 (R-Car D3) compatible controller.
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first, followed by the
- family-specific and/or generic versions.
-
-- reg: physical base address and size of the R-Car CAN FD register map.
-- interrupts: interrupt specifiers for the Channel & Global interrupts
-- clocks: phandles and clock specifiers for 3 clock inputs.
-- clock-names: 3 clock input name strings: "fck", "canfd", "can_clk".
-- pinctrl-0: pin control group to be used for this controller.
-- pinctrl-names: must be "default".
-
-Required child nodes:
-The controller supports two channels and each is represented as a child node.
-The name of the child nodes are "channel0" and "channel1" respectively. Each
-child node supports the "status" property only, which is used to
-enable/disable the respective channel.
-
-Required properties for R8A774A1, R8A774B1, R8A774C0, R8A774E1, R8A7795,
-R8A7796, R8A77965, R8A77990, and R8A77995:
-In the denoted SoCs, canfd clock is a div6 clock and can be used by both CAN
-and CAN FD controller at the same time. It needs to be scaled to maximum
-frequency if any of these controllers use it. This is done using the below
-properties:
-
-- assigned-clocks: phandle of canfd clock.
-- assigned-clock-rates: maximum frequency of this clock.
-
-Optional property:
-The controller can operate in either CAN FD only mode (default) or
-Classical CAN only mode. The mode is global to both the channels. In order to
-enable the later, define the following optional property.
- - renesas,no-can-fd: puts the controller in Classical CAN only mode.
-
-Example
--------
-
-SoC common .dtsi file:
-
- canfd: can@e66c0000 {
- compatible = "renesas,r8a7795-canfd",
- "renesas,rcar-gen3-canfd";
- reg = <0 0xe66c0000 0 0x8000>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 914>,
- <&cpg CPG_CORE R8A7795_CLK_CANFD>,
- <&can_clk>;
- clock-names = "fck", "canfd", "can_clk";
- assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
- assigned-clock-rates = <40000000>;
- power-domains = <&cpg>;
- status = "disabled";
-
- channel0 {
- status = "disabled";
- };
-
- channel1 {
- status = "disabled";
- };
- };
-
-Board specific .dts file:
-
-E.g. below enables Channel 1 alone in the board in Classical CAN only mode.
-
-&canfd {
- pinctrl-0 = <&canfd1_pins>;
- pinctrl-names = "default";
- renesas,no-can-fd;
- status = "okay";
-
- channel1 {
- status = "okay";
- };
-};
-
-E.g. below enables Channel 0 alone in the board using External clock
-as fCAN clock.
-
-&canfd {
- pinctrl-0 = <&canfd0_pins>, <&can_clk_pins>;
- pinctrl-names = "default";
- status = "okay";
-
- channel0 {
- status = "okay";
- };
-};
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-can.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-can.yaml
new file mode 100644
index 000000000000..fadc871fd6b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-can.yaml
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/renesas,rcar-can.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car CAN Controller
+
+maintainers:
+ - Sergei Shtylyov <sergei.shtylyov@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,can-r8a7778 # R-Car M1-A
+ - renesas,can-r8a7779 # R-Car H1
+ - const: renesas,rcar-gen1-can # R-Car Gen1
+
+ - items:
+ - enum:
+ - renesas,can-r8a7742 # RZ/G1H
+ - renesas,can-r8a7743 # RZ/G1M
+ - renesas,can-r8a7744 # RZ/G1N
+ - renesas,can-r8a7745 # RZ/G1E
+ - renesas,can-r8a77470 # RZ/G1C
+ - renesas,can-r8a7790 # R-Car H2
+ - renesas,can-r8a7791 # R-Car M2-W
+ - renesas,can-r8a7792 # R-Car V2H
+ - renesas,can-r8a7793 # R-Car M2-N
+ - renesas,can-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-can # R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,can-r8a774a1 # RZ/G2M
+ - renesas,can-r8a774b1 # RZ/G2N
+ - renesas,can-r8a774c0 # RZ/G2E
+ - renesas,can-r8a774e1 # RZ/G2H
+ - renesas,can-r8a7795 # R-Car H3
+ - renesas,can-r8a7796 # R-Car M3-W
+ - renesas,can-r8a77961 # R-Car M3-W+
+ - renesas,can-r8a77965 # R-Car M3-N
+ - renesas,can-r8a77990 # R-Car E3
+ - renesas,can-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-can # R-Car Gen3 and RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: clkp1
+ - const: clkp2
+ - const: can_clk
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ renesas,can-clock-select:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 3 ]
+ default: 0
+ description: |
+ R-Car CAN Clock Source Select. Valid values are:
+ <0x0> (default) : Peripheral clock (clkp1)
+ <0x1> : Peripheral clock (clkp2)
+ <0x3> : External input clock
+
+ assigned-clocks:
+ description:
+ Reference to the clkp2 (CANFD) clock.
+ On R-Car Gen3 and RZ/G2 SoCs, "clkp2" is the CANFD clock. This is a div6
+ clock and can be used by both CAN and CAN FD controllers at the same
+ time. It needs to be scaled to maximum frequency if any of these
+ controllers use it.
+
+ assigned-clock-rates:
+ description: Maximum frequency of the CANFD clock.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+allOf:
+ - $ref: can-controller.yaml#
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: renesas,rcar-gen1-can
+ then:
+ required:
+ - resets
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,rcar-gen3-can
+ then:
+ required:
+ - assigned-clocks
+ - assigned-clock-rates
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7791-sysc.h>
+
+ can0: can@e6e80000 {
+ compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can";
+ reg = <0xe6e80000 0x1000>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 916>,
+ <&cpg CPG_CORE R8A7791_CLK_RCAN>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ };
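A board would select the clock source with the new renesas,can-clock-select property; a sketch following the board-level pattern of the removed rcar_can.txt (pin group name illustrative):

    &can0 {
            pinctrl-0 = <&can0_pins>;
            pinctrl-names = "default";
            renesas,can-clock-select = <0x3>;       /* external input clock */
            status = "okay";
    };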
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
new file mode 100644
index 000000000000..0b33ba9ccb47
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/renesas,rcar-canfd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car CAN FD Controller
+
+maintainers:
+ - Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+
+allOf:
+ - $ref: can-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r8a774a1-canfd # RZ/G2M
+ - renesas,r8a774b1-canfd # RZ/G2N
+ - renesas,r8a774c0-canfd # RZ/G2E
+ - renesas,r8a774e1-canfd # RZ/G2H
+ - renesas,r8a7795-canfd # R-Car H3
+ - renesas,r8a7796-canfd # R-Car M3-W
+ - renesas,r8a77965-canfd # R-Car M3-N
+ - renesas,r8a77970-canfd # R-Car V3M
+ - renesas,r8a77980-canfd # R-Car V3H
+ - renesas,r8a77990-canfd # R-Car E3
+ - renesas,r8a77995-canfd # R-Car D3
+ - const: renesas,rcar-gen3-canfd # R-Car Gen3 and RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Channel interrupt
+ - description: Global interrupt
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: fck
+ - const: canfd
+ - const: can_clk
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ renesas,no-can-fd:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The controller can operate in either CAN FD only mode (default) or
+ Classical CAN only mode. The mode is global to both the channels.
+ Specify this property to put the controller in Classical CAN only mode.
+
+ assigned-clocks:
+ description:
+ Reference to the CANFD clock. The CANFD clock is a div6 clock and can be
+ used by both CAN (if present) and CAN FD controllers at the same time.
+ It needs to be scaled to maximum frequency if any of these controllers
+ use it.
+
+ assigned-clock-rates:
+ description: Maximum frequency of the CANFD clock.
+
+patternProperties:
+ "^channel[01]$":
+ type: object
+ description:
+ The controller supports two channels and each is represented as a child
+ node. Each child node supports the "status" property only, which
+ is used to enable/disable the respective channel.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - resets
+ - assigned-clocks
+ - assigned-clock-rates
+ - channel0
+ - channel1
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+
+ canfd: can@e66c0000 {
+ compatible = "renesas,r8a7795-canfd",
+ "renesas,rcar-gen3-canfd";
+ reg = <0xe66c0000 0x8000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 914>,
+ <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "fck", "canfd", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+ assigned-clock-rates = <40000000>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 914>;
+
+ channel0 {
+ };
+
+ channel1 {
+ };
+ };
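A board-level sketch of Classical CAN only mode, mirroring the example removed from rcar_canfd.txt above (pin group name illustrative):

    &canfd {
            pinctrl-0 = <&canfd1_pins>;
            pinctrl-names = "default";
            renesas,no-can-fd;
            status = "okay";

            channel1 {
                    status = "okay";
            };
    };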
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index de04626a8e9d..18247ebfc487 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -81,6 +81,12 @@ Optional properties:
- gpio-controller: Boolean; if defined, MT7530's LED controller will run on
GPIO mode.
- #gpio-cells: Must be 2 if gpio-controller is defined.
+- interrupt-controller: Boolean; Enables the internal interrupt controller.
+
+If interrupt-controller is defined, the following properties are required.
+
+- #interrupt-cells: Must be 1.
+- interrupts: Parent interrupt for the interrupt controller.
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required, optional properties and how the integrated switch subnodes must
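A sketch of a switch node using the new interrupt-controller support; the parent interrupt line and pin number are illustrative:

    switch@0 {
            compatible = "mediatek,mt7530";
            reg = <0>;

            interrupt-controller;
            #interrupt-cells = <1>;
            interrupt-parent = <&pio>;
            interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
    };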
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
new file mode 100644
index 000000000000..0b8a05dd52e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/nxp,sja1105.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP SJA1105 Automotive Ethernet Switch Family Device Tree Bindings
+
+description:
+ The SJA1105 SPI interface requires a CS-to-CLK time (t2 in UM10944.pdf) of at
+ least one half of t_CLK. At an SPI frequency of 1MHz, this means a minimum
+ cs_sck_delay of 500ns. Ensuring that this SPI timing requirement is observed
+ depends on the SPI bus master driver.
+
+allOf:
+ - $ref: "dsa.yaml#"
+
+maintainers:
+ - Vladimir Oltean <vladimir.oltean@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,sja1105e
+ - nxp,sja1105t
+ - nxp,sja1105p
+ - nxp,sja1105q
+ - nxp,sja1105r
+ - nxp,sja1105s
+ - nxp,sja1110a
+ - nxp,sja1110b
+ - nxp,sja1110c
+ - nxp,sja1110d
+
+ reg:
+ maxItems: 1
+
+ # Optional container node for the 2 internal MDIO buses of the SJA1110
+ # (one for the internal 100base-T1 PHYs and the other for the single
+ # 100base-TX PHY). The "reg" property does not have physical significance.
+ # The PHY address to port correspondence is as follows: for 100base-T1,
+ # port 5 has PHY 1, port 6 has PHY 2, etc., while for 100base-TX, port 1
+ # has PHY 1.
+ mdios:
+ type: object
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^mdio@[0-1]$":
+ type: object
+
+ allOf:
+ - $ref: "http://devicetree.org/schemas/net/mdio.yaml#"
+
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - nxp,sja1110-base-t1-mdio
+ - nxp,sja1110-base-tx-mdio
+
+ reg:
+ oneOf:
+ - enum:
+ - 0
+ - 1
+
+ required:
+ - compatible
+ - reg
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-switch@1 {
+ reg = <0x1>;
+ compatible = "nxp,sja1105t";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ phy-handle = <&rgmii_phy6>;
+ phy-mode = "rgmii-id";
+ reg = <0>;
+ };
+
+ port@1 {
+ phy-handle = <&rgmii_phy3>;
+ phy-mode = "rgmii-id";
+ reg = <1>;
+ };
+
+ port@2 {
+ phy-handle = <&rgmii_phy4>;
+ phy-mode = "rgmii-id";
+ reg = <2>;
+ };
+
+ port@3 {
+ phy-mode = "rgmii-id";
+ reg = <3>;
+ };
+
+ port@4 {
+ ethernet = <&enet2>;
+ phy-mode = "rgmii";
+ reg = <4>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
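For the SJA1110 parts, the optional mdios container described above could look like the following sketch; the PHY labels and addresses are illustrative (per the comment above, 100base-T1 PHY 1 serves port 5):

    mdios {
            #address-cells = <1>;
            #size-cells = <0>;

            mdio@0 {
                    compatible = "nxp,sja1110-base-t1-mdio";
                    reg = <0>;
                    #address-cells = <1>;
                    #size-cells = <0>;

                    base_t1_phy1: ethernet-phy@1 {
                            reg = <0x1>;
                    };
            };

            mdio@1 {
                    compatible = "nxp,sja1110-base-tx-mdio";
                    reg = <1>;
                    #address-cells = <1>;
                    #size-cells = <0>;

                    base_tx_phy: ethernet-phy@1 {
                            reg = <0x1>;
                    };
            };
    };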
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
index ccbc6d89325d..8c73f67c43ca 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible: should be one of:
+ "qca,qca8327"
"qca,qca8334"
"qca,qca8337"
@@ -20,6 +21,10 @@ described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external
mdio-bus each subnode describing a port needs to have a valid phandle
referencing the internal PHY it is connected to. This is because there's no
N:N mapping of port and PHY id.
+To declare the internal mdio-bus configuration, declare an mdio node in the
+switch node and set each port's phy-handle to reference the internal PHY it
+is connected to. In this configuration an internal mdio-bus is registered
+and the MDIO master is used for communication.
Don't use mixed external and internal mdio-bus configurations, as this is
not supported by the hardware.
@@ -149,26 +154,61 @@ for the internal master mdio-bus configuration:
port@1 {
reg = <1>;
label = "lan1";
+ phy-mode = "internal";
+ phy-handle = <&phy_port1>;
};
port@2 {
reg = <2>;
label = "lan2";
+ phy-mode = "internal";
+ phy-handle = <&phy_port2>;
};
port@3 {
reg = <3>;
label = "lan3";
+ phy-mode = "internal";
+ phy-handle = <&phy_port3>;
};
port@4 {
reg = <4>;
label = "lan4";
+ phy-mode = "internal";
+ phy-handle = <&phy_port4>;
};
port@5 {
reg = <5>;
label = "wan";
+ phy-mode = "internal";
+ phy-handle = <&phy_port5>;
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_port1: phy@0 {
+ reg = <0>;
+ };
+
+ phy_port2: phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: phy@2 {
+ reg = <2>;
+ };
+
+ phy_port4: phy@3 {
+ reg = <3>;
+ };
+
+ phy_port5: phy@4 {
+ reg = <4>;
};
};
};
diff --git a/Documentation/devicetree/bindings/net/dsa/sja1105.txt b/Documentation/devicetree/bindings/net/dsa/sja1105.txt
deleted file mode 100644
index 13fd21074d48..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/sja1105.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-NXP SJA1105 switch driver
-=========================
-
-Required properties:
-
-- compatible:
- Must be one of:
- - "nxp,sja1105e"
- - "nxp,sja1105t"
- - "nxp,sja1105p"
- - "nxp,sja1105q"
- - "nxp,sja1105r"
- - "nxp,sja1105s"
-
- Although the device ID could be detected at runtime, explicit bindings
- are required in order to be able to statically check their validity.
- For example, SGMII can only be specified on port 4 of R and S devices,
- and the non-SGMII devices, while pin-compatible, are not equal in terms
- of support for RGMII internal delays (supported on P/Q/R/S, but not on
- E/T).
-
-Optional properties:
-
-- sja1105,role-mac:
-- sja1105,role-phy:
- Boolean properties that can be assigned under each port node. By
- default (unless otherwise specified) a port is configured as MAC if it
- is driving a PHY (phy-handle is present) or as PHY if it is PHY-less
- (fixed-link specified, presumably because it is connected to a MAC).
- The effect of this property (in either its implicit or explicit form)
- is:
- - In the case of MII or RMII it specifies whether the SJA1105 port is a
- clock source or sink for this interface (not applicable for RGMII
- where there is a Tx and an Rx clock).
- - In the case of RGMII it affects the behavior regarding internal
- delays:
- 1. If sja1105,role-mac is specified, and the phy-mode property is one
- of "rgmii-id", "rgmii-txid" or "rgmii-rxid", then the entity
- designated to apply the delay/clock skew necessary for RGMII
- is the PHY. The SJA1105 MAC does not apply any internal delays.
- 2. If sja1105,role-phy is specified, and the phy-mode property is one
- of the above, the designated entity to apply the internal delays
- is the SJA1105 MAC (if hardware-supported). This is only supported
- by the second-generation (P/Q/R/S) hardware. On a first-generation
- E or T device, it is an error to specify an RGMII phy-mode other
- than "rgmii" for a port that is in fixed-link mode. In that case,
- the clock skew must either be added by the MAC at the other end of
- the fixed-link, or by PCB serpentine traces on the board.
- These properties are required, for example, in the case where SJA1105
- ports are at both ends of a MII/RMII PHY-less setup. One end would need
- to have sja1105,role-mac, while the other sja1105,role-phy.
-
-See Documentation/devicetree/bindings/net/dsa/dsa.txt for the list of standard
-DSA required and optional properties.
-
-Other observations
-------------------
-
-The SJA1105 SPI interface requires a CS-to-CLK time (t2 in UM10944) of at least
-one half of t_CLK. At an SPI frequency of 1MHz, this means a minimum
-cs_sck_delay of 500ns. Ensuring that this SPI timing requirement is observed
-depends on the SPI bus master driver.
-
-Example
--------
-
-Ethernet switch connected via SPI to the host, CPU port wired to enet2:
-
-arch/arm/boot/dts/ls1021a-tsn.dts:
-
-/* SPI controller of the LS1021 */
-&dspi0 {
- sja1105@1 {
- reg = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nxp,sja1105t";
- spi-max-frequency = <4000000>;
- fsl,spi-cs-sck-delay = <1000>;
- fsl,spi-sck-cs-delay = <1000>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- /* ETH5 written on chassis */
- label = "swp5";
- phy-handle = <&rgmii_phy6>;
- phy-mode = "rgmii-id";
- reg = <0>;
- /* Implicit "sja1105,role-mac;" */
- };
- port@1 {
- /* ETH2 written on chassis */
- label = "swp2";
- phy-handle = <&rgmii_phy3>;
- phy-mode = "rgmii-id";
- reg = <1>;
- /* Implicit "sja1105,role-mac;" */
- };
- port@2 {
- /* ETH3 written on chassis */
- label = "swp3";
- phy-handle = <&rgmii_phy4>;
- phy-mode = "rgmii-id";
- reg = <2>;
- /* Implicit "sja1105,role-mac;" */
- };
- port@3 {
- /* ETH4 written on chassis */
- phy-handle = <&rgmii_phy5>;
- label = "swp4";
- phy-mode = "rgmii-id";
- reg = <3>;
- /* Implicit "sja1105,role-mac;" */
- };
- port@4 {
- /* Internal port connected to eth2 */
- ethernet = <&enet2>;
- phy-mode = "rgmii";
- reg = <4>;
- /* Implicit "sja1105,role-phy;" */
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
- };
-};
-
-/* MDIO controller of the LS1021 */
-&mdio0 {
- /* BCM5464 */
- rgmii_phy3: ethernet-phy@3 {
- reg = <0x3>;
- };
- rgmii_phy4: ethernet-phy@4 {
- reg = <0x4>;
- };
- rgmii_phy5: ethernet-phy@5 {
- reg = <0x5>;
- };
- rgmii_phy6: ethernet-phy@6 {
- reg = <0x6>;
- };
-};
-
-/* Ethernet master port of the LS1021 */
-&enet2 {
- phy-connection-type = "rgmii";
- status = "ok";
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index e8f04687a3e0..b0933a8c295a 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -68,6 +68,7 @@ properties:
- tbi
- rev-mii
- rmii
+ - rev-rmii
# RX and TX delays are added by the MAC when required
- rgmii
@@ -97,6 +98,7 @@ properties:
- 10gbase-kr
- usxgmii
- 10gbase-r
+ - 25gbase-r
phy-mode:
$ref: "#/properties/phy-connection-type"
diff --git a/Documentation/devicetree/bindings/net/ingenic,mac.yaml b/Documentation/devicetree/bindings/net/ingenic,mac.yaml
new file mode 100644
index 000000000000..d08a88125a5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ingenic,mac.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ingenic,mac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for MAC in Ingenic SoCs
+
+maintainers:
+ - 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+
+description:
+ The Ethernet Media Access Controller in Ingenic SoCs.
+
+properties:
+ compatible:
+ enum:
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: macirq
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: stmmaceth
+
+ mode-reg:
+ description: An extra syscon register that controls the ethernet interface mode and timing delay
+
+ rx-clk-delay-ps:
+ description: RGMII receive clock delay defined in picoseconds
+
+ tx-clk-delay-ps:
+ description: RGMII transmit clock delay defined in picoseconds
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - mode-reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/x1000-cgu.h>
+
+ mac: ethernet@134b0000 {
+ compatible = "ingenic,x1000-mac";
+ reg = <0x134b0000 0x2000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <55>;
+ interrupt-names = "macirq";
+
+ clocks = <&cgu X1000_CLK_MAC>;
+ clock-names = "stmmaceth";
+
+ mode-reg = <&mac_phy_ctrl>;
+ };
+...
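Boards using RGMII can trim the interface with the delay properties documented above; a sketch (the delay values are board-specific and illustrative):

    &mac {
            phy-mode = "rgmii";
            rx-clk-delay-ps = <1400>;
            tx-clk-delay-ps = <1400>;
    };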
diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
new file mode 100644
index 000000000000..347b912a46bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/microchip,sparx5-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Sparx5 Ethernet switch controller
+
+maintainers:
+ - Steen Hegelund <steen.hegelund@microchip.com>
+ - Lars Povlsen <lars.povlsen@microchip.com>
+
+description: |
+ The SparX-5 Enterprise Ethernet switch family provides a rich set of
+ Enterprise switching features such as advanced TCAM-based VLAN and
+ QoS processing enabling delivery of differentiated services, and
+ security through TCAM-based frame processing using the versatile
+ content-aware processor (VCAP).
+
+ IPv4/IPv6 Layer 3 (L3) unicast and multicast routing is supported
+ with up to 18K IPv4/9K IPv6 unicast LPM entries and up to 9K IPv4/3K
+ IPv6 (S,G) multicast groups.
+
+ L3 security features include source guard and reverse path
+ forwarding (uRPF). Additional L3 features include VRF-Lite and
+ IP tunnels (IP over GRE/IP).
+
+ The SparX-5 switch family targets managed Layer 2 and Layer 3
+ equipment in SMB, SME, and Enterprise where high port count
+ 1G/2.5G/5G/10G switching with 10G/25G aggregation links is required.
+
+properties:
+ $nodename:
+ pattern: "^switch@[0-9a-f]+$"
+
+ compatible:
+ const: microchip,sparx5-switch
+
+ reg:
+ items:
+ - description: cpu target
+ - description: devices target
+ - description: general control block target
+
+ reg-names:
+ items:
+ - const: cpu
+ - const: devices
+ - const: gcb
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: register based extraction
+ - description: frame dma based extraction
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: xtr
+ - const: fdma
+
+ resets:
+ items:
+ - description: Reset controller used for switch core reset (soft reset)
+
+ reset-names:
+ items:
+ - const: switch
+
+ mac-address: true
+
+ ethernet-ports:
+ type: object
+ patternProperties:
+ "^port@[0-9a-f]+$":
+ type: object
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ reg:
+ description: Switch port number
+
+ phys:
+ maxItems: 1
+ description:
+ phandle of an Ethernet SerDes PHY. This defines which SerDes
+ instance will handle the Ethernet traffic.
+
+ phy-mode:
+ description:
+ This specifies the interface used by the Ethernet SerDes towards
+ the PHY or SFP.
+
+ microchip,bandwidth:
+ description: Specifies bandwidth in Mbit/s allocated to the port.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ maximum: 25000
+
+ phy-handle:
+ description:
+ phandle of an Ethernet PHY. This is optional and, if provided, it
+ points to the cuPHY used by the Ethernet SerDes.
+
+ sfp:
+ description:
+ phandle of an SFP. This is optional and used when not specifying
+ a cuPHY. It points to the SFP node that describes the SFP used by
+ the Ethernet SerDes.
+
+ managed: true
+
+ microchip,sd-sgpio:
+ description:
+ Index of the port's Signal Detect SGPIO in the set of 384 SGPIOs.
+ This is optional, and only needed if the default index used
+ is not correct.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 383
+
+ required:
+ - reg
+ - phys
+ - phy-mode
+ - microchip,bandwidth
+
+ oneOf:
+ - required:
+ - phy-handle
+ - required:
+ - sfp
+ - managed
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - resets
+ - reset-names
+ - ethernet-ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ switch: switch@600000000 {
+ compatible = "microchip,sparx5-switch";
+ reg = <0 0x401000>,
+ <0x10004000 0x7fc000>,
+ <0x11010000 0xaf0000>;
+ reg-names = "cpu", "devices", "gcb";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "xtr";
+ resets = <&reset 0>;
+ reset-names = "switch";
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy0>;
+ phy-mode = "qsgmii";
+ };
+ /* ... */
+ /* Then the 25G interfaces */
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ managed = "in-band-status";
+ microchip,sd-sgpio = <365>;
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ managed = "in-band-status";
+ microchip,sd-sgpio = <369>;
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ managed = "in-band-status";
+ microchip,sd-sgpio = <373>;
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ managed = "in-band-status";
+ microchip,sd-sgpio = <377>;
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ mac-address = [ 00 00 00 01 02 03 ];
+ };
+ };
+ };
+
+...
+# vim: set ts=2 sw=2 sts=2 tw=80 et cc=80 ft=yaml :
diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
index 477066e2b821..081742c2b726 100644
--- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -27,6 +27,9 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
wake-gpios:
maxItems: 1
description:
@@ -80,6 +83,8 @@ examples:
en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&rpmcc 20>;
};
};
# UART example on Raspberry Pi
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 5fe6d3dceb08..ed88ba4b94df 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -44,6 +44,7 @@ description:
properties:
compatible:
enum:
+ - qcom,msm8998-ipa
- qcom,sc7180-ipa
- qcom,sc7280-ipa
- qcom,sdm845-ipa
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
deleted file mode 100644
index 709ca6d51650..000000000000
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-Qualcomm Bluetooth Chips
----------------------
-
-This documents the binding structure and common properties for serial
-attached Qualcomm devices.
-
-Serial attached Qualcomm devices shall be a child node of the host UART
-device the slave device is attached to.
-
-Required properties:
- - compatible: should contain one of the following:
- * "qcom,qca6174-bt"
- * "qcom,qca9377-bt"
- * "qcom,wcn3990-bt"
- * "qcom,wcn3991-bt"
- * "qcom,wcn3998-bt"
- * "qcom,qca6390-bt"
-
-Optional properties for compatible string qcom,qca6174-bt:
-
- - enable-gpios: gpio specifier used to enable chip
- - clocks: clock provided to the controller (SUSCLK_32KHZ)
- - firmware-name: specify the name of nvm firmware to load
-
-Optional properties for compatible string qcom,qca9377-bt:
-
- - max-speed: see Documentation/devicetree/bindings/serial/serial.yaml
-
-Required properties for compatible string qcom,wcn399x-bt:
-
- - vddio-supply: VDD_IO supply regulator handle.
- - vddxo-supply: VDD_XO supply regulator handle.
- - vddrf-supply: VDD_RF supply regulator handle.
- - vddch0-supply: VDD_CH0 supply regulator handle.
-
-Optional properties for compatible string qcom,wcn399x-bt:
-
- - max-speed: see Documentation/devicetree/bindings/serial/serial.yaml
- - firmware-name: specify the name of nvm firmware to load
- - clocks: clock provided to the controller
-
-Examples:
-
-serial@7570000 {
- label = "BT-UART";
- status = "okay";
-
- bluetooth {
- compatible = "qcom,qca6174-bt";
-
- enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
- clocks = <&divclk4>;
- firmware-name = "nvm_00440302.bin";
- };
-};
-
-serial@898000 {
- bluetooth {
- compatible = "qcom,wcn3990-bt";
-
- vddio-supply = <&vreg_s4a_1p8>;
- vddxo-supply = <&vreg_l7a_1p8>;
- vddrf-supply = <&vreg_l17a_1p3>;
- vddch0-supply = <&vreg_l25a_3p3>;
- max-speed = <3200000>;
- firmware-name = "crnv21.bin";
- clocks = <&rpmhcc RPMH_RF_CLK2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.yaml
new file mode 100644
index 000000000000..f93c6e7a1b59
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.yaml
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qualcomm-bluetooth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Bluetooth Chips
+
+maintainers:
+ - Balakrishna Godavarthi <bgodavar@codeaurora.org>
+ - Rocky Liao <rjliao@codeaurora.org>
+
+description:
+ This binding describes Qualcomm UART-attached Bluetooth chips.
+
+properties:
+ compatible:
+ enum:
+ - qcom,qca6174-bt
+ - qcom,qca9377-bt
+ - qcom,wcn3990-bt
+ - qcom,wcn3991-bt
+ - qcom,wcn3998-bt
+ - qcom,qca6390-bt
+ - qcom,wcn6750-bt
+
+ enable-gpios:
+ maxItems: 1
+ description: gpio specifier used to enable chip
+
+ swctrl-gpios:
+ maxItems: 1
+ description: gpio specifier used to find the status
+ of the clock supply to the SoC
+
+ clocks:
+ maxItems: 1
+ description: clock provided to the controller (SUSCLK_32KHZ)
+
+ vddio-supply:
+ description: VDD_IO supply regulator handle
+
+ vddxo-supply:
+ description: VDD_XO supply regulator handle
+
+ vddrf-supply:
+ description: VDD_RF supply regulator handle
+
+ vddch0-supply:
+ description: VDD_CH0 supply regulator handle
+
+ vddaon-supply:
+ description: VDD_AON supply regulator handle
+
+ vddbtcxmx-supply:
+ description: VDD_BT_CXMX supply regulator handle
+
+ vddrfacmn-supply:
+ description: VDD_RFA_CMN supply regulator handle
+
+ vddrfa0p8-supply:
+ description: VDD_RFA_0P8 supply regulator handle
+
+ vddrfa1p7-supply:
+ description: VDD_RFA_1P7 supply regulator handle
+
+ vddrfa1p2-supply:
+ description: VDD_RFA_1P2 supply regulator handle
+
+ vddrfa2p2-supply:
+ description: VDD_RFA_2P2 supply regulator handle
+
+ vddasd-supply:
+ description: VDD_ASD supply regulator handle
+
+ max-speed:
+ description: see Documentation/devicetree/bindings/serial/serial.yaml
+
+ firmware-name:
+ description: specify the name of nvm firmware to load
+
+ local-bd-address:
+ description: see Documentation/devicetree/bindings/net/bluetooth.txt
+
+required:
+ - compatible
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qca6174-bt
+ then:
+ required:
+ - enable-gpios
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,wcn3990-bt
+ - qcom,wcn3991-bt
+ - qcom,wcn3998-bt
+ then:
+ required:
+ - vddio-supply
+ - vddxo-supply
+ - vddrf-supply
+ - vddch0-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,wcn6750-bt
+ then:
+ required:
+ - enable-gpios
+ - swctrl-gpios
+ - vddio-supply
+ - vddaon-supply
+ - vddbtcxmx-supply
+ - vddrfacmn-supply
+ - vddrfa0p8-supply
+ - vddrfa1p7-supply
+ - vddrfa1p2-supply
+ - vddasd-supply
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ serial {
+
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+ clocks = <&divclk4>;
+ firmware-name = "nvm_00440302.bin";
+ };
+ };
+ - |
+ serial {
+
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+ vddio-supply = <&vreg_s4a_1p8>;
+ vddxo-supply = <&vreg_l7a_1p8>;
+ vddrf-supply = <&vreg_l17a_1p3>;
+ vddch0-supply = <&vreg_l25a_3p3>;
+ max-speed = <3200000>;
+ firmware-name = "crnv21.bin";
+ };
+ };
+ - |
+ serial {
+
+ bluetooth {
+ compatible = "qcom,wcn6750-bt";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
+ enable-gpios = <&tlmm 85 GPIO_ACTIVE_HIGH>;
+ swctrl-gpios = <&tlmm 86 GPIO_ACTIVE_HIGH>;
+ vddio-supply = <&vreg_l19b_1p8>;
+ vddaon-supply = <&vreg_s7b_0p9>;
+ vddbtcxmx-supply = <&vreg_s7b_0p9>;
+ vddrfacmn-supply = <&vreg_s7b_0p9>;
+ vddrfa0p8-supply = <&vreg_s7b_0p9>;
+ vddrfa1p7-supply = <&vreg_s1b_1p8>;
+ vddrfa1p2-supply = <&vreg_s8b_1p2>;
+ vddrfa2p2-supply = <&vreg_s1c_2p2>;
+ vddasd-supply = <&vreg_l11c_2p8>;
+ max-speed = <3200000>;
+ firmware-name = "msnv11.bin";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml
new file mode 100644
index 000000000000..bb94a2388520
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/realtek,rtl82xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek RTL82xx PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Heiner Kallweit <hkallweit1@gmail.com>
+
+description:
+ Bindings for Realtek RTL82xx PHYs
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ realtek,clkout-disable:
+ type: boolean
+ description:
+ Disable the CLKOUT clock; the CLKOUT clock is enabled by default after hardware reset.
+
+ realtek,aldps-enable:
+ type: boolean
+ description:
+ Enable ALDPS mode; ALDPS mode is disabled by default after hardware reset.
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ realtek,clkout-disable;
+ realtek,aldps-enable;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml
index 8ce5ed8a58dd..c101a1ec846e 100644
--- a/Documentation/devicetree/bindings/net/renesas,ether.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml
@@ -10,7 +10,7 @@ allOf:
- $ref: ethernet-controller.yaml#
maintainers:
- - Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+ - Sergei Shtylyov <sergei.shtylyov@gmail.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index 5acddb6171bf..083623c8d718 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -19,10 +19,12 @@ select:
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac
- rockchip,rk3288-gmac
+ - rockchip,rk3308-gmac
- rockchip,rk3328-gmac
- rockchip,rk3366-gmac
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
+ - rockchip,rk3568-gmac
- rockchip,rv1108-gmac
required:
- compatible
@@ -32,17 +34,23 @@ allOf:
properties:
compatible:
- items:
- - enum:
- - rockchip,px30-gmac
- - rockchip,rk3128-gmac
- - rockchip,rk3228-gmac
- - rockchip,rk3288-gmac
- - rockchip,rk3328-gmac
- - rockchip,rk3366-gmac
- - rockchip,rk3368-gmac
- - rockchip,rk3399-gmac
- - rockchip,rv1108-gmac
+ oneOf:
+ - items:
+ - enum:
+ - rockchip,px30-gmac
+ - rockchip,rk3128-gmac
+ - rockchip,rk3228-gmac
+ - rockchip,rk3288-gmac
+ - rockchip,rk3308-gmac
+ - rockchip,rk3328-gmac
+ - rockchip,rk3366-gmac
+ - rockchip,rk3368-gmac
+ - rockchip,rk3399-gmac
+ - rockchip,rv1108-gmac
+ - items:
+ - enum:
+ - rockchip,rk3568-gmac
+ - const: snps,dwmac-4.20a
clocks:
minItems: 5
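An RK3568 node therefore pairs the SoC entry with the dwmac fallback; a sketch (unit address illustrative):

    gmac0: ethernet@fe2a0000 {
            compatible = "rockchip,rk3568-gmac", "snps,dwmac-4.20a";
            /* reg, interrupts, clocks per snps,dwmac.yaml */
    };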
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 2edd8bea993e..56f2235f5fb5 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -51,11 +51,20 @@ properties:
- allwinner,sun8i-r40-emac
- allwinner,sun8i-v3s-emac
- allwinner,sun50i-a64-emac
+ - loongson,ls2k-dwmac
+ - loongson,ls7a-dwmac
- amlogic,meson6-dwmac
- amlogic,meson8b-dwmac
- amlogic,meson8m2-dwmac
- amlogic,meson-gxbb-dwmac
- amlogic,meson-axg-dwmac
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
- rockchip,px30-gmac
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac
@@ -310,6 +319,11 @@ allOf:
- allwinner,sun8i-r40-emac
- allwinner,sun8i-v3s-emac
- allwinner,sun50i-a64-emac
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac
@@ -353,6 +367,13 @@ allOf:
- allwinner,sun8i-r40-emac
- allwinner,sun8i-v3s-emac
- allwinner,sun50i-a64-emac
+ - loongson,ls2k-dwmac
+ - loongson,ls7a-dwmac
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
diff --git a/Documentation/devicetree/bindings/regulator/max8893.yaml b/Documentation/devicetree/bindings/regulator/max8893.yaml
new file mode 100644
index 000000000000..2b5e977bf409
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8893.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/max8893.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Regulator driver for MAX8893 PMIC from Maxim Integrated.
+
+maintainers:
+ - Sergey Larin <cerg2010cerg2010@mail.ru>
+
+description: |
+ The device has 5 LDO regulators and a single BUCK regulator.
+ Programming is done through the I2C bus.
+
+properties:
+ compatible:
+ const: maxim,max8893
+
+ reg:
+ maxItems: 1
+
+ regulators:
+ type: object
+
+ patternProperties:
+ "^(ldo[1-5]|buck)$":
+ $ref: "regulator.yaml#"
+
+ additionalProperties: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@3e {
+ compatible = "maxim,max8893";
+ reg = <0x3e>;
+
+ regulators {
+ /* Front camera - s5k6aafx, back - m5mo */
+ /* Numbers used to indicate the sequence */
+ front_1_back_1: buck {
+ regulator-name = "cam_isp_core_1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ front_4_back_5: ldo1 {
+ regulator-name = "vt_io_1v8,cam_isp_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ front_3_back_4: ldo2 {
+ regulator-name = "vt_core_1v5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ front_5_back_6: ldo3 {
+ regulator-name = "vt_cam_1v8,vt_sensor_io_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo4 {
+ /* not used */
+ };
+
+ back_7: ldo5 {
+ regulator-name = "cam_sensor_io_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml
new file mode 100644
index 000000000000..8cc413eb482d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml
@@ -0,0 +1,385 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mt6359-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MT6359 Regulator from MediaTek Integrated
+
+maintainers:
+ - Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>
+
+description: |
+ List of regulators provided by this controller. Each regulator node is
+ named according to its regulator type, buck_<name> or ldo_<name>.
+ The MT6359 regulators node should be a sub-node of the MT6397 MFD node.
+
+patternProperties:
+ "^buck_v(s1|gpu11|modem|pu|core|s2|pa|proc2|proc1|core_sshub)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(s1|gpu11|modem|pu|core|s2|pa|proc2|proc1|core_sshub)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(ibr|rf12|usb|camio|efuse|xo22)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(ibr|rf12|usb|camio|efuse|xo22)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(rfck|emc|a12|a09|ufs|bbck)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(rfck|emc|a12|a09|ufs|bbck)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vcn(18|13|33_1_bt|13_1_wifi|33_2_bt|33_2_wifi)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vcn(18|13|33_1_bt|13_1_wifi|33_2_bt|33_2_wifi)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vsram_(proc2|others|md|proc1|others_sshub)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vsram_(proc2|others|md|proc1|others_sshub)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(fe|bif|io)28$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(fe|bif|io)28$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(aud|io|aux|rf|m)18$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(aud|io|aux|rf|m)18$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vsim[12]$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vsim[12]$"
+
+ required:
+ - regulator-name
+
+ unevaluatedProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ pmic {
+ regulators {
+ mt6359_vs1_buck_reg: buck_vs1 {
+ regulator-name = "vs1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+ mt6359_vgpu11_buck_reg: buck_vgpu11 {
+ regulator-name = "vgpu11";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vmodem_buck_reg: buck_vmodem {
+ regulator-name = "vmodem";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-ramp-delay = <10760>;
+ regulator-enable-ramp-delay = <200>;
+ };
+ mt6359_vpu_buck_reg: buck_vpu {
+ regulator-name = "vpu";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vcore_buck_reg: buck_vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vs2_buck_reg: buck_vs2 {
+ regulator-name = "vs2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1600000>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+ mt6359_vpa_buck_reg: buck_vpa {
+ regulator-name = "vpa";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <3650000>;
+ regulator-enable-ramp-delay = <300>;
+ };
+ mt6359_vproc2_buck_reg: buck_vproc2 {
+ regulator-name = "vproc2";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vproc1_buck_reg: buck_vproc1 {
+ regulator-name = "vproc1";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vcore_sshub_buck_reg: buck_vcore_sshub {
+ regulator-name = "vcore_sshub";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ };
+ mt6359_vgpu11_sshub_buck_reg: buck_vgpu11_sshub {
+ regulator-name = "vgpu11_sshub";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ };
+ mt6359_vaud18_ldo_reg: ldo_vaud18 {
+ regulator-name = "vaud18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vsim1_ldo_reg: ldo_vsim1 {
+ regulator-name = "vsim1";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ };
+ mt6359_vibr_ldo_reg: ldo_vibr {
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vrf12_ldo_reg: ldo_vrf12 {
+ regulator-name = "vrf12";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ };
+ mt6359_vusb_ldo_reg: ldo_vusb {
+ regulator-name = "vusb";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <960>;
+ regulator-always-on;
+ };
+ mt6359_vsram_proc2_ldo_reg: ldo_vsram_proc2 {
+ regulator-name = "vsram_proc2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vio18_ldo_reg: ldo_vio18 {
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-enable-ramp-delay = <960>;
+ regulator-always-on;
+ };
+ mt6359_vcamio_ldo_reg: ldo_vcamio {
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ };
+ mt6359_vcn18_ldo_reg: ldo_vcn18 {
+ regulator-name = "vcn18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vfe28_ldo_reg: ldo_vfe28 {
+ regulator-name = "vfe28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+ mt6359_vcn13_ldo_reg: ldo_vcn13 {
+ regulator-name = "vcn13";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1300000>;
+ };
+ mt6359_vcn33_1_bt_ldo_reg: ldo_vcn33_1_bt {
+ regulator-name = "vcn33_1_bt";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vcn33_1_wifi_ldo_reg: ldo_vcn33_1_wifi {
+ regulator-name = "vcn33_1_wifi";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vaux18_ldo_reg: ldo_vaux18 {
+ regulator-name = "vaux18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vsram_others_ldo_reg: ldo_vsram_others {
+ regulator-name = "vsram_others";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vefuse_ldo_reg: ldo_vefuse {
+ regulator-name = "vefuse";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <2000000>;
+ };
+ mt6359_vxo22_ldo_reg: ldo_vxo22 {
+ regulator-name = "vxo22";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-always-on;
+ };
+ mt6359_vrfck_ldo_reg: ldo_vrfck {
+ regulator-name = "vrfck";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1700000>;
+ };
+ mt6359_vrfck_1_ldo_reg: ldo_vrfck_1 {
+ regulator-name = "vrfck";
+ regulator-min-microvolt = <1240000>;
+ regulator-max-microvolt = <1600000>;
+ };
+ mt6359_vbif28_ldo_reg: ldo_vbif28 {
+ regulator-name = "vbif28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vio28_ldo_reg: ldo_vio28 {
+ regulator-name = "vio28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ mt6359_vemc_ldo_reg: ldo_vemc {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vemc_1_ldo_reg: ldo_vemc_1 {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vcn33_2_bt_ldo_reg: ldo_vcn33_2_bt {
+ regulator-name = "vcn33_2_bt";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vcn33_2_wifi_ldo_reg: ldo_vcn33_2_wifi {
+ regulator-name = "vcn33_2_wifi";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_va12_ldo_reg: ldo_va12 {
+ regulator-name = "va12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ };
+ mt6359_va09_ldo_reg: ldo_va09 {
+ regulator-name = "va09";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ mt6359_vrf18_ldo_reg: ldo_vrf18 {
+ regulator-name = "vrf18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1810000>;
+ };
+ mt6359_vsram_md_ldo_reg: ldo_vsram_md {
+ regulator-name = "vsram_md";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <10760>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vufs_ldo_reg: ldo_vufs {
+ regulator-name = "vufs";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ };
+ mt6359_vm18_ldo_reg: ldo_vm18 {
+ regulator-name = "vm18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-always-on;
+ };
+ mt6359_vbbck_ldo_reg: ldo_vbbck {
+ regulator-name = "vbbck";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ mt6359_vsram_proc1_ldo_reg: ldo_vsram_proc1 {
+ regulator-name = "vsram_proc1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vsim2_ldo_reg: ldo_vsim2 {
+ regulator-name = "vsim2";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ };
+ mt6359_vsram_others_sshub_ldo: ldo_vsram_others_sshub {
+ regulator-name = "vsram_others_sshub";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
index e561a5b941e4..34de38377aa6 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
@@ -33,6 +33,9 @@ description: |
The names used for regulator nodes must match those supported by a given
PMIC. Supported regulator node names are
+ For PM6150, smps1 - smps5, ldo1 - ldo19
+ For PM6150L, smps1 - smps8, ldo1 - ldo11, bob
+ For PM7325, smps1 - smps8, ldo1 - ldo19
For PM8005, smps1 - smps4
For PM8009, smps1 - smps2, ldo1 - ldo7
For PM8150, smps1 - smps10, ldo1 - ldo18
@@ -41,15 +44,15 @@ description: |
For PM8350C, smps1 - smps10, ldo1 - ldo13, bob
For PM8998, smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
For PMI8998, bob
- For PM6150, smps1 - smps5, ldo1 - ldo19
- For PM6150L, smps1 - smps8, ldo1 - ldo11, bob
- For PMX55, smps1 - smps7, ldo1 - ldo16
- For PM7325, smps1 - smps8, ldo1 - ldo19
For PMR735A, smps1 - smps3, ldo1 - ldo7
+ For PMX55, smps1 - smps7, ldo1 - ldo16
properties:
compatible:
enum:
+ - qcom,pm6150-rpmh-regulators
+ - qcom,pm6150l-rpmh-regulators
+ - qcom,pm7325-rpmh-regulators
- qcom,pm8005-rpmh-regulators
- qcom,pm8009-rpmh-regulators
- qcom,pm8009-1-rpmh-regulators
@@ -59,11 +62,9 @@ properties:
- qcom,pm8350c-rpmh-regulators
- qcom,pm8998-rpmh-regulators
- qcom,pmi8998-rpmh-regulators
- - qcom,pm6150-rpmh-regulators
- - qcom,pm6150l-rpmh-regulators
- - qcom,pmx55-rpmh-regulators
- - qcom,pm7325-rpmh-regulators
+ - qcom,pmm8155au-rpmh-regulators
- qcom,pmr735a-rpmh-regulators
+ - qcom,pmx55-rpmh-regulators
qcom,pmic-id:
description: |
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
index a35c6cb9bf97..83b53579f463 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
@@ -24,6 +24,10 @@ description:
For mp5496, s2
+ For pm8226, s1, s2, s3, s4, s5, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10,
+ l11, l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, l25,
+ l26, l27, l28, lvs1
+
For pm8841, s1, s2, s3, s4, s5, s6, s7, s8
For pm8916, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
@@ -68,6 +72,7 @@ properties:
compatible:
enum:
- qcom,rpm-mp5496-regulators
+ - qcom,rpm-pm8226-regulators
- qcom,rpm-pm8841-regulators
- qcom,rpm-pm8916-regulators
- qcom,rpm-pm8941-regulators
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 6d0bc9cd4040..a6ae9ecae5cc 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -117,6 +117,88 @@ properties:
description: Enable over current protection.
type: boolean
+ regulator-oc-protection-microamp:
+ description: Set over current protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted.
+
+ regulator-oc-error-microamp:
+ description: Set over current error limit. This is a limit where part of
+ the hardware is probably malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted.
+
+ regulator-oc-warn-microamp:
+ description: Set over current warning limit. This is a limit where hardware
+ is assumed still to be functional but approaching a limit where it gets
+ damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted.
+
+ regulator-ov-protection-microvolt:
+ description: Set over voltage protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted. Limit is given as microvolt offset from
+ voltage set to regulator.
+
+ regulator-ov-error-microvolt:
+ description: Set over voltage error limit. This is a limit where part of
+ the hardware is probably malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted. Limit
+ is given as microvolt offset from voltage set to regulator.
+
+ regulator-ov-warn-microvolt:
+ description: Set over voltage warning limit. This is a limit where hardware
+ is assumed still to be functional but approaching a limit where it gets
+ damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted. Limit is given as microvolt
+ offset from voltage set to regulator.
+
+ regulator-uv-protection-microvolt:
+ description: Set under voltage protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted. Limit is given as microvolt offset from
+ voltage set to regulator.
+
+ regulator-uv-error-microvolt:
+ description: Set under voltage error limit. This is a limit where part of
+ the hardware is probably malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted. Limit
+ is given as microvolt offset from voltage set to regulator.
+
+ regulator-uv-warn-microvolt:
+ description: Set under voltage warning limit. This is a limit where
+ hardware is assumed still to be functional but approaching a limit where
+ it gets damaged. Recovery actions should be initiated. Zero can be passed
+ to disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted. Limit is given as microvolt
+ offset from voltage set to regulator.
+
+ regulator-temp-protection-kelvin:
+ description: Set over temperature protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted.
+
+ regulator-temp-error-kelvin:
+ description: Set over temperature error limit. This is a limit where part of
+ the hardware is probably malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted.
+
+ regulator-temp-warn-kelvin:
+ description: Set over temperature warning limit. This is a limit where
+ hardware is assumed still to be functional but approaching a limit where it
+ gets damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted.
+
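
All of the limit properties above share one encoding: 0 disables the
detection, 1 enables it while keeping the hardware-default limit, and any
other value sets an explicit limit. A minimal parsing sketch under that
assumption (hypothetical helper, not the regulator core's actual code)::

    #include <linux/of.h>

    /* Hypothetical decode of one limit property: 0 = disable,
     * 1 = enable with the hardware-default limit, else explicit limit.
     */
    static int parse_monitor_limit(struct device_node *np, const char *prop,
                                   bool *enabled, u32 *limit)
    {
            u32 val;
            int ret = of_property_read_u32(np, prop, &val);

            if (ret)
                    return ret;             /* property absent or malformed */

            *enabled = (val != 0);
            *limit = (val > 1) ? val : 0;   /* 0 here means "no explicit limit" */
            return 0;
    }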
regulator-active-discharge:
description: |
tristate, enable/disable active discharge of regulators. The values are:
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml
new file mode 100644
index 000000000000..0534b0d68359
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rt6160-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT6160 BuckBoost converter
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ The RT6160 is a high-efficiency buck-boost converter that can provide
+ up to 3A output current with an output voltage range of 2025mV to 5200mV.
+ It supports a wide input voltage range from 2200mV to 5500mV.
+
+ Datasheet is available at
+ https://www.richtek.com/assets/product_file/RT6160A/DS6160A-00.pdf
+
+allOf:
+ - $ref: regulator.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt6160
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ description: A connection to the 'enable' GPIO line.
+ maxItems: 1
+
+ richtek,vsel-active-low:
+ description: |
+ Indicates the active level of the 'vsel' pin. If not specified, the
+ active-high level is used as the default.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rt6160@75 {
+ compatible = "richtek,rt6160";
+ reg = <0x75>;
+ enable-gpios = <&gpio26 2 0>;
+ regulator-name = "rt6160-buckboost";
+ regulator-min-microvolt = <2025000>;
+ regulator-max-microvolt = <5200000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml
new file mode 100644
index 000000000000..796ceac87445
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rt6245-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT6245 High Current Voltage Regulator
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ The RT6245 is a high-performance, synchronous step-down converter
+ that can deliver up to 14A output current with an input supply
+ voltage range of 4.5V to 17V.
+
+allOf:
+ - $ref: regulator.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt6245
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ description: |
+ A connection to the chip's 'enable' GPIO line. If not provided,
+ the device is treated as a default-on supply.
+ maxItems: 1
+
+ richtek,oc-level-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
+ description: |
+ Over current level selection. The values map to current limits of
+ 8A, 14A, 12A and 10A respectively. If this property is missing, the
+ chip default is kept.
+
+ richtek,ot-level-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2]
+ description: |
+ Over temperature level selection. The values map to 150°C, 130°C and
+ 170°C respectively. If this property is missing, the chip default is
+ kept.
+
+ richtek,pgdly-time-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
+ description: |
+ Power good signal delay time selection. The values map to delays of
+ 0us, 10us, 20us and 40us respectively. If this property is missing,
+ the chip default is kept.
+
+ richtek,switch-freq-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2]
+ description: |
+ Buck switch frequency selection. The values map to 400kHz, 800kHz and
+ 1200kHz respectively. If this property is missing, the chip default is
+ kept.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rt6245@34 {
+ compatible = "richtek,rt6245";
+ reg = <0x34>;
+ enable-gpios = <&gpio26 2 0>;
+
+ regulator-name = "rt6245-regulator";
+ regulator-min-microvolt = <437500>;
+ regulator-max-microvolt = <1387500>;
+ regulator-boot-on;
+ };
+ };
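
Because the ``richtek,oc-level-select`` encoding is not monotonic, a driver
would typically translate it through a lookup table rather than arithmetic.
An illustrative sketch (hypothetical table name, assuming microamp units)::

    /* richtek,oc-level-select values to current limits; the order
     * (8A, 14A, 12A, 10A) follows the property description above.
     */
    static const unsigned int rt6245_oc_limit_ua[] = {
            [0] = 8000000,
            [1] = 14000000,
            [2] = 12000000,
            [3] = 10000000,
    };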
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
index b6515a0cee62..7cb74cc8c5d9 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
@@ -27,6 +27,12 @@ patternProperties:
Properties for single regulator.
$ref: "regulator.yaml#"
+ properties:
+ rohm,ocw-fet-ron-micro-ohms:
+ description: |
+ External FET's ON-resistance. Required if VoutS1 OCP/OCW is
+ to be set.
+
required:
- regulator-name
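
The ON-resistance is needed because the over-current hardware watches the
voltage drop across the external FET, so a requested current limit has to be
converted into a voltage threshold via Ohm's law. A rough sketch of that
unit conversion (hypothetical helper, not the actual BD9576 driver code)::

    #include <linux/math64.h>

    /* V = I * R, scaled so that microvolts = microamps * micro-ohms / 10^6. */
    static u32 ocw_limit_to_microvolts(u32 limit_ua, u32 ron_uohm)
    {
            return (u32)div_u64((u64)limit_ua * ron_uohm, 1000000);
    }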
diff --git a/Documentation/devicetree/bindings/soc/microchip/microchip,polarfire-soc-sys-controller.yaml b/Documentation/devicetree/bindings/soc/microchip/microchip,polarfire-soc-sys-controller.yaml
new file mode 100644
index 000000000000..2cd3bc6bd8d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/microchip/microchip,polarfire-soc-sys-controller.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/microchip/microchip,polarfire-soc-sys-controller.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Microchip PolarFire SoC (MPFS) MSS (microprocessor subsystem) system controller
+
+maintainers:
+ - Conor Dooley <conor.dooley@microchip.com>
+
+description: |
+ The PolarFire SoC system controller is accessed via a mailbox.
+ This document describes the bindings for the client portion of that mailbox.
+
+properties:
+ mboxes:
+ maxItems: 1
+
+ compatible:
+ const: microchip,polarfire-soc-sys-controller
+
+required:
+ - compatible
+ - mboxes
+
+additionalProperties: false
+
+examples:
+ - |
+ syscontroller: syscontroller {
+ compatible = "microchip,polarfire-soc-sys-controller";
+ mboxes = <&mbox 0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
index db61f0731a20..2e35aeaa8781 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
@@ -57,7 +57,7 @@ patternProperties:
rate
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the CPU DAI
patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
properties:
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the codec DAI
required:
diff --git a/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt b/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
deleted file mode 100644
index fb1a6728638d..000000000000
--- a/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Renesas RZ/N1 SPI Controller
-
-This controller is based on the Synopsys DW Synchronous Serial Interface and
-inherits all properties defined in snps,dw-apb-ssi.txt except for the
-compatible property.
-
-Required properties:
-- compatible : The device specific string followed by the generic RZ/N1 string.
- Therefore it must be one of:
- "renesas,r9a06g032-spi", "renesas,rzn1-spi"
- "renesas,r9a06g033-spi", "renesas,rzn1-spi"
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index 4825157cd92e..ca91201a9926 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -67,6 +67,12 @@ properties:
const: baikal,bt1-sys-ssi
- description: Canaan Kendryte K210 SoS SPI Controller
const: canaan,k210-spi
+ - description: Renesas RZ/N1 SPI Controller
+ items:
+ - enum:
+ - renesas,r9a06g032-spi # RZ/N1D
+ - renesas,r9a06g033-spi # RZ/N1S
+ - const: renesas,rzn1-spi # RZ/N1
reg:
minItems: 1
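
The device-specific string followed by the generic fallback means one driver
entry can match every RZ/N1 variant. An illustrative match table (a sketch,
not necessarily how the DW SPI driver actually spells it)::

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct of_device_id rzn1_spi_of_match[] = {
            /* Matches both r9a06g032 and r9a06g033 via the fallback. */
            { .compatible = "renesas,rzn1-spi" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, rzn1_spi_of_match);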
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.txt b/Documentation/devicetree/bindings/spi/spi-cadence.txt
deleted file mode 100644
index 05a2ef945664..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-cadence.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Cadence SPI controller Device Tree Bindings
--------------------------------------------
-
-Required properties:
-- compatible : Should be "cdns,spi-r1p6" or "xlnx,zynq-spi-r1p6".
-- reg : Physical base address and size of SPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "ref_clk", "pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-
-Optional properties:
-- num-cs : Number of chip selects used.
- If a decoder is used, this will be the number of
- chip selects after the decoder.
-- is-decoded-cs : Flag to indicate whether decoder is used or not.
-
-Example:
-
- spi@e0007000 {
- compatible = "xlnx,zynq-spi-r1p6";
- clock-names = "ref_clk", "pclk";
- clocks = <&clkc 26>, <&clkc 35>;
- interrupt-parent = <&intc>;
- interrupts = <0 49 4>;
- num-cs = <4>;
- is-decoded-cs = <0>;
- reg = <0xe0007000 0x1000>;
- } ;
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
new file mode 100644
index 000000000000..9787be21318e
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-cadence.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence SPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - cdns,spi-r1p6
+ - xlnx,zynq-spi-r1p6
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: pclk
+
+ clocks:
+ maxItems: 2
+
+ num-cs:
+ description: |
+ Number of chip selects used. If a decoder is used,
+ this will be the number of chip selects after the
+ decoder.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+ default: 4
+
+ is-decoded-cs:
+ description: |
+ Flag to indicate whether decoder is used or not.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ default: 0
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@e0007000 {
+ compatible = "xlnx,zynq-spi-r1p6";
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 26>, <&clkc 35>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 49 4>;
+ num-cs = <4>;
+ is-decoded-cs = <0>;
+ reg = <0xe0007000 0x1000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 0477396e4945..faef4f6f55b8 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -114,8 +114,11 @@ patternProperties:
Compatible of the SPI device.
reg:
- minimum: 0
- maximum: 256
+ minItems: 1
+ maxItems: 256
+ items:
+ minimum: 0
+ maximum: 256
description:
Chip select used by the device.
diff --git a/Documentation/devicetree/bindings/spi/spi-mux.yaml b/Documentation/devicetree/bindings/spi/spi-mux.yaml
index d09c6355e22d..51c7622dc20b 100644
--- a/Documentation/devicetree/bindings/spi/spi-mux.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-mux.yaml
@@ -72,7 +72,7 @@ examples:
mux-controls = <&mux>;
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 1e6cf29e6388..7f987e79337c 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rv1126-spi
- const: rockchip,rk3066-spi
reg:
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
deleted file mode 100644
index 5f4ed3e5c994..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Xilinx SPI controller Device Tree Bindings
--------------------------------------------------
-
-Required properties:
-- compatible : Should be "xlnx,xps-spi-2.00.a", "xlnx,xps-spi-2.00.b" or "xlnx,axi-quad-spi-1.00.a"
-- reg : Physical base address and size of SPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-
-Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
-- xlnx,num-transfer-bits : Number of bits per transfer. This will be 8 if not specified
-
-Example:
- axi_quad_spi@41e00000 {
- compatible = "xlnx,xps-spi-2.00.a";
- interrupt-parent = <&intc>;
- interrupts = <0 31 1>;
- reg = <0x41e00000 0x10000>;
- xlnx,num-ss-bits = <0x1>;
- xlnx,num-transfer-bits = <32>;
- };
-
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
new file mode 100644
index 000000000000..593f7693bace
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-xilinx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx SPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - xlnx,xps-spi-2.00.a
+ - xlnx,xps-spi-2.00.b
+ - xlnx,axi-quad-spi-1.00.a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ xlnx,num-ss-bits:
+ description: Number of chip selects used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+
+ xlnx,num-transfer-bits:
+ description: Number of bits per transfer. This will be 8 if not specified.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16, 32]
+ default: 8
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi0: spi@41e00000 {
+ compatible = "xlnx,xps-spi-2.00.a";
+ interrupt-parent = <&intc>;
+ interrupts = <0 31 1>;
+ reg = <0x41e00000 0x10000>;
+ xlnx,num-ss-bits = <0x1>;
+ xlnx,num-transfer-bits = <32>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
deleted file mode 100644
index 0f6d37ff541c..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
--------------------------------------------------------------------
-
-Required properties:
-- compatible : Should be "xlnx,zynqmp-qspi-1.0".
-- reg : Physical base address and size of GQSPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "ref_clk", "pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-
-Optional properties:
-- num-cs : Number of chip selects used.
-
-Example:
- qspi: spi@ff0f0000 {
- compatible = "xlnx,zynqmp-qspi-1.0";
- clock-names = "ref_clk", "pclk";
- clocks = <&misc_clk &misc_clk>;
- interrupts = <0 15 4>;
- interrupt-parent = <&gic>;
- num-cs = <1>;
- reg = <0x0 0xff0f0000 0x1000>,<0x0 0xc0000000 0x8000000>;
- };
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
new file mode 100644
index 000000000000..ea72c8001256
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-zynqmp-qspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ const: xlnx,zynqmp-qspi-1.0
+
+ reg:
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: pclk
+
+ clocks:
+ maxItems: 2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ qspi: spi@ff0f0000 {
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/submitting-patches.rst b/Documentation/devicetree/bindings/submitting-patches.rst
index 104fa8fb2c17..8087780f1685 100644
--- a/Documentation/devicetree/bindings/submitting-patches.rst
+++ b/Documentation/devicetree/bindings/submitting-patches.rst
@@ -7,8 +7,8 @@ Submitting Devicetree (DT) binding patches
I. For patch submitters
=======================
- 0) Normal patch submission rules from Documentation/process/submitting-patches.rst
- applies.
+ 0) Normal patch submission rules from
+ Documentation/process/submitting-patches.rst applies.
1) The Documentation/ and include/dt-bindings/ portion of the patch should
be a separate patch. The preferred subject prefix for binding patches is::
@@ -25,8 +25,8 @@ I. For patch submitters
make dt_binding_check
- See Documentation/devicetree/bindings/writing-schema.rst for more details about
- schema and tools setup.
+ See Documentation/devicetree/bindings/writing-schema.rst for more details
+ about schema and tools setup.
3) DT binding files should be dual licensed. The preferred license tag is
(GPL-2.0-only OR BSD-2-Clause).
@@ -84,7 +84,8 @@ II. For kernel maintainers
III. Notes
==========
- 0) Please see :doc:`ABI` for details regarding devicetree ABI.
+ 0) Please see Documentation/devicetree/bindings/ABI.rst for details
+ regarding devicetree ABI.
1) This document is intended as a general familiarization with the process as
decided at the 2013 Kernel Summit. When in doubt, the current word of the
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 8341e9d23c1e..37ac0a3ae3b4 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -73,6 +73,8 @@ properties:
- dallas,ds4510
# Digital Thermometer and Thermostat
- dallas,ds75
+ # Delta Electronics DPS920AB 920W 54V Power Supply
+ - delta,dps920ab
# 1/4 Brick DC/DC Regulated Power Module
- delta,q54sj108a2
# Devantech SRF02 ultrasonic ranger in I2C mode
@@ -103,6 +105,8 @@ properties:
- fsl,mpl3115
# MPR121: Proximity Capacitive Touch Sensor Controller
- fsl,mpr121
+ # Monolithic Power Systems Inc. multi-phase controller mp2888
+ - mps,mp2888
# Monolithic Power Systems Inc. multi-phase controller mp2975
- mps,mp2975
# G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
diff --git a/Documentation/doc-guide/contributing.rst b/Documentation/doc-guide/contributing.rst
index 67ee3691f91f..207fd93d7c80 100644
--- a/Documentation/doc-guide/contributing.rst
+++ b/Documentation/doc-guide/contributing.rst
@@ -237,10 +237,10 @@ We have been trying to improve the situation through the creation of
a set of "books" that group documentation for specific readers. These
include:
- - :doc:`../admin-guide/index`
- - :doc:`../core-api/index`
- - :doc:`../driver-api/index`
- - :doc:`../userspace-api/index`
+ - Documentation/admin-guide/index.rst
+ - Documentation/core-api/index.rst
+ - Documentation/driver-api/index.rst
+ - Documentation/userspace-api/index.rst
As well as this book on documentation itself.
diff --git a/Documentation/driver-api/acpi/linuxized-acpica.rst b/Documentation/driver-api/acpi/linuxized-acpica.rst
index 6bee03383225..cc234353d2c4 100644
--- a/Documentation/driver-api/acpi/linuxized-acpica.rst
+++ b/Documentation/driver-api/acpi/linuxized-acpica.rst
@@ -276,4 +276,4 @@ before they become available from the ACPICA release process.
# git clone https://github.com/acpica/acpica
# git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
# cd acpica
- # generate/linux/divergences.sh -s ../linux
+ # generate/linux/divergence.sh -s ../linux
diff --git a/Documentation/driver-api/gpio/using-gpio.rst b/Documentation/driver-api/gpio/using-gpio.rst
index dda069444032..64c8d3f76c3a 100644
--- a/Documentation/driver-api/gpio/using-gpio.rst
+++ b/Documentation/driver-api/gpio/using-gpio.rst
@@ -9,13 +9,13 @@ with them.
For examples of already existing generic drivers that will also be good
examples for any other kernel drivers you want to author, refer to
-:doc:`drivers-on-gpio`
+Documentation/driver-api/gpio/drivers-on-gpio.rst
For any kind of mass produced system you want to support, such as servers,
laptops, phones, tablets, routers, and any consumer or office or business goods
using appropriate kernel drivers is paramount. Submit your code for inclusion
in the upstream Linux kernel when you feel it is mature enough and you will get
-help to refine it, see :doc:`../../process/submitting-patches`.
+help to refine it, see Documentation/process/submitting-patches.rst.
In Linux GPIO lines also have a userspace ABI.
diff --git a/Documentation/driver-api/ioctl.rst b/Documentation/driver-api/ioctl.rst
index c455db0e1627..35795f6a151a 100644
--- a/Documentation/driver-api/ioctl.rst
+++ b/Documentation/driver-api/ioctl.rst
@@ -25,16 +25,16 @@ ioctl commands that follow modern conventions: ``_IO``, ``_IOR``,
with the correct parameters:
_IO/_IOR/_IOW/_IOWR
- The macro name specifies how the argument will be used.  It may be a
+ The macro name specifies how the argument will be used. It may be a
pointer to data to be passed into the kernel (_IOW), out of the kernel
- (_IOR), or both (_IOWR).  _IO can indicate either commands with no
+ (_IOR), or both (_IOWR). _IO can indicate either commands with no
argument or those passing an integer value instead of a pointer.
It is recommended to only use _IO for commands without arguments,
and use pointers for passing data.
type
An 8-bit number, often a character literal, specific to a subsystem
- or driver, and listed in :doc:`../userspace-api/ioctl/ioctl-number`
+ or driver, and listed in Documentation/userspace-api/ioctl/ioctl-number.rst
nr
An 8-bit number identifying the specific command, unique for a give
@@ -200,10 +200,10 @@ cause an information leak, which can be used to defeat kernel address
space layout randomization (KASLR), helping in an attack.
For this reason (and for compat support) it is best to avoid any
-implicit padding in data structures.  Where there is implicit padding
+implicit padding in data structures. Where there is implicit padding
in an existing structure, kernel drivers must be careful to fully
initialize an instance of the structure before copying it to user
-space.  This is usually done by calling memset() before assigning to
+space. This is usually done by calling memset() before assigning to
individual members.
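
As a concrete illustration of the memset() idiom (made-up struct and
function names; any real driver's layout will differ)::

    #include <linux/jiffies.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct sample_info {
            __u32 value;
            /* 4 bytes of implicit padding here on most ABIs */
            __u64 stamp;
    };

    static long sample_ioctl_get(void __user *argp)
    {
            struct sample_info info;

            memset(&info, 0, sizeof(info)); /* also clears the padding */
            info.value = 42;
            info.stamp = get_jiffies_64();

            if (copy_to_user(argp, &info, sizeof(info)))
                    return -EFAULT;
            return 0;
    }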
Subsystem abstractions
diff --git a/Documentation/driver-api/media/drivers/bttv-devel.rst b/Documentation/driver-api/media/drivers/bttv-devel.rst
index c9aa8b95a5e5..0885a04563a9 100644
--- a/Documentation/driver-api/media/drivers/bttv-devel.rst
+++ b/Documentation/driver-api/media/drivers/bttv-devel.rst
@@ -21,7 +21,7 @@ log, telling which card type is used. Like this one::
You should verify this is correct. If it isn't, you have to pass the
correct board type as insmod argument, ``insmod bttv card=2`` for
-example. The file :doc:`/admin-guide/media/bttv-cardlist` has a list
+example. The file Documentation/admin-guide/media/bttv-cardlist.rst has a list
of valid arguments for card.
If your card isn't listed there, you might check the source code for
diff --git a/Documentation/driver-api/media/drivers/ccs/ccs-regs.asc b/Documentation/driver-api/media/drivers/ccs/ccs-regs.asc
index f2042acc8a45..bbf9213c3388 100644
--- a/Documentation/driver-api/media/drivers/ccs/ccs-regs.asc
+++ b/Documentation/driver-api/media/drivers/ccs/ccs-regs.asc
@@ -210,7 +210,7 @@ pll_multiplier 0x0306 16
op_pix_clk_div 0x0308 16
op_sys_clk_div 0x030a 16
op_pre_pll_clk_div 0x030c 16
-op_pll_multiplier 0x031e 16
+op_pll_multiplier 0x030e 16
pll_mode 0x0310 8
- f 0 0
- e single 0
diff --git a/Documentation/driver-api/media/drivers/ccs/mk-ccs-regs b/Documentation/driver-api/media/drivers/ccs/mk-ccs-regs
index 6668deaf2f19..2a4edc7e051a 100755
--- a/Documentation/driver-api/media/drivers/ccs/mk-ccs-regs
+++ b/Documentation/driver-api/media/drivers/ccs/mk-ccs-regs
@@ -72,13 +72,14 @@ $uc_header =~ s/[^A-Z0-9]/_/g;
my $copyright = "/* Copyright (C) 2019--2020 Intel Corporation */\n";
my $license = "SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause";
+my $note = "/*\n * Generated by $0;\n * do not modify.\n */\n";
for my $fh ($A, $LC) {
- print $fh "// $license\n$copyright\n" if defined $fh;
+ print $fh "// $license\n$copyright$note\n" if defined $fh;
}
for my $fh ($H, $LH) {
- print $fh "/* $license */\n$copyright\n";
+ print $fh "/* $license */\n$copyright$note\n";
}
sub bit_def($) {
diff --git a/Documentation/driver-api/media/drivers/zoran.rst b/Documentation/driver-api/media/drivers/zoran.rst
index 83cbae9cedef..b205e10c3154 100644
--- a/Documentation/driver-api/media/drivers/zoran.rst
+++ b/Documentation/driver-api/media/drivers/zoran.rst
@@ -319,7 +319,7 @@ Conexant bt866 TV encoder
~~~~~~~~~~~~~~~~~~~~~~~~~
- is used in AVS6EYES, and
-- can generate: NTSC/PAL, PAL­M, PAL­N
+- can generate: NTSC/PAL, PAL-M, PAL-N
The adv717x, should be able to produce PAL N. But you find nothing PAL N
specific in the registers. Seem that you have to reuse a other standard
diff --git a/Documentation/driver-api/media/index.rst b/Documentation/driver-api/media/index.rst
index 2ad71dfa8828..813d7db59da7 100644
--- a/Documentation/driver-api/media/index.rst
+++ b/Documentation/driver-api/media/index.rst
@@ -11,11 +11,13 @@ its supported drivers.
Please see:
-- :doc:`/admin-guide/media/index`
- for usage information about media subsystem and supported drivers;
+Documentation/admin-guide/media/index.rst
-- :doc:`/userspace-api/media/index`
- for the userspace APIs used on media devices.
+ - for usage information about media subsystem and supported drivers;
+
+Documentation/userspace-api/media/index.rst
+
+ - for the userspace APIs used on media devices.
.. only:: html
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 6b3bfd29fd84..d448cb57df86 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -217,7 +217,7 @@ system-wide transition to a sleep state even though its :c:member:`runtime_auto`
flag is clear.
For more information about the runtime power management framework, refer to
-:file:`Documentation/power/runtime_pm.rst`.
+Documentation/power/runtime_pm.rst.
Calling Drivers to Enter and Leave System Sleep States
@@ -655,7 +655,7 @@ been thawed. Generally speaking, the PM notifiers are suitable for performing
actions that either require user space to be available, or at least won't
interfere with user space.
-For details refer to :doc:`notifiers`.
+For details refer to Documentation/driver-api/pm/notifiers.rst.
Device Low-Power (suspend) States
@@ -726,7 +726,7 @@ it into account in any way.
Devices may be defined as IRQ-safe which indicates to the PM core that their
runtime PM callbacks may be invoked with disabled interrupts (see
-:file:`Documentation/power/runtime_pm.rst` for more information). If an
+Documentation/power/runtime_pm.rst for more information). If an
IRQ-safe device belongs to a PM domain, the runtime PM of the domain will be
disallowed, unless the domain itself is defined as IRQ-safe. However, it
makes sense to define a PM domain as IRQ-safe only if all the devices in it
@@ -805,7 +805,7 @@ The ``DPM_FLAG_MAY_SKIP_RESUME`` Driver Flag
--------------------------------------------
During system-wide resume from a sleep state it's easiest to put devices into
-the full-power state, as explained in :file:`Documentation/power/runtime_pm.rst`.
+the full-power state, as explained in Documentation/power/runtime_pm.rst.
[Refer to that document for more information regarding this particular issue as
well as for information on the device runtime power management framework in
general.] However, it often is desirable to leave devices in suspend after
diff --git a/Documentation/driver-api/surface_aggregator/clients/cdev.rst b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
index 248c1372d879..0134a841a079 100644
--- a/Documentation/driver-api/surface_aggregator/clients/cdev.rst
+++ b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
@@ -1,9 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0+
-.. |u8| replace:: :c:type:`u8 <u8>`
-.. |u16| replace:: :c:type:`u16 <u16>`
.. |ssam_cdev_request| replace:: :c:type:`struct ssam_cdev_request <ssam_cdev_request>`
.. |ssam_cdev_request_flags| replace:: :c:type:`enum ssam_cdev_request_flags <ssam_cdev_request_flags>`
+.. |ssam_cdev_event| replace:: :c:type:`struct ssam_cdev_event <ssam_cdev_event>`
==============================
User-Space EC Interface (cdev)
@@ -23,6 +22,40 @@ These IOCTLs and their respective input/output parameter structs are defined in
A small python library and scripts for accessing this interface can be found
at https://github.com/linux-surface/surface-aggregator-module/tree/master/scripts/ssam.
+.. contents::
+
+
+Receiving Events
+================
+
+Events can be received by reading from the device file. They are represented
+by the |ssam_cdev_event| datatype.
+
+Before events are available to be read, however, the desired notifiers must be
+registered via the ``SSAM_CDEV_NOTIF_REGISTER`` IOCTL. Notifiers are, in
+essence, callbacks, called when the EC sends an event. They are, in this
+interface, associated with a specific target category and device-file-instance.
+They forward any event of this category to the buffer of the corresponding
+instance, from which it can then be read.
+
+Notifiers themselves do not enable events on the EC. Thus, it may additionally
+be necessary to enable events via the ``SSAM_CDEV_EVENT_ENABLE`` IOCTL. While
+notifiers work per-client (i.e. per-device-file-instance), events are enabled
+globally, for the EC and all of its clients (regardless of userspace or
+non-userspace). The ``SSAM_CDEV_EVENT_ENABLE`` and ``SSAM_CDEV_EVENT_DISABLE``
+IOCTLs take care of reference counting the events, such that an event is
+enabled as long as there is a client that has requested it.
+
+Note that enabled events are not automatically disabled once the client
+instance is closed. Therefore any client process (or group of processes) should
+balance their event enable calls with the corresponding event disable calls. It
+is, however, perfectly valid to enable and disable events on different client
+instances. For example, it is valid to set up notifiers and read events on
+client instance ``A``, enable those events on instance ``B`` (note that these
+will also be received by ``A`` since events are enabled/disabled globally), and
+after no more events are desired, disable the previously enabled events via
+instance ``C``.
+
Controller IOCTLs
=================
@@ -45,9 +78,33 @@ The following IOCTLs are provided:
- ``REQUEST``
- Perform synchronous SAM request.
+ * - ``0xA5``
+ - ``2``
+ - ``W``
+ - ``NOTIF_REGISTER``
+ - Register event notifier.
-``REQUEST``
------------
+ * - ``0xA5``
+ - ``3``
+ - ``W``
+ - ``NOTIF_UNREGISTER``
+ - Unregister event notifier.
+
+ * - ``0xA5``
+ - ``4``
+ - ``W``
+ - ``EVENT_ENABLE``
+ - Enable event source.
+
+ * - ``0xA5``
+ - ``5``
+ - ``W``
+ - ``EVENT_DISABLE``
+ - Disable event source.
+
+
+``SSAM_CDEV_REQUEST``
+---------------------
Defined as ``_IOWR(0xA5, 1, struct ssam_cdev_request)``.
@@ -82,6 +139,66 @@ submitted, and completed (i.e. handed back to user-space) successfully from
inside the IOCTL, but the request ``status`` member may still be negative in
case the actual execution of the request failed after it has been submitted.
-A full definition of the argument struct is provided below:
+A full definition of the argument struct is provided below.
+
+``SSAM_CDEV_NOTIF_REGISTER``
+----------------------------
+
+Defined as ``_IOW(0xA5, 2, struct ssam_cdev_notifier_desc)``.
+
+Register a notifier for the event target category specified in the given
+notifier description with the specified priority. Notifier registration is
+required to receive events, but does not enable the events themselves. After a
+notifier for a specific target category has been registered, all events of that
+category will be forwarded to the userspace client and can then be read from
+the device file instance. Note that events may have to be enabled, e.g. via the
+``SSAM_CDEV_EVENT_ENABLE`` IOCTL, before the EC will send them.
+
+Only one notifier can be registered per target category and client instance. If
+a notifier has already been registered, this IOCTL will fail with ``-EEXIST``.
+
+Notifiers will automatically be removed when the device file instance is
+closed.
+
+``SSAM_CDEV_NOTIF_UNREGISTER``
+------------------------------
+
+Defined as ``_IOW(0xA5, 3, struct ssam_cdev_notifier_desc)``.
+
+Unregisters the notifier associated with the specified target category. The
+priority field will be ignored by this IOCTL. If no notifier has been
+registered for this client instance and the given category, this IOCTL will
+fail with ``-ENOENT``.
+
+``SSAM_CDEV_EVENT_ENABLE``
+--------------------------
+
+Defined as ``_IOW(0xA5, 4, struct ssam_cdev_event_desc)``.
+
+Enable the event associated with the given event descriptor.
+
+Note that this call will not register a notifier itself, it will only enable
+events on the controller. If you want to receive events by reading from the
+device file, you will need to register the corresponding notifier(s) on that
+instance.
+
+Events are not automatically disabled when the device file is closed. This must
+be done manually, via a call to the ``SSAM_CDEV_EVENT_DISABLE`` IOCTL.
+
+``SSAM_CDEV_EVENT_DISABLE``
+---------------------------
+
+Defined as ``_IOW(0xA5, 5, struct ssam_cdev_event_desc)``.
+
+Disable the event associated with the given event descriptor.
+
+Note that this will not unregister any notifiers. Events may still be received
+and forwarded to user-space after this call. The only safe way of stopping
+events from being received is unregistering all previously registered
+notifiers.
+
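
Putting the pieces together, a user-space client might drive this interface
roughly as follows. This is a sketch only: the device node path and the
struct initializers are assumptions, and error handling is omitted; the
authoritative definitions live in
``include/uapi/linux/surface_aggregator/cdev.h``::

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/surface_aggregator/cdev.h>

    int main(void)
    {
            char buf[4096];
            int fd = open("/dev/surface/aggregator", O_RDWR); /* assumed path */

            if (fd < 0)
                    return 1;

            /* Field names below are assumptions -- check the UAPI header. */
            struct ssam_cdev_notifier_desc nd = {
                    .priority = 1,
                    .target_category = 0x15,  /* example category */
            };
            struct ssam_cdev_event_desc ed = { /* matching event descriptor */ };

            ioctl(fd, SSAM_CDEV_NOTIF_REGISTER, &nd); /* forward events here */
            ioctl(fd, SSAM_CDEV_EVENT_ENABLE, &ed);   /* ask the EC to send them */

            read(fd, buf, sizeof(buf));               /* blocks until an event */

            ioctl(fd, SSAM_CDEV_EVENT_DISABLE, &ed);  /* balance the enable */
            close(fd);
            return 0;
    }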
+
+Structures and Enums
+====================
.. kernel-doc:: include/uapi/linux/surface_aggregator/cdev.h
diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
index 98ea9946b8a2..30160513afa5 100644
--- a/Documentation/driver-api/surface_aggregator/clients/index.rst
+++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
@@ -5,7 +5,8 @@ Client Driver Documentation
===========================
This is the documentation for client drivers themselves. Refer to
-:doc:`../client` for documentation on how to write client drivers.
+Documentation/driver-api/surface_aggregator/client.rst for documentation
+on how to write client drivers.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/driver-api/surface_aggregator/internal.rst b/Documentation/driver-api/surface_aggregator/internal.rst
index 72704734982a..8c7c80c9f418 100644
--- a/Documentation/driver-api/surface_aggregator/internal.rst
+++ b/Documentation/driver-api/surface_aggregator/internal.rst
@@ -87,10 +87,11 @@ native SSAM devices, i.e. devices that are not defined in ACPI and not
implemented as platform devices, via |ssam_device| and |ssam_device_driver|
simplify management of client devices and client drivers.
-Refer to :doc:`client` for documentation regarding the client device/driver
-API and interface options for other kernel drivers. It is recommended to
-familiarize oneself with that chapter and the :doc:`ssh` before continuing
-with the architectural overview below.
+Refer to Documentation/driver-api/surface_aggregator/client.rst for
+documentation regarding the client device/driver API and interface options
+for other kernel drivers. It is recommended to familiarize oneself with
+that chapter and the Documentation/driver-api/surface_aggregator/ssh.rst
+before continuing with the architectural overview below.
Packet Transport Layer
@@ -190,9 +191,9 @@ with success on the transmitter thread.
Transmission of sequenced packets is limited by the number of concurrently
pending packets, i.e. a limit on how many packets may be waiting for an ACK
-from the EC in parallel. This limit is currently set to one (see :doc:`ssh`
-for the reasoning behind this). Control packets (i.e. ACK and NAK) can
-always be transmitted.
+from the EC in parallel. This limit is currently set to one (see
+Documentation/driver-api/surface_aggregator/ssh.rst for the reasoning behind
+this). Control packets (i.e. ACK and NAK) can always be transmitted.
Receiver Thread
---------------
diff --git a/Documentation/driver-api/surface_aggregator/overview.rst b/Documentation/driver-api/surface_aggregator/overview.rst
index 1e9d57e50063..26415e1ab7da 100644
--- a/Documentation/driver-api/surface_aggregator/overview.rst
+++ b/Documentation/driver-api/surface_aggregator/overview.rst
@@ -73,5 +73,7 @@ being a direct response to a previous request. We may also refer to requests
without response as commands. In general, events need to be enabled via one
of multiple dedicated requests before they are sent by the EC.
-See :doc:`ssh` for a more technical protocol documentation and
-:doc:`internal` for an overview of the internal driver architecture.
+See Documentation/driver-api/surface_aggregator/ssh.rst for a
+more technical protocol documentation and
+Documentation/driver-api/surface_aggregator/internal.rst for an
+overview of the internal driver architecture.
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index 4b638c14bc16..c93fa5e961a0 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -740,21 +740,15 @@ possible.
5. thermal_emergency_poweroff
=============================
-On an event of critical trip temperature crossing. Thermal framework
-allows the system to shutdown gracefully by calling orderly_poweroff().
-In the event of a failure of orderly_poweroff() to shut down the system
-we are in danger of keeping the system alive at undesirably high
-temperatures. To mitigate this high risk scenario we program a work
-queue to fire after a pre-determined number of seconds to start
-an emergency shutdown of the device using the kernel_power_off()
-function. In case kernel_power_off() fails then finally
-emergency_restart() is called in the worst case.
+When a critical trip temperature is crossed, the thermal framework
+shuts down the system by calling hw_protection_shutdown().
+hw_protection_shutdown() first attempts an orderly shutdown, but
+accepts a delay after which it proceeds to a forced power-off or, as a
+last resort, an emergency_restart().
The delay should be carefully profiled so as to give adequate time for
-orderly_poweroff(). In case of failure of an orderly_poweroff() the
-emergency poweroff kicks in after the delay has elapsed and shuts down
-the system.
+an orderly poweroff.
-If set to 0 emergency poweroff will not be supported. So a carefully
-profiled non-zero positive value is a must for emergency poweroff to be
-triggered.
+If the delay is set to 0, emergency poweroff will not be supported, so a
+carefully profiled positive value is a must for emergency poweroff to
+be triggered.
diff --git a/Documentation/driver-api/usb/dma.rst b/Documentation/driver-api/usb/dma.rst
index 2b3dbd3265b4..d32c27e11b90 100644
--- a/Documentation/driver-api/usb/dma.rst
+++ b/Documentation/driver-api/usb/dma.rst
@@ -10,7 +10,7 @@ API overview
The big picture is that USB drivers can continue to ignore most DMA issues,
though they still must provide DMA-ready buffers (see
-:doc:`/core-api/dma-api-howto`). That's how they've worked through
+Documentation/core-api/dma-api-howto.rst). That's how they've worked through
the 2.4 (and earlier) kernels, or they can now be DMA-aware.
DMA-aware usb drivers:
@@ -60,7 +60,7 @@ and effects like cache-trashing can impose subtle penalties.
force a consistent memory access ordering by using memory barriers. It's
not using a streaming DMA mapping, so it's good for small transfers on
systems where the I/O would otherwise thrash an IOMMU mapping. (See
- :doc:`/core-api/dma-api-howto` for definitions of "coherent" and
+ Documentation/core-api/dma-api-howto.rst for definitions of "coherent" and
"streaming" DMA mappings.)
Asking for 1/Nth of a page (as well as asking for N pages) is reasonably
@@ -91,7 +91,7 @@ Working with existing buffers
Existing buffers aren't usable for DMA without first being mapped into the
DMA address space of the device. However, most buffers passed to your
driver can safely be used with such DMA mapping. (See the first section
-of :doc:`/core-api/dma-api-howto`, titled "What memory is DMA-able?")
+of Documentation/core-api/dma-api-howto.rst, titled "What memory is DMA-able?")
- When you're using scatterlists, you can map everything at once. On some
systems, this kicks in an IOMMU and turns the scatterlists into single
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index 820e867af45a..2c94ff2f4385 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -123,6 +123,8 @@ are in ``drivers/usb/common/common.c``.
In addition, some functions useful for creating debugging output are
defined in ``drivers/usb/common/debug.c``.
+.. _usb_header:
+
Host-Side Data Types and Macros
===============================
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index 31ecfe44e5b4..f47d05ed0d94 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -78,8 +78,10 @@ configuration of fault-injection capabilities.
- /sys/kernel/debug/fail*/times:
- specifies how many times failures may happen at most.
- A value of -1 means "no limit".
+ specifies how many times failures may happen at most. A value of -1
+ means "no limit". Note, though, that this file only accepts unsigned
+ values, so if you want to specify -1, use 'printf' instead of 'echo',
+ e.g.: $ printf %#x -1 > times
- /sys/kernel/debug/fail*/space:
@@ -167,11 +169,13 @@ configuration of fault-injection capabilities.
- ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
- ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
-- /sys/kernel/debug/fail_function/<functiuon-name>/retval:
+- /sys/kernel/debug/fail_function/<function-name>/retval:
- specifies the "error" return value to inject to the given
- function for given function. This will be created when
- user specifies new injection entry.
+ specifies the "error" return value to inject to the given function.
+ This will be created when the user specifies a new injection entry.
+ Note that this file only accepts unsigned values. So, if you want to
+ use a negative errno, you better use 'printf' instead of 'echo', e.g.:
+ $ printf %#x -12 > retval
Boot option
^^^^^^^^^^^
@@ -255,7 +259,7 @@ Application Examples
echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
echo 10 > /sys/kernel/debug/$FAILTYPE/probability
echo 100 > /sys/kernel/debug/$FAILTYPE/interval
- echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
echo 0 > /sys/kernel/debug/$FAILTYPE/space
echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
@@ -309,7 +313,7 @@ Application Examples
echo N > /sys/kernel/debug/$FAILTYPE/task-filter
echo 10 > /sys/kernel/debug/$FAILTYPE/probability
echo 100 > /sys/kernel/debug/$FAILTYPE/interval
- echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
echo 0 > /sys/kernel/debug/$FAILTYPE/space
echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
@@ -336,11 +340,11 @@ Application Examples
FAILTYPE=fail_function
FAILFUNC=open_ctree
echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
- echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+ printf %#x -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
echo N > /sys/kernel/debug/$FAILTYPE/task-filter
echo 100 > /sys/kernel/debug/$FAILTYPE/probability
echo 0 > /sys/kernel/debug/$FAILTYPE/interval
- echo -1 > /sys/kernel/debug/$FAILTYPE/times
+ printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
echo 0 > /sys/kernel/debug/$FAILTYPE/space
echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst
new file mode 100644
index 000000000000..9a1b8fd9e82b
--- /dev/null
+++ b/Documentation/filesystems/dax.rst
@@ -0,0 +1,291 @@
+=======================
+Direct Access for files
+=======================
+
+Motivation
+----------
+
+The page cache is usually used to buffer reads and writes to files.
+It is also used to provide the pages which are mapped into userspace
+by a call to mmap.
+
+For block devices that are memory-like, the page cache pages would be
+unnecessary copies of the original storage. The `DAX` code removes the
+extra copy by performing reads and writes directly to the storage device.
+For file mappings, the storage device is mapped directly into userspace.
+
+
+Usage
+-----
+
+If you have a block device which supports `DAX`, you can make a filesystem
+on it as usual. The `DAX` code currently only supports files with a block
+size equal to your kernel's `PAGE_SIZE`, so you may need to specify a block
+size when creating the filesystem.
+
+Currently 3 filesystems support `DAX`: ext2, ext4 and xfs. The way `DAX`
+is enabled differs between them.
+
+Enabling DAX on ext2
+--------------------
+
+When mounting the filesystem, use the ``-o dax`` option on the command line or
+add 'dax' to the options in ``/etc/fstab``. This works to enable `DAX` on all files
+within the filesystem. It is equivalent to the ``-o dax=always`` behavior below.
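+
+For instance, a program could enable it via mount(2) roughly as below;
+the device and mount point are placeholders:
+
+.. code-block:: c
+
+   #include <sys/mount.h>
+
+   /* Mount an ext2 filesystem with DAX enabled for all files. */
+   int mount_ext2_dax(void)
+   {
+           return mount("/dev/pmem0", "/mnt/dax", "ext2", 0, "dax");
+   }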
+
+
+Enabling DAX on xfs and ext4
+----------------------------
+
+Summary
+-------
+
+ 1. There exists an in-kernel file access mode flag `S_DAX` that corresponds to
+ the statx flag `STATX_ATTR_DAX`. See the manpage for statx(2) for details
+ about this access mode.
+
+ 2. There exists a persistent flag `FS_XFLAG_DAX` that can be applied to regular
+ files and directories. This advisory flag can be set or cleared at any
+ time, but doing so does not immediately affect the `S_DAX` state.
+
+ 3. If the persistent `FS_XFLAG_DAX` flag is set on a directory, this flag will
+ be inherited by all regular files and subdirectories that are subsequently
+ created in this directory. Files and subdirectories that exist at the time
+ this flag is set or cleared on the parent directory are not modified by
+ this modification of the parent directory.
+
+ 4. There exist dax mount options which can override `FS_XFLAG_DAX` in the
+ setting of the `S_DAX` flag. Given underlying storage which supports `DAX` the
+ following hold:
+
+ ``-o dax=inode`` means "follow `FS_XFLAG_DAX`" and is the default.
+
+ ``-o dax=never`` means "never set `S_DAX`, ignore `FS_XFLAG_DAX`."
+
+ ``-o dax=always`` means "always set `S_DAX`, ignore `FS_XFLAG_DAX`."
+
+ ``-o dax`` is a legacy option which is an alias for ``dax=always``.
+
+ .. warning::
+
+ The option ``-o dax`` may be removed in the future so ``-o dax=always`` is
+ the preferred method for specifying this behavior.
+
+ .. note::
+
+ Modifications to and the inheritance behavior of `FS_XFLAG_DAX` remain
+ the same even when the filesystem is mounted with a dax option. However,
+ in-core inode state (`S_DAX`) will be overridden until the filesystem is
+ remounted with dax=inode and the inode is evicted from kernel memory.
+
+ 5. The `S_DAX` policy can be changed via:
+
+ a) Setting the parent directory `FS_XFLAG_DAX` as needed before files are
+ created
+
+ b) Setting the appropriate dax="foo" mount option
+
+ c) Changing the `FS_XFLAG_DAX` flag on existing regular files and
+ directories. This has runtime constraints and limitations that are
+ described in 6) below.
+
+ 6. When changing the `S_DAX` policy via toggling the persistent `FS_XFLAG_DAX`
+ flag, the change to existing regular files won't take effect until the
+ files are closed by all processes.
+
+
+Details
+-------
+
+There are 2 per-file dax flags. One is a persistent inode setting (`FS_XFLAG_DAX`)
+and the other is a volatile flag indicating the active state of the feature
+(`S_DAX`).
+
+`FS_XFLAG_DAX` is preserved within the filesystem. This persistent config
+setting can be set, cleared and/or queried using the `FS_IOC_FSGETXATTR` and
+`FS_IOC_FSSETXATTR` ioctls (see ioctl_xfs_fsgetxattr(2)) or a utility such
+as 'xfs_io'.
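+
+A minimal sketch of setting the flag from C might look like this, using
+definitions from ``linux/fs.h``:
+
+.. code-block:: c
+
+   #include <sys/ioctl.h>
+   #include <linux/fs.h>
+
+   /* Set FS_XFLAG_DAX on an open file or directory. */
+   int set_dax_xflag(int fd)
+   {
+           struct fsxattr fa;
+
+           if (ioctl(fd, FS_IOC_FSGETXATTR, &fa))
+                   return -1;
+           fa.fsx_xflags |= FS_XFLAG_DAX;
+           return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
+   }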
+
+New files and directories automatically inherit `FS_XFLAG_DAX` from
+their parent directory **when created**. Therefore, setting `FS_XFLAG_DAX` at
+directory creation time can be used to set a default behavior for an entire
+sub-tree.
+
+To clarify inheritance, here are 3 examples:
+
+Example A:
+
+.. code-block:: shell
+
+ mkdir -p a/b/c
+ xfs_io -c 'chattr +x' a
+ mkdir a/b/c/d
+ mkdir a/e
+
+ ------[outcome]------
+
+ dax: a,e
+ no dax: b,c,d
+
+Example B:
+
+.. code-block:: shell
+
+ mkdir a
+ xfs_io -c 'chattr +x' a
+ mkdir -p a/b/c/d
+
+ ------[outcome]------
+
+ dax: a,b,c,d
+ no dax:
+
+Example C:
+
+.. code-block:: shell
+
+ mkdir -p a/b/c
+ xfs_io -c 'chattr +x' c
+ mkdir a/b/c/d
+
+ ------[outcome]------
+
+ dax: c,d
+ no dax: a,b
+
+The current enabled state (`S_DAX`) is set when a file inode is instantiated in
+memory by the kernel. It is set based on the underlying media support, the
+value of `FS_XFLAG_DAX` and the filesystem's dax mount option.
+
+statx can be used to query `S_DAX`.
+
+.. note::
+
+ Only regular files will ever have `S_DAX` set and therefore statx
+ will never indicate that `S_DAX` is set on directories.
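+
+For example, a minimal query might look like this (assuming a libc that
+provides the statx(2) wrapper and `STATX_ATTR_DAX`):
+
+.. code-block:: c
+
+   #define _GNU_SOURCE
+   #include <fcntl.h>
+   #include <stdio.h>
+   #include <sys/stat.h>
+
+   int main(int argc, char *argv[])
+   {
+           struct statx stx;
+
+           if (argc < 2 || statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx))
+                   return 1;
+
+           printf("S_DAX is %sset\n",
+                  (stx.stx_attributes & STATX_ATTR_DAX) ? "" : "not ");
+           return 0;
+   }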
+
+Setting the `FS_XFLAG_DAX` flag (specifically or through inheritance) occurs even
+if the underlying media does not support dax and/or the filesystem is
+overridden with a mount option.
+
+
+Implementation Tips for Block Driver Writers
+--------------------------------------------
+
+To support `DAX` in your block driver, implement the 'direct_access'
+block device operation. It is used to translate the sector number
+(expressed in units of 512-byte sectors) to a page frame number (pfn)
+that identifies the physical page for the memory. It also returns a
+kernel virtual address that can be used to access the memory.
+
+The direct_access method takes a 'size' parameter that indicates the
+number of bytes being requested. The function should return the number
+of bytes that can be contiguously accessed at that offset. It may also
+return a negative errno if an error occurs.
+
+In order to support this method, the storage must be byte-accessible by
+the CPU at all times. If your device uses paging techniques to expose
+a large amount of memory through a smaller window, then you cannot
+implement direct_access. Equally, if your device can occasionally
+stall the CPU for an extended period, you should also not attempt to
+implement direct_access.
+
+These block devices may be used for inspiration:
+
+- brd: RAM backed block device driver
+- dcssblk: s390 dcss block device driver
+- pmem: NVDIMM persistent memory driver
+
+
+Implementation Tips for Filesystem Writers
+------------------------------------------
+
+Filesystem support consists of:
+
+* Adding support to mark inodes as being `DAX` by setting the `S_DAX` flag in
+ i_flags
+* Implementing ->read_iter and ->write_iter operations which use
+ :c:func:`dax_iomap_rw()` when inode has `S_DAX` flag set
+* Implementing an mmap file operation for `DAX` files which sets the
+ `VM_MIXEDMAP` and `VM_HUGEPAGE` flags on the `VMA`, and setting the vm_ops to
+ include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
+ handlers should probably call :c:func:`dax_iomap_fault()` passing the
+ appropriate fault size and iomap operations.
+* Calling :c:func:`iomap_zero_range()` passing appropriate iomap operations
+ instead of :c:func:`block_truncate_page()` for `DAX` files
+* Ensuring that there is sufficient locking between reads, writes,
+ truncates and page faults
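+
+As an illustration of the ->read_iter part, a filesystem might dispatch to
+:c:func:`dax_iomap_rw()` roughly as below; ``myfs_iomap_ops`` is a
+hypothetical name for the filesystem's iomap operations:
+
+.. code-block:: c
+
+   static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+   {
+           /* DAX inodes bypass the page cache entirely. */
+           if (IS_DAX(file_inode(iocb->ki_filp)))
+                   return dax_iomap_rw(iocb, to, &myfs_iomap_ops);
+
+           return generic_file_read_iter(iocb, to);
+   }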
+
+The iomap handlers for allocating blocks must make sure that allocated blocks
+are zeroed out and converted to written extents before being returned to avoid
+exposure of uninitialized data through mmap.
+
+These filesystems may be used for inspiration:
+
+.. seealso::
+
+ ext2: see Documentation/filesystems/ext2.rst
+
+.. seealso::
+
+ xfs: see Documentation/admin-guide/xfs.rst
+
+.. seealso::
+
+ ext4: see Documentation/filesystems/ext4/
+
+
+Handling Media Errors
+---------------------
+
+The libnvdimm subsystem stores a record of known media error locations for
+each pmem block device (in gendisk->badblocks). If we fault at such location,
+or one with a latent error not yet discovered, the application can expect
+to receive a `SIGBUS`. Libnvdimm also allows clearing of these errors by simply
+writing the affected sectors (through the pmem driver, and if the underlying
+NVDIMM supports the clear_poison DSM defined by ACPI).
+
+Since `DAX` IO normally doesn't go through the ``driver/bio`` path,
+applications or sysadmins have an option to restore the lost data from a
+prior backup or inbuilt redundancy in the following ways:
+
+1. Delete the affected file, and restore from a backup (sysadmin route):
+ This will free the filesystem blocks that were being used by the file,
+ and the next time they're allocated, they will be zeroed first, which
+ happens through the driver, and will clear bad sectors.
+
+2. Truncate or hole-punch the part of the file that has a bad-block (at least
+ an entire aligned sector has to be hole-punched, but not necessarily an
+ entire filesystem block).
+
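+The second route can be exercised with fallocate(2); for example, to
+punch out one aligned 4096-byte block at a known bad offset:
+
+.. code-block:: c
+
+   #define _GNU_SOURCE
+   #include <fcntl.h>
+
+   /* Punch a hole over the bad block; later allocations are zeroed
+    * through the driver, which clears the bad sectors. */
+   int clear_bad_block(int fd, off_t offset)
+   {
+           return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                            offset, 4096);
+   }
+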
+These are the two basic paths that allow `DAX` filesystems to continue operating
+in the presence of media errors. More robust error recovery mechanisms can be
+built on top of this in the future, for example, involving redundancy/mirroring
+provided at the block layer through DM, or additionally, at the filesystem
+level. These would have to rely on the above two tenets, that error clearing
+can happen either by sending an IO through the driver, or zeroing (also through
+the driver).
+
+
+Shortcomings
+------------
+
+Even if the kernel or its modules are stored on a filesystem that supports
+`DAX` on a block device that supports `DAX`, they will still be copied into RAM.
+
+The DAX code does not work correctly on architectures which have virtually
+mapped caches such as ARM, MIPS and SPARC.
+
+Calling :c:func:`get_user_pages()` on a range of user memory that has been
+mmaped from a `DAX` file will fail when there are no 'struct page' to describe
+those pages. This problem has been addressed in some device drivers
+by adding optional struct page support for pages under the control of
+the driver (see `CONFIG_NVDIMM_PFN` in ``drivers/nvdimm`` for an example of
+how to do this). In the non struct page cases `O_DIRECT` reads/writes to
+those memory ranges from a non-`DAX` file will fail.
+
+
+.. note::
+
+ `O_DIRECT` reads/writes *of a `DAX` file* do work; it is the memory that
+ is being accessed that is key here. Other things that will not work in
+ the non struct page case include RDMA, :c:func:`sendfile()` and
+ :c:func:`splice()`.
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
deleted file mode 100644
index e03c20564f3a..000000000000
--- a/Documentation/filesystems/dax.txt
+++ /dev/null
@@ -1,257 +0,0 @@
-Direct Access for files
------------------------
-
-Motivation
-----------
-
-The page cache is usually used to buffer reads and writes to files.
-It is also used to provide the pages which are mapped into userspace
-by a call to mmap.
-
-For block devices that are memory-like, the page cache pages would be
-unnecessary copies of the original storage. The DAX code removes the
-extra copy by performing reads and writes directly to the storage device.
-For file mappings, the storage device is mapped directly into userspace.
-
-
-Usage
------
-
-If you have a block device which supports DAX, you can make a filesystem
-on it as usual. The DAX code currently only supports files with a block
-size equal to your kernel's PAGE_SIZE, so you may need to specify a block
-size when creating the filesystem.
-
-Currently 3 filesystems support DAX: ext2, ext4 and xfs. Enabling DAX on them
-is different.
-
-Enabling DAX on ext2
------------------------------
-
-When mounting the filesystem, use the "-o dax" option on the command line or
-add 'dax' to the options in /etc/fstab. This works to enable DAX on all files
-within the filesystem. It is equivalent to the '-o dax=always' behavior below.
-
-
-Enabling DAX on xfs and ext4
-----------------------------
-
-Summary
--------
-
- 1. There exists an in-kernel file access mode flag S_DAX that corresponds to
- the statx flag STATX_ATTR_DAX. See the manpage for statx(2) for details
- about this access mode.
-
- 2. There exists a persistent flag FS_XFLAG_DAX that can be applied to regular
- files and directories. This advisory flag can be set or cleared at any
- time, but doing so does not immediately affect the S_DAX state.
-
- 3. If the persistent FS_XFLAG_DAX flag is set on a directory, this flag will
- be inherited by all regular files and subdirectories that are subsequently
- created in this directory. Files and subdirectories that exist at the time
- this flag is set or cleared on the parent directory are not modified by
- this modification of the parent directory.
-
- 4. There exist dax mount options which can override FS_XFLAG_DAX in the
- setting of the S_DAX flag. Given underlying storage which supports DAX the
- following hold:
-
- "-o dax=inode" means "follow FS_XFLAG_DAX" and is the default.
-
- "-o dax=never" means "never set S_DAX, ignore FS_XFLAG_DAX."
-
- "-o dax=always" means "always set S_DAX ignore FS_XFLAG_DAX."
-
- "-o dax" is a legacy option which is an alias for "dax=always".
- This may be removed in the future so "-o dax=always" is
- the preferred method for specifying this behavior.
-
- NOTE: Modifications to and the inheritance behavior of FS_XFLAG_DAX remain
- the same even when the filesystem is mounted with a dax option. However,
- in-core inode state (S_DAX) will be overridden until the filesystem is
- remounted with dax=inode and the inode is evicted from kernel memory.
-
- 5. The S_DAX policy can be changed via:
-
- a) Setting the parent directory FS_XFLAG_DAX as needed before files are
- created
-
- b) Setting the appropriate dax="foo" mount option
-
- c) Changing the FS_XFLAG_DAX flag on existing regular files and
- directories. This has runtime constraints and limitations that are
- described in 6) below.
-
- 6. When changing the S_DAX policy via toggling the persistent FS_XFLAG_DAX
- flag, the change to existing regular files won't take effect until the
- files are closed by all processes.
-
-
-Details
--------
-
-There are 2 per-file dax flags. One is a persistent inode setting (FS_XFLAG_DAX)
-and the other is a volatile flag indicating the active state of the feature
-(S_DAX).
-
-FS_XFLAG_DAX is preserved within the filesystem. This persistent config
-setting can be set, cleared and/or queried using the FS_IOC_FS[GS]ETXATTR ioctl
-(see ioctl_xfs_fsgetxattr(2)) or an utility such as 'xfs_io'.
-
-New files and directories automatically inherit FS_XFLAG_DAX from
-their parent directory _when_ _created_. Therefore, setting FS_XFLAG_DAX at
-directory creation time can be used to set a default behavior for an entire
-sub-tree.
-
-To clarify inheritance, here are 3 examples:
-
-Example A:
-
-mkdir -p a/b/c
-xfs_io -c 'chattr +x' a
-mkdir a/b/c/d
-mkdir a/e
-
- dax: a,e
- no dax: b,c,d
-
-Example B:
-
-mkdir a
-xfs_io -c 'chattr +x' a
-mkdir -p a/b/c/d
-
- dax: a,b,c,d
- no dax:
-
-Example C:
-
-mkdir -p a/b/c
-xfs_io -c 'chattr +x' c
-mkdir a/b/c/d
-
- dax: c,d
- no dax: a,b
-
-
-The current enabled state (S_DAX) is set when a file inode is instantiated in
-memory by the kernel. It is set based on the underlying media support, the
-value of FS_XFLAG_DAX and the filesystem's dax mount option.
-
-statx can be used to query S_DAX. NOTE that only regular files will ever have
-S_DAX set and therefore statx will never indicate that S_DAX is set on
-directories.
-
-Setting the FS_XFLAG_DAX flag (specifically or through inheritance) occurs even
-if the underlying media does not support dax and/or the filesystem is
-overridden with a mount option.
-
-
-
-Implementation Tips for Block Driver Writers
---------------------------------------------
-
-To support DAX in your block driver, implement the 'direct_access'
-block device operation. It is used to translate the sector number
-(expressed in units of 512-byte sectors) to a page frame number (pfn)
-that identifies the physical page for the memory. It also returns a
-kernel virtual address that can be used to access the memory.
-
-The direct_access method takes a 'size' parameter that indicates the
-number of bytes being requested. The function should return the number
-of bytes that can be contiguously accessed at that offset. It may also
-return a negative errno if an error occurs.
-
-In order to support this method, the storage must be byte-accessible by
-the CPU at all times. If your device uses paging techniques to expose
-a large amount of memory through a smaller window, then you cannot
-implement direct_access. Equally, if your device can occasionally
-stall the CPU for an extended period, you should also not attempt to
-implement direct_access.
-
-These block devices may be used for inspiration:
-- brd: RAM backed block device driver
-- dcssblk: s390 dcss block device driver
-- pmem: NVDIMM persistent memory driver
-
-
-Implementation Tips for Filesystem Writers
-------------------------------------------
-
-Filesystem support consists of
-- adding support to mark inodes as being DAX by setting the S_DAX flag in
- i_flags
-- implementing ->read_iter and ->write_iter operations which use dax_iomap_rw()
- when inode has S_DAX flag set
-- implementing an mmap file operation for DAX files which sets the
- VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
- include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
- handlers should probably call dax_iomap_fault() passing the appropriate
- fault size and iomap operations.
-- calling iomap_zero_range() passing appropriate iomap operations instead of
- block_truncate_page() for DAX files
-- ensuring that there is sufficient locking between reads, writes,
- truncates and page faults
-
-The iomap handlers for allocating blocks must make sure that allocated blocks
-are zeroed out and converted to written extents before being returned to avoid
-exposure of uninitialized data through mmap.
-
-These filesystems may be used for inspiration:
-- ext2: see Documentation/filesystems/ext2.rst
-- ext4: see Documentation/filesystems/ext4/
-- xfs: see Documentation/admin-guide/xfs.rst
-
-
-Handling Media Errors
----------------------
-
-The libnvdimm subsystem stores a record of known media error locations for
-each pmem block device (in gendisk->badblocks). If we fault at such location,
-or one with a latent error not yet discovered, the application can expect
-to receive a SIGBUS. Libnvdimm also allows clearing of these errors by simply
-writing the affected sectors (through the pmem driver, and if the underlying
-NVDIMM supports the clear_poison DSM defined by ACPI).
-
-Since DAX IO normally doesn't go through the driver/bio path, applications or
-sysadmins have an option to restore the lost data from a prior backup/inbuilt
-redundancy in the following ways:
-
-1. Delete the affected file, and restore from a backup (sysadmin route):
- This will free the filesystem blocks that were being used by the file,
- and the next time they're allocated, they will be zeroed first, which
- happens through the driver, and will clear bad sectors.
-
-2. Truncate or hole-punch the part of the file that has a bad-block (at least
- an entire aligned sector has to be hole-punched, but not necessarily an
- entire filesystem block).
-
-These are the two basic paths that allow DAX filesystems to continue operating
-in the presence of media errors. More robust error recovery mechanisms can be
-built on top of this in the future, for example, involving redundancy/mirroring
-provided at the block layer through DM, or additionally, at the filesystem
-level. These would have to rely on the above two tenets, that error clearing
-can happen either by sending an IO through the driver, or zeroing (also through
-the driver).
-
-
-Shortcomings
-------------
-
-Even if the kernel or its modules are stored on a filesystem that supports
-DAX on a block device that supports DAX, they will still be copied into RAM.
-
-The DAX code does not work correctly on architectures which have virtually
-mapped caches such as ARM, MIPS and SPARC.
-
-Calling get_user_pages() on a range of user memory that has been mmaped
-from a DAX file will fail when there are no 'struct page' to describe
-those pages. This problem has been addressed in some device drivers
-by adding optional struct page support for pages under the control of
-the driver (see CONFIG_NVDIMM_PFN in drivers/nvdimm for an example of
-how to do this). In the non struct page cases O_DIRECT reads/writes to
-those memory ranges from a non-DAX file will fail (note that O_DIRECT
-reads/writes _of a DAX file_ do work, it is the memory that is being
-accessed that is key here). Other things that will not work in the
-non struct page case include RDMA, sendfile() and splice().
diff --git a/Documentation/filesystems/ext2.rst b/Documentation/filesystems/ext2.rst
index c2fce22cfd03..154101cf0e4f 100644
--- a/Documentation/filesystems/ext2.rst
+++ b/Documentation/filesystems/ext2.rst
@@ -25,7 +25,7 @@ check=none, nocheck (*) Don't do extra checking of bitmaps on mount
(check=normal and check=strict options removed)
dax Use direct access (no page cache). See
- Documentation/filesystems/dax.txt.
+ Documentation/filesystems/dax.rst.
debug Extra debugging information is sent to the
kernel syslog. Useful for developers.
diff --git a/Documentation/filesystems/ext4/blockgroup.rst b/Documentation/filesystems/ext4/blockgroup.rst
index 3da156633339..d5d652addce5 100644
--- a/Documentation/filesystems/ext4/blockgroup.rst
+++ b/Documentation/filesystems/ext4/blockgroup.rst
@@ -84,7 +84,7 @@ Without the option META\_BG, for safety concerns, all block group
descriptors copies are kept in the first block group. Given the default
128MiB(2^27 bytes) block group size and 64-byte group descriptors, ext4
can have at most 2^27/64 = 2^21 block groups. This limits the entire
-filesystem size to 2^21 ∗ 2^27 = 2^48bytes or 256TiB.
+filesystem size to 2^21 * 2^27 = 2^48 bytes or 256TiB.
The solution to this problem is to use the metablock group feature
(META\_BG), which is already in ext3 for all 2.6 releases. With the
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index d4853cb919d2..246af51b277a 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -77,6 +77,7 @@ Documentation for filesystem implementations.
coda
configfs
cramfs
+ dax
debugfs
dlmfs
ecryptfs
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 1e894480115b..2183fd8cc350 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -480,7 +480,7 @@ prototypes::
locking rules:
======================= ===================
-ops bd_mutex
+ops open_mutex
======================= ===================
open: yes
release: yes
diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst
index c482e1619e77..a6fa7619b69e 100644
--- a/Documentation/filesystems/path-lookup.rst
+++ b/Documentation/filesystems/path-lookup.rst
@@ -448,15 +448,17 @@ described. If it finds a ``LAST_NORM`` component it first calls
filesystem to revalidate the result if it is that sort of filesystem.
If that doesn't get a good result, it calls "``lookup_slow()``" which
takes ``i_rwsem``, rechecks the cache, and then asks the filesystem
-to find a definitive answer. Each of these will call
-``follow_managed()`` (as described below) to handle any mount points.
-
-In the absence of symbolic links, ``walk_component()`` creates a new
-``struct path`` containing a counted reference to the new dentry and a
-reference to the new ``vfsmount`` which is only counted if it is
-different from the previous ``vfsmount``. It then calls
-``path_to_nameidata()`` to install the new ``struct path`` in the
-``struct nameidata`` and drop the unneeded references.
+to find a definitive answer.
+
+As the last step of walk_component(), step_into() will be called either
+directly from walk_component() or from handle_dots(). It calls
+handle_mounts() to check and handle mount points, in which a new
+``struct path`` is created containing a counted reference to the new dentry
+and a reference to the new ``vfsmount``, which is only counted if it is
+different from the previous ``vfsmount``. Then, if there is a symbolic
+link, step_into() calls pick_link() to deal with it; otherwise it
+installs the new ``struct path`` in the ``struct nameidata`` and drops the
+unneeded references.
This "hand-over-hand" sequencing of getting a reference to the new
dentry before dropping the reference to the previous dentry may
@@ -470,8 +472,8 @@ Handling the final component
``nd->last_type`` to refer to the final component of the path. It does
not call ``walk_component()`` that last time. Handling that final
component remains for the caller to sort out. Those callers are
-``path_lookupat()``, ``path_parentat()``, ``path_mountpoint()`` and
-``path_openat()`` each of which handles the differing requirements of
+path_lookupat(), path_parentat() and path_openat(), each of which
+handles the differing requirements of
different system calls.
``path_parentat()`` is clearly the simplest - it just wraps a little bit
@@ -486,20 +488,18 @@ perform their operation.
object is wanted such as by ``stat()`` or ``chmod()``. It essentially just
calls ``walk_component()`` on the final component through a call to
``lookup_last()``. ``path_lookupat()`` returns just the final dentry.
-
-``path_mountpoint()`` handles the special case of unmounting which must
-not try to revalidate the mounted filesystem. It effectively
-contains, through a call to ``mountpoint_last()``, an alternate
-implementation of ``lookup_slow()`` which skips that step. This is
-important when unmounting a filesystem that is inaccessible, such as
+It is worth noting that when the flag ``LOOKUP_MOUNTPOINT`` is set,
+path_lookupat() will unset LOOKUP_JUMPED in nameidata so that in the
+subsequent path traversal d_weak_revalidate() won't be called.
+This is important when unmounting a filesystem that is inaccessible, such as
one provided by a dead NFS server.
Finally ``path_openat()`` is used for the ``open()`` system call; it
-contains, in support functions starting with "``do_last()``", all the
+contains, in support functions starting with "open_last_lookups()", all the
complexity needed to handle the different subtleties of O_CREAT (with
or without O_EXCL), final "``/``" characters, and trailing symbolic
links. We will revisit this in the final part of this series, which
-focuses on those symbolic links. "``do_last()``" will sometimes, but
+focuses on those symbolic links. "open_last_lookups()" will sometimes, but
not always, take ``i_rwsem``, depending on what it finds.
Each of these, or the functions which call them, need to be alert to
@@ -535,8 +535,7 @@ covered in greater detail in autofs.txt in the Linux documentation
tree, but a few notes specifically related to path lookup are in order
here.
-The Linux VFS has a concept of "managed" dentries which is reflected
-in function names such as "``follow_managed()``". There are three
+The Linux VFS has a concept of "managed" dentries. There are three
potentially interesting things about these dentries corresponding
to three different flags that might be set in ``dentry->d_flags``:
@@ -652,10 +651,10 @@ RCU-walk finds it cannot stop gracefully, it simply gives up and
restarts from the top with REF-walk.
This pattern of "try RCU-walk, if that fails try REF-walk" can be
-clearly seen in functions like ``filename_lookup()``,
-``filename_parentat()``, ``filename_mountpoint()``,
-``do_filp_open()``, and ``do_file_open_root()``. These five
-correspond roughly to the four ``path_*()`` functions we met earlier,
+clearly seen in functions like filename_lookup(),
+filename_parentat(),
+do_filp_open(), and do_file_open_root(). These four
+correspond roughly to the three ``path_*()`` functions we met earlier,
each of which calls ``link_path_walk()``. The ``path_*()`` functions are
called using different mode flags until a mode is found which works.
They are first called with ``LOOKUP_RCU`` set to request "RCU-walk". If
@@ -993,8 +992,8 @@ is 4096. There are a number of reasons for this limit; not letting the
kernel spend too much time on just one path is one of them. With
symbolic links you can effectively generate much longer paths so some
sort of limit is needed for the same reason. Linux imposes a limit of
-at most 40 symlinks in any one path lookup. It previously imposed a
-further limit of eight on the maximum depth of recursion, but that was
+at most 40 (MAXSYMLINKS) symlinks in any one path lookup. It previously imposed
+a further limit of eight on the maximum depth of recursion, but that was
raised to 40 when a separate stack was implemented, so there is now
just the one limit.
@@ -1061,42 +1060,26 @@ filesystem cannot successfully get a reference in RCU-walk mode, it
must return ``-ECHILD`` and ``unlazy_walk()`` will be called to return to
REF-walk mode in which the filesystem is allowed to sleep.
-The place for all this to happen is the ``i_op->follow_link()`` inode
-method. In the present mainline code this is never actually called in
-RCU-walk mode as the rewrite is not quite complete. It is likely that
-in a future release this method will be passed an ``inode`` pointer when
-called in RCU-walk mode so it both (1) knows to be careful, and (2) has the
-validated pointer. Much like the ``i_op->permission()`` method we
-looked at previously, ``->follow_link()`` would need to be careful that
+The place for all this to happen is the ``i_op->get_link()`` inode
+method. This is called both in RCU-walk and REF-walk. In RCU-walk the
+``dentry*`` argument is NULL, and ``->get_link()`` can return ``-ECHILD``
+to drop out of RCU-walk. Much like the ``i_op->permission()`` method we
+looked at previously, ``->get_link()`` would need to be careful that
all the data structures it references are safe to be accessed while
-holding no counted reference, only the RCU lock. Though getting a
-reference with ``->follow_link()`` is not yet done in RCU-walk mode, the
-code is ready to release the reference when that does happen.
-
-This need to drop the reference to a symlink adds significant
-complexity. It requires a reference to the inode so that the
-``i_op->put_link()`` inode operation can be called. In REF-walk, that
-reference is kept implicitly through a reference to the dentry, so
-keeping the ``struct path`` of the symlink is easiest. For RCU-walk,
-the pointer to the inode is kept separately. To allow switching from
-RCU-walk back to REF-walk in the middle of processing nested symlinks
-we also need the seq number for the dentry so we can confirm that
-switching back was safe.
-
-Finally, when providing a reference to a symlink, the filesystem also
-provides an opaque "cookie" that must be passed to ``->put_link()`` so that it
-knows what to free. This might be the allocated memory area, or a
-pointer to the ``struct page`` in the page cache, or something else
-completely. Only the filesystem knows what it is.
+holding no counted reference, only the RCU lock. A callback
+``struct delayed_call`` will be passed to ``->get_link()``:
+filesystems can set their own put_link function and argument through
+set_delayed_call(). Later on, when the VFS wants to put the link, it will
+call do_delayed_call() to invoke that callback function with the argument.
In order for the reference to each symlink to be dropped when the walk completes,
whether in RCU-walk or REF-walk, the symlink stack needs to contain,
along with the path remnants:
-- the ``struct path`` to provide a reference to the inode in REF-walk
-- the ``struct inode *`` to provide a reference to the inode in RCU-walk
+- the ``struct path`` to provide a reference to the previous path
+- the ``const char *`` to provide a reference to the previous name
- the ``seq`` to allow the path to be safely switched from RCU-walk to REF-walk
-- the ``cookie`` that tells ``->put_path()`` what to put.
+- the ``struct delayed_call`` for later invocation.
This means that each entry in the symlink stack needs to hold five
pointers and an integer instead of just one pointer (the path
@@ -1120,12 +1103,10 @@ doesn't need to notice. Getting this ``name`` variable on and off the
stack is very straightforward; pushing and popping the references is
a little more complex.
-When a symlink is found, ``walk_component()`` returns the value ``1``
-(``0`` is returned for any other sort of success, and a negative number
-is, as usual, an error indicator). This causes ``get_link()`` to be
-called; it then gets the link from the filesystem. Providing that
-operation is successful, the old path ``name`` is placed on the stack,
-and the new value is used as the ``name`` for a while. When the end of
+When a symlink is found, walk_component() calls pick_link() via step_into()
+which returns the link from the filesystem.
+Providing that operation is successful, the old path ``name`` is placed on the
+stack, and the new value is used as the ``name`` for a while. When the end of
the path is found (i.e. ``*name`` is ``'\0'``) the old ``name`` is restored
off the stack and path walking continues.
@@ -1142,23 +1123,23 @@ stack in ``walk_component()`` immediately when the symlink is found;
old symlink as it walks that last component. So it is quite
convenient for ``walk_component()`` to release the old symlink and pop
the references just before pushing the reference information for the
-new symlink. It is guided in this by two flags; ``WALK_GET``, which
-gives it permission to follow a symlink if it finds one, and
-``WALK_PUT``, which tells it to release the current symlink after it has been
-followed. ``WALK_PUT`` is tested first, leading to a call to
-``put_link()``. ``WALK_GET`` is tested subsequently (by
-``should_follow_link()``) leading to a call to ``pick_link()`` which sets
-up the stack frame.
+new symlink. It is guided in this by three flags: ``WALK_NOFOLLOW``, which
+forbids it from following a symlink if it finds one; ``WALK_MORE``,
+which indicates that it is still too early to release the current
+symlink; and ``WALK_TRAILING``, which indicates that it is on the final
+component of the lookup, so the userspace flag ``LOOKUP_FOLLOW`` will be
+checked to decide whether to follow a symlink, and ``may_follow_link()``
+will be called to check that we have the privilege to follow it.
Symlinks with no final component
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A pair of special-case symlinks deserve a little further explanation.
Both result in a new ``struct path`` (with mount and dentry) being set
-up in the ``nameidata``, and result in ``get_link()`` returning ``NULL``.
+up in the ``nameidata``, and result in pick_link() returning ``NULL``.
The more obvious case is a symlink to "``/``". All symlinks starting
-with "``/``" are detected in ``get_link()`` which resets the ``nameidata``
+with "``/``" are detected in pick_link() which resets the ``nameidata``
to point to the effective filesystem root. If the symlink only
contains "``/``" then there is nothing more to do, no components at all,
so ``NULL`` is returned to indicate that the symlink can be released and
@@ -1175,12 +1156,11 @@ something that looks like a symlink. It is really a reference to the
target file, not just the name of it. When you ``readlink`` these
objects you get a name that might refer to the same file - unless it
has been unlinked or mounted over. When ``walk_component()`` follows
-one of these, the ``->follow_link()`` method in "procfs" doesn't return
-a string name, but instead calls ``nd_jump_link()`` which updates the
-``nameidata`` in place to point to that target. ``->follow_link()`` then
-returns ``NULL``. Again there is no final component and ``get_link()``
-reports this by leaving the ``last_type`` field of ``nameidata`` as
-``LAST_BIND``.
+one of these, the ``->get_link()`` method in "procfs" doesn't return
+a string name, but instead calls nd_jump_link() which updates the
+``nameidata`` in place to point to that target. ``->get_link()`` then
+returns ``NULL``. Again there is no final component and pick_link()
+returns ``NULL``.
Following the symlink in the final component
--------------------------------------------
@@ -1197,42 +1177,38 @@ potentially need to call ``link_path_walk()`` again and again on
successive symlinks until one is found that doesn't point to another
symlink.
-This case is handled by the relevant caller of ``link_path_walk()``, such as
-``path_lookupat()`` using a loop that calls ``link_path_walk()``, and then
-handles the final component. If the final component is a symlink
-that needs to be followed, then ``trailing_symlink()`` is called to set
-things up properly and the loop repeats, calling ``link_path_walk()``
-again. This could loop as many as 40 times if the last component of
-each symlink is another symlink.
-
-The various functions that examine the final component and possibly
-report that it is a symlink are ``lookup_last()``, ``mountpoint_last()``
-and ``do_last()``, each of which use the same convention as
-``walk_component()`` of returning ``1`` if a symlink was found that needs
-to be followed.
-
-Of these, ``do_last()`` is the most interesting as it is used for
-opening a file. Part of ``do_last()`` runs with ``i_rwsem`` held and this
-part is in a separate function: ``lookup_open()``.
-
-Explaining ``do_last()`` completely is beyond the scope of this article,
-but a few highlights should help those interested in exploring the
-code.
-
-1. Rather than just finding the target file, ``do_last()`` needs to open
+This case is handled by the relevant callers of link_path_walk(), such as
+path_lookupat() and path_openat(), using a loop that calls link_path_walk(),
+and then handles the final component by calling open_last_lookups() or
+lookup_last(). If it is a symlink that needs to be followed,
+open_last_lookups() or lookup_last() will set things up properly and
+return the path so that the loop repeats, calling
+link_path_walk() again. This could loop as many as 40 times if the last
+component of each symlink is another symlink.
+
+Of the various functions that examine the final component,
+open_last_lookups() is the most interesting as it works in tandem
+with do_open() for opening a file. Part of open_last_lookups() runs
+with ``i_rwsem`` held and this part is in a separate function: lookup_open().
+
+Explaining open_last_lookups() and do_open() completely is beyond the scope
+of this article, but a few highlights should help those interested in exploring
+the code.
+
+1. Rather than just finding the target file, do_open() is used after
+ open_last_lookups() to open
it. If the file was found in the dcache, then ``vfs_open()`` is used for
this. If not, then ``lookup_open()`` will either call ``atomic_open()`` (if
the filesystem provides it) to combine the final lookup with the open, or
- will perform the separate ``lookup_real()`` and ``vfs_create()`` steps
+ will perform the separate ``i_op->lookup()`` and ``i_op->create()`` steps
directly. In the later case the actual "open" of this newly found or
- created file will be performed by ``vfs_open()``, just as if the name
+ created file will be performed by vfs_open(), just as if the name
were found in the dcache.
-2. ``vfs_open()`` can fail with ``-EOPENSTALE`` if the cached information
- wasn't quite current enough. Rather than restarting the lookup from
- the top with ``LOOKUP_REVAL`` set, ``lookup_open()`` is called instead,
- giving the filesystem a chance to resolve small inconsistencies.
- If that doesn't work, only then is the lookup restarted from the top.
+2. vfs_open() can fail with ``-EOPENSTALE`` if the cached information
+ wasn't quite current enough. In RCU-walk ``-ECHILD`` will be returned,
+ otherwise ``-ESTALE``. When ``-ESTALE`` is returned, the caller may
+ retry with the ``LOOKUP_REVAL`` flag set.
3. An open with O_CREAT **does** follow a symlink in the final component,
unlike other creation system calls (like ``mkdir``). So the sequence::
@@ -1242,8 +1218,8 @@ code.
will create a file called ``/tmp/bar``. This is not permitted if
``O_EXCL`` is set but otherwise is handled for an O_CREAT open much
- like for a non-creating open: ``should_follow_link()`` returns ``1``, and
- so does ``do_last()`` so that ``trailing_symlink()`` gets called and the
+ like for a non-creating open: lookup_last() or open_last_lookups()
+ returns a non ``NULL`` value, and link_path_walk() gets called and the
open process continues on the symlink that was found.
Updating the access time
diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
index 9b17dc77d18c..b7ad47df49de 100644
--- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
+++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
@@ -79,7 +79,8 @@ the ANOD object which is also the final target node of the reference.
})
}
-Please also see a graph example in :doc:`graph`.
+Please also see a graph example in
+Documentation/firmware-guide/acpi/dsd/graph.rst.
References
==========
diff --git a/Documentation/firmware-guide/acpi/dsd/graph.rst b/Documentation/firmware-guide/acpi/dsd/graph.rst
index 7072db801aeb..4341299aa937 100644
--- a/Documentation/firmware-guide/acpi/dsd/graph.rst
+++ b/Documentation/firmware-guide/acpi/dsd/graph.rst
@@ -174,4 +174,4 @@ References
referenced 2016-10-04.
[7] _DSD Device Properties Usage Rules.
- :doc:`../DSD-properties-rules`
+ Documentation/firmware-guide/acpi/DSD-properties-rules.rst
diff --git a/Documentation/firmware-guide/acpi/dsd/phy.rst b/Documentation/firmware-guide/acpi/dsd/phy.rst
new file mode 100644
index 000000000000..680ad179e5f9
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/dsd/phy.rst
@@ -0,0 +1,199 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+MDIO bus and PHYs in ACPI
+=========================
+
+The PHYs on an MDIO bus [1] are probed and registered using
+fwnode_mdiobus_register_phy().
+
+Later, for connecting these PHYs to their respective MACs, the PHYs registered
+on the MDIO bus have to be referenced.
+
+This document introduces two _DSD properties that are to be used
+for connecting PHYs on the MDIO bus [3] to the MAC layer.
+
+These properties are defined in accordance with the "Device
+Properties UUID For _DSD" [2] document, and the
+daffd814-6eba-4d8c-8a91-bc9bbf4aa301 UUID must be used in the Device
+Data Descriptors containing them.
+
+phy-handle
+----------
+For each MAC node, a device property "phy-handle" is used to reference
+the PHY that is registered on an MDIO bus. This is mandatory for
+network interfaces that have PHYs connected to the MAC via the MDIO bus.
+
+During the MDIO bus driver initialization, PHYs on this bus are probed
+using the _ADR object as shown below and are registered on the MDIO bus.
+
+.. code-block:: none
+
+ Scope(\_SB.MDI0)
+ {
+ Device(PHY1) {
+ Name (_ADR, 0x1)
+ } // end of PHY1
+
+ Device(PHY2) {
+ Name (_ADR, 0x2)
+ } // end of PHY2
+ }
+
+Later, during the MAC driver initialization, the registered PHY devices
+have to be retrieved from the MDIO bus. For this, the MAC driver needs
+references to the previously registered PHYs which are provided
+as device object references (e.g. \_SB.MDI0.PHY1).
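+
+In the MAC driver, such a reference might then be resolved roughly as
+follows. This is only a sketch; it assumes helpers like
+fwnode_find_reference() and fwnode_phy_find_device() are available:
+
+.. code-block:: c
+
+   #include <linux/phy.h>
+   #include <linux/property.h>
+
+   static struct phy_device *example_get_phy(struct device *dev)
+   {
+           struct fwnode_handle *phy_node;
+           struct phy_device *phy;
+
+           /* Follow the "phy-handle" device property to the PHY node. */
+           phy_node = fwnode_find_reference(dev_fwnode(dev), "phy-handle", 0);
+           if (IS_ERR(phy_node))
+                   return NULL;
+
+           /* Look up the phy_device registered for that node. */
+           phy = fwnode_phy_find_device(phy_node);
+           fwnode_handle_put(phy_node);
+           return phy;
+   }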
+
+phy-mode
+--------
+The "phy-mode" _DSD property is used to describe the connection to
+the PHY. The valid values for "phy-mode" are defined in [4].
+
+managed
+-------
+Optional property, which specifies the PHY management type.
+The valid values for "managed" are defined in [4].
+
+fixed-link
+----------
+The "fixed-link" is described by a data-only subnode of the
+MAC port, which is linked in the _DSD package via
+hierarchical data extension (UUID dbb8e3e6-5886-4ba6-8795-1319f52a966b
+in accordance with [5] "_DSD Implementation Guide" document).
+The subnode should comprise a required property ("speed") and
+possibly some optional ones - the complete list of parameters and
+their values is specified in [4].
+
+The following ASL example illustrates the usage of these properties.
+
+DSDT entry for MDIO node
+------------------------
+
+The MDIO bus has an SoC component (MDIO controller) and a platform
+component (PHYs on the MDIO bus).
+
+a) Silicon Component
+
+This node describes the MDIO controller, MDI0:
+
+.. code-block:: none
+
+ Scope(_SB)
+ {
+ Device(MDI0) {
+ Name(_HID, "NXP0006")
+ Name(_CCA, 1)
+ Name(_UID, 0)
+ Name(_CRS, ResourceTemplate() {
+ Memory32Fixed(ReadWrite, MDI0_BASE, MDI_LEN)
+ Interrupt(ResourceConsumer, Level, ActiveHigh, Shared)
+ {
+ MDI0_IT
+ }
+ }) // end of _CRS for MDI0
+ } // end of MDI0
+ }
+
+b) Platform Component
+
+The PHY1 and PHY2 nodes represent the PHYs connected to MDIO bus MDI0:
+
+.. code-block:: none
+
+ Scope(\_SB.MDI0)
+ {
+ Device(PHY1) {
+ Name (_ADR, 0x1)
+ } // end of PHY1
+
+ Device(PHY2) {
+ Name (_ADR, 0x2)
+ } // end of PHY2
+ }
+
+DSDT entries representing MAC nodes
+-----------------------------------
+
+Below are the MAC nodes where PHY nodes are referenced,
+with phy-mode and phy-handle used as explained earlier:
+
+.. code-block:: none
+
+ Scope(\_SB.MCE0.PR17)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package (2) {"phy-mode", "rgmii-id"},
+ Package (2) {"phy-handle", \_SB.MDI0.PHY1}
+ }
+ })
+ }
+
+ Scope(\_SB.MCE0.PR18)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package (2) {"phy-mode", "rgmii-id"},
+ Package (2) {"phy-handle", \_SB.MDI0.PHY2}}
+ }
+ })
+ }
+
+MAC node example where "managed" property is specified.
+-------------------------------------------------------
+
+.. code-block:: none
+
+ Scope(\_SB.PP21.ETH0)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {"phy-mode", "sgmii"},
+ Package () {"managed", "in-band-status"}
+ }
+ })
+ }
+
+MAC node example with a "fixed-link" subnode.
+---------------------------------------------
+
+.. code-block:: none
+
+ Scope(\_SB.PP21.ETH1)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {"phy-mode", "sgmii"},
+ },
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () {"fixed-link", "LNK0"}
+ }
+ })
+ Name (LNK0, Package(){ // Data-only subnode of port
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {"speed", 1000},
+ Package () {"full-duplex", 1}
+ }
+ })
+ }
+
+References
+==========
+
+[1] Documentation/networking/phy.rst
+
+[2] https://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+
+[3] Documentation/firmware-guide/acpi/DSD-properties-rules.rst
+
+[4] Documentation/devicetree/bindings/net/ethernet-controller.yaml
+
+[5] https://github.com/UEFI/DSD-Guide/blob/main/dsd-guide.pdf
diff --git a/Documentation/firmware-guide/acpi/enumeration.rst b/Documentation/firmware-guide/acpi/enumeration.rst
index 9f0d5c854fa4..18074eb71860 100644
--- a/Documentation/firmware-guide/acpi/enumeration.rst
+++ b/Documentation/firmware-guide/acpi/enumeration.rst
@@ -339,8 +339,8 @@ a code like this::
There are also devm_* versions of these functions which release the
descriptors once the device is released.
-See Documentation/firmware-guide/acpi/gpio-properties.rst for more information about the
-_DSD binding related to GPIOs.
+See Documentation/firmware-guide/acpi/gpio-properties.rst for more information
+about the _DSD binding related to GPIOs.
MFD devices
===========
@@ -460,7 +460,8 @@ the _DSD of the device object itself or the _DSD of its ancestor in the
Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible"
property returned by it is meaningless.
-Refer to :doc:`DSD-properties-rules` for more information.
+Refer to Documentation/firmware-guide/acpi/DSD-properties-rules.rst for more
+information.
PCI hierarchy representation
============================
diff --git a/Documentation/firmware-guide/acpi/index.rst b/Documentation/firmware-guide/acpi/index.rst
index f72b5f1769fb..a99ee402b212 100644
--- a/Documentation/firmware-guide/acpi/index.rst
+++ b/Documentation/firmware-guide/acpi/index.rst
@@ -11,6 +11,7 @@ ACPI Support
dsd/graph
dsd/data-node-references
dsd/leds
+ dsd/phy
enumeration
osi
method-customizing
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
index 471be1e98d6f..1c85a2af92bf 100644
--- a/Documentation/hwmon/adm1177.rst
+++ b/Documentation/hwmon/adm1177.rst
@@ -20,7 +20,8 @@ Usage Notes
-----------
This driver does not auto-detect devices. You will have to instantiate the
-devices explicitly. Please see :doc:`/i2c/instantiating-devices` for details.
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst
+for details.
Sysfs entries
diff --git a/Documentation/hwmon/dps920ab.rst b/Documentation/hwmon/dps920ab.rst
new file mode 100644
index 000000000000..c33b4cdc0a60
--- /dev/null
+++ b/Documentation/hwmon/dps920ab.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver dps920ab
+========================
+
+Supported chips:
+
+ * Delta DPS920AB
+
+ Prefix: 'dps920ab'
+
+ Addresses scanned: -
+
+Authors:
+ Robert Marko <robert.marko@sartura.hr>
+
+
+Description
+-----------
+
+This driver implements support for Delta DPS920AB 920W 54V DC single output
+power supply with PMBus support.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+======================= ======================================================
+curr1_label "iin"
+curr1_input Measured input current
+curr1_alarm Input current high alarm
+
+curr2_label "iout1"
+curr2_input Measured output current
+curr2_max Maximum output current
+curr2_rated_max Maximum rated output current
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_alarm Input voltage alarm
+
+in2_label "vout1"
+in2_input Measured output voltage
+in2_rated_min Minimum rated output voltage
+in2_rated_max Maximum rated output voltage
+in2_alarm Output voltage alarm
+
+power1_label "pin"
+power1_input Measured input power
+power1_alarm Input power high alarm
+
+power2_label "pout1"
+power2_input Measured output power
+power2_rated_max Maximum rated output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_alarm Temperature alarm
+
+fan1_alarm Fan 1 warning.
+fan1_fault Fan 1 fault.
+fan1_input Fan 1 speed in RPM.
+======================= ======================================================
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 9ed60fa84cbe..bc01601ea81a 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -53,6 +53,7 @@ Hardware Monitoring Kernel Drivers
da9055
dell-smm-hwmon
dme1737
+ dps920ab
drivetemp
ds1621
ds620
@@ -137,6 +138,7 @@ Hardware Monitoring Kernel Drivers
mcp3021
menf21bmc
mlxreg-fan
+ mp2888
mp2975
nct6683
nct6775
@@ -150,6 +152,7 @@ Hardware Monitoring Kernel Drivers
pc87360
pc87427
pcf8591
+ pim4328
pm6764tr
pmbus
powr1220
@@ -164,6 +167,7 @@ Hardware Monitoring Kernel Drivers
sht15
sht21
sht3x
+ sht4x
shtc1
sis5595
sl28cpld
diff --git a/Documentation/hwmon/ir36021.rst b/Documentation/hwmon/ir36021.rst
index ca3436b04e20..1faa85c39f1b 100644
--- a/Documentation/hwmon/ir36021.rst
+++ b/Documentation/hwmon/ir36021.rst
@@ -19,7 +19,7 @@ Authors:
Description
-----------
-The IR36021 is a dual‐loop digital multi‐phase buck controller designed for
+The IR36021 is a dual-loop digital multi-phase buck controller designed for
point of load applications.
Usage Notes
diff --git a/Documentation/hwmon/lm75.rst b/Documentation/hwmon/lm75.rst
index 81257d5fc48f..8d0ab4ad5fb5 100644
--- a/Documentation/hwmon/lm75.rst
+++ b/Documentation/hwmon/lm75.rst
@@ -93,9 +93,9 @@ Supported chips:
https://www.st.com/resource/en/datasheet/stlm75.pdf
- * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275
+ * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275, TMP1075
- Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275'
+ Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275', 'tmp1075'
Addresses scanned: none
@@ -119,6 +119,8 @@ Supported chips:
https://www.ti.com/product/tmp275
+ https://www.ti.com/product/TMP1075
+
* NXP LM75B, PCT2075
Prefix: 'lm75b', 'pct2075'
diff --git a/Documentation/hwmon/ltc2992.rst b/Documentation/hwmon/ltc2992.rst
index 46aa1aa84a1a..a0bcd867a0f5 100644
--- a/Documentation/hwmon/ltc2992.rst
+++ b/Documentation/hwmon/ltc2992.rst
@@ -19,7 +19,7 @@ This driver supports hardware monitoring for Linear Technology LTC2992 power mon
LTC2992 is a rail-to-rail system monitor that measures current,
voltage, and power of two supplies.
-Two ADCs simultaneously measure each supply’s current. A third ADC monitors
+Two ADCs simultaneously measure each supply's current. A third ADC monitors
the input voltages and four auxiliary external voltages.
diff --git a/Documentation/hwmon/max31790.rst b/Documentation/hwmon/max31790.rst
index f301385d8cef..7b097c3b9b90 100644
--- a/Documentation/hwmon/max31790.rst
+++ b/Documentation/hwmon/max31790.rst
@@ -38,6 +38,7 @@ Sysfs entries
fan[1-12]_input RO fan tachometer speed in RPM
fan[1-12]_fault RO fan experienced fault
fan[1-6]_target RW desired fan speed in RPM
-pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
-pwm[1-6] RW fan target duty cycle (0-255)
+pwm[1-6]_enable RW regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
+pwm[1-6] RW read: current pwm duty cycle,
+ write: target pwm duty cycle (0-255)
================== === =======================================================
diff --git a/Documentation/hwmon/mp2888.rst b/Documentation/hwmon/mp2888.rst
new file mode 100644
index 000000000000..5e578fd7b147
--- /dev/null
+++ b/Documentation/hwmon/mp2888.rst
@@ -0,0 +1,113 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp2888
+====================
+
+Supported chips:
+
+ * MPS MP12254
+
+ Prefix: 'mp2888'
+
+Author:
+
+ Vadim Pasternak <vadimp@nvidia.com>
+
+Description
+-----------
+
+This driver implements support for Monolithic Power Systems, Inc. (MPS)
+vendor dual-loop, digital, multi-phase controller MP2888.
+
+This device supports:
+
+- One power rail.
+- Programmable multi-phase operation, up to 10 phases.
+- PWM-VID interface.
+- One page (page 0) for telemetry.
+- Programmable pins for the PMBus address.
+- Built-in EEPROM to store custom configurations.
+
+Device compliant with:
+
+- PMBus rev 1.3 interface.
+
+Device supports direct format for reading output current, output voltage,
+input and output power and temperature.
+Device supports linear format for reading input voltage and input power.
+
+The driver provides the following attributes for the current:
+
+- for output current: input and maximum alarm;
+- for phase current: input and label.
+
+The driver exports the following attributes via the 'sysfs' files, where:
+
+- 'n' is the number of configured phases (from 1 to 10);
+- index 1 is for "iout";
+- indexes 2 ... 1 + n are for phases.
+
+**curr[1-{1+n}]_input**
+
+**curr[1-{1+n}]_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+The driver provides the following attributes for the voltage:
+
+- for input voltage: input, low and high critical thresholds, low and high
+  critical alarms;
+- for output voltage: input and high alarm.
+
+The driver exports the following attributes via the 'sysfs' files:
+
+**in1_crit**
+
+**in1_crit_alarm**
+
+**in1_input**
+
+**in1_label**
+
+**in1_min**
+
+**in1_min_alarm**
+
+**in2_alarm**
+
+**in2_input**
+
+**in2_label**
+
+The driver provides the following attributes for the power:
+
+- for input power: alarm and input;
+- for output power: cap, cap alarm and input.
+
+The driver exports the following attributes via the 'sysfs' files, where:
+
+- index 1 is for "pin";
+- index 2 is for "pout".
+
+**power1_alarm**
+
+**power1_input**
+
+**power1_label**
+
+**power2_input**
+
+**power2_label**
+
+**power2_max**
+
+**power2_max_alarm**
+
+The driver provides the following attributes for the temperature:
+
+**temp1_input**
+
+**temp1_max**
+
+**temp1_max_alarm**
diff --git a/Documentation/hwmon/pim4328.rst b/Documentation/hwmon/pim4328.rst
new file mode 100644
index 000000000000..70c9e7a6882c
--- /dev/null
+++ b/Documentation/hwmon/pim4328.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver pim4328
+=====================
+
+Supported chips:
+
+ * Flex PIM4328
+
+ Prefix: 'pim4328', 'bmr455'
+
+ Addresses scanned: -
+
+    Datasheet: https://flexpowermodules.com/resources/fpm-techspec-pim4328
+
+ * Flex PIM4820
+
+ Prefixes: 'pim4820'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-pim4820
+
+ * Flex PIM4006, PIM4106, PIM4206, PIM4306, PIM4406
+
+ Prefixes: 'pim4006', 'pim4106', 'pim4206', 'pim4306', 'pim4406'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-pim4006
+
+Author: Erik Rosen <erik.rosen@metormote.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Flex PIM4328 and
+compatible digital power interface modules.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst and Documentation/hwmon/pmbus-core.rst for details
+on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. All attributes are read-only.
+
+======================= ========================================================
+in1_label "vin"
+in1_input Measured input voltage.
+in1_alarm Input voltage alarm.
+
+in2_label "vin.0"
+in2_input Measured input voltage on input A.
+
+ PIM4328 and PIM4X06
+
+in3_label "vin.1"
+in3_input Measured input voltage on input B.
+
+ PIM4328 and PIM4X06
+
+in4_label "vcap"
+in4_input Measured voltage on holdup capacitor.
+
+ PIM4328
+
+curr1_label "iin.0"
+curr1_input Measured input current on input A.
+
+ PIM4X06
+
+curr2_label "iin.1"
+curr2_input Measured input current on input B.
+
+ PIM4X06
+
+currX_label "iout1"
+currX_input Measured output current.
+currX_alarm Output current alarm.
+
+ X is 1 for PIM4820, 3 otherwise.
+
+temp1_input Measured temperature.
+temp1_alarm High temperature alarm.
+======================= ========================================================
diff --git a/Documentation/hwmon/pm6764tr.rst b/Documentation/hwmon/pm6764tr.rst
index a1fb8fea2326..294a8ffc8bd8 100644
--- a/Documentation/hwmon/pm6764tr.rst
+++ b/Documentation/hwmon/pm6764tr.rst
@@ -20,7 +20,7 @@ Description:
------------
This driver supports the STMicroelectronics PM6764TR chip. The PM6764TR is a high
-performance digital controller designed to power Intel’s VR12.5 processors and memories.
+performance digital controller designed to power Intel's VR12.5 processors and memories.
The device utilizes digital technology to implement all control and power management
functions to provide maximum flexibility and performance. The NVM is embedded to store
diff --git a/Documentation/hwmon/pmbus-core.rst b/Documentation/hwmon/pmbus-core.rst
index 73e23ab42cc3..e7e0c9ef10be 100644
--- a/Documentation/hwmon/pmbus-core.rst
+++ b/Documentation/hwmon/pmbus-core.rst
@@ -289,12 +289,22 @@ PMBus driver platform data
==========================
PMBus platform data is defined in include/linux/pmbus.h. Platform data
-currently only provides a flag field with a single bit used::
+currently provides a flags field with four bits used::
- #define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+ #define PMBUS_SKIP_STATUS_CHECK BIT(0)
+
+ #define PMBUS_WRITE_PROTECTED BIT(1)
+
+ #define PMBUS_NO_CAPABILITY BIT(2)
+
+ #define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
+
+ /* regulator support */
+ int num_regulators;
+ struct regulator_init_data *reg_init_data;
};
@@ -302,8 +312,9 @@ Flags
-----
PMBUS_SKIP_STATUS_CHECK
- During register detection, skip checking the status register for
- communication or command errors.
+
+During register detection, skip checking the status register for
+communication or command errors.
Some PMBus chips respond with valid data when trying to read an unsupported
register. For such chips, checking the status register is mandatory when
@@ -315,3 +326,26 @@ status register must be disabled.
Some i2c controllers do not support single-byte commands (write commands with
no data, i2c_smbus_write_byte()). With such controllers, clearing the status
register is impossible, and the PMBUS_SKIP_STATUS_CHECK flag must be set.
+
+PMBUS_WRITE_PROTECTED
+
+Set if the chip is write protected and write protection is not determined
+by the standard WRITE_PROTECT command.
+
+PMBUS_NO_CAPABILITY
+
+Some PMBus chips don't respond with valid data when reading the CAPABILITY
+register. For such chips, this flag should be set so that the PMBus core
+driver doesn't use CAPABILITY to determine its behavior.
+
+PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+
+Read the STATUS register after each failed register check.
+
+Some PMBus chips end up in an undefined state when trying to read an
+unsupported register. For such chips, it is necessary to reset the
+chip's PMBus controller to a known state after a failed register check.
+This can be done by reading a known register. By setting this flag the
+driver will try to read the STATUS register after each failed
+register check. This read may fail, but it will put the chip into a
+known state.
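+
+For example, a board file could instantiate a PMBus device with some of
+these flags set through its platform data (an illustrative sketch; the
+device address and the use of the generic 'pmbus' name are made up for
+the example):
+
+.. code-block:: c
+
+    #include <linux/i2c.h>
+    #include <linux/pmbus.h>
+
+    static struct pmbus_platform_data pdata = {
+        .flags = PMBUS_SKIP_STATUS_CHECK | PMBUS_NO_CAPABILITY,
+    };
+
+    static const struct i2c_board_info pmbus_board_info = {
+        I2C_BOARD_INFO("pmbus", 0x60),
+        .platform_data = &pdata,
+    };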
diff --git a/Documentation/hwmon/pmbus.rst b/Documentation/hwmon/pmbus.rst
index c44f14115413..7ecfec6ca2db 100644
--- a/Documentation/hwmon/pmbus.rst
+++ b/Documentation/hwmon/pmbus.rst
@@ -3,15 +3,18 @@ Kernel driver pmbus
Supported chips:
- * Ericsson BMR453, BMR454
+ * Flex BMR310, BMR453, BMR454, BMR456, BMR457, BMR458, BMR480,
+ BMR490, BMR491, BMR492
- Prefixes: 'bmr453', 'bmr454'
+ Prefixes: 'bmr310', 'bmr453', 'bmr454', 'bmr456', 'bmr457', 'bmr458', 'bmr480',
+ 'bmr490', 'bmr491', 'bmr492'
Addresses scanned: -
- Datasheet:
+ Datasheets:
+
+ https://flexpowermodules.com/products
- http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
* ON Semiconductor ADP4000, NCP4200, NCP4208
diff --git a/Documentation/hwmon/sht4x.rst b/Documentation/hwmon/sht4x.rst
new file mode 100644
index 000000000000..3b37abcd4a46
--- /dev/null
+++ b/Documentation/hwmon/sht4x.rst
@@ -0,0 +1,45 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver sht4x
+===================
+
+Supported Chips:
+
+ * Sensirion SHT4X
+
+ Prefix: 'sht4x'
+
+ Addresses scanned: None
+
+ Datasheet:
+
+ English: https://www.sensirion.com/fileadmin/user_upload/customers/sensirion/Dokumente/2_Humidity_Sensors/Datasheets/Sensirion_Humidity_Sensors_SHT4x_Datasheet.pdf
+
+Author: Navin Sankar Velliangiri <navin@linumiz.com>
+
+
+Description
+-----------
+
+This driver implements support for the Sensirion SHT4x chip, a humidity
+and temperature sensor. Temperature is measured in degrees Celsius, relative
+humidity is expressed as a percentage. In the sysfs interface, all values are
+scaled by 1000, i.e. the value for 31.5 degrees Celsius is 31500.
+
+Usage Notes
+-----------
+
+The device communicates using the I2C protocol. Sensors can have the I2C
+address 0x44. See Documentation/i2c/instantiating-devices.rst for methods
+to instantiate the device.
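+
+For example, with the sensor on the bus behind adapter ``adap``, a board
+file instantiation could look like this (a minimal sketch):
+
+.. code-block:: c
+
+    #include <linux/i2c.h>
+
+    static const struct i2c_board_info sht4x_info = {
+        I2C_BOARD_INFO("sht4x", 0x44),
+    };
+
+    struct i2c_client *client = i2c_new_client_device(adap, &sht4x_info);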
+
+Sysfs entries
+-------------
+
+=============== ============================================
+temp1_input     Measured temperature in millidegrees Celsius
+humidity1_input Measured humidity in %H
+update_interval The minimum interval for polling the sensor,
+ in milliseconds. Writable. Must be at least
+ 2000.
+=============== ============================================
diff --git a/Documentation/hwmon/zl6100.rst b/Documentation/hwmon/zl6100.rst
index 968aff10ce0a..d42ed9d3ac69 100644
--- a/Documentation/hwmon/zl6100.rst
+++ b/Documentation/hwmon/zl6100.rst
@@ -3,87 +3,103 @@ Kernel driver zl6100
Supported chips:
- * Intersil / Zilker Labs ZL2004
+ * Renesas / Intersil / Zilker Labs ZL2004
Prefix: 'zl2004'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2004-datasheet.pdf
- * Intersil / Zilker Labs ZL2005
+ * Renesas / Intersil / Zilker Labs ZL2005
Prefix: 'zl2005'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2005-datasheet.pdf
- * Intersil / Zilker Labs ZL2006
+ * Renesas / Intersil / Zilker Labs ZL2006
Prefix: 'zl2006'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6850.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2006-datasheet.pdf
- * Intersil / Zilker Labs ZL2008
+ * Renesas / Intersil / Zilker Labs ZL2008
Prefix: 'zl2008'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6859.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2008-datasheet.pdf
- * Intersil / Zilker Labs ZL2105
+ * Renesas / Intersil / Zilker Labs ZL2105
Prefix: 'zl2105'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6851.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2105-datasheet.pdf
- * Intersil / Zilker Labs ZL2106
+ * Renesas / Intersil / Zilker Labs ZL2106
Prefix: 'zl2106'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6852.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2106-datasheet.pdf
- * Intersil / Zilker Labs ZL6100
+ * Renesas / Intersil / Zilker Labs ZL6100
Prefix: 'zl6100'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6876.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl6100-datasheet.pdf
- * Intersil / Zilker Labs ZL6105
+ * Renesas / Intersil / Zilker Labs ZL6105
Prefix: 'zl6105'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl6105-datasheet.pdf
- * Intersil / Zilker Labs ZL9101M
+ * Renesas / Intersil / Zilker Labs ZL8802
+
+ Prefix: 'zl8802'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl8802-datasheet
+
+ * Renesas / Intersil / Zilker Labs ZL9101M
Prefix: 'zl9101'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn7669.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl9101m-datasheet
- * Intersil / Zilker Labs ZL9117M
+ * Renesas / Intersil / Zilker Labs ZL9117M
Prefix: 'zl9117'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn7914.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl9117m-datasheet
+
+ * Renesas / Intersil / Zilker Labs ZLS1003, ZLS4009
+
+ Prefixes: 'zls1003', 'zls4009'
+
+ Addresses scanned: -
+
+ Datasheet: Not published
- * Ericsson BMR450, BMR451
+ * Flex BMR450, BMR451
Prefix: 'bmr450', 'bmr451'
@@ -91,17 +107,39 @@ Supported chips:
Datasheet:
-http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+https://flexpowermodules.com/resources/fpm-techspec-bmr450-digital-pol-regulators-20a
- * Ericsson BMR462, BMR463, BMR464
+ * Flex BMR462, BMR463, BMR464
Prefixes: 'bmr462', 'bmr463', 'bmr464'
Addresses scanned: -
- Datasheet:
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr462
+
+ * Flex BMR465, BMR467
+
+ Prefixes: 'bmr465', 'bmr467'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr465-digital-pol
+
+ * Flex BMR466
+
+ Prefixes: 'bmr466'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr466-8x12
- http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+ * Flex BMR469
+
+ Prefixes: 'bmr469'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr4696001
Author: Guenter Roeck <linux@roeck-us.net>
@@ -109,8 +147,8 @@ Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-This driver supports hardware monitoring for Intersil / Zilker Labs ZL6100 and
-compatible digital DC-DC controllers.
+This driver supports hardware monitoring for Renesas / Intersil / Zilker Labs
+ZL6100 and compatible digital DC-DC controllers.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus.rst and Documentation.hwmon/pmbus-core for details
@@ -147,12 +185,12 @@ Module parameters
delay
-----
-Intersil/Zilker Labs DC-DC controllers require a minimum interval between I2C
-bus accesses. According to Intersil, the minimum interval is 2 ms, though 1 ms
-appears to be sufficient and has not caused any problems in testing. The problem
-is known to affect all currently supported chips. For manual override, the
-driver provides a writeable module parameter, 'delay', which can be used to set
-the interval to a value between 0 and 65,535 microseconds.
+Renesas/Intersil/Zilker Labs DC-DC controllers require a minimum interval
+between I2C bus accesses. According to Intersil, the minimum interval is 2 ms,
+though 1 ms appears to be sufficient and has not caused any problems in testing.
+The problem is known to affect all currently supported chips. For manual override,
+the driver provides a writeable module parameter, 'delay', which can be used
+to set the interval to a value between 0 and 65,535 microseconds.
Sysfs entries
@@ -182,24 +220,32 @@ in2_crit Critical maximum VMON/VDRV voltage.
in2_lcrit_alarm VMON/VDRV voltage critical low alarm.
in2_crit_alarm VMON/VDRV voltage critical high alarm.
- vmon attributes are supported on ZL2004, ZL9101M,
- and ZL9117M only.
+ vmon attributes are supported on ZL2004, ZL8802,
+ ZL9101M, ZL9117M and ZLS4009 only.
-inX_label "vout1"
+inX_label "vout[12]"
inX_input Measured output voltage.
inX_lcrit Critical minimum output Voltage.
inX_crit Critical maximum output voltage.
inX_lcrit_alarm Critical output voltage critical low alarm.
inX_crit_alarm Critical output voltage critical high alarm.
- X is 3 for ZL2004, ZL9101M, and ZL9117M, 2 otherwise.
+ X is 3 for ZL2004, ZL9101M, and ZL9117M,
+ 3, 4 for ZL8802 and 2 otherwise.
+
+curr1_label "iin"
+curr1_input Measured input current.
+
+ iin attributes are supported on ZL8802 only
+
+currY_label "iout[12]"
+currY_input Measured output current.
+currY_lcrit Critical minimum output current.
+currY_crit Critical maximum output current.
+currY_lcrit_alarm Output current critical low alarm.
+currY_crit_alarm Output current critical high alarm.
-curr1_label "iout1"
-curr1_input Measured output current.
-curr1_lcrit Critical minimum output current.
-curr1_crit Critical maximum output current.
-curr1_lcrit_alarm Output current critical low alarm.
-curr1_crit_alarm Output current critical high alarm.
+ Y is 2, 3 for ZL8802, 1 otherwise
temp[12]_input Measured temperature.
temp[12]_min Minimum temperature.
diff --git a/Documentation/i2c/instantiating-devices.rst b/Documentation/i2c/instantiating-devices.rst
index e558e0a77e0c..890c9360ce19 100644
--- a/Documentation/i2c/instantiating-devices.rst
+++ b/Documentation/i2c/instantiating-devices.rst
@@ -59,7 +59,7 @@ Declare the I2C devices via ACPI
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ACPI can also describe I2C devices. There is special documentation for this
-which is currently located at :doc:`../firmware-guide/acpi/enumeration`.
+which is currently located at Documentation/firmware-guide/acpi/enumeration.rst.
Declare the I2C devices in board files
diff --git a/Documentation/i2c/old-module-parameters.rst b/Documentation/i2c/old-module-parameters.rst
index 38e55829dee8..b08b6daabce9 100644
--- a/Documentation/i2c/old-module-parameters.rst
+++ b/Documentation/i2c/old-module-parameters.rst
@@ -17,7 +17,8 @@ address), ``force`` (to forcibly attach the driver to a given device) and
With the conversion of the I2C subsystem to the standard device driver
binding model, it became clear that these per-module parameters were no
longer needed, and that a centralized implementation was possible. The new,
-sysfs-based interface is described in :doc:`instantiating-devices`, section
+sysfs-based interface is described in
+Documentation/i2c/instantiating-devices.rst, section
"Method 4: Instantiate from user-space".
Below is a mapping from the old module parameters to the new interface.
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index 64689d19dd51..9e07e6bbe6a3 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -27,8 +27,8 @@ a different protocol operation entirely.
Each transaction type corresponds to a functionality flag. Before calling a
transaction function, a device driver should always check (just once) for
the corresponding functionality flag to ensure that the underlying I2C
-adapter supports the transaction in question. See :doc:`functionality` for
-the details.
+adapter supports the transaction in question. See
+Documentation/i2c/functionality.rst for the details.
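+
+For example, a driver using SMBus "read byte data" transactions could
+check as follows (an illustrative sketch; ``client`` and ``REG`` are
+placeholders):
+
+.. code-block:: c
+
+    int val;
+
+    if (!i2c_check_functionality(client->adapter,
+                                 I2C_FUNC_SMBUS_READ_BYTE_DATA))
+            return -EOPNOTSUPP;
+
+    val = i2c_smbus_read_byte_data(client, REG);
+    if (val < 0)
+            return val;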
Key to symbols
diff --git a/Documentation/input/joydev/joystick-api.rst b/Documentation/input/joydev/joystick-api.rst
index af5934c10c1c..5db6dc6fe1c5 100644
--- a/Documentation/input/joydev/joystick-api.rst
+++ b/Documentation/input/joydev/joystick-api.rst
@@ -263,7 +263,7 @@ possible overrun should the name be too long::
char name[128];
if (ioctl(fd, JSIOCGNAME(sizeof(name)), name) < 0)
- strncpy(name, "Unknown", sizeof(name));
+ strscpy(name, "Unknown", sizeof(name));
printf("Name: %s\n", name);
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index 451523424942..df65c19aa7df 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -601,7 +601,7 @@ Defined in ``include/linux/export.h``
This is the variant of `EXPORT_SYMBOL()` that allows specifying a symbol
namespace. Symbol Namespaces are documented in
-:doc:`../core-api/symbol-namespaces`
+Documentation/core-api/symbol-namespaces.rst
:c:func:`EXPORT_SYMBOL_NS_GPL()`
--------------------------------
@@ -610,7 +610,7 @@ Defined in ``include/linux/export.h``
This is the variant of `EXPORT_SYMBOL_GPL()` that allows specifying a symbol
namespace. Symbol Namespaces are documented in
-:doc:`../core-api/symbol-namespaces`
+Documentation/core-api/symbol-namespaces.rst
Routines and Conventions
========================
diff --git a/Documentation/locking/lockdep-design.rst b/Documentation/locking/lockdep-design.rst
index 9f3cfca9f8a4..82f36cab61bd 100644
--- a/Documentation/locking/lockdep-design.rst
+++ b/Documentation/locking/lockdep-design.rst
@@ -453,9 +453,9 @@ There are simply four block conditions:
Block condition matrix, Y means the row blocks the column, and N means otherwise.
+---+---+---+---+
- | | E | r | R |
+ | | W | r | R |
+---+---+---+---+
- | E | Y | Y | Y |
+ | W | Y | Y | Y |
+---+---+---+---+
| r | Y | Y | N |
+---+---+---+---+
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 2ccc5644cc98..42576880aa4a 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -290,19 +290,19 @@ round-robin example of distributing packets is shown below:
#define MAX_SOCKS 16
struct {
- __uint(type, BPF_MAP_TYPE_XSKMAP);
- __uint(max_entries, MAX_SOCKS);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, MAX_SOCKS);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
} xsks_map SEC(".maps");
static unsigned int rr;
SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
{
- rr = (rr + 1) & (MAX_SOCKS - 1);
+ rr = (rr + 1) & (MAX_SOCKS - 1);
- return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
+ return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
}
Note, that since there is only a single set of FILL and COMPLETION
@@ -379,7 +379,7 @@ would look like this for the TX path:
.. code-block:: c
if (xsk_ring_prod__needs_wakeup(&my_tx_ring))
- sendto(xsk_socket__fd(xsk_handle), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ sendto(xsk_socket__fd(xsk_handle), NULL, 0, MSG_DONTWAIT, NULL, 0);
I.e., only use the syscall if the flag is set.
@@ -442,9 +442,9 @@ purposes. The supported statistics are shown below:
.. code-block:: c
struct xdp_statistics {
- __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
- __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
- __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
};
XDP_OPTIONS getsockopt
@@ -483,15 +483,15 @@ like this:
.. code-block:: c
// struct xdp_rxtx_ring {
- // __u32 *producer;
- // __u32 *consumer;
- // struct xdp_desc *desc;
+ // __u32 *producer;
+ // __u32 *consumer;
+ // struct xdp_desc *desc;
// };
// struct xdp_umem_ring {
- // __u32 *producer;
- // __u32 *consumer;
- // __u64 *desc;
+ // __u32 *producer;
+ // __u32 *consumer;
+ // __u64 *desc;
// };
// typedef struct xdp_rxtx_ring RING;
diff --git a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
index 70643b58de05..4118384cf8eb 100644
--- a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
+++ b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -27,34 +27,136 @@ these MAP frames and send them to appropriate PDN's.
2. Packet format
================
-a. MAP packet (data / control)
+a. MAP packet v1 (data / control)
-MAP header has the same endianness of the IP packet.
+MAP header fields are in big endian format.
Packet format::
- Bit 0 1 2-7 8 - 15 16 - 31
+ Bit 0 1 2-7 8-15 16-31
Function Command / Data Reserved Pad Multiplexer ID Payload length
- Bit 32 - x
- Function Raw Bytes
+
+ Bit 32-x
+ Function Raw bytes
Command (1)/ Data (0) bit value is to indicate if the packet is a MAP command
-or data packet. Control packet is used for transport level flow control. Data
+or data packet. Command packet is used for transport level flow control. Data
packets are standard IP packets.
-Reserved bits are usually zeroed out and to be ignored by receiver.
+Reserved bits must be zero when sent and ignored when received.
-Padding is number of bytes to be added for 4 byte alignment if required by
-hardware.
+Padding is the number of bytes to be appended to the payload to
+ensure 4 byte alignment.
Multiplexer ID is to indicate the PDN on which data has to be sent.
Payload length includes the padding length but does not include MAP header
length.
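+
+For reference, the v1 header layout above corresponds roughly to the
+following C bitfield struct (an illustrative sketch only; the field names
+do not necessarily match the in-kernel definitions):
+
+.. code-block:: c
+
+    #include <asm/byteorder.h>
+    #include <linux/types.h>
+
+    struct map_header_v1 {
+    #if defined(__BIG_ENDIAN_BITFIELD)
+            u8  cd_bit:1;           /* 1 = command, 0 = data */
+            u8  reserved:1;         /* must be zero when sent */
+            u8  pad_len:6;          /* bytes of trailing padding */
+    #elif defined(__LITTLE_ENDIAN_BITFIELD)
+            u8  pad_len:6;
+            u8  reserved:1;
+            u8  cd_bit:1;
+    #endif
+            u8  mux_id;             /* PDN to send the data on */
+            __be16 pkt_len;         /* payload length, incl. padding */
+    };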
-b. MAP packet (command specific)::
+b. MAP packet v4 (data / control)
+
+MAP header fields are in big endian format.
+
+Packet format::
+
+ Bit 0 1 2-7 8-15 16-31
+ Function Command / Data Reserved Pad Multiplexer ID Payload length
+
+ Bit 32-(x-33) (x-32)-x
+ Function Raw bytes Checksum offload header
+
+Command (1)/ Data (0) bit value is to indicate if the packet is a MAP command
+or data packet. Command packet is used for transport level flow control. Data
+packets are standard IP packets.
+
+Reserved bits must be zero when sent and ignored when received.
+
+Padding is the number of bytes to be appended to the payload to
+ensure 4 byte alignment.
+
+Multiplexer ID is to indicate the PDN on which data has to be sent.
+
+Payload length includes the padding length but does not include MAP header
+length.
+
+The checksum offload header carries information about the checksum processing
+done by the hardware. Checksum offload header fields are in big endian format.
+
+Packet format::
+
+ Bit 0-14 15 16-31
+ Function Reserved Valid Checksum start offset
+
+ Bit             32-47             48-63
+ Function        Checksum length   Checksum value
+
+Reserved bits must be zero when sent and ignored when received.
+
+Valid bit indicates whether the partial checksum is calculated and is valid.
+It is set to 1 if the checksum is valid and set to 0 otherwise.
+
+Checksum start offset indicates the offset in bytes from the beginning of the
+IP header, from which the modem computed the checksum.
+
+Checksum length is the length in bytes, starting from CKSUM_START_OFFSET,
+over which the checksum is computed.
+
+Checksum value is the computed checksum.
+
+c. MAP packet v5 (data / control)
+
+MAP header fields are in big endian format.
+
+Packet format::
+
+ Bit 0 1 2-7 8-15 16-31
+ Function Command / Data Next header Pad Multiplexer ID Payload length
+
+ Bit 32-x
+ Function Raw bytes
+
+Command (1)/ Data (0) bit value is to indicate if the packet is a MAP command
+or data packet. Command packet is used for transport level flow control. Data
+packets are standard IP packets.
+
+Next header is used to indicate the presence of another header, currently
+limited to the checksum header.
+
+Padding is the number of bytes to be appended to the payload to
+ensure 4 byte alignment.
+
+Multiplexer ID is to indicate the PDN on which data has to be sent.
+
+Payload length includes the padding length but does not include MAP header
+length.
+
+d. Checksum offload header v5
+
+Checksum offload header fields are in big endian format.
+
+Packet format::
+
+ Bit             0-6            7              8-15              16-31
+ Function        Header Type    Next Header    Checksum Valid    Reserved
+
+Header Type indicates the type of header; this is usually set to CHECKSUM.
+
+Header types
+
+= ==========================================
+0 Reserved
+1 Reserved
+2 Checksum header
+= ==========================================
+
+Checksum Valid indicates whether the checksum is valid. A value of 1
+implies that the checksum was calculated on this packet and is valid; a
+value of 0 indicates that the calculated packet checksum is invalid.
+
+Reserved bits must be zero when sent and ignored when received.
+
+e. MAP packet v1/v5 (command specific)::
- Bit 0 1 2-7 8 - 15 16 - 31
+ Bit 0 1 2-7 8 - 15 16 - 31
Function Command Reserved Pad Multiplexer ID Payload length
Bit 32 - 39 40 - 45 46 - 47 48 - 63
Function Command name Reserved Command Type Reserved
@@ -74,7 +176,7 @@ Command types
3 is for error during processing of commands
= ==========================================
-c. Aggregation
+f. Aggregation
Aggregation is multiple MAP packets (can be data or command) delivered to
rmnet in a single linear skb. rmnet will process the individual
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index f8c6469f2bd2..01b2a69b0cb0 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -11,12 +11,12 @@ ENA is a networking interface designed to make good use of modern CPU
features and system architectures.
The ENA device exposes a lightweight management interface with a
-minimal set of memory mapped registers and extendable command set
+minimal set of memory mapped registers and extendible command set
through an Admin Queue.
The driver supports a range of ENA devices, is link-speed independent
-(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has
-a negotiated and extendable feature set.
+(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc), and has
+a negotiated and extendible feature set.
Some ENA devices support SR-IOV. This driver is used for both the
SR-IOV Physical Function (PF) and Virtual Function (VF) devices.
@@ -27,9 +27,9 @@ is advertised by the device via the Admin Queue), a dedicated MSI-X
interrupt vector per Tx/Rx queue pair, adaptive interrupt moderation,
and CPU cacheline optimized data placement.
-The ENA driver supports industry standard TCP/IP offload features such
-as checksum offload and TCP transmit segmentation offload (TSO).
-Receive-side scaling (RSS) is supported for multi-core scaling.
+The ENA driver supports industry standard TCP/IP offload features such as
+checksum offload. Receive-side scaling (RSS) is supported for multi-core
+scaling.
The ENA driver and its corresponding devices implement health
monitoring mechanisms such as watchdog, enabling the device and driver
@@ -38,22 +38,20 @@ debug logs.
Some of the ENA devices support a working mode called Low-latency
Queue (LLQ), which saves several more microseconds.
-
ENA Source Code Directory Structure
===================================
================= ======================================================
ena_com.[ch] Management communication layer. This layer is
- responsible for the handling all the management
- (admin) communication between the device and the
- driver.
+ responsible for the handling all the management
+ (admin) communication between the device and the
+ driver.
ena_eth_com.[ch] Tx/Rx data path.
ena_admin_defs.h Definition of ENA management interface.
ena_eth_io_defs.h Definition of ENA data path interface.
ena_common_defs.h Common definitions for ena_com layer.
ena_regs_defs.h Definition of ENA PCI memory-mapped (MMIO) registers.
ena_netdev.[ch] Main Linux kernel driver.
-ena_syfsfs.[ch] Sysfs files.
ena_ethtool.c ethtool callbacks.
ena_pci_id_tbl.h Supported device IDs.
================= ======================================================
@@ -69,7 +67,7 @@ ENA management interface is exposed by means of:
- Asynchronous Event Notification Queue (AENQ)
ENA device MMIO Registers are accessed only during driver
-initialization and are not involved in further normal device
+initialization and are not used during further normal device
operation.
AQ is used for submitting management commands, and the
@@ -100,28 +98,27 @@ group may have multiple syndromes, as shown below
The events are:
- ==================== ===============
- Group Syndrome
- ==================== ===============
- Link state change **X**
- Fatal error **X**
- Notification Suspend traffic
- Notification Resume traffic
- Keep-Alive **X**
- ==================== ===============
+==================== ===============
+Group Syndrome
+==================== ===============
+Link state change **X**
+Fatal error **X**
+Notification Suspend traffic
+Notification Resume traffic
+Keep-Alive **X**
+==================== ===============
ACQ and AENQ share the same MSI-X vector.
-Keep-Alive is a special mechanism that allows monitoring of the
-device's health. The driver maintains a watchdog (WD) handler which,
-if fired, logs the current state and statistics then resets and
-restarts the ENA device and driver. A Keep-Alive event is delivered by
-the device every second. The driver re-arms the WD upon reception of a
-Keep-Alive event. A missed Keep-Alive event causes the WD handler to
-fire.
+Keep-Alive is a special mechanism that allows monitoring the device's health.
+A Keep-Alive event is delivered by the device every second.
+The driver maintains a watchdog (WD) handler which logs the current state and
+statistics. If the keep-alive events aren't delivered as expected, the WD
+resets the device and the driver.
Data Path Interface
===================
+
I/O operations are based on Tx and Rx Submission Queues (Tx SQ and Rx
SQ correspondingly). Each SQ has a completion queue (CQ) associated
with it.
@@ -131,26 +128,24 @@ physical memory.
The ENA driver supports two Queue Operation modes for Tx SQs:
-- Regular mode
+- **Regular mode:**
+ In this mode the Tx SQs reside in the host's memory. The ENA
+ device fetches the ENA Tx descriptors and packet data from host
+ memory.
- * In this mode the Tx SQs reside in the host's memory. The ENA
- device fetches the ENA Tx descriptors and packet data from host
- memory.
+- **Low Latency Queue (LLQ) mode or "push-mode":**
+ In this mode the driver pushes the transmit descriptors and the
+ first 128 bytes of the packet directly to the ENA device memory
+ space. The rest of the packet payload is fetched by the
+ device. For this operation mode, the driver uses a dedicated PCI
+ device memory BAR, which is mapped with write-combine capability.
-- Low Latency Queue (LLQ) mode or "push-mode".
-
- * In this mode the driver pushes the transmit descriptors and the
- first 128 bytes of the packet directly to the ENA device memory
- space. The rest of the packet payload is fetched by the
- device. For this operation mode, the driver uses a dedicated PCI
- device memory BAR, which is mapped with write-combine capability.
+ **Note that** not all ENA devices support LLQ, and this feature is negotiated
+ with the device upon initialization. If the ENA device does not
+ support LLQ mode, the driver falls back to the regular mode.
The Rx SQs support only the regular mode.
-Note: Not all ENA devices support LLQ, and this feature is negotiated
- with the device upon initialization. If the ENA device does not
- support LLQ mode, the driver falls back to the regular mode.
-
The driver supports multi-queue for both Tx and Rx. This has various
benefits:
@@ -165,6 +160,7 @@ benefits:
Interrupt Modes
===============
+
The driver assigns a single MSI-X vector per queue pair (for both Tx
and Rx directions). The driver assigns an additional dedicated MSI-X vector
for management (for ACQ and AENQ).
@@ -190,20 +186,21 @@ unmasked by the driver after NAPI processing is complete.
Interrupt Moderation
====================
+
ENA driver and device can operate in conventional or adaptive interrupt
moderation mode.
-In conventional mode the driver instructs device to postpone interrupt
+**In conventional mode** the driver instructs device to postpone interrupt
posting according to static interrupt delay value. The interrupt delay
-value can be configured through ethtool(8). The following ethtool
-parameters are supported by the driver: tx-usecs, rx-usecs
+value can be configured through `ethtool(8)`. The following `ethtool`
+parameters are supported by the driver: ``tx-usecs``, ``rx-usecs``
-In adaptive interrupt moderation mode the interrupt delay value is
+**In adaptive interrupt moderation mode** the interrupt delay value is
updated by the driver dynamically and adjusted every NAPI cycle
according to the traffic nature.
-Adaptive coalescing can be switched on/off through ethtool(8)
-adaptive_rx on|off parameter.
+Adaptive coalescing can be switched on/off through `ethtool(8)`'s
+:code:`adaptive_rx on|off` parameter.
More information about Adaptive Interrupt Moderation (DIM) can be found in
Documentation/networking/net_dim.rst
@@ -214,17 +211,10 @@ The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
and can be configured by the ETHTOOL_STUNABLE command of the
SIOCETHTOOL ioctl.
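+
+For example, from user space the tunable can be set like this (an
+illustrative sketch; error handling is omitted and ``sockfd`` is assumed
+to be an open socket):
+
+.. code-block:: c
+
+    #include <linux/ethtool.h>
+    #include <linux/sockios.h>
+    #include <net/if.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+
+    static int set_rx_copybreak(int sockfd, const char *ifname, __u32 bytes)
+    {
+            struct {
+                    struct ethtool_tunable hdr;
+                    __u32 val;
+            } req = {
+                    .hdr = {
+                            .cmd     = ETHTOOL_STUNABLE,
+                            .id      = ETHTOOL_RX_COPYBREAK,
+                            .type_id = ETHTOOL_TUNABLE_U32,
+                            .len     = sizeof(__u32),
+                    },
+                    .val = bytes,    /* new rx_copybreak, in bytes */
+            };
+            struct ifreq ifr;
+
+            memset(&ifr, 0, sizeof(ifr));
+            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+            ifr.ifr_data = (void *)&req;
+
+            return ioctl(sockfd, SIOCETHTOOL, &ifr);
+    }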
-SKB
-===
-The driver-allocated SKB for frames received from Rx handling using
-NAPI context. The allocation method depends on the size of the packet.
-If the frame length is larger than rx_copybreak, napi_get_frags()
-is used, otherwise netdev_alloc_skb_ip_align() is used, the buffer
-content is copied (by CPU) to the SKB, and the buffer is recycled.
-
Statistics
==========
-The user can obtain ENA device and driver statistics using ethtool.
+
+The user can obtain ENA device and driver statistics using `ethtool`.
The driver can collect regular or extended statistics (including
per-queue stats) from the device.
@@ -232,22 +222,23 @@ In addition the driver logs the stats to syslog upon device reset.
MTU
===
+
The driver supports an arbitrarily large MTU with a maximum that is
negotiated with the device. The driver configures MTU using the
SetFeature command (ENA_ADMIN_MTU property). The user can change MTU
-via ip(8) and similar legacy tools.
+via `ip(8)` and similar legacy tools.
Stateless Offloads
==================
+
The ENA driver supports:
-- TSO over IPv4/IPv6
-- TSO with ECN
- IPv4 header checksum offload
- TCP/UDP over IPv4/IPv6 checksum offloads
RSS
===
+
- The ENA device supports RSS that allows flexible Rx traffic
steering.
- Toeplitz and CRC32 hash functions are supported.
@@ -260,41 +251,42 @@ RSS
function delivered in the Rx CQ descriptor is set in the received
SKB.
- The user can provide a hash key, hash function, and configure the
- indirection table through ethtool(8).
+ indirection table through `ethtool(8)`.
DATA PATH
=========
+
Tx
--
-ena_start_xmit() is called by the stack. This function does the following:
+:code:`ena_start_xmit()` is called by the stack. This function does the following:
-- Maps data buffers (skb->data and frags).
-- Populates ena_buf for the push buffer (if the driver and device are
- in push mode.)
+- Maps data buffers (``skb->data`` and frags).
+- Populates ``ena_buf`` for the push buffer (if the driver and device are
+ in push mode).
- Prepares ENA bufs for the remaining frags.
-- Allocates a new request ID from the empty req_id ring. The request
+- Allocates a new request ID from the empty ``req_id`` ring. The request
ID is the index of the packet in the Tx info. This is used for
- out-of-order TX completions.
+ out-of-order Tx completions.
- Adds the packet to the proper place in the Tx ring.
-- Calls ena_com_prepare_tx(), an ENA communication layer that converts
- the ena_bufs to ENA descriptors (and adds meta ENA descriptors as
- needed.)
+- Calls :code:`ena_com_prepare_tx()`, an ENA communication layer that converts
+ the ``ena_bufs`` to ENA descriptors (and adds meta ENA descriptors as
+ needed).
* This function also copies the ENA descriptors and the push buffer
- to the Device memory space (if in push mode.)
+ to the Device memory space (if in push mode).
-- Writes doorbell to the ENA device.
+- Writes a doorbell to the ENA device.
- When the ENA device finishes sending the packet, a completion
interrupt is raised.
- The interrupt handler schedules NAPI.
-- The ena_clean_tx_irq() function is called. This function handles the
+- The :code:`ena_clean_tx_irq()` function is called. This function handles the
completion descriptors generated by the ENA, with a single
completion descriptor per completed packet.
- * req_id is retrieved from the completion descriptor. The tx_info of
- the packet is retrieved via the req_id. The data buffers are
- unmapped and req_id is returned to the empty req_id ring.
+ * ``req_id`` is retrieved from the completion descriptor. The ``tx_info`` of
+ the packet is retrieved via the ``req_id``. The data buffers are
+ unmapped and ``req_id`` is returned to the empty ``req_id`` ring.
* The function stops when the completion descriptors are completed or
the budget is reached.
@@ -303,12 +295,11 @@ Rx
- When a packet is received from the ENA device.
- The interrupt handler schedules NAPI.
-- The ena_clean_rx_irq() function is called. This function calls
- ena_rx_pkt(), an ENA communication layer function, which returns the
- number of descriptors used for a new unhandled packet, and zero if
+- The :code:`ena_clean_rx_irq()` function is called. This function calls
+ :code:`ena_com_rx_pkt()`, an ENA communication layer function, which returns the
+ number of descriptors used for a new packet, and zero if
no new packet is found.
-- Then it calls the ena_clean_rx_irq() function.
-- ena_eth_rx_skb() checks packet length:
+- :code:`ena_rx_skb()` checks packet length:
* If the packet is small (len < rx_copybreak), the driver allocates
a SKB for the new packet, and copies the packet payload into the
@@ -317,9 +308,10 @@ Rx
- In this way the original data buffer is not passed to the stack
and is reused for future Rx packets.
- * Otherwise the function unmaps the Rx buffer, then allocates the
- new SKB structure and hooks the Rx buffer to the SKB frags.
+ * Otherwise the function unmaps the Rx buffer, sets the first
+ descriptor as `skb`'s linear part and the other descriptors as the
+ `skb`'s frags.
- The new SKB is updated with the necessary information (protocol,
- checksum hw verify result, etc.), and then passed to the network
- stack, using the NAPI interface function napi_gro_receive().
+ checksum hw verify result, etc), and then passed to the network
+ stack, using the NAPI interface function :code:`napi_gro_receive()`.
diff --git a/Documentation/networking/device_drivers/ethernet/google/gve.rst b/Documentation/networking/device_drivers/ethernet/google/gve.rst
index 793693cef6e3..6d73ee78f3d7 100644
--- a/Documentation/networking/device_drivers/ethernet/google/gve.rst
+++ b/Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -47,13 +47,24 @@ The driver interacts with the device in the following ways:
- Transmit and Receive Queues
- See description below
+Descriptor Formats
+------------------
+GVE supports two descriptor formats: GQI and DQO. These two formats have
+entirely different descriptors, which will be described below.
+
Registers
---------
-All registers are MMIO and big endian.
+All registers are MMIO.
The registers are used for initializing and configuring the device as well as
querying device status in response to management interrupts.
+Endianness
+----------
+- Admin Queue messages and registers are all Big Endian.
+- GQI descriptors and datapath registers are Big Endian.
+- DQO descriptors and datapath registers are Little Endian.
+
Admin Queue (AQ)
----------------
The Admin Queue is a PAGE_SIZE memory block, treated as an array of AQ
@@ -97,10 +108,10 @@ the queues associated with that interrupt.
The handler for these irqs schedule the napi for that block to run
and poll the queues.
-Traffic Queues
---------------
-gVNIC's queues are composed of a descriptor ring and a buffer and are
-assigned to a notification block.
+GQI Traffic Queues
+------------------
+GQI queues are composed of a descriptor ring and a buffer and are assigned to a
+notification block.
The descriptor rings are power-of-two-sized ring buffers consisting of
fixed-size descriptors. They advance their head pointer using a __be32
@@ -121,3 +132,35 @@ Receive
The buffers for receive rings are put into a data ring that is the same
length as the descriptor ring and the head and tail pointers advance over
the rings together.
+
+DQO Traffic Queues
+------------------
+- Every TX and RX queue is assigned a notification block.
+
+- TX and RX buffers queues, which send descriptors to the device, use MMIO
+ doorbells to notify the device of new descriptors.
+
+- RX and TX completion queues, which receive descriptors from the device, use a
+ "generation bit" to know when a descriptor was populated by the device. The
+ driver initializes all bits with the "current generation". The device will
+ populate received descriptors with the "next generation" which is inverted
+ from the current generation. When the ring wraps, the current/next generation
+ are swapped.
+
+- It's the driver's responsibility to ensure that the RX and TX completion
+ queues are not overrun. This can be accomplished by limiting the number of
+ descriptors posted to HW.
+
+- TX packets have a 16 bit completion_tag and RX buffers have a 16 bit
+ buffer_id. These will be returned on the TX completion and RX queues
+ respectively to let the driver know which packet/buffer was completed.
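+
+A minimal sketch of the generation-bit check described above (the names
+are hypothetical, not the driver's own):
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    #define GEN_BIT 0x1
+
+    struct compl_desc {
+            u8 flags;               /* includes the generation bit */
+            /* ... */
+    };
+
+    /* A descriptor whose generation bit differs from the ring's current
+     * generation was just populated by the device.
+     */
+    static bool desc_is_new(const struct compl_desc *desc, u8 cur_gen)
+    {
+            return (desc->flags & GEN_BIT) != cur_gen;
+    }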
+
+Transmit
+~~~~~~~~
+A packet's buffers are DMA mapped for the device to access before transmission.
+After the packet has been successfully transmitted, the buffers are unmapped.
+
+Receive
+~~~~~~~
+The driver posts fixed-size buffers to HW on the RX buffer queue. The packet
+received on the associated RX queue may span multiple descriptors.
diff --git a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
index 2d3f6bd969a2..ac35bd472bdc 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
@@ -466,7 +466,7 @@ network. PTP support varies among Intel devices that support this driver. Use
"ethtool -T <netdev name>" to get a definitive list of PTP capabilities
supported by the device.
-IEEE 802.1ad (QinQ) Support
+IEEE 802.1ad (QinQ) Support
---------------------------
The IEEE 802.1ad standard, informally known as QinQ, allows for multiple VLAN
IDs within a single Ethernet frame. VLAN IDs are sometimes referred to as
@@ -523,8 +523,8 @@ of a port's bandwidth (should it be available). The sum of all the values for
Maximum Bandwidth is not restricted, because no more than 100% of a port's
bandwidth can ever be used.
-NOTE: X710/XXV710 devices fail to enable Max VFs (64) when Multiple Functions
-per Port (MFP) and SR-IOV are enabled. An error from i40e is logged that says
+NOTE: X710/XXV710 devices fail to enable Max VFs (64) when Multiple Functions
+per Port (MFP) and SR-IOV are enabled. An error from i40e is logged that says
"add vsi failed for VF N, aq_err 16". To workaround the issue, enable less than
64 virtual functions (VFs).
diff --git a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
index 25330b7b5168..151af0a8da9c 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
@@ -113,7 +113,7 @@ which the AVF is associated. The following are base mode features:
- AVF device ID
- HW mailbox is used for VF to PF communications (including on Windows)
-IEEE 802.1ad (QinQ) Support
+IEEE 802.1ad (QinQ) Support
---------------------------
The IEEE 802.1ad standard, informally known as QinQ, allows for multiple VLAN
IDs within a single Ethernet frame. VLAN IDs are sometimes referred to as
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
index 936a10f1942c..ef8cb62e82a1 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
@@ -12,6 +12,7 @@ Contents
- `Enabling the driver and kconfig options`_
- `Devlink info`_
- `Devlink parameters`_
+- `Bridge offload`_
- `mlx5 subfunction`_
- `mlx5 function attributes`_
- `Devlink health reporters`_
@@ -217,6 +218,37 @@ users try to enable them.
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+Bridge offload
+==============
+The mlx5 driver implements support for offloading bridge rules when in switchdev
+mode. Linux bridge FDBs are automatically offloaded when mlx5 switchdev
+representor is attached to a bridge.
+
+- Change device to switchdev mode::
+
+ $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
+- Attach mlx5 switchdev representor 'enp8s0f0' to bridge netdev 'bridge1'::
+
+ $ ip link set enp8s0f0 master bridge1
+
+VLANs
+-----
+The following bridge VLAN functions are supported by mlx5:
+
+- VLAN filtering (including multiple VLANs per port)::
+
+ $ ip link set bridge1 type bridge vlan_filtering 1
+ $ bridge vlan add dev enp8s0f0 vid 2-3
+
+- VLAN push on bridge ingress::
+
+ $ bridge vlan add dev enp8s0f0 vid 3 pvid
+
+- VLAN pop on bridge egress::
+
+ $ bridge vlan add dev enp8s0f0 vid 3 untagged
+
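+Whether a given FDB entry was offloaded can be verified from the ``offload``
+flag in the output of the standard bridge tool (an illustrative command; the
+exact output format depends on the iproute2 version)::
+
+ $ bridge fdb show dev enp8s0f0
+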
mlx5 subfunction
================
mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
@@ -568,3 +600,59 @@ tc and eswitch offloads tracepoints:
$ cat /sys/kernel/debug/tracing/trace
...
kworker/u48:7-2221 [009] ...1 1475.387435: mlx5e_rep_neigh_update: netdev: ens1f0 MAC: 24:8a:07:9a:17:9a IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_connected=1
+
+Bridge offloads tracepoints:
+
+- mlx5_esw_bridge_fdb_entry_init: trace bridge FDB entry offloaded to mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_init >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u20:9-2217 [003] ...1 318.582243: mlx5_esw_bridge_fdb_entry_init: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=0 flags=0 used=0
+
+- mlx5_esw_bridge_fdb_entry_cleanup: trace bridge FDB entry deleted from mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2581 [005] ...1 318.629871: mlx5_esw_bridge_fdb_entry_cleanup: net_device=enp8s0f0_1 addr=e4:fd:05:08:00:03 vid=0 flags=0 used=16
+
+- mlx5_esw_bridge_fdb_entry_refresh: trace bridge FDB entry offload refreshed in
+ mlx5::
+
+ $ echo mlx5:mlx5_esw_bridge_fdb_entry_refresh >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ kworker/u20:8-3849 [003] ...1 466716: mlx5_esw_bridge_fdb_entry_refresh: net_device=enp8s0f0_0 addr=e4:fd:05:08:00:02 vid=3 flags=0 used=0
+
+- mlx5_esw_bridge_vlan_create: trace bridge VLAN object add on mlx5
+ representor::
+
+ $ echo mlx5:mlx5_esw_bridge_vlan_create >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2560 [007] ...1 318.460258: mlx5_esw_bridge_vlan_create: vid=1 flags=6
+
+- mlx5_esw_bridge_vlan_cleanup: trace bridge VLAN object delete from mlx5
+ representor::
+
+ $ echo mlx5:mlx5_esw_bridge_vlan_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ bridge-2582 [007] ...1 318.653496: mlx5_esw_bridge_vlan_cleanup: vid=2 flags=8
+
+- mlx5_esw_bridge_vport_init: trace mlx5 vport assigned with bridge upper
+ device::
+
+ $ echo mlx5:mlx5_esw_bridge_vport_init >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-2560 [007] ...1 318.458915: mlx5_esw_bridge_vport_init: vport_num=1
+
+- mlx5_esw_bridge_vport_cleanup: trace mlx5 vport removed from bridge upper
+ device::
+
+ $ echo mlx5:mlx5_esw_bridge_vport_cleanup >> set_event
+ $ cat /sys/kernel/debug/tracing/trace
+ ...
+ ip-5387 [000] ...1 573713: mlx5_esw_bridge_vport_cleanup: vport_num=1
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index d8279de7bf25..3a5a1d46e77e 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -18,6 +18,7 @@ Contents:
qlogic/index
wan/index
wifi/index
+ wwan/index
.. only:: subproject and html
diff --git a/Documentation/networking/device_drivers/wwan/index.rst b/Documentation/networking/device_drivers/wwan/index.rst
new file mode 100644
index 000000000000..1cb8c7371401
--- /dev/null
+++ b/Documentation/networking/device_drivers/wwan/index.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+WWAN Device Drivers
+===================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ iosm
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/wwan/iosm.rst b/Documentation/networking/device_drivers/wwan/iosm.rst
new file mode 100644
index 000000000000..aceb0223eb46
--- /dev/null
+++ b/Documentation/networking/device_drivers/wwan/iosm.rst
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+.. Copyright (C) 2020-21 Intel Corporation
+
+.. _iosm_driver_doc:
+
+===========================================
+IOSM Driver for Intel M.2 PCIe based Modems
+===========================================
+The IOSM (IPC over Shared Memory) driver is a WWAN PCIe host driver developed
+for Linux and Chrome platforms for data exchange over the PCIe interface
+between the host platform and an Intel M.2 modem. The driver exposes an
+interface conforming to the MBIM protocol [1]. Any front-end application
+(e.g. Modem Manager) can manage the MBIM interface to enable data
+communication towards the WWAN network.
+
+Basic usage
+===========
+MBIM functions are inactive when unmanaged. The IOSM driver only provides a
+userspace interface, the MBIM "WWAN PORT", representing the MBIM control
+channel, and does not play any role in managing the functionality. It is the
+job of a userspace application to detect port enumeration and enable MBIM
+functionality.
+
+Examples of such userspace applications are:
+
+- mbimcli (included with the libmbim [2] library), and
+- Modem Manager [3]
+
+The management application is expected to carry out the following actions to
+establish an MBIM IP session (a sketch with mbimcli follows this list):
+
+- open the MBIM control channel
+- configure network connection settings
+- connect to the network
+- configure the IP network interface
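+
+For illustration only, querying modem readiness and connecting with mbimcli
+might look like this (assuming the modem enumerated as /dev/wwan0mbim0; the
+APN name is a placeholder)::
+
+ mbimcli -d /dev/wwan0mbim0 -p --query-subscriber-ready-status
+ mbimcli -d /dev/wwan0mbim0 -p --connect=apn='internet'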
+
+Management application development
+==================================
+The driver and userspace interfaces are described below. The MBIM protocol is
+described in [1] Mobile Broadband Interface Model v1.0 Errata-1.
+
+MBIM control channel userspace ABI
+----------------------------------
+
+/dev/wwan0mbim0 character device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The driver exposes an MBIM interface to the MBIM function by implementing
+MBIM WWAN Port. The userspace end of the control channel pipe is a
+/dev/wwan0mbim0 character device. Applications shall use this interface for
+MBIM protocol communication.
+
+Fragmentation
+~~~~~~~~~~~~~
+The userspace application is responsible for all control message fragmentation
+and defragmentation as per MBIM specification.
+
+/dev/wwan0mbim0 write()
+~~~~~~~~~~~~~~~~~~~~~~~
+The MBIM control messages from the management application must not exceed the
+negotiated control message size.
+
+/dev/wwan0mbim0 read()
+~~~~~~~~~~~~~~~~~~~~~~
+The management application must accept control messages of up to the negotiated
+control message size.
+
+MBIM data channel userspace ABI
+-------------------------------
+
+wwan0-X network device
+~~~~~~~~~~~~~~~~~~~~~~
+The IOSM driver exposes an IP link interface "wwan0-X" of type "wwan" for IP
+traffic. The iproute network utility is used for creating the "wwan0-X" network
+interface and for associating it with an MBIM IP session. The driver supports
+up to 8 IP sessions for simultaneous IP communication.
+
+The userspace management application is responsible for creating a new IP link
+prior to establishing an MBIM IP session where the SessionId is greater than 0.
+
+For example, creating a new IP link for an MBIM IP session with SessionId 1::
+
+ ip link add dev wwan0-1 parentdev-name wwan0 type wwan linkid 1
+
+The driver will automatically map the "wwan0-1" network device to MBIM IP
+session 1.
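+
+Once mapped, the interface can be brought up and addressed with standard
+iproute2 commands (an illustrative sketch; the IP configuration would normally
+be retrieved from the modem via MBIM)::
+
+ ip link set dev wwan0-1 up
+ ip addr add 10.0.0.2/24 dev wwan0-1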
+
+References
+==========
+[1] "MBIM (Mobile Broadband Interface Model) Errata-1"
+ - https://www.usb.org/document-library/
+
+[2] libmbim - "a glib-based library for talking to WWAN modems and
+ devices which speak the Mobile Interface Broadband Model (MBIM)
+ protocol"
+ - http://www.freedesktop.org/wiki/Software/libmbim/
+
+[3] Modem Manager - "a DBus-activated daemon which controls mobile
+ broadband (2G/3G/4G) devices and connections"
+ - http://www.freedesktop.org/wiki/Software/ModemManager/
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
index ab790e7980b8..7627b1da01f2 100644
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -164,6 +164,41 @@ device to instantiate the subfunction device on particular PCI function.
A subfunction device is created on the :ref:`Documentation/driver-api/auxiliary_bus.rst <auxiliary_bus>`.
At this point a matching subfunction driver binds to the subfunction's auxiliary device.
+Rate object management
+======================
+
+Devlink provides API to manage tx rates of single devlink port or a group.
+This is done through rate objects, which can be one of the two types:
+
+``leaf``
+ Represents a single devlink port; created/destroyed by the driver. Since a
+ leaf has a one-to-one mapping to its devlink port, in user space it is
+ referred to as ``pci/<bus_addr>/<port_index>``;
+
+``node``
+ Represents a group of rate objects (leafs and/or nodes); created/deleted by
+ request from the userspace; initially empty (no rate objects added). In
+ userspace it is referred to as ``pci/<bus_addr>/<node_name>``, where
+ ``node_name`` can be any identifier, except a decimal number, to avoid
+ collisions with leafs.
+
+The API allows configuring the following rate object parameters:
+
+``tx_share``
+ Minimum TX rate value, shared among all other rate objects or, if the object
+ is part of a parent group, among the other rate objects of that group.
+
+``tx_max``
+ Maximum TX rate value.
+
+``parent``
+ Parent node name. Parent node rate limits are considered as additional limits
+ to all node children limits. ``tx_max`` is an upper limit for children.
+ ``tx_share`` is a total bandwidth distributed among children.
+
+Driver implementations may support either or both rate object types and the
+methods for setting their parameters.
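+
+An illustrative sequence (device address, node name and rate values are
+placeholders; the exact syntax depends on the iproute2 version)::
+
+ $ devlink port function rate add pci/0000:06:00.0/group1 tx_max 100mbit
+ $ devlink port function rate set pci/0000:06:00.0/1 parent group1 tx_share 10mbit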
+
Terms and Definitions
=====================
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 3654c3e9658f..58fe95e9a49d 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -22,7 +22,7 @@ The major benefit to creating a region is to provide access to internal
address regions that are otherwise inaccessible to the user.
Regions may also be used to provide an additional way to debug complex error
-states, but see also :doc:`devlink-health`
+states, but see also Documentation/networking/devlink/devlink-health.rst
Regions may optionally support capturing a snapshot on demand via the
``DEVLINK_CMD_REGION_NEW`` netlink message. A driver wishing to allow
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index 935b6397e8cf..90d1381b88de 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -495,8 +495,9 @@ help debug packet drops caused by these exceptions. The following list includes
links to the description of driver-specific traps registered by various device
drivers:
- * :doc:`netdevsim`
- * :doc:`mlxsw`
+ * Documentation/networking/devlink/netdevsim.rst
+ * Documentation/networking/devlink/mlxsw.rst
+ * Documentation/networking/devlink/prestera.rst
.. _Generic-Packet-Trap-Groups:
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 8428a1220723..b3b9e0692088 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -46,3 +46,4 @@ parameters, info versions, and other features it supports.
qed
ti-cpsw-switch
am65-nuss-cpsw-switch
+ prestera
diff --git a/Documentation/networking/devlink/netdevsim.rst b/Documentation/networking/devlink/netdevsim.rst
index 02c2d20dc673..8a292fb5aaea 100644
--- a/Documentation/networking/devlink/netdevsim.rst
+++ b/Documentation/networking/devlink/netdevsim.rst
@@ -57,6 +57,32 @@ entries, FIB rule entries and nexthops that the driver will allow.
$ devlink resource set netdevsim/netdevsim0 path /nexthops size 16
$ devlink dev reload netdevsim/netdevsim0
+Rate objects
+============
+
+The ``netdevsim`` driver supports rate object management, which includes:
+
+- registering/unregistering leaf rate objects per VF devlink port;
+- creating/deleting node rate objects;
+- setting tx_share and tx_max rate values for any rate object type;
+- setting the parent node for any rate object type.
+
+Rate nodes and their parameters are exposed in ``netdevsim`` debugfs in
+read-only mode. For example, a rate node created with the name ``some_group``:
+
+.. code:: shell
+
+ $ ls /sys/kernel/debug/netdevsim/netdevsim0/rate_groups/some_group
+ rate_parent tx_max tx_share
+
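+The node itself could have been created with, for instance (an illustrative
+command, assuming an iproute2 version with devlink rate support):
+
+.. code:: shell
+
+    $ devlink port function rate add netdevsim/netdevsim0/some_group
+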
+The same parameters are exposed for leaf objects in the corresponding port
+directories. For example:
+
+.. code:: shell
+
+ $ ls /sys/kernel/debug/netdevsim/netdevsim0/ports/1
+ dev ethtool rate_parent tx_max tx_share
+
Driver-specific Traps
=====================
diff --git a/Documentation/networking/devlink/prestera.rst b/Documentation/networking/devlink/prestera.rst
new file mode 100644
index 000000000000..49409d1d3081
--- /dev/null
+++ b/Documentation/networking/devlink/prestera.rst
@@ -0,0 +1,141 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+prestera devlink support
+========================
+
+This document describes the devlink features implemented by the ``prestera``
+device driver.
+
+Driver-specific Traps
+=====================
+
+.. list-table:: List of Driver-specific Traps Registered by ``prestera``
+ :widths: 5 5 90
+
+ * - Name
+ - Type
+ - Description
+ * - ``arp_bc``
+ - ``trap``
+ - Traps ARP broadcast packets (both requests/responses)
+ * - ``is_is``
+ - ``trap``
+ - Traps IS-IS packets
+ * - ``ospf``
+ - ``trap``
+ - Traps OSPF packets
+ * - ``ip_bc_mac``
+ - ``trap``
+ - Traps IPv4 packets with a broadcast DA MAC address
+ * - ``stp``
+ - ``trap``
+ - Traps STP BPDU
+ * - ``lacp``
+ - ``trap``
+ - Traps LACP packets
+ * - ``lldp``
+ - ``trap``
+ - Traps LLDP packets
+ * - ``router_mc``
+ - ``trap``
+ - Traps multicast packets
+ * - ``vrrp``
+ - ``trap``
+ - Traps VRRP packets
+ * - ``dhcp``
+ - ``trap``
+ - Traps DHCP packets
+ * - ``mtu_error``
+ - ``trap``
+ - Traps (exception) packets that exceeded port's MTU
+ * - ``mac_to_me``
+ - ``trap``
+ - Traps packets with the switch port's DA MAC address
+ * - ``ttl_error``
+ - ``trap``
+ - Traps (exception) IPv4 packets whose TTL was exceeded
+ * - ``ipv4_options``
+ - ``trap``
+ - Traps (exception) packets due to the malformed IPV4 header options
+ * - ``ip_default_route``
+ - ``trap``
+ - Traps packets that have no specific IP interface (IP to me) and no forwarding prefix
+ * - ``local_route``
+ - ``trap``
+ - Traps packets that have been sent to one of the switch's IP interface addresses
+ * - ``ipv4_icmp_redirect``
+ - ``trap``
+ - Traps (exception) IPV4 ICMP redirect packets
+ * - ``arp_response``
+ - ``trap``
+ - Traps ARP reply packets that have the switch port's DA MAC address
+ * - ``acl_code_0``
+ - ``trap``
+ - Traps packets that have ACL priority set to 0 (tc pref 0)
+ * - ``acl_code_1``
+ - ``trap``
+ - Traps packets that have ACL priority set to 1 (tc pref 1)
+ * - ``acl_code_2``
+ - ``trap``
+ - Traps packets that have ACL priority set to 2 (tc pref 2)
+ * - ``acl_code_3``
+ - ``trap``
+ - Traps packets that have ACL priority set to 3 (tc pref 3)
+ * - ``acl_code_4``
+ - ``trap``
+ - Traps packets that have ACL priority set to 4 (tc pref 4)
+ * - ``acl_code_5``
+ - ``trap``
+ - Traps packets that have ACL priority set to 5 (tc pref 5)
+ * - ``acl_code_6``
+ - ``trap``
+ - Traps packets that have ACL priority set to 6 (tc pref 6)
+ * - ``acl_code_7``
+ - ``trap``
+ - Traps packets that have ACL priority set to 7 (tc pref 7)
+ * - ``ipv4_bgp``
+ - ``trap``
+ - Traps IPv4 BGP packets
+ * - ``ssh``
+ - ``trap``
+ - Traps SSH packets
+ * - ``telnet``
+ - ``trap``
+ - Traps Telnet packets
+ * - ``icmp``
+ - ``trap``
+ - Traps ICMP packets
+ * - ``rxdma_drop``
+ - ``drop``
+ - Drops packets (RxDMA) due to the lack of ingress buffers etc.
+ * - ``port_no_vlan``
+ - ``drop``
+ - Drops packets due to a misconfigured network or an internal bug (config issue).
+ * - ``local_port``
+ - ``drop``
+ - Drops packets whose decision (FDB entry) is to bridge the packet back to the incoming port/trunk.
+ * - ``invalid_sa``
+ - ``drop``
+ - Drops packets with multicast source MAC address.
+ * - ``illegal_ip_addr``
+ - ``drop``
+ - Drops packets with illegal SIP/DIP multicast/unicast addresses.
+ * - ``illegal_ipv4_hdr``
+ - ``drop``
+ - Drops packets with illegal IPV4 header.
+ * - ``ip_uc_dip_da_mismatch``
+ - ``drop``
+ - Drops packets with destination MAC being unicast, but destination IP address being multicast.
+ * - ``ip_sip_is_zero``
+ - ``drop``
+ - Drops packets with zero (0) IPV4 source address.
+ * - ``met_red``
+ - ``drop``
+ - Drops non-conforming packets (dropped by the ingress policer, metering drop), e.g. when the packet rate exceeds the configured bandwidth.
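+
+Trap actions can be inspected and changed with the generic devlink trap
+commands, for example (illustrative device address)::
+
+ $ devlink trap show pci/0000:01:00.0 trap arp_bc
+ $ devlink trap set pci/0000:01:00.0 trap arp_bc action trap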
diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst
index 774f0e76c746..2b08f1a772d3 100644
--- a/Documentation/networking/dsa/configuration.rst
+++ b/Documentation/networking/dsa/configuration.rst
@@ -292,3 +292,71 @@ configuration.
# bring up the bridge devices
ip link set br0 up
+
+Forwarding database (FDB) management
+------------------------------------
+
+The existing DSA switches do not have the necessary hardware support to keep
+the software FDB of the bridge in sync with the hardware tables, so the two
+tables are managed separately (``bridge fdb show`` queries both, and depending
+on whether the ``self`` or ``master`` flags are being used, a ``bridge fdb
+add`` or ``bridge fdb del`` command acts upon entries from one or both tables).
+
+Up until kernel v4.14, DSA only supported user space management of bridge FDB
+entries using the bridge bypass operations (which do not update the software
+FDB, just the hardware one) using the ``self`` flag (which is optional and can
+be omitted).
+
+ .. code-block:: sh
+
+ bridge fdb add dev swp0 00:01:02:03:04:05 self static
+ # or shorthand
+ bridge fdb add dev swp0 00:01:02:03:04:05 static
+
+Due to a bug, the bridge bypass FDB implementation provided by DSA did not
+distinguish between ``static`` and ``local`` FDB entries (``static`` are meant
+to be forwarded, while ``local`` are meant to be locally terminated, i.e. sent
+to the host port). Instead, all FDB entries with the ``self`` flag (implicit or
+explicit) are treated by DSA as ``static`` even if they are ``local``.
+
+ .. code-block:: sh
+
+ # This command:
+ bridge fdb add dev swp0 00:01:02:03:04:05 static
+ # behaves the same for DSA as this command:
+ bridge fdb add dev swp0 00:01:02:03:04:05 local
+ # or shorthand, because the 'local' flag is implicit if 'static' is not
+ # specified, it also behaves the same as:
+ bridge fdb add dev swp0 00:01:02:03:04:05
+
+The last command is an incorrect way of adding a static bridge FDB entry to a
+DSA switch using the bridge bypass operations, and only works by accident. Other
+drivers will treat an FDB entry added by the same command as ``local`` and as
+such, will not forward it, as opposed to DSA.
+
+Between kernel v4.14 and v5.14, DSA has supported in parallel two modes of
+adding a bridge FDB entry to the switch: the bridge bypass discussed above, as
+well as a new mode using the ``master`` flag which installs FDB entries in the
+software bridge too.
+
+ .. code-block:: sh
+
+ bridge fdb add dev swp0 00:01:02:03:04:05 master static
+
+Since kernel v5.14, DSA has gained stronger integration with the bridge's
+software FDB, and the support for its bridge bypass FDB implementation (using
+the ``self`` flag) has been removed. This results in the following changes:
+
+ .. code-block:: sh
+
+ # This is the only valid way of adding an FDB entry that is supported,
+ # compatible with v4.14 kernels and later:
+ bridge fdb add dev swp0 00:01:02:03:04:05 master static
+ # This command is no longer buggy and the entry is properly treated as
+ # 'local' instead of being forwarded:
+ bridge fdb add dev swp0 00:01:02:03:04:05
+ # This command no longer installs a static FDB entry to hardware:
+ bridge fdb add dev swp0 00:01:02:03:04:05 static
+
+Script writers are therefore encouraged to use the ``master static`` set of
+flags when working with bridge FDB entries on DSA switch interfaces.
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst
index 8688009514cc..20baacf2bc5c 100644
--- a/Documentation/networking/dsa/dsa.rst
+++ b/Documentation/networking/dsa/dsa.rst
@@ -93,14 +93,15 @@ A tagging protocol may tag all packets with switch tags of the same length, or
the tag length might vary (for example packets with PTP timestamps might
require an extended switch tag, or there might be one tag length on TX and a
different one on RX). Either way, the tagging protocol driver must populate the
-``struct dsa_device_ops::overhead`` with the length in octets of the longest
-switch frame header. The DSA framework will automatically adjust the MTU of the
-master interface to accomodate for this extra size in order for DSA user ports
-to support the standard MTU (L2 payload length) of 1500 octets. The ``overhead``
-is also used to request from the network stack, on a best-effort basis, the
-allocation of packets with a ``needed_headroom`` or ``needed_tailroom``
-sufficient such that the act of pushing the switch tag on transmission of a
-packet does not cause it to reallocate due to lack of memory.
+``struct dsa_device_ops::needed_headroom`` and/or ``struct dsa_device_ops::needed_tailroom``
+with the length in octets of the longest switch frame header/trailer. The DSA
+framework will automatically adjust the MTU of the master interface to
+accommodate for this extra size in order for DSA user ports to support the
+standard MTU (L2 payload length) of 1500 octets. The ``needed_headroom`` and
+``needed_tailroom`` properties are also used to request from the network stack,
+on a best-effort basis, the allocation of packets with enough extra space such
+that the act of pushing the switch tag on transmission of a packet does not
+cause it to reallocate due to lack of memory.
Even though applications are not expected to parse DSA-specific frame headers,
the format on the wire of the tagging protocol represents an Application Binary
@@ -169,8 +170,8 @@ The job of this method is to prepare the skb in a way that the switch will
understand what egress port the packet is for (and not deliver it towards other
ports). Typically this is fulfilled by pushing a frame header. Checking for
insufficient size in the skb headroom or tailroom is unnecessary provided that
-the ``overhead`` and ``tail_tag`` properties were filled out properly, because
-DSA ensures there is enough space before calling this method.
+the ``needed_headroom`` and ``needed_tailroom`` properties were filled out
+properly, because DSA ensures there is enough space before calling this method.
The reception of a packet goes through the tagger's ``rcv`` function. The
passed ``struct sk_buff *skb`` has ``skb->data`` pointing at
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index 7395a33baaf9..da4057ba37f1 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -5,7 +5,7 @@ NXP SJA1105 switch driver
Overview
========
-The NXP SJA1105 is a family of 6 devices:
+The NXP SJA1105 is a family of 10 SPI-managed automotive switches:
- SJA1105E: First generation, no TTEthernet
- SJA1105T: First generation, TTEthernet
@@ -13,9 +13,11 @@ The NXP SJA1105 is a family of 6 devices:
- SJA1105Q: Second generation, TTEthernet, no SGMII
- SJA1105R: Second generation, no TTEthernet, SGMII
- SJA1105S: Second generation, TTEthernet, SGMII
-
-These are SPI-managed automotive switches, with all ports being gigabit
-capable, and supporting MII/RMII/RGMII and optionally SGMII on one port.
+- SJA1110A: Third generation, TTEthernet, SGMII, integrated 100base-T1 and
+ 100base-TX PHYs
+- SJA1110B: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110C: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110D: Third generation, TTEthernet, SGMII, 100base-T1
Being automotive parts, their configuration interface is geared towards
set-and-forget use, with minimal dynamic interaction at runtime. They
@@ -579,3 +581,54 @@ A board would need to hook up the PHYs connected to the switch to any other
MDIO bus available to Linux within the system (e.g. to the DSA master's MDIO
bus). Link state management then works by the driver manually keeping in sync
(over SPI commands) the MAC link speed with the settings negotiated by the PHY.
+
+By comparison, the SJA1110 supports an MDIO slave access point over which its
+internal 100base-T1 PHYs can be accessed from the host. This is, however, not
+used by the driver; instead, the internal 100base-T1 and 100base-TX PHYs are
+accessed through SPI commands, modeled in Linux as virtual MDIO buses.
+
+The microcontroller attached to the SJA1110 port 0 also has an MDIO controller
+operating in master mode; however, the driver does not support this either,
+since the microcontroller gets disabled when the Linux driver operates.
+Discrete PHYs connected to the switch ports should have their MDIO interface
+attached to an MDIO controller from the host system and not to the switch,
+similar to SJA1105.
+
+Port compatibility matrix
+-------------------------
+
+The SJA1105 port compatibility matrix is:
+
+===== ============== ============== ==============
+Port SJA1105E/T SJA1105P/Q SJA1105R/S
+===== ============== ============== ==============
+0 xMII xMII xMII
+1 xMII xMII xMII
+2 xMII xMII xMII
+3 xMII xMII xMII
+4 xMII xMII SGMII
+===== ============== ============== ==============
+
+
+The SJA1110 port compatibility matrix is:
+
+===== ============== ============== ============== ==============
+Port SJA1110A SJA1110B SJA1110C SJA1110D
+===== ============== ============== ============== ==============
+0 RevMII (uC) RevMII (uC) RevMII (uC) RevMII (uC)
+1 100base-TX 100base-TX 100base-TX
+ or SGMII SGMII
+2 xMII xMII xMII xMII
+ or SGMII or SGMII
+3 xMII xMII xMII
+ or SGMII or SGMII SGMII
+ or 2500base-X or 2500base-X or 2500base-X
+4 SGMII SGMII SGMII SGMII
+ or 2500base-X or 2500base-X or 2500base-X or 2500base-X
+5 100base-T1 100base-T1 100base-T1 100base-T1
+6 100base-T1 100base-T1 100base-T1 100base-T1
+7 100base-T1 100base-T1 100base-T1 100base-T1
+8 100base-T1 100base-T1 n/a n/a
+9 100base-T1 100base-T1 n/a n/a
+10 100base-T1 n/a n/a n/a
+===== ============== ============== ============== ==============
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 25131df3c2bd..6ea91e41593f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1363,8 +1363,8 @@ in an implementation specific way.
``ETHTOOL_A_FEC_AUTO`` requests the driver to choose FEC mode based on SFP
module parameters. This does not mean autonegotiation.
-MODULE_EEPROM
-=============
+MODULE_EEPROM_GET
+=================
Fetch module EEPROM data dump.
This interface is designed to allow dumps of at most 1/2 page at once. This
@@ -1383,12 +1383,14 @@ Request contents:
``ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS`` u8 page I2C address
======================================= ====== ==========================
+If ``ETHTOOL_A_MODULE_EEPROM_BANK`` is not specified, bank 0 is assumed.
+
Kernel response contents:
+---------------------------------------------+--------+---------------------+
| ``ETHTOOL_A_MODULE_EEPROM_HEADER`` | nested | reply header |
+---------------------------------------------+--------+---------------------+
- | ``ETHTOOL_A_MODULE_EEPROM_DATA`` | nested | array of bytes from |
+ | ``ETHTOOL_A_MODULE_EEPROM_DATA`` | binary | array of bytes from |
| | | module EEPROM |
+---------------------------------------------+--------+---------------------+
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index c2ecc9894fd0..b3fa522e4cd9 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -99,6 +99,35 @@ fib_multipath_hash_policy - INTEGER
- 0 - Layer 3
- 1 - Layer 4
- 2 - Layer 3 or inner Layer 3 if present
+ - 3 - Custom multipath hash. Fields used for multipath hash calculation
+ are determined by the fib_multipath_hash_fields sysctl
+
+fib_multipath_hash_fields - UNSIGNED INTEGER
+ When fib_multipath_hash_policy is set to 3 (custom multipath hash), the
+ fields used for multipath hash calculation are determined by this
+ sysctl.
+
+ This value is a bitmask which enables various fields for multipath hash
+ calculation.
+
+ Possible fields are:
+
+ ====== ============================
+ 0x0001 Source IP address
+ 0x0002 Destination IP address
+ 0x0004 IP protocol
+ 0x0008 Unused (Flow Label)
+ 0x0010 Source port
+ 0x0020 Destination port
+ 0x0040 Inner source IP address
+ 0x0080 Inner destination IP address
+ 0x0100 Inner IP protocol
+ 0x0200 Inner Flow Label
+ 0x0400 Inner source port
+ 0x0800 Inner destination port
+ ====== ============================
+
+ Default: 0x0007 (source IP, destination IP and IP protocol)
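+
+ For example, a shell sketch selecting source/destination IP, the IP
+ protocol and source/destination ports (0x0037; values are illustrative)::
+
+  # sysctl -w net.ipv4.fib_multipath_hash_policy=3
+  # sysctl -w net.ipv4.fib_multipath_hash_fields=0x0037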
fib_sync_mem - UNSIGNED INTEGER
Amount of dirty memory from fib entries that can be backlogged before
@@ -732,6 +761,31 @@ tcp_syncookies - INTEGER
network connections you can set this knob to 2 to enable
unconditionally generation of syncookies.
+tcp_migrate_req - BOOLEAN
+ The incoming connection is tied to a specific listening socket when
+ the initial SYN packet is received during the three-way handshake.
+ When a listener is closed, in-flight request sockets during the
+ handshake and established sockets in the accept queue are aborted.
+
+ If the listener has SO_REUSEPORT enabled, other listeners on the
+ same port should have been able to accept such connections. This
+ option makes it possible to migrate such child sockets to another
+ listener after close() or shutdown().
+
+ The BPF_SK_REUSEPORT_SELECT_OR_MIGRATE type of eBPF program should
+ usually be used to define the policy to pick an alive listener.
+ Otherwise, the kernel will randomly pick an alive listener only if
+ this option is enabled.
+
+ Note that migration between listeners with different settings may
+ crash applications. Let's say migration happens from listener A to
+ B, and only B has TCP_SAVE_SYN enabled. B cannot read SYN data from
+ the requests migrated from A. To avoid such a situation, cancel
+ migration by returning SK_DROP from the eBPF program, or disable
+ this option.
+
+ Default: 0
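+
+ For example, enabling it (shell sketch)::
+
+  # sysctl -w net.ipv4.tcp_migrate_req=1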
+
tcp_fastopen - INTEGER
Enable TCP Fast Open (RFC7413) to send and accept data in the opening
SYN packet.
@@ -1743,6 +1797,35 @@ fib_multipath_hash_policy - INTEGER
- 0 - Layer 3 (source and destination addresses plus flow label)
- 1 - Layer 4 (standard 5-tuple)
- 2 - Layer 3 or inner Layer 3 if present
+ - 3 - Custom multipath hash. Fields used for multipath hash calculation
+ are determined by the fib_multipath_hash_fields sysctl
+
+fib_multipath_hash_fields - UNSIGNED INTEGER
+ When fib_multipath_hash_policy is set to 3 (custom multipath hash), the
+ fields used for multipath hash calculation are determined by this
+ sysctl.
+
+ This value is a bitmask which enables various fields for multipath hash
+ calculation.
+
+ Possible fields are:
+
+ ====== ============================
+ 0x0001 Source IP address
+ 0x0002 Destination IP address
+ 0x0004 IP protocol
+ 0x0008 Flow Label
+ 0x0010 Source port
+ 0x0020 Destination port
+ 0x0040 Inner source IP address
+ 0x0080 Inner destination IP address
+ 0x0100 Inner IP protocol
+ 0x0200 Inner Flow Label
+ 0x0400 Inner source port
+ 0x0800 Inner destination port
+ ====== ============================
+
+ Default: 0x0007 (source IP, destination IP and IP protocol)
anycast_src_echo_reply - BOOLEAN
Controls the use of anycast addresses as source addresses for ICMPv6
@@ -2751,6 +2834,18 @@ encap_port - INTEGER
Default: 0
+plpmtud_probe_interval - INTEGER
+ The time interval (in milliseconds) for the PLPMTUD probe timer,
+ which is configured to expire after this period to receive an
+ acknowledgment to a probe packet. This is also the time interval
+ between the probes for the current pmtu when the probe search
+ is done.
+
+ Setting this to 0 disables PLPMTUD; any other value must be
+ >= 5000.
+
+ Default: 0
+
``/proc/sys/net/core/*``
========================
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index 6af0196c4297..76d939e688b8 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -7,13 +7,13 @@ MPTCP Sysfs variables
/proc/sys/net/mptcp/* Variables
===============================
-enabled - INTEGER
+enabled - BOOLEAN
Control whether MPTCP sockets can be created.
- MPTCP sockets can be created if the value is nonzero. This is
- a per-namespace sysctl.
+ MPTCP sockets can be created if the value is 1. This is a
+ per-namespace sysctl.
- Default: 1
+ Default: 1 (enabled)
add_addr_timeout - INTEGER (seconds)
Set the timeout after which an ADD_ADDR control message will be
@@ -24,3 +24,24 @@ add_addr_timeout - INTEGER (seconds)
sysctl.
Default: 120
+
+checksum_enabled - BOOLEAN
+ Control whether DSS checksum can be enabled.
+
+ DSS checksum can be enabled if the value is nonzero. This is a
+ per-namespace sysctl.
+
+ Default: 0
+
+allow_join_initial_addr_port - BOOLEAN
+ Allow peers to send join requests to the IP address and port number used
+ by the initial subflow if the value is 1. This controls a flag that is
+ sent to the peer at connection time, and whether such join requests are
+ accepted or denied.
+
+ Joins to addresses advertised with ADD_ADDR are not affected by this
+ value.
+
+ This is a per-namespace sysctl.
+
+ Default: 1
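+
+For example, enabling DSS checksums in the current namespace (shell sketch)::
+
+ # sysctl -w net.mptcp.checksum_enabled=1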
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 11a9b76786cb..0467b30e4abe 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -177,3 +177,27 @@ nf_conntrack_gre_timeout_stream - INTEGER (seconds)
This extended timeout will be used in case there is an GRE stream
detected.
+
+nf_flowtable_tcp_timeout - INTEGER (seconds)
+ default 30
+
+ Control the offload timeout for TCP connections.
+ TCP connections may be offloaded from nf conntrack to nf flow table.
+ Once aged, the connection is returned to nf conntrack with the TCP pickup timeout.
+
+nf_flowtable_tcp_pickup - INTEGER (seconds)
+ default 120
+
+ TCP connection timeout after being aged from nf flow table offload.
+
+nf_flowtable_udp_timeout - INTEGER (seconds)
+ default 30
+
+ Control the offload timeout for UDP connections.
+ UDP connections may be offloaded from nf conntrack to nf flow table.
+ Once aged, the connection is returned to nf conntrack with the UDP pickup timeout.
+
+nf_flowtable_udp_pickup - INTEGER (seconds)
+ default 30
+
+ UDP connection timeout after being aged from nf flow table offload.
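+
+For example, raising the TCP offload timeout (shell sketch; these sysctls
+live under net.netfilter)::
+
+ # sysctl -w net.netfilter.nf_flowtable_tcp_timeout=60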
diff --git a/Documentation/networking/packet_mmap.rst b/Documentation/networking/packet_mmap.rst
index 500ef60b1b82..c5da1a5d93de 100644
--- a/Documentation/networking/packet_mmap.rst
+++ b/Documentation/networking/packet_mmap.rst
@@ -153,7 +153,7 @@ As capture, each frame contains two parts::
struct ifreq s_ifr;
...
- strncpy (s_ifr.ifr_name, "eth0", sizeof(s_ifr.ifr_name));
+ strscpy_pad (s_ifr.ifr_name, "eth0", sizeof(s_ifr.ifr_name));
/* get interface index of eth0 */
ioctl(this->socket, SIOCGIFINDEX, &s_ifr);
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index 3f05d50ecd6e..571ba08386e7 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -292,6 +292,12 @@ Some of the interface modes are described below:
Note: due to legacy usage, some 10GBASE-R usage incorrectly makes
use of this definition.
+``PHY_INTERFACE_MODE_25GBASER``
+ This is the IEEE 802.3 PCS Clause 107 defined 25GBASE-R protocol.
+ The PCS is identical to 10GBASE-R, i.e. 64B/66B encoded but running
+ 2.5 times as fast, giving a fixed bit rate of 25.78125 Gbaud.
+ Please refer to the IEEE standard for further information.
+
``PHY_INTERFACE_MODE_100BASEX``
This defines IEEE 802.3 Clause 24. The link operates at a fixed data
rate of 125Mpbs using a 4B/5B encoding scheme, resulting in an underlying
diff --git a/Documentation/networking/tuntap.rst b/Documentation/networking/tuntap.rst
index a59d1dd6fdcc..4d7087f727be 100644
--- a/Documentation/networking/tuntap.rst
+++ b/Documentation/networking/tuntap.rst
@@ -107,7 +107,7 @@ Note that the character pointer becomes overwritten with the real device name
*/
ifr.ifr_flags = IFF_TUN;
if( *dev )
- strncpy(ifr.ifr_name, dev, IFNAMSIZ);
+ strscpy_pad(ifr.ifr_name, dev, IFNAMSIZ);
if( (err = ioctl(fd, TUNSETIFF, (void *) &ifr)) < 0 ){
close(fd);
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 18ae21bf7f92..d6bf84f061f4 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -378,7 +378,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
`int pm_runtime_get_sync(struct device *dev);`
- increment the device's usage counter, run pm_runtime_resume(dev) and
- return its result
+ return its result;
+ note that it does not drop the device's usage counter on errors, so
+ consider using pm_runtime_resume_and_get() instead of it, especially
+ if its return value is checked by the caller, as this is likely to
+ result in cleaner code.
`int pm_runtime_get_if_in_use(struct device *dev);`
- return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
@@ -827,6 +831,15 @@ or driver about runtime power changes. Instead, the driver for the device's
parent must take responsibility for telling the device's driver when the
parent's power state changes.
+Note that, in some cases, it may not be desirable for subsystems/drivers to call
+pm_runtime_no_callbacks() for their devices. This could be because a subset of
+the runtime PM callbacks needs to be implemented, because a platform dependent
+PM domain could get attached to the device, or because the device is power
+managed through a supplier device link. For these reasons and to avoid
+boilerplate code in subsystems/drivers, the PM core allows runtime PM callbacks
+to be unassigned. More precisely, if a callback pointer is NULL, the PM core
+will act as though there was a callback and it returned 0.
+
9. Autosuspend, or automatically-delayed suspends
=================================================
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index c66a19201deb..0852bcf73630 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -10,10 +10,11 @@ can greatly increase the chances of your change being accepted.
This document contains a large number of suggestions in a relatively terse
format. For detailed information on how the kernel development process
-works, see :doc:`development-process`. Also, read :doc:`submit-checklist`
+works, see Documentation/process/development-process.rst. Also, read
+Documentation/process/submit-checklist.rst
for a list of items to check before submitting code. If you are submitting
-a driver, also read :doc:`submitting-drivers`; for device tree binding patches,
-read :doc:`submitting-patches`.
+a driver, also read Documentation/process/submitting-drivers.rst; for device
+tree binding patches, read Documentation/process/submitting-patches.rst.
This documentation assumes that you're using ``git`` to prepare your patches.
If you're unfamiliar with ``git``, you would be well-advised to learn how to
@@ -178,8 +179,7 @@ Style-check your changes
------------------------
Check your patch for basic style violations, details of which can be
-found in
-:ref:`Documentation/process/coding-style.rst <codingstyle>`.
+found in Documentation/process/coding-style.rst.
Failure to do so simply wastes
the reviewers time and will get your patch rejected, probably
without even being read.
@@ -238,7 +238,7 @@ If you have a patch that fixes an exploitable security bug, send that patch
to security@kernel.org. For severe bugs, a short embargo may be considered
to allow distributors to get the patch out to users; in such cases,
obviously, the patch should not be sent to any public lists. See also
-:doc:`/admin-guide/security-bugs`.
+Documentation/admin-guide/security-bugs.rst.
Patches that fix a severe bug in a released kernel should be directed
toward the stable maintainers by putting a line like this::
@@ -246,9 +246,8 @@ toward the stable maintainers by putting a line like this::
Cc: stable@vger.kernel.org
into the sign-off area of your patch (note, NOT an email recipient). You
-should also read
-:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
-in addition to this file.
+should also read Documentation/process/stable-kernel-rules.rst
+in addition to this document.
If changes affect userland-kernel interfaces, please send the MAN-PAGES
maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
@@ -305,8 +304,8 @@ decreasing the likelihood of your MIME-attached change being accepted.
Exception: If your mailer is mangling patches then someone may ask
you to re-send them using MIME.
-See :doc:`/process/email-clients` for hints about configuring your e-mail
-client so that it sends your patches untouched.
+See Documentation/process/email-clients.rst for hints about configuring
+your e-mail client so that it sends your patches untouched.
Respond to review comments
--------------------------
@@ -324,7 +323,7 @@ for their time. Code review is a tiring and time-consuming process, and
reviewers sometimes get grumpy. Even in that case, though, respond
politely and address the problems they have pointed out.
-See :doc:`email-clients` for recommendations on email
+See Documentation/process/email-clients.rst for recommendations on email
clients and mailing list etiquette.
@@ -562,10 +561,10 @@ method for indicating a bug fixed by the patch. See :ref:`describe_changes`
for more details.
Note: Attaching a Fixes: tag does not subvert the stable kernel rules
-process nor the requirement to Cc: stable@vger.kernel.org on all stable
+process nor the requirement to Cc: stable@vger.kernel.org on all stable
patch candidates. For more information, please read
-:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
-
+Documentation/process/stable-kernel-rules.rst.
+
.. _the_canonical_patch_format:
The canonical patch format
@@ -824,8 +823,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
-Kernel Documentation/process/coding-style.rst:
- :ref:`Documentation/process/coding-style.rst <codingstyle>`
+Kernel Documentation/process/coding-style.rst
Linus Torvalds's mail on the canonical patch format:
<https://lore.kernel.org/r/Pine.LNX.4.58.0504071023190.28951@ppc970.osdl.org>
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index 329d32098af4..b7f98930d38d 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -58,6 +58,6 @@ RISC-V Linux Kernel SV39
|
____________________________________________________________|____________________________________________________________
| | | |
- ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules
- ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel, BPF
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
+ ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 845eee659199..1fc73555f5c4 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -29,7 +29,7 @@ Quota and period are managed within the cpu subsystem via cgroupfs.
.. note::
The cgroupfs files described in this section are only applicable
to cgroup v1. For cgroup v2, see
- :ref:`Documentation/admin-guide/cgroupv2.rst <cgroup-v2-cpu>`.
+ :ref:`Documentation/admin-guide/cgroup-v2.rst <cgroup-v2-cpu>`.
- cpu.cfs_quota_us: the total available run-time within a period (in
microseconds)
diff --git a/Documentation/scheduler/sched-capacity.rst b/Documentation/scheduler/sched-capacity.rst
index 9b7cbe43b2d1..805f85f330b5 100644
--- a/Documentation/scheduler/sched-capacity.rst
+++ b/Documentation/scheduler/sched-capacity.rst
@@ -284,8 +284,10 @@ whether the system exhibits asymmetric CPU capacities. Should that be the
case:
- The sched_asym_cpucapacity static key will be enabled.
-- The SD_ASYM_CPUCAPACITY flag will be set at the lowest sched_domain level that
- spans all unique CPU capacity values.
+- The SD_ASYM_CPUCAPACITY_FULL flag will be set at the lowest sched_domain
+ level that spans all unique CPU capacity values.
+- The SD_ASYM_CPUCAPACITY flag will be set for any sched_domain that spans
+ CPUs with any range of asymmetry.
The sched_asym_cpucapacity static key is intended to guard sections of code that
cater to asymmetric CPU capacity systems. Do note however that said key is
diff --git a/Documentation/scheduler/sched-energy.rst b/Documentation/scheduler/sched-energy.rst
index afe02d394402..8fbce5e767d9 100644
--- a/Documentation/scheduler/sched-energy.rst
+++ b/Documentation/scheduler/sched-energy.rst
@@ -328,7 +328,7 @@ section lists these dependencies and provides hints as to how they can be met.
As mentioned in the introduction, EAS is only supported on platforms with
asymmetric CPU topologies for now. This requirement is checked at run-time by
-looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
+looking for the presence of the SD_ASYM_CPUCAPACITY_FULL flag when the scheduling
domains are built.
See Documentation/scheduler/sched-capacity.rst for requirements to be met for this
diff --git a/Documentation/scheduler/sched-nice-design.rst b/Documentation/scheduler/sched-nice-design.rst
index 0571f1b47e64..889bf2b737dc 100644
--- a/Documentation/scheduler/sched-nice-design.rst
+++ b/Documentation/scheduler/sched-nice-design.rst
@@ -60,7 +60,7 @@ within the constraints of HZ and jiffies and their nasty design level
coupling to timeslices and granularity it was not really viable.
The second (less frequent but still periodically occurring) complaint
-about Linux's nice level support was its assymetry around the origo
+about Linux's nice level support was its asymmetry around the origin
(which you can see demonstrated in the picture above), or more
accurately: the fact that nice level behavior depended on the _absolute_
nice level as well, while the nice API itself is fundamentally
diff --git a/Documentation/security/IMA-templates.rst b/Documentation/security/IMA-templates.rst
index c5a8432972ef..1a91d92950a7 100644
--- a/Documentation/security/IMA-templates.rst
+++ b/Documentation/security/IMA-templates.rst
@@ -70,9 +70,18 @@ descriptors by adding their identifier to the format string
prefix is shown only if the hash algorithm is not SHA1 or MD5);
- 'd-modsig': the digest of the event without the appended modsig;
- 'n-ng': the name of the event, without size limitations;
- - 'sig': the file signature;
+ - 'sig': the file signature, or the EVM portable signature if the file
+ signature is not found;
- 'modsig' the appended file signature;
- 'buf': the buffer data that was used to generate the hash without size limitations;
+ - 'evmsig': the EVM portable signature;
+ - 'iuid': the inode UID;
+ - 'igid': the inode GID;
+ - 'imode': the inode mode;
+ - 'xattrnames': a list of xattr names (separated by ``|``), only if the xattr is
+ present;
+ - 'xattrlengths': a list of xattr lengths (u32), only if the xattr is present;
+ - 'xattrvalues': a list of xattr values;
Below, there is the list of defined template descriptors:
@@ -82,6 +91,7 @@ Below, there is the list of defined template descriptors:
- "ima-sig": its format is ``d-ng|n-ng|sig``;
- "ima-buf": its format is ``d-ng|n-ng|buf``;
- "ima-modsig": its format is ``d-ng|n-ng|sig|d-modsig|modsig``;
+ - "evm-sig": its format is ``d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode``;
Use
diff --git a/Documentation/security/landlock.rst b/Documentation/security/landlock.rst
index 2e84925ae971..3df68cb1d10f 100644
--- a/Documentation/security/landlock.rst
+++ b/Documentation/security/landlock.rst
@@ -25,7 +25,8 @@ Any user can enforce Landlock rulesets on their processes. They are merged and
evaluated according to the inherited ones in a way that ensures that only more
constraints can be added.
-User space documentation can be found here: :doc:`/userspace-api/landlock`.
+User space documentation can be found here:
+Documentation/userspace-api/landlock.rst.
Guiding principles for safe access controls
===========================================
diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst
index 882d3cc72cc2..6312968acfe9 100644
--- a/Documentation/spi/pxa2xx.rst
+++ b/Documentation/spi/pxa2xx.rst
@@ -2,43 +2,47 @@
PXA2xx SPI on SSP driver HOWTO
==============================
-This a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
-synchronous serial port into a SPI master controller
+This is a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
+synchronous serial port into an SPI master controller
(see Documentation/spi/spi-summary.rst). The driver has the following features
-- Support for any PXA2xx SSP
+- Support for any PXA2xx and compatible SSP.
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
- Per slave device (chip) configuration.
- Full suspend, freeze, resume support.
-The driver is built around a "spi_message" fifo serviced by workqueue and a
-tasklet. The workqueue, "pump_messages", drives message fifo and the tasklet
-(pump_transfer) is responsible for queuing SPI transactions and setting up and
-launching the dma/interrupt driven transfers.
+The driver is built around a &struct spi_message FIFO serviced by a kernel
+thread. The kernel thread, spi_pump_messages(), drives the message FIFO and
+is responsible for queuing SPI transactions and setting up and launching
+the DMA or interrupt driven transfers.
Declaring PXA2xx Master Controllers
-----------------------------------
-Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
-"platform device". The master configuration is passed to the driver via a table
-found in include/linux/spi/pxa2xx_spi.h::
+Typically, for a legacy platform, an SPI master is defined in the
+arch/.../mach-*/board-*.c as a "platform device". The master configuration
+is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
+ ...
};
The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
slave device (chips) attached to this SPI master.
The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
-be used. This caused the driver to acquire two DMA channels: rx_channel and
-tx_channel. The rx_channel has a higher DMA service priority the tx_channel.
+be used. This causes the driver to acquire two DMA channels: an Rx channel and
+a Tx channel. The Rx channel has a higher DMA service priority than the Tx channel.
See the "PXA2xx Developer Manual" section "DMA Controller".
+For newer platforms, the description of the controller and peripheral devices
+comes from Device Tree or ACPI.
+
NSSP MASTER SAMPLE
------------------
-Below is a sample configuration using the PXA255 NSSP::
+Below is a sample configuration using the PXA255 NSSP for a legacy platform::
static struct resource pxa_spi_nssp_resources[] = {
[0] = {
@@ -79,9 +83,10 @@ Below is a sample configuration using the PXA255 NSSP::
Declaring Slave Devices
-----------------------
-Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
-using the "spi_board_info" structure found in "linux/spi/spi.h". See
-"Documentation/spi/spi-summary.rst" for additional information.
+Typically, for a legacy platform, each SPI slave (chip) is defined in the
+arch/.../mach-*/board-*.c using the "spi_board_info" structure found in
+"linux/spi/spi.h". See "Documentation/spi/spi-summary.rst" for additional
+information.
Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
@@ -101,9 +106,9 @@ device. All fields are optional.
};
The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
-used to configure the SSP hardware fifo. These fields are critical to the
+used to configure the SSP hardware FIFO. These fields are critical to the
performance of pxa2xx_spi driver and misconfiguration will result in rx
-fifo overruns (especially in PIO mode transfers). Good default values are::
+FIFO overruns (especially in PIO mode transfers). Good default values are::
.tx_threshold = 8,
.rx_threshold = 8,
@@ -118,7 +123,7 @@ use a value of 8. The driver will determine a reasonable default if
dma_burst_size == 0.
The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
-trailing bytes in the SSP receiver fifo. The correct value for this field is
+trailing bytes in the SSP receiver FIFO. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.
@@ -131,19 +136,19 @@ testing.
The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
-configured to use SSPFRM instead.
+configured to use GPIO or SSPFRM instead.
NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
chipselect is dropped after each spi_transfer. Most devices need chip select
-asserted around the complete message. Use SSPFRM as a GPIO (through cs_control)
+asserted around the complete message. Use SSPFRM as a GPIO (through a descriptor)
to accommodate these chips.
NSSP SLAVE SAMPLE
-----------------
-The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
-"spi_board_info.controller_data" field. Below is a sample configuration using
-the PXA255 NSSP.
+For a legacy platform or in some other cases, the pxa2xx_spi_chip structure
+is passed to the pxa2xx_spi driver in the "spi_board_info.controller_data"
+field. Below is a sample configuration using the PXA255 NSSP.
::
@@ -212,7 +217,9 @@ DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
-by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA
+by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure.
+For newer platforms that are known to support DMA, the driver will enable
+it automatically and try it first, with a possible fallback to PIO. The DMA
mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
@@ -236,5 +243,4 @@ a per "spi_transfer" basis::
THANKS TO
---------
-
David Brownell and others for mentoring the development of this driver.
diff --git a/Documentation/trace/coresight/coresight-etm4x-reference.rst b/Documentation/trace/coresight/coresight-etm4x-reference.rst
index b64d9a9c79df..d25dfe86af9b 100644
--- a/Documentation/trace/coresight/coresight-etm4x-reference.rst
+++ b/Documentation/trace/coresight/coresight-etm4x-reference.rst
@@ -427,7 +427,7 @@ the ‘TRC’ prefix.
:Syntax:
``echo idx > vmid_idx``
- Where idx <  numvmidc
+ Where idx < numvmidc
----
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 169749efd8d1..1ec8dc35b1d8 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -315,7 +315,8 @@ intermediate links as required.
Note: ``cti_sys0`` appears in two of the connections lists above.
CTIs can connect to multiple devices and are arranged in a star topology
-via the CTM. See (:doc:`coresight-ect`) [#fourth]_ for further details.
+via the CTM. See (Documentation/trace/coresight/coresight-ect.rst)
+[#fourth]_ for further details.
Looking at this device we see 4 connections::
linaro-developer:~# ls -l /sys/bus/coresight/devices/cti_sys0/connections
@@ -606,7 +607,8 @@ interface provided for that purpose by the generic STM API::
crw------- 1 root root 10, 61 Jan 3 18:11 /dev/stm0
root@genericarmv8:~#
-Details on how to use the generic STM API can be found here:- :doc:`../stm` [#second]_.
+Details on how to use the generic STM API can be found here:
+- Documentation/trace/stm.rst [#second]_.
The CTI & CTM Modules
---------------------
@@ -616,7 +618,7 @@ individual CTIs and components, and can propagate these between all CTIs via
channels on the CTM (Cross Trigger Matrix).
A separate documentation file is provided to explain the use of these devices.
-(:doc:`coresight-ect`) [#fourth]_.
+(Documentation/trace/coresight/coresight-ect.rst) [#fourth]_.
.. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 62c98e9bbdd9..cfc81e98e0b8 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -40,7 +40,7 @@ See events.rst for more information.
Implementation Details
----------------------
-See :doc:`ftrace-design` for details for arch porters and such.
+See Documentation/trace/ftrace-design.rst for details for arch porters and such.
The File System
@@ -354,8 +354,8 @@ of ftrace. Here is a list of some of the key files:
is being directly called by the function. If the count is greater
than 1 it most likely will be ftrace_ops_list_func().
- If the callback of the function jumps to a trampoline that is
- specific to a the callback and not the standard trampoline,
+ If the callback of a function jumps to a trampoline that is
+ specific to the callback and which is not the standard trampoline,
its address will be printed as well as the function that the
trampoline calls.
diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst
index b757b6dfd3d4..998149ce2fd9 100644
--- a/Documentation/trace/kprobes.rst
+++ b/Documentation/trace/kprobes.rst
@@ -362,14 +362,11 @@ register_kprobe
#include <linux/kprobes.h>
int register_kprobe(struct kprobe *kp);
-Sets a breakpoint at the address kp->addr. When the breakpoint is
-hit, Kprobes calls kp->pre_handler. After the probed instruction
-is single-stepped, Kprobe calls kp->post_handler. If a fault
-occurs during execution of kp->pre_handler or kp->post_handler,
-or during single-stepping of the probed instruction, Kprobes calls
-kp->fault_handler. Any or all handlers can be NULL. If kp->flags
-is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled,
-so, its handlers aren't hit until calling enable_kprobe(kp).
+Sets a breakpoint at the address kp->addr. When the breakpoint is hit, Kprobes
+calls kp->pre_handler. After the probed instruction is single-stepped, Kprobe
+calls kp->post_handler. Any or all handlers can be NULL. If kp->flags is set
+KPROBE_FLAG_DISABLED, that kp will be registered but disabled, so, its handlers
+aren't hit until calling enable_kprobe(kp).
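+
+A minimal usage sketch (the probed symbol "kernel_clone" and the handler
+name are illustrative assumptions, not part of this API description)::
+
+	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+	{
+		pr_info("pre_handler: probe hit at %p\n", p->addr);
+		return 0;
+	}
+
+	static struct kprobe kp = {
+		.symbol_name = "kernel_clone",
+		.pre_handler = handler_pre,
+	};
+
+	/* in module init */
+	ret = register_kprobe(&kp);
+	if (ret < 0)
+		pr_err("register_kprobe failed: %d\n", ret);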
.. note::
@@ -415,17 +412,6 @@ User's post-handler (kp->post_handler)::
p and regs are as described for the pre_handler. flags always seems
to be zero.
-User's fault-handler (kp->fault_handler)::
-
- #include <linux/kprobes.h>
- #include <linux/ptrace.h>
- int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
-
-p and regs are as described for the pre_handler. trapnr is the
-architecture-specific trap number associated with the fault (e.g.,
-on i386, 13 for a general protection fault or 14 for a page fault).
-Returns 1 if it successfully handled the exception.
-
register_kretprobe
------------------
diff --git a/Documentation/translations/index.rst b/Documentation/translations/index.rst
index e446e5ed00a6..556b050884fc 100644
--- a/Documentation/translations/index.rst
+++ b/Documentation/translations/index.rst
@@ -18,6 +18,10 @@ Translations
Disclaimer
----------
+.. raw:: latex
+
+ \kerneldocCJKoff
+
Translation's purpose is to ease reading and understanding in languages other
than English. Its aim is to help people who do not understand English or have
doubts about its interpretation. Additionally, some people prefer to read
diff --git a/Documentation/translations/it_IT/index.rst b/Documentation/translations/it_IT/index.rst
index bb8fa7346939..e80a3097aa57 100644
--- a/Documentation/translations/it_IT/index.rst
+++ b/Documentation/translations/it_IT/index.rst
@@ -4,6 +4,10 @@
Traduzione italiana
===================
+.. raw:: latex
+
+ \kerneldocCJKoff
+
:manutentore: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_disclaimer:
diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst
index 95f2e7c985e2..ecc74ba50d3e 100644
--- a/Documentation/translations/it_IT/process/coding-style.rst
+++ b/Documentation/translations/it_IT/process/coding-style.rst
@@ -62,7 +62,7 @@ i ``case``. Un esempio.:
case 'K':
case 'k':
mem <<= 10;
- /* fall through */
+ fallthrough;
default:
break;
}
diff --git a/Documentation/translations/ja_JP/index.rst b/Documentation/translations/ja_JP/index.rst
index 2f91b895e3c2..f94ba62d41c3 100644
--- a/Documentation/translations/ja_JP/index.rst
+++ b/Documentation/translations/ja_JP/index.rst
@@ -1,7 +1,8 @@
.. raw:: latex
- \renewcommand\thesection*
- \renewcommand\thesubsection*
+ \renewcommand\thesection*
+ \renewcommand\thesubsection*
+ \kerneldocCJKon
Japanese translations
=====================
diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst
index b9e27d20b039..6ae258118bdf 100644
--- a/Documentation/translations/ko_KR/index.rst
+++ b/Documentation/translations/ko_KR/index.rst
@@ -1,7 +1,8 @@
.. raw:: latex
- \renewcommand\thesection*
- \renewcommand\thesubsection*
+ \renewcommand\thesection*
+ \renewcommand\thesubsection*
+ \kerneldocCJKon
한국어 번역
===========
diff --git a/Documentation/translations/zh_CN/admin-guide/index.rst b/Documentation/translations/zh_CN/admin-guide/index.rst
index be835ec8e632..460034cbc2ab 100644
--- a/Documentation/translations/zh_CN/admin-guide/index.rst
+++ b/Documentation/translations/zh_CN/admin-guide/index.rst
@@ -65,6 +65,7 @@ Todolist:
clearing-warn-once
cpu-load
+ lockup-watchdogs
unicode
Todolist:
@@ -100,7 +101,6 @@ Todolist:
laptops/index
lcd-panel-cgram
ldm
- lockup-watchdogs
LSM/index
md
media/index
diff --git a/Documentation/translations/zh_CN/admin-guide/lockup-watchdogs.rst b/Documentation/translations/zh_CN/admin-guide/lockup-watchdogs.rst
new file mode 100644
index 000000000000..55ed3f4af442
--- /dev/null
+++ b/Documentation/translations/zh_CN/admin-guide/lockup-watchdogs.rst
@@ -0,0 +1,66 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/admin-guide/lockup-watchdogs.rst
+:Translator: Hailong Liu <liu.hailong6@zte.com.cn>
+
+.. _cn_lockup-watchdogs:
+
+
+=================================================
+Softlockup与hardlockup检测机制(又名:nmi_watchdog)
+=================================================
+
+Linux中内核实现了一种用以检测系统发生softlockup和hardlockup的看门狗机制。
+
+Softlockup是一种会引发系统在内核态中一直循环超过20秒(详见下面“实现”小节)导致
+其他任务没有机会得到运行的BUG。一旦检测到'softlockup'发生,默认情况下系统会打
+印当前堆栈跟踪信息并进入锁定状态。也可配置使其在检测到'softlockup'后进入panic
+状态;通过sysctl命令设置“kernel.softlockup_panic”、使用内核启动参数
+“softlockup_panic”(详见Documentation/admin-guide/kernel-parameters.rst)以及使
+能内核编译选项“BOOTPARAM_SOFTLOCKUP_PANIC”都可实现这种配置。
+
+而'hardlockup'是一种会引发系统在内核态一直循环超过10秒钟(详见"实现"小节)导致其
+他中断没有机会运行的缺陷。与'softlockup'情况类似,除了使用sysctl命令设置
+'hardlockup_panic'、使能内核选项“BOOTPARAM_HARDLOCKUP_PANIC”以及使用内核参数
+"nmi_watchdog"(详见:”Documentation/admin-guide/kernel-parameters.rst“)外,一旦检
+测到'hardlockup'默认情况下系统打印当前堆栈跟踪信息,然后进入锁定状态。
+
+这个panic选项也可以与panic_timeout结合使用(这个panic_timeout是通过稍具迷惑性的
+sysctl命令"kernel.panic"来设置),使系统在panic指定时间后自动重启。
+
+实现
+====
+
+Softlockup和hardlockup分别基于hrtimer(高精度定时器)和perf两个子系统实现。
+这也就意味着理论上任何架构只要实现了这两个子系统就支持这两种检测机制。
+
+Hrtimer用于周期性产生中断并唤醒watchdog线程;NMI perf事件则以“watchdog_thresh”
+(编译时默认初始化为10秒,也可通过“watchdog_thresh”这个sysctl接口来进行配置修改)
+为间隔周期产生以检测 hardlockups。如果一个CPU在这个时间段内没有检测到hrtimer中
+断发生,'hardlockup 检测器'(即NMI perf事件处理函数)将会视系统配置而选择产生内核
+警告或者直接panic。
+
+而watchdog线程本质上是一个高优先级内核线程,每调度一次就对时间戳进行一次更新。
+如果时间戳在2*watchdog_thresh(这个是softlockup的触发门限)这段时间都未更新,那么
+"softlocup 检测器"(内部hrtimer定时器回调函数)会将相关的调试信息打印到系统日志中,
+然后如果系统配置了进入panic流程则进入panic,否则内核继续执行。
+
+Hrtimer定时器的周期是2*watchdog_thresh/5,也就是说在hardlockup被触发前hrtimer有
+2~3次机会产生时钟中断。
+
+如上所述,内核相当于为系统管理员提供了一个可调节hrtimer定时器和perf事件周期长度
+的调节旋钮。如何通过这个旋钮为特定使用场景配置一个合理的周期值要对lockups检测的
+响应速度和lockups检测开销这二者之间进行权衡。
+
+默认情况下所有在线cpu上都会运行一个watchdog线程。不过在内核配置了“NO_HZ_FULL”的
+情况下watchdog线程默认只会运行在管家(housekeeping)cpu上,而“nohz_full”启动参数指
+定的cpu上则不会有watchdog线程运行。试想,如果我们允许watchdog线程在“nohz_full”指
+定的cpu上运行,这些cpu上必须得运行时钟定时器来激发watchdog线程调度;这样一来就会
+使“nohz_full”保护用户程序免受内核干扰的功能失效。当然,副作用就是“nohz_full”指定
+的cpu即使在内核产生了lockup问题我们也无法检测到。不过,至少我们可以允许watchdog
+线程在管家(non-tickless)核上继续运行以便我们能继续正常的监测这些cpus上的lockups
+事件。
+
+不论哪种情况都可以通过sysctl命令kernel.watchdog_cpumask来对没有运行watchdog线程
+的cpu集合进行调节。对于nohz_full而言,如果nohz_full cpu上有异常挂住的情况,通过
+这种方式打开这些cpu上的watchdog进行调试可能会有所作用。
diff --git a/Documentation/translations/zh_CN/core-api/cachetlb.rst b/Documentation/translations/zh_CN/core-api/cachetlb.rst
new file mode 100644
index 000000000000..8376485a534d
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/cachetlb.rst
@@ -0,0 +1,336 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/cachetlb.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+
+:校译:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+.. _cn_core-api_cachetlb:
+
+======================
+Linux下的缓存和TLB刷新
+======================
+
+:作者: David S. Miller <davem@redhat.com>
+
+*译注:TLB,Translation Lookaside Buffer,页表缓存/变换旁查缓冲器*
+
+本文描述了由Linux虚拟内存子系统调用的缓存/TLB刷新接口。它列举了每个接
+口,描述了它的预期目的,以及接口被调用后的预期副作用。
+
+下面描述的副作用是针对单处理器的实现,以及在单个处理器上发生的情况。若
+为SMP,则只需将定义简单地扩展一下,使发生在某个特定接口的副作用扩展到系
+统的所有处理器上。不要被这句话吓到,以为SMP的缓存/tlb刷新一定是很低
+效的,事实上,这是一个可以进行很多优化的领域。例如,如果可以证明一个用
+户地址空间从未在某个cpu上执行过(见mm_cpumask()),那么就不需要在该
+cpu上对这个地址空间进行刷新。
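+
+作为一个假设性的示意(其中 ``flush_tlb_func`` 是虚构的回调,仅用于说明这一
+优化思路),SMP上可以只向 mm_cpumask() 中记录过的CPU发起刷新::
+
+	/* 只在曾经运行过该mm的CPU上执行刷新回调,并等待完成 */
+	on_each_cpu_mask(mm_cpumask(mm), flush_tlb_func, mm, 1);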
+
+首先是TLB刷新接口,因为它们是最简单的。在Linux下,TLB被抽象为cpu
+用来缓存从软件页表获得的虚拟->物理地址转换的东西。这意味着,如果软件页
+表发生变化,这个“TLB”缓存中就有可能出现过时(脏)的翻译。因此,当软件页表
+发生变化时,内核会在页表发生 *变化后* 调用以下一种刷新方法:
+
+1) ``void flush_tlb_all(void)``
+
+ 最严格的刷新。在这个接口运行后,任何以前的页表修改都会对cpu可见。
+
+ 这通常是在内核页表被改变时调用的,因为这种转换在本质上是“全局”的。
+
+2) ``void flush_tlb_mm(struct mm_struct *mm)``
+
+ 这个接口从TLB中刷新整个用户地址空间。在运行后,这个接口必须确保
+ 以前对地址空间‘mm’的任何页表修改对cpu来说是可见的。也就是说,在
+ 运行后,TLB中不会有‘mm’的页表项。
+
+ 这个接口被用来处理整个地址空间的页表操作,比如在fork和exec过程
+ 中发生的事情。
+
+3) ``void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)``
+
+ 这里我们要从TLB中刷新一个特定范围的(用户)虚拟地址转换。在运行后,
+ 这个接口必须确保以前对‘start’到‘end-1’范围内的地址空间‘vma->vm_mm’
+ 的任何页表修改对cpu来说是可见的。也就是说,在运行后,TLB中不会有
+ ‘mm’的页表项用于‘start’到‘end-1’范围内的虚拟地址。
+
+ “vma”是用于该区域的备份存储。主要是用于munmap()类型的操作。
+
+ 提供这个接口是希望端口能够找到一个合适的有效方法来从TLB中删除多
+ 个页面大小的转换,而不是让内核为每个可能被修改的页表项调用
+ flush_tlb_page(见下文)。
+
+4) ``void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)``
+
+ 这一次我们需要从TLB中删除PAGE_SIZE大小的转换。‘vma’是Linux用来跟
+ 踪进程的mmap区域的支持结构体,地址空间可以通过vma->vm_mm获得。另
+ 外,可以通过测试(vma->vm_flags & VM_EXEC)来查看这个区域是否是
+ 可执行的(因此在split-tlb类型的设置中可能在“指令TLB”中)。
+
+ 在运行后,这个接口必须确保之前对用户虚拟地址“addr”的地址空间
+ “vma->vm_mm”的页表修改对cpu来说是可见的。也就是说,在运行后,TLB
+ 中不会有虚拟地址‘addr’的‘vma->vm_mm’的页表项。
+
+ 这主要是在故障处理时使用。
+
+5) ``void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)``
+
+ 在每个页面故障结束时,这个程序被调用,以告诉体系结构特定的代码,在
+ 软件页表中,在地址空间“vma->vm_mm”的虚拟地址“地址”处,现在存在
+ 一个翻译。
+
+ 可以用它所选择的任何方式使用这个信息来进行移植。例如,它可以使用这
+ 个事件来为软件管理的TLB配置预装TLB转换。目前sparc64移植就是这么干
+ 的。
+
+接下来,我们有缓存刷新接口。一般来说,当Linux将现有的虚拟->物理映射
+改变为新的值时,其顺序将是以下形式之一::
+
+ 1) flush_cache_mm(mm);
+ change_all_page_tables_of(mm);
+ flush_tlb_mm(mm);
+
+ 2) flush_cache_range(vma, start, end);
+ change_range_of_page_tables(mm, start, end);
+ flush_tlb_range(vma, start, end);
+
+ 3) flush_cache_page(vma, addr, pfn);
+ set_pte(pte_pointer, new_pte_val);
+ flush_tlb_page(vma, addr);
+
+缓存级别的刷新将永远是第一位的,因为这允许我们正确处理那些缓存严格,
+且在虚拟地址被从缓存中刷新时要求一个虚拟地址的虚拟->物理转换存在的系统。
+HyperSparc cpu就是这样一个具有这种属性的cpu。
+
+下面的缓存刷新程序只需要在特定的cpu需要的范围内处理缓存刷新。大多数
+情况下,这些程序必须为cpu实现,这些cpu有虚拟索引的缓存,当虚拟->物
+理转换被改变或移除时,必须被刷新。因此,例如,IA32处理器的物理索引
+的物理标记的缓存没有必要实现这些接口,因为这些缓存是完全同步的,并
+且不依赖于翻译信息。
+
+下面逐个列出这些程序:
+
+1) ``void flush_cache_mm(struct mm_struct *mm)``
+
+ 这个接口将整个用户地址空间从高速缓存中刷掉。也就是说,在运行后,
+ 将没有与‘mm’相关的缓存行。
+
+ 这个接口被用来处理整个地址空间的页表操作,比如在退出和执行过程
+ 中发生的事情。
+
+2) ``void flush_cache_dup_mm(struct mm_struct *mm)``
+
+ 这个接口将整个用户地址空间从高速缓存中刷新掉。也就是说,在运行
+ 后,将没有与‘mm’相关的缓存行。
+
+ 这个接口被用来处理整个地址空间的页表操作,比如在fork过程中发生
+ 的事情。
+
+ 这个选项与flush_cache_mm分开,以允许对VIPT缓存进行一些优化。
+
+3) ``void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)``
+
+ 在这里,我们要从缓存中刷新一个特定范围的(用户)虚拟地址。运行
+ 后,在“start”到“end-1”范围内的虚拟地址的“vma->vm_mm”的缓存中
+ 将没有页表项。
+
+ “vma”是被用于该区域的备份存储。主要是用于munmap()类型的操作。
+
+ 提供这个接口是希望端口能够找到一个合适的有效方法来从缓存中删
+ 除多个页面大小的区域, 而不是让内核为每个可能被修改的页表项调
+ 用 flush_cache_page (见下文)。
+
+4) ``void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)``
+
+ 这一次我们需要从缓存中删除一个PAGE_SIZE大小的区域。“vma”是
+ Linux用来跟踪进程的mmap区域的支持结构体,地址空间可以通过
+ vma->vm_mm获得。另外,我们可以通过测试(vma->vm_flags &
+ VM_EXEC)来查看这个区域是否是可执行的(因此在“Harvard”类
+ 型的缓存布局中可能是在“指令缓存”中)。
+
+ “pfn”表示“addr”所对应的物理页框(通过PAGE_SHIFT左移这个
+ 值来获得物理地址)。正是这个映射应该从缓存中删除。
+
+ 在运行之后,对于虚拟地址‘addr’的‘vma->vm_mm’,在缓存中不会
+ 有任何页表项,它被翻译成‘pfn’。
+
+ 这主要是在故障处理过程中使用。
+
+5) ``void flush_cache_kmaps(void)``
+
+ 只有在平台使用高位内存的情况下才需要实现这个程序。它将在所有的
+ kmaps失效之前被调用。
+
+ 运行后,内核虚拟地址范围PKMAP_ADDR(0)到PKMAP_ADDR(LAST_PKMAP)
+ 的缓存中将没有页表项。
+
+ 这个程序应该在asm/highmem.h中实现。
+
+6) ``void flush_cache_vmap(unsigned long start, unsigned long end)``
+ ``void flush_cache_vunmap(unsigned long start, unsigned long end)``
+
+ 在这里,在这两个接口中,我们从缓存中刷新一个特定范围的(内核)
+ 虚拟地址。运行后,在“start”到“end-1”范围内的虚拟地址的内核地
+ 址空间的缓存中不会有页表项。
+
+ 这两个程序中的第一个是在vmap_range()安装了页表项之后调用的。
+ 第二个是在vunmap_range()删除页表项之前调用的。
+
+还有一类cpu缓存问题,目前需要一套完全不同的接口来正确处理。最大
+的问题是处理器的数据缓存中的虚拟别名。
+
+.. note::
+
+ 这段内容有些晦涩,为了减轻中文阅读压力,特作此译注。
+
+ 别名(alias)属于缓存一致性问题,当不同的虚拟地址映射相同的
+ 物理地址,而这些虚拟地址的index不同,此时就发生了别名现象(多
+ 个虚拟地址被称为别名)。通俗点来说就是指同一个物理地址的数据被
+ 加载到不同的cacheline中就会出现别名现象。
+
+ 常见的解决方法有两种:第一种是硬件维护一致性,设计特定的cpu电
+ 路来解决问题(例如设计为PIPT的cache);第二种是软件维护一致性,
+ 就是下面介绍的sparc的解决方案——页面染色,涉及的技术细节太多,
+ 译者不便展开,请读者自行查阅相关资料。
+
+您的移植是否容易在其D-cache中出现虚拟别名?嗯,如果您的D-cache
+是虚拟索引的,且cache大于PAGE_SIZE(页大小),并且不能防止同一
+物理地址的多个cache行同时存在,您就会遇到这个问题。
+
+如果你的D-cache有这个问题,首先正确定义asm/shmparam.h SHMLBA,
+它基本上应该是你的虚拟寻址D-cache的大小(或者如果大小是可变的,
+则是最大的可能大小)。这个设置将迫使SYSv IPC层只允许用户进程在
+这个值的倍数的地址上对共享内存进行映射。
+
+.. note::
+
+ 这并不能解决共享mmaps的问题,请查看sparc64移植解决
+ 这个问题的一个方法(特别是 SPARC_FLAG_MMAPSHARED)。
+
+接下来,你必须解决所有其他情况下的D-cache别名问题。请记住这个事
+实,对于一个给定的页面映射到某个用户地址空间,总是至少还有一个映
+射,那就是内核在其线性映射中从PAGE_OFFSET开始。因此,一旦第一个
+用户将一个给定的物理页映射到它的地址空间,就意味着D-cache的别名
+问题有可能存在,因为内核已经将这个页映射到它的虚拟地址。
+
+ ``void copy_user_page(void *to, void *from, unsigned long addr, struct page *page)``
+ ``void clear_user_page(void *to, unsigned long addr, struct page *page)``
+
+ 这两个程序在用户匿名或COW页中存储数据。它允许一个端口有效地
+ 避免用户空间和内核之间的D-cache别名问题。
+
+ 例如,一个端口可以在复制过程中把“from”和“to”暂时映射到内核
+ 的虚拟地址上。这两个页面的虚拟地址的选择方式是,内核的加载/存
+ 储指令发生在虚拟地址上,而这些虚拟地址与用户的页面映射是相同
+ 的“颜色”。例如,Sparc64就使用这种技术。
+
+ “addr”参数告诉了用户最终要映射这个页面的虚拟地址,“page”参
+ 数给出了一个指向目标页结构体的指针。
+
+ 如果D-cache别名不是问题,这两个程序可以简单地直接调用
+ memcpy/memset而不做其他事情。
+
+ ``void flush_dcache_page(struct page *page)``
+
+ 任何时候,当内核写到一个页面缓存页,或者内核要从一个页面缓存
+ 页中读出,并且这个页面的用户空间共享/可写映射可能存在时,
+ 这个程序就会被调用。
+
+ .. note::
+
+ 这个程序只需要为有可能被映射到用户进程的地址空间的
+ 页面缓存调用。因此,例如,处理页面缓存中vfs符号链
+ 接的VFS层代码根本不需要调用这个接口。
+
+ “内核写入页面缓存的页面”这句话的意思是,具体来说,内核执行存
+ 储指令,在该页面的页面->虚拟映射处弄脏该页面的数据。在这里,通
+ 过刷新的手段处理D-cache的别名是很重要的,以确保这些内核存储对
+ 该页的用户空间映射是可见的。
+
+ 推论的情况也同样重要,如果有用户对这个文件有共享+可写的映射,
+ 我们必须确保内核对这些页面的读取会看到用户所做的最新的存储。
+
+ 如果D-cache别名不是一个问题,这个程序可以简单地定义为该架构上
+ 的nop。
+
+ 在page->flags (PG_arch_1)中有一个位是“架构私有”。内核保证,
+ 对于分页缓存的页面,当这样的页面第一次进入分页缓存时,它将清除
+ 这个位。
+
+ 这使得这些接口可以更有效地被实现。如果目前没有用户进程映射这个
+ 页面,它允许我们“推迟”(也许是无限期)实际的刷新过程。请看
+ sparc64的flush_dcache_page和update_mmu_cache实现,以了解如
+ 何做到这一点。
+
+ 这个想法是,首先在flush_dcache_page()时,如果page->mapping->i_mmap
+ 是一个空树,只需标记架构私有页标志位。之后,在update_mmu_cache()
+ 中,会对这个标志位进行检查,如果设置了,就进行刷新,并清除标志位。
+
+ .. important::
+
+ 通常很重要的是,如果你推迟刷新,实际的刷新发生在同一个
+ CPU上,因为它将cpu存储到页面上,使其变脏。同样,请看
+ sparc64关于如何处理这个问题的例子。
+
+ ``void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)``
+ ``void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)``
+
+ 当内核需要复制任意的数据进出任意的用户页时(比如ptrace()),它将使
+ 用这两个程序。
+
+ 任何必要的缓存刷新或其他需要发生的一致性操作都应该在这里发生。如果
+ 处理器的指令缓存没有对cpu存储进行窥探,那么你很可能需要为
+ copy_to_user_page()刷新指令缓存。
+
+ ``void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vmaddr)``
+
+ 当内核需要访问一个匿名页的内容时,它会调用这个函数(目前只有
+ get_user_pages())。注意:flush_dcache_page()故意对匿名页不起作
+ 用。默认的实现是nop(对于所有相干的架构应该保持这样)。对于不一致性
+ 的架构,它应该刷新vmaddr处的页面缓存。
+
+ ``void flush_kernel_dcache_page(struct page *page)``
+
+ 当内核需要修改一个用kmap获得的用户页时,它会在所有修改完成后(但在
+ kunmapping之前)调用这个函数,以使底层页面达到最新状态。这里假定用
+ 户没有不一致性的缓存副本(即原始页面是从类似get_user_pages()的机制
+ 中获得的)。默认的实现是一个nop,在所有相干的架构上都应该如此。在不
+ 一致性的架构上,这应该刷新内核缓存中的页面(使用page_address(page))。
+
+
+ ``void flush_icache_range(unsigned long start, unsigned long end)``
+
+ 当内核存储到它将执行的地址中时(例如在加载模块时),这个函数被调用。
+
+ 如果icache不对存储进行窥探,那么这个程序将需要对其进行刷新。
+
+ ``void flush_icache_page(struct vm_area_struct *vma, struct page *page)``
+
+ flush_icache_page的所有功能都可以在flush_dcache_page和update_mmu_cache
+ 中实现。在未来,我们希望能够完全删除这个接口。
+
+最后一类API是用于I/O到内核内特意设置的别名地址范围。这种别名是通过使用
+vmap/vmalloc API设置的。由于内核I/O是通过物理页进行的,I/O子系统假定用户
+映射和内核偏移映射是唯一的别名。这对vmap别名来说是不正确的,所以内核中任何
+试图对vmap区域进行I/O的东西都必须手动管理一致性。它必须在做I/O之前刷新vmap
+范围,并在I/O返回后使其失效。
+
+ ``void flush_kernel_vmap_range(void *vaddr, int size)``
+
+ 刷新vmap区域中指定的虚拟地址范围的内核缓存。这是为了确保内核在vmap范围
+ 内修改的任何数据对物理页是可见的。这个设计是为了使这个区域可以安全地执
+ 行I/O。注意,这个API并 *没有* 刷新该区域的偏移映射别名。
+
+ ``void invalidate_kernel_vmap_range(void *vaddr, int size) invalidates``
+
+ 在vmap区域的一个给定的虚拟地址范围的缓存,这可以防止处理器在物理页的I/O
+ 发生时通过投机性地读取数据而使缓存变脏。这只对读入vmap区域的数据是必要的。
diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst
index f1fa71e45c77..b4bde9396339 100644
--- a/Documentation/translations/zh_CN/core-api/index.rst
+++ b/Documentation/translations/zh_CN/core-api/index.rst
@@ -19,12 +19,13 @@
来的大量 kerneldoc 信息;有朝一日,若有人有动力的话,应当把它们拆分
出来。
-Todolist:
+.. toctree::
+ :maxdepth: 1
kernel-api
- workqueue
printk-basics
printk-formats
+ workqueue
symbol-namespaces
数据结构和低级实用程序
@@ -32,9 +33,13 @@ Todolist:
在整个内核中使用的函数库。
-Todolist:
+.. toctree::
+ :maxdepth: 1
kobject
+
+Todolist:
+
kref
assoc_array
xarray
@@ -58,12 +63,12 @@ Linux如何让一切同时发生。 详情请参阅
:maxdepth: 1
irq/index
-
-Todolist:
-
refcount-vs-atomic
local_ops
padata
+
+Todolist:
+
../RCU/index
低级硬件管理
@@ -71,9 +76,14 @@ Todolist:
缓存管理,CPU热插拔管理等。
-Todolist:
+.. toctree::
+ :maxdepth: 1
cachetlb
+
+Todolist:
+
+
cpu_hotplug
memory-hotplug
genericirq
diff --git a/Documentation/translations/zh_CN/core-api/kernel-api.rst b/Documentation/translations/zh_CN/core-api/kernel-api.rst
new file mode 100644
index 000000000000..d6f815ec265b
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/kernel-api.rst
@@ -0,0 +1,369 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/kernel-api.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_kernel-api.rst:
+
+
+============
+Linux内核API
+============
+
+
+列表管理函数
+============
+
+该API在以下内核代码中:
+
+include/linux/list.h
+
+基本的C库函数
+=============
+
+在编写驱动程序时,一般不能使用C库中的例程。部分函数通常很有用,它们在
+下面被列出。这些函数的行为可能会与ANSI定义的略有不同,这些偏差会在文中
+注明。
+
+字符串转换
+----------
+
+该API在以下内核代码中:
+
+lib/vsprintf.c
+
+include/linux/kernel.h
+
+include/linux/kernel.h
+
+lib/kstrtox.c
+
+lib/string_helpers.c
+
+字符串处理
+----------
+
+该API在以下内核代码中:
+
+lib/string.c
+
+include/linux/string.h
+
+mm/util.c
+
+基本的内核库函数
+================
+
+Linux内核提供了很多实用的基本函数。
+
+位运算
+------
+
+该API在以下内核代码中:
+
+include/asm-generic/bitops/instrumented-atomic.h
+
+include/asm-generic/bitops/instrumented-non-atomic.h
+
+include/asm-generic/bitops/instrumented-lock.h
+
+位图运算
+--------
+
+该API在以下内核代码中:
+
+lib/bitmap.c
+
+include/linux/bitmap.h
+
+include/linux/bitmap.h
+
+include/linux/bitmap.h
+
+lib/bitmap.c
+
+lib/bitmap.c
+
+include/linux/bitmap.h
+
+命令行解析
+----------
+
+该API在以下内核代码中:
+
+lib/cmdline.c
+
+排序
+----
+
+该API在以下内核代码中:
+
+lib/sort.c
+
+lib/list_sort.c
+
+文本检索
+--------
+
+该API在以下内核代码中:
+
+lib/textsearch.c
+
+lib/textsearch.c
+
+include/linux/textsearch.h
+
+Linux中的CRC和数学函数
+======================
+
+
+CRC函数
+-------
+
+*译注:CRC,Cyclic Redundancy Check,循环冗余校验*
+
+该API在以下内核代码中:
+
+lib/crc4.c
+
+lib/crc7.c
+
+lib/crc8.c
+
+lib/crc16.c
+
+lib/crc32.c
+
+lib/crc-ccitt.c
+
+lib/crc-itu-t.c
+
+基数为2的对数和幂函数
+---------------------
+
+该API在以下内核代码中:
+
+include/linux/log2.h
+
+整数幂函数
+----------
+
+该API在以下内核代码中:
+
+lib/math/int_pow.c
+
+lib/math/int_sqrt.c
+
+除法函数
+--------
+
+该API在以下内核代码中:
+
+include/asm-generic/div64.h
+
+include/linux/math64.h
+
+lib/math/div64.c
+
+lib/math/gcd.c
+
+UUID/GUID
+---------
+
+该API在以下内核代码中:
+
+lib/uuid.c
+
+内核IPC设备
+===========
+
+IPC实用程序
+-----------
+
+该API在以下内核代码中:
+
+ipc/util.c
+
+FIFO 缓冲区
+===========
+
+kfifo接口
+---------
+
+该API在以下内核代码中:
+
+include/linux/kfifo.h
+
+转发接口支持
+============
+
+转发接口支持旨在为工具和设备提供一种有效的机制,将大量数据从内核空间
+转发到用户空间。
+
+转发接口
+--------
+
+该API在以下内核代码中:
+
+kernel/relay.c
+
+kernel/relay.c
+
+模块支持
+========
+
+模块加载
+--------
+
+该API在以下内核代码中:
+
+kernel/kmod.c
+
+模块接口支持
+------------
+
+更多信息请参考文件kernel/module.c。
+
+硬件接口
+========
+
+
+该API在以下内核代码中:
+
+kernel/dma.c
+
+资源管理
+--------
+
+该API在以下内核代码中:
+
+kernel/resource.c
+
+kernel/resource.c
+
+MTRR处理
+--------
+
+该API在以下内核代码中:
+
+arch/x86/kernel/cpu/mtrr/mtrr.c
+
+安全框架
+========
+
+该API在以下内核代码中:
+
+security/security.c
+
+security/inode.c
+
+审计接口
+========
+
+该API在以下内核代码中:
+
+kernel/audit.c
+
+kernel/auditsc.c
+
+kernel/auditfilter.c
+
+核算框架
+========
+
+该API在以下内核代码中:
+
+kernel/acct.c
+
+块设备
+======
+
+该API在以下内核代码中:
+
+block/blk-core.c
+
+block/blk-core.c
+
+block/blk-map.c
+
+block/blk-sysfs.c
+
+block/blk-settings.c
+
+block/blk-exec.c
+
+block/blk-flush.c
+
+block/blk-lib.c
+
+block/blk-integrity.c
+
+kernel/trace/blktrace.c
+
+block/genhd.c
+
+block/genhd.c
+
+字符设备
+========
+
+该API在以下内核代码中:
+
+fs/char_dev.c
+
+时钟框架
+========
+
+时钟框架定义了编程接口,以支持系统时钟树的软件管理。该框架广泛用于系统级芯片(SOC)平
+台,以支持电源管理和各种可能需要自定义时钟速率的设备。请注意,这些 “时钟”与计时或实
+时时钟(RTC)无关,它们都有单独的框架。这些 :c:type:`struct clk <clk>` 实例可用于管理
+各种时钟信号,例如一个96MHz的时钟信号,该信号可被用于总线或外设的数据交换,或以
+其他方式触发系统硬件中的同步状态机转换。
+
+通过明确的软件时钟门控来支持电源管理:未使用的时钟被禁用,因此系统不会因为改变不在使用
+中的晶体管的状态而浪费电源。在某些系统中,这可能是由硬件时钟门控支持的,其中时钟被门控
+而不在软件中被禁用。芯片的部分,在供电但没有时钟的情况下,可能会保留其最后的状态。这种
+低功耗状态通常被称为*保留模式*。这种模式仍然会产生漏电流,特别是在电路几何结构较细的情
+况下,但对于CMOS电路来说,电能主要是随着时钟翻转而被消耗的。
+
+电源感知驱动程序只有在其管理的设备处于活动使用状态时才会启用时钟。此外,系统睡眠状态通
+常根据哪些时钟域处于活动状态而有所不同:“待机”状态可能允许从多个活动域中唤醒,而
+"mem"(暂停到RAM)状态可能需要更全面地关闭来自高速PLL和振荡器的时钟,从而限制了可能
+的唤醒事件源的数量。驱动程序的暂停(suspend)方法可能需要注意目标睡眠状态的系统特定时钟约束。
+
+一些平台支持可编程时钟发生器。这些可以被各种外部芯片使用,如其他CPU、多媒体编解码器以
+及对接口时钟有严格要求的设备。
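+
+下面是一个消费者侧的最小示意(其中设备指针pdev与时钟名"bus"均为假设)::
+
+	struct clk *clk;
+	int ret;
+
+	clk = devm_clk_get(&pdev->dev, "bus");	/* 按名称查找时钟 */
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = clk_prepare_enable(clk);		/* 准备并使能时钟 */
+	if (ret)
+		return ret;
+
+	pr_info("bus clk rate: %lu Hz\n", clk_get_rate(clk));
+	clk_disable_unprepare(clk);		/* 不再使用时关闭以省电 */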
+
+该API在以下内核代码中:
+
+include/linux/clk.h
+
+同步原语
+========
+
+读-复制-更新(RCU)
+-------------------
+
+该API在以下内核代码中:
+
+include/linux/rcupdate.h
+
+kernel/rcu/tree.c
+
+kernel/rcu/tree_exp.h
+
+kernel/rcu/update.c
+
+include/linux/srcu.h
+
+kernel/rcu/srcutree.c
+
+include/linux/rculist_bl.h
+
+include/linux/rculist.h
+
+include/linux/rculist_nulls.h
+
+include/linux/rcu_sync.h
+
+kernel/rcu/sync.c
diff --git a/Documentation/translations/zh_CN/core-api/kobject.rst b/Documentation/translations/zh_CN/core-api/kobject.rst
new file mode 100644
index 000000000000..f0e6a4aeb372
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/kobject.rst
@@ -0,0 +1,378 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/kobject.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_core_api_kobject.rst:
+
+=======================================================
+关于kobjects、ksets和ktypes的一切你没想过需要了解的东西
+=======================================================
+
+:作者: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+:最后一次更新: December 19, 2007
+
+根据Jon Corbet于2003年10月1日为lwn.net撰写的原创文章改编,网址是:
+https://lwn.net/Articles/51437/
+
+理解驱动模型和建立在其上的kobject抽象的部分的困难在于,没有明显的切入点。
+处理kobjects需要理解一些不同的类型,所有这些类型都会相互引用。为了使事情
+变得更简单,我们将多路并进,从模糊的术语开始,并逐渐增加细节。那么,先来
+了解一些我们将要使用的术语的简明定义吧。
+
+ - 一个kobject是一个kobject结构体类型的对象。Kobjects有一个名字和一个
+ 引用计数。一个kobject也有一个父指针(允许对象被排列成层次结构),一个
+ 特定的类型,并且,通常在sysfs虚拟文件系统中表示。
+
+ Kobjects本身通常并不引人关注;相反它们常常被嵌入到其他包含真正引人注目
+ 的代码的结构体中。
+
+ 任何结构体都 **不应该** 有一个以上的kobject嵌入其中。如果有的话,对象的引用计
+ 数肯定会被打乱,而且不正确,你的代码就会出现错误。所以不要这样做。
+
+ - ktype是嵌入一个kobject的对象的类型。每个嵌入kobject的结构体都需要一个
+ 相应的ktype。ktype控制着kobject在被创建和销毁时的行为。
+
+ - 一个kset是一组kobjects。这些kobjects可以是相同的ktype或者属于不同的
+ ktype。kset是kobjects集合的基本容器类型。Ksets包含它们自己的kobjects,
+ 但你可以安全地忽略这个实现细节,因为kset的核心代码会自动处理这个kobject。
+
+ 当你看到一个下面全是其他目录的sysfs目录时,通常这些目录中的每一个都对应
+ 于同一个kset中的一个kobject。
+
+ 我们将研究如何创建和操作所有这些类型。将采取一种自下而上的方法,所以我们
+ 将回到kobjects。
+
+
+嵌入kobjects
+=============
+
+内核代码很少创建孤立的kobject,只有一个主要的例外,下面会解释。相反,
+kobjects被用来控制对一个更大的、特定领域的对象的访问。为此,kobjects会被
+嵌入到其他结构中。如果你习惯于用面向对象的术语来思考问题,那么kobjects可
+以被看作是一个顶级的抽象类,其他的类都是从它派生出来的。一个kobject实现了
+一系列的功能,这些功能本身并不特别有用,但在其他对象中却很好用。C语言不允
+许直接表达继承,所以必须使用其他技术——比如结构体嵌入。
+
+(对于那些熟悉内核链表实现的人来说,这类似于“list_head”结构本身很少有用,
+但总是被嵌入到感兴趣的更大的对象中)。
+
+例如, ``drivers/uio/uio.c`` 中的IO代码有一个结构体,定义了与uio设备相
+关的内存区域::
+
+ struct uio_map {
+ struct kobject kobj;
+ struct uio_mem *mem;
+ };
+
+如果你有一个uio_map结构体,找到其嵌入的kobject只是一个使用kobj成员的问题。
+然而,与kobjects一起工作的代码往往会遇到相反的问题:给定一个结构体kobject
+的指针,指向包含结构体的指针是什么?你必须避免使用一些技巧(比如假设
+kobject在结构的开头),相反,你得使用container_of()宏,其可以在 ``<linux/kernel.h>``
+中找到::
+
+ container_of(ptr, type, member)
+
+其中:
+
+ * ``ptr`` 是一个指向嵌入kobject的指针,
+ * ``type`` 是包含结构体的类型,
+ * ``member`` 是 ``指针`` 所指向的结构体域的名称。
+
+container_of()的返回值是一个指向相应容器类型的指针。因此,例如,一个嵌入到
+uio_map结构 **中** 的kobject结构体的指针kp可以被转换为一个指向 **包含** uio_map
+结构体的指针,方法是::
+
+ struct uio_map *u_map = container_of(kp, struct uio_map, kobj);
+
+为了方便起见,程序员经常定义一个简单的宏,用于将kobject指针 **反推** 到包含
+类型。在早期的 ``drivers/uio/uio.c`` 中正是如此,你可以在这里看到::
+
+ struct uio_map {
+ struct kobject kobj;
+ struct uio_mem *mem;
+ };
+
+ #define to_map(map) container_of(map, struct uio_map, kobj)
+
+其中宏的参数“map”是一个指向有关的kobject结构体的指针。该宏随后被调用::
+
+ struct uio_map *map = to_map(kobj);
+
+
+kobjects的初始化
+================
+
+当然,创建kobject的代码必须初始化该对象。一些内部字段是通过(强制)调用kobject_init()
+来设置的::
+
+ void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+
+ktype是正确创建kobject的必要条件,因为每个kobject都必须有一个相关的kobj_type。
+在调用kobject_init()后,为了向sysfs注册kobject,必须调用函数kobject_add()::
+
+ int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...);
+
+这将正确设置kobject的父级和kobject的名称。如果该kobject要与一个特定的kset相关
+联,在调用kobject_add()之前必须分配kobj->kset。如果kset与kobject相关联,则
+kobject的父级可以在调用kobject_add()时被设置为NULL,则kobject的父级将是kset
+本身。
+
+由于kobject的名字是在它被添加到内核时设置的,所以kobject的名字不应该被直接操作。
+如果你必须改变kobject的名字,请调用kobject_rename()::
+
+ int kobject_rename(struct kobject *kobj, const char *new_name);
+
+kobject_rename()函数不会执行任何锁定操作,也不会对name进行可靠性检查,所以调用
+者自己检查和串行化操作是明智的选择。
+
+有一个叫kobject_set_name()的函数,但那是历史遗产,正在被删除。如果你的代码需
+要调用这个函数,那么它是不正确的,需要被修复。
+
+要正确访问kobject的名称,请使用函数kobject_name()::
+
+ const char *kobject_name(const struct kobject * kobj);
+
+有一个辅助函数可以同时初始化和添加kobject到内核中,令人惊讶的是,该函数被称为
+kobject_init_and_add()::
+
+ int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...);
+
+参数与上面描述的单个kobject_init()和kobject_add()函数相同。
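+
+一个最小的使用示意(其中 ``foo_ktype`` 、外层结构体foo、parent与id均为假设)::
+
+	ret = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo%d", id);
+	if (ret) {
+		/* 即使失败也必须用kobject_put()来释放,不能直接kfree() */
+		kobject_put(&foo->kobj);
+		return ret;
+	}
+	kobject_uevent(&foo->kobj, KOBJ_ADD);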
+
+
+Uevents
+=======
+
+当一个kobject被注册到kobject核心后,你需要向全世界宣布它已经被创建了。这可以通
+过调用kobject_uevent()来实现::
+
+ int kobject_uevent(struct kobject *kobj, enum kobject_action action);
+
+当kobject第一次被添加到内核时,使用 *KOBJ_ADD* 动作。这应该在该kobject的任
+何属性或子对象被正确初始化后进行,因为当这个调用发生时,用户空间会立即开始寻
+找它们。
+
+当kobject从内核中移除时(关于如何做的细节在下面), **KOBJ_REMOVE** 的uevent
+将由kobject核心自动创建,所以调用者不必担心手动操作。
+
+
+引用计数
+========
+
+kobject的关键功能之一是作为它所嵌入的对象的一个引用计数器。只要对该对象的引用
+存在,该对象(以及支持它的代码)就必须继续存在。用于操作kobject的引用计数的低
+级函数是::
+
+ struct kobject *kobject_get(struct kobject *kobj);
+ void kobject_put(struct kobject *kobj);
+
+对kobject_get()的成功调用将增加kobject的引用计数器值并返回kobject的指针。
+
+当引用被释放时,对kobject_put()的调用将递减引用计数值,并可能释放该对象。请注
+意,kobject_init()将引用计数设置为1,所以设置kobject的代码最终需要kobject_put()
+来释放该引用。
+
+因为kobjects是动态的,所以它们不能以静态方式或在堆栈中声明,而总是以动态方式分
+配。未来版本的内核将包含对静态创建的kobjects的运行时检查,并将警告开发者这种不
+当的使用。
+
+如果你使用struct kobject只是为了给你的结构体提供一个引用计数器,请使用struct kref
+来代替;kobject是多余的。关于如何使用kref结构体的更多信息,请参见Linux内核源代
+码树中的文件Documentation/core-api/kref.rst。
+
+
+创建“简单的”kobjects
+====================
+
+有时,开发者想要的只是在sysfs层次结构中创建一个简单的目录,而不必去搞那些复杂
+的ksets、显示和存储函数,以及其他细节。这是一个应该创建单个kobject的例外。要
+创建这样一个条目(即简单的目录),请使用函数::
+
+ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent);
+
+这个函数将创建一个kobject,并将其放在sysfs中指定的父kobject下面的位置。要创
+建与此kobject相关的简单属性,请使用::
+
+ int sysfs_create_file(struct kobject *kobj, const struct attribute *attr);
+
+或者::
+
+ int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp);
+
+这里使用的两种类型的属性,与已经用kobject_create_and_add()创建的kobject,
+都可以是kobj_attribute类型,所以不需要创建特殊的自定义属性。
+
+参见示例模块, ``samples/kobject/kobject-example.c`` ,以了解一个简单的
+kobject和属性的实现。
+
+
+
+ktypes和释放方法
+================
+
+以上讨论中还缺少一件重要的事情,那就是当一个kobject的引用次数达到零的时候
+会发生什么。创建kobject的代码通常不知道何时会发生这种情况;首先,如果它知
+道,那么使用kobject就没有什么意义。当sysfs被引入时,即使是可预测的对象生命
+周期也会变得更加复杂,因为内核的其他部分可以获得在系统中注册的任何kobject
+的引用。
+
+最终的结果是,一个由kobject保护的结构体在其引用计数归零之前不能被释放。引
+用计数不受创建kobject的代码的直接控制。因此,每当它的一个kobjects的最后一
+个引用消失时,必须异步通知该代码。
+
+一旦你通过kobject_add()注册了你的kobject,你绝对不能使用kfree()来直接释
+放它。唯一安全的方法是使用kobject_put()。在kobject_init()之后总是使用
+kobject_put()以避免错误的发生是一个很好的做法。
+
+这个通知是通过kobject的release()方法完成的。通常这样的方法有如下形式::
+
+ void my_object_release(struct kobject *kobj)
+ {
+ struct my_object *mine = container_of(kobj, struct my_object, kobj);
+
+ /* Perform any additional cleanup on this object, then... */
+ kfree(mine);
+ }
+
+有一点很重要:每个kobject都必须有一个release()方法,而且这个kobject必
+须持续存在(处于一致的状态),直到这个方法被调用。如果这些约束条件没有
+得到满足,那么代码就是有缺陷的。注意,如果你忘记提供release()方法,内
+核会警告你。不要试图通过提供一个“空”的释放函数来摆脱这个警告。
+
+如果你的清理函数只需要调用kfree(),那么你必须创建一个包装函数,该函数
+使用container_of()来向上造型到正确的类型(如上面的例子所示),然后在整个
+结构体上调用kfree()。
+
+注意,kobject的名字在release函数中是可用的,但它不能在这个回调中被改
+变。否则,在kobject核心中会出现内存泄漏,这让人很不爽。
+
+有趣的是,release()方法并不存储在kobject本身;相反,它与ktype相关。
+因此,让我们引入结构体kobj_type::
+
+ struct kobj_type {
+ void (*release)(struct kobject *kobj);
+ const struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+ const struct attribute_group **default_groups;
+ const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+ const void *(*namespace)(struct kobject *kobj);
+ void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+ };
+
+这个结构体用来描述一个特定类型的kobject(或者更正确地说,包含对象的
+类型)。每个kobject都需要有一个相关的kobj_type结构;当你调用
+kobject_init()或kobject_init_and_add()时必须指定一个指向该结构的
+指针。
+
+当然,kobj_type结构中的release字段是指向这种类型的kobject的release()
+方法的一个指针。另外两个字段(sysfs_ops 和 default_attrs)控制这种
+类型的对象如何在 sysfs 中被表示;它们超出了本文的范围。
+
+default_attrs 指针是一个默认属性的列表,它将为任何用这个 ktype 注册
+的 kobject 自动创建。
+
+
+ksets
+=====
+
+一个kset仅仅是一个希望相互关联的kobjects的集合。没有限制它们必须是相
+同的ktype,但是如果它们不是相同的,就要非常小心。
+
+一个kset有以下功能:
+
+ - 它像是一个包含一组对象的袋子。一个kset可以被内核用来追踪“所有块
+ 设备”或“所有PCI设备驱动”。
+
+ - kset也是sysfs中的一个子目录,与kset相关的kobjects可以在这里显示
+ 出来。每个kset都包含一个kobject,它可以被设置为其他kobject的父对象;
+ sysfs层次结构的顶级目录就是以这种方式构建的。
+
+ - Ksets可以支持kobjects的 "热插拔",并影响uevent事件如何被报告给
+ 用户空间。
+
+ 在面向对象的术语中,“kset”是顶级的容器类;ksets包含它们自己的kobject,
+ 但是这个kobject是由kset代码管理的,不应该被任何其他用户所操纵。
+
+ kset在一个标准的内核链表中保存它的子对象。Kobjects通过其kset字段指向其
+ 包含的kset。在几乎所有的情况下,属于一个kset的kobjects在它们的父
+ 对象中都有那个kset(或者,严格地说,它的嵌入kobject)。
+
+ 由于kset中包含一个kobject,它应该总是被动态地创建,而不是静态地
+ 或在堆栈中声明。要创建一个新的kset,请使用::
+
+ struct kset *kset_create_and_add(const char *name,
+ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj);
+
+当你完成对kset的处理后,调用::
+
+ void kset_unregister(struct kset *k);
+
+来销毁它。这将从sysfs中删除该kset并递减其引用计数值。当引用计数
+为零时,该kset将被释放。因为对该kset的其他引用可能仍然存在,
+释放可能发生在kset_unregister()返回之后。
+
+一个使用kset的例子可以在内核树中的 ``samples/kobject/kset-example.c``
+文件中看到。
+
+如果一个kset希望控制与它相关的kobjects的uevent操作,它可以使用
+结构体kset_uevent_ops来处理它::
+
+ struct kset_uevent_ops {
+ int (* const filter)(struct kset *kset, struct kobject *kobj);
+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env);
+ };
+
+
+过滤器函数允许kset阻止一个特定kobject的uevent被发送到用户空间。
+如果该函数返回0,该uevent将不会被发射出去。
+
+name函数将被调用以覆盖uevent发送到用户空间的kset的默认名称。默
+认情况下,该名称将与kset本身相同,但这个函数,如果存在,可以覆盖
+该名称。
+
+当uevent即将被发送至用户空间时,uevent函数将被调用,以允许更多
+的环境变量被添加到uevent中。
+
+有人可能会问,鉴于没有提出执行该功能的函数,究竟如何将一个kobject
+添加到一个kset中。答案是这个任务是由kobject_add()处理的。当一个
+kobject被传递给kobject_add()时,它的kset成员应该指向这个kobject
+所属的kset。 kobject_add()将处理剩下的部分。
+
+如果属于一个kset的kobject没有父kobject集,它将被添加到kset的目
+录中。并非所有的kset成员都必须住在kset目录中。如果在添加kobject
+之前分配了一个明确的父kobject,那么该kobject将被注册到kset中,
+但是被添加到父kobject下面。
+
+
+移除Kobject
+===========
+
+当一个kobject在kobject核心注册成功后,在代码使用完它时,必须将其
+清理掉。要做到这一点,请调用kobject_put()。通过这样做,kobject核
+心会自动清理这个kobject分配的所有内存。如果为这个对象发送了 ``KOBJ_ADD``
+uevent,那么相应的 ``KOBJ_REMOVE`` uevent也将被发送,任何其他的
+sysfs内务将被正确处理。
+
+如果你需要分两次对kobject进行删除(比如说在你要销毁对象时无权睡眠),
+那么调用kobject_del()将从sysfs中取消kobject的注册。这使得kobject
+“不可见”,但它并没有被清理掉,而且该对象的引用计数仍然是一样的。在稍
+后的时间调用kobject_put()来完成与该kobject相关的内存的清理。
+
+kobject_del()可以用来放弃对父对象的引用,如果循环引用被构建的话。
+在某些情况下,一个父对象引用一个子对象是有效的。循环引用必须通过明
+确调用kobject_del()来打断,这样一个释放函数就会被调用,前一个循环
+中的对象会相互释放。
+
+
+示例代码出处
+============
+
+关于正确使用ksets和kobjects的更完整的例子,请参见示例程序
+``samples/kobject/{kobject-example.c,kset-example.c}`` ,如果
+您选择 ``CONFIG_SAMPLE_KOBJECT`` ,它们将被构建为可加载模块。
diff --git a/Documentation/translations/zh_CN/core-api/local_ops.rst b/Documentation/translations/zh_CN/core-api/local_ops.rst
new file mode 100644
index 000000000000..ee67379b6869
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/local_ops.rst
@@ -0,0 +1,194 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/local_ops.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_local_ops:
+
+
+========================
+本地原子操作的语义和行为
+========================
+
+:作者: Mathieu Desnoyers
+
+
+本文解释了本地原子操作的目的,如何为任何给定的架构实现这些操作,并说明了
+如何正确使用这些操作。它还强调了在内存写入顺序很重要的情况下,跨CPU读取
+这些本地变量时必须采取的预防措施。
+
+.. note::
+
+ 注意,基于 ``local_t`` 的操作不建议用于一般内核操作。请使用 ``this_cpu``
+ 操作来代替使用,除非真的有特殊目的。大多数内核中使用的 ``local_t`` 已
+ 经被 ``this_cpu`` 操作所取代。 ``this_cpu`` 操作在一条指令中结合了重
+ 定位和类似 ``local_t`` 的语义,产生了更紧凑和更快的执行代码。
+
+
+本地原子操作的目的
+==================
+
+本地原子操作的目的是提供快速和高度可重入的每CPU计数器。它们通过移除LOCK前
+缀和通常需要在CPU间同步的内存屏障,将标准原子操作的性能成本降到最低。
+
+在许多情况下,拥有快速的每CPU原子计数器是很有吸引力的:它不需要禁用中断来保护中
+断处理程序,它允许在NMI(Non Maskable Interrupt)处理程序中使用连贯的计数器。
+它对追踪目的和各种性能监测计数器特别有用。
+
+本地原子操作只保证在拥有数据的CPU上的变量修改的原子性。因此,必须注意确保只
+有一个CPU写到 ``local_t`` 的数据。这是通过使用每CPU的数据来实现的,并确
+保我们在一个抢占式安全上下文中修改它。然而,从任何一个CPU读取 ``local_t``
+数据都是允许的:这样它就会显得与所有者CPU的其他内存写入顺序不一致。
+
+
+针对特定架构的实现
+==================
+
+这可以通过稍微修改标准的原子操作来实现:只有它们的UP变体必须被保留。这通常
+意味着删除LOCK前缀(在i386和x86_64上)和任何SMP同步屏障。如果架构在SMP和
+UP之间没有不同的行为,在你的架构的 ``local.h`` 中包括 ``asm-generic/local.h``
+就足够了。
+
+通过在一个结构体中嵌入一个 ``atomic_long_t`` , ``local_t`` 类型被定义为
+一个不透明的 ``signed long`` 。这样做的目的是为了使从这个类型到
+``long`` 的转换失败。该定义看起来像::
+
+ typedef struct { atomic_long_t a; } local_t;
+
+
+使用本地原子操作时应遵循的规则
+==============================
+
+* 被本地操作触及的变量必须是每cpu的变量。
+
+* *只有* 这些变量的CPU所有者才可以写入这些变量。
+
+* 这个CPU可以从任何上下文(进程、中断、软中断、nmi...)中使用本地操作来更新
+ 它的local_t变量。
+
+* 当在进程上下文中使用本地操作时,必须禁用抢占(或中断),以确保进程在获得每
+ CPU变量和进行实际的本地操作之间不会被迁移到不同的CPU。
+
+* 当在中断上下文中使用本地操作时,在主线内核上不需要特别注意,因为它们将在局
+ 部CPU上运行,并且已经禁用了抢占。然而,我建议无论如何都要明确地禁用抢占,
+ 以确保它在-rt内核上仍能正确工作。
+
+* 读取本地cpu变量将提供该变量的当前拷贝。
+
+* 对这些变量的读取可以从任何CPU进行,因为对 ``long`` 对齐的变量的更新
+ 总是原子的。由于写入程序的CPU没有进行内存同步,所以在读取 *其他* cpu的变
+ 量时,可以读取该变量的过期副本。
+
+
+如何使用本地原子操作
+====================
+
+::
+
+ #include <linux/percpu.h>
+ #include <asm/local.h>
+
+ static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
+
+
+计数器
+======
+
+计数是在一个signed long的所有位上进行的。
+
+在可抢占的上下文中,围绕本地原子操作使用 ``get_cpu_var()`` 和
+``put_cpu_var()`` :它确保在对每个cpu变量进行写访问时,抢占被禁用。比如
+说::
+
+ local_inc(&get_cpu_var(counters));
+ put_cpu_var(counters);
+
+如果你已经在一个抢占安全上下文中,你可以使用 ``this_cpu_ptr()`` 代替::
+
+ local_inc(this_cpu_ptr(&counters));
+
+
+
+读取计数器
+==========
+
+那些本地计数器可以从外部的CPU中读取,以求得计数的总和。请注意,local_read
+所看到的跨CPU的数据必须被认为是相对于拥有该数据的CPU上发生的其他内存写入来
+说不符合顺序的::
+
+ long sum = 0;
+ for_each_online_cpu(cpu)
+ sum += local_read(&per_cpu(counters, cpu));
+
+如果你想使用远程local_read来同步CPU之间对资源的访问,必须在写入者和读取者
+的CPU上分别使用显式的 ``smp_wmb()`` 和 ``smp_rmb()`` 内存屏障。如果你使
+用 ``local_t`` 变量作为写在缓冲区中的字节的计数器,就会出现这种情况:在缓
+冲区写和计数器增量之间应该有一个 ``smp_wmb()`` ,在计数器读和缓冲区读之间
+也应有一个 ``smp_rmb()`` 。
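+
+一个简化的示意(其中缓冲区 ``buf`` 与每CPU计数器 ``bytes_written`` 均为假设)::
+
+	/* 写者(所有者CPU) */
+	buf[i] = data;
+	smp_wmb();		/* 先写缓冲区,再增加计数 */
+	local_inc(this_cpu_ptr(&bytes_written));
+
+	/* 读者(任意CPU) */
+	n = local_read(&per_cpu(bytes_written, cpu));
+	smp_rmb();		/* 先读计数,再读缓冲区 */
+	/* 此时可以安全地读取 buf 的前 n 个元素 */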
+
+下面是一个使用 ``local.h`` 实现每个cpu基本计数器的示例模块::
+
+ /* test-local.c
+ *
+ * Sample module for local.h usage.
+ */
+
+
+ #include <asm/local.h>
+ #include <linux/module.h>
+ #include <linux/timer.h>
+
+ static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
+
+ static struct timer_list test_timer;
+
+ /* IPI called on each CPU. */
+ static void test_each(void *info)
+ {
+ /* Increment the counter from a non preemptible context */
+ printk("Increment on cpu %d\n", smp_processor_id());
+ local_inc(this_cpu_ptr(&counters));
+
+ /* This is what incrementing the variable would look like within a
+ * preemptible context (it disables preemption) :
+ *
+ * local_inc(&get_cpu_var(counters));
+ * put_cpu_var(counters);
+ */
+ }
+
+ static void do_test_timer(unsigned long data)
+ {
+ int cpu;
+
+ /* Increment the counters */
+ on_each_cpu(test_each, NULL, 1);
+ /* Read all the counters */
+ printk("Counters read from CPU %d\n", smp_processor_id());
+ for_each_online_cpu(cpu) {
+ printk("Read : CPU %d, count %ld\n", cpu,
+ local_read(&per_cpu(counters, cpu)));
+ }
+ mod_timer(&test_timer, jiffies + 1000);
+ }
+
+ static int __init test_init(void)
+ {
+ /* initialize the timer that will increment the counter */
+ timer_setup(&test_timer, do_test_timer, 0);
+ mod_timer(&test_timer, jiffies + 1);
+
+ return 0;
+ }
+
+ static void __exit test_exit(void)
+ {
+ del_timer_sync(&test_timer);
+ }
+
+ module_init(test_init);
+ module_exit(test_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Mathieu Desnoyers");
+ MODULE_DESCRIPTION("Local Atomic Ops");
diff --git a/Documentation/translations/zh_CN/core-api/padata.rst b/Documentation/translations/zh_CN/core-api/padata.rst
new file mode 100644
index 000000000000..c627f8f131f9
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/padata.rst
@@ -0,0 +1,158 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/padata.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_core_api_padata.rst:
+
+==================
+padata并行执行机制
+==================
+
+:日期: 2020年5月
+
+Padata是一种机制,内核可以通过此机制将工作分散到多个CPU上并行完成,同时
+可以选择保持它们的顺序。
+
+它最初是为IPsec开发的,它需要在不对这些数据包重新排序的前提下,为大量的数
+据包进行加密和解密。这是目前padata的序列化作业支持的唯一用途。
+
+Padata还支持多线程作业,将作业平均分割,同时在线程之间进行负载均衡和协调。
+
+执行序列化作业
+==============
+
+初始化
+------
+
+使用padata执行序列化作业的第一步是建立一个padata_instance结构体,以全面
+控制作业的运行方式::
+
+ #include <linux/padata.h>
+
+ struct padata_instance *padata_alloc(const char *name);
+
+'name'即标识了这个实例。
+
+然后,通过分配一个padata_shell来完成padata的初始化::
+
+ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+
+一个padata_shell用于向padata提交一个作业,并允许一系列这样的作业被独立地
+序列化。一个padata_instance可以有一个或多个padata_shell与之相关联,每个
+都允许一系列独立的作业。
+
+修改cpumasks
+------------
+
+用于运行作业的CPU可以通过两种方式改变,通过padata_set_cpumask()编程或通
+过sysfs。前者的定义是::
+
+ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
+ cpumask_var_t cpumask);
+
+这里cpumask_type是PADATA_CPU_PARALLEL(并行)或PADATA_CPU_SERIAL(串行)之一,其中并
+行cpumask描述了哪些处理器将被用来并行执行提交给这个实例的作业,串行cpumask
+定义了哪些处理器被允许用作串行化回调处理器。 cpumask指定了要使用的新cpumask。
+
+一个实例的cpumasks可能有sysfs文件。例如,pcrypt的文件在
+/sys/kernel/pcrypt/<instance-name>。在一个实例的目录中,有两个文件,parallel_cpumask
+和serial_cpumask,任何一个cpumask都可以通过在文件中回显(echo)一个bitmask
+来改变,比如说::
+
+ echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+
+读取其中一个文件会显示用户提供的cpumask,它可能与“可用”的cpumask不同。
+
+Padata内部维护着两对cpumask,用户提供的cpumask和“可用的”cpumask(每一对由一个
+并行和一个串行cpumask组成)。用户提供的cpumasks在实例分配时默认为所有可能的CPU,
+并且可以如上所述进行更改。可用的cpumasks总是用户提供的cpumasks的一个子集,只包
+含用户提供的掩码中的在线CPU;这些是padata实际使用的cpumasks。因此,向padata提
+供一个包含离线CPU的cpumask是合法的。一旦用户提供的cpumask中的一个离线CPU上线,
+padata就会使用它。
+
+改变CPU掩码的操作代价很高,所以不应频繁更改。
+
+运行一个作业
+-------------
+
+实际上向padata实例提交工作需要创建一个padata_priv结构体,它代表一个作业::
+
+ struct padata_priv {
+ /* Other stuff here... */
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+ };
+
+这个结构体几乎肯定会被嵌入到一些针对要做的工作的大结构体中。它的大部分字段对
+padata来说是私有的,但是这个结构在初始化时应该被清零,并且应该提供parallel()和
+serial()函数。在完成工作的过程中,这些函数将被调用,我们马上就会遇到。
+
+工作的提交是通过::
+
+ int padata_do_parallel(struct padata_shell *ps,
+ struct padata_priv *padata, int *cb_cpu);
+
+ps和padata结构体必须如上所述进行设置;cb_cpu指向作业完成后用于最终回调的首选CPU;
+它必须在当前实例的CPU掩码中(如果不是,cb_cpu指针将被更新为指向实际选择的CPU)。
+padata_do_parallel()的返回值在成功时为0,表示工作正在进行中。-EBUSY意味着有人
+在其他地方正在搞乱实例的CPU掩码,而当cb_cpu不在串行cpumask中、并行或串行cpumasks
+中无在线CPU,或实例已停止时,则返回-EINVAL。
+
+每个提交给padata_do_parallel()的作业将依次传递给一个CPU上的上述parallel()函数
+的一个调用,所以真正的并行是通过提交多个作业来实现的。parallel()在运行时禁用软
+件中断,因此不能睡眠。parallel()函数把获得的padata_priv结构体指针作为其唯一的参
+数;关于实际要做的工作的信息可能是通过使用container_of()找到封装结构体来获得的。
+
+请注意,parallel()没有返回值;padata子系统假定parallel()将从此时开始负责这项工
+作。作业不需要在这次调用中完成,但是,如果parallel()留下了未完成的工作,它应该准
+备在前一个作业完成之前,被以新的作业再次调用。
+
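+一个parallel()回调的示意(其中 ``struct my_job`` 与 ``do_work()`` 均为虚构;
+padata_do_serial()将在下一小节介绍)::
+
+	struct my_job {
+		struct padata_priv padata;
+		/* 作业自身的数据 ... */
+	};
+
+	static void my_parallel(struct padata_priv *padata)
+	{
+		struct my_job *job = container_of(padata, struct my_job, padata);
+
+		do_work(job);			/* 软中断已禁用,不可睡眠 */
+		padata_do_serial(padata);	/* 完成后交还padata以保序 */
+	}
+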
+序列化作业
+----------
+
+当一个作业完成时,parallel()(或任何实际完成该工作的函数)应该通过调用通知padata此
+事::
+
+ void padata_do_serial(struct padata_priv *padata);
+
+在未来的某个时刻,padata_do_serial()将触发对padata_priv结构体中serial()函数的调
+用。这个调用将发生在最初要求调用padata_do_parallel()的CPU上;它也是在本地软件中断
+被禁用的情况下运行的。
+请注意,这个调用可能会被推迟一段时间,因为padata代码会努力确保作业按照提交的顺序完
+成。
+
+销毁
+----
+
+清理一个padata实例时,可以预见的是调用两个free函数,这两个函数对应于分配的逆过程::
+
+ void padata_free_shell(struct padata_shell *ps);
+ void padata_free(struct padata_instance *pinst);
+
+用户有责任确保在调用上述任何一项之前,所有未完成的工作都已完成。
+
+运行多线程作业
+==============
+
+一个多线程作业有一个主线程和零个或多个辅助线程,主线程参与作业,然后等待所有辅助线
+程完成。padata将作业分割成称为chunk的单元,其中chunk是一个线程在一次调用线程函数
+中完成的作业片段。
+
+用户必须做三件事来运行一个多线程作业。首先,通过定义一个padata_mt_job结构体来描述
+作业,这在接口部分有解释。这包括一个指向线程函数的指针,padata每次将作业块分配给线
+程时都会调用这个函数。然后,定义线程函数,它接受三个参数: ``start`` 、 ``end`` 和 ``arg`` ,
+其中前两个参数限定了线程操作的范围,最后一个是指向作业共享状态的指针,如果有的话。
+准备好共享状态,它通常被分配在主线程的堆栈中。最后,调用padata_do_multithreaded(),
+它将在作业完成后返回。
+
+接口
+====
+
+该API在以下内核代码中:
+
+include/linux/padata.h
+
+kernel/padata.c
diff --git a/Documentation/translations/zh_CN/core-api/printk-basics.rst b/Documentation/translations/zh_CN/core-api/printk-basics.rst
new file mode 100644
index 000000000000..2b20f6303a82
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/printk-basics.rst
@@ -0,0 +1,110 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/printk-basics.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_printk-basics.rst:
+
+
+==================
+使用printk记录消息
+==================
+
+printk()是Linux内核中最广为人知的函数之一。它是我们打印消息的标准工具,通常也是追踪和调试
+的最基本方法。如果你熟悉printf(3),你就能够知道printk()是基于它的,尽管它在功能上有一些不
+同之处:
+
+ - printk() 消息可以指定日志级别。
+
+ - 格式字符串虽然与C99基本兼容,但并不遵循完全相同的规范。它有一些扩展和一些限制(没
+ 有 ``%n`` 或浮点转换指定符)。参见:ref: `如何正确地获得printk格式指定符<printk-specifiers>` 。
+
+所有的printk()消息都会被打印到内核日志缓冲区,这是一个通过/dev/kmsg输出到用户空间的环
+形缓冲区。读取它的通常方法是使用 ``dmesg`` 。
+
+printk()的用法通常是这样的::
+
+ printk(KERN_INFO "Message: %s\n", arg);
+
+其中 ``KERN_INFO`` 是日志级别(注意,它与格式字符串连在一起,日志级别不是一个单独的参数)。
+可用的日志级别是:
+
+
++----------------+--------+-----------------------------------------------+
+| 名称 | 字符串 | 别名函数 |
++================+========+===============================================+
+| KERN_EMERG | "0" | pr_emerg() |
++----------------+--------+-----------------------------------------------+
+| KERN_ALERT | "1" | pr_alert() |
++----------------+--------+-----------------------------------------------+
+| KERN_CRIT | "2" | pr_crit() |
++----------------+--------+-----------------------------------------------+
+| KERN_ERR | "3" | pr_err() |
++----------------+--------+-----------------------------------------------+
+| KERN_WARNING | "4" | pr_warn() |
++----------------+--------+-----------------------------------------------+
+| KERN_NOTICE | "5" | pr_notice() |
++----------------+--------+-----------------------------------------------+
+| KERN_INFO | "6" | pr_info() |
++----------------+--------+-----------------------------------------------+
+| KERN_DEBUG | "7" | pr_debug() and pr_devel() 若定义了DEBUG |
++----------------+--------+-----------------------------------------------+
+| KERN_DEFAULT | "" | |
++----------------+--------+-----------------------------------------------+
+| KERN_CONT | "c" | pr_cont() |
++----------------+--------+-----------------------------------------------+
+
+
+日志级别指定了一条消息的重要性。内核根据日志级别和当前 *console_loglevel* (一个内核变量)决
+定是否立即显示消息(将其打印到当前控制台)。如果消息的优先级比 *console_loglevel* 高(日志级
+别值较低),消息将被打印到控制台。
+
+如果省略了日志级别,则以 ``KERN_DEFAULT`` 级别打印消息。
+
+你可以用以下方法检查当前的 *console_loglevel* ::
+
+ $ cat /proc/sys/kernel/printk
+ 4 4 1 7
+
+结果显示了 *current*, *default*, *minimum* 和 *boot-time-default* 日志级别
+
+要改变当前的 console_loglevel,只需在 ``/proc/sys/kernel/printk`` 中写入所需的
+级别。例如,要打印所有的消息到控制台上::
+
+ # echo 8 > /proc/sys/kernel/printk
+
+另一种方式,使用 ``dmesg``::
+
+ # dmesg -n 5
+
+设置 console_loglevel 打印 KERN_WARNING (4) 或更严重的消息到控制台。更多消息参
+见 ``dmesg(1)`` 。
+
+作为printk()的替代方案,你可以使用 ``pr_*()`` 别名来记录日志。这个系列的宏在宏名中
+嵌入了日志级别。例如::
+
+ pr_info("Info message no. %d\n", msg_num);
+
+打印 ``KERN_INFO`` 消息。
+
+除了比等效的printk()调用更简洁之外,它们还可以通过pr_fmt()宏为格式字符串使用一个通用
+的定义。例如,在源文件的顶部(在任何 ``#include`` 指令之前)定义这样的内容。::
+
+ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+会在该文件中的每一条 pr_*() 消息前加上发起该消息的模块和函数名称。
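+
+例如,假设模块名为mymodule、调用发生在函数my_init中,上面的pr_info()
+将输出类似这样的内容::
+
+	mymodule:my_init: Info message no. 1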
+
+为了调试,还有两个有条件编译的宏:
+pr_debug()和pr_devel(),除非定义了 ``DEBUG`` (或者在pr_debug()的情况下定义了
+``CONFIG_DYNAMIC_DEBUG`` ),否则它们会被编译。
+
+
+函数接口
+========
+
+该API在以下内核代码中:
+
+kernel/printk/printk.c
+
+include/linux/printk.h
diff --git a/Documentation/translations/zh_CN/core-api/printk-formats.rst b/Documentation/translations/zh_CN/core-api/printk-formats.rst
new file mode 100644
index 000000000000..a680c8f164c3
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/printk-formats.rst
@@ -0,0 +1,595 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/printk-formats.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_printk-formats.rst:
+
+
+==============================
+如何获得正确的printk格式占位符
+==============================
+
+
+
+:作者: Randy Dunlap <rdunlap@infradead.org>
+:作者: Andrew Murray <amurray@mpc-data.co.uk>
+
+
+整数类型
+========
+
+::
+
+ 若变量类型是Type,则使用printk格式占位符。
+ -------------------------------------------
+ char %d 或 %x
+ unsigned char %u 或 %x
+ short int %d 或 %x
+ unsigned short int %u 或 %x
+ int %d 或 %x
+ unsigned int %u 或 %x
+ long %ld 或 %lx
+ unsigned long %lu 或 %lx
+ long long %lld 或 %llx
+ unsigned long long %llu 或 %llx
+ size_t %zu 或 %zx
+ ssize_t %zd 或 %zx
+ s8 %d 或 %x
+ u8 %u 或 %x
+ s16 %d 或 %x
+ u16 %u 或 %x
+ s32 %d 或 %x
+ u32 %u 或 %x
+ s64 %lld 或 %llx
+ u64 %llu 或 %llx
+
+
+如果 <type> 的大小依赖于配置选项 (例如 sector_t, blkcnt_t) 或其大小依赖于架构
+(例如 tcflag_t),则使用其可能的最大类型的格式占位符并显式强制转换为它。
+
+例如
+
+::
+
+ printk("test: sector number/total blocks: %llu/%llu\n",
+ (unsigned long long)sector, (unsigned long long)blockcount);
+
+提醒:sizeof()返回类型为size_t。
+
+内核的printf不支持%n。显而易见,浮点格式(%e, %f, %g, %a)也不被识别。使用任何不
+支持的占位符或长度限定符都会导致一个WARN并且终止vsnprintf()执行。
+
+指针类型
+========
+
+一个原始指针值可以用%p打印,它将在打印前对地址进行哈希处理。内核也支持扩展占位符来打印
+不同类型的指针。
+
+一些扩展占位符会打印给定地址上的数据,而不是打印地址本身。在这种情况下,以下错误消息可能
+会被打印出来,而不是无法访问的信息::
+
+ (null) data on plain NULL address
+ (efault) data on invalid address
+ (einval) invalid data on a valid address
+
+普通指针
+----------
+
+::
+
+ %p abcdef12 or 00000000abcdef12
+
+没有指定扩展说明符的指针(即没有修饰符的%p)会被哈希(hash),以防止内核内存布局信息的泄露。这
+样还有一个额外的好处,就是提供一个唯一的标识符。在64位机器上,前32位被清零。当没有足够的
+熵进行散列处理时,内核将打印(ptrval)代替。
+
+如果可能的话,使用专门的修饰符,如%pS或%pB(如下所述),以避免打印一个必须事后解释的非哈
+希地址。如果不可能,而且打印地址的目的是为调试提供更多的信息,使用%p,并在调试过程中
+用 ``no_hash_pointers`` 参数启动内核,这将打印所有未修改的%p地址。如果你 *真的* 想知
+道未修改的地址,请看下面的%px。
+
+如果(也只有在)你将地址作为虚拟文件的内容打印出来,例如在procfs或sysfs中(使用
+seq_printf(),而不是printk())由用户空间进程读取,使用下面描述的%pK修饰符,不
+要用%p或%px。
+
+
+错误指针
+--------
+
+::
+
+ %pe -ENOSPC
+
+用于打印错误指针(即IS_ERR()为真的指针)的符号错误名。不知道符号名的错误值会以十进制打印,
+而作为%pe参数传递的非ERR_PTR会被视为普通的%p。
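+
+一个假设性的使用示例::
+
+	pr_err("probe failed: %pe\n", ERR_PTR(-ENOMEM));
+	/* 输出:probe failed: -ENOMEM */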
+
+符号/函数指针
+-------------
+
+::
+
+ %pS versatile_init+0x0/0x110
+ %ps versatile_init
+ %pSR versatile_init+0x9/0x110
+ (with __builtin_extract_return_addr() translation)
+ %pB prev_fn_of_versatile_init+0x88/0x88
+
+
+``S`` 和 ``s`` 占位符用于打印符号格式的指针。它们的结果是符号名称带有(S)或不带有(s)偏移
+量。如果禁用KALLSYMS,则打印符号地址。
+
+``B`` 占位符的结果是带有偏移量的符号名,在打印堆栈回溯时应该使用。占位符将考虑编译器优化
+的影响,当使用尾部调用并使用noreturn GCC属性标记时,可能会发生这种优化。
+
+如果指针在一个模块内,模块名称和可选的构建ID将被打印在符号名称之后,并在说明符的末尾添加
+一个额外的 ``b`` 。
+
+::
+
+ %pS versatile_init+0x0/0x110 [module_name]
+ %pSb versatile_init+0x0/0x110 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+ %pSRb versatile_init+0x9/0x110 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+ (with __builtin_extract_return_addr() translation)
+ %pBb prev_fn_of_versatile_init+0x88/0x88 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+
+来自BPF / tracing追踪的探查指针
+----------------------------------
+
+::
+
+ %pks kernel string
+ %pus user string
+
+``k`` 和 ``u`` 指定符用于打印来自内核内存(k)或用户内存(u)的先前探测的内存。后面的 ``s`` 指
+定符的结果是打印一个字符串。对于直接在常规的vsnprintf()中使用时,(k)和(u)注释被忽略,但是,当
+在BPF的bpf_trace_printk()之外使用时,它会读取它所指向的内存,不会出现错误。
+
+内核指针
+--------
+
+::
+
+ %pK 01234567 or 0123456789abcdef
+
+用于打印应该对非特权用户隐藏的内核指针。%pK的行为取决于kptr_restrict sysctl——详见
+Documentation/admin-guide/sysctl/kernel.rst。
+
+未经修改的地址
+--------------
+
+::
+
+ %px 01234567 or 0123456789abcdef
+
+用于在你 *真的* 想打印地址时打印指针。在用%px打印指针之前,请考虑你是否泄露了内核内
+存布局的敏感信息。%px在功能上等同于%lx(或%lu)。%px是首选,因为它在grep查找时更唯一。
+如果将来我们需要修改内核处理打印指针的方式,我们将能更好地找到调用点。
+
+在使用%px之前,请考虑使用%p并在调试过程中启用 ``no_hash_pointers`` 内核参数是否足
+够(参见上面的%p描述)。%px的一个有效场景可能是在panic发生之前立即打印消息,这样无论如何
+都可以防止任何敏感信息被利用,使用%px就不需要用 ``no_hash_pointers`` 来重现panic。
+
+指针差异
+--------
+
+::
+
+ %td 2560
+ %tx a00
+
+为了打印指针的差异,使用ptrdiff_t的%t修饰符。
+
+例如::
+
+ printk("test: difference between pointers: %td\n", ptr2 - ptr1);
+
+结构体资源(Resources)
+-----------------------
+
+::
+
+ %pr [mem 0x60000000-0x6fffffff flags 0x2200] or
+ [mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
+ %pR [mem 0x60000000-0x6fffffff pref] or
+ [mem 0x0000000060000000-0x000000006fffffff pref]
+
+用于打印结构体资源。 ``R`` 和 ``r`` 占位符的结果是打印出的资源带有(R)或不带有(r)解码标志
+成员。
+
+通过引用传递。
+
+物理地址类型 phys_addr_t
+------------------------
+
+::
+
+ %pa[p] 0x01234567 or 0x0123456789abcdef
+
+用于打印phys_addr_t类型(以及它的衍生物,如resource_size_t),该类型可以根据构建选项而
+变化,而不考虑CPU数据路径的宽度。
+
+通过引用传递。
+
+DMA地址类型dma_addr_t
+---------------------
+
+::
+
+ %pad 0x01234567 or 0x0123456789abcdef
+
+用于打印dma_addr_t类型,该类型可以根据构建选项而变化,而不考虑CPU数据路径的宽度。
+
+通过引用传递。
+
+原始缓冲区为转义字符串
+----------------------
+
+::
+
+ %*pE[achnops]
+
+用于将原始缓冲区打印成转义字符串。对于以下缓冲区::
+
+ 1b 62 20 5c 43 07 22 90 0d 5d
+
+几个例子展示了如何进行转换(不包括两端的引号)。::
+
+ %*pE "\eb \C\a"\220\r]"
+ %*pEhp "\x1bb \C\x07"\x90\x0d]"
+ %*pEa "\e\142\040\\\103\a\042\220\r\135"
+
+转换规则是根据可选的标志组合来应用的(详见:c:func:`string_escape_mem` 内核文档):
+
+ - a - ESCAPE_ANY
+ - c - ESCAPE_SPECIAL
+ - h - ESCAPE_HEX
+ - n - ESCAPE_NULL
+ - o - ESCAPE_OCTAL
+ - p - ESCAPE_NP
+ - s - ESCAPE_SPACE
+
+默认情况下,使用 ESCAPE_ANY_NP。
+
+ESCAPE_ANY_NP是许多情况下的明智选择,特别是对于打印SSID。
+
+如果字段宽度被省略,那么将只转义1个字节。
+
+原始缓冲区为十六进制字符串
+--------------------------
+
+::
+
+ %*ph 00 01 02 ... 3f
+ %*phC 00:01:02: ... :3f
+ %*phD 00-01-02- ... -3f
+ %*phN 000102 ... 3f
+
+对于打印小的缓冲区(最长64个字节),可以用一定的分隔符作为一个
+十六进制字符串。对于较大的缓冲区,可以考虑使用
+:c:func:`print_hex_dump` 。
+
+MAC/FDDI地址
+------------
+
+::
+
+ %pM 00:01:02:03:04:05
+ %pMR 05:04:03:02:01:00
+ %pMF 00-01-02-03-04-05
+ %pm 000102030405
+ %pmR 050403020100
+
+用于打印以十六进制表示的6字节MAC/FDDI地址。 ``M`` 和 ``m`` 占位符导致打印的
+地址有(M)或没有(m)字节分隔符。默认的字节分隔符是冒号(:)。
+
+对于FDDI地址,可以在 ``M`` 占位符之后使用 ``F`` 说明,以使用破折号(-)分隔符
+代替默认的分隔符。
+
+对于蓝牙地址, ``R`` 占位符应使用在 ``M`` 占位符之后,以使用反转的字节顺序,适
+合于以小尾端顺序的蓝牙地址的肉眼可见的解析。
+
+通过引用传递。
+
+IPv4地址
+--------
+
+::
+
+ %pI4 1.2.3.4
+ %pi4 001.002.003.004
+ %p[Ii]4[hnbl]
+
+用于打印IPv4点分隔的十进制地址。 ``I4`` 和 ``i4`` 占位符的结果是打印的地址
+有(i4)或没有(I4)前导零。
+
+附加的 ``h`` 、 ``n`` 、 ``b`` 和 ``l`` 占位符分别用于指定主机、网络、大
+尾端或小尾端地址。如果没有提供占位符,则使用默认的网络/大尾端顺序。
+
+通过引用传递。
+
+IPv6 地址
+---------
+
+::
+
+ %pI6 0001:0002:0003:0004:0005:0006:0007:0008
+ %pi6 00010002000300040005000600070008
+ %pI6c 1:2:3:4:5:6:7:8
+
+用于打印IPv6网络顺序的16位十六进制地址。 ``I6`` 和 ``i6`` 占位符的结果是
+打印的地址有(I6)或没有(i6)冒号。始终使用前导零。
+
+额外的 ``c`` 占位符可与 ``I`` 占位符一起使用,以打印压缩的IPv6地址,如
+https://tools.ietf.org/html/rfc5952 所述
+
+通过引用传递。
+
+IPv4/IPv6地址(generic, with port, flowinfo, scope)
+--------------------------------------------------
+
+::
+
+ %pIS 1.2.3.4 or 0001:0002:0003:0004:0005:0006:0007:0008
+ %piS 001.002.003.004 or 00010002000300040005000600070008
+ %pISc 1.2.3.4 or 1:2:3:4:5:6:7:8
+ %pISpc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345
+ %p[Ii]S[pfschnbl]
+
+用于打印一个IP地址,不需要区分它的类型是AF_INET还是AF_INET6。一个指向有效结构
+体sockaddr的指针,通过 ``IS`` 或 ``iS`` 指定,可以传递给这个格式占位符。
+
+附加的 ``p`` 、 ``f`` 和 ``s`` 占位符用于指定port(IPv4, IPv6)、
+flowinfo (IPv6)和scope(IPv6)。port的前缀是 ``:`` ,flowinfo的前缀是 ``/`` ,
+scope的前缀是 ``%`` ,每个前缀后面都跟着实际的值。
+
+对于IPv6地址,如果指定了额外的指定符 ``c`` ,则使用
+https://tools.ietf.org/html/rfc5952 描述的压缩IPv6地址。
+如https://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-07
+所建议的,当存在额外的占位符 ``p`` 、 ``f`` 或 ``s`` 时,IPv6地址将由'[',']'包围。
+
+对于IPv4地址,也可以使用额外的 ``h`` , ``n`` , ``b`` 和 ``l`` 说
+明符,但对于IPv6地址则忽略。
+
+通过引用传递。
+
+更多例子::
+
+ %pISfc 1.2.3.4 or [1:2:3:4:5:6:7:8]/123456789
+ %pISsc 1.2.3.4 or [1:2:3:4:5:6:7:8]%1234567890
+ %pISpfc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345/123456789
+
+UUID/GUID地址
+-------------
+
+::
+
+ %pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
+ %pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
+ %pUl 03020100-0504-0706-0809-0a0b0c0e0e0f
+ %pUL 03020100-0504-0706-0809-0A0B0C0E0E0F
+
+用于打印16字节的UUID/GUIDs地址。附加的 ``l`` , ``L`` , ``b`` 和 ``B`` 占位符用
+于指定小写(l)或大写(L)十六进制表示法中的小尾端顺序,以及小写(b)或大写(B)十六进制表
+示法中的大尾端顺序。
+
+如果没有使用额外的占位符,则将打印带有小写十六进制表示法的默认大端顺序。
+
+通过引用传递。
+
+目录项(dentry)的名称
+----------------------
+
+::
+
+ %pd{,2,3,4}
+ %pD{,2,3,4}
+
+用于打印dentry名称;如果与 :c:func:`d_move` 发生竞争,名称可能是新旧混合的,但
+不会oops。 %pd是安全的,相当于我们以前用的%s dentry->d_name.name;%pd<n>打
+印最后 ``n`` 个组件。 %pD对struct file做同样的事情。
+
+
+通过引用传递。
+
+块设备(block_device)名称
+--------------------------
+
+::
+
+ %pg sda, sda1 or loop0p1
+
+用于打印block_device指针的名称。
+
+va_format结构体
+---------------
+
+::
+
+ %pV
+
+用于打印结构体va_format。这些结构包含一个格式字符串
+和va_list如下
+
+::
+
+ struct va_format {
+ const char *fmt;
+ va_list *va;
+ };
+
+实现 "递归vsnprintf"。
+
+如果没有一些机制来验证格式字符串和va_list参数的正确性,请不要使用这个功能。
+
+通过引用传递。
+
+设备树节点
+----------
+
+::
+
+ %pOF[fnpPcCF]
+
+
+用于打印设备树节点结构。默认行为相当于%pOFf。
+
+ - f - 设备节点全称
+ - n - 设备节点名
+ - p - 设备节点句柄
+ - P - 设备节点路径规范(名称+@单位)
+ - F - 设备节点标志
+ - c - 主要兼容字符串
+ - C - 全兼容字符串
+
+当使用多个参数时,分隔符是':'。
+
+例如
+
+::
+
+ %pOF /foo/bar@0 - Node full name
+ %pOFf /foo/bar@0 - Same as above
+ %pOFfp /foo/bar@0:10 - Node full name + phandle
+ %pOFfcF /foo/bar@0:foo,device:--P- - Node full name +
+ major compatible string +
+ node flags
+ D - dynamic
+ d - detached
+ P - Populated
+ B - Populated bus
+
+通过引用传递。
+
+Fwnode handles
+--------------
+
+::
+
+ %pfw[fP]
+
+用于打印fwnode_handle的信息。默认情况下是打印完整的节点名称,包括路径。
+这些修饰符在功能上等同于上面的%pOF。
+
+ - f - 节点的全名,包括路径。
+ - P - 节点名称,包括地址(如果有的话)。
+
+例如 (ACPI)
+
+::
+
+ %pfwf \_SB.PCI0.CIO2.port@1.endpoint@0 - Full node name
+ %pfwP endpoint@0 - Node name
+
+例如 (OF)
+
+::
+
+ %pfwf /ocp@68000000/i2c@48072000/camera@10/port/endpoint - Full name
+ %pfwP endpoint - Node name
+
+时间和日期
+----------
+
+::
+
+ %pt[RT] YYYY-mm-ddTHH:MM:SS
+ %pt[RT]s YYYY-mm-dd HH:MM:SS
+ %pt[RT]d YYYY-mm-dd
+ %pt[RT]t HH:MM:SS
+ %pt[RT][dt][r][s]
+
+用于打印日期和时间::
+
+ R struct rtc_time structure
+ T time64_t type
+
+以我们(人类)可读的格式。
+
+默认情况下,年将以1900为单位递增,月将以1为单位递增。 使用%pt[RT]r (raw)
+来抑制这种行为。
+
+%pt[RT]s(空格)将覆盖ISO 8601的分隔符,在日期和时间之间使用' '(空格)而
+不是'T'(大写T)。当日期或时间被省略时,它不会有任何影响。
+
+通过引用传递。
+
+clk结构体
+---------
+
+::
+
+ %pC pll1
+ %pCn pll1
+
+用于打印clk结构。%pC 和 %pCn 打印时钟的名称(通用时钟框架)或唯一的32位
+ID(传统时钟框架)。
+
+通过引用传递。
+
+位图及其衍生物,如cpumask和nodemask
+-----------------------------------
+
+::
+
+ %*pb 0779
+ %*pbl 0,3-6,8-10
+
+对于打印位图(bitmap)及其派生的cpumask和nodemask,%*pb输出以字段宽度为位数的位图,
+%*pbl输出以字段宽度为位数的范围列表。
+
+字段宽度用值传递,位图用引用传递。可以使用辅助宏cpumask_pr_args()和
+nodemask_pr_args()来方便打印cpumask和nodemask。
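+
+例如::
+
+	pr_info("online cpus: %*pbl\n", cpumask_pr_args(cpu_online_mask));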
+
+标志位字段,如页标志、gfp_flags
+-------------------------------
+
+::
+
+ %pGp referenced|uptodate|lru|active|private|node=0|zone=2|lastcpupid=0x1fffff
+ %pGg GFP_USER|GFP_DMA32|GFP_NOWARN
+ %pGv read|exec|mayread|maywrite|mayexec|denywrite
+
+将flags位字段打印为构造值的符号常量集合。标志的类型由第三个字符给出。目前支持的
+是[p]age flags, [v]ma_flags(都期望 ``unsigned long *`` )和
+[g]fp_flags(期望 ``gfp_t *`` )。标志名称和打印顺序取决于特定的类型。
+
+注意,这种格式不应该直接用于跟踪点的:c:func:`TP_printk()` 部分。相反,应使
+用 <trace/events/mmflags.h>中的show_*_flags()函数。
+
+通过引用传递。
+
+网络设备特性
+------------
+
+::
+
+ %pNF 0x000000000000c000
+
+用于打印netdev_features_t。
+
+通过引用传递。
+
+V4L2和DRM FourCC代码(像素格式)
+------------------------------
+
+::
+
+ %p4cc
+
+打印V4L2或DRM使用的FourCC代码,包括格式端序及其十六进制的数值。
+
+通过引用传递。
+
+例如::
+
+ %p4cc BG12 little-endian (0x32314742)
+ %p4cc Y10 little-endian (0x20303159)
+ %p4cc NV12 big-endian (0xb231564e)
+
+谢谢
+====
+
+如果您添加了其他%p扩展,请在可行的情况下,用一个或多个测试用例扩展<lib/test_printf.c>。
+
+谢谢你的合作和关注。
diff --git a/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst b/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst
new file mode 100644
index 000000000000..ea834e38d2f6
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst
@@ -0,0 +1,154 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/refcount-vs-atomic.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_refcount-vs-atomic:
+
+
+=======================================
+与atomic_t相比,refcount_t的API是这样的
+=======================================
+
+.. contents:: :local:
+
+简介
+====
+
+refcount_t API的目标是为实现对象的引用计数器提供一个最小的API。虽然来自
+lib/refcount.c的独立于架构的通用实现在下面使用了原子操作,但一些 ``refcount_*()``
+和 ``atomic_*()`` 函数在内存顺序保证方面有很多不同。本文档概述了这些差异,并
+提供了相应的例子,以帮助开发者根据这些内存顺序保证的变化来验证他们的代码。
+
+本文档中使用的术语尽量遵循tools/memory-model/Documentation/explanation.txt
+中定义的正式LKMM。
+
+memory-barriers.txt和atomic_t.txt提供了更多关于内存顺序的背景,包括通用的
+和针对原子操作的。
+
+内存顺序的相关类型
+==================
+
+.. note:: 下面的部分只涵盖了本文使用的与原子操作和引用计数器有关的一些内存顺
+ 序类型。如果想了解更广泛的情况,请查阅memory-barriers.txt文件。
+
+在没有任何内存顺序保证的情况下(即完全无序),atomics和refcounters只提供原
+子性和程序顺序(program order, po)关系(在同一个CPU上)。它保证每个
+``atomic_*()`` 和 ``refcount_*()`` 操作都是原子性的,指令在单个CPU上按程序
+顺序执行。这是用READ_ONCE()/WRITE_ONCE()和比较并交换原语实现的。
+
+强(完全)内存顺序保证同一CPU上所有较早的加载和存储指令(所有程序顺序较早
+[po-earlier]的指令)在执行任何程序顺序较后(po-later)的指令之前完成。它还
+保证同一CPU上程序顺序较早的存储指令和从其他CPU传播来的存储,必须在该CPU执行
+任何程序顺序较后的指令之前传播到所有其他CPU(A-累积属性)。这是用smp_mb()实现的。
+
+RELEASE内存顺序保证同一CPU上所有较早的加载和存储指令(所有程序顺序较早的
+指令)在该操作之前完成。它还保证同一CPU上程序顺序较早的存储指令和从其他CPU
+传播来的存储,必须在释放(release)操作之前传播到所有其他CPU(A-累积属性)。
+这是用smp_store_release()实现的。
+
+ACQUIRE内存顺序保证同一CPU上所有较后的加载和存储指令(所有程序顺序较后的
+指令)在获取(acquire)操作之后完成。它还保证同一CPU上所有程序顺序较后的
+存储指令必须在获取操作执行之后才传播到所有其他CPU。这是用
+smp_acquire__after_ctrl_dep()实现的。
+
+对refcounter的控制依赖(只针对成功的操作)保证:如果成功获得了一个对象的引用
+(引用计数器的递增或增加发生了,且函数返回true),那么后续的存储操作相对于该
+操作是有序的。对存储的控制依赖没有使用任何显式的屏障来实现,而是依赖于CPU不
+对存储进行投机执行。这只是一个单CPU的关系,对其他CPU不提供任何保证。
+
+
+函数的比较
+==========
+
+情况1) - 非 “读/修改/写”(RMW)操作
+------------------------------------
+
+函数变化:
+
+ * atomic_set() --> refcount_set()
+ * atomic_read() --> refcount_read()
+
+内存顺序保证变化:
+
+ * none (两者都是完全无序的)
+
+
+情况2) - 基于增量的操作,不返回任何值
+--------------------------------------
+
+函数变化:
+
+ * atomic_inc() --> refcount_inc()
+ * atomic_add() --> refcount_add()
+
+内存顺序保证变化:
+
+ * none (两者都是完全无序的)
+
+情况3) - 基于递减的RMW操作,没有返回值
+---------------------------------------
+
+函数变化:
+
+ * atomic_dec() --> refcount_dec()
+
+内存顺序保证变化:
+
+ * 完全无序的 --> RELEASE顺序
+
+
+情况4) - 基于增量的RMW操作,返回一个值
+---------------------------------------
+
+函数变化:
+
+ * atomic_inc_not_zero() --> refcount_inc_not_zero()
+ * 无原子性对应函数 --> refcount_add_not_zero()
+
+内存顺序保证变化:
+
+ * 完全有序 --> 成功时对存储的控制依赖
+
+.. note:: 此处 **假设** 必要的顺序已作为获得对象指针的结果而提供。
+
+
+情况 5) - 基于Dec/Sub递减的通用RMW操作,返回一个值
+---------------------------------------------------
+
+函数变化:
+
+ * atomic_dec_and_test() --> refcount_dec_and_test()
+ * atomic_sub_and_test() --> refcount_sub_and_test()
+
+内存顺序保证变化:
+
+ * 完全有序的 --> RELEASE顺序 + 成功后ACQUIRE顺序
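+
+下面是一个示意性草图(其中 ``struct obj`` 、 ``obj_get()`` 、 ``obj_put()``
+等名称均为假设),展示情况4和情况5中的保证如何在典型的获取/释放模式中发挥作用::
+
+	#include <linux/refcount.h>
+	#include <linux/slab.h>
+
+	struct obj {
+		refcount_t ref;
+		/* ... 对象数据 ... */
+	};
+
+	static struct obj *obj_get(struct obj *o)
+	{
+		/* 情况4:成功时,控制依赖使后续的存储有序 */
+		if (!refcount_inc_not_zero(&o->ref))
+			return NULL;	/* 对象已处于释放路径上 */
+		return o;
+	}
+
+	static void obj_put(struct obj *o)
+	{
+		/* 情况5:RELEASE顺序,成功时附加ACQUIRE顺序 */
+		if (refcount_dec_and_test(&o->ref))
+			kfree(o);
+	}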
+
+
+情况6)其他基于递减的RMW操作,返回一个值
+----------------------------------------
+
+函数变化:
+
+ * 无原子性对应函数 --> refcount_dec_if_one()
+ * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
+
+内存顺序保证变化:
+
+ * 完全有序的 --> RELEASE顺序 + 控制依赖
+
+.. note:: atomic_add_unless()只在执行成功时提供完整的顺序。
+
+
+情况7)--基于锁的RMW
+--------------------
+
+函数变化:
+
+ * atomic_dec_and_lock() --> refcount_dec_and_lock()
+ * atomic_dec_and_mutex_lock() --> refcount_dec_and_mutex_lock()
+
+内存顺序保证变化:
+
+ * 完全有序 --> RELEASE顺序 + 控制依赖 + 持有锁
diff --git a/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
new file mode 100644
index 000000000000..ce05c29c7697
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
@@ -0,0 +1,142 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/symbol-namespaces.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_symbol-namespaces.rst:
+
+
+=================================
+符号命名空间(Symbol Namespaces)
+=================================
+
+本文档描述了如何使用符号命名空间来构造通过EXPORT_SYMBOL()系列宏导出的内核内符号的导出面。
+
+.. 目录
+
+ === 1 简介
+ === 2 如何定义符号命名空间
+ --- 2.1 使用EXPORT_SYMBOL宏
+ --- 2.2 使用DEFAULT_SYMBOL_NAMESPACE定义
+ === 3 如何使用命名空间中导出的符号
+ === 4 加载使用命名空间符号的模块
+ === 5 自动创建MODULE_IMPORT_NS声明
+
+1. 简介
+=======
+
+符号命名空间已经被引入,作为构造内核内API的导出面的一种手段。它允许子系统维护者将
+他们导出的符号划分进独立的命名空间。这对于文档的编写非常有用(想想SUBSYSTEM_DEBUG
+命名空间),也可以限制一组符号在内核其他部分的使用。今后,使用导出到命名空间的符号
+的模块必须导入命名空间。否则,内核将根据其配置,拒绝加载该模块或警告说缺少
+导入。
+
+2. 如何定义符号命名空间
+=======================
+
+符号可以用不同的方法导出到命名空间。所有这些方法都改变了 EXPORT_SYMBOL 及其
+同类宏被插桩以创建 ksymtab 条目的方式。
+
+2.1 使用EXPORT_SYMBOL宏
+=======================
+
+除了允许将内核符号导出到内核符号表的宏EXPORT_SYMBOL()和EXPORT_SYMBOL_GPL()之外,
+这些宏的变体还可以将符号导出到某个命名空间:EXPORT_SYMBOL_NS() 和 EXPORT_SYMBOL_NS_GPL()。
+它们需要一个额外的参数:命名空间(the namespace)。请注意,由于宏扩展,该参数需
+要是一个预处理器符号。例如,要把符号 ``usb_stor_suspend`` 导出到命名空间 ``USB_STORAGE``,
+请使用::
+
+ EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
+
+相应的 ksymtab 条目结构体 ``kernel_symbol`` 将相应地设置其 ``namespace`` 成员。
+导出时未指明命名空间的符号,其该成员为 ``NULL`` 。如果没有定义命名空间,则默认
+没有命名空间。 ``modpost`` 和kernel/module.c分别在构建时或模块加载时使用命名空间。
+
+2.2 使用DEFAULT_SYMBOL_NAMESPACE定义
+====================================
+
+为一个子系统的所有符号定义命名空间可能会非常冗长,并可能变得难以维护。因此,我
+们提供了一个默认定义(DEFAULT_SYMBOL_NAMESPACE),如果设置了这个定义, 它将成
+为所有没有指定命名空间的 EXPORT_SYMBOL() 和 EXPORT_SYMBOL_GPL() 宏扩展的默认
+定义。
+
+有多种方法来指定这个定义,使用哪种方法取决于子系统和维护者的喜好。第一种方法是在
+子系统的 ``Makefile`` 中定义默认命名空间。例如,如果要将usb-common中定义的所有符号导
+出到USB_COMMON命名空间,可以在drivers/usb/common/Makefile中添加这样一行::
+
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
+
+这将影响所有 EXPORT_SYMBOL() 和 EXPORT_SYMBOL_GPL() 语句。当这个定义存在时,
+用EXPORT_SYMBOL_NS()导出的符号仍然会被导出到作为命名空间参数传递的命名空间中,
+因为这个参数优先于默认的符号命名空间。
+
+定义默认命名空间的第二个选项是直接在编译单元中作为预处理声明。上面的例子就会变
+成::
+
+ #undef DEFAULT_SYMBOL_NAMESPACE
+ #define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
+
+这应置于相关编译单元中任何 EXPORT_SYMBOL 宏之前。
+
+3. 如何使用命名空间中导出的符号
+===============================
+
+为了使用被导出到命名空间的符号,内核模块需要明确地导入这些命名空间。
+否则内核可能会拒绝加载该模块。模块代码需要使用宏MODULE_IMPORT_NS来
+表示它所使用的命名空间的符号。例如,一个使用usb_stor_suspend符号的
+模块,需要使用如下语句导入命名空间USB_STORAGE::
+
+ MODULE_IMPORT_NS(USB_STORAGE);
+
+这将在模块中为每个导入的命名空间创建一个 ``modinfo`` 标签。这也顺带
+使得可以用modinfo检查模块已导入的命名空间::
+
+ $ modinfo drivers/usb/storage/ums-karma.ko
+ [...]
+ import_ns: USB_STORAGE
+ [...]
+
+
+建议将 MODULE_IMPORT_NS() 语句添加到靠近其他模块元数据定义的地方,
+如 MODULE_AUTHOR() 或 MODULE_LICENSE() 。关于自动创建缺失的导入
+语句的方法,请参考第5节。
+
+4. 加载使用命名空间符号的模块
+=============================
+
+在模块加载时(比如 ``insmod`` ),内核将检查每个从模块中引用的符号是否可用,
+以及它可能被导出到的命名空间是否已被模块导入。内核的默认行为是拒绝加载那些
+未指明足够的命名空间导入的模块。此错误会被记录下来,并且加载将以EINVAL方式
+失败。要允许加载不满足这个前提条件的模块,可以使用此配置选项:设置
+MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y 将使加载不受影响,但会发出警告。
+
+5. 自动创建MODULE_IMPORT_NS声明
+===============================
+
+缺少命名空间的导入可以在构建时很容易被检测到。事实上,如果一个模块
+使用了一个命名空间的符号而没有导入它,modpost会发出警告。
+MODULE_IMPORT_NS()语句通常会被添加到一个明确的位置(和其他模块元
+数据一起)。为了使模块作者(和子系统维护者)的生活更加轻松,我们提
+供了一个脚本和make目标来修复丢失的导入。修复丢失的导入可以用::
+
+ $ make nsdeps
+
+对模块作者来说,以下情况可能很典型::
+
+ - 编写依赖未导入命名空间的符号的代码
+ - ``make``
+ - 注意 ``modpost`` 的警告,提醒你有一个丢失的导入。
+ - 运行 ``make nsdeps`` 将导入添加到正确的代码位置。
+
+对于引入命名空间的子系统维护者来说,其步骤非常相似。同样,make nsdeps最终将
+为树内模块添加缺失的命名空间导入::
+
+ - 向命名空间转移或添加符号(例如,使用EXPORT_SYMBOL_NS())。
+ - ``make`` (最好是用allmodconfig来覆盖所有的内核模块)。
+ - 注意 ``modpost`` 的警告,提醒你有一个丢失的导入。
+ - 运行 ``make nsdeps`` 将导入添加到正确的代码位置。
+
+你也可以为外部模块的构建运行nsdeps。典型的用法是::
+
+ $ make -C <path_to_kernel_src> M=$PWD nsdeps
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
new file mode 100644
index 000000000000..0b8f730db6c0
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -0,0 +1,337 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/workqueue.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_workqueue.rst:
+
+
+=========================
+并发管理的工作队列 (cmwq)
+=========================
+
+:日期: September, 2010
+:作者: Tejun Heo <tj@kernel.org>
+:作者: Florian Mickler <florian@mickler.org>
+
+
+简介
+====
+
+在很多情况下,需要一个异步进程的执行环境,工作队列(wq)API是这种情况下
+最常用的机制。
+
+当需要这样一个异步执行上下文时,一个描述将要执行的函数的工作项(work,
+即一个待执行的任务)被放在队列中。一个独立的线程作为异步执行环境。该队
+列被称为workqueue,线程被称为工作者(worker,即执行这一队列的线程)。
+
+当工作队列上有工作项时,工作者会一个接一个地执行与工作项相关的函数。当
+工作队列中没有任何工作项时,工作者就会变得空闲。当一个新的工作项被排入
+队列时,工作者又开始执行。
+
+
+为什么要cmwq?
+=============
+
+在最初的wq实现中,多线程(MT)wq在每个CPU上有一个工作者线程,而单线程
+(ST)wq在全系统有一个工作者线程。一个MT wq需要保持与CPU数量相同的工
+作者数量。这些年来,内核增加了很多MT wq的用户,随着CPU核心数量的不断
+增加,一些系统刚启动就达到了默认的32k PID的饱和空间。
+
+尽管MT wq浪费了大量的资源,但所提供的并发性水平却不能令人满意。这个限
+制在ST和MT wq中都有,只是在MT中没有那么严重。每个wq都保持着自己独立的
+工作者池。一个MT wq只能为每个CPU提供一个执行环境,而一个ST wq则为整个
+系统提供一个。工作项必须竞争这些非常有限的执行上下文,从而导致各种问题,
+包括在单一执行上下文周围容易发生死锁。
+
+(MT wq)所提供的并发性水平和资源使用之间的矛盾也迫使其用户做出不必要的权衡,比
+如libata选择使用ST wq来轮询PIO,并接受一个不必要的限制,即没有两个轮
+询PIO可以同时进行。由于MT wq并没有提供更好的并发性,需要更高层次的并
+发性的用户,如async或fscache,不得不实现他们自己的线程池。
+
+并发管理工作队列(cmwq)是对wq的重新实现,重点是以下目标。
+
+* 保持与原始工作队列API的兼容性。
+
+* 使用由所有wq共享的每CPU统一的工作者池,在不浪费大量资源的情况下按需
+  提供灵活的并发水平。
+
+* 自动调节工作者池和并发水平,使API用户不需要担心这些细节。
+
+
+设计
+====
+
+为了简化函数的异步执行,引入了一个新的抽象概念,即工作项。
+
+一个工作项是一个简单的结构,它持有一个指向将被异步执行的函数的指针。
+每当一个驱动程序或子系统希望一个函数被异步执行时,它必须建立一个指
+向该函数的工作项,并在工作队列中排队等待该工作项。(就是挂到workqueue
+队列里面去)
+
+称为工作者线程的特殊目的线程一个接一个地执行队列中的函数。如果没有工作项
+排队,工作者线程就会闲置。这些工作者线程被管理在所谓的工作者池中。
+
+cmwq的设计区分了面向用户的工作队列(子系统和驱动程序把要执行的工作项排在
+上面)和管理工作者池并处理排队工作项的后端机制。
+
+每个可能的CPU都有两个工作者池,一个用于正常的工作项,另一个用于高
+优先级的工作项,还有一些额外的工作者池,用于服务未绑定工作队列的工
+作项目——这些后备池的数量是动态的。
+
+子系统和驱动程序可以在认为合适的时候,通过特殊的 ``workqueue API`` 函数
+创建和排队工作项。它们可以通过在其放置工作项的工作队列上设置标志,来影响
+工作项执行方式的某些方面。这些标志包括诸如CPU定位、并发限制、优先级等等。
+要获得详细的概述,请参考下面的 ``alloc_workqueue()`` 的API描述。
+
+当一个工作项被排入一个工作队列时,目标工作者池将根据队列参数和工作队列属性
+确定,工作项将被附加到该工作者池的共享工作列表上。例如,除非特别重写,否则
+绑定的工作队列的工作项将被排在与发起线程所运行CPU相关的普通或高优先级工作者
+池的工作列表中。
+
+对于任何工作者池的实现,管理并发水平(有多少执行上下文处于活动状态)都是一个
+重要问题。cmwq试图将并发保持在最低但足够的水平:最低是为了节省资源,足够则是
+为了使系统被充分使用。
+
+每个与实际CPU绑定的worker-pool通过钩住调度器来实现并发管理。每当
+一个活动的工作者被唤醒或睡眠时,工作者池就会得到通知,并跟踪当前可
+运行的工作者的数量。一般来说,工作项不会占用CPU并消耗很多周期。这
+意味着保持足够的并发性以防止工作处理停滞应该是最优的。只要CPU上有
+一个或多个可运行的工作者,工作者池就不会开始执行新的工作,但是,当
+最后一个运行的工作者进入睡眠状态时,它会立即安排一个新的工作者,这
+样CPU就不会在有待处理的工作项目时闲置。这允许在不损失执行带宽的情
+况下使用最少的工作者。
+
+除了kthreads的内存空间外,保留空闲的工作者并没有其他成本,所以cmwq会把空闲
+的工作者保留一段时间,然后再杀死它们。
+
+对于非绑定的工作队列,后备池的数量是动态的。可以使用
+``apply_workqueue_attrs()`` 为非绑定工作队列分配自定义属性,
+workqueue将自动创建与属性相匹配的后备工作者池。调节并发水平的责任在
+用户身上。也有一个标志可以将绑定的wq标记为忽略并发管理。
+详情请参考API部分。
+
+前进进度的保证依赖于当需要更多的执行上下文时可以创建工作者,这也是
+通过使用救援工作者来保证的。所有可能在处理内存回收的代码路径上使用
+的工作项都需要在wq上排队,wq上保留了一个救援工作者,以便在内存有压
+力的情况下执行。否则,工作者池就有可能出现死锁,等待执行上下文释
+放出来。
+
+
+应用程序编程接口 (API)
+======================
+
+``alloc_workqueue()`` 分配了一个wq。原来的 ``create_*workqueue()``
+函数已被废弃,并计划删除。 ``alloc_workqueue()`` 需要三个
+参数 - ``@name`` , ``@flags`` 和 ``@max_active`` 。
+``@name`` 是wq的名称,如果有的话,也用作救援线程的名称。
+
+一个wq不再管理执行资源,而是作为前进进度保证、刷新(flush)和
+工作项属性的域。 ``@flags`` 和 ``@max_active`` 控制着工作
+项如何被分配执行资源、安排和执行。
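+
+下面是一个最小的示意性用法(其中 ``example_wq`` 、 ``example_work_fn()`` 等
+名称均为假设),展示分配wq、初始化并排队一个工作项的典型流程::
+
+	#include <linux/workqueue.h>
+
+	static struct workqueue_struct *example_wq;
+	static struct work_struct example_work;
+
+	static void example_work_fn(struct work_struct *work)
+	{
+		/* 在工作者线程的进程上下文中异步执行 */
+	}
+
+	static int example_setup(void)
+	{
+		example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
+		if (!example_wq)
+			return -ENOMEM;
+
+		INIT_WORK(&example_work, example_work_fn);
+		queue_work(example_wq, &example_work);
+		return 0;
+	}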
+
+
+``flags``
+---------
+
+``WQ_UNBOUND``
+ 排队到非绑定wq的工作项由特殊的工作者池提供服务,这些工作者不
+ 绑定在任何特定的CPU上。这使得wq表现得像一个简单的执行环境提
+ 供者,没有并发管理。非绑定工作者池试图尽快开始执行工作项。非
+ 绑定的wq牺牲了局部性,但在以下情况下是有用的。
+
+ * 预计并发水平要求会有很大的波动,使用绑定的wq最终可能会在不
+ 同的CPU上产生大量大部分未使用的工作者,因为发起线程在不同
+ 的CPU上跳转。
+
+ * 长期运行的CPU密集型工作负载,可以由系统调度器更好地管理。
+
+``WQ_FREEZABLE``
+ 一个可冻结的wq参与了系统暂停操作的冻结阶段。wq上的工作项被
+ 排空,在解冻之前没有新的工作项开始执行。
+
+``WQ_MEM_RECLAIM``
+ 所有可能在内存回收路径中使用的wq都必须设置这个标志。无论内
+ 存压力如何,wq都能保证至少有一个执行上下文。
+
+``WQ_HIGHPRI``
+ 高优先级wq的工作项被排到目标CPU的高优先级工作者池中。高优
+ 先级的工作者池由具有较高调度优先级的工作者线程提供服务。
+
+ 请注意,普通工作者池和高优先级工作者池之间并不相互影响。他
+ 们各自维护其独立的工作者池,并在其工作者之间实现并发管理。
+
+``WQ_CPU_INTENSIVE``
+ CPU密集型wq的工作项对并发水平没有贡献。换句话说,可运行的
+ CPU密集型工作项不会阻止同一工作者池中的其他工作项开始执行。
+ 这对于那些预计会占用CPU周期的绑定工作项很有用,这样它们的
+ 执行就会受到系统调度器的监管。
+
+ 尽管CPU密集型工作项不会对并发水平做出贡献,但它们的执行开
+ 始仍然受到并发管理的管制,可运行的非CPU密集型工作项会延迟
+ CPU密集型工作项的执行。
+
+ 这个标志对于未绑定的wq来说是没有意义的。
+
+请注意,标志 ``WQ_NON_REENTRANT`` 不再存在,因为现在所有的工作
+队列都是不可重入的——任何工作项都保证在任何时刻最多被整个系统中的一
+个工作者执行。
+
+
+``max_active``
+--------------
+
+``@max_active`` 决定了每个CPU可以分配给wq的工作项的最大执行上
+下文数量。例如,如果 ``@max_active`` 为16,每个CPU最多可以同
+时执行16个该wq的工作项。
+
+目前,对于一个绑定的wq, ``@max_active`` 的最大限制是512,当指
+定为0时使用的默认值是256。对于非绑定的wq,其限制是512和
+4 * ``num_possible_cpus()`` 中的较高值。这些值被选得足够高,所
+以它们不是限制性因素,同时会在失控情况下提供保护。
+
+一个wq的活动工作项的数量通常由wq的用户来调节,更具体地说,是由用
+户在同一时间可以排入多少个工作项来调节。除非有特定的需求来控制活动
+工作项的数量,否则建议指定为0。
+
+一些用户依赖于ST wq的严格执行顺序。 ``@max_active`` 为1和 ``WQ_UNBOUND``
+的组合用来实现这种行为。这种wq上的工作项目总是被排到未绑定的工作池
+中,并且在任何时候都只有一个工作项目处于活动状态,从而实现与ST wq相
+同的排序属性。
+
+在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
+``alloc_ordered_queue()`` 应该被用来实现全系统的ST行为。
+
+
+执行场景示例
+============
+
+下面的示例执行场景试图说明cmwq在不同配置下的行为。
+
+ 工作项w0、w1、w2被排到同一个CPU上的一个绑定的wq q0上。w0
+ 消耗CPU 5ms,然后睡眠10ms,然后在完成之前再次消耗CPU 5ms。
+
+忽略所有其他的任务、工作和处理开销,并假设简单的FIFO调度,
+下面是一个高度简化的、使用原始wq时可能的事件序列::
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 starts and burns CPU
+ 25 w1 sleeps
+ 35 w1 wakes up and finishes
+ 35 w2 starts and burns CPU
+ 40 w2 sleeps
+ 50 w2 wakes up and finishes
+
+而使用cmwq且 ``@max_active`` >= 3时::
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 10 w2 starts and burns CPU
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+如果 ``@max_active`` == 2::
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 20 w2 starts and burns CPU
+ 25 w2 sleeps
+ 35 w2 wakes up and finishes
+
+现在,我们假设w1和w2被排到了不同的wq q1上,这个wq q1
+有 ``WQ_CPU_INTENSIVE`` 设置::
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 and w2 start and burn CPU
+ 10 w1 sleeps
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+
+指南
+====
+
+* 如果一个wq可能处理在内存回收期间使用的工作项目,请不
+ 要忘记使用 ``WQ_MEM_RECLAIM`` 。每个设置了
+ ``WQ_MEM_RECLAIM`` 的wq都有一个为其保留的执行环境。
+ 如果在内存回收过程中使用的多个工作项之间存在依赖关系,
+ 它们应该被排在不同的wq中,每个wq都有 ``WQ_MEM_RECLAIM`` 。
+
+* 除非需要严格排序,否则没有必要使用ST wq。
+
+* 除非有特殊需要,建议使用0作为@max_active。在大多数使用情
+ 况下,并发水平通常保持在默认限制之下。
+
+* 一个wq作为前进进度保证( ``WQ_MEM_RECLAIM`` )、刷新(flush)和工
+  作项属性的域。不涉及内存回收、不需要作为工作项组的一部分被刷新、也
+  不需要任何特殊属性的工作项,可以使用系统内的某个wq。使用专用wq和系
+  统wq在执行特性上没有区别。
+
+* 除非工作项预计会消耗大量的CPU周期,否则使用绑定的wq通常是有
+  益的,因为wq操作和工作项执行的局部性水平更高。
+
+
+调试
+====
+
+因为工作函数是由通用的工作者线程执行的,所以需要一些手段来揭示一些行为不端的工作队列用户。
+
+工作者线程在进程列表中显示为::
+
+ root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1]
+ root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2]
+ root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0]
+ root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0]
+
+如果kworker失控了(占用了太多CPU),有两类可能的问题:
+
+	1. 某些工作正在被快速地反复调度;
+	2. 某个工作项消耗了大量的CPU周期。
+
+第一类问题可以用追踪(tracing)来跟踪::
+
+ $ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event
+ $ cat /sys/kernel/debug/tracing/trace_pipe > out.txt
+ (wait a few secs)
+
+如果有什么东西在忙于循环地排队工作项,它就会主导输出,此时可以通过工作项函数确定违规者。
+
+对于第二类问题,应该可以只检查违规工作者线程的堆栈跟踪::
+
+ $ cat /proc/THE_OFFENDING_KWORKER/stack
+
+工作项的函数应该可以很容易地在堆栈跟踪中看到。
+
+
+内核内联文档参考
+================
+
+该API在以下内核代码中:
+
+include/linux/workqueue.h
+
+kernel/workqueue.c
diff --git a/Documentation/translations/zh_CN/dev-tools/index.rst b/Documentation/translations/zh_CN/dev-tools/index.rst
index fd73c479917b..e6c99f2f543f 100644
--- a/Documentation/translations/zh_CN/dev-tools/index.rst
+++ b/Documentation/translations/zh_CN/dev-tools/index.rst
@@ -19,13 +19,13 @@
:maxdepth: 2
gcov
+ kasan
Todolist:
- coccinelle
- sparse
- kcov
- - kasan
- ubsan
- kmemleak
- kcsan
diff --git a/Documentation/translations/zh_CN/dev-tools/kasan.rst b/Documentation/translations/zh_CN/dev-tools/kasan.rst
new file mode 100644
index 000000000000..23db9d419047
--- /dev/null
+++ b/Documentation/translations/zh_CN/dev-tools/kasan.rst
@@ -0,0 +1,417 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/dev-tools/kasan.rst
+:Translator: 万家兵 Wan Jiabing <wanjiabing@vivo.com>
+
+内核地址消毒剂(KASAN)
+=====================
+
+概述
+----
+
+KernelAddressSANitizer(KASAN)是一种动态内存安全错误检测工具,主要功能是
+检查内存越界访问和使用已释放内存的问题。KASAN有三种模式:
+
+1. 通用KASAN(与用户空间的ASan类似)
+2. 基于软件标签的KASAN(与用户空间的HWASan类似)
+3. 基于硬件标签的KASAN(基于硬件内存标签)
+
+由于通用KASAN的内存开销较大,通用KASAN主要用于调试。基于软件标签的KASAN
+可用于dogfood测试,因为它具有较低的内存开销,并允许将其用于实际工作负载。
+基于硬件标签的KASAN具有较低的内存和性能开销,因此可用于生产。同时可用于
+检测现场内存问题或作为安全缓解措施。
+
+软件KASAN模式(#1和#2)使用编译时工具在每次内存访问之前插入有效性检查,
+因此需要一个支持它的编译器版本。
+
+通用KASAN在GCC和Clang受支持。GCC需要8.3.0或更高版本。任何受支持的Clang
+版本都是兼容的,但从Clang 11才开始支持检测全局变量的越界访问。
+
+基于软件标签的KASAN模式仅在Clang中受支持。
+
+硬件KASAN模式(#3)依赖硬件来执行检查,但仍需要支持内存标签指令的编译器
+版本。GCC 10+和Clang 11+支持此模式。
+
+两种软件KASAN模式都适用于SLUB和SLAB内存分配器,而基于硬件标签的KASAN目前
+仅支持SLUB。
+
+目前x86_64、arm、arm64、xtensa、s390、riscv架构支持通用KASAN模式,仅
+arm64架构支持基于标签的KASAN模式。
+
+用法
+----
+
+要启用KASAN,请使用以下命令配置内核::
+
+ CONFIG_KASAN=y
+
+同时在 ``CONFIG_KASAN_GENERIC`` (启用通用KASAN模式)、 ``CONFIG_KASAN_SW_TAGS``
+(启用基于软件标签的KASAN模式)和 ``CONFIG_KASAN_HW_TAGS`` (启用基于硬件标签
+的KASAN模式)之间进行选择。
+
+对于软件模式,还可以在 ``CONFIG_KASAN_OUTLINE`` 和 ``CONFIG_KASAN_INLINE``
+之间进行选择。outline和inline是编译器插桩类型。前者产生较小的二进制文件,
+而后者快1.1-2倍。
+
+要将受影响的slab对象的alloc和free堆栈跟踪包含到报告中,请启用
+``CONFIG_STACKTRACE`` 。要包括受影响物理页面的分配和释放堆栈跟踪的话,
+请启用 ``CONFIG_PAGE_OWNER`` 并使用 ``page_owner=on`` 进行引导。
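+
+例如,一个可能的通用KASAN调试配置组合(仅为示意)::
+
+	CONFIG_KASAN=y
+	CONFIG_KASAN_GENERIC=y
+	CONFIG_KASAN_INLINE=y
+	CONFIG_STACKTRACE=y
+	CONFIG_PAGE_OWNER=y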
+
+错误报告
+~~~~~~~~
+
+典型的KASAN报告如下所示::
+
+ ==================================================================
+ BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
+ Write of size 1 at addr ffff8801f44ec37b by task insmod/2760
+
+ CPU: 1 PID: 2760 Comm: insmod Not tainted 4.19.0-rc3+ #698
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
+ Call Trace:
+ dump_stack+0x94/0xd8
+ print_address_description+0x73/0x280
+ kasan_report+0x144/0x187
+ __asan_report_store1_noabort+0x17/0x20
+ kmalloc_oob_right+0xa8/0xbc [test_kasan]
+ kmalloc_tests_init+0x16/0x700 [test_kasan]
+ do_one_initcall+0xa5/0x3ae
+ do_init_module+0x1b6/0x547
+ load_module+0x75df/0x8070
+ __do_sys_init_module+0x1c6/0x200
+ __x64_sys_init_module+0x6e/0xb0
+ do_syscall_64+0x9f/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f96443109da
+ RSP: 002b:00007ffcf0b51b08 EFLAGS: 00000202 ORIG_RAX: 00000000000000af
+ RAX: ffffffffffffffda RBX: 000055dc3ee521a0 RCX: 00007f96443109da
+ RDX: 00007f96445cff88 RSI: 0000000000057a50 RDI: 00007f9644992000
+ RBP: 000055dc3ee510b0 R08: 0000000000000003 R09: 0000000000000000
+ R10: 00007f964430cd0a R11: 0000000000000202 R12: 00007f96445cff88
+ R13: 000055dc3ee51090 R14: 0000000000000000 R15: 0000000000000000
+
+ Allocated by task 2760:
+ save_stack+0x43/0xd0
+ kasan_kmalloc+0xa7/0xd0
+ kmem_cache_alloc_trace+0xe1/0x1b0
+ kmalloc_oob_right+0x56/0xbc [test_kasan]
+ kmalloc_tests_init+0x16/0x700 [test_kasan]
+ do_one_initcall+0xa5/0x3ae
+ do_init_module+0x1b6/0x547
+ load_module+0x75df/0x8070
+ __do_sys_init_module+0x1c6/0x200
+ __x64_sys_init_module+0x6e/0xb0
+ do_syscall_64+0x9f/0x2c0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ Freed by task 815:
+ save_stack+0x43/0xd0
+ __kasan_slab_free+0x135/0x190
+ kasan_slab_free+0xe/0x10
+ kfree+0x93/0x1a0
+ umh_complete+0x6a/0xa0
+ call_usermodehelper_exec_async+0x4c3/0x640
+ ret_from_fork+0x35/0x40
+
+ The buggy address belongs to the object at ffff8801f44ec300
+ which belongs to the cache kmalloc-128 of size 128
+ The buggy address is located 123 bytes inside of
+ 128-byte region [ffff8801f44ec300, ffff8801f44ec380)
+ The buggy address belongs to the page:
+ page:ffffea0007d13b00 count:1 mapcount:0 mapping:ffff8801f7001640 index:0x0
+ flags: 0x200000000000100(slab)
+ raw: 0200000000000100 ffffea0007d11dc0 0000001a0000001a ffff8801f7001640
+ raw: 0000000000000000 0000000080150015 00000001ffffffff 0000000000000000
+ page dumped because: kasan: bad access detected
+
+ Memory state around the buggy address:
+ ffff8801f44ec200: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+ ffff8801f44ec280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+ >ffff8801f44ec300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03
+ ^
+ ffff8801f44ec380: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+ ffff8801f44ec400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+ ==================================================================
+
+报告标题总结了发生的错误类型以及导致该错误的访问类型。紧随其后的是错误访问的
+堆栈跟踪、所访问内存分配位置的堆栈跟踪(对于访问了slab对象的情况)以及对象
+被释放的位置的堆栈跟踪(对于访问已释放内存的问题报告)。接下来是对访问的
+slab对象的描述以及关于访问的内存页的信息。
+
+最后,报告展示了访问地址周围的内存状态。在内部,KASAN单独跟踪每个内存颗粒的
+内存状态,根据KASAN模式分为8或16个对齐字节。报告的内存状态部分中的每个数字
+都显示了围绕访问地址的其中一个内存颗粒的状态。
+
+对于通用KASAN,每个内存颗粒的大小为8个字节。每个颗粒的状态被编码在一个影子字节
+中。这8个字节可以是可访问的,部分访问的,已释放的或成为Redzone的一部分。KASAN
+对每个影子字节使用以下编码:00表示对应内存区域的所有8个字节都可以访问;数字N
+(1 <= N <= 7)表示前N个字节可访问,其他(8 - N)个字节不可访问;任何负值都表示
+无法访问整个8字节。KASAN使用不同的负值来区分不同类型的不可访问内存,如redzones
+或已释放的内存(参见 mm/kasan/kasan.h)。
+
+在上面的报告中,箭头指向影子字节 ``03`` ,表示访问的地址是部分可访问的。
+
+对于基于标签的KASAN模式,报告最后的部分显示了访问地址周围的内存标签
+(参考 `实施细则`_ 章节)。
+
+请注意,KASAN错误标题(如 ``slab-out-of-bounds`` 或 ``use-after-free`` )
+是尽力而为的:KASAN根据其拥有的有限信息打印出最可能的错误类型。错误的实际
+类型可能会有所不同。
+
+通用KASAN还报告最多两个辅助调用堆栈跟踪。这些堆栈跟踪指向代码中与对象交互但不直接
+出现在错误访问堆栈跟踪中的位置。目前,这包括 call_rcu() 和排队的工作队列。
+
+启动参数
+~~~~~~~~
+
+KASAN受通用 ``panic_on_warn`` 命令行参数的影响。启用该功能后,KASAN在打印错误
+报告后会引起内核恐慌。
+
+默认情况下,KASAN只为第一次无效内存访问打印错误报告。使用 ``kasan_multi_shot`` ,
+KASAN会针对每个无效访问打印报告。这有效地禁用了KASAN报告的 ``panic_on_warn`` 。
+
+基于硬件标签的KASAN模式(请参阅下面有关各种模式的部分)旨在在生产中用作安全缓解
+措施。因此,它支持允许禁用KASAN或控制其功能的引导参数。
+
+- ``kasan=off`` 或 ``=on`` 控制KASAN是否启用 (默认: ``on`` )。
+
+- ``kasan.mode=sync`` 或 ``=async`` 控制KASAN是否配置为同步或异步执行模式(默认:
+ ``sync`` )。同步模式:当标签检查错误发生时,立即检测到错误访问。异步模式:
+ 延迟错误访问检测。当标签检查错误发生时,信息存储在硬件中(在arm64的
+ TFSR_EL1寄存器中)。内核会定期检查硬件,并且仅在这些检查期间报告标签错误。
+
+- ``kasan.stacktrace=off`` 或 ``=on`` 禁用或启用alloc和free堆栈跟踪收集
+ (默认: ``on`` )。
+
+- ``kasan.fault=report`` 或 ``=panic`` 控制是只打印KASAN报告还是同时使内核恐慌
+ (默认: ``report`` )。即使启用了 ``kasan_multi_shot`` ,也会发生内核恐慌。
+
+实施细则
+--------
+
+通用KASAN
+~~~~~~~~~
+
+软件KASAN模式使用影子内存来记录每个内存字节是否可以安全访问,并使用编译时工具
+在每次内存访问之前插入影子内存检查。
+
+通用KASAN将1/8的内核内存专用于其影子内存(16TB以覆盖x86_64上的128TB),并使用
+具有比例和偏移量的直接映射将内存地址转换为其相应的影子地址。
+
+这是将地址转换为其相应影子地址的函数::
+
+ static inline void *kasan_mem_to_shadow(const void *addr)
+ {
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+ }
+
+在这里 ``KASAN_SHADOW_SCALE_SHIFT = 3`` 。
+
+编译时工具用于插入内存访问检查。编译器在每次访问大小为1、2、4、8或16的内存之前
+插入函数调用( ``__asan_load*(addr)`` , ``__asan_store*(addr)``)。这些函数通过
+检查相应的影子内存来检查内存访问是否有效。
+
+使用inline插桩,编译器不进行函数调用,而是直接插入代码来检查影子内存。此选项
+显著地增大了内核体积,但与outline插桩内核相比,它提供了x1.1-x2的性能提升。
+
+通用KASAN是唯一一种通过隔离延迟重新使用已释放对象的模式
+(参见 mm/kasan/quarantine.c 以了解实现)。
+
+基于软件标签的KASAN模式
+~~~~~~~~~~~~~~~~~~~~~~~
+
+基于软件标签的KASAN使用软件内存标签方法来检查访问有效性。目前仅针对arm64架构实现。
+
+基于软件标签的KASAN使用arm64 CPU的顶部字节忽略(TBI)特性在内核指针的顶部字节中
+存储一个指针标签。它使用影子内存来存储与每个16字节内存单元相关的内存标签(因此,
+它将内核内存的1/16专用于影子内存)。
+
+在每次内存分配时,基于软件标签的KASAN都会生成一个随机标签,用这个标签标记分配
+的内存,并将相同的标签嵌入到返回的指针中。
+
+基于软件标签的KASAN使用编译时工具在每次内存访问之前插入检查。这些检查确保正在
+访问的内存的标签等于用于访问该内存的指针的标签。如果标签不匹配,基于软件标签
+的KASAN会打印错误报告。
+
+基于软件标签的KASAN也有两种插桩模式(outline,发出回调来检查内存访问;inline,
+执行内联的影子内存检查)。使用outline插桩模式,会从执行访问检查的函数打印错误
+报告。使用inline插桩,编译器会发出 ``brk`` 指令,并使用专用的 ``brk`` 处理程序
+来打印错误报告。
+
+基于软件标签的KASAN使用0xFF作为匹配所有指针标签(不检查通过带有0xFF指针标签
+的指针进行的访问)。值0xFE当前保留用于标记已释放的内存区域。
+
+基于软件标签的KASAN目前仅支持对Slab和page_alloc内存进行标记。
+
+基于硬件标签的KASAN模式
+~~~~~~~~~~~~~~~~~~~~~~~
+
+基于硬件标签的KASAN在概念上类似于软件模式,但它是使用硬件内存标签作为支持而
+不是编译器插桩和影子内存。
+
+基于硬件标签的KASAN目前仅针对arm64架构实现,并且基于ARMv8.5指令集架构中引入
+的arm64内存标记扩展(MTE)和最高字节忽略(TBI)。
+
+特殊的arm64指令用于为每次内存分配指定内存标签。相同的标签被指定给指向这些分配
+的指针。在每次内存访问时,硬件确保正在访问的内存的标签等于用于访问该内存的指针
+的标签。如果标签不匹配,则会生成故障并打印报告。
+
+基于硬件标签的KASAN使用0xFF作为匹配所有指针标签(不检查通过带有0xFF指针标签的
+指针进行的访问)。值0xFE当前保留用于标记已释放的内存区域。
+
+基于硬件标签的KASAN目前仅支持对Slab和page_alloc内存进行标记。
+
+如果硬件不支持MTE(ARMv8.5之前),则不会启用基于硬件标签的KASAN。在这种情况下,
+所有KASAN引导参数都将被忽略。
+
+请注意,启用CONFIG_KASAN_HW_TAGS始终会导致内核中的TBI被启用;即使提供了
+``kasan.mode=off`` 或硬件不支持MTE(但支持TBI)时也是如此。
+
+基于硬件标签的KASAN只报告第一个发现的错误。之后,MTE标签检查将被禁用。
+
+影子内存
+--------
+
+内核将内存映射到地址空间的几个不同部分。内核虚拟地址的范围很大:没有足够的真实
+内存来支持内核可以访问的每个地址的真实影子区域。因此,KASAN只为地址空间的某些
+部分映射真实的影子。
+
+默认行为
+~~~~~~~~
+
+默认情况下,体系结构仅将真实内存映射到用于线性映射的影子区域(以及可能的其他
+小区域)。对于所有其他区域——例如vmalloc和vmemmap空间——一个只读页面被映射
+到影子区域上。这个只读的影子页面声明所有内存访问都是允许的。
+
+这给模块带来了一个问题:它们不存在于线性映射中,而是存在于专用的模块空间中。
+通过连接模块分配器,KASAN临时映射真实的影子内存以覆盖它们。例如,这允许检测
+对模块全局变量的无效访问。
+
+这也造成了与 ``VMAP_STACK`` 的不兼容:如果堆栈位于vmalloc空间中,它将被分配
+只读页面的影子内存,并且内核在尝试为堆栈变量设置影子数据时会出错。
+
+CONFIG_KASAN_VMALLOC
+~~~~~~~~~~~~~~~~~~~~
+
+使用 ``CONFIG_KASAN_VMALLOC`` ,KASAN可以以更大的内存使用为代价覆盖vmalloc
+空间。目前,这在x86、riscv、s390和powerpc上受支持。
+
+这通过连接到vmalloc和vmap并动态分配真实的影子内存来支持映射。
+
+vmalloc空间中的大多数映射都很小,需要不到一整页的影子空间。因此,为每个映射
+分配一个完整的影子页面将是一种浪费。此外,为了确保不同的映射使用不同的影子
+页面,映射必须与 ``KASAN_GRANULE_SIZE * PAGE_SIZE`` 对齐。
+
+相反,KASAN跨多个映射共享后备空间。当vmalloc空间中的映射使用影子区域的特定
+页面时,它会分配一个后备页面。此页面稍后可以由其他vmalloc映射共享。
+
+KASAN连接到vmap基础架构以懒清理未使用的影子内存。
+
+为了避免交换映射的困难,KASAN预期覆盖vmalloc空间的那部分影子区域不会被早期
+影子页面覆盖,但也不会被映射。这将需要修改特定于体系结构的代码。
+
+这允许在x86上支持 ``VMAP_STACK`` ,并且可以简化对没有固定模块区域的架构的支持。
+
+对于开发者
+----------
+
+忽略访问
+~~~~~~~~
+
+软件KASAN模式使用编译器插桩来插入有效性检查。此类检测可能与内核的某些部分
+不兼容,因此需要禁用。
+
+内核的其他部分可能会访问已分配对象的元数据。通常,KASAN会检测并报告此类访问,
+但在某些情况下(例如,在内存分配器中),这些访问是有效的。
+
+对于软件KASAN模式,要禁用特定文件或目录的检测,请将 ``KASAN_SANITIZE`` 添加
+到相应的内核Makefile中:
+
+- 对于单个文件(例如,main.o)::
+
+ KASAN_SANITIZE_main.o := n
+
+- 对于一个目录下的所有文件::
+
+ KASAN_SANITIZE := n
+
+对于软件KASAN模式,要在每个函数的基础上禁用检测,请使用KASAN特定的
+``__no_sanitize_address`` 函数属性或通用的 ``noinstr`` 。
+
+请注意,禁用编译器插桩(基于每个文件或每个函数)会使KASAN忽略在软件KASAN模式
+的代码中直接发生的访问。当访问是间接发生的(通过调用检测函数)或使用没有编译器
+插桩的基于硬件标签的模式时,它没有帮助。
+
+对于软件KASAN模式,要在当前任务的一部分内核代码中禁用KASAN报告,请用
+``kasan_disable_current()``/``kasan_enable_current()`` 区段注释这部分代码。
+这也会禁用通过函数调用发生的间接访问的报告。
+
+对于基于标签的KASAN模式(包括硬件模式),要禁用访问检查,请使用
+``kasan_reset_tag()`` 或 ``page_kasan_tag_reset()`` 。请注意,通过
+``page_kasan_tag_reset()`` 临时禁用访问检查需要通过 ``page_kasan_tag``
+/ ``page_kasan_tag_set`` 保存和恢复每页KASAN标签。
+
+测试
+~~~~
+
+有一些KASAN测试可以验证KASAN是否正常工作并可以检测某些类型的内存损坏。
+测试由两部分组成:
+
+1. 与KUnit测试框架集成的测试。使用 ``CONFIG_KASAN_KUNIT_TEST`` 启用。这些
+   测试可以通过几种不同的方式自动运行和部分验证;请参阅下面的说明。
+
+2. 与KUnit不兼容的测试。使用 ``CONFIG_KASAN_MODULE_TEST`` 启用并且只能作为
+   模块运行。这些测试只能通过加载内核模块并检查内核日志以获取KASAN报告来
+   手动验证。
+
+如果检测到错误,每个KUnit兼容的KASAN测试都会打印多个KASAN报告之一,然后测试打印
+其编号和状态。
+
+当测试通过::
+
+ ok 28 - kmalloc_double_kzfree
+
+当由于 ``kmalloc`` 失败而导致测试失败时::
+
+ # kmalloc_large_oob_right: ASSERTION FAILED at lib/test_kasan.c:163
+ Expected ptr is not null, but is
+ not ok 4 - kmalloc_large_oob_right
+
+当由于缺少KASAN报告而导致测试失败时::
+
+ # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629
+ Expected kasan_data->report_expected == kasan_data->report_found, but
+ kasan_data->report_expected == 1
+ kasan_data->report_found == 0
+ not ok 28 - kmalloc_double_kzfree
+
+最后打印所有KASAN测试的累积状态。成功::
+
+ ok 1 - kasan
+
+或者,如果其中一项测试失败::
+
+ not ok 1 - kasan
+
+有几种方法可以运行与KUnit兼容的KASAN测试。
+
+1. 可加载模块
+
+ 启用 ``CONFIG_KUNIT`` 后,KASAN-KUnit测试可以构建为可加载模块,并通过使用
+ ``insmod`` 或 ``modprobe`` 加载 ``test_kasan.ko`` 来运行。
+
+2. 内置
+
+ 通过内置 ``CONFIG_KUNIT`` ,也可以内置KASAN-KUnit测试。在这种情况下,
+ 测试将在启动时作为后期初始化调用运行。
+
+3. 使用kunit_tool
+
+ 通过内置 ``CONFIG_KUNIT`` 和 ``CONFIG_KASAN_KUNIT_TEST`` ,还可以使用
+ ``kunit_tool`` 以更易读的方式查看KUnit测试结果。这不会打印通过测试
+ 的KASAN报告。有关 ``kunit_tool`` 更多最新信息,请参阅
+ `KUnit文档 <https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html>`_ 。
+
+.. _KUnit: https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index d56d6b7092e6..1f953d3439a5 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -4,6 +4,7 @@
\renewcommand\thesection*
\renewcommand\thesubsection*
+ \kerneldocCJKon
.. _linux_doc_zh:
@@ -72,11 +73,11 @@ TODOlist:
dev-tools/index
doc-guide/index
kernel-hacking/index
+ maintainer/index
TODOList:
* trace/index
-* maintainer/index
* fault-injection/index
* livepatch/index
* rust/index
@@ -153,6 +154,7 @@ TODOList:
arm64/index
riscv/index
openrisc/index
+ parisc/index
TODOList:
@@ -160,7 +162,6 @@ TODOList:
* ia64/index
* m68k/index
* nios2/index
-* parisc/index
* powerpc/index
* s390/index
* sh/index
diff --git a/Documentation/translations/zh_CN/maintainer/configure-git.rst b/Documentation/translations/zh_CN/maintainer/configure-git.rst
new file mode 100644
index 000000000000..a45ea736f73b
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/configure-git.rst
@@ -0,0 +1,62 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/configure-git.rst
+
+:译者:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+.. _configuregit_zh:
+
+Git配置
+=======
+
+本章讲述了维护者级别的git配置。
+
+Documentation/maintainer/pull-requests.rst 中使用的标记分支应使用开发人员的
+GPG公钥进行签名。可以通过将 ``-u`` 标志传递给 ``git tag`` 来创建签名标记。
+但是,由于 *通常* 对同一项目使用同一个密钥,因此可以设置::
+
+ git config user.signingkey "keyname"
+
+或者手动编辑你的 ``.git/config`` 或 ``~/.gitconfig`` 文件::
+
+ [user]
+ name = Jane Developer
+ email = jd@domain.org
+ signingkey = jd@domain.org
+
+你可能需要告诉 ``git`` 去使用 ``gpg2``::
+
+ [gpg]
+ program = /path/to/gpg2
+
+你可能也需要告诉 ``gpg`` 去使用哪个 ``tty`` (添加到你的shell rc文件中)::
+
+ export GPG_TTY=$(tty)
+
+
+创建链接到lore.kernel.org的提交
+-------------------------------
+
+http://lore.kernel.org 网站是所有涉及或影响内核开发的邮件列表的总存档。在这里
+存储补丁存档是推荐的做法,当维护人员将补丁应用到子系统树时,最好提供一个指向
+lore存档链接的标签,以便浏览提交历史的人可以找到某个更改背后的相关讨论和基本
+原理。链接标签如下所示::
+
+ Link: https://lore.kernel.org/r/<message-id>
+
+通过在git中添加以下钩子,可以将其配置为在运行 ``git am`` 时自动完成:
+
+.. code-block:: none
+
+ $ git config am.messageid true
+ $ cat >.git/hooks/applypatch-msg <<'EOF'
+ #!/bin/sh
+ . git-sh-setup
+ perl -pi -e 's|^Message-Id:\s*<?([^>]+)>?$|Link: https://lore.kernel.org/r/$1|g;' "$1"
+ test -x "$GIT_DIR/hooks/commit-msg" &&
+ exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"}
+ :
+ EOF
+ $ chmod a+x .git/hooks/applypatch-msg
diff --git a/Documentation/translations/zh_CN/maintainer/index.rst b/Documentation/translations/zh_CN/maintainer/index.rst
new file mode 100644
index 000000000000..eb75ccea9a21
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/index.rst
@@ -0,0 +1,21 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/index.rst
+
+==============
+内核维护者手册
+==============
+
+本文档是内核维护者手册的首页。
+本手册还需要大量完善!请自由提出(和编写)本手册的补充内容。
+*译注:指英文原版*
+
+.. toctree::
+ :maxdepth: 2
+
+ configure-git
+ rebasing-and-merging
+ pull-requests
+ maintainer-entry-profile
+ modifying-patches
+
diff --git a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst
new file mode 100644
index 000000000000..a1ee99c4786e
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst
@@ -0,0 +1,92 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/maintainer-entry-profile.rst
+
+:译者:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+.. _maintainerentryprofile_zh:
+
+维护者条目概要
+==============
+
+维护人员条目概要补充了顶层过程文档(提交补丁,提交驱动程序……),增加了子系
+统/设备驱动程序本地习惯以及有关补丁提交生命周期的相关内容。贡献者使用此文档
+来调整他们的期望和避免常见错误;维护人员可以利用这些信息,跨子系统地查看
+是否有机会向通用实践靠拢。
+
+
+总览
+----
+
+提供子系统如何运作的介绍。MAINTAINERS文件告诉了贡献者应把某文件的补丁发送到
+哪里,但它没有传达子系统本地的、有助于开发的其他基础设施和机制。
+
+请考虑以下问题:
+
+- 当补丁被本地树接纳或合并到上游时是否有通知?
+- 子系统是否使用patchwork实例?Patchwork状态变更是否有通知?
+- 是否有任何机器人或CI基础设施监视列表,或子系统是否使用自动测试反馈以便把
+ 控接纳补丁?
+- 被拉入-next的Git分支是哪个?
+- 贡献者应针对哪个分支提交?
+- 是否链接到其他维护者条目概要?例如一个设备驱动可能指向其父子系统的条目。
+ 这使得贡献者意识到某维护者可能对提交链中其他维护者负有的义务。
+
+
+提交检查单补遗
+--------------
+
+列出超出通用“提交检查单”的强制性和建议性标准,以便维护者检查一个补丁是否
+足够健康。例如:“通过checkpatch.pl,没有错误、没有警告。通过单元测试,详见某处”。
+
+提交检查单补遗还可以包括有关硬件规格状态的详细信息。例如,子系统接受补丁之前
+是否需要考虑在某个修订版上发布的规范。
+
+
+开发周期的关键日期
+------------------
+
+提交者常常会误以为补丁可以在合并窗口关闭之前的任何时间发送,且下一个-rc1时仍
+可以。事实上,大多数补丁都需要在下一个合并窗口打开之前提前进入linux-next中。
+向提交者澄清关键日期(以-rc发布周为标志)以明确什么时候补丁会被考虑合并以及
+何时需要等待下一个-rc。
+
+至少需要讲明:
+
+- 最后一个可以提交新功能的-rc:
+ 针对下一个合并窗口的新功能提交应该在此点之前首次发布以供考虑。在此时间点
+ 之后提交的补丁应该明确他们的目标为下下个合并窗口,或者给出应加快进度被接受
+ 的充足理由。通常新特性贡献者的提交应出现在-rc5之前。
+
+- 最后合并-rc:合并决策的最后期限。
+ 向贡献者指出尚未接受的补丁集需要等待下下个合并窗口。当然,维护者没有义务
+ 接受所有给定的补丁集,但是如果审阅在此时间点尚未结束,那么希望贡献者应该
+ 等待并在下一个合并窗口重新提交。
+
+可选项:
+
+- 开发基线分支的首个-rc,列在概述部分,视为已为新提交做好准备。
+
+
+审阅节奏
+--------
+
+贡献者最担心的问题之一是:补丁集已发布却未收到反馈,应在多久后发送提醒。除了
+指定在重新提交之前要等待多长时间,还可以指示更新的首选样式;例如,重新发送
+整个系列,或私下发送提醒邮件。本节也可以列出本区域的代码审阅方式,以及获取
+不能直接从维护者那里得到的反馈的方法。
+
+
+现有概要
+--------
+
+这里列出了现有的维护人员条目概要;我们可能会想要在不久的将来做一些不同的事情。
+
+.. toctree::
+ :maxdepth: 1
+
+ ../doc-guide/maintainer-profile
+ ../../../nvdimm/maintainer-entry-profile
+ ../../../riscv/patch-acceptance
diff --git a/Documentation/translations/zh_CN/maintainer/modifying-patches.rst b/Documentation/translations/zh_CN/maintainer/modifying-patches.rst
new file mode 100644
index 000000000000..7f6326137f6b
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/modifying-patches.rst
@@ -0,0 +1,51 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/modifying-patches.rst
+
+:译者:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+.. _modifyingpatches_zh:
+
+修改补丁
+========
+
+如果你是子系统或者分支的维护者,由于代码在你的和提交者的树中并不完全相同,
+有时你需要稍微修改一下收到的补丁以合并它们。
+
+如果你严格遵守开发者来源证书的规则(c),你应该要求提交者重做,但这完全是
+适得其反的时间、精力浪费。规则(b)允许你调整代码,但这样修改提交者的代码并让
+他为你的错误背书是非常不礼貌的。为解决此问题,建议在最后一个Signed-off-by标签
+和你自己的Signed-off-by标签之间添加一行,以指示更改的性质。这没有强制性要求,
+最好在描述前面加上你的邮件和/或姓名,并用方括号括住整行,以明显指出你对最后
+一刻的更改负责。例如::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+ [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
+ Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
+
+如果您维护着一个稳定的分支,并希望同时明确贡献、跟踪更改、合并修复,并保护
+提交者免受责难,这种做法尤其有用。请注意,在任何情况下都不得更改作者的身份
+(From头),因为它会在变更日志中显示。
+
+向后移植(back-port)人员特别要注意:为了便于跟踪,请在提交消息的顶部(即主题行
+之后)插入补丁的来源,这是一种常见而有用的做法。例如,我们可以在3.x稳定版本
+中看到以下内容::
+
+ Date: Tue Oct 7 07:26:38 2014 -0400
+
+ libata: Un-break ATA blacklist
+
+ commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
+
+而下面是在一个较旧的内核树中,某补丁被向后移植后会出现的样子::
+
+ Date: Tue May 13 22:12:27 2008 +0200
+
+ wireless, airo: waitbusy() won't delay
+
+ [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
+
+不管什么格式,这些信息都为人们跟踪你的树,以及试图解决你树中的错误的人提供了
+有价值的帮助。
diff --git a/Documentation/translations/zh_CN/maintainer/pull-requests.rst b/Documentation/translations/zh_CN/maintainer/pull-requests.rst
new file mode 100644
index 000000000000..f46d6f3f2498
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/pull-requests.rst
@@ -0,0 +1,148 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/pull-requests.rst
+
+:译者:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+.. _pullrequests_zh:
+
+如何创建拉取请求
+================
+
+本章描述维护人员如何创建并向其他维护人员提交拉取请求。这对将更改从一个维护者
+树转移到另一个维护者树非常有用。
+
+本文档由Tobin C. Harding(当时他尚不是一名经验丰富的维护人员)编写,内容主要
+来自Greg Kroah Hartman和Linus Torvalds在LKML上的评论。Jonathan Corbet和Mauro
+Carvalho Chehab提出了一些建议和修改。错误不可避免,如有问题,请找Tobin C.
+Harding <me@tobin.cc>。
+
+原始邮件线程::
+
+ http://lkml.kernel.org/r/20171114110500.GA21175@kroah.com
+
+
+创建分支
+--------
+
+首先,您需要将希望包含在拉取请求里的所有更改都放在单独分支中。通常您会基于
+您打算向其发送拉取请求的那个开发人员的树中的某个分支来创建它。
+
+为了创建拉取请求,您必须首先标记刚刚创建的分支。建议您选择一个有意义的标记名,
+使得即使过了一段时间,您和其他人仍能理解其含义。在名称中包含源子系统和目标内核
+版本的指示也是一个好的做法。
+
+Greg提供了以下内容。对于一个含有drivers/char中混杂事项、将应用于4.15-rc1内核的
+拉取请求,可以命名为 ``char-misc-4.15-rc1`` 。如果要在 ``char-misc-next`` 分支
+上打上此标记,您可以使用以下命令::
+
+ git tag -s char-misc-4.15-rc1 char-misc-next
+
+这将在 ``char-misc-next`` 分支的最后一个提交上创建一个名为 ``char-misc-4.15-rc1``
+的标记,并用您的gpg密钥签名(参见 Documentation/maintainer/configure-git.rst )。
+
+Linus只接受基于签名过的标记的拉取请求。其他维护者可能会有所不同。
+
+当您运行上述命令时 ``git`` 会打开编辑器要求你描述一下这个标记。在本例中您需要
+描述拉取请求,所以请概述一下包含的内容,为什么要合并,是否完成任何测试。所有
+这些信息都将留在标记中,然后在维护者合并拉取请求时保留在合并提交中。所以把它
+写好,它将永远留在内核中。
+
+正如Linus所说::
+
+ 不管怎么样,至少对我来说,重要的是 *信息* 。我需要知道我在拉取什么、
+ 为什么我要拉取。我也希望将此消息用于合并消息,因此它不仅应该对我有
+ 意义,也应该可以成为一个有意义的历史记录。
+
+ 注意,如果拉取请求有一些不寻常的地方,请详细说明。如果你修改了并非
+ 由你维护的文件,请解释 **为什么** 。我总会在差异中看到的,如果你不
+ 提的话,我只会觉得分外可疑。当你在合并窗口后给我发新东西的时候,
+ (甚至是比较重大的错误修复),不仅需要解释做了什么、为什么这么做,
+ 还请解释一下 **时间问题** 。为什么错过了合并窗口……
+
+ 我会看你写在拉取请求邮件和签名标记里面的内容,所以根据你的工作流,
+ 你可以在签名标记里面描述工作内容(也会自动放进拉取请求邮件),也
+ 可以只在标记里面放个占位符,稍后在你实际发给我拉取请求时描述工作内容。
+
+ 是的,我会编辑这些消息。部分因为我需要做一些琐碎的格式调整(整体缩进、
+ 括号等),也因为此消息可能对我有意义(描述了冲突或一些个人问题)而对
+ 合并提交信息上下文没啥意义,因此我需要尽力让它有意义起来。我也会
+ 修复一些拼写和语法错误,特别是非母语者(母语者也是;^)。但我也会删掉
+ 或增加一些内容。
+
+ Linus
+
+Greg给出了一个拉取请求的例子::
+
+ Char/Misc patches for 4.15-rc1
+
+ Here is the big char/misc patch set for the 4.15-rc1 merge window.
+ Contained in here is the normal set of new functions added to all
+ of these crazy drivers, as well as the following brand new
+ subsystems:
+ - time_travel_controller: Finally a set of drivers for the
+ latest time travel bus architecture that provides i/o to
+ the CPU before it asked for it, allowing uninterrupted
+ processing
+ - relativity_shifters: due to the affect that the
+ time_travel_controllers have on the overall system, there
+ was a need for a new set of relativity shifter drivers to
+ accommodate the newly formed black holes that would
+ threaten to suck CPUs into them. This subsystem handles
+ this in a way to successfully neutralize the problems.
+ There is a Kconfig option to force these to be enabled
+ when needed, so problems should not occur.
+
+ All of these patches have been successfully tested in the latest
+ linux-next releases, and the original problems that it found have
+ all been resolved (apologies to anyone living near Canberra for the
+ lack of the Kconfig options in the earlier versions of the
+ linux-next tree creations.)
+
+ Signed-off-by: Your-name-here <your_email@domain>
+
+
+此标记消息的格式就像一个git提交:顶部有一行“总结标题”,并且一定要在末尾加上sign-off。
+
+现在您已经有了一个本地签名标记,您需要将它推送到可以被拉取的位置::
+
+ git push origin char-misc-4.15-rc1
+
+
+创建拉取请求
+------------
+
+最后要做的是创建拉取请求消息。可以使用 ``git request-pull`` 命令让 ``git``
+为你做这件事,但它需要确定你想拉取什么,以及拉取针对的基础(显示正确的拉取
+更改和变更状态)。以下命令将生成一个拉取请求::
+
+ git request-pull master git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git/ char-misc-4.15-rc1
+
+引用Greg的话::
+
+ 此命令要求git比较从“char-misc-4.15-rc1”标记位置到“master”分支头(上述
+ 例子中指向了我从Linus的树分叉的地方,通常是-rc发布)的差异,并去使用
+ git:// 协议拉取。如果你希望使用 https:// 协议,也可以用在这里(但是请
+ 注意,部分人由于防火墙问题没法用https协议拉取)。
+
+ 如果char-misc-4.15-rc1标记没有出现在我要求拉取的仓库中,git会提醒
+ 它不在那里,所以记得推送到公开地方。
+
+ “git request-pull”会包含git树的地址和需要拉取的特定标记,以及标记
+ 描述全文(详尽描述标记)。同时它也会创建此拉取请求的差异状态和单个
+ 提交的缩短日志。
+
+Linus回复说他倾向于 ``git://`` 协议。其他维护者可能有不同的偏好。另外,请注意
+如果你创建的拉取请求没有签名标记, ``https://`` 可能是更好的选择。完整的讨论
+请看原邮件。
+
+
+提交拉取请求
+------------
+
+拉取请求的提交方式与普通补丁相同。向维护人员发送内联电子邮件并抄送LKML以及
+任何必要特定子系统的列表。对Linus的拉取请求通常有如下主题行::
+
+ [GIT PULL] <subsystem> changes for v4.15-rc1
diff --git a/Documentation/translations/zh_CN/maintainer/rebasing-and-merging.rst b/Documentation/translations/zh_CN/maintainer/rebasing-and-merging.rst
new file mode 100644
index 000000000000..83b7dabfe88b
--- /dev/null
+++ b/Documentation/translations/zh_CN/maintainer/rebasing-and-merging.rst
@@ -0,0 +1,165 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/maintainer/rebasing-and-merging.rst
+
+:译者:
+
+ 吴想成 Wu XiangCheng <bobwxc@email.cn>
+
+==========
+变基与合并
+==========
+
+一般来说,维护子系统需要熟悉Git源代码管理系统。Git是一个功能强大的工具,有
+很多功能;就像这类工具常出现的情况一样,使用这些功能的方法有对有错。本文档
+特别介绍了变基与合并的用法。维护者经常在错误使用这些工具时遇到麻烦,但避免
+问题实际上并不那么困难。
+
+总的来说,需要注意的一点是:与许多其他项目不同,内核社区并不害怕在其开发历史
+中看到合并提交。事实上,考虑到该项目的规模,避免合并几乎是不可能的。维护者会
+在希望避免合并时遇到一些问题,而过于频繁的合并也会带来另一些问题。
+
+变基
+====
+
+“变基(Rebase)”是更改存储库中一系列提交的历史记录的过程。有两种不同类型的
+操作都被称为变基,因为这两种操作都使用 ``git rebase`` 命令,但它们之间存在显著
+差异:
+
+ - 更改一系列补丁的父提交(起始提交)。例如,变基操作可以将基于上一内核版本
+ 的一个补丁集重建到当前版本上。在下面的讨论中,我们将此操作称为“变根”。
+
+ - 通过修复(或删除)损坏的提交、添加补丁、在提交变更日志中添加标记或更改
+   提交的应用顺序,来修改一系列补丁的历史。在下文中,这种类型的操作称为
+   “历史修改”。
+
+术语“变基”将用于指代上述两种操作。如果使用得当,变基可以产生更清晰、更整洁的
+开发历史;如果使用不当,它可能会模糊历史并引入错误。
+
+以下一些经验法则可以帮助开发者避免最糟糕的变基风险:
+
+ - 已经发布到你私人系统之外世界的历史通常不应更改。其他人可能会拉取你的树
+ 的副本,然后基于它进行工作;修改你的树会给他们带来麻烦。如果工作需要变基,
+ 这通常是表明它还没有准备好提交到公共存储库的信号。
+
+ 但是,总有例外。有些树(linux-next是一个典型的例子)因其性质而需要经常
+ 变基,开发人员知道不要基于它们来工作。开发人员有时会公开一个不稳定的分支,
+ 供其他人或自动测试服务进行测试。如果您确实以这种方式公开了一个可能不稳定
+ 的分支,请确保潜在使用者知道不要基于它来工作。
+
+ - 不要在包含由他人创建的历史的分支上变基。如果你从别的开发者的仓库拉取了变更,
+ 那你现在就成了他们历史记录的保管人。你不应该改变它,除了少数例外情况。例如
+ 树中有问题的提交必须显式恢复(即通过另一个补丁修复),而不是通过修改历史而
+ 消失。
+
+ - 没有合理理由,不要对树变根。仅为了切换到更新的基或避免与上游储存库的合并
+ 通常不是合理理由。
+
+ - 如果你必须对储存库进行变根,请不要随机选取一个提交作为新基。在发布节点之间
+ 内核通常处于一个相对不稳定的状态;基于其中某点进行开发会增加遇到意外错误的
+ 几率。当一系列补丁必须移动到新基时,请选择移动到一个稳定节点(例如-rc版本
+ 节点)。
+
+ - 请知悉对补丁系列进行变根(或做明显的历史修改)会改变它们的开发环境,且很
+ 可能使做过的大部分测试失效。一般来说,变基后的补丁系列应当像新代码一样对
+ 待,并重新测试。
+
+合并窗口麻烦的一个常见原因是,Linus收到了一个明显在拉取请求发送之前不久才变根
+(通常是变根到随机的提交上)的补丁系列。这样一个系列被充分测试的可能性相对较
+低,拉取请求被接受的几率也同样较低。
+
+相反,如果变基仅限于私有树、提交基于一个通用的起点、且经过充分测试,则引起
+麻烦的可能性就很低。
+
+合并
+====
+
+内核开发过程中,合并是一个很常见的操作;5.1版本开发周期中有1126个合并提交
+——差不多占了总数的9%。内核开发工作积累在100多个不同的子系统树中,每个
+子系统树都可能包含多个主题分支;每个分支通常独立于其他分支进行开发。因此
+在任何给定分支进入上游储存库之前,至少需要一次合并。
+
+许多项目要求拉取请求中的分支基于当前主干,这样历史记录中就不会出现合并提交。
+内核并不是这样;任何为了避免合并而重新对分支变基都很可能导致麻烦。
+
+子系统维护人员发现他们必须进行两种类型的合并:从较低层级的子系统树和从其他
+子系统树(同级树或主线)进行合并。这两种情况下要遵循的最佳实践是不同的。
+
+合并较低层级树
+--------------
+
+较大的子系统往往有多个级别的维护人员,较低级别的维护人员向较高级别发送拉取
+请求。合并这样的请求执行几乎肯定会生成一个合并提交;这也是应该的。实际上,
+子系统维护人员可能希望在极少数快进合并情况下使用 ``-–no-ff`` 标志来强制添加
+合并提交,以便记录合并的原因。 **任何** 类型的合并的变更日志必须说明
+*为什么* 合并。对于较低级别的树,“为什么”通常是对该取所带来的变化的总结。
+
+各级维护人员都应在他们的拉取请求上使用经签名的标签,上游维护人员应在拉取分支
+时验证标签。不这样做会威胁整个开发过程的安全。
+
+根据上面列出的规则,一旦您将其他人的历史记录合并到树中,您就不得对该分支进行
+变基,即使您能够这样做。
+
+合并同级树或上游树
+------------------
+
+虽然来自下游的合并是常见且不起眼的,但当需要将一个分支推向上游时,其中来自
+其他树的合并往往是一个危险信号。这种合并需要仔细考虑并加以充分证明,否则后续
+的拉取请求很可能会被拒绝。
+
+想要将主分支合并到存储库中是很自然的;这种类型的合并通常被称为“反向合并”。
+反向合并有助于确保与并行的开发没有冲突,并且通常会给人一种温暖、舒服的感觉,
+即一切都是最新的。但这种诱惑几乎总是应该避免的。
+
+为什么呢?反向合并将搅乱你自己分支的开发历史。它们会大大增加你遇到来自社区
+其他地方的错误的机会,且使你很难确保你所管理的工作稳定并准备好合入上游。
+频繁的合并还可以掩盖树中开发过程中的问题;它们会隐藏与其他树的交互,而这些
+交互不应该(经常)发生在管理良好的分支中。
+
+也就是说,偶尔需要进行反向合并;当这种情况发生时,一定要在提交信息中记录
+*为什么* 。同样,在一个众所周知的稳定点进行合并,而不是随机提交。即使这样,
+你也不应该反向合并一棵比你的直接上游树更高层级的树;如果确实需要更高级别的
+反向合并,应首先在上游树进行。
+
+导致合并相关问题最常见的原因之一是:在发送拉取请求之前维护者合并上游以解决
+合并冲突。同样,这种诱惑很容易理解,但绝对应该避免。对于最终拉取请求来说
+尤其如此:Linus坚信他更愿意看到合并冲突,而不是不必要的反向合并。看到冲突
+可以让他了解潜在的问题所在。他做过很多合并(在5.1版本开发周期中是382次),
+而且在解决冲突方面也很在行——通常比参与的开发人员要强。
+
+那么,当他们的子系统分支和主线之间发生冲突时,维护人员应该怎么做呢?最重要
+的一步是在拉取请求中提示Linus会发生冲突;如果没有提及,则意味着您的分支可以正常
+合入。对于特别困难的冲突,创建并推送一个 *独立* 分支来展示你将如何解决问题。
+在拉取请求中提到该分支,但是请求本身应该针对未合并的分支。
+
+即使不存在已知冲突,在发送拉取请求之前进行合并测试也是个好主意。它可能会提醒
+您一些在linux-next树中没有发现的问题,并帮助您准确地理解您正在要求上游做什么。
+
+合并上游树或另一个子系统树的另一个原因是解决依赖关系。这些依赖性问题有时确实
+会发生,而且有时与另一棵树交叉合并是解决这些问题的最佳方法;同样,在这种情况
+下,合并提交应该解释为什么要进行合并。花点时间把它做好;会有人阅读这些变更
+日志。
+
+然而依赖性问题通常表明需要改变方法。合并另一个子系统树以解决依赖性风险会带来
+其他缺陷,几乎永远不应这样做。如果该子系统树无法被合到上游,那么它的任何问题
+也都会阻碍你的树合并。更可取的选择包括与维护人员达成一致意见,在其中一个树中
+同时进行两组更改;或者创建一个主题分支专门处理可以合并到两个树中的先决条件提交。
+如果依赖关系与主要的基础结构更改相关,正确的解决方案可能是将依赖提交保留一个
+开发周期,以便这些更改有时间在主线上稳定。
+
+最后
+====
+
+在开发周期的开头合并主线是比较常见的,可以获取树中其他地方的更改和修复。同样,
+这样的合并应该选择一个众所周知的发布点,而不是一些随机点。如果在合并窗口期间
+上游分支已完全清空到主线中,则可以使用以下命令向前拉取它::
+
+ git merge v5.2-rc1^0
+
+“^0”使Git执行快进合并(在这种情况下这应该可以),从而避免多余的虚假合并提交。
+
+上面列出的就是指导方针了。总是会有一些情况需要不同的解决方案,这些指导原则
+不应阻止开发人员在需要时做正确的事情。但是,我们应该时刻考虑是否真的出现了
+这样的需求,并准备好解释为什么需要做一些不寻常的事情。
diff --git a/Documentation/translations/zh_CN/parisc/debugging.rst b/Documentation/translations/zh_CN/parisc/debugging.rst
new file mode 100644
index 000000000000..c21beb986e15
--- /dev/null
+++ b/Documentation/translations/zh_CN/parisc/debugging.rst
@@ -0,0 +1,42 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/parisc/debugging.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_parisc_debugging:
+
+=================
+调试PA-RISC
+=================
+
+好吧,这里有一些关于调试linux/parisc的较底层部分的信息。
+
+
+1. 绝对地址
+=====================
+
+很多汇编代码目前运行在实模式下,这意味着会使用绝对地址,而不是像内核其他
+部分那样使用虚拟地址。要将绝对地址转换为虚拟地址,你可以在System.map中查
+找,添加__PAGE_OFFSET(目前是0x10000000)。
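+
+例如(仅为示意):绝对地址0x00123456加上__PAGE_OFFSET(0x10000000)后得到
+0x10123456,用这个虚拟地址即可在System.map中查找对应的符号。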
+
+
+2. HPMCs
+========
+
+当实模式的代码试图访问不存在的内存时,会出现HPMC(high priority machine
+check)而不是内核oops。若要调试HPMC,请尝试找到系统响应程序/请求程序地址。
+系统请求程序地址应该与(某)处理器的HPA(I/O范围内的高地址)相匹配;系统响应程
+序地址是实模式代码试图访问的地址。
+
+系统响应程序地址的典型值是大于__PAGE_OFFSET (0x10000000)的地址,这意味着
+在实模式试图访问它之前,虚拟地址没有被翻译成物理地址。
+
+
+3. 有趣的Q位
+============
+
+某些非常关键的代码必须清除PSW中的Q位。当Q位被清除时,CPU不会更新中断处理
+程序所读取的寄存器,以找出机器被中断的位置——所以如果你在清除Q位的指令和再
+次设置Q位的RFI之间遇到中断,你不知道它到底发生在哪里。如果你幸运的话,IAOQ
+会指向清除Q位的指令,如果你不幸运的话,它会指向任何地方。通常Q位的问题会
+表现为无法解释的系统挂起或物理内存越界。
diff --git a/Documentation/translations/zh_CN/parisc/index.rst b/Documentation/translations/zh_CN/parisc/index.rst
new file mode 100644
index 000000000000..a47454ebe32e
--- /dev/null
+++ b/Documentation/translations/zh_CN/parisc/index.rst
@@ -0,0 +1,28 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/parisc/index.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_parisc_index:
+
+====================
+PA-RISC体系架构
+====================
+
+.. toctree::
+ :maxdepth: 2
+
+ debugging
+ registers
+
+Todolist:
+
+ features
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/parisc/registers.rst b/Documentation/translations/zh_CN/parisc/registers.rst
new file mode 100644
index 000000000000..71e2404cd103
--- /dev/null
+++ b/Documentation/translations/zh_CN/parisc/registers.rst
@@ -0,0 +1,153 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/parisc/registers.rst
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_parisc_registers:
+
+=========================
+Linux/PA-RISC的寄存器用法
+=========================
+
+[ 用星号表示目前尚未实现的计划用途。 ]
+
+ABI约定的通用寄存器
+===================
+
+控制寄存器
+----------
+
+============================ =================================
+CR 0 (恢复计数器) 用于ptrace
+CR 1-CR 7(无定义) 未使用
+CR 8 (Protection ID) 每进程值*
+CR 9, 12, 13 (PIDS) 未使用
+CR10 (CCR) FPU延迟保存*
+CR11 按照ABI的规定(SAR)
+CR14 (中断向量) 初始化为 fault_vector
+CR15 (EIEM) 所有位初始化为1*
+CR16 (间隔计时器) 读取周期数/写入开始时间间隔计时器
+CR17-CR22 中断参数
+CR19 中断指令寄存器
+CR20 中断空间寄存器
+CR21 中断偏移量寄存器
+CR22 中断 PSW
+CR23 (EIRR) 读取未决中断/写入清除位
+CR24 (TR 0) 内核空间页目录指针
+CR25 (TR 1) 用户空间页目录指针
+CR26 (TR 2) 不使用
+CR27 (TR 3) 线程描述符指针
+CR28 (TR 4) 不使用
+CR29 (TR 5) 不使用
+CR30 (TR 6) 当前 / 0
+CR31 (TR 7) 临时寄存器,在不同地方使用
+============================ =================================
+
+空间寄存器(内核模式)
+----------------------
+
+======== ==============================
+SR0 临时空间寄存器
+SR4-SR7 设置为0
+SR1 临时空间寄存器
+SR2 内核不应该破坏它
+SR3 用于用户空间访问(当前进程)
+======== ==============================
+
+空间寄存器(用户模式)
+----------------------
+
+======== ============================
+SR0 临时空间寄存器
+SR1 临时空间寄存器
+SR2 保存Linux gateway page的空间
+SR3 在内核中保存用户地址空间的值
+SR4-SR7 定义了用户/内核的短地址空间
+======== ============================
+
+
+处理器状态字
+------------
+
+====================== ================================================
+W (64位地址) 0
+E (小尾端) 0
+S (安全间隔计时器) 0
+T (产生分支陷阱) 0
+H (高特权级陷阱) 0
+L (低特权级陷阱) 0
+N (撤销下一条指令) 被C代码使用
+X (数据存储中断禁用) 0
+B (产生分支) 被C代码使用
+C (代码地址转译) 1, 在执行实模式代码时为0
+V (除法步长校正) 被C代码使用
+M (HPMC 掩码) 0, 在执行HPMC操作*时为1
+C/B (进/借 位) 被C代码使用
+O (有序引用) 1*
+F (性能监视器) 0
+R (回收计数器陷阱) 0
+Q (收集中断状态) 1 (在rfi之前的代码中为0)
+P (保护标识符) 1*
+D (数据地址转译) 1, 在执行实模式代码时为0
+I (外部中断掩码) 由cli()/sti()宏使用。
+====================== ================================================
+
+“隐形”寄存器(影子寄存器)
+---------------------------
+
+============= ===================
+PSW W 默认值 0
+PSW E 默认值 0
+影子寄存器 被中断处理代码使用
+TOC启用位 1
+============= ===================
+
+----------------------------------------------------------
+
+PA-RISC架构定义了7个寄存器作为“影子寄存器”。这些寄存器在
+RETURN FROM INTERRUPTION AND RESTORE指令中使用,通过消
+除中断处理程序中对一般寄存器(GR)的保存和恢复的需要来减
+少状态保存和恢复时间。影子寄存器是GRs 1, 8, 9, 16, 17,
+24和25。
+
+-------------------------------------------------------------------------
+
+寄存器使用说明,最初由John Marvin提供,并由Randolph Chung提供一些补充说明。
+
+对于通用寄存器:
+
+r1,r2,r19-r26,r28,r29 和 r31可以在不保存的情况下使用。当然,如果你在意
+它们的值,在调用另一个过程之前,你也需要保存它们。上面的一些寄存器确实
+有特殊的含义,你应该注意一下:
+
+ r1:
+ addil指令是硬性规定将其结果放在r1中,所以如果你使用这条指令要
+ 注意这点。
+
+ r2:
+ 这就是返回指针。一般来说,你不想使用它,因为你需要这个指针来返
+ 回给你的调用者。然而,它与这组寄存器组合在一起,因为调用者不能
+ 依赖你返回时的值是相同的,也就是说,你可以将r2复制到另一个寄存
+ 器,并在作废r2后通过该寄存器返回,这应该不会给调用程序带来问题。
+
+ r19-r22:
+ 这些通常被认为是临时寄存器。
+ 请注意,在64位中它们是arg7-arg4。
+
+ r23-r26:
+ 这些是arg3-arg0,也就是说,如果你不再关心传入的值,你可以使用
+ 它们。
+
+ r28,r29:
+ 这俩是ret0和ret1。它们是你传入返回值的地方。r28是主返回值。当返回
+ 小结构体时,r29也可以用来将数据传回给调用程序。
+
+ r30:
+ 栈指针
+
+ r31:
+ ble指令将返回指针放在这里。
+
+
+ r3-r18,r27,r30需要被保存和恢复。r3-r18只是一般用途的寄存器。
+ r27是数据指针,用来使对全局变量的引用更容易。r30是栈指针。
diff --git a/Documentation/translations/zh_CN/process/8.Conclusion.rst b/Documentation/translations/zh_CN/process/8.Conclusion.rst
index 71c3e30efc6f..4707f0101964 100644
--- a/Documentation/translations/zh_CN/process/8.Conclusion.rst
+++ b/Documentation/translations/zh_CN/process/8.Conclusion.rst
@@ -19,7 +19,7 @@
:ref:`Documentation/translations/zh_CN/process/howto.rst <cn_process_howto>`
文件是一个重要的起点;
:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
-和 :ref:`Documentation/transaltions/zh_CN/process/submitting-drivers.rst <cn_submittingdrivers>`
+和 :ref:`Documentation/translations/zh_CN/process/submitting-drivers.rst <cn_submittingdrivers>`
也是所有内核开发人员都应该阅读的内容。许多内部内核API都是使用kerneldoc机制
记录的;“make htmldocs”或“make pdfdocs”可用于以HTML或PDF格式生成这些文档
(尽管某些发行版提供的tex版本会遇到内部限制,无法正确处理文档)。
diff --git a/Documentation/translations/zh_CN/process/coding-style.rst b/Documentation/translations/zh_CN/process/coding-style.rst
index 406d43a02c02..b8c484a84d10 100644
--- a/Documentation/translations/zh_CN/process/coding-style.rst
+++ b/Documentation/translations/zh_CN/process/coding-style.rst
@@ -61,7 +61,7 @@ Linux 内核代码风格
case 'K':
case 'k':
mem <<= 10;
- /* fall through */
+ fallthrough;
default:
break;
}
diff --git a/Documentation/usb/ehci.rst b/Documentation/usb/ehci.rst
index 31f650e7c1b4..76190501907a 100644
--- a/Documentation/usb/ehci.rst
+++ b/Documentation/usb/ehci.rst
@@ -1,4 +1,4 @@
-===========
+===========
EHCI driver
===========
diff --git a/Documentation/usb/gadget_printer.rst b/Documentation/usb/gadget_printer.rst
index 5e5516c69075..e611a6d91093 100644
--- a/Documentation/usb/gadget_printer.rst
+++ b/Documentation/usb/gadget_printer.rst
@@ -1,4 +1,4 @@
-===============================
+===============================
Linux USB Printer Gadget Driver
===============================
diff --git a/Documentation/userspace-api/ioctl/hdio.rst b/Documentation/userspace-api/ioctl/hdio.rst
index 817371bf94e9..6ee8fc88699f 100644
--- a/Documentation/userspace-api/ioctl/hdio.rst
+++ b/Documentation/userspace-api/ioctl/hdio.rst
@@ -7,8 +7,8 @@ Summary of `HDIO_` ioctl calls
November, 2004
This document attempts to describe the ioctl(2) calls supported by
-the HD/IDE layer. These are by-and-large implemented (as of Linux 2.6)
-in drivers/ide/ide.c and drivers/block/scsi_ioctl.c
+the HD/IDE layer. These are by-and-large implemented (as of Linux 5.11)
+in drivers/ata/libata-scsi.c.
ioctl values are listed in <linux/hdreg.h>. As of this writing, they
are as follows:
@@ -17,50 +17,17 @@ are as follows:
======================= =======================================
HDIO_GETGEO get device geometry
- HDIO_GET_UNMASKINTR get current unmask setting
- HDIO_GET_MULTCOUNT get current IDE blockmode setting
- HDIO_GET_QDMA get use-qdma flag
- HDIO_SET_XFER set transfer rate via proc
- HDIO_OBSOLETE_IDENTITY OBSOLETE, DO NOT USE
- HDIO_GET_KEEPSETTINGS get keep-settings-on-reset flag
HDIO_GET_32BIT get current io_32bit setting
- HDIO_GET_NOWERR get ignore-write-error flag
- HDIO_GET_DMA get use-dma flag
- HDIO_GET_NICE get nice flags
HDIO_GET_IDENTITY get IDE identification info
- HDIO_GET_WCACHE get write cache mode on|off
- HDIO_GET_ACOUSTIC get acoustic value
- HDIO_GET_ADDRESS get sector addressing mode
- HDIO_GET_BUSSTATE get the bus state of the hwif
- HDIO_TRISTATE_HWIF execute a channel tristate
- HDIO_DRIVE_RESET execute a device reset
HDIO_DRIVE_TASKFILE execute raw taskfile
HDIO_DRIVE_TASK execute task and special drive command
HDIO_DRIVE_CMD execute a special drive command
- HDIO_DRIVE_CMD_AEB HDIO_DRIVE_TASK
======================= =======================================
ioctls that pass non-pointer values:
======================= =======================================
- HDIO_SET_MULTCOUNT change IDE blockmode
- HDIO_SET_UNMASKINTR permit other irqs during I/O
- HDIO_SET_KEEPSETTINGS keep ioctl settings on reset
HDIO_SET_32BIT change io_32bit flags
- HDIO_SET_NOWERR change ignore-write-error flag
- HDIO_SET_DMA change use-dma flag
- HDIO_SET_PIO_MODE reconfig interface to new speed
- HDIO_SCAN_HWIF register and (re)scan interface
- HDIO_SET_NICE set nice flags
- HDIO_UNREGISTER_HWIF unregister interface
- HDIO_SET_WCACHE change write cache enable-disable
- HDIO_SET_ACOUSTIC change acoustic behavior
- HDIO_SET_BUSSTATE set the bus state of the hwif
- HDIO_SET_QDMA change use-qdma flag
- HDIO_SET_ADDRESS change lba addressing modes
-
- HDIO_SET_IDE_SCSI Set scsi emulation mode on/off
- HDIO_SET_SCSI_IDE not implemented yet
======================= =======================================
@@ -137,143 +104,6 @@ HDIO_GETGEO
-
-HDIO_GET_UNMASKINTR
- get current unmask setting
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_UNMASKINTR, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the drive's current unmask setting
-
-
-
-
-
-HDIO_SET_UNMASKINTR
- permit other irqs during I/O
-
-
- usage::
-
- unsigned long val;
-
- ioctl(fd, HDIO_SET_UNMASKINTR, val);
-
- inputs:
- New value for unmask flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-
-HDIO_GET_MULTCOUNT
- get current IDE blockmode setting
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_MULTCOUNT, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current IDE block mode setting. This
- controls how many sectors the drive will transfer per
- interrupt.
-
-
-
-HDIO_SET_MULTCOUNT
- change IDE blockmode
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_MULTCOUNT, val);
-
- inputs:
- New value for IDE block mode setting. This controls how many
- sectors the drive will transfer per interrupt.
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range supported by disk.
- - EBUSY Controller busy or blockmode already set.
- - EIO Drive did not accept new block mode.
-
- notes:
- Source code comments read::
-
- This is tightly woven into the driver->do_special cannot
- touch. DON'T do it again until a total personality rewrite
- is committed.
-
- If blockmode has already been set, this ioctl will fail with
- -EBUSY
-
-
-
-HDIO_GET_QDMA
- get use-qdma flag
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_SET_XFER
- set transfer rate via proc
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_OBSOLETE_IDENTITY
- OBSOLETE, DO NOT USE
-
-
- Same as HDIO_GET_IDENTITY (see below), except that it only
- returns the first 142 bytes of drive identity information.
-
-
-
HDIO_GET_IDENTITY
get IDE identification info
@@ -308,60 +138,6 @@ HDIO_GET_IDENTITY
-HDIO_GET_KEEPSETTINGS
- get keep-settings-on-reset flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_KEEPSETTINGS, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current "keep settings" flag
-
-
-
- notes:
- When set, indicates that kernel should restore settings
- after a drive reset.
-
-
-
-HDIO_SET_KEEPSETTINGS
- keep ioctl settings on reset
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_KEEPSETTINGS, val);
-
- inputs:
- New value for keep_settings flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
HDIO_GET_32BIT
get current io_32bit setting
@@ -387,288 +163,6 @@ HDIO_GET_32BIT
-
-
-HDIO_GET_NOWERR
- get ignore-write-error flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_NOWERR, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current ignore-write-error flag
-
-
-
-
-
-HDIO_GET_DMA
- get use-dma flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_DMA, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current use-dma flag
-
-
-
-
-
-HDIO_GET_NICE
- get nice flags
-
-
- usage::
-
- long nice;
-
- ioctl(fd, HDIO_GET_NICE, &nice);
-
- inputs:
- none
-
-
-
- outputs:
- The drive's "nice" values.
-
-
-
- notes:
- Per-drive flags which determine when the system will give more
- bandwidth to other devices sharing the same IDE bus.
-
- See <linux/hdreg.h>, near symbol IDE_NICE_DSC_OVERLAP.
-
-
-
-
-HDIO_SET_NICE
- set nice flags
-
-
- usage::
-
- unsigned long nice;
-
- ...
- ioctl(fd, HDIO_SET_NICE, nice);
-
- inputs:
- bitmask of nice flags.
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EPERM Flags other than DSC_OVERLAP and NICE_1 set.
- - EPERM DSC_OVERLAP specified but not supported by drive
-
- notes:
- This ioctl sets the DSC_OVERLAP and NICE_1 flags from values
- provided by the user.
-
- Nice flags are listed in <linux/hdreg.h>, starting with
- IDE_NICE_DSC_OVERLAP. These values represent shifts.
-
-
-
-
-
-HDIO_GET_WCACHE
- get write cache mode on|off
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_WCACHE, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current write cache mode
-
-
-
-
-
-HDIO_GET_ACOUSTIC
- get acoustic value
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_ACOUSTIC, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current acoustic settings
-
-
-
- notes:
- See HDIO_SET_ACOUSTIC
-
-
-
-
-
-HDIO_GET_ADDRESS
- usage::
-
-
- long val;
-
- ioctl(fd, HDIO_GET_ADDRESS, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current addressing mode:
-
- = ===================
- 0 28-bit
- 1 48-bit
- 2 48-bit doing 28-bit
- 3 64-bit
- = ===================
-
-
-
-HDIO_GET_BUSSTATE
- get the bus state of the hwif
-
-
- usage::
-
- long state;
-
- ioctl(fd, HDIO_SCAN_HWIF, &state);
-
- inputs:
- none
-
-
-
- outputs:
- Current power state of the IDE bus. One of BUSSTATE_OFF,
- BUSSTATE_ON, or BUSSTATE_TRISTATE
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
-
-
-
-
-HDIO_SET_BUSSTATE
- set the bus state of the hwif
-
-
- usage::
-
- int state;
-
- ...
- ioctl(fd, HDIO_SCAN_HWIF, state);
-
- inputs:
- Desired IDE power state. One of BUSSTATE_OFF, BUSSTATE_ON,
- or BUSSTATE_TRISTATE
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
- - EOPNOTSUPP Hardware interface does not support bus power control
-
-
-
-
-HDIO_TRISTATE_HWIF
- execute a channel tristate
-
-
- Not implemented, as of 2.6.8.1. See HDIO_SET_BUSSTATE
-
-
-
-HDIO_DRIVE_RESET
- execute a device reset
-
-
- usage::
-
- int args[3]
-
- ...
- ioctl(fd, HDIO_DRIVE_RESET, args);
-
- inputs:
- none
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - ENXIO No such device: phy dead or ctl_addr == 0
- - EIO I/O error: reset timed out or hardware error
-
- notes:
-
- - Execute a reset on the device as soon as the current IO
- operation has completed.
-
- - Executes an ATAPI soft reset if applicable, otherwise
- executes an ATA soft reset on the controller.
-
-
-
HDIO_DRIVE_TASKFILE
execute raw taskfile
@@ -1026,14 +520,6 @@ HDIO_DRIVE_TASK
-HDIO_DRIVE_CMD_AEB
- HDIO_DRIVE_TASK
-
-
- Not implemented, as of 2.6.8.1
-
-
-
HDIO_SET_32BIT
change io_32bit flags
@@ -1059,284 +545,3 @@ HDIO_SET_32BIT
- EACCES Access denied: requires CAP_SYS_ADMIN
- EINVAL value out of range [0 3]
- EBUSY Controller busy
-
-
-
-
-HDIO_SET_NOWERR
- change ignore-write-error flag
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_NOWERR, val);
-
- inputs:
- New value for ignore-write-error flag. Used for ignoring
-
-
- WRERR_STAT
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_DMA
- change use-dma flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_DMA, val);
-
- inputs:
- New value for use-dma flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_PIO_MODE
- reconfig interface to new speed
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_PIO_MODE, val);
-
- inputs:
- New interface speed.
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 255]
- - EBUSY Controller busy
-
-
-
-HDIO_SCAN_HWIF
- register and (re)scan interface
-
-
- usage::
-
- int args[3]
-
- ...
- ioctl(fd, HDIO_SCAN_HWIF, args);
-
- inputs:
-
- ======= =========================
- args[0] io address to probe
-
-
- args[1] control address to probe
- args[2] irq number
- ======= =========================
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
- - EIO Probe failed.
-
- notes:
- This ioctl initializes the addresses and irq for a disk
- controller, probes for drives, and creates /proc/ide
- interfaces as appropriate.
-
-
-
-HDIO_UNREGISTER_HWIF
- unregister interface
-
-
- usage::
-
- int index;
-
- ioctl(fd, HDIO_UNREGISTER_HWIF, index);
-
- inputs:
- index index of hardware interface to unregister
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
-
- notes:
- This ioctl removes a hardware interface from the kernel.
-
- Currently (2.6.8) this ioctl silently fails if any drive on
- the interface is busy.
-
-
-
-HDIO_SET_WCACHE
- change write cache enable-disable
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_WCACHE, val);
-
- inputs:
- New value for write cache enable
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_ACOUSTIC
- change acoustic behavior
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_ACOUSTIC, val);
-
- inputs:
- New value for drive acoustic settings
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 254]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_QDMA
- change use-qdma flag
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_SET_ADDRESS
- change lba addressing modes
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_ADDRESS, val);
-
- inputs:
- New value for addressing mode
-
- = ===================
- 0 28-bit
- 1 48-bit
- 2 48-bit doing 28-bit
- = ===================
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 2]
- - EBUSY Controller busy
- - EIO Drive does not support lba48 mode.
-
-
-HDIO_SET_IDE_SCSI
- usage::
-
-
- long val;
-
- ioctl(fd, HDIO_SET_IDE_SCSI, val);
-
- inputs:
- New value for scsi emulation mode (?)
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_SCSI_IDE
- Not implemented, as of 2.6.8.1
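
All of the HDIO calls above follow one pattern: a "get" ioctl takes a
pointer to a long, a "set" ioctl takes a plain value, and both must be
issued on the whole-disk node rather than a partition. A minimal sketch of
the pattern (illustrative, not part of the patch), assuming a legacy IDE
disk at /dev/hda::

    /* Query the io_32bit setting on a legacy IDE disk. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/hdreg.h>

    int main(void)
    {
        long val;
        int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* "get" ioctls pass a pointer; "set" ioctls pass a plain value. */
        if (ioctl(fd, HDIO_GET_32BIT, &val) == 0)
            printf("io_32bit = %ld\n", val);
        else
            perror("HDIO_GET_32BIT");
        close(fd);
        return 0;
    }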
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 9bfc2b510c64..1409e40e6345 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -325,7 +325,7 @@ Code Seq# Include File Comments
0xA3 90-9F linux/dtlk.h
0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
0xA4 00-1F uapi/asm/sgx.h <mailto:linux-sgx@vger.kernel.org>
-0xA5 01 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator
+0xA5 01-05 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator
<mailto:luzmaximilian@gmail.com>
0xA5 20-2F linux/surface_aggregator/dtx.h Microsoft Surface DTX driver
<mailto:luzmaximilian@gmail.com>
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index 62c9361a3c7f..f35552ff19ba 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -145,7 +145,8 @@ Bind mounts and OverlayFS
Landlock enables to restrict access to file hierarchies, which means that these
access rights can be propagated with bind mounts (cf.
-:doc:`/filesystems/sharedsubtree`) but not with :doc:`/filesystems/overlayfs`.
+Documentation/filesystems/sharedsubtree.rst) but not with
+Documentation/filesystems/overlayfs.rst.
A bind mount mirrors a source file hierarchy to a destination. The destination
hierarchy is then composed of the exact same files, on which Landlock rules can
@@ -170,8 +171,8 @@ Inheritance
Every new thread resulting from a :manpage:`clone(2)` inherits Landlock domain
restrictions from its parent. This is similar to the seccomp inheritance (cf.
-:doc:`/userspace-api/seccomp_filter`) or any other LSM dealing with task's
-:manpage:`credentials(7)`. For instance, one process's thread may apply
+Documentation/userspace-api/seccomp_filter.rst) or any other LSM dealing with
+task's :manpage:`credentials(7)`. For instance, one process's thread may apply
Landlock rules to itself, but they will not be automatically applied to other
sibling threads (unlike POSIX thread credential changes, cf.
:manpage:`nptl(7)`).
@@ -278,7 +279,7 @@ Memory usage
------------
Kernel memory allocated to create rulesets is accounted and can be restricted
-by the :doc:`/admin-guide/cgroup-v1/memory`.
+by the memory controller documented in Documentation/admin-guide/cgroup-v1/memory.rst.
Questions and answers
=====================
@@ -303,7 +304,7 @@ issues, especially when untrusted processes can manipulate them (cf.
Additional documentation
========================
-* :doc:`/security/landlock`
+* Documentation/security/landlock.rst
* https://landlock.io
.. Links
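
The ruleset lifecycle behind these sections (create a ruleset, add a path
rule, drop privileges, then restrict the calling thread, whose restrictions
are inherited across clone(2)) can be sketched as follows. This is an
illustrative sequence, assuming a kernel built with Landlock and the three
landlock_* syscalls available::

    /* Restrict the current thread to read-only access below /usr. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <linux/landlock.h>

    int main(void)
    {
        struct landlock_ruleset_attr ruleset_attr = {
            .handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE |
                                 LANDLOCK_ACCESS_FS_READ_FILE |
                                 LANDLOCK_ACCESS_FS_READ_DIR,
        };
        struct landlock_path_beneath_attr path_beneath = {
            .allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
                              LANDLOCK_ACCESS_FS_READ_DIR,
        };
        int ruleset_fd;

        ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
                             sizeof(ruleset_attr), 0);
        if (ruleset_fd < 0)
            return 1;
        path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
        if (path_beneath.parent_fd < 0)
            return 1;
        syscall(SYS_landlock_add_rule, ruleset_fd,
                LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
        /* Required before self-restriction. */
        prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        /* Inherited by every future thread and child, as described above. */
        syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
        close(ruleset_fd);
        return 0;
    }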
diff --git a/Documentation/userspace-api/media/Makefile b/Documentation/userspace-api/media/Makefile
index 81a4a1a53bce..00922aa7efde 100644
--- a/Documentation/userspace-api/media/Makefile
+++ b/Documentation/userspace-api/media/Makefile
@@ -7,8 +7,8 @@ PARSER = $(srctree)/Documentation/sphinx/parse-headers.pl
UAPI = $(srctree)/include/uapi/linux
KAPI = $(srctree)/include/linux
-FILES = audio.h.rst ca.h.rst dmx.h.rst frontend.h.rst net.h.rst video.h.rst \
- videodev2.h.rst media.h.rst cec.h.rst lirc.h.rst
+FILES = ca.h.rst dmx.h.rst frontend.h.rst net.h.rst \
+ videodev2.h.rst media.h.rst cec.h.rst lirc.h.rst
TARGETS := $(addprefix $(BUILDDIR)/, $(FILES))
@@ -21,9 +21,6 @@ quiet_gen_rst = echo ' PARSE $(patsubst $(srctree)/%,%,$<)'; \
silent_gen_rst = ${gen_rst}
-$(BUILDDIR)/audio.h.rst: ${UAPI}/dvb/audio.h ${PARSER} $(SRC_DIR)/audio.h.rst.exceptions
- @$($(quiet)gen_rst)
-
$(BUILDDIR)/ca.h.rst: ${UAPI}/dvb/ca.h ${PARSER} $(SRC_DIR)/ca.h.rst.exceptions
@$($(quiet)gen_rst)
@@ -36,9 +33,6 @@ $(BUILDDIR)/frontend.h.rst: ${UAPI}/dvb/frontend.h ${PARSER} $(SRC_DIR)/frontend
$(BUILDDIR)/net.h.rst: ${UAPI}/dvb/net.h ${PARSER} $(SRC_DIR)/net.h.rst.exceptions
@$($(quiet)gen_rst)
-$(BUILDDIR)/video.h.rst: ${UAPI}/dvb/video.h ${PARSER} $(SRC_DIR)/video.h.rst.exceptions
- @$($(quiet)gen_rst)
-
$(BUILDDIR)/videodev2.h.rst: ${UAPI}/videodev2.h ${PARSER} $(SRC_DIR)/videodev2.h.rst.exceptions
@$($(quiet)gen_rst)
diff --git a/Documentation/userspace-api/media/audio.h.rst.exceptions b/Documentation/userspace-api/media/audio.h.rst.exceptions
deleted file mode 100644
index cf6620477f73..000000000000
--- a/Documentation/userspace-api/media/audio.h.rst.exceptions
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-# Ignore header name
-ignore define _DVBAUDIO_H_
-
-# Undocumented audio caps, as this is a deprecated API anyway
-ignore define AUDIO_CAP_DTS
-ignore define AUDIO_CAP_LPCM
-ignore define AUDIO_CAP_MP1
-ignore define AUDIO_CAP_MP2
-ignore define AUDIO_CAP_MP3
-ignore define AUDIO_CAP_AAC
-ignore define AUDIO_CAP_OGG
-ignore define AUDIO_CAP_SDDS
-ignore define AUDIO_CAP_AC3
-
-# some typedefs should point to struct/enums
-replace typedef audio_mixer_t :c:type:`audio_mixer`
-replace typedef audio_status_t :c:type:`audio_status`
diff --git a/Documentation/userspace-api/media/drivers/hantro.rst b/Documentation/userspace-api/media/drivers/hantro.rst
new file mode 100644
index 000000000000..cd9754b4e005
--- /dev/null
+++ b/Documentation/userspace-api/media/drivers/hantro.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Hantro video decoder driver
+===========================
+
+The Hantro video decoder driver implements the following driver-specific controls:
+
+``V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP (integer)``
+ Specifies the number of bits that the Hantro HEVC video decoder driver
+ must skip in the slice segment header.
+ If non-IDR, the bits to be skipped go from syntax element "pic_output_flag"
+ to before syntax element "slice_temporal_mvp_enabled_flag".
+ If IDR, the skipped bits are just "pic_output_flag"
+ (separate_colour_plane_flag is not supported).
+
+.. note::
+
+ This control is not yet part of the public kernel API and
+ it is expected to change.
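
A sketch of how such a driver-specific integer control would be set before
queueing the slice data; since the control is not yet public API, the
visibility of its macro to userspace is an assumption here::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_slice_header_skip(int video_fd, int skip_bits)
    {
        struct v4l2_control ctrl;

        memset(&ctrl, 0, sizeof(ctrl));
        /* Driver-private control; name and availability assumed. */
        ctrl.id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP;
        ctrl.value = skip_bits;
        return ioctl(video_fd, VIDIOC_S_CTRL, &ctrl);
    }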
diff --git a/Documentation/userspace-api/media/drivers/index.rst b/Documentation/userspace-api/media/drivers/index.rst
index 1a9038f5f9fa..12e3c512d718 100644
--- a/Documentation/userspace-api/media/drivers/index.rst
+++ b/Documentation/userspace-api/media/drivers/index.rst
@@ -33,6 +33,7 @@ For more details see the file COPYING in the source distribution of Linux.
ccs
cx2341x-uapi
+ hantro
imx-uapi
max2175
meye-uapi
diff --git a/Documentation/userspace-api/media/dvb/dmx-fopen.rst b/Documentation/userspace-api/media/dvb/dmx-fopen.rst
index 8f0a2b831d4a..50b36eb4371e 100644
--- a/Documentation/userspace-api/media/dvb/dmx-fopen.rst
+++ b/Documentation/userspace-api/media/dvb/dmx-fopen.rst
@@ -82,7 +82,7 @@ appropriately.
:widths: 1 16
- - ``EMFILE``
- - “Too many open files”, i.e. no more filters available.
+ - "Too many open files", i.e. no more filters available.
The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/userspace-api/media/dvb/dmx-fread.rst b/Documentation/userspace-api/media/dvb/dmx-fread.rst
index 78e9daef595a..88c4cddf7c30 100644
--- a/Documentation/userspace-api/media/dvb/dmx-fread.rst
+++ b/Documentation/userspace-api/media/dvb/dmx-fread.rst
@@ -34,7 +34,7 @@ Description
This system call returns filtered data, which might be section or Packetized
Elementary Stream (PES) data. The filtered data is transferred from
-the driver’s internal circular buffer to ``buf``. The maximum amount of data
+the driver's internal circular buffer to ``buf``. The maximum amount of data
to be transferred is implied by count.
.. note::
diff --git a/Documentation/userspace-api/media/dvb/dmx-set-filter.rst b/Documentation/userspace-api/media/dvb/dmx-set-filter.rst
index f43455b7adae..1b8c8071b14f 100644
--- a/Documentation/userspace-api/media/dvb/dmx-set-filter.rst
+++ b/Documentation/userspace-api/media/dvb/dmx-set-filter.rst
@@ -37,7 +37,7 @@ parameters provided. A timeout may be defined stating number of seconds
to wait for a section to be loaded. A value of 0 means that no timeout
should be applied. Finally there is a flag field where it is possible to
state whether a section should be CRC-checked, whether the filter should
-be a ”one-shot” filter, i.e. if the filtering operation should be
+be a "one-shot" filter, i.e. if the filtering operation should be
stopped after the first section is received, and whether the filtering
operation should be started immediately (without waiting for a
:ref:`DMX_START` ioctl call). If a filter was previously set-up, this
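
Tying the two demux sections together, a sketch (not from the patch) of a
one-shot, CRC-checked section filter that starts immediately and is then
drained with read(); the adapter path is an assumption::

    /* Grab one PAT section (PID 0, table_id 0) from the first demux. */
    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    int main(void)
    {
        struct dmx_sct_filter_params params;
        unsigned char section[4096];
        ssize_t len;
        int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);

        if (fd < 0)
            return 1;
        memset(&params, 0, sizeof(params));
        params.pid = 0;                 /* PAT */
        params.filter.filter[0] = 0x00; /* table_id */
        params.filter.mask[0] = 0xff;
        params.timeout = 0;             /* 0 means no timeout, per the text */
        params.flags = DMX_CHECK_CRC | DMX_ONESHOT | DMX_IMMEDIATE_START;
        if (ioctl(fd, DMX_SET_FILTER, &params))
            return 1;
        /* read() transfers data from the driver's circular buffer. */
        len = read(fd, section, sizeof(section));
        if (len > 0)
            printf("got %zd-byte section\n", len);
        close(fd);
        return 0;
    }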
diff --git a/Documentation/userspace-api/media/dvb/headers.rst b/Documentation/userspace-api/media/dvb/headers.rst
index 9743ffc35096..88c3eb33a89e 100644
--- a/Documentation/userspace-api/media/dvb/headers.rst
+++ b/Documentation/userspace-api/media/dvb/headers.rst
@@ -14,10 +14,3 @@ Digital TV uAPI headers
.. kernel-include:: $BUILDDIR/ca.h.rst
.. kernel-include:: $BUILDDIR/net.h.rst
-
-Legacy uAPI
-***********
-
-.. kernel-include:: $BUILDDIR/audio.h.rst
-
-.. kernel-include:: $BUILDDIR/video.h.rst
diff --git a/Documentation/userspace-api/media/dvb/intro.rst b/Documentation/userspace-api/media/dvb/intro.rst
index a935f3914e56..6784ae79657c 100644
--- a/Documentation/userspace-api/media/dvb/intro.rst
+++ b/Documentation/userspace-api/media/dvb/intro.rst
@@ -107,7 +107,7 @@ Audio and video decoder
a Systems on a Chip (SoC) integrated circuit.
It may also not be needed for certain usages (e.g. for data-only
- uses like “internet over satellite”).
+ uses like "internet over satellite").
:ref:`stb_components` shows a crude schematic of the control and data
flow between those components.
@@ -148,9 +148,9 @@ individual devices are called:
- ``/dev/dvb/adapterN/caM``,
-where ``N`` enumerates the Digital TV cards in a system starting from 0, and
+where ``N`` enumerates the Digital TV cards in a system starting from 0, and
``M`` enumerates the devices of each type within each adapter, starting
-from 0, too. We will omit the “``/dev/dvb/adapterN/``\ ” in the further
+from 0, too. We will omit the "``/dev/dvb/adapterN/``\ " in the further
discussion of these devices.
More details about the data structures and function calls of all the
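
Following that naming scheme, a program opens a device by its full path,
with the omitted ``/dev/dvb/adapterN/`` prefix spelled out; a trivial
sketch::

    #include <fcntl.h>

    static int open_demux0(void)
    {
        /* N = 0 (first adapter), M = 0 (first demux device) */
        return open("/dev/dvb/adapter0/demux0", O_RDWR);
    }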
diff --git a/Documentation/userspace-api/media/dvb/legacy_dvb_apis.rst b/Documentation/userspace-api/media/dvb/legacy_dvb_apis.rst
index 6104879d728a..b97d56ee543c 100644
--- a/Documentation/userspace-api/media/dvb/legacy_dvb_apis.rst
+++ b/Documentation/userspace-api/media/dvb/legacy_dvb_apis.rst
@@ -11,11 +11,6 @@ The APIs described here **should not** be used on new drivers or applications.
The DVBv3 frontend API has issues with new delivery systems, including
DVB-S2, DVB-T2, ISDB, etc.
-There's just one driver for a very legacy hardware using the Digital TV
-audio and video APIs. No modern drivers should use it. Instead, audio and
-video should be using the V4L2 and ALSA APIs, and the pipelines should
-be set via the Media Controller API.
-
.. attention::
The APIs described here don't necessarily reflect the current
@@ -28,5 +23,3 @@ be set via the Media Controller API.
:maxdepth: 1
frontend_legacy_dvbv3_api
- video
- audio
diff --git a/Documentation/userspace-api/media/fdl-appendix.rst b/Documentation/userspace-api/media/fdl-appendix.rst
index 683ebed87017..b1bc725b4ec7 100644
--- a/Documentation/userspace-api/media/fdl-appendix.rst
+++ b/Documentation/userspace-api/media/fdl-appendix.rst
@@ -13,14 +13,14 @@ GNU Free Documentation License
===========
The purpose of this License is to make a manual, textbook, or other
-written document “free” in the sense of freedom: to assure everyone the
+written document "free" in the sense of freedom: to assure everyone the
effective freedom to copy and redistribute it, with or without modifying
it, either commercially or noncommercially. Secondarily, this License
preserves for the author and publisher a way to get credit for their
work, while not being considered responsible for modifications made by
others.
-This License is a kind of “copyleft”, which means that derivative works
+This License is a kind of "copyleft", which means that derivative works
of the document must themselves be free in the same sense. It
complements the GNU General Public License, which is a copyleft license
designed for free software.
@@ -44,21 +44,21 @@ works whose purpose is instruction or reference.
This License applies to any manual or other work that contains a notice
placed by the copyright holder saying it can be distributed under the
-terms of this License. The “Document”, below, refers to any such manual
+terms of this License. The "Document", below, refers to any such manual
or work. Any member of the public is a licensee, and is addressed as
-“you”.
+"you".
.. _fdl-modified:
-A “Modified Version” of the Document means any work containing the
+A "Modified Version" of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
.. _fdl-secondary:
-A “Secondary Section” is a named appendix or a front-matter section of
+A "Secondary Section" is a named appendix or a front-matter section of
the :ref:`Document <fdl-document>` that deals exclusively with the
relationship of the publishers or authors of the Document to the
Document's overall subject (or to related matters) and contains nothing
@@ -72,7 +72,7 @@ regarding them.
.. _fdl-invariant:
-The “Invariant Sections” are certain
+The "Invariant Sections" are certain
:ref:`Secondary Sections <fdl-secondary>` whose titles are designated,
as being those of Invariant Sections, in the notice that says that the
:ref:`Document <fdl-document>` is released under this License.
@@ -80,14 +80,14 @@ as being those of Invariant Sections, in the notice that says that the
.. _fdl-cover-texts:
-The “Cover Texts” are certain short passages of text that are listed, as
+The "Cover Texts" are certain short passages of text that are listed, as
Front-Cover Texts or Back-Cover Texts, in the notice that says that the
:ref:`Document <fdl-document>` is released under this License.
.. _fdl-transparent:
-A “Transparent” copy of the :ref:`Document <fdl-document>` means a
+A "Transparent" copy of the :ref:`Document <fdl-document>` means a
machine-readable copy, represented in a format whose specification is
available to the general public, whose contents can be viewed and edited
directly and straightforwardly with generic text editors or (for images
@@ -97,7 +97,7 @@ formatters or for automatic translation to a variety of formats suitable
for input to text formatters. A copy made in an otherwise Transparent
file format whose markup has been designed to thwart or discourage
subsequent modification by readers is not Transparent. A copy that is
-not “Transparent” is called “Opaque”.
+not "Transparent" is called "Opaque".
Examples of suitable formats for Transparent copies include plain ASCII
without markup, Texinfo input format, LaTeX input format, SGML or XML
@@ -111,10 +111,10 @@ word processors for output purposes only.
.. _fdl-title-page:
-The “Title Page” means, for a printed book, the title page itself, plus
+The "Title Page" means, for a printed book, the title page itself, plus
such following pages as are needed to hold, legibly, the material this
License requires to appear in the title page. For works in formats which
-do not have any title page as such, “Title Page” means the text near the
+do not have any title page as such, "Title Page" means the text near the
most prominent appearance of the work's title, preceding the beginning
of the body of the text.
@@ -242,11 +242,11 @@ Modified Version:
Include an unaltered copy of this License.
- **I.**
- Preserve the section entitled “History”, and its title, and add to it
+ Preserve the section entitled "History", and its title, and add to it
an item stating at least the title, year, new authors, and publisher
of the :ref:`Modified Version <fdl-modified>` as given on the
:ref:`Title Page <fdl-title-page>`. If there is no section entitled
- “History” in the :ref:`Document <fdl-document>`, create one stating
+ "History" in the :ref:`Document <fdl-document>`, create one stating
the title, year, authors, and publisher of the Document as given on
its Title Page, then add an item describing the Modified Version as
stated in the previous sentence.
@@ -256,13 +256,13 @@ Modified Version:
:ref:`Document <fdl-document>` for public access to a
:ref:`Transparent <fdl-transparent>` copy of the Document, and
likewise the network locations given in the Document for previous
- versions it was based on. These may be placed in the “History”
+ versions it was based on. These may be placed in the "History"
section. You may omit a network location for a work that was
published at least four years before the Document itself, or if the
original publisher of the version it refers to gives permission.
- **K.**
- In any section entitled “Acknowledgements” or “Dedications”, preserve
+ In any section entitled "Acknowledgements" or "Dedications", preserve
the section's title, and preserve in the section all the substance
and tone of each of the contributor acknowledgements and/or
dedications given therein.
@@ -274,11 +274,11 @@ Modified Version:
part of the section titles.
- **M.**
- Delete any section entitled “Endorsements”. Such a section may not be
+ Delete any section entitled "Endorsements". Such a section may not be
included in the :ref:`Modified Version <fdl-modified>`.
- **N.**
- Do not retitle any existing section as “Endorsements” or to conflict
+ Do not retitle any existing section as "Endorsements" or to conflict
in title with any :ref:`Invariant Section <fdl-invariant>`.
If the :ref:`Modified Version <fdl-modified>` includes new
@@ -290,7 +290,7 @@ of :ref:`Invariant Sections <fdl-invariant>` in the Modified Version's
license notice. These titles must be distinct from any other section
titles.
-You may add a section entitled “Endorsements”, provided it contains
+You may add a section entitled "Endorsements", provided it contains
nothing but endorsements of your
:ref:`Modified Version <fdl-modified>` by various parties--for
example, statements of peer review or that the text has been approved by
@@ -337,11 +337,11 @@ the original author or publisher of that section if known, or else a
unique number. Make the same adjustment to the section titles in the
list of Invariant Sections in the license notice of the combined work.
-In the combination, you must combine any sections entitled “History” in
-the various original documents, forming one section entitled “History”;
-likewise combine any sections entitled “Acknowledgements”, and any
-sections entitled “Dedications”. You must delete all sections entitled
-“Endorsements.”
+In the combination, you must combine any sections entitled "History" in
+the various original documents, forming one section entitled "History";
+likewise combine any sections entitled "Acknowledgements", and any
+sections entitled "Dedications". You must delete all sections entitled
+"Endorsements."
.. _fdl-section6:
@@ -372,7 +372,7 @@ with other separate and independent documents or works, in or on a
volume of a storage or distribution medium, does not as a whole count as
a :ref:`Modified Version <fdl-modified>` of the Document, provided no
compilation copyright is claimed for the compilation. Such a compilation
-is called an “aggregate”, and this License does not apply to the other
+is called an "aggregate", and this License does not apply to the other
self-contained works thus compiled with the Document , on account of
their being thus compiled, if they are not themselves derivative works
of the Document. If the :ref:`Cover Text <fdl-cover-texts>`
@@ -429,7 +429,7 @@ concerns. See
Each version of the License is given a distinguishing version number. If
the :ref:`Document <fdl-document>` specifies that a particular
-numbered version of this License “or any later version” applies to it,
+numbered version of this License "or any later version" applies to it,
you have the option of following the terms and conditions either of that
specified version or of any later version that has been published (not
as a draft) by the Free Software Foundation. If the Document does not
@@ -455,13 +455,13 @@ notices just after the title page:
being LIST THEIR TITLES, with the
:ref:`Front-Cover Texts <fdl-cover-texts>` being LIST, and with
the :ref:`Back-Cover Texts <fdl-cover-texts>` being LIST. A copy
- of the license is included in the section entitled “GNU Free
- Documentation License”.
+ of the license is included in the section entitled "GNU Free
+ Documentation License".
-If you have no :ref:`Invariant Sections <fdl-invariant>`, write “with
-no Invariant Sections” instead of saying which ones are invariant. If
-you have no :ref:`Front-Cover Texts <fdl-cover-texts>`, write “no
-Front-Cover Texts” instead of “Front-Cover Texts being LIST”; likewise
+If you have no :ref:`Invariant Sections <fdl-invariant>`, write "with
+no Invariant Sections" instead of saying which ones are invariant. If
+you have no :ref:`Front-Cover Texts <fdl-cover-texts>`, write "no
+Front-Cover Texts" instead of "Front-Cover Texts being LIST"; likewise
for :ref:`Back-Cover Texts <fdl-cover-texts>`.
If your document contains nontrivial examples of program code, we
diff --git a/Documentation/userspace-api/media/glossary.rst b/Documentation/userspace-api/media/glossary.rst
index cb165d7176b7..96a360edbf3b 100644
--- a/Documentation/userspace-api/media/glossary.rst
+++ b/Documentation/userspace-api/media/glossary.rst
@@ -116,7 +116,7 @@ Glossary
- :term:`RC API`; and
- :term:`V4L2 API`.
- See :doc:`index`.
+ See Documentation/userspace-api/media/index.rst.
MC API
**Media Controller API**
diff --git a/Documentation/userspace-api/media/index.rst b/Documentation/userspace-api/media/index.rst
index 7f42f83b9f59..d839904be085 100644
--- a/Documentation/userspace-api/media/index.rst
+++ b/Documentation/userspace-api/media/index.rst
@@ -11,12 +11,14 @@ used by media devices.
Please see:
-- :doc:`/admin-guide/media/index`
- for usage information about media subsystem and supported drivers;
+Documentation/admin-guide/media/index.rst
-- :doc:`/driver-api/media/index`
- for driver development information and Kernel APIs used by
- media devices;
+ - for usage information about the media subsystem and supported drivers;
+
+Documentation/driver-api/media/index.rst
+
+ - for driver development information and Kernel APIs used by
+ media devices;
.. only:: html
diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst
index 64d241daf63c..7b8e6738ff9e 100644
--- a/Documentation/userspace-api/media/v4l/biblio.rst
+++ b/Documentation/userspace-api/media/v4l/biblio.rst
@@ -51,7 +51,7 @@ ISO 13818-1
===========
-:title: ITU-T Rec. H.222.0 | ISO/IEC 13818-1 "Information technology — Generic coding of moving pictures and associated audio information: Systems"
+:title: ITU-T Rec. H.222.0 | ISO/IEC 13818-1 "Information technology --- Generic coding of moving pictures and associated audio information: Systems"
:author: International Telecommunication Union (http://www.itu.ch), International Organisation for Standardisation (http://www.iso.ch)
@@ -61,7 +61,7 @@ ISO 13818-2
===========
-:title: ITU-T Rec. H.262 | ISO/IEC 13818-2 "Information technology — Generic coding of moving pictures and associated audio information: Video"
+:title: ITU-T Rec. H.262 | ISO/IEC 13818-2 "Information technology --- Generic coding of moving pictures and associated audio information: Video"
:author: International Telecommunication Union (http://www.itu.ch), International Organisation for Standardisation (http://www.iso.ch)
@@ -150,7 +150,7 @@ ITU-T.81
========
-:title: ITU-T Recommendation T.81 "Information Technology — Digital Compression and Coding of Continous-Tone Still Images — Requirements and Guidelines"
+:title: ITU-T Recommendation T.81 "Information Technology --- Digital Compression and Coding of Continous-Tone Still Images --- Requirements and Guidelines"
:author: International Telecommunication Union (http://www.itu.int)
@@ -310,7 +310,7 @@ ISO 12232:2006
==============
-:title: Photography — Digital still cameras — Determination of exposure index, ISO speed ratings, standard output sensitivity, and recommended exposure index
+:title: Photography --- Digital still cameras --- Determination of exposure index, ISO speed ratings, standard output sensitivity, and recommended exposure index
:author: International Organization for Standardization (http://www.iso.org)
diff --git a/Documentation/userspace-api/media/v4l/dev-decoder.rst b/Documentation/userspace-api/media/v4l/dev-decoder.rst
index 3d4138a4ba69..5b9b83feeceb 100644
--- a/Documentation/userspace-api/media/v4l/dev-decoder.rst
+++ b/Documentation/userspace-api/media/v4l/dev-decoder.rst
@@ -38,7 +38,7 @@ Conventions and Notations Used in This Document
6. i = [a..b]: sequence of integers from a to b, inclusive, i.e. i =
[0..2]: i = 0, 1, 2.
-7. Given an ``OUTPUT`` buffer A, then A’ represents a buffer on the ``CAPTURE``
+7. Given an ``OUTPUT`` buffer A, then A' represents a buffer on the ``CAPTURE``
queue containing data that resulted from processing buffer A.
.. _decoder-glossary:
@@ -288,7 +288,7 @@ Initialization
Changing the ``OUTPUT`` format may change the currently set ``CAPTURE``
format. How the new ``CAPTURE`` format is determined is up to the decoder
- and the client must ensure it matches its needs afterwards.
+ and the client must ensure it matches its needs afterwards.
2. Allocate source (bytestream) buffers via :c:func:`VIDIOC_REQBUFS` on
``OUTPUT``.
@@ -874,7 +874,7 @@ it may be affected as per normal decoder operation.
any of the following results on the ``CAPTURE`` queue is allowed:
- {A’, B’, G’, H’}, {A’, G’, H’}, {G’, H’}.
+ {A', B', G', H'}, {A', G', H'}, {G', H'}.
To determine the CAPTURE buffer containing the first decoded frame after the
seek, the client may observe the timestamps to match the CAPTURE and OUTPUT
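
The timestamp matching described there can be sketched like this: dequeue a
CAPTURE buffer and compare its timestamp with the one the client put on
OUTPUT buffer A. A multi-planar stateful decoder is assumed::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Returns 1 if the dequeued CAPTURE buffer is A' for the OUTPUT
     * buffer stamped with *output_ts, 0 if not, -1 on error. */
    static int capture_matches_output(int fd, const struct timeval *output_ts)
    {
        struct v4l2_buffer buf;
        struct v4l2_plane planes[VIDEO_MAX_PLANES];

        memset(&buf, 0, sizeof(buf));
        memset(planes, 0, sizeof(planes));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.m.planes = planes;
        buf.length = VIDEO_MAX_PLANES;
        if (ioctl(fd, VIDIOC_DQBUF, &buf))
            return -1;
        /* The decoder copies the OUTPUT timestamp to the CAPTURE buffer. */
        return buf.timestamp.tv_sec == output_ts->tv_sec &&
               buf.timestamp.tv_usec == output_ts->tv_usec;
    }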
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
index 3fc04daa9ffb..72f5e85b4f34 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
@@ -1244,3 +1244,217 @@ FWHT Flags
* - __u8
- ``padding[3]``
- Applications and drivers must set this to zero.
+
+.. _v4l2-codec-stateless-mpeg2:
+
+``V4L2_CID_STATELESS_MPEG2_SEQUENCE (struct)``
+ Specifies the sequence parameters (as extracted from the bitstream) for the
+ associated MPEG-2 slice data. This includes fields matching the syntax
+ elements from the sequence header and sequence extension parts of the
+ bitstream as specified by :ref:`mpeg2part2`.
+
+.. c:type:: v4l2_ctrl_mpeg2_sequence
+
+.. raw:: latex
+
+ \small
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.4cm}|p{6.5cm}|p{9.4cm}|
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_sequence
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u16
+ - ``horizontal_size``
+ - The width of the displayable part of the frame's luminance component.
+ * - __u16
+ - ``vertical_size``
+ - The height of the displayable part of the frame's luminance component.
+ * - __u32
+ - ``vbv_buffer_size``
+ - Used to calculate the required size of the video buffering verifier,
+ defined (in bits) as: 16 * 1024 * vbv_buffer_size.
+ * - __u16
+ - ``profile_and_level_indication``
+ - The current profile and level indication as extracted from the
+ bitstream.
+ * - __u8
+ - ``chroma_format``
+ - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4).
+ * - __u8
+ - ``flags``
+ - See :ref:`MPEG-2 Sequence Flags <mpeg2_sequence_flags>`.
+
+.. _mpeg2_sequence_flags:
+
+``MPEG-2 Sequence Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE``
+ - 0x01
+ - Indication that all the frames for the sequence are progressive instead
+ of interlaced.
+
+.. raw:: latex
+
+ \normalsize
+
+``V4L2_CID_STATELESS_MPEG2_PICTURE (struct)``
+ Specifies the picture parameters (as extracted from the bitstream) for the
+ associated MPEG-2 slice data. This includes fields matching the syntax
+ elements from the picture header and picture coding extension parts of the
+ bitstream as specified by :ref:`mpeg2part2`.
+
+.. c:type:: v4l2_ctrl_mpeg2_picture
+
+.. raw:: latex
+
+ \small
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.0cm}|p{5.6cm}|p{10.7cm}|
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_picture
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u64
+ - ``backward_ref_ts``
+ - Timestamp of the V4L2 capture buffer to use as backward reference, used
+ with B-coded and P-coded frames. The timestamp refers to the
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __u64
+ - ``forward_ref_ts``
+ - Timestamp for the V4L2 capture buffer to use as forward reference, used
+ with B-coded frames. The timestamp refers to the ``timestamp`` field in
+ struct :c:type:`v4l2_buffer`. Use the :c:func:`v4l2_timeval_to_ns()`
+ function to convert the struct :c:type:`timeval` in struct
+ :c:type:`v4l2_buffer` to a __u64.
+ * - __u32
+ - ``flags``
+ - See :ref:`MPEG-2 Picture Flags <mpeg2_picture_flags>`.
+ * - __u8
+ - ``f_code[2][2]``
+ - Motion vector codes.
+ * - __u8
+ - ``picture_coding_type``
+ - Picture coding type for the frame covered by the current slice
+ (V4L2_MPEG2_PIC_CODING_TYPE_I, V4L2_MPEG2_PIC_CODING_TYPE_P or
+ V4L2_MPEG2_PIC_CODING_TYPE_B).
+ * - __u8
+ - ``picture_structure``
+ - Picture structure (1: interlaced top field, 2: interlaced bottom field,
+ 3: progressive frame).
+ * - __u8
+ - ``intra_dc_precision``
+ - Precision of Discrete Cosine transform (0: 8 bits precision,
+ 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision).
+ * - __u8
+ - ``reserved[5]``
+ - Applications and drivers must set this to zero.
+
+.. _mpeg2_picture_flags:
+
+``MPEG-2 Picture Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST``
+ - 0x00000001
+ - If set and the stream is interlaced, the top field is output first.
+ * - ``V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT``
+ - 0x00000002
+ - If set, only frame-DCT and frame prediction are used.
+ * - ``V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV``
+ - 0x00000004
+ - If set, motion vectors are coded for intra macroblocks.
+ * - ``V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE``
+ - 0x00000008
+ - This flag affects the inverse quantization process.
+ * - ``V4L2_MPEG2_PIC_FLAG_INTRA_VLC``
+ - 0x00000010
+ - This flag affects the decoding of transform coefficient data.
+ * - ``V4L2_MPEG2_PIC_FLAG_ALT_SCAN``
+ - 0x00000020
+ - This flag affects the decoding of transform coefficient data.
+ * - ``V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST``
+ - 0x00000040
+ - This flag affects the decoding process of progressive frames.
+ * - ``V4L2_MPEG2_PIC_FLAG_PROGRESSIVE``
+ - 0x00000080
+ - Indicates whether the current frame is progressive.
+
+.. raw:: latex
+
+ \normalsize
+
+``V4L2_CID_STATELESS_MPEG2_QUANTISATION (struct)``
+ Specifies quantisation matrices, in zigzag scanning order, for the
+ associated MPEG-2 slice data. This control is initialized by the kernel
+ to the matrices' default values. If the bitstream transmits user-defined
+ quantisation matrices, applications are expected to pass them through this
+ control. Applications are also expected to reload the default values
+ through this control whenever the quantisation matrices need to be reset,
+ for instance on a sequence header. This process is specified in section
+ 6.3.7 "Quant matrix extension" of the specification.
+
+.. c:type:: v4l2_ctrl_mpeg2_quantisation
+
+.. tabularcolumns:: |p{0.8cm}|p{8.0cm}|p{8.5cm}|
+
+.. cssclass:: longtable
+
+.. raw:: latex
+
+ \small
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_quantisation
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``intra_quantiser_matrix[64]``
+ - The quantisation matrix coefficients for intra-coded frames, in zigzag
+ scanning order. It is relevant for both luma and chroma components,
+ although it can be superseded by the chroma-specific matrix for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``non_intra_quantiser_matrix[64]``
+ - The quantisation matrix coefficients for non-intra-coded frames, in
+ zigzag scanning order. It is relevant for both luma and chroma
+ components, although it can be superseded by the chroma-specific matrix
+ for non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_intra_quantiser_matrix[64]``
+ - The quantisation matrix coefficients for the chrominance component of
+ intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_non_intra_quantiser_matrix[64]``
+ - The quantisation matrix coefficients for the chrominance component of
+ non-intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
+
+.. raw:: latex
+
+ \normalsize
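
Putting the new controls to use: a sketch (illustrative, not from the
patch) of attaching a filled struct v4l2_ctrl_mpeg2_sequence to a media
request before queueing it, assuming a stateless decoder fd and a request
fd allocated through the Media Controller API::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_mpeg2_sequence(int video_fd, int request_fd,
                                  const struct v4l2_ctrl_mpeg2_sequence *seq)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE;
        ctrl.size = sizeof(*seq);
        ctrl.ptr = (void *)seq;

        memset(&ctrls, 0, sizeof(ctrls));
        /* Apply the control to the request, not to the current state. */
        ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
        ctrls.request_fd = request_fd;
        ctrls.count = 1;
        ctrls.controls = &ctrl;
        return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }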
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
index b0de4e6e7ebd..8c6e2a11ed95 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
@@ -1606,223 +1606,6 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (integer)``
Indicates bit rate (bps) for hierarchical coding layer 6 for H264 encoder.
-.. _v4l2-mpeg-mpeg2:
-
-``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)``
- Specifies the slice parameters (as extracted from the bitstream) for the
- associated MPEG-2 slice data. This includes the necessary parameters for
- configuring a stateless hardware decoding pipeline for MPEG-2.
- The bitstream parameters are defined according to :ref:`mpeg2part2`.
-
- .. note::
-
- This compound control is not yet part of the public kernel API and
- it is expected to change.
-
-.. c:type:: v4l2_ctrl_mpeg2_slice_params
-
-.. tabularcolumns:: |p{5.6cm}|p{4.6cm}|p{7.1cm}|
-
-.. cssclass:: longtable
-
-.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u32
- - ``bit_size``
- - Size (in bits) of the current slice data.
- * - __u32
- - ``data_bit_offset``
- - Offset (in bits) to the video data in the current slice data.
- * - struct :c:type:`v4l2_mpeg2_sequence`
- - ``sequence``
- - Structure with MPEG-2 sequence metadata, merging relevant fields from
- the sequence header and sequence extension parts of the bitstream.
- * - struct :c:type:`v4l2_mpeg2_picture`
- - ``picture``
- - Structure with MPEG-2 picture metadata, merging relevant fields from
- the picture header and picture coding extension parts of the bitstream.
- * - __u64
- - ``backward_ref_ts``
- - Timestamp of the V4L2 capture buffer to use as backward reference, used
- with B-coded and P-coded frames. The timestamp refers to the
- ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
- :c:func:`v4l2_timeval_to_ns()` function to convert the struct
- :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
- * - __u64
- - ``forward_ref_ts``
- - Timestamp for the V4L2 capture buffer to use as forward reference, used
- with B-coded frames. The timestamp refers to the ``timestamp`` field in
- struct :c:type:`v4l2_buffer`. Use the :c:func:`v4l2_timeval_to_ns()`
- function to convert the struct :c:type:`timeval` in struct
- :c:type:`v4l2_buffer` to a __u64.
- * - __u32
- - ``quantiser_scale_code``
- - Code used to determine the quantization scale to use for the IDCT.
-
-.. c:type:: v4l2_mpeg2_sequence
-
-.. cssclass:: longtable
-
-.. tabularcolumns:: |p{1.4cm}|p{6.5cm}|p{9.4cm}|
-
-.. flat-table:: struct v4l2_mpeg2_sequence
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u16
- - ``horizontal_size``
- - The width of the displayable part of the frame's luminance component.
- * - __u16
- - ``vertical_size``
- - The height of the displayable part of the frame's luminance component.
- * - __u32
- - ``vbv_buffer_size``
- - Used to calculate the required size of the video buffering verifier,
- defined (in bits) as: 16 * 1024 * vbv_buffer_size.
- * - __u16
- - ``profile_and_level_indication``
- - The current profile and level indication as extracted from the
- bitstream.
- * - __u8
- - ``progressive_sequence``
- - Indication that all the frames for the sequence are progressive instead
- of interlaced.
- * - __u8
- - ``chroma_format``
- - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4).
-
-.. c:type:: v4l2_mpeg2_picture
-
-.. raw:: latex
-
- \small
-
-.. cssclass:: longtable
-
-.. tabularcolumns:: |p{1.0cm}|p{5.6cm}|p{10.7cm}|
-
-.. flat-table:: struct v4l2_mpeg2_picture
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u8
- - ``picture_coding_type``
- - Picture coding type for the frame covered by the current slice
- (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or
- V4L2_MPEG2_PICTURE_CODING_TYPE_B).
- * - __u8
- - ``f_code[2][2]``
- - Motion vector codes.
- * - __u8
- - ``intra_dc_precision``
- - Precision of Discrete Cosine transform (0: 8 bits precision,
- 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision).
- * - __u8
- - ``picture_structure``
- - Picture structure (1: interlaced top field, 2: interlaced bottom field,
- 3: progressive frame).
- * - __u8
- - ``top_field_first``
- - If set to 1 and interlaced stream, top field is output first.
- * - __u8
- - ``frame_pred_frame_dct``
- - If set to 1, only frame-DCT and frame prediction are used.
- * - __u8
- - ``concealment_motion_vectors``
- - If set to 1, motion vectors are coded for intra macroblocks.
- * - __u8
- - ``q_scale_type``
- - This flag affects the inverse quantization process.
- * - __u8
- - ``intra_vlc_format``
- - This flag affects the decoding of transform coefficient data.
- * - __u8
- - ``alternate_scan``
- - This flag affects the decoding of transform coefficient data.
- * - __u8
- - ``repeat_first_field``
- - This flag affects the decoding process of progressive frames.
- * - __u16
- - ``progressive_frame``
- - Indicates whether the current frame is progressive.
-
-.. raw:: latex
-
- \normalsize
-
-``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)``
- Specifies quantization matrices (as extracted from the bitstream) for the
- associated MPEG-2 slice data.
-
- .. note::
-
- This compound control is not yet part of the public kernel API and
- it is expected to change.
-
-.. c:type:: v4l2_ctrl_mpeg2_quantization
-
-.. tabularcolumns:: |p{0.8cm}|p{8.0cm}|p{8.5cm}|
-
-.. cssclass:: longtable
-
-.. raw:: latex
-
- \small
-
-.. flat-table:: struct v4l2_ctrl_mpeg2_quantization
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u8
- - ``load_intra_quantiser_matrix``
- - One bit to indicate whether to load the ``intra_quantiser_matrix`` data.
- * - __u8
- - ``load_non_intra_quantiser_matrix``
- - One bit to indicate whether to load the ``non_intra_quantiser_matrix``
- data.
- * - __u8
- - ``load_chroma_intra_quantiser_matrix``
- - One bit to indicate whether to load the
- ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV
- formats.
- * - __u8
- - ``load_chroma_non_intra_quantiser_matrix``
- - One bit to indicate whether to load the
- ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0
- YUV formats.
- * - __u8
- - ``intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for intra-coded frames, in zigzag
- scanning order. It is relevant for both luma and chroma components,
- although it can be superseded by the chroma-specific matrix for
- non-4:2:0 YUV formats.
- * - __u8
- - ``non_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for non-intra-coded frames, in
- zigzag scanning order. It is relevant for both luma and chroma
- components, although it can be superseded by the chroma-specific matrix
- for non-4:2:0 YUV formats.
- * - __u8
- - ``chroma_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for the chominance component of
- intra-coded frames, in zigzag scanning order. Only relevant for
- non-4:2:0 YUV formats.
- * - __u8
- - ``chroma_non_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for the chrominance component of
- non-intra-coded frames, in zigzag scanning order. Only relevant for
- non-4:2:0 YUV formats.
-
-.. raw:: latex
-
- \normalsize
-
``V4L2_CID_FWHT_I_FRAME_QP (integer)``
Quantization parameter for an I frame for FWHT. Valid range: from 1
to 31.
@@ -2924,6 +2707,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
* - __u8
- ``chroma_format_idc``
-
+ * - __u8
+ - ``sps_max_sub_layers_minus1``
+ -
* - __u64
- ``flags``
- See :ref:`Sequence Parameter Set Flags <hevc_sps_flags>`
@@ -3000,6 +2786,12 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
* - __u8
- ``num_extra_slice_header_bits``
-
+ * - __u8
+ - ``num_ref_idx_l0_default_active_minus1``
+ - Specifies the inferred value of num_ref_idx_l0_active_minus1
+ * - __u8
+ - ``num_ref_idx_l1_default_active_minus1``
+ - Specifies the inferred value of num_ref_idx_l1_active_minus1
* - __s8
- ``init_qp_minus26``
-
@@ -3053,7 +2845,7 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
:stub-columns: 0
:widths: 1 1 2
- * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
+ * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED``
- 0x00000001
-
* - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
@@ -3110,6 +2902,14 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
* - ``V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT``
- 0x00040000
-
+ * - ``V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT``
+ - 0x00080000
+ - Specifies the presence of deblocking filter control syntax elements in
+ the PPS
+ * - ``V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING``
+ - 0x00100000
+ - Specifies that tile column boundaries and likewise tile row boundaries
+ are distributed uniformly across the picture
.. raw:: latex
@@ -3201,31 +3001,14 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
- ``pic_struct``
-
* - __u8
- - ``num_active_dpb_entries``
- - The number of entries in ``dpb``.
- * - __u8
- ``ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
- The list of L0 reference elements as indices in the DPB.
* - __u8
- ``ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
- The list of L1 reference elements as indices in the DPB.
* - __u8
- - ``num_rps_poc_st_curr_before``
- - The number of reference pictures in the short-term set that come before
- the current frame.
- * - __u8
- - ``num_rps_poc_st_curr_after``
- - The number of reference pictures in the short-term set that come after
- the current frame.
- * - __u8
- - ``num_rps_poc_lt_curr``
- - The number of reference pictures in the long-term set.
- * - __u8
- - ``padding[7]``
+ - ``padding``
- Applications and drivers must set this to zero.
- * - struct :c:type:`v4l2_hevc_dpb_entry`
- - ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
- - The decoded picture buffer, for meta-data about reference frames.
* - struct :c:type:`v4l2_hevc_pred_weight_table`
- ``pred_weight_table``
- The prediction weight coefficients for inter-picture prediction.
@@ -3277,6 +3060,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
* - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
- 0x00000100
-
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT``
+ - 0x00000200
+ -
.. raw:: latex
@@ -3478,3 +3264,78 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
encoding the next frame queued after setting this control.
This provides a bitmask which consists of bits [0, LTR_COUNT-1].
This is applicable to the H264 and HEVC encoders.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (struct)``
+ Specifies various decode parameters, especially the reference picture order
+ count (POC) for all the lists (short, long, before, current, after) and the
+ number of entries for each of them.
+ These parameters are defined according to :ref:`hevc`.
+ They are described in section 8.3 "Slice decoding process" of the
+ specification.
+
+.. c:type:: v4l2_ctrl_hevc_decode_params
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_decode_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __s32
+ - ``pic_order_cnt_val``
+ - PicOrderCntVal as described in section 8.3.1 "Decoding process
+ for picture order count" of the specification.
+ * - __u8
+ - ``num_active_dpb_entries``
+ - The number of entries in ``dpb``.
+ * - struct :c:type:`v4l2_hevc_dpb_entry`
+ - ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The decoded picture buffer, for meta-data about reference frames.
+ * - __u8
+ - ``num_poc_st_curr_before``
+ - The number of reference pictures in the short-term set that come before
+ the current frame.
+ * - __u8
+ - ``num_poc_st_curr_after``
+ - The number of reference pictures in the short-term set that come after
+ the current frame.
+ * - __u8
+ - ``num_poc_lt_curr``
+ - The number of reference pictures in the long-term set.
+ * - __u8
+ - ``poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - PocStCurrBefore as described in section 8.3.2 "Decoding process for reference
+ picture set".
+ * - __u8
+ - ``poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - PocStCurrAfter as described in section 8.3.2 "Decoding process for reference
+ picture set".
+ * - __u8
+ - ``poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - PocLtCurr as described in section 8.3.2 "Decoding process for reference
+ picture set".
+ * - __u64
+ - ``flags``
+ - See :ref:`Decode Parameters Flags <hevc_decode_params_flags>`
+
+.. _hevc_decode_params_flags:
+
+``Decode Parameters Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR``
+ - 0x00000004
+ -
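
A sketch of filling these decode parameters for an IDR frame with an empty
DPB, following the field descriptions above. The HEVC stateless controls
are not yet public uAPI, so the header that exports this struct is an
assumption::

    /* struct v4l2_ctrl_hevc_decode_params comes from the (still private)
     * HEVC control header. */
    #include <string.h>

    static void
    fill_idr_decode_params(struct v4l2_ctrl_hevc_decode_params *dec)
    {
        memset(dec, 0, sizeof(*dec));
        dec->pic_order_cnt_val = 0;       /* PicOrderCntVal resets at IDR */
        dec->num_active_dpb_entries = 0;  /* nothing to reference yet */
        dec->num_poc_st_curr_before = 0;
        dec->num_poc_st_curr_after = 0;
        dec->num_poc_lt_curr = 0;
        dec->flags = V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC |
                     V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC |
                     V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR;
    }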
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst b/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
index 6dba70da822b..0ede39907ee2 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
@@ -112,12 +112,13 @@ Compressed Formats
- 'MG2S'
- MPEG-2 parsed slice data, as extracted from the MPEG-2 bitstream.
This format is adapted for stateless video decoders that implement a
- MPEG-2 pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
+ MPEG-2 pipeline with the :ref:`stateless_decoder`.
Metadata associated with the frame to decode is required to be passed
- through the ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS`` control and
- quantization matrices can optionally be specified through the
- ``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION`` control.
- See the :ref:`associated Codec Control IDs <v4l2-mpeg-mpeg2>`.
+ through the ``V4L2_CID_STATELESS_MPEG2_SEQUENCE`` and
+ ``V4L2_CID_STATELESS_MPEG2_PICTURE`` controls.
+ Quantisation matrices can optionally be specified through the
+ ``V4L2_CID_STATELESS_MPEG2_QUANTISATION`` control.
+ See the :ref:`associated Codec Control IDs <v4l2-codec-stateless-mpeg2>`.
Exactly one output and one capture buffer must be provided for use with
this pixel format. The output buffer must contain the appropriate number
of macroblocks to decode a full corresponding frame to the matching
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst b/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
index 5f33d35532ef..84d81dd7a7b5 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
@@ -78,4 +78,4 @@ hardware and algorithm details.
Intel IPU3 ImgU uAPI data types
===============================
-.. kernel-doc:: drivers/staging/media/ipu3/include/intel-ipu3.h
+.. kernel-doc:: drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
diff --git a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
index 3ba22983d21f..2d6bc8d94380 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
@@ -221,6 +221,18 @@ still cause this situation.
- ``p_vp8_frame``
- A pointer to a struct :c:type:`v4l2_ctrl_vp8_frame`. Valid if this control is
of type ``V4L2_CTRL_TYPE_VP8_FRAME``.
+ * - struct :c:type:`v4l2_ctrl_mpeg2_sequence` *
+ - ``p_mpeg2_sequence``
+ - A pointer to a struct :c:type:`v4l2_ctrl_mpeg2_sequence`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_MPEG2_SEQUENCE``.
+ * - struct :c:type:`v4l2_ctrl_mpeg2_picture` *
+ - ``p_mpeg2_picture``
+ - A pointer to a struct :c:type:`v4l2_ctrl_mpeg2_picture`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_MPEG2_PICTURE``.
+ * - struct :c:type:`v4l2_ctrl_mpeg2_quantisation` *
+ - ``p_mpeg2_quantisation``
+ - A pointer to a struct :c:type:`v4l2_ctrl_mpeg2_quantisation`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_MPEG2_QUANTISATION``.
* - struct :c:type:`v4l2_ctrl_hdr10_cll_info` *
- ``p_hdr10_cll``
- A pointer to a struct :c:type:`v4l2_ctrl_hdr10_cll_info`. Valid if this control is
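
With the rows added above, the MPEG-2 payloads can also be attached through
the named union members rather than the generic pointer; a fragment,
assuming ``picture`` has been filled from the bitstream::

    struct v4l2_ctrl_mpeg2_picture picture = { 0 }; /* from the bitstream */
    struct v4l2_ext_control ctrl = {
        .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
        .size = sizeof(picture),
        .p_mpeg2_picture = &picture, /* named alternative to .ptr */
    };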
diff --git a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
index 8a285daedc6a..f9ecf6276129 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
@@ -417,18 +417,24 @@ See also the examples in :ref:`control`.
- any
- An unsigned 32-bit valued control ranging from minimum to maximum
inclusive. The step value indicates the increment between values.
- * - ``V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS``
+ * - ``V4L2_CTRL_TYPE_MPEG2_QUANTISATION``
- n/a
- n/a
- n/a
- - A struct :c:type:`v4l2_ctrl_mpeg2_slice_params`, containing MPEG-2
- slice parameters for stateless video decoders.
- * - ``V4L2_CTRL_TYPE_MPEG2_QUANTIZATION``
+ - A struct :c:type:`v4l2_ctrl_mpeg2_quantisation`, containing MPEG-2
+ quantisation matrices for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_MPEG2_SEQUENCE``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_mpeg2_sequence`, containing MPEG-2
+ sequence parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_MPEG2_PICTURE``
- n/a
- n/a
- n/a
- - A struct :c:type:`v4l2_ctrl_mpeg2_quantization`, containing MPEG-2
- quantization matrices for stateless video decoders.
+ - A struct :c:type:`v4l2_ctrl_mpeg2_picture`, containing MPEG-2
+ picture parameters for stateless video decoders.
* - ``V4L2_CTRL_TYPE_AREA``
- n/a
- n/a
@@ -495,6 +501,12 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_vp8_frame`, containing VP8
frame parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_decode_params`, containing HEVC
+ decoding parameters for stateless video decoders.
.. raw:: latex
diff --git a/Documentation/userspace-api/media/video.h.rst.exceptions b/Documentation/userspace-api/media/video.h.rst.exceptions
deleted file mode 100644
index ea9de59ad8b7..000000000000
--- a/Documentation/userspace-api/media/video.h.rst.exceptions
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-# Ignore header name
-ignore define _UAPI_DVBVIDEO_H_
-
-# This is a deprecated obscure API. Just ignore things we don't know
-ignore define VIDEO_CMD_PLAY
-ignore define VIDEO_CMD_STOP
-ignore define VIDEO_CMD_FREEZE
-ignore define VIDEO_CMD_CONTINUE
-ignore define VIDEO_CMD_FREEZE_TO_BLACK
-ignore define VIDEO_CMD_STOP_TO_BLACK
-ignore define VIDEO_CMD_STOP_IMMEDIATELY
-ignore define VIDEO_PLAY_FMT_NONE
-ignore define VIDEO_PLAY_FMT_GOP
-ignore define VIDEO_VSYNC_FIELD_UNKNOWN
-ignore define VIDEO_VSYNC_FIELD_ODD
-ignore define VIDEO_VSYNC_FIELD_EVEN
-ignore define VIDEO_VSYNC_FIELD_PROGRESSIVE
-ignore define VIDEO_EVENT_SIZE_CHANGED
-ignore define VIDEO_EVENT_FRAME_RATE_CHANGED
-ignore define VIDEO_EVENT_DECODER_STOPPED
-ignore define VIDEO_EVENT_VSYNC
-ignore define VIDEO_CAP_MPEG1
-ignore define VIDEO_CAP_MPEG2
-ignore define VIDEO_CAP_SYS
-ignore define VIDEO_CAP_PROG
-ignore define VIDEO_CAP_SPU
-ignore define VIDEO_CAP_NAVI
-ignore define VIDEO_CAP_CSS
-
-# some typedefs should point to struct/enums
-replace typedef video_format_t :c:type:`video_format`
-replace typedef video_system_t :c:type:`video_system`
-replace typedef video_displayformat_t :c:type:`video_displayformat`
-replace typedef video_size_t :c:type:`video_size`
-replace typedef video_stream_source_t :c:type:`video_stream_source`
-replace typedef video_play_state_t :c:type:`video_play_state`
-replace typedef video_navi_pack_t :c:type:`video_navi_pack`
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index f59940352faa..2217b56c2686 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -134,8 +134,9 @@ replace symbol V4L2_CTRL_TYPE_STRING :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U16 :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U32 :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U8 :c:type:`v4l2_ctrl_type`
-replace symbol V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
-replace symbol V4L2_CTRL_TYPE_MPEG2_QUANTIZATION :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MPEG2_SEQUENCE :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MPEG2_PICTURE :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MPEG2_QUANTISATION :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_PPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SCALING_MATRIX :c:type:`v4l2_ctrl_type`
diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
index bd9165241b6c..d61219889e49 100644
--- a/Documentation/userspace-api/seccomp_filter.rst
+++ b/Documentation/userspace-api/seccomp_filter.rst
@@ -250,14 +250,26 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)`` (or ``poll()``) on a
seccomp notification fd to receive a ``struct seccomp_notif``, which contains
five members: the input length of the structure, a unique-per-filter ``id``,
the ``pid`` of the task which triggered this request (which may be 0 if the
-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
-whether or not the notification is a result of a non-fatal signal, and the
-``data`` passed to seccomp. Userspace can then make a decision based on this
-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
-response, indicating what should be returned to userspace. The ``id`` member of
-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
-seccomp_notif``.
+task is in a pid ns not visible from the listener's pid namespace). The
+notification also contains the ``data`` passed to seccomp, and a ``flags``
+member. The structure should be zeroed out prior to calling the ioctl.
+
+Userspace can then make a decision based on this information about what to do,
+and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
+returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
+be the same ``id`` as in ``struct seccomp_notif``.
+
+Userspace can also add file descriptors to the notifying process via
+``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
+``struct seccomp_notif_addfd`` should be the same ``id`` as in
+``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
+like O_EXEC on the file descriptor in the notifying process. If the supervisor
+wants to inject the file descriptor with a specific number, the
+``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, with the ``newfd`` member set to
+the specific number to use. If that file descriptor is already open in the
+notifying process, it will be replaced. The supervisor can also add an FD and
+respond atomically by using the ``SECCOMP_ADDFD_FLAG_SEND`` flag, in which case
+the return value will be the injected file descriptor number.
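+
+As an illustration, the flow described above might look like the following
+minimal sketch (error handling is mostly omitted; ``listener`` and ``srcfd``
+are assumptions of this example, not part of the API)::
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/seccomp.h>
+
+    static int handle_one(int listener, int srcfd)
+    {
+            struct seccomp_notif req;
+            struct seccomp_notif_resp resp;
+            struct seccomp_notif_addfd addfd;
+
+            /* The structure should be zeroed out prior to the ioctl. */
+            memset(&req, 0, sizeof(req));
+            if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
+                    return -1;
+
+            /* Inject srcfd into the notifying process. */
+            memset(&addfd, 0, sizeof(addfd));
+            addfd.id = req.id;          /* same id as the notification */
+            addfd.srcfd = srcfd;
+            addfd.newfd_flags = 0;      /* flags for the new fd, e.g. O_CLOEXEC */
+            int injected = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
+
+            /* Tell the kernel what the intercepted syscall should return. */
+            memset(&resp, 0, sizeof(resp));
+            resp.id = req.id;           /* same id again */
+            resp.val = injected;
+            return ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
+    }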
It is worth noting that ``struct seccomp_data`` contains the values of register
arguments to the syscall, but does not contain pointers to memory. The task's
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 7fcb2fd38f42..c7b165ca70b6 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -688,9 +688,14 @@ MSRs that have been set successfully.
Defines the vcpu responses to the cpuid instruction. Applications
should use the KVM_SET_CPUID2 ioctl if available.
-Note, when this IOCTL fails, KVM gives no guarantees that previous valid CPUID
-configuration (if there is) is not corrupted. Userspace can get a copy of the
-resulting CPUID configuration through KVM_GET_CPUID2 in case.
+Caveat emptor:
+ - If this IOCTL fails, KVM gives no guarantees that previous valid CPUID
+ configuration (if there is) is not corrupted. Userspace can get a copy
+ of the resulting CPUID configuration through KVM_GET_CPUID2 in case.
+ - Using KVM_SET_CPUID{,2} after KVM_RUN, i.e. changing the guest vCPU model
+ after running the guest, may cause guest instability.
+ - Using heterogeneous CPUID configurations, modulo APIC IDs, topology, etc...
+ may cause guest instability.
::
@@ -5034,6 +5039,260 @@ see KVM_XEN_VCPU_SET_ATTR above.
The KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST type may not be used
with the KVM_XEN_VCPU_GET_ATTR ioctl.
+4.130 KVM_ARM_MTE_COPY_TAGS
+---------------------------
+
+:Capability: KVM_CAP_ARM_MTE
+:Architectures: arm64
+:Type: vm ioctl
+:Parameters: struct kvm_arm_copy_mte_tags
+:Returns: number of bytes copied, < 0 on error (-EINVAL for incorrect
+ arguments, -EFAULT if memory cannot be accessed).
+
+::
+
+ struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+ };
+
+Copies Memory Tagging Extension (MTE) tags to/from guest tag memory. The
+``guest_ipa`` and ``length`` fields must be ``PAGE_SIZE`` aligned. The ``addr``
+field must point to a buffer which the tags will be copied to or from.
+
+``flags`` specifies the direction of copy, either ``KVM_ARM_TAGS_TO_GUEST`` or
+``KVM_ARM_TAGS_FROM_GUEST``.
+
+The size of the buffer to store the tags is ``(length / 16)`` bytes
+(granules in MTE are 16 bytes long). Each byte contains a single tag
+value. This matches the format of ``PTRACE_PEEKMTETAGS`` and
+``PTRACE_POKEMTETAGS``.
+
+If an error occurs before any data is copied then a negative error code is
+returned. If some tags have been copied before an error occurs then the number
+of bytes successfully copied is returned. If the call completes successfully
+then ``length`` is returned.
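+
+As an illustration, a VMM saving the tags for a single page might use the
+ioctl as in this sketch (``vm_fd`` and ``gpa`` are assumptions of the
+example, and the page size is assumed to be 4096 bytes)::
+
+    #include <stdlib.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    size_t page = 4096;                  /* assumed PAGE_SIZE */
+    void *tags = malloc(page / 16);      /* one tag byte per 16-byte granule */
+
+    struct kvm_arm_copy_mte_tags copy = {
+            .guest_ipa = gpa,            /* must be PAGE_SIZE aligned */
+            .length    = page,           /* must be PAGE_SIZE aligned */
+            .addr      = tags,
+            .flags     = KVM_ARM_TAGS_FROM_GUEST,
+    };
+
+    long ret = ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);
+    /* ret == length on success, bytes copied so far or < 0 on error */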
+
+4.131 KVM_GET_SREGS2
+--------------------
+
+:Capability: KVM_CAP_SREGS2
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_sregs2 (out)
+:Returns: 0 on success, -1 on error
+
+Reads special registers from the vcpu.
+This ioctl (when supported) replaces KVM_GET_SREGS.
+
+::
+
+        struct kvm_sregs2 {
+                /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+                struct kvm_segment cs, ds, es, fs, gs, ss;
+                struct kvm_segment tr, ldt;
+                struct kvm_dtable gdt, idt;
+                __u64 cr0, cr2, cr3, cr4, cr8;
+                __u64 efer;
+                __u64 apic_base;
+                __u64 flags;
+                __u64 pdptrs[4];
+        };
+
+flags values for ``kvm_sregs2``:
+
+``KVM_SREGS2_FLAGS_PDPTRS_VALID``
+
+  Indicates that the struct contains valid PDPTR values.
+
+
+4.132 KVM_SET_SREGS2
+--------------------
+
+:Capability: KVM_CAP_SREGS2
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_sregs2 (in)
+:Returns: 0 on success, -1 on error
+
+Writes special registers into the vcpu.
+See KVM_GET_SREGS2 for the data structures.
+This ioctl (when supported) replaces KVM_SET_SREGS.
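+
+A typical read-modify-write sequence, sketched below, preserves the PDPTRs
+when they were read as valid (``vcpu_fd`` is an assumption of the example)::
+
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    struct kvm_sregs2 sregs2;
+
+    if (ioctl(vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
+            return -1;
+
+    sregs2.cr4 |= 1UL << 5;    /* example modification: set CR4.PAE */
+
+    /* sregs2.flags still carries KVM_SREGS2_FLAGS_PDPTRS_VALID if the
+       PDPTRs read above should be written back unchanged. */
+    if (ioctl(vcpu_fd, KVM_SET_SREGS2, &sregs2) < 0)
+            return -1;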
+
+4.133 KVM_GET_STATS_FD
+----------------------
+
+:Capability: KVM_CAP_STATS_BINARY_FD
+:Architectures: all
+:Type: vm ioctl, vcpu ioctl
+:Parameters: none
+:Returns: statistics file descriptor on success, < 0 on error
+
+Errors:
+
+ ====== ======================================================
+ ENOMEM if the fd could not be created due to lack of memory
+ EMFILE if the number of opened files exceeds the limit
+ ====== ======================================================
+
+The returned file descriptor can be used to read VM/vCPU statistics data in
+binary format. The data in the file descriptor consists of four blocks
+organized as follows:
+
++-------------+
+| Header |
++-------------+
+| id string |
++-------------+
+| Descriptors |
++-------------+
+| Stats Data |
++-------------+
+
+Apart from the header starting at offset 0, please be aware that it is
+not guaranteed that the four blocks are adjacent or in the above order;
+the offsets of the id, descriptors and data blocks are found in the
+header. However, all four blocks are aligned to 64 bit offsets in the
+file and they do not overlap.
+
+All blocks except the data block are immutable. Userspace needs to read them
+only once after retrieving the file descriptor, and can then use ``pread`` or
+``lseek`` to read the statistics data repeatedly.
+
+All data is in system endianness.
+
+The format of the header is as follows::
+
+ struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+ };
+
+The ``flags`` field is not used at the moment. It is always read as 0.
+
+The ``name_size`` field is the size (in bytes) of the statistics name string
+(including trailing '\0') which is contained in the "id string" block and
+appended at the end of every descriptor.
+
+The ``num_desc`` field is the number of descriptors that are included in the
+descriptor block. (The actual number of values in the data block may be
+larger, since each descriptor may comprise more than one value).
+
+The ``id_offset`` field is the offset of the id string from the start of the
+file indicated by the file descriptor. It is a multiple of 8.
+
+The ``desc_offset`` field is the offset of the Descriptors block from the start
+of the file indicated by the file descriptor. It is a multiple of 8.
+
+The ``data_offset`` field is the offset of the Stats Data block from the start
+of the file indicated by the file descriptor. It is a multiple of 8.
+
+The id string block contains a string which identifies the file descriptor on
+which KVM_GET_STATS_FD was invoked. The size of the block, including the
+trailing ``'\0'``, is indicated by the ``name_size`` field in the header.
+
+The descriptors block only needs to be read once for the lifetime of the
+file descriptor. It contains a sequence of ``struct kvm_stats_desc``, each
+followed by a string of size ``name_size``::
+
+ #define KVM_STATS_TYPE_SHIFT 0
+ #define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+ #define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+ #define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+ #define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+
+ #define KVM_STATS_UNIT_SHIFT 4
+ #define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+ #define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+ #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+ #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+ #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+
+ #define KVM_STATS_BASE_SHIFT 8
+ #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+ #define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+ #define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+
+ struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 unused;
+ char name[];
+ };
+
+The ``flags`` field contains the type and unit of the statistics data described
+by this descriptor. Its endianness is CPU native.
+The following flags are supported:
+
+Bits 0-3 of ``flags`` encode the type:
+ * ``KVM_STATS_TYPE_CUMULATIVE``
+ The statistics data is cumulative. The value of data can only be increased.
+ Most of the counters used in KVM are of this type.
+ The corresponding ``size`` field for this type is always 1.
+ All cumulative statistics data are read/write.
+ * ``KVM_STATS_TYPE_INSTANT``
+ The statistics data is instantaneous. Its value can be increased or
+ decreased. This type is usually used as a measurement of some resources,
+ like the number of dirty pages, the number of large pages, etc.
+ All instant statistics are read only.
+ The corresponding ``size`` field for this type is always 1.
+ * ``KVM_STATS_TYPE_PEAK``
+ The statistics data is a peak value. It can only be increased, and it
+ represents a maximum for a measurement, for example the maximum number
+ of items in a hash table bucket, the longest time waited and so on.
+ The corresponding ``size`` field for this type is always 1.
+
+Bits 4-7 of ``flags`` encode the unit:
+ * ``KVM_STATS_UNIT_NONE``
+ There is no unit for the value of statistics data. This usually means that
+ the value is a simple counter of an event.
+ * ``KVM_STATS_UNIT_BYTES``
+ It indicates that the statistics data is used to measure memory size, in the
+ unit of Byte, KiByte, MiByte, GiByte, etc. The unit of the data is
+ determined by the ``exponent`` field in the descriptor.
+ * ``KVM_STATS_UNIT_SECONDS``
+ It indicates that the statistics data is used to measure time or latency.
+ * ``KVM_STATS_UNIT_CYCLES``
+ It indicates that the statistics data is used to measure CPU clock cycles.
+
+Bits 8-11 of ``flags``, together with ``exponent``, encode the scale of the
+unit:
+ * ``KVM_STATS_BASE_POW10``
+ The scale is based on power of 10. It is used for measurement of time and
+ CPU clock cycles. For example, an exponent of -9 can be used with
+ ``KVM_STATS_UNIT_SECONDS`` to express that the unit is nanoseconds.
+ * ``KVM_STATS_BASE_POW2``
+ The scale is based on power of 2. It is used for measurement of memory size.
+ For example, an exponent of 20 can be used with ``KVM_STATS_UNIT_BYTES`` to
+ express that the unit is MiB.
+
+The ``size`` field is the number of values of this statistics data. Its
+value is usually 1 for most simple statistics; a size of 1 means the data
+is a single unsigned 64-bit value.
+
+The ``offset`` field is the offset from the start of Data Block to the start of
+the corresponding statistics data.
+
+The ``unused`` field is reserved for future support for other types of
+statistics data, like log/linear histogram. Its value is always 0 for the types
+defined above.
+
+The ``name`` field is the name string of the statistics data. The name string
+starts at the end of ``struct kvm_stats_desc``. The maximum length, including
+the trailing ``'\0'``, is indicated by ``name_size`` in the header.
+
+The Stats Data block contains an array of 64-bit values in the same order
+as the descriptors in the Descriptors block.
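+
+Putting the pieces together, a reader might proceed as in the following
+sketch (``vm_fd`` is an assumption of the example; error handling is
+omitted, and only the first value of each statistic is printed)::
+
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
+
+    struct kvm_stats_header hdr;
+    pread(stats_fd, &hdr, sizeof(hdr), 0);
+
+    /* Immutable blocks: read once, reuse afterwards. Each descriptor
+       occupies sizeof(struct kvm_stats_desc) plus name_size bytes. */
+    size_t desc_sz = sizeof(struct kvm_stats_desc) + hdr.name_size;
+    char *descs = malloc(hdr.num_desc * desc_sz);
+    pread(stats_fd, descs, hdr.num_desc * desc_sz, hdr.desc_offset);
+
+    /* The data block can be re-read repeatedly with pread(). */
+    for (__u32 i = 0; i < hdr.num_desc; i++) {
+            struct kvm_stats_desc *d = (void *)(descs + i * desc_sz);
+            __u64 val;
+
+            pread(stats_fd, &val, sizeof(val), hdr.data_offset + d->offset);
+            /* To scale, apply base^exponent from the descriptor flags,
+               e.g. seconds with exponent -9 means nanoseconds. */
+            printf("%s = %llu\n", d->name, (unsigned long long)val);
+    }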
+
5. The kvm_run structure
========================
@@ -6323,6 +6582,7 @@ KVM_RUN_BUS_LOCK flag is used to distinguish between them.
This capability can be used to check / enable 2nd DAWR feature provided
by POWER10 processor.
+
7.24 KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
-------------------------------------
@@ -6360,7 +6620,67 @@ system fingerprint. To prevent userspace from circumventing such restrictions
by running an enclave in a VM, KVM prevents access to privileged attributes by
default.
-See Documentation/x86/sgx/2.Kernel-internals.rst for more details.
+See Documentation/x86/sgx.rst for more details.
+
+7.26 KVM_CAP_PPC_RPT_INVALIDATE
+-------------------------------
+
+:Capability: KVM_CAP_PPC_RPT_INVALIDATE
+:Architectures: ppc
+:Type: vm
+
+This capability indicates that the kernel is capable of handling
+H_RPT_INVALIDATE hcall.
+
+In order to enable the use of H_RPT_INVALIDATE in the guest,
+user space might have to advertise it for the guest. For example,
+IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+This capability is enabled for hypervisors on platforms like POWER9
+that support radix MMU.
+
+7.27 KVM_CAP_EXIT_ON_EMULATION_FAILURE
+--------------------------------------
+
+:Architectures: x86
+:Parameters: args[0] whether the feature should be enabled or not
+
+When this capability is enabled, an emulation failure will result in an exit
+to userspace with KVM_INTERNAL_ERROR (except when the emulator was invoked
+to handle a VMware backdoor instruction). Furthermore, KVM will now provide up
+to 15 instruction bytes for any exit to userspace resulting from an emulation
+failure. When these exits to userspace occur, use the emulation_failure struct
+instead of the internal struct. They both have the same layout, but the
+emulation_failure struct matches the content better. It also explicitly
+defines the 'flags' field, which is used to describe the fields in the struct
+that are valid (i.e. if KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES is
+set in the 'flags' field, then both 'insn_size' and 'insn_bytes' contain valid
+data).
+
+7.28 KVM_CAP_ARM_MTE
+--------------------
+
+:Architectures: arm64
+:Parameters: none
+
+This capability indicates that KVM (and the hardware) supports exposing the
+Memory Tagging Extensions (MTE) to the guest. It must also be enabled by the
+VMM before creating any VCPUs to allow the guest access. Note that MTE is only
+available to a guest running in AArch64 mode and enabling this capability will
+cause attempts to create AArch32 VCPUs to fail.
+
+When enabled the guest is able to access tags associated with any memory given
+to the guest. KVM will ensure that the tags are maintained during swap or
+hibernation of the host; however the VMM needs to manually save/restore the
+tags as appropriate if the VM is migrated.
+
+When this capability is enabled, all memory in memslots must be mapped as
+not-shareable (no MAP_SHARED); attempts to create a memslot with a
+MAP_SHARED mmap will result in an -EINVAL return.
+
+When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
+perform a bulk copy of tags to/from the guest.
8. Other capabilities.
======================
@@ -6891,3 +7211,33 @@ This capability is always enabled.
This capability indicates that the KVM virtual PTP service is
supported in the host. A VMM can check whether the service is
available to the guest on migration.
+
+8.33 KVM_CAP_HYPERV_ENFORCE_CPUID
+---------------------------------
+
+:Architectures: x86
+
+When enabled, KVM will disable emulated Hyper-V features provided to the
+guest according to the bits in the Hyper-V CPUID feature leaves. Otherwise,
+all currently implemented Hyper-V features are provided unconditionally when
+Hyper-V identification is set in the HYPERV_CPUID_INTERFACE (0x40000001)
+leaf.
+
+8.34 KVM_CAP_EXIT_HYPERCALL
+---------------------------
+
+:Capability: KVM_CAP_EXIT_HYPERCALL
+:Architectures: x86
+:Type: vm
+
+This capability, if enabled, will cause KVM to exit to userspace
+with KVM_EXIT_HYPERCALL exit reason to process some hypercalls.
+
+Calling KVM_CHECK_EXTENSION for this capability will return a bitmask
+of hypercalls that can be configured to exit to userspace.
+Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE.
+
+The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset
+of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
+the hypercalls whose corresponding bit is in the argument, and return
+ENOSYS for the others.
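+
+A VMM enabling the exit for KVM_HC_MAP_GPA_RANGE could do so as in this
+sketch (``vm_fd`` is an assumption of the example)::
+
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+    #include <linux/kvm_para.h>
+
+    long mask = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_EXIT_HYPERCALL);
+
+    if (mask & (1ULL << KVM_HC_MAP_GPA_RANGE)) {
+            struct kvm_enable_cap cap = {
+                    .cap = KVM_CAP_EXIT_HYPERCALL,
+                    .args[0] = 1ULL << KVM_HC_MAP_GPA_RANGE,
+            };
+            ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+    }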
diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index cf62162d4be2..bda3e3e737d7 100644
--- a/Documentation/virt/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
@@ -96,6 +96,13 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit
before using extended destination
ID bits in MSI address bits 11-5.
+KVM_FEATURE_HC_MAP_GPA_RANGE 16 guest checks this feature bit before
+ using the map gpa range hypercall
+ to notify the page state change
+
+KVM_FEATURE_MIGRATION_CONTROL 17 guest checks this feature bit before
+ using MSR_KVM_MIGRATION_CONTROL
+
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side
per-cpu warps are expected in
kvmclock
diff --git a/Documentation/virt/kvm/hypercalls.rst b/Documentation/virt/kvm/hypercalls.rst
index ed4fddd364ea..e56fa8b9cfca 100644
--- a/Documentation/virt/kvm/hypercalls.rst
+++ b/Documentation/virt/kvm/hypercalls.rst
@@ -169,3 +169,24 @@ a0: destination APIC ID
:Usage example: When sending a call-function IPI-many to vCPUs, yield if
any of the IPI target vCPUs was preempted.
+
+8. KVM_HC_MAP_GPA_RANGE
+-------------------------
+:Architecture: x86
+:Status: active
+:Purpose: Request KVM to map a GPA range with the specified attributes.
+
+a0: the guest physical address of the start page
+a1: the number of (4kb) pages (must be contiguous in GPA space)
+a2: attributes
+
+ Where 'attributes':
+ * bits 3:0 - preferred page size encoding 0 = 4kb, 1 = 2mb, 2 = 1gb, etc...
+ * bit 4 - plaintext = 0, encrypted = 1
+ * bits 63:5 - reserved (must be zero)
+
+**Implementation note**: this hypercall is implemented in userspace via
+the KVM_CAP_EXIT_HYPERCALL capability. Userspace must enable that capability
+before advertising KVM_FEATURE_HC_MAP_GPA_RANGE in the guest CPUID. In
+addition, if the guest supports KVM_FEATURE_MIGRATION_CONTROL, userspace
+must also set up an MSR filter to process writes to MSR_KVM_MIGRATION_CONTROL.
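+
+As a guest-side illustration (a sketch assuming Linux guest kernel context
+with <asm/kvm_para.h> available; ``gpa`` is an assumption of the example),
+marking one 4kb page as encrypted might look like::
+
+    #include <asm/kvm_para.h>
+
+    unsigned long attrs = 1UL << 4;    /* encrypted, 4kb preferred size */
+
+    kvm_hypercall3(KVM_HC_MAP_GPA_RANGE, gpa, 1 /* npages */, attrs);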
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 1fc860c007a3..35eca377543d 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -16,6 +16,11 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
+- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
+ synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
+ can be taken inside a kvm->srcu read-side critical section,
+ while kvm->slots_lock cannot.
+
On x86:
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
index 5bfe28b0728e..f60f5488e121 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/mmu.rst
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
shadow pages) so role.quadrant takes values in the range 0..3. Each
quadrant maps 1GB virtual address space.
role.access:
- Inherited guest access permissions in the form uwx. Note execute
- permission is positive, not negative.
+ Inherited guest access permissions from the parent ptes in the form uwx.
+ Note execute permission is positive, not negative.
role.invalid:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
@@ -180,8 +180,8 @@ Shadow pages contain the following information:
role.gpte_is_8_bytes:
Reflects the size of the guest PTE for which the page is valid, i.e. '1'
if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
- role.nxe:
- Contains the value of efer.nxe for which the page is valid.
+ role.efer_nx:
+ Contains the value of efer.nx for which the page is valid.
role.cr0_wp:
Contains the value of cr0.wp for which the page is valid.
role.smep_andnot_wp:
@@ -192,9 +192,6 @@ Shadow pages contain the following information:
Contains the value of cr4.smap && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
- role.ept_sp:
- This is a virtual flag to denote a shadowed nested EPT page. ept_sp
- is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
role.smm:
Is 1 if the page is valid in system management mode. This field
determines which of the kvm_memslots array was used to build this
diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
index e37a14c323d2..9315fc385fb0 100644
--- a/Documentation/virt/kvm/msr.rst
+++ b/Documentation/virt/kvm/msr.rst
@@ -376,3 +376,16 @@ data:
write '1' to bit 0 of the MSR, this causes the host to re-scan its queue
and check if there are more notifications pending. The MSR is available
if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.
+
+MSR_KVM_MIGRATION_CONTROL:
+ 0x4b564d08
+
+data:
+ This MSR is available if KVM_FEATURE_MIGRATION_CONTROL is present in
+ CPUID. Bit 0 represents whether live migration of the guest is allowed.
+
+ When a guest is started, bit 0 will be 0 if the guest has encrypted
+ memory and 1 if the guest does not have encrypted memory. If the
+ guest is communicating page encryption status to the host using the
+ ``KVM_HC_MAP_GPA_RANGE`` hypercall, it can set bit 0 in this MSR to
+ allow live migration of the guest.
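+
+For illustration, a guest that has finished reporting its page encryption
+status might allow migration as in this sketch (assuming guest kernel
+context; bit 0 is the only bit described above)::
+
+    #include <asm/kvm_para.h>
+    #include <asm/msr.h>
+
+    if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
+            wrmsrl(0x4b564d08, 1);    /* MSR_KVM_MIGRATION_CONTROL, bit 0 */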
diff --git a/Documentation/virt/kvm/s390-pv-boot.rst b/Documentation/virt/kvm/s390-pv-boot.rst
index ad1f7866c001..73a6083cb5e7 100644
--- a/Documentation/virt/kvm/s390-pv-boot.rst
+++ b/Documentation/virt/kvm/s390-pv-boot.rst
@@ -10,7 +10,7 @@ The memory of Protected Virtual Machines (PVMs) is not accessible to
I/O or the hypervisor. In those cases where the hypervisor needs to
access the memory of a PVM, that memory must be made accessible.
Memory made accessible to the hypervisor will be encrypted. See
-:doc:`s390-pv` for details."
+Documentation/virt/kvm/s390-pv.rst for details.
On IPL (boot) a small plaintext bootloader is started, which provides
information about the encrypted components and necessary metadata to
diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 5feb3706a7ae..ad2915ef7020 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
necessary to inform each VCPU to completely refresh the tables. This
request is used for that.
-KVM_REQ_PENDING_TIMER
+KVM_REQ_UNBLOCK
- This request may be made from a timer handler run on the host on behalf
- of a VCPU. It informs the VCPU thread to inject a timer interrupt.
+ This request informs the vCPU to exit kvm_vcpu_block. It is used, for
+ example, by timer handlers that run on the host on behalf of a vCPU,
+ or in order to update the interrupt routing and ensure that assigned
+ devices will wake up the vCPU.
KVM_REQ_UNHALT
@@ -302,6 +304,6 @@ VCPU returns from the call.
References
==========
-.. [atomic-ops] Documentation/core-api/atomic_ops.rst
+.. [atomic-ops] Documentation/atomic_bitops.txt and Documentation/atomic_t.txt
.. [memory-barriers] Documentation/memory-barriers.txt
.. [lwn-mb] https://lwn.net/Articles/573436/
diff --git a/Documentation/vm/memory-model.rst b/Documentation/vm/memory-model.rst
index ce398a7dc6cd..30e8fbed6914 100644
--- a/Documentation/vm/memory-model.rst
+++ b/Documentation/vm/memory-model.rst
@@ -14,15 +14,11 @@ for the CPU. Then there could be several contiguous ranges at
completely distinct addresses. And, don't forget about NUMA, where
different memory banks are attached to different CPUs.
-Linux abstracts this diversity using one of the three memory models:
-FLATMEM, DISCONTIGMEM and SPARSEMEM. Each architecture defines what
+Linux abstracts this diversity using one of the two memory models:
+FLATMEM and SPARSEMEM. Each architecture defines what
memory models it supports, what the default memory model is and
whether it is possible to manually override that default.
-.. note::
- At time of this writing, DISCONTIGMEM is considered deprecated,
- although it is still in use by several architectures.
-
All the memory models track the status of physical page frames using
struct page arranged in one or more arrays.
@@ -63,43 +59,6 @@ straightforward: `PFN - ARCH_PFN_OFFSET` is an index to the
The `ARCH_PFN_OFFSET` defines the first page frame number for
systems with physical memory starting at address different from 0.
-DISCONTIGMEM
-============
-
-The DISCONTIGMEM model treats the physical memory as a collection of
-`nodes` similarly to how Linux NUMA support does. For each node Linux
-constructs an independent memory management subsystem represented by
-`struct pglist_data` (or `pg_data_t` for short). Among other
-things, `pg_data_t` holds the `node_mem_map` array that maps
-physical pages belonging to that node. The `node_start_pfn` field of
-`pg_data_t` is the number of the first page frame belonging to that
-node.
-
-The architecture setup code should call :c:func:`free_area_init_node` for
-each node in the system to initialize the `pg_data_t` object and its
-`node_mem_map`.
-
-Every `node_mem_map` behaves exactly as FLATMEM's `mem_map` -
-every physical page frame in a node has a `struct page` entry in the
-`node_mem_map` array. When DISCONTIGMEM is enabled, a portion of the
-`flags` field of the `struct page` encodes the node number of the
-node hosting that page.
-
-The conversion between a PFN and the `struct page` in the
-DISCONTIGMEM model became slightly more complex as it has to determine
-which node hosts the physical page and which `pg_data_t` object
-holds the `struct page`.
-
-Architectures that support DISCONTIGMEM provide :c:func:`pfn_to_nid`
-to convert PFN to the node number. The opposite conversion helper
-:c:func:`page_to_nid` is generic as it uses the node number encoded in
-page->flags.
-
-Once the node number is known, the PFN can be used to index
-appropriate `node_mem_map` array to access the `struct page` and
-the offset of the `struct page` from the `node_mem_map` plus
-`node_start_pfn` is the PFN of that page.
-
SPARSEMEM
=========
diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
index 03f294a638bd..d3028554b1e9 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/vm/slub.rst
@@ -181,7 +181,7 @@ SLUB Debug output
Here is a sample of slub debug output::
====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
--------------------------------------------------------------------
INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
- Redzone 0xc90f6d28: 00 cc cc cc .
- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
+ Redzone (0xc90f6d28): 00 cc cc cc .
+ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
[<c010523d>] dump_trace+0x63/0x1eb
[<c01053df>] show_trace_log_lvl+0x1a/0x2f
diff --git a/Documentation/vm/zswap.rst b/Documentation/vm/zswap.rst
index d8d9fa4a1f0d..8edb8d578caf 100644
--- a/Documentation/vm/zswap.rst
+++ b/Documentation/vm/zswap.rst
@@ -10,7 +10,7 @@ Overview
Zswap is a lightweight compressed cache for swap pages. It takes pages that are
in the process of being swapped out and attempts to compress them into a
dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles
-for potentially reduced swap I/O.  This trade-off can also result in a
+for potentially reduced swap I/O. This trade-off can also result in a
significant performance improvement if reads from the compressed cache are
faster than reads from a swap device.
@@ -26,7 +26,7 @@ faster than reads from a swap device.
performance impact of swapping.
* Overcommitted guests that share a common I/O resource can
dramatically reduce their swap I/O pressure, avoiding heavy handed I/O
- throttling by the hypervisor. This allows more work to get done with less
+ throttling by the hypervisor. This allows more work to get done with less
impact to the guest workload and guests sharing the I/O subsystem
* Users with SSDs as swap devices can extend the life of the device by
drastically reducing life-shortening writes.
diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst
index fc844913dece..894a19897005 100644
--- a/Documentation/x86/boot.rst
+++ b/Documentation/x86/boot.rst
@@ -1343,7 +1343,7 @@ follow::
In addition to read/modify/write the setup header of the struct
boot_params as that of 16-bit boot protocol, the boot loader should
also fill the additional fields of the struct boot_params as
-described in chapter :doc:`zero-page`.
+described in chapter Documentation/x86/zero-page.rst.
After setting up the struct boot_params, the boot loader can load the
32/64-bit kernel in the same way as that of 16-bit boot protocol.
@@ -1379,7 +1379,7 @@ can be calculated as follows::
In addition to read/modify/write the setup header of the struct
boot_params as that of 16-bit boot protocol, the boot loader should
also fill the additional fields of the struct boot_params as described
-in chapter :doc:`zero-page`.
+in chapter Documentation/x86/zero-page.rst.
After setting up the struct boot_params, the boot loader can load
64-bit kernel in the same way as that of 16-bit boot protocol, but
diff --git a/Documentation/x86/buslock.rst b/Documentation/x86/buslock.rst
new file mode 100644
index 000000000000..7c051e714943
--- /dev/null
+++ b/Documentation/x86/buslock.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: <isonum.txt>
+
+===============================
+Bus lock detection and handling
+===============================
+
+:Copyright: |copy| 2021 Intel Corporation
+:Authors: - Fenghua Yu <fenghua.yu@intel.com>
+ - Tony Luck <tony.luck@intel.com>
+
+Problem
+=======
+
+A split lock is any atomic operation whose operand crosses two cache lines.
+Since the operand spans two cache lines and the operation must be atomic,
+the system locks the bus while the CPU accesses the two cache lines.
+
+A bus lock is acquired through either split locked access to writeback (WB)
+memory or any locked access to non-WB memory. This is typically thousands of
+cycles slower than an atomic operation within a cache line. It also disrupts
+performance on other cores and brings the whole system to its knees.
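+
+For illustration, the following user-space sketch performs a locked
+read-modify-write on an operand that straddles two 64-byte cache lines
+(strictly speaking the misaligned access is not portable C, but on x86 it
+compiles to a LOCK-prefixed instruction and demonstrates a split lock)::
+
+    #include <stdint.h>
+
+    /* 128 bytes, aligned so that offset 62 crosses a cache line boundary. */
+    static char buf[128] __attribute__((aligned(64)));
+
+    int main(void)
+    {
+            uint32_t *p = (uint32_t *)(buf + 62);   /* bytes 62..65 */
+
+            __sync_fetch_and_add(p, 1);             /* LOCK XADD across lines */
+            return 0;
+    }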
+
+Detection
+=========
+
+Intel processors may support either or both of the following hardware
+mechanisms to detect split locks and bus locks.
+
+#AC exception for split lock detection
+--------------------------------------
+
+Beginning with the Tremont Atom CPU, an Alignment Check (#AC) exception may
+be raised when a split lock operation is attempted.
+
+#DB exception for bus lock detection
+------------------------------------
+
+Some CPUs have the ability to notify the kernel with a #DB trap after a user
+instruction acquires a bus lock and is executed. This allows the kernel to
+terminate the application or to enforce throttling.
+
+Software handling
+=================
+
+The kernel #AC and #DB handlers handle bus lock based on the kernel
+parameter "split_lock_detect". Here is a summary of different options:
+
++------------------+----------------------------+-----------------------+
+|split_lock_detect=|#AC for split lock          |#DB for bus lock       |
++------------------+----------------------------+-----------------------+
+|off               |Do nothing                  |Do nothing             |
++------------------+----------------------------+-----------------------+
+|warn              |Kernel OOPs                 |Warn once per task     |
+|(default)         |Warn once per task and      |and continue to run.   |
+|                  |disable future checking     |                       |
+|                  |When both features are      |                       |
+|                  |supported, warn in #AC      |                       |
++------------------+----------------------------+-----------------------+
+|fatal             |Kernel OOPs                 |Send SIGBUS to user.   |
+|                  |Send SIGBUS to user         |                       |
+|                  |When both features are      |                       |
+|                  |supported, fatal in #AC     |                       |
++------------------+----------------------------+-----------------------+
+|ratelimit:N       |Do nothing                  |Limit bus lock rate to |
+|(0 < N <= 1000)   |                            |N bus locks per second |
+|                  |                            |system wide and warn on|
+|                  |                            |bus locks.             |
++------------------+----------------------------+-----------------------+
+
+Usages
+======
+
+Detecting and handling bus locks can be useful in various areas:
+
+It is critical for real time system designers who build consolidated real
+time systems. These systems run hard real time code on some cores and run
+"untrusted" user processes on other cores. The hard real time code cannot
+afford any bus lock from the untrusted processes hurting real time
+performance. To date the designers have been unable to deploy these
+solutions as they have no way to prevent "untrusted" user code from
+generating split locks and bus locks that block the hard real time code
+from accessing memory.
+
+It's also useful for general computing to prevent guests or user
+applications from slowing down the overall system by executing instructions
+that take bus locks.
+
+
+Guidance
+========
+off
+---
+
+Disable checking for split lock and bus lock. This option can be useful if
+there are legacy applications that trigger these events at a low rate so
+that mitigation is not needed.
+
+warn
+----
+
+A warning is emitted when a bus lock is detected, which allows the
+offending application to be identified. This is the default behavior.
+
+fatal
+-----
+
+In this case, the bus lock is not tolerated and the process is killed.
+
+ratelimit
+---------
+
+A system wide bus lock rate limit N is specified where 0 < N <= 1000. This
+allows a bus lock rate up to N bus locks per second. When the bus lock rate
+is exceeded, any task which is caught via the buslock #DB exception is
+throttled by enforced sleeps until the rate goes under the limit again.
+
+This is an effective mitigation in cases where a minimal impact can be
+tolerated, but a potential Denial of Service attack has to be prevented. It
+allows the offending processes to be identified and analyzed as to whether
+they are malicious or just badly written.
+
+Selecting a rate limit of 1000 allows the bus to be locked for up to about
+seven million cycles each second (assuming 7000 cycles for each bus
+lock). On a 2 GHz processor that would be about 0.35% system slowdown.
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index 4693e192b447..0004f5d2283e 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -29,6 +29,7 @@ x86-specific Documentation
microcode
resctrl
tsx_async_abort
+ buslock
usb-legacy-support
i386/index
x86_64/index
diff --git a/Documentation/x86/mtrr.rst b/Documentation/x86/mtrr.rst
index c5b695d75349..9f0b1851771a 100644
--- a/Documentation/x86/mtrr.rst
+++ b/Documentation/x86/mtrr.rst
@@ -28,7 +28,7 @@ are aligned with platform MTRR setup. If MTRRs are only set up by the platform
firmware code though and the OS does not make any specific MTRR mapping
requests mtrr_type_lookup() should always return MTRR_TYPE_INVALID.
-For details refer to :doc:`pat`.
+For details refer to Documentation/x86/pat.rst.
.. tip::
On Intel P6 family processors (Pentium Pro, Pentium II and later)
diff --git a/Documentation/x86/x86_64/boot-options.rst b/Documentation/x86/x86_64/boot-options.rst
index 324cefff92e7..5f62b3b86357 100644
--- a/Documentation/x86/x86_64/boot-options.rst
+++ b/Documentation/x86/x86_64/boot-options.rst
@@ -247,16 +247,11 @@ Multiple x86-64 PCI-DMA mapping implementations exist, for example:
Kernel boot message: "PCI-DMA: Using software bounce buffering
for IO (SWIOTLB)"
- 4. <arch/x86_64/pci-calgary.c> : IBM Calgary hardware IOMMU. Used in IBM
- pSeries and xSeries servers. This hardware IOMMU supports DMA address
- mapping with memory protection, etc.
- Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
-
::
iommu=[<size>][,noagp][,off][,force][,noforce]
[,memaper[=<order>]][,merge][,fullflush][,nomerge]
- [,noaperture][,calgary]
+ [,noaperture]
General iommu options:
@@ -295,8 +290,6 @@ iommu options only relevant to the AMD GART hardware IOMMU:
Don't initialize the AGP driver and use full aperture.
panic
Always panic when IOMMU overflows.
- calgary
- Use the Calgary IOMMU if it is available
iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
implementation:
@@ -307,28 +300,6 @@ implementation:
force
Force all IO through the software TLB.
-Settings for the IBM Calgary hardware IOMMU currently found in IBM
-pSeries and xSeries machines
-
- calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
- Set the size of each PCI slot's translation table when using the
- Calgary IOMMU. This is the size of the translation table itself
- in main memory. The smallest table, 64k, covers an IO space of
- 32MB; the largest, 8MB table, can cover an IO space of 4GB.
- Normally the kernel will make the right choice by itself.
- calgary=[translate_empty_slots]
- Enable translation even on slots that have no devices attached to
- them, in case a device will be hotplugged in the future.
- calgary=[disable=<PCI bus number>]
- Disable translation on a given PHB. For
- example, the built-in graphics adapter resides on the first bridge
- (PCI bus number 0); if translation (isolation) is enabled on this
- bridge, X servers that access the hardware directly from user
- space might stop working. Use this option if you have devices that
- are accessed from userspace directly on some PCI host bridge.
- panic
- Always panic when IOMMU overflows
-
Miscellaneous
=============
diff --git a/MAINTAINERS b/MAINTAINERS
index 81e1edeceae4..88449b7a4c95 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -973,7 +973,7 @@ F: drivers/net/ethernet/amd/xgbe/
AMD SENSOR FUSION HUB DRIVER
M: Nehal Shah <nehal-bakulchandra.shah@amd.com>
-M: Sandeep Singh <sandeep.singh@amd.com>
+M: Basavaraj Natikar <basavaraj.natikar@amd.com>
L: linux-input@vger.kernel.org
S: Maintained
F: Documentation/hid/amd-sfh*
@@ -1811,12 +1811,13 @@ F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
F: arch/arm/mach-gemini/
+F: drivers/crypto/gemini/
F: drivers/net/ethernet/cortina/
F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
ARM/CZ.NIC TURRIS SUPPORT
-M: Marek Behun <kabel@kernel.org>
+M: Marek Behún <kabel@kernel.org>
S: Maintained
W: https://www.turris.cz/
F: Documentation/ABI/testing/debugfs-moxtet
@@ -1972,6 +1973,7 @@ F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt
F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
F: arch/arm/mach-ixp4xx/
F: drivers/clocksource/timer-ixp4xx.c
+F: drivers/crypto/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
F: include/linux/irqchip/irq-ixp4xx.h
@@ -3877,6 +3879,7 @@ L: linux-btrfs@vger.kernel.org
S: Maintained
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
+C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
F: Documentation/filesystems/btrfs.rst
F: fs/btrfs/
@@ -4138,6 +4141,14 @@ S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
+CBS/ETF/TAPRIO QDISCS
+M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+S: Maintained
+L: netdev@vger.kernel.org
+F: net/sched/sch_cbs.c
+F: net/sched/sch_etf.c
+F: net/sched/sch_taprio.c
+
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@@ -4436,6 +4447,18 @@ F: include/linux/compiler-clang.h
F: scripts/clang-tools/
K: \b(?i:clang|llvm)\b
+CLANG CONTROL FLOW INTEGRITY SUPPORT
+M: Sami Tolvanen <samitolvanen@google.com>
+M: Kees Cook <keescook@chromium.org>
+R: Nathan Chancellor <nathan@kernel.org>
+R: Nick Desaulniers <ndesaulniers@google.com>
+L: clang-built-linux@googlegroups.com
+S: Supported
+B: https://github.com/ClangBuiltLinux/linux/issues
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/clang/features
+F: include/linux/cfi.h
+F: kernel/cfi.c
+
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: linux-kernel@vger.kernel.org
@@ -4601,6 +4624,12 @@ S: Supported
F: drivers/video/console/
F: include/linux/console*
+CONTEXT TRACKING
+M: Frederic Weisbecker <frederic@kernel.org>
+S: Maintained
+F: kernel/context_tracking.c
+F: include/linux/context_tracking*
+
CONTROL GROUP (CGROUP)
M: Tejun Heo <tj@kernel.org>
M: Zefan Li <lizefan.x@bytedance.com>
@@ -5170,7 +5199,14 @@ DELL WMI NOTIFICATIONS DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
M: Pali Rohár <pali@kernel.org>
S: Maintained
-F: drivers/platform/x86/dell/dell-wmi.c
+F: drivers/platform/x86/dell/dell-wmi-base.c
+
+DELL WMI HARDWARE PRIVACY SUPPORT
+M: Perry Yuan <Perry.Yuan@dell.com>
+L: Dell.Client.Kernel@dell.com
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/dell/dell-wmi-privacy.c
DELTA ST MEDIA DRIVER
M: Hugues Fruchet <hugues.fruchet@foss.st.com>
@@ -5180,6 +5216,13 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/platform/sti/delta
+DELTA DPS920AB PSU DRIVER
+M: Robert Marko <robert.marko@sartura.hr>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/dps920ab.rst
+F: drivers/hwmon/pmbus/dps920ab.c
+
DENALI NAND DRIVER
L: linux-mtd@lists.infradead.org
S: Orphan
@@ -5569,7 +5612,6 @@ F: drivers/soc/fsl/dpio
DPAA2 ETHERNET DRIVER
M: Ioana Ciornei <ioana.ciornei@nxp.com>
-M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
@@ -6437,10 +6479,11 @@ F: Documentation/filesystems/ecryptfs.rst
F: fs/ecryptfs/
EDAC-AMD64
-M: Borislav Petkov <bp@alien8.de>
+M: Yazen Ghannam <yazen.ghannam@amd.com>
L: linux-edac@vger.kernel.org
-S: Maintained
+S: Supported
F: drivers/edac/amd64_edac*
+F: drivers/edac/mce_amd*
EDAC-ARMADA
M: Jan Luebbe <jlu@pengutronix.de>
@@ -6763,7 +6806,7 @@ F: include/video/s1d13xxxfb.h
EROFS FILE SYSTEM
M: Gao Xiang <xiang@kernel.org>
-M: Chao Yu <yuchao0@huawei.com>
+M: Chao Yu <chao@kernel.org>
L: linux-erofs@lists.ozlabs.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
@@ -6804,6 +6847,8 @@ F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/devicetree/bindings/net/qca,ar803x.yaml
F: Documentation/networking/phy.rst
F: drivers/net/mdio/
+F: drivers/net/mdio/acpi_mdio.c
+F: drivers/net/mdio/fwnode_mdio.c
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
@@ -6938,6 +6983,7 @@ F: net/core/failover.c
FANOTIFY
M: Jan Kara <jack@suse.cz>
R: Amir Goldstein <amir73il@gmail.com>
+R: Matthew Bobrowski <repnop@google.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/notify/fanotify/
@@ -7157,7 +7203,7 @@ F: include/video/
FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
M: Horia Geantă <horia.geanta@nxp.com>
-M: Aymen Sghaier <aymen.sghaier@nxp.com>
+M: Pankaj Gupta <pankaj.gupta@nxp.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -7345,7 +7391,6 @@ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
@@ -7548,6 +7593,12 @@ M: Kieran Bingham <kbingham@kernel.org>
S: Supported
F: scripts/gdb/
+GEMINI CRYPTO DRIVER
+M: Corentin Labbe <clabbe@baylibre.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/gemini/
+
GEMTEK FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -8763,22 +8814,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-icy.c
-IDE SUBSYSTEM
-M: "David S. Miller" <davem@davemloft.net>
-L: linux-ide@vger.kernel.org
-S: Maintained
-Q: http://patchwork.ozlabs.org/project/linux-ide/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide.git
-F: Documentation/ide/
-F: drivers/ide/
-F: include/linux/ide.h
-
-IDE/ATAPI DRIVERS
-L: linux-ide@vger.kernel.org
-S: Orphan
-F: Documentation/cdrom/ide-cd.rst
-F: drivers/ide/ide-cd*
-
IDEAPAD LAPTOP EXTRAS DRIVER
M: Ike Panhc <ike.pan@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -9130,6 +9165,7 @@ F: Documentation/networking/device_drivers/ethernet/intel/
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
+F: include/linux/net/intel/iidc.h
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme <mbroemme@libmpq.org>
@@ -9235,6 +9271,12 @@ F: Documentation/admin-guide/media/ipu3_rcb.svg
F: Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
F: drivers/staging/media/ipu3/
+INTEL IXP4XX CRYPTO SUPPORT
+M: Corentin Labbe <clabbe@baylibre.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/ixp4xx_crypto.c
+
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
S: Maintained
@@ -9378,6 +9420,11 @@ S: Maintained
F: arch/x86/include/asm/intel_scu_ipc.h
F: drivers/platform/x86/intel_scu_*
+INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
+M: Daniel Scally <djrscally@gmail.com>
+S: Maintained
+F: drivers/platform/x86/intel/int3472/
+
INTEL SPEED SELECT TECHNOLOGY
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -9398,7 +9445,7 @@ F: include/linux/firmware/intel/stratix10-smc.h
F: include/linux/firmware/intel/stratix10-svc-client.h
INTEL TELEMETRY DRIVER
-M: Rajneesh Bhardwaj <rajneesh.bhardwaj@linux.intel.com>
+M: Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
M: "David E. Box" <david.e.box@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
@@ -9443,6 +9490,13 @@ L: Dell.Client.Kernel@dell.com
S: Maintained
F: drivers/platform/x86/intel-wmi-thunderbolt.c
+INTEL WWAN IOSM DRIVER
+M: M Chetan Kumar <m.chetan.kumar@intel.com>
+M: Intel Corporation <linuxwwan@intel.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/wwan/iosm/
+
INTEL(R) TRACE HUB
M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
S: Supported
@@ -9986,6 +10040,8 @@ F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
+F: tools/testing/selftests/kvm/*/aarch64/
+F: tools/testing/selftests/kvm/aarch64/
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: Huacai Chen <chenhuacai@kernel.org>
@@ -10851,6 +10907,7 @@ S: Maintained
F: drivers/mailbox/
F: include/linux/mailbox_client.h
F: include/linux/mailbox_controller.h
+F: include/dt-bindings/mailbox/
F: Documentation/devicetree/bindings/mailbox/
MAILBOX ARM MHUv2
@@ -10937,7 +10994,7 @@ F: include/linux/mv643xx.h
MARVELL MV88X3310 PHY DRIVER
M: Russell King <linux@armlinux.org.uk>
-M: Marek Behun <marek.behun@nic.cz>
+M: Marek Behún <kabel@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/marvell10g.c
@@ -11283,6 +11340,7 @@ F: include/media/imx.h
MEDIA DRIVERS FOR FREESCALE IMX7
M: Rui Miguel Silva <rmfrfs@gmail.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -11392,6 +11450,7 @@ L: linux-renesas-soc@vger.kernel.org
S: Supported
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
+F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
F: drivers/media/platform/rcar-vin/
@@ -11981,11 +12040,13 @@ MICROCHIP ISC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
L: linux-media@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/media/atmel-isc.txt
+F: Documentation/devicetree/bindings/media/atmel,isc.yaml
+F: Documentation/devicetree/bindings/media/microchip,xisc.yaml
F: drivers/media/platform/atmel/atmel-isc-base.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: drivers/media/platform/atmel/atmel-isc.h
F: drivers/media/platform/atmel/atmel-sama5d2-isc.c
+F: drivers/media/platform/atmel/atmel-sama7g5-isc.c
F: include/linux/atmel-isc-media.h
MICROCHIP ISI DRIVER
@@ -12183,7 +12244,7 @@ M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
-C: irc://chat.freenode.net/##linux-surface
+C: irc://irc.libera.chat/linux-surface
F: Documentation/driver-api/surface_aggregator/
F: drivers/platform/surface/aggregator/
F: drivers/platform/surface/surface_acpi_notify.c
@@ -12379,6 +12440,12 @@ F: Documentation/userspace-api/media/drivers/meye*
F: drivers/media/pci/meye/
F: include/uapi/linux/meye.h
+MOTORCOMM PHY DRIVER
+M: Peter Geis <pgwipeout@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/motorcomm.c
+
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
S: Orphan
F: Documentation/driver-api/serial/moxa-smartio.rst
@@ -12593,7 +12660,7 @@ S: Orphan
F: drivers/net/ethernet/natsemi/natsemi.c
NCR 5380 SCSI DRIVERS
-M: Finn Thain <fthain@telegraphics.com.au>
+M: Finn Thain <fthain@linux-m68k.org>
M: Michael Schmitz <schmitzmic@gmail.com>
L: linux-scsi@vger.kernel.org
S: Maintained
@@ -12650,6 +12717,7 @@ W: http://www.netfilter.org/
W: http://www.iptables.org/
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
+C: irc://irc.libera.chat/netfilter
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
F: include/linux/netfilter*
@@ -12681,9 +12749,9 @@ F: drivers/rtc/rtc-ntxec.c
F: include/linux/mfd/ntxec.h
NETRONOME ETHERNET DRIVERS
-M: Simon Horman <simon.horman@netronome.com>
+M: Simon Horman <simon.horman@corigine.com>
R: Jakub Kicinski <kuba@kernel.org>
-L: oss-drivers@netronome.com
+L: oss-drivers@corigine.com
S: Maintained
F: drivers/net/ethernet/netronome/
@@ -12710,7 +12778,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
-W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@@ -12755,7 +12822,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
-W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -12897,8 +12963,10 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
+M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+L: linux-nfc@lists.01.org (subscribers-only)
L: netdev@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/linux/platform_data/nfcmrvl.h
@@ -12909,7 +12977,7 @@ F: net/nfc/
NFC VIRTUAL NCI DEVICE DRIVER
M: Bongsu Jeon <bongsu.jeon@samsung.com>
L: netdev@vger.kernel.org
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
@@ -13065,7 +13133,7 @@ F: Documentation/filesystems/ntfs.rst
F: fs/ntfs/
NUBUS SUBSYSTEM
-M: Finn Thain <fthain@telegraphics.com.au>
+M: Finn Thain <fthain@linux-m68k.org>
L: linux-m68k@lists.linux-m68k.org
S: Maintained
F: arch/*/include/asm/nubus.h
@@ -13187,6 +13255,7 @@ M: Vladimir Oltean <olteanv@gmail.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/net/dsa/sja1105
+F: drivers/net/pcs/pcs-xpcs-nxp.c
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
@@ -13206,9 +13275,8 @@ F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
NXP-NCI NFC DRIVER
-M: Clément Perrochaud <clement.perrochaud@effinnov.com>
R: Charles Gorand <charles.gorand@effinnov.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: drivers/nfc/nxp-nci
@@ -14111,6 +14179,7 @@ F: drivers/pci/controller/pci-v3-semi.c
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/PCI/endpoint/*
@@ -14159,6 +14228,7 @@ F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
R: Rob Herring <robh@kernel.org>
+R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -14318,10 +14388,12 @@ PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
+L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
F: arch/*/include/asm/percpu.h
F: include/linux/percpu*.h
+F: lib/percpu*.c
F: mm/percpu*.c
PER-TASK DELAY ACCOUNTING
@@ -15129,6 +15201,13 @@ S: Maintained
F: Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
F: drivers/cpufreq/qcom-cpufreq-nvmem.c
+QUALCOMM CRYPTO DRIVERS
+M: Thara Gopinath <thara.gopinath@linaro.org>
+L: linux-crypto@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: drivers/crypto/qce/
+
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
M: Timur Tabi <timur@kernel.org>
L: netdev@vger.kernel.org
@@ -15564,6 +15643,13 @@ F: include/linux/rpmsg/
F: include/uapi/linux/rpmsg.h
F: samples/rpmsg/
+REMOTE PROCESSOR MESSAGING (RPMSG) WWAN CONTROL DRIVER
+M: Stephan Gerhold <stephan@gerhold.net>
+L: netdev@vger.kernel.org
+L: linux-remoteproc@vger.kernel.org
+S: Maintained
+F: drivers/net/wwan/rpmsg_wwan_ctrl.c
+
RENESAS CLOCK DRIVERS
M: Geert Uytterhoeven <geert+renesas@glider.be>
L: linux-renesas-soc@vger.kernel.org
@@ -15693,6 +15779,14 @@ F: arch/riscv/
N: riscv
K: riscv
+RISC-V/MICROCHIP POLARFIRE SOC SUPPORT
+M: Lewis Hanly <lewis.hanly@microchip.com>
+L: linux-riscv@lists.infradead.org
+S: Supported
+F: drivers/mailbox/mailbox-mpfs.c
+F: drivers/soc/microchip/
+F: include/soc/microchip/mpfs.h
+
RNBD BLOCK DRIVERS
M: Md. Haris Iqbal <haris.iqbal@ionos.com>
M: Jack Wang <jinpu.wang@ionos.com>
@@ -15945,6 +16039,7 @@ S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/*iucv*
@@ -15955,6 +16050,7 @@ S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/
@@ -16133,7 +16229,7 @@ F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Krzysztof Opasiak <k.opasiak@samsung.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5
@@ -16546,6 +16642,7 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
+M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -16991,6 +17088,13 @@ S: Maintained
F: drivers/ssb/
F: include/linux/ssb/
+SONY IMX208 SENSOR DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/imx208.c
+
SONY IMX214 SENSOR DRIVER
M: Ricardo Ribalda <ribalda@kernel.org>
L: linux-media@vger.kernel.org
@@ -17304,6 +17408,12 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
+ST STM32 SPI DRIVER
+M: Alain Volmat <alain.volmat@foss.st.com>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: drivers/spi/spi-stm32.c
+
ST STPDDC60 DRIVER
M: Daniel Nilsson <daniel.nilsson@flex.com>
L: linux-hwmon@vger.kernel.org
@@ -17653,6 +17763,7 @@ M: Jose Abreu <Jose.Abreu@synopsys.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/pcs/pcs-xpcs.c
+F: drivers/net/pcs/pcs-xpcs.h
F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER
@@ -17662,7 +17773,6 @@ R: Mika Westerberg <mika.westerberg@linux.intel.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-designware-*
-F: include/linux/platform_data/i2c-designware.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Jaehoon Chung <jh80.chung@samsung.com>
@@ -18156,6 +18266,13 @@ W: http://thinkwiki.org/wiki/Ibm-acpi
T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
F: drivers/platform/x86/thinkpad_acpi.c
+THINKPAD LMI DRIVER
+M: Mark Pearson <markpearson@lenovo.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-class-firmware-attributes
+F: drivers/platform/x86/think-lmi.?
+
THUNDERBOLT DMA TRAFFIC TEST DRIVER
M: Isaac Hazan <isaac.hazan@intel.com>
L: linux-usb@vger.kernel.org
@@ -18318,7 +18435,7 @@ F: sound/soc/codecs/tas571x*
TI TRF7970A NFC DRIVER
M: Mark Greer <mgreer@animalcreek.com>
L: linux-wireless@vger.kernel.org
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
F: drivers/nfc/trf7970a.c
@@ -18854,6 +18971,13 @@ S: Maintained
F: drivers/usb/host/isp116x*
F: include/linux/usb/isp116x.h
+USB ISP1760 DRIVER
+M: Rui Miguel Silva <rui.silva@linaro.org>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/isp1760/*
+F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
USB LAN78XX ETHERNET DRIVER
M: Woojung Huh <woojung.huh@microchip.com>
M: UNGLinuxDriver@microchip.com
@@ -19551,6 +19675,10 @@ F: include/dt-bindings/regulator/
F: include/linux/regulator/
K: regulator_get_optional
+VOLTAGE AND CURRENT REGULATOR IRQ HELPERS
+R: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+F: drivers/regulator/irq_helpers.c
+
VRF
M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
@@ -19568,6 +19696,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk.git
F: Documentation/core-api/printk-formats.rst
F: lib/test_printf.c
+F: lib/test_scanf.c
F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER
@@ -19751,6 +19880,16 @@ F: Documentation/core-api/workqueue.rst
F: include/linux/workqueue.h
F: kernel/workqueue.c
+WWAN DRIVERS
+M: Loic Poulain <loic.poulain@linaro.org>
+M: Sergey Ryazanov <ryazanov.s.a@gmail.com>
+R: Johannes Berg <johannes@sipsolutions.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/wwan/
+F: include/linux/wwan.h
+F: include/uapi/linux/wwan.h
+
X-POWERS AXP288 PMIC DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
S: Maintained
@@ -19998,6 +20137,7 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
+C: irc://irc.oftc.net/xfs
M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
diff --git a/Makefile b/Makefile
index e4468353425a..88888fff4c62 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Frozen Wasteland
+EXTRAVERSION =
+NAME = Opossums on Parade
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -928,6 +928,14 @@ CC_FLAGS_LTO += -fvisibility=hidden
# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+
+# Check for frame size exceeding threshold during prolog/epilog insertion
+# when using lld < 13.0.0.
+ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+endif
+endif
endif
ifdef CONFIG_LTO
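Note on the hunk above: with LTO, machine code is generated inside the linker, so the compiler-side -Wframe-larger-than=$(CONFIG_FRAME_WARN) diagnostic never sees the final stack frames; funneling -warn-stack-size through -plugin-opt restores the check for older lld (newer lld accepts the option through a different path). A minimal sketch of the kind of function the threshold is meant to catch; the function name and the 2048-byte buffer are illustrative only, assuming CONFIG_FRAME_WARN=1024:

/* illustration: an on-stack buffer well above a 1024-byte CONFIG_FRAME_WARN */
static int big_frame_example(void)
{
	char buf[2048];		/* the whole array lives in this frame */

	buf[0] = 0;
	return buf[0];
}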
@@ -1031,7 +1039,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
endif
# We never want expected sections to be placed heuristically by the
diff --git a/arch/Kconfig b/arch/Kconfig
index c45b770d3579..129df498a8e1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -285,6 +285,13 @@ config ARCH_THREAD_STACK_ALLOCATOR
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
+config ARCH_WANTS_NO_INSTR
+ bool
+ help
+ An architecture should select this if the noinstr macro is used on
+ functions to denote that the toolchain should avoid instrumenting
+ such functions; honoring the annotation is required for correctness.
+
config ARCH_32BIT_OFF_T
bool
depends on !64BIT
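For context on the new ARCH_WANTS_NO_INSTR symbol: noinstr marks code that must run before instrumentation-safe state is established (early entry paths, for instance), placing it in a dedicated section with sanitizer and tracing hooks disabled. A hedged sketch of its use; the function name here is hypothetical, while the attribute itself lives in include/linux/compiler_types.h upstream:

noinstr void arch_handle_early_entry(void)
{
	/* no tracer, KASAN or KCSAN hook may fire in this body */
}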
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5998106faa60..8954216b9956 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -549,29 +549,12 @@ config NR_CPUS
MARVEL support can handle a maximum of 32 CPUs, all the others
with working support have a maximum of 4 CPUs.
-config ARCH_DISCONTIGMEM_ENABLE
- bool "Discontiguous Memory Support"
- depends on BROKEN
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
-
config ARCH_SPARSEMEM_ENABLE
bool "Sparse Memory Support"
help
Say Y to support efficient handling of discontiguous physical memory,
for systems that have huge holes in the physical address space.
-config NUMA
- bool "NUMA Support (EXPERIMENTAL)"
- depends on DISCONTIGMEM && BROKEN
- help
- Say Y to compile the kernel to support NUMA (Non-Uniform Memory
- Access). This option is for configuring high-end multiprocessor
- server machines. If in doubt, say N.
-
config ALPHA_WTINT
bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
default y if ALPHA_QEMU
@@ -596,11 +579,6 @@ config ALPHA_WTINT
If unsure, say N.
-config NODES_SHIFT
- int
- default "7"
- depends on NEED_MULTIPLE_NODES
-
# LARGE_VMALLOC is racy, if you *really* need it then fix it first
config ALPHA_LARGE_VMALLOC
bool
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index 724c4075df40..dd2dd9f0861f 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -25,19 +25,18 @@ CONFIG_PNP=y
CONFIG_ISAPNP=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_ALI15X3=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_ALI=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_CYPRESS=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..f2861a43a61e 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
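The rename above is mechanical: the arch_atomic_*() spellings become the raw, uninstrumented primitives, and the familiar atomic_*() names are regenerated as thin wrappers that add sanitizer hooks. A simplified sketch of the wrapper shape, modeled on the asm-generic instrumented header rather than copied from it:

static __always_inline int
atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN entry point */
	return arch_atomic_read(v);
}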
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
__ret; \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#undef ____cmpxchg
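The cmpxchg()/xchg() family gets the same arch_ treatment while preserving the usual contract: the old value is returned, and the store happens only if it matched the expected value. A minimal sketch of the retry-loop idiom built on that contract (illustrative kernel-style code, not part of this patch):

/* lock-free increment of a plain int, assuming kernel context */
static inline void counter_inc(int *p)
{
	int old;

	do {
		old = READ_ONCE(*p);
	} while (arch_cmpxchg(p, old, old + 1) != old);
}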
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a4e96e2bec74..e49fabce7b33 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -99,12 +99,6 @@ struct alpha_machine_vector
const char *vector_name;
- /* NUMA information */
- int (*pa_to_nid)(unsigned long);
- int (*cpuid_to_nid)(int);
- unsigned long (*node_mem_start)(int);
- unsigned long (*node_mem_size)(int);
-
/* System specific parameters. */
union {
struct {
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
deleted file mode 100644
index 86644604d977..000000000000
--- a/arch/alpha/include/asm/mmzone.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
- * Adapted for the alpha wildfire architecture Jan 2001.
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#ifdef CONFIG_DISCONTIGMEM
-
-#include <asm/smp.h>
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-
-extern pg_data_t node_data[];
-
-#define alpha_pa_to_nid(pa) \
- (alpha_mv.pa_to_nid \
- ? alpha_mv.pa_to_nid(pa) \
- : (0))
-#define node_mem_start(nid) \
- (alpha_mv.node_mem_start \
- ? alpha_mv.node_mem_start(nid) \
- : (0UL))
-#define node_mem_size(nid) \
- (alpha_mv.node_mem_size \
- ? alpha_mv.node_mem_size(nid) \
- : ((nid) ? (0UL) : (~0UL)))
-
-#define pa_to_nid(pa) alpha_pa_to_nid(pa)
-#define NODE_DATA(nid) (&node_data[(nid)])
-
-#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-
-#if 1
-#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
-#else
-static inline unsigned long
-PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
-{
- unsigned long temp;
- temp = p >> PAGE_SHIFT;
- return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
-}
-#endif
-
-/*
- * Following are macros that each numa implementation must define.
- */
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) \
- ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
- << PAGE_SHIFT))
-
-/* XXX: FIXME -- nyc */
-#define kern_addr_valid(kaddr) (0)
-
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- unsigned long pfn; \
- \
- pfn = page_to_pfn(page) << 32; \
- pte_val(pte) = pfn | pgprot_val(pgprot); \
- \
- pte; \
-})
-
-#define pte_page(x) \
-({ \
- unsigned long kvirt; \
- struct page * __xx; \
- \
- kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
- __xx = virt_to_page(kvirt); \
- \
- __xx; \
-})
-
-#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
-#define pfn_valid(pfn) \
- (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
- node_spanned_pages(pfn_to_nid(pfn))) \
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 268f99b4602b..18f48a6f2ff6 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -17,9 +17,9 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
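The page.h hunk drops the movableflags plumbing by requesting GFP_HIGHUSER_MOVABLE outright; as a reminder of how those masks relate, a sketch of the include/linux/gfp.h definitions as of this era:

#define GFP_HIGHUSER		(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)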
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 8d856c62e22a..e1757b7cfe3d 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -206,7 +206,6 @@ extern unsigned long __zero_page(void);
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pte_pfn(pte) (pte_val(pte) >> 32)
-#ifndef CONFIG_DISCONTIGMEM
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
@@ -215,7 +214,6 @@ extern unsigned long __zero_page(void);
pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
pte; \
})
-#endif
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
@@ -330,9 +328,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr) (1)
-#endif
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 5a77a40567fa..7d393036aa8f 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -7,45 +7,6 @@
#include <linux/numa.h>
#include <asm/machvec.h>
-#ifdef CONFIG_NUMA
-static inline int cpu_to_node(int cpu)
-{
- int node;
-
- if (!alpha_mv.cpuid_to_nid)
- return 0;
-
- node = alpha_mv.cpuid_to_nid(cpu);
-
-#ifdef DEBUG_NUMA
- BUG_ON(node < 0);
-#endif
-
- return node;
-}
-
-extern struct cpumask node_to_cpumask_map[];
-/* FIXME: This is dumb, recalculating every time. But simple. */
-static const struct cpumask *cpumask_of_node(int node)
-{
- int cpu;
-
- if (node == NUMA_NO_NODE)
- return cpu_all_mask;
-
- cpumask_clear(&node_to_cpumask_map[node]);
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
- }
-
- return &node_to_cpumask_map[node];
-}
-
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
-
-#endif /* !CONFIG_NUMA */
# include <asm-generic/topology.h>
#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 57420356ce4c..6b3daba60987 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -127,6 +127,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
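SO_NETNS_COOKIE, given its alpha-specific option number here, is read-only from userspace and returns the 64-bit cookie of the socket's network namespace. A hedged userspace sketch, assuming libc headers that already export the constant (the kernel rejects a length other than 8 bytes):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	unsigned long long cookie;	/* the kernel fills in a u64 */
	socklen_t len = sizeof(cookie);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd >= 0 && !getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE,
				   &cookie, &len))
		printf("netns cookie: %llu\n", cookie);
	return 0;
}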
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 4485b77f8658..1efca79ac83c 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -287,8 +287,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 0 for scatter-gather 8MB at 8MB.
*/
- hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new_node(0, hose, 0x00800000, 0x00800000, 0);
hose->sg_isa->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[0].csr =
hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
@@ -305,8 +304,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
*/
- hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci = iommu_arena_new_node(0, hose, 0xc0000000, 0x40000000, 0);
hose->sg_pci->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[2].csr =
hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
@@ -843,53 +841,8 @@ EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
-
-/*
- * NUMA Support
- */
-/**********
- * FIXME - for now each cpu is a node by itself
- * -- no real support for striped mode
- **********
- */
-int
-marvel_pa_to_nid(unsigned long pa)
-{
- int cpuid;
- if ((pa >> 43) & 1) /* I/O */
- cpuid = (~(pa >> 35) & 0xff);
- else /* mem */
- cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));
-
- return marvel_cpuid_to_nid(cpuid);
-}
-
-int
-marvel_cpuid_to_nid(int cpuid)
-{
- return cpuid;
-}
-
-unsigned long
-marvel_node_mem_start(int nid)
-{
- unsigned long pa;
-
- pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
- pa <<= 34;
-
- return pa;
-}
-
-unsigned long
-marvel_node_mem_size(int nid)
-{
- return 16UL * 1024 * 1024 * 1024; /* 16GB */
-}
-
-
-/*
+/*
* AGP GART Support.
*/
#include <linux/agp_backend.h>
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index e8d3b033018d..3a804b67f9da 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -434,39 +434,12 @@ wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
-struct pci_ops wildfire_pci_ops =
+struct pci_ops wildfire_pci_ops =
{
.read = wildfire_read_config,
.write = wildfire_write_config,
};
-
-/*
- * NUMA Support
- */
-int wildfire_pa_to_nid(unsigned long pa)
-{
- return pa >> 36;
-}
-
-int wildfire_cpuid_to_nid(int cpuid)
-{
- /* assume 4 CPUs per node */
- return cpuid >> 2;
-}
-
-unsigned long wildfire_node_mem_start(int nid)
-{
- /* 64GB per node */
- return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
-}
-
-unsigned long wildfire_node_mem_size(int nid)
-{
- /* 64GB per node */
- return 64UL * 1024 * 1024 * 1024;
-}
-
#if DEBUG_DUMP_REGS
static void __init
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d84b19aa8e9d..35d7b3096d6e 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -71,33 +71,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
if (align < mem_size)
align = mem_size;
-
-#ifdef CONFIG_DISCONTIGMEM
-
- arena = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena) {
- printk("%s: couldn't allocate arena from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- }
-
- arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena->ptes) {
- printk("%s: couldn't allocate arena ptes from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
- }
-
-#else /* CONFIG_DISCONTIGMEM */
-
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
if (!arena)
panic("%s: Failed to allocate %zu bytes\n", __func__,
@@ -107,8 +80,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, mem_size, align);
-#endif /* CONFIG_DISCONTIGMEM */
-
spin_lock_init(&arena->lock);
arena->hose = hose;
arena->dma_base = base;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 5112ab996394..ef0c08ed0481 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -380,7 +380,7 @@ get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
* This one depends on the frame size of schedule(). Do a
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 701a05090141..5816a31c1b38 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -49,10 +49,6 @@ extern void marvel_init_arch(void);
extern void marvel_kill_arch(int);
extern void marvel_machine_check(unsigned long, unsigned long);
extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int marvel_pa_to_nid(unsigned long);
-extern int marvel_cpuid_to_nid(int);
-extern unsigned long marvel_node_mem_start(int);
-extern unsigned long marvel_node_mem_size(int);
extern struct _alpha_agp_info *marvel_agp_info(void);
struct io7 *marvel_find_io7(int pe);
struct io7 *marvel_next_io7(struct io7 *prev);
@@ -101,10 +97,6 @@ extern void wildfire_init_arch(void);
extern void wildfire_kill_arch(int);
extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int wildfire_pa_to_nid(unsigned long);
-extern int wildfire_cpuid_to_nid(int);
-extern unsigned long wildfire_node_mem_start(int);
-extern unsigned long wildfire_node_mem_size(int);
/* console.c */
#ifdef CONFIG_VGA_HOSE
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 03dda3beb3bd..5f6858e9dc28 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -79,11 +79,6 @@ int alpha_l3_cacheshape;
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif
-#ifdef CONFIG_NUMA
-struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_to_cpumask_map);
-#endif
-
/* Which processor we booted from. */
int boot_cpuid;
@@ -305,7 +300,6 @@ move_initrd(unsigned long mem_limit)
}
#endif
-#ifndef CONFIG_DISCONTIGMEM
static void __init
setup_memory(void *kernel_end)
{
@@ -389,9 +383,6 @@ setup_memory(void *kernel_end)
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#else
-extern void setup_memory(void *);
-#endif /* !CONFIG_DISCONTIGMEM */
int __init
page_is_ram(unsigned long pfn)
@@ -618,13 +609,6 @@ setup_arch(char **cmdline_p)
"VERBOSE_MCHECK "
#endif
-#ifdef CONFIG_DISCONTIGMEM
- "DISCONTIGMEM "
-#ifdef CONFIG_NUMA
- "NUMA "
-#endif
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
"DEBUG_SPINLOCK "
#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4dd9f3f3001..4b2575f936d4 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -166,7 +166,6 @@ smp_callin(void)
DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
cpuid, current, current->active_mm));
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 83d6c53d6d4d..1f99b03effc2 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -461,10 +461,5 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
.kill_arch = marvel_kill_arch,
.pci_map_irq = marvel_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = marvel_pa_to_nid,
- .cpuid_to_nid = marvel_cpuid_to_nid,
- .node_mem_start = marvel_node_mem_start,
- .node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 2c54d707142a..3cee05443f07 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -337,10 +337,5 @@ struct alpha_machine_vector wildfire_mv __initmv = {
.kill_arch = wildfire_kill_arch,
.pci_map_irq = wildfire_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = wildfire_pa_to_nid,
- .cpuid_to_nid = wildfire_cpuid_to_nid,
- .node_mem_start = wildfire_node_mem_start,
- .node_mem_size = wildfire_node_mem_size,
};
ALIAS_MV(wildfire)
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 08ac6612edad..bd770302eb82 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -6,5 +6,3 @@
ccflags-y := -Werror
obj-y := init.o fault.o
-
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index a97650a618f1..f6114d03357c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -235,8 +235,6 @@ callback_init(void * kernel_end)
return kernel_end;
}
-
-#ifndef CONFIG_DISCONTIGMEM
/*
* paging_init() sets up the memory map.
*/
@@ -257,7 +255,6 @@ void __init paging_init(void)
/* Initialize the kernel's ZERO_PGE. */
memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
-#endif /* CONFIG_DISCONTIGMEM */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
deleted file mode 100644
index 0636e254a22f..000000000000
--- a/arch/alpha/mm/numa.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/mm/numa.c
- *
- * DISCONTIGMEM NUMA alpha support.
- *
- * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/swap.h>
-#include <linux/initrd.h>
-#include <linux/pfn.h>
-#include <linux/module.h>
-
-#include <asm/hwrpb.h>
-#include <asm/sections.h>
-
-pg_data_t node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
-#undef DEBUG_DISCONTIG
-#ifdef DEBUG_DISCONTIG
-#define DBGDCONT(args...) printk(args)
-#else
-#define DBGDCONT(args...)
-#endif
-
-#define for_each_mem_cluster(memdesc, _cluster, i) \
- for ((_cluster) = (memdesc)->cluster, (i) = 0; \
- (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
-
-static void __init show_mem_layout(void)
-{
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- int i;
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- printk("Raw memory layout:\n");
- for_each_mem_cluster(memdesc, cluster, i) {
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
- }
-}
-
-static void __init
-setup_memory_node(int nid, void *kernel_end)
-{
- extern unsigned long mem_size_limit;
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- unsigned long start_kernel_pfn, end_kernel_pfn;
- unsigned long start, end;
- unsigned long node_pfn_start, node_pfn_end;
- unsigned long node_min_pfn, node_max_pfn;
- int i;
- int show_init = 0;
-
- /* Find the bounds of current node */
- node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
- node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- /* find the bounds of this node (node_min_pfn/node_max_pfn) */
- node_min_pfn = ~0UL;
- node_max_pfn = 0UL;
- for_each_mem_cluster(memdesc, cluster, i) {
- /* Bit 0 is console/PALcode reserved. Bit 1 is
- non-volatile memory -- we might want to mark
- this for later. */
- if (cluster->usage & 3)
- continue;
-
- start = cluster->start_pfn;
- end = start + cluster->numpages;
-
- if (start >= node_pfn_end || end <= node_pfn_start)
- continue;
-
- if (!show_init) {
- show_init = 1;
- printk("Initializing bootmem allocator on Node ID %d\n", nid);
- }
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
-
- if (start < node_pfn_start)
- start = node_pfn_start;
- if (end > node_pfn_end)
- end = node_pfn_end;
-
- if (start < node_min_pfn)
- node_min_pfn = start;
- if (end > node_max_pfn)
- node_max_pfn = end;
- }
-
- if (mem_size_limit && node_max_pfn > mem_size_limit) {
- static int msg_shown = 0;
- if (!msg_shown) {
- msg_shown = 1;
- printk("setup: forcing memory size to %ldK (from %ldK).\n",
- mem_size_limit << (PAGE_SHIFT - 10),
- node_max_pfn << (PAGE_SHIFT - 10));
- }
- node_max_pfn = mem_size_limit;
- }
-
- if (node_min_pfn >= node_max_pfn)
- return;
-
- /* Update global {min,max}_low_pfn from node information. */
- if (node_min_pfn < min_low_pfn)
- min_low_pfn = node_min_pfn;
- if (node_max_pfn > max_low_pfn)
- max_pfn = max_low_pfn = node_max_pfn;
-
-#if 0 /* we'll try this one again in a little while */
- /* Cute trick to make sure our local node data is on local memory */
- node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
-#endif
- printk(" Detected node memory: start %8lu, end %8lu\n",
- node_min_pfn, node_max_pfn);
-
- DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
-
- /* Find the bounds of kernel memory. */
- start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
- end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-
- if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
- panic("kernel loaded out of ram");
-
- memblock_add_node(PFN_PHYS(node_min_pfn),
- (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid);
-
- /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
- Note that we round this down, not up - node memory
- has much larger alignment than 8Mb, so it's safe. */
- node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
-
- NODE_DATA(nid)->node_start_pfn = node_min_pfn;
- NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
-
- node_set_online(nid);
-}
-
-void __init
-setup_memory(void *kernel_end)
-{
- unsigned long kernel_size;
- int nid;
-
- show_mem_layout();
-
- nodes_clear(node_online_map);
-
- min_low_pfn = ~0UL;
- max_low_pfn = 0UL;
- for (nid = 0; nid < MAX_NUMNODES; nid++)
- setup_memory_node(nid, kernel_end);
-
- kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
- memblock_reserve(KERNEL_START_PHYS, kernel_size);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = INITRD_START;
- if (initrd_start) {
- extern void *move_initrd(unsigned long);
-
- initrd_end = initrd_start+INITRD_SIZE;
- printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *) initrd_start, INITRD_SIZE);
-
- if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
- if (!move_initrd(PFN_PHYS(max_low_pfn)))
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%p)\ndisabling initrd\n",
- initrd_end,
- phys_to_virt(PFN_PHYS(max_low_pfn)));
- } else {
- nid = kvaddr_to_nid(initrd_start);
- memblock_reserve(virt_to_phys((void *)initrd_start),
- INITRD_SIZE);
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
-
-void __init paging_init(void)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
- unsigned long dma_local_pfn;
-
- /*
- * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
- * in the NUMA model, for now we convert it to a pfn and
- * we interpret this pfn as a local per-node information.
- * This issue isn't very important since none of these machines
- * have legacy ISA slots anyways.
- */
- dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- max_zone_pfn[ZONE_DMA] = dma_local_pfn;
- max_zone_pfn[ZONE_NORMAL] = max_pfn;
-
- free_area_init(max_zone_pfn);
-
- /* Initialize the kernel's ZERO_PGE. */
- memset((void *)ZERO_PGE, 0, PAGE_SIZE);
-}
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2d98501c0897..d8f51eb8963b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -62,10 +62,6 @@ config SCHED_OMIT_FRAME_POINTER
config GENERIC_CSUM
def_bool y
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool n
- depends on BROKEN
-
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -344,15 +340,6 @@ config ARC_HUGEPAGE_16M
endchoice
-config NODES_SHIFT
- int "Maximum NUMA Nodes (as a power of 2)"
- default "0" if !DISCONTIGMEM
- default "1" if DISCONTIGMEM
- depends on NEED_MULTIPLE_NODES
- help
- Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
- zones.
-
config ARC_COMPACT_IRQ_LEVELS
depends on ISA_ARCOMPACT
bool "Setup Timer IRQ as high Priority"
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
{
/*
* Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#endif
@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long temp; \
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long orig; \
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
-#define atomic_fetch_andnot atomic_fetch_andnot
-
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {
#define ATOMIC64_INIT(a) { (a) }
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 val;
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return val;
}
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
/*
* This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
}
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 a, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
s64 val, orig; \
\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
+#define arch_atomic64_andnot arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
s64 prev;
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev;
}
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
s64 prev;
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
}
/**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 val;
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return val;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index dfeffa25499b..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif
-#define cmpxchg(ptr, o, n) ({ \
+#define arch_cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n)); \
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
* !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
* semantics, and this lock also happens to be used by atomic_*()
*/
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
/*
@@ -123,7 +123,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
unsigned long flags; \
typeof(*(ptr)) old_val; \
@@ -136,7 +136,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#else
-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)
#endif
@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved.
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
deleted file mode 100644
index b86b9d1e54dc..000000000000
--- a/arch/arc/include/asm/mmzone.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_MMZONE_H
-#define _ASM_ARC_MMZONE_H
-
-#ifdef CONFIG_DISCONTIGMEM
-
-extern struct pglist_data node_data[];
-#define NODE_DATA(nid) (&node_data[nid])
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- int is_end_low = 1;
-
- if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
- is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
-
- /*
- * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF
- * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF
- * HIGHMEM with PAE40: 0x1_0000_0000 to ...
- */
- if (pfn >= ARCH_PFN_OFFSET && is_end_low)
- return 0;
-
- return 1;
-}
-
-static inline int pfn_valid(unsigned long pfn)
-{
- int nid = pfn_to_nid(pfn);
-
- return (pfn <= node_end_pfn(nid));
-}
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
index 95f8a4380e11..7a5449dfcb29 100644
--- a/arch/arc/include/uapi/asm/sigcontext.h
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -18,6 +18,7 @@
*/
struct sigcontext {
struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index cabef45f11df..5f0415fc7328 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -317,22 +317,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
* caused the fault.
*/
- /* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index b3ccb9e5ffe4..cb2f88502baf 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,6 +61,41 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
if (err)
return -EFAULT;
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 52906d314537..db0e104d6835 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f73da203b170..1b9576d21e24 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -83,7 +83,7 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
* is safe-kept and BLINK at a well known location in there
*/
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
return -1;
frame_info->task = tsk;
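
task_is_running() replaces the open-coded ->state comparison; the accessor was introduced in the same cycle so callers stop dereferencing the task state field directly. From memory it is roughly the following, so treat the exact form as an assumption:

    /* sched.h-style accessor this hunk switches to (sketch) */
    #define task_is_running(task)  (READ_ONCE((task)->__state) == TASK_RUNNING)
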
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a331bb5d8319..7654c2e42dc0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -83,12 +83,12 @@ static void show_faulting_vma(unsigned long address)
* non-inclusive vma
*/
mmap_read_lock(active_mm);
- vma = find_vma(active_mm, address);
+ vma = vma_lookup(active_mm, address);
- /* check against the find_vma( ) behaviour which returns the next VMA
- * if the container VMA is not found
+ /* Look up the VMA at the address and report if no containing VMA
+ * is found
*/
- if (vma && (vma->vm_start <= address)) {
+ if (vma) {
char buf[ARC_PATH_MAX];
char *nm = "?";
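
vma_lookup() folds the old find_vma()-plus-range-check pair into one call: it returns a VMA only if the address actually falls inside it, else NULL, which is why the vm_start comparison can be dropped. Its definition is essentially the following (a sketch of the helper introduced in this same release):

    static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
                                                    unsigned long addr)
    {
        struct vm_area_struct *vma = find_vma(mm, addr);

        /* find_vma() may return the *next* VMA; reject that case */
        if (vma && addr < vma->vm_start)
            vma = NULL;
        return vma;
    }
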
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 33ce59d91461..e2146a8da195 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -57,7 +57,6 @@ SECTIONS
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
- _stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
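
Moving _stext out of the init area and next to _text means the [_stext, _etext) window now covers only permanent kernel text, so text-classification helpers stop matching init code that is freed after boot. A sketch of the kind of check this affects (core_kernel_text() performs a similar comparison; the exact upstream form is an assumption):

    extern char _stext[], _etext[];

    static bool addr_is_kernel_text(unsigned long addr)
    {
        /* With _stext inside .text, freed init code no longer matches. */
        return addr >= (unsigned long)_stext && addr < (unsigned long)_etext;
    }
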
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index e2ed355438c9..abfeef7bf6f8 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -32,11 +32,6 @@ unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
-#ifdef CONFIG_DISCONTIGMEM
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-#endif
-
long __init arc_get_mem_sz(void)
{
return low_mem_sz;
@@ -139,20 +134,14 @@ void __init setup_arch_memory(void)
#ifdef CONFIG_HIGHMEM
/*
- * Populate a new node with highmem
- *
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
- * than addresses in normal ala low memory (0x8000_0000 based).
+ * than addresses in normal aka low memory (0x8000_0000 based).
* Even with PAE, the huge peripheral space hole would waste a lot of
- * mem with single mem_map[]. This warrants a mem_map per region design.
- * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
- *
- * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
- * populated with normal memory zone while node 1 only has highmem
+ * mem with a single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled, the memory map corresponding
+ * to the hole is freed and an ARC-specific version of pfn_valid()
+ * handles the hole in the memory map.
*/
-#ifdef CONFIG_DISCONTIGMEM
- node_set_online(1);
-#endif
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
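
The rewritten comment describes the replacement scheme: one flat mem_map[] spans both ranges, the struct pages backing the address hole are freed, and an ARC-specific pfn_valid() rejects PFNs that land in the hole. A minimal sketch of such a check, built from the PFN bounds visible in this hunk (the real helper in this series may differ):

    /* Sketch: a PFN is valid if it lies in lowmem or highmem, not the hole. */
    static bool arc_pfn_valid_sketch(unsigned long pfn)
    {
        return (pfn >= min_low_pfn  && pfn <= max_low_pfn) ||
               (pfn >= min_high_pfn && pfn <= max_high_pfn);
    }
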
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 7d2c72562c73..9148a01ed6d9 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -105,9 +105,13 @@
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
- phy-handle = <&ethphy0>;
status = "okay";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
index 236fc205c389..d0768ae429fa 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
@@ -406,6 +406,18 @@
vin-supply = <&sw1_reg>;
};
+&reg_pu {
+ vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+ vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+ vin-supply = <&sw2_reg>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
index 828cf3e39784..c4e146f3341b 100644
--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
@@ -126,7 +126,7 @@
compatible = "nxp,pca8574";
reg = <0x3a>;
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
index 5339210b63d0..dd8003bd1fc0 100644
--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
+++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
@@ -193,7 +193,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
keep-power-in-suspend;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
no-1-8-v;
broken-cd;
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index e57da0d32b98..e519897fae08 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -351,7 +351,7 @@
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
bus-width = <4>;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
wakeup-source;
no-1-8-v;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 05c55875835d..e47e1ca63043 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -101,6 +101,13 @@
ranges = <0 0x100000 0x2400>;
};
+ vdec0: vdec@300000 {
+ compatible = "microchip,sama5d4-vdec";
+ reg = <0x00300000 0x100000>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ };
+
usb0: gadget@400000 {
compatible = "atmel,sama5d3-udc";
reg = <0x00400000 0x100000
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 2aa3ebeb89d7..7a32de51f0fa 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -64,7 +64,6 @@ CONFIG_PARIDE_ON26=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_VENDOR_3COM=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 875a3c28a267..363f1b1b08e3 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -215,8 +215,6 @@ CONFIG_IIO=m
CONFIG_AD5446=m
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_SPI=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8f26c454ea12..eafa898ba6a7 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -45,20 +45,12 @@ poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o
-ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl
+$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
-$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
- $(call cmd,perl)
-
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
- $(call cmd,perl)
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
# massage the perlasm code a bit so we only get the NEON routine if we need it
diff --git a/arch/arm/crypto/poly1305-core.S_shipped b/arch/arm/crypto/poly1305-core.S_shipped
deleted file mode 100644
index 37b71d990293..000000000000
--- a/arch/arm/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,1158 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
-# define poly1305_init poly1305_init_arm
-# define poly1305_blocks poly1305_blocks_arm
-# define poly1305_emit poly1305_emit_arm
-.globl poly1305_blocks_neon
-#endif
-
-#if defined(__thumb2__)
-.syntax unified
-.thumb
-#else
-.code 32
-#endif
-
-.text
-
-.globl poly1305_emit
-.globl poly1305_blocks
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
-.Lpoly1305_init:
- stmdb sp!,{r4-r11}
-
- eor r3,r3,r3
- cmp r1,#0
- str r3,[r0,#0] @ zero hash value
- str r3,[r0,#4]
- str r3,[r0,#8]
- str r3,[r0,#12]
- str r3,[r0,#16]
- str r3,[r0,#36] @ clear is_base2_26
- add r0,r0,#20
-
-#ifdef __thumb2__
- it eq
-#endif
- moveq r0,#0
- beq .Lno_key
-
-#if __ARM_MAX_ARCH__>=7
- mov r3,#-1
- str r3,[r0,#28] @ impossible key power value
-# ifndef __KERNEL__
- adr r11,.Lpoly1305_init
- ldr r12,.LOPENSSL_armcap
-# endif
-#endif
- ldrb r4,[r1,#0]
- mov r10,#0x0fffffff
- ldrb r5,[r1,#1]
- and r3,r10,#-4 @ 0x0ffffffc
- ldrb r6,[r1,#2]
- ldrb r7,[r1,#3]
- orr r4,r4,r5,lsl#8
- ldrb r5,[r1,#4]
- orr r4,r4,r6,lsl#16
- ldrb r6,[r1,#5]
- orr r4,r4,r7,lsl#24
- ldrb r7,[r1,#6]
- and r4,r4,r10
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-# if !defined(_WIN32)
- ldr r12,[r11,r12] @ OPENSSL_armcap_P
-# endif
-# if defined(__APPLE__) || defined(_WIN32)
- ldr r12,[r12]
-# endif
-#endif
- ldrb r8,[r1,#7]
- orr r5,r5,r6,lsl#8
- ldrb r6,[r1,#8]
- orr r5,r5,r7,lsl#16
- ldrb r7,[r1,#9]
- orr r5,r5,r8,lsl#24
- ldrb r8,[r1,#10]
- and r5,r5,r3
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- tst r12,#ARMV7_NEON @ check for NEON
-# ifdef __thumb2__
- adr r9,.Lpoly1305_blocks_neon
- adr r11,.Lpoly1305_blocks
- it ne
- movne r11,r9
- adr r12,.Lpoly1305_emit
- orr r11,r11,#1 @ thumb-ify addresses
- orr r12,r12,#1
-# else
- add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
- ite eq
- addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
- addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
-# endif
-#endif
- ldrb r9,[r1,#11]
- orr r6,r6,r7,lsl#8
- ldrb r7,[r1,#12]
- orr r6,r6,r8,lsl#16
- ldrb r8,[r1,#13]
- orr r6,r6,r9,lsl#24
- ldrb r9,[r1,#14]
- and r6,r6,r3
-
- ldrb r10,[r1,#15]
- orr r7,r7,r8,lsl#8
- str r4,[r0,#0]
- orr r7,r7,r9,lsl#16
- str r5,[r0,#4]
- orr r7,r7,r10,lsl#24
- str r6,[r0,#8]
- and r7,r7,r3
- str r7,[r0,#12]
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- stmia r2,{r11,r12} @ fill functions table
- mov r0,#1
-#else
- mov r0,#0
-#endif
-.Lno_key:
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_init,.-poly1305_init
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- stmdb sp!,{r3-r11,lr}
-
- ands r2,r2,#-16
- beq .Lno_data
-
- add r2,r2,r1 @ end pointer
- sub sp,sp,#32
-
-#if __ARM_ARCH__<7
- ldmia r0,{r4-r12} @ load context
- add r0,r0,#20
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-#else
- ldr lr,[r0,#36] @ is_base2_26
- ldmia r0!,{r4-r8} @ load hash value
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-
- adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32
- mov r10,r5,lsr#6
- adcs r10,r10,r6,lsl#20
- mov r11,r6,lsr#12
- adcs r11,r11,r7,lsl#14
- mov r12,r7,lsr#18
- adcs r12,r12,r8,lsl#8
- mov r2,#0
- teq lr,#0
- str r2,[r0,#16] @ clear is_base2_26
- adc r2,r2,r8,lsr#24
-
- itttt ne
- movne r4,r9 @ choose between radixes
- movne r5,r10
- movne r6,r11
- movne r7,r12
- ldmia r0,{r9-r12} @ load key
- it ne
- movne r8,r2
-#endif
-
- mov lr,r1
- cmp r3,#0
- str r10,[sp,#20]
- str r11,[sp,#24]
- str r12,[sp,#28]
- b .Loop
-
-.align 4
-.Loop:
-#if __ARM_ARCH__<7
- ldrb r0,[lr],#16 @ load input
-# ifdef __thumb2__
- it hi
-# endif
- addhi r8,r8,#1 @ 1<<128
- ldrb r1,[lr,#-15]
- ldrb r2,[lr,#-14]
- ldrb r3,[lr,#-13]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-12]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-11]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-10]
- adds r4,r4,r3 @ accumulate input
-
- ldrb r3,[lr,#-9]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-8]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-7]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-6]
- adcs r5,r5,r3
-
- ldrb r3,[lr,#-5]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-4]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-3]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-2]
- adcs r6,r6,r3
-
- ldrb r3,[lr,#-1]
- orr r1,r0,r1,lsl#8
- str lr,[sp,#8] @ offload input pointer
- orr r2,r1,r2,lsl#16
- add r10,r10,r10,lsr#2
- orr r3,r2,r3,lsl#24
-#else
- ldr r0,[lr],#16 @ load input
- it hi
- addhi r8,r8,#1 @ padbit
- ldr r1,[lr,#-12]
- ldr r2,[lr,#-8]
- ldr r3,[lr,#-4]
-# ifdef __ARMEB__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-# endif
- adds r4,r4,r0 @ accumulate input
- str lr,[sp,#8] @ offload input pointer
- adcs r5,r5,r1
- add r10,r10,r10,lsr#2
- adcs r6,r6,r2
-#endif
- add r11,r11,r11,lsr#2
- adcs r7,r7,r3
- add r12,r12,r12,lsr#2
-
- umull r2,r3,r5,r9
- adc r8,r8,#0
- umull r0,r1,r4,r9
- umlal r2,r3,r8,r10
- umlal r0,r1,r7,r10
- ldr r10,[sp,#20] @ reload r10
- umlal r2,r3,r6,r12
- umlal r0,r1,r5,r12
- umlal r2,r3,r7,r11
- umlal r0,r1,r6,r11
- umlal r2,r3,r4,r10
- str r0,[sp,#0] @ future r4
- mul r0,r11,r8
- ldr r11,[sp,#24] @ reload r11
- adds r2,r2,r1 @ d1+=d0>>32
- eor r1,r1,r1
- adc lr,r3,#0 @ future r6
- str r2,[sp,#4] @ future r5
-
- mul r2,r12,r8
- eor r3,r3,r3
- umlal r0,r1,r7,r12
- ldr r12,[sp,#28] @ reload r12
- umlal r2,r3,r7,r9
- umlal r0,r1,r6,r9
- umlal r2,r3,r6,r10
- umlal r0,r1,r5,r10
- umlal r2,r3,r5,r11
- umlal r0,r1,r4,r11
- umlal r2,r3,r4,r12
- ldr r4,[sp,#0]
- mul r8,r9,r8
- ldr r5,[sp,#4]
-
- adds r6,lr,r0 @ d2+=d1>>32
- ldr lr,[sp,#8] @ reload input pointer
- adc r1,r1,#0
- adds r7,r2,r1 @ d3+=d2>>32
- ldr r0,[sp,#16] @ reload end pointer
- adc r3,r3,#0
- add r8,r8,r3 @ h4+=d3>>32
-
- and r1,r8,#-4
- and r8,r8,#3
- add r1,r1,r1,lsr#2 @ *=5
- adds r4,r4,r1
- adcs r5,r5,#0
- adcs r6,r6,#0
- adcs r7,r7,#0
- adc r8,r8,#0
-
- cmp r0,lr @ done yet?
- bhi .Loop
-
- ldr r0,[sp,#12]
- add sp,sp,#32
- stmdb r0,{r4-r8} @ store the result
-
-.Lno_data:
-#if __ARM_ARCH__>=5
- ldmia sp!,{r3-r11,pc}
-#else
- ldmia sp!,{r3-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_blocks,.-poly1305_blocks
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- stmdb sp!,{r4-r11}
-
- ldmia r0,{r3-r7}
-
-#if __ARM_ARCH__>=7
- ldr ip,[r0,#36] @ is_base2_26
-
- adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32
- mov r9,r4,lsr#6
- adcs r9,r9,r5,lsl#20
- mov r10,r5,lsr#12
- adcs r10,r10,r6,lsl#14
- mov r11,r6,lsr#18
- adcs r11,r11,r7,lsl#8
- mov r0,#0
- adc r0,r0,r7,lsr#24
-
- tst ip,ip
- itttt ne
- movne r3,r8
- movne r4,r9
- movne r5,r10
- movne r6,r11
- it ne
- movne r7,r0
-#endif
-
- adds r8,r3,#5 @ compare to modulus
- adcs r9,r4,#0
- adcs r10,r5,#0
- adcs r11,r6,#0
- adc r0,r7,#0
- tst r0,#4 @ did it carry/borrow?
-
-#ifdef __thumb2__
- it ne
-#endif
- movne r3,r8
- ldr r8,[r2,#0]
-#ifdef __thumb2__
- it ne
-#endif
- movne r4,r9
- ldr r9,[r2,#4]
-#ifdef __thumb2__
- it ne
-#endif
- movne r5,r10
- ldr r10,[r2,#8]
-#ifdef __thumb2__
- it ne
-#endif
- movne r6,r11
- ldr r11,[r2,#12]
-
- adds r3,r3,r8
- adcs r4,r4,r9
- adcs r5,r5,r10
- adc r6,r6,r11
-
-#if __ARM_ARCH__>=7
-# ifdef __ARMEB__
- rev r3,r3
- rev r4,r4
- rev r5,r5
- rev r6,r6
-# endif
- str r3,[r1,#0]
- str r4,[r1,#4]
- str r5,[r1,#8]
- str r6,[r1,#12]
-#else
- strb r3,[r1,#0]
- mov r3,r3,lsr#8
- strb r4,[r1,#4]
- mov r4,r4,lsr#8
- strb r5,[r1,#8]
- mov r5,r5,lsr#8
- strb r6,[r1,#12]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#1]
- mov r3,r3,lsr#8
- strb r4,[r1,#5]
- mov r4,r4,lsr#8
- strb r5,[r1,#9]
- mov r5,r5,lsr#8
- strb r6,[r1,#13]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#2]
- mov r3,r3,lsr#8
- strb r4,[r1,#6]
- mov r4,r4,lsr#8
- strb r5,[r1,#10]
- mov r5,r5,lsr#8
- strb r6,[r1,#14]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#3]
- strb r4,[r1,#7]
- strb r5,[r1,#11]
- strb r6,[r1,#15]
-#endif
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_emit,.-poly1305_emit
-#if __ARM_MAX_ARCH__>=7
-.fpu neon
-
-.type poly1305_init_neon,%function
-.align 5
-poly1305_init_neon:
-.Lpoly1305_init_neon:
- ldr r3,[r0,#48] @ first table element
- cmp r3,#-1 @ is value impossible?
- bne .Lno_init_neon
-
- ldr r4,[r0,#20] @ load key base 2^32
- ldr r5,[r0,#24]
- ldr r6,[r0,#28]
- ldr r7,[r0,#32]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- and r3,r3,#0x03ffffff
- and r4,r4,#0x03ffffff
- and r5,r5,#0x03ffffff
-
- vdup.32 d0,r2 @ r^1 in both lanes
- add r2,r3,r3,lsl#2 @ *5
- vdup.32 d1,r3
- add r3,r4,r4,lsl#2
- vdup.32 d2,r2
- vdup.32 d3,r4
- add r4,r5,r5,lsl#2
- vdup.32 d4,r3
- vdup.32 d5,r5
- add r5,r6,r6,lsl#2
- vdup.32 d6,r4
- vdup.32 d7,r6
- vdup.32 d8,r5
-
- mov r5,#2 @ counter
-
-.Lsquare_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
-
- vmull.u32 q5,d0,d0[1]
- vmull.u32 q6,d1,d0[1]
- vmull.u32 q7,d3,d0[1]
- vmull.u32 q8,d5,d0[1]
- vmull.u32 q9,d7,d0[1]
-
- vmlal.u32 q5,d7,d2[1]
- vmlal.u32 q6,d0,d1[1]
- vmlal.u32 q7,d1,d1[1]
- vmlal.u32 q8,d3,d1[1]
- vmlal.u32 q9,d5,d1[1]
-
- vmlal.u32 q5,d5,d4[1]
- vmlal.u32 q6,d7,d4[1]
- vmlal.u32 q8,d1,d3[1]
- vmlal.u32 q7,d0,d3[1]
- vmlal.u32 q9,d3,d3[1]
-
- vmlal.u32 q5,d3,d6[1]
- vmlal.u32 q8,d0,d5[1]
- vmlal.u32 q6,d5,d6[1]
- vmlal.u32 q7,d7,d6[1]
- vmlal.u32 q9,d1,d5[1]
-
- vmlal.u32 q8,d7,d8[1]
- vmlal.u32 q5,d1,d8[1]
- vmlal.u32 q6,d3,d8[1]
- vmlal.u32 q7,d5,d8[1]
- vmlal.u32 q9,d0,d7[1]
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- @ and P. Schwabe
- @
- @ H0>>+H1>>+H2>>+H3>>+H4
- @ H3>>+H4>>*5+H0>>+H1
- @
- @ Trivia.
- @
- @ Result of multiplication of n-bit number by m-bit number is
- @ n+m bits wide. However! Even though 2^n is a n+1-bit number,
- @ m-bit number multiplied by 2^n is still n+m bits wide.
- @
- @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
- @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit
- @ one is n+1 bits wide.
- @
- @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
- @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
- @ can be 27. However! In cases when their width exceeds 26 bits
- @ they are limited by 2^26+2^6. This in turn means that *sum*
- @ of the products with these values can still be viewed as sum
- @ of 52-bit numbers as long as the amount of addends is not a
- @ power of 2. For example,
- @
- @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
- @
- @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
- @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
- @ 8 * (2^52) or 2^55. However, the value is then multiplied by
- @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
- @ which is less than 32 * (2^52) or 2^57. And when processing
- @ data we are looking at triple as many addends...
- @
- @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and
- @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
- @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
- @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
- @ instruction accepts 2x32-bit input and writes 2x64-bit result.
- @ This means that result of reduction have to be compressed upon
- @ loop wrap-around. This can be done in the process of reduction
- @ to minimize amount of instructions [as well as amount of
- @ 128-bit instructions, which benefits low-end processors], but
- @ one has to watch for H2 (which is narrower than H0) and 5*H4
- @ not being wider than 58 bits, so that result of right shift
- @ by 26 bits fits in 32 bits. This is also useful on x86,
- @ because it allows to use paddd in place for paddq, which
- @ benefits Atom, where paddq is ridiculously slow.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000 @ &=0x03ffffff
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vbic.i32 d18,#0xfc000000
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vadd.i32 d10,d10,d30 @ h4 -> h0
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vbic.i32 d14,#0xfc000000
-
- vshr.u32 d30,d10,#26
- vbic.i32 d10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
-
- subs r5,r5,#1
- beq .Lsquare_break_neon
-
- add r6,r0,#(48+0*9*4)
- add r7,r0,#(48+1*9*4)
-
- vtrn.32 d0,d10 @ r^2:r^1
- vtrn.32 d3,d14
- vtrn.32 d5,d16
- vtrn.32 d1,d12
- vtrn.32 d7,d18
-
- vshl.u32 d4,d3,#2 @ *5
- vshl.u32 d6,d5,#2
- vshl.u32 d2,d1,#2
- vshl.u32 d8,d7,#2
- vadd.i32 d4,d4,d3
- vadd.i32 d2,d2,d1
- vadd.i32 d6,d6,d5
- vadd.i32 d8,d8,d7
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6,:32]
- vst1.32 {d8[1]},[r7,:32]
-
- b .Lsquare_neon
-
-.align 4
-.Lsquare_break_neon:
- add r6,r0,#(48+2*4*9)
- add r7,r0,#(48+3*4*9)
-
- vmov d0,d10 @ r^4:r^3
- vshl.u32 d2,d12,#2 @ *5
- vmov d1,d12
- vshl.u32 d4,d14,#2
- vmov d3,d14
- vshl.u32 d6,d16,#2
- vmov d5,d16
- vshl.u32 d8,d18,#2
- vmov d7,d18
- vadd.i32 d2,d2,d12
- vadd.i32 d4,d4,d14
- vadd.i32 d6,d6,d16
- vadd.i32 d8,d8,d18
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6]
- vst1.32 {d8[1]},[r7]
-
-.Lno_init_neon:
- bx lr @ bx lr
-.size poly1305_init_neon,.-poly1305_init_neon
-
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr ip,[r0,#36] @ is_base2_26
-
- cmp r2,#64
- blo .Lpoly1305_blocks
-
- stmdb sp!,{r4-r7}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-
- tst ip,ip @ is_base2_26?
- bne .Lbase2_26_neon
-
- stmdb sp!,{r1-r3,lr}
- bl .Lpoly1305_init_neon
-
- ldr r4,[r0,#0] @ load hash value base 2^32
- ldr r5,[r0,#4]
- ldr r6,[r0,#8]
- ldr r7,[r0,#12]
- ldr ip,[r0,#16]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- veor d10,d10,d10
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- veor d12,d12,d12
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- veor d14,d14,d14
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- veor d16,d16,d16
- and r3,r3,#0x03ffffff
- orr r6,r6,ip,lsl#24
- veor d18,d18,d18
- and r4,r4,#0x03ffffff
- mov r1,#1
- and r5,r5,#0x03ffffff
- str r1,[r0,#36] @ set is_base2_26
-
- vmov.32 d10[0],r2
- vmov.32 d12[0],r3
- vmov.32 d14[0],r4
- vmov.32 d16[0],r5
- vmov.32 d18[0],r6
- adr r5,.Lzeros
-
- ldmia sp!,{r1-r3,lr}
- b .Lhash_loaded
-
-.align 4
-.Lbase2_26_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ load hash value
-
- veor d10,d10,d10
- veor d12,d12,d12
- veor d14,d14,d14
- veor d16,d16,d16
- veor d18,d18,d18
- vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- adr r5,.Lzeros
- vld1.32 {d18[0]},[r0]
- sub r0,r0,#16 @ rewind
-
-.Lhash_loaded:
- add r4,r1,#32
- mov r3,r3,lsl#24
- tst r2,#31
- beq .Leven
-
- vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]!
- vmov.32 d28[0],r3
- sub r2,r2,#16
- add r4,r1,#32
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26
- vshl.u32 d26,d26,#18
-
- vsri.u32 d26,d24,#14
- vshl.u32 d24,d24,#12
- vadd.i32 d29,d28,d18 @ add hash value and move to #hi
-
- vbic.i32 d26,#0xfc000000
- vsri.u32 d24,d22,#20
- vshl.u32 d22,d22,#6
-
- vbic.i32 d24,#0xfc000000
- vsri.u32 d22,d20,#26
- vadd.i32 d27,d26,d16
-
- vbic.i32 d20,#0xfc000000
- vbic.i32 d22,#0xfc000000
- vadd.i32 d25,d24,d14
-
- vadd.i32 d21,d20,d10
- vadd.i32 d23,d22,d12
-
- mov r7,r5
- add r6,r0,#48
-
- cmp r2,r2
- b .Long_tail
-
-.align 4
-.Leven:
- subs r2,r2,#64
- it lo
- movlo r4,r5
-
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
- itt hi
- addhi r7,r0,#(48+1*9*4)
- addhi r6,r0,#(48+3*9*4)
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vshl.u32 q13,q13,#18
-
- vsri.u32 q13,q12,#14
- vshl.u32 q12,q12,#12
-
- vbic.i32 q13,#0xfc000000
- vsri.u32 q12,q11,#20
- vshl.u32 q11,q11,#6
-
- vbic.i32 q12,#0xfc000000
- vsri.u32 q11,q10,#26
-
- vbic.i32 q10,#0xfc000000
- vbic.i32 q11,#0xfc000000
-
- bls .Lskip_loop
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- b .Loop_neon
-
-.align 5
-.Loop_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- @ ___________________/
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- @ ___________________/ ____________________/
- @
- @ Note that we start with inp[2:3]*r^2. This is because it
- @ doesn't depend on reduction in previous iteration.
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ inp[2:3]*r^2
-
- vadd.i32 d24,d24,d14 @ accumulate inp[0:1]
- vmull.u32 q7,d25,d0[1]
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0[1]
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0[1]
- vmlal.u32 q7,d23,d1[1]
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0[1]
-
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0[1]
- subs r2,r2,#64
- vmlal.u32 q5,d29,d2[1]
- it lo
- movlo r4,r5
- vmlal.u32 q8,d25,d1[1]
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q6,d21,d1[1]
- vmlal.u32 q9,d27,d1[1]
-
- vmlal.u32 q5,d27,d4[1]
- vmlal.u32 q8,d23,d3[1]
- vmlal.u32 q9,d25,d3[1]
- vmlal.u32 q6,d29,d4[1]
- vmlal.u32 q7,d21,d3[1]
-
- vmlal.u32 q8,d21,d5[1]
- vmlal.u32 q5,d25,d6[1]
- vmlal.u32 q9,d23,d5[1]
- vmlal.u32 q6,d27,d6[1]
- vmlal.u32 q7,d29,d6[1]
-
- vmlal.u32 q8,d29,d8[1]
- vmlal.u32 q5,d23,d8[1]
- vmlal.u32 q9,d21,d7[1]
- vmlal.u32 q6,d25,d8[1]
- vmlal.u32 q7,d27,d8[1]
-
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4 and accumulate
-
- vmlal.u32 q8,d26,d0[0]
- vmlal.u32 q5,d20,d0[0]
- vmlal.u32 q9,d28,d0[0]
- vmlal.u32 q6,d22,d0[0]
- vmlal.u32 q7,d24,d0[0]
- vld1.32 d8[0],[r6,:32]
-
- vmlal.u32 q8,d24,d1[0]
- vmlal.u32 q5,d28,d2[0]
- vmlal.u32 q9,d26,d1[0]
- vmlal.u32 q6,d20,d1[0]
- vmlal.u32 q7,d22,d1[0]
-
- vmlal.u32 q8,d22,d3[0]
- vmlal.u32 q5,d26,d4[0]
- vmlal.u32 q9,d24,d3[0]
- vmlal.u32 q6,d28,d4[0]
- vmlal.u32 q7,d20,d3[0]
-
- vmlal.u32 q8,d20,d5[0]
- vmlal.u32 q5,d24,d6[0]
- vmlal.u32 q9,d22,d5[0]
- vmlal.u32 q6,d26,d6[0]
- vmlal.u32 q8,d28,d8[0]
-
- vmlal.u32 q7,d28,d6[0]
- vmlal.u32 q5,d22,d8[0]
- vmlal.u32 q9,d20,d7[0]
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vmlal.u32 q6,d24,d8[0]
- vmlal.u32 q7,d26,d8[0]
-
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vrev32.8 q12,q12
- vrev32.8 q13,q13
-# endif
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction interleaved with base 2^32 -> base 2^26 of
- @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vshl.u32 q13,q13,#18
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vsri.u32 q13,q12,#14
- vbic.i32 d18,#0xfc000000
- vshl.u32 q12,q12,#12
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vbic.i32 q13,#0xfc000000
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec]
- vsri.u32 q12,q11,#20
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vshl.u32 q11,q11,#6
- vbic.i32 d14,#0xfc000000
- vbic.i32 q12,#0xfc000000
-
- vshrn.u64 d30,q5,#26 @ re-narrow
- vmovn.i64 d10,q5
- vsri.u32 q11,q10,#26
- vbic.i32 q10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vbic.i32 d10,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
- vbic.i32 q11,#0xfc000000
-
- bhi .Loop_neon
-
-.Lskip_loop:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- add r7,r0,#(48+0*9*4)
- add r6,r0,#(48+1*9*4)
- adds r2,r2,#32
- it ne
- movne r2,#0
- bne .Long_tail
-
- vadd.i32 d25,d24,d14 @ add hash value and move to #hi
- vadd.i32 d21,d20,d10
- vadd.i32 d27,d26,d16
- vadd.i32 d23,d22,d12
- vadd.i32 d29,d28,d18
-
-.Long_tail:
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2
-
- vadd.i32 d24,d24,d14 @ can be redundant
- vmull.u32 q7,d25,d0
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0
-
- vmlal.u32 q5,d29,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d25,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d21,d1
- vmlal.u32 q9,d27,d1
- vmlal.u32 q7,d23,d1
-
- vmlal.u32 q8,d23,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d27,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d25,d3
- vmlal.u32 q6,d29,d4
- vmlal.u32 q7,d21,d3
-
- vmlal.u32 q8,d21,d5
- it ne
- addne r7,r0,#(48+2*9*4)
- vmlal.u32 q5,d25,d6
- it ne
- addne r6,r0,#(48+3*9*4)
- vmlal.u32 q9,d23,d5
- vmlal.u32 q6,d27,d6
- vmlal.u32 q7,d29,d6
-
- vmlal.u32 q8,d29,d8
- vorn q0,q0,q0 @ all-ones, can be redundant
- vmlal.u32 q5,d23,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d21,d7
- vmlal.u32 q6,d25,d8
- vmlal.u32 q7,d27,d8
-
- beq .Lshort_tail
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4:r^3 and accumulate
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
-
- vmlal.u32 q7,d24,d0
- vmlal.u32 q5,d20,d0
- vmlal.u32 q8,d26,d0
- vmlal.u32 q6,d22,d0
- vmlal.u32 q9,d28,d0
-
- vmlal.u32 q5,d28,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d24,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d20,d1
- vmlal.u32 q9,d26,d1
- vmlal.u32 q7,d22,d1
-
- vmlal.u32 q8,d22,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d26,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d24,d3
- vmlal.u32 q6,d28,d4
- vmlal.u32 q7,d20,d3
-
- vmlal.u32 q8,d20,d5
- vmlal.u32 q5,d24,d6
- vmlal.u32 q9,d22,d5
- vmlal.u32 q6,d26,d6
- vmlal.u32 q7,d28,d6
-
- vmlal.u32 q8,d28,d8
- vorn q0,q0,q0 @ all-ones
- vmlal.u32 q5,d22,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d20,d7
- vmlal.u32 q6,d24,d8
- vmlal.u32 q7,d26,d8
-
-.Lshort_tail:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ horizontal addition
-
- vadd.i64 d16,d16,d17
- vadd.i64 d10,d10,d11
- vadd.i64 d18,d18,d19
- vadd.i64 d12,d12,d13
- vadd.i64 d14,d14,d15
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction, but without narrowing
-
- vshr.u64 q15,q8,#26
- vand.i64 q8,q8,q0
- vshr.u64 q4,q5,#26
- vand.i64 q5,q5,q0
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vadd.i64 q6,q6,q4 @ h0 -> h1
-
- vshr.u64 q15,q9,#26
- vand.i64 q9,q9,q0
- vshr.u64 q4,q6,#26
- vand.i64 q6,q6,q0
- vadd.i64 q7,q7,q4 @ h1 -> h2
-
- vadd.i64 q5,q5,q15
- vshl.u64 q15,q15,#2
- vshr.u64 q4,q7,#26
- vand.i64 q7,q7,q0
- vadd.i64 q5,q5,q15 @ h4 -> h0
- vadd.i64 q8,q8,q4 @ h2 -> h3
-
- vshr.u64 q15,q5,#26
- vand.i64 q5,q5,q0
- vshr.u64 q4,q8,#26
- vand.i64 q8,q8,q0
- vadd.i64 q6,q6,q15 @ h0 -> h1
- vadd.i64 q9,q9,q4 @ h3 -> h4
-
- cmp r2,#0
- bne .Leven
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ store hash value
-
- vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- vst1.32 {d18[0]},[r0]
-
- vldmia sp!,{d8-d15} @ epilogue
- ldmia sp!,{r4-r7}
- bx lr @ bx lr
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-#ifndef __KERNEL__
-.LOPENSSL_armcap:
-# ifdef _WIN32
-.word OPENSSL_armcap_P
-# else
-.word OPENSSL_armcap_P-.Lpoly1305_init
-# endif
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
-#endif
-.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm"
-.align 2
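
The shipped poly1305-core.S can be deleted because the Makefile hunk earlier in this patch regenerates it from poly1305-armv4.pl at build time, so a pre-generated copy would only drift out of date. Much of the commentary in the deleted file concerns the base 2^32 -> base 2^26 limb split used by the NEON path; in C, the conversion the assembly performs looks roughly like this (a standalone sketch, not kernel code):

    #include <stdint.h>

    /* Split a 130-bit Poly1305 accumulator (four 32-bit limbs plus a
     * small fifth word h4) into five 26-bit limbs, mirroring the
     * "base 2^32 -> base 2^26" sequence in the deleted assembly. */
    static void poly1305_to_base26(uint32_t out[5],
                                   const uint32_t h[4], uint32_t h4)
    {
        out[0] =   h[0]                          & 0x03ffffff;
        out[1] = ((h[0] >> 26) | (h[1] <<  6))   & 0x03ffffff;
        out[2] = ((h[1] >> 20) | (h[2] << 12))   & 0x03ffffff;
        out[3] = ((h[2] >> 14) | (h[3] << 18))   & 0x03ffffff;
        out[4] =  (h[3] >>  8) | (h4 << 24);
    }
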
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
deleted file mode 100644
index 6363014a50d7..000000000000
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2816 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA256 block procedure for ARMv4. May 2007.
-
-@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
-@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
-@ byte [on single-issue Xscale PXA250 core].
-
-@ July 2010.
-@
-@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
-@ Cortex A8 core and ~20 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in 16%
-@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
-
-@ September 2013.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process one
-@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
-@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
-@ code (meaning that latter performs sub-optimally, nothing was done
-@ about it).
-
-@ May 2014.
-@
-@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K256,%object
-.align 5
-K256:
-.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
-.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
-.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
-.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
-.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
-.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
-.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
-.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
-.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
-.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
-.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
-.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
-.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
-.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
-.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
-.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
-.size K256,.-K256
-.word 0 @ terminator
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
-#endif
-.align 5
-
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_block_data_order
-#else
- adr r3,.Lsha256_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#ARMV8_SHA256
- bne .LARMv8
- tst r12,#ARMV7_NEON
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
- stmdb sp!,{r0,r1,r2,r4-r11,lr}
- ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
- sub r14,r3,#256+32 @ K256
- sub sp,sp,#16*4 @ alloca(X[16])
-.Loop:
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ magic
- eor r12,r12,r12
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 0
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 0
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 0==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 0<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 1
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 1
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 1==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 1<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 2
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 2
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 2==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 2<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 3
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 3
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 3==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 3<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 4
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 4
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 4==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 4<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 5
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 5==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 5<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 6
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 6
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 6==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 6<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 7
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 7==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 7<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 8
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 8
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 8==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 8<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 9
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 9
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 9==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 9<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 10
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 10
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 10==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 10<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 11
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 11
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 11==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 11<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 12
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 12
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 12==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 12<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 13
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 13
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 13==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 13<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 14
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 14
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 14==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 14<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 15
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 15
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 15==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 15<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-.Lrounds_16_xx:
- @ ldr r2,[sp,#1*4] @ 16
- @ ldr r1,[sp,#14*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#0*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#9*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 16==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 16<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#2*4] @ 17
- @ ldr r1,[sp,#15*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#1*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#10*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 17==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 17<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#3*4] @ 18
- @ ldr r1,[sp,#0*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#2*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#11*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 18==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 18<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#4*4] @ 19
- @ ldr r1,[sp,#1*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#3*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#12*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 19==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 19<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#5*4] @ 20
- @ ldr r1,[sp,#2*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#4*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#13*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 20==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 20<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#6*4] @ 21
- @ ldr r1,[sp,#3*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#5*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#14*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 21==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 21<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#7*4] @ 22
- @ ldr r1,[sp,#4*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#6*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#15*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 22==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 22<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#8*4] @ 23
- @ ldr r1,[sp,#5*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#7*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#0*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 23==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 23<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#9*4] @ 24
- @ ldr r1,[sp,#6*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#8*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#1*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 24==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 24<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#10*4] @ 25
- @ ldr r1,[sp,#7*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#9*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#2*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 25==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 25<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#11*4] @ 26
- @ ldr r1,[sp,#8*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#10*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#3*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 26==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 26<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#12*4] @ 27
- @ ldr r1,[sp,#9*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#11*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#4*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 27==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 27<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#13*4] @ 28
- @ ldr r1,[sp,#10*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#12*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#5*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 28==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 28<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#14*4] @ 29
- @ ldr r1,[sp,#11*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#13*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#6*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 29==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 29<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#15*4] @ 30
- @ ldr r1,[sp,#12*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#14*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#7*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 30==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 30<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#0*4] @ 31
- @ ldr r1,[sp,#13*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#15*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#8*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 31==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 31<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- ite eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r3,[sp,#16*4] @ pull ctx
- bne .Lrounds_16_xx
-
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r0,[r3,#0]
- ldr r2,[r3,#4]
- ldr r12,[r3,#8]
- add r4,r4,r0
- ldr r0,[r3,#12]
- add r5,r5,r2
- ldr r2,[r3,#16]
- add r6,r6,r12
- ldr r12,[r3,#20]
- add r7,r7,r0
- ldr r0,[r3,#24]
- add r8,r8,r2
- ldr r2,[r3,#28]
- add r9,r9,r12
- ldr r1,[sp,#17*4] @ pull inp
- ldr r12,[sp,#18*4] @ pull inp+len
- add r10,r10,r0
- add r11,r11,r2
- stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
- cmp r1,r12
- sub r14,r14,#256 @ rewind Ktbl
- bne .Loop
-
- add sp,sp,#19*4 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r11,pc}
-#else
- ldmia sp!,{r4-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha256_block_data_order,.-sha256_block_data_order
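Editor's note: the unrolled block above is the plain FIPS 180-4 SHA-256 compression step, one round per repetition. The comments name the pieces: h += Sigma1(e) + Ch(e,f,g) + K256[i] + X[i], then d += h, then h += Sigma0(a) + Maj(a,b,c). Maj is folded into ((a^b)&(b^c))^b so the a^b term can be carried into the next round as b^c, and the "cmp ...,#0xf2 @ done?" test keys off the low byte of the final K256 constant, 0xc67178f2. A minimal C sketch of one round, for orientation only (the helper names are ours, not the kernel's):

	#include <stdint.h>

	static inline uint32_t ror32(uint32_t x, unsigned int n)
	{
		return (x >> n) | (x << (32 - n));
	}

	/* One SHA-256 round: s[] holds the working variables a..h,
	 * k = K256[i], x = X[i]; equivalent to one repetition above. */
	static void sha256_round(uint32_t s[8], uint32_t k, uint32_t x)
	{
		uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
		uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

		/* h += Sigma1(e) + Ch(e,f,g) + K256[i] + X[i] */
		h += (ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25)) +
		     (g ^ (e & (f ^ g))) + k + x;
		d += h;
		/* h += Sigma0(a) + Maj(a,b,c), Maj via ((a^b)&(b^c))^b */
		h += (ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22)) +
		     (((a ^ b) & (b ^ c)) ^ b);

		/* rotate a..h, as the register renaming above does */
		s[0] = h; s[1] = a; s[2] = b; s[3] = c;
		s[4] = d; s[5] = e; s[6] = f; s[7] = g;
	}
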
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha256_block_data_order_neon
-.type sha256_block_data_order_neon,%function
-.align 4
-sha256_block_data_order_neon:
-.LNEON:
- stmdb sp!,{r4-r12,lr}
-
- sub r11,sp,#16*4+16
- adr r14,.Lsha256_block_data_order
- sub r14,r14,#.Lsha256_block_data_order-K256
- bic r11,r11,#15 @ align for 128-bit stores
- mov r12,sp
- mov sp,r11 @ alloca
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
- vld1.8 {q0},[r1]!
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- vld1.32 {q8},[r14,:128]!
- vld1.32 {q9},[r14,:128]!
- vld1.32 {q10},[r14,:128]!
- vld1.32 {q11},[r14,:128]!
- vrev32.8 q0,q0 @ yes, even on
- str r0,[sp,#64]
- vrev32.8 q1,q1 @ big-endian
- str r1,[sp,#68]
- mov r1,sp
- vrev32.8 q2,q2
- str r2,[sp,#72]
- vrev32.8 q3,q3
- str r12,[sp,#76] @ save original sp
- vadd.i32 q8,q8,q0
- vadd.i32 q9,q9,q1
- vst1.32 {q8},[r1,:128]!
- vadd.i32 q10,q10,q2
- vst1.32 {q9},[r1,:128]!
- vadd.i32 q11,q11,q3
- vst1.32 {q10},[r1,:128]!
- vst1.32 {q11},[r1,:128]!
-
- ldmia r0,{r4-r11}
- sub r1,r1,#64
- ldr r2,[sp,#0]
- eor r12,r12,r12
- eor r3,r5,r6
- b .L_00_48
-
-.align 4
-.L_00_48:
- vext.8 q8,q0,q1,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q2,q3,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q0,q0,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#4]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d7,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d7,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d7,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q0,q0,q9
- add r10,r10,r2
- ldr r2,[sp,#8]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d7,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d7,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d0,d0,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d0,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d0,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d0,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#12]
- and r3,r3,r12
- vshr.u32 d24,d0,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d0,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d1,d1,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q0
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q1,q2,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q3,q0,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q1,q1,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#20]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d1,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d1,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d1,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q1,q1,q9
- add r6,r6,r2
- ldr r2,[sp,#24]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d1,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d1,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d2,d2,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d2,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d2,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d2,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#28]
- and r3,r3,r12
- vshr.u32 d24,d2,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d2,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d3,d3,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q1
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vext.8 q8,q2,q3,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q0,q1,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q2,q2,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#36]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d3,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d3,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d3,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q2,q2,q9
- add r10,r10,r2
- ldr r2,[sp,#40]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d3,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d3,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d4,d4,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d4,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d4,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d4,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#44]
- and r3,r3,r12
- vshr.u32 d24,d4,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d4,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d5,d5,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q2
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q3,q0,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q1,q2,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q3,q3,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#52]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d5,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d5,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d5,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q3,q3,q9
- add r6,r6,r2
- ldr r2,[sp,#56]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d5,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d5,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d6,d6,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d6,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d6,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d6,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#60]
- and r3,r3,r12
- vshr.u32 d24,d6,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d6,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d7,d7,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q3
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[r14]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- teq r2,#0 @ check for K256 terminator
- ldr r2,[sp,#0]
- sub r1,r1,#64
- bne .L_00_48
-
- ldr r1,[sp,#68]
- ldr r0,[sp,#72]
- sub r14,r14,#256 @ rewind r14
- teq r1,r0
- it eq
- subeq r1,r1,#64 @ avoid SEGV
- vld1.8 {q0},[r1]! @ load next input block
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- it ne
- strne r1,[sp,#68]
- mov r1,sp
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q0,q0
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q0
- ldr r2,[sp,#4]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#8]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#12]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q1,q1
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q1
- ldr r2,[sp,#20]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#24]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#28]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q2,q2
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q2
- ldr r2,[sp,#36]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#40]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#44]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q3,q3
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q3
- ldr r2,[sp,#52]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#56]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#60]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#64]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- ldr r0,[r2,#0]
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r12,[r2,#4]
- ldr r3,[r2,#8]
- ldr r1,[r2,#12]
- add r4,r4,r0 @ accumulate
- ldr r0,[r2,#16]
- add r5,r5,r12
- ldr r12,[r2,#20]
- add r6,r6,r3
- ldr r3,[r2,#24]
- add r7,r7,r1
- ldr r1,[r2,#28]
- add r8,r8,r0
- str r4,[r2],#4
- add r9,r9,r12
- str r5,[r2],#4
- add r10,r10,r3
- str r6,[r2],#4
- add r11,r11,r1
- str r7,[r2],#4
- stmia r2,{r8-r11}
-
- ittte ne
- movne r1,sp
- ldrne r2,[sp,#0]
- eorne r12,r12,r12
- ldreq sp,[sp,#76] @ restore original sp
- itt ne
- eorne r3,r5,r6
- bne .L_00_48
-
- ldmia sp!,{r4-r12,pc}
-.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
-#endif
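Editor's note: both the scalar .Lrounds_16_xx loop and the NEON vext/vshr/vsli sequences above implement the SHA-256 message schedule over a 16-word circular buffer; the NEON path merely computes four W[] words (plus their K256 additions) per iteration. A self-contained scalar C sketch of one schedule step, matching the sp-relative offsets in the comments:

	#include <stdint.h>

	/* W[i] += sigma0(W[i+1]) + W[i+9] + sigma1(W[i+14]), indices mod 16,
	 * with sigma0 = ROTR7^ROTR18^SHR3 and sigma1 = ROTR17^ROTR19^SHR10. */
	static uint32_t sha256_schedule(uint32_t w[16], unsigned int i)
	{
		uint32_t x1 = w[(i + 1) & 15], x14 = w[(i + 14) & 15];
		uint32_t s0 = (x1 >> 7 | x1 << 25) ^
			      (x1 >> 18 | x1 << 14) ^ (x1 >> 3);
		uint32_t s1 = (x14 >> 17 | x14 << 15) ^
			      (x14 >> 19 | x14 << 13) ^ (x14 >> 10);

		w[i & 15] += s0 + w[(i + 9) & 15] + s1;
		return w[i & 15];
	}
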
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-
-# ifdef __thumb2__
-# define INST(a,b,c,d) .byte c,d|0xc,a,b
-# else
-# define INST(a,b,c,d) .byte a,b,c,d
-# endif
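Editor's note: INST() emits the ARMv8 sha256h/sha256h2/sha256su0/sha256su1 opcodes as raw bytes so the file assembles even on toolchains that predate the Crypto Extensions. In ARM state the four arguments are simply the instruction's little-endian bytes; in Thumb2 state a 32-bit instruction is stored as two little-endian halfwords, and the 0xF3 NEON prefix used by all of these opcodes becomes 0xFF, which is what the c,d|0xc,a,b reordering achieves. A C sketch of the same logic (the helper name is ours):

	#include <stdint.h>

	/* Append one NEON crypto opcode, given its ARM-state bytes a,b,c,d. */
	static void emit_inst(uint8_t *out, int thumb2,
			      uint8_t a, uint8_t b, uint8_t c, uint8_t d)
	{
		if (thumb2) {
			/* Two little-endian halfwords; the 0xF3 prefix of
			 * these opcodes becomes 0xFF, hence d | 0x0c. */
			out[0] = c; out[1] = d | 0x0c;
			out[2] = a; out[3] = b;
		} else {
			/* One little-endian 32-bit word. */
			out[0] = a; out[1] = b;
			out[2] = c; out[3] = d;
		}
	}
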
-
-.type sha256_block_data_order_armv8,%function
-.align 5
-sha256_block_data_order_armv8:
-.LARMv8:
- vld1.32 {q0,q1},[r0]
-# ifdef __thumb2__
- adr r3,.LARMv8
- sub r3,r3,#.LARMv8-K256
-# else
- adrl r3,K256
-# endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
-.Loop_v8:
- vld1.8 {q8-q9},[r1]!
- vld1.8 {q10-q11},[r1]!
- vld1.32 {q12},[r3]!
- vrev32.8 q8,q8
- vrev32.8 q9,q9
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vmov q14,q0 @ offload
- vmov q15,q1
- teq r1,r2
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vld1.32 {q13},[r3]
- vadd.i32 q12,q12,q10
- sub r3,r3,#256-16 @ rewind
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vadd.i32 q13,q13,q11
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vadd.i32 q0,q0,q14
- vadd.i32 q1,q1,q15
- it ne
- bne .Loop_v8
-
- vst1.32 {q0,q1},[r0]
-
- bx lr @ bx lr
-.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
-#endif
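Editor's note: the INST()-encoded routine above is the same compression flow expressed with the ARMv8 Crypto Extensions; each four-round group is vmov q2,q0 followed by sha256h/sha256h2 on a W+K vector, with sha256su0/sha256su1 updating the schedule. A sketch of the four-round core in ACLE C intrinsics, assuming an arm_neon.h with crypto support:

	#include <arm_neon.h>

	/* One four-round step of the flow above: abcd/efgh are the two
	 * state vectors, wk = W + K for these four rounds. */
	static void sha256_4rounds(uint32x4_t *abcd, uint32x4_t *efgh,
				   uint32x4_t wk)
	{
		uint32x4_t tmp = *abcd;                   /* vmov q2,q0 */

		*abcd = vsha256hq_u32(*abcd, *efgh, wk);  /* sha256h    */
		*efgh = vsha256h2q_u32(*efgh, tmp, wk);   /* sha256h2   */
	}
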
-.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif

diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
deleted file mode 100644
index 03014624f2ab..000000000000
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1869 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA512 block procedure for ARMv4. September 2007.
-
-@ This code is ~4.5 (four and a half) times faster than code generated
-@ by gcc 3.4, and it spends ~72 clock cycles per byte [on a single-issue
-@ Xscale PXA250 core].
-@
-@ July 2010.
-@
-@ Rescheduling for the dual-issue pipeline resulted in a 6% improvement
-@ on the Cortex A8 core and ~40 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in a 7%
-@ improvement on the Cortex A8 core and ~38 cycles per byte.
-
-@ March 2011.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process
-@ one byte in 23.3 cycles or ~60% faster than integer-only code.
-
-@ August 2012.
-@
-@ Improve NEON performance by 12% on Snapdragon S4. In absolute
-@ terms it's 22.6 cycles per byte, which is a disappointing result.
-@ Technical writers asserted that the 3-way S4 pipeline can sustain
-@ multiple NEON instructions per cycle, but dual NEON issue could
-@ not be observed; see https://www.openssl.org/~appro/Snapdragon-S4.html
-@ for further details. On a side note, Cortex-A15 processes one byte
-@ in 16 cycles.
-
-@ Byte order [in]dependence. =========================================
-@
-@ Originally the caller was expected to maintain a specific *dword*
-@ order in h[0-7], namely with the most significant dword at the
-@ *lower* address, which was reflected in the two parameters below as
-@ 0 and 4. Now the caller is expected to maintain native byte order
-@ for whole 64-bit values.
-#ifndef __KERNEL__
-# include "arm_arch.h"
-# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
-# define VFP_ABI_POP vldmia sp!,{d8-d15}
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-# define VFP_ABI_PUSH
-# define VFP_ABI_POP
-#endif
-
-#ifdef __ARMEL__
-# define LO 0
-# define HI 4
-# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
-#else
-# define HI 0
-# define LO 4
-# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
-#endif
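Editor's note: LO and HI are byte offsets selecting the 32-bit halves of each natively stored 64-bit value, so loads such as ldr r7,[r0,#32+LO] work on either endianness; WORD64() lays out the K512 constants the same way. A C sketch of the same access, assuming the context is an array of 64-bit words:

	#include <stdint.h>
	#include <string.h>

	/* Read the half of h[i] at byte offset off (LO or HI), the way the
	 * assembly's ldr ...,[r0,#8*i+LO] / ldr ...,[r0,#8*i+HI] pairs do. */
	static uint32_t half32(const uint64_t *h, unsigned int i,
			       unsigned int off)
	{
		uint32_t v;

		memcpy(&v, (const uint8_t *)&h[i] + off, sizeof(v));
		return v;
	}
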
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K512,%object
-.align 5
-K512:
-WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
-WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
-WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
-WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
-WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
-WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
-WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
-WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
-WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
-WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
-WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
-WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
-WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
-WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
-WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
-WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
-WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
-WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
-WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
-WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
-WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
-WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
-WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
-WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
-WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
-WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
-WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
-WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
-WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
-WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
-WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
-WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
-WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
-WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
-WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
-WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
-WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
-WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
-WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
-.size K512,.-K512
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha512_block_data_order
-.skip 32-4
-#else
-.skip 32
-#endif
-
-.global sha512_block_data_order
-.type sha512_block_data_order,%function
-sha512_block_data_order:
-.Lsha512_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha512_block_data_order
-#else
- adr r3,.Lsha512_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#1
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- stmdb sp!,{r4-r12,lr}
- sub r14,r3,#672 @ K512
- sub sp,sp,#9*8
-
- ldr r7,[r0,#32+LO]
- ldr r8,[r0,#32+HI]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
-.Loop:
- str r9, [sp,#48+0]
- str r10, [sp,#48+4]
- str r11, [sp,#56+0]
- str r12, [sp,#56+4]
- ldr r5,[r0,#0+LO]
- ldr r6,[r0,#0+HI]
- ldr r3,[r0,#8+LO]
- ldr r4,[r0,#8+HI]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- str r3,[sp,#8+0]
- str r4,[sp,#8+4]
- str r9, [sp,#16+0]
- str r10, [sp,#16+4]
- str r11, [sp,#24+0]
- str r12, [sp,#24+4]
- ldr r3,[r0,#40+LO]
- ldr r4,[r0,#40+HI]
- str r3,[sp,#40+0]
- str r4,[sp,#40+4]
-
-.L00_15:
-#if __ARM_ARCH__<7
- ldrb r3,[r1,#7]
- ldrb r9, [r1,#6]
- ldrb r10, [r1,#5]
- ldrb r11, [r1,#4]
- ldrb r4,[r1,#3]
- ldrb r12, [r1,#2]
- orr r3,r3,r9,lsl#8
- ldrb r9, [r1,#1]
- orr r3,r3,r10,lsl#16
- ldrb r10, [r1],#8
- orr r3,r3,r11,lsl#24
- orr r4,r4,r12,lsl#8
- orr r4,r4,r9,lsl#16
- orr r4,r4,r10,lsl#24
-#else
- ldr r3,[r1,#4]
- ldr r4,[r1],#8
-#ifdef __ARMEL__
- rev r3,r3
- rev r4,r4
-#endif
-#endif
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#148
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
- tst r14,#1
- beq .L00_15
- ldr r9,[sp,#184+0]
- ldr r10,[sp,#184+4]
- bic r14,r14,#1
-.L16_79:
- @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
- @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
- @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
- mov r3,r9,lsr#1
- ldr r11,[sp,#80+0]
- mov r4,r10,lsr#1
- ldr r12,[sp,#80+4]
- eor r3,r3,r10,lsl#31
- eor r4,r4,r9,lsl#31
- eor r3,r3,r9,lsr#8
- eor r4,r4,r10,lsr#8
- eor r3,r3,r10,lsl#24
- eor r4,r4,r9,lsl#24
- eor r3,r3,r9,lsr#7
- eor r4,r4,r10,lsr#7
- eor r3,r3,r10,lsl#25
-
- @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
- @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
- @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
- mov r9,r11,lsr#19
- mov r10,r12,lsr#19
- eor r9,r9,r12,lsl#13
- eor r10,r10,r11,lsl#13
- eor r9,r9,r12,lsr#29
- eor r10,r10,r11,lsr#29
- eor r9,r9,r11,lsl#3
- eor r10,r10,r12,lsl#3
- eor r9,r9,r11,lsr#6
- eor r10,r10,r12,lsr#6
- ldr r11,[sp,#120+0]
- eor r9,r9,r12,lsl#26
-
- ldr r12,[sp,#120+4]
- adds r3,r3,r9
- ldr r9,[sp,#192+0]
- adc r4,r4,r10
-
- ldr r10,[sp,#192+4]
- adds r3,r3,r11
- adc r4,r4,r12
- adds r3,r3,r9
- adc r4,r4,r10
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#23
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
-#if __ARM_ARCH__>=7
- ittt eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r9,[sp,#184+0]
- ldreq r10,[sp,#184+4]
- beq .L16_79
- bic r14,r14,#1
-
- ldr r3,[sp,#8+0]
- ldr r4,[sp,#8+4]
- ldr r9, [r0,#0+LO]
- ldr r10, [r0,#0+HI]
- ldr r11, [r0,#8+LO]
- ldr r12, [r0,#8+HI]
- adds r9,r5,r9
- str r9, [r0,#0+LO]
- adc r10,r6,r10
- str r10, [r0,#0+HI]
- adds r11,r3,r11
- str r11, [r0,#8+LO]
- adc r12,r4,r12
- str r12, [r0,#8+HI]
-
- ldr r5,[sp,#16+0]
- ldr r6,[sp,#16+4]
- ldr r3,[sp,#24+0]
- ldr r4,[sp,#24+4]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- adds r9,r5,r9
- str r9, [r0,#16+LO]
- adc r10,r6,r10
- str r10, [r0,#16+HI]
- adds r11,r3,r11
- str r11, [r0,#24+LO]
- adc r12,r4,r12
- str r12, [r0,#24+HI]
-
- ldr r3,[sp,#40+0]
- ldr r4,[sp,#40+4]
- ldr r9, [r0,#32+LO]
- ldr r10, [r0,#32+HI]
- ldr r11, [r0,#40+LO]
- ldr r12, [r0,#40+HI]
- adds r7,r7,r9
- str r7,[r0,#32+LO]
- adc r8,r8,r10
- str r8,[r0,#32+HI]
- adds r11,r3,r11
- str r11, [r0,#40+LO]
- adc r12,r4,r12
- str r12, [r0,#40+HI]
-
- ldr r5,[sp,#48+0]
- ldr r6,[sp,#48+4]
- ldr r3,[sp,#56+0]
- ldr r4,[sp,#56+4]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
- adds r9,r5,r9
- str r9, [r0,#48+LO]
- adc r10,r6,r10
- str r10, [r0,#48+HI]
- adds r11,r3,r11
- str r11, [r0,#56+LO]
- adc r12,r4,r12
- str r12, [r0,#56+HI]
-
- add sp,sp,#640
- sub r14,r14,#640
-
- teq r1,r2
- bne .Loop
-
- add sp,sp,#8*9 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha512_block_data_order,.-sha512_block_data_order
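Editor's note: the Sigma/sigma comment blocks above spell out how each 64-bit rotate is split across the two 32-bit halves of a register pair; a rotate by 41 = 32 + 9, for instance, swaps the halves before the 9-bit shifts. (The teq r9,#148 and teq r9,#23 tests likewise key off the low byte of K512[15] and K512[79] to detect the end of each phase.) A C sketch of the low half of Sigma1, mirroring the "LO" comment line; the high half is symmetric:

	#include <stdint.h>

	/* Sigma1(x) = ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41) on x = hi:lo,
	 * low 32 bits only: lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 */
	static uint32_t Sigma1_lo(uint32_t lo, uint32_t hi)
	{
		return ((lo >> 14) | (hi << 18)) ^
		       ((lo >> 18) | (hi << 14)) ^
		       ((hi >> 9)  | (lo << 23));
	}
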
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha512_block_data_order_neon
-.type sha512_block_data_order_neon,%function
-.align 4
-sha512_block_data_order_neon:
-.LNEON:
- dmb @ errata #451034 on early Cortex A8
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- VFP_ABI_PUSH
- adr r3,.Lsha512_block_data_order
- sub r3,r3,.Lsha512_block_data_order-K512
- vldmia r0,{d16-d23} @ load context
-.Loop_neon:
- vshr.u64 d24,d20,#14 @ 0
-#if 0<16
- vld1.64 {d0},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 0>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 0<16 && defined(__ARMEL__)
- vrev64.8 d0,d0
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 1
-#if 1<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 1>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 1<16 && defined(__ARMEL__)
- vrev64.8 d1,d1
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 2
-#if 2<16
- vld1.64 {d2},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 2>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 2<16 && defined(__ARMEL__)
- vrev64.8 d2,d2
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 3
-#if 3<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 3>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 3<16 && defined(__ARMEL__)
- vrev64.8 d3,d3
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 4
-#if 4<16
- vld1.64 {d4},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 4>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 4<16 && defined(__ARMEL__)
- vrev64.8 d4,d4
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 5
-#if 5<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 5>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 5<16 && defined(__ARMEL__)
- vrev64.8 d5,d5
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 6
-#if 6<16
- vld1.64 {d6},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 6>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 6<16 && defined(__ARMEL__)
- vrev64.8 d6,d6
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 7
-#if 7<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 7>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 7<16 && defined(__ARMEL__)
- vrev64.8 d7,d7
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 d24,d20,#14 @ 8
-#if 8<16
- vld1.64 {d8},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 8>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 8<16 && defined(__ARMEL__)
- vrev64.8 d8,d8
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 9
-#if 9<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 9>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 9<16 && defined(__ARMEL__)
- vrev64.8 d9,d9
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 10
-#if 10<16
- vld1.64 {d10},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 10>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 10<16 && defined(__ARMEL__)
- vrev64.8 d10,d10
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 11
-#if 11<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 11>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 11<16 && defined(__ARMEL__)
- vrev64.8 d11,d11
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 12
-#if 12<16
- vld1.64 {d12},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 12>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 12<16 && defined(__ARMEL__)
- vrev64.8 d12,d12
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 13
-#if 13<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 13>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 13<16 && defined(__ARMEL__)
- vrev64.8 d13,d13
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 14
-#if 14<16
- vld1.64 {d14},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 14>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 14<16 && defined(__ARMEL__)
- vrev64.8 d14,d14
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 15
-#if 15<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 15>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 15<16 && defined(__ARMEL__)
- vrev64.8 d15,d15
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- mov r12,#4
-.L16_79_neon:
- subs r12,#1
- vshr.u64 q12,q7,#19
- vshr.u64 q13,q7,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q7,#6
- vsli.64 q12,q7,#45
- vext.8 q14,q0,q1,#8 @ X[i+1]
- vsli.64 q13,q7,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q0,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q4,q5,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q0,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q0,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 16<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 17
-#if 17<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 17>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 17<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q0,#19
- vshr.u64 q13,q0,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q0,#6
- vsli.64 q12,q0,#45
- vext.8 q14,q1,q2,#8 @ X[i+1]
- vsli.64 q13,q0,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q1,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q5,q6,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q1,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q1,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 18<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 19
-#if 19<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 19>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 19<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q1,#19
- vshr.u64 q13,q1,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q1,#6
- vsli.64 q12,q1,#45
- vext.8 q14,q2,q3,#8 @ X[i+1]
- vsli.64 q13,q1,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q2,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q6,q7,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q2,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q2,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 20<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 21
-#if 21<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 21>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 21<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q2,#19
- vshr.u64 q13,q2,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q2,#6
- vsli.64 q12,q2,#45
- vext.8 q14,q3,q4,#8 @ X[i+1]
- vsli.64 q13,q2,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q3,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q7,q0,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q3,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q3,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 22<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 23
-#if 23<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 23>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 23<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 q12,q3,#19
- vshr.u64 q13,q3,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q3,#6
- vsli.64 q12,q3,#45
- vext.8 q14,q4,q5,#8 @ X[i+1]
- vsli.64 q13,q3,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q4,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q0,q1,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q4,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q4,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 24<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 25
-#if 25<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 25>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 25<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q4,#19
- vshr.u64 q13,q4,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q4,#6
- vsli.64 q12,q4,#45
- vext.8 q14,q5,q6,#8 @ X[i+1]
- vsli.64 q13,q4,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q5,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q1,q2,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q5,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q5,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 26<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 27
-#if 27<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 27>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 27<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q5,#19
- vshr.u64 q13,q5,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q5,#6
- vsli.64 q12,q5,#45
- vext.8 q14,q6,q7,#8 @ X[i+1]
- vsli.64 q13,q5,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q6,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q2,q3,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q6,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q6,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 28<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 29
-#if 29<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 29>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 29<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q6,#19
- vshr.u64 q13,q6,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q6,#6
- vsli.64 q12,q6,#45
- vext.8 q14,q7,q0,#8 @ X[i+1]
- vsli.64 q13,q6,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q7,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q3,q4,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q7,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q7,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 30<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 31
-#if 31<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 31>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 31<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- bne .L16_79_neon
-
- vadd.i64 d16,d30 @ h+=Maj from the past
- vldmia r0,{d24-d31} @ load context to temp
- vadd.i64 q8,q12 @ vectorized accumulate
- vadd.i64 q9,q13
- vadd.i64 q10,q14
- vadd.i64 q11,q15
- vstmia r0,{d16-d23} @ save context
- teq r1,r2
- sub r3,#640 @ rewind K512
- bne .Loop_neon
-
- VFP_ABI_POP
- bx lr @ .word 0xe12fff1e
-.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
-#endif
-.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif
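
Note on the file deleted above: the ARMv4 integer path has no 64-bit rotate instruction, so the CRYPTOGAMS code evaluates each SHA-512 Sigma rotation on the two 32-bit halves of the word, exactly as the "LO lo>>28^hi<<4 ..." comments in the deleted listing spell out. A minimal C sketch of that decomposition, assuming the same hi:lo split (function names are illustrative, not kernel API):

#include <stdint.h>

/* Rotate the 64-bit value hi:lo right by n. Valid for n in 1..31 and
 * 33..63, which covers every rotation amount SHA-512 uses. */
static uint64_t rotr64_split(uint32_t lo, uint32_t hi, unsigned int n)
{
	uint32_t rlo, rhi;

	if (n < 32) {
		rlo = (lo >> n) | (hi << (32 - n));	/* LO: lo>>n ^ hi<<(32-n) */
		rhi = (hi >> n) | (lo << (32 - n));	/* HI: hi>>n ^ lo<<(32-n) */
	} else {					/* n >= 33: halves swap roles */
		rlo = (hi >> (n - 32)) | (lo << (64 - n));
		rhi = (lo >> (n - 32)) | (hi << (64 - n));
	}
	return ((uint64_t)rhi << 32) | rlo;
}

/* Sigma0(x) = ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39), matching the comment
 * block in the deleted assembly. */
static uint64_t sha512_Sigma0(uint32_t lo, uint32_t hi)
{
	return rotr64_split(lo, hi, 28) ^
	       rotr64_split(lo, hi, 34) ^
	       rotr64_split(lo, hi, 39);
}
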
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 455eb19a5ac1..db8512d9a918 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -22,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -34,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
@@ -250,7 +250,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
);
}
#else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
s64 tmp;
@@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
@@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
s64 oldval;
unsigned long res;
@@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
s64 result;
unsigned long tmp;
@@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
@@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 oldval, newval;
unsigned long tmp;
@@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
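
The atomic_* to arch_atomic_* rename above is what lets the generic instrumentation layer interpose: the generated include/linux/atomic-instrumented.h defines the unprefixed operations as a sanitizer hook followed by the arch_ hook. Roughly, as a simplified sketch of the generated wrapper (not the verbatim kernel text):

#include <linux/instrumented.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN entry point */
	return arch_atomic_read(v);
}
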
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..4dfe538dfc68 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
+#define arch_xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
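
Callers are unaffected by the renames in this header: drivers keep using the unprefixed cmpxchg()/xchg() forms, which now resolve through the arch_ variants above. As an illustration, a typical compare-and-swap retry loop (the helper itself is hypothetical) that bottoms out in arch_cmpxchg():

#include <linux/atomic.h>
#include <linux/minmax.h>

/* Add "a" to the counter but saturate at "max". */
static int counter_add_capped(atomic_t *v, int a, int max)
{
	int old = atomic_read(v);
	int new;

	do {
		new = min(old + a, max);
	} while (!atomic_try_cmpxchg(v, &old, new));	/* cmpxchg under the hood */

	return new;
}
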
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0d67ed682e07..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,11 +44,15 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section("__cpuidle_method_of_table") \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
extern int arm_cpuidle_init(int cpu);
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
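
With the new __cpuidle_method_section indirection, a CPUIDLE_METHOD_OF_DECLARE() user builds whether or not CONFIG_CPU_IDLE is enabled; in the disabled case the table entry simply becomes an unused, silently discarded static. Hypothetical driver-side usage, with every "foo" name made up for illustration:

#include <linux/of.h>
#include <asm/cpuidle.h>

static int foo_cpuidle_init(struct device_node *np, int cpu)
{
	return 0;	/* parse DT / talk to firmware here */
}

static int foo_cpuidle_suspend(unsigned long arg)
{
	return 0;	/* enter the vendor-specific idle state */
}

static const struct cpuidle_ops foo_cpuidle_ops = {
	.init		= foo_cpuidle_init,
	.suspend	= foo_cpuidle_suspend,
};

CPUIDLE_METHOD_OF_DECLARE(foo_idle, "vendor,foo-idle", &foo_cpuidle_ops);
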
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..6f5d627c44a3 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -21,7 +21,7 @@
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 24cbfc112dfa..0ccc985b90af 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
* - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
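
The corrected kerneldoc now matches the actual argument order: the VMA comes first, then the user virtual address within it. For illustration (the helper name is made up):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Drop the TLB entry covering one user page. */
static void zap_one_user_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	flush_tlb_page(vma, uaddr & PAGE_MASK);
}
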
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 698b6f636156..20ab1e607522 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -63,7 +63,27 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
- __handle_domain_irq(NULL, irq, false, regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
+
+ irq_enter();
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(!irq || irq >= nr_irqs))
+ desc = NULL;
+ else
+ desc = irq_to_desc(irq);
+
+ if (likely(desc))
+ handle_irq_desc(desc);
+ else
+ ack_bad_irq(irq);
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
/*
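
handle_IRQ() now open-codes what __handle_domain_irq() used to do for it: stash the register context with set_irq_regs(), bracket the handler in irq_enter()/irq_exit(), and route nonsense IRQ numbers to ack_bad_irq() instead of crashing. A typical caller is a machine-level entry hook of this shape (the status register and all "foo" names are hypothetical):

#include <linux/io.h>
#include <asm/exception.h>
#include <asm/irq.h>

static void __iomem *foo_irq_status;	/* mapped interrupt-controller register */

static void __exception_irq_entry foo_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = readl_relaxed(foo_irq_status);

	handle_IRQ(hwirq, regs);	/* irq_enter()/irq_exit() happen inside */
}
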
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2924d7910b10..eb2190477da1 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
} else {
armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
}
}
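
The (u32) casts above only make an existing truncation explicit: the ARMv7 PMU event counters are 32 bits wide, so just the low word of the 64-bit perf value can be programmed via the mcr. Worked example of the effect:

	u64 value = 0x100000042ULL;
	u32 programmed = (u32)value;	/* 0x42 - bits 63:32 are discarded */
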
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6324f4db9b02..fc9e8b37eaa8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -288,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
frame.fp = thread_saved_fp(p);
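
task_is_running() folds the state comparison behind a READ_ONCE(), closing the data race on a concurrently changing ->state. At this point in the series it is approximately the following (a sketch; the field was later renamed):

/* From <linux/sched.h>, approximately: */
#define task_is_running(task)	(READ_ONCE((task)->state) == TASK_RUNNING)
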
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1a5edf562e85..73ca7797b92f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 74679240a9d8..c7bb168b0d97 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
- preempt_disable();
trace_hardirqs_off();
/*
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 020e6deb67c8..237e8aa9fe83 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/io.h>
+#include "common.h"
#include "hardware.h"
static int mx27_suspend_enter(suspend_state_t state)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 000f672a94c9..007a44412e24 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -233,12 +233,38 @@ static struct platform_device *ixp46x_devices[] __initdata = {
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+static struct platform_device_info ixp_dev_info __initdata = {
+ .name = "ixp4xx_crypto",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int __init ixp_crypto_register(void)
+{
+ struct platform_device *pdev;
+
+ if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
+ IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
+ printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return 0;
+}
+
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_IXP4XX))
+ ixp_crypto_register();
+
if (cpu_is_ixp46x()) {
int region;
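
The feature test in the new ixp_crypto_register() reads the EXP_CFG2 fuse word inverted because those bits appear to be active-low (a set bit marks a unit as fused off). Restating the same condition step by step (illustrative only):

	u32 present = ~(*IXP4XX_EXP_CFG2);	/* invert: 1 now means "unit present" */

	if (!(present & (IXP4XX_FEATURE_HASH |
			 IXP4XX_FEATURE_AES |
			 IXP4XX_FEATURE_DES)))
		return -ENODEV;	/* none of the three crypto units is usable */
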
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 2ee527c00284..1026a816dcc0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
#ifdef CONFIG_LEDS_TRIGGERS
DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
- /*
- * turn on camera LED
- */
- if (power)
- led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
- else
- led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
- return 0;
-}
-#else
-#define ams_delta_camera_power NULL
#endif
static struct platform_device ams_delta_audio_device = {
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c40cf5ef8607..977b0b744c22 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
{
if (!IS_BUILTIN(CONFIG_TPS65010))
return -ENOSYS;
-
+
tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
@@ -394,6 +394,8 @@ static void __init h2_init(void)
BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ gpiod_add_lookup_table(&isp1301_gpiod_table);
+
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 2c1e2b32b9b3..a745d64d4699 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
irq = INT_1610_WAKE_UP_REQ;
- if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
- NULL))
- pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ else
+ irq = -1;
+
+ if (irq >= 0) {
+ if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ }
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 418a61ecb827..5e86145db0e2 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
static void n8x0_mmc_callback(void *data, u8 card_mask)
{
+#ifdef CONFIG_MMC_OMAP
int bit, *openp, index;
if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
else
*openp = 0;
-#ifdef CONFIG_MMC_OMAP
omap_mmc_notify_cover_event(mmc_device, index, *openp);
#else
pr_warn("MMC: notify cover event not available\n");
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 43fb941dcd07..a56748d671c4 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -6,7 +6,7 @@ config ARCH_ZYNQ
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
- select ARM_GLOBAL_TIMER if !CPU_FREQ
+ select ARM_GLOBAL_TIMER
select CADENCE_TTC_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 5335b9687297..74f4b383afe3 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -24,7 +24,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 1bb28d7db567..87bf4ab17721 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -23,7 +23,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index a9653117ca0d..27e0af78e88b 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -348,29 +348,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
reset_current_kprobe();
}
break;
-
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
- break;
-
- default:
- break;
}
return 0;
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
index d6465823b281..7b393cfec071 100644
--- a/arch/arm64/Kbuild
+++ b/arch/arm64/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/
-obj-$(CONFIG_NET) += net/
+obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f1d8566bbf9..be9083882f97 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -93,6 +93,7 @@ config ARM64
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_NO_INSTR
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -1035,7 +1036,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1481,12 +1482,6 @@ menu "ARMv8.3 architectural features"
config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
- depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
- # Modern compilers insert a .note.gnu.property section note for PAC
- # which is only understood by binutils starting with version 2.33.1.
- depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
- depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1498,13 +1493,6 @@ config ARM64_PTR_AUTH
for each process at exec() time, with these keys being
context-switched along with the process.
- If the compiler supports the -mbranch-protection or
- -msign-return-address flag (e.g. GCC 7 or later), then this option
- will also cause the kernel itself to be compiled with return address
- protection. In this case, and if the target hardware is known to
- support pointer authentication, then CONFIG_STACKPROTECTOR can be
- disabled with minimal loss of protection.
-
The feature is detected at runtime. If the feature is not present in
hardware it will not be advertised to userspace/KVM guest nor will it
be enabled.
@@ -1515,6 +1503,24 @@ config ARM64_PTR_AUTH
but with the feature disabled. On such a system, this option should
not be selected.
+config ARM64_PTR_AUTH_KERNEL
+ bool "Use pointer authentication for kernel"
+ default y
+ depends on ARM64_PTR_AUTH
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ # Modern compilers insert a .note.gnu.property section note for PAC
+ # which is only understood by binutils starting with version 2.33.1.
+ depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+ depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ help
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
This feature works with FUNCTION_GRAPH_TRACER option only if
DYNAMIC_FTRACE_WITH_REGS is enabled.
@@ -1606,7 +1612,7 @@ config ARM64_BTI_KERNEL
bool "Use Branch Target Identification for kernel"
default y
depends on ARM64_BTI
- depends on ARM64_PTR_AUTH
+ depends on ARM64_PTR_AUTH_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
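
What the new ARM64_PTR_AUTH_KERNEL split actually buys: with it enabled, the compiler brackets non-leaf kernel functions with PAC instructions that sign and authenticate the return address against SP. A rough AArch64-only illustration using the NOP-space HINT encodings — what compiler-generated code looks like in spirit, not an excerpt from the kernel:

void demo(void)
{
	/*
	 * PACIASP (hint #25): sign LR using SP as the modifier; executes
	 * as a NOP on cores without pointer authentication.
	 */
	asm volatile("hint #25");

	/* ... function body; a corrupted LR will now fail to verify ... */

	/*
	 * AUTIASP (hint #29): authenticate LR before returning; a failed
	 * check yields an address that faults on use.
	 */
	asm volatile("hint #29");
}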
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6409b47b73e4..7336c1fd0dda 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
+ select COMMON_CLK
select MESON_IRQ_GPIO
help
This enables support for the arm64 based Amlogic SoCs
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b52481f0605d..3b5b1c480449 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -70,7 +70,7 @@ endif
# off, this will be overridden if we are using branch protection.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
-ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
index 6c309b97587d..e8d31279b7a3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -46,7 +46,8 @@
eee-broken-100tx;
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
index df212ed5bb94..e65d1c477e2c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -31,11 +31,10 @@
reg = <0x4>;
eee-broken-1000t;
eee-broken-100tx;
-
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index eca06a0c3cf8..a30249ebffa8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -197,8 +197,8 @@
ddr: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
};
dcfg: syscon@1e00000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index 631e01c1b9fd..be1e7d6f0ecb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -88,11 +88,11 @@
pinctrl-0 = <&pinctrl_codec2>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 4dc8383478ee..a08a568c31d9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -45,8 +45,8 @@
reg_12p0_main: regulator-12p0-main {
compatible = "regulator-fixed";
regulator-name = "12V_MAIN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
regulator-always-on;
};
@@ -77,15 +77,6 @@
regulator-always-on;
};
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- vin-supply = <&reg_3p3_main>;
- regulator-name = "GEN_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -415,11 +406,11 @@
pinctrl-0 = <&pinctrl_codec1>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index d64621d1213b..ad07fff40544 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -135,9 +135,12 @@
};
};
- reset@611010008 {
- compatible = "microchip,sparx5-chip-reset";
+ reset: reset-controller@611010008 {
+ compatible = "microchip,sparx5-switch-reset";
reg = <0x6 0x11010008 0x4>;
+ reg-names = "gcb";
+ #reset-cells = <1>;
+ cpu-syscon = <&cpu_ctrl>;
};
uart0: serial@600100000 {
@@ -275,6 +278,21 @@
"GPIO_46", "GPIO_47";
function = "emmc";
};
+
+ miim1_pins: miim1-pins {
+ pins = "GPIO_56", "GPIO_57";
+ function = "miim";
+ };
+
+ miim2_pins: miim2-pins {
+ pins = "GPIO_58", "GPIO_59";
+ function = "miim";
+ };
+
+ miim3_pins: miim3-pins {
+ pins = "GPIO_52", "GPIO_53";
+ function = "miim";
+ };
};
sgpio0: gpio@61101036c {
@@ -285,6 +303,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio0_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101036c 0x100>;
sgpio_in0: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -292,6 +312,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out0: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -310,6 +333,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio1_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x11010484 0x100>;
sgpio_in1: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -317,6 +342,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out1: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -335,6 +363,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio2_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101059c 0x100>;
sgpio_in2: gpio@0 {
reg = <0>;
@@ -342,6 +372,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out2: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -386,5 +419,62 @@
#thermal-sensor-cells = <0>;
clocks = <&ahb_clk>;
};
+
+ mdio0: mdio@6110102b0 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102b0 0x24>;
+ };
+
+ mdio1: mdio@6110102d4 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim1_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102d4 0x24>;
+ };
+
+ mdio2: mdio@6110102f8 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim2_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102f8 0x24>;
+ };
+
+ mdio3: mdio@61101031c {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim3_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x1101031c 0x24>;
+ };
+
+ serdes: serdes@10808000 {
+ compatible = "microchip,sparx5-serdes";
+ #phy-cells = <1>;
+ clocks = <&sys_clk>;
+ reg = <0x6 0x10808000 0x5d0000>;
+ };
+
+ switch: switch@600000000 {
+ compatible = "microchip,sparx5-switch";
+ reg = <0x6 0 0x401000>,
+ <0x6 0x10004000 0x7fc000>,
+ <0x6 0x11010000 0xaf0000>;
+ reg-names = "cpu", "dev", "gcb";
+ interrupt-names = "xtr";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&reset 0>;
+ reset-names = "switch";
+ };
};
};
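
The reset-controller node added above exports a single reset line (#reset-cells = <1>), which the sgpio banks and the switch node consume via resets = <&reset 0> plus reset-names = "switch". On the driver side a consumer would typically pick the line up through the common reset API — a hedged sketch, not the actual sparx5 driver code:

#include <linux/platform_device.h>
#include <linux/reset.h>

static int demo_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	/* Look up the line named by reset-names = "switch" in the DT node. */
	rst = devm_reset_control_get(&pdev->dev, "switch");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Pulse the reset (assert + deassert) before touching the block. */
	return reset_control_reset(rst);
}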
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
index f0c915160990..33faf1f3264f 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
@@ -7,30 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c100 = &i2c100;
- i2c101 = &i2c101;
- i2c102 = &i2c102;
- i2c103 = &i2c103;
- i2c104 = &i2c104;
- i2c105 = &i2c105;
- i2c106 = &i2c106;
- i2c107 = &i2c107;
- i2c108 = &i2c108;
- i2c109 = &i2c109;
- i2c110 = &i2c110;
- i2c111 = &i2c111;
- i2c112 = &i2c112;
- i2c113 = &i2c113;
- i2c114 = &i2c114;
- i2c115 = &i2c115;
- i2c116 = &i2c116;
- i2c117 = &i2c117;
- i2c118 = &i2c118;
- i2c119 = &i2c119;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -298,17 +274,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -328,6 +297,33 @@
};
};
+&sgpio0 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <8 15>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio1 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <24 31>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <11 31>;
+};
+
&gpio {
i2cmux_pins_i: i2cmux-pins-i {
pins = "GPIO_16", "GPIO_17", "GPIO_18", "GPIO_19",
@@ -415,9 +411,9 @@
&i2c0_imux {
pinctrl-names =
- "i2c100", "i2c101", "i2c102", "i2c103",
- "i2c104", "i2c105", "i2c106", "i2c107",
- "i2c108", "i2c109", "i2c110", "i2c111", "idle";
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
+ "i2c_sfp5", "i2c_sfp6", "i2c_sfp7", "i2c_sfp8",
+ "i2c_sfp9", "i2c_sfp10", "i2c_sfp11", "i2c_sfp12", "idle";
pinctrl-0 = <&i2cmux_0>;
pinctrl-1 = <&i2cmux_1>;
pinctrl-2 = <&i2cmux_2>;
@@ -431,62 +427,62 @@
pinctrl-10 = <&i2cmux_10>;
pinctrl-11 = <&i2cmux_11>;
pinctrl-12 = <&i2cmux_pins_i>;
- i2c100: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c101: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c102: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c103: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c104: i2c_sfp5 {
+ i2c_sfp5: i2c_sfp5 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c105: i2c_sfp6 {
+ i2c_sfp6: i2c_sfp6 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c106: i2c_sfp7 {
+ i2c_sfp7: i2c_sfp7 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c107: i2c_sfp8 {
+ i2c_sfp8: i2c_sfp8 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c108: i2c_sfp9 {
+ i2c_sfp9: i2c_sfp9 {
reg = <0x8>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c109: i2c_sfp10 {
+ i2c_sfp10: i2c_sfp10 {
reg = <0x9>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c110: i2c_sfp11 {
+ i2c_sfp11: i2c_sfp11 {
reg = <0xa>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c111: i2c_sfp12 {
+ i2c_sfp12: i2c_sfp12 {
reg = <0xb>;
#address-cells = <1>;
#size-cells = <0>;
@@ -499,44 +495,413 @@
&gpio 61 GPIO_ACTIVE_HIGH
&gpio 54 GPIO_ACTIVE_HIGH>;
idle-state = <0x8>;
- i2c112: i2c_sfp13 {
+ i2c_sfp13: i2c_sfp13 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c113: i2c_sfp14 {
+ i2c_sfp14: i2c_sfp14 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c114: i2c_sfp15 {
+ i2c_sfp15: i2c_sfp15 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c115: i2c_sfp16 {
+ i2c_sfp16: i2c_sfp16 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c116: i2c_sfp17 {
+ i2c_sfp17: i2c_sfp17 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c117: i2c_sfp18 {
+ i2c_sfp18: i2c_sfp18 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c118: i2c_sfp19 {
+ i2c_sfp19: i2c_sfp19 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c119: i2c_sfp20 {
+ i2c_sfp20: i2c_sfp20 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&axi {
+ sfp_eth12: sfp-eth12 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth13: sfp-eth13 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth14: sfp-eth14 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth15: sfp-eth15 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth48: sfp-eth48 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp5>;
+ tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth49: sfp-eth49 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp6>;
+ tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth50: sfp-eth50 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp7>;
+ tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth51: sfp-eth51 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp8>;
+ tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth52: sfp-eth52 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp9>;
+ tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth53: sfp-eth53 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp10>;
+ tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth54: sfp-eth54 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp11>;
+ tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth55: sfp-eth55 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp12>;
+ tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth56: sfp-eth56 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp13>;
+ tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth57: sfp-eth57 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp14>;
+ tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth58: sfp-eth58 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp15>;
+ tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth59: sfp-eth59 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp16>;
+ tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp17>;
+ tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp18>;
+ tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp19>;
+ tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp20>;
+ tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 10G SFPs */
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 13>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth12>;
+ microchip,sd-sgpio = <301>;
+ managed = "in-band-status";
+ };
+ port13: port@13 {
+ reg = <13>;
+ /* Example: CU SFP, 1G speed */
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 14>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth13>;
+ microchip,sd-sgpio = <305>;
+ managed = "in-band-status";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 15>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth14>;
+ microchip,sd-sgpio = <309>;
+ managed = "in-band-status";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 16>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth15>;
+ microchip,sd-sgpio = <313>;
+ managed = "in-band-status";
+ };
+ port48: port@48 {
+ reg = <48>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 17>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth48>;
+ microchip,sd-sgpio = <317>;
+ managed = "in-band-status";
+ };
+ port49: port@49 {
+ reg = <49>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 18>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth49>;
+ microchip,sd-sgpio = <321>;
+ managed = "in-band-status";
+ };
+ port50: port@50 {
+ reg = <50>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 19>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth50>;
+ microchip,sd-sgpio = <325>;
+ managed = "in-band-status";
+ };
+ port51: port@51 {
+ reg = <51>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 20>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth51>;
+ microchip,sd-sgpio = <329>;
+ managed = "in-band-status";
+ };
+ port52: port@52 {
+ reg = <52>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 21>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth52>;
+ microchip,sd-sgpio = <333>;
+ managed = "in-band-status";
+ };
+ port53: port@53 {
+ reg = <53>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 22>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth53>;
+ microchip,sd-sgpio = <337>;
+ managed = "in-band-status";
+ };
+ port54: port@54 {
+ reg = <54>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 23>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth54>;
+ microchip,sd-sgpio = <341>;
+ managed = "in-band-status";
+ };
+ port55: port@55 {
+ reg = <55>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 24>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth55>;
+ microchip,sd-sgpio = <345>;
+ managed = "in-band-status";
+ };
+ /* 25G SFPs */
+ port56: port@56 {
+ reg = <56>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 25>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth56>;
+ microchip,sd-sgpio = <349>;
+ managed = "in-band-status";
+ };
+ port57: port@57 {
+ reg = <57>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 26>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth57>;
+ microchip,sd-sgpio = <353>;
+ managed = "in-band-status";
+ };
+ port58: port@58 {
+ reg = <58>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 27>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth58>;
+ microchip,sd-sgpio = <357>;
+ managed = "in-band-status";
+ };
+ port59: port@59 {
+ reg = <59>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 28>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth59>;
+ microchip,sd-sgpio = <361>;
+ managed = "in-band-status";
+ };
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ microchip,sd-sgpio = <365>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ microchip,sd-sgpio = <369>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ microchip,sd-sgpio = <373>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ microchip,sd-sgpio = <377>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
index e28c6dd16377..ef96e6d8c6b3 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
@@ -7,14 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c152 = &i2c152;
- i2c153 = &i2c153;
- i2c154 = &i2c154;
- i2c155 = &i2c155;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -97,17 +89,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -138,6 +123,11 @@
};
};
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
+};
+
&axi {
i2c0_imux: i2c0-imux@0 {
compatible = "i2c-mux-pinctrl";
@@ -149,31 +139,614 @@
&i2c0_imux {
pinctrl-names =
- "i2c152", "i2c153", "i2c154", "i2c155",
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
"idle";
pinctrl-0 = <&i2cmux_s29>;
pinctrl-1 = <&i2cmux_s30>;
pinctrl-2 = <&i2cmux_s31>;
pinctrl-3 = <&i2cmux_s32>;
pinctrl-4 = <&i2cmux_pins_i>;
- i2c152: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c153: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c154: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c155: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&axi {
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&mdio0 {
+ status = "ok";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ };
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ };
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ };
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ };
+ phy10: ethernet-phy@10 {
+ reg = <10>;
+ };
+ phy11: ethernet-phy@11 {
+ reg = <11>;
+ };
+ phy12: ethernet-phy@12 {
+ reg = <12>;
+ };
+ phy13: ethernet-phy@13 {
+ reg = <13>;
+ };
+ phy14: ethernet-phy@14 {
+ reg = <14>;
+ };
+ phy15: ethernet-phy@15 {
+ reg = <15>;
+ };
+ phy16: ethernet-phy@16 {
+ reg = <16>;
+ };
+ phy17: ethernet-phy@17 {
+ reg = <17>;
+ };
+ phy18: ethernet-phy@18 {
+ reg = <18>;
+ };
+ phy19: ethernet-phy@19 {
+ reg = <19>;
+ };
+ phy20: ethernet-phy@20 {
+ reg = <20>;
+ };
+ phy21: ethernet-phy@21 {
+ reg = <21>;
+ };
+ phy22: ethernet-phy@22 {
+ reg = <22>;
+ };
+ phy23: ethernet-phy@23 {
+ reg = <23>;
+ };
+};
+
+&mdio1 {
+ status = "ok";
+ phy24: ethernet-phy@24 {
+ reg = <0>;
+ };
+ phy25: ethernet-phy@25 {
+ reg = <1>;
+ };
+ phy26: ethernet-phy@26 {
+ reg = <2>;
+ };
+ phy27: ethernet-phy@27 {
+ reg = <3>;
+ };
+ phy28: ethernet-phy@28 {
+ reg = <4>;
+ };
+ phy29: ethernet-phy@29 {
+ reg = <5>;
+ };
+ phy30: ethernet-phy@30 {
+ reg = <6>;
+ };
+ phy31: ethernet-phy@31 {
+ reg = <7>;
+ };
+ phy32: ethernet-phy@32 {
+ reg = <8>;
+ };
+ phy33: ethernet-phy@33 {
+ reg = <9>;
+ };
+ phy34: ethernet-phy@34 {
+ reg = <10>;
+ };
+ phy35: ethernet-phy@35 {
+ reg = <11>;
+ };
+ phy36: ethernet-phy@36 {
+ reg = <12>;
+ };
+ phy37: ethernet-phy@37 {
+ reg = <13>;
+ };
+ phy38: ethernet-phy@38 {
+ reg = <14>;
+ };
+ phy39: ethernet-phy@39 {
+ reg = <15>;
+ };
+ phy40: ethernet-phy@40 {
+ reg = <16>;
+ };
+ phy41: ethernet-phy@41 {
+ reg = <17>;
+ };
+ phy42: ethernet-phy@42 {
+ reg = <18>;
+ };
+ phy43: ethernet-phy@43 {
+ reg = <19>;
+ };
+ phy44: ethernet-phy@44 {
+ reg = <20>;
+ };
+ phy45: ethernet-phy@45 {
+ reg = <21>;
+ };
+ phy46: ethernet-phy@46 {
+ reg = <22>;
+ };
+ phy47: ethernet-phy@47 {
+ reg = <23>;
+ };
+};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy0>;
+ phy-mode = "qsgmii";
+ };
+ port1: port@1 {
+ reg = <1>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy1>;
+ phy-mode = "qsgmii";
+ };
+ port2: port@2 {
+ reg = <2>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy2>;
+ phy-mode = "qsgmii";
+ };
+ port3: port@3 {
+ reg = <3>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy3>;
+ phy-mode = "qsgmii";
+ };
+ port4: port@4 {
+ reg = <4>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy4>;
+ phy-mode = "qsgmii";
+ };
+ port5: port@5 {
+ reg = <5>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy5>;
+ phy-mode = "qsgmii";
+ };
+ port6: port@6 {
+ reg = <6>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy6>;
+ phy-mode = "qsgmii";
+ };
+ port7: port@7 {
+ reg = <7>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy7>;
+ phy-mode = "qsgmii";
+ };
+ port8: port@8 {
+ reg = <8>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy8>;
+ phy-mode = "qsgmii";
+ };
+ port9: port@9 {
+ reg = <9>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy9>;
+ phy-mode = "qsgmii";
+ };
+ port10: port@10 {
+ reg = <10>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy10>;
+ phy-mode = "qsgmii";
+ };
+ port11: port@11 {
+ reg = <11>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy11>;
+ phy-mode = "qsgmii";
+ };
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy12>;
+ phy-mode = "qsgmii";
+ };
+ port13: port@13 {
+ reg = <13>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy13>;
+ phy-mode = "qsgmii";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy14>;
+ phy-mode = "qsgmii";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy15>;
+ phy-mode = "qsgmii";
+ };
+ port16: port@16 {
+ reg = <16>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy16>;
+ phy-mode = "qsgmii";
+ };
+ port17: port@17 {
+ reg = <17>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy17>;
+ phy-mode = "qsgmii";
+ };
+ port18: port@18 {
+ reg = <18>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy18>;
+ phy-mode = "qsgmii";
+ };
+ port19: port@19 {
+ reg = <19>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy19>;
+ phy-mode = "qsgmii";
+ };
+ port20: port@20 {
+ reg = <20>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy20>;
+ phy-mode = "qsgmii";
+ };
+ port21: port@21 {
+ reg = <21>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy21>;
+ phy-mode = "qsgmii";
+ };
+ port22: port@22 {
+ reg = <22>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy22>;
+ phy-mode = "qsgmii";
+ };
+ port23: port@23 {
+ reg = <23>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy23>;
+ phy-mode = "qsgmii";
+ };
+ port24: port@24 {
+ reg = <24>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy24>;
+ phy-mode = "qsgmii";
+ };
+ port25: port@25 {
+ reg = <25>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy25>;
+ phy-mode = "qsgmii";
+ };
+ port26: port@26 {
+ reg = <26>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy26>;
+ phy-mode = "qsgmii";
+ };
+ port27: port@27 {
+ reg = <27>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy27>;
+ phy-mode = "qsgmii";
+ };
+ port28: port@28 {
+ reg = <28>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy28>;
+ phy-mode = "qsgmii";
+ };
+ port29: port@29 {
+ reg = <29>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy29>;
+ phy-mode = "qsgmii";
+ };
+ port30: port@30 {
+ reg = <30>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy30>;
+ phy-mode = "qsgmii";
+ };
+ port31: port@31 {
+ reg = <31>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy31>;
+ phy-mode = "qsgmii";
+ };
+ port32: port@32 {
+ reg = <32>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy32>;
+ phy-mode = "qsgmii";
+ };
+ port33: port@33 {
+ reg = <33>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy33>;
+ phy-mode = "qsgmii";
+ };
+ port34: port@34 {
+ reg = <34>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy34>;
+ phy-mode = "qsgmii";
+ };
+ port35: port@35 {
+ reg = <35>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy35>;
+ phy-mode = "qsgmii";
+ };
+ port36: port@36 {
+ reg = <36>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy36>;
+ phy-mode = "qsgmii";
+ };
+ port37: port@37 {
+ reg = <37>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy37>;
+ phy-mode = "qsgmii";
+ };
+ port38: port@38 {
+ reg = <38>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy38>;
+ phy-mode = "qsgmii";
+ };
+ port39: port@39 {
+ reg = <39>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy39>;
+ phy-mode = "qsgmii";
+ };
+ port40: port@40 {
+ reg = <40>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy40>;
+ phy-mode = "qsgmii";
+ };
+ port41: port@41 {
+ reg = <41>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy41>;
+ phy-mode = "qsgmii";
+ };
+ port42: port@42 {
+ reg = <42>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy42>;
+ phy-mode = "qsgmii";
+ };
+ port43: port@43 {
+ reg = <43>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy43>;
+ phy-mode = "qsgmii";
+ };
+ port44: port@44 {
+ reg = <44>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy44>;
+ phy-mode = "qsgmii";
+ };
+ port45: port@45 {
+ reg = <45>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy45>;
+ phy-mode = "qsgmii";
+ };
+ port46: port@46 {
+ reg = <46>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy46>;
+ phy-mode = "qsgmii";
+ };
+ port47: port@47 {
+ reg = <47>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy47>;
+ phy-mode = "qsgmii";
+ };
+ /* Then the 25G interfaces */
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 0c5fa9801e6f..b815ce73e5c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -637,6 +637,28 @@
status = "disabled";
};
+ gmac: ethernet@ff4e0000 {
+ compatible = "rockchip,rk3308-gmac";
+ reg = <0x0 0xff4e0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX_TX>,
+ <&cru SCLK_MAC_RX_TX>, <&cru SCLK_MAC_REF>,
+ <&cru SCLK_MAC>, <&cru ACLK_MAC>,
+ <&cru PCLK_MAC>, <&cru SCLK_MAC_RMII>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac", "clk_mac_speed";
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins &mac_refclk_12ma>;
+ resets = <&cru SRST_MAC_A>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
cru: clock-controller@ff500000 {
compatible = "rockchip,rk3308-cru";
reg = <0x0 0xff500000 0x0 0x1000>;
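
The new gmac node wires eight named clocks into the stmmac binding; a driver resolves each one by the string in clock-names, not by position in the clocks list. A hedged consumer sketch showing generic clk API usage (not the stmmac driver itself):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *stmmaceth;

	/* Matches a clock-names entry, e.g. "stmmaceth" above. */
	stmmaceth = devm_clk_get(&pdev->dev, "stmmaceth");
	if (IS_ERR(stmmaceth))
		return PTR_ERR(stmmaceth);

	/* Gate the clock on before the MAC is touched. */
	return clk_prepare_enable(stmmaceth);
}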
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index b2bcbf23eefd..ca59d1f711f8 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -42,12 +42,12 @@
};
};
- dmss: dmss {
+ dmss: bus@48000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
dma-ranges;
- ranges;
+ ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
ti,sci-dev-id = <25>;
@@ -134,7 +134,7 @@
};
};
- dmsc: dmsc@44043000 {
+ dmsc: system-controller@44043000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
mbox-names = "rx", "tx";
@@ -148,7 +148,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -373,8 +373,9 @@
clocks = <&k3_clks 145 0>;
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
index 99e94dee1bd4..deb19ae5e168 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -74,8 +74,9 @@
clocks = <&k3_clks 148 0>;
};
- mcu_gpio_intr: interrupt-controller1 {
+ mcu_gpio_intr: interrupt-controller@4210000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x04210000 0x00 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index cb340d1b401f..6cd3131eb9ff 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -433,8 +433,9 @@
#phy-cells = <0>;
};
- intr_main_gpio: interrupt-controller0 {
+ intr_main_gpio: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x00a00000 0x0 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -444,18 +445,19 @@
ti,interrupt-ranges = <0 392 32>;
};
- main-navss {
+ main_navss: bus@30800000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <118>;
- intr_main_navss: interrupt-controller1 {
+ intr_main_navss: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x2000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 0388c02c2203..f5b8ef2f5f77 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -116,11 +116,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index ed42f13e7663..7cb864b4d74a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -6,24 +6,24 @@
*/
&cbass_wakeup {
- dmsc: dmsc {
+ dmsc: system-controller@44083000 {
compatible = "ti,am654-sci";
ti,host-id = <12>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
mbox-names = "rx", "tx";
mboxes = <&secure_proxy_main 11>,
<&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x44083000 0x1000>;
+
k3_pds: power-controller {
compatible = "ti,sci-pm-domain";
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -69,8 +69,9 @@
power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
};
- intr_wkup_gpio: interrupt-controller2 {
+ intr_wkup_gpio: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x42200000 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 9e87fb313a54..eddb2ffb93ca 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -85,12 +85,6 @@
gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
};
};
-
- clk_ov5640_fixed: clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
};
&wkup_pmx0 {
@@ -287,23 +281,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_i2c1_pins_default>;
clock-frequency = <400000>;
-
- ov5640: camera@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
-
- clocks = <&clk_ov5640_fixed>;
- clock-names = "xclk";
-
- port {
- csi2_cam0: endpoint {
- remote-endpoint = <&csi2_phy0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
- };
-
};
&main_i2c2 {
@@ -496,14 +473,6 @@
};
};
-&csi2_0 {
- csi2_phy0: endpoint {
- remote-endpoint = <&csi2_cam0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
-};
-
&mcu_cpsw {
pinctrl-names = "default";
pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index f86c493a44f1..19fea8adbcff 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -68,8 +68,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -85,9 +86,12 @@
#size-cells = <2>;
ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
ti,sci-dev-id = <199>;
+ dma-coherent;
+ dma-ranges;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x310e0000 0x00 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 5e74e43822c3..5663fe3ea466 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index c2aa45a3ac79..3bcafe4c1742 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -76,8 +76,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -87,18 +88,19 @@
ti,interrupt-ranges = <8 392 56>;
};
- main-navss {
+ main_navss: bus@30000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <199>;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index d56e3475aee7..5e825e4d0306 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -249,11 +250,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index d0901e610df3..09a805cc32d7 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -68,19 +68,13 @@ CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
-ifdef REGENERATE_ARM64_CRYPTO
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl
+$(obj)/%-core.S: $(src)/%-armv8.pl
$(call cmd,perlasm)
-$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+$(obj)/sha256-core.S: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
- $(call cmd,perlasm)
-
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped
deleted file mode 100644
index fb2822abf63a..000000000000
--- a/arch/arm64/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,835 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-.extern OPENSSL_armcap_P
-#endif
-
-.text
-
-// forward "declarations" are required for Apple
-.globl poly1305_blocks
-.globl poly1305_emit
-
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
- cmp x1,xzr
- stp xzr,xzr,[x0] // zero hash value
- stp xzr,xzr,[x0,#16] // [along with is_base2_26]
-
- csel x0,xzr,x0,eq
- b.eq .Lno_key
-
-#ifndef __KERNEL__
- adrp x17,OPENSSL_armcap_P
- ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
-#endif
-
- ldp x7,x8,[x1] // load key
- mov x9,#0xfffffffc0fffffff
- movk x9,#0x0fff,lsl#48
-#ifdef __AARCH64EB__
- rev x7,x7 // flip bytes
- rev x8,x8
-#endif
- and x7,x7,x9 // &=0ffffffc0fffffff
- and x9,x9,#-4
- and x8,x8,x9 // &=0ffffffc0ffffffc
- mov w9,#-1
- stp x7,x8,[x0,#32] // save key value
- str w9,[x0,#48] // impossible key power value
-
-#ifndef __KERNEL__
- tst w17,#ARMV7_NEON
-
- adr x12,.Lpoly1305_blocks
- adr x7,.Lpoly1305_blocks_neon
- adr x13,.Lpoly1305_emit
-
- csel x12,x12,x7,eq
-
-# ifdef __ILP32__
- stp w12,w13,[x2]
-# else
- stp x12,x13,[x2]
-# endif
-#endif
- mov x0,#1
-.Lno_key:
- ret
-.size poly1305_init,.-poly1305_init
-
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- ands x2,x2,#-16
- b.eq .Lno_data
-
- ldp x4,x5,[x0] // load hash value
- ldp x6,x17,[x0,#16] // [along with is_base2_26]
- ldp x7,x8,[x0,#32] // load key value
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x17,#0 // is_base2_26?
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
-.Loop:
- ldp x10,x11,[x1],#16 // load input
- sub x2,x2,#16
-#ifdef __AARCH64EB__
- rev x10,x10
- rev x11,x11
-#endif
- adds x4,x4,x10 // accumulate input
- adcs x5,x5,x11
-
- mul x12,x4,x7 // h0*r0
- adc x6,x6,x3
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- cbnz x2,.Loop
-
- stp x4,x5,[x0] // store hash value
- stp x6,xzr,[x0,#16] // [and clear is_base2_26]
-
-.Lno_data:
- ret
-.size poly1305_blocks,.-poly1305_blocks
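-
-// The base conversion at the top of poly1305_blocks is easier to follow
-// as arithmetic. The NEON path stores the accumulator as five 26-bit
-// limbs h0..h4, so
-//
-//   $h = h_0 + h_1 2^{26} + h_2 2^{52} + h_3 2^{78} + h_4 2^{104}$,
-//
-// and the scalar path repacks it into three 64-bit words (carries from
-// the lower words propagated via adc):
-//
-//   $w_0 = (h_0 + h_1 2^{26} + h_2 2^{52}) \bmod 2^{64}$
-//   $w_1 = (\lfloor h_2/2^{12}\rfloor + h_3 2^{14} + h_4 2^{40} + c_0) \bmod 2^{64}$
-//   $w_2 = \lfloor h_4/2^{24}\rfloor + c_1$
-//
-// which is exactly the add/lsr/adds/adc sequence on x12-x14 above.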
-
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- ldp x4,x5,[x0] // load hash base 2^64
- ldp x6,x7,[x0,#16] // [along with is_base2_26]
- ldp x10,x11,[x2] // load nonce
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x7,#0 // is_base2_26?
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
- adds x12,x4,#5 // compare to modulus
- adcs x13,x5,xzr
- adc x14,x6,xzr
-
- tst x14,#-4 // see if it's carried/borrowed
-
- csel x4,x4,x12,eq
- csel x5,x5,x13,eq
-
-#ifdef __AARCH64EB__
- ror x10,x10,#32 // flip nonce words
- ror x11,x11,#32
-#endif
- adds x4,x4,x10 // accumulate nonce
- adc x5,x5,x11
-#ifdef __AARCH64EB__
- rev x4,x4 // flip output bytes
- rev x5,x5
-#endif
- stp x4,x5,[x1] // write result
-
- ret
-.size poly1305_emit,.-poly1305_emit
-.type poly1305_mult,%function
-.align 5
-poly1305_mult:
- mul x12,x4,x7 // h0*r0
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- ret
-.size poly1305_mult,.-poly1305_mult
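-
-// The s1 = r1 + (r1 >> 2) precomputation used by poly1305_mult follows
-// from the prime $p = 2^{130} - 5$ and from key clamping, which forces
-// $4 \mid r_1$. With $h = h_0 + h_1 2^{64} + h_2 2^{128}$ and
-// $r = r_0 + r_1 2^{64}$, the overflowing partial products fold back
-// down because $2^{130} \equiv 5 \pmod p$:
-//
-//   $h_1 r_1 2^{128} = h_1 \tfrac{r_1}{4} 2^{130}
-//       \equiv h_1 (r_1 + \tfrac{r_1}{4}) = h_1 s_1 \pmod p$,
-//   $h_2 r_1 2^{192} \equiv h_2 s_1 2^{64} \pmod p$.
-//
-// The "final reduction" splits the top word $d_2 = 4q + t$ and uses
-// $4q\,2^{128} = q\,2^{130} \equiv 5q = 4q + q$, hence the
-// add x10,x10,x14,lsr#2 with the low two bits retained in h2.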
-
-.type poly1305_splat,%function
-.align 4
-poly1305_splat:
- and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x13,x4,#26,#26
- extr x14,x5,x4,#52
- and x14,x14,#0x03ffffff
- ubfx x15,x5,#14,#26
- extr x16,x6,x5,#40
-
- str w12,[x0,#16*0] // r0
- add w12,w13,w13,lsl#2 // r1*5
- str w13,[x0,#16*1] // r1
- add w13,w14,w14,lsl#2 // r2*5
- str w12,[x0,#16*2] // s1
- str w14,[x0,#16*3] // r2
- add w14,w15,w15,lsl#2 // r3*5
- str w13,[x0,#16*4] // s2
- str w15,[x0,#16*5] // r3
- add w15,w16,w16,lsl#2 // r4*5
- str w14,[x0,#16*6] // s3
- str w16,[x0,#16*7] // r4
- str w15,[x0,#16*8] // s4
-
- ret
-.size poly1305_splat,.-poly1305_splat
-
-#ifdef __KERNEL__
-.globl poly1305_blocks_neon
-#endif
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr x17,[x0,#24]
- cmp x2,#128
- b.lo .Lpoly1305_blocks
-
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-80]!
- add x29,sp,#0
-
- stp d8,d9,[sp,#16] // meet ABI requirements
- stp d10,d11,[sp,#32]
- stp d12,d13,[sp,#48]
- stp d14,d15,[sp,#64]
-
- cbz x17,.Lbase2_64_neon
-
- ldp w10,w11,[x0] // load hash value base 2^26
- ldp w12,w13,[x0,#8]
- ldr w14,[x0,#16]
-
- tst x2,#31
- b.eq .Leven_neon
-
- ldp x7,x8,[x0,#32] // load key value
-
- add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
- lsr x5,x12,#12
- adds x4,x4,x12,lsl#52
- add x5,x5,x13,lsl#14
- adc x5,x5,xzr
- lsr x6,x14,#24
- adds x5,x5,x14,lsl#40
- adc x14,x6,xzr // can be partially reduced...
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- b .Leven_neon
-
-.align 4
-.Lbase2_64_neon:
- ldp x7,x8,[x0,#32] // load key value
-
- ldp x4,x5,[x0] // load hash value base 2^64
- ldr x6,[x0,#16]
-
- tst x2,#31
- b.eq .Linit_neon
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
-.Linit_neon:
- ldr w17,[x0,#48] // first table element
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- cmp w17,#-1 // is value impossible?
- b.ne .Leven_neon
-
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
- ////////////////////////////////// initialize r^n table
- mov x4,x7 // r^1
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- mov x5,x8
- mov x6,xzr
- add x0,x0,#48+12
- bl poly1305_splat
-
- bl poly1305_mult // r^2
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^3
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^4
- sub x0,x0,#4
- bl poly1305_splat
- sub x0,x0,#48 // restore original x0
- b .Ldo_neon
-
-.align 4
-.Leven_neon:
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
-.Ldo_neon:
- ldp x8,x12,[x1,#32] // inp[2:3]
- subs x2,x2,#64
- ldp x9,x13,[x1,#48]
- add x16,x1,#96
- adr x17,.Lzeros
-
- lsl x3,x3,#24
- add x15,x0,#48
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d14,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d15,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- fmov d16,x8
- fmov d17,x10
- fmov d18,x12
-
- ldp x8,x12,[x1],#16 // inp[0:1]
- ldp x9,x13,[x1],#48
-
- ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
- ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
- ld1 {v8.4s},[x15]
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d9,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d10,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- movi v31.2d,#-1
- fmov d11,x8
- fmov d12,x10
- fmov d13,x12
- ushr v31.2d,v31.2d,#38
-
- b.ls .Lskip_loop
-
-.align 4
-.Loop_neon:
- ////////////////////////////////////////////////////////////////
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- // ___________________/
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- // ___________________/ ____________________/
- //
-	// Note that we start with inp[2:3]*r^2, because it doesn't
-	// depend on the reduction from the previous iteration.
- ////////////////////////////////////////////////////////////////
- // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
- // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
- // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
- // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
- // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
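-	//
-	// As a rough illustration in C-like pseudocode (with 26-bit limbs
-	// h0-h4, r0-r4 and si = 5*ri as stored by poly1305_splat), the
-	// first output limb of the schedule above would be:
-	//
-	//	d0 = (u64)h0*r0 + (u64)h1*s4 + (u64)h2*s3
-	//	   + (u64)h3*s2 + (u64)h4*s1;
-	//
-	// since 2^130 == 5 (mod 2^130-5), any product with weight 2^130
-	// or more re-enters five limbs lower, multiplied by 5.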
-
- subs x2,x2,#64
- umull v23.2d,v14.2s,v7.s[2]
- csel x16,x17,x16,lo
- umull v22.2d,v14.2s,v5.s[2]
- umull v21.2d,v14.2s,v3.s[2]
- ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
- umull v20.2d,v14.2s,v1.s[2]
- ldp x9,x13,[x16],#48
- umull v19.2d,v14.2s,v0.s[2]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- umlal v23.2d,v15.2s,v5.s[2]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v22.2d,v15.2s,v3.s[2]
- and x5,x9,#0x03ffffff
- umlal v21.2d,v15.2s,v1.s[2]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v15.2s,v0.s[2]
- ubfx x7,x9,#26,#26
- umlal v19.2d,v15.2s,v8.s[2]
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
-
- umlal v23.2d,v16.2s,v3.s[2]
- extr x8,x12,x8,#52
- umlal v22.2d,v16.2s,v1.s[2]
- extr x9,x13,x9,#52
- umlal v21.2d,v16.2s,v0.s[2]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v20.2d,v16.2s,v8.s[2]
- fmov d14,x4
- umlal v19.2d,v16.2s,v6.s[2]
- and x8,x8,#0x03ffffff
-
- umlal v23.2d,v17.2s,v1.s[2]
- and x9,x9,#0x03ffffff
- umlal v22.2d,v17.2s,v0.s[2]
- ubfx x10,x12,#14,#26
- umlal v21.2d,v17.2s,v8.s[2]
- ubfx x11,x13,#14,#26
- umlal v20.2d,v17.2s,v6.s[2]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v19.2d,v17.2s,v4.s[2]
- fmov d15,x6
-
- add v11.2s,v11.2s,v26.2s
- add x12,x3,x12,lsr#40
- umlal v23.2d,v18.2s,v0.s[2]
- add x13,x3,x13,lsr#40
- umlal v22.2d,v18.2s,v8.s[2]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v21.2d,v18.2s,v6.s[2]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v18.2s,v4.s[2]
- fmov d16,x8
- umlal v19.2d,v18.2s,v2.s[2]
- fmov d17,x10
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- fmov d18,x12
- umlal v22.2d,v11.2s,v1.s[0]
- ldp x8,x12,[x1],#16 // inp[0:1]
- umlal v19.2d,v11.2s,v6.s[0]
- ldp x9,x13,[x1],#48
- umlal v23.2d,v11.2s,v3.s[0]
- umlal v20.2d,v11.2s,v8.s[0]
- umlal v21.2d,v11.2s,v0.s[0]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.s[0]
- umlal v23.2d,v9.2s,v7.s[0]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v21.2d,v9.2s,v3.s[0]
- and x5,x9,#0x03ffffff
- umlal v19.2d,v9.2s,v0.s[0]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v9.2s,v1.s[0]
- ubfx x7,x9,#26,#26
-
- add v12.2s,v12.2s,v27.2s
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- umlal v22.2d,v10.2s,v3.s[0]
- extr x8,x12,x8,#52
- umlal v23.2d,v10.2s,v5.s[0]
- extr x9,x13,x9,#52
- umlal v19.2d,v10.2s,v8.s[0]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v21.2d,v10.2s,v1.s[0]
- fmov d9,x4
- umlal v20.2d,v10.2s,v0.s[0]
- and x8,x8,#0x03ffffff
-
- add v13.2s,v13.2s,v28.2s
- and x9,x9,#0x03ffffff
- umlal v22.2d,v12.2s,v0.s[0]
- ubfx x10,x12,#14,#26
- umlal v19.2d,v12.2s,v4.s[0]
- ubfx x11,x13,#14,#26
- umlal v23.2d,v12.2s,v1.s[0]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v20.2d,v12.2s,v6.s[0]
- fmov d10,x6
- umlal v21.2d,v12.2s,v8.s[0]
- add x12,x3,x12,lsr#40
-
- umlal v22.2d,v13.2s,v8.s[0]
- add x13,x3,x13,lsr#40
- umlal v19.2d,v13.2s,v2.s[0]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v23.2d,v13.2s,v0.s[0]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v13.2s,v4.s[0]
- fmov d11,x8
- umlal v21.2d,v13.2s,v6.s[0]
- fmov d12,x10
- fmov d13,x12
-
- /////////////////////////////////////////////////////////////////
- // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- // and P. Schwabe
- //
- // [see discussion in poly1305-armv4 module]
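-	//
-	// One carry step, as a minimal C-like sketch: each 26-bit limb
-	// hands its overflow to the next, and the carry out of h4 wraps
-	// around to h0 multiplied by 5 (the add + shl #2 pair below):
-	//
-	//	c = h4 >> 26; h4 &= 0x03ffffff;
-	//	h0 += c; h0 += c << 2;	// h0 += 5*c
-	//
-	// The limbs stay only partially reduced; the canonical value is
-	// produced by poly1305_emit.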
-
- ushr v29.2d,v22.2d,#26
- xtn v27.2s,v22.2d
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- xtn v28.2s,v23.2d
- ushr v30.2d,v20.2d,#26
- xtn v25.2s,v20.2d
- bic v28.2s,#0xfc,lsl#24
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- shrn v30.2s,v21.2d,#26
- xtn v26.2s,v21.2d
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- bic v25.2s,#0xfc,lsl#24
- add v27.2s,v27.2s,v30.2s // h2 -> h3
- bic v26.2s,#0xfc,lsl#24
-
- shrn v29.2s,v19.2d,#26
- xtn v24.2s,v19.2d
- ushr v30.2s,v27.2s,#26
- bic v27.2s,#0xfc,lsl#24
- bic v24.2s,#0xfc,lsl#24
- add v25.2s,v25.2s,v29.2s // h0 -> h1
- add v28.2s,v28.2s,v30.2s // h3 -> h4
-
- b.hi .Loop_neon
-
-.Lskip_loop:
- dup v16.2d,v16.d[0]
- add v11.2s,v11.2s,v26.2s
-
- ////////////////////////////////////////////////////////////////
- // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- adds x2,x2,#32
- b.ne .Long_tail
-
- dup v16.2d,v11.d[0]
- add v14.2s,v9.2s,v24.2s
- add v17.2s,v12.2s,v27.2s
- add v15.2s,v10.2s,v25.2s
- add v18.2s,v13.2s,v28.2s
-
-.Long_tail:
- dup v14.2d,v14.d[0]
- umull2 v19.2d,v16.4s,v6.4s
- umull2 v22.2d,v16.4s,v1.4s
- umull2 v23.2d,v16.4s,v3.4s
- umull2 v21.2d,v16.4s,v0.4s
- umull2 v20.2d,v16.4s,v8.4s
-
- dup v15.2d,v15.d[0]
- umlal2 v19.2d,v14.4s,v0.4s
- umlal2 v21.2d,v14.4s,v3.4s
- umlal2 v22.2d,v14.4s,v5.4s
- umlal2 v23.2d,v14.4s,v7.4s
- umlal2 v20.2d,v14.4s,v1.4s
-
- dup v17.2d,v17.d[0]
- umlal2 v19.2d,v15.4s,v8.4s
- umlal2 v22.2d,v15.4s,v3.4s
- umlal2 v21.2d,v15.4s,v1.4s
- umlal2 v23.2d,v15.4s,v5.4s
- umlal2 v20.2d,v15.4s,v0.4s
-
- dup v18.2d,v18.d[0]
- umlal2 v22.2d,v17.4s,v0.4s
- umlal2 v23.2d,v17.4s,v1.4s
- umlal2 v19.2d,v17.4s,v4.4s
- umlal2 v20.2d,v17.4s,v6.4s
- umlal2 v21.2d,v17.4s,v8.4s
-
- umlal2 v22.2d,v18.4s,v8.4s
- umlal2 v19.2d,v18.4s,v2.4s
- umlal2 v23.2d,v18.4s,v0.4s
- umlal2 v20.2d,v18.4s,v4.4s
- umlal2 v21.2d,v18.4s,v6.4s
-
- b.eq .Lshort_tail
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4:r^3 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- umlal v22.2d,v11.2s,v1.2s
- umlal v19.2d,v11.2s,v6.2s
- umlal v23.2d,v11.2s,v3.2s
- umlal v20.2d,v11.2s,v8.2s
- umlal v21.2d,v11.2s,v0.2s
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.2s
- umlal v19.2d,v9.2s,v0.2s
- umlal v23.2d,v9.2s,v7.2s
- umlal v20.2d,v9.2s,v1.2s
- umlal v21.2d,v9.2s,v3.2s
-
- add v12.2s,v12.2s,v27.2s
- umlal v22.2d,v10.2s,v3.2s
- umlal v19.2d,v10.2s,v8.2s
- umlal v23.2d,v10.2s,v5.2s
- umlal v20.2d,v10.2s,v0.2s
- umlal v21.2d,v10.2s,v1.2s
-
- add v13.2s,v13.2s,v28.2s
- umlal v22.2d,v12.2s,v0.2s
- umlal v19.2d,v12.2s,v4.2s
- umlal v23.2d,v12.2s,v1.2s
- umlal v20.2d,v12.2s,v6.2s
- umlal v21.2d,v12.2s,v8.2s
-
- umlal v22.2d,v13.2s,v8.2s
- umlal v19.2d,v13.2s,v2.2s
- umlal v23.2d,v13.2s,v0.2s
- umlal v20.2d,v13.2s,v4.2s
- umlal v21.2d,v13.2s,v6.2s
-
-.Lshort_tail:
- ////////////////////////////////////////////////////////////////
- // horizontal add
-
- addp v22.2d,v22.2d,v22.2d
- ldp d8,d9,[sp,#16] // meet ABI requirements
- addp v19.2d,v19.2d,v19.2d
- ldp d10,d11,[sp,#32]
- addp v23.2d,v23.2d,v23.2d
- ldp d12,d13,[sp,#48]
- addp v20.2d,v20.2d,v20.2d
- ldp d14,d15,[sp,#64]
- addp v21.2d,v21.2d,v21.2d
- ldr x30,[sp,#8]
-
- ////////////////////////////////////////////////////////////////
- // lazy reduction, but without narrowing
-
- ushr v29.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
-
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- and v23.16b,v23.16b,v31.16b
- ushr v30.2d,v20.2d,#26
- and v20.16b,v20.16b,v31.16b
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- ushr v30.2d,v21.2d,#26
- and v21.16b,v21.16b,v31.16b
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- add v22.2d,v22.2d,v30.2d // h2 -> h3
-
- ushr v29.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- ushr v30.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- add v20.2d,v20.2d,v29.2d // h0 -> h1
- add v23.2d,v23.2d,v30.2d // h3 -> h4
-
- ////////////////////////////////////////////////////////////////
- // write the result, can be partially reduced
-
- st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
- mov x4,#1
- st1 {v23.s}[0],[x0]
- str x4,[x0,#8] // set is_base2_26
-
- ldr x29,[sp],#80
- .inst 0xd50323bf // autiasp
- ret
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0
-.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm"
-.align 2
-#if !defined(__KERNEL__) && !defined(_WIN64)
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped
deleted file mode 100644
index 7c7ce2e3bad6..000000000000
--- a/arch/arm64/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2069 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***)	Super-impressive coefficients over gcc-generated code are
-//	an indication of some compiler "pathology"; most notably, code
-//	generated with -mgeneral-regs-only is significantly faster
-//	and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it made no sense to implement a NEON
-// version of SHA256 for 64-bit processors, because the performance
-// improvement on the most widespread Cortex-A5x processors was observed
-// to be marginal: much the same on Cortex-A53 and only ~10% on A57. But
-// it was then observed that 32-bit NEON SHA256 performs significantly
-// better than the 64-bit scalar version on *some* of the more recent
-// processors. As a result, a 64-bit NEON version of SHA256 was added to
-// provide the best all-round performance; for example, it executes ~30%
-// faster on X-Gene and Mongoose. [For reference, a NEON version of
-// SHA512 is bound to deliver much less improvement, likely *negative*
-// on Cortex-A5x, which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha256_block_data_order
-.type sha256_block_data_order,%function
-.align 6
-sha256_block_data_order:
-#ifndef __KERNEL__
-# ifdef __ILP32__
- ldrsw x16,.LOPENSSL_armcap_P
-# else
- ldr x16,.LOPENSSL_armcap_P
-# endif
- adr x17,.LOPENSSL_armcap_P
- add x16,x16,x17
- ldr w16,[x16]
- tst w16,#ARMV8_SHA256
- b.ne .Lv8_entry
- tst w16,#ARMV7_NEON
- b.ne .Lneon_entry
-#endif
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*4
-
- ldp w20,w21,[x0] // load context
- ldp w22,w23,[x0,#2*4]
- ldp w24,w25,[x0,#4*4]
- add x2,x1,x2,lsl#6 // end of input
- ldp w26,w27,[x0,#6*4]
- adr x30,.LK256
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp w3,w4,[x1],#2*4
- ldr w19,[x30],#4 // *K++
- eor w28,w21,w22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev w3,w3 // 0
-#endif
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w6,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w3 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w4,w4 // 1
-#endif
- ldp w5,w6,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w7,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w4 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w5,w5 // 2
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w8,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w5 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w6,w6 // 3
-#endif
- ldp w7,w8,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w9,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w6 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w7,w7 // 4
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w10,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w7 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w10,ror#11 // Sigma1(e)
- ror w10,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w10,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w8,w8 // 5
-#endif
- ldp w9,w10,[x1],#2*4
- add w23,w23,w17 // h+=Sigma0(a)
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w11,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w8 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w11,ror#11 // Sigma1(e)
- ror w11,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w11,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w9,w9 // 6
-#endif
- add w22,w22,w17 // h+=Sigma0(a)
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w12,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w9 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w12,ror#11 // Sigma1(e)
- ror w12,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w12,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w10,w10 // 7
-#endif
- ldp w11,w12,[x1],#2*4
- add w21,w21,w17 // h+=Sigma0(a)
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- eor w13,w25,w25,ror#14
- and w17,w26,w25
- bic w28,w27,w25
- add w20,w20,w10 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w13,ror#11 // Sigma1(e)
- ror w13,w21,#2
- add w20,w20,w17 // h+=Ch(e,f,g)
- eor w17,w21,w21,ror#9
- add w20,w20,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w24,w24,w20 // d+=h
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w13,w17,ror#13 // Sigma0(a)
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w20,w20,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w11,w11 // 8
-#endif
- add w20,w20,w17 // h+=Sigma0(a)
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w14,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w11 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w14,ror#11 // Sigma1(e)
- ror w14,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w14,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w12,w12 // 9
-#endif
- ldp w13,w14,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w15,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w12 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w15,ror#11 // Sigma1(e)
- ror w15,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w15,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w13,w13 // 10
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w0,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w13 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w0,ror#11 // Sigma1(e)
- ror w0,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w0,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w14,w14 // 11
-#endif
- ldp w15,w0,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w6,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w14 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w15,w15 // 12
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w7,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w15 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w0,w0 // 13
-#endif
- ldp w1,w2,[x1]
- add w23,w23,w17 // h+=Sigma0(a)
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w8,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w0 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w1,w1 // 14
-#endif
- ldr w6,[sp,#12]
- add w22,w22,w17 // h+=Sigma0(a)
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w9,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w1 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w2,w2 // 15
-#endif
- ldr w7,[sp,#0]
- add w21,w21,w17 // h+=Sigma0(a)
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
-.Loop_16_xx:
- ldr w8,[sp,#4]
- str w11,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w10,w5,#7
- and w17,w25,w24
- ror w9,w2,#17
- bic w19,w26,w24
- ror w11,w20,#2
- add w27,w27,w3 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w10,w10,w5,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w11,w11,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w9,w9,w2,ror#19
- eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w11,w20,ror#22 // Sigma0(a)
- eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
- add w4,w4,w13
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w4,w4,w10
- add w27,w27,w17 // h+=Sigma0(a)
- add w4,w4,w9
- ldr w9,[sp,#8]
- str w12,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w11,w6,#7
- and w17,w24,w23
- ror w10,w3,#17
- bic w28,w25,w23
- ror w12,w27,#2
- add w26,w26,w4 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w11,w11,w6,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w12,w12,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w10,w10,w3,ror#19
- eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w12,w27,ror#22 // Sigma0(a)
- eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
- add w5,w5,w14
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w5,w5,w11
- add w26,w26,w17 // h+=Sigma0(a)
- add w5,w5,w10
- ldr w10,[sp,#12]
- str w13,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w12,w7,#7
- and w17,w23,w22
- ror w11,w4,#17
- bic w19,w24,w22
- ror w13,w26,#2
- add w25,w25,w5 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w12,w12,w7,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w13,w13,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w11,w11,w4,ror#19
- eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w13,w26,ror#22 // Sigma0(a)
- eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
- add w6,w6,w15
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w6,w6,w12
- add w25,w25,w17 // h+=Sigma0(a)
- add w6,w6,w11
- ldr w11,[sp,#0]
- str w14,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w13,w8,#7
- and w17,w22,w21
- ror w12,w5,#17
- bic w28,w23,w21
- ror w14,w25,#2
- add w24,w24,w6 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w13,w13,w8,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w14,w14,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w12,w12,w5,ror#19
- eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w14,w25,ror#22 // Sigma0(a)
- eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
- add w7,w7,w0
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w7,w7,w13
- add w24,w24,w17 // h+=Sigma0(a)
- add w7,w7,w12
- ldr w12,[sp,#4]
- str w15,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w14,w9,#7
- and w17,w21,w20
- ror w13,w6,#17
- bic w19,w22,w20
- ror w15,w24,#2
- add w23,w23,w7 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w14,w14,w9,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w15,w15,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w13,w13,w6,ror#19
- eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w15,w24,ror#22 // Sigma0(a)
- eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
- add w8,w8,w1
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w8,w8,w14
- add w23,w23,w17 // h+=Sigma0(a)
- add w8,w8,w13
- ldr w13,[sp,#8]
- str w0,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w15,w10,#7
- and w17,w20,w27
- ror w14,w7,#17
- bic w28,w21,w27
- ror w0,w23,#2
- add w22,w22,w8 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w15,w15,w10,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w0,w0,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w14,w14,w7,ror#19
- eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w0,w23,ror#22 // Sigma0(a)
- eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
- add w9,w9,w2
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w9,w9,w15
- add w22,w22,w17 // h+=Sigma0(a)
- add w9,w9,w14
- ldr w14,[sp,#12]
- str w1,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w0,w11,#7
- and w17,w27,w26
- ror w15,w8,#17
- bic w19,w20,w26
- ror w1,w22,#2
- add w21,w21,w9 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w0,w0,w11,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w1,w1,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w15,w15,w8,ror#19
- eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w1,w22,ror#22 // Sigma0(a)
- eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
- add w10,w10,w3
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w10,w10,w0
- add w21,w21,w17 // h+=Sigma0(a)
- add w10,w10,w15
- ldr w15,[sp,#0]
- str w2,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w1,w12,#7
- and w17,w26,w25
- ror w0,w9,#17
- bic w28,w27,w25
- ror w2,w21,#2
- add w20,w20,w10 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w1,w1,w12,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w2,w2,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w0,w0,w9,ror#19
- eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w2,w21,ror#22 // Sigma0(a)
- eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
- add w11,w11,w4
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w11,w11,w1
- add w20,w20,w17 // h+=Sigma0(a)
- add w11,w11,w0
- ldr w0,[sp,#4]
- str w3,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w2,w13,#7
- and w17,w25,w24
- ror w1,w10,#17
- bic w19,w26,w24
- ror w3,w20,#2
- add w27,w27,w11 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w2,w2,w13,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w3,w3,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w1,w1,w10,ror#19
- eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w3,w20,ror#22 // Sigma0(a)
- eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
- add w12,w12,w5
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w12,w12,w2
- add w27,w27,w17 // h+=Sigma0(a)
- add w12,w12,w1
- ldr w1,[sp,#8]
- str w4,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w3,w14,#7
- and w17,w24,w23
- ror w2,w11,#17
- bic w28,w25,w23
- ror w4,w27,#2
- add w26,w26,w12 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w3,w3,w14,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w4,w4,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w2,w2,w11,ror#19
- eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w4,w27,ror#22 // Sigma0(a)
- eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
- add w13,w13,w6
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w13,w13,w3
- add w26,w26,w17 // h+=Sigma0(a)
- add w13,w13,w2
- ldr w2,[sp,#12]
- str w5,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w4,w15,#7
- and w17,w23,w22
- ror w3,w12,#17
- bic w19,w24,w22
- ror w5,w26,#2
- add w25,w25,w13 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w4,w4,w15,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w5,w5,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w3,w3,w12,ror#19
- eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w5,w26,ror#22 // Sigma0(a)
- eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
- add w14,w14,w7
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w14,w14,w4
- add w25,w25,w17 // h+=Sigma0(a)
- add w14,w14,w3
- ldr w3,[sp,#0]
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w5,w0,#7
- and w17,w22,w21
- ror w4,w13,#17
- bic w28,w23,w21
- ror w6,w25,#2
- add w24,w24,w14 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w5,w5,w0,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w6,w6,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w4,w4,w13,ror#19
- eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w25,ror#22 // Sigma0(a)
- eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
- add w15,w15,w8
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w15,w15,w5
- add w24,w24,w17 // h+=Sigma0(a)
- add w15,w15,w4
- ldr w4,[sp,#4]
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w6,w1,#7
- and w17,w21,w20
- ror w5,w14,#17
- bic w19,w22,w20
- ror w7,w24,#2
- add w23,w23,w15 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w6,w6,w1,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w7,w7,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w5,w5,w14,ror#19
- eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w24,ror#22 // Sigma0(a)
- eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
- add w0,w0,w9
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w0,w0,w6
- add w23,w23,w17 // h+=Sigma0(a)
- add w0,w0,w5
- ldr w5,[sp,#8]
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w7,w2,#7
- and w17,w20,w27
- ror w6,w15,#17
- bic w28,w21,w27
- ror w8,w23,#2
- add w22,w22,w0 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w7,w7,w2,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w8,w8,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w6,w6,w15,ror#19
- eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w23,ror#22 // Sigma0(a)
- eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
- add w1,w1,w10
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w1,w1,w7
- add w22,w22,w17 // h+=Sigma0(a)
- add w1,w1,w6
- ldr w6,[sp,#12]
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w8,w3,#7
- and w17,w27,w26
- ror w7,w0,#17
- bic w19,w20,w26
- ror w9,w22,#2
- add w21,w21,w1 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w8,w8,w3,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w9,w9,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w7,w7,w0,ror#19
- eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w22,ror#22 // Sigma0(a)
- eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
- add w2,w2,w11
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w2,w2,w8
- add w21,w21,w17 // h+=Sigma0(a)
- add w2,w2,w7
- ldr w7,[sp,#0]
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
- cbnz w19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#260 // rewind
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#2*4]
- add x1,x1,#14*4 // advance input pointer
- ldp w7,w8,[x0,#4*4]
- add w20,w20,w3
- ldp w9,w10,[x0,#6*4]
- add w21,w21,w4
- add w22,w22,w5
- add w23,w23,w6
- stp w20,w21,[x0]
- add w24,w24,w7
- add w25,w25,w8
- stp w22,w23,[x0,#2*4]
- add w26,w26,w9
- add w27,w27,w10
- cmp x1,x2
- stp w24,w25,[x0,#4*4]
- stp w26,w27,[x0,#6*4]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*4
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha256_block_data_order,.-sha256_block_data_order
-
-.align 6
-.type .LK256,%object
-.LK256:
- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
- .long 0 //terminator
-.size .LK256,.-.LK256
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.type sha256_block_armv8,%function
-.align 6
-sha256_block_armv8:
-.Lv8_entry:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-
- ld1 {v0.4s,v1.4s},[x0]
- adr x3,.LK256
-
-.Loop_hw:
- ld1 {v4.16b-v7.16b},[x1],#64
- sub x2,x2,#1
- ld1 {v16.4s},[x3],#16
- rev32 v4.16b,v4.16b
- rev32 v5.16b,v5.16b
- rev32 v6.16b,v6.16b
- rev32 v7.16b,v7.16b
- orr v18.16b,v0.16b,v0.16b // offload
- orr v19.16b,v1.16b,v1.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- ld1 {v17.4s},[x3]
- add v16.4s,v16.4s,v6.4s
- sub x3,x3,#64*4-16 // rewind
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- add v17.4s,v17.4s,v7.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- add v0.4s,v0.4s,v18.4s
- add v1.4s,v1.4s,v19.4s
-
- cbnz x2,.Loop_hw
-
- st1 {v0.4s,v1.4s},[x0]
-
- ldr x29,[sp],#16
- ret
-.size sha256_block_armv8,.-sha256_block_armv8
-#endif
-#ifdef __KERNEL__
-.globl sha256_block_neon
-#endif
-.type sha256_block_neon,%function
-.align 4
-sha256_block_neon:
-.Lneon_entry:
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- sub sp,sp,#16*4
-
- adr x16,.LK256
- add x2,x1,x2,lsl#6 // len to point at the end of inp
-
- ld1 {v0.16b},[x1], #16
- ld1 {v1.16b},[x1], #16
- ld1 {v2.16b},[x1], #16
- ld1 {v3.16b},[x1], #16
- ld1 {v4.4s},[x16], #16
- ld1 {v5.4s},[x16], #16
- ld1 {v6.4s},[x16], #16
- ld1 {v7.4s},[x16], #16
- rev32 v0.16b,v0.16b // yes, even on
- rev32 v1.16b,v1.16b // big-endian
- rev32 v2.16b,v2.16b
- rev32 v3.16b,v3.16b
- mov x17,sp
- add v4.4s,v4.4s,v0.4s
- add v5.4s,v5.4s,v1.4s
- add v6.4s,v6.4s,v2.4s
- st1 {v4.4s-v5.4s},[x17], #32
- add v7.4s,v7.4s,v3.4s
- st1 {v6.4s-v7.4s},[x17]
- sub x17,x17,#32
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#8]
- ldp w7,w8,[x0,#16]
- ldp w9,w10,[x0,#24]
- ldr w12,[sp,#0]
- mov w13,wzr
- eor w14,w4,w5
- mov w15,wzr
- b .L_00_48
-
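-	// Each iteration of .L_00_48 interleaves 16 scalar rounds with the
-	// NEON message-schedule expansion: v0-v3 hold the current 16 words,
-	// the ushr/sli/eor pairs compute sigma0/sigma1, and K[i]+W[i] is
-	// staged on the stack (st1 {v4.4s}) for the scalar rounds to
-	// consume via ldr w12,[sp,#...].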
-.align 4
-.L_00_48:
- ext v4.16b,v0.16b,v1.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v2.16b,v3.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v3.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v0.4s,v0.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v0.4s,v0.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v0.4s,v0.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v0.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v0.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v0.4s,#15
- add w8,w8,w12
- ushr v17.4s,v0.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v0.4s,#13
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v0.4s,v0.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v0.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v1.16b,v2.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v3.16b,v0.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v0.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v1.4s,v1.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v1.4s,v1.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v1.4s,v1.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v1.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v1.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v1.4s,#15
- add w4,w4,w12
- ushr v17.4s,v1.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v1.4s,#13
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v1.4s,v1.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v1.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- ext v4.16b,v2.16b,v3.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v0.16b,v1.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v1.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v2.4s,v2.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v2.4s,v2.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v2.4s,v2.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v2.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v2.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v2.4s,#15
- add w8,w8,w12
- ushr v17.4s,v2.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v2.4s,#13
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v2.4s,v2.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v2.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v3.16b,v0.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v1.16b,v2.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v2.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v3.4s,v3.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v3.4s,v3.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v3.4s,v3.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v3.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v3.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v3.4s,#15
- add w4,w4,w12
- ushr v17.4s,v3.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v3.4s,#13
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v3.4s,v3.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v3.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[x16]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- cmp w12,#0 // check for K256 terminator
- ldr w12,[sp,#0]
- sub x17,x17,#64
- bne .L_00_48
-
- sub x16,x16,#256 // rewind x16
- cmp x1,x2
- mov x17, #64
- csel x17, x17, xzr, eq
- sub x1,x1,x17 // avoid SEGV
- mov x17,sp
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v0.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v0.16b,v0.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v0.4s
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v1.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v1.16b,v1.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v1.4s
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v2.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v2.16b,v2.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v2.4s
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v3.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v3.16b,v3.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v3.4s
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w3,w3,w15 // h+=Sigma0(a) from the past
- ldp w11,w12,[x0,#0]
- add w3,w3,w13 // h+=Maj(a,b,c) from the past
- ldp w13,w14,[x0,#8]
- add w3,w3,w11 // accumulate
- add w4,w4,w12
- ldp w11,w12,[x0,#16]
- add w5,w5,w13
- add w6,w6,w14
- ldp w13,w14,[x0,#24]
- add w7,w7,w11
- add w8,w8,w12
- ldr w12,[sp,#0]
- stp w3,w4,[x0,#0]
- add w9,w9,w13
- mov w13,wzr
- stp w5,w6,[x0,#8]
- add w10,w10,w14
- stp w7,w8,[x0,#16]
- eor w14,w4,w5
- stp w9,w10,[x0,#24]
- mov w15,wzr
- mov x17,sp
- b.ne .L_00_48
-
- ldr x29,[x29]
- add sp,sp,#16*4+16
- ret
-.size sha256_block_neon,.-sha256_block_neon
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
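
Two single-line comments in the deleted NEON body above are worth unpacking. First, "check for K256 terminator": the round-constant table is followed by an all-zero word, so loading it signals the end of the schedule without a separate counter. Second, "avoid SEGV": the cmp/csel/sub triple steps the input pointer back by one 64-byte block once it has reached the end, so the unconditional ld1 preloads re-read the final block instead of running past the caller's buffer. A minimal C restatement of that rewind, assuming in and end are byte pointers into the message:

	/* Sketch of "cmp x1,x2; csel x17,x17,xzr,eq; sub x1,x1,x17". */
	static const unsigned char *clamp_input(const unsigned char *in,
						const unsigned char *end)
	{
		/* on the last iteration, back up one block so the
		 * speculative preloads stay inside the buffer */
		return (in == end) ? in - 64 : in;
	}
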
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped
deleted file mode 100644
index e063a6106720..000000000000
--- a/arch/arm64/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1093 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-//                 SHA256-hw   SHA256(*)      SHA512
-// Apple A7        1.97        10.5 (+33%)    6.73 (-1%(**))
-// Cortex-A53      2.38        15.5 (+115%)   10.0 (+150%(***))
-// Cortex-A57      2.31        11.6 (+86%)    7.51 (+260%(***))
-// Denver          2.01        10.5 (+26%)    6.70 (+8%)
-// X-Gene                      20.0 (+100%)   12.8 (+300%(***))
-// Mongoose        2.36        13.0 (+50%)    8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are an
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significantly faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it made no sense to implement a NEON
-// version of SHA256 for 64-bit processors. This is because the performance
-// improvement on the most widespread Cortex-A5x processors was observed
-// to be marginal: parity on Cortex-A53 and only ~10% on A57. But then it
-// was observed that 32-bit NEON SHA256 performs significantly better than
-// the 64-bit scalar version on *some* of the more recent processors. As a
-// result, a 64-bit NEON version of SHA256 was added to provide the best
-// all-round performance. For example, it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, a NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x, which
-// is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha512_block_data_order
-.type sha512_block_data_order,%function
-.align 6
-sha512_block_data_order:
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*8
-
- ldp x20,x21,[x0] // load context
- ldp x22,x23,[x0,#2*8]
- ldp x24,x25,[x0,#4*8]
- add x2,x1,x2,lsl#7 // end of input
- ldp x26,x27,[x0,#6*8]
- adr x30,.LK512
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp x3,x4,[x1],#2*8
- ldr x19,[x30],#8 // *K++
- eor x28,x21,x22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev x3,x3 // 0
-#endif
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x6,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x3 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x4,x4 // 1
-#endif
- ldp x5,x6,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x7,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x4 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x5,x5 // 2
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x8,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x5 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x6,x6 // 3
-#endif
- ldp x7,x8,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x9,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x6 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x7,x7 // 4
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x10,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x7 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x10,ror#18 // Sigma1(e)
- ror x10,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x10,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x8,x8 // 5
-#endif
- ldp x9,x10,[x1],#2*8
- add x23,x23,x17 // h+=Sigma0(a)
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x11,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x8 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x11,ror#18 // Sigma1(e)
- ror x11,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x11,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x9,x9 // 6
-#endif
- add x22,x22,x17 // h+=Sigma0(a)
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x12,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x9 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x12,ror#18 // Sigma1(e)
- ror x12,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x12,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x10,x10 // 7
-#endif
- ldp x11,x12,[x1],#2*8
- add x21,x21,x17 // h+=Sigma0(a)
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- eor x13,x25,x25,ror#23
- and x17,x26,x25
- bic x28,x27,x25
- add x20,x20,x10 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x13,ror#18 // Sigma1(e)
- ror x13,x21,#28
- add x20,x20,x17 // h+=Ch(e,f,g)
- eor x17,x21,x21,ror#5
- add x20,x20,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x24,x24,x20 // d+=h
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x13,x17,ror#34 // Sigma0(a)
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x20,x20,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x11,x11 // 8
-#endif
- add x20,x20,x17 // h+=Sigma0(a)
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x14,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x11 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x14,ror#18 // Sigma1(e)
- ror x14,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x14,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x12,x12 // 9
-#endif
- ldp x13,x14,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x15,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x12 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x15,ror#18 // Sigma1(e)
- ror x15,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x15,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x13,x13 // 10
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x0,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x13 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x0,ror#18 // Sigma1(e)
- ror x0,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x0,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x14,x14 // 11
-#endif
- ldp x15,x0,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x6,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x14 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x15,x15 // 12
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x7,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x15 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x0,x0 // 13
-#endif
- ldp x1,x2,[x1]
- add x23,x23,x17 // h+=Sigma0(a)
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x8,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x0 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x1,x1 // 14
-#endif
- ldr x6,[sp,#24]
- add x22,x22,x17 // h+=Sigma0(a)
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x9,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x1 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x2,x2 // 15
-#endif
- ldr x7,[sp,#0]
- add x21,x21,x17 // h+=Sigma0(a)
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
-.Loop_16_xx:
- ldr x8,[sp,#8]
- str x11,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x10,x5,#1
- and x17,x25,x24
- ror x9,x2,#19
- bic x19,x26,x24
- ror x11,x20,#28
- add x27,x27,x3 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x10,x10,x5,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x11,x11,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x9,x9,x2,ror#61
- eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x11,x20,ror#39 // Sigma0(a)
- eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
- add x4,x4,x13
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x4,x4,x10
- add x27,x27,x17 // h+=Sigma0(a)
- add x4,x4,x9
- ldr x9,[sp,#16]
- str x12,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x11,x6,#1
- and x17,x24,x23
- ror x10,x3,#19
- bic x28,x25,x23
- ror x12,x27,#28
- add x26,x26,x4 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x11,x11,x6,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x12,x12,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x10,x10,x3,ror#61
- eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x12,x27,ror#39 // Sigma0(a)
- eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
- add x5,x5,x14
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x5,x5,x11
- add x26,x26,x17 // h+=Sigma0(a)
- add x5,x5,x10
- ldr x10,[sp,#24]
- str x13,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x12,x7,#1
- and x17,x23,x22
- ror x11,x4,#19
- bic x19,x24,x22
- ror x13,x26,#28
- add x25,x25,x5 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x12,x12,x7,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x13,x13,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x11,x11,x4,ror#61
- eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x13,x26,ror#39 // Sigma0(a)
- eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
- add x6,x6,x15
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x6,x6,x12
- add x25,x25,x17 // h+=Sigma0(a)
- add x6,x6,x11
- ldr x11,[sp,#0]
- str x14,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x13,x8,#1
- and x17,x22,x21
- ror x12,x5,#19
- bic x28,x23,x21
- ror x14,x25,#28
- add x24,x24,x6 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x13,x13,x8,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x14,x14,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x12,x12,x5,ror#61
- eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x14,x25,ror#39 // Sigma0(a)
- eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
- add x7,x7,x0
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x7,x7,x13
- add x24,x24,x17 // h+=Sigma0(a)
- add x7,x7,x12
- ldr x12,[sp,#8]
- str x15,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x14,x9,#1
- and x17,x21,x20
- ror x13,x6,#19
- bic x19,x22,x20
- ror x15,x24,#28
- add x23,x23,x7 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x14,x14,x9,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x15,x15,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x13,x13,x6,ror#61
- eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x15,x24,ror#39 // Sigma0(a)
- eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
- add x8,x8,x1
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x8,x8,x14
- add x23,x23,x17 // h+=Sigma0(a)
- add x8,x8,x13
- ldr x13,[sp,#16]
- str x0,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x15,x10,#1
- and x17,x20,x27
- ror x14,x7,#19
- bic x28,x21,x27
- ror x0,x23,#28
- add x22,x22,x8 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x15,x15,x10,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x0,x0,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x14,x14,x7,ror#61
- eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x0,x23,ror#39 // Sigma0(a)
- eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
- add x9,x9,x2
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x9,x9,x15
- add x22,x22,x17 // h+=Sigma0(a)
- add x9,x9,x14
- ldr x14,[sp,#24]
- str x1,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x0,x11,#1
- and x17,x27,x26
- ror x15,x8,#19
- bic x19,x20,x26
- ror x1,x22,#28
- add x21,x21,x9 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x0,x0,x11,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x1,x1,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x15,x15,x8,ror#61
- eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x1,x22,ror#39 // Sigma0(a)
- eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
- add x10,x10,x3
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x10,x10,x0
- add x21,x21,x17 // h+=Sigma0(a)
- add x10,x10,x15
- ldr x15,[sp,#0]
- str x2,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x1,x12,#1
- and x17,x26,x25
- ror x0,x9,#19
- bic x28,x27,x25
- ror x2,x21,#28
- add x20,x20,x10 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x1,x1,x12,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x2,x2,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x0,x0,x9,ror#61
- eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x2,x21,ror#39 // Sigma0(a)
- eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
- add x11,x11,x4
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x11,x11,x1
- add x20,x20,x17 // h+=Sigma0(a)
- add x11,x11,x0
- ldr x0,[sp,#8]
- str x3,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x2,x13,#1
- and x17,x25,x24
- ror x1,x10,#19
- bic x19,x26,x24
- ror x3,x20,#28
- add x27,x27,x11 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x2,x2,x13,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x3,x3,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x1,x1,x10,ror#61
- eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x3,x20,ror#39 // Sigma0(a)
- eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
- add x12,x12,x5
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x12,x12,x2
- add x27,x27,x17 // h+=Sigma0(a)
- add x12,x12,x1
- ldr x1,[sp,#16]
- str x4,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x3,x14,#1
- and x17,x24,x23
- ror x2,x11,#19
- bic x28,x25,x23
- ror x4,x27,#28
- add x26,x26,x12 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x3,x3,x14,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x4,x4,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x2,x2,x11,ror#61
- eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x4,x27,ror#39 // Sigma0(a)
- eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
- add x13,x13,x6
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x13,x13,x3
- add x26,x26,x17 // h+=Sigma0(a)
- add x13,x13,x2
- ldr x2,[sp,#24]
- str x5,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x4,x15,#1
- and x17,x23,x22
- ror x3,x12,#19
- bic x19,x24,x22
- ror x5,x26,#28
- add x25,x25,x13 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x4,x4,x15,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x5,x5,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x3,x3,x12,ror#61
- eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x5,x26,ror#39 // Sigma0(a)
- eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
- add x14,x14,x7
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x14,x14,x4
- add x25,x25,x17 // h+=Sigma0(a)
- add x14,x14,x3
- ldr x3,[sp,#0]
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x5,x0,#1
- and x17,x22,x21
- ror x4,x13,#19
- bic x28,x23,x21
- ror x6,x25,#28
- add x24,x24,x14 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x5,x5,x0,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x6,x6,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x4,x4,x13,ror#61
- eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x25,ror#39 // Sigma0(a)
- eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
- add x15,x15,x8
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x15,x15,x5
- add x24,x24,x17 // h+=Sigma0(a)
- add x15,x15,x4
- ldr x4,[sp,#8]
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x6,x1,#1
- and x17,x21,x20
- ror x5,x14,#19
- bic x19,x22,x20
- ror x7,x24,#28
- add x23,x23,x15 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x6,x6,x1,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x7,x7,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x5,x5,x14,ror#61
- eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x24,ror#39 // Sigma0(a)
- eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
- add x0,x0,x9
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x0,x0,x6
- add x23,x23,x17 // h+=Sigma0(a)
- add x0,x0,x5
- ldr x5,[sp,#16]
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x7,x2,#1
- and x17,x20,x27
- ror x6,x15,#19
- bic x28,x21,x27
- ror x8,x23,#28
- add x22,x22,x0 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x7,x7,x2,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x8,x8,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x6,x6,x15,ror#61
- eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x23,ror#39 // Sigma0(a)
- eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
- add x1,x1,x10
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x1,x1,x7
- add x22,x22,x17 // h+=Sigma0(a)
- add x1,x1,x6
- ldr x6,[sp,#24]
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x8,x3,#1
- and x17,x27,x26
- ror x7,x0,#19
- bic x19,x20,x26
- ror x9,x22,#28
- add x21,x21,x1 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x8,x8,x3,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x9,x9,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x7,x7,x0,ror#61
- eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x22,ror#39 // Sigma0(a)
- eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
- add x2,x2,x11
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x2,x2,x8
- add x21,x21,x17 // h+=Sigma0(a)
- add x2,x2,x7
- ldr x7,[sp,#0]
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
- cbnz x19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#648 // rewind
-
- ldp x3,x4,[x0]
- ldp x5,x6,[x0,#2*8]
- add x1,x1,#14*8 // advance input pointer
- ldp x7,x8,[x0,#4*8]
- add x20,x20,x3
- ldp x9,x10,[x0,#6*8]
- add x21,x21,x4
- add x22,x22,x5
- add x23,x23,x6
- stp x20,x21,[x0]
- add x24,x24,x7
- add x25,x25,x8
- stp x22,x23,[x0,#2*8]
- add x26,x26,x9
- add x27,x27,x10
- cmp x1,x2
- stp x24,x25,[x0,#4*8]
- stp x26,x27,[x0,#6*8]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*8
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha512_block_data_order,.-sha512_block_data_order
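
The per-line comments in the scalar loop above (h+=K[i], h+=X[i], Ch(e,f,g), Sigma1(e), Maj(a,b,c), Sigma0(a)) spell out one SHA-512 round; the ror#1/ror#8/lsr#7 and ror#19/ror#61/lsr#6 combinations in .Loop_16_xx are the message-schedule functions sigma0 and sigma1. A compact C restatement of the round, assuming the standard FIPS 180-4 definitions; note that the code computes Maj via the identity ((b^c)&(a^b))^b, carrying a^b between rounds as the "magic seed":

	#include <stdint.h>

	static inline uint64_t ror64(uint64_t x, unsigned int n)
	{
		return (x >> n) | (x << (64 - n));
	}

	/* One round; s[0..7] hold a..h, Ki = K[i], Wi = X[i]. Sketch only. */
	static void sha512_round(uint64_t s[8], uint64_t Ki, uint64_t Wi)
	{
		uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
		uint64_t e = s[4], f = s[5], g = s[6], h = s[7];
		uint64_t Sigma1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
		uint64_t Sigma0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
		uint64_t Ch  = (e & f) ^ (~e & g);       /* the and/bic/orr triple */
		uint64_t Maj = ((b ^ c) & (a ^ b)) ^ b;  /* "(b^c)&=(a^b)" trick   */
		uint64_t T1  = h + Sigma1 + Ch + Ki + Wi;

		s[7] = g; s[6] = f; s[5] = e;
		s[4] = d + T1;                           /* "d+=h"                 */
		s[3] = c; s[2] = b; s[1] = a;
		s[0] = T1 + Sigma0 + Maj;                /* "h+=Sigma0(a)" etc.    */
	}
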
-
-.align 6
-.type .LK512,%object
-.LK512:
- .quad 0x428a2f98d728ae22,0x7137449123ef65cd
- .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
- .quad 0x3956c25bf348b538,0x59f111f1b605d019
- .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
- .quad 0xd807aa98a3030242,0x12835b0145706fbe
- .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
- .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
- .quad 0x9bdc06a725c71235,0xc19bf174cf692694
- .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
- .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
- .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
- .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
- .quad 0x983e5152ee66dfab,0xa831c66d2db43210
- .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
- .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
- .quad 0x06ca6351e003826f,0x142929670a0e6e70
- .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
- .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
- .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
- .quad 0x81c2c92e47edaee6,0x92722c851482353b
- .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
- .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
- .quad 0xd192e819d6ef5218,0xd69906245565a910
- .quad 0xf40e35855771202a,0x106aa07032bbd1b8
- .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
- .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
- .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
- .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
- .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
- .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
- .quad 0x90befffa23631e28,0xa4506cebde82bde9
- .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
- .quad 0xca273eceea26619c,0xd186b8c721c0c207
- .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
- .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
- .quad 0x113f9804bef90dae,0x1b710b35131c471b
- .quad 0x28db77f523047d84,0x32caab7b40c72493
- .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
- .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
- .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
- .quad 0 // terminator
-.size .LK512,.-.LK512
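
The .LK512 table is the 80 standard SHA-512 round constants, i.e. the leading 64 bits of the fractional parts of the cube roots of the first 80 primes. None of them is zero, so the trailing .quad 0 can double as a sentinel: each round post-increments x30 through the table via "ldr xN,[x30],#8", and the "cbnz x19,.Loop_16_xx" after each unrolled group of 16 rounds stops once the sentinel has been fetched. A sketch of that control flow, with round() standing in for the round body and schedule update:

	const uint64_t *kp = K;          /* 80 constants plus the zero sentinel */
	uint64_t knext = *kp++;          /* preloaded before .Loop              */

	do {
		for (int i = 0; i < 16; i++) {
			uint64_t k = knext;

			knext = *kp++;   /* "*K++ ... in next round" */
			round(state, k, W[i]);
		}
	} while (knext);                 /* zero terminator ends round 80 */
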
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 8a078fc662ac..7e157ab6cd50 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -3,12 +3,10 @@
#define __ASM_ALTERNATIVE_MACROS_H
#include <asm/cpucaps.h>
+#include <asm/insn-def.h>
#define ARM64_CB_PATCH ARM64_NCAPS
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE 4
-
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
@@ -197,11 +195,6 @@ alternative_endif
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-.macro user_alt, label, oldinstr, newinstr, cond
-9999: alternative_insn "\oldinstr", "\newinstr", \cond
- _asm_extable 9999b, \label
-.endm
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 934b9be582d2..4ad22c3135db 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
#define gic_read_lpir(c) readq_relaxed(c)
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
-#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
#define gits_read_baser(c) readq_relaxed(c)
#define gits_write_baser(v, c) writeq_relaxed(v, c)
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 1c9a3a0c5fa5..ec1d9655f885 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __lshrti3(long long a, int b);
+/*
+ * This function uses a custom calling convention and cannot be called from C,
+ * so this prototype is not entirely accurate.
+ */
+void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
+
#endif /* __ASM_PROTOTYPES_H */
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 8ca2dc0661ee..f1bba5fc61c4 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -7,19 +7,7 @@
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
-#ifdef CONFIG_ARM64_PTR_AUTH
-/*
- * thread.keys_user.ap* as offset exceeds the #imm offset range
- * so use the base value of ldp as thread.keys_user and offset as
- * thread.keys_user.ap*.
- */
- .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
- mov \tmp1, #THREAD_KEYS_USER
- add \tmp1, \tsk, \tmp1
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
- msr_s SYS_APIAKEYLO_EL1, \tmp2
- msr_s SYS_APIAKEYHI_EL1, \tmp3
- .endm
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_KERNEL
@@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
alternative_else_nop_endif
.endm
+#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+ .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+ .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * thread.keys_user.ap* as offset exceeds the #imm offset range
+ * so use the base value of ldp as thread.keys_user and offset as
+ * thread.keys_user.ap*.
+ */
+ .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ mov \tmp1, #THREAD_KEYS_USER
+ add \tmp1, \tsk, \tmp1
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+ msr_s SYS_APIAKEYLO_EL1, \tmp2
+ msr_s SYS_APIAKEYHI_EL1, \tmp3
+ .endm
+
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
mrs \tmp1, id_aa64isar1_el1
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -64,17 +79,11 @@ alternative_else_nop_endif
.Lno_addr_auth\@:
.endm
-#else /* CONFIG_ARM64_PTR_AUTH */
+#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.endm
- .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
- .endm
-
- .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
- .endm
-
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASM_ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8418c1bd8f04..89faca0e740d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -130,15 +130,27 @@ alternative_endif
.endm
/*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
*/
- .macro _asm_extable, from, to
+ .macro _asm_extable, insn, fixup
.pushsection __ex_table, "a"
.align 3
- .long (\from - .), (\to - .)
+ .long (\insn - .), (\fixup - .)
.popsection
.endm
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+ .macro _cond_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable \insn, \fixup
+ .endif
+ .endm
+
+
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
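
The ".long (\insn - .), (\fixup - .)" pair makes each entry self-relative: both fields are 32-bit displacements from their own location, so the table is position-independent and half the size of absolute 64-bit pointers. In C terms (the struct matches the kernel's arm64 layout; ex_addr() is illustrative):

	struct exception_table_entry {
		int insn, fixup;        /* offsets relative to each field */
	};

	/* Invert ".long (\insn - .)": target = field address + displacement. */
	static inline unsigned long ex_addr(const int *field)
	{
		return (unsigned long)field + *field;
	}
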
@@ -232,17 +244,25 @@ lr .req x30 // link register
* @dst: destination register
*/
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
mrs \dst, tpidr_el2
.endm
#else
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \dst, tpidr_el1
alternative_else
mrs \dst, tpidr_el2
alternative_endif
.endm
+
+ .macro set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, \src
+alternative_else
+ msr tpidr_el2, \src
+alternative_endif
+ .endm
#endif
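
The get_this_cpu_offset/set_this_cpu_offset pair makes the per-CPU base explicit: TPIDR_EL1 normally, TPIDR_EL2 when running with VHE or inside the KVM hypervisor. adr_this_cpu below then forms a per-CPU address as the symbol address plus that offset; in C terms (a sketch, with read_tpidr() standing in for the mrs):

	static inline void *this_cpu_ptr_sketch(void *sym)
	{
		/* each CPU's copy of the percpu area lives at a fixed
		 * displacement recorded in its TPIDR register */
		return (char *)sym + read_tpidr();
	}
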
/*
@@ -253,7 +273,7 @@ alternative_endif
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm
@@ -264,7 +284,7 @@ alternative_endif
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm
@@ -375,51 +395,53 @@ alternative_cb_end
bfi \tcr, \tmp0, \pos, #3
.endm
+ .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \addr
+alternative_else
+ dc civac, \addr
+alternative_endif
+ .endm
+
/*
* Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
*
* op: operation passed to dc instruction
 * domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
*/
- .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
- dc \op, \kaddr
-alternative_else
- dc civac, \kaddr
-alternative_endif
- .endm
-
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998:
+ bic \start, \start, \tmp2
+.Ldcache_op\@:
.ifc \op, cvau
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvac
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvap
- sys 3, c7, c12, 1, \kaddr // dc cvap
+ sys 3, c7, c12, 1, \start // dc cvap
.else
.ifc \op, cvadp
- sys 3, c7, c13, 1, \kaddr // dc cvadp
+ sys 3, c7, c13, 1, \start // dc cvadp
.else
- dc \op, \kaddr
+ dc \op, \start
.endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
+ add \start, \start, \tmp1
+ cmp \start, \end
+ b.lo .Ldcache_op\@
dsb \domain
+
+ _cond_extable .Ldcache_op\@, \fixup
.endm
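
In C terms, the rewritten loop aligns start down to a cache-line boundary, issues one dc per line up to (but excluding) end, then orders the result with a dsb. A sketch, assuming line is the minimum D-cache line size that dcache_line_size derives from CTR_EL0.DminLine, with dc_op()/dsb_domain() standing in for the instructions:

	static void dcache_by_line(unsigned long start, unsigned long end,
				   unsigned long line)
	{
		unsigned long addr = start & ~(line - 1); /* "bic \start, \start, \tmp2" */

		for (; addr < end; addr += line)          /* "b.lo .Ldcache_op\@" */
			dc_op(addr);
		dsb_domain();
	}
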
/*
@@ -427,20 +449,22 @@ alternative_endif
* [start, end)
*
* start, end: virtual addresses describing the region
- * label: A label to branch to on user fault.
+ * fixup: optional label to branch to on user fault
* Corrupts: tmp1, tmp2
*/
- .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+ .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
icache_line_size \tmp1, \tmp2
sub \tmp2, \tmp1, #1
bic \tmp2, \start, \tmp2
-9997:
-USER(\label, ic ivau, \tmp2) // invalidate I line PoU
+.Licache_op\@:
+ ic ivau, \tmp2 // invalidate I line PoU
add \tmp2, \tmp2, \tmp1
cmp \tmp2, \end
- b.lo 9997b
+ b.lo .Licache_op\@
dsb ish
isb
+
+ _cond_extable .Licache_op\@, \fixup
.endm
/*
@@ -745,7 +769,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
cbz \tmp, \lbl
#endif
adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- this_cpu_offset \tmp2
+ get_this_cpu_offset \tmp2
ldr w\tmp, [\tmp, \tmp2]
cbnz w\tmp, \lbl // yield on pending softirq in task context
.Lnoyield_\@:
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index b56a4b2bc248..c9979273d389 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -223,6 +223,4 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define ARCH_ATOMIC
-
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2175ec0004ed..451e11e5fd23 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
* This insanity brought to you by speculative system register reads,
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
*
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
*/
#define arch_counter_enforce_ordering(val) do { \
u64 tmp, _val = (val); \
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a074459f8f2f..a9c0716e7440 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN (128)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 52e5c1623224..543c997eb3b7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -30,45 +30,58 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT I-cache.
*
- * flush_icache_range(start, end)
+ * All functions below apply to the interval [start, end)
+ * - start - virtual start address (inclusive)
+ * - end - virtual end address (exclusive)
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_pou(start, end)
*
- * invalidate_icache_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
*
- * Invalidate the I-cache in the region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_user_pou(start, end)
*
- * __flush_cache_user_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
+ * Use only if the region might access user memory.
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * icache_inval_pou(start, end)
*
- * __flush_dcache_area(kaddr, size)
+ * Invalidate I-cache region to the Point of Unification.
*
- * Ensure that the data held in page is written back.
- * - kaddr - page address
- * - size - region size
+ * dcache_clean_inval_poc(start, end)
+ *
+ * Clean and invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_inval_poc(start, end)
+ *
+ * Invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_poc(start, end)
+ *
+ * Clean D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_pop(start, end)
+ *
+ * Clean D-cache region to the Point of Persistence.
+ *
+ * dcache_clean_pou(start, end)
+ *
+ * Clean D-cache region to the Point of Unification.
*/
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
-extern void sync_icache_aliases(void *kaddr, unsigned long len);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(unsigned long start, unsigned long end);
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
- __flush_icache_range(start, end);
+ caches_clean_inval_pou(start, end);
/*
* IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
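
All of the renamed helpers take a half-open [start, end) virtual range rather than the old (addr, len) pair; end is one past the last byte to maintain. Call sites convert the way the gic_flush_dcache_to_poc() hunk above does, e.g. (a sketch for any remaining (addr, len) caller):

	#define clean_inval_to_poc(addr, len) \
		dcache_clean_inval_poc((unsigned long)(addr), \
				       (unsigned long)(addr) + (len))
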
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..dc3ea4080e2e 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -23,4 +23,20 @@
#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+#ifdef CONFIG_CFI_CLANG
+/*
+ * With CONFIG_CFI_CLANG, the compiler replaces function address
+ * references with the address of the function's CFI jump table
+ * entry. The function_nocfi macro always returns the address of the
+ * actual function instead.
+ */
+#define function_nocfi(x) ({ \
+ void *addr; \
+ asm("adrp %0, " __stringify(x) "\n\t" \
+ "add %0, %0, :lo12:" __stringify(x) \
+ : "=r" (addr)); \
+ addr; \
+})
+#endif
+
#endif /* __ASM_COMPILER_H */
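
A hypothetical use of function_nocfi(): when an address will be branched to outside a normal C call (firmware, an idle/resume trampoline, an MMU-off path), the raw entry point is wanted rather than the CFI jump-table slot the compiler would otherwise substitute:

	extern void my_lowlevel_entry(void);    /* assumed example function */

	void example(void)
	{
		/* adrp/add resolve the real symbol, bypassing the jump table */
		void *entry = function_nocfi(my_lowlevel_entry);

		jump_to(entry);                 /* stand-in for the consumer */
	}
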
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 7faae6ff3ab4..0f6d16faa540 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -12,26 +12,7 @@
/*
* Records attributes of an individual CPU.
*/
-struct cpuinfo_arm64 {
- struct cpu cpu;
- struct kobject kobj;
- u32 reg_ctr;
- u32 reg_cntfrq;
- u32 reg_dczid;
- u32 reg_midr;
- u32 reg_revidr;
-
- u64 reg_id_aa64dfr0;
- u64 reg_id_aa64dfr1;
- u64 reg_id_aa64isar0;
- u64 reg_id_aa64isar1;
- u64 reg_id_aa64mmfr0;
- u64 reg_id_aa64mmfr1;
- u64 reg_id_aa64mmfr2;
- u64 reg_id_aa64pfr0;
- u64 reg_id_aa64pfr1;
- u64 reg_id_aa64zfr0;
-
+struct cpuinfo_32bit {
u32 reg_id_dfr0;
u32 reg_id_dfr1;
u32 reg_id_isar0;
@@ -54,6 +35,30 @@ struct cpuinfo_arm64 {
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
+};
+
+struct cpuinfo_arm64 {
+ struct cpu cpu;
+ struct kobject kobj;
+ u64 reg_ctr;
+ u64 reg_cntfrq;
+ u64 reg_dczid;
+ u64 reg_midr;
+ u64 reg_revidr;
+ u64 reg_gmid;
+
+ u64 reg_id_aa64dfr0;
+ u64 reg_id_aa64dfr1;
+ u64 reg_id_aa64isar0;
+ u64 reg_id_aa64isar1;
+ u64 reg_id_aa64mmfr0;
+ u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
+ u64 reg_id_aa64pfr0;
+ u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64zfr0;
+
+ struct cpuinfo_32bit aarch32;
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
u64 reg_zcr;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 338840c00e8e..9bb9d11750d7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -619,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
return val > 0;
}
+static inline bool id_aa64pfr1_mte(u64 pfr1)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+
+ return val >= ID_AA64PFR1_MTE;
+}
+
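
ID register feature fields are 4 bits wide, so the extraction above reduces to a shift and mask, and the test passes when the MTE field is at least ID_AA64PFR1_MTE, i.e. full allocation-tag MTE rather than the instructions-only level. A sketch of the helper's effect, assuming the default 4-bit unsigned field:

	static inline unsigned int pfr1_field(u64 pfr1, int shift)
	{
		return (pfr1 >> shift) & 0xf;
	}
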
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
@@ -630,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ id_aa64pfr0_32bit_el0(pfr0);
}
static inline bool system_supports_4kb_granule(void)
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 3c5ddb429ea2..14a19d1141bd 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -18,4 +18,39 @@ static inline int arm_cpuidle_suspend(int index)
return -EOPNOTSUPP;
}
#endif
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#include <asm/arch_gicv3.h>
+
+struct arm_cpuidle_irq_context {
+ unsigned long pmr;
+ unsigned long daif_bits;
+};
+
+#define arm_cpuidle_save_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ c->daif_bits = read_sysreg(daif); \
+ write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, \
+ daif); \
+ c->pmr = gic_read_pmr(); \
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); \
+ } \
+ } while (0)
+
+#define arm_cpuidle_restore_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ gic_write_pmr(c->pmr); \
+ write_sysreg(c->daif_bits, daif); \
+ } \
+ } while (0)
+#else
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+#endif
#endif
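A hedged usage sketch of the pair above: a cpuidle entry path saves the context, enters the low-power state, then restores, so all of the PMR/DAIF juggling under CONFIG_ARM64_PSEUDO_NMI stays inside the macros. enter_lowpower() is a placeholder for the real suspend hook:

    static int idle_enter_sketch(int index)
    {
        struct arm_cpuidle_irq_context context;
        int ret;

        arm_cpuidle_save_irq_context(&context);    /* mask via DAIF, park PMR */
        ret = enter_lowpower(index);               /* hypothetical suspend call */
        arm_cpuidle_restore_irq_context(&context); /* put PMR/DAIF back */

        return ret;
    }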
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3578aba9c608..1bed37eb013a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- __flush_dcache_area(addr, size);
+ dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 6546158d2f2d..4afbc45b8bb0 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
-asmlinkage void el1_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
+
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+ void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
-void arm64_enter_nmi(struct pt_regs *regs);
-void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
@@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
+void do_serror(struct pt_regs *regs, unsigned int esr);
+
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 2599504674b5..c072161d5c65 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,7 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
-extern void sve_flush_live(void);
+extern void sve_flush_live(unsigned long vq_minus_1);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index a2563992d2dc..059204477ce6 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -213,8 +213,10 @@
mov v\nz\().16b, v\nz\().16b
.endm
-.macro sve_flush
+.macro sve_flush_z
_for n, 0, 31, _sve_flush_z \n
+.endm
+.macro sve_flush_p_ffr
_for n, 0, 15, _sve_pfalse \n
_sve_wrffr 0
.endm
diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h
new file mode 100644
index 000000000000..2c075f615c6a
--- /dev/null
+++ b/arch/arm64/include/asm/insn-def.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4ebb9c054ccc..6b776c8667b2 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -10,7 +10,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-#include <asm/alternative.h>
+#include <asm/insn-def.h>
#ifndef __ASSEMBLY__
/*
@@ -30,6 +30,7 @@
*/
enum aarch64_insn_encoding_class {
AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_SVE, /* SVE instructions */
AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
@@ -294,6 +295,12 @@ __AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
+__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000)
+__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
+__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00)
+__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
+__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
+__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
@@ -302,6 +309,8 @@ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
+__AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000)
+__AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -334,6 +343,7 @@ __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(mov_reg, 0x7FE0FFE0, 0x2A0003E0)
__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
@@ -368,6 +378,14 @@ __AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
+__AARCH64_INSN_FUNCS(dmb, 0xFFFFF0FF, 0xD50330BF)
+__AARCH64_INSN_FUNCS(dsb_base, 0xFFFFF0FF, 0xD503309F)
+__AARCH64_INSN_FUNCS(dsb_nxs, 0xFFFFF3FF, 0xD503323F)
+__AARCH64_INSN_FUNCS(isb, 0xFFFFF0FF, 0xD50330DF)
+__AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF)
+__AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F)
+__AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F)
+__AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F)
#undef __AARCH64_INSN_FUNCS
@@ -379,8 +397,47 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn)
return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
}
-int aarch64_insn_read(void *addr, u32 *insnp);
-int aarch64_insn_write(void *addr, u32 insn);
+static inline bool aarch64_insn_is_dsb(u32 insn)
+{
+ return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn);
+}
+
+static inline bool aarch64_insn_is_barrier(u32 insn)
+{
+ return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) ||
+ aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) ||
+ aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) ||
+ aarch64_insn_is_pssbb(insn);
+}
+
+static inline bool aarch64_insn_is_store_single(u32 insn)
+{
+ return aarch64_insn_is_store_imm(insn) ||
+ aarch64_insn_is_store_pre(insn) ||
+ aarch64_insn_is_store_post(insn);
+}
+
+static inline bool aarch64_insn_is_store_pair(u32 insn)
+{
+ return aarch64_insn_is_stp(insn) ||
+ aarch64_insn_is_stp_pre(insn) ||
+ aarch64_insn_is_stp_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_single(u32 insn)
+{
+ return aarch64_insn_is_load_imm(insn) ||
+ aarch64_insn_is_load_pre(insn) ||
+ aarch64_insn_is_load_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_pair(u32 insn)
+{
+ return aarch64_insn_is_ldp(insn) ||
+ aarch64_insn_is_ldp_pre(insn) ||
+ aarch64_insn_is_ldp_post(insn);
+}
+
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
bool aarch64_insn_uses_literal(u32 insn);
bool aarch64_insn_is_branch(u32 insn);
@@ -487,9 +544,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-
s32 aarch64_insn_adrp_get_offset(u32 insn);
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
@@ -506,6 +560,7 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn);
typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
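The __AARCH64_INSN_FUNCS() entries above are (mask, value) pairs: an instruction word matches when (insn & mask) == value. A standalone check using the dmb and isb encodings from this hunk; 0xd5033bbf (dmb ish) and 0xd5033fdf (isb) are well-known A64 words:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* the (mask, value) test generated by __AARCH64_INSN_FUNCS */
    static bool insn_is_dmb(uint32_t insn)
    {
        return (insn & 0xFFFFF0FFu) == 0xD50330BFu; /* CRm (domain) is don't-care */
    }

    static bool insn_is_isb(uint32_t insn)
    {
        return (insn & 0xFFFFF0FFu) == 0xD50330DFu;
    }

    int main(void)
    {
        uint32_t dmb_ish = 0xD5033BBFu;
        uint32_t isb_sy  = 0xD5033FDFu;

        printf("dmb ish: dmb=%d isb=%d\n", insn_is_dmb(dmb_ish), insn_is_isb(dmb_ish));
        printf("isb:     dmb=%d isb=%d\n", insn_is_dmb(isb_sy), insn_is_isb(isb_sy));
        return 0; /* prints dmb=1 isb=0, then dmb=0 isb=1 */
    }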
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index d44df9d62fc9..3512184cfec1 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,9 +18,9 @@
* 64K (section size = 512M).
*/
#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS 1
#else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS 0
#endif
/*
@@ -33,7 +33,7 @@
* VA range, so pages required to map highest possible PA are reserved in all
* cases.
*/
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
@@ -90,9 +90,9 @@
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
/* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
-#define SWAPPER_BLOCK_SIZE SECTION_SIZE
+#if ARM64_KERNEL_USES_PMD_MAPS
+#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
+#define SWAPPER_BLOCK_SIZE PMD_SIZE
#define SWAPPER_TABLE_SHIFT PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
@@ -100,16 +100,13 @@
#define SWAPPER_TABLE_SHIFT PMD_SHIFT
#endif
-/* The size of the initial kernel direct mapping */
-#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
-
/*
* Initial memory map attributes.
*/
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
@@ -125,7 +122,7 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT PMD_SHIFT
#endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 692c9049befa..d436831dd706 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,7 +12,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
-#define HCR_ATA (UL(1) << 56)
+#define HCR_ATA_SHIFT 56
+#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index cf8df032b9c3..9f0bf2109be7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -8,6 +8,7 @@
#define __ARM_KVM_ASM_H__
#include <asm/hyp_image.h>
+#include <asm/insn.h>
#include <asm/virt.h>
#define ARM_EXIT_WITH_SERROR_BIT 31
@@ -63,6 +64,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#ifndef __ASSEMBLY__
@@ -201,6 +203,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..fd418955e31e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
+
+ if (kvm_has_mte(vcpu->kvm))
+ vcpu->arch.hcr_el2 |= HCR_ATA;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -463,4 +466,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+ return test_bit(feature, vcpu->arch.features);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7cd7d5c8c4bc..41911585ae0c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -132,6 +133,9 @@ struct kvm_arch {
u8 pfr0_csv2;
u8 pfr0_csv3;
+
+ /* Memory Tagging Extension enabled for the guest */
+ bool mte_enabled;
};
struct kvm_vcpu_fault_info {
@@ -206,6 +210,12 @@ enum vcpu_sysreg {
CNTP_CVAL_EL0,
CNTP_CTL_EL0,
+ /* Memory Tagging Extension registers */
+ RGSR_EL1, /* Random Allocation Tag Seed Register */
+ GCR_EL1, /* Tag Control Register */
+ TFSR_EL1, /* Tag Fault Status Register (EL1) */
+ TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -556,16 +566,11 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
+ struct kvm_vcpu_stat_generic generic;
u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
@@ -721,6 +726,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags);
+
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -769,6 +777,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 25ed956f9af1..b52c5c4b9a3d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -180,17 +180,16 @@ static inline void *__kvm_vector_slot2addr(void *base,
struct kvm;
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+static inline void __clean_dcache_guest_page(void *va, size_t size)
{
- void *va = page_address(pfn_to_page(pfn));
-
/*
* With FWB, we ensure that the guest always accesses memory using
* cacheable attributes, and we don't have to clean to PoC when
@@ -203,18 +202,14 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
kvm_flush_dcache_to_poc(va, size);
}
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
- unsigned long size)
+static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
if (icache_is_aliasing()) {
/* any kind of VIPT cache */
- __flush_icache_all();
+ icache_inval_all_pou();
} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
- void *va = page_address(pfn_to_page(pfn));
-
- invalidate_icache_range((unsigned long)va,
- (unsigned long)va + size);
+ icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}
}
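Restated as plain C, with the cache-topology predicates passed in as booleans rather than derived from CTR_EL0 (a paraphrase, not the real code): aliasing VIPT I-caches cannot be invalidated safely by VA range, so the whole I-cache goes; PIPT, or VPIPT when running at EL2, can be invalidated precisely; VPIPT at EL1 is left to the TLB invalidation path mentioned in the comment:

    /* sketch: aliasing / pipt_or_vpipt_el2 stand in for icache_is_aliasing()
     * and the is_kernel_in_hyp_mode() || !icache_is_vpipt() test */
    static void invalidate_icache_sketch(void *va, size_t size,
                                         bool aliasing, bool pipt_or_vpipt_el2)
    {
        if (aliasing)
            icache_inval_all_pou();  /* any alias may hold stale lines */
        else if (pipt_or_vpipt_el2)
            icache_inval_pou((unsigned long)va, (unsigned long)va + size);
        /* else: VPIPT at EL1, covered by __kvm_tlb_flush_vmid_ipa() */
    }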
diff --git a/arch/arm64/include/asm/kvm_mte.h b/arch/arm64/include/asm/kvm_mte.h
new file mode 100644
index 000000000000..de002636eb1f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mte.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+#ifndef __ASM_KVM_MTE_H
+#define __ASM_KVM_MTE_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_MTE
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\h_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\g_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+.L__skip_switch\@:
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\g_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\h_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+ isb
+
+.L__skip_switch\@:
+.endm
+
+#else /* !CONFIG_ARM64_MTE */
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+.endm
+
+#endif /* CONFIG_ARM64_MTE */
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_KVM_MTE_H */
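In C terms the guest-entry macro above does the following (a hedged paraphrase: the real code has to run as assembly on the world-switch path, and the accessors below simply mirror what the CPU_RGSR_EL1/CPU_GCR_EL1 asm offsets index):

    /* pseudo-C for mte_switch_to_guest */
    if (!cpus_have_final_cap(ARM64_MTE))
        return;                                /* alternative patches this out */
    if (!(read_sysreg(hcr_el2) & HCR_ATA))
        return;                                /* MTE not enabled for the guest */

    /* save the host's tag-generation state, then install the guest's */
    ctxt_sys_reg(host_ctxt, RGSR_EL1) = read_sysreg_s(SYS_RGSR_EL1);
    ctxt_sys_reg(host_ctxt, GCR_EL1)  = read_sysreg_s(SYS_GCR_EL1);
    write_sysreg_s(ctxt_sys_reg(guest_ctxt, RGSR_EL1), SYS_RGSR_EL1);
    write_sysreg_s(ctxt_sys_reg(guest_ctxt, GCR_EL1),  SYS_GCR_EL1);

mte_switch_to_hyp is the mirror image, with an extra isb once the host state is back.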
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index c3674c47d48c..f004c0115d89 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -27,23 +27,29 @@ typedef u64 kvm_pte_t;
/**
* struct kvm_pgtable_mm_ops - Memory management callbacks.
- * @zalloc_page: Allocate a single zeroed memory page. The @arg parameter
- * can be used by the walker to pass a memcache. The
- * initial refcount of the page is 1.
- * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages. The
- * @size parameter is in bytes, and is rounded-up to the
- * next page boundary. The resulting allocation is
- * physically contiguous.
- * @free_pages_exact: Free an exact number of memory pages previously
- * allocated by zalloc_pages_exact.
- * @get_page: Increment the refcount on a page.
- * @put_page: Decrement the refcount on a page. When the refcount
- * reaches 0 the page is automatically freed.
- * @page_count: Return the refcount of a page.
- * @phys_to_virt: Convert a physical address into a virtual address mapped
- * in the current context.
- * @virt_to_phys: Convert a virtual address mapped in the current context
- * into a physical address.
+ * @zalloc_page: Allocate a single zeroed memory page.
+ * The @arg parameter can be used by the walker
+ * to pass a memcache. The initial refcount of
+ * the page is 1.
+ * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages.
+ * The @size parameter is in bytes, and is rounded
+ * up to the next page boundary. The resulting
+ * allocation is physically contiguous.
+ * @free_pages_exact: Free an exact number of memory pages previously
+ * allocated by zalloc_pages_exact.
+ * @get_page: Increment the refcount on a page.
+ * @put_page: Decrement the refcount on a page. When the
+ * refcount reaches 0 the page is automatically
+ * freed.
+ * @page_count: Return the refcount of a page.
+ * @phys_to_virt: Convert a physical address into a virtual
+ * address mapped in the current context.
+ * @virt_to_phys: Convert a virtual address mapped in the current
+ * context into a physical address.
+ * @dcache_clean_inval_poc: Clean and invalidate the data cache to the PoC
+ * for the specified memory address range.
+ * @icache_inval_pou: Invalidate the instruction cache to the PoU
+ * for the specified memory address range.
*/
struct kvm_pgtable_mm_ops {
void* (*zalloc_page)(void *arg);
@@ -54,6 +60,8 @@ struct kvm_pgtable_mm_ops {
int (*page_count)(void *addr);
void* (*phys_to_virt)(phys_addr_t phys);
phys_addr_t (*virt_to_phys)(void *addr);
+ void (*dcache_clean_inval_poc)(void *addr, size_t size);
+ void (*icache_inval_pou)(void *addr, size_t size);
};
/**
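A hedged sketch of how a host might wire up the two new callbacks when filling in its ops table; the stage2_* wrapper names are placeholders, the point being only that the callbacks take (addr, size) while the CMO helpers take an address range:

    static void stage2_dcache_clean_inval_poc(void *addr, size_t size)
    {
        dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
    }

    static void stage2_icache_inval_pou(void *addr, size_t size)
    {
        icache_inval_pou((unsigned long)addr, (unsigned long)addr + size);
    }

    static struct kvm_pgtable_mm_ops sketch_mm_ops = {
        /* ... allocation/refcount callbacks elided ... */
        .dcache_clean_inval_poc = stage2_dcache_clean_inval_poc,
        .icache_inval_pou       = stage2_icache_inval_pou,
    };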
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index ba89a9af820a..9906541a6861 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -56,8 +56,16 @@
SYM_FUNC_START_ALIAS(__pi_##x); \
SYM_FUNC_START_WEAK(x)
+#define SYM_FUNC_START_WEAK_ALIAS_PI(x) \
+ SYM_FUNC_START_ALIAS(__pi_##x); \
+ SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN)
+
#define SYM_FUNC_END_PI(x) \
SYM_FUNC_END(x); \
SYM_FUNC_END_ALIAS(__pi_##x)
+#define SYM_FUNC_END_ALIAS_PI(x) \
+ SYM_FUNC_END_ALIAS(x); \
+ SYM_FUNC_END_ALIAS(__pi_##x)
+
#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 87b90dc27a43..1a35a4473598 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -135,10 +135,8 @@
#define MT_NORMAL 0
#define MT_NORMAL_TAGGED 1
#define MT_NORMAL_NC 2
-#define MT_NORMAL_WT 3
-#define MT_DEVICE_nGnRnE 4
-#define MT_DEVICE_nGnRE 5
-#define MT_DEVICE_GRE 6
+#define MT_DEVICE_nGnRnE 3
+#define MT_DEVICE_nGnRE 4
/*
* Memory types for Stage-2 translation
@@ -323,22 +321,6 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
-#ifdef CONFIG_CFI_CLANG
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function address
- * references with the address of the function's CFI jump table
- * entry. The function_nocfi macro always returns the address of the
- * actual function instead.
- */
-#define function_nocfi(x) ({ \
- void *addr; \
- asm("adrp %0, " __stringify(x) "\n\t" \
- "add %0, %0, :lo12:" __stringify(x) \
- : "=r" (addr)); \
- addr; \
-})
-#endif
-
/*
* virt_to_page(x) convert a _valid_ virtual address to struct page *
* virt_addr_valid(x) indicates whether a virtual address is valid
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3cef9133539..eeb210997149 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
return;
if (mm == &init_mm)
- ttbr = __pa_symbol(reserved_pg_dir);
+ ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
else
- ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 810045628c66..a11ccadd47d2 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,20 @@
-#ifdef CONFIG_ARM64_MODULE_PLTS
SECTIONS {
+#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 (NOLOAD) : { BYTE(0) }
.init.plt 0 (NOLOAD) : { BYTE(0) }
.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
-}
#endif
+
+#ifdef CONFIG_KASAN_SW_TAGS
+ /*
+ * Outlined checks go into comdat-deduplicated sections named .text.hot.
+ * Because they are in comdats they are not combined by the linker and
+ * we otherwise end up with multiple sections with the same .text.hot
+ * name in the .ko file. The kernel module loader warns if it sees
+ * multiple sections with the same name, so we use this SECTIONS
+ * directive to force them into a single section and silence the
+ * warning.
+ */
+ .text.hot : { *(.text.hot) }
+#endif
+}
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index cf241b0f0a42..626d359b396e 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -7,6 +7,7 @@
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
+#define MTE_GRANULES_PER_PAGE (PAGE_SIZE / MTE_GRANULE_SIZE)
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
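These constants pin down the MTE geometry: one 4-bit tag per 16-byte granule, with the logical tag carried in bits 59:56 of a pointer. A standalone arithmetic check, assuming 4K pages for the demo:

    #include <stdint.h>
    #include <stdio.h>

    #define MTE_GRANULE_SIZE 16ull
    #define MTE_TAG_SHIFT    56
    #define DEMO_PAGE_SIZE   4096ull /* 4K pages assumed */

    int main(void)
    {
        /* MTE_GRANULES_PER_PAGE: 4096 / 16 = 256 tags per page */
        printf("granules per page: %llu\n",
               (unsigned long long)(DEMO_PAGE_SIZE / MTE_GRANULE_SIZE));

        /* pulling the tag out of a (made-up) tagged pointer */
        uint64_t ptr = 0x0a00ffff80001230ull;
        printf("tag: 0x%llx\n",
               (unsigned long long)((ptr >> MTE_TAG_SHIFT) & 0xf)); /* 0xa */
        return 0;
    }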
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index ddd4d17cf9a0..d952352bd008 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -48,43 +48,84 @@ static inline u8 mte_get_random_tag(void)
return mte_get_ptr_tag(addr);
}
+static inline u64 __stg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline u64 __stzg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline void __dc_gva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
+}
+
+static inline void __dc_gzva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
+}
+
/*
* Assign allocation tags for a region of memory based on the pointer tag.
* Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ * size must be MTE_GRANULE_SIZE aligned.
*/
-static inline void mte_set_mem_tag_range(void *addr, size_t size,
- u8 tag, bool init)
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
+ bool init)
{
- u64 curr, end;
+ u64 curr, mask, dczid_bs, end1, end2, end3;
- if (!size)
- return;
+ /* Read DC G(Z)VA block size from the system register. */
+ dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
curr = (u64)__tag_set(addr, tag);
- end = curr + size;
+ mask = dczid_bs - 1;
+ /* STG/STZG up to the end of the first block. */
+ end1 = curr | mask;
+ end3 = curr + size;
+ /* DC GVA / GZVA in [end1, end2) */
+ end2 = end3 & ~mask;
/*
- * 'asm volatile' is required to prevent the compiler to move
- * the statement outside of the loop.
+ * The following code uses STG on the first DC GVA block even if the
+ * start address is aligned - it appears to be faster than an alignment
+ * check + conditional branch. Also, if the range size is at least 2 DC
+ * GVA blocks, the first two loops can use post-condition to save one
+ * branch each.
*/
- if (init) {
- do {
- asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
- } else {
- do {
- asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
- }
+#define SET_MEMTAG_RANGE(stg_post, dc_gva) \
+ do { \
+ if (size >= 2 * dczid_bs) { \
+ do { \
+ curr = stg_post(curr); \
+ } while (curr < end1); \
+ \
+ do { \
+ dc_gva(curr); \
+ curr += dczid_bs; \
+ } while (curr < end2); \
+ } \
+ \
+ while (curr < end3) \
+ curr = stg_post(curr); \
+ } while (0)
+
+ if (init)
+ SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
+ else
+ SET_MEMTAG_RANGE(__stg_post, __dc_gva);
+#undef SET_MEMTAG_RANGE
}
void mte_enable_kernel_sync(void);
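To make the end1/end2/end3 carving concrete, here is a standalone walk of the loop structure from mte_set_mem_tag_range() above, counting operations instead of setting tags. The 64-byte block size stands in for a hypothetical DCZID_EL0 reading:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dczid_bs = 64;       /* 4ul << (DCZID_EL0 & 0xf), assumed */
        uint64_t mask = dczid_bs - 1;
        uint64_t curr = 0x1010;       /* granule-aligned start */
        uint64_t size = 0x1f0;        /* granule-aligned length (496 bytes) */
        uint64_t end1 = curr | mask;  /* last byte of the first block */
        uint64_t end3 = curr + size;
        uint64_t end2 = end3 & ~mask; /* start of the partial tail block */
        unsigned int stg = 0, dc = 0;

        if (size >= 2 * dczid_bs) {
            do { curr += 16; stg++; } while (curr < end1);      /* __stg_post() */
            do { curr += dczid_bs; dc++; } while (curr < end2); /* __dc_gva() */
        }
        while (curr < end3) { curr += 16; stg++; }              /* tail STGs */

        printf("%u STG granules, %u DC GVA blocks\n", stg, dc); /* 3 and 7 */
        return 0;
    }

Per-granule stores only touch the unaligned head and tail; the bulk of the range is tagged a whole block at a time.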
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index bc88a1ced0d7..58c7f80f5596 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -37,7 +37,8 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
-void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_zero_clear_page_tags(void *addr);
+void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
@@ -53,7 +54,10 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
-static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+static inline void mte_zero_clear_page_tags(void *addr)
+{
+}
+static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 012cffc574e8..ed1b9dcf12b2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
+#include <linux/types.h> /* for gfp_t */
#include <asm/pgtable-types.h>
struct page;
@@ -28,9 +29,12 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr);
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+
+void tag_clear_highpage(struct page *to);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
new file mode 100644
index 000000000000..6bf5adc56295
--- /dev/null
+++ b/arch/arm64/include/asm/patching.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_PATCHING_H
+#define __ASM_PATCHING_H
+
+#include <linux/types.h>
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_PATCHING_H */
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 60731f602d3e..4ef6f19331f9 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -239,6 +239,11 @@
/* PMMIR_EL1.SLOTS mask */
#define ARMV8_PMU_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
+#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
+#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b82575a33f8b..40085e53f573 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -72,13 +72,6 @@
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
/*
- * Section address mask and size definitions.
- */
-#define SECTION_SHIFT PMD_SHIFT
-#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
-/*
* Contiguous page definitions.
*/
#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 938092df76cf..7032f04c8ac6 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings;
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0b10204e72fc..c0ba8cdfa10a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -314,9 +314,25 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
- if (system_supports_mte() &&
- pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
- mte_sync_tags(ptep, pte);
+ /*
+ * If the PTE would provide user space access to the tags associated
+ * with it then ensure that the MTE tags are synchronised. Although
+ * pte_access_permitted() returns false for exec only mappings, they
+ * don't expose tags (instruction fetches don't check tags).
+ */
+ if (system_supports_mte() && pte_access_permitted(pte, false) &&
+ !pte_special(pte)) {
+ pte_t old_pte = READ_ONCE(*ptep);
+ /*
+ * We only need to synchronise if the new PTE has tags enabled
+ * or if swapping in (in which case another mapping may have
+ * set tags in the past even if this PTE isn't tagged).
+ * (!pte_none() && !pte_present()) is an open coded version of
+ * is_swap_pte()
+ */
+ if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
+ mte_sync_tags(old_pte, pte);
+ }
__check_racy_pte_update(mm, ptep, pte);
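The open-coded is_swap_pte() in the comment above is just "neither empty nor present". A toy model with invented flag bits, separating the three PTE states the check distinguishes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* toy PTE: bit 0 plays the role of the present bit */
    #define TOY_PTE_PRESENT (1u << 0)

    static bool toy_pte_none(uint32_t pte)    { return pte == 0; }
    static bool toy_pte_present(uint32_t pte) { return pte & TOY_PTE_PRESENT; }

    /* open-coded is_swap_pte(): non-empty but not present */
    static bool toy_is_swap(uint32_t pte)
    {
        return !toy_pte_none(pte) && !toy_pte_present(pte);
    }

    int main(void)
    {
        printf("none:    %d\n", toy_is_swap(0));                       /* 0 */
        printf("present: %d\n", toy_is_swap(TOY_PTE_PRESENT | 0x100)); /* 0 */
        printf("swapped: %d\n", toy_is_swap(0x200));                   /* 1 */
        return 0;
    }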
@@ -511,13 +527,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
-
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
@@ -604,7 +619,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) pud_sect(pud)
#define pud_valid(pud) pte_valid(pud_pte(pud))
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d50416be99be..28a78b67d9b4 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -31,10 +31,6 @@ struct ptrauth_keys_user {
struct ptrauth_key apga;
};
-struct ptrauth_keys_kernel {
- struct ptrauth_key apia;
-};
-
#define __ptrauth_key_install_nosync(k, v) \
do { \
struct ptrauth_key __pki_v = (v); \
@@ -42,6 +38,29 @@ do { \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+
+struct ptrauth_keys_kernel {
+ struct ptrauth_key apia;
+};
+
+static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
+{
+ if (system_supports_address_auth())
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+}
+
+static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
+{
+ if (!system_supports_address_auth())
+ return;
+
+ __ptrauth_key_install_nosync(APIA, keys->apia);
+ isb();
+}
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
@@ -69,21 +88,6 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
ptrauth_keys_install_user(keys);
}
-static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
-{
- if (system_supports_address_auth())
- get_random_bytes(&keys->apia, sizeof(keys->apia));
-}
-
-static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
-{
- if (!system_supports_address_auth())
- return;
-
- __ptrauth_key_install_nosync(APIA, keys->apia);
- isb();
-}
-
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
@@ -121,11 +125,6 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_thread_switch_user(tsk) \
ptrauth_keys_install_user(&(tsk)->thread.keys_user)
-#define ptrauth_thread_init_kernel(tsk) \
- ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
-#define ptrauth_thread_switch_kernel(tsk) \
- ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
-
#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
@@ -134,11 +133,19 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_suspend_exit()
#define ptrauth_thread_init_user()
-#define ptrauth_thread_init_kernel(tsk)
#define ptrauth_thread_switch_user(tsk)
-#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_thread_init_kernel(tsk) \
+ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk) \
+ ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
+#else
+#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_kernel(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
#define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 80e946b2abee..e83f0982b99c 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9df3feeee890..b6517fd03d7b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -148,8 +148,10 @@ struct thread_struct {
struct debug_info debug; /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_user keys_user;
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
struct ptrauth_keys_kernel keys_kernel;
#endif
+#endif
#ifdef CONFIG_ARM64_MTE
u64 gcr_user_excl;
#endif
@@ -257,8 +259,6 @@ void set_task_sctlr_el1(u64 sctlr);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
-asmlinkage void arm64_preempt_schedule_irq(void);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
@@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
- _info.high; \
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
+ _info.high; \
})
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c1..8297bccf0784 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,18 +9,18 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 63e0b92a5fbb..7bea1d705dd6 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -37,13 +37,17 @@ struct sdei_registered_event;
asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
struct sdei_registered_event *arg);
+unsigned long do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
struct stack_info;
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +55,7 @@ static inline bool on_sdei_stack(unsigned long sp,
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp, info);
+ return _on_sdei_stack(sp, size, info);
return false;
}
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0e357757c0cc..fc55f5a57a06 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
/*
* Initial data for bringing up a secondary CPU.
- * @stack - sp for the secondary CPU
* @status - Result passed back from the secondary CPU to
* indicate failure.
*/
struct secondary_data {
- void *stack;
struct task_struct *task;
long status;
};
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 4b33ca620679..1801399204d7 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_stack(unsigned long sp, unsigned long low,
- unsigned long high, enum stack_type type,
- struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+ unsigned long low, unsigned long high,
+ enum stack_type type, struct stack_info *info)
{
if (!low)
return false;
- if (sp < low || sp >= high)
+ if (sp < low || sp + size < sp || sp + size > high)
return false;
if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
return true;
}
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}
static inline bool on_task_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#endif
@@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp,
* context.
*/
static inline bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
- if (on_task_stack(tsk, sp, info))
+ if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp, info))
+ if (on_irq_stack(sp, size, info))
return true;
- if (on_overflow_stack(sp, info))
+ if (on_overflow_stack(sp, size, info))
return true;
- if (on_sdei_stack(sp, info))
+ if (on_sdei_stack(sp, size, info))
return true;
return false;
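The widened on_stack() check earlier in this file is worth a second look: adding size turns a point query ("is sp on this stack?") into an object query ("does [sp, sp + size) fit on this stack?"), and the sp + size < sp clause rejects sizes big enough to wrap the unsigned sum. A standalone check of the three cases:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* the containment test from on_stack(), in isolation */
    static bool in_range(uint64_t sp, uint64_t size, uint64_t low, uint64_t high)
    {
        if (sp < low || sp + size < sp || sp + size > high)
            return false;
        return true;
    }

    int main(void)
    {
        uint64_t low = 0x1000, high = 0x5000;

        printf("%d\n", in_range(0x2000, 0x100, low, high));             /* 1: fits */
        printf("%d\n", in_range(0x4f80, 0x100, low, high));             /* 0: past high */
        printf("%d\n", in_range(0x2000, UINT64_MAX - 0x10, low, high)); /* 0: wraps */
        return 0;
    }

Without the middle clause, the wrapping case would compute a small sp + size and slip past both bounds checks.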
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 65d15700a168..7b9c3acba684 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -651,7 +651,8 @@
#define INIT_SCTLR_EL2_MMU_ON \
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
- SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+ SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
+ SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
#define INIT_SCTLR_EL2_MMU_OFF \
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
@@ -703,9 +704,7 @@
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
-#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_NC UL(0x44)
-#define MAIR_ATTR_NORMAL_WT UL(0xbb)
#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 61c97d3b58c7..c995d1f4594f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
+ /* The TTL field is only valid for the leaf entry. */
+ if (tlb->freed_tables)
+ return 0;
+
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
tlb->cleared_p4ds))
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 24223adae150..b3edde68bc3e 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6cc97730790e..cce308586fcc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -14,15 +14,22 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
CFLAGS_syscall.o += -fno-stack-protector
+# It's not safe to invoke KCOV when portions of the kernel environment aren't
+# available or are out-of-sync with HW state. Since `noinstr` doesn't always
+# inhibit KCOV instrumentation, disable it for the entire compilation unit.
+KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_idle.o := n
+
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-common.o entry-fpsimd.o process.o ptrace.o \
setup.o signal.o sys.o stacktrace.o time.o traps.o \
- io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
+ io.o vdso.o hyp-stub.o psci.o cpu_ops.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idreg-override.o
+ syscall.o proton-pack.o idreg-override.o idle.o \
+ patching.o
targets += efi-entry.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index cada0b816c8a..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -239,6 +239,18 @@ done:
}
}
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+ /*
+ * Although UEFI specifies the use of Normal Write-through for
+ * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+ * by most (all?) CPUs. Rather than allocate a MAIR just for this
+ * purpose, emit a warning and use Normal Non-cacheable instead.
+ */
+ pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+ return __pgprot(PROT_NORMAL_NC);
+}
+
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
* types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
* mapped to a corresponding MAIR attribute encoding.
* The EFI memory attribute advises all possible capabilities
- * of a memory region. We use the most efficient capability.
+ * of a memory region.
*/
u64 attr;
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
attr = efi_mem_attributes(addr);
if (attr & EFI_MEMORY_WB)
return PAGE_KERNEL;
- if (attr & EFI_MEMORY_WT)
- return __pgprot(PROT_NORMAL_WT);
if (attr & EFI_MEMORY_WC)
return __pgprot(PROT_NORMAL_NC);
+ if (attr & EFI_MEMORY_WT)
+ return __acpi_get_writethrough_mem_attribute();
return __pgprot(PROT_DEVICE_nGnRnE);
}
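The reordering above is deliberate: write-back wins, write-combining comes next, and write-through, now with no MAIR slot of its own, degrades to Normal Non-cacheable with a one-time warning. A compressed standalone sketch of the resulting priority; the EFI_MEMORY_* bit positions follow the UEFI spec, and the prot values are collapsed into an enum:

    #include <stdio.h>

    #define EFI_MEMORY_WC (1u << 1)
    #define EFI_MEMORY_WT (1u << 2)
    #define EFI_MEMORY_WB (1u << 3)

    enum prot { PROT_KERNEL, PROT_NORMAL_NC, PROT_DEVICE };

    static enum prot pick_prot(unsigned int attr)
    {
        if (attr & EFI_MEMORY_WB)
            return PROT_KERNEL;    /* fully cacheable */
        if (attr & EFI_MEMORY_WC)
            return PROT_NORMAL_NC; /* write-combining maps to Normal-NC */
        if (attr & EFI_MEMORY_WT)
            return PROT_NORMAL_NC; /* WT degraded to Normal-NC (plus a warning) */
        return PROT_DEVICE;        /* Device-nGnRnE fallback */
    }

    int main(void)
    {
        printf("WT only: %d\n", pick_prot(EFI_MEMORY_WT));                 /* 1 */
        printf("WB|WT:   %d\n", pick_prot(EFI_MEMORY_WB | EFI_MEMORY_WT)); /* 0 */
        return 0;
    }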
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
default:
if (region->attribute & EFI_MEMORY_WB)
prot = PAGE_KERNEL;
- else if (region->attribute & EFI_MEMORY_WT)
- prot = __pgprot(PROT_NORMAL_WT);
else if (region->attribute & EFI_MEMORY_WC)
prot = __pgprot(PROT_NORMAL_NC);
+ else if (region->attribute & EFI_MEMORY_WT)
+ prot = __acpi_get_writethrough_mem_attribute();
}
}
return __ioremap(phys, size, prot);
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index c906d20c7b52..3fb79b76e9d9 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
*/
if (!is_module) {
dsb(ish);
- __flush_icache_all();
+ icache_inval_all_pou();
isb();
/* Ignore ARM64_CB bit from feature mask */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 0cb34ccb6e73..c85670692afa 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -27,6 +27,7 @@
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TSK_CPU, offsetof(struct task_struct, cpu));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
@@ -46,6 +47,8 @@ int main(void)
DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
@@ -99,7 +102,6 @@ int main(void)
DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
- DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
@@ -111,6 +113,8 @@ int main(void)
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
+ DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
+ DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
@@ -138,6 +142,15 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+ DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0));
+ DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2));
+ DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4));
+ DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6));
+ DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8));
+ DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10));
+ DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12));
+ DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14));
+ DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16));
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
@@ -153,7 +166,9 @@ int main(void)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
+#endif
BLANK();
#endif
return 0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index efed2830d141..125d5c9471ac 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
+#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
@@ -108,6 +109,24 @@ bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
+/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+
/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
* will be used to determine if a new booting CPU should
@@ -400,6 +419,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_gmid[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_isar0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
@@ -617,6 +641,9 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 1, CRm = 2 */
ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+ /* Op1 = 1, CRn = 0, CRm = 0 */
+ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
+
/* Op1 = 3, CRn = 0, CRm = 0 */
{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -767,7 +794,7 @@ static void __init sort_ftr_regs(void)
* Any bits that are not covered by an arm64_ftr_bits entry are considered
* RES0 for the system-wide value, and must strictly match.
*/
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
u64 val = 0;
u64 strict_mask = ~0x0ULL;
@@ -863,6 +890,31 @@ static void __init init_cpu_hwcaps_indirect_list(void)
static void __init setup_boot_cpu_capabilities(void);
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+ init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+ init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+ init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+ init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+ init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+ init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+ init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+ init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+ init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+ init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+ init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+ init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+ init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+ init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+ init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+ init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+ init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+ init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+ init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+ init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
/* Before we start using the tables, make sure it is sorted */
@@ -882,35 +934,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
- init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
- init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
- init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
- init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
- init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
- init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
- init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
- init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
- init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
- init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
- init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
- init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
- init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
- init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
- init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
- init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
- init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
- init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
- init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
- init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
- }
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ init_32bit_cpu_features(&info->aarch32);
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
sve_init_vq_map();
}
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
+
/*
* Initialize the indirect array of CPU hwcaps capabilities pointers
* before we handle the boot CPU below.
@@ -975,21 +1009,29 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
WARN_ON(!ftrp->width);
}
-static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
- struct cpuinfo_arm64 *boot)
+static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
+ struct cpuinfo_arm64 *boot)
+{
+ static bool boot_cpu_32bit_regs_overridden = false;
+
+ if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+ return;
+
+ if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+ return;
+
+ boot->aarch32 = info->aarch32;
+ init_32bit_cpu_features(&boot->aarch32);
+ boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+ struct cpuinfo_32bit *boot)
{
int taint = 0;
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
/*
- * If we don't have AArch32 at all then skip the checks entirely
- * as the register values may be UNKNOWN and we're not going to be
- * using them for anything.
- */
- if (!id_aa64pfr0_32bit_el0(pfr0))
- return taint;
-
- /*
* If we don't have AArch32 at EL1, then relax the strictness of
* EL1-dependent register fields to avoid spurious sanity check fails.
*/
@@ -1135,10 +1177,29 @@ void update_cpu_features(int cpu,
}
/*
+ * The kernel uses the LDGM/STGM instructions and the number of tags
+ * they read/write depends on the GMID_EL1.BS field. Check that the
+ * value is the same on all CPUs.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_MTE) &&
+ id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
+ taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
+ info->reg_gmid, boot->reg_gmid);
+ }
+
+ /*
+ * If we don't have AArch32 at all then skip the checks entirely
+ * as the register values may be UNKNOWN and we're not going to be
+ * using them for anything.
+ *
* This relies on a sanitised view of the AArch64 ID registers
* (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
*/
- taint |= update_32bit_cpu_features(cpu, info, boot);
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ lazy_init_32bit_cpu_features(info, boot);
+ taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+ &boot->aarch32);
+ }
/*
* Mismatched CPU features are a recipe for disaster. Don't even
@@ -1248,6 +1309,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
return feature_matches(val, entry);
}
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+ if (!system_supports_32bit_el0())
+ return cpu_none_mask;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return cpu_32bit_el0_mask;
+
+ return cpu_possible_mask;
+}
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ if (!has_cpuid_feature(entry, scope))
+ return allow_mismatched_32bit_el0;
+
+ if (scope == SCOPE_SYSTEM)
+ pr_info("detected: 32-bit EL0 Support\n");
+
+ return true;
+}
+
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
bool has_sre;
@@ -1866,10 +1949,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_copy_el2regs,
},
{
- .desc = "32-bit EL0 Support",
- .capability = ARM64_HAS_32BIT_EL0,
+ .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .matches = has_cpuid_feature,
+ .matches = has_32bit_el0,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
@@ -2378,7 +2460,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
{},
};
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -2423,7 +2505,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
return rc;
}
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
cpu_set_named_feature(CPUID);
@@ -2598,7 +2680,7 @@ static void check_early_cpu_features(void)
}
static void
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
@@ -2609,6 +2691,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
}
+static void verify_local_elf_hwcaps(void)
+{
+ __verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
+ __verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
static void verify_sve_features(void)
{
u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -2673,11 +2763,7 @@ static void verify_local_cpu_capabilities(void)
* on all secondary CPUs.
*/
verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
- verify_local_elf_hwcaps(arm64_elf_hwcaps);
-
- if (system_supports_32bit_el0())
- verify_local_elf_hwcaps(compat_elf_hwcaps);
+ verify_local_elf_hwcaps();
if (system_supports_sve())
verify_sve_features();
@@ -2812,6 +2898,34 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN);
}
+static int enable_mismatched_32bit_el0(unsigned int cpu)
+{
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+ bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+
+ if (cpu_32bit) {
+ cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+ static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
+ setup_elf_hwcaps(compat_elf_hwcaps);
+ }
+
+ return 0;
+}
+
+static int __init init_32bit_el0_mask(void)
+{
+ if (!allow_mismatched_32bit_el0)
+ return 0;
+
+ if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "arm64/mismatched_32bit_el0:online",
+ enable_mismatched_32bit_el0, NULL);
+}
+subsys_initcall_sync(init_32bit_el0_mask);
+
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
@@ -2905,8 +3019,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
}
static struct undef_hook mrs_hook = {
- .instr_mask = 0xfff00000,
- .instr_val = 0xd5300000,
+ .instr_mask = 0xffff0000,
+ .instr_val = 0xd5380000,
.pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
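
The mismatched-32-bit-EL0 plumbing above is meant to be consumed through system_32bit_el0_cpumask() and the static key. A hypothetical caller (not part of this patch; the helper name is made up) might gate per-CPU AArch32 execution like so, keeping the common symmetric case to a single patched branch:

```c
/*
 * Hypothetical consumer sketch: can this CPU run an AArch32 task?
 * Fast path: all CPUs are 32-bit capable, so the static branch is off.
 * Slow path: consult the mask of 32-bit-capable CPUs built above.
 */
static bool cpu_can_run_aarch32(int cpu)
{
	if (!system_supports_32bit_el0())
		return false;

	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpumask_test_cpu(cpu, system_32bit_el0_cpumask());

	return true;
}
```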
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 51fcf99d5351..87731fea5e41 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = {
struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \
\
if (info->reg_midr) \
- return sprintf(buf, "0x%016x\n", info->reg_##_field); \
+ return sprintf(buf, "0x%016llx\n", info->reg_##_field); \
else \
return 0; \
} \
@@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
+{
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+ info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
@@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
- /* Update the 32bit ID registers only if AArch32 is implemented */
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
- info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
- info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
- info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
- info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
- info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
- info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
- info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
- info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
- info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
- }
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ info->reg_gmid = read_cpuid(GMID_EL1);
+
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ __cpuinfo_store_cpu_32bit(&info->aarch32);
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(info->reg_id_aa64pfr0))
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 0073b24b5d25..61a87fa1c305 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -28,7 +28,8 @@ SYM_CODE_START(efi_enter_kernel)
* stale icache entries from before relocation.
*/
ldr w1, =kernel_size
- bl __clean_dcache_area_poc
+ add x1, x0, x1
+ bl dcache_clean_poc
ic ialluis
/*
@@ -36,8 +37,8 @@ SYM_CODE_START(efi_enter_kernel)
* so that we can safely disable the MMU and caches.
*/
adr x0, 0f
- ldr w1, 3f
- bl __clean_dcache_area_poc
+ adr x1, 3f
+ bl dcache_clean_poc
0:
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
mov x2, xzr
mov x3, xzr
br x19
+3:
SYM_CODE_END(efi_enter_kernel)
-3: .long . - 0b
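
The conversion above reflects the new calling convention of the renamed cache maintenance helpers: a [start, end) virtual address range instead of (base, size), which is what the added "add x1, x0, x1" computes. An illustrative C-level equivalent (the wrapper is a sketch, not kernel code):

```c
/*
 * Old style: __clean_dcache_area_poc(base, size)
 * New style: dcache_clean_poc(start, end)
 * Callers now pass base + size as the second argument.
 */
static void clean_to_poc(void *base, size_t size)
{
	unsigned long start = (unsigned long)base;

	dcache_clean_poc(start, start + size);
}
```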
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 340d04e13617..12ce14a98b7c 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -6,7 +6,11 @@
*/
#include <linux/context_tracking.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
@@ -15,7 +19,11 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/sdei.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
+#include <asm/system_misc.h>
/*
* This is intended to match the logic in irqentry_enter(), handling the kernel
@@ -67,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
}
}
-void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -80,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs)
ftrace_nmi_enter();
}
-void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
@@ -97,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
__nmi_exit();
}
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
@@ -105,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
enter_from_kernel_mode(regs);
}
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
@@ -113,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
+static void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+ * priority masking is used the GIC irqchip driver will clear DAIF.IF
+ * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+ * DAIF we must have handled an NMI, so skip preemption.
+ */
+ if (system_uses_irq_prio_masking() && read_sysreg(daif))
+ return;
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (system_capabilities_finalized())
+ preempt_schedule_irq();
+}
+
+static void do_interrupt_handler(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ if (on_thread_stack())
+ call_on_irq_stack(regs, handler);
+ else
+ handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
+static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
+ unsigned int esr)
+{
+ arm64_enter_nmi(regs);
+
+ console_verbose();
+
+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+ vector, smp_processor_id(), esr,
+ esr_get_class_string(esr));
+
+ __show_regs(regs);
+ panic("Unhandled exception");
+}
+
+#define UNHANDLED(el, regsize, vector) \
+asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
+{ \
+ const char *desc = #regsize "-bit " #el " " #vector; \
+ __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \
+}
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -162,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+UNHANDLED(el1t, 64, sync)
+UNHANDLED(el1t, 64, irq)
+UNHANDLED(el1t, 64, fiq)
+UNHANDLED(el1t, 64, error)
+
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -193,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
-{
- enter_from_kernel_mode(regs);
- local_daif_inherit(regs);
- bad_mode(regs, 0, esr);
- local_daif_mask();
- exit_to_kernel_mode(regs);
-}
-
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -245,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -275,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
el1_fpac(regs, esr);
break;
default:
- el1_inv(regs, esr);
+ __panic_unhandled(regs, "64-bit el1h sync", esr);
}
}
+static void noinstr el1_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ enter_el1_irq_or_nmi(regs);
+ do_interrupt_handler(regs, handler);
+
+ /*
+ * Note: thread_info::preempt_count includes both thread_info::count
+ * and thread_info::need_resched, and is not equivalent to
+ * preempt_count().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) &&
+ READ_ONCE(current_thread_info()->preempt_count) == 0)
+ arm64_preempt_schedule_irq();
+
+ exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+}
+
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -398,7 +501,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
enter_from_user_mode();
do_debug_exception(far, esr, regs);
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ local_daif_restore(DAIF_PROCCTX);
}
static void noinstr el0_svc(struct pt_regs *regs)
@@ -415,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
do_ptrauth_fault(regs, esr);
}
-asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -468,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
}
}
+static void noinstr el0_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ enter_from_user_mode();
+
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ if (regs->pc & BIT(55))
+ arm64_apply_bp_hardening();
+
+ do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+static void __el0_error_handler_common(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+ local_daif_restore(DAIF_PROCCTX);
+}
+
+asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
@@ -483,7 +636,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
do_el0_svc_compat(regs);
}
-asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -526,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
+
+asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+#else /* CONFIG_COMPAT */
+UNHANDLED(el0t, 32, sync)
+UNHANDLED(el0t, 32, irq)
+UNHANDLED(el0t, 32, fiq)
+UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_VMAP_STACK
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned int esr = read_sysreg(esr_el1);
+ unsigned long far = read_sysreg(far_el1);
+
+ arm64_enter_nmi(regs);
+ panic_bad_stack(regs, esr, far);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+asmlinkage noinstr unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+
+ /*
+ * We didn't take an exception to get here, so the HW hasn't
+ * set/cleared bits in PSTATE that we may rely on.
+ *
+ * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
+ * whether PSTATE bits are inherited unchanged or generated from
+ * scratch, and the TF-A implementation always clears PAN and always
+ * clears UAO. There are no other known implementations.
+ *
+ * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
+ * PSTATE is modified upon architectural exceptions, and so PAN is
+ * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
+ * cleared.
+ *
+ * We must explicitly reset PAN to the expected state, including
+ * clearing it when the host isn't using it, in case a VM had it set.
+ */
+ if (system_uses_hw_pan())
+ set_pstate_pan(1);
+ else if (cpu_has_pan())
+ set_pstate_pan(0);
+
+ arm64_enter_nmi(regs);
+ ret = do_sdei_event(regs, arg);
+ arm64_exit_nmi(regs);
+
+ return ret;
+}
+#endif /* CONFIG_ARM_SDE_INTERFACE */
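
To make the UNHANDLED() template concrete, here is roughly what one instantiation expands to (whitespace aside); the generated symbol matches the el<el><ht>_<regsize>_<vector>_handler names that the templated entry.S vectors branch to:

```c
/* Approximate expansion of UNHANDLED(el1t, 64, sync) from above. */
asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
{
	/* string-literal concatenation yields "64-bit el1t sync" */
	const char *desc = "64" "-bit " "el1t" " " "sync";

	__panic_unhandled(regs, desc, read_sysreg(esr_el1));
}
```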
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 3ecec60d3295..0a7a64753878 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -63,16 +63,24 @@ SYM_FUNC_END(sve_set_vq)
* and the rest zeroed. All the other SVE registers will be zeroed.
*/
SYM_FUNC_START(sve_load_from_fpsimd_state)
- sve_load_vq x1, x2, x3
- fpsimd_restore x0, 8
- _for n, 0, 15, _sve_pfalse \n
- _sve_wrffr 0
- ret
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ sve_flush_p_ffr
+ ret
SYM_FUNC_END(sve_load_from_fpsimd_state)
-/* Zero all SVE registers but the first 128-bits of each vector */
+/*
+ * Zero all SVE registers but the first 128-bits of each vector
+ *
+ * VQ must already be configured by caller, any further updates of VQ
+ * will need to ensure that the register state remains valid.
+ *
+ * x0 = VQ - 1
+ */
SYM_FUNC_START(sve_flush_live)
- sve_flush
+ cbz x0, 1f // A VQ-1 of 0 is 128 bits so no extra Z state
+ sve_flush_z
+1: sve_flush_p_ffr
ret
SYM_FUNC_END(sve_flush_live)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3513984a88bd..863d44f73028 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
- .macro user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
- bl enter_from_user_mode
-#endif
- .endm
-
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
@@ -51,16 +45,7 @@
.endr
.endm
-/*
- * Bad Abort numbers
- *-----------------
- */
-#define BAD_SYNC 0
-#define BAD_IRQ 1
-#define BAD_FIQ 2
-#define BAD_ERROR 3
-
- .macro kernel_ventry, el, label, regsize = 64
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -87,7 +72,7 @@ alternative_else_nop_endif
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
0:
/*
@@ -119,7 +104,7 @@ alternative_else_nop_endif
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
.endm
.macro tramp_alias, dst, sym
@@ -275,7 +260,7 @@ alternative_else_nop_endif
mte_set_kernel_gcr x22, x23
- scs_load tsk, x20
+ scs_load tsk
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -285,7 +270,7 @@ alternative_else_nop_endif
stp lr, x21, [sp, #S_LR]
/*
- * For exceptions from EL0, create a terminal frame record.
+ * For exceptions from EL0, create a final frame record.
* For exceptions from EL1, create a synthetic frame record so the
* interrupted code shows up in the backtrace.
*/
@@ -375,7 +360,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
- scs_save tsk, x0
+ scs_save tsk
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -486,63 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov x24, scs_sp // preserve the original shadow stack
-#endif
-
- /*
- * Compare sp with the base of the task stack.
- * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
- * and should switch to the irq stack.
- */
- ldr x25, [tsk, TSK_STACK]
- eor x25, x25, x19
- and x25, x25, #~(THREAD_SIZE - 1)
- cbnz x25, 9998f
-
- ldr_this_cpu x25, irq_stack_ptr, x26
- mov x26, #IRQ_STACK_SIZE
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
- /* also switch to the irq shadow stack */
- ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
- .endm
-
- /*
- * The callee-saved regs (x19-x29) should be preserved between
- * irq_stack_entry and irq_stack_exit, but note that kernel_entry
- * uses x20-x23 to store data for later use.
- */
- .macro irq_stack_exit
- mov sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov scs_sp, x24
-#endif
- .endm
-
/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
- .macro irq_handler, handler:req
- ldr_l x1, \handler
- mov x0, sp
- irq_stack_entry
- blr x1
- irq_stack_exit
- .endm
-
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,45 +486,6 @@ tsk .req x28 // current thread_info
#endif
.endm
- .macro el1_interrupt_handler, handler:req
- enable_da
-
- mov x0, sp
- bl enter_el1_irq_or_nmi
-
- irq_handler \handler
-
-#ifdef CONFIG_PREEMPTION
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- /*
- * DA were cleared at start of handling, and IF are cleared by
- * the GIC irqchip driver using gic_arch_enable_irqs() for
- * normal IRQs. If anything is set, it means we come back from
- * an NMI instead of a normal IRQ, so skip preemption
- */
- mrs x0, daif
- orr x24, x24, x0
-alternative_else_nop_endif
- cbnz x24, 1f // preempt count != 0 || NMI return path
- bl arm64_preempt_schedule_irq // irq en/disable is done inside
-1:
-#endif
-
- mov x0, sp
- bl exit_el1_irq_or_nmi
- .endm
-
- .macro el0_interrupt_handler, handler:req
- user_exit_irqoff
- enable_da
-
- tbz x22, #55, 1f
- bl do_el0_irq_bp_hardening
-1:
- irq_handler \handler
- .endm
-
.text
/*
@@ -600,32 +495,25 @@ alternative_else_nop_endif
.align 11
SYM_CODE_START(vectors)
- kernel_ventry 1, sync_invalid // Synchronous EL1t
- kernel_ventry 1, irq_invalid // IRQ EL1t
- kernel_ventry 1, fiq_invalid // FIQ EL1t
- kernel_ventry 1, error_invalid // Error EL1t
-
- kernel_ventry 1, sync // Synchronous EL1h
- kernel_ventry 1, irq // IRQ EL1h
- kernel_ventry 1, fiq // FIQ EL1h
- kernel_ventry 1, error // Error EL1h
-
- kernel_ventry 0, sync // Synchronous 64-bit EL0
- kernel_ventry 0, irq // IRQ 64-bit EL0
- kernel_ventry 0, fiq // FIQ 64-bit EL0
- kernel_ventry 0, error // Error 64-bit EL0
-
-#ifdef CONFIG_COMPAT
- kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
-#else
- kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
-#endif
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
+ kernel_ventry 1, t, 64, error // Error EL1t
+
+ kernel_ventry 1, h, 64, sync // Synchronous EL1h
+ kernel_ventry 1, h, 64, irq // IRQ EL1h
+ kernel_ventry 1, h, 64, fiq // FIQ EL1h
+ kernel_ventry 1, h, 64, error // Error EL1h
+
+ kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0
+ kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0
+ kernel_ventry 0, t, 64, error // Error 64-bit EL0
+
+ kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0
+ kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0
+ kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0
+ kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
@@ -656,147 +544,46 @@ __bad_stack:
ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
-/*
- * Invalid mode handlers
- */
- .macro inv_entry, el, reason, regsize = 64
+
+ .macro entry_handler el:req, ht:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
kernel_entry \el, \regsize
mov x0, sp
- mov x1, #\reason
- mrs x2, esr_el1
- bl bad_mode
- ASM_BUG()
+ bl el\el\ht\()_\regsize\()_\label\()_handler
+ .if \el == 0
+ b ret_to_user
+ .else
+ b ret_to_kernel
+ .endif
+SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
.endm
-SYM_CODE_START_LOCAL(el0_sync_invalid)
- inv_entry 0, BAD_SYNC
-SYM_CODE_END(el0_sync_invalid)
-
-SYM_CODE_START_LOCAL(el0_irq_invalid)
- inv_entry 0, BAD_IRQ
-SYM_CODE_END(el0_irq_invalid)
-
-SYM_CODE_START_LOCAL(el0_fiq_invalid)
- inv_entry 0, BAD_FIQ
-SYM_CODE_END(el0_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el0_error_invalid)
- inv_entry 0, BAD_ERROR
-SYM_CODE_END(el0_error_invalid)
-
-SYM_CODE_START_LOCAL(el1_sync_invalid)
- inv_entry 1, BAD_SYNC
-SYM_CODE_END(el1_sync_invalid)
-
-SYM_CODE_START_LOCAL(el1_irq_invalid)
- inv_entry 1, BAD_IRQ
-SYM_CODE_END(el1_irq_invalid)
-
-SYM_CODE_START_LOCAL(el1_fiq_invalid)
- inv_entry 1, BAD_FIQ
-SYM_CODE_END(el1_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el1_error_invalid)
- inv_entry 1, BAD_ERROR
-SYM_CODE_END(el1_error_invalid)
-
/*
- * EL1 mode handlers.
+ * Early exception handlers
*/
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
- kernel_entry 1
- mov x0, sp
- bl el1_sync_handler
- kernel_exit 1
-SYM_CODE_END(el1_sync)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
- kernel_entry 1
- el1_interrupt_handler handle_arch_irq
- kernel_exit 1
-SYM_CODE_END(el1_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
- kernel_entry 1
- el1_interrupt_handler handle_arch_fiq
- kernel_exit 1
-SYM_CODE_END(el1_fiq)
-
-/*
- * EL0 mode handlers.
- */
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
- kernel_entry 0
- mov x0, sp
- bl el0_sync_handler
- b ret_to_user
-SYM_CODE_END(el0_sync)
-
-#ifdef CONFIG_COMPAT
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_sync_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
- kernel_entry 0, 32
- b el0_irq_naked
-SYM_CODE_END(el0_irq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
- kernel_entry 0, 32
- b el0_fiq_naked
-SYM_CODE_END(el0_fiq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
- kernel_entry 0, 32
- b el0_error_naked
-SYM_CODE_END(el0_error_compat)
-#endif
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
- kernel_entry 0
-el0_irq_naked:
- el0_interrupt_handler handle_arch_irq
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
- kernel_entry 0
-el0_fiq_naked:
- el0_interrupt_handler handle_arch_fiq
- b ret_to_user
-SYM_CODE_END(el0_fiq)
-
-SYM_CODE_START_LOCAL(el1_error)
- kernel_entry 1
- mrs x1, esr_el1
- enable_dbg
- mov x0, sp
- bl do_serror
+ entry_handler 1, t, 64, sync
+ entry_handler 1, t, 64, irq
+ entry_handler 1, t, 64, fiq
+ entry_handler 1, t, 64, error
+
+ entry_handler 1, h, 64, sync
+ entry_handler 1, h, 64, irq
+ entry_handler 1, h, 64, fiq
+ entry_handler 1, h, 64, error
+
+ entry_handler 0, t, 64, sync
+ entry_handler 0, t, 64, irq
+ entry_handler 0, t, 64, fiq
+ entry_handler 0, t, 64, error
+
+ entry_handler 0, t, 32, sync
+ entry_handler 0, t, 32, irq
+ entry_handler 0, t, 32, fiq
+ entry_handler 0, t, 32, error
+
+SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(el0_error)
- kernel_entry 0
-el0_error_naked:
- mrs x25, esr_el1
- user_exit_irqoff
- enable_dbg
- mov x0, sp
- mov x1, x25
- bl do_serror
- enable_da
- b ret_to_user
-SYM_CODE_END(el0_error)
+SYM_CODE_END(ret_to_kernel)
/*
* "slow" syscall return path.
@@ -979,8 +766,8 @@ SYM_FUNC_START(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
- scs_save x0, x8
- scs_load x1, x8
+ scs_save x0
+ scs_load x1
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -998,6 +785,42 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using this CPU's irq stack and shadow irq stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+#ifdef CONFIG_SHADOW_CALL_STACK
+ stp scs_sp, xzr, [sp, #-16]!
+ ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+#endif
+ /* Create a frame record to save our LR and SP (implicit in FP) */
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ ldr_this_cpu x16, irq_stack_ptr, x17
+ mov x15, #IRQ_STACK_SIZE
+ add x16, x16, x15
+
+ /* Move to the new stack and call the function there */
+ mov sp, x16
+ blr x1
+
+ /*
+ * Restore the SP from the FP, and restore the FP and LR from the frame
+ * record.
+ */
+ mov sp, x29
+ ldp x29, x30, [sp], #16
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldp scs_sp, xzr, [sp], #16
+#endif
+ ret
+SYM_FUNC_END(call_on_irq_stack)
+NOKPROBE(call_on_irq_stack)
+
#ifdef CONFIG_ARM_SDE_INTERFACE
#include <asm/sdei.h>
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad3dd34a83cf..e57b23f95284 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -957,8 +957,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
* disabling the trap, otherwise update our in-memory copy.
*/
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
- sve_flush_live();
+ unsigned long vq_minus_one =
+ sve_vq_from_vl(current->thread.sve_vl) - 1;
+ sve_set_vq(vq_minus_one);
+ sve_flush_live(vq_minus_one);
fpsimd_bind_task_to_cpu();
} else {
fpsimd_to_sve(current);
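
As background (a sketch, not kernel code): the SVE vector length (VL) is measured in bytes, a vector quadword (VQ) is a 128-bit granule, and ZCR_ELx.LEN is programmed with VQ - 1. That VQ - 1 value is what sve_set_vq() takes and what sve_flush_live() now needs in order to know whether any Z state beyond the low 128 bits exists at all:

```c
/*
 * Background sketch mirroring sve_vq_from_vl(): VQ = VL / 16, and the
 * value passed to sve_set_vq()/sve_flush_live() above is VQ - 1.
 */
#define SVE_VQ_BYTES	16	/* one 128-bit quadword */

static inline unsigned long example_vq_minus_one(unsigned int vl)
{
	return vl / SVE_VQ_BYTES - 1;	/* e.g. vl == 64 bytes -> 3 */
}
```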
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index b5d3ddaf69d9..7f467bd9db7a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 96873dfa67fd..c5c994a73a64 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -16,6 +16,7 @@
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
+#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
@@ -117,8 +118,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
dmb sy // needed before dc ivac with
// MMU off
- mov x1, #0x20 // 4 x 8 bytes
- b __inval_dcache_area // tail call
+ add x1, x0, #0x20 // 4 x 8 bytes
+ b dcache_inval_poc // tail call
SYM_CODE_END(preserve_boot_args)
/*
@@ -195,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
mov \istart, \ptrs
mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += (count - 1) * ptrs
+ add \iend, \iend, \istart // iend += count * ptrs
// our entries span multiple tables
lsr \istart, \vstart, \shift
@@ -268,8 +269,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
/*
* Clear the init page tables.
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
#endif
1:
ldr_l x4, idmap_ptrs_per_pgd
- mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -382,39 +381,57 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
adrp x0, idmap_pg_dir
adrp x1, idmap_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
ret x28
SYM_FUNC_END(__create_page_tables)
+ /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
+ * Create a final frame record at task_pt_regs(current)->stackframe, so
+ * that the unwinder can identify the final frame record of any task by
+ * its location in the task stack. We reserve the entire pt_regs space
+ * for consistency with user tasks and kthreads.
+ */
+ .macro init_cpu_task tsk, tmp1, tmp2
+ msr sp_el0, \tsk
+
+ ldr \tmp1, [\tsk, #TSK_STACK]
+ add sp, \tmp1, #THREAD_SIZE
+ sub sp, sp, #PT_REGS_SIZE
+
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+
+ scs_load \tsk
+
+ adr_l \tmp1, __per_cpu_offset
+ ldr w\tmp2, [\tsk, #TSK_CPU]
+ ldr \tmp1, [\tmp1, \tmp2, lsl #3]
+ set_this_cpu_offset \tmp1
+ .endm
+
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
SYM_FUNC_START_LOCAL(__primary_switched)
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
+ adr_l x4, init_task
+ init_cpu_task x4, x5, x6
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
- stp xzr, x30, [sp, #-16]!
+ stp x29, x30, [sp, #-16]!
mov x29, sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
-#endif
-
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
@@ -446,10 +463,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
0:
#endif
bl switch_to_vhe // Prefer VHE if possible
- add sp, sp, #16
- mov x29, #0
- mov x30, #0
- b start_kernel
+ ldp x29, x30, [sp], #16
+ bl start_kernel
+ ASM_BUG()
SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
@@ -551,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: str w0, [x1] // This CPU has booted in EL1
+1: str w0, [x1] // Save CPU boot mode
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
@@ -632,21 +648,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
isb
adr_l x0, secondary_data
- ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- cbz x1, __secondary_too_slow
- mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- msr sp_el0, x2
- scs_load x2, x3
- mov x29, #0
- mov x30, #0
+
+ init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5
#endif
- b secondary_start_kernel
+ bl secondary_start_kernel
+ ASM_BUG()
SYM_FUNC_END(__secondary_switched)
SYM_FUNC_START_LOCAL(__secondary_too_slow)
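
The tail of init_cpu_task installs this CPU's per-CPU base. A C-level sketch of the equivalent operation (__per_cpu_offset, task_cpu() and set_this_cpu_offset() are the real symbols; the wrapper itself is illustrative):

```c
/*
 * Sketch of what the last three instructions of init_cpu_task do:
 * index __per_cpu_offset by the task's cpu field (via the new TSK_CPU
 * offset) and install the result as this CPU's per-CPU base.
 */
static void init_cpu_task_percpu_sketch(struct task_struct *tsk)
{
	set_this_cpu_offset(__per_cpu_offset[task_cpu(tsk)]);
}
```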
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 8ccca660034e..81c0186a5e32 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -45,7 +45,7 @@
* Because this code has to be copied to a 'safe' page, it can't call out to
* other functions by PC-relative address. Also remember that it may be
* mid-way through over-writing other functions. For this reason it contains
- * code from flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
*
* This 'safe' page is mapped via ttbr0, and executed from there. This function
* switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
add x1, x10, #PAGE_SIZE
- /* Clean the copied page to PoU - based on flush_icache_range() */
+ /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
raw_dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x10, x3
-2: dc cvau, x4 /* clean D line / unified line */
+2: /* clean D line / unified line */
+alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
add x4, x4, x2
cmp x4, x1
b.lo 2b
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index b1cef371df2b..46a0b4d6e251 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
return -ENOMEM;
memcpy(page, src_start, length);
- __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+ caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
if (rc)
return rc;
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
return 0;
}
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
#ifdef CONFIG_ARM64_MTE
static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
ret = swsusp_save();
} else {
/* Clean kernel core startup/idle code to PoC*/
- dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
- dcache_clean_range(__idmap_text_start, __idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
+ (unsigned long)__mmuoff_data_end);
+ dcache_clean_inval_poc((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed()) {
- dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
- dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ dcache_clean_inval_poc(
+ (unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
}
swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
* The hibernate exit text contains a set of el2 vectors, that will
* be executed at el2 with the mmu off in order to reload hyp-stub.
*/
- __flush_dcache_area(hibernate_exit, exit_size);
+ dcache_clean_inval_poc((unsigned long)hibernate_exit,
+ (unsigned long)hibernate_exit + exit_size);
/*
* KASLR will cause the el2 vectors to be in a different location in
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
new file mode 100644
index 000000000000..a2cfbacec2bb
--- /dev/null
+++ b/arch/arm64/kernel/idle.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level idle sequences
+ */
+
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+
+#include <asm/barrier.h>
+#include <asm/cpuidle.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+ struct arm_cpuidle_irq_context context;
+
+ arm_cpuidle_save_irq_context(&context);
+
+ dsb(sy);
+ wfi();
+
+ arm_cpuidle_restore_irq_context(&context);
+}
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+ /*
+ * This should do all the clock switching and wait for interrupt
+ * tricks
+ */
+ cpu_do_idle();
+ raw_local_irq_enable();
+}
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index e628c8ce1ffe..53a381a7f65d 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
for (i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i]->override)
- __flush_dcache_area(regs[i]->override,
+ dcache_clean_inval_poc((unsigned long)regs[i]->override,
+ (unsigned long)regs[i]->override +
sizeof(*regs[i]->override));
}
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index bcf3c2755370..c96a9a0043bf 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -35,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy;
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 9a8a0ae1e75f..fc98037e1220 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/insn.h>
+#include <asm/patching.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 341342b207f6..cfa2cfde3019 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
- __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
+ (unsigned long)&memstart_offset_seed +
+ sizeof(memstart_offset_seed));
return offset;
}
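
The module_alloc_base update above is a fixed-point scaling: the offset is range * (seed mod 2^21) / 2^21, i.e. a pseudo-random point in [0, range) computed without a division. As a stand-alone sketch of the same arithmetic:

```c
/*
 * Scale `range` by a 21-bit fraction of `seed`. With range well below
 * 2^43 (module_range is on the order of MODULES_VSIZE), the multiply
 * cannot overflow a 64-bit unsigned long.
 */
static unsigned long scale_by_seed(unsigned long range, u64 seed)
{
	return (range * (seed & ((1UL << 21) - 1))) >> 21;
}
```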
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..2aede780fb80 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -17,6 +17,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 90a335c74442..03ceabe4d912 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -68,10 +68,16 @@ int machine_kexec_post_load(struct kimage *kimage)
kimage->arch.kern_reloc = __pa(reloc_code);
kexec_image_info(kimage);
- /* Flush the reloc_code in preparation for its execution. */
- __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
- flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
- arm64_relocate_new_kernel_size);
+ /*
+ * For execution with the MMU off, reloc_code needs to be cleaned to the
+ * PoC and invalidated from the I-cache.
+ */
+ dcache_clean_inval_poc((unsigned long)reloc_code,
+ (unsigned long)reloc_code +
+ arm64_relocate_new_kernel_size);
+ icache_inval_pou((uintptr_t)reloc_code,
+ (uintptr_t)reloc_code +
+ arm64_relocate_new_kernel_size);
return 0;
}
@@ -102,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
for (entry = &kimage->head; ; entry++) {
unsigned int flag;
- void *addr;
+ unsigned long addr;
/* flush the list entries. */
- __flush_dcache_area(entry, sizeof(kimage_entry_t));
+ dcache_clean_inval_poc((unsigned long)entry,
+ (unsigned long)entry +
+ sizeof(kimage_entry_t));
flag = *entry & IND_FLAGS;
if (flag == IND_DONE)
break;
- addr = phys_to_virt(*entry & PAGE_MASK);
+ addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
switch (flag) {
case IND_INDIRECTION:
@@ -120,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
break;
case IND_SOURCE:
/* flush the source pages. */
- __flush_dcache_area(addr, PAGE_SIZE);
+ dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
break;
case IND_DESTINATION:
break;
@@ -147,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
- __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
- kimage->segment[i].memsz);
+ dcache_clean_inval_poc(
+ (unsigned long)phys_to_virt(kimage->segment[i].mem),
+ (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+ kimage->segment[i].memsz);
}
}
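
The treatment of reloc_code generalizes to any text that must execute with the MMU off. A hedged sketch of the two maintenance steps (prepare_mmu_off_text() is a made-up name; the two helpers are the real ones used above):

```c
/*
 * Clean the data cache to the Point of Coherency so DRAM holds the bytes
 * just written, then invalidate the I-cache over the same [start, end)
 * range so no stale instructions can be fetched once translation is off.
 */
static void prepare_mmu_off_text(void *text, size_t size)
{
	unsigned long start = (unsigned long)text;
	unsigned long end = start + size;

	dcache_clean_inval_poc(start, end);
	icache_inval_pou(start, end);
}
```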
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 125a10e413e9..69b3fde8759e 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -32,10 +32,9 @@ DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif
-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ bool check_swap, bool pte_is_tagged)
{
- pte_t old_pte = READ_ONCE(*ptep);
-
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
@@ -43,6 +42,9 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return;
}
+ if (!pte_is_tagged)
+ return;
+
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
@@ -55,16 +57,22 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
mte_clear_page_tags(page_address(page));
}
-void mte_sync_tags(pte_t *ptep, pte_t pte)
+void mte_sync_tags(pte_t old_pte, pte_t pte)
{
struct page *page = pte_page(pte);
long i, nr_pages = compound_nr(page);
bool check_swap = nr_pages == 1;
+ bool pte_is_tagged = pte_tagged(pte);
+
+ /* Early out if there's nothing to do */
+ if (!check_swap && !pte_is_tagged)
+ return;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
- mte_sync_page_tags(page, ptep, check_swap);
+ mte_sync_page_tags(page, old_pte, check_swap,
+ pte_is_tagged);
}
}
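
Condensing the new early-out in mte_sync_tags(): a sketch (mte_sync_needed() is illustrative, not kernel code) of when tag work is required at all, which is what lets untagged faults return before touching any page flags:

```c
/*
 * Tag work is needed only when the new PTE maps tagged memory, or when a
 * single page might carry tags to restore from swap.
 */
static bool mte_sync_needed(pte_t pte, unsigned long nr_pages)
{
	bool check_swap = nr_pages == 1;

	return check_swap || pte_tagged(pte);
}
```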
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644
index 000000000000..771f543464e0
--- /dev/null
+++ b/arch/arm64/kernel/patching.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/patching.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+ /* discarded with init text/data */
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (unsigned long)__exittext_begin &&
+ addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+ return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool image = is_image_text(uintaddr);
+ struct page *page;
+
+ if (image)
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+ BUG_ON(!page);
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ __le32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ caches_clean_inval_pou((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
+ } else {
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
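
aarch64_insn_patch_text_cb() above is a classic stop_machine() rendezvous: the first
CPU to arrive becomes the sole writer, everyone else spins until the writer signals
completion with one extra increment and then issues isb() to discard any stale copies
of the patched instructions. A user-space analogue with C11 atomics (a sketch under
invented names, not the kernel implementation):

	#include <stdatomic.h>

	struct rendezvous {
		atomic_int cpu_count;		/* starts at 0 */
	};

	/* Run on every CPU while all of them are parked here. */
	static int patch_rendezvous(struct rendezvous *r, int nr_cpus,
				    int (*do_patch)(void))
	{
		int ret = 0;

		if (atomic_fetch_add(&r->cpu_count, 1) == 0) {
			ret = do_patch();	/* single writer */
			/* release the spinners: count becomes nr_cpus + 1 */
			atomic_fetch_add(&r->cpu_count, 1);
		} else {
			while (atomic_load(&r->cpu_count) <= nr_cpus)
				;		/* cpu_relax() in the kernel */
			/* the kernel adds isb() here to resync the pipeline */
		}
		return ret;
	}
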
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 88ff471b0bce..4a72c2727309 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < entry->max_stack &&
- tail && !((unsigned long)tail & 0xf))
+ tail && !((unsigned long)tail & 0x7))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f594957e29bd..d07788dad388 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
}
#define ARMV8_EVENT_ATTR(name, config) \
- (&((struct perf_pmu_events_attr) { \
- .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
- .id = config, \
- }).attr.attr)
+ PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
@@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
- return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+ return sysfs_emit(page, "0x%08x\n", slots);
}
static DEVICE_ATTR_RO(slots);
+static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
+ & ARMV8_PMU_BUS_SLOTS_MASK;
+
+ return sysfs_emit(page, "0x%08x\n", bus_slots);
+}
+
+static DEVICE_ATTR_RO(bus_slots);
+
+static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
+ & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 val = 0;
+
+ /* Encoded as Log2(number of bytes), plus one */
+ if (bus_width > 2 && bus_width < 13)
+ val = 1 << (bus_width - 1);
+
+ return sysfs_emit(page, "0x%08x\n", val);
+}
+
+static DEVICE_ATTR_RO(bus_width);
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
+ &dev_attr_bus_slots.attr,
+ &dev_attr_bus_width.attr,
NULL,
};
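
PMMIR_EL1.BUS_WIDTH, exposed by the new bus_width attribute, encodes the bus width
as Log2(bytes) plus one, with zero meaning "not implemented"; only encodings 3..12
are architecturally valid. Decoding in isolation (matches the logic above):

	/* field 3  => 1 << 2  = 4 bytes
	 * field 12 => 1 << 11 = 2048 bytes
	 * field 0 (or anything outside 3..12) => reported as 0 */
	static unsigned int decode_bus_width(unsigned int field)
	{
		return (field > 2 && field < 13) ? 1u << (field - 1) : 0;
	}
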
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d607c9912025..6dbcc89f6662 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -7,26 +7,28 @@
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
*/
+#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
-#include <linux/extable.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <linux/stringify.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/traps.h>
-#include <asm/ptrace.h>
+
#include <asm/cacheflush.h>
-#include <asm/debug-monitors.h>
#include <asm/daifflags.h>
-#include <asm/system_misc.h>
+#include <asm/debug-monitors.h>
#include <asm/insn.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
+#include <asm/patching.h>
+#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
#include "decode-insn.h"
@@ -277,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 25f67ec59635..22d0b3252476 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -10,6 +10,7 @@
#include <linux/kprobes.h>
#include <asm/ptrace.h>
+#include <asm/traps.h>
#include "simulate-insn.h"
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 2c247634552b..9be668f3f034 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
memcpy(dst, src, len);
/* flush caches (dcache/icache) */
- sync_icache_aliases(dst, len);
+ sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
kunmap_atomic(xol_page_kaddr);
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b4bb67f17a2c..5ba0ed036dee 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
-#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
@@ -46,7 +45,6 @@
#include <linux/prctl.h>
#include <asm/alternative.h>
-#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
@@ -74,63 +72,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-static void noinstr __cpu_do_idle(void)
-{
- dsb(sy);
- wfi();
-}
-
-static void noinstr __cpu_do_idle_irqprio(void)
-{
- unsigned long pmr;
- unsigned long daif_bits;
-
- daif_bits = read_sysreg(daif);
- write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
-
- /*
- * Unmask PMR before going idle to make sure interrupts can
- * be raised.
- */
- pmr = gic_read_pmr();
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
- __cpu_do_idle();
-
- gic_write_pmr(pmr);
- write_sysreg(daif_bits, daif);
-}
-
-/*
- * cpu_do_idle()
- *
- * Idle the processor (wait for interrupt).
- *
- * If the CPU supports priority masking we must do additional work to
- * ensure that interrupts are not masked at the PMR (because the core will
- * not wake up if we block the wake up signal in the interrupt controller).
- */
-void noinstr cpu_do_idle(void)
-{
- if (system_uses_irq_prio_masking())
- __cpu_do_idle_irqprio();
- else
- __cpu_do_idle();
-}
-
-/*
- * This is our default idle handler.
- */
-void noinstr arch_cpu_idle(void)
-{
- /*
- * This should do all the clock switching and wait for interrupt
- * tricks
- */
- cpu_do_idle();
- raw_local_irq_enable();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -435,6 +376,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
+ /*
+ * For the benefit of the unwinder, set up childregs->stackframe
+ * as the final frame for the new task.
+ */
+ p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
ptrace_hw_copy_thread(p);
@@ -527,6 +473,15 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
+static void compat_thread_switch(struct task_struct *next)
+{
+ if (!is_compat_thread(task_thread_info(next)))
+ return;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
static void update_sctlr_el1(u64 sctlr)
{
/*
@@ -568,6 +523,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
+ compat_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -598,7 +554,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page, ret = 0;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)try_get_task_stack(p);
@@ -633,8 +589,15 @@ unsigned long arch_align_stack(unsigned long sp)
*/
void arch_setup_new_exec(void)
{
- current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+ unsigned long mmflags = 0;
+
+ if (is_compat_task()) {
+ mmflags = MMCF_AARCH32;
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ }
+ current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
@@ -724,22 +687,6 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
-{
- lockdep_assert_irqs_disabled();
-
- /*
- * Preempting a task from an IRQ means we leave copies of PSTATE
- * on the stack. cpufeature's enable calls may modify PSTATE, but
- * resuming one of these preempted tasks would undo those changes.
- *
- * Only allow a task to be preempted once cpufeatures have been
- * enabled.
- */
- if (system_capabilities_finalized())
- preempt_schedule_irq();
-}
-
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
bool has_interp, bool is_interp)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index eb2f73939b7b..499b6b2f9757 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, NULL);
+ on_irq_stack(addr, sizeof(unsigned long), NULL);
}
/**
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 2c7ca449dd51..47f77d1234cb 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
return err;
}
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
- if (on_sdei_critical_stack(sp, info))
+ if (on_sdei_critical_stack(sp, size, info))
return true;
- if (on_sdei_normal_stack(sp, info))
+ if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
@@ -231,13 +233,13 @@ out_err:
}
/*
- * __sdei_handler() returns one of:
+ * do_sdei_event() returns one of:
* SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED - failure, return this error code to firmware.
* virtual-address - success, return to this address.
*/
-static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
- struct sdei_registered_event *arg)
+unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
{
u32 mode;
int i, err = 0;
@@ -292,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480;
}
-
-static void __kprobes notrace __sdei_pstate_entry(void)
-{
- /*
- * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
- * whether PSTATE bits are inherited unchanged or generated from
- * scratch, and the TF-A implementation always clears PAN and always
- * clears UAO. There are no other known implementations.
- *
- * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
- * PSTATE is modified upon architectural exceptions, and so PAN is
- * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
- * cleared.
- *
- * We must explicitly reset PAN to the expected state, including
- * clearing it when the host isn't using it, in case a VM had it set.
- */
- if (system_uses_hw_pan())
- set_pstate_pan(1);
- else if (cpu_has_pan())
- set_pstate_pan(0);
-}
-
-asmlinkage noinstr unsigned long
-__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
-{
- unsigned long ret;
-
- /*
- * We didn't take an exception to get here, so the HW hasn't
- * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
- */
- __sdei_pstate_entry();
-
- arm64_enter_nmi(regs);
-
- ret = _sdei_handler(regs, arg);
-
- arm64_exit_nmi(regs);
-
- return ret;
-}
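
All of the on_*_stack() helpers now take the size of the object being accessed, so
the containment test covers the whole object rather than just its first byte. The
intended semantics, as a standalone sketch (not the kernel's exact definition):

	/* True iff [sp, sp + size) lies entirely within [low, high). */
	static bool object_on_stack(unsigned long sp, unsigned long size,
				    unsigned long low, unsigned long high)
	{
		return low <= sp && sp < high && size <= high - sp;
	}
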
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 61845c0821d9..8ed66142b088 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
set_cpu_logical_map(0, mpidr);
- /*
- * clear __my_cpu_offset on boot CPU to avoid hang caused by
- * using percpu variable early, for example, lockdep will
- * access percpu variable inside lock_release
- */
- set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
(unsigned long)mpidr, read_cpuid_id());
}
@@ -381,7 +375,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6237486ff6bb..f8192f4ae0b8 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+ if (!compat_user_mode(regs))
+ return false;
+
+ /*
+ * We're preemptible, but a reschedule will cause us to check the
+ * affinity again.
+ */
+ return !cpumask_test_cpu(raw_smp_processor_id(),
+ system_32bit_el0_cpumask());
+}
+
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
@@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_NOTIFY_RESUME) {
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
+
+ /*
+ * If we reschedule after checking the affinity
+ * then we must ensure that TIF_NOTIFY_RESUME
+ * is set so that we check the affinity again.
+ * Since tracehook_notify_resume() clears the
+ * flag, ensure that the compiler doesn't move
+ * it after the affinity check.
+ */
+ barrier();
+
+ if (cpu_affinity_invalid(regs))
+ force_sig(SIGKILL);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
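
The barrier() above is a compiler fence only; no SMP ordering is involved. The hazard
it closes, spelled out (hand-written commentary, not kernel code):

	/* tracehook_notify_resume() effectively does:
	 *
	 *	clear_thread_flag(TIF_NOTIFY_RESUME);
	 *
	 * If the compiler hoisted the cpumask test in cpu_affinity_invalid()
	 * above that clear, a reschedule landing between the test and the
	 * clear would set TIF_NOTIFY_RESUME again only for it to be wiped
	 * out, and the task would never be re-checked against the 32-bit
	 * capable mask. barrier() pins program order for the compiler.
	 */
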
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index d62447964ed9..d3d37f932b97 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -7,8 +7,34 @@
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/thread_info.h>
+
+/*
+ * If we have SMCCC v1.3 and (as is likely) no SVE state in
+ * the registers then set the SMCCC hint bit to say there's no
+ * need to preserve it. Do this by directly adjusting the SMCCC
+ * function value which is already stored in x0 ready to be called.
+ */
+SYM_FUNC_START(__arm_smccc_sve_check)
+
+ ldr_l x16, smccc_has_sve_hint
+ cbz x16, 2f
+
+ get_current_task x16
+ ldr x16, [x16, #TSK_TI_FLAGS]
+ tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state?
+ tbnz x16, #TIF_SVE, 2f // Does that state include SVE?
+
+1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT
+
+2: ret
+SYM_FUNC_END(__arm_smccc_sve_check)
+EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
+alternative_if ARM64_SVE
+ bl __arm_smccc_sve_check
+alternative_else_nop_endif
\instr #0
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -43,3 +69,60 @@ SYM_FUNC_START(__arm_smccc_hvc)
SMCCC hvc
SYM_FUNC_END(__arm_smccc_hvc)
EXPORT_SYMBOL(__arm_smccc_hvc)
+
+ .macro SMCCC_1_2 instr
+ /* Save `res` and free a GPR that won't be clobbered */
+ stp x1, x19, [sp, #-16]!
+
+ /* Ensure `args` won't be clobbered while loading regs in next step */
+ mov x19, x0
+
+ /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */
+ ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ \instr #0
+
+ /* Load the `res` from the stack */
+ ldr x19, [sp]
+
+ /* Store the registers x0 - x17 into the result structure */
+ stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ /* Restore original x19 */
+ ldp xzr, x19, [sp], #16
+ ret
+.endm
+
+/*
+ * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_hvc)
+ SMCCC_1_2 hvc
+SYM_FUNC_END(arm_smccc_1_2_hvc)
+EXPORT_SYMBOL(arm_smccc_1_2_hvc)
+
+/*
+ * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_smc)
+ SMCCC_1_2 smc
+SYM_FUNC_END(arm_smccc_1_2_smc)
+EXPORT_SYMBOL(arm_smccc_1_2_smc)
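
The new arm_smccc_1_2_smc()/arm_smccc_1_2_hvc() entry points pass the full x0-x17
register window in both directions via struct arm_smccc_1_2_regs. A hedged
kernel-side caller sketch (the function ID below is a made-up placeholder):

	#include <linux/arm-smccc.h>

	#define EXAMPLE_FID	0xc4000000UL	/* placeholder SMCCC function ID */

	static unsigned long example_call(void)
	{
		struct arm_smccc_1_2_regs args = {
			.a0 = EXAMPLE_FID,
			.a1 = 0,	/* a1..a17 as the call requires */
		};
		struct arm_smccc_1_2_regs res;

		arm_smccc_1_2_smc(&args, &res);
		return res.a0;	/* x0..x17 all come back in res */
	}
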
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dcd7041b2b07..6f6ff072acbd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
- secondary_data.stack = NULL;
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
- unsigned int cpu;
-
- cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
+ unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
@@ -224,7 +217,6 @@ asmlinkage notrace void secondary_start_kernel(void)
init_gic_priority_masking();
rcu_cpu_starting(cpu);
- preempt_disable();
trace_hardirqs_off();
/*
@@ -352,7 +344,7 @@ void __cpu_die(unsigned int cpu)
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
@@ -452,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ /*
+ * The runtime per-cpu areas have been allocated by
+ * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+ * freed shortly, so we must move over to the runtime per-cpu area.
+ */
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c45a83512805..7e1624ecab3c 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
unsigned long size = sizeof(secondary_holding_pen_release);
secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
+ dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
}
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* the boot protocol.
*/
writeq_relaxed(pa_holding_pen, release_addr);
- __flush_dcache_area((__force void *)release_addr,
- sizeof(*release_addr));
+ dcache_clean_inval_poc((__force unsigned long)release_addr,
+ (__force unsigned long)release_addr +
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
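
The conversions in this file follow the same mechanical pattern as elsewhere in the
series: the old (pointer, size) cache calls become half-open [start, end) virtual
address ranges. If one wanted a compatibility wrapper (the kernel deliberately keeps
none), it would be:

	static inline void flush_dcache_area_compat(void *addr, size_t size)
	{
		dcache_clean_inval_poc((unsigned long)addr,
				       (unsigned long)addr + size);
	}
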
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index de07147a7926..b189de5ca6cb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
unsigned long fp = frame->fp;
struct stack_info info;
- if (fp & 0xf)
- return -EINVAL;
-
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, &info))
+ /* Final frame; nothing to unwind */
+ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+ return -ENOENT;
+
+ if (fp & 0x7)
+ return -EINVAL;
+
+ if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
if (test_bit(info.type, frame->stacks_done))
@@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = ptrauth_strip_insn_pac(frame->pc);
- /*
- * This is a terminal record, so we have finished unwinding.
- */
- if (!frame->fp && !frame->pc)
- return -ENOENT;
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
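
The address comparison above pairs with the copy_thread() change earlier in this
series: every task now ends its call chain at the zeroed frame record embedded in
its pt_regs, so the unwinder can terminate on the identity of the frame address
instead of sniffing for a {fp = 0, pc = 0} record. For reference, the record the
unwinder walks is just two words (a sketch of the layout, not a kernel struct):

	struct frame_record {
		unsigned long fp;	/* caller's frame record, or the final one */
		unsigned long lr;	/* return address */
	};

	/* termination: fp == (unsigned long)task_pt_regs(tsk)->stackframe */
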
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index e3f72df9509d..938ce6fbee8a 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -7,6 +7,7 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/cpuidle.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
@@ -91,6 +92,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int ret = 0;
unsigned long flags;
struct sleep_stack_data state;
+ struct arm_cpuidle_irq_context context;
/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
@@ -103,12 +105,18 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
flags = local_daif_save();
/*
- * Function graph tracer state gets incosistent when the kernel
+ * Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
*/
pause_graph_tracing();
+ /*
+ * Switch to using DAIF.IF instead of PMR in order to reliably
+ * resume if we're using pseudo-NMIs.
+ */
+ arm_cpuidle_save_irq_context(&context);
+
if (__cpu_suspend_enter(&state)) {
/* Call the suspend finisher */
ret = fn(arg);
@@ -126,6 +134,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
RCU_NONIDLE(__cpu_suspend_exit());
}
+ arm_cpuidle_restore_irq_context(&context);
+
unpause_graph_tracing();
/*
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 265fe3eb1069..db5159a3055f 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
dsb(ish);
}
- ret = __flush_cache_user_range(start, start + chunk);
+ ret = caches_clean_inval_user_pou(start, start + chunk);
if (ret)
return ret;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index a05d34f0e82a..b03e383d944a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
+#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
@@ -45,11 +46,102 @@
#include <asm/system_misc.h>
#include <asm/sysreg.h>
-static const char *handler[] = {
- "Synchronous Abort",
- "IRQ",
- "FIQ",
- "Error"
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /*PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
};
int show_unhandled_signals = 0;
@@ -751,27 +843,8 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector. This is always
- * fatal.
- */
-asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
-{
- arm64_enter_nmi(regs);
-
- console_verbose();
-
- pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
- handler[reason], smp_processor_id(), esr,
- esr_get_class_string(esr));
-
- __show_regs(regs);
- local_daif_mask();
- panic("bad mode");
-}
-
-/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
- * exceptions taken from EL0. Unlike bad_mode, this returns.
+ * exceptions taken from EL0.
*/
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
@@ -789,15 +862,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
- unsigned int esr = read_sysreg(esr_el1);
- unsigned long far = read_sysreg(far_el1);
-
- arm64_enter_nmi(regs);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@@ -870,15 +939,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
{
- arm64_enter_nmi(regs);
-
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
-
- arm64_exit_nmi(regs);
}
/* GENERIC_BUG traps */
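
The __check_*() helpers above avoid extracting individual flags by exploiting the
NZCV layout in PSTATE: N, Z, C and V sit in bits 31, 30, 29 and 28, so a shift moves
one flag into another's bit position. Worked example for HI (C set and Z clear), in
standalone form:

	#define PSR_Z_BIT	(1UL << 30)
	#define PSR_C_BIT	(1UL << 29)

	/* pstate >> 1 moves Z (bit 30) into bit 29, C's slot, so the
	 * and-not clears C whenever Z is set; bit 29 then holds C && !Z.
	 * The GE/LT checks work the same way: pstate << 3 moves V (bit
	 * 28) into bit 31, so pstate ^ (pstate << 3) leaves N ^ V there. */
	static bool check_hi(unsigned long pstate)
	{
		pstate &= ~(pstate >> 1);
		return (pstate & PSR_C_BIT) != 0;
	}
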
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 3964acf5451e..a4eba0908bfa 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -20,8 +20,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -38,8 +36,7 @@ menuconfig KVM
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 589921392cb1..989bb5dad2c8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
- $(KVM)/vfio.o $(KVM)/irqchip.o \
+ $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 74e0699661e9..3df67c127489 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -9,6 +9,7 @@
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h>
@@ -973,36 +974,154 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
return 0;
}
-int kvm_timer_hyp_init(bool has_gic)
+static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
- struct arch_timer_kvm_info *info;
- int err;
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
- info = arch_timer_get_kvm_info();
- timecounter = &info->timecounter;
+ return 0;
+}
- if (!timecounter->cc) {
- kvm_err("kvm_arch_timer: uninitialized timecounter\n");
- return -ENODEV;
+static int timer_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
+ return irq_chip_set_parent_state(d, which, val);
+
+ if (val)
+ irq_chip_mask_parent(d);
+ else
+ irq_chip_unmask_parent(d);
+
+ return 0;
+}
+
+static void timer_irq_eoi(struct irq_data *d)
+{
+ if (!irqd_is_forwarded_to_vcpu(d))
+ irq_chip_eoi_parent(d);
+}
+
+static void timer_irq_ack(struct irq_data *d)
+{
+ d = d->parent_data;
+ if (d->chip->irq_ack)
+ d->chip->irq_ack(d);
+}
+
+static struct irq_chip timer_chip = {
+ .name = "KVM",
+ .irq_ack = timer_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = timer_irq_eoi,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_vcpu_affinity = timer_irq_set_vcpu_affinity,
+ .irq_set_irqchip_state = timer_irq_set_irqchip_state,
+};
+
+static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ irq_hw_number_t hwirq = (uintptr_t)arg;
+
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &timer_chip, NULL);
+}
+
+static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+}
+
+static const struct irq_domain_ops timer_domain_ops = {
+ .alloc = timer_irq_domain_alloc,
+ .free = timer_irq_domain_free,
+};
+
+static struct irq_ops arch_timer_irq_ops = {
+ .get_input_level = kvm_arch_timer_get_input_level,
+};
+
+static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
+{
+ *flags = irq_get_trigger_type(virq);
+ if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
+ kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
+ virq);
+ *flags = IRQF_TRIGGER_LOW;
}
+}
- /* First, do the virtual EL1 timer irq */
+static int kvm_irq_init(struct arch_timer_kvm_info *info)
+{
+ struct irq_domain *domain = NULL;
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
return -ENODEV;
}
+
host_vtimer_irq = info->virtual_irq;
+ kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);
+
+ if (kvm_vgic_global_state.no_hw_deactivation) {
+ struct fwnode_handle *fwnode;
+ struct irq_data *data;
+
+ fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
+ if (!fwnode)
+ return -ENOMEM;
+
+ /* Assume both vtimer and ptimer in the same parent */
+ data = irq_get_irq_data(host_vtimer_irq);
+ domain = irq_domain_create_hierarchy(data->domain, 0,
+ NR_KVM_TIMERS, fwnode,
+ &timer_domain_ops, NULL);
+ if (!domain) {
+ irq_domain_free_fwnode(fwnode);
+ return -ENOMEM;
+ }
+
+ arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
+ WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
+ (void *)TIMER_VTIMER));
+ }
- host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
- if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
- host_vtimer_irq);
- host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+ if (info->physical_irq > 0) {
+ host_ptimer_irq = info->physical_irq;
+ kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);
+
+ if (domain)
+ WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
+ (void *)TIMER_PTIMER));
}
+ return 0;
+}
+
+int kvm_timer_hyp_init(bool has_gic)
+{
+ struct arch_timer_kvm_info *info;
+ int err;
+
+ info = arch_timer_get_kvm_info();
+ timecounter = &info->timecounter;
+
+ if (!timecounter->cc) {
+ kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+ return -ENODEV;
+ }
+
+ err = kvm_irq_init(info);
+ if (err)
+ return err;
+
+ /* First, do the virtual EL1 timer irq */
+
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
@@ -1027,15 +1146,6 @@ int kvm_timer_hyp_init(bool has_gic)
/* Now let's do the physical EL1 timer irq */
if (info->physical_irq > 0) {
- host_ptimer_irq = info->physical_irq;
- host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
- if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
- host_ptimer_irq);
- host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
- }
-
err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
"kvm guest ptimer", kvm_get_running_vcpus());
if (err) {
@@ -1143,7 +1253,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
map.direct_vtimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
if (ret)
return ret;
@@ -1151,7 +1261,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
map.direct_ptimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
}
if (ret)
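
The shape of the change above: a "kvm-timer" irq_domain is pushed on top of the GIC
domain so that KVM's irq_chip sits in the path of the timer interrupt. While the
line is forwarded to a vcpu, ack/EOI and active-state changes are emulated with
mask/unmask on the parent rather than deactivating at the GIC, which is what the
VGIC_IRQ_SW_RESAMPLE flag advertises to the vgic. The recurring predicate,
distilled (a restatement of the handlers above, not new mechanism):

	static void fwd_eoi(struct irq_data *d)
	{
		if (!irqd_is_forwarded_to_vcpu(d))
			irq_chip_eoi_parent(d);
		/* forwarded: the vgic resamples and unmasks on guest EOI */
	}
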
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1cb39c0803a4..e9a2b8f27792 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -93,6 +93,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+ case KVM_CAP_ARM_MTE:
+ if (!system_supports_mte() || kvm->created_vcpus)
+ return -EINVAL;
+ r = 0;
+ kvm->arch.mte_enabled = true;
+ break;
default:
r = -EINVAL;
break;
@@ -237,6 +243,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_ARM_MTE:
+ r = system_supports_mte();
+ break;
case KVM_CAP_STEAL_TIME:
r = kvm_arm_pvtime_supported();
break;
@@ -689,9 +698,22 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
vgic_v4_load(vcpu);
preempt_enable();
}
+
+ if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
+ kvm_pmu_handle_pmcr(vcpu,
+ __vcpu_sys_reg(vcpu, PMCR_EL0));
}
}
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+ if (likely(!vcpu_mode_is_32bit(vcpu)))
+ return false;
+
+ return !system_supports_32bit_el0() ||
+ static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -720,11 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}
- if (run->immediate_exit)
- return -EINTR;
-
vcpu_load(vcpu);
+ if (run->immediate_exit) {
+ ret = -EINTR;
+ goto out;
+ }
+
kvm_sigset_activate(vcpu);
ret = 1;
@@ -875,7 +899,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* with the asymmetric AArch32 case), return to userspace with
* a fatal error.
*/
- if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+ if (vcpu_mode_is_bad_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that
* it isn't fit for purpose anymore by making the vcpu
@@ -897,6 +921,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_sigset_deactivate(vcpu);
+out:
+ /*
+ * In the unlikely event that we are returning to userspace
+ * with pending exceptions or PC adjustment, commit these
+ * adjustments in order to give userspace a consistent view of
+ * the vcpu state. Note that this relies on __kvm_adjust_pc()
+ * being preempt-safe on VHE.
+ */
+ if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_INCREMENT_PC)))
+ kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
vcpu_put(vcpu);
return ret;
}
@@ -1064,7 +1100,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm);
else
- __flush_icache_all();
+ icache_inval_all_pou();
}
vcpu_reset_hcr(vcpu);
@@ -1336,6 +1372,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
return 0;
}
+ case KVM_ARM_MTE_COPY_TAGS: {
+ struct kvm_arm_copy_mte_tags copy_tags;
+
+ if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
+ return -EFAULT;
+ return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
+ }
default:
return -EINVAL;
}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5cb4a1cd5603..1dfb83578277 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -28,20 +28,40 @@
#include "trace.h"
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
- VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
- VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
- VCPU_STAT("mmio_exit_user", mmio_exit_user),
- VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
- VCPU_STAT("exits", exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_user),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, exits)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static bool core_reg_offset_is_vreg(u64 off)
@@ -995,3 +1015,89 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
+
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags)
+{
+ gpa_t guest_ipa = copy_tags->guest_ipa;
+ size_t length = copy_tags->length;
+ void __user *tags = copy_tags->addr;
+ gpa_t gfn;
+ bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
+ int ret = 0;
+
+ if (!kvm_has_mte(kvm))
+ return -EINVAL;
+
+ if (copy_tags->reserved[0] || copy_tags->reserved[1])
+ return -EINVAL;
+
+ if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
+ return -EINVAL;
+
+ if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
+ return -EINVAL;
+
+ gfn = gpa_to_gfn(guest_ipa);
+
+ mutex_lock(&kvm->slots_lock);
+
+ while (length > 0) {
+ kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
+ void *maddr;
+ unsigned long num_tags;
+ struct page *page;
+
+ if (is_error_noslot_pfn(pfn)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ page = pfn_to_online_page(pfn);
+ if (!page) {
+ /* Reject ZONE_DEVICE memory */
+ ret = -EFAULT;
+ goto out;
+ }
+ maddr = page_address(page);
+
+ if (!write) {
+ if (test_bit(PG_mte_tagged, &page->flags))
+ num_tags = mte_copy_tags_to_user(tags, maddr,
+ MTE_GRANULES_PER_PAGE);
+ else
+ /* No tags in memory, so write zeros */
+ num_tags = MTE_GRANULES_PER_PAGE -
+ clear_user(tags, MTE_GRANULES_PER_PAGE);
+ kvm_release_pfn_clean(pfn);
+ } else {
+ num_tags = mte_copy_tags_from_user(maddr, tags,
+ MTE_GRANULES_PER_PAGE);
+
+ /*
+ * Set the flag after checking the write
+ * completed fully
+ */
+ if (num_tags == MTE_GRANULES_PER_PAGE)
+ set_bit(PG_mte_tagged, &page->flags);
+
+ kvm_release_pfn_dirty(pfn);
+ }
+
+ if (num_tags != MTE_GRANULES_PER_PAGE) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ gfn++;
+ tags += num_tags;
+ length -= PAGE_SIZE;
+ }
+
+out:
+ mutex_unlock(&kvm->slots_lock);
+ /* If some data has been copied report the number of bytes copied */
+ if (length != copy_tags->length)
+ return copy_tags->length - length;
+ return ret;
+}
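
From userspace, KVM_ARM_MTE_COPY_TAGS is a VM ioctl that callers are expected to
loop around: it returns the number of bytes of guest memory whose tags were
transferred, or a negative error if no progress was made. A hedged caller sketch,
assuming a kernel with this series and one tag byte per 16-byte MTE granule:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the tags backing 'len' bytes of guest IPA space; 'tagbuf'
	 * must hold len / 16 bytes. guest_ipa and len must be page
	 * aligned or the ioctl fails with -EINVAL. */
	static long read_guest_tags(int vm_fd, uint64_t ipa, uint64_t len,
				    void *tagbuf)
	{
		struct kvm_arm_copy_mte_tags ct = {
			.guest_ipa = ipa,
			.length    = len,
			.addr      = tagbuf,
			.flags     = KVM_ARM_TAGS_FROM_GUEST,
		};

		return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &ct);
	}
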
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index e831d3dfd50d..435346ea1504 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -13,6 +13,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>
.text
@@ -51,6 +52,9 @@ alternative_else_nop_endif
add x29, x0, #VCPU_CONTEXT
+ // mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
+ mte_switch_to_guest x29, x1, x2
+
// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
// The below macro to restore guest keys is not implemented in C code
@@ -142,6 +146,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// when this feature is enabled for kernel code.
ptrauth_switch_to_hyp x1, x2, x3, x4, x5
+ // mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
+ mte_switch_to_hyp x1, x2, x3
+
// Restore hyp's sp_el0
restore_sp_el0 x2, x3
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 73629094f903..0418399e0a20 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -112,7 +112,8 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);
- // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+ if (kvm_has_mte(vcpu->kvm))
+ new |= PSR_TCO_BIT;
new |= (old & PSR_DIT_BIT);
@@ -296,7 +297,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +330,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+ kvm_inject_exception(vcpu);
+ vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_EXCEPT_MASK);
+ } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+ kvm_skip_instr(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+ }
+}
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5f49df4ffdd8..9aa9b73475c9 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -76,6 +76,7 @@ el1_trap:
b __guest_exit
el1_irq:
+el1_fiq:
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
@@ -131,7 +132,6 @@ SYM_CODE_END(\label)
invalid_vector el2t_error_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
- invalid_vector el1_fiq_invalid
.ltorg
@@ -179,12 +179,12 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_sync // Synchronous 64-bit EL1
valid_vect el1_irq // IRQ 64-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
+ valid_vect el1_fiq // FIQ 64-bit EL1
valid_vect el1_error // Error 64-bit EL1
valid_vect el1_sync // Synchronous 32-bit EL1
valid_vect el1_irq // IRQ 32-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
+ valid_vect el1_fiq // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 61716359035d..4fdfeabefeb4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
}
/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
- kvm_inject_exception(vcpu);
- vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_EXCEPT_MASK);
- } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
- kvm_skip_instr(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
- }
-}
-
-/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.
*/
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index cce43bfe158f..de7e14c862e6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -14,6 +14,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
@@ -26,6 +27,16 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+ if (!vcpu)
+ vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+ return kvm_has_mte(kern_hyp_va(vcpu->kvm));
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
@@ -46,6 +57,11 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
+ ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
+ }
+
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
@@ -107,6 +123,11 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
+ write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
+ }
+
if (!has_vhe() &&
cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
ctxt->__hyp_running_vcpu) {
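
ctxt_has_mte() above works in both directions because of how the contexts are laid
out: the host context records its running vcpu in __hyp_running_vcpu, while a guest
context is embedded at vcpu->arch.ctxt, so container_of() recovers the vcpu by
subtracting the member offset. The trick in isolation, with stand-in types:

	#include <stddef.h>

	struct vcpu { int id; struct ctx { long regs[31]; } ctxt; };

	/* container_of(c, struct vcpu, ctxt), spelled out */
	static struct vcpu *vcpu_of(struct ctx *c)
	{
		return (struct vcpu *)((char *)c - offsetof(struct vcpu, ctxt));
	}
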
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index 18a4494337bd..fb0f523d1492 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -7,7 +7,7 @@
#include <nvhe/memory.h>
#include <nvhe/spinlock.h>
-#define HYP_NO_ORDER UINT_MAX
+#define HYP_NO_ORDER USHRT_MAX
struct hyp_pool {
/*
@@ -19,48 +19,13 @@ struct hyp_pool {
struct list_head free_area[MAX_ORDER];
phys_addr_t range_start;
phys_addr_t range_end;
- unsigned int max_order;
+ unsigned short max_order;
};
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- p->refcount++;
- hyp_spin_unlock(&pool->lock);
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
- int ret;
-
- hyp_spin_lock(&pool->lock);
- p->refcount--;
- ret = (p->refcount == 0);
- hyp_spin_unlock(&pool->lock);
-
- return ret;
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- if (p->refcount) {
- hyp_spin_unlock(&pool->lock);
- BUG();
- }
- p->refcount = 1;
- hyp_spin_unlock(&pool->lock);
-}
-
/* Allocation */
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);
/* Used pages cannot be freed */
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 42d81ec739fa..9c227d87c36d 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -23,7 +23,7 @@ extern struct host_kvm host_kvm;
int __pkvm_prot_finalize(void);
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
+int kvm_host_prepare_stage2(void *pgt_pool_base);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
static __always_inline void __load_host_stage2(void)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index fd78bde939ee..592b7edb3edb 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -7,12 +7,9 @@
#include <linux/types.h>
-struct hyp_pool;
struct hyp_page {
- unsigned int refcount;
- unsigned int order;
- struct hyp_pool *pool;
- struct list_head node;
+ unsigned short refcount;
+ unsigned short order;
};
extern u64 __hyp_vmemmap;
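
Together with the gfp.h change above, this shrinks struct hyp_page considerably: the
pool backpointer and list node go away (a free page can store its list node in the
page memory itself), and refcount/order drop to unsigned short. Back-of-the-envelope,
assuming 64-bit pointers (an estimate, not a figure quoted from the series):

	/* before: 4 (refcount) + 4 (order) + 8 (pool *) + 16 (list_head)
	 *         = 32 bytes per page of hyp vmemmap
	 * after:  2 (refcount) + 2 (order) = 4 bytes per page
	 *
	 * _Static_assert(sizeof(struct hyp_page) == 4, "vmemmap entry grew");
	 */
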
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 0095f6289742..8ec3a5a7744b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -78,19 +78,20 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
return res;
}
-static inline unsigned long host_s2_mem_pgtable_pages(void)
+static inline unsigned long host_s2_pgtable_pages(void)
{
+ unsigned long res;
+
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
- return __hyp_pgtable_total_pages() + 16;
-}
+ res = __hyp_pgtable_total_pages() + 16;
-static inline unsigned long host_s2_dev_pgtable_pages(void)
-{
/* Allow 1 GiB for MMIO mappings */
- return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
}
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
index 36cef6915428..958734f4d6b0 100644
--- a/arch/arm64/kvm/hyp/nvhe/cache.S
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -7,7 +7,7 @@
#include <asm/assembler.h>
#include <asm/alternative.h>
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f36420a80474..1632f001f4ed 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
+ HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index e342f7f4f4fb..d938ce95d3bd 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -23,8 +23,7 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
/*
* Copies of the host's CPU features registers holding sanitized values.
@@ -36,7 +35,7 @@ static const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
- return hyp_alloc_pages(&host_s2_mem, get_order(size));
+ return hyp_alloc_pages(&host_s2_pool, get_order(size));
}
static void *host_s2_zalloc_page(void *pool)
@@ -44,20 +43,24 @@ static void *host_s2_zalloc_page(void *pool)
return hyp_alloc_pages(pool, 0);
}
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static void host_s2_get_page(void *addr)
+{
+ hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+ hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
{
unsigned long nr_pages, pfn;
int ret;
- pfn = hyp_virt_to_pfn(mem_pgt_pool);
- nr_pages = host_s2_mem_pgtable_pages();
- ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
- if (ret)
- return ret;
-
- pfn = hyp_virt_to_pfn(dev_pgt_pool);
- nr_pages = host_s2_dev_pgtable_pages();
- ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+ pfn = hyp_virt_to_pfn(pgt_pool_base);
+ nr_pages = host_s2_pgtable_pages();
+ ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
if (ret)
return ret;
@@ -67,8 +70,8 @@ static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
- .get_page = hyp_get_page,
- .put_page = hyp_put_page,
+ .get_page = host_s2_get_page,
+ .put_page = host_s2_put_page,
};
return 0;
@@ -86,7 +89,7 @@ static void prepare_host_vtcr(void)
id_aa64mmfr1_el1_sys_val, phys_shift);
}
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
{
struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
int ret;
@@ -94,7 +97,7 @@ int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
prepare_host_vtcr();
hyp_spin_lock_init(&host_kvm.lock);
- ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+ ret = prepare_s2_pool(pgt_pool_base);
if (ret)
return ret;
@@ -199,11 +202,10 @@ static bool range_is_memory(u64 start, u64 end)
}
static inline int __host_stage2_idmap(u64 start, u64 end,
- enum kvm_pgtable_prot prot,
- struct hyp_pool *pool)
+ enum kvm_pgtable_prot prot)
{
return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, pool);
+ prot, &host_s2_pool);
}
static int host_stage2_idmap(u64 addr)
@@ -211,7 +213,6 @@ static int host_stage2_idmap(u64 addr)
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
struct kvm_mem_range range;
bool is_memory = find_mem_range(addr, &range);
- struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
int ret;
if (is_memory)
@@ -222,22 +223,21 @@ static int host_stage2_idmap(u64 addr)
if (ret)
goto unlock;
- ret = __host_stage2_idmap(range.start, range.end, prot, pool);
- if (is_memory || ret != -ENOMEM)
+ ret = __host_stage2_idmap(range.start, range.end, prot);
+ if (ret != -ENOMEM)
goto unlock;
/*
- * host_s2_mem has been provided with enough pages to cover all of
- * memory with page granularity, so we should never hit the ENOMEM case.
- * However, it is difficult to know how much of the MMIO range we will
- * need to cover upfront, so we may need to 'recycle' the pages if we
- * run out.
+ * The pool has been provided with enough pages to cover all of memory
+ * with page granularity, but it is difficult to know how much of the
+ * MMIO range we will need to cover upfront, so we may need to 'recycle'
+ * the pages if we run out.
*/
ret = host_stage2_unmap_dev_all();
if (ret)
goto unlock;
- ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+ ret = __host_stage2_idmap(range.start, range.end, prot);
unlock:
hyp_spin_unlock(&host_kvm.lock);
@@ -258,7 +258,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
hyp_spin_lock(&host_kvm.lock);
ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
- &host_s2_mem, pkvm_hyp_id);
+ &host_s2_pool, pkvm_hyp_id);
hyp_spin_unlock(&host_kvm.lock);
return ret != -EAGAIN ? ret : 0;
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index 237e03bf0cb1..41fc25bdfb34 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -32,7 +32,7 @@ u64 __hyp_vmemmap;
*/
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
phys_addr_t addr = hyp_page_to_phys(p);
@@ -51,21 +51,49 @@ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
- if (!buddy || buddy->order != order || list_empty(&buddy->node))
+ if (!buddy || buddy->order != order || buddy->refcount)
return NULL;
return buddy;
}
+/*
+ * Pages that are available for allocation are tracked in free-lists, so we use
+ * the pages themselves to store the list nodes to avoid wasting space. As the
+ * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
+ * path to optimize allocation speed), we also need to clean up the list node in
+ * each page when we take it out of the list.
+ */
+static inline void page_remove_from_list(struct hyp_page *p)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ __list_del_entry(node);
+ memset(node, 0, sizeof(*node));
+}
+
+static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ INIT_LIST_HEAD(node);
+ list_add_tail(node, head);
+}
+
+static inline struct hyp_page *node_to_page(struct list_head *node)
+{
+ return hyp_virt_to_page(node);
+}
+
static void __hyp_attach_page(struct hyp_pool *pool,
struct hyp_page *p)
{
- unsigned int order = p->order;
+ unsigned short order = p->order;
struct hyp_page *buddy;
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
@@ -83,32 +111,23 @@ static void __hyp_attach_page(struct hyp_pool *pool,
break;
/* Take the buddy out of its list, and coalesce with @p */
- list_del_init(&buddy->node);
+ page_remove_from_list(buddy);
buddy->order = HYP_NO_ORDER;
p = min(p, buddy);
}
/* Mark the new head, and insert it */
p->order = order;
- list_add_tail(&p->node, &pool->free_area[order]);
-}
-
-static void hyp_attach_page(struct hyp_page *p)
-{
- struct hyp_pool *pool = hyp_page_to_pool(p);
-
- hyp_spin_lock(&pool->lock);
- __hyp_attach_page(pool, p);
- hyp_spin_unlock(&pool->lock);
+ page_add_to_list(p, &pool->free_area[order]);
}
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned int order)
+ unsigned short order)
{
struct hyp_page *buddy;
- list_del_init(&p->node);
+ page_remove_from_list(p);
while (p->order > order) {
/*
* The buddy of order n - 1 currently has HYP_NO_ORDER as it
@@ -119,30 +138,64 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
p->order--;
buddy = __find_buddy_nocheck(pool, p, p->order);
buddy->order = p->order;
- list_add_tail(&buddy->node, &pool->free_area[buddy->order]);
+ page_add_to_list(buddy, &pool->free_area[buddy->order]);
}
return p;
}
-void hyp_put_page(void *addr)
+static inline void hyp_page_ref_inc(struct hyp_page *p)
{
- struct hyp_page *p = hyp_virt_to_page(addr);
+ BUG_ON(p->refcount == USHRT_MAX);
+ p->refcount++;
+}
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+ p->refcount--;
+ return (p->refcount == 0);
+}
+
+static inline void hyp_set_page_refcounted(struct hyp_page *p)
+{
+ BUG_ON(p->refcount);
+ p->refcount = 1;
+}
+
+static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
+{
if (hyp_page_ref_dec_and_test(p))
- hyp_attach_page(p);
+ __hyp_attach_page(pool, p);
+}
+
+/*
+ * Changes to the buddy tree and page refcounts must be done with the hyp_pool
+ * lock held. If a refcount change requires an update to the buddy tree (e.g.
+ * hyp_put_page()), both operations must be done within the same critical
+ * section to guarantee transient states (e.g. a page with null refcount but
+ * not yet attached to a free list) can't be observed by well-behaved readers.
+ */
+void hyp_put_page(struct hyp_pool *pool, void *addr)
+{
+ struct hyp_page *p = hyp_virt_to_page(addr);
+
+ hyp_spin_lock(&pool->lock);
+ __hyp_put_page(pool, p);
+ hyp_spin_unlock(&pool->lock);
}
-void hyp_get_page(void *addr)
+void hyp_get_page(struct hyp_pool *pool, void *addr)
{
struct hyp_page *p = hyp_virt_to_page(addr);
+ hyp_spin_lock(&pool->lock);
hyp_page_ref_inc(p);
+ hyp_spin_unlock(&pool->lock);
}
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
- unsigned int i = order;
+ unsigned short i = order;
struct hyp_page *p;
hyp_spin_lock(&pool->lock);
@@ -156,11 +209,11 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
}
/* Extract it from the tree at the right order */
- p = list_first_entry(&pool->free_area[i], struct hyp_page, node);
+ p = node_to_page(pool->free_area[i].next);
p = __hyp_extract_page(pool, p, order);
- hyp_spin_unlock(&pool->lock);
hyp_set_page_refcounted(p);
+ hyp_spin_unlock(&pool->lock);
return hyp_page_to_virt(p);
}
@@ -181,15 +234,14 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
/* Init the vmemmap portion */
p = hyp_phys_to_page(phys);
- memset(p, 0, sizeof(*p) * nr_pages);
for (i = 0; i < nr_pages; i++) {
- p[i].pool = pool;
- INIT_LIST_HEAD(&p[i].node);
+ p[i].order = 0;
+ hyp_set_page_refcounted(&p[i]);
}
/* Attach the unused pages to the buddy tree */
for (i = reserved_pages; i < nr_pages; i++)
- __hyp_attach_page(pool, &p[i]);
+ __hyp_put_page(pool, &p[i]);
return 0;
}
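
With struct hyp_page stripped of its embedded list_head, the free-list node
now lives inside the free page itself and is wiped on removal, which is why
the allocator can keep handing out pre-zeroed pages. A sketch of that trick
(standalone C; the list helpers are simplified versions of the kernel's):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define PAGE_SIZE 4096

int main(void)
{
	struct list_head free_list = { &free_list, &free_list };
	void *page = calloc(1, PAGE_SIZE);	/* a zeroed free page */
	struct list_head *node = page;		/* node lives in the page */

	list_add_tail(node, &free_list);

	/* "Allocate": unlink, then scrub the node so the page is all-zero
	 * again before it is handed out. */
	list_del(node);
	memset(node, 0, sizeof(*node));

	printf("page is zeroed again: %d\n", ((char *)page)[0] == 0);
	free(page);
	return 0;
}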
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 7488f53b0aa2..0b574d106519 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -17,7 +17,6 @@
#include <nvhe/trap_handler.h>
struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -25,8 +24,8 @@ unsigned long hyp_nr_cpus;
static void *vmemmap_base;
static void *hyp_pgt_base;
-static void *host_s2_mem_pgt_base;
-static void *host_s2_dev_pgt_base;
+static void *host_s2_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static int divide_memory_pool(void *virt, unsigned long size)
{
@@ -45,14 +44,9 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!hyp_pgt_base)
return -ENOMEM;
- nr_pages = host_s2_mem_pgtable_pages();
- host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
- if (!host_s2_mem_pgt_base)
- return -ENOMEM;
-
- nr_pages = host_s2_dev_pgtable_pages();
- host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
- if (!host_s2_dev_pgt_base)
+ nr_pages = host_s2_pgtable_pages();
+ host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
+ if (!host_s2_pgt_base)
return -ENOMEM;
return 0;
@@ -134,7 +128,8 @@ static void update_nvhe_init_params(void)
for (i = 0; i < hyp_nr_cpus; i++) {
params = per_cpu_ptr(&kvm_init_params, i);
params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
- __flush_dcache_area(params, sizeof(*params));
+ dcache_clean_inval_poc((unsigned long)params,
+ (unsigned long)params + sizeof(*params));
}
}
@@ -143,6 +138,16 @@ static void *hyp_zalloc_hyp_page(void *arg)
return hyp_alloc_pages(&hpool, 0);
}
+static void hpool_get_page(void *addr)
+{
+ hyp_get_page(&hpool, addr);
+}
+
+static void hpool_put_page(void *addr)
+{
+ hyp_put_page(&hpool, addr);
+}
+
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -158,7 +163,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
- ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base);
+ ret = kvm_host_prepare_stage2(host_s2_pgt_base);
if (ret)
goto out;
@@ -166,8 +171,8 @@ void __noreturn __pkvm_init_finalise(void)
.zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
- .get_page = hyp_get_page,
- .put_page = hyp_put_page,
+ .get_page = hpool_get_page,
+ .put_page = hpool_put_page,
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
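
Since hyp_get_page()/hyp_put_page() now take an explicit pool, the
one-argument kvm_pgtable_mm_ops callbacks are satisfied by thin per-pool
wrappers such as hpool_get_page() above. The pattern, reduced to a sketch
(standalone C, illustrative names):

#include <stdio.h>

struct hyp_pool { int refs; };

static void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	pool->refs++;			/* stand-in for the real refcounting */
	(void)addr;
}

struct mm_ops {
	void (*get_page)(void *addr);	/* callback carries no pool */
};

static struct hyp_pool hpool;

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);	/* wrapper supplies the pool */
}

int main(void)
{
	struct mm_ops ops = { .get_page = hpool_get_page };
	int page;

	ops.get_page(&page);
	printf("refs = %d\n", hpool.refs);
	return 0;
}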
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e9f6ea704d07..f7af9688c1f7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 83dc3b271bc5..38ed0f6f2703 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
* you should be running with VHE enabled.
*/
if (icache_is_vpipt())
- __flush_icache_all();
+ icache_inval_all_pou();
__tlb_switch_to_host(&cxt);
}
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c37c1dc4feaf..05321f4165e3 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -577,12 +577,24 @@ static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
mm_ops->put_page(ptep);
}
+static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
+{
+ u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
+ return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
+}
+
+static bool stage2_pte_executable(kvm_pte_t pte)
+{
+ return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
+}
+
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
{
kvm_pte_t new, old = *ptep;
u64 granule = kvm_granule_size(level), phys = data->phys;
+ struct kvm_pgtable *pgt = data->mmu->pgt;
struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_block_mapping_supported(addr, end, phys, level))
@@ -606,6 +618,14 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
}
+ /* Perform CMOs before installation of the guest stage-2 PTE */
+ if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
+ granule);
+
+ if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+ mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
+
smp_store_release(ptep, new);
if (stage2_pte_is_counted(new))
mm_ops->get_page(ptep);
@@ -798,12 +818,6 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
-{
- u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
- return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
-}
-
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
@@ -839,8 +853,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
stage2_put_pte(ptep, mmu, addr, level, mm_ops);
if (need_flush) {
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
}
if (childp)
@@ -861,10 +878,11 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
}
struct stage2_attr_data {
- kvm_pte_t attr_set;
- kvm_pte_t attr_clr;
- kvm_pte_t pte;
- u32 level;
+ kvm_pte_t attr_set;
+ kvm_pte_t attr_clr;
+ kvm_pte_t pte;
+ u32 level;
+ struct kvm_pgtable_mm_ops *mm_ops;
};
static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -873,6 +891,7 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
{
kvm_pte_t pte = *ptep;
struct stage2_attr_data *data = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_pte_valid(pte))
return 0;
@@ -887,8 +906,17 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* but worst-case the access flag update gets lost and will be
* set on the next access instead.
*/
- if (data->pte != pte)
+ if (data->pte != pte) {
+ /*
+ * Invalidate instruction cache before updating the guest
+ * stage-2 PTE if we are going to add executable permission.
+ */
+ if (mm_ops->icache_inval_pou &&
+ stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
+ mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
+ kvm_granule_size(level));
WRITE_ONCE(*ptep, pte);
+ }
return 0;
}
@@ -903,6 +931,7 @@ static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
struct stage2_attr_data data = {
.attr_set = attr_set & attr_mask,
.attr_clr = attr_clr & attr_mask,
+ .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_attr_walker,
@@ -988,11 +1017,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
struct kvm_pgtable *pgt = arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
+ kvm_pte_t *pte_follow;
if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+ pte_follow = kvm_pte_follow(pte, mm_ops);
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
return 0;
}
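
The map walker now performs cache maintenance before publishing a stage-2
PTE: D-cache clean+invalidate to PoC for cacheable mappings, I-cache
invalidate to PoU for executable ones. A sketch of the two predicates
driving that decision (standalone C; the bit positions are simplified
stand-ins, not the architectural encodings):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ATTR_MEMATTR_MASK	0x3cULL		/* illustrative field */
#define MEMATTR_NORMAL		0x3cULL
#define ATTR_XN			(1ULL << 54)

static bool pte_cacheable(uint64_t pte)
{
	return (pte & ATTR_MEMATTR_MASK) == MEMATTR_NORMAL;
}

static bool pte_executable(uint64_t pte)
{
	return !(pte & ATTR_XN);
}

int main(void)
{
	uint64_t new_pte = MEMATTR_NORMAL;	/* normal memory, executable */

	if (pte_cacheable(new_pte))
		printf("clean+invalidate D-cache to PoC first\n");
	if (pte_executable(new_pte))
		printf("invalidate I-cache to PoU first\n");
	/* only then: smp_store_release(ptep, new_pte) in the real code */
	return 0;
}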
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
index 83ca23ac259b..d654921dd09b 100644
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -71,8 +71,7 @@ void __init kvm_hyp_reserve(void)
}
hyp_mem_pages += hyp_s1_pgtable_pages();
- hyp_mem_pages += host_s2_mem_pgtable_pages();
- hyp_mem_pages += host_s2_dev_pgtable_pages();
+ hyp_mem_pages += host_s2_pgtable_pages();
/*
* The hyp_vmemmap needs to be backed by pages, but these pages
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 7b8f7db5c1ed..b3229924d243 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c5d1f3c87dbd..f23dfa06433b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -126,6 +126,16 @@ static void *kvm_host_va(phys_addr_t phys)
return __va(phys);
}
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+ __clean_dcache_guest_page(va, size);
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+ __invalidate_icache_guest_page(va, size);
+}
+
/*
* Unmapping vs dcache management:
*
@@ -432,6 +442,8 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.page_count = kvm_host_page_count,
.phys_to_virt = kvm_host_va,
.virt_to_phys = kvm_host_pa,
+ .dcache_clean_inval_poc = clean_dcache_guest_page,
+ .icache_inval_pou = invalidate_icache_guest_page,
};
/**
@@ -693,16 +705,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __invalidate_icache_guest_page(pfn, size);
-}
-
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -822,6 +824,74 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
return PAGE_SIZE;
}
+static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
+{
+ unsigned long pa;
+
+ if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
+ return huge_page_shift(hstate_vma(vma));
+
+ if (!(vma->vm_flags & VM_PFNMAP))
+ return PAGE_SHIFT;
+
+ VM_BUG_ON(is_vm_hugetlb_page(vma));
+
+ pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PUD_SIZE) <= vma->vm_end)
+ return PUD_SHIFT;
+#endif
+
+ if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PMD_SIZE) <= vma->vm_end)
+ return PMD_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+/*
+ * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
+ * able to see the page's tags and therefore they must be initialised first. If
+ * PG_mte_tagged is set, tags have already been initialised.
+ *
+ * The race in the test/set of the PG_mte_tagged flag is handled by:
+ * - preventing VM_SHARED mappings in a memslot with MTE, which stops two
+ * VMs racing to sanitise the same page
+ * - mmap_lock protects between a VM faulting a page in and the VMM performing
+ * an mprotect() to add VM_MTE
+ */
+static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+ unsigned long size)
+{
+ unsigned long i, nr_pages = size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (!kvm_has_mte(kvm))
+ return 0;
+
+ /*
+ * pfn_to_online_page() is used to reject ZONE_DEVICE pages
+ * that may not support tags.
+ */
+ page = pfn_to_online_page(pfn);
+
+ if (!page)
+ return -EFAULT;
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_bit(PG_mte_tagged, &page->flags)) {
+ mte_clear_page_tags(page_address(page));
+ set_bit(PG_mte_tagged, &page->flags);
+ }
+ }
+
+ return 0;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -830,6 +900,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool write_fault, writable, force_pte = false;
bool exec_fault;
bool device = false;
+ bool shared;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -853,26 +924,31 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- /* Let's check if we will get back a huge page backed by hugetlbfs */
+ /*
+ * Let's check if we will get back a huge page backed by hugetlbfs, or
+ * a block mapping for a device MMIO region.
+ */
mmap_read_lock(current->mm);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
+ vma = vma_lookup(current->mm, hva);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma))
- vma_shift = huge_page_shift(hstate_vma(vma));
- else
- vma_shift = PAGE_SHIFT;
-
- if (logging_active ||
- (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * logging_active is guaranteed to never be true for VM_PFNMAP
+ * memslots.
+ */
+ if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
+ } else {
+ vma_shift = get_vma_page_shift(vma, hva);
}
+ shared = (vma->vm_flags & VM_PFNMAP);
+
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SHIFT:
@@ -943,8 +1019,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
if (kvm_is_device_pfn(pfn)) {
+ /*
+ * If the page was identified as device early by looking at
+ * the VMA flags, vma_pagesize is already representing the
+ * largest quantity we can map. If instead it was mapped
+ * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
+ * and must not be upgraded.
+ *
+ * In both cases, we don't let transparent_hugepage_adjust()
+ * change things at the last minute.
+ */
device = true;
- force_pte = true;
} else if (logging_active && !write_fault) {
/*
* Only actually map the page as writable if this was a write
@@ -965,19 +1050,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (vma_pagesize == PAGE_SIZE && !force_pte)
+ if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
&pfn, &fault_ipa);
+
+ if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+ /* Check the VMM hasn't introduced a new VM_SHARED VMA */
+ if (!shared)
+ ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ else
+ ret = -EFAULT;
+ if (ret)
+ goto out_unlock;
+ }
+
if (writable)
prot |= KVM_PGTABLE_PROT_W;
- if (fault_status != FSC_PERM && !device)
- clean_dcache_guest_page(pfn, vma_pagesize);
-
- if (exec_fault) {
+ if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- invalidate_icache_guest_page(pfn, vma_pagesize);
- }
if (device)
prot |= KVM_PGTABLE_PROT_DEVICE;
@@ -1156,31 +1247,34 @@ out_unlock:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
- return 0;
+ return false;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_pfn_t pfn = pte_pfn(range->pte);
+ int ret;
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(range->end - range->start != 1);
- /*
- * We've moved a page around, probably through CoW, so let's treat it
- * just like a translation fault and clean the cache to the PoC.
- */
- clean_dcache_guest_page(pfn, PAGE_SIZE);
+ ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
+ if (ret)
+ return false;
/*
+ * We've moved a page around, probably through CoW, so let's treat
+ * it just like a translation fault; the map handler will clean
+ * the cache to the PoC.
+ *
* The MMU notifiers will have unmapped a huge PMD before calling
* ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
* therefore we never need to clear out a huge PMD through this
@@ -1190,7 +1284,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
PAGE_SIZE, __pfn_to_phys(pfn),
KVM_PGTABLE_PROT_R, NULL);
- return 0;
+ return false;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1294,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t pte;
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -1213,7 +1307,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT);
@@ -1346,7 +1440,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
{
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1363,8 +1456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we can map
- * any of them right now.
+ * between them, so iterate over all of them.
*
* +--------------------------------------------+
* +---------------+----------------+ +----------------+
@@ -1375,51 +1467,29 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
*/
do {
struct vm_area_struct *vma;
- hva_t vm_start, vm_end;
vma = find_vma_intersection(current->mm, hva, reg_end);
if (!vma)
break;
/*
- * Take the intersection of this VMA with the memory region
+ * VM_SHARED mappings are not allowed with MTE to avoid races
+ * when updating the PG_mte_tagged page flag, see
+ * sanitise_mte_tags() for more details.
*/
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
+ if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
+ return -EINVAL;
if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable);
- if (ret)
break;
+ }
}
- hva = vm_end;
+ hva = min(reg_end, vma->vm_end);
} while (hva < reg_end);
- if (change == KVM_MR_FLAGS_ONLY)
- goto out;
-
- spin_lock(&kvm->mmu_lock);
- if (ret)
- unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
- else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- stage2_flush_memslot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-out:
mmap_read_unlock(current->mm);
return ret;
}
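
sanitise_mte_tags() initialises a page's MTE tags exactly once before the
guest can observe them, using PG_mte_tagged to make the operation
idempotent. A toy model of that once-per-page loop (standalone C; the flag
and tag helpers are hypothetical stand-ins):

#include <stdio.h>
#include <stdbool.h>

#define NR_PAGES 4

struct page { bool mte_tagged; };

static void clear_page_tags(struct page *p)
{
	/* stand-in for mte_clear_page_tags(page_address(page)) */
	printf("tags cleared for page %p\n", (void *)p);
}

static void sanitise(struct page *pages, unsigned long nr)
{
	for (unsigned long i = 0; i < nr; i++) {
		if (!pages[i].mte_tagged) {
			clear_page_tags(&pages[i]);
			pages[i].mte_tagged = true;
		}
	}
}

int main(void)
{
	struct page pages[NR_PAGES] = { 0 };

	sanitise(pages, NR_PAGES);	/* clears all four */
	sanitise(pages, NR_PAGES);	/* second pass is a no-op */
	return 0;
}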
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fd167d4f4215..f33825c995cb 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
+ mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
@@ -850,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ /* One-off reload of the PMU on first run */
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
return 0;
}
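
The one-line PMU fix above reflects that PMCR_EL0.P resets the event
counters but not the cycle counter, so the cycle-counter bit must be masked
out before the reset loop. In sketch form (standalone C; constants mirror
the kernel's):

#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31
#define BIT(n)			(1UL << (n))

int main(void)
{
	unsigned long mask = 0xffffffffUL;	/* all implemented counters */

	mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);	/* spare the cycle counter */

	for (int i = 0; i < 32; i++)
		if (mask & BIT(i))
			printf("reset counter %d\n", i);
	return 0;
}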
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 956cdc240148..cba7872d69a8 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -166,6 +166,29 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ bool is32bit;
+ int i;
+
+ is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+ return false;
+
+ /* MTE is incompatible with AArch32 */
+ if (kvm_has_mte(vcpu->kvm) && is32bit)
+ return false;
+
+ /* Check that the vcpus are either all 32bit or all 64bit */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+ return false;
+ }
+
+ return true;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@@ -217,13 +240,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
+ if (!vcpu_allowed_register_width(vcpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
- ret = -EINVAL;
- goto out;
- }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
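
vcpu_allowed_register_width() rejects VMs that mix 32-bit and 64-bit EL1
vCPUs (and 32-bit EL1 alongside MTE). The consistency check reduces to a
simple scan (standalone C, illustrative):

#include <stdio.h>
#include <stdbool.h>

struct vcpu { bool el1_32bit; };

static bool widths_consistent(const struct vcpu *vcpus, int n, bool is32bit)
{
	for (int i = 0; i < n; i++)
		if (vcpus[i].el1_32bit != is32bit)
			return false;
	return true;
}

int main(void)
{
	struct vcpu vm[3] = { { true }, { true }, { false } };

	printf("allowed: %d\n", widths_consistent(vm, 3, vm[0].el1_32bit));
	return 0;
}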
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 76ea2800c33e..f6f126eb6ac1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+ trace_trap_reg(__func__, rd->CRm, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
return true;
}
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -1047,6 +1047,13 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64PFR1_EL1:
val &= ~FEATURE(ID_AA64PFR1_MTE);
+ if (kvm_has_mte(vcpu->kvm)) {
+ u64 pfr, mte;
+
+ pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+ }
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1302,6 +1309,23 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_mte(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+#define MTE_REG(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = undef_access, \
+ .reset = reset_unknown, \
+ .reg = name, \
+ .visibility = mte_visibility, \
+}
+
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1470,8 +1494,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
- { SYS_DESC(SYS_RGSR_EL1), undef_access },
- { SYS_DESC(SYS_GCR_EL1), undef_access },
+ MTE_REG(RGSR_EL1),
+ MTE_REG(GCR_EL1),
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
@@ -1498,8 +1522,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
- { SYS_DESC(SYS_TFSR_EL1), undef_access },
- { SYS_DESC(SYS_TFSRE0_EL1), undef_access },
+ MTE_REG(TFSR_EL1),
+ MTE_REG(TFSRE0_EL1),
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
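
MTE_REG ties each MTE system register to a visibility callback, so the
register simply disappears from the userspace view when the VM lacks the
feature. The pattern in miniature (standalone C, illustrative):

#include <stdio.h>
#include <stdbool.h>

#define REG_HIDDEN 1

struct kvm { bool has_mte; };

static unsigned int mte_visibility(const struct kvm *kvm)
{
	return kvm->has_mte ? 0 : REG_HIDDEN;
}

int main(void)
{
	struct kvm vm = { .has_mte = false };

	printf("TFSR_EL1 %s\n",
	       mte_visibility(&vm) & REG_HIDDEN ? "hidden" : "visible");
	return 0;
}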
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 58cbda00e56d..340c51d87677 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -482,6 +482,16 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static struct gic_kvm_info *gic_kvm_info;
+
+void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
+{
+ BUG_ON(gic_kvm_info != NULL);
+ gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (gic_kvm_info)
+ *gic_kvm_info = *info;
+}
+
/**
* kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
*
@@ -509,18 +519,29 @@ void kvm_vgic_init_cpu_hardware(void)
*/
int kvm_vgic_hyp_init(void)
{
- const struct gic_kvm_info *gic_kvm_info;
+ bool has_mask;
int ret;
- gic_kvm_info = gic_get_kvm_info();
if (!gic_kvm_info)
return -ENODEV;
- if (!gic_kvm_info->maint_irq) {
+ has_mask = !gic_kvm_info->no_maint_irq_mask;
+
+ if (has_mask && !gic_kvm_info->maint_irq) {
kvm_err("No vgic maintenance irq\n");
return -ENXIO;
}
+ /*
+ * If we get one of these oddball non-GICs, taint the kernel,
+ * as we have no idea of how they *really* behave.
+ */
+ if (gic_kvm_info->no_hw_deactivation) {
+ kvm_info("Non-architectural vgic, tainting kernel\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ kvm_vgic_global_state.no_hw_deactivation = true;
+ }
+
switch (gic_kvm_info->type) {
case GIC_V2:
ret = vgic_v2_probe(gic_kvm_info);
@@ -536,10 +557,17 @@ int kvm_vgic_hyp_init(void)
ret = -ENODEV;
}
+ kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+
+ kfree(gic_kvm_info);
+ gic_kvm_info = NULL;
+
if (ret)
return ret;
- kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+ if (!has_mask)
+ return 0;
+
ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 11934c2af2f4..2c580204f1dc 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -108,11 +108,22 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & GICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
@@ -152,7 +163,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (irq->group)
val |= GICH_LR_GROUP1;
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= GICH_LR_HW;
val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
/*
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 41ecf219c333..66004f61cd83 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -101,11 +101,22 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & ICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
@@ -136,7 +147,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
}
}
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= ICH_LR_HW;
val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
/*
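
Both the GICv2 and GICv3 fold paths now share the same shape: resample the
physical line either when a pending mapped level interrupt drops its line,
or when an interrupt that needs software help with deactivation has gone
fully idle. A sketch of that decision (standalone C, illustrative):

#include <stdio.h>
#include <stdbool.h>

struct irq {
	bool line_level, active, pending_latch, needs_resampling;
};

static bool should_resample(struct irq *irq, bool lr_pending,
			    bool phys_line_level)
{
	if (lr_pending) {
		irq->line_level = phys_line_level;
		return !irq->line_level;
	}
	/* no HW deactivation: help out once the IRQ is fully idle */
	return irq->needs_resampling &&
	       !(irq->active || irq->pending_latch);
}

int main(void)
{
	struct irq irq = { .needs_resampling = true };

	printf("resample: %d\n", should_resample(&irq, false, false)); /* 1 */
	return 0;
}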
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 15b666200f0b..111bff47e471 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -182,8 +182,8 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq)
BUG_ON(!irq->hw);
- if (irq->get_input_level)
- return irq->get_input_level(irq->intid);
+ if (irq->ops && irq->ops->get_input_level)
+ return irq->ops->get_input_level(irq->intid);
WARN_ON(irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
@@ -480,7 +480,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq,
- bool (*get_input_level)(int vindid))
+ struct irq_ops *ops)
{
struct irq_desc *desc;
struct irq_data *data;
@@ -500,7 +500,7 @@ static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
- irq->get_input_level = get_input_level;
+ irq->ops = ops;
return 0;
}
@@ -509,11 +509,11 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
- irq->get_input_level = NULL;
+ irq->ops = NULL;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid))
+ u32 vintid, struct irq_ops *ops)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
@@ -522,7 +522,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
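
Replacing the bare get_input_level function pointer with struct irq_ops
gives the vgic a place to hang further per-IRQ hooks later. The shape of
the change (standalone C, illustrative):

#include <stdio.h>
#include <stdbool.h>

struct irq_ops {
	bool (*get_input_level)(int intid);
};

struct vgic_irq {
	int intid;
	const struct irq_ops *ops;
};

static bool timer_get_input_level(int intid)
{
	(void)intid;
	return true;	/* stand-in for reading the timer line */
}

static const struct irq_ops timer_irq_ops = {
	.get_input_level = timer_get_input_level,
};

int main(void)
{
	struct vgic_irq irq = { .intid = 27, .ops = &timer_irq_ops };

	if (irq.ops && irq.ops->get_input_level)
		printf("line level: %d\n", irq.ops->get_input_level(irq.intid));
	return 0;
}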
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d31e1169d9b8..6dd56a49790a 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
- clear_page.o csum.o memchr.o memcpy.o memmove.o \
+ clear_page.o csum.o insn.o memchr.o memcpy.o \
memset.o memcmp.o strcmp.o strncmp.o strlen.o \
strnlen.o strchr.o strrchr.o tishift.o
@@ -18,3 +18,5 @@ obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_ARM64_MTE) += mte.o
+
+obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index af9afcbec92c..a7efb2ad2a1c 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -1,12 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
-#include <linux/linkage.h>
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -19,25 +16,33 @@
*
* Alignment fixed up by hardware.
*/
+
+ .p2align 4
+ // Alignment is for the loop, but since the prologue (including BTI)
+ // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
- mov x2, x1 // save the size for fixup return
+ add x2, x0, x1
subs x1, x1, #8
b.mi 2f
1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr xzr, [x0])
+ add x0, x0, #8
subs x1, x1, #8
- b.pl 1b
-2: adds x1, x1, #4
- b.mi 3f
-user_ldst 9f, sttr, wzr, x0, 4
- sub x1, x1, #4
-3: adds x1, x1, #2
- b.mi 4f
-user_ldst 9f, sttrh, wzr, x0, 2
- sub x1, x1, #2
-4: adds x1, x1, #1
- b.mi 5f
-user_ldst 9f, sttrb, wzr, x0, 0
+ b.hi 1b
+USER(9f, sttr xzr, [x2, #-8])
+ mov x0, #0
+ ret
+
+2: tbz x1, #2, 3f
+USER(9f, sttr wzr, [x0])
+USER(8f, sttr wzr, [x2, #-4])
+ mov x0, #0
+ ret
+
+3: tbz x1, #1, 4f
+USER(9f, sttrh wzr, [x0])
+4: tbz x1, #0, 5f
+USER(7f, sttrb wzr, [x2, #-1])
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.align 2
-9: mov x0, x2 // return the original size
+7: sub x0, x2, #5 // Adjust for faulting on the final byte...
+8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
+9: sub x0, x2, x0
ret
.previous
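
The rewritten fixup no longer returns the original size on a fault; it
computes the bytes left uncleared as the end pointer minus the faulting
pointer, with small adjustments for the overlapping tail stores. A toy
model of that arithmetic (standalone C; addresses are made up):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1000, size = 20;
	unsigned long end = start + size;	/* x2 in the assembly */
	unsigned long fault = 0x100c;		/* x0 when a store faults */

	/* label 9 in the fixup: bytes not cleared */
	printf("uncleared: %lu\n", end - fault);	/* 8 */
	return 0;
}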
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/lib/insn.c
index 6c0de2f60ea9..b506a4b1e38c 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -7,21 +7,14 @@
*/
#include <linux/bitops.h>
#include <linux/bug.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
-#include <asm/fixmap.h>
+#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
-#include <asm/sections.h>
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
@@ -30,7 +23,7 @@
static const int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_SVE,
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_LDST,
AARCH64_INSN_CLS_DP_REG,
@@ -83,81 +76,6 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static bool is_exit_text(unsigned long addr)
-{
- /* discarded with init text/data */
- return system_state < SYSTEM_RUNNING &&
- addr >= (unsigned long)__exittext_begin &&
- addr < (unsigned long)__exittext_end;
-}
-
-static bool is_image_text(unsigned long addr)
-{
- return core_kernel_text(addr) || is_exit_text(addr);
-}
-
-static void __kprobes *patch_map(void *addr, int fixmap)
-{
- unsigned long uintaddr = (uintptr_t) addr;
- bool image = is_image_text(uintaddr);
- struct page *page;
-
- if (image)
- page = phys_to_page(__pa_symbol(addr));
- else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- page = vmalloc_to_page(addr);
- else
- return addr;
-
- BUG_ON(!page);
- return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
- (uintaddr & ~PAGE_MASK));
-}
-
-static void __kprobes patch_unmap(int fixmap)
-{
- clear_fixmap(fixmap);
-}
-/*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
-int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
-{
- int ret;
- __le32 val;
-
- ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
- if (!ret)
- *insnp = le32_to_cpu(val);
-
- return ret;
-}
-
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
-{
- void *waddr = addr;
- unsigned long flags = 0;
- int ret;
-
- raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(addr, FIX_TEXT_POKE0);
-
- ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
-
- patch_unmap(FIX_TEXT_POKE0);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
- return ret;
-}
-
-int __kprobes aarch64_insn_write(void *addr, u32 insn)
-{
- return __aarch64_insn_write(addr, cpu_to_le32(insn));
-}
-
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
/* ldr/ldrsw (literal), prfm */
@@ -187,67 +105,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_bcond(insn);
}
-int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
-{
- u32 *tp = addr;
- int ret;
-
- /* A64 instructions must be word aligned */
- if ((uintptr_t)tp & 0x3)
- return -EINVAL;
-
- ret = aarch64_insn_write(tp, insn);
- if (ret == 0)
- __flush_icache_range((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
-
- return ret;
-}
-
-struct aarch64_insn_patch {
- void **text_addrs;
- u32 *new_insns;
- int insn_cnt;
- atomic_t cpu_count;
-};
-
-static int __kprobes aarch64_insn_patch_text_cb(void *arg)
-{
- int i, ret = 0;
- struct aarch64_insn_patch *pp = arg;
-
- /* The first CPU becomes master */
- if (atomic_inc_return(&pp->cpu_count) == 1) {
- for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
- ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
- pp->new_insns[i]);
- /* Notify other processors with an additional increment. */
- atomic_inc(&pp->cpu_count);
- } else {
- while (atomic_read(&pp->cpu_count) <= num_online_cpus())
- cpu_relax();
- isb();
- }
-
- return ret;
-}
-
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
- struct aarch64_insn_patch patch = {
- .text_addrs = addrs,
- .new_insns = insns,
- .insn_cnt = cnt,
- .cpu_count = ATOMIC_INIT(0),
- };
-
- if (cnt <= 0)
- return -EINVAL;
-
- return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
- cpu_online_mask);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
@@ -1432,104 +1289,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
return insn & CRM_MASK;
}
-static bool __kprobes __check_eq(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) != 0;
-}
-
-static bool __kprobes __check_ne(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) == 0;
-}
-
-static bool __kprobes __check_cs(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_cc(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_mi(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_pl(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_vs(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) != 0;
-}
-
-static bool __kprobes __check_vc(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) == 0;
-}
-
-static bool __kprobes __check_hi(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_ls(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_ge(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_lt(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_gt(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_le(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_al(unsigned long pstate)
-{
- return true;
-}
-
-/*
- * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
- * it behaves identically to 0b1110 ("al").
- */
-pstate_check_t * const aarch32_opcode_cond_checks[16] = {
- __check_eq, __check_ne, __check_cs, __check_cc,
- __check_mi, __check_pl, __check_vs, __check_vc,
- __check_hi, __check_ls, __check_ge, __check_lt,
- __check_gt, __check_le, __check_al, __check_al
-};
-
static bool range_of_ones(u64 val)
{
/* Doesn't handle full ones or full zeroes */
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
new file mode 100644
index 000000000000..5b04464c045e
--- /dev/null
+++ b/arch/arm64/lib/kasan_sw_tags.S
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Report a tag mismatch detected by tag-based KASAN.
+ *
+ * A compiler-generated thunk calls this with a non-AAPCS calling
+ * convention. Upon entry to this function, registers are as follows:
+ *
+ * x0: fault address (see below for restore)
+ * x1: fault description (see below for restore)
+ * x2 to x15: callee-saved
+ * x16 to x17: safe to clobber
+ * x18 to x30: callee-saved
+ * sp: pre-decremented by 256 bytes (see below for restore)
+ *
+ * The caller has decremented the SP by 256 bytes, and created a
+ * structure on the stack as follows:
+ *
+ * sp + 0..15: x0 and x1 to be restored
+ * sp + 16..231: free for use
+ * sp + 232..247: x29 and x30 (same as in GPRs)
+ * sp + 248..255: free for use
+ *
+ * Note that this is not a struct pt_regs.
+ *
+ * To call a regular AAPCS function we must save x2 to x15 (which we can
+ * store in the gaps), and create a frame record (for which we can use
+ * x29 and x30 spilled by the caller as those match the GPRs).
+ *
+ * The caller expects x0 and x1 to be restored from the structure, and
+ * for the structure to be removed from the stack (i.e. the SP must be
+ * incremented by 256 prior to return).
+ */
+SYM_CODE_START(__hwasan_tag_mismatch)
+#ifdef BTI_C
+ BTI_C
+#endif
+ add x29, sp, #232
+ stp x2, x3, [sp, #8 * 2]
+ stp x4, x5, [sp, #8 * 4]
+ stp x6, x7, [sp, #8 * 6]
+ stp x8, x9, [sp, #8 * 8]
+ stp x10, x11, [sp, #8 * 10]
+ stp x12, x13, [sp, #8 * 12]
+ stp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ str x18, [sp, #8 * 18]
+#endif
+
+ mov x2, x30
+ bl kasan_tag_mismatch
+
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ ldr x18, [sp, #8 * 18]
+#endif
+ ldp x29, x30, [sp, #8 * 29]
+
+ /* remove the structure from the stack */
+ add sp, sp, #256
+ ret
+SYM_CODE_END(__hwasan_tag_mismatch)
+EXPORT_SYMBOL(__hwasan_tag_mismatch)
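
The frame described in the comment above maps onto a C struct as follows. This is an illustrative sketch only; the struct and field names are hypothetical, not kernel definitions:

    /* Hypothetical C view of the 256-byte caller-built frame. */
    struct hwasan_thunk_frame {
            unsigned long x0_x1[2];     /* sp + 0..15: reloaded before return */
            unsigned long scratch[27];  /* sp + 16..231: the routine spills
                                           x2-x15 (and x18) here */
            unsigned long x29_x30[2];   /* sp + 232..247: frame record values */
            unsigned long free_tail;    /* sp + 248..255: unused */
    };
    _Static_assert(sizeof(struct hwasan_thunk_frame) == 256, "frame size");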
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index edf6b970a277..7c2276fdab54 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/memchr.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
#include <linux/linkage.h>
@@ -19,16 +16,60 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
+
+#define L(label) .L ## label
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+
+#define srcin x0
+#define chrin w1
+#define cntin x2
+
+#define result x0
+
+#define wordcnt x3
+#define rep01 x4
+#define repchr x5
+#define cur_word x6
+#define cur_byte w6
+#define tmp x7
+#define tmp2 x8
+
+ .p2align 4
+ nop
SYM_FUNC_START_WEAK_PI(memchr)
- and w1, w1, #0xff
-1: subs x2, x2, #1
- b.mi 2f
- ldrb w3, [x0], #1
- cmp w3, w1
- b.ne 1b
- sub x0, x0, #1
+ and chrin, chrin, #0xff
+ lsr wordcnt, cntin, #3
+ cbz wordcnt, L(byte_loop)
+ mov rep01, #REP8_01
+ mul repchr, x1, rep01
+ and cntin, cntin, #7
+L(word_loop):
+ ldr cur_word, [srcin], #8
+ sub wordcnt, wordcnt, #1
+ eor cur_word, cur_word, repchr
+ sub tmp, cur_word, rep01
+ orr tmp2, cur_word, #REP8_7f
+ bics tmp, tmp, tmp2
+ b.ne L(found_word)
+ cbnz wordcnt, L(word_loop)
+L(byte_loop):
+ cbz cntin, L(not_found)
+ ldrb cur_byte, [srcin], #1
+ sub cntin, cntin, #1
+ cmp cur_byte, chrin
+ b.ne L(byte_loop)
+ sub srcin, srcin, #1
+ ret
+L(found_word):
+CPU_LE( rev tmp, tmp)
+ clz tmp, tmp
+ sub tmp, tmp, #64
+ add result, srcin, tmp, asr #3
ret
-2: mov x0, #0
+L(not_found):
+ mov result, #0
ret
SYM_FUNC_END_PI(memchr)
EXPORT_SYMBOL_NOKASAN(memchr)
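
The word loop's match test above is the classic zero-byte trick; a hedged C model of what the eor/sub/orr/bics sequence computes per 8-byte word:

    #include <stdint.h>

    #define REP8_01 0x0101010101010101ULL
    #define REP8_7f 0x7f7f7f7f7f7f7f7fULL

    /* Non-zero iff one of the 8 bytes of 'word' equals 'c': XOR turns a
       matching byte into 0x00, and (v - 0x01..) & ~(v | 0x7f..) flags
       zero bytes without false positives. */
    static int word_has_char(uint64_t word, unsigned char c)
    {
            uint64_t v = word ^ (REP8_01 * c);
            return ((v - REP8_01) & ~(v | REP8_7f)) != 0;
    }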
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index c0671e793ea9..7d956384222f 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -1,247 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
-* compare memory areas(when two memory areas' offset are different,
-* alignment handled by the hardware)
-*
-* Parameters:
-* x0 - const memory area 1 pointer
-* x1 - const memory area 2 pointer
-* x2 - the maximal compare byte length
-* Returns:
-* x0 - a compare result, maybe less than, equal to, or greater than ZERO
-*/
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ */
+
+#define L(label) .L ## label
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result w0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-endloop .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-pos .req x11
-limit_wd .req x12
-mask .req x13
+#define data1 x3
+#define data1w w3
+#define data1h x4
+#define data2 x5
+#define data2w w5
+#define data2h x6
+#define tmp1 x7
+#define tmp2 x8
SYM_FUNC_START_WEAK_PI(memcmp)
- cbz limit, .Lret0
- eor tmp1, src1, src2
- tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * The input source addresses are at alignment boundary.
- * Directly compare eight bytes each time.
- */
-.Lloop_aligned:
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-.Lstart_realigned:
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, cs /* Last Dword or differences. */
- cbz endloop, .Lloop_aligned
-
- /* Not reached the limit, must have found a diff. */
- tbz limit_wd, #63, .Lnot_limit
-
- /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
- ands limit, limit, #7
- b.eq .Lnot_limit
- /*
- * The remained bytes less than 8. It is needed to extract valid data
- * from last eight bytes of the intended memory range.
- */
- lsl limit, limit, #3 /* bytes-> bits. */
- mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
- bic data1, data1, mask
- bic data2, data2, mask
-
- orr diff, diff, mask
- b .Lnot_limit
-
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- */
- bic src1, src1, #7
- bic src2, src2, #7
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- /*
- * We can not add limit with alignment offset(tmp1) here. Since the
- * addition probably make the limit overflown.
- */
- sub limit_wd, limit, #1/*limit != 0, so no underflow.*/
- and tmp3, limit_wd, #7
- lsr limit_wd, limit_wd, #3
- add tmp3, tmp3, tmp1
- add limit_wd, limit_wd, tmp3, lsr #3
- add limit, limit, tmp1/* Adjust the limit for the extra. */
-
- lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
- neg tmp1, tmp1/* Bits to alignment -64. */
- mov tmp2, #~0
- /*mask off the non-intended bytes before the start address.*/
-CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
- /* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 )
-
- orr data1, data1, tmp2
- orr data2, data2, tmp2
- b .Lstart_realigned
-
- /*src1 and src2 have different alignment offset.*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/
-
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum.*/
-
- sub limit, limit, pos
- /*compare the proceeding bytes in the first 8 byte segment.*/
-.Ltinycmp:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*diff occurred before the last byte.*/
- cmp data1w, data2w
- b.eq .Lstart_align
-1:
- sub result, data1, data2
+ subs limit, limit, 8
+ b.lo L(less8)
+
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ b.ne L(return)
+
+ subs limit, limit, 8
+ b.gt L(more16)
+
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ b L(return)
+
+L(more16):
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ bne L(return)
+
+	/* Jump directly to comparing the last 16 bytes for inputs of
+	   32 bytes or less. */
+ subs limit, limit, 16
+ b.ls L(last_bytes)
+
+	/* We overlap loads between 0-32 bytes at either side of SRC1 when we
+	   try to align, so only do this for inputs larger than 128 bytes. */
+ cmp limit, 96
+ b.ls L(loop16)
+
+ /* Align src1 and adjust src2 with bytes not yet done. */
+ and tmp1, src1, 15
+ add limit, limit, tmp1
+ sub src1, src1, tmp1
+ sub src2, src2, tmp1
+
+ /* Loop performing 16 bytes per iteration using aligned src1.
+ Limit is pre-decremented by 16 and must be larger than zero.
+ Exit if <= 16 bytes left to do or if the data is not equal. */
+ .p2align 4
+L(loop16):
+ ldp data1, data1h, [src1], 16
+ ldp data2, data2h, [src2], 16
+ subs limit, limit, 16
+ ccmp data1, data2, 0, hi
+ ccmp data1h, data2h, 0, eq
+ b.eq L(loop16)
+
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+ bne L(return)
+
+ /* Compare last 1-16 bytes using unaligned access. */
+L(last_bytes):
+ add src1, src1, limit
+ add src2, src2, limit
+ ldp data1, data1h, [src1]
+ ldp data2, data2h, [src2]
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+
+ /* Compare data bytes and set return value to 0, -1 or 1. */
+L(return):
+#ifndef __AARCH64EB__
+ rev data1, data1
+ rev data2, data2
+#endif
+ cmp data1, data2
+L(ret_eq):
+ cset result, ne
+ cneg result, result, lo
ret
-.Lstart_align:
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
-
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make src1 aligned...*/
- add src1, src1, tmp3 /*backwards src1 to alignment boundary*/
- add src2, src2, tmp3
- sub limit, limit, tmp3
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*load 8 bytes from aligned SRC1..*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /*Non-zero if differences found.*/
- csinv endloop, diff, xzr, ne
- cbnz endloop, .Lunequal_proc
- /*How far is the current SRC2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-
-.Lrecal_offset:/*src1 is aligned now..*/
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes and compare from
- * the SRC2 alignment boundary. If all 8 bytes are equal,then start
- * the second part's comparison. Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- eor diff, data1, data2 /* Non-zero if differences found. */
- cbnz diff, .Lnot_limit
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- eor diff, data1, data2 /* Non-zero if differences found. */
- subs limit_wd, limit_wd, #1
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- cbz endloop, .Lloopcmp_proc
-.Lunequal_proc:
- cbz diff, .Lremain8
-
-/* There is difference occurred in the latest comparison. */
-.Lnot_limit:
-/*
-* For little endian,reverse the low significant equal bits into MSB,then
-* following CLZ can find how many equal bits exist.
-*/
-CPU_LE( rev diff, diff )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * The MS-non-zero bit of DIFF marks either the first bit
- * that is different, or the end of the significant data.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, diff
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * We need to zero-extend (char is unsigned) the value and then
- * perform a signed subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+ .p2align 4
+ /* Compare up to 8 bytes. Limit is [-8..-1]. */
+L(less8):
+ adds limit, limit, 4
+ b.lo L(less4)
+ ldr data1w, [src1], 4
+ ldr data2w, [src2], 4
+ cmp data1w, data2w
+ b.ne L(return)
+ sub limit, limit, 4
+L(less4):
+ adds limit, limit, 4
+ beq L(ret_eq)
+L(byte_loop):
+ ldrb data1w, [src1], 1
+ ldrb data2w, [src2], 1
+ subs limit, limit, 1
+ ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+ sub result, data1w, data2w
ret
-.Lremain8:
- /* Limit % 8 == 0 =>. all data are equal.*/
- ands limit, limit, #7
- b.eq .Lret0
-
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
-
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
-.Lret0:
- mov result, #0
- ret
SYM_FUNC_END_PI(memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)
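
A hedged C model of the L(return) sequence above: on little-endian the operands are byte-reversed first so that the first differing byte decides an unsigned comparison, which the cset/cneg pair turns into -1/0/1:

    #include <stdint.h>

    static int word_cmp(uint64_t a, uint64_t b)
    {
            a = __builtin_bswap64(a);   /* rev: first differing byte -> MSB */
            b = __builtin_bswap64(b);
            return (a > b) - (a < b);   /* cset ne / cneg lo: 1, 0 or -1 */
    }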
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index dc8d2a216a6e..b82fd64ee1e1 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -1,66 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/cache.h>
-/*
- * Copy a buffer from src to dest (alignment handled by the hardware)
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
*
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
*/
- .macro ldrb1 reg, ptr, val
- ldrb \reg, [\ptr], \val
- .endm
-
- .macro strb1 reg, ptr, val
- strb \reg, [\ptr], \val
- .endm
- .macro ldrh1 reg, ptr, val
- ldrh \reg, [\ptr], \val
- .endm
+#define L(label) .L ## label
- .macro strh1 reg, ptr, val
- strh \reg, [\ptr], \val
- .endm
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_lw w10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l x14
+#define E_h x15
+#define F_l x16
+#define F_h x17
+#define G_l count
+#define G_h dst
+#define H_l src
+#define H_h srcend
+#define tmp1 x14
- .macro ldr1 reg, ptr, val
- ldr \reg, [\ptr], \val
- .endm
+/* This implementation handles overlaps and supports both memcpy and memmove
+ from a single entry point. It uses unaligned accesses and branchless
+ sequences to keep the code small and simple, and to improve performance.
- .macro str1 reg, ptr, val
- str \reg, [\ptr], \val
- .endm
+ Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+ copies of up to 128 bytes, and large copies. The overhead of the overlap
+ check is negligible since it is only required for large copies.
- .macro ldp1 reg1, reg2, ptr, val
- ldp \reg1, \reg2, [\ptr], \val
- .endm
-
- .macro stp1 reg1, reg2, ptr, val
- stp \reg1, \reg2, [\ptr], \val
- .endm
+ Large copies use a software pipelined loop processing 64 bytes per iteration.
+ The destination pointer is 16-byte aligned to minimize unaligned accesses.
+ The loop tail is handled by always copying 64 bytes from the end.
+*/
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_WEAK_ALIAS_PI(memmove)
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_WEAK_PI(memcpy)
-#include "copy_template.S"
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 128
+ b.hi L(copy_long)
+ cmp count, 32
+ b.hi L(copy32_128)
+
+ /* Small copies: 0..32 bytes. */
+ cmp count, 16
+ b.lo L(copy16)
+ ldp A_l, A_h, [src]
+ ldp D_l, D_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ /* Copy 8-15 bytes. */
+L(copy16):
+ tbz count, 3, L(copy8)
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+
+ .p2align 3
+ /* Copy 4-7 bytes. */
+L(copy8):
+ tbz count, 2, L(copy4)
+ ldr A_lw, [src]
+ ldr B_lw, [srcend, -4]
+ str A_lw, [dstin]
+ str B_lw, [dstend, -4]
+ ret
+
+ /* Copy 0..3 bytes using a branchless sequence. */
+L(copy4):
+ cbz count, L(copy0)
+ lsr tmp1, count, 1
+ ldrb A_lw, [src]
+ ldrb C_lw, [srcend, -1]
+ ldrb B_lw, [src, tmp1]
+ strb A_lw, [dstin]
+ strb B_lw, [dstin, tmp1]
+ strb C_lw, [dstend, -1]
+L(copy0):
+ ret
+
+ .p2align 4
+ /* Medium copies: 33..128 bytes. */
+L(copy32_128):
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ ldp D_l, D_h, [srcend, -16]
+ cmp count, 64
+ b.hi L(copy128)
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
ret
+
+ .p2align 4
+ /* Copy 65..128 bytes. */
+L(copy128):
+ ldp E_l, E_h, [src, 32]
+ ldp F_l, F_h, [src, 48]
+ cmp count, 96
+ b.ls L(copy96)
+ ldp G_l, G_h, [srcend, -64]
+ ldp H_l, H_h, [srcend, -48]
+ stp G_l, G_h, [dstend, -64]
+ stp H_l, H_h, [dstend, -48]
+L(copy96):
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp E_l, E_h, [dstin, 32]
+ stp F_l, F_h, [dstin, 48]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Copy more than 128 bytes. */
+L(copy_long):
+ /* Use backwards copy if there is an overlap. */
+ sub tmp1, dstin, src
+ cbz tmp1, L(copy0)
+ cmp tmp1, count
+ b.lo L(copy_long_backwards)
+
+ /* Copy 16 bytes and then align dst to 16-byte alignment. */
+
+ ldp D_l, D_h, [src]
+ and tmp1, dstin, 15
+ bic dst, dstin, 15
+ sub src, src, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_l, A_h, [src, 16]
+ stp D_l, D_h, [dstin]
+ ldp B_l, B_h, [src, 32]
+ ldp C_l, C_h, [src, 48]
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls L(copy64_from_end)
+
+L(loop64):
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [src, 32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [src, 48]
+ stp D_l, D_h, [dst, 64]!
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 64
+ b.hi L(loop64)
+
+ /* Write the last iteration and copy 64 bytes from the end. */
+L(copy64_from_end):
+ ldp E_l, E_h, [srcend, -64]
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [srcend, -48]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [srcend, -16]
+ stp D_l, D_h, [dst, 64]
+ stp E_l, E_h, [dstend, -64]
+ stp A_l, A_h, [dstend, -48]
+ stp B_l, B_h, [dstend, -32]
+ stp C_l, C_h, [dstend, -16]
+ ret
+
+ .p2align 4
+
+ /* Large backwards copy for overlapping copies.
+ Copy 16 bytes and then align dst to 16-byte alignment. */
+L(copy_long_backwards):
+ ldp D_l, D_h, [srcend, -16]
+ and tmp1, dstend, 15
+ sub srcend, srcend, tmp1
+ sub count, count, tmp1
+ ldp A_l, A_h, [srcend, -16]
+ stp D_l, D_h, [dstend, -16]
+ ldp B_l, B_h, [srcend, -32]
+ ldp C_l, C_h, [srcend, -48]
+ ldp D_l, D_h, [srcend, -64]!
+ sub dstend, dstend, tmp1
+ subs count, count, 128
+ b.ls L(copy64_from_start)
+
+L(loop64_backwards):
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [srcend, -16]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [srcend, -48]
+ stp D_l, D_h, [dstend, -64]!
+ ldp D_l, D_h, [srcend, -64]!
+ subs count, count, 64
+ b.hi L(loop64_backwards)
+
+ /* Write the last iteration and copy 64 bytes from the start. */
+L(copy64_from_start):
+ ldp G_l, G_h, [src, 48]
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [src, 32]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [src, 16]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [src]
+ stp D_l, D_h, [dstend, -64]
+ stp G_l, G_h, [dstin, 48]
+ stp A_l, A_h, [dstin, 32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin]
+ ret
+
SYM_FUNC_END_PI(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_END_ALIAS_PI(memmove)
+EXPORT_SYMBOL(memmove)
+SYM_FUNC_END_ALIAS(__memmove)
+EXPORT_SYMBOL(__memmove)
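
The forward/backward decision in L(copy_long) reduces to one unsigned compare; a hedged C sketch of the test:

    #include <stdint.h>
    #include <stddef.h>

    /* True iff dst lies inside [src, src + count), i.e. a forward copy
       would clobber bytes not yet read (sub/cmp/b.lo in the assembly). */
    static int must_copy_backwards(const char *dst, const char *src,
                                   size_t count)
    {
            return (uintptr_t)(dst - src) < (uintptr_t)count;
    }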
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
deleted file mode 100644
index 1035dce4bdaf..000000000000
--- a/arch/arm64/lib/memmove.S
+++ /dev/null
@@ -1,189 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
- *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Move a buffer from src to test (alignment handled by the hardware).
- * If dest <= src, call memcpy, otherwise copy in reverse order.
- *
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
- */
-dstin .req x0
-src .req x1
-count .req x2
-tmp1 .req x3
-tmp1w .req w3
-tmp2 .req x4
-tmp2w .req w4
-tmp3 .req x5
-tmp3w .req w5
-dst .req x6
-
-A_l .req x7
-A_h .req x8
-B_l .req x9
-B_h .req x10
-C_l .req x11
-C_h .req x12
-D_l .req x13
-D_h .req x14
-
-SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_WEAK_PI(memmove)
- cmp dstin, src
- b.lo __memcpy
- add tmp1, src, count
- cmp dstin, tmp1
- b.hs __memcpy /* No overlap. */
-
- add dst, dstin, count
- add src, src, count
- cmp count, #16
- b.lo .Ltail15 /*probably non-alignment accesses.*/
-
- ands tmp2, src, #15 /* Bytes to reach alignment. */
- b.eq .LSrcAligned
- sub count, count, tmp2
- /*
- * process the aligned offset length to make the src aligned firstly.
- * those extra instructions' cost is acceptable. It also make the
- * coming accesses are based on aligned address.
- */
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src, #-1]!
- strb tmp1w, [dst, #-1]!
-1:
- tbz tmp2, #1, 2f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-2:
- tbz tmp2, #2, 3f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-3:
- tbz tmp2, #3, .LSrcAligned
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-
-.LSrcAligned:
- cmp count, #64
- b.ge .Lcpy_over64
-
- /*
- * Deal with small copies quickly by dropping straight into the
- * exit block.
- */
-.Ltail63:
- /*
- * Copy up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate.
- */
- ands tmp1, count, #0x30
- b.eq .Ltail15
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-1:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-2:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-
-.Ltail15:
- tbz count, #3, 1f
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-1:
- tbz count, #2, 2f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-2:
- tbz count, #1, 3f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-3:
- tbz count, #0, .Lexitfunc
- ldrb tmp1w, [src, #-1]
- strb tmp1w, [dst, #-1]
-
-.Lexitfunc:
- ret
-
-.Lcpy_over64:
- subs count, count, #128
- b.ge .Lcpy_body_large
- /*
- * Less than 128 bytes to copy, so handle 64 bytes here and then jump
- * to the tail.
- */
- ldp A_l, A_h, [src, #-16]
- stp A_l, A_h, [dst, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp D_l, D_h, [src, #-64]!
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-
- /*
- * Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line.
- */
- .p2align L1_CACHE_SHIFT
-.Lcpy_body_large:
- /* pre-load 64 bytes data. */
- ldp A_l, A_h, [src, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- ldp D_l, D_h, [src, #-64]!
-1:
- /*
- * interlace the load of next 64 bytes data block with store of the last
- * loaded 64 bytes data.
- */
- stp A_l, A_h, [dst, #-16]
- ldp A_l, A_h, [src, #-16]
- stp B_l, B_h, [dst, #-32]
- ldp B_l, B_h, [src, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp C_l, C_h, [src, #-48]
- stp D_l, D_h, [dst, #-64]!
- ldp D_l, D_h, [src, #-64]!
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst, #-16]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-SYM_FUNC_END_PI(memmove)
-EXPORT_SYMBOL(memmove)
-SYM_FUNC_END_ALIAS(__memmove)
-EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 351537c12f36..e83643b3995f 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -37,6 +37,26 @@ SYM_FUNC_START(mte_clear_page_tags)
SYM_FUNC_END(mte_clear_page_tags)
/*
+ * Zero the page and tags at the same time
+ *
+ * Parameters:
+ * x0 - address to the beginning of the page
+ */
+SYM_FUNC_START(mte_zero_clear_page_tags)
+ mrs x1, dczid_el0
+ and w1, w1, #0xf
+ mov x2, #4
+ lsl x1, x2, x1
+ and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
+
+1: dc gzva, x0
+ add x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_zero_clear_page_tags)
+
+/*
* Copy the tags from the source page to the destination one
* x0 - address of the destination page
* x1 - address of the source page
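
The stride used by the DC GZVA loop in mte_zero_clear_page_tags() comes from DCZID_EL0.BS, which encodes log2 of the zeroing block size in 4-byte words. A hedged C sketch; read_dczid_el0() is a hypothetical stand-in for the mrs instruction:

    /* Byte stride for DC {G}ZVA: 4 << DCZID_EL0.BS. */
    static unsigned long dc_gzva_stride(void)
    {
            unsigned long dczid = read_dczid_el0(); /* hypothetical accessor */
            return 4UL << (dczid & 0xf);            /* mov #4; lsl */
    }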
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 4e79566726c8..d7bee210a798 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -1,84 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero
- * if s1 is found, respectively, to be less than, to match,
- * or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-result .req x0
+#define src1 x0
+#define src2 x1
+#define result x0
/* Internal variables. */
-data1 .req x2
-data1w .req w2
-data2 .req x3
-data2w .req w3
-has_nul .req x4
-diff .req x5
-syndrome .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-zeroones .req x10
-pos .req x11
-
+#define data1 x2
+#define data1w w2
+#define data2 x3
+#define data2w w3
+#define has_nul x4
+#define diff x5
+#define syndrome x6
+#define tmp1 x7
+#define tmp2 x8
+#define tmp3 x9
+#define zeroones x10
+#define pos x11
+
+ /* Start of performance-critical section -- one 64B cache line. */
+ .align 6
SYM_FUNC_START_WEAK_PI(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
+ b.ne L(misaligned8)
ands tmp1, src1, #7
- b.ne .Lmutual_align
-
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ b.ne L(mutual_align)
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloop_aligned
- b .Lcal_cmpresult
+ cbz syndrome, L(loop_aligned)
+ /* End of performance-critical section -- one 64B cache line. */
+
+L(end):
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that preceed the start point.
- */
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+	   the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
@@ -86,138 +125,52 @@ SYM_FUNC_START_WEAK_PI(strcmp)
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#endif
orr data1, data1, tmp2
orr data2, data2, tmp2
- b .Lstart_realigned
-
-.Lmisaligned8:
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.
- */
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
-.Ltinycmp:
+ b L(start_realigned)
+
+L(misaligned8):
+ /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
+	   checking to make sure that we don't access beyond the page boundary in
+ SRC2. */
+ tst src1, #7
+ b.eq L(loop_misaligned)
+L(do_misaligned):
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
- sub result, data1, data2
- ret
-
-.Lstart_align:
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make str1 aligned...*/
- add src1, src1, tmp3
- add src2, src2, tmp3
- /*load 8 bytes from aligned str1 and non-aligned str2..*/
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ tst src1, #7
+ b.ne L(do_misaligned)
+
+L(loop_misaligned):
+ /* Test if we are within the last dword of the end of a 4K page. If
+	   yes then jump back to the misaligned loop to compare a byte at a time. */
+ and tmp1, src2, #0xff8
+ eor tmp1, tmp1, #0xff8
+ cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloopcmp_proc
+ cbz syndrome, L(loop_misaligned)
+ b L(end)
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We cannot just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /*Re-compute the NUL-byte detection, using a byte-reversed value. */
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
-
- clz pos, syndrome
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+L(done):
+ sub result, data1, data2
ret
+
SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)
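
The guard at the top of L(loop_misaligned) keeps the 8-byte loads from SRC2 inside the current page; a hedged C model of the and/eor/cbz test:

    #include <stdint.h>

    /* True iff p sits in the last dword of a 4KiB page, i.e. its page
       offset rounded down to 8 equals 0xff8, so an 8-byte load could
       cross into the next (possibly unmapped) page. */
    static int near_4k_page_end(const void *p)
    {
            return ((uintptr_t)p & 0xff8) == 0xff8;
    }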
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index ee3ed882dd79..35fbdb7d6e1a 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -1,115 +1,203 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * calculate the length of a string
+/* Assumptions:
*
- * Parameters:
- * x0 - const string pointer
- * Returns:
- * x0 - the return length of specific string
+ * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
+#define L(label) .L ## label
+
/* Arguments and results. */
-srcin .req x0
-len .req x0
+#define srcin x0
+#define len x0
/* Locals and temporaries. */
-src .req x1
-data1 .req x2
-data2 .req x3
-data2a .req x4
-has_nul1 .req x5
-has_nul2 .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-tmp4 .req x10
-zeroones .req x11
-pos .req x12
+#define src x1
+#define data1 x2
+#define data2 x3
+#define has_nul1 x4
+#define has_nul2 x5
+#define tmp1 x4
+#define tmp2 x5
+#define tmp3 x6
+#define tmp4 x7
+#define zeroones x8
+
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. A faster check
+ (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
+ false hits for characters 129..255. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
+#define MIN_PAGE_SIZE 4096
+
+ /* Since strings are short on average, we check the first 16 bytes
+ of the string for a NUL character. In order to do an unaligned ldp
+ safely we have to do a page cross check first. If there is a NUL
+ byte we calculate the length from the 2 8-byte words using
+ conditional select to reduce branch mispredictions (it is unlikely
+ strlen will be repeatedly called on strings with the same length).
+
+	If the string is longer than 16 bytes, we align src so we don't need
+	further page cross checks, and process 32 bytes per iteration
+	using the fast NUL check. If we encounter non-ASCII characters,
+	fall back to a second loop using the full NUL check.
+
+ If the page cross check fails, we read 16 bytes from an aligned
+ address, remove any characters before the string, and continue
+ in the main loop using aligned loads. Since strings crossing a
+ page in the first 16 bytes are rare (probability of
+ 16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
+
+ AArch64 systems have a minimum page size of 4k. We don't bother
+ checking for larger page sizes - the cost of setting up the correct
+ page size is just not worth the extra gain from a small reduction in
+ the cases taking the slow path. Note that we only care about
+ whether the first fetch, which may be misaligned, crosses a page
+ boundary. */
+
SYM_FUNC_START_WEAK_PI(strlen)
- mov zeroones, #REP8_01
- bic src, srcin, #15
- ands tmp1, srcin, #15
- b.ne .Lmisaligned
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
- /*
- * The inner loop deals with two Dwords at a time. This has a
- * slightly higher start-up cost, but we should win quite quickly,
- * especially on cores with a high number of issue slots per
- * cycle, as we get much better parallelism out of the operations.
- */
-.Lloop:
- ldp data1, data2, [src], #16
-.Lrealigned:
+ and tmp1, srcin, MIN_PAGE_SIZE - 1
+ mov zeroones, REP8_01
+ cmp tmp1, MIN_PAGE_SIZE - 16
+ b.gt L(page_cross)
+ ldp data1, data2, [srcin]
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly.
+	   Since we expect strings to be small and to exit early,
+	   byte-swap the data now so has_nul1/2 will be correct. */
+ rev data1, data1
+ rev data2, data2
+#endif
sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
+ orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
- orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
- bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.eq .Lloop
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(main_loop_entry)
+
+ /* Enter with C = has_nul1 == 0. */
+ csel has_nul1, has_nul1, has_nul2, cc
+ mov len, 8
+ rev has_nul1, has_nul1
+ clz tmp1, has_nul1
+ csel len, xzr, len, cc
+ add len, len, tmp1, lsr 3
+ ret
+ /* The inner loop processes 32 bytes per iteration and uses the fast
+ NUL check. If we encounter non-ASCII characters, use a second
+ loop with the accurate NUL check. */
+ .p2align 4
+L(main_loop_entry):
+ bic src, srcin, 15
+ sub src, src, 16
+L(main_loop):
+ ldp data1, data2, [src, 32]!
+L(page_cross_entry):
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ bne 1f
+ ldp data1, data2, [src, 16]
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ beq L(main_loop)
+ add src, src, 16
+1:
+ /* The fast check failed, so do the slower, accurate NUL check. */
+ orr tmp2, data1, REP8_7f
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+
+ /* Enter with C = has_nul1 == 0. */
+L(tail):
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly. The
+ easiest way to get the correct byte is to byte-swap the data
+ and calculate the syndrome a second time. */
+ csel data1, data1, data2, cc
+ rev data1, data1
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ bic has_nul1, tmp1, tmp2
+#else
+ csel has_nul1, has_nul1, has_nul2, cc
+#endif
sub len, src, srcin
- cbz has_nul1, .Lnul_in_data2
-CPU_BE( mov data2, data1 ) /*prepare data to re-calculate the syndrome*/
- sub len, len, #8
- mov has_nul2, has_nul1
-.Lnul_in_data2:
- /*
- * For big-endian, carry propagation (if the final byte in the
- * string is 0x01) means we cannot use has_nul directly. The
- * easiest way to get the correct byte is to byte-swap the data
- * and calculate the syndrome a second time.
- */
-CPU_BE( rev data2, data2 )
-CPU_BE( sub tmp1, data2, zeroones )
-CPU_BE( orr tmp2, data2, #REP8_7f )
-CPU_BE( bic has_nul2, tmp1, tmp2 )
-
- sub len, len, #8
- rev has_nul2, has_nul2
- clz pos, has_nul2
- add len, len, pos, lsr #3 /* Bits to bytes. */
+ rev has_nul1, has_nul1
+ add tmp2, len, 8
+ clz tmp1, has_nul1
+ csel len, len, tmp2, cc
+ add len, len, tmp1, lsr 3
ret
-.Lmisaligned:
- cmp tmp1, #8
- neg tmp1, tmp1
- ldp data1, data2, [src], #16
- lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
- mov tmp2, #~0
- /* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+L(nonascii_loop):
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ bne L(tail)
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+ b L(tail)
+
+ /* Load 16 bytes from [srcin & ~15] and force the bytes that precede
+ srcin to 0x7f, so we ignore any NUL bytes before the string.
+ Then continue in the aligned loop. */
+L(page_cross):
+ bic src, srcin, 15
+ ldp data1, data2, [src]
+ lsl tmp1, srcin, 3
+ mov tmp4, -1
+#ifdef __AARCH64EB__
+ /* Big-endian. Early bytes are at MSB. */
+ lsr tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#endif
+ orr tmp1, tmp1, REP8_80
+ orn data1, data1, tmp1
+ orn tmp2, data2, tmp1
+ tst srcin, 8
+ csel data1, data1, tmp4, eq
+ csel data2, data2, tmp2, eq
+ b L(page_cross_entry)
- orr data1, data1, tmp2
- orr data2a, data2, tmp2
- csinv data1, data1, xzr, le
- csel data2, data2, data2a, le
- b .Lrealigned
SYM_FUNC_END_PI(strlen)
EXPORT_SYMBOL_NOKASAN(strlen)
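
A hedged C model of the two NUL tests contrasted in the comments above (the fast one drives the main loop, the accurate one the tail and non-ASCII paths):

    #include <stdint.h>

    #define REP8_01 0x0101010101010101ULL
    #define REP8_80 0x8080808080808080ULL

    /* Fast test: zero for NUL-free ASCII words, but false-hits on
       bytes 0x81..0xff (characters 129..255). */
    static int maybe_has_nul(uint64_t x)
    {
            return ((x - REP8_01) & REP8_80) != 0;
    }

    /* Accurate test: non-zero iff the word contains a genuine 0x00. */
    static int has_nul(uint64_t x)
    {
            return ((x - REP8_01) & ~x & REP8_80) != 0;
    }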
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 2a7ee949ed47..48d44f7fddb1 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -1,299 +1,261 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * x2 - the maximal length to be compared
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero if s1 is found,
- * respectively, to be less than, to match, or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result x0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-syndrome .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-zeroones .req x11
-pos .req x12
-limit_wd .req x13
-mask .req x14
-endloop .req x15
+#define data1 x3
+#define data1w w3
+#define data2 x4
+#define data2w w4
+#define has_nul x5
+#define diff x6
+#define syndrome x7
+#define tmp1 x8
+#define tmp2 x9
+#define tmp3 x10
+#define zeroones x11
+#define pos x12
+#define limit_wd x13
+#define mask x14
+#define endloop x15
+#define count mask
SYM_FUNC_START_WEAK_PI(strncmp)
- cbz limit, .Lret0
+ cbz limit, L(ret0)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
+ and count, src1, #7
+ b.ne L(misaligned8)
+ cbnz count, L(mutual_align)
/* Calculate the number of full and partial words -1. */
- /*
- * when limit is mulitply of 8, if not sub 1,
- * the judgement of last dword will wrong.
- */
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
+ .p2align 4
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, pl /* Last Dword or differences.*/
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, pl /* Last Dword or differences. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
- b.eq .Lloop_aligned
+ b.eq L(loop_aligned)
+ /* End of main loop */
- /*Not reached the limit, must have found the end or a diff. */
- tbz limit_wd, #63, .Lnot_limit
+ /* Not reached the limit, must have found the end or a diff. */
+ tbz limit_wd, #63, L(not_limit)
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
- b.eq .Lnot_limit
+ b.eq L(not_limit)
- lsl limit, limit, #3 /* Bits -> bytes. */
+ lsl limit, limit, #3 /* Bits -> bytes. */
mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
+#ifdef __AARCH64EB__
+ lsr mask, mask, limit
+#else
+ lsl mask, mask, limit
+#endif
bic data1, data1, mask
bic data2, data2, mask
/* Make sure that the NUL byte is marked in the syndrome. */
orr has_nul, has_nul, mask
-.Lnot_limit:
+L(not_limit):
orr syndrome, diff, has_nul
- b .Lcal_cmpresult
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- * We also need to adjust the limit calculations, but without
- * overflowing if the limit is near ULONG_MAX.
- */
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
+
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+ the bytes that precede the start point.
+ We also need to adjust the limit calculations, but without
+ overflowing if the limit is near ULONG_MAX. */
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
- neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
+ neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
- /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
- add limit, limit, tmp1
- add tmp3, tmp3, tmp1
+ /* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
+ add limit, limit, count
+ add tmp3, tmp3, count
orr data1, data1, tmp2
orr data2, data2, tmp2
add limit_wd, limit_wd, tmp3, lsr #3
- b .Lstart_realigned
+ b L(start_realigned)
+
+ .p2align 4
+ /* Don't bother with dwords for up to 16 bytes. */
+L(misaligned8):
+ cmp limit, #16
+ b.hs L(try_misaligned_words)
-/*when src1 offset is not equal to src2 offset...*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8... */
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.*/
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
- /*
- * Here, limit is not less than 8, so directly run .Ltinycmp
- * without checking the limit.*/
- sub limit, limit, pos
-.Ltinycmp:
+L(byte_loop):
+ /* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
- cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
+ subs limit, limit, #1
+ ccmp data1w, #1, #0, hi /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+L(done):
sub result, data1, data2
ret
-
-.Lstart_align:
+ /* Align the SRC1 to a dword by doing a bytewise compare and then do
+ the dword loop. */
+L(try_misaligned_words):
lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*process more leading bytes to make str1 aligned...*/
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- add src1, src1, tmp3 /*tmp3 is positive in this branch.*/
- add src2, src2, tmp3
- ldr data1, [src1], #8
- ldr data2, [src2], #8
+ cbz count, L(do_misaligned)
- sub limit, limit, tmp3
+ neg count, count
+ and count, count, #7
+ sub limit, limit, count
lsr limit_wd, limit, #3
- subs limit_wd, limit_wd, #1
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.ne .Lunequal_proc
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, eq
- cbnz endloop, .Lunequal_proc
+L(page_end_loop):
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ cmp data1w, #1
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ subs count, count, #1
+ b.hi L(page_end_loop)
+
+L(do_misaligned):
+ /* Prepare ourselves for the next page crossing. Unlike the aligned
+ loop, we fetch 1 less dword because we risk crossing bounds on
+ SRC2. */
+ mov count, #8
+ subs limit_wd, limit_wd, #1
+ b.lo L(done_loop)
+L(loop_misaligned):
+ and tmp2, src2, #0xff8
+ eor tmp2, tmp2, #0xff8
+ cbz tmp2, L(page_end_loop)
- /*The second part process*/
ldr data1, [src1], #8
ldr data2, [src2], #8
- subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.eq .Lloopcmp_proc
-
-.Lunequal_proc:
- orr syndrome, diff, has_nul
- cbz syndrome, .Lremain8
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We can't just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, syndrome
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
- ret
-
-.Lremain8:
- /* Limit % 8 == 0 => all bytes significant. */
- ands limit, limit, #7
- b.eq .Lret0
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
+ subs limit_wd, limit_wd, #1
+ b.pl L(loop_misaligned)
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
+L(done_loop):
+ /* We found a difference or a NUL before the limit was reached. */
+ and limit, limit, #7
+ cbz limit, L(not_limit)
+ /* Read the last word. */
+ sub src1, src1, 8
+ sub src2, src2, 8
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
-.Lret0:
+L(ret0):
mov result, #0
ret
+
SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
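The dword loops above find a NUL terminator eight bytes at a time using the REP8_01 ("zeroones") and REP8_7f constants: "sub tmp1, data1, zeroones; orr tmp2, data1, #REP8_7f; bics has_nul, tmp1, tmp2" leaves a non-zero value exactly when the dword contains a zero byte. A minimal userspace sketch of the same check (helper and variable names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int word_has_nul(uint64_t x)
{
        const uint64_t zeroones = 0x0101010101010101ULL;        /* REP8_01 */
        const uint64_t rep8_7f  = 0x7f7f7f7f7f7f7f7fULL;        /* REP8_7f */

        /* "bics has_nul, x - zeroones, x | rep8_7f": the top bit of a byte
           survives only where that byte of x was 0x00. */
        return ((x - zeroones) & ~(x | rep8_7f)) != 0;
}

int main(void)
{
        uint64_t w1, w2;

        memcpy(&w1, "abcdefgh", 8);     /* no terminator in this dword */
        memcpy(&w2, "abc\0defg", 8);    /* NUL in byte 3 */
        printf("%d %d\n", word_has_nul(w1), word_has_nul(w2));  /* 0 1 */
        return 0;
}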
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index c83bb5a4aad2..baee22961bdb 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
* barrier to order the cache maintenance against the memcpy.
*/
memcpy(dst, src, cnt);
- __clean_dcache_area_pop(dst, cnt);
+ dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
rc = raw_copy_from_user(to, from, n);
/* See above */
- __clean_dcache_area_pop(to, n - rc);
+ dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
return rc;
}
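Both hunks above are mechanical conversions from the old (kaddr, size) calling convention to the half-open [start, end) range the renamed helpers take. A hypothetical shim, purely to illustrate the mapping (not part of the patch):

#include <stddef.h>

void dcache_clean_pop(unsigned long start, unsigned long end);  /* new API */

/* Hypothetical compatibility wrapper: how an old-style (kaddr, size)
   call site translates to the new range-based interface. */
static inline void clean_dcache_area_pop_compat(void *kaddr, size_t size)
{
        dcache_clean_pop((unsigned long)kaddr, (unsigned long)kaddr + size);
}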
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2d881f34dd9d..5051b3c1a4f1 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>
/*
- * flush_icache_range(start,end)
+ * caches_clean_inval_pou_macro(start,end) [fixup]
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -23,12 +23,27 @@
*
* - start - virtual start address of region
* - end - virtual end address of region
+ * - fixup - optional label to branch to on user fault
*/
-SYM_FUNC_START(__flush_icache_range)
- /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+ dsb ishst
+ b .Ldc_skip_\@
+alternative_else_nop_endif
+ mov x2, x0
+ mov x3, x1
+ dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ b .Lic_skip_\@
+alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
/*
- * __flush_cache_user_range(start,end)
+ * caches_clean_inval_pou(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+ caches_clean_inval_pou_macro
+ ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ * caches_clean_inval_user_pou(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
- dsb ishst
- b 7f
-alternative_else_nop_endif
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
-7:
-alternative_if ARM64_HAS_CACHE_DIC
- isb
- b 8f
-alternative_else_nop_endif
- invalidate_icache_by_line x0, x1, x2, x3, 9f
-8: mov x0, #0
+ caches_clean_inval_pou_macro 2f
+ mov x0, xzr
1:
uaccess_ttbr0_disable x1, x2
ret
-9:
+2:
mov x0, #-EFAULT
b 1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
/*
- * invalidate_icache_range(start,end)
+ * icache_inval_pou(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
isb
ret
alternative_else_nop_endif
- uaccess_ttbr0_enable x2, x3, x4
-
- invalidate_icache_by_line x0, x1, x2, x3, 2f
- mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
+ invalidate_icache_by_line x0, x1, x2, x3
ret
-2:
- mov x0, #-EFAULT
- b 1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
/*
- * __flush_dcache_area(kaddr, size)
+ * dcache_clean_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
/*
- * __clean_dcache_area_pou(kaddr, size)
+ * dcache_clean_pou(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoU.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
/*
- * __inval_dcache_area(kaddr, size)
+ * dcache_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are invalidated. Any partial lines at the ends of the interval are
* also cleaned to PoC to prevent data loss.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - kernel start address of region
+ * - end - kernel end address of region
*/
SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
/* FALLTHROUGH */
/*
- * __dma_inv_area(start, size)
+ * __dma_inv_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
- add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
b.lo 2b
dsb sy
ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
SYM_FUNC_END(__dma_inv_area)
/*
- * __clean_dcache_area_poc(kaddr, size)
+ * dcache_clean_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
/* FALLTHROUGH */
/*
- * __dma_clean_area(start, size)
+ * __dma_clean_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
SYM_FUNC_END(__dma_clean_area)
/*
- * __clean_dcache_area_pop(kaddr, size)
+ * dcache_clean_pop(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoP.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
- b __clean_dcache_area_poc
+ b dcache_clean_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
/*
* __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
* - size - size in question
*/
SYM_FUNC_START_PI(__dma_flush_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_map_area)
+ add x1, x0, x1
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_unmap_area)
+ add x1, x0, x1
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_area
ret
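The rewritten routines lean on the dcache_by_line_op assembler macro rather than open-coded loops. As a rough C model (a simplification, not the macro itself), the by-line maintenance over a [start, end) range amounts to:

/* dc_op stands in for DC CVAC/CIVAC/etc. on one line; the trailing
   barrier stands in for the DSB that completes the maintenance. */
static void dc_op_by_line(unsigned long start, unsigned long end,
                          unsigned long line_size,      /* from CTR_EL0 */
                          void (*dc_op)(unsigned long va))
{
        unsigned long va = start & ~(line_size - 1);    /* align down */

        while (va < end) {
                dc_op(va);              /* maintain one cache line */
                va += line_size;
        }
        /* a dsb would follow here to complete the operations */
}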
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 001737a8f309..cd72576ae2b7 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -402,14 +402,12 @@ static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
- sizeof(*pinned_asid_map), GFP_KERNEL);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
nr_pinned_asids = 0;
/*
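The bitmap_zalloc() conversion is intended to be behaviour-preserving; a small userspace model of the identity (BITS_PER_LONG and BITS_TO_LONGS mirror the kernel definitions):

#include <stdlib.h>

#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* bitmap_zalloc(nbits, gfp) hands back the same zeroed array of longs
   that the old kcalloc(BITS_TO_LONGS(nbits), sizeof(long)) call did. */
static unsigned long *bitmap_zalloc_model(unsigned long nbits)
{
        return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}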
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 871c82ab0a30..349c488765ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+ pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
+static bool is_el1_data_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
unsigned long flags;
u64 par, dfsc;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ if (!is_el1_data_abort(esr) ||
(esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
return false;
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc = esr & ESR_ELx_FSC;
- if (ec != ESR_ELx_EC_DABT_CUR)
+ if (!is_el1_data_abort(esr))
return false;
if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
*/
if (!(vma->vm_flags & vm_flags))
return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+ return handle_mm_fault(vma, addr, mm_flags, regs);
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -836,13 +841,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_el0_irq_bp_hardening(void)
-{
- /* PC has already been checked in entry.S */
- arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
@@ -921,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ mte_zero_clear_page_tags(page_address(page));
+ page_kasan_tag_reset(page);
+ set_bit(PG_mte_tagged, &page->flags);
+}
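The new is_el1_data_abort()/is_el1_instruction_abort() predicates simply name an exception-class comparison that was previously open-coded at each call site. A self-contained sketch (the EC field layout and the 0x25 value follow the ESR_ELx encoding in asm/esr.h; treat them as stated assumptions):

#include <stdbool.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT        26      /* 6-bit exception class field */
#define ESR_ELx_EC_DABT_CUR     0x25u   /* data abort, current EL */

static inline unsigned int esr_ec(uint64_t esr)
{
        return (esr >> ESR_ELx_EC_SHIFT) & 0x3f;
}

static inline bool is_el1_data_abort_model(uint64_t esr)
{
        return esr_ec(esr) == ESR_ELx_EC_DABT_CUR;
}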
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 6d44c028d1c9..2aaf950b906c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -14,28 +14,25 @@
#include <asm/cache.h>
#include <asm/tlbflush.h>
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long)kaddr;
-
if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
+ dcache_clean_pou(start, end);
+ icache_inval_all_pou();
} else {
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
- __flush_icache_range(addr, addr + len);
+ caches_clean_inval_pou(start, end);
}
}
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ sync_icache_aliases(start, end);
}
/*
@@ -48,7 +45,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
memcpy(dst, src, len);
- flush_ptrace_access(vma, page, uaddr, dst, len);
+ flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
void __sync_icache_dcache(pte_t pte)
@@ -56,7 +53,9 @@ void __sync_icache_dcache(pte_t pte)
struct page *page = pte_page(pte);
if (!test_bit(PG_dcache_clean, &page->flags)) {
- sync_icache_aliases(page_address(page), page_size(page));
+ sync_icache_aliases((unsigned long)page_address(page),
+ (unsigned long)page_address(page) +
+ page_size(page));
set_bit(PG_dcache_clean, &page->flags);
}
}
@@ -77,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
/* Ensure order against any prior non-cacheable writes */
dmb(osh);
- __clean_dcache_area_pop(addr, size);
+ dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
- __inval_dcache_area(addr, size);
+ dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e55409caaee3..6e1ca044ca90 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -499,6 +499,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
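The BUILD_BUG_ON added here cross-checks CONFIG_PGTABLE_LEVELS against the value derived from the VA size: each translation level resolves PAGE_SHIFT - 3 bits (a page holds 2^(PAGE_SHIFT - 3) eight-byte descriptors), so levels = DIV_ROUND_UP(va_bits - PAGE_SHIFT, PAGE_SHIFT - 3), which simplifies to (va_bits - 4) / (PAGE_SHIFT - 3). A quick userspace check of that arithmetic (assuming it mirrors ARM64_HW_PGTABLE_LEVELS()):

#include <stdio.h>

static int pgtable_levels(int va_bits, int page_shift)
{
        return (va_bits - 4) / (page_shift - 3);
}

int main(void)
{
        printf("%d\n", pgtable_levels(48, 12)); /* 4K pages, 48-bit VA: 4 */
        printf("%d\n", pgtable_levels(39, 12)); /* 4K pages, 39-bit VA: 3 */
        printf("%d\n", pgtable_levels(42, 16)); /* 64K pages, 42-bit VA: 2 */
        return 0;
}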
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6dd9369e3ea0..0b28cc218091 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ if (((addr | next | phys) & ~PMD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmdp, phys, prot);
@@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -1113,14 +1114,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
-#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1165,17 +1166,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
-#ifdef CONFIG_MEMORY_HOTPLUG
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
unmap_hotplug_range(start, end, true, altmap);
free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static inline pud_t *fixmap_pud(unsigned long addr)
{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 97d7bcd8d4f2..35936c5ae1ce 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,9 +46,13 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
-#define TCR_KASAN_HW_FLAGS 0
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif
/*
@@ -58,10 +62,8 @@
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
- MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
@@ -83,11 +85,7 @@ SYM_FUNC_START(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
@@ -145,11 +143,7 @@ SYM_FUNC_START(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -464,7 +458,7 @@ SYM_FUNC_START(__cpu_setup)
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
- mov_q x10, TCR_KASAN_HW_FLAGS
+ mov_q x10, TCR_MTE_FLAGS
orr tcr, tcr, x10
1:
#endif
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index a1937dfff31c..1c403536c9bb 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -159,10 +159,6 @@ static const struct prot_bits pte_bits[] = {
.set = "DEVICE/nGnRE",
}, {
.mask = PTE_ATTRINDX_MASK,
- .val = PTE_ATTRINDX(MT_DEVICE_GRE),
- .set = "DEVICE/GRE",
- }, {
- .mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL_NC),
.set = "MEM/NORMAL-NC",
}, {
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f7b194878a99..dccf98a37283 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/insn.h>
#include <asm/set_memory.h>
#include "bpf_jit.h"
@@ -178,9 +179,6 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
-/* Stack must be multiples of 16B */
-#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-
/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
@@ -255,7 +253,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
- ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);
+ /* Stack must be multiples of 16B */
+ ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -487,17 +486,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- switch (BPF_OP(code)) {
- case BPF_DIV:
- emit(A64_UDIV(is64, dst, dst, src), ctx);
- break;
- case BPF_MOD:
- emit(A64_UDIV(is64, tmp, dst, src), ctx);
- emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
- break;
- }
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
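round_up(x, 16) computes the same value the removed STACK_ALIGN() macro did for this power-of-two alignment. A self-contained check (the local round_up is a stand-in for the kernel macro, valid for power-of-two alignments):

#include <assert.h>

#define STACK_ALIGN_OLD(sz)     (((sz) + 15) & ~15)
#define round_up(x, a)          (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (unsigned int sz = 0; sz < 4096; sz++)
                assert(STACK_ALIGN_OLD(sz) == round_up(sz, 16));
        return 0;
}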
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 21fbdda7086e..49305c2e6dfd 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -3,7 +3,8 @@
# Internal CPU capabilities constants, keep this list sorted
BTI
-HAS_32BIT_EL0
+# Unreliable: use system_supports_32bit_el0() instead.
+HAS_32BIT_EL0_DO_NOT_USE
HAS_32BIT_EL1
HAS_ADDRESS_AUTH
HAS_ADDRESS_AUTH_ARCH
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index dabc8e46ce7b..d1bef11f8dc9 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_release_fence(); \
- __ret = cmpxchg_relaxed(ptr, o, n); \
+ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \
__ret; \
})
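The csky arch_cmpxchg() above composes a fully ordered operation from the relaxed variant bracketed by release and acquire fences. A C11 analogue of that composition (an illustration, not the csky code itself):

#include <stdatomic.h>

static int cmpxchg_full(atomic_int *ptr, int old, int new)
{
        int observed = old;

        atomic_thread_fence(memory_order_release);      /* __smp_release_fence() */
        atomic_compare_exchange_strong_explicit(ptr, &observed, new,
                                                memory_order_relaxed,
                                                memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* __smp_acquire_fence() */
        return observed;        /* cmpxchg returns the value it found */
}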
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 17479860d43d..1cbcba4b0dd1 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 589f090f48b9..68b22b499aeb 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -295,23 +295,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 0f9f5eef9338..e2993539af8e 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
pr_info("CPU%u Online: %s...\n", cpu, __func__);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 16ae20a0af34..1b280ef08004 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -115,7 +115,7 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
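These get_wchan() conversions, and the later ia64 ones, replace direct p->state reads with the task_is_running() helper introduced by this series alongside the state-field rename to __state. A userspace model of the accessor (assuming it mirrors the include/linux/sched.h definition, with READ_ONCE modelled as a volatile load):

#define TASK_RUNNING    0x0000
#define READ_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))

struct task_model {
        unsigned int __state;
};

/* replaces open-coded "p->state == TASK_RUNNING" at every call site */
static inline int task_is_running_model(struct task_model *p)
{
        return READ_ONCE(p->__state) == TASK_RUNNING;
}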
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 60ee7f0d60a8..e23139c8fc0d 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644
index a990d151f163..000000000000
--- a/arch/h8300/include/asm/atomic.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-#include <asm/irqflags.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter; \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- \
- flags = arch_local_irq_save(); \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
-}
-
-ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP_RETURN(sub, -=)
-
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op, c_op) \
- ATOMIC_FETCH_OP(op, c_op)
-
-ATOMIC_OPS(and, &=)
-ATOMIC_OPS(or, |=)
-ATOMIC_OPS(xor, ^=)
-ATOMIC_OPS(add, +=)
-ATOMIC_OPS(sub, -=)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- arch_local_irq_restore(flags);
- return ret;
-}
-
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- arch_local_irq_restore(flags);
- return ret;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644
index c64bb38ce242..000000000000
--- a/arch/h8300/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x,
- volatile void *ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("mov.b %2,%0\n\t"
- "mov.b %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 2:
- __asm__ __volatile__
- ("mov.w %2,%0\n\t"
- "mov.w %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 4:
- __asm__ __volatile__
- ("mov.l %2,%0\n\t"
- "mov.l %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- default:
- tmp = 0;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
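With h8300 being uniprocessor-only, its hand-rolled atomics added nothing over the asm-generic fallback, which wraps a plain read-modify-write in an interrupt-disabled section. A sketch of that UP pattern (the irq helpers are declared but not defined, only to keep the sketch compilable):

typedef struct { int counter; } atomic_model_t;

unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);

static inline int atomic_add_return_up(int i, atomic_model_t *v)
{
        unsigned long flags;
        int ret;

        flags = arch_local_irq_save();  /* sole CPU masked: RMW is atomic */
        ret = (v->counter += i);
        arch_local_irq_restore(flags);
        return ret;
}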
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index d4b53af657c8..65571ee15132 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -21,7 +21,6 @@
int main(void)
{
/* offsets into the task struct */
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_PTRACE, task_struct, ptrace);
OFFSET(TASK_BLOCKED, task_struct, blocked);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 46b1342ce515..2ac27e4248a4 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -134,7 +134,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 0281f92eea3d..c3590b2e9592 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -69,8 +69,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs)
static void __init bootmem_init(void)
{
- struct memblock_region *region;
-
memory_end = memory_start = 0;
/* Find main memory where is the kernel */
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 4ab895d7111f..6e94f8d04146 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
{
asm volatile(
"1: r6 = memw_locked(%0);\n"
@@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
- * atomic_read - reads a word, atomically
+ * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
- * atomic_xchg - atomic
+ * arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
- * atomic_cmpxchg - atomic compare-and-exchange values
+ * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
@@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* "old" is "expected" old val, __oldval is actual old value
*/
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
@@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int output; \
\
@@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
@@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
@@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 92b8a02e588a..cdb705e1496a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting.
*/
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index c61165c99ae0..6a6835fb4242 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -135,7 +135,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 279252e3e0f7..da22a35e6f03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -302,7 +302,7 @@ config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
default "10"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
This option specifies the maximum number of nodes in your SSI system.
MAX_NUMNODES will be 2^(This value).
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f267d956458f..266c429b9137 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
@@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
@@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
@@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
#define __ia64_atomic_const(i) 0
#endif
-#define atomic_add_return(i,v) \
+#define arch_atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_add(__ia64_aar_i, v); \
})
-#define atomic_sub_return(i,v) \
+#define arch_atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
-#define atomic_fetch_add(i,v) \
+#define arch_atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
-#define atomic_fetch_sub(i,v) \
+#define arch_atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
-#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
-#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
-#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
-#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
-#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
@@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
@@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
@@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
-#define atomic64_add_return(i,v) \
+#define arch_atomic64_add_return(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_add(__ia64_aar_i, v); \
})
-#define atomic64_sub_return(i,v) \
+#define arch_atomic64_sub_return(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
-#define atomic64_fetch_add(i,v) \
+#define arch_atomic64_fetch_add(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
-#define atomic64_fetch_sub(i,v) \
+#define arch_atomic64_fetch_sub(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
-#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
-#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
-#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
-#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
-#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
-#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic64_cmpxchg(v, old, new) \
- (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic_add(i,v) (void)atomic_add_return((i), (v))
-#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
+#define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
+#define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
-#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
+#define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
+#define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */
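Every ia64 ATOMIC_OP above is the classic compare-and-exchange retry loop: read the counter, compute the new value, and retry if another CPU changed the word in between. A C11 model of the same loop:

#include <stdatomic.h>

static int atomic_add_return_model(int i, atomic_int *v)
{
        int old, new;

        do {
                old = atomic_load_explicit(v, memory_order_relaxed);
                new = old + i;
        } while (!atomic_compare_exchange_strong(v, &old, new));
        return new;
}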
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..94ef84429843
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+#include <uapi/asm/cmpxchg.h>
+
+#define arch_xchg(ptr, x) \
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+
+#define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg64
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index f4dc81fa7146..1b990466d540 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,16 +82,16 @@ do { \
} while (0)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
({ \
struct page *page = alloc_page_vma( \
- GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
if (page) \
flush_dcache_page(page); \
page; \
})
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index b1d87955e8cc..5c51fceedaf9 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -1086,7 +1086,7 @@ static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
/*
* Get the ratios for processor frequency, bus frequency and interval timer to
- * to base frequency of the platform
+ * the base frequency of the platform
*/
static inline s64
ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 5f620e66384e..864775970c50 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -26,7 +26,7 @@
* the queue, and the other indicating the current tail. The lock is acquired
* by atomically noting the tail and incrementing it by one (thus adding
* ourself to the queue and noting our position), then waiting until the head
- * becomes equal to the the initial value of the tail.
+ * becomes equal to the initial value of the tail.
* The pad bits in the middle are used to prevent the next_ticket number
* overflowing into the now_serving number.
*
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 2a88c7204e52..809ddb6896db 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -257,7 +257,7 @@ static inline int uv_numa_blade_id(void)
return 0;
}
-/* Convert a cpu number to the the UV blade number */
+/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
return 0;
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
/*
* Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
__xchg_result; \
})
+#ifndef __KERNEL__
#define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
* we had to back-pedal and keep the "legacy" behavior of a full fence :-(
*/
+#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64
+#endif
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do { \
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 58233bb7976d..1fd61b78fb29 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -7,7 +7,7 @@
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off. We need this because we can't call SetVirtualMap() until
- * the kernel has booted far enough to allow allocation of struct vma_struct
+ * the kernel has booted far enough to allow allocation of struct vm_area_struct
* entries (which we would need to map stuff with memory attributes other
* than uncached or writeback...). Since the GetTime() service gets called
* earlier than that, we need to be able to make physical mode EFI calls from
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index fc1ff8a4d7de..441ed04b1037 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -844,22 +844,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cdbac4b52f30..e628a88607bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1788,7 +1788,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->task = p;
ti->cpu = cpu;
p->stack = ti;
- p->state = TASK_UNINTERRUPTIBLE;
+ p->__state = TASK_UNINTERRUPTIBLE;
cpumask_set_cpu(cpu, &p->cpus_mask);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 36a69b4e6169..5bfc79be4cef 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -343,7 +343,7 @@ init_record_index_pools(void)
/* - 2 - */
sect_min_size = sal_log_sect_min_sizes[0];
- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
+ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
if (sect_min_size > sal_log_sect_min_sizes[i])
sect_min_size = sal_log_sect_min_sizes[i];
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7e1a1525e202..e56d63f4abf9 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -529,7 +529,7 @@ get_wchan (struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -542,7 +542,7 @@ get_wchan (struct task_struct *p)
*/
unw_init_from_blocked_task(&info, p);
do {
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
if (unw_unwind(&info) < 0)
return 0;
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index e14f5653393a..df28c7dd164f 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -641,11 +641,11 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_STOPPED &&
+ if (READ_ONCE(child->__state) == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
set_notify_resume(child);
- child->state = TASK_TRACED;
+ WRITE_ONCE(child->__state, TASK_TRACED);
stopped = 1;
}
spin_unlock_irq(&child->sighand->siglock);
@@ -665,9 +665,9 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_TRACED &&
+ if (READ_ONCE(child->__state) == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
- child->state = TASK_STOPPED;
+ WRITE_ONCE(child->__state, TASK_STOPPED);
}
spin_unlock_irq(&child->sighand->siglock);
}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 49b488580939..d10f780c13b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -441,7 +441,6 @@ start_secondary (void *unused)
#endif
efi_map_pal_code();
cpu_init();
- preempt_disable();
smp_callin();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 09fc385c2acd..3639e0a7cb3b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 46b6e5f3a40f..d6579ec3ea32 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index f4d23977d2a5..29e946394fdb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -408,10 +408,6 @@ config SINGLE_MEMORY_CHUNK
order" to save memory that could be wasted for unused memory map.
Say N if not sure.
-config ARCH_DISCONTIGMEM_ENABLE
- depends on BROKEN
- def_bool MMU && !SINGLE_MEMORY_CHUNK
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
@@ -451,11 +447,6 @@ config M68K_L2_CACHE
depends on MAC
default y
-config NODES_SHIFT
- int
- default "3"
- depends on DISCONTIGMEM
-
config CPU_HAS_NO_BITFIELDS
bool
@@ -553,4 +544,3 @@ config CACHE_COPYBACK
The ColdFire CPU cache is set into Copy-back mode.
endchoice
endif
-
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 4d59ec2f5b8d..d964c1f27399 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -25,6 +25,9 @@ config ATARI
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
+config ATARI_KBD_CORE
+ bool
+
config MAC
bool "Macintosh support"
depends on MMU
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 82620f14124d..c54055a3d284 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -66,8 +66,7 @@ KBUILD_CFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += -pipe -ffreestanding
ifdef CONFIG_MMU
-# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
-KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
+KBUILD_CFLAGS += -ffixed-a2
else
# we can use a m68k-linux-gcc toolchain with these in place
KBUILD_CPPFLAGS += -DUTS_SYSNAME=\"uClinux\"
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 44f9b5216ac9..261a0f57cc9a 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -875,16 +875,8 @@ static const struct resource atari_scsi_tt_rsrc[] __initconst = {
#define FALCON_IDE_BASE 0xfff00000
static const struct resource atari_falconide_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_MEM,
- .start = FALCON_IDE_BASE,
- .end = FALCON_IDE_BASE + 0x39,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MFP_FSCSI,
- .end = IRQ_MFP_FSCSI,
- },
+ DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),
+ DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),
};
int __init atari_platform_init(void)
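DEFINE_RES_MEM(start, size) expands to a struct resource covering [start, start + size - 1] with IORESOURCE_MEM set, so the two entries above describe back-to-back MMIO windows at FALCON_IDE_BASE. A trimmed model (the flag value follows include/linux/ioport.h; the _MODEL names and the window comments are illustrative):

struct resource_model {
        unsigned long start;
        unsigned long end;
        unsigned long flags;
};

#define IORESOURCE_MEM_MODEL    0x00000200

#define DEFINE_RES_MEM_MODEL(_start, _size)                     \
        { .start = (_start), .end = (_start) + (_size) - 1,     \
          .flags = IORESOURCE_MEM_MODEL }

static const struct resource_model falconide_rsrc_model[] = {
        DEFINE_RES_MEM_MODEL(0xfff00000, 0x38),         /* first MMIO window */
        DEFINE_RES_MEM_MODEL(0xfff00000 + 0x38, 2),     /* second MMIO window */
};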
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 59b727b69357..0a2cacf7be08 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -85,7 +85,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -207,6 +206,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -253,7 +253,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -323,11 +322,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -344,6 +338,11 @@ CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_SCSI_ZORRO_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -563,6 +562,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -627,6 +627,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
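This hunk establishes the pattern repeated in every m68k defconfig below: the removed CONFIG_IDE stack is replaced by libata (CONFIG_ATA plus the machine's PATA_* driver, with verbose errors and BMDMA disabled), the netfilter logging backends are reshuffled (NF_LOG_NETDEV and NF_LOG_BRIDGE dropped, NF_LOG_IPV4 added alongside NF_LOG_ARP), and the new CRYPTO_ECDSA and TEST_DIV64 modules are enabled. Only the PATA_* selection differs per machine.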
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8d4ddcebe7b8..4dc6dcfaf28a 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -81,7 +81,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -203,6 +202,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -249,7 +249,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -519,6 +518,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -583,6 +583,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 9cc9f1a06516..23d910a692ab 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -88,7 +88,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -210,6 +209,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -256,7 +256,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -324,10 +323,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -339,6 +334,10 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -541,6 +540,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -605,6 +605,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c3f3f462e6ce..2c3f42833846 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -576,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 8c908fc5c191..5b1898d4b249 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -80,7 +80,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -202,6 +201,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,7 +248,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -521,6 +520,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -585,6 +585,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 4e68b72d9c50..9606ccd8dafa 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -315,11 +314,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -332,6 +326,10 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -544,6 +542,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -608,6 +607,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d31896293c39..3175ba5007e1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -99,7 +99,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -221,6 +220,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -267,7 +267,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -344,15 +343,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
-CONFIG_BLK_DEV_FALCON_IDE=y
-CONFIG_BLK_DEV_MAC_IDE=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -376,6 +366,13 @@ CONFIG_MVME147_SCSI=y
CONFIG_MVME16x_SCSI=y
CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -630,6 +627,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -694,6 +692,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c7442f9dd469..793085f00c99 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -77,7 +77,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -199,6 +198,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -245,7 +245,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -511,6 +510,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -575,6 +575,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 233b82ea103a..56fbac7943b2 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -576,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 664025a0f6a4..0e15431b65e2 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -314,10 +313,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -328,6 +323,10 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -530,6 +529,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -594,6 +594,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 73293a0b3dc8..3490a05f29b8 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -514,6 +513,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -577,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index bca8a6f3e92f..4e92c8c332fc 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -513,6 +512,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -577,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index ba808543161a..9a8394e96388 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -55,7 +55,6 @@ struct nfhd_device {
int id;
u32 blocks, bsize;
int bshift;
- struct request_queue *queue;
struct gendisk *disk;
};
@@ -119,32 +118,24 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
dev->bsize = bsize;
dev->bshift = ffs(bsize) - 10;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL)
- goto free_dev;
-
- blk_queue_logical_block_size(dev->queue, bsize);
-
- dev->disk = alloc_disk(16);
+ dev->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!dev->disk)
- goto free_queue;
+ goto free_dev;
dev->disk->major = major_num;
dev->disk->first_minor = dev_id * 16;
+ dev->disk->minors = 16;
dev->disk->fops = &nfhd_ops;
dev->disk->private_data = dev;
sprintf(dev->disk->disk_name, "nfhd%u", dev_id);
set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
- dev->disk->queue = dev->queue;
-
+ blk_queue_logical_block_size(dev->disk->queue, bsize);
add_disk(dev->disk);
list_add_tail(&dev->list, &nfhd_list);
return 0;
-free_queue:
- blk_cleanup_queue(dev->queue);
free_dev:
kfree(dev);
out:
@@ -186,8 +177,7 @@ static void __exit nfhd_exit(void)
list_for_each_entry_safe(dev, next, &nfhd_list, list) {
list_del(&dev->list);
del_gendisk(dev->disk);
- put_disk(dev->disk);
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->disk);
kfree(dev);
}
unregister_blkdev(major_num, "nfhd");
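This is the then-new combined gendisk/queue API: blk_alloc_disk() allocates the request_queue together with the gendisk, so the driver-private queue pointer goes away, and blk_cleanup_disk() tears both down. A hedged sketch of the resulting shape (function names invented, error paths elided):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static int example_register(u32 bsize)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

	if (!disk)
		return -ENOMEM;
	/* the queue is allocated with the disk and lives at disk->queue */
	blk_queue_logical_block_size(disk->queue, bsize);
	add_disk(disk);
	return 0;
}

static void example_unregister(struct gendisk *disk)
{
	del_gendisk(disk);
	blk_cleanup_disk(disk);	/* replaces put_disk() + blk_cleanup_queue() */
}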
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 756c5cc58f94..8637bf8a2f65 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
@@ -30,7 +30,7 @@
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \
@@ -38,7 +38,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_inc(atomic_t *v)
+static inline void arch_atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static inline void atomic_dec(atomic_t *v)
+static inline void arch_atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static inline int atomic_dec_and_test(atomic_t *v)
+static inline int arch_atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_dec_and_test atomic_dec_and_test
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-static inline int atomic_dec_and_test_lt(atomic_t *v)
+static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
char c;
__asm__ __volatile__(
@@ -150,49 +150,49 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0;
}
-static inline int atomic_inc_and_test(atomic_t *v)
+static inline int arch_atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_inc_and_test atomic_inc_and_test
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
+ prev = arch_atomic_read(v);
if (prev == old)
- atomic_set(v, new);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
- atomic_set(v, new);
+ prev = arch_atomic_read(v);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
#endif /* !CONFIG_RMW_INSNS */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("subl %2,%1; seq %0"
@@ -200,9 +200,9 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_sub_and_test atomic_sub_and_test
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-static inline int atomic_add_negative(int i, atomic_t *v)
+static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
@@ -210,6 +210,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_add_negative atomic_add_negative
+#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
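The arch_ prefix is what lets m68k opt into the generic instrumented atomics: with ARCH_ATOMIC, the unprefixed atomic_*() entry points are generated on top of these hooks with KASAN/KCSAN annotations inserted. Approximately (a sketch of the asm-generic wrapper, not code from this patch):

static __always_inline void
atomic_inc(atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_inc(v);
}

This is also why the plain atomic_inc() call that stays behind in mmu_context.h below still compiles: it now resolves to the instrumented wrapper.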
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index a4aa82021d3b..e8ca4b0ccefa 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#else
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index a5d358855878..8ed6ac14d99f 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
if (mm->context != NO_CONTEXT)
return;
- while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+ while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts);
steal_context();
}
diff --git a/arch/m68k/include/asm/mmzone.h b/arch/m68k/include/asm/mmzone.h
deleted file mode 100644
index 64573fe8e60d..000000000000
--- a/arch/m68k/include/asm/mmzone.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_MMZONE_H_
-#define _ASM_M68K_MMZONE_H_
-
-extern pg_data_t pg_data_map[];
-
-#define NODE_DATA(nid) (&pg_data_map[nid])
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-
-#endif /* _ASM_M68K_MMZONE_H_ */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 97087dd3ca6d..2f1c54e4725d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,7 +62,7 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
-#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM)
+#ifndef CONFIG_MMU
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#endif
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 2411ea9ef578..a5b459bcb7d8 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -126,26 +126,6 @@ static inline void *__va(unsigned long x)
extern int m68k_virt_to_node_shift;
-#ifndef CONFIG_DISCONTIGMEM
-#define __virt_to_node(addr) (&pg_data_map[0])
-#else
-extern struct pglist_data *pg_data_table[];
-
-static inline __attribute_const__ int __virt_to_node_shift(void)
-{
- int shift;
-
- asm (
- "1: moveq #0,%0\n"
- m68k_fixup(%c1, 1b)
- : "=d" (shift)
- : "i" (m68k_fixup_vnode_shift));
- return shift;
-}
-
-#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
-#endif
-
#define virt_to_page(addr) ({ \
pfn_to_page(virt_to_pfn(addr)); \
})
@@ -153,23 +133,8 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pfn_to_virt(page_to_pfn(page)); \
})
-#ifdef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) ({ \
- unsigned long __pfn = (pfn); \
- struct pglist_data *pgdat; \
- pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
- pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
-})
-#define page_to_pfn(_page) ({ \
- const struct page *__p = (_page); \
- struct pglist_data *pgdat; \
- pgdat = &pg_data_map[page_to_nid(__p)]; \
- ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
-})
-#else
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
-#endif
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
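With the DISCONTIGMEM branch gone, m68k always takes the ARCH_PFN_OFFSET plus <asm-generic/memory_model.h> path: one flat mem_map array offset by the first memory chunk's PFN. The generic flatmem definitions are approximately:

/* <asm-generic/memory_model.h>, FLATMEM case (sketch): */
#define __pfn_to_page(pfn) \
	(mem_map + ((unsigned long)(pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) \
	((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)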
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 8d0f862ee9d7..c9d0d84158a4 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -13,9 +13,9 @@ extern unsigned long memory_end;
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index 5337bc2c262f..a6318ccd308f 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -263,7 +263,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr
BUG();
}
-static inline void flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
BUG();
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 1c1b875fadc1..2e192a5df949 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -34,9 +34,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot)
return prot;
}
#else
-
-#include <asm/cacheflush.h>
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index da83cc83e791..db49f9091711 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -268,7 +268,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index f55bdcb8e4f1..bd0274c7592e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -402,8 +402,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* to this process.
*/
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, addr);
- if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+ vma = vma_lookup(current->mm, addr);
+ if (!vma || addr + len > vma->vm_end)
goto out_unlock;
}
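vma_lookup() was introduced by this same series as a stricter find_vma(): it returns a VMA only when addr actually falls inside it, which is why the separate addr < vma->vm_start check can go while the end-of-range check stays (the flush range may extend past the VMA). Its definition is essentially:

/* include/linux/mm.h (sketch of the helper as added in this series): */
static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
						unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && addr < vma->vm_start)
		vma = NULL;
	return vma;
}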
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 1cdac959bd91..5d16f9b47aa9 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -933,13 +933,15 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
-static const struct resource mac_ide_quadra_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_F),
};
-static const struct resource mac_ide_pb_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_C),
};
@@ -949,7 +951,7 @@ static const struct resource mac_pata_baboon_rsrc[] __initconst = {
DEFINE_RES_IRQ(IRQ_BABOON_1),
};
-static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+static const struct pata_platform_info mac_pata_data __initconst = {
.ioport_shift = 2,
};
@@ -1067,17 +1069,19 @@ int __init mac_platform_init(void)
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_quadra_rsrc, ARRAY_SIZE(mac_pata_quadra_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_PB:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_pb_rsrc, ARRAY_SIZE(mac_pata_pb_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_BABOON:
platform_device_register_resndata(NULL, "pata_platform", -1,
mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
- &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ &mac_pata_data, sizeof(mac_pata_data));
break;
}
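Same resource split as the Falcon case above, but via the generic pata_platform driver, so the register spacing must be stated explicitly: the Quadra/PowerBook IDE registers sit four bytes apart, which .ioport_shift = 2 encodes. The driver applies the shift roughly like this (abbreviated sketch of drivers/ata/pata_platform.c; example_setup_port is an invented name):

#include <linux/ata.h>
#include <linux/libata.h>

static void example_setup_port(struct ata_ioports *ioaddr, unsigned int shift)
{
	/* each taskfile register offset is scaled by 1 << ioport_shift */
	ioaddr->data_addr  = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
	ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
	/* ... the remaining registers follow the same pattern */
}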
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1759ab875d47..5d749e188246 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -44,28 +44,8 @@ EXPORT_SYMBOL(empty_zero_page);
int m68k_virt_to_node_shift;
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
void __init m68k_setup_node(int node)
{
-#ifdef CONFIG_DISCONTIGMEM
- struct m68k_mem_info *info = m68k_memory + node;
- int i, end;
-
- i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
- end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
- for (; i <= end; i++) {
- if (pg_data_table[i])
- pr_warn("overlap at %u for chunk %u\n", i, node);
- pg_data_table[i] = pg_data_map + node;
- }
-#endif
node_set_online(node);
}
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index d6a423875231..5caf1e5be1c2 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -286,14 +286,39 @@ static int q40_set_rtc_pll(struct rtc_pll_info *pll)
return -EINVAL;
}
-static __init int q40_add_kbd_device(void)
-{
- struct platform_device *pdev;
+#define PCIDE_BASE1 0x1f0
+#define PCIDE_BASE2 0x170
+#define PCIDE_CTL 0x206
+
+static const struct resource q40_pata_rsrc_0[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE1 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE1 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE1, 8),
+ DEFINE_RES_IO(PCIDE_BASE1 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(14),
+};
+static const struct resource q40_pata_rsrc_1[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE2 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE2 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE2, 8),
+ DEFINE_RES_IO(PCIDE_BASE2 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(15),
+};
+
+static __init int q40_platform_init(void)
+{
if (!MACH_IS_Q40)
return -ENODEV;
- pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
+ platform_device_register_simple("q40kbd", -1, NULL, 0);
+
+ platform_device_register_simple("atari-falcon-ide", 0, q40_pata_rsrc_0,
+ ARRAY_SIZE(q40_pata_rsrc_0));
+
+ platform_device_register_simple("atari-falcon-ide", 1, q40_pata_rsrc_1,
+ ARRAY_SIZE(q40_pata_rsrc_1));
+
+ return 0;
}
-arch_initcall(q40_add_kbd_device);
+arch_initcall(q40_platform_init);
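The keyboard-only initcall grows into a general platform init that also registers both ISA IDE channels as pata_falcon ("atari-falcon-ide") devices. The MEM resources come from Q40's sparse ISA mapping, where I/O port N appears at q40_isa_io_base + N * 4 (the scaling used by the Q40 ISA accessors in asm/q40_master.h), while the IO and IRQ resources carry the two channels' conventional identities, 0x1f0/IRQ 14 and 0x170/IRQ 15.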
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index b41f323e1fde..6d4af39e3890 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -3,7 +3,7 @@ KBUILD_DEFCONFIG := mmu_defconfig
UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
-# What CPU vesion are we building for, and crack it open
+# What CPU version are we building for, and crack it open
# as major.minor.rev
CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 29b0e557aa7c..a055f5dbe00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
deleted file mode 100644
index 41e9aff23a62..000000000000
--- a/arch/microblaze/include/asm/atomic.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_ATOMIC_H
-#define _ASM_MICROBLAZE_ATOMIC_H
-
-#include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- unsigned long flags;
- int res;
-
- local_irq_save(flags);
- res = v->counter - 1;
- if (res >= 0)
- v->counter = res;
- local_irq_restore(flags);
-
- return res;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
deleted file mode 100644
index 3523b51aab36..000000000000
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_CMPXCHG_H
-#define _ASM_MICROBLAZE_CMPXCHG_H
-
-#ifndef CONFIG_SMP
-# include <asm-generic/cmpxchg.h>
-#endif
-
-#endif /* _ASM_MICROBLAZE_CMPXCHG_H */
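Both headers collapse into their asm-generic versions (note the matching generic-y += cmpxchg.h in the Kbuild hunk above). The one helper microblaze carried — a UP-only, IRQ-disabling atomic_dec_if_positive() — is covered by the generic cmpxchg-based fallback, approximately:

/* generic fallback (include/linux/atomic-arch-fallback.h, sketch): */
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
	int dec, c = arch_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic_try_cmpxchg(v, &c, dec));

	return dec;
}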
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index bf681f272f72..ce550978f4fc 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -35,9 +35,6 @@
#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
-#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
-#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
-
/*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 6c69ce7be2e8..b77dd188dec4 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -70,7 +70,6 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ed51970c08e7..4704a16c2e44 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2867,7 +2867,7 @@ config RANDOMIZE_BASE_MAX_OFFSET
config NODES_SHIFT
int
default "6"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index b184baa4e56a..f175bce2987f 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -18,6 +18,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
index 569e814def83..5747f171de29 100644
--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -114,6 +114,52 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>,
<0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+ <13 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+ gmac@3,1 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+ <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <0>;
+ };
+ };
+ };
+
ehci@4,1 {
compatible = "pci0014,7a14.0",
"pci0014,7a14",
diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
index f99a7a11fded..58b9bb47c58a 100644
--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
@@ -186,7 +186,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
@@ -208,7 +209,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1900 0x0 0x0 0x0 0x0>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
#include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \
-static __always_inline type pfx##_read(const pfx##_t *v) \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
return READ_ONCE(v->counter); \
} \
\
-static __always_inline void pfx##_set(pfx##_t *v, type i) \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
WRITE_ONCE(v->counter, i); \
} \
\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+static __always_inline type \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
- return cmpxchg(&v->counter, o, n); \
+ return arch_cmpxchg(&v->counter, o, n); \
} \
\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
- return xchg(&v->counter, n); \
+ return arch_xchg(&v->counter, n); \
}
ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
#endif
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
type temp; \
\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
}
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
}
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
int temp, result; \
\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif
#undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i.
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
#undef ATOMIC_SIP_OP
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
}
}
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg_local(ptr, old, new) \
+#define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
- __res = cmpxchg_local((ptr), (old), (new)); \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
\
/* \
* In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
})
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#else
# include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
# ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
return ret;
}
-# define cmpxchg64(ptr, o, n) ({ \
+# define arch_cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
})
# else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f021de661c3a..d1477ecb1af9 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,7 +11,6 @@
#include <linux/linkage.h>
#include <linux/smp.h>
-#include <linux/irqdomain.h>
#include <asm/mipsmtregs.h>
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fca4547d580f..696f6b009377 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -109,10 +109,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 wait_exits;
u64 cache_exits;
u64 signal_exits;
@@ -142,12 +143,6 @@ struct kvm_vcpu_stat {
#ifdef CONFIG_CPU_LOONGSON64
u64 vz_cpucfg_exits;
#endif
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
};
struct kvm_arch_memory_slot {
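Part of the KVM binary-stats rework: the halt-polling counters that every architecture duplicated move into generic structs embedded as .generic, leaving the per-arch structs with only MIPS-specific exit counters. In this series the embedded blocks look approximately like this (sketch, from include/linux/kvm_types.h):

struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;
};

struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
};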
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index f93aa5ee2e2e..3481ed4c117b 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -3,6 +3,9 @@
*
*/
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
#ifndef _ASSEMBLER_
struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
/* Polling period in count cycles for secondary CPU's */
#define LAUNCHPERIOD 10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index b826b8473e95..602a21aee9d4 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -8,7 +8,7 @@
#include <asm/page.h>
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
# include <mmzone.h>
#endif
@@ -20,10 +20,4 @@
#define nid_to_addrbase(nid) 0
#endif
-#ifdef CONFIG_DISCONTIGMEM
-
-#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
-
-#endif /* CONFIG_DISCONTIGMEM */
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 195ff4e9771f..96bc798c1ec1 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -239,7 +239,7 @@ static inline int pfn_valid(unsigned long pfn)
/* pfn_valid is defined in linux/mmzone.h */
-#elif defined(CONFIG_NEED_MULTIPLE_NODES)
+#elif defined(CONFIG_NUMA)
#define pfn_valid(pfn) \
({ \
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2d949969313b..cdf404a831b2 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -138,6 +138,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5735b2cd6f2a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
void output_task_defines(void)
{
COMMENT("MIPS task_struct offsets.");
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 54dfba8fa77c..75bff0f77319 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
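The kprobes fault_handler callback was removed tree-wide in this series — it had no remaining in-tree users — so each architecture's kprobe_fault_handler() drops the indirection and keeps only its own single-step recovery.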
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bff080db0294..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ra = 0;
#endif
- if (!task || task == current || task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
if (!task_stack_page(task))
goto out;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
*/
calibrate_delay();
- preempt_disable();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
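Another tree-wide cleanup in this series: idle tasks are now initialized with preemption already disabled, so the explicit preempt_disable() in each architecture's start_secondary() becomes redundant and is removed.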
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0b4e06303c55..6f07362de5ce 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -784,7 +784,6 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
int si_code;
- struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -800,8 +799,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
case SIGSEGV:
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, (unsigned long)fault_addr);
- if (vma && (vma->vm_start <= (unsigned long)fault_addr))
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
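
Note: vma_lookup() folds the vm_start check into the lookup. find_vma() returns the first VMA that ends above the address, so callers had to reject addresses falling in the hole below it, as the removed lines here did by hand. Sketch of the helper this series adds in include/linux/mm.h:

    static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
                                                    unsigned long addr)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            /* find_vma() may return a VMA strictly above addr; reject it. */
            if (vma && addr < vma->vm_start)
                    vma = NULL;
            return vma;
    }
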
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 30cc060857c7..c67250a956b8 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 4d4af97dcc88..af9dd029a4e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -38,43 +38,63 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("wait", wait_exits),
- VCPU_STAT("cache", cache_exits),
- VCPU_STAT("signal", signal_exits),
- VCPU_STAT("interrupt", int_exits),
- VCPU_STAT("cop_unusable", cop_unusable_exits),
- VCPU_STAT("tlbmod", tlbmod_exits),
- VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
- VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
- VCPU_STAT("addrerr_st", addrerr_st_exits),
- VCPU_STAT("addrerr_ld", addrerr_ld_exits),
- VCPU_STAT("syscall", syscall_exits),
- VCPU_STAT("resvd_inst", resvd_inst_exits),
- VCPU_STAT("break_inst", break_inst_exits),
- VCPU_STAT("trap_inst", trap_inst_exits),
- VCPU_STAT("msa_fpe", msa_fpe_exits),
- VCPU_STAT("fpe", fpe_exits),
- VCPU_STAT("msa_disabled", msa_disabled_exits),
- VCPU_STAT("flush_dcache", flush_dcache_exits),
- VCPU_STAT("vz_gpsi", vz_gpsi_exits),
- VCPU_STAT("vz_gsfc", vz_gsfc_exits),
- VCPU_STAT("vz_hc", vz_hc_exits),
- VCPU_STAT("vz_grr", vz_grr_exits),
- VCPU_STAT("vz_gva", vz_gva_exits),
- VCPU_STAT("vz_ghfc", vz_ghfc_exits),
- VCPU_STAT("vz_gpa", vz_gpa_exits),
- VCPU_STAT("vz_resvd", vz_resvd_exits),
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, wait_exits),
+ STATS_DESC_COUNTER(VCPU, cache_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, int_exits),
+ STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+ STATS_DESC_COUNTER(VCPU, break_inst_exits),
+ STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+ STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+ STATS_DESC_COUNTER(VCPU, fpe_exits),
+ STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+ STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+ STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+ STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
- VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+ STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- {NULL}
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
bool kvm_trace_guest_mode_change;
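
Note: the debugfs name/value table is replaced by self-describing descriptors served over the binary stats interface; the headers initialised above tell userspace where the id string, the descriptor array and the data block live inside the stats file descriptor. A hedged userspace sketch, assuming the fd comes from the KVM_GET_STATS_FD ioctl and that every descriptor covers a single u64 (true for the plain counters above):

    #include <unistd.h>
    #include <linux/kvm.h>

    static __u64 read_stat(int stats_fd, const struct kvm_stats_header *hdr,
                           unsigned int index)
    {
            __u64 val = 0;

            /* layout: [header][id string][descriptors][data] */
            pread(stats_fd, &val, sizeof(val),
                  hdr->data_offset + (off_t)index * sizeof(__u64));
            return val;
    }
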
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index aeb1b989cd4e..63dccb2ed08b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index de03838b343b..a9b72eacfc0b 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
return flags;
}
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
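
Note: these irq-flag helpers are marked notrace because they can be called from the function-tracing path itself; with tracing or preempt debugging enabled, plain preempt_disable() routes through an instrumented preempt_count_add(), which would recurse. The _notrace variants only touch the preempt count. Sketch of the distinction, per include/linux/preempt.h in mainline:

    #define preempt_disable_notrace() \
    do { \
            __preempt_count_inc();  /* bump the count, skip trace hooks */ \
            barrier(); \
    } while (0)
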
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index a7bf0c80371c..830ab91e574f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PVA(_PAGE_PRESENT);
- protection_map[5] = PVA(_PAGE_PRESENT);
- protection_map[6] = PVA(_PAGE_PRESENT);
- protection_map[7] = PVA(_PAGE_PRESENT);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
- protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PVA(_PAGE_PRESENT);
- protection_map[13] = PVA(_PAGE_PRESENT);
- protection_map[14] = PVA(_PAGE_PRESENT);
- protection_map[15] = PVA(_PAGE_PRESENT);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
-#undef _PVA
#undef PM
void cpu_cache_init(void)
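
Note: dropping _PAGE_VALID | _PAGE_ACCESSED from the base protections restores the behaviour where new mappings start out not-young and take a fault on first access, letting the MIPS software-managed TLB path set the valid/accessed bits itself. For context, the table is indexed by the packed VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits; a simplified sketch of the consumer (mm/mmap.c, unchanged here):

    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            /* entries 0-7: private mappings; 8-15: shared counterparts */
            return protection_map[vm_flags &
                                  (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
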
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c36358758969..19347dc6bbf8 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -394,7 +394,7 @@ void maar_init(void)
}
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -454,9 +454,6 @@ void __init mem_init(void)
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
@@ -476,7 +473,7 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index aebd4964ea34..c48e23cf5b5e 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index d2216942af18..ab9bedb82b28 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/xtalk-bridge.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
+#include <linux/irqdomain.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 0c5de07da097..0135376c5de5 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 42df9fafa943..95c1bff1ab9f 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index e8374e4c705b..ba87704073c8 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 940d32842793..62faafbc28e4 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -76,18 +76,12 @@
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#ifndef CONFIG_DISCONTIGMEM
-
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-#else /* CONFIG_DISCONTIGMEM */
-#error CONFIG_DISCONTIGMEM is not supported yet.
-#endif /* !CONFIG_DISCONTIGMEM */
-
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index c1327e552ec6..391895b54d13 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -239,7 +239,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_start, stack_end;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
diff --git a/arch/nios2/include/asm/irq.h b/arch/nios2/include/asm/irq.h
index 13ce37272279..c52c94884e93 100644
--- a/arch/nios2/include/asm/irq.h
+++ b/arch/nios2/include/asm/irq.h
@@ -10,6 +10,5 @@
#define NIOS2_CPU_NR_IRQS 32
#include <asm-generic/irq.h>
-#include <linux/irqdomain.h>
#endif
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index 5f3555ce4865..c6a1a9f6ac42 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
static u32 ienable;
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index c5f916ca6845..9ff37ba2bb60 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -223,7 +223,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index b589fac39b92..326167e4783a 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
-#define atomic_and atomic_and
-#define atomic_or atomic_or
-#define atomic_xor atomic_xor
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
/*
* Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#include <asm-generic/atomic.h>
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#include <asm/cmpxchg.h>
+
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_OPENRISC_ATOMIC_H */
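
Note: the arch_ prefix hooks these into the generic instrumentation layer: the unprefixed atomic_*() names become generated wrappers that apply KASAN/KCSAN checks and then call the raw arch_ primitive defined here. Abridged sketch of the wrapper shape, per include/asm-generic/atomic-instrumented.h:

    static __always_inline int
    atomic_fetch_add(int i, atomic_t *v)
    {
            instrument_atomic_read_write(v, sizeof(*v));  /* sanitizer hook */
            return arch_atomic_fetch_add(i, v);           /* raw op above */
    }
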
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f9cd43a39d72..79fd16162ccb 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
}
}
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
(__typeof__(*(ptr))) __xchg((ptr), \
(unsigned long)(with), \
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index 185dcd3731ed..dbf030365ab4 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -25,7 +25,7 @@
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 18c703d1d761..710651d5aaae 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -37,7 +37,6 @@
int main(void)
{
/* offsets into the task_struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 48e1092a64de..415e209732a3 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
set_cpu_online(cpu, true);
local_irq_enable();
-
- preempt_disable();
/*
* OK, it's off to the idle thread for us
*/
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 21b375c67e53..dd5a299ada69 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \
-static __inline__ void atomic_##op(int i, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
\
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
}
#define ATOMIC64_OP_RETURN(op, c_op) \
-static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP
static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
+arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64
-atomic64_read(const atomic64_t *v)
+arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 84ee232278a6..5f274be10567 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -98,7 +98,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
- return __cmpxchg_local_generic(ptr, old, new_, size);
+ return __generic_cmpxchg_local(ptr, old, new_, size);
}
}
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
-#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f60904329bbc..5b5351cdcb33 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -119,6 +119,8 @@
#define SO_PREFER_BUSY_POLL 0x4043
#define SO_BUSY_POLL_BUDGET 0x4044
+#define SO_NETNS_COOKIE 0x4045
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index cd2cc1b1648c..33113ba24054 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -42,7 +42,6 @@
int main(void)
{
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b144fbe29bc1..184ec3c1eae4 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -249,7 +249,7 @@ get_wchan(struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -260,7 +260,7 @@ get_wchan(struct task_struct *p)
do {
if (unwind_once(&info) < 0)
return 0;
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 10227f667c8a..1405b603b91b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
#endif
smp_cpu_init(slave_id);
- preempt_disable();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 088dd2afcfe4..14b132cf95e2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -671,7 +671,7 @@ config NODES_SHIFT
int
default "8" if PPC64
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config USE_PERCPU_NUMA_NODE_ID
def_bool y
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index c2717f31925a..ccda0a91abf0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 872e4485dc3f..ddc018d42252 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
+ i2c@118000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@118100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-i2c-1.dtsi"
+ i2c@119000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@119100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 1c7b75834e04..02ee6f5ac9fe 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -120,6 +120,7 @@ extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_branch_caches;
@@ -140,7 +141,7 @@ void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 61c6e8b200e8..a1732a79e92a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -23,7 +23,7 @@
#define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
@@ -32,13 +32,13 @@ static __inline__ int atomic_read(const atomic_t *v)
return t;
}
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
@@ -53,7 +53,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
@@ -109,16 +109,16 @@ ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void atomic_inc(atomic_t *v)
+static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
@@ -131,9 +131,9 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
@@ -149,7 +149,7 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
@@ -162,9 +162,9 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
@@ -180,17 +180,20 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
return t;
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -199,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* would be a surprise).
*/
static __always_inline bool
-atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -229,7 +232,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
@@ -250,7 +253,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return t;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -259,7 +262,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
+static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
@@ -280,14 +283,14 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
return t1;
}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
int t;
@@ -307,13 +310,13 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64 atomic64_read(const atomic64_t *v)
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
@@ -322,13 +325,13 @@ static __inline__ s64 atomic64_read(const atomic64_t *v)
return t;
}
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -344,7 +347,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -362,7 +365,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 res, t; \
\
@@ -386,11 +389,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
@@ -401,16 +404,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOPIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-static __inline__ void atomic64_inc(atomic64_t *v)
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
s64 t;
@@ -423,9 +426,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_inc atomic64_inc
+#define arch_atomic64_inc arch_atomic64_inc
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -441,7 +444,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-static __inline__ void atomic64_dec(atomic64_t *v)
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
s64 t;
@@ -454,9 +457,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_dec atomic64_dec
+#define arch_atomic64_dec arch_atomic64_dec
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -472,14 +475,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t;
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 t;
@@ -498,16 +501,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_fetch_add_unless - add unless the number is a given value
@@ -518,7 +524,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 t;
@@ -539,7 +545,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
* atomic_inc64_not_zero - increment unless the number is zero
@@ -548,7 +554,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
s64 t1, t2;
@@ -569,7 +575,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index eace8c3f7b0a..c02f42d1031e 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -19,6 +19,7 @@ struct mmu_psize_def {
int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
union {
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8b33601cdb9d..a46fd37ad552 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -4,6 +4,10 @@
#include <asm/hvcall.h>
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
struct vm_area_struct;
struct mm_struct;
struct mmu_gather;
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..05f246c0e36e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -185,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg_local(ptr,x) \
+#define arch_xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -467,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -476,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -502,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 98c8bd155bf9..b167186aaee4 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu & ~0x6; /* Big Core */
+ else
+ return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu | 0x6; /* Big Core */
+ else
+ return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return 2; /* Big Core */
+ else
+ return 1;
+}
+
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
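
Note: on POWER9 "big core" parts, two SMT4 half-cores are fused into one SMT8 core with their thread numbers interleaved, so the threads sharing a TLB are every second thread: hence first = cpu & ~0x6 and a step of 2. A hedged usage sketch mirroring the loop shape the KVM HV code adopts with these helpers (do_flush_on() is hypothetical):

    void flush_tlb_siblings(int cpu)
    {
            int sib;

            for (sib = cpu_first_tlb_thread_sibling(cpu);
                 sib <= cpu_last_tlb_thread_sibling(cpu);
                 sib += cpu_tlb_thread_sibling_step())
                    do_flush_on(sib);  /* hypothetical per-thread flush */
    }
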
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c1a8aac01cf9..bb6f78fcf981 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,19 @@
/* PACA save area size in u64 units (exgen, exmc, etc) */
#define EX_SIZE 10
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
+
/*
* maximum recursive depth of MCE exceptions
*/
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index e3b29eda8074..7e4b2cef40c2 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -413,9 +413,9 @@
#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
-/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
#define H_RPTI_TYPE_PRT 0x0008
-/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
#define H_RPTI_TYPE_PAT 0x0008
#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
H_RPTI_TYPE_PRT)
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index b2bd58830430..4982f3711fc3 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -6,7 +6,6 @@
/*
*/
-#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@@ -23,8 +22,8 @@ extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Same thing, used by the generic IRQ code */
-#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
+/* Number of irqs reserved for a legacy isa controller */
+#define NR_IRQS_LEGACY 16
extern irq_hw_number_t virq_to_hw(unsigned int virq);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 2d5c6bec2b4f..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -50,7 +50,7 @@ l_yes:
1098: nop; \
.pushsection __jump_table, "aw"; \
.long 1098b - ., LABEL - .; \
- FTR_ENTRY_LONG KEY; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
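
Note: storing KEY - . makes the key field PC-relative like the code and target fields, so jump-table entries need no absolute relocations and stay valid wherever the kernel image lands. A reader recovers the absolute key by adding back the entry's own address; sketch of the generic accessor shape (the real one also masks low flag bits out of the stored offset):

    static inline struct static_key *jump_entry_key(const struct jump_entry *e)
    {
            /* key was stored as "KEY - .", i.e. relative to its own slot */
            return (struct static_key *)((unsigned long)&e->key + e->key);
    }
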
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a3633560493b..fbbf3cec92e9 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -147,6 +147,7 @@
#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
#define KVM_INST_FETCH_FAILED -1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e6b53c6e21e3..caaa0f592d8e 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9bb9bb370b53..eaf3a562bf1e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -153,10 +153,18 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an errata.
+ */
+#define HDSISR_CANARY 0x7fff
+
+/*
* We use a lock bit in HPTE dword 0 to synchronize updates and
* accesses to each HPTE, and another bit to indicate non-present
* HPTEs.
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1e83359f286b..9f52f282b1aa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -51,6 +51,7 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>
@@ -80,12 +81,13 @@ struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;
struct kvm_vm_stat {
- ulong remote_tlb_flush;
- ulong num_2M_pages;
- ulong num_1G_pages;
+ struct kvm_vm_stat_generic generic;
+ u64 num_2M_pages;
+ u64 num_1G_pages;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 sum_exits;
u64 mmio_exits;
u64 signal_exits;
@@ -101,14 +103,8 @@ struct kvm_vcpu_stat {
u64 emulated_inst_exits;
u64 dec_exits;
u64 ext_intr_exits;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 halt_wait_ns;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
u64 halt_successful_wait;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 dbell_exits;
u64 gdbell_exits;
u64 ld;
@@ -297,7 +293,6 @@ struct kvm_arch {
u8 fwnmi_enabled;
u8 secure_guest;
u8 svm_enabled;
- bool threads_indep;
bool nested_enable;
bool dawr1_enabled;
pgd_t *pgtable;
@@ -683,7 +678,12 @@ struct kvm_vcpu_arch {
ulong fault_dar;
u32 fault_dsisr;
unsigned long intr_msr;
- ulong fault_gpa; /* guest real address of page fault (POWER9) */
+ /*
+ * POWER9 and later: fault_gpa contains the guest real address of page
+ * fault for a radix guest, or segment descriptor (equivalent to result
+ * from slbmfev of SLB entry that translated the EA) for hash guests.
+ */
+ ulong fault_gpa;
#endif
#ifdef CONFIG_BOOKE
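
Note: the per-arch stat structs now embed the shared generic blocks, which is what lets the KVM_GENERIC_VM_STATS()/KVM_GENERIC_VCPU_STATS() descriptors (see the mips.c hunk above) cover these fields once for every architecture. Sketch of the vcpu block being embedded, abridged to the halt-polling counters that just moved out of the arch struct (include/linux/kvm_types.h in this series):

    struct kvm_vcpu_stat_generic {
            u64 halt_successful_poll;
            u64 halt_attempted_poll;
            u64 halt_poll_invalid;
            u64 halt_wakeup;
            u64 halt_poll_success_ns;
            u64 halt_poll_fail_ns;
    };
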
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 5bf8ae9bb2cc..2d88944f9f34 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -129,6 +129,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -606,6 +607,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
@@ -638,6 +640,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+ { return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
@@ -655,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
-extern void kvmppc_xive_init_module(void);
-extern void kvmppc_xive_exit_module(void);
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
@@ -671,6 +673,8 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
@@ -680,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_native_init_module(void);
-extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
@@ -695,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
-static inline void kvmppc_xive_init_module(void) { }
-static inline void kvmppc_xive_exit_module(void) { }
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
@@ -711,14 +711,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_native_init_module(void) { }
-static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val)
{ return 0; }
@@ -754,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count);
-long kvmppc_h_random(struct kvm_vcpu *vcpu);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 4bc45d3ed8b0..db186c539d37 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -122,12 +122,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -222,6 +216,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+ unsigned long lpid,
+ unsigned long type,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end) { }
+#endif
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 6cda76b57c5d..4c6c6dbd182f 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -18,7 +18,7 @@
* flags field of the struct page
*/
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
/*
@@ -41,7 +41,7 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#ifdef CONFIG_FA_DUMP
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..714a35f0d425 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
+
/*
* This is what we should always use. Any other lockless page table lookup needs
* careful audit against THP split.
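
The address arithmetic in ppc_find_vmap_phys() above is worth checking in isolation: the PTE's PFN supplies the high bits (and is hugepage-aligned for huge mappings), while the mask keeps the low hugepage_shift bits of the virtual address as the in-page offset. A minimal userspace sketch of that composition, with assumed values standing in for the kernel's PFN_PHYS/PAGE_SHIFT definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12 /* assumed 4K base pages */
    #define PFN_PHYS(pfn) ((uint64_t)(pfn) << PAGE_SHIFT)

    /* Compose a physical address from a PFN and a vmap virtual address,
     * keeping the low hugepage_shift bits of the VA as the offset. */
    static uint64_t compose_pa(uint64_t pfn, uint64_t vaddr, int hugepage_shift)
    {
            if (!hugepage_shift)
                    hugepage_shift = PAGE_SHIFT; /* normal page mapping */
            return PFN_PHYS(pfn) | (vaddr & ((1ull << hugepage_shift) - 1));
    }

    int main(void)
    {
            /* 2M huge mapping: PFN is 2M-aligned, low 21 VA bits survive. */
            printf("%#llx\n", (unsigned long long)
                   compose_pa(0x12200, 0xc000000000123456ull, 21));
            return 0;
    }

The later hunks that convert eeh_token_to_phys() and iowa_mem_find_bus() to this helper are deduplication of the same logic; note one behavioural wrinkle visible in the diff: the helper returns 0 (with a WARN) on a missing PTE, where the old EEH code returned the token unchanged.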
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 07318bc63e3d..b676c4fb90fd 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
u32 val = 0;
- if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
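
The renamed call is the entire fast path of queued_spin_lock(): one try-cmpxchg from 0 to the locked value, with everything else deferred to the slowpath. The same shape in portable C, using GCC's __atomic builtins (a sketch; the _Q_LOCKED_VAL encoding is assumed and the slowpath is left external):

    #include <stdint.h>

    #define _Q_LOCKED_VAL 1u /* assumed "locked, no waiters" encoding */

    struct qspinlock { uint32_t val; };

    void queued_spin_lock_slowpath(struct qspinlock *lock, uint32_t val);

    static inline void queued_spin_lock(struct qspinlock *lock)
    {
            uint32_t expected = 0;

            /* Uncontended: 0 -> LOCKED in one acquire try-cmpxchg. */
            if (__atomic_compare_exchange_n(&lock->val, &expected,
                                            _Q_LOCKED_VAL, 0,
                                            __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
                    return;

            /* Contended: 'expected' now holds the value we observed. */
            queued_spin_lock_slowpath(lock, expected);
    }

The rename itself is namespace plumbing: the arch_atomic_* form is the raw, uninstrumented operation, which is what a low-level lock implementation wants.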
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 8dd3cdb25338..8c2c3dd4ddba 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -97,6 +97,18 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
+#ifdef CONFIG_PPC64
+static inline unsigned long test_irq_work_pending(void)
+{
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
+ return x;
+}
+#endif
+
DECLARE_PER_CPU(u64, decrementers_next_tb);
/* Convert timebase ticks to nanoseconds */
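
The helper moved into time.h reads a single byte at a fixed offset from r13, the per-CPU PACA pointer, with the offset supplied as an assembler immediate via offsetof. Stripped of the register convention, the pattern is just a compile-time-constant field read; a portable sketch (fake_paca is a stand-in for the kernel's paca_struct):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for the kernel's paca_struct; only the flag byte matters. */
    struct fake_paca {
            char pad[64];
            unsigned char irq_work_pending;
    };

    static unsigned long test_irq_work_pending(const struct fake_paca *paca)
    {
            /* The kernel emits this as "lbz %0,offset(r13)", with the offset
             * an "i" (immediate) operand; in C it is a fixed-offset load. */
            return *((const unsigned char *)paca +
                     offsetof(struct fake_paca, irq_work_pending));
    }

    int main(void)
    {
            struct fake_paca p = { .irq_work_pending = 1 };
            printf("%lu\n", test_irq_work_pending(&p)); /* prints 1 */
            return 0;
    }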
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 28af4efb4587..aa267d173ded 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -534,7 +534,6 @@ int main(void)
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
- OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f24cd53ff26e..3bbdcc86d01b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- pte_t *ptep;
- unsigned long pa;
- int hugepage_shift;
-
- /*
- * We won't find hugepages here (this is iomem). Hence we are not
- * worried about _PAGE_SPLITTING/collapse. Also we will not hit
- * page table free, because of init_mm.
- */
- ptep = find_init_mm_pte(token, &hugepage_shift);
- if (!ptep)
- return token;
-
- pa = pte_pfn(*ptep);
-
- /* On radix we can do hugepage mappings for io, so handle that */
- if (!hugepage_shift)
- hugepage_shift = PAGE_SHIFT;
-
- pa <<= PAGE_SHIFT;
- pa |= token & ((1ul << hugepage_shift) - 1);
- return pa;
+ return ppc_find_vmap_phys(token);
}
/*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index fa8e52a0239e..f7fc6e078d4e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,22 +21,6 @@
#include <asm/feature-fixups.h>
#include <asm/kup.h>
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9 0
-#define EX_R10 8
-#define EX_R11 16
-#define EX_R12 24
-#define EX_R13 32
-#define EX_DAR 40
-#define EX_DSISR 48
-#define EX_CCR 52
-#define EX_CFAR 56
-#define EX_PPR 64
-#define EX_CTR 72
-.if EX_SIZE != 10
- .error "EX_SIZE is wrong"
-.endif
-
/*
* Following are fixed section helper macros.
*
@@ -133,7 +117,6 @@ name:
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
-#define IKVM_SKIP .L_IKVM_SKIP_\name\() /* Generate KVM skip handler */
#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
@@ -190,9 +173,6 @@ do_define_int n
.ifndef IMASK
IMASK=0
.endif
- .ifndef IKVM_SKIP
- IKVM_SKIP=0
- .endif
.ifndef IKVM_REAL
IKVM_REAL=0
.endif
@@ -207,8 +187,6 @@ do_define_int n
.endif
.endm
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* All interrupts which set HSRR registers, as well as SRESET and MCE and
* syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
@@ -238,88 +216,28 @@ do_define_int n
/*
* If an interrupt is taken while a guest is running, it is immediately routed
- * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go first
- * to kvmppc_interrupt_hv, which handles the PR guest case.
+ * to KVM to handle.
*/
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-.macro KVMTEST name
+.macro KVMTEST name handler
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi r10,0
- bne \name\()_kvm
-.endm
-
-.macro GEN_KVM name
- .balign IFETCH_ALIGN_BYTES
-\name\()_kvm:
-
- .if IKVM_SKIP
- cmpwi r10,KVM_GUEST_MODE_SKIP
- beq 89f
- .else
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_CFAR(r13)
- std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
- .endif
-
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_PPR(r13)
- std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r9,32
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
/* HSRR variants have the 0x2 bit added to their trap number */
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
- ori r12,r12,(IVEC + 0x2)
- FTR_SECTION_ELSE
- ori r12,r12,(IVEC)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif IHSRR
- ori r12,r12,(IVEC+ 0x2)
- .else
- ori r12,r12,(IVEC)
- .endif
- b kvmppc_interrupt
-
- .if IKVM_SKIP
-89: mtocrf 0x80,r9
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- .if IHSRR_IF_HVMODE
- BEGIN_FTR_SECTION
- b kvmppc_skip_Hinterrupt
+ li r10,(IVEC + 0x2)
FTR_SECTION_ELSE
- b kvmppc_skip_interrupt
+ li r10,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.elseif IHSRR
- b kvmppc_skip_Hinterrupt
+ li r10,(IVEC + 0x2)
.else
- b kvmppc_skip_interrupt
+ li r10,(IVEC)
.endif
- .endif
-.endm
-
-#else
-.macro KVMTEST name
-.endm
-.macro GEN_KVM name
-.endm
+ bne \handler
#endif
+.endm
/*
* This is the BOOK3S interrupt entry code macro.
@@ -461,7 +379,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
@@ -484,7 +402,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
.if IKVM_VIRT
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
1:
.endif
.endif /* IVIRT */
@@ -498,7 +416,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
.endm
@@ -1000,8 +918,6 @@ EXC_COMMON_BEGIN(system_reset_common)
EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
- GEN_KVM system_reset
-
/**
* Interrupt 0x200 - Machine Check Interrupt (MCE).
@@ -1070,7 +986,6 @@ INT_DEFINE_BEGIN(machine_check)
ISET_RI=0
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(machine_check)
@@ -1166,7 +1081,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/*
* Check if we are coming from guest. If yes, then run the normal
* exception handler which will take the
- * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+ * machine_check_kvm->kvm_interrupt branch to deliver the MC event
* to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
@@ -1236,8 +1151,6 @@ EXC_COMMON_BEGIN(machine_check_common)
bl machine_check_exception
b interrupt_return
- GEN_KVM machine_check
-
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -1342,7 +1255,6 @@ INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access)
@@ -1373,8 +1285,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
REST_NVGPRS(r1)
b interrupt_return
- GEN_KVM data_access
-
/**
* Interrupt 0x380 - Data Segment Interrupt (DSLB).
@@ -1396,7 +1306,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
INT_DEFINE_BEGIN(data_access_slb)
IVEC=0x380
IDAR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access_slb)
@@ -1425,8 +1334,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
bl do_bad_slb_fault
b interrupt_return
- GEN_KVM data_access_slb
-
/**
* Interrupt 0x400 - Instruction Storage Interrupt (ISI).
@@ -1463,8 +1370,6 @@ MMU_FTR_SECTION_ELSE
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
b interrupt_return
- GEN_KVM instruction_access
-
/**
* Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
@@ -1509,8 +1414,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
bl do_bad_slb_fault
b interrupt_return
- GEN_KVM instruction_access_slb
-
/**
* Interrupt 0x500 - External Interrupt.
@@ -1555,8 +1458,6 @@ EXC_COMMON_BEGIN(hardware_interrupt_common)
bl do_IRQ
b interrupt_return
- GEN_KVM hardware_interrupt
-
/**
* Interrupt 0x600 - Alignment Interrupt
@@ -1584,8 +1485,6 @@ EXC_COMMON_BEGIN(alignment_common)
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return
- GEN_KVM alignment
-
/**
* Interrupt 0x700 - Program Interrupt (program check).
@@ -1693,8 +1592,6 @@ EXC_COMMON_BEGIN(program_check_common)
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return
- GEN_KVM program_check
-
/*
* Interrupt 0x800 - Floating-Point Unavailable Interrupt.
@@ -1744,8 +1641,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
b interrupt_return
#endif
- GEN_KVM fp_unavailable
-
/**
* Interrupt 0x900 - Decrementer Interrupt.
@@ -1784,8 +1679,6 @@ EXC_COMMON_BEGIN(decrementer_common)
bl timer_interrupt
b interrupt_return
- GEN_KVM decrementer
-
/**
* Interrupt 0x980 - Hypervisor Decrementer Interrupt.
@@ -1831,8 +1724,6 @@ EXC_COMMON_BEGIN(hdecrementer_common)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_KERNEL
- GEN_KVM hdecrementer
-
/**
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
@@ -1872,8 +1763,6 @@ EXC_COMMON_BEGIN(doorbell_super_common)
#endif
b interrupt_return
- GEN_KVM doorbell_super
-
EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)
@@ -1923,7 +1812,7 @@ INT_DEFINE_END(system_call)
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
- KVMTEST system_call /* uses r10, branch to system_call_kvm */
+ KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
@@ -1979,14 +1868,16 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-TRAMP_REAL_BEGIN(system_call_kvm)
- /*
- * This is a hcall, so register convention is as above, with these
- * differences:
- * r13 = PACA
- * ctr = orig r13
- * orig r10 saved in PACA
- */
+TRAMP_REAL_BEGIN(kvm_hcall)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfcr r9
+ mfctr r10
+ std r10,PACA_EXGEN+EX_R13(r13)
+ li r10,0
+ std r10,PACA_EXGEN+EX_CFAR(r13)
+ std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
@@ -1994,31 +1885,24 @@ TRAMP_REAL_BEGIN(system_call_kvm)
*/
BEGIN_FTR_SECTION
mfspr r10,SPRN_PPR
- std r10,HSTATE_PPR(r13)
+ std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
HMT_MEDIUM
- mfctr r10
- SET_SCRATCH0(r10)
- mfcr r10
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r10,32
- ori r12,r12,0xc00
+
#ifdef CONFIG_RELOCATABLE
/*
- * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
+ * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
- __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
+ __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
mtctr r10
- ld r10,PACA_EXGEN+EX_R10(r13)
bctr
#else
- ld r10,PACA_EXGEN+EX_R10(r13)
- b kvmppc_interrupt
+ b kvmppc_hcall
#endif
#endif
-
/**
* Interrupt 0xd00 - Trace Interrupt.
* This is a synchronous interrupt in response to instruction step or
@@ -2043,8 +1927,6 @@ EXC_COMMON_BEGIN(single_step_common)
bl single_step_exception
b interrupt_return
- GEN_KVM single_step
-
/**
* Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
@@ -2063,7 +1945,6 @@ INT_DEFINE_BEGIN(h_data_storage)
IHSRR=1
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)
@@ -2084,8 +1965,6 @@ MMU_FTR_SECTION_ELSE
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
b interrupt_return
- GEN_KVM h_data_storage
-
/**
* Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
@@ -2111,8 +1990,6 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
bl unknown_exception
b interrupt_return
- GEN_KVM h_instr_storage
-
/**
* Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
@@ -2137,8 +2014,6 @@ EXC_COMMON_BEGIN(emulation_assist_common)
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return
- GEN_KVM emulation_assist
-
/**
* Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
@@ -2210,16 +2085,12 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXCEPTION_RESTORE_REGS hsrr=1
GEN_INT_ENTRY hmi_exception, virt=0
- GEN_KVM hmi_exception_early
-
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
b interrupt_return
- GEN_KVM hmi_exception
-
/**
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
@@ -2250,8 +2121,6 @@ EXC_COMMON_BEGIN(h_doorbell_common)
#endif
b interrupt_return
- GEN_KVM h_doorbell
-
/**
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
@@ -2278,8 +2147,6 @@ EXC_COMMON_BEGIN(h_virt_irq_common)
bl do_IRQ
b interrupt_return
- GEN_KVM h_virt_irq
-
EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
@@ -2323,8 +2190,6 @@ EXC_COMMON_BEGIN(performance_monitor_common)
bl performance_monitor_exception
b interrupt_return
- GEN_KVM performance_monitor
-
/**
* Interrupt 0xf20 - Vector Unavailable Interrupt.
@@ -2374,8 +2239,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
bl altivec_unavailable_exception
b interrupt_return
- GEN_KVM altivec_unavailable
-
/**
* Interrupt 0xf40 - VSX Unavailable Interrupt.
@@ -2424,8 +2287,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl vsx_unavailable_exception
b interrupt_return
- GEN_KVM vsx_unavailable
-
/**
* Interrupt 0xf60 - Facility Unavailable Interrupt.
@@ -2454,8 +2315,6 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return
- GEN_KVM facility_unavailable
-
/**
* Interrupt 0xf60 - Hypervisor Facility Unavailable Interrupt.
@@ -2484,8 +2343,6 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
b interrupt_return
- GEN_KVM h_facility_unavailable
-
EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
@@ -2515,8 +2372,6 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
bl cbe_system_error_exception
b interrupt_return
- GEN_KVM cbe_system_error
-
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
@@ -2548,8 +2403,6 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
bl instruction_breakpoint_exception
b interrupt_return
- GEN_KVM instruction_breakpoint
-
EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
@@ -2670,8 +2523,6 @@ EXC_COMMON_BEGIN(denorm_exception_common)
bl unknown_exception
b interrupt_return
- GEN_KVM denorm_exception
-
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
@@ -2689,8 +2540,6 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
bl cbe_maintenance_exception
b interrupt_return
- GEN_KVM cbe_maintenance
-
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
@@ -2721,8 +2570,6 @@ EXC_COMMON_BEGIN(altivec_assist_common)
#endif
b interrupt_return
- GEN_KVM altivec_assist
-
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
@@ -2740,8 +2587,6 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
bl cbe_thermal_exception
b interrupt_return
- GEN_KVM cbe_thermal
-
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
@@ -2994,6 +2839,15 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
USE_TEXT_SECTION()
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvm_interrupt:
+ /*
+ * The conditional branch in KVMTEST can't reach all the way,
+ * so make a stub.
+ */
+ b kvmppc_interrupt
+#endif
+
_GLOBAL(do_uaccess_flush)
UACCESS_FLUSH_FIXUP_SECTION
nop
@@ -3009,32 +2863,6 @@ EXPORT_SYMBOL(do_uaccess_flush)
MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-kvmppc_skip_interrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- RFI_TO_KERNEL
- b .
-
-kvmppc_skip_Hinterrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- HRFI_TO_KERNEL
- b .
-#endif
-
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
* with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 51bbaae94ccc..c877f074d174 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
#ifdef CONFIG_PPC_INDIRECT_MMIO
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- unsigned hugepage_shift;
struct iowa_bus *bus;
int token;
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
- pte_t *ptep;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
- /*
- * We won't find huge pages here (iomem). Also can't hit
- * a page table free due to init_mm
- */
- ptep = find_init_mm_pte(vaddr, &hugepage_shift);
- if (ptep == NULL)
- paddr = 0;
- else {
- WARN_ON(hugepage_shift);
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- }
+
+ paddr = ppc_find_vmap_phys(vaddr);
+
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 57d6b85e9b96..2af89a5e379f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
unsigned int order;
unsigned int nio_pages, io_order;
struct page *page;
- size_t size_io = size;
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
- nio_pages = size_io >> tbl->it_page_shift;
- io_order = get_iommu_order(size_io, tbl);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
if (tbl) {
- size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
- unsigned int nio_pages = size_io >> tbl->it_page_shift;
+ unsigned int nio_pages;
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 01ab2163659e..c64a5feaebbe 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
int ret = 0;
struct kprobe *prev;
struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
ret = -EINVAL;
- } else if (ppc_inst_prefixed(prefix)) {
+ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
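
The reworked check only dereferences the word before p->addr when p->addr is not the first word of its page: reading p->addr - 1 unconditionally could touch a preceding, possibly unmapped page, and an instruction at a page boundary cannot be the suffix of a prefixed instruction anyway (ISA 3.1 prefixed instructions may not cross a 64-byte boundary, let alone a page). The guard itself is a one-line mask test; a sketch with an assumed 4K page size:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ul /* assumed base page size */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* True when addr is not page-aligned, i.e. the previous word is on
     * the same (mapped) page and may legitimately be a prefix word. */
    static bool may_check_prefix(uintptr_t addr)
    {
            return (addr & ~PAGE_MASK) != 0;
    }

    int main(void)
    {
            return may_check_prefix(0x10000000) ? 1 : 0; /* aligned: 0 */
    }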
@@ -502,23 +502,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 9a3c2a84a2ac..15e7b4900689 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 89e34aa273e2..8935c5696bce 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2084,7 +2084,7 @@ static unsigned long __get_wchan(struct task_struct *p)
unsigned long ip, sp;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.ksp;
@@ -2094,7 +2094,7 @@ static unsigned long __get_wchan(struct task_struct *p)
do {
sp = *(unsigned long *)sp;
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- p->state == TASK_RUNNING)
+ task_is_running(p))
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 0fdfcdd9d880..c17d1c9362b5 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -432,16 +432,19 @@ device_initcall(stf_barrier_debugfs_init);
static void update_branch_cache_flush(void)
{
- u32 *site;
+ u32 *site, __maybe_unused *site2;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
+ site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site2, ppc_inst(PPC_INST_NOP));
} else {
// Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+ patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
}
#endif
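
update_branch_cache_flush() is runtime code patching: each recorded call site becomes either a nop (no flush needed) or a branch-and-link to kvm_flush_link_stack, and the new P9 exit path contributes a second site that must always be patched in step with the first. A toy model of that two-site decision (patch_site here is a stand-in that only reports what the kernel's patch_instruction_site/patch_branch_site would install):

    #include <stdio.h>

    enum flush_type { FLUSH_NONE, FLUSH_SW };

    /* Stand-in "patcher": report what each call site would become. */
    static void patch_site(const char *site, const char *insn)
    {
            printf("%-28s -> %s\n", site, insn);
    }

    static void update_branch_cache_flush(enum flush_type t)
    {
            const char *insn = (t == FLUSH_NONE) ? "nop"
                                                 : "bl kvm_flush_link_stack";

            /* Both guest-exit paths must agree, so patch them together. */
            patch_site("guest_exit_cont (P7/P8 path)", insn);
            patch_site("P9 exit path", insn);
    }

    int main(void)
    {
            update_branch_cache_flush(FLUSH_SW);
            return 0;
    }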
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e42b85e4f1aa..a35fbf4d0bce 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -788,7 +788,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index dca66481d0c2..f9e1f5428b9e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -902,6 +902,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
user_write_access_end();
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
@@ -915,11 +919,6 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
regs->nip = (unsigned long) &frame->tramp[0];
}
-
- /* Save the siginfo outside of the unsafe block. */
- if (copy_siginfo_to_user(&frame->info, &ksig->info))
- goto badframe;
-
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2e05c783440a..7ddc2d32c39e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1047,7 +1047,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
@@ -1547,7 +1547,6 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
rcu_cpu_starting(cpu);
- preempt_disable();
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b67d93a609a2..da995c5fb97d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -508,16 +508,6 @@ EXPORT_SYMBOL(profile_pc);
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, irq_work_pending)));
- return x;
-}
-
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 56da5eb2b923..48525e8b5730 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -68,11 +68,11 @@ void machine_kexec_cleanup(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2bfeaa13befb..583c14ef596e 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -6,7 +6,7 @@
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
@@ -57,6 +57,7 @@ kvm-pr-y := \
book3s_32_mmu.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+ book3s_64_entry.o \
tm.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -86,6 +87,7 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_hmi.o \
+ book3s_hv_p9_entry.o \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_hv_ras.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 2b691f4d1f26..79833f78d1da 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -38,37 +38,66 @@
/* #define EXIT_DEBUG */
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("exits", sum_exits),
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("queue_intr", queue_intr),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("halt_wait_ns", halt_wait_ns),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_successful_wait", halt_successful_wait),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("pf_storage", pf_storage),
- VCPU_STAT("sp_storage", sp_storage),
- VCPU_STAT("pf_instruc", pf_instruc),
- VCPU_STAT("sp_instruc", sp_instruc),
- VCPU_STAT("ld", ld),
- VCPU_STAT("ld_slow", ld_slow),
- VCPU_STAT("st", st),
- VCPU_STAT("st_slow", st_slow),
- VCPU_STAT("pthru_all", pthru_all),
- VCPU_STAT("pthru_host", pthru_host),
- VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
- VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
- VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pf_storage),
+ STATS_DESC_COUNTER(VCPU, pf_instruc),
+ STATS_DESC_COUNTER(VCPU, sp_storage),
+ STATS_DESC_COUNTER(VCPU, sp_instruc),
+ STATS_DESC_COUNTER(VCPU, queue_intr),
+ STATS_DESC_COUNTER(VCPU, ld_slow),
+ STATS_DESC_COUNTER(VCPU, st_slow),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
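
These descriptor tables replace the old named-debugfs-entry array with KVM's binary stats interface (hence the new binary_stats.o in the Makefile hunk above): userspace obtains a stats file descriptor and reads a fixed header, then num_desc descriptors at desc_offset, then a flat u64 array at data_offset. A hedged userspace sketch of parsing such a header (the struct mirrors the kvm_stats_header layout from linux/kvm.h; the fd is assumed to come from the KVM_GET_STATS_FD ioctl, and error handling is trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Mirrors the kernel's struct kvm_stats_header layout. */
    struct stats_header {
            uint32_t flags;
            uint32_t name_size;
            uint32_t num_desc;
            uint32_t id_offset;
            uint32_t desc_offset;
            uint32_t data_offset;
    };

    static int read_stats_header(int stats_fd, struct stats_header *hdr)
    {
            /* The header always sits at offset 0 of the stats fd. */
            if (pread(stats_fd, hdr, sizeof(*hdr), 0) != (ssize_t)sizeof(*hdr))
                    return -1;
            printf("%u stats, descriptors at %u, data at %u\n",
                   hdr->num_desc, hdr->desc_offset, hdr->data_offset);
            return 0;
    }

The static_asserts above are the kernel-side guarantee that the descriptor count and the u64-per-field layout of kvm_vcpu_stat/kvm_vm_stat stay in lock step.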
@@ -171,6 +200,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
+{
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
+}
+EXPORT_SYMBOL(kvmppc_core_queue_syscall);
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
@@ -1044,13 +1079,10 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
if (xics_on_xive()) {
- kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
- if (kvmppc_xive_native_supported()) {
- kvmppc_xive_native_init_module();
+ if (kvmppc_xive_native_supported())
kvm_register_device_ops(&kvm_xive_native_ops,
KVM_DEV_TYPE_XIVE);
- }
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1060,12 +1092,6 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
-#ifdef CONFIG_KVM_XICS
- if (xics_on_xive()) {
- kvmppc_xive_exit_module();
- kvmppc_xive_native_exit_module();
- }
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
new file mode 100644
index 000000000000..983b8c18bc31
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+#include <asm/code-patching-asm.h>
+#include <asm/exception-64s.h>
+#include <asm/export.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_book3s_asm.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/ultravisor-api.h>
+
+/*
+ * These are branched to from interrupt handlers in exception-64s.S which set
+ * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
+ */
+
+/*
+ * This is a hcall, so register convention is as
+ * Documentation/powerpc/papr_hcalls.rst.
+ *
+ * This may also be a syscall from PR-KVM userspace that is to be
+ * reflected to the PR guest kernel, so registers may be set up for
+ * a system call rather than hcall. We don't currently clobber
+ * anything here, but the 0xc00 handler has already clobbered CTR
+ * and CR0, so PR-KVM cannot support a guest kernel that preserves
+ * those registers across its system calls.
+ *
+ * The state of registers is as for kvmppc_interrupt, except CFAR is not
+ * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
+ */
+.global kvmppc_hcall
+.balign IFETCH_ALIGN_BYTES
+kvmppc_hcall:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_hcall
+#endif
+ ld r10,PACA_EXGEN+EX_R13(r13)
+ SET_SCRATCH0(r10)
+ li r10,0xc00
+ /* Now we look like kvmppc_interrupt */
+ li r11,PACA_EXGEN
+ b .Lgot_save_area
+
+/*
+ * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
+ * call convention:
+ *
+ * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
+ * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
+ * guest R13 also saved in SCRATCH0
+ * R13 = PACA
+ * R11 = (H)SRR0
+ * R12 = (H)SRR1
+ * R9 = guest CR
+ * PPR is set to medium
+ *
+ * With the addition for KVM:
+ * R10 = trap vector
+ */
+.global kvmppc_interrupt
+.balign IFETCH_ALIGN_BYTES
+kvmppc_interrupt:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ std r10,HSTATE_SCRATCH0(r13)
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_interrupt
+ ld r10,HSTATE_SCRATCH0(r13)
+#endif
+ li r11,PACA_EXGEN
+ cmpdi r10,0x200
+ bgt+ .Lgot_save_area
+ li r11,PACA_EXMC
+ beq .Lgot_save_area
+ li r11,PACA_EXNMI
+.Lgot_save_area:
+ add r11,r11,r13
+BEGIN_FTR_SECTION
+ ld r12,EX_CFAR(r11)
+ std r12,HSTATE_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r12,EX_CTR(r11)
+ mtctr r12
+BEGIN_FTR_SECTION
+ ld r12,EX_PPR(r11)
+ std r12,HSTATE_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r12,EX_R12(r11)
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r9,32
+ or r12,r12,r10
+ ld r9,EX_R9(r11)
+ ld r10,EX_R10(r11)
+ ld r11,EX_R11(r11)
+
+ /*
+ * Hcalls and other interrupts come here after normalising register
+ * contents and save locations:
+ *
+ * R12 = (guest CR << 32) | interrupt vector
+ * R13 = PACA
+ * guest R12 saved in shadow HSTATE_SCRATCH0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ std r9,HSTATE_SCRATCH2(r13)
+ lbz r9,HSTATE_IN_GUEST(r13)
+ cmpwi r9,KVM_GUEST_MODE_SKIP
+ beq- .Lmaybe_skip
+.Lno_skip:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ cmpwi r9,KVM_GUEST_MODE_GUEST
+ beq kvmppc_interrupt_pr
+#endif
+ b kvmppc_interrupt_hv
+#else
+ b kvmppc_interrupt_pr
+#endif
+
+/*
+ * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
+ * the faulting instruction in guest memory from the the hypervisor without
+ * walking page tables.
+ *
+ * When the guest takes a fault that requires the hypervisor to load the
+ * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
+ * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
+ * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
+ * but loads and stores will access the guest context. This is used to load
+ * the faulting instruction using the faulting guest effective address.
+ *
+ * However the guest context may not be able to translate, or it may cause a
+ * machine check or other issue, which results in a fault in the host
+ * (even with KVM-HV).
+ *
+ * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
+ * are (or are likely) caused by that load, the instruction is skipped by
+ * just returning with the PC advanced +4, where it is noticed the load did
+ * not execute and it goes to the slow path which walks the page tables to
+ * read guest memory.
+ */
+.Lmaybe_skip:
+ cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
+ beq 1f
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* HSRR interrupts get 2 added to interrupt number */
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
+ beq 2f
+#endif
+ b .Lno_skip
+1: mfspr r9,SPRN_SRR0
+ addi r9,r9,4
+ mtspr SPRN_SRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ RFI_TO_KERNEL
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+2: mfspr r9,SPRN_HSRR0
+ addi r9,r9,4
+ mtspr SPRN_HSRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ HRFI_TO_KERNEL
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+/* Stack frame offsets for kvmppc_p9_enter_guest */
+#define SFS (144 + STACK_FRAME_MIN_SIZE)
+#define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
+
+/*
+ * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
+ *
+ * Enter the guest on an ISAv3.0 or later system.
+ */
+.balign IFETCH_ALIGN_BYTES
+_GLOBAL(kvmppc_p9_enter_guest)
+EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+ stdu r1,-SFS(r1)
+
+ std r1,HSTATE_HOST_R1(r13)
+
+ mfcr r4
+ stw r4,SFS+8(r1)
+
+ reg = 14
+ .rept 18
+ std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_LR(r3)
+ mtlr r4
+ ld r4,VCPU_CTR(r3)
+ mtctr r4
+ ld r4,VCPU_XER(r3)
+ mtspr SPRN_XER,r4
+
+ ld r1,VCPU_CR(r3)
+
+BEGIN_FTR_SECTION
+ ld r4,VCPU_CFAR(r3)
+ mtspr SPRN_CFAR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r4,VCPU_PPR(r3)
+ mtspr SPRN_PPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ reg = 4
+ .rept 28
+ ld reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_KVM(r3)
+ lbz r4,KVM_SECURE_GUEST(r4)
+ cmpdi r4,0
+ ld r4,VCPU_GPR(R4)(r3)
+ bne .Lret_to_ultra
+
+ mtcr r1
+
+ ld r0,VCPU_GPR(R0)(r3)
+ ld r1,VCPU_GPR(R1)(r3)
+ ld r2,VCPU_GPR(R2)(r3)
+ ld r3,VCPU_GPR(R3)(r3)
+
+ HRFI_TO_GUEST
+ b .
+
+ /*
+ * Use UV_RETURN ultracall to return control back to the Ultravisor
+ * after processing a hypercall or interrupt that was forwarded
+ * (a.k.a. reflected) to the Hypervisor.
+ *
+ * All registers have already been reloaded except the ucall requires:
+ * R0 = hcall result
+ * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
+ * R3 = UV_RETURN
+ */
+.Lret_to_ultra:
+ mtcr r1
+ ld r1,VCPU_GPR(R1)(r3)
+
+ ld r0,VCPU_GPR(R3)(r3)
+ mfspr r2,SPRN_SRR1
+ LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+ sc 2
+
+/*
+ * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
+ * above if the interrupt was taken for a guest that was entered via
+ * kvmppc_p9_enter_guest().
+ *
+ * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
+ * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
+ * establishes the host stack and registers to return from the
+ * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
+ * (SPRs and FP, VEC, etc).
+ */
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_hcall:
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+ li r10,0xc00
+ std r10,HSTATE_SCRATCH0(r13)
+
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_interrupt:
+ /*
+ * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
+ * hypervisor, that means we can't return from the entry stack.
+ */
+ rldicl. r10,r12,64-MSR_HV_LG,63
+ bne- kvmppc_p9_bad_interrupt
+
+ std r1,HSTATE_SCRATCH1(r13)
+ std r3,HSTATE_SCRATCH2(r13)
+ ld r1,HSTATE_HOST_R1(r13)
+ ld r3,HSTATE_KVM_VCPU(r13)
+
+ std r9,VCPU_CR(r3)
+
+1:
+ std r11,VCPU_PC(r3)
+ std r12,VCPU_MSR(r3)
+
+ reg = 14
+ .rept 18
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ /* r1, r3, r9-r13 are saved to vcpu by C code */
+ std r0,VCPU_GPR(R0)(r3)
+ std r2,VCPU_GPR(R2)(r3)
+ reg = 4
+ .rept 5
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r2,PACATOC(r13)
+
+ mflr r4
+ std r4,VCPU_LR(r3)
+ mfspr r4,SPRN_XER
+ std r4,VCPU_XER(r3)
+
+ reg = 14
+ .rept 18
+ ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4,SFS+8(r1)
+ mtcr r4
+
+ /*
+ * Flush the link stack here, before executing the first blr on the
+ * way out of the guest.
+ *
+ * The link stack won't match coming out of the guest anyway so the
+ * only cost is the flush itself. The call clobbers r0.
+ */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack_p9
+
+ addi r1,r1,SFS
+ ld r0,PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Took an interrupt somewhere right before HRFID to guest, so registers are
+ * in a bad way. Restore just enough state to run host virtual mode code and
+ * run the Linux interrupt handler (SRESET or MCE) to print something useful.
+ *
+ * We could be really clever and save all host registers in known locations
+ * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
+ * return address to a fixup that sets them up again. But that's a lot of
+ * effort for a small bit of code. Lots of other things to do first.
+ */
+kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+ /*
+ * Hash host doesn't try to recover MMU (requires host SLB reload)
+ */
+ b .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+ /*
+ * Clean up guest registers to give host a chance to run.
+ */
+ li r10,0
+ mtspr SPRN_AMR,r10
+ mtspr SPRN_IAMR,r10
+ mtspr SPRN_CIABR,r10
+ mtspr SPRN_DAWRX0,r10
+BEGIN_FTR_SECTION
+ mtspr SPRN_DAWRX1,r10
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+ mtspr SPRN_PID,r10
+
+ /*
+ * Switch to host MMU mode
+ */
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ lwz r10, KVM_HOST_LPID(r10)
+ mtspr SPRN_LPID,r10
+
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ ld r10, KVM_HOST_LPCR(r10)
+ mtspr SPRN_LPCR,r10
+
+ /*
+ * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
+ * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
+ */
+ li r10,KVM_GUEST_MODE_NONE
+ stb r10,HSTATE_IN_GUEST(r13)
+ li r10,MSR_RI
+ andc r12,r12,r10
+
+ /*
+ * Go back to interrupt handler. MCE and SRESET have their specific
+ * PACA save area so they should be used directly. They set up their
+ * own stack. The other handlers all use EXGEN. They will use the
+ * guest r1 if it looks like a kernel stack, so just load the
+ * emergency stack and go to program check for all other interrupts.
+ */
+ ld r10,HSTATE_SCRATCH0(r13)
+ cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq machine_check_common
+
+ cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+ beq system_reset_common
+
+ b .
+#endif
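
The .Lmaybe_skip path in this new file is a fixup pattern: attempt a fast real-mode load of the faulting guest instruction, and if that load itself faults, resume at PC+4 where the caller notices the load never completed and falls back to a page-table walk. Reduced to C control flow (a conceptual sketch only; both helpers are stand-ins for the real-mode load and the slow path in the KVM MMU code):

    #include <stdio.h>

    /* Stand-in fast path: "faults" (returns 0) for addresses we pretend
     * the guest MMU context cannot translate. */
    static int fast_load_guest_insn(unsigned long ea, unsigned int *insn)
    {
            if (ea & 0x1) /* pretend translation failed */
                    return 0;
            *insn = 0x60000000; /* nop */
            return 1;
    }

    static unsigned int slow_load_via_page_tables(unsigned long ea)
    {
            (void)ea;
            return 0x60000000; /* always succeeds, but is expensive */
    }

    static unsigned int fetch_guest_insn(unsigned long guest_ea)
    {
            unsigned int insn;

            /* If the fast load faults, the interrupt returns past it and
             * we observe the failure -- the PC+4 skip in the assembly. */
            if (!fast_load_guest_insn(guest_ea, &insn))
                    insn = slow_load_via_page_tables(guest_ea);
            return insn;
    }

    int main(void)
    {
            printf("%#x\n", fetch_guest_insn(0x1001));
            return 0;
    }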
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index d909c069363e..b5905ae4377c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,7 @@
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
/*
* Supported radix tree geometry.
@@ -318,9 +319,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
}
psi = shift_to_mmu_psize(pshift);
- rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
- lpid, rb);
+
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ } else {
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB,
+ psize_to_rpti_pgsize(psi),
+ addr, addr + psize);
+ }
+
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
@@ -334,8 +345,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+ 0, -1UL);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
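
Both converted call sites follow one pattern: if the firmware advertises FW_FEATURE_RPT_INVALIDATE, issue the range-based H_RPT_INVALIDATE (a page range for the TLB case, 0..-1UL with H_RPTI_PAGE_ALL for the PWC case); otherwise keep the old H_TLB_INVALIDATE encoding. The dispatch shape, condensed into a sketch with stubbed hcall wrappers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs for the firmware feature test and the two hcall wrappers. */
    static bool fw_has_rpt_invalidate(void) { return true; }
    static long h_tlb_invalidate(unsigned long lpid, unsigned long rb)
    { (void)lpid; (void)rb; return 0; }
    static long h_rpt_invalidate(unsigned long lpid, unsigned long start,
                                 unsigned long end)
    { (void)lpid; (void)start; (void)end; return 0; }

    /* Invalidate one guest page of size psize for partition lpid. */
    static long flush_guest_page(unsigned long lpid, unsigned long addr,
                                 unsigned long psize)
    {
            if (fw_has_rpt_invalidate())
                    return h_rpt_invalidate(lpid, addr, addr + psize);
            /* rb would also carry the encoded page size here. */
            return h_tlb_invalidate(lpid, addr);
    }

    int main(void)
    {
            printf("rc=%ld\n", flush_guest_page(1, 0x10000, 0x10000));
            return 0;
    }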
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 083a4e037718..dc6591548f0c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
/*
* used to check for invalidations in progress
*/
@@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 28a80d240b76..260e860d53a2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
#include "book3s.h"
@@ -103,13 +104,9 @@ static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
-static bool indep_threads_mode = true;
-module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
-
static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
@@ -134,9 +131,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
return kvm->arch.nested_enable && kvm_is_radix(kvm);
}
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -236,7 +230,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp))
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.generic.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
@@ -807,7 +801,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
* KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
* Keep this in synch with kvmppc_filter_guest_lpcr_hv.
*/
- if (mflags != 0 && mflags != 3)
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
return H_UNSUPPORTED_FLAG_START;
return H_TOO_HARD;
default:
@@ -899,6 +894,10 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* H_SUCCESS if the source vcore wasn't idle (e.g. if it may
* have useful work to do and should not confer) so we don't
* recheck that here.
+ *
+ * In the P9 single vcpu per vcore case, the real
+ * mode handler is not called but no other threads are in the
+ * source vcore.
*/
spin_lock(&vcore->lock);
@@ -924,8 +923,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
return yield_count;
}
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+ unsigned long type = kvmppc_get_gpr(vcpu, 6);
+ unsigned long pid, pg_sizes, start, end;
+
+ /*
+ * The partition-scoped invalidations aren't handled here in L0.
+ */
+ if (type & H_RPTI_TYPE_NESTED)
+ return RESUME_HOST;
+
+ pid = kvmppc_get_gpr(vcpu, 4);
+ pg_sizes = kvmppc_get_gpr(vcpu, 7);
+ start = kvmppc_get_gpr(vcpu, 8);
+ end = kvmppc_get_gpr(vcpu, 9);
+
+ do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+ type, pg_sizes, start, end);
+
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+ unsigned long id, unsigned long target,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ if (!kvm_is_radix(vcpu->kvm))
+ return H_UNSUPPORTED;
+
+ if (end < start)
+ return H_P5;
+
+ /*
+ * Partition-scoped invalidation for nested guests.
+ */
+ if (type & H_RPTI_TYPE_NESTED) {
+ if (!nesting_enabled(vcpu->kvm))
+ return H_FUNCTION;
+
+ /* Support only cores as target */
+ if (target != H_RPTI_TARGET_CMMU)
+ return H_P2;
+
+ return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+ start, end);
+ }
+
+ /*
+ * Process-scoped invalidation for L1 guests.
+ */
+ do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+ type, pg_sizes, start, end);
+ return H_SUCCESS;
+}
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
int yield_count;
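
All of the handlers added to kvmppc_pseries_do_hcall() below share the PAPR calling convention: the hcall token arrives in guest r3, arguments in r4-r9, and the handler writes the status back to r3 (via kvmppc_set_gpr) before deciding whether to resume the guest or bounce to the host. A toy model of that decode/dispatch shape (the gpr array, constants, and accessor names are stand-ins, not the kernel's):

    #include <stdio.h>

    #define H_SUCCESS    0
    #define RESUME_GUEST 1

    /* Toy vcpu: gpr[3] carries the hcall token in and the status out. */
    struct vcpu { unsigned long gpr[32]; };

    static unsigned long get_gpr(struct vcpu *v, int n) { return v->gpr[n]; }
    static void set_gpr(struct vcpu *v, int n, unsigned long x) { v->gpr[n] = x; }

    /* Shape of a PAPR hcall handler: decode r4..r9, act, report in r3. */
    static int handle_rpt_invalidate(struct vcpu *v)
    {
            unsigned long pid   = get_gpr(v, 4);
            unsigned long type  = get_gpr(v, 6);
            unsigned long start = get_gpr(v, 8);
            unsigned long end   = get_gpr(v, 9);

            printf("invalidate pid=%lu type=%#lx [%#lx,%#lx)\n",
                   pid, type, start, end);
            set_gpr(v, 3, H_SUCCESS);
            return RESUME_GUEST;
    }

    int main(void)
    {
            struct vcpu v = { .gpr = { [4] = 7, [8] = 0x1000, [9] = 0x2000 } };
            return handle_rpt_invalidate(&v) == RESUME_GUEST ? 0 : 1;
    }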
@@ -937,11 +999,57 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
+ case H_REMOVE:
+ ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_ENTER:
+ ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_READ:
+ ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_MOD:
+ ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_REF:
+ ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_PROTECT:
+ ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_BULK_REMOVE:
+ ret = kvmppc_h_bulk_remove(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+
case H_CEDE:
break;
case H_PROD:
target = kvmppc_get_gpr(vcpu, 4);
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -955,7 +1063,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
target = kvmppc_get_gpr(vcpu, 4);
if (target == -1)
break;
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -971,12 +1079,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_RTAS:
- if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ if (list_empty(&kvm->arch.rtas_tokens))
return RESUME_HOST;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ idx = srcu_read_lock(&kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ srcu_read_unlock(&kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1060,15 +1168,23 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
ret = H_HARDWARE;
break;
+ case H_RPT_INVALIDATE:
+ ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7),
+ kvmppc_get_gpr(vcpu, 8),
+ kvmppc_get_gpr(vcpu, 9));
+ break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu);
break;
case H_ENTER_NESTED:
ret = H_FUNCTION;
- if (!nesting_enabled(vcpu->kvm))
+ if (!nesting_enabled(kvm))
break;
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
@@ -1083,12 +1199,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
case H_TLB_INVALIDATE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
case H_PAGE_INIT:
@@ -1099,7 +1215,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_IN:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_in(vcpu->kvm,
+ ret = kvmppc_h_svm_page_in(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1107,7 +1223,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_OUT:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_out(vcpu->kvm,
+ ret = kvmppc_h_svm_page_out(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1115,12 +1231,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_INIT_START:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_start(vcpu->kvm);
+ ret = kvmppc_h_svm_init_start(kvm);
break;
case H_SVM_INIT_DONE:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_done(vcpu->kvm);
+ ret = kvmppc_h_svm_init_done(kvm);
break;
case H_SVM_INIT_ABORT:
/*
@@ -1130,24 +1246,26 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
* Instead the kvm->arch.secure_guest flag is checked inside
* kvmppc_h_svm_init_abort().
*/
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ ret = kvmppc_h_svm_init_abort(kvm);
break;
default:
return RESUME_HOST;
}
+ WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
}
/*
- * Handle H_CEDE in the nested virtualization case where we haven't
- * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
+ * handlers in book3s_hv_rmhandlers.S.
+ *
* This has to be done early, not in kvmppc_pseries_do_hcall(), so
* that the cede logic in kvmppc_run_single_vcpu() works properly.
*/
-static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr |= MSR_EE;
vcpu->arch.ceded = 1;
@@ -1178,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_XIRR_X:
#endif
case H_PAGE_INIT:
+ case H_RPT_INVALIDATE:
return 1;
}
@@ -1400,13 +1519,39 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
{
- /* hcall - punt to userspace */
int i;
- /* hypercall with MSR_PR has already been handled in rmode,
- * and never reaches here.
- */
+ if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * Guest userspace executed sc 1. This can only be
+ * reached by the P9 path because the old path
+ * handles this case in realmode hcall handlers.
+ */
+ if (!kvmhv_vcpu_is_radix(vcpu)) {
+ /*
+ * A guest could be running PR KVM, so this
+ * may be a PR KVM hcall. It must be reflected
+ * to the guest kernel as a sc interrupt.
+				 * to the guest kernel as an sc interrupt.
+ kvmppc_core_queue_syscall(vcpu);
+ } else {
+ /*
+				 * Radix guests cannot run PR KVM or nested HV
+ * hash guests which might run PR KVM, so this
+ * is always a privilege fault. Send a program
+ * check to guest kernel.
+ */
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ }
+ r = RESUME_GUEST;
+ break;
+ }
+ /*
+	 * hcall - gather args and set exit_reason. This will next be
+	 * handled by kvmppc_pseries_do_hcall, which may be able to deal
+	 * with it and resume the guest, or may punt to userspace.
+ */
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -1419,20 +1564,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
- * host page has been paged out. Any other HDSI/HISI interrupts
- * have been handled already.
+ * host page has been paged out.
+ *
+ * Any other HDSI/HISI interrupts have been handled already for P7/8
+ * guests. For POWER9 hash guests not using rmhandlers, basic hash
+ * fault handling is done here.
*/
- case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- r = RESUME_PAGE_FAULT;
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+ unsigned long vsid;
+ long err;
+
+ if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
+ r = RESUME_GUEST; /* Just retry if it's the canary */
+ break;
+ }
+
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_DR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, true);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1 || err == -2) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, err);
+ r = RESUME_GUEST;
+ }
break;
- case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ }
+ case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+ unsigned long vsid;
+ long err;
+
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
- vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
- r = RESUME_PAGE_FAULT;
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+ kvmppc_core_queue_inst_storage(vcpu,
+ vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_IR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, false);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_inst_storage(vcpu, err);
+ r = RESUME_GUEST;
+ }
break;
+ }
+
/*
* This occurs if the guest executes an illegal instruction.
* If the guest debug is disabled, generate a program interrupt
@@ -1593,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /*
+ * The H_RPT_INVALIDATE hcalls issued by nested
+ * guests for process-scoped invalidations when
+			 * GTSE=0 are handled here in L0.
+ */
+ if (req == H_RPT_INVALIDATE) {
+ r = kvmppc_nested_h_rpt_invalidate(vcpu);
+ break;
+ }
+
+ r = RESUME_HOST;
+ break;
+ }
default:
r = RESUME_HOST;
break;
@@ -1654,6 +1898,14 @@ unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
lpcr &= ~LPCR_AIL;
if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
+ /*
+ * On some POWER9s we force AIL off for radix guests to prevent
+ * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to
+	 * guest, which can result in Q0 translations with LPID=0 PID=PIDR
+	 * being cached, which the host TLB management does not expect.
+ */
+ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ lpcr &= ~LPCR_AIL;
/*
* On POWER9, allow userspace to enable large decrementer for the
@@ -2233,7 +2485,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*/
static int threads_per_vcore(struct kvm *kvm)
{
- if (kvm->arch.threads_indep)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return 1;
return threads_per_subcore;
}
@@ -2657,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;
@@ -2671,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ if (cpumask_test_cpu(i, cpu_in_guest))
+ smp_call_function_single(i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2704,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
*/
if (prev_cpu != pcpu) {
if (prev_cpu >= 0 &&
- cpu_first_thread_sibling(prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
+ cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
radix_flush_cpu(kvm, prev_cpu, vcpu);
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
@@ -2967,9 +3220,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
for_each_runnable_thread(i, vcpu, vc) {
if (signal_pending(vcpu->arch.run_task))
vcpu->arch.ret = -EINTR;
- else if (no_mixing_hpt_and_radix &&
- kvm_is_radix(vc->kvm) != radix_enabled())
- vcpu->arch.ret = -EINVAL;
else if (vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending)
@@ -3176,6 +3426,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int trap;
bool is_power8;
+ if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+ return;
+
/*
* Remove from the list any threads that have a signal pending
* or need a VPA update done
@@ -3203,9 +3456,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
- * On POWER9, we need to be not in independent-threads mode if
- * this is a HPT guest on a radix host machine where the
- * CPU threads may not be in different MMU modes.
*/
if ((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3230,18 +3480,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
collect_piggybacks(&core_info, target_threads);
/*
- * On radix, arrange for TLB flushing if necessary.
- * This has to be done before disabling interrupts since
- * it uses smp_call_function().
- */
- pcpu = smp_processor_id();
- if (kvm_is_radix(vc->kvm)) {
- for (sub = 0; sub < core_info.n_subcores; ++sub)
- for_each_runnable_thread(i, vcpu, core_info.vc[sub])
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
- }
-
- /*
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
@@ -3273,8 +3511,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cmd_bit = stat_bit = 0;
split = core_info.n_subcores;
sip = NULL;
- is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
- && !cpu_has_feature(CPU_FTR_ARCH_300);
+ is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
if (split > 1) {
sip = &split_info;
@@ -3478,184 +3715,113 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 1);
}
-/*
- * Load up hypervisor-mode registers on P9.
- */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static void load_spr_state(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- s64 hdec;
- u64 tb, purr, spurr;
- int trap;
- unsigned long host_hfscr = mfspr(SPRN_HFSCR);
- unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
- unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
- unsigned long host_psscr = mfspr(SPRN_PSSCR);
- unsigned long host_pidr = mfspr(SPRN_PID);
- unsigned long host_dawr1 = 0;
- unsigned long host_dawrx1 = 0;
-
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- host_dawr1 = mfspr(SPRN_DAWR1);
- host_dawrx1 = mfspr(SPRN_DAWRX1);
- }
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr(SPRN_WORT, vcpu->arch.wort);
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
/*
- * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
- * so set HDICE before writing HDEC.
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+	 * being clobbered if we take an MCE or SRESET), so those are done
+ * later.
*/
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
- isync();
-
- hdec = time_limit - mftb();
- if (hdec < 0) {
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
- isync();
- return BOOK3S_INTERRUPT_HV_DECREMENTER;
- }
- mtspr(SPRN_HDEC, hdec);
-
- if (vc->tb_offset) {
- u64 new_tb = mftb() + vc->tb_offset;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = vc->tb_offset;
- }
-
- if (vc->pcr)
- mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
- mtspr(SPRN_DPDES, vc->dpdes);
- mtspr(SPRN_VTB, vc->vtb);
-
- local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
- local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, vcpu->arch.purr);
- mtspr(SPRN_SPURR, vcpu->arch.spurr);
-
- if (dawr_enabled()) {
- mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
- mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
- mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
- }
- }
- mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_IC, vcpu->arch.ic);
- mtspr(SPRN_PID, vcpu->arch.pid);
- mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
-
- mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
-
- mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
- mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
- mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
- mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
-
- mtspr(SPRN_AMOR, ~0UL);
-
- mtspr(SPRN_LPCR, lpcr);
- isync();
-
- kvmppc_xive_push_vcpu(vcpu);
-
- mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
- mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
-
- trap = __kvmhv_vcpu_entry_p9(vcpu);
-
- /* Advance host PURR/SPURR by the amount used by guest */
- purr = mfspr(SPRN_PURR);
- spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
- purr - vcpu->arch.purr);
- mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
- spurr - vcpu->arch.spurr);
- vcpu->arch.purr = purr;
- vcpu->arch.spurr = spurr;
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+}
- vcpu->arch.ic = mfspr(SPRN_IC);
- vcpu->arch.pid = mfspr(SPRN_PID);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
- vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
- vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
- vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
- vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.wort = mfspr(SPRN_WORT);
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+}
- /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
- mtspr(SPRN_PSSCR, host_psscr |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, host_hfscr);
- mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR0, host_dawr0);
- mtspr(SPRN_DAWRX0, host_dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, host_dawr1);
- mtspr(SPRN_DAWRX1, host_dawrx1);
- }
- mtspr(SPRN_PID, host_pidr);
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+ unsigned long dscr;
+ unsigned long tidr;
+ unsigned long iamr;
+ unsigned long amr;
+ unsigned long fscr;
+};
- /*
- * Since this is radix, do a eieio; tlbsync; ptesync sequence in
- * case we interrupted the guest between a tlbie and a ptesync.
- */
- asm volatile("eieio; tlbsync; ptesync");
+static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+ host_os_sprs->dscr = mfspr(SPRN_DSCR);
+ host_os_sprs->tidr = mfspr(SPRN_TIDR);
+ host_os_sprs->iamr = mfspr(SPRN_IAMR);
+ host_os_sprs->amr = mfspr(SPRN_AMR);
+ host_os_sprs->fscr = mfspr(SPRN_FSCR);
+}
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- asm volatile(PPC_CP_ABORT);
+/* vcpu guest regs must already be saved */
+static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ mtspr(SPRN_PSPB, 0);
+ mtspr(SPRN_WORT, 0);
+ mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
- isync();
+ mtspr(SPRN_DSCR, host_os_sprs->dscr);
+ mtspr(SPRN_TIDR, host_os_sprs->tidr);
+ mtspr(SPRN_IAMR, host_os_sprs->iamr);
- vc->dpdes = mfspr(SPRN_DPDES);
- vc->vtb = mfspr(SPRN_VTB);
- mtspr(SPRN_DPDES, 0);
- if (vc->pcr)
- mtspr(SPRN_PCR, PCR_MASK);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, host_os_sprs->amr);
- if (vc->tb_offset_applied) {
- u64 new_tb = mftb() - vc->tb_offset_applied;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = 0;
- }
+ if (host_os_sprs->fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, host_os_sprs->fscr);
- mtspr(SPRN_HDEC, 0x7fffffff);
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+ /* Save guest CTRL register, set runlatch to 1 */
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 1);
+}
- return trap;
+static inline bool hcall_is_xics(unsigned long req)
+{
+ return req == H_EOI || req == H_CPPR || req == H_IPI ||
+ req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
}
/*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU. The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
*/
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
- unsigned long host_dscr = mfspr(SPRN_DSCR);
- unsigned long host_tidr = mfspr(SPRN_TIDR);
- unsigned long host_iamr = mfspr(SPRN_IAMR);
- unsigned long host_amr = mfspr(SPRN_AMR);
- unsigned long host_fscr = mfspr(SPRN_FSCR);
+ struct p9_host_os_sprs host_os_sprs;
s64 dec;
u64 tb;
int trap, save_pmu;
+ WARN_ON_ONCE(vcpu->arch.ceded);
+
dec = mfspr(SPRN_DEC);
tb = mftb();
if (dec < 0)
@@ -3664,7 +3830,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (local_paca->kvm_hstate.dec_expires < time_limit)
time_limit = local_paca->kvm_hstate.dec_expires;
- vcpu->arch.ceded = 0;
+ save_p9_host_os_sprs(&host_os_sprs);
kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
@@ -3693,24 +3859,20 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#endif
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
- mtspr(SPRN_DSCR, vcpu->arch.dscr);
- mtspr(SPRN_IAMR, vcpu->arch.iamr);
- mtspr(SPRN_PSPB, vcpu->arch.pspb);
- mtspr(SPRN_FSCR, vcpu->arch.fscr);
- mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
- mtspr(SPRN_WORT, vcpu->arch.wort);
- mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
- mtspr(SPRN_AMR, vcpu->arch.amr);
- mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+ load_spr_state(vcpu);
+ /*
+ * When setting DEC, we must always deal with irq_work_raise via NMI vs
+ * setting DEC. The problem occurs right as we switch into guest mode
+	 * if an NMI hits and sets pending work and sets DEC, then that will
+ * apply to the guest and not bring us back to the host.
+ *
+ * irq_work_raise could check a flag (or possibly LPCR[HDICE] for
+ * example) and set HDEC to 1? That wouldn't solve the nested hv
+ * case which needs to abort the hcall or zero the time limit.
+ *
+ * XXX: Another day's problem.
+ */
mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
if (kvmhv_on_pseries()) {
@@ -3718,7 +3880,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
* We need to save and restore the guest visible part of the
* psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
* doesn't do this for us. Note only required if pseries since
- * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
+ * this is done in kvmhv_vcpu_entry_p9() below otherwise.
*/
unsigned long host_psscr;
/* call our hypervisor to load up HV regs and go */
@@ -3738,6 +3900,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
hvregs.vcpu_token = vcpu->vcpu_id;
}
hvregs.hdec_expiry = time_limit;
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
__pa(&vcpu->arch.regs));
kvmhv_restore_hv_return_state(vcpu, &hvregs);
@@ -3750,15 +3914,41 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
- kvmppc_nested_cede(vcpu);
+ kvmppc_cede(vcpu);
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
} else {
- trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+ kvmppc_xive_push_vcpu(vcpu);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /* H_CEDE has to be handled now, not later */
+ if (req == H_CEDE) {
+ kvmppc_cede(vcpu);
+ kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+
+ /* XICS hcalls must be handled before xive is pulled */
+ } else if (hcall_is_xics(req)) {
+ int ret;
+
+ ret = kvmppc_xive_xics_hcall(vcpu, req);
+ if (ret != H_TOO_HARD) {
+ kvmppc_set_gpr(vcpu, 3, ret);
+ trap = 0;
+ }
+ }
+ }
+ kvmppc_xive_pull_vcpu(vcpu);
+
+ if (kvm_is_radix(vcpu->kvm))
+ vcpu->arch.slb_max = 0;
}
- vcpu->arch.slb_max = 0;
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
@@ -3766,36 +3956,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
vcpu->arch.thread_cpu = -1;
- /* Save guest CTRL register, set runlatch to 1 */
- vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
-
- vcpu->arch.iamr = mfspr(SPRN_IAMR);
- vcpu->arch.pspb = mfspr(SPRN_PSPB);
- vcpu->arch.fscr = mfspr(SPRN_FSCR);
- vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
- vcpu->arch.wort = mfspr(SPRN_WORT);
- vcpu->arch.tid = mfspr(SPRN_TIDR);
- vcpu->arch.amr = mfspr(SPRN_AMR);
- vcpu->arch.uamor = mfspr(SPRN_UAMOR);
- vcpu->arch.dscr = mfspr(SPRN_DSCR);
- mtspr(SPRN_PSPB, 0);
- mtspr(SPRN_WORT, 0);
- mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_DSCR, host_dscr);
- mtspr(SPRN_TIDR, host_tidr);
- mtspr(SPRN_IAMR, host_iamr);
+ store_spr_state(vcpu);
- if (host_amr != vcpu->arch.amr)
- mtspr(SPRN_AMR, host_amr);
-
- if (host_fscr != vcpu->arch.fscr)
- mtspr(SPRN_FSCR, host_fscr);
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
@@ -3825,6 +3989,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vc->in_guest = 0;
mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
kvmhv_load_host_pmu();
@@ -3925,7 +4092,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
- ++vc->runner->stat.halt_attempted_poll;
+ ++vc->runner->stat.generic.halt_attempted_poll;
vc->vcore_state = VCORE_POLLING;
spin_unlock(&vc->lock);
@@ -3936,13 +4103,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
- } while (single_task_running() && ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
if (!do_sleep) {
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
}
@@ -3954,7 +4121,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
do_sleep = 0;
/* If we polled, count this as a successful poll */
if (vc->halt_poll_ns)
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
@@ -3981,13 +4148,13 @@ out:
ktime_to_ns(cur) - ktime_to_ns(start_wait);
/* Attribute failed poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_fail_ns +=
+ vc->runner->stat.generic.halt_poll_fail_ns +=
ktime_to_ns(start_wait) -
ktime_to_ns(start_poll);
} else {
/* Attribute successful poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_success_ns +=
+ vc->runner->stat.generic.halt_poll_success_ns +=
ktime_to_ns(cur) -
ktime_to_ns(start_poll);
}
@@ -4014,7 +4181,6 @@ out:
/*
* This never fails for a radix guest, as none of the operations it does
* for a radix guest can fail or have a way to report failure.
- * kvmhv_run_single_vcpu() relies on this fact.
*/
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
@@ -4170,7 +4336,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
{
struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
- int srcu_idx, lpid;
+ int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4193,8 +4359,15 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->runner = vcpu;
/* See if the MMU is ready to go */
- if (!kvm->arch.mmu_ready)
- kvmhv_setup_mmu(vcpu);
+ if (!kvm->arch.mmu_ready) {
+ r = kvmhv_setup_mmu(vcpu);
+ if (r) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ return r;
+ }
+ }
if (need_resched())
cond_resched();
@@ -4207,7 +4380,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
preempt_disable();
pcpu = smp_processor_id();
vc->pcpu = pcpu;
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+ if (kvm_is_radix(kvm))
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
local_irq_disable();
hard_irq_disable();
@@ -4244,13 +4418,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- mtspr(SPRN_LPID, lpid);
- isync();
- kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
- }
-
guest_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4269,11 +4436,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
srcu_read_unlock(&kvm->srcu, srcu_idx);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- mtspr(SPRN_LPID, kvm->arch.host_lpid);
- isync();
- }
-
set_irq_happened(trap);
kvmppc_set_host_core(pcpu);
@@ -4419,19 +4581,23 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- /*
- * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
- * path, which also handles hash and dependent threads mode.
- */
- if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
- !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
r = kvmppc_run_vcpu(vcpu);
- if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+			 * These should have been caught and reflected
+ * into the guest by now. Final sanity check:
+ * don't allow userspace to execute hcalls in
+ * the hypervisor.
+ */
+ r = RESUME_GUEST;
+ continue;
+ }
trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
trace_kvm_hcall_exit(vcpu, r);
@@ -4455,7 +4621,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
mtspr(SPRN_EBBRR, ebb_regs[1]);
mtspr(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
- mtspr(SPRN_FSCR, current->thread.fscr);
}
mtspr(SPRN_VRSAVE, user_vrsave);
@@ -4759,8 +4924,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
mmap_read_lock(kvm->mm);
- vma = find_vma(kvm->mm, hva);
- if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
+ vma = vma_lookup(kvm->mm, hva);
+ if (!vma || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
@@ -5039,18 +5204,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
- * On POWER9, we only need to do this if the "indep_threads_mode"
- * module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
- pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
- kvm->arch.threads_indep = true;
- } else {
- kvm->arch.threads_indep = indep_threads_mode;
- }
- }
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_activated();
/*
@@ -5091,7 +5246,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
debugfs_remove_recursive(kvm->arch.debugfs_dir);
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
@@ -5512,7 +5667,9 @@ static int kvmhv_enable_nested(struct kvm *kvm)
{
if (!nested)
return -EPERM;
- if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+ if (!radix_enabled())
return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
@@ -5675,11 +5832,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
static bool kvmppc_hash_v3_possible(void)
{
- if (radix_enabled() && no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return false;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
return false;
- return cpu_has_feature(CPU_FTR_ARCH_300) &&
- cpu_has_feature(CPU_FTR_HVMODE);
+ /*
+ * POWER9 chips before version 2.02 can't have some threads in
+ * HPT mode and some in radix mode on the same core.
+ */
+ if (radix_enabled()) {
+ unsigned int pvr = mfspr(SPRN_PVR);
+ if ((pvr >> 16) == PVR_POWER9 &&
+ (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+ ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+ return false;
+ }
+
+ return true;
}
static struct kvmppc_ops kvm_ops_hv = {
@@ -5823,18 +5994,6 @@ static int kvmppc_book3s_init_hv(void)
if (kvmppc_radix_possible())
r = kvmppc_radix_init();
- /*
- * POWER9 chips before version 2.02 can't have some threads in
- * HPT mode and some in radix mode on the same core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- unsigned int pvr = mfspr(SPRN_PVR);
- if ((pvr >> 16) == PVR_POWER9 &&
- (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
- ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
- no_mixing_hpt_and_radix = true;
- }
-
r = kvmppc_uvmem_init();
if (r < 0)
pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7a0e33a9c980..be8ef1c5b1bf 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -35,21 +35,6 @@
#include "book3s_xive.h"
/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
-
-/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
* should be power of 2.
*/
@@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
- int r;
-
- /* Only need to do the expensive mfmsr() on radix */
- if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
- r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
- else
- r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
- if (r)
+ if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
return H_SUCCESS;
return H_HARDWARE;
@@ -221,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* For a nested hypervisor, use the XICS via hcall */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
- IPI_PRIORITY);
- return;
- }
-
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -442,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
- xirr = cpu_to_be32(retbuf[0]);
- } else {
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
- }
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
if (rc < 0)
return 1;
@@ -483,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(), 0xff);
- plpar_hcall_raw(H_EOI, retbuf, h_xirr);
- } else if (xics_phys) {
+ if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -515,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(),
- IPI_PRIORITY);
- } else if (xics_phys)
+ if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -541,22 +491,13 @@ static long kvmppc_read_one_intr(bool *again)
}
#ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
- return !(mfmsr() & MSR_DR);
-}
-
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -565,13 +506,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.regs.gpr[5] = get_tb();
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -579,13 +516,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipoll(vcpu, server);
- if (unlikely(!__xive_vm_h_ipoll))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipoll(vcpu, server);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipoll(vcpu, server);
+ else
return H_TOO_HARD;
}
@@ -594,13 +527,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipi(vcpu, server, mfrr);
- if (unlikely(!__xive_vm_h_ipi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipi(vcpu, server, mfrr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ else
return xics_rm_h_ipi(vcpu, server, mfrr);
}
@@ -608,13 +537,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_cppr(vcpu, cppr);
- if (unlikely(!__xive_vm_h_cppr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_cppr(vcpu, cppr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_cppr(vcpu, cppr);
+ else
return xics_rm_h_cppr(vcpu, cppr);
}
@@ -622,13 +547,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_eoi(vcpu, xirr);
- if (unlikely(!__xive_vm_h_eoi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_eoi(vcpu, xirr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_eoi(vcpu, xirr);
+ else
return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
@@ -800,7 +721,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
* Thus we make all 4 threads use the same bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_thread_sibling(pcpu);
+ pcpu = cpu_first_tlb_thread_sibling(pcpu);
if (nested)
need_tlb_flush = &nested->need_tlb_flush;
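[Editorial note: cpu_first_tlb_thread_sibling() and friends are declared in asm/cputhreads.h. A sketch of plausible definitions, assuming that on ISA v3.0 SMT8 big cores the even-numbered and the odd-numbered threads each share a TLB (so siblings step by two), falling back to the ordinary thread-sibling helpers otherwise; the authoritative versions live in the header, not this hunk.]

	static inline int cpu_first_tlb_thread_sibling(int cpu)
	{
		if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
			return cpu & ~0x6;	/* lowest thread of same parity */
		return cpu_first_thread_sibling(cpu);
	}

	static inline int cpu_last_tlb_thread_sibling(int cpu)
	{
		if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
			return cpu | 0x6;	/* highest thread of same parity */
		return cpu_last_thread_sibling(cpu);
	}

	static inline int cpu_tlb_thread_sibling_step(void)
	{
		if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
			return 2;		/* skip the other parity's threads */
		return 1;
	}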
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 327417d79eac..4444f83cb133 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* Put whatever is in the decrementer into the
* hypervisor decrementer.
- * Because of a hardware deviation in P8 and P9,
+ * Because of a hardware deviation in P8,
* we need to set LPCR[HDICE] before writing HDEC.
*/
ld r5, HSTATE_KVM_VCORE(r13)
@@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ori r8, r9, LPCR_HDICE
mtspr SPRN_LPCR, r8
isync
- andis. r0, r9, LPCR_LD@h
mfspr r8,SPRN_DEC
mftb r7
-BEGIN_FTR_SECTION
- /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
- bne 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
-32: mtspr SPRN_HDEC,r8
+ mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 60724f674421..8543ad538b0c 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -19,6 +19,7 @@
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
static struct patb_entry *pseries_partition_tb;
@@ -53,7 +54,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dawrx1 = vcpu->arch.dawrx1;
}
-static void byteswap_pt_regs(struct pt_regs *regs)
+/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
+static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;
@@ -467,8 +469,15 @@ static void kvmhv_flush_lpid(unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PAT,
+ H_RPTI_PAGE_ALL, 0, -1UL);
if (rc)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
@@ -1214,6 +1223,113 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
+static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
+ unsigned long lpid, unsigned long ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * Number of pages above which we invalidate the entire LPID rather than
+ * flush individual pages.
+ */
+static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
+
+static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
+ unsigned long lpid,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end)
+{
+ int ret = H_P4;
+ unsigned long addr, nr_pages;
+ struct mmu_psize_def *def;
+ unsigned long psize, ap, page_size;
+ bool flush_lpid;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
+ if (flush_lpid)
+ return do_tlb_invalidate_nested_all(vcpu, lpid,
+ RIC_FLUSH_TLB);
+ addr = start;
+ ap = mmu_get_ap(psize);
+ page_size = 1UL << def->shift;
+ do {
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
+ get_epn(addr));
+ if (ret)
+ return H_P4;
+ addr += page_size;
+ } while (addr < end);
+ }
+ return ret;
+}
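[Editorial note: a standalone worked example of the ceiling heuristic above, with illustrative values: a 4 MB invalidation of 64K pages spans 64 pages, which exceeds tlb_range_flush_page_ceiling (33), so the whole LPID is flushed rather than emulating 64 per-page tlbies.]

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long start = 0, end = 4UL << 20;	/* 4 MB range */
		unsigned int shift = 16;			/* 64K pages */
		unsigned long ceiling = 33;	/* tlb_range_flush_page_ceiling */
		unsigned long nr_pages = (end - start) >> shift;
		bool flush_lpid = nr_pages > ceiling;

		printf("nr_pages=%lu -> %s\n", nr_pages,
		       flush_lpid ? "full LPID flush" : "per-page tlbie");
		return 0;
	}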
+
+/*
+ * Performs partition-scoped invalidations for nested guests
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If L2 lpid isn't valid, we need to return H_PARAMETER.
+ *
+	 * However, nested KVM issues an L2 lpid flush call when creating
+	 * partition table entries for L2. This happens even before the
+	 * corresponding shadow lpid is created in HV, which happens in the
+	 * H_ENTER_NESTED call. Since we can't differentiate this case from
+ * the invalid case, we ignore such flush requests and return success.
+ */
+ if (!kvmhv_find_nested(vcpu->kvm, lpid))
+ return H_SUCCESS;
+
+ /*
+ * A flush all request can be handled by a full lpid flush only.
+ */
+ if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
+
+ /*
+	 * We don't need to handle a PWC flush like the process table here,
+	 * because the intermediate partition-scoped table in a nested guest
+	 * doesn't really have a PWC. The only level we have a PWC at is in
+	 * L0, and for a nested invalidate at L0 we always do
+	 * kvmhv_flush_lpid(), which does radix__flush_all_lpid(). For a
+	 * range invalidate at any level, we are not removing the
+	 * higher-level page tables and hence there is no PWC invalidate
+	 * needed.
+ *
+ * if (type & H_RPTI_TYPE_PWC) {
+ * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
+ * if (ret)
+ * return H_P4;
+ * }
+ */
+
+ if (start == 0 && end == -1)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
+
+ if (type & H_RPTI_TYPE_TLB)
+ return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
+ start, end);
+ return H_SUCCESS;
+}
+
/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp,
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
new file mode 100644
index 000000000000..83f592eadcd2
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <asm/asm-prototypes.h>
+#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 tb = mftb() - vc->tb_offset_applied;
+
+ vcpu->arch.cur_activity = next;
+ vcpu->arch.cur_tb_start = tb;
+}
+
+static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmhv_tb_accumulator *curr;
+ u64 tb = mftb() - vc->tb_offset_applied;
+ u64 prev_tb;
+ u64 delta;
+ u64 seq;
+
+ curr = vcpu->arch.cur_activity;
+ vcpu->arch.cur_activity = next;
+ prev_tb = vcpu->arch.cur_tb_start;
+ vcpu->arch.cur_tb_start = tb;
+
+ if (!curr)
+ return;
+
+ delta = tb - prev_tb;
+
+ seq = curr->seqcount;
+ curr->seqcount = seq + 1;
+ smp_wmb();
+ curr->tb_total += delta;
+ if (seq == 0 || delta < curr->tb_min)
+ curr->tb_min = delta;
+ if (delta > curr->tb_max)
+ curr->tb_max = delta;
+ smp_wmb();
+ curr->seqcount = seq + 2;
+}
+
+#define start_timing(vcpu, next) __start_timing(vcpu, next)
+#define end_timing(vcpu) __start_timing(vcpu, NULL)
+#define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
+#else
+#define start_timing(vcpu, next) do {} while (0)
+#define end_timing(vcpu) do {} while (0)
+#define accumulate_time(vcpu, next) do {} while (0)
+#endif
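[Editorial note: the writer above follows the usual seqcount protocol: an odd seqcount means an update is in flight, and the smp_wmb() pair orders the count against the payload. A sketch of a matching reader, assuming the field layout used by __accumulate_time(); the helper itself is hypothetical, since the real consumer is the debugfs timing code, not shown in this hunk.]

	static u64 read_tb_total(struct kvmhv_tb_accumulator *acc)
	{
		u64 seq, total;

		do {
			seq = READ_ONCE(acc->seqcount);
			smp_rmb();		/* pairs with writer's smp_wmb() */
			total = acc->tb_total;
			smp_rmb();
		} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);

		return total;
	}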
+
+static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
+{
+ asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
+ asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
+}
+
+static inline void mtslb(u64 slbee, u64 slbev)
+{
+ asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
+}
+
+static inline void clear_slb_entry(unsigned int idx)
+{
+ mtslb(idx, 0);
+}
+
+static inline void slb_clear_invalidate_partition(void)
+{
+ clear_slb_entry(0);
+ asm volatile(PPC_SLBIA(6));
+}
+
+/*
+ * Malicious or buggy radix guests may have inserted SLB entries
+ * (only 0..3 because radix always runs with UPRT=1), so these must
+ * be cleared here to avoid side-channels. slbmte is used rather
+ * than slbia, as it won't clear cached translations.
+ */
+static void radix_clear_slb(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ clear_slb_entry(i);
+}
+
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ u32 lpid;
+
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+ /*
+ * All the isync()s are overkill but trivially follow the ISA
+	 * requirements. Some can likely be replaced with a justification
+	 * comment for why they are not needed.
+ */
+ isync();
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+ mtspr(SPRN_PID, vcpu->arch.pid);
+ isync();
+}
+
+static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ u32 lpid;
+ int i;
+
+ lpid = kvm->arch.lpid;
+
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_PID, vcpu->arch.pid);
+
+ for (i = 0; i < vcpu->arch.slb_max; i++)
+ mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
+
+ isync();
+}
+
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
+{
+ isync();
+ mtspr(SPRN_PID, pid);
+ isync();
+ mtspr(SPRN_LPID, kvm->arch.host_lpid);
+ isync();
+ mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+ isync();
+
+ if (!radix_enabled())
+ slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+ if (!radix_enabled()) {
+ /*
+ * Hash host could save and restore host SLB entries to
+ * reduce SLB fault overheads of VM exits, but for now the
+ * existing code clears all entries and restores just the
+ * bolted ones when switching back to host.
+ */
+ slb_clear_invalidate_partition();
+ }
+}
+
+static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (kvm_is_radix(kvm)) {
+ radix_clear_slb();
+ } else {
+ int i;
+ int nr = 0;
+
+ /*
+ * This must run before switching to host (radix host can't
+ * access all SLBs).
+ */
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ u64 slbee, slbev;
+ mfslb(i, &slbee, &slbev);
+ if (slbee & SLB_ESID_V) {
+ vcpu->arch.slb[nr].orige = slbee | i;
+ vcpu->arch.slb[nr].origv = slbev;
+ nr++;
+ }
+ }
+ vcpu->arch.slb_max = nr;
+ slb_clear_invalidate_partition();
+ }
+}
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec;
+ u64 tb, purr, spurr;
+ u64 *exsave;
+ bool ri_set;
+ int trap;
+ unsigned long msr;
+ unsigned long host_hfscr;
+ unsigned long host_ciabr;
+ unsigned long host_dawr0;
+ unsigned long host_dawrx0;
+ unsigned long host_psscr;
+ unsigned long host_pidr;
+ unsigned long host_dawr1;
+ unsigned long host_dawrx1;
+
+ hdec = time_limit - mftb();
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+
+ WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
+ WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
+
+ start_timing(vcpu, &vcpu->arch.rm_entry);
+
+ vcpu->arch.ceded = 0;
+
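+	/*
+	 * Apply the vcore timebase offset. TBU40 writes only the upper
+	 * 40 bits of the timebase; if the running low 24 bits ended up
+	 * behind those of the target value, bump the upper bits by one
+	 * unit (0x1000000) so the guest timebase never goes backwards.
+	 */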
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ msr = mfmsr();
+
+ host_hfscr = mfspr(SPRN_HFSCR);
+ host_ciabr = mfspr(SPRN_CIABR);
+ host_dawr0 = mfspr(SPRN_DAWR0);
+ host_dawrx0 = mfspr(SPRN_DAWRX0);
+ host_psscr = mfspr(SPRN_PSSCR);
+ host_pidr = mfspr(SPRN_PID);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+ mtspr(SPRN_DPDES, vc->dpdes);
+ mtspr(SPRN_VTB, vc->vtb);
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (dawr_enabled()) {
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ }
+ }
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+ mtspr(SPRN_IC, vcpu->arch.ic);
+
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
+
+ /*
+ * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
+	 * Interrupt (HDSI) the HDSISR is not updated at all.
+ *
+ * To work around this we put a canary value into the HDSISR before
+	 * returning to a guest and then check for this canary when we take an
+	 * HDSI. If we find the canary on an HDSI, we know the hardware didn't
+	 * update the HDSISR. In this case we return to the guest to retake the
+	 * HDSI, which should correctly update the HDSISR on the second HDSI
+	 * entry.
+ *
+ * Just do this on all p9 processors for now.
+ */
+ mtspr(SPRN_HDSISR, HDSISR_CANARY);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ mtspr(SPRN_AMOR, ~0UL);
+
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+ /*
+	 * A hash host, a hash guest, or a radix guest with the prefetch bug
+	 * all have to disable the MMU before switching to guest MMU state.
+ */
+ if (!radix_enabled() || !kvm_is_radix(kvm) ||
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+ save_clear_host_mmu(kvm);
+
+ if (kvm_is_radix(kvm)) {
+ switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
+ if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(0, 1); /* clear RI */
+
+ } else {
+ switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
+ }
+
+ /* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
+ kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+
+ /*
+ * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
+ * so set guest LPCR (with HDICE) before writing HDEC.
+ */
+ mtspr(SPRN_HDEC, hdec);
+
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ accumulate_time(vcpu, &vcpu->arch.guest_time);
+
+ kvmppc_p9_enter_guest(vcpu);
+
+ accumulate_time(vcpu, &vcpu->arch.rm_intr);
+
+ /* XXX: Could get these from r11/12 and paca exsave instead */
+ vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
+ vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
+ trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+
+ /* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
+ ri_set = false;
+ if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ if (trap != BOOK3S_INTERRUPT_SYSCALL &&
+ (vcpu->arch.shregs.msr & MSR_RI))
+ ri_set = true;
+ exsave = local_paca->exgen;
+ } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
+ exsave = local_paca->exnmi;
+ } else { /* trap == 0x200 */
+ exsave = local_paca->exmc;
+ }
+
+ vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
+ vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+
+ /*
+ * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
+ * and hstate scratch (which we need to move into exsave to make
+ * this re-entrant vs SRESET/MCE)
+ */
+ if (ri_set) {
+ if (unlikely(!(mfmsr() & MSR_RI))) {
+ __mtmsrd(MSR_RI, 1);
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(mfmsr() & MSR_RI);
+ __mtmsrd(MSR_RI, 1);
+ }
+
+ vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
+ vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
+ vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
+ vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
+ vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
+ vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
+ vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
+ vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
+
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+
+ if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ kvmppc_realmode_machine_check(vcpu);
+
+ } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
+ kvmppc_realmode_hmi_handler();
+
+ } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
+ vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Softpatch interrupt for transactional memory emulation cases
+ * on POWER9 DD2.2. This is early in the guest exit path - we
+ * haven't saved registers or done a treclaim yet.
+ */
+ } else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ /*
+ * The cases we want to handle here are those where the guest
+ * is in real suspend mode and is trying to transition to
+ * transactional mode.
+ */
+ if (local_paca->kvm_hstate.fake_suspend &&
+ (vcpu->arch.shregs.msr & MSR_TS_S)) {
+ if (kvmhv_p9_tm_emulation_early(vcpu)) {
+ /* Prevent it being handled again. */
+ trap = 0;
+ }
+ }
+#endif
+ }
+
+ accumulate_time(vcpu, &vcpu->arch.rm_exit);
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
+ purr - vcpu->arch.purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
+ spurr - vcpu->arch.spurr);
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
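[Editor's note: the PURR/SPURR handling above is plain delta accounting; the host counter resumes from its entry snapshot plus whatever the guest consumed. A hedged sketch of the arithmetic in user-space C, with invented names and values:]

#include <stdio.h>

int main(void)
{
	unsigned long host_purr = 5000;  /* host PURR snapshot at guest entry */
	unsigned long guest_purr = 1000; /* value loaded into PURR for the guest */
	unsigned long purr_exit = 1400;  /* PURR read back on guest exit */

	/* Advance the host counter by the cycles the guest used. */
	host_purr += purr_exit - guest_purr;
	printf("host PURR resumes at %lu\n", host_purr); /* prints 5400 */
	return 0;
}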
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_psscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR0, host_dawr0);
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, host_dawr1);
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
+
+ if (kvm_is_radix(kvm)) {
+ /*
+ * Since this is radix, do an eieio; tlbsync; ptesync sequence
+ * in case we interrupted the guest between a tlbie and a
+ * ptesync.
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+ }
+
+ /*
+ * cp_abort is required if the processor supports local copy-paste
+ * to clear the copy buffer that was under control of the guest.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ asm volatile(PPC_CP_ABORT);
+
+ vc->dpdes = mfspr(SPRN_DPDES);
+ vc->vtb = mfspr(SPRN_VTB);
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, PCR_MASK);
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = 0;
+ }
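[Editor's note: the TBU40 fixup above is fixed-point arithmetic that can be checked in user space. A hedged model follows; set_tbu40() is an invented stand-in for mtspr(SPRN_TBU40, ...), which writes only the upper 40 bits of the timebase while the low 24 keep their old value.]

#include <stdint.h>
#include <stdio.h>

static uint64_t tb; /* stand-in for the free-running timebase */

/* Model of mtspr(SPRN_TBU40, v): only the upper 40 bits are written. */
static void set_tbu40(uint64_t v)
{
	tb = (v & ~0xffffffULL) | (tb & 0xffffffULL);
}

int main(void)
{
	uint64_t new_tb;

	tb = 0x20000010ULL;  /* low 24 bits just past a wrap point */
	new_tb = tb - 0x100; /* removing the offset borrows from bit 24 */

	set_tbu40(new_tb);   /* upper bits now lag by one unit */
	if ((tb & 0xffffff) < (new_tb & 0xffffff))
		set_tbu40(new_tb + 0x1000000); /* carry into the upper 40 bits */

	printf("tb = 0x%llx, wanted >= 0x%llx\n",
	       (unsigned long long)tb, (unsigned long long)new_tb);
	return 0;
}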
+
+ mtspr(SPRN_HDEC, 0x7fffffff);
+
+ save_clear_guest_mmu(kvm, vcpu);
+ switch_mmu_to_host(kvm, host_pidr);
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+
+ /*
+ * If we are in real mode, turn the MMU back on only after it has
+ * been switched to the host, to avoid the P9_RADIX_PREFETCH_BUG.
+ */
+ __mtmsrd(msr, 0);
+
+ end_timing(vcpu);
+
+ return trap;
+}
+EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 7af7c70f1468..632b2545072b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -23,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
@@ -57,6 +46,10 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;
+ /* LPID has been switched to host if in virt mode so can't do local */
+ if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+ global = 1;
+
if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
@@ -67,7 +60,7 @@ static int global_invalidates(struct kvm *kvm)
* so use the bit for the first thread to represent the core.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
@@ -409,6 +402,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -553,6 +547,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -671,6 +666,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn)
@@ -741,6 +737,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -781,6 +778,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -829,6 +827,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -876,6 +875,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
unsigned long gpa, int writing, unsigned long *hpa,
@@ -1294,3 +1294,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return -1; /* send fault up to host kernel mode */
}
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index c2c9c733f359..0a11ec88a0ae 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}
- if (xive_enabled() && kvmhv_on_pseries()) {
- /* No XICS access or hypercalls available, too hard */
- this_icp->rm_action |= XICS_RM_KICK_VCPU;
- this_icp->rm_kick_target = vcpu;
- return;
- }
-
/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- iosync();
- plpar_hcall_raw(H_EOI, retbuf, hwirq);
- return;
- }
-
rc = pnv_opal_pci_msi_eoi(c, hwirq);
if (rc)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5e634db4809b..8dd437d7a2c6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -25,18 +25,10 @@
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
-#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
-#include <asm/ultravisor-api.h>
-
-/* Sign-extend HDEC if not on POWER9 */
-#define EXTEND_HDEC(reg) \
-BEGIN_FTR_SECTION; \
- extsw reg, reg; \
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
@@ -44,9 +36,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_UNSPLIT 3
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 208
+#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
-#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
@@ -57,10 +48,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
-#define STACK_SLOT_DAWR1 (SFS-96)
-#define STACK_SLOT_DAWRX1 (SFS-104)
-/* the following is used by the P9 short path */
-#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
+#define STACK_SLOT_FSCR (SFS-96)
/*
* Call kvmppc_hv_entry in real mode.
@@ -136,15 +124,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Return the trap number on this thread as the return value */
mr r3, r12
- /*
- * If we came back from the guest via a relocation-on interrupt,
- * we will be in virtual mode at this point, which makes it a
- * little easier to get back to the caller.
- */
- mfmsr r0
- andi. r0, r0, MSR_IR /* in real mode? */
- bne .Lvirt_return
-
/* RFI into the highmem handler */
mfmsr r6
li r0, MSR_RI
@@ -154,11 +133,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtsrr1 r7
RFI_TO_KERNEL
- /* Virtual-mode return */
-.Lvirt_return:
- mtlr r8
- blr
-
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -245,7 +219,7 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
- EXTEND_HDEC(r0)
+ extsw r0, r0
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
cmpdi r0, 0
blt kvm_novcpu_exit
@@ -347,10 +321,8 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- LOAD_REG_ADDR(r6, decrementer_max)
- ld r6, 0(r6)
+ lis r6,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC, r6
-BEGIN_FTR_SECTION
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
cmpdi r6, 0
@@ -362,7 +334,6 @@ BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_LDBAR(r6)
mtspr SPRN_LDBAR, r0
isync
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
lwsync
@@ -433,7 +404,6 @@ kvm_no_guest:
blr
53:
-BEGIN_FTR_SECTION
HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
cmpdi r5, 0
@@ -448,14 +418,6 @@ BEGIN_FTR_SECTION
b kvm_unsplit_nap
60: HMT_MEDIUM
b kvm_secondary_got_guest
-FTR_SECTION_ELSE
- HMT_LOW
- ld r5, HSTATE_KVM_VCORE(r13)
- cmpdi r5, 0
- beq kvm_no_guest
- HMT_MEDIUM
- b kvm_secondary_got_guest
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
54: li r0, KVM_HWTHREAD_IN_KVM
stb r0, HSTATE_HWTHREAD_STATE(r13)
@@ -581,13 +543,11 @@ kvmppc_hv_entry:
bne 10f
lwz r7,KVM_LPID(r9)
-BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -668,16 +628,6 @@ kvmppc_got_guest:
/* Save host values of some registers */
BEGIN_FTR_SECTION
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- mfspr r7, SPRN_PID
- std r5, STACK_SLOT_TID(r1)
- std r6, STACK_SLOT_PSSCR(r1)
- std r7, STACK_SLOT_PID(r1)
- mfspr r5, SPRN_HFSCR
- std r5, STACK_SLOT_HFSCR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
mfspr r6, SPRN_DAWR0
mfspr r7, SPRN_DAWRX0
@@ -686,13 +636,9 @@ BEGIN_FTR_SECTION
std r6, STACK_SLOT_DAWR0(r1)
std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_FSCR
+ std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- mfspr r6, SPRN_DAWR1
- mfspr r7, SPRN_DAWRX1
- std r6, STACK_SLOT_DAWR1(r1)
- std r7, STACK_SLOT_DAWRX1(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
mfspr r5, SPRN_AMR
std r5, STACK_SLOT_AMR(r1)
@@ -710,13 +656,9 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -783,12 +725,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ld r6, VCPU_DAWRX0(r4)
mtspr SPRN_DAWR0, r5
mtspr SPRN_DAWRX0, r6
-BEGIN_FTR_SECTION
- ld r5, VCPU_DAWR1(r4)
- ld r6, VCPU_DAWRX1(r4)
- mtspr SPRN_DAWR1, r5
- mtspr SPRN_DAWRX1, r6
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -806,7 +742,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -817,18 +752,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_CSIGR, r7
mtspr SPRN_TACR, r8
nop
-FTR_SECTION_ELSE
- /* POWER9-only registers */
- ld r5, VCPU_TID(r4)
- ld r6, VCPU_PSSCR(r4)
- lbz r8, HSTATE_FAKE_SUSPEND(r13)
- oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
- rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
- ld r7, VCPU_HFSCR(r4)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
ld r5, VCPU_SPRG0(r4)
@@ -898,23 +821,15 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
- ld r6, VCPU_KVM(r4)
- lbz r0, KVM_RADIX(r6)
- cmpwi r0, 0
- bne 9f
-
- /* For hash guest, clear out and reload the SLB */
-BEGIN_MMU_FTR_SECTION
- /* Radix host won't have populated the SLB, so no need to clear */
+ /* Clear out and reload the SLB */
li r6, 0
slbmte r6, r6
PPC_SLBIA(6)
ptesync
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4)
@@ -929,96 +844,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
bdnz 1b
9:
-#ifdef CONFIG_KVM_XICS
- /* We are entering the guest on that thread, push VCPU to XIVE */
- ld r11, VCPU_XIVE_SAVED_STATE(r4)
- li r9, TM_QW1_OS
- lwz r8, VCPU_XIVE_CAM_WORD(r4)
- cmpwi r8, 0
- beq no_xive
- li r7, TM_QW1_OS + TM_WORD2
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdx r11,r9,r10
- stwx r8,r7,r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdcix r11,r9,r10
- stwcix r8,r7,r10
-3: li r9, 1
- stb r9, VCPU_XIVE_PUSHED(r4)
- eieio
-
- /*
- * We clear the irq_pending flag. There is a small chance of a
- * race vs. the escalation interrupt happening on another
- * processor setting it again, but the only consequence is to
- * cause a spurrious wakeup on the next H_CEDE which is not an
- * issue.
- */
- li r0,0
- stb r0, VCPU_IRQ_PENDING(r4)
-
- /*
- * In single escalation mode, if the escalation interrupt is
- * on, we mask it.
- */
- lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi cr1, r0,0
- beq cr1, 1f
- li r9, XIVE_ESB_SET_PQ_01
- beq 4f /* in real mode? */
- ld r10, VCPU_XIVE_ESC_VADDR(r4)
- ldx r0, r10, r9
- b 5f
-4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
- ldcix r0, r10, r9
-5: sync
-
- /* We have a possible subtle race here: The escalation interrupt might
- * have fired and be on its way to the host queue while we mask it,
- * and if we unmask it early enough (re-cede right away), there is
- * a theorical possibility that it fires again, thus landing in the
- * target queue more than once which is a big no-no.
- *
- * Fortunately, solving this is rather easy. If the above load setting
- * PQ to 01 returns a previous value where P is set, then we know the
- * escalation interrupt is somewhere on its way to the host. In that
- * case we simply don't clear the xive_esc_on flag below. It will be
- * eventually cleared by the handler for the escalation interrupt.
- *
- * Then, when doing a cede, we check that flag again before re-enabling
- * the escalation interrupt, and if set, we abort the cede.
- */
- andi. r0, r0, XIVE_ESB_VAL_P
- bne- 1f
-
- /* Now P is 0, we can clear the flag */
- li r0, 0
- stb r0, VCPU_XIVE_ESC_ON(r4)
-1:
-no_xive:
-#endif /* CONFIG_KVM_XICS */
-
- li r0, 0
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
/* Check if we can deliver an external or decrementer interrupt now */
ld r0, VCPU_PENDING_EXC(r4)
-BEGIN_FTR_SECTION
- /* On POWER9, also check for emulated doorbell interrupt */
- lbz r3, VCPU_DBELL_REQ(r4)
- or r0, r0, r3
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpdi r0, 0
beq 71f
mr r3, r4
@@ -1030,7 +858,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
-fast_guest_entry_c:
ld r10, VCPU_PC(r4)
ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
@@ -1092,18 +919,8 @@ BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-/* Move canary into DSISR to check for later */
-BEGIN_FTR_SECTION
- li r0, 0x7fff
- mtspr SPRN_HDSISR, r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
- ld r6, VCPU_KVM(r4)
- lbz r7, KVM_SECURE_GUEST(r6)
- cmpdi r7, 0
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
- bne ret_to_ultra
ld r0, VCPU_CR(r4)
mtcr r0
@@ -1114,117 +931,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST
b .
-/*
- * Use UV_RETURN ultracall to return control back to the Ultravisor after
- * processing an hypercall or interrupt that was forwarded (a.k.a. reflected)
- * to the Hypervisor.
- *
- * All registers have already been loaded, except:
- * R0 = hcall result
- * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
- * R3 = UV_RETURN
- */
-ret_to_ultra:
- ld r0, VCPU_CR(r4)
- mtcr r0
-
- ld r0, VCPU_GPR(R3)(r4)
- mfspr r2, SPRN_SRR1
- li r3, 0
- ori r3, r3, UV_RETURN
- ld r4, VCPU_GPR(R4)(r4)
- sc 2
-
-/*
- * Enter the guest on a P9 or later system where we have exactly
- * one vcpu per vcore and we don't need to go to real mode
- * (which implies that host and guest are both using radix MMU mode).
- * r3 = vcpu pointer
- * Most SPRs and all the VSRs have been loaded already.
- */
-_GLOBAL(__kvmhv_vcpu_entry_p9)
-EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -SFS(r1)
-
- li r0, 1
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
- std r3, HSTATE_KVM_VCPU(r13)
- mfcr r4
- stw r4, SFS+8(r1)
-
- std r1, HSTATE_HOST_R1(r13)
-
- reg = 14
- .rept 18
- std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, __VCPU_GPR(reg)(r3)
- reg = reg + 1
- .endr
-
- mfmsr r10
- std r10, HSTATE_HOST_MSR(r13)
-
- mr r4, r3
- b fast_guest_entry_c
-guest_exit_short_path:
- /*
- * Malicious or buggy radix guests may have inserted SLB entries
- * (only 0..3 because radix always runs with UPRT=1), so these must
- * be cleared here to avoid side-channels. slbmte is used rather
- * than slbia, as it won't clear cached translations.
- */
- li r0,0
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
-
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- reg = 14
- .rept 18
- std reg, __VCPU_GPR(reg)(r9)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- lwz r4, SFS+8(r1)
- mtcr r4
-
- mr r3, r12 /* trap number */
-
- addi r1, r1, SFS
- ld r0, PPC_LR_STKOFF(r1)
- mtlr r0
-
- /* If we are in real mode, do a rfid to get back to the caller */
- mfmsr r4
- andi. r5, r4, MSR_IR
- bnelr
- rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
- mtspr SPRN_SRR0, r0
- ld r10, HSTATE_HOST_MSR(r13)
- rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtspr SPRN_SRR1, r10
- RFI_TO_KERNEL
- b .
secondary_too_late:
li r12, 0
@@ -1265,21 +971,16 @@ hdec_soon:
kvmppc_interrupt_hv:
/*
* Register contents:
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | interrupt vector
* R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0
* guest R13 saved in SPRN_SCRATCH0
+ * guest R9 saved in HSTATE_SCRATCH2
*/
- std r9, HSTATE_SCRATCH2(r13)
- lbz r9, HSTATE_IN_GUEST(r13)
- cmpwi r9, KVM_GUEST_MODE_HOST_HV
- beq kvmppc_bad_host_intr
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_SCRATCH2(r13)
- beq kvmppc_interrupt_pr
-#endif
/* We're now back in the host but in guest MMU context */
+ cmpwi r9,KVM_GUEST_MODE_HOST_HV
+ beq kvmppc_bad_host_intr
li r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
@@ -1397,7 +1098,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
mfspr r3,SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3,0
mr r4,r9
bge fast_guest_return
@@ -1409,14 +1110,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
- /* always exit if we're running a nested guest */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq maybe_reenter_guest
@@ -1446,62 +1139,16 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
mr r4, r9
bl kvmhv_accumulate_time
#endif
-#ifdef CONFIG_KVM_XICS
- /* We are exiting, pull the VP from the XIVE */
- lbz r0, VCPU_XIVE_PUSHED(r9)
- cmpwi cr0, r0, 0
- beq 1f
- li r7, TM_SPC_PULL_OS_CTX
- li r6, TM_QW1_OS
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzx r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldx r11, r6, r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzcix r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldcix r11, r6, r10
-3: std r11, VCPU_XIVE_SAVED_STATE(r9)
- /* Fixup some of the state for the next load */
- li r10, 0
- li r0, 0xff
- stb r10, VCPU_XIVE_PUSHED(r9)
- stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
- stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
- eieio
-1:
-#endif /* CONFIG_KVM_XICS */
/*
* Possibly flush the link stack here, before we do a blr in
- * guest_exit_short_path.
+ * kvmhv_switch_to_host.
*/
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
- /* If we came in through the P9 short path, go back out to C now */
- lwz r0, STACK_SLOT_SHORT_PATH(r1)
- cmpwi r0, 0
- bne guest_exit_short_path
-
/* For hash guest, read the guest SLB and save it away */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
li r5, 0
- cmpwi r0, 0
- bne 0f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1525,9 +1172,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r5,VCPU_SLB_MAX(r9)
/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
- b guest_bypass
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
@@ -1540,21 +1184,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
slbmte r6,r5
1: addi r8,r8,16
.endr
- b guest_bypass
-
-0: /*
- * Sanitise radix guest SLB, see guest_exit_short_path comment.
- * We clear vcpu->arch.slb_max to match earlier behaviour.
- */
- li r0,0
- stw r0,VCPU_SLB_MAX(r9)
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
@@ -1564,12 +1193,6 @@ guest_bypass:
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
- /* On P9, if the guest has large decr enabled, don't sign extend */
-BEGIN_FTR_SECTION
- ld r4, VCORE_LPCR(r3)
- andis. r4, r4, LPCR_LD@h
- bne 16f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
@@ -1643,7 +1266,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
mfspr r7, SPRN_CSIGR
@@ -1652,17 +1274,10 @@ BEGIN_FTR_SECTION
std r6, VCPU_ACOP(r9)
std r7, VCPU_CSIGR(r9)
std r8, VCPU_TACR(r9)
-FTR_SECTION_ELSE
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- std r5, VCPU_TID(r9)
- rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
- rotldi r6, r6, 60
- std r6, VCPU_PSSCR(r9)
- /* Restore host HFSCR value */
- ld r7, STACK_SLOT_HFSCR(r1)
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_FSCR(r1)
+ mtspr SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1670,13 +1285,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
li r0, 0
mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
-BEGIN_FTR_SECTION
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
ld r8, STACK_SLOT_IAMR(r1)
@@ -1733,13 +1346,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -1785,80 +1394,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_DAWR0, r6
mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- ld r6, STACK_SLOT_DAWR1(r1)
- ld r7, STACK_SLOT_DAWRX1(r1)
- mtspr SPRN_DAWR1, r6
- mtspr SPRN_DAWRX1, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
-BEGIN_FTR_SECTION
- ld r5, STACK_SLOT_TID(r1)
- ld r6, STACK_SLOT_PSSCR(r1)
- ld r7, STACK_SLOT_PID(r1)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_PID, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
-#ifdef CONFIG_PPC_RADIX_MMU
- /*
- * Are we running hash or radix ?
- */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
- cmpwi cr2, r0, 0
- beq cr2, 2f
-
- /*
- * Radix: do eieio; tlbsync; ptesync sequence in case we
- * interrupted the guest between a tlbie and a ptesync.
- */
- eieio
- tlbsync
- ptesync
-
-BEGIN_FTR_SECTION
- /* Radix: Handle the case where the guest used an illegal PID */
- LOAD_REG_ADDR(r4, mmu_base_pid)
- lwz r3, VCPU_GUEST_PID(r9)
- lwz r5, 0(r4)
- cmpw cr0,r3,r5
- blt 2f
-
- /*
- * Illegal PID, the HW might have prefetched and cached in the TLB
- * some translations for the LPID 0 / guest PID combination which
- * Linux doesn't know about, so we need to flush that PID out of
- * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
- * the right context.
- */
- li r0,0
- mtspr SPRN_LPID,r0
- isync
-
- /* Then do a congruence class local flush */
- ld r6,VCPU_KVM(r9)
- lwz r0,KVM_TLB_SETS(r6)
- mtctr r0
- li r7,0x400 /* IS field = 0b01 */
- ptesync
- sldi r0,r3,32 /* RS has PID */
-1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
- addi r7,r7,0x1000
- bdnz 1b
- ptesync
-END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
-
-2:
-#endif /* CONFIG_PPC_RADIX_MMU */
-
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
-BEGIN_FTR_SECTION
- PPC_CP_ABORT
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
/*
* POWER7/POWER8 guest -> host partition switch code.
@@ -1895,13 +1430,11 @@ kvmhv_switch_to_host:
/* Primary thread switches back to host partition */
lwz r7,KVM_HOST_LPID(r4)
-BEGIN_FTR_SECTION
ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
mtspr SPRN_SDR1,r6 /* switch to host page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -2110,26 +1643,13 @@ kvmppc_tm_emul:
* reflect the HDSI to the guest as a DSI.
*/
kvmppc_hdsi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
-BEGIN_FTR_SECTION
- /* Look for DSISR canary. If we find it, retry instruction */
- cmpdi r6, 0x7fff
- beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- cmpwi r0, 0
- bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -2197,31 +1717,15 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
-.Lradix_hdsi:
- std r4, VCPU_FAULT_DAR(r9)
- stw r6, VCPU_FAULT_DSISR(r9)
-.Lradix_hisi:
- mfspr r5, SPRN_ASDR
- std r5, VCPU_FAULT_GPA(r9)
- b guest_exit_cont
-
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out.
*/
kvmppc_hisi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
- bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_INST_SEGMENT
@@ -2269,10 +1773,6 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
- /* sc 1 from nested guest - give it to L1 to handle */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -2537,7 +2037,7 @@ hcall_real_table:
#else
.long 0 /* 0x2fc - H_XIRR_X*/
#endif
- .long DOTSYM(kvmppc_h_random) - hcall_real_table
+ .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
@@ -2668,13 +2168,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2694,15 +2190,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
-BEGIN_FTR_SECTION
- /* On P9 check whether the guest has large decrementer mode enabled */
- ld r6, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_LPCR(r6)
- andis. r6, r6, LPCR_LD@h
- bne 68f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r3, r3
-68: EXTEND_HDEC(r4)
+ extsw r4, r4
cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
@@ -2747,28 +2236,11 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence: /* desired LPCR value in r5 */
-BEGIN_FTR_SECTION
- /*
- * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
- * enable state loss = 1 (allow SMT mode switch)
- * requested level = 0 (just stop dispatching)
- */
- lis r3, (PSSCR_EC | PSSCR_ESL)@h
- /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
- li r4, LPCR_PECE_HVEE@higher
- sldi r4, r4, 32
- or r5, r5, r4
-FTR_SECTION_ELSE
li r3, PNV_THREAD_NAP
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
-BEGIN_FTR_SECTION
- bl isa300_idle_stop_mayloss
-FTR_SECTION_ELSE
bl isa206_idle_insn_mayloss
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mfspr r0, SPRN_CTRLF
ori r0, r0, 1
@@ -2787,10 +2259,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
beq kvm_end_cede
cmpwi r0, NAPPING_NOVCPU
beq kvm_novcpu_wakeup
-BEGIN_FTR_SECTION
cmpwi r0, NAPPING_UNSPLIT
beq kvm_unsplit_wakeup
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
twi 31,0,0 /* Nap state must not be zero */
33: mr r4, r3
@@ -2810,13 +2280,9 @@ kvm_end_cede:
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2906,47 +2372,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
ld r9, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_XICS
- /* are we using XIVE with single escalation? */
- ld r10, VCPU_XIVE_ESC_VADDR(r9)
- cmpdi r10, 0
- beq 3f
- li r6, XIVE_ESB_SET_PQ_00
- /*
- * If we still have a pending escalation, abort the cede,
- * and we must set PQ to 10 rather than 00 so that we don't
- * potentially end up with two entries for the escalation
- * interrupt in the XIVE interrupt queue. In that case
- * we also don't want to set xive_esc_on to 1 here in
- * case we race with xive_esc_irq().
- */
- lbz r5, VCPU_XIVE_ESC_ON(r9)
- cmpwi r5, 0
- beq 4f
- li r0, 0
- stb r0, VCPU_CEDED(r9)
- /*
- * The escalation interrupts are special as we don't EOI them.
- * There is no need to use the load-after-store ordering offset
- * to set PQ to 10 as we won't use StoreEOI.
- */
- li r6, XIVE_ESB_SET_PQ_10
- b 5f
-4: li r0, 1
- stb r0, VCPU_XIVE_ESC_ON(r9)
- /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
- sync
-5: /* Enable XIVE escalation */
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 1f
- ldx r0, r10, r6
- b 2f
-1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
- ldcix r0, r10, r6
-2: sync
-#endif /* CONFIG_KVM_XICS */
-3: b guest_exit_cont
+ b guest_exit_cont
/* Try to do machine check recovery in real mode */
machine_check_realmode:
@@ -3023,10 +2449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
@@ -3335,73 +2757,12 @@ kvmppc_bad_host_intr:
std r3, STACK_FRAME_OVERHEAD-16(r1)
/*
- * On POWER9 do a minimal restore of the MMU and call C code,
- * which will print a message and panic.
* XXX On POWER7 and POWER8, we just spin here since we don't
* know what the other threads are doing (and we don't want to
* coordinate with them) - but at least we now have register state
* in memory that we might be able to look at from another CPU.
*/
-BEGIN_FTR_SECTION
b .
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- ld r9, HSTATE_KVM_VCPU(r13)
- ld r10, VCPU_KVM(r9)
-
- li r0, 0
- mtspr SPRN_AMR, r0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX0, r0
-BEGIN_FTR_SECTION
- mtspr SPRN_DAWRX1, r0
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
-
- /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
- slbmte r0, r0
- PPC_SLBIA(6)
-
-BEGIN_MMU_FTR_SECTION
- b 4f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-
- ptesync
- ld r8, PACA_SLBSHADOWPTR(r13)
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7, r5, SLB_ESID_V@h
- beq 3f
- slbmte r6, r5
-3: addi r8, r8, 16
- .endr
-
-4: lwz r7, KVM_HOST_LPID(r10)
- mtspr SPRN_LPID, r7
- mtspr SPRN_PID, r0
- ld r8, KVM_HOST_LPCR(r10)
- mtspr SPRN_LPCR, r8
- isync
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- /*
- * Turn on the MMU and jump to C code
- */
- bcl 20, 31, .+4
-5: mflr r3
- addi r3, r3, 9f - 5b
- li r4, -1
- rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
- ld r4, PACAKMSR(r13)
- mtspr SPRN_SRR0, r3
- mtspr SPRN_SRR1, r4
- RFI_TO_KERNEL
-9: addi r3, r1, STACK_FRAME_OVERHEAD
- bl kvmppc_bad_interrupt
- b 9b
/*
* This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 84e5a2dc8be5..a7061ee3b157 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -90,6 +90,7 @@
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
+#include <linux/of.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
@@ -614,7 +615,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
/* Fetch the VMA if addr is not in the latest fetched one */
if (!vma || addr >= vma->vm_end) {
- vma = find_vma_intersection(kvm->mm, addr, addr+1);
+ vma = vma_lookup(kvm->mm, addr);
if (!vma) {
pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
break;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d7733b07f489..71bcb0140461 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -493,7 +493,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 031c8015864a..ac14239f3424 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -378,7 +378,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
return kvmppc_h_pr_logical_ci_load(vcpu);
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1f492aa4c8d6..202046a83fc1 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -164,12 +164,15 @@ kvmppc_interrupt_pr:
/* 64-bit entry. Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH2 = guest R9
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
+ ld r9,HSTATE_SCRATCH2(r13)
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index e7219b6f5f9a..8cfab3547494 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
+#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
@@ -128,6 +129,71 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
/*
+ * Pull a vcpu's context from the XIVE on guest exit.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+
+ if (!vcpu->arch.xive_pushed)
+ return;
+
+ /*
+ * Should not have been pushed if there is no tima
+ */
+ if (WARN_ON(!tima))
+ return;
+
+ eieio();
+ /* First load to pull the context, we ignore the value */
+ __raw_readl(tima + TM_SPC_PULL_OS_CTX);
+ /* Second load to recover the context state (Words 0 and 1) */
+ vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
+
+ /* Fixup some of the state for the next load */
+ vcpu->arch.xive_saved_state.lsmfb = 0;
+ vcpu->arch.xive_saved_state.ack = 0xff;
+ vcpu->arch.xive_pushed = 0;
+ eieio();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
+
+void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+{
+ void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+
+ if (!esc_vaddr)
+ return;
+
+ /* we are using XIVE with single escalation */
+
+ if (vcpu->arch.xive_esc_on) {
+ /*
+ * If we still have a pending escalation, abort the cede,
+ * and we must set PQ to 10 rather than 00 so that we don't
+ * potentially end up with two entries for the escalation
+ * interrupt in the XIVE interrupt queue. In that case
+ * we also don't want to set xive_esc_on to 1 here in
+ * case we race with xive_esc_irq().
+ */
+ vcpu->arch.ceded = 0;
+ /*
+ * The escalation interrupts are special as we don't EOI them.
+ * There is no need to use the load-after-store ordering offset
+ * to set PQ to 10 as we won't use StoreEOI.
+ */
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
+ } else {
+ vcpu->arch.xive_esc_on = true;
+ mb();
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
+ }
+ mb();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
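[Editor's note: the else-branch above encodes a publish-then-touch ordering; xive_esc_on must be visible before the ESB load re-enables the interrupt, or a racing escalation handler could miss the flag. A hedged user-space analogue follows; the ESB page and its side-effecting load are mocked up, and only the fence placement mirrors the code.]

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool esc_on;     /* stands in for vcpu->arch.xive_esc_on */
static unsigned long esb_page; /* mock of the ESB MMIO page */

/* Mock MMIO load; on real hardware the read itself flips the PQ bits. */
static unsigned long esb_load(void)
{
	return esb_page;
}

int main(void)
{
	/* Publish the software flag first... */
	atomic_store(&esc_on, true);
	atomic_thread_fence(memory_order_seq_cst); /* plays the role of mb() */
	/* ...then unmask, so a handler racing with us still sees the flag. */
	(void)esb_load();
	puts(atomic_load(&esc_on) ? "flag ordered before unmask" : "bug");
	return 0;
}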
+
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
@@ -2075,6 +2141,36 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}
+int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ /* The VM should have configured XICS mode before doing XICS hcalls. */
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
+
+ switch (req) {
+ case H_XIRR:
+ return xive_vm_h_xirr(vcpu);
+ case H_CPPR:
+ return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_EOI:
+ return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_IPI:
+ return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ case H_IPOLL:
+ return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_XIRR_X:
+ xive_vm_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ return H_SUCCESS;
+ }
+
+ return H_UNSUPPORTED;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
+
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2257,21 +2353,3 @@ struct kvm_device_ops kvm_xive_ops = {
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
};
-
-void kvmppc_xive_init_module(void)
-{
- __xive_vm_h_xirr = xive_vm_h_xirr;
- __xive_vm_h_ipoll = xive_vm_h_ipoll;
- __xive_vm_h_ipi = xive_vm_h_ipi;
- __xive_vm_h_cppr = xive_vm_h_cppr;
- __xive_vm_h_eoi = xive_vm_h_eoi;
-}
-
-void kvmppc_xive_exit_module(void)
-{
- __xive_vm_h_xirr = NULL;
- __xive_vm_h_ipoll = NULL;
- __xive_vm_h_ipi = NULL;
- __xive_vm_h_cppr = NULL;
- __xive_vm_h_eoi = NULL;
-}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 86c24a4ad809..afe9eeac6d56 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
-extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-
/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 76800c84f2a3..573ecaab3597 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
+#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -1281,13 +1282,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
.has_attr = kvmppc_xive_native_has_attr,
.mmap = kvmppc_xive_native_mmap,
};
-
-void kvmppc_xive_native_init_module(void)
-{
- ;
-}
-
-void kvmppc_xive_native_exit_module(void)
-{
- ;
-}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7d5fe43f85c4..551b30d84aee 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,29 +36,59 @@
unsigned long kvmppc_booke_handlers;
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("itlb_r", itlb_real_miss_exits),
- VCPU_STAT("itlb_v", itlb_virt_miss_exits),
- VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
- VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("isi", isi_exits),
- VCPU_STAT("dsi", dsi_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("doorbell", dbell_exits),
- VCPU_STAT("guest doorbell", gdbell_exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
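[Editor's note: the two header initialisers above lay out a self-describing binary file: the header, then a name/id string, then the descriptor table, then the data values. A hedged sketch of the same offset arithmetic with a mocked-up header struct; this is not the kernel's kvm_stats_header, just the shape of the calculation.]

#include <stdio.h>

#define NAME_SIZE 48 /* stand-in for KVM_STATS_NAME_SIZE */

struct stats_header { /* mock; fields mirror the initialisers above */
	int flags, name_size, num_desc;
	int id_offset, desc_offset, data_offset;
};

int main(void)
{
	enum { NUM_DESC = 25, DESC_SIZE = 56 }; /* pretend descriptor table */

	struct stats_header h = {
		.name_size   = NAME_SIZE,
		.num_desc    = NUM_DESC,
		.id_offset   = sizeof(struct stats_header),
		.desc_offset = sizeof(struct stats_header) + NAME_SIZE,
		.data_offset = sizeof(struct stats_header) + NAME_SIZE +
			       NUM_DESC * DESC_SIZE,
	};
	printf("id@%d desc@%d data@%d\n",
	       h.id_offset, h.desc_offset, h.data_offset);
	return 0;
}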
/* TODO: use vcpu_printf() */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a2a68a958fa0..be33b5321a76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -682,6 +682,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
!kvmppc_hv_ops->enable_dawr1(NULL));
break;
+ case KVM_CAP_PPC_RPT_INVALIDATE:
+ r = 1;
+ break;
#endif
default:
r = 0;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index c3df3a8501d4..2ffcf540f08b 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -13,7 +13,7 @@ obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
-obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 5fef8db3b463..2176a5f70746 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
@@ -357,30 +358,19 @@ static void __init radix_init_pgtable(void)
}
/* Find out how many PID bits are supported */
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
- mmu_base_pid = 1;
- } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
- * When KVM is possible, we only use the top half of the
- * PID space to avoid collisions between host and guest PIDs
- * which can cause problems due to prefetch when exiting the
- * guest with AIL=3
+ * Older versions of KVM on these machines prefer that the
+ * guest use only the low 19 PID bits.
*/
- mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
- mmu_base_pid = 1;
-#endif
- } else {
- /* The guest uses the bottom half of the PID space */
if (!mmu_pid_bits)
mmu_pid_bits = 19;
- mmu_base_pid = 1;
+ } else {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
}
+ mmu_base_pid = 1;
/*
* Allocate Partition table and process table for the
@@ -486,6 +476,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
def = &mmu_psize_defs[idx];
def->shift = shift;
def->ap = ap;
+ def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
}
/* needed ? */
@@ -560,9 +551,13 @@ void __init radix__early_init_devtree(void)
*/
mmu_psize_defs[MMU_PAGE_4K].shift = 12;
mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
+ mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_4K);
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
+ mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_64K);
}
/*
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 409e61210789..318ec4f33661 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -20,10 +20,6 @@
#include "internal.h"
-#define RIC_FLUSH_TLB 0
-#define RIC_FLUSH_PWC 1
-#define RIC_FLUSH_ALL 2
-
/*
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
@@ -130,6 +126,21 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+ unsigned long lpid,
+ unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = PPC_BIT(53); /* IS = 1 */
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -190,6 +201,23 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap, unsigned long ric)
{
@@ -235,6 +263,22 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
}
}
+static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+ unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+ }
+}
+
static inline void fixup_tlbie_pid(unsigned long pid)
{
/*
@@ -254,6 +298,25 @@ static inline void fixup_tlbie_pid(unsigned long pid)
}
}
+static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+{
+ /*
+ * We can use any address for the invalidation, pick one which is
+ * probably unused as an optimisation.
+ */
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+ RIC_FLUSH_TLB);
+ }
+}
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap)
@@ -344,6 +407,31 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+ unsigned long ric)
+{
+ asm volatile("ptesync" : : : "memory");
+
+ /*
+ * Work around the fact that the "ric" argument to __tlbie_pid_lpid
+ * must be a compile-time constant to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
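
[note] The switch above is needed because the "i" constraint in PPC_TLBIE_5 only accepts a compile-time constant, so a runtime "ric" is funneled through switch arms that each pass a literal. A stand-alone sketch of that pattern (tlbie_op and its printf body are stand-ins, not kernel symbols; the RIC_* values match the kernel's):

#include <stdio.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

/* stand-in for the PPC_TLBIE_5 asm; every call site passes a literal,
 * which is what an "i" (immediate) constraint requires */
static inline void tlbie_op(int ric)
{
	printf("tlbie ric=%d\n", ric);
}

static void dispatch(unsigned long ric)
{
	switch (ric) {
	case RIC_FLUSH_TLB: tlbie_op(RIC_FLUSH_TLB); break;
	case RIC_FLUSH_PWC: tlbie_op(RIC_FLUSH_PWC); break;
	default:            tlbie_op(RIC_FLUSH_ALL); break;
	}
}

int main(void)
{
	dispatch(RIC_FLUSH_PWC);
	return 0;
}
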
struct tlbiel_pid {
unsigned long pid;
unsigned long ric;
@@ -469,6 +557,20 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
fixup_tlbie_va_range(addr - page_size, pid, ap);
}
+static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize)
+{
+ unsigned long addr;
+ unsigned long ap = mmu_get_ap(psize);
+
+ for (addr = start; addr < end; addr += page_size)
+ __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+
+ fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+}
+
static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
unsigned long psize, unsigned long ric)
{
@@ -549,6 +651,18 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize, bool also_pwc)
+{
+ asm volatile("ptesync" : : : "memory");
+ if (also_pwc)
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+
static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
@@ -1338,47 +1452,57 @@ void radix__flush_tlb_all(void)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+/*
+ * Performs process-scoped invalidations for a given LPID
+ * as part of the H_RPT_INVALIDATE hcall.
+ */
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
{
- unsigned long pid = mm->context.id;
+ unsigned long psize, nr_pages;
+ struct mmu_psize_def *def;
+ bool flush_pid;
- if (unlikely(pid == MMU_NO_CONTEXT))
+ /*
+ * An H_RPTI_TYPE_ALL request implies RIC=3, hence
+ * do a single IS=1 based flush.
+ */
+ if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
return;
+ }
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- return;
+ if (type & H_RPTI_TYPE_PWC)
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
- /*
- * If this context hasn't run on that CPU before and KVM is
- * around, there's a slim chance that the guest on another
- * CPU just brought in obsolete translation into the TLB of
- * this CPU due to a bad prefetch using the guest PID on
- * the way into the hypervisor.
- *
- * We work around this here. If KVM is possible, we check if
- * any sibling thread is in KVM. If it is, the window may exist
- * and thus we flush that PID from the core.
- *
- * A potential future improvement would be to mark which PIDs
- * have never been used on the system and avoid it if the PID
- * is new and the process has no other cpumask bit set.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
- int cpu = smp_processor_id();
- int sib = cpu_first_thread_sibling(cpu);
- bool flush = false;
-
- for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
- if (sib == cpu)
- continue;
- if (!cpu_possible(sib))
- continue;
- if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
- flush = true;
+ /* Full PID flush */
+ if (start == 0 && end == -1)
+ return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+
+ /* Do range invalidation for all the valid page sizes */
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+
+ /*
+ * If the number of pages spanning the range is above
+ * the ceiling, convert the request into a full PID flush.
+ * Since a PID flush takes out all the page sizes, there
+ * is no need to consider the remaining page sizes.
+ */
+ if (flush_pid) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ return;
}
- if (flush)
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbie_va_range_lpid(start, end, pid, lpid,
+ (1UL << def->shift), psize, false);
}
}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
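
[note] A user-space sketch of the flush-ceiling heuristic in do_h_rpt_invalidate_prt() above: past a threshold of pages, one full PID flush is cheaper than per-page tlbie. The ceiling value and the two flush helpers here are stand-ins, not the kernel symbols:

#include <stdio.h>

static const unsigned long tlb_single_page_flush_ceiling = 33; /* stand-in value */

static void flush_full_pid(void)
{
	puts("full PID flush");
}

static void flush_one_page(unsigned long va)
{
	printf("flush va 0x%lx\n", va);
}

static void flush_range(unsigned long start, unsigned long end, unsigned int shift)
{
	unsigned long page_size = 1UL << shift;
	unsigned long nr_pages = (end - start) >> shift;
	unsigned long va;

	/* above the ceiling, one full PID flush beats per-page invalidation */
	if (nr_pages > tlb_single_page_flush_ceiling) {
		flush_full_pid();
		return;
	}
	for (va = start; va < end; va += page_size)
		flush_one_page(va);
}

int main(void)
{
	flush_range(0x10000, 0x14000, 12); /* 4 pages: per-page flushes */
	flush_range(0x0, 0x100000, 12);    /* 256 pages: full PID flush */
	return 0;
}
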
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 043bbeaf407c..c5e520c6f13b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/sparsemem.h>
#include <asm/svm.h>
#include <mm/mmu_decl.h>
@@ -126,7 +127,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
}
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -161,7 +162,7 @@ static int __init mark_nonram_nosave(void)
return 0;
}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
return 0;
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index a857af401738..74246536b832 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -83,9 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
- if (new_on_cpu)
- radix_kvm_prefetch_workaround(next);
- else
+ if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);
/*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 16d4d1b6a1ff..51622411a7cc 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
bool use_siar = regs_use_siar(regs);
unsigned long siar = mfspr(SPRN_SIAR);
- if (ppmu->flags & PPMU_P10_DD1) {
+ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
if (siar)
return siar;
else
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index e7c976bcadff..cb70c5f25bc6 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -35,6 +35,7 @@ config PPC_IBM_CELL_BLADE
config AXON_MSI
bool
depends on PPC_IBM_CELL_BLADE && PCI_MSI
+ select IRQ_DOMAIN_NOMAP
default y
menu "Cell Broadband Engine options"
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 35bbd15582af..b207a7f99be5 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -10,6 +10,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..609bda2ad5dd 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index c02d8c503b29..b97bf12801eb 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -24,6 +24,7 @@ config PPC_PMAC32_PSURGE
bool "Support for powersurge upgrade cards" if EXPERT
depends on SMP && PPC32 && PPC_PMAC
select PPC_SMP_MUXED_IPI
+ select IRQ_DOMAIN_NOMAP
default y
help
The powersurge cpu boards can be used in the generation
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 999997d9e9a9..528a7e0cf83a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -604,7 +604,7 @@ struct p9_sprs {
u64 uamor;
};
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -803,8 +801,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -895,7 +892,7 @@ struct p10_sprs {
*/
};
-static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power10_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -991,8 +986,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
{
unsigned long srr1;
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- __ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
- __ppc64_runlatch_on();
-#else
- /*
- * Tell KVM we're entering idle.
- * This does not have to be done in real mode because the P9 MMU
- * is independent per-thread. Some steppings share radix/hash mode
- * between threads, but in that case KVM has a barrier sync in real
- * mode before and after switching between radix and hash.
- *
- * kvm_start_guest must still be called in real mode though, hence
- * the false argument.
- */
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-
- __ppc64_runlatch_off();
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, false);
- else
- srr1 = power9_idle_stop(psscr, false);
- __ppc64_runlatch_on();
-
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
- /* Order setting hwthread_state vs. testing hwthread_req */
- smp_mb();
- if (local_paca->kvm_hstate.hwthread_req)
- srr1 = idle_kvm_start_guest(srr1);
- mtmsr(MSR_KERNEL);
-#endif
+ srr1 = power9_idle_stop(psscr);
return srr1;
}
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,
__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
+ srr1 = power9_idle_stop(psscr);
__ppc64_runlatch_on();
fini_irq_for_idle_irqsoff();
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index e32406e918d0..4d0535cc7946 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,6 +7,7 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
select HAVE_PCI
+ select IRQ_DOMAIN_NOMAP
help
This option enables support for the Sony PS3 game console
and other platforms using the PS3 hypervisor. Enabling this
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 78f2339ed5cb..49871427f599 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
@@ -45,7 +46,7 @@
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
- * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
+ * gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
@@ -721,7 +722,7 @@ static unsigned int ps3_get_irq(void)
}
#if defined(DEBUG)
- if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
+ if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index a15ab33646b3..c6c79ef55e13 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -42,6 +42,7 @@
#include <linux/kobject.h>
#include <linux/dma-map-ops.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 48866e6c1efb..00705258ecf9 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 13583bbc3e8e..5fa5fa215541 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index c1d76c344351..dc1a151c63d7 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -260,7 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
+ i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
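
[note] The removed irq_domain_add_legacy_isa() helper boiled down to a legacy-domain registration with the first NR_IRQS_LEGACY hwirqs pre-mapped 1:1 onto the same virq numbers, which is why this conversion (and the identical one in tsi108_pci.c below) is mechanical. Sketch of the equivalence, kernel context assumed:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* equivalent of the old ISA helper: hwirqs 0..NR_IRQS_LEGACY-1 mapped
 * 1:1 onto virqs 0..NR_IRQS_LEGACY-1 at creation time */
static struct irq_domain *add_legacy_isa_domain(struct device_node *node,
						const struct irq_domain_ops *ops,
						void *host_data)
{
	return irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, ops, host_data);
}
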
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b0426f28946a..995fb2ada507 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -602,7 +602,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
- if (irq < NUM_ISA_INTERRUPTS)
+ if (irq < NR_IRQS_LEGACY)
return NULL;
return irq_get_chip_data(irq);
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..042bb38fa5c2 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -404,7 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
+ pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 21b9d1bf39ff..6765d9e264a3 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 68fd2540b093..675d708863d5 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 7e4305c01bac..fdf8db4444b6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -201,7 +201,7 @@ void xics_migrate_irqs_away(void)
struct ics *ics;
/* We can't set affinity on ISA interrupts */
- if (virq < NUM_ISA_INTERRUPTS)
+ if (virq < NR_IRQS_LEGACY)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 785c292d104b..97796c6b63f0 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -3,6 +3,7 @@ config PPC_XIVE
bool
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
+ select IRQ_DOMAIN_NOMAP
config PPC_XIVE_NATIVE
bool
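
[note] IRQ_DOMAIN_NOMAP, also selected for AXON_MSI, PPC_PMAC32_PSURGE and PPC_PS3 above, gates the no-map domain flavour these platforms rely on: virqs are handed out directly with no hwirq translation. A sketch assuming the 5.14-era API (the function names are real, the usage is illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static unsigned int example_alloc_virq(struct device_node *np,
				       const struct irq_domain_ops *ops)
{
	struct irq_domain *d;

	d = irq_domain_add_nomap(np, 1024, ops, NULL); /* cap of 1024 virqs */
	if (!d)
		return 0;
	/* no fixed hwirq: just take the next free virq in the domain */
	return irq_create_direct_mapping(d);
}
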
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8173e92f19d..84de2d7c2f40 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3162,6 +3162,7 @@ memzcan(void)
static void show_task(struct task_struct *tsk)
{
+ unsigned int p_state = READ_ONCE(tsk->__state);
char state;
/*
@@ -3169,14 +3170,14 @@ static void show_task(struct task_struct *tsk)
* appropriate for calling from xmon. This could be moved
* to a common, generic, routine used by both.
*/
- state = (tsk->state == 0) ? 'R' :
- (tsk->state < 0) ? 'U' :
- (tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
- (tsk->state & TASK_STOPPED) ? 'T' :
- (tsk->state & TASK_TRACED) ? 'C' :
+ state = (p_state == 0) ? 'R' :
+ (p_state < 0) ? 'U' :
+ (p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p_state & TASK_STOPPED) ? 'T' :
+ (p_state & TASK_TRACED) ? 'C' :
(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
(tsk->exit_state & EXIT_DEAD) ? 'E' :
- (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
tsk->thread.ksp, tsk->thread.regs,
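
[note] The xmon change snapshots the racy state word once with READ_ONCE() and tests the local copy, so all the ternary arms agree even if the task changes state mid-dump. A stand-alone illustration, with C11 atomics standing in for READ_ONCE and stand-in flag values:

#include <stdatomic.h>
#include <stdio.h>

#define TASK_INTERRUPTIBLE   0x01u
#define TASK_UNINTERRUPTIBLE 0x02u

static _Atomic unsigned int task_state;

static char state_char(void)
{
	/* one load: every test below sees the same value even if another
	 * thread updates task_state concurrently */
	unsigned int s = atomic_load_explicit(&task_state, memory_order_relaxed);

	return (s == 0)                   ? 'R' :
	       (s & TASK_UNINTERRUPTIBLE) ? 'D' :
	       (s & TASK_INTERRUPTIBLE)   ? 'S' : '?';
}

int main(void)
{
	atomic_store(&task_state, TASK_INTERRUPTIBLE);
	printf("%c\n", state_char()); /* S */
	return 0;
}
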
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a8ad8eb76120..15f9490a7aad 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -34,6 +34,7 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -60,11 +61,11 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
@@ -79,9 +80,9 @@ config RISCV
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KPROBES
- select HAVE_KPROBES_ON_FTRACE
- select HAVE_KRETPROBES
+ select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+ select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -230,11 +231,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
@@ -331,7 +332,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "2"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ed963761fbd2..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,6 +14,7 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
+ select RISCV_ERRATA_ALTERNATIVE
select ERRATA_SIFIVE
help
This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 3eb9590a0775..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif
@@ -38,6 +38,15 @@ else
KBUILD_LDFLAGS += -melf32lriscv
endif
+ifeq ($(CONFIG_LD_IS_LLD),y)
+ KBUILD_CFLAGS += -mno-relax
+ KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+ KBUILD_CFLAGS += -Wa,-mno-relax
+ KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 622b12771fd3..855c1502d912 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 74c47fe9fc22..d90e4eb0ade8 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 8eef82e4199f..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -273,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 20 21 22>;
+ interrupts = <19 21 22 20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile
index bdd5fc843b8e..2fde48db0619 100644
--- a/arch/riscv/errata/sifive/Makefile
+++ b/arch/riscv/errata/sifive/Makefile
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
obj-y += errata.o
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 88c08705f64a..67406c376389 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -51,7 +51,7 @@
REG_ASM " " newlen "\n" \
".word " errata_id "\n"
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
"886 :\n" \
old_c "\n" \
"887 :\n" \
- ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 400a8c8b6de7..ac9bdf4fc404 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,22 +25,22 @@
#define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
*/
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
return ret; \
} \
static __always_inline \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_add_return atomic64_add_return
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add atomic64_fetch_add
-#define atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and atomic64_fetch_and
-#define atomic64_fetch_or atomic64_fetch_or
-#define atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
: "memory");
return prev;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
*/
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size); \
}
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS()
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#define arch_atomic_xchg arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
int prev, rc;
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
return prev - offset;
}
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
s64 prev;
long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
return prev - offset;
}
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#endif
#endif /* _ASM_RISCV_ATOMIC_H */
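
[note] The arch_ prefix matters because the generic layer (asm-generic/atomic-instrumented.h at this point in time) wraps each arch_atomic_* primitive with sanitizer instrumentation and exposes the unprefixed name. A paraphrased sketch of that layering, not verbatim kernel code:

#include <linux/instrumented.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	instrument_atomic_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}
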
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 262e5bbb2776..36dc962f6343 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -37,7 +37,7 @@
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -72,7 +72,7 @@
__ret; \
})
-#define xchg_acquire(ptr, x) \
+#define arch_xchg_acquire(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \
@@ -107,7 +107,7 @@
__ret; \
})
-#define xchg_release(ptr, x) \
+#define arch_xchg_release(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \
@@ -140,7 +140,7 @@
__ret; \
})
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@@ -149,13 +149,13 @@
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
/*
@@ -199,7 +199,7 @@
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -245,7 +245,7 @@
__ret; \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -291,7 +291,7 @@
__ret; \
})
-#define cmpxchg_release(ptr, o, n) \
+#define arch_cmpxchg_release(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -337,7 +337,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -345,31 +345,31 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg_relaxed((ptr), (o), (n)) \
+ arch_cmpxchg_relaxed((ptr), (o), (n)) \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index 1e954101906a..e4e291d40759 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -42,8 +42,8 @@ struct kimage_arch {
unsigned long fdt_addr;
};
-const extern unsigned char riscv_kexec_relocate[];
-const extern unsigned int riscv_kexec_relocate_size;
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
unsigned long jump_addr,
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9469f464e71a..380cd3a7e548 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -30,9 +30,8 @@
#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
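
[note] On 64-bit the BPF JIT region is now anchored to the end of the modules area rather than floating after _end, keeping it within branch range of the kernel. A quick stand-alone check of the arithmetic; the MODULES_END address here is a stand-in:

#include <stdio.h>

#define SZ_128M              (128UL << 20)
#define MODULES_END          0xffffffff80000000UL /* stand-in address */
#define BPF_JIT_REGION_SIZE  SZ_128M
#define BPF_JIT_REGION_END   MODULES_END
#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)

int main(void)
{
	printf("bpf jit: 0x%lx - 0x%lx (%lu MiB)\n",
	       BPF_JIT_REGION_START, BPF_JIT_REGION_END,
	       BPF_JIT_REGION_SIZE >> 20);
	return 0;
}
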
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index cc048143fba5..9e99e1db156b 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -14,8 +14,9 @@
#include <asm/set_memory.h> /* For set_memory_x() */
#include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */
+#include <linux/reboot.h>
-/**
+/*
* kexec_image_info - Print received image details
*/
static void
@@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
}
}
-/**
+/*
* machine_kexec_prepare - Initialize kexec
*
* This function is called from do_kexec_load, when the user has
@@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
}
-/**
+/*
* machine_kexec_cleanup - Cleanup any leftovers from
* machine_kexec_prepare
*
@@ -135,7 +136,7 @@ void machine_shutdown(void)
#endif
}
-/**
+/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
* This function is called by crash_kexec just before machine_kexec
@@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-/**
+/*
* machine_kexec - Jump to the loaded kimage
*
* This function is called by kernel_kexec which is called by the
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 10b965c34536..247e33fa5bc7 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@@ -91,6 +92,7 @@ void *alloc_insn_page(void)
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
}
+#endif
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -277,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 03901d3a8b02..9a1b7a0603b2 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -231,13 +231,13 @@ static void __init init_resources(void)
/* Clean-up any unused pre-allocated resources */
mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
return;
error:
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 9a408e2942ac..bd82375db51a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
*/
- preempt_disable();
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 2b3e0cb90d78..ff467b98c3e3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
- pc = (unsigned long)walk_stackframe;
+ } else if (task == current) {
+ fp = (unsigned long)__builtin_frame_address(1);
+ sp = (unsigned long)__builtin_frame_address(0);
+ pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
- pr_cont("%sCall Trace:\n", loglvl);
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
+ pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
@@ -132,14 +132,14 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
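
[note] For the current task the walk now starts from the caller's frame via __builtin_frame_address(1), so the unwinder's own frame no longer appears in traces. A user-space sketch of that trick; nonzero arguments to __builtin_frame_address need frame pointers, so this is compiler- and flag-dependent:

#include <stdio.h>

static void __attribute__((noinline)) walk_start(void)
{
	void *fp = __builtin_frame_address(1);  /* caller's frame, not ours */
	void *pc = __builtin_return_address(0); /* pc inside the caller */

	printf("walk from fp=%p pc=%p\n", fp, pc);
}

int main(void)
{
	walk_start(); /* the unwinder's own frame is skipped */
	return 0;
}
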
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0721b9798595..7bc88d8aab97 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section __section(".xip.traps")
+#else
+#define __trap_section
+#endif
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage __visible void name(struct pt_regs *regs) \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_load(regs))
return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
"Oops - load address misaligned");
}
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_store(regs))
return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 4b29b9917f99..a3ff09c4c3f9 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -99,9 +99,22 @@ SECTIONS
}
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
__init_end = .;
+ . = ALIGN(16);
+ .xip.traps : {
+ __xip_traps_start = .;
+ *(.xip.traps)
+ __xip_traps_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4faf8bd157ea..4c4c92ce0bb8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
unsigned long init_data_start = (unsigned long)__init_data_begin;
unsigned long rodata_start = (unsigned long)__start_rodata;
unsigned long data_start = (unsigned long)_data;
- unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+ unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+ unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
/* rodata section is marked readonly in mark_rodata_ro */
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+ set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 9daacae93e33..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
void __init kasan_init(void)
{
- phys_addr_t _start, _end;
+ phys_addr_t p_start, p_end;
u64 i;
/*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
/* Populate the linear mapping */
- for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)__va(_start);
- void *end = (void *)__va(_end);
+ for_each_mem_range(i, &p_start, &p_end) {
+ void *start = (void *)__va(p_start);
+ void *end = (void *)__va(p_end);
if (start >= end)
break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
/* Populate kernel, BPF, modules mapping */
kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
- kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+ kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b4c7c34069f8..a49971647f81 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -117,6 +117,7 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
@@ -475,7 +476,7 @@ config NUMA
config NODES_SHIFT
int
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
default "1"
config SCHED_SMT
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7c93c6573524..7138d189cc42 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -147,6 +147,4 @@ ATOMIC64_OPS(xor)
#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v)
-#define ARCH_ATOMIC
-
#endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8925f3969478..9b4473f76e56 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -361,6 +361,7 @@ struct sie_page {
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 exit_userspace;
u64 exit_null;
u64 exit_external_request;
@@ -370,13 +371,7 @@ struct kvm_vcpu_stat {
u64 exit_validity;
u64 exit_instruction;
u64 exit_pei;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
u64 halt_no_poll_steal;
- u64 halt_wakeup;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 instruction_lctl;
u64 instruction_lctlg;
u64 instruction_stctl;
@@ -755,12 +750,12 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
u64 inject_io;
u64 inject_float_mchk;
u64 inject_pfault_done;
u64 inject_service_signal;
u64 inject_virtio;
- u64 remote_tlb_flush;
};
struct kvm_arch_memory_slot {
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index cc98f9b78fd4..479dc76e0eca 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -68,9 +68,9 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/*
* These are used to make use of C type-checking..
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 29c7ecd5ad1d..b38f7b781564 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -344,8 +344,6 @@ static inline int is_module_addr(void *addr)
#define PTRS_PER_P4D _CRST_ENTRIES
#define PTRS_PER_PGD _CRST_ENTRIES
-#define MAX_PTRS_PER_P4D PTRS_PER_P4D
-
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index b49e0492842c..23ff51be7e29 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -32,7 +32,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
@@ -91,7 +91,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 8fc52679543d..cb4f73c7228d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -137,7 +137,6 @@ struct slibe {
* @user0: user definable value
* @res4: reserved parameter
* @user1: user definable value
- * @user2: user defineable value
*/
struct qaob {
u64 res0[6];
@@ -152,8 +151,7 @@ struct qaob {
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
- u64 user1;
- u64 user2;
+ u8 user1[16];
} __attribute__ ((packed, aligned(256)));
/**
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 2b543163d90a..76c6034428be 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -91,12 +91,16 @@ struct stack_frame {
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
-#define CALL_FMT_0 "=&d" (r2) :
-#define CALL_FMT_1 "+&d" (r2) :
-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+/*
+ * To keep this simple, mark registers 2-6 as being changed (volatile)
+ * by the called function, even though register 6 is saved/nonvolatile.
+ */
+#define CALL_FMT_0 "=&d" (r2)
+#define CALL_FMT_1 "+&d" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
@@ -118,7 +122,7 @@ struct stack_frame {
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
- [_stack] "R" (stack), \
+ : [_stack] "R" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_frame] "d" (frame), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 12de7a9c85b3..e84f495e7eb2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -418,6 +418,7 @@ ENTRY(\name)
xgr %r6,%r6
xgr %r7,%r7
xgr %r10,%r10
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
tm %r8,0x0001 # coming from user space?
@@ -651,9 +652,9 @@ ENDPROC(stack_overflow)
.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
- larl %r13,.Lsie_skip
+ lghi %r13,.Lsie_skip - .Lsie_entry
clgr %r9,%r13
- jh .Lcleanup_sie_int
+ jhe .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index aae24dc75df6..74b0bd2c24d4 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -446,23 +446,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(p);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (p->fault_handler && p->fault_handler(p, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
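Editorial note: this deletion is part of a tree-wide removal of the kprobe ->fault_handler callback; identical hunks appear below for sh and sparc. A fault taken while a probe handler runs now goes straight to the kernel's own exception fixup, with no per-probe veto and no nmissed accounting. A minimal sketch of the simplified control flow, with illustrative stand-ins rather than the arch APIs:

#include <stdbool.h>

struct probe { int hits; };

static bool exception_fixup_stub(void) { return true; } /* hypothetical */

static int probe_fault(struct probe *p)
{
	/* removed: p->fault_handler(...) call and nmissed bump */
	(void)p;
	return exception_fixup_stub() ? 1 : 0;
}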
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e20bed1ed34a..7ae5dde9c54d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -180,7 +180,7 @@ unsigned long get_wchan(struct task_struct *p)
struct unwind_state state;
unsigned long ip = 0;
- if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+ if (!p || p == current || task_is_running(p) || !task_stack_page(p))
return 0;
if (!try_get_task_stack(p))
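Editorial note: the get_wchan() hunks in this series replace open-coded "p->state == TASK_RUNNING" tests with the task_is_running() helper, which performs the comparison through a single torn-free load. A self-contained sketch of the helper's shape, using C11 atomics in place of READ_ONCE():

#include <stdatomic.h>

#define TASK_RUNNING 0x0000

struct task_struct { _Atomic unsigned int __state; };

/* one relaxed load, mirroring READ_ONCE() in the real helper */
static inline int task_is_running(struct task_struct *t)
{
	return atomic_load_explicit(&t->__state, memory_order_relaxed)
		== TASK_RUNNING;
}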
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 90163e6184f5..080e7aed181f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2fec2b80d35d..111909aeb8d2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -878,7 +878,6 @@ static void smp_init_secondary(void)
restore_access_regs(S390_lowcore.access_regs_save_area);
cpu_init();
rcu_cpu_starting(cpu);
- preempt_disable();
init_cpu_timer();
vtime_init();
vdso_getcpu_init();
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bfcc327acc6b..26aa2614ee35 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
{
static cpumask_t mask;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
@@ -83,10 +86,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
default:
fallthrough;
case TOPOLOGY_MODE_SINGLE:
- cpumask_copy(&mask, cpumask_of(cpu));
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
+out:
cpumask_copy(dst, &mask);
}
@@ -95,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
static cpumask_t mask;
int i;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
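Editorial note: both topology hunks above adopt the same pattern: start from an empty mask, bail out early for an offline CPU, and only then seed the mask with the CPU itself, so a stale cpumask copy can never leak through for offline CPUs. A sketch of the pattern with a plain bitmask standing in for cpumask_t (assumes at most 64 CPUs; the online predicate is hypothetical):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t mask_t;                        /* assumption: <= 64 CPUs */

static bool cpu_online_stub(int cpu) { return cpu != 3; } /* hypothetical */

static mask_t cpu_map(int cpu)
{
	mask_t mask = 0;                        /* cpumask_clear() */
	if (!cpu_online_stub(cpu))
		return mask;                    /* offline: report empty mask */
	mask |= (mask_t)1 << cpu;               /* cpumask_set_cpu() */
	return mask;
}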
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 12decca22e7c..b3aaadc60ead 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -4,7 +4,8 @@
# Copyright IBM Corp. 2008
KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o \
+ $(KVM)/irqchip.o $(KVM)/vfio.o $(KVM)/binary_stats.o
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1296fc10f80c..f9fb1e1d960d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -58,112 +58,132 @@
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("userspace_handled", exit_userspace),
- VCPU_STAT("exit_null", exit_null),
- VCPU_STAT("pfault_sync", pfault_sync),
- VCPU_STAT("exit_validity", exit_validity),
- VCPU_STAT("exit_stop_request", exit_stop_request),
- VCPU_STAT("exit_external_request", exit_external_request),
- VCPU_STAT("exit_io_request", exit_io_request),
- VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
- VCPU_STAT("exit_instruction", exit_instruction),
- VCPU_STAT("exit_pei", exit_pei),
- VCPU_STAT("exit_program_interruption", exit_program_interruption),
- VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
- VCPU_STAT("exit_operation_exception", exit_operation_exception),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("instruction_lctlg", instruction_lctlg),
- VCPU_STAT("instruction_lctl", instruction_lctl),
- VCPU_STAT("instruction_stctl", instruction_stctl),
- VCPU_STAT("instruction_stctg", instruction_stctg),
- VCPU_STAT("deliver_ckc", deliver_ckc),
- VCPU_STAT("deliver_cputm", deliver_cputm),
- VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
- VCPU_STAT("deliver_external_call", deliver_external_call),
- VCPU_STAT("deliver_service_signal", deliver_service_signal),
- VCPU_STAT("deliver_virtio", deliver_virtio),
- VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
- VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
- VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
- VCPU_STAT("deliver_program", deliver_program),
- VCPU_STAT("deliver_io", deliver_io),
- VCPU_STAT("deliver_machine_check", deliver_machine_check),
- VCPU_STAT("exit_wait_state", exit_wait_state),
- VCPU_STAT("inject_ckc", inject_ckc),
- VCPU_STAT("inject_cputm", inject_cputm),
- VCPU_STAT("inject_external_call", inject_external_call),
- VM_STAT("inject_float_mchk", inject_float_mchk),
- VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
- VM_STAT("inject_io", inject_io),
- VCPU_STAT("inject_mchk", inject_mchk),
- VM_STAT("inject_pfault_done", inject_pfault_done),
- VCPU_STAT("inject_program", inject_program),
- VCPU_STAT("inject_restart", inject_restart),
- VM_STAT("inject_service_signal", inject_service_signal),
- VCPU_STAT("inject_set_prefix", inject_set_prefix),
- VCPU_STAT("inject_stop_signal", inject_stop_signal),
- VCPU_STAT("inject_pfault_init", inject_pfault_init),
- VM_STAT("inject_virtio", inject_virtio),
- VCPU_STAT("instruction_epsw", instruction_epsw),
- VCPU_STAT("instruction_gs", instruction_gs),
- VCPU_STAT("instruction_io_other", instruction_io_other),
- VCPU_STAT("instruction_lpsw", instruction_lpsw),
- VCPU_STAT("instruction_lpswe", instruction_lpswe),
- VCPU_STAT("instruction_pfmf", instruction_pfmf),
- VCPU_STAT("instruction_ptff", instruction_ptff),
- VCPU_STAT("instruction_stidp", instruction_stidp),
- VCPU_STAT("instruction_sck", instruction_sck),
- VCPU_STAT("instruction_sckpf", instruction_sckpf),
- VCPU_STAT("instruction_spx", instruction_spx),
- VCPU_STAT("instruction_stpx", instruction_stpx),
- VCPU_STAT("instruction_stap", instruction_stap),
- VCPU_STAT("instruction_iske", instruction_iske),
- VCPU_STAT("instruction_ri", instruction_ri),
- VCPU_STAT("instruction_rrbe", instruction_rrbe),
- VCPU_STAT("instruction_sske", instruction_sske),
- VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
- VCPU_STAT("instruction_essa", instruction_essa),
- VCPU_STAT("instruction_stsi", instruction_stsi),
- VCPU_STAT("instruction_stfl", instruction_stfl),
- VCPU_STAT("instruction_tb", instruction_tb),
- VCPU_STAT("instruction_tpi", instruction_tpi),
- VCPU_STAT("instruction_tprot", instruction_tprot),
- VCPU_STAT("instruction_tsch", instruction_tsch),
- VCPU_STAT("instruction_sthyi", instruction_sthyi),
- VCPU_STAT("instruction_sie", instruction_sie),
- VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
- VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
- VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
- VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
- VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
- VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
- VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
- VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
- VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
- VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
- VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
- VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
- VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
- VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
- VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
- VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
- VCPU_STAT("instruction_diag_10", diagnose_10),
- VCPU_STAT("instruction_diag_44", diagnose_44),
- VCPU_STAT("instruction_diag_9c", diagnose_9c),
- VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
- VCPU_STAT("diag_9c_forward", diagnose_9c_forward),
- VCPU_STAT("instruction_diag_258", diagnose_258),
- VCPU_STAT("instruction_diag_308", diagnose_308),
- VCPU_STAT("instruction_diag_500", diagnose_500),
- VCPU_STAT("instruction_diag_other", diagnose_other),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, inject_io),
+ STATS_DESC_COUNTER(VM, inject_float_mchk),
+ STATS_DESC_COUNTER(VM, inject_pfault_done),
+ STATS_DESC_COUNTER(VM, inject_service_signal),
+ STATS_DESC_COUNTER(VM, inject_virtio)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, exit_userspace),
+ STATS_DESC_COUNTER(VCPU, exit_null),
+ STATS_DESC_COUNTER(VCPU, exit_external_request),
+ STATS_DESC_COUNTER(VCPU, exit_io_request),
+ STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
+ STATS_DESC_COUNTER(VCPU, exit_stop_request),
+ STATS_DESC_COUNTER(VCPU, exit_validity),
+ STATS_DESC_COUNTER(VCPU, exit_instruction),
+ STATS_DESC_COUNTER(VCPU, exit_pei),
+ STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
+ STATS_DESC_COUNTER(VCPU, instruction_lctl),
+ STATS_DESC_COUNTER(VCPU, instruction_lctlg),
+ STATS_DESC_COUNTER(VCPU, instruction_stctl),
+ STATS_DESC_COUNTER(VCPU, instruction_stctg),
+ STATS_DESC_COUNTER(VCPU, exit_program_interruption),
+ STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
+ STATS_DESC_COUNTER(VCPU, exit_operation_exception),
+ STATS_DESC_COUNTER(VCPU, deliver_ckc),
+ STATS_DESC_COUNTER(VCPU, deliver_cputm),
+ STATS_DESC_COUNTER(VCPU, deliver_external_call),
+ STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_service_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_virtio),
+ STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_program),
+ STATS_DESC_COUNTER(VCPU, deliver_io),
+ STATS_DESC_COUNTER(VCPU, deliver_machine_check),
+ STATS_DESC_COUNTER(VCPU, exit_wait_state),
+ STATS_DESC_COUNTER(VCPU, inject_ckc),
+ STATS_DESC_COUNTER(VCPU, inject_cputm),
+ STATS_DESC_COUNTER(VCPU, inject_external_call),
+ STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, inject_mchk),
+ STATS_DESC_COUNTER(VCPU, inject_pfault_init),
+ STATS_DESC_COUNTER(VCPU, inject_program),
+ STATS_DESC_COUNTER(VCPU, inject_restart),
+ STATS_DESC_COUNTER(VCPU, inject_set_prefix),
+ STATS_DESC_COUNTER(VCPU, inject_stop_signal),
+ STATS_DESC_COUNTER(VCPU, instruction_epsw),
+ STATS_DESC_COUNTER(VCPU, instruction_gs),
+ STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+ STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ STATS_DESC_COUNTER(VCPU, instruction_sck),
+ STATS_DESC_COUNTER(VCPU, instruction_sckpf),
+ STATS_DESC_COUNTER(VCPU, instruction_stidp),
+ STATS_DESC_COUNTER(VCPU, instruction_spx),
+ STATS_DESC_COUNTER(VCPU, instruction_stpx),
+ STATS_DESC_COUNTER(VCPU, instruction_stap),
+ STATS_DESC_COUNTER(VCPU, instruction_iske),
+ STATS_DESC_COUNTER(VCPU, instruction_ri),
+ STATS_DESC_COUNTER(VCPU, instruction_rrbe),
+ STATS_DESC_COUNTER(VCPU, instruction_sske),
+ STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
+ STATS_DESC_COUNTER(VCPU, instruction_stsi),
+ STATS_DESC_COUNTER(VCPU, instruction_stfl),
+ STATS_DESC_COUNTER(VCPU, instruction_tb),
+ STATS_DESC_COUNTER(VCPU, instruction_tpi),
+ STATS_DESC_COUNTER(VCPU, instruction_tprot),
+ STATS_DESC_COUNTER(VCPU, instruction_tsch),
+ STATS_DESC_COUNTER(VCPU, instruction_sie),
+ STATS_DESC_COUNTER(VCPU, instruction_essa),
+ STATS_DESC_COUNTER(VCPU, instruction_sthyi),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
+ STATS_DESC_COUNTER(VCPU, diagnose_10),
+ STATS_DESC_COUNTER(VCPU, diagnose_44),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
+ STATS_DESC_COUNTER(VCPU, diagnose_258),
+ STATS_DESC_COUNTER(VCPU, diagnose_308),
+ STATS_DESC_COUNTER(VCPU, diagnose_500),
+ STATS_DESC_COUNTER(VCPU, diagnose_other),
+ STATS_DESC_COUNTER(VCPU, pfault_sync)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* allow nested virtualization in KVM (if enabled by user space) */
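Editorial note: the stats rewrite above moves s390 KVM from the old debugfs name/value table to the self-describing binary stats interface: a fixed header, then an id string of name_size bytes, then the descriptor array, then the raw u64 data, each located by an offset recorded in the header, exactly as the kvm_vm_stats_header and kvm_vcpu_stats_header initializers compose them. A hedged sketch of that layout arithmetic (field subset and sizes are illustrative; the real uapi header also carries a flags word, left zero here):

#include <stdint.h>

struct stats_header {           /* subset of struct kvm_stats_header */
	uint32_t flags;
	uint32_t name_size;
	uint32_t num_desc;
	uint32_t id_offset;
	uint32_t desc_offset;
	uint32_t data_offset;
};

static const struct stats_header hdr = {
	.name_size   = 48,                                 /* assumed */
	.num_desc    = 5,
	.id_offset   = sizeof(struct stats_header),
	.desc_offset = sizeof(struct stats_header) + 48,
	.data_offset = sizeof(struct stats_header) + 48
		       + 5 * 56,                           /* 56 = assumed desc size */
};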
@@ -329,31 +349,31 @@ static void allow_cpu_feat(unsigned long nr)
static inline int plo_test_bit(unsigned char nr)
{
- register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
+ unsigned long function = (unsigned long)nr | 0x100;
int cc;
asm volatile(
+ " lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "d" (r0)
- : "cc");
+ : [function] "d" (function)
+ : "cc", "0");
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
- register unsigned long r0 asm("0") = 0; /* query function */
- register unsigned long r1 asm("1") = (unsigned long) query;
-
asm volatile(
- /* Parameter regs are ignored */
+ " lghi 0,0\n"
+ " lgr 1,%[query]\n"
+ /* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
:
- : "d" (r0), "a" (r1), [opc] "i" (opcode)
- : "cc", "memory");
+ : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
+ : "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
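Editorial note: both asm hunks above retire the "register ... asm(\"0\")" idiom (fragile under instrumentation and LTO) in favor of passing the value through an ordinary constraint, loading it into the fixed register inside the template, and naming that register in the clobber list. A hedged x86-64 illustration of the same idiom shift, with %rcx standing in for s390 register 0:

static inline unsigned long consume_in_rcx(unsigned long v)
{
	unsigned long out;
	asm volatile("mov %[v], %%rcx\n\t"  /* explicit load, like lgr 0,... */
		     "mov %%rcx, %[out]"
		     : [out] "=r" (out)
		     : [v] "r" (v)
		     : "rcx");              /* fixed register is now a clobber */
	return out;
}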
@@ -713,6 +733,10 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 152);
set_kvm_facility(kvm->arch.model.fac_list, 152);
}
+ if (test_facility(192)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 192);
+ set_kvm_facility(kvm->arch.model.fac_list, 192);
+ }
r = 0;
} else
r = -EINVAL;
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 813b6e93dc83..c8841f476e91 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
- kvm->arch.pv.stor_var = vzalloc(vlen);
+ /*
+ * The Create Secure Configuration Ultravisor Call does not support
+ * using large pages for the virtual memory area.
+ * This is a hardware limitation.
+ */
+ kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
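Editorial note: the pv.c hunk encodes a caller-driven allocation choice: because the Ultravisor rejects huge-page mappings for this area, the plain vzalloc() is swapped for vmalloc_no_huge(), which maps the range with base pages only. A userspace sketch of the decision point, with stand-in stubs rather than the kernel allocators:

#include <stdlib.h>

static void *vzalloc_stub(size_t n)         { return calloc(1, n); }
static void *vmalloc_no_huge_stub(size_t n) { return calloc(1, n); } /* base pages only, hypothetically */

static void *alloc_stor_var(size_t vlen, int huge_ok)
{
	/* the Create Secure Configuration UV call rejects huge mappings,
	 * so s390 PV always passes huge_ok == 0 here */
	return huge_ok ? vzalloc_stub(vlen) : vmalloc_no_huge_stub(vlen);
}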
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 826d01777361..8ae3dc5783fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -702,7 +702,7 @@ static void pfault_interrupt(struct ext_code ext_code,
* interrupt since it must be a leftover of a PFAULT
* CANCEL operation which didn't remove all pending
* completion interrupts. */
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
tsk->thread.pfault_wait = -1;
}
} else {
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 61ce5b59b828..606324e56e4e 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -115,6 +115,10 @@ static struct facility_def facility_defs[] = {
12, /* AP Query Configuration Information */
15, /* AP Facilities Test */
156, /* etoken facility */
+ 165, /* nnpa facility */
+ 193, /* bear enhancement facility */
+ 194, /* rdp enhancement facility */
+ 196, /* processor activity instrumentation facility */
-1 /* END */
}
},
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index aace62d42288..059791fd394f 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index ee523bd2120f..7665de9d00d0 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@
*/
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
@@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 1d06e4d288dc..b63dcfbfa14e 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -17,7 +17,7 @@
*/
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
@@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
@@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 7c2a8a703b9a..528bfeda78f5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,8 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#endif /* CONFIG_CPU_J2 */
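Editorial note: the arch_ prefix added throughout the sh (and, below, sparc) atomics is the enabling step for instrumented atomics: the generic layer defines atomic_*() as a thin wrapper that runs the sanitizer hooks and then calls the raw arch_atomic_*() op, so the arch implementations must stop claiming the unprefixed names. A stubbed sketch of the two-layer arrangement, with a GCC builtin standing in for the arch asm:

static inline void instrument_atomic_rmw(volatile void *p, unsigned size)
{ (void)p; (void)size; }                 /* stand-in for the KASAN/KCSAN hook */

typedef struct { int counter; } atomic_t;

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	/* stand-in for the arch asm op; returns the new value */
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_rmw(v, sizeof(*v)); /* instrumentation lives here */
	return arch_atomic_add_return(i, v);
}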
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index e9501d85c278..0ed9b3f4a577 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \
})
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
@@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 6552a088dc97..7b8dead2723d 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/numa.h>
extern struct pglist_data *node_data[];
@@ -31,7 +31,7 @@ static inline void
setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Platform specific mem init */
void __init plat_mem_setup(void);
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 756100b01e84..1c7f358ef0be 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -383,23 +383,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 1aa508eb0823..717de05c81f4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -186,7 +186,7 @@ unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 372acdc9033e..65924d9ec245 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
per_cpu_trap_init();
- preempt_disable();
-
notify_cpu_starting(cpu);
local_irq_enable();
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 7a989eed3b18..76af6db9daa2 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -46,7 +46,7 @@ static int __init topology_init(void)
{
int i, ret;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for_each_online_node(i)
register_one_node(i);
#endif
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index d551a9cac41e..ba569cfb4368 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -120,7 +120,7 @@ config NODES_SHIFT
int
default "3" if CPU_SUBTYPE_SHX3
default "1"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config ARCH_FLATMEM_ENABLE
def_bool y
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 168d7d4dd735..ce26c7f8950a 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -211,7 +211,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_try_nid(
sizeof(struct pglist_data),
SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 164a5254c91c..c72f52c704cd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -265,7 +265,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 4 5 if SPARC64
default "5"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index efad5532f169..d775daa83d12 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,30 +18,30 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))
-#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))
-#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 6b235d3d1d9d..077891686715 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,23 +14,23 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
-void atomic_##op(int, atomic_t *); \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *); \
+void arch_atomic64_##op(s64, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \
-int atomic_##op##_return(int, atomic_t *); \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *); \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);
#define ATOMIC_FETCH_OP(op) \
-int atomic_fetch_##op(int, atomic_t *); \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *); \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index a53d744d4212..27a57a3a7597 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
@@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
})
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
@@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 316faa0130ba..8c39a9981187 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
case 8: return __cmpxchg(ptr, old, new, size);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
}
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index 6543fb97a849..a236d8aa893a 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef _SPARC64_MMZONE_H
#define _SPARC64_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
@@ -13,6 +13,6 @@ extern struct pglist_data *node_data[];
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#endif /* _SPARC64_MMZONE_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 848a22fbac20..92675dc380fa 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -120,6 +120,8 @@
#define SO_PREFER_BUSY_POLL 0x0048
#define SO_BUSY_POLL_BUDGET 0x0049
+#define SO_NETNS_COOKIE 0x0050
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 217c21a6986a..4c05a4ee6a0e 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -346,23 +346,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 3b9794978e5b..93983d6d431d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -376,8 +376,7 @@ unsigned long get_wchan(struct task_struct *task)
struct reg_window32 *rw;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
fp = task_thread_info(task)->ksp + bias;
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 7afd0a859a78..d33c58a58d4f 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -674,8 +674,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ret = 0;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
tp = task_thread_info(task);
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 50c127ab46d5..22b148e5a5f8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
*/
arch_cpu_pre_starting(arg);
- preempt_disable();
cpu = smp_processor_id();
notify_cpu_starting(cpu);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e38d8bf454e8..0224d8f19ed6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -138,9 +138,6 @@ void smp_callin(void)
set_cpu_online(cpuid, true);
- /* idle thread is expected to have preempt disabled */
- preempt_disable();
-
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -1546,7 +1543,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = cpu_to_node(cpu);
void *ptr;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 281fa634bb1a..8b81d0f00c97 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
{
int ret;
unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
@@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 456b65a30ecf..8245d4a97301 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -19,7 +19,7 @@
*/
#define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add)
ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP
#define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1
brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e454f179cf5d..06e938d03f3b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -903,7 +903,7 @@ struct node_mem_mask {
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct mdesc_mlgroup {
u64 node;
@@ -1059,7 +1059,7 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
@@ -1080,7 +1080,7 @@ static void __init allocate_node_data(int nid)
static void init_node_masks_nonnuma(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
#endif
@@ -1090,7 +1090,7 @@ static void init_node_masks_nonnuma(void)
node_masks[0].match = 0;
num_node_masks = 1;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for (i = 0; i < NR_CPUS; i++)
numa_cpu_lookup_table[i] = 0;
@@ -1098,7 +1098,7 @@ static void init_node_masks_nonnuma(void)
#endif
}
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
@@ -2487,7 +2487,7 @@ int page_in_phys_avail(unsigned long paddr)
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
for_each_online_node(i)
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5011064b5dd..457a38db368b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -369,7 +369,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page, sp, ip;
bool seen_sched = 0;
- if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+ if ((p == NULL) || (p == current) || task_is_running(p))
return 0;
stack_page = (unsigned long) task_stack_page(p);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 3d91f89fd852..9ee19e566da3 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -41,7 +41,7 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
- if (syscall >= 0 && syscall <= __NR_syscall_max)
+ if (syscall >= 0 && syscall < __NR_syscalls)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
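Editorial note: the UM hunk tightens the dispatch bounds check. With a table of __NR_syscalls entries the valid indexes form the half-open range [0, __NR_syscalls), so "<" is the right comparison; the old "<= __NR_syscall_max" depended on a max-index constant that can drift from the generated table. A sketch of the corrected check (table size is illustrative):

#define NR_SYSCALLS 4                        /* illustrative table size */
static long (*sys_table[NR_SYSCALLS])(void); /* NULL-initialized */

static long dispatch(int nr)
{
	if (nr >= 0 && nr < NR_SYSCALLS && sys_table[nr]) /* half-open range */
		return sys_table[nr]();
	return -1; /* -ENOSYS in the kernel */
}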
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0045e1b44190..867e7936dbc5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -103,8 +103,8 @@ config X86
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
- select ARCH_SUPPORTS_LTO_CLANG if X86_64
- select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG
+ select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
@@ -113,6 +113,7 @@ config X86
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
@@ -1597,7 +1598,7 @@ config NODES_SHIFT
default "10" if MAXSMP
default "6" if X86_64
default "3"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1693,35 +1694,6 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
Set whether the default state of memory_corruption_check is
on or off.
-config X86_RESERVE_LOW
- int "Amount of low memory, in kilobytes, to reserve for the BIOS"
- default 64
- range 4 640
- help
- Specify the amount of low memory to reserve for the BIOS.
-
- The first page contains BIOS data structures that the kernel
- must not use, so that page must always be reserved.
-
- By default we reserve the first 64K of physical RAM, as a
- number of BIOSes are known to corrupt that memory range
- during events such as suspend/resume or monitor cable
- insertion, so it must not be used by the kernel.
-
- You can set this to 4 if you are absolutely sure that you
- trust the BIOS to get all its memory reservations and usages
- right. If you know your BIOS have problems beyond the
- default 64K area, you can set this to 640 to avoid using the
- entire low memory range.
-
- If you have doubts about the BIOS (e.g. suspend/resume does
- not work or there's kernel crashes after certain hardware
- hotplug events) then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
- typical corruption patterns.
-
- Leave this to the default value of 64 if you are unsure.
-
config MATH_EMULATION
bool
depends on MODIFY_LDT_SYSCALL
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 307529417021..53eceaf71ab7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -200,8 +200,9 @@ endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
endif
ifdef CONFIG_X86_NEED_RELOCS
@@ -256,7 +257,7 @@ drivers-$(CONFIG_FB) += arch/x86/video/
boot := arch/x86/boot
-BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 isoimage
+BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
PHONY += bzImage $(BOOT_TARGETS)
@@ -314,8 +315,9 @@ define archhelp
echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' hdimage - Create a BIOS/EFI hard disk image (arch/x86/boot/hdimage)'
echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
- echo ' bzdisk/fdimage*/isoimage also accept:'
+ echo ' bzdisk/fdimage*/hdimage/isoimage also accept:'
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
echo ''
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 9cc7f1357b9b..1189be057ebd 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -11,3 +11,4 @@ setup.elf
fdimage
mtools.conf
image.iso
+hdimage
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index fe605205b4ce..dfbc26a8e924 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -29,7 +29,7 @@ KCOV_INSTRUMENT := n
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
targets := vmlinux.bin setup.bin setup.elf bzImage
-targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
+targets += fdimage fdimage144 fdimage288 image.iso hdimage
subdir- := compressed
setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
@@ -115,47 +115,49 @@ $(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
# Set this if you want to pass append arguments to the
-# bzdisk/fdimage/isoimage kernel
+# bzdisk/fdimage/hdimage/isoimage kernel
FDARGS =
-# Set this if you want an initrd included with the
-# bzdisk/fdimage/isoimage kernel
+# Set this if you want one or more initrds included in the image
FDINITRD =
-image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
+imgdeps = $(obj)/bzImage $(obj)/mtools.conf $(src)/genimage.sh
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
+targets += mtools.conf
+
+# genimage.sh requires bash, but it also has a bunch of other
+# external dependencies.
quiet_cmd_genimage = GENIMAGE $3
-cmd_genimage = sh $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
- $(obj)/mtools.conf '$(image_cmdline)' $(FDINITRD)
+cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
+ $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
-PHONY += bzdisk fdimage fdimage144 fdimage288 isoimage bzlilo install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
# This requires write access to /dev/fd0
-bzdisk: $(obj)/bzImage $(obj)/mtools.conf
+# All images require syslinux to be installed; hdimage also requires
+# EDK2/OVMF if the kernel is compiled with the EFI stub.
+bzdisk: $(imgdeps)
$(call cmd,genimage,bzdisk,/dev/fd0)
-# These require being root or having syslinux 2.02 or higher installed
-fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
+fdimage fdimage144: $(imgdeps)
$(call cmd,genimage,fdimage144,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-fdimage288: $(obj)/bzImage $(obj)/mtools.conf
+fdimage288: $(imgdeps)
$(call cmd,genimage,fdimage288,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-isoimage: $(obj)/bzImage
+hdimage: $(imgdeps)
+ $(call cmd,genimage,hdimage,$(obj)/hdimage)
+ @$(kecho) 'Kernel: $(obj)/hdimage is ready'
+
+isoimage: $(imgdeps)
$(call cmd,genimage,isoimage,$(obj)/image.iso)
@$(kecho) 'Kernel: $(obj)/image.iso is ready'
-bzlilo:
- if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
- if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
- cat $(obj)/bzImage > $(INSTALL_PATH)/vmlinuz
- cp System.map $(INSTALL_PATH)/
- if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-
install:
- sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
+ $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index 6a10d52a4145..0673fdfc1a11 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
@@ -8,15 +8,24 @@
#
# Adapted from code in arch/x86/boot/Makefile by H. Peter Anvin and others
#
-# "make fdimage/fdimage144/fdimage288/isoimage" script for x86 architecture
+# "make fdimage/fdimage144/fdimage288/hdimage/isoimage"
+# script for x86 architecture
#
# Arguments:
-# $1 - fdimage format
-# $2 - target image file
-# $3 - kernel bzImage file
-# $4 - mtool configuration file
-# $5 - kernel cmdline
-# $6 - inird image file
+# $1 - fdimage format
+# $2 - target image file
+# $3 - kernel bzImage file
+# $4 - mtools configuration file
+# $5 - kernel cmdline
+# $6+ - initrd image file(s)
+#
+# This script requires:
+# bash
+# syslinux
+# mtools (for fdimage* and hdimage)
+# edk2/OVMF (for hdimage)
+#
+# Otherwise try to stick to POSIX shell commands...
#
# Use "make V=1" to debug this script
@@ -26,105 +35,237 @@ case "${KBUILD_VERBOSE}" in
;;
esac
-verify () {
- if [ ! -f "$1" ]; then
- echo "" 1>&2
- echo " *** Missing file: $1" 1>&2
- echo "" 1>&2
- exit 1
+# Exit the top-level shell with an error
+topshell=$$
+trap 'exit 1' USR1
+die() {
+ echo "" 1>&2
+ echo " *** $*" 1>&2
+ echo "" 1>&2
+ kill -USR1 $topshell
+}
+
+# Verify the existence and readability of a file
+verify() {
+ if [ ! -f "$1" -o ! -r "$1" ]; then
+ die "Missing file: $1"
fi
}
+diskfmt="$1"
+FIMAGE="$2"
+FBZIMAGE="$3"
+MTOOLSRC="$4"
+KCMDLINE="$5"
+shift 5 # Remaining arguments = initrd files
+
+export MTOOLSRC
-export MTOOLSRC=$4
-FIMAGE=$2
-FBZIMAGE=$3
-KCMDLINE=$5
-FDINITRD=$6
+# common options for dd
+dd='dd iflag=fullblock'
# Make sure the files actually exist
verify "$FBZIMAGE"
-genbzdisk() {
- verify "$MTOOLSRC"
- mformat a:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - a:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" a:initrd.img
+declare -a FDINITRDS
+irdpfx=' initrd='
+initrdopts_syslinux=''
+initrdopts_efi=''
+for f in "$@"; do
+ if [ -f "$f" -a -r "$f" ]; then
+ FDINITRDS=("${FDINITRDS[@]}" "$f")
+ fname="$(basename "$f")"
+ initrdopts_syslinux="${initrdopts_syslinux}${irdpfx}${fname}"
+ irdpfx=,
+ initrdopts_efi="${initrdopts_efi} initrd=${fname}"
fi
- mcopy $FBZIMAGE a:linux
+done
+
+# Read a $3-byte little-endian unsigned value at offset $2 from file $1
+le() {
+ local n=0
+ local m=1
+ for b in $(od -A n -v -j $2 -N $3 -t u1 "$1"); do
+ n=$((n + b*m))
+ m=$((m * 256))
+ done
+ echo $n
}
-genfdimage144() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
- mformat v:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - v:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" v:initrd.img
- fi
- mcopy $FBZIMAGE v:linux
+# Get the EFI architecture name such that boot{name}.efi is the default
+# boot file name. Returns false with no output if the file is not an
+# EFI image or otherwise unknown.
+efiarch() {
+ [ -f "$1" ] || return
+ [ $(le "$1" 0 2) -eq 23117 ] || return # MZ magic
+ peoffs=$(le "$1" 60 4) # PE header offset
+ [ $peoffs -ge 64 ] || return
+ [ $(le "$1" $peoffs 4) -eq 17744 ] || return # PE magic
+ case $(le "$1" $((peoffs+4+20)) 2) in # PE type
+ 267) ;; # PE32
+ 523) ;; # PE32+
+ *) return 1 ;; # Invalid
+ esac
+ [ $(le "$1" $((peoffs+4+20+68)) 2) -eq 10 ] || return # EFI app
+ case $(le "$1" $((peoffs+4)) 2) in # Machine type
+ 332) echo i386 ;;
+ 450) echo arm ;;
+ 512) echo ia64 ;;
+ 20530) echo riscv32 ;;
+ 20580) echo riscv64 ;;
+ 20776) echo riscv128 ;;
+ 34404) echo x64 ;;
+ 43620) echo aa64 ;;
+ esac
}
-genfdimage288() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
- mformat w:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - W:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" w:initrd.img
- fi
- mcopy $FBZIMAGE w:linux
+# Get the combined sizes in bytes of the files given, counting sparse
+# files as full length, and padding each file to a 4K block size
+filesizes() {
+ local t=0
+ local s
+ for s in $(ls -lnL "$@" 2>/dev/null | awk '/^-/{ print $5; }'); do
+ t=$((t + ((s+4095)/4096)*4096))
+ done
+ echo $t
}
-geniso() {
- tmp_dir=`dirname $FIMAGE`/isoimage
- rm -rf $tmp_dir
- mkdir $tmp_dir
- for i in lib lib64 share ; do
- for j in syslinux ISOLINUX ; do
- if [ -f /usr/$i/$j/isolinux.bin ] ; then
- isolinux=/usr/$i/$j/isolinux.bin
- fi
+# Expand directory names which should be in /usr/share into a list
+# of possible alternatives
+sharedirs() {
+ local dir file
+ for dir in /usr/share /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- for j in syslinux syslinux/modules/bios ; do
- if [ -f /usr/$i/$j/ldlinux.c32 ]; then
- ldlinux=/usr/$i/$j/ldlinux.c32
- fi
+ done
+}
+efidirs() {
+ local dir file
+ for dir in /usr/share /boot /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
- break
+ done
+}
+
+findsyslinux() {
+ local f="$(find -L $(sharedirs syslinux isolinux) \
+ -name "$1" -readable -type f -print -quit 2>/dev/null)"
+ if [ ! -f "$f" ]; then
+ die "Need a $1 file, please install syslinux/isolinux."
+ fi
+ echo "$f"
+ return 0
+}
+
+findovmf() {
+ local arch="$1"
+ shift
+ local -a names=(-false)
+ local name f
+ for name; do
+ names=("${names[@]}" -or -iname "$name")
+ done
+ for f in $(find -L $(efidirs edk2 ovmf) \
+ \( "${names[@]}" \) -readable -type f \
+ -print 2>/dev/null); do
+ if [ "$(efiarch "$f")" = "$arch" ]; then
+ echo "$f"
+ return 0
fi
done
- if [ -z "$isolinux" ] ; then
- echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
- exit 1
+ die "Need a $1 file for $arch, please install EDK2/OVMF."
+}
+
+do_mcopy() {
+ if [ ${#FDINITRDS[@]} -gt 0 ]; then
+ mcopy "${FDINITRDS[@]}" "$1"
+ fi
+ if [ -n "$efishell" ]; then
+ mmd "$1"EFI "$1"EFI/Boot
+ mcopy "$efishell" "$1"EFI/Boot/boot${kefiarch}.efi
fi
- if [ -z "$ldlinux" ] ; then
- echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
- exit 1
+ if [ -n "$kefiarch" ]; then
+ echo linux "$KCMDLINE$initrdopts_efi" | \
+ mcopy - "$1"startup.nsh
fi
- cp $isolinux $tmp_dir
- cp $ldlinux $tmp_dir
- cp $FBZIMAGE $tmp_dir/linux
- echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
- if [ -f "$FDINITRD" ] ; then
- cp "$FDINITRD" $tmp_dir/initrd.img
+ echo default linux "$KCMDLINE$initrdopts_syslinux" | \
+ mcopy - "$1"syslinux.cfg
+ mcopy "$FBZIMAGE" "$1"linux
+}
+
+genbzdisk() {
+ verify "$MTOOLSRC"
+ mformat -v 'LINUX_BOOT' a:
+ syslinux "$FIMAGE"
+ do_mcopy a:
+}
+
+genfdimage144() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=1440 2>/dev/null
+ mformat -v 'LINUX_BOOT' v:
+ syslinux "$FIMAGE"
+ do_mcopy v:
+}
+
+genfdimage288() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=2880 2>/dev/null
+ mformat -v 'LINUX_BOOT' w:
+ syslinux "$FIMAGE"
+ do_mcopy w:
+}
+
+genhdimage() {
+ verify "$MTOOLSRC"
+ mbr="$(findsyslinux mbr.bin)"
+ kefiarch="$(efiarch "$FBZIMAGE")"
+ if [ -n "$kefiarch" ]; then
+ # The efishell provides command line handling
+ efishell="$(findovmf $kefiarch shell.efi shell${kefiarch}.efi)"
+ ptype='-T 0xef' # EFI system partition, no GPT
fi
- genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
- -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
- -boot-info-table $tmp_dir
- isohybrid $FIMAGE 2>/dev/null || true
- rm -rf $tmp_dir
+ sizes=$(filesizes "$FBZIMAGE" "${FDINITRDS[@]}" "$efishell")
+ # Allow 1% + 1 MiB for filesystem and partition table overhead,
+ # syslinux, and config files
+ megs=$(((sizes + sizes/100 + 2*1024*1024 - 1)/(1024*1024)))
+ $dd if=/dev/zero of="$FIMAGE" bs=$((1024*1024)) count=$megs 2>/dev/null
+ mpartition -I -c -s 32 -h 64 -t $megs $ptype -b 512 -a h:
+ $dd if="$mbr" of="$FIMAGE" bs=440 count=1 conv=notrunc 2>/dev/null
+ mformat -v 'LINUX_BOOT' -s 32 -h 64 -t $megs h:
+ syslinux --offset $((512*512)) "$FIMAGE"
+ do_mcopy h:
+}
+
+geniso() {
+ tmp_dir="$(dirname "$FIMAGE")/isoimage"
+ rm -rf "$tmp_dir"
+ mkdir "$tmp_dir"
+ isolinux=$(findsyslinux isolinux.bin)
+ ldlinux=$(findsyslinux ldlinux.c32)
+ cp "$isolinux" "$ldlinux" "$tmp_dir"
+ cp "$FBZIMAGE" "$tmp_dir"/linux
+ echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
+ cp "${FDINITRDS[@]}" "$tmp_dir"/
+ genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
+ -quiet -o "$FIMAGE" -b isolinux.bin \
+ -c boot.cat -no-emul-boot -boot-load-size 4 \
+ -boot-info-table "$tmp_dir"
+ isohybrid "$FIMAGE" 2>/dev/null || true
+ rm -rf "$tmp_dir"
}
-case $1 in
+rm -f "$FIMAGE"
+
+case "$diskfmt" in
bzdisk) genbzdisk;;
fdimage144) genfdimage144;;
fdimage288) genfdimage288;;
+ hdimage) genhdimage;;
isoimage) geniso;;
- *) echo 'Unknown image format'; exit 1;
+ *) die "Unknown image format: $diskfmt";;
esac
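The efiarch() helper above identifies an EFI-bootable bzImage purely from
PE/COFF header fields, comparing against decimal constants because od(1)
emits unsigned decimal. A minimal userspace C sketch of the same probe
(illustrative only, not part of the patch); the byte-at-a-time
accumulation mirrors what the shell le() helper does with od output:

    #include <stdio.h>
    #include <stdint.h>

    /* Read a little-endian value of len bytes (len <= 4) at offset off. */
    static uint32_t le_at(FILE *f, long off, int len)
    {
        uint32_t v = 0;

        for (int i = 0; i < len; i++) {
            unsigned char b;

            if (fseek(f, off + i, SEEK_SET) || fread(&b, 1, 1, f) != 1)
                return 0;
            v |= (uint32_t)b << (8 * i);
        }
        return v;
    }

    int main(int argc, char **argv)
    {
        FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

        if (!f)
            return 1;
        if (le_at(f, 0, 2) != 23117)        /* "MZ" == 0x5a4d */
            return 1;
        uint32_t peoffs = le_at(f, 60, 4);  /* PE header offset */
        if (le_at(f, peoffs, 4) != 17744)   /* "PE\0\0" == 0x4550 */
            return 1;
        /* 34404 == 0x8664 (x64), 43620 == 0xaa64 (aa64), ... */
        printf("machine type: %u\n", le_at(f, peoffs + 4, 2));
        return 0;
    }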
diff --git a/arch/x86/boot/mtools.conf.in b/arch/x86/boot/mtools.conf.in
index efd6d2490c1d..9e2662d01364 100644
--- a/arch/x86/boot/mtools.conf.in
+++ b/arch/x86/boot/mtools.conf.in
@@ -14,4 +14,7 @@ drive v:
drive w:
file="@OBJ@/fdimage" cylinders=80 heads=2 sectors=36 filter
+# Hard disk
+drive h:
+ file="@OBJ@/hdimage" partition=1 mformat_only
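The h: drive declared here backs the genhdimage() sizing above: the image
gets the payload size plus 1% plus 1 MiB of slack, rounded up to a whole
MiB. The arithmetic checked in isolation (the 9 MiB payload is a made-up
figure):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sizes = 9 * 1024 * 1024;  /* example payload: 9 MiB */
        /* same expression as genhdimage: 1% + 1 MiB slack, rounded up */
        uint64_t megs = (sizes + sizes/100 + 2*1024*1024 - 1)/(1024*1024);

        printf("%llu MiB image\n", (unsigned long long)megs);  /* 11 */
        return 0;
    }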
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 6706b6cb1d0f..38caf61cd5b7 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
static void __exit curve25519_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
- (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
+ static_branch_likely(&curve25519_use_bmi2_adx))
crypto_unregister_kpp(&curve25519_alg);
}
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 08bf95dbc911..7fec5dcf6438 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -8,18 +8,8 @@ UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE)
CFLAGS_common.o += -fno-stack-protector
-CFLAGS_syscall_64.o += -fno-stack-protector
-CFLAGS_syscall_32.o += -fno-stack-protector
-CFLAGS_syscall_x32.o += -fno-stack-protector
-
-CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 07a9331d55e7..a4c061fb7c6e 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,7 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/ptrace-abi.h>
/*
@@ -62,42 +63,7 @@ For 32-bit we have the following conventions - kernel is built with
* for assembly code:
*/
-/* The layout forms the "struct pt_regs" on the stack: */
-/*
- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
- * unless syscall needs a complete, fully filled "struct pt_regs".
- */
-#define R15 0*8
-#define R14 1*8
-#define R13 2*8
-#define R12 3*8
-#define RBP 4*8
-#define RBX 5*8
-/* These regs are callee-clobbered. Always saved on kernel entry. */
-#define R11 6*8
-#define R10 7*8
-#define R9 8*8
-#define R8 9*8
-#define RAX 10*8
-#define RCX 11*8
-#define RDX 12*8
-#define RSI 13*8
-#define RDI 14*8
-/*
- * On syscall entry, this is syscall#. On CPU exception, this is error code.
- * On hw interrupt, it's IRQ number:
- */
-#define ORIG_RAX 15*8
-/* Return frame for iretq */
-#define RIP 16*8
-#define CS 17*8
-#define EFLAGS 18*8
-#define RSP 19*8
-#define SS 20*8
-
-#define SIZEOF_PTREGS 21*8
-
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
.if \save_ret
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -124,7 +90,9 @@ For 32-bit we have the following conventions - kernel is built with
.if \save_ret
pushq %rsi /* return address on top of stack */
.endif
+.endm
+.macro CLEAR_REGS
/*
* Sanitize registers of values that a speculation attack might
* otherwise want to exploit. The lower registers are likely clobbered
@@ -146,6 +114,11 @@ For 32-bit we have the following conventions - kernel is built with
.endm
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+ PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
+ CLEAR_REGS
+.endm
+
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7b2542b13ebd..6c2826417b33 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -36,61 +36,97 @@
#include <asm/irq_stack.h>
#ifdef CONFIG_X86_64
-__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+
+static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+{
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < NR_syscalls)) {
+ unr = array_index_nospec(unr, NR_syscalls);
+ regs->ax = sys_call_table[unr](regs);
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+{
+ /*
+ * Adjust the starting offset of the table, and convert numbers
+ * < __X32_SYSCALL_BIT to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int xnr = nr - __X32_SYSCALL_BIT;
+
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ xnr = array_index_nospec(xnr, X32_NR_syscalls);
+ regs->ax = x32_sys_call_table[xnr](regs);
+ return true;
+ }
+ return false;
+}
+
+__visible noinstr void do_syscall_64(struct pt_regs *regs, int nr)
{
add_random_kstack_offset();
nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
- if (likely(nr < NR_syscalls)) {
- nr = array_index_nospec(nr, NR_syscalls);
- regs->ax = sys_call_table[nr](regs);
-#ifdef CONFIG_X86_X32_ABI
- } else if (likely((nr & __X32_SYSCALL_BIT) &&
- (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
- nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
- X32_NR_syscalls);
- regs->ax = x32_sys_call_table[nr](regs);
-#endif
+
+ if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
+ /* Invalid system call, but still a system call. */
+ regs->ax = __x64_sys_ni_syscall(regs);
}
+
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
-static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
+static __always_inline int syscall_32_enter(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- return (unsigned int)regs->orig_ax;
+ return (int)regs->orig_ax;
}
/*
* Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
- unsigned int nr)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
- if (likely(nr < IA32_NR_syscalls)) {
- nr = array_index_nospec(nr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call_table[nr](regs);
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < IA32_NR_syscalls)) {
+ unr = array_index_nospec(unr, IA32_NR_syscalls);
+ regs->ax = ia32_sys_call_table[unr](regs);
+ } else if (nr != -1) {
+ regs->ax = __ia32_sys_ni_syscall(regs);
}
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
add_random_kstack_offset();
/*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
+ * Subtlety here: if ptrace pokes something larger than 2^31-1 into
+ * orig_ax, the int return value truncates it. This matches
+ * the semantics of syscall_get_nr().
*/
- nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+ nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
do_syscall_32_irqs_on(regs, nr);
@@ -101,7 +137,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
int res;
add_random_kstack_offset();
@@ -130,14 +166,13 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
/* User code screwed up. */
regs->ax = -EFAULT;
- instrumentation_end();
local_irq_disable();
+ instrumentation_end();
irqentry_exit_to_user_mode(regs);
return false;
}
- /* The case truncates any ptrace induced syscall nr > 2^32 -1 */
- nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+ nr = syscall_enter_from_user_mode_work(regs, nr);
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
@@ -269,15 +304,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
+ instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- instrumentation_begin();
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
+ instrumentation_end();
irqentry_exit(regs, state);
}
}
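The do_syscall_x64()/do_syscall_32_irqs_on() rework folds the nr < 0 and
nr >= NR_syscalls tests into one unsigned comparison: assigning the signed
nr to an unsigned int maps -1 to 0xffffffff, which can never pass the
bounds check. A standalone demo of the trick (NR_SYSCALLS is an arbitrary
stand-in, not the real table size):

    #include <stdio.h>

    #define NR_SYSCALLS 450  /* stand-in value */

    static int in_table(int nr)
    {
        unsigned int unr = nr;       /* -1 wraps to 0xffffffff */

        return unr < NR_SYSCALLS;    /* one compare rejects both ends */
    }

    int main(void)
    {
        /* prints: 1 1 0 0 */
        printf("%d %d %d %d\n", in_table(0), in_table(449),
               in_table(450), in_table(-1));
        return 0;
    }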
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a16a5294d55f..e38a4cf795d9 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -107,8 +107,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
/* IRQs are off. */
- movq %rax, %rdi
- movq %rsp, %rsi
+ movq %rsp, %rdi
+	/* Sign-extend the lower 32 bits as syscall numbers are treated as int */
+ movslq %eax, %rsi
call do_syscall_64 /* returns with IRQs disabled */
/*
@@ -506,7 +507,7 @@ SYM_CODE_START(\asmsym)
movq %rsp, %rdi /* pt_regs pointer */
- call \cfunc
+ call kernel_\cfunc
/*
* No need to switch back to the IST stack. The current stack is either
@@ -517,7 +518,7 @@ SYM_CODE_START(\asmsym)
/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
- idtentry_body safe_stack_\cfunc, has_error_code=1
+ idtentry_body user_\cfunc, has_error_code=1
_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 86eb0d89d46f..8cfc9bc73e7f 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -5,21 +5,21 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_I386(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
+#ifdef CONFIG_IA32_EMULATION
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#else
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#endif
+
+#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
+#undef __SYSCALL
-#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym,
+#define __SYSCALL(nr, sym) __ia32_##sym,
-__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_ia32_syscall_max] = &__ia32_sys_ni_syscall,
+__visible const sys_call_ptr_t ia32_sys_call_table[] = {
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 1594ec72bcbb..be120eec1fc9 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -5,23 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym) [nr] = __x64_##sym,
+#define __SYSCALL(nr, sym) __x64_##sym,
-asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
+asmlinkage const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index f2fe0a33bcfd..bdd0e03a1265 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -5,37 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-/*
- * Reuse the 64-bit entry points for the x32 versions that occupy different
- * slots in the syscall table.
- */
-#define __x32_sys_readv __x64_sys_readv
-#define __x32_sys_writev __x64_sys_writev
-#define __x32_sys_getsockopt __x64_sys_getsockopt
-#define __x32_sys_setsockopt __x64_sys_setsockopt
-#define __x32_sys_vmsplice __x64_sys_vmsplice
-#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv
-#define __x32_sys_process_vm_writev __x64_sys_process_vm_writev
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#include <asm/syscalls_x32.h>
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym)
+#define __SYSCALL(nr, sym) __x64_##sym,
-#define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *);
-#define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *);
-#include <asm/syscalls_64.h>
-#undef __SYSCALL_X32
-#undef __SYSCALL_COMMON
-
-#define __SYSCALL_X32(nr, sym) [nr] = __x32_##sym,
-#define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym,
-
-asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_x32_syscall_max] = &__x64_sys_ni_syscall,
-#include <asm/syscalls_64.h>
+asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
+#include <asm/syscalls_x32.h>
};
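All three table files now share the same two-pass shape: the generated
asm/syscalls_*.h is included once with __SYSCALL declaring the entry
points and once emitting dense array initializers, and the old sparse
[0 ... max] = &ni_syscall default is no longer needed because out-of-range
numbers are routed to ni_syscall at dispatch time (see the do_syscall_64()
change above). A toy version of the two-pass expansion, using an X-macro
list in place of the re-included header (names are hypothetical):

    #include <stdio.h>

    /* Stands in for the generated header listing nr/sym pairs. */
    #define SYSCALL_LIST(X) \
        X(0, sys_zero)      \
        X(1, sys_one)

    /* Pass 1: declarations. */
    #define X(nr, sym) static long sym(void) { return nr; }
    SYSCALL_LIST(X)
    #undef X

    /* Pass 2: dense table of pointers, in table order. */
    #define X(nr, sym) sym,
    static long (*const call_table[])(void) = {
        SYSCALL_LIST(X)
    };
    #undef X

    int main(void)
    {
        printf("%ld\n", call_table[1]());  /* prints 1 */
        return 0;
    }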
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index d8c4f6c9eadc..5b3efed0e4e8 100644
--- a/arch/x86/entry/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -9,47 +9,54 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
syscall32 := $(src)/syscall_32.tbl
syscall64 := $(src)/syscall_64.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+offset :=
+prefix :=
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abi_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) --emit-nr \
+ $(if $(offset),--offset $(offset)) \
+ $(if $(prefix),--prefix $(prefix)) \
+ $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
quiet_cmd_hypercalls = HYPERCALLS $@
cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<, $(real-prereqs))
-syshdr_abi_unistd_32 := i386
+$(uapi)/unistd_32.h: abis := i386
$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_32_ia32 := i386
-syshdr_pfx_unistd_32_ia32 := ia32_
+$(out)/unistd_32_ia32.h: abis := i386
+$(out)/unistd_32_ia32.h: prefix := ia32_
$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_x32 := common,x32
-syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
+$(uapi)/unistd_x32.h: abis := common,x32
+$(uapi)/unistd_x32.h: offset := __X32_SYSCALL_BIT
$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64 := common,64
+$(uapi)/unistd_64.h: abis := common,64
$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64_x32 := x32
-syshdr_pfx_unistd_64_x32 := x32_
+$(out)/unistd_64_x32.h: abis := x32
+$(out)/unistd_64_x32.h: prefix := x32_
$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
+$(out)/syscalls_32.h: abis := i386
$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_64.h: abis := common,64
$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_x32.h: abis := common,x32
+$(out)/syscalls_x32.h: $(syscall64) $(systbl) FORCE
+ $(call if_changed,systbl)
$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE
$(call if_changed,hypercalls)
@@ -60,6 +67,7 @@ uapisyshdr-y += unistd_32.h unistd_64.h unistd_x32.h
syshdr-y += syscalls_32.h
syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
+syshdr-$(CONFIG_X86_X32) += syscalls_x32.h
syshdr-$(CONFIG_XEN) += xen-hypercalls.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
diff --git a/arch/x86/entry/syscalls/syscallhdr.sh b/arch/x86/entry/syscalls/syscallhdr.sh
deleted file mode 100644
index cc1e63857427..000000000000
--- a/arch/x86/entry/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_ASM_X86_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- echo "#ifndef ${fileguard}"
- echo "#define ${fileguard} 1"
- echo ""
-
- max=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- echo "#define __NR_${prefix}${name} $nr"
- else
- echo "#define __NR_${prefix}${name} ($offset + $nr)"
- fi
-
- max=$nr
- done
-
- echo ""
- echo "#ifdef __KERNEL__"
- echo "#define __NR_${prefix}syscall_max $max"
- echo "#endif"
- echo ""
- echo "#endif /* ${fileguard} */"
-) > "$out"
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
deleted file mode 100644
index 929bde120d6b..000000000000
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-
-syscall_macro() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
-
- echo "__SYSCALL_${abi}($nr, $entry)"
-}
-
-emit() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
- local compat="$4"
-
- if [ "$abi" != "I386" -a -n "$compat" ]; then
- echo "a compat entry ($abi: $compat) for a 64-bit syscall makes no sense" >&2
- exit 1
- fi
-
- if [ -z "$compat" ]; then
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- else
- echo "#ifdef CONFIG_X86_32"
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- echo "#else"
- syscall_macro "$abi" "$nr" "$compat"
- echo "#endif"
- fi
-}
-
-grep '^[0-9]' "$in" | sort -n | (
- while read nr abi name entry compat; do
- abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
- emit "$abi" "$nr" "$entry" "$compat"
- done
-) > "$out"
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8f71dd72ef95..1eb45139fcc6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1626,6 +1626,8 @@ static void x86_pmu_del(struct perf_event *event, int flags)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto do_del;
+ __set_bit(event->hw.idx, cpuc->dirty);
+
/*
* Not a TXN, therefore cleanup properly.
*/
@@ -2474,6 +2476,31 @@ static int x86_pmu_event_init(struct perf_event *event)
return err;
}
+void perf_clear_dirty_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int i;
+
+	/* Don't need to clear the assigned counters. */
+ for (i = 0; i < cpuc->n_events; i++)
+ __clear_bit(cpuc->assign[i], cpuc->dirty);
+
+ if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
+ return;
+
+ for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
+ /* Metrics and fake events don't have corresponding HW counters. */
+ if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
+ continue;
+ else if (i >= INTEL_PMC_IDX_FIXED)
+ wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+ else
+ wrmsrl(x86_pmu_event_addr(i), 0);
+ }
+
+ bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
+}
+
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2497,7 +2524,6 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
-
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
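x86_pmu_del() now records the freed counter in the new dirty bitmap, and
perf_clear_dirty_counters() later zeroes only those dirty counters that
are not currently assigned. The bookkeeping reduced to a plain 64-bit mask
(a sketch; the wrmsrl() calls are only printed):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t dirty;  /* one bit per hardware counter */

    static void event_del(int idx)
    {
        dirty |= UINT64_C(1) << idx;   /* mirrors __set_bit() on delete */
    }

    static void clear_dirty(const int *assign, int n_events)
    {
        for (int i = 0; i < n_events; i++)   /* skip live counters */
            dirty &= ~(UINT64_C(1) << assign[i]);
        for (int i = 0; i < 64; i++)
            if (dirty & (UINT64_C(1) << i))
                printf("wrmsrl(counter %d, 0)\n", i);  /* stand-in */
        dirty = 0;
    }

    int main(void)
    {
        event_del(2);
        event_del(5);
        clear_dirty((const int[]){ 5 }, 1);  /* resets only counter 2 */
        return 0;
    }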
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e28892270c58..fca7a6e2242f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -280,6 +280,8 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
EVENT_EXTRA_END
};
@@ -4030,8 +4032,10 @@ spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
* The :ppp indicates the Precise Distribution (PDist) facility, which
* is only supported on the GP counter 0. If a :ppp event which is not
* available on the GP counter 0, error out.
+ * Exception: Instruction PDIR is only available on the fixed counter 0.
*/
- if (event->attr.precise_ip == 3) {
+ if ((event->attr.precise_ip == 3) &&
+ !constraint_match(&fixed0_constraint, event->hw.config)) {
if (c->idxmsk64 & BIT_ULL(0))
return &counter0_constraint;
@@ -6015,7 +6019,13 @@ __init int intel_pmu_init(void)
tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(pmem);
- if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ /*
+ * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
+ * TSX force abort hooks are not required on these systems. Only deploy
+	 * the workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
+ !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
x86_pmu.flags |= PMU_FL_TFA;
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
@@ -6157,8 +6167,13 @@ __init int intel_pmu_init(void)
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
pmu->name = "cpu_core";
pmu->cpu_type = hybrid_big;
- pmu->num_counters = x86_pmu.num_counters + 2;
- pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
+ pmu->num_counters = x86_pmu.num_counters + 2;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ } else {
+ pmu->num_counters = x86_pmu.num_counters;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ }
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
pmu->unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1ec8fd311f38..8647713276a7 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1187,6 +1187,9 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ u64 value = ds->pebs_event_reset[hwc->idx];
+ u32 base = MSR_RELOAD_PMC0;
+ unsigned int idx = hwc->idx;
if (!is_pebs_pt(event))
return;
@@ -1196,7 +1199,12 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
- wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+ if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
+ base = MSR_RELOAD_FIXED_CTR0;
+ idx = hwc->idx - INTEL_PMC_IDX_FIXED;
+ value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
+ }
+ wrmsrl(base + idx, value);
}
void intel_pmu_pebs_enable(struct perf_event *event)
@@ -1204,6 +1212,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ unsigned int idx = hwc->idx;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -1222,19 +1231,18 @@ void intel_pmu_pebs_enable(struct perf_event *event)
}
}
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+
/*
* Use auto-reload if possible to save a MSR write in the PMI.
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- unsigned int idx = hwc->idx;
-
- if (idx >= INTEL_PMC_IDX_FIXED)
- idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
- ds->pebs_event_reset[hwc->idx] = 0;
+ ds->pebs_event_reset[idx] = 0;
}
intel_pmu_pebs_via_pt_enable(event);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 4409d2cccfda..e8453de7a964 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -731,7 +731,8 @@ void reserve_lbr_buffers(void)
if (!kmem_cache || cpuc->lbr_xsave)
continue;
- cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+ GFP_KERNEL | __GFP_ZERO,
cpu_to_node(cpu));
}
}
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index df7b07d7fdcb..9bf4dbbc26e2 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -801,8 +801,6 @@ static void uncore_pmu_enable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
@@ -818,8 +816,6 @@ static void uncore_pmu_disable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 291791002997..187d7287039c 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -92,6 +92,7 @@ struct intel_uncore_type {
/*
* Optional callbacks for managing mapping of Uncore units to PMONs
*/
+ int (*get_topology)(struct intel_uncore_type *type);
int (*set_mapping)(struct intel_uncore_type *type);
void (*cleanup_mapping)(struct intel_uncore_type *type);
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 63f097289a84..bb6eb1e5569c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -348,6 +348,13 @@
#define SKX_M2M_PCI_PMON_CTR0 0x200
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
+/* Memory Map registers device ID */
+#define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
+#define SNR_ICX_SAD_CONTROL_CFG 0x3f4
+
+/* Get the I/O stack id in SAD_CONTROL_CFG notation */
+#define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
+
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0 0x1f98
#define SNR_U_MSR_PMON_CTL0 0x1f91
@@ -1406,6 +1413,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
die_id = i;
else
die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
map->pbus_to_dieid[bus] = die_id;
break;
}
@@ -1452,14 +1461,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
@@ -3680,12 +3689,19 @@ static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
}
static umode_t
-skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
+ int die, int zero_bus_pmu)
{
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
- /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
- return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
+}
+
+static umode_t
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 0. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
static ssize_t skx_iio_mapping_show(struct device *dev,
@@ -3770,7 +3786,8 @@ static const struct attribute_group *skx_iio_attr_update[] = {
NULL,
};
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
+static int
+pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
char buf[64];
int ret;
@@ -3778,7 +3795,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
struct attribute **attrs = NULL;
struct dev_ext_attribute *eas = NULL;
- ret = skx_iio_get_topology(type);
+ ret = type->get_topology(type);
if (ret < 0)
goto clear_attr_update;
@@ -3805,7 +3822,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
eas[die].var = (void *)die;
attrs[die] = &eas[die].attr.attr;
}
- skx_iio_mapping_group.attrs = attrs;
+ ag->attrs = attrs;
return 0;
err:
@@ -3819,6 +3836,11 @@ clear_attr_update:
return ret;
}
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
struct attribute **attr = skx_iio_mapping_group.attrs;
@@ -3849,6 +3871,7 @@ static struct intel_uncore_type skx_uncore_iio = {
.ops = &skx_uncore_iio_ops,
.format_group = &skx_uncore_iio_format_group,
.attr_update = skx_iio_attr_update,
+ .get_topology = skx_iio_get_topology,
.set_mapping = skx_iio_set_mapping,
.cleanup_mapping = skx_iio_cleanup_mapping,
};
@@ -4391,6 +4414,91 @@ static const struct attribute_group snr_uncore_iio_format_group = {
.attrs = snr_uncore_iio_formats_attr,
};
+static umode_t
+snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 1. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 1);
+}
+
+static struct attribute_group snr_iio_mapping_group = {
+ .is_visible = snr_iio_mapping_visible,
+};
+
+static const struct attribute_group *snr_iio_attr_update[] = {
+ &snr_iio_mapping_group,
+ NULL,
+};
+
+static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
+{
+ u32 sad_cfg;
+ int die, stack_id, ret = -EPERM;
+ struct pci_dev *dev = NULL;
+
+ type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+ GFP_KERNEL);
+ if (!type->topology)
+ return -ENOMEM;
+
+ while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
+ ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
+ break;
+ }
+
+ die = uncore_pcibus_to_dieid(dev->bus);
+ stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
+ if (die < 0 || stack_id >= type->num_boxes) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* Convert stack id from SAD_CONTROL to PMON notation. */
+ stack_id = sad_pmon_mapping[stack_id];
+
+ ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
+ type->topology[die].segment = pci_domain_nr(dev->bus);
+ }
+
+ if (ret) {
+ kfree(type->topology);
+ type->topology = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ SNR_QAT_PMON_ID,
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID
+};
+
+static u8 snr_sad_pmon_mapping[] = {
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_QAT_PMON_ID
+};
+
+static int snr_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
+}
+
+static int snr_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
+}
+
static struct intel_uncore_type snr_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4404,6 +4512,10 @@ static struct intel_uncore_type snr_uncore_iio = {
.msr_offset = SNR_IIO_MSR_OFFSET,
.ops = &ivbep_uncore_msr_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = snr_iio_attr_update,
+ .get_topology = snr_iio_get_topology,
+ .set_mapping = snr_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type snr_uncore_irp = {
@@ -4931,6 +5043,53 @@ static struct event_constraint icx_uncore_iio_constraints[] = {
EVENT_CONSTRAINT_END
};
+static umode_t
+icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 5. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 5);
+}
+
+static struct attribute_group icx_iio_mapping_group = {
+ .is_visible = icx_iio_mapping_visible,
+};
+
+static const struct attribute_group *icx_iio_attr_update[] = {
+ &icx_iio_mapping_group,
+ NULL,
+};
+
+/*
+ * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+ ICX_CBDMA_DMI_PMON_ID
+};
+
+static u8 icx_sad_pmon_mapping[] = {
+ ICX_CBDMA_DMI_PMON_ID,
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+};
+
+static int icx_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
+}
+
+static int icx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
+}
+
static struct intel_uncore_type icx_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4945,6 +5104,10 @@ static struct intel_uncore_type icx_uncore_iio = {
.constraints = icx_uncore_iio_constraints,
.ops = &skx_uncore_iio_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = icx_iio_attr_update,
+ .get_topology = icx_iio_get_topology,
+ .set_mapping = icx_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type icx_uncore_irp = {
@@ -5097,9 +5260,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
.ops = &snr_m2m_uncore_pci_ops,
- .format_group = &skx_uncore_format_group,
+ .format_group = &snr_m2m_uncore_format_group,
};
static struct attribute *icx_upi_uncore_formats_attr[] = {
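The SAD_CONTROL_STACK_ID() macro added above pulls a 3-bit stack id out of
bits [6:4] of the SNR_ICX_SAD_CONTROL_CFG register, which
sad_cfg_iio_topology() then remaps through the per-platform
*_sad_pmon_mapping tables. A quick check of the field extraction (the
register value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)

    int main(void)
    {
        uint32_t sad_cfg = 0x35;  /* binary 011 0101: bits [6:4] == 3 */

        printf("stack id = %u\n", SAD_CONTROL_STACK_ID(sad_cfg));  /* 3 */
        return 0;
    }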
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ad87cb36f7c8..2bf1c7ea2758 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -229,6 +229,7 @@ struct cpu_hw_events {
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events; /* the # of events in the below arrays */
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 84a1042c3b01..85feafacc445 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -764,13 +764,14 @@ static struct rapl_model model_spr = {
.rapl_msrs = intel_rapl_spr_msrs,
};
-static struct rapl_model model_amd_fam17h = {
+static struct rapl_model model_amd_hygon = {
.events = BIT(PERF_RAPL_PKG),
.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
.rapl_msrs = amd_rapl_msrs,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb),
@@ -803,9 +804,6 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
- X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index bb0ae4b5c00f..6952e219cba3 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -614,50 +614,3 @@ bool hv_is_isolation_supported(void)
return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
-
-/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
-bool hv_query_ext_cap(u64 cap_query)
-{
- /*
- * The address of the 'hv_extended_cap' variable will be used as an
- * output parameter to the hypercall below and so it should be
- * compatible with 'virt_to_phys'. Which means, it's address should be
- * directly mapped. Use 'static' to keep it compatible; stack variables
- * can be virtually mapped, making them imcompatible with
- * 'virt_to_phys'.
- * Hypercall input/output addresses should also be 8-byte aligned.
- */
- static u64 hv_extended_cap __aligned(8);
- static bool hv_extended_cap_queried;
- u64 status;
-
- /*
- * Querying extended capabilities is an extended hypercall. Check if the
- * partition supports extended hypercall, first.
- */
- if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
- return false;
-
- /* Extended capabilities do not change at runtime. */
- if (hv_extended_cap_queried)
- return hv_extended_cap & cap_query;
-
- status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
- &hv_extended_cap);
-
- /*
- * The query extended capabilities hypercall should not fail under
- * any normal circumstances. Avoid repeatedly making the hypercall, on
- * error.
- */
- hv_extended_cap_queried = true;
- status &= HV_HYPERCALL_RESULT_MASK;
- if (status != HV_STATUS_SUCCESS) {
- pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
- status);
- return false;
- }
-
- return hv_extended_cap & cap_query;
-}
-EXPORT_SYMBOL_GPL(hv_query_ext_cap);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a09fc37ead9d..5e5b9fc2747f 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -203,7 +203,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset);
if (error != N_TXTADDR(ex))
@@ -212,7 +212,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
return error;
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b19ec8282d50..1e51650b79d7 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -3,6 +3,7 @@
generated-y += syscalls_32.h
generated-y += syscalls_64.h
+generated-y += syscalls_x32.h
generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 412b51e059c8..48067af94678 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
extern void lapic_assign_system_vectors(void);
extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
extern void lapic_online(void);
extern void lapic_offline(void);
extern bool apic_needs_pit(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0603c7423aca..3ad3da9a7d97 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,25 +3,26 @@
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
-# define __ASM_FORM(x) x
-# define __ASM_FORM_RAW(x) x
-# define __ASM_FORM_COMMA(x) x,
+# define __ASM_FORM(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
#else
#include <linux/stringify.h>
-
-# define __ASM_FORM(x) " " __stringify(x) " "
-# define __ASM_FORM_RAW(x) __stringify(x)
-# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
+# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
+# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
+# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
#endif
+#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
+
#ifndef __x86_64__
/* 32 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(a)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
/* 64 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(b)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
@@ -119,6 +120,8 @@
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif
+#ifdef __KERNEL__
+
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
@@ -185,4 +188,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_X86_ASM_H */
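The variadic __ASM_FORM() rework lets macros such as the new _ASM_BYTES()
accept comma-separated token lists and, on the C side, stringify them into
a single quoted inline-asm fragment. A userspace approximation (the two
__stringify lines mirror the usual linux/stringify.h definition):

    #include <stdio.h>

    #define __stringify_1(x...)  #x
    #define __stringify(x...)    __stringify_1(x)

    #define __ASM_FORM(x, ...)   " " __stringify(x,##__VA_ARGS__) " "
    #define _ASM_BYTES(x, ...)   __ASM_FORM(.byte x,##__VA_ARGS__ ;)

    int main(void)
    {
        /* emits one quoted fragment, roughly " .byte 0x0f,0x1f,0x00 ; " */
        puts(_ASM_BYTES(0x0f, 0x1f, 0x00));
        return 0;
    }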
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index f732741ad7c7..5e754e895767 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -269,6 +269,4 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#define ARCH_ATOMIC
-
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 4819d5e5a335..3ba772a69cc8 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -54,11 +54,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define dma_rmb() barrier()
#define dma_wmb() barrier()
-#ifdef CONFIG_X86_32
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
-#else
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
-#endif
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ac37830ae941..d0ce5cfd3ac1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -378,6 +378,7 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index f58de66091e5..8b6bd63530dc 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -9,10 +9,4 @@ int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
void crash_smp_send_stop(void);
-#ifdef CONFIG_KEXEC_CORE
-void __init crash_reserve_low_1M(void);
-#else
-static inline void __init crash_reserve_low_1M(void) { }
-#endif
-
#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 476082a83d1c..e63cf582201f 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -224,6 +224,26 @@ static inline void store_idt(struct desc_ptr *dtr)
asm volatile("sidt %0":"=m" (*dtr));
}
+static inline void native_gdt_invalidate(void)
+{
+ const struct desc_ptr invalid_gdt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_gdt(&invalid_gdt);
+}
+
+static inline void native_idt_invalidate(void)
+{
+ const struct desc_ptr invalid_idt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_idt(&invalid_idt);
+}
+
/*
* The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
* a read-only remapping. To prevent a page fault, the GDT is switched to the
@@ -421,12 +441,10 @@ extern bool idt_is_f00f_address(unsigned long address);
#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
-extern void idt_setup_ist_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
-static inline void idt_setup_ist_traps(void) { }
#endif
-extern void idt_invalidate(void *addr);
+extern void idt_invalidate(void);
#endif /* _ASM_X86_DESC_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index ed33a14188f6..23bef08a8388 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
*/
#define PASID_DISABLED 0
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
static inline void update_pasid(void) { }
-#endif
+
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8d33ad80704f..16bf4d4a8159 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}
+static inline void fxsave(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+ else
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
- u64 mask = xfeatures_mask_all;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
- else
- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
- /* We should never fault when copying to a kernel buffer: */
- WARN_ON_FPU(err);
-}
-
-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
u64 mask = -1;
@@ -578,19 +564,19 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
* PKRU state is switched eagerly because it needs to be valid before we
* return to userland e.g. for a copy_to_user() operation.
*/
- if (current->mm) {
+ if (!(current->flags & PF_KTHREAD)) {
+ /*
+ * If the PKRU bit in xsave.header.xfeatures is not set,
+ * then the PKRU component was in init state, which means
+ * XRSTOR will set PKRU to 0. If the bit is not set then
+ * get_xsave_addr() will return NULL because the PKRU value
+ * in memory is not valid. This means pkru_val has to be
+ * set to 0 and not to init_pkru_value.
+ */
pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
- if (pk)
- pkru_val = pk->pkru;
+ pkru_val = pk ? pk->pkru : 0;
}
__write_pkru(pkru_val);
-
- /*
- * Expensive PASID MSR write will be avoided in update_pasid() because
- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
- * unless it's different from mm->pasid to reduce overhead.
- */
- update_pasid();
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 606f5cc579b2..f1366ce609e3 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -52,7 +52,7 @@
* Support for passing hypercall input parameter block via XMM
* registers is available
*/
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
/* Support for a virtual guest idle state is available */
#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
/* Frequency MSRs available */
@@ -61,6 +61,11 @@
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
/* Support for debug MSRs available */
#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
/* stimer Direct Mode is available */
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
@@ -133,6 +138,15 @@
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If the guest opts in to this feature, ASID invalidations
+ * flush only gva -> hpa mapping entries. To flush TLB entries derived
+ * from NPT, the hypercalls HvFlushGuestPhysicalAddressSpace or
+ * HvFlushGuestPhysicalAddressList must be used.
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
#define HV_PARAVISOR_PRESENT BIT(0)
@@ -314,6 +328,9 @@ struct hv_tsc_emulation_status {
#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
+
struct hv_nested_enlightenments_control {
struct {
__u32 directhypercall:1;
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 73d45b0dfff2..1345088e9902 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
*/
#define DECLARE_IDTENTRY_VC(vector, func) \
DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
+ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
/**
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
/**
- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
- which runs on a safe stack.
+ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
+ when raised from kernel mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+#define DEFINE_IDTENTRY_VC_KERNEL(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
/**
- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
- which runs on the VC fall-back stack
+ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
+ when raised from user mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_IST(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
-
-/**
- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
- * @func: Function name of the entry point
- *
- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
- */
-#define DEFINE_IDTENTRY_VC(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+#define DEFINE_IDTENTRY_VC_USER(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
#else /* CONFIG_X86_64 */
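For orientation, a sketch of how the renamed macros expand for the #VC handler (assuming exc_vmm_communication as the entry-point name, as used by the SEV-ES code):

	DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
	/* expands to */
	DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_exc_vmm_communication)
	/* which emits */
	__visible noinstr void kernel_exc_vmm_communication(struct pt_regs *regs,
							    unsigned long error_code)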
@@ -504,7 +495,7 @@ __visible noinstr void func(struct pt_regs *regs, \
.align 8
SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ .rept NR_EXTERNAL_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
@@ -520,7 +511,7 @@ SYM_CODE_END(irq_entries_start)
.align 8
SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+ .rept NR_SYSTEM_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 955b06d6325a..27158436f322 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -102,7 +102,8 @@
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
-#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Willow Cove */
+
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 889f8b1b5b7f..43dcb9284208 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -26,8 +26,8 @@
* This file enumerates the exact layout of them:
*/
+/* This is used as an interrupt vector when programming the APIC. */
#define NMI_VECTOR 0x02
-#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start at 0x20.
@@ -84,7 +84,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xf5
+/* 0xf5 - unused, was UV_BAU_MESSAGE */
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */
@@ -114,6 +114,9 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
+#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+
/*
* Size the maximum number of interrupts.
*
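The two new counts simply partition the IDT space. A hedged sanity-check sketch, assuming the usual values FIRST_EXTERNAL_VECTOR = 0x20, FIRST_SYSTEM_VECTOR = 0xec and NR_VECTORS = 256:

	/* 0x00..0x1f: exceptions, 0x20..0xeb: external, 0xec..0xff: system */
	_Static_assert(FIRST_EXTERNAL_VECTOR + NR_EXTERNAL_VECTORS +
		       NR_SYSTEM_VECTORS == NR_VECTORS,
		       "external and system vectors partition the IDT");
	/* with the values above: 0x20 + 204 + 20 == 256 */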
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 610a05374c02..0449b125d27f 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -4,8 +4,6 @@
#define HAVE_JUMP_LABEL_BATCH
-#define JUMP_LABEL_NOP_SIZE 5
-
#include <asm/asm.h>
#include <asm/nops.h>
@@ -14,15 +12,35 @@
#include <linux/stringify.h>
#include <linux/types.h>
+#define JUMP_TABLE_ENTRY \
+ ".pushsection __jump_table, \"aw\" \n\t" \
+ _ASM_ALIGN "\n\t" \
+ ".long 1b - . \n\t" \
+ ".long %l[l_yes] - . \n\t" \
+ _ASM_PTR "%c0 + %c1 - .\n\t" \
+ ".popsection \n\t"
+
+#ifdef CONFIG_STACK_VALIDATION
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:"
+ "jmp %l[l_yes] # objtool NOPs this \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (2 | branch) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#else
+
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -30,16 +48,13 @@ l_yes:
return true;
}
+#endif /* STACK_VALIDATION */
+
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
- ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
- "2:\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ "jmp %l[l_yes]\n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -47,41 +62,7 @@ l_yes:
return true;
}
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
- .if \def
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .else
- .byte BYTES_NOP5
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key - .
- .popsection
-.endm
-
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
- .if \def
- .byte BYTES_NOP5
- .else
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key + 1 - .
- .popsection
-.endm
+extern int arch_jump_entry_size(struct jump_entry *entry);
#endif /* __ASSEMBLY__ */
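These primitives back the generic static-branch API. A minimal usage sketch (my_feature_key and do_feature_work() are hypothetical; the API names come from <linux/jump_label.h>):

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void my_hot_path(void)
	{
		/* emits the 5-byte NOP above until the key is enabled */
		if (static_branch_unlikely(&my_feature_key))
			do_feature_work();
	}

	/* elsewhere, at runtime: patches the NOP into a jmp */
	static_branch_enable(&my_feature_key);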
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 323641097f63..a12a4987154e 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -87,7 +87,10 @@ KVM_X86_OP(set_identity_map_addr)
KVM_X86_OP(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
KVM_X86_OP_NULL(has_wbinvd_exit)
-KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(get_l2_tsc_offset)
+KVM_X86_OP(get_l2_tsc_multiplier)
+KVM_X86_OP(write_tsc_offset)
+KVM_X86_OP(write_tsc_multiplier)
KVM_X86_OP(get_exit_info)
KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
@@ -99,14 +102,15 @@ KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
KVM_X86_OP_NULL(set_hv_timer)
KVM_X86_OP_NULL(cancel_hv_timer)
KVM_X86_OP(setup_mce)
KVM_X86_OP(smi_allowed)
-KVM_X86_OP(pre_enter_smm)
-KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enter_smm)
+KVM_X86_OP(leave_smm)
KVM_X86_OP(enable_smi_window)
KVM_X86_OP_NULL(mem_enc_op)
KVM_X86_OP_NULL(mem_enc_reg_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..974cbfb1eefe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
#define KVM_REQ_APICV_UPDATE \
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
-#define KVM_REQ_HV_TLB_FLUSH \
+#define KVM_REQ_TLB_FLUSH_GUEST \
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
@@ -269,12 +269,36 @@ enum x86_intercept_stage;
struct kvm_kernel_irq_routing_entry;
/*
- * the pages used as guest page table on soft mmu are tracked by
- * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
- * by indirect shadow page can not be more than 15 bits.
+ * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
+ * also includes TDP pages) to determine whether or not a page can be used in
+ * the given MMU context. This is a subset of the overall kvm_mmu_role to
+ * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
+ * 2 bytes per gfn instead of 4 bytes per gfn.
*
- * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
- * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ * Indirect upper-level shadow pages are tracked for write-protection via
+ * gfn_track. As above, gfn_track is a 16-bit counter, so KVM must not create
+ * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
+ * gfn_track will overflow and explosions will ensue.
+ *
+ * A unique shadow page (SP) for a gfn is created if and only if an existing SP
+ * cannot be reused. The ability to reuse a SP is tracked by its role, which
+ * incorporates various mode bits and properties of the SP. Roughly speaking,
+ * the number of unique SPs that can theoretically be created is 2^n, where n
+ * is the number of bits that are used to compute the role.
+ *
+ * But, even though there are 18 bits in the mask below, not all combinations
+ * of modes and flags are possible. The maximum number of possible upper-level
+ * shadow pages for a single gfn is in the neighborhood of 2^13.
+ *
+ * - invalid shadow pages are not accounted.
+ * - level is effectively limited to four combinations, not 16 as the number
+ * of bits would imply, as 4k SPs are not tracked (allowed to go unsync).
+ * - level is effectively unused for non-PAE paging because there is exactly
+ * one upper level (see 4k SP exception above).
+ * - quadrant is used only for non-PAE paging and is exclusive with
+ * gpte_is_8_bytes.
+ * - execonly and ad_disabled are used only for nested EPT, which makes it
+ * exclusive with quadrant.
*/
union kvm_mmu_page_role {
u32 word;
@@ -285,7 +309,7 @@ union kvm_mmu_page_role {
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned nxe:1;
+ unsigned efer_nx:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
@@ -303,13 +327,26 @@ union kvm_mmu_page_role {
};
};
-union kvm_mmu_extended_role {
/*
- * This structure complements kvm_mmu_page_role caching everything needed for
- * MMU configuration. If nothing in both these structures changed, MMU
- * re-configuration can be skipped. @valid bit is set on first usage so we don't
- * treat all-zero structure as valid data.
+ * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
+ * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
+ * including on nested transitions, if nothing in the full role changes then
+ * MMU re-configuration can be skipped. @valid bit is set on first usage so we
+ * don't treat all-zero structure as valid data.
+ *
+ * The properties that are tracked in the extended role but not the page role
+ * are for things that either (a) do not affect the validity of the shadow page
+ * or (b) are indirectly reflected in the shadow page's role. For example,
+ * CR4.PKE only affects permission checks for software walks of the guest page
+ * tables (because KVM doesn't support Protection Keys with shadow paging), and
+ * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
+ *
+ * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
+ * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
+ * SMAP, but the MMU's permission checks for software walks need to be SMEP and
+ * SMAP aware regardless of CR0.WP.
*/
+union kvm_mmu_extended_role {
u32 word;
struct {
unsigned int valid:1;
@@ -320,7 +357,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
- unsigned int maxphyaddr:6;
+ unsigned int cr4_la57:1;
};
};
@@ -420,11 +457,6 @@ struct kvm_mmu {
struct rsvd_bits_validate guest_rsvd_check;
- /* Can have large pages at levels 2..last_nonleaf_level-1. */
- u8 last_nonleaf_level;
-
- bool nx;
-
u64 pdptrs[4]; /* pae */
};
@@ -543,6 +575,15 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
cpumask_t tlb_flush;
+ bool enforce_cpuid;
+ struct {
+ u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
+ u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
+ u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
+ u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
+ u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
+ u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+ } cpuid_cache;
};
/* Xen HVM per vcpu emulation context */
@@ -707,7 +748,7 @@ struct kvm_vcpu_arch {
} st;
u64 l1_tsc_offset;
- u64 tsc_offset;
+ u64 tsc_offset; /* current tsc offset */
u64 last_guest_tsc;
u64 last_host_tsc;
u64 tsc_offset_adjustment;
@@ -721,7 +762,8 @@ struct kvm_vcpu_arch {
u32 virtual_tsc_khz;
s64 ia32_tsc_adjust_msr;
u64 msr_ia32_power_ctl;
- u64 tsc_scaling_ratio;
+ u64 l1_tsc_scaling_ratio;
+ u64 tsc_scaling_ratio; /* current scaling ratio */
atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -829,7 +871,7 @@ struct kvm_vcpu_arch {
bool l1tf_flush_l1d;
/* Host CPU on which VM-entry was most recently attempted */
- unsigned int last_vmentry_cpu;
+ int last_vmentry_cpu;
/* AMD MSRC001_0015 Hardware Configuration */
u64 msr_hwcr;
@@ -851,6 +893,16 @@ struct kvm_vcpu_arch {
/* Protected Guests */
bool guest_state_protected;
+
+ /*
+ * Set when the PDPTRs were loaded directly by userspace without
+ * reading guest memory.
+ */
+ bool pdptrs_from_userspace;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+#endif
};
struct kvm_lpage_info {
@@ -1002,7 +1054,7 @@ struct kvm_arch {
struct kvm_apic_map __rcu *apic_map;
atomic_t apic_map_dirty;
- bool apic_access_page_done;
+ bool apic_access_memslot_enabled;
unsigned long apicv_inhibit_reasons;
gpa_t wall_clock;
@@ -1062,11 +1114,19 @@ struct kvm_arch {
bool exception_payload_enabled;
bool bus_lock_detection_enabled;
+ /*
+ * If exit_on_emulation_error is set, and the in-kernel instruction
+ * emulator fails to emulate an instruction, allow userspace
+ * the opportunity to look at it.
+ */
+ bool exit_on_emulation_error;
/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
u32 user_space_msr_mask;
struct kvm_x86_msr_filter __rcu *msr_filter;
+ u32 hypercall_exit_enabled;
+
/* Guest can access the SGX PROVISIONKEY. */
bool sgx_provisioning_allowed;
@@ -1124,23 +1184,35 @@ struct kvm_arch {
*/
spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
+
+ /*
+ * If set, rmaps have been allocated for all memslots and should be
+ * allocated for any newly created or modified memslots.
+ */
+ bool memslots_have_rmaps;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+ spinlock_t hv_root_tdp_lock;
+#endif
};
struct kvm_vm_stat {
- ulong mmu_shadow_zapped;
- ulong mmu_pte_write;
- ulong mmu_pde_zapped;
- ulong mmu_flooded;
- ulong mmu_recycled;
- ulong mmu_cache_miss;
- ulong mmu_unsync;
- ulong remote_tlb_flush;
- ulong lpages;
- ulong nx_lpage_splits;
- ulong max_mmu_page_hash_collisions;
+ struct kvm_vm_stat_generic generic;
+ u64 mmu_shadow_zapped;
+ u64 mmu_pte_write;
+ u64 mmu_pde_zapped;
+ u64 mmu_flooded;
+ u64 mmu_recycled;
+ u64 mmu_cache_miss;
+ u64 mmu_unsync;
+ u64 lpages;
+ u64 nx_lpage_splits;
+ u64 max_mmu_page_hash_collisions;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 pf_fixed;
u64 pf_guest;
u64 tlb_flush;
@@ -1154,10 +1226,6 @@ struct kvm_vcpu_stat {
u64 nmi_window_exits;
u64 l1d_flush;
u64 halt_exits;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 request_irq_exits;
u64 irq_exits;
u64 host_state_reload;
@@ -1168,11 +1236,10 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 nested_run;
u64 directed_yield_attempted;
u64 directed_yield_successful;
+ u64 guest_mode;
};
struct x86_instruction_info;
@@ -1304,8 +1371,10 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
- /* Returns actual tsc_offset set in active VMCS */
- u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+ u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
/*
* Retrieve somewhat arbitrary exit information. Intended to be used
@@ -1352,6 +1421,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+ void (*start_assignment)(struct kvm *kvm);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
@@ -1362,8 +1432,8 @@ struct kvm_x86_ops {
void (*setup_mce)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
- int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
- int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+ int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
void (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1422,6 +1492,7 @@ struct kvm_arch_async_pf {
extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP(func) \
@@ -1462,6 +1533,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -1476,7 +1548,6 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
-bool pdptrs_changed(struct kvm_vcpu *vcpu);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1649,6 +1720,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
ulong roots_to_free);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1661,7 +1733,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
unsigned long bit);
@@ -1674,8 +1745,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t gva, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync);
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
int tdp_huge_page_level);
@@ -1787,8 +1857,10 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
return kvm_find_user_return_msr(msr) >= 0;
}
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
@@ -1862,4 +1934,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
int kvm_cpu_dirty_log_size(void);
+int alloc_all_memslots_rmaps(struct kvm *kvm);
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ddfb3cad8dff..0607ec4f5091 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -305,7 +305,7 @@ extern void apei_mce_report_mem_error(int corrected,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
- SMCA_LS_V2, /* Load Store */
+ SMCA_LS_V2,
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
@@ -314,17 +314,22 @@ enum smca_bank_types {
SMCA_FP, /* Floating Point */
SMCA_L3_CACHE, /* L3 Cache */
SMCA_CS, /* Coherent Slave */
- SMCA_CS_V2, /* Coherent Slave */
+ SMCA_CS_V2,
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
+ SMCA_UMC_V2,
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
- SMCA_PSP_V2, /* Platform Security Processor */
+ SMCA_PSP_V2,
SMCA_SMU, /* System Management Unit */
- SMCA_SMU_V2, /* System Management Unit */
+ SMCA_SMU_V2,
SMCA_MP5, /* Microprocessor 5 Unit */
SMCA_NBIO, /* Northbridge IO Unit */
SMCA_PCIE, /* PCI Express Unit */
+ SMCA_PCIE_V2,
+ SMCA_XGMI_PCS, /* xGMI PCS Unit */
+ SMCA_XGMI_PHY, /* xGMI PHY Unit */
+ SMCA_WAFL_PHY, /* WAFL PHY Unit */
N_SMCA_BANK_TYPES
};
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 211ba3375ee9..a7c413432b33 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -772,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
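These masks compose in the usual read-modify-write pattern; a sketch (rdmsrl()/wrmsrl() are the kernel helpers, and MSR_TSX_FORCE_ABORT is the MSR these bits live in):

	u64 tfa;

	rdmsrl(MSR_TSX_FORCE_ABORT, tfa);
	tfa |= MSR_TFA_TSX_CPUID_CLEAR;	/* make CPUID hide RTM/HLE */
	wrmsrl(MSR_TSX_FORCE_ABORT, tfa);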
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index c1e5e818ba16..c5573eaa5bb9 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_NOPS_H
#define _ASM_X86_NOPS_H
+#include <asm/asm.h>
+
/*
* Define nops for use with alternative() and for tracing.
*/
@@ -57,20 +59,14 @@
#endif /* CONFIG_64BIT */
-#ifdef __ASSEMBLY__
-#define _ASM_MK_NOP(x) .byte x
-#else
-#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
-#endif
-
-#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
+#define ASM_NOP1 _ASM_BYTES(BYTES_NOP1)
+#define ASM_NOP2 _ASM_BYTES(BYTES_NOP2)
+#define ASM_NOP3 _ASM_BYTES(BYTES_NOP3)
+#define ASM_NOP4 _ASM_BYTES(BYTES_NOP4)
+#define ASM_NOP5 _ASM_BYTES(BYTES_NOP5)
+#define ASM_NOP6 _ASM_BYTES(BYTES_NOP6)
+#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
+#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
#define ASM_NOP_MAX 8
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 7555b48803a8..4d5810c8fab7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -34,9 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
copy_page(to, from);
}
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#ifndef __pa
#define __pa(x) __phys_addr((unsigned long)(x))
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index ca840fec7776..4bde0dc66100 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
*
* With page table isolation enabled, we map the LDT in ... [stay tuned]
*/
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
{
unsigned long ret;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 544f41a179fb..8fc1b5003713 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -478,6 +478,7 @@ struct x86_pmu_lbr {
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
+extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index f8cb8af4de5c..fe5efbcba824 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 556b2b17c3e2..364d0e42e280 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -663,6 +663,7 @@ extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
+extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 629c3df243f0..2cef6c5a52c2 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -9,8 +9,13 @@
#define __ASM_X86_SEV_COMMON_H
#define GHCB_MSR_INFO_POS 0
-#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
+#define GHCB_DATA_LOW 12
+#define GHCB_MSR_INFO_MASK (BIT_ULL(GHCB_DATA_LOW) - 1)
+#define GHCB_DATA(v) \
+ (((unsigned long)(v) & ~GHCB_MSR_INFO_MASK) >> GHCB_DATA_LOW)
+
+/* SEV Information Request/Response */
#define GHCB_MSR_SEV_INFO_RESP 0x001
#define GHCB_MSR_SEV_INFO_REQ 0x002
#define GHCB_MSR_VER_MAX_POS 48
@@ -28,6 +33,7 @@
#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+/* CPUID Request/Response */
#define GHCB_MSR_CPUID_REQ 0x004
#define GHCB_MSR_CPUID_RESP 0x005
#define GHCB_MSR_CPUID_FUNC_POS 32
@@ -45,6 +51,14 @@
(((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
(((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+/* AP Reset Hold */
+#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+
+/* GHCB Hypervisor Feature Request/Response */
+#define GHCB_MSR_HV_FT_REQ 0x080
+#define GHCB_MSR_HV_FT_RESP 0x081
+
#define GHCB_MSR_TERM_REQ 0x100
#define GHCB_MSR_TERM_REASON_SET_POS 12
#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
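As a worked example of the MSR protocol, a hedged guest-side sketch requesting CPUID leaf 0x1, register EAX (register index 0 per the GHCB spec); the GHCB MSR write, VMGEXIT and read-back are elided:

	u64 req, resp, eax;

	req = GHCB_MSR_CPUID_REQ |
	      (0ULL << GHCB_MSR_CPUID_REG_POS) |	/* 0 = EAX */
	      (0x1ULL << GHCB_MSR_CPUID_FUNC_POS);	/* CPUID leaf 1 */

	/* wrmsrl(MSR_AMD64_SEV_ES_GHCB, req); VMGEXIT; resp = rdmsrl(...) */

	if ((resp & GHCB_MSR_INFO_MASK) != GHCB_MSR_CPUID_RESP)
		/* protocol violation: terminate via GHCB_MSR_TERM_REQ */;
	else
		eax = resp >> GHCB_MSR_CPUID_FUNC_POS;	/* requested value */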
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index 9c31e0ebc55b..05f3e21f01a7 100644
--- a/arch/x86/include/asm/sgx.h
+++ b/arch/x86/include/asm/sgx.h
@@ -13,7 +13,7 @@
/*
* This file contains both data structures defined by SGX architecture and Linux
* defined software data structures and functions. The two should not be mixed
- * together for better readibility. The architectural definitions come first.
+ * together for better readability. The architectural definitions come first.
*/
/* The SGX specific CPUID function. */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b6ffe58c70fa..24a8d6c4fb18 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -11,7 +11,7 @@
* The same segment is shared by percpu area and stack canary. On
* x86_64, percpu symbols are zero based and %gs (64-bit) points to the
* base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the approproate
+ * fixed_percpu_data which contains stack_canary at the appropriate
* offset. On x86_32, the stack canary is just a regular percpu
* variable.
*
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 772e60efe243..e322676039f4 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -156,6 +156,12 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 avic_physical_id; /* Offset 0xf8 */
u8 reserved_7[8];
u64 vmsa_pa; /* Used for an SEV-ES guest */
+ u8 reserved_8[720];
+ /*
+ * Offset 0x3e0, 32 bytes reserved
+ * for use by hypervisor/software.
+ */
+ u8 reserved_sw[32];
};
@@ -314,7 +320,7 @@ struct ghcb {
#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032
-#define EXPECTED_VMCB_CONTROL_AREA_SIZE 272
+#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
#define EXPECTED_GHCB_SIZE PAGE_SIZE
static inline void __unused_size_checks(void)
@@ -326,7 +332,6 @@ static inline void __unused_size_checks(void)
struct vmcb {
struct vmcb_control_area control;
- u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
struct vmcb_save_area save;
} __packed;
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 7cbf733d11af..f7e2d82d24fb 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -21,13 +21,12 @@ extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
-#endif
-
-#if defined(CONFIG_IA32_EMULATION)
+#else
+/*
+ * These tables may not exist, but declare the prototypes anyway
+ * so that IS_ENABLED() checks still compile.
+ */
extern const sys_call_ptr_t ia32_sys_call_table[];
-#endif
-
-#ifdef CONFIG_X86_X32_ABI
extern const sys_call_ptr_t x32_sys_call_table[];
#endif
@@ -160,7 +159,7 @@ static inline int syscall_get_arch(struct task_struct *task)
? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
-void do_syscall_64(unsigned long nr, struct pt_regs *regs);
+void do_syscall_64(struct pt_regs *regs, int nr);
void do_int80_syscall_32(struct pt_regs *regs);
long do_fast_syscall_32(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index 80c08c7d5e72..6a2827d0681f 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -17,7 +17,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* __x64_sys_*() - 64-bit native syscall
* __ia32_sys_*() - 32-bit native syscall or common compat syscall
* __ia32_compat_sys_*() - 32-bit compat syscall
- * __x32_compat_sys_*() - 64-bit X32 compat syscall
+ * __x64_compat_sys_*() - 64-bit X32 compat syscall
*
* The registers are decoded according to the ABI:
* 64-bit: RDI, RSI, RDX, R10, R8, R9
@@ -166,17 +166,17 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* with x86_64 obviously do not need such care.
*/
#define __X32_COMPAT_SYS_STUB0(name) \
- __SYS_STUB0(x32, compat_sys_##name)
+ __SYS_STUB0(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
- __SYS_STUBx(x32, compat_sys##name, \
+ __SYS_STUBx(x64, compat_sys##name, \
SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))
#define __X32_COMPAT_COND_SYSCALL(name) \
- __COND_SYSCALL(x32, compat_sys_##name)
+ __COND_SYSCALL(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_NI(name) \
- __SYS_NI(x32, compat_sys_##name)
+ __SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32 */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
index ddbdefd5b94f..91a7b6687c3b 100644
--- a/arch/x86/include/asm/thermal.h
+++ b/arch/x86/include/asm/thermal.h
@@ -3,11 +3,13 @@
#define _ASM_X86_THERMAL_H
#ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
void intel_init_thermal(struct cpuinfo_x86 *c);
bool x86_thermal_enabled(void);
void intel_thermal_interrupt(void);
#else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void) { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
#endif
#endif /* _ASM_X86_THERMAL_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index c1c3d31b15c0..80e9d5206a71 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -13,7 +13,7 @@
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
-# define __NR_ia32_syscall_max __NR_syscall_max
+# define IA32_NR_syscalls (__NR_syscalls)
# else
@@ -26,12 +26,12 @@
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+# define X32_NR_syscalls (__NR_x32_syscalls)
+# define IA32_NR_syscalls (__NR_ia32_syscalls)
# endif
-# define NR_syscalls (__NR_syscall_max + 1)
-# define X32_NR_syscalls (__NR_x32_syscall_max + 1)
-# define IA32_NR_syscalls (__NR_ia32_syscall_max + 1)
+# define NR_syscalls (__NR_syscalls)
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 5fdfcb47000f..054604aba9f0 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -2,10 +2,12 @@
#ifndef _ASM_X86_HWCAP2_H
#define _ASM_X86_HWCAP2_H
+#include <linux/const.h>
+
/* MONITOR/MWAIT enabled in Ring 3 */
-#define HWCAP2_RING3MWAIT (1 << 0)
+#define HWCAP2_RING3MWAIT _BITUL(0)
/* Kernel allows FSGSBASE instructions available in Ring 3 */
-#define HWCAP2_FSGSBASE BIT(1)
+#define HWCAP2_FSGSBASE _BITUL(1)
#endif
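Userspace consumes these bits through the auxiliary vector; a minimal sketch:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap2.h>

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		if (hwcap2 & HWCAP2_FSGSBASE)
			puts("FSGSBASE instructions usable in ring 3");
		if (hwcap2 & HWCAP2_RING3MWAIT)
			puts("MONITOR/MWAIT usable in ring 3");
		return 0;
	}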
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 0662f644aad9..a6c327f8ad9e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -159,6 +159,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
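A hedged sketch of how a VMM might use the new kvm_sregs2 struct; KVM_GET_SREGS2/KVM_SET_SREGS2 are the accompanying ioctls from the same series, and save_pdptrs() is a hypothetical helper:

	struct kvm_sregs2 sregs2;

	if (ioctl(vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
		err(1, "KVM_GET_SREGS2");

	/* PDPTRs are only meaningful when the flag says so */
	if (sregs2.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID)
		save_pdptrs(sregs2.pdptrs);

	/* on restore, feed the PDPTRs back so KVM need not re-read guest memory */
	if (ioctl(vcpu_fd, KVM_SET_SREGS2, &sregs2) < 0)
		err(1, "KVM_SET_SREGS2");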
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 950afebfba88..5146bbab84d4 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -33,6 +33,8 @@
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
+#define KVM_FEATURE_MIGRATION_CONTROL 17
#define KVM_HINTS_REALTIME 0
@@ -54,6 +56,7 @@
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
struct kvm_steal_time {
__u64 steal;
@@ -90,6 +93,16 @@ struct kvm_clock_pairing {
/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
+/* MSR_KVM_MIGRATION_CONTROL */
+#define KVM_MIGRATION_READY (1 << 0)
+
+/* KVM_HC_MAP_GPA_RANGE */
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
+#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4)
+#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
+#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
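A hedged guest-side sketch of the new hypercall, marking a range of 2M pages encrypted (kvm_hypercall3() is the guest helper from <asm/kvm_para.h>; gpa and npages are assumed inputs):

	u64 attrs = KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_2M;
	long ret = kvm_hypercall3(KVM_HC_MAP_GPA_RANGE, gpa, npages, attrs);

	if (ret)
		pr_warn("KVM_HC_MAP_GPA_RANGE failed: %ld\n", ret);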
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 554f75fe013c..efa969325ede 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -110,6 +110,9 @@
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e90310cbe73a..e55e0c1fad8c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -5,6 +5,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/acpi.h>
@@ -42,8 +43,6 @@ EXPORT_SYMBOL(acpi_disabled);
# include <asm/proto.h>
#endif /* X86 */
-#define PREFIX "ACPI: "
-
int acpi_noirq; /* skip ACPI IRQ initialization */
static int acpi_nobgrt; /* skip ACPI BGRT */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
@@ -130,15 +129,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt = (struct acpi_table_madt *)table;
if (!madt) {
- printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
- madt->address);
+ pr_debug("Local APIC address 0x%08x\n", madt->address);
}
default_acpi_madt_oem_check(madt->header.oem_id,
@@ -161,7 +159,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
int cpu;
if (id >= MAX_LOCAL_APIC) {
- printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+ pr_info("skipped apicid that is too big\n");
return -EINVAL;
}
@@ -213,13 +211,13 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
*/
if (!apic->apic_id_valid(apic_id)) {
if (enabled)
- pr_warn(PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
return 0;
}
acpi_register_lapic(apic_id, processor->uid, enabled);
#else
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
#endif
return 0;
@@ -306,7 +304,7 @@ acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -324,7 +322,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -514,14 +512,14 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ pr_warn("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
@@ -597,7 +595,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
if (old == new)
return;
- printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+ pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, 0x4d0);
outb(new >> 8, 0x4d1);
}
@@ -754,7 +752,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
if (cpu < 0) {
- pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
@@ -870,8 +868,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
- printk(KERN_WARNING PREFIX "HPET timers must be located in "
- "memory.\n");
+ pr_warn("HPET timers must be located in memory.\n");
return -1;
}
@@ -883,9 +880,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* want to allocate a resource there.
*/
if (!hpet_address) {
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: %#lx is invalid\n",
- hpet_tbl->id, hpet_address);
+ pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
@@ -896,21 +891,17 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
- printk(KERN_WARNING PREFIX "HPET id: %#x "
- "base: 0xfed0000000000000 is bogus\n "
- "try hpet=force on the kernel command line to "
- "fix it up to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address = 0;
return 0;
}
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: 0xfed0000000000000 fixed up "
- "to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address >>= 32;
}
#endif
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
- hpet_tbl->id, hpet_address);
+ pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
/*
* Allocate and initialize the HPET firmware resource for adding into
@@ -955,24 +946,24 @@ late_initcall(hpet_insert_resource);
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
- pr_debug("ACPI: no legacy devices present\n");
+ pr_debug("no legacy devices present\n");
x86_platform.legacy.devices.pnpbios = 0;
}
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
- pr_debug("ACPI: i8042 controller is absent\n");
+ pr_debug("i8042 controller is absent\n");
x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
- pr_debug("ACPI: not registering RTC platform device\n");
+ pr_debug("not registering RTC platform device\n");
x86_platform.legacy.rtc = 0;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
- pr_debug("ACPI: probing for VGA not safe\n");
+ pr_debug("probing for VGA not safe\n");
x86_platform.legacy.no_vga = 1;
}
@@ -997,8 +988,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
- pmtmr_ioport);
+ pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
return 0;
}
@@ -1024,8 +1014,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
+ pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@@ -1057,8 +1046,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC/X2APIC entries\n");
+ pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
@@ -1066,11 +1054,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
x2count = madt_proc[1].count;
}
if (!count && !x2count) {
- printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1080,7 +1068,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1139,7 +1127,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
}
if (idx != mp_irq_entries) {
- printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+ pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
@@ -1179,26 +1167,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* if "noapic" boot option, don't look for IO-APICs
*/
if (skip_ioapic_setup) {
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
- "due to 'noapic' option.\n");
+ pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
+ pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1218,7 +1204,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1251,8 +1237,7 @@ static void __init early_acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
@@ -1289,8 +1274,7 @@ static void __init acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
@@ -1300,8 +1284,7 @@ static void __init acpi_process_madt(void)
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
- printk(KERN_WARNING PREFIX
- "No APIC-table, disabling MPS\n");
+ pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
@@ -1311,11 +1294,9 @@ static void __init acpi_process_madt(void)
* processors, where MPS only supports physical.
*/
if (acpi_lapic && acpi_ioapic)
- printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
- "information\n");
+ pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
- printk(KERN_INFO "Using ACPI for processor (LAPIC) "
- "configuration information\n");
+ pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
return;
}
@@ -1323,8 +1304,7 @@ static void __init acpi_process_madt(void)
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
- d->ident);
+ pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
acpi_noirq_set();
}
return 0;
@@ -1333,8 +1313,7 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
- d->ident);
+ pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
acpi_disable_pci();
}
return 0;
@@ -1343,11 +1322,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+ pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
+ pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
@@ -1574,9 +1552,9 @@ int __init early_acpi_boot_init(void)
*/
if (acpi_blacklisted()) {
if (acpi_force) {
- printk(KERN_WARNING PREFIX "acpi=force override\n");
+ pr_warn("acpi=force override\n");
} else {
- printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ pr_warn("Disabling ACPI support\n");
disable_acpi();
return 1;
}
@@ -1692,9 +1670,7 @@ int __init acpi_mps_check(void)
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
if (acpi_disabled || acpi_noirq) {
- printk(KERN_WARNING "MPS support code is not built-in.\n"
- "Using acpi=off or acpi=noirq or pci=noacpi "
- "may have problem\n");
+ pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
return 1;
}
#endif
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 49ae4e1ac9cd..7de599eba7f0 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -197,7 +197,8 @@ static int __init ffh_cstate_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_INTEL &&
- c->x86_vendor != X86_VENDOR_AMD)
+ c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6974b5174495..e9da3dc71254 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -75,7 +75,7 @@ do { \
} \
} while (0)
-const unsigned char x86nops[] =
+static const unsigned char x86nops[] =
{
BYTES_NOP1,
BYTES_NOP2,
@@ -183,41 +183,69 @@ done:
}
/*
+ * optimize_nops_range() - Optimize a sequence of single-byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+ unsigned long flags;
+ int i = off, nnops;
+
+ while (i < instrlen) {
+ if (instr[i] != 0x90)
+ break;
+
+ i++;
+ }
+
+ nnops = i - off;
+
+ if (nnops <= 1)
+ return nnops;
+
+ local_irq_save(flags);
+ add_nops(instr + off, nnops);
+ local_irq_restore(flags);
+
+ DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+ return nnops;
+}
+
+/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
- unsigned long flags;
struct insn insn;
- int nop, i = 0;
+ int i = 0;
/*
- * Jump over the non-NOP insns, the remaining bytes must be single-byte
- * NOPs, optimize them.
+ * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+ * ones.
*/
for (;;) {
if (insn_decode_kernel(&insn, &instr[i]))
return;
+ /*
+ * See if this and any potentially following NOPs can be
+ * optimized.
+ */
if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
- break;
-
- if ((i += insn.length) >= a->instrlen)
- return;
- }
+ i += optimize_nops_range(instr, a->instrlen, i);
+ else
+ i += insn.length;
- for (nop = i; i < a->instrlen; i++) {
- if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+ if (i >= a->instrlen)
return;
}
-
- local_irq_save(flags);
- add_nops(instr + nop, i - nop);
- local_irq_restore(flags);
-
- DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
- instr, nop, a->instrlen);
}
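As a concrete example of the effect: five leftover single-byte NOPs become one 5-byte NOP (instruction encodings per <asm/nops.h>):

	/* before: 90 90 90 90 90	(5 x one-byte NOP) */
	/* after:  0f 1f 44 00 00	(BYTES_NOP5, nopl 0x0(%rax,%rax,1)) */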
/*
@@ -273,8 +301,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
instr, instr, a->instrlen,
replacement, a->replacementlen);
- DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
- DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 09083094eb57..23dda362dc0f 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -57,6 +58,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{}
};
@@ -72,6 +74,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4a39fb429f15..d262811ce14b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
+ lapic_update_legacy_vectors();
}
#ifdef CONFIG_UP_LATE_INIT
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6dbdc7c22bb7..fb67ed5e7e6a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
+void __init lapic_update_legacy_vectors(void)
+{
+ unsigned int i;
+
+ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+ return;
+
+ /*
+ * If the IO/APIC is disabled via config, kernel command line or
+ * lack of enumeration then all legacy interrupts are routed
+ * through the PIC. Make sure that they are marked as legacy
+ * vectors. PIC_CASCADE_IR has already been marked in
+ * lapic_assign_system_vectors().
+ */
+ for (i = 0; i < nr_legacy_irqs(); i++) {
+ if (i != PIC_CASCADE_IR)
+ lapic_assign_legacy_vector(i, true);
+ }
+}
+
void __init lapic_assign_system_vectors(void)
{
unsigned int i, vector = 0;
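
A small sketch of the loop lapic_update_legacy_vectors() adds, with NR_LEGACY_IRQS and CASCADE_IRQ as stand-ins for the kernel's nr_legacy_irqs() and PIC_CASCADE_IR (IRQ2, which cascades the slave PIC):

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

#define NR_LEGACY_IRQS	16	/* stand-in for nr_legacy_irqs() */
#define CASCADE_IRQ	2	/* stand-in for PIC_CASCADE_IR */

static void assign_legacy_vector(unsigned int irq)
{
	printf("IRQ%u marked as legacy vector\n", irq);
}

int main(void)
{
	unsigned int i;

	/* The cascade IRQ was already marked earlier, skip it. */
	for (i = 0; i < NR_LEGACY_IRQS; i++) {
		if (i != CASCADE_IRQ)
			assign_legacy_vector(i);
	}
	return 0;
}
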
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c06ac56eae4d..b7c003013d41 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -646,6 +646,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a1b756c49a93..a99d00393206 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1773,10 +1773,16 @@ void syscall_init(void)
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif
- /* Flags to clear on syscall */
+ /*
+ * Flags to clear on syscall; clear as much as possible
+ * to minimize interference between user space and the kernel.
+ */
wrmsrl(MSR_SYSCALL_MASK,
- X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
- X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
+ X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
+ X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
+ X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
+ X86_EFLAGS_AC|X86_EFLAGS_ID);
}
#else /* CONFIG_X86_64 */
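
For context on the wider MSR_SYSCALL_MASK: the SYSCALL instruction computes RFLAGS &= ~IA32_FMASK on entry, so every flag added to the mask above arrives cleared in the kernel. A sketch of that masking, with flag values taken from the x86 EFLAGS layout:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define X86_EFLAGS_CF	0x00000001UL	/* carry */
#define X86_EFLAGS_ZF	0x00000040UL	/* zero */
#define X86_EFLAGS_IF	0x00000200UL	/* interrupt enable */
#define X86_EFLAGS_DF	0x00000400UL	/* direction */

int main(void)
{
	/* A (shortened) FMASK; the hunk above sets many more bits. */
	uint64_t fmask = X86_EFLAGS_CF | X86_EFLAGS_ZF |
			 X86_EFLAGS_IF | X86_EFLAGS_DF;
	uint64_t user_rflags = X86_EFLAGS_CF | X86_EFLAGS_ZF | 0x2;

	/* What SYSCALL does architecturally: RFLAGS &= ~IA32_FMASK. */
	uint64_t kernel_rflags = user_rflags & ~fmask;

	printf("user 0x%llx -> kernel 0x%llx\n",
	       (unsigned long long)user_rflags,
	       (unsigned long long)kernel_rflags);
	return 0;
}
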
@@ -1938,13 +1944,12 @@ void cpu_init_exception_handling(void)
/*
* cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
+ * initialized (naturally) in the bootstrap process, such as the GDT. We
+ * reload it nevertheless, this function acts as a 'CPU state barrier',
+ * nothing should get across.
*/
void cpu_init(void)
{
- struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
@@ -1957,8 +1962,6 @@ void cpu_init(void)
early_cpu_to_node(cpu) != NUMA_NO_NODE)
set_numa_node(early_cpu_to_node(cpu));
#endif
- setup_getcpu(cpu);
-
pr_debug("Initializing CPU#%d\n", cpu);
if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
@@ -1970,7 +1973,6 @@ void cpu_init(void)
* and set up the GDT descriptor:
*/
switch_to_new_gdt(cpu);
- load_current_idt();
if (IS_ENABLED(CONFIG_X86_64)) {
loadsegment(fs, 0);
@@ -1990,12 +1992,6 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, cur);
- /* Initialize the TSS. */
- tss_setup_ist(tss);
- tss_setup_io_bitmap(tss);
- set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
-
- load_TR_desc();
/*
* sp0 points to the entry trampoline stack regardless of what task
* is running.
@@ -2017,6 +2013,18 @@ void cpu_init(void)
load_fixmap_gdt(cpu);
}
+#ifdef CONFIG_SMP
+void cpu_init_secondary(void)
+{
+ /*
+ * Relies on the BP having set-up the IDT tables, which are loaded
+ * on this CPU in cpu_init_exception_handling().
+ */
+ cpu_init_exception_handling();
+ cpu_init();
+}
+#endif
+
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 67944128876d..95521302630d 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -48,6 +48,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
enum tsx_ctrl_states {
TSX_CTRL_ENABLE,
TSX_CTRL_DISABLE,
+ TSX_CTRL_RTM_ALWAYS_ABORT,
TSX_CTRL_NOT_SUPPORTED,
};
@@ -56,6 +57,7 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
extern void tsx_enable(void);
extern void tsx_disable(void);
+extern void tsx_clear_cpuid(void);
#else
static inline void tsx_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 0bd6c74e3ba1..6d50136f7ab9 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -260,6 +260,10 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8adffc17fa8b..8321c43554a1 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -10,6 +10,7 @@
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
@@ -41,6 +42,7 @@ enum split_lock_detect_state {
sld_off = 0,
sld_warn,
sld_fatal,
+ sld_ratelimit,
};
/*
@@ -717,8 +719,10 @@ static void init_intel(struct cpuinfo_x86 *c)
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
- if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+ tsx_clear_cpuid();
split_lock_init();
bus_lock_init();
@@ -997,13 +1001,30 @@ static const struct {
{ "off", sld_off },
{ "warn", sld_warn },
{ "fatal", sld_fatal },
+ { "ratelimit:", sld_ratelimit },
};
+static struct ratelimit_state bld_ratelimit;
+
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
- int len = strlen(opt);
+ int len = strlen(opt), ratelimit;
+
+ if (strncmp(arg, opt, len))
+ return false;
+
+ /*
+ * Min ratelimit is 1 bus lock/sec.
+ * Max ratelimit is 1000 bus locks/sec.
+ */
+ if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
+ ratelimit > 0 && ratelimit <= 1000) {
+ ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
+ ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
+ return true;
+ }
- return len == arglen && !strncmp(arg, opt, len);
+ return len == arglen;
}
static bool split_lock_verify_msr(bool on)
@@ -1082,6 +1103,15 @@ static void sld_update_msr(bool on)
static void split_lock_init(void)
{
+ /*
+ * In ratelimit mode the #DB handler for bus lock enforces the rate
+ * limit and #AC-based split lock detection is disabled.
+ */
+ if (sld_state == sld_ratelimit) {
+ split_lock_verify_msr(false);
+ return;
+ }
+
if (cpu_model_supports_sld)
split_lock_verify_msr(sld_state != sld_off);
}
@@ -1154,6 +1184,12 @@ void handle_bus_lock(struct pt_regs *regs)
switch (sld_state) {
case sld_off:
break;
+ case sld_ratelimit:
+ /* Enforce no more than bld_ratelimit bus locks/sec. */
+ while (!__ratelimit(&bld_ratelimit))
+ msleep(20);
+ /* Warn on the bus lock. */
+ fallthrough;
case sld_warn:
pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
current->comm, current->pid, regs->ip);
@@ -1259,6 +1295,10 @@ static void sld_state_show(void)
" from non-WB" : "");
}
break;
+ case sld_ratelimit:
+ if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
+ break;
}
}
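
A userspace sketch of the "ratelimit:N" parsing and range check the new match_option() branch performs (1 to 1000 bus locks/sec). parse_ratelimit() below is a hypothetical helper, not a kernel function:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

static int parse_ratelimit(const char *arg)
{
	int ratelimit;

	/* Accept only "ratelimit:N" with 1 <= N <= 1000. */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000)
		return ratelimit;

	return -1;	/* malformed or out of range */
}

int main(void)
{
	printf("%d\n", parse_ratelimit("ratelimit:100"));	/* 100 */
	printf("%d\n", parse_ratelimit("ratelimit:5000"));	/* -1  */
	return 0;
}
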
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e486f96b3cb3..08831acc1d03 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -77,27 +77,29 @@ struct smca_bank_name {
};
static struct smca_bank_name smca_names[] = {
- [SMCA_LS] = { "load_store", "Load Store Unit" },
- [SMCA_LS_V2] = { "load_store", "Load Store Unit" },
- [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
- [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
- [SMCA_DE] = { "decode_unit", "Decode Unit" },
- [SMCA_RESERVED] = { "reserved", "Reserved" },
- [SMCA_EX] = { "execution_unit", "Execution Unit" },
- [SMCA_FP] = { "floating_point", "Floating Point Unit" },
- [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
- [SMCA_CS] = { "coherent_slave", "Coherent Slave" },
- [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
- [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
- [SMCA_UMC] = { "umc", "Unified Memory Controller" },
- [SMCA_PB] = { "param_block", "Parameter Block" },
- [SMCA_PSP] = { "psp", "Platform Security Processor" },
- [SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
- [SMCA_SMU] = { "smu", "System Management Unit" },
- [SMCA_SMU_V2] = { "smu", "System Management Unit" },
- [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
- [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
- [SMCA_PCIE] = { "pcie", "PCI Express Unit" },
+ [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
+ [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
+ [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
+ [SMCA_DE] = { "decode_unit", "Decode Unit" },
+ [SMCA_RESERVED] = { "reserved", "Reserved" },
+ [SMCA_EX] = { "execution_unit", "Execution Unit" },
+ [SMCA_FP] = { "floating_point", "Floating Point Unit" },
+ [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
+ [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
+ [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
+
+ /* UMC v2 is separate because both versions can exist in a single system. */
+ [SMCA_UMC] = { "umc", "Unified Memory Controller" },
+ [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
+ [SMCA_PB] = { "param_block", "Parameter Block" },
+ [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
+ [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
+ [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
+ [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
+ [SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
+ [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
+ [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
+ [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
};
static const char *smca_get_name(enum smca_bank_types t)
@@ -155,6 +157,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
+ { SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) },
/* Parameter Block MCA type */
{ SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
@@ -175,6 +178,16 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* PCI Express Unit MCA type */
{ SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
+ { SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) },
+
+ /* xGMI PCS MCA type */
+ { SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
+
+ /* xGMI PHY MCA type */
+ { SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
+
+ /* WAFL PHY MCA type */
+ { SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
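
The rewritten smca_names[] table relies on GNU C range designators, where [first ... last] initializes every element in the inclusive range with one value. A self-contained sketch (this is a GCC/Clang extension, not ISO C):

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

enum bank { BANK_LS, BANK_LS_V2, BANK_IF, NR_BANKS };

static const char *bank_names[NR_BANKS] = {
	[BANK_LS ... BANK_LS_V2] = "load_store",	/* both versions */
	[BANK_IF]		 = "insn_fetch",
};

int main(void)
{
	printf("%s %s %s\n", bank_names[BANK_LS],
	       bank_names[BANK_LS_V2], bank_names[BANK_IF]);
	return 0;
}
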
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index b58b85380ddb..0e3ae64d3b76 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,8 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
mce_setup(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
- m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
+ m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
if (severity >= GHES_SEV_RECOVERABLE)
m.status |= MCI_STATUS_UC;
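
The added m.misc assignment packs the address information into MCi_MISC. A sketch of that packing, assuming the layout documented in the SDM: recoverable-address LSB in bits 5:0 and address mode in bits 8:6, with 2 meaning a physical address:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define MCI_MISC_ADDR_PHYS	2	/* address mode: physical */
#define PAGE_SHIFT		12	/* 4K pages: address valid down to bit 12 */

int main(void)
{
	uint64_t misc = ((uint64_t)MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;

	printf("MCi_MISC = 0x%llx (LSB=%u, mode=%u)\n",
	       (unsigned long long)misc,
	       (unsigned)(misc & 0x3f), (unsigned)((misc >> 6) & 0x7));
	return 0;
}
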
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index bf7fe87a7e88..22791aadc085 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1257,19 +1257,28 @@ static void kill_me_maybe(struct callback_head *cb)
{
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
int flags = MF_ACTION_REQUIRED;
+ int ret;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
- !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
+ ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
+ if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
sync_core();
return;
}
+ /*
+ * -EHWPOISON from memory_failure() means that it already sent SIGBUS
+ * to the current process with the proper error info, so no need to
+ * send SIGBUS here again.
+ */
+ if (ret == -EHWPOISON)
+ return;
+
if (p->mce_vaddr != (void __user *)-1l) {
force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
} else {
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 22f13343b5da..01ca94f42e4e 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -236,7 +236,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
for_each_present_cpu(i) {
if (i == 0)
continue;
- ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
+ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
BUG_ON(ret);
}
@@ -252,6 +252,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
static void __init ms_hyperv_init_platform(void)
{
+ int hv_max_functions_eax;
int hv_host_info_eax;
int hv_host_info_ebx;
int hv_host_info_ecx;
@@ -269,6 +270,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+ hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
+
pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
ms_hyperv.misc_features);
@@ -298,8 +301,7 @@ static void __init ms_hyperv_init_platform(void)
/*
* Extract host information.
*/
- if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >=
- HYPERV_CPUID_VERSION) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
@@ -325,9 +327,11 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
}
- if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
ms_hyperv.nested_features =
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
+ pr_info("Hyper-V: Nested features: 0x%x\n",
+ ms_hyperv.nested_features);
}
/*
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 3ef5868ac588..7aecb2fc3186 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c4d320d02fd5..6a5f60a37219 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -70,6 +70,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* struct mon_evt - Entry in the event list of a resource
* @evtid: event id
* @name: name of the event
+ * @list: entry in &rdt_resource->evt_list
*/
struct mon_evt {
u32 evtid;
@@ -78,10 +79,13 @@ struct mon_evt {
};
/**
- * struct mon_data_bits - Monitoring details for each event file
- * @rid: Resource id associated with the event file.
+ * union mon_data_bits - Monitoring details for each event file
+ * @priv: Used to store monitoring event data in @u
+ * as kernfs private data
+ * @rid: Resource id associated with the event file
* @evtid: Event id associated with the event file
* @domid: The domain to which the event file belongs
+ * @u: Name of the bit fields struct
*/
union mon_data_bits {
void *priv;
@@ -119,6 +123,7 @@ enum rdt_group_type {
* @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
* @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
* allowed AND the allocations are Cache Pseudo-Locked
+ * @RDT_NUM_MODES: Total number of modes
*
* The mode of a resource group enables control over the allowed overlap
* between allocations associated with different resource groups (classes
@@ -142,7 +147,7 @@ enum rdtgrp_mode {
/**
* struct mongroup - store mon group's data in resctrl fs.
- * @mon_data_kn kernlfs node for the mon_data directory
+ * @mon_data_kn: kernfs node for the mon_data directory
* @parent: parent rdtgrp
* @crdtgrp_list: child rdtgroup node list
* @rmid: rmid for this rdtgroup
@@ -282,11 +287,11 @@ struct rftype {
/**
* struct mbm_state - status for each MBM counter in each domain
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
+ * @prev_msr: Value of IA32_QM_CTR for this RMID last time we read it
* @prev_bw_msr: Value of previous IA32_QM_CTR for bandwidth counting
- * @prev_bw The most recent bandwidth in MBps
- * @delta_bw Difference between the current and previous bandwidth
- * @delta_comp Indicates whether to compute the delta_bw
+ * @prev_bw: The most recent bandwidth in MBps
+ * @delta_bw: Difference between the current and previous bandwidth
+ * @delta_comp: Indicates whether to compute the delta_bw
*/
struct mbm_state {
u64 chunks;
@@ -456,11 +461,13 @@ struct rdt_parse_data {
* @data_width: Character width of data when displaying
* @domains: All domains for this resource
* @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @mbm_width: Monitor width, to detect and correct for overflow.
* @fflags: flags to choose base and info files
*/
struct rdt_resource {
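
The hunks above are kernel-doc fixes: scripts/kernel-doc only recognizes member lines of the form "@name:" with a trailing colon; "@name " without one is treated as plain text and warns. A minimal example of the expected syntax:

/* Illustrative sketch of kernel-doc member syntax. */
/**
 * struct sample - a documented structure
 * @count: number of valid entries in @data
 * @data: storage; only the first @count entries are initialized
 */
struct sample {
	int count;
	int data[8];
};
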
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 05a89e33fde2..2207916cae65 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -49,6 +49,7 @@ static struct class *pseudo_lock_class;
/**
* get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * @void: It takes no parameters.
*
* Capture the list of platforms that have been validated to support
* pseudo-locking. This includes testing to ensure pseudo-locked regions
@@ -162,7 +163,7 @@ static struct rdtgroup *region_find_by_minor(unsigned int minor)
}
/**
- * pseudo_lock_pm_req - A power management QoS request list entry
+ * struct pseudo_lock_pm_req - A power management QoS request list entry
* @list: Entry within the @pm_reqs list for a pseudo-locked region
* @req: PM QoS request
*/
@@ -184,6 +185,7 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
/**
* pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: Pseudo-locked region
*
* To prevent the cache from being affected by power management entering
* C6 has to be avoided. This is accomplished by requesting a latency
@@ -196,6 +198,8 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
* the ACPI latencies need to be considered while keeping in mind that C2
* may be set to map to deeper sleep states. In this case the latency
* requirement needs to prevent entering C2 also.
+ *
+ * Return: 0 on success, <0 on failure
*/
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
@@ -520,7 +524,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
/**
* rdtgroup_monitor_in_progress - Test if monitoring in progress
- * @r: resource group being queried
+ * @rdtgrp: resource group being queried
*
* Return: 1 if monitor groups have been created for this resource
* group, 0 otherwise.
@@ -1140,6 +1144,8 @@ out:
/**
* pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs.
+ * @sel: Selector of which measurement to perform on a pseudo-locked region.
*
* The measurement of latency to access a pseudo-locked region should be
* done from a cpu that is associated with that pseudo-locked region.
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 3be203297988..001808e3901c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -383,7 +383,7 @@ const struct vm_operations_struct sgx_vm_ops = {
/**
* sgx_encl_release - Destroy an enclave instance
- * @kref: address of a kref inside &sgx_encl
+ * @ref: address of a kref inside &sgx_encl
*
* Used together with kref_put(). Frees all the resources associated with the
* enclave and the instance itself.
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 6e74f85b6264..fec43ca65065 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -91,8 +91,8 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *result;
- result = find_vma(mm, addr);
- if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
+ result = vma_lookup(mm, addr);
+ if (!result || result->vm_ops != &sgx_vm_ops)
return -EINVAL;
*vma = result;
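
This change relies on the semantic difference between the two lookup helpers: find_vma() returns the first VMA whose vm_end lies above the address, which may start above it as well, while vma_lookup() only returns a VMA that actually contains the address, so the explicit vm_start check can go away. A simplified sketch with stand-in types (a sorted array instead of the kernel's VMA tree):

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/* First vma with vm_end > addr (may lie entirely above addr). */
static struct vma *find_vma(struct vma *v, int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (v[i].vm_end > addr)
			return &v[i];
	return NULL;
}

/* Only a vma with vm_start <= addr < vm_end. */
static struct vma *vma_lookup(struct vma *v, int n, unsigned long addr)
{
	struct vma *r = find_vma(v, n, addr);

	return (r && addr >= r->vm_start) ? r : NULL;
}

int main(void)
{
	struct vma map[] = { { 0x2000, 0x3000 } };
	unsigned long addr = 0x1000;	/* in the hole below the vma */

	printf("find_vma:   %p\n", (void *)find_vma(map, 1, addr));
	printf("vma_lookup: %p\n", (void *)vma_lookup(map, 1, addr));
	return 0;
}
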
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
index 6ad165a5c0cc..64511c4a5200 100644
--- a/arch/x86/kernel/cpu/sgx/virt.c
+++ b/arch/x86/kernel/cpu/sgx/virt.c
@@ -212,6 +212,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
list_splice_tail(&secs_pages, &zombie_secs_pages);
mutex_unlock(&zombie_secs_pages_lock);
+ xa_destroy(&vepc->page_array);
kfree(vepc);
return 0;
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index e2ad30e474f8..9c7a5f049292 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -2,7 +2,7 @@
/*
* Intel Transactional Synchronization Extensions (TSX) control.
*
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*
* Author:
* Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
@@ -84,13 +84,46 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
+void tsx_clear_cpuid(void)
+{
+ u64 msr;
+
+ /*
+ * MSR_TFA_TSX_CPUID_CLEAR bit is only present when both CPUID
+ * bits RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are present.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ }
+}
+
void __init tsx_init(void)
{
char arg[5] = {};
int ret;
- if (!tsx_ctrl_is_supported())
+ /*
+ * Hardware will always abort a TSX transaction if both CPUID bits
+ * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
+ * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
+ * here.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
+ tsx_clear_cpuid();
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ setup_clear_cpu_cap(X86_FEATURE_HLE);
return;
+ }
+
+ if (!tsx_ctrl_is_supported()) {
+ tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
+ return;
+ }
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
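
tsx_clear_cpuid() above is a plain MSR read-modify-write: read MSR_TSX_FORCE_ABORT, set MSR_TFA_TSX_CPUID_CLEAR, write it back. A sketch of that pattern, with the rdmsrl()/wrmsrl() accessors modeled as a plain 64-bit variable and the clear bit assumed to be bit 1 as in the kernel's msr-index.h:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define MSR_TFA_TSX_CPUID_CLEAR	(1ULL << 1)	/* assumed bit position */

static uint64_t fake_msr;	/* stands in for the hardware register */

static void rdmsrl(uint64_t *val) { *val = fake_msr; }
static void wrmsrl(uint64_t val)  { fake_msr = val; }

int main(void)
{
	uint64_t msr;

	rdmsrl(&msr);			/* read current value */
	msr |= MSR_TFA_TSX_CPUID_CLEAR;	/* set only the one bit */
	wrmsrl(msr);			/* write it back */

	printf("MSR now 0x%llx\n", (unsigned long long)fake_msr);
	return 0;
}
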
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 54ce999ed321..e8326a8d1c5d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -70,19 +70,6 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
rcu_read_unlock();
}
-/*
- * When the crashkernel option is specified, only use the low
- * 1M for the real mode trampoline.
- */
-void __init crash_reserve_low_1M(void)
-{
- if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
- return;
-
- memblock_reserve(0, 1<<20);
- pr_info("Reserving the low 1M of memory for crashkernel\n");
-}
-
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a4ec65317a7f..b7b92cdf3add 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
if (use_xsave()) {
/*
- * Note: we don't need to zero the reserved bits in the
- * xstate_header here because we either didn't copy them at all,
- * or we checked earlier that they aren't set.
+ * Clear all feature bits which are not set in
+ * user_xfeatures and clear all extended features
+ * for fx_only mode.
*/
+ u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
/*
- * 'user_xfeatures' might have bits clear which are
- * set in header->xfeatures. This represents features that
- * were in init state prior to a signal delivery, and need
- * to be reset back to the init state. Clear any user
- * feature bits which are set in the kernel buffer to get
- * them back to the init state.
- *
- * Supervisor state is unchanged by input from userspace.
- * Ensure supervisor state bits stay set and supervisor
- * state is not modified.
+ * Supervisor state has to be preserved. The sigframe
+ * restore can only modify user features, i.e. @mask
+ * cannot contain them.
*/
- if (fx_only)
- header->xfeatures = XFEATURE_MASK_FPSSE;
- else
- header->xfeatures &= user_xfeatures |
- xfeatures_mask_supervisor();
+ header->xfeatures &= mask | xfeatures_mask_supervisor();
}
if (use_fxsr()) {
@@ -307,13 +297,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
return 0;
}
- if (!access_ok(buf, size))
- return -EACCES;
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
- if (!static_cpu_has(X86_FEATURE_FPU))
- return fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) != 0;
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ goto out;
+ }
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +363,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_unlock();
return 0;
}
+
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state; otherwise the owning
+ * task might preempt current and return to user space with
+ * corrupted FPU registers.
+ *
+ * In case current owns the FPU registers then no further
+ * action is required. The fixup below will handle it
+ * correctly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+
fpregs_unlock();
} else {
/*
@@ -377,7 +390,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
ret = __copy_from_user(&env, buf, sizeof(env));
if (ret)
- goto err_out;
+ goto out;
envp = &env;
}
@@ -405,16 +418,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
- if (using_compacted_format()) {
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
- } else {
- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
- if (!ret && state_size > offsetof(struct xregs_state, header))
- ret = validate_user_xstate_header(&fpu->state.xsave.header);
- }
+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto err_out;
+ goto out;
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
@@ -434,7 +440,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
if (ret) {
ret = -EFAULT;
- goto err_out;
+ goto out;
}
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +458,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
if (ret)
- goto err_out;
+ goto out;
fpregs_lock();
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +469,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_deactivate(fpu);
fpregs_unlock();
-err_out:
+out:
if (ret)
fpu__clear_user_states(fpu);
return ret;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index a85c64000218..1cadb2faf740 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -441,12 +441,35 @@ static void __init print_xstate_offset_size(void)
}
/*
+ * All supported features either have an all-zeros init state or are
+ * handled individually in setup_init_fpu_buf(). This is an explicit
+ * feature list which deliberately does not use XFEATURE_MASK*SUPPORTED,
+ * so that newly added supported features are caught at build time and
+ * people actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+ (XFEATURE_MASK_FP | \
+ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_PASID)
+
+/*
* setup the xstate image representing the init state
*/
static void __init setup_init_fpu_buf(void)
{
static int on_boot_cpu __initdata = 1;
+ BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
+ XFEATURES_INIT_FPSTATE_HANDLED);
+
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
copy_kernel_to_xregs_booting(&init_fpstate.xsave);
/*
- * Dump the init state again. This is to identify the init state
- * of any feature which is not represented by all zero's.
+ * All components are now in init state. Read the state back so
+ * that init_fpstate contains all non-zero init state. This only
+ * works with XSAVE, but not with XSAVEOPT and XSAVES because
+ * those use the init optimization which skips writing data for
+ * components in init state.
+ *
+ * XSAVE could be used, but that would require reshuffling the
+ * data when XSAVES is available because XSAVES uses xstate
+ * compaction. But doing so is a pointless exercise because most
+ * components have an all zeros init state except for the legacy
+ * ones (FP and SSE). Those can be saved with FXSAVE into the
+ * legacy area. Adding a new feature requires ensuring that its
+ * init state is all zeroes or, if it is not, adding the necessary
+ * handling here.
*/
- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+ fxsave(&init_fpstate.fxsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
@@ -1402,60 +1437,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
- u64 pasid_state;
- u32 pasid;
-
- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
- return;
-
- if (!current->mm)
- return;
-
- pasid = READ_ONCE(current->mm->pasid);
- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
- pasid_state = pasid == PASID_DISABLED ?
- pasid : pasid | MSR_IA32_PASID_VALID;
-
- /*
- * No need to hold fregs_lock() since the task's fpstate won't
- * be changed by others (e.g. ptrace) while the task is being
- * switched to or is in IPI.
- */
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- /* The MSR is active and can be directly updated. */
- wrmsrl(MSR_IA32_PASID, pasid_state);
- } else {
- struct fpu *fpu = &current->thread.fpu;
- struct ia32_pasid_state *ppasid_state;
- struct xregs_state *xsave;
-
- /*
- * The CPU's xstate registers are not currently active. Just
- * update the PASID state in the memory buffer here. The
- * PASID MSR will be loaded when returning to user mode.
- */
- xsave = &fpu->state.xsave;
- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
- /*
- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
- * won't be NULL and no need to check its value.
- *
- * Only update the task's PASID state when it's different
- * from the mm's pasid.
- */
- if (ppasid_state->pasid != pasid_state) {
- /*
- * Invalid fpregs so that state restoring will pick up
- * the PASID state.
- */
- __fpu_invalidate_fpregs_state(fpu);
- ppasid_state->pasid = pasid_state;
- }
- }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
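
The BUILD_BUG_ON added to setup_init_fpu_buf() above is a build-time completeness check. The same idea expressed as a C11 static assertion, with hypothetical masks:

/* Illustrative sketch, not kernel code; the masks are hypothetical. */
#define SUPPORTED_FEATURES	(0x01 | 0x02 | 0x04)
#define INIT_STATE_HANDLED	(0x01 | 0x02 | 0x04)

/* Fails to compile the moment a supported bit lacks init-state handling. */
_Static_assert(SUPPORTED_FEATURES == INIT_STATE_HANDLED,
	       "newly added feature needs init state handling");

int main(void) { return 0; }
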
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 04bddaaba8e2..d8b3ebd2bb85 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -62,7 +62,7 @@ SYM_CODE_START_NOALIGN(startup_64)
*/
/* Set up the stack for verify_cpu(), similar to initial_stack below */
- leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
+ leaq (__end_init_task - FRAME_SIZE)(%rip), %rsp
leaq _text(%rip), %rdi
pushq %rsi
@@ -343,10 +343,10 @@ SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
#endif
/*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
* reliably detect the end of the stack.
*/
-SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
+SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
__FINITDATA
__INIT
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index d552f177eca0..df0fa695bb09 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -35,12 +35,16 @@
#define SYSG(_vector, _addr) \
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+#ifdef CONFIG_X86_64
/*
* Interrupt gate with interrupt stack. The _ist index is the index in
* the tss.ist[] array, but for the descriptor it needs to start at 1.
*/
#define ISTG(_vector, _addr, _ist) \
G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
+#else
+#define ISTG(_vector, _addr, _ist) INTG(_vector, _addr)
+#endif
/* Task gate */
#define TSKG(_vector, _gdt) \
@@ -74,7 +78,7 @@ static const __initconst struct idt_data early_idts[] = {
*/
static const __initconst struct idt_data def_idts[] = {
INTG(X86_TRAP_DE, asm_exc_divide_error),
- INTG(X86_TRAP_NMI, asm_exc_nmi),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
INTG(X86_TRAP_BR, asm_exc_bounds),
INTG(X86_TRAP_UD, asm_exc_invalid_op),
INTG(X86_TRAP_NM, asm_exc_device_not_available),
@@ -91,12 +95,16 @@ static const __initconst struct idt_data def_idts[] = {
#ifdef CONFIG_X86_32
TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
#else
- INTG(X86_TRAP_DF, asm_exc_double_fault),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
#endif
- INTG(X86_TRAP_DB, asm_exc_debug),
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
#ifdef CONFIG_X86_MCE
- INTG(X86_TRAP_MC, asm_exc_machine_check),
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
#endif
SYSG(X86_TRAP_OF, asm_exc_overflow),
@@ -221,22 +229,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
INTG(X86_TRAP_PF, asm_exc_page_fault),
};
-/*
- * The exceptions which use Interrupt stacks. They are setup after
- * cpu_init() when the TSS has been initialized.
- */
-static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
-#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
-#endif
-#ifdef CONFIG_AMD_MEM_ENCRYPT
- ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
-#endif
-};
-
/**
* idt_setup_early_pf - Initialize the idt table with early pagefault handler
*
@@ -254,14 +246,6 @@ void __init idt_setup_early_pf(void)
idt_setup_from_table(idt_table, early_pf_idts,
ARRAY_SIZE(early_pf_idts), true);
}
-
-/**
- * idt_setup_ist_traps - Initialize the idt table with traps using IST
- */
-void __init idt_setup_ist_traps(void)
-{
- idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
-}
#endif
static void __init idt_map_in_cea(void)
@@ -331,11 +315,10 @@ void __init idt_setup_early_handler(void)
/**
* idt_invalidate - Invalidate interrupt descriptor table
- * @addr: The virtual address of the 'invalid' IDT
*/
-void idt_invalidate(void *addr)
+void idt_invalidate(void)
{
- struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 };
+ static const struct desc_ptr idt = { .address = 0, .size = 0 };
load_idt(&idt);
}
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 6a2eb62c85e6..674906fad43b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -15,50 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
+#include <asm/insn.h>
-static void bug_at(const void *ip, int line)
+int arch_jump_entry_size(struct jump_entry *entry)
{
- /*
- * The location is not an op that we were expecting.
- * Something went wrong. Crash the box, as something could be
- * corrupting the kernel.
- */
- pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
- BUG();
+ struct insn insn = {};
+
+ insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
+ BUG_ON(insn.length != 2 && insn.length != 5);
+
+ return insn.length;
}
-static const void *
-__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
+struct jump_label_patch {
+ const void *code;
+ int size;
+};
+
+static struct jump_label_patch
+__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
- const void *expect, *code;
+ const void *expect, *code, *nop;
const void *addr, *dest;
- int line;
+ int size;
addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);
- code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ size = arch_jump_entry_size(entry);
+ switch (size) {
+ case JMP8_INSN_SIZE:
+ code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
- if (type == JUMP_LABEL_JMP) {
- expect = x86_nops[5]; line = __LINE__;
- } else {
- expect = code; line = __LINE__;
+ case JMP32_INSN_SIZE:
+ code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ default: BUG();
}
- if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
- bug_at(addr, line);
+ if (type == JUMP_LABEL_JMP)
+ expect = nop;
+ else
+ expect = code;
+
+ if (memcmp(addr, expect, size)) {
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
+ addr, addr, addr, expect, size, type);
+ BUG();
+ }
if (type == JUMP_LABEL_NOP)
- code = x86_nops[5];
+ code = nop;
- return code;
+ return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
- const void *opcode = __jump_label_set_jump_code(entry, type);
+ const struct jump_label_patch jlp = __jump_label_patch(entry, type);
/*
* As long as only a single processor is running and the code is still
@@ -72,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
- text_poke_early((void *)jump_entry_code(entry), opcode,
- JUMP_LABEL_NOP_SIZE);
+ text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}
- text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -98,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
- const void *opcode;
+ struct jump_label_patch jlp;
if (system_state == SYSTEM_BOOTING) {
/*
@@ -109,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
mutex_lock(&text_mutex);
- opcode = __jump_label_set_jump_code(entry, type);
- text_poke_queue((void *)jump_entry_code(entry),
- opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ jlp = __jump_label_patch(entry, type);
+ text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
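
The jump-label rework selects between 2-byte and 5-byte JMPs. A sketch of the encoding choice, roughly what text_gen_insn() produces: x86 relative jumps encode the displacement from the end of the instruction, with 0xEB for JMP rel8 and 0xE9 for JMP rel32.

/* Illustrative sketch, not kernel code. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define JMP8_SIZE	2
#define JMP32_SIZE	5

static int gen_jmp(uint8_t *buf, uint64_t addr, uint64_t dest)
{
	int64_t disp = (int64_t)(dest - addr);

	if (disp - JMP8_SIZE >= INT8_MIN && disp - JMP8_SIZE <= INT8_MAX) {
		buf[0] = 0xeb;				/* JMP rel8 */
		buf[1] = (uint8_t)(int8_t)(disp - JMP8_SIZE);
		return JMP8_SIZE;
	}

	int32_t rel = (int32_t)(disp - JMP32_SIZE);

	buf[0] = 0xe9;					/* JMP rel32 */
	memcpy(&buf[1], &rel, sizeof(rel));
	return JMP32_SIZE;
}

int main(void)
{
	uint8_t insn[JMP32_SIZE];
	int size = gen_jmp(insn, 0x1000, 0x1010);

	printf("emitted %d-byte JMP, first byte 0x%02x\n", size, insn[0]);
	return 0;
}
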
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d3d65545cb8b..c492ad3001ca 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -674,7 +674,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
break;
if (insn->addr_bytes != sizeof(unsigned long))
- return -EOPNOTSUPP; /* Don't support differnt size */
+ return -EOPNOTSUPP; /* Don't support different size */
if (X86_MODRM_MOD(opcode) != 3)
return -EOPNOTSUPP; /* TODO: support memory addressing */
@@ -1102,24 +1102,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
- kcb->kprobe_status == KPROBE_HIT_SSDONE) {
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
}
return 0;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 64b00b0d7fe8..1b373d79cedc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,17 +23,6 @@
#include <asm/set_memory.h>
#include <asm/debugreg.h>
-static void set_gdt(void *newgdt, __u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* ia32 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- load_gdt(&curgdt);
-}
-
static void load_segments(void)
{
#define __STR(X) #X
@@ -232,8 +221,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- idt_invalidate(phys_to_virt(0));
- set_gdt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index c078b0d3ab0e..131f30fdcfbd 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -256,35 +256,6 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
return init_transition_pgtable(image, level4p);
}
-static void set_idt(void *newidt, u16 limit)
-{
- struct desc_ptr curidt;
-
- /* x86-64 supports unaligned loads & stores */
- curidt.size = limit;
- curidt.address = (unsigned long)newidt;
-
- __asm__ __volatile__ (
- "lidtq %0\n"
- : : "m" (curidt)
- );
-};
-
-
-static void set_gdt(void *newgdt, u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* x86-64 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- __asm__ __volatile__ (
- "lgdtq %0\n"
- : : "m" (curgdt)
- );
-};
-
static void load_segments(void)
{
__asm__ __volatile__ (
@@ -379,8 +350,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- set_gdt(phys_to_virt(0), 0);
- set_idt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel((unsigned long)image->head,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5e1f38179f49..e52b208b4641 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (p == current || p->state == TASK_RUNNING)
+ if (p == current || task_is_running(p))
return 0;
if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_struct *p)
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
- } while (count++ < 16 && p->state != TASK_RUNNING);
+ } while (count++ < 16 && !task_is_running(p));
out:
put_task_stack(p);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 87a4143aa7d7..4c208ea3bd9f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -911,7 +911,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
* syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
- if (syscall_get_nr(child, regs) >= 0)
+ if (syscall_get_nr(child, regs) != -1)
child->thread_info.status |= TS_I386_REGS_POKED;
break;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index b29657b76e3f..ebfb91108232 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -669,7 +669,7 @@ static void native_machine_emergency_restart(void)
break;
case BOOT_TRIPLE:
- idt_invalidate(NULL);
+ idt_invalidate();
__asm__ __volatile__("int3");
/* We're probably dead after this, but... */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 72920af0b3c0..85acd22f8022 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -44,6 +44,7 @@
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
+#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
@@ -637,11 +638,11 @@ static void __init trim_snb_memory(void)
* them from accessing certain memory ranges, namely anything below
* 1M and in the pages listed in bad_pages[] above.
*
- * To avoid these pages being ever accessed by SNB gfx devices
- * reserve all memory below the 1 MB mark and bad_pages that have
- * not already been reserved at boot time.
+ * To avoid these pages ever being accessed by SNB gfx devices,
+ * reserve bad_pages that have not already been reserved at boot
+ * time. All memory below the 1 MB mark is reserved later during
+ * setup_arch() anyway, so there is no need to reserve it here.
*/
- memblock_reserve(0, 1<<20);
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -694,30 +695,6 @@ static void __init e820_add_kernel_range(void)
e820__range_add(start, size, E820_TYPE_RAM);
}
-static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-
-static int __init parse_reservelow(char *p)
-{
- unsigned long long size;
-
- if (!p)
- return -EINVAL;
-
- size = memparse(p, &p);
-
- if (size < 4096)
- size = 4096;
-
- if (size > 640*1024)
- size = 640*1024;
-
- reserve_low = size;
-
- return 0;
-}
-
-early_param("reservelow", parse_reservelow);
-
static void __init early_reserve_memory(void)
{
/*
@@ -733,14 +710,14 @@ static void __init early_reserve_memory(void)
* The first 4Kb of memory is a BIOS owned area, but generally it is
* not listed as such in the E820 table.
*
- * Reserve the first memory page and typically some additional
- * memory (64KiB by default) since some BIOSes are known to corrupt
- * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+ * Reserve the first 64K of memory since some BIOSes are known to
+ * corrupt low memory. After the real mode trampoline is allocated, the
+ * rest of the memory below 640K is reserved.
*
* In addition, make sure page 0 is always reserved because on
* systems with L1TF its contents can be leaked to user processes.
*/
- memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+ memblock_reserve(0, SZ_64K);
early_reserve_initrd();
@@ -751,6 +728,7 @@ static void __init early_reserve_memory(void)
reserve_ibft_region();
reserve_bios_regions();
+ trim_snb_memory();
}
/*
@@ -1081,14 +1059,21 @@ void __init setup_arch(char **cmdline_p)
(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
- reserve_real_mode();
-
/*
- * Reserving memory causing GPU hangs on Sandy Bridge integrated
- * graphics devices should be done after we allocated memory under
- * 1M for the real mode trampoline.
+ * Find free memory for the real mode trampoline and place it there. If
+ * there is not enough free memory under 1M, on EFI-enabled systems
+ * there will be an additional attempt to reclaim the memory for the
+ * real mode trampoline at efi_free_boot_services().
+ *
+ * Unconditionally reserve the entire first 1M of RAM because BIOSes
+ * are known to corrupt low memory, and several hundred kilobytes are
+ * not worth the complexity of detecting which memory gets clobbered.
+ * Windows does the same thing for very similar reasons.
+ *
+ * Moreover, on machines with SandyBridge graphics or in setups that use
+ * crashkernel the entire 1M is reserved anyway.
*/
- trim_snb_memory();
+ reserve_real_mode();
init_mem_mapping();
@@ -1226,6 +1211,14 @@ void __init setup_arch(char **cmdline_p)
x86_init.timers.wallclock_init();
+ /*
+ * This needs to run before setup_local_APIC(), which temporarily
+ * soft-disables the local APIC and thereby masks the thermal LVT
+ * interrupt, leading to softlockups on machines which have configured
+ * SMI interrupt delivery.
+ */
+ therm_lvt_init();
+
mcheck_init();
register_refined_jiffies(CLOCK_TICK_RATE);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0941d2f44f2a..78a32b956e81 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
*/
static bool __init pcpu_need_numa(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
pg_data_t *last = NULL;
unsigned int cpu;
@@ -101,7 +101,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
unsigned long align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
@@ -140,7 +140,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 651b81cd648e..a6895e440bc3 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -7,12 +7,11 @@
* Author: Joerg Roedel <jroedel@suse.de>
*/
-#define pr_fmt(fmt) "SEV-ES: " fmt
+#define pr_fmt(fmt) "SEV: " fmt
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
@@ -192,11 +191,19 @@ void noinstr __sev_es_ist_exit(void)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ WARN_ON(!irqs_disabled());
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
@@ -213,7 +220,9 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
data->ghcb_active = false;
data->backup_ghcb_active = false;
+ instrumentation_begin();
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
}
/* Mark backup_ghcb active before writing to it */
@@ -258,17 +267,24 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
- int res;
+ int insn_bytes;
- res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
- if (!res) {
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
ctxt->fi.vector = X86_TRAP_PF;
ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
ctxt->fi.cr2 = ctxt->regs->ip;
return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
}
- if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
return ES_DECODE_FAILED;
if (ctxt->insn.immediate.got)
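
The decoder change turns insn_fetch_from_user_inatomic()'s return value into a three-way triage: 0 means no bytes could be copied (inject #PF), -EINVAL means the effective RIP could not be computed (inject #GP), and a positive byte count feeds the decoder. A sketch of that triage:

/* Illustrative sketch, not kernel code. */
#include <stdio.h>
#include <errno.h>

static const char *classify(int insn_bytes)
{
	if (insn_bytes == 0)
		return "inject #PF (nothing copied)";
	if (insn_bytes == -EINVAL)
		return "inject #GP (no effective RIP)";
	return "decode the fetched bytes";
}

int main(void)
{
	printf("%s\n", classify(0));
	printf("%s\n", classify(-EINVAL));
	printf("%s\n", classify(15));
	return 0;
}
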
@@ -479,11 +495,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ WARN_ON(!irqs_disabled());
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
@@ -507,7 +525,7 @@ void noinstr __sev_es_nmi_complete(void)
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -517,7 +535,7 @@ void noinstr __sev_es_nmi_complete(void)
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
VMGEXIT();
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
static u64 get_jump_table_addr(void)
@@ -529,7 +547,7 @@ static u64 get_jump_table_addr(void)
local_irq_save(flags);
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -543,7 +561,7 @@ static u64 get_jump_table_addr(void)
ghcb_sw_exit_info_2_is_valid(ghcb))
ret = ghcb->save.sw_exit_info_2;
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
local_irq_restore(flags);
@@ -668,7 +686,7 @@ static void sev_es_ap_hlt_loop(void)
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
while (true) {
vc_ghcb_invalidate(ghcb);
@@ -685,7 +703,7 @@ static void sev_es_ap_hlt_loop(void)
break;
}
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
/*
@@ -775,7 +793,7 @@ void __init sev_es_init_vc_handling(void)
sev_es_setup_play_dead();
/* Secondary CPUs use the runtime #VC handler */
- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
+ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
@@ -1213,14 +1231,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
return ES_EXCEPTION;
}
-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
-{
- if (user_mode(regs))
- noist_exc_debug(regs);
- else
- exc_debug(regs);
-}
-
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
struct ghcb *ghcb,
unsigned long exit_code)
@@ -1316,44 +1326,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
-/*
- * Main #VC exception handler. It is called when the entry code was able to
- * switch off the IST to a safe kernel stack.
- *
- * With the current implementation it is always possible to switch to a safe
- * stack because #VC exceptions only happen at known places, like intercepted
- * instructions or accesses to MMIO areas/IO ports. They can also happen with
- * code instrumentation when the hypervisor intercepts #DB, but the critical
- * paths are forbidden to be instrumented, so #DB exceptions currently also
- * only happen in safe places.
- */
-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
- irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
enum es_result result;
struct ghcb *ghcb;
+ bool ret = true;
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
- vc_handle_trap_db(regs);
- return;
- }
-
- irq_state = irqentry_nmi_enter(regs);
- lockdep_assert_irqs_disabled();
- instrumentation_begin();
-
- /*
- * This is invoked through an interrupt gate, so IRQs are disabled. The
- * code below might walk page-tables for user or kernel addresses, so
- * keep the IRQs disabled to protect us against concurrent TLB flushes.
- */
-
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1361,7 +1342,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
if (result == ES_OK)
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
/* Done - now check the result */
switch (result) {
@@ -1369,17 +1350,20 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
vc_finish_insn(&ctxt);
break;
case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_VMM_ERROR:
pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_DECODE_FAILED:
pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_EXCEPTION:
vc_forward_exception(&ctxt);
break;
@@ -1395,24 +1379,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
BUG();
}
-out:
- instrumentation_end();
- irqentry_nmi_exit(regs, irq_state);
+ return ret;
+}
- return;
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
-fail:
- if (user_mode(regs)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- } else {
- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
- result);
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+ * #DB, but the critical paths are forbidden to be instrumented, so #DB
+ * exceptions currently also only happen in safe places.
+ *
+ * But keep this here in case the noinstr annotations are violated due
+ * to a bug elsewhere.
+ */
+ if (unlikely(on_vc_fallback_stack(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
/* Show some debug info */
show_regs(regs);
@@ -1423,23 +1435,38 @@ fail:
panic("Returned from Terminate-Request to Hypervisor\n");
}
- goto out;
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
}
-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
-}
-DEFINE_IDTENTRY_VC(exc_vmm_communication)
-{
- if (likely(!on_vc_fallback_stack(regs)))
- safe_stack_exc_vmm_communication(regs, error_code);
- else
- ist_exc_vmm_communication(regs, error_code);
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
}
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
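Net effect of the sev.c changes above: the old safe-stack/IST handler pair is replaced by DEFINE_IDTENTRY_VC_KERNEL and DEFINE_IDTENTRY_VC_USER, and the GHCB accessors are renamed to __sev_get_ghcb()/__sev_put_ghcb() with an explicit WARN_ON(!irqs_disabled()) contract. A minimal caller sketch under that contract; example_vmgexit is a hypothetical name, the pattern mirrors get_jump_table_addr() in the hunks above:

	static u64 example_vmgexit(u64 exit_code)
	{
		struct ghcb_state state;
		unsigned long flags;
		struct ghcb *ghcb;
		u64 info2;

		/* The accessors must be called with IRQs off. */
		local_irq_save(flags);

		ghcb = __sev_get_ghcb(&state);

		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, exit_code);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		info2 = ghcb->save.sw_exit_info_2;
		__sev_put_ghcb(&state);

		local_irq_restore(flags);
		return info2;
	}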
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a06cb107c0e8..e12779a2714d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -713,7 +713,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
/* Are we from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* If so, check system call restarting.. */
switch (syscall_get_error(current, regs)) {
case -ERESTART_RESTARTBLOCK:
@@ -793,7 +793,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
}
/* Did we come from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* Restart the system call - no handlers present */
switch (syscall_get_error(current, regs)) {
case -ERESTARTNOHAND:
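The signal.c change narrows the "came from a system call" test: -1 is the sentinel syscall_get_nr() returns for non-syscall entry, while other negative numbers (for example one a tracer wrote to skip a syscall) still identify a syscall frame whose -ERESTART* status needs handling. The convention this assumes, as a sketch:

	int nr = syscall_get_nr(current, regs);

	if (nr != -1) {
		/*
		 * regs describe a (possibly skipped) system call;
		 * -ERESTARTSYS and friends must be processed.
		 */
	} else {
		/* interrupt/exception entry: nothing to restart */
	}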
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7770245cc7fa..9320285a5e29 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -232,11 +232,9 @@ static void notrace start_secondary(void *unused)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
#endif
- cpu_init_exception_handling();
- cpu_init();
+ cpu_init_secondary();
rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
- preempt_disable();
smp_callin();
enable_start_cpu0 = 0;
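start_secondary() now calls a single cpu_init_secondary() helper in place of the cpu_init_exception_handling()/cpu_init() pair, and drops the explicit preempt_disable(), presumably because idle tasks are created with preemption already disabled elsewhere in this series. A plausible shape of the helper, stated as an assumption rather than quoted from the series:

	void cpu_init_secondary(void)
	{
		/* Load the IDT set up by the boot CPU before cpu_init(). */
		cpu_init_exception_handling();
		cpu_init();
	}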
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 853ea7a80806..ed540e09a399 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -1160,12 +1160,9 @@ void __init trap_init(void)
/* Init GHCB memory pages when running as an SEV-ES guest */
sev_es_init_vc_handling();
+ /* Initialize TSS before setting up traps so ISTs work */
+ cpu_init_exception_handling();
+ /* Setup traps as cpu_init() might #GP */
idt_setup_traps();
-
- /*
- * Should be a barrier for any external CPU state:
- */
cpu_init();
-
- idt_setup_ist_traps();
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57ec01192180..2e076a459a0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1128,6 +1128,7 @@ static int tsc_cs_enable(struct clocksource *cs)
static struct clocksource clocksource_tsc_early = {
.name = "tsc-early",
.rating = 299,
+ .uncertainty_margin = 32 * NSEC_PER_MSEC,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -1152,7 +1153,8 @@ static struct clocksource clocksource_tsc = {
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_VALID_FOR_HRES |
- CLOCK_SOURCE_MUST_VERIFY,
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
.enable = tsc_cs_enable,
.resume = tsc_resume,
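Two clocksource-watchdog knobs appear in tsc.c: tsc-early gets an explicit uncertainty_margin of 32 ms, loosening skew checks while the TSC is still being calibrated, and the final TSC clocksource opts into per-CPU verification. A hedged sketch of how a driver would state such a margin; example_cs and example_read are hypothetical:

	static u64 example_read(struct clocksource *cs)
	{
		return 0;	/* placeholder counter */
	}

	static struct clocksource example_cs = {
		.name               = "example",
		.rating             = 250,
		.read               = example_read,
		.mask               = CLOCKSOURCE_MASK(64),
		.uncertainty_margin = 32 * NSEC_PER_MSEC,	/* widen watchdog tolerance */
		.flags              = CLOCK_SOURCE_IS_CONTINUOUS |
				      CLOCK_SOURCE_MUST_VERIFY |
				      CLOCK_SOURCE_VERIFY_PERCPU,
	};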
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 8daa70b0d2da..576b47e7523d 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -346,14 +346,12 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (!regs)
return false;
- nr_copied = insn_fetch_from_user(regs, buf);
-
/*
- * The insn_fetch_from_user above could have failed if user code
- * is protected by a memory protection key. Give up on emulation
- * in such a case. Should we issue a page fault?
+ * Give up on emulation if fetching the instruction failed. Should a
+ * page fault or a #GP be issued?
*/
- if (!nr_copied)
+ nr_copied = insn_fetch_from_user(regs, buf);
+ if (nr_copied <= 0)
return false;
if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
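The umip.c hunk adapts to the same insn_fetch_from_user() convention seen in the sev.c hunk earlier: 0 means nothing could be copied, and a negative value such as -EINVAL means the linear RIP could not even be computed, so a single `<= 0` guard covers both. The assumed caller pattern:

	nr_copied = insn_fetch_from_user(regs, buf);
	if (nr_copied <= 0)	/* 0: copy failed, < 0: no linear RIP */
		return false;

	/* nr_copied bytes are valid; hand them to the decoder */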
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f6b93a35ce14..ac69894eab88 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,8 +22,6 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
depends on X86_LOCAL_APIC
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
@@ -36,8 +34,7 @@ config KVM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
select PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -46,6 +43,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select SRCU
+ select HAVE_KVM_PM_NOTIFIER if PM
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index c589db5d91b3..75dfd27b6e8a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -11,13 +11,18 @@ KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
- $(KVM)/dirty_ring.o
+ $(KVM)/dirty_ring.o $(KVM)/binary_stats.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
mmu/spte.o
+
+ifdef CONFIG_HYPERV
+kvm-y += kvm_onhyperv.o
+endif
+
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-$(CONFIG_KVM_XEN) += xen.o
@@ -27,6 +32,10 @@ kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+ifdef CONFIG_HYPERV
+kvm-amd-y += svm/svm_onhyperv.o
+endif
+
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
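The Kconfig and Makefile changes belong together: KVM's steal-time accounting only needs the scheduler's run_delay bookkeeping, so the heavyweight TASKSTATS/TASK_DELAY_ACCT selections (and their NET/MULTIUSER dependencies) give way to a plain `select SCHED_INFO`, binary_stats.o joins the common objects, and the KVM-on-Hyper-V helpers are compiled only under CONFIG_HYPERV. A sketch of the field the relaxed dependency is assumed to feed:

	/* guest steal time accumulates from the scheduler's delay stats */
	u64 steal = current->sched_info.run_delay;	/* needs CONFIG_SCHED_INFO */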
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9a48f138832d..c42613cfb5ba 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -202,10 +202,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
/*
- * Except for the MMU, which needs to be reset after any vendor
- * specific adjustments to the reserved GPA bits.
+ * Except for the MMU, which needs to do its thing after any vendor
+ * specific adjustments to the reserved GPA bits.
*/
- kvm_mmu_reset_context(vcpu);
+ kvm_mmu_after_set_cpuid(vcpu);
}
static int is_efer_nx(void)
@@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
entry->ecx = F(RDPID);
++array->nent;
+ break;
default:
break;
}
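Two small cpuid.c fixes: the added break stops the RDPID case in __do_cpuid_func_emulated() from falling through into the default label, which is harmless today but fragile if another case is ever inserted between them; and the unconditional kvm_mmu_reset_context() is replaced by a dedicated kvm_mmu_after_set_cpuid() hook, letting the MMU decide how much state actually needs rebuilding after a CPUID update.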
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 7e818d64bb4d..95a98413dc32 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -17,6 +17,15 @@ static int vcpu_get_timer_advance_ns(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_timer_advance_ns_fops, vcpu_get_timer_advance_ns, NULL, "%llu\n");
+static int vcpu_get_guest_mode(void *data, u64 *val)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ *val = vcpu->stat.guest_mode;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_guest_mode_fops, vcpu_get_guest_mode, NULL, "%lld\n");
+
static int vcpu_get_tsc_offset(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -45,6 +54,8 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bi
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
+ debugfs_create_file("guest_mode", 0444, debugfs_dentry, vcpu,
+ &vcpu_guest_mode_fops);
debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
&vcpu_tsc_offset_fops);
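The new per-vCPU debugfs entry simply mirrors vcpu->stat.guest_mode, which the kvm_cache_regs.h hunk further down toggles in enter_guest_mode()/leave_guest_mode(). Reading the file therefore reports 1 while a nested guest is running on that vCPU, e.g. something like `/sys/kernel/debug/kvm/<pid>-<fd>/vcpu0/guest_mode` (exact path layout assumed, not taken from this diff).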
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8a0ccdb56076..2837110e66ed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,7 +22,6 @@
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
-#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
@@ -1081,116 +1080,14 @@ static void fetch_register_operand(struct operand *op)
}
}
-static void emulator_get_fpu(void)
-{
- fpregs_lock();
-
- fpregs_assert_state_consistent();
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- switch_fpu_return();
-}
-
-static void emulator_put_fpu(void)
-{
- fpregs_unlock();
-}
-
-static void read_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
- case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
- case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
- case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
- case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
- case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
- case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
- case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
- case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
- case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
- case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
- case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
- case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
- case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
- case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
- case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
- case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
- case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
- case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
- case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
- case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
- case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
- case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
- case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
- case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
- case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
- case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
- case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
- case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void read_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
- case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
- case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
- case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
- case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
- case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
- case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
- case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
- case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
- case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
- case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
- case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
- case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
- case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
- case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fninit");
- emulator_put_fpu();
+ kvm_fpu_put();
return X86EMUL_CONTINUE;
}
@@ -1201,9 +1098,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstcw %0": "+m"(fcw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fcw;
@@ -1217,9 +1114,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstsw %0": "+m"(fsw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fsw;
@@ -1238,7 +1135,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
- read_sse_reg(&op->vec_val, reg);
+ kvm_read_sse_reg(reg, &op->vec_val);
return;
}
if (ctxt->d & Mmx) {
@@ -1289,7 +1186,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
- read_sse_reg(&op->vec_val, ctxt->modrm_rm);
+ kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
return rc;
}
if (ctxt->d & Mmx) {
@@ -1866,10 +1763,10 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
op->bytes * op->count);
break;
case OP_XMM:
- write_sse_reg(&op->vec_val, op->addr.xmm);
+ kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
break;
case OP_MM:
- write_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
break;
case OP_NONE:
/* no writeback */
@@ -2638,8 +2535,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
- ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+ ctxt->ops->exiting_smm(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
@@ -2678,12 +2574,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
}
/*
- * Give pre_leave_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. enter guest mode) before loading state from the SMM
+ * Give leave_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. enter guest mode) before loading state from the SMM
* state-save area.
*/
- if (ctxt->ops->pre_leave_smm(ctxt, buf))
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->ops->leave_smm(ctxt, buf))
+ goto emulate_shutdown;
#ifdef CONFIG_X86_64
if (emulator_has_longmode(ctxt))
@@ -2692,13 +2588,21 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
#endif
ret = rsm_load_state_32(ctxt, buf);
- if (ret != X86EMUL_CONTINUE) {
- /* FIXME: should triple fault */
- return X86EMUL_UNHANDLEABLE;
- }
+ if (ret != X86EMUL_CONTINUE)
+ goto emulate_shutdown;
- ctxt->ops->post_leave_smm(ctxt);
+ /*
+ * Note, the ctxt->ops callbacks are responsible for handling side
+ * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+ * runtime updates, etc... If that changes, e.g. this flow is moved
+ * out of the emulator to make it look more like enter_smm(), then
+ * those side effects need to be explicitly handled for both success
+ * and shutdown.
+ */
+ return X86EMUL_CONTINUE;
+emulate_shutdown:
+ ctxt->ops->triple_fault(ctxt);
return X86EMUL_CONTINUE;
}
@@ -4124,11 +4028,11 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
- emulator_put_fpu();
+ kvm_fpu_put();
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -4172,7 +4076,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
if (size < __fxstate_size(16)) {
rc = fxregs_fixup(&fx_state, size);
@@ -4189,7 +4093,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
out:
- emulator_put_fpu();
+ kvm_fpu_put();
return rc;
}
@@ -5111,7 +5015,7 @@ done:
return rc;
}
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
@@ -5322,7 +5226,8 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
- if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+ if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+ likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
@@ -5436,9 +5341,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fwait");
- emulator_put_fpu();
+ kvm_fpu_put();
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
@@ -5449,7 +5354,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
static void fetch_possible_mmx_operand(struct operand *op)
{
if (op->type == OP_MM)
- read_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
diff --git a/arch/x86/kvm/fpu.h b/arch/x86/kvm/fpu.h
new file mode 100644
index 000000000000..3ba12888bf66
--- /dev/null
+++ b/arch/x86/kvm/fpu.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __KVM_FPU_H_
+#define __KVM_FPU_H_
+
+#include <asm/fpu/api.h>
+
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
+#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
+#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
+#define sse128_l0(x) ({ __sse128_u t; t.vec = x; t.as_u32[0]; })
+#define sse128_l1(x) ({ __sse128_u t; t.vec = x; t.as_u32[1]; })
+#define sse128_l2(x) ({ __sse128_u t; t.vec = x; t.as_u32[2]; })
+#define sse128_l3(x) ({ __sse128_u t; t.vec = x; t.as_u32[3]; })
+#define sse128(lo, hi) ({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })
+
+static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
+ case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
+ case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
+ case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
+ case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
+ case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
+ case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
+ case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
+ case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
+ case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
+ case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
+ case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
+ case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
+ case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
+ case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
+ case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
+ case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
+ case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
+ case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
+ case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
+ case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
+ case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
+ case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
+ case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
+ case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
+ case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
+ case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
+ case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
+ case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_read_mmx_reg(int reg, u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+ case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+ case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+ case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+ case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+ case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+ case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+ case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+ case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+ case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+ case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+ case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+ case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void kvm_fpu_get(void)
+{
+ fpregs_lock();
+
+ fpregs_assert_state_consistent();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+}
+
+static inline void kvm_fpu_put(void)
+{
+ fpregs_unlock();
+}
+
+static inline void kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_read_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_write_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_read_mmx_reg(int reg, u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_read_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_write_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+#endif
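fpu.h consolidates the emulator's former SSE/MMX accessors behind kvm_fpu_get()/kvm_fpu_put(), so hyperv.c can batch several register reads under a single get/put pair (see kvm_hv_hypercall_read_xmm() below) while emulate.c keeps the one-shot convenience wrappers. A minimal usage sketch; both example_* functions are hypothetical and the batched form assumes CONFIG_X86_64 for registers 8-15:

	#include "fpu.h"

	static void example_snapshot_xmm0(sse128_t *out)
	{
		/* wrapper form: one get/put per access */
		kvm_read_sse_reg(0, out);
	}

	static void example_snapshot_all(sse128_t *out)
	{
		int reg;

		/* batched form: one get/put around many raw accesses */
		kvm_fpu_get();
		for (reg = 0; reg < 16; reg++)
			_kvm_read_sse_reg(reg, &out[reg]);
		kvm_fpu_put();
	}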
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f98370a39936..b07592ca92f0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,7 @@
#include "trace.h"
#include "irq.h"
+#include "fpu.h"
/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
@@ -273,15 +274,10 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
- struct kvm_cpuid_entry2 *entry;
-
- entry = kvm_find_cpuid_entry(vcpu,
- HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
- 0);
- if (!entry)
- return false;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
- return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ return hv_vcpu->cpuid_cache.syndbg_cap_eax &
+ HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
@@ -635,11 +631,17 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
union hv_stimer_config new_config = {.as_uint64 = config},
old_config = {.as_uint64 = stimer->config.as_uint64};
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
if (!synic->active && !host)
return 1;
+ if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
+ !(hv_vcpu->cpuid_cache.features_edx &
+ HV_STIMER_DIRECT_MODE_AVAILABLE)))
+ return 1;
+
trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
@@ -1172,6 +1174,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
struct kvm_hv *hv = to_kvm_hv(kvm);
u64 gfn;
+ int idx;
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@@ -1190,20 +1193,105 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
hv->tsc_ref.tsc_sequence = 0;
+
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&kvm->srcu);
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+ srcu_read_unlock(&kvm->srcu, idx);
out_unlock:
mutex_unlock(&hv->hv_lock);
}
+
+static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ case HV_X64_MSR_HYPERCALL:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_HYPERCALL_AVAILABLE;
+ case HV_X64_MSR_VP_RUNTIME:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_RUNTIME_AVAILABLE;
+ case HV_X64_MSR_TIME_REF_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ case HV_X64_MSR_VP_INDEX:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_INDEX_AVAILABLE;
+ case HV_X64_MSR_RESET:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_RESET_AVAILABLE;
+ case HV_X64_MSR_REFERENCE_TSC:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_REFERENCE_TSC_AVAILABLE;
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNIC_AVAILABLE;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNTIMER_AVAILABLE;
+ case HV_X64_MSR_EOI:
+ case HV_X64_MSR_ICR:
+ case HV_X64_MSR_TPR:
+ case HV_X64_MSR_VP_ASSIST_PAGE:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_APIC_ACCESS_AVAILABLE;
+ break;
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_FREQUENCY_MSRS;
+ case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_REENLIGHTENMENT;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_CRASH_CTL:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ case HV_X64_MSR_SYNDBG_OPTIONS:
+ case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
hv->hv_guest_os_id = data;
@@ -1332,6 +1420,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
@@ -1446,6 +1537,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
data = hv->hv_guest_os_id;
@@ -1495,6 +1589,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
u64 data = 0;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX:
data = hv_vcpu->vp_index;
@@ -1623,8 +1720,22 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
return vcpu_bitmap;
}
-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
+struct kvm_hv_hcall {
+ u64 param;
+ u64 ingpa;
+ u64 outgpa;
+ u16 code;
+ u16 rep_cnt;
+ u16 rep_idx;
+ bool fast;
+ bool rep;
+ sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+};
+
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
+ int i;
+ gpa_t gpa;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct hv_tlb_flush_ex flush_ex;
@@ -1638,8 +1749,15 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
bool all_cpus;
if (!ex) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush.address_space = hc->ingpa;
+ flush.flags = hc->outgpa;
+ flush.processor_mask = sse128_lo(hc->xmm[0]);
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa,
+ &flush, sizeof(flush))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb(flush.processor_mask,
flush.address_space, flush.flags);
@@ -1657,9 +1775,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
flush.processor_mask == 0;
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
- sizeof(flush_ex))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush_ex.address_space = hc->ingpa;
+ flush_ex.flags = hc->outgpa;
+ memcpy(&flush_ex.hv_vp_set,
+ &hc->xmm[0], sizeof(hc->xmm[0]));
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
+ sizeof(flush_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
flush_ex.hv_vp_set.format,
@@ -1670,20 +1795,28 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- sparse_banks_len =
- bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
- sizeof(sparse_banks[0]);
+ sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
if (!sparse_banks_len && !all_cpus)
goto ret_success;
- if (!all_cpus &&
- kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_tlb_flush_ex,
- hv_vp_set.bank_contents),
- sparse_banks,
- sparse_banks_len))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (!all_cpus) {
+ if (hc->fast) {
+ if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ for (i = 0; i < sparse_banks_len; i += 2) {
+ sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
+ sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
+ }
+ } else {
+ gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
+ hv_vp_set.bank_contents);
+ if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
+ sparse_banks_len *
+ sizeof(sparse_banks[0]))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+ }
}
cpumask_clear(&hv_vcpu->tlb_flush);
@@ -1696,13 +1829,13 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
NULL, vcpu_mask, &hv_vcpu->tlb_flush);
ret_success:
- /* We always do full TLB flush, set rep_done = rep_cnt. */
+ /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
return (u64)HV_STATUS_SUCCESS |
- ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
+ ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
@@ -1724,8 +1857,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
}
}
-static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
- bool ex, bool fast)
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
struct kvm *kvm = vcpu->kvm;
struct hv_send_ipi_ex send_ipi_ex;
@@ -1740,25 +1872,25 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
bool all_cpus;
if (!ex) {
- if (!fast) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
+ if (!hc->fast) {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
sizeof(send_ipi))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
sparse_banks[0] = send_ipi.cpu_mask;
vector = send_ipi.vector;
} else {
/* 'reserved' part of hv_send_ipi should be 0 */
- if (unlikely(ingpa >> 32 != 0))
+ if (unlikely(hc->ingpa >> 32 != 0))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
- sparse_banks[0] = outgpa;
- vector = (u32)ingpa;
+ sparse_banks[0] = hc->outgpa;
+ vector = (u32)hc->ingpa;
}
all_cpus = false;
valid_bank_mask = BIT_ULL(0);
trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
sizeof(send_ipi_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1778,8 +1910,8 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
if (!all_cpus &&
kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_send_ipi_ex,
- vp_set.bank_contents),
+ hc->ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
sparse_banks,
sparse_banks_len))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1801,12 +1933,67 @@ ret_success:
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *entry;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
- if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+ if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
vcpu->arch.hyperv_enabled = true;
- else
+ } else {
vcpu->arch.hyperv_enabled = false;
+ return;
+ }
+
+ if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
+ return;
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.features_eax = entry->eax;
+ hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
+ hv_vcpu->cpuid_cache.features_edx = entry->edx;
+ } else {
+ hv_vcpu->cpuid_cache.features_eax = 0;
+ hv_vcpu->cpuid_cache.features_ebx = 0;
+ hv_vcpu->cpuid_cache.features_edx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
+ } else {
+ hv_vcpu->cpuid_cache.enlightenments_eax = 0;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
+ if (entry)
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
+ else
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
+}
+
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
+{
+ struct kvm_vcpu_hv *hv_vcpu;
+ int ret = 0;
+
+ if (!to_hv_vcpu(vcpu)) {
+ if (enforce) {
+ ret = kvm_hv_vcpu_init(vcpu);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+ hv_vcpu->enforce_cpuid = enforce;
+
+ return ret;
}
bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
@@ -1839,20 +2026,21 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
-static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
+static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
struct eventfd_ctx *eventfd;
- if (unlikely(!fast)) {
+ if (unlikely(!hc->fast)) {
int ret;
- gpa_t gpa = param;
+ gpa_t gpa = hc->ingpa;
- if ((gpa & (__alignof__(param) - 1)) ||
- offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
+ if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
+ offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
return HV_STATUS_INVALID_ALIGNMENT;
- ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
+ ret = kvm_vcpu_read_guest(vcpu, gpa,
+ &hc->ingpa, sizeof(hc->ingpa));
if (ret < 0)
return HV_STATUS_INVALID_ALIGNMENT;
}
@@ -1862,15 +2050,15 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
* have no use for it, and in all known usecases it is zero, so just
* report lookup failure if it isn't.
*/
- if (param & 0xffff00000000ULL)
+ if (hc->ingpa & 0xffff00000000ULL)
return HV_STATUS_INVALID_PORT_ID;
/* remaining bits are reserved-zero */
- if (param & ~KVM_HYPERV_CONN_ID_MASK)
+ if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
rcu_read_lock();
- eventfd = idr_find(&hv->conn_to_evt, param);
+ eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
rcu_read_unlock();
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
@@ -1879,11 +2067,80 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
return HV_STATUS_SUCCESS;
}
+static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
+{
+ switch (hc->code) {
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ return true;
+ }
+
+ return false;
+}
+
+static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
+{
+ int reg;
+
+ kvm_fpu_get();
+ for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
+ _kvm_read_sse_reg(reg, &hc->xmm[reg]);
+ kvm_fpu_put();
+}
+
+static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (code) {
+ case HVCALL_NOTIFY_LONG_SPIN_WAIT:
+ return hv_vcpu->cpuid_cache.enlightenments_ebx &&
+ hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
+ case HVCALL_POST_MESSAGE:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
+ case HVCALL_SIGNAL_EVENT:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
+ case HVCALL_POST_DEBUG_DATA:
+ case HVCALL_RETRIEVE_DEBUG_DATA:
+ case HVCALL_RESET_DEBUG_SESSION:
+ /*
+ * Return 'true' when SynDBG is disabled so the resulting code
+ * will be HV_STATUS_INVALID_HYPERCALL_CODE.
+ */
+ return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
+ hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ case HVCALL_SEND_IPI_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_SEND_IPI:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_CLUSTER_IPI_RECOMMENDED;
+ default:
+ break;
+ }
+
+ return true;
+}
+
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
- u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
- uint16_t code, rep_idx, rep_cnt;
- bool fast, rep;
+ struct kvm_hv_hcall hc;
+ u64 ret = HV_STATUS_SUCCESS;
/*
* hypercall generates UD from non zero cpl and real mode
@@ -1896,104 +2153,113 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
if (is_64_bit_mode(vcpu)) {
- param = kvm_rcx_read(vcpu);
- ingpa = kvm_rdx_read(vcpu);
- outgpa = kvm_r8_read(vcpu);
+ hc.param = kvm_rcx_read(vcpu);
+ hc.ingpa = kvm_rdx_read(vcpu);
+ hc.outgpa = kvm_r8_read(vcpu);
} else
#endif
{
- param = ((u64)kvm_rdx_read(vcpu) << 32) |
- (kvm_rax_read(vcpu) & 0xffffffff);
- ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
- (kvm_rcx_read(vcpu) & 0xffffffff);
- outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
- (kvm_rsi_read(vcpu) & 0xffffffff);
+ hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
+ (kvm_rax_read(vcpu) & 0xffffffff);
+ hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
+ (kvm_rcx_read(vcpu) & 0xffffffff);
+ hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
+ (kvm_rsi_read(vcpu) & 0xffffffff);
}
- code = param & 0xffff;
- fast = !!(param & HV_HYPERCALL_FAST_BIT);
- rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
- rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
- rep = !!(rep_cnt || rep_idx);
+ hc.code = hc.param & 0xffff;
+ hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
+ hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
+ hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
+ hc.rep = !!(hc.rep_cnt || hc.rep_idx);
- trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+ if (hc.fast && is_xmm_fast_hypercall(&hc))
+ kvm_hv_hypercall_read_xmm(&hc);
- switch (code) {
+ trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
+ hc.ingpa, hc.outgpa);
+
+ if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+ ret = HV_STATUS_ACCESS_DENIED;
+ goto hypercall_complete;
+ }
+
+ switch (hc.code) {
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
kvm_vcpu_on_spin(vcpu, true);
break;
case HVCALL_SIGNAL_EVENT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
+ ret = kvm_hvcall_signal_event(vcpu, &hc);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
- if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
+ if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_SEND_IPI:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
+ ret = kvm_hv_send_ipi(vcpu, &hc, false);
break;
case HVCALL_SEND_IPI_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.fast || hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
+ ret = kvm_hv_send_ipi(vcpu, &hc, true);
break;
case HVCALL_POST_DEBUG_DATA:
case HVCALL_RETRIEVE_DEBUG_DATA:
- if (unlikely(fast)) {
+ if (unlikely(hc.fast)) {
ret = HV_STATUS_INVALID_PARAMETER;
break;
}
@@ -2012,9 +2278,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
@@ -2024,6 +2290,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
}
+hypercall_complete:
return kvm_hv_hypercall_complete(vcpu, ret);
}
@@ -2172,6 +2439,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->ebx |= HV_POST_MESSAGES;
ent->ebx |= HV_SIGNAL_EVENTS;
+ ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
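The hyperv.c rework threads all hypercall state through the new struct kvm_hv_hcall, which is what makes the XMM fast-call variants possible: when HV_HYPERCALL_FAST_BIT is set and the code is one of the TLB-flush calls, the XMM input registers are snapshotted up front and the flush paths read their arguments from hc->xmm instead of guest memory. A worked decode of the hypercall control word, as a hedged example using the field positions from the code above:

	/*
	 * Example: a fast HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE (code 0x0002)
	 * with no rep semantics.
	 */
	u64 param = 0x0002 | HV_HYPERCALL_FAST_BIT;

	u16  code    = param & 0xffff;                                   /* 0x0002 */
	bool fast    = !!(param & HV_HYPERCALL_FAST_BIT);                /* true   */
	u16  rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;  /* 0      */
	u16  rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; /* 0      */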
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 60547d5cb6d7..730da8537d05 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -138,6 +138,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3db5c42c9ecd..90e1ffdc05b7 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -55,6 +55,13 @@ static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
+static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -118,6 +125,11 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.walk_mmu->pdptrs[index];
}
+static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
+{
+ vcpu->arch.walk_mmu->pdptrs[index] = value;
+}
+
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
@@ -162,6 +174,7 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;
+ vcpu->stat.guest_mode = 1;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
@@ -172,6 +185,8 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
vcpu->arch.load_eoi_exitmap_pending = false;
kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}
+
+ vcpu->stat.guest_mode = 0;
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index f016838faedd..68b420289d7e 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -13,6 +13,7 @@
#define _ASM_X86_KVM_X86_EMULATE_H
#include <asm/desc_defs.h>
+#include "fpu.h"
struct x86_emulate_ctxt;
enum x86_intercept;
@@ -229,15 +230,12 @@ struct x86_emulate_ops {
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
- void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
- int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
- const char *smstate);
- void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
+ void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
+ int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
+ void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
};
-typedef u32 __attribute__((vector_size(16))) sse128_t;
-
/* Type, address-of, and value of an instruction's operand. */
struct operand {
enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
@@ -314,7 +312,6 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
- bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
@@ -491,7 +488,7 @@ enum x86_intercept {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
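Two interface cleanups land in kvm_emulate.h: the exiting_smm/leave_smm/triple_fault callbacks replace the set_hflags/pre_leave_smm/post_leave_smm trio, so em_rsm() can request a triple fault on failure instead of returning X86EMUL_UNHANDLEABLE, and the ctxt->ud flag moves into the new emulation_type argument of x86_decode_insn(). The assumed call-site shape after the signature change:

	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
	if (r == EMULATION_FAILED) {
		/*
		 * With EMULTYPE_TRAP_UD the decoder refuses instructions
		 * that only exist to be emulated, so the caller re-injects
		 * #UD here instead of emulating.
		 */
	}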
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
new file mode 100644
index 000000000000..c7db2df50a7a
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ void *data)
+{
+ struct kvm_tlb_range *range = data;
+
+ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+ range->pages);
+}
+
+static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
+ struct kvm_tlb_range *range)
+{
+ if (range)
+ return hyperv_flush_guest_mapping_range(root_tdp,
+ kvm_fill_hv_flush_list_func, (void *)range);
+ else
+ return hyperv_flush_guest_mapping(root_tdp);
+}
+
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ struct kvm_arch *kvm_arch = &kvm->arch;
+ struct kvm_vcpu *vcpu;
+ int ret = 0, i, nr_unique_valid_roots;
+ hpa_t root;
+
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+
+ if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
+ nr_unique_valid_roots = 0;
+
+ /*
+ * Flush all valid roots, and see if all vCPUs have converged
+ * on a common root, in which case future flushes can skip the
+ * loop and flush the common root.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ root = vcpu->arch.hv_root_tdp;
+ if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
+ continue;
+
+ /*
+ * Set the tracked root to the first valid root. Keep
+ * this root for the entirety of the loop even if more
+ * roots are encountered as a low effort optimization
+ * to avoid flushing the same (first) root again.
+ */
+ if (++nr_unique_valid_roots == 1)
+ kvm_arch->hv_root_tdp = root;
+
+ if (!ret)
+ ret = hv_remote_flush_root_tdp(root, range);
+
+ /*
+ * Stop processing roots if a failure occurred and
+ * multiple valid roots have already been detected.
+ */
+ if (ret && nr_unique_valid_roots > 1)
+ break;
+ }
+
+ /*
+ * The optimized flush of a single root can't be used if there
+ * are multiple valid roots (obviously).
+ */
+ if (nr_unique_valid_roots > 1)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ } else {
+ ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
+ }
+
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
+
+int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
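kvm_onhyperv.c hoists the enlightened remote-TLB-flush logic out of the vendor modules: each vCPU publishes its root in arch.hv_root_tdp via hv_track_root_tdp(), and the flush path either hits the single converged root or walks all vCPUs once and re-converges. How a vendor module is expected to opt in, sketched under the assumption that detection and the ops structure name stay vendor-specific:

	#if IS_ENABLED(CONFIG_HYPERV)
		/* hypothetical detection and ops structure names */
		if (enlightened_tlb_flush_supported) {
			vendor_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
			vendor_x86_ops.tlb_remote_flush_with_range =
						hv_remote_flush_tlb_with_range;
		}
	#endif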
diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
new file mode 100644
index 000000000000..1c67abf2eba9
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#ifndef __ARCH_X86_KVM_KVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_KVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range);
+int hv_remote_flush_tlb(struct kvm *kvm);
+
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+ struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
+
+ if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+ vcpu->arch.hv_root_tdp = root_tdp;
+ if (root_tdp != kvm_arch->hv_root_tdp)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ }
+}
+#else /* !CONFIG_HYPERV */
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+}
+#endif /* !CONFIG_HYPERV */
+
+#endif
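
hv_track_root_tdp() above is the producer side of that heuristic. Continuing the hypothetical model from the sketch after kvm_onhyperv.c, the update rule reduces to a compare-and-invalidate:

	/* A vCPU switching to a root that disagrees with the tracked one
	 * invalidates it, so the next flush_all() re-runs the convergence
	 * loop. Illustrative only; the kernel does this under
	 * hv_root_tdp_lock and only when the Hyper-V flush hook is wired. */
	static void track_root(struct vm_model *vm, int vcpu, uint64_t root)
	{
		vm->vcpu_root[vcpu] = root;
		if (root != vm->tracked_root)
			vm->tracked_root = INVALID_ROOT;
	}

	int main(void)
	{
		struct vm_model vm = { .tracked_root = INVALID_ROOT };
		int i;

		for (i = 0; i < NR_VCPUS; i++)
			track_root(&vm, i, 0x1000);

		flush_all(&vm);		/* slow path: converges on 0x1000 */
		flush_all(&vm);		/* fast path: one flush */
		track_root(&vm, 0, 0x2000);	/* divergence kills the fast path */
		flush_all(&vm);		/* slow path again */
		return 0;
	}
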
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c0ebef560bd1..ba5a27879f1d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
if (!apic_x2apic_mode(apic))
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ if (alignment + len > 4)
+ return 1;
+
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
return 1;
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
static void cancel_hv_timer(struct kvm_lapic *apic);
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ preempt_disable();
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ preempt_enable();
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
if (apic->lapic_timer.timer_mode != timer_mode) {
if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
APIC_LVT_TIMER_TSCDEADLINE)) {
- hrtimer_cancel(&apic->lapic_timer.timer);
- preempt_disable();
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- preempt_enable();
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, 0);
apic->lapic_timer.period = 0;
apic->lapic_timer.tscdeadline = 0;
@@ -1598,11 +1606,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
+ if (lapic_timer_advance_dynamic) {
+ adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+ /*
+ * If the timer fired early, reread the TSC to account for the
+ * overhead of the above adjustment to avoid waiting longer
+ * than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ }
+
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
- if (lapic_timer_advance_dynamic)
- adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
}
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
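
The reordering in __kvm_wait_lapic_expire() above is subtle: the dynamic adjustment itself burns cycles, so the deadline comparison has to be redone with a fresh timestamp or the busy-wait would overshoot by the cost of the tuner. A self-contained user-space sketch of the pattern; now() and tune_advance() are illustrative stand-ins for the guest-TSC read and adjust_lapic_timer_advance(), not KVM interfaces.

	#include <stdint.h>
	#include <time.h>

	/* Stand-in for rdtsc()/kvm_read_l1_tsc(): monotonic nanoseconds. */
	static uint64_t now(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* Stand-in for adjust_lapic_timer_advance(); a real tuner nudges the
	 * advance value based on the observed early/late delta. */
	static void tune_advance(int64_t delta) { (void)delta; }

	static void wait_until(uint64_t deadline, int dynamic)
	{
		uint64_t t = now();

		if (dynamic) {
			tune_advance((int64_t)(t - deadline));
			/*
			 * The tuner took time; if we were early, re-sample so
			 * the spin below covers only what is actually left.
			 * This mirrors the TSC re-read added above.
			 */
			if (t < deadline)
				t = now();
		}

		while (t < deadline)
			t = now();
	}

	int main(void)
	{
		wait_until(now() + 1000, 1);	/* wait ~1us with tuning */
		return 0;
	}
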
@@ -1661,7 +1677,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
atomic_inc(&apic->lapic_timer.pending);
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
kvm_vcpu_kick(vcpu);
}
@@ -2084,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
if (apic_lvtt_tscdeadline(apic))
break;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
@@ -2615,6 +2631,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
update_divide_count(apic);
__start_apic_timer(apic, APIC_TMCCT);
+ kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
@@ -2856,7 +2873,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector;
@@ -2864,7 +2881,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
unsigned long pe;
if (!lapic_in_kernel(vcpu))
- return;
+ return 0;
/*
* Read pending events before calling the check_events
@@ -2872,12 +2889,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
*/
pe = smp_load_acquire(&apic->pending_events);
if (!pe)
- return;
+ return 0;
if (is_guest_mode(vcpu)) {
r = kvm_check_nested_events(vcpu);
if (r < 0)
- return;
+ return r == -EBUSY ? 0 : r;
/*
* If an event has happened and caused a vmexit,
* we know INITs are latched and therefore
@@ -2898,7 +2915,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &pe))
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
- return;
+ return 0;
}
if (test_bit(KVM_APIC_INIT, &pe)) {
@@ -2919,6 +2936,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
+ return 0;
}
void kvm_lapic_exit(void)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 997c45a5963a..d7c25d0c1354 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 88d0ed5225a4..83e6c6965f1e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,6 +44,12 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
+ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
+ X86_CR4_LA57)
+
+#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
+
static __always_inline u64 rsvd_bits(int s, int e)
{
BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
@@ -62,12 +68,9 @@ static __always_inline u64 rsvd_bits(int s, int e)
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3);
+void kvm_init_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
@@ -162,11 +165,6 @@ static inline bool is_writable_pte(unsigned long pte)
return pte & PT_WRITABLE_MASK;
}
-static inline bool is_write_protection(struct kvm_vcpu *vcpu)
-{
- return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
/*
* Check if a given access (described through the I/D, W/R and U/S bits of a
* page fault error code pfec) causes a permission fault with the given PTE
@@ -232,4 +230,14 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+ /*
+	 * Read memslots_have_rmaps before rmap pointers. Hence, threads reading
+ * memslots_have_rmaps in any lock context are guaranteed to see the
+ * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps.
+ */
+ return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
+}
+
#endif
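
The acquire in kvm_memslots_have_rmaps() is only half of the protocol: the writer must fill in the rmap pointers before flipping the flag with a release store (alloc_all_memslots_rmaps(), referenced in the comment). A minimal C11 model of the pairing; the names are illustrative, and the kernel uses smp_store_release()/smp_load_acquire() rather than C11 atomics.

	#include <stdatomic.h>
	#include <stddef.h>

	struct slot { int *rmap; };

	static struct slot slot;
	static atomic_bool have_rmaps;

	/* Writer: populate the pointers, then publish with release semantics,
	 * as alloc_all_memslots_rmaps() does. */
	static void publish_rmaps(int *rmap)
	{
		slot.rmap = rmap;	/* plain store, ordered before the release */
		atomic_store_explicit(&have_rmaps, 1, memory_order_release);
	}

	/* Reader: an acquire load of the flag guarantees the rmap pointer is
	 * visible, no matter which lock (if any) the reader holds. */
	static int *get_rmap(void)
	{
		if (!atomic_load_explicit(&have_rmaps, memory_order_acquire))
			return NULL;
		return slot.rmap;
	}

	int main(void)
	{
		static int rmap0;

		publish_rmaps(&rmap0);
		return get_rmap() != &rmap0;	/* 0 on success */
	}
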
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..845d114ae075 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -55,7 +55,7 @@
extern bool itlb_multihit_kvm_mitigation;
-static int __read_mostly nx_huge_pages = -1;
+int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
@@ -176,9 +176,80 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+struct kvm_mmu_role_regs {
+ const unsigned long cr0;
+ const unsigned long cr4;
+ const u64 efer;
+};
+
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
+/*
+ * Yes, lots of underscores. They're a hint that you probably shouldn't be
+ * reading from the role_regs. Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+{ \
+ return !!(regs->reg & flag); \
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
+
+/*
+ * The MMU itself (with a valid role) is the single source of truth for the
+ * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
+ * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
+ * and the vCPU may be incorrect/irrelevant.
+ */
+#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
+static inline bool is_##reg##_##name(struct kvm_mmu *mmu) \
+{ \
+ return !!(mmu->mmu_role. base_or_ext . reg##_##name); \
+}
+BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg);
+BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pae);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
+BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
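
Both accessor families above are generated by token pasting. The same pattern in a self-contained form, for readers unfamiliar with the idiom; the register and flag values here are illustrative, only the macro shape matches the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct regs { unsigned long cr0, cr4; };

	/* reg and name are pasted into the function identifier, so each
	 * invocation stamps out one dedicated predicate. */
	#define BUILD_REG_ACCESSOR(reg, name, flag)		\
	static inline bool is_##reg##_##name(struct regs *r)	\
	{							\
		return !!(r->reg & (flag));			\
	}
	BUILD_REG_ACCESSOR(cr0, pg, 1ul << 31)
	BUILD_REG_ACCESSOR(cr4, pae, 1ul << 5)

	int main(void)
	{
		struct regs r = { .cr0 = 1ul << 31, .cr4 = 0 };

		printf("pg=%d pae=%d\n", is_cr0_pg(&r), is_cr4_pae(&r));
		return 0;
	}
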
+
+static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+ .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+ .efer = vcpu->arch.efer,
+ };
+
+ return regs;
+}
+
+static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
+{
+ if (!____is_cr0_pg(regs))
+ return 0;
+ else if (____is_efer_lma(regs))
+ return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (____is_cr4_pae(regs))
+ return PT32E_ROOT_LEVEL;
+ else
+ return PT32_ROOT_LEVEL;
+}
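
For reference, the decision tree in role_regs_to_root_level() resolves to one of five outcomes, straight from the code above:

	CR0.PG=0                -> 0 (paging disabled, no guest page-table walk)
	EFER.LMA=1, CR4.LA57=1  -> PT64_ROOT_5LEVEL
	EFER.LMA=1, CR4.LA57=0  -> PT64_ROOT_4LEVEL
	EFER.LMA=0, CR4.PAE=1   -> PT32E_ROOT_LEVEL
	EFER.LMA=0, CR4.PAE=0   -> PT32_ROOT_LEVEL
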
static inline bool kvm_available_flush_tlb_with_range(void)
{
@@ -208,11 +279,6 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
-bool is_nx_huge_page_enabled(void)
-{
- return READ_ONCE(nx_huge_pages);
-}
-
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned int access)
{
@@ -269,11 +335,6 @@ static int is_cpuid_PSE36(void)
return 1;
}
-static int is_nx(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.efer & EFER_NX;
-}
-
static gfn_t pse36_gfn_delta(u32 gpte)
{
int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
@@ -1177,8 +1238,7 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
* @gfn_offset: start of the BITS_PER_LONG pages we care about
* @mask: indicates which pages we should protect
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * Used when we do not need to care about huge page mappings.
*/
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -1189,6 +1249,10 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1218,6 +1282,10 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1235,13 +1303,36 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
* enable dirty logging for them.
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * We need to care about huge page mappings: e.g. during dirty logging we may
+ * have such mappings.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
+ /*
+ * Huge pages are NOT write protected when we start dirty logging in
+ * initially-all-set mode; must write protect them here so that they
+ * are split to 4K on the first write.
+ *
+ * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
+ * of memslot has no such restriction, so the range can cross two large
+ * pages.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+ gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
+ gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
+
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
+
+ /* Cross two large pages? */
+ if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
+ ALIGN(end << PAGE_SHIFT, PMD_SIZE))
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
+ PG_LEVEL_2M);
+ }
+
+ /* Now handle 4K PTEs. */
if (kvm_x86_ops.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
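
The "cross two large pages?" test bears spelling out: gfn_offset is guaranteed 64-aligned but base_gfn need not be 2MiB-aligned, so the lowest and highest set bits of the mask can land in different 2MiB regions. A hedged user-space sketch of the intent, computing the containing 2MiB page directly rather than via the kernel's ALIGN() round-up; the builtins stand in for __ffs()/__fls():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SIZE	(1ull << 21)	/* 2 MiB */

	/* mask must be nonzero; ctz/clz find the lowest/highest dirty gfn. */
	static bool mask_crosses_2m(uint64_t base_gfn, uint64_t gfn_offset,
				    uint64_t mask)
	{
		uint64_t start = base_gfn + gfn_offset + __builtin_ctzll(mask);
		uint64_t end = base_gfn + gfn_offset + 63 - __builtin_clzll(mask);

		return ((start << PAGE_SHIFT) & ~(PMD_SIZE - 1)) !=
		       ((end << PAGE_SHIFT) & ~(PMD_SIZE - 1));
	}

	int main(void)
	{
		uint64_t mask = (1ull << 63) | 1;	/* gfns +0 and +63 dirty */

		printf("%d\n", mask_crosses_2m(500, 0, mask)); /* 1: 500 and 563 straddle gfn 512 */
		printf("%d\n", mask_crosses_2m(0, 0, mask));   /* 0: both in the first 2MiB page */
		return 0;
	}
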
@@ -1254,20 +1345,23 @@ int kvm_cpu_dirty_log_size(void)
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn)
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level)
{
struct kvm_rmap_head *rmap_head;
int i;
bool write_protected = false;
- for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
- rmap_head = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+ rmap_head = __gfn_to_rmap(gfn, i, slot);
+ write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ }
}
if (is_tdp_mmu_enabled(kvm))
write_protected |=
- kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+ kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
return write_protected;
}
@@ -1277,7 +1371,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+ return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1433,9 +1527,10 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool flush;
+ bool flush = false;
- flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1445,9 +1540,10 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool flush;
+ bool flush = false;
- flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
@@ -1500,9 +1596,10 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool young;
+ bool young = false;
- young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1512,9 +1609,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- bool young;
+ bool young = false;
- young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
@@ -1748,17 +1846,10 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
-static inline bool is_ept_sp(struct kvm_mmu_page *sp)
-{
- return sp->role.cr0_wp && sp->role.smap_andnot_wp;
-}
-
-/* @sp->gfn should be write-protected at the call site */
-static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
{
- if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
- vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+ if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -1804,31 +1895,6 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
-{
- kvm_unlink_unsync_page(vcpu->kvm, sp);
- return __kvm_sync_page(vcpu, sp, invalid_list);
-}
-
-/* @gfn should be write-protected at the call site */
-static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
- struct list_head *invalid_list)
-{
- struct kvm_mmu_page *s;
- bool ret = false;
-
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
- if (!s->unsync)
- continue;
-
- WARN_ON(s->role.level != PG_LEVEL_4K);
- ret |= kvm_sync_page(vcpu, s, invalid_list);
- }
-
- return ret;
-}
-
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
unsigned int idx[PT64_ROOT_MAX_LEVEL];
@@ -1923,6 +1989,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
for_each_sp(pages, sp, parents, i) {
+ kvm_unlink_unsync_page(vcpu->kvm, sp);
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
@@ -1958,8 +2025,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
struct hlist_head *sp_list;
unsigned quadrant;
struct kvm_mmu_page *sp;
- bool need_sync = false;
- bool flush = false;
int collisions = 0;
LIST_HEAD(invalid_list);
@@ -1982,20 +2047,39 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
continue;
}
- if (!need_sync && sp->unsync)
- need_sync = true;
-
- if (sp->role.word != role.word)
+ if (sp->role.word != role.word) {
+ /*
+ * If the guest is creating an upper-level page, zap
+ * unsync pages for the same gfn. While it's possible
+ * the guest is using recursive page tables, in all
+ * likelihood the guest has stopped using the unsync
+ * page and is installing a completely unrelated page.
+ * Unsync pages must not be left as is, because the new
+ * upper-level page will be write-protected.
+ */
+ if (level > PG_LEVEL_4K && sp->unsync)
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+ &invalid_list);
continue;
+ }
if (direct_mmu)
goto trace_get_page;
if (sp->unsync) {
- /* The page is good, but __kvm_sync_page might still end
- * up zapping it. If so, break in order to rebuild it.
+ /*
+ * The page is good, but is stale. kvm_sync_page does
+ * get the latest guest state, but (unlike mmu_unsync_children)
+ * it doesn't write-protect the page or mark it synchronized!
+ * This way the validity of the mapping is ensured, but the
+ * overhead of write protection is not incurred until the
+ * guest invalidates the TLB mapping. This allows multiple
+ * SPs for a single gfn to be unsync.
+ *
+ * If the sync fails, the page is zapped. If so, break
+ * in order to rebuild it.
*/
- if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+ if (!kvm_sync_page(vcpu, sp, &invalid_list))
break;
WARN_ON(!list_empty(&invalid_list));
@@ -2020,22 +2104,14 @@ trace_get_page:
sp->role = role;
hlist_add_head(&sp->hash_link, sp_list);
if (!direct) {
- /*
- * we should do write protection before syncing pages
- * otherwise the content of the synced shadow page may
- * be inconsistent with guest page table.
- */
account_shadowed(vcpu->kvm, sp);
if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
-
- if (level > PG_LEVEL_4K && need_sync)
- flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
trace_kvm_mmu_get_page(sp, true);
-
- kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
@@ -2374,7 +2450,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
* page is available, while the caller may end up allocating as many as
* four pages, e.g. for PAE roots or for 5-level paging. Temporarily
* exceeding the (arbitrary by default) limit will not harm the host,
- * being too agressive may unnecessarily kill the guest, and getting an
+ * being too aggressive may unnecessarily kill the guest, and getting an
* exact count is far more trouble than it's worth, especially in the
* page fault paths.
*/
@@ -2448,17 +2524,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
kvm_mmu_mark_parents_unsync(sp);
}
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync)
+/*
+ * Attempt to unsync any shadow pages that can be reached by the specified
+ * gfn, for which KVM is creating a writable mapping. Returns 0 if all pages
+ * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
+ * be write-protected.
+ */
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
struct kvm_mmu_page *sp;
+ /*
+ * Force write-protection if the page is being tracked. Note, the page
+ * track machinery is used to write-protect upper-level shadow pages,
+ * i.e. this guards the role.level == 4K assertion below!
+ */
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
- return true;
+ return -EPERM;
+ /*
+ * The page is not write-tracked, mark existing shadow pages unsync
+ * unless KVM is synchronizing an unsync SP (can_unsync = false). In
+ * that case, KVM must complete emulation of the guest TLB flush before
+ * allowing shadow pages to become unsync (writable by the guest).
+ */
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync)
- return true;
+ return -EPERM;
if (sp->unsync)
continue;
@@ -2489,8 +2581,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
* 2.2 Guest issues TLB flush.
* That causes a VM Exit.
*
- * 2.3 kvm_mmu_sync_pages() reads sp->unsync.
- * Since it is false, so it just returns.
+ * 2.3 Walking of unsync pages sees sp->unsync is
+ * false and skips the page.
*
* 2.4 Guest accesses GVA X.
* Since the mapping in the SP was not updated,
@@ -2506,7 +2598,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
*/
smp_wmb();
- return false;
+ return 0;
}
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2827,9 +2919,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gfn_t gfn = gpa >> PAGE_SHIFT;
gfn_t base_gfn = gfn;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -3180,6 +3269,33 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+ unsigned long roots_to_free = 0;
+ hpa_t root_hpa;
+ int i;
+
+ /*
+	 * This should not be called while L2 is active; L2 can't invalidate
+ * _only_ its own roots, e.g. INVVPID unconditionally exits.
+ */
+ WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ root_hpa = mmu->prev_roots[i].hpa;
+ if (!VALID_PAGE(root_hpa))
+ continue;
+
+ if (!to_shadow_page(root_hpa) ||
+ to_shadow_page(root_hpa)->role.guest_mode)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+ }
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
+
+
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
int ret = 0;
@@ -3280,6 +3396,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
}
}
+ r = alloc_all_memslots_rmaps(vcpu->kvm);
+ if (r)
+ return r;
+
write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
if (r < 0)
@@ -3423,8 +3543,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
* flush strictly after those changes are made. We only need to
* ensure that the other CPU sets these flags before any actual
* changes to the page tables are made. The comments in
- * mmu_need_write_protect() describe what could go wrong if this
- * requirement isn't satisfied.
+ * mmu_try_to_unsync_pages() describe what could go wrong if
+ * this requirement isn't satisfied.
*/
if (!smp_load_acquire(&sp->unsync) &&
!smp_load_acquire(&sp->unsync_children))
@@ -3474,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
- int bit7 = (pte >> 7) & 1;
-
- return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
- return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
/*
@@ -3540,12 +3647,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf, level;
bool reserved = false;
- if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
- *sptep = 0ull;
- return reserved;
- }
-
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu(vcpu->arch.mmu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3569,13 +3671,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
for (level = root; level >= leaf; level--)
- /*
- * Use a bitwise-OR instead of a logical-OR to aggregate the
- * reserved bit and EPT's invalid memtype/XWR checks to avoid
- * adding a Jcc in the loop.
- */
- reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
- __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+ reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
if (reserved) {
pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
@@ -3583,7 +3679,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
for (level = root; level >= leaf; level--)
pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
sptes[level], level,
- rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
+ get_rsvd_bits(rsvd_check, sptes[level], level));
}
return reserved;
@@ -3717,6 +3813,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
bool prefault, int max_level, bool is_tdp)
{
+ bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
bool write = error_code & PFERR_WRITE_MASK;
bool map_writable;
@@ -3729,7 +3826,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
- if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+ if (!is_tdp_mmu_fault) {
r = fast_page_fault(vcpu, gpa, error_code);
if (r != RET_PF_INVALID)
return r;
@@ -3751,7 +3848,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
r = RET_PF_RETRY;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_lock(&vcpu->kvm->mmu_lock);
else
write_lock(&vcpu->kvm->mmu_lock);
@@ -3762,7 +3859,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (r)
goto out_unlock;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
pfn, prefault);
else
@@ -3770,7 +3867,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
prefault, is_tdp);
out_unlock:
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_unlock(&vcpu->kvm->mmu_lock);
else
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3840,17 +3937,13 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
max_level, true);
}
-static void nonpaging_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
- context->root_level = 0;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
- context->nx = false;
}
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -3913,8 +4006,7 @@ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
}
static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
- union kvm_mmu_page_role new_role,
- bool skip_tlb_flush, bool skip_mmu_sync)
+ union kvm_mmu_page_role new_role)
{
if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -3929,10 +4021,10 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
*/
kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
- if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+ if (force_flush_and_sync_on_reuse) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
/*
* The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -3951,11 +4043,9 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
to_shadow_page(vcpu->arch.mmu->root_hpa));
}
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
- __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
- skip_tlb_flush, skip_mmu_sync);
+ __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
@@ -3981,26 +4071,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
return false;
}
-static inline bool is_last_gpte(struct kvm_mmu *mmu,
- unsigned level, unsigned gpte)
-{
- /*
- * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
- * If it is clear, there are no large pages at this level, so clear
- * PT_PAGE_SIZE_MASK in gpte if that is the case.
- */
- gpte &= level - mmu->last_nonleaf_level;
-
- /*
- * PG_LEVEL_4K always terminates. The RHS has bit 7 set
- * iff level <= PG_LEVEL_4K, which for our purpose means
- * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PG_LEVEL_4K - 1;
-
- return gpte & PT_PAGE_SIZE_MASK;
-}
-
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
@@ -4015,8 +4085,7 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
#undef PTTYPE
static void
-__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
- struct rsvd_bits_validate *rsvd_check,
+__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
bool pse, bool amd)
{
@@ -4105,14 +4174,29 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
}
}
+static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
+{
+ /*
+ * If TDP is enabled, let the guest use GBPAGES if they're supported in
+ * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
+ * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
+ * walk for performance and complexity reasons. Not to mention KVM
+ * _can't_ solve the problem because GVA->GPA walks aren't visible to
+ * KVM once a TDP translation is installed. Mimic hardware behavior so
+	 * that KVM's behavior is at least consistent, i.e. doesn't randomly inject #PF.
+ */
+ return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
+ guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
+}
+
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
- __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
+ __reset_rsvds_bits_mask(&context->guest_rsvd_check,
vcpu->arch.reserved_gpa_bits,
- context->root_level, context->nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu),
+ context->root_level, is_efer_nx(context),
+ guest_can_use_gbpages(vcpu),
+ is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
}
@@ -4165,24 +4249,32 @@ static inline u64 reserved_hpa_bits(void)
* table in guest or amd nested guest, its mmu features completely
* follow the features in guest.
*/
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- bool uses_nx = context->nx ||
- context->mmu_role.base.smep_andnot_wp;
+ /*
+ * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+ * The iTLB multi-hit workaround can be toggled at any time, so assume
+ * NX can be used by any non-nested shadow MMU to avoid having to reset
+ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
+ */
+ bool uses_nx = is_efer_nx(context) || !tdp_enabled;
+
+	/* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
+ bool is_amd = true;
+ /* KVM doesn't use 2-level page tables for the shadow MMU. */
+ bool is_pse = false;
struct rsvd_bits_validate *shadow_zero_check;
int i;
- /*
- * Passing "true" to the last argument is okay; it adds a check
- * on bit 8 of the SPTEs which KVM doesn't use anyway.
- */
+ WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
+
shadow_zero_check = &context->shadow_zero_check;
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, uses_nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu), true);
+ guest_can_use_gbpages(vcpu), is_pse, is_amd);
if (!shadow_me_mask)
return;
@@ -4193,7 +4285,6 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
}
}
-EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
static inline bool boot_cpu_is_amd(void)
{
@@ -4215,11 +4306,10 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
shadow_zero_check = &context->shadow_zero_check;
if (boot_cpu_is_amd())
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
- true, true);
+ false, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
reserved_hpa_bits(), false);
@@ -4255,8 +4345,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
(7 & (access) ? 128 : 0))
-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{
unsigned byte;
@@ -4264,9 +4353,10 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
const u8 w = BYTE_MASK(ACC_WRITE_MASK);
const u8 u = BYTE_MASK(ACC_USER_MASK);
- bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
- bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
- bool cr0_wp = is_write_protection(vcpu);
+ bool cr4_smep = is_cr4_smep(mmu);
+ bool cr4_smap = is_cr4_smap(mmu);
+ bool cr0_wp = is_cr0_wp(mmu);
+ bool efer_nx = is_efer_nx(mmu);
for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
unsigned pfec = byte << 1;
@@ -4292,7 +4382,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
/* Not really needed: !nx will cause pte.nx to fault */
- if (!mmu->nx)
+ if (!efer_nx)
ff = 0;
/* Allow supervisor writes if !cr0.wp */
@@ -4351,24 +4441,17 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
-static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- bool ept)
+static void update_pkru_bitmask(struct kvm_mmu *mmu)
{
unsigned bit;
bool wp;
- if (ept) {
- mmu->pkru_mask = 0;
- return;
- }
-
- /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
+ if (!is_cr4_pke(mmu)) {
mmu->pkru_mask = 0;
return;
}
- wp = is_write_protection(vcpu);
+ wp = is_cr0_wp(mmu);
for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
unsigned pfec, pkey_bits;
@@ -4402,81 +4485,51 @@ static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
}
-static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
{
- unsigned root_level = mmu->root_level;
+ if (!is_cr0_pg(mmu))
+ return;
- mmu->last_nonleaf_level = root_level;
- if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
- mmu->last_nonleaf_level++;
+ reset_rsvds_bits_mask(vcpu, mmu);
+ update_permission_bitmask(mmu, false);
+ update_pkru_bitmask(mmu);
}
-static void paging64_init_context_common(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context,
- int level)
+static void paging64_init_context(struct kvm_mmu *context)
{
- context->nx = is_nx(vcpu);
- context->root_level = level;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
- MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
- context->shadow_root_level = level;
context->direct_map = false;
}
-static void paging64_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_mmu *context)
{
- int root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
-
- paging64_init_context_common(vcpu, context, root_level);
-}
-
-static void paging32_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
-static void paging32E_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
-}
-
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_extended_role ext = {0};
- ext.cr0_pg = !!is_paging(vcpu);
- ext.cr4_pae = !!is_pae(vcpu);
- ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- ext.cr4_pse = !!is_pse(vcpu);
- ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
- ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (____is_cr0_pg(regs)) {
+ ext.cr0_pg = 1;
+ ext.cr4_pae = ____is_cr4_pae(regs);
+ ext.cr4_smep = ____is_cr4_smep(regs);
+ ext.cr4_smap = ____is_cr4_smap(regs);
+ ext.cr4_pse = ____is_cr4_pse(regs);
+
+ /* PKEY and LA57 are active iff long mode is active. */
+ ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+ ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+ }
ext.valid = 1;
@@ -4484,20 +4537,23 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs,
bool base_only)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
- role.base.nxe = !!is_nx(vcpu);
- role.base.cr0_wp = is_write_protection(vcpu);
+ if (____is_cr0_pg(regs)) {
+ role.base.efer_nx = ____is_efer_nx(regs);
+ role.base.cr0_wp = ____is_cr0_wp(regs);
+ }
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
if (base_only)
return role;
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
return role;
}
@@ -4512,9 +4568,10 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4527,8 +4584,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role new_role =
- kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+ kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4542,60 +4600,44 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
+ context->root_level = role_regs_to_root_level(&regs);
- if (!is_paging(vcpu)) {
- context->nx = false;
+ if (!is_cr0_pg(context))
context->gva_to_gpa = nonpaging_gva_to_gpa;
- context->root_level = 0;
- } else if (is_long_mode(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
- context->gva_to_gpa = paging64_gva_to_gpa;
- } else if (is_pae(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else if (is_cr4_pae(context))
context->gva_to_gpa = paging64_gva_to_gpa;
- } else {
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else
context->gva_to_gpa = paging32_gva_to_gpa;
- }
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
+ reset_guest_paging_metadata(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
- role.base.smep_andnot_wp = role.ext.cr4_smep &&
- !is_write_protection(vcpu);
- role.base.smap_andnot_wp = role.ext.cr4_smap &&
- !is_write_protection(vcpu);
- role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+ role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+ role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+ role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
return role;
}
static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
- role.base.direct = !is_paging(vcpu);
+ role.base.direct = !____is_cr0_pg(regs);
- if (!is_long_mode(vcpu))
+ if (!____is_efer_lma(regs))
role.base.level = PT32E_ROOT_LEVEL;
- else if (is_la57_mode(vcpu))
+ else if (____is_cr4_la57(regs))
role.base.level = PT64_ROOT_5LEVEL;
else
role.base.level = PT64_ROOT_4LEVEL;
@@ -4604,37 +4646,44 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
}
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- u32 cr0, u32 cr4, u32 efer,
+ struct kvm_mmu_role_regs *regs,
union kvm_mmu_role new_role)
{
- if (!(cr0 & X86_CR0_PG))
- nonpaging_init_context(vcpu, context);
- else if (efer & EFER_LMA)
- paging64_init_context(vcpu, context);
- else if (cr4 & X86_CR4_PAE)
- paging32E_init_context(vcpu, context);
- else
- paging32_init_context(vcpu, context);
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
context->mmu_role.as_u64 = new_role.as_u64;
+
+ if (!is_cr0_pg(context))
+ nonpaging_init_context(context);
+ else if (is_cr4_pae(context))
+ paging64_init_context(context);
+ else
+ paging32_init_context(context);
+ context->root_level = role_regs_to_root_level(regs);
+
+ reset_guest_paging_metadata(vcpu, context);
+ context->shadow_root_level = new_role.base.level;
+
reset_shadow_zero_bits_mask(vcpu, context);
}
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+ kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ shadow_mmu_init_context(vcpu, context, regs, new_role);
}
static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, false);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4642,23 +4691,22 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
return role;
}
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3)
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
- union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = cr0,
+ .cr4 = cr4,
+ .efer = efer,
+ };
+ union kvm_mmu_role new_role;
- __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+ new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
- if (new_role.as_u64 != context->mmu_role.as_u64) {
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
- /*
- * Override the level set by the common init helper, nested TDP
- * always uses the host's TDP configuration.
- */
- context->shadow_root_level = new_role.base.level;
- }
+ shadow_mmu_init_context(vcpu, context, &regs, new_role);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4678,15 +4726,10 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
role.base.guest_mode = true;
role.base.access = ACC_ALL;
- /*
- * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
- * SMAP variation to denote shadow EPT entries.
- */
- role.base.cr0_wp = true;
- role.base.smap_andnot_wp = true;
-
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
+ role.ext.word = 0;
role.ext.execonly = execonly;
+ role.ext.valid = 1;
return role;
}
@@ -4700,14 +4743,15 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
execonly, level);
- __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
+ __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
context->shadow_root_level = level;
- context->nx = true;
context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
@@ -4715,11 +4759,9 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->invlpg = ept_invlpg;
context->root_level = level;
context->direct_map = false;
- context->mmu_role.as_u64 = new_role.as_u64;
- update_permission_bitmask(vcpu, context, true);
- update_pkru_bitmask(vcpu, context, true);
- update_last_nonleaf_level(vcpu, context);
+ update_permission_bitmask(context, true);
+ update_pkru_bitmask(context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -4728,20 +4770,36 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
- kvm_init_shadow_mmu(vcpu,
- kvm_read_cr0_bits(vcpu, X86_CR0_PG),
- kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
- vcpu->arch.efer);
+ kvm_init_shadow_mmu(vcpu, &regs);
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
+{
+ union kvm_mmu_role role;
+
+ role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+
+ /*
+	 * Nested MMUs are used only for walking L2's gva->gpa; they never have
+ * shadow pages of their own and so "direct" has no meaning. Set it
+ * to "true" to try to detect bogus usage of the nested MMU.
+ */
+ role.base.direct = true;
+ role.base.level = role_regs_to_root_level(regs);
+ return role;
+}
+
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -4751,6 +4809,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
g_context->get_guest_pgd = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
+ g_context->root_level = new_role.base.level;
/*
* L2 page tables are never shadowed, so there is no need to sync
@@ -4766,44 +4825,20 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
* nested page tables as the second level of translation. Basically
* the gva_to_gpa functions between mmu and nested_mmu are swapped.
*/
- if (!is_paging(vcpu)) {
- g_context->nx = false;
- g_context->root_level = 0;
+ if (!is_paging(vcpu))
g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
- } else if (is_long_mode(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_long_mode(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else if (is_pae(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_pae(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else {
- g_context->nx = false;
- g_context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else
g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
- }
- update_permission_bitmask(vcpu, g_context, false);
- update_pkru_bitmask(vcpu, g_context, false);
- update_last_nonleaf_level(vcpu, g_context);
+ reset_guest_paging_metadata(vcpu, g_context);
}
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
+void kvm_init_mmu(struct kvm_vcpu *vcpu)
{
- if (reset_roots) {
- uint i;
-
- vcpu->arch.mmu->root_hpa = INVALID_PAGE;
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- }
-
if (mmu_is_nested(vcpu))
init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
@@ -4816,20 +4851,53 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role role;
if (tdp_enabled)
- role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
else
- role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
return role.base;
}
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Invalidate all MMU roles to force them to reinitialize as CPUID
+ * information is factored into reserved bit calculations.
+ */
+ vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+ kvm_mmu_reset_context(vcpu);
+
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
+ * sweep the problem under the rug.
+ *
+ * KVM's horrific CPUID ABI makes the problem all but impossible to
+ * solve, as correctly handling multiple vCPU models (with respect to
+ * paging and physical address properties) in a single VM would require
+ * tracking all relevant CPUID information in kvm_mmu_page_role. That
+ * is very undesirable as it would double the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments), and in practice
+ * no sane VMM mucks with the core vCPU model on the fly.
+ */
+ if (vcpu->arch.last_vmentry_cpu != -1) {
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
+ }
+}
+
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
- kvm_init_mmu(vcpu, true);
+ kvm_init_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -5467,7 +5535,13 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
- kvm_mmu_init_tdp_mmu(kvm);
+ if (!kvm_mmu_init_tdp_mmu(kvm))
+ /*
+ * No smp_load/store wrappers needed here as we are in
+ * VM init and there cannot be any memslots / other threads
+ * accessing this struct kvm yet.
+ */
+ kvm->arch.memslots_have_rmaps = true;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
@@ -5490,29 +5564,29 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
int i;
bool flush = false;
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(memslot, slots) {
- gfn_t start, end;
-
- start = max(gfn_start, memslot->base_gfn);
- end = min(gfn_end, memslot->base_gfn + memslot->npages);
- if (start >= end)
- continue;
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(memslot, slots) {
+ gfn_t start, end;
+
+ start = max(gfn_start, memslot->base_gfn);
+ end = min(gfn_end, memslot->base_gfn + memslot->npages);
+ if (start >= end)
+ continue;
- flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PG_LEVEL_4K,
- KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true, flush);
+ flush = slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PG_LEVEL_4K,
+ KVM_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, true, flush);
+ }
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+ write_unlock(&kvm->mmu_lock);
}
- if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-
- write_unlock(&kvm->mmu_lock);
-
if (is_tdp_mmu_enabled(kvm)) {
flush = false;
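
A quick worked example of the max()/min() intersection idiom used in this hunk
(hypothetical values, not taken from the patch): a memslot covering GFNs
[0x100, 0x200) intersected with a zap request for [0x180, 0x400).

	gfn_t gfn_start = 0x180, gfn_end = 0x400;	/* requested zap range */
	gfn_t base_gfn = 0x100, npages = 0x100;		/* memslot geometry */

	gfn_t start = max(gfn_start, base_gfn);		/* = 0x180 */
	gfn_t end = min(gfn_end, base_gfn + npages);	/* = 0x200 */

	if (start >= end)	/* disjoint ranges yield start >= end */
		return;		/* nothing to zap in this memslot */
	/* otherwise zap [0x180, 0x200); note that end - 1 is passed as the
	 * inclusive last GFN to slot_handle_level_range() above. */
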
@@ -5539,12 +5613,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
int start_level)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
@@ -5612,18 +5689,17 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
- bool flush;
-
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ bool flush = false;
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
- flush = false;
-
read_lock(&kvm->mmu_lock);
flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
if (flush)
@@ -5650,11 +5726,14 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
@@ -5957,6 +6036,7 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
+ unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
int rcu_idx;
struct kvm_mmu_page *sp;
unsigned int ratio;
@@ -5968,7 +6048,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
write_lock(&kvm->mmu_lock);
ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
for ( ; to_zap; --to_zap) {
if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
break;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d64ccb417c60..35567293c1fd 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -116,14 +116,19 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
kvm_x86_ops.cpu_dirty_log_size;
}
-bool is_nx_huge_page_enabled(void);
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync);
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn);
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
u64 start_gfn, u64 pages);
@@ -158,8 +163,6 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
kvm_pfn_t *pfnp, int *goal_levelp);
-bool is_nx_huge_page_enabled(void);
-
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index e798489b56b5..efbad33a0645 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -40,7 +40,7 @@
role.direct ? " direct" : "", \
access_str[role.access], \
role.invalid ? " invalid" : "", \
- role.nxe ? "" : "!", \
+ role.efer_nx ? "" : "!", \
role.ad_disabled ? "!" : "", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 34bb0ec69bd8..91a9f7e0fd91 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
kvm_mmu_gfn_disallow_lpage(slot, gfn);
if (mode == KVM_PAGE_TRACK_WRITE)
- if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+ if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 70b7e44e3035..490a028ddabe 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -90,8 +90,8 @@ struct guest_walker {
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
bool pte_writable[PT_MAX_FULL_LEVELS];
- unsigned pt_access;
- unsigned pte_access;
+ unsigned int pt_access[PT_MAX_FULL_LEVELS];
+ unsigned int pte_access;
gfn_t gfn;
struct x86_exception fault;
};
@@ -305,6 +305,35 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
return pkeys;
}
+static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
+ unsigned int level, unsigned int gpte)
+{
+ /*
+ * For EPT and PAE paging (both variants), bit 7 is either reserved at
+ * all level or indicates a huge page (ignoring CR3/EPTP). In either
+ * case, bit 7 being set terminates the walk.
+ */
+#if PTTYPE == 32
+ /*
+ * 32-bit paging requires special handling because bit 7 is ignored if
+ * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
+ * greater than the last level for which bit 7 is the PAGE_SIZE bit.
+ *
+ * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7
+ * is not reserved and does not indicate a large page at this level,
+ * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
+ */
+ gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
+#endif
+ /*
+ * PG_LEVEL_4K always terminates. The RHS has bit 7 set
+ * iff level <= PG_LEVEL_4K, which for our purpose means
+ * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PG_LEVEL_4K - 1;
+
+ return gpte & PT_PAGE_SIZE_MASK;
+}
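
To make the bit twiddling above concrete, a hedged walk-through (unsigned
32-bit arithmetic; PT32_ROOT_LEVEL == 2, PG_LEVEL_4K == 1, and bit 7 is
PT_PAGE_SIZE_MASK; the cases are illustrative):

	/* level == 2, CR4.PSE == 0, gpte bit 7 set:
	 *   gpte &= 2 - (2 + 0) = 0x00000000  -> bit 7 cleared; with PSE=0,
	 *                                        bit 7 is ignored, not a huge
	 *                                        page indicator.
	 *   gpte |= 2 - 1 - 1   = 0x00000000  -> no change; walk continues.
	 *
	 * level == 2, CR4.PSE == 1, gpte bit 7 set:
	 *   gpte &= 2 - (2 + 1) = 0xffffffff  -> bit 7 preserved (4M page);
	 *                                        walk terminates.
	 *
	 * level == 1 (PG_LEVEL_4K), any gpte:
	 *   gpte |= 1 - 1 - 1   = 0xffffffff  -> bit 7 forced on; the walk
	 *                                        always terminates at 4K.
	 */
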
/*
* Fetch a guest pte for a guest virtual address, or for an L2's GPA.
*/
@@ -418,13 +447,15 @@ retry_walk:
}
walker->ptes[walker->level - 1] = pte;
- } while (!is_last_gpte(mmu, walker->level, pte));
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
@@ -463,13 +494,13 @@ retry_walk:
}
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ __func__, (u64)pte, walker->pte_access,
+ walker->pt_access[walker->level - 1]);
return 1;
error:
errcode |= write_fault | user_fault;
- if (fetch_fault && (mmu->nx ||
- kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+ if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
errcode |= PFERR_FETCH_MASK;
walker->fault.vector = PF_VECTOR;
@@ -643,7 +674,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
- unsigned direct_access, access = gw->pt_access;
+ unsigned int direct_access, access;
int top_level, level, req_level, ret;
gfn_t base_gfn = gw->gfn;
@@ -675,6 +706,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
+ access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access);
}
@@ -763,7 +795,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
bool self_changed = false;
if (!(walker->pte_access & ACC_WRITE_MASK ||
- (!is_write_protection(vcpu) && !user_fault)))
+ (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
return false;
for (level = walker->level; level <= walker->max_level; level++) {
@@ -861,8 +893,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* we will cache the incorrect access into mmio spte.
*/
if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
- !is_write_protection(vcpu) && !user_fault &&
- !is_noslot_pfn(pfn)) {
+ !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
walker.pte_access |= ACC_WRITE_MASK;
walker.pte_access &= ~ACC_USER_MASK;
@@ -872,7 +903,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* then we should prevent the kernel from executing it
* if SMEP is enabled.
*/
- if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ if (is_cr4_smep(vcpu->arch.mmu))
walker.pte_access &= ~ACC_EXEC_MASK;
}
@@ -1027,13 +1058,36 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
+ union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
int set_spte_ret = 0;
- /* direct kvm_mmu_page can not be unsync. */
- BUG_ON(sp->role.direct);
+ /*
+ * Ignore various flags when verifying that it's safe to sync a shadow
+ * page using the current MMU context.
+ *
+ * - level: not part of the overall MMU role and will never match as the MMU's
+ * level tracks the root level
+ * - access: updated based on the new guest PTE
+ * - quadrant: not part of the overall MMU role (similar to level)
+ */
+ const union kvm_mmu_page_role sync_role_ign = {
+ .level = 0xf,
+ .access = 0x7,
+ .quadrant = 0x3,
+ };
+
+ /*
+ * Direct pages can never be unsync, and KVM should never attempt to
+ * sync a shadow page for a different MMU context, e.g. if the role
+ * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+ * reserved bits checks will be wrong, etc...
+ */
+ if (WARN_ON_ONCE(sp->role.direct ||
+ (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
+ return 0;
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
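
The (x ^ y) & ~ignore idiom in the WARN_ON_ONCE() above compares two packed
role words while masking off the fields named in sync_role_ign. A minimal
standalone sketch with hypothetical 8-bit values (not kernel types):

	unsigned int a = 0xa7, b = 0xa3;	/* differ only in the low nibble */
	unsigned int ign = 0x0f;		/* fields we choose to ignore */

	if ((a ^ b) & ~ign)
		;	/* roles differ in a field that matters */
	else
		;	/* equal once ignored fields are masked: 0xa0 == 0xa0 */
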
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 66d43cec0c31..3e97cdb13eb7 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -103,13 +103,6 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
/*
- * Bits 62:52 of PAE SPTEs are reserved. WARN if said bits are set
- * if PAE paging may be employed (shadow paging or any 32-bit KVM).
- */
- WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
- (spte & SPTE_TDP_AD_MASK));
-
- /*
* For the EPT case, shadow_present_mask is 0 if hardware
* supports exec-only page table entries. In that case,
* ACC_USER_MASK and shadow_user_mask are used to represent
@@ -154,13 +147,19 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
- * is responsibility of mmu_get_page / kvm_sync_page.
+ * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
* Same reasoning can be applied to dirty page accounting.
*/
if (!can_unsync && is_writable_pte(old_spte))
goto out;
- if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
+ /*
+ * Unsync shadow pages that are reachable by the new, writable
+ * SPTE. Write-protect the SPTE if the page can't be unsync'd,
+ * e.g. it's write-tracked (upper-level SPs) or has one or more
+ * shadow pages and unsync'ing pages is not allowed.
+ */
+ if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
ret |= SET_SPTE_WRITE_PROTECTED_PT;
@@ -176,7 +175,10 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte = mark_spte_for_access_track(spte);
out:
- WARN_ON(is_mmio_spte(spte));
+ WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
+ "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
+ get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
+
*new_spte = spte;
return ret;
}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index bca0ba11cccf..7a5ce9314107 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -293,6 +293,38 @@ static inline bool is_dirty_spte(u64 spte)
return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+ int level)
+{
+ int bit7 = (pte >> 7) & 1;
+
+ return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+ u64 pte, int level)
+{
+ return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+ u64 pte)
+{
+ return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+ u64 spte, int level)
+{
+ /*
+ * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+ * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+ * (this is extremely unlikely to be short-circuited as true).
+ */
+ return __is_bad_mt_xwr(rsvd_check, spte) |
+ __is_rsvd_bits_set(rsvd_check, spte, level);
+}
+
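A hedged usage sketch for the helpers above: rsvd_bits_mask[][] is indexed by
the PTE's bit 7 (PAGE_SIZE) because reserved bits differ between huge and
non-huge entries, and by level - 1. The mask value below is illustrative, not
any real CPU's reserved-bit layout:

	struct rsvd_bits_validate chk = { 0 };

	/* pretend bit 51 is reserved in level-2 huge PTEs */
	chk.rsvd_bits_mask[1][1] = BIT_ULL(51);

	u64 pte = BIT_ULL(51) | BIT_ULL(7);	/* huge PTE, reserved bit set */
	bool bad = is_rsvd_spte(&chk, pte, 2);	/* true via __is_rsvd_bits_set */
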
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
return (spte & shadow_host_writable_mask) &&
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 95eeb5ac6a8a..0853370bd811 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,10 +14,10 @@ static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
/* Initializes the TDP MMU for the VM, if enabled. */
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return;
+ return false;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
@@ -25,6 +25,8 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+
+ return true;
}
static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
@@ -335,7 +337,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
sptep = rcu_dereference(pt) + i;
- gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
+ gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
if (shared) {
/*
@@ -377,12 +379,12 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
WRITE_ONCE(*sptep, REMOVED_SPTE);
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
- old_child_spte, REMOVED_SPTE, level - 1,
+ old_child_spte, REMOVED_SPTE, level,
shared);
}
kvm_flush_remote_tlbs_with_address(kvm, gfn,
- KVM_PAGES_PER_HPAGE(level));
+ KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
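
The level adjustments in this hunk follow directly from the x86-64 page-table
geometry; a hedged worked example, assuming KVM_PAGES_PER_HPAGE(l) expands to
1ul << ((l - 1) * 9):

	/* A shadow page with role.level == 2 holds 512 level-2 entries.
	 * Entry i maps KVM_PAGES_PER_HPAGE(2) = 512 GFNs starting at
	 *	gfn = base_gfn + i * 512;
	 * and the page as a whole spans
	 *	512 * KVM_PAGES_PER_HPAGE(2) = KVM_PAGES_PER_HPAGE(3) = 262144
	 * GFNs, which is why the TLB flush uses KVM_PAGES_PER_HPAGE(level + 1).
	 */
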
@@ -912,7 +914,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
kvm_pfn_t pfn, bool prefault)
{
u64 new_spte;
- int ret = 0;
+ int ret = RET_PF_FIXED;
int make_spte_ret = 0;
if (unlikely(is_noslot_pfn(pfn)))
@@ -949,7 +951,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
rcu_dereference(iter->sptep));
}
- if (!prefault)
+ /*
+ * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
+ * consistent with legacy MMU behavior.
+ */
+ if (ret != RET_PF_SPURIOUS)
vcpu->stat.pf_fixed++;
return ret;
@@ -977,11 +983,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
int level;
int req_level;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
- if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -1017,14 +1018,14 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (!is_shadow_present_pte(iter.old_spte)) {
/*
- * If SPTE has been forzen by another thread, just
+ * If SPTE has been frozen by another thread, just
* give up and retry, avoiding unnecessary page table
* allocation and free.
*/
if (is_removed_spte(iter.old_spte))
break;
- sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
+ sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
child_pt = sp->spt;
new_spte = make_nonleaf_spte(child_pt,
@@ -1192,9 +1193,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
}
/*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
*/
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, int min_level)
@@ -1462,15 +1463,22 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
* Returns true if an SPTE was set and a TLB flush is needed.
*/
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t gfn)
+ gfn_t gfn, int min_level)
{
struct tdp_iter iter;
u64 new_spte;
bool spte_set = false;
+ BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+ for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+ min_level, gfn, gfn + 1) {
+ if (!is_shadow_present_pte(iter.old_spte) ||
+ !is_last_spte(iter.old_spte, iter.level))
+ continue;
+
if (!is_writable_pte(iter.old_spte))
break;
@@ -1492,14 +1500,15 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
* Returns true if an SPTE was set and a TLB flush is needed.
*/
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn)
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level)
{
struct kvm_mmu_page *root;
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root, slot->as_id)
- spte_set |= write_protect_gfn(kvm, root, gfn);
+ spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
return spte_set;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..1cae4485b3bc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -31,7 +31,7 @@ static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+ gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);
/*
* Don't allow yielding, as the caller may have a flush pending. Note,
@@ -74,37 +74,40 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool flush);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn);
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#ifdef CONFIG_X86_64
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
-#else
-static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
-static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
-#endif
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
struct kvm_mmu_page *sp;
+ hpa_t hpa = mmu->root_hpa;
- if (!is_tdp_mmu_enabled(kvm))
- return false;
if (WARN_ON(!VALID_PAGE(hpa)))
return false;
+ /*
+ * A NULL shadow page is legal when shadowing a non-paging guest with
+ * PAE paging, as the MMU will be direct with root_hpa pointing at the
+ * pae_root page, not a shadow page.
+ */
sp = to_shadow_page(hpa);
- if (WARN_ON(!sp))
- return false;
-
- return is_tdp_mmu_page(sp) && sp->root_count;
+ return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
+#else
+static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+#endif
#endif /* __KVM_X86_MMU_TDP_MMU_H */
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..1d01da64c333 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,12 +27,6 @@
#include "irq.h"
#include "svm.h"
-/* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
-
#define SVM_AVIC_DOORBELL 0xc001011b
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
@@ -126,7 +120,7 @@ void avic_vm_destroy(struct kvm *kvm)
unsigned long flags;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
- if (!avic)
+ if (!enable_apicv)
return;
if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +143,7 @@ int avic_vm_init(struct kvm *kvm)
struct page *l_page;
u32 vm_id;
- if (!avic)
+ if (!enable_apicv)
return 0;
/* Allocating physical APIC ID table (4KB) */
@@ -223,7 +217,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
return &avic_physical_id_table[index];
}
-/**
+/*
* Note:
* AVIC hardware walks the nested page table to check permissions,
* but does not use the SPA address specified in the leaf page
@@ -242,7 +236,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
* APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
* memory region. So, we need to ensure that kvm->mm == current->mm.
*/
- if ((kvm->arch.apic_access_page_done == activate) ||
+ if ((kvm->arch.apic_access_memslot_enabled == activate) ||
(kvm->mm != current->mm))
goto out;
@@ -255,7 +249,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
goto out;
}
- kvm->arch.apic_access_page_done = activate;
+ kvm->arch.apic_access_memslot_enabled = activate;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -571,7 +565,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
int ret;
struct kvm_vcpu *vcpu = &svm->vcpu;
- if (!avic || !irqchip_in_kernel(vcpu->kvm))
+ if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
return 0;
ret = avic_init_backing_page(vcpu);
@@ -595,7 +589,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
{
- if (!avic || !lapic_in_kernel(vcpu))
+ if (!enable_apicv || !lapic_in_kernel(vcpu))
return;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +649,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
bool activated = kvm_vcpu_apicv_active(vcpu);
- if (!avic)
+ if (!enable_apicv)
return;
if (activated) {
@@ -766,7 +760,7 @@ out:
return ret;
}
-/**
+/*
* Note:
* The HW cannot support posting multicast/broadcast
* interrupts to a vCPU. So, we still use legacy interrupt
@@ -1007,7 +1001,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/**
+/*
* This function is called during VCPU halt/unhalt.
*/
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5e8d8443154e..21d03e3a5dfd 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -98,13 +98,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
+
+ /*
+ * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
+ * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
+ * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
+ */
kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
svm->vmcb01.ptr->save.efer,
svm->nested.ctl.nested_cr3);
vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
- reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
@@ -380,33 +385,47 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
+static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
+{
+ /*
+ * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
+ * things to fix before this can be conditional:
+ *
+ * - Flush TLBs for both L1 and L2 remote TLB flush
+ * - Honor L1's request to flush an ASID on nested VMRUN
+ * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
+ * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
+ * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
+ *
+ * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
+ * NPT guest-physical mappings on VMRUN.
+ */
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+}
+
/*
* Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
* if we are emulating VM-Entry into a guest with NPT enabled.
*/
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
- bool nested_npt)
+ bool nested_npt, bool reload_pdptrs)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
return -EINVAL;
- if (!nested_npt && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
+ return -EINVAL;
- /*
- * TODO: optimize unconditional TLB flush/MMU sync here and in
- * kvm_init_shadow_npt_mmu().
- */
if (!nested_npt)
- kvm_mmu_new_pgd(vcpu, cr3, false, false);
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
@@ -481,6 +500,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
/*
* Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
@@ -505,10 +525,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
/* nested_cr3. */
if (nested_npt_enabled(svm))
- nested_svm_init_mmu_context(&svm->vcpu);
+ nested_svm_init_mmu_context(vcpu);
- svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
- svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+ svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
+ vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
svm->vmcb->control.int_ctl =
(svm->nested.ctl.int_ctl & ~mask) |
@@ -523,8 +543,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
+ nested_svm_transition_tlb_flush(vcpu);
+
/* Enter Guest-Mode */
- enter_guest_mode(&svm->vcpu);
+ enter_guest_mode(vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
@@ -576,7 +598,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
- nested_npt_enabled(svm));
+ nested_npt_enabled(svm), true);
if (ret)
return ret;
@@ -596,8 +618,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
u64 vmcb12_gpa;
- ++vcpu->stat.nested_run;
-
if (is_smm(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -803,9 +823,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_vcpu_unmap(vcpu, &map, true);
+ nested_svm_transition_tlb_flush(vcpu);
+
nested_svm_uninit_mmu_context(vcpu);
- rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
+ rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
if (rc)
return 1;
@@ -1228,8 +1250,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
&user_kvm_nested_state->data.svm[0];
struct vmcb_control_area *ctl;
struct vmcb_save_area *save;
+ unsigned long cr0;
int ret;
- u32 cr0;
BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
KVM_STATE_NESTED_SVM_VMCB_SIZE);
@@ -1302,6 +1324,19 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
/*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set before the nested state was loaded, and
+ * thus the MMU might not be initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
+ /*
* All checks done, we can enter guest mode. Userspace provides
* vmcb12.control, which will be combined with L1 and stored into
* vmcb02, and the L1 save state which we store in vmcb01.
@@ -1358,9 +1393,15 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
if (WARN_ON(!is_guest_mode(vcpu)))
return true;
- if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm)))
- return false;
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_npt_enabled(svm) && is_pae_paging(vcpu))
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state, which can lead to loading the wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5bc887e9a986..8d36f0c73071 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
sev->misc_cg = NULL;
}
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
{
struct sev_data_decommission decommission;
+
+ if (!handle)
+ return;
+
+ decommission.handle = handle;
+ sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
struct sev_data_deactivate deactivate;
if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_guest_deactivate(&deactivate, NULL);
up_read(&sev_deactivate_lock);
- /* decommission handle */
- decommission.handle = handle;
- sev_guest_decommission(&decommission, NULL);
+ sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
- if (ret)
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
/* return handle to userspace */
params.handle = start.handle;
@@ -1103,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_start data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
- if (ret < 0)
- return ret;
params->session_len = data.session_len;
if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1215,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_update_data data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
- if (ret < 0)
- return ret;
params->hdr_len = data.hdr_len;
params->trans_len = data.trans_len;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 05eca131eaf2..8834822c00cd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -43,6 +43,9 @@
#include "svm.h"
#include "svm_ops.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
@@ -185,6 +188,13 @@ module_param(vls, int, 0444);
static int vgif = true;
module_param(vgif, int, 0444);
+/*
+ * enable / disable AVIC. Because the defaults differ for APICv
+ * support between VMX and SVM, we cannot use module_param_named.
+ */
+static bool avic;
+module_param(avic, bool, 0444);
+
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
@@ -673,6 +683,9 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
msrpm[offset] = tmp;
+
+ svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
+
}
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
@@ -939,6 +952,16 @@ static __init int svm_hardware_setup(void)
int r;
unsigned int order = get_order(IOPM_SIZE);
+ /*
+ * NX is required for shadow paging and for NPT if the NX huge pages
+ * mitigation is enabled.
+ */
+ if (!boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+ kvm_enable_efer_bits(EFER_NX);
+
iopm_pages = alloc_pages(GFP_KERNEL, order);
if (!iopm_pages)
@@ -952,9 +975,6 @@ static __init int svm_hardware_setup(void)
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
@@ -996,6 +1016,8 @@ static __init int svm_hardware_setup(void)
/* Note, SEV setup consumes npt_enabled. */
sev_hardware_setup();
+ svm_hv_hardware_setup();
+
svm_adjust_mmio_mask();
for_each_possible_cpu(cpu) {
@@ -1009,16 +1031,12 @@ static __init int svm_hardware_setup(void)
nrips = false;
}
- if (avic) {
- if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_AVIC) ||
- !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
- avic = false;
- } else {
- pr_info("AVIC enabled\n");
+ enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
- amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
- }
+ if (enable_apicv) {
+ pr_info("AVIC enabled\n");
+
+ amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
}
if (vls) {
@@ -1082,26 +1100,30 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u64 g_tsc_offset = 0;
- if (is_guest_mode(vcpu)) {
- /* Write L1's TSC offset. */
- g_tsc_offset = svm->vmcb->control.tsc_offset -
- svm->vmcb01.ptr->control.tsc_offset;
- svm->vmcb01.ptr->control.tsc_offset = offset;
- }
+ return svm->nested.ctl.tsc_offset;
+}
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset - g_tsc_offset,
- offset);
+static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ return kvm_default_tsc_scaling_ratio;
+}
- svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
+ svm->vmcb->control.tsc_offset = offset;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
- return svm->vmcb->control.tsc_offset;
+}
+
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
}
/* Evaluate instruction intercepts that depend on guest CPUID features. */
@@ -1289,6 +1311,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
}
}
+ svm_hv_init_vmcb(svm->vmcb);
+
vmcb_mark_all_dirty(svm->vmcb);
enable_gif(svm);
@@ -3108,6 +3132,8 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
return;
}
+ pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
+ svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
@@ -3764,6 +3790,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ svm_hv_update_vp_id(svm->vmcb, vcpu);
+
/*
* Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB.
@@ -3837,6 +3865,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->next_rip = 0;
if (is_guest_mode(vcpu)) {
nested_sync_control_from_vmcb02(svm);
+
+ /* Track VMRUNs that have made it past consistency checking */
+ if (svm->nested.nested_run_pending &&
+ svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+ ++vcpu->stat.nested_run;
+
svm->nested.nested_run_pending = 0;
}
@@ -3848,10 +3882,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.apf.host_apf_flags =
kvm_read_and_reset_apf_flags();
- if (npt_enabled) {
- vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
- vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
- }
+ if (npt_enabled)
+ kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
/*
* We need to handle MC intercepts here before the vcpu has a chance to
@@ -3879,6 +3911,8 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
+ hv_track_root_tdp(vcpu, root_hpa);
+
/* Loading L2's CR3 is handled by enter_svm_guest_mode. */
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
return;
@@ -4251,7 +4285,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !svm_smi_blocked(vcpu);
}
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -4273,7 +4307,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map;
@@ -4429,13 +4463,12 @@ static int svm_vm_init(struct kvm *kvm)
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;
- if (avic) {
+ if (enable_apicv) {
int ret = avic_vm_init(kvm);
if (ret)
return ret;
}
- kvm_apicv_init(kvm, avic);
return 0;
}
@@ -4526,7 +4559,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
- .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+ .get_l2_tsc_offset = svm_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
+ .write_tsc_offset = svm_write_tsc_offset,
+ .write_tsc_multiplier = svm_write_tsc_multiplier,
.load_mmu_pgd = svm_load_mmu_pgd,
@@ -4546,8 +4582,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.setup_mce = svm_setup_mce,
.smi_allowed = svm_smi_allowed,
- .pre_enter_smm = svm_pre_enter_smm,
- .pre_leave_smm = svm_pre_leave_smm,
+ .enter_smm = svm_enter_smm,
+ .leave_smm = svm_leave_smm,
.enable_smi_window = svm_enable_smi_window,
.mem_enc_op = svm_mem_enc_op,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2c9ece618b29..f89b623bb591 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -32,6 +32,11 @@
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+/*
+ * Clean bits in VMCB.
+ * VMCB_ALL_CLEAN_MASK might also need to be
+ * updated if this enum is modified.
+ */
enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
pause filter count */
@@ -49,9 +54,17 @@ enum {
* AVIC PHYSICAL_TABLE pointer,
* AVIC LOGICAL_TABLE pointer
*/
- VMCB_DIRTY_MAX,
+ VMCB_SW = 31, /* Reserved for hypervisor/software use */
};
+#define VMCB_ALL_CLEAN_MASK ( \
+ (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \
+ (1U << VMCB_ASID) | (1U << VMCB_INTR) | \
+ (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
+ (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
+ (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
+ (1U << VMCB_SW))
+
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
@@ -238,10 +251,15 @@ static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
- vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+ vmcb->control.clean = VMCB_ALL_CLEAN_MASK
& ~VMCB_ALWAYS_DIRTY_MASK;
}
+static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
+{
+ return (vmcb->control.clean & (1 << bit));
+}
+
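A hedged sketch of how the clean-bit helpers are meant to be used (the
field/group pairing for the TSC offset matches the enum comment above; the
rest is illustrative):

	/* Software changes a cached VMCB field, then marks its group dirty
	 * so hardware reloads it on the next VMRUN: */
	svm->vmcb->control.tsc_offset = offset;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	/* Conversely, a set clean bit tells hardware it may keep using its
	 * cached copy of that group: */
	if (vmcb_is_clean(svm->vmcb, VMCB_ASID))
		;	/* ASID group untouched since the last VMRUN */
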
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
@@ -480,8 +498,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-extern int avic;
-
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
new file mode 100644
index 000000000000..98aa981c04ec
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
+
+#include <asm/mshyperv.h>
+
+#include "svm.h"
+#include "svm_ops.h"
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve;
+ struct hv_partition_assist_pg **p_hv_pa_pg =
+ &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+
+ if (!*p_hv_pa_pg)
+ *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!*p_hv_pa_pg)
+ return -ENOMEM;
+
+ hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
+
+ hve->partition_assist_page = __pa(*p_hv_pa_pg);
+ hve->hv_vm_id = (unsigned long)vcpu->kvm;
+ if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+ hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+ vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+
+ return 0;
+}
+
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
new file mode 100644
index 000000000000..9b9a55abc29f
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_SVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static struct kvm_x86_ops svm_x86_ops;
+
+/*
+ * Hyper-V uses the software-reserved 32 bytes in the VMCB
+ * control area to expose SVM enlightenments to guests.
+ */
+struct hv_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 enlightened_npt_tlb:1;
+ u32 reserved:29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
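+
+Per the comment above, the enlightenments block must exactly fill the 32
+software-reserved bytes of the VMCB control area; a hedged sanity check
+(4 + 4 + 8 + 8 + 8 == 32; this assert is editorial, not part of the patch):
+
+	static_assert(sizeof(struct hv_enlightenments) == 32);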
+
+/*
+ * Hyper-V uses the software-reserved clean bit in the VMCB
+ */
+#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
+ hve->hv_enlightenments_control.enlightened_npt_tlb = 1;
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+ pr_info("kvm: Hyper-V enlightened NPT TLB flush enabled\n");
+ svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+ svm_x86_ops.tlb_remote_flush_with_range =
+ hv_remote_flush_tlb_with_range;
+ }
+
+ if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+ int cpu;
+
+ pr_info("kvm: Hyper-V Direct TLB Flush enabled\n");
+ for_each_online_cpu(cpu) {
+ struct hv_vp_assist_page *vp_ap =
+ hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->nested_control.features.directhypercall = 1;
+ }
+ svm_x86_ops.enable_direct_tlbflush =
+ svm_hv_enable_direct_tlbflush;
+ }
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+ struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ /*
+ * vmcb can be NULL if called during early vcpu init.
+ * And its okay not to mark vmcb dirty during vcpu init
+ * as we mark it dirty unconditionally towards end of vcpu
+ * init phase.
+ */
+ if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+ hve->hv_enlightenments_control.msr_bitmap)
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+ u32 vp_index = kvm_hv_get_vpindex(vcpu);
+
+ if (hve->hv_vp_id != vp_index) {
+ hve->hv_vp_id = vp_index;
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+}
+#else
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+}
+#endif /* CONFIG_HYPERV */
+
+#endif /* __ARCH_X86_KVM_SVM_ONHYPERV_H__ */
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a61c015870e3..b484141ea15b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -997,7 +997,7 @@ TRACE_EVENT(kvm_wait_lapic_expire,
__entry->delta < 0 ? "early" : "late")
);
-TRACE_EVENT(kvm_enter_smm,
+TRACE_EVENT(kvm_smm_transition,
TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
TP_ARGS(vcpu_id, smbase, entering),
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
TP_ARGS(msg, err),
TP_STRUCT__entry(
- __field(const char *, msg)
+ __string(msg, msg)
__field(u32, err)
),
TP_fast_assign(
- __entry->msg = msg;
+ __assign_str(msg, msg);
__entry->err = err;
),
- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 8dee8a5fbc17..4705ad55abb5 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,7 +12,6 @@ extern bool __read_mostly enable_ept;
extern bool __read_mostly enable_unrestricted_guest;
extern bool __read_mostly enable_ept_ad_bits;
extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_apicv;
extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
@@ -90,8 +89,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
- return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
- vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_load_ia32_efer(void)
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 41f24661af04..896b2a50b4aa 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -319,6 +319,9 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
if (unlikely(!assist_page.enlighten_vmentry))
return false;
+ if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs)))
+ return false;
+
*evmcs_gpa = assist_page.current_nested_vmcs;
return true;
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index bd41d9462355..2ec9b46f0d0c 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -197,6 +197,14 @@ static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+#define EVMPTR_INVALID (-1ULL)
+#define EVMPTR_MAP_PENDING (-2ULL)
+
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+ return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+}
+
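A hedged sketch of the three states an enlightened-VMCS pointer can be in
under this scheme (sentinel values from the definitions above; the comments
on their meaning are inferred from the names):

	u64 evmptr = vmx->nested.hv_evmcs_vmptr;	/* hypothetical access */

	if (evmptr == EVMPTR_INVALID)
		;	/* L1 is not using an enlightened VMCS */
	else if (evmptr == EVMPTR_MAP_PENDING)
		;	/* eVMCS GPA is known but the map is deferred */
	else
		;	/* evmptr_is_valid(): holds the mapped eVMCS GPA */
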
enum nested_evmptrld_status {
EVMPTRLD_DISABLED,
EVMPTRLD_SUCCEEDED,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6058a65a6ede..1a52134b0c42 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -173,9 +173,13 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
| X86_EFLAGS_ZF);
get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
/*
- * We don't need to force a shadow sync because
- * VM_INSTRUCTION_ERROR is not shadowed
+ * We don't need to force sync to shadow VMCS because
+ * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
+ * fields and thus must be synced.
*/
+ if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
+ to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
+
return kvm_skip_emulated_instruction(vcpu);
}
@@ -187,7 +191,8 @@ static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
* failValid writes the error number to the current VMCS, which
* can't be done if there isn't a current VMCS.
*/
- if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ if (vmx->nested.current_vmptr == -1ull &&
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return nested_vmx_failInvalid(vcpu);
return nested_vmx_failValid(vcpu, vm_instruction_error);
@@ -221,12 +226,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!vmx->nested.hv_evmcs)
- return;
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
+ vmx->nested.hv_evmcs = NULL;
+ }
- kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
- vmx->nested.hv_evmcs_vmptr = 0;
- vmx->nested.hv_evmcs = NULL;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -346,16 +351,21 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vmcs12->guest_physical_address = fault->address;
}
+static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
+{
+ kvm_init_shadow_ept_mmu(vcpu,
+ to_vmx(vcpu)->nested.msrs.ept_caps &
+ VMX_EPT_EXECUTE_ONLY_BIT,
+ nested_ept_ad_enabled(vcpu),
+ nested_ept_get_eptp(vcpu));
+}
+
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
- kvm_init_shadow_ept_mmu(vcpu,
- to_vmx(vcpu)->nested.msrs.ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT,
- nested_ept_ad_enabled(vcpu),
- nested_ept_get_eptp(vcpu));
+ nested_ept_new_eptp(vcpu);
vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
@@ -1058,54 +1068,13 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
}
/*
- * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
- * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
- * enable VPID for L2 (implying it expects a TLB flush on VMX transitions).
- * Here's why.
- *
- * If EPT is enabled by L0 a sync is never needed:
- * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there
- * cannot be unsync'd SPTEs for either L1 or L2.
- *
- * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter
- * VM-Enter as VM-Enter isn't required to invalidate guest-physical mappings
- * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush
- * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't
- * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit.
- *
- * If EPT is disabled by L0:
- * - if VPID is enabled by L1 (for L2), the situation is similar to when L1
- * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't
- * required to invalidate linear mappings (EPT is disabled so there are
- * no combined or guest-physical mappings), i.e. L1 can't rely on the
- * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1).
- *
- * - however if VPID is disabled by L1, then a sync is needed as L1 expects all
- * linear mappings (EPT is disabled so there are no combined or guest-physical
- * mappings) to be invalidated on both VM-Enter and VM-Exit.
- *
- * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which
- * additionally checks that L2 has been assigned a VPID (when EPT is disabled).
- * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect
- * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2
- * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has
- * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1
- * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't
- * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush
- * stale TLB entries, at which point L0 will sync L2's MMU.
- */
-static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
-{
- return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu));
-}
-
-/*
* Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
* emulating VM-Entry into a guest with EPT enabled. On failure, the expected
* Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
* @entry_failure_code.
*/
-static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
+ bool nested_ept, bool reload_pdptrs,
enum vm_entry_failure_code *entry_failure_code)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
@@ -1117,27 +1086,20 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
* If PAE paging and EPT are both on, CR3 is not used by the CPU and
* must not be dereferenced.
*/
- if (!nested_ept && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
- *entry_failure_code = ENTRY_FAIL_PDPTE;
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return -EINVAL;
}
- /*
- * Unconditionally skip the TLB flush on fast CR3 switch; all TLB
- * flushes are handled by nested_vmx_transition_tlb_flush(). See
- * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
- */
if (!nested_ept)
- kvm_mmu_new_pgd(vcpu, cr3, true,
- !nested_vmx_transition_mmu_sync(vcpu));
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
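/*
 * Hedged sketch of what the PDPTR reload above amounts to for PAE
 * paging: CR3 bits 31:5 locate a 32-byte-aligned table of four 64-bit
 * PDPTEs.  Reserved-bit validation, the walk_mmu translation and the
 * register-dirty bookkeeping done by the real load_pdptrs() are all
 * omitted here.
 */
static int sketch_load_pae_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3,
				  u64 pdptrs[4])
{
	gpa_t table = cr3 & 0xffffffe0ul;	/* bits 31:5 */

	/* the real code reads through vcpu->arch.walk_mmu first */
	return kvm_read_guest(vcpu->kvm, table, pdptrs, 4 * sizeof(u64));
}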
@@ -1170,17 +1132,28 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * If VPID is disabled, linear and combined mappings are flushed on
- * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
- * their associated EPTP.
+ * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+ * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
+ * full TLB flush from the guest's perspective. This is required even
+ * if VPID is disabled in the host as KVM may need to synchronize the
+ * MMU in response to the guest TLB flush.
+ *
+ * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
+ * EPT is a special snowflake, as guest-physical mappings aren't
+ * flushed on VPID invalidations, including VM-Enter or VM-Exit with
+ * VPID disabled. As a result, KVM _never_ needs to sync nEPT
+ * entries on VM-Enter because L1 can't rely on VM-Enter to flush
+ * those mappings.
*/
- if (!enable_vpid)
+ if (!nested_cpu_has_vpid(vmcs12)) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return;
+ }
+
+ /* L2 should never have a VPID if VPID is disabled. */
+ WARN_ON(!enable_vpid);
/*
- * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
- * for *all* contexts to be flushed on VM-Enter/VM-Exit.
- *
* If VPID is enabled and used by vmcs12, but L2 does not have a unique
* TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
* a VPID for L2, flush the current context as the effective ASID is
@@ -1192,13 +1165,12 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
*
* If a TLB flush isn't required due to any of the above, and vpid12 is
* changing then the new "virtual" VPID (vpid12) will reuse the same
- * "real" VPID (vpid02), and so needs to be sync'd. There is no direct
+ * "real" VPID (vpid02), and so needs to be flushed. There's no direct
* mapping between vpid02 and vpid12; vpid02 is per-vCPU and reused for
- * all nested vCPUs.
+ * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
+ * guest-physical mappings, so there is no need to sync the nEPT MMU.
*/
- if (!nested_cpu_has_vpid(vmcs12)) {
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- } else if (!nested_has_guest_tlb_tag(vcpu)) {
+ if (!nested_has_guest_tlb_tag(vcpu)) {
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
} else if (is_vmenter &&
vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
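/*
 * The resulting flush policy, condensed into a sketch (requests as in
 * the hunks above; the body of the final branch falls outside this
 * hunk):
 *
 *	if (!nested_cpu_has_vpid(vmcs12))		// no VPID for L2
 *		KVM_REQ_TLB_FLUSH_GUEST;		// guest-scoped full flush
 *	else if (!nested_has_guest_tlb_tag(vcpu))	// L2 shares L1's ASID
 *		KVM_REQ_TLB_FLUSH_CURRENT;		// flush current context
 *	else if (is_vmenter && vpid12 changed)		// vpid02 gets reused
 *		flush vpid02;				// avoid stale entries
 *	// else: nothing to do; guest-physical mappings survive by design
 */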
@@ -1586,7 +1558,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(vmx->loaded_vmcs->vmcs);
}
-static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1595,7 +1567,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->tpr_threshold = evmcs->tpr_threshold;
vmcs12->guest_rip = evmcs->guest_rip;
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
vmcs12->guest_rsp = evmcs->guest_rsp;
vmcs12->guest_rflags = evmcs->guest_rflags;
@@ -1603,23 +1575,23 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->guest_interruptibility_info;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
vmcs12->cpu_based_vm_exec_control =
evmcs->cpu_based_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
vmcs12->exception_bitmap = evmcs->exception_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
vmcs12->vm_entry_intr_info_field =
evmcs->vm_entry_intr_info_field;
@@ -1629,7 +1601,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->vm_entry_instruction_len;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
@@ -1649,7 +1621,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_tr_selector = evmcs->host_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
vmcs12->pin_based_vm_exec_control =
evmcs->pin_based_vm_exec_control;
@@ -1658,18 +1630,18 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->secondary_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
vmcs12->msr_bitmap = evmcs->msr_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
vmcs12->guest_es_base = evmcs->guest_es_base;
vmcs12->guest_cs_base = evmcs->guest_cs_base;
@@ -1709,14 +1681,14 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
vmcs12->tsc_offset = evmcs->tsc_offset;
vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
@@ -1728,7 +1700,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_dr7 = evmcs->guest_dr7;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
vmcs12->host_fs_base = evmcs->host_fs_base;
vmcs12->host_gs_base = evmcs->host_gs_base;
@@ -1738,13 +1710,13 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_rsp = evmcs->host_rsp;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
vmcs12->ept_pointer = evmcs->ept_pointer;
vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
@@ -1799,10 +1771,10 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
* vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
*/
- return 0;
+ return;
}
-static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1962,7 +1934,7 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
- return 0;
+ return;
}
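/*
 * The copy pattern above, condensed: a field group is copied from the
 * eVMCS only when its clean bit is NOT set.  The macro below is a
 * sketch of that pattern, not something this file defines.
 */
#define SKETCH_EVMCS_COPY_GROUP(clean_fields, group_bit, copy_stmts)	\
do {									\
	if (unlikely(!((clean_fields) & (group_bit)))) {		\
		copy_stmts;						\
	}								\
} while (0)

/* usage sketch:
 * SKETCH_EVMCS_COPY_GROUP(hv_clean_fields,
 *			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN,
 *			   vmcs12->exception_bitmap = evmcs->exception_bitmap);
 */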
/*
@@ -1979,13 +1951,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
if (likely(!vmx->nested.enlightened_vmcs_enabled))
return EVMPTRLD_DISABLED;
- if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
+ if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
+ nested_release_evmcs(vcpu);
return EVMPTRLD_DISABLED;
+ }
- if (unlikely(!vmx->nested.hv_evmcs ||
- evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
- if (!vmx->nested.hv_evmcs)
- vmx->nested.current_vmptr = -1ull;
+ if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+ vmx->nested.current_vmptr = -1ull;
nested_release_evmcs(vcpu);
@@ -2023,7 +1995,6 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
return EVMPTRLD_VMFAIL;
}
- vmx->nested.dirty_vmcs12 = true;
vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
evmcs_gpa_changed = true;
@@ -2056,14 +2027,10 @@ void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs) {
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
copy_vmcs12_to_enlightened(vmx);
- /* All fields are clean */
- vmx->nested.hv_evmcs->hv_clean_fields |=
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- } else {
+ else
copy_vmcs12_to_shadow(vmx);
- }
vmx->nested.need_vmcs12_to_shadow_sync = false;
}
@@ -2208,7 +2175,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
u32 exec_control;
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
- if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
prepare_vmcs02_early_rare(vmx, vmcs12);
/*
@@ -2277,7 +2244,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_ENABLE_VMFUNC);
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_TSC_SCALING);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -2488,18 +2456,18 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* is assigned to entry_failure_code on failure.
*/
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ bool from_vmentry,
enum vm_entry_failure_code *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
bool load_guest_pdptrs_vmcs12 = false;
- if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
prepare_vmcs02_rare(vmx, vmcs12);
vmx->nested.dirty_vmcs12 = false;
- load_guest_pdptrs_vmcs12 = !hv_evmcs ||
- !(hv_evmcs->hv_clean_fields &
+ load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
+ !(vmx->nested.hv_evmcs->hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
}
@@ -2532,10 +2500,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
- vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ vcpu->arch.l1_tsc_offset,
+ vmx_get_l2_tsc_offset(vcpu),
+ vmx_get_l2_tsc_multiplier(vcpu));
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ vcpu->arch.l1_tsc_scaling_ratio,
+ vmx_get_l2_tsc_multiplier(vcpu));
+
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
@@ -2572,7 +2548,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Load the guest's CR3, shadowed by either EPT or KVM's shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
- entry_failure_code))
+ from_vmentry, entry_failure_code))
return -EINVAL;
/*
@@ -2604,6 +2580,17 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
+
+ /*
+ * It was observed that genuine Hyper-V running in L1 doesn't reset
+ * 'hv_clean_fields' by itself; it only sets the corresponding dirty
+ * bits when it changes a field in the eVMCS. Mark all fields as clean
+ * here.
+ */
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+
return 0;
}
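/*
 * The arithmetic behind the kvm_calc_nested_tsc_*() calls above, as a
 * hedged sketch.  With ratios in fixed point (frac fractional bits,
 * kvm_tsc_scaling_ratio_frac_bits in KVM), L2 observes
 *
 *	tsc02 = (host_tsc * r1 >> frac + o1) * r2 >> frac + o2
 *	      = host_tsc * ((r1 * r2) >> frac) >> frac
 *		+ ((o1 * r2) >> frac) + o2
 *
 * so the merged multiplier is (r1 * r2) >> frac and the merged offset
 * is o1 scaled by r2, plus o2.  The helper below is illustrative; note
 * the L1 offset is signed, hence the signed multiply.
 */
static u64 sketch_nested_tsc_offset(s64 o1, u64 o2, u64 r2, u32 frac)
{
	return mul_s64_u64_shr(o1, r2, frac) + o2;
}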
@@ -3093,13 +3080,20 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
* L2 was running), map it here to make sure vmcs12 changes are
* properly reflected.
*/
- if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
+ if (vmx->nested.enlightened_vmcs_enabled &&
+ vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
enum nested_evmptrld_status evmptrld_status =
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
if (evmptrld_status == EVMPTRLD_VMFAIL ||
evmptrld_status == EVMPTRLD_ERROR)
return false;
+
+ /*
+ * After migration, vmcs12 always holds the most up-to-date
+ * information; copy it to the eVMCS upon entry.
+ */
+ vmx->nested.need_vmcs12_to_shadow_sync = true;
}
return true;
@@ -3113,6 +3107,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
struct page *page;
u64 hpa;
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state which can lead to a load of wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
+ }
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@@ -3175,6 +3181,15 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
offset_in_page(vmcs12->posted_intr_desc_addr));
vmcs_write64(POSTED_INTR_DESC_ADDR,
pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
+ } else {
+ /*
+ * Defer the KVM_INTERNAL_EXIT until KVM tries to
+ * access the contents of the VMCS12 posted interrupt
+ * descriptor. (Note that KVM may do this when it
+ * should not, per the architectural specification.)
+ */
+ vmx->nested.pi_desc = NULL;
+ pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
}
}
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
@@ -3354,10 +3369,8 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
}
enter_guest_mode(vcpu);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset += vmcs12->tsc_offset;
- if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+ if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit_guest_mode;
@@ -3437,7 +3450,7 @@ vmentry_fail_vmexit:
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason.full;
- if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
vmx->nested.need_vmcs12_to_shadow_sync = true;
return NVMX_VMENTRY_VMEXIT;
}
@@ -3454,8 +3467,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
enum nested_evmptrld_status evmptrld_status;
- ++vcpu->stat.nested_run;
-
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -3467,7 +3478,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return nested_vmx_failInvalid(vcpu);
}
- if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
+ if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
+ vmx->nested.current_vmptr == -1ull))
return nested_vmx_failInvalid(vcpu);
vmcs12 = get_vmcs12(vcpu);
@@ -3481,8 +3493,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (CC(vmcs12->hdr.shadow_vmcs))
return nested_vmx_failInvalid(vcpu);
- if (vmx->nested.hv_evmcs) {
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
/* Enlightened VMCS doesn't have launch state */
vmcs12->launch_state = !launch;
} else if (enable_shadow_vmcs) {
@@ -3682,25 +3694,29 @@ void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
}
}
-static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
void *vapic_page;
u16 status;
- if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
- return;
+ if (!vmx->nested.pi_pending)
+ return 0;
+
+ if (!vmx->nested.pi_desc)
+ goto mmio_needed;
vmx->nested.pi_pending = false;
+
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
+ return 0;
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
vapic_page = vmx->nested.virtual_apic_map.hva;
if (!vapic_page)
- return;
+ goto mmio_needed;
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vapic_page, &max_irr);
@@ -3713,6 +3729,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
}
nested_mark_vmcs12_pages_dirty(vcpu);
+ return 0;
+
+mmio_needed:
+ kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
+ return -ENXIO;
}
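/*
 * Background for the PIR scan above, sketched: the posted-interrupt
 * descriptor carries a 256-bit Posted Interrupt Request bitmap, one
 * bit per vector.  find_last_bit() returns the bitmap size (256) when
 * no bit is set, which is why 256 means "nothing pending".
 */
static int sketch_highest_pending_vector(const unsigned long *pir)
{
	int vec = find_last_bit(pir, 256);

	return vec == 256 ? -1 : vec;	/* -1: PIR empty */
}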
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
@@ -3887,8 +3908,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
no_vmexit:
- vmx_complete_nested_posted_interrupt(vcpu);
- return 0;
+ return vmx_complete_nested_posted_interrupt(vcpu);
}
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
@@ -4032,10 +4052,11 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
+ vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
@@ -4206,7 +4227,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* Only PDPTE load can fail as the value of cr3 was checked on entry and
* couldn't have changed.
*/
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
@@ -4463,8 +4484,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (nested_cpu_has_preemption_timer(vmcs12))
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
+ vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ }
if (likely(!vmx->fail)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
@@ -4501,12 +4525,12 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (kvm_has_tsc_control)
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+
if (vmx->nested.l1_tpr_threshold != -1)
vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
- if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
-
if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx_set_virtual_apic_mode(vcpu);
@@ -4532,7 +4556,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
}
if ((vm_exit_reason != -1) &&
- (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;
/* in case we halted in L2 */
@@ -4987,6 +5011,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
vmptr + offsetof(struct vmcs12,
launch_state),
&zero, sizeof(zero));
+ } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
+ nested_release_evmcs(vcpu);
}
return nested_vmx_succeed(vcpu);
@@ -5228,7 +5254,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
/* Forbid normal VMPTRLD if Enlightened version was used */
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return 1;
if (vmx->nested.current_vmptr != vmptr) {
@@ -5284,7 +5310,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
return 1;
if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
@@ -5461,8 +5487,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
/*
* Sync the shadow page tables if EPT is disabled, L1 is invalidating
- * linear mappings for L2 (tagged with L2's VPID). Free all roots as
- * VPIDs are not tracked in the MMU role.
+ * linear mappings for L2 (tagged with L2's VPID). Free all guest
+ * roots as VPIDs are not tracked in the MMU role.
*
* Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
* an MMU when EPT is disabled.
@@ -5470,8 +5496,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
* TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
*/
if (!enable_ept)
- kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
- KVM_MMU_ROOTS_ALL);
+ kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);
return nested_vmx_succeed(vcpu);
}
@@ -5481,23 +5506,16 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
{
u32 index = kvm_rcx_read(vcpu);
u64 new_eptp;
- bool accessed_dirty;
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- if (!nested_cpu_has_eptp_switching(vmcs12) ||
- !nested_cpu_has_ept(vmcs12))
+ if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
return 1;
-
if (index >= VMFUNC_EPTP_ENTRIES)
return 1;
-
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
&new_eptp, index * 8, 8))
return 1;
- accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
-
/*
* If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else
@@ -5506,11 +5524,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1;
- mmu->ept_ad = accessed_dirty;
- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = new_eptp;
+ nested_ept_new_eptp(vcpu);
- kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ if (!nested_cpu_has_vpid(vmcs12))
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
return 0;
@@ -5533,7 +5551,17 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
}
vmcs12 = get_vmcs12(vcpu);
- if ((vmcs12->vm_function_control & (1 << function)) == 0)
+
+ /*
+ * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
+ * is enabled in vmcs02 if and only if it's enabled in vmcs12.
+ */
+ if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!(vmcs12->vm_function_control & BIT_ULL(function)))
goto fail;
switch (function) {
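/*
 * Why BIT_ULL(function) replaces '1 << function' above, sketched:
 * vm_function_control is a 64-bit field, and shifting the int literal
 * 1 is undefined behavior for function >= 31, so the 64-bit check only
 * became safe together with the explicit 'function > 63' guard.
 *
 *	u64 ctl = vmcs12->vm_function_control;
 *	ctl & (1 << function);		// UB for function >= 31
 *	ctl & BIT_ULL(function);	// well-defined for 0..63
 */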
@@ -5806,6 +5834,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_breakpoint(intr_info) &&
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return true;
+ else if (is_alignment_check(intr_info) &&
+ !vmx_guest_inject_ac(vcpu))
+ return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
@@ -6056,7 +6087,8 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
- if (vmx->nested.hv_evmcs)
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
if (is_guest_mode(vcpu) &&
@@ -6112,8 +6144,15 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
} else {
copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ /*
+ * The L1 hypervisor is not obliged to keep the
+ * eVMCS clean-fields data up-to-date while KVM
+ * is not in guest mode; 'hv_clean_fields' is
+ * only guaranteed to be valid at VM-entry, so
+ * ignore it here and do a full copy.
+ */
+ copy_enlightened_to_vmcs12(vmx, 0);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
}
@@ -6255,6 +6294,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
* restored yet. EVMCS will be mapped from
* nested_get_vmcs12_pages().
*/
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
} else {
return -EINVAL;
@@ -6339,6 +6379,40 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
}
/*
+ * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
+ * that madness to get the encoding for comparison.
+ */
+#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
+
+static u64 nested_vmx_calc_vmcs_enum_msr(void)
+{
+ /*
+ * Note these are the so called "index" of the VMCS field encoding, not
+ * the index into vmcs12.
+ */
+ unsigned int max_idx, idx;
+ int i;
+
+ /*
+ * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
+ * vmcs12, regardless of whether or not the associated feature is
+ * exposed to L1. Simply find the field with the highest index.
+ */
+ max_idx = 0;
+ for (i = 0; i < nr_vmcs12_fields; i++) {
+ /* The vmcs12 table is very, very sparsely populated. */
+ if (!vmcs_field_to_offset_table[i])
+ continue;
+
+ idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
+ if (idx > max_idx)
+ max_idx = idx;
+ }
+
+ return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
+}
+
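/*
 * Round-trip sketch for VMCS12_IDX_TO_ENC(): the offset table is
 * indexed by the encoding rotated left by 6 on 16 bits (the ROL16()
 * helper used by the FIELD() macros in vmcs12.h), so rotating right
 * by 6 -- i.e. (idx >> 6) | (idx << 10) -- recovers the encoding:
 *
 *	for any u16 enc:  VMCS12_IDX_TO_ENC(ROL16(enc, 6)) == enc
 */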
+/*
* nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
* returned for the various VMX controls MSRs when nested VMX is enabled.
* The same values should also be used to verify that vmcs12 control fields are
@@ -6474,7 +6548,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
SECONDARY_EXEC_RDRAND_EXITING |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_TSC_SCALING;
/*
* We can emulate "VMCS shadowing," even if the hardware
@@ -6582,8 +6657,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
- /* highest index: VMX_PREEMPTION_TIMER_VALUE */
- msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
+ msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}
void nested_vmx_hardware_unsetup(void)
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 184418baeb3c..b69a80f43b37 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -56,14 +56,9 @@ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /*
- * In case we do two consecutive get/set_nested_state()s while L2 was
- * running, hv_evmcs may end up not being mapped (we map it from
- * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
- * have vmcs12 if it is true.
- */
- return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
- vmx->nested.hv_evmcs;
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ return vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
}
static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 459748680daf..5f81ef092bd4 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
/*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+ if (!irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
+/*
* pi_update_irte - set IRTE for Posted-Interrupts
*
* @kvm: kvm
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 0bdc41391c5b..7f7b2326caf5 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 1472c6c376f7..4b9957e2bf5b 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
return is_exception_n(intr_info, GP_VECTOR);
}
+static inline bool is_alignment_check(u32 intr_info)
+{
+ return is_exception_n(intr_info, AC_VECTOR);
+}
+
static inline bool is_machine_check(u32 intr_info)
{
return is_exception_n(intr_info, MC_VECTOR);
@@ -164,4 +169,12 @@ static inline int vmcs_field_readonly(unsigned long field)
return (((field >> 10) & 0x3) == 1);
}
+#define VMCS_FIELD_INDEX_SHIFT (1)
+#define VMCS_FIELD_INDEX_MASK GENMASK(9, 1)
+
+static inline unsigned int vmcs_field_index(unsigned long field)
+{
+ return (field & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
+}
+
#endif /* __KVM_X86_VMX_VMCS_H */
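/*
 * Context sketch for vmcs_field_index() and vmcs_field_readonly():
 * per the Intel SDM, a 32-bit VMCS field encoding is laid out as
 *
 *	bit   0		access type (1 = high half of a 64-bit field)
 *	bits  9:1	index (VMCS_FIELD_INDEX_MASK above)
 *	bits 11:10	type (0 control, 1 read-only data, 2 guest, 3 host)
 *	bit  12		reserved (0)
 *	bits 14:13	width (0 16-bit, 1 64-bit, 2 32-bit, 3 natural)
 */
static inline unsigned int sketch_vmcs_field_width(unsigned long field)
{
	return (field >> 13) & 0x3;
}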
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index 034adb6404dc..d9f5d7c56ae3 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -37,6 +37,7 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
FIELD64(PML_ADDRESS, pml_address),
FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(TSC_MULTIPLIER, tsc_multiplier),
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 13494956d0e9..5e0e1b39f495 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -70,7 +70,8 @@ struct __packed vmcs12 {
u64 eptp_list_address;
u64 pml_address;
u64 encls_exiting_bitmap;
- u64 padding64[2]; /* room for future expansion */
+ u64 tsc_multiplier;
+ u64 padding64[1]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
@@ -205,12 +206,6 @@ struct __packed vmcs12 {
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE
/*
- * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
- * supported VMCS12 field encoding.
- */
-#define VMCS12_MAX_FIELD_INDEX 0x17
-
-/*
* For save/restore compatibility, the vmcs12 field offsets must not change.
*/
#define CHECK_OFFSET(field, loc) \
@@ -258,6 +253,7 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(eptp_list_address, 304);
CHECK_OFFSET(pml_address, 312);
CHECK_OFFSET(encls_exiting_bitmap, 320);
+ CHECK_OFFSET(tsc_multiplier, 328);
CHECK_OFFSET(cr0_guest_host_mask, 344);
CHECK_OFFSET(cr4_guest_host_mask, 352);
CHECK_OFFSET(cr0_read_shadow, 360);
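/*
 * What CHECK_OFFSET() presumably expands to (the macro body sits just
 * above this hunk and isn't shown in the diff): a build-time assertion
 * pinning each vmcs12 field to its ABI offset, so nested state saved
 * by one kernel restores on another.
 */
#define SKETCH_CHECK_OFFSET(field, loc)					\
	BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),	\
			 "Offset of " #field " in struct vmcs12 has changed.")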
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bceb5ca3a89..927a552393b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -52,6 +52,7 @@
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
+#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
@@ -101,7 +102,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
-bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);
/*
@@ -459,86 +459,6 @@ static unsigned long host_idt_base;
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
-static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
- void *data)
-{
- struct kvm_tlb_range *range = data;
-
- return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
- range->pages);
-}
-
-static inline int hv_remote_flush_root_ept(hpa_t root_ept,
- struct kvm_tlb_range *range)
-{
- if (range)
- return hyperv_flush_guest_mapping_range(root_ept,
- kvm_fill_hv_flush_list_func, (void *)range);
- else
- return hyperv_flush_guest_mapping(root_ept);
-}
-
-static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
- struct kvm_tlb_range *range)
-{
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
- struct kvm_vcpu *vcpu;
- int ret = 0, i, nr_unique_valid_roots;
- hpa_t root;
-
- spin_lock(&kvm_vmx->hv_root_ept_lock);
-
- if (!VALID_PAGE(kvm_vmx->hv_root_ept)) {
- nr_unique_valid_roots = 0;
-
- /*
- * Flush all valid roots, and see if all vCPUs have converged
- * on a common root, in which case future flushes can skip the
- * loop and flush the common root.
- */
- kvm_for_each_vcpu(i, vcpu, kvm) {
- root = to_vmx(vcpu)->hv_root_ept;
- if (!VALID_PAGE(root) || root == kvm_vmx->hv_root_ept)
- continue;
-
- /*
- * Set the tracked root to the first valid root. Keep
- * this root for the entirety of the loop even if more
- * roots are encountered as a low effort optimization
- * to avoid flushing the same (first) root again.
- */
- if (++nr_unique_valid_roots == 1)
- kvm_vmx->hv_root_ept = root;
-
- if (!ret)
- ret = hv_remote_flush_root_ept(root, range);
-
- /*
- * Stop processing roots if a failure occurred and
- * multiple valid roots have already been detected.
- */
- if (ret && nr_unique_valid_roots > 1)
- break;
- }
-
- /*
- * The optimized flush of a single root can't be used if there
- * are multiple valid roots (obviously).
- */
- if (nr_unique_valid_roots > 1)
- kvm_vmx->hv_root_ept = INVALID_PAGE;
- } else {
- ret = hv_remote_flush_root_ept(kvm_vmx->hv_root_ept, range);
- }
-
- spin_unlock(&kvm_vmx->hv_root_ept_lock);
- return ret;
-}
-static int hv_remote_flush_tlb(struct kvm *kvm)
-{
- return hv_remote_flush_tlb_with_range(kvm, NULL);
-}
-
static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightened_vmcs *evmcs;
@@ -566,21 +486,6 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
#endif /* IS_ENABLED(CONFIG_HYPERV) */
-static void hv_track_root_ept(struct kvm_vcpu *vcpu, hpa_t root_ept)
-{
-#if IS_ENABLED(CONFIG_HYPERV)
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
-
- if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
- spin_lock(&kvm_vmx->hv_root_ept_lock);
- to_vmx(vcpu)->hv_root_ept = root_ept;
- if (root_ept != kvm_vmx->hv_root_ept)
- kvm_vmx->hv_root_ept = INVALID_PAGE;
- spin_unlock(&kvm_vmx->hv_root_ept_lock);
- }
-#endif
-}
-
/*
* Comment's format: document - errata name - stepping - processor name.
* Refer from
@@ -842,16 +747,21 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
else {
- /*
- * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
- * between guest and host. In that case we only care about present
- * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
- * prepare_vmcs02_rare.
- */
- bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
- int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+ int mask = 0, match = 0;
+
+ if (enable_ept && (eb & (1u << PF_VECTOR))) {
+ /*
+ * If EPT is enabled, #PF is currently only intercepted
+ * if MAXPHYADDR is smaller on the guest than on the
+ * host. In that case we only care about present,
+ * non-reserved faults. For vmcs02, however, PFEC_MASK
+ * and PFEC_MATCH are set in prepare_vmcs02_rare.
+ */
+ mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
+ match = PFERR_PRESENT_MASK;
+ }
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
}
vmcs_write32(EXCEPTION_BITMAP, eb);
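/*
 * How the CPU consumes PFEC_MASK/PFEC_MATCH, sketched per the SDM:
 * with PF_VECTOR set in the exception bitmap, a #PF triggers a VM-exit
 * iff (error_code & PFEC_MASK) == PFEC_MATCH; with PF_VECTOR clear,
 * the condition is inverted.  The mask/match pair above thus selects
 * exactly the present, non-reserved faults the MAXPHYADDR emulation
 * cares about.
 */
static bool sketch_pf_causes_vmexit(u32 error_code, bool pf_intercepted,
				    u32 pfec_mask, u32 pfec_match)
{
	bool match = (error_code & pfec_mask) == pfec_match;

	return pf_intercepted ? match : !match;
}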
@@ -1390,11 +1300,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
vmx->loaded_vmcs->cpu = cpu;
}
-
- /* Setup TSC multiplier */
- if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
- decache_tsc_multiplier(vmx);
}
/*
@@ -1787,26 +1692,35 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx->guest_uret_msrs_loaded = false;
}
-static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u64 g_tsc_offset = 0;
- /*
- * We're here if L1 chose not to trap WRMSR to TSC. According
- * to the spec, this should set L1's TSC; the offset that L1
- * set for L2 remains unchanged, and still needs to be added
- * to the newly set TSC to get L2's TSC.
- */
- if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
- g_tsc_offset = vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
+ return vmcs12->tsc_offset;
+
+ return 0;
+}
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- vcpu->arch.tsc_offset - g_tsc_offset,
- offset);
- vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
- return offset + g_tsc_offset;
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ return vmcs12->tsc_multiplier;
+
+ return kvm_default_tsc_scaling_ratio;
+}
+
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ vmcs_write64(TSC_OFFSET, offset);
+}
+
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ vmcs_write64(TSC_MULTIPLIER, multiplier);
}
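/*
 * Fixed-point convention behind the TSC multiplier writes above,
 * sketched: on Intel the multiplier is a 64-bit fixed-point value
 * with 48 fractional bits (kvm_tsc_scaling_ratio_frac_bits), so a
 * ratio of 1.0 is 1ull << 48 and scaling needs a 128-bit product.
 */
static inline u64 sketch_scale_tsc(u64 tsc, u64 ratio, u32 frac_bits)
{
	return (u64)(((unsigned __int128)tsc * ratio) >> frac_bits);
}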
/*
@@ -3181,7 +3095,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
eptp = construct_eptp(vcpu, root_hpa, root_level);
vmcs_write64(EPT_POINTER, eptp);
- hv_track_root_ept(vcpu, root_hpa);
+ hv_track_root_tdp(vcpu, root_hpa);
if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
@@ -3707,7 +3621,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
int ret = 0;
mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page_done)
+ if (kvm->arch.apic_access_memslot_enabled)
goto out;
hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
@@ -3727,7 +3641,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
* is able to migrate it.
*/
put_page(page);
- kvm->arch.apic_access_page_done = true;
+ kvm->arch.apic_access_memslot_enabled = true;
out:
mutex_unlock(&kvm->slots_lock);
return ret;
@@ -4829,7 +4743,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
* - Guest has #AC detection enabled in CR0
* - Guest EFLAGS has AC bit set
*/
-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
return true;
@@ -4843,7 +4757,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
- unsigned long cr2, rip, dr6;
+ unsigned long cr2, dr6;
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
@@ -4933,12 +4847,11 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
- rip = kvm_rip_read(vcpu);
- kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = ex_no;
break;
case AC_VECTOR:
- if (guest_inject_ac(vcpu)) {
+ if (vmx_guest_inject_ac(vcpu)) {
kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
return 1;
}
@@ -5811,6 +5724,8 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
+ vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
pr_err("*** Guest State ***\n");
pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
@@ -6248,6 +6163,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
switch (kvm_get_apic_mode(vcpu)) {
case LAPIC_MODE_INVALID:
WARN_ONCE(true, "Invalid local APIC state");
+ break;
case LAPIC_MODE_DISABLED:
break;
case LAPIC_MODE_XAPIC:
@@ -6806,7 +6722,18 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
kvm_load_host_xsave_state(vcpu);
- vmx->nested.nested_run_pending = 0;
+ if (is_guest_mode(vcpu)) {
+ /*
+ * Track VMLAUNCH/VMRESUME attempts that have made it past
+ * guest-state checking.
+ */
+ if (vmx->nested.nested_run_pending &&
+ !vmx->exit_reason.failed_vmentry)
+ ++vcpu->stat.nested_run;
+
+ vmx->nested.nested_run_pending = 0;
+ }
+
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
@@ -6941,6 +6868,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
vcpu->arch.microcode_version = 0x100000000ULL;
vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
@@ -6952,9 +6880,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx->pi_desc.sn = 1;
-#if IS_ENABLED(CONFIG_HYPERV)
- vmx->hv_root_ept = INVALID_PAGE;
-#endif
return 0;
free_vmcs:
@@ -6971,10 +6896,6 @@ free_vpid:
static int vmx_vm_init(struct kvm *kvm)
{
-#if IS_ENABLED(CONFIG_HYPERV)
- spin_lock_init(&to_kvm_vmx(kvm)->hv_root_ept_lock);
-#endif
-
if (!ple_gap)
kvm->arch.pause_in_guest = true;
@@ -7001,7 +6922,6 @@ static int vmx_vm_init(struct kvm *kvm)
break;
}
}
- kvm_apicv_init(kvm, enable_apicv);
return 0;
}
@@ -7453,10 +7373,10 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
delta_tsc = 0;
/* Convert to host delta tsc if tsc scaling is enabled */
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
delta_tsc && u64_shl_div_u64(delta_tsc,
kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio, &delta_tsc))
+ vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
return -ERANGE;
/*
@@ -7542,7 +7462,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !is_smm(vcpu);
}
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7556,7 +7476,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -7700,7 +7620,10 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+ .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
+ .write_tsc_offset = vmx_write_tsc_offset,
+ .write_tsc_multiplier = vmx_write_tsc_multiplier,
.load_mmu_pgd = vmx_load_mmu_pgd,
@@ -7721,6 +7644,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.update_pi_irte = pi_update_irte,
+ .start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
.set_hv_timer = vmx_set_hv_timer,
@@ -7730,8 +7654,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.setup_mce = vmx_setup_mce,
.smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
+ .enter_smm = vmx_enter_smm,
+ .leave_smm = vmx_leave_smm,
.enable_smi_window = vmx_enable_smi_window,
.can_emulate_instruction = vmx_can_emulate_instruction,
@@ -7806,6 +7730,12 @@ static __init int hardware_setup(void)
!cpu_has_vmx_invept_global())
enable_ept = 0;
+ /* NX support is required for shadow paging. */
+ if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
@@ -7995,6 +7925,8 @@ static void vmx_exit(void)
}
#endif
vmx_cleanup_l1d_flush();
+
+ allow_smaller_maxphyaddr = false;
}
module_exit(vmx_exit);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 16e4e457ba23..3979a947933a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -322,8 +322,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
- u64 current_tsc_ratio;
-
unsigned long host_debugctlmsr;
/*
@@ -336,10 +334,6 @@ struct vcpu_vmx {
/* SGX Launch Control public key hash */
u64 msr_ia32_sgxlepubkeyhash[4];
-#if IS_ENABLED(CONFIG_HYPERV)
- u64 hv_root_ept;
-#endif
-
struct pt_desc pt_desc;
struct lbr_desc lbr_desc;
@@ -357,11 +351,6 @@ struct kvm_vmx {
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
-
-#if IS_ENABLED(CONFIG_HYPERV)
- hpa_t hv_root_ept;
- spinlock_t hv_root_ept_lock;
-#endif
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
@@ -387,6 +376,7 @@ void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
@@ -404,6 +394,9 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
int type, bool value)
{
@@ -529,12 +522,6 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
GFP_KERNEL_ACCOUNT);
}
-static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
-{
- vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-}
-
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
return vmx->secondary_exec_control &
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbc4e04e67ad..17468d983fbd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -58,6 +58,7 @@
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
+#include <linux/suspend.h>
#include <trace/events/kvm.h>
@@ -102,6 +103,8 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
+
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -113,6 +116,9 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+
struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -209,55 +215,78 @@ EXPORT_SYMBOL_GPL(host_efer);
bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
+bool __read_mostly enable_apicv = true;
+EXPORT_SYMBOL_GPL(enable_apicv);
+
u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
u64 __read_mostly supported_xss;
EXPORT_SYMBOL_GPL(supported_xss);
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("pf_fixed", pf_fixed),
- VCPU_STAT("pf_guest", pf_guest),
- VCPU_STAT("tlb_flush", tlb_flush),
- VCPU_STAT("invlpg", invlpg),
- VCPU_STAT("exits", exits),
- VCPU_STAT("io_exits", io_exits),
- VCPU_STAT("mmio_exits", mmio_exits),
- VCPU_STAT("signal_exits", signal_exits),
- VCPU_STAT("irq_window", irq_window_exits),
- VCPU_STAT("nmi_window", nmi_window_exits),
- VCPU_STAT("halt_exits", halt_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hypercalls", hypercalls),
- VCPU_STAT("request_irq", request_irq_exits),
- VCPU_STAT("irq_exits", irq_exits),
- VCPU_STAT("host_state_reload", host_state_reload),
- VCPU_STAT("fpu_reload", fpu_reload),
- VCPU_STAT("insn_emulation", insn_emulation),
- VCPU_STAT("insn_emulation_fail", insn_emulation_fail),
- VCPU_STAT("irq_injections", irq_injections),
- VCPU_STAT("nmi_injections", nmi_injections),
- VCPU_STAT("req_event", req_event),
- VCPU_STAT("l1d_flush", l1d_flush),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("nested_run", nested_run),
- VCPU_STAT("directed_yield_attempted", directed_yield_attempted),
- VCPU_STAT("directed_yield_successful", directed_yield_successful),
- VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
- VM_STAT("mmu_pte_write", mmu_pte_write),
- VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
- VM_STAT("mmu_flooded", mmu_flooded),
- VM_STAT("mmu_recycled", mmu_recycled),
- VM_STAT("mmu_cache_miss", mmu_cache_miss),
- VM_STAT("mmu_unsync", mmu_unsync),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- VM_STAT("largepages", lpages, .mode = 0444),
- VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
- VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
+ STATS_DESC_COUNTER(VM, mmu_pte_write),
+ STATS_DESC_COUNTER(VM, mmu_pde_zapped),
+ STATS_DESC_COUNTER(VM, mmu_flooded),
+ STATS_DESC_COUNTER(VM, mmu_recycled),
+ STATS_DESC_COUNTER(VM, mmu_cache_miss),
+ STATS_DESC_ICOUNTER(VM, mmu_unsync),
+ STATS_DESC_ICOUNTER(VM, lpages),
+ STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
+ STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, pf_fixed),
+ STATS_DESC_COUNTER(VCPU, pf_guest),
+ STATS_DESC_COUNTER(VCPU, tlb_flush),
+ STATS_DESC_COUNTER(VCPU, invlpg),
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, io_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, irq_window_exits),
+ STATS_DESC_COUNTER(VCPU, nmi_window_exits),
+ STATS_DESC_COUNTER(VCPU, l1d_flush),
+ STATS_DESC_COUNTER(VCPU, halt_exits),
+ STATS_DESC_COUNTER(VCPU, request_irq_exits),
+ STATS_DESC_COUNTER(VCPU, irq_exits),
+ STATS_DESC_COUNTER(VCPU, host_state_reload),
+ STATS_DESC_COUNTER(VCPU, fpu_reload),
+ STATS_DESC_COUNTER(VCPU, insn_emulation),
+ STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
+ STATS_DESC_COUNTER(VCPU, hypercalls),
+ STATS_DESC_COUNTER(VCPU, irq_injections),
+ STATS_DESC_COUNTER(VCPU, nmi_injections),
+ STATS_DESC_COUNTER(VCPU, req_event),
+ STATS_DESC_COUNTER(VCPU, nested_run),
+ STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
+ STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+ STATS_DESC_ICOUNTER(VCPU, guest_mode)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
u64 __read_mostly host_xcr0;
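/*
 * Layout sketch of the binary stats file the two headers above
 * describe; everything is packed back to back, which is exactly how
 * the offsets are computed:
 *
 *	[kvm_stats_header][id string][descriptor array][u64 data ...]
 *	^0                ^id_offset ^desc_offset      ^data_offset
 *
 * e.g. data_offset == sizeof(header) + KVM_STATS_NAME_SIZE +
 * sizeof(desc array), matching the initializers above.
 */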
@@ -778,13 +807,6 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- void *data, int offset, int len, u32 access)
-{
- return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
- data, offset, len, access);
-}
-
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@@ -819,6 +841,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ vcpu->arch.pdptrs_from_userspace = false;
out:
@@ -826,40 +849,14 @@ out:
}
EXPORT_SYMBOL_GPL(load_pdptrs);
-bool pdptrs_changed(struct kvm_vcpu *vcpu)
-{
- u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
- int offset;
- gfn_t gfn;
- int r;
-
- if (!is_pae_paging(vcpu))
- return false;
-
- if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
- return true;
-
- gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
- r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
- PFERR_USER_MASK | PFERR_WRITE_MASK);
- if (r < 0)
- return true;
-
- return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
-}
-EXPORT_SYMBOL_GPL(pdptrs_changed);
-
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
- unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
-
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
- if ((cr0 ^ old_cr0) & update_bits)
+ if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
@@ -1038,10 +1035,7 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
- unsigned long mmu_role_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
- if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
}
@@ -1084,25 +1078,46 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
+static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ unsigned long roots_to_free = 0;
+ int i;
+
+ /*
+ * If neither the current CR3 nor any of the prev_roots use the given
+ * PCID, then nothing needs to be done here because a resync will
+ * happen anyway before switching to any other CR3.
+ */
+ if (kvm_get_active_pcid(vcpu) == pcid) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
bool skip_tlb_flush = false;
+ unsigned long pcid = 0;
#ifdef CONFIG_X86_64
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
if (pcid_enabled) {
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
cr3 &= ~X86_CR3_PCID_NOFLUSH;
+ pcid = cr3 & X86_CR3_PCID_MASK;
}
#endif
- if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
- if (!skip_tlb_flush) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
- return 0;
- }
+ /* PDPTRs are always reloaded for PAE paging. */
+ if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
+ goto handle_tlb_flush;
/*
* Do not condition the GPA check on long mode, this helper is used to
@@ -1115,10 +1130,23 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
- kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+ if (cr3 != kvm_read_cr3(vcpu))
+ kvm_mmu_new_pgd(vcpu, cr3);
+
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+handle_tlb_flush:
+ /*
+ * A load of CR3 that flushes the TLB flushes only the current PCID,
+ * even if PCID is disabled, in which case PCID=0 is flushed. It's a
+ * moot point in the end because _disabling_ PCID will flush all PCIDs,
+ * and it's impossible to use a non-zero PCID when PCID is disabled,
+ * i.e. only PCID=0 can be relevant.
+ */
+ if (!skip_tlb_flush)
+ kvm_invalidate_pcid(vcpu, pcid);
+
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
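
For reference, the CR3 bit layout consumed above when CR4.PCIDE=1 (an architectural illustration, not code from the patch):

    #include <stdbool.h>

    /* CR3 with CR4.PCIDE=1: bits 11:0 hold the PCID, bit 63 is the
     * "no flush" hint stripped by kvm_set_cr3(), and the remaining
     * bits are the page-table base address. */
    static unsigned long demo_cr3_pcid(unsigned long cr3, bool *noflush)
    {
            *noflush = cr3 & (1UL << 63);   /* X86_CR3_PCID_NOFLUSH */
            return cr3 & 0xfffUL;           /* X86_CR3_PCID_MASK */
    }
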
@@ -2179,13 +2207,15 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
return v;
}
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
+
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
u64 ratio;
/* Guest TSC same frequency as host TSC? */
if (!scale) {
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return 0;
}
@@ -2211,7 +2241,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
return -1;
}
- vcpu->arch.tsc_scaling_ratio = ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
return 0;
}
@@ -2223,7 +2253,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
/* tsc_khz can be zero if TSC calibration fails */
if (user_tsc_khz == 0) {
/* set tsc_scaling_ratio to a safe value */
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return -1;
}
@@ -2305,10 +2335,9 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
{
u64 _tsc = tsc;
- u64 ratio = vcpu->arch.tsc_scaling_ratio;
if (ratio != kvm_default_tsc_scaling_ratio)
_tsc = __scale_tsc(ratio, tsc);
@@ -2317,25 +2346,86 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);
-static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;
- tsc = kvm_scale_tsc(vcpu, rdtsc());
+ tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
return target_tsc - tsc;
}
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
+ return vcpu->arch.l1_tsc_offset +
+ kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
-static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{
- vcpu->arch.l1_tsc_offset = offset;
- vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
+ u64 nested_offset;
+
+ if (l2_multiplier == kvm_default_tsc_scaling_ratio)
+ nested_offset = l1_offset;
+ else
+ nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ nested_offset += l2_offset;
+ return nested_offset;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
+
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
+{
+ if (l2_multiplier != kvm_default_tsc_scaling_ratio)
+ return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ return l1_multiplier;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
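
Taken together, the two helpers compose an affine transform: the L2 TSC is the host TSC scaled by both multipliers, plus the L1 offset re-scaled by the L2 multiplier, plus the L2 offset. A standalone sketch of the combined math under the same fixed-point convention (shifts by kvm_tsc_scaling_ratio_frac_bits; the kernel treats the L1 offset as signed via mul_s64_u64_shr(), which this sketch glosses over):

    /* fp_mul() stands in for the kernel's mul_u64_u64_shr(). */
    static unsigned long long fp_mul(unsigned long long a,
                                     unsigned long long b,
                                     unsigned int frac_bits)
    {
            return (unsigned long long)
                   (((unsigned __int128)a * b) >> frac_bits);
    }

    /* L2 TSC = host * (m1*m2 >> frac) >> frac
     *          + (off1*m2 >> frac) + off2 */
    static unsigned long long demo_l2_tsc(unsigned long long host_tsc,
                                          unsigned long long off1,
                                          unsigned long long m1,
                                          unsigned long long off2,
                                          unsigned long long m2,
                                          unsigned int frac_bits)
    {
            unsigned long long mult   = fp_mul(m1, m2, frac_bits);
            unsigned long long offset = fp_mul(off1, m2, frac_bits) + off2;

            return fp_mul(host_tsc, mult, frac_bits) + offset;
    }
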
+
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
+{
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ vcpu->arch.l1_tsc_offset,
+ l1_offset);
+
+ vcpu->arch.l1_tsc_offset = l1_offset;
+
+ /*
+	 * If we are here because L1 chose not to trap WRMSR to TSC, then
+ * according to the spec this should set L1's TSC (as opposed to
+ * setting L1's offset for L2).
+ */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ l1_offset,
+ static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_offset = l1_offset;
+
+ static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+}
+
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
+{
+ vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
+
+ /* Userspace is changing the multiplier while L2 is active */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ l1_multiplier,
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_scaling_ratio = l1_multiplier;
+
+ if (kvm_has_tsc_control)
+ static_call(kvm_x86_write_tsc_multiplier)(
+ vcpu, vcpu->arch.tsc_scaling_ratio);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -2361,7 +2451,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
bool synchronizing = false;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
ns = get_kvmclock_base_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -2400,7 +2490,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
}
matched = true;
already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
@@ -2459,9 +2549,10 @@ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0);
- adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+ adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
+ vcpu->arch.l1_tsc_scaling_ratio);
adjust_tsc_offset_guest(vcpu, adjustment);
}
@@ -2844,7 +2935,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */
if (kvm_has_tsc_control)
- tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
+ tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
+ v->arch.l1_tsc_scaling_ratio);
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
@@ -3072,6 +3164,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
+
+ if (!tdp_enabled) {
+ /*
+ * A TLB flush on behalf of the guest is equivalent to
+ * INVPCID(all), toggling CR4.PGE, etc., which requires
+ * a forced sync of the shadow page tables. Unload the
+ * entire MMU here and the subsequent load will sync the
+ * shadow page tables, and also flush the TLB.
+ */
+ kvm_mmu_unload(vcpu);
+ return;
+ }
+
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
@@ -3101,10 +3206,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+ u8 st_preempted = xchg(&st->preempted, 0);
+
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
- st->preempted & KVM_VCPU_FLUSH_TLB);
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ st_preempted & KVM_VCPU_FLUSH_TLB);
+ if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
+ } else {
+ st->preempted = 0;
}
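
The rework reads st->preempted exactly once, so the value that gets traced and the value that gets tested are the same atomic snapshot; a flag set concurrently between two separate reads can no longer be cleared without being acted on. A tiny standalone illustration of the pattern (C11 atomics standing in for the kernel's xchg()):

    #include <stdatomic.h>

    #define DEMO_FLUSH_TLB 0x1   /* stand-in for KVM_VCPU_FLUSH_TLB */

    /* Snapshot-and-clear in a single atomic step; every later check
     * operates on the one value that was captured. */
    static int consume_preempted(_Atomic unsigned char *preempted)
    {
            unsigned char st = atomic_exchange(preempted, 0);

            return st & DEMO_FLUSH_TLB;   /* trace and test this snapshot */
    }
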
vcpu->arch.st.preempted = 0;
@@ -3233,7 +3342,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated) {
kvm_synchronize_tsc(vcpu, data);
} else {
- u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+ u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
adjust_tsc_offset_guest(vcpu, adj);
vcpu->arch.ia32_tsc_adjust_msr += adj;
}
@@ -3535,10 +3644,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* return L1's TSC value to ensure backwards-compatible
* behavior for migration.
*/
- u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
- vcpu->arch.tsc_offset;
+ u64 offset, ratio;
+
+ if (msr_info->host_initiated) {
+ offset = vcpu->arch.l1_tsc_offset;
+ ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ } else {
+ offset = vcpu->arch.tsc_offset;
+ ratio = vcpu->arch.tsc_scaling_ratio;
+ }
- msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
break;
}
case MSR_MTRRcap:
@@ -3862,6 +3978,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_TLBFLUSH:
case KVM_CAP_HYPERV_SEND_IPI:
case KVM_CAP_HYPERV_CPUID:
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
case KVM_CAP_SYS_HYPERV_CPUID:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
@@ -3892,8 +4009,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SGX_ATTRIBUTE:
#endif
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ case KVM_CAP_SREGS2:
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
r = 1;
break;
+ case KVM_CAP_EXIT_HYPERCALL:
+ r = KVM_EXIT_HYPERCALL_VALID_MASK;
+ break;
case KVM_CAP_SET_GUEST_DEBUG2:
return KVM_GUESTDBG_VALID_MASK;
#ifdef CONFIG_KVM_XEN
@@ -4121,7 +4243,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mark_tsc_unstable("KVM discovered backwards TSC");
if (kvm_check_tsc_unstable()) {
- u64 offset = kvm_compute_tsc_offset(vcpu,
+ u64 offset = kvm_compute_l1_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
@@ -4440,7 +4562,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
-static void kvm_smm_changed(struct kvm_vcpu *vcpu);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
@@ -4500,13 +4622,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
- if (events->smi.smm)
- vcpu->arch.hflags |= HF_SMM_MASK;
- else
- vcpu->arch.hflags &= ~HF_SMM_MASK;
- kvm_smm_changed(vcpu);
- }
+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+ kvm_smm_changed(vcpu, events->smi.smm);
vcpu->arch.smi_pending = events->smi.pending;
@@ -4790,6 +4907,9 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
+ return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
if (vcpu->arch.pv_cpuid.enforce)
@@ -4808,6 +4928,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
union {
+ struct kvm_sregs2 *sregs2;
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
@@ -5180,6 +5301,28 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
+ case KVM_GET_SREGS2: {
+ u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!u.sregs2)
+ goto out;
+ __get_sregs2(vcpu, u.sregs2);
+ r = -EFAULT;
+ if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_SREGS2: {
+ u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
+ if (IS_ERR(u.sregs2)) {
+ r = PTR_ERR(u.sregs2);
+ u.sregs2 = NULL;
+ goto out;
+ }
+ r = __set_sregs2(vcpu, u.sregs2);
+ break;
+ }
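
A hedged userspace-side sketch of driving the new ioctl pair (the struct, flag, and ioctl names are the ones added by this patch; error handling trimmed):

    #include <err.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void migrate_sregs2(int src_vcpu_fd, int dst_vcpu_fd)
    {
            struct kvm_sregs2 sregs2;

            if (ioctl(src_vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
                    err(1, "KVM_GET_SREGS2");

            /* When KVM_SREGS2_FLAGS_PDPTRS_VALID is set, pdptrs[] holds
             * the live PAE PDPTEs and is restored verbatim on the
             * destination instead of being re-read from guest memory. */
            if (ioctl(dst_vcpu_fd, KVM_SET_SREGS2, &sregs2) < 0)
                    err(1, "KVM_SET_SREGS2");
    }
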
default:
r = -EINVAL;
}
@@ -5499,6 +5642,21 @@ split_irqchip_unlock:
if (kvm_x86_ops.vm_copy_enc_context_from)
r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
return r;
+ case KVM_CAP_EXIT_HYPERCALL:
+ if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
+ r = -EINVAL;
+ break;
+ }
+ kvm->arch.hypercall_exit_enabled = cap->args[0];
+ r = 0;
+ break;
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
+ r = -EINVAL;
+ if (cap->args[0] & ~1)
+ break;
+ kvm->arch.exit_on_emulation_error = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -5613,6 +5771,41 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
return 0;
}
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+static int kvm_arch_suspend_notifier(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i, ret = 0;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.pv_time_enabled)
+ continue;
+
+ ret = kvm_set_guest_paused(vcpu);
+ if (ret) {
+ kvm_err("Failed to pause guest VCPU%d: %d\n",
+ vcpu->vcpu_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+
+ return ret ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
+{
+ switch (state) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ return kvm_arch_suspend_notifier(kvm);
+ }
+
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -7087,20 +7280,22 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
return emul_to_vcpu(ctxt)->arch.hflags;
}
-static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
{
- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ kvm_smm_changed(vcpu, false);
}
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
- return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
+ return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
}
-static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{
- kvm_smm_changed(emul_to_vcpu(ctxt));
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
@@ -7149,9 +7344,9 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_has_fxsr = emulator_guest_has_fxsr,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
- .set_hflags = emulator_set_hflags,
- .pre_leave_smm = emulator_pre_leave_smm,
- .post_leave_smm = emulator_post_leave_smm,
+ .exiting_smm = emulator_exiting_smm,
+ .leave_smm = emulator_leave_smm,
+ .triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
};
@@ -7226,6 +7421,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->exception.vector = -1;
+ ctxt->perm_ok = false;
+
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@@ -7252,8 +7452,33 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ u32 insn_size = ctxt->fetch.end - ctxt->fetch.data;
+ struct kvm_run *run = vcpu->run;
+
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ run->emulation_failure.ndata = 0;
+ run->emulation_failure.flags = 0;
+
+ if (insn_size) {
+ run->emulation_failure.ndata = 3;
+ run->emulation_failure.flags |=
+ KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
+ run->emulation_failure.insn_size = insn_size;
+ memset(run->emulation_failure.insn_bytes, 0x90,
+ sizeof(run->emulation_failure.insn_bytes));
+ memcpy(run->emulation_failure.insn_bytes,
+ ctxt->fetch.data, insn_size);
+ }
+}
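
Userspace consumes the enriched exit roughly as follows (a sketch; the field and flag names are the ones populated above, while hexdump() is a hypothetical VMM helper):

    #include <linux/kvm.h>

    void hexdump(const void *buf, unsigned int len);   /* hypothetical */

    static void report_emulation_failure(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
                run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION)
                    return;

            if (run->emulation_failure.ndata &&
                (run->emulation_failure.flags &
                 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
                    hexdump(run->emulation_failure.insn_bytes,
                            run->emulation_failure.insn_size);
    }
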
+
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
+ struct kvm *kvm = vcpu->kvm;
+
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
@@ -7262,10 +7487,9 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
- if (emulation_type & EMULTYPE_SKIP) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
+ if (kvm->arch.exit_on_emulation_error ||
+ (emulation_type & EMULTYPE_SKIP)) {
+ prepare_emulation_failure_exit(vcpu);
return 0;
}
@@ -7407,11 +7631,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
-static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
- if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
- /* This is a good place to trace that we are exiting SMM. */
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+ trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+ if (entering_smm) {
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ } else {
+ vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
/* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -7561,14 +7788,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
- ctxt->interruptibility = 0;
- ctxt->have_exception = false;
- ctxt->exception.vector = -1;
- ctxt->perm_ok = false;
-
- ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
- r = x86_decode_insn(ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -8243,6 +8463,7 @@ void kvm_arch_exit(void)
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
+ kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
@@ -8342,16 +8563,15 @@ bool kvm_apicv_activated(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);
-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
{
- if (enable)
+ if (enable_apicv)
clear_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
else
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}
-EXPORT_SYMBOL_GPL(kvm_apicv_init);
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
{
@@ -8360,6 +8580,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
vcpu->stat.directed_yield_attempted++;
+ if (single_task_running())
+ goto no_yield;
+
rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);
@@ -8384,6 +8607,17 @@ no_yield:
return;
}
+static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
+{
+ u64 ret = vcpu->run->hypercall.ret;
+
+ if (!is_64_bit_mode(vcpu))
+ ret = (u32)ret;
+ kvm_rax_write(vcpu, ret);
+ ++vcpu->stat.hypercalls;
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
@@ -8449,6 +8683,28 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
kvm_sched_yield(vcpu, a0);
ret = 0;
break;
+ case KVM_HC_MAP_GPA_RANGE: {
+ u64 gpa = a0, npages = a1, attrs = a2;
+
+ ret = -KVM_ENOSYS;
+ if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
+ break;
+
+ if (!PAGE_ALIGNED(gpa) || !npages ||
+ gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
+ ret = -KVM_EINVAL;
+ break;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ vcpu->run->hypercall.args[0] = gpa;
+ vcpu->run->hypercall.args[1] = npages;
+ vcpu->run->hypercall.args[2] = attrs;
+ vcpu->run->hypercall.longmode = op_64_bit;
+ vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+ return 0;
+ }
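
The userspace flow for the new exit has two halves: opt in once per VM, then service the hypercall from the run loop and stash a return value for complete_hypercall_exit() to copy into RAX on re-entry. A hedged sketch (remap_gpa_range() is a hypothetical VMM helper; the capability and exit names come from this patch):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    long remap_gpa_range(unsigned long gpa, unsigned long npages,
                         unsigned long attrs);   /* hypothetical */

    static void enable_map_gpa_range_exit(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap     = KVM_CAP_EXIT_HYPERCALL,
                    .args[0] = 1ULL << KVM_HC_MAP_GPA_RANGE,
            };

            ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

    static void handle_hypercall_exit(struct kvm_run *run)
    {
            if (run->exit_reason == KVM_EXIT_HYPERCALL &&
                run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
                    run->hypercall.ret =
                            remap_gpa_range(run->hypercall.args[0],
                                            run->hypercall.args[1],
                                            run->hypercall.args[2]);
    }
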
default:
ret = -KVM_ENOSYS;
break;
@@ -8532,9 +8788,6 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- return -EIO;
-
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
kvm_x86_ops.nested_ops->triple_fault(vcpu);
return 1;
@@ -8550,7 +8803,7 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
static_call(kvm_x86_queue_exception)(vcpu);
}
-static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
{
int r;
bool can_inject = true;
@@ -8597,7 +8850,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (is_guest_mode(vcpu)) {
r = kvm_check_nested_events(vcpu);
if (r < 0)
- goto busy;
+ goto out;
}
/* try to inject new event if pending */
@@ -8639,7 +8892,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.smi_pending) {
r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
vcpu->arch.smi_pending = false;
++vcpu->arch.smi_count;
@@ -8652,7 +8905,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.nmi_pending) {
r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
@@ -8667,7 +8920,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (kvm_cpu_has_injectable_intr(vcpu)) {
r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
static_call(kvm_x86_set_irq)(vcpu);
@@ -8683,11 +8936,14 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
*req_immediate_exit = true;
WARN_ON(vcpu->arch.exception.pending);
- return;
+ return 0;
-busy:
- *req_immediate_exit = true;
- return;
+out:
+ if (r == -EBUSY) {
+ *req_immediate_exit = true;
+ r = 0;
+ }
+ return r;
}
static void process_nmi(struct kvm_vcpu *vcpu)
@@ -8866,10 +9122,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
+ unsigned long cr0;
char buf[512];
- u32 cr0;
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
memset(buf, 0, 512);
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
@@ -8879,13 +9134,13 @@ static void enter_smm(struct kvm_vcpu *vcpu)
enter_smm_save_state_32(vcpu, buf);
/*
- * Give pre_enter_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. leave guest mode) after we've saved the state into
- * the SMM state-save area.
+ * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. leave guest mode) after we've saved the state into the
+ * SMM state-save area.
*/
- static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
+ static_call(kvm_x86_enter_smm)(vcpu, buf);
- vcpu->arch.hflags |= HF_SMM_MASK;
+ kvm_smm_changed(vcpu, true);
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -8974,6 +9229,15 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
kvm_apic_update_apicv(vcpu);
static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
+
+ /*
+ * When APICv gets disabled, we may still have injected interrupts
+ * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
+ * still active when the interrupt got accepted. Make sure
+ * inject_pending_event() is called to check for that.
+ */
+ if (!vcpu->arch.apicv_active)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -9149,7 +9413,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
- if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
@@ -9242,13 +9506,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event;
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
r = 1;
goto out;
}
- inject_pending_event(vcpu, &req_immediate_exit);
+ r = inject_pending_event(vcpu, &req_immediate_exit);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (req_int_win)
static_call(kvm_x86_enable_irq_window)(vcpu);
@@ -9450,7 +9722,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1;
}
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0)
+ return 0;
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD:
@@ -9496,7 +9769,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
- kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@@ -9674,7 +9947,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
kvm_vcpu_block(vcpu);
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0) {
+ r = 0;
+ goto out;
+ }
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN;
if (signal_pending(current)) {
@@ -9823,7 +10099,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
-static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct desc_ptr dt;
@@ -9856,14 +10132,36 @@ skip_protected_regs:
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
+}
+
+static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ __get_sregs_common(vcpu, sregs);
- memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
+ if (vcpu->arch.guest_state_protected)
+ return;
if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
}
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int i;
+
+ __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
+
+ if (vcpu->arch.guest_state_protected)
+ return;
+
+ if (is_pae_paging(vcpu)) {
+		for (i = 0; i < 4; i++)
+ sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
+ sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ }
+}
+
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -9876,11 +10174,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int r;
+
vcpu_load(vcpu);
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0)
+ goto out;
+ r = 0;
+
if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
vcpu->arch.pv.pv_unhalted)
@@ -9888,10 +10192,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else
mp_state->mp_state = vcpu->arch.mp_state;
+out:
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
- return 0;
+ return r;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
@@ -9975,24 +10280,23 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvm_is_valid_cr4(vcpu, sregs->cr4);
}
-static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
+ int *mmu_reset_needed, bool update_pdptrs)
{
struct msr_data apic_base_msr;
- int mmu_reset_needed = 0;
- int pending_vec, max_bits, idx;
+ int idx;
struct desc_ptr dt;
- int ret = -EINVAL;
if (!kvm_is_valid_sregs(vcpu, sregs))
- goto out;
+ return -EINVAL;
apic_base_msr.data = sregs->apic_base;
apic_base_msr.host_initiated = true;
if (kvm_set_apic_base(vcpu, &apic_base_msr))
- goto out;
+ return -EINVAL;
if (vcpu->arch.guest_state_protected)
- goto skip_protected_regs;
+ return 0;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
@@ -10002,31 +10306,30 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static_call(kvm_x86_set_gdt)(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
- mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
+ *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
kvm_set_cr8(vcpu, sregs->cr8);
- mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
+ *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
- mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
+ *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+ *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (is_pae_paging(vcpu)) {
- load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
- mmu_reset_needed = 1;
+ if (update_pdptrs) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ if (is_pae_paging(vcpu)) {
+ load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+ *mmu_reset_needed = 1;
+ }
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- if (mmu_reset_needed)
- kvm_mmu_reset_context(vcpu);
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -10046,20 +10349,63 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
!is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-skip_protected_regs:
+ return 0;
+}
+
+static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ int pending_vec, max_bits;
+ int mmu_reset_needed = 0;
+ int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
+
+ if (ret)
+ return ret;
+
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+
max_bits = KVM_NR_INTERRUPTS;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
+
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
+ return 0;
+}
- kvm_make_request(KVM_REQ_EVENT, vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int mmu_reset_needed = 0;
+ bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
+ !(sregs2->efer & EFER_LMA);
+ int i, ret;
- ret = 0;
-out:
- return ret;
+ if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
+ return -EINVAL;
+
+ if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
+ return -EINVAL;
+
+ ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
+ &mmu_reset_needed, !valid_pdptrs);
+ if (ret)
+ return ret;
+
+ if (valid_pdptrs) {
+		for (i = 0; i < 4; i++)
+ kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
+
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ mmu_reset_needed = 1;
+ vcpu->arch.pdptrs_from_userspace = true;
+ }
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -10115,8 +10461,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -10284,13 +10629,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct page *page;
int r;
+ vcpu->arch.last_vmentry_cpu = -1;
+
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
- kvm_set_tsc_khz(vcpu, max_tsc_khz);
-
r = kvm_mmu_create(vcpu);
if (r < 0)
return r;
@@ -10350,6 +10695,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;
+#if IS_ENABLED(CONFIG_HYPERV)
+ vcpu->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
r = static_call(kvm_x86_vcpu_create)(vcpu);
if (r)
goto free_guest_fpu;
@@ -10358,8 +10707,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
+ kvm_set_tsc_khz(vcpu, max_tsc_khz);
kvm_vcpu_reset(vcpu, false);
- kvm_init_mmu(vcpu, false);
+ kvm_init_mmu(vcpu);
vcpu_put(vcpu);
return 0;
@@ -10433,6 +10783,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
+ unsigned long old_cr0 = kvm_read_cr0(vcpu);
+
kvm_lapic_reset(vcpu, init_event);
vcpu->arch.hflags = 0;
@@ -10501,6 +10853,17 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.ia32_xss = 0;
static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
+
+ /*
+ * Reset the MMU context if paging was enabled prior to INIT (which is
+ * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
+ * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be
+ * checked because it is unconditionally cleared on INIT and all other
+ * paging related bits are ignored if paging is disabled, i.e. CR0.WP,
+ * CR4, and EFER changes are all irrelevant if CR0.PG was '0'.
+ */
+ if (old_cr0 & X86_CR0_PG)
+ kvm_mmu_reset_context(vcpu);
}
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -10618,6 +10981,9 @@ int kvm_arch_hardware_setup(void *opaque)
int r;
rdmsrl_safe(MSR_EFER, &host_efer);
+ if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
+ !(host_efer & EFER_NX)))
+ return -EIO;
if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss);
@@ -10733,9 +11099,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.guest_can_read_msr_platform_info = true;
+#if IS_ENABLED(CONFIG_HYPERV)
+ spin_lock_init(&kvm->arch.hv_root_tdp_lock);
+ kvm->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+ kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
@@ -10896,17 +11268,23 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_hv_destroy_vm(kvm);
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void memslot_rmap_free(struct kvm_memory_slot *slot)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
+ }
+}
- if (i == 0)
- continue;
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ int i;
+
+ memslot_rmap_free(slot);
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -10914,11 +11292,79 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
kvm_page_track_free_memslot(slot);
}
-static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
+ const int sz = sizeof(*slot->arch.rmap[0]);
int i;
+ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ int level = i + 1;
+ int lpages = gfn_to_index(slot->base_gfn + npages - 1,
+ slot->base_gfn, level) + 1;
+
+ WARN_ON(slot->arch.rmap[i]);
+
+ slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
+ if (!slot->arch.rmap[i]) {
+ memslot_rmap_free(slot);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
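
The sizing loop allocates one rmap slot per possible mapping at each page-size level; a standalone re-derivation of the arithmetic (gfn_to_index() under x86's 9-bits-per-level layout) makes the counts concrete:

    /* demo_lpages(0, 1024, 1) == 1024   (4K mappings)
     * demo_lpages(0, 1024, 2) ==    2   (2M mappings)
     * demo_lpages(0, 1024, 3) ==    1   (1G mappings) */
    static unsigned long demo_lpages(unsigned long base_gfn,
                                     unsigned long npages, int level)
    {
            unsigned int shift = (level - 1) * 9;   /* 4K -> 2M -> 1G */
            unsigned long last = base_gfn + npages - 1;

            return ((last >> shift) - (base_gfn >> shift)) + 1;
    }
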
+
+int alloc_all_memslots_rmaps(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ int r, i;
+
+ /*
+	 * Check if memslots already have rmaps early, before acquiring
+ * the slots_arch_lock below.
+ */
+ if (kvm_memslots_have_rmaps(kvm))
+ return 0;
+
+ mutex_lock(&kvm->slots_arch_lock);
+
+ /*
+	 * Read memslots_have_rmaps again, under slots_arch_lock,
+	 * before allocating the rmaps.
+ */
+ if (kvm_memslots_have_rmaps(kvm)) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+ }
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(slot, slots) {
+ r = memslot_rmap_alloc(slot, slot->npages);
+ if (r) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return r;
+ }
+ }
+ }
+
+ /*
+ * Ensure that memslots_have_rmaps becomes true strictly after
+ * all the rmap pointers are set.
+ */
+ smp_store_release(&kvm->arch.memslots_have_rmaps, true);
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+}
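
The release store must pair with an acquire load on every reader that checks the flag outside slots_arch_lock; the reader is presumably shaped like this (an assumption — kvm_memslots_have_rmaps() is not part of this hunk):

    /* Observing 'true' with acquire semantics guarantees that the rmap
     * pointers written before the smp_store_release() above are visible
     * as well. */
    static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
    {
            return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
    }
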
+
+static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ int i, r;
+
/*
* Clear out the previous array pointers for the KVM_MR_MOVE case. The
* old arrays will be freed by __kvm_set_memory_region() if installing
@@ -10926,7 +11372,13 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
*/
memset(&slot->arch, 0, sizeof(slot->arch));
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ if (kvm_memslots_have_rmaps(kvm)) {
+ r = memslot_rmap_alloc(slot, npages);
+ if (r)
+ return r;
+ }
+
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
int lpages;
@@ -10935,14 +11387,6 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
lpages = gfn_to_index(slot->base_gfn + npages - 1,
slot->base_gfn, level) + 1;
- slot->arch.rmap[i] =
- kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
- GFP_KERNEL_ACCOUNT);
- if (!slot->arch.rmap[i])
- goto out_free;
- if (i == 0)
- continue;
-
linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
if (!linfo)
goto out_free;
@@ -10972,12 +11416,9 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
return 0;
out_free:
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.rmap[i]);
- slot->arch.rmap[i] = NULL;
- if (i == 0)
- continue;
+ memslot_rmap_free(slot);
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -11006,7 +11447,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(memslot,
+ return kvm_alloc_memslot_metadata(kvm, memslot,
mem->memory_size >> PAGE_SHIFT);
return 0;
}
@@ -11082,36 +11523,19 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
*/
kvm_mmu_zap_collapsible_sptes(kvm, new);
} else {
- /* By default, write-protect everything to log writes. */
- int level = PG_LEVEL_4K;
+ /*
+ * Initially-all-set does not require write protecting any page,
+ * because they're all assumed to be dirty.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
if (kvm_x86_ops.cpu_dirty_log_size) {
- /*
- * Clear all dirty bits, unless pages are treated as
- * dirty from the get-go.
- */
- if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
- kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-
- /*
- * Write-protect large pages on write so that dirty
- * logging happens at 4k granularity. No need to
- * write-protect small SPTEs since write accesses are
- * logged by the CPU via dirty bits.
- */
- level = PG_LEVEL_2M;
- } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
- /*
- * If we're with initial-all-set, we don't need
- * to write protect any small page because
- * they're reported as dirty already. However
- * we still need to write-protect huge pages
- * so that the page split can happen lazily on
- * the first write to the huge page.
- */
- level = PG_LEVEL_2M;
+ kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
+ } else {
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
}
- kvm_mmu_slot_remove_write_access(kvm, new, level);
}
}
@@ -11499,7 +11923,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
void kvm_arch_start_assignment(struct kvm *kvm)
{
- atomic_inc(&kvm->arch.assigned_device_count);
+ if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+ static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
@@ -11679,8 +12104,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
bool pcid_enabled;
struct x86_exception e;
- unsigned i;
- unsigned long roots_to_free = 0;
struct {
u64 pcid;
u64 gla;
@@ -11714,23 +12137,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
return 1;
}
- if (kvm_get_active_pcid(vcpu) == operand.pcid) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
- == operand.pcid)
- roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
-
- kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
- /*
- * If neither the current cr3 nor any of the prev_roots use the
- * given PCID, then nothing needs to be done here because a
- * resync will happen anyway before switching to any other CR3.
- */
-
+ kvm_invalidate_pcid(vcpu, operand.pcid);
return kvm_skip_emulated_instruction(vcpu);
case INVPCID_TYPE_ALL_NON_GLOBAL:
@@ -11743,7 +12150,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
- kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return kvm_skip_emulated_instruction(vcpu);
default:
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 521f74e5bbf2..44ae10312740 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -157,16 +157,6 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
return cs_l;
}
-static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
- return (vcpu->arch.efer & EFER_LMA) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
-#else
- return 0;
-#endif
-}
-
static inline bool x86_exception_has_error_code(unsigned int vector)
{
static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index a67afd74232c..a1d24fdc07cf 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -1417,7 +1417,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
}
}
-static unsigned long insn_get_effective_ip(struct pt_regs *regs)
+static int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip)
{
unsigned long seg_base = 0;
@@ -1430,10 +1430,12 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
if (!user_64bit_mode(regs)) {
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
if (seg_base == -1L)
- return 0;
+ return -EINVAL;
}
- return seg_base + regs->ip;
+ *ip = seg_base + regs->ip;
+
+ return 0;
}
/**
@@ -1446,18 +1448,17 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated
*/
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
@@ -1475,18 +1476,17 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
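
Callers now have to distinguish three outcomes instead of two. A hedged sketch of the adjusted call-site pattern (insn_decode_from_regs() is assumed to be the usual decode step that follows a fetch in this file):

    static bool demo_fetch_and_decode(struct pt_regs *regs, struct insn *insn)
    {
            unsigned char buf[MAX_INSN_SIZE];
            int nr_copied = insn_fetch_from_user(regs, buf);

            if (nr_copied <= 0)   /* -EINVAL: no linear IP; 0: short copy */
                    return false;

            return insn_decode_from_regs(insn, regs, buf, nr_copied);
    }
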
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 4d32cb06ffd5..ec9922cba30a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
ANNOTATE_RETPOLINE_SAFE
1: jmp *%\reg
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
.endm
/*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1c548ad00752..2d27932c9ac7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
-
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ else
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
local_irq_disable();
}
@@ -1186,7 +1186,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;
/* kprobes don't want to hook the spurious faults: */
- if (kprobe_page_fault(regs, X86_TRAP_PF))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
@@ -1239,7 +1239,7 @@ void do_user_addr_fault(struct pt_regs *regs,
}
/* kprobes don't want to hook the spurious faults: */
- if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 21ffb03f6c72..74b78840182d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -651,7 +651,7 @@ void __init find_low_pfn_range(void)
highmem_pfn_init();
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -677,7 +677,7 @@ void __init initmem_init(void)
setup_bootmem_allocator();
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void __init setup_bootmem_allocator(void)
{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 12c686c65ea9..60ade7dd71bd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!IS_ENABLED(CONFIG_EFI))
return;
- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
desc->flags |= IORES_MAP_ENCRYPTED;
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a9639f663d25..470b20208430 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
- /* Check the SEV MSR whether SEV or SME is enabled */
- sev_status = __rdmsr(MSR_AMD64_SEV);
- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (!(eax & feature_mask))
+ /* Check whether SEV or SME is supported */
+ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return;
me_mask = 1UL << (ebx & 0x3f);
+ /* Check the SEV MSR whether SEV or SME is enabled */
+ sev_status = __rdmsr(MSR_AMD64_SEV);
+ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
/* Check if memory encryption is enabled */
if (feature_mask == AMD_SME_BIT) {
/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 5eb4dc2b97da..e94da744386f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
/* make sure all non-reserved blocks are inside the limits */
bi->start = max(bi->start, low);
- bi->end = min(bi->end, high);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
/* and there's no empty block */
if (bi->start >= bi->end)
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index a2332eef66e9..4a67b922bce1 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -192,6 +192,10 @@ static const struct file_operations fops_init_pkru = {
static int __init create_init_pkru_value(void)
{
+ /* Do not expose the file if pkeys are not supported. */
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return 0;
+
debugfs_create_file("init_pkru", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_init_pkru);
return 0;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 78804680e923..cfe6b1e85fa6 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,6 +14,7 @@
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
+#include <asm/perf_event.h>
#include "mm_internal.h"
@@ -404,9 +405,14 @@ static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
if (static_branch_unlikely(&rdpmc_always_available_key) ||
(!static_branch_unlikely(&rdpmc_never_available_key) &&
- atomic_read(&mm->context.perf_rdpmc_allowed)))
+ atomic_read(&mm->context.perf_rdpmc_allowed))) {
+ /*
+ * Clear the existing dirty counters to
+ * prevent the leak for an RDPMC task.
+ */
+ perf_clear_dirty_counters();
cr4_set_bits_irqsoff(X86_CR4_PCE);
- else
+ } else
cr4_clear_bits_irqsoff(X86_CR4_PCE);
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 2a2e290fa5d8..e835164189f1 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -31,7 +31,7 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
}
#define EMIT(bytes, len) \
- do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
+ do { prog = emit_code(prog, bytes, len); } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
@@ -239,7 +239,6 @@ struct jit_context {
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[0])
EMIT1(0x53); /* push rbx */
@@ -255,7 +254,6 @@ static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[3])
EMIT2(0x41, 0x5F); /* pop r15 */
@@ -277,13 +275,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
- int cnt = X86_PATCH_SIZE;
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
*/
- memcpy(prog, x86_nops[5], cnt);
- prog += cnt;
+ memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
+ prog += X86_PATCH_SIZE;
if (!ebpf_from_cbpf) {
if (tail_call_reachable && !is_subprog)
EMIT2(0x31, 0xC0); /* xor eax, eax */
@@ -303,7 +300,6 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + X86_PATCH_SIZE);
@@ -423,7 +419,6 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
int off1 = 42;
int off2 = 31;
int off3 = 9;
- int cnt = 0;
/* count the additional bytes used for popping callee regs from stack
* that need to be taken into account for each of the offsets that
@@ -513,7 +508,6 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
int pop_bytes = 0;
int off1 = 20;
int poke_off;
- int cnt = 0;
/* count the additional bytes used for popping callee regs to stack
* that need to be taken into account for jump offset that is used for
@@ -615,7 +609,6 @@ static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
{
u8 *prog = *pprog;
u8 b1, b2, b3;
- int cnt = 0;
/*
* Optimization: if imm32 is positive, use 'mov %eax, imm32'
@@ -655,7 +648,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
/*
@@ -678,7 +670,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64) {
/* mov dst, src */
@@ -697,7 +688,6 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_imm8(off)) {
/* 1-byte signed displacement.
@@ -720,7 +710,6 @@ static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64)
EMIT1(add_2mod(0x48, dst_reg, src_reg));
@@ -733,7 +722,6 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -764,7 +752,6 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -799,7 +786,6 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;
- int cnt = 0;
EMIT1(0xF0); /* lock prefix */
@@ -869,10 +855,10 @@ static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
}
}
-static int emit_nops(u8 **pprog, int len)
+static void emit_nops(u8 **pprog, int len)
{
u8 *prog = *pprog;
- int i, noplen, cnt = 0;
+ int i, noplen;
while (len > 0) {
noplen = len;
@@ -886,8 +872,6 @@ static int emit_nops(u8 **pprog, int len)
}
*pprog = prog;
-
- return cnt;
}
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
@@ -902,7 +886,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
- int i, cnt = 0, excnt = 0;
+ int i, excnt = 0;
int ilen, proglen = 0;
u8 *prog = temp;
int err;
@@ -1297,7 +1281,7 @@ st: if (is_imm8(insn->off))
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
struct exception_table_entry *ex;
- u8 *_insn = image + proglen;
+ u8 *_insn = image + proglen + (start_of_ldx - temp);
s64 delta;
/* populate jmp_offset for JMP above */
@@ -1576,7 +1560,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1622,7 +1606,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
break;
}
@@ -1647,7 +1631,7 @@ emit_jmp:
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
+ emit_nops(&prog, INSN_SZ_DIFF - 2);
}
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1754,7 +1738,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
{
u8 *prog = *pprog;
u8 *jmp_insn;
- int cnt = 0;
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
@@ -1822,7 +1805,6 @@ static void emit_align(u8 **pprog, u32 align)
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + 2 + 4);
@@ -1854,7 +1836,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
u8 **branches)
{
u8 *prog = *pprog;
- int i, cnt = 0;
+ int i;
/* The first fmod_ret program will receive a garbage return value.
* Set this to 0 to avoid confusing the program.
@@ -1950,7 +1932,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_progs *tprogs,
void *orig_call)
{
- int ret, i, cnt = 0, nr_args = m->nr_args;
+ int ret, i, nr_args = m->nr_args;
int stack_size = nr_args * 8;
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
@@ -2095,8 +2077,6 @@ static int emit_fallback_jump(u8 **pprog)
*/
err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
- int cnt = 0;
-
EMIT2(0xFF, 0xE2); /* jmp rdx */
#endif
*pprog = prog;
@@ -2106,7 +2086,7 @@ static int emit_fallback_jump(u8 **pprog)
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
u8 *jg_reloc, *prog = *pprog;
- int pivot, err, jg_bytes = 1, cnt = 0;
+ int pivot, err, jg_bytes = 1;
s64 jg_offset;
if (a == b) {
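
The recurring `int cnt = 0;` deletions in the hunks above all follow from one change: the JIT's EMIT macros advance the output cursor (prog) directly, so emit helpers no longer return or accumulate a byte count (which is also why emit_nops() became void). A minimal user-space sketch of the pattern, with simplified macros standing in for the kernel's EMIT definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's EMIT macros: they advance the
 * local prog cursor directly, so no separate byte counter is needed. */
#define EMIT1(b1)	(*prog++ = (uint8_t)(b1))
#define EMIT2(b1, b2)	do { EMIT1(b1); EMIT1(b2); } while (0)

static void emit_xor_eax(uint8_t **pprog)
{
	uint8_t *prog = *pprog;

	EMIT2(0x31, 0xC0);	/* xor eax, eax */
	*pprog = prog;		/* hand back the advanced cursor, not a count */
}

int main(void)
{
	uint8_t buf[16], *p = buf;

	emit_xor_eax(&p);
	printf("emitted %td bytes: %02x %02x\n", p - buf, buf[0], buf[1]);
	return 0;
}
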
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 02dc64625e64..2edd86649468 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
#endif
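
The fixup above drives an indexed register pair: each access writes a register number to RS690_HTIU_NB_INDEX (OR-ing in the write-enable bit for stores) and moves the payload through RS690_HTIU_NB_DATA. A standalone sketch of how the 64-bit top-of-DRAM address is split across the two TOP_OF_DRAM2 registers; the 16GB value is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define RS690_LOWER_TOP_OF_DRAM2_VALID	0x1

int main(void)
{
	uint64_t top_of_dram = 4ULL << 32;	/* illustrative: 16GB of RAM */
	/* upper register takes bits 63:32, lower takes bits 31:0 + valid */
	uint32_t upper = top_of_dram >> 32;
	uint32_t lower = (uint32_t)top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID;

	printf("UPPER_TOP_OF_DRAM2 <- 0x%08x\n", upper);
	printf("LOWER_TOP_OF_DRAM2 <- 0x%08x\n", lower);
	return 0;
}
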
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 8a26e705cb06..147c30a81f15 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -468,7 +468,7 @@ void __init efi_init(void)
*/
if (!efi_runtime_supported())
- pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
if (!efi_runtime_supported() || efi_runtime_disabled()) {
efi_memmap_unmap();
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 7850111008a8..b15ebfe40a73 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
+ /*
+ * Don't free memory under 1M for two reasons:
+ * - BIOS might clobber it
+ * - Crash kernel needs it to be reserved
+ */
+ if (start + size < SZ_1M)
+ continue;
+ if (start < SZ_1M) {
+ size -= (SZ_1M - start);
+ start = SZ_1M;
+ }
+
memblock_free_late(start, size);
}
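
The new guard is a plain range clamp. A user-space model of its two cases, with SZ_1M inlined and memblock_free_late() replaced by a printout:

#include <stdint.h>
#include <stdio.h>

#define SZ_1M (1024 * 1024)

static void free_boot_region(uint64_t start, uint64_t size)
{
	if (start + size < SZ_1M)
		return;			/* nothing above 1M: keep it all */
	if (start < SZ_1M) {
		size -= SZ_1M - start;	/* trim off the sub-1M part */
		start = SZ_1M;
	}
	printf("would free [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)(start + size));
}

int main(void)
{
	free_boot_region(0x80000, 0x10000);	/* entirely below 1M: skipped */
	free_boot_region(0xF0000, 0x40000);	/* straddles 1M: trimmed */
	return 0;
}
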
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 6b1f3a4eeb44..a0b491ae2de8 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 2e1c1bec0f9e..6534c92d0f83 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem) {
+ if (!mem)
pr_info("No sub-1M memory is available for the trampoline\n");
- return;
- }
+ else
+ set_real_mode_mem(mem);
- memblock_reserve(mem, size);
- set_real_mode_mem(mem);
- crash_reserve_low_1M();
+ /*
+ * Unconditionally reserve the entire first 1M, see comment in
+ * setup_arch().
+ */
+ memblock_reserve(0, SZ_1M);
}
static void sme_sev_setup_real_mode(struct trampoline_header *th)
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 2ed81e581755..0575decb5e54 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -26,20 +25,17 @@
#define old_mmap sys_old_mmap
-#define __SYSCALL_I386(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 2e8544dafbb0..95725b5a41ac 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -36,23 +35,15 @@
#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
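
Both UML tables now rely on the same two-pass X-macro expansion: the syscall header is included twice, first with __SYSCALL declaring each handler and then with __SYSCALL emitting each symbol as a table entry, which is why the `[nr] = sym` designators and the sys_ni_syscall pre-fill could be dropped. A minimal standalone model, where a two-entry table stands in for asm/syscalls_64.h:

#include <stdio.h>

#define SYSCALL_TABLE \
	__SYSCALL(0, sys_read) \
	__SYSCALL(1, sys_write)

/* pass 1: declare every handler */
#define __SYSCALL(nr, sym) static long sym(void);
SYSCALL_TABLE
#undef __SYSCALL

/* pass 2: list the handlers in numbering order */
#define __SYSCALL(nr, sym) sym,
static long (*const sys_call_table[])(void) = {
	SYSCALL_TABLE
};
#undef __SYSCALL

static long sys_read(void)  { return 100; }
static long sys_write(void) { return 101; }

int main(void)
{
	printf("syscall 1 -> %ld\n", sys_call_table[1]());
	return 0;
}
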
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index e87699aa2dc8..03149422dce2 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
+ instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
+ instrumentation_end();
}
#ifdef CONFIG_X86_MCE
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 744c2f463845..4361fe4247e3 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -43,7 +43,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -52,11 +52,11 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -74,7 +74,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -95,7 +95,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -116,7 +116,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -135,7 +135,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -157,7 +157,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -180,7 +180,7 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -198,7 +198,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -218,7 +218,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned int tmp, vval; \
\
@@ -257,7 +257,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* _XTENSA_ATOMIC_H */
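
The rename above is mechanical (atomic_* to arch_atomic_*), but it helps to see what one ATOMIC_OP expansion produces. A user-space model of the generated shape; the compiler's __atomic builtins stand in for the xtensa exclusive-access loop, and the extra c_op parameter is an addition for portability, not part of the kernel macro:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op)						\
static void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
	int new;							\
									\
	do {								\
		new = old c_op i;	/* retry until the CAS lands */	\
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,	\
			0, __ATOMIC_RELAXED, __ATOMIC_RELAXED));	\
}

ATOMIC_OP(add, +)
ATOMIC_OP(and, &)

int main(void)
{
	atomic_t v = { .counter = 6 };

	arch_atomic_add(4, &v);
	arch_atomic_and(3, &v);
	printf("counter = %d\n", v.counter);	/* (6 + 4) & 3 == 2 */
	return 0;
}
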
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index a175f8aec3fb..3699e2818efb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -80,7 +80,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
}
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
@@ -97,7 +97,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
return __cmpxchg_u32(ptr, old, new);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
@@ -107,11 +107,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
@@ -169,7 +169,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 37ce25ef92d6..493eb7083b1a 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -192,10 +192,6 @@ static inline unsigned long ___pa(unsigned long va)
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-#ifdef CONFIG_DISCONTIGMEM
-# error CONFIG_DISCONTIGMEM not supported
-#endif
-
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 856e2da2e397..573df8cea200 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -26,8 +26,8 @@
*
* - flush_tlb_all() flushes all processes' TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(mm, vmaddr) flushes a single page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_page(vma, page) flushes a single page
+ * - flush_tlb_range(vma, vmaddr, end) flushes a range of pages
*/
void local_flush_tlb_all(void);
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9534ef515d74..060165340612 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -304,7 +304,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page = (unsigned long) task_stack_page(p);
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.sp;
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index cd85a7a2722b..1254da07ead1 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
- preempt_disable();
trace_hardirqs_off();
calibrate_delay();
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index fc09be7b1347..3cdfa00738e0 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -27,7 +27,6 @@
struct simdisk {
const char *filename;
spinlock_t lock;
- struct request_queue *queue;
struct gendisk *gd;
struct proc_dir_entry *procfile;
int users;
@@ -266,21 +265,13 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL) {
- pr_err("blk_alloc_queue failed\n");
- goto out_alloc_queue;
- }
-
- dev->gd = alloc_disk(SIMDISK_MINORS);
- if (dev->gd == NULL) {
- pr_err("alloc_disk failed\n");
- goto out_alloc_disk;
- }
+ dev->gd = blk_alloc_disk(NUMA_NO_NODE);
+ if (!dev->gd)
+ return -ENOMEM;
dev->gd->major = simdisk_major;
dev->gd->first_minor = which;
+ dev->gd->minors = SIMDISK_MINORS;
dev->gd->fops = &simdisk_ops;
- dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
set_capacity(dev->gd, 0);
@@ -288,12 +279,6 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
return 0;
-
-out_alloc_disk:
- blk_cleanup_queue(dev->queue);
- dev->queue = NULL;
-out_alloc_queue:
- return -ENOMEM;
}
static int __init simdisk_init(void)
@@ -343,10 +328,10 @@ static void simdisk_teardown(struct simdisk *dev, int which,
char tmp[2] = { '0' + which, 0 };
simdisk_detach(dev);
- if (dev->gd)
+ if (dev->gd) {
del_gendisk(dev->gd);
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->gd);
+ }
remove_proc_entry(tmp, procdir);
}
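
The simdisk conversion follows the blk_alloc_disk()/blk_cleanup_disk() pairing: the request queue is allocated together with, and owned by, the gendisk, collapsing the old two-step setup and teardown. A user-space model of that ownership change; the type and function names mimic the kernel API but the bodies are stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct request_queue { int dummy; };

struct gendisk {
	struct request_queue queue;	/* embedded, not separately owned */
	int first_minor;
};

static struct gendisk *blk_alloc_disk(void)
{
	/* one allocation covers both the disk and its queue */
	return calloc(1, sizeof(struct gendisk));
}

static void blk_cleanup_disk(struct gendisk *gd)
{
	free(gd);			/* the queue goes away with the disk */
}

int main(void)
{
	struct gendisk *gd = blk_alloc_disk();

	if (!gd)
		return 1;
	gd->first_minor = 0;
	printf("disk %p owns queue %p\n", (void *)gd, (void *)&gd->queue);
	blk_cleanup_disk(gd);
	return 0;
}
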
diff --git a/block/Kconfig b/block/Kconfig
index a2297edfdde8..e71c63eaaf52 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -133,6 +133,13 @@ config BLK_WBT
dynamically on an algorithm loosely based on CoDel, factoring in
the realtime performance of the disk.
+config BLK_WBT_MQ
+ bool "Enable writeback throttling by default"
+ default y
+ depends on BLK_WBT
+ help
+ Enable writeback throttling by default for request-based block devices.
+
config BLK_CGROUP_IOLATENCY
bool "Enable support for latency based cgroup IO protection"
depends on BLK_CGROUP=y
@@ -155,12 +162,14 @@ config BLK_CGROUP_IOCOST
distributes IO capacity between different groups based on
their share of the overall weight distribution.
-config BLK_WBT_MQ
- bool "Multiqueue writeback throttling"
- default y
- depends on BLK_WBT
+config BLK_CGROUP_IOPRIO
+ bool "Cgroup I/O controller for assigning an I/O priority class"
+ depends on BLK_CGROUP
help
- Enable writeback throttling by default on multiqueue devices.
+ Enable the .prio interface for assigning an I/O priority class to
+ requests. The I/O priority class affects the order in which an I/O
+ scheduler and block devices process requests. Only some I/O schedulers
+ and some block devices support I/O priorities.
config BLK_DEBUG_FS
bool "Block layer debugging information in debugfs"
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 2f2158e05a91..64053d67a97b 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -9,6 +9,12 @@ config MQ_IOSCHED_DEADLINE
help
MQ version of the deadline IO scheduler.
+config MQ_IOSCHED_DEADLINE_CGROUP
+ tristate
+ default y
+ depends on MQ_IOSCHED_DEADLINE
+ depends on BLK_CGROUP
+
config MQ_IOSCHED_KYBER
tristate "Kyber I/O scheduler"
default y
diff --git a/block/Makefile b/block/Makefile
index 8d841f5f986f..bfbe4e13ca1e 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
blk-exec.o blk-merge.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
- genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o
+ genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
+ disk-events.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
@@ -17,9 +18,12 @@ obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_CGROUP_RWSTAT) += blk-cgroup-rwstat.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
+obj-$(CONFIG_BLK_CGROUP_IOPRIO) += blk-ioprio.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
+mq-deadline-y += mq-deadline-main.o
+mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index acd1f881273e..727955918563 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -364,6 +364,16 @@ static int ref_wr_duration[2];
*/
static const unsigned long max_service_from_wr = 120000;
+/*
+ * Maximum time between the creation of two queues, for stable merge
+ * to be activated (in ms)
+ */
+static const unsigned long bfq_activation_stable_merging = 600;
+/*
+ * Minimum time to be waited before evaluating delayed stable merge (in ms)
+ */
+static const unsigned long bfq_late_stable_merging = 600;
+
#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
@@ -1729,10 +1739,23 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfqq->entity.new_weight == 40;
*interactive = !in_burst && idle_for_long_time &&
bfqq->entity.new_weight == 40;
+ /*
+ * Merged bfq_queues are kept out of weight-raising
+ * (low-latency) mechanisms. The reason is that these queues
+ * are usually created for non-interactive and
+ * non-soft-real-time tasks. Yet this is not the case for
+ * stably-merged queues. These queues are merged just because
+ * they are created shortly after each other. So they may
+ * easily serve the I/O of an interactive or soft-real time
+ * application, if the application happens to spawn multiple
+ * processes. So let stably-merged queues also enjoy weight
+ * raising.
+ */
wr_or_deserves_wr = bfqd->low_latency &&
(bfqq->wr_coeff > 1 ||
(bfq_bfqq_sync(bfqq) &&
- bfqq->bic && (*interactive || soft_rt)));
+ (bfqq->bic || RQ_BIC(rq)->stably_merged) &&
+ (*interactive || soft_rt)));
/*
* Using the last flag, update budget and check whether bfqq
@@ -1962,14 +1985,18 @@ static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
* Turning back to the detection of a waker queue, a queue Q is deemed
* as a waker queue for bfqq if, for three consecutive times, bfqq
* happens to become non empty right after a request of Q has been
- * completed. In particular, on the first time, Q is tentatively set
- * as a candidate waker queue, while on the third consecutive time
- * that Q is detected, the field waker_bfqq is set to Q, to confirm
- * that Q is a waker queue for bfqq. These detection steps are
- * performed only if bfqq has a long think time, so as to make it more
- * likely that bfqq's I/O is actually being blocked by a
- * synchronization. This last filter, plus the above three-times
- * requirement, make false positives less likely.
+ * completed. In this respect, even if bfqq is empty, we do not check
+ * for a waker if it still has some in-flight I/O. In fact, in this
+ * case bfqq is actually still being served by the drive, and may
+ * receive new I/O on the completion of some of the in-flight
+ * requests. In particular, on the first time, Q is tentatively set as
+ * a candidate waker queue, while on the third consecutive time that Q
+ * is detected, the field waker_bfqq is set to Q, to confirm that Q is
+ * a waker queue for bfqq. These detection steps are performed only if
+ * bfqq has a long think time, so as to make it more likely that
+ * bfqq's I/O is actually being blocked by a synchronization. This
+ * last filter, plus the above three-times requirement, make false
+ * positives less likely.
*
* NOTE
*
@@ -1995,6 +2022,7 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (!bfqd->last_completed_rq_bfqq ||
bfqd->last_completed_rq_bfqq == bfqq ||
bfq_bfqq_has_short_ttime(bfqq) ||
+ bfqq->dispatched > 0 ||
now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
bfqd->last_completed_rq_bfqq == bfqq->waker_bfqq)
return;
@@ -2317,9 +2345,9 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+ spin_unlock_irq(&bfqd->lock);
if (free)
blk_mq_free_request(free);
- spin_unlock_irq(&bfqd->lock);
return ret;
}
@@ -2405,7 +2433,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
*next_bfqq = bfq_init_rq(next);
if (!bfqq)
- return;
+ goto remove;
/*
* If next and rq belong to the same bfq_queue and next is older
@@ -2428,6 +2456,14 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
bfqq->next_rq = rq;
bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+remove:
+ /* Merged request may be in the IO scheduler. Remove it. */
+ if (!RB_EMPTY_NODE(&next->rb_node)) {
+ bfq_remove_request(next->q, next);
+ if (next_bfqq)
+ bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
+ next->cmd_flags);
+ }
}
/* Must be called with bfqq != NULL */
@@ -2695,10 +2731,18 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* costly and complicated.
*/
if (unlikely(!bfqd->nonrot_with_queueing)) {
- if (bic->stable_merge_bfqq &&
+ /*
+ * Also make sure that bfqq is sync, because
+ * bic->stable_merge_bfqq may point to some queue (for
+ * stable merging) even if bic is associated with a
+ * sync queue while this bfqq is async
+ */
+ if (bfq_bfqq_sync(bfqq) && bic->stable_merge_bfqq &&
!bfq_bfqq_just_created(bfqq) &&
- time_is_after_jiffies(bfqq->split_time +
- msecs_to_jiffies(200))) {
+ time_is_before_jiffies(bfqq->split_time +
+ msecs_to_jiffies(bfq_late_stable_merging)) &&
+ time_is_before_jiffies(bfqq->creation_time +
+ msecs_to_jiffies(bfq_late_stable_merging))) {
struct bfq_queue *stable_merge_bfqq =
bic->stable_merge_bfqq;
int proc_ref = min(bfqq_process_refs(bfqq),
@@ -5479,7 +5523,7 @@ static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
*/
if (!last_bfqq_created ||
time_before(last_bfqq_created->creation_time +
- bfqd->bfq_burst_interval,
+ msecs_to_jiffies(bfq_activation_stable_merging),
bfqq->creation_time) ||
bfqq->entity.parent != last_bfqq_created->entity.parent ||
bfqq->ioprio != last_bfqq_created->ioprio ||
@@ -5925,14 +5969,16 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct bfq_queue *bfqq;
bool idle_timer_disabled = false;
unsigned int cmd_flags;
+ LIST_HEAD(free);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
bfqg_stats_update_legacy_io(q, rq);
#endif
spin_lock_irq(&bfqd->lock);
- if (blk_mq_sched_try_insert_merge(q, rq)) {
+ if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
spin_unlock_irq(&bfqd->lock);
+ blk_mq_free_requests(&free);
return;
}
@@ -6129,11 +6175,13 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
* of other queues. But a false waker will unjustly steal
* bandwidth to its supposedly woken queue. So considering
* also shared queues in the waking mechanism may cause more
- * control troubles than throughput benefits. Then do not set
- * last_completed_rq_bfqq to bfqq if bfqq is a shared queue.
+ * control troubles than throughput benefits. Then reset
+ * last_completed_rq_bfqq if bfqq is a shared queue.
*/
if (!bfq_bfqq_coop(bfqq))
bfqd->last_completed_rq_bfqq = bfqq;
+ else
+ bfqd->last_completed_rq_bfqq = NULL;
/*
* If we are waiting to discover whether the request pattern
@@ -6376,6 +6424,7 @@ static void bfq_finish_requeue_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd;
+ unsigned long flags;
/*
* rq either is not associated with any icq, or is an already
@@ -6393,39 +6442,15 @@ static void bfq_finish_requeue_request(struct request *rq)
rq->io_start_time_ns,
rq->cmd_flags);
+ spin_lock_irqsave(&bfqd->lock, flags);
if (likely(rq->rq_flags & RQF_STARTED)) {
- unsigned long flags;
-
- spin_lock_irqsave(&bfqd->lock, flags);
-
if (rq == bfqd->waited_rq)
bfq_update_inject_limit(bfqd, bfqq);
bfq_completed_request(bfqq, bfqd);
- bfq_finish_requeue_request_body(bfqq);
-
- spin_unlock_irqrestore(&bfqd->lock, flags);
- } else {
- /*
- * Request rq may be still/already in the scheduler,
- * in which case we need to remove it (this should
- * never happen in case of requeue). And we cannot
- * defer such a check and removal, to avoid
- * inconsistencies in the time interval from the end
- * of this function to the start of the deferred work.
- * This situation seems to occur only in process
- * context, as a consequence of a merge. In the
- * current version of the code, this implies that the
- * lock is held.
- */
-
- if (!RB_EMPTY_NODE(&rq->rb_node)) {
- bfq_remove_request(rq->q, rq);
- bfqg_stats_update_io_remove(bfqq_group(bfqq),
- rq->cmd_flags);
- }
- bfq_finish_requeue_request_body(bfqq);
}
+ bfq_finish_requeue_request_body(bfqq);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
/*
* Reset private fields. In case of a requeue, this allows
diff --git a/block/bio.c b/block/bio.c
index 44205dfb6b60..1fab762e079b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1375,8 +1375,7 @@ static inline bool bio_remaining_done(struct bio *bio)
*
* bio_endio() can be called several times on a bio that has been chained
* using bio_chain(). The ->bi_end_io() function will only be called the
- * last time. At this point the BLK_TA_COMPLETE tracing event will be
- * generated if BIO_TRACE_COMPLETION is set.
+ * last time.
**/
void bio_endio(struct bio *bio)
{
@@ -1389,6 +1388,11 @@ again:
if (bio->bi_bdev)
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
+ if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+ trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
+ bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+ }
+
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
@@ -1402,11 +1406,6 @@ again:
goto again;
}
- if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
- trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
- bio_clear_flag(bio, BIO_TRACE_COMPLETION);
- }
-
blk_throtl_bio_endio(bio);
/* release cgroup info */
bio_uninit(bio);
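
Moving the BIO_TRACE_COMPLETION block above the chained-bio handling means every bio in a chain is traced as it completes, rather than only the bio that finally reaches the end of the endio path. A user-space model of the reordered flow:

#include <stdio.h>

struct bio {
	struct bio *parent;	/* set by bio_chain() */
	int trace_completion;	/* BIO_TRACE_COMPLETION stand-in */
};

static void bio_endio(struct bio *bio)
{
again:
	/* trace first, so chained bios are not skipped */
	if (bio->trace_completion) {
		printf("trace complete: bio %p\n", (void *)bio);
		bio->trace_completion = 0;
	}
	if (bio->parent) {	/* chained: propagate the completion */
		bio = bio->parent;
		goto again;
	}
}

int main(void)
{
	struct bio parent = { .parent = NULL, .trace_completion = 1 };
	struct bio child = { .parent = &parent, .trace_completion = 1 };

	bio_endio(&child);	/* both bios get a completion trace */
	return 0;
}
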
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 582d2f18717e..7b06a5fa3cac 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -31,6 +31,7 @@
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"
+#include "blk-ioprio.h"
/*
* blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
@@ -1183,15 +1184,18 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
- ret = blk_throtl_init(q);
+ ret = blk_iolatency_init(q);
if (ret)
goto err_destroy_all;
- ret = blk_iolatency_init(q);
- if (ret) {
- blk_throtl_exit(q);
+ ret = blk_ioprio_init(q);
+ if (ret)
goto err_destroy_all;
- }
+
+ ret = blk_throtl_init(q);
+ if (ret)
+ goto err_destroy_all;
+
return 0;
err_destroy_all:
@@ -1217,32 +1221,6 @@ void blkcg_exit_queue(struct request_queue *q)
blk_throtl_exit(q);
}
-/*
- * We cannot support shared io contexts, as we have no mean to support
- * two tasks with the same ioc in two different groups without major rework
- * of the main cic data structures. For now we allow a task to change
- * its cgroup only if it's the only owner of its ioc.
- */
-static int blkcg_can_attach(struct cgroup_taskset *tset)
-{
- struct task_struct *task;
- struct cgroup_subsys_state *dst_css;
- struct io_context *ioc;
- int ret = 0;
-
- /* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, dst_css, tset) {
- task_lock(task);
- ioc = task->io_context;
- if (ioc && atomic_read(&ioc->nr_tasks) > 1)
- ret = -EINVAL;
- task_unlock(task);
- if (ret)
- break;
- }
- return ret;
-}
-
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
int i;
@@ -1275,7 +1253,6 @@ struct cgroup_subsys io_cgrp_subsys = {
.css_online = blkcg_css_online,
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
- .can_attach = blkcg_can_attach,
.css_rstat_flush = blkcg_rstat_flush,
.bind = blkcg_bind,
.dfl_cftypes = blkcg_files,
diff --git a/block/blk-core.c b/block/blk-core.c
index 9bcdae93f6d4..514838ccab2d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -599,7 +599,6 @@ fail_q:
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
-EXPORT_SYMBOL(blk_alloc_queue);
/**
* blk_get_queue - increment the request_queue refcount
@@ -1086,15 +1085,6 @@ blk_qc_t submit_bio(struct bio *bio)
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
-
- if (unlikely(block_dump)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
- current->comm, task_pid_nr(current),
- op_is_write(bio_op(bio)) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_iter.bi_sector,
- bio_devname(bio, b), count);
- }
}
/*
@@ -1394,26 +1384,22 @@ void blk_steal_bios(struct bio_list *list, struct request *rq)
EXPORT_SYMBOL_GPL(blk_steal_bios);
/**
- * blk_update_request - Special helper function for request stacking drivers
+ * blk_update_request - Complete multiple bytes without completing the request
* @req: the request being processed
* @error: block status code
- * @nr_bytes: number of bytes to complete @req
+ * @nr_bytes: number of bytes to complete for @req
*
* Description:
* Ends I/O on a number of bytes attached to @req, but doesn't complete
* the request structure even if @req doesn't have leftover.
* If @req has leftover, sets it up for the next range of segments.
*
- * This special helper function is only for request stacking drivers
- * (e.g. request-based dm) so that they can handle partial completion.
- * Actual device drivers should use blk_mq_end_request instead.
- *
* Passing the result of blk_rq_bytes() as @nr_bytes guarantees
* %false return from this function.
*
* Note:
- * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
- * blk_rq_bytes() and in blk_update_request().
+ * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
+ * except in the consistency check at the end of this function.
*
* Return:
* %false - this request doesn't have any more data
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 7942ca6ed321..1002f6c58181 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -219,8 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
unsigned long flags = 0;
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
- blk_account_io_flush(flush_rq);
-
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -230,6 +228,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
return;
}
+ blk_account_io_flush(flush_rq);
/*
* Flush request has to be marked as IDLE when it is really ended
* because its .end_io() is called from timeout code path too for
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
new file mode 100644
index 000000000000..332a07761bf8
--- /dev/null
+++ b/block/blk-ioprio.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Block rq-qos policy for assigning an I/O priority class to requests.
+ *
+ * Using an rq-qos policy for assigning I/O priority class has two advantages
+ * over using the ioprio_set() system call:
+ *
+ * - This policy is cgroup based so it has all the advantages of cgroups.
+ * - While ioprio_set() does not affect page cache writeback I/O, this rq-qos
+ * controller affects page cache writeback I/O for filesystems that support
+ * associating a cgroup with writeback I/O. See also
+ * Documentation/admin-guide/cgroup-v2.rst.
+ */
+
+#include <linux/blk-cgroup.h>
+#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "blk-ioprio.h"
+#include "blk-rq-qos.h"
+
+/**
+ * enum prio_policy - I/O priority class policy.
+ * @POLICY_NO_CHANGE: (default) do not modify the I/O priority class.
+ * @POLICY_NONE_TO_RT: modify IOPRIO_CLASS_NONE into IOPRIO_CLASS_RT.
+ * @POLICY_RESTRICT_TO_BE: modify IOPRIO_CLASS_NONE and IOPRIO_CLASS_RT into
+ * IOPRIO_CLASS_BE.
+ * @POLICY_ALL_TO_IDLE: change the I/O priority class into IOPRIO_CLASS_IDLE.
+ *
+ * See also <linux/ioprio.h>.
+ */
+enum prio_policy {
+ POLICY_NO_CHANGE = 0,
+ POLICY_NONE_TO_RT = 1,
+ POLICY_RESTRICT_TO_BE = 2,
+ POLICY_ALL_TO_IDLE = 3,
+};
+
+static const char *policy_name[] = {
+ [POLICY_NO_CHANGE] = "no-change",
+ [POLICY_NONE_TO_RT] = "none-to-rt",
+ [POLICY_RESTRICT_TO_BE] = "restrict-to-be",
+ [POLICY_ALL_TO_IDLE] = "idle",
+};
+
+static struct blkcg_policy ioprio_policy;
+
+/**
+ * struct ioprio_blkg - Per (cgroup, request queue) data.
+ * @pd: blkg_policy_data structure.
+ */
+struct ioprio_blkg {
+ struct blkg_policy_data pd;
+};
+
+/**
+ * struct ioprio_blkcg - Per cgroup data.
+ * @cpd: blkcg_policy_data structure.
+ * @prio_policy: One of the IOPRIO_CLASS_* values. See also <linux/ioprio.h>.
+ */
+struct ioprio_blkcg {
+ struct blkcg_policy_data cpd;
+ enum prio_policy prio_policy;
+};
+
+static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct ioprio_blkg, pd) : NULL;
+}
+
+static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg)
+{
+ return container_of(blkcg_to_cpd(blkcg, &ioprio_policy),
+ struct ioprio_blkcg, cpd);
+}
+
+static struct ioprio_blkcg *
+ioprio_blkcg_from_css(struct cgroup_subsys_state *css)
+{
+ return blkcg_to_ioprio_blkcg(css_to_blkcg(css));
+}
+
+static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio)
+{
+ struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy);
+
+ if (!pd)
+ return NULL;
+
+ return blkcg_to_ioprio_blkcg(pd->blkg->blkcg);
+}
+
+static int ioprio_show_prio_policy(struct seq_file *sf, void *v)
+{
+ struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf));
+
+ seq_printf(sf, "%s\n", policy_name[blkcg->prio_policy]);
+ return 0;
+}
+
+static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(of_css(of));
+ int ret;
+
+ if (off != 0)
+ return -EIO;
+ /* kernfs_fop_write_iter() terminates 'buf' with '\0'. */
+ ret = sysfs_match_string(policy_name, buf);
+ if (ret < 0)
+ return ret;
+ blkcg->prio_policy = ret;
+
+ return nbytes;
+}
+
+static struct blkg_policy_data *
+ioprio_alloc_pd(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)
+{
+ struct ioprio_blkg *ioprio_blkg;
+
+ ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp);
+ if (!ioprio_blkg)
+ return NULL;
+
+ return &ioprio_blkg->pd;
+}
+
+static void ioprio_free_pd(struct blkg_policy_data *pd)
+{
+ struct ioprio_blkg *ioprio_blkg = pd_to_ioprio(pd);
+
+ kfree(ioprio_blkg);
+}
+
+static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp)
+{
+ struct ioprio_blkcg *blkcg;
+
+ blkcg = kzalloc(sizeof(*blkcg), gfp);
+ if (!blkcg)
+ return NULL;
+ blkcg->prio_policy = POLICY_NO_CHANGE;
+ return &blkcg->cpd;
+}
+
+static void ioprio_free_cpd(struct blkcg_policy_data *cpd)
+{
+ struct ioprio_blkcg *blkcg = container_of(cpd, typeof(*blkcg), cpd);
+
+ kfree(blkcg);
+}
+
+#define IOPRIO_ATTRS \
+ { \
+ .name = "prio.class", \
+ .seq_show = ioprio_show_prio_policy, \
+ .write = ioprio_set_prio_policy, \
+ }, \
+ { } /* sentinel */
+
+/* cgroup v2 attributes */
+static struct cftype ioprio_files[] = {
+ IOPRIO_ATTRS
+};
+
+/* cgroup v1 attributes */
+static struct cftype ioprio_legacy_files[] = {
+ IOPRIO_ATTRS
+};
+
+static struct blkcg_policy ioprio_policy = {
+ .dfl_cftypes = ioprio_files,
+ .legacy_cftypes = ioprio_legacy_files,
+
+ .cpd_alloc_fn = ioprio_alloc_cpd,
+ .cpd_free_fn = ioprio_free_cpd,
+
+ .pd_alloc_fn = ioprio_alloc_pd,
+ .pd_free_fn = ioprio_free_pd,
+};
+
+struct blk_ioprio {
+ struct rq_qos rqos;
+};
+
+static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
+ struct bio *bio)
+{
+ struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);
+
+ /*
+ * Except for IOPRIO_CLASS_NONE, higher I/O priority numbers
+ * correspond to a lower priority. Hence, the max_t() below selects
+ * the lower priority of bi_ioprio and the cgroup I/O priority class.
+ * If the cgroup policy has been set to POLICY_NO_CHANGE == 0, the
+ * bio I/O priority is not modified. If the bio I/O priority equals
+ * IOPRIO_CLASS_NONE, the cgroup I/O priority is assigned to the bio.
+ */
+ bio->bi_ioprio = max_t(u16, bio->bi_ioprio,
+ IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0));
+}
+
+static void blkcg_ioprio_exit(struct rq_qos *rqos)
+{
+ struct blk_ioprio *blkioprio_blkg =
+ container_of(rqos, typeof(*blkioprio_blkg), rqos);
+
+ blkcg_deactivate_policy(rqos->q, &ioprio_policy);
+ kfree(blkioprio_blkg);
+}
+
+static struct rq_qos_ops blkcg_ioprio_ops = {
+ .track = blkcg_ioprio_track,
+ .exit = blkcg_ioprio_exit,
+};
+
+int blk_ioprio_init(struct request_queue *q)
+{
+ struct blk_ioprio *blkioprio_blkg;
+ struct rq_qos *rqos;
+ int ret;
+
+ blkioprio_blkg = kzalloc(sizeof(*blkioprio_blkg), GFP_KERNEL);
+ if (!blkioprio_blkg)
+ return -ENOMEM;
+
+ ret = blkcg_activate_policy(q, &ioprio_policy);
+ if (ret) {
+ kfree(blkioprio_blkg);
+ return ret;
+ }
+
+ rqos = &blkioprio_blkg->rqos;
+ rqos->id = RQ_QOS_IOPRIO;
+ rqos->ops = &blkcg_ioprio_ops;
+ rqos->q = q;
+
+ /*
+ * Registering the rq-qos policy after activating the blk-cgroup
+ * policy guarantees that ioprio_blkcg_from_bio(bio) != NULL in the
+ * rq-qos callbacks.
+ */
+ rq_qos_add(q, rqos);
+
+ return 0;
+}
+
+static int __init ioprio_init(void)
+{
+ return blkcg_policy_register(&ioprio_policy);
+}
+
+static void __exit ioprio_exit(void)
+{
+ blkcg_policy_unregister(&ioprio_policy);
+}
+
+module_init(ioprio_init);
+module_exit(ioprio_exit);
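
The max_t() in blkcg_ioprio_track() works because the ioprio encoding packs the class into the top bits of the value, so a numerically larger value is a lower-or-equal priority class. A standalone check of the arithmetic; the 13-bit class shift matches <linux/ioprio.h>, and reusing the policy number directly as a class number mirrors how enum prio_policy is laid out above:

#include <stdint.h>
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { CLASS_NONE, CLASS_RT, CLASS_BE, CLASS_IDLE };

static uint16_t apply_policy(uint16_t bi_ioprio, int prio_policy)
{
	uint16_t pol = IOPRIO_PRIO_VALUE(prio_policy, 0);

	return bi_ioprio > pol ? bi_ioprio : pol;	/* the max_t(u16, ...) */
}

int main(void)
{
	/* unset bio priority + restrict-to-be policy -> class BE (2) */
	printf("class = %u\n",
	       apply_policy(IOPRIO_PRIO_VALUE(CLASS_NONE, 0), CLASS_BE)
			>> IOPRIO_CLASS_SHIFT);
	/* an idle bio stays idle even under a none-to-rt policy */
	printf("class = %u\n",
	       apply_policy(IOPRIO_PRIO_VALUE(CLASS_IDLE, 0), CLASS_RT)
			>> IOPRIO_CLASS_SHIFT);
	return 0;
}
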
diff --git a/block/blk-ioprio.h b/block/blk-ioprio.h
new file mode 100644
index 000000000000..a7785c2f1aea
--- /dev/null
+++ b/block/blk-ioprio.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_IOPRIO_H_
+#define _BLK_IOPRIO_H_
+
+#include <linux/kconfig.h>
+
+struct request_queue;
+
+#ifdef CONFIG_BLK_CGROUP_IOPRIO
+int blk_ioprio_init(struct request_queue *q);
+#else
+static inline int blk_ioprio_init(struct request_queue *q)
+{
+ return 0;
+}
+#endif
+
+#endif /* _BLK_IOPRIO_H_ */
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 7b256131b20b..9f09beadcbe3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -21,6 +21,7 @@ struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
return new;
}
+EXPORT_SYMBOL_GPL(blk_next_bio);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags,
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4d97fb6dd226..a11b3b53717e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -559,10 +559,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
unsigned int nr_phys_segs)
{
- if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
+ if (blk_integrity_merge_bio(req->q, req, bio) == false)
goto no_merge;
- if (blk_integrity_merge_bio(req->q, req, bio) == false)
+ /* discard request merge won't add new segment */
+ if (req_op(req) == REQ_OP_DISCARD)
+ return 1;
+
+ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
goto no_merge;
/*
@@ -846,18 +850,15 @@ static struct request *attempt_front_merge(struct request_queue *q,
return NULL;
}
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
- struct request *next)
+/*
+ * Try to merge 'next' into 'rq'. Return true if the merge happened, false
+ * otherwise. The caller is responsible for freeing 'next' if the merge
+ * happened.
+ */
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+ struct request *next)
{
- struct request *free;
-
- free = attempt_merge(q, rq, next);
- if (free) {
- blk_put_request(free);
- return 1;
- }
-
- return 0;
+ return attempt_merge(q, rq, next);
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 2a75bc7401df..4b66d2776eda 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -937,6 +937,21 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q)
q->sched_debugfs_dir = NULL;
}
+static const char *rq_qos_id_to_name(enum rq_qos_id id)
+{
+ switch (id) {
+ case RQ_QOS_WBT:
+ return "wbt";
+ case RQ_QOS_LATENCY:
+ return "latency";
+ case RQ_QOS_COST:
+ return "cost";
+ case RQ_QOS_IOPRIO:
+ return "ioprio";
+ }
+ return "unknown";
+}
+
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
debugfs_remove_recursive(rqos->debugfs_dir);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 996a4b2f73aa..c838d81ac058 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -168,9 +168,19 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
* in blk_mq_dispatch_rq_list().
*/
list_add_tail(&rq->queuelist, &rq_list);
+ count++;
if (rq->mq_hctx != hctx)
multi_hctxs = true;
- } while (++count < max_dispatch);
+
+ /*
+ * If we cannot get a tag for the request, stop dequeueing
+ * requests from the IO scheduler. We are unlikely to be able
+ * to submit them anyway and it creates a false impression for
+ * scheduling heuristics that the device can take more IO.
+ */
+ if (!blk_mq_get_driver_tag(rq))
+ break;
+ } while (count < max_dispatch);
if (!count) {
if (run_queue)
@@ -284,8 +294,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
- struct elevator_queue *e = q->elevator;
- const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
+ const bool has_sched = q->elevator;
int ret = 0;
LIST_HEAD(rq_list);
@@ -316,12 +325,12 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
- if (has_sched_dispatch)
+ if (has_sched)
ret = blk_mq_do_dispatch_sched(hctx);
else
ret = blk_mq_do_dispatch_ctx(hctx);
}
- } else if (has_sched_dispatch) {
+ } else if (has_sched) {
ret = blk_mq_do_dispatch_sched(hctx);
} else if (hctx->dispatch_busy) {
/* dequeue request one by one from sw queue if queue is busy */
@@ -390,9 +399,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
return ret;
}
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free)
{
- return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
+ return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
@@ -453,7 +463,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
goto run;
}
- if (e && e->type->ops.insert_requests) {
+ if (e) {
LIST_HEAD(list);
list_add(&rq->queuelist, &list);
@@ -484,9 +494,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
percpu_ref_get(&q->q_usage_counter);
e = hctx->queue->elevator;
- if (e && e->type->ops.insert_requests)
+ if (e) {
e->type->ops.insert_requests(hctx, list, false);
- else {
+ } else {
/*
* try to issue requests directly if the hw queue isn't
* busy in case of 'none' scheduler, and this way may save
@@ -509,11 +519,9 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
-
if (hctx->sched_tags) {
blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
- blk_mq_free_rq_map(hctx->sched_tags, flags);
+ blk_mq_free_rq_map(hctx->sched_tags, set->flags);
hctx->sched_tags = NULL;
}
}
@@ -523,12 +531,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
unsigned int hctx_idx)
{
struct blk_mq_tag_set *set = q->tag_set;
- /* Clear HCTX_SHARED so tags are init'ed */
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
int ret;
hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
- set->reserved_tags, flags);
+ set->reserved_tags, set->flags);
if (!hctx->sched_tags)
return -ENOMEM;
@@ -546,16 +552,50 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- /* Clear HCTX_SHARED so tags are freed */
- unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
-
if (hctx->sched_tags) {
- blk_mq_free_rq_map(hctx->sched_tags, flags);
+ blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
hctx->sched_tags = NULL;
}
}
}
+static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
+{
+ struct blk_mq_tag_set *set = queue->tag_set;
+ int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
+ struct blk_mq_hw_ctx *hctx;
+ int ret, i;
+
+ /*
+ * Set initial depth at max so that we don't need to reallocate for
+ * updating nr_requests.
+ */
+ ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
+ &queue->sched_breserved_tags,
+ MAX_SCHED_RQ, set->reserved_tags,
+ set->numa_node, alloc_policy);
+ if (ret)
+ return ret;
+
+ queue_for_each_hw_ctx(queue, hctx, i) {
+ hctx->sched_tags->bitmap_tags =
+ &queue->sched_bitmap_tags;
+ hctx->sched_tags->breserved_tags =
+ &queue->sched_breserved_tags;
+ }
+
+ sbitmap_queue_resize(&queue->sched_bitmap_tags,
+ queue->nr_requests - set->reserved_tags);
+
+ return 0;
+}
+
+static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
+{
+ sbitmap_queue_free(&queue->sched_bitmap_tags);
+ sbitmap_queue_free(&queue->sched_breserved_tags);
+}
+
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;
@@ -580,12 +620,18 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_sched_alloc_tags(q, hctx, i);
if (ret)
- goto err;
+ goto err_free_tags;
+ }
+
+ if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
+ ret = blk_mq_init_sched_shared_sbitmap(q);
+ if (ret)
+ goto err_free_tags;
}
ret = e->ops.init_sched(q, e);
if (ret)
- goto err;
+ goto err_free_sbitmap;
blk_mq_debugfs_register_sched(q);
@@ -605,7 +651,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
-err:
+err_free_sbitmap:
+ if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
+ blk_mq_exit_sched_shared_sbitmap(q);
+err_free_tags:
blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
@@ -631,6 +680,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
+ unsigned int flags = 0;
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_debugfs_unregister_sched_hctx(hctx);
@@ -638,10 +688,13 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
e->type->ops.exit_hctx(hctx, i);
hctx->sched_data = NULL;
}
+ flags = hctx->flags;
}
blk_mq_debugfs_unregister_sched(q);
if (e->type->ops.exit_sched)
e->type->ops.exit_sched(e);
blk_mq_sched_tags_teardown(q);
+ if (blk_mq_is_sbitmap_shared(flags))
+ blk_mq_exit_sched_shared_sbitmap(q);
q->elevator = NULL;
}
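
The dispatch-loop change earlier in this file (__blk_mq_do_dispatch_sched()) stops dequeuing from the elevator as soon as a driver tag cannot be obtained. A user-space model of that early-stop; tags_available is an illustrative stand-in for the device's tag space:

#include <stdio.h>

static int tags_available = 2;

static int get_driver_tag(void)
{
	if (tags_available == 0)
		return 0;
	tags_available--;
	return 1;
}

int main(void)
{
	int count = 0, max_dispatch = 8;

	do {
		/* dequeue one request from the scheduler ... */
		count++;
		if (!get_driver_tag())
			break;		/* device full: stop dequeuing */
	} while (count < max_dispatch);

	printf("dispatched %d of %d\n", count, max_dispatch);
	return 0;
}
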
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5b18ab915c65..5246ae040704 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -5,13 +5,16 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
+
void blk_mq_sched_assign_ioc(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs);
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2a37731e8244..86f87346232a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
/*
@@ -199,6 +200,20 @@ struct bt_iter_data {
bool reserved;
};
+static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+ unsigned int bitnr)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tags->lock, flags);
+ rq = tags->rqs[bitnr];
+ if (!rq || !refcount_inc_not_zero(&rq->ref))
+ rq = NULL;
+ spin_unlock_irqrestore(&tags->lock, flags);
+ return rq;
+}
+
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_iter_data *iter_data = data;
@@ -206,18 +221,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_tags *tags = hctx->tags;
bool reserved = iter_data->reserved;
struct request *rq;
+ bool ret = true;
if (!reserved)
bitnr += tags->nr_reserved_tags;
- rq = tags->rqs[bitnr];
-
/*
* We can hit rq == NULL here, because the tagging functions
* test and set the bit before assigning ->rqs[].
*/
- if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
- return iter_data->fn(hctx, rq, iter_data->data, reserved);
- return true;
+ rq = blk_mq_find_and_get_req(tags, bitnr);
+ if (!rq)
+ return true;
+
+ if (rq->q == hctx->queue && rq->mq_hctx == hctx)
+ ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
+ blk_mq_put_rq_ref(rq);
+ return ret;
}
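
bt_iter() now resolves the tag slot through blk_mq_find_and_get_req(), which takes a reference under tags->lock: a request racing with its own free is either skipped (refcount already zero) or pinned for the duration of the callback. A user-space model of that gate, with a pthread mutex standing in for tags->lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int ref;
};

static pthread_mutex_t tags_lock = PTHREAD_MUTEX_INITIALIZER;

static int refcount_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	/* only bump the count if it has not already dropped to zero */
	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return 1;
	return 0;
}

static struct request *find_and_get_req(struct request **slot)
{
	struct request *rq;

	pthread_mutex_lock(&tags_lock);
	rq = *slot;
	if (rq && !refcount_inc_not_zero(&rq->ref))
		rq = NULL;		/* already on its way to being freed */
	pthread_mutex_unlock(&tags_lock);
	return rq;
}

int main(void)
{
	struct request live = { .ref = 1 }, dead = { .ref = 0 };
	struct request *slots[] = { &live, &dead, NULL };

	for (int i = 0; i < 3; i++)
		printf("slot %d: %s\n", i,
		       find_and_get_req(&slots[i]) ? "iterated" : "skipped");
	return 0;
}
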
/**
@@ -264,6 +283,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_tags *tags = iter_data->tags;
bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
struct request *rq;
+ bool ret = true;
+ bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
if (!reserved)
bitnr += tags->nr_reserved_tags;
@@ -272,16 +293,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* We can hit rq == NULL here, because the tagging functions
* test and set the bit before assigning ->rqs[].
*/
- if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
+ if (iter_static_rqs)
rq = tags->static_rqs[bitnr];
else
- rq = tags->rqs[bitnr];
+ rq = blk_mq_find_and_get_req(tags, bitnr);
if (!rq)
return true;
- if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
- !blk_mq_request_started(rq))
- return true;
- return iter_data->fn(rq, iter_data->data, reserved);
+
+ if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
+ blk_mq_request_started(rq))
+ ret = iter_data->fn(rq, iter_data->data, reserved);
+ if (!iter_static_rqs)
+ blk_mq_put_rq_ref(rq);
+ return ret;
}
/**
@@ -348,6 +372,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
* indicates whether or not @rq is a reserved request. Return
* true to continue iterating tags, false to stop.
* @priv: Will be passed as second argument to @fn.
+ *
+ * We grab one request reference before calling @fn and release it after
+ * @fn returns.
*/
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
@@ -445,39 +472,54 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
node);
}
-static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
- int node, int alloc_policy)
+int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
+ struct sbitmap_queue *breserved_tags,
+ unsigned int queue_depth, unsigned int reserved,
+ int node, int alloc_policy)
{
- unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ unsigned int depth = queue_depth - reserved;
bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
- if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
+ if (bt_alloc(bitmap_tags, depth, round_robin, node))
return -ENOMEM;
- if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
- round_robin, node))
+ if (bt_alloc(breserved_tags, reserved, round_robin, node))
goto free_bitmap_tags;
+ return 0;
+
+free_bitmap_tags:
+ sbitmap_queue_free(bitmap_tags);
+ return -ENOMEM;
+}
+
+static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
+ int node, int alloc_policy)
+{
+ int ret;
+
+ ret = blk_mq_init_bitmaps(&tags->__bitmap_tags,
+ &tags->__breserved_tags,
+ tags->nr_tags, tags->nr_reserved_tags,
+ node, alloc_policy);
+ if (ret)
+ return ret;
+
tags->bitmap_tags = &tags->__bitmap_tags;
tags->breserved_tags = &tags->__breserved_tags;
return 0;
-free_bitmap_tags:
- sbitmap_queue_free(&tags->__bitmap_tags);
- return -ENOMEM;
}
-int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
+int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set)
{
- unsigned int depth = set->queue_depth - set->reserved_tags;
int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
- bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
- int i, node = set->numa_node;
+ int i, ret;
- if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
- return -ENOMEM;
- if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
- round_robin, node))
- goto free_bitmap_tags;
+ ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags,
+ set->queue_depth, set->reserved_tags,
+ set->numa_node, alloc_policy);
+ if (ret)
+ return ret;
for (i = 0; i < set->nr_hw_queues; i++) {
struct blk_mq_tags *tags = set->tags[i];
@@ -487,9 +529,6 @@ int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
}
return 0;
-free_bitmap_tags:
- sbitmap_queue_free(&set->__bitmap_tags);
- return -ENOMEM;
}
void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
@@ -516,6 +555,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
+ spin_lock_init(&tags->lock);
if (blk_mq_is_sbitmap_shared(flags))
return tags;
@@ -551,8 +591,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
*/
if (tdepth > tags->nr_tags) {
struct blk_mq_tag_set *set = hctx->queue->tag_set;
- /* Only sched tags can grow, so clear HCTX_SHARED flag */
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
struct blk_mq_tags *new;
bool ret;
@@ -563,21 +601,21 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* We need some sort of upper limit, set it high enough that
* no valid use cases should require more.
*/
- if (tdepth > 16 * BLKDEV_MAX_RQ)
+ if (tdepth > MAX_SCHED_RQ)
return -EINVAL;
new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
- tags->nr_reserved_tags, flags);
+ tags->nr_reserved_tags, set->flags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
if (ret) {
- blk_mq_free_rq_map(new, flags);
+ blk_mq_free_rq_map(new, set->flags);
return -ENOMEM;
}
blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
- blk_mq_free_rq_map(*tagsptr, flags);
+ blk_mq_free_rq_map(*tagsptr, set->flags);
*tagsptr = new;
} else {
/*
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 7d3e6b333a4a..8ed55af08427 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -20,17 +20,26 @@ struct blk_mq_tags {
struct request **rqs;
struct request **static_rqs;
struct list_head page_list;
+
+ /*
+ * used to clear the request reference in rqs[] before freeing the
+ * request pool
+ */
+ spinlock_t lock;
};
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
unsigned int reserved_tags,
int node, unsigned int flags);
extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);
+extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
+ struct sbitmap_queue *breserved_tags,
+ unsigned int queue_depth,
+ unsigned int reserved,
+ int node, int alloc_policy);
-extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set,
- unsigned int flags);
+extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set);
extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set);
-
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
unsigned int tag);
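blk_mq_init_bitmaps() is now exposed here so that callers outside blk-mq-tag.c can set up a bitmap/reserved-bitmap pair with unified error handling. A hypothetical caller, with names assumed from the q->sched_bitmap_tags fields referenced later in this diff:

static int example_init_sched_bitmaps(struct request_queue *q,
				      unsigned int depth,
				      unsigned int reserved)
{
	return blk_mq_init_bitmaps(&q->sched_bitmap_tags,
				   &q->sched_breserved_tags,
				   depth, reserved,
				   q->node, BLK_TAG_ALLOC_FIFO);
}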
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c86c01bfecdb..2e9fd0ec63d7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -909,6 +909,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
return false;
}
+void blk_mq_put_rq_ref(struct request *rq)
+{
+ if (is_flush_rq(rq, rq->mq_hctx))
+ rq->end_io(rq, 0);
+ else if (refcount_dec_and_test(&rq->ref))
+ __blk_mq_free_request(rq);
+}
+
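blk_mq_put_rq_ref() is the matching put side for blk_mq_find_and_get_req(): flush requests are special-cased because their lifetime is driven by ->end_io rather than the refcount. How the pair brackets an iterator callback, as an illustrative sketch only (the lookup helper itself is file-local to blk-mq-tag.c):

static bool example_iter_one(struct blk_mq_tags *tags, unsigned int bitnr,
			     bool (*fn)(struct request *))
{
	struct request *rq = blk_mq_find_and_get_req(tags, bitnr);
	bool ret = true;

	if (rq) {
		ret = fn(rq);		/* rq cannot be freed while held */
		blk_mq_put_rq_ref(rq);	/* may free rq, or fire ->end_io */
	}
	return ret;
}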
static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
@@ -942,11 +950,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
- if (is_flush_rq(rq, hctx))
- rq->end_io(rq, 0);
- else if (refcount_dec_and_test(&rq->ref))
- __blk_mq_free_request(rq);
-
+ blk_mq_put_rq_ref(rq);
return true;
}
@@ -1100,7 +1104,7 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
return true;
}
-static bool blk_mq_get_driver_tag(struct request *rq)
+bool blk_mq_get_driver_tag(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -1220,9 +1224,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
unsigned int ewma;
- if (hctx->queue->elevator)
- return;
-
ewma = hctx->dispatch_busy;
if (!ewma && !busy)
@@ -2303,6 +2304,45 @@ queue_exit:
return BLK_QC_T_NONE;
}
+static size_t order_to_size(unsigned int order)
+{
+ return (size_t)PAGE_SIZE << order;
+}
+
+/* called before freeing the request pool in @tags */
+static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
+ struct blk_mq_tags *tags, unsigned int hctx_idx)
+{
+ struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
+ struct page *page;
+ unsigned long flags;
+
+ list_for_each_entry(page, &tags->page_list, lru) {
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long end = start + order_to_size(page->private);
+ int i;
+
+ for (i = 0; i < set->queue_depth; i++) {
+ struct request *rq = drv_tags->rqs[i];
+ unsigned long rq_addr = (unsigned long)rq;
+
+ if (rq_addr >= start && rq_addr < end) {
+ WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+ cmpxchg(&drv_tags->rqs[i], rq, NULL);
+ }
+ }
+ }
+
+ /*
+ * Wait until all pending iterations are done.
+ *
+ * Request references are cleared, and the clearing is guaranteed to be
+ * observed after the ->lock is released.
+ */
+ spin_lock_irqsave(&drv_tags->lock, flags);
+ spin_unlock_irqrestore(&drv_tags->lock, flags);
+}
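The empty lock/unlock pair at the end is deliberate: because bt_iter() only dereferences rqs[] while holding tags->lock, taking and releasing that lock once after the cmpxchg() loop guarantees that no iterator can still be looking at the requests about to be freed. The idiom in isolation, as a sketch:

static void drain_readers(spinlock_t *lock)
{
	unsigned long flags;

	/* all readers hold @lock across their access, so this waits for
	 * every critical section that may have seen the old pointers */
	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);
}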
+
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{
@@ -2321,6 +2361,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
}
}
+ blk_mq_clear_rq_mapping(set, tags, hctx_idx);
+
while (!list_empty(&tags->page_list)) {
page = list_first_entry(&tags->page_list, struct page, lru);
list_del_init(&page->lru);
@@ -2380,11 +2422,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
return tags;
}
-static size_t order_to_size(unsigned int order)
-{
- return (size_t)PAGE_SIZE << order;
-}
-
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, int node)
{
@@ -2603,16 +2640,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
&hctx->cpuhp_dead);
}
+/*
+ * Before freeing the hw queue, clear the flush request reference in
+ * tags->rqs[] to avoid a potential use-after-free.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+ unsigned int queue_depth, struct request *flush_rq)
+{
+ int i;
+ unsigned long flags;
+
+ /* The hw queue may not be mapped yet */
+ if (!tags)
+ return;
+
+ WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+ for (i = 0; i < queue_depth; i++)
+ cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+ /*
+ * Wait until all pending iterations are done.
+ *
+ * Request references are cleared, and the clearing is guaranteed to be
+ * observed after the ->lock is released.
+ */
+ spin_lock_irqsave(&tags->lock, flags);
+ spin_unlock_irqrestore(&tags->lock, flags);
+}
+
/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
+ struct request *flush_rq = hctx->fq->flush_rq;
+
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);
+ blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+ set->queue_depth, flush_rq);
if (set->ops->exit_request)
- set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+ set->ops->exit_request(set, flush_rq, hctx_idx);
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -3042,21 +3112,18 @@ void blk_mq_release(struct request_queue *q)
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
void *queuedata)
{
- struct request_queue *uninit_q, *q;
+ struct request_queue *q;
+ int ret;
- uninit_q = blk_alloc_queue(set->numa_node);
- if (!uninit_q)
+ q = blk_alloc_queue(set->numa_node);
+ if (!q)
return ERR_PTR(-ENOMEM);
- uninit_q->queuedata = queuedata;
-
- /*
- * Initialize the queue without an elevator. device_add_disk() will do
- * the initialization.
- */
- q = blk_mq_init_allocated_queue(set, uninit_q, false);
- if (IS_ERR(q))
- blk_cleanup_queue(uninit_q);
-
+ q->queuedata = queuedata;
+ ret = blk_mq_init_allocated_queue(set, q);
+ if (ret) {
+ blk_cleanup_queue(q);
+ return ERR_PTR(ret);
+ }
return q;
}
EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
@@ -3067,39 +3134,24 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_init_queue);
-/*
- * Helper for setting up a queue with mq ops, given queue depth, and
- * the passed in mq ops flags.
- */
-struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops,
- unsigned int queue_depth,
- unsigned int set_flags)
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
{
struct request_queue *q;
- int ret;
+ struct gendisk *disk;
- memset(set, 0, sizeof(*set));
- set->ops = ops;
- set->nr_hw_queues = 1;
- set->nr_maps = 1;
- set->queue_depth = queue_depth;
- set->numa_node = NUMA_NO_NODE;
- set->flags = set_flags;
-
- ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ q = blk_mq_init_queue_data(set, queuedata);
+ if (IS_ERR(q))
+ return ERR_CAST(q);
- q = blk_mq_init_queue(set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(set);
- return q;
+ disk = __alloc_disk_node(0, set->numa_node);
+ if (!disk) {
+ blk_cleanup_queue(q);
+ return ERR_PTR(-ENOMEM);
}
-
- return q;
+ disk->queue = q;
+ return disk;
}
-EXPORT_SYMBOL(blk_mq_init_sq_queue);
+EXPORT_SYMBOL(__blk_mq_alloc_disk);
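__blk_mq_alloc_disk() folds queue allocation and gendisk allocation into one call for blk-mq drivers. A sketch of assumed driver usage; example_fops is a placeholder, and real callers presumably go through a wrapper macro that is not part of this hunk:

static const struct block_device_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct gendisk *example_alloc(struct blk_mq_tag_set *set)
{
	struct gendisk *disk = __blk_mq_alloc_disk(set, NULL);

	if (IS_ERR(disk))
		return disk;	/* queue or disk allocation failed */
	disk->fops = &example_fops;
	return disk;
}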
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
@@ -3212,9 +3264,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q,
- bool elevator_init)
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q)
{
/* mark the queue as mq asap */
q->mq_ops = set->ops;
@@ -3264,11 +3315,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
-
- if (elevator_init)
- elevator_init_mq(q);
-
- return q;
+ return 0;
err_hctxs:
kfree(q->queue_hw_ctx);
@@ -3279,7 +3326,7 @@ err_poll:
q->poll_cb = NULL;
err_exit:
q->mq_ops = NULL;
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
@@ -3491,7 +3538,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (blk_mq_is_sbitmap_shared(set->flags)) {
atomic_set(&set->active_queues_shared_sbitmap, 0);
- if (blk_mq_init_shared_sbitmap(set, set->flags)) {
+ if (blk_mq_init_shared_sbitmap(set)) {
ret = -ENOMEM;
goto out_free_mq_rq_maps;
}
@@ -3516,6 +3563,22 @@ out_free_mq_map:
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
+/* allocate and initialize a tagset for a simple single-queue device */
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags)
+{
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->nr_hw_queues = 1;
+ set->nr_maps = 1;
+ set->queue_depth = queue_depth;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = set_flags;
+ return blk_mq_alloc_tag_set(set);
+}
+EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
+
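blk_mq_alloc_sq_tag_set() replaces the open-coded tag-set setup that blk_mq_init_sq_queue() used to perform. A hypothetical single-queue driver would use it as below; the queue_rq stub completes requests immediately, null-blk style:

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);	/* complete immediately */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq = example_queue_rq,
};

static int example_setup(struct blk_mq_tag_set *set)
{
	return blk_mq_alloc_sq_tag_set(set, &example_mq_ops, 64,
				       BLK_MQ_F_SHOULD_MERGE);
}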
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
int i, j;
@@ -3567,15 +3630,24 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
} else {
ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
nr, true);
+ if (blk_mq_is_sbitmap_shared(set->flags)) {
+ hctx->sched_tags->bitmap_tags =
+ &q->sched_bitmap_tags;
+ hctx->sched_tags->breserved_tags =
+ &q->sched_breserved_tags;
+ }
}
if (ret)
break;
if (q->elevator && q->elevator->type->ops.depth_updated)
q->elevator->type->ops.depth_updated(hctx);
}
-
- if (!ret)
+ if (!ret) {
q->nr_requests = nr;
+ if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
+ sbitmap_queue_resize(&q->sched_bitmap_tags,
+ nr - set->reserved_tags);
+ }
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
@@ -3886,7 +3958,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
struct blk_mq_hw_ctx *hctx;
- long state;
+ unsigned int state;
if (!blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -3910,7 +3982,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
hctx->poll_considered++;
- state = current->state;
+ state = get_current_state();
do {
int ret;
@@ -3926,7 +3998,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
if (signal_pending_state(state, current))
__set_current_state(TASK_RUNNING);
- if (current->state == TASK_RUNNING)
+ if (task_is_running(current))
return 1;
if (ret < 0 || !spin)
break;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9ce64bc4a6c8..d08779f77a26 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -47,6 +47,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
+void blk_mq_put_rq_ref(struct request *rq);
/*
* Internal helpers for allocating/freeing the request map
@@ -259,6 +260,8 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
+bool blk_mq_get_driver_tag(struct request *rq);
+
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
int cpu;
@@ -299,6 +302,17 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
return NULL;
}
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct request *rq = list_entry_rq(list->next);
+
+ list_del_init(&rq->queuelist);
+ blk_mq_free_request(rq);
+ }
+}
+
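This helper pairs with the elevator change later in this series, where elv_attempt_insert_merge() parks merged-away requests on a caller-supplied list instead of freeing them under queue locks. A sketch of the intended call pattern, with assumed names:

static bool example_insert_or_merge(struct request_queue *q,
				    struct request *rq)
{
	LIST_HEAD(free);
	bool merged = elv_attempt_insert_merge(q, rq, &free);

	/* on success, @rq (and any further merge victims) sit on @free */
	blk_mq_free_requests(&free);	/* free outside of any lock */
	return merged;
}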
/*
* For shared tag users, we track the number of currently active users
* and attempt to provide a fair share of the tag depth for each of them.
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 656460636ad3..e83af7bc7591 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
return;
- prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
- has_sleeper = !wq_has_single_sleeper(&rqw->wait);
+ has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+ TASK_UNINTERRUPTIBLE);
do {
/* The memory barrier in set_task_state saves us here. */
if (data.got_token)
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 2bc43e94f4c4..f000f83e0621 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -7,6 +7,7 @@
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
+#include <linux/blk-mq.h>
#include "blk-mq-debugfs.h"
@@ -16,6 +17,7 @@ enum rq_qos_id {
RQ_QOS_WBT,
RQ_QOS_LATENCY,
RQ_QOS_COST,
+ RQ_QOS_IOPRIO,
};
struct rq_wait {
@@ -78,19 +80,6 @@ static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
return rq_qos_id(q, RQ_QOS_LATENCY);
}
-static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
-{
- switch (id) {
- case RQ_QOS_WBT:
- return "wbt";
- case RQ_QOS_LATENCY:
- return "latency";
- case RQ_QOS_COST:
- return "cost";
- }
- return "unknown";
-}
-
static inline void rq_wait_init(struct rq_wait *rq_wait)
{
atomic_set(&rq_wait->inflight, 0);
@@ -99,8 +88,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
+ /*
+ * No IO can be in flight when adding rqos, so freeze the queue,
+ * which is fine since rq_qos is only supported for blk-mq queues.
+ *
+ * Reuse ->queue_lock to protect against other concurrent
+ * rq_qos additions and deletions.
+ */
+ blk_mq_freeze_queue(q);
+
+ spin_lock_irq(&q->queue_lock);
rqos->next = q->rq_qos;
q->rq_qos = rqos;
+ spin_unlock_irq(&q->queue_lock);
+
+ blk_mq_unfreeze_queue(q);
if (rqos->ops->debugfs_attrs)
blk_mq_debugfs_register_rqos(rqos);
@@ -110,12 +112,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
struct rq_qos **cur;
+ /*
+ * See the comment in rq_qos_add() about freezing the queue and using
+ * ->queue_lock.
+ */
+ blk_mq_freeze_queue(q);
+
+ spin_lock_irq(&q->queue_lock);
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
if (*cur == rqos) {
*cur = rqos->next;
break;
}
}
+ spin_unlock_irq(&q->queue_lock);
+
+ blk_mq_unfreeze_queue(q);
blk_mq_debugfs_unregister_rqos(rqos);
}
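Condensed, the protection scheme added to rq_qos_add()/rq_qos_del() is: freeze the queue so that no request can observe a half-linked rqos, and serialize mutators with ->queue_lock. As a sketch:

static void example_mutate_rqos(struct request_queue *q,
				void (*mutate)(struct request_queue *))
{
	blk_mq_freeze_queue(q);		/* no requests in flight */
	spin_lock_irq(&q->queue_lock);	/* no concurrent add/del */
	mutate(q);
	spin_unlock_irq(&q->queue_lock);
	blk_mq_unfreeze_queue(q);
}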
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e03bedf180ab..370d83c18057 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -91,7 +91,7 @@ static ssize_t queue_ra_show(struct request_queue *q, char *page)
unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_SHIFT - 10);
- return queue_var_show(ra_kb, (page));
+ return queue_var_show(ra_kb, page);
}
static ssize_t
@@ -112,28 +112,28 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
int max_sectors_kb = queue_max_sectors(q) >> 1;
- return queue_var_show(max_sectors_kb, (page));
+ return queue_var_show(max_sectors_kb, page);
}
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
- return queue_var_show(queue_max_segments(q), (page));
+ return queue_var_show(queue_max_segments(q), page);
}
static ssize_t queue_max_discard_segments_show(struct request_queue *q,
char *page)
{
- return queue_var_show(queue_max_discard_segments(q), (page));
+ return queue_var_show(queue_max_discard_segments(q), page);
}
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
- return queue_var_show(q->limits.max_integrity_segments, (page));
+ return queue_var_show(q->limits.max_integrity_segments, page);
}
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- return queue_var_show(queue_max_segment_size(q), (page));
+ return queue_var_show(queue_max_segment_size(q), page);
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -261,12 +261,12 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
- return queue_var_show(max_hw_sectors_kb, (page));
+ return queue_var_show(max_hw_sectors_kb, page);
}
static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
- return queue_var_show(q->limits.virt_boundary_mask, (page));
+ return queue_var_show(q->limits.virt_boundary_mask, page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
@@ -866,20 +866,6 @@ int blk_register_queue(struct gendisk *disk)
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- /*
- * SCSI probing may synchronously create and destroy a lot of
- * request_queues for non-existent devices. Shutting down a fully
- * functional queue takes measurable wallclock time as RCU grace
- * periods are involved. To avoid excessive latency in these
- * cases, a request_queue starts out in a degraded mode which is
- * faster to shut down and is made fully functional here as
- * request_queues for non-existent devices never get registered.
- */
- if (!blk_queue_init_done(q)) {
- blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
- percpu_ref_switch_to_percpu(&q->q_usage_counter);
- }
-
blk_queue_update_readahead(q);
ret = blk_trace_init_sysfs(dev);
@@ -938,6 +924,21 @@ int blk_register_queue(struct gendisk *disk)
ret = 0;
unlock:
mutex_unlock(&q->sysfs_dir_lock);
+
+ /*
+ * SCSI probing may synchronously create and destroy a lot of
+ * request_queues for non-existent devices. Shutting down a fully
+ * functional queue takes measurable wallclock time as RCU grace
+ * periods are involved. To avoid excessive latency in these
+ * cases, a request_queue starts out in a degraded mode which is
+ * faster to shut down and is made fully functional here as
+ * request_queues for non-existent devices never get registered.
+ */
+ if (!blk_queue_init_done(q)) {
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+ percpu_ref_switch_to_percpu(&q->q_usage_counter);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 42aed0160f86..3ed71b8da887 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -77,7 +77,8 @@ enum {
static inline bool rwb_enabled(struct rq_wb *rwb)
{
- return rwb && rwb->wb_normal != 0;
+ return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
+ rwb->wb_normal != 0;
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@@ -563,7 +564,6 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
}
/*
- * Returns true if the IO request should be accounted, false if not.
* May sleep, if we have exceeded the writeback limits. Caller can pass
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
@@ -636,9 +636,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
void wbt_enable_default(struct request_queue *q)
{
struct rq_qos *rqos = wbt_rq_qos(q);
+
/* Throttling already enabled? */
- if (rqos)
+ if (rqos) {
+ if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+ RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
return;
+ }
/* Queue not registered? Maybe shutting down... */
if (!blk_queue_registered(q))
@@ -702,7 +706,7 @@ void wbt_disable_default(struct request_queue *q)
rwb = RQWB(rqos);
if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
blk_stat_deactivate(rwb->cb);
- rwb->wb_normal = 0;
+ rwb->enable_state = WBT_STATE_OFF_DEFAULT;
}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 16bdc85b8df9..2eb01becde8c 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -34,6 +34,7 @@ enum {
enum {
WBT_STATE_ON_DEFAULT = 1,
WBT_STATE_ON_MANUAL = 2,
+ WBT_STATE_OFF_DEFAULT
};
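With WBT_STATE_OFF_DEFAULT, enable_state becomes a small state machine instead of wbt_disable_default() zeroing wb_normal, which lost the computed limits and broke later re-enables. A standalone model of the transitions, sketch only and not kernel code:

enum wbt_enable_state {
	WBT_STATE_ON_DEFAULT = 1,	/* enabled by the kernel */
	WBT_STATE_ON_MANUAL = 2,	/* enabled via sysfs */
	WBT_STATE_OFF_DEFAULT,		/* disabled by the kernel, rqos kept */
};

/* wbt_disable_default(): only kernel-enabled instances are turned off */
static void model_disable_default(enum wbt_enable_state *s)
{
	if (*s == WBT_STATE_ON_DEFAULT)
		*s = WBT_STATE_OFF_DEFAULT;
}

/* wbt_enable_default(): re-arm an instance that was defaulted off */
static void model_enable_default(enum wbt_enable_state *s)
{
	if (*s == WBT_STATE_OFF_DEFAULT)
		*s = WBT_STATE_ON_DEFAULT;
}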
struct rq_wb {
diff --git a/block/blk.h b/block/blk.h
index 8b3591aee0a5..4b885c0f6708 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -192,7 +192,6 @@ void blk_account_io_done(struct request *req, u64 now);
void blk_insert_flush(struct request *rq);
-void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
@@ -225,7 +224,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
@@ -343,8 +342,8 @@ static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif
-int blk_alloc_devt(struct block_device *part, dev_t *devt);
-void blk_free_devt(dev_t devt);
+int blk_alloc_ext_minor(void);
+void blk_free_ext_minor(unsigned int minor);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE 0
#define ADDPART_FLAG_RAID 1
@@ -359,4 +358,14 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
+struct request_queue *blk_alloc_queue(int node_id);
+
+void disk_alloc_events(struct gendisk *disk);
+void disk_add_events(struct gendisk *disk);
+void disk_del_events(struct gendisk *disk);
+void disk_release_events(struct gendisk *disk);
+extern struct device_attribute dev_attr_events;
+extern struct device_attribute dev_attr_events_async;
+extern struct device_attribute dev_attr_events_poll_msecs;
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/disk-events.c b/block/disk-events.c
new file mode 100644
index 000000000000..a75931ff5da4
--- /dev/null
+++ b/block/disk-events.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Disk events - monitor disk events like media change and eject request.
+ */
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <linux/genhd.h>
+#include "blk.h"
+
+struct disk_events {
+ struct list_head node; /* all disk_event's */
+ struct gendisk *disk; /* the associated disk */
+ spinlock_t lock;
+
+ struct mutex block_mutex; /* protects blocking */
+ int block; /* event blocking depth */
+ unsigned int pending; /* events already sent out */
+ unsigned int clearing; /* events being cleared */
+
+ long poll_msecs; /* interval, -1 for default */
+ struct delayed_work dwork;
+};
+
+static const char *disk_events_strs[] = {
+ [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
+ [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
+};
+
+static char *disk_uevents[] = {
+ [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
+ [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
+};
+
+/* list of all disk_events */
+static DEFINE_MUTEX(disk_events_mutex);
+static LIST_HEAD(disk_events);
+
+/* disable in-kernel polling by default */
+static unsigned long disk_events_dfl_poll_msecs;
+
+static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
+{
+ struct disk_events *ev = disk->ev;
+ long intv_msecs = 0;
+
+ /*
+ * If device-specific poll interval is set, always use it. If
+ * the default is being used, poll if the POLL flag is set.
+ */
+ if (ev->poll_msecs >= 0)
+ intv_msecs = ev->poll_msecs;
+ else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
+ intv_msecs = disk_events_dfl_poll_msecs;
+
+ return msecs_to_jiffies(intv_msecs);
+}
+
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events(). Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
+{
+ struct disk_events *ev = disk->ev;
+ unsigned long flags;
+ bool cancel;
+
+ if (!ev)
+ return;
+
+ /*
+ * Outer mutex ensures that the first blocker completes canceling
+ * the event work before further blockers are allowed to finish.
+ */
+ mutex_lock(&ev->block_mutex);
+
+ spin_lock_irqsave(&ev->lock, flags);
+ cancel = !ev->block++;
+ spin_unlock_irqrestore(&ev->lock, flags);
+
+ if (cancel)
+ cancel_delayed_work_sync(&disk->ev->dwork);
+
+ mutex_unlock(&ev->block_mutex);
+}
+
+static void __disk_unblock_events(struct gendisk *disk, bool check_now)
+{
+ struct disk_events *ev = disk->ev;
+ unsigned long intv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ev->lock, flags);
+
+ if (WARN_ON_ONCE(ev->block <= 0))
+ goto out_unlock;
+
+ if (--ev->block)
+ goto out_unlock;
+
+ intv = disk_events_poll_jiffies(disk);
+ if (check_now)
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
+ else if (intv)
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
+out_unlock:
+ spin_unlock_irqrestore(&ev->lock, flags);
+}
+
+/**
+ * disk_unblock_events - unblock disk event checking
+ * @disk: disk to unblock events for
+ *
+ * Undo disk_block_events(). When the block count reaches zero, it
+ * starts events polling if configured.
+ *
+ * CONTEXT:
+ * Don't care. Safe to call from irq context.
+ */
+void disk_unblock_events(struct gendisk *disk)
+{
+ if (disk->ev)
+ __disk_unblock_events(disk, false);
+}
+
+/**
+ * disk_flush_events - schedule immediate event checking and flushing
+ * @disk: disk to check and flush events for
+ * @mask: events to flush
+ *
+ * Schedule immediate event checking on @disk if not blocked. Events in
+ * @mask are scheduled to be cleared from the driver. Note that this
+ * doesn't clear the events from @disk->ev.
+ *
+ * CONTEXT:
+ * If @mask is non-zero, this must be called with disk->open_mutex held.
+ */
+void disk_flush_events(struct gendisk *disk, unsigned int mask)
+{
+ struct disk_events *ev = disk->ev;
+
+ if (!ev)
+ return;
+
+ spin_lock_irq(&ev->lock);
+ ev->clearing |= mask;
+ if (!ev->block)
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
+ spin_unlock_irq(&ev->lock);
+}
+
+static void disk_check_events(struct disk_events *ev,
+ unsigned int *clearing_ptr)
+{
+ struct gendisk *disk = ev->disk;
+ char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+ unsigned int clearing = *clearing_ptr;
+ unsigned int events;
+ unsigned long intv;
+ int nr_events = 0, i;
+
+ /* check events */
+ events = disk->fops->check_events(disk, clearing);
+
+ /* accumulate pending events and schedule next poll if necessary */
+ spin_lock_irq(&ev->lock);
+
+ events &= ~ev->pending;
+ ev->pending |= events;
+ *clearing_ptr &= ~clearing;
+
+ intv = disk_events_poll_jiffies(disk);
+ if (!ev->block && intv)
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
+
+ spin_unlock_irq(&ev->lock);
+
+ /*
+ * Tell userland about new events. Only the events listed in
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
+ */
+ for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+ if ((events & disk->events & (1 << i)) &&
+ (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ envp[nr_events++] = disk_uevents[i];
+
+ if (nr_events)
+ kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
+/**
+ * disk_clear_events - synchronously check, clear and return pending events
+ * @disk: disk to fetch and clear events from
+ * @mask: mask of events to be fetched and cleared
+ *
+ * Disk events are synchronously checked and pending events in @mask
+ * are cleared and returned. This ignores the block count.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
+{
+ struct disk_events *ev = disk->ev;
+ unsigned int pending;
+ unsigned int clearing = mask;
+
+ if (!ev)
+ return 0;
+
+ disk_block_events(disk);
+
+ /*
+ * store the union of mask and ev->clearing on the stack so that the
+ * race with disk_flush_events does not cause ambiguity (ev->clearing
+ * can still be modified even if events are blocked).
+ */
+ spin_lock_irq(&ev->lock);
+ clearing |= ev->clearing;
+ ev->clearing = 0;
+ spin_unlock_irq(&ev->lock);
+
+ disk_check_events(ev, &clearing);
+ /*
+ * if ev->clearing is not 0, the disk_flush_events got called in the
+ * middle of this function, so we want to run the workfn without delay.
+ */
+ __disk_unblock_events(disk, ev->clearing ? true : false);
+
+ /* then, fetch and clear pending events */
+ spin_lock_irq(&ev->lock);
+ pending = ev->pending & mask;
+ ev->pending &= ~mask;
+ spin_unlock_irq(&ev->lock);
+ WARN_ON_ONCE(clearing & mask);
+
+ return pending;
+}
+
+/**
+ * bdev_check_media_change - check if removable media has been changed
+ * @bdev: block device to check
+ *
+ * Check whether removable media has been changed; if so, attempt to free
+ * all dentries and inodes and invalidate all block device page cache
+ * entries.
+ *
+ * Returns %true if the block device changed, or %false if not.
+ */
+bool bdev_check_media_change(struct block_device *bdev)
+{
+ unsigned int events;
+
+ events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE |
+ DISK_EVENT_EJECT_REQUEST);
+ if (!(events & DISK_EVENT_MEDIA_CHANGE))
+ return false;
+
+ if (__invalidate_device(bdev, true))
+ pr_warn("VFS: busy inodes on changed media %s\n",
+ bdev->bd_disk->disk_name);
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ return true;
+}
+EXPORT_SYMBOL(bdev_check_media_change);
+
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
+static void disk_events_workfn(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+ disk_check_events(ev, &ev->clearing);
+}
+
+/*
+ * A disk events enabled device has the following sysfs nodes under
+ * its /sys/block/X/ directory.
+ *
+ * events : list of all supported events
+ * events_async : list of events which can be detected w/o polling
+ * (always empty, only for backwards compatibility)
+ * events_poll_msecs : polling interval, 0: disable, -1: system default
+ */
+static ssize_t __disk_events_show(unsigned int events, char *buf)
+{
+ const char *delim = "";
+ ssize_t pos = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
+ if (events & (1 << i)) {
+ pos += sprintf(buf + pos, "%s%s",
+ delim, disk_events_strs[i]);
+ delim = " ";
+ }
+ if (pos)
+ pos += sprintf(buf + pos, "\n");
+ return pos;
+}
+
+static ssize_t disk_events_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+ return __disk_events_show(disk->events, buf);
+}
+
+static ssize_t disk_events_async_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+static ssize_t disk_events_poll_msecs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+ return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
+}
+
+static ssize_t disk_events_poll_msecs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ long intv;
+
+ if (!count || !sscanf(buf, "%ld", &intv))
+ return -EINVAL;
+
+ if (intv < 0 && intv != -1)
+ return -EINVAL;
+
+ if (!disk->ev)
+ return -ENODEV;
+
+ disk_block_events(disk);
+ disk->ev->poll_msecs = intv;
+ __disk_unblock_events(disk, true);
+ return count;
+}
+
+DEVICE_ATTR(events, 0444, disk_events_show, NULL);
+DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
+DEVICE_ATTR(events_poll_msecs, 0644, disk_events_poll_msecs_show,
+ disk_events_poll_msecs_store);
+
+/*
+ * The default polling interval can be specified by the kernel
+ * parameter block.events_dfl_poll_msecs which defaults to 0
+ * (disable). This can also be modified at runtime by writing to
+ * /sys/module/block/parameters/events_dfl_poll_msecs.
+ */
+static int disk_events_set_dfl_poll_msecs(const char *val,
+ const struct kernel_param *kp)
+{
+ struct disk_events *ev;
+ int ret;
+
+ ret = param_set_ulong(val, kp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&disk_events_mutex);
+ list_for_each_entry(ev, &disk_events, node)
+ disk_flush_events(ev->disk, 0);
+ mutex_unlock(&disk_events_mutex);
+ return 0;
+}
+
+static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
+ .set = disk_events_set_dfl_poll_msecs,
+ .get = param_get_ulong,
+};
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "block."
+
+module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
+ &disk_events_dfl_poll_msecs, 0644);
+
+/*
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
+ */
+void disk_alloc_events(struct gendisk *disk)
+{
+ struct disk_events *ev;
+
+ if (!disk->fops->check_events || !disk->events)
+ return;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev) {
+ pr_warn("%s: failed to initialize events\n", disk->disk_name);
+ return;
+ }
+
+ INIT_LIST_HEAD(&ev->node);
+ ev->disk = disk;
+ spin_lock_init(&ev->lock);
+ mutex_init(&ev->block_mutex);
+ ev->block = 1;
+ ev->poll_msecs = -1;
+ INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
+
+ disk->ev = ev;
+}
+
+void disk_add_events(struct gendisk *disk)
+{
+ if (!disk->ev)
+ return;
+
+ mutex_lock(&disk_events_mutex);
+ list_add_tail(&disk->ev->node, &disk_events);
+ mutex_unlock(&disk_events_mutex);
+
+ /*
+ * Block count is initialized to 1 and the following initial
+ * unblock kicks it into action.
+ */
+ __disk_unblock_events(disk, true);
+}
+
+void disk_del_events(struct gendisk *disk)
+{
+ if (disk->ev) {
+ disk_block_events(disk);
+
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
+}
+
+void disk_release_events(struct gendisk *disk)
+{
+ /* the block count should be 1 from disk_del_events() */
+ WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
+ kfree(disk->ev);
+}
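The file above is a mostly verbatim move of the disk-events code out of genhd.c; the behavioral detail worth keeping in mind is the nesting block count, initialized to 1 and kicked into action by disk_add_events(). Assumed driver-side usage of the unchanged API:

static void example_media_op(struct gendisk *disk)
{
	disk_block_events(disk);	/* flush + pause event checking */
	/* ... perform an operation that would confuse media polling ... */
	disk_unblock_events(disk);	/* may re-arm the delayed work */
}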
diff --git a/block/elevator.c b/block/elevator.c
index 440699c28119..52ada14cfe45 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -350,9 +350,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
* we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards.
*
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
*/
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free)
{
struct request *__rq;
bool ret;
@@ -363,8 +365,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
/*
* First try one-hit cache.
*/
- if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+ list_add(&rq->queuelist, free);
return true;
+ }
if (blk_queue_noxmerges(q))
return false;
@@ -378,6 +382,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break;
+ list_add(&rq->queuelist, free);
/* The merged request could be merged with others, try again */
ret = true;
rq = __rq;
@@ -522,6 +527,10 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
+ /* insert_requests and dispatch_request are mandatory */
+ if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
+ return -EINVAL;
+
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
@@ -693,7 +702,7 @@ void elevator_init_mq(struct request_queue *q)
elevator_put(e);
}
}
-
+EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
/*
* switch to new_e io scheduler. be careful not to introduce deadlocks -
diff --git a/block/genhd.c b/block/genhd.c
index 9f8cb7beaad1..79aa40b4c39c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -33,13 +33,6 @@ static struct kobject *block_depr;
#define NR_EXT_DEVT (1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);
-static void disk_check_events(struct disk_events *ev,
- unsigned int *clearing_ptr);
-static void disk_alloc_events(struct gendisk *disk);
-static void disk_add_events(struct gendisk *disk);
-static void disk_del_events(struct gendisk *disk);
-static void disk_release_events(struct gendisk *disk);
-
void set_capacity(struct gendisk *disk, sector_t sectors)
{
struct block_device *bdev = disk->part0;
@@ -333,52 +326,22 @@ static int blk_mangle_minor(int minor)
return minor;
}
-/**
- * blk_alloc_devt - allocate a dev_t for a block device
- * @bdev: block device to allocate dev_t for
- * @devt: out parameter for resulting dev_t
- *
- * Allocate a dev_t for block device.
- *
- * RETURNS:
- * 0 on success, allocated dev_t is returned in *@devt. -errno on
- * failure.
- *
- * CONTEXT:
- * Might sleep.
- */
-int blk_alloc_devt(struct block_device *bdev, dev_t *devt)
+int blk_alloc_ext_minor(void)
{
- struct gendisk *disk = bdev->bd_disk;
int idx;
- /* in consecutive minor range? */
- if (bdev->bd_partno < disk->minors) {
- *devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
- return 0;
- }
-
idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
- if (idx < 0)
- return idx == -ENOSPC ? -EBUSY : idx;
-
- *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
- return 0;
+ if (idx < 0) {
+ if (idx == -ENOSPC)
+ return -EBUSY;
+ return idx;
+ }
+ return blk_mangle_minor(idx);
}
-/**
- * blk_free_devt - free a dev_t
- * @devt: dev_t to free
- *
- * Free @devt which was allocated using blk_alloc_devt().
- *
- * CONTEXT:
- * Might sleep.
- */
-void blk_free_devt(dev_t devt)
+void blk_free_ext_minor(unsigned int minor)
{
- if (MAJOR(devt) == BLOCK_EXT_MAJOR)
- ida_free(&ext_devt_ida, blk_mangle_minor(MINOR(devt)));
+ ida_free(&ext_devt_ida, blk_mangle_minor(minor));
}
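blk_alloc_ext_minor()/blk_free_ext_minor() now traffic in raw minor numbers, so building and tearing down the dev_t becomes the caller's job. Sketch of the new pairing:

static int example_get_devt(dev_t *devt)
{
	int minor = blk_alloc_ext_minor();

	if (minor < 0)
		return minor;
	*devt = MKDEV(BLOCK_EXT_MAJOR, minor);
	return 0;
}

static void example_put_devt(dev_t devt)
{
	blk_free_ext_minor(MINOR(devt));
}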
static char *bdevt_str(dev_t devt, char *buf)
@@ -499,8 +462,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups,
bool register_queue)
{
- dev_t devt;
- int retval;
+ int ret;
/*
* The disk queue should now be all set with enough information about
@@ -511,23 +473,35 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
if (register_queue)
elevator_init_mq(disk->queue);
- /* minors == 0 indicates to use ext devt from part0 and should
- * be accompanied with EXT_DEVT flag. Make sure all
- * parameters make sense.
+ /*
+ * If the driver provides an explicit major number, it must also provide
+ * the number of minor numbers supported, and those will be used to
+ * set up the gendisk.
+ * Otherwise just allocate the device numbers for both the whole device
+ * and all partitions from the extended dev_t space.
*/
- WARN_ON(disk->minors && !(disk->major || disk->first_minor));
- WARN_ON(!disk->minors &&
- !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));
+ if (disk->major) {
+ WARN_ON(!disk->minors);
- disk->flags |= GENHD_FL_UP;
+ if (disk->minors > DISK_MAX_PARTS) {
+ pr_err("block: can't allocate more than %d partitions\n",
+ DISK_MAX_PARTS);
+ disk->minors = DISK_MAX_PARTS;
+ }
+ } else {
+ WARN_ON(disk->minors);
- retval = blk_alloc_devt(disk->part0, &devt);
- if (retval) {
- WARN_ON(1);
- return;
+ ret = blk_alloc_ext_minor();
+ if (ret < 0) {
+ WARN_ON(1);
+ return;
+ }
+ disk->major = BLOCK_EXT_MAJOR;
+ disk->first_minor = MINOR(ret);
+ disk->flags |= GENHD_FL_EXT_DEVT;
}
- disk->major = MAJOR(devt);
- disk->first_minor = MINOR(devt);
+
+ disk->flags |= GENHD_FL_UP;
disk_alloc_events(disk);
@@ -541,14 +515,14 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
} else {
struct backing_dev_info *bdi = disk->queue->backing_dev_info;
struct device *dev = disk_to_dev(disk);
- int ret;
/* Register BDI before referencing it from bdev */
- dev->devt = devt;
- ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
+ dev->devt = MKDEV(disk->major, disk->first_minor);
+ ret = bdi_register(bdi, "%u:%u",
+ disk->major, disk->first_minor);
WARN_ON(ret);
bdi_set_owner(bdi, dev);
- bdev_add(disk->part0, devt);
+ bdev_add(disk->part0, dev->devt);
}
register_disk(parent, disk, groups);
if (register_queue)
@@ -558,7 +532,10 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
* Take an extra ref on queue which will be put on disk_release()
* so that it sticks around as long as @disk is there.
*/
- WARN_ON_ONCE(!blk_get_queue(disk->queue));
+ if (blk_get_queue(disk->queue))
+ set_bit(GD_QUEUE_REF, &disk->state);
+ else
+ WARN_ON_ONCE(1);
disk_add_events(disk);
blk_integrity_add(disk);
@@ -607,10 +584,10 @@ void del_gendisk(struct gendisk *disk)
blk_integrity_del(disk);
disk_del_events(disk);
- mutex_lock(&disk->part0->bd_mutex);
+ mutex_lock(&disk->open_mutex);
disk->flags &= ~GENHD_FL_UP;
blk_drop_partitions(disk);
- mutex_unlock(&disk->part0->bd_mutex);
+ mutex_unlock(&disk->open_mutex);
fsync_bdev(disk->part0);
__invalidate_device(disk->part0, true);
@@ -692,32 +669,6 @@ void blk_request_module(dev_t devt)
request_module("block-major-%d", MAJOR(devt));
}
-/**
- * bdget_disk - do bdget() by gendisk and partition number
- * @disk: gendisk of interest
- * @partno: partition number
- *
- * Find partition @partno from @disk, do bdget() on it.
- *
- * CONTEXT:
- * Don't care.
- *
- * RETURNS:
- * Resulting block_device on success, NULL on failure.
- */
-struct block_device *bdget_disk(struct gendisk *disk, int partno)
-{
- struct block_device *bdev = NULL;
-
- rcu_read_lock();
- bdev = xa_load(&disk->part_tbl, partno);
- if (bdev && !bdgrab(bdev))
- bdev = NULL;
- rcu_read_unlock();
-
- return bdev;
-}
-
/*
* print a full list of all partitions - intended for places where the root
* filesystem can't be mounted and thus to give the victim some idea of what
@@ -1071,6 +1022,9 @@ static struct attribute *disk_attrs[] = {
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
&dev_attr_badblocks.attr,
+ &dev_attr_events.attr,
+ &dev_attr_events_async.attr,
+ &dev_attr_events_poll_msecs.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@@ -1120,12 +1074,13 @@ static void disk_release(struct device *dev)
might_sleep();
- blk_free_devt(dev->devt);
+ if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
+ blk_free_ext_minor(MINOR(dev->devt));
disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
bdput(disk->part0);
- if (disk->queue)
+ if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
blk_put_queue(disk->queue);
kfree(disk);
}
@@ -1242,6 +1197,20 @@ static int __init proc_genhd_init(void)
module_init(proc_genhd_init);
#endif /* CONFIG_PROC_FS */
+dev_t part_devt(struct gendisk *disk, u8 partno)
+{
+ struct block_device *part;
+ dev_t devt = 0;
+
+ rcu_read_lock();
+ part = xa_load(&disk->part_tbl, partno);
+ if (part)
+ devt = part->bd_dev;
+ rcu_read_unlock();
+
+ return devt;
+}
+
dev_t blk_lookup_devt(const char *name, int partno)
{
dev_t devt = MKDEV(0, 0);
@@ -1251,7 +1220,6 @@ dev_t blk_lookup_devt(const char *name, int partno)
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
- struct block_device *part;
if (strcmp(dev_name(dev), name))
continue;
@@ -1262,13 +1230,10 @@ dev_t blk_lookup_devt(const char *name, int partno)
*/
devt = MKDEV(MAJOR(dev->devt),
MINOR(dev->devt) + partno);
- break;
- }
- part = bdget_disk(disk, partno);
- if (part) {
- devt = part->bd_dev;
- bdput(part);
- break;
+ } else {
+ devt = part_devt(disk, partno);
+ if (devt)
+ break;
}
}
class_dev_iter_exit(&iter);
@@ -1279,13 +1244,6 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- if (minors > DISK_MAX_PARTS) {
- printk(KERN_ERR
- "block: can't allocate more than %d partitions\n",
- DISK_MAX_PARTS);
- minors = DISK_MAX_PARTS;
- }
-
disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (!disk)
return NULL;
@@ -1295,6 +1253,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
goto out_free_disk;
disk->node_id = node_id;
+ mutex_init(&disk->open_mutex);
xa_init(&disk->part_tbl);
if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
goto out_destroy_part_tbl;
@@ -1315,6 +1274,25 @@ out_free_disk:
}
EXPORT_SYMBOL(__alloc_disk_node);
+struct gendisk *__blk_alloc_disk(int node)
+{
+ struct request_queue *q;
+ struct gendisk *disk;
+
+ q = blk_alloc_queue(node);
+ if (!q)
+ return NULL;
+
+ disk = __alloc_disk_node(0, node);
+ if (!disk) {
+ blk_cleanup_queue(q);
+ return NULL;
+ }
+ disk->queue = q;
+ return disk;
+}
+EXPORT_SYMBOL(__blk_alloc_disk);
+
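__blk_alloc_disk() is the bio-based counterpart of the blk-mq helper added earlier in this series; it pairs with blk_cleanup_disk() just below. Assumed usage:

static struct gendisk *example_bio_disk(int node)
{
	struct gendisk *disk = __blk_alloc_disk(node);

	if (!disk)
		return NULL;
	/* a bio-based driver would set disk->fops (with ->submit_bio),
	 * name and capacity here, then call add_disk() */
	return disk;
}

Teardown for both allocation styles is blk_cleanup_disk(), which drains and destroys the queue before dropping the gendisk reference.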
/**
* put_disk - decrements the gendisk refcount
* @disk: the struct gendisk to decrement the refcount for
@@ -1332,6 +1310,22 @@ void put_disk(struct gendisk *disk)
}
EXPORT_SYMBOL(put_disk);
+/**
+ * blk_cleanup_disk - shutdown a gendisk allocated by blk_alloc_disk
+ * @disk: gendisk to shutdown
+ *
+ * Mark the queue hanging off @disk DYING, drain all pending requests, then
+ * mark the queue DEAD, and finally destroy and put both it and the gendisk
+ * structure.
+ *
+ * Context: can sleep
+ */
+void blk_cleanup_disk(struct gendisk *disk)
+{
+ blk_cleanup_queue(disk->queue);
+ put_disk(disk);
+}
+EXPORT_SYMBOL(blk_cleanup_disk);
+
static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
char event[] = "DISK_RO=1";
@@ -1369,488 +1363,3 @@ int bdev_read_only(struct block_device *bdev)
return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
EXPORT_SYMBOL(bdev_read_only);
-
-/*
- * Disk events - monitor disk events like media change and eject request.
- */
-struct disk_events {
- struct list_head node; /* all disk_event's */
- struct gendisk *disk; /* the associated disk */
- spinlock_t lock;
-
- struct mutex block_mutex; /* protects blocking */
- int block; /* event blocking depth */
- unsigned int pending; /* events already sent out */
- unsigned int clearing; /* events being cleared */
-
- long poll_msecs; /* interval, -1 for default */
- struct delayed_work dwork;
-};
-
-static const char *disk_events_strs[] = {
- [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
- [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
-};
-
-static char *disk_uevents[] = {
- [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
- [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
-};
-
-/* list of all disk_events */
-static DEFINE_MUTEX(disk_events_mutex);
-static LIST_HEAD(disk_events);
-
-/* disable in-kernel polling by default */
-static unsigned long disk_events_dfl_poll_msecs;
-
-static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
-{
- struct disk_events *ev = disk->ev;
- long intv_msecs = 0;
-
- /*
- * If device-specific poll interval is set, always use it. If
- * the default is being used, poll if the POLL flag is set.
- */
- if (ev->poll_msecs >= 0)
- intv_msecs = ev->poll_msecs;
- else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
- intv_msecs = disk_events_dfl_poll_msecs;
-
- return msecs_to_jiffies(intv_msecs);
-}
-
-/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events(). Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
- struct disk_events *ev = disk->ev;
- unsigned long flags;
- bool cancel;
-
- if (!ev)
- return;
-
- /*
- * Outer mutex ensures that the first blocker completes canceling
- * the event work before further blockers are allowed to finish.
- */
- mutex_lock(&ev->block_mutex);
-
- spin_lock_irqsave(&ev->lock, flags);
- cancel = !ev->block++;
- spin_unlock_irqrestore(&ev->lock, flags);
-
- if (cancel)
- cancel_delayed_work_sync(&disk->ev->dwork);
-
- mutex_unlock(&ev->block_mutex);
-}
-
-static void __disk_unblock_events(struct gendisk *disk, bool check_now)
-{
- struct disk_events *ev = disk->ev;
- unsigned long intv;
- unsigned long flags;
-
- spin_lock_irqsave(&ev->lock, flags);
-
- if (WARN_ON_ONCE(ev->block <= 0))
- goto out_unlock;
-
- if (--ev->block)
- goto out_unlock;
-
- intv = disk_events_poll_jiffies(disk);
- if (check_now)
- queue_delayed_work(system_freezable_power_efficient_wq,
- &ev->dwork, 0);
- else if (intv)
- queue_delayed_work(system_freezable_power_efficient_wq,
- &ev->dwork, intv);
-out_unlock:
- spin_unlock_irqrestore(&ev->lock, flags);
-}
-
-/**
- * disk_unblock_events - unblock disk event checking
- * @disk: disk to unblock events for
- *
- * Undo disk_block_events(). When the block count reaches zero, it
- * starts events polling if configured.
- *
- * CONTEXT:
- * Don't care. Safe to call from irq context.
- */
-void disk_unblock_events(struct gendisk *disk)
-{
- if (disk->ev)
- __disk_unblock_events(disk, false);
-}
-
-/**
- * disk_flush_events - schedule immediate event checking and flushing
- * @disk: disk to check and flush events for
- * @mask: events to flush
- *
- * Schedule immediate event checking on @disk if not blocked. Events in
- * @mask are scheduled to be cleared from the driver. Note that this
- * doesn't clear the events from @disk->ev.
- *
- * CONTEXT:
- * If @mask is non-zero must be called with bdev->bd_mutex held.
- */
-void disk_flush_events(struct gendisk *disk, unsigned int mask)
-{
- struct disk_events *ev = disk->ev;
-
- if (!ev)
- return;
-
- spin_lock_irq(&ev->lock);
- ev->clearing |= mask;
- if (!ev->block)
- mod_delayed_work(system_freezable_power_efficient_wq,
- &ev->dwork, 0);
- spin_unlock_irq(&ev->lock);
-}
-
-/**
- * disk_clear_events - synchronously check, clear and return pending events
- * @disk: disk to fetch and clear events from
- * @mask: mask of events to be fetched and cleared
- *
- * Disk events are synchronously checked and pending events in @mask
- * are cleared and returned. This ignores the block count.
- *
- * CONTEXT:
- * Might sleep.
- */
-static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
-{
- struct disk_events *ev = disk->ev;
- unsigned int pending;
- unsigned int clearing = mask;
-
- if (!ev)
- return 0;
-
- disk_block_events(disk);
-
- /*
- * store the union of mask and ev->clearing on the stack so that the
- * race with disk_flush_events does not cause ambiguity (ev->clearing
- * can still be modified even if events are blocked).
- */
- spin_lock_irq(&ev->lock);
- clearing |= ev->clearing;
- ev->clearing = 0;
- spin_unlock_irq(&ev->lock);
-
- disk_check_events(ev, &clearing);
- /*
- * if ev->clearing is not 0, the disk_flush_events got called in the
- * middle of this function, so we want to run the workfn without delay.
- */
- __disk_unblock_events(disk, ev->clearing ? true : false);
-
- /* then, fetch and clear pending events */
- spin_lock_irq(&ev->lock);
- pending = ev->pending & mask;
- ev->pending &= ~mask;
- spin_unlock_irq(&ev->lock);
- WARN_ON_ONCE(clearing & mask);
-
- return pending;
-}
-
-/**
- * bdev_check_media_change - check if a removable media has been changed
- * @bdev: block device to check
- *
- * Check whether a removable media has been changed, and attempt to free all
- * dentries and inodes and invalidates all block device page cache entries in
- * that case.
- *
- * Returns %true if the block device changed, or %false if not.
- */
-bool bdev_check_media_change(struct block_device *bdev)
-{
- unsigned int events;
-
- events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE |
- DISK_EVENT_EJECT_REQUEST);
- if (!(events & DISK_EVENT_MEDIA_CHANGE))
- return false;
-
- if (__invalidate_device(bdev, true))
- pr_warn("VFS: busy inodes on changed media %s\n",
- bdev->bd_disk->disk_name);
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
- return true;
-}
-EXPORT_SYMBOL(bdev_check_media_change);
-
-/*
- * Separate this part out so that a different pointer for clearing_ptr can be
- * passed in for disk_clear_events.
- */
-static void disk_events_workfn(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
-
- disk_check_events(ev, &ev->clearing);
-}
-
-static void disk_check_events(struct disk_events *ev,
- unsigned int *clearing_ptr)
-{
- struct gendisk *disk = ev->disk;
- char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
- unsigned int clearing = *clearing_ptr;
- unsigned int events;
- unsigned long intv;
- int nr_events = 0, i;
-
- /* check events */
- events = disk->fops->check_events(disk, clearing);
-
- /* accumulate pending events and schedule next poll if necessary */
- spin_lock_irq(&ev->lock);
-
- events &= ~ev->pending;
- ev->pending |= events;
- *clearing_ptr &= ~clearing;
-
- intv = disk_events_poll_jiffies(disk);
- if (!ev->block && intv)
- queue_delayed_work(system_freezable_power_efficient_wq,
- &ev->dwork, intv);
-
- spin_unlock_irq(&ev->lock);
-
- /*
- * Tell userland about new events. Only the events listed in
- * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
- * is set. Otherwise, events are processed internally but never
- * get reported to userland.
- */
- for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if ((events & disk->events & (1 << i)) &&
- (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
- envp[nr_events++] = disk_uevents[i];
-
- if (nr_events)
- kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
-}
-
-/*
- * A disk events enabled device has the following sysfs nodes under
- * its /sys/block/X/ directory.
- *
- * events : list of all supported events
- * events_async : list of events which can be detected w/o polling
- * (always empty, only for backwards compatibility)
- * events_poll_msecs : polling interval, 0: disable, -1: system default
- */
-static ssize_t __disk_events_show(unsigned int events, char *buf)
-{
- const char *delim = "";
- ssize_t pos = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
- if (events & (1 << i)) {
- pos += sprintf(buf + pos, "%s%s",
- delim, disk_events_strs[i]);
- delim = " ";
- }
- if (pos)
- pos += sprintf(buf + pos, "\n");
- return pos;
-}
-
-static ssize_t disk_events_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
- return 0;
-
- return __disk_events_show(disk->events, buf);
-}
-
-static ssize_t disk_events_async_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return 0;
-}
-
-static ssize_t disk_events_poll_msecs_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- if (!disk->ev)
- return sprintf(buf, "-1\n");
-
- return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
-}
-
-static ssize_t disk_events_poll_msecs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gendisk *disk = dev_to_disk(dev);
- long intv;
-
- if (!count || !sscanf(buf, "%ld", &intv))
- return -EINVAL;
-
- if (intv < 0 && intv != -1)
- return -EINVAL;
-
- if (!disk->ev)
- return -ENODEV;
-
- disk_block_events(disk);
- disk->ev->poll_msecs = intv;
- __disk_unblock_events(disk, true);
-
- return count;
-}
-
-static const DEVICE_ATTR(events, 0444, disk_events_show, NULL);
-static const DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
-static const DEVICE_ATTR(events_poll_msecs, 0644,
- disk_events_poll_msecs_show,
- disk_events_poll_msecs_store);
-
-static const struct attribute *disk_events_attrs[] = {
- &dev_attr_events.attr,
- &dev_attr_events_async.attr,
- &dev_attr_events_poll_msecs.attr,
- NULL,
-};
-
-/*
- * The default polling interval can be specified by the kernel
- * parameter block.events_dfl_poll_msecs which defaults to 0
- * (disable). This can also be modified runtime by writing to
- * /sys/module/block/parameters/events_dfl_poll_msecs.
- */
-static int disk_events_set_dfl_poll_msecs(const char *val,
- const struct kernel_param *kp)
-{
- struct disk_events *ev;
- int ret;
-
- ret = param_set_ulong(val, kp);
- if (ret < 0)
- return ret;
-
- mutex_lock(&disk_events_mutex);
-
- list_for_each_entry(ev, &disk_events, node)
- disk_flush_events(ev->disk, 0);
-
- mutex_unlock(&disk_events_mutex);
-
- return 0;
-}
-
-static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
- .set = disk_events_set_dfl_poll_msecs,
- .get = param_get_ulong,
-};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "block."
-
-module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
- &disk_events_dfl_poll_msecs, 0644);
-
-/*
- * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
- */
-static void disk_alloc_events(struct gendisk *disk)
-{
- struct disk_events *ev;
-
- if (!disk->fops->check_events || !disk->events)
- return;
-
- ev = kzalloc(sizeof(*ev), GFP_KERNEL);
- if (!ev) {
- pr_warn("%s: failed to initialize events\n", disk->disk_name);
- return;
- }
-
- INIT_LIST_HEAD(&ev->node);
- ev->disk = disk;
- spin_lock_init(&ev->lock);
- mutex_init(&ev->block_mutex);
- ev->block = 1;
- ev->poll_msecs = -1;
- INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
-
- disk->ev = ev;
-}
-
-static void disk_add_events(struct gendisk *disk)
-{
- /* FIXME: error handling */
- if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
- pr_warn("%s: failed to create sysfs files for events\n",
- disk->disk_name);
-
- if (!disk->ev)
- return;
-
- mutex_lock(&disk_events_mutex);
- list_add_tail(&disk->ev->node, &disk_events);
- mutex_unlock(&disk_events_mutex);
-
- /*
- * Block count is initialized to 1 and the following initial
- * unblock kicks it into action.
- */
- __disk_unblock_events(disk, true);
-}
-
-static void disk_del_events(struct gendisk *disk)
-{
- if (disk->ev) {
- disk_block_events(disk);
-
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
- }
-
- sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
-}
-
-static void disk_release_events(struct gendisk *disk)
-{
- /* the block count should be 1 from disk_del_events() */
- WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
- kfree(disk->ev);
-}
diff --git a/block/ioctl.c b/block/ioctl.c
index 8ba1ed8defd0..24beec9ca9c9 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -89,7 +89,7 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (bdev->bd_part_count)
+ if (bdev->bd_disk->open_partitions)
return -EBUSY;
/*
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
new file mode 100644
index 000000000000..3b4bfddec39f
--- /dev/null
+++ b/block/mq-deadline-cgroup.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-cgroup.h>
+#include <linux/ioprio.h>
+
+#include "mq-deadline-cgroup.h"
+
+static struct blkcg_policy dd_blkcg_policy;
+
+static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
+{
+ struct dd_blkcg *pd;
+
+ pd = kzalloc(sizeof(*pd), gfp);
+ if (!pd)
+ return NULL;
+ pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pd->stats) {
+ kfree(pd);
+ return NULL;
+ }
+ return &pd->cpd;
+}
+
+static void dd_cpd_free(struct blkcg_policy_data *cpd)
+{
+ struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
+
+ free_percpu(dd_blkcg->stats);
+ kfree(dd_blkcg);
+}
+
+static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
+{
+ return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
+ struct dd_blkcg, cpd);
+}
+
+/*
+ * Convert an association between a block cgroup and a request queue into a
+ * pointer to the mq-deadline information associated with a (blkcg, queue) pair.
+ */
+struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
+{
+ struct blkg_policy_data *pd;
+
+ pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
+ if (!pd)
+ return NULL;
+
+ return dd_blkcg_from_pd(pd);
+}
+
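+/*
+ * Print, for each I/O priority class, the number of requests owned by the
+ * block driver ("dispatched" = dispatched + merged - completed), the number
+ * of requests that have not completed yet ("inserted" = inserted - completed)
+ * and the total merge count. The per-CPU counters are summed without locking,
+ * so slightly stale values may be reported.
+ */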
+static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+{
+ static const char *const prio_class_name[] = {
+ [IOPRIO_CLASS_NONE] = "NONE",
+ [IOPRIO_CLASS_RT] = "RT",
+ [IOPRIO_CLASS_BE] = "BE",
+ [IOPRIO_CLASS_IDLE] = "IDLE",
+ };
+ struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
+ int res = 0;
+ u8 prio;
+
+ for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
+ res += scnprintf(buf + res, size - res,
+ " [%s] dispatched=%u inserted=%u merged=%u",
+ prio_class_name[prio],
+ ddcg_sum(blkcg, dispatched, prio) +
+ ddcg_sum(blkcg, merged, prio) -
+ ddcg_sum(blkcg, completed, prio),
+ ddcg_sum(blkcg, inserted, prio) -
+ ddcg_sum(blkcg, completed, prio),
+ ddcg_sum(blkcg, merged, prio));
+
+ return res;
+}
+
+static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
+ struct blkcg *blkcg)
+{
+ struct dd_blkg *pd;
+
+ pd = kzalloc(sizeof(*pd), gfp);
+ if (!pd)
+ return NULL;
+ return &pd->pd;
+}
+
+static void dd_pd_free(struct blkg_policy_data *pd)
+{
+ struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
+
+ kfree(dd_blkg);
+}
+
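+/*
+ * Callbacks of the mq-deadline blkcg policy: the cpd_* hooks manage
+ * per-cgroup data (struct dd_blkcg) and the pd_* hooks manage
+ * per-(cgroup, request queue) data (struct dd_blkg).
+ */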
+static struct blkcg_policy dd_blkcg_policy = {
+ .cpd_alloc_fn = dd_cpd_alloc,
+ .cpd_free_fn = dd_cpd_free,
+
+ .pd_alloc_fn = dd_pd_alloc,
+ .pd_free_fn = dd_pd_free,
+ .pd_stat_fn = dd_pd_stat,
+};
+
+int dd_activate_policy(struct request_queue *q)
+{
+ return blkcg_activate_policy(q, &dd_blkcg_policy);
+}
+
+void dd_deactivate_policy(struct request_queue *q)
+{
+ blkcg_deactivate_policy(q, &dd_blkcg_policy);
+}
+
+int __init dd_blkcg_init(void)
+{
+ return blkcg_policy_register(&dd_blkcg_policy);
+}
+
+void __exit dd_blkcg_exit(void)
+{
+ blkcg_policy_unregister(&dd_blkcg_policy);
+}
diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h
new file mode 100644
index 000000000000..0143fd74f3ce
--- /dev/null
+++ b/block/mq-deadline-cgroup.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_MQ_DEADLINE_CGROUP_H_)
+#define _MQ_DEADLINE_CGROUP_H_
+
+#include <linux/blk-cgroup.h>
+
+struct request_queue;
+
+/**
+ * struct io_stats_per_prio - I/O statistics per I/O priority class.
+ * @inserted: Number of inserted requests.
+ * @merged: Number of merged requests.
+ * @dispatched: Number of dispatched requests.
+ * @completed: Number of I/O completions.
+ */
+struct io_stats_per_prio {
+ local_t inserted;
+ local_t merged;
+ local_t dispatched;
+ local_t completed;
+};
+
+/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
+struct blkcg_io_stats {
+ struct io_stats_per_prio stats[4];
+};
+
+/**
+ * struct dd_blkcg - Per cgroup data.
+ * @cpd: blkcg_policy_data structure.
+ * @stats: I/O statistics.
+ */
+struct dd_blkcg {
+ struct blkcg_policy_data cpd; /* must be the first member */
+ struct blkcg_io_stats __percpu *stats;
+};
+
+/*
+ * Count one event of type 'event_type' and with I/O priority class
+ * 'prio_class'.
+ */
+#define ddcg_count(ddcg, event_type, prio_class) do { \
+if (ddcg) { \
+ struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \
+ \
+ BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
+ BUILD_BUG_ON(!__same_type((prio_class), u8)); \
+ local_inc(&io_stats->stats[(prio_class)].event_type); \
+ put_cpu_ptr(io_stats); \
+} \
+} while (0)
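+
+/*
+ * Example (sketch): count an insertion event for the submitting cgroup. A
+ * NULL @ddcg, e.g. when CONFIG_BLK_CGROUP is off, is tolerated:
+ *
+ *	struct dd_blkcg *ddcg = dd_blkcg_from_bio(rq->bio);
+ *	u8 ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ *
+ *	ddcg_count(ddcg, inserted, ioprio_class);
+ */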
+
+/*
+ * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
+ * across all CPUs. No locking or barriers since it is fine if the returned
+ * sum is slightly outdated.
+ */
+#define ddcg_sum(ddcg, event_type, prio) ({ \
+ unsigned int cpu; \
+ u32 sum = 0; \
+ \
+ BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
+ BUILD_BUG_ON(!__same_type((prio), u8)); \
+ for_each_present_cpu(cpu) \
+ sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \
+ stats[(prio)].event_type); \
+ sum; \
+})
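+
+/*
+ * Example (sketch): total number of best-effort requests merged so far for a
+ * given cgroup:
+ *
+ *	u8 prio = IOPRIO_CLASS_BE;
+ *	u32 nr_merged = ddcg_sum(ddcg, merged, prio);
+ */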
+
+#ifdef CONFIG_BLK_CGROUP
+
+/**
+ * struct dd_blkg - Per (cgroup, request queue) data.
+ * @pd: blkg_policy_data structure.
+ */
+struct dd_blkg {
+ struct blkg_policy_data pd; /* must be the first member */
+};
+
+struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
+int dd_activate_policy(struct request_queue *q);
+void dd_deactivate_policy(struct request_queue *q);
+int __init dd_blkcg_init(void);
+void __exit dd_blkcg_exit(void);
+
+#else /* CONFIG_BLK_CGROUP */
+
+static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
+{
+ return NULL;
+}
+
+static inline int dd_activate_policy(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void dd_deactivate_policy(struct request_queue *q)
+{
+}
+
+static inline int dd_blkcg_init(void)
+{
+ return 0;
+}
+
+static inline void dd_blkcg_exit(void)
+{
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
+#endif /* _MQ_DEADLINE_CGROUP_H_ */
diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c
new file mode 100644
index 000000000000..6f612e6dc82b
--- /dev/null
+++ b/block/mq-deadline-main.c
@@ -0,0 +1,1175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
+ * for the blk-mq scheduling framework
+ *
+ * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/sbitmap.h>
+
+#include <trace/events/block.h>
+
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
+#include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
+#include "mq-deadline-cgroup.h"
+
+/*
+ * See Documentation/block/deadline-iosched.rst
+ */
+static const int read_expire = HZ / 2; /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+/*
+ * Time after which to dispatch lower priority requests even if higher
+ * priority requests are pending.
+ */
+static const int aging_expire = 10 * HZ;
+static const int writes_starved = 2; /* max times reads can starve a write */
+static const int fifo_batch = 16; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
+
+enum dd_data_dir {
+ DD_READ = READ,
+ DD_WRITE = WRITE,
+};
+
+enum { DD_DIR_COUNT = 2 };
+
+enum dd_prio {
+ DD_RT_PRIO = 0,
+ DD_BE_PRIO = 1,
+ DD_IDLE_PRIO = 2,
+ DD_PRIO_MAX = 2,
+};
+
+enum { DD_PRIO_COUNT = 3 };
+
+/* I/O statistics for all I/O priorities (enum dd_prio). */
+struct io_stats {
+ struct io_stats_per_prio stats[DD_PRIO_COUNT];
+};
+
+/*
+ * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
+ * present on both sort_list[] and fifo_list[].
+ */
+struct dd_per_prio {
+ struct list_head dispatch;
+ struct rb_root sort_list[DD_DIR_COUNT];
+ struct list_head fifo_list[DD_DIR_COUNT];
+ /* Next request in sector-sorted order. Read, write or both may be NULL. */
+ struct request *next_rq[DD_DIR_COUNT];
+};
+
+struct deadline_data {
+ /*
+ * run time data
+ */
+
+ /* Request queue that owns this data structure. */
+ struct request_queue *queue;
+
+ struct dd_per_prio per_prio[DD_PRIO_COUNT];
+
+ /* Data direction of latest dispatched request. */
+ enum dd_data_dir last_dir;
+ unsigned int batching; /* number of sequential requests made */
+ unsigned int starved; /* times reads have starved writes */
+
+ struct io_stats __percpu *stats;
+
+ /*
+ * settings that change how the i/o scheduler behaves
+ */
+ int fifo_expire[DD_DIR_COUNT];
+ int fifo_batch;
+ int writes_starved;
+ int front_merges;
+ u32 async_depth;
+ int aging_expire;
+
+ spinlock_t lock;
+ spinlock_t zone_lock;
+};
+
+/* Count one event of type 'event_type' and with I/O priority 'prio' */
+#define dd_count(dd, event_type, prio) do { \
+ struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \
+ \
+ BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
+ BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
+ local_inc(&io_stats->stats[(prio)].event_type); \
+ put_cpu_ptr(io_stats); \
+} while (0)
+
+/*
+ * Returns the total number of dd_count(dd, event_type, prio) calls across all
+ * CPUs. No locking or barriers since it is fine if the returned sum is slightly
+ * outdated.
+ */
+#define dd_sum(dd, event_type, prio) ({ \
+ unsigned int cpu; \
+ u32 sum = 0; \
+ \
+ BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
+ BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
+ for_each_present_cpu(cpu) \
+ sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \
+ stats[(prio)].event_type); \
+ sum; \
+})
+
+/* Maps an I/O priority class to a deadline scheduler priority. */
+static const enum dd_prio ioprio_class_to_prio[] = {
+ [IOPRIO_CLASS_NONE] = DD_BE_PRIO,
+ [IOPRIO_CLASS_RT] = DD_RT_PRIO,
+ [IOPRIO_CLASS_BE] = DD_BE_PRIO,
+ [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
+};
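+
+/*
+ * Example: a request submitted under IOPRIO_CLASS_RT, e.g. via ionice -c 1,
+ * is mapped onto the DD_RT_PRIO FIFO and sort lists:
+ *
+ *	enum dd_prio prio = ioprio_class_to_prio[dd_rq_ioclass(rq)];
+ */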
+
+static inline struct rb_root *
+deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
+{
+ return &per_prio->sort_list[rq_data_dir(rq)];
+}
+
+/*
+ * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
+ * request.
+ */
+static u8 dd_rq_ioclass(struct request *rq)
+{
+ return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+}
+
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+ struct rb_node *node = rb_next(&rq->rb_node);
+
+ if (node)
+ return rb_entry_rq(node);
+
+ return NULL;
+}
+
+static void
+deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+ struct rb_root *root = deadline_rb_root(per_prio, rq);
+
+ elv_rb_add(root, rq);
+}
+
+static inline void
+deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+ const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+ if (per_prio->next_rq[data_dir] == rq)
+ per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+ elv_rb_del(deadline_rb_root(per_prio, rq), rq);
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void deadline_remove_request(struct request_queue *q,
+ struct dd_per_prio *per_prio,
+ struct request *rq)
+{
+ list_del_init(&rq->queuelist);
+
+ /*
+ * We might not be on the rbtree, if we are doing an insert merge
+ */
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ deadline_del_rq_rb(per_prio, rq);
+
+ elv_rqhash_del(q, rq);
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+}
+
+static void dd_request_merged(struct request_queue *q, struct request *req,
+ enum elv_merge type)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const u8 ioprio_class = dd_rq_ioclass(req);
+ const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+ struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+ /*
+ * if the merge was a front merge, we need to reposition request
+ */
+ if (type == ELEVATOR_FRONT_MERGE) {
+ elv_rb_del(deadline_rb_root(per_prio, req), req);
+ deadline_add_rq_rb(per_prio, req);
+ }
+}
+
+/*
+ * Callback function that is invoked after @next has been merged into @req.
+ */
+static void dd_merged_requests(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const u8 ioprio_class = dd_rq_ioclass(next);
+ const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+ struct dd_blkcg *blkcg = next->elv.priv[0];
+
+ dd_count(dd, merged, prio);
+ ddcg_count(blkcg, merged, ioprio_class);
+
+ /*
+ * if next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before((unsigned long)next->fifo_time,
+ (unsigned long)req->fifo_time)) {
+ list_move(&req->queuelist, &next->queuelist);
+ req->fifo_time = next->fifo_time;
+ }
+ }
+
+ /*
+ * kill knowledge of next, this one is a goner
+ */
+ deadline_remove_request(q, &dd->per_prio[prio], next);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ struct request *rq)
+{
+ const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+ per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+ /*
+ * take it off the sort and fifo list
+ */
+ deadline_remove_request(rq->q, per_prio, rq);
+}
+
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+ return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]).
+ */
+static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
+ enum dd_data_dir data_dir)
+{
+ struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+
+ /*
+ * rq is expired!
+ */
+ if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ enum dd_data_dir data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ if (list_empty(&per_prio->fifo_list[data_dir]))
+ return NULL;
+
+ rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+ if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ goto out;
+ }
+ rq = NULL;
+out:
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ enum dd_data_dir data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ rq = per_prio->next_rq[data_dir];
+ if (!rq)
+ return NULL;
+
+ if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ while (rq) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ break;
+ rq = deadline_latter_request(rq);
+ }
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
+ * __dd_dispatch_request() selects the best request according to
+ * read/write expire, fifo_batch, etc., and only considers requests with a
+ * start time <= @latest_start_ns.
+ */
+static struct request *__dd_dispatch_request(struct deadline_data *dd,
+ struct dd_per_prio *per_prio,
+ u64 latest_start_ns)
+{
+ struct request *rq, *next_rq;
+ enum dd_data_dir data_dir;
+ struct dd_blkcg *blkcg;
+ enum dd_prio prio;
+ u8 ioprio_class;
+
+ lockdep_assert_held(&dd->lock);
+
+ if (!list_empty(&per_prio->dispatch)) {
+ rq = list_first_entry(&per_prio->dispatch, struct request,
+ queuelist);
+ if (rq->start_time_ns > latest_start_ns)
+ return NULL;
+ list_del_init(&rq->queuelist);
+ goto done;
+ }
+
+ /*
+ * batches are currently reads XOR writes
+ */
+ rq = deadline_next_request(dd, per_prio, dd->last_dir);
+ if (rq && dd->batching < dd->fifo_batch)
+ /* we have a next request and are still entitled to batch */
+ goto dispatch_request;
+
+ /*
+ * at this point we are not running a batch. select the appropriate
+ * data direction (read / write)
+ */
+
+ if (!list_empty(&per_prio->fifo_list[DD_READ])) {
+ BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
+
+ if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
+ (dd->starved++ >= dd->writes_starved))
+ goto dispatch_writes;
+
+ data_dir = DD_READ;
+
+ goto dispatch_find_request;
+ }
+
+ /*
+ * Either there are no reads, or writes have been starved.
+ */
+
+ if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
+dispatch_writes:
+ BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
+
+ dd->starved = 0;
+
+ data_dir = DD_WRITE;
+
+ goto dispatch_find_request;
+ }
+
+ return NULL;
+
+dispatch_find_request:
+ /*
+ * we are not running a batch, find best request for selected data_dir
+ */
+ next_rq = deadline_next_request(dd, per_prio, data_dir);
+ if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
+ /*
+ * A deadline has expired, the last request was in the other
+ * direction, or we have run out of higher-sectored requests.
+ * Start again from the request with the earliest expiry time.
+ */
+ rq = deadline_fifo_request(dd, per_prio, data_dir);
+ } else {
+ /*
+ * The last req was the same dir and we have a next request in
+ * sort order. No expired requests so continue on from here.
+ */
+ rq = next_rq;
+ }
+
+ /*
+ * For a zoned block device, if we only have writes queued and none of
+ * them can be dispatched, rq will be NULL.
+ */
+ if (!rq)
+ return NULL;
+
+ dd->last_dir = data_dir;
+ dd->batching = 0;
+
+dispatch_request:
+ if (rq->start_time_ns > latest_start_ns)
+ return NULL;
+ /*
+ * rq is the selected appropriate request.
+ */
+ dd->batching++;
+ deadline_move_request(dd, per_prio, rq);
+done:
+ ioprio_class = dd_rq_ioclass(rq);
+ prio = ioprio_class_to_prio[ioprio_class];
+ dd_count(dd, dispatched, prio);
+ blkcg = rq->elv.priv[0];
+ ddcg_count(blkcg, dispatched, ioprio_class);
+ /*
+ * If the request needs its target zone locked, do it.
+ */
+ blk_req_zone_write_lock(rq);
+ rq->rq_flags |= RQF_STARTED;
+ return rq;
+}
+
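+/*
+ * Example of the two-pass dispatch policy implemented below, assuming the
+ * default aging_expire of 10 seconds: a best-effort request submitted more
+ * than 10 seconds ago is returned by the first loop even while real-time
+ * requests are pending; without such aged requests, the second loop serves
+ * the real-time requests first.
+ */
+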
+/*
+ * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
+ *
+ * One confusing aspect here is that we get called for a specific
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ */
+static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+ struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+ const u64 now_ns = ktime_get_ns();
+ struct request *rq = NULL;
+ enum dd_prio prio;
+
+ spin_lock(&dd->lock);
+ /*
+ * Start with dispatching BE and IDLE requests that were submitted more
+ * than aging_expire jiffies ago. RT requests are skipped here since the
+ * priority loop below dispatches them first anyway.
+ */
+ for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
+ rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
+ jiffies_to_nsecs(dd->aging_expire));
+ if (rq)
+ goto unlock;
+ }
+ /*
+ * Next, dispatch requests in priority order. Ignore lower priority
+ * requests if any higher priority requests are pending.
+ */
+ for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+ rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
+ if (rq || dd_queued(dd, prio))
+ break;
+ }
+
+unlock:
+ spin_unlock(&dd->lock);
+
+ return rq;
+}
+
+/*
+ * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+ * function is used by __blk_mq_get_tag().
+ */
+static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+ struct deadline_data *dd = data->q->elevator->elevator_data;
+
+ /* Do not throttle synchronous reads. */
+ if (op_is_sync(op) && !op_is_write(op))
+ return;
+
+ /*
+ * Throttle asynchronous requests and writes such that these requests
+ * do not block the allocation of synchronous requests.
+ */
+ data->shallow_depth = dd->async_depth;
+}
+
+/* Called by blk_mq_update_nr_requests(). */
+static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct blk_mq_tags *tags = hctx->sched_tags;
+
+ dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+
+ sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
+}
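+
+/*
+ * Example: for a queue with nr_requests == 64, async_depth becomes
+ * max(1, 3 * 64 / 4) = 48, i.e. at most 48 of the 64 scheduler tags may be
+ * consumed by asynchronous requests and writes, leaving at least 16 tags
+ * for synchronous reads.
+ */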
+
+/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
+static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+ dd_depth_updated(hctx);
+ return 0;
+}
+
+static void dd_exit_sched(struct elevator_queue *e)
+{
+ struct deadline_data *dd = e->elevator_data;
+ enum dd_prio prio;
+
+ dd_deactivate_policy(dd->queue);
+
+ for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+ struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+ WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
+ WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
+ }
+
+ free_percpu(dd->stats);
+
+ kfree(dd);
+}
+
+/*
+ * Initialize elevator private data (deadline_data) and associate with blkcg.
+ */
+static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+ struct deadline_data *dd;
+ struct elevator_queue *eq;
+ enum dd_prio prio;
+ int ret = -ENOMEM;
+
+ /*
+ * Initialization would be very tricky if the queue is not frozen,
+ * hence the warning statement below.
+ */
+ WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return ret;
+
+ dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ if (!dd)
+ goto put_eq;
+
+ eq->elevator_data = dd;
+
+ dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dd->stats)
+ goto free_dd;
+
+ dd->queue = q;
+
+ for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+ struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+ INIT_LIST_HEAD(&per_prio->dispatch);
+ INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
+ INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
+ per_prio->sort_list[DD_READ] = RB_ROOT;
+ per_prio->sort_list[DD_WRITE] = RB_ROOT;
+ }
+ dd->fifo_expire[DD_READ] = read_expire;
+ dd->fifo_expire[DD_WRITE] = write_expire;
+ dd->writes_starved = writes_starved;
+ dd->front_merges = 1;
+ dd->last_dir = DD_WRITE;
+ dd->fifo_batch = fifo_batch;
+ dd->aging_expire = aging_expire;
+ spin_lock_init(&dd->lock);
+ spin_lock_init(&dd->zone_lock);
+
+ ret = dd_activate_policy(q);
+ if (ret)
+ goto free_stats;
+
+ q->elevator = eq;
+ return 0;
+
+free_stats:
+ free_percpu(dd->stats);
+
+free_dd:
+ kfree(dd);
+
+put_eq:
+ kobject_put(&eq->kobj);
+ return ret;
+}
+
+/*
+ * Try to merge @bio into an existing request. If @bio has been merged into
+ * an existing request, store the pointer to that request into *@rq.
+ */
+static int dd_request_merge(struct request_queue *q, struct request **rq,
+ struct bio *bio)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
+ const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+ struct dd_per_prio *per_prio = &dd->per_prio[prio];
+ sector_t sector = bio_end_sector(bio);
+ struct request *__rq;
+
+ if (!dd->front_merges)
+ return ELEVATOR_NO_MERGE;
+
+ __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
+ if (__rq) {
+ BUG_ON(sector != blk_rq_pos(__rq));
+
+ if (elv_bio_merge_ok(__rq, bio)) {
+ *rq = __rq;
+ return ELEVATOR_FRONT_MERGE;
+ }
+ }
+
+ return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * Attempt to merge a bio into an existing request. This function is called
+ * before @bio is associated with a request.
+ */
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+ unsigned int nr_segs)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct request *free = NULL;
+ bool ret;
+
+ spin_lock(&dd->lock);
+ ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+ spin_unlock(&dd->lock);
+
+ if (free)
+ blk_mq_free_request(free);
+
+ return ret;
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bool at_head)
+{
+ struct request_queue *q = hctx->queue;
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const enum dd_data_dir data_dir = rq_data_dir(rq);
+ u16 ioprio = req_get_ioprio(rq);
+ u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
+ struct dd_per_prio *per_prio;
+ enum dd_prio prio;
+ struct dd_blkcg *blkcg;
+ LIST_HEAD(free);
+
+ lockdep_assert_held(&dd->lock);
+
+ /*
+ * This may be a requeue of a write request that has locked its
+ * target zone. If it is the case, this releases the zone lock.
+ */
+ blk_req_zone_write_unlock(rq);
+
+ /*
+ * Look up the deadline scheduler priority that corresponds to the
+ * request's I/O priority class and count the insertion both per
+ * scheduler and, if the bio is associated with a block cgroup, per
+ * cgroup. The cgroup pointer is stashed in rq->elv.priv[0] so that
+ * completions and merges can be attributed to the same cgroup.
+ */
+ prio = ioprio_class_to_prio[ioprio_class];
+ dd_count(dd, inserted, prio);
+ blkcg = dd_blkcg_from_bio(rq->bio);
+ ddcg_count(blkcg, inserted, ioprio_class);
+ rq->elv.priv[0] = blkcg;
+
+ if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+ blk_mq_free_requests(&free);
+ return;
+ }
+
+ trace_block_rq_insert(rq);
+
+ per_prio = &dd->per_prio[prio];
+ if (at_head) {
+ list_add(&rq->queuelist, &per_prio->dispatch);
+ } else {
+ deadline_add_rq_rb(per_prio, rq);
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
+
+ /*
+ * set expire time and add to fifo list
+ */
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
+ list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
+ }
+}
+
+/*
+ * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ */
+static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list, bool at_head)
+{
+ struct request_queue *q = hctx->queue;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ spin_lock(&dd->lock);
+ while (!list_empty(list)) {
+ struct request *rq;
+
+ rq = list_first_entry(list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ dd_insert_request(hctx, rq, at_head);
+ }
+ spin_unlock(&dd->lock);
+}
+
+/* Callback from inside blk_mq_rq_ctx_init(). */
+static void dd_prepare_request(struct request *rq)
+{
+ rq->elv.priv[0] = NULL;
+}
+
+/*
+ * Callback from inside blk_mq_free_request().
+ *
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests. Do this while holding the zone lock
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
+ */
+static void dd_finish_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct dd_blkcg *blkcg = rq->elv.priv[0];
+ const u8 ioprio_class = dd_rq_ioclass(rq);
+ const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+ struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+ dd_count(dd, completed, prio);
+ ddcg_count(blkcg, completed, ioprio_class);
+
+ if (blk_queue_is_zoned(q)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ blk_req_zone_write_unlock(rq);
+ if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
+ blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+ }
+}
+
+static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
+{
+ return !list_empty_careful(&per_prio->dispatch) ||
+ !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
+ !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
+}
+
+static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
+{
+ struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+ enum dd_prio prio;
+
+ for (prio = 0; prio <= DD_PRIO_MAX; prio++)
+ if (dd_has_work_for_prio(&dd->per_prio[prio]))
+ return true;
+
+ return false;
+}
+
+/*
+ * sysfs parts below
+ */
+#define SHOW_INT(__FUNC, __VAR) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ \
+ return sysfs_emit(page, "%d\n", __VAR); \
+}
+#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
+SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
+SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
+SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
+SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
+SHOW_INT(deadline_front_merges_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
+SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
+#undef SHOW_INT
+#undef SHOW_JIFFIES
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ int __data, __ret; \
+ \
+ __ret = kstrtoint(page, 0, &__data); \
+ if (__ret < 0) \
+ return __ret; \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ *(__PTR) = __CONV(__data); \
+ return count; \
+}
+#define STORE_INT(__FUNC, __PTR, MIN, MAX) \
+ STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
+#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \
+ STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
+STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
+STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
+STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
+STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
+STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
+STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
+#undef STORE_FUNCTION
+#undef STORE_INT
+#undef STORE_JIFFIES
+
+#define DD_ATTR(name) \
+ __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+ DD_ATTR(read_expire),
+ DD_ATTR(write_expire),
+ DD_ATTR(writes_starved),
+ DD_ATTR(front_merges),
+ DD_ATTR(async_depth),
+ DD_ATTR(fifo_batch),
+ DD_ATTR(aging_expire),
+ __ATTR_NULL
+};
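+
+/*
+ * Example (sketch): with mq-deadline selected, the attributes above appear
+ * under /sys/block/<dev>/queue/iosched/, e.g. for a hypothetical disk sda:
+ *
+ *	# echo mq-deadline > /sys/block/sda/queue/scheduler
+ *	# cat /sys/block/sda/queue/iosched/aging_expire
+ *	10000
+ */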
+
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
+static void *deadline_##name##_fifo_start(struct seq_file *m, \
+ loff_t *pos) \
+ __acquires(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
+ \
+ spin_lock(&dd->lock); \
+ return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
+} \
+ \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
+ \
+ return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \
+} \
+ \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
+ __releases(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_unlock(&dd->lock); \
+} \
+ \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
+ .start = deadline_##name##_fifo_start, \
+ .next = deadline_##name##_fifo_next, \
+ .stop = deadline_##name##_fifo_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int deadline_##name##_next_rq_show(void *data, \
+ struct seq_file *m) \
+{ \
+ struct request_queue *q = data; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
+ struct request *rq = per_prio->next_rq[data_dir]; \
+ \
+ if (rq) \
+ __blk_mq_debugfs_rq_show(m, rq); \
+ return 0; \
+}
+
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->batching);
+ return 0;
+}
+
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->starved);
+ return 0;
+}
+
+static int dd_async_depth_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->async_depth);
+ return 0;
+}
+
+static int dd_queued_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
+ dd_queued(dd, DD_BE_PRIO),
+ dd_queued(dd, DD_IDLE_PRIO));
+ return 0;
+}
+
+/* Number of requests owned by the block driver for a given priority. */
+static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
+{
+ return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
+ - dd_sum(dd, completed, prio);
+}
+
+static int dd_owned_by_driver_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
+ dd_owned_by_driver(dd, DD_BE_PRIO),
+ dd_owned_by_driver(dd, DD_IDLE_PRIO));
+ return 0;
+}
+
+#define DEADLINE_DISPATCH_ATTR(prio) \
+static void *deadline_dispatch##prio##_start(struct seq_file *m, \
+ loff_t *pos) \
+ __acquires(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
+ \
+ spin_lock(&dd->lock); \
+ return seq_list_start(&per_prio->dispatch, *pos); \
+} \
+ \
+static void *deadline_dispatch##prio##_next(struct seq_file *m, \
+ void *v, loff_t *pos) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
+ \
+ return seq_list_next(v, &per_prio->dispatch, pos); \
+} \
+ \
+static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
+ __releases(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_unlock(&dd->lock); \
+} \
+ \
+static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
+ .start = deadline_dispatch##prio##_start, \
+ .next = deadline_dispatch##prio##_next, \
+ .stop = deadline_dispatch##prio##_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}
+
+DEADLINE_DISPATCH_ATTR(0);
+DEADLINE_DISPATCH_ATTR(1);
+DEADLINE_DISPATCH_ATTR(2);
+#undef DEADLINE_DISPATCH_ATTR
+
+#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
+ {#name "_fifo_list", 0400, \
+ .seq_ops = &deadline_##name##_fifo_seq_ops}
+#define DEADLINE_NEXT_RQ_ATTR(name) \
+ {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+ DEADLINE_QUEUE_DDIR_ATTRS(read0),
+ DEADLINE_QUEUE_DDIR_ATTRS(write0),
+ DEADLINE_QUEUE_DDIR_ATTRS(read1),
+ DEADLINE_QUEUE_DDIR_ATTRS(write1),
+ DEADLINE_QUEUE_DDIR_ATTRS(read2),
+ DEADLINE_QUEUE_DDIR_ATTRS(write2),
+ DEADLINE_NEXT_RQ_ATTR(read0),
+ DEADLINE_NEXT_RQ_ATTR(write0),
+ DEADLINE_NEXT_RQ_ATTR(read1),
+ DEADLINE_NEXT_RQ_ATTR(write1),
+ DEADLINE_NEXT_RQ_ATTR(read2),
+ DEADLINE_NEXT_RQ_ATTR(write2),
+ {"batching", 0400, deadline_batching_show},
+ {"starved", 0400, deadline_starved_show},
+ {"async_depth", 0400, dd_async_depth_show},
+ {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
+ {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
+ {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
+ {"owned_by_driver", 0400, dd_owned_by_driver_show},
+ {"queued", 0400, dd_queued_show},
+ {},
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
+static struct elevator_type mq_deadline = {
+ .ops = {
+ .depth_updated = dd_depth_updated,
+ .limit_depth = dd_limit_depth,
+ .insert_requests = dd_insert_requests,
+ .dispatch_request = dd_dispatch_request,
+ .prepare_request = dd_prepare_request,
+ .finish_request = dd_finish_request,
+ .next_request = elv_rb_latter_request,
+ .former_request = elv_rb_former_request,
+ .bio_merge = dd_bio_merge,
+ .request_merge = dd_request_merge,
+ .requests_merged = dd_merged_requests,
+ .request_merged = dd_request_merged,
+ .has_work = dd_has_work,
+ .init_sched = dd_init_sched,
+ .exit_sched = dd_exit_sched,
+ .init_hctx = dd_init_hctx,
+ },
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
+ .elevator_attrs = deadline_attrs,
+ .elevator_name = "mq-deadline",
+ .elevator_alias = "deadline",
+ .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
+ .elevator_owner = THIS_MODULE,
+};
+MODULE_ALIAS("mq-deadline-iosched");
+
+static int __init deadline_init(void)
+{
+ int ret;
+
+ ret = elv_register(&mq_deadline);
+ if (ret)
+ goto out;
+ ret = dd_blkcg_init();
+ if (ret)
+ goto unreg;
+
+out:
+ return ret;
+
+unreg:
+ elv_unregister(&mq_deadline);
+ goto out;
+}
+
+static void __exit deadline_exit(void)
+{
+ dd_blkcg_exit();
+ elv_unregister(&mq_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MQ deadline IO scheduler");
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
deleted file mode 100644
index 8eea2cbf2bf4..000000000000
--- a/block/mq-deadline.c
+++ /dev/null
@@ -1,815 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
- * for the blk-mq scheduling framework
- *
- * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/rbtree.h>
-#include <linux/sbitmap.h>
-
-#include <trace/events/block.h>
-
-#include "blk.h"
-#include "blk-mq.h"
-#include "blk-mq-debugfs.h"
-#include "blk-mq-tag.h"
-#include "blk-mq-sched.h"
-
-/*
- * See Documentation/block/deadline-iosched.rst
- */
-static const int read_expire = HZ / 2; /* max time before a read is submitted. */
-static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static const int writes_starved = 2; /* max times reads can starve a write */
-static const int fifo_batch = 16; /* # of sequential requests treated as one
- by the above parameters. For throughput. */
-
-struct deadline_data {
- /*
- * run time data
- */
-
- /*
- * requests (deadline_rq s) are present on both sort_list and fifo_list
- */
- struct rb_root sort_list[2];
- struct list_head fifo_list[2];
-
- /*
- * next in sort order. read, write or both are NULL
- */
- struct request *next_rq[2];
- unsigned int batching; /* number of sequential requests made */
- unsigned int starved; /* times reads have starved writes */
-
- /*
- * settings that change how the i/o scheduler behaves
- */
- int fifo_expire[2];
- int fifo_batch;
- int writes_starved;
- int front_merges;
-
- spinlock_t lock;
- spinlock_t zone_lock;
- struct list_head dispatch;
-};
-
-static inline struct rb_root *
-deadline_rb_root(struct deadline_data *dd, struct request *rq)
-{
- return &dd->sort_list[rq_data_dir(rq)];
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
- struct rb_node *node = rb_next(&rq->rb_node);
-
- if (node)
- return rb_entry_rq(node);
-
- return NULL;
-}
-
-static void
-deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- struct rb_root *root = deadline_rb_root(dd, rq);
-
- elv_rb_add(root, rq);
-}
-
-static inline void
-deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- if (dd->next_rq[data_dir] == rq)
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- elv_rb_del(deadline_rb_root(dd, rq), rq);
-}
-
-/*
- * remove rq from rbtree and fifo.
- */
-static void deadline_remove_request(struct request_queue *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- list_del_init(&rq->queuelist);
-
- /*
- * We might not be on the rbtree, if we are doing an insert merge
- */
- if (!RB_EMPTY_NODE(&rq->rb_node))
- deadline_del_rq_rb(dd, rq);
-
- elv_rqhash_del(q, rq);
- if (q->last_merge == rq)
- q->last_merge = NULL;
-}
-
-static void dd_request_merged(struct request_queue *q, struct request *req,
- enum elv_merge type)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- /*
- * if the merge was a front merge, we need to reposition request
- */
- if (type == ELEVATOR_FRONT_MERGE) {
- elv_rb_del(deadline_rb_root(dd, req), req);
- deadline_add_rq_rb(dd, req);
- }
-}
-
-static void dd_merged_requests(struct request_queue *q, struct request *req,
- struct request *next)
-{
- /*
- * if next expires before rq, assign its expire time to rq
- * and move into next position (next will be deleted) in fifo
- */
- if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before((unsigned long)next->fifo_time,
- (unsigned long)req->fifo_time)) {
- list_move(&req->queuelist, &next->queuelist);
- req->fifo_time = next->fifo_time;
- }
- }
-
- /*
- * kill knowledge of next, this one is a goner
- */
- deadline_remove_request(q, next);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- dd->next_rq[READ] = NULL;
- dd->next_rq[WRITE] = NULL;
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- /*
- * take it off the sort and fifo list
- */
- deadline_remove_request(rq->q, rq);
-}
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
-{
- struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
-
- /*
- * rq is expired!
- */
- if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
- return 1;
-
- return 0;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using arrival ordered lists.
- */
-static struct request *
-deadline_fifo_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
- unsigned long flags;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- if (list_empty(&dd->fifo_list[data_dir]))
- return NULL;
-
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- spin_lock_irqsave(&dd->zone_lock, flags);
- list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
- if (blk_req_can_dispatch_to_zone(rq))
- goto out;
- }
- rq = NULL;
-out:
- spin_unlock_irqrestore(&dd->zone_lock, flags);
-
- return rq;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using sector position sorted lists.
- */
-static struct request *
-deadline_next_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
- unsigned long flags;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- rq = dd->next_rq[data_dir];
- if (!rq)
- return NULL;
-
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- spin_lock_irqsave(&dd->zone_lock, flags);
- while (rq) {
- if (blk_req_can_dispatch_to_zone(rq))
- break;
- rq = deadline_latter_request(rq);
- }
- spin_unlock_irqrestore(&dd->zone_lock, flags);
-
- return rq;
-}
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc
- */
-static struct request *__dd_dispatch_request(struct deadline_data *dd)
-{
- struct request *rq, *next_rq;
- bool reads, writes;
- int data_dir;
-
- if (!list_empty(&dd->dispatch)) {
- rq = list_first_entry(&dd->dispatch, struct request, queuelist);
- list_del_init(&rq->queuelist);
- goto done;
- }
-
- reads = !list_empty(&dd->fifo_list[READ]);
- writes = !list_empty(&dd->fifo_list[WRITE]);
-
- /*
- * batches are currently reads XOR writes
- */
- rq = deadline_next_request(dd, WRITE);
- if (!rq)
- rq = deadline_next_request(dd, READ);
-
- if (rq && dd->batching < dd->fifo_batch)
- /* we have a next request are still entitled to batch */
- goto dispatch_request;
-
- /*
- * at this point we are not running a batch. select the appropriate
- * data direction (read / write)
- */
-
- if (reads) {
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
-
- if (deadline_fifo_request(dd, WRITE) &&
- (dd->starved++ >= dd->writes_starved))
- goto dispatch_writes;
-
- data_dir = READ;
-
- goto dispatch_find_request;
- }
-
- /*
- * there are either no reads or writes have been starved
- */
-
- if (writes) {
-dispatch_writes:
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
-
- dd->starved = 0;
-
- data_dir = WRITE;
-
- goto dispatch_find_request;
- }
-
- return NULL;
-
-dispatch_find_request:
- /*
- * we are not running a batch, find best request for selected data_dir
- */
- next_rq = deadline_next_request(dd, data_dir);
- if (deadline_check_fifo(dd, data_dir) || !next_rq) {
- /*
- * A deadline has expired, the last request was in the other
- * direction, or we have run out of higher-sectored requests.
- * Start again from the request with the earliest expiry time.
- */
- rq = deadline_fifo_request(dd, data_dir);
- } else {
- /*
- * The last req was the same dir and we have a next request in
- * sort order. No expired requests so continue on from here.
- */
- rq = next_rq;
- }
-
- /*
- * For a zoned block device, if we only have writes queued and none of
- * them can be dispatched, rq will be NULL.
- */
- if (!rq)
- return NULL;
-
- dd->batching = 0;
-
-dispatch_request:
- /*
- * rq is the selected appropriate request.
- */
- dd->batching++;
- deadline_move_request(dd, rq);
-done:
- /*
- * If the request needs its target zone locked, do it.
- */
- blk_req_zone_write_lock(rq);
- rq->rq_flags |= RQF_STARTED;
- return rq;
-}
-
-/*
- * One confusing aspect here is that we get called for a specific
- * hardware queue, but we may return a request that is for a
- * different hardware queue. This is because mq-deadline has shared
- * state for all hardware queues, in terms of sorting, FIFOs, etc.
- */
-static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
- struct deadline_data *dd = hctx->queue->elevator->elevator_data;
- struct request *rq;
-
- spin_lock(&dd->lock);
- rq = __dd_dispatch_request(dd);
- spin_unlock(&dd->lock);
-
- return rq;
-}
-
-static void dd_exit_queue(struct elevator_queue *e)
-{
- struct deadline_data *dd = e->elevator_data;
-
- BUG_ON(!list_empty(&dd->fifo_list[READ]));
- BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
-
- kfree(dd);
-}
-
-/*
- * initialize elevator private data (deadline_data).
- */
-static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct deadline_data *dd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
- if (!dd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = dd;
-
- INIT_LIST_HEAD(&dd->fifo_list[READ]);
- INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
- dd->sort_list[READ] = RB_ROOT;
- dd->sort_list[WRITE] = RB_ROOT;
- dd->fifo_expire[READ] = read_expire;
- dd->fifo_expire[WRITE] = write_expire;
- dd->writes_starved = writes_starved;
- dd->front_merges = 1;
- dd->fifo_batch = fifo_batch;
- spin_lock_init(&dd->lock);
- spin_lock_init(&dd->zone_lock);
- INIT_LIST_HEAD(&dd->dispatch);
-
- q->elevator = eq;
- return 0;
-}
-
-static int dd_request_merge(struct request_queue *q, struct request **rq,
- struct bio *bio)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- sector_t sector = bio_end_sector(bio);
- struct request *__rq;
-
- if (!dd->front_merges)
- return ELEVATOR_NO_MERGE;
-
- __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
- if (__rq) {
- BUG_ON(sector != blk_rq_pos(__rq));
-
- if (elv_bio_merge_ok(__rq, bio)) {
- *rq = __rq;
- return ELEVATOR_FRONT_MERGE;
- }
- }
-
- return ELEVATOR_NO_MERGE;
-}
-
-static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct request *free = NULL;
- bool ret;
-
- spin_lock(&dd->lock);
- ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
- spin_unlock(&dd->lock);
-
- if (free)
- blk_mq_free_request(free);
-
- return ret;
-}
-
-/*
- * add rq to rbtree and fifo
- */
-static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
- bool at_head)
-{
- struct request_queue *q = hctx->queue;
- struct deadline_data *dd = q->elevator->elevator_data;
- const int data_dir = rq_data_dir(rq);
-
- /*
- * This may be a requeue of a write request that has locked its
- * target zone. If it is the case, this releases the zone lock.
- */
- blk_req_zone_write_unlock(rq);
-
- if (blk_mq_sched_try_insert_merge(q, rq))
- return;
-
- trace_block_rq_insert(rq);
-
- if (at_head) {
- list_add(&rq->queuelist, &dd->dispatch);
- } else {
- deadline_add_rq_rb(dd, rq);
-
- if (rq_mergeable(rq)) {
- elv_rqhash_add(q, rq);
- if (!q->last_merge)
- q->last_merge = rq;
- }
-
- /*
- * set expire time and add to fifo list
- */
- rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
- list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
- }
-}
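
Insertion stamps each request with an absolute expiry (jiffies + fifo_expire); dispatch later compares that stamp against the current tick using the kernel's wrap-safe time_after() idiom. A standalone sketch of the wrap-safe comparison, assuming a free-running 32-bit counter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is after b", in the spirit of time_after(). */
static bool tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t stamp = 0xfffffff0u;    /* queued just before wraparound */
	uint32_t deadline = stamp + 100; /* wraps to 0x54 */
	uint32_t now = 0x00000005u;

	/* still correct across the wrap: 0x5 is before 0x54 */
	printf("expired: %s\n", tick_after(now, deadline) ? "yes" : "no");
	return 0;
}
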
-
-static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
- struct list_head *list, bool at_head)
-{
- struct request_queue *q = hctx->queue;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- spin_lock(&dd->lock);
- while (!list_empty(list)) {
- struct request *rq;
-
- rq = list_first_entry(list, struct request, queuelist);
- list_del_init(&rq->queuelist);
- dd_insert_request(hctx, rq, at_head);
- }
- spin_unlock(&dd->lock);
-}
-
-/*
- * Nothing to do here. This is defined only to ensure that .finish_request
- * method is called upon request completion.
- */
-static void dd_prepare_request(struct request *rq)
-{
-}
-
-/*
- * For zoned block devices, write unlock the target zone of
- * completed write requests. Do this while holding the zone lock
- * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * or deadline_next_request() are executing. This function is called for
- * all requests, whether or not these requests complete successfully.
- *
- * For a zoned block device, __dd_dispatch_request() may have stopped
- * dispatching requests if all the queued requests are write requests directed
- * at zones that are already locked due to on-going write requests. To ensure
- * write request dispatch progress in this case, mark the queue as needing a
- * restart to ensure that the queue is run again after completion of the
- * request and zones being unlocked.
- */
-static void dd_finish_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- if (blk_queue_is_zoned(q)) {
- struct deadline_data *dd = q->elevator->elevator_data;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->zone_lock, flags);
- blk_req_zone_write_unlock(rq);
- if (!list_empty(&dd->fifo_list[WRITE]))
- blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
- spin_unlock_irqrestore(&dd->zone_lock, flags);
- }
-}
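
To make the restart logic above concrete, here is a toy model of the flag-based re-kick: dispatch stalls on a write-locked zone, and the write completion both unlocks the zone and re-runs dispatch. This is a deliberately simplified single-zone model, not the kernel's per-hctx machinery:

#include <stdbool.h>
#include <stdio.h>

struct zone  { bool write_locked; };
struct queue { bool needs_restart; };

static bool try_dispatch(struct queue *q, struct zone *z)
{
	if (z->write_locked) {
		q->needs_restart = true; /* run again after the unlock */
		return false;
	}
	z->write_locked = true;          /* one write in flight per zone */
	return true;
}

static void complete_write(struct queue *q, struct zone *z)
{
	z->write_locked = false;
	if (q->needs_restart) {
		q->needs_restart = false;
		try_dispatch(q, z);      /* the kernel re-runs the hw queue */
	}
}

int main(void)
{
	struct zone z = { .write_locked = true };
	struct queue q = { 0 };

	printf("first try: %s\n", try_dispatch(&q, &z) ? "ok" : "blocked");
	complete_write(&q, &z);
	printf("zone relocked by restart: %s\n", z.write_locked ? "yes" : "no");
	return 0;
}
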
-
-static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
-{
- struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-
- return !list_empty_careful(&dd->dispatch) ||
- !list_empty_careful(&dd->fifo_list[0]) ||
- !list_empty_careful(&dd->fifo_list[1]);
-}
-
-/*
- * sysfs parts below
- */
-static ssize_t
-deadline_var_show(int var, char *page)
-{
- return sprintf(page, "%d\n", var);
-}
-
-static void
-deadline_var_store(int *var, const char *page)
-{
- char *p = (char *) page;
-
- *var = simple_strtol(p, &p, 10);
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data = __VAR; \
- if (__CONV) \
- __data = jiffies_to_msecs(__data); \
- return deadline_var_show(__data, (page)); \
-}
-SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data; \
- deadline_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
- if (__CONV) \
- *(__PTR) = msecs_to_jiffies(__data); \
- else \
- *(__PTR) = __data; \
- return count; \
-}
-STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
-#undef STORE_FUNCTION
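
The SHOW_FUNCTION/STORE_FUNCTION macros stamp out one sysfs accessor per tunable via token pasting. A self-contained user-space analogue of the same pattern (struct and field names are invented for illustration):

#include <stdio.h>

struct tunables { int read_expire_ms; int fifo_batch; };

/* One macro generates a getter per field, same trick as above. */
#define SHOW(name, field)                                            \
static int show_##name(const struct tunables *t, char *buf,          \
		       size_t len)                                    \
{                                                                     \
	return snprintf(buf, len, "%d\n", t->field);                  \
}
SHOW(read_expire, read_expire_ms)
SHOW(fifo_batch, fifo_batch)
#undef SHOW

int main(void)
{
	struct tunables t = { .read_expire_ms = 500, .fifo_batch = 16 };
	char buf[32];

	show_read_expire(&t, buf, sizeof(buf));
	fputs(buf, stdout);                     /* prints 500 */
	show_fifo_batch(&t, buf, sizeof(buf));
	fputs(buf, stdout);                     /* prints 16 */
	return 0;
}
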
-
-#define DD_ATTR(name) \
- __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
-
-static struct elv_fs_entry deadline_attrs[] = {
- DD_ATTR(read_expire),
- DD_ATTR(write_expire),
- DD_ATTR(writes_starved),
- DD_ATTR(front_merges),
- DD_ATTR(fifo_batch),
- __ATTR_NULL
-};
-
-#ifdef CONFIG_BLK_DEBUG_FS
-#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
-static void *deadline_##name##_fifo_start(struct seq_file *m, \
- loff_t *pos) \
- __acquires(&dd->lock) \
-{ \
- struct request_queue *q = m->private; \
- struct deadline_data *dd = q->elevator->elevator_data; \
- \
- spin_lock(&dd->lock); \
- return seq_list_start(&dd->fifo_list[ddir], *pos); \
-} \
- \
-static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
- loff_t *pos) \
-{ \
- struct request_queue *q = m->private; \
- struct deadline_data *dd = q->elevator->elevator_data; \
- \
- return seq_list_next(v, &dd->fifo_list[ddir], pos); \
-} \
- \
-static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
- __releases(&dd->lock) \
-{ \
- struct request_queue *q = m->private; \
- struct deadline_data *dd = q->elevator->elevator_data; \
- \
- spin_unlock(&dd->lock); \
-} \
- \
-static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
- .start = deadline_##name##_fifo_start, \
- .next = deadline_##name##_fifo_next, \
- .stop = deadline_##name##_fifo_stop, \
- .show = blk_mq_debugfs_rq_show, \
-}; \
- \
-static int deadline_##name##_next_rq_show(void *data, \
- struct seq_file *m) \
-{ \
- struct request_queue *q = data; \
- struct deadline_data *dd = q->elevator->elevator_data; \
- struct request *rq = dd->next_rq[ddir]; \
- \
- if (rq) \
- __blk_mq_debugfs_rq_show(m, rq); \
- return 0; \
-}
-DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
-DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
-#undef DEADLINE_DEBUGFS_DDIR_ATTRS
-
-static int deadline_batching_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- seq_printf(m, "%u\n", dd->batching);
- return 0;
-}
-
-static int deadline_starved_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- seq_printf(m, "%u\n", dd->starved);
- return 0;
-}
-
-static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
- __acquires(&dd->lock)
-{
- struct request_queue *q = m->private;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- spin_lock(&dd->lock);
- return seq_list_start(&dd->dispatch, *pos);
-}
-
-static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct request_queue *q = m->private;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- return seq_list_next(v, &dd->dispatch, pos);
-}
-
-static void deadline_dispatch_stop(struct seq_file *m, void *v)
- __releases(&dd->lock)
-{
- struct request_queue *q = m->private;
- struct deadline_data *dd = q->elevator->elevator_data;
-
- spin_unlock(&dd->lock);
-}
-
-static const struct seq_operations deadline_dispatch_seq_ops = {
- .start = deadline_dispatch_start,
- .next = deadline_dispatch_next,
- .stop = deadline_dispatch_stop,
- .show = blk_mq_debugfs_rq_show,
-};
-
-#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
- {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
- {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
-static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
- DEADLINE_QUEUE_DDIR_ATTRS(read),
- DEADLINE_QUEUE_DDIR_ATTRS(write),
- {"batching", 0400, deadline_batching_show},
- {"starved", 0400, deadline_starved_show},
- {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
- {},
-};
-#undef DEADLINE_QUEUE_DDIR_ATTRS
-#endif
-
-static struct elevator_type mq_deadline = {
- .ops = {
- .insert_requests = dd_insert_requests,
- .dispatch_request = dd_dispatch_request,
- .prepare_request = dd_prepare_request,
- .finish_request = dd_finish_request,
- .next_request = elv_rb_latter_request,
- .former_request = elv_rb_former_request,
- .bio_merge = dd_bio_merge,
- .request_merge = dd_request_merge,
- .requests_merged = dd_merged_requests,
- .request_merged = dd_request_merged,
- .has_work = dd_has_work,
- .init_sched = dd_init_queue,
- .exit_sched = dd_exit_queue,
- },
-
-#ifdef CONFIG_BLK_DEBUG_FS
- .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
-#endif
- .elevator_attrs = deadline_attrs,
- .elevator_name = "mq-deadline",
- .elevator_alias = "deadline",
- .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
- .elevator_owner = THIS_MODULE,
-};
-MODULE_ALIAS("mq-deadline-iosched");
-
-static int __init deadline_init(void)
-{
- return elv_register(&mq_deadline);
-}
-
-static void __exit deadline_exit(void)
-{
- elv_unregister(&mq_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MQ deadline IO scheduler");
diff --git a/block/partitions/core.c b/block/partitions/core.c
index dc60ecf46fe6..347c56a51d87 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -120,8 +120,7 @@ static void free_partitions(struct parsed_partitions *state)
kfree(state);
}
-static struct parsed_partitions *check_partition(struct gendisk *hd,
- struct block_device *bdev)
+static struct parsed_partitions *check_partition(struct gendisk *hd)
{
struct parsed_partitions *state;
int i, res, err;
@@ -136,7 +135,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd,
}
state->pp_buf[0] = '\0';
- state->bdev = bdev;
+ state->bdev = hd->part0;
disk_name(hd, 0, state->name);
snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
if (isdigit(state->name[strlen(state->name)-1]))
@@ -260,7 +259,8 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
- blk_free_devt(dev->devt);
+ if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
+ blk_free_ext_minor(MINOR(dev->devt));
bdput(dev_to_bdev(dev));
}
@@ -282,7 +282,7 @@ struct device_type part_type = {
};
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with open_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static void delete_partition(struct block_device *part)
@@ -311,7 +311,7 @@ static ssize_t whole_disk_show(struct device *dev,
static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with open_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static struct block_device *add_partition(struct gendisk *disk, int partno,
@@ -325,10 +325,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
const char *dname;
int err;
- /*
- * disk_max_parts() won't be zero, either GENHD_FL_EXT_DEVT is set
- * or 'minors' is passed to alloc_disk().
- */
+ lockdep_assert_held(&disk->open_mutex);
+
if (partno >= disk_max_parts(disk))
return ERR_PTR(-EINVAL);
@@ -379,9 +377,15 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
pdev->type = &part_type;
pdev->parent = ddev;
- err = blk_alloc_devt(bdev, &devt);
- if (err)
- goto out_put;
+ /* in consecutive minor range? */
+ if (bdev->bd_partno < disk->minors) {
+ devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
+ } else {
+ err = blk_alloc_ext_minor();
+ if (err < 0)
+ goto out_put;
+ devt = MKDEV(BLOCK_EXT_MAJOR, err);
+ }
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
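
When the partition number fits in the disk's contiguous minor range, the hunk above packs the device number directly with MKDEV(). The kernel-internal dev_t layout is a 12-bit major and a 20-bit minor; a standalone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Kernel-internal dev_t layout: 12-bit major, 20-bit minor. */
#define MINORBITS 20
#define MINORMASK ((1u << MINORBITS) - 1)
#define MKDEV(ma, mi) (((uint32_t)(ma) << MINORBITS) | (mi))
#define MAJOR(dev)    ((dev) >> MINORBITS)
#define MINOR(dev)    ((dev) & MINORMASK)

int main(void)
{
	/* e.g. sda3: major 8, first_minor 0 + partno 3, cf. the hunk above */
	uint32_t devt = MKDEV(8, 0 + 3);

	printf("%u:%u\n", MAJOR(devt), MINOR(devt)); /* prints 8:3 */
	return 0;
}
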
@@ -450,29 +454,27 @@ int bdev_add_partition(struct block_device *bdev, int partno,
{
struct block_device *part;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return -EBUSY;
}
part = add_partition(bdev->bd_disk, partno, start, length,
ADDPART_FLAG_NONE, NULL);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return PTR_ERR_OR_ZERO(part);
}
int bdev_del_partition(struct block_device *bdev, int partno)
{
- struct block_device *part;
- int ret;
+ struct block_device *part = NULL;
+ int ret = -ENXIO;
- part = bdget_disk(bdev->bd_disk, partno);
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ part = xa_load(&bdev->bd_disk->part_tbl, partno);
if (!part)
- return -ENXIO;
-
- mutex_lock(&part->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
+ goto out_unlock;
ret = -EBUSY;
if (part->bd_openers)
@@ -481,24 +483,21 @@ int bdev_del_partition(struct block_device *bdev, int partno)
delete_partition(part);
ret = 0;
out_unlock:
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&part->bd_mutex);
- bdput(part);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length)
{
- struct block_device *part;
- int ret = 0;
+ struct block_device *part = NULL;
+ int ret = -ENXIO;
- part = bdget_disk(bdev->bd_disk, partno);
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ part = xa_load(&bdev->bd_disk->part_tbl, partno);
if (!part)
- return -ENXIO;
+ goto out_unlock;
- mutex_lock(&part->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
ret = -EINVAL;
if (start != part->bd_start_sect)
goto out_unlock;
@@ -511,9 +510,7 @@ int bdev_resize_partition(struct block_device *bdev, int partno,
ret = 0;
out_unlock:
- mutex_unlock(&part->bd_mutex);
- mutex_unlock(&bdev->bd_mutex);
- bdput(part);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
@@ -538,7 +535,7 @@ void blk_drop_partitions(struct gendisk *disk)
struct block_device *part;
unsigned long idx;
- lockdep_assert_held(&disk->part0->bd_mutex);
+ lockdep_assert_held(&disk->open_mutex);
xa_for_each_start(&disk->part_tbl, idx, part, 1) {
if (!bdgrab(part))
@@ -548,7 +545,7 @@ void blk_drop_partitions(struct gendisk *disk)
}
}
-static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
+static bool blk_add_partition(struct gendisk *disk,
struct parsed_partitions *state, int p)
{
sector_t size = state->parts[p].size;
@@ -598,7 +595,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
return true;
}
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
+static int blk_add_partitions(struct gendisk *disk)
{
struct parsed_partitions *state;
int ret = -EAGAIN, p;
@@ -606,7 +603,7 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
if (!disk_part_scan_enabled(disk))
return 0;
- state = check_partition(disk, bdev);
+ state = check_partition(disk);
if (!state)
return 0;
if (IS_ERR(state)) {
@@ -650,7 +647,7 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
for (p = 1; p < state->limit; p++)
- if (!blk_add_partition(disk, bdev, state, p))
+ if (!blk_add_partition(disk, state, p))
goto out_free_state;
ret = 0;
@@ -659,6 +656,58 @@ out_free_state:
return ret;
}
+int bdev_disk_changed(struct gendisk *disk, bool invalidate)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&disk->open_mutex);
+
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
+rescan:
+ if (disk->open_partitions)
+ return -EBUSY;
+ sync_blockdev(disk->part0);
+ invalidate_bdev(disk->part0);
+ blk_drop_partitions(disk);
+
+ clear_bit(GD_NEED_PART_SCAN, &disk->state);
+
+ /*
+ * Historically we only set the capacity to zero for devices that
+ * support partitions (independent of actually having partitions created).
+ * Doing that is rather inconsistent, but changing it broke legacy
+ * udisks polling for legacy ide-cdrom devices. Use the crude check
+ * below to get the sane behavior for most devices while not breaking
+ * userspace for this particular setup.
+ */
+ if (invalidate) {
+ if (disk_part_scan_enabled(disk) ||
+ !(disk->flags & GENHD_FL_REMOVABLE))
+ set_capacity(disk, 0);
+ }
+
+ if (get_capacity(disk)) {
+ ret = blk_add_partitions(disk);
+ if (ret == -EAGAIN)
+ goto rescan;
+ } else if (invalidate) {
+ /*
+ * Tell userspace that the media / partition table may have
+ * changed.
+ */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ }
+
+ return ret;
+}
+/*
+ * Only exported for loop and dasd for historic reasons. Don't use in new
+ * code!
+ */
+EXPORT_SYMBOL_GPL(bdev_disk_changed);
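
The rescan label above implements a retry loop: if blk_add_partitions() observes a concurrent change and returns -EAGAIN, the whole invalidate-and-scan sequence restarts from the top. A stripped-down model of that control flow, with the race simulated:

#include <stdio.h>

static int scan_once(int *attempt)
{
	/* pretend the first pass races with a concurrent table update */
	return (*attempt)++ == 0 ? -11 /* -EAGAIN */ : 0;
}

int main(void)
{
	int attempt = 0, ret;

rescan:
	/* invalidate cached state, then re-read the partition table */
	ret = scan_once(&attempt);
	if (ret == -11)
		goto rescan;

	printf("settled after %d pass(es)\n", attempt);
	return ret ? 1 : 0;
}
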
+
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
struct address_space *mapping = state->bdev->bd_inode->i_mapping;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 8f2fcc080264..63e4f6f8b6e9 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -622,7 +622,7 @@ int msdos_partition(struct parsed_partitions *state)
for (slot = 1; slot <= 4; slot++, p++) {
if (p->boot_ind != 0 && p->boot_ind != 0x80) {
/*
- * Even without a valid boot inidicator value
+ * Even without a valid boot indicator value
* it's still possible this is a valid FAT filesystem
* without a partition table.
*/
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 18cc82dc4a42..8bd288d2b089 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -411,7 +411,7 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
if (n < 0)
return n;
- npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
if (WARN_ON(npages == 0))
return -EINVAL;
/* Add one extra for linking */
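
The replacement above is purely cosmetic: for a power-of-two page size, the open-coded add-and-shift and DIV_ROUND_UP() compute the same value. A quick standalone check:

#include <assert.h>
#include <stdio.h>

/* Kernel definition, reproduced for illustration. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long page_size = 4096, page_shift = 12;

	for (unsigned long n = 0; n < 3 * page_size; n += 511) {
		unsigned long old = (n + page_size - 1) >> page_shift;

		assert(old == DIV_ROUND_UP(n, page_size));
	}
	puts("open-coded shift and DIV_ROUND_UP agree");
	return 0;
}
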
diff --git a/crypto/algapi.c b/crypto/algapi.c
index fdabf2675b63..43f999dba4dc 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -868,24 +868,6 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-int crypto_attr_u32(struct rtattr *rta, u32 *num)
-{
- struct crypto_attr_u32 *nu32;
-
- if (!rta)
- return -ENOENT;
- if (RTA_PAYLOAD(rta) < sizeof(*nu32))
- return -EINVAL;
- if (rta->rta_type != CRYPTOA_U32)
- return -EINVAL;
-
- nu32 = RTA_DATA(rta);
- *num = nu32->num;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_attr_u32);
-
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg)
{
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 5ebccbd6b74e..1814d2c5188a 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -28,16 +28,9 @@ struct cryptomgr_param {
struct crypto_attr_type data;
} type;
- union {
+ struct {
struct rtattr attr;
- struct {
- struct rtattr attr;
- struct crypto_attr_alg data;
- } alg;
- struct {
- struct rtattr attr;
- struct crypto_attr_u32 data;
- } nu32;
+ struct crypto_attr_alg data;
} attrs[CRYPTO_MAX_ATTRS];
char template[CRYPTO_MAX_ALG_NAME];
@@ -104,12 +97,10 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
i = 0;
for (;;) {
- int notnum = 0;
-
name = ++p;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
- notnum |= !isdigit(*p);
+ ;
if (*p == '(') {
int recursion = 0;
@@ -123,7 +114,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
break;
}
- notnum = 1;
p++;
}
@@ -131,18 +121,9 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
if (!len)
goto err_free_param;
- if (notnum) {
- param->attrs[i].alg.attr.rta_len =
- sizeof(param->attrs[i].alg);
- param->attrs[i].alg.attr.rta_type = CRYPTOA_ALG;
- memcpy(param->attrs[i].alg.data.name, name, len);
- } else {
- param->attrs[i].nu32.attr.rta_len =
- sizeof(param->attrs[i].nu32);
- param->attrs[i].nu32.attr.rta_type = CRYPTOA_U32;
- param->attrs[i].nu32.data.num =
- simple_strtol(name, NULL, 0);
- }
+ param->attrs[i].attr.rta_len = sizeof(param->attrs[i]);
+ param->attrs[i].attr.rta_type = CRYPTOA_ALG;
+ memcpy(param->attrs[i].data.name, name, len);
param->tb[i + 1] = &param->attrs[i].attr;
i++;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 6cd7f7025df4..d8a91521144e 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
- src_offs++;
+ if (src_offs)
+ src_offs++;
}
/* wait for any prerequisite operations */
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 1b4587e0ddad..ea85d4a0fe9e 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -178,16 +178,16 @@ static const struct drbg_core drbg_cores[] = {
.backend_cra_name = "hmac(sha384)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
- .statelen = 64, /* block length of cipher */
- .blocklen_bytes = 64,
- .cra_name = "hmac_sha512",
- .backend_cra_name = "hmac(sha512)",
- }, {
- .flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 32, /* block length of cipher */
.blocklen_bytes = 32,
.cra_name = "hmac_sha256",
.backend_cra_name = "hmac(sha256)",
+ }, {
+ .flags = DRBG_HMAC | DRBG_STRENGTH256,
+ .statelen = 64, /* block length of cipher */
+ .blocklen_bytes = 64,
+ .cra_name = "hmac_sha512",
+ .backend_cra_name = "hmac(sha512)",
},
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 04a427b8c956..c6f61c2211dc 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -141,7 +141,7 @@ static struct kpp_alg ecdh_nist_p192 = {
.init = ecdh_nist_p192_init_tfm,
.base = {
.cra_name = "ecdh-nist-p192",
- .cra_driver_name = "ecdh-generic",
+ .cra_driver_name = "ecdh-nist-p192-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
@@ -166,7 +166,32 @@ static struct kpp_alg ecdh_nist_p256 = {
.init = ecdh_nist_p256_init_tfm,
.base = {
.cra_name = "ecdh-nist-p256",
- .cra_driver_name = "ecdh-generic",
+ .cra_driver_name = "ecdh-nist-p256-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecdh_ctx),
+ },
+};
+
+static int ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P384;
+ ctx->ndigits = ECC_CURVE_NIST_P384_DIGITS;
+
+ return 0;
+}
+
+static struct kpp_alg ecdh_nist_p384 = {
+ .set_secret = ecdh_set_secret,
+ .generate_public_key = ecdh_compute_value,
+ .compute_shared_secret = ecdh_compute_value,
+ .max_size = ecdh_max_size,
+ .init = ecdh_nist_p384_init_tfm,
+ .base = {
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "ecdh-nist-p384-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
@@ -179,10 +204,27 @@ static int ecdh_init(void)
{
int ret;
+ /* NIST p192 will fail to register in FIPS mode */
ret = crypto_register_kpp(&ecdh_nist_p192);
ecdh_nist_p192_registered = ret == 0;
- return crypto_register_kpp(&ecdh_nist_p256);
+ ret = crypto_register_kpp(&ecdh_nist_p256);
+ if (ret)
+ goto nist_p256_error;
+
+ ret = crypto_register_kpp(&ecdh_nist_p384);
+ if (ret)
+ goto nist_p384_error;
+
+ return 0;
+
+nist_p384_error:
+ crypto_unregister_kpp(&ecdh_nist_p256);
+
+nist_p256_error:
+ if (ecdh_nist_p192_registered)
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
}
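
The new init path unwinds in reverse registration order on failure, the usual kernel goto-ladder. The same shape in a self-contained sketch, where register_c() is forced to fail:

#include <stdio.h>

static int  register_a(void)   { return 0; }
static int  register_b(void)   { return 0; }
static int  register_c(void)   { return -1; } /* forced failure */
static void unregister_b(void) { puts("unregister b"); }
static void unregister_a(void) { puts("unregister a"); }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	ret = register_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	unregister_b();  /* undo in reverse order */
err_a:
	unregister_a();
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
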
static void ecdh_exit(void)
@@ -190,6 +232,7 @@ static void ecdh_exit(void)
if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
crypto_unregister_kpp(&ecdh_nist_p256);
+ crypto_unregister_kpp(&ecdh_nist_p384);
}
subsys_initcall(ecdh_init);
diff --git a/crypto/internal.h b/crypto/internal.h
index 976ec9dfc76d..f00869af689f 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -29,6 +29,18 @@ struct crypto_larval {
u32 mask;
};
+enum {
+ CRYPTOA_UNSPEC,
+ CRYPTOA_ALG,
+ CRYPTOA_TYPE,
+ __CRYPTOA_MAX,
+};
+
+#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
+
+/* Maximum number of (rtattr) parameters for each template. */
+#define CRYPTO_MAX_ATTRS 32
+
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 14ca7f1631c7..f19339954c89 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -819,7 +819,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
T6[(int)(state >> 8) & 0xff] ^
T7[(int)(state ) & 0xff] ^
roundKey[r];
- }
+ }
state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
(T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
diff --git a/crypto/shash.c b/crypto/shash.c
index 2e3433ad9762..0a0a50cb694f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -20,12 +20,24 @@
static const struct crypto_type crypto_shash_type;
-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
{
return -ENOSYS;
}
-EXPORT_SYMBOL_GPL(shash_no_setkey);
+
+/*
+ * Check whether an shash algorithm has a setkey function.
+ *
+ * For CFI compatibility, this must not be an inline function. This is because
+ * when CFI is enabled, modules won't get the same address for shash_no_setkey
+ * (if it were exported, which inlining would require) as the core kernel will.
+ */
+bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+ return alg->setkey != shash_no_setkey;
+}
+EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
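
The helper compares a function pointer against a canonical stub, which only works if every caller sees the same address for the stub; that is the CFI subtlety the comment describes. A user-space sketch of the comparison itself (error values are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct alg {
	int (*setkey)(const unsigned char *key, unsigned int keylen);
};

/* Canonical "no setkey" stub, like shash_no_setkey() above. */
static int no_setkey(const unsigned char *key, unsigned int keylen)
{
	(void)key; (void)keylen;
	return -38; /* -ENOSYS */
}

/* Identity comparison: an algorithm is keyed iff its setkey hook
 * is not the shared stub. */
static bool has_setkey(const struct alg *a)
{
	return a->setkey != no_setkey;
}

int main(void)
{
	struct alg unkeyed = { .setkey = no_setkey };

	printf("unkeyed needs a key? %s\n",
	       has_setkey(&unkeyed) ? "yes" : "no");
	return 0;
}
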
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
diff --git a/crypto/sm2.c b/crypto/sm2.c
index b21addc3ac06..db8a4a265669 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -79,10 +79,17 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
goto free;
rc = -ENOMEM;
+
+ ec->Q = mpi_point_new(0);
+ if (!ec->Q)
+ goto free;
+
/* mpi_ec_setup_elliptic_curve */
ec->G = mpi_point_new(0);
- if (!ec->G)
+ if (!ec->G) {
+ mpi_point_release(ec->Q);
goto free;
+ }
mpi_set(ec->G->x, x);
mpi_set(ec->G->y, y);
@@ -91,6 +98,7 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
rc = -EINVAL;
ec->n = mpi_scanval(ecp->n);
if (!ec->n) {
+ mpi_point_release(ec->Q);
mpi_point_release(ec->G);
goto free;
}
@@ -386,27 +394,15 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
MPI a;
int rc;
- ec->Q = mpi_point_new(0);
- if (!ec->Q)
- return -ENOMEM;
-
/* include the uncompressed flag '0x04' */
- rc = -ENOMEM;
a = mpi_read_raw_data(key, keylen);
if (!a)
- goto error;
+ return -ENOMEM;
mpi_normalize(a);
rc = sm2_ecc_os2ec(ec->Q, a);
mpi_free(a);
- if (rc)
- goto error;
-
- return 0;
-error:
- mpi_point_release(ec->Q);
- ec->Q = NULL;
return rc;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 6b7c158dc508..f8d06da78e4f 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1847,10 +1847,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("cts(cbc(aes))");
break;
+ case 39:
+ ret += tcrypt_test("xxhash64");
+ break;
+
case 40:
ret += tcrypt_test("rmd160");
break;
+ case 41:
+ ret += tcrypt_test("blake2s-256");
+ break;
+
+ case 42:
+ ret += tcrypt_test("blake2b-512");
+ break;
+
case 43:
ret += tcrypt_test("ecb(seed)");
break;
@@ -2356,10 +2368,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
+ case 314:
+ test_hash_speed("xxhash64", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ fallthrough;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
+ case 316:
+ test_hash_speed("blake2s-256", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ fallthrough;
+ case 317:
+ test_hash_speed("blake2b-512", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ fallthrough;
case 318:
klen = 16;
test_hash_speed("ghash", sec, generic_hash_speed_template);
@@ -2456,10 +2480,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
+ case 414:
+ test_ahash_speed("xxhash64", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
+ case 416:
+ test_ahash_speed("blake2s-256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 417:
+ test_ahash_speed("blake2b-512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 10c5b3b01ec4..1f7f63e836ae 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4899,15 +4899,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
#endif
-#ifndef CONFIG_CRYPTO_FIPS
.alg = "ecdh-nist-p192",
.test = alg_test_kpp,
- .fips_allowed = 1,
.suite = {
.kpp = __VECS(ecdh_p192_tv_template)
}
}, {
-#endif
.alg = "ecdh-nist-p256",
.test = alg_test_kpp,
.fips_allowed = 1,
@@ -4915,6 +4912,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.kpp = __VECS(ecdh_p256_tv_template)
}
}, {
+ .alg = "ecdh-nist-p384",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ecdh_p384_tv_template)
+ }
+ }, {
.alg = "ecdsa-nist-p192",
.test = alg_test_akcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 34e4a3db3991..96eb7ce9f81b 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2685,7 +2685,6 @@ static const struct kpp_testvec curve25519_tv_template[] = {
}
};
-#ifndef CONFIG_CRYPTO_FIPS
static const struct kpp_testvec ecdh_p192_tv_template[] = {
{
.secret =
@@ -2719,13 +2718,12 @@ static const struct kpp_testvec ecdh_p192_tv_template[] = {
"\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc"
"\xe3\x40\x60\xc8\x06\x93\xc6\x2e"
"\x99\x80\x81\x28\xaf\xc5\x51\x74",
- .secret_size = 32,
+ .secret_size = 30,
.b_public_size = 48,
.expected_a_public_size = 48,
.expected_ss_size = 24
}
};
-#endif
static const struct kpp_testvec ecdh_p256_tv_template[] = {
{
@@ -2766,7 +2764,7 @@ static const struct kpp_testvec ecdh_p256_tv_template[] = {
"\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f"
"\xb1\x32\xbb\xaf\x22\x61\xda\xcb"
"\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3",
- .secret_size = 40,
+ .secret_size = 38,
.b_public_size = 64,
.expected_a_public_size = 64,
.expected_ss_size = 32
@@ -2804,8 +2802,8 @@ static const struct kpp_testvec ecdh_p256_tv_template[] = {
"\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
"\x6a\x02\x6e\x41\x87\x68\x38\x77"
"\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
- .secret_size = 8,
- .b_secret_size = 40,
+ .secret_size = 6,
+ .b_secret_size = 38,
.b_public_size = 64,
.expected_a_public_size = 64,
.expected_ss_size = 32,
@@ -2814,6 +2812,67 @@ static const struct kpp_testvec ecdh_p256_tv_template[] = {
};
/*
+ * NIST P384 test vectors from RFC5903
+ */
+static const struct kpp_testvec ecdh_p384_tv_template[] = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x02\x00" /* type */
+ "\x36\x00" /* len */
+ "\x30\x00" /* key_size */
+#else
+ "\x00\x02" /* type */
+ "\x00\x36" /* len */
+ "\x00\x30" /* key_size */
+#endif
+ "\x09\x9F\x3C\x70\x34\xD4\xA2\xC6"
+ "\x99\x88\x4D\x73\xA3\x75\xA6\x7F"
+ "\x76\x24\xEF\x7C\x6B\x3C\x0F\x16"
+ "\x06\x47\xB6\x74\x14\xDC\xE6\x55"
+ "\xE3\x5B\x53\x80\x41\xE6\x49\xEE"
+ "\x3F\xAE\xF8\x96\x78\x3A\xB1\x94",
+ .b_public =
+ "\xE5\x58\xDB\xEF\x53\xEE\xCD\xE3"
+ "\xD3\xFC\xCF\xC1\xAE\xA0\x8A\x89"
+ "\xA9\x87\x47\x5D\x12\xFD\x95\x0D"
+ "\x83\xCF\xA4\x17\x32\xBC\x50\x9D"
+ "\x0D\x1A\xC4\x3A\x03\x36\xDE\xF9"
+ "\x6F\xDA\x41\xD0\x77\x4A\x35\x71"
+ "\xDC\xFB\xEC\x7A\xAC\xF3\x19\x64"
+ "\x72\x16\x9E\x83\x84\x30\x36\x7F"
+ "\x66\xEE\xBE\x3C\x6E\x70\xC4\x16"
+ "\xDD\x5F\x0C\x68\x75\x9D\xD1\xFF"
+ "\xF8\x3F\xA4\x01\x42\x20\x9D\xFF"
+ "\x5E\xAA\xD9\x6D\xB9\xE6\x38\x6C",
+ .expected_a_public =
+ "\x66\x78\x42\xD7\xD1\x80\xAC\x2C"
+ "\xDE\x6F\x74\xF3\x75\x51\xF5\x57"
+ "\x55\xC7\x64\x5C\x20\xEF\x73\xE3"
+ "\x16\x34\xFE\x72\xB4\xC5\x5E\xE6"
+ "\xDE\x3A\xC8\x08\xAC\xB4\xBD\xB4"
+ "\xC8\x87\x32\xAE\xE9\x5F\x41\xAA"
+ "\x94\x82\xED\x1F\xC0\xEE\xB9\xCA"
+ "\xFC\x49\x84\x62\x5C\xCF\xC2\x3F"
+ "\x65\x03\x21\x49\xE0\xE1\x44\xAD"
+ "\xA0\x24\x18\x15\x35\xA0\xF3\x8E"
+ "\xEB\x9F\xCF\xF3\xC2\xC9\x47\xDA"
+ "\xE6\x9B\x4C\x63\x45\x73\xA8\x1C",
+ .expected_ss =
+ "\x11\x18\x73\x31\xC2\x79\x96\x2D"
+ "\x93\xD6\x04\x24\x3F\xD5\x92\xCB"
+ "\x9D\x0A\x92\x6F\x42\x2E\x47\x18"
+ "\x75\x21\x28\x7E\x71\x56\xC5\xC4"
+ "\xD6\x03\x13\x55\x69\xB9\xE9\xD0"
+ "\x9C\xF5\xD4\xA2\x70\xF5\x97\x46",
+ .secret_size = 54,
+ .b_public_size = 96,
+ .expected_a_public_size = 96,
+ .expected_ss_size = 48
+ }
+};
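
The .secret blobs in these vectors begin with a native-endian header (type, length, key size) followed by the raw private key, which is why secret_size is 54 = 3 × 2 + 48 for P-384. A sketch of that layout; the struct name and field split are inferred from the vectors above rather than quoted from the kernel headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Inferred layout of the kpp "secret" blob used by these vectors:
 * a native-endian header followed by the raw private key. */
struct ecdh_secret_hdr {
	uint16_t type;     /* 0x0002 for ECDH in these vectors */
	uint16_t len;      /* total blob length: 6 + key_size */
	uint16_t key_size; /* 48 bytes for NIST P-384 */
};

int main(void)
{
	unsigned char blob[6 + 48] = { 0 };
	struct ecdh_secret_hdr hdr = {
		.type = 0x0002,
		.len = sizeof(blob),
		.key_size = 48,
	};

	memcpy(blob, &hdr, sizeof(hdr)); /* native endianness, as above */
	printf("secret_size = %zu\n", sizeof(blob)); /* 54, matching the vector */
	return 0;
}
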
+
+/*
* MD4 test vectors from RFC1320
*/
static const struct hash_testvec md4_tv_template[] = {
diff --git a/crypto/wp512.c b/crypto/wp512.c
index feadc13ccae0..bf79fbb2340f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1066,33 +1066,31 @@ static int wp512_final(struct shash_desc *desc, u8 *out)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
- u8 *buffer = wctx->buffer;
- u8 *bitLength = wctx->bitLength;
- int bufferBits = wctx->bufferBits;
- int bufferPos = wctx->bufferPos;
+ u8 *buffer = wctx->buffer;
+ u8 *bitLength = wctx->bitLength;
+ int bufferBits = wctx->bufferBits;
+ int bufferPos = wctx->bufferPos;
__be64 *digest = (__be64 *)out;
- buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
- bufferPos++;
- if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
- if (bufferPos < WP512_BLOCK_SIZE) {
- memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
- }
- wp512_process_buffer(wctx);
- bufferPos = 0;
- }
- if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
- memset(&buffer[bufferPos], 0,
+ buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
+ bufferPos++;
+ if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
+ if (bufferPos < WP512_BLOCK_SIZE)
+ memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
+ wp512_process_buffer(wctx);
+ bufferPos = 0;
+ }
+ if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES)
+ memset(&buffer[bufferPos], 0,
(WP512_BLOCK_SIZE - WP512_LENGTHBYTES) - bufferPos);
- }
- bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
- memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
+ bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
+ memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
- wp512_process_buffer(wctx);
+ wp512_process_buffer(wctx);
for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[i] = cpu_to_be64(wctx->hash[i]);
- wctx->bufferBits = bufferBits;
- wctx->bufferPos = bufferPos;
+ wctx->bufferBits = bufferBits;
+ wctx->bufferPos = bufferPos;
return 0;
}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 47980c6b1945..8bad63417a50 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -33,8 +33,6 @@ source "drivers/nvme/Kconfig"
source "drivers/misc/Kconfig"
-source "drivers/ide/Kconfig"
-
source "drivers/scsi/Kconfig"
source "drivers/ata/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 5a6d613e868d..27c018bdf4de 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -78,7 +78,6 @@ obj-$(CONFIG_CXL_BUS) += cxl/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
-obj-$(CONFIG_IDE) += ide/
obj-y += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
@@ -161,7 +160,7 @@ obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(CONFIG_HYPERV) += hv/
+obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index 9861302cc7db..359bead4b280 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -246,6 +246,7 @@ static int keyboard_notifier_call(struct notifier_block *blk,
beep(440);
}
}
+ break;
case KBD_UNBOUND_KEYCODE:
case KBD_UNICODE:
case KBD_KEYSYM:
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index eedec61e3476..3972de7b7565 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -543,3 +543,8 @@ config X86_PM_TIMER
You should nearly always say Y here because many modern
systems require this timer.
+
+config ACPI_PRMT
+ bool "Platform Runtime Mechanism Support"
+ depends on EFI && X86_64
+ default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 700b41adf2db..ceb1aed4b1fc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
# ACPI Boot-Time Table Parsing
#
+ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
+tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
+
+endif
+
obj-$(CONFIG_ACPI) += tables.o
obj-$(CONFIG_X86) += blacklist.o
@@ -61,6 +66,7 @@ acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
+acpi-$(CONFIG_ACPI_PRMT) += prmt.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 0ec5b3f69112..6e02448d15d9 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
+ { "AMDI0022", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
{ "AMD0040", APD_ADDR(fch_misc_desc)},
{ "HYGO0010", APD_ADDR(wt_i2c_desc) },
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 67f1d33d15c4..4cf4aef7ce0c 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -6,6 +6,8 @@
* Authors: Lan Tianyu <tianyu.lan@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -59,7 +61,7 @@ static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev,
&acpi_cmos_rtc_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
- pr_err(PREFIX "Error installing CMOS-RTC region handler\n");
+ pr_err("Error installing CMOS-RTC region handler\n");
return -ENODEV;
}
@@ -70,7 +72,7 @@ static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle,
ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
- pr_err(PREFIX "Error removing CMOS-RTC region handler\n");
+ pr_err("Error removing CMOS-RTC region handler\n");
}
static struct acpi_scan_handler cmos_rtc_handler = {
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 3a14859dbb75..76b83b181356 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -13,9 +13,6 @@
#include <linux/acpi.h>
#include <linux/security.h>
-#include "acpica/accommon.h"
-#include "acpica/actables.h"
-
static struct config_group *acpi_table_group;
struct acpi_table {
@@ -226,7 +223,7 @@ static void acpi_table_drop_item(struct config_group *group,
{
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
- ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
+ pr_debug("Host-directed Dynamic ACPI Table Unload\n");
acpi_unload_table(table->index);
config_item_put(cfg);
}
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
index a89a806a7a2a..4ee2ad234e3d 100644
--- a/drivers/acpi/acpi_fpdt.c
+++ b/drivers/acpi/acpi_fpdt.c
@@ -240,8 +240,10 @@ static int __init acpi_init_fpdt(void)
return 0;
fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
- if (!fpdt_kobj)
+ if (!fpdt_kobj) {
+ acpi_put_table(header);
return -ENOMEM;
+ }
while (offset < header->length) {
subtable = (void *)header + offset;
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index bbd00d96b7a8..a5fe2926bf50 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -597,9 +597,14 @@ static int __init acpi_ipmi_init(void)
pr_warn("Can't register IPMI opregion space handle\n");
return -EINVAL;
}
+
result = ipmi_smi_watcher_register(&driver_data.bmc_events);
- if (result)
+ if (result) {
+ acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_IPMI,
+ &acpi_ipmi_space_handler);
pr_err("Can't register IPMI system interface watcher\n");
+ }
return result;
}
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index ca742f16a507..894b7e6ae144 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -186,13 +186,12 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
long uid = 0;
/* Expected to always be true, but better safe than sorry */
- if (uid_str)
- uid = simple_strtol(uid_str, NULL, 10);
-
- /* Detect I2C bus shared with PUNIT and ignore its d3 status */
- status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
- if (ACPI_SUCCESS(status) && shared_host && uid)
- pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
+ if (uid_str && !kstrtol(uid_str, 10, &uid) && uid) {
+ /* Detect I2C bus shared with PUNIT and ignore its d3 status */
+ status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+ if (ACPI_SUCCESS(status) && shared_host)
+ pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
+ }
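
The switch from simple_strtol() to kstrtol() above matters because the former silently accepts trailing junk and cannot cleanly signal overflow. A user-space parser with the same strict semantics, built on strtol():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Strict integer parse in the spirit of kstrtol(): reject empty
 * input, trailing garbage, and out-of-range values. */
static int strict_strtol(const char *s, int base, long *res)
{
	char *end;

	errno = 0;
	*res = strtol(s, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || *end != '\0')
		return -EINVAL; /* empty string or trailing garbage */
	return 0;
}

int main(void)
{
	long v;

	printf("\"2\"  -> %d\n", strict_strtol("2", 10, &v));  /* 0 */
	printf("\"2x\" -> %d\n", strict_strtol("2x", 10, &v)); /* rejected */
	return 0;
}
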
lpss_deassert_reset(pdata);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0c884020f74b..ffb4afc5aad9 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1619,8 +1619,6 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
input_report_key(input, keycode, 0);
input_sync(input);
}
-
- return;
}
static void brightness_switch_event(struct acpi_video_device *video_device,
@@ -1690,8 +1688,6 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
input_report_key(input, keycode, 0);
input_sync(input);
}
-
- return;
}
static int acpi_video_resume(struct notifier_block *nb,
@@ -2308,8 +2304,6 @@ static void __exit acpi_video_exit(void)
{
acpi_video_detect_exit();
acpi_video_unregister();
-
- return;
}
module_init(acpi_video_init);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index bccae0d3db75..59d6ded01614 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -737,6 +737,8 @@ const char *acpi_ah_match_uuid(u8 *data);
*/
#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer);
+
+acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string);
#endif
#endif /* _ACUTILS_H */
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 32f03ee81785..06f3c9df1e22 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -139,7 +139,9 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
|| obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_GSBUS
|| obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_IPMI)) {
+ ACPI_ADR_SPACE_IPMI
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_PLATFORM_RT)) {
/* SMBus, GSBus, IPMI serial */
@@ -301,7 +303,9 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
|| obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_GSBUS
|| obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_IPMI)) {
+ ACPI_ADR_SPACE_IPMI
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_PLATFORM_RT)) {
/* SMBus, GSBus, IPMI serial */
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 8e8d95f7947b..10d68a5f76a3 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -195,6 +195,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ | (accessor_type << 16);
break;
+ case ACPI_ADR_SPACE_PLATFORM_RT:
+
+ buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
@@ -311,6 +317,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
function = ACPI_WRITE | (accessor_type << 16);
break;
+ case ACPI_ADR_SPACE_PLATFORM_RT:
+
+ buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE;
+ function = ACPI_WRITE;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 14b71b41e845..38e10ab976e6 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -379,6 +379,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
(*element_ptr)->common.reference_count =
original_ref_count;
+
+ /*
+ * The original_element holds a reference from the package object
+ * that represents _HID. Since a new element was created by _HID,
+ * remove the reference from the _CID package.
+ */
+ acpi_ut_remove_reference(original_element);
}
element_ptr++;
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 624a26794d55..e5ba9795ec69 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
}
break;
+ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Address handler %p\n", object));
+
+ acpi_os_delete_mutex(object->address_space.context_mutex);
+ break;
+
default:
break;
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index e37d612e8db5..05426596d1f4 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
case 'X':
type |= ACPI_FORMAT_UPPER;
- /* FALLTHROUGH */
+ ACPI_FALLTHROUGH;
case 'x':
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 090e44b6b6c7..dca9061518ab 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -61,4 +61,45 @@ void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer)
1]);
}
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_uuid_to_string
+ *
+ * PARAMETERS: uuid_buffer - 16-byte UUID buffer
+ * out_string - 36-byte formatted UUID string
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert 16-byte UUID buffer to 36-byte formatted UUID string
+ * out_string must be 37 bytes to include null terminator.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string)
+{
+ u32 i;
+
+ if (!uuid_buffer || !out_string) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ for (i = 0; i < UUID_BUFFER_LENGTH; i++) {
+ out_string[acpi_gbl_map_to_uuid_offset[i]] =
+ acpi_ut_hex_to_ascii_char(uuid_buffer[i], 4);
+
+ out_string[acpi_gbl_map_to_uuid_offset[i] + 1] =
+ acpi_ut_hex_to_ascii_char(uuid_buffer[i], 0);
+ }
+
+ /* Insert required hyphens (dashes) */
+
+ out_string[UUID_HYPHEN1_OFFSET] =
+ out_string[UUID_HYPHEN2_OFFSET] =
+ out_string[UUID_HYPHEN3_OFFSET] =
+ out_string[UUID_HYPHEN4_OFFSET] = '-';
+
+ out_string[UUID_STRING_LENGTH] = 0; /* Null terminate */
+ return (AE_OK);
+}
#endif
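
UUID byte order is mixed-endian: the first three fields are stored little-endian in memory, which is why the ACPICA routine above indexes through a mapping table. A self-contained user-space version of the same 16-byte-to-"8-4-4-4-12" conversion:

#include <stdio.h>

static void uuid_to_string(const unsigned char u[16], char out[37])
{
	/* first three fields little-endian, remainder in storage order */
	static const int order[16] = { 3, 2, 1, 0,  5, 4,  7, 6,
				       8, 9, 10, 11, 12, 13, 14, 15 };
	static const char hex[] = "0123456789abcdef";
	int i, pos = 0;

	for (i = 0; i < 16; i++) {
		if (i == 4 || i == 6 || i == 8 || i == 10)
			out[pos++] = '-';
		out[pos++] = hex[u[order[i]] >> 4];
		out[pos++] = hex[u[order[i]] & 0xf];
	}
	out[pos] = '\0';
}

int main(void)
{
	unsigned char raw[16] = { 0x78, 0x56, 0x34, 0x12, 0xbc, 0x9a,
				  0xf0, 0xde, 0x01, 0x23, 0x45, 0x67,
				  0x89, 0xab, 0xcd, 0xef };
	char s[37];

	uuid_to_string(raw, s);
	puts(s); /* 12345678-9abc-def0-0123-456789abcdef */
	return 0;
}
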
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 328e8aeece6c..2882450c443e 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -673,7 +673,7 @@ static int __init einj_init(void)
struct apei_exec_context ctx;
if (acpi_disabled) {
- pr_warn("ACPI disabled.\n");
+ pr_info("ACPI disabled.\n");
return -ENODEV;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index fce7ade2aba9..0c8330ed1ffd 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -441,28 +441,35 @@ static void ghes_kick_task_work(struct callback_head *head)
gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
}
-static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
- int sev)
+static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
unsigned long pfn;
- int flags = -1;
- int sec_sev = ghes_severity(gdata->error_severity);
- struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
return false;
- if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
- return false;
-
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ pfn = PHYS_PFN(physical_addr);
if (!pfn_valid(pfn)) {
pr_warn_ratelimited(FW_WARN GHES_PFX
"Invalid address in generic error data: %#llx\n",
- mem_err->physical_addr);
+ physical_addr);
return false;
}
+ memory_failure_queue(pfn, flags);
+ return true;
+}
+
+static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ int sev)
+{
+ int flags = -1;
+ int sec_sev = ghes_severity(gdata->error_severity);
+ struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+
+ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+ return false;
+
+ /* iff the following two events can be handled properly by now */
if (sec_sev == GHES_SEV_CORRECTED &&
(gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
@@ -470,14 +477,56 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
flags = 0;
- if (flags != -1) {
- memory_failure_queue(pfn, flags);
- return true;
- }
+ if (flags != -1)
+ return ghes_do_memory_failure(mem_err->physical_addr, flags);
return false;
}
+static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
+{
+ struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
+ bool queued = false;
+ int sec_sev, i;
+ char *p;
+
+ log_arm_hw_error(err);
+
+ sec_sev = ghes_severity(gdata->error_severity);
+ if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
+ return false;
+
+ p = (char *)(err + 1);
+ for (i = 0; i < err->err_info_num; i++) {
+ struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
+ bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
+ bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
+ const char *error_type = "unknown error";
+
+ /*
+ * The field (err_info->error_info & BIT(26)) is always set to 1 in
+ * some old firmware of HiSilicon Kunpeng920. We assume that
+ * firmware won't mix corrected errors in an uncorrected section,
+ * and don't filter out 'corrected' error here.
+ */
+ if (is_cache && has_pa) {
+ queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
+ p += err_info->length;
+ continue;
+ }
+
+ if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
+ error_type = cper_proc_error_type_strs[err_info->type];
+
+ pr_warn_ratelimited(FW_WARN GHES_PFX
+ "Unhandled processor error type: %s\n",
+ error_type);
+ p += err_info->length;
+ }
+
+ return queued;
+}
+
/*
* PCIe AER errors need to be sent to the AER driver for reporting and
* recovery. The GHES severities map to the following AER severities and
@@ -605,9 +654,7 @@ static bool ghes_do_proc(struct ghes *ghes,
ghes_handle_aer(gdata);
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
- struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
-
- log_arm_hw_error(err);
+ queued = ghes_handle_arm_hw_error(gdata, sev);
} else {
void *err = acpi_hest_get_payload(gdata);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 3912a1f6058e..e34937e11186 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -976,7 +976,7 @@ static void iort_named_component_init(struct device *dev,
FIELD_GET(ACPI_IORT_NC_PASID_BITS,
nc->node_flags));
- if (device_add_properties(dev, props))
+ if (device_create_managed_software_node(dev, props, NULL))
dev_warn(dev, "Could not add device properties\n");
}
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 19bb7f870204..02d208732f9a 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -15,40 +15,19 @@
static void *bgrt_image;
static struct kobject *bgrt_kobj;
-static ssize_t version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
-}
-static DEVICE_ATTR_RO(version);
-
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
-}
-static DEVICE_ATTR_RO(status);
-
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
-}
-static DEVICE_ATTR_RO(type);
-
-static ssize_t xoffset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
-}
-static DEVICE_ATTR_RO(xoffset);
-
-static ssize_t yoffset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
-}
-static DEVICE_ATTR_RO(yoffset);
+#define BGRT_SHOW(_name, _member) \
+ static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return sysfs_emit(buf, "%d\n", bgrt_tab._member); \
+ } \
+ struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
+
+BGRT_SHOW(version, version);
+BGRT_SHOW(status, status);
+BGRT_SHOW(type, image_type);
+BGRT_SHOW(xoffset, image_offset_x);
+BGRT_SHOW(yoffset, image_offset_y);
static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
static BIN_ATTR_RO(image, 0); /* size gets filled in later */
static struct attribute *bgrt_attributes[] = {
- &dev_attr_version.attr,
- &dev_attr_status.attr,
- &dev_attr_type.attr,
- &dev_attr_xoffset.attr,
- &dev_attr_yoffset.attr,
+ &bgrt_attr_version.attr,
+ &bgrt_attr_status.attr,
+ &bgrt_attr_type.attr,
+ &bgrt_attr_xoffset.attr,
+ &bgrt_attr_yoffset.attr,
NULL,
};
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index a86a770c9b79..a558d24fb788 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -10,6 +10,8 @@
* Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
@@ -49,12 +51,12 @@ int __init acpi_blacklisted(void)
i = acpi_match_platform_list(acpi_blacklist);
if (i >= 0) {
- pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
+ pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
acpi_blacklist[i].oem_id,
acpi_blacklist[i].oem_table_id,
acpi_blacklist[i].oem_revision);
- pr_err(PREFIX "Reason: %s. This is a %s error\n",
+ pr_err("Reason: %s. This is a %s error\n",
acpi_blacklist[i].reason,
(acpi_blacklist[i].data ?
"non-recoverable" : "recoverable"));
@@ -73,8 +75,7 @@ int __init acpi_blacklisted(void)
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
{
- printk(KERN_NOTICE PREFIX "DMI detected: %s (force ACPI _REV to 5)\n",
- d->ident);
+ pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident);
acpi_rev_override_setup(NULL);
return 0;
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index be7da23fad76..60fb6a843853 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <acpi/apei.h>
#include <linux/suspend.h>
+#include <linux/prmt.h>
#include "internal.h"
@@ -262,8 +263,6 @@ out_success:
out_kfree:
kfree(output.pointer);
- if (status != AE_OK)
- context->ret.pointer = NULL;
return status;
}
EXPORT_SYMBOL(acpi_run_osc);
@@ -304,6 +303,7 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
#ifdef CONFIG_ARM64
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
- capbuf_ret = context.ret.pointer;
- if (context.ret.length <= OSC_SUPPORT_DWORD) {
- kfree(context.ret.pointer);
- return;
- }
+ kfree(context.ret.pointer);
- /*
- * Now run _OSC again with query flag clear and with the caps
- * supported by both the OS and the platform.
- */
+ /* Now run _OSC again with query flag clear */
capbuf[OSC_QUERY_DWORD] = 0;
- capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
- kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD) {
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- osc_sb_native_usb4_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
- }
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
kfree(context.ret.pointer);
}
@@ -370,7 +359,7 @@ EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);
static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
{
- printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
+ pr_info("%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
(bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
(bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
(bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
@@ -409,7 +398,7 @@ static void acpi_bus_osc_negotiate_usb_control(void)
return;
if (context.ret.length != sizeof(capbuf)) {
- printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
+ pr_info("USB4 _OSC: returned invalid length buffer\n");
goto out_free;
}
@@ -1206,7 +1195,8 @@ void __init acpi_subsystem_init(void)
static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
{
- acpi_scan_table_handler(event, table, context);
+ if (event == ACPI_TABLE_EVENT_LOAD)
+ acpi_scan_table_notify();
return acpi_sysfs_table_handler(event, table, context);
}
@@ -1325,13 +1315,13 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj) {
+ if (!acpi_kobj)
pr_debug("%s: kset create error\n", __func__);
- acpi_kobj = NULL;
- }
+ init_prmt();
result = acpi_bus_init();
if (result) {
+ kobject_put(acpi_kobj);
disable_acpi();
return result;
}
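
_OSC is negotiated in two passes: a query pass asking which capabilities the platform grants, then a commit pass with the query flag clear. After this change the second pass reuses the OS-built capability words instead of echoing the queried result back. A sketch of the buffer layout this code manipulates (constants from include/linux/acpi.h; error handling omitted):

    static void osc_negotiate_sketch(u32 capbuf[3])
    {
            capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;     /* pass 1: query */
            capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PCLPI_SUPPORT | OSC_SB_PRM_SUPPORT;
            capbuf[OSC_CONTROL_DWORD] = 0;

            /* run _OSC, free the reply, then commit with the same set */
            capbuf[OSC_QUERY_DWORD] = 0;                    /* pass 2: commit */

            /* the SUPPORT dword returned by the second call is what the
             * platform acknowledged (APEI, LPI, native USB4, ...) */
    }
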
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d260bc1f3e6e..0028b6b51c87 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+#include "fan.h"
#include "internal.h"
/**
@@ -1133,20 +1134,49 @@ static int acpi_subsys_resume_noirq(struct device *dev)
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic early resume procedure for it during system transition into the
- * working state.
+ * working state, but only do that if the device either defines an early
+ * resume handler or does not define power operations at all. Otherwise,
+ * powering up the device is postponed to the normal resume phase.
*/
static int acpi_subsys_resume_early(struct device *dev)
{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
if (dev_pm_skip_resume(dev))
return 0;
+ if (pm && !pm->resume_early) {
+ dev_dbg(dev, "postponing D0 transition to normal resume stage\n");
+ return 0;
+ }
+
ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
/**
+ * acpi_subsys_resume - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state if it has not been
+ * powered up during early resume phase, and carry out the generic resume
+ * procedure for it during system transition into the working state.
+ */
+static int acpi_subsys_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret = 0;
+
+ if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) {
+ dev_dbg(dev, "executing postponed D0 transition\n");
+ ret = acpi_dev_resume(dev);
+ }
+
+ return ret ? ret : pm_generic_resume(dev);
+}
+
+/**
* acpi_subsys_freeze - Run the device driver's freeze callback.
* @dev: Device to handle.
*/
@@ -1239,6 +1269,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
+ .resume = acpi_subsys_resume,
.suspend_late = acpi_subsys_suspend_late,
.suspend_noirq = acpi_subsys_suspend_noirq,
.resume_noirq = acpi_subsys_resume_noirq,
@@ -1310,10 +1341,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
* with the generic ACPI PM domain.
*/
static const struct acpi_device_id special_pm_ids[] = {
- {"PNP0C0B", }, /* Generic ACPI fan */
- {"INT3404", }, /* Fan */
- {"INTC1044", }, /* Fan for Tiger Lake generation */
- {"INTC1048", }, /* Fan for Alder Lake generation */
+ ACPI_FAN_DEVICE_IDS,
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
@@ -1340,4 +1368,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
+ * @dev: Device to check
+ *
+ * Return %true if the platform firmware wants @dev to be programmed
+ * into D3hot or D3cold (if supported) in the suspend path, or %false
+ * when there is no specific preference. On some platforms, if this
+ * hint is ignored, @dev may remain unresponsive after suspending the
+ * platform as a whole.
+ *
+ * Although the property has "storage" in its name, it is actually
+ * applied to the PCIe slot, so the same platform restrictions will
+ * likely apply to a non-storage device plugged into that slot.
+ */
+bool acpi_storage_d3(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ u8 val;
+
+ if (force_storage_d3())
+ return true;
+
+ if (!adev)
+ return false;
+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+ &val))
+ return false;
+ return val == 1;
+}
+EXPORT_SYMBOL_GPL(acpi_storage_d3);
+
#endif /* CONFIG_PM */
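
A hypothetical caller's view of acpi_storage_d3(): a driver consults the hint in its suspend path and programs D3 when the firmware asks for it. Sketch only; enter_d3() and default_suspend() stand in for driver-specific helpers:

    static int example_suspend(struct device *dev)
    {
            if (acpi_storage_d3(dev))
                    return enter_d3(dev);        /* firmware wants D3hot/D3cold */

            return default_suspend(dev);         /* no platform preference */
    }
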
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index fa2c1c93072c..61271e61c307 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -268,6 +268,8 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
/**
* acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
+ * @dev: Struct device to get ACPI device node.
+ * @env: Environment variables of the kobject uevent.
*
* Create the uevent modalias field for ACPI-enumerated devices.
*
@@ -313,6 +315,9 @@ static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
/**
* acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
+ * @dev: Struct device to get ACPI device node.
+ * @buf: The buffer to save pnp_modalias and of_modalias.
+ * @size: Size of buffer.
*
* Create the modalias sysfs attribute for ACPI-enumerated devices.
*
@@ -448,7 +453,7 @@ static ssize_t description_show(struct device *dev,
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
- PAGE_SIZE);
+ PAGE_SIZE - 1);
buf[result++] = '\n';
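
The PAGE_SIZE - 1 change fixes an off-by-one: utf16s_to_utf8s() may use its whole output budget, and the newline is appended afterwards. Worked out, with the arguments abbreviated:

    /* before: result could reach PAGE_SIZE, so buf[result++] wrote one
     * byte past the sysfs page */
    result = utf16s_to_utf8s(str, len, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1);
    buf[result++] = '\n';   /* result <= PAGE_SIZE - 1, so this stays in bounds */
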
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index d14025a85ce8..da5d5f0be2f2 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -24,6 +24,7 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT3409"},
{"INT340A"},
{"INT340B"},
+ {"INT3532"},
{"INTC1040"},
{"INTC1041"},
{"INTC1043"},
@@ -33,6 +34,7 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1047"},
{"INTC1048"},
{"INTC1049"},
+ {"INTC1050"},
{"INTC1060"},
{"INTC1061"},
{""},
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 13565629ce0a..e629e891d1bb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -183,6 +183,7 @@ static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
+static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
@@ -1593,7 +1594,8 @@ static int acpi_ec_add(struct acpi_device *device)
}
if (boot_ec && ec->command_addr == boot_ec->command_addr &&
- ec->data_addr == boot_ec->data_addr) {
+ ec->data_addr == boot_ec->data_addr &&
+ !EC_FLAGS_TRUST_DSDT_GPE) {
/*
* Trust PNP0C09 namespace location rather than
* ECDT ID. But trust ECDT GPE rather than _GPE
@@ -1627,7 +1629,7 @@ static int acpi_ec_add(struct acpi_device *device)
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
/* Reprobe devices depending on the EC */
- acpi_walk_dep_device_list(ec->handle);
+ acpi_dev_clear_dependencies(device);
acpi_handle_debug(ec->handle, "enumerated.\n");
return 0;
@@ -1817,6 +1819,18 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
}
/*
+ * Some ECDTs contain the wrong GPE setting, but share the same port addresses
+ * as the DSDT EC; don't duplicate the DSDT EC with the ECDT EC in this case.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
+ */
+static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing DSDT GPE setting.\n");
+ EC_FLAGS_TRUST_DSDT_GPE = 1;
+ return 0;
+}
+
+/*
* Some DSDTs contain wrong GPE setting.
* Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
* https://bugzilla.kernel.org/show_bug.cgi?id=195651
@@ -1846,6 +1860,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
{
+ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
+ {
+ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
+ {
+ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
+ {
+ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
+ {
ec_honor_ecdt_gpe, "ASUS X550VXK", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
@@ -1854,6 +1884,11 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
{
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
+ ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
+ {
ec_clear_on_resume, "Samsung hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 92e59f45329b..d199a19bb292 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -7,6 +7,8 @@
*
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/proc_fs.h>
@@ -165,7 +167,7 @@ static int acpi_event_genetlink_init(void)
static int __init acpi_event_init(void)
{
- int error = 0;
+ int error;
if (acpi_disabled)
return 0;
@@ -173,8 +175,8 @@ static int __init acpi_event_init(void)
/* create genetlink for acpi event */
error = acpi_event_genetlink_init();
if (error)
- printk(KERN_WARNING PREFIX
- "Failed to create genetlink family for ACPI event\n");
+ pr_warn("Failed to create genetlink family for ACPI event\n");
+
return 0;
}
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 66c3983f0ccc..5cd0ceb50bc8 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/sort.h>
+#include "fan.h"
+
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
- {"PNP0C0B", 0},
- {"INT3404", 0},
- {"INTC1044", 0},
- {"INTC1048", 0},
+ ACPI_FAN_DEVICE_IDS,
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
new file mode 100644
index 000000000000..dc9a6efa514b
--- /dev/null
+++ b/drivers/acpi/fan.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * ACPI fan device IDs are shared between the fan driver and the device power
+ * management code.
+ *
+ * Add new device IDs before the generic ACPI fan one.
+ */
+#define ACPI_FAN_DEVICE_IDS \
+ {"INT3404", }, /* Fan */ \
+ {"INTC1044", }, /* Fan for Tiger Lake generation */ \
+ {"INTC1048", }, /* Fan for Alder Lake generation */ \
+ {"PNP0C0B", } /* Generic ACPI fan */
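
The macro deliberately omits the trailing comma and terminator so each table can close itself — fan.c ends with {"", 0}, device_pm.c with {}. Expanded by hand, the fan.c table becomes:

    static const struct acpi_device_id fan_device_ids[] = {
            {"INT3404", },  /* Fan */
            {"INTC1044", }, /* Fan for Tiger Lake generation */
            {"INTC1048", }, /* Fan for Alder Lake generation */
            {"PNP0C0B", },  /* Generic ACPI fan */
            {"", 0},
    };
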
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 0715e3be99a0..fce3f3bba714 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -6,6 +6,8 @@
* Copyright (c) 2005 Intel Corp.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi_iort.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -19,17 +21,6 @@
#include "internal.h"
-#define ACPI_GLUE_DEBUG 0
-#if ACPI_GLUE_DEBUG
-#define DBG(fmt, ...) \
- printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
-#else
-#define DBG(fmt, ...) \
-do { \
- if (0) \
- printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
@@ -44,7 +35,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
+ pr_info("bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
@@ -59,8 +50,7 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "bus type %s unregistered\n",
- type->name);
+ pr_info("bus type %s unregistered\n", type->name);
return 0;
}
return -ENODEV;
@@ -307,7 +297,7 @@ static int acpi_device_notify(struct device *dev)
adev = type->find_companion(dev);
if (!adev) {
- DBG("Unable to get handle for %s\n", dev_name(dev));
+ pr_debug("Unable to get handle for %s\n", dev_name(dev));
ret = -ENODEV;
goto out;
}
@@ -328,16 +318,15 @@ static int acpi_device_notify(struct device *dev)
adev->handler->bind(dev);
out:
-#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
- DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
+ pr_debug("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
- } else
- DBG("Device %s -> No ACPI support\n", dev_name(dev));
-#endif
+ } else {
+ pr_debug("Device %s -> No ACPI support\n", dev_name(dev));
+ }
return ret;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f973bbe90e5e..d91b560e8867 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -11,8 +11,6 @@
#include <linux/idr.h>
-#define PREFIX "ACPI: "
-
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
@@ -88,7 +86,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
-void acpi_scan_table_handler(u32 event, void *table, void *context);
+void acpi_scan_table_notify(void);
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
@@ -134,7 +132,7 @@ int acpi_power_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
-int acpi_add_power_resource(acpi_handle handle);
+struct acpi_device *acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
@@ -236,6 +234,15 @@ static inline int suspend_nvs_save(void) { return 0; }
static inline void suspend_nvs_restore(void) {}
#endif
+#ifdef CONFIG_X86
+bool force_storage_d3(void);
+#else
+static inline bool force_storage_d3(void)
+{
+ return false;
+}
+#endif
+
/*--------------------------------------------------------------------------
Device properties
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 9f8712a557b3..a2b11069e792 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -5,6 +5,8 @@
* Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -82,19 +84,19 @@ struct nvs_page {
static LIST_HEAD(nvs_list);
/**
- * suspend_nvs_register - register platform NVS memory region to save
- * @start - physical address of the region
- * @size - size of the region
+ * suspend_nvs_register - register platform NVS memory region to save
+ * @start: Physical address of the region.
+ * @size: Size of the region.
*
- * The NVS region need not be page-aligned (both ends) and we arrange
- * things so that the data from page-aligned addresses in this region will
- * be copied into separate RAM pages.
+ * The NVS region need not be page-aligned (both ends) and we arrange
+ * things so that the data from page-aligned addresses in this region will
+ * be copied into separate RAM pages.
*/
static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
- pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
+ pr_info("Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
start, start + size - 1, size);
while (size > 0) {
@@ -123,7 +125,7 @@ static int suspend_nvs_register(unsigned long start, unsigned long size)
}
/**
- * suspend_nvs_free - free data pages allocated for saving NVS regions
+ * suspend_nvs_free - free data pages allocated for saving NVS regions
*/
void suspend_nvs_free(void)
{
@@ -147,7 +149,7 @@ void suspend_nvs_free(void)
}
/**
- * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
+ * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
*/
int suspend_nvs_alloc(void)
{
@@ -164,13 +166,13 @@ int suspend_nvs_alloc(void)
}
/**
- * suspend_nvs_save - save NVS memory regions
+ * suspend_nvs_save - save NVS memory regions
*/
int suspend_nvs_save(void)
{
struct nvs_page *entry;
- printk(KERN_INFO "PM: Saving platform NVS memory\n");
+ pr_info("Saving platform NVS memory\n");
list_for_each_entry(entry, &nvs_list, node)
if (entry->data) {
@@ -193,16 +195,16 @@ int suspend_nvs_save(void)
}
/**
- * suspend_nvs_restore - restore NVS memory regions
+ * suspend_nvs_restore - restore NVS memory regions
*
- * This function is going to be called with interrupts disabled, so it
- * cannot iounmap the virtual addresses used to access the NVS region.
+ * This function is going to be called with interrupts disabled, so it
+ * cannot iounmap the virtual addresses used to access the NVS region.
*/
void suspend_nvs_restore(void)
{
struct nvs_page *entry;
- printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+ pr_info("Restoring platform NVS memory\n");
list_for_each_entry(entry, &nvs_list, node)
if (entry->data)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 327e1b4eb6b0..45c5c0e45e33 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -212,7 +212,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
return efi.acpi20;
if (efi.acpi != EFI_INVALID_TABLE_ADDR)
return efi.acpi;
- pr_err(PREFIX "System description tables not found\n");
+ pr_err("System description tables not found\n");
} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
acpi_find_root_pointer(&pa);
}
@@ -430,7 +430,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
map = acpi_map_lookup_virt(virt, size);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
- WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
+ WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
return;
}
acpi_os_drop_map_ref(map);
@@ -1487,12 +1487,7 @@ EXPORT_SYMBOL(acpi_check_resource_conflict);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name)
{
- struct resource res = {
- .start = start,
- .end = start + n - 1,
- .name = name,
- .flags = IORESOURCE_IO,
- };
+ struct resource res = DEFINE_RES_IO_NAMED(start, n, name);
return acpi_check_resource_conflict(&res);
}
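
DEFINE_RES_IO_NAMED() from <linux/ioport.h> packs the same four fields as the removed initializer, so the substitution is behavior-preserving. Side by side, inside the same function:

    /* both describe the same I/O port window */
    struct resource a = DEFINE_RES_IO_NAMED(start, n, name);
    struct resource b = {
            .start = start,
            .end   = start + n - 1,
            .name  = name,
            .flags = IORESOURCE_IO,
    };
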
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index dcd593766a64..d7deedf3548e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -6,6 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -574,7 +576,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
goto end;
}
- pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
+ pr_info("%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
diff --git a/drivers/acpi/pmic/Kconfig b/drivers/acpi/pmic/Kconfig
index 56bbcb2ce61b..f84b8f6038dc 100644
--- a/drivers/acpi/pmic/Kconfig
+++ b/drivers/acpi/pmic/Kconfig
@@ -52,7 +52,7 @@ endif # PMIC_OPREGION
config TPS68470_PMIC_OPREGION
bool "ACPI operation region support for TPS68470 PMIC"
- depends on MFD_TPS68470
+ depends on INTEL_SKL_INT3472
help
This config adds ACPI operation region support for TI TPS68470 PMIC.
TPS68470 device is an advanced power management unit that powers
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index a5101b07611a..fef7831d0d63 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -117,7 +117,7 @@ static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
return err;
/* Re-enumerate devices depending on PMIC */
- acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent));
+ acpi_dev_clear_dependencies(ACPI_COMPANION(pdev->dev.parent));
return 0;
}
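
acpi_dev_clear_dependencies() is the struct acpi_device counterpart of the removed handle-based walk: it marks the supplier as ready and re-enumerates devices that listed it in _DEP. The call-site pattern, as used here:

    /* once the supplier (EC, PMIC, ...) can service requests: */
    acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
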
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 56102eaaa2da..eba7785047ca 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -52,6 +52,7 @@ struct acpi_power_resource {
u32 system_level;
u32 order;
unsigned int ref_count;
+ u8 state;
bool wakeup_enabled;
struct mutex resource_lock;
struct list_head dependents;
@@ -147,6 +148,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
for (i = start; i < package->package.count; i++) {
union acpi_object *element = &package->package.elements[i];
+ struct acpi_device *rdev;
acpi_handle rhandle;
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
@@ -163,10 +165,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
if (acpi_power_resource_is_dup(package, start, i))
continue;
- err = acpi_add_power_resource(rhandle);
- if (err)
+ rdev = acpi_add_power_resource(rhandle);
+ if (!rdev) {
+ err = -ENODEV;
break;
-
+ }
err = acpi_power_resources_list_add(rhandle, list);
if (err)
break;
@@ -177,44 +180,54 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
return err;
}
-static int acpi_power_get_state(acpi_handle handle, int *state)
+static int __get_state(acpi_handle handle, u8 *state)
{
acpi_status status = AE_OK;
unsigned long long sta = 0;
-
- if (!handle || !state)
- return -EINVAL;
+ u8 cur_state;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
- *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
- ACPI_POWER_RESOURCE_STATE_OFF;
+ cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
- *state ? "on" : "off");
+ cur_state ? "on" : "off");
+ *state = cur_state;
return 0;
}
-static int acpi_power_get_list_state(struct list_head *list, int *state)
+static int acpi_power_get_state(struct acpi_power_resource *resource, u8 *state)
+{
+ if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN) {
+ int ret;
+
+ ret = __get_state(resource->device.handle, &resource->state);
+ if (ret)
+ return ret;
+ }
+
+ *state = resource->state;
+ return 0;
+}
+
+static int acpi_power_get_list_state(struct list_head *list, u8 *state)
{
struct acpi_power_resource_entry *entry;
- int cur_state;
+ u8 cur_state = ACPI_POWER_RESOURCE_STATE_OFF;
if (!list || !state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
- cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
- acpi_handle handle = resource->device.handle;
int result;
mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(handle, &cur_state);
+ result = acpi_power_get_state(resource, &cur_state);
mutex_unlock(&resource->resource_lock);
if (result)
return result;
@@ -347,8 +360,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
acpi_status status = AE_OK;
status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
+ }
+
+ resource->state = ACPI_POWER_RESOURCE_STATE_ON;
pr_debug("Power resource [%s] turned on\n", resource->name);
@@ -400,8 +417,12 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
status = acpi_evaluate_object(resource->device.handle, "_OFF",
NULL, NULL);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
+ }
+
+ resource->state = ACPI_POWER_RESOURCE_STATE_OFF;
pr_debug("Power resource [%s] turned off\n", resource->name);
@@ -585,13 +606,12 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
- acpi_handle handle = resource->device.handle;
int result;
- int state;
+ u8 state;
mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(handle, &state);
+ result = acpi_power_get_state(resource, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
return result;
@@ -784,8 +804,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
+ u8 list_state = ACPI_POWER_RESOURCE_STATE_OFF;
int result = 0;
- int list_state = 0;
int i = 0;
if (!device || !state)
@@ -907,22 +927,22 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
mutex_unlock(&power_resource_list_lock);
}
-int acpi_add_power_resource(acpi_handle handle)
+struct acpi_device *acpi_add_power_resource(acpi_handle handle)
{
struct acpi_power_resource *resource;
struct acpi_device *device = NULL;
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
- int state, result = -ENODEV;
+ int result;
acpi_bus_get_device(handle, &device);
if (device)
- return 0;
+ return device;
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
- return -ENOMEM;
+ return NULL;
device = &resource->device;
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
@@ -941,13 +961,9 @@ int acpi_add_power_resource(acpi_handle handle)
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
+ resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
- result = acpi_power_get_state(handle, &state);
- if (result)
- goto err;
-
- pr_info("%s [%s] (%s)\n", acpi_device_name(device),
- acpi_device_bid(device), state ? "on" : "off");
+ pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
device->flags.match_driver = true;
result = acpi_device_add(device, acpi_release_power_resource);
@@ -959,11 +975,11 @@ int acpi_add_power_resource(acpi_handle handle)
acpi_power_add_resource_to_list(resource);
acpi_device_add_finalize(device);
- return 0;
+ return device;
err:
acpi_release_power_resource(&device->dev);
- return result;
+ return NULL;
}
#ifdef CONFIG_ACPI_SLEEP
@@ -974,11 +990,13 @@ void acpi_resume_power_resources(void)
mutex_lock(&power_resource_list_lock);
list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
- int result, state;
+ int result;
+ u8 state;
mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(resource->device.handle, &state);
+ resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
+ result = acpi_power_get_state(resource, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
continue;
@@ -986,7 +1004,7 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- dev_info(&resource->device.dev, "Turning ON\n");
+ dev_dbg(&resource->device.dev, "Turning ON\n");
__acpi_power_on(resource);
}
@@ -997,6 +1015,9 @@ void acpi_resume_power_resources(void)
}
#endif
+/**
+ * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+ */
void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1006,8 +1027,14 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
- if (!resource->ref_count) {
- dev_info(&resource->device.dev, "Turning OFF\n");
+ /*
+ * Turn off power resources in an unknown state too, because the
+ * platform firmware on some systems expects the OS to unconditionally
+ * turn off power resources that have no users.
+ */
+ if (!resource->ref_count &&
+ resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+ dev_dbg(&resource->device.dev, "Turning OFF\n");
__acpi_power_off(resource);
}
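
The new state byte caches the last known _STA result so list walks don't re-run AML on every query; any failed _ON/_OFF drops the cache back to unknown. The invariant, spelled out (values from <acpi/actypes.h>):

    /* resource->state:
     *   ACPI_POWER_RESOURCE_STATE_OFF     (0x00)  cached, known off
     *   ACPI_POWER_RESOURCE_STATE_ON      (0x01)  cached, known on
     *   ACPI_POWER_RESOURCE_STATE_UNKNOWN (0xFF)  evaluate _STA on next read
     */
    if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN)
            ret = __get_state(resource->device.handle, &resource->state);
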
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 4ae93350b70d..fe69dc518f31 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -347,6 +347,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
* @this_leaf: Kernel cache info structure being updated
* @found_cache: The PPTT node describing this cache instance
* @cpu_node: A unique reference to describe this cache instance
+ * @revision: The revision of the PPTT table
*
* The ACPI spec implies that the fields in the cache structures are used to
* extend and correct the information probed from the hardware. Let's only
@@ -356,8 +357,11 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
*/
static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
- struct acpi_pptt_processor *cpu_node)
+ struct acpi_pptt_processor *cpu_node,
+ u8 revision)
{
+ struct acpi_pptt_cache_v1* found_cache_v1;
+
this_leaf->fw_token = cpu_node;
if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
this_leaf->size = found_cache->size;
@@ -405,6 +409,13 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
if (this_leaf->type == CACHE_TYPE_NOCACHE &&
found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
+
+ if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
+ found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
+ found_cache, sizeof(struct acpi_pptt_cache));
+ this_leaf->id = found_cache_v1->cache_id;
+ this_leaf->attributes |= CACHE_ID;
+ }
}
static void cache_setup_acpi_cpu(struct acpi_table_header *table,
@@ -425,9 +436,8 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
&cpu_node);
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
- update_cache_properties(this_leaf,
- found_cache,
- cpu_node);
+ update_cache_properties(this_leaf, found_cache,
+ cpu_node, table->revision);
index++;
}
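
A PPTT of revision 3 or later appends the v1 extension (which carries cache_id) immediately after the base cache node, which is why the lookup just advances the pointer by sizeof(struct acpi_pptt_cache):

    /* table layout, revision >= 3:
     *   struct acpi_pptt_cache      <- found_cache
     *   struct acpi_pptt_cache_v1   <- found_cache + sizeof(struct acpi_pptt_cache)
     *       u32 cache_id;           <- exported iff ACPI_PPTT_CACHE_ID_VALID
     */
    found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
                                  found_cache, sizeof(struct acpi_pptt_cache));
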
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
new file mode 100644
index 000000000000..31cf9aee5edd
--- /dev/null
+++ b/drivers/acpi/prmt.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Erik Kaneda <erik.kaneda@intel.com>
+ * Copyright 2020 Intel Corporation
+ *
+ * prmt.c
+ *
+ * Each PRM service is an executable that is run in a restricted environment
+ * that is invoked by writing to the PlatformRtMechanism OperationRegion from
+ * AML bytecode.
+ *
+ * init_prmt initializes the Platform Runtime Mechanism (PRM) services by
+ * processing data in the PRMT as well as registering an ACPI OperationRegion
+ * handler for the PlatformRtMechanism subtype.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/acpi.h>
+#include <linux/prmt.h>
+#include <asm/efi.h>
+
+#pragma pack(1)
+struct prm_mmio_addr_range {
+ u64 phys_addr;
+ u64 virt_addr;
+ u32 length;
+};
+
+struct prm_mmio_info {
+ u64 mmio_count;
+ struct prm_mmio_addr_range addr_ranges[];
+};
+
+struct prm_buffer {
+ u8 prm_status;
+ u64 efi_status;
+ u8 prm_cmd;
+ guid_t handler_guid;
+};
+
+struct prm_context_buffer {
+ char signature[ACPI_NAMESEG_SIZE];
+ u16 revision;
+ u16 reserved;
+ guid_t identifier;
+ u64 static_data_buffer;
+ struct prm_mmio_info *mmio_ranges;
+};
+#pragma pack()
+
+
+static LIST_HEAD(prm_module_list);
+
+struct prm_handler_info {
+ guid_t guid;
+ u64 handler_addr;
+ u64 static_data_buffer_addr;
+ u64 acpi_param_buffer_addr;
+
+ struct list_head handler_list;
+};
+
+struct prm_module_info {
+ guid_t guid;
+ u16 major_rev;
+ u16 minor_rev;
+ u16 handler_count;
+ struct prm_mmio_info *mmio_info;
+ bool updatable;
+
+ struct list_head module_list;
+ struct prm_handler_info handlers[];
+};
+
+
+static u64 efi_pa_va_lookup(u64 pa)
+{
+ efi_memory_desc_t *md;
+ u64 pa_offset = pa & ~PAGE_MASK;
+ u64 page = pa & PAGE_MASK;
+
+ for_each_efi_memory_desc(md) {
+ if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+ return pa_offset + md->virt_addr + page - md->phys_addr;
+ }
+
+ return 0;
+}
+
+
+#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
+#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
+
+static int __init
+acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_prmt_module_info *module_info;
+ struct acpi_prmt_handler_info *handler_info;
+ struct prm_handler_info *th;
+ struct prm_module_info *tm;
+ u64 mmio_count = 0;
+ u64 cur_handler = 0;
+ u32 module_info_size = 0;
+ u64 mmio_range_size = 0;
+ void *temp_mmio;
+
+ module_info = (struct acpi_prmt_module_info *) header;
+ module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
+ tm = kmalloc(module_info_size, GFP_KERNEL);
+
+ guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
+ tm->major_rev = module_info->major_rev;
+ tm->minor_rev = module_info->minor_rev;
+ tm->handler_count = module_info->handler_info_count;
+ tm->updatable = true;
+
+ if (module_info->mmio_list_pointer) {
+ /*
+ * Each module is associated with a list of addr
+ * ranges that it can use during the service
+ */
+ mmio_count = *(u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
+ mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
+ tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
+ memmove(tm->mmio_info, temp_mmio, mmio_range_size);
+ } else {
+ mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
+ tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ tm->mmio_info->mmio_count = 0;
+ }
+
+ INIT_LIST_HEAD(&tm->module_list);
+ list_add(&tm->module_list, &prm_module_list);
+
+ handler_info = get_first_handler(module_info);
+ do {
+ th = &tm->handlers[cur_handler];
+
+ guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+ th->handler_addr = efi_pa_va_lookup(handler_info->handler_address);
+ th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
+ th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+ } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
+
+ return 0;
+}
+
+#define GET_MODULE 0
+#define GET_HANDLER 1
+
+static void *find_guid_info(const guid_t *guid, u8 mode)
+{
+ struct prm_handler_info *cur_handler;
+ struct prm_module_info *cur_module;
+ int i = 0;
+
+ list_for_each_entry(cur_module, &prm_module_list, module_list) {
+ for (i = 0; i < cur_module->handler_count; ++i) {
+ cur_handler = &cur_module->handlers[i];
+ if (guid_equal(guid, &cur_handler->guid)) {
+ if (mode == GET_MODULE)
+ return (void *)cur_module;
+ else
+ return (void *)cur_handler;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+static struct prm_module_info *find_prm_module(const guid_t *guid)
+{
+ return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
+}
+
+static struct prm_handler_info *find_prm_handler(const guid_t *guid)
+{
+ return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
+}
+
+/* In-coming PRM commands */
+
+#define PRM_CMD_RUN_SERVICE 0
+#define PRM_CMD_START_TRANSACTION 1
+#define PRM_CMD_END_TRANSACTION 2
+
+/* statuses that can be passed back to ASL */
+
+#define PRM_HANDLER_SUCCESS 0
+#define PRM_HANDLER_ERROR 1
+#define INVALID_PRM_COMMAND 2
+#define PRM_HANDLER_GUID_NOT_FOUND 3
+#define UPDATE_LOCK_ALREADY_HELD 4
+#define UPDATE_UNLOCK_WITHOUT_LOCK 5
+
+/*
+ * This is the PlatformRtMechanism opregion space handler.
+ * @function: indicates read or write. Since the PlatformRtMechanism
+ * interface is command driven, only writes are meaningful.
+ *
+ * @addr : not used
+ * @bits : not used
+ * @value : in/out parameter pointing to the PRM message buffer
+ * @handler_context: not used
+ */
+static acpi_status acpi_platformrt_space_handler(u32 function,
+ acpi_physical_address addr,
+ u32 bits, acpi_integer *value,
+ void *handler_context,
+ void *region_context)
+{
+ struct prm_buffer *buffer = ACPI_CAST_PTR(struct prm_buffer, value);
+ struct prm_handler_info *handler;
+ struct prm_module_info *module;
+ efi_status_t status;
+ struct prm_context_buffer context;
+
+ /*
+ * The returned acpi_status will always be AE_OK. Error values will be
+ * saved in the first byte of the PRM message buffer to be used by ASL.
+ */
+ switch (buffer->prm_cmd) {
+ case PRM_CMD_RUN_SERVICE:
+
+ handler = find_prm_handler(&buffer->handler_guid);
+ module = find_prm_module(&buffer->handler_guid);
+ if (!handler || !module)
+ goto invalid_guid;
+
+ ACPI_COPY_NAMESEG(context.signature, "PRMC");
+ context.revision = 0x0;
+ context.reserved = 0x0;
+ context.identifier = handler->guid;
+ context.static_data_buffer = handler->static_data_buffer_addr;
+ context.mmio_ranges = module->mmio_info;
+
+ status = efi_call_virt_pointer(handler, handler_addr,
+ handler->acpi_param_buffer_addr,
+ &context);
+ if (status == EFI_SUCCESS) {
+ buffer->prm_status = PRM_HANDLER_SUCCESS;
+ } else {
+ buffer->prm_status = PRM_HANDLER_ERROR;
+ buffer->efi_status = status;
+ }
+ break;
+
+ case PRM_CMD_START_TRANSACTION:
+
+ module = find_prm_module(&buffer->handler_guid);
+ if (!module)
+ goto invalid_guid;
+
+ if (module->updatable)
+ module->updatable = false;
+ else
+ buffer->prm_status = UPDATE_LOCK_ALREADY_HELD;
+ break;
+
+ case PRM_CMD_END_TRANSACTION:
+
+ module = find_prm_module(&buffer->handler_guid);
+ if (!module)
+ goto invalid_guid;
+
+ if (module->updatable)
+ buffer->prm_status = UPDATE_UNLOCK_WITHOUT_LOCK;
+ else
+ module->updatable = true;
+ break;
+
+ default:
+
+ buffer->prm_status = INVALID_PRM_COMMAND;
+ break;
+ }
+
+ return AE_OK;
+
+invalid_guid:
+ buffer->prm_status = PRM_HANDLER_GUID_NOT_FOUND;
+ return AE_OK;
+}
+
+void __init init_prmt(void)
+{
+ acpi_status status;
+ int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
+ sizeof (struct acpi_table_prmt_header),
+ 0, acpi_parse_prmt, 0);
+ pr_info("PRM: found %u modules\n", mc);
+
+ status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_PLATFORM_RT,
+ &acpi_platformrt_space_handler,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ pr_alert("PRM: OperationRegion handler could not be installed\n");
+}
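
From the firmware side, AML invokes a PRM service by filling a prm_buffer and writing it to the PlatformRtMechanism opregion; the same buffer carries the status back. A hedged C-side view of the message the handler above consumes (some_prm_guid is a placeholder for a GUID published in the PRMT):

    struct prm_buffer msg = {
            .prm_cmd      = PRM_CMD_RUN_SERVICE,
            .handler_guid = some_prm_guid,  /* hypothetical */
    };
    /* the handler resolves the GUID, builds a prm_context_buffer and
     * efi_call_virt_pointer()s into the module; prm_status/efi_status
     * are written back into the buffer for ASL to inspect */
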
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 45a019619e4a..095c8aca141e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
+#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
return;
}
+static int acpi_cst_latency_cmp(const void *a, const void *b)
+{
+ const struct acpi_processor_cx *x = a, *y = b;
+
+ if (!(x->valid && y->valid))
+ return 0;
+ if (x->latency > y->latency)
+ return 1;
+ if (x->latency < y->latency)
+ return -1;
+ return 0;
+}
+static void acpi_cst_latency_swap(void *a, void *b, int n)
+{
+ struct acpi_processor_cx *x = a, *y = b;
+ u32 tmp;
+
+ if (!(x->valid && y->valid))
+ return;
+ tmp = x->latency;
+ x->latency = y->latency;
+ y->latency = tmp;
+}
+
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
unsigned int i;
unsigned int working = 0;
+ unsigned int last_latency = 0;
+ unsigned int last_type = 0;
+ bool buggy_latency = false;
pr->power.timer_broadcast_on_state = INT_MAX;
@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
}
if (!cx->valid)
continue;
+ if (cx->type >= last_type && cx->latency < last_latency)
+ buggy_latency = true;
+ last_latency = cx->latency;
+ last_type = cx->type;
lapic_timer_check_state(i, pr, cx);
tsc_check_state(cx->type);
working++;
}
+ if (buggy_latency) {
+ pr_notice("FW issue: working around C-state latencies out of order\n");
+ sort(&pr->power.states[1], max_cstate,
+ sizeof(struct acpi_processor_cx),
+ acpi_cst_latency_cmp,
+ acpi_cst_latency_swap);
+ }
+
lapic_timer_propagate_broadcast(pr);
return (working);
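
sort() from <linux/sort.h> accepts an optional swap callback; supplying one lets the workaround reorder only the latency values while invalid entries and the rest of each acpi_processor_cx stay put. The call shape:

    /* void sort(void *base, size_t num, size_t size,
     *           cmp_func_t cmp_func, swap_func_t swap_func); */
    sort(&pr->power.states[1], max_cstate, sizeof(struct acpi_processor_cx),
         acpi_cst_latency_cmp, acpi_cst_latency_swap);
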
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index d088a0089ee9..757a98f6d7a2 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -9,6 +9,8 @@
* - Added processor hotplug support
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,8 +22,6 @@
#include <asm/cpufeature.h>
#endif
-#define PREFIX "ACPI: "
-
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
static DEFINE_MUTEX(performance_mutex);
@@ -194,7 +194,6 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
union acpi_object *pct = NULL;
union acpi_object obj = { 0 };
-
status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
@@ -204,7 +203,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
pct = (union acpi_object *)buffer.pointer;
if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
|| (pct->package.count != 2)) {
- printk(KERN_ERR PREFIX "Invalid _PCT data\n");
+ pr_err("Invalid _PCT data\n");
result = -EFAULT;
goto end;
}
@@ -218,7 +217,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_pct_register))
|| (obj.buffer.pointer == NULL)) {
- printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
+ pr_err("Invalid _PCT data (control_register)\n");
result = -EFAULT;
goto end;
}
@@ -234,7 +233,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_pct_register))
|| (obj.buffer.pointer == NULL)) {
- printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
+ pr_err("Invalid _PCT data (status_register)\n");
result = -EFAULT;
goto end;
}
@@ -242,7 +241,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
memcpy(&pr->performance->status_register, obj.buffer.pointer,
sizeof(struct acpi_pct_register));
- end:
+end:
kfree(buffer.pointer);
return result;
@@ -294,7 +293,6 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
int i;
int last_invalid = -1;
-
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
@@ -303,7 +301,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
pss = buffer.pointer;
if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _PSS data\n");
+ pr_err("Invalid _PSS data\n");
result = -EFAULT;
goto end;
}
@@ -357,7 +355,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
if (!px->core_frequency ||
((u32)(px->core_frequency * 1000) !=
(px->core_frequency * 1000))) {
- printk(KERN_ERR FW_BUG PREFIX
+ pr_err(FW_BUG
"Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
pr->id, px->core_frequency);
if (last_invalid == -1)
@@ -375,8 +373,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
}
if (last_invalid == 0) {
- printk(KERN_ERR FW_BUG PREFIX
- "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+ pr_err(FW_BUG
+ "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
result = -EFAULT;
kfree(pr->performance->states);
pr->performance->states = NULL;
@@ -385,7 +383,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
if (last_invalid > 0)
pr->performance->state_count = last_invalid;
- end:
+end:
kfree(buffer.pointer);
return result;
@@ -426,7 +424,7 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
- printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
+ pr_warn(FW_BUG "BIOS needs update for CPU "
"frequency support\n");
}
#endif
@@ -520,13 +518,13 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
psd = buffer.pointer;
if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (psd->package.count != 1) {
- printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
@@ -537,19 +535,19 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
status = acpi_extract_package(&(psd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+ pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
- printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
+ pr_err("Unknown _PSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
- printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
+ pr_err("Unknown _PSD:revision\n");
result = -EFAULT;
goto end;
}
@@ -557,7 +555,7 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
- printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
+ pr_err("Invalid _PSD:coord_type\n");
result = -EFAULT;
goto end;
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 677a132be242..a3d34e3f9f94 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -17,8 +17,6 @@
#include <acpi/processor.h>
#include <linux/uaccess.h>
-#define PREFIX "ACPI: "
-
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index e61b8f038364..a822fe410dda 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -6,9 +6,11 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * - Added processor hotplug support
+ * - Added processor hotplug support
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,8 +22,6 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#define PREFIX "ACPI: "
-
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
* 1 -> acpi processor driver ignores _TPC values
@@ -195,15 +195,13 @@ void acpi_processor_throttling_init(void)
{
if (acpi_processor_update_tsd_coord())
pr_debug("Assume no T-state coordination\n");
-
- return;
}
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
struct throttling_tstate *p_tstate = data;
struct acpi_processor *pr;
- unsigned int cpu ;
+ unsigned int cpu;
int target_state;
struct acpi_processor_limit *p_limit;
struct acpi_processor_throttling *p_throttling;
@@ -236,8 +234,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
- printk(KERN_WARNING
- "Exceed the limit of T-state \n");
+ pr_warn("Exceed the limit of T-state \n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
@@ -256,8 +253,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
cpu, target_state);
break;
default:
- printk(KERN_WARNING
- "Unsupported Throttling notifier event\n");
+ pr_warn("Unsupported Throttling notifier event\n");
break;
}
@@ -408,7 +404,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *ptc = NULL;
- union acpi_object obj = { 0 };
+ union acpi_object obj;
struct acpi_processor_throttling *throttling;
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
@@ -422,7 +418,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
ptc = (union acpi_object *)buffer.pointer;
if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
|| (ptc->package.count != 2)) {
- printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+ pr_err("Invalid _PTC data\n");
result = -EFAULT;
goto end;
}
@@ -436,8 +432,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
- printk(KERN_ERR PREFIX
- "Invalid _PTC data (control_register)\n");
+ pr_err("Invalid _PTC data (control_register)\n");
result = -EFAULT;
goto end;
}
@@ -453,7 +448,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
- printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+ pr_err("Invalid _PTC data (status_register)\n");
result = -EFAULT;
goto end;
}
@@ -465,19 +460,19 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
if ((throttling->control_register.bit_width +
throttling->control_register.bit_offset) > 32) {
- printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+ pr_err("Invalid _PTC control register\n");
result = -EFAULT;
goto end;
}
if ((throttling->status_register.bit_width +
throttling->status_register.bit_offset) > 32) {
- printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+ pr_err("Invalid _PTC status register\n");
result = -EFAULT;
goto end;
}
- end:
+end:
kfree(buffer.pointer);
return result;
@@ -506,7 +501,7 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
tss = buffer.pointer;
if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+ pr_err("Invalid _TSS data\n");
result = -EFAULT;
goto end;
}
@@ -546,15 +541,14 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
}
if (!tx->freqpercentage) {
- printk(KERN_ERR PREFIX
- "Invalid _TSS data: freq is zero\n");
+ pr_err("Invalid _TSS data: freq is zero\n");
result = -EFAULT;
kfree(pr->throttling.states_tss);
goto end;
}
}
- end:
+end:
kfree(buffer.pointer);
return result;
@@ -587,13 +581,13 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
tsd = buffer.pointer;
if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (tsd->package.count != 1) {
- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
@@ -606,19 +600,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
status = acpi_extract_package(&(tsd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
- printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
+ pr_err("Unknown _TSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
- printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
+ pr_err("Unknown _TSD:revision\n");
result = -EFAULT;
goto end;
}
@@ -639,7 +633,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
- end:
+end:
kfree(buffer.pointer);
return result;
}
@@ -711,13 +705,12 @@ static int acpi_throttling_rdmsr(u64 *value)
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
- printk(KERN_ERR PREFIX
- "HARDWARE addr space,NOT supported yet\n");
+ pr_err("HARDWARE addr space,NOT supported yet\n");
} else {
msr_low = 0;
msr_high = 0;
rdmsr_safe(MSR_IA32_THERM_CONTROL,
- (u32 *)&msr_low , (u32 *) &msr_high);
+ (u32 *)&msr_low, (u32 *) &msr_high);
msr = (msr_high << 32) | msr_low;
*value = (u64) msr;
ret = 0;
@@ -732,8 +725,7 @@ static int acpi_throttling_wrmsr(u64 value)
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
- printk(KERN_ERR PREFIX
- "HARDWARE addr space,NOT supported yet\n");
+ pr_err("HARDWARE addr space,NOT supported yet\n");
} else {
msr = value;
wrmsr_safe(MSR_IA32_THERM_CONTROL,
@@ -745,15 +737,13 @@ static int acpi_throttling_wrmsr(u64 value)
#else
static int acpi_throttling_rdmsr(u64 *value)
{
- printk(KERN_ERR PREFIX
- "HARDWARE addr space,NOT supported yet\n");
+ pr_err("HARDWARE addr space,NOT supported yet\n");
return -1;
}
static int acpi_throttling_wrmsr(u64 value)
{
- printk(KERN_ERR PREFIX
- "HARDWARE addr space,NOT supported yet\n");
+ pr_err("HARDWARE addr space,NOT supported yet\n");
return -1;
}
#endif
@@ -784,7 +774,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
ret = acpi_throttling_rdmsr(value);
break;
default:
- printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ pr_err("Unknown addr space %d\n",
(u32) (throttling->status_register.space_id));
}
return ret;
@@ -817,7 +807,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr,
ret = acpi_throttling_wrmsr(value);
break;
default:
- printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ pr_err("Unknown addr space %d\n",
(u32) (throttling->control_register.space_id));
}
return ret;
@@ -926,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
}
/* TBD: Support duty_cycle values that span bit 4. */
else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
- printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+ pr_warn("duty_cycle spans bit 4\n");
return -EINVAL;
}
@@ -1185,8 +1175,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
*/
if (acpi_processor_get_throttling_control(pr) ||
acpi_processor_get_throttling_states(pr) ||
- acpi_processor_get_platform_limit(pr))
- {
+ acpi_processor_get_platform_limit(pr)) {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
@@ -1246,7 +1235,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
goto end;
}
- end:
+end:
if (result)
pr->flags.throttling = 0;
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index 2a61f884e222..b79b7c99c237 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/reboot.h>
@@ -63,7 +65,7 @@ void acpi_reboot(void)
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
case ACPI_ADR_SPACE_SYSTEM_IO:
- printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
+ pr_debug("ACPI MEMORY or I/O RESET_REG.\n");
acpi_reset();
break;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index ee78a210c606..dc01fb550b28 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -423,6 +423,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
+static bool irq_is_legacy(struct acpi_resource_irq *irq)
+{
+ return irq->triggering == ACPI_EDGE_SENSITIVE &&
+ irq->polarity == ACPI_ACTIVE_HIGH &&
+ irq->shareable == ACPI_EXCLUSIVE;
+}
+
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@@ -461,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, true);
+ irq->shareable, irq_is_legacy(irq));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 3b0b6dd34914..4938010fcac7 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -7,6 +7,8 @@
* Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -23,8 +25,6 @@
#include "sbshc.h"
-#define PREFIX "ACPI: "
-
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
@@ -544,7 +544,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
goto end;
battery->have_sysfs_alarm = 1;
end:
- printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
+ pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
battery->name, battery->present ? "present" : "absent");
return result;
@@ -577,10 +577,10 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
result = PTR_ERR(sbs->charger);
sbs->charger = NULL;
}
- printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
+ pr_info("%s [%s]: AC Adapter [%s] (%s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
- end:
+end:
return result;
}
@@ -658,7 +658,7 @@ static int acpi_sbs_add(struct acpi_device *device)
acpi_battery_add(sbs, 0);
acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
- end:
+end:
if (result)
acpi_sbs_remove(device);
return result;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 53c2862c4c75..7c62e149a7a1 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -5,6 +5,8 @@
* Copyright (c) 2007 Alexey Starikovskiy
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi.h>
#include <linux/wait.h>
#include <linux/slab.h>
@@ -13,8 +15,6 @@
#include <linux/interrupt.h>
#include "sbshc.h"
-#define PREFIX "ACPI: "
-
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
@@ -109,7 +109,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
u8 temp, sz = 0;
if (!hc) {
- printk(KERN_ERR PREFIX "host controller is not configured\n");
+ pr_err("host controller is not configured\n");
return ret;
}
@@ -231,7 +231,6 @@ static int smbus_alarm(void *context)
case ACPI_SBS_BATTERY:
acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_smbus_callback, hc);
- default:;
}
mutex_unlock(&hc->lock);
return 0;
@@ -254,7 +253,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "error obtaining _EC.\n");
+ pr_err("error obtaining _EC.\n");
return -EIO;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 453eff8ec8c3..0641bc20b097 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -3,6 +3,8 @@
* scan.c - support for transforming the ACPI namespace into individual objects
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -47,12 +49,6 @@ static DEFINE_MUTEX(acpi_hp_context_lock);
*/
static u64 spcr_uart_addr;
-struct acpi_dep_data {
- struct list_head node;
- acpi_handle supplier;
- acpi_handle consumer;
-};
-
void acpi_scan_lock_acquire(void)
{
mutex_lock(&acpi_scan_lock);
@@ -612,11 +608,6 @@ struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
return handle_to_device(handle, get_acpi_device);
}
-void acpi_bus_put_acpi_device(struct acpi_device *adev)
-{
- acpi_dev_put(adev);
-}
-
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
struct acpi_device_bus_id *acpi_device_bus_id;
@@ -644,24 +635,29 @@ static int acpi_device_set_name(struct acpi_device *device,
return 0;
}
-int acpi_device_add(struct acpi_device *device,
- void (*release)(struct device *))
+static int acpi_tie_acpi_dev(struct acpi_device *adev)
{
- struct acpi_device_bus_id *acpi_device_bus_id;
- int result;
+ acpi_handle handle = adev->handle;
+ acpi_status status;
- if (device->handle) {
- acpi_status status;
+ if (!handle)
+ return 0;
- status = acpi_attach_data(device->handle, acpi_scan_drop_device,
- device);
- if (ACPI_FAILURE(status)) {
- acpi_handle_err(device->handle,
- "Unable to attach device data\n");
- return -ENODEV;
- }
+ status = acpi_attach_data(handle, acpi_scan_drop_device, adev);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "Unable to attach device data\n");
+ return -ENODEV;
}
+ return 0;
+}
+
+static int __acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *))
+{
+ struct acpi_device_bus_id *acpi_device_bus_id;
+ int result;
+
/*
* Linkage
* -------
@@ -729,7 +725,7 @@ int acpi_device_add(struct acpi_device *device,
result = acpi_device_setup_files(device);
if (result)
- printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
+ pr_err("Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
return 0;
@@ -750,6 +746,17 @@ err_unlock:
return result;
}
+int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
+{
+ int ret;
+
+ ret = acpi_tie_acpi_dev(adev);
+ if (ret)
+ return ret;
+
+ return __acpi_device_add(adev, release);
+}
+
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
@@ -1320,8 +1327,7 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_get_object_info(handle, &info);
if (!info) {
- pr_err(PREFIX "%s: Error reading device info\n",
- __func__);
+ pr_err("%s: Error reading device info\n", __func__);
return;
}
@@ -1405,7 +1411,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
*
* Return false if DMA is not supported. Otherwise, return true
*/
-bool acpi_dma_supported(struct acpi_device *adev)
+bool acpi_dma_supported(const struct acpi_device *adev)
{
if (!adev)
return false;
@@ -1671,8 +1677,16 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
- /* Assume there are unmet deps to start with. */
- device->dep_unmet = 1;
+}
+
+static void acpi_scan_dep_init(struct acpi_device *adev)
+{
+ struct acpi_dep_data *dep;
+
+ list_for_each_entry(dep, &acpi_dep_list, node) {
+ if (dep->consumer == adev->handle)
+ adev->dep_unmet++;
+ }
}
void acpi_device_add_finalize(struct acpi_device *device)
@@ -1688,9 +1702,10 @@ static void acpi_scan_init_status(struct acpi_device *adev)
}
static int acpi_add_single_object(struct acpi_device **child,
- acpi_handle handle, int type)
+ acpi_handle handle, int type, bool dep_init)
{
struct acpi_device *device;
+ bool release_dep_lock = false;
int result;
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
@@ -1703,13 +1718,32 @@ static int acpi_add_single_object(struct acpi_device **child,
* acpi_bus_get_status() and use its quirk handling. Note that
* this must be done before the get power-/wakeup_dev-flags calls.
*/
- if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR)
+ if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
+ if (dep_init) {
+ mutex_lock(&acpi_dep_list_lock);
+ /*
+ * Hold the lock until the acpi_tie_acpi_dev() call
+ * below to prevent concurrent acpi_scan_clear_dep()
+ * from deleting a dependency list entry without
+ * updating dep_unmet for the device.
+ */
+ release_dep_lock = true;
+ acpi_scan_dep_init(device);
+ }
acpi_scan_init_status(device);
+ }
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
- result = acpi_device_add(device, acpi_device_release);
+ result = acpi_tie_acpi_dev(device);
+
+ if (release_dep_lock)
+ mutex_unlock(&acpi_dep_list_lock);
+
+ if (!result)
+ result = __acpi_device_add(device, acpi_device_release);
+
if (result) {
acpi_device_release(&device->dev);
return result;
@@ -1886,22 +1920,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
return count;
}
-static void acpi_scan_dep_init(struct acpi_device *adev)
-{
- struct acpi_dep_data *dep;
-
- adev->dep_unmet = 0;
-
- mutex_lock(&acpi_dep_list_lock);
-
- list_for_each_entry(dep, &acpi_dep_list, node) {
- if (dep->consumer == adev->handle)
- adev->dep_unmet++;
- }
-
- mutex_unlock(&acpi_dep_list_lock);
-}
-
static bool acpi_bus_scan_second_pass;
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
@@ -1949,19 +1967,15 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
return AE_OK;
}
- acpi_add_single_object(&device, handle, type);
- if (!device)
- return AE_CTRL_DEPTH;
-
- acpi_scan_init_hotplug(device);
/*
* If check_dep is true at this point, the device has no dependencies,
* or the creation of the device object would have been postponed above.
*/
- if (check_dep)
- device->dep_unmet = 0;
- else
- acpi_scan_dep_init(device);
+ acpi_add_single_object(&device, handle, type, !check_dep);
+ if (!device)
+ return AE_CTRL_DEPTH;
+
+ acpi_scan_init_hotplug(device);
out:
if (!*adev_p)
@@ -2111,29 +2125,141 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
device->handler->hotplug.notify_online(device);
}
-void acpi_walk_dep_device_list(acpi_handle handle)
+static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{
- struct acpi_dep_data *dep, *tmp;
struct acpi_device *adev;
+ adev = acpi_bus_get_acpi_device(dep->consumer);
+ if (adev) {
+ *(struct acpi_device **)data = adev;
+ return 1;
+ }
+ /* Continue parsing if the device object is not present. */
+ return 0;
+}
+
+struct acpi_scan_clear_dep_work {
+ struct work_struct work;
+ struct acpi_device *adev;
+};
+
+static void acpi_scan_clear_dep_fn(struct work_struct *work)
+{
+ struct acpi_scan_clear_dep_work *cdw;
+
+ cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
+
+ acpi_scan_lock_acquire();
+ acpi_bus_attach(cdw->adev, true);
+ acpi_scan_lock_release();
+
+ acpi_dev_put(cdw->adev);
+ kfree(cdw);
+}
+
+static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
+{
+ struct acpi_scan_clear_dep_work *cdw;
+
+ if (adev->dep_unmet)
+ return false;
+
+ cdw = kmalloc(sizeof(*cdw), GFP_KERNEL);
+ if (!cdw)
+ return false;
+
+ cdw->adev = adev;
+ INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn);
+ /*
+ * Since the work function may block on the lock until the entire
+ * initial enumeration of devices is complete, put it into the unbound
+ * workqueue.
+ */
+ queue_work(system_unbound_wq, &cdw->work);
+
+ return true;
+}
+
+static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
+{
+ struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
+
+ if (adev) {
+ adev->dep_unmet--;
+ if (!acpi_scan_clear_dep_queue(adev))
+ acpi_dev_put(adev);
+ }
+
+ list_del(&dep->node);
+ kfree(dep);
+
+ return 0;
+}
+
+/**
+ * acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list
+ * @handle: The ACPI handle of the supplier device
+ * @callback: Pointer to the callback function to apply
+ * @data: Pointer to some data to pass to the callback
+ *
+ * The return value of the callback determines this function's behaviour. If 0
+ * is returned we continue to iterate over acpi_dep_list. If a positive value
+ * is returned then the loop is broken but this function returns 0. If a
+ * negative value is returned by the callback then the loop is broken and that
+ * value is returned as the final error.
+ */
+static int acpi_walk_dep_device_list(acpi_handle handle,
+ int (*callback)(struct acpi_dep_data *, void *),
+ void *data)
+{
+ struct acpi_dep_data *dep, *tmp;
+ int ret = 0;
+
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
- acpi_bus_get_device(dep->consumer, &adev);
-
- if (adev) {
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev, true);
- }
-
- list_del(&dep->node);
- kfree(dep);
+ ret = callback(dep, data);
+ if (ret)
+ break;
}
}
mutex_unlock(&acpi_dep_list_lock);
+
+ return ret > 0 ? 0 : ret;
+}
+
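For illustration, here is a minimal sketch of a callback obeying the protocol documented above; the name and the counting behaviour are invented, not part of the patch. Since acpi_walk_dep_device_list() is static, such a callback would live in scan.c alongside acpi_scan_clear_dep() and acpi_dev_get_first_consumer_dev_cb():

    /*
     * Hypothetical example: count how many consumers still depend on a
     * supplier.  The walker only invokes the callback for entries whose
     * ->supplier matches the handle it was given.
     */
    static int count_consumers_cb(struct acpi_dep_data *dep, void *data)
    {
            unsigned int *count = data;

            (*count)++;
            return 0;       /* 0: keep iterating over acpi_dep_list */
    }

Returning 1 after the first match would instead stop the walk early while still reporting success (0) to the caller; any negative return aborts the walk and is propagated as the error.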
+/**
+ * acpi_dev_clear_dependencies - Inform consumers that the device is now active
+ * @supplier: Pointer to the supplier &struct acpi_device
+ *
+ * Clear dependencies on the given device.
+ */
+void acpi_dev_clear_dependencies(struct acpi_device *supplier)
+{
+ acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
+
+/**
+ * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
+ * @supplier: Pointer to the dependee device
+ *
+ * Returns the first &struct acpi_device which declares itself dependent on
+ * @supplier via the _DEP buffer, parsed from the acpi_dep_list.
+ *
+ * The caller is responsible for putting the reference to adev when it is no
+ * longer needed.
+ */
+struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_walk_dep_device_list(supplier->handle,
+ acpi_dev_get_first_consumer_dev_cb, &adev);
+
+ return adev;
}
-EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
+EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
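A hedged usage sketch for the two new exports, with an invented supplier-driver function name (the real call sites live in platform glue drivers). Note that the consumer lookup has to happen before the dependencies are cleared, since clearing deletes the acpi_dep_list entries, and that the acquired reference must be dropped with acpi_dev_put():

    /* Hypothetical supplier-side fragment, not code from this patch. */
    static void example_supplier_ready(struct acpi_device *supplier)
    {
            struct acpi_device *consumer;

            /* Peek at one dependent device while the entries still exist. */
            consumer = acpi_dev_get_first_consumer_dev(supplier);
            if (consumer) {
                    dev_info(&consumer->dev, "waits for %s\n",
                             dev_name(&supplier->dev));
                    acpi_dev_put(consumer); /* drop the acquired reference */
            }

            /* Tell all consumers that the supplier is now functional. */
            acpi_dev_clear_dependencies(supplier);
    }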
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
@@ -2223,7 +2349,7 @@ int acpi_bus_register_early_device(int type)
struct acpi_device *device = NULL;
int result;
- result = acpi_add_single_object(&device, NULL, type);
+ result = acpi_add_single_object(&device, NULL, type, false);
if (result)
return result;
@@ -2243,7 +2369,7 @@ static int acpi_bus_scan_fixed(void)
struct acpi_device *device = NULL;
result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_POWER_BUTTON);
+ ACPI_BUS_TYPE_POWER_BUTTON, false);
if (result)
return result;
@@ -2259,7 +2385,7 @@ static int acpi_bus_scan_fixed(void)
struct acpi_device *device = NULL;
result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_SLEEP_BUTTON);
+ ACPI_BUS_TYPE_SLEEP_BUTTON, false);
if (result)
return result;
@@ -2278,7 +2404,7 @@ static void __init acpi_get_spcr_uart_addr(void)
status = acpi_get_table(ACPI_SIG_SPCR, 0,
(struct acpi_table_header **)&spcr_ptr);
if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
+ pr_warn("STAO table present, but SPCR is missing\n");
return;
}
@@ -2319,7 +2445,7 @@ int __init acpi_scan_init(void)
(struct acpi_table_header **)&stao_ptr);
if (ACPI_SUCCESS(status)) {
if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
- pr_info(PREFIX "STAO Name List not yet supported.\n");
+ pr_info("STAO Name List not yet supported.\n");
if (stao_ptr->ignore_uart)
acpi_get_spcr_uart_addr();
@@ -2408,46 +2534,28 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return count;
}
-struct acpi_table_events_work {
- struct work_struct work;
- void *table;
- u32 event;
-};
-
static void acpi_table_events_fn(struct work_struct *work)
{
- struct acpi_table_events_work *tew;
+ acpi_scan_lock_acquire();
+ acpi_bus_scan(ACPI_ROOT_OBJECT);
+ acpi_scan_lock_release();
- tew = container_of(work, struct acpi_table_events_work, work);
-
- if (tew->event == ACPI_TABLE_EVENT_LOAD) {
- acpi_scan_lock_acquire();
- acpi_bus_scan(ACPI_ROOT_OBJECT);
- acpi_scan_lock_release();
- }
-
- kfree(tew);
+ kfree(work);
}
-void acpi_scan_table_handler(u32 event, void *table, void *context)
+void acpi_scan_table_notify(void)
{
- struct acpi_table_events_work *tew;
+ struct work_struct *work;
if (!acpi_scan_initialized)
return;
- if (event != ACPI_TABLE_EVENT_LOAD)
- return;
-
- tew = kmalloc(sizeof(*tew), GFP_KERNEL);
- if (!tew)
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
return;
- INIT_WORK(&tew->work, acpi_table_events_fn);
- tew->table = table;
- tew->event = event;
-
- schedule_work(&tew->work);
+ INIT_WORK(work, acpi_table_events_fn);
+ schedule_work(work);
}
int acpi_reconfig_notifier_register(struct notifier_block *nb)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 09fd13757b65..3023224515ab 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -8,6 +8,8 @@
* Copyright (c) 2003 Open Source Development Lab
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
@@ -41,7 +43,7 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
* OS can't evaluate the _TTS object correctly. Some warning
* message will be printed. But it won't break anything.
*/
- printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
+ pr_notice("Failure in evaluating _TTS object\n");
}
}
@@ -73,8 +75,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
}
ACPI_FLUSH_CPU_CACHE();
#endif
- printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
- acpi_state);
+ pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
acpi_enable_wakeup_devices(acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
@@ -406,7 +407,7 @@ static int acpi_pm_freeze(void)
}
/**
- * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
+ * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
*/
static int acpi_pm_pre_suspend(void)
{
@@ -459,8 +460,7 @@ static void acpi_pm_finish(void)
if (acpi_state == ACPI_STATE_S0)
return;
- printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
- acpi_state);
+ pr_info("Waking up from system sleep state S%d\n", acpi_state);
acpi_disable_wakeup_devices(acpi_state);
acpi_leave_sleep_state(acpi_state);
@@ -581,7 +581,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
error = acpi_suspend_lowlevel();
if (error)
return error;
- pr_info(PREFIX "Low-level resume complete\n");
+ pr_info("Low-level resume complete\n");
pm_set_resume_via_firmware();
break;
}
@@ -921,7 +921,7 @@ static void acpi_hibernation_leave(void)
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature)
- pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
+ pr_crit("Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs) {
+ if (facs)
s4_hardware_signature = facs->hardware_signature;
- acpi_put_table((struct acpi_table_header *)facs);
- }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
@@ -1029,7 +1027,7 @@ static void acpi_power_off_prepare(void)
static void acpi_power_off(void)
{
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
- printk(KERN_DEBUG "%s called\n", __func__);
+ pr_debug("%s called\n", __func__);
local_irq_disable();
acpi_enter_sleep_state(ACPI_STATE_S5);
}
@@ -1061,7 +1059,7 @@ int __init acpi_sleep_init(void)
if (sleep_states[i])
pos += sprintf(pos, " S%d", i);
}
- pr_info(PREFIX "(supports%s)\n", supported);
+ pr_info("(supports%s)\n", supported);
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index d25927195d6d..00c0ebaab29f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -5,10 +5,11 @@
#define pr_fmt(fmt) "ACPI: " fmt
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <linux/acpi.h>
#include "internal.h"
@@ -254,16 +255,12 @@ static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable\n");
- else {
- if (acpi_gbl_trace_method_name) {
- if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
- return sprintf(buffer, "method-once\n");
- else
- return sprintf(buffer, "method\n");
- } else
- return sprintf(buffer, "enable\n");
- }
- return 0;
+ if (!acpi_gbl_trace_method_name)
+ return sprintf(buffer, "enable\n");
+ if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
+ return sprintf(buffer, "method-once\n");
+ else
+ return sprintf(buffer, "method\n");
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
@@ -359,8 +356,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
}
table_attr->instance++;
if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
- pr_warn("%4.4s: too many table instances\n",
- table_attr->name);
+ pr_warn("%4.4s: too many table instances\n", table_attr->name);
return -ERANGE;
}
@@ -388,8 +384,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
switch (event) {
case ACPI_TABLE_EVENT_INSTALL:
- table_attr =
- kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
+ table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
@@ -420,7 +415,7 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
- void __iomem *base;
+ void *base;
ssize_t rc;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
@@ -582,8 +577,6 @@ static void delete_gpe_attr_array(void)
kfree(counter_attrs);
}
kfree(all_attrs);
-
- return;
}
static void gpe_count(u32 gpe_number)
@@ -598,8 +591,6 @@ static void gpe_count(u32 gpe_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
-
- return;
}
static void fixed_event_count(u32 event_number)
@@ -612,8 +603,6 @@ static void fixed_event_count(u32 event_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
-
- return;
}
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
@@ -737,8 +726,7 @@ static ssize_t counter_set(struct kobject *kobj,
goto end;
if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
- printk(KERN_WARNING PREFIX
- "Can not change Invalid GPE/Fixed Event status\n");
+ pr_warn("Cannot change invalid GPE/Fixed Event status\n");
return -EINVAL;
}
@@ -796,6 +784,7 @@ end:
* the GPE flooding for GPE 00, they need to specify the following boot
* parameter:
* acpi_mask_gpe=0x00
+ * Note, the parameter can be a list (see bitmap_parselist() for the details).
* The masking status can be modified by the following runtime controlling
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
@@ -805,11 +794,16 @@ static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
+ int ret;
u8 gpe;
- if (kstrtou8(val, 0, &gpe))
- return -EINVAL;
- set_bit(gpe, acpi_masked_gpes_map);
+ ret = kstrtou8(val, 0, &gpe);
+ if (ret) {
+ ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
+ if (ret)
+ return ret;
+ } else {
+ set_bit(gpe, acpi_masked_gpes_map);
+ }
return 1;
}
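Two illustrative forms of the boot parameter (the GPE numbers are made up). A single value still goes through kstrtou8(), so 0x-prefixed hex is accepted; anything kstrtou8() rejects is handed to bitmap_parselist(), which expects decimal values, commas and ranges:

    acpi_mask_gpe=0x6E
    acpi_mask_gpe=78,93-95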
@@ -841,13 +835,11 @@ void acpi_irq_stats_init(void)
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
- all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
- GFP_KERNEL);
+ all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
if (all_attrs == NULL)
return;
- all_counters = kcalloc(num_counters, sizeof(struct event_counter),
- GFP_KERNEL);
+ all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
if (all_counters == NULL)
goto fail;
@@ -855,8 +847,7 @@ void acpi_irq_stats_init(void)
if (ACPI_FAILURE(status))
goto fail;
- counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
- GFP_KERNEL);
+ counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
@@ -906,7 +897,6 @@ void acpi_irq_stats_init(void)
fail:
delete_gpe_attr_array();
- return;
}
static void __exit interrupt_stats_exit(void)
@@ -914,31 +904,24 @@ static void __exit interrupt_stats_exit(void)
sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
delete_gpe_attr_array();
-
- return;
}
-static ssize_t
-acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
-static const struct kobj_attribute pm_profile_attr =
- __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
-static ssize_t hotplug_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
return sprintf(buf, "%d\n", hotplug->enabled);
}
-static ssize_t hotplug_enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t size)
+static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t size)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
unsigned int val;
@@ -950,9 +933,7 @@ static ssize_t hotplug_enabled_store(struct kobject *kobj,
return size;
}
-static struct kobj_attribute hotplug_enabled_attr =
- __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
- hotplug_enabled_store);
+static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
@@ -983,7 +964,7 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
return;
err_out:
- pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
+ pr_err("Unable to add hotplug profile '%s'\n", name);
}
static ssize_t force_remove_show(struct kobject *kobj,
@@ -1010,9 +991,7 @@ static ssize_t force_remove_store(struct kobject *kobj,
return size;
}
-static const struct kobj_attribute force_remove_attr =
- __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
- force_remove_store);
+static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
int __init acpi_sysfs_init(void)
{
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 9d581045acff..a37a1532a575 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -39,6 +39,7 @@ static int acpi_apic_instance __initdata;
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
+ ACPI_SUBTABLE_PRMT,
};
struct acpi_subtable_entry {
@@ -222,6 +223,8 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return entry->hdr->common.type;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.type;
+ case ACPI_SUBTABLE_PRMT:
+ return 0;
}
return 0;
}
@@ -234,6 +237,8 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->common.length;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.length;
+ case ACPI_SUBTABLE_PRMT:
+ return entry->hdr->prmt.length;
}
return 0;
}
@@ -246,6 +251,8 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->common);
case ACPI_SUBTABLE_HMAT:
return sizeof(entry->hdr->hmat);
+ case ACPI_SUBTABLE_PRMT:
+ return sizeof(entry->hdr->prmt);
}
return 0;
}
@@ -255,6 +262,8 @@ acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
+ if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+ return ACPI_SUBTABLE_PRMT;
return ACPI_SUBTABLE_COMMON;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3b54b8fd7396..e7ddd281afff 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -277,6 +277,20 @@ acpi_evaluate_integer(acpi_handle handle,
EXPORT_SYMBOL(acpi_evaluate_integer);
+int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ unsigned long long adr;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
+ if (ACPI_FAILURE(status))
+ return -ENODATA;
+
+ *addr = (u32)adr;
+ return 0;
+}
+EXPORT_SYMBOL(acpi_get_local_address);
+
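A brief usage sketch for the new helper, with invented names; it evaluates _ADR for the handle and returns the low 32 bits, or -ENODATA when the object is missing:

    /* Illustrative only: read a child device's bus-local address. */
    static int example_read_adr(struct acpi_device *adev)
    {
            u32 addr;
            int ret;

            ret = acpi_get_local_address(adev->handle, &addr);
            if (ret)        /* -ENODATA: the device has no _ADR object */
                    return ret;

            dev_dbg(&adev->dev, "_ADR reports local address %#x\n", addr);
            return 0;
    }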
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 2b69536cdccb..816bf2c34b7a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
{"", },
};
+/* Microsoft platform-agnostic UUID */
+#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
+
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
@@ -39,15 +42,22 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
+#define ACPI_LPS0_MS_ENTRY 7
+#define ACPI_LPS0_MS_EXIT 8
/* AMD */
#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
+#define ACPI_LPS0_ENTRY_AMD 2
+#define ACPI_LPS0_EXIT_AMD 3
#define ACPI_LPS0_SCREEN_OFF_AMD 4
#define ACPI_LPS0_SCREEN_ON_AMD 5
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
+static int lps0_dsm_func_mask;
+
+static guid_t lps0_dsm_guid_microsoft;
+static int lps0_dsm_func_mask_microsoft;
/* Device constraint entry structure */
struct lpi_device_info {
@@ -68,15 +78,7 @@ struct lpi_constraints {
int min_dstate;
};
-/* AMD */
-/* Device constraint entry structure */
-struct lpi_device_info_amd {
- int revision;
- int count;
- union acpi_object *package;
-};
-
-/* Constraint package structure */
+/* AMD Constraint package structure */
struct lpi_device_constraint_amd {
char *name;
int enabled;
@@ -94,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
int i, j, k;
out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
- return;
-
acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
out_obj ? "successful" : "failed");
+ if (!out_obj)
+ return;
+
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
@@ -315,14 +317,15 @@ static void lpi_check_constraints(void)
}
}
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
+static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
{
union acpi_object *out_obj;
- if (!(lps0_dsm_func_mask & (1 << func)))
+ if (!(func_mask & (1 << func)))
return;
- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
+ rev_id, func, NULL);
ACPI_FREE(out_obj);
acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
@@ -334,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
}
+static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
+{
+ union acpi_object *obj;
+ int ret = -EINVAL;
+
+ guid_parse(uuid, dsm_guid);
+ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
+
+ /* Check if the _DSM is present and as expected. */
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
+ obj->buffer.length > sizeof(u32)) {
+ acpi_handle_debug(handle,
+ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
+ goto out;
+ }
+
+ ret = *(int *)obj->buffer.pointer;
+ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
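The value validate_dsm() returns is the function bitmask from _DSM function 0: bit n set means function n is implemented, which is exactly what acpi_sleep_run_lps0_dsm() tests with func_mask & (1 << func). The AMD0004/AMDI0005 quirk below compensates for firmware that reports the mask shifted down by one; a worked example with a made-up mask value:

    /* Worked example of the off-by-one adjustment (value invented). */
    int mask = 0x7;                 /* firmware claims functions 0-2 */

    mask = (mask << 1) | 0x1;       /* shift up, keep function 0 set */
    /* mask == 0xF: functions 0-3 are now treated as implemented */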
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
- union acpi_object *out_obj;
-
if (lps0_device_handle)
return 0;
@@ -346,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
return 0;
if (acpi_s2idle_vendor_amd()) {
- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+ /*
+ * AMD0004, AMDI0005:
+ * - should use rev_id 0x0
+ * - function mask > 0x3: should use AMD method, but has an
+ *   off-by-one bug
+ * - function mask = 0x3: should use Microsoft method
+ * AMDI0006:
+ * - should use rev_id 0x0
+ * - function mask = 0x3: should use Microsoft method
+ */
+ const char *hid = acpi_device_hid(adev);
rev_id = 0;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+ &lps0_dsm_guid_microsoft);
+ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+ !strcmp(hid, "AMDI0005"))) {
+ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+ }
} else {
- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
rev_id = 1;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
- /* Check if the _DSM is present and as expected. */
- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
- acpi_handle_debug(adev->handle,
- "_DSM function 0 evaluation failed\n");
- return 0;
- }
-
- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- lps0_dsm_func_mask);
+ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+ return 0; /* function evaluation failed */
lps0_device_handle = adev->handle;
@@ -406,11 +439,23 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
+ if (lps0_dsm_func_mask_microsoft > 0) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ } else if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
} else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
return 0;
@@ -421,11 +466,23 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+ if (lps0_dsm_func_mask_microsoft > 0) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ } else if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
} else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bdc1ba00aee9..f22f23933063 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
return ret;
}
+
+/*
+ * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+ *
+ * This is "typically" accomplished using the `StorageD3Enable`
+ * property in the _DSD that is checked via the `acpi_storage_d3` function
+ * but this property was introduced after many of these systems launched
+ * and most OEM systems don't have it in their BIOS.
+ *
+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ * a hardcoded allowlist for D3 support, which was used for these platforms.
+ *
+ * This allows quirking on Linux in a similar fashion.
+ */
+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
+ {}
+};
+
+bool force_storage_d3(void)
+{
+ return x86_match_cpu(storage_d3_cpu_ids);
+}
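A sketch of how a storage driver might consume the quirk; the function name here is hypothetical, although the kernel's NVMe PCI driver performs an equivalent check, and the fallback assumes the acpi_storage_d3() helper referenced in the comment above. x86_match_cpu() returns the matching table entry or NULL, so force_storage_d3() reduces to a boolean CPU-model test:

    /* Hypothetical call site: prefer D3 on the quirked AMD platforms. */
    static bool example_wants_d3(struct device *dev)
    {
            if (force_storage_d3()) {
                    dev_dbg(dev, "CPU quirk: forcing D3 over suspend\n");
                    return true;
            }
            return acpi_storage_d3(dev);    /* regular _DSD property path */
    }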
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 030cb32da980..b7a5abee2147 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1015,11 +1015,11 @@ config PATA_CMD640_PCI
If unsure, say N.
config PATA_FALCON
- tristate "Atari Falcon PATA support"
- depends on M68K && ATARI
+ tristate "Atari Falcon and Q40/Q60 PATA support"
+ depends on M68K && (ATARI || Q40)
help
This option enables support for the on-board IDE
- interface on the Atari Falcon.
+ interface on the Atari Falcon and Q40/Q60.
If unsure, say N.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 33192a8f687d..186cbf90c8ea 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -446,6 +446,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+ /* Dell S140/S150 */
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index d1f284f0c83d..2e89499bd9c3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -384,12 +384,15 @@ extern struct device_attribute *ahci_sdev_attrs[];
* for ATA_BASE_SHT
*/
#define AHCI_SHT(drv_name) \
- ATA_NCQ_SHT(drv_name), \
+ __ATA_BASE_SHT(drv_name), \
.can_queue = AHCI_MAX_CMDS, \
.sg_tablesize = AHCI_MAX_SG, \
.dma_boundary = AHCI_DMA_BOUNDARY, \
.shost_attrs = ahci_shost_attrs, \
- .sdev_attrs = ahci_sdev_attrs
+ .sdev_attrs = ahci_sdev_attrs, \
+ .change_queue_depth = ata_scsi_change_queue_depth, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index cb69b737cb49..56b695136977 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}
static const struct ata_port_info ahci_sunxi_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index d671d33ef287..c3a65ccd4b79 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -252,8 +252,9 @@ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
}
static struct scsi_host_template atiixp_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations atiixp_port_ops = {
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index d09d432d3c44..247c14702624 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -95,8 +95,9 @@ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
static struct scsi_host_template cs5520_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5520_port_ops = {
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index a1b4aaccaa50..d5b7ac14e78f 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -147,8 +147,9 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
}
static struct scsi_host_template cs5530_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5530_port_ops = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index e1486fe298ae..5b3a7a8ebef6 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -41,6 +41,10 @@ enum {
CY82_INDEX_TIMEOUT = 0x32
};
+static bool enable_dma = true;
+module_param(enable_dma, bool, 0);
+MODULE_PARM_DESC(enable_dma, "Enable bus master DMA operations");
+
/**
* cy82c693_set_piomode - set initial PIO mode data
* @ap: ATA interface
@@ -124,14 +128,16 @@ static struct ata_port_operations cy82c693_port_ops = {
static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- static const struct ata_port_info info = {
+ static struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
.port_ops = &cy82c693_port_ops
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ if (enable_dma)
+ info.mwdma_mask = ATA_MWDMA2;
+
/* Devfn 1 is the ATA primary. The secondary is magic and on devfn2.
For the moment we don't handle the secondary. FIXME */
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index badab6708893..46208ececbb6 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- err = -ENXIO;
+ err = irq;
goto err_rel_gpio;
}
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 27b0952fde6b..9d0dd8f4c21c 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -33,8 +33,6 @@
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
-#define ATA_HD_CONTROL 0x39
-
static struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -121,23 +119,42 @@ static struct ata_port_operations pata_falcon_ops = {
static int __init pata_falcon_init_one(struct platform_device *pdev)
{
- struct resource *res;
+ struct resource *base_mem_res, *ctl_mem_res;
+ struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
+ int irq = 0;
- dev_info(&pdev->dev, "Atari Falcon PATA controller\n");
+ dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ base_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (base_res && !devm_request_region(&pdev->dev, base_res->start,
+ resource_size(base_res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
+ return -EBUSY;
+ }
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), DRV_NAME)) {
+ ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (ctl_res && !devm_request_region(&pdev->dev, ctl_res->start,
+ resource_size(ctl_res), DRV_NAME)) {
dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
+ base_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base_mem_res)
+ return -ENODEV;
+ if (!devm_request_mem_region(&pdev->dev, base_mem_res->start,
+ resource_size(base_mem_res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
+ return -EBUSY;
+ }
+
+ ctl_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!ctl_mem_res)
+ return -ENODEV;
+
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
@@ -147,10 +164,10 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->ops = &pata_falcon_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- ap->flags |= ATA_FLAG_PIO_POLLING;
- base = (void __iomem *)res->start;
- ap->ioaddr.data_addr = base;
+ base = (void __iomem *)base_mem_res->start;
+ /* N.B. this assumes data_addr will be used for word-sized I/O only */
+ ap->ioaddr.data_addr = base + 0 + 0 * 4;
ap->ioaddr.error_addr = base + 1 + 1 * 4;
ap->ioaddr.feature_addr = base + 1 + 1 * 4;
ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
@@ -161,14 +178,25 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->ioaddr.status_addr = base + 1 + 7 * 4;
ap->ioaddr.command_addr = base + 1 + 7 * 4;
- ap->ioaddr.altstatus_addr = base + ATA_HD_CONTROL;
- ap->ioaddr.ctl_addr = base + ATA_HD_CONTROL;
+ base = (void __iomem *)ctl_mem_res->start;
+ ap->ioaddr.altstatus_addr = base + 1;
+ ap->ioaddr.ctl_addr = base + 1;
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)base,
- (unsigned long)base + ATA_HD_CONTROL);
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+ (unsigned long)base_mem_res->start,
+ (unsigned long)ctl_mem_res->start);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_res && irq_res->start > 0) {
+ irq = irq_res->start;
+ } else {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
/* activate */
- return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
+ return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
+ IRQF_SHARED, &pata_falcon_sht);
}
static int __exit pata_falcon_remove_one(struct platform_device *pdev)
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index e47a28271f5b..be0ca8d5b345 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -914,7 +914,7 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
#endif /* CONFIG_PM_SLEEP */
static struct scsi_host_template pata_macio_sht = {
- ATA_BASE_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
@@ -923,6 +923,9 @@ static struct scsi_host_template pata_macio_sht = {
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
+ .sdev_attrs = ata_common_sdev_attrs,
+ .can_queue = ATA_DEF_QUEUE,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index bd87476ab481..b5a3f710d76d 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq_handler = octeon_cf_interrupt;
i = platform_get_irq(dma_dev, 0);
- if (i > 0)
+ if (i > 0) {
irq = i;
+ irq_handler = octeon_cf_interrupt;
+ }
}
of_node_put(dma_node);
}
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 479c4b29b856..2e110aefe59b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -115,10 +115,10 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENOENT;
- }
+ if (irq < 0)
+ return irq;
+ if (!irq)
+ return -EINVAL;
gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
if (IS_ERR(gpiod)) {
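The same platform_get_irq() convention is applied in sata_highbank below: a negative return is an error code to propagate (the core already logs the failure, hence the dropped dev_err()), and 0 is never a valid Linux IRQ, so it maps to -EINVAL. A minimal probe sketch with invented names (example_probe, example_isr):

    /* Canonical probe-time IRQ lookup; names are illustrative. */
    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;     /* stub handler for the sketch */
    }

    static int example_probe(struct platform_device *pdev)
    {
            int irq;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* core has already printed an error */
            if (!irq)
                    return -EINVAL; /* IRQ 0 is not a valid Linux IRQ */

            return devm_request_irq(&pdev->dev, irq, example_isr, 0,
                                    dev_name(&pdev->dev), NULL);
    }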
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 3b8c111140bd..f28daf62a37d 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -193,8 +193,9 @@ static int sc1200_qc_defer(struct ata_queued_cmd *qc)
}
static struct scsi_host_template sc1200_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations sc1200_port_ops = {
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 7511e11eef4d..b602e303fb54 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -253,8 +253,9 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
}
static struct scsi_host_template serverworks_osb4_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct scsi_host_template serverworks_csb_sht = {
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d55ee244d693..e5838b23c9e0 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -313,7 +313,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
- DPRINTK("ICC register status: (hcr base: 0x%x) = 0x%x\n",
+ DPRINTK("ICC register status: (hcr base: %p) = 0x%x\n",
hcr_base, ioread32(hcr_base + ICC));
}
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 64b2ef15ec19..8440203e835e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(dev, "no irq\n");
- return -EINVAL;
+ return irq;
}
+ if (!irq)
+ return -EINVAL;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c8867c12c0b8..9d86203e1e7a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -666,10 +666,14 @@ static struct scsi_host_template mv5_sht = {
};
#endif
static struct scsi_host_template mv6_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 20190f66ced9..c385d18ce87b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -375,19 +375,25 @@ static struct scsi_host_template nv_sht = {
};
static struct scsi_host_template nv_adma_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static struct scsi_host_template nv_swncq_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
/*
@@ -2118,7 +2124,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
*/
lack_dhfis = 1;
- DPRINTK("id 0x%x QC: qc_active 0x%x,"
+ DPRINTK("id 0x%x QC: qc_active 0x%llx, "
"SWNCQ:qc_active 0x%X defer_bits %X "
"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
ap->print_id, ap->qc_active, pp->qc_active,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 560070d4f1d0..06a1e27c4f84 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -374,11 +374,14 @@ static struct pci_driver sil24_pci_driver = {
};
static struct scsi_host_template sil24_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations sil24_ops = {
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index b508df2ecada..fb2be3574c26 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -420,6 +420,7 @@ fore200e_shutdown(struct fore200e* fore200e)
/* XXX shouldn't we *start* by deregistering the device? */
atm_dev_deregister(fore200e->atm_dev);
+ fallthrough;
case FORE200E_STATE_BLANK:
/* nothing to do for that state */
break;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 933e3ff2ee8d..bc8e8d9f176b 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -47,6 +47,7 @@
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
+#include <linux/ctype.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
@@ -996,10 +997,12 @@ static void xdump( u_char* cp, int length, char* prefix )
}
pBuf += sprintf( pBuf, " " );
for(col = 0;count + col < length && col < 16; col++){
- if (isprint((int)cp[count + col]))
- pBuf += sprintf( pBuf, "%c", cp[count + col] );
- else
- pBuf += sprintf( pBuf, "." );
+ u_char c = cp[count + col];
+
+ if (isascii(c) && isprint(c))
+ pBuf += sprintf(pBuf, "%c", c);
+ else
+ pBuf += sprintf(pBuf, ".");
}
printk("%s\n", prntBuf);
count += col;
@@ -3279,7 +3282,7 @@ static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);
- del_timer(&ia_timer);
+ del_timer_sync(&ia_timer);
}
module_init(ia_module_init);
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 2beacf2fc1ec..2f5f8875cbd1 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -124,7 +124,6 @@
#define IF_RXPKT(A)
#endif /* CONFIG_ATM_IA_DEBUG */
-#define isprint(a) ((a >=' ')&&(a <= '~'))
#define ATM_DESC(skb) (skb->protocol)
#define IA_SKB_STATE(skb) (skb->protocol)
#define IA_DLED 1
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 5c7e4df159b9..bc5a6ab6fa4b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -299,7 +299,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");
- del_timer(&ns_timer);
+ del_timer_sync(&ns_timer);
pci_unregister_driver(&nicstar_driver);
@@ -527,6 +527,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
writel(0x00000000, card->membase + VPM);
+ card->intcnt = 0;
+ if (request_irq
+ (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+ pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+ error = 9;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
/* Initialize TSQ */
card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -753,15 +762,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->efbie = 1;
- card->intcnt = 0;
- if (request_irq
- (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
- error = 9;
- ns_init_card_error(card, error);
- return error;
- }
-
/* Register device */
card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
-1, NULL);
@@ -839,10 +839,12 @@ static void ns_init_card_error(ns_dev *card, int error)
dev_kfree_skb_any(hb);
}
if (error >= 12) {
- kfree(card->rsq.org);
+ dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ card->rsq.org, card->rsq.dma);
}
if (error >= 11) {
- kfree(card->tsq.org);
+ dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ card->tsq.org, card->tsq.dma);
}
if (error >= 10) {
free_irq(card->pcidev->irq, card);
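
The error-path change above matters because the TSQ/RSQ buffers come from dma_alloc_coherent(); returning them with kfree() is invalid, and the free must repeat the allocation's size and DMA handle. The general pairing, as a sketch (dev and size are placeholders):

static int example_dma_roundtrip(struct device *dev, size_t size)
{
        dma_addr_t handle;
        void *vaddr;

        vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* ... hardware works on the buffer via 'handle' ... */

        /* must mirror the allocation; kfree(vaddr) would be a bug */
        dma_free_coherent(dev, size, vaddr, handle);
        return 0;
}
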
diff --git a/drivers/atm/zeprom.h b/drivers/atm/zeprom.h
index 88e01f808a86..8e8819a3840d 100644
--- a/drivers/atm/zeprom.h
+++ b/drivers/atm/zeprom.h
@@ -12,7 +12,7 @@
#define ZEPROM_V1_REG PCI_VENDOR_ID /* PCI register */
#define ZEPROM_V2_REG 0x40
-/* Bits in contol register */
+/* Bits in control register */
#define ZEPROM_SK 0x80000000 /* strobe (probably on rising edge) */
#define ZEPROM_CS 0x40000000 /* Chip Select */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 628e33939aca..2a61003ea2c1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -194,6 +194,17 @@ int device_links_read_lock_held(void)
{
return srcu_read_lock_held(&device_links_srcu);
}
+
+static void device_link_synchronize_removal(void)
+{
+ synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);
@@ -224,6 +235,16 @@ int device_links_read_lock_held(void)
return lockdep_is_held(&device_links_lock);
}
#endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+}
#endif /* !CONFIG_SRCU */
static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -445,8 +466,13 @@ static struct attribute *devlink_attrs[] = {
};
ATTRIBUTE_GROUPS(devlink);
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
{
+ struct device_link *link = container_of(work, struct device_link, rm_work);
+
+ /* Ensure that all references to the link object have been dropped. */
+ device_link_synchronize_removal();
+
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
@@ -455,24 +481,19 @@ static void device_link_free(struct device_link *link)
kfree(link);
}
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
- device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
static void devlink_dev_release(struct device *dev)
{
struct device_link *link = to_devlink(dev);
- call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
-}
-#else
-static void devlink_dev_release(struct device *dev)
-{
- device_link_free(to_devlink(dev));
+ INIT_WORK(&link->rm_work, device_link_release_fn);
+ /*
+ * It may take a while to complete this work because of the SRCU
+ * synchronization in device_link_release_fn() and if the consumer or
+ * supplier devices get deleted when it runs, so put it into the "long"
+ * workqueue.
+ */
+ queue_work(system_long_wq, &link->rm_work);
}
-#endif
static struct class devlink_class = {
.name = "devlink",
@@ -846,7 +867,6 @@ out:
}
EXPORT_SYMBOL_GPL(device_link_add);
-#ifdef CONFIG_SRCU
static void __device_link_del(struct kref *kref)
{
struct device_link *link = container_of(kref, struct device_link, kref);
@@ -856,25 +876,9 @@ static void __device_link_del(struct kref *kref)
pm_runtime_drop_link(link);
- list_del_rcu(&link->s_node);
- list_del_rcu(&link->c_node);
- device_unregister(&link->link_dev);
-}
-#else /* !CONFIG_SRCU */
-static void __device_link_del(struct kref *kref)
-{
- struct device_link *link = container_of(kref, struct device_link, kref);
-
- dev_info(link->consumer, "Dropping the link to %s\n",
- dev_name(link->supplier));
-
- pm_runtime_drop_link(link);
-
- list_del(&link->s_node);
- list_del(&link->c_node);
+ device_link_remove_from_lists(link);
device_unregister(&link->link_dev);
}
-#endif /* !CONFIG_SRCU */
static void device_link_put_kref(struct device_link *link)
{
@@ -4723,6 +4727,13 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
+{
+ dev->fwnode = fwnode;
+ dev->of_node = to_of_node(fwnode);
+}
+EXPORT_SYMBOL_GPL(device_set_node);
+
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
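
The new device_set_node() helper keeps dev->fwnode and dev->of_node consistent instead of callers open-coding both assignments. A hedged usage sketch for a driver attaching a devicetree node to a child device it creates (names illustrative):

static void example_attach_node(struct device *child,
                                struct device_node *np)
{
        /* sets child->fwnode and the matching child->of_node in one go */
        device_set_node(child, of_fwnode_handle(np));
}
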
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b31b3af5c490..d5ffaab3cb61 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
struct zone *zone;
int ret;
- zone = page_zone(pfn_to_page(start_pfn));
-
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
- if (nr_vmemmap_pages)
+ if (nr_vmemmap_pages) {
+ zone = page_zone(pfn_to_page(start_pfn));
adjust_present_page_count(zone, -nr_vmemmap_pages);
+ }
ret = offline_pages(start_pfn + nr_vmemmap_pages,
nr_pages - nr_vmemmap_pages);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 2c36f61d30bc..9db297431b97 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -482,6 +482,7 @@ static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
static ssize_t node_read_numastat(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ fold_vm_numa_events();
return sysfs_emit(buf,
"numa_hit %lu\n"
"numa_miss %lu\n"
@@ -489,12 +490,12 @@ static ssize_t node_read_numastat(struct device *dev,
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
- sum_zone_numa_state(dev->id, NUMA_HIT),
- sum_zone_numa_state(dev->id, NUMA_MISS),
- sum_zone_numa_state(dev->id, NUMA_FOREIGN),
- sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
- sum_zone_numa_state(dev->id, NUMA_LOCAL),
- sum_zone_numa_state(dev->id, NUMA_OTHER));
+ sum_zone_numa_event_state(dev->id, NUMA_HIT),
+ sum_zone_numa_event_state(dev->id, NUMA_MISS),
+ sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
+ sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
+ sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
@@ -512,10 +513,11 @@ static ssize_t node_read_vmstat(struct device *dev,
sum_zone_node_page_state(nid, i));
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
+ fold_vm_numa_events();
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
len += sysfs_emit_at(buf, len, "%s %lu\n",
numa_stat_name(i),
- sum_zone_numa_state(nid, i));
+ sum_zone_numa_event_state(nid, i));
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index b6a782c31613..ab0b740cc0f1 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -379,6 +379,44 @@ err:
return ret;
}
+static int genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ unsigned int prev_state;
+ int ret;
+
+ prev_state = gpd_data->performance_state;
+ if (prev_state == state)
+ return 0;
+
+ gpd_data->performance_state = state;
+ state = _genpd_reeval_performance_state(genpd, state);
+
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev_state;
+
+ return ret;
+}
+
+static int genpd_drop_performance_state(struct device *dev)
+{
+ unsigned int prev_state = dev_gpd_data(dev)->performance_state;
+
+ if (!genpd_set_performance_state(dev, 0))
+ return prev_state;
+
+ return 0;
+}
+
+static void genpd_restore_performance_state(struct device *dev,
+ unsigned int state)
+{
+ if (state)
+ genpd_set_performance_state(dev, state);
+}
+
/**
* dev_pm_genpd_set_performance_state- Set performance state of device's power
* domain.
@@ -397,8 +435,6 @@ err:
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data;
- unsigned int prev;
int ret;
genpd = dev_to_genpd_safe(dev);
@@ -410,16 +446,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
return -EINVAL;
genpd_lock(genpd);
-
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- prev = gpd_data->performance_state;
- gpd_data->performance_state = state;
-
- state = _genpd_reeval_performance_state(genpd, state);
- ret = _genpd_set_performance_state(genpd, state, 0);
- if (ret)
- gpd_data->performance_state = prev;
-
+ ret = genpd_set_performance_state(dev, state);
genpd_unlock(genpd);
return ret;
@@ -572,6 +599,7 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
* RPM status of the related device is in an intermediate state, not yet turned
* into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
* be RPM_SUSPENDED, while it tries to power off the PM domain.
+ * @depth: nesting count for lockdep.
*
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
@@ -832,7 +860,8 @@ static int genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
bool (*suspend_ok)(struct device *__dev);
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ struct gpd_timing_data *td = &gpd_data->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
@@ -889,6 +918,7 @@ static int genpd_runtime_suspend(struct device *dev)
return 0;
genpd_lock(genpd);
+ gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
@@ -906,7 +936,8 @@ static int genpd_runtime_suspend(struct device *dev)
static int genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ struct gpd_timing_data *td = &gpd_data->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
@@ -930,6 +961,8 @@ static int genpd_runtime_resume(struct device *dev)
genpd_lock(genpd);
ret = genpd_power_on(genpd, 0);
+ if (!ret)
+ genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
genpd_unlock(genpd);
if (ret)
@@ -968,6 +1001,7 @@ err_stop:
err_poweroff:
if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
+ gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
}
@@ -2505,7 +2539,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
/**
* of_genpd_remove_last - Remove the last PM domain registered for a provider
- * @provider: Pointer to device structure associated with provider
+ * @np: Pointer to device node associated with provider
*
* Find the last PM domain that was added by a particular provider and
* remove this PM domain from the list of PM domains. The provider is
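
The refactor above factors the vote bookkeeping into genpd_set_performance_state() and teaches runtime PM to park a device's performance-state vote on suspend (genpd_drop_performance_state(), saved in rpm_pstate) and restore it on resume, so an idle device no longer pins its domain at a high OPP. From a consumer driver the API is unchanged; a minimal sketch (state numbering is domain-specific, names illustrative):

static int example_enable_fast_mode(struct device *dev)
{
        /*
         * Vote for a higher domain performance state. With the change
         * above, runtime suspend drops this vote automatically and
         * runtime resume reinstates it.
         */
        return dev_pm_genpd_set_performance_state(dev, 3);
}
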
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index c6c218758f0b..cd08c5885190 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -252,6 +252,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
/**
* _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
+ * @now: current ktime.
*
* This routine must be executed under the PM domain's lock.
*/
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b570848d23e0..8a66eaf731e4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -345,7 +345,7 @@ static void rpm_suspend_suppliers(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval, idx;
+ int retval = 0, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe) {
@@ -373,7 +373,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
}
}
- retval = cb(dev);
+ if (cb)
+ retval = cb(dev);
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
@@ -446,7 +447,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks)
+ callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+ /* If no callback assume success. */
+ if (!callback || dev->power.no_callbacks)
goto out;
/* Carry out an asynchronous or a synchronous idle notification. */
@@ -462,10 +466,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- callback = RPM_GET_CALLBACK(dev, runtime_idle);
-
- if (callback)
- retval = __rpm_callback(callback, dev);
+ retval = __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
@@ -484,9 +485,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
int retval;
- if (!cb)
- return -ENOSYS;
-
if (dev->power.memalloc_noio) {
unsigned int noio_flag;
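
With the hunks above, a missing runtime-PM callback is treated as success inside __rpm_callback()/rpm_idle() instead of rpm_callback() failing with -ENOSYS, so drivers can legitimately leave slots NULL. Sketch of a pm_ops table relying on this (example_* are placeholders):

static int example_rt_suspend(struct device *dev) { return 0; }
static int example_rt_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
        /* NULL .runtime_idle now means "idle OK", not -ENOSYS */
        SET_RUNTIME_PM_OPS(example_rt_suspend, example_rt_resume, NULL)
};
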
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 8e021082dba8..3bad3266a2ad 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -182,7 +182,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
@@ -192,7 +191,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
* so we use a threaded irq.
*/
err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
- IRQF_ONESHOT, wirq->name, wirq);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ wirq->name, wirq);
if (err)
goto err_free_name;
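
IRQF_NO_AUTOEN makes the request leave the line disabled, closing the window that existed between irq_set_status_flags(irq, IRQ_NOAUTOEN) and the request in the old code. The same idiom for a plain handler, as a sketch (example_handler is illustrative):

static int example_request(int irq, void *data)
{
        int err;

        err = request_irq(irq, example_handler, IRQF_NO_AUTOEN,
                          "example", data);
        if (err)
                return err;

        /* line stays masked until the driver is ready for interrupts */
        enable_irq(irq);
        return 0;
}
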
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1421e9548857..1f533b314efc 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
struct fwnode_handle *dev_fwnode(struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
- &dev->of_node->fwnode : dev->fwnode;
+ of_fwnode_handle(dev->of_node) : dev->fwnode;
}
EXPORT_SYMBOL_GPL(dev_fwnode);
@@ -759,13 +759,8 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- struct fwnode_handle *fwnode = NULL, *next;
-
- if (dev->of_node)
- fwnode = &dev->of_node->fwnode;
- else if (adev)
- fwnode = acpi_fwnode_handle(adev);
+ const struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *next;
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
@@ -868,28 +863,31 @@ EXPORT_SYMBOL_GPL(device_get_child_node_count);
bool device_dma_supported(struct device *dev)
{
+ const struct fwnode_handle *fwnode = dev_fwnode(dev);
+
/* For DT, this is always supported.
* For ACPI, this depends on CCA, which
* is determined by the acpi_dma_supported().
*/
- if (IS_ENABLED(CONFIG_OF) && dev->of_node)
+ if (is_of_node(fwnode))
return true;
- return acpi_dma_supported(ACPI_COMPANION(dev));
+ return acpi_dma_supported(to_acpi_device_node(fwnode));
}
EXPORT_SYMBOL_GPL(device_dma_supported);
enum dev_dma_attr device_get_dma_attr(struct device *dev)
{
+ const struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
- if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- if (of_dma_is_coherent(dev->of_node))
+ if (is_of_node(fwnode)) {
+ if (of_dma_is_coherent(to_of_node(fwnode)))
attr = DEV_DMA_COHERENT;
else
attr = DEV_DMA_NON_COHERENT;
} else
- attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+ attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
return attr;
}
@@ -1007,14 +1005,13 @@ EXPORT_SYMBOL(device_get_mac_address);
* Returns Linux IRQ number on success. Other values are determined
* accordingly to acpi_/of_ irq_get() operation.
*/
-int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
- struct device_node *of_node = to_of_node(fwnode);
struct resource res;
int ret;
- if (IS_ENABLED(CONFIG_OF) && of_node)
- return of_irq_get(of_node, index);
+ if (is_of_node(fwnode))
+ return of_irq_get(to_of_node(fwnode), index);
ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
if (ret)
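
The property.c changes above all follow one pattern: fetch the device's firmware node once via dev_fwnode() and branch with is_of_node()/to_of_node()/to_acpi_device_node() rather than poking dev->of_node and ACPI_COMPANION() separately. A sketch of the same pattern in a driver (illustrative only):

static void example_report_origin(struct device *dev)
{
        struct fwnode_handle *fwnode = dev_fwnode(dev);

        if (is_of_node(fwnode))
                dev_info(dev, "enumerated from devicetree\n");
        else if (is_acpi_device_node(fwnode))
                dev_info(dev, "enumerated from ACPI\n");
}
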
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 50b1e2d06a25..159bac6c5046 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,8 +4,9 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO)
select IRQ_DOMAIN if REGMAP_IRQ
+ select MDIO_BUS if REGMAP_MDIO
bool
config REGCACHE_COMPRESSED
@@ -36,6 +37,9 @@ config REGMAP_W1
tristate
depends on W1
+config REGMAP_MDIO
+ tristate
+
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 33f63adb5b3d..11facb32a027 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_REGMAP_SOUNDWIRE_MBQ) += regmap-sdw-mbq.o
obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o
obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o
+obj-$(CONFIG_REGMAP_MDIO) += regmap-mdio.o
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 62b95a9212ae..980e5ce6a3a3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -306,33 +306,64 @@ static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
+ const struct i2c_adapter_quirks *quirks;
+ const struct regmap_bus *bus = NULL;
+ struct regmap_bus *ret_bus;
+ u16 max_read = 0, max_write = 0;
+
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
- return &regmap_i2c;
+ bus = &regmap_i2c;
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
- return &regmap_i2c_smbus_i2c_block;
+ bus = &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 8 && config->reg_bits == 16 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
- return &regmap_i2c_smbus_i2c_block_reg16;
+ bus = &regmap_i2c_smbus_i2c_block_reg16;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
case REGMAP_ENDIAN_LITTLE:
- return &regmap_smbus_word;
+ bus = &regmap_smbus_word;
+ break;
case REGMAP_ENDIAN_BIG:
- return &regmap_smbus_word_swapped;
+ bus = &regmap_smbus_word_swapped;
+ break;
default: /* everything else is not supported */
break;
}
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
- return &regmap_smbus_byte;
+ bus = &regmap_smbus_byte;
+
+ if (!bus)
+ return ERR_PTR(-ENOTSUPP);
+
+ quirks = i2c->adapter->quirks;
+ if (quirks) {
+ if (quirks->max_read_len &&
+ (bus->max_raw_read == 0 || bus->max_raw_read > quirks->max_read_len))
+ max_read = quirks->max_read_len;
+
+ if (quirks->max_write_len &&
+ (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+ max_write = quirks->max_write_len;
+
+ if (max_read || max_write) {
+ ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+ if (!ret_bus)
+ return ERR_PTR(-ENOMEM);
+ ret_bus->free_on_exit = true;
+ ret_bus->max_raw_read = max_read;
+ ret_bus->max_raw_write = max_write;
+ bus = ret_bus;
+ }
+ }
- return ERR_PTR(-ENOTSUPP);
+ return bus;
}
struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
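
The new block clamps the regmap bus limits to the adapter's i2c_adapter_quirks: when the adapter caps transfers below what the chosen bus allows, the const bus descriptor is kmemdup()'d, the copy gets the tighter max_raw_read/max_raw_write, and free_on_exit tells regmap_exit() to kfree() it. The visible effect for users is that oversized raw accesses get split to fit; sketch (register and size illustrative):

static int example_bulk_read(struct regmap *map)
{
        u8 buf[64];

        /* chunked transparently if the adapter's max_read_len is smaller */
        return regmap_raw_read(map, 0x00, buf, sizeof(buf));
}
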
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 760296a4b606..d2656581a608 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -531,6 +531,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
}
+ if (chip->status_invert)
+ for (i = 0; i < data->chip->num_regs; i++)
+ data->status_buf[i] = ~data->status_buf[i];
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -800,6 +804,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
+ if (chip->status_invert)
+ d->status_buf[i] = ~d->status_buf[i];
+
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = sub_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
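
status_invert is a new regmap_irq_chip flag for controllers whose status registers report active-low (0 = pending); the core now flips the bits both in the IRQ thread and when clearing stale status at registration. A hedged sketch of a chip description using it (register layout illustrative):

static const struct regmap_irq example_irqs[] = {
        REGMAP_IRQ_REG(0, 0, BIT(0)),
};

static const struct regmap_irq_chip example_irq_chip = {
        .name           = "example",
        .status_base    = 0x10,
        .mask_base      = 0x11,
        .num_regs       = 1,
        .irqs           = example_irqs,
        .num_irqs       = ARRAY_SIZE(example_irqs),
        /* hardware reports 0 = pending; let the core invert */
        .status_invert  = true,
};
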
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
new file mode 100644
index 000000000000..6a20201299f5
--- /dev/null
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errno.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define REGVAL_MASK GENMASK(15, 0)
+#define REGNUM_C22_MASK GENMASK(4, 0)
+/* Clause-45 mask includes the device type (5 bit) and actual register number (16 bit) */
+#define REGNUM_C45_MASK GENMASK(20, 0)
+
+static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+{
+ int ret;
+
+ ret = mdiobus_read(mdio_dev->bus, mdio_dev->addr, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+ return 0;
+}
+
+static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
+{
+ return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+}
+
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, reg, val);
+}
+
+static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c22_bus = {
+ .reg_write = regmap_mdio_c22_write,
+ .reg_read = regmap_mdio_c22_read,
+};
+
+static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c45_bus = {
+ .reg_write = regmap_mdio_c45_write,
+ .reg_read = regmap_mdio_c45_read,
+};
+
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mdio);
+
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __devm_regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
+
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_LICENSE("GPL v2");
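
The new bus selects Clause-22 vs Clause-45 purely from reg_bits (5 vs 21), with val_bits fixed at 16. A hedged probe sketch for a Clause-22 device (the config values follow the checks above; the rest is illustrative):

#include <linux/mii.h>

static const struct regmap_config example_c22_config = {
        .reg_bits = 5,          /* Clause-22 register numbers */
        .val_bits = 16,         /* MDIO registers are 16 bits */
};

static int example_probe(struct mdio_device *mdiodev)
{
        struct regmap *map;

        map = devm_regmap_init_mdio(mdiodev, &example_c22_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        return regmap_write(map, MII_BMCR, BMCR_RESET);
}
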
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 297e95be25b3..fe3e38dd5324 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -243,6 +243,16 @@ static void regmap_format_7_9_write(struct regmap *map,
*out = cpu_to_be16((reg << 9) | val);
}
+static void regmap_format_7_17_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = val >> 8;
+ out[0] = (val >> 16) | (reg << 1);
+}
+
static void regmap_format_10_14_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -885,6 +895,9 @@ struct regmap *__regmap_init(struct device *dev,
case 9:
map->format.format_write = regmap_format_7_9_write;
break;
+ case 17:
+ map->format.format_write = regmap_format_7_17_write;
+ break;
default:
goto err_hwlock;
}
@@ -1496,6 +1509,8 @@ void regmap_exit(struct regmap *map)
mutex_destroy(&map->mutex);
kfree_const(map->name);
kfree(map->patch);
+ if (map->bus && map->bus->free_on_exit)
+ kfree(map->bus);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
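
regmap_format_7_17_write() packs a 7-bit register and a 17-bit value into three bytes, with the register in the top seven bits of the first byte. A worked example of the packing (values illustrative):

        /* reg = 0x55, val = 0x1abcd:
         *   out[0] = (0x1abcd >> 16) | (0x55 << 1) = 0x01 | 0xaa = 0xab
         *   out[1] = (0x1abcd >> 8) & 0xff         = 0xab
         *   out[2] =  0x1abcd       & 0xff         = 0xcd
         * wire bytes: ab ab cd  (reg in bits 23:17, val in bits 16:0)
         */
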
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 3cc11b813f28..d1f1a8240120 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -1045,7 +1045,15 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
}
set_secondary_fwnode(dev, &swnode->fwnode);
- software_node_notify(dev, KOBJ_ADD);
+
+ /*
+ * If the device has been fully registered by the time this function is
+ * called, software_node_notify() must be called separately so that the
+ * symlinks get created and the reference count of the node is kept in
+ * balance.
+ */
+ if (device_is_registered(dev))
+ software_node_notify(dev, KOBJ_ADD);
return 0;
}
@@ -1065,7 +1073,8 @@ void device_remove_software_node(struct device *dev)
if (!swnode)
return;
- software_node_notify(dev, KOBJ_REMOVE);
+ if (device_is_registered(dev))
+ software_node_notify(dev, KOBJ_REMOVE);
set_secondary_fwnode(dev, NULL);
kobject_put(&swnode->kobj);
}
@@ -1119,8 +1128,7 @@ int software_node_notify(struct device *dev, unsigned long action)
switch (action) {
case KOBJ_ADD:
- ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
- "software_node");
+ ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
break;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 9e2d0c6a3877..8b1714021498 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1781,15 +1781,13 @@ static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
- disk = alloc_disk(1);
- if (!disk)
- goto out;
- disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
- if (IS_ERR(disk->queue))
- goto out_put_disk;
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive + system;
+ disk->minors = 1;
disk->fops = &floppy_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (system)
@@ -1802,12 +1800,6 @@ static int fd_alloc_disk(int drive, int system)
unit[drive].gendisk[system] = disk;
add_disk(disk);
return 0;
-
-out_put_disk:
- disk->queue = NULL;
- put_disk(disk);
-out:
- return -ENOMEM;
}
static int fd_alloc_drive(int drive)
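
This is the first of several conversions in this series: blk_mq_alloc_disk() replaces the alloc_disk() + blk_mq_init_queue() pair, returning a gendisk with its queue already attached (and queuedata set from the second argument), and blk_cleanup_disk() undoes both in one call. The shape of the new pattern, as a sketch:

static int example_alloc_disk(struct blk_mq_tag_set *set, void *data)
{
        struct gendisk *disk;

        disk = blk_mq_alloc_disk(set, data);
        if (IS_ERR(disk))
                return PTR_ERR(disk);

        /* fill in major/first_minor/minors/fops, then add_disk();
         * teardown is del_gendisk() followed by blk_cleanup_disk(). */
        return 0;
}
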
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index c34e71b0c4a9..06b360f7123a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -338,14 +338,13 @@ static const struct blk_mq_ops aoeblk_mq_ops = {
.queue_rq = aoeblk_queue_rq,
};
-/* alloc_disk and add_disk can sleep */
+/* blk_mq_alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
struct aoedev *d = vp;
struct gendisk *gd;
mempool_t *mp;
- struct request_queue *q;
struct blk_mq_tag_set *set;
ulong flags;
int late = 0;
@@ -362,19 +361,12 @@ aoeblk_gdalloc(void *vp)
if (late)
return;
- gd = alloc_disk(AOE_PARTITIONS);
- if (gd == NULL) {
- pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
- d->aoemajor, d->aoeminor);
- goto err;
- }
-
mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
buf_pool_cache);
if (mp == NULL) {
printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
d->aoemajor, d->aoeminor);
- goto err_disk;
+ goto err;
}
set = &d->tag_set;
@@ -391,12 +383,11 @@ aoeblk_gdalloc(void *vp)
goto err_mempool;
}
- q = blk_mq_init_queue(set);
- if (IS_ERR(q)) {
+ gd = blk_mq_alloc_disk(set, d);
+ if (IS_ERR(gd)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
- blk_mq_free_tag_set(set);
- goto err_mempool;
+ goto err_tagset;
}
spin_lock_irqsave(&d->lock, flags);
@@ -405,16 +396,16 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- blk_queue_io_opt(q, SZ_2M);
+ blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
+ blk_queue_io_opt(gd->queue, SZ_2M);
d->bufpool = mp;
- d->blkq = gd->queue = q;
- q->queuedata = d;
+ d->blkq = gd->queue;
d->gd = gd;
if (aoe_maxsectors)
- blk_queue_max_hw_sectors(q, aoe_maxsectors);
+ blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
+ gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
set_capacity(gd, d->ssize);
@@ -435,10 +426,10 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
return;
+err_tagset:
+ blk_mq_free_tag_set(set);
err_mempool:
mempool_destroy(mp);
-err_disk:
- put_disk(gd);
err:
spin_lock_irqsave(&d->lock, flags);
d->flags &= ~DEVFL_GD_NOW;
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index ab41be625a53..8eea2529da20 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -140,10 +140,8 @@ bail: spin_unlock_irqrestore(&emsgs_lock, flags);
}
mp = kmemdup(msg, n, GFP_ATOMIC);
- if (mp == NULL) {
- printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
+ if (!mp)
goto bail;
- }
em->msg = mp;
em->flags |= EMFL_VALID;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ecd77897a761..588889bea7c3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1701,8 +1701,6 @@ aoecmd_init(void)
goto ktiowq_fail;
}
- mutex_init(&ktio_spawn_lock);
-
for (i = 0; i < ncpus; i++) {
INIT_LIST_HEAD(&iocq[i].head);
spin_lock_init(&iocq[i].lock);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index e2ea2356da06..c5753c6bfe80 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -277,9 +277,8 @@ freedev(struct aoedev *d)
if (d->gd) {
aoedisk_rm_debugfs(d);
del_gendisk(d->gd);
- put_disk(d->gd);
+ blk_cleanup_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
- blk_cleanup_queue(d->blkq);
}
t = d->targets;
e = t + d->ntargets;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index d601e49f80e0..a093644ac39f 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1968,22 +1968,14 @@ static const struct blk_mq_ops ataflop_mq_ops = {
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
- int ret;
-
- disk = alloc_disk(1);
- if (!disk)
- return -ENOMEM;
- disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
- if (IS_ERR(disk->queue)) {
- ret = PTR_ERR(disk->queue);
- disk->queue = NULL;
- put_disk(disk);
- return ret;
- }
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive + (type << 2);
+ disk->minors = 1;
sprintf(disk->disk_name, "fd%d", drive);
disk->fops = &floppy_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 7562cf30b14e..95694113e38e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -38,9 +38,7 @@
* device).
*/
struct brd_device {
- int brd_number;
-
- struct request_queue *brd_queue;
+ int brd_number;
struct gendisk *brd_disk;
struct list_head brd_list;
@@ -372,7 +370,7 @@ static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
-static struct brd_device *brd_alloc(int i)
+static int brd_alloc(int i)
{
struct brd_device *brd;
struct gendisk *disk;
@@ -380,64 +378,55 @@ static struct brd_device *brd_alloc(int i)
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd)
- goto out;
+ return -ENOMEM;
brd->brd_number = i;
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
- brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!brd->brd_queue)
- goto out_free_dev;
-
snprintf(buf, DISK_NAME_LEN, "ram%d", i);
if (!IS_ERR_OR_NULL(brd_debugfs_dir))
debugfs_create_u64(buf, 0444, brd_debugfs_dir,
&brd->brd_nr_pages);
- /* This is so fdisk will align partitions on 4k, because of
- * direct_access API needing 4k alignment, returning a PFN
- * (This is only a problem on very small devices <= 4M,
- * otherwise fdisk will align on 1M. Regardless this call
- * is harmless)
- */
- blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
- disk = brd->brd_disk = alloc_disk(max_part);
+ disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto out_free_queue;
+ goto out_free_dev;
+
disk->major = RAMDISK_MAJOR;
disk->first_minor = i * max_part;
+ disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
disk->flags = GENHD_FL_EXT_DEVT;
strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
+
+ /*
+ * This is so fdisk will align partitions on 4k, because of
+ * direct_access API needing 4k alignment, returning a PFN
+ * (This is only a problem on very small devices <= 4M,
+ * otherwise fdisk will align on 1M. Regardless this call
+ * is harmless)
+ */
+ blk_queue_physical_block_size(disk->queue, PAGE_SIZE);
/* Tell the block layer that this is not a rotational device */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ add_disk(disk);
+ list_add_tail(&brd->brd_list, &brd_devices);
- return brd;
+ return 0;
-out_free_queue:
- blk_cleanup_queue(brd->brd_queue);
out_free_dev:
kfree(brd);
-out:
- return NULL;
-}
-
-static void brd_free(struct brd_device *brd)
-{
- put_disk(brd->brd_disk);
- blk_cleanup_queue(brd->brd_queue);
- brd_free_pages(brd);
- kfree(brd);
+ return -ENOMEM;
}
static void brd_probe(dev_t dev)
{
- struct brd_device *brd;
int i = MINOR(dev) / max_part;
+ struct brd_device *brd;
mutex_lock(&brd_devices_mutex);
list_for_each_entry(brd, &brd_devices, brd_list) {
@@ -445,13 +434,7 @@ static void brd_probe(dev_t dev)
goto out_unlock;
}
- brd = brd_alloc(i);
- if (brd) {
- brd->brd_disk->queue = brd->brd_queue;
- add_disk(brd->brd_disk);
- list_add_tail(&brd->brd_list, &brd_devices);
- }
-
+ brd_alloc(i);
out_unlock:
mutex_unlock(&brd_devices_mutex);
}
@@ -460,7 +443,9 @@ static void brd_del_one(struct brd_device *brd)
{
list_del(&brd->brd_list);
del_gendisk(brd->brd_disk);
- brd_free(brd);
+ blk_cleanup_disk(brd->brd_disk);
+ brd_free_pages(brd);
+ kfree(brd);
}
static inline void brd_check_and_reset_par(void)
@@ -485,7 +470,7 @@ static inline void brd_check_and_reset_par(void)
static int __init brd_init(void)
{
struct brd_device *brd, *next;
- int i;
+ int err, i;
/*
* brd module now has a feature to instantiate underlying device
@@ -511,22 +496,11 @@ static int __init brd_init(void)
mutex_lock(&brd_devices_mutex);
for (i = 0; i < rd_nr; i++) {
- brd = brd_alloc(i);
- if (!brd)
+ err = brd_alloc(i);
+ if (err)
goto out_free;
- list_add_tail(&brd->brd_list, &brd_devices);
}
- /* point of no return */
-
- list_for_each_entry(brd, &brd_devices, brd_list) {
- /*
- * associate with queue just before adding disk for
- * avoiding to mess up failure path
- */
- brd->brd_disk->queue = brd->brd_queue;
- add_disk(brd->brd_disk);
- }
mutex_unlock(&brd_devices_mutex);
pr_info("brd: module loaded\n");
@@ -535,15 +509,13 @@ static int __init brd_init(void)
out_free:
debugfs_remove_recursive(brd_debugfs_dir);
- list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
- list_del(&brd->brd_list);
- brd_free(brd);
- }
+ list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
+ brd_del_one(brd);
mutex_unlock(&brd_devices_mutex);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module NOT loaded !!!\n");
- return -ENOMEM;
+ return err;
}
static void __exit brd_exit(void)
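
brd is bio-based, so it uses the tag-set-less sibling blk_alloc_disk(), which likewise hands back a gendisk with an allocated queue; blk_cleanup_disk() is again the single teardown. Note also that brd_alloc() now registers the disk and links it into brd_devices itself, which is why brd_probe() and brd_init() shrank. Sketch of the bio-based variant:

static struct gendisk *example_alloc_bio_disk(void)
{
        struct gendisk *disk;

        disk = blk_alloc_disk(NUMA_NO_NODE);    /* no blk-mq tag set */
        if (!disk)
                return NULL;

        /* disk->queue is ready for blk_queue_*() limits and add_disk() */
        return disk;
}
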
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index de463773b530..55234a558e98 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2231,8 +2231,7 @@ void drbd_destroy_device(struct kref *kref)
if (device->bitmap) /* should no longer be there. */
drbd_bm_cleanup(device);
__free_page(device->md_io.page);
- put_disk(device->vdisk);
- blk_cleanup_queue(device->rq_queue);
+ blk_cleanup_disk(device->vdisk);
kfree(device->rs_plan_s);
/* not for_each_connection(connection, resource):
@@ -2701,7 +2700,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
- struct request_queue *q;
int id;
int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
@@ -2723,29 +2721,26 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- q = blk_alloc_queue(NUMA_NO_NODE);
- if (!q)
- goto out_no_q;
- device->rq_queue = q;
-
- disk = alloc_disk(1);
+ disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out_no_disk;
+
device->vdisk = disk;
+ device->rq_queue = disk->queue;
set_disk_ro(disk, true);
- disk->queue = q;
disk->major = DRBD_MAJOR;
disk->first_minor = minor;
+ disk->minors = 1;
disk->fops = &drbd_ops;
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
- blk_queue_write_cache(q, true, true);
+ blk_queue_write_cache(disk->queue, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
- blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
+ blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8);
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
@@ -2834,10 +2829,8 @@ out_no_minor_idr:
out_no_bitmap:
__free_page(device->md_io.page);
out_no_io_page:
- put_disk(disk);
+ blk_cleanup_disk(disk);
out_no_disk:
- blk_cleanup_queue(q);
-out_no_q:
kref_put(&resource->kref, drbd_destroy_resource);
kfree(device);
return err;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 69284ebba786..1f740e42e457 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3770,10 +3770,8 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
}
new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
- if (!new_net_conf) {
- drbd_err(connection, "Allocation of new net_conf failed\n");
+ if (!new_net_conf)
goto disconnect;
- }
mutex_lock(&connection->data.mutex);
mutex_lock(&connection->resource->conf_update);
@@ -4020,10 +4018,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm || csums_tfm) {
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
- if (!new_net_conf) {
- drbd_err(device, "Allocation of new net_conf failed\n");
+ if (!new_net_conf)
goto disconnect;
- }
*new_net_conf = *old_net_conf;
@@ -4161,7 +4157,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
- drbd_err(device, "Allocation of new disk_conf failed\n");
put_ldev(device);
return -ENOMEM;
}
@@ -4288,10 +4283,8 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
device = peer_device->device;
p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
- if (!p_uuid) {
- drbd_err(device, "kmalloc of p_uuid failed\n");
+ if (!p_uuid)
return false;
- }
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
@@ -5484,8 +5477,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
}
peers_ch = kmalloc(pi.size, GFP_NOIO);
- if (peers_ch == NULL) {
- drbd_err(connection, "kmalloc of peers_ch failed\n");
+ if (!peers_ch) {
rv = -1;
goto fail;
}
@@ -5504,8 +5496,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
- if (response == NULL) {
- drbd_err(connection, "kmalloc of response failed\n");
+ if (!response) {
rv = -1;
goto fail;
}
@@ -5552,8 +5543,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
}
right_response = kmalloc(resp_size, GFP_NOIO);
- if (right_response == NULL) {
- drbd_err(connection, "kmalloc of right_response failed\n");
+ if (!right_response) {
rv = -1;
goto fail;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8a9d22207c59..87460e0e5c72 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2123,6 +2123,7 @@ static void format_interrupt(void)
switch (interpret_errors()) {
case 1:
cont->error();
+ break;
case 2:
break;
case 0:
@@ -2330,7 +2331,6 @@ static void rw_interrupt(void)
if (!drive_state[current_drive].first_read_date)
drive_state[current_drive].first_read_date = jiffies;
- nr_sectors = 0;
ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4);
if (reply_buffer[ST1] & ST1_EOC)
@@ -4491,23 +4491,15 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
- int err;
-
- disk = alloc_disk(1);
- if (!disk)
- return -ENOMEM;
- disk->queue = blk_mq_init_queue(&tag_sets[drive]);
- if (IS_ERR(disk->queue)) {
- err = PTR_ERR(disk->queue);
- disk->queue = NULL;
- put_disk(disk);
- return err;
- }
+ disk = blk_mq_alloc_disk(&tag_sets[drive], NULL);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
blk_queue_max_hw_sectors(disk->queue, 64);
disk->major = FLOPPY_MAJOR;
disk->first_minor = TOMINOR(drive) | (type << 2);
+ disk->minors = 1;
disk->fops = &floppy_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (type)
@@ -4727,10 +4719,8 @@ out_put_disk:
if (!disks[drive][0])
break;
del_timer_sync(&motor_off_timer[drive]);
- blk_cleanup_queue(disks[drive][0]->queue);
- disks[drive][0]->queue = NULL;
+ blk_cleanup_disk(disks[drive][0]);
blk_mq_free_tag_set(&tag_sets[drive]);
- put_disk(disks[drive][0]);
}
return err;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d58d68f3c7cd..cc0e8c39a48b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -71,7 +71,6 @@
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
-#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
@@ -79,11 +78,14 @@
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
+#include <linux/sched/mm.h>
#include "loop.h"
#include <linux/uaccess.h>
+#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
+
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
@@ -515,8 +517,6 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
- if (cmd->css)
- css_put(cmd->css);
cmd->ret = ret;
lo_rw_aio_do_completion(cmd);
}
@@ -577,8 +577,6 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
- if (cmd->css)
- kthread_associate_blkcg(cmd->css);
if (rw == WRITE)
ret = call_write_iter(file, &cmd->iocb, &iter);
@@ -586,7 +584,6 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
ret = call_read_iter(file, &cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
- kthread_associate_blkcg(NULL);
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
@@ -647,14 +644,13 @@ static inline void loop_update_dio(struct loop_device *lo)
lo->use_dio);
}
-static void loop_reread_partitions(struct loop_device *lo,
- struct block_device *bdev)
+static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
- mutex_lock(&bdev->bd_mutex);
- rc = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_lock(&lo->lo_disk->open_mutex);
+ rc = bdev_disk_changed(lo->lo_disk, false);
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -747,12 +743,12 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
mutex_unlock(&lo->lo_mutex);
/*
* We must drop file reference outside of lo_mutex as dropping
- * the file ref can take bd_mutex which creates circular locking
+ * the file ref can take open_mutex which creates circular locking
* dependency.
*/
fput(old_file);
if (partscan)
- loop_reread_partitions(lo, bdev);
+ loop_reread_partitions(lo);
return 0;
out_err:
@@ -921,27 +917,100 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_alignment = 0;
}
-static void loop_unprepare_queue(struct loop_device *lo)
+struct loop_worker {
+ struct rb_node rb_node;
+ struct work_struct work;
+ struct list_head cmd_list;
+ struct list_head idle_list;
+ struct loop_device *lo;
+ struct cgroup_subsys_state *blkcg_css;
+ unsigned long last_ran_at;
+};
+
+static void loop_workfn(struct work_struct *work);
+static void loop_rootcg_workfn(struct work_struct *work);
+static void loop_free_idle_workers(struct timer_list *timer);
+
+#ifdef CONFIG_BLK_CGROUP
+static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
- kthread_flush_worker(&lo->worker);
- kthread_stop(lo->worker_task);
+ return !css || css == blkcg_root_css;
}
-
-static int loop_kthread_worker_fn(void *worker_ptr)
+#else
+static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
- current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
- return kthread_worker_fn(worker_ptr);
+ return !css;
}
+#endif
-static int loop_prepare_queue(struct loop_device *lo)
+static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
- kthread_init_worker(&lo->worker);
- lo->worker_task = kthread_run(loop_kthread_worker_fn,
- &lo->worker, "loop%d", lo->lo_number);
- if (IS_ERR(lo->worker_task))
- return -ENOMEM;
- set_user_nice(lo->worker_task, MIN_NICE);
- return 0;
+ struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL;
+ struct loop_worker *cur_worker, *worker = NULL;
+ struct work_struct *work;
+ struct list_head *cmd_list;
+
+ spin_lock_irq(&lo->lo_work_lock);
+
+ if (queue_on_root_worker(cmd->blkcg_css))
+ goto queue_work;
+
+ node = &lo->worker_tree.rb_node;
+
+ while (*node) {
+ parent = *node;
+ cur_worker = container_of(*node, struct loop_worker, rb_node);
+ if (cur_worker->blkcg_css == cmd->blkcg_css) {
+ worker = cur_worker;
+ break;
+ } else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
+ node = &(*node)->rb_left;
+ } else {
+ node = &(*node)->rb_right;
+ }
+ }
+ if (worker)
+ goto queue_work;
+
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ /*
+ * In the event we cannot allocate a worker, just queue on the
+ * rootcg worker and issue the I/O as the rootcg
+ */
+ if (!worker) {
+ cmd->blkcg_css = NULL;
+ if (cmd->memcg_css)
+ css_put(cmd->memcg_css);
+ cmd->memcg_css = NULL;
+ goto queue_work;
+ }
+
+ worker->blkcg_css = cmd->blkcg_css;
+ css_get(worker->blkcg_css);
+ INIT_WORK(&worker->work, loop_workfn);
+ INIT_LIST_HEAD(&worker->cmd_list);
+ INIT_LIST_HEAD(&worker->idle_list);
+ worker->lo = lo;
+ rb_link_node(&worker->rb_node, parent, node);
+ rb_insert_color(&worker->rb_node, &lo->worker_tree);
+queue_work:
+ if (worker) {
+ /*
+ * We need to remove from the idle list here while
+ * holding the lock so that the idle timer doesn't
+ * free the worker
+ */
+ if (!list_empty(&worker->idle_list))
+ list_del_init(&worker->idle_list);
+ work = &worker->work;
+ cmd_list = &worker->cmd_list;
+ } else {
+ work = &lo->rootcg_work;
+ cmd_list = &lo->rootcg_cmd_list;
+ }
+ list_add_tail(&cmd->list_entry, cmd_list);
+ queue_work(lo->workqueue, work);
+ spin_unlock_irq(&lo->lo_work_lock);
}
static void loop_update_rotational(struct loop_device *lo)
@@ -1127,12 +1196,23 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo->lo_flags |= LO_FLAGS_READ_ONLY;
- error = loop_prepare_queue(lo);
- if (error)
+ lo->workqueue = alloc_workqueue("loop%d",
+ WQ_UNBOUND | WQ_FREEZABLE,
+ 0,
+ lo->lo_number);
+ if (!lo->workqueue) {
+ error = -ENOMEM;
goto out_unlock;
+ }
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
+ INIT_LIST_HEAD(&lo->rootcg_cmd_list);
+ INIT_LIST_HEAD(&lo->idle_worker_list);
+ lo->worker_tree = RB_ROOT;
+ timer_setup(&lo->timer, loop_free_idle_workers,
+ TIMER_DEFERRABLE);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
@@ -1154,6 +1234,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
blk_queue_physical_block_size(lo->lo_queue, bsize);
blk_queue_io_min(lo->lo_queue, bsize);
+ loop_config_discard(lo);
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
@@ -1174,7 +1255,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
bdgrab(bdev);
mutex_unlock(&lo->lo_mutex);
if (partscan)
- loop_reread_partitions(lo, bdev);
+ loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
return 0;
@@ -1200,6 +1281,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
int err = 0;
bool partscan = false;
int lo_number;
+ struct loop_worker *pos, *worker;
mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
@@ -1219,6 +1301,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);
+ destroy_workqueue(lo->workqueue);
+ spin_lock_irq(&lo->lo_work_lock);
+ list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
+ idle_list) {
+ list_del(&worker->idle_list);
+ rb_erase(&worker->rb_node, &lo->worker_tree);
+ css_put(worker->blkcg_css);
+ kfree(worker);
+ }
+ spin_unlock_irq(&lo->lo_work_lock);
+ del_timer_sync(&lo->timer);
+
spin_lock_irq(&lo->lo_lock);
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1255,12 +1349,11 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
lo_number = lo->lo_number;
- loop_unprepare_queue(lo);
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan) {
/*
- * bd_mutex has been held already in release path, so don't
+ * open_mutex has been held already in release path, so don't
* acquire it if this function is called in such case.
*
* If the reread partition isn't from release path, lo_refcnt
@@ -1268,10 +1361,10 @@ out_unlock:
* current holder is released.
*/
if (!release)
- mutex_lock(&bdev->bd_mutex);
- err = bdev_disk_changed(bdev, false);
+ mutex_lock(&lo->lo_disk->open_mutex);
+ err = bdev_disk_changed(lo->lo_disk, false);
if (!release)
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err);
@@ -1298,7 +1391,7 @@ out_unlock:
/*
* Need not hold lo_mutex to fput backing file. Calling fput holding
* lo_mutex triggers a circular lock dependency possibility warning as
- * fput can take bd_mutex which is usually taken before lo_mutex.
+ * fput can take open_mutex which is usually taken before lo_mutex.
*/
if (filp)
fput(filp);
@@ -1416,7 +1509,7 @@ out_unfreeze:
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
- loop_reread_partitions(lo, bdev);
+ loop_reread_partitions(lo);
return err;
}
@@ -1879,29 +1972,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- /*
- * take loop_ctl_mutex to protect lo pointer from race with
- * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
- * release it prior to updating lo->lo_refcnt.
- */
- err = mutex_lock_killable(&loop_ctl_mutex);
- if (err)
- return err;
- lo = bdev->bd_disk->private_data;
- if (!lo) {
- mutex_unlock(&loop_ctl_mutex);
- return -ENXIO;
- }
err = mutex_lock_killable(&lo->lo_mutex);
- mutex_unlock(&loop_ctl_mutex);
if (err)
return err;
- atomic_inc(&lo->lo_refcnt);
+ if (lo->lo_state == Lo_deleting)
+ err = -ENXIO;
+ else
+ atomic_inc(&lo->lo_refcnt);
mutex_unlock(&lo->lo_mutex);
- return 0;
+ return err;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2019,14 +2101,19 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
/* always use the first bio's css */
+ cmd->blkcg_css = NULL;
+ cmd->memcg_css = NULL;
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
- cmd->css = &bio_blkcg(rq->bio)->css;
- css_get(cmd->css);
- } else
+ if (rq->bio && rq->bio->bi_blkg) {
+ cmd->blkcg_css = &bio_blkcg(rq->bio)->css;
+#ifdef CONFIG_MEMCG
+ cmd->memcg_css =
+ cgroup_get_e_css(cmd->blkcg_css->cgroup,
+ &memory_cgrp_subsys);
+#endif
+ }
#endif
- cmd->css = NULL;
- kthread_queue_work(&lo->worker, &cmd->work);
+ loop_queue_work(lo, cmd);
return BLK_STS_OK;
}
@@ -2037,13 +2124,28 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
+ struct mem_cgroup *old_memcg = NULL;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
+ if (cmd->blkcg_css)
+ kthread_associate_blkcg(cmd->blkcg_css);
+ if (cmd->memcg_css)
+ old_memcg = set_active_memcg(
+ mem_cgroup_from_css(cmd->memcg_css));
+
ret = do_req_filebacked(lo, rq);
+
+ if (cmd->blkcg_css)
+ kthread_associate_blkcg(NULL);
+
+ if (cmd->memcg_css) {
+ set_active_memcg(old_memcg);
+ css_put(cmd->memcg_css);
+ }
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
@@ -2056,26 +2158,82 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
}
-static void loop_queue_work(struct kthread_work *work)
+static void loop_set_timer(struct loop_device *lo)
{
- struct loop_cmd *cmd =
- container_of(work, struct loop_cmd, work);
+ timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
+}
- loop_handle_cmd(cmd);
+static void loop_process_work(struct loop_worker *worker,
+ struct list_head *cmd_list, struct loop_device *lo)
+{
+ int orig_flags = current->flags;
+ struct loop_cmd *cmd;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ spin_lock_irq(&lo->lo_work_lock);
+ while (!list_empty(cmd_list)) {
+ cmd = container_of(
+ cmd_list->next, struct loop_cmd, list_entry);
+ list_del(cmd_list->next);
+ spin_unlock_irq(&lo->lo_work_lock);
+
+ loop_handle_cmd(cmd);
+ cond_resched();
+
+ spin_lock_irq(&lo->lo_work_lock);
+ }
+
+ /*
+ * We only add to the idle list if there are no pending cmds
+ * *and* the worker will not run again which ensures that it
+ * is safe to free any worker on the idle list
+ */
+ if (worker && !work_pending(&worker->work)) {
+ worker->last_ran_at = jiffies;
+ list_add_tail(&worker->idle_list, &lo->idle_worker_list);
+ loop_set_timer(lo);
+ }
+ spin_unlock_irq(&lo->lo_work_lock);
+ current->flags = orig_flags;
}
-static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
+static void loop_workfn(struct work_struct *work)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_worker *worker =
+ container_of(work, struct loop_worker, work);
+ loop_process_work(worker, &worker->cmd_list, worker->lo);
+}
- kthread_init_work(&cmd->work, loop_queue_work);
- return 0;
+static void loop_rootcg_workfn(struct work_struct *work)
+{
+ struct loop_device *lo =
+ container_of(work, struct loop_device, rootcg_work);
+ loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
+}
+
+static void loop_free_idle_workers(struct timer_list *timer)
+{
+ struct loop_device *lo = container_of(timer, struct loop_device, timer);
+ struct loop_worker *pos, *worker;
+
+ spin_lock_irq(&lo->lo_work_lock);
+ list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
+ idle_list) {
+ if (time_is_after_jiffies(worker->last_ran_at +
+ LOOP_IDLE_WORKER_TIMEOUT))
+ break;
+ list_del(&worker->idle_list);
+ rb_erase(&worker->rb_node, &lo->worker_tree);
+ css_put(worker->blkcg_css);
+ kfree(worker);
+ }
+ if (!list_empty(&lo->idle_worker_list))
+ loop_set_timer(lo);
+ spin_unlock_irq(&lo->lo_work_lock);
}
static const struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
- .init_request = loop_init_request,
.complete = lo_complete_rq,
};
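loop_queue_work(), called from loop_queue_rq() above, is not itself visible in these hunks. Below is a rough sketch of the per-cgroup dispatch it performs, reconstructed from the fields the patch adds to struct loop_device and struct loop_cmd; treat the struct layout and the exact allocation flags as assumptions, not as the patch's literal code:

struct loop_worker {
	struct rb_node rb_node;		/* in lo->worker_tree, keyed by blkcg_css */
	struct work_struct work;
	struct list_head cmd_list;
	struct list_head idle_list;
	struct loop_device *lo;
	struct cgroup_subsys_state *blkcg_css;
	unsigned long last_ran_at;
};

static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
	struct rb_node **node = &lo->worker_tree.rb_node, *parent = NULL;
	struct loop_worker *cur, *worker = NULL;

	spin_lock_irq(&lo->lo_work_lock);
	if (cmd->blkcg_css) {
		/* Find an existing worker for this blkcg, if any. */
		while (*node) {
			parent = *node;
			cur = container_of(*node, struct loop_worker, rb_node);
			if (cur->blkcg_css == cmd->blkcg_css) {
				worker = cur;
				break;
			}
			node = ((long)cur->blkcg_css < (long)cmd->blkcg_css) ?
				&(*node)->rb_left : &(*node)->rb_right;
		}
		if (!worker) {
			worker = kzalloc(sizeof(*worker),
					 GFP_NOWAIT | __GFP_NOWARN);
			if (worker) {
				worker->blkcg_css = cmd->blkcg_css;
				css_get(worker->blkcg_css); /* dropped by the idle reaper */
				INIT_WORK(&worker->work, loop_workfn);
				INIT_LIST_HEAD(&worker->cmd_list);
				INIT_LIST_HEAD(&worker->idle_list);
				worker->lo = lo;
				rb_link_node(&worker->rb_node, parent, node);
				rb_insert_color(&worker->rb_node,
						&lo->worker_tree);
			}
		}
	}
	if (worker) {
		/* Leave the idle list so loop_free_idle_workers() skips us. */
		if (!list_empty(&worker->idle_list))
			list_del_init(&worker->idle_list);
		list_add_tail(&cmd->list_entry, &worker->cmd_list);
		queue_work(lo->workqueue, &worker->work);
	} else {
		/* No blkcg (or allocation failed): shared rootcg worker. */
		list_add_tail(&cmd->list_entry, &lo->rootcg_cmd_list);
		queue_work(lo->workqueue, &lo->rootcg_work);
	}
	spin_unlock_irq(&lo->lo_work_lock);
}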
@@ -2117,12 +2275,12 @@ static int loop_add(struct loop_device **l, int i)
if (err)
goto out_free_idr;
- lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
- if (IS_ERR(lo->lo_queue)) {
- err = PTR_ERR(lo->lo_queue);
+ disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_cleanup_tags;
}
- lo->lo_queue->queuedata = lo;
+ lo->lo_queue = lo->lo_disk->queue;
blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
@@ -2134,11 +2292,6 @@ static int loop_add(struct loop_device **l, int i)
*/
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
- err = -ENOMEM;
- disk = lo->lo_disk = alloc_disk(1 << part_shift);
- if (!disk)
- goto out_free_queue;
-
/*
* Disable partition scanning by default. The in-kernel partition
* scanning can be requested individually per-device during its
@@ -2164,8 +2317,10 @@ static int loop_add(struct loop_device **l, int i)
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
+ spin_lock_init(&lo->lo_work_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
+ disk->minors = 1 << part_shift;
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
@@ -2174,8 +2329,6 @@ static int loop_add(struct loop_device **l, int i)
*l = lo;
return lo->lo_number;
-out_free_queue:
- blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
@@ -2189,9 +2342,8 @@ out:
static void loop_remove(struct loop_device *lo)
{
del_gendisk(lo->lo_disk);
- blk_cleanup_queue(lo->lo_queue);
+ blk_cleanup_disk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
- put_disk(lo->lo_disk);
mutex_destroy(&lo->lo_mutex);
kfree(lo);
}
@@ -2285,7 +2437,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
mutex_unlock(&lo->lo_mutex);
break;
}
- lo->lo_disk->private_data = NULL;
+ lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
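The remaining loop hunks show the conversion pattern the rest of this series applies driver by driver: blk_mq_alloc_disk() allocates the gendisk together with its request_queue and stores the queuedata pointer, while blk_cleanup_disk() replaces the former blk_cleanup_queue() plus put_disk() pair. A minimal sketch of a converted add/remove pair; mydev and its fields are placeholder names, not part of the patch:

struct mydev {
	struct blk_mq_tag_set tag_set;	/* ops, nr_hw_queues, etc. assumed filled in */
	struct request_queue *queue;
	struct gendisk *disk;
};

static int mydev_add(struct mydev *dev)
{
	struct gendisk *disk;
	int err;

	err = blk_mq_alloc_tag_set(&dev->tag_set);
	if (err)
		return err;

	/* One call now allocates both the disk and its queue. */
	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_tag_set;
	}
	dev->disk = disk;
	dev->queue = disk->queue;	/* queuedata already points at dev */
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(&dev->tag_set);
	return err;
}

static void mydev_remove(struct mydev *dev)
{
	del_gendisk(dev->disk);
	blk_cleanup_disk(dev->disk);	/* was: blk_cleanup_queue() + put_disk() */
	blk_mq_free_tag_set(&dev->tag_set);
}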
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a3c04f310672..1988899db63a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -14,7 +14,6 @@
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <uapi/linux/loop.h>
/* Possible states of device */
@@ -22,6 +21,7 @@ enum {
Lo_unbound,
Lo_bound,
Lo_rundown,
+ Lo_deleting,
};
struct loop_func_table;
@@ -54,8 +54,13 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
- struct kthread_worker worker;
- struct task_struct *worker_task;
+ spinlock_t lo_work_lock;
+ struct workqueue_struct *workqueue;
+ struct work_struct rootcg_work;
+ struct list_head rootcg_cmd_list;
+ struct list_head idle_worker_list;
+ struct rb_root worker_tree;
+ struct timer_list timer;
bool use_dio;
bool sysfs_inited;
@@ -66,13 +71,14 @@ struct loop_device {
};
struct loop_cmd {
- struct kthread_work work;
+ struct list_head list_entry;
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
struct kiocb iocb;
struct bio_vec *bvec;
- struct cgroup_subsys_state *css;
+ struct cgroup_subsys_state *blkcg_css;
+ struct cgroup_subsys_state *memcg_css;
};
/* Support for loadable transfer modules */
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 589cb0f1e030..ff3e7b3f5ad8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2238,7 +2238,6 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
- struct driver_data *dd = (struct driver_data *)f->private_data;
int size = *offset;
char *buf;
int rv = 0;
@@ -2247,11 +2246,8 @@ static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
- if (!buf) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: status buffer\n");
+ if (!buf)
return -ENOMEM;
- }
size += show_device_status(NULL, buf);
@@ -2277,11 +2273,8 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
- if (!buf) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: register buffer\n");
+ if (!buf)
return -ENOMEM;
- }
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
@@ -2343,11 +2336,8 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return 0;
buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
- if (!buf) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: flag buffer\n");
+ if (!buf)
return -ENOMEM;
- }
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
@@ -2884,11 +2874,8 @@ static int mtip_hw_init(struct driver_data *dd)
dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
dd->numa_node);
- if (!dd->port) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: port structure\n");
+ if (!dd->port)
return -ENOMEM;
- }
/* Continue workqueue setup */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -4002,11 +3989,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
- if (dd == NULL) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for driver data\n");
+ if (!dd)
return -ENOMEM;
- }
/* Attach the private data to this PCI device. */
pci_set_drvdata(pdev, dd);
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 47bdf324e962..7b4dd10af9ec 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -132,16 +132,12 @@ static int __init n64cart_probe(struct platform_device *pdev)
if (!reg_base)
return -EINVAL;
- disk = alloc_disk(0);
+ disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
return -ENOMEM;
- disk->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!disk->queue)
- return -ENOMEM;
-
disk->first_minor = 0;
- disk->flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT;
+ disk->flags = GENHD_FL_NO_PART_SCAN;
disk->fops = &n64cart_fops;
disk->private_data = &pdev->dev;
strcpy(disk->disk_name, "n64cart");
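n64cart is bio based rather than blk-mq based, so it takes the other helper introduced by this series: blk_alloc_disk() bundles the old alloc_disk() + blk_alloc_queue() pair for drivers that implement ->submit_bio. A sketch of that variant under the same API; every name here is a placeholder:

static blk_qc_t mybio_submit_bio(struct bio *bio);	/* driver I/O path */

static const struct block_device_operations mybio_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= mybio_submit_bio,
};

static int mybio_probe(struct platform_device *pdev)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

	if (!disk)
		return -ENOMEM;	/* bio-based helper returns NULL, not ERR_PTR */
	disk->fops = &mybio_fops;
	disk->private_data = &pdev->dev;
	strcpy(disk->disk_name, "mybio0");
	set_capacity(disk, 2048);	/* capacity in 512-byte sectors */
	add_disk(disk);
	return 0;
}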
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 45d2c28c8fc8..614d82e7fae4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -219,15 +219,11 @@ static const struct device_attribute pid_attr = {
static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
- struct request_queue *q;
if (disk) {
- q = disk->queue;
del_gendisk(disk);
- blk_cleanup_queue(q);
blk_mq_free_tag_set(&nbd->tag_set);
- disk->private_data = NULL;
- put_disk(disk);
+ blk_cleanup_disk(disk);
}
/*
@@ -1646,15 +1642,24 @@ static int nbd_dev_add(int index)
{
struct nbd_device *nbd;
struct gendisk *disk;
- struct request_queue *q;
int err = -ENOMEM;
nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
if (!nbd)
goto out;
- disk = alloc_disk(1 << part_shift);
- if (!disk)
+ nbd->tag_set.ops = &nbd_mq_ops;
+ nbd->tag_set.nr_hw_queues = 1;
+ nbd->tag_set.queue_depth = 128;
+ nbd->tag_set.numa_node = NUMA_NO_NODE;
+ nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_BLOCKING;
+ nbd->tag_set.driver_data = nbd;
+ nbd->destroy_complete = NULL;
+
+ err = blk_mq_alloc_tag_set(&nbd->tag_set);
+ if (err)
goto out_free_nbd;
if (index >= 0) {
@@ -1668,30 +1673,15 @@ static int nbd_dev_add(int index)
index = err;
}
if (err < 0)
- goto out_free_disk;
-
+ goto out_free_tags;
nbd->index = index;
- nbd->disk = disk;
- nbd->tag_set.ops = &nbd_mq_ops;
- nbd->tag_set.nr_hw_queues = 1;
- nbd->tag_set.queue_depth = 128;
- nbd->tag_set.numa_node = NUMA_NO_NODE;
- nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
- nbd->tag_set.driver_data = nbd;
- nbd->destroy_complete = NULL;
- err = blk_mq_alloc_tag_set(&nbd->tag_set);
- if (err)
+ disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_free_idr;
-
- q = blk_mq_init_queue(&nbd->tag_set);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
- goto out_free_tags;
}
- disk->queue = q;
+ nbd->disk = disk;
/*
* Tell the block layer that we are not a rotational device
@@ -1712,6 +1702,7 @@ static int nbd_dev_add(int index)
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
disk->first_minor = index << part_shift;
+ disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
@@ -1719,12 +1710,10 @@ static int nbd_dev_add(int index)
nbd_total_devices++;
return index;
-out_free_tags:
- blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
idr_remove(&nbd_index_idr, index);
-out_free_disk:
- put_disk(disk);
+out_free_tags:
+ blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
kfree(nbd);
out:
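Most of the nbd churn above is reordering: the tag set is now allocated before the idr index, so every error label unwinds exactly what has been acquired, in reverse order. Condensed, the new shape of nbd_dev_add() is the following (index handling simplified to the common path, so treat the idr_alloc() call as illustrative):

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto out_free_tags;
	index = err;

	disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;
	return index;

out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
	return err;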
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 5f006d9e1472..3b320b005aa8 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1597,11 +1597,10 @@ static void null_del_dev(struct nullb *nullb)
null_restart_queue_async(nullb);
}
- blk_cleanup_queue(nullb->q);
+ blk_cleanup_disk(nullb->disk);
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1700,22 +1699,19 @@ static int init_driver_queues(struct nullb *nullb)
static int null_gendisk_register(struct nullb *nullb)
{
sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
- struct gendisk *disk;
+ struct gendisk *disk = nullb->disk;
- disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
- if (!disk)
- return -ENOMEM;
set_capacity(disk, size);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
+ disk->minors = 1;
if (queue_is_mq(nullb->q))
disk->fops = &null_rq_ops;
else
disk->fops = &null_bio_ops;
disk->private_data = nullb;
- disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
if (nullb->dev->zoned) {
@@ -1851,23 +1847,26 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_queues;
if (!null_setup_fault())
- goto out_cleanup_queues;
+ goto out_cleanup_tags;
+ rv = -ENOMEM;
nullb->tag_set->timeout = 5 * HZ;
- nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);
- if (IS_ERR(nullb->q)) {
- rv = -ENOMEM;
+ nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
+ if (IS_ERR(nullb->disk)) {
+ rv = PTR_ERR(nullb->disk);
goto out_cleanup_tags;
}
+ nullb->q = nullb->disk->queue;
} else if (dev->queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue(dev->home_node);
- if (!nullb->q) {
- rv = -ENOMEM;
+ rv = -ENOMEM;
+ nullb->disk = blk_alloc_disk(nullb->dev->home_node);
+ if (!nullb->disk)
goto out_cleanup_queues;
- }
+
+ nullb->q = nullb->disk->queue;
rv = init_driver_queues(nullb);
if (rv)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_disk;
}
if (dev->mbps) {
@@ -1883,7 +1882,7 @@ static int null_add_dev(struct nullb_device *dev)
if (dev->zoned) {
rv = null_init_zoned_dev(dev, nullb->q);
if (rv)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_disk;
}
nullb->q->queuedata = nullb;
@@ -1921,8 +1920,8 @@ static int null_add_dev(struct nullb_device *dev)
return 0;
out_cleanup_zone:
null_free_zoned_dev(dev);
-out_cleanup_blk_queue:
- blk_cleanup_queue(nullb->q);
+out_cleanup_disk:
+ blk_cleanup_disk(nullb->disk);
out_cleanup_tags:
if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 70da8b86ce58..f9cdd11f02f5 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -309,21 +309,19 @@ static void pcd_init_units(void)
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- struct gendisk *disk = alloc_disk(1);
+ struct gendisk *disk;
- if (!disk)
+ if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE))
continue;
- disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
- 1, BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(disk->queue)) {
- disk->queue = NULL;
- put_disk(disk);
+ disk = blk_mq_alloc_disk(&cd->tag_set, cd);
+ if (IS_ERR(disk)) {
+ blk_mq_free_tag_set(&cd->tag_set);
continue;
}
INIT_LIST_HEAD(&cd->rq_list);
- disk->queue->queuedata = cd;
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
@@ -343,6 +341,7 @@ static void pcd_init_units(void)
cd->info.mask = 0;
disk->major = major;
disk->first_minor = unit;
+ disk->minors = 1;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
@@ -759,10 +758,8 @@ static int pcd_detect(void)
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;
- blk_cleanup_queue(cd->disk->queue);
- cd->disk->queue = NULL;
+ blk_cleanup_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
}
pi_unregister_driver(par_drv);
return -1;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 828a45ffe0e7..3b2b8e872beb 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -879,18 +879,6 @@ static void pd_probe_drive(struct pd_unit *disk)
{
struct gendisk *p;
- p = alloc_disk(1 << PD_BITS);
- if (!p)
- return;
-
- strcpy(p->disk_name, disk->name);
- p->fops = &pd_fops;
- p->major = major;
- p->first_minor = (disk - pd) << PD_BITS;
- p->events = DISK_EVENT_MEDIA_CHANGE;
- disk->gd = p;
- p->private_data = disk;
-
memset(&disk->tag_set, 0, sizeof(disk->tag_set));
disk->tag_set.ops = &pd_mq_ops;
disk->tag_set.cmd_size = sizeof(struct pd_req);
@@ -903,14 +891,21 @@ static void pd_probe_drive(struct pd_unit *disk)
if (blk_mq_alloc_tag_set(&disk->tag_set))
return;
- p->queue = blk_mq_init_queue(&disk->tag_set);
- if (IS_ERR(p->queue)) {
+ p = blk_mq_alloc_disk(&disk->tag_set, disk);
+ if (IS_ERR(p)) {
blk_mq_free_tag_set(&disk->tag_set);
- p->queue = NULL;
return;
}
+ disk->gd = p;
+
+ strcpy(p->disk_name, disk->name);
+ p->fops = &pd_fops;
+ p->major = major;
+ p->first_minor = (disk - pd) << PD_BITS;
+ p->minors = 1 << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
+ p->private_data = disk;
- p->queue->queuedata = disk;
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
@@ -1019,9 +1014,8 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
- blk_cleanup_queue(p->queue);
blk_mq_free_tag_set(&disk->tag_set);
- put_disk(p);
+ blk_cleanup_disk(p);
pi_release(disk->pi);
}
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bb09f21ce21a..d5b9c88ba76f 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -294,20 +294,17 @@ static void __init pf_init_units(void)
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
struct gendisk *disk;
- disk = alloc_disk(1);
- if (!disk)
+ if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE))
continue;
- disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
- 1, BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(disk->queue)) {
- disk->queue = NULL;
- put_disk(disk);
+ disk = blk_mq_alloc_disk(&pf->tag_set, pf);
+ if (IS_ERR(disk)) {
+ blk_mq_free_tag_set(&pf->tag_set);
continue;
}
INIT_LIST_HEAD(&pf->rq_list);
- disk->queue->queuedata = pf;
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
@@ -318,6 +315,7 @@ static void __init pf_init_units(void)
snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
disk->major = major;
disk->first_minor = unit;
+ disk->minors = 1;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
@@ -766,10 +764,8 @@ static int pf_detect(void)
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
- blk_cleanup_queue(pf->disk->queue);
- pf->disk->queue = NULL;
+ blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
}
pi_unregister_driver(par_drv);
return -1;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bd3556585122..f69b5c69c2a6 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2711,19 +2711,17 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_off = write_congestion_off;
ret = -ENOMEM;
- disk = alloc_disk(1);
+ disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto out_mem;
pd->disk = disk;
disk->major = pktdev_major;
disk->first_minor = idx;
+ disk->minors = 1;
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
- disk->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!disk->queue)
- goto out_mem2;
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
@@ -2746,7 +2744,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
return 0;
out_mem2:
- put_disk(disk);
+ blk_cleanup_disk(disk);
out_mem:
mempool_exit(&pd->rb_pool);
kfree(pd);
@@ -2796,8 +2794,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
- blk_cleanup_queue(pd->disk->queue);
- put_disk(pd->disk);
+ blk_cleanup_disk(pd->disk);
mempool_exit(&pd->rb_pool);
kfree(pd);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ba3ece56cbb3..f374ea2c67ce 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -29,7 +29,6 @@
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
- struct request_queue *queue;
struct blk_mq_tag_set tag_set;
struct gendisk *gendisk;
unsigned int blocking_factor;
@@ -267,7 +266,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
blk_mq_end_request(req, error);
spin_unlock(&priv->lock);
- blk_mq_run_hw_queues(priv->queue, true);
+ blk_mq_run_hw_queues(priv->gendisk->queue, true);
return IRQ_HANDLED;
}
@@ -441,17 +440,20 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(queue)) {
- dev_err(&dev->sbd.core, "%s:%u: blk_mq_init_queue failed\n",
- __func__, __LINE__);
- error = PTR_ERR(queue);
+ if (error)
goto fail_teardown;
+
+ gendisk = blk_mq_alloc_disk(&priv->tag_set, dev);
+ if (IS_ERR(gendisk)) {
+ dev_err(&dev->sbd.core, "%s:%u: blk_mq_alloc_disk failed\n",
+ __func__, __LINE__);
+ error = PTR_ERR(gendisk);
+ goto fail_free_tag_set;
}
- priv->queue = queue;
- queue->queuedata = dev;
+ queue = gendisk->queue;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
blk_queue_dma_alignment(queue, dev->blk_size-1);
@@ -462,19 +464,11 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
- gendisk = alloc_disk(PS3DISK_MINORS);
- if (!gendisk) {
- dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__,
- __LINE__);
- error = -ENOMEM;
- goto fail_cleanup_queue;
- }
-
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
+ gendisk->minors = PS3DISK_MINORS;
gendisk->fops = &ps3disk_fops;
- gendisk->queue = queue;
gendisk->private_data = dev;
snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
devidx+'a');
@@ -490,8 +484,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
device_add_disk(&dev->sbd.core, gendisk, NULL);
return 0;
-fail_cleanup_queue:
- blk_cleanup_queue(queue);
+fail_free_tag_set:
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
ps3stor_teardown(dev);
@@ -517,9 +510,8 @@ static void ps3disk_remove(struct ps3_system_bus_device *_dev)
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
- blk_cleanup_queue(priv->queue);
+ blk_cleanup_disk(priv->gendisk);
blk_mq_free_tag_set(&priv->tag_set);
- put_disk(priv->gendisk);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
ps3stor_teardown(dev);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1d738999fb69..7fbf469651c4 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -67,7 +67,6 @@ struct ps3vram_cache {
};
struct ps3vram_priv {
- struct request_queue *queue;
struct gendisk *gendisk;
u64 size;
@@ -613,7 +612,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int error, status;
- struct request_queue *queue;
struct gendisk *gendisk;
u64 ddr_size, ddr_lpar, ctrl_lpar, info_lpar, reports_lpar,
reports_size, xdr_lpar;
@@ -736,33 +734,23 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
ps3vram_proc_init(dev);
- queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!queue) {
- dev_err(&dev->core, "blk_alloc_queue failed\n");
- error = -ENOMEM;
- goto out_cache_cleanup;
- }
-
- priv->queue = queue;
- blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
- blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
- blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
-
- gendisk = alloc_disk(1);
+ gendisk = blk_alloc_disk(NUMA_NO_NODE);
if (!gendisk) {
- dev_err(&dev->core, "alloc_disk failed\n");
+ dev_err(&dev->core, "blk_alloc_disk failed\n");
error = -ENOMEM;
- goto fail_cleanup_queue;
+ goto out_cache_cleanup;
}
priv->gendisk = gendisk;
gendisk->major = ps3vram_major;
- gendisk->first_minor = 0;
+ gendisk->minors = 1;
gendisk->fops = &ps3vram_fops;
- gendisk->queue = queue;
gendisk->private_data = dev;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
+ blk_queue_max_segments(gendisk->queue, BLK_MAX_SEGMENTS);
+ blk_queue_max_segment_size(gendisk->queue, BLK_MAX_SEGMENT_SIZE);
+ blk_queue_max_hw_sectors(gendisk->queue, BLK_SAFE_MAX_SECTORS);
dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
@@ -770,8 +758,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
device_add_disk(&dev->core, gendisk, NULL);
return 0;
-fail_cleanup_queue:
- blk_cleanup_queue(queue);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
@@ -802,8 +788,7 @@ static void ps3vram_remove(struct ps3_system_bus_device *dev)
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
- put_disk(priv->gendisk);
- blk_cleanup_queue(priv->queue);
+ blk_cleanup_disk(priv->gendisk);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bbb88eb009e0..531d390902dd 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4750,9 +4750,8 @@ static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
- blk_cleanup_queue(rbd_dev->disk->queue);
+ blk_cleanup_disk(rbd_dev->disk);
blk_mq_free_tag_set(&rbd_dev->tag_set);
- put_disk(rbd_dev->disk);
rbd_dev->disk = NULL;
}
@@ -4922,22 +4921,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
int err;
- /* create gendisk info */
- disk = alloc_disk(single_major ?
- (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
- RBD_MINORS_PER_MAJOR);
- if (!disk)
- return -ENOMEM;
-
- snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
- rbd_dev->dev_id);
- disk->major = rbd_dev->major;
- disk->first_minor = rbd_dev->minor;
- if (single_major)
- disk->flags |= GENHD_FL_EXT_DEVT;
- disk->fops = &rbd_bd_ops;
- disk->private_data = rbd_dev;
-
memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
@@ -4948,13 +4931,26 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
if (err)
- goto out_disk;
+ return err;
- q = blk_mq_init_queue(&rbd_dev->tag_set);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
+ disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_tag_set;
}
+ q = disk->queue;
+
+ snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
+ rbd_dev->dev_id);
+ disk->major = rbd_dev->major;
+ disk->first_minor = rbd_dev->minor;
+ if (single_major) {
+ disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
+ disk->flags |= GENHD_FL_EXT_DEVT;
+ } else {
+ disk->minors = RBD_MINORS_PER_MAJOR;
+ }
+ disk->fops = &rbd_bd_ops;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
@@ -4976,21 +4972,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
- /*
- * disk_release() expects a queue ref from add_disk() and will
- * put it. Hold an extra ref until add_disk() is called.
- */
- WARN_ON(!blk_get_queue(q));
- disk->queue = q;
- q->queuedata = rbd_dev;
-
rbd_dev->disk = disk;
return 0;
out_tag_set:
blk_mq_free_tag_set(&rbd_dev->tag_set);
-out_disk:
- put_disk(disk);
return err;
}
@@ -7088,8 +7074,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_image_lock;
device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
- /* see rbd_init_disk() */
- blk_put_queue(rbd_dev->disk->queue);
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
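The comment deleted above is worth unpacking: disk_release() drops one queue reference when the gendisk is freed, so a driver that attached a separately created queue had to take an extra reference before add_disk() and drop it afterwards. With blk_mq_alloc_disk() the queue is born owned by the disk, and the whole dance goes away. For contrast, the pre-conversion pattern looked roughly like this:

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q))
		return PTR_ERR(q);
	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it; hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	/* ...and once device_add_disk() had taken its own reference: */
	blk_put_queue(disk->queue);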
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c604a402cd5c..f4fa45d24c0b 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1353,18 +1353,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static int setup_mq_dev(struct rnbd_clt_dev *dev)
-{
- dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
- if (IS_ERR(dev->queue)) {
- rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
- PTR_ERR(dev->queue));
- return PTR_ERR(dev->queue);
- }
- rnbd_init_mq_hw_queues(dev);
- return 0;
-}
-
static void setup_request_queue(struct rnbd_clt_dev *dev)
{
blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
@@ -1393,13 +1381,13 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
- dev->queue->queuedata = dev;
}
static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
+ dev->gd->minors = 1 << RNBD_PART_BITS;
dev->gd->fops = &rnbd_client_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
@@ -1426,24 +1414,18 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
{
- int err, idx = dev->clt_device_id;
+ int idx = dev->clt_device_id;
dev->size = dev->nsectors * dev->logical_block_size;
- err = setup_mq_dev(dev);
- if (err)
- return err;
+ dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
+ if (IS_ERR(dev->gd))
+ return PTR_ERR(dev->gd);
+ dev->queue = dev->gd->queue;
+ rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev);
-
- dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
- if (!dev->gd) {
- blk_cleanup_queue(dev->queue);
- return -ENOMEM;
- }
-
rnbd_clt_setup_gen_disk(dev, idx);
-
return 0;
}
@@ -1650,8 +1632,7 @@ put_sess:
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
del_gendisk(dev->gd);
- blk_cleanup_queue(dev->queue);
- put_disk(dev->gd);
+ blk_cleanup_disk(dev->gd);
}
static void destroy_sysfs(struct rnbd_clt_dev *dev,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 9a28322a8cd8..1cc40b0ea761 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -236,47 +236,40 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- card->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!card->queue) {
- dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
- unregister_blkdev(card->major, DRIVER_NAME);
- return -ENOMEM;
- }
-
- card->gendisk = alloc_disk(blkdev_minors);
+ card->gendisk = blk_alloc_disk(NUMA_NO_NODE);
if (!card->gendisk) {
dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
- blk_cleanup_queue(card->queue);
unregister_blkdev(card->major, DRIVER_NAME);
return -ENOMEM;
}
if (card->config_valid) {
blk_size = card->config.data.block_size;
- blk_queue_dma_alignment(card->queue, blk_size - 1);
- blk_queue_logical_block_size(card->queue, blk_size);
+ blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
+ blk_queue_logical_block_size(card->gendisk->queue, blk_size);
}
- blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
- blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
+ blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
+ blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
if (rsxx_discard_supported(card)) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
- blk_queue_max_discard_sectors(card->queue,
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
+ blk_queue_max_discard_sectors(card->gendisk->queue,
RSXX_HW_BLK_SIZE >> 9);
- card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
- card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
+ card->gendisk->queue->limits.discard_granularity =
+ RSXX_HW_BLK_SIZE;
+ card->gendisk->queue->limits.discard_alignment =
+ RSXX_HW_BLK_SIZE;
}
snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
"rsxx%d", card->disk_id);
card->gendisk->major = card->major;
- card->gendisk->first_minor = 0;
+ card->gendisk->minors = blkdev_minors;
card->gendisk->fops = &rsxx_fops;
card->gendisk->private_data = card;
- card->gendisk->queue = card->queue;
return 0;
}
@@ -286,10 +279,8 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
if (!enable_blkdev)
return;
- put_disk(card->gendisk);
+ blk_cleanup_disk(card->gendisk);
card->gendisk = NULL;
-
- blk_cleanup_queue(card->queue);
unregister_blkdev(card->major, DRIVER_NAME);
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0574f4495755..ed182f3dd054 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -74,9 +74,6 @@ struct dma_tracker {
struct rsxx_dma *dma;
};
-#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
- (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
-
struct dma_tracker_list {
spinlock_t lock;
int head;
@@ -808,7 +805,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
memset(&ctrl->stats, 0, sizeof(ctrl->stats));
- ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
+ ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list,
+ RSXX_MAX_OUTSTANDING_CMDS));
if (!ctrl->trackers)
return -ENOMEM;
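struct_size() from <linux/overflow.h> computes sizeof(*ptr) + count * sizeof(ptr->member[0]) and saturates to SIZE_MAX on overflow, so a huge count makes the allocation fail instead of silently wrapping to a small size. With the list[] flexible array member at the end of struct dma_tracker_list (sketched below from the fields visible above), the open-coded DMA_TRACKER_LIST_SIZE8 macro becomes:

#include <linux/overflow.h>

struct dma_tracker_list {
	spinlock_t lock;
	int head;
	struct dma_tracker list[];	/* flexible array member */
};

	ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list,
					     RSXX_MAX_OUTSTANDING_CMDS));
	if (!ctrl->trackers)
		return -ENOMEM;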
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 6147977994ff..26c320c0d924 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -154,7 +154,6 @@ struct rsxx_cardinfo {
bool bdev_attached;
int disk_id;
int major;
- struct request_queue *queue;
struct gendisk *gendisk;
struct {
/* Used to convert a byte address to a device address. */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 39aeebc6837d..c4631e684386 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -780,27 +780,6 @@ static const struct blk_mq_ops vdc_mq_ops = {
.queue_rq = vdc_queue_rq,
};
-static void cleanup_queue(struct request_queue *q)
-{
- struct vdc_port *port = q->queuedata;
-
- blk_cleanup_queue(q);
- blk_mq_free_tag_set(&port->tag_set);
-}
-
-static struct request_queue *init_queue(struct vdc_port *port)
-{
- struct request_queue *q;
-
- q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(q))
- return q;
-
- q->queuedata = port;
- return q;
-}
-
static int probe_disk(struct vdc_port *port)
{
struct request_queue *q;
@@ -838,21 +817,21 @@ static int probe_disk(struct vdc_port *port)
(u64)geom.num_sec);
}
- q = init_queue(port);
- if (IS_ERR(q)) {
- printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
- port->vio.name);
- return PTR_ERR(q);
- }
- g = alloc_disk(1 << PARTITION_SHIFT);
- if (!g) {
+ err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
+ VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ if (err)
+ return err;
+
+ g = blk_mq_alloc_disk(&port->tag_set, port);
+ if (IS_ERR(g)) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
- cleanup_queue(q);
- return -ENOMEM;
+ blk_mq_free_tag_set(&port->tag_set);
+ return PTR_ERR(g);
}
port->disk = g;
+ q = g->queue;
/* Each segment in a request is up to an aligned page in size. */
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
@@ -862,6 +841,7 @@ static int probe_disk(struct vdc_port *port)
blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
+ g->minors = 1 << PARTITION_SHIFT;
strcpy(g->disk_name, port->disk_name);
g->fops = &vdc_fops;
@@ -1001,9 +981,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
- err = -ENOMEM;
if (!port) {
- printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
+ err = -ENOMEM;
goto err_out_release_mdesc;
}
@@ -1083,9 +1062,8 @@ static int vdc_port_remove(struct vio_dev *vdev)
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
- cleanup_queue(port->disk->queue);
- put_disk(port->disk);
- port->disk = NULL;
+ blk_cleanup_disk(port->disk);
+ blk_mq_free_tag_set(&port->tag_set);
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 2917b21f48ff..7ccc8d2a41bc 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -800,23 +800,20 @@ static int swim_floppy_init(struct swim_priv *swd)
spin_lock_init(&swd->lock);
for (drive = 0; drive < swd->floppy_count; drive++) {
- struct request_queue *q;
-
- swd->unit[drive].disk = alloc_disk(1);
- if (swd->unit[drive].disk == NULL) {
- err = -ENOMEM;
+ err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
+ &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ if (err)
goto exit_put_disks;
- }
- q = blk_mq_init_sq_queue(&swd->unit[drive].tag_set, &swim_mq_ops,
- 2, BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
+ swd->unit[drive].disk =
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set,
+ &swd->unit[drive]);
+ if (IS_ERR(swd->unit[drive].disk)) {
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ err = PTR_ERR(swd->unit[drive].disk);
goto exit_put_disks;
}
- swd->unit[drive].disk->queue = q;
- swd->unit[drive].disk->queue->queuedata = &swd->unit[drive];
swd->unit[drive].swd = swd;
}
@@ -824,6 +821,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->flags = GENHD_FL_REMOVABLE;
swd->unit[drive].disk->major = FLOPPY_MAJOR;
swd->unit[drive].disk->first_minor = drive;
+ swd->unit[drive].disk->minors = 1;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
@@ -839,14 +837,10 @@ exit_put_disks:
do {
struct gendisk *disk = swd->unit[drive].disk;
- if (disk) {
- if (disk->queue) {
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
- }
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
- put_disk(disk);
- }
+ if (!disk)
+ continue;
+ blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
} while (drive--);
return err;
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index a515d0c1d2cb..965af0a3e95b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1202,30 +1202,27 @@ static int swim3_attach(struct macio_dev *mdev,
return rc;
}
- disk = alloc_disk(1);
- if (disk == NULL) {
- rc = -ENOMEM;
- goto out_unregister;
- }
-
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(disk->queue)) {
- rc = PTR_ERR(disk->queue);
- disk->queue = NULL;
- goto out_put_disk;
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (rc)
+ goto out_unregister;
+
+ disk = blk_mq_alloc_disk(&fs->tag_set, fs);
+ if (IS_ERR(disk)) {
+ rc = PTR_ERR(disk);
+ goto out_free_tag_set;
}
- disk->queue->queuedata = fs;
rc = swim3_add_device(mdev, floppy_count);
if (rc)
- goto out_cleanup_queue;
+ goto out_cleanup_disk;
disk->major = FLOPPY_MAJOR;
disk->first_minor = floppy_count;
+ disk->minors = 1;
disk->fops = &floppy_fops;
disk->private_data = fs;
disk->events = DISK_EVENT_MEDIA_CHANGE;
@@ -1237,12 +1234,10 @@ static int swim3_attach(struct macio_dev *mdev,
disks[floppy_count++] = disk;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out_free_tag_set:
blk_mq_free_tag_set(&fs->tag_set);
-out_put_disk:
- put_disk(disk);
out_unregister:
if (floppy_count == 0)
unregister_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 2cdf2771f8e8..7b54353ee92b 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1343,32 +1343,25 @@ static int carm_init_disk(struct carm_host *host, unsigned int port_no)
{
struct carm_port *port = &host->port[port_no];
struct gendisk *disk;
- struct request_queue *q;
port->host = host;
port->port_no = port_no;
- disk = alloc_disk(CARM_MINORS_PER_MAJOR);
- if (!disk)
- return -ENOMEM;
+ disk = blk_mq_alloc_disk(&host->tag_set, port);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
port->disk = disk;
sprintf(disk->disk_name, DRV_NAME "/%u",
(unsigned int)host->id * CARM_MAX_PORTS + port_no);
disk->major = host->major;
disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
+ disk->minors = CARM_MINORS_PER_MAJOR;
disk->fops = &carm_bd_ops;
disk->private_data = port;
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q))
- return PTR_ERR(q);
-
- blk_queue_max_segments(q, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
-
- q->queuedata = port;
- disk->queue = q;
+ blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG);
+ blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY);
return 0;
}
@@ -1382,9 +1375,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
- put_disk(disk);
+ blk_cleanup_disk(disk);
}
static int carm_init_shm(struct carm_host *host)
@@ -1429,8 +1420,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
- printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
- pci_name(pdev));
rc = -ENOMEM;
goto err_out_regions;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b9fa3ef5b57c..e4bd3b1fc3c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -749,13 +749,6 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_free_vblk;
- /* FIXME: How many partitions? How long is a piece of string? */
- vblk->disk = alloc_disk(1 << PART_BITS);
- if (!vblk->disk) {
- err = -ENOMEM;
- goto out_free_vq;
- }
-
/* Default queue sizing is to fill the ring. */
if (likely(!virtblk_queue_depth)) {
queue_depth = vblk->vqs[0].vq->num_free;
@@ -779,21 +772,20 @@ static int virtblk_probe(struct virtio_device *vdev)
err = blk_mq_alloc_tag_set(&vblk->tag_set);
if (err)
- goto out_put_disk;
+ goto out_free_vq;
- q = blk_mq_init_queue(&vblk->tag_set);
- if (IS_ERR(q)) {
- err = -ENOMEM;
+ vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
+ if (IS_ERR(vblk->disk)) {
+ err = PTR_ERR(vblk->disk);
goto out_free_tags;
}
- vblk->disk->queue = q;
-
- q->queuedata = vblk;
+ q = vblk->disk->queue;
virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
vblk->disk->major = major;
vblk->disk->first_minor = index_to_minor(index);
+ vblk->disk->minors = 1 << PART_BITS;
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
vblk->disk->flags |= GENHD_FL_EXT_DEVT;
@@ -892,8 +884,6 @@ static int virtblk_probe(struct virtio_device *vdev)
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
-out_put_disk:
- put_disk(vblk->disk);
out_free_vq:
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -913,8 +903,7 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
-
+ blk_cleanup_disk(vblk->disk);
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
@@ -925,7 +914,6 @@ static void virtblk_remove(struct virtio_device *vdev)
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
- put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 10df39a8b18d..8d49f8fa98bb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -968,48 +968,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
blk_queue_dma_alignment(rq, 511);
}
-static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
- unsigned int physical_sector_size)
-{
- struct request_queue *rq;
- struct blkfront_info *info = gd->private_data;
-
- memset(&info->tag_set, 0, sizeof(info->tag_set));
- info->tag_set.ops = &blkfront_mq_ops;
- info->tag_set.nr_hw_queues = info->nr_rings;
- if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
- /*
- * When indirect descriptior is not supported, the I/O request
- * will be split between multiple request in the ring.
- * To avoid problems when sending the request, divide by
- * 2 the depth of the queue.
- */
- info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
- } else
- info->tag_set.queue_depth = BLK_RING_SIZE(info);
- info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- info->tag_set.cmd_size = sizeof(struct blkif_req);
- info->tag_set.driver_data = info;
-
- if (blk_mq_alloc_tag_set(&info->tag_set))
- return -EINVAL;
- rq = blk_mq_init_queue(&info->tag_set);
- if (IS_ERR(rq)) {
- blk_mq_free_tag_set(&info->tag_set);
- return PTR_ERR(rq);
- }
-
- rq->queuedata = info;
- info->rq = gd->queue = rq;
- info->gd = gd;
- info->sector_size = sector_size;
- info->physical_sector_size = physical_sector_size;
- blkif_set_queue_limits(info);
-
- return 0;
-}
-
static const char *flush_info(struct blkfront_info *info)
{
if (info->feature_flush && info->feature_fua)
@@ -1146,12 +1104,36 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
err = xlbd_reserve_minors(minor, nr_minors);
if (err)
- goto out;
+ return err;
err = -ENODEV;
- gd = alloc_disk(nr_minors);
- if (gd == NULL)
- goto release;
+ memset(&info->tag_set, 0, sizeof(info->tag_set));
+ info->tag_set.ops = &blkfront_mq_ops;
+ info->tag_set.nr_hw_queues = info->nr_rings;
+ if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
+ /*
+ * When indirect descriptor is not supported, the I/O request
+ * will be split between multiple requests in the ring.
+ * To avoid problems when sending the request, divide by
+ * 2 the depth of the queue.
+ */
+ info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
+ } else
+ info->tag_set.queue_depth = BLK_RING_SIZE(info);
+ info->tag_set.numa_node = NUMA_NO_NODE;
+ info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ info->tag_set.cmd_size = sizeof(struct blkif_req);
+ info->tag_set.driver_data = info;
+
+ err = blk_mq_alloc_tag_set(&info->tag_set);
+ if (err)
+ goto out_release_minors;
+
+ gd = blk_mq_alloc_disk(&info->tag_set, info);
+ if (IS_ERR(gd)) {
+ err = PTR_ERR(gd);
+ goto out_free_tag_set;
+ }
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1164,14 +1146,16 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
+ gd->minors = nr_minors;
gd->fops = &xlvbd_block_fops;
gd->private_data = info;
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
- del_gendisk(gd);
- goto release;
- }
+ info->rq = gd->queue;
+ info->gd = gd;
+ info->sector_size = sector_size;
+ info->physical_sector_size = physical_sector_size;
+ blkif_set_queue_limits(info);
xlvbd_flush(info);
@@ -1186,9 +1170,10 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
return 0;
- release:
+out_free_tag_set:
+ blk_mq_free_tag_set(&info->tag_set);
+out_release_minors:
xlbd_release_minors(minor, nr_minors);
- out:
return err;
}
@@ -1217,12 +1202,9 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
nr_minors = info->gd->minors;
xlbd_release_minors(minor, nr_minors);
- blk_cleanup_queue(info->rq);
- blk_mq_free_tag_set(&info->tag_set);
- info->rq = NULL;
-
- put_disk(info->gd);
+ blk_cleanup_disk(info->gd);
info->gd = NULL;
+ blk_mq_free_tag_set(&info->tag_set);
}
/* Already hold rinfo->ring_lock. */
@@ -2163,7 +2145,7 @@ static void blkfront_closing(struct blkfront_info *info)
return;
}
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
if (bdev->bd_openers) {
xenbus_dev_error(xbdev, -EBUSY,
@@ -2174,7 +2156,7 @@ static void blkfront_closing(struct blkfront_info *info)
xenbus_frontend_closed(xbdev);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
bdput(bdev);
}
@@ -2531,7 +2513,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
* isn't closed yet, we let release take care of it.
*/
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&disk->open_mutex);
info = disk->private_data;
dev_warn(disk_to_dev(disk),
@@ -2546,7 +2528,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)
mutex_unlock(&blkfront_mutex);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&disk->open_mutex);
bdput(bdev);
return 0;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index c1d20818e649..4eef218108c6 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -236,11 +236,8 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
case Z2MINOR_Z2ONLY:
z2ram_map = kmalloc(max_z2_map, GFP_KERNEL);
- if (z2ram_map == NULL) {
- printk(KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n");
+ if (!z2ram_map)
goto err_out;
- }
get_z2ram();
@@ -253,11 +250,8 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
case Z2MINOR_CHIPONLY:
z2ram_map = kmalloc(max_chip_map, GFP_KERNEL);
- if (z2ram_map == NULL) {
- printk(KERN_ERR DEVICE_NAME
- ": cannot get mem for z2ram_map\n");
+ if (!z2ram_map)
goto err_out;
- }
get_chipram();
@@ -323,27 +317,20 @@ static const struct blk_mq_ops z2_mq_ops = {
static int z2ram_register_disk(int minor)
{
- struct request_queue *q;
struct gendisk *disk;
- disk = alloc_disk(1);
- if (!disk)
- return -ENOMEM;
-
- q = blk_mq_init_queue(&tag_set);
- if (IS_ERR(q)) {
- put_disk(disk);
- return PTR_ERR(q);
- }
+ disk = blk_mq_alloc_disk(&tag_set, NULL);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
disk->major = Z2RAM_MAJOR;
disk->first_minor = minor;
+ disk->minors = 1;
disk->fops = &z2_fops;
if (minor)
sprintf(disk->disk_name, "z2ram%d", minor);
else
sprintf(disk->disk_name, "z2ram");
- disk->queue = q;
z2ram_gendisk[minor] = disk;
add_disk(disk);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index cf8deecc39ef..fcaf2750f68f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1781,24 +1781,24 @@ static ssize_t reset_store(struct device *dev,
zram = dev_to_zram(dev);
bdev = zram->disk->part0;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
/* Do not reset an active device or claimed device */
if (bdev->bd_openers || zram->claim) {
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return -EBUSY;
}
/* From now on, no one can open /dev/zram[0-9] */
zram->claim = true;
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
zram->claim = false;
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return len;
}
@@ -1808,7 +1808,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode)
int ret = 0;
struct zram *zram;
- WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
+ WARN_ON(!mutex_is_locked(&bdev->bd_disk->open_mutex));
zram = bdev->bd_disk->private_data;
/* zram was claimed for reset, so the open request fails */
@@ -1890,7 +1890,6 @@ static const struct attribute_group *zram_disk_attr_groups[] = {
static int zram_add(void)
{
struct zram *zram;
- struct request_queue *queue;
int ret, device_id;
zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
@@ -1906,27 +1905,20 @@ static int zram_add(void)
#ifdef CONFIG_ZRAM_WRITEBACK
spin_lock_init(&zram->wb_limit_lock);
#endif
- queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!queue) {
- pr_err("Error allocating disk queue for device %d\n",
- device_id);
- ret = -ENOMEM;
- goto out_free_idr;
- }
/* gendisk structure */
- zram->disk = alloc_disk(1);
+ zram->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!zram->disk) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
- goto out_free_queue;
+ goto out_free_idr;
}
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
+ zram->disk->minors = 1;
zram->disk->fops = &zram_devops;
- zram->disk->queue = queue;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
@@ -1969,8 +1961,6 @@ static int zram_add(void)
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
-out_free_queue:
- blk_cleanup_queue(queue);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
@@ -1982,14 +1972,14 @@ static int zram_remove(struct zram *zram)
{
struct block_device *bdev = zram->disk->part0;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
if (bdev->bd_openers || zram->claim) {
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return -EBUSY;
}
zram->claim = true;
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
zram_debugfs_unregister(zram);
@@ -2000,8 +1990,7 @@ static int zram_remove(struct zram *zram)
pr_info("Removed device: %s\n", zram->disk->disk_name);
del_gendisk(zram->disk);
- blk_cleanup_queue(zram->disk->queue);
- put_disk(zram->disk);
+ blk_cleanup_disk(zram->disk);
kfree(zram);
return 0;
}
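These zram hunks show the second theme of the series: bd_mutex moves from struct block_device to the gendisk as disk->open_mutex. Since the block core calls ->open() with that mutex held, the claim flag set under the same lock in reset_store() and zram_remove() cannot race with a concurrent open. A condensed sketch of zram_open() after the change, simplified from the partial hunk above:

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	struct zram *zram = bdev->bd_disk->private_data;

	/* ->open() runs under the same disk->open_mutex that
	 * reset_store()/zram_remove() hold while setting claim. */
	WARN_ON(!mutex_is_locked(&bdev->bd_disk->open_mutex));
	if (zram->claim)	/* device is being reset or removed */
		return -EBUSY;
	return 0;
}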
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 419a7e8281ee..74c411911b6e 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -112,7 +112,7 @@ struct zram {
/*
* zram is claimed, so open requests will fail
*/
*/
- bool claim; /* Protected by bdev->bd_mutex */
+ bool claim; /* Protected by disk->open_mutex */
struct file *backing_dev;
#ifdef CONFIG_ZRAM_WRITEBACK
spinlock_t wb_limit_lock;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1b9743b7f2ef..e5d706ed55ea 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -404,6 +404,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ 0x6106, "BCM4359C0" }, /* 003.001.006 */
{ 0x4106, "BCM4335A0" }, /* 002.001.006 */
+ { 0x410c, "BCM43430B0" }, /* 002.001.012 */
{ }
};
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 33d58b30c5ac..cddd350beba3 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1461,9 +1461,7 @@ static void btmrvl_sdio_coredump(struct device *dev)
BT_ERR("Allocated buffer not enough");
}
- if (stat != RDWR_STATUS_DONE) {
- continue;
- } else {
+ if (stat == RDWR_STATUS_DONE) {
BT_INFO("%s done: size=0x%tx",
entry->mem_name,
dbg_ptr - entry->mem_ptr);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 6c40bc75fb5b..e9d91d7c0db4 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -581,11 +581,9 @@ static int btmtkuart_open(struct hci_dev *hdev)
/* Enable the power domain and clock the device requires */
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
- if (err < 0) {
- pm_runtime_put_noidle(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0)
goto err_disable_rpm;
- }
err = clk_prepare_enable(bdev->clk);
if (err < 0)
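pm_runtime_get_sync() increments the usage counter even when the resume fails, so every error path must call pm_runtime_put_noidle() or it leaks a reference; forgetting that was a common bug. pm_runtime_resume_and_get() folds the two together. Roughly, as a simplified sketch of the helper's documented behaviour rather than its literal source:

static inline int my_resume_and_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev); /* counter is bumped even on error */

	if (ret < 0) {
		pm_runtime_put_noidle(dev); /* undo the bump, skip idle callbacks */
		return ret;
	}
	return 0;
}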
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 25114f0d1319..be04d74037d2 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -182,8 +182,9 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct qca_fw_config *config,
- const struct firmware *fw, enum qca_btsoc_type soc_type)
+static void qca_tlv_check_data(struct hci_dev *hdev,
+ struct qca_fw_config *config,
+ u8 *fw_data, enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -194,19 +195,21 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
struct tlv_type_nvm *tlv_nvm;
uint8_t nvm_baud_rate = config->user_baud_rate;
- tlv = (struct tlv_type_hdr *)fw->data;
-
- type_len = le32_to_cpu(tlv->type_len);
- length = (type_len >> 8) & 0x00ffffff;
-
- BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
- BT_DBG("Length\t\t : %d bytes", length);
-
config->dnld_mode = QCA_SKIP_EVT_NONE;
config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
+ case ELF_TYPE_PATCH:
+ config->dnld_mode = QCA_SKIP_EVT_VSE_CC;
+ config->dnld_type = QCA_SKIP_EVT_VSE_CC;
+
+ bt_dev_dbg(hdev, "File Class : 0x%x", fw_data[4]);
+ bt_dev_dbg(hdev, "Data Encoding : 0x%x", fw_data[5]);
+ bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]);
+ break;
case TLV_TYPE_PATCH:
+ tlv = (struct tlv_type_hdr *)fw_data;
+ type_len = le32_to_cpu(tlv->type_len);
tlv_patch = (struct tlv_type_patch *)tlv->data;
/* For Rome version 1.1 to 3.1, all segment commands
@@ -218,6 +221,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
config->dnld_mode = tlv_patch->download_mode;
config->dnld_type = config->dnld_mode;
+ BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
BT_DBG("Total Length : %d bytes",
le32_to_cpu(tlv_patch->total_size));
BT_DBG("Patch Data Length : %d bytes",
@@ -243,6 +247,14 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
break;
case TLV_TYPE_NVM:
+ tlv = (struct tlv_type_hdr *)fw_data;
+
+ type_len = le32_to_cpu(tlv->type_len);
+ length = (type_len >> 8) & 0x00ffffff;
+
+ BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+ BT_DBG("Length\t\t : %d bytes", length);
+
idx = 0;
data = tlv->data;
while (idx < length) {
@@ -387,25 +399,57 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
static int qca_download_firmware(struct hci_dev *hdev,
struct qca_fw_config *config,
- enum qca_btsoc_type soc_type)
+ enum qca_btsoc_type soc_type,
+ u8 rom_ver)
{
const struct firmware *fw;
+ u8 *data;
const u8 *segment;
- int ret, remain, i = 0;
+ int ret, size, remain, i = 0;
bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
- bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
- config->fwname, ret);
- return ret;
+ /* For WCN6750, if the mbn file is not present then check for
+ * the tlv file.
+ */
+ if (soc_type == QCA_WCN6750 && config->type == ELF_TYPE_PATCH) {
+ bt_dev_dbg(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ config->type = TLV_TYPE_PATCH;
+ snprintf(config->fwname, sizeof(config->fwname),
+ "qca/msbtfw%02x.tlv", rom_ver);
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
+ } else {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
}
- qca_tlv_check_data(config, fw, soc_type);
+ size = fw->size;
+ data = vmalloc(fw->size);
+ if (!data) {
+ bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
+ config->fwname);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(data, fw->data, size);
+ release_firmware(fw);
+
+ qca_tlv_check_data(hdev, config, data, soc_type);
- segment = fw->data;
- remain = fw->size;
+ segment = data;
+ remain = size;
while (remain > 0) {
int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);
@@ -435,7 +479,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
ret = qca_inject_cmd_complete_event(hdev);
out:
- release_firmware(fw);
+ vfree(data);
return ret;
}
@@ -502,27 +546,32 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
config.user_baud_rate = baudrate;
+ /* Firmware files to download are based on ROM version.
+ * ROM version is derived from last two bytes of soc_ver.
+ */
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
+
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
if (qca_is_wcn399x(soc_type)) {
- /* Firmware files to download are based on ROM version.
- * ROM version is derived from last two bytes of soc_ver.
- */
- rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
- (soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
} else if (soc_type == QCA_QCA6390) {
- rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
- (soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/htbtfw%02x.tlv", rom_ver);
+ } else if (soc_type == QCA_WCN6750) {
+ /* Choose the mbn file by default. If the mbn file is not
+ * found then fall back to the tlv file.
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
}
- err = qca_download_firmware(hdev, &config, soc_type);
+ err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
@@ -548,11 +597,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
else if (soc_type == QCA_QCA6390)
snprintf(config.fwname, sizeof(config.fwname),
"qca/htnv%02x.bin", rom_ver);
+ else if (soc_type == QCA_WCN6750)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
- err = qca_download_firmware(hdev, &config, soc_type);
+ err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
@@ -564,13 +616,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
- /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
+ /* WCN399x and WCN6750 support the Microsoft vendor extension with 0xFD70 as the
* VsMsftOpCode.
*/
switch (soc_type) {
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
+ case QCA_WCN6750:
hci_set_msft_opcode(hdev, 0xFD70);
break;
default:
@@ -584,7 +637,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
- if (soc_type == QCA_WCN3991) {
+ if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) {
/* get fw build info */
err = qca_read_fw_build_info(hdev);
if (err < 0)
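
The rom_ver derivation above packs two nibbles of soc_ver into one byte: bits 11:8 shift down into the high nibble and bits 3:0 remain the low nibble. A standalone sketch, with a hypothetical soc_ver value, shows the resulting firmware file name:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t soc_ver = 0x40010214;  /* hypothetical value */
            uint8_t rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
                              (soc_ver & 0x0000000f);

            /* (0x200 >> 4) | 0x4 == 0x24 */
            printf("qca/msbtfw%02x.mbn\n", rom_ver);  /* qca/msbtfw24.mbn */
            return 0;
    }
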
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index b19add7675a4..30afa7703afd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -80,7 +80,8 @@ enum qca_tlv_dnld_mode {
enum qca_tlv_type {
TLV_TYPE_PATCH = 1,
- TLV_TYPE_NVM
+ TLV_TYPE_NVM,
+ ELF_TYPE_PATCH,
};
struct qca_fw_config {
@@ -143,6 +144,7 @@ enum qca_btsoc_type {
QCA_WCN3998,
QCA_WCN3991,
QCA_QCA6390,
+ QCA_WCN6750,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -160,6 +162,11 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
soc_type == QCA_WCN3998;
}
+static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
+{
+ return soc_type == QCA_WCN6750;
+}
+
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -192,6 +199,11 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
return false;
}
+static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
+{
+ return false;
+}
+
static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index e7fe5fb22753..cce0125ec4fd 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -132,12 +132,19 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8761a_config" },
/* 8761B */
- { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB),
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8761b_fw.bin",
.cfg_name = "rtl_bt/rtl8761b_config" },
+ /* 8761BU */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761bu_fw.bin",
+ .cfg_name = "rtl_bt/rtl8761bu_config" },
+
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
@@ -719,17 +726,8 @@ int btrtl_download_firmware(struct hci_dev *hdev,
}
EXPORT_SYMBOL_GPL(btrtl_download_firmware);
-int btrtl_setup_realtek(struct hci_dev *hdev)
+void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
{
- struct btrtl_device_info *btrtl_dev;
- int ret;
-
- btrtl_dev = btrtl_initialize(hdev, NULL);
- if (IS_ERR(btrtl_dev))
- return PTR_ERR(btrtl_dev);
-
- ret = btrtl_download_firmware(hdev, btrtl_dev);
-
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
@@ -750,6 +748,21 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
rtl_dev_dbg(hdev, "WBS supported not enabled.");
break;
}
+}
+EXPORT_SYMBOL_GPL(btrtl_set_quirks);
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ struct btrtl_device_info *btrtl_dev;
+ int ret;
+
+ btrtl_dev = btrtl_initialize(hdev, NULL);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ ret = btrtl_download_firmware(hdev, btrtl_dev);
+
+ btrtl_set_quirks(hdev, btrtl_dev);
btrtl_free(btrtl_dev);
return ret;
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 2a582682136d..2c441bda390a 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -54,6 +54,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
void btrtl_free(struct btrtl_device_info *btrtl_dev);
int btrtl_download_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev);
+void btrtl_set_quirks(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev);
int btrtl_setup_realtek(struct hci_dev *hdev);
int btrtl_shutdown_realtek(struct hci_dev *hdev);
int btrtl_get_uart_settings(struct hci_dev *hdev,
@@ -79,6 +81,11 @@ static inline int btrtl_download_firmware(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
+static inline void btrtl_set_quirks(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
+}
+
static inline int btrtl_setup_realtek(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
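
Factoring btrtl_set_quirks() out of btrtl_setup_realtek() lets the H5 three-wire UART path in hci_h5.c (changed further below) apply the same simultaneous-discovery and WBS quirks as the USB path instead of open-coding one of them. A sketch of the shared call sequence, assuming the surrounding setup code:

    btrtl_dev = btrtl_initialize(hdev, NULL);
    if (IS_ERR(btrtl_dev))
            return PTR_ERR(btrtl_dev);

    ret = btrtl_download_firmware(hdev, btrtl_dev);

    btrtl_set_quirks(hdev, btrtl_dev); /* quirk logic shared by all transports */

    btrtl_free(btrtl_dev);
    return ret;
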
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5d603ef39bad..a9855a2dd561 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -270,6 +270,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
@@ -388,6 +390,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
@@ -406,6 +410,11 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional MediaTek MT7615E Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
+ /* Additional MediaTek MT7921 Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -425,6 +434,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8761BU Bluetooth devices */
+ { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -1747,6 +1760,13 @@ static void btusb_work(struct work_struct *work)
* which work with WBS at all.
*/
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
+ /* mSBC frames do not need to be aligned to the SCO packet
+ * boundary. If Alt setting 3 is supported, use it for HCI
+ * payloads of 60 bytes or more so that each air packet
+ * carries a full 60 bytes of data.
+ */
+ if (new_alts == 1 && btusb_find_altsetting(data, 3))
+ new_alts = 3;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -2527,10 +2547,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
}
btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
+
return err;
}
@@ -2680,12 +2707,24 @@ download:
err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
sizeof(fwname), "sfi");
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
return err;
@@ -3291,11 +3330,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_wmt_hdr *hdr;
int err;
- /* Submit control IN URB on demand to process the WMT event */
- err = btusb_mtk_submit_wmt_recv_urb(hdev);
- if (err < 0)
- return err;
-
/* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
@@ -3321,6 +3355,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
goto err_free_wc;
}
+ /* Submit control IN URB on demand to process the WMT event */
+ err = btusb_mtk_submit_wmt_recv_urb(hdev);
+ if (err < 0)
+ goto err_free_wc;
+
/* The vendor specific WMT commands are all answered by a vendor
* specific event and will have the Command Status or Command
* Complete as with usual HCI command flow control.
@@ -4041,6 +4080,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
sent += size;
count -= size;
+ /* ep2 needs time to switch from the ACL function to the DFU
+ * function, so add a 20ms delay here.
+ */
+ msleep(20);
+
while (count) {
size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
@@ -4133,9 +4177,15 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
int err;
if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
- le32_to_cpu(ver->rom_version),
- le16_to_cpu(ver->board_id));
+ /* If the board id equals 0, use the default NVM without a suffix */
+ if (le16_to_cpu(ver->board_id) == 0x0) {
+ snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
+ le32_to_cpu(ver->rom_version));
+ } else {
+ snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
+ le32_to_cpu(ver->rom_version),
+ le16_to_cpu(ver->board_id));
+ }
} else {
snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
le32_to_cpu(ver->rom_version));
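
The NVM naming logic above now special-cases a zero board id. A self-contained sketch (hypothetical version values) of the names it produces:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rom_version = 0x00130201;  /* hypothetical */
            unsigned short board_id = 0x010b;       /* hypothetical */
            char fwname[64];

            if (board_id == 0)      /* default NVM, no suffix */
                    snprintf(fwname, sizeof(fwname),
                             "qca/nvm_usb_%08x.bin", rom_version);
            else                    /* board-specific NVM */
                    snprintf(fwname, sizeof(fwname),
                             "qca/nvm_usb_%08x_%04x.bin",
                             rom_version, board_id);

            puts(fwname);           /* qca/nvm_usb_00130201_010b.bin */
            return 0;
    }
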
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 1f55df93e4ce..2d40302409ff 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -199,7 +199,6 @@ static int ag6xx_setup(struct hci_uart *hu)
fwname, err);
goto patch;
}
- fw_ptr = fw->data;
bt_dev_info(hdev, "Applying bddata (%s)", fwname);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 27e96681d583..e0520639f4ba 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -906,10 +906,7 @@ static int h5_btrtl_setup(struct h5 *h5)
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
- /* Enable controller to do both LE scan and BR/EDR inquiry
- * simultaneously.
- */
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
+ btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
out_free:
btrtl_free(btrtl_dev);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0a0056912d51..53deea2eb7b4 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -218,6 +218,7 @@ struct qca_power {
struct qca_serdev {
struct hci_uart serdev_hu;
struct gpio_desc *bt_en;
+ struct gpio_desc *sw_ctrl;
struct clk *susclk;
enum qca_btsoc_type btsoc_type;
struct qca_power *bt_power;
@@ -604,7 +605,8 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type))
+ if (qca_is_wcn399x(qcadev->btsoc_type) ||
+ qca_is_wcn6750(qcadev->btsoc_type))
hu->init_speed = qcadev->init_speed;
if (qcadev->oper_speed)
@@ -1308,7 +1310,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
/* Give the controller time to process the request */
- if (qca_is_wcn399x(qca_soc_type(hu)))
+ if (qca_is_wcn399x(qca_soc_type(hu)) ||
+ qca_is_wcn6750(qca_soc_type(hu)))
usleep_range(1000, 10000);
else
msleep(300);
@@ -1384,7 +1387,8 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
- if (qca_is_wcn399x(qca_soc_type(hu))) {
+ if (qca_is_wcn399x(qca_soc_type(hu)) ||
+ qca_is_wcn6750(qca_soc_type(hu))) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1417,7 +1421,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
/* Disable flow control for wcn3990 to deassert RTS while
* changing the baudrate of chip and host.
*/
- if (qca_is_wcn399x(soc_type))
+ if (qca_is_wcn399x(soc_type) ||
+ qca_is_wcn6750(soc_type))
hci_uart_set_flow_control(hu, true);
if (soc_type == QCA_WCN3990) {
@@ -1434,7 +1439,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
host_set_baudrate(hu, speed);
error:
- if (qca_is_wcn399x(soc_type))
+ if (qca_is_wcn399x(soc_type) ||
+ qca_is_wcn6750(soc_type))
hci_uart_set_flow_control(hu, false);
if (soc_type == QCA_WCN3990) {
@@ -1585,10 +1591,12 @@ static bool qca_prevent_wake(struct hci_dev *hdev)
return !wakeup;
}
-static int qca_wcn3990_init(struct hci_uart *hu)
+static int qca_regulator_init(struct hci_uart *hu)
{
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
struct qca_serdev *qcadev;
int ret;
+ bool sw_ctrl_state;
/* Check for vregs status, may be hci down has turned
* off the voltage regulator.
@@ -1607,16 +1615,33 @@ static int qca_wcn3990_init(struct hci_uart *hu)
}
}
- /* Forcefully enable wcn3990 to enter in to boot mode. */
- host_set_baudrate(hu, 2400);
- ret = qca_send_power_pulse(hu, false);
- if (ret)
- return ret;
+ if (qca_is_wcn399x(soc_type)) {
+ /* Forcefully enable wcn399x to enter in to boot mode. */
+ host_set_baudrate(hu, 2400);
+ ret = qca_send_power_pulse(hu, false);
+ if (ret)
+ return ret;
+ }
+
+ /* For wcn6750 the bt_en GPIO needs to be toggled to enable the device */
+ if (qcadev->bt_en) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ msleep(50);
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ msleep(50);
+ if (qcadev->sw_ctrl) {
+ sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
+ bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
+ }
+ }
qca_set_speed(hu, QCA_INIT_SPEED);
- ret = qca_send_power_pulse(hu, true);
- if (ret)
- return ret;
+
+ if (qca_is_wcn399x(soc_type)) {
+ ret = qca_send_power_pulse(hu, true);
+ if (ret)
+ return ret;
+ }
/* Now the device is in ready state to communicate with host.
* To sync host with device we need to reopen port.
@@ -1649,8 +1674,9 @@ static int qca_power_on(struct hci_dev *hdev)
if (!hu->serdev)
return 0;
- if (qca_is_wcn399x(soc_type)) {
- ret = qca_wcn3990_init(hu);
+ if (qca_is_wcn399x(soc_type) ||
+ qca_is_wcn6750(soc_type)) {
+ ret = qca_regulator_init(hu);
} else {
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bt_en) {
@@ -1689,7 +1715,8 @@ static int qca_setup(struct hci_uart *hu)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
bt_dev_info(hdev, "setting up %s",
- qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
+ qca_is_wcn399x(soc_type) ? "wcn399x" :
+ (soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390");
qca->memdump_state = QCA_MEMDUMP_IDLE;
@@ -1700,7 +1727,8 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
- if (qca_is_wcn399x(soc_type)) {
+ if (qca_is_wcn399x(soc_type) ||
+ qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
ret = qca_read_soc_version(hdev, &ver, soc_type);
@@ -1720,7 +1748,8 @@ retry:
qca_baudrate = qca_get_baudrate_value(speed);
}
- if (!qca_is_wcn399x(soc_type)) {
+ if (!(qca_is_wcn399x(soc_type) ||
+ qca_is_wcn6750(soc_type))) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1828,14 +1857,30 @@ static const struct qca_device_data qca_soc_data_qca6390 = {
.num_vregs = 0,
};
+static const struct qca_device_data qca_soc_data_wcn6750 = {
+ .soc_type = QCA_WCN6750,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 5000 },
+ { "vddaon", 26000 },
+ { "vddbtcxmx", 126000 },
+ { "vddrfacmn", 12500 },
+ { "vddrfa0p8", 102000 },
+ { "vddrfa1p7", 302000 },
+ { "vddrfa1p2", 257000 },
+ { "vddrfa2p2", 1700000 },
+ { "vddasd", 200 },
+ },
+ .num_vregs = 9,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
-
- qcadev = serdev_device_get_drvdata(hu->serdev);
+ bool sw_ctrl_state;
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
@@ -1852,10 +1897,20 @@ static void qca_power_shutdown(struct hci_uart *hu)
if (!hu->serdev)
return;
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+
if (qca_is_wcn399x(soc_type)) {
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
qca_regulator_disable(qcadev);
+ } else if (soc_type == QCA_WCN6750) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ msleep(100);
+ qca_regulator_disable(qcadev);
+ if (qcadev->sw_ctrl) {
+ sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
+ bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
+ }
} else if (qcadev->bt_en) {
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
@@ -1978,7 +2033,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
- if (data && qca_is_wcn399x(data->soc_type)) {
+ if (data &&
+ (qca_is_wcn399x(data->soc_type) ||
+ qca_is_wcn6750(data->soc_type))) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
@@ -1996,6 +2053,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_power->vregs_on = false;
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (!qcadev->bt_en && data->soc_type == QCA_WCN6750) {
+ dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+ power_ctrl_enabled = false;
+ }
+
+ qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
+ GPIOD_IN);
+ if (!qcadev->sw_ctrl && data->soc_type == QCA_WCN6750)
+ dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_err(&serdev->dev, "failed to acquire clk\n");
@@ -2068,7 +2137,9 @@ static void qca_serdev_remove(struct serdev_device *serdev)
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
struct qca_power *power = qcadev->bt_power;
- if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
+ if ((qca_is_wcn399x(qcadev->btsoc_type) ||
+ qca_is_wcn6750(qcadev->btsoc_type)) &&
+ power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
@@ -2244,6 +2315,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
+ { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
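
For WCN6750 the regulator path above replaces the wcn399x power pulses with a BT_EN GPIO cycle. Condensed from qca_regulator_init() in the hunks above:

    gpiod_set_value_cansleep(qcadev->bt_en, 0);
    msleep(50);                             /* hold the controller in reset */
    gpiod_set_value_cansleep(qcadev->bt_en, 1);
    msleep(50);                             /* give it time to boot */
    if (qcadev->sw_ctrl)                    /* optional status readback */
            bt_dev_dbg(hu->hdev, "SW_CTRL is %d",
                       gpiod_get_value_cansleep(qcadev->sw_ctrl));
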
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index c804db7e90f8..57908ce4fae8 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -34,6 +34,9 @@ static int virtbt_add_inbuf(struct virtio_bluetooth *vbt)
int err;
skb = alloc_skb(1000, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
sg_init_one(sg, skb->data, 1000);
err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
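
The virtio_bt fix above is the standard check-allocation-before-use pattern; without it, sg_init_one() would dereference a NULL skb whenever the allocation fails:

    skb = alloc_skb(1000, GFP_KERNEL);
    if (!skb)
            return -ENOMEM;         /* bail out instead of oopsing below */

    sg_init_one(sg, skb->data, 1000);
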
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 7c810f02a2ef..b3357a8a2fdb 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
- MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
- MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer(&mhi_pdev->health_check_timer);
+ del_timer_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
return ret;
}
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ /* We want to stop all operations because hibernation does not
+ * guarantee that the device will be in the same state as before
+ * freezing, especially if the intermediate restore kernel
+ * reinitializes the MHI device with a new context.
+ */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+ /* Reinitialize the device */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return 0;
+}
+
static const struct dev_pm_ops mhi_pci_pm_ops = {
SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mhi_pci_suspend,
+ .resume = mhi_pci_resume,
+ .freeze = mhi_pci_freeze,
+ .thaw = mhi_pci_restore,
+ .restore = mhi_pci_restore,
+#endif
};
static struct pci_driver mhi_pci_driver = {
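
SET_SYSTEM_SLEEP_PM_OPS() assigns the same suspend/resume pair to the suspend, freeze and poweroff phases, so a driver that must power the device fully down across hibernation has to fill the fields individually, as the hunk above does. A sketch with hypothetical callbacks:

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend,
                               example_runtime_resume, NULL)
    #ifdef CONFIG_PM_SLEEP
            .suspend = example_suspend, /* suspend-to-RAM keeps device context */
            .resume  = example_resume,
            .freeze  = example_freeze,  /* hibernation: power down completely */
            .thaw    = example_restore, /* restore kernel may have reset it */
            .restore = example_restore,
    #endif
    };
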
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 5fae60f8c135..38cb116ed433 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1334,6 +1334,34 @@ err_allow_idle:
return error;
}
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+ struct device *dev = ddata->dev;
+ int error;
+
+ /* Disable target module if it is enabled */
+ if (ddata->enabled) {
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+ }
+
+ /* Enable target module */
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "reinit resume failed: %i\n", error);
+
+ if (leave_enabled)
+ return error;
+
+ /* Disable the target module if leave_enabled was not set */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+ return error;
+}
+
static int __maybe_unused sysc_noirq_suspend(struct device *dev)
{
struct sysc *ddata;
@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_suspend(dev);
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = 1;
+
+ return sysc_runtime_suspend(dev);
}
static int __maybe_unused sysc_noirq_resume(struct device *dev)
{
struct sysc *ddata;
+ int error = 0;
ddata = dev_get_drvdata(dev);
@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_resume(dev);
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+ error = sysc_reinit_module(ddata, ddata->needs_resume);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ } else if (ddata->needs_resume) {
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ }
+
+ ddata->needs_resume = 0;
+
+ return error;
}
static const struct dev_pm_ops sysc_pm_ops = {
@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1459,6 +1505,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -1466,7 +1514,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_QUIRK_REINIT_ON_RESUME),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
@@ -1524,7 +1573,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index c6d8c0f59722..8e1fe75af93f 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -772,53 +772,50 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_no_mem;
}
probe_gdrom_setupcd();
- gd.disk = alloc_disk(1);
- if (!gd.disk) {
- err = -ENODEV;
- goto probe_fail_no_disk;
+
+ err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (err)
+ goto probe_fail_free_cd_info;
+
+ gd.disk = blk_mq_alloc_disk(&gd.tag_set, NULL);
+ if (IS_ERR(gd.disk)) {
+ err = PTR_ERR(gd.disk);
+ goto probe_fail_free_tag_set;
}
+ gd.gdrom_rq = gd.disk->queue;
probe_gdrom_setupdisk();
if (register_cdrom(gd.disk, gd.cd_info)) {
err = -ENODEV;
- goto probe_fail_cdrom_register;
+ goto probe_fail_cleanup_disk;
}
gd.disk->fops = &gdrom_bdops;
gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
- goto probe_fail_cmdirq_register;
-
- gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
- if (IS_ERR(gd.gdrom_rq)) {
- err = PTR_ERR(gd.gdrom_rq);
- gd.gdrom_rq = NULL;
- goto probe_fail_requestq;
- }
+ goto probe_fail_cleanup_disk;
err = probe_gdrom_setupqueue();
if (err)
- goto probe_fail_toc;
+ goto probe_fail_free_irqs;
gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
if (!gd.toc) {
err = -ENOMEM;
- goto probe_fail_toc;
+ goto probe_fail_free_irqs;
}
add_disk(gd.disk);
return 0;
-probe_fail_toc:
- blk_cleanup_queue(gd.gdrom_rq);
- blk_mq_free_tag_set(&gd.tag_set);
-probe_fail_requestq:
+probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
-probe_fail_cmdirq_register:
-probe_fail_cdrom_register:
- del_gendisk(gd.disk);
-probe_fail_no_disk:
+probe_fail_cleanup_disk:
+ blk_cleanup_disk(gd.disk);
+probe_fail_free_tag_set:
+ blk_mq_free_tag_set(&gd.tag_set);
+probe_fail_free_cd_info:
kfree(gd.cd_info);
probe_fail_no_mem:
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
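
The gdrom conversion follows the common blk-mq pattern: allocate the tag set, allocate the disk (which now carries its request queue), and unwind in reverse order of allocation on failure. A sketch with hypothetical names:

    err = blk_mq_alloc_sq_tag_set(&tag_set, &my_mq_ops, 1,
                                  BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
    if (err)
            return err;

    disk = blk_mq_alloc_disk(&tag_set, NULL);
    if (IS_ERR(disk)) {
            err = PTR_ERR(disk);
            goto free_tag_set;      /* reverse order of allocation */
    }
    queue = disk->queue;            /* the queue comes with the disk */
    /* ... register and use the disk ... */
    return 0;

    free_tag_set:
            blk_mq_free_tag_set(&tag_set);
            return err;
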
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1fe006f3f12f..c11f12d4ab53 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,17 +165,17 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
default HW_RANDOM
help
- This driver provides kernel-side support for the Random Number
+ This driver provides kernel-side support for the Random Number
Generator hardware found on OMAP16xx, OMAP2/3/4/5, AM33xx/AM43xx
multimedia processors, and Marvell Armada 7k/8k SoCs.
To compile this driver as a module, choose M here: the
module will be called omap-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
@@ -485,13 +485,13 @@ config HW_RANDOM_NPCM
depends on ARCH_NPCM || COMPILE_TEST
default HW_RANDOM
help
- This driver provides support for the Random Number
+ This driver provides support for the Random Number
Generator hardware available in Nuvoton NPCM SoCs.
To compile this driver as a module, choose M here: the
module will be called npcm-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_KEYSTONE
depends on ARCH_KEYSTONE || COMPILE_TEST
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 9959c762da2f..d8d4ef5214a1 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -126,7 +126,7 @@ static struct hwrng amd_rng = {
static int __init mod_init(void)
{
- int err = -ENODEV;
+ int err;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
u32 pmbase;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index adb3c2bd7783..a3db27916256 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -319,11 +319,11 @@ static int enable_best_rng(void)
return ret;
}
-static ssize_t hwrng_attr_current_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t rng_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- int err = -ENODEV;
+ int err;
struct hwrng *rng, *old_rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
@@ -354,9 +354,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
return err ? : len;
}
-static ssize_t hwrng_attr_current_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
ssize_t ret;
struct hwrng *rng;
@@ -371,9 +371,9 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
return ret;
}
-static ssize_t hwrng_attr_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int err;
struct hwrng *rng;
@@ -392,22 +392,16 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
-static ssize_t hwrng_attr_selected_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
-static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
- hwrng_attr_current_show,
- hwrng_attr_current_store);
-static DEVICE_ATTR(rng_available, S_IRUGO,
- hwrng_attr_available_show,
- NULL);
-static DEVICE_ATTR(rng_selected, S_IRUGO,
- hwrng_attr_selected_show,
- NULL);
+static DEVICE_ATTR_RW(rng_current);
+static DEVICE_ATTR_RO(rng_available);
+static DEVICE_ATTR_RO(rng_selected);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
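
DEVICE_ATTR_RW(name) defines dev_attr_name with mode 0644 and wires it to functions named name_show and name_store by convention, which is why the hwrng callbacks are renamed before the open-coded DEVICE_ATTR() calls can be replaced. In outline:

    /* DEVICE_ATTR_RW(rng_current) expects these exact names in scope */
    static ssize_t rng_current_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
    static ssize_t rng_current_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t len);

    static DEVICE_ATTR_RW(rng_current); /* defines dev_attr_rng_current */
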
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 8e1fe3f8dd2d..9cc3d542dd0f 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->mem);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Could not get runtime PM.\n");
goto err_pm_get;
@@ -165,7 +165,7 @@ err_register:
clk_disable_unprepare(trng->clk);
err_clock:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
err_pm_get:
pm_runtime_disable(&pdev->dev);
@@ -196,10 +196,9 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
{
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Could not get runtime PM.\n");
- pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 8f1d47ff9799..2f2f21f1b659 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -241,10 +241,9 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index cede9f159102..00ff96703dd2 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -454,10 +454,9 @@ static int omap_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
}
@@ -543,10 +542,9 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
struct omap_rng_dev *priv = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 07847d9a459a..249b31197eea 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -124,6 +124,33 @@ config NPCM7XX_KCS_IPMI_BMC
This support is also available as a module. If so, the module
will be called kcs_bmc_npcm7xx.
+config IPMI_KCS_BMC_CDEV_IPMI
+ depends on IPMI_KCS_BMC
+ tristate "IPMI character device interface for BMC KCS devices"
+ help
+ Provides a BMC-side character device implementing IPMI
+ semantics for KCS IPMI devices.
+
+ Say YES if you wish to expose KCS devices on the BMC for IPMI
+ purposes.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_cdev_ipmi.
+
+config IPMI_KCS_BMC_SERIO
+ depends on IPMI_KCS_BMC && SERIO
+ tristate "SerIO adaptor for BMC KCS devices"
+ help
+ Adapts the BMC KCS device for the SerIO subsystem. This allows users
+ to take advantage of userspace interfaces provided by SerIO where
+ appropriate.
+
+ Say YES if you wish to expose KCS devices on the BMC via SerIO
+ interfaces.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_serio.
+
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 0822adc2ec41..84f47d18007f 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o
+obj-$(CONFIG_IPMI_KCS_BMC_SERIO) += kcs_bmc_serio.o
+obj-$(CONFIG_IPMI_KCS_BMC_CDEV_IPMI) += kcs_bmc_cdev_ipmi.o
obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 32c334e34d55..e4ff3b50de7f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -371,16 +371,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
- if ((ipmi_version_major > 1)
- || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
- /* This is an IPMI 1.5-only feature. */
- data[0] |= WDOG_DONT_STOP_ON_SET;
- } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /*
- * In ipmi 1.0, setting the timer stops the watchdog, we
- * need to start it back up again.
- */
- hbnow = 1;
+ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ if ((ipmi_version_major > 1) ||
+ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else {
+ /*
+ * In ipmi 1.0, setting the timer stops the watchdog, we
+ * need to start it back up again.
+ */
+ hbnow = 1;
+ }
}
data[1] = 0;
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index f292e74bd4a5..03d02a848f3a 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -1,458 +1,189 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015-2018, Intel Corporation.
+ * Copyright (c) 2021, IBM Corp.
*/
-#define pr_fmt(fmt) "kcs-bmc: " fmt
-
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/ipmi_bmc.h>
+#include <linux/device.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/mutex.h>
#include "kcs_bmc.h"
-#define DEVICE_NAME "ipmi-kcs"
-
-#define KCS_MSG_BUFSIZ 1000
-
-#define KCS_ZERO_DATA 0
+/* Implement both the device and client interfaces here */
+#include "kcs_bmc_device.h"
+#include "kcs_bmc_client.h"
+/* Record registered devices and drivers */
+static DEFINE_MUTEX(kcs_bmc_lock);
+static LIST_HEAD(kcs_bmc_devices);
+static LIST_HEAD(kcs_bmc_drivers);
-/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
-#define KCS_STATUS_STATE(state) (state << 6)
-#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
-#define KCS_STATUS_CMD_DAT BIT(3)
-#define KCS_STATUS_SMS_ATN BIT(2)
-#define KCS_STATUS_IBF BIT(1)
-#define KCS_STATUS_OBF BIT(0)
+/* Consumer data access */
-/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
-enum kcs_states {
- IDLE_STATE = 0,
- READ_STATE = 1,
- WRITE_STATE = 2,
- ERROR_STATE = 3,
-};
-
-/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
-#define KCS_CMD_GET_STATUS_ABORT 0x60
-#define KCS_CMD_WRITE_START 0x61
-#define KCS_CMD_WRITE_END 0x62
-#define KCS_CMD_READ_BYTE 0x68
-
-static inline u8 read_data(struct kcs_bmc *kcs_bmc)
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc)
{
- return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
}
+EXPORT_SYMBOL(kcs_bmc_read_data);
-static inline void write_data(struct kcs_bmc *kcs_bmc, u8 data)
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data)
{
- kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
}
+EXPORT_SYMBOL(kcs_bmc_write_data);
-static inline u8 read_status(struct kcs_bmc *kcs_bmc)
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc)
{
- return kcs_bmc->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
}
+EXPORT_SYMBOL(kcs_bmc_read_status);
-static inline void write_status(struct kcs_bmc *kcs_bmc, u8 data)
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data)
{
- kcs_bmc->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
}
+EXPORT_SYMBOL(kcs_bmc_write_status);
-static void update_status_bits(struct kcs_bmc *kcs_bmc, u8 mask, u8 val)
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val)
{
- u8 tmp = read_status(kcs_bmc);
-
- tmp &= ~mask;
- tmp |= val & mask;
-
- write_status(kcs_bmc, tmp);
+ kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val);
}
+EXPORT_SYMBOL(kcs_bmc_update_status);
-static inline void set_state(struct kcs_bmc *kcs_bmc, u8 state)
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc)
{
- update_status_bits(kcs_bmc, KCS_STATUS_STATE_MASK,
- KCS_STATUS_STATE(state));
-}
+ struct kcs_bmc_client *client;
+ irqreturn_t rc = IRQ_NONE;
-static void kcs_force_abort(struct kcs_bmc *kcs_bmc)
-{
- set_state(kcs_bmc, ERROR_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, KCS_ZERO_DATA);
+ spin_lock(&kcs_bmc->lock);
+ client = kcs_bmc->client;
+ if (client)
+ rc = client->ops->event(client);
+ spin_unlock(&kcs_bmc->lock);
- kcs_bmc->phase = KCS_PHASE_ERROR;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
-}
-
-static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc)
-{
- u8 data;
-
- switch (kcs_bmc->phase) {
- case KCS_PHASE_WRITE_START:
- kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
- fallthrough;
-
- case KCS_PHASE_WRITE_DATA:
- if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
- set_state(kcs_bmc, WRITE_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
- read_data(kcs_bmc);
- } else {
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_LENGTH_ERROR;
- }
- break;
-
- case KCS_PHASE_WRITE_END_CMD:
- if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
- set_state(kcs_bmc, READ_STATE);
- kcs_bmc->data_in[kcs_bmc->data_in_idx++] =
- read_data(kcs_bmc);
- kcs_bmc->phase = KCS_PHASE_WRITE_DONE;
- kcs_bmc->data_in_avail = true;
- wake_up_interruptible(&kcs_bmc->queue);
- } else {
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_LENGTH_ERROR;
- }
- break;
-
- case KCS_PHASE_READ:
- if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len)
- set_state(kcs_bmc, IDLE_STATE);
-
- data = read_data(kcs_bmc);
- if (data != KCS_CMD_READ_BYTE) {
- set_state(kcs_bmc, ERROR_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- break;
- }
-
- if (kcs_bmc->data_out_idx == kcs_bmc->data_out_len) {
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->phase = KCS_PHASE_IDLE;
- break;
- }
-
- write_data(kcs_bmc,
- kcs_bmc->data_out[kcs_bmc->data_out_idx++]);
- break;
-
- case KCS_PHASE_ABORT_ERROR1:
- set_state(kcs_bmc, READ_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, kcs_bmc->error);
- kcs_bmc->phase = KCS_PHASE_ABORT_ERROR2;
- break;
-
- case KCS_PHASE_ABORT_ERROR2:
- set_state(kcs_bmc, IDLE_STATE);
- read_data(kcs_bmc);
- write_data(kcs_bmc, KCS_ZERO_DATA);
- kcs_bmc->phase = KCS_PHASE_IDLE;
- break;
-
- default:
- kcs_force_abort(kcs_bmc);
- break;
- }
-}
-
-static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
-{
- u8 cmd;
-
- set_state(kcs_bmc, WRITE_STATE);
- write_data(kcs_bmc, KCS_ZERO_DATA);
-
- cmd = read_data(kcs_bmc);
- switch (cmd) {
- case KCS_CMD_WRITE_START:
- kcs_bmc->phase = KCS_PHASE_WRITE_START;
- kcs_bmc->error = KCS_NO_ERROR;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- break;
-
- case KCS_CMD_WRITE_END:
- if (kcs_bmc->phase != KCS_PHASE_WRITE_DATA) {
- kcs_force_abort(kcs_bmc);
- break;
- }
-
- kcs_bmc->phase = KCS_PHASE_WRITE_END_CMD;
- break;
-
- case KCS_CMD_GET_STATUS_ABORT:
- if (kcs_bmc->error == KCS_NO_ERROR)
- kcs_bmc->error = KCS_ABORTED_BY_COMMAND;
-
- kcs_bmc->phase = KCS_PHASE_ABORT_ERROR1;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- break;
-
- default:
- kcs_force_abort(kcs_bmc);
- kcs_bmc->error = KCS_ILLEGAL_CONTROL_CODE;
- break;
- }
-}
-
-int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
-{
- unsigned long flags;
- int ret = -ENODATA;
- u8 status;
-
- spin_lock_irqsave(&kcs_bmc->lock, flags);
-
- status = read_status(kcs_bmc);
- if (status & KCS_STATUS_IBF) {
- if (!kcs_bmc->running)
- kcs_force_abort(kcs_bmc);
- else if (status & KCS_STATUS_CMD_DAT)
- kcs_bmc_handle_cmd(kcs_bmc);
- else
- kcs_bmc_handle_data(kcs_bmc);
-
- ret = 0;
- }
-
- spin_unlock_irqrestore(&kcs_bmc->lock, flags);
-
- return ret;
+ return rc;
}
EXPORT_SYMBOL(kcs_bmc_handle_event);
-static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
-{
- return container_of(filp->private_data, struct kcs_bmc, miscdev);
-}
-
-static int kcs_bmc_open(struct inode *inode, struct file *filp)
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- int ret = 0;
+ int rc;
spin_lock_irq(&kcs_bmc->lock);
- if (!kcs_bmc->running)
- kcs_bmc->running = 1;
- else
- ret = -EBUSY;
- spin_unlock_irq(&kcs_bmc->lock);
-
- return ret;
-}
-
-static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
-{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- __poll_t mask = 0;
-
- poll_wait(filp, &kcs_bmc->queue, wait);
+ if (kcs_bmc->client) {
+ rc = -EBUSY;
+ } else {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF;
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->data_in_avail)
- mask |= EPOLLIN;
+ kcs_bmc->client = client;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, mask);
+ rc = 0;
+ }
spin_unlock_irq(&kcs_bmc->lock);
- return mask;
+ return rc;
}
+EXPORT_SYMBOL(kcs_bmc_enable_device);
-static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- bool data_avail;
- size_t data_len;
- ssize_t ret;
-
- if (!(filp->f_flags & O_NONBLOCK))
- wait_event_interruptible(kcs_bmc->queue,
- kcs_bmc->data_in_avail);
-
- mutex_lock(&kcs_bmc->mutex);
-
spin_lock_irq(&kcs_bmc->lock);
- data_avail = kcs_bmc->data_in_avail;
- if (data_avail) {
- data_len = kcs_bmc->data_in_idx;
- memcpy(kcs_bmc->kbuffer, kcs_bmc->data_in, data_len);
- }
- spin_unlock_irq(&kcs_bmc->lock);
-
- if (!data_avail) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- if (count < data_len) {
- pr_err("channel=%u with too large data : %zu\n",
- kcs_bmc->channel, data_len);
-
- spin_lock_irq(&kcs_bmc->lock);
- kcs_force_abort(kcs_bmc);
- spin_unlock_irq(&kcs_bmc->lock);
+ if (client == kcs_bmc->client) {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE;
- ret = -EOVERFLOW;
- goto out_unlock;
- }
-
- if (copy_to_user(buf, kcs_bmc->kbuffer, data_len)) {
- ret = -EFAULT;
- goto out_unlock;
- }
-
- ret = data_len;
-
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->phase == KCS_PHASE_WRITE_DONE) {
- kcs_bmc->phase = KCS_PHASE_WAIT_READ;
- kcs_bmc->data_in_avail = false;
- kcs_bmc->data_in_idx = 0;
- } else {
- ret = -EAGAIN;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, 0);
+ kcs_bmc->client = NULL;
}
spin_unlock_irq(&kcs_bmc->lock);
-
-out_unlock:
- mutex_unlock(&kcs_bmc->mutex);
-
- return ret;
}
+EXPORT_SYMBOL(kcs_bmc_disable_device);
-static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- ssize_t ret;
-
- /* a minimum response size '3' : netfn + cmd + ccode */
- if (count < 3 || count > KCS_MSG_BUFSIZ)
- return -EINVAL;
-
- mutex_lock(&kcs_bmc->mutex);
-
- if (copy_from_user(kcs_bmc->kbuffer, buf, count)) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ struct kcs_bmc_driver *drv;
+ int error = 0;
+ int rc;
- spin_lock_irq(&kcs_bmc->lock);
- if (kcs_bmc->phase == KCS_PHASE_WAIT_READ) {
- kcs_bmc->phase = KCS_PHASE_READ;
- kcs_bmc->data_out_idx = 1;
- kcs_bmc->data_out_len = count;
- memcpy(kcs_bmc->data_out, kcs_bmc->kbuffer, count);
- write_data(kcs_bmc, kcs_bmc->data_out[0]);
- ret = count;
- } else {
- ret = -EINVAL;
+ spin_lock_init(&kcs_bmc->lock);
+ kcs_bmc->client = NULL;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&kcs_bmc->entry, &kcs_bmc_devices);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (!rc)
+ continue;
+
+ dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ error = rc;
}
- spin_unlock_irq(&kcs_bmc->lock);
-
-out_unlock:
- mutex_unlock(&kcs_bmc->mutex);
+ mutex_unlock(&kcs_bmc_lock);
- return ret;
+ return error;
}
+EXPORT_SYMBOL(kcs_bmc_add_device);
-static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
- long ret = 0;
+ struct kcs_bmc_driver *drv;
+ int rc;
- spin_lock_irq(&kcs_bmc->lock);
-
- switch (cmd) {
- case IPMI_BMC_IOCTL_SET_SMS_ATN:
- update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
- KCS_STATUS_SMS_ATN);
- break;
-
- case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
- update_status_bits(kcs_bmc, KCS_STATUS_SMS_ATN,
- 0);
- break;
-
- case IPMI_BMC_IOCTL_FORCE_ABORT:
- kcs_force_abort(kcs_bmc);
- break;
-
- default:
- ret = -EINVAL;
- break;
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&kcs_bmc->entry);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
}
-
- spin_unlock_irq(&kcs_bmc->lock);
-
- return ret;
+ mutex_unlock(&kcs_bmc_lock);
}
+EXPORT_SYMBOL(kcs_bmc_remove_device);
-static int kcs_bmc_release(struct inode *inode, struct file *filp)
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv)
{
- struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
-
- spin_lock_irq(&kcs_bmc->lock);
- kcs_bmc->running = 0;
- kcs_force_abort(kcs_bmc);
- spin_unlock_irq(&kcs_bmc->lock);
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
- return 0;
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&drv->entry, &kcs_bmc_drivers);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
}
+EXPORT_SYMBOL(kcs_bmc_register_driver);
-static const struct file_operations kcs_bmc_fops = {
- .owner = THIS_MODULE,
- .open = kcs_bmc_open,
- .read = kcs_bmc_read,
- .write = kcs_bmc_write,
- .release = kcs_bmc_release,
- .poll = kcs_bmc_poll,
- .unlocked_ioctl = kcs_bmc_ioctl,
-};
-
-struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv)
{
- struct kcs_bmc *kcs_bmc;
-
- kcs_bmc = devm_kzalloc(dev, sizeof(*kcs_bmc) + sizeof_priv, GFP_KERNEL);
- if (!kcs_bmc)
- return NULL;
-
- spin_lock_init(&kcs_bmc->lock);
- kcs_bmc->channel = channel;
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
- mutex_init(&kcs_bmc->mutex);
- init_waitqueue_head(&kcs_bmc->queue);
-
- kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
- kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
- kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
-
- kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
- kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
- DEVICE_NAME, channel);
- if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer ||
- !kcs_bmc->miscdev.name)
- return NULL;
- kcs_bmc->miscdev.fops = &kcs_bmc_fops;
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&drv->entry);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_unregister_driver);
- return kcs_bmc;
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events)
+{
+ kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events);
}
-EXPORT_SYMBOL(kcs_bmc_alloc);
+EXPORT_SYMBOL(kcs_bmc_update_event_mask);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
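
With the rewrite above, kcs_bmc.c is reduced to a registration core: devices and drivers land on two lists under kcs_bmc_lock, and at most one client may attach to a device at a time. A minimal client sketch against the new interface; the ops and client field names are assumed from kcs_bmc_client.h:

    static irqreturn_t my_event(struct kcs_bmc_client *client)
    {
            /* consume the event via kcs_bmc_read_status()/kcs_bmc_read_data() */
            return IRQ_HANDLED;
    }

    static const struct kcs_bmc_client_ops my_ops = {
            .event = my_event,
    };

    static struct kcs_bmc_client my_client = {
            .ops = &my_ops,         /* .dev assumed set before attaching */
    };

    /* attaching fails with -EBUSY if another client owns the device */
    err = kcs_bmc_enable_device(kcs_bmc, &my_client);
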
diff --git a/drivers/char/ipmi/kcs_bmc.h b/drivers/char/ipmi/kcs_bmc.h
index eb9ea4ce78b8..fa408b802c79 100644
--- a/drivers/char/ipmi/kcs_bmc.h
+++ b/drivers/char/ipmi/kcs_bmc.h
@@ -6,54 +6,14 @@
#ifndef __KCS_BMC_H__
#define __KCS_BMC_H__
-#include <linux/miscdevice.h>
+#include <linux/list.h>
-/* Different phases of the KCS BMC module.
- * KCS_PHASE_IDLE:
- * BMC should not be expecting nor sending any data.
- * KCS_PHASE_WRITE_START:
- * BMC is receiving a WRITE_START command from system software.
- * KCS_PHASE_WRITE_DATA:
- * BMC is receiving a data byte from system software.
- * KCS_PHASE_WRITE_END_CMD:
- * BMC is waiting a last data byte from system software.
- * KCS_PHASE_WRITE_DONE:
- * BMC has received the whole request from system software.
- * KCS_PHASE_WAIT_READ:
- * BMC is waiting the response from the upper IPMI service.
- * KCS_PHASE_READ:
- * BMC is transferring the response to system software.
- * KCS_PHASE_ABORT_ERROR1:
- * BMC is waiting error status request from system software.
- * KCS_PHASE_ABORT_ERROR2:
- * BMC is waiting for idle status afer error from system software.
- * KCS_PHASE_ERROR:
- * BMC has detected a protocol violation at the interface level.
- */
-enum kcs_phases {
- KCS_PHASE_IDLE,
-
- KCS_PHASE_WRITE_START,
- KCS_PHASE_WRITE_DATA,
- KCS_PHASE_WRITE_END_CMD,
- KCS_PHASE_WRITE_DONE,
+#define KCS_BMC_EVENT_TYPE_OBE BIT(0)
+#define KCS_BMC_EVENT_TYPE_IBF BIT(1)
- KCS_PHASE_WAIT_READ,
- KCS_PHASE_READ,
-
- KCS_PHASE_ABORT_ERROR1,
- KCS_PHASE_ABORT_ERROR2,
- KCS_PHASE_ERROR
-};
-
-/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
-enum kcs_errors {
- KCS_NO_ERROR = 0x00,
- KCS_ABORTED_BY_COMMAND = 0x01,
- KCS_ILLEGAL_CONTROL_CODE = 0x02,
- KCS_LENGTH_ERROR = 0x06,
- KCS_UNSPECIFIED_ERROR = 0xFF
-};
+#define KCS_BMC_STR_OBF BIT(0)
+#define KCS_BMC_STR_IBF BIT(1)
+#define KCS_BMC_STR_CMD_DAT BIT(3)
/* IPMI 2.0 - 9.5, KCS Interface Registers
* @idr: Input Data Register
@@ -66,43 +26,21 @@ struct kcs_ioreg {
u32 str;
};
-struct kcs_bmc {
- spinlock_t lock;
+struct kcs_bmc_device_ops;
+struct kcs_bmc_client;
+struct kcs_bmc_device {
+ struct list_head entry;
+
+ struct device *dev;
u32 channel;
- int running;
- /* Setup by BMC KCS controller driver */
struct kcs_ioreg ioreg;
- u8 (*io_inputb)(struct kcs_bmc *kcs_bmc, u32 reg);
- void (*io_outputb)(struct kcs_bmc *kcs_bmc, u32 reg, u8 b);
-
- enum kcs_phases phase;
- enum kcs_errors error;
-
- wait_queue_head_t queue;
- bool data_in_avail;
- int data_in_idx;
- u8 *data_in;
-
- int data_out_idx;
- int data_out_len;
- u8 *data_out;
- struct mutex mutex;
- u8 *kbuffer;
+ const struct kcs_bmc_device_ops *ops;
- struct miscdevice miscdev;
-
- unsigned long priv[];
+ spinlock_t lock;
+ struct kcs_bmc_client *client;
};
-static inline void *kcs_bmc_priv(struct kcs_bmc *kcs_bmc)
-{
- return kcs_bmc->priv;
-}
-
-int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc);
-struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv,
- u32 channel);
#endif /* __KCS_BMC_H__ */
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index eefe362f65f0..92a37b33494c 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -9,10 +9,12 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -20,24 +22,53 @@
#include <linux/slab.h>
#include <linux/timer.h>
-#include "kcs_bmc.h"
+#include "kcs_bmc_device.h"
#define DEVICE_NAME "ast-kcs-bmc"
#define KCS_CHANNEL_MAX 4
+/*
+ * Field class descriptions
+ *
+ * LPCyE Enable LPC channel y
+ * IBFIEy Input Buffer Full IRQ Enable for LPC channel y
+ * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
+ * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y
+ * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1)
+ * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y
+ */
+
+#define LPC_TYIRQX_LOW 0b00
+#define LPC_TYIRQX_HIGH 0b01
+#define LPC_TYIRQX_RSVD 0b10
+#define LPC_TYIRQX_RISING 0b11
+
#define LPC_HICR0 0x000
#define LPC_HICR0_LPC3E BIT(7)
#define LPC_HICR0_LPC2E BIT(6)
#define LPC_HICR0_LPC1E BIT(5)
#define LPC_HICR2 0x008
-#define LPC_HICR2_IBFIF3 BIT(3)
-#define LPC_HICR2_IBFIF2 BIT(2)
-#define LPC_HICR2_IBFIF1 BIT(1)
+#define LPC_HICR2_IBFIE3 BIT(3)
+#define LPC_HICR2_IBFIE2 BIT(2)
+#define LPC_HICR2_IBFIE1 BIT(1)
#define LPC_HICR4 0x010
#define LPC_HICR4_LADR12AS BIT(7)
#define LPC_HICR4_KCSENBL BIT(2)
+#define LPC_SIRQCR0 0x070
+/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
+#define LPC_SIRQCR0_IRQ12E1 BIT(1)
+#define LPC_SIRQCR0_IRQ1E1 BIT(0)
+#define LPC_HICR5 0x080
+#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
+#define LPC_HICR5_ID3IRQX_SHIFT 20
+#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
+#define LPC_HICR5_ID2IRQX_SHIFT 16
+#define LPC_HICR5_SEL3IRQX BIT(15)
+#define LPC_HICR5_IRQXE3 BIT(14)
+#define LPC_HICR5_SEL2IRQX BIT(13)
+#define LPC_HICR5_IRQXE2 BIT(12)
#define LPC_LADR3H 0x014
#define LPC_LADR3L 0x018
#define LPC_LADR12H 0x01C
@@ -52,21 +83,64 @@
#define LPC_STR2 0x040
#define LPC_STR3 0x044
#define LPC_HICRB 0x100
-#define LPC_HICRB_IBFIF4 BIT(1)
+#define LPC_HICRB_EN16LADR2 BIT(5)
+#define LPC_HICRB_EN16LADR1 BIT(4)
+#define LPC_HICRB_IBFIE4 BIT(1)
#define LPC_HICRB_LPC4E BIT(0)
+#define LPC_HICRC 0x104
+#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
+#define LPC_HICRC_ID4IRQX_SHIFT 4
+#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
+#define LPC_HICRC_TY4IRQX_SHIFT 2
+#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
+#define LPC_HICRC_IRQXE4 BIT(0)
#define LPC_LADR4 0x110
#define LPC_IDR4 0x114
#define LPC_ODR4 0x118
#define LPC_STR4 0x11C
+#define LPC_LSADR12 0x120
+#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
+#define LPC_LSADR12_LSADR2_SHIFT 16
+#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
+#define LPC_LSADR12_LSADR1_SHIFT 0
+
+#define OBE_POLL_PERIOD (HZ / 2)
+
+enum aspeed_kcs_irq_mode {
+ aspeed_kcs_irq_none,
+ aspeed_kcs_irq_serirq,
+};
struct aspeed_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
struct regmap *map;
+
+ struct {
+ enum aspeed_kcs_irq_mode mode;
+ int id;
+ } upstream_irq;
+
+ struct {
+ spinlock_t lock;
+ bool remove;
+ struct timer_list timer;
+ } obe;
};
+struct aspeed_kcs_of_ops {
+ int (*get_channel)(struct platform_device *pdev);
+ int (*get_io_address)(struct platform_device *pdev, u32 addrs[2]);
+};
-static u8 aspeed_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
+}
+
+static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
u32 val = 0;
int rc;
@@ -76,15 +150,66 @@ static u8 aspeed_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
return rc == 0 ? (u8) val : 0;
}
-static void aspeed_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
int rc;
rc = regmap_write(priv->map, reg, data);
WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+
+ /* Trigger the upstream IRQ on ODR writes, if enabled */
+
+ switch (reg) {
+ case LPC_ODR1:
+ case LPC_ODR2:
+ case LPC_ODR3:
+ case LPC_ODR4:
+ break;
+ default:
+ return;
+ }
+
+ if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
+ return;
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ switch (priv->upstream_irq.id) {
+ case 12:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
+ LPC_SIRQCR0_IRQ12E1);
+ break;
+ case 1:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
+ LPC_SIRQCR0_IRQ1E1);
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
+ break;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
+ break;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
+ break;
+ default:
+ break;
+ }
}
+static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, val);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
/*
* AST_usrGuide_KCS.pdf
@@ -99,118 +224,237 @@ static void aspeed_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
* C. KCS4
* D / C : CA4h / CA5h
*/
-static void aspeed_kcs_set_address(struct kcs_bmc *kcs_bmc, u16 addr)
+static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
- switch (kcs_bmc->channel) {
+ if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
+ return -EINVAL;
+
+ switch (priv->kcs_bmc.channel) {
case 1:
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_LADR12AS, 0);
- regmap_write(priv->map, LPC_LADR12H, addr >> 8);
- regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
+ addrs[1] << LPC_LSADR12_LSADR1_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
+ LPC_HICRB_EN16LADR1);
+ }
break;
case 2:
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
- regmap_write(priv->map, LPC_LADR12H, addr >> 8);
- regmap_write(priv->map, LPC_LADR12L, addr & 0xFF);
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
+ addrs[1] << LPC_LSADR12_LSADR2_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
+ LPC_HICRB_EN16LADR2);
+ }
break;
case 3:
- regmap_write(priv->map, LPC_LADR3H, addr >> 8);
- regmap_write(priv->map, LPC_LADR3L, addr & 0xFF);
+ if (nr_addrs == 2) {
+ dev_err(priv->kcs_bmc.dev,
+ "Channel 3 only supports inferred status IO address\n");
+ return -EINVAL;
+ }
+
+ regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
break;
case 4:
- regmap_write(priv->map, LPC_LADR4, ((addr + 1) << 16) |
- addr);
+ if (nr_addrs == 1)
+ regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
+ else
+ regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
+
break;
default:
- break;
+ return -EINVAL;
}
+
+ return 0;
}
-static void aspeed_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
{
- struct aspeed_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ switch (dt_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ return LPC_TYIRQX_RISING;
+ case IRQ_TYPE_LEVEL_HIGH:
+ return LPC_TYIRQX_HIGH;
+ case IRQ_TYPE_LEVEL_LOW:
+ return LPC_TYIRQX_LOW;
+ default:
+ return -EINVAL;
+ }
+}
- switch (kcs_bmc->channel) {
+static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
+{
+ unsigned int mask, val, hw_type;
+ int ret;
+
+ if (id > 15)
+ return -EINVAL;
+
+ ret = aspeed_kcs_map_serirq_type(dt_type);
+ if (ret < 0)
+ return ret;
+ hw_type = ret;
+
+ priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
+ priv->upstream_irq.id = id;
+
+ switch (priv->kcs_bmc.channel) {
case 1:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC1E, LPC_HICR0_LPC1E);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC1E, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF1, 0);
- }
+ /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
break;
-
case 2:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC2E, LPC_HICR0_LPC2E);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC2E, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF2, 0);
- }
- break;
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
+ val = (id << LPC_HICR5_ID2IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
- case 3:
- if (enable) {
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3);
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC3E, LPC_HICR0_LPC3E);
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL);
- } else {
- regmap_update_bits(priv->map, LPC_HICR0,
- LPC_HICR0_LPC3E, 0);
- regmap_update_bits(priv->map, LPC_HICR4,
- LPC_HICR4_KCSENBL, 0);
- regmap_update_bits(priv->map, LPC_HICR2,
- LPC_HICR2_IBFIF3, 0);
- }
break;
+ case 3:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
+ val = (id << LPC_HICR5_ID3IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+ break;
case 4:
- if (enable)
- regmap_update_bits(priv->map, LPC_HICRB,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E);
- else
- regmap_update_bits(priv->map, LPC_HICRB,
- LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
- 0);
+ mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
+ val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
+ regmap_update_bits(priv->map, LPC_HICRC, mask, val);
break;
+ default:
+ dev_warn(priv->kcs_bmc.dev,
+ "SerIRQ configuration not supported on KCS channel %d\n",
+ priv->kcs_bmc.channel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
+ regmap_update_bits(priv->map, LPC_HICR4,
+ LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
+ return;
default:
- break;
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
}
}
-static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+static void aspeed_kcs_check_obe(struct timer_list *timer)
+{
+ struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
+ unsigned long flags;
+ u8 str;
+
+ spin_lock_irqsave(&priv->obe.lock, flags);
+ if (priv->obe.remove) {
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+
+ str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ if (str & KCS_BMC_STR_OBF) {
+ mod_timer(timer, jiffies + OBE_POLL_PERIOD);
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+
+ kcs_bmc_handle_event(&priv->kcs_bmc);
+}
+
+static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
- struct kcs_bmc *kcs_bmc = arg;
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
- if (!kcs_bmc_handle_event(kcs_bmc))
- return IRQ_HANDLED;
+ /* We don't have an OBE IRQ; emulate it */
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+ if (KCS_BMC_EVENT_TYPE_OBE & state)
+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+ else
+ del_timer(&priv->obe.timer);
+ }
- return IRQ_NONE;
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+ const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
+ enable * LPC_HICR2_IBFIE1);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
+ enable * LPC_HICR2_IBFIE2);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
+ enable * LPC_HICR2_IBFIE3);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
+ enable * LPC_HICRB_IBFIE4);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+ }
}
-static int aspeed_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
+ .irq_mask_update = aspeed_kcs_irq_mask_update,
+ .io_inputb = aspeed_kcs_inb,
+ .io_outputb = aspeed_kcs_outb,
+ .io_updateb = aspeed_kcs_updateb,
+};
+
+static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -231,13 +475,10 @@ static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
-static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
+static int aspeed_kcs_of_v1_get_channel(struct platform_device *pdev)
{
- struct aspeed_kcs_bmc *priv;
struct device_node *np;
- struct kcs_bmc *kcs;
u32 channel;
- u32 slave;
int rc;
np = pdev->dev.of_node;
@@ -245,166 +486,213 @@ static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
rc = of_property_read_u32(np, "kcs_chan", &channel);
if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
- return ERR_PTR(-EINVAL);
- }
-
- kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
- if (!kcs)
- return ERR_PTR(-ENOMEM);
-
- priv = kcs_bmc_priv(kcs);
- priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(priv->map)) {
- dev_err(&pdev->dev, "Couldn't get regmap\n");
- return ERR_PTR(-ENODEV);
- }
-
- rc = of_property_read_u32(np, "kcs_addr", &slave);
- if (rc) {
- dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- kcs->ioreg = ast_kcs_bmc_ioregs[channel - 1];
- aspeed_kcs_set_address(kcs, slave);
-
- return kcs;
+ return channel;
}
-static int aspeed_kcs_calculate_channel(const struct kcs_ioreg *regs)
+static int
+aspeed_kcs_of_v1_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
- int i;
+ int rc;
- for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
- if (!memcmp(&ast_kcs_bmc_ioregs[i], regs, sizeof(*regs)))
- return i + 1;
+ rc = of_property_read_u32(pdev->dev.of_node, "kcs_addr", addrs);
+ if (rc || addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
+ return -EINVAL;
}
- return -EINVAL;
+ return 1;
}
-static struct kcs_bmc *aspeed_kcs_probe_of_v2(struct platform_device *pdev)
+static int aspeed_kcs_of_v2_get_channel(struct platform_device *pdev)
{
- struct aspeed_kcs_bmc *priv;
struct device_node *np;
struct kcs_ioreg ioreg;
- struct kcs_bmc *kcs;
const __be32 *reg;
- int channel;
- u32 slave;
- int rc;
+ int i;
np = pdev->dev.of_node;
/* Don't translate addresses, we want offsets for the regmaps */
reg = of_get_address(np, 0, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.idr = be32_to_cpup(reg);
reg = of_get_address(np, 1, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.odr = be32_to_cpup(reg);
reg = of_get_address(np, 2, NULL, NULL);
if (!reg)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ioreg.str = be32_to_cpup(reg);
- channel = aspeed_kcs_calculate_channel(&ioreg);
- if (channel < 0)
- return ERR_PTR(channel);
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
+ return i + 1;
+ }
- kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
- if (!kcs)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
+}
- kcs->ioreg = ioreg;
+static int
+aspeed_kcs_of_v2_get_io_address(struct platform_device *pdev, u32 addrs[2])
+{
+ int rc;
- priv = kcs_bmc_priv(kcs);
- priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(priv->map)) {
- dev_err(&pdev->dev, "Couldn't get regmap\n");
- return ERR_PTR(-ENODEV);
+ rc = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "aspeed,lpc-io-reg",
+ addrs, 1, 2);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
+ return rc;
}
- rc = of_property_read_u32(np, "aspeed,lpc-io-reg", &slave);
- if (rc)
- return ERR_PTR(rc);
+ if (addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
- aspeed_kcs_set_address(kcs, slave);
+ if (rc == 2 && addrs[1] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
- return kcs;
+ return rc;
}
static int aspeed_kcs_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct kcs_bmc *kcs_bmc;
+ const struct aspeed_kcs_of_ops *ops;
+ struct kcs_bmc_device *kcs_bmc;
+ struct aspeed_kcs_bmc *priv;
struct device_node *np;
- int rc;
+ bool have_upstream_irq;
+ u32 upstream_irq[2];
+ int rc, channel;
+ int nr_addrs;
+ u32 addrs[2];
- np = dev->of_node->parent;
+ np = pdev->dev.of_node->parent;
if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
!of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
!of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
- dev_err(dev, "unsupported LPC device binding\n");
+ dev_err(&pdev->dev, "unsupported LPC device binding\n");
return -ENODEV;
}
- np = dev->of_node;
- if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
- kcs_bmc = aspeed_kcs_probe_of_v1(pdev);
- else if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc-v2") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
- kcs_bmc = aspeed_kcs_probe_of_v2(pdev);
- else
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
return -EINVAL;
- if (IS_ERR(kcs_bmc))
- return PTR_ERR(kcs_bmc);
+ channel = ops->get_channel(pdev);
+ if (channel < 0)
+ return channel;
- kcs_bmc->io_inputb = aspeed_kcs_inb;
- kcs_bmc->io_outputb = aspeed_kcs_outb;
+ nr_addrs = ops->get_io_address(pdev, addrs);
+ if (nr_addrs < 0)
+ return nr_addrs;
- rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
+ np = pdev->dev.of_node;
+ rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
+ if (rc && rc != -EINVAL)
+ return -EINVAL;
+
+ have_upstream_irq = !rc;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = channel;
+ kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ kcs_bmc->ops = &aspeed_kcs_ops;
+
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&priv->obe.lock);
+ priv->obe.remove = false;
+ timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
+
+ rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
if (rc)
return rc;
- dev_set_drvdata(dev, kcs_bmc);
+ /* Host to BMC IRQ */
+ rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ /* BMC to Host IRQ */
+ if (have_upstream_irq) {
+ rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
+ if (rc < 0)
+ return rc;
+ } else {
+ priv->upstream_irq.mode = aspeed_kcs_irq_none;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
aspeed_kcs_enable_channel(kcs_bmc, true);
- rc = misc_register(&kcs_bmc->miscdev);
+ rc = kcs_bmc_add_device(&priv->kcs_bmc);
if (rc) {
- dev_err(dev, "Unable to register device\n");
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
return rc;
}
- dev_dbg(&pdev->dev,
- "Probed KCS device %d (IDR=0x%x, ODR=0x%x, STR=0x%x)\n",
- kcs_bmc->channel, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr,
- kcs_bmc->ioreg.str);
+ dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
+ kcs_bmc->channel, addrs[0]);
return 0;
}
static int aspeed_kcs_remove(struct platform_device *pdev)
{
- struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+ struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
- misc_deregister(&kcs_bmc->miscdev);
+ aspeed_kcs_enable_channel(kcs_bmc, false);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+ /* Make sure it's properly dead */
+ spin_lock_irq(&priv->obe.lock);
+ priv->obe.remove = true;
+ spin_unlock_irq(&priv->obe.lock);
+ del_timer_sync(&priv->obe.timer);
return 0;
}
+static const struct aspeed_kcs_of_ops of_v1_ops = {
+ .get_channel = aspeed_kcs_of_v1_get_channel,
+ .get_io_address = aspeed_kcs_of_v1_get_io_address,
+};
+
+static const struct aspeed_kcs_of_ops of_v2_ops = {
+ .get_channel = aspeed_kcs_of_v2_get_channel,
+ .get_io_address = aspeed_kcs_of_v2_get_io_address,
+};
+
static const struct of_device_id ast_kcs_bmc_match[] = {
- { .compatible = "aspeed,ast2400-kcs-bmc" },
- { .compatible = "aspeed,ast2500-kcs-bmc" },
- { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
- { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2400-kcs-bmc", .data = &of_v1_ops },
+ { .compatible = "aspeed,ast2500-kcs-bmc", .data = &of_v1_ops },
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2", .data = &of_v2_ops },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2", .data = &of_v2_ops },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
@@ -421,4 +709,5 @@ module_platform_driver(ast_kcs_bmc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
new file mode 100644
index 000000000000..486834a962c3
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "kcs-bmc: " fmt
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipmi_bmc.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+/* Different phases of the KCS BMC module.
+ * KCS_PHASE_IDLE:
+ * BMC should not be expecting nor sending any data.
+ * KCS_PHASE_WRITE_START:
+ * BMC is receiving a WRITE_START command from system software.
+ * KCS_PHASE_WRITE_DATA:
+ * BMC is receiving a data byte from system software.
+ * KCS_PHASE_WRITE_END_CMD:
+ * BMC is waiting for the last data byte from system software.
+ * KCS_PHASE_WRITE_DONE:
+ * BMC has received the whole request from system software.
+ * KCS_PHASE_WAIT_READ:
+ * BMC is waiting for the response from the upper IPMI service.
+ * KCS_PHASE_READ:
+ * BMC is transferring the response to system software.
+ * KCS_PHASE_ABORT_ERROR1:
+ * BMC is waiting for an error status request from system software.
+ * KCS_PHASE_ABORT_ERROR2:
+ * BMC is waiting for idle status after an error from system software.
+ * KCS_PHASE_ERROR:
+ * BMC has detected a protocol violation at the interface level.
+ */
+enum kcs_ipmi_phases {
+ KCS_PHASE_IDLE,
+
+ KCS_PHASE_WRITE_START,
+ KCS_PHASE_WRITE_DATA,
+ KCS_PHASE_WRITE_END_CMD,
+ KCS_PHASE_WRITE_DONE,
+
+ KCS_PHASE_WAIT_READ,
+ KCS_PHASE_READ,
+
+ KCS_PHASE_ABORT_ERROR1,
+ KCS_PHASE_ABORT_ERROR2,
+ KCS_PHASE_ERROR
+};
+
+/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
+enum kcs_ipmi_errors {
+ KCS_NO_ERROR = 0x00,
+ KCS_ABORTED_BY_COMMAND = 0x01,
+ KCS_ILLEGAL_CONTROL_CODE = 0x02,
+ KCS_LENGTH_ERROR = 0x06,
+ KCS_UNSPECIFIED_ERROR = 0xFF
+};
+
+struct kcs_bmc_ipmi {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+
+ spinlock_t lock;
+
+ enum kcs_ipmi_phases phase;
+ enum kcs_ipmi_errors error;
+
+ wait_queue_head_t queue;
+ bool data_in_avail;
+ int data_in_idx;
+ u8 *data_in;
+
+ int data_out_idx;
+ int data_out_len;
+ u8 *data_out;
+
+ struct mutex mutex;
+ u8 *kbuffer;
+
+ struct miscdevice miscdev;
+};
+
+#define DEVICE_NAME "ipmi-kcs"
+
+#define KCS_MSG_BUFSIZ 1000
+
+#define KCS_ZERO_DATA 0
+
+/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
+#define KCS_STATUS_STATE(state) ((state) << 6)
+#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
+#define KCS_STATUS_CMD_DAT BIT(3)
+#define KCS_STATUS_SMS_ATN BIT(2)
+#define KCS_STATUS_IBF BIT(1)
+#define KCS_STATUS_OBF BIT(0)
+
+/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
+enum kcs_states {
+ IDLE_STATE = 0,
+ READ_STATE = 1,
+ WRITE_STATE = 2,
+ ERROR_STATE = 3,
+};
+
+/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
+#define KCS_CMD_GET_STATUS_ABORT 0x60
+#define KCS_CMD_WRITE_START 0x61
+#define KCS_CMD_WRITE_END 0x62
+#define KCS_CMD_READ_BYTE 0x68
+
+static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
+{
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
+}
+
+static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
+{
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_read_data(priv->client.dev);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ priv->phase = KCS_PHASE_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+}
+
+static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
+{
+ struct kcs_bmc_device *dev;
+ u8 data;
+
+ dev = priv->client.dev;
+
+ switch (priv->phase) {
+ case KCS_PHASE_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_DATA;
+ fallthrough;
+
+ case KCS_PHASE_WRITE_DATA:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_WRITE_END_CMD:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, READ_STATE);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ priv->phase = KCS_PHASE_WRITE_DONE;
+ priv->data_in_avail = true;
+ wake_up_interruptible(&priv->queue);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_READ:
+ if (priv->data_out_idx == priv->data_out_len)
+ set_state(priv, IDLE_STATE);
+
+ data = kcs_bmc_read_data(dev);
+ if (data != KCS_CMD_READ_BYTE) {
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ break;
+ }
+
+ if (priv->data_out_idx == priv->data_out_len) {
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+ }
+
+ kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
+ break;
+
+ case KCS_PHASE_ABORT_ERROR1:
+ set_state(priv, READ_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, priv->error);
+ priv->phase = KCS_PHASE_ABORT_ERROR2;
+ break;
+
+ case KCS_PHASE_ABORT_ERROR2:
+ set_state(priv, IDLE_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+}
+
+static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
+{
+ u8 cmd;
+
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ cmd = kcs_bmc_read_data(priv->client.dev);
+ switch (cmd) {
+ case KCS_CMD_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_START;
+ priv->error = KCS_NO_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ case KCS_CMD_WRITE_END:
+ if (priv->phase != KCS_PHASE_WRITE_DATA) {
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+
+ priv->phase = KCS_PHASE_WRITE_END_CMD;
+ break;
+
+ case KCS_CMD_GET_STATUS_ABORT:
+ if (priv->error == KCS_NO_ERROR)
+ priv->error = KCS_ABORTED_BY_COMMAND;
+
+ priv->phase = KCS_PHASE_ABORT_ERROR1;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_ILLEGAL_CONTROL_CODE;
+ break;
+ }
+}
+
+static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_ipmi, client);
+}
+
+static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_ipmi *priv;
+ u8 status;
+ int ret;
+
+ priv = client_to_kcs_bmc_ipmi(client);
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+ if (status & KCS_STATUS_IBF) {
+ if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_ipmi_handle_cmd(priv);
+ else
+ kcs_bmc_ipmi_handle_data(priv);
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
+ .event = kcs_bmc_ipmi_event,
+};
+
+static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
+{
+ return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
+}
+
+static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ __poll_t mask = 0;
+
+ poll_wait(filp, &priv->queue, wait);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->data_in_avail)
+ mask |= EPOLLIN;
+ spin_unlock_irq(&priv->lock);
+
+ return mask;
+}
+
+static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ bool data_avail;
+ size_t data_len;
+ ssize_t ret;
+
+ if (!(filp->f_flags & O_NONBLOCK))
+ wait_event_interruptible(priv->queue,
+ priv->data_in_avail);
+
+ mutex_lock(&priv->mutex);
+
+ spin_lock_irq(&priv->lock);
+ data_avail = priv->data_in_avail;
+ if (data_avail) {
+ data_len = priv->data_in_idx;
+ memcpy(priv->kbuffer, priv->data_in, data_len);
+ }
+ spin_unlock_irq(&priv->lock);
+
+ if (!data_avail) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < data_len) {
+ pr_err("channel=%u with too large data : %zu\n",
+ priv->client.dev->channel, data_len);
+
+ spin_lock_irq(&priv->lock);
+ kcs_bmc_ipmi_force_abort(priv);
+ spin_unlock_irq(&priv->lock);
+
+ ret = -EOVERFLOW;
+ goto out_unlock;
+ }
+
+ if (copy_to_user(buf, priv->kbuffer, data_len)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ ret = data_len;
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WRITE_DONE) {
+ priv->phase = KCS_PHASE_WAIT_READ;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ ssize_t ret;
+
+ /* The minimum response size is 3 bytes: netfn + cmd + ccode */
+ if (count < 3 || count > KCS_MSG_BUFSIZ)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (copy_from_user(priv->kbuffer, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WAIT_READ) {
+ priv->phase = KCS_PHASE_READ;
+ priv->data_out_idx = 1;
+ priv->data_out_len = count;
+ memcpy(priv->data_out, priv->kbuffer, count);
+ kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
+ ret = count;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ long ret = 0;
+
+ spin_lock_irq(&priv->lock);
+
+ switch (cmd) {
+ case IPMI_BMC_IOCTL_SET_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
+ break;
+
+ case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
+ break;
+
+ case IPMI_BMC_IOCTL_FORCE_ABORT:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ kcs_bmc_ipmi_force_abort(priv);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+
+ return 0;
+}
+
+static const struct file_operations kcs_bmc_ipmi_fops = {
+ .owner = THIS_MODULE,
+ .open = kcs_bmc_ipmi_open,
+ .read = kcs_bmc_ipmi_read,
+ .write = kcs_bmc_ipmi_write,
+ .release = kcs_bmc_ipmi_release,
+ .poll = kcs_bmc_ipmi_poll,
+ .unlocked_ioctl = kcs_bmc_ipmi_ioctl,
+};
+
+static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
+static LIST_HEAD(kcs_bmc_ipmi_instances);
+
+static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv;
+ int rc;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ mutex_init(&priv->mutex);
+
+ init_waitqueue_head(&priv->queue);
+
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_ipmi_client_ops;
+ priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
+ kcs_bmc->channel);
+ if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
+ return -EINVAL;
+
+ priv->miscdev.fops = &kcs_bmc_ipmi_fops;
+
+ rc = misc_register(&priv->miscdev);
+ if (rc) {
+ dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
+ return rc;
+ }
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_ipmi_instances);
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ misc_deregister(&priv->miscdev);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+ devm_kfree(kcs_bmc->dev, priv->kbuffer);
+ devm_kfree(kcs_bmc->dev, priv->data_out);
+ devm_kfree(kcs_bmc->dev, priv->data_in);
+ devm_kfree(kcs_bmc->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
+ .add_device = kcs_bmc_ipmi_add_device,
+ .remove_device = kcs_bmc_ipmi_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
+ .ops = &kcs_bmc_ipmi_driver_ops,
+};
+
+static int kcs_bmc_ipmi_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_ipmi_init);
+
+static void kcs_bmc_ipmi_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
+}
+module_exit(kcs_bmc_ipmi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc_client.h b/drivers/char/ipmi/kcs_bmc_client.h
new file mode 100644
index 000000000000..6fdcde0a7169
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_client.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_CONSUMER_H__
+#define __KCS_BMC_CONSUMER_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_driver_ops {
+ int (*add_device)(struct kcs_bmc_device *kcs_bmc);
+ int (*remove_device)(struct kcs_bmc_device *kcs_bmc);
+};
+
+struct kcs_bmc_driver {
+ struct list_head entry;
+
+ const struct kcs_bmc_driver_ops *ops;
+};
+
+struct kcs_bmc_client_ops {
+ irqreturn_t (*event)(struct kcs_bmc_client *client);
+};
+
+struct kcs_bmc_client {
+ const struct kcs_bmc_client_ops *ops;
+
+ struct kcs_bmc_device *dev;
+};
+
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv);
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events);
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data);
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data);
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val);
+#endif
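The client API above is deliberately small: implement an event() callback, attach with kcs_bmc_enable_device(), and manage interrupt sources with kcs_bmc_update_event_mask(). A minimal hypothetical client that echoes each host byte straight back might look like this (sketch only, against the declarations in this header):

        #include "kcs_bmc_client.h"

        static irqreturn_t echo_event(struct kcs_bmc_client *client)
        {
                u8 status = kcs_bmc_read_status(client->dev);

                if (!(status & KCS_BMC_STR_IBF))
                        return IRQ_NONE;

                /* Consuming IDR clears IBF; write the byte back out via ODR */
                kcs_bmc_write_data(client->dev, kcs_bmc_read_data(client->dev));
                return IRQ_HANDLED;
        }

        static const struct kcs_bmc_client_ops echo_ops = {
                .event = echo_event,
        };

A driver would fill a struct kcs_bmc_client with these ops and a device pointer, call kcs_bmc_enable_device() to become the active client, and, depending on what kcs_bmc_enable_device() already unmasks, use kcs_bmc_update_event_mask() to take IBF and OBE events.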
diff --git a/drivers/char/ipmi/kcs_bmc_device.h b/drivers/char/ipmi/kcs_bmc_device.h
new file mode 100644
index 000000000000..17c572f25c54
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_device.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_DEVICE_H__
+#define __KCS_BMC_DEVICE_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_device_ops {
+ void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable);
+ u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg);
+ void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b);
+ void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
+};
+
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc);
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc);
+
+#endif
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
index 722f7391fe1f..7961fec56476 100644
--- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -17,7 +17,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include "kcs_bmc.h"
+#include "kcs_bmc_device.h"
#define DEVICE_NAME "npcm-kcs-bmc"
#define KCS_CHANNEL_MAX 3
@@ -38,6 +38,7 @@
#define KCS2CTL 0x2A
#define KCS3CTL 0x3C
#define KCS_CTL_IBFIE BIT(0)
+#define KCS_CTL_OBEIE BIT(1)
#define KCS1IE 0x1C
#define KCS2IE 0x2E
@@ -65,6 +66,8 @@ struct npcm7xx_kcs_reg {
};
struct npcm7xx_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
struct regmap *map;
const struct npcm7xx_kcs_reg *reg;
@@ -76,9 +79,14 @@ static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
{ .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
};
-static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
+static inline struct npcm7xx_kcs_bmc *to_npcm7xx_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ return container_of(kcs_bmc, struct npcm7xx_kcs_bmc, kcs_bmc);
+}
+
+static u8 npcm7xx_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
u32 val = 0;
int rc;
@@ -88,37 +96,53 @@ static u8 npcm7xx_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg)
return rc == 0 ? (u8)val : 0;
}
-static void npcm7xx_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data)
+static void npcm7xx_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
int rc;
rc = regmap_write(priv->map, reg, data);
WARN(rc != 0, "regmap_write() failed: %d\n", rc);
}
-static void npcm7xx_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable)
+static void npcm7xx_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 data)
{
- struct npcm7xx_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc);
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
- regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
- enable ? KCS_CTL_IBFIE : 0);
+ rc = regmap_update_bits(priv->map, reg, mask, data);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
}
-static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
- struct kcs_bmc *kcs_bmc = arg;
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
+ if (mask & KCS_BMC_EVENT_TYPE_OBE)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
+ !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE);
- if (!kcs_bmc_handle_event(kcs_bmc))
- return IRQ_HANDLED;
+ if (mask & KCS_BMC_EVENT_TYPE_IBF)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
+ !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE);
+}
- return IRQ_NONE;
+static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
}
-static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc,
+static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -132,11 +156,18 @@ static int npcm7xx_kcs_config_irq(struct kcs_bmc *kcs_bmc,
dev_name(dev), kcs_bmc);
}
+static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = {
+ .irq_mask_update = npcm7xx_kcs_irq_mask_update,
+ .io_inputb = npcm7xx_kcs_inb,
+ .io_outputb = npcm7xx_kcs_outb,
+ .io_updateb = npcm7xx_kcs_updateb,
+};
+
static int npcm7xx_kcs_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct npcm7xx_kcs_bmc *priv;
- struct kcs_bmc *kcs_bmc;
+ struct kcs_bmc_device *kcs_bmc;
u32 chan;
int rc;
@@ -146,11 +177,10 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
return -ENODEV;
}
- kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
- if (!kcs_bmc)
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- priv = kcs_bmc_priv(kcs_bmc);
priv->map = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(priv->map)) {
dev_err(dev, "Couldn't get regmap\n");
@@ -158,22 +188,26 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
}
priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1];
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = chan;
kcs_bmc->ioreg.idr = priv->reg->dib;
kcs_bmc->ioreg.odr = priv->reg->dob;
kcs_bmc->ioreg.str = priv->reg->sts;
- kcs_bmc->io_inputb = npcm7xx_kcs_inb;
- kcs_bmc->io_outputb = npcm7xx_kcs_outb;
+ kcs_bmc->ops = &npcm7xx_kcs_ops;
- dev_set_drvdata(dev, kcs_bmc);
+ platform_set_drvdata(pdev, priv);
- npcm7xx_kcs_enable_channel(kcs_bmc, true);
rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
if (rc)
return rc;
- rc = misc_register(&kcs_bmc->miscdev);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ npcm7xx_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(kcs_bmc);
if (rc) {
- dev_err(dev, "Unable to register device\n");
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
return rc;
}
@@ -186,9 +220,13 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev)
static int npcm7xx_kcs_remove(struct platform_device *pdev)
{
- struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev);
+ struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
- misc_deregister(&kcs_bmc->miscdev);
+ npcm7xx_kcs_enable_channel(kcs_bmc, false);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
return 0;
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
new file mode 100644
index 000000000000..7948cabde50b
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 IBM Corp. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+struct kcs_bmc_serio {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+ struct serio *port;
+
+ spinlock_t lock;
+};
+
+static inline struct kcs_bmc_serio *client_to_kcs_bmc_serio(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_serio, client);
+}
+
+static irqreturn_t kcs_bmc_serio_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_serio *priv;
+ irqreturn_t handled = IRQ_NONE;
+ u8 status;
+
+ priv = client_to_kcs_bmc_serio(client);
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+
+ if (status & KCS_BMC_STR_IBF)
+ handled = serio_interrupt(priv->port, kcs_bmc_read_data(client->dev), 0);
+
+ spin_unlock(&priv->lock);
+
+ return handled;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_serio_client_ops = {
+ .event = kcs_bmc_serio_event,
+};
+
+static int kcs_bmc_serio_open(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static void kcs_bmc_serio_close(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+}
+
+static DEFINE_SPINLOCK(kcs_bmc_serio_instances_lock);
+static LIST_HEAD(kcs_bmc_serio_instances);
+
+static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv;
+ struct serio *port;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->id.type = SERIO_8042;
+ port->open = kcs_bmc_serio_open;
+ port->close = kcs_bmc_serio_close;
+ port->port_data = priv;
+ port->dev.parent = kcs_bmc->dev;
+
+ spin_lock_init(&priv->lock);
+ priv->port = port;
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_serio_client_ops;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_serio_instances);
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ serio_register_port(port);
+
+ dev_info(kcs_bmc->dev, "Initialised serio client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_serio_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_serio_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ /* kfree()s priv->port via put_device() */
+ serio_unregister_port(priv->port);
+
+ /* Ensure the IBF IRQ is disabled if we were the active client */
+ kcs_bmc_disable_device(kcs_bmc, &priv->client);
+
+ devm_kfree(priv->client.dev->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_serio_driver_ops = {
+ .add_device = kcs_bmc_serio_add_device,
+ .remove_device = kcs_bmc_serio_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_serio_driver = {
+ .ops = &kcs_bmc_serio_driver_ops,
+};
+
+static int kcs_bmc_serio_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_serio_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_serio_init);
+
+static void kcs_bmc_serio_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
+}
+module_exit(kcs_bmc_serio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Adapter driver for serio access to BMC KCS devices");
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index ca7158fa6e6c..f7dc986fa4a0 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -312,7 +312,7 @@ unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
#define TPM_ST_CLEAR 1
/**
- * tpm_startup() - turn on the TPM
+ * tpm1_startup() - turn on the TPM
* @chip: TPM chip to use
*
* Normally the firmware should start the TPM. This function is provided as a
@@ -611,7 +611,7 @@ out:
#define TPM_ORD_CONTINUE_SELFTEST 83
/**
- * tpm_continue_selftest() - run TPM's selftest
+ * tpm1_continue_selftest() - run TPM's selftest
* @chip: TPM chip to use
*
* Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c84d23951219..a25815a6f625 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -87,7 +87,7 @@ static u8 tpm2_ordinal_duration_index(u32 ordinal)
return TPM_MEDIUM;
case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG;
+ return TPM_LONG_LONG;
case TPM2_CC_PCR_EXTEND: /* 182 */
return TPM_MEDIUM;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a9dcf31eadd2..18606651d1aa 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -464,7 +464,7 @@ static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
/* Detect a 64 bit address on a 32 bit system */
if (start != new_res.start)
- return (void __iomem *) ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
if (!iores)
return devm_ioremap_resource(dev, &new_res);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 4ed6e660273a..d3f2e5364c27 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -363,11 +363,7 @@ static int tpm_tis_force_device(void)
{
struct platform_device *pdev;
static const struct resource x86_resources[] = {
- {
- .start = 0xFED40000,
- .end = 0xFED40000 + TIS_MEM_LEN - 1,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0xFED40000, TIS_MEM_LEN)
};
if (!force)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 55b9d3965ae1..69579efb247b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -196,13 +196,24 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
return 0;
if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
- /*
- * If this trips, the chances are the read is
- * returning 0xff because the locality hasn't been
- * acquired. Usually because tpm_try_get_ops() hasn't
- * been called before doing a TPM operation.
- */
- WARN_ONCE(1, "TPM returned invalid status\n");
+ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
+ /*
+ * If this trips, the chances are the read is
+ * returning 0xff because the locality hasn't been
+ * acquired. Usually because tpm_try_get_ops() hasn't
+ * been called before doing a TPM operation.
+ */
+ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
+ status);
+
+ /*
+ * Dump stack for forensics, as invalid TPM_STS.x could be
+ * potentially triggered by impaired tpm_try_get_ops() or
+ * tpm_find_get_ops().
+ */
+ dump_stack();
+ }
+
return 0;
}
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 9b2d32a59f67..b2a3c6c72882 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -83,6 +83,7 @@ enum tis_defaults {
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
+ TPM_TIS_INVALID_STATUS = BIT(1),
};
struct tpm_tis_data {
@@ -90,7 +91,7 @@ struct tpm_tis_data {
int locality;
int irq;
bool irq_tested;
- unsigned int flags;
+ unsigned long flags;
void __iomem *ilb_base_addr;
u16 clkrun_enabled;
wait_queue_head_t int_queue;
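The flags field changes from unsigned int to unsigned long because the atomic bitops used in tpm_tis_core.c above (test_and_set_bit() and friends) operate on unsigned long words. The one-shot logging idiom this enables, reduced to its essentials (bit 0 stands in for TPM_TIS_INVALID_STATUS):

        #include <linux/bitops.h>
        #include <linux/printk.h>

        static unsigned long flags;     /* must be unsigned long for the bitops API */

        static void report_once(void)
        {
                /* Atomically sets the bit; only the first caller sees it clear */
                if (!test_and_set_bit(0, &flags))
                        pr_err("reporting this condition once only\n");
        }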
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index f19c227d20f4..44dde2fbe2fb 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -706,14 +706,14 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client,
if (client->irq > 0) {
rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
dev->driver->name, chip);
if (rc < 0) {
dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
return rc;
}
- disable_irq(client->irq);
priv->irq = client->irq;
} else {
dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3856f6ebcb34..54584b4b00d1 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -240,10 +240,14 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi)
tpm_tis_spi_probe_func probe_func;
probe_func = of_device_get_match_data(&spi->dev);
- if (!probe_func && spi_dev_id)
- probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
- if (!probe_func)
- return -ENODEV;
+ if (!probe_func) {
+ if (spi_dev_id) {
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+ } else
+ probe_func = tpm_tis_spi_probe;
+ }
return probe_func(spi);
}
@@ -260,6 +264,8 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
+ { "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
{ "cr50", (unsigned long)cr50_spi_probe },
{}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 39aa21d01e05..9fa28237715a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -358,6 +358,20 @@ config ARM_GLOBAL_TIMER
help
This option enables support for the ARM global timer unit.
+config ARM_GT_INITIAL_PRESCALER_VAL
+ int "ARM global timer initial prescaler value"
+ default 2 if ARCH_ZYNQ
+ default 1
+ depends on ARM_GLOBAL_TIMER
+ help
+	  When the ARM global timer initializes, its current rate is declared
+	  to the kernel and maintained forever. Should its parent clock
+	  change, the driver tries to fix the timer's internal prescaler.
+	  On some machines (e.g. Zynq) the initial prescaler value thus
+	  bounds how much the parent clock is allowed to decrease or
+	  increase with respect to the initial clock rate.
+	  This limits the maximum CPU_FREQ delta from the initial frequency.
+
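To see how the initial prescaler bounds later rate changes, the following hedged userspace sketch mirrors the divider selection performed by gt_clk_rate_change_cb() further down; the clock rates are illustrative, not taken from a real board:

#include <stdio.h>
#include <stdlib.h>

#define GT_CONTROL_PRESCALER_MAX 0xF
#define MAX_F_ERR 50 /* maximum tolerated rate error, Hz */

/* Returns the prescaler field value to program for new_rate, or -1 if
 * the target rate cannot be maintained (mirrors the notifier's math). */
static int pick_prescaler(unsigned long new_rate, unsigned long target_rate)
{
	/* DIV_ROUND_CLOSEST(new_rate, target_rate) */
	int psv = (int)((new_rate + target_rate / 2) / target_rate);

	if (psv < 1 || labs((long)(target_rate - new_rate / psv)) > MAX_F_ERR)
		return -1;
	psv--; /* the register field holds divider - 1 */
	return (psv <= GT_CONTROL_PRESCALER_MAX) ? psv : -1;
}

int main(void)
{
	/* Zynq-like numbers: 666 MHz parent, initial divider of 2. */
	unsigned long target = 666666666UL / 2;

	printf("%d\n", pick_prescaler(666666666UL / 2, target)); /* 0: divider 1 */
	printf("%d\n", pick_prescaler(666666666UL, target));     /* 1: divider 2 */
	printf("%d\n", pick_prescaler(100000000UL, target));     /* -1: unreachable */
	return 0;
}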
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fe1a82627d57..be6d741d404c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -64,7 +64,6 @@ struct arch_timer {
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
-u32 arch_timer_rate1 __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
@@ -365,7 +364,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
do { \
_val = read_sysreg(reg); \
_retries--; \
- } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+ } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \
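The narrowed mask changes which counter values the workaround macro treats as too close to a low-bits rollover and therefore re-reads. A small sketch of the predicate (the 9-bit width itself is dictated by the affected counter implementation; the values below only show where the retry window sits):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* True when a counter value falls in the rollover window that the
 * erratum workaround re-reads around (low 9 bits all 0s or all 1s). */
static int needs_retry(uint64_t val)
{
	return ((val + 1) & GENMASK(8, 0)) <= 1;
}

int main(void)
{
	printf("%d\n", needs_retry(0x1ff));  /* 1: low bits all ones  */
	printf("%d\n", needs_retry(0x200));  /* 1: low bits all zeros */
	printf("%d\n", needs_retry(0x1fe));  /* 0: safely mid-window  */
	return 0;
}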
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 88b2d38a7a61..44a61dc6f932 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -31,6 +31,10 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
+#define GT_CONTROL_PRESCALER_SHIFT 8
+#define GT_CONTROL_PRESCALER_MAX 0xF
+#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
+ GT_CONTROL_PRESCALER_SHIFT)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -39,6 +43,7 @@
#define GT_COMP1 0x14
#define GT_AUTO_INC 0x18
+#define MAX_F_ERR 50
/*
* We are expecting to be clocked by the ARM peripheral clock.
*
@@ -46,7 +51,8 @@
* the units for all operations.
*/
static void __iomem *gt_base;
-static unsigned long gt_clk_rate;
+static struct notifier_block gt_clk_rate_change_nb;
+static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -96,7 +102,10 @@ static void gt_compare_set(unsigned long delta, int periodic)
unsigned long ctrl;
counter += delta;
- ctrl = GT_CONTROL_TIMER_ENABLE;
+ ctrl = readl(gt_base + GT_CONTROL);
+ ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
+ GT_CONTROL_AUTO_INC);
+ ctrl |= GT_CONTROL_TIMER_ENABLE;
writel_relaxed(ctrl, gt_base + GT_CONTROL);
writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0);
writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1);
@@ -123,7 +132,7 @@ static int gt_clockevent_shutdown(struct clock_event_device *evt)
static int gt_clockevent_set_periodic(struct clock_event_device *evt)
{
- gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
+ gt_compare_set(DIV_ROUND_CLOSEST(gt_target_rate, HZ), 1);
return 0;
}
@@ -177,7 +186,7 @@ static int gt_starting_cpu(unsigned int cpu)
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
clk->irq = gt_ppi;
- clockevents_config_and_register(clk, gt_clk_rate,
+ clockevents_config_and_register(clk, gt_target_rate,
1, 0xffffffff);
enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
return 0;
@@ -232,9 +241,28 @@ static struct delay_timer gt_delay_timer = {
.read_current_timer = gt_read_long,
};
+static void gt_write_presc(u32 psv)
+{
+ u32 reg;
+
+ reg = readl(gt_base + GT_CONTROL);
+ reg &= ~GT_CONTROL_PRESCALER_MASK;
+ reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ writel(reg, gt_base + GT_CONTROL);
+}
+
+static u32 gt_read_presc(void)
+{
+ u32 reg;
+
+ reg = readl(gt_base + GT_CONTROL);
+ reg &= GT_CONTROL_PRESCALER_MASK;
+ return reg >> GT_CONTROL_PRESCALER_SHIFT;
+}
+
static void __init gt_delay_timer_init(void)
{
- gt_delay_timer.freq = gt_clk_rate;
+ gt_delay_timer.freq = gt_target_rate;
register_current_timer_delay(&gt_delay_timer);
}
@@ -243,18 +271,81 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
- /* enables timer on all the cores */
- writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ /* set prescaler and enable timer on all the cores */
+ writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
+ GT_CONTROL_PRESCALER_SHIFT)
+ | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
- sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
+ sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
#endif
- return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+ return clocksource_register_hz(&gt_clocksource, gt_target_rate);
+}
+
+static int gt_clk_rate_change_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+ int psv;
+
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate,
+ gt_target_rate);
+
+ if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ return NOTIFY_BAD;
+
+ psv--;
+
+ /* prescaler within legal range? */
+ if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ return NOTIFY_BAD;
+
+ /*
+		 * store the current prescaler value so it can be restored in
+		 * case of an abort.
+ */
+ gt_psv_bck = gt_read_presc();
+ gt_psv_new = psv;
+ /* scale down: adjust divider in post-change notification */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_DONE;
+
+ /* scale up: adjust divider now - before frequency change */
+ gt_write_presc(psv);
+ break;
+ }
+ case POST_RATE_CHANGE:
+ /* scale up: pre-change notification did the adjustment */
+ if (ndata->new_rate > ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* scale down: adjust divider now - after frequency change */
+ gt_write_presc(gt_psv_new);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ /* we have to undo the adjustment in case we scale up */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* restore original register value */
+ gt_write_presc(gt_psv_bck);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
}
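A concrete walk-through of the ordering above, with illustrative numbers: assume gt_target_rate is 333 MHz, the parent runs at 333 MHz and the divider is 1. When the parent is about to rise to 666 MHz, the PRE_RATE_CHANGE handler writes divider 2 first, so the timer briefly runs at 166.5 MHz and returns to 333 MHz once the parent has switched. When the parent drops back to 333 MHz, the divider is only restored to 1 in POST_RATE_CHANGE, so the timer again dips to 166.5 MHz during the transition. In both directions the timer transiently runs slower than, but never faster than, the declared rate, which keeps the clocksource from appearing to jump forward.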
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
+ static unsigned long gt_clk_rate;
int err = 0;
/*
@@ -292,11 +383,20 @@ static int __init global_timer_of_register(struct device_node *np)
}
gt_clk_rate = clk_get_rate(gt_clk);
+ gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_clk_rate_change_nb.notifier_call =
+ gt_clk_rate_change_cb;
+ err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
+ if (err) {
+ pr_warn("Unable to register clock notifier\n");
+ goto out_clk;
+ }
+
gt_evt = alloc_percpu(struct clock_event_device);
if (!gt_evt) {
pr_warn("global-timer: can't allocate memory\n");
err = -ENOMEM;
- goto out_clk;
+ goto out_clk_nb;
}
err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
@@ -326,6 +426,8 @@ out_irq:
free_percpu_irq(gt_ppi, gt_evt);
out_free:
free_percpu(gt_evt);
+out_clk_nb:
+ clk_notifier_unregister(gt_clk, &gt_clk_rate_change_nb);
out_clk:
clk_disable_unprepare(gt_clk);
out_unmap:
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index e77d58449005..a129840f14f9 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -186,7 +186,7 @@ static const struct clk_ops ingenic_ost_global_timer_ops = {
static const char * const ingenic_ost_clk_parents[] = { "ext" };
-static const struct ingenic_ost_clk_info ingenic_ost_clk_info[] = {
+static const struct ingenic_ost_clk_info x1000_ost_clk_info[] = {
[OST_CLK_PERCPU_TIMER] = {
.init_data = {
.name = "percpu timer",
@@ -414,14 +414,14 @@ static const struct ingenic_soc_info x1000_soc_info = {
.num_channels = 2,
};
-static const struct of_device_id __maybe_unused ingenic_ost_of_match[] __initconst = {
- { .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info, },
+static const struct of_device_id __maybe_unused ingenic_ost_of_matches[] __initconst = {
+ { .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info },
{ /* sentinel */ }
};
static int __init ingenic_ost_probe(struct device_node *np)
{
- const struct of_device_id *id = of_match_node(ingenic_ost_of_match, np);
+ const struct of_device_id *id = of_match_node(ingenic_ost_of_matches, np);
struct ingenic_ost *ost;
unsigned int i;
int ret;
@@ -462,7 +462,7 @@ static int __init ingenic_ost_probe(struct device_node *np)
ost->clocks->num = ost->soc_info->num_channels;
for (i = 0; i < ost->clocks->num; i++) {
- ret = ingenic_ost_register_clock(ost, i, &ingenic_ost_clk_info[i], ost->clocks);
+ ret = ingenic_ost_register_clock(ost, i, &x1000_ost_clk_info[i], ost->clocks);
if (ret) {
pr_crit("%s: Cannot register clock %d\n", __func__, i);
goto err_unregister_ost_clocks;
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index f760229d0c7f..6e46781bc9ac 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -4,7 +4,7 @@
* http://www.samsung.com/
*
* samsung - Common hr-timer support (s3c and s5p)
-*/
+ */
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -22,7 +22,6 @@
#include <clocksource/samsung_pwm.h>
-
/*
* Clocksource driver
*/
@@ -38,8 +37,8 @@
#define TCFG0_PRESCALER_MASK 0xff
#define TCFG0_PRESCALER1_SHIFT 8
-#define TCFG1_SHIFT(x) ((x) * 4)
-#define TCFG1_MUX_MASK 0xf
+#define TCFG1_SHIFT(x) ((x) * 4)
+#define TCFG1_MUX_MASK 0xf
/*
* Each channel occupies 4 bits in TCON register, but there is a gap of 4
@@ -62,7 +61,7 @@ EXPORT_SYMBOL(samsung_pwm_lock);
struct samsung_pwm_clocksource {
void __iomem *base;
- void __iomem *source_reg;
+ const void __iomem *source_reg;
unsigned int irq[SAMSUNG_PWM_NUM];
struct samsung_pwm_variant variant;
@@ -183,7 +182,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
}
static int samsung_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
/*
* This check is needed to account for internal rounding
@@ -225,6 +224,7 @@ static void samsung_clockevent_resume(struct clock_event_device *cev)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
@@ -248,6 +248,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
@@ -272,7 +273,7 @@ static void __init samsung_clockevent_init(void)
time_event_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&time_event_device,
- clock_rate, 1, pwm.tcnt_max);
+ clock_rate, 1, pwm.tcnt_max);
irq_number = pwm.irq[pwm.event_id];
if (request_irq(irq_number, samsung_clock_event_isr,
@@ -282,6 +283,7 @@ static void __init samsung_clockevent_init(void)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
@@ -347,7 +349,7 @@ static int __init samsung_clocksource_init(void)
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
sched_clock_register(samsung_read_sched_clock,
- pwm.variant.bits, clock_rate);
+ pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
return clocksource_register_hz(&samsung_clocksource, clock_rate);
@@ -398,7 +400,8 @@ static int __init _samsung_pwm_clocksource_init(void)
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant)
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant)
{
pwm.base = base;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
@@ -418,7 +421,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
struct property *prop;
const __be32 *cur;
u32 val;
- int i;
+ int i, ret;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
@@ -441,10 +444,24 @@ static int __init samsung_pwm_alloc(struct device_node *np,
pwm.timerclk = of_clk_get_by_name(np, "timers");
if (IS_ERR(pwm.timerclk)) {
pr_crit("failed to get timers clock for timer\n");
- return PTR_ERR(pwm.timerclk);
+ ret = PTR_ERR(pwm.timerclk);
+ goto err_clk;
}
- return _samsung_pwm_clocksource_init();
+ ret = _samsung_pwm_clocksource_init();
+ if (ret)
+ goto err_clocksource;
+
+ return 0;
+
+err_clocksource:
+ clk_put(pwm.timerclk);
+ pwm.timerclk = NULL;
+err_clk:
+ iounmap(pwm.base);
+ pwm.base = NULL;
+
+ return ret;
}
static const struct samsung_pwm_variant s3c24xx_variant = {
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index 9318edcd8963..ab63b95e414f 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -241,6 +241,28 @@ static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
timer_of_base(to) + GPT_IRQ_EN_REG);
}
+static void mtk_gpt_resume(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_enable_irq(to, TIMER_CLK_EVT);
+}
+
+static void mtk_gpt_suspend(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /*
+	 * This is called with interrupts disabled, so ack any pending
+	 * interrupt; otherwise, for example, ATF will prevent the
+	 * suspend from completing.
+ */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+}
+
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
@@ -286,6 +308,8 @@ static int __init mtk_gpt_init(struct device_node *node)
to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.clkevt.suspend = mtk_gpt_suspend;
+ to.clkevt.resume = mtk_gpt_resume;
to.of_irq.handler = mtk_gpt_interrupt;
ret = timer_of_init(node, &to);
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 33eeabf9c3d1..3e52c5226c4d 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -78,6 +78,9 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
+ __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
+ timer->context.ocp_cfg, 0);
+
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
timer->context.twer);
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
@@ -95,6 +98,9 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
static void omap_timer_save_context(struct omap_dm_timer *timer)
{
+ timer->context.ocp_cfg =
+ __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+
timer->context.tclr =
omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
timer->context.twer =
@@ -122,7 +128,8 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
break;
omap_timer_save_context(timer);
break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
case CPU_CLUSTER_PM_EXIT:
if ((timer->capability & OMAP_TIMER_ALWON) ||
!atomic_read(&timer->enabled))
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
If in doubt, say N.
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 3848b4c222e1..2f769b1630c5 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -10,18 +10,14 @@
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
-#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
#include <asm/unaligned.h>
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
}
};
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
- int cpu;
- struct irq_work irq_work;
- struct kthread_work work;
- struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
- struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
- struct cppc_freq_invariance *cppc_fi;
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_cpudata *cpu_data;
- unsigned long local_freq_scale;
- u64 perf;
-
- cppc_fi = container_of(work, struct cppc_freq_invariance, work);
- cpu_data = cppc_fi->cpu_data;
-
- if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
- pr_warn("%s: failed to read perf counters\n", __func__);
- return;
- }
-
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
- fb_ctrs);
-
- perf <<= SCHED_CAPACITY_SHIFT;
- local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
- if (WARN_ON(local_freq_scale > 1024))
- local_freq_scale = 1024;
-
- per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
- struct cppc_freq_invariance *cppc_fi;
-
- cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
- kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
- struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
- /*
- * cppc_get_perf_ctrs() can potentially sleep, call that from the right
- * context.
- */
- irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
- .source = SCALE_FREQ_SOURCE_CPPC,
- .set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_freq_invariance *cppc_fi;
- int i, ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- for_each_cpu(i, policy->cpus) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- cppc_fi->cpu = i;
- cppc_fi->cpu_data = cpu_data;
- kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
- init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
- ret = cppc_get_perf_ctrs(i, &fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters: %d\n",
- __func__, ret);
- fie_disabled = true;
- } else {
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- }
- }
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
- struct sched_attr attr = {
- .size = sizeof(struct sched_attr),
- .sched_policy = SCHED_DEADLINE,
- .sched_nice = 0,
- .sched_priority = 0,
- /*
- * Fake (unused) bandwidth; workaround to "fix"
- * priority inheritance.
- */
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
- };
- int ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- kworker_fie = kthread_create_worker(0, "cppc_fie");
- if (IS_ERR(kworker_fie))
- return;
-
- ret = sched_setattr_nocheck(kworker_fie->task, &attr);
- if (ret) {
- pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
- ret);
- kthread_destroy_worker(kworker_fie);
- return;
- }
-
- /* Register for freq-invariance */
- topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
- struct cppc_freq_invariance *cppc_fi;
- int i;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
- for_each_possible_cpu(i) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- irq_work_sync(&cppc_fi->irq_work);
- }
-
- kthread_destroy_worker(kworker_fie);
- kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
- if (ret) {
+ if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret);
- } else {
- cppc_freq_invariance_policy_init(policy, cpu_data);
- }
return ret;
}
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
- u64 reference_perf;
+ u64 reference_perf, delivered_perf;
reference_perf = fb_ctrs_t0.reference_perf;
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1.delivered,
fb_ctrs_t0.delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
- if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
-
- return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
- u64 delivered_perf;
-
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
- fb_ctrs_t1);
+	/* Check to avoid divide-by-zero and an invalid delivered_perf */
+	if (delta_reference && delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
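The arithmetic in the consolidated helper is easiest to see with made-up counter snapshots; a minimal sketch (the values are illustrative, and the final perf-to-kHz conversion done by cppc_cpufreq_perf_to_khz() is omitted):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative CPPC feedback-counter deltas between two reads. */
	uint64_t reference_perf = 100;      /* reference performance level */
	uint64_t delta_reference = 1000000; /* reference counter ticks     */
	uint64_t delta_delivered = 1500000; /* delivered counter ticks     */

	/* The CPU ran at 1.5x the reference level between the snapshots. */
	uint64_t delivered_perf = reference_perf * delta_delivered / delta_reference;

	printf("delivered_perf = %llu\n", (unsigned long long)delivered_perf); /* 150 */
	return 0;
}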
@@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
- int ret;
-
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
@@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
cppc_check_hisi_workaround();
- ret = cpufreq_register_driver(&cppc_cpufreq_driver);
- if (!ret)
- cppc_freq_invariance_init();
-
- return ret;
+ return cpufreq_register_driver(&cppc_cpufreq_driver);
}
static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
static void __exit cppc_cpufreq_exit(void)
{
- cppc_freq_invariance_exit();
cpufreq_unregister_driver(&cppc_cpufreq_driver);
free_cpu_data();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 802abc925b2a..cbab834c37a0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1367,9 +1367,14 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
+ /*
+ * The initialization has succeeded and the policy is online.
+ * If there is a problem with its frequency table, take it
+ * offline and drop it.
+ */
ret = cpufreq_table_validate_and_sort(policy);
if (ret)
- goto out_exit_policy;
+ goto out_offline_policy;
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -1515,6 +1520,10 @@ out_destroy_policy:
up_write(&policy->rwsem);
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index da717f7cd9a9..1570d6f3e75d 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -211,7 +211,7 @@ void cpufreq_stats_free_table(struct cpufreq_policy *policy)
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
- unsigned int i = 0, count = 0, ret = -ENOMEM;
+ unsigned int i = 0, count;
struct cpufreq_stats *stats;
unsigned int alloc_size;
struct cpufreq_frequency_table *pos;
@@ -253,8 +253,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->last_index = freq_table_get_index(stats, policy->cur);
policy->stats = stats;
- ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
- if (!ret)
+ if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
return;
/* We failed, release resources */
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0e69dffd5a76..6012964df51b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -121,9 +121,10 @@ struct sample {
* @max_pstate_physical:This is physical Max P state for a processor
* This can be higher than the max_pstate which can
* be limited by platform thermal design power limits
- * @scaling: Scaling factor to convert frequency to cpufreq
- * frequency units
+ * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor
+ * @scaling: Scaling factor between performance and frequency
* @turbo_pstate: Max Turbo P state possible for this platform
+ * @min_freq: @min_pstate frequency in cpufreq units
* @max_freq: @max_pstate frequency in cpufreq units
* @turbo_freq: @turbo_pstate frequency in cpufreq units
*
@@ -134,8 +135,10 @@ struct pstate_data {
int min_pstate;
int max_pstate;
int max_pstate_physical;
+ int perf_ctl_scaling;
int scaling;
int turbo_pstate;
+ unsigned int min_freq;
unsigned int max_freq;
unsigned int turbo_freq;
};
@@ -366,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu)
}
}
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static int intel_pstate_get_cppc_guaranteed(int cpu)
{
struct cppc_perf_caps cppc_perf;
int ret;
@@ -382,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
}
#else /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_set_itmt_prio(int cpu)
+static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */
@@ -467,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
+
+static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
+{
+ return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
+}
+
+static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
+ struct cppc_perf_caps *caps)
+{
+ if (cppc_get_perf_caps(cpu->cpu, caps))
+ return false;
+
+ return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
+}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -483,12 +500,146 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
#endif /* CONFIG_ACPI */
#ifndef CONFIG_ACPI_CPPC_LIB
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */
+static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
+{
+ pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
+
+ cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
+}
+
+/**
+ * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
+ * @cpu: Target CPU.
+ *
+ * On hybrid processors, HWP may expose more performance levels than there are
+ * P-states accessible through the PERF_CTL interface. If that happens, the
+ * scaling factor between HWP performance levels and CPU frequency will be less
+ * than the scaling factor between P-state values and CPU frequency.
+ *
+ * In that case, the scaling factor between HWP performance levels and CPU
+ * frequency needs to be determined, which can be done using the observation
+ * that certain HWP performance levels should correspond to certain P-states;
+ * for example, the HWP highest performance should correspond to the maximum
+ * turbo P-state of the CPU.
+ */
+static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
+{
+ int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ int perf_ctl_turbo = pstate_funcs.get_turbo();
+ int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
+ int perf_ctl_max = pstate_funcs.get_max();
+ int max_freq = perf_ctl_max * perf_ctl_scaling;
+ int scaling = INT_MAX;
+ int freq;
+
+ pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
+ pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+
+ pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
+ pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
+
+#ifdef CONFIG_ACPI
+ if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
+ struct cppc_perf_caps caps;
+
+ if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
+ if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
+ pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
+
+ /*
+ * If the CPPC nominal performance is valid, it
+ * can be assumed to correspond to cpu_khz.
+ */
+ if (caps.nominal_perf == perf_ctl_max_phys) {
+ intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+ return;
+ }
+ scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
+ } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
+ pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
+
+ /*
+ * If the CPPC guaranteed performance is valid,
+ * it can be assumed to correspond to max_freq.
+ */
+ if (caps.guaranteed_perf == perf_ctl_max) {
+ intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+ return;
+ }
+ scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
+ }
+ }
+ }
+#endif
+ /*
+ * If using the CPPC data to compute the HWP-to-frequency scaling factor
+	 * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
+ * the assumption that it corresponds to max_freq.
+ */
+ if (scaling > perf_ctl_scaling) {
+ pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
+
+ if (cpu->pstate.max_pstate == perf_ctl_max) {
+ intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+ return;
+ }
+ scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
+ if (scaling > perf_ctl_scaling) {
+ /*
+ * This should not happen, because it would mean that
+ * the number of HWP perf levels was less than the
+ * number of P-states, so use the PERF_CTL scaling in
+ * that case.
+ */
+ pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
+ scaling);
+
+ intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+ return;
+ }
+ }
+
+ /*
+ * If the product of the HWP performance scaling factor obtained above
+ * and the HWP_CAP highest performance is greater than the maximum turbo
+ * frequency corresponding to the pstate_funcs.get_turbo() return value,
+ * the scaling factor is too high, so recompute it so that the HWP_CAP
+ * highest performance corresponds to the maximum turbo frequency.
+ */
+ if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+ pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
+
+ cpu->pstate.turbo_freq = turbo_freq;
+ scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
+ }
+
+ cpu->pstate.scaling = scaling;
+
+ pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+ perf_ctl_scaling);
+
+ freq = perf_ctl_max_phys * perf_ctl_scaling;
+ cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
+
+ cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ /*
+ * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+ * the effective range of HWP performance levels.
+ */
+ cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+}
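A hedged numeric walk-through of the calibration above, using invented values rather than figures from any real CPU:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values, not taken from real hardware. */
	int cpu_khz = 3600000;        /* nominal frequency, kHz         */
	int perf_ctl_max_phys = 36;   /* max non-turbo P-state          */
	int perf_ctl_turbo = 42;      /* max turbo P-state              */
	int cppc_nominal = 45;        /* CPPC nominal performance level */
	int hwp_cap_highest = 52;     /* HWP_CAP highest performance    */

	int perf_ctl_scaling = cpu_khz / perf_ctl_max_phys;  /* 100000 */
	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;  /* 4200000 */
	int scaling = DIV_ROUND_UP(cpu_khz, cppc_nominal);   /* 80000 */

	/* HWP_CAP highest must not map above the real turbo frequency. */
	if (turbo_freq < hwp_cap_highest * scaling)
		scaling = DIV_ROUND_UP(turbo_freq, hwp_cap_highest);

	printf("PERF_CTL scaling: %d kHz/level\n", perf_ctl_scaling);
	printf("HWP scaling:      %d kHz/level\n", scaling);
	return 0;
}

With these numbers the hybrid CPU ends up with 80000 kHz per HWP performance level against 100000 kHz per P-state, i.e. the finer-grained HWP scale the code above is calibrating.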
+
static inline void update_turbo_state(void)
{
u64 misc_en;
@@ -795,19 +946,22 @@ cpufreq_freq_attr_rw(energy_performance_preference);
static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
- struct cpudata *cpu;
- u64 cap;
- int ratio;
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int ratio, freq;
- ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+ ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
if (ratio <= 0) {
+ u64 cap;
+
rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
- cpu = all_cpu_data[policy->cpu];
+ freq = ratio * cpu->pstate.scaling;
+ if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
+ freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
- return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
+ return sprintf(buf, "%d\n", freq);
}
cpufreq_freq_attr_ro(base_frequency);
@@ -831,9 +985,20 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
+ int scaling = cpu->pstate.scaling;
+
__intel_pstate_get_hwp_cap(cpu);
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+ if (scaling != cpu->pstate.perf_ctl_scaling) {
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
+ perf_ctl_scaling);
+ cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
+ perf_ctl_scaling);
+ }
}
static void intel_pstate_hwp_set(unsigned int cpu)
@@ -1365,8 +1530,6 @@ define_one_global_rw(energy_efficiency);
static struct attribute *intel_pstate_attributes[] = {
&status.attr,
&no_turbo.attr,
- &turbo_pct.attr,
- &num_pstates.attr,
NULL
};
@@ -1391,6 +1554,14 @@ static void __init intel_pstate_sysfs_expose_params(void)
if (WARN_ON(rc))
return;
+ if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
+ WARN_ON(rc);
+
+ rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
+ WARN_ON(rc);
+ }
+
/*
* If per cpu limits are enforced there are no global limits, so
* return without creating max/min_perf_pct attributes
@@ -1417,6 +1588,11 @@ static void __init intel_pstate_sysfs_remove(void)
sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
+ if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
+ sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
+ }
+
if (!per_cpu_limits) {
sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
@@ -1713,19 +1889,33 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
+ bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
+ int perf_ctl_max_phys = pstate_funcs.get_max_physical();
+ int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
+ pstate_funcs.get_scaling();
+
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
- cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
+ cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
__intel_pstate_get_hwp_cap(cpu);
+
+ if (hybrid_cpu)
+ intel_pstate_hybrid_hwp_calibrate(cpu);
+ else
+ cpu->pstate.scaling = perf_ctl_scaling;
} else {
+ cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
}
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ if (cpu->pstate.scaling == perf_ctl_scaling) {
+ cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
+ }
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2087,6 +2277,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(ATOM_GOLDMONT, core_funcs),
X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(COMETLAKE, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -2195,23 +2387,34 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
unsigned int policy_min,
unsigned int policy_max)
{
- int scaling = cpu->pstate.scaling;
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int32_t max_policy_perf, min_policy_perf;
+ max_policy_perf = policy_max / perf_ctl_scaling;
+ if (policy_max == policy_min) {
+ min_policy_perf = max_policy_perf;
+ } else {
+ min_policy_perf = policy_min / perf_ctl_scaling;
+ min_policy_perf = clamp_t(int32_t, min_policy_perf,
+ 0, max_policy_perf);
+ }
+
/*
* HWP needs some special consideration, because HWP_REQUEST uses
* abstract values to represent performance rather than pure ratios.
*/
- if (hwp_active)
+ if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_policy_perf = policy_max / scaling;
- if (policy_max == policy_min) {
- min_policy_perf = max_policy_perf;
- } else {
- min_policy_perf = policy_min / scaling;
- min_policy_perf = clamp_t(int32_t, min_policy_perf,
- 0, max_policy_perf);
+ if (cpu->pstate.scaling != perf_ctl_scaling) {
+ int scaling = cpu->pstate.scaling;
+ int freq;
+
+ freq = max_policy_perf * perf_ctl_scaling;
+ max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ freq = min_policy_perf * perf_ctl_scaling;
+ min_policy_perf = DIV_ROUND_UP(freq, scaling);
+ }
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2405,7 +2608,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu->min_perf_ratio = 0;
/* cpuinfo and default policy values */
- policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.min_freq = cpu->pstate.min_freq;
update_turbo_state();
global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
@@ -3135,6 +3338,8 @@ hwp_cpu_matched:
}
pr_info("HWP enabled\n");
+ } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
}
return 0;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d05e761d9572..afc59b292153 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
-#include <linux/sched.h> /* set_cpus_allowed() */
#include <linux/delay.h>
#include <linux/platform_device.h>
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 73a208559fe2..330c8d6cf93c 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -42,6 +42,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
default:
pr_err("error: cpuctl register has unexpected value %02x\n",
clockspeed_reg);
+ fallthrough;
case 0x01:
return 100000;
case 0x02:
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 0ac265d47ef0..1a251e635ebd 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -23,7 +23,6 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/smp.h>
-#include <linux/sched.h> /* set_cpus_allowed() */
#include <linux/clk.h>
#include <linux/percpu.h>
#include <linux/sh_clk.h>
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c3aa8d6ccee3..2e5670446991 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -117,7 +117,7 @@ struct menu_device {
int interval_ptr;
};
-static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
{
int bucket = 0;
@@ -150,7 +150,7 @@ static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
* to be, the higher this multiplier, and thus the higher
* the barrier to go to an expensive C state.
*/
-static inline int performance_multiplier(unsigned long nr_iowaiters)
+static inline int performance_multiplier(unsigned int nr_iowaiters)
{
/* for IO wait tasks (per cpu!) we add 10x each */
return 1 + 10 * nr_iowaiters;
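As a worked example of the comment above: with two tasks in iowait on the CPU, the multiplier becomes 1 + 10 * 2 = 21, so the predicted idle duration is effectively divided by 21 when it is turned into a latency limit, raising the bar for entering an expensive C-state by the same factor.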
@@ -270,7 +270,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
unsigned int predicted_us;
u64 predicted_ns;
u64 interactivity_req;
- unsigned long nr_iowaiters;
+ unsigned int nr_iowaiters;
ktime_t delta, delta_tick;
int i, idx;
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index ac4bb27d69b0..7b91060e82f6 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,47 +2,103 @@
/*
* Timer events oriented CPU idle governor
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+/**
+ * DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
* timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant source of CPU
+ * other interrupts, so they are likely to be the most significant cause of CPU
* wakeups from idle states. Moreover, information about what happened in the
* (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the time to the closest timer is
- * likely to be suitable for the upcoming idle time of the CPU and, if not, then
- * which of the shallower idle states to choose.
+ * idle state with target residency within the (known) time till the closest
+ * timer event, referred to as the sleep length, is likely to be suitable for
+ * the upcoming CPU idle period and, if not, then which of the shallower idle
+ * states to choose instead of it.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases
+ * which can be covered by taking a few most recent idle time intervals of the
+ * CPU into account. However, even in that context it is not necessary to
+ * consider idle duration values greater than the sleep length, because the
+ * closest timer will ultimately wake up the CPU anyway unless it is woken up
+ * earlier.
+ *
+ * Thus this governor estimates whether or not the prospective idle duration of
+ * a CPU is likely to be significantly shorter than the sleep length and selects
+ * an idle state for it accordingly.
+ *
+ * The computations carried out by this governor are based on using bins whose
+ * boundaries are aligned with the target residency parameter values of the CPU
+ * idle states provided by the %CPUIdle driver in the ascending order. That is,
+ * the first bin spans from 0 up to, but not including, the target residency of
+ * the second idle state (idle state 1), the second bin spans from the target
+ * residency of idle state 1 up to, but not including, the target residency of
+ * idle state 2, the third bin spans from the target residency of idle state 2
+ * up to, but not including, the target residency of idle state 3 and so on.
+ * The last bin spans from the target residency of the deepest idle state
+ * supplied by the driver to infinity.
+ *
+ * Two metrics called "hits" and "intercepts" are associated with each bin.
+ * They are updated every time before selecting an idle state for the given CPU
+ * in accordance with what happened last time.
+ *
+ * The "hits" metric reflects the relative frequency of situations in which the
+ * sleep length and the idle duration measured after CPU wakeup fall into the
+ * same bin (that is, the CPU appears to wake up "on time" relative to the sleep
+ * length). In turn, the "intercepts" metric reflects the relative frequency of
+ * situations in which the measured idle duration is so much shorter than the
+ * sleep length that the bin it falls into corresponds to an idle state
+ * shallower than the one whose bin the sleep length falls into (these
+ * situations are referred to as "intercepts" below).
+ *
+ * In addition to the metrics described above, the governor counts recent
+ * intercepts (that is, intercepts that have occurred during the last
+ * %NR_RECENT invocations of it for the given CPU) for each bin.
*
- * Of course, non-timer wakeup sources are more important in some use cases and
- * they can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that case it is not necessary to consider
- * idle duration values greater than the time till the closest timer, as the
- * patterns that they may belong to produce average values close enough to
- * the time till the closest timer (sleep length) anyway.
+ * In order to select an idle state for a CPU, the governor takes the following
+ * steps (modulo the possible latency constraint that must be taken into account
+ * too):
*
- * Thus this governor estimates whether or not the upcoming idle time of the CPU
- * is likely to be significantly shorter than the sleep length and selects an
- * idle state for it in accordance with that, as follows:
+ * 1. Find the deepest CPU idle state whose target residency does not exceed
+ * the current sleep length (the candidate idle state) and compute 3 sums as
+ * follows:
*
- * - Find an idle state on the basis of the sleep length and state statistics
- * collected over time:
+ * - The sum of the "hits" and "intercepts" metrics for the candidate state
+ * and all of the deeper idle states (it represents the cases in which the
+ * CPU was idle long enough to avoid being intercepted if the sleep length
+ * had been equal to the current one).
*
- * o Find the deepest idle state whose target residency is less than or equal
- * to the sleep length.
+ * - The sum of the "intercepts" metrics for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was not
+ * idle long enough to avoid being intercepted if the sleep length had been
+ * equal to the current one).
*
- * o Select it if it matched both the sleep length and the observed idle
- * duration in the past more often than it matched the sleep length alone
- * (i.e. the observed idle duration was significantly shorter than the sleep
- * length matched by it).
+ * - The sum of the numbers of recent intercepts for all of the idle states
+ * shallower than the candidate one.
*
- * o Otherwise, select the shallower state with the greatest matched "early"
- * wakeups metric.
+ * 2. If the second sum is greater than the first one or the third sum is
+ * greater than %NR_RECENT / 2, the CPU is likely to wake up early, so look
+ * for an alternative idle state to select.
*
- * - If the majority of the most recent idle duration values are below the
- * target residency of the idle state selected so far, use those values to
- * compute the new expected idle duration and find an idle state matching it
- * (which has to be shallower than the one selected so far).
+ * - Traverse the idle states shallower than the candidate one in the
+ * descending order.
+ *
+ * - For each of them compute the sum of the "intercepts" metrics and the sum
+ * of the numbers of recent intercepts over all of the idle states between
+ * it and the candidate one (including the former and excluding the
+ * latter).
+ *
+ * - If each of these sums that needs to be taken into account (because the
+ * check related to it has indicated that the CPU is likely to wake up
+ * early) is greater than a half of the corresponding sum computed in step
+ * 1 (which means that the target residency of the state in question had
+ * not exceeded the idle duration in over a half of the relevant cases),
+ * select the given idle state instead of the candidate one.
+ *
+ * 3. By default, select the candidate state.
*/
#include <linux/cpuidle.h>
@@ -60,65 +116,51 @@
/*
* Number of the most recent idle duration values to take into consideration for
- * the detection of wakeup patterns.
+ * the detection of recent early wakeup patterns.
*/
-#define INTERVALS 8
+#define NR_RECENT 9
/**
- * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
- * @early_hits: "Early" CPU wakeups "matching" this state.
- * @hits: "On time" CPU wakeups "matching" this state.
- * @misses: CPU wakeups "missing" this state.
- *
- * A CPU wakeup is "matched" by a given idle state if the idle duration measured
- * after the wakeup is between the target residency of that state and the target
- * residency of the next one (or if this is the deepest available idle state, it
- * "matches" a CPU wakeup when the measured idle duration is at least equal to
- * its target residency).
- *
- * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
- * it occurs significantly earlier than the closest expected timer event (that
- * is, early enough to match an idle state shallower than the one matching the
- * time till the closest timer event). Otherwise, the wakeup is "on time", or
- * it is a "hit".
- *
- * A "miss" occurs when the given state doesn't match the wakeup, but it matches
- * the time till the closest timer event used for idle state selection.
+ * struct teo_bin - Metrics used by the TEO cpuidle governor.
+ * @intercepts: The "intercepts" metric.
+ * @hits: The "hits" metric.
+ * @recent: The number of recent "intercepts".
*/
-struct teo_idle_state {
- unsigned int early_hits;
+struct teo_bin {
+ unsigned int intercepts;
unsigned int hits;
- unsigned int misses;
+ unsigned int recent;
};
/**
* struct teo_cpu - CPU data used by the TEO cpuidle governor.
* @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
- * @states: Idle states data corresponding to this CPU.
- * @interval_idx: Index of the most recent saved idle interval.
- * @intervals: Saved idle duration values.
+ * @state_bins: Idle state data bins for this CPU.
+ * @total:	Grand total of the "intercepts" and "hits" metrics for all bins.
+ * @next_recent_idx: Index of the next @recent_idx entry to update.
+ * @recent_idx: Indices of bins corresponding to recent "intercepts".
*/
struct teo_cpu {
s64 time_span_ns;
s64 sleep_length_ns;
- struct teo_idle_state states[CPUIDLE_STATE_MAX];
- int interval_idx;
- u64 intervals[INTERVALS];
+ struct teo_bin state_bins[CPUIDLE_STATE_MAX];
+ unsigned int total;
+ int next_recent_idx;
+ int recent_idx[NR_RECENT];
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
- * teo_update - Update CPU data after wakeup.
+ * teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
*/
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- int i, idx_hit = 0, idx_timer = 0;
- unsigned int hits, misses;
+ int i, idx_timer = 0, idx_duration = 0;
u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
@@ -151,53 +193,52 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
measured_ns /= 2;
}
+ cpu_data->total = 0;
+
/*
- * Decay the "early hits" metric for all of the states and find the
- * states matching the sleep length and the measured idle duration.
+ * Decay the "hits" and "intercepts" metrics for all of the bins and
+ * find the bins that the sleep length and the measured idle duration
+ * fall into.
*/
for (i = 0; i < drv->state_count; i++) {
- unsigned int early_hits = cpu_data->states[i].early_hits;
+ s64 target_residency_ns = drv->states[i].target_residency_ns;
+ struct teo_bin *bin = &cpu_data->state_bins[i];
- cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+ bin->hits -= bin->hits >> DECAY_SHIFT;
+ bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
- if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
+ cpu_data->total += bin->hits + bin->intercepts;
+
+ if (target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i;
- if (drv->states[i].target_residency_ns <= measured_ns)
- idx_hit = i;
+ if (target_residency_ns <= measured_ns)
+ idx_duration = i;
}
}
- /*
- * Update the "hits" and "misses" data for the state matching the sleep
- * length. If it matches the measured idle duration too, this is a hit,
- * so increase the "hits" metric for it then. Otherwise, this is a
- * miss, so increase the "misses" metric for it. In the latter case
- * also increase the "early hits" metric for the state that actually
- * matches the measured idle duration.
- */
- hits = cpu_data->states[idx_timer].hits;
- hits -= hits >> DECAY_SHIFT;
+ i = cpu_data->next_recent_idx++;
+ if (cpu_data->next_recent_idx >= NR_RECENT)
+ cpu_data->next_recent_idx = 0;
- misses = cpu_data->states[idx_timer].misses;
- misses -= misses >> DECAY_SHIFT;
+ if (cpu_data->recent_idx[i] >= 0)
+ cpu_data->state_bins[cpu_data->recent_idx[i]].recent--;
- if (idx_timer == idx_hit) {
- hits += PULSE;
+ /*
+ * If the measured idle duration falls into the same bin as the sleep
+ * length, this is a "hit", so update the "hits" metric for that bin.
+ * Otherwise, update the "intercepts" metric for the bin fallen into by
+ * the measured idle duration.
+ */
+ if (idx_timer == idx_duration) {
+ cpu_data->state_bins[idx_timer].hits += PULSE;
+ cpu_data->recent_idx[i] = -1;
} else {
- misses += PULSE;
- cpu_data->states[idx_hit].early_hits += PULSE;
+ cpu_data->state_bins[idx_duration].intercepts += PULSE;
+ cpu_data->state_bins[idx_duration].recent++;
+ cpu_data->recent_idx[i] = idx_duration;
}
- cpu_data->states[idx_timer].misses = misses;
- cpu_data->states[idx_timer].hits = hits;
-
- /*
- * Save idle duration values corresponding to non-timer wakeups for
- * pattern detection.
- */
- cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
- if (cpu_data->interval_idx >= INTERVALS)
- cpu_data->interval_idx = 0;
+ cpu_data->total += PULSE;
}
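A quick aside on the arithmetic above: each bin counter is an exponentially decaying sum that loses 1/2^DECAY_SHIFT of its value per event and gains PULSE when reinforced. A minimal standalone sketch (plain userspace C; the DECAY_SHIFT and PULSE values are assumptions, intended to match the definitions earlier in teo.c) shows where such a counter settles:

	#include <stdio.h>

	#define DECAY_SHIFT 3	/* assumed; defined earlier in teo.c */
	#define PULSE 1024	/* assumed; defined earlier in teo.c */

	int main(void)
	{
		unsigned int hits = 0;
		int i;

		/* Reinforce the counter on every event: it converges toward
		 * PULSE << DECAY_SHIFT, and old events fade geometrically.
		 */
		for (i = 0; i < 64; i++) {
			hits -= hits >> DECAY_SHIFT;
			hits += PULSE;
		}
		printf("after 64 events: %u (PULSE << DECAY_SHIFT = %u)\n",
		       hits, PULSE << DECAY_SHIFT);
		return 0;
	}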
static bool teo_time_ok(u64 interval_ns)
@@ -205,6 +246,12 @@ static bool teo_time_ok(u64 interval_ns)
return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
}
+static s64 teo_middle_of_bin(int idx, struct cpuidle_driver *drv)
+{
+ return (drv->states[idx].target_residency_ns +
+ drv->states[idx+1].target_residency_ns) / 2;
+}
+
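As a worked example (values assumed for illustration): if state idx has a target residency of 100000 ns and state idx + 1 has 500000 ns, teo_middle_of_bin() returns 300000 ns, a representative duration for the idle-time range that bin covers.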
/**
* teo_find_shallower_state - Find shallower idle state matching given duration.
* @drv: cpuidle driver containing state data.
@@ -240,10 +287,18 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
- int max_early_idx, prev_max_early_idx, constraint_idx, idx0, idx, i;
- unsigned int hits, misses, early_hits;
+ unsigned int idx_intercept_sum = 0;
+ unsigned int intercept_sum = 0;
+ unsigned int idx_recent_sum = 0;
+ unsigned int recent_sum = 0;
+ unsigned int idx_hit_sum = 0;
+ unsigned int hit_sum = 0;
+ int constraint_idx = 0;
+ int idx0 = 0, idx = -1;
+ bool alt_intercepts, alt_recent;
ktime_t delta_tick;
s64 duration_ns;
+ int i;
if (dev->last_state_idx >= 0) {
teo_update(drv, dev);
@@ -255,171 +310,136 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
duration_ns = tick_nohz_get_sleep_length(&delta_tick);
cpu_data->sleep_length_ns = duration_ns;
- hits = 0;
- misses = 0;
- early_hits = 0;
- max_early_idx = -1;
- prev_max_early_idx = -1;
- constraint_idx = drv->state_count;
- idx = -1;
- idx0 = idx;
+ /* Check if there is any choice in the first place. */
+ if (drv->state_count < 2) {
+ idx = 0;
+ goto end;
+ }
+ if (!dev->states_usage[0].disable) {
+ idx = 0;
+ if (drv->states[1].target_residency_ns > duration_ns)
+ goto end;
+ }
- for (i = 0; i < drv->state_count; i++) {
+ /*
+ * Find the deepest idle state whose target residency does not exceed
+ * the current sleep length and the deepest idle state not deeper than
+ * the former whose exit latency does not exceed the current latency
+ * constraint. Compute the sums of metrics for early wakeup pattern
+ * detection.
+ */
+ for (i = 1; i < drv->state_count; i++) {
+ struct teo_bin *prev_bin = &cpu_data->state_bins[i-1];
struct cpuidle_state *s = &drv->states[i];
- if (dev->states_usage[i].disable) {
- /*
- * Ignore disabled states with target residencies beyond
- * the anticipated idle duration.
- */
- if (s->target_residency_ns > duration_ns)
- continue;
-
- /*
- * This state is disabled, so the range of idle duration
- * values corresponding to it is covered by the current
- * candidate state, but still the "hits" and "misses"
- * metrics of the disabled state need to be used to
- * decide whether or not the state covering the range in
- * question is good enough.
- */
- hits = cpu_data->states[i].hits;
- misses = cpu_data->states[i].misses;
-
- if (early_hits >= cpu_data->states[i].early_hits ||
- idx < 0)
- continue;
-
- /*
- * If the current candidate state has been the one with
- * the maximum "early hits" metric so far, the "early
- * hits" metric of the disabled state replaces the
- * current "early hits" count to avoid selecting a
- * deeper state with lower "early hits" metric.
- */
- if (max_early_idx == idx) {
- early_hits = cpu_data->states[i].early_hits;
- continue;
- }
-
- /*
- * The current candidate state is closer to the disabled
- * one than the current maximum "early hits" state, so
- * replace the latter with it, but in case the maximum
- * "early hits" state index has not been set so far,
- * check if the current candidate state is not too
- * shallow for that role.
- */
- if (teo_time_ok(drv->states[idx].target_residency_ns)) {
- prev_max_early_idx = max_early_idx;
- early_hits = cpu_data->states[i].early_hits;
- max_early_idx = idx;
- }
+ /*
+ * Update the sums of idle state metrics for all of the states
+ * shallower than the current one.
+ */
+ intercept_sum += prev_bin->intercepts;
+ hit_sum += prev_bin->hits;
+ recent_sum += prev_bin->recent;
+ if (dev->states_usage[i].disable)
continue;
- }
if (idx < 0) {
idx = i; /* first enabled state */
- hits = cpu_data->states[i].hits;
- misses = cpu_data->states[i].misses;
idx0 = i;
}
if (s->target_residency_ns > duration_ns)
break;
- if (s->exit_latency_ns > latency_req && constraint_idx > i)
+ idx = i;
+
+ if (s->exit_latency_ns <= latency_req)
constraint_idx = i;
- idx = i;
- hits = cpu_data->states[i].hits;
- misses = cpu_data->states[i].misses;
-
- if (early_hits < cpu_data->states[i].early_hits &&
- teo_time_ok(drv->states[i].target_residency_ns)) {
- prev_max_early_idx = max_early_idx;
- early_hits = cpu_data->states[i].early_hits;
- max_early_idx = i;
- }
+ idx_intercept_sum = intercept_sum;
+ idx_hit_sum = hit_sum;
+ idx_recent_sum = recent_sum;
}
- /*
- * If the "hits" metric of the idle state matching the sleep length is
- * greater than its "misses" metric, that is the one to use. Otherwise,
- * it is more likely that one of the shallower states will match the
- * idle duration observed after wakeup, so take the one with the maximum
- * "early hits" metric, but if that cannot be determined, just use the
- * state selected so far.
- */
- if (hits <= misses) {
- /*
- * The current candidate state is not suitable, so take the one
- * whose "early hits" metric is the maximum for the range of
- * shallower states.
- */
- if (idx == max_early_idx)
- max_early_idx = prev_max_early_idx;
-
- if (max_early_idx >= 0) {
- idx = max_early_idx;
- duration_ns = drv->states[idx].target_residency_ns;
- }
+ /* Avoid unnecessary overhead. */
+ if (idx < 0) {
+ idx = 0; /* No states enabled, must use 0. */
+ goto end;
+ } else if (idx == idx0) {
+ goto end;
}
/*
- * If there is a latency constraint, it may be necessary to use a
- * shallower idle state than the one selected so far.
+ * If the sum of the intercepts metric for all of the idle states
+ * shallower than the current candidate one (idx) is greater than the
+ * sum of the intercepts and hits metrics for the candidate state and
+ * all of the deeper states, or the sum of the numbers of recent
+ * intercepts over all of the states shallower than the candidate one
+ * is greater than a half of the number of recent events taken into
+ * account, the CPU is likely to wake up early, so find an alternative
+ * idle state to select.
*/
- if (constraint_idx < idx)
- idx = constraint_idx;
-
- if (idx < 0) {
- idx = 0; /* No states enabled. Must use 0. */
- } else if (idx > idx0) {
- unsigned int count = 0;
- u64 sum = 0;
+ alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
+ alt_recent = idx_recent_sum > NR_RECENT / 2;
+ if (alt_recent || alt_intercepts) {
+ s64 last_enabled_span_ns = duration_ns;
+ int last_enabled_idx = idx;
/*
- * The target residencies of at least two different enabled idle
- * states are less than or equal to the current expected idle
- * duration. Try to refine the selection using the most recent
- * measured idle duration values.
+ * Look for the deepest idle state whose target residency had
+ * not exceeded the idle duration in over a half of the relevant
+ * cases (both with respect to intercepts overall and with
+ * respect to the recent intercepts only) in the past.
*
- * Count and sum the most recent idle duration values less than
- * the current expected idle duration value.
+ * Take into account any latency constraint and the duration
+ * limitation imposed if the tick has already been stopped.
*/
- for (i = 0; i < INTERVALS; i++) {
- u64 val = cpu_data->intervals[i];
+ intercept_sum = 0;
+ recent_sum = 0;
+
+ for (i = idx - 1; i >= idx0; i--) {
+ struct teo_bin *bin = &cpu_data->state_bins[i];
+ s64 span_ns;
- if (val >= duration_ns)
+ intercept_sum += bin->intercepts;
+ recent_sum += bin->recent;
+
+ if (dev->states_usage[i].disable)
continue;
- count++;
- sum += val;
- }
+ span_ns = teo_middle_of_bin(i, drv);
+ if (!teo_time_ok(span_ns)) {
+ /*
+ * The current state is too shallow, so select
+ * the first enabled deeper state.
+ */
+ duration_ns = last_enabled_span_ns;
+ idx = last_enabled_idx;
+ break;
+ }
- /*
- * Give up unless the majority of the most recent idle duration
- * values are in the interesting range.
- */
- if (count > INTERVALS / 2) {
- u64 avg_ns = div64_u64(sum, count);
-
- /*
- * Avoid spending too much time in an idle state that
- * would be too shallow.
- */
- if (teo_time_ok(avg_ns)) {
- duration_ns = avg_ns;
- if (drv->states[idx].target_residency_ns > avg_ns)
- idx = teo_find_shallower_state(drv, dev,
- idx, avg_ns);
+ if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
+ (!alt_intercepts ||
+ 2 * intercept_sum > idx_intercept_sum)) {
+ idx = i;
+ duration_ns = span_ns;
+ break;
}
+
+ last_enabled_span_ns = span_ns;
+ last_enabled_idx = i;
}
}
/*
+ * If there is a latency constraint, it may be necessary to select an
+ * idle state shallower than the current candidate one.
+ */
+ if (idx > constraint_idx)
+ idx = constraint_idx;
+
+end:
+ /*
* Don't stop the tick if the selected state is a polling one or if the
* expected idle duration is shorter than the tick period length.
*/
@@ -478,8 +498,8 @@ static int teo_enable_device(struct cpuidle_driver *drv,
memset(cpu_data, 0, sizeof(*cpu_data));
- for (i = 0; i < INTERVALS; i++)
- cpu_data->intervals[i] = U64_MAX;
+ for (i = 0; i < NR_RECENT; i++)
+ cpu_data->recent_idx[i] = -1;
return 0;
}
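To make the heuristic described in the comments above concrete, here is a toy userspace sketch of the two checks that trigger the search for a shallower state; all numbers are invented, and NR_RECENT is assumed to match the governor's definition:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_RECENT 9	/* assumed to match the governor's define */

	int main(void)
	{
		/* all numbers invented for illustration */
		unsigned int total = 10000;            /* decayed hits + intercepts, all bins */
		unsigned int idx_intercept_sum = 4200; /* intercepts below the candidate */
		unsigned int idx_hit_sum = 2500;       /* hits below the candidate */
		unsigned int idx_recent_sum = 5;       /* recent intercepts below the candidate */

		bool alt_intercepts = 2 * idx_intercept_sum > total - idx_hit_sum;
		bool alt_recent = idx_recent_sum > NR_RECENT / 2;

		/* either condition alone is enough to look shallower */
		printf("look for a shallower state: %d\n",
		       alt_recent || alt_intercepts);
		return 0;
	}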
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9a4c275a1335..ebcec460c045 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -266,6 +266,27 @@ config CRYPTO_DEV_NIAGARA2
Group, which can perform encryption, decryption, hashing,
checksumming, and raw copies.
+config CRYPTO_DEV_SL3516
+ tristate "Stormlink SL3516 crypto offloader"
+ depends on HAS_IOMEM
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select HW_RANDOM
+ depends on PM
+ help
+ This option allows you to have support for the SL3516 crypto offloader.
+
+config CRYPTO_DEV_SL3516_DEBUG
+ bool "Enable SL3516 stats"
+ depends on CRYPTO_DEV_SL3516
+ depends on DEBUG_FS
+ help
+ Say y to enable SL3516 debug stats.
+ This will create /sys/kernel/debug/sl3516/stats for displaying
+ the number of requests per algorithm and other internal stats.
+
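A minimal userspace sketch for dumping the counters the help text above describes, assuming debugfs is mounted at /sys/kernel/debug:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/sl3516/stats", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* print the per-algorithm and internal stats as-is */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}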
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_LIB_DES
@@ -325,6 +346,11 @@ config CRYPTO_DEV_TALITOS2
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -627,6 +653,12 @@ config CRYPTO_DEV_QCE_SHA
select CRYPTO_SHA1
select CRYPTO_SHA256
+config CRYPTO_DEV_QCE_AEAD
+ bool
+ depends on CRYPTO_DEV_QCE
+ select CRYPTO_AUTHENC
+ select CRYPTO_LIB_DES
+
choice
prompt "Algorithms enabled for QCE acceleration"
default CRYPTO_DEV_QCE_ENABLE_ALL
@@ -647,6 +679,7 @@ choice
bool "All supported algorithms"
select CRYPTO_DEV_QCE_SKCIPHER
select CRYPTO_DEV_QCE_SHA
+ select CRYPTO_DEV_QCE_AEAD
help
Enable all supported algorithms:
- AES (CBC, CTR, ECB, XTS)
@@ -672,6 +705,14 @@ choice
- SHA1, HMAC-SHA1
- SHA256, HMAC-SHA256
+ config CRYPTO_DEV_QCE_ENABLE_AEAD
+ bool "AEAD algorithms only"
+ select CRYPTO_DEV_QCE_AEAD
+ help
+ Enable AEAD algorithms only:
+ - authenc()
+ - ccm(aes)
+ - rfc4309(ccm(aes))
endchoice
config CRYPTO_DEV_QCE_SW_MAX_LEN
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fa22cb19e242..1fe5120eb966 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ee42e8a245..8c32d0eb8fcf 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -401,7 +401,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}
-/**
+/*
* Ensure all cores are disengaged from all groups by
* calling cpt_disable_all_cores() before calling this
* function.
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 4fe7898c8561..153004bdfb5c 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -9,8 +9,8 @@
/**
* get_free_pending_entry - get free entry from pending queue
- * @param pqinfo: pending_qinfo structure
- * @param qno: queue number
+ * @q: pending queue
+ * @qlen: queue length
*/
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
int qlen)
@@ -244,11 +244,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
memcpy(ent, (void *)cmd, qinfo->cmd_size);
if (++queue->idx >= queue->qhead->size / 64) {
- struct hlist_node *node;
-
- hlist_for_each(node, &queue->chead) {
- chunk = hlist_entry(node, struct command_chunk,
- nextchunk);
+ hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
if (chunk == queue->qhead) {
continue;
} else {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index c288c4b51783..f19e520da6d0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -307,6 +307,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
* Entry 192: NPS_CORE_INT_ACTIVE
*/
nr_vecs = pci_msix_vec_count(pdev);
+ if (nr_vecs < 0) {
+ dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
+ return nr_vecs;
+ }
/* Enable MSI-X */
ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index d385daf2c71c..96bc7b5c6532 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -35,7 +35,7 @@ static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;
-/**
+/*
* nitrox_pci_tbl - PCI Device ID Table
*/
static const struct pci_device_id nitrox_pci_tbl[] = {
@@ -65,7 +65,7 @@ struct ucode {
u64 code[];
};
-/**
+/*
* write_to_ucd_unit - Write Firmware to NITROX UCD unit
*/
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
@@ -424,8 +424,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
- pci_disable_device(pdev);
- return err;
+ goto flr_fail;
}
if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
@@ -434,16 +433,13 @@ static int nitrox_probe(struct pci_dev *pdev,
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed\n");
- pci_disable_device(pdev);
- return err;
+ goto flr_fail;
}
}
err = pci_request_mem_regions(pdev, nitrox_driver_name);
- if (err) {
- pci_disable_device(pdev);
- return err;
- }
+ if (err)
+ goto flr_fail;
pci_set_master(pdev);
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
@@ -479,7 +475,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = nitrox_pf_sw_init(ndev);
if (err)
- goto ioremap_err;
+ goto pf_sw_fail;
err = nitrox_pf_hw_init(ndev);
if (err)
@@ -509,12 +505,15 @@ crypto_fail:
smp_mb__after_atomic();
pf_hw_fail:
nitrox_pf_sw_cleanup(ndev);
+pf_sw_fail:
+ iounmap(ndev->bar_addr);
ioremap_err:
nitrox_remove_from_devlist(ndev);
kfree(ndev);
pci_set_drvdata(pdev, NULL);
ndev_fail:
pci_release_mem_regions(pdev);
+flr_fail:
pci_disable_device(pdev);
return err;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index c1af9d4fca6e..2e9c0d214363 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -8,7 +8,7 @@
#define RING_TO_VFNO(_x, _y) ((_x) / (_y))
-/**
+/*
* mbx_msg_type - Mailbox message types
*/
enum mbx_msg_type {
@@ -18,7 +18,7 @@ enum mbx_msg_type {
MBX_MSG_TYPE_NACK,
};
-/**
+/*
* mbx_msg_opcode - Mailbox message opcodes
*/
enum mbx_msg_opcode {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index df95ba26b414..55c18da4a500 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -19,7 +19,7 @@
#define REQ_BACKLOG 2
#define REQ_POSTED 3
-/**
+/*
* Response codes from SE microcode
* 0x00 - Success
* Completion with no error
@@ -159,7 +159,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
struct se_crypto_request *req)
{
struct device *dev = DEV(sr->ndev);
- struct scatterlist *sg = req->src;
+ struct scatterlist *sg;
int i, nents, ret = 0;
nents = dma_map_sg(dev, req->src, sg_nents(req->src),
@@ -279,6 +279,7 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
/**
* post_se_instr - Post SE instruction to Packet Input ring
* @sr: Request structure
+ * @cmdq: Command queue structure
*
* Returns 0 if successful or a negative error code,
* if no space in ring.
@@ -369,9 +370,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
}
/**
- * nitrox_se_request - Send request to SE core
+ * nitrox_process_se_request - Send request to SE core
* @ndev: NITROX device
* @req: Crypto request
+ * @callback: Completion callback
+ * @cb_arg: Completion callback arguments
*
* Returns 0 on success, or a negative error code.
*/
@@ -526,9 +529,8 @@ static bool sr_completed(struct nitrox_softreq *sr)
}
/**
- * process_request_list - process completed requests
- * @ndev: N5 device
- * @qno: queue to operate
+ * process_response_list - process completed requests
+ * @cmdq: Command queue structure
*
* Returns the number of responses processed.
*/
@@ -578,7 +580,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
}
-/**
+/*
* pkt_slc_resp_tasklet - post processing of SE responses
*/
void pkt_slc_resp_tasklet(unsigned long data)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index a553ac65f324..248b4fff1c72 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -20,7 +20,7 @@ struct nitrox_cipher {
enum flexi_cipher value;
};
-/**
+/*
* supported cipher list
*/
static const struct nitrox_cipher flexi_cipher_table[] = {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 6777582aa1ce..9ce4b68e9c48 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -470,7 +470,7 @@ int ccp_cmd_queue_thread(void *data)
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
- * @dev: device struct of the CCP
+ * @sp: sp_device struct of the CCP
*/
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 0770a83bf1a5..d718db224be4 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -307,8 +307,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
spin_lock_irqsave(&chan->lock, flags);
cookie = dma_cookie_assign(tx_desc);
- list_del(&desc->entry);
- list_add_tail(&desc->entry, &chan->pending);
+ list_move_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3506b2050fb8..91808402e0bf 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -43,6 +43,10 @@ static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+
static bool psp_dead;
static int psp_timeout;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f468594ef8af..6fb6ba35f89d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -222,7 +222,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
- goto e_err;
+ goto free_irqs;
}
}
@@ -230,10 +230,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = sp_init(sp);
if (ret)
- goto e_err;
+ goto free_irqs;
return 0;
+free_irqs:
+ sp_free_irqs(sp);
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
diff --git a/drivers/crypto/gemini/Makefile b/drivers/crypto/gemini/Makefile
new file mode 100644
index 000000000000..c73c8b69260d
--- /dev/null
+++ b/drivers/crypto/gemini/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SL3516) += sl3516-ce.o
+sl3516-ce-y += sl3516-ce-core.o sl3516-ce-cipher.o sl3516-ce-rng.o
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
new file mode 100644
index 000000000000..b41c2f5fc495
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-cipher.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ *
+ * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * This file adds support for the AES cipher with 128, 192 and 256-bit
+ * keys in ECB mode.
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include "sl3516-ce.h"
+
+/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
+static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16) {
+ ce->fallback_mod16++;
+ return true;
+ }
+
+ /*
+ * Check if we have enough descriptors for TX.
+ * Note: TX needs one control descriptor for each SG.
+ */
+ if (sg_nents(areq->src) > MAXDESC / 2) {
+ ce->fallback_sg_count_tx++;
+ return true;
+ }
+ /* check if we have enough descriptors for RX */
+ if (sg_nents(areq->dst) > MAXDESC) {
+ ce->fallback_sg_count_rx++;
+ return true;
+ }
+
+ sg = areq->src;
+ while (sg) {
+ if ((sg->length % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if ((sg_dma_len(sg) % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ ce->fallback_align16++;
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if ((sg->length % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if ((sg_dma_len(sg) % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ ce->fallback_align16++;
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+
+ /* need the same number of SGs (with the same lengths) for source and destination */
+ in_sg = areq->src;
+ out_sg = areq->dst;
+ while (in_sg && out_sg) {
+ if (in_sg->length != out_sg->length) {
+ ce->fallback_not_same_len++;
+ return true;
+ }
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ }
+ if (in_sg || out_sg)
+ return true;
+
+ return false;
+}
+
+static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sl3516_ce_alg_template *algt;
+ int err;
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt->stat_fb++;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir == CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
+static int sl3516_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sl3516_ce_alg_template *algt;
+ struct scatterlist *sg;
+ unsigned int todo, len;
+ struct pkt_control_ecb *ecb;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+ int i;
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+
+ dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+ algt->stat_req++;
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->src;
+ while (i < nr_sgs && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgs_next;
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo;
+ dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgs_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->dst;
+ while (i < nr_sgd && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgd_next;
+ rctx->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_dst[i].len = todo;
+ dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+
+sgd_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ switch (algt->mode) {
+ case ECB_AES:
+ rctx->pctrllen = sizeof(struct pkt_control_ecb);
+ ecb = (struct pkt_control_ecb *)ce->pctrl;
+
+ rctx->tqflag = TQ0_TYPE_CTRL;
+ rctx->tqflag |= TQ1_CIPHER;
+ ecb->control.op_mode = rctx->op_dir;
+ ecb->control.cipher_algorithm = ECB_AES;
+ ecb->cipher.header_len = 0;
+ ecb->cipher.algorithm_len = areq->cryptlen;
+ cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
+ rctx->h = &ecb->cipher;
+
+ rctx->tqflag |= TQ4_KEY0;
+ rctx->tqflag |= TQ5_KEY4;
+ rctx->tqflag |= TQ6_KEY6;
+ ecb->control.aesnk = op->keylen / 4;
+ break;
+ }
+
+ rctx->nr_sgs = nr_sgs;
+ rctx->nr_sgd = nr_sgd;
+ err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ }
+
+theend:
+
+ return err;
+}
+
+static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sl3516_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sl3516_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+
+ memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
+ rctx->op_dir = CE_DECRYPTION;
+
+ if (sl3516_ce_need_fallback(areq))
+ return sl3516_ce_cipher_fallback(areq);
+
+ engine = op->ce->engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sl3516_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+
+ memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
+ rctx->op_dir = CE_ENCRYPTION;
+
+ if (sl3516_ce_need_fallback(areq))
+ return sl3516_ce_cipher_fallback(areq);
+
+ engine = op->ce->engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sl3516_ce_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ op->ce = algt->ce;
+
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+
+ op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ kfree_sensitive(op->key);
+ crypto_free_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync_suspend(op->ce->dev);
+}
+
+int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
+ return -EINVAL;
+ }
+ kfree_sensitive(op->key);
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
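For context, a kernel-side user reaches the setkey path above through the generic skcipher API. A hedged sketch (error handling trimmed; the example function name is made up, and "ecb(aes)" only resolves to this driver when it provides the highest-priority implementation):

	#include <linux/err.h>
	#include <linux/types.h>
	#include <crypto/skcipher.h>

	static int example_aes_ecb_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_skcipher *tfm;
		int err;

		/* "ecb(aes)" resolves to ecb-aes-sl3516 only when this
		 * driver is the highest-priority provider.
		 */
		tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* lands in sl3516_ce_aes_setkey() via the skcipher ops */
		err = crypto_skcipher_setkey(tfm, key, keylen);

		crypto_free_skcipher(tfm);
		return err;
	}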
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
new file mode 100644
index 000000000000..da6cd529a6c0
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-core.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ *
+ * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sl3516-ce.h"
+
+static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
+{
+ const size_t sz = sizeof(struct descriptor) * MAXDESC;
+ int i;
+
+ ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
+ if (!ce->tx)
+ return -ENOMEM;
+ ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
+ if (!ce->rx)
+ goto err_rx;
+
+ for (i = 0; i < MAXDESC; i++) {
+ ce->tx[i].frame_ctrl.bits.own = CE_CPU;
+ ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
+ }
+ ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;
+
+ for (i = 0; i < MAXDESC; i++) {
+ ce->rx[i].frame_ctrl.bits.own = CE_CPU;
+ ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
+ }
+ ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;
+
+ ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
+ &ce->dctrl, GFP_KERNEL);
+ if (!ce->pctrl)
+ goto err_pctrl;
+
+ return 0;
+err_pctrl:
+ dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
+err_rx:
+ dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
+ return -ENOMEM;
+}
+
+static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
+{
+ const size_t sz = sizeof(struct descriptor) * MAXDESC;
+
+ dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
+ dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
+ dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
+ ce->dctrl);
+}
+
+static void start_dma_tx(struct sl3516_ce_dev *ce)
+{
+ u32 v;
+
+ v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE | \
+ TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;
+
+ writel(v, ce->base + IPSEC_TXDMA_CTRL);
+}
+
+static void start_dma_rx(struct sl3516_ce_dev *ce)
+{
+ u32 v;
+
+ v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE | \
+ RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH | \
+ RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR | \
+ RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;
+
+ writel(v, ce->base + IPSEC_RXDMA_CTRL);
+}
+
+static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
+{
+ struct descriptor *dd;
+
+ dd = &ce->tx[ce->ctx];
+ ce->ctx++;
+ if (ce->ctx >= MAXDESC)
+ ce->ctx = 0;
+ return dd;
+}
+
+static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
+{
+ struct descriptor *rdd;
+
+ rdd = &ce->rx[ce->crx];
+ ce->crx++;
+ if (ce->crx >= MAXDESC)
+ ce->crx = 0;
+ return rdd;
+}
+
+int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
+ const char *name)
+{
+ struct descriptor *dd, *rdd = NULL;
+ u32 v;
+ int i, err = 0;
+
+ ce->stat_req++;
+
+ reinit_completion(&ce->complete);
+ ce->status = 0;
+
+ for (i = 0; i < rctx->nr_sgd; i++) {
+ dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
+ i, rctx->nr_sgd, rctx->t_dst[i].len);
+ rdd = get_desc_rx(ce);
+ rdd->buf_adr = rctx->t_dst[i].addr;
+ rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
+ rdd->frame_ctrl.bits.own = CE_DMA;
+ }
+ rdd->next_desc.bits.eofie = 1;
+
+ for (i = 0; i < rctx->nr_sgs; i++) {
+ dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
+ i, rctx->nr_sgs, rctx->t_src[i].len);
+ rctx->h->algorithm_len = rctx->t_src[i].len;
+
+ dd = get_desc_tx(ce);
+ dd->frame_ctrl.raw = 0;
+ dd->flag_status.raw = 0;
+ dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
+ dd->buf_adr = ce->dctrl;
+ dd->flag_status.tx_flag.tqflag = rctx->tqflag;
+ dd->next_desc.bits.eofie = 0;
+ dd->next_desc.bits.dec = 0;
+ dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
+ dd->frame_ctrl.bits.own = CE_DMA;
+
+ dd = get_desc_tx(ce);
+ dd->frame_ctrl.raw = 0;
+ dd->flag_status.raw = 0;
+ dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
+ dd->buf_adr = rctx->t_src[i].addr;
+ dd->flag_status.tx_flag.tqflag = 0;
+ dd->next_desc.bits.eofie = 0;
+ dd->next_desc.bits.dec = 0;
+ dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
+ dd->frame_ctrl.bits.own = CE_DMA;
+ start_dma_tx(ce);
+ start_dma_rx(ce);
+ }
+ wait_for_completion_interruptible_timeout(&ce->complete,
+ msecs_to_jiffies(5000));
+ if (ce->status == 0) {
+ dev_err(ce->dev, "DMA timeout for %s\n", name);
+ err = -EFAULT;
+ }
+ v = readl(ce->base + IPSEC_STATUS_REG);
+ if (v & 0xFFF) {
+ dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
+ u32 v;
+
+ ce->stat_irq++;
+
+ v = readl(ce->base + IPSEC_DMA_STATUS);
+ writel(v, ce->base + IPSEC_DMA_STATUS);
+
+ if (v & DMA_STATUS_TS_DERR)
+ dev_err(ce->dev, "AHB bus Error While Tx !!!\n");
+ if (v & DMA_STATUS_TS_PERR)
+ dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n");
+ if (v & DMA_STATUS_RS_DERR)
+ dev_err(ce->dev, "AHB bus Error While Rx !!!\n");
+ if (v & DMA_STATUS_RS_PERR)
+ dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n");
+
+ if (v & DMA_STATUS_TS_EOFI)
+ ce->stat_irq_tx++;
+ if (v & DMA_STATUS_RS_EOFI) {
+ ce->status = 1;
+ complete(&ce->complete);
+ ce->stat_irq_rx++;
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sl3516_ce_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .mode = ECB_AES,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sl3516",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sl3516_ce_cipher_init,
+ .cra_exit = sl3516_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sl3516_ce_aes_setkey,
+ .encrypt = sl3516_ce_skencrypt,
+ .decrypt = sl3516_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct sl3516_ce_dev *ce = seq->private;
+ unsigned int i;
+
+ seq_printf(seq, "HWRNG %lu %lu\n",
+ ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+ seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
+ seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
+ seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
+ seq_printf(seq, "nreq %lu\n", ce->stat_req);
+ seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
+ seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
+ seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
+ seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
+ seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
+#endif
+
+static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ce = ce;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "DEBUG: Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ce->dev, "Fail to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ default:
+ ce_algs[i].ce = NULL;
+ dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static void sl3516_ce_start(struct sl3516_ce_dev *ce)
+{
+ ce->ctx = 0;
+ ce->crx = 0;
+ writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
+ writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
+ writel(0, ce->base + IPSEC_DMA_STATUS);
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms provided by this driver.
+ */
+static int sl3516_ce_pm_suspend(struct device *dev)
+{
+ struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
+
+ reset_control_assert(ce->reset);
+ clk_disable_unprepare(ce->clks);
+ return 0;
+}
+
+static int sl3516_ce_pm_resume(struct device *dev)
+{
+ struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ce->clks);
+ if (err) {
+ dev_err(ce->dev, "Cannot prepare_enable\n");
+ goto error;
+ }
+ err = reset_control_deassert(ce->reset);
+ if (err) {
+ dev_err(ce->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+
+ sl3516_ce_start(ce);
+
+ return 0;
+error:
+ sl3516_ce_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sl3516_ce_pm_ops = {
+ SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
+};
+
+static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ce->dev);
+ pm_runtime_set_autosuspend_delay(ce->dev, 2000);
+
+ err = pm_runtime_set_suspended(ce->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ce->dev);
+ return err;
+}
+
+static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
+{
+ pm_runtime_disable(ce->dev);
+}
+
+static int sl3516_ce_probe(struct platform_device *pdev)
+{
+ struct sl3516_ce_dev *ce;
+ int err, irq;
+ u32 v;
+
+ ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ ce->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ce);
+
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ce->base))
+ return PTR_ERR(ce->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
+ if (err) {
+ dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
+ return err;
+ }
+
+ ce->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ce->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
+ "No reset control found\n");
+ ce->clks = devm_clk_get(ce->dev, NULL);
+ if (IS_ERR(ce->clks)) {
+ err = PTR_ERR(ce->clks);
+ dev_err(ce->dev, "Cannot get clock err=%d\n", err);
+ return err;
+ }
+
+ err = sl3516_ce_desc_init(ce);
+ if (err)
+ return err;
+
+ err = sl3516_ce_pm_init(ce);
+ if (err)
+ goto error_pm;
+
+ init_completion(&ce->complete);
+
+ ce->engine = crypto_engine_alloc_init(ce->dev, true);
+ if (!ce->engine) {
+ dev_err(ce->dev, "Cannot allocate engine\n");
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
+ err = crypto_engine_start(ce->engine);
+ if (err) {
+ dev_err(ce->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+
+ err = sl3516_ce_register_algs(ce);
+ if (err)
+ goto error_alg;
+
+ err = sl3516_ce_rng_register(ce);
+ if (err)
+ goto error_rng;
+
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
+ goto error_pmuse;
+
+ v = readl(ce->base + IPSEC_ID);
+ dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
+ v & GENMASK(31, 4),
+ v & GENMASK(3, 0));
+ v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
+ dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
+ v & GENMASK(15, 4),
+ v & GENMASK(3, 0));
+
+ pm_runtime_put_sync(ce->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ /* Ignore errors from debugfs */
+ ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
+ ce->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ce->dbgfs_dir, ce,
+ &sl3516_ce_debugfs_fops);
+#endif
+
+ return 0;
+error_pmuse:
+ sl3516_ce_rng_unregister(ce);
+error_rng:
+ sl3516_ce_unregister_algs(ce);
+error_alg:
+ crypto_engine_exit(ce->engine);
+error_engine:
+ sl3516_ce_pm_exit(ce);
+error_pm:
+ sl3516_ce_free_descs(ce);
+ return err;
+}
+
+static int sl3516_ce_remove(struct platform_device *pdev)
+{
+ struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);
+
+ sl3516_ce_rng_unregister(ce);
+ sl3516_ce_unregister_algs(ce);
+ crypto_engine_exit(ce->engine);
+ sl3516_ce_pm_exit(ce);
+ sl3516_ce_free_descs(ce);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ debugfs_remove_recursive(ce->dbgfs_dir);
+#endif
+
+ return 0;
+}
+
+static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
+ { .compatible = "cortina,sl3516-crypto"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
+
+static struct platform_driver sl3516_ce_driver = {
+ .probe = sl3516_ce_probe,
+ .remove = sl3516_ce_remove,
+ .driver = {
+ .name = "sl3516-crypto",
+ .pm = &sl3516_ce_pm_ops,
+ .of_match_table = sl3516_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sl3516_ce_driver);
+
+MODULE_DESCRIPTION("SL3516 cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
diff --git a/drivers/crypto/gemini/sl3516-ce-rng.c b/drivers/crypto/gemini/sl3516-ce-rng.c
new file mode 100644
index 000000000000..76931ec1cec5
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-rng.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-rng.c - hardware cryptographic offloader for SL3516 SoC.
+ *
+ * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the RNG found in the SL3516 crypto engine.
+ */
+#include "sl3516-ce.h"
+#include <linux/pm_runtime.h>
+#include <linux/hw_random.h>
+
+static int sl3516_ce_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct sl3516_ce_dev *ce;
+ u32 *data = buf;
+ size_t read = 0;
+ int err;
+
+ ce = container_of(rng, struct sl3516_ce_dev, trng);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ ce->hwrng_stat_req++;
+ ce->hwrng_stat_bytes += max;
+#endif
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ return err;
+ }
+
+ while (read < max) {
+ *data = readl(ce->base + IPSEC_RAND_NUM_REG);
+ data++;
+ read += 4;
+ }
+
+ pm_runtime_put(ce->dev);
+
+ return read;
+}
+
+int sl3516_ce_rng_register(struct sl3516_ce_dev *ce)
+{
+ int ret;
+
+ ce->trng.name = "SL3516 Crypto Engine RNG";
+ ce->trng.read = sl3516_ce_rng_read;
+ ce->trng.quality = 700;
+
+ ret = hwrng_register(&ce->trng);
+ if (ret)
+ dev_err(ce->dev, "Fail to register the RNG\n");
+ return ret;
+}
+
+void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce)
+{
+ hwrng_unregister(&ce->trng);
+}
diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h
new file mode 100644
index 000000000000..4c0ec6c920d1
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sl3516-ce.h - hardware cryptographic offloader for cortina/gemini SoC
+ *
+ * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * General notes on this driver:
+ * Called either Crypto Acceleration Engine Module, Security Acceleration Engine
+ * or IPSEC module in the datasheet, it will be called Crypto Engine for short
+ * in this driver.
+ * The CE was designed to handle the IPSEC and Wi-Fi (TKIP, WEP) protocols.
+ * It can handle the AES, DES, 3DES, MD5, WEP, TKIP, SHA1, HMAC(MD5),
+ * HMAC(SHA1) and Michael cipher/digest suites.
+ * It acts like network hardware, with both RX and TX chained descriptors.
+ */
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/hw_random.h>
+
+#define TQ0_TYPE_DATA 0
+#define TQ0_TYPE_CTRL BIT(0)
+#define TQ1_CIPHER BIT(1)
+#define TQ2_AUTH BIT(2)
+#define TQ3_IV BIT(3)
+#define TQ4_KEY0 BIT(4)
+#define TQ5_KEY4 BIT(5)
+#define TQ6_KEY6 BIT(6)
+#define TQ7_AKEY0 BIT(7)
+#define TQ8_AKEY2 BIT(8)
+#define TQ9_AKEY2 BIT(9)
+
+#define ECB_AES 0x2
+
+#define DESC_LAST 0x01
+#define DESC_FIRST 0x02
+
+#define IPSEC_ID 0x0000
+#define IPSEC_STATUS_REG 0x00a8
+#define IPSEC_RAND_NUM_REG 0x00ac
+#define IPSEC_DMA_DEVICE_ID 0xff00
+#define IPSEC_DMA_STATUS 0xff04
+#define IPSEC_TXDMA_CTRL 0xff08
+#define IPSEC_TXDMA_FIRST_DESC 0xff0c
+#define IPSEC_TXDMA_CURR_DESC 0xff10
+#define IPSEC_RXDMA_CTRL 0xff14
+#define IPSEC_RXDMA_FIRST_DESC 0xff18
+#define IPSEC_RXDMA_CURR_DESC 0xff1c
+#define IPSEC_TXDMA_BUF_ADDR 0xff28
+#define IPSEC_RXDMA_BUF_ADDR 0xff38
+#define IPSEC_RXDMA_BUF_SIZE 0xff30
+
+#define CE_ENCRYPTION 0x01
+#define CE_DECRYPTION 0x03
+
+#define MAXDESC 6
+
+#define DMA_STATUS_RS_EOFI BIT(22)
+#define DMA_STATUS_RS_PERR BIT(24)
+#define DMA_STATUS_RS_DERR BIT(25)
+#define DMA_STATUS_TS_EOFI BIT(27)
+#define DMA_STATUS_TS_PERR BIT(29)
+#define DMA_STATUS_TS_DERR BIT(30)
+
+#define TXDMA_CTRL_START BIT(31)
+#define TXDMA_CTRL_CONTINUE BIT(30)
+#define TXDMA_CTRL_CHAIN_MODE BIT(29)
+/* the burst value is not documented in the datasheet */
+#define TXDMA_CTRL_BURST_UNK BIT(22)
+#define TXDMA_CTRL_INT_FAIL BIT(17)
+#define TXDMA_CTRL_INT_PERR BIT(16)
+
+#define RXDMA_CTRL_START BIT(31)
+#define RXDMA_CTRL_CONTINUE BIT(30)
+#define RXDMA_CTRL_CHAIN_MODE BIT(29)
+/* the burst value is not documented in the datasheet */
+#define RXDMA_CTRL_BURST_UNK BIT(22)
+#define RXDMA_CTRL_INT_FINISH BIT(18)
+#define RXDMA_CTRL_INT_FAIL BIT(17)
+#define RXDMA_CTRL_INT_PERR BIT(16)
+#define RXDMA_CTRL_INT_EOD BIT(15)
+#define RXDMA_CTRL_INT_EOF BIT(14)
+
+#define CE_CPU 0
+#define CE_DMA 1
+
+/*
+ * struct descriptor - descriptor for CE operations
+ * @frame_ctrl: information for the current descriptor
+ * @flag_status: for a sent packet, describes the operation flags
+ * @buf_adr: pointer to a send/recv buffer for a data packet
+ * @next_desc: controls linking to other descriptors
+ */
+struct descriptor {
+ union {
+ u32 raw;
+ /*
+ * struct desc_frame_ctrl - Information for the current descriptor
+ * @buffer_size: the size of buffer at buf_adr
+ * @desc_count: upon completion of a DMA operation, the DMA
+ * writes back the number of descriptors used
+ * for the current frame
+ * @checksum: unknown
+ * @authcomp: unknown
+ * @perr: Protocol error during processing this descriptor
+ * @derr: Data error during processing this descriptor
+ * @own: 0 if owned by CPU, 1 for DMA
+ */
+ struct desc_frame_ctrl {
+ u32 buffer_size :16;
+ u32 desc_count :6;
+ u32 checksum :6;
+ u32 authcomp :1;
+ u32 perr :1;
+ u32 derr :1;
+ u32 own :1;
+ } bits;
+ } frame_ctrl;
+
+ union {
+ u32 raw;
+ /*
+ * struct desc_tx_flag_status - flags for this descriptor
+ * @tqflag: list of flags describing the type of operation
+ * to be performed.
+ */
+ struct desc_tx_flag_status {
+ u32 tqflag :10;
+ u32 unused :22;
+ } tx_flag;
+ } flag_status;
+
+ u32 buf_adr;
+
+ union {
+ u32 next_descriptor;
+ /*
+ * struct desc_next - describe chaining of descriptors
+ * @sof_eof: whether the descriptor is the first (0x11),
+ * the last (0x01), in the middle of a chain (0x00)
+ * or the only one (0x11)
+ * @dec: AHB bus address increase (0), decrease (1)
+ * @eofie: End of frame interrupt enable
+ * @ndar: Next descriptor address
+ */
+ struct desc_next {
+ u32 sof_eof :2;
+ u32 dec :1;
+ u32 eofie :1;
+ u32 ndar :28;
+ } bits;
+ } next_desc;
+};
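The DMA engine walks the ring in fixed-size steps, so the descriptor above must pack into exactly four 32-bit words. A compile-time guard one could add (an illustrative assumption, not part of the driver):

	#include <linux/build_bug.h>

	static inline void sl3516_desc_layout_check(void)
	{
		/* four 32-bit words: frame_ctrl, flag_status, buf_adr,
		 * next_desc */
		BUILD_BUG_ON(sizeof(struct descriptor) != 4 * sizeof(u32));
	}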
+
+/*
+ * struct control - The value of this register is used to set the
+ * operation mode of the IPSec Module.
+ * @process_id: Used to identify the process. The number will be copied
+ * to the descriptor status of the received packet.
+ * @auth_check_len: Number of 32-bit words to be checked or appended by the
+ * authentication module
+ * @auth_algorithm:
+ * @auth_mode: 0:append 1:Check Authentication Result
+ * @fcs_stream_copy: 0:enable 1:disable authentication stream copy
+ * @mix_key_sel: 0:use rCipherKey0-3 1:use Key Mixer
+ * @aesnk: AES Key Size
+ * @cipher_algorithm: choice of CBC/ECB and AES/DES/3DES
+ * @op_mode: Operation Mode for the IPSec Module
+ */
+struct pkt_control_header {
+ u32 process_id :8;
+ u32 auth_check_len :3;
+ u32 un1 :1;
+ u32 auth_algorithm :3;
+ u32 auth_mode :1;
+ u32 fcs_stream_copy :1;
+ u32 un2 :2;
+ u32 mix_key_sel :1;
+ u32 aesnk :4;
+ u32 cipher_algorithm :3;
+ u32 un3 :1;
+ u32 op_mode :4;
+};
+
+struct pkt_control_cipher {
+ u32 algorithm_len :16;
+ u32 header_len :16;
+};
+
+/*
+ * struct pkt_control_ecb - control packet for ECB
+ */
+struct pkt_control_ecb {
+ struct pkt_control_header control;
+ struct pkt_control_cipher cipher;
+ unsigned char key[AES_MAX_KEY_SIZE];
+};
+
+/*
+ * struct sl3516_ce_dev - main container for all this driver information
+ * @base: base address
+ * @clks: clocks used
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @engine: ptr to the crypto/crypto_engine
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @dtx: base DMA address for TX descriptors
+ * @tx: base address of TX descriptors
+ * @drx: base DMA address for RX descriptors
+ * @rx: base address of RX descriptors
+ * @ctx: currently used TX descriptor
+ * @crx: currently used RX descriptor
+ * @trng: hw_random structure for the RNG
+ * @hwrng_stat_req: number of HWRNG requests
+ * @hwrng_stat_bytes: total number of bytes generated by the RNG
+ * @stat_irq: number of IRQs handled by the CE
+ * @stat_irq_tx: number of TX IRQs handled by the CE
+ * @stat_irq_rx: number of RX IRQs handled by the CE
+ * @stat_req: number of requests handled by the CE
+ * @fallback_sg_count_tx: number of fallbacks due to source SG count
+ * @fallback_sg_count_rx: number of fallbacks due to destination SG count
+ * @fallback_not_same_len: number of fallbacks due to SG length mismatch
+ * @fallback_mod16: number of fallbacks due to length not multiple of 16
+ * @fallback_align16: number of fallbacks due to buffers not 16-byte aligned
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ * @pctrl: control packet buffer
+ * @dctrl: DMA address of the control packet buffer
+ */
+struct sl3516_ce_dev {
+ void __iomem *base;
+ struct clk *clks;
+ struct reset_control *reset;
+ struct device *dev;
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+ dma_addr_t dtx;
+ struct descriptor *tx;
+ dma_addr_t drx;
+ struct descriptor *rx;
+ int ctx;
+ int crx;
+ struct hwrng trng;
+ unsigned long hwrng_stat_req;
+ unsigned long hwrng_stat_bytes;
+ unsigned long stat_irq;
+ unsigned long stat_irq_tx;
+ unsigned long stat_irq_rx;
+ unsigned long stat_req;
+ unsigned long fallback_sg_count_tx;
+ unsigned long fallback_sg_count_rx;
+ unsigned long fallback_not_same_len;
+ unsigned long fallback_mod16;
+ unsigned long fallback_align16;
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+ void *pctrl;
+ dma_addr_t dctrl;
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+};
+
+/*
+ * struct sl3516_ce_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @pctrllen: the length of the ctrl packet
+ * @tqflag: the TQflag to set in data packet
+ * @h: pointer to the pkt_control_cipher header
+ * @nr_sgs: number of source SGs
+ * @nr_sgd: number of destination SGs
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
+ */
+struct sl3516_ce_cipher_req_ctx {
+ struct sginfo t_src[MAXDESC];
+ struct sginfo t_dst[MAXDESC];
+ u32 op_dir;
+ unsigned int pctrllen;
+ u32 tqflag;
+ struct pkt_control_cipher *h;
+ int nr_sgs;
+ int nr_sgd;
+ struct skcipher_request fallback_req; // keep at the end
+};
+
+/*
+ * struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ *
+ * enginectx must be the first element
+ */
+struct sl3516_ce_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sl3516_ce_dev *ce;
+ struct crypto_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sl3516_ce_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @mode: value to be used in control packet for this algorithm
+ * @ce: pointer to the sl3516_ce_dev structure associated with
+ * this template
+ * @alg: union of algorithm sub-structures; exactly one is used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback
+ * @stat_bytes: total number of bytes processed by this template
+ */
+struct sl3516_ce_alg_template {
+ u32 type;
+ u32 mode;
+ struct sl3516_ce_dev *ce;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_bytes;
+};
+
+int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sl3516_ce_cipher_init(struct crypto_tfm *tfm);
+void sl3516_ce_cipher_exit(struct crypto_tfm *tfm);
+int sl3516_ce_skdecrypt(struct skcipher_request *areq);
+int sl3516_ce_skencrypt(struct skcipher_request *areq);
+
+int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
+ struct sl3516_ce_cipher_req_ctx *rctx, const char *name);
+
+int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
+void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
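The prototypes above are consumed by an algorithm table built from sl3516_ce_alg_template; a minimal sketch of one entry, where the .mode constant (ECB_AES), the priority and the layout of the table are assumptions rather than values taken from this patch:

	static struct sl3516_ce_alg_template ce_algs[] = {
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.mode = ECB_AES,	/* assumption: hardware tqflag/mode constant */
		.alg.skcipher = {
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-sl3516",
				.cra_priority = 400,	/* assumption */
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sl3516_ce_cipher_init,
				.cra_exit = sl3516_ce_cipher_exit,
			},
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = sl3516_ce_aes_setkey,
			.encrypt = sl3516_ce_skencrypt,
			.decrypt = sl3516_ce_skdecrypt,
		}
	},
	};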
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a380087c83f7..a032c192ef1d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -5,6 +5,7 @@
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
+#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -30,7 +31,6 @@ struct hpre_ctx;
#define HPRE_DH_G_FLAG 0x02
#define HPRE_TRY_SEND_TIMES 100
#define HPRE_INVLD_REQ_ID (-1)
-#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev))
#define HPRE_SQE_ALG_BITS 5
#define HPRE_SQE_DONE_SHIFT 30
@@ -39,12 +39,17 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* maximum key size in bytes, due to NIST P-521 */
+#define HPRE_ECC_MAX_KSZ 66
+
/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE 24
#define HPRE_ECC_NIST_P256_N_SIZE 32
+#define HPRE_ECC_NIST_P384_N_SIZE 48
/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B 32
+#define HPRE_ECC_HW384_KSZ_B 48
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
@@ -102,6 +107,7 @@ struct hpre_curve25519_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
+ struct device *dev;
struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock;
@@ -214,8 +220,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
int is_src, dma_addr_t *tmp)
{
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = hpre_req->ctx->dev;
enum dma_data_direction dma_dir;
if (is_src) {
@@ -239,7 +244,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *ptr;
int shift;
@@ -293,11 +298,13 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;
if (src) {
if (req->src)
@@ -307,6 +314,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;
if (req->dst) {
if (dst)
@@ -321,16 +330,15 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
- struct device *dev = HPRE_DEV(ctx);
struct hpre_asym_request *req;
unsigned int err, done, alg;
int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
-#define HREE_HW_ERR_MASK 0x7ff
-#define HREE_SQE_DONE_MASK 0x3
-#define HREE_ALG_TYPE_MASK 0x1f
+#define HREE_HW_ERR_MASK GENMASK(10, 0)
+#define HREE_SQE_DONE_MASK GENMASK(1, 0)
+#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
@@ -346,7 +354,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
return 0;
alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
- dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+ dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
alg, done, err);
return -EINVAL;
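The open-coded masks replaced above are equivalent to their GENMASK() forms; as a sanity check, GENMASK(h, l) sets bits l through h inclusive (a sketch using the generic macros from linux/bits.h, not driver code):

	#include <linux/bits.h>
	#include <linux/build_bug.h>

	/* GENMASK(h, l) sets bits l..h inclusive, so the conversions
	 * above are value-preserving: */
	static_assert(GENMASK(10, 0) == 0x7ff);	/* HREE_HW_ERR_MASK */
	static_assert(GENMASK(1, 0) == 0x3);	/* HREE_SQE_DONE_MASK */
	static_assert(GENMASK(4, 0) == 0x1f);	/* HREE_ALG_TYPE_MASK */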
@@ -361,6 +369,7 @@ static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ ctx->dev = &qp->qm->pdev->dev;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
@@ -524,6 +533,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
}
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
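Presetting sqe->in/sqe->out to DMA_MAPPING_ERROR here, and checking dma_mapping_error() in hpre_hw_data_clr_all() above, lets the cleanup path skip fields that were never mapped. A generic sketch of the same sentinel pattern, with hypothetical names:

	#include <linux/dma-mapping.h>

	/* Sketch: preset the sentinel, allocate later, free only if valid. */
	static void sentinel_cleanup_example(struct device *dev, size_t len)
	{
		dma_addr_t addr = DMA_MAPPING_ERROR;
		void *buf;

		buf = dma_alloc_coherent(dev, len, &addr, GFP_KERNEL);

		/* ... common cleanup path, safe even if the alloc failed ... */
		if (!dma_mapping_error(dev, addr))
			dma_free_coherent(dev, len, buf, addr);
	}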
@@ -618,14 +629,14 @@ static int hpre_is_dh_params_length_valid(unsigned int key_sz)
case _HPRE_DH_GRP15:
case _HPRE_DH_GRP16:
return 0;
+ default:
+ return -EINVAL;
}
-
- return -EINVAL;
}
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz;
if (params->p_size > HPRE_DH_MAX_P_SZ)
@@ -664,7 +675,7 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
if (is_clear_all)
@@ -877,18 +888,18 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
if (!hpre_rsa_key_size_is_support(ctx->key_sz))
return 0;
- ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_pubkey,
GFP_KERNEL);
if (!ctx->rsa.pubkey)
return -ENOMEM;
if (private) {
- ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_prikey,
GFP_KERNEL);
if (!ctx->rsa.prikey) {
- dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
+ dma_free_coherent(ctx->dev, vlen << 1,
ctx->rsa.pubkey,
ctx->rsa.dma_pubkey);
ctx->rsa.pubkey = NULL;
@@ -950,7 +961,7 @@ static int hpre_crt_para_get(char *para, size_t para_sz,
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
unsigned int hlf_ksz = ctx->key_sz >> 1;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
u64 offset;
int ret;
@@ -1008,7 +1019,7 @@ free_key:
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
unsigned int half_key_sz = ctx->key_sz >> 1;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
@@ -1179,7 +1190,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
bool is_ecdh)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
@@ -1202,12 +1213,21 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
hpre_ctx_clear(ctx, is_clear_all);
}
+/*
+ * HPRE supports key widths of 192/224/256/384/521 bits, rounded up
+ * as follows:
+ * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
+ * If the parameter is narrower, software zero-fills the high-order
+ * bits, so TASK_LENGTH1 is 0x3/0x5/0x8 (key bytes / 8 - 1; e.g.
+ * 256 bits = 32 bytes, and 32 / 8 - 1 = 0x3).
+ */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
switch (id) {
case ECC_CURVE_NIST_P192:
case ECC_CURVE_NIST_P256:
return HPRE_ECC_HW256_KSZ_B;
+ case ECC_CURVE_NIST_P384:
+ return HPRE_ECC_HW384_KSZ_B;
default:
break;
}
@@ -1272,6 +1292,8 @@ static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
return HPRE_ECC_NIST_P192_N_SIZE;
case ECC_CURVE_NIST_P256:
return HPRE_ECC_NIST_P256_N_SIZE;
+ case ECC_CURVE_NIST_P384:
+ return HPRE_ECC_NIST_P384_N_SIZE;
default:
break;
}
@@ -1281,7 +1303,7 @@ static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz, shift, curve_sz;
int ret;
@@ -1328,11 +1350,32 @@ static bool hpre_key_is_zero(char *key, unsigned short key_sz)
return true;
}
+static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ret = crypto_get_default_rng();
+ if (ret) {
+ dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
+ return ret;
+ }
+
+ ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
+ params->key_size);
+ crypto_put_default_rng();
+ if (ret)
+ dev_err(dev, "failed to get rng, ret = %d!\n", ret);
+
+ return ret;
+}
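ecdh_gen_privkey() above uses the kernel's default-RNG helpers from crypto/rng.h; the get/use/put sequence generalizes, as in this self-contained sketch:

	#include <linux/types.h>
	#include <crypto/rng.h>

	/* Fill buf with len random bytes from the system default RNG,
	 * following the same get/use/put sequence as ecdh_gen_privkey(). */
	static int fill_random(u8 *buf, unsigned int len)
	{
		int ret;

		ret = crypto_get_default_rng();	/* takes a reference */
		if (ret)
			return ret;

		ret = crypto_rng_get_bytes(crypto_default_rng, buf, len);
		crypto_put_default_rng();	/* drop the reference even on failure */

		return ret;
	}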
+
static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
+ char key[HPRE_ECC_MAX_KSZ];
unsigned int sz, sz_shift;
struct ecdh params;
int ret;
@@ -1342,6 +1385,15 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
+ /* Use stdrng to generate private key */
+ if (!params.key || !params.key_size) {
+ params.key = key;
+ params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
+ ret = ecdh_gen_privkey(ctx, &params);
+ if (ret)
+ return ret;
+ }
+
if (hpre_key_is_zero(params.key, params.key_size)) {
dev_err(dev, "Invalid hpre key!\n");
return -EINVAL;
@@ -1367,16 +1419,20 @@ static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t dma;
dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (src && req->src)
dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (req->dst)
dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
@@ -1431,6 +1487,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.ecdh = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->ecdh.dma_p);
msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1450,7 +1508,7 @@ static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int tmpshift;
dma_addr_t dma = 0;
void *ptr;
@@ -1480,8 +1538,8 @@ static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
- dma_addr_t dma = 0;
+ struct device *dev = ctx->dev;
+ dma_addr_t dma;
if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
dev_err(dev, "data or data length is illegal!\n");
@@ -1503,7 +1561,7 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
@@ -1568,6 +1626,15 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
+static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P384;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
@@ -1609,7 +1676,7 @@ static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
unsigned int len)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
@@ -1634,7 +1701,7 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret = -EINVAL;
if (len != CURVE25519_KEY_SIZE ||
@@ -1662,16 +1729,20 @@ static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t dma;
dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (src && req->src)
dma_free_coherent(dev, ctx->key_sz, req->src, dma);
dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (req->dst)
dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
@@ -1722,6 +1793,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.curve25519 = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->curve25519.dma_p);
msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1752,7 +1825,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 p[CURVE25519_KEY_SIZE] = { 0 };
const struct ecc_curve *curve;
dma_addr_t dma = 0;
@@ -1790,8 +1863,12 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
* When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
* we get its modulus to p, and then use it.
*/
- if (memcmp(ptr, p, ctx->key_sz) >= 0)
+ if (memcmp(ptr, p, ctx->key_sz) == 0) {
+ dev_err(dev, "gx is p!\n");
+ return -EINVAL;
+ } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
hpre_curve25519_src_modulo_p(ptr);
+ }
hpre_req->src = ptr;
msg->in = cpu_to_le64(dma);
@@ -1807,8 +1884,8 @@ static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
- dma_addr_t dma = 0;
+ struct device *dev = ctx->dev;
+ dma_addr_t dma;
if (!data || !sg_is_last(data) || len != ctx->key_sz) {
dev_err(dev, "data or data length is illegal!\n");
@@ -1830,7 +1907,7 @@ static int hpre_curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
@@ -1940,7 +2017,7 @@ static struct kpp_alg ecdh_nist_p192 = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
.cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
.cra_module = THIS_MODULE,
},
};
@@ -1957,7 +2034,24 @@ static struct kpp_alg ecdh_nist_p256 = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
.cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p384 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
.cra_module = THIS_MODULE,
},
};
@@ -1989,16 +2083,25 @@ static int hpre_register_ecdh(void)
return ret;
ret = crypto_register_kpp(&ecdh_nist_p256);
- if (ret) {
- crypto_unregister_kpp(&ecdh_nist_p192);
- return ret;
- }
+ if (ret)
+ goto unregister_ecdh_p192;
+
+ ret = crypto_register_kpp(&ecdh_nist_p384);
+ if (ret)
+ goto unregister_ecdh_p256;
return 0;
+
+unregister_ecdh_p256:
+ crypto_unregister_kpp(&ecdh_nist_p256);
+unregister_ecdh_p192:
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
}
static void hpre_unregister_ecdh(void)
{
+ crypto_unregister_kpp(&ecdh_nist_p384);
crypto_unregister_kpp(&ecdh_nist_p256);
crypto_unregister_kpp(&ecdh_nist_p192);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 046bc962c8b2..8b0640fb04be 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -36,7 +36,7 @@
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_CORE_INT_ENABLE 0
-#define HPRE_CORE_INT_DISABLE 0x003fffff
+#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -50,6 +50,7 @@
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
+#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
@@ -57,7 +58,6 @@
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT 0x301a04
#define HPRE_HAC_ECC2_CNT 0x301a08
-#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
@@ -69,13 +69,17 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
-#define HPRE_QM_USR_CFG_MASK 0xfffffffe
-#define HPRE_QM_AXI_CFG_MASK 0xffff
-#define HPRE_QM_VFG_AX_MASK 0xff
-#define HPRE_BD_USR_MASK 0x3
-#define HPRE_CLUSTER_CORE_MASK_V2 0xf
-#define HPRE_CLUSTER_CORE_MASK_V3 0xff
+#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)
+#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
+#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
+#define HPRE_BD_USR_MASK GENMASK(1, 0)
+#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
+#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
+#define HPRE_PREFETCH_CFG 0x301130
+#define HPRE_SVA_PREFTCH_DFX 0x30115C
+#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
+#define HPRE_PREFETCH_DISABLE BIT(30)
+#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
@@ -88,11 +92,7 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
-#define HPRE_CLUSTERS_NUM(qm) \
- (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
-#define HPRE_CLUSTER_CORE_MASK(qm) \
- (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
- HPRE_CLUSTER_CORE_MASK_V2)
+#define HPRE_SHAPER_TYPE_RATE 128
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -123,21 +123,49 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
- { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
- { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
- { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
- { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
- { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
- { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
- { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
- { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
- { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
- { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
- { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
- { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
- { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
{
+ .int_msk = BIT(0),
+ .msg = "core_ecc_1bit_err_int_set"
+ }, {
+ .int_msk = BIT(1),
+ .msg = "core_ecc_2bit_err_int_set"
+ }, {
+ .int_msk = BIT(2),
+ .msg = "dat_wb_poison_int_set"
+ }, {
+ .int_msk = BIT(3),
+ .msg = "dat_rd_poison_int_set"
+ }, {
+ .int_msk = BIT(4),
+ .msg = "bd_rd_poison_int_set"
+ }, {
+ .int_msk = BIT(5),
+ .msg = "ooo_ecc_2bit_err_int_set"
+ }, {
+ .int_msk = BIT(6),
+ .msg = "cluster1_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(7),
+ .msg = "cluster2_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(8),
+ .msg = "cluster3_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(9),
+ .msg = "cluster4_shb_timeout_int_set"
+ }, {
+ .int_msk = GENMASK(15, 10),
+ .msg = "ooo_rdrsp_err_int_set"
+ }, {
+ .int_msk = GENMASK(21, 16),
+ .msg = "ooo_wrrsp_err_int_set"
+ }, {
+ .int_msk = BIT(22),
+ .msg = "pt_rng_timeout_int_set"
+ }, {
+ .int_msk = BIT(23),
+ .msg = "sva_fsm_timeout_int_set"
+ }, {
/* sentinel */
}
};
@@ -224,6 +252,18 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+static inline int hpre_cluster_num(struct hisi_qm *qm)
+{
+ return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
+ HPRE_CLUSTERS_NUM_V2;
+}
+
+static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
+{
+ return (qm->ver >= QM_HW_V3) ?
+ HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+}
+
struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
@@ -290,8 +330,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
- u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u32 cluster_core_mask = hpre_cluster_core_mask(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
unsigned long offset;
u32 val = 0;
@@ -302,10 +342,10 @@ static int hpre_set_cluster(struct hisi_qm *qm)
/* clusters initiating */
writel(cluster_core_mask,
- HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
- writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
- HPRE_CORE_INI_STATUS), val,
+ qm->io_base + offset + HPRE_CORE_ENB);
+ writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
+ ret = readl_relaxed_poll_timeout(qm->io_base + offset +
+ HPRE_CORE_INI_STATUS, val,
((val & cluster_core_mask) ==
cluster_core_mask),
HPRE_REG_RD_INTVRL_US,
@@ -329,11 +369,52 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
{
u32 val;
- val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
val |= HPRE_QM_PM_FLR;
- writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
- writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+ writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
+}
+
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+ val &= HPRE_PREFETCH_ENABLE;
+ writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+ val |= HPRE_PREFETCH_DISABLE;
+ writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@@ -342,33 +423,33 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
u32 val;
int ret;
- writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
- writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
- writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
+ writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
/* HPRE need more time, we close this interrupt */
- val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
- writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
if (qm->ver >= QM_HW_V3)
writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
- HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ qm->io_base + HPRE_TYPES_ENB);
else
- writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));
-
- writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
- writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
- writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
- writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
- writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
- writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
-
- writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
- writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
- writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
- val & BIT(0),
+ writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);
+
+ writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
+ writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
+ writel(0x0, qm->io_base + HPRE_INT_MASK);
+ writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
+ writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
+ writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
+
+ writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
+ writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
+ writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
+ val & BIT(0),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret) {
@@ -397,7 +478,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
unsigned long offset;
int i;
@@ -413,36 +494,49 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hisi_qm *qm)
+static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ if (enable) {
+ val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ } else {
+ val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+
+ writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+}
+static void hpre_hw_error_disable(struct hisi_qm *qm)
+{
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
- /* disable HPRE block master OOO when m-bit error occur */
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
+ hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 val;
-
/* clear HPRE hw error source if having */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
- /* enable hpre hw error interrupts */
- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ /* configure error type */
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
- /* enable HPRE block master OOO when m-bit error occur */
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
+ hpre_master_ooo_ctrl(qm, true);
+
+ /* enable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -650,7 +744,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
@@ -788,7 +882,7 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
- return readl(qm->io_base + HPRE_HAC_INT_STATUS);
+ return readl(qm->io_base + HPRE_INT_STATUS);
}
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
@@ -802,9 +896,9 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_err_info_init(struct hisi_qm *qm)
@@ -829,6 +923,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
+ .open_sva_prefetch = hpre_open_sva_prefetch,
+ .close_sva_prefetch = hpre_close_sva_prefetch,
.err_info_init = hpre_err_info_init,
};
@@ -841,6 +937,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
+ hpre_open_sva_prefetch(qm);
+
qm->err_ini = &hpre_err_ini;
qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
@@ -850,6 +948,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
static int hpre_probe_init(struct hpre *hpre)
{
+ u32 type_rate = HPRE_SHAPER_TYPE_RATE;
struct hisi_qm *qm = &hpre->qm;
int ret;
@@ -857,6 +956,11 @@ static int hpre_probe_init(struct hpre *hpre)
ret = hpre_pf_probe_init(hpre);
if (ret)
return ret;
+ /* Enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+ qm->type_rate = type_rate;
+ }
}
return 0;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ce439a0c66c9..1d67f94a1d56 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -25,9 +25,11 @@
#define QM_IRQ_NUM_V1 1
#define QM_IRQ_NUM_PF_V2 4
#define QM_IRQ_NUM_VF_V2 2
+#define QM_IRQ_NUM_VF_V3 3
#define QM_EQ_EVENT_IRQ_VECTOR 0
#define QM_AEQ_EVENT_IRQ_VECTOR 1
+#define QM_CMD_EVENT_IRQ_VECTOR 2
#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
/* mailbox */
@@ -39,6 +41,8 @@
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
@@ -46,6 +50,9 @@
#define QM_MB_OP_SHIFT 14
#define QM_MB_CMD_DATA_ADDR_L 0x304
#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_PING_ALL_VFS 0xffff
+#define QM_MB_CMD_DATA_SHIFT 32
+#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -95,6 +102,7 @@
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_PAGE_SIZE 0x0034
#define QM_QUE_ISO_EN 0x100154
#define QM_CAPBILITY 0x100158
#define QM_QP_NUN_MASK GENMASK(10, 0)
@@ -155,11 +163,15 @@
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
+#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
#define QM_PEH_DFX_INFO0 0x1000fc
+#define QM_PEH_DFX_INFO1 0x100100
+#define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
+#define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
@@ -170,6 +182,31 @@
#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_MSI_CAP_ENABLE BIT(16)
+
+/* interfunction communication */
+#define QM_IFC_READY_STATUS 0x100128
+#define QM_IFC_C_STS_M 0x10012C
+#define QM_IFC_INT_SET_P 0x100130
+#define QM_IFC_INT_CFG 0x100134
+#define QM_IFC_INT_SOURCE_P 0x100138
+#define QM_IFC_INT_SOURCE_V 0x0020
+#define QM_IFC_INT_MASK 0x0024
+#define QM_IFC_INT_STATUS 0x0028
+#define QM_IFC_INT_SET_V 0x002C
+#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
+#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
+#define QM_IFC_INT_SOURCE_MASK BIT(0)
+#define QM_IFC_INT_DISABLE BIT(0)
+#define QM_IFC_INT_STATUS_MASK BIT(0)
+#define QM_IFC_INT_SET_MASK BIT(0)
+#define QM_WAIT_DST_ACK 10
+#define QM_MAX_PF_WAIT_COUNT 10
+#define QM_MAX_VF_WAIT_COUNT 40
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_TIMEOUT_US \
+ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
#define QM_DFX_MB_CNT_VF 0x104010
#define QM_DFX_DB_CNT_VF 0x104020
@@ -205,6 +242,33 @@
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
#define QM_RESETTING 2
+#define QM_QOS_PARAM_NUM 2
+#define QM_QOS_VAL_NUM 1
+#define QM_QOS_BDF_PARAM_NUM 4
+#define QM_QOS_MAX_VAL 1000
+#define QM_QOS_RATE 100
+#define QM_QOS_EXPAND_RATE 1000
+#define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
+#define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
+#define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
+#define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
+#define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
+#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
+#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
+#define QM_SHAPER_CBS_B 1
+#define QM_SHAPER_CBS_S 16
+#define QM_SHAPER_VFT_OFFSET 6
+#define WAIT_FOR_QOS_VF 100
+#define QM_QOS_MIN_ERROR_RATE 5
+#define QM_QOS_TYPICAL_NUM 8
+#define QM_SHAPER_MIN_CBS_S 8
+#define QM_QOS_TICK 0x300U
+#define QM_QOS_DIVISOR_CLK 0x1f40U
+#define QM_QOS_MAX_CIR_B 200
+#define QM_QOS_MIN_CIR_B 100
+#define QM_QOS_MAX_CIR_U 6
+#define QM_QOS_MAX_CIR_S 11
+#define QM_QOS_VAL_MAX_LEN 32
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -245,6 +309,7 @@
enum vft_type {
SQC_VFT = 0,
CQC_VFT,
+ SHAPER_VFT,
};
enum acc_err_result {
@@ -253,6 +318,23 @@ enum acc_err_result {
ACC_ERR_RECOVERED,
};
+enum qm_alg_type {
+ ALG_TYPE_0,
+ ALG_TYPE_1,
+};
+
+enum qm_mb_cmd {
+ QM_PF_FLR_PREPARE = 0x01,
+ QM_PF_SRST_PREPARE,
+ QM_PF_RESET_DONE,
+ QM_VF_PREPARE_DONE,
+ QM_VF_PREPARE_FAIL,
+ QM_VF_START_DONE,
+ QM_VF_START_FAIL,
+ QM_PF_SET_QOS,
+ QM_VF_GET_QOS,
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -351,6 +433,9 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*stop_qp)(struct hisi_qp *qp);
+ int (*set_msi)(struct hisi_qm *qm, bool set);
+ int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
+ int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -412,6 +497,11 @@ static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
+static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
+ 10000, 25000, 50000, 100000};
+static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
+ 17, 18, 19};
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -491,6 +581,18 @@ static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
return avail;
}
+static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
+ u64 base, u16 queue, bool op)
+{
+ mailbox->w0 = cpu_to_le16((cmd) |
+ ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
+ (0x1 << QM_MB_BUSY_SHIFT));
+ mailbox->queue_num = cpu_to_le16(queue);
+ mailbox->base_l = cpu_to_le32(lower_32_bits(base));
+ mailbox->base_h = cpu_to_le32(upper_32_bits(base));
+ mailbox->rsvd = 0;
+}
+
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
@@ -523,44 +625,42 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
: "memory");
}
-static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
- bool op)
+static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
- struct qm_mailbox mailbox;
- int ret = 0;
-
- dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
- queue, cmd, (unsigned long long)dma_addr);
-
- mailbox.w0 = cpu_to_le16(cmd |
- (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
- (0x1 << QM_MB_BUSY_SHIFT));
- mailbox.queue_num = cpu_to_le16(queue);
- mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
- mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
- mailbox.rsvd = 0;
-
- mutex_lock(&qm->mailbox_lock);
-
if (unlikely(qm_wait_mb_ready(qm))) {
- ret = -EBUSY;
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
- goto busy_unlock;
+ goto mb_busy;
}
- qm_mb_write(qm, &mailbox);
+ qm_mb_write(qm, mailbox);
if (unlikely(qm_wait_mb_ready(qm))) {
- ret = -EBUSY;
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
- goto busy_unlock;
+ goto mb_busy;
}
-busy_unlock:
+ return 0;
+
+mb_busy:
+ atomic64_inc(&qm->debug.dfx.mb_err_cnt);
+ return -EBUSY;
+}
+
+static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op)
+{
+ struct qm_mailbox mailbox;
+ int ret;
+
+ dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
+ queue, cmd, (unsigned long long)dma_addr);
+
+ qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
+
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
mutex_unlock(&qm->mailbox_lock);
- if (ret)
- atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
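After this refactor qm_mb() is a thin wrapper: qm_mb_pre_init() builds the request and qm_mb_nolock() issues it, so paths that must do more work under mailbox_lock can call the pieces directly. A sketch of that usage, mirroring what qm_get_mb_cmd() does later in this patch:

	struct qm_mailbox mb;
	u64 msg;
	int ret;

	qm_mb_pre_init(&mb, QM_MB_CMD_DST, 0, fun_num, 0);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mb);
	if (!ret)
		/* read the 64-bit payload while still holding the lock */
		msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		      ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	mutex_unlock(&qm->mailbox_lock);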
@@ -626,6 +726,14 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
return QM_IRQ_NUM_VF_V2;
}
+static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ return QM_IRQ_NUM_PF_V2;
+
+ return QM_IRQ_NUM_VF_V3;
+}
+
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -730,6 +838,21 @@ static irqreturn_t qm_irq(int irq, void *data)
return IRQ_NONE;
}
+static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_STATUS);
+ val &= QM_IFC_INT_STATUS_MASK;
+ if (!val)
+ return IRQ_NONE;
+
+ schedule_work(&qm->cmd_process);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_aeq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -770,14 +893,16 @@ static void qm_irq_unregister(struct hisi_qm *qm)
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- if (qm->ver == QM_HW_V1)
- return;
+ if (qm->ver > QM_HW_V1) {
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
+ }
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
+ if (qm->ver > QM_HW_V2)
+ free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
}
static void qm_init_qp_status(struct hisi_qp *qp)
@@ -790,8 +915,95 @@ static void qm_init_qp_status(struct hisi_qp *qp)
atomic_set(&qp_status->used, 0);
}
+static void qm_init_prefetch(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 page_type = 0x0;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ page_type = 0x0;
+ break;
+ case SZ_16K:
+ page_type = 0x1;
+ break;
+ case SZ_64K:
+ page_type = 0x2;
+ break;
+ default:
+ dev_err(dev, "system page size is not support: %lu, default set to 4KB",
+ PAGE_SIZE);
+ }
+
+ writel(page_type, qm->io_base + QM_PAGE_SIZE);
+}
+
+/*
+ * The ir argument is a rate in Mbps: ir = 100 means IR = 100 Mbps,
+ * ir = 10000 means IR = 10 Gbps.
+ *
+ *                        IR_b * (2 ^ IR_u) * 8
+ * IR(Mbps) * 10 ^ -3 = -------------------------
+ *                        Tick * (2 ^ IR_s)
+ */
+static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
+{
+ return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
+ (QM_QOS_TICK * (1 << cir_s));
+}
+
+static u32 acc_shaper_calc_cbs_s(u32 ir)
+{
+ int i;
+
+ if (ir < typical_qos_val[0])
+ return QM_SHAPER_MIN_CBS_S;
+
+ for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
+ if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
+ return typical_qos_cbs_s[i - 1];
+ }
+
+ return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
+}
+
+static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
+{
+ u32 cir_b, cir_u, cir_s, ir_calc;
+ u32 error_rate;
+
+ factor->cbs_s = acc_shaper_calc_cbs_s(ir);
+
+ for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
+ for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
+ for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
+ /*
+ * The formula rearranged to:
+ *
+ * IR_b * (2 ^ IR_u) * DIVISOR_CLK
+ * IR(Mbps) = -------------------------------
+ * 768 * (2 ^ IR_s)
+ */
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u,
+ cir_s);
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
+ factor->cir_b = cir_b;
+ factor->cir_u = cir_u;
+ factor->cir_s = cir_s;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
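A standalone userspace sketch (illustration only) of the same brute-force search. With the driver's default ir = QM_QOS_MAX_VAL * QM_QOS_RATE = 100000, the first accepted triple is cir_b = 150, cir_u = 6, cir_s = 0, since (150 * 8000 * 64) / 768 is exactly 100000; acc_shaper_calc_cbs_s() then yields cbs_s = 19 for this rate:

	#include <stdio.h>

	/* Mirrors acc_shaper_para_calc(): IR_b * 2^IR_u * DIVISOR_CLK
	 * over Tick * 2^IR_s, in integer arithmetic. */
	static unsigned long calc(unsigned long b, unsigned long u, unsigned long s)
	{
		return (b * 0x1f40UL * (1UL << u)) / (0x300UL * (1UL << s));
	}

	int main(void)
	{
		const unsigned long ir = 100000;	/* QM_QOS_MAX_VAL * QM_QOS_RATE */
		unsigned long b, u, s, v, diff;

		for (b = 100; b <= 200; b++)		/* QM_QOS_MIN/MAX_CIR_B */
			for (u = 0; u <= 6; u++)	/* QM_QOS_MAX_CIR_U */
				for (s = 0; s <= 11; s++) {	/* QM_QOS_MAX_CIR_S */
					v = calc(b, u, s);
					diff = v > ir ? v - ir : ir - v;
					/* accept when error <= 0.5% (5 / 1000) */
					if (diff * 1000 / ir <= 5) {
						printf("cir_b=%lu cir_u=%lu cir_s=%lu\n",
						       b, u, s);	/* 150 6 0 */
						return 0;
					}
				}
		return 1;
	}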
+
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
- u32 number)
+ u32 number, struct qm_shaper_factor *factor)
{
u64 tmp = 0;
@@ -820,6 +1032,15 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
tmp = QM_CQC_VFT_VALID;
}
break;
+ case SHAPER_VFT:
+ if (qm->ver >= QM_HW_V3) {
+ tmp = factor->cir_b |
+ (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
+ (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
+ (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
+ (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
+ }
+ break;
}
}
@@ -830,6 +1051,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
+ struct qm_shaper_factor *factor = &qm->factor[fun_num];
unsigned int val;
int ret;
@@ -841,9 +1063,12 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
writel(type, qm->io_base + QM_VFT_CFG_TYPE);
+ if (type == SHAPER_VFT)
+ fun_num |= base << QM_SHAPER_VFT_OFFSET;
+
writel(fun_num, qm->io_base + QM_VFT_CFG);
- qm_vft_data_cfg(qm, type, base, number);
+ qm_vft_data_cfg(qm, type, base, number, factor);
writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
@@ -853,6 +1078,27 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
POLL_TIMEOUT);
}
+static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+{
+ int ret, i;
+
+ qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
+ ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+ return ret;
+ }
+ writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
+ for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+ /* the base argument is reused to carry the alg type */
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
@@ -865,7 +1111,21 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return ret;
}
+ /* init default shaper qos val */
+ if (qm->ver >= QM_HW_V3) {
+ ret = qm_shaper_init_vft(qm, fun_num);
+ if (ret)
+ goto back_sqc_cqc;
+ }
+
return 0;
+back_sqc_cqc:
+ for (i = SQC_VFT; i <= CQC_VFT; i++) {
+ ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
+ if (ret)
+ return ret;
+ }
+ return ret;
}
static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
@@ -1570,16 +1830,9 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
if (count > QM_DBG_WRITE_LEN)
return -ENOSPC;
- cmd_buf = kzalloc(count + 1, GFP_KERNEL);
- if (!cmd_buf)
- return -ENOMEM;
-
- if (copy_from_user(cmd_buf, buffer, count)) {
- kfree(cmd_buf);
- return -EFAULT;
- }
-
- cmd_buf[count] = '\0';
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
@@ -1623,13 +1876,9 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
-
qm->error_mask = ce | nfe | fe;
-
/* clear QM hw residual error source */
writel(QM_ABNORMAL_INT_SOURCE_CLR,
qm->io_base + QM_ABNORMAL_INT_SOURCE);
@@ -1639,6 +1888,14 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+}
+
+static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+{
+ u32 irq_enable = ce | nfe | fe;
+ u32 irq_unmask = ~irq_enable;
+
+ qm_hw_error_cfg(qm, ce, nfe, fe);
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1649,6 +1906,28 @@ static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
+static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+{
+ u32 irq_enable = ce | nfe | fe;
+ u32 irq_unmask = ~irq_enable;
+
+ qm_hw_error_cfg(qm, ce, nfe, fe);
+
+ /* enable shutting down the master OOO when a hardware error happens */
+ writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
+}
+
+static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
+{
+ writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+
+ /* disable shutting down the master OOO when a hardware error happens */
+ writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+}
+
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
const struct hisi_qm_hw_error *err;
@@ -1715,15 +1994,371 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static u32 qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+/* Check if the error causes the master OOO to be blocked */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ u32 val, dev_val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = qm_get_hw_error_status(qm);
+ dev_val = qm_get_dev_err_status(qm);
+
+ if (qm->ver < QM_HW_V3)
+ return (val & QM_ECC_MBIT) ||
+ (dev_val & qm->err_info.ecc_2bits_mask);
+
+ return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
+ (dev_val & (~qm->err_info.dev_ce_mask));
+}
+
+static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
+{
+ struct qm_mailbox mailbox;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret)
+ goto err_unlock;
+
+ *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
+
+err_unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
+
+static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
+{
+ u32 val;
+
+ if (qm->fun_type == QM_HW_PF)
+ writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
+
+ val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
+ val |= QM_IFC_INT_SOURCE_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
+}
+
+static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 cmd;
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ if (ret) {
+ dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ return;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ switch (cmd) {
+ case QM_VF_PREPARE_FAIL:
+ dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
+ break;
+ case QM_VF_START_FAIL:
+ dev_err(dev, "failed to start VF(%u)!\n", vf_id);
+ break;
+ case QM_VF_PREPARE_DONE:
+ case QM_VF_START_DONE:
+ break;
+ default:
+ dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ break;
+ }
+}
+
+static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 vfs_num = qm->vfs_num;
+ int cnt = 0;
+ int ret = 0;
+ u64 val;
+ u32 i;
+
+ if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ return 0;
+
+ while (true) {
+ val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
+ /* All VFs have sent their command to the PF; stop waiting */
+ if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
+ break;
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT) {
+ ret = -EBUSY;
+ break;
+ }
+
+ msleep(QM_WAIT_DST_ACK);
+ }
+
+ /* The PF checks each VF's message */
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ qm_handle_vf_msg(qm, i);
+ else
+ dev_err(dev, "VF(%u) not ping PF!\n", i);
+ }
+
+ /* PF clear interrupt to ack VFs */
+ qm_clear_cmd_interrupt(qm, val);
+
+ return ret;
+}
+
+static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_CFG);
+ val &= ~QM_IFC_SEND_ALL_VFS;
+ val |= fun_num;
+ writel(val, qm->io_base + QM_IFC_INT_CFG);
+
+ val = readl(qm->io_base + QM_IFC_INT_SET_P);
+ val |= QM_IFC_INT_SET_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SET_P);
+}
+
+static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_SET_V);
+ val |= QM_IFC_INT_SET_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SET_V);
+}
+
+static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_mailbox mailbox;
+ int cnt = 0;
+ u64 val;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
+ goto err_unlock;
+ }
+
+ qm_trigger_vf_interrupt(qm, fun_num);
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readq(qm->io_base + QM_IFC_READY_STATUS);
+ /* if the VF has acked, the notification succeeded */
+ if (!(val & BIT(fun_num)))
+ goto err_unlock;
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT) {
+ dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
+
+static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 vfs_num = qm->vfs_num;
+ struct qm_mailbox mailbox;
+ u64 val = 0;
+ int cnt = 0;
+ int ret;
+ u32 i;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
+ mutex_lock(&qm->mailbox_lock);
+ /* PF sends command to all VFs by mailbox */
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(dev, "failed to send command to VFs!\n");
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+ }
+
+ qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readq(qm->io_base + QM_IFC_READY_STATUS);
+ /* If all VFs have acked, the notification succeeded. */
+ if (!(val & GENMASK(vfs_num, 1))) {
+ mutex_unlock(&qm->mailbox_lock);
+ return 0;
+ }
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT)
+ break;
+ }
+
+ mutex_unlock(&qm->mailbox_lock);
+
+ /* Check which VFs timed out. */
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ dev_err(dev, "failed to get response from VF(%u)!\n", i);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+{
+ struct qm_mailbox mailbox;
+ int cnt = 0;
+ u32 val;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ goto unlock;
+ }
+
+ qm_trigger_pf_interrupt(qm);
+ /* Waiting for PF response */
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readl(qm->io_base + QM_IFC_INT_SET_V);
+ if (!(val & QM_IFC_INT_STATUS_MASK))
+ break;
+
+ if (++cnt > QM_MAX_VF_WAIT_COUNT) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
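The ping helpers are exposed through hisi_qm_hw_ops; a hedged sketch of how a VF reset path might use ping_pf to report progress (the surrounding flow and the helper name are assumptions, not part of this patch):

	/* Hypothetical helper, not from this patch. */
	static void qm_vf_report_prepare_done(struct hisi_qm *qm)
	{
		if (qm->ver < QM_HW_V3 || !qm->ops->ping_pf)
			return;

		if (qm->ops->ping_pf(qm, QM_VF_PREPARE_DONE))
			dev_warn(&qm->pdev->dev,
				 "failed to report prepare-done to PF!\n");
	}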
+
static int qm_stop_qp(struct hisi_qp *qp)
{
return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}
+static int qm_set_msi(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ if (set) {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ 0);
+ } else {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ ACC_PEH_MSI_DISABLE);
+ if (qm->err_status.is_qm_ecc_mbit ||
+ qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ mdelay(1);
+ if (readl(qm->io_base + QM_PEH_DFX_INFO0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void qm_wait_msi_finish(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 cmd = ~0;
+ int cnt = 0;
+ u32 val;
+ int ret;
+
+ while (true) {
+ pci_read_config_dword(pdev, pdev->msi_cap +
+ PCI_MSI_PENDING_64, &cmd);
+ if (!cmd)
+ break;
+
+ if (++cnt > MAX_WAIT_COUNTS) {
+ pci_warn(pdev, "failed to empty MSI PENDING!\n");
+ break;
+ }
+
+ udelay(1);
+ }
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
+ val, !(val & QM_PEH_DFX_MASK),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(pdev, "failed to empty PEH MSI!\n");
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
+ val, !(val & QM_PEH_MSI_FINISH_MASK),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(pdev, "failed to finish MSI operation!\n");
+}
+
+static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret = -ETIMEDOUT;
+ u32 cmd, i;
+
+ pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
+ if (set)
+ cmd |= QM_MSI_CAP_ENABLE;
+ else
+ cmd &= ~QM_MSI_CAP_ENABLE;
+
+ pci_write_config_dword(pdev, pdev->msi_cap, cmd);
+ if (set) {
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
+ if (cmd & QM_MSI_CAP_ENABLE)
+ return 0;
+
+ udelay(1);
+ }
+ } else {
+ udelay(WAIT_PERIOD_US_MIN);
+ qm_wait_msi_finish(qm);
+ ret = 0;
+ }
+
+ return ret;
+}
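A small sketch of the v3 enable path, assuming QM_MSI_CAP_ENABLE is the MSI Enable bit as it appears in the dword read at msi_cap (bit 0 of Message Control, i.e. bit 16): write the bit, then poll the same word until the device latches it, bounded as above. The device is faked with a plain variable.

    #include <stdint.h>
    #include <stdio.h>

    #define QM_MSI_CAP_ENABLE (1u << 16)    /* assumed bit position */
    #define MAX_WAIT_COUNTS   1000          /* assumed bound */

    static int msi_enable(volatile uint32_t *cap_word)
    {
        unsigned int i;

        *cap_word |= QM_MSI_CAP_ENABLE;
        for (i = 0; i < MAX_WAIT_COUNTS; i++)
            if (*cap_word & QM_MSI_CAP_ENABLE)    /* device latched it */
                return 0;
        return -1;    /* models -ETIMEDOUT */
    }

    int main(void)
    {
        uint32_t cap = 0;

        printf("enable: %d\n", msi_enable(&cap));
        return 0;
    }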
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
+ .set_msi = qm_set_msi,
};
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
@@ -1733,16 +2368,20 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi,
};
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
- .hw_error_init = qm_hw_error_init_v2,
- .hw_error_uninit = qm_hw_error_uninit_v2,
+ .get_irq_num = qm_get_irq_num_v3,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.stop_qp = qm_stop_qp,
+ .set_msi = qm_set_msi_v3,
+ .ping_all_vfs = qm_ping_all_vfs,
+ .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2017,11 +2656,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
int ret = 0, i = 0;
void *addr;
- /*
- * No need to judge if ECC multi-bit error occurs because the
- * master OOO will be blocked.
- */
- if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
+ /* No need to drain the qp if the master OOO is blocked by a device error. */
+ if (qm_check_dev_error(qm))
return 0;
/* Kunpeng930 supports drain qp by device */
@@ -2290,6 +2926,23 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
hisi_qm_stop_qp(q->priv);
}
+static int hisi_qm_is_q_updated(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+ struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+ int updated = 0;
+
+ while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
+ /* make sure to read data from memory */
+ dma_rmb();
+ qm_cq_head_update(qp);
+ cqe = qp->cqe + qp->qp_status.cq_head;
+ updated = 1;
+ }
+
+ return updated;
+}
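The callback relies on the usual completion-queue phase convention; a minimal standalone model with a stand-in CQE layout: hardware flips the phase of each CQE it writes, so a CQE belongs to software exactly while its phase matches the software-tracked phase, which toggles whenever cq_head wraps (what qm_cq_head_update does in the driver).

    #include <stdint.h>
    #include <stdio.h>

    #define CQ_DEPTH 4

    struct cqe { uint16_t phase; };    /* stand-in for the real CQE layout */

    struct cq {
        struct cqe ring[CQ_DEPTH];
        unsigned int head;
        uint16_t phase;    /* expected phase of the next valid CQE */
    };

    static int cq_updated(struct cq *cq)
    {
        int updated = 0;

        while (cq->ring[cq->head].phase == cq->phase) {
            if (++cq->head == CQ_DEPTH) {    /* wrap: expected phase flips */
                cq->head = 0;
                cq->phase ^= 1;
            }
            updated = 1;
        }
        return updated;
    }

    int main(void)
    {
        struct cq cq = { .phase = 1 };    /* hardware writes phase=1 on first pass */

        cq.ring[0].phase = 1;
        cq.ring[1].phase = 1;
        printf("updated=%d head=%u\n", cq_updated(&cq), cq.head);
        return 0;
    }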
+
static void qm_set_sqctype(struct uacce_queue *q, u16 type)
{
struct hisi_qm *qm = q->uacce->priv;
@@ -2335,6 +2988,7 @@ static const struct uacce_ops uacce_qm_ops = {
.stop_queue = hisi_qm_uacce_stop_queue,
.mmap = hisi_qm_uacce_mmap,
.ioctl = hisi_qm_uacce_ioctl,
+ .is_q_updated = hisi_qm_is_q_updated,
};
static int qm_alloc_uacce(struct hisi_qm *qm)
@@ -2530,62 +3184,6 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
return 0;
}
-static int hisi_qm_memory_init(struct hisi_qm *qm)
-{
- struct device *dev = &qm->pdev->dev;
- size_t qp_dma_size, off = 0;
- int i, ret = 0;
-
-#define QM_INIT_BUF(qm, type, num) do { \
- (qm)->type = ((qm)->qdma.va + (off)); \
- (qm)->type##_dma = (qm)->qdma.dma + (off); \
- off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
-} while (0)
-
- idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
- QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
- qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
- GFP_ATOMIC);
- dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
- if (!qm->qdma.va)
- return -ENOMEM;
-
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, sqc, qm->qp_num);
- QM_INIT_BUF(qm, cqc, qm->qp_num);
-
- qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
- if (!qm->qp_array) {
- ret = -ENOMEM;
- goto err_alloc_qp_array;
- }
-
- /* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
- qp_dma_size = PAGE_ALIGN(qp_dma_size);
- for (i = 0; i < qm->qp_num; i++) {
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
- if (ret)
- goto err_init_qp_mem;
-
- dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
- }
-
- return ret;
-
-err_init_qp_mem:
- hisi_qp_memory_uninit(qm, i);
-err_alloc_qp_array:
- dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-
- return ret;
-}
-
static void hisi_qm_pre_init(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2604,6 +3202,34 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->misc_ctl = false;
}
+static void qm_cmd_uninit(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + QM_IFC_INT_MASK);
+ val |= QM_IFC_INT_DISABLE;
+ writel(val, qm->io_base + QM_IFC_INT_MASK);
+}
+
+static void qm_cmd_init(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Clear communication interrupt source */
+ qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
+
+ /* Enable the PF-to-VF communication register. */
+ val = readl(qm->io_base + QM_IFC_INT_MASK);
+ val &= ~QM_IFC_INT_DISABLE;
+ writel(val, qm->io_base + QM_IFC_INT_MASK);
+}
+
static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2635,6 +3261,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ qm_cmd_uninit(qm);
+ kfree(qm->factor);
down_write(&qm->qps_lock);
if (!qm_avail_state(qm, QM_CLOSE)) {
@@ -2826,6 +3454,8 @@ static int __hisi_qm_start(struct hisi_qm *qm)
if (ret)
return ret;
+ qm_init_prefetch(qm);
+
writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
@@ -3034,79 +3664,6 @@ static int qm_debugfs_atomic64_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
qm_debugfs_atomic64_set, "%llu\n");
-/**
- * hisi_qm_debug_init() - Initialize qm related debugfs files.
- * @qm: The qm for which we want to add debugfs files.
- *
- * Create qm related debugfs files.
- */
-void hisi_qm_debug_init(struct hisi_qm *qm)
-{
- struct qm_dfx *dfx = &qm->debug.dfx;
- struct dentry *qm_d;
- void *data;
- int i;
-
- qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- qm->debug.qm_d = qm_d;
-
- /* only show this in PF */
- if (qm->fun_type == QM_HW_PF) {
- qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
- for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, qm_d, i);
- }
-
- debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
-
- debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
-
- debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
- &qm_status_fops);
- for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
- data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
- debugfs_create_file(qm_dfx_files[i].name,
- 0644,
- qm_d,
- data,
- &qm_atomic64_ops);
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
-
-/**
- * hisi_qm_debug_regs_clear() - clear qm debug related registers.
- * @qm: The qm for which we want to clear its debug registers.
- */
-void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
-{
- struct qm_dfx_registers *regs;
- int i;
-
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
- /* clear current_q */
- writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
- writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- /*
- * these registers are reading and clearing, so clear them after
- * reading them.
- */
- writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
-
- regs = qm_dfx_regs;
- for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
- readl(qm->io_base + regs->reg_offset);
- regs++;
- }
-
- writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
-
static void qm_hw_error_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -3362,6 +3919,360 @@ static int qm_clear_vft_config(struct hisi_qm *qm)
return 0;
}
+static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 ir = qos * QM_QOS_RATE;
+ int ret, total_vfs, i;
+
+ total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+ if (fun_index > total_vfs)
+ return -EINVAL;
+
+ qm->factor[fun_index].func_qos = qos;
+
+ ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
+ if (ret) {
+ dev_err(dev, "failed to calculate shaper parameter!\n");
+ return -EINVAL;
+ }
+
+ for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+ /* The base number of queue reuse for different alg type */
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+ if (ret) {
+ dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
+{
+ u64 cir_u = 0, cir_b = 0, cir_s = 0;
+ u64 shaper_vft, ir_calc, ir;
+ unsigned int val;
+ u32 error_rate;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), POLL_PERIOD,
+ POLL_TIMEOUT);
+ if (ret)
+ return 0;
+
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
+ writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
+ writel(fun_index, qm->io_base + QM_VFT_CFG);
+
+ writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), POLL_PERIOD,
+ POLL_TIMEOUT);
+ if (ret)
+ return 0;
+
+ shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
+ ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
+
+ cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
+ cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
+ cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
+
+ cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
+ cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
+
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
+
+ ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
+
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate > QM_QOS_MIN_ERROR_RATE) {
+ pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
+ return 0;
+ }
+
+ return ir;
+}
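The tail of the function is a round-trip sanity check: the rate recomputed from the CIR fields read back out of the shaper VFT must stay close to the configured rate. A sketch with assumed constant values (QM_QOS_EXPAND_RATE as a percent scale, QM_QOS_MIN_ERROR_RATE as the cutoff):

    #include <stdio.h>
    #include <stdlib.h>

    #define QM_QOS_EXPAND_RATE    100    /* assumed: percent scale */
    #define QM_QOS_MIN_ERROR_RATE 5      /* assumed cutoff */

    static int qos_readback_ok(unsigned int ir_calc, unsigned int ir)
    {
        unsigned int error_rate;

        error_rate = QM_QOS_EXPAND_RATE * (unsigned int)abs((int)(ir_calc - ir)) / ir;
        return error_rate <= QM_QOS_MIN_ERROR_RATE;
    }

    int main(void)
    {
        printf("%d\n", qos_readback_ok(980, 1000));    /* 2% off: accepted */
        printf("%d\n", qos_readback_ok(800, 1000));    /* 20% off: rejected */
        return 0;
    }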
+
+static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ u64 mb_cmd;
+ u32 qos;
+ int ret;
+
+ qos = qm_get_shaper_vft_qos(qm, fun_num);
+ if (!qos) {
+ dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
+ return;
+ }
+
+ mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
+ ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ if (ret)
+ dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+}
+
+static int qm_vf_read_qos(struct hisi_qm *qm)
+{
+ int cnt = 0;
+ int ret = 0;
+
+ /* reset mailbox qos val */
+ qm->mb_qos = 0;
+
+ /* vf ping pf to get function qos */
+ if (qm->ops->ping_pf) {
+ ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
+ }
+ }
+
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ if (qm->mb_qos)
+ break;
+
+ if (++cnt > QM_MAX_VF_WAIT_COUNT) {
+ pci_err(qm->pdev, "PF ping VF timeout!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char tbuf[QM_DBG_READ_LEN];
+ u32 qos_val, ir;
+ int ret;
+
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
+ return -EAGAIN;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ir = qm_get_shaper_vft_qos(qm, 0);
+ } else {
+ ret = qm_vf_read_qos(qm);
+ if (ret)
+ goto err_get_status;
+ ir = qm->mb_qos;
+ }
+
+ qos_val = ir / QM_QOS_RATE;
+ ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
+
+ ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_get_status:
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+ return ret;
+}
+
+static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
+{
+ int buflen = strlen(buf);
+ int ret, i;
+
+ for (i = 0; i < buflen; i++) {
+ if (!isdigit(buf[i]))
+ return -EINVAL;
+ }
+
+ ret = sscanf(buf, "%ld", val);
+ if (ret != QM_QOS_VAL_NUM)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char tbuf[QM_DBG_READ_LEN];
+ int tmp1, bus, device, function;
+ char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+ char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
+ unsigned int fun_index;
+ unsigned long val = 0;
+ int len, ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return -EINVAL;
+
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
+ return -EAGAIN;
+ }
+
+ if (*pos != 0) {
+ ret = 0;
+ goto err_get_status;
+ }
+
+ if (count >= QM_DBG_READ_LEN) {
+ ret = -ENOSPC;
+ goto err_get_status;
+ }
+
+ len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
+ if (len < 0) {
+ ret = len;
+ goto err_get_status;
+ }
+
+ tbuf[len] = '\0';
+ ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
+ if (ret != QM_QOS_PARAM_NUM) {
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = qm_qos_value_init(val_buf, &val);
+ if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
+ pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
+ if (ret != QM_QOS_BDF_PARAM_NUM) {
+ pci_err(qm->pdev, "input pci bdf value is error!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ fun_index = device * 8 + function;
+
+ ret = qm_func_shaper_enable(qm, fun_index, val);
+ if (ret) {
+ pci_err(qm->pdev, "failed to enable function shaper!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = count;
+
+err_get_status:
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+ return ret;
+}
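The accepted input is "<bdf> <qos>", e.g. writing "0000:81:02.1 500" to alg_qos, and fun_index mirrors the PCI devfn encoding (device << 3 | function). A simplified standalone sketch: it parses both fields with one format string, whereas the driver splits them first and uses "%d:%x:%d.%d", and the sample BDF is made up.

    #include <stdio.h>

    int main(void)
    {
        const char *input = "0000:81:02.1 500";
        unsigned int domain, bus, device, function, qos, fun_index;

        if (sscanf(input, "%x:%x:%x.%x %u",
                   &domain, &bus, &device, &function, &qos) != 5)
            return 1;

        fun_index = device * 8 + function;    /* same as PCI_DEVFN(device, function) */
        printf("fun_index=%u qos=%u\n", fun_index, qos);
        return 0;
    }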
+
+static const struct file_operations qm_algqos_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_algqos_read,
+ .write = qm_algqos_write,
+};
+
+/**
+ * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
+ * @qm: The qm for which we want to add debugfs files.
+ *
+ * Create function qos debugfs files.
+ */
+static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
+ qm, &qm_algqos_fops);
+ else
+ debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
+ qm, &qm_algqos_fops);
+}
+
+/**
+ * hisi_qm_debug_init() - Initialize qm related debugfs files.
+ * @qm: The qm for which we want to add debugfs files.
+ *
+ * Create qm related debugfs files.
+ */
+void hisi_qm_debug_init(struct hisi_qm *qm)
+{
+ struct qm_dfx *dfx = &qm->debug.dfx;
+ struct dentry *qm_d;
+ void *data;
+ int i;
+
+ qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
+ qm->debug.qm_d = qm_d;
+
+ /* only show this in PF */
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
+ for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
+ qm_create_debugfs_file(qm, qm->debug.qm_d, i);
+ }
+
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
+
+ if (qm->ver >= QM_HW_V3)
+ hisi_qm_set_algqos_init(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
+
+/**
+ * hisi_qm_debug_regs_clear() - clear qm debug related registers.
+ * @qm: The qm for which we want to clear its debug registers.
+ */
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
+{
+ struct qm_dfx_registers *regs;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear current_q */
+ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+ writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ /*
+ * These registers are read-to-clear, so clear them by reading
+ * them here.
+ */
+ writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
+
+ regs = qm_dfx_regs;
+ for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
+ readl(qm->io_base + regs->reg_offset);
+ regs++;
+ }
+
+ /* clear clear_enable */
+ writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -3416,6 +4327,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -3429,6 +4341,9 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
+ /* clear vf function shaper configure array */
+ memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -3527,17 +4442,15 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
-static u32 qm_get_hw_error_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
-}
-
static int qm_check_req_recv(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
u32 val;
+ if (qm->ver >= QM_HW_V3)
+ return 0;
+
writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
(val == ACC_VENDOR_ID_VALUE),
@@ -3608,28 +4521,6 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-static int qm_set_msi(struct hisi_qm *qm, bool set)
-{
- struct pci_dev *pdev = qm->pdev;
-
- if (set) {
- pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
- 0);
- } else {
- pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
- ACC_PEH_MSI_DISABLE);
- if (qm->err_status.is_qm_ecc_mbit ||
- qm->err_status.is_dev_ecc_mbit)
- return 0;
-
- mdelay(1);
- if (readl(qm->io_base + QM_PEH_DFX_INFO0))
- return -EFAULT;
- }
-
- return 0;
-}
-
static int qm_vf_reset_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
@@ -3660,14 +4551,35 @@ stop_fail:
return ret;
}
-static int qm_reset_prepare_ready(struct hisi_qm *qm)
+static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+ enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int ret;
+
+ if (!qm->vfs_num)
+ return 0;
+
+ /* Kunpeng930 supports notifying VFs to stop before PF reset */
+ if (qm->ops->ping_all_vfs) {
+ ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (ret)
+ pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ } else {
+ ret = qm_vf_reset_prepare(qm, stop_reason);
+ if (ret)
+ pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
+ }
+
+ return ret;
+}
+
+static int qm_wait_reset_finish(struct hisi_qm *qm)
+{
int delay = 0;
/* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return -EBUSY;
@@ -3676,6 +4588,32 @@ static int qm_reset_prepare_ready(struct hisi_qm *qm)
return 0;
}
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ /*
+ * On Kunpeng920, the PF and VFs on the host do not support
+ * resetting at the same time.
+ */
+ if (qm->ver < QM_HW_V3)
+ return qm_wait_reset_finish(pf_qm);
+
+ return qm_wait_reset_finish(qm);
+}
+
+static void qm_reset_bit_clear(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (qm->ver < QM_HW_V3)
+ clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+}
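QM_RESETTING effectively acts as a lock bit: every reset request spins (with growing sleeps in the driver) until it wins test_and_set_bit, and qm_reset_bit_clear releases it; on hardware older than V3 the bit lives on the PF, so PF and VF resets exclude each other. A compact userspace model with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;

    static int reset_prepare_ready(int max_tries)
    {
        int delay = 0;

        /* queue behind any reset already in flight, bounded like the driver */
        while (atomic_flag_test_and_set(&resetting))
            if (++delay > max_tries)
                return -1;    /* models -EBUSY */
        return 0;
    }

    static void reset_bit_clear(void)
    {
        atomic_flag_clear(&resetting);
    }

    int main(void)
    {
        printf("%d\n", reset_prepare_ready(10));    /* 0: acquired */
        printf("%d\n", reset_prepare_ready(10));    /* -1: already resetting */
        reset_bit_clear();
        return 0;
    }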
+
static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -3687,22 +4625,25 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
- if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
- if (ret) {
- pci_err(pdev, "Fails to stop VFs!\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
- return ret;
- }
- }
+ /* PF obtains the information of VF by querying the register. */
+ qm_cmd_uninit(qm);
+
+ /* Whether or not the VFs stop successfully, the soft reset continues. */
+ ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
+ if (ret)
+ pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to stop by vfs in soft reset!\n");
+
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return 0;
@@ -3712,6 +4653,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
u32 nfe_enb = 0;
+ /* Kunpeng930 hardware automatically closes the master OOO when an NFE occurs */
+ if (qm->ver >= QM_HW_V3)
+ return;
+
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
@@ -3748,7 +4693,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
}
}
- ret = qm_set_msi(qm, false);
+ ret = qm->ops->set_msi(qm, false);
if (ret) {
pci_err(pdev, "Fails to disable PEH MSI bit.\n");
return ret;
@@ -3770,6 +4715,9 @@ static int qm_soft_reset(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->close_sva_prefetch)
+ qm->err_ini->close_sva_prefetch(qm);
+
ret = qm_set_pf_mse(qm, false);
if (ret) {
pci_err(pdev, "Fails to disable pf MSE bit.\n");
@@ -3830,9 +4778,32 @@ restart_fail:
return ret;
}
-static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
- return qm->err_ini->get_dev_hw_err_status(qm);
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ if (!qm->vfs_num)
+ return 0;
+
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Kunpeng930 supports notifying VFs to start after PF reset. */
+ if (qm->ops->ping_all_vfs) {
+ ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (ret)
+ pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
+ } else {
+ ret = qm_vf_reset_done(qm);
+ if (ret)
+ pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
+ }
+
+ return ret;
}
static int qm_dev_hw_init(struct hisi_qm *qm)
@@ -3844,6 +4815,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
+ if (qm->err_ini->open_sva_prefetch)
+ qm->err_ini->open_sva_prefetch(qm);
+
+ if (qm->ver >= QM_HW_V3)
+ return;
+
if (!qm->err_status.is_qm_ecc_mbit &&
!qm->err_status.is_dev_ecc_mbit)
return;
@@ -3863,15 +4840,15 @@ static void qm_restart_prepare(struct hisi_qm *qm)
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
-
- if (qm->err_ini->open_axi_master_ooo)
- qm->err_ini->open_axi_master_ooo(qm);
}
static void qm_restart_done(struct hisi_qm *qm)
{
u32 value;
+ if (qm->ver >= QM_HW_V3)
+ goto clear_flags;
+
if (!qm->err_status.is_qm_ecc_mbit &&
!qm->err_status.is_dev_ecc_mbit)
return;
@@ -3881,6 +4858,7 @@ static void qm_restart_done(struct hisi_qm *qm)
value |= qm->err_info.msi_wr_port;
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+clear_flags:
qm->err_status.is_qm_ecc_mbit = false;
qm->err_status.is_dev_ecc_mbit = false;
}
@@ -3890,7 +4868,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
- ret = qm_set_msi(qm, true);
+ ret = qm->ops->set_msi(qm, true);
if (ret) {
pci_err(pdev, "Fails to enable PEH MSI bit!\n");
return ret;
@@ -3917,6 +4895,9 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
}
qm_restart_prepare(qm);
+ hisi_qm_dev_err_init(qm);
+ if (qm->err_ini->open_axi_master_ooo)
+ qm->err_ini->open_axi_master_ooo(qm);
ret = qm_restart(qm);
if (ret) {
@@ -3924,24 +4905,18 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
return ret;
}
- if (qm->vfs_num) {
- ret = qm_vf_q_assign(qm, qm->vfs_num);
- if (ret) {
- pci_err(pdev, "Failed to assign queue!\n");
- return ret;
- }
- }
+ ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
+ if (ret)
+ pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
- ret = qm_vf_reset_done(qm);
- if (ret) {
- pci_err(pdev, "Failed to start VFs!\n");
- return -EPERM;
- }
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to start by vfs in soft reset!\n");
- hisi_qm_dev_err_init(qm);
+ qm_cmd_init(qm);
qm_restart_done(qm);
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return 0;
}
@@ -3962,13 +4937,13 @@ static int qm_controller_reset(struct hisi_qm *qm)
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
ret = qm_controller_reset_done(qm);
if (ret) {
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
@@ -4005,21 +4980,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
-/* check the interrupt is ecc-mbit error or not */
-static int qm_check_dev_error(struct hisi_qm *qm)
-{
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
- if (ret)
- return ret;
-
- return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
-}
-
void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
@@ -4045,14 +5005,13 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
return;
}
- if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm, QM_FLR);
- if (ret) {
- pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
- ret);
- return;
- }
- }
+ /* PF obtains the information of VF by querying the register. */
+ if (qm->fun_type == QM_HW_PF)
+ qm_cmd_uninit(qm);
+
+ ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
+ if (ret)
+ pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
@@ -4060,6 +5019,10 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
return;
}
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to stop by vfs in FLR!\n");
+
pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
@@ -4085,42 +5048,38 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- hisi_qm_dev_err_init(pf_qm);
-
- ret = qm_restart(qm);
- if (ret) {
- pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
- goto flr_done;
- }
-
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_hw_init(qm);
if (ret) {
pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
goto flr_done;
}
+ }
- if (!qm->vfs_num)
- goto flr_done;
-
- ret = qm_vf_q_assign(qm, qm->vfs_num);
- if (ret) {
- pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
- goto flr_done;
- }
+ hisi_qm_dev_err_init(pf_qm);
- ret = qm_vf_reset_done(qm);
- if (ret) {
- pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
- goto flr_done;
- }
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
+ goto flr_done;
}
+ ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
+ if (ret)
+ pci_err(pdev, "failed to start vfs by pf in FLR.\n");
+
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to start by vfs in FLR!\n");
+
flr_done:
+ if (qm->fun_type == QM_HW_PF)
+ qm_cmd_init(qm);
+
if (qm_flr_reset_complete(pdev))
pci_info(pdev, "FLR reset complete\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
@@ -4149,7 +5108,7 @@ static int qm_irq_register(struct hisi_qm *qm)
if (ret)
return ret;
- if (qm->ver != QM_HW_V1) {
+ if (qm->ver > QM_HW_V1) {
ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
qm_aeq_irq, 0, qm->dev_name, qm);
if (ret)
@@ -4164,8 +5123,18 @@ static int qm_irq_register(struct hisi_qm *qm)
}
}
+ if (qm->ver > QM_HW_V2) {
+ ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
+ qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ goto err_mb_cmd_irq;
+ }
+
return 0;
+err_mb_cmd_irq:
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
err_abonormal_irq:
free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
@@ -4202,6 +5171,183 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
}
+static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
+{
+ enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "reset prepare not ready!\n");
+ atomic_set(&qm->status.flags, QM_STOP);
+ cmd = QM_VF_PREPARE_FAIL;
+ goto err_prepare;
+ }
+
+ ret = hisi_qm_stop(qm, stop_reason);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
+ atomic_set(&qm->status.flags, QM_STOP);
+ cmd = QM_VF_PREPARE_FAIL;
+ goto err_prepare;
+ }
+
+err_prepare:
+ pci_save_state(pdev);
+ ret = qm->ops->ping_pf(qm, cmd);
+ if (ret)
+ dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
+}
+
+static void qm_pf_reset_vf_done(struct hisi_qm *qm)
+{
+ enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_restore_state(pdev);
+ ret = hisi_qm_start(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
+ cmd = QM_VF_START_FAIL;
+ }
+
+ ret = qm->ops->ping_pf(qm, cmd);
+ if (ret)
+ dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
+
+ qm_reset_bit_clear(qm);
+}
+
+static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 val, cmd;
+ u64 msg;
+ int ret;
+
+ /* Wait for reset to finish */
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
+ val == BIT(0), QM_VF_RESET_WAIT_US,
+ QM_VF_RESET_WAIT_TIMEOUT_US);
+ /* hardware completion status should be available by this time */
+ if (ret) {
+ dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Whether or not the message is received successfully, the VF
+ * must ack the PF by clearing the interrupt.
+ */
+ ret = qm_get_mb_cmd(qm, &msg, 0);
+ qm_clear_cmd_interrupt(qm, 0);
+ if (ret) {
+ dev_err(dev, "failed to get msg from PF in reset done!\n");
+ return ret;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ if (cmd != QM_PF_RESET_DONE) {
+ dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qm_pf_reset_vf_process(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ dev_info(dev, "device reset start...\n");
+
+ /* The message is obtained by querying the register during resetting */
+ qm_cmd_uninit(qm);
+ qm_pf_reset_vf_prepare(qm, stop_reason);
+
+ ret = qm_wait_pf_reset_finish(qm);
+ if (ret)
+ goto err_get_status;
+
+ qm_pf_reset_vf_done(qm);
+ qm_cmd_init(qm);
+
+ dev_info(dev, "device reset done.\n");
+
+ return;
+
+err_get_status:
+ qm_cmd_init(qm);
+ qm_reset_bit_clear(qm);
+}
+
+static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ u64 msg;
+ u32 cmd;
+ int ret;
+
+ /*
+ * Get the msg from the source via mailbox. Whether or not the message is
+ * received successfully, the destination must ack the source by clearing
+ * the interrupt.
+ */
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ qm_clear_cmd_interrupt(qm, BIT(fun_num));
+ if (ret) {
+ dev_err(dev, "failed to get msg from source!\n");
+ return;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ switch (cmd) {
+ case QM_PF_FLR_PREPARE:
+ qm_pf_reset_vf_process(qm, QM_FLR);
+ break;
+ case QM_PF_SRST_PREPARE:
+ qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
+ break;
+ case QM_VF_GET_QOS:
+ qm_vf_get_qos(qm, fun_num);
+ break;
+ case QM_PF_SET_QOS:
+ qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ break;
+ default:
+ dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ break;
+ }
+}
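The handlers above assume a packed 64-bit message: the command sits in the low bits (QM_MB_CMD_DATA_MASK) and any payload, such as the qos value carried by QM_PF_SET_QOS, rides above QM_MB_CMD_DATA_SHIFT. A sketch with assumed constant values (shift, mask, and the opcode are all placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define QM_MB_CMD_DATA_SHIFT 32             /* assumed */
    #define QM_MB_CMD_DATA_MASK  0xffffffffULL  /* assumed */
    #define QM_PF_SET_QOS        0x5            /* placeholder opcode */

    int main(void)
    {
        /* pack: same shape as qm_vf_get_qos() building mb_cmd above */
        uint64_t msg = QM_PF_SET_QOS | (uint64_t)500 << QM_MB_CMD_DATA_SHIFT;

        /* unpack: same shape as qm_handle_cmd_msg() */
        printf("cmd=%llu qos=%llu\n",
               (unsigned long long)(msg & QM_MB_CMD_DATA_MASK),
               (unsigned long long)(msg >> QM_MB_CMD_DATA_SHIFT));
        return 0;
    }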
+
+static void qm_cmd_process(struct work_struct *cmd_process)
+{
+ struct hisi_qm *qm = container_of(cmd_process,
+ struct hisi_qm, cmd_process);
+ u32 vfs_num = qm->vfs_num;
+ u64 val;
+ u32 i;
+
+ if (qm->fun_type == QM_HW_PF) {
+ val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
+ if (!val)
+ return;
+
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ qm_handle_cmd_msg(qm, i);
+ }
+
+ return;
+ }
+
+ qm_handle_cmd_msg(qm, 0);
+}
+
/**
* hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
* @qm: The qm needs add.
@@ -4212,11 +5358,9 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
*/
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
+ struct device *dev = &qm->pdev->dev;
int flag = 0;
int ret = 0;
- /* HW V2 not support both use uacce sva mode and hardware crypto algs */
- if (qm->ver <= QM_HW_V2 && qm->use_sva)
- return 0;
mutex_lock(&qm_list->lock);
if (list_empty(&qm_list->list))
@@ -4224,6 +5368,11 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
list_add_tail(&qm->list, &qm_list->list);
mutex_unlock(&qm_list->lock);
+ if (qm->ver <= QM_HW_V2 && qm->use_sva) {
+ dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
+ return 0;
+ }
+
if (flag) {
ret = qm_list->register_to_crypto(qm);
if (ret) {
@@ -4248,13 +5397,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
*/
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
- if (qm->ver <= QM_HW_V2 && qm->use_sva)
- return;
-
mutex_lock(&qm_list->lock);
list_del(&qm->list);
mutex_unlock(&qm_list->lock);
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return;
+
if (list_empty(&qm_list->list))
qm_list->unregister_from_crypto(qm);
}
@@ -4389,6 +5538,94 @@ err_disable_pcidev:
return ret;
}
+static void hisi_qm_init_work(struct hisi_qm *qm)
+{
+ INIT_WORK(&qm->work, qm_work_process);
+ if (qm->fun_type == QM_HW_PF)
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
+
+ if (qm->ver > QM_HW_V2)
+ INIT_WORK(&qm->cmd_process, qm_cmd_process);
+}
+
+static int hisi_qp_alloc_memory(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t qp_dma_size;
+ int i, ret;
+
+ qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
+ if (!qm->qp_array)
+ return -ENOMEM;
+
+ /* one more page for device or qp statuses */
+ qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
+ for (i = 0; i < qm->qp_num; i++) {
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ if (ret)
+ goto err_init_qp_mem;
+
+ dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
+ }
+
+ return 0;
+err_init_qp_mem:
+ hisi_qp_memory_uninit(qm, i);
+
+ return ret;
+}
+
+static int hisi_qm_memory_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret, total_vfs;
+ size_t off = 0;
+
+ total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+ qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+#define QM_INIT_BUF(qm, type, num) do { \
+ (qm)->type = ((qm)->qdma.va + (off)); \
+ (qm)->type##_dma = (qm)->qdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
+} while (0)
+
+ idr_init(&qm->qp_idr);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
+ QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
+ qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
+ GFP_ATOMIC);
+ dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
+ if (!qm->qdma.va) {
+ ret = -ENOMEM;
+ goto err_alloc_qdma;
+ }
+
+ QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
+ QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, sqc, qm->qp_num);
+ QM_INIT_BUF(qm, cqc, qm->qp_num);
+
+ ret = hisi_qp_alloc_memory(qm);
+ if (ret)
+ goto err_alloc_qp_array;
+
+ return 0;
+
+err_alloc_qp_array:
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
+err_alloc_qdma:
+ kfree(qm->factor);
+
+ return ret;
+}
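QM_INIT_BUF carves one coherent allocation into the eqe/aeqe/sqc/cqc regions by walking an offset, aligning each region with QMC_ALIGN. A userspace model of the same offset arithmetic, with malloc standing in for dma_alloc_coherent (the sizes and the 32-byte alignment are made up):

    #include <stdio.h>
    #include <stdlib.h>

    #define QMC_ALIGN(sz) (((sz) + 32 - 1) & ~(size_t)(32 - 1))    /* assumed alignment */

    int main(void)
    {
        size_t eqe_sz = QMC_ALIGN(16 * 256);    /* made-up element size * count */
        size_t sqc_sz = QMC_ALIGN(32 * 64);
        size_t off = 0;
        char *va = malloc(eqe_sz + sqc_sz);     /* stands in for dma_alloc_coherent() */

        if (!va)
            return 1;

        /* walk the offset exactly as QM_INIT_BUF does */
        char *eqe = va + off; off += eqe_sz;
        char *sqc = va + off; off += sqc_sz;

        printf("eqe at +%td, sqc at +%td, total %zu\n", eqe - va, sqc - va, off);
        free(va);
        return 0;
    }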
+
/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
@@ -4426,10 +5663,8 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_alloc_uacce;
- INIT_WORK(&qm->work, qm_work_process);
- if (qm->fun_type == QM_HW_PF)
- INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
-
+ hisi_qm_init_work(qm);
+ qm_cmd_init(qm);
atomic_set(&qm->status.flags, QM_INIT);
return 0;
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index acefdf8b3a50..035eaf8c442d 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -76,6 +76,9 @@
#define QM_Q_DEPTH 1024
#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
@@ -148,6 +151,14 @@ struct qm_debug {
struct debugfs_file files[DEBUG_FILE_NUM];
};
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
struct qm_dma {
void *va;
dma_addr_t dma;
@@ -188,6 +199,8 @@ struct hisi_qm_err_ini {
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
void (*err_info_init)(struct hisi_qm *qm);
};
@@ -248,6 +261,7 @@ struct hisi_qm {
struct workqueue_struct *wq;
struct work_struct work;
struct work_struct rst_work;
+ struct work_struct cmd_process;
const char *algs;
bool use_sva;
@@ -259,6 +273,9 @@ struct hisi_qm {
resource_size_t db_phys_base;
struct uacce_device *uacce;
int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
};
struct hisi_qp_status {
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index dfdce2f21e65..018415b9840a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -13,14 +13,14 @@ struct sec_alg_res {
dma_addr_t pbuf_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *a_ivin;
+ dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
struct sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
u8 *c_ivin;
@@ -33,15 +33,25 @@ struct sec_cipher_req {
struct sec_aead_req {
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u8 *a_ivin;
+ dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
};
/* SEC request of Crypto */
struct sec_req {
- struct sec_sqe sec_sqe;
+ union {
+ struct sec_sqe sec_sqe;
+ struct sec_sqe3 sec_sqe3;
+ };
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
+ /* Common parameters of the SEC request */
+ struct hisi_acc_hw_sgl *in;
+ dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
struct list_head backlog_head;
@@ -81,7 +91,9 @@ struct sec_auth_ctx {
u8 a_key_len;
u8 mac_len;
u8 a_alg;
+ bool fallback;
struct crypto_shash *hash_tfm;
+ struct crypto_aead *fallback_aead_tfm;
};
/* SEC cipher context which cipher's relatives */
@@ -94,6 +106,10 @@ struct sec_cipher_ctx {
u8 c_mode;
u8 c_alg;
u8 c_key_len;
+
+ /* add software support */
+ bool fallback;
+ struct crypto_sync_skcipher *fbtfm;
};
/* SEC queue context which defines queue's relatives */
@@ -137,6 +153,7 @@ struct sec_ctx {
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ u8 type_supported;
struct device *dev;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 133aede8bf07..6a45bd23b363 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/aes.h>
+#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
@@ -21,6 +22,7 @@
#define SEC_PRIORITY 4001
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
@@ -37,10 +39,23 @@
#define SEC_AEAD_ALG_OFFSET 11
#define SEC_AUTH_OFFSET 6
+#define SEC_DE_OFFSET_V3 9
+#define SEC_SCENE_OFFSET_V3 5
+#define SEC_CKEY_OFFSET_V3 13
+#define SEC_SRC_SGL_OFFSET_V3 11
+#define SEC_DST_SGL_OFFSET_V3 14
+#define SEC_CALG_OFFSET_V3 4
+#define SEC_AKEY_OFFSET_V3 9
+#define SEC_MAC_OFFSET_V3 4
+#define SEC_AUTH_ALG_OFFSET_V3 15
+#define SEC_CIPHER_AUTH_V3 0xbf
+#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
+#define SEC_ICV_MASK 0x000E
+#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
@@ -66,6 +81,25 @@
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
+#define SEC_ICV_ERR 0x2
+#define MIN_MAC_LEN 4
+#define MAC_LEN_MASK 0x1U
+#define MAX_INPUT_DATA_LEN 0xFFFE00
+#define BITS_MASK 0xFF
+#define BYTE_BITS 0x8
+#define SEC_XTS_NAME_SZ 0x3
+#define IV_CM_CAL_NUM 2
+#define IV_CL_MASK 0x7
+#define IV_CL_MIN 2
+#define IV_CL_MID 4
+#define IV_CL_MAX 8
+#define IV_FLAGS_OFFSET 0x6
+#define IV_CM_OFFSET 0x3
+#define IV_LAST_BYTE1 1
+#define IV_LAST_BYTE2 2
+#define IV_LAST_BYTE_MASK 0xFF
+#define IV_CTR_INIT 0x1
+#define IV_BYTE_OFFSET 0x8
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
@@ -124,22 +158,59 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_aead_verify(struct sec_req *req)
+static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
- struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->aead_req.out_mac;
- u8 *mac = mac_out + SEC_MAX_MAC_LEN;
- struct scatterlist *sgl = aead_req->src;
- size_t sz;
+ struct sec_sqe *bd = resp;
+
+ status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
+ status->flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ status->tag = le16_to_cpu(bd->type2.tag);
+ status->err_type = bd->type2.error_type;
+
+ return bd->type_cipher_auth & SEC_TYPE_MASK;
+}
+
+static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
+{
+ struct sec_sqe3 *bd3 = resp;
+
+ status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
+ status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
+ status->flag = (le16_to_cpu(bd3->done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ status->tag = le64_to_cpu(bd3->tag);
+ status->err_type = bd3->error_type;
+
+ return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
+}
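Both pre-parsers decode the same done_flag word: bit 0 is the done bit, bits 1-3 carry the ICV result, and bits 7-10 the completion flag, per the SEC_DONE_MASK/SEC_ICV_MASK/SEC_FLAG_MASK values defined later in this patch. A standalone decode example:

    #include <stdint.h>
    #include <stdio.h>

    #define SEC_DONE_MASK   0x0001
    #define SEC_ICV_MASK    0x000E
    #define SEC_FLAG_MASK   0x0780
    #define SEC_FLAG_OFFSET 7

    int main(void)
    {
        uint16_t done_flag = 0x0101;    /* done=1, flag=2 (SEC_SQE_CFLAG), icv=0 */

        printf("done=%u icv=%u flag=%u\n",
               done_flag & SEC_DONE_MASK,
               (done_flag & SEC_ICV_MASK) >> 1,
               (done_flag & SEC_FLAG_MASK) >> SEC_FLAG_OFFSET);
        return 0;
    }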
+
+static int sec_cb_status_check(struct sec_req *req,
+ struct bd_status *status)
+{
+ struct sec_ctx *ctx = req->ctx;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
- aead_req->cryptlen + aead_req->assoclen -
- authsize);
- if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(req->ctx->dev, "aead verify failure!\n");
- return -EBADMSG;
+ if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
+ dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
+ req->err_type, status->done);
+ return -EIO;
+ }
+
+ if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
+ if (unlikely(status->flag != SEC_SQE_CFLAG)) {
+ dev_err_ratelimited(ctx->dev, "flag[%u]\n",
+ status->flag);
+ return -EIO;
+ }
+ } else if (unlikely(ctx->alg_type == SEC_AEAD)) {
+ if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
+ status->icv == SEC_ICV_ERR)) {
+ dev_err_ratelimited(ctx->dev,
+ "flag[%u], icv[%u]\n",
+ status->flag, status->icv);
+ return -EBADMSG;
+ }
}
return 0;
@@ -149,43 +220,38 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
- struct sec_sqe *bd = resp;
+ u8 type_supported = qp_ctx->ctx->type_supported;
+ struct bd_status status;
struct sec_ctx *ctx;
struct sec_req *req;
- u16 done, flag;
- int err = 0;
+ int err;
u8 type;
- type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (unlikely(type != SEC_BD_TYPE2)) {
+ if (type_supported == SEC_BD_TYPE2) {
+ type = pre_parse_finished_bd(&status, resp);
+ req = qp_ctx->req_list[status.tag];
+ } else {
+ type = pre_parse_finished_bd3(&status, resp);
+ req = (void *)(uintptr_t)status.tag;
+ }
+
+ if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
atomic_inc(&qp->qp_status.used);
return;
}
- req->err_type = bd->type2.error_type;
+
+ req->err_type = status.err_type;
ctx = req->ctx;
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (unlikely(req->err_type || done != SEC_SQE_DONE ||
- (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
- (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err_ratelimited(ctx->dev,
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- err = -EIO;
+ err = sec_cb_status_check(req, &status);
+ if (err)
atomic64_inc(&dfx->done_flag_cnt);
- }
-
- if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
- err = sec_aead_verify(req);
atomic64_inc(&dfx->recv_cnt);
@@ -253,6 +319,30 @@ static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
res->c_ivin, res->c_ivin_dma);
}
+static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->a_ivin_dma, GFP_KERNEL);
+ if (!res->a_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
+ res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->a_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin, res->a_ivin_dma);
+}
+
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
int i;
@@ -335,9 +425,13 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
return ret;
if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_aiv_resource(dev, res);
+ if (ret)
+ goto alloc_aiv_fail;
+
ret = sec_alloc_mac_resource(dev, res);
if (ret)
- goto alloc_fail;
+ goto alloc_mac_fail;
}
if (ctx->pbuf_supported) {
ret = sec_alloc_pbuf_resource(dev, res);
@@ -352,7 +446,10 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
alloc_pbuf_fail:
if (ctx->alg_type == SEC_AEAD)
sec_free_mac_resource(dev, qp_ctx->res);
-alloc_fail:
+alloc_mac_fail:
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_aiv_resource(dev, res);
+alloc_aiv_fail:
sec_free_civ_resource(dev, res);
return ret;
}
@@ -382,10 +479,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp = ctx->qps[qp_ctx_id];
qp->req_type = 0;
qp->qp_ctx = qp_ctx;
- qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
+ qp->req_cb = sec_req_cb;
+
mutex_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
@@ -536,6 +634,26 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
a_ctx->a_key, a_ctx->a_key_dma);
}
+static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
+{
+ const char *alg = crypto_tfm_alg_name(&tfm->base);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->fallback = false;
+ if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
+ return 0;
+
+ c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(c_ctx->fbtfm)) {
+ pr_err("failed to alloc fallback tfm!\n");
+ return PTR_ERR(c_ctx->fbtfm);
+ }
+
+ return 0;
+}
+
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -557,8 +675,14 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
if (ret)
goto err_cipher_init;
+ ret = sec_skcipher_fbtfm_init(tfm);
+ if (ret)
+ goto err_fbtfm_init;
+
return 0;
+err_fbtfm_init:
+ sec_cipher_uninit(ctx);
err_cipher_init:
sec_ctx_base_uninit(ctx);
return ret;
@@ -568,6 +692,9 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ if (ctx->c_ctx.fbtfm)
+ crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
+
sec_cipher_uninit(ctx);
sec_ctx_base_uninit(ctx);
}
@@ -607,6 +734,9 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
case SEC_XTS_MIN_KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
+ case SEC_XTS_MID_KEY_SIZE:
+ c_ctx->fallback = true;
+ break;
case SEC_XTS_MAX_KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
@@ -615,19 +745,25 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
return -EINVAL;
}
} else {
- switch (keylen) {
- case AES_KEYSIZE_128:
- c_ctx->c_key_len = SEC_CKEY_128BIT;
- break;
- case AES_KEYSIZE_192:
- c_ctx->c_key_len = SEC_CKEY_192BIT;
- break;
- case AES_KEYSIZE_256:
- c_ctx->c_key_len = SEC_CKEY_256BIT;
- break;
- default:
- pr_err("hisi_sec2: aes key error!\n");
+ if (c_ctx->c_alg == SEC_CALG_SM4 &&
+ keylen != AES_KEYSIZE_128) {
+ pr_err("hisi_sec2: sm4 key error!\n");
return -EINVAL;
+ } else {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aes key error!\n");
+ return -EINVAL;
+ }
}
}
@@ -672,7 +808,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
-
+ if (c_ctx->fallback) {
+ ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "failed to set fallback skcipher key!\n");
+ return ret;
+ }
+ }
return 0;
}
@@ -686,22 +828,30 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-
+GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
+GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
+GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
-
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
+GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
+GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aead_req = a_req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
+ struct crypto_aead *tfm;
+ size_t authsize;
+ u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
@@ -709,15 +859,20 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
+ qp_ctx->res[req_id].pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
}
+ if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+ tfm = crypto_aead_reqtfm(aead_req);
+ authsize = crypto_aead_authsize(tfm);
+ mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
+ memcpy(a_req->out_mac, mac_offset, authsize);
+ }
- c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
- c_req->c_out_dma = c_req->c_in_dma;
+ req->in_dma = qp_ctx->res[req_id].pbuf_dma;
+ c_req->c_out_dma = req->in_dma;
return 0;
}
@@ -728,7 +883,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -738,10 +892,29 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf,
- copy_size);
+ qp_ctx->res[req_id].pbuf, copy_size);
if (unlikely(pbuf_length != copy_size))
- dev_err(dev, "copy pbuf data to dst error!\n");
+ dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+}
+
+static int sec_aead_mac_init(struct sec_aead_req *req)
+{
+ struct aead_request *aead_req = req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac_out = req->out_mac;
+ struct scatterlist *sgl = aead_req->src;
+ size_t copy_size;
+ off_t skip_size;
+
+ /* Copy input mac */
+ skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
+ authsize, skip_size);
+ if (unlikely(copy_size != authsize))
+ return -EINVAL;
+
+ return 0;
}
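For decrypt, the received MAC is the last authsize bytes of assoclen + cryptlen in the source scatterlist, so sec_aead_mac_init copies it out at that offset before the engine runs (sg_pcopy_to_buffer does the skip-and-copy over the sgl). A flat-buffer sketch of the same arithmetic, with made-up lengths:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char src[32] = { 0 };    /* AAD + ciphertext + MAC, contiguous here */
        unsigned char mac[8];
        size_t assoclen = 8, cryptlen = 24, authsize = 8;
        size_t skip = assoclen + cryptlen - authsize;

        memcpy(src + skip, "MACMACMA", authsize);    /* pretend received MAC */
        memcpy(mac, src + skip, authsize);           /* what the driver copies out */
        printf("skip=%zu mac[0]=%c\n", skip, mac[0]);
        return 0;
    }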
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
@@ -755,37 +928,48 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
int ret;
if (req->use_pbuf) {
- ret = sec_cipher_pbuf_map(ctx, req, src);
c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
if (ctx->alg_type == SEC_AEAD) {
+ a_req->a_ivin = res->a_ivin;
+ a_req->a_ivin_dma = res->a_ivin_dma;
a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
+ ret = sec_cipher_pbuf_map(ctx, req, src);
return ret;
}
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
+ a_req->a_ivin = res->a_ivin;
+ a_req->a_ivin_dma = res->a_ivin_dma;
a_req->out_mac = res->out_mac;
a_req->out_mac_dma = res->out_mac_dma;
}
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
- qp_ctx->c_in_pool,
- req->req_id,
- &c_req->c_in_dma);
-
- if (IS_ERR(c_req->c_in)) {
+ req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &req->in_dma);
+ if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
- return PTR_ERR(c_req->c_in);
+ return PTR_ERR(req->in);
+ }
+
+ if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ return ret;
+ }
}
if (dst == src) {
- c_req->c_out = c_req->c_in;
- c_req->c_out_dma = c_req->c_in_dma;
+ c_req->c_out = req->in;
+ c_req->c_out_dma = req->in_dma;
} else {
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
@@ -794,7 +978,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in);
return PTR_ERR(c_req->c_out);
}
}
@@ -812,7 +996,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in);
hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
}
@@ -883,6 +1067,28 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
return 0;
}
+static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ if (unlikely(a_ctx->fallback_aead_tfm))
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+
+ return 0;
+}
+
+static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+ struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
+ crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+ return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
+}
+
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
@@ -891,6 +1097,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -900,6 +1107,23 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
+ if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
+ ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+ if (ret) {
+ dev_err(dev, "set sec aes ccm cipher key err!\n");
+ return ret;
+ }
+ memcpy(c_ctx->c_key, key, keylen);
+
+ if (unlikely(a_ctx->fallback_aead_tfm)) {
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
@@ -915,6 +1139,12 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
+ if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
+ (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ dev_err(dev, "MAC or AUTH key length error!\n");
+ goto bad_key;
+ }
+
return 0;
bad_key:
@@ -936,6 +1166,14 @@ GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
+ SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
+ SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
+ SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
+ SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -998,7 +1236,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
@@ -1014,29 +1252,86 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
sec_sqe->type_cipher_auth = bd_type | cipher;
- if (req->use_pbuf)
+ /* Set destination and source address type */
+ if (req->use_pbuf) {
sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
- else
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ } else {
sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ }
+
+ sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (c_req->c_in_dma != c_req->c_out_dma)
+ if (req->in_dma != c_req->c_out_dma)
de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
- /* Just set DST address type */
- if (req->use_pbuf)
- da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
- else
- da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
- sec_sqe->sdm_addr_type |= da_type;
-
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
+static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ u32 bd_param = 0;
+ u16 cipher;
+
+ memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
+
+ sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
+ c_ctx->c_mode;
+ sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET_V3);
+
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC;
+ else
+ cipher = SEC_CIPHER_DEC;
+ sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
+
+ if (req->use_pbuf) {
+ bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
+ bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
+ } else {
+ bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
+ bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
+ }
+
+ bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
+ if (req->in_dma != c_req->c_out_dma)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
+
+ bd_param |= SEC_BD_TYPE3;
+ sec_sqe3->bd_param = cpu_to_le32(bd_param);
+
+ sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
+ sec_sqe3->tag = cpu_to_le64(req);
+
+ return 0;
+}
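bd_param gathers the BD type, source/destination address types, scene, and the DE bit (set when the source and destination buffers differ) into a single 32-bit word. A sketch of the composition for an in-place pbuf request (symbolic; the _V3 shift values are defined in the driver header, not in this hunk):

	/*
	 * Example composition for an in-place pbuf request (symbolic):
	 *   bd_param = SEC_BD_TYPE3
	 *            | (SEC_PBUF << SEC_SRC_SGL_OFFSET_V3)
	 *            | (SEC_PBUF << SEC_DST_SGL_OFFSET_V3)
	 *            | (SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3);
	 * The DE bit stays clear because in_dma == c_out_dma.
	 */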
+
+/* increment counter (128-bit int) */
+static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
+{
+ do {
+ --bits;
+ nums += counter[bits];
+ counter[bits] = nums & BITS_MASK;
+ nums >>= BYTE_BITS;
+ } while (bits && nums);
+}
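ctr_iv_inc() treats the IV as a big-endian multi-precision integer and adds the number of processed blocks, propagating the carry from the least-significant byte upward. A standalone sketch of the same loop, assuming BITS_MASK is 0xFF and BYTE_BITS is 8:

	/* Illustrative sketch: big-endian add-with-carry over an IV. */
	static void ctr_inc_demo(u8 *iv, u8 len, u32 add)
	{
		while (len && add) {
			add += iv[--len];
			iv[len] = add & 0xFF;	/* BITS_MASK */
			add >>= 8;		/* BYTE_BITS */
		}
	}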
+
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
struct aead_request *aead_req = req->aead_req.aead_req;
@@ -1060,10 +1355,17 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
cryptlen = aead_req->cryptlen;
}
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
- cryptlen - iv_size);
- if (unlikely(sz != iv_size))
- dev_err(req->ctx->dev, "copy output iv error!\n");
+ if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(req->ctx->dev, "copy output iv error!\n");
+ } else {
+ sz = cryptlen / iv_size;
+ if (cryptlen % iv_size)
+ sz += 1;
+ ctr_iv_inc(iv, iv_size, sz);
+ }
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1094,8 +1396,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
sec_free_req_id(req);
- /* IV output at encrypto of CBC mode */
- if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ /* IV output at encryption for CBC/CTR mode */
+ if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
+ ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
while (1) {
@@ -1112,12 +1415,125 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
sk_req->base.complete(&sk_req->base, err);
}
-static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
+ size_t authsize = ctx->a_ctx.mac_len;
+ u32 data_size = aead_req->cryptlen;
+ u8 flag = 0;
+ u8 cm, cl;
+
+ /* the specification has been checked in aead_iv_dimension_check() */
+ cl = c_req->c_ivin[0] + 1;
+ c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
+ memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
+ c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
+
+ /* the last 3 bits are L' */
+ flag |= c_req->c_ivin[0] & IV_CL_MASK;
+
+ /* M' occupies bits 3~5, the Adata flag is bit 6 */
+ cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
+ flag |= cm << IV_CM_OFFSET;
+ if (aead_req->assoclen)
+ flag |= 0x01 << IV_FLAGS_OFFSET;
+
+ memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
+ a_req->a_ivin[0] = flag;
+
+ /*
+ * the last 32 bits hold the counter's initial value, but the
+ * nonce uses only the first 16 bits; the tail 16 bits are
+ * filled with the cipher length
+ */
+ if (!c_req->encrypt)
+ data_size = aead_req->cryptlen - authsize;
+
+ a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
+ data_size & IV_LAST_BYTE_MASK;
+ data_size >>= IV_BYTE_OFFSET;
+ a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
+ data_size & IV_LAST_BYTE_MASK;
+}
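The flags byte assembled above follows the CCM counter-block layout from RFC 3610: L' in the low 3 bits, M' = (authsize - 2) / 2 in bits 3~5, and the Adata bit when associated data is present. A worked example, assuming IV_CM_CAL_NUM = 2, IV_CM_OFFSET = 3 and IV_FLAGS_OFFSET = 6 (macro values defined earlier in the file, not visible in this hunk):

	/*
	 * Worked example (assumed macro values as above):
	 *   authsize = 16  ->  cm = (16 - 2) / 2 = 7 (M')
	 *   iv[0] = 0x03   ->  L' = 3, i.e. four length octets
	 *   assoclen > 0   ->  Adata = 1
	 *   flag = (1 << 6) | (7 << 3) | 3 = 0x7b
	 */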
+
+static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
+ /*
+ * CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
+ * the counter must be set to 0x01
+ */
+ ctx->a_ctx.mac_len = authsize;
+ /* CCM 16Byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length} */
+ set_aead_auth_iv(ctx, req);
+ }
+
+ /* GCM 12Byte Cipher_IV == Auth_IV */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ ctx->a_ctx.mac_len = authsize;
+ memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
+ }
+}
+
+static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+
+ /* when the mode is CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
+ sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+ sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
+ sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
+}
+
+static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe3 *sqe3)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+ sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+
+ /* when the mode is CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ sqe3->a_key_addr = sqe3->c_key_addr;
+ sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+ sqe3->auth_mac_key |= SEC_NO_AUTH;
+
+ if (dir)
+ sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
+ else
+ sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
+
+ sqe3->a_len_key = cpu_to_le32(aq->assoclen);
+ sqe3->auth_src_offset = cpu_to_le16(0x0);
+ sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+ sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -1139,13 +1555,13 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
- sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
-
- if (dir)
+ if (dir) {
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
- else
+ } else {
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
-
+ }
sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
@@ -1165,7 +1581,68 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
- sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+ ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+ sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+ else
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe3 *sqe3)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->mac_len /
+ SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->a_key_len /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
+
+ if (dir) {
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
+ sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
+ } else {
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
+ sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
+ }
+ sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
+}
+
+static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
+ int ret;
+
+ ret = sec_skcipher_bd_fill_v3(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
+ return ret;
+ }
+
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+ ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+ sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
+ req, sec_sqe3);
+ else
+ sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
+ req, sec_sqe3);
return 0;
}
@@ -1254,7 +1731,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
goto err_uninit_req;
/* Output IV at decryption */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
+ ctx->c_ctx.c_mode == SEC_CMODE_CTR))
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
@@ -1296,20 +1774,51 @@ static const struct sec_req_op sec_skcipher_req_ops = {
static const struct sec_req_op sec_aead_req_ops = {
.buf_map = sec_aead_sgl_map,
.buf_unmap = sec_aead_sgl_unmap,
- .do_transfer = sec_aead_copy_iv,
+ .do_transfer = sec_aead_set_iv,
.bd_fill = sec_aead_bd_fill,
.bd_send = sec_bd_send,
.callback = sec_aead_callback,
.process = sec_process,
};
+static const struct sec_req_op sec_skcipher_req_ops_v3 = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill_v3,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static const struct sec_req_op sec_aead_req_ops_v3 = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_set_iv,
+ .bd_fill = sec_aead_bd_fill_v3,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- ctx->req_op = &sec_skcipher_req_ops;
+ ret = sec_skcipher_init(tfm);
+ if (ret)
+ return ret;
- return sec_skcipher_init(tfm);
+ if (ctx->sec->qm.ver < QM_HW_V3) {
+ ctx->type_supported = SEC_BD_TYPE2;
+ ctx->req_op = &sec_skcipher_req_ops;
+ } else {
+ ctx->type_supported = SEC_BD_TYPE3;
+ ctx->req_op = &sec_skcipher_req_ops_v3;
+ }
+
+ return ret;
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
@@ -1325,15 +1834,22 @@ static int sec_aead_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
- if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(ctx->dev, "get error aead iv size!\n");
+ if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
+ ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ pr_err("get error aead iv size!\n");
return -EINVAL;
}
- ctx->req_op = &sec_aead_req_ops;
ret = sec_ctx_base_init(ctx);
if (ret)
return ret;
+ if (ctx->sec->qm.ver < QM_HW_V3) {
+ ctx->type_supported = SEC_BD_TYPE2;
+ ctx->req_op = &sec_aead_req_ops;
+ } else {
+ ctx->type_supported = SEC_BD_TYPE3;
+ ctx->req_op = &sec_aead_req_ops_v3;
+ }
ret = sec_auth_init(ctx);
if (ret)
@@ -1391,6 +1907,41 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
sec_aead_exit(tfm);
}
+static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
+ return ret;
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
+ }
+ a_ctx->fallback = false;
+
+ return 0;
+}
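A note on the crypto_alloc_aead() mask above: passing type 0 with mask CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC asks the crypto API for an implementation that is synchronous and does not itself need a fallback, so the lookup can never resolve back to this driver. In sketch form:

	/*
	 * type = 0, mask = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC:
	 * match only algorithms whose NEED_FALLBACK and ASYNC flag bits
	 * are both clear -- a sync, self-contained software AEAD.
	 */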
+
+static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
+ sec_aead_exit(tfm);
+}
+
static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
return sec_aead_ctx_init(tfm, "sha1");
@@ -1429,6 +1980,14 @@ static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
ret = -EINVAL;
}
break;
+ case SEC_CMODE_CFB:
+ case SEC_CMODE_OFB:
+ case SEC_CMODE_CTR:
+ if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
+ dev_err(dev, "skcipher HW version error!\n");
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1442,7 +2001,8 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst)) {
+ if (unlikely(!sk_req->src || !sk_req->dst ||
+ sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
@@ -1468,6 +2028,37 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
+ int ret;
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
+
+ if (!c_ctx->fbtfm) {
+ dev_err(dev, "failed to check fallback tfm\n");
+ return -EINVAL;
+ }
+
+ skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
+
+ /* the software fallback needs sync mode to do crypto */
+ skcipher_request_set_callback(subreq, sreq->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
+ sreq->cryptlen, sreq->iv);
+ if (encrypt)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+
+ return ret;
+}
+
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
@@ -1475,8 +2066,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- if (!sk_req->cryptlen)
+ if (!sk_req->cryptlen) {
+ if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
+ return -EINVAL;
return 0;
+ }
req->flag = sk_req->base.flags;
req->c_req.sk_req = sk_req;
@@ -1487,6 +2081,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (unlikely(ret))
return -EINVAL;
+ if (unlikely(ctx->c_ctx.fallback))
+ return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
+
return ctx->req_op->process(ctx, req);
}
@@ -1507,7 +2104,9 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
+ .cra_flags = CRYPTO_ALG_ASYNC |\
+ CRYPTO_ALG_ALLOCATES_MEMORY |\
+ CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1541,11 +2140,11 @@ static struct skcipher_alg sec_skciphers[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
DES3_EDE_BLOCK_SIZE, 0)
SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
@@ -1557,6 +2156,90 @@ static struct skcipher_alg sec_skciphers[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static struct skcipher_alg sec_skciphers_v3[] = {
+ SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+};
+
+static int aead_iv_dimension_check(struct aead_request *aead_req)
+{
+ u8 cl;
+
+ cl = aead_req->iv[0] + 1;
+ if (cl < IV_CL_MIN || cl > IV_CL_MAX)
+ return -EINVAL;
+
+ if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
+ return -EOVERFLOW;
+
+ return 0;
+}
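This enforces the CCM rule that the message length must fit in L = iv[0] + 1 length octets. A worked example, assuming IV_CL_MIN = 2, IV_CL_MID = 4, IV_CL_MAX = 8 and BYTE_BITS = 8 (values defined earlier in the file, not visible in this hunk):

	/*
	 * Worked example (assumed macro values as above):
	 *   iv[0] = 2 -> cl = 3: cryptlen must fit in 3 bytes, so a
	 *   cryptlen of 0x1000000 (2^24) makes cryptlen >> 24 nonzero
	 *   and the function returns -EOVERFLOW.
	 *   For cl >= 4 a 32-bit cryptlen can never overflow, so only
	 *   the [IV_CL_MIN, IV_CL_MAX] range check applies.
	 */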
+
+static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 c_mode = ctx->c_ctx.c_mode;
+ struct device *dev = ctx->dev;
+ int ret;
+
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
+ return -EINVAL;
+ }
+
+ if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
+ (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
+ authsize & MAC_LEN_MASK)))) {
+ dev_err(dev, "aead input mac length error!\n");
+ return -EINVAL;
+ }
+
+ if (c_mode == SEC_CMODE_CCM) {
+ ret = aead_iv_dimension_check(req);
+ if (ret) {
+ dev_err(dev, "aead input iv param error!\n");
+ return ret;
+ }
+ }
+
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+ if (c_mode == SEC_CMODE_CBC) {
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "aead crypto length error!\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
@@ -1565,34 +2248,61 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!req->src || !req->dst || !req->cryptlen ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
+ if (unlikely(!req->src || !req->dst)) {
dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
+ if (ctx->sec->qm.ver == QM_HW_V2) {
+ if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+ req->cryptlen <= authsize))) {
+ dev_err(dev, "Kunpeng920 not support 0 length!\n");
+ ctx->a_ctx.fallback = true;
+ return -EINVAL;
+ }
+ }
+
+ /* Support AES or SM4 */
+ if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
+ dev_err(dev, "aead crypto alg error!\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(sec_aead_spec_check(ctx, sreq)))
+ return -EINVAL;
+
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)
sreq->use_pbuf = true;
else
sreq->use_pbuf = false;
- /* Support AES only */
- if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(dev, "aead crypto alg error!\n");
- return -EINVAL;
- }
- if (sreq->c_req.encrypt)
- sreq->c_req.c_len = req->cryptlen;
- else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ return 0;
+}
- if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "aead crypto length error!\n");
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt)
+{
+ struct aead_request *subreq = aead_request_ctx(aead_req);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ struct device *dev = ctx->dev;
+
+ /* Kunpeng920 aead mode does not support zero-size input */
+ if (!a_ctx->fallback_aead_tfm) {
+ dev_err(dev, "aead fallback tfm is NULL!\n");
return -EINVAL;
}
- return 0;
+ aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
+ aead_request_set_callback(subreq, aead_req->base.flags,
+ aead_req->base.complete, aead_req->base.data);
+ aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
+ aead_req->cryptlen, aead_req->iv);
+ aead_request_set_ad(subreq, aead_req->assoclen);
+
+ return encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
@@ -1608,8 +2318,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->ctx = ctx;
ret = sec_aead_param_check(ctx, req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ if (ctx->a_ctx.fallback)
+ return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
+ }
return ctx->req_op->process(ctx, req);
}
@@ -1624,14 +2337,16 @@ static int sec_aead_decrypt(struct aead_request *a_req)
return sec_aead_crypto(a_req, false);
}
-#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
ctx_exit, blk_size, iv_size, max_authsize)\
{\
.base = {\
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
+ .cra_flags = CRYPTO_ALG_ASYNC |\
+ CRYPTO_ALG_ALLOCATES_MEMORY |\
+ CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1639,28 +2354,46 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.init = ctx_init,\
.exit = ctx_exit,\
.setkey = sec_set_key,\
+ .setauthsize = sec_aead_setauthsize,\
.decrypt = sec_aead_decrypt,\
.encrypt = sec_aead_encrypt,\
.ivsize = iv_size,\
.maxauthsize = max_authsize,\
}
-#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
- SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
- sec_aead_ctx_exit, blksize, ivsize, authsize)
-
static struct aead_alg sec_aeads[] = {
SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+ SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ SEC_AIV_SIZE, AES_BLOCK_SIZE)
+};
+
+static struct aead_alg sec_aeads_v3[] = {
+ SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+ SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ SEC_AIV_SIZE, AES_BLOCK_SIZE)
};
int sec_register_to_crypto(struct hisi_qm *qm)
@@ -1673,16 +2406,45 @@ int sec_register_to_crypto(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->ver > QM_HW_V2) {
+ ret = crypto_register_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
+ if (ret)
+ goto reg_skcipher_fail;
+ }
+
ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
if (ret)
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ goto reg_aead_fail;
+ if (qm->ver > QM_HW_V2) {
+ ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
+ if (ret)
+ goto reg_aead_v3_fail;
+ }
+ return ret;
+
+reg_aead_v3_fail:
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+reg_aead_fail:
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
+reg_skcipher_fail:
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_aeads(sec_aeads_v3,
+ ARRAY_SIZE(sec_aeads_v3));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 9c78edac56a4..9f71c358a6d3 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -4,9 +4,11 @@
#ifndef __HISI_SEC_V2_CRYPTO_H
#define __HISI_SEC_V2_CRYPTO_H
+#define SEC_AIV_SIZE 12
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
#define SEC_COMM_SCENE 0
+#define SEC_MIN_BLOCK_SZ 1
enum sec_calg {
SEC_CALG_3DES = 0x1,
@@ -21,6 +23,11 @@ enum sec_hash_alg {
};
enum sec_mac_len {
+ SEC_HMAC_CCM_MAC = 16,
+ SEC_HMAC_GCM_MAC = 16,
+ SEC_SM3_MAC = 32,
+ SEC_HMAC_SM3_MAC = 32,
+ SEC_HMAC_MD5_MAC = 16,
SEC_HMAC_SHA1_MAC = 20,
SEC_HMAC_SHA256_MAC = 32,
SEC_HMAC_SHA512_MAC = 64,
@@ -29,7 +36,11 @@ enum sec_mac_len {
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CFB = 0x2,
+ SEC_CMODE_OFB = 0x3,
SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_CCM = 0x5,
+ SEC_CMODE_GCM = 0x6,
SEC_CMODE_XTS = 0x7,
};
@@ -44,6 +55,7 @@ enum sec_ckey_type {
enum sec_bd_type {
SEC_BD_TYPE1 = 0x1,
SEC_BD_TYPE2 = 0x2,
+ SEC_BD_TYPE3 = 0x3,
};
enum sec_auth {
@@ -63,6 +75,24 @@ enum sec_addr_type {
SEC_PRP = 0x2,
};
+struct bd_status {
+ u64 tag;
+ u8 done;
+ u8 err_type;
+ u16 flag;
+ u16 icv;
+};
+
+enum {
+ AUTHPAD_PAD,
+ AUTHPAD_NOPAD,
+};
+
+enum {
+ AIGEN_GEN,
+ AIGEN_NOGEN,
+};
+
struct sec_sqe_type2 {
/*
* mac_len: 0~4 bits
@@ -209,6 +239,169 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
+struct bd3_auth_ivin {
+ __le64 a_ivin_addr;
+ __le32 rsvd0;
+ __le32 rsvd1;
+} __packed __aligned(4);
+
+struct bd3_skip_data {
+ __le32 rsvd0;
+
+ /*
+ * gran_num: 0~15 bits
+ * reserved: 16~31 bits
+ */
+ __le32 gran_num;
+
+ /*
+ * src_skip_data_len: 0~24 bits
+ * reserved: 25~31 bits
+ */
+ __le32 src_skip_data_len;
+
+ /*
+ * dst_skip_data_len: 0~24 bits
+ * reserved: 25~31 bits
+ */
+ __le32 dst_skip_data_len;
+};
+
+struct bd3_stream_scene {
+ __le64 c_ivin_addr;
+ __le64 long_a_data_len;
+
+ /*
+ * auth_pad: 0~1 bits
+ * stream_protocol: 2~4 bits
+ * reserved: 5~7 bits
+ */
+ __u8 stream_auth_pad;
+ __u8 plaintext_type;
+ __le16 pad_len_1p3;
+} __packed __aligned(4);
+
+struct bd3_no_scene {
+ __le64 c_ivin_addr;
+ __le32 rsvd0;
+ __le32 rsvd1;
+ __le32 rsvd2;
+} __packed __aligned(4);
+
+struct bd3_check_sum {
+ __u8 rsvd0;
+ __u8 hac_sva_status;
+ __le16 check_sum_i;
+};
+
+struct bd3_tls_type_back {
+ __u8 tls_1p3_type_back;
+ __u8 hac_sva_status;
+ __le16 pad_len_1p3_back;
+};
+
+struct sec_sqe3 {
+ /*
+ * type: 0~3 bit
+ * bd_invalid: 4 bit
+ * scene: 5~8 bit
+ * de: 9~10 bit
+ * src_addr_type: 11~13 bit
+ * dst_addr_type: 14~16 bit
+ * mac_addr_type: 17~19 bit
+ * reserved: 20~31 bits
+ */
+ __le32 bd_param;
+
+ /*
+ * cipher: 0~1 bits
+ * ci_gen: 2~3 bit
+ * c_icv_len: 4~9 bit
+ * c_width: 10~12 bits
+ * c_key_len: 13~15 bits
+ */
+ __le16 c_icv_key;
+
+ /*
+ * c_mode : 0~3 bits
+ * c_alg : 4~7 bits
+ */
+ __u8 c_mode_alg;
+
+ /*
+ * nonce_len : 0~3 bits
+ * huk : 4 bits
+ * cal_iv_addr_en : 5 bits
+ * seq : 6 bits
+ * reserved : 7 bits
+ */
+ __u8 huk_iv_seq;
+
+ __le64 tag;
+ __le64 data_src_addr;
+ __le64 a_key_addr;
+ union {
+ struct bd3_auth_ivin auth_ivin;
+ struct bd3_skip_data skip_data;
+ };
+
+ __le64 c_key_addr;
+
+ /*
+ * auth: 0~1 bits
+ * ai_gen: 2~3 bits
+ * mac_len: 4~8 bits
+ * akey_len: 9~14 bits
+ * a_alg: 15~20 bits
+ * key_sel: 21~24 bits
+ * updata_key: 25 bits
+ * reserved: 26~31 bits
+ */
+ __le32 auth_mac_key;
+ __le32 salt;
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+
+ /*
+ * auth_len: 0~23 bit
+ * auth_key_offset: 24~31 bits
+ */
+ __le32 a_len_key;
+
+ /*
+ * cipher_len: 0~23 bit
+ * auth_ivin_offset: 24~31 bits
+ */
+ __le32 c_len_ivin;
+ __le64 data_dst_addr;
+ __le64 mac_addr;
+ union {
+ struct bd3_stream_scene stream_scene;
+ struct bd3_no_scene no_scene;
+ };
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bit
+ * csc: 4~6 bit
+ * flag: 7~10 bit
+ * reserved: 11~15 bit
+ */
+ __le16 done_flag;
+ __u8 error_type;
+ __u8 warning_type;
+ union {
+ __le32 mac_i;
+ __le32 kek_key_addr_l;
+ };
+ union {
+ __le32 kek_key_addr_h;
+ struct bd3_check_sum check_sum;
+ struct bd3_tls_type_back tls_type_back;
+ };
+ __le32 counter;
+} __packed __aligned(4);
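The completion status of a type-3 BD is bit-packed into done_flag; a consumer could decode it with helpers like these (illustrative only, following the bit layout documented above):

	/* Illustrative decode helpers for the type-3 BD completion word. */
	static inline u8 sec_sqe3_done(const struct sec_sqe3 *sqe)
	{
		return le16_to_cpu(sqe->done_flag) & 0x1;	/* bit 0 */
	}

	static inline u8 sec_sqe3_icv(const struct sec_sqe3 *sqe)
	{
		return (le16_to_cpu(sqe->done_flag) >> 1) & 0x7; /* bits 1~3 */
	}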
+
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 6f0062d4408c..d120ce3e34ed 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -52,6 +52,7 @@
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x7c177
+#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -84,6 +85,12 @@
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
+#define SEC_PREFETCH_CFG 0x301130
+#define SEC_SVA_TRANS 0x301EC4
+#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
+#define SEC_PREFETCH_DISABLE BIT(1)
+#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
@@ -91,6 +98,7 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
+#define SEC_SHAPER_TYPE_RATE 128
struct sec_hw_error {
u32 int_msk;
@@ -331,6 +339,42 @@ static u8 sec_get_endian(struct hisi_qm *qm)
return SEC_64BE;
}
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
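The enable path clears the disable bits and then polls the same register until the hardware acknowledges. readl_relaxed_poll_timeout() behaves roughly like this open-coded loop (conceptual sketch; the real helper does its own timeout bookkeeping):

	u32 val;
	int ret = -ETIMEDOUT;
	unsigned int waited_us;

	for (waited_us = 0; waited_us <= SEC_POLL_TIMEOUT_US;
	     waited_us += SEC_DELAY_10_US) {
		val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
		if (!(val & SEC_PREFETCH_DISABLE)) {
			ret = 0;
			break;
		}
		usleep_range(SEC_DELAY_10_US, SEC_DELAY_10_US * 2);
	}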
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
@@ -430,53 +474,60 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void sec_hw_error_enable(struct hisi_qm *qm)
+static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + SEC_CONTROL_REG);
+ if (enable) {
+ val1 |= SEC_AXI_SHUTDOWN_ENABLE;
+ val2 = SEC_RAS_NFE_ENB_MSK;
+ } else {
+ val1 &= SEC_AXI_SHUTDOWN_DISABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+
+ writel(val1, qm->io_base + SEC_CONTROL_REG);
+}
+static void sec_hw_error_enable(struct hisi_qm *qm)
+{
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
- val = readl(qm->io_base + SEC_CONTROL_REG);
-
/* clear SEC hw error source if any */
writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
- /* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
-
/* enable RAS int */
writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
- /* enable SEC block master OOO when m-bit error occur */
- val = val | SEC_AXI_SHUTDOWN_ENABLE;
+ /* enable SEC block master OOO when nfe occurs on Kunpeng930 */
+ sec_master_ooo_ctrl(qm, true);
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
- u32 val;
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ /* disable SEC block master OOO when nfe occurs on Kunpeng930 */
+ sec_master_ooo_ctrl(qm, false);
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
-
- /* disable SEC hw error interrupts */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
-
- /* disable SEC block master OOO when m-bit error occur */
- val = val & SEC_AXI_SHUTDOWN_DISABLE;
-
- writel(val, qm->io_base + SEC_CONTROL_REG);
}
static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -743,6 +794,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
+ .open_sva_prefetch = sec_open_sva_prefetch,
+ .close_sva_prefetch = sec_close_sva_prefetch,
.err_info_init = sec_err_info_init,
};
@@ -758,6 +811,7 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
+ sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
@@ -821,6 +875,7 @@ static void sec_qm_uninit(struct hisi_qm *qm)
static int sec_probe_init(struct sec_dev *sec)
{
+ u32 type_rate = SEC_SHAPER_TYPE_RATE;
struct hisi_qm *qm = &sec->qm;
int ret;
@@ -828,6 +883,11 @@ static int sec_probe_init(struct sec_dev *sec)
ret = sec_pf_probe_init(sec);
if (ret)
return ret;
+ /* enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+ qm->type_rate = type_rate;
+ }
}
return 0;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 2178b40e9f82..f8482ceebf2a 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -68,6 +68,7 @@
#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
@@ -96,6 +97,16 @@
#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
HZIP_RO_CNT_CLR_CE_EN)
+#define HZIP_PREFETCH_CFG 0x3011B0
+#define HZIP_SVA_TRANS 0x3011C4
+#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
+#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
+#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SHAPER_RATE_COMPRESS 252
+#define HZIP_SHAPER_RATE_DECOMPRESS 229
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -262,6 +273,45 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+ val &= HZIP_PREFETCH_ENABLE;
+ writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+ val |= HZIP_SVA_PREFETCH_DISABLE;
+ writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
+ HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
@@ -312,10 +362,27 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
return 0;
}
-static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ if (enable) {
+ val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
+ val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ } else {
+ val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+ writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+{
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -333,26 +400,20 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
+ hisi_zip_master_ooo_ctrl(qm, true);
+
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
-
- /* enable ZIP block master OOO when m-bit error occur */
- val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 val;
-
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when m-bit error occur */
- val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
+ hisi_zip_master_ooo_ctrl(qm, false);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -684,6 +745,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
+ .open_sva_prefetch = hisi_zip_open_sva_prefetch,
+ .close_sva_prefetch = hisi_zip_close_sva_prefetch,
.err_info_init = hisi_zip_err_info_init,
};
@@ -702,6 +765,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini->err_info_init(qm);
hisi_zip_set_user_domain_and_cache(qm);
+ hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -761,6 +825,7 @@ static void hisi_zip_qm_uninit(struct hisi_qm *qm)
static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
+ u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
struct hisi_qm *qm = &hisi_zip->qm;
int ret;
@@ -768,6 +833,14 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
+ /* enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+
+ /* ZIP needs to enable shaper type 1 as well */
+ type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
+ qm->type_rate = type_rate;
+ }
}
return 0;
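qm->type_rate packs both shaper rates into one word: the type 0 (compress) rate in the low bits, the type 1 (decompress) rate shifted by QM_SHAPER_TYPE1_OFFSET, plus the enable flag. Assuming QM_SHAPER_TYPE1_OFFSET is 16 (the real value lives in the QM header, not shown here):

	/*
	 * Sketch of the packed value (assumed offset of 16):
	 *   type_rate = QM_SHAPER_ENABLE | 252 | (229 << 16);
	 * where 252 is HZIP_SHAPER_RATE_COMPRESS and 229 is
	 * HZIP_SHAPER_RATE_DECOMPRESS.
	 */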
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 0616e369522e..35fc5ee70491 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
@@ -71,15 +72,11 @@
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
-#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
* NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64
-#define SEND_QID 29
-#define RECV_QID 30
-
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
@@ -136,7 +133,7 @@ struct crypt_ctl {
u32 crypto_ctx; /* NPE Crypto Param structure address */
/* Used by Host: 4*4 bytes*/
- unsigned ctl_flags;
+ unsigned int ctl_flags;
union {
struct skcipher_request *ablk_req;
struct aead_request *aead_req;
@@ -149,6 +146,9 @@ struct crypt_ctl {
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
+ u8 iv[MAX_IVLEN];
+ bool encrypt;
+ struct skcipher_request fallback_req; // keep at the end
};
struct aead_ctx {
@@ -181,9 +181,10 @@ struct ixp_ctx {
u8 enckey[MAX_KEYLEN];
u8 salt[MAX_IVLEN];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
- unsigned salted;
+ unsigned int salted;
atomic_t configuring;
struct completion completion;
+ struct crypto_skcipher *fallback_tfm;
};
struct ixp_alg {
@@ -209,6 +210,7 @@ static const struct ix_hash_algo hash_alg_md5 = {
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
+
static const struct ix_hash_algo hash_alg_sha1 = {
.cfgword = 0x00000005,
.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
@@ -216,16 +218,17 @@ static const struct ix_hash_algo hash_alg_sha1 = {
};
static struct npe *npe_c;
-static struct dma_pool *buffer_pool = NULL;
-static struct dma_pool *ctx_pool = NULL;
-static struct crypt_ctl *crypt_virt = NULL;
+static unsigned int send_qid;
+static unsigned int recv_qid;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
+
+static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;
static int support_aes = 1;
-#define DRIVER_NAME "ixp4xx_crypto"
-
static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
@@ -240,12 +243,12 @@ static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
@@ -256,6 +259,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
+
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -269,7 +273,7 @@ static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
- static int idx = 0;
+ static int idx;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
@@ -286,7 +290,7 @@ static struct crypt_ctl *get_crypt_desc(void)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
- return crypt_virt +i;
+ return crypt_virt + i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
@@ -314,7 +318,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
- return crypt_virt +i;
+ return crypt_virt + i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
@@ -330,7 +334,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
buf1 = buf->next;
phys1 = buf->phys_next;
- dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
@@ -348,8 +352,8 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
int decryptlen = req->assoclen + req->cryptlen - authsize;
if (req_ctx->encrypt) {
- scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->dst, decryptlen, authsize, 1);
+ scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
+ decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
@@ -372,19 +376,33 @@ static void one_packet(dma_addr_t phys)
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- if (req_ctx->hmac_virt) {
+ if (req_ctx->hmac_virt)
finish_scattered_hmac(crypt);
- }
+
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
struct skcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ unsigned int offset;
+
+ if (ivsize > 0) {
+ offset = req->cryptlen - ivsize;
+ if (req_ctx->encrypt) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ offset, ivsize, 0);
+ } else {
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ memzero_explicit(req_ctx->iv, ivsize);
+ }
+ }
- if (req_ctx->dst) {
+ if (req_ctx->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- }
+
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
@@ -392,14 +410,14 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_GEN_ICV:
ctx = crypto_tfm_ctx(crypt->data.tfm);
dma_pool_free(ctx_pool, crypt->regist_ptr,
- crypt->regist_buf->phys_addr);
+ crypt->regist_buf->phys_addr);
dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
- *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+ *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
@@ -418,8 +436,8 @@ static void crypto_done_action(unsigned long arg)
{
int i;
- for(i=0; i<4; i++) {
- dma_addr_t phys = qmgr_get_entry(RECV_QID);
+ for (i = 0; i < 4; i++) {
+ dma_addr_t phys = qmgr_get_entry(recv_qid);
if (!phys)
return;
one_packet(phys);
@@ -429,15 +447,52 @@ static void crypto_done_action(unsigned long arg)
static int init_ixp_crypto(struct device *dev)
{
- int ret = -ENODEV;
+ struct device_node *np = dev->of_node;
u32 msg[2] = { 0, 0 };
+ int ret = -ENODEV;
+ u32 npe_id;
- if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
- IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
- printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
- return ret;
+ dev_info(dev, "probing...\n");
+
+ /* Locate the NPE and queue manager to use from device tree */
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+
+ ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+ 1, 0, &npe_spec);
+ if (ret) {
+ dev_err(dev, "no NPE engine specified\n");
+ return -ENODEV;
+ }
+ npe_id = npe_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no rx queue phandle\n");
+ return -ENODEV;
+ }
+ recv_qid = queue_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no txready queue phandle\n");
+ return -ENODEV;
+ }
+ send_qid = queue_spec.args[0];
+ } else {
+ /*
+ * Hardcoded engine when using platform data, this goes away
+ * when we switch to using DT only.
+ */
+ npe_id = 2;
+ send_qid = 29;
+ recv_qid = 30;
}
- npe_c = npe_request(NPE_ID);
+
+ npe_c = npe_request(npe_id);
if (!npe_c)
return ret;
@@ -455,10 +510,9 @@ static int init_ixp_crypto(struct device *dev)
goto npe_error;
}
- switch ((msg[1]>>16) & 0xff) {
+ switch ((msg[1] >> 16) & 0xff) {
case 3:
- printk(KERN_WARNING "Firmware of %s lacks AES support\n",
- npe_name(npe_c));
+ dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
support_aes = 0;
break;
case 4:
@@ -466,8 +520,7 @@ static int init_ixp_crypto(struct device *dev)
support_aes = 1;
break;
default:
- printk(KERN_ERR "Firmware of %s lacks crypto support\n",
- npe_name(npe_c));
+ dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
ret = -ENODEV;
goto npe_release;
}
@@ -475,35 +528,34 @@ static int init_ixp_crypto(struct device *dev)
* so assure it is large enough
*/
BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
- buffer_pool = dma_pool_create("buffer", dev,
- sizeof(struct buffer_desc), 32, 0);
+ buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
+ 32, 0);
ret = -ENOMEM;
- if (!buffer_pool) {
+ if (!buffer_pool)
goto err;
- }
- ctx_pool = dma_pool_create("context", dev,
- NPE_CTX_LEN, 16, 0);
- if (!ctx_pool) {
+
+ ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
+ if (!ctx_pool)
goto err;
- }
- ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+
+ ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
"ixp_crypto:out", NULL);
if (ret)
goto err;
- ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+ ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
"ixp_crypto:in", NULL);
if (ret) {
- qmgr_release_queue(SEND_QID);
+ qmgr_release_queue(send_qid);
goto err;
}
- qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+ qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
- qmgr_enable_irq(RECV_QID);
+ qmgr_enable_irq(recv_qid);
return 0;
npe_error:
- printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+ dev_err(dev, "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
dma_pool_destroy(ctx_pool);
@@ -515,22 +567,20 @@ npe_release:
static void release_ixp_crypto(struct device *dev)
{
- qmgr_disable_irq(RECV_QID);
+ qmgr_disable_irq(recv_qid);
tasklet_kill(&crypto_done_tasklet);
- qmgr_release_queue(SEND_QID);
- qmgr_release_queue(RECV_QID);
+ qmgr_release_queue(send_qid);
+ qmgr_release_queue(recv_qid);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
- if (crypt_virt) {
- dma_free_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- crypt_virt, crypt_phys);
- }
+ if (crypt_virt)
+ dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
+ crypt_virt, crypt_phys);
}
static void reset_sa_dir(struct ix_sa_dir *dir)
@@ -543,9 +593,9 @@ static void reset_sa_dir(struct ix_sa_dir *dir)
static int init_sa_dir(struct ix_sa_dir *dir)
{
dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
- if (!dir->npe_ctx) {
+ if (!dir->npe_ctx)
return -ENOMEM;
- }
+
reset_sa_dir(dir);
return 0;
}
@@ -566,15 +616,31 @@ static int init_tfm(struct crypto_tfm *tfm)
if (ret)
return ret;
ret = init_sa_dir(&ctx->decrypt);
- if (ret) {
+ if (ret)
free_sa_dir(&ctx->encrypt);
- }
+
return ret;
}
static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+ const char *name = crypto_tfm_alg_name(ctfm);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ pr_info("Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&tfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
+ );
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
return init_tfm(crypto_skcipher_tfm(tfm));
}
@@ -587,12 +653,17 @@ static int init_tfm_aead(struct crypto_aead *tfm)
static void exit_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+
free_sa_dir(&ctx->encrypt);
free_sa_dir(&ctx->decrypt);
}
static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+
+ crypto_free_skcipher(ctx->fallback_tfm);
exit_tfm(crypto_skcipher_tfm(tfm));
}
@@ -602,7 +673,8 @@ static void exit_tfm_aead(struct crypto_aead *tfm)
}
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
- int init_len, u32 ctx_addr, const u8 *key, int key_len)
+ int init_len, u32 ctx_addr, const u8 *key,
+ int key_len)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypt_ctl *crypt;
@@ -629,9 +701,8 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
memcpy(pad, key, key_len);
memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
- for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
+ for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
pad[i] ^= xpad;
- }
crypt->data.tfm = tfm;
crypt->regist_ptr = pad;
@@ -652,13 +723,13 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
buf->phys_addr = pad_phys;
atomic_inc(&ctx->configuring);
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
-static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
- const u8 *key, int key_len, unsigned digest_len)
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
+ const u8 *key, int key_len, unsigned int digest_len)
{
u32 itarget, otarget, npe_ctx_addr;
unsigned char *cinfo;
@@ -673,11 +744,11 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
algo = ix_hash(tfm);
/* write cfg word to cryptinfo */
- cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
+ cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
- *(u32*)cinfo = cpu_to_be32(cfgword);
+ *(u32 *)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
@@ -697,11 +768,11 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
dir->npe_mode |= NPE_OP_HASH_VERIFY;
ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
- init_len, npe_ctx_addr, key, key_len);
+ init_len, npe_ctx_addr, key, key_len);
if (ret)
return ret;
return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
- init_len, npe_ctx_addr, key, key_len);
+ init_len, npe_ctx_addr, key, key_len);
}
static int gen_rev_aes_key(struct crypto_tfm *tfm)
@@ -711,10 +782,10 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
struct ix_sa_dir *dir = &ctx->decrypt;
crypt = get_crypt_desc_emerg();
- if (!crypt) {
+ if (!crypt)
return -EAGAIN;
- }
- *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+ *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
@@ -727,13 +798,13 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
atomic_inc(&ctx->configuring);
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
-static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
- const u8 *key, int key_len)
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
+ int key_len)
{
u8 *cinfo;
u32 cipher_cfg;
@@ -753,9 +824,15 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
- case 16: keylen_cfg = MOD_AES128; break;
- case 24: keylen_cfg = MOD_AES192; break;
- case 32: keylen_cfg = MOD_AES256; break;
+ case 16:
+ keylen_cfg = MOD_AES128;
+ break;
+ case 24:
+ keylen_cfg = MOD_AES192;
+ break;
+ case 32:
+ keylen_cfg = MOD_AES256;
+ break;
default:
return -EINVAL;
}
@@ -766,31 +843,31 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return err;
}
/* write cfg word to cryptinfo */
- *(u32*)cinfo = cpu_to_be32(cipher_cfg);
+ *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
memcpy(cinfo, key, key_len);
/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
- memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
+ memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
key_len = DES3_EDE_KEY_SIZE;
}
dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
- if ((cipher_cfg & MOD_AES) && !encrypt) {
+ if ((cipher_cfg & MOD_AES) && !encrypt)
return gen_rev_aes_key(tfm);
- }
+
return 0;
}
static struct buffer_desc *chainup_buffers(struct device *dev,
- struct scatterlist *sg, unsigned nbytes,
+ struct scatterlist *sg, unsigned int nbytes,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
for (; nbytes > 0; sg = sg_next(sg)) {
- unsigned len = min(nbytes, sg->length);
+ unsigned int len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
dma_addr_t next_buf_phys;
void *ptr;
@@ -817,7 +894,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
}
static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
+ unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
@@ -838,7 +915,12 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
- return ret;
+ if (ret)
+ return ret;
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}
static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -849,7 +931,7 @@ static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
+ unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -858,17 +940,36 @@ static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
- CTR_RFC3686_NONCE_SIZE);
+ CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ablk_setkey(tfm, key, key_len);
}
+static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (encrypt)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ return err;
+}
+
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned ivsize = crypto_skcipher_ivsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int nbytes = req->cryptlen;
@@ -876,15 +977,20 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
+ unsigned int offset;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- if (qmgr_stat_full(SEND_QID))
+ if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
+ return ixp4xx_cipher_fallback(req, encrypt);
+
+ if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+ req_ctx->encrypt = encrypt;
crypt = get_crypt_desc();
if (!crypt)
@@ -900,14 +1006,19 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ if (ivsize > 0 && !encrypt) {
+ offset = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+ }
if (req->src != req->dst) {
struct buffer_desc dst_hook;
+
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
- flags, DMA_FROM_DEVICE))
+ flags, DMA_FROM_DEVICE))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
req_ctx->dst = dst_hook.next;
@@ -916,23 +1027,23 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
req_ctx->dst = NULL;
}
req_ctx->src = NULL;
- if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
- flags, src_direction))
+ if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
+ src_direction))
goto free_buf_src;
req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_src:
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
- if (req->src != req->dst) {
+ if (req->src != req->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- }
+
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
@@ -956,7 +1067,7 @@ static int ablk_rfc3686_crypt(struct skcipher_request *req)
int ret;
/* set up counter block */
- memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
@@ -970,12 +1081,12 @@ static int ablk_rfc3686_crypt(struct skcipher_request *req)
}
static int aead_perform(struct aead_request *req, int encrypt,
- int cryptoffset, int eff_cryptlen, u8 *iv)
+ int cryptoffset, int eff_cryptlen, u8 *iv)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned ivsize = crypto_aead_ivsize(tfm);
- unsigned authsize = crypto_aead_authsize(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int authsize = crypto_aead_authsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int cryptlen;
@@ -987,7 +1098,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
unsigned int lastlen;
- if (qmgr_stat_full(SEND_QID))
+ if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
@@ -998,7 +1109,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
} else {
dir = &ctx->decrypt;
/* req->cryptlen includes the authsize when decrypting */
- cryptlen = req->cryptlen -authsize;
+ cryptlen = req->cryptlen - authsize;
eff_cryptlen -= authsize;
}
crypt = get_crypt_desc();
@@ -1058,12 +1169,12 @@ static int aead_perform(struct aead_request *req, int encrypt,
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
- &crypt->icv_rev_aes);
+ &crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->src, cryptlen, authsize, 0);
+ req->src, cryptlen, authsize, 0);
}
req_ctx->encrypt = encrypt;
} else {
@@ -1071,8 +1182,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_dst:
@@ -1086,7 +1197,7 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned digest_len = crypto_aead_maxauthsize(tfm);
+ unsigned int digest_len = crypto_aead_maxauthsize(tfm);
int ret;
if (!ctx->enckey_len && !ctx->authkey_len)
@@ -1104,11 +1215,11 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
if (ret)
goto out;
ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
- ctx->authkey_len, digest_len);
+ ctx->authkey_len, digest_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
- ctx->authkey_len, digest_len);
+ ctx->authkey_len, digest_len);
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1119,13 +1230,13 @@ static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int max = crypto_aead_maxauthsize(tfm) >> 2;
- if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
+ if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
return -EINVAL;
return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
@@ -1364,43 +1475,33 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
#define IXP_POSTFIX "-ixp4xx"
-static const struct platform_device_info ixp_dev_info __initdata = {
- .name = DRIVER_NAME,
- .id = 0,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-static int __init ixp_module_init(void)
+static int ixp_crypto_probe(struct platform_device *_pdev)
{
+ struct device *dev = &_pdev->dev;
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err;
- pdev = platform_device_register_full(&ixp_dev_info);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ pdev = _pdev;
- err = init_ixp_crypto(&pdev->dev);
- if (err) {
- platform_device_unregister(pdev);
+ err = init_ixp_crypto(dev);
+ if (err)
return err;
- }
- for (i=0; i< num; i++) {
+
+ for (i = 0; i < num; i++) {
struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s"IXP_POSTFIX, cra->base.cra_name) >=
- CRYPTO_MAX_ALG_NAME)
- {
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
continue;
- }
- if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
continue;
- }
/* block ciphers */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK;
if (!cra->setkey)
cra->setkey = ablk_setkey;
if (!cra->encrypt)
@@ -1415,7 +1516,7 @@ static int __init ixp_module_init(void)
cra->base.cra_alignmask = 3;
cra->base.cra_priority = 300;
if (crypto_register_skcipher(cra))
- printk(KERN_ERR "Failed to register '%s'\n",
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
@@ -1448,7 +1549,7 @@ static int __init ixp_module_init(void)
cra->base.cra_priority = 300;
if (crypto_register_aead(cra))
- printk(KERN_ERR "Failed to register '%s'\n",
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_driver_name);
else
ixp4xx_aeads[i].registered = 1;
@@ -1456,7 +1557,7 @@ static int __init ixp_module_init(void)
return 0;
}
-static void __exit ixp_module_exit(void)
+static int ixp_crypto_remove(struct platform_device *pdev)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
@@ -1466,16 +1567,30 @@ static void __exit ixp_module_exit(void)
crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
}
- for (i=0; i< num; i++) {
+ for (i = 0; i < num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
- platform_device_unregister(pdev);
+
+ return 0;
}
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-crypto",
+ },
+ {},
+};
-module_init(ixp_module_init);
-module_exit(ixp_module_exit);
+static struct platform_driver ixp_crypto_driver = {
+ .probe = ixp_crypto_probe,
+ .remove = ixp_crypto_remove,
+ .driver = {
+ .name = "ixp4xx_crypto",
+ .of_match_table = ixp4xx_crypto_of_match,
+ },
+};
+module_platform_driver(ixp_crypto_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index c1007f2ba79c..d215a6bed6bc 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -66,7 +66,7 @@
#define CESA_SA_ST_ACT_1 BIT(1)
/*
- * CESA_SA_FPGA_INT_STATUS looks like a FPGA leftover and is documented only
+ * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
* in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA
* and someone forgot to remove it while switching to the core and moving to
* CESA_SA_INT_STATUS.
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index b9c6201019e0..c242d22008c3 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
-octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
- otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
-octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
- otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
- otx2_cptvf_algs.o
+rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
+ cn10k_cpt.o
+rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o cn10k_cpt.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
new file mode 100644
index 000000000000..1499ef75b5c2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "cn10k_cpt.h"
+
+static struct cpt_hw_ops otx2_hw_ops = {
+ .send_cmd = otx2_cpt_send_cmd,
+ .cpt_get_compcode = otx2_cpt_get_compcode,
+ .cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
+};
+
+static struct cpt_hw_ops cn10k_hw_ops = {
+ .send_cmd = cn10k_cpt_send_cmd,
+ .cpt_get_compcode = cn10k_cpt_get_compcode,
+ .cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+};
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ u64 val = (lf->slot & 0x7FF);
+ u64 tar_addr = 0;
+
+ /* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
+ tar_addr |= (__force u64)lf->ioreg |
+ (((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
+ /*
+ * Make sure memory areas pointed in CPT_INST_S
+ * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t size;
+ u64 lmt_base;
+
+ if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
+ cptpf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptpf->lfs.ops = &cn10k_hw_ops;
+ lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
+ if (!lmt_base) {
+ dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
+ return -ENOMEM;
+ }
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
+ cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
+ if (!cptpf->lfs.lmt_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF LMTLINE address failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
+
+ if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
+ cptvf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptvf->lfs.ops = &cn10k_hw_ops;
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map VF LMTLINE region */
+ cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
+ if (!cptvf->lfs.lmt_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
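
The tar_addr<6:4> encoding in cn10k_cpt_send_cmd() packs the LMTST transfer size next to the I/O register address. A worked userspace example of just the size arithmetic, assuming OTX2_CPT_INST_SIZE is 64 bytes (that value is not visible in this hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int inst_size = 64;	/* assumed OTX2_CPT_INST_SIZE */

	/* Size of first LMTST minus one, in units of 128b (16 bytes) */
	uint64_t size_field = ((inst_size / 16) - 1) & 0x7;

	/* 64 / 16 - 1 = 3, so bits <6:4> announce four 128-bit words */
	printf("size field = %llu, tar_addr bits = %#llx\n",
	       (unsigned long long)size_field,
	       (unsigned long long)(size_field << 4));
	return 0;
}
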
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
new file mode 100644
index 000000000000..c091392b47e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2021 Marvell.
+ */
+#ifndef __CN10K_CPT_H
+#define __CN10K_CPT_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+
+static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 cn10k_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->uc_compcode;
+}
+
+static inline u8 otx2_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
+}
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __CN10K_CPT_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index ecedd91a8d85..c5445b05f53c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -25,6 +25,10 @@
#define OTX2_CPT_NAME_LENGTH 64
#define OTX2_CPT_DMA_MINALIGN 128
+/* HW capability flags */
+#define CN10K_MBOX 0
+#define CN10K_LMTST 1
+
#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
enum otx2_cpt_eng_type {
@@ -116,6 +120,25 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
+ return true;
+
+ return false;
+}
+
+static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
+ unsigned long *cap_flag)
+{
+ if (!is_dev_otx2(pdev)) {
+ __set_bit(CN10K_MBOX, cap_flag);
+ __set_bit(CN10K_LMTST, cap_flag);
+ }
+}
+
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
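
The CN10K_MBOX/CN10K_LMTST bits above gate every CN10K-specific path off one probe-time check. A userspace model of the same decision; the PCI device IDs are the ones used in this series, everything else is illustrative:

#include <stdio.h>

#define CN10K_MBOX	0
#define CN10K_LMTST	1

static int is_dev_otx2(unsigned int device)
{
	/* OTX2_CPT_PCI_PF/VF_DEVICE_ID from otx2_cpt_hw_types.h */
	return device == 0xA0FD || device == 0xA0FE;
}

int main(void)
{
	unsigned long cap_flag = 0;
	unsigned int device = 0xA0F2;	/* CN10K CPT PF device ID */

	if (!is_dev_otx2(device))
		cap_flag |= (1UL << CN10K_MBOX) | (1UL << CN10K_LMTST);

	printf("LMTST capable: %s\n",
	       (cap_flag >> CN10K_LMTST) & 1 ? "yes" : "no");
	return 0;
}
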
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
index ecafc42f37a2..6f947978e4e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -10,6 +10,8 @@
/* Device IDs */
#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+#define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2
+#define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3
/* Mailbox interrupts offset */
#define OTX2_CPT_PF_MBOX_INT 6
@@ -25,6 +27,7 @@
*/
#define OTX2_CPT_VF_MSIX_VECTORS 1
#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+#define CN10K_CPT_VF_MBOX_REGION (0xC0000)
/* CPT LF MSIX vectors */
#define OTX2_CPT_LF_MSIX_VECTORS 2
@@ -135,7 +138,7 @@ enum otx2_cpt_comp_e {
OTX2_CPT_COMP_E_FAULT = 0x02,
OTX2_CPT_COMP_E_HWERR = 0x04,
OTX2_CPT_COMP_E_INSTERR = 0x05,
- OTX2_CPT_COMP_E_LAST_ENTRY = 0x06
+ OTX2_CPT_COMP_E_WARN = 0x06
};
/*
@@ -266,13 +269,22 @@ union otx2_cpt_inst_s {
union otx2_cpt_res_s {
u64 u[2];
- struct {
+ struct cn9k_cpt_res_s {
u64 compcode:8;
u64 uc_compcode:8;
u64 doneint:1;
u64 reserved_17_63:47;
u64 reserved_64_127;
} s;
+
+ struct cn10k_cpt_res_s {
+ u64 compcode:7;
+ u64 doneint:1;
+ u64 uc_compcode:8;
+ u64 rlen:16;
+ u64 spi:32;
+ u64 esn;
+ } cn10k;
};
/*
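
The split of otx2_cpt_res_s into cn9k and cn10k layouts is what motivates the cpt_get_compcode()/cpt_get_uc_compcode() callbacks: the same logical fields sit at different bit positions on the two parts. A standalone illustration (C bitfield packing is compiler-dependent, so treat this as a layout sketch, not a byte-exact decoder; the esn word is omitted):

#include <stdint.h>
#include <stdio.h>

struct cn9k_res {
	uint64_t compcode:8;
	uint64_t uc_compcode:8;
	uint64_t doneint:1;
	uint64_t reserved:47;
};

struct cn10k_res {
	uint64_t compcode:7;	/* doneint moved into bit 7 on CN10K */
	uint64_t doneint:1;
	uint64_t uc_compcode:8;
	uint64_t rlen:16;
	uint64_t spi:32;
};

int main(void)
{
	struct cn9k_res a = { .compcode = 1, .doneint = 1 };
	struct cn10k_res b = { .compcode = 1, .doneint = 1 };

	/* Same logical result, different packing: reading the CN10K word
	 * with the CN9K layout would fold doneint into compcode. */
	printf("cn9k compcode=%u cn10k compcode=%u doneint=%u\n",
	       (unsigned)a.compcode, (unsigned)b.compcode,
	       (unsigned)b.doneint);
	return 0;
}
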
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 34aba1532761..c8350fcd60fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -379,9 +379,14 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- lfs->lf[slot].lmtline = lfs->reg_base +
- OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ if (lfs->lmt_base)
+ lfs->lf[slot].lmtline = lfs->lmt_base +
+ (slot * LMTLINE_SIZE);
+ else
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
+
lfs->lf[slot].ioreg = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
OTX2_CPT_LF_NQX(0));
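
With the lmt_base path above, each LF's LMTLINE becomes a fixed-stride offset into one mapping instead of a per-slot BAR register window. The address math in isolation, with an illustrative base address; LMTLINE_SIZE matches the define added to otx2_cptlf.h below:

#include <stdint.h>
#include <stdio.h>

#define LMTLINE_SIZE 128

int main(void)
{
	/* Illustrative; the real base is read from RVU_PF_LMTLINE_ADDR */
	uint64_t lmt_base = 0x840000000000ULL;

	for (unsigned int slot = 0; slot < 4; slot++)
		printf("slot %u lmtline at %#llx\n", slot,
		       (unsigned long long)(lmt_base + slot * LMTLINE_SIZE));
	return 0;
}
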
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index ab1678fc564d..b691b6c1d5c4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -84,12 +84,22 @@ struct otx2_cptlf_info {
struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
};
+struct cpt_hw_ops {
+ void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+ u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
+ u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
+#define LMTLINE_SIZE 128
+ void __iomem *lmt_base;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
+ struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index e19af1356f12..5ebba86c65d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -47,6 +47,7 @@ struct otx2_cptpf_dev {
struct workqueue_struct *flr_wq;
struct cptpf_flr_work *flr_work;
+ unsigned long cap_flag;
u8 pf_id; /* RVU PF number */
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 58f47e3ab62e..146a55ac4b9b 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -6,10 +6,11 @@
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
+#include "cn10k_cpt.h"
#include "rvu_reg.h"
-#define OTX2_CPT_DRV_NAME "octeontx2-cpt"
-#define OTX2_CPT_DRV_STRING "Marvell OcteonTX2 CPT Physical Function Driver"
+#define OTX2_CPT_DRV_NAME "rvu_cptpf"
+#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
@@ -62,45 +63,66 @@ static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
}
}
-static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
+static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
{
- /* Clear interrupt if any */
+ /* Clear FLR interrupt if any */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
- ~0x0ULL);
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
- ~0x0ULL);
+ INTR_MASK(num_vfs));
/* Enable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
+ /* Clear ME interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
+ INTR_MASK(num_vfs));
+ /* Enable VF ME interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
+
+ if (num_vfs <= 64)
+ return;
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ INTR_MASK(num_vfs - 64));
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
+ INTR_MASK(num_vfs - 64));
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
+ RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}
-static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
+static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
int vector;
/* Disable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ /* Disable VF ME interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);
+ RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
- /* Clear interrupt if any */
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
- ~0x0ULL);
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
- ~0x0ULL);
+ if (num_vfs <= 64)
+ return;
- vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
free_irq(vector, cptpf);
- if (num_vfs > 64) {
- vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
- free_irq(vector, cptpf);
- }
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(vector, cptpf);
}
static void cptpf_flr_wq_handler(struct work_struct *work)
@@ -172,11 +194,38 @@ static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ int reg, vf, num_reg = 1;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
- cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
+ cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
@@ -202,6 +251,15 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
"IRQ registration failed for VFFLR0 irq\n");
goto free_mbox0_irq;
}
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox0 irq\n");
+ goto free_flr0_irq;
+ }
+
if (num_vfs > 64) {
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
@@ -209,7 +267,7 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
if (ret) {
dev_err(dev,
"IRQ registration failed for PFVF mbox1 irq\n");
- goto free_flr0_irq;
+ goto free_me0_irq;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
/* Register VF FLR interrupt handler */
@@ -220,15 +278,30 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
"IRQ registration failed for VFFLR1 irq\n");
goto free_mbox1_irq;
}
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR1 irq\n");
+ goto free_flr1_irq;
+ }
}
cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
- cptpf_enable_vf_flr_intrs(cptpf);
+ cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
return 0;
+free_flr1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
free_mbox1_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
free_irq(vector, cptpf);
+free_me0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
free_flr0_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
free_irq(vector, cptpf);
@@ -284,7 +357,11 @@ static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
return -ENOMEM;
/* Map VF-PF mailbox memory */
- vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+ if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
+ else
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+
if (!vfpf_mbox_base) {
dev_err(dev, "VF-PF mailbox address not configured\n");
err = -ENOMEM;
@@ -365,6 +442,8 @@ static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t offset;
int err;
cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
@@ -373,8 +452,17 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
if (!cptpf->afpf_mbox_wq)
return -ENOMEM;
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
+ if (!cptpf->afpf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
- cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
if (err)
goto error;
@@ -570,7 +658,7 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
if (ret)
goto disable_intr;
- ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
+ ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
if (ret)
goto disable_intr;
@@ -607,7 +695,6 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
- resource_size_t offset, size;
struct otx2_cptpf_dev *cptpf;
int err;
@@ -644,15 +731,6 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
if (err)
goto clear_drvdata;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map AF-PF mailbox memory */
- cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
- if (!cptpf->afpf_mbox_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- err = -ENODEV;
- goto clear_drvdata;
- }
err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
if (err < 0) {
@@ -660,6 +738,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
RVU_PF_INT_VEC_CNT);
goto clear_drvdata;
}
+ otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
/* Initialize AF-PF mailbox */
err = cptpf_afpf_mbox_init(cptpf);
if (err)
@@ -671,6 +750,10 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+ err = cn10k_cptpf_lmtst_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
/* Initialize CPT PF device */
err = cptpf_device_init(cptpf);
if (err)
@@ -719,6 +802,7 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
{ 0, } /* end of table */
};
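
The FLR/ME interrupt rework spreads the per-VF enable mask across two 64-bit register banks, using INTR_MASK(num_vfs) for the first bank and INTR_MASK(num_vfs - 64) for the overflow. A userspace model of the mask math; this INTR_MASK definition is an assumption modeled on the RVU headers, not quoted from the patch:

#include <stdio.h>

#define INTR_MASK(n)	(((n) < 64) ? ((1ULL << (n)) - 1) : ~0ULL)

int main(void)
{
	int num_vfs = 96;

	/* Bank 0 saturates at 64 VFs, bank 1 takes the remainder */
	printf("bank0 mask %#llx\n", (unsigned long long)INTR_MASK(num_vfs));
	if (num_vfs > 64)
		printf("bank1 mask %#llx\n",
		       (unsigned long long)INTR_MASK(num_vfs - 64));
	return 0;
}
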
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index a531f4c8b441..dff34b3ec09e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -16,6 +16,8 @@
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
+#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
+
struct fw_info_t {
struct list_head ucodes;
};
@@ -666,7 +668,8 @@ static int reserve_engines(struct device *dev,
static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
if (ucode->va) {
- dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
+ dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
+ ucode->dma);
ucode->va = NULL;
ucode->dma = 0;
ucode->size = 0;
@@ -685,7 +688,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
u32 i;
/* Allocate DMAable space */
- ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
+ ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
GFP_KERNEL);
if (!ucode->va)
return -ENOMEM;
@@ -1100,11 +1103,12 @@ int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
return eng_grp_num;
}
-int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
int ret;
@@ -1180,6 +1184,23 @@ int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
eng_grps->is_grps_created = true;
cpt_ucode_release_fw(&fw_info);
+
+ if (is_dev_otx2(pdev))
+ return 0;
+ /*
+ * Configure engine group mask to allow context prefetching
+ * for the groups.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
+ OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
+ BLKADDR_CPT0);
+ /*
+ * Set interval to periodically flush dirty data for the next
+ * CTX cache entry. Set the interval count to maximum supported
+ * value.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
+ CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
return 0;
delete_eng_grp:
@@ -1460,9 +1481,10 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
- otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
- while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
+ while (lfs->ops->cpt_get_compcode(result) ==
+ OTX2_CPT_COMPLETION_CODE_INIT)
cpu_relax();
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index 6b0d432de0af..fe019ab730b2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -23,11 +23,13 @@
/* Microcode version string length */
#define OTX2_CPT_UCODE_VER_STR_SZ 44
-/* Maximum number of supported engines/cores on OcteonTX2 platform */
-#define OTX2_CPT_MAX_ENGINES 128
+/* Maximum number of supported engines/cores on OcteonTX2/CN10K platform */
+#define OTX2_CPT_MAX_ENGINES 144
#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+#define OTX2_CPT_UCODE_SZ (64 * 1024)
+
/* Microcode types */
enum otx2_cpt_ucode_type {
OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
@@ -153,7 +155,7 @@ int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
-int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_eng_grps *eng_grps);
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 4f0a169fddbd..4207e2236903 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -19,11 +19,14 @@ struct otx2_cptvf_dev {
struct otx2_mbox pfvf_mbox;
struct work_struct pfvf_mbox_work;
struct workqueue_struct *pfvf_mbox_wq;
+ void *bbuf_base;
+ unsigned long cap_flag;
};
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev);
#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 47f378731024..3411e664cf50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -5,9 +5,10 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
+#include "cn10k_cpt.h"
#include <rvu_reg.h>
-#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
+#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
@@ -70,6 +71,8 @@ static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
int ret;
cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
@@ -78,14 +81,39 @@ static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
if (!cptvf->pfvf_mbox_wq)
return -ENOMEM;
+ if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ cptvf->pfvf_mbox_base = cptvf->reg_base +
+ CN10K_CPT_VF_MBOX_REGION;
+ } else {
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
+ size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENOMEM;
+ goto free_wqe;
+ }
+ }
+
ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
- cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
if (ret)
goto free_wqe;
+ ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
+ if (ret)
+ goto destroy_mbox;
+
INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
return 0;
+destroy_mbox:
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
destroy_workqueue(cptvf->pfvf_mbox_wq);
return ret;
@@ -305,7 +333,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
- resource_size_t offset, size;
struct otx2_cptvf_dev *cptvf;
int ret;
@@ -337,15 +364,12 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map PF-VF mailbox memory */
- cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
- if (!cptvf->pfvf_mbox_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- ret = -ENODEV;
+ otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
+
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
goto clear_drvdata;
- }
+
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -392,6 +416,7 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ {PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
{ 0, } /* end of table */
};
@@ -405,6 +430,6 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
MODULE_AUTHOR("Marvell");
-MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
+MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 5d73b711cba6..02cb9e44afd8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -5,6 +5,48 @@
#include "otx2_cptvf.h"
#include <rvu_reg.h>
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *otx2_mbox;
+
+ cptvf->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!cptvf->bbuf_base)
+ return -ENOMEM;
+ /*
+ * Overwrite mbox mbase to point to bounce buffer, so that PF/VF
+ * prepare all mbox messages in bounce buffer instead of directly
+ * in hw mbox memory.
+ */
+ otx2_mbox = &cptvf->pfvf_mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = cptvf->bbuf_base;
+
+ return 0;
+}
+
+static void otx2_cpt_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptvf_dev *cptvf = arg;
@@ -106,6 +148,7 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
pfvf_mbox = &cptvf->pfvf_mbox;
+ otx2_cpt_sync_mbox_bbuf(pfvf_mbox, 0);
mdev = &pfvf_mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
if (rsp_hdr->num_msgs == 0)
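
otx2_cpt_sync_mbox_bbuf() clamps the header-supplied message size before copying out of hardware mailbox memory, so a corrupted header cannot overrun the bounce buffer. The clamp on its own, with illustrative sizes standing in for MBOX_SIZE and the mbox geometry:

#include <stdio.h>

int main(void)
{
	const unsigned long rx_size = 1024, msgs_offset = 16;
	unsigned long msg_size = 4096;	/* untrusted value from mbox_hdr */

	if (msg_size > rx_size - msgs_offset)
		msg_size = rx_size - msgs_offset;

	/* The memcpy() then moves msg_size + msgs_offset bytes, never
	 * more than the rx region can hold */
	printf("copying %lu payload + %lu header bytes\n",
	       msg_size, msgs_offset);
	return 0;
}
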
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index d5c1c1b7c7e4..811ded72ce5f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -320,7 +320,7 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
cpt_req->dlen, false);
/* Send CPT command */
- otx2_cpt_send_cmd(&cptinst, 1, lf);
+ lf->lfs->ops->send_cmd(&cptinst, 1, lf);
/*
* We allocate and prepare pending queue entry in critical section
@@ -349,13 +349,14 @@ int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
&lfs->lf[cpu_num]);
}
-static int cpt_process_ccode(struct pci_dev *pdev,
+static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
union otx2_cpt_res_s *cpt_status,
struct otx2_cpt_inst_info *info,
u32 *res_code)
{
- u8 uc_ccode = cpt_status->s.uc_compcode;
- u8 ccode = cpt_status->s.compcode;
+ u8 uc_ccode = lfs->ops->cpt_get_uc_compcode(cpt_status);
+ u8 ccode = lfs->ops->cpt_get_compcode(cpt_status);
+ struct pci_dev *pdev = lfs->pdev;
switch (ccode) {
case OTX2_CPT_COMP_E_FAULT:
@@ -389,6 +390,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
return 1;
case OTX2_CPT_COMP_E_GOOD:
+ case OTX2_CPT_COMP_E_WARN:
/*
* Check microcode completion code, it is only valid
* when completion code is CPT_COMP_E::GOOD
@@ -426,7 +428,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
return 0;
}
-static inline void process_pending_queue(struct pci_dev *pdev,
+static inline void process_pending_queue(struct otx2_cptlfs_info *lfs,
struct otx2_cpt_pending_queue *pqueue)
{
struct otx2_cpt_pending_entry *resume_pentry = NULL;
@@ -436,6 +438,7 @@ static inline void process_pending_queue(struct pci_dev *pdev,
struct otx2_cpt_inst_info *info = NULL;
struct otx2_cpt_req_info *req = NULL;
struct crypto_async_request *areq;
+ struct pci_dev *pdev = lfs->pdev;
u32 res_code, resume_index;
while (1) {
@@ -476,7 +479,7 @@ static inline void process_pending_queue(struct pci_dev *pdev,
goto process_pentry;
}
- if (cpt_process_ccode(pdev, cpt_status, info, &res_code)) {
+ if (cpt_process_ccode(lfs, cpt_status, info, &res_code)) {
spin_unlock_bh(&pqueue->lock);
return;
}
@@ -529,7 +532,7 @@ process_pentry:
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
{
- process_pending_queue(wqe->lfs->pdev,
+ process_pending_queue(wqe->lfs,
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index cc8dd3072b8b..1491cbfbc071 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -264,8 +264,8 @@ static int nx842_validate_result(struct device *dev,
* @inlen: Length of input buffer
* @out: Pointer to output buffer
* @outlen: Length of output buffer
- * @wrkmem: ptr to buffer for working memory, size determined by
- * nx842_pseries_driver.workmem_size
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
*
* Returns:
* 0 Success, output of length @outlen stored in the buffer at @out
@@ -393,8 +393,8 @@ unlock:
* @inlen: Length of input buffer
* @out: Pointer to output buffer
* @outlen: Length of output buffer
- * @wrkmem: ptr to buffer for working memory, size determined by
- * nx842_pseries_driver.workmem_size
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
*
* Returns:
* 0 Success, output of length @outlen stored in the buffer at @out
@@ -513,7 +513,7 @@ unlock:
/**
* nx842_OF_set_defaults -- Set default (disabled) values for devdata
*
- * @devdata - struct nx842_devdata to update
+ * @devdata: struct nx842_devdata to update
*
* Returns:
* 0 on success
@@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
* The status field indicates if the device is enabled when the status
* is 'okay'. Otherwise the device driver will be disabled.
*
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to use for dev_info
+ * @prop: struct property pointer containing the status for the update
*
* Returns:
* 0 - Device is available
* -ENODEV - Device is not available
*/
-static int nx842_OF_upd_status(struct property *prop)
+static int nx842_OF_upd_status(struct nx842_devdata *devdata,
+ struct property *prop)
{
const char *status = (const char *)prop->value;
@@ -571,8 +573,8 @@ static int nx842_OF_upd_status(struct property *prop)
* In this example, the maximum byte length of a scatter list is
* 0x0ff0 (4,080).
*
- * @devdata - struct nx842_devdata to update
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to update
+ * @prop: struct property pointer containing the maxsglen for the update
*
* Returns:
* 0 on success
@@ -619,8 +621,8 @@ static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
* 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list
* elements.
*
- * @devdata - struct nx842_devdata to update
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to update
+ * @prop: struct property pointer containing the maxsyncop for the update
*
* Returns:
* 0 on success
@@ -689,7 +691,6 @@ out:
}
/**
- *
* nx842_OF_upd -- Handle OF properties updates for the device.
*
* Set all properties from the OF tree. Optionally, a new property
@@ -758,7 +759,7 @@ static int nx842_OF_upd(struct property *new_prop)
goto out;
/* Perform property updates */
- ret = nx842_OF_upd_status(status);
+ ret = nx842_OF_upd_status(new_devdata, status);
if (ret)
goto error_out;
@@ -812,8 +813,7 @@ error_out:
*
* @np: notifier block
* @action: notifier action
- * @update: struct pSeries_reconfig_prop_update pointer if action is
- * PSERIES_UPDATE_PROPERTY
+ * @data: struct of_reconfig_data pointer
*
* Returns:
* NOTIFY_OK on success
@@ -1069,6 +1069,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
+MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
static struct vio_driver nx842_vio_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index d6314ea9ae89..0e440f704a8f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -88,7 +88,7 @@ static int cbc_aes_nx_crypt(struct skcipher_request *req,
memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
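
This and the following nx-aes hunks all make the same fix: processed_byte_count is stored big-endian by the coprocessor, so adding it to the stats counter without conversion inflates it on little-endian hosts. A userspace demonstration, using ntohl() as a stand-in for be32_to_cpu():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htonl(4096);	/* as the hardware reports it */

	printf("raw add      : %u\n", wire);		/* bogus on LE */
	printf("converted add: %u\n", ntohl(wire));	/* always 4096 */
	return 0;
}
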
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index e7384d107573..3793885f928d 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -391,7 +391,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
@@ -460,7 +460,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 13f518802343..dfa3ad1a12f2 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -102,7 +102,7 @@ static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
@@ -118,7 +118,7 @@ static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
- memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7a729dc2bc17..502a565074e9 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -86,7 +86,7 @@ static int ecb_aes_nx_crypt(struct skcipher_request *req,
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index fc9baca13920..4a796318b430 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -382,7 +382,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 446f611726df..655361ba9107 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -660,8 +660,8 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
* @inlen: input buffer size
* @out: output buffer pointer
* @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
*
* Returns: see @nx842_powernv_exec()
*/
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index b0ad665e4bda..c3bebf0feabe 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -16,6 +16,11 @@
#include "nx_csbcpb.h"
#include "nx.h"
+struct sha256_state_be {
+ __be32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buf[SHA256_BLOCK_SIZE];
+};
static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
@@ -36,7 +41,7 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
}
static int nx_sha256_init(struct shash_desc *desc) {
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof *sctx);
@@ -56,7 +61,7 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *out_sg;
@@ -175,7 +180,7 @@ out:
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
@@ -245,7 +250,7 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
@@ -254,7 +259,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
@@ -268,8 +273,8 @@ struct shash_alg nx_shash_sha256_alg = {
.final = nx_sha256_final,
.export = nx_sha256_export,
.import = nx_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .descsize = sizeof(struct sha256_state_be),
+ .statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index c29103a1a0b6..1ffb40d2c324 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -15,6 +15,11 @@
#include "nx_csbcpb.h"
#include "nx.h"
+struct sha512_state_be {
+ __be64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
+ u8 buf[SHA512_BLOCK_SIZE];
+};
static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
@@ -36,7 +41,7 @@ static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
static int nx_sha512_init(struct shash_desc *desc)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof *sctx);
@@ -56,7 +61,7 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *out_sg;
@@ -178,7 +183,7 @@ out:
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
@@ -251,7 +256,7 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
@@ -260,7 +265,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
@@ -274,8 +279,8 @@ struct shash_alg nx_shash_sha512_alg = {
.final = nx_sha512_final,
.export = nx_sha512_export,
.import = nx_sha512_import,
- .descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
+ .descsize = sizeof(struct sha512_state_be),
+ .statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
index 493f8490ff94..e64f7e36fb92 100644
--- a/drivers/crypto/nx/nx_csbcpb.h
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -140,8 +140,8 @@ struct cop_status_block {
u8 crb_seq_number;
u8 completion_code;
u8 completion_extension;
- u32 processed_byte_count;
- u64 address;
+ __be32 processed_byte_count;
+ __be64 address;
} __packed;
/* Nest accelerator workbook section 4.4 */
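Annotating the CSB counters as __be32/__be64 lets sparse flag any access that skips the byte-order conversion, which is exactly the bug the be32_to_cpu() call sites above fix. A minimal sketch of the conversion, with illustrative struct and helper names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_csb {
	__be32 processed_byte_count;	/* written big-endian by the NX unit */
	__be64 address;
};

static u64 example_bytes_processed(const struct example_csb *csb)
{
	/* convert before any arithmetic so the statistics stay correct
	 * on little-endian hosts such as ppc64le */
	return be32_to_cpu(csb->processed_byte_count);
}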
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index c9d38bcfd1c7..bc8631363d72 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -229,9 +229,8 @@ static int omap_des_hw_init(struct omap_des_dev *dd)
* There may be long delays between requests.
* The device might go to off mode to save power.
*/
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
- pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
@@ -994,9 +993,8 @@ static int omap_des_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
goto err_get;
}
@@ -1124,9 +1122,8 @@ static int omap_des_resume(struct device *dev)
{
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
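pm_runtime_resume_and_get() drops the usage count itself when the resume fails, which is why the explicit pm_runtime_put_noidle() calls disappear from the error paths above. A minimal sketch of the pattern:

#include <linux/pm_runtime.h>

static int example_hw_init(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err < 0) {
		/* usage count already balanced by the helper; no put needed */
		dev_err(dev, "failed to resume: %d\n", err);
		return err;
	}
	/* ... hardware access ... */
	pm_runtime_put_autosuspend(dev);
	return 0;
}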
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ae0d320d3c60..dd53ad9987b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -372,7 +372,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
int err;
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
@@ -2244,7 +2244,7 @@ static int omap_sham_suspend(struct device *dev)
static int omap_sham_resume(struct device *dev)
{
- int err = pm_runtime_get_sync(dev);
+ int err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
return err;
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
index b8f3463be6ef..7eb5daef4f88 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -24,7 +24,7 @@ struct icp_qat_fw_loader_hal_handle {
};
struct icp_qat_fw_loader_chip_info {
- bool sram_visible;
+ int mmp_sram_size;
bool nn;
bool lm2lm3;
u32 lm_size;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index bd3028126cbe..12ca6b8764aa 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -696,7 +696,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
- handle->chip_info->sram_visible = false;
+ handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
@@ -730,7 +730,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
break;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
- handle->chip_info->sram_visible = false;
+ handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
@@ -763,7 +763,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
+ LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
- handle->chip_info->sram_visible = true;
+ handle->chip_info->mmp_sram_size = 0x40000;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
@@ -800,7 +800,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
goto out_err;
}
- if (handle->chip_info->sram_visible) {
+ if (handle->chip_info->mmp_sram_size > 0) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
@@ -1417,7 +1417,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
return -EINVAL;
}
- qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ if (status) {
+ pr_err("QAT: failed to read register");
+ return status;
+ }
gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
data16low = 0xffff & data;
data16hi = 0xffff & (data >> 0x10);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 1fb5fc852f6b..2026cc6be8f0 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -342,7 +342,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
return 0;
}
-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
@@ -1546,15 +1545,14 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
int status = 0;
if (handle->chip_info->fw_auth) {
- if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
+ status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
+ if (!status)
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
- if (!handle->chip_info->sram_visible) {
- dev_dbg(&handle->pci_dev->dev,
- "QAT MMP fw not loaded for device 0x%x",
- handle->pci_dev->device);
- return status;
+ if (handle->chip_info->mmp_sram_size < mem_size) {
+ pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
}
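Replacing the sram_visible flag with an explicit mmp_sram_size turns a yes/no test into a bounds check: 0 still encodes "no SRAM window" (C62X/C3XXX/4XXX), DH895XCC reports its 0x40000-byte window, and an oversized non-authenticated MMP image now fails loudly instead of being written blindly. A condensed sketch paraphrasing the hunks above (it reuses the driver's own identifiers and is not self-contained):

static int example_wr_mmp(struct icp_qat_fw_loader_handle *handle,
			  void *addr, u32 mem_size)
{
	/* 0 means no SRAM window; only DH895XCC reports 0x40000 */
	if (handle->chip_info->mmp_sram_size < mem_size)
		return -EFBIG;
	qat_uclo_wr_sram_by_words(handle, 0, addr, mem_size);
	return 0;
}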
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 14ade8a7d664..2cf8984e1b85 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -6,3 +6,4 @@ qcrypto-objs := core.o \
qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_AEAD) += aead.o
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
new file mode 100644
index 000000000000..290e2446a2f3
--- /dev/null
+++ b/drivers/crypto/qce/aead.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <crypto/gcm.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/scatterwalk.h>
+#include "aead.h"
+
+#define CCM_NONCE_ADATA_SHIFT 6
+#define CCM_NONCE_AUTHSIZE_SHIFT 3
+#define MAX_CCM_ADATA_HEADER_LEN 6
+
+static LIST_HEAD(aead_algs);
+
+static void qce_aead_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
+ enum dma_data_direction dir_src, dir_dst;
+ bool diff_dst;
+ int error;
+ u32 status;
+ unsigned int totallen;
+ unsigned char tag[SHA256_DIGEST_SIZE] = {0};
+ int ret = 0;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "aead dma termination error (%d)\n",
+ error);
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+ if (IS_CCM(rctx->flags)) {
+ if (req->assoclen) {
+ sg_free_table(&rctx->src_tbl);
+ if (diff_dst)
+ sg_free_table(&rctx->dst_tbl);
+ } else {
+ if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
+ sg_free_table(&rctx->dst_tbl);
+ }
+ } else {
+ sg_free_table(&rctx->dst_tbl);
+ }
+
+ error = qce_check_status(qce, &status);
+ if (error < 0 && (error != -EBADMSG))
+ dev_err(qce->dev, "aead operation error (%x)\n", status);
+
+ if (IS_ENCRYPT(rctx->flags)) {
+ totallen = req->cryptlen + req->assoclen;
+ if (IS_CCM(rctx->flags))
+ scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
+ totallen, ctx->authsize, 1);
+ else
+ scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
+ totallen, ctx->authsize, 1);
+
+ } else if (!IS_CCM(rctx->flags)) {
+ totallen = req->cryptlen + req->assoclen - ctx->authsize;
+ scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
+ ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
+ if (ret) {
+ pr_err("Bad message error\n");
+ error = -EBADMSG;
+ }
+ }
+
+ qce->async_req_done(qce, error);
+}
+
+static struct scatterlist *
+qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+ return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
+}
+
+static struct scatterlist *
+qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+
+ sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
+ return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
+}
+
+static struct scatterlist *
+qce_aead_prepare_dst_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg, *msg_sg, __sg[2];
+ gfp_t gfp;
+ unsigned int assoclen = req->assoclen;
+ unsigned int totallen;
+ int ret;
+
+ totallen = rctx->cryptlen + assoclen;
+ rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
+ if (rctx->dst_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (IS_CCM(rctx->flags))
+ rctx->dst_nents += 2;
+ else
+ rctx->dst_nents += 1;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (IS_CCM(rctx->flags) && assoclen) {
+ /* Get the dst buffer */
+ msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
+ rctx->assoclen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto dst_tbl_free;
+ }
+ /* dst buffer */
+ sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto dst_tbl_free;
+ }
+ totallen = rctx->cryptlen + rctx->assoclen;
+ } else {
+ if (totallen) {
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
+ if (IS_ERR(sg))
+ goto dst_tbl_free;
+ }
+ }
+ if (IS_CCM(rctx->flags))
+ sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
+ else
+ sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);
+
+ if (IS_ERR(sg))
+ goto dst_tbl_free;
+
+ sg_mark_end(sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+ rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;
+
+ return sg;
+
+dst_tbl_free:
+ sg_free_table(&rctx->dst_tbl);
+ return sg;
+}
+
+static int
+qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
+{
+ struct scatterlist *sg, *msg_sg, __sg[2];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int assoclen = rctx->assoclen;
+ unsigned int adata_header_len, cryptlen, totallen;
+ gfp_t gfp;
+ bool diff_dst;
+ int ret;
+
+ if (IS_DECRYPT(rctx->flags))
+ cryptlen = rctx->cryptlen + ctx->authsize;
+ else
+ cryptlen = rctx->cryptlen;
+ totallen = cryptlen + req->assoclen;
+
+ /* Get the msg */
+ msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);
+
+ rctx->adata = kzalloc((ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN) *
+ sizeof(unsigned char), GFP_ATOMIC);
+ if (!rctx->adata)
+ return -ENOMEM;
+
+ /*
+ * Format associated data (RFC3610 and NIST 800-38C)
+	 * Even though the specification allows for AAD to be up to 2^64 - 1 bytes,
+ * the assoclen field in aead_request is unsigned int and thus limits
+ * the AAD to be up to 2^32 - 1 bytes. So we handle only two scenarios
+ * while forming the header for AAD.
+ */
+ if (assoclen < 0xff00) {
+ adata_header_len = 2;
+ *(__be16 *)rctx->adata = cpu_to_be16(assoclen);
+ } else {
+ adata_header_len = 6;
+ *(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
+ *(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
+ }
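+	/*
+	 * Illustrative encodings (not from the original patch): assoclen 24
+	 * yields the 2-byte header 0x00 0x18, while assoclen 0x10000 yields
+	 * the 6-byte header 0xff 0xfe 0x00 0x01 0x00 0x00.
+	 */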
+
+ /* Copy the associated data */
+ if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
+ rctx->adata + adata_header_len,
+ assoclen) != assoclen)
+ return -EINVAL;
+
+ /* Pad associated data to block size */
+ rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);
+
+ diff_dst = (req->src != req->dst) ? true : false;
+
+ if (diff_dst)
+ rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
+ else
+ rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
+ if (ret)
+ return ret;
+
+ /* Associated Data */
+ sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
+ sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
+ rctx->assoclen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ /* src msg */
+ sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ if (!diff_dst) {
+ /*
+		 * For decrypt, when src and dst buffers are the same, there is
+		 * already space in the buffer for the padded 0s that are output
+		 * in lieu of the MAC that is input. So skip the step below.
+ */
+ if (!IS_DECRYPT(rctx->flags)) {
+ sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ }
+ }
+ sg_mark_end(sg);
+ rctx->src_sg = rctx->src_tbl.sgl;
+ totallen = cryptlen + rctx->assoclen;
+ rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);
+
+ if (diff_dst) {
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ } else {
+ if (IS_ENCRYPT(rctx->flags))
+ rctx->dst_nents = rctx->src_nents + 1;
+ else
+ rctx->dst_nents = rctx->src_nents;
+ rctx->dst_sg = rctx->src_sg;
+ }
+
+ return 0;
+err_free:
+ sg_free_table(&rctx->src_tbl);
+ return ret;
+}
+
+static int qce_aead_prepare_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg;
+ bool diff_dst = (req->src != req->dst) ? true : false;
+ unsigned int totallen;
+
+ totallen = rctx->cryptlen + rctx->assoclen;
+
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+ if (diff_dst) {
+ rctx->src_nents = sg_nents_for_len(req->src, totallen);
+ if (rctx->src_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of src SG.\n");
+ return -EINVAL;
+ }
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_nents = rctx->dst_nents - 1;
+ rctx->src_sg = rctx->dst_sg;
+ }
+ return 0;
+}
+
+static int qce_aead_ccm_prepare_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct scatterlist *sg;
+ bool diff_dst = (req->src != req->dst) ? true : false;
+ unsigned int cryptlen;
+
+ if (rctx->assoclen)
+ return qce_aead_ccm_prepare_buf_assoclen(req);
+
+ if (IS_ENCRYPT(rctx->flags))
+ return qce_aead_prepare_buf(req);
+
+ cryptlen = rctx->cryptlen + ctx->authsize;
+ if (diff_dst) {
+ rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
+ rctx->src_sg = req->src;
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+ } else {
+ rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
+ rctx->src_sg = req->src;
+ rctx->dst_nents = rctx->src_nents;
+ rctx->dst_sg = rctx->src_sg;
+ }
+
+ return 0;
+}
+
+static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
+{
+ unsigned int msglen_size, ivsize;
+ u8 msg_len[4];
+ int i;
+
+ if (!rctx || !rctx->iv)
+ return -EINVAL;
+
+ msglen_size = rctx->iv[0] + 1;
+
+ /* Verify that msg len size is valid */
+ if (msglen_size < 2 || msglen_size > 8)
+ return -EINVAL;
+
+ ivsize = rctx->ivsize;
+
+ /*
+	 * Clear the msglen bytes in the IV.
+	 * Otherwise the h/w engine will fold any stray value pending there
+	 * into the nonce.
+ */
+ if (!IS_CCM_RFC4309(rctx->flags)) {
+ for (i = 0; i < msglen_size; i++)
+ rctx->iv[ivsize - i - 1] = 0;
+ }
+
+ /*
+ * The crypto framework encodes cryptlen as unsigned int. Thus, even though
+	 * the spec allows for up to 8 bytes to encode msg_len, only 4 bytes are needed.
+ */
+ if (msglen_size > 4)
+ msglen_size = 4;
+
+ memcpy(&msg_len[0], &rctx->cryptlen, 4);
+
+ memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
+ if (rctx->assoclen)
+ rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
+ rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
+ CCM_NONCE_AUTHSIZE_SHIFT;
+ for (i = 0; i < msglen_size; i++)
+ rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];
+
+ return 0;
+}
+
+static int
+qce_aead_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ bool diff_dst;
+ int dst_nents, src_nents, ret;
+
+ if (IS_CCM_RFC4309(rctx->flags)) {
+ memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
+ rctx->ccm_rfc4309_iv[0] = 3;
+ memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
+ memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->ccm_rfc4309_iv;
+ rctx->ivsize = AES_BLOCK_SIZE;
+ } else {
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_aead_ivsize(tfm);
+ }
+ if (IS_CCM_RFC4309(rctx->flags))
+ rctx->assoclen = req->assoclen - 8;
+ else
+ rctx->assoclen = req->assoclen;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ if (IS_CCM(rctx->flags)) {
+ ret = qce_aead_create_ccm_nonce(rctx, ctx);
+ if (ret)
+ return ret;
+ }
+ if (IS_CCM(rctx->flags))
+ ret = qce_aead_ccm_prepare_buf(req);
+ else
+ ret = qce_aead_prepare_buf(req);
+
+ if (ret)
+ return ret;
+ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (dst_nents < 0) {
+ ret = dst_nents;
+ goto error_free;
+ }
+
+ if (diff_dst) {
+ src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ if (src_nents < 0) {
+ ret = src_nents;
+ goto error_unmap_dst;
+ }
+ } else {
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ src_nents = dst_nents;
+ else
+ src_nents = dst_nents - 1;
+ }
+
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
+ qce_aead_done, async_req);
+ if (ret)
+ goto error_unmap_src;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+ if (IS_CCM(rctx->flags) && rctx->assoclen) {
+ sg_free_table(&rctx->src_tbl);
+ if (diff_dst)
+ sg_free_table(&rctx->dst_tbl);
+ } else {
+ sg_free_table(&rctx->dst_tbl);
+ }
+ return ret;
+}
+
+static int qce_aead_crypt(struct aead_request *req, int encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
+ unsigned int blocksize = crypto_aead_blocksize(tfm);
+
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+ if (encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - ctx->authsize;
+
+ /* CE does not handle 0 length messages */
+ if (!rctx->cryptlen) {
+ if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
+ ctx->need_fallback = true;
+ }
+
+ /* If fallback is needed, schedule and exit */
+ if (ctx->need_fallback) {
+ /* Reset need_fallback in case the same ctx is used for another transaction */
+ ctx->need_fallback = false;
+
+ aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ aead_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(&rctx->fallback_req, req->assoclen);
+
+ return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
+ crypto_aead_decrypt(&rctx->fallback_req);
+ }
+
+ /*
+ * CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
+ return -EINVAL;
+
+	/* RFC4309 supported AAD sizes are 16 and 20 bytes */
+ if (IS_CCM_RFC4309(rctx->flags))
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_aead_encrypt(struct aead_request *req)
+{
+ return qce_aead_crypt(req, 1);
+}
+
+static int qce_aead_decrypt(struct aead_request *req)
+{
+ return qce_aead_crypt(req, 0);
+}
+
+static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+
+ if (IS_CCM_RFC4309(flags)) {
+ if (keylen < QCE_CCM4309_SALT_SIZE)
+ return -EINVAL;
+ keylen -= QCE_CCM4309_SALT_SIZE;
+ memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
+ }
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
+ return -EINVAL;
+
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = keylen;
+
+ memcpy(ctx->enc_key, key, keylen);
+ memcpy(ctx->auth_key, key, keylen);
+
+ if (keylen == AES_KEYSIZE_192)
+ ctx->need_fallback = true;
+
+ return IS_CCM_RFC4309(flags) ?
+ crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
+ crypto_aead_setkey(ctx->fallback, key, keylen);
+}
+
+static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_authenc_keys authenc_keys;
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+ u32 _key[6];
+ int err;
+
+ err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
+ if (err)
+ return err;
+
+ if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
+ authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ if (IS_DES(flags)) {
+ err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
+ if (err)
+ return err;
+ } else if (IS_3DES(flags)) {
+ err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
+ if (err)
+ return err;
+ /*
+ * The crypto engine does not support any two keys
+ * being the same for triple des algorithms. The
+ * verify_skcipher_des3_key does not check for all the
+ * below conditions. Schedule fallback in this case.
+ */
+ memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ ctx->need_fallback = true;
+ } else if (IS_AES(flags)) {
+ /* No random key sizes */
+ if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
+ authenc_keys.enckeylen != AES_KEYSIZE_192 &&
+ authenc_keys.enckeylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ if (authenc_keys.enckeylen == AES_KEYSIZE_192)
+ ctx->need_fallback = true;
+ }
+
+ ctx->enc_keylen = authenc_keys.enckeylen;
+ ctx->auth_keylen = authenc_keys.authkeylen;
+
+ memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);
+
+ memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
+ memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
+
+ return crypto_aead_setkey(ctx->fallback, key, keylen);
+}
+
+static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+
+ if (IS_CCM(flags)) {
+ if (authsize < 4 || authsize > 16 || authsize % 2)
+ return -EINVAL;
+ if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+
+ return crypto_aead_setauthsize(ctx->fallback, authsize);
+}
+
+static int qce_aead_init(struct crypto_aead *tfm)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->need_fallback = false;
+ ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
+ crypto_aead_reqsize(ctx->fallback));
+ return 0;
+}
+
+static void qce_aead_exit(struct crypto_aead *tfm)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->fallback);
+}
+
+struct qce_aead_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int chunksize;
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+};
+
+static const struct qce_aead_def aead_def[] = {
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
+ .name = "authenc(hmac(sha1),cbc(des))",
+ .drv_name = "authenc-hmac-sha1-cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .drv_name = "authenc-hmac-sha1-cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(des))",
+ .drv_name = "authenc-hmac-sha256-cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .drv_name = "authenc-hmac-sha256-cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .drv_name = "authenc-hmac-sha256-cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CCM,
+ .name = "ccm(aes)",
+ .drv_name = "ccm-aes-qce",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
+ .name = "rfc4309(ccm(aes))",
+ .drv_name = "rfc4309-ccm-aes-qce",
+ .blocksize = 1,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+};
+
+static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct aead_alg *alg;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ alg = &tmpl->alg.aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
+ alg->ivsize = def->ivsize;
+ alg->maxauthsize = def->maxauthsize;
+ if (IS_CCM(def->flags))
+ alg->setkey = qce_aead_ccm_setkey;
+ else
+ alg->setkey = qce_aead_setkey;
+ alg->setauthsize = qce_aead_setauthsize;
+ alg->encrypt = qce_aead_encrypt;
+ alg->decrypt = qce_aead_decrypt;
+ alg->init = qce_aead_init;
+ alg->exit = qce_aead_exit;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_ctxsize = sizeof(struct qce_aead_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_aead(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &aead_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+ return 0;
+}
+
+static void qce_aead_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
+ crypto_unregister_aead(&tmpl->alg.aead);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_aead_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
+ ret = qce_aead_register_one(&aead_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_aead_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops aead_ops = {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .register_algs = qce_aead_register,
+ .unregister_algs = qce_aead_unregister,
+ .async_req_handle = qce_aead_async_req_handle,
+};
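The nonce assembled in qce_aead_create_ccm_nonce() above follows the CCM B_0 block layout from RFC 3610: a flags byte carrying the Adata bit and the encoded tag length, then the IV, then the message length big-endian in the last L bytes. A standalone sketch of that layout; the helper name and the 16-byte block size are assumptions for illustration:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bits.h>
#include <linux/minmax.h>
#include <asm/byteorder.h>

static void example_ccm_b0(u8 b0[16], const u8 *iv, unsigned int ivsize,
			   unsigned int authsize, bool have_adata,
			   u32 cryptlen)
{
	/* iv[0] already encodes L - 1, the size of the length field;
	 * cryptlen is an unsigned int, so at most 4 bytes are needed */
	unsigned int l = min_t(unsigned int, iv[0] + 1, 4);
	__be32 len = cpu_to_be32(cryptlen);

	memcpy(b0, iv, ivsize);
	if (have_adata)
		b0[0] |= BIT(6);		/* Adata present */
	b0[0] |= ((authsize - 2) / 2) << 3;	/* encoded tag length M */
	/* low-order l bytes of the big-endian length, in the last l bytes */
	memcpy(b0 + 16 - l, (const u8 *)&len + 4 - l, l);
}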
diff --git a/drivers/crypto/qce/aead.h b/drivers/crypto/qce/aead.h
new file mode 100644
index 000000000000..efb8477cc088
--- /dev/null
+++ b/drivers/crypto/qce/aead.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Linaro Limited. All rights reserved.
+ */
+
+#ifndef _AEAD_H_
+#define _AEAD_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE 64
+#define QCE_CCM4309_SALT_SIZE 3
+
+struct qce_aead_ctx {
+ u8 enc_key[QCE_MAX_KEY_SIZE];
+ u8 auth_key[QCE_MAX_KEY_SIZE];
+ u8 ccm4309_salt[QCE_CCM4309_SALT_SIZE];
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+ unsigned int authsize;
+ bool need_fallback;
+ struct crypto_aead *fallback;
+};
+
+struct qce_aead_reqctx {
+ unsigned long flags;
+ u8 *iv;
+ unsigned int ivsize;
+ int src_nents;
+ int dst_nents;
+ struct scatterlist result_sg;
+ struct scatterlist adata_sg;
+ struct sg_table dst_tbl;
+ struct sg_table src_tbl;
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg;
+ unsigned int cryptlen;
+ unsigned int assoclen;
+ unsigned char *adata;
+ u8 ccm_nonce[QCE_MAX_NONCE];
+ u8 ccmresult_buf[QCE_BAM_BURST_SIZE];
+ u8 ccm_rfc4309_iv[QCE_MAX_IV_SIZE];
+ struct aead_request fallback_req;
+};
+
+static inline struct qce_alg_template *to_aead_tmpl(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+
+ return container_of(alg, struct qce_alg_template, alg.aead);
+}
+
+extern const struct qce_algo_ops aead_ops;
+
+#endif /* _AEAD_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..7c612ba5068f 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,7 @@
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
+#include "aead.h"
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
@@ -88,17 +89,20 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
}
-static inline void qce_crypto_go(struct qce_device *qce)
+static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+ if (result_dump)
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+ else
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
-static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
u32 cfg = 0;
- if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+ if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
@@ -116,15 +120,16 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+ else if (IS_CCM(flags))
+ cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
- else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
- IS_CBC(flags) || IS_CTR(flags))
+ else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
- else if (IS_AES(flags) && IS_CCM(flags))
+ else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
- else if (IS_AES(flags) && IS_CMAC(flags))
+ else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
@@ -133,13 +138,11 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
- if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
- IS_CMAC(flags))
- cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
-
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
@@ -168,7 +171,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
- auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+ auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, digestsize);
}
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
@@ -196,7 +199,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
- auth_cfg = qce_auth_cfg(rctx->flags, 0);
+ auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
@@ -219,13 +222,13 @@ go_proc:
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
- qce_crypto_go(qce);
+ qce_crypto_go(qce, true);
return 0;
}
#endif
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
u32 cfg = 0;
@@ -271,7 +274,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
u8 swap[QCE_AES_IV_LENGTH];
@@ -380,7 +385,156 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
- qce_crypto_go(qce);
+ qce_crypto_go(qce, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
+{
+ u32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
+
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = be32_to_cpup((const __be32 *)s);
+ s += sizeof(u32);
+ d++;
+ }
+ return DIV_ROUND_UP(len, sizeof(u32));
+}
+
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
+ u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
+ u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
+ u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
+ unsigned int enc_keylen = ctx->enc_keylen;
+ unsigned int auth_keylen = ctx->auth_keylen;
+ unsigned int enc_ivsize = rctx->ivsize;
+ unsigned int auth_ivsize = 0;
+ unsigned int enckey_words, enciv_words;
+ unsigned int authkey_words, authiv_words, authnonce_words;
+ unsigned long flags = rctx->flags;
+ u32 encr_cfg, auth_cfg, config, totallen;
+ u32 iv_last_word;
+
+ qce_setup_config(qce);
+
+ /* Write encryption key */
+ enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
+ qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);
+
+ /* Write encryption iv */
+ enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
+ qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);
+
+ if (IS_CCM(rctx->flags)) {
+ iv_last_word = enciv[enciv_words - 1];
+ qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
+ qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
+ qce_write(qce, REG_CNTR_MASK, ~0);
+ qce_write(qce, REG_CNTR_MASK0, ~0);
+ qce_write(qce, REG_CNTR_MASK1, ~0);
+ qce_write(qce, REG_CNTR_MASK2, ~0);
+ }
+
+ /* Clear authentication IV and KEY registers of previous values */
+ qce_clear_array(qce, REG_AUTH_IV0, 16);
+ qce_clear_array(qce, REG_AUTH_KEY0, 16);
+
+ /* Clear byte count */
+ qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+ /* Write authentication key */
+ authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
+ qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);
+
+ /* Write initial authentication IV only for HMAC algorithms */
+ if (IS_SHA_HMAC(rctx->flags)) {
+ /* Write default authentication iv */
+ if (IS_SHA1_HMAC(rctx->flags)) {
+ auth_ivsize = SHA1_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha1, auth_ivsize);
+ } else if (IS_SHA256_HMAC(rctx->flags)) {
+ auth_ivsize = SHA256_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha256, auth_ivsize);
+ }
+ authiv_words = auth_ivsize / sizeof(u32);
+ qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
+ } else if (IS_CCM(rctx->flags)) {
+ /* Write nonce for CCM algorithms */
+ authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
+ qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
+ }
+
+ /* Set up ENCR_SEG_CFG */
+ encr_cfg = qce_encr_cfg(flags, enc_keylen);
+ if (IS_ENCRYPT(flags))
+ encr_cfg |= BIT(ENCODE_SHIFT);
+ qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+
+ /* Set up AUTH_SEG_CFG */
+ auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
+ auth_cfg |= BIT(AUTH_LAST_SHIFT);
+ auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+ if (IS_ENCRYPT(flags)) {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ } else {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ }
+ qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+ totallen = rctx->cryptlen + rctx->assoclen;
+
+ /* Set the encryption size and start offset */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
+ else
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+ qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);
+
+ /* Set the authentication size and start offset */
+ qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
+ qce_write(qce, REG_AUTH_SEG_START, 0);
+
+ /* Write total length */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
+ else
+ qce_write(qce, REG_SEG_SIZE, totallen);
+
+ /* get little endianness */
+ config = qce_config_reg(qce, 1);
+ qce_write(qce, REG_CONFIG, config);
+
+ /* Start the process */
+ qce_crypto_go(qce, !IS_CCM(flags));
return 0;
}
@@ -397,6 +551,10 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req);
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+ case CRYPTO_ALG_TYPE_AEAD:
+ return qce_setup_regs_aead(async_req);
+#endif
default:
return -EINVAL;
}
@@ -419,6 +577,8 @@ int qce_check_status(struct qce_device *qce, u32 *status)
*/
if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
ret = -ENXIO;
+ else if (*status & BIT(MAC_FAILED_SHIFT))
+ ret = -EBADMSG;
return ret;
}
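With the MAC_FAILED bit mapped to -EBADMSG, callers can now tell an authentication failure apart from an engine fault, and the AEAD completion path passes that verdict straight to the crypto API. A condensed sketch of the resulting decode order (reusing the register symbols from the hunk above, not self-contained):

/* engine faults first, then tag mismatch, then success */
if ((status & STATUS_ERRORS) || !(status & BIT(OPERATION_DONE_SHIFT)))
	ret = -ENXIO;	/* device or transfer error */
else if (status & BIT(MAC_FAILED_SHIFT))
	ret = -EBADMSG;	/* authentication tag mismatch */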
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..02e63ad9f245 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -11,6 +11,7 @@
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/aead.h>
/* xts du size */
#define QCE_SECTOR_SIZE 512
@@ -51,9 +52,11 @@
#define QCE_MODE_CCM BIT(12)
#define QCE_MODE_MASK GENMASK(12, 8)
+#define QCE_MODE_CCM_RFC4309 BIT(13)
+
/* cipher encryption/decryption operations */
-#define QCE_ENCRYPT BIT(13)
-#define QCE_DECRYPT BIT(14)
+#define QCE_ENCRYPT BIT(30)
+#define QCE_DECRYPT BIT(31)
#define IS_DES(flags) (flags & QCE_ALG_DES)
#define IS_3DES(flags) (flags & QCE_ALG_3DES)
@@ -73,6 +76,7 @@
#define IS_CTR(mode) (mode & QCE_MODE_CTR)
#define IS_XTS(mode) (mode & QCE_MODE_XTS)
#define IS_CCM(mode) (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode) ((mode) & QCE_MODE_CCM_RFC4309)
#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
@@ -85,6 +89,7 @@ struct qce_alg_template {
union {
struct skcipher_alg skcipher;
struct ahash_alg ahash;
+ struct aead_alg aead;
} alg;
struct qce_device *qce;
const u8 *hash_zero;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 80b75085c265..d3780be44a76 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -17,6 +17,7 @@
#include "core.h"
#include "cipher.h"
#include "sha.h"
+#include "aead.h"
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
@@ -28,6 +29,9 @@ static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+ &aead_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index c0a0d8c4fce1..8ff10928f581 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -72,7 +72,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
struct scatterlist *sg;
bool diff_dst;
gfp_t gfp;
- int ret;
+ int dst_nents, src_nents, ret;
rctx->iv = req->iv;
rctx->ivsize = crypto_skcipher_ivsize(skcipher);
@@ -123,21 +123,26 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;
- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (ret < 0)
+ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (dst_nents < 0) {
+ ret = dst_nents;
goto error_free;
+ }
if (diff_dst) {
- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (ret < 0)
+ src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ if (src_nents < 0) {
+ ret = src_nents;
goto error_unmap_dst;
+ }
rctx->src_sg = req->src;
} else {
rctx->src_sg = rctx->dst_sg;
+ src_nents = dst_nents - 1;
}
- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
- rctx->dst_sg, rctx->dst_nents,
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
+ rctx->dst_sg, dst_nents,
qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
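dma_map_sg() returns the number of DMA segments actually created, which can be smaller than the nents passed in when the IOMMU coalesces entries; the rework above feeds that returned count, rather than the original nents, to qce_dma_prep_sgs(). A minimal sketch of the pattern, with an illustrative helper name:

#include <linux/dma-mapping.h>

static int example_map_for_dma(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sg, nents, dir);

	/* 0 (or a negative value) indicates a mapping failure */
	if (mapped <= 0)
		return mapped ? mapped : -EIO;
	/* hand 'mapped', not 'nents', to the DMA engine */
	return mapped;
}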
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 1c6929fb3a13..544d7040cfc5 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1698,7 +1698,6 @@ static void sa_aead_dma_in_callback(void *data)
size_t pl, ml;
int i;
int err = 0;
- u16 auth_len;
u32 *mdptr;
sa_sync_from_device(rxd);
@@ -1711,13 +1710,10 @@ static void sa_aead_dma_in_callback(void *data)
for (i = 0; i < (authsize / 4); i++)
mdptr[i + 4] = swab32(mdptr[i + 4]);
- auth_len = req->assoclen + req->cryptlen;
-
if (rxd->enc) {
scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1);
} else {
- auth_len -= authsize;
start -= authsize;
scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
0);
@@ -2300,9 +2296,9 @@ static int sa_dma_init(struct sa_crypto_data *dd)
dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
if (IS_ERR(dd->dma_rx2)) {
- dma_release_channel(dd->dma_rx1);
- return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
- "Unable to request rx2 DMA channel\n");
+ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
+ "Unable to request rx2 DMA channel\n");
+ goto err_dma_rx2;
}
dd->dma_tx = dma_request_chan(dd->dev, "tx");
@@ -2323,28 +2319,31 @@ static int sa_dma_init(struct sa_crypto_data *dd)
if (ret) {
dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
if (ret) {
dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
ret = dmaengine_slave_config(dd->dma_tx, &cfg);
if (ret) {
dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
return 0;
+err_dma_config:
+ dma_release_channel(dd->dma_tx);
err_dma_tx:
- dma_release_channel(dd->dma_rx1);
dma_release_channel(dd->dma_rx2);
+err_dma_rx2:
+ dma_release_channel(dd->dma_rx1);
return ret;
}
@@ -2385,10 +2384,8 @@ MODULE_DEVICE_TABLE(of, of_match);
static int sa_ul_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
int ret;
@@ -2397,9 +2394,18 @@ static int sa_ul_probe(struct platform_device *pdev)
if (!dev_data)
return -ENOMEM;
+ dev_data->match_data = of_device_get_match_data(dev);
+ if (!dev_data->match_data)
+ return -ENODEV;
+
+ saul_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(saul_base))
+ return PTR_ERR(saul_base);
+
sa_k3_dev = dev;
dev_data->dev = dev;
dev_data->pdev = pdev;
+ dev_data->base = saul_base;
platform_set_drvdata(pdev, dev_data);
dev_set_drvdata(sa_k3_dev, dev_data);
@@ -2408,26 +2414,16 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
+ pm_runtime_disable(dev);
return ret;
}
sa_init_mem(dev_data);
ret = sa_dma_init(dev_data);
if (ret)
- goto disable_pm_runtime;
-
- match = of_match_node(of_match, dev->of_node);
- if (!match) {
- dev_err(dev, "No compatible match found\n");
- return -ENODEV;
- }
- dev_data->match_data = match->data;
+ goto destroy_dma_pool;
spin_lock_init(&dev_data->scid_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- saul_base = devm_ioremap_resource(dev, res);
-
- dev_data->base = saul_base;
if (!dev_data->match_data->skip_engine_control) {
u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
@@ -2454,9 +2450,9 @@ release_dma:
dma_release_channel(dev_data->dma_rx1);
dma_release_channel(dev_data->dma_tx);
+destroy_dma_pool:
dma_pool_destroy(dev_data->sc_pool);
-disable_pm_runtime:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -2467,6 +2463,8 @@ static int sa_ul_remove(struct platform_device *pdev)
{
struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+ of_platform_depopulate(&pdev->dev);
+
sa_unregister_algos(&pdev->dev);
dma_release_channel(dev_data->dma_rx2);
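The relabelled error path in sa_dma_init() above restores strict reverse-order teardown: each label releases only what was acquired before the failing step, so nothing is double-released or leaked. A self-contained sketch of the idiom with illustrative names:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_request_channels(struct device *dev)
{
	struct dma_chan *rx1, *rx2, *tx;
	int ret;

	rx1 = dma_request_chan(dev, "rx1");
	if (IS_ERR(rx1))
		return PTR_ERR(rx1);

	rx2 = dma_request_chan(dev, "rx2");
	if (IS_ERR(rx2)) {
		ret = PTR_ERR(rx2);
		goto err_rx1;
	}

	tx = dma_request_chan(dev, "tx");
	if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		goto err_rx2;
	}
	return 0;

	/* unwind mirrors acquisition order, newest first */
err_rx2:
	dma_release_channel(rx2);
err_rx1:
	dma_release_channel(rx1);
	return ret;
}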
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index ecb7412e84e3..51a6e1a42434 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1011,6 +1011,7 @@ static int hash_hw_final(struct ahash_request *req)
goto out;
}
} else if (req->nbytes == 0 && ctx->keylen > 0) {
+ ret = -EPERM;
dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
__func__);
goto out;
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index db92573c94e8..dd8222a42808 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -337,7 +337,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
}
static const struct address_space_operations dev_dax_aops = {
- .set_page_dirty = noop_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
};
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 20373a893b44..e87d01c0b76a 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -103,7 +103,6 @@ config ARM_IMX8M_DDRC_DEVFREQ
tristate "i.MX8M DDRC DEVFREQ Driver"
depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
(COMPILE_TEST && HAVE_ARM_SMCCC)
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_USERSPACE
help
This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index fe08c46642f7..28f3e0ba6cdd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -823,6 +823,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->profile->timer < 0
|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
mutex_unlock(&devfreq->lock);
+ err = -EINVAL;
goto err_dev;
}
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index b094132bd20b..fc09324a03e0 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -65,7 +65,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
dev_pm_opp_put(p_opp);
if (IS_ERR(opp))
- return PTR_ERR(opp);
+ goto no_required_opp;
*freq = dev_pm_opp_get_freq(opp);
dev_pm_opp_put(opp);
@@ -73,6 +73,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
return 0;
}
+no_required_opp:
/*
* Get the OPP table's index of decided frequency by governor
* of parent device.
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0fd6c4851071..ab9db7adb3ad 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -31,8 +31,8 @@ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
return 0;
}
-static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct devfreq *devfreq = to_devfreq(dev);
struct userspace_data *data;
@@ -52,8 +52,8 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
return err;
}
-static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t set_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct devfreq *devfreq = to_devfreq(dev);
struct userspace_data *data;
@@ -70,7 +70,7 @@ static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
return err;
}
-static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static DEVICE_ATTR_RW(set_freq);
static struct attribute *dev_entries[] = {
&dev_attr_set_freq.attr,
NULL,
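The show_freq/store_freq renames above are not cosmetic: DEVICE_ATTR_RW(set_freq) pastes the attribute name into its handler names, so it only compiles if the callbacks are named set_freq_show()/set_freq_store(). A small userspace toy (the toy_attr type and TOY_ATTR_RW macro are hypothetical stand-ins) demonstrating the token-pasting contract:

#include <stdio.h>

struct toy_attr { const char *name; int (*show)(void); int (*store)(int); };

static int set_freq_show(void) { return 42; }			/* stand-in handler */
static int set_freq_store(int v) { printf("store %d\n", v); return 0; }

/* like DEVICE_ATTR_RW(): only works with <name>_show/<name>_store handlers */
#define TOY_ATTR_RW(_name) \
	struct toy_attr toy_attr_##_name = { #_name, _name##_show, _name##_store }

static TOY_ATTR_RW(set_freq);

int main(void)
{
	printf("%s -> %d\n", toy_attr_set_freq.name, toy_attr_set_freq.show());
	return toy_attr_set_freq.store(7);
}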
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index 3fc3fd77492d..f3f6e25053ed 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -45,18 +45,6 @@ static int imx_bus_get_cur_freq(struct device *dev, unsigned long *freq)
return 0;
}
-static int imx_bus_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct imx_bus *priv = dev_get_drvdata(dev);
-
- stat->busy_time = 0;
- stat->total_time = 0;
- stat->current_frequency = clk_get_rate(priv->clk);
-
- return 0;
-}
-
static void imx_bus_exit(struct device *dev)
{
struct imx_bus *priv = dev_get_drvdata(dev);
@@ -129,9 +117,7 @@ static int imx_bus_probe(struct platform_device *pdev)
return ret;
}
- priv->profile.polling_ms = 1000;
priv->profile.target = imx_bus_target;
- priv->profile.get_dev_status = imx_bus_get_dev_status;
priv->profile.exit = imx_bus_exit;
priv->profile.get_cur_freq = imx_bus_get_cur_freq;
priv->profile.initial_freq = clk_get_rate(priv->clk);
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index ce83f883ca65..10661eb2aed8 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -688,6 +688,7 @@ static struct devfreq_dev_profile tegra_devfreq_profile = {
.polling_ms = ACTMON_SAMPLING_PERIOD,
.target = tegra_devfreq_target,
.get_dev_status = tegra_devfreq_get_dev_status,
+ .is_cooling_device = true,
};
static int tegra_governor_get_target(struct devfreq *devfreq,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ab9d9a488a6..39b5b46e880f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -59,6 +59,7 @@ config DMA_OF
#devices
config ALTERA_MSGDMA
tristate "Altera / Intel mSGDMA Engine"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
config XILINX_ZYNQMP_DPDMA
tristate "Xilinx DPDMA Engine"
+ depends on HAS_IOMEM && OF
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ec909e0b810..4ae057922ef1 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI major version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI minor version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
ppriv->store =
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
if (!ppriv->store) {
+ err = -ENOMEM;
dev_err(dev, "dpaa2_io_store_create() failed\n");
goto err_store;
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 302cba5ff779..d4419bf1fede 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
+ rc = -EINVAL;
goto failed;
}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2a926bef87f2..442d55c11a5f 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt <= 0)
+ return;
+
+ irq_entry = &idxd->irq_entries[0];
+ free_irq(irq_entry->vector, irq_entry);
+
+ for (i = 1; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+ idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ free_irq(irq_entry->vector, irq_entry);
+ }
+
+ idxd_mask_error_interrupts(idxd);
+ pci_free_irq_vectors(pdev);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine->idxd = idxd;
device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.bus = &dsa_bus_type;
engine->conf_dev.type = &idxd_engine_device_type;
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ destroy_workqueue(idxd->wq);
+}
+
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "Loading RO device config\n");
rc = idxd_device_load_config(idxd);
if (rc < 0)
- goto err;
+ goto err_config;
}
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err;
+ goto err_config;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
+ err_config:
+ idxd_cleanup_internals(idxd);
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ idxd_cleanup_internals(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- goto err;
+ goto err_dev_register;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ err_dev_register:
+ idxd_cleanup(idxd);
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
@@ -745,12 +801,12 @@ static int __init idxd_init_module(void)
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
 * enumerating the device. We cannot utilize it.
*/
- if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
pr_warn("idxd driver failed to load without MOVDIR64B.\n");
return -ENODEV;
}
- if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
pr_warn("Platform does not have ENQCMD(S) support.\n");
else
support_enqcmd = true;
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
+ idxd_unregister_driver();
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
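The new err_config and err_dev_register labels follow the usual goto-unwind idiom: each label tears down exactly the setup steps that succeeded before the failure, in reverse order, with idxd_cleanup_internals()/idxd_cleanup() mirroring the corresponding setup routines. A self-contained sketch of the pattern (setup_a/setup_b/cleanup_a are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return -1; }	/* simulate failure */
static void cleanup_a(void) { puts("cleanup a"); }

int probe(void)
{
	int rc = setup_a();
	if (rc)
		goto err;
	rc = setup_b();
	if (rc)
		goto err_a;
	return 0;
err_a:
	cleanup_a();	/* unwind only the steps that succeeded */
err:
	return rc;
}

int main(void) { return probe() ? EXIT_FAILURE : EXIT_SUCCESS; }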
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 0d5c42f7bfa4..97d9a6f04f2a 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -230,7 +230,7 @@ out:
}
/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
* @source: interrupt source bit position (see ipu_irq_map())
* @return: 0 or negative error code
*/
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 27c07350971d..375e7e647df6 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
- struct dma_chan *chan = vd->tx.chan;
- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
- kfree(c->desc);
+ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
- struct mtk_uart_apdma_desc *d = c->desc;
-
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
}
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
c->rx_status = d->avail_len - cnt;
mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+ struct mtk_uart_apdma_desc *d = c->desc;
+
+ if (d) {
+ list_del(&d->vd.node);
+ vchan_cookie_complete(&d->vd);
+ c->desc = NULL;
+ }
}
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
mtk_uart_apdma_rx_handler(c);
else if (c->dir == DMA_MEM_TO_DEV)
mtk_uart_apdma_tx_handler(c);
+ mtk_uart_apdma_chan_complete_handler(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
return NULL;
/* Now allocate and setup the descriptor */
- d = kzalloc(sizeof(*d), GFP_ATOMIC);
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
return NULL;
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc)) {
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
vd = vchan_next_desc(&c->vc);
c->desc = to_mtk_uart_apdma_desc(&vd->tx);
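After this rework the driver keeps at most one descriptor in flight per channel: issue_pending() starts a transfer only while c->desc is NULL, and the IRQ path funnels completion through one helper that clears c->desc. A condensed sketch of that invariant (mtk_chan_complete is a hypothetical name summarizing the hunks above):

static void mtk_chan_complete(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (!d)			/* IRQ with nothing in flight: ignore */
		return;
	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
	c->desc = NULL;		/* lets the next issue_pending() start again */
}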
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fd8d2bc3be9f..110de8a60058 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
+ unsigned long iflags;
+
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, iflags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
return NULL;
}
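The new iflags local exists because, in pl330_prep_dma_cyclic(), the identifier flags is already the caller's DMA_PREP_* argument; spin_lock_irqsave() is a macro that assigns its saved IRQ state into whatever lvalue it is handed, so reusing flags clobbered the argument. A minimal illustration of the hazard (refill_pool is a hypothetical name):

static void refill_pool(struct pl330_dmac *pl330, unsigned long flags)
{
	unsigned long iflags;				/* dedicated irqsave storage */

	spin_lock_irqsave(&pl330->pool_lock, iflags);	/* "flags" stays intact */
	/* ... return descriptors to pl330->desc_pool ... */
	spin_unlock_irqrestore(&pl330->pool_lock, iflags);
}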
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 365f94eb3b08..3f926a653bd8 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA Management.
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
index f8ffa02e279f..ba46a0a15a93 100644
--- a/drivers/dma/sf-pdma/Kconfig
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -1,5 +1,6 @@
config SF_PDMA
tristate "Sifive PDMA controller driver"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d530c1bf11d9..6885b3dcd7a9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
return ret;
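pm_runtime_get_sync() bumps the device usage counter even when resume fails, so every error return after it would otherwise need a pm_runtime_put(); pm_runtime_resume_and_get() drops the reference itself on failure, which is what makes these early returns safe. A sketch of its effective behavior (the real helper lives in include/linux/pm_runtime.h):

static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the usage-count bump */
		return ret;
	}
	return 0;
}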
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 265d7c07b348..e1827393143f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
+ if (base->lcpa_base)
+ iounmap(base->lcpa_base);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 36ba8b43e78d..18cbd1e43c2e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
u32 ccr, id;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 70b29bd079c9..6c709803203a 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -113,6 +113,7 @@
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
* will be used, but it should be enough.
*/
list_for_each_entry(sw_desc, &desc->descriptors, node)
- sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+ sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
sw_desc = list_first_entry(&desc->descriptors,
struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
if (!chan->running || !pending)
goto out;
- desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+ desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
/* If the retrigger raced with vsync, retry at the next frame. */
sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
*/
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+ void __iomem *reg;
+
+ /* Disable all interrupts */
+ xilinx_dpdma_disable_irq(xdev);
+
+ /* Stop all channels */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * i;
+ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+ }
+
+ /* Clear the interrupt status registers */
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
if (IS_ERR(xdev->reg))
return PTR_ERR(xdev->reg);
+ dpdma_hw_init(xdev);
+
xdev->irq = platform_get_irq(pdev, 0);
if (xdev->irq < 0) {
dev_err(xdev->dev, "failed to get platform irq\n");
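The new GENMASK(15, 0) mask reflects that the hardware DESC_ID register holds only 16 bits while DMA cookies are 32-bit and keep growing, so the write side and the read-back comparison must both be reduced to the low 16 bits. A standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define DESC_ID_MASK 0xffffu	/* GENMASK(15, 0) */

int main(void)
{
	uint32_t cookie = 0x12345;			/* cookie past 16 bits */
	uint32_t hw_id = cookie & DESC_ID_MASK;		/* what the register stores */

	assert(hw_id == 0x2345);
	/* the read-back compare only matches if the cookie is masked the same way */
	assert(hw_id == (cookie & DESC_ID_MASK));
	return 0;
}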
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d8419565b92c..5fecf5aa6e85 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
struct zynqmp_dma_desc_sw *desc;
int i, ret;
- ret = pm_runtime_get_sync(chan->dev);
+ ret = pm_runtime_resume_and_get(chan->dev);
if (ret < 0)
return ret;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 1e836e320edd..91164c5f0757 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -270,7 +270,8 @@ config EDAC_PND2
config EDAC_IGEN6
tristate "Intel client SoC Integrated MC"
- depends on PCI && X86_64 && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
+ depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
+ depends on X86_64 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
client SoC Integrated Memory Controller using In-Band ECC IP.
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index a46da56d6d54..6bd5f8815919 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -254,8 +254,8 @@ static int init_csrows(struct mem_ctl_info *mci)
return rc;
}
- dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
- r.start, resource_size(&r), PAGE_SHIFT);
+ dev_dbg(mci->pdev, "dt: /memory node resources: first page %pR, PAGE_SHIFT macro=0x%x\n",
+ &r, PAGE_SHIFT);
csrow->first_page = r.start >> PAGE_SHIFT;
nr_pages = resource_size(&r) >> PAGE_SHIFT;
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 238a4ad1e526..6ce0ed2ffaaf 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -13,7 +13,7 @@
#include "edac_module.h"
#include "skx_common.h"
-#define I10NM_REVISION "v0.0.4"
+#define I10NM_REVISION "v0.0.5"
#define EDAC_MOD_STR "i10nm_edac"
/* Debug macros */
@@ -24,19 +24,39 @@
pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg) \
pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
+#define I10NM_GET_SAD(d, offset, i, reg)\
+ pci_read_config_dword((d)->sad_all, (offset) + (i) * 8, &(reg))
+#define I10NM_GET_HBM_IMC_BAR(d, reg) \
+ pci_read_config_dword((d)->uracu, 0xd4, &(reg))
+#define I10NM_GET_CAPID3_CFG(d, reg) \
+ pci_read_config_dword((d)->pcu_cr3, 0x90, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j) \
- readl((m)->mbase + 0x2080c + (i) * (m)->chan_mmio_sz + (j) * 4)
+ readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
+ (i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i, j) \
- readl((m)->mbase + 0x20970 + (i) * (m)->chan_mmio_sz + (j) * 4)
+ readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
+ (i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCMTR(m, i) \
- readl((m)->mbase + 0x20ef8 + (i) * (m)->chan_mmio_sz)
+ readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
+ (i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i) \
- readl((m)->mbase + 0x20814 + (i) * (m)->chan_mmio_sz)
+ readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
+ (i) * (m)->chan_mmio_sz)
#define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg) ((GET_BITFIELD(reg, 13, 23) - \
GET_BITFIELD(reg, 0, 10) + 1) << 12)
+#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg) \
+ ((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
+
+#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
+#define I10NM_IS_HBM_PRESENT(reg) GET_BITFIELD(reg, 27, 30)
+#define I10NM_IS_HBM_IMC(reg) GET_BITFIELD(reg, 29, 29)
+
+#define I10NM_MAX_SAD 16
+#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
+#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
static struct list_head *i10nm_edac_list;
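One pattern worth spelling out in the reworked I10NM_GET_* macros above: the HBM controllers expose the same per-channel registers as the DDR controllers, just without the 0x20000 base (0x80c vs 0x2080c, 0x970 vs 0x20970, and so on). A hypothetical helper capturing that relationship, with the offsets taken from the macros:

static inline u32 i10nm_chan_reg_off(bool hbm_mc, u32 ddr_off)
{
	/* HBM MCs use the low register window; DDR MCs sit 0x20000 above */
	return hbm_mc ? ddr_off - 0x20000 : ddr_off;
}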
@@ -63,7 +83,32 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
return pdev;
}
-static int i10nm_get_all_munits(void)
+static bool i10nm_check_2lm(struct res_config *cfg)
+{
+ struct skx_dev *d;
+ u32 reg;
+ int i;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[1],
+ PCI_SLOT(cfg->sad_all_devfn),
+ PCI_FUNC(cfg->sad_all_devfn));
+ if (!d->sad_all)
+ continue;
+
+ for (i = 0; i < I10NM_MAX_SAD; i++) {
+ I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
+ if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
+ edac_dbg(2, "2-level memory configuration.\n");
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
void __iomem *mbase;
@@ -91,7 +136,7 @@ static int i10nm_get_all_munits(void)
edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
j++, base, reg);
- for (i = 0; i < I10NM_NUM_IMC; i++) {
+ for (i = 0; i < I10NM_NUM_DDR_IMC; i++) {
mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
12 + i, 0);
if (i == 0 && !mdev) {
@@ -127,11 +172,97 @@ static int i10nm_get_all_munits(void)
return 0;
}
+static bool i10nm_check_hbm_imc(struct skx_dev *d)
+{
+ u32 reg;
+
+ if (I10NM_GET_CAPID3_CFG(d, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
+ return false;
+ }
+
+ return I10NM_IS_HBM_PRESENT(reg) != 0;
+}
+
+static int i10nm_get_hbm_munits(void)
+{
+ struct pci_dev *mdev;
+ void __iomem *mbase;
+ u32 reg, off, mcmtr;
+ struct skx_dev *d;
+ int i, lmc;
+ u64 base;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[1], 30, 3);
+ if (!d->pcu_cr3)
+ return -ENODEV;
+
+ if (!i10nm_check_hbm_imc(d)) {
+ i10nm_printk(KERN_DEBUG, "No hbm memory\n");
+ return -ENODEV;
+ }
+
+ if (I10NM_GET_SCK_BAR(d, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
+ return -ENODEV;
+ }
+ base = I10NM_GET_SCK_MMIO_BASE(reg);
+
+ if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
+ return -ENODEV;
+ }
+ base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);
+
+ lmc = I10NM_NUM_DDR_IMC;
+
+ for (i = 0; i < I10NM_NUM_HBM_IMC; i++) {
+ mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
+ 12 + i / 4, 1 + i % 4);
+ if (i == 0 && !mdev) {
+ i10nm_printk(KERN_ERR, "No hbm mc found\n");
+ return -ENODEV;
+ }
+ if (!mdev)
+ continue;
+
+ d->imc[lmc].mdev = mdev;
+ off = i * I10NM_HBM_IMC_MMIO_SIZE;
+
+ edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
+ lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);
+
+ mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
+ if (!mbase) {
+ i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
+ base + off);
+ return -ENOMEM;
+ }
+
+ d->imc[lmc].mbase = mbase;
+ d->imc[lmc].hbm_mc = true;
+
+ mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
+ if (!I10NM_IS_HBM_IMC(mcmtr)) {
+ i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
+ return -ENODEV;
+ }
+
+ lmc++;
+ }
+ }
+
+ return 0;
+}
+
static struct res_config i10nm_cfg0 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xcc,
.ddr_chan_mmio_sz = 0x4000,
+ .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_offset = 0x108,
};
static struct res_config i10nm_cfg1 = {
@@ -139,6 +270,8 @@ static struct res_config i10nm_cfg1 = {
.decs_did = 0x3452,
.busno_cfg_offset = 0xd0,
.ddr_chan_mmio_sz = 0x4000,
+ .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_offset = 0x108,
};
static struct res_config spr_cfg = {
@@ -146,7 +279,10 @@ static struct res_config spr_cfg = {
.decs_did = 0x3252,
.busno_cfg_offset = 0xd0,
.ddr_chan_mmio_sz = 0x8000,
+ .hbm_chan_mmio_sz = 0x4000,
.support_ddr5 = true,
+ .sad_all_devfn = PCI_DEVFN(10, 0),
+ .sad_all_offset = 0x300,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -179,13 +315,13 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct dimm_info *dimm;
int i, j, ndimms;
- for (i = 0; i < I10NM_NUM_CHANNELS; i++) {
+ for (i = 0; i < imc->num_channels; i++) {
if (!imc->mbase)
continue;
ndimms = 0;
amap = I10NM_GET_AMAP(imc, i);
- for (j = 0; j < I10NM_NUM_DIMMS; j++) {
+ for (j = 0; j < imc->num_dimms; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
@@ -278,6 +414,9 @@ static int __init i10nm_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(i10nm_cpuids);
if (!id)
return -ENODEV;
@@ -296,8 +435,11 @@ static int __init i10nm_init(void)
return -ENODEV;
}
- rc = i10nm_get_all_munits();
- if (rc < 0)
+ skx_set_mem_cfg(i10nm_check_2lm(cfg));
+
+ rc = i10nm_get_ddr_munits();
+
+ if (i10nm_get_hbm_munits() && rc)
goto fail;
list_for_each_entry(d, i10nm_edac_list, list) {
@@ -318,7 +460,15 @@ static int __init i10nm_init(void)
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
d->imc[i].node_id = node_id;
- d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
+ if (d->imc[i].hbm_mc) {
+ d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
+ d->imc[i].num_channels = I10NM_NUM_HBM_CHANNELS;
+ d->imc[i].num_dimms = I10NM_NUM_HBM_DIMMS;
+ } else {
+ d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
+ d->imc[i].num_channels = I10NM_NUM_DDR_CHANNELS;
+ d->imc[i].num_dimms = I10NM_NUM_DDR_DIMMS;
+ }
rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
"Intel_10nm Socket", EDAC_MOD_STR,
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 6be9986fc6bd..a07bbfd075d0 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -22,11 +22,12 @@
#include <linux/io.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
+#include <asm/mce.h>
#include "edac_mc.h"
#include "edac_module.h"
-#define IGEN6_REVISION "v2.4"
+#define IGEN6_REVISION "v2.5"
#define EDAC_MOD_STR "igen6_edac"
#define IGEN6_NMI_NAME "igen6_ibecc"
@@ -40,7 +41,7 @@
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
-#define NUM_IMC 1 /* Max memory controllers */
+#define NUM_IMC 2 /* Max memory controllers */
#define NUM_CHANNELS 2 /* Max channels */
#define NUM_DIMMS 2 /* Max DIMMs per channel */
@@ -54,6 +55,10 @@
#define CAPID_C_OFFSET 0xec
#define CAPID_C_IBECC BIT(15)
+/* Capability register E */
+#define CAPID_E_OFFSET 0xf0
+#define CAPID_E_IBECC BIT(12)
+
/* Error Status */
#define ERRSTS_OFFSET 0xc8
#define ERRSTS_CE BIT_ULL(6)
@@ -70,7 +75,7 @@
#define IBECC_ACTIVATE_EN BIT(0)
/* IBECC error log */
-#define ECC_ERROR_LOG_OFFSET (IBECC_BASE + 0x170)
+#define ECC_ERROR_LOG_OFFSET (IBECC_BASE + res_cfg->ibecc_error_log_offset)
#define ECC_ERROR_LOG_CE BIT_ULL(62)
#define ECC_ERROR_LOG_UE BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT 5
@@ -84,39 +89,54 @@
#define MCHBAR_SIZE 0x10000
/* Parameters for the channel decode stage */
-#define MAD_INTER_CHANNEL_OFFSET 0x5000
+#define IMC_BASE (res_cfg->imc_base)
+#define MAD_INTER_CHANNEL_OFFSET IMC_BASE
#define MAD_INTER_CHANNEL_DDR_TYPE(v) GET_BITFIELD(v, 0, 2)
#define MAD_INTER_CHANNEL_ECHM(v) GET_BITFIELD(v, 3, 3)
#define MAD_INTER_CHANNEL_CH_L_MAP(v) GET_BITFIELD(v, 4, 4)
#define MAD_INTER_CHANNEL_CH_S_SIZE(v) ((u64)GET_BITFIELD(v, 12, 19) << 29)
/* Parameters for DRAM decode stage */
-#define MAD_INTRA_CH0_OFFSET 0x5004
+#define MAD_INTRA_CH0_OFFSET (IMC_BASE + 4)
#define MAD_INTRA_CH_DIMM_L_MAP(v) GET_BITFIELD(v, 0, 0)
/* DIMM characteristics */
-#define MAD_DIMM_CH0_OFFSET 0x500c
+#define MAD_DIMM_CH0_OFFSET (IMC_BASE + 0xc)
#define MAD_DIMM_CH_DIMM_L_SIZE(v) ((u64)GET_BITFIELD(v, 0, 6) << 29)
#define MAD_DIMM_CH_DLW(v) GET_BITFIELD(v, 7, 8)
#define MAD_DIMM_CH_DIMM_S_SIZE(v) ((u64)GET_BITFIELD(v, 16, 22) << 29)
#define MAD_DIMM_CH_DSW(v) GET_BITFIELD(v, 24, 25)
+/* Hash for memory controller selection */
+#define MAD_MC_HASH_OFFSET (IMC_BASE + 0x1b8)
+#define MAC_MC_HASH_LSB(v) GET_BITFIELD(v, 1, 3)
+
/* Hash for channel selection */
-#define CHANNEL_HASH_OFFSET 0X5024
+#define CHANNEL_HASH_OFFSET (IMC_BASE + 0x24)
/* Hash for enhanced channel selection */
-#define CHANNEL_EHASH_OFFSET 0X5028
+#define CHANNEL_EHASH_OFFSET (IMC_BASE + 0x28)
#define CHANNEL_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define CHANNEL_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
#define CHANNEL_HASH_MODE(v) GET_BITFIELD(v, 28, 28)
+/* Parameters for memory slice decode stage */
+#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
+#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
+
static struct res_config {
+ bool machine_check;
int num_imc;
+ u32 imc_base;
+ u32 cmf_base;
+ u32 cmf_size;
+ u32 ms_hash_offset;
u32 ibecc_base;
+ u32 ibecc_error_log_offset;
bool (*ibecc_available)(struct pci_dev *pdev);
/* Convert error address logged in IBECC to system physical address */
- u64 (*err_addr_to_sys_addr)(u64 eaddr);
+ u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
/* Convert error address logged in IBECC to integrated memory controller address */
- u64 (*err_addr_to_imc_addr)(u64 eaddr);
+ u64 (*err_addr_to_imc_addr)(u64 eaddr, int mc);
} *res_cfg;
struct igen6_imc {
@@ -125,6 +145,7 @@ struct igen6_imc {
struct pci_dev *pdev;
struct device dev;
void __iomem *window;
+ u64 size;
u64 ch_s_size;
int ch_l_map;
u64 dimm_s_size[NUM_CHANNELS];
@@ -134,6 +155,9 @@ struct igen6_imc {
static struct igen6_pvt {
struct igen6_imc imc[NUM_IMC];
+ u64 ms_hash;
+ u64 ms_s_size;
+ int ms_l_map;
} *igen6_pvt;
/* The top of low usable DRAM */
@@ -183,6 +207,21 @@ static struct work_struct ecclog_work;
#define DID_EHL_SKU14 0x4534
#define DID_EHL_SKU15 0x4536
+/* Compute die IDs for ICL-NNPI with IBECC */
+#define DID_ICL_SKU8 0x4581
+#define DID_ICL_SKU10 0x4585
+#define DID_ICL_SKU11 0x4589
+#define DID_ICL_SKU12 0x458d
+
+/* Compute die IDs for Tiger Lake with IBECC */
+#define DID_TGL_SKU 0x9a14
+
+/* Compute die IDs for Alder Lake with IBECC */
+#define DID_ADL_SKU1 0x4601
+#define DID_ADL_SKU2 0x4602
+#define DID_ADL_SKU3 0x4621
+#define DID_ADL_SKU4 0x4641
+
static bool ehl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
@@ -193,12 +232,12 @@ static bool ehl_ibecc_available(struct pci_dev *pdev)
return !!(CAPID_C_IBECC & v);
}
-static u64 ehl_err_addr_to_sys_addr(u64 eaddr)
+static u64 ehl_err_addr_to_sys_addr(u64 eaddr, int mc)
{
return eaddr;
}
-static u64 ehl_err_addr_to_imc_addr(u64 eaddr)
+static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
{
if (eaddr < igen6_tolud)
return eaddr;
@@ -212,12 +251,156 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr)
return eaddr;
}
+static bool icl_ibecc_available(struct pci_dev *pdev)
+{
+ u32 v;
+
+ if (pci_read_config_dword(pdev, CAPID_C_OFFSET, &v))
+ return false;
+
+ return !(CAPID_C_IBECC & v) &&
+ (boot_cpu_data.x86_stepping >= 1);
+}
+
+static bool tgl_ibecc_available(struct pci_dev *pdev)
+{
+ u32 v;
+
+ if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
+ return false;
+
+ return !(CAPID_E_IBECC & v);
+}
+
+static u64 mem_addr_to_sys_addr(u64 maddr)
+{
+ if (maddr < igen6_tolud)
+ return maddr;
+
+ if (igen6_tom <= _4GB)
+ return maddr - igen6_tolud + _4GB;
+
+ if (maddr < _4GB)
+ return maddr - igen6_tolud + igen6_tom;
+
+ return maddr;
+}
+
+static u64 mem_slice_hash(u64 addr, u64 mask, u64 hash_init, int intlv_bit)
+{
+ u64 hash_addr = addr & mask, hash = hash_init;
+ u64 intlv = (addr >> intlv_bit) & 1;
+ int i;
+
+ for (i = 6; i < 20; i++)
+ hash ^= (hash_addr >> i) & 1;
+
+ return hash ^ intlv;
+}
+
+static u64 tgl_err_addr_to_mem_addr(u64 eaddr, int mc)
+{
+ u64 maddr, hash, mask, ms_s_size;
+ int intlv_bit;
+ u32 ms_hash;
+
+ ms_s_size = igen6_pvt->ms_s_size;
+ if (eaddr >= ms_s_size)
+ return eaddr + ms_s_size;
+
+ ms_hash = igen6_pvt->ms_hash;
+
+ mask = MEM_SLICE_HASH_MASK(ms_hash);
+ intlv_bit = MEM_SLICE_HASH_LSB_MASK_BIT(ms_hash) + 6;
+
+ maddr = GET_BITFIELD(eaddr, intlv_bit, 63) << (intlv_bit + 1) |
+ GET_BITFIELD(eaddr, 0, intlv_bit - 1);
+
+ hash = mem_slice_hash(maddr, mask, mc, intlv_bit);
+
+ return maddr | (hash << intlv_bit);
+}
+
+static u64 tgl_err_addr_to_sys_addr(u64 eaddr, int mc)
+{
+ u64 maddr = tgl_err_addr_to_mem_addr(eaddr, mc);
+
+ return mem_addr_to_sys_addr(maddr);
+}
+
+static u64 tgl_err_addr_to_imc_addr(u64 eaddr, int mc)
+{
+ return eaddr;
+}
+
+static u64 adl_err_addr_to_sys_addr(u64 eaddr, int mc)
+{
+ return mem_addr_to_sys_addr(eaddr);
+}
+
+static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
+{
+ u64 imc_addr, ms_s_size = igen6_pvt->ms_s_size;
+ struct igen6_imc *imc = &igen6_pvt->imc[mc];
+ int intlv_bit;
+ u32 mc_hash;
+
+ if (eaddr >= 2 * ms_s_size)
+ return eaddr - ms_s_size;
+
+ mc_hash = readl(imc->window + MAD_MC_HASH_OFFSET);
+
+ intlv_bit = MAC_MC_HASH_LSB(mc_hash) + 6;
+
+ imc_addr = GET_BITFIELD(eaddr, intlv_bit + 1, 63) << intlv_bit |
+ GET_BITFIELD(eaddr, 0, intlv_bit - 1);
+
+ return imc_addr;
+}
+
static struct res_config ehl_cfg = {
- .num_imc = 1,
- .ibecc_base = 0xdc00,
- .ibecc_available = ehl_ibecc_available,
- .err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
- .err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
+ .num_imc = 1,
+ .imc_base = 0x5000,
+ .ibecc_base = 0xdc00,
+ .ibecc_available = ehl_ibecc_available,
+ .ibecc_error_log_offset = 0x170,
+ .err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
+};
+
+static struct res_config icl_cfg = {
+ .num_imc = 1,
+ .imc_base = 0x5000,
+ .ibecc_base = 0xd800,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = icl_ibecc_available,
+ .err_addr_to_sys_addr = ehl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
+};
+
+static struct res_config tgl_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0x5000,
+ .cmf_base = 0x11000,
+ .cmf_size = 0x800,
+ .ms_hash_offset = 0xac,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr_to_sys_addr = tgl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
+};
+
+static struct res_config adl_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x68,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
static const struct pci_device_id igen6_pci_tbl[] = {
@@ -232,6 +415,15 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU13), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU14), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU15), (kernel_ulong_t)&ehl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ICL_SKU8), (kernel_ulong_t)&icl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ICL_SKU10), (kernel_ulong_t)&icl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ICL_SKU11), (kernel_ulong_t)&icl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ICL_SKU12), (kernel_ulong_t)&icl_cfg },
+ { PCI_VDEVICE(INTEL, DID_TGL_SKU), (kernel_ulong_t)&tgl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_SKU1), (kernel_ulong_t)&adl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -490,8 +682,8 @@ static void ecclog_work_cb(struct work_struct *work)
eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
ECC_ERROR_LOG_ADDR_SHIFT;
res.mc = node->mc;
- res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr);
- res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr);
+ res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
+ res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);
mci = igen6_pvt->imc[res.mc].mci;
@@ -540,6 +732,57 @@ static int ecclog_nmi_handler(unsigned int cmd, struct pt_regs *regs)
return NMI_HANDLED;
}
+static int ecclog_mce_handler(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory related error.
+ * We don't check the bit MCI_STATUS_ADDRV of MCi_STATUS here,
+ * since this bit isn't set on some CPUs (e.g., Tiger Lake UP3).
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+ /*
+ * We just use the Machine Check for the memory error notification.
+ * Each memory controller is associated with an IBECC instance.
+ * Directly read and clear the error information(error address and
+ * error type) on all the IBECC instances so that we know on which
+ * memory controller the memory error(s) occurred.
+ */
+ if (!ecclog_handler())
+ return NOTIFY_DONE;
+
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ecclog_mce_dec = {
+ .notifier_call = ecclog_mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
static bool igen6_check_ecc(struct igen6_imc *imc)
{
u32 activate = readl(imc->window + IBECC_ACTIVATE_OFFSET);
@@ -573,6 +816,8 @@ static int igen6_get_dimm_config(struct mem_ctl_info *mci)
imc->dimm_l_size[i] = MAD_DIMM_CH_DIMM_L_SIZE(mad_dimm);
imc->dimm_s_size[i] = MAD_DIMM_CH_DIMM_S_SIZE(mad_dimm);
imc->dimm_l_map[i] = MAD_INTRA_CH_DIMM_L_MAP(mad_intra);
+ imc->size += imc->dimm_s_size[i];
+ imc->size += imc->dimm_l_size[i];
ndimms = 0;
for (j = 0; j < NUM_DIMMS; j++) {
@@ -608,6 +853,8 @@ static int igen6_get_dimm_config(struct mem_ctl_info *mci)
}
}
+ edac_dbg(0, "MC %d, total size %llu MiB\n", mc, imc->size >> 20);
+
return 0;
}
@@ -857,6 +1104,80 @@ static void igen6_unregister_mcis(void)
}
}
+static int igen6_mem_slice_setup(u64 mchbar)
+{
+ struct igen6_imc *imc = &igen6_pvt->imc[0];
+ u64 base = mchbar + res_cfg->cmf_base;
+ u32 offset = res_cfg->ms_hash_offset;
+ u32 size = res_cfg->cmf_size;
+ u64 ms_s_size, ms_hash;
+ void __iomem *cmf;
+ int ms_l_map;
+
+ edac_dbg(2, "\n");
+
+ if (imc[0].size < imc[1].size) {
+ ms_s_size = imc[0].size;
+ ms_l_map = 1;
+ } else {
+ ms_s_size = imc[1].size;
+ ms_l_map = 0;
+ }
+
+ igen6_pvt->ms_s_size = ms_s_size;
+ igen6_pvt->ms_l_map = ms_l_map;
+
+ edac_dbg(0, "ms_s_size: %llu MiB, ms_l_map %d\n",
+ ms_s_size >> 20, ms_l_map);
+
+ if (!size)
+ return 0;
+
+ cmf = ioremap(base, size);
+ if (!cmf) {
+ igen6_printk(KERN_ERR, "Failed to ioremap cmf 0x%llx\n", base);
+ return -ENODEV;
+ }
+
+ ms_hash = readq(cmf + offset);
+ igen6_pvt->ms_hash = ms_hash;
+
+ edac_dbg(0, "MEM_SLICE_HASH: 0x%llx\n", ms_hash);
+
+ iounmap(cmf);
+
+ return 0;
+}
+
+static int register_err_handler(void)
+{
+ int rc;
+
+ if (res_cfg->machine_check) {
+ mce_register_decode_chain(&ecclog_mce_dec);
+ return 0;
+ }
+
+ rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
+ 0, IGEN6_NMI_NAME);
+ if (rc) {
+ igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void unregister_err_handler(void)
+{
+ if (res_cfg->machine_check) {
+ mce_unregister_decode_chain(&ecclog_mce_dec);
+ return;
+ }
+
+ unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
+}
+
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
@@ -880,6 +1201,12 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto fail2;
}
+ if (res_cfg->num_imc > 1) {
+ rc = igen6_mem_slice_setup(mchbar);
+ if (rc)
+ goto fail2;
+ }
+
ecclog_pool = ecclog_gen_pool_create();
if (!ecclog_pool) {
rc = -ENOMEM;
@@ -892,12 +1219,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Check if any pending errors before registering the NMI handler */
ecclog_handler();
- rc = register_nmi_handler(NMI_SERR, ecclog_nmi_handler,
- 0, IGEN6_NMI_NAME);
- if (rc) {
- igen6_printk(KERN_ERR, "Failed to register NMI handler\n");
+ rc = register_err_handler();
+ if (rc)
goto fail3;
- }
/* Enable error reporting */
rc = errcmd_enable_error_reporting(true);
@@ -925,7 +1249,7 @@ static void igen6_remove(struct pci_dev *pdev)
igen6_debug_teardown();
errcmd_enable_error_reporting(false);
- unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
+ unregister_err_handler();
irq_work_sync(&ecclog_irq_work);
flush_work(&ecclog_work);
gen_pool_destroy(ecclog_pool);
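The Tiger Lake memory-slice decode hinges on mem_slice_hash() above: the slice bit is the XOR parity of the mask-selected address bits 6..19, XORed with hash_init and with the interleave bit pulled from the address. A standalone restatement that can be compiled and sanity-checked:

#include <assert.h>
#include <stdint.h>

static uint64_t mem_slice_hash(uint64_t addr, uint64_t mask,
			       uint64_t hash_init, int intlv_bit)
{
	uint64_t hash_addr = addr & mask, hash = hash_init;
	uint64_t intlv = (addr >> intlv_bit) & 1;
	int i;

	/* parity over the mask-selected bits 6..19 of the address */
	for (i = 6; i < 20; i++)
		hash ^= (hash_addr >> i) & 1;

	return hash ^ intlv;
}

int main(void)
{
	/* mask selecting bit 6 only, interleave bit 8: parity(bit6) ^ bit8 */
	assert(mem_slice_hash(0x40, 0x40, 0, 8) == 1);
	assert(mem_slice_hash(0x140, 0x40, 0, 8) == 0);
	return 0;
}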
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 5dd905a3f30c..27d56920b469 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -323,6 +323,21 @@ static const char * const smca_umc_mce_desc[] = {
"AES SRAM ECC error",
};
+static const char * const smca_umc2_mce_desc[] = {
+ "DRAM ECC error",
+ "Data poison error",
+ "SDP parity error",
+ "Reserved",
+ "Address/Command parity error",
+ "Write data parity error",
+ "DCQ SRAM ECC error",
+ "Reserved",
+ "Read data parity error",
+ "Rdb SRAM ECC error",
+ "RdRsp SRAM ECC error",
+ "LM32 MP errors",
+};
+
static const char * const smca_pb_mce_desc[] = {
"An ECC error in the Parameter Block RAM array",
};
@@ -400,6 +415,56 @@ static const char * const smca_pcie_mce_desc[] = {
"CCIX Non-okay write response with data error",
};
+static const char * const smca_pcie2_mce_desc[] = {
+ "SDP Parity Error logging",
+};
+
+static const char * const smca_xgmipcs_mce_desc[] = {
+ "Data Loss Error",
+ "Training Error",
+ "Flow Control Acknowledge Error",
+ "Rx Fifo Underflow Error",
+ "Rx Fifo Overflow Error",
+ "CRC Error",
+ "BER Exceeded Error",
+ "Tx Vcid Data Error",
+ "Replay Buffer Parity Error",
+ "Data Parity Error",
+ "Replay Fifo Overflow Error",
+ "Replay Fifo Underflow Error",
+ "Elastic Fifo Overflow Error",
+ "Deskew Error",
+ "Flow Control CRC Error",
+ "Data Startup Limit Error",
+ "FC Init Timeout Error",
+ "Recovery Timeout Error",
+ "Ready Serial Timeout Error",
+ "Ready Serial Attempt Error",
+ "Recovery Attempt Error",
+ "Recovery Relock Attempt Error",
+ "Replay Attempt Error",
+ "Sync Header Error",
+ "Tx Replay Timeout Error",
+ "Rx Replay Timeout Error",
+ "LinkSub Tx Timeout Error",
+ "LinkSub Rx Timeout Error",
+ "Rx CMD Pocket Error",
+};
+
+static const char * const smca_xgmiphy_mce_desc[] = {
+ "RAM ECC Error",
+ "ARC instruction buffer parity error",
+ "ARC data buffer parity error",
+ "PHY APB error",
+};
+
+static const char * const smca_waflphy_mce_desc[] = {
+ "RAM ECC Error",
+ "ARC instruction buffer parity error",
+ "ARC data buffer parity error",
+ "PHY APB error",
+};
+
struct smca_mce_desc {
const char * const *descs;
unsigned int num_descs;
@@ -418,6 +483,7 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
[SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
[SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
+ [SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
[SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
[SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
[SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
@@ -426,6 +492,10 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
[SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
[SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
+ [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
+ [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
+ [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
+ [SMCA_WAFL_PHY] = { smca_waflphy_mce_desc, ARRAY_SIZE(smca_waflphy_mce_desc) },
};
static bool f12h_mc0_mce(u16 ec, u8 xec)
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 928f63a374c7..c94ca1f790c4 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1554,6 +1554,9 @@ static int __init pnd2_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93daa4297f2e..4c626fcd4dcb 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3510,6 +3510,9 @@ static int __init sbridge_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 6a4f0b27c654..4dbd46575bfb 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -656,6 +656,9 @@ static int __init skx_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 81c3e2ec6f56..5e83f59bef8a 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -23,10 +23,13 @@
#include "skx_common.h"
static const char * const component_names[] = {
- [INDEX_SOCKET] = "ProcessorSocketId",
- [INDEX_MEMCTRL] = "MemoryControllerId",
- [INDEX_CHANNEL] = "ChannelId",
- [INDEX_DIMM] = "DimmSlotId",
+ [INDEX_SOCKET] = "ProcessorSocketId",
+ [INDEX_MEMCTRL] = "MemoryControllerId",
+ [INDEX_CHANNEL] = "ChannelId",
+ [INDEX_DIMM] = "DimmSlotId",
+ [INDEX_NM_MEMCTRL] = "NmMemoryControllerId",
+ [INDEX_NM_CHANNEL] = "NmChannelId",
+ [INDEX_NM_DIMM] = "NmDimmSlotId",
};
static int component_indices[ARRAY_SIZE(component_names)];
@@ -34,12 +37,14 @@ static int adxl_component_count;
static const char * const *adxl_component_names;
static u64 *adxl_values;
static char *adxl_msg;
+static unsigned long adxl_nm_bitmap;
static char skx_msg[MSG_SIZE];
static skx_decode_f skx_decode;
static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
+static bool skx_mem_cfg_2lm;
int __init skx_adxl_get(void)
{
@@ -56,14 +61,25 @@ int __init skx_adxl_get(void)
for (j = 0; names[j]; j++) {
if (!strcmp(component_names[i], names[j])) {
component_indices[i] = j;
+
+ if (i >= INDEX_NM_FIRST)
+ adxl_nm_bitmap |= 1 << i;
+
break;
}
}
- if (!names[j])
+ if (!names[j] && i < INDEX_NM_FIRST)
goto err;
}
+ if (skx_mem_cfg_2lm) {
+ if (!adxl_nm_bitmap)
+ skx_printk(KERN_NOTICE, "Not enough ADXL components for 2-level memory.\n");
+ else
+ edac_dbg(2, "adxl_nm_bitmap: 0x%lx\n", adxl_nm_bitmap);
+ }
+
adxl_component_names = names;
while (*names++)
adxl_component_count++;
@@ -99,7 +115,7 @@ void __exit skx_adxl_put(void)
kfree(adxl_msg);
}
-static bool skx_adxl_decode(struct decoded_addr *res)
+static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
{
struct skx_dev *d;
int i, len = 0;
@@ -116,11 +132,20 @@ static bool skx_adxl_decode(struct decoded_addr *res)
}
res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
- res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
- res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
- res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ if (error_in_1st_level_mem) {
+ res->imc = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
+ (int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
+ res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
+ (int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1;
+ res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ?
+ (int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1;
+ } else {
+ res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
+ res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
+ res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ }
- if (res->imc > NUM_IMC - 1) {
+ if (res->imc > NUM_IMC - 1 || res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -151,6 +176,11 @@ static bool skx_adxl_decode(struct decoded_addr *res)
return true;
}
+void skx_set_mem_cfg(bool mem_cfg_2lm)
+{
+ skx_mem_cfg_2lm = mem_cfg_2lm;
+}
+
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
skx_decode = decode;
@@ -313,9 +343,9 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
ranks = numrank(mtr);
rows = numrow(mtr);
- cols = numcol(mtr);
+ cols = imc->hbm_mc ? 6 : numcol(mtr);
- if (cfg->support_ddr5 && (amap & 0x8)) {
+ if (cfg->support_ddr5 && ((amap & 0x8) || imc->hbm_mc)) {
banks = 32;
mtype = MEM_DDR5;
} else {
@@ -344,8 +374,13 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
dimm->dtype = get_width(mtr);
dimm->mtype = mtype;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
- snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
- imc->src_id, imc->lmc, chan, dimmno);
+
+ if (imc->hbm_mc)
+ snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_HBMC#%u_Chan#%u",
+ imc->src_id, imc->lmc, chan);
+ else
+ snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+ imc->src_id, imc->lmc, chan, dimmno);
return 1;
}
@@ -578,6 +613,21 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
optype, skx_msg);
}
+static bool skx_error_in_1st_level_mem(const struct mce *m)
+{
+ u32 errcode;
+
+ if (!skx_mem_cfg_2lm)
+ return false;
+
+ errcode = GET_BITFIELD(m->status, 0, 15);
+
+ if ((errcode & 0xef80) != 0x280)
+ return false;
+
+ return true;
+}
+
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
@@ -597,7 +647,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
res.addr = mce->addr;
if (adxl_component_count) {
- if (!skx_adxl_decode(&res))
+ if (!skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce)))
return NOTIFY_DONE;
} else if (!skx_decode || !skx_decode(&res)) {
return NOTIFY_DONE;
@@ -658,6 +708,8 @@ void skx_remove(void)
}
if (d->util_all)
pci_dev_put(d->util_all);
+ if (d->pcu_cr3)
+ pci_dev_put(d->pcu_cr3);
if (d->sad_all)
pci_dev_put(d->sad_all);
if (d->uracu)
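skx_error_in_1st_level_mem() keys off the MCACOD field of MCi_STATUS: masking with 0xef80 ignores bit 12 plus the transaction-type and channel bits 6..0, and 0x280 is the code family the driver treats as a near-memory (first-level DDR cache in a 2LM configuration) error. A standalone check of what the mask test accepts; in_1st_level_mem is a hypothetical stand-in for the predicate above:

#include <assert.h>
#include <stdint.h>

static int in_1st_level_mem(uint32_t errcode)
{
	return (errcode & 0xef80) == 0x280;
}

int main(void)
{
	assert(in_1st_level_mem(0x280));	/* base near-memory code */
	assert(in_1st_level_mem(0x2af));	/* channel/transaction-type bits ignored */
	assert(!in_1st_level_mem(0x080));	/* plain memory-controller code */
	return 0;
}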
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index bf56bebff138..01f67e731766 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -9,6 +9,8 @@
#ifndef _SKX_COMM_EDAC_H
#define _SKX_COMM_EDAC_H
+#include <linux/bits.h>
+
#define MSG_SIZE 1024
/*
@@ -30,9 +32,17 @@
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_IMC 4
-#define I10NM_NUM_CHANNELS 2
-#define I10NM_NUM_DIMMS 2
+#define I10NM_NUM_DDR_IMC 4
+#define I10NM_NUM_DDR_CHANNELS 2
+#define I10NM_NUM_DDR_DIMMS 2
+
+#define I10NM_NUM_HBM_IMC 16
+#define I10NM_NUM_HBM_CHANNELS 2
+#define I10NM_NUM_HBM_DIMMS 1
+
+#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
+#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
+#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
@@ -54,12 +64,16 @@ struct skx_dev {
struct pci_dev *sad_all;
struct pci_dev *util_all;
struct pci_dev *uracu; /* for i10nm CPU */
+ struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
void __iomem *mbase; /* for i10nm CPU */
int chan_mmio_sz; /* for i10nm CPU */
+ int num_channels; /* channels per memory controller */
+ int num_dimms; /* dimms per channel */
+ bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
u8 src_id, node_id;
@@ -92,9 +106,17 @@ enum {
INDEX_MEMCTRL,
INDEX_CHANNEL,
INDEX_DIMM,
+ INDEX_NM_FIRST,
+ INDEX_NM_MEMCTRL = INDEX_NM_FIRST,
+ INDEX_NM_CHANNEL,
+ INDEX_NM_DIMM,
INDEX_MAX
};
+#define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL)
+#define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL)
+#define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM)
+
struct decoded_addr {
struct skx_dev *dev;
u64 addr;
@@ -122,7 +144,12 @@ struct res_config {
int busno_cfg_offset;
/* Per DDR channel memory-mapped I/O size */
int ddr_chan_mmio_sz;
+ /* Per HBM channel memory-mapped I/O size */
+ int hbm_chan_mmio_sz;
bool support_ddr5;
+ /* SAD device number and function number */
+ unsigned int sad_all_devfn;
+ int sad_all_offset;
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
@@ -133,6 +160,7 @@ typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int le
int __init skx_adxl_get(void);
void __exit skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
+void skx_set_mem_cfg(bool mem_cfg_2lm);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 0eb5eb97fd74..f13674081cb6 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1368,7 +1368,7 @@ static int thunderx_ocx_probe(struct pci_dev *pdev,
name, 1, "CCPI", 1,
0, NULL, 0, idx);
if (!edac_dev) {
- dev_err(&pdev->dev, "Cannot allocate EDAC device: %d\n", ret);
+ dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
}
ocx = edac_dev->pvt_info;
@@ -1380,7 +1380,7 @@ static int thunderx_ocx_probe(struct pci_dev *pdev,
ocx->regs = pcim_iomap_table(pdev)[0];
if (!ocx->regs) {
- dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
+ dev_err(&pdev->dev, "Cannot map PCI resources\n");
ret = -ENODEV;
goto err_free;
}
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index e7eae20f83d1..169f96e51c29 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -197,6 +197,7 @@ static const struct of_device_id ti_edac_of_match[] = {
{ .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
{},
};
+MODULE_DEVICE_TABLE(of, ti_edac_of_match);
static int _emif_get_id(struct device_node *node)
{
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index ace523924e58..5476f48ed74b 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -6,6 +6,7 @@
// Chanwoo Choi <cw00.choi@samsung.com>
// Krzysztof Kozlowski <krzk@kernel.org>
+#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -673,7 +674,10 @@ static int max14577_muic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
- INIT_WORK(&info->irq_work, max14577_muic_irq_work);
+ ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
+ max14577_muic_irq_work);
+ if (ret)
+ return ret;
switch (max14577->dev_type) {
case MAXIM_DEVICE_TYPE_MAX77836:
@@ -766,15 +770,6 @@ static int max14577_muic_probe(struct platform_device *pdev)
return ret;
}
-static int max14577_muic_remove(struct platform_device *pdev)
-{
- struct max14577_muic_info *info = platform_get_drvdata(pdev);
-
- cancel_work_sync(&info->irq_work);
-
- return 0;
-}
-
static const struct platform_device_id max14577_muic_id[] = {
{ "max14577-muic", MAXIM_DEVICE_TYPE_MAX14577, },
{ "max77836-muic", MAXIM_DEVICE_TYPE_MAX77836, },
@@ -797,7 +792,6 @@ static struct platform_driver max14577_muic_driver = {
.of_match_table = of_max14577_muic_dt_match,
},
.probe = max14577_muic_probe,
- .remove = max14577_muic_remove,
.id_table = max14577_muic_id,
};
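This conversion and the two extcon conversions that follow share one shape: devm_work_autocancel() (from <linux/devm-helpers.h>) both initializes the work item and registers a devres action that runs cancel_work_sync() at driver unbind, which is what lets the .remove callbacks be deleted. A minimal sketch of the pattern, using a hypothetical driver:

#include <linux/devm-helpers.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
	/* deferred handling goes here */
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* INIT_WORK() plus an automatic cancel_work_sync() on unbind */
	ret = devm_work_autocancel(&pdev->dev, &priv->work, foo_work_fn);
	if (ret)
		return ret;

	return 0;
}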
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 92af97e00828..1f1d9ab0c5c7 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -5,6 +5,7 @@
// Copyright (C) 2012 Samsung Electronics
// Chanwoo Choi <cw00.choi@samsung.com>
+#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -1127,7 +1128,10 @@ static int max77693_muic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
- INIT_WORK(&info->irq_work, max77693_muic_irq_work);
+ ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
+ max77693_muic_irq_work);
+ if (ret)
+ return ret;
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
@@ -1254,22 +1258,11 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
}
-static int max77693_muic_remove(struct platform_device *pdev)
-{
- struct max77693_muic_info *info = platform_get_drvdata(pdev);
-
- cancel_work_sync(&info->irq_work);
- input_unregister_device(info->dock);
-
- return 0;
-}
-
static struct platform_driver max77693_muic_driver = {
.driver = {
.name = DEV_NAME,
},
.probe = max77693_muic_probe,
- .remove = max77693_muic_remove,
};
module_platform_driver(max77693_muic_driver);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index e1408075ef7d..bbc592823570 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -5,6 +5,7 @@
// Copyright (C) 2012 Samsung Electronics
// Donggeun Kim <dg77.kim@samsung.com>
+#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -650,27 +651,30 @@ static int max8997_muic_probe(struct platform_device *pdev)
mutex_init(&info->mutex);
INIT_WORK(&info->irq_work, max8997_muic_irq_work);
+ ret = devm_work_autocancel(&pdev->dev, &info->irq_work,
+ max8997_muic_irq_work);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
- if (!virq) {
- ret = -EINVAL;
- goto err_irq;
- }
+ if (!virq)
+ return -EINVAL;
+
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL,
- max8997_muic_irq_handler,
- IRQF_NO_SUSPEND,
- muic_irq->name, info);
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max8997_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d, error :%d)\n",
muic_irq->irq, ret);
- goto err_irq;
+ return ret;
}
}
@@ -678,14 +682,13 @@ static int max8997_muic_probe(struct platform_device *pdev)
info->edev = devm_extcon_dev_allocate(&pdev->dev, max8997_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- ret = PTR_ERR(info->edev);
- goto err_irq;
+ return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_irq;
+ return ret;
}
if (pdata && pdata->muic_pdata) {
@@ -756,23 +759,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
delay_jiffies);
return 0;
-
-err_irq:
- while (--i >= 0)
- free_irq(muic_irqs[i].virq, info);
- return ret;
-}
-
-static int max8997_muic_remove(struct platform_device *pdev)
-{
- struct max8997_muic_info *info = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
- free_irq(muic_irqs[i].virq, info);
- cancel_work_sync(&info->irq_work);
-
- return 0;
}
static struct platform_driver max8997_muic_driver = {
@@ -780,7 +766,6 @@ static struct platform_driver max8997_muic_driver = {
.name = DEV_NAME,
},
.probe = max8997_muic_probe,
- .remove = max8997_muic_remove,
};
module_platform_driver(max8997_muic_driver);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index ec68ed27b0a5..b63d55f5ebd3 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -58,6 +58,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
+ fallthrough;
case SELFID_PORT_NONE:
break;
}
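The fallthrough pseudo-keyword documents that the missing break is intentional, so -Wimplicit-fallthrough stays quiet without changing behavior. In miniature (states hypothetical):

switch (state) {
case STATE_INIT:
	setup();
	fallthrough;	/* INIT deliberately continues into the RUN handling */
case STATE_RUN:
	run();
	break;
}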
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index e1926483ae2f..4c3201e290e2 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -157,7 +157,7 @@ static int __init unmarshal_devices(struct properties_header *properties)
if (!entry[0].name)
goto skip_device;
- ret = device_add_properties(dev, entry); /* makes deep copy */
+ ret = device_create_managed_software_node(dev, entry, NULL);
if (ret)
dev_err(dev, "error %d assigning properties\n", ret);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index e15d484b6a5a..ea7ca74fc173 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
return 0;
- n = 0;
- len = CPER_REC_LEN - 1;
+ len = CPER_REC_LEN;
dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
if (bank && device)
n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
"DIMM location: not present. DMI handle: 0x%.4x ",
mem->mem_dev_handle);
- msg[n] = '\0';
return n;
}
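The cper_dimm_err_location() cleanup leans on two snprintf() guarantees: the output is always NUL-terminated within the given size, so the manual msg[n] = '\0' was redundant, and the full buffer size can be passed because the terminator is accounted for inside snprintf() itself. A small illustration:

char buf[8];
int n;

n = snprintf(buf, sizeof(buf), "%s", "DIMM location");
/* buf now holds "DIMM lo" plus a terminating NUL; n is 13, the length
 * the string would have had without truncation. */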
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 5c9625e552f4..10d4457417a4 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -12,52 +12,39 @@
#include <linux/efi.h>
#include <linux/pci.h>
-struct acpi_hid_uid {
- struct acpi_device_id hid[2];
- char uid[11]; /* UINT_MAX + null byte */
-};
-
-static int __init match_acpi_dev(struct device *dev, const void *data)
-{
- struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data;
- struct acpi_device *adev = to_acpi_device(dev);
-
- if (acpi_match_device_ids(adev, hid_uid.hid))
- return 0;
-
- if (adev->pnp.unique_id)
- return !strcmp(adev->pnp.unique_id, hid_uid.uid);
- else
- return !strcmp("0", hid_uid.uid);
-}
-
static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
- struct acpi_hid_uid hid_uid = {};
+ char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */
+ struct acpi_device *adev;
struct device *phys_dev;
if (node->header.length != 12)
return -EINVAL;
- sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ sprintf(hid, "%c%c%c%04X",
'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
- sprintf(hid_uid.uid, "%u", node->acpi.uid);
-
- *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
- match_acpi_dev);
- if (!*child)
+ sprintf(uid, "%u", node->acpi.uid);
+
+ for_each_acpi_dev_match(adev, hid, NULL, -1) {
+ if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid))
+ break;
+ if (!adev->pnp.unique_id && node->acpi.uid == 0)
+ break;
+ acpi_dev_put(adev);
+ }
+ if (!adev)
return -ENODEV;
- phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ phys_dev = acpi_get_first_physical_node(adev);
if (phys_dev) {
- get_device(phys_dev);
- put_device(*child);
- *child = phys_dev;
- }
+ *child = get_device(phys_dev);
+ acpi_dev_put(adev);
+ } else
+ *child = &adev->dev;
return 0;
}
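The rewrite trades the bus_find_device() callback for the for_each_acpi_dev_match() iterator. The discipline it relies on: each iteration holds a reference on the current device, so a non-matching iteration must drop it with acpi_dev_put(), while a match breaks out still holding it, leaving adev NULL only when the walk is exhausted. Schematically (HID and match predicate hypothetical):

struct acpi_device *adev;

for_each_acpi_dev_match(adev, "ABCD0000", NULL, -1) {
	if (uid_matches(adev))	/* hypothetical predicate */
		break;		/* keep the reference for the caller */
	acpi_dev_put(adev);	/* release before moving on */
}
if (!adev)
	return -ENODEV;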
diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
index bb042ab7c2be..e901f8564ca0 100644
--- a/drivers/firmware/efi/fdtparams.c
+++ b/drivers/firmware/efi/fdtparams.c
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+ if (!fdt)
+ return 0;
+
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
node = fdt_path_offset(fdt, dt_params[i].path);
if (node < 0)
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index 4e81c6077188..dd95f330fe6e 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
return 0;
/* Skip any leading slashes */
- while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
i++;
while (--result_len > 0 && i < cmdline_len) {
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 5737cb0fcd44..0a9aba5f9cef 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
return false;
}
- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
- return false;
- }
-
if (PAGE_SIZE > EFI_PAGE_SIZE &&
(!PAGE_ALIGNED(in->phys_addr) ||
!PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 3c1c5daf6df2..e3da38e15c5b 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -335,10 +335,15 @@ int psci_cpu_suspend_enter(u32 state)
{
int ret;
- if (!psci_power_state_loses_context(state))
+ if (!psci_power_state_loses_context(state)) {
+ struct arm_cpuidle_irq_context context;
+
+ arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
- else
+ arm_cpuidle_restore_irq_context(&context);
+ } else {
ret = cpu_suspend(state, psci_suspend_finisher);
+ }
return ret;
}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 0078260fbabe..172c751a4f6c 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -299,15 +299,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
return 0;
}
-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+ char *buf)
{
return sprintf(buf, "%u\n", fw_cfg_rev);
}
-static const struct {
- struct attribute attr;
- ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
-} fw_cfg_rev_attr = {
+static const struct kobj_attribute fw_cfg_rev_attr = {
.attr = { .name = "rev", .mode = S_IRUSR },
.show = fw_cfg_showrev,
};
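The old anonymous struct only worked because the show() callback was invoked through a mismatched function-pointer type, which is a problem for control-flow-integrity checking; giving the attribute its real struct kobj_attribute type makes the prototype match what sysfs actually calls. The same thing is conventionally spelled with the __ATTR() helpers, as in this sketch assuming the <name>_show naming convention (note __ATTR_RO() uses mode 0444, whereas the driver keeps S_IRUSR):

static ssize_t rev_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", fw_cfg_rev);
}
static const struct kobj_attribute rev_attr = __ATTR_RO(rev);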
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 028f81d702cc..9f937b125ab0 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -15,6 +15,7 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
+u64 __ro_after_init smccc_has_sve_hint = false;
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
{
@@ -22,6 +23,9 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
smccc_conduit = conduit;
smccc_trng_available = smccc_probe_trng();
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ smccc_version >= ARM_SMCCC_VERSION_1_3)
+ smccc_has_sve_hint = true;
}
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1dd0ec6727fd..63b84ba161dc 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1367,7 +1367,7 @@ config GPIO_TPS65912
config GPIO_TPS68470
bool "TPS68470 GPIO"
- depends on MFD_TPS68470
+ depends on INTEL_SKL_INT3472
help
Select this option to enable GPIO driver for the TPS68470
chip family.
@@ -1383,6 +1383,7 @@ config GPIO_TPS68470
config GPIO_TQMX86
tristate "TQ-Systems QTMX86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
help
This driver supports GPIO on the TQMX86 IO controller.
@@ -1450,6 +1451,7 @@ menu "PCI GPIO expanders"
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
depends on X86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 2ba225720086..5a909f3c79e8 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -339,8 +339,6 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
if (!cg)
return -ENOMEM;
- platform_set_drvdata(pdev, cg);
-
mutex_init(&cg->buslock);
cg->chip.label = KBUILD_MODNAME;
cg->chip.direction_input = crystalcove_gpio_dir_in;
@@ -372,13 +370,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
return retval;
}
- retval = devm_gpiochip_add_data(&pdev->dev, &cg->chip, cg);
- if (retval) {
- dev_warn(&pdev->dev, "add gpio chip error: %d\n", retval);
- return retval;
- }
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &cg->chip, cg);
}
static struct platform_driver crystalcove_gpio_driver = {
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 157106e1e438..b9fdf05d7669 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -334,7 +334,7 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_set_irq_type;
ct->chip.irq_set_wake = gpio_set_wake_irq;
- ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 1cbce5990855..97e6caedf1f3 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
#define WCD_REG_DIR_CTL_OFFSET 0x42
#define WCD_REG_VAL_CTL_OFFSET 0x43
#define WCD934X_NPINS 5
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index a19eeef6cf1e..16a0fae1e32e 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -99,19 +99,14 @@ struct wcove_gpio {
bool set_irq_mask;
};
-static inline int to_reg(int gpio, enum ctrl_register reg_type)
+static inline int to_reg(int gpio, enum ctrl_register type)
{
- unsigned int reg;
+ unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
if (gpio >= WCOVE_GPIO_NUM)
return -EOPNOTSUPP;
- if (reg_type == CTRL_IN)
- reg = GPIO_IN_CTRL_BASE + gpio;
- else
- reg = GPIO_OUT_CTRL_BASE + gpio;
-
- return reg;
+ return reg + gpio;
}
static inline int to_ireg(int gpio, enum ctrl_register type, unsigned int *mask)
@@ -129,7 +124,7 @@ static inline int to_ireg(int gpio, enum ctrl_register type, unsigned int *mask)
return reg;
}
-static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+static void wcove_update_irq_mask(struct wcove_gpio *wg, irq_hw_number_t gpio)
{
unsigned int mask, reg = to_ireg(gpio, IRQ_MASK, &mask);
@@ -139,13 +134,10 @@ static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
regmap_clear_bits(wg->regmap, reg, mask);
}
-static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
+static void wcove_update_irq_ctrl(struct wcove_gpio *wg, irq_hw_number_t gpio)
{
int reg = to_reg(gpio, CTRL_IN);
- if (reg < 0)
- return;
-
regmap_update_bits(wg->regmap, reg, CTLI_INTCNT_BE, wg->intcnt);
}
@@ -248,8 +240,9 @@ static int wcove_irq_type(struct irq_data *data, unsigned int type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ irq_hw_number_t gpio = irqd_to_hwirq(data);
- if (data->hwirq >= WCOVE_GPIO_NUM)
+ if (gpio >= WCOVE_GPIO_NUM)
return 0;
switch (type) {
@@ -286,7 +279,7 @@ static void wcove_bus_sync_unlock(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
- int gpio = data->hwirq;
+ irq_hw_number_t gpio = irqd_to_hwirq(data);
if (wg->update & UPDATE_IRQ_TYPE)
wcove_update_irq_ctrl(wg, gpio);
@@ -301,8 +294,9 @@ static void wcove_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ irq_hw_number_t gpio = irqd_to_hwirq(data);
- if (data->hwirq >= WCOVE_GPIO_NUM)
+ if (gpio >= WCOVE_GPIO_NUM)
return;
wg->set_irq_mask = false;
@@ -313,8 +307,9 @@ static void wcove_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ irq_hw_number_t gpio = irqd_to_hwirq(data);
- if (data->hwirq >= WCOVE_GPIO_NUM)
+ if (gpio >= WCOVE_GPIO_NUM)
return;
wg->set_irq_mask = true;
@@ -369,8 +364,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void wcove_gpio_dbg_show(struct seq_file *s,
- struct gpio_chip *chip)
+static void wcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
unsigned int ctlo, ctli, irq_mask, irq_status;
struct wcove_gpio *wg = gpiochip_get_data(chip);
@@ -379,10 +373,15 @@ static void wcove_gpio_dbg_show(struct seq_file *s,
for (gpio = 0; gpio < WCOVE_GPIO_NUM; gpio++) {
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &ctli);
+ if (ret) {
+ dev_err(wg->dev, "Failed to read registers: CTRL out/in\n");
+ break;
+ }
+
ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_MASK, &mask), &irq_mask);
ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_STATUS, &mask), &irq_status);
if (ret) {
- pr_err("Failed to read registers: ctrl out/in or irq status/mask\n");
+ dev_err(wg->dev, "Failed to read registers: IRQ status/mask\n");
break;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 3ef22a3c104d..411525ac4cc4 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -128,6 +128,34 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
+/**
+ * acpi_get_and_request_gpiod - Translate ACPI GPIO pin to GPIO descriptor and
+ * hold a refcount to the GPIO device.
+ * @path: ACPI GPIO controller full path name (e.g. "\\_SB.GPO1")
+ * @pin: ACPI GPIO pin number (0-based, controller-relative)
+ * @label: Label to pass to gpiod_request()
+ *
+ * This function is a simple pass-through to acpi_get_gpiod(), except that,
+ * because it is intended for use outside of the GPIO layer (much like
+ * gpiod_get_index(), for example), it also holds a reference to the GPIO device.
+ */
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+{
+ struct gpio_desc *gpio;
+ int ret;
+
+ gpio = acpi_get_gpiod(path, pin);
+ if (IS_ERR(gpio))
+ return gpio;
+
+ ret = gpiod_request(gpio, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return gpio;
+}
+EXPORT_SYMBOL_GPL(acpi_get_and_request_gpiod);
+
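A consumer-side sketch for the new helper (path, pin and label hypothetical; error handling trimmed). Because the descriptor comes back already requested, the caller owns it until gpiod_put():

struct gpio_desc *desc;

desc = acpi_get_and_request_gpiod("\\_SB.GPO1", 4, "example-enable");
if (IS_ERR(desc))
	return PTR_ERR(desc);

gpiod_set_value_cansleep(desc, 1);
/* ... */
gpiod_put(desc);	/* releases the request taken above */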
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -168,6 +196,29 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
}
EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+/**
+ * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO
+ * I/O resource or return False if not.
+ * @ares: Pointer to the ACPI resource to fetch
+ * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer
+ */
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ struct acpi_resource_gpio *gpio;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return false;
+
+ gpio = &ares->data.gpio;
+ if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
+ return false;
+
+ *agpio = gpio;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
+
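And a matching sketch for the GpioIo() helper as it would sit in an ACPI resource walk (the surrounding callback and its return convention are hypothetical):

static int example_handle_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_gpio *agpio;

	if (!acpi_gpio_get_io_resource(ares, &agpio))
		return 1;	/* not a GpioIo() connection, skip it */

	/* agpio now aliases ares->data.gpio */
	pr_info("GpioIo resource with %u pin(s)\n", agpio->pin_table_length);
	return 1;
}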
static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
struct acpi_gpio_event *event)
{
@@ -1233,14 +1284,14 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
void acpi_gpiochip_add(struct gpio_chip *chip)
{
struct acpi_gpio_chip *acpi_gpio;
- acpi_handle handle;
+ struct acpi_device *adev;
acpi_status status;
if (!chip || !chip->parent)
return;
- handle = ACPI_HANDLE(chip->parent);
- if (!handle)
+ adev = ACPI_COMPANION(chip->parent);
+ if (!adev)
return;
acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
@@ -1254,7 +1305,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
INIT_LIST_HEAD(&acpi_gpio->events);
INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
- status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
+ status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
if (ACPI_FAILURE(status)) {
dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
kfree(acpi_gpio);
@@ -1263,7 +1314,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
acpi_gpiochip_request_regions(acpi_gpio);
acpi_gpiochip_scan_gpios(acpi_gpio);
- acpi_walk_dep_device_list(handle);
+ acpi_dev_clear_dependencies(adev);
}
void acpi_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 1631727bf0da..c7b5446d01fd 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1880,6 +1880,7 @@ static void gpio_v2_line_info_changed_to_v1(
struct gpio_v2_line_info_changed *lic_v2,
struct gpioline_info_changed *lic_v1)
{
+ memset(lic_v1, 0, sizeof(*lic_v1));
gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
lic_v1->timestamp = lic_v2->timestamp_ns;
lic_v1->event_type = lic_v2->event_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index fad3b91f74f5..d39cff4a1fe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 2:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
- mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 3:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
- mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0350205c4897..6819fe5612d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
- unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
- /*query ue count*/
- ras_counter = amdgpu_ras_query_error_count(adev, false);
- /*ras counter is monotonic increasing*/
- if (ras_counter != ctx->ras_counter_ue) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
- ctx->ras_counter_ue = ras_counter;
- }
-
- /*query ce count*/
- ras_counter = amdgpu_ras_query_error_count(adev, true);
- if (ras_counter != ctx->ras_counter_ce) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
- ctx->ras_counter_ce = ras_counter;
- }
-
mutex_unlock(&mgr->lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 66ddfe4f58c2..57ec108b5972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+ if (amdgpu_sriov_vf(adev) ||
+ adev->enable_virtual_display ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8a1fb8b6606e..2a4cd7d377bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1047,17 +1047,18 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
goto err;
- ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
return 0;
err:
- drm_err(dev, "Failed to init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
@@ -1071,9 +1072,6 @@ int amdgpu_display_gem_fb_verify_and_init(
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto err;
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1092,9 +1090,13 @@ int amdgpu_display_gem_fb_verify_and_init(
if (ret)
goto err;
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
return 0;
err:
- drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index baa980a477d9..37ec59365080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -214,9 +214,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int r;
/* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r) {
+ amdgpu_bo_unpin(bo);
+ return r;
+ }
+ }
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 8f4a8f8d8146..39b6c6bfab45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buff[34];
- int addrptr = 0, size = 0;
+ int addrptr, size;
+ int len;
if (!is_fru_eeprom_supported(adev))
return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
- return 0;
+ return -ENODEV;
}
/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
- return size;
+ return -EINVAL;
}
/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product name should only be 32 characters. Any more,
* and something could be wrong. Cap it at 32 to be safe
*/
- if (size > 32) {
+ if (len >= sizeof(adev->product_name)) {
DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
- size = 32;
+ len = sizeof(adev->product_name) - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
- memcpy(adev->product_name, &buff[2], size);
- adev->product_name[size] = '\0';
+ memcpy(adev->product_name, &buff[2], len);
+ adev->product_name[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[2], size);
- adev->product_number[size] = '\0';
+ memcpy(adev->product_number, &buff[2], len);
+ adev->product_number[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
- return size;
+ return -EINVAL;
}
addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Serial number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[2], size);
- adev->serial[size] = '\0';
+ memcpy(adev->serial, &buff[2], len);
+ adev->serial[len] = '\0';
return 0;
}
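Each field read above ends in the same bounded-copy idiom, with len clamped so the NUL terminator always fits the destination array. Reduced to its essentials (buffer names hypothetical):

char dst[32 + 1];	/* 32 payload characters plus the terminator */
int len = size;		/* bytes reported for this field */

if (len >= sizeof(dst))
	len = sizeof(dst) - 1;
memcpy(dst, &buff[2], len);	/* field payload starts at offset 2 */
dst[len] = '\0';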
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1345f7eba011..f9434bc2f9b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
kfree(ubo->metadata);
}
- kfree(bo);
+ kvfree(bo);
}
/**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
*bo_ptr = NULL;
- bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+ bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 46a5328e00e0..60aa99a39a74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -76,6 +76,7 @@ struct psp_ring
uint64_t ring_mem_mc_addr;
void *ring_mem_handle;
uint32_t ring_size;
+ uint32_t ring_wptr;
};
/* More registers may will be supported */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d5cbc51c5eaa..61c4fb1b87fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -709,8 +709,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
}
mmap_read_lock(mm);
- vma = find_vma(mm, start);
- if (unlikely(!vma || start < vma->vm_start)) {
+ vma = vma_lookup(mm, start);
+ if (unlikely(!vma)) {
r = -EFAULT;
goto out_unlock;
}
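vma_lookup() is this cycle's replacement for the find_vma()-plus-range-check pattern: it returns the VMA only when the address actually falls inside it. Its behavior is equivalent to this sketch:

struct vm_area_struct *vma_lookup_equiv(struct mm_struct *mm,
					unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* find_vma() returns the first VMA ending above addr, which may
	 * also begin above addr; treat that as no mapping. */
	if (vma && addr < vma->vm_start)
		vma = NULL;
	return vma;
}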
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7ce76a6b3a35..0597aeb5f0e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -173,6 +173,9 @@
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1
+
#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
#define GFX_RLCG_GC_WRITE (0x0 << 28)
#define GFX_RLCG_GC_READ (0x1 << 28)
@@ -1480,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
scratch_reg3 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
- spare_int = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+ + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+ } else {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+ }
grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -7349,9 +7359,15 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ } else {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index de5abceced0d..85967a5570cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 938ef4ce5b76..46096ad7f0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
static int jpeg_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 94be35357f7d..bd77794315bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
static int jpeg_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- ring = &adev->jpeg.inst->ring_dec;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 589410c32d09..02bba1f3c42e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f2e725f72d2f..908664a5774b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2bab9c77952f..cf3803f8f075 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 0c1beefa3e49..27b1ced145d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 116b9643d5ba..8af567c546db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 948813d7caa0..888b17d84691 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 14470da52113..3b23de996db2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -372,15 +372,14 @@ done:
static int vcn_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
-
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 389eff96fcf6..652cc1a0e450 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
}
- adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
DRM_ERROR("Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
- bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ bool recalculate_timing = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing |= amdgpu_freesync_vid_mode &&
+ recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
mode = *freesync_mode;
} else {
decide_crtc_timing_for_drm_display_mode(
- &mode, preferred_mode,
- dm_state ? (dm_state->scaling != RMX_OFF) : false);
- }
+ &mode, preferred_mode, scale);
- preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
}
if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
- if (!recalculate_timing || mode_refresh != preferred_refresh)
+ if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (cursor_scale_w != primary_scale_w ||
cursor_scale_h != primary_scale_h) {
- DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+ drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
return -EINVAL;
}
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
int i;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *primary_state, *overlay_state = NULL;
+ struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
if (!primary_state->crtc)
return 0;
+ /* check if cursor plane is enabled */
+ cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+ if (IS_ERR(cursor_state))
+ return PTR_ERR(cursor_state);
+
+ if (drm_atomic_plane_disabling(plane->state, cursor_state))
+ return 0;
+
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 527e56c353cb..8357aa3c41d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (voltage_supported && dummy_pstate_supported) {
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
goto restore_dml_state;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5fe540cd536..27cf22716793 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
data->fine_grain_enabled = 1;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..0eaf86b5e698 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2925,6 +2925,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
struct amdgpu_device *adev = smu->adev;
uint32_t param = 0;
@@ -2932,6 +2934,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
if (adev->asic_type == CHIP_NAVI12)
return 0;
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
/* Workaround for WS SKU */
if (adev->pdev->device == 0x7312 &&
adev->pdev->revision == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d2fd44b903ca..b124a5e40dd6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3027,6 +3027,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMGpuFanBoostLimitRpm,
0,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 05ad75d155e8..cfe4fc69277e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -232,7 +232,6 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
pm_runtime_put_sync(dev->dev);
- drm_crtc_vblank_on(c);
}
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
@@ -344,7 +343,16 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+ struct drm_atomic_state *state)
+{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->dev->event_lock, flags);
if (c->state->event) {
c->state->event->pipe = drm_crtc_index(c);
@@ -354,12 +362,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
crtc->event = c->state->event;
c->state->event = NULL;
}
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- /* TODO: write common plane control register if available */
+ spin_unlock_irqrestore(&c->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 65af56e47129..f09b6dd8754c 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -593,6 +593,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = dc->desc->max_width;
dev->mode_config.max_height = dc->desc->max_height;
dev->mode_config.funcs = &mode_config_funcs;
+ dev->mode_config.async_page_flip = true;
return 0;
}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index f2d46b7ac6f9..232abbba3686 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d273d1a8603a..495a4767a443 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
- mutex_lock(&master->dev->master_mutex);
+ mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 93f4d059fc89..1e1cb245fca7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,7 +20,6 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
- select IO_MAPPING
select SYNC_FILE
select IOSF_MBI
select CRC32
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 02a003fd48fb..50cae0198a3d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
- * DPRX capabilities are read out.
- * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- * detection failure and the transparent LT mode was set. The DPRX
- * capabilities are read out.
- * <0 Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
int lttpr_count;
- bool ret;
int i;
- ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
- /* The DPTX shall read the DPRX caps after LTTPR detection. */
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
- return -EIO;
- }
-
- if (!ret)
- return 0;
-
- /*
- * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
- * at least 1.4.
- */
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
+ if (!intel_dp_read_lttpr_common_caps(intel_dp))
return 0;
- }
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent mode link training mode.
+ *
+ * Returns:
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
+ * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+ int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+ /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
+ return lttpr_count;
+}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
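Callers are expected to handle the three documented return ranges distinctly; schematically:

int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

if (lttpr_count < 0)
	return lttpr_count;	/* reading the DPRX capabilities failed */

/*
 * lttpr_count == 0: transparent LT mode (no usable LTTPRs detected);
 * lttpr_count  > 0: non-transparent LT mode with that many LTTPRs.
 */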
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index f6fe5cb01438..8598a1c78a4c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -367,10 +367,11 @@ retry:
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
- (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start));
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->iomap);
if (ret)
goto err_fence;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5cf6df49c333..35c15ef1327d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -871,7 +871,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
- area = find_vma(current->mm, addr);
+ area = vma_lookup(current->mm, addr);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
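find_vma() returns the first VMA whose end lies above the address, which may be a mapping that starts above it; the new vma_lookup() helper returns a VMA only when the address actually falls inside it, which is the check this selftest wants. A sketch of the difference, assuming the caller holds mmap_lock for reading (the function name is hypothetical):

static void example_lookup(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	/* find_vma(): first VMA with vm_end > addr; may start above addr */
	vma = find_vma(mm, addr);
	if (vma && addr < vma->vm_start)
		pr_debug("addr is in a gap before %lx\n", vma->vm_start);

	/* vma_lookup(): non-NULL only if vm_start <= addr < vm_end */
	vma = vma_lookup(mm, addr);
	if (vma)
		pr_debug("addr lies inside %lx-%lx\n",
			 vma->vm_start, vma->vm_end);

	mmap_read_unlock(mm);
}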
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ec9277539ec..69e43bf91a15 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 9a777b0ff59b..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -37,6 +37,17 @@ struct remap_pfn {
resource_size_t iobase;
};
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+	/* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+ r->pfn++;
+
+ return 0;
+}
+
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
return 0;
}
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap)
+{
+ struct remap_pfn r;
+ int err;
+
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ r.mm = vma->vm_mm;
+ r.pfn = pfn;
+ r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+ (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+ err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
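remap_io_mapping() is reinstated as a thin wrapper around apply_to_page_range(): the core MM code walks (and, if necessary, allocates) the page tables covering [addr, addr + size) and calls the callback once per PTE slot, so remap_pfn() only has to install one special PTE and bump the target pfn. A stripped-down sketch of the same pattern, with hypothetical names and no error unwinding:

#include <linux/mm.h>

struct fill_ctx {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;
};

/* Invoked once for every PTE slot in the requested range. */
static int fill_one_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct fill_ctx *ctx = data;

	set_pte_at(ctx->mm, addr, pte,
		   pte_mkspecial(pfn_pte(ctx->pfn, ctx->prot)));
	ctx->pfn++;

	return 0;	/* a non-zero return aborts the walk */
}

static int fill_range(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn, unsigned long size)
{
	struct fill_ctx ctx = {
		.mm = vma->vm_mm,
		.pfn = pfn,
		.prot = vma->vm_page_prot,
	};

	return apply_to_page_range(ctx.mm, addr, size, fill_one_pte, &ctx);
}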
/**
* remap_io_sg - remap an IO mapping to userspace
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ee8e753d98ce..eae0abd614cb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
for (n = 0; n < smoke[0].ncontexts; n++) {
smoke[0].contexts[n] = live_context(i915, file);
- if (!smoke[0].contexts[n]) {
- ret = -ENOMEM;
+ if (IS_ERR(smoke[0].contexts[n])) {
+ ret = PTR_ERR(smoke[0].contexts[n]);
goto out_contexts;
}
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index f64e06e1067d..96ea1a2c11dd 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
/* Allocate LCD interrupt resources */
irq_lcd = platform_get_irq(pdev, 0);
if (irq_lcd < 0) {
+ ret = irq_lcd;
drm_err(&kmb->drm, "irq_lcd not found");
goto setup_fail;
}
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index b3fd3501c412..5275b2723293 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
- pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+ pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
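drm_display_mode.clock is stored in kHz, so the old divisor computed picoseconds against a rate 1000x too low. A worked check, assuming a 148.5 MHz pixel clock:

/*
 * mode->clock = 148500 (kHz):
 *
 *   1,000,000,000,000 ps/s / (148500 * 1000) px/s ~= 6734 ps per pixel
 *
 * Dividing by the bare 148500 instead yields ~6,734,007, i.e. the old
 * result was off by a factor of 1000.
 */
u64 pclk = DIV_ROUND_UP_ULL(1000000000000ULL, 148500 * 1000ULL);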
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 453d8b4c5763..07fcd12dca16 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
- struct drm_device *drm = priv->drm;
- DRM_DEBUG_DRIVER("\n");
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
+ if (!priv)
+ return;
+
+ drm_kms_helper_poll_fini(priv->drm);
+ drm_atomic_helper_shutdown(priv->drm);
}
static int meson_drv_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b4d8e1b01ee4..f6c1b62b901e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
- gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
- REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
a6xx_flush(gpu, ring);
}
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ }
+
+	/*
+	 * Enable access protection to privileged registers, fault on an
+	 * access-protect violation, and make the last span protect from its
+	 * start address all the way to the end of the register address space.
+	 */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+ for (i = 0; i < count - 1; i++)
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+	/* program the last entry into the final CP_PROTECT slot for "infinite" length */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
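For reference, a worked expansion of the table encoding used above, per the A6XX_PROTECT_NORDWR macro further down in a6xx_gpu.h (bit 31 marks the entry deny-read-and-write, the middle bits carry the length, the low 18 bits the base register):

/*
 *   A6XX_PROTECT_NORDWR(0x11c00, 0x0000)
 *     = (1 << 31) | ((0x0000 & 0x3FFF) << 18) | (0x11c00 & 0x3FFFF)
 *     = 0x80011c00
 *
 * Because a6xx_set_cp_protect() writes this zero-length entry into the
 * final CP_PROTECT slot with CNTL bit 3 set, its span runs from 0x11c00
 * to the end of the register address space ("infinite" range).
 */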
+
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
- uavflagprd_inv >> 4 | lower_bit << 1);
+ uavflagprd_inv << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
}
/* Protect registers from the CP */
- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
- gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
- A6XX_PROTECT_RDONLY(0x600, 0x51));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
- A6XX_PROTECT_RDONLY(0xfc00, 0x3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
- A6XX_PROTECT_RDONLY(0x0, 0x4f9));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
- A6XX_PROTECT_RDONLY(0x501, 0xa));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
- A6XX_PROTECT_RDONLY(0x511, 0x44));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
- A6XX_PROTECT_RW(0xbe20, 0x11f3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ a6xx_set_cp_protect(gpu);
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ce0610c5256f..bb544dfe5737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,7 +44,7 @@ struct a6xx_gpu {
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
* registers starting at _reg.
*/
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
((1 << 31) | \
(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 34bc93548fcf..657778889d35 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index e76ce40a12ab..6f96fbac8282 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..369d91e6361e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1241,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
+	/* The call chain get_pages() -> update_inactive() tries to
+	 * access msm_obj->mm_list, but it is not initialized yet.
+	 * To avoid a NULL pointer dereference, initialize mm_list
+	 * to be empty.
+	 */
+ INIT_LIST_HEAD(&msm_obj->mm_list);
+
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3e09df0472ce..cf11febf60c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -440,6 +440,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
break;
case TTM_PL_TT:
error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
default:
break;
}
@@ -546,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
@@ -582,7 +583,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 61e6d7412505..eb844cdcaec2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -157,6 +157,7 @@ nouveau_conn_atomic_set_property(struct drm_connector *connector,
default:
break;
}
+ break;
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 347488685f74..60019d0532fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 83067763c0ec..e1d31c62f9ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -313,6 +313,7 @@ nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 2b031d4eaeb6..684aff7437ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -41,6 +41,7 @@ pwm_info(struct nvkm_therm *therm, int line)
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index f484147fc3a6..c4b388850a13 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -383,6 +383,7 @@ MODULE_DEVICE_TABLE(spi, ld9040_ids);
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
+ .id_table = ld9040_ids,
.driver = {
.name = "panel-samsung-ld9040",
.of_match_table = ld9040_of_match,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 42a87948e28c..4a90807351e7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
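Both this hunk and the nouveau one above follow the same rule: after pinning a shared buffer, wait for any in-flight migration fence before a peer is allowed to touch it, and unpin on failure. The core of the pattern, as a hypothetical helper:

/* Sketch: settle a pinned BO before export (hypothetical helper). */
static int example_wait_for_move(struct ttm_buffer_object *bo)
{
	int ret = 0;

	if (bo->moving)
		ret = dma_fence_wait(bo->moving, false);	/* uninterruptible */

	return ret;	/* caller unpins the BO on error */
}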
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..06bb24d7a9fe 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
size = radeon_bo_size(rdev->uvd.vcpu_bo);
size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- memset(ptr, 0, size);
+ memset_io((void __iomem *)ptr, 0, size);
return 0;
}
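The UVD code and firmware live in a BO that may be mapped through ioremap'd VRAM, and dereferencing an __iomem pointer with plain memcpy()/memset() is not portable (and trips sparse). The dedicated accessors are the safe form; a minimal sketch, with a hypothetical mapping:

static void example_upload_fw(phys_addr_t bar_phys, const void *fw_data,
			      size_t fw_size, size_t pad)
{
	void __iomem *dst = ioremap(bar_phys, fw_size + pad);

	if (!dst)
		return;

	memcpy_toio(dst, fw_data, fw_size);	/* never plain memcpy() on __iomem */
	memset_io(dst + fw_size, 0, pad);	/* never plain memset() either */
	iounmap(dst);
}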
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index bbdfd5e26ec8..f75fb157f2ff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
of_node_put(phy_node);
if (ret) {
dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
cleanup_encoder:
drm_encoder_cleanup(encoder);
- sun8i_hdmi_phy_remove(hdmi);
err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
- sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.of_match_table = sun8i_dw_hdmi_dt_ids,
},
};
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+ if (ret) {
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
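module_platform_driver() can only register a single driver, which is why the module now open-codes init/exit and unwinds the first registration when the second fails. The same thing can be expressed with the platform_register_drivers() helper; a sketch, assuming both drivers are visible here:

static struct platform_driver * const example_drivers[] = {
	&sun8i_dw_hdmi_pltfm_driver,
	&sun8i_hdmi_phy_driver,
};

static int __init example_init(void)
{
	/* registers in array order and unwinds automatically on failure */
	return platform_register_drivers(example_drivers,
					 ARRAY_SIZE(example_drivers));
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_unregister_drivers(example_drivers,
				    ARRAY_SIZE(example_drivers));
}
module_exit(example_exit);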
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d4b55af0592f..74f6ed0e2570 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
struct gpio_desc *ddc_en;
};
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
static inline struct sun8i_dw_hdmi *
encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun8i_dw_hdmi, encoder);
}
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 9994edf67509..c9239708d398 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include "sun8i_dw_hdmi.h"
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{ /* sentinel */ }
};
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct sun8i_hdmi_phy *phy;
+
+ if (!pdev)
+ return -EPROBE_DEFER;
+
+ phy = platform_get_drvdata(pdev);
+ if (!phy)
+ return -EPROBE_DEFER;
+
+ hdmi->phy = phy;
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct device *dev = hdmi->dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct sun8i_hdmi_phy *phy;
struct resource res;
void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
clk_prepare_enable(phy->clk_phy);
}
- hdmi->phy = phy;
+ platform_set_drvdata(pdev, phy);
return 0;
@@ -728,9 +749,9 @@ err_put_clk_bus:
return ret;
}
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
- struct sun8i_hdmi_phy *phy = hdmi->phy;
+ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
+ return 0;
}
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+ .probe = sun8i_hdmi_phy_probe,
+ .remove = sun8i_hdmi_phy_remove,
+ .driver = {
+ .name = "sun8i-hdmi-phy",
+ .of_match_table = sun8i_hdmi_phy_of_table,
+ },
+};
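With the PHY now a platform driver of its own, the HDMI bridge resolves its DT phandle to a platform device and reads the drvdata that sun8i_hdmi_phy_probe() published; until the PHY driver has bound, both lookups fail and the bridge defers. The generic shape of this provider-lookup pattern (names hypothetical):

static void *example_get_provider(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	void *drvdata;

	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);	/* device not created yet */

	drvdata = platform_get_drvdata(pdev);
	put_device(&pdev->dev);			/* drop of_find_device ref */
	if (!drvdata)
		return ERR_PTR(-EPROBE_DEFER);	/* driver not bound yet */

	return drvdata;
}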
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 87df251c1fcf..0cb868065348 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -25,7 +25,7 @@
#include "trace.h"
/* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
struct reset_control;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 79bff8b48271..bfae8a02f55b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
* dGPU sector layout.
*/
if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
- base |= BIT(39);
+ base |= BIT_ULL(39);
#endif
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7b88261f57bb..0ea320c1092b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
- return err;
+ goto rpm_put;
}
usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
clk_disable_unprepare(sor->clk);
- return err;
+ goto rpm_put;
}
reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
}
return 0;
+
+rpm_put:
+ if (sor->rst)
+ pm_runtime_put(sor->dev);
+
+ return err;
}
static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
- if (get_device(&sor->aux->ddc.dev)) {
- if (try_module_get(sor->aux->ddc.owner))
- sor->output.ddc = &sor->aux->ddc;
- else
- put_device(&sor->aux->ddc.dev);
- }
+ if (get_device(sor->aux->dev))
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = tegra_sor_parse_dt(sor);
if (err < 0)
- return err;
+ goto put_aux;
err = tegra_output_probe(&sor->output);
- if (err < 0)
- return dev_err_probe(&pdev->dev, err,
- "failed to probe output\n");
+ if (err < 0) {
+ dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+ goto put_aux;
+ }
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- INIT_LIST_HEAD(&sor->client.list);
+ host1x_client_init(&sor->client);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
- err = host1x_client_register(&sor->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register host1x client: %d\n",
- err);
- goto rpm_disable;
- }
-
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
sor->index);
if (!name) {
err = -ENOMEM;
- goto unregister;
+ goto uninit;
}
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
- goto unregister;
+ goto uninit;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = PTR_ERR(sor->clk_pad);
dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
- goto unregister;
+ goto uninit;
+ }
+
+ err = __host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto uninit;
}
return 0;
-unregister:
- host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+ host1x_client_exit(&sor->client);
pm_runtime_disable(&pdev->dev);
remove:
+ if (sor->aux)
+ sor->output.ddc = NULL;
+
tegra_output_remove(&sor->output);
+put_aux:
+ if (sor->aux)
+ put_device(sor->aux->dev);
+
return err;
}
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ if (sor->aux) {
+ put_device(sor->aux->dev);
+ sor->output.ddc = NULL;
+ }
+
tegra_output_remove(&sor->output);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cfd0b9292397..ebcffe794adb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
return -EBUSY;
- if (!ttm_bo_get_unless_zero(bo)) {
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ !ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
return -EBUSY;
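The per-BO eligibility tests (backed by a populated ttm_tt, not SG-backed, not already swapped out) move from the device-level LRU walk into ttm_bo_swapout() itself, so every caller inherits them. Factored as a hypothetical predicate, the added condition reads:

static bool example_bo_swappable(const struct ttm_buffer_object *bo)
{
	return bo->ttm &&
	       ttm_tt_is_populated(bo->ttm) &&
	       !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG) &&
	       !(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED);
}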
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 510e3e001dab..3d9c62b93e29 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
list_for_each_entry(bo, &man->lru[j], lru) {
- uint32_t num_pages;
+ uint32_t num_pages = PFN_UP(bo->base.size);
- if (!bo->ttm ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
- continue;
-
- num_pages = bo->ttm->num_pages;
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1fda574579af..8106b5634fe1 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -159,6 +159,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
if (vc4_hdmi->hpd_gpio) {
if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
vc4_hdmi->hpd_active_low)
@@ -180,10 +182,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -473,7 +477,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -784,13 +787,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
- }
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
/*
@@ -801,7 +797,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
(hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -809,7 +804,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -1929,6 +1923,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2165,11 +2182,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};
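Moving the HSM clock into runtime-PM callbacks means any path that touches HDMI registers only has to take a runtime-PM reference; the clock then follows the device's runtime state, as connector detect now does. The generic bracket looks like this (hypothetical helper):

static int example_read_hw_status(struct device *dev, void __iomem *reg)
{
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* runs runtime_resume */
	if (ret < 0)
		return ret;

	val = readl(reg);
	pm_runtime_put(dev);			/* may trigger runtime_suspend */

	return val ? 1 : 0;
}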
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bb5529a7a9c2..948b3a58aad1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+ ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 46f69c532b6b..218e3718fd68 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister);
/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+ INIT_LIST_HEAD(&client->list);
+ __mutex_init(&client->lock, "host1x client lock", key);
+ client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+ mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
+/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
* @key: lock class key for the client-specific mutex
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
{
struct host1x *host1x;
int err;
- INIT_LIST_HEAD(&client->list);
- __mutex_init(&client->lock, "host1x client lock", key);
- client->usecount = 0;
-
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
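Splitting initialization out of registration lets tegra_sor_probe() set the client up early and tear it down with host1x_client_exit() on any error path, without ever touching the shared device list. Callers are expected to go through a wrapper macro that supplies the lockdep class key per call site, presumably of this shape (the standard convention for __*_init APIs):

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})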
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4bf263c2d61a..160554903ef9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -93,11 +93,11 @@ menu "Special HID drivers"
depends on HID
config HID_A4TECH
- tristate "A4 tech mice"
+ tristate "A4TECH mice"
depends on HID
default !EXPERT
help
- Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+ Support for some A4TECH mice with two scroll wheels.
config HID_ACCUTOUCH
tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
help
Support for Samsung InfraRed remote control or keyboards.
+config HID_SEMITEK
+ tristate "Semitek USB keyboards"
+ depends on HID
+ help
+ Support for Semitek USB keyboards that are not fully compliant
+ with the HID standard.
+
+ There are many variants, including:
+ - GK61, GK64, GK68, GK84, GK96, etc.
+ - SK61, SK64, SK68, SK84, SK96, etc.
+ - Dierya DK61/DK66
+ - Tronsmart TK09R
+ - Woo-dy
+ - X-Bows Nature/Knight
+
config HID_SONY
tristate "Sony PS2/3/4 accessories"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 193431ec4db8..1ea1a7c0b20f 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
obj-$(CONFIG_HID_RMI) += hid-rmi.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK) += hid-semitek.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 2ab38b715347..efb849411d25 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -77,6 +77,7 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
static void amd_sfh_work(struct work_struct *work)
{
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work.work);
+ struct amd_input_data *in_data = cli_data->in_data;
struct request_list *req_node;
u8 current_index, sensor_index;
u8 report_id, node_type;
@@ -88,6 +89,7 @@ static void amd_sfh_work(struct work_struct *work)
sensor_index = req_node->sensor_idx;
report_id = req_node->report_id;
node_type = req_node->report_type;
+ kfree(req_node);
if (node_type == HID_FEATURE_REPORT) {
report_size = get_feature_report(sensor_index, report_id,
@@ -100,13 +102,11 @@ static void amd_sfh_work(struct work_struct *work)
pr_err("AMDSFH: Invalid report size\n");
} else if (node_type == HID_INPUT_REPORT) {
- report_size = get_input_report(sensor_index, report_id,
- cli_data->input_report[current_index],
- cli_data->sensor_virt_addr[current_index]);
+ report_size = get_input_report(current_index, sensor_index, report_id, in_data);
if (report_size)
hid_input_report(cli_data->hid_sensor_hubs[current_index],
cli_data->report_type[current_index],
- cli_data->input_report[current_index], report_size, 0);
+ in_data->input_report[current_index], report_size, 0);
else
pr_err("AMDSFH: Invalid report size\n");
}
@@ -118,21 +118,22 @@ static void amd_sfh_work(struct work_struct *work)
static void amd_sfh_work_buffer(struct work_struct *work)
{
struct amdtp_cl_data *cli_data = container_of(work, struct amdtp_cl_data, work_buffer.work);
+ struct amd_input_data *in_data = cli_data->in_data;
u8 report_size;
int i;
for (i = 0; i < cli_data->num_hid_devices; i++) {
- report_size = get_input_report(cli_data->sensor_idx[i], cli_data->report_id[i],
- cli_data->input_report[i],
- cli_data->sensor_virt_addr[i]);
+ report_size = get_input_report(i, cli_data->sensor_idx[i], cli_data->report_id[i],
+ in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
- cli_data->input_report[i], report_size, 0);
+ in_data->input_report[i], report_size, 0);
}
schedule_delayed_work(&cli_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
}
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
{
+ struct amd_input_data *in_data = &privdata->in_data;
struct amdtp_cl_data *cl_data = privdata->cl_data;
struct amd_mp2_sensor_info info;
struct device *dev;
@@ -142,18 +143,16 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
int rc, i;
dev = &privdata->pdev->dev;
- cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
- if (!cl_data)
- return -ENOMEM;
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
INIT_LIST_HEAD(&req_list.list);
+ cl_data->in_data = in_data;
for (i = 0; i < cl_data->num_hid_devices; i++) {
- cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
+ in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
&cl_data->sensor_dma_addr[i],
GFP_KERNEL);
cl_data->sensor_sts[i] = 0;
@@ -175,13 +174,13 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = -EINVAL;
goto cleanup;
}
- cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+ cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
if (!cl_data->feature_report[i]) {
rc = -ENOMEM;
goto cleanup;
}
- cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
- if (!cl_data->input_report[i]) {
+ in_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
+ if (!in_data->input_report[i]) {
rc = -ENOMEM;
goto cleanup;
}
@@ -189,7 +188,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
info.sensor_idx = cl_idx;
info.dma_address = cl_data->sensor_dma_addr[i];
- cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+ cl_data->report_descr[i] =
+ devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
if (!cl_data->report_descr[i]) {
rc = -ENOMEM;
goto cleanup;
@@ -200,47 +200,45 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc)
return rc;
- amd_start_sensor(privdata, info);
+ privdata->mp2_ops->start(privdata, info);
cl_data->sensor_sts[i] = 1;
}
- privdata->cl_data = cl_data;
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;
cleanup:
for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_virt_addr[i]) {
+ if (in_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
- cl_data->sensor_virt_addr[i],
+ in_data->sensor_virt_addr[i],
cl_data->sensor_dma_addr[i]);
}
- kfree(cl_data->feature_report[i]);
- kfree(cl_data->input_report[i]);
- kfree(cl_data->report_descr[i]);
+ devm_kfree(dev, cl_data->feature_report[i]);
+ devm_kfree(dev, in_data->input_report[i]);
+ devm_kfree(dev, cl_data->report_descr[i]);
}
- kfree(cl_data);
return rc;
}
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
struct amdtp_cl_data *cl_data = privdata->cl_data;
+ struct amd_input_data *in_data = cl_data->in_data;
int i;
for (i = 0; i < cl_data->num_hid_devices; i++)
- amd_stop_sensor(privdata, i);
+ privdata->mp2_ops->stop(privdata, i);
cancel_delayed_work_sync(&cl_data->work);
cancel_delayed_work_sync(&cl_data->work_buffer);
amdtp_hid_remove(cl_data);
for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_virt_addr[i]) {
+ if (in_data->sensor_virt_addr[i]) {
dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
- cl_data->sensor_virt_addr[i],
+ in_data->sensor_virt_addr[i],
cl_data->sensor_dma_addr[i]);
}
}
- kfree(cl_data);
return 0;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 4f989483aa03..5ad1e7acd294 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
int i;
for (i = 0; i < cli_data->num_hid_devices; ++i) {
- kfree(cli_data->feature_report[i]);
- kfree(cli_data->input_report[i]);
- kfree(cli_data->report_descr[i]);
if (cli_data->hid_sensor_hubs[i]) {
kfree(cli_data->hid_sensor_hubs[i]->driver_data);
hid_destroy_device(cli_data->hid_sensor_hubs[i]);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index d7eac1728e31..ae2ac9191ba7 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -9,11 +9,16 @@
#ifndef AMDSFH_HID_H
#define AMDSFH_HID_H
-#define MAX_HID_DEVICES 4
+#define MAX_HID_DEVICES 5
#define BUS_AMD_AMDTP 0x20
#define AMD_SFH_HID_VENDOR 0x1022
#define AMD_SFH_HID_PRODUCT 0x0001
+struct amd_input_data {
+ u32 *sensor_virt_addr[MAX_HID_DEVICES];
+ u8 *input_report[MAX_HID_DEVICES];
+};
+
struct amdtp_cl_data {
u8 init_done;
u32 cur_hid_dev;
@@ -26,7 +31,6 @@ struct amdtp_cl_data {
u8 *hid_descr[MAX_HID_DEVICES];
int hid_descr_size[MAX_HID_DEVICES];
phys_addr_t phys_addr_base;
- u32 *sensor_virt_addr[MAX_HID_DEVICES];
dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
u32 sensor_sts[MAX_HID_DEVICES];
u32 sensor_requested_cnt[MAX_HID_DEVICES];
@@ -34,8 +38,8 @@ struct amdtp_cl_data {
u8 report_id[MAX_HID_DEVICES];
u8 sensor_idx[MAX_HID_DEVICES];
u8 *feature_report[MAX_HID_DEVICES];
- u8 *input_report[MAX_HID_DEVICES];
u8 request_done[MAX_HID_DEVICES];
+ struct amd_input_data *in_data;
struct delayed_work work;
struct delayed_work work_buffer;
};
@@ -64,4 +68,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data);
int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type);
void amd_sfh_set_report(struct hid_device *hid, int report_id, int report_type);
void amdtp_hid_wakeup(struct hid_device *hid);
+u8 get_input_report(u8 current_index, int sensor_idx, int report_id,
+ struct amd_input_data *in_data);
#endif
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index ddecc84fd6f0..96e2577fa37e 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -24,12 +24,55 @@
#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
#define MAGNO_EN BIT(2)
+#define HPD_EN BIT(16)
#define ALS_EN BIT(19)
static int sensor_mask_override = -1;
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+{
+ union sfh_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.period = info.period;
+ cmd_base.cmd_v2.sensor_id = info.sensor_idx;
+ cmd_base.cmd_v2.length = 16;
+
+ if (info.sensor_idx == als_idx)
+ cmd_base.cmd_v2.mem_type = USE_C2P_REG;
+
+ writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG1);
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
+static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
+{
+ union sfh_cmd_base cmd_base;
+
+ cmd_base.ul = 0;
+ cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.period = 0;
+ cmd_base.cmd_v2.sensor_id = sensor_idx;
+ cmd_base.cmd_v2.length = 16;
+
+ writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
+static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
+{
+ union sfh_cmd_base cmd_base;
+
+ cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.period = 0;
+ cmd_base.cmd_v2.sensor_id = 0;
+
+ writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -98,7 +141,6 @@ int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
{
int activestatus, num_of_sensors = 0;
const struct dmi_system_id *dmi_id;
- u32 activecontrolstatus;
if (sensor_mask_override == -1) {
dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
@@ -109,8 +151,7 @@ int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
if (sensor_mask_override >= 0) {
activestatus = sensor_mask_override;
} else {
- activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
- activestatus = activecontrolstatus >> 4;
+ activestatus = privdata->mp2_acs >> 4;
}
if (ACEL_EN & activestatus)
@@ -125,13 +166,46 @@ int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
if (ALS_EN & activestatus)
sensor_id[num_of_sensors++] = als_idx;
+ if (HPD_EN & activestatus)
+ sensor_id[num_of_sensors++] = HPD_IDX;
+
return num_of_sensors;
}
static void amd_mp2_pci_remove(void *privdata)
{
+ struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
- amd_stop_all_sensors(privdata);
+ mp2->mp2_ops->stop_all(mp2);
+}
+
+static const struct amd_mp2_ops amd_sfh_ops_v2 = {
+ .start = amd_start_sensor_v2,
+ .stop = amd_stop_sensor_v2,
+ .stop_all = amd_stop_all_sensor_v2,
+};
+
+static const struct amd_mp2_ops amd_sfh_ops = {
+ .start = amd_start_sensor,
+ .stop = amd_stop_sensor,
+ .stop_all = amd_stop_all_sensors,
+};
+
+static void mp2_select_ops(struct amd_mp2_dev *privdata)
+{
+ u8 acs;
+
+ privdata->mp2_acs = readl(privdata->mmio + AMD_P2C_MSG3);
+ acs = privdata->mp2_acs & GENMASK(3, 0);
+
+ switch (acs) {
+ case V2_STATUS:
+ privdata->mp2_ops = &amd_sfh_ops_v2;
+ break;
+ default:
+ privdata->mp2_ops = &amd_sfh_ops;
+ break;
+ }
}
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -160,10 +234,17 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
return rc;
}
+
+ privdata->cl_data = devm_kzalloc(&pdev->dev, sizeof(struct amdtp_cl_data), GFP_KERNEL);
+ if (!privdata->cl_data)
+ return -ENOMEM;
+
rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
if (rc)
return rc;
+ mp2_select_ops(privdata);
+
return amd_sfh_hid_client_init(privdata);
}
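v1 and v2 MP2 firmware use different command layouts and data paths, so probe reads the active-control-status register once, caches it in mp2_acs, and picks an ops table; everything else dispatches through privdata->mp2_ops instead of the hard-coded v1 helpers. A sketch of a call site (hypothetical function, ops names from this patch):

static void example_start_all(struct amd_mp2_dev *mp2,
			      struct amd_mp2_sensor_info *info,
			      int num_sensors)
{
	int i;

	for (i = 0; i < num_sensors; i++)
		mp2->mp2_ops->start(mp2, info[i]);	/* v1 or v2, transparently */
}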
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 489415f7c22c..2d5c57e3782d 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -10,6 +10,7 @@
#define PCIE_MP2_AMD_H
#include <linux/pci.h>
+#include "amd_sfh_hid.h"
#define PCI_DEVICE_ID_AMD_MP2 0x15E4
@@ -22,9 +23,15 @@
#define AMD_C2P_MSG1 0x10504
#define AMD_C2P_MSG2 0x10508
+#define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
+
/* MP2 P2C Message Registers */
#define AMD_P2C_MSG3 0x1068C /* Supported Sensors info */
+#define V2_STATUS 0x2
+
+#define HPD_IDX 16
+
/* SFH Command register */
union sfh_cmd_base {
u32 ul;
@@ -33,6 +40,15 @@ union sfh_cmd_base {
u32 sensor_id : 8;
u32 period : 16;
} s;
+ struct {
+ u32 cmd_id : 4;
+ u32 intr_enable : 1;
+ u32 rsvd1 : 3;
+ u32 length : 7;
+ u32 mem_type : 1;
+ u32 sensor_id : 8;
+ u32 period : 8;
+ } cmd_v2;
};
union sfh_cmd_param {
@@ -61,6 +77,10 @@ struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
void __iomem *mmio;
+ const struct amd_mp2_ops *mp2_ops;
+ struct amd_input_data in_data;
+ /* mp2 active control status */
+ u32 mp2_acs;
};
struct amd_mp2_sensor_info {
@@ -69,10 +89,33 @@ struct amd_mp2_sensor_info {
dma_addr_t dma_address;
};
+enum mem_use_type {
+ USE_DRAM,
+ USE_C2P_REG,
+};
+
+struct hpd_status {
+ union {
+ struct {
+ u32 human_presence_report : 4;
+ u32 human_presence_actual : 4;
+ u32 probablity : 8;
+ u32 object_distance : 16;
+ } shpd;
+ u32 val;
+ };
+};
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx);
void amd_stop_all_sensors(struct amd_mp2_dev *privdata);
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id);
int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata);
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata);
+
+struct amd_mp2_ops {
+ void (*start)(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
+ void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
+ void (*stop_all)(struct amd_mp2_dev *privdata);
+};
#endif
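hpd_status overlays a bitfield view on the raw word read from AMD_C2P_MSG(4), so one readl() yields all of the presence fields. A worked decode, assuming the usual LSB-first bitfield layout on x86 (the raw value is made up):

static void example_decode_hpd(u32 raw)
{
	struct hpd_status st = { .val = raw };

	/*
	 * raw = 0x00140021 decodes as:
	 *   human_presence_report = 0x1    (bits 0-3)
	 *   human_presence_actual = 0x2    (bits 4-7)
	 *   probablity (sic)      = 0x00   (bits 8-15)
	 *   object_distance       = 0x0014 (bits 16-31)
	 */
	pr_debug("presence=%u distance=%u\n",
		 st.shpd.human_presence_actual, st.shpd.object_distance);
}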
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index 6e3ad66e57a4..0c3697219382 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -12,6 +12,7 @@
#include "amd_sfh_pcie.h"
#include "amd_sfh_hid_desc.h"
#include "amd_sfh_hid_report_desc.h"
+#include "amd_sfh_hid.h"
#define AMD_SFH_FW_MULTIPLIER (1000)
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x41
@@ -49,6 +50,11 @@ int get_report_descriptor(int sensor_idx, u8 *rep_desc)
memcpy(rep_desc, als_report_descriptor,
sizeof(als_report_descriptor));
break;
+ case HPD_IDX: /* HPD sensor */
+ memset(rep_desc, 0, sizeof(hpd_report_descriptor));
+ memcpy(rep_desc, hpd_report_descriptor,
+ sizeof(hpd_report_descriptor));
+ break;
default:
break;
}
@@ -98,6 +104,17 @@ u32 get_descr_sz(int sensor_idx, int descriptor_name)
return sizeof(struct als_feature_report);
}
break;
+ case HPD_IDX:
+ switch (descriptor_name) {
+ case descr_size:
+ return sizeof(hpd_report_descriptor);
+ case input_size:
+ return sizeof(struct hpd_input_report);
+ case feature_size:
+ return sizeof(struct hpd_feature_report);
+ }
+ break;
+
default:
break;
}
@@ -119,6 +136,7 @@ u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report)
struct accel3_feature_report acc_feature;
struct gyro_feature_report gyro_feature;
struct magno_feature_report magno_feature;
+ struct hpd_feature_report hpd_feature;
struct als_feature_report als_feature;
u8 report_size = 0;
@@ -161,6 +179,12 @@ u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report)
memcpy(feature_report, &als_feature, sizeof(als_feature));
report_size = sizeof(als_feature);
break;
+ case HPD_IDX: /* human presence detection sensor */
+ get_common_features(&hpd_feature.common_property, report_id);
+ memcpy(feature_report, &hpd_feature, sizeof(hpd_feature));
+ report_size = sizeof(hpd_feature);
+ break;
+
default:
break;
}
@@ -174,12 +198,18 @@ static void get_common_inputs(struct common_input_property *common, int report_i
common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
}
-u8 get_input_report(int sensor_idx, int report_id, u8 *input_report, u32 *sensor_virt_addr)
+u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_input_data *in_data)
{
+ struct amd_mp2_dev *privdata = container_of(in_data, struct amd_mp2_dev, in_data);
+ u32 *sensor_virt_addr = in_data->sensor_virt_addr[current_index];
+ u8 *input_report = in_data->input_report[current_index];
+ u8 supported_input = privdata->mp2_acs & GENMASK(3, 0);
+ struct magno_input_report magno_input;
struct accel3_input_report acc_input;
struct gyro_input_report gyro_input;
- struct magno_input_report magno_input;
+ struct hpd_input_report hpd_input;
struct als_input_report als_input;
+ struct hpd_status hpdstatus;
u8 report_size = 0;
if (!sensor_virt_addr || !input_report)
@@ -213,10 +243,22 @@ u8 get_input_report(int sensor_idx, int report_id, u8 *input_report, u32 *sensor
break;
case als_idx: /* Als */
get_common_inputs(&als_input.common_property, report_id);
- als_input.illuminance_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
+		/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
+ if (supported_input == V2_STATUS)
+ als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ else
+ als_input.illuminance_value =
+ (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
report_size = sizeof(als_input);
memcpy(input_report, &als_input, sizeof(als_input));
break;
+ case HPD_IDX: /* hpd */
+ get_common_inputs(&hpd_input.common_property, report_id);
+ hpdstatus.val = readl(privdata->mmio + AMD_C2P_MSG(4));
+ hpd_input.human_presence = hpdstatus.shpd.human_presence_actual;
+ report_size = sizeof(hpd_input);
+ memcpy(input_report, &hpd_input, sizeof(hpd_input));
+ break;
default:
break;
}
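
The HPD case above reads a packed status word from AMD_C2P_MSG(4) and extracts a single presence bit through a bitfield view. A minimal sketch of the union this implies (the authoritative definition lives in the SFH PCIe header, amd_sfh_pcie.h; the field widths here are illustrative assumptions):

struct hpd_status {
	union {
		struct {
			u32 human_presence_report : 4;
			u32 human_presence_actual : 4; /* consumed above */
			u32 probability           : 8;
			u32 object_distance       : 16;
		} shpd;
		u32 val; /* raw AMD_C2P_MSG(4) register value */
	};
};
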
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
index 095c471d8fd6..16f563d1823b 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.h
@@ -100,8 +100,17 @@ struct als_input_report {
int illuminance_value;
} __packed;
+struct hpd_feature_report {
+ struct common_feature_property common_property;
+} __packed;
+
+struct hpd_input_report {
+ struct common_input_property common_property;
+ /* values specific to human presence sensor */
+ u8 human_presence;
+} __packed;
+
int get_report_descriptor(int sensor_idx, u8 rep_desc[]);
u32 get_descr_sz(int sensor_idx, int descriptor_name);
u8 get_feature_report(int sensor_idx, int report_id, u8 *feature_report);
-u8 get_input_report(int sensor_idx, int report_id, u8 *input_report, u32 *sensor_virt_addr);
#endif
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
index 44271d39b322..66d6b26e4708 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_report_desc.h
@@ -642,4 +642,116 @@ const u8 als_report_descriptor[] = {
0X81, 0x02, /* HID Input (Data_Var_Abs) */
0xC0 /* HID end collection */
};
+
+/* BIOMETRIC PRESENCE */
+static const u8 hpd_report_descriptor[] = {
+0x05, 0x20, /* Usage page */
+0x09, 0x11, /* BIOMETRIC PRESENCE */
+0xA1, 0x00, /* HID Collection (Physical) */
+
+/* Feature reports (xmit/receive) */
+0x85, 5, /* HID Report ID */
+0x05, 0x20, /* HID usage page sensor */
+0x0A, 0x09, 0x03, /* Sensor property and sensor connection type */
+0x15, 0, /* HID logical MIN_8(0) */
+0x25, 2, /* HID logical MAX_8(2) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x30, 0x08, /* Sensor property connection type integrated sel */
+0x0A, 0x31, 0x08, /* Sensor property connection type attached sel */
+0x0A, 0x32, 0x08, /* Sensor property connection type external sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x16, 0x03, /* HID usage sensor property reporting state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x40, 0x08, /* Sensor property report state no events sel */
+0x0A, 0x41, 0x08, /* Sensor property report state all events sel */
+0x0A, 0x42, 0x08, /* Sensor property report state threshold events sel */
+0x0A, 0x43, 0x08, /* Sensor property report state no events wake sel */
+0x0A, 0x44, 0x08, /* Sensor property report state all events wake sel */
+0x0A, 0x45, 0x08, /* Sensor property report state threshold events wake sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x19, 0x03, /* HID usage sensor property power state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x50, 0x08, /* Sensor property power state undefined sel */
+0x0A, 0x51, 0x08, /* Sensor property power state D0 full power sel */
+0x0A, 0x52, 0x08, /* Sensor property power state D1 low power sel */
+0x0A, 0x53, 0x08, /* Sensor property power state D2 standby with wake sel */
+0x0A, 0x54, 0x08, /* Sensor property power state D3 sleep with wake sel */
+0x0A, 0x55, 0x08, /* Sensor property power state D4 power off sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count(1) */
+0xA1, 0x02, /* HID collection(logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0xB1, 0x00, /* HID feature (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x0E, 0x03, /* HID usage sensor property report interval */
+0x15, 0, /* HID logical Min_8(0) */
+0x27, 0xFF, 0xFF, 0xFF, 0xFF, /* HID logical Max_32 */
+
+0x75, 32, /* HID report size(32) */
+0x95, 1, /* HID report count(1) */
+0x55, 0, /* HID unit exponent(0) */
+0xB1, 0x02, /* HID feature (Data_Var_Abs) */
+
+/* Input report (transmit) */
+0x05, 0x20, /* HID usage page sensors */
+0x0A, 0x01, 0x02, /* HID usage sensor state */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 6, /* HID logical Max_8(6) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x00, 0x08, /* HID usage sensor state unknown sel */
+0x0A, 0x01, 0x08, /* HID usage sensor state ready sel */
+0x0A, 0x02, 0x08, /* HID usage sensor state not available sel */
+0x0A, 0x03, 0x08, /* HID usage sensor state no data sel */
+0x0A, 0x04, 0x08, /* HID usage sensor state initializing sel */
+0x0A, 0x05, 0x08, /* HID usage sensor state access denied sel */
+0x0A, 0x06, 0x08, /* HID usage sensor state error sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0x02, 0x02, /* HID usage sensor event */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 5, /* HID logical Max_8(5) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0xA1, 0x02, /* HID collection (logical) */
+0x0A, 0x10, 0x08, /* HID usage sensor event unknown sel */
+0x0A, 0x11, 0x08, /* HID usage sensor event state changed sel */
+0x0A, 0x12, 0x08, /* HID usage sensor event property changed sel */
+0x0A, 0x13, 0x08, /* HID usage sensor event data updated sel */
+0x0A, 0x14, 0x08, /* HID usage sensor event poll response sel */
+0x0A, 0x15, 0x08, /* HID usage sensor event change sensitivity sel */
+0X81, 0x00, /* HID Input (Data_Arr_Abs) */
+0xC0, /* HID end collection */
+0x0A, 0xB1, 0x04, /* HID usage sensor data BIOMETRIC HUMAN PRESENCE */
+0x15, 0, /* HID logical Min_8(0) */
+0x25, 1, /* HID logical Max_8(1) */
+0x75, 8, /* HID report size(8) */
+0x95, 1, /* HID report count (1) */
+0X81, 0x02, /* HID Input (Data_Var_Abs) */
+0xC0 /* HID end collection */
+};
#endif
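
As a quick functional check of the descriptor above, a userspace sketch that reads raw HPD input reports from a hidraw node; the device path is hypothetical and the presence flag is assumed to sit in the last byte of the report, matching struct hpd_input_report:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	ssize_t n;
	int fd = open("/dev/hidraw0", O_RDONLY); /* hypothetical node */

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		/* trailing byte mirrors hpd_input_report.human_presence */
		printf("human presence: %u\n", buf[n - 1]);
	close(fd);
	return 0;
}
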
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 3a8c4a5971f7..2cbc32dda7f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+ .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 2ab22b925941..fca8fc78a78a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_T100_KEYBOARD BIT(6)
#define QUIRK_T100CHI BIT(7)
#define QUIRK_G752_KEYBOARD BIT(8)
-#define QUIRK_T101HA_DOCK BIT(9)
-#define QUIRK_T90CHI BIT(10)
-#define QUIRK_MEDION_E1239T BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD BIT(12)
+#define QUIRK_T90CHI BIT(9)
+#define QUIRK_MEDION_E1239T BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_MEDION_E1239T)
return asus_e1239t_event(drvdata, data, size);
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
/*
* Skip these report IDs; the device emits a continuous stream associated
* with the AURA mode it is in, which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
return -1;
}
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ /*
+ * G713 and G733 send these codes on some keypresses; depending on
+ * the key pressed, they can trigger a shutdown event if not caught.
+ */
+ if (data[0] == 0x02 && data[1] == 0x30) {
+ return -1;
+ }
+ }
+
}
return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- /* use hid-multitouch for T101HA touchpad */
- if (id->driver_data & QUIRK_T101HA_DOCK &&
- hdev->collection->usage == HID_GD_MOUSE)
- return -ENODEV;
-
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
QUIRK_MEDION_E1239T },
+ /*
+ * Note: we bind to the HID_GROUP_GENERIC group so that we only bind to the
+ * keyboard part, while letting hid-multitouch.c handle the touchpad.
+ */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0ae9f6df59d1..7db332139f7d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
case BUS_I2C:
bus = "I2C";
break;
+ case BUS_VIRTUAL:
+ bus = "VIRTUAL";
+ break;
default:
bus = "<UNKNOWN>";
}
@@ -2303,12 +2306,8 @@ static int hid_device_remove(struct device *dev)
{
struct hid_device *hdev = to_hid_device(dev);
struct hid_driver *hdrv;
- int ret = 0;
- if (down_interruptible(&hdev->driver_input_lock)) {
- ret = -EINTR;
- goto end;
- }
+ down(&hdev->driver_input_lock);
hdev->io_started = false;
hdrv = hdev->driver;
@@ -2323,8 +2322,8 @@ static int hid_device_remove(struct device *dev)
if (!hdev->io_started)
up(&hdev->driver_input_lock);
-end:
- return ret;
+
+ return 0;
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
@@ -2588,7 +2587,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
return 0;
}
-
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
static int __init hid_init(void)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 59f8d716d78f..fa57d05badf7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -122,6 +122,7 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 9, 0, "Button" },
{ 10, 0, "Ordinal" },
{ 12, 0, "Consumer" },
+ {0, 0x003, "ProgrammableButtons"},
{0, 0x238, "HorizontalWheel"},
{ 13, 0, "Digitizers" },
{0, 0x01, "Digitizer"},
@@ -930,6 +931,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_APPSELECT] = "AppSelect",
[KEY_SCREENSAVER] = "ScreenSaver",
[KEY_VOICECOMMAND] = "VoiceCommand",
+ [KEY_ASSISTANT] = "Assistant",
+ [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+ [KEY_EMOJI_PICKER] = "EmojiPicker",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
@@ -939,6 +943,16 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
[KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
[KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
+ [KEY_MACRO1] = "Macro1", [KEY_MACRO2] = "Macro2", [KEY_MACRO3] = "Macro3",
+ [KEY_MACRO4] = "Macro4", [KEY_MACRO5] = "Macro5", [KEY_MACRO6] = "Macro6",
+ [KEY_MACRO7] = "Macro7", [KEY_MACRO8] = "Macro8", [KEY_MACRO9] = "Macro9",
+ [KEY_MACRO10] = "Macro10", [KEY_MACRO11] = "Macro11", [KEY_MACRO12] = "Macro12",
+ [KEY_MACRO13] = "Macro13", [KEY_MACRO14] = "Macro14", [KEY_MACRO15] = "Macro15",
+ [KEY_MACRO16] = "Macro16", [KEY_MACRO17] = "Macro17", [KEY_MACRO18] = "Macro18",
+ [KEY_MACRO19] = "Macro19", [KEY_MACRO20] = "Macro20", [KEY_MACRO21] = "Macro21",
+ [KEY_MACRO22] = "Macro22", [KEY_MACRO23] = "Macro23", [KEY_MACRO24] = "Macro24",
+ [KEY_MACRO25] = "Macro25", [KEY_MACRO26] = "Macro26", [KEY_MACRO27] = "Macro27",
+ [KEY_MACRO28] = "Macro28", [KEY_MACRO29] = "Macro29", [KEY_MACRO30] = "Macro30",
};
static const char *relatives[REL_MAX + 1] = {
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index a5751607ce24..f43a8406cb9a 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
u8 address; /* 7-bit I2C address */
u8 flag; /* I2C transaction condition */
u8 length; /* data payload length */
- u8 data[60]; /* data payload */
+ u8 data[FT260_WR_DATA_MAX]; /* data payload */
} __packed;
struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- memcpy(data, buf, len);
+ if (likely(ret == len))
+ memcpy(data, buf, len);
+ else if (ret >= 0)
+ ret = -EIO;
kfree(buf);
return ret;
}
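
With the length check folded into ft260_hid_feature_report_get(), every call site collapses to the same pattern, which the hunks below apply mechanically; a sketch of the convention:

	ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
					   (u8 *)&report, sizeof(report));
	if (ret < 0)	/* short reads are already mapped to -EIO */
		return ret;
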
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
(u8 *)&report, sizeof(report));
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
hid_err(hdev, "failed to retrieve status: %d\n", ret);
return ret;
}
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
struct ft260_i2c_write_request_report *rep =
(struct ft260_i2c_write_request_report *)dev->write_buf;
+ if (data_len >= sizeof(rep->data))
+ return -EINVAL;
+
rep->address = addr;
rep->data[0] = cmd;
rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
(u8 *)cfg, len);
- if (ret != len) {
+ if (ret < 0) {
hid_err(hdev, "failed to retrieve system status\n");
- if (ret >= 0)
- return -EIO;
+ return ret;
}
return 0;
}
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
int ret;
ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
- if (ret != len && ret >= 0)
- return -EIO;
+ if (ret < 0)
+ return ret;
return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
}
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
int ret;
ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
- if (ret != len && ret >= 0)
- return -EIO;
+ if (ret < 0)
+ return ret;
return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
}
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
(u8 *)&version, sizeof(version));
- if (ret != sizeof(version)) {
+ if (ret < 0) {
hid_err(hdev, "failed to retrieve chip version\n");
- if (ret >= 0)
- ret = -EIO;
goto err_hid_close;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index e60c31dd05ff..8123b871a3eb 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -17,6 +17,7 @@
#include <linux/hid.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -272,12 +273,21 @@ static const struct acpi_device_id cbas_ec_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+#ifdef CONFIG_OF
+static const struct of_device_id cbas_ec_of_match[] = {
+ { .compatible = "google,cros-cbas" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cbas_ec_of_match);
+#endif
+
static struct platform_driver cbas_ec_driver = {
.probe = cbas_ec_probe,
.remove = cbas_ec_remove,
.driver = {
.name = "cbas_ec",
.acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
+ .of_match_table = of_match_ptr(cbas_ec_of_match),
.pm = &cbas_ec_pm_ops,
},
};
diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
index 898871c8c768..29ccb0accfba 100644
--- a/drivers/hid/hid-gt683r.c
+++ b/drivers/hid/hid-gt683r.c
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ }
};
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
static void gt683r_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 84b8da3e7d09..8f1893e68112 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -26,6 +26,7 @@
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a
#define USB_DEVICE_ID_A4TECH_RP_649 0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95 0x022b
#define USB_VENDOR_ID_AASHIMA 0x06d6
#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025
@@ -299,8 +300,6 @@
#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
-
-#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K70R 0x1b09
#define USB_DEVICE_ID_CORSAIR_K95RGB 0x1b11
#define USB_DEVICE_ID_CORSAIR_M65RGB 0x1b12
@@ -397,6 +396,7 @@
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
+#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -751,6 +751,7 @@
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
@@ -763,6 +764,7 @@
#define I2C_DEVICE_ID_LG_7010 0x7010
#define USB_VENDOR_ID_LOGITECH 0x046d
+#define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
@@ -1051,6 +1053,7 @@
#define USB_DEVICE_ID_SAITEK_X52 0x075c
#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
+#define USB_DEVICE_ID_SAITEK_X65 0x0b6a
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
@@ -1060,6 +1063,9 @@
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2 0x0027
+#define USB_VENDOR_ID_SEMITEK 0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
#define USB_VENDOR_ID_SENNHEISER 0x1395
#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
@@ -1161,6 +1167,7 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A 0x6e21
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 18f5e28d475c..4286a51f7f16 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -326,6 +326,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -567,6 +569,16 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
}
#endif /* CONFIG_HID_BATTERY_STRENGTH */
+static bool hidinput_field_in_collection(struct hid_device *device, struct hid_field *field,
+ unsigned int type, unsigned int usage)
+{
+ struct hid_collection *collection;
+
+ collection = &device->collection[field->usage->collection_index];
+
+ return collection->type == type && collection->usage == usage;
+}
+
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage)
{
@@ -632,6 +644,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
else
code += BTN_TRIGGER_HAPPY - 0x10;
break;
+ case HID_CP_CONSUMER_CONTROL:
+ if (hidinput_field_in_collection(device, field,
+ HID_COLLECTION_NAMED_ARRAY,
+ HID_CP_PROGRAMMABLEBUTTONS)) {
+ if (code <= 0x1d)
+ code += KEY_MACRO1;
+ else
+ code += BTN_TRIGGER_HAPPY - 0x1e;
+ } else {
+ goto ignore;
+ }
+ break;
default:
switch (field->physical) {
case HID_GD_MOUSE:
@@ -964,6 +988,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+
+ case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
+
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
case 0x0e2: map_key_clear(KEY_MUTE); break;
case 0x0e5: map_key_clear(KEY_BASSBOOST); break;
@@ -1313,12 +1340,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
- if (usage->hid == (HID_UP_DIGITIZER | 0x003c)) { /* Invert */
+ if (usage->hid == HID_DG_INVERT) {
*quirks = value ? (*quirks | HID_QUIRK_INVERT) : (*quirks & ~HID_QUIRK_INVERT);
return;
}
- if (usage->hid == (HID_UP_DIGITIZER | 0x0032)) { /* InRange */
+ if (usage->hid == HID_DG_INRANGE) {
if (value) {
input_event(input, usage->type, (*quirks & HID_QUIRK_INVERT) ? BTN_TOOL_RUBBER : usage->code, 1);
return;
@@ -1328,7 +1355,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
- if (usage->hid == (HID_UP_DIGITIZER | 0x0030) && (*quirks & HID_QUIRK_NOTOUCH)) { /* Pressure */
+ if (usage->hid == HID_DG_TIPPRESSURE && (*quirks & HID_QUIRK_NOTOUCH)) {
int a = field->logical_minimum;
int b = field->logical_maximum;
input_event(input, EV_KEY, BTN_TOUCH, value > a + ((b - a) >> 3));
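
The Programmable Buttons branch added above is a simple split mapping. A standalone restatement, assuming code holds the zero-based usage index and the key codes come from linux/input-event-codes.h:

/* Indices 0x00..0x1d land in KEY_MACRO1..KEY_MACRO30 (the names
 * added to hid-debug.c above); anything higher spills into the
 * BTN_TRIGGER_HAPPY range.
 */
static unsigned int map_programmable_button(unsigned int code)
{
	if (code <= 0x1d)
		return KEY_MACRO1 + code;
	return BTN_TRIGGER_HAPPY + (code - 0x1e);
}
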
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 14fc068affad..430fa4f52ed3 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -135,4 +135,5 @@ static struct hid_driver ite_driver = {
};
module_hid_driver(ite_driver);
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index bfbba0d41933..b2a08233f8d5 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -28,6 +28,7 @@ enum lg_g15_model {
LG_G15_V2,
LG_G510,
LG_G510_USB_AUDIO,
+ LG_Z10,
};
enum lg_g15_led_type {
@@ -457,6 +458,13 @@ static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
return ret;
return lg_g510_update_mkey_led_brightness(g15);
+ case LG_Z10:
+ /*
+ * Getting the LCD backlight brightness is not supported.
+ * Reading Feature(2) fails with -EPIPE and this crashes
+ * the LCD and touch keys part of the speakers.
+ */
+ return 0;
}
return -EINVAL; /* Never reached */
}
@@ -464,7 +472,20 @@ static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
/******** Input functions ********/
/* On the G15 Mark I Logitech has been quite creative with which bit is what */
-static int lg_g15_event(struct lg_g15_data *g15, u8 *data, int size)
+static void lg_g15_handle_lcd_menu_keys(struct lg_g15_data *g15, u8 *data)
+{
+ int i, val;
+
+ /* Most left (round/display) button below the LCD */
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[8] & 0x80);
+ /* 4 other buttons below the LCD */
+ for (i = 0; i < 4; i++) {
+ val = data[i + 2] & 0x80;
+ input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
+ }
+}
+
+static int lg_g15_event(struct lg_g15_data *g15, u8 *data)
{
int i, val;
@@ -494,13 +515,7 @@ static int lg_g15_event(struct lg_g15_data *g15, u8 *data, int size)
/* MR */
input_report_key(g15->input, KEY_MACRO_RECORD_START, data[7] & 0x40);
- /* Most left (round) button below the LCD */
- input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[8] & 0x80);
- /* 4 other buttons below the LCD */
- for (i = 0; i < 4; i++) {
- val = data[i + 2] & 0x80;
- input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
- }
+ lg_g15_handle_lcd_menu_keys(g15, data);
/* Backlight cycle button pressed? */
if (data[1] & 0x80)
@@ -510,7 +525,7 @@ static int lg_g15_event(struct lg_g15_data *g15, u8 *data, int size)
return 0;
}
-static int lg_g15_v2_event(struct lg_g15_data *g15, u8 *data, int size)
+static int lg_g15_v2_event(struct lg_g15_data *g15, u8 *data)
{
int i, val;
@@ -542,7 +557,7 @@ static int lg_g15_v2_event(struct lg_g15_data *g15, u8 *data, int size)
return 0;
}
-static int lg_g510_event(struct lg_g15_data *g15, u8 *data, int size)
+static int lg_g510_event(struct lg_g15_data *g15, u8 *data)
{
bool game_mode_enabled;
int i, val;
@@ -586,7 +601,7 @@ static int lg_g510_event(struct lg_g15_data *g15, u8 *data, int size)
return 0;
}
-static int lg_g510_leds_event(struct lg_g15_data *g15, u8 *data, int size)
+static int lg_g510_leds_event(struct lg_g15_data *g15, u8 *data)
{
bool backlight_disabled;
@@ -613,18 +628,24 @@ static int lg_g15_raw_event(struct hid_device *hdev, struct hid_report *report,
switch (g15->model) {
case LG_G15:
if (data[0] == 0x02 && size == 9)
- return lg_g15_event(g15, data, size);
+ return lg_g15_event(g15, data);
break;
case LG_G15_V2:
if (data[0] == 0x02 && size == 5)
- return lg_g15_v2_event(g15, data, size);
+ return lg_g15_v2_event(g15, data);
+ break;
+ case LG_Z10:
+ if (data[0] == 0x02 && size == 9) {
+ lg_g15_handle_lcd_menu_keys(g15, data);
+ input_sync(g15->input);
+ }
break;
case LG_G510:
case LG_G510_USB_AUDIO:
if (data[0] == 0x03 && size == 5)
- return lg_g510_event(g15, data, size);
+ return lg_g510_event(g15, data);
if (data[0] == 0x04 && size == 2)
- return lg_g510_leds_event(g15, data, size);
+ return lg_g510_leds_event(g15, data);
break;
}
@@ -645,25 +666,18 @@ static void lg_g15_input_close(struct input_dev *dev)
hid_hw_close(hdev);
}
-static int lg_g15_register_led(struct lg_g15_data *g15, int i)
+static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
{
- static const char * const led_names[] = {
- "g15::kbd_backlight",
- "g15::lcd_backlight",
- "g15::macro_preset1",
- "g15::macro_preset2",
- "g15::macro_preset3",
- "g15::macro_record",
- };
-
g15->leds[i].led = i;
- g15->leds[i].cdev.name = led_names[i];
+ g15->leds[i].cdev.name = name;
switch (g15->model) {
case LG_G15:
case LG_G15_V2:
- g15->leds[i].cdev.brightness_set_blocking = lg_g15_led_set;
g15->leds[i].cdev.brightness_get = lg_g15_led_get;
+ fallthrough;
+ case LG_Z10:
+ g15->leds[i].cdev.brightness_set_blocking = lg_g15_led_set;
if (i < LG_G15_BRIGHTNESS_MAX) {
g15->leds[i].cdev.flags = LED_BRIGHT_HW_CHANGED;
g15->leds[i].cdev.max_brightness = 2;
@@ -702,8 +716,38 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i)
return devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
}
+/* Common input device init code shared between keyboards and Z-10 speaker handling */
+static void lg_g15_init_input_dev(struct hid_device *hdev, struct input_dev *input,
+ const char *name)
+{
+ int i;
+
+ input->name = name;
+ input->phys = hdev->phys;
+ input->uniq = hdev->uniq;
+ input->id.bustype = hdev->bus;
+ input->id.vendor = hdev->vendor;
+ input->id.product = hdev->product;
+ input->id.version = hdev->version;
+ input->dev.parent = &hdev->dev;
+ input->open = lg_g15_input_open;
+ input->close = lg_g15_input_close;
+
+ /* Keys below the LCD, intended for controlling a menu on the LCD */
+ for (i = 0; i < 5; i++)
+ input_set_capability(input, EV_KEY, KEY_KBD_LCD_MENU1 + i);
+}
+
static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
+ static const char * const led_names[] = {
+ "g15::kbd_backlight",
+ "g15::lcd_backlight",
+ "g15::macro_preset1",
+ "g15::macro_preset2",
+ "g15::macro_preset3",
+ "g15::macro_record",
+ };
u8 gkeys_settings_output_report = 0;
u8 gkeys_settings_feature_report = 0;
struct hid_report_enum *rep_enum;
@@ -744,6 +788,8 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
g15->hdev = hdev;
g15->model = id->driver_data;
+ g15->input = input;
+ input_set_drvdata(input, hdev);
hid_set_drvdata(hdev, (void *)g15);
switch (g15->model) {
@@ -772,6 +818,9 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
gkeys_settings_feature_report = 0x01;
gkeys = 18;
break;
+ case LG_Z10:
+ connect_mask = HID_CONNECT_HIDRAW;
+ break;
}
ret = hid_hw_start(hdev, connect_mask);
@@ -814,17 +863,21 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto error_hw_stop;
+ if (g15->model == LG_Z10) {
+ lg_g15_init_input_dev(hdev, g15->input, "Logitech Z-10 LCD Menu Keys");
+ ret = input_register_device(g15->input);
+ if (ret)
+ goto error_hw_stop;
+
+ ret = lg_g15_register_led(g15, 1, "z-10::lcd_backlight");
+ if (ret)
+ goto error_hw_stop;
+
+ return 0; /* All done */
+ }
+
/* Setup and register input device */
- input->name = "Logitech Gaming Keyboard Gaming Keys";
- input->phys = hdev->phys;
- input->uniq = hdev->uniq;
- input->id.bustype = hdev->bus;
- input->id.vendor = hdev->vendor;
- input->id.product = hdev->product;
- input->id.version = hdev->version;
- input->dev.parent = &hdev->dev;
- input->open = lg_g15_input_open;
- input->close = lg_g15_input_close;
+ lg_g15_init_input_dev(hdev, input, "Logitech Gaming Keyboard Gaming Keys");
/* G-keys */
for (i = 0; i < gkeys; i++)
@@ -835,10 +888,6 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
input_set_capability(input, EV_KEY, KEY_MACRO_PRESET1 + i);
input_set_capability(input, EV_KEY, KEY_MACRO_RECORD_START);
- /* Keys below the LCD, intended for controlling a menu on the LCD */
- for (i = 0; i < 5; i++)
- input_set_capability(input, EV_KEY, KEY_KBD_LCD_MENU1 + i);
-
/*
* On the G510 only report headphone and mic mute keys when *not* using
* the builtin USB audio device. When the builtin audio is used these
@@ -850,16 +899,13 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
input_set_capability(input, EV_KEY, KEY_F20);
}
- g15->input = input;
- input_set_drvdata(input, hdev);
-
ret = input_register_device(input);
if (ret)
goto error_hw_stop;
/* Register LED devices */
for (i = 0; i < LG_G15_LED_MAX; i++) {
- ret = lg_g15_register_led(g15, i);
+ ret = lg_g15_register_led(g15, i, led_names[i]);
if (ret)
goto error_hw_stop;
}
@@ -890,6 +936,10 @@ static const struct hid_device_id lg_g15_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO),
.driver_data = LG_G510_USB_AUDIO },
+ /* Z-10 speakers */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_Z_10_SPK),
+ .driver_data = LG_Z10 },
{ }
};
MODULE_DEVICE_TABLE(hid, lg_g15_devices);
@@ -902,4 +952,5 @@ static struct hid_driver lg_g15_driver = {
};
module_hid_driver(lg_g15_driver);
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index fa835d565982..a0017b010c34 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -100,6 +100,7 @@
#define HIDPP_DEVICE_TYPE_MASK GENMASK(3, 0)
#define HIDPP_LINK_STATUS_MASK BIT(6)
#define HIDPP_MANUFACTURER_MASK BIT(7)
+#define HIDPP_27MHZ_SECURE_MASK BIT(7)
#define HIDPP_DEVICE_TYPE_KEYBOARD 1
#define HIDPP_DEVICE_TYPE_MOUSE 2
@@ -984,6 +985,13 @@ static void logi_hidpp_dev_conn_notif_27mhz(struct hid_device *hdev,
workitem->reports_supported |= STD_MOUSE | HIDPP;
break;
case 3: /* Index 3 is always the keyboard */
+ if (hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] & HIDPP_27MHZ_SECURE_MASK) {
+ hid_info(hdev, "Keyboard connection is encrypted\n");
+ } else {
+ hid_warn(hdev, "Keyboard events are send over the air in plain-text / unencrypted\n");
+ hid_warn(hdev, "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n");
+ }
+ fallthrough;
case 4: /* Index 4 is used for an optional separate numpad */
workitem->device_type = HIDPP_DEVICE_TYPE_KEYBOARD;
workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
@@ -1489,6 +1497,13 @@ static void logi_dj_ll_stop(struct hid_device *hid)
dbg_hid("%s\n", __func__);
}
+static bool logi_dj_ll_may_wakeup(struct hid_device *hid)
+{
+ struct dj_device *djdev = hid->driver_data;
+ struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev;
+
+ return hid_hw_may_wakeup(djrcv_dev->hidpp);
+}
static struct hid_ll_driver logi_dj_ll_driver = {
.parse = logi_dj_ll_parse,
@@ -1497,6 +1512,7 @@ static struct hid_ll_driver logi_dj_ll_driver = {
.open = logi_dj_ll_open,
.close = logi_dj_ll_close,
.raw_request = logi_dj_ll_raw_request,
+ .may_wakeup = logi_dj_ll_may_wakeup,
};
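
The new may_wakeup callback lets a child DJ device defer the wakeup decision to its receiver. For context, the core helper it plugs into resolves roughly like this sketch (paraphrased from hid.h, not verbatim):

static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
	if (hdev->ll_driver->may_wakeup)
		return hdev->ll_driver->may_wakeup(hdev);
	if (hdev->dev.parent)
		return device_may_wakeup(hdev->dev.parent);
	return false;
}
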
static int logi_dj_dj_event(struct hid_device *hdev,
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d598094dadd0..61635e629469 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -56,6 +56,8 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
#define HIDPP_SUB_ID_ROLLER 0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
+#define HIDPP_SUB_ID_USER_IFACE_EVENT 0x08
+#define HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST BIT(5)
#define HIDPP_QUIRK_CLASS_WTP BIT(0)
#define HIDPP_QUIRK_CLASS_M560 BIT(1)
@@ -1263,6 +1265,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
int status;
long flags = (long) data[2];
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
if (flags & 0x80)
switch (flags & 0x07) {
@@ -3528,6 +3531,16 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return 1;
}
+ if (hidpp->hid_dev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ data[0] == REPORT_ID_HIDPP_SHORT &&
+ data[2] == HIDPP_SUB_ID_USER_IFACE_EVENT &&
+ (data[3] & HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST)) {
+ dev_err_ratelimited(&hidpp->hid_dev->dev,
+ "Error the keyboard's wireless encryption key has been lost, your keyboard will not work unless you re-configure encryption.\n");
+ dev_err_ratelimited(&hidpp->hid_dev->dev,
+ "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n");
+ }
+
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
ret = hidpp20_battery_event_1000(hidpp, data, size);
if (ret != 0)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 2bb473d8c424..8bcaee4ccae0 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->type != HID_TYPE_USBMOUSE)
- return 0;
+ return -ENODEV;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
static void magicmouse_remove(struct hid_device *hdev)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- cancel_delayed_work_sync(&msc->work);
+
+ if (msc)
+ cancel_delayed_work_sync(&msc->work);
+
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9d9f3e1bd5f4..3ea7cb1cda84 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP BIT(21)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
/* reserved 0x0014 */
#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
MT_QUIRK_WIN8_PTP_BUTTONS |
MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
+ { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_DISABLE_WAKEUP,
+ .export_all_inputs = true },
/*
* vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
continue;
- for (n = 0; n < field->report_count; n++) {
- if (field->usage[n].hid == HID_DG_CONTACTID)
- rdata->is_mt_collection = true;
+ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+ for (n = 0; n < field->report_count; n++) {
+ if (field->usage[n].hid == HID_DG_CONTACTID) {
+ rdata->is_mt_collection = true;
+ break;
+ }
+ }
}
}
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
case HID_DG_CONFIDENCE:
if ((cls->name == MT_CLS_WIN_8 ||
- cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+ cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
/* we do not set suffix = "Touchscreen" */
hi->input->name = hdev->name;
break;
- case HID_DG_STYLUS:
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
suffix = "Custom Media Keys";
break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ fallthrough;
case HID_DG_PEN:
suffix = "Stylus";
break;
@@ -1749,8 +1765,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
#ifdef CONFIG_PM
static int mt_suspend(struct hid_device *hdev, pm_message_t state)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
/* High latency is desirable for power savings during S3/S0ix */
- mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ if ((td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) ||
+ !hid_hw_may_wakeup(hdev))
+ mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+ else
+ mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
return 0;
}
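
The suspend branch above reduces to a single predicate; an equivalent sketch, assuming the two trailing bool arguments of mt_set_modes() gate the touch surface and the buttons respectively:

	/* Keep touch reporting armed across suspend only when the device
	 * is allowed to wake the system and the quirk does not forbid it.
	 */
	bool keep_armed = !(td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) &&
			  hid_hw_may_wakeup(hdev);

	mt_set_modes(hdev, HID_LATENCY_HIGH, keep_armed, keep_armed);
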
@@ -1809,6 +1832,12 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
USB_DEVICE_ID_ANTON_TOUCH_PAD) },
+ /* Asus T101HA */
+ { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
/* Asus T304UA */
{ .driver_data = MT_CLS_ASUS,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 3dd6f15f2a67..51b39bda9a9d 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
#endif
#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644
index 000000000000..ba6607d5e051
--- /dev/null
+++ b/drivers/hid/hid-semitek.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for Semitek keyboards
+ *
+ * Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /* In the report descriptor for interface 2, fix the incorrect
+ * description of report ID 0x04 (the report contains a
+ * bitmask, not an array of keycodes). */
+ if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+ hid_info(hdev, "fixing up Semitek report descriptor\n");
+ rdesc[0x84] = 0x02;
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+ .name = "semitek",
+ .id_table = semitek_devices,
+ .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
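
In report-descriptor terms, the one-byte fixup above flips the Variable bit of an Input main item; a comment-form decode of the two bytes the driver checks:

/* rdesc[0x83] = 0x81 is the Input main-item prefix, rdesc[0x84] its
 * data byte:
 *   before: 0x81, 0x00  Input (Data,Array,Abs)  array of keycodes
 *   after:  0x81, 0x02  Input (Data,Var,Abs)    per-key bitmask
 * Setting bit 1 (Variable) makes the host parse report 0x04 as a
 * bitmask instead of a keycode array.
 */
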
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 2e6662173a79..32c2306e240d 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int index, field_index, usage;
char name[HID_CUSTOM_NAME_LENGTH];
- int value;
+ int value, ret;
if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
report_id = sensor_inst->fields[field_index].attribute.
report_id;
- sensor_hub_set_feature(sensor_inst->hsdev, report_id,
- index, sizeof(value), &value);
+ ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+ index, sizeof(value), &value);
+ if (ret)
+ return ret;
} else
return -EINVAL;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 95cf88f3bafb..6abd3e2a9094 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
buffer_size = buffer_size / sizeof(__s32);
if (buffer_size) {
for (i = 0; i < buffer_size; ++i) {
- hid_set_field(report->field[field_index], i,
- (__force __s32)cpu_to_le32(*buf32));
+ ret = hid_set_field(report->field[field_index], i,
+ (__force __s32)cpu_to_le32(*buf32));
+ if (ret)
+ goto done_proc;
+
++buf32;
}
}
if (remaining_bytes) {
value = 0;
memcpy(&value, (u8 *)buf32, remaining_bytes);
- hid_set_field(report->field[field_index], i,
- (__force __s32)cpu_to_le32(value));
+ ret = hid_set_field(report->field[field_index], i,
+ (__force __s32)cpu_to_le32(value));
+ if (ret)
+ goto done_proc;
}
hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
hid_hw_wait(hsdev->hdev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 8319b0ce385a..b3722c51ec78 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -597,9 +597,8 @@ struct sony_sc {
/* DS4 calibration data */
struct ds4_calibration_data ds4_calib_data[6];
/* GH Live */
+ struct urb *ghl_urb;
struct timer_list ghl_poke_timer;
- struct usb_ctrlrequest *ghl_cr;
- u8 *ghl_databuf;
};
static void sony_set_leds(struct sony_sc *sc);
@@ -625,66 +624,54 @@ static inline void sony_schedule_work(struct sony_sc *sc,
static void ghl_magic_poke_cb(struct urb *urb)
{
- if (urb) {
- /* Free sc->ghl_cr and sc->ghl_databuf allocated in
- * ghl_magic_poke()
- */
- kfree(urb->setup_packet);
- kfree(urb->transfer_buffer);
- }
+ struct sony_sc *sc = urb->context;
+
+ if (urb->status < 0)
+ hid_err(sc->hdev, "URB transfer failed : %d", urb->status);
+
+ mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
}
static void ghl_magic_poke(struct timer_list *t)
{
+ int ret;
struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);
- int ret;
+ ret = usb_submit_urb(sc->ghl_urb, GFP_ATOMIC);
+ if (ret < 0)
+ hid_err(sc->hdev, "usb_submit_urb failed: %d", ret);
+}
+
+static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev)
+{
+ struct usb_ctrlrequest *cr;
+ u16 poke_size;
+ u8 *databuf;
unsigned int pipe;
- struct urb *urb;
- struct usb_device *usbdev = to_usb_device(sc->hdev->dev.parent->parent);
- const u16 poke_size =
- ARRAY_SIZE(ghl_ps3wiiu_magic_data);
+ poke_size = ARRAY_SIZE(ghl_ps3wiiu_magic_data);
pipe = usb_sndctrlpipe(usbdev, 0);
- if (!sc->ghl_cr) {
- sc->ghl_cr = kzalloc(sizeof(*sc->ghl_cr), GFP_ATOMIC);
- if (!sc->ghl_cr)
- goto resched;
- }
-
- if (!sc->ghl_databuf) {
- sc->ghl_databuf = kzalloc(poke_size, GFP_ATOMIC);
- if (!sc->ghl_databuf)
- goto resched;
- }
+ cr = devm_kzalloc(&sc->hdev->dev, sizeof(*cr), GFP_ATOMIC);
+ if (cr == NULL)
+ return -ENOMEM;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- goto resched;
+ databuf = devm_kzalloc(&sc->hdev->dev, poke_size, GFP_ATOMIC);
+ if (databuf == NULL)
+ return -ENOMEM;
- sc->ghl_cr->bRequestType =
+ cr->bRequestType =
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT;
- sc->ghl_cr->bRequest = USB_REQ_SET_CONFIGURATION;
- sc->ghl_cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
- sc->ghl_cr->wIndex = 0;
- sc->ghl_cr->wLength = cpu_to_le16(poke_size);
- memcpy(sc->ghl_databuf, ghl_ps3wiiu_magic_data, poke_size);
-
+ cr->bRequest = USB_REQ_SET_CONFIGURATION;
+ cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
+ cr->wIndex = 0;
+ cr->wLength = cpu_to_le16(poke_size);
+ memcpy(databuf, ghl_ps3wiiu_magic_data, poke_size);
usb_fill_control_urb(
- urb, usbdev, pipe,
- (unsigned char *) sc->ghl_cr, sc->ghl_databuf,
- poke_size, ghl_magic_poke_cb, NULL);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- kfree(sc->ghl_databuf);
- kfree(sc->ghl_cr);
- }
- usb_free_urb(urb);
-
-resched:
- /* Reschedule for next time */
- mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
+ sc->ghl_urb, usbdev, pipe,
+ (unsigned char *) cr, databuf, poke_size,
+ ghl_magic_poke_cb, sc);
+ return 0;
}
static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -2981,6 +2968,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret;
unsigned long quirks = id->driver_data;
struct sony_sc *sc;
+ struct usb_device *usbdev;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
if (!strcmp(hdev->name, "FutureMax Dance Mat"))
@@ -3000,6 +2988,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sc->quirks = quirks;
hid_set_drvdata(hdev, sc);
sc->hdev = hdev;
+ usbdev = to_usb_device(sc->hdev->dev.parent->parent);
ret = hid_parse(hdev);
if (ret) {
@@ -3042,6 +3031,15 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (sc->quirks & GHL_GUITAR_PS3WIIU) {
+ sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!sc->ghl_urb)
+ return -ENOMEM;
+ ret = ghl_init_urb(sc, usbdev);
+ if (ret) {
+ hid_err(hdev, "error preparing URB\n");
+ return ret;
+ }
+
timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
mod_timer(&sc->ghl_poke_timer,
jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
@@ -3054,8 +3052,10 @@ static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ if (sc->quirks & GHL_GUITAR_PS3WIIU) {
del_timer_sync(&sc->ghl_poke_timer);
+ usb_free_urb(sc->ghl_urb);
+ }
hid_hw_close(hdev);
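
The hid-sony refactor above also simplifies the steady-state poke loop: the timer handler submits a prebuilt URB and the completion handler re-arms the timer. Stripped of error handling, the loop is:

static void ghl_magic_poke(struct timer_list *t)
{
	struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);

	usb_submit_urb(sc->ghl_urb, GFP_ATOMIC);	/* timer -> URB */
}

static void ghl_magic_poke_cb(struct urb *urb)
{
	struct sony_sc *sc = urb->context;

	mod_timer(&sc->ghl_poke_timer,
		  jiffies + GHL_GUITAR_POKE_INTERVAL * HZ);	/* URB -> timer */
}
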
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 2e452c6e8ef4..cdc7d82ae9ed 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -311,12 +311,13 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
goto error4;
}
- tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
- if (!tm_wheel->model_request) {
+ tm_wheel->change_request = kmemdup(&change_request,
+ sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (!tm_wheel->change_request) {
ret = -ENOMEM;
goto error5;
}
- memcpy(tm_wheel->change_request, &change_request, sizeof(struct usb_ctrlrequest));
tm_wheel->usb_dev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
hid_set_drvdata(hdev, tm_wheel);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 9993133989a5..46474612e73c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -45,6 +45,7 @@
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
/* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
+ /*
+ * Sending the wakeup command after reset actually breaks the ELAN touchscreen controller
+ */
+ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
{ 0, 0 }
};
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
}
/* At least some SIS devices need this after reset */
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
out_unlock:
mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
hid->product = le16_to_cpu(ihid->hdesc.wProductID);
- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
- client->name, hid->vendor, hid->product);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ client->name, (u16)hid->vendor, (u16)hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index c6c9cfe2475e..689da84a520d 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -5,6 +5,7 @@ menu "Intel ISH HID support"
config INTEL_ISH_HID
tristate "Intel Integrated Sensor Hub"
default n
+ depends on X86
select HID
help
The Integrated Sensor Hub (ISH) enables the ability to offload
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 21b87e4003af..07e3cbc86bef 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,8 @@
#define EHL_Ax_DEVICE_ID 0x4BB3
#define TGL_LP_DEVICE_ID 0xA0FC
#define TGL_H_DEVICE_ID 0x43FC
+#define ADL_S_DEVICE_ID 0x7AF8
+#define ADL_P_DEVICE_ID 0x51FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 47bbeb8b492b..45e0c7b1c9ec 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -544,7 +544,7 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
#define TIMEOUT_FOR_HW_RDY_MS 300
/**
- * ish_fw_reset_work_fn() - FW reset worker function
+ * fw_reset_work_fn() - FW reset worker function
* @unused: not used
*
* Call ish_fw_reset_handler to complete FW reset
@@ -889,6 +889,29 @@ static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
return drbl_val;
}
+/**
+ * _dma_no_cache_snooping() - Check DMA cache-snooping capability
+ *
+ * Check whether DMA supports cache snooping on the current platform.
+ * This callback tells the upper-layer driver whether a manual cache
+ * flush is needed for DMA operations.
+ *
+ * Take care when implementing this callback: declaring cache snooping
+ * on a platform that does not support it makes the upper-layer driver
+ * receive mismatched data, while declaring no cache snooping on a
+ * platform that does support it flushes the cache twice and hurts
+ * performance.
+ *
+ * @dev: ishtp device pointer
+ *
+ * Return: false - has cache snooping capability
+ * true - no cache snooping; a manual cache flush is needed
+ */
+static bool _dma_no_cache_snooping(struct ishtp_device *dev)
+{
+ return dev->pdev->device == EHL_Ax_DEVICE_ID;
+}
+
static const struct ishtp_hw_ops ish_hw_ops = {
.hw_reset = _ish_hw_reset,
.ipc_reset = _ish_ipc_reset,
@@ -897,7 +920,8 @@ static const struct ishtp_hw_ops ish_hw_ops = {
.write = write_ipc_to_queue,
.get_fw_status = _ish_read_fw_sts_reg,
.sync_fw_clock = _ish_sync_fw_clock,
- .ishtp_read_hdr = _ishtp_read_hdr
+ .ishtp_read_hdr = _ishtp_read_hdr,
+ .dma_no_cache_snooping = _dma_no_cache_snooping
};
/**
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 06081cf9b85a..1c5039081db2 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -261,7 +263,6 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
uint32_t fwsts = dev->ops->get_fw_status(dev);
- int ret;
if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
&& IPC_IS_ISH_ILUP(fwsts)) {
@@ -273,7 +274,7 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
/* Waiting to get resume response */
if (dev->resume_flag)
- ret = wait_event_interruptible_timeout(dev->resume_wait,
+ wait_event_interruptible_timeout(dev->resume_wait,
!dev->resume_flag,
msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 6cf59fd26ad7..1b486f262747 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -31,13 +31,13 @@
/**
* enum ish_loader_commands - ISH loader host commands.
- * LOADER_CMD_XFER_QUERY Query the Shim firmware loader for
+ * @LOADER_CMD_XFER_QUERY: Query the Shim firmware loader for
* capabilities
- * LOADER_CMD_XFER_FRAGMENT Transfer one firmware image fragment at a
+ * @LOADER_CMD_XFER_FRAGMENT: Transfer one firmware image fragment at a
* time. The command may be executed
* multiple times until the entire firmware
* image is downloaded to SRAM.
- * LOADER_CMD_START Start executing the main firmware.
+ * @LOADER_CMD_START: Start executing the main firmware.
*/
enum ish_loader_commands {
LOADER_CMD_XFER_QUERY = 0,
@@ -95,6 +95,7 @@ static int dma_buf_size_limit = 4 * PAGE_SIZE;
/**
* struct loader_msg_hdr - Header for ISH Loader commands.
* @command: LOADER_CMD* commands. Bit 7 is the response.
+ * @reserved: Reserved space
* @status: Command response status. Non 0, is error
* condition.
*
@@ -173,16 +174,16 @@ struct loader_start {
* struct response_info - Encapsulate firmware response related
* information for passing between function
* loader_cl_send() and process_recv() callback.
- * @data Copy the data received from firmware here.
- * @max_size Max size allocated for the @data buffer. If the
+ * @data: Copy the data received from firmware here.
+ * @max_size: Max size allocated for the @data buffer. If the
* received data exceeds this value, we log an
* error.
- * @size Actual size of data received from firmware.
- * @error Returns 0 for success, negative error code for a
+ * @size: Actual size of data received from firmware.
+ * @error: Returns 0 for success, negative error code for a
* failure in function process_recv().
- * @received Set to true on receiving a valid firmware
+ * @received: Set to true on receiving a valid firmware
* response to host command
- * @wait_queue Wait queue for Host firmware loading where the
+ * @wait_queue: Wait queue for Host firmware loading where the
* client sends message to ISH firmware and waits
* for response
*/
@@ -195,13 +196,13 @@ struct response_info {
wait_queue_head_t wait_queue;
};
-/**
+/*
* struct ishtp_cl_data - Encapsulate per ISH-TP Client Data.
* @work_ishtp_reset: Work queue for reset handling.
* @work_fw_load: Work queue for host firmware loading.
- * @flag_retry Flag for indicating host firmware loading should
+ * @flag_retry: Flag for indicating host firmware loading should
* be retried.
- * @retry_count Count the number of retries.
+ * @retry_count: Count the number of retries.
*
* This structure is used to store data per client.
*/
@@ -240,8 +241,8 @@ struct ishtp_cl_data {
/**
* get_firmware_variant() - Gets the filename of firmware image to be
* loaded based on platform variant.
- * @client_data Client data instance.
- * @filename Returns firmware filename.
+ * @client_data: Client data instance.
+ * @filename: Returns firmware filename.
*
* Queries the firmware-name device property string.
*
@@ -266,11 +267,11 @@ static int get_firmware_variant(struct ishtp_cl_data *client_data,
/**
* loader_cl_send() Send message from host to firmware
* @client_data: Client data instance
- * @out_msg Message buffer to be sent to firmware
- * @out_size Size of out going message
- * @in_msg Message buffer where the incoming data copied.
+ * @out_msg: Message buffer to be sent to firmware
+ * @out_size: Size of out going message
+ * @in_msg: Message buffer where the incoming data copied.
* This buffer is allocated by calling
- * @in_size Max size of incoming message
+ * @in_size: Max size of incoming message
*
* Return: Number of bytes copied in the in_msg on success, negative
* error code on failure.
@@ -435,7 +436,7 @@ end:
/**
* loader_cl_event_cb() - bus driver callback for incoming message
- * @device: Pointer to the ishtp client device for which this
+ * @cl_device: Pointer to the ishtp client device for which this
* message is targeted
*
* Remove the packet from the list and process the message by calling
@@ -455,7 +456,7 @@ static void loader_cl_event_cb(struct ishtp_cl_device *cl_device)
/**
* ish_query_loader_prop() - Query ISH Shim firmware loader
* @client_data: Client data instance
- * @fw: Poiner to firmware data struct in host memory
+ * @fw: Pointer to firmware data struct in host memory
* @fw_info: Loader firmware properties
*
* This function queries the ISH Shim firmware loader for capabilities.
@@ -536,7 +537,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
}
/**
- * ish_fw_xfer_ishtp() Loads ISH firmware using ishtp interface
+ * ish_fw_xfer_ishtp() - Loads ISH firmware using ishtp interface
* @client_data: Client data instance
* @fw: Pointer to firmware data struct in host memory
*
@@ -733,7 +734,7 @@ end_err_dma_buf_release:
}
/**
- * ish_fw_start() Start executing ISH main firmware
+ * ish_fw_start() - Start executing ISH main firmware
* @client_data: client data instance
*
* This function sends message to Shim firmware loader to start
@@ -756,7 +757,7 @@ static int ish_fw_start(struct ishtp_cl_data *client_data)
}
/**
- * load_fw_from_host() Loads ISH firmware from host
+ * load_fw_from_host() - Loads ISH firmware from host
* @client_data: Client data instance
*
* This function loads the ISH firmware to ISH SRAM and starts execution
@@ -1015,7 +1016,7 @@ static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
*
* Return: 0
*/
-static int loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+static void loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl_data *client_data;
struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
@@ -1032,8 +1033,6 @@ static int loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
cancel_work_sync(&client_data->work_ishtp_reset);
loader_deinit(loader_ishtp_cl);
ishtp_put_device(cl_device);
-
- return 0;
}
/**
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 6ba944b40fdb..6b1fa971b33e 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -11,6 +11,11 @@
#include <linux/sched.h>
#include "ishtp-hid.h"
+/* ISH Transport protocol (ISHTP in short) GUID */
+static const guid_t hid_ishtp_guid =
+ GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);
+
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
#define HID_CL_TX_RING_SIZE 16
@@ -18,7 +23,7 @@
#define cl_data_to_dev(client_data) ishtp_device(client_data->cl_device)
/**
- * report_bad_packets() - Report bad packets
+ * report_bad_packet() - Report bad packets
* @hid_ishtp_cl: Client instance to get stats
* @recv_buf: Raw received host interface message
* @cur_pos: Current position index in payload
@@ -779,7 +784,7 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
}
}
-void (*hid_print_trace)(void *unused, const char *format, ...);
+ishtp_print_log ishtp_hid_print_trace;
/**
* hid_ishtp_cl_probe() - ISHTP client driver probe
@@ -818,7 +823,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
- hid_print_trace = ishtp_trace_callback(cl_device);
+ ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
if (rv) {
@@ -838,7 +843,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
*
* Return: 0
*/
-static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
@@ -856,8 +861,6 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl = NULL;
client_data->num_hid_devices = 0;
-
- return 0;
}
/**
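Moving the static hid_ishtp_guid definition out of ishtp-hid.h (see the header hunk below) means a single translation unit owns the object instead of every includer carrying its own copy. For orientation, GUID_INIT() spells out the UUID 33AECD58-B679-4E54-9BD9-A04D34F0C226 field by field; an illustrative declaration:

	/* 32-bit a, 16-bit b and c, then the eight trailing bytes */
	static const guid_t example_guid =
		GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
			  0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);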
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 393bed0abee9..14c271d7d8a9 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -254,7 +254,7 @@ err_hid_data:
}
/**
- * ishtp_hid_probe() - Remove registered hid device
+ * ishtp_hid_remove() - Remove registered hid device
* @client_data: client data pointer
*
* This function is used to destroy the allocated HID device.
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 5ffd0da3cf1f..f88443a7d935 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -16,14 +16,9 @@
#define IS_RESPONSE 0x80
/* Used to dump to Linux trace buffer, if enabled */
-extern void (*hid_print_trace)(void *unused, const char *format, ...);
+extern ishtp_print_log ishtp_hid_print_trace;
#define hid_ishtp_trace(client, ...) \
- (hid_print_trace)(NULL, __VA_ARGS__)
-
-/* ISH Transport protocol (ISHTP in short) GUID */
-static const guid_t hid_ishtp_guid =
- GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
- 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);
+ (ishtp_hid_print_trace)(NULL, __VA_ARGS__)
/* ISH HID message structure */
struct hostif_msg_hdr {
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index bba29cd36d29..f0802b047ed8 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -164,6 +164,7 @@ EXPORT_SYMBOL(ishtp_fw_cl_get_client);
/**
* ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: firmware client used to fetch the ID
*
* This interface is used to get the FW client id.
*
@@ -257,24 +258,17 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
static int ishtp_cl_device_remove(struct device *dev)
{
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
- struct ishtp_cl_driver *driver;
-
- if (!device || !dev->driver)
- return 0;
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(dev->driver);
if (device->event_cb) {
device->event_cb = NULL;
cancel_work_sync(&device->event_work);
}
- driver = to_ishtp_cl_driver(dev->driver);
- if (!driver->remove) {
- dev->driver = NULL;
+ if (driver->remove)
+ driver->remove(device);
- return 0;
- }
-
- return driver->remove(device);
+ return 0;
}
/**
@@ -842,6 +836,7 @@ int ishtp_use_dma_transfer(void)
/**
* ishtp_device() - Return device pointer
+ * @device: ISH-TP client device instance
*
* This interface is used to return device pointer from ishtp_cl_device
* instance.
@@ -858,6 +853,7 @@ EXPORT_SYMBOL(ishtp_device);
* ishtp_get_pci_device() - Return PCI device dev pointer
* This interface is used to return PCI device pointer
* from ishtp_cl_device instance.
+ * @device: ISH-TP client device instance
*
* Return: device *.
*/
@@ -869,12 +865,13 @@ EXPORT_SYMBOL(ishtp_get_pci_device);
/**
* ishtp_trace_callback() - Return trace callback
+ * @cl_device: ISH-TP client device instance
*
* This interface is used to return trace callback function pointer.
*
- * Return: void *.
+ * Return: *ishtp_print_log()
*/
-void *ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device)
{
return cl_device->ishtp_dev->print_log;
}
@@ -882,6 +879,7 @@ EXPORT_SYMBOL(ishtp_trace_callback);
/**
* ish_hw_reset() - Call HW reset IPC callback
+ * @dev: ISHTP device instance
*
* This interface is used to reset HW in case of error.
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 1cc157126fce..405e0d5212cc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -10,6 +10,7 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
#include "hbm.h"
#include "client.h"
@@ -111,7 +112,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
/**
* ishtp_cl_allocate() - allocates client structure and sets it up.
- * @dev: ishtp device
+ * @cl_device: ishtp client device
*
* Allocate memory for new client device and call to initialize each field.
*
@@ -263,7 +264,6 @@ EXPORT_SYMBOL(ishtp_cl_unlink);
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
struct ishtp_device *dev;
- int err;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -283,7 +283,7 @@ int ishtp_cl_disconnect(struct ishtp_cl *cl)
return -ENODEV;
}
- err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ wait_event_interruptible_timeout(cl->wait_ctrl_res,
(dev->dev_state != ISHTP_DEV_ENABLED ||
cl->state == ISHTP_CL_DISCONNECTED),
ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
@@ -773,6 +773,14 @@ static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
/* write msg to dma buf */
memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+ /*
+ * If the current firmware doesn't support cache snooping, the
+ * driver has to flush the cache manually.
+ */
+ if (dev->ops->dma_no_cache_snooping &&
+ dev->ops->dma_no_cache_snooping(dev))
+ clflush_cache_range(msg_addr, cl_msg->send_buf.size);
+
/* send dma_xfer hbm msg */
off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
@@ -997,6 +1005,15 @@ void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
}
buffer = rb->buffer.data;
+
+ /*
+ * If the current firmware doesn't support cache snooping, the
+ * driver has to flush the cache manually.
+ */
+ if (dev->ops->dma_no_cache_snooping &&
+ dev->ops->dma_no_cache_snooping(dev))
+ clflush_cache_range(msg, hbm->msg_length);
+
memcpy(buffer, msg, hbm->msg_length);
rb->buf_idx = hbm->msg_length;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 30a91d068306..9c031a06e4c4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -398,7 +398,7 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
}
/**
- * ishtp_client_disconnect_request() - Receive disconnect request
+ * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
* @dev: ISHTP device instance
* @disconnect_req: disconnect request structure
*
@@ -430,7 +430,7 @@ static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
}
/**
- * ishtp_hbm_dma_xfer_ack(() - Receive transfer ACK
+ * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
* @dev: ISHTP device instance
* @dma_xfer: HBM transfer message
*
@@ -914,7 +914,7 @@ static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
/*** Suspend and resume notification ***/
static uint32_t current_state;
-static uint32_t supported_states = 0 | SUSPEND_STATE_BIT;
+static uint32_t supported_states = SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT;
/**
* ishtp_send_suspend() - Send suspend message to FW
@@ -933,7 +933,7 @@ void ishtp_send_suspend(struct ishtp_device *dev)
memset(&state_status_msg, 0, len);
state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
state_status_msg.supported_states = supported_states;
- current_state |= SUSPEND_STATE_BIT;
+ current_state |= (SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT);
dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
state_status_msg.states_status = current_state;
@@ -959,7 +959,7 @@ void ishtp_send_resume(struct ishtp_device *dev)
memset(&state_status_msg, 0, len);
state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
state_status_msg.supported_states = supported_states;
- current_state &= ~SUSPEND_STATE_BIT;
+ current_state &= ~(CONNECTED_STANDBY_STATE_BIT | SUSPEND_STATE_BIT);
dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
state_status_msg.states_status = current_state;
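With CONNECTED_STANDBY_STATE_BIT added, supported_states becomes (1<<0) | (1<<1) = 0x3, and the suspend/resume notifications now toggle both bits together: ishtp_send_suspend() ORs 0x3 into current_state and ishtp_send_resume() ANDs it back out, so the firmware sees connected standby and classic suspend as one combined state transition.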
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
index 7c445b203f2a..08f3f3ceb18c 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.h
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -235,6 +235,7 @@ struct dma_xfer_hbm {
#define SYSTEM_STATE_QUERY_SUBSCRIBERS 0x3
#define SYSTEM_STATE_STATE_CHANGE_REQ 0x4
/*indicates suspend and resume states*/
+#define CONNECTED_STANDBY_STATE_BIT (1<<0)
#define SUSPEND_STATE_BIT (1<<1)
struct ish_system_states_header {
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 1cc6364aa957..32142c7d9a04 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/intel-ish-client-if.h>
#include "bus.h"
#include "hbm.h"
@@ -118,6 +119,7 @@ struct ishtp_hw_ops {
unsigned long buffer_length);
uint32_t (*get_fw_status)(struct ishtp_device *dev);
void (*sync_fw_clock)(struct ishtp_device *dev);
+ bool (*dma_no_cache_snooping)(struct ishtp_device *dev);
};
/**
@@ -202,8 +204,7 @@ struct ishtp_device {
uint64_t ishtp_host_dma_rx_buf_phys;
/* Dump to trace buffers if enabled*/
- __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
- const char *format, ...);
+ ishtp_print_log print_log;
/* Debug stats */
unsigned int ipc_rx_cnt;
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
index 3477b31611ae..a3a70e4f3f6c 100644
--- a/drivers/hid/surface-hid/surface_hid.c
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -143,7 +143,7 @@ static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id,
rqst.target_id = shid->uid.target;
rqst.instance_id = shid->uid.instance;
rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
- rqst.flags = 0;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
rqst.length = sizeof(rprt_id);
rqst.payload = &rprt_id;
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index 7b27ec392232..5571e74abe91 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
shid->hid->dev.parent = shid->dev;
shid->hid->bus = BUS_HOST;
- shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
- shid->hid->product = cpu_to_le16(shid->attrs.product);
- shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+ shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+ shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+ shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
shid->hid->country = shid->hid_desc.country_code;
snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
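The descriptor fields here arrive from the device in little-endian byte order inside a packed struct. cpu_to_le16() converts in the wrong direction (host to LE, a no-op on x86 but wrong on big-endian hosts) and implies an aligned load; get_unaligned_le16() both tolerates the unaligned field and converts LE to host order. A reduced sketch of the correct decode:

	#include <asm/unaligned.h>

	/* read a 2-byte little-endian field at any alignment,
	 * returning host byte order */
	static u16 decode_vendor(const void *field)
	{
		return get_unaligned_le16(field);
	}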
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 86257ce6d619..06130dc431a0 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ len = hid_report_len(report);
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
usbhid->urbctrl->transfer_buffer_length = len;
@@ -1304,6 +1304,13 @@ static int usbhid_idle(struct hid_device *hid, int report, int idle,
return hid_set_idle(dev, ifnum, report, idle);
}
+static bool usbhid_may_wakeup(struct hid_device *hid)
+{
+ struct usb_device *dev = hid_to_usb_dev(hid);
+
+ return device_may_wakeup(&dev->dev);
+}
+
struct hid_ll_driver usb_hid_driver = {
.parse = usbhid_parse,
.start = usbhid_start,
@@ -1316,6 +1323,7 @@ struct hid_ll_driver usb_hid_driver = {
.raw_request = usbhid_raw_request,
.output_report = usbhid_output_report,
.idle = usbhid_idle,
+ .may_wakeup = usbhid_may_wakeup,
};
EXPORT_SYMBOL_GPL(usb_hid_driver);
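The open-coded length above is exactly what hid_report_len() computes: report->size is in bits, ((size - 1) >> 3) + 1 rounds up to bytes, and one extra byte is added when the report carries a nonzero report ID. For example, a 12-bit report with ID 3 gives ((12 - 1) >> 3) + 1 + 1 = 3 bytes. The new may_wakeup hook simply forwards the question to the USB core's device_may_wakeup(), so HID transport users inherit the transport's wakeup policy instead of guessing.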
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index ea126c50acc3..3b4ee21cd811 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+ error = -EPERM;
hid_notice(hid,
"device does not support device managed pool\n");
goto fail;
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index e22434dfc9ef..df02002066ce 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -239,11 +239,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
return -1;
if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
- if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
+ if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbd->new_dma)))
return -1;
if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -1;
- if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
+ if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_KERNEL, &kbd->leds_dma)))
return -1;
return 0;
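Both allocations happen in usb_kbd_alloc_mem(), which runs from probe in a sleeping process context; GFP_KERNEL is therefore safe and lets the allocator reclaim or wait rather than fail under memory pressure, whereas GFP_ATOMIC is only warranted in contexts that cannot sleep.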
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 073127e65ac1..c89332017d5d 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -130,7 +130,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
if (!mouse || !input_dev)
goto fail1;
- mouse->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &mouse->data_dma);
+ mouse->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &mouse->data_dma);
if (!mouse->data)
goto fail1;
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 71c886245dbf..8f16654eca09 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -122,7 +122,7 @@
#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
-#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0940)
#define WACOM_HID_WD_MODE_CHANGE (WACOM_HID_UP_WACOMDIGITIZER | 0x0980)
#define WACOM_HID_WD_MUTE_DEVICE (WACOM_HID_UP_WACOMDIGITIZER | 0x0981)
#define WACOM_HID_WD_CONTROLPANEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0982)
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 94daf8240c95..d76df5c8c2a9 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -11,3 +11,6 @@ hv_vmbus-y := vmbus_drv.o \
channel_mgmt.o ring_buffer.o hv_trace.o
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
+
+# Code that must be built-in
+obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
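The $(subst m,y,...) idiom forces hv_common.o into the kernel image even when CONFIG_HYPERV=m: the substitution turns m into y so the object lands on obj-y, CONFIG_HYPERV=y passes through unchanged, and an unset option still expands to nothing. That matches the comment above, since code called from architecture-specific code under arch/ cannot live in a module.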
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index c2635e913a92..f3761c73b074 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -662,12 +662,15 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
- err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (!newchannel->max_pkt_size)
+ newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
+
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages, 0);
if (err)
goto error_clean_ring;
- err = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_pages], recv_pages);
+ err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
+ recv_pages, newchannel->max_pkt_size);
if (err)
goto error_clean_ring;
@@ -1186,15 +1189,14 @@ EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
* vmbus_next_request_id - Returns a new request id. It is also
* the index at which the guest memory address is stored.
* Uses a spin lock to avoid race conditions.
- * @rqstor: Pointer to the requestor struct
+ * @channel: Pointer to the VMbus channel struct
* @rqst_add: Guest memory address to be stored in the array
*/
-u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
{
+ struct vmbus_requestor *rqstor = &channel->requestor;
unsigned long flags;
u64 current_id;
- const struct vmbus_channel *channel =
- container_of(rqstor, const struct vmbus_channel, requestor);
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
@@ -1228,16 +1230,15 @@ EXPORT_SYMBOL_GPL(vmbus_next_request_id);
/*
* vmbus_request_addr - Returns the memory address stored at @trans_id
* in @rqstor. Uses a spin lock to avoid race conditions.
- * @rqstor: Pointer to the requestor struct
+ * @channel: Pointer to the VMbus channel struct
* @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
* next request id.
*/
-u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id)
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
{
+ struct vmbus_requestor *rqstor = &channel->requestor;
unsigned long flags;
u64 req_addr;
- const struct vmbus_channel *channel =
- container_of(rqstor, const struct vmbus_channel, requestor);
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 311cd005b3be..5e479d54918c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -232,8 +232,10 @@ int vmbus_connect(void)
*/
for (i = 0; ; i++) {
- if (i == ARRAY_SIZE(vmbus_versions))
+ if (i == ARRAY_SIZE(vmbus_versions)) {
+ ret = -EDOM;
goto cleanup;
+ }
version = vmbus_versions[i];
if (version > max_version)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 58af84e30144..7f11ea07d698 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1010,7 +1010,6 @@ static void hot_add_req(struct work_struct *dummy)
* that need to be hot-added while ensuring the alignment
* and size requirements of Linux as it relates to hot-add.
*/
- region_start = pg_start;
region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
if (pfn_cnt % HA_CHUNK)
region_size += HA_CHUNK;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
new file mode 100644
index 000000000000..7f42da98d377
--- /dev/null
+++ b/drivers/hv/hv_common.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Architecture neutral utility routines for interacting with
+ * Hyper-V. This file is specifically for code that must be
+ * built-in to the kernel image when CONFIG_HYPERV is set
+ * (vs. being in a module) because it is called from architecture
+ * specific code under arch/.
+ *
+ * Copyright (C) 2021, Microsoft, Inc.
+ *
+ * Author : Michael Kelley <mikelley@microsoft.com>
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/bitfield.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+
+
+/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
+bool hv_query_ext_cap(u64 cap_query)
+{
+ /*
+ * The address of the 'hv_extended_cap' variable will be used as an
+ * output parameter to the hypercall below and so it should be
+ * compatible with 'virt_to_phys'; that is, its address must be
+ * directly mapped. Use 'static' to keep it compatible; stack variables
+ * can be virtually mapped, making them incompatible with
+ * 'virt_to_phys'.
+ * Hypercall input/output addresses should also be 8-byte aligned.
+ */
+ static u64 hv_extended_cap __aligned(8);
+ static bool hv_extended_cap_queried;
+ u64 status;
+
+ /*
+ * Querying extended capabilities is an extended hypercall. Check if the
+ * partition supports extended hypercalls first.
+ */
+ if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
+ return false;
+
+ /* Extended capabilities do not change at runtime. */
+ if (hv_extended_cap_queried)
+ return hv_extended_cap & cap_query;
+
+ status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
+ &hv_extended_cap);
+
+ /*
+ * The query extended capabilities hypercall should not fail under
+ * any normal circumstances. Avoid repeatedly making the hypercall
+ * on error.
+ */
+ hv_extended_cap_queried = true;
+ if (!hv_result_success(status)) {
+ pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
+ status);
+ return false;
+ }
+
+ return hv_extended_cap & cap_query;
+}
+EXPORT_SYMBOL_GPL(hv_query_ext_cap);
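A caller sketch for the new helper, assuming one of the HV_EXT_CAPABILITY_xxx bit masks from hyperv-tlfs.h (HV_EXT_CAPABILITY_EXAMPLE below is a placeholder name, not a real define):

	if (hv_query_ext_cap(HV_EXT_CAPABILITY_EXAMPLE))
		pr_info("Hyper-V: example extended capability present\n");

Repeated calls are cheap: after the first call the answer comes from the cached static, and the failure case is also latched so a broken hypercall is not retried.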
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 59ce85e00a02..660036da7449 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -349,6 +349,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
fcopy_transaction.recv_channel = srv->channel;
+ fcopy_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index b49962d312ce..c698592b83e4 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -757,6 +757,7 @@ hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
kvp_transaction.recv_channel = srv->channel;
+ kvp_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 4;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index e4aefeb330da..136576cba26f 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -750,8 +750,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
*/
hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
if (IS_ERR_OR_NULL(hv_ptp_clock)) {
- pr_err("cannot register PTP clock: %ld\n",
- PTR_ERR(hv_ptp_clock));
+ pr_err("cannot register PTP clock: %d\n",
+ PTR_ERR_OR_ZERO(hv_ptp_clock));
hv_ptp_clock = NULL;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 9416e09ebd58..42f3d9d123a1 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -174,7 +174,7 @@ extern int hv_synic_cleanup(unsigned int cpu);
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- struct page *pages, u32 pagecnt);
+ struct page *pages, u32 pagecnt, u32 max_pkt_size);
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 374f8afbf8a5..2aee356840a2 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -181,7 +181,7 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- struct page *pages, u32 page_cnt)
+ struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
int i;
struct page **pages_wraparound;
@@ -223,6 +223,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
sizeof(struct hv_ring_buffer);
ring_info->priv_read_index = 0;
+ /* Initialize buffer that holds copies of incoming packets */
+ if (max_pkt_size) {
+ ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
+ if (!ring_info->pkt_buffer)
+ return -ENOMEM;
+ ring_info->pkt_buffer_size = max_pkt_size;
+ }
+
spin_lock_init(&ring_info->ring_lock);
return 0;
@@ -235,6 +243,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
vunmap(ring_info->ring_buffer);
ring_info->ring_buffer = NULL;
mutex_unlock(&ring_info->ring_buffer_mutex);
+
+ kfree(ring_info->pkt_buffer);
+ ring_info->pkt_buffer_size = 0;
}
/* Write to the ring buffer. */
@@ -301,10 +312,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
*/
if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
- rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
- if (rqst_id == VMBUS_RQST_ERROR) {
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- return -EAGAIN;
+ if (channel->next_request_id_callback != NULL) {
+ rqst_id = channel->next_request_id_callback(channel, requestid);
+ if (rqst_id == VMBUS_RQST_ERROR) {
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ return -EAGAIN;
+ }
}
}
desc = hv_get_ring_buffer(outring_info) + old_write;
@@ -332,7 +345,8 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
if (channel->rescind) {
if (rqst_id != VMBUS_NO_RQSTOR) {
/* Reclaim request ID to avoid leak of IDs */
- vmbus_request_addr(&channel->requestor, rqst_id);
+ if (channel->request_addr_callback != NULL)
+ channel->request_addr_callback(channel, rqst_id);
}
return -ENODEV;
}
@@ -375,7 +389,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
memcpy(buffer, (const char *)desc + offset, packetlen);
/* Advance ring index to next packet descriptor */
- __hv_pkt_iter_next(channel, desc);
+ __hv_pkt_iter_next(channel, desc, true);
/* Notify host of update */
hv_pkt_iter_close(channel);
@@ -402,6 +416,22 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
}
/*
+ * Get first vmbus packet without copying it out of the ring buffer
+ */
+struct vmpacket_descriptor *hv_pkt_iter_first_raw(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
+
+ if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+ return NULL;
+
+ return (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_first_raw);
+
+/*
* Get first vmbus packet from ring buffer after read_index
*
* If ring buffer is empty, returns NULL and no other action needed.
@@ -409,17 +439,49 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
- struct vmpacket_descriptor *desc;
+ struct vmpacket_descriptor *desc, *desc_copy;
+ u32 bytes_avail, pkt_len, pkt_offset;
- hv_debug_delay_test(channel, MESSAGE_DELAY);
- if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+ desc = hv_pkt_iter_first_raw(channel);
+ if (!desc)
return NULL;
- desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
- if (desc)
- prefetch((char *)desc + (desc->len8 << 3));
+ bytes_avail = min(rbi->pkt_buffer_size, hv_pkt_iter_avail(rbi));
+
+ /*
+ * Ensure the compiler does not use references to incoming Hyper-V values (which
+ * could change at any moment) when reading local variables later in the code
+ */
+ pkt_len = READ_ONCE(desc->len8) << 3;
+ pkt_offset = READ_ONCE(desc->offset8) << 3;
+
+ /*
+ * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
+ * rbi->pkt_buffer_size
+ */
+ if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
+ pkt_len = bytes_avail;
+
+ /*
+ * If pkt_offset is invalid, arbitrarily set it to
+ * the size of vmpacket_descriptor
+ */
+ if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
+ pkt_offset = sizeof(struct vmpacket_descriptor);
+
+ /* Copy the Hyper-V packet out of the ring buffer */
+ desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
+ memcpy(desc_copy, desc, pkt_len);
+
+ /*
+ * Hyper-V could still change len8 and offset8 after the earlier read.
+ * Ensure that desc_copy has legal values for len8 and offset8 that
+ * are consistent with the copy we just made
+ */
+ desc_copy->len8 = pkt_len >> 3;
+ desc_copy->offset8 = pkt_offset >> 3;
- return desc;
+ return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
@@ -431,7 +493,8 @@ EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
*/
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *desc)
+ const struct vmpacket_descriptor *desc,
+ bool copy)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
u32 packetlen = desc->len8 << 3;
@@ -444,7 +507,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
rbi->priv_read_index -= dsize;
/* more data? */
- return hv_pkt_iter_first(channel);
+ return copy ? hv_pkt_iter_first(channel) : hv_pkt_iter_first_raw(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
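The rework above defends against a ring whose contents the host may rewrite at any time: header fields are snapshotted once with READ_ONCE(), bounded, copied out of the shared pages, and then re-derived on the private copy so the copy stays self-consistent even if the original changed mid-read. Reduced to its core (an illustrative fragment, not the exact driver code):

	pkt_len = READ_ONCE(desc->len8) << 3;   /* snapshot once          */
	if (pkt_len < MIN_LEN || pkt_len > avail)
		pkt_len = avail;                /* bound before use       */
	memcpy(copy, desc, pkt_len);            /* leave shared memory    */
	copy->len8 = pkt_len >> 3;              /* make the copy coherent */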
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 87624902ea80..e3675377bc5d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1583,6 +1583,17 @@ config SENSORS_SHT3x
This driver can also be built as a module. If so, the module
will be called sht3x.
+config SENSORS_SHT4x
+ tristate "Sensiron humidity and temperature sensors. SHT4x and compat."
+ depends on I2C
+ select CRC8
+ help
+ If you say yes here you get support for the Sensiron SHT40, SHT41 and
+ SHT45 humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht4x.
+
config SENSORS_SHTC1
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 59e78bc212cf..d712c61c1f5e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -171,6 +171,7 @@ obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
+obj-$(CONFIG_SENSORS_SHT4x) += sht4x.o
obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 3e1d56585b91..74ce5211eb75 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -924,10 +924,8 @@ static int pvt_request_regs(struct pvt_hwmon *pvt)
}
pvt->regs = devm_ioremap_resource(pvt->dev, res);
- if (IS_ERR(pvt->regs)) {
- dev_err(pvt->dev, "Couldn't map PVT registers\n");
+ if (IS_ERR(pvt->regs))
return PTR_ERR(pvt->regs);
- }
return 0;
}
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index 591929ec217a..fa6aa4fc8b52 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -310,6 +310,7 @@ static int ccp_write(struct device *dev, enum hwmon_sensor_types type,
default:
break;
}
+ break;
default:
break;
}
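The one-line fix above closes an implicit fallthrough: after the inner switch ends, control would otherwise continue into the outer default label, which is harmless here but warns under -Wimplicit-fallthrough and becomes a trap if a case is ever added in between. The bug class in miniature, as self-contained C:

	#include <stdio.h>

	static void dispatch(int type, int attr)
	{
		switch (type) {
		case 1:
			switch (attr) {
			case 0:
				printf("pwm write\n");
				break;
			default:
				break;
			}
			break;  /* without this, control falls into "default" */
		default:
			printf("unsupported\n");
			break;
		}
	}

	int main(void)
	{
		dispatch(1, 0);  /* prints only "pwm write" thanks to the break */
		return 0;
	}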
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 02298b86b57b..731d5117f9f1 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
return 0;
}
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+ struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+ /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+ return corsairpsu_init(priv);
+}
+#endif
+
static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
.probe = corsairpsu_probe,
.remove = corsairpsu_remove,
.raw_event = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+ .resume = corsairpsu_resume,
+ .reset_resume = corsairpsu_resume,
+#endif
};
module_hid_driver(corsairpsu_driver);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 2970892bed82..f2221ca0aa7b 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
- if (disallow_fan_support && index >= 8)
+ if (disallow_fan_support && index >= 20)
return 0;
if (disallow_fan_type_call &&
- (index == 9 || index == 12 || index == 15))
+ (index == 21 || index == 25 || index == 28))
return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index fd47ab4e6892..8d3b1dae31df 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -153,8 +153,44 @@ static int hwmon_thermal_get_temp(void *data, int *temp)
return 0;
}
+static int hwmon_thermal_set_trips(void *data, int low, int high)
+{
+ struct hwmon_thermal_data *tdata = data;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
+ const struct hwmon_chip_info *chip = hwdev->chip;
+ const struct hwmon_channel_info **info = chip->info;
+ unsigned int i;
+ int err;
+
+ if (!chip->ops->write)
+ return 0;
+
+ for (i = 0; info[i] && info[i]->type != hwmon_temp; i++)
+ continue;
+
+ if (!info[i])
+ return 0;
+
+ if (info[i]->config[tdata->index] & HWMON_T_MIN) {
+ err = chip->ops->write(tdata->dev, hwmon_temp,
+ hwmon_temp_min, tdata->index, low);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ if (info[i]->config[tdata->index] & HWMON_T_MAX) {
+ err = chip->ops->write(tdata->dev, hwmon_temp,
+ hwmon_temp_max, tdata->index, high);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return 0;
+}
+
static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
+ .set_trips = hwmon_thermal_set_trips,
};
static void hwmon_thermal_remove_sensor(void *data)
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index c602583d19f3..58d3828e2ec0 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -196,13 +196,11 @@ static inline u32 ina3221_reg_to_interval_us(u16 config)
u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(config);
u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(config);
- u32 samples_idx = INA3221_CONFIG_AVG(config);
- u32 samples = ina3221_avg_samples[samples_idx];
u32 vbus_ct = ina3221_conv_time[vbus_ct_idx];
u32 vsh_ct = ina3221_conv_time[vsh_ct_idx];
/* Calculate total conversion time */
- return channels * (vbus_ct + vsh_ct) * samples;
+ return channels * (vbus_ct + vsh_ct);
}
static inline int ina3221_wait_for_data(struct ina3221_data *ina)
@@ -288,13 +286,14 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
return -ENODATA;
/* Write CONFIG register to trigger a single-shot measurement */
- if (ina->single_shot)
+ if (ina->single_shot) {
regmap_write(ina->regmap, INA3221_CONFIG,
ina->reg_config);
- ret = ina3221_wait_for_data(ina);
- if (ret)
- return ret;
+ ret = ina3221_wait_for_data(ina);
+ if (ret)
+ return ret;
+ }
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
@@ -344,13 +343,14 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
return -ENODATA;
/* Write CONFIG register to trigger a single-shot measurement */
- if (ina->single_shot)
+ if (ina->single_shot) {
regmap_write(ina->regmap, INA3221_CONFIG,
ina->reg_config);
- ret = ina3221_wait_for_data(ina);
- if (ret)
- return ret;
+ ret = ina3221_wait_for_data(ina);
+ if (ret)
+ return ret;
+ }
fallthrough;
case hwmon_curr_crit:
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 40eab3349904..d2a60de5b8de 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -22,10 +22,10 @@
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -148,29 +148,6 @@ static const struct of_device_id lm70_of_ids[] = {
MODULE_DEVICE_TABLE(of, lm70_of_ids);
#endif
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id lm70_acpi_ids[] = {
- {
- .id = "LM000070",
- .driver_data = LM70_CHIP_LM70,
- },
- {
- .id = "TMP00121",
- .driver_data = LM70_CHIP_TMP121,
- },
- {
- .id = "LM000071",
- .driver_data = LM70_CHIP_LM71,
- },
- {
- .id = "LM000074",
- .driver_data = LM70_CHIP_LM74,
- },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
-#endif
-
static int lm70_probe(struct spi_device *spi)
{
struct device *hwmon_dev;
@@ -184,7 +161,7 @@ static int lm70_probe(struct spi_device *spi)
/* signaling is SPI_MODE_0 */
- if (spi->mode & (SPI_CPOL | SPI_CPHA))
+ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
return -EINVAL;
/* NOTE: we assume 8-bit words, and convert to 16 bits manually */
@@ -217,7 +194,6 @@ static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.of_match_table = of_match_ptr(lm70_of_ids),
- .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
},
.id_table = lm70_ids,
.probe = lm70_probe,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e447febd121a..afdbb63237b9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -50,6 +50,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp75,
tmp75b,
tmp75c,
+ tmp1075,
};
/**
@@ -293,6 +294,13 @@ static const struct lm75_params device_params[] = {
.clr_mask = 1 << 5, /*not one-shot mode*/
.default_resolution = 12,
.default_sample_time = MSEC_PER_SEC / 12,
+ },
+ [tmp1075] = { /* not one-shot mode, 27.5 ms sample rate */
+ .clr_mask = 1 << 5 | 1 << 6 | 1 << 7,
+ .default_resolution = 12,
+ .default_sample_time = 28,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
}
};
@@ -662,6 +670,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp75", tmp75, },
{ "tmp75b", tmp75b, },
{ "tmp75c", tmp75c, },
+ { "tmp1075", tmp1075, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
@@ -771,6 +780,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.compatible = "ti,tmp75c",
.data = (void *)tmp75c
},
+ {
+ .compatible = "ti,tmp1075",
+ .data = (void *)tmp1075
+ },
{ },
};
MODULE_DEVICE_TABLE(of, lm75_of_match);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index ebbfd5f352c0..567b7c521f38 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -465,6 +465,7 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
+ struct device *hwmon_dev;
u32 channel_config[4];
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[3];
@@ -1028,8 +1029,11 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
int err;
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index <= 2)
+ if (data->kind == lm99 && index <= 2) {
+ /* prevent integer underflow */
+ val = max(val, -128000l);
val -= 16000;
+ }
if (data->kind == adt7461 || data->kind == tmp451)
data->temp11[index] = temp_to_u16_adt7461(data, val);
@@ -1088,8 +1092,11 @@ static int lm90_set_temp8(struct lm90_data *data, int index, long val)
int err;
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index == 3)
+ if (data->kind == lm99 && index == 3) {
+ /* prevent integer underflow */
+ val = max(val, -128000l);
val -= 16000;
+ }
if (data->kind == adt7461 || data->kind == tmp451)
data->temp8[index] = temp_to_u8_adt7461(data, val);
@@ -1136,6 +1143,9 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
else
temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
+ /* prevent integer underflow */
+ val = max(val, -128000l);
+
data->temp_hyst = hyst_to_reg(temp - val);
err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
data->temp_hyst);
@@ -1703,6 +1713,13 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
if (data->kind == max6696)
config &= ~0x08;
+ /*
+ * Interrupt is enabled by default on reset, but it may be disabled
+ * by bootloader, unmask it.
+ */
+ if (client->irq)
+ config &= ~0x80;
+
config &= 0xBF; /* run */
lm90_update_confreg(data, config);
@@ -1731,22 +1748,41 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
(st2 & MAX6696_STATUS2_LOT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 1);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 1);
if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
(st2 & MAX6696_STATUS2_ROT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 2);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 2);
if (st & LM90_STATUS_ROPEN)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 2);
+ dev_dbg(&client->dev,
+ "temp%d diode open, please check!\n", 2);
if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 3);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 3);
if (st2 & MAX6696_STATUS2_R2OPEN)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 3);
+ dev_dbg(&client->dev,
+ "temp%d diode open, please check!\n", 3);
+
+ if (st & LM90_STATUS_LLOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 0);
+ if (st & LM90_STATUS_RLOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 1);
+ if (st2 & MAX6696_STATUS2_R2LOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 2);
+ if (st & LM90_STATUS_LHIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 0);
+ if (st & LM90_STATUS_RHIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 1);
+ if (st2 & MAX6696_STATUS2_R2HIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 2);
return true;
}
@@ -1904,12 +1940,13 @@ static int lm90_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
+ data->hwmon_dev = hwmon_dev;
+
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
err = devm_request_threaded_irq(dev, client->irq,
NULL, lm90_irq_thread,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "lm90", client);
+ IRQF_ONESHOT, "lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
return err;
@@ -1941,15 +1978,40 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
lm90_update_confreg(data, data->config | 0x80);
}
} else {
- dev_info(&client->dev, "Everything OK\n");
+ dev_dbg(&client->dev, "Everything OK\n");
}
}
+static int __maybe_unused lm90_suspend(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused lm90_resume(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
+
static struct i2c_driver lm90_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm90",
.of_match_table = of_match_ptr(lm90_of_match),
+ .pm = &lm90_pm_ops,
},
.probe_new = lm90_probe,
.alert = lm90_alert,
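The max(val, -128000l) clamps deserve a worked example: val is a millidegree value straight from userspace, so without the clamp a write of LONG_MIN would wrap around when 16000 is subtracted for the LM99 offset. Clamping to -128000, the bottom of the chips' representable range, keeps the subsequent subtraction (-128000 - 16000 = -144000) and the register conversion that follows well-defined.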
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 062eceb7be0d..613338cbcb17 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -6,7 +6,6 @@
* Copyright (c) 2016, Intel Corporation.
*/
-#include <linux/acpi.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
@@ -133,20 +132,12 @@ static const struct spi_device_id max31722_spi_id[] = {
{"max31723", 0},
{}
};
-
-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
- {"MAX31722", 0},
- {"MAX31723", 0},
- {}
-};
-
MODULE_DEVICE_TABLE(spi, max31722_spi_id);
static struct spi_driver max31722_driver = {
.driver = {
.name = "max31722",
.pm = &max31722_pm_ops,
- .acpi_match_table = ACPI_PTR(max31722_acpi_id),
},
.probe = max31722_probe,
.remove = max31722_remove,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 86e6c71db685..7e9362f6dc29 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -27,6 +27,7 @@
/* Fan Config register bits */
#define MAX31790_FAN_CFG_RPM_MODE 0x80
+#define MAX31790_FAN_CFG_CTRL_MON 0x10
#define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
#define MAX31790_FAN_CFG_TACH_INPUT 0x01
@@ -39,6 +40,8 @@
#define FAN_RPM_MIN 120
#define FAN_RPM_MAX 7864320
+#define FAN_COUNT_REG_MAX 0xffe0
+
#define RPM_FROM_REG(reg, sr) (((reg) >> 4) ? \
((60 * (sr) * 8192) / ((reg) >> 4)) : \
FAN_RPM_MAX)
@@ -79,7 +82,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
MAX31790_REG_FAN_FAULT_STATUS1);
if (rv < 0)
goto abort;
- data->fault_status = rv & 0x3F;
+ data->fault_status |= rv & 0x3F;
rv = i2c_smbus_read_byte_data(client,
MAX31790_REG_FAN_FAULT_STATUS2);
@@ -104,7 +107,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
data->tach[NR_CHANNEL + i] = rv;
} else {
rv = i2c_smbus_read_word_swapped(client,
- MAX31790_REG_PWMOUT(i));
+ MAX31790_REG_PWM_DUTY_CYCLE(i));
if (rv < 0)
goto abort;
data->pwm[i] = rv;
@@ -170,8 +173,11 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
switch (attr) {
case hwmon_fan_input:
- sr = get_tach_period(data->fan_dynamics[channel]);
- rpm = RPM_FROM_REG(data->tach[channel], sr);
+ sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]);
+ if (data->tach[channel] == FAN_COUNT_REG_MAX)
+ rpm = 0;
+ else
+ rpm = RPM_FROM_REG(data->tach[channel], sr);
*val = rpm;
return 0;
case hwmon_fan_target:
@@ -180,7 +186,21 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
*val = rpm;
return 0;
case hwmon_fan_fault:
+ mutex_lock(&data->update_lock);
*val = !!(data->fault_status & (1 << channel));
+ data->fault_status &= ~(1 << channel);
+ /*
+ * If a fault bit is set, we need to write into one of the fan
+ * configuration registers to clear it. Note that this also
+ * clears the fault for the companion channel if enabled.
+ */
+ if (*val) {
+ int reg = MAX31790_REG_TARGET_COUNT(channel % NR_CHANNEL);
+
+ i2c_smbus_write_byte_data(data->client, reg,
+ data->target_count[channel % NR_CHANNEL] >> 8);
+ }
+ mutex_unlock(&data->update_lock);
return 0;
default:
return -EOPNOTSUPP;
@@ -271,12 +291,12 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
*val = data->pwm[channel] >> 8;
return 0;
case hwmon_pwm_enable:
- if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
+ if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
+ *val = 0;
+ else if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
*val = 2;
- else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
- *val = 1;
else
- *val = 0;
+ *val = 1;
return 0;
default:
return -EOPNOTSUPP;
@@ -299,31 +319,41 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
err = -EINVAL;
break;
}
- data->pwm[channel] = val << 8;
+ data->valid = false;
err = i2c_smbus_write_word_swapped(client,
MAX31790_REG_PWMOUT(channel),
- data->pwm[channel]);
+ val << 8);
break;
case hwmon_pwm_enable:
fan_config = data->fan_config[channel];
if (val == 0) {
- fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
- MAX31790_FAN_CFG_RPM_MODE);
+ fan_config |= MAX31790_FAN_CFG_CTRL_MON;
+ /*
+ * Disable RPM mode; otherwise disabling fan speed
+ * monitoring is not possible.
+ */
+ fan_config &= ~MAX31790_FAN_CFG_RPM_MODE;
} else if (val == 1) {
- fan_config = (fan_config |
- MAX31790_FAN_CFG_TACH_INPUT_EN) &
- ~MAX31790_FAN_CFG_RPM_MODE;
+ fan_config &= ~(MAX31790_FAN_CFG_CTRL_MON | MAX31790_FAN_CFG_RPM_MODE);
} else if (val == 2) {
- fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
- MAX31790_FAN_CFG_RPM_MODE;
+ fan_config &= ~MAX31790_FAN_CFG_CTRL_MON;
+ /*
+ * The chip sets MAX31790_FAN_CFG_TACH_INPUT_EN on its
+ * own if MAX31790_FAN_CFG_RPM_MODE is set.
+ * Do it here as well to reflect the actual register
+ * value in the cache.
+ */
+ fan_config |= (MAX31790_FAN_CFG_RPM_MODE | MAX31790_FAN_CFG_TACH_INPUT_EN);
} else {
err = -EINVAL;
break;
}
- data->fan_config[channel] = fan_config;
- err = i2c_smbus_write_byte_data(client,
- MAX31790_REG_FAN_CONFIG(channel),
- fan_config);
+ if (fan_config != data->fan_config[channel]) {
+ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
+ fan_config);
+ if (!err)
+ data->fan_config[channel] = fan_config;
+ }
break;
default:
err = -EOPNOTSUPP;
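The fan_config path now follows a write-on-change pattern: compare the computed value against the cached register copy, skip the I2C write when nothing changed, and commit the cache only after the write succeeds, so an I2C error can no longer leave the cache claiming a configuration the hardware never received.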
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 367855d5edae..7821132e17fa 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -156,7 +156,7 @@ max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
default:
break;
}
-
+ break;
default:
break;
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8587189c7f15..18fd6f12ca16 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -8,7 +8,6 @@
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/math64.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -17,9 +16,6 @@
#include <linux/platform_data/ntc_thermistor.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/machine.h>
-#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
#include <linux/hwmon.h>
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 37a5c39784fa..ffb609cee3a4 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,9 +19,10 @@ config SENSORS_PMBUS
default y
help
If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
- TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ PMBus devices, including but not limited to ADP4000, BMR310, BMR453,
+ BMR454, BMR456, BMR457, BMR458, BMR480, BMR490, BMR491, BMR492,
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012,
+ TPS40400, TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -85,6 +86,15 @@ config SENSORS_IBM_CFFPS
This driver can also be built as a module. If so, the module will
be called ibm-cffps.
+config SENSORS_DPS920AB
+ tristate "Delta DPS920AB Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for Delta
+ DPS920AB Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called dps920ab.
+
config SENSORS_INSPUR_IPSPS
tristate "INSPUR Power System Power Supply"
help
@@ -248,6 +258,15 @@ config SENSORS_MAX8688
This driver can also be built as a module. If so, the module will
be called max8688.
+config SENSORS_MP2888
+ tristate "MPS MP2888"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2888 Digital, Multi-Phase, Pulse-Width Modulation Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2888.
+
config SENSORS_MP2975
tristate "MPS MP2975"
help
@@ -257,6 +276,15 @@ config SENSORS_MP2975
This driver can also be built as a module. If so, the module will
be called mp2975.
+config SENSORS_PIM4328
+ tristate "Flex PIM4328 and compatibles"
+ help
+ If you say yes here you get hardware monitoring support for Flex
+ PIM4328, PIM4820 and PIM4006 Power Interface Modules.
+
+ This driver can also be built as a module. If so, the module will
+ be called pim4328.
+
config SENSORS_PM6764TR
tristate "ST PM6764TR"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index f8dcc27cd56a..0ed4d596a948 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_BPA_RS600) += bpa-rs600.o
obj-$(CONFIG_SENSORS_FSP_3Y) += fsp-3y.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
+obj-$(CONFIG_SENSORS_DPS920AB) += dps920ab.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR36021) += ir36021.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
@@ -39,3 +41,4 @@ obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
+obj-$(CONFIG_SENSORS_PIM4328) += pim4328.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a3850b2f3..d311e0557401 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -611,11 +611,13 @@ static int adm1275_probe(struct i2c_client *client)
tindex = 8;
info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
- PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
- /* Enable VOUT if not enabled (it is disabled by default) */
- if (!(config & ADM1278_VOUT_EN)) {
- config |= ADM1278_VOUT_EN;
+ /* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+ if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+ (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+ config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
ret = i2c_smbus_write_byte_data(client,
ADM1275_PMON_CONFIG,
config);
@@ -625,10 +627,6 @@ static int adm1275_probe(struct i2c_client *client)
return -ENODEV;
}
}
-
- if (config & ADM1278_TEMP1_EN)
- info->func[0] |=
- PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
if (config & ADM1278_VIN_EN)
info->func[0] |= PMBUS_HAVE_VIN;
break;
diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
index f6558ee9dec3..2be69fedfa36 100644
--- a/drivers/hwmon/pmbus/bpa-rs600.c
+++ b/drivers/hwmon/pmbus/bpa-rs600.c
@@ -46,6 +46,32 @@ static int bpa_rs600_read_byte_data(struct i2c_client *client, int page, int reg
return ret;
}
+/*
+ * The BPA-RS600 violates the PMBus spec. Specifically, it treats the
+ * mantissa as unsigned. Deal with this here to allow the PMBus core
+ * to work with correctly encoded data.
+ */
+static int bpa_rs600_read_vin(struct i2c_client *client)
+{
+ int ret, exponent, mantissa;
+
+ ret = pmbus_read_word_data(client, 0, 0xff, PMBUS_READ_VIN);
+ if (ret < 0)
+ return ret;
+
+ if (ret & BIT(10)) {
+ exponent = ret >> 11;
+ mantissa = ret & 0x7ff;
+
+ exponent++;
+ mantissa >>= 1;
+
+ ret = (exponent << 11) | mantissa;
+ }
+
+ return ret;
+}
+
static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int phase, int reg)
{
int ret;
@@ -85,6 +111,9 @@ static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int pha
/* These commands return data, but the data is invalid/undocumented */
ret = -ENXIO;
break;
+ case PMBUS_READ_VIN:
+ ret = bpa_rs600_read_vin(client);
+ break;
default:
if (reg >= PMBUS_VIRT_BASE)
ret = -ENXIO;
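
The re-encoding done by bpa_rs600_read_vin() can be exercised in isolation. A small sketch, assuming a LINEAR11 word with the 5-bit exponent in bits 15:11 and the 11-bit mantissa in bits 10:0:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * If the "unsigned" mantissa has overflowed into the sign bit (bit 10),
     * bump the exponent and halve the mantissa: the encoded value stays the
     * same, but the word becomes a valid non-negative LINEAR11 number.
     */
    static uint16_t fixup_linear11(uint16_t raw)
    {
            if (raw & (1u << 10)) {
                    uint16_t exponent = (raw >> 11) + 1;
                    uint16_t mantissa = (raw & 0x7ff) >> 1;

                    raw = (uint16_t)((exponent << 11) | mantissa);
            }
            return raw;
    }

    int main(void)
    {
            uint16_t raw = 0xf500;  /* exponent -2, mantissa 0x500 (bit 10 set) */

            printf("0x%04x -> 0x%04x\n", raw, fixup_linear11(raw));
            return 0;
    }

Here 0xf500 becomes 0xfa80; both encode 320 counts (0x500 * 2^-2 == 0x280 * 2^-1), but the fixed-up word no longer trips the PMBus core's sign extension.
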
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
new file mode 100644
index 000000000000..d3941f6eb29a
--- /dev/null
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Delta DPS920AB PSU
+ *
+ * Copyright (C) 2021 Delta Networks, Inc.
+ * Copyright (C) 2021 Sartura Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+struct dps920ab_data {
+ char *mfr_model;
+ char *mfr_id;
+};
+
+static int dps920ab_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ /*
+ * This masks commands which are not supported.
+	 * The PSU advertises that all features are supported, but in
+	 * reality that unfortunately is not true.
+	 * So enable only those commands that the datasheet confirms.
+ */
+ switch (reg) {
+ case PMBUS_FAN_COMMAND_1:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_STATUS_WORD:
+ case PMBUS_READ_VIN:
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_READ_TEMPERATURE_2:
+ case PMBUS_READ_TEMPERATURE_3:
+ case PMBUS_READ_FAN_SPEED_1:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VOUT_MAX:
+ case PMBUS_MFR_IOUT_MAX:
+ case PMBUS_MFR_POUT_MAX:
+ return pmbus_read_word_data(client, page, phase, reg);
+ default:
+ return -ENXIO;
+ }
+}
+
+static int dps920ab_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ /*
+ * This masks commands which are not supported.
+	 * The PSU has only one R/W register, and that is
+	 * for the fan.
+ */
+ switch (reg) {
+ case PMBUS_FAN_COMMAND_1:
+ return pmbus_write_word_data(client, page, reg, word);
+ default:
+ return -EACCES;
+ }
+}
+
+static struct pmbus_driver_info dps920ab_info = {
+ .pages = 1,
+
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_FAN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = dps920ab_read_word_data,
+ .write_word_data = dps920ab_write_word_data,
+};
+
+static int dps920ab_mfr_id_show(struct seq_file *s, void *data)
+{
+ struct dps920ab_data *priv = s->private;
+
+ seq_printf(s, "%s\n", priv->mfr_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dps920ab_mfr_id);
+
+static int dps920ab_mfr_model_show(struct seq_file *s, void *data)
+{
+ struct dps920ab_data *priv = s->private;
+
+ seq_printf(s, "%s\n", priv->mfr_model);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dps920ab_mfr_model);
+
+static void dps920ab_init_debugfs(struct dps920ab_data *data, struct i2c_client *client)
+{
+ struct dentry *debugfs_dir;
+ struct dentry *root;
+
+ root = pmbus_get_debugfs_dir(client);
+ if (!root)
+ return;
+
+ debugfs_dir = debugfs_create_dir(client->name, root);
+
+ debugfs_create_file("mfr_id",
+ 0400,
+ debugfs_dir,
+ data,
+ &dps920ab_mfr_id_fops);
+
+ debugfs_create_file("mfr_model",
+ 0400,
+ debugfs_dir,
+ data,
+ &dps920ab_mfr_model_fops);
+}
+
+static int dps920ab_probe(struct i2c_client *client)
+{
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct dps920ab_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+
+ buf[ret] = '\0';
+ if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+ dev_err(&client->dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+ data->mfr_id = devm_kstrdup(&client->dev, buf, GFP_KERNEL);
+ if (!data->mfr_id)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+
+ buf[ret] = '\0';
+ if (ret != 11 || strncmp(buf, "DPS-920AB", 9)) {
+ dev_err(&client->dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+ data->mfr_model = devm_kstrdup(&client->dev, buf, GFP_KERNEL);
+ if (!data->mfr_model)
+ return -ENOMEM;
+
+ ret = pmbus_do_probe(client, &dps920ab_info);
+ if (ret)
+ return ret;
+
+ dps920ab_init_debugfs(data, client);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused dps920ab_of_match[] = {
+ { .compatible = "delta,dps920ab", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, dps920ab_of_match);
+
+static struct i2c_driver dps920ab_driver = {
+ .driver = {
+ .name = "dps920ab",
+ .of_match_table = of_match_ptr(dps920ab_of_match),
+ },
+ .probe_new = dps920ab_probe,
+};
+
+module_i2c_driver(dps920ab_driver);
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("PMBus driver for Delta DPS920AB PSU");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index e24842475254..aec294cc72d1 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -37,6 +37,8 @@ struct fsp3y_data {
struct pmbus_driver_info info;
int chip;
int page;
+
+ bool vout_linear_11;
};
#define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -108,11 +110,9 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
int rv;
/*
- * YH5151-E outputs vout in linear11. The conversion is done when
- * reading. Here, we have to inject pmbus_core with the correct
- * exponent (it is -6).
+ * Inject an exponent for non-compliant YH5151-E.
*/
- if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE)
+ if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
return 0x1A;
rv = set_page(client, page);
@@ -161,10 +161,9 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
return rv;
/*
- * YH-5151E is non-compliant and outputs output voltages in linear11
- * instead of linear16.
+ * Handle YH-5151E non-compliant linear11 vout voltage.
*/
- if (data->chip == yh5151e && reg == PMBUS_READ_VOUT)
+ if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
rv = sign_extend32(rv, 10) & 0xffff;
return rv;
@@ -256,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
data->info = fsp3y_info[data->chip];
+ /*
+	 * YH-5151E sometimes reports vout in linear11 and sometimes in
+	 * linear16, depending on the exact individual piece of hardware: one
+	 * YH-5151E can use linear16 and another might use linear11 instead.
+ *
+ * The format can be recognized by reading VOUT_MODE - if it doesn't
+ * report a valid exponent, then vout uses linear11. Otherwise, the
+ * device is compliant and uses linear16.
+ */
+ data->vout_linear_11 = false;
+ if (data->chip == yh5151e) {
+ rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (rv < 0)
+ return rv;
+
+ if (rv == 0xFF)
+ data->vout_linear_11 = true;
+ }
+
return pmbus_do_probe(client, &data->info);
}
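
The injected VOUT_MODE byte 0x1A carries a 5-bit two's complement exponent in its low bits. A short decoding sketch; the READ_VOUT mantissa is hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the 5-bit exponent field of a PMBus VOUT_MODE byte. */
    static int vout_mode_exponent(uint8_t vout_mode)
    {
            int exp = vout_mode & 0x1f;

            return exp >= 16 ? exp - 32 : exp;
    }

    int main(void)
    {
            uint8_t vout_mode = 0x1a;       /* what the driver injects */
            unsigned int mantissa = 0x0300; /* hypothetical READ_VOUT value */
            int exp = vout_mode_exponent(vout_mode);

            /* exp is -6 here, so 2^exp amounts to a division by 64 */
            printf("exponent %d, vout = %u * 2^%d = %.3f V\n",
                   exp, mantissa, exp, mantissa / 64.0);
            return 0;
    }

0x1A masks to 26, which sign-extends to -6, so a mantissa of 0x0300 (768) decodes to 768 / 64 = 12.000 V.
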
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 40597a9e799f..1a8caff1ac5f 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
info->read_word_data = raa_dmpvr2_read_word_data;
break;
case raa_dmpvr2_2rail_nontc:
- info->func[0] &= ~PMBUS_HAVE_TEMP;
- info->func[1] &= ~PMBUS_HAVE_TEMP;
+ info->func[0] &= ~PMBUS_HAVE_TEMP3;
+ info->func[1] &= ~PMBUS_HAVE_TEMP3;
fallthrough;
case raa_dmpvr2_2rail:
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
new file mode 100644
index 000000000000..8ecd4adfef40
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers
+ *
+ * Copyright (C) 2020 Nvidia Technologies Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+/* Vendor specific registers. */
+#define MP2888_MFR_SYS_CONFIG 0x44
+#define MP2888_MFR_READ_CS1_2 0x73
+#define MP2888_MFR_READ_CS3_4 0x74
+#define MP2888_MFR_READ_CS5_6 0x75
+#define MP2888_MFR_READ_CS7_8 0x76
+#define MP2888_MFR_READ_CS9_10 0x77
+#define MP2888_MFR_VR_CONFIG1 0xe1
+
+#define MP2888_TOTAL_CURRENT_RESOLUTION BIT(3)
+#define MP2888_PHASE_CURRENT_RESOLUTION BIT(4)
+#define MP2888_DRMOS_KCS GENMASK(2, 0)
+#define MP2888_TEMP_UNIT 10
+#define MP2888_MAX_PHASE 10
+
+struct mp2888_data {
+ struct pmbus_driver_info info;
+ int total_curr_resolution;
+ int phase_curr_resolution;
+ int curr_sense_gain;
+};
+
+#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
+
+static int mp2888_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /* Enforce VOUT direct format. */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int
+mp2888_current_sense_gain_and_resolution_get(struct i2c_client *client, struct mp2888_data *data)
+{
+ int ret;
+
+ /*
+	 * Obtain the DrMOS current sense gain of the power stage from the
+	 * register, bits 0-2. The value is selected as below:
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A. Other
+ * values are reserved.
+ */
+ ret = i2c_smbus_read_word_data(client, MP2888_MFR_SYS_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & MP2888_DRMOS_KCS) {
+ case 0:
+ data->curr_sense_gain = 85;
+ break;
+ case 1:
+ data->curr_sense_gain = 97;
+ break;
+ case 2:
+ data->curr_sense_gain = 100;
+ break;
+ case 3:
+ data->curr_sense_gain = 50;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Obtain resolution selector for total and phase current report and protection.
+	 * 0: original resolution; 1: half resolution (in that case the phase
+	 * current value should be doubled).
+ */
+ data->total_curr_resolution = (ret & MP2888_TOTAL_CURRENT_RESOLUTION) >> 3;
+ data->phase_curr_resolution = (ret & MP2888_PHASE_CURRENT_RESOLUTION) >> 4;
+
+ return 0;
+}
+
+static int
+mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page, int phase, u8 reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!((phase + 1) % 2))
+ ret >>= 8;
+ ret &= 0xff;
+
+ /*
+	 * Output value is calculated as: (READ_CSx / 80 - 1.23) / (Kcs * Rcs)
+	 * where:
+	 * - Kcs is the DrMOS current sense gain of the power stage, which is obtained from
+	 *   the register MP2888_MFR_SYS_CONFIG, bits 0-2, with the following selection of
+	 *   DrMOS (data->curr_sense_gain):
+	 *   00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A.
+	 * - Rcs is the internal phase current sense resistor. This parameter depends on the
+	 *   hardware assembly. By default it is set to 1kΩ. In case of a different assembly,
+	 *   the user should scale this parameter by dividing it by Rcs.
+	 * If the phase current resolution bit is set to 1, the READ_CSx value should be
+	 * doubled.
+	 * Note that the phase current sensing provided by the device is not accurate. This
+	 * is because the current sampling has a large deviation, especially under light
+	 * load.
+ */
+ ret = DIV_ROUND_CLOSEST(ret * 100 - 9800, data->curr_sense_gain);
+ ret = (data->phase_curr_resolution) ? ret * 2 : ret;
+ /* Scale according to total current resolution. */
+ ret = (data->total_curr_resolution) ? ret * 8 : ret * 4;
+ return ret;
+}
+
+static int
+mp2888_read_phases(struct i2c_client *client, struct mp2888_data *data, int page, int phase)
+{
+ int ret;
+
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS1_2);
+ break;
+ case 2 ... 3:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS3_4);
+ break;
+ case 4 ... 5:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS5_6);
+ break;
+ case 6 ... 7:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS7_8);
+ break;
+ case 8 ... 9:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS9_10);
+ break;
+ default:
+ return -ENODATA;
+ }
+ return ret;
+}
+
+static int mp2888_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2888_data *data = to_mp2888_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * READ_VIN requires fixup to scale it to linear11 format. Register data format
+ * provides 10 bits for mantissa and 6 bits for exponent. Bits 15:10 are set with
+ * the fixed value 111011b.
+ */
+ ret = (ret & GENMASK(9, 0)) | ((ret & GENMASK(31, 10)) << 1);
+ break;
+ case PMBUS_OT_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+		 * The chip reports limits in degrees C, but the actual temperature in tenths
+		 * of a degree C; scaling is needed to match both.
+ */
+ ret *= MP2888_TEMP_UNIT;
+ break;
+ case PMBUS_READ_IOUT:
+ if (phase != 0xff)
+ return mp2888_read_phases(client, data, page, phase);
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * READ_IOUT register has unused bits 15:12 with fixed value 1110b. Clear these
+ * bits and scale with total current resolution. Data is provided in direct format.
+ */
+ ret &= GENMASK(11, 0);
+ ret = data->total_curr_resolution ? ret * 2 : ret;
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ ret &= GENMASK(9, 0);
+ /*
+		 * The chip reports limits with a resolution of 1A, or 2A if the total current
+		 * resolution bit is set to 1. The actual current is reported with a resolution
+		 * of 0.25A or 0.5A, respectively. Scaling is needed to match both.
+ */
+ ret = data->total_curr_resolution ? ret * 8 : ret * 4;
+ break;
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_PIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ ret = data->total_curr_resolution ? ret * 2 : ret;
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+		 * The chip reports limits with a resolution of 1W, or 2W if the total current
+		 * resolution bit is set to 1. The actual power is reported with a resolution of
+		 * 0.5W or 1W, respectively. Scaling is needed to match both.
+ */
+ ret = data->total_curr_resolution ? ret * 4 : ret * 2;
+ break;
+ /*
+	 * The registers below are either not implemented by the device or not implemented
+	 * according to the spec. Skip all of them to avoid exposing irrelevant inputs to sysfs.
+ */
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_UT_WARN_LIMIT:
+ case PMBUS_UT_FAULT_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_IOUT_OC_LV_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_POUT_MAX:
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ case PMBUS_MFR_VIN_MIN:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VIN_MAX:
+ case PMBUS_MFR_VOUT_MAX:
+ case PMBUS_MFR_IIN_MAX:
+ case PMBUS_MFR_IOUT_MAX:
+ case PMBUS_MFR_PIN_MAX:
+ case PMBUS_MFR_POUT_MAX:
+ case PMBUS_MFR_MAX_TEMP_1:
+ return -ENXIO;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
+static int mp2888_write_word_data(struct i2c_client *client, int page, int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2888_data *data = to_mp2888_data(info);
+
+ switch (reg) {
+ case PMBUS_OT_WARN_LIMIT:
+ word = DIV_ROUND_CLOSEST(word, MP2888_TEMP_UNIT);
+ /* Drop unused bits 15:8. */
+ word = clamp_val(word, 0, GENMASK(7, 0));
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+		/* Fix limit according to total current resolution. */
+ word = data->total_curr_resolution ? DIV_ROUND_CLOSEST(word, 8) :
+ DIV_ROUND_CLOSEST(word, 4);
+ /* Drop unused bits 15:10. */
+ word = clamp_val(word, 0, GENMASK(9, 0));
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+		/* Fix limit according to total current resolution. */
+ word = data->total_curr_resolution ? DIV_ROUND_CLOSEST(word, 4) :
+ DIV_ROUND_CLOSEST(word, 2);
+ /* Drop unused bits 15:10. */
+ word = clamp_val(word, 0, GENMASK(9, 0));
+ break;
+ default:
+ return -ENODATA;
+ }
+ return pmbus_write_word_data(client, page, reg, word);
+}
+
+static int
+mp2888_identify_multiphase(struct i2c_client *client, struct mp2888_data *data,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify multiphase number - could be from 1 to 10. */
+ ret = i2c_smbus_read_word_data(client, MP2888_MFR_VR_CONFIG1);
+ if (ret <= 0)
+ return ret;
+
+ info->phases[0] = ret & GENMASK(3, 0);
+
+ /*
+	 * The device provides a total of 10 PWM pins and can be configured for
+	 * different phase counts per rail.
+ */
+ if (info->phases[0] > MP2888_MAX_PHASE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct pmbus_driver_info mp2888_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 4,
+ .m[PSC_POWER] = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_PHASE_VIRTUAL,
+ .pfunc[0] = PMBUS_HAVE_IOUT,
+ .pfunc[1] = PMBUS_HAVE_IOUT,
+ .pfunc[2] = PMBUS_HAVE_IOUT,
+ .pfunc[3] = PMBUS_HAVE_IOUT,
+ .pfunc[4] = PMBUS_HAVE_IOUT,
+ .pfunc[5] = PMBUS_HAVE_IOUT,
+ .pfunc[6] = PMBUS_HAVE_IOUT,
+ .pfunc[7] = PMBUS_HAVE_IOUT,
+ .pfunc[8] = PMBUS_HAVE_IOUT,
+ .pfunc[9] = PMBUS_HAVE_IOUT,
+ .read_byte_data = mp2888_read_byte_data,
+ .read_word_data = mp2888_read_word_data,
+ .write_word_data = mp2888_write_word_data,
+};
+
+static int mp2888_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2888_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2888_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2888_info, sizeof(*info));
+ info = &data->info;
+
+ /* Identify multiphase configuration. */
+ ret = mp2888_identify_multiphase(client, data, info);
+ if (ret)
+ return ret;
+
+ /* Obtain current sense gain of power stage and current resolution. */
+ ret = mp2888_current_sense_gain_and_resolution_get(client, data);
+ if (ret)
+ return ret;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp2888_id[] = {
+ {"mp2888", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mp2888_id);
+
+static const struct of_device_id __maybe_unused mp2888_of_match[] = {
+ {.compatible = "mps,mp2888"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2888_of_match);
+
+static struct i2c_driver mp2888_driver = {
+ .driver = {
+ .name = "mp2888",
+ .of_match_table = of_match_ptr(mp2888_of_match),
+ },
+ .probe_new = mp2888_probe,
+ .id_table = mp2888_id,
+};
+
+module_i2c_driver(mp2888_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2888 device");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
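
A sketch of the per-phase current arithmetic that mp2888_read_phase() implements from the comment above. The raw sample and gain are hypothetical, and the rounding helper is simplified to positive operands:

    #include <stdio.h>

    /* Positive-operand stand-in for the kernel's DIV_ROUND_CLOSEST(). */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

    /*
     * csx is the 8-bit raw phase sample, gain the Kcs value scaled by 10
     * (85 == 8.5uA/A); the two resolution flags select the final multiplier,
     * mirroring the driver's scaling order.
     */
    static int phase_current(int csx, int gain, int phase_half_res, int total_half_res)
    {
            int val = DIV_ROUND_CLOSEST(csx * 100 - 9800, gain);

            if (phase_half_res)
                    val *= 2;
            return total_half_res ? val * 8 : val * 4;
    }

    int main(void)
    {
            /* Hypothetical: raw 0xb4, Kcs 8.5uA/A, original resolution. */
            printf("iout = %d\n", phase_current(0xb4, 85, 0, 0));
            return 0;
    }
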
diff --git a/drivers/hwmon/pmbus/pim4328.c b/drivers/hwmon/pmbus/pim4328.c
new file mode 100644
index 000000000000..273ff6e57654
--- /dev/null
+++ b/drivers/hwmon/pmbus/pim4328.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for PIM4006, PIM4328 and PIM4820
+ *
+ * Copyright (c) 2021 Flextronics International Sweden AB
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/slab.h>
+#include "pmbus.h"
+
+enum chips { pim4006, pim4328, pim4820 };
+
+struct pim4328_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+};
+
+#define to_pim4328_data(x) container_of(x, struct pim4328_data, info)
+
+/* PIM4006 and PIM4328 */
+#define PIM4328_MFR_READ_VINA 0xd3
+#define PIM4328_MFR_READ_VINB 0xd4
+
+/* PIM4006 */
+#define PIM4328_MFR_READ_IINA 0xd6
+#define PIM4328_MFR_READ_IINB 0xd7
+#define PIM4328_MFR_FET_CHECKSTATUS 0xd9
+
+/* PIM4328 */
+#define PIM4328_MFR_STATUS_BITS 0xd5
+
+/* PIM4820 */
+#define PIM4328_MFR_READ_STATUS 0xd0
+
+static const struct i2c_device_id pim4328_id[] = {
+ {"bmr455", pim4328},
+ {"pim4006", pim4006},
+ {"pim4106", pim4006},
+ {"pim4206", pim4006},
+ {"pim4306", pim4006},
+ {"pim4328", pim4328},
+ {"pim4406", pim4006},
+ {"pim4820", pim4820},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pim4328_id);
+
+static int pim4328_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ if (page > 0)
+ return -ENXIO;
+
+ if (phase == 0xff)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ phase == 0 ? PIM4328_MFR_READ_VINA
+ : PIM4328_MFR_READ_VINB);
+ break;
+ case PMBUS_READ_IIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ phase == 0 ? PIM4328_MFR_READ_IINA
+ : PIM4328_MFR_READ_IINB);
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ return ret;
+}
+
+static int pim4328_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct pim4328_data *data = to_pim4328_data(info);
+ int ret, status;
+
+ if (page > 0)
+ return -ENXIO;
+
+ switch (reg) {
+ case PMBUS_STATUS_BYTE:
+ ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (ret < 0)
+ return ret;
+ if (data->id == pim4006) {
+ status = pmbus_read_word_data(client, page, 0xff,
+ PIM4328_MFR_FET_CHECKSTATUS);
+ if (status < 0)
+ return status;
+ if (status & 0x0630) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ } else if (data->id == pim4328) {
+ status = pmbus_read_byte_data(client, page,
+ PIM4328_MFR_STATUS_BITS);
+ if (status < 0)
+ return status;
+ if (status & 0x04) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ if (status & 0x40) /* Output UV */
+ ret |= PB_STATUS_NONE_ABOVE;
+ } else if (data->id == pim4820) {
+ status = pmbus_read_byte_data(client, page,
+ PIM4328_MFR_READ_STATUS);
+ if (status < 0)
+ return status;
+ if (status & 0x05) /* Input OV or OC */
+ ret |= PB_STATUS_NONE_ABOVE;
+ if (status & 0x1a) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ if (status & 0x40) /* OT */
+ ret |= PB_STATUS_TEMPERATURE;
+ }
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ return ret;
+}
+
+static int pim4328_probe(struct i2c_client *client)
+{
+ int status;
+ u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
+ const struct i2c_device_id *mid;
+ struct pim4328_data *data;
+ struct pmbus_driver_info *info;
+ struct pmbus_platform_data *pdata;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct pim4328_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ status = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, device_id);
+ if (status < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return status;
+ }
+ for (mid = pim4328_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, device_id, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
+ }
+
+ if (strcmp(client->name, mid->name))
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ client->name, mid->name);
+
+ data->id = mid->driver_data;
+ info = &data->info;
+ info->pages = 1;
+ info->read_byte_data = pim4328_read_byte_data;
+ info->read_word_data = pim4328_read_word_data;
+
+ pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ dev->platform_data = pdata;
+ pdata->flags = PMBUS_NO_CAPABILITY | PMBUS_NO_WRITE_PROTECT;
+
+ switch (data->id) {
+ case pim4006:
+ info->phases[0] = 2;
+ info->func[0] = PMBUS_PHASE_VIRTUAL | PMBUS_HAVE_VIN
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT;
+ info->pfunc[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN;
+ info->pfunc[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN;
+ break;
+ case pim4328:
+ info->phases[0] = 2;
+ info->func[0] = PMBUS_PHASE_VIRTUAL
+ | PMBUS_HAVE_VCAP | PMBUS_HAVE_VIN
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT;
+ info->pfunc[0] = PMBUS_HAVE_VIN;
+ info->pfunc[1] = PMBUS_HAVE_VIN;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
+ pdata->flags |= PMBUS_USE_COEFFICIENTS_CMD;
+ break;
+ case pim4820:
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_IIN;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_CURRENT_IN] = direct;
+ pdata->flags |= PMBUS_USE_COEFFICIENTS_CMD;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return pmbus_do_probe(client, info);
+}
+
+static struct i2c_driver pim4328_driver = {
+ .driver = {
+ .name = "pim4328",
+ },
+ .probe_new = pim4328_probe,
+ .id_table = pim4328_id,
+};
+
+module_i2c_driver(pim4328_driver);
+
+MODULE_AUTHOR("Erik Rosen <erik.rosen@metormote.com>");
+MODULE_DESCRIPTION("PMBus driver for PIM4006, PIM4328, PIM4820 power interface modules");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
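
A sketch of the status folding in pim4328_read_byte_data() for the PIM4328 variant: manufacturer status bits are translated onto standard STATUS_BYTE flags. The PB_STATUS_* positions follow the PMBus STATUS_BYTE layout; the register value is hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    #define PB_STATUS_NONE_ABOVE    (1u << 0)   /* "none of the above" */
    #define PB_STATUS_VIN_UV        (1u << 3)   /* input undervoltage */

    /* Fold the PIM4328 MFR_STATUS_BITS flags into a PMBus STATUS_BYTE. */
    static uint8_t fold_status(uint8_t status_byte, uint8_t mfr_status)
    {
            if (mfr_status & 0x04)  /* input UV */
                    status_byte |= PB_STATUS_VIN_UV;
            if (mfr_status & 0x40)  /* output UV */
                    status_byte |= PB_STATUS_NONE_ABOVE;
            return status_byte;
    }

    int main(void)
    {
            printf("status 0x%02x\n", fold_status(0x00, 0x44));
            return 0;
    }
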
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 618c377664c4..d0d386990af5 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -173,13 +173,13 @@ static int pmbus_probe(struct i2c_client *client)
return -ENOMEM;
device_info = (struct pmbus_device_info *)i2c_match_id(pmbus_id, client)->driver_data;
- if (device_info->flags & PMBUS_SKIP_STATUS_CHECK) {
+ if (device_info->flags) {
pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->flags = PMBUS_SKIP_STATUS_CHECK;
+ pdata->flags = device_info->flags;
}
info->pages = device_info->pages;
@@ -193,22 +193,37 @@ static const struct pmbus_device_info pmbus_info_one = {
.pages = 1,
.flags = 0
};
+
static const struct pmbus_device_info pmbus_info_zero = {
.pages = 0,
.flags = 0
};
+
static const struct pmbus_device_info pmbus_info_one_skip = {
.pages = 1,
.flags = PMBUS_SKIP_STATUS_CHECK
};
+static const struct pmbus_device_info pmbus_info_one_status = {
+ .pages = 1,
+ .flags = PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+};
+
/*
* Use driver_data to set the number of pages supported by the chip.
*/
static const struct i2c_device_id pmbus_id[] = {
{"adp4000", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr310", (kernel_ulong_t)&pmbus_info_one_status},
{"bmr453", (kernel_ulong_t)&pmbus_info_one},
{"bmr454", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr456", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr457", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr458", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr480", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr490", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr491", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr492", (kernel_ulong_t)&pmbus_info_one},
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 3968924f8533..e0aa8aa46d8c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -375,7 +375,7 @@ enum pmbus_sensor_classes {
};
#define PMBUS_PAGES 32 /* Per PMBus specification */
-#define PMBUS_PHASES 8 /* Maximum number of phases per page */
+#define PMBUS_PHASES 10 /* Maximum number of phases per page */
/* Functionality bit mask */
#define PMBUS_HAVE_VIN BIT(0)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index bbd745178147..776ee2237be2 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -523,6 +523,8 @@ static bool pmbus_check_register(struct i2c_client *client,
rv = func(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
rv = pmbus_check_status_cml(client);
+ if (rv < 0 && (data->flags & PMBUS_READ_STATUS_AFTER_FAILED_CHECK))
+ data->read_status(client, -1);
pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
@@ -1327,14 +1329,14 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
pages = paged ? info->pages : 1;
for (page = 0; page < pages; page++) {
- if (!(info->func[page] & attrs->func))
- continue;
- ret = pmbus_add_sensor_attrs_one(client, data, info,
- name, index, page,
- 0xff, attrs, paged);
- if (ret)
- return ret;
- index++;
+ if (info->func[page] & attrs->func) {
+ ret = pmbus_add_sensor_attrs_one(client, data, info,
+ name, index, page,
+ 0xff, attrs, paged);
+ if (ret)
+ return ret;
+ index++;
+ }
if (info->phases[page]) {
int phase;
@@ -2140,6 +2142,111 @@ static int pmbus_find_attributes(struct i2c_client *client,
}
/*
+ * The pmbus_class_attr_map structure maps one sensor class to
+ * its corresponding sensor attributes array.
+ */
+struct pmbus_class_attr_map {
+ enum pmbus_sensor_classes class;
+ int nattr;
+ const struct pmbus_sensor_attr *attr;
+};
+
+static const struct pmbus_class_attr_map class_attr_map[] = {
+ {
+ .class = PSC_VOLTAGE_IN,
+ .attr = voltage_attributes,
+ .nattr = ARRAY_SIZE(voltage_attributes),
+ }, {
+ .class = PSC_VOLTAGE_OUT,
+ .attr = voltage_attributes,
+ .nattr = ARRAY_SIZE(voltage_attributes),
+ }, {
+ .class = PSC_CURRENT_IN,
+ .attr = current_attributes,
+ .nattr = ARRAY_SIZE(current_attributes),
+ }, {
+ .class = PSC_CURRENT_OUT,
+ .attr = current_attributes,
+ .nattr = ARRAY_SIZE(current_attributes),
+ }, {
+ .class = PSC_POWER,
+ .attr = power_attributes,
+ .nattr = ARRAY_SIZE(power_attributes),
+ }, {
+ .class = PSC_TEMPERATURE,
+ .attr = temp_attributes,
+ .nattr = ARRAY_SIZE(temp_attributes),
+ }
+};
+
+/*
+ * Read the coefficients for direct mode.
+ */
+static int pmbus_read_coefficients(struct i2c_client *client,
+ struct pmbus_driver_info *info,
+ const struct pmbus_sensor_attr *attr)
+{
+ int rv;
+ union i2c_smbus_data data;
+ enum pmbus_sensor_classes class = attr->class;
+ s8 R;
+ s16 m, b;
+
+ data.block[0] = 2;
+ data.block[1] = attr->reg;
+ data.block[2] = 0x01;
+
+ rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
+ I2C_SMBUS_BLOCK_PROC_CALL, &data);
+
+ if (rv < 0)
+ return rv;
+
+ if (data.block[0] != 5)
+ return -EIO;
+
+ m = data.block[1] | (data.block[2] << 8);
+ b = data.block[3] | (data.block[4] << 8);
+ R = data.block[5];
+ info->m[class] = m;
+ info->b[class] = b;
+ info->R[class] = R;
+
+ return rv;
+}
+
+static int pmbus_init_coefficients(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int i, n, ret = -EINVAL;
+ const struct pmbus_class_attr_map *map;
+ const struct pmbus_sensor_attr *attr;
+
+ for (i = 0; i < ARRAY_SIZE(class_attr_map); i++) {
+ map = &class_attr_map[i];
+ if (info->format[map->class] != direct)
+ continue;
+ for (n = 0; n < map->nattr; n++) {
+ attr = &map->attr[n];
+ if (map->class != attr->class)
+ continue;
+ ret = pmbus_read_coefficients(client, info, attr);
+ if (ret >= 0)
+ break;
+ }
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "No coefficients found for sensor class %d\n",
+ map->class);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Identify chip parameters.
* This function is called for all chips.
*/
@@ -2214,11 +2321,14 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->has_status_word = true;
}
- /* Enable PEC if the controller supports it */
+	/* Enable PEC if the controller and bus support it */
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
- client->flags |= I2C_CLIENT_PEC;
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
+			if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
+				client->flags |= I2C_CLIENT_PEC;
+ }
}
/*
@@ -2226,9 +2336,11 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* faults, and we should not try it. Also, in that case, writes into
* limit registers need to be disabled.
*/
- ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
- if (ret > 0 && (ret & PB_WP_ANY))
- data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ }
if (data->info->pages)
pmbus_clear_faults(client);
@@ -2255,6 +2367,17 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return ret;
}
}
+
+ if (data->flags & PMBUS_USE_COEFFICIENTS_CMD) {
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL))
+ return -ENODEV;
+
+ ret = pmbus_init_coefficients(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
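
For direct-format sensors the COEFFICIENTS command returns m, b and R such that the raw reading Y relates to the real-world value X through Y = (m * X + b) * 10^R. A sketch of the decode direction, with hypothetical coefficients (compile with -lm):

    #include <stdio.h>
    #include <math.h>

    /* Invert the PMBus direct-format relation: X = (Y * 10^-R - b) / m. */
    static double direct_decode(int y, int m, int b, int R)
    {
            return (y * pow(10, -R) - b) / (double)m;
    }

    int main(void)
    {
            /* Hypothetical coefficients m=4, b=0, R=1 and raw reading 500. */
            printf("X = %.3f\n", direct_decode(500, 4, 0, 1));
            return 0;
    }
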
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index b6e8b20466f1..fa298b4265a1 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
dev_err(&client->dev, "Failed to read Manufacturer ID\n");
return ret;
}
- if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+ if (ret != 6 || strncmp(buf, "DELTA", 5)) {
buf[ret] = '\0';
dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
return -ENODEV;
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index b7d4eacdc3ef..e9df0c56d91e 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -18,7 +18,7 @@
#include "pmbus.h"
enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105,
- zl9101, zl9117 };
+ zl8802, zl9101, zl9117, zls1003, zls4009 };
struct zl6100_data {
int id;
@@ -34,6 +34,13 @@ struct zl6100_data {
#define ZL6100_MFR_XTEMP_ENABLE BIT(7)
+#define ZL8802_MFR_USER_GLOBAL_CONFIG 0xe9
+#define ZL8802_MFR_TMON_ENABLE BIT(12)
+#define ZL8802_MFR_USER_CONFIG 0xd1
+#define ZL8802_MFR_XTEMP_ENABLE_2 BIT(1)
+#define ZL8802_MFR_DDC_CONFIG 0xd3
+#define ZL8802_MFR_PHASES_MASK 0x0007
+
#define MFR_VMON_OV_FAULT_LIMIT 0xf5
#define MFR_VMON_UV_FAULT_LIMIT 0xf6
#define MFR_READ_VMON 0xf7
@@ -132,7 +139,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page,
struct zl6100_data *data = to_zl6100_data(info);
int ret, vreg;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
if (data->id == zl2005) {
@@ -191,7 +198,7 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
struct zl6100_data *data = to_zl6100_data(info);
int ret, status;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
zl6100_wait(data);
@@ -230,7 +237,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
struct zl6100_data *data = to_zl6100_data(info);
int ret, vreg;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
switch (reg) {
@@ -271,7 +278,7 @@ static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
struct zl6100_data *data = to_zl6100_data(info);
int ret;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
zl6100_wait(data);
@@ -287,6 +294,10 @@ static const struct i2c_device_id zl6100_id[] = {
{"bmr462", zl2008},
{"bmr463", zl2008},
{"bmr464", zl2008},
+ {"bmr465", zls4009},
+ {"bmr466", zls1003},
+ {"bmr467", zls4009},
+ {"bmr469", zl8802},
{"zl2004", zl2004},
{"zl2005", zl2005},
{"zl2006", zl2006},
@@ -295,15 +306,18 @@ static const struct i2c_device_id zl6100_id[] = {
{"zl2106", zl2106},
{"zl6100", zl6100},
{"zl6105", zl6105},
+ {"zl8802", zl8802},
{"zl9101", zl9101},
{"zl9117", zl9117},
+ {"zls1003", zls1003},
+ {"zls4009", zls4009},
{ }
};
MODULE_DEVICE_TABLE(i2c, zl6100_id);
static int zl6100_probe(struct i2c_client *client)
{
- int ret;
+ int ret, i;
struct zl6100_data *data;
struct pmbus_driver_info *info;
u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
@@ -367,18 +381,70 @@ static int zl6100_probe(struct i2c_client *client)
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
/*
- * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
- * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
+ * ZL2004, ZL8802, ZL9101M, ZL9117M and ZLS4009 support monitoring
+ * an extra voltage (VMON for ZL2004, ZL8802 and ZLS4009,
+ * VDRV for ZL9101M and ZL9117M). Report it as vmon.
*/
- if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
+ if (data->id == zl2004 || data->id == zl8802 || data->id == zl9101 ||
+ data->id == zl9117 || data->id == zls4009)
info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
- ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
- if (ret < 0)
- return ret;
+ /*
+ * ZL8802 has two outputs that can be used either independently or in
+ * a current sharing configuration. The driver uses the DDC_CONFIG
+ * register to check if the module is running with independent or
+ * shared outputs. If the module is in shared output mode, only one
+ * output voltage will be reported.
+ */
+ if (data->id == zl8802) {
+ info->pages = 2;
+ info->func[0] |= PMBUS_HAVE_IIN;
+
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_DDC_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+
+ if (ret & ZL8802_MFR_PHASES_MASK)
+ info->func[1] |= PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ else
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
- if (ret & ZL6100_MFR_XTEMP_ENABLE)
- info->func[0] |= PMBUS_HAVE_TEMP2;
+ for (i = 0; i < 2; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL8802_MFR_XTEMP_ENABLE_2)
+ info->func[i] |= PMBUS_HAVE_TEMP2;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+ }
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_GLOBAL_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL8802_MFR_TMON_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP3;
+ } else {
+ ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL6100_MFR_XTEMP_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+ }
data->access = ktime_get();
zl6100_wait(data);
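
A sketch of the ZL8802 output-mode decision described in the comment above: any nonzero phase-count field in DDC_CONFIG means the two outputs share one rail, so page 1 only gains current monitoring. The mask mirrors ZL8802_MFR_PHASES_MASK; the register value is hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    #define PHASES_MASK 0x0007  /* mirrors ZL8802_MFR_PHASES_MASK */

    int main(void)
    {
            uint16_t ddc_config = 0x0002;   /* hypothetical register value */

            if (ddc_config & PHASES_MASK)
                    printf("current sharing: page 1 gets IOUT only\n");
            else
                    printf("independent outputs: page 1 gets VOUT and IOUT\n");
            return 0;
    }
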
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 4324a5dbc968..8f1b569c69e7 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -64,7 +64,6 @@ static const char * const SCH5627_IN_LABELS[SCH5627_NO_IN] = {
struct sch5627_data {
unsigned short addr;
- struct sch56xx_watchdog_data *watchdog;
u8 control;
u8 temp_max[SCH5627_NO_TEMPS];
u8 temp_crit[SCH5627_NO_TEMPS];
@@ -357,16 +356,6 @@ static const struct hwmon_chip_info sch5627_chip_info = {
.info = sch5627_info,
};
-static int sch5627_remove(struct platform_device *pdev)
-{
- struct sch5627_data *data = platform_get_drvdata(pdev);
-
- if (data->watchdog)
- sch56xx_watchdog_unregister(data->watchdog);
-
- return 0;
-}
-
static int sch5627_probe(struct platform_device *pdev)
{
struct sch5627_data *data;
@@ -460,9 +449,9 @@ static int sch5627_probe(struct platform_device *pdev)
return PTR_ERR(hwmon_dev);
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
- (build_code << 24) | (build_id << 8) | hwmon_rev,
- &data->update_lock, 1);
+ sch56xx_watchdog_register(&pdev->dev, data->addr,
+ (build_code << 24) | (build_id << 8) | hwmon_rev,
+ &data->update_lock, 1);
return 0;
}
@@ -472,7 +461,6 @@ static struct platform_driver sch5627_driver = {
.name = DRVNAME,
},
.probe = sch5627_probe,
- .remove = sch5627_remove,
};
module_platform_driver(sch5627_driver);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 5683a38740f6..a5cd4de36575 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -54,7 +54,6 @@ static const u16 SCH5636_REG_FAN_VAL[SCH5636_NO_FANS] = {
struct sch5636_data {
unsigned short addr;
struct device *hwmon_dev;
- struct sch56xx_watchdog_data *watchdog;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
@@ -372,9 +371,6 @@ static int sch5636_remove(struct platform_device *pdev)
struct sch5636_data *data = platform_get_drvdata(pdev);
int i;
- if (data->watchdog)
- sch56xx_watchdog_unregister(data->watchdog);
-
if (data->hwmon_dev)
hwmon_device_unregister(data->hwmon_dev);
@@ -495,9 +491,8 @@ static int sch5636_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
- (revision[0] << 8) | revision[1],
- &data->update_lock, 0);
+ sch56xx_watchdog_register(&pdev->dev, data->addr, (revision[0] << 8) | revision[1],
+ &data->update_lock, 0);
return 0;
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 6c84780e358e..40cdadad35e5 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -20,8 +20,8 @@
#include "sch56xx-common.h"
/* Insmod parameters */
-static int nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, int, 0);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
@@ -378,8 +378,8 @@ static const struct watchdog_ops watchdog_ops = {
.set_timeout = watchdog_set_timeout,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
- u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
+void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
+ struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
int err, control, output_enable;
@@ -393,23 +393,22 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
mutex_unlock(io_lock);
if (control < 0)
- return NULL;
+ return;
if (output_enable < 0)
- return NULL;
+ return;
if (check_enabled && !(output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
pr_warn("Watchdog not enabled by BIOS, not registering\n");
- return NULL;
+ return;
}
- data = kzalloc(sizeof(struct sch56xx_watchdog_data), GFP_KERNEL);
+ data = devm_kzalloc(parent, sizeof(struct sch56xx_watchdog_data), GFP_KERNEL);
if (!data)
- return NULL;
+ return;
data->addr = addr;
data->io_lock = io_lock;
- strlcpy(data->wdinfo.identity, "sch56xx watchdog",
- sizeof(data->wdinfo.identity));
+ strscpy(data->wdinfo.identity, "sch56xx watchdog", sizeof(data->wdinfo.identity));
data->wdinfo.firmware_version = revision;
data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
if (!nowayout)
@@ -421,8 +420,7 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
data->wddev.timeout = 60;
data->wddev.min_timeout = 1;
data->wddev.max_timeout = 255 * 60;
- if (nowayout)
- set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ watchdog_set_nowayout(&data->wddev, nowayout);
if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
set_bit(WDOG_ACTIVE, &data->wddev.status);
@@ -438,24 +436,14 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
data->watchdog_output_enable = output_enable;
watchdog_set_drvdata(&data->wddev, data);
- err = watchdog_register_device(&data->wddev);
+ err = devm_watchdog_register_device(parent, &data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- kfree(data);
- return NULL;
+ devm_kfree(parent, data);
}
-
- return data;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
-void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
-{
- watchdog_unregister_device(&data->wddev);
- kfree(data);
-}
-EXPORT_SYMBOL(sch56xx_watchdog_unregister);
-
/*
* platform dev find, add and remove functions
*/
@@ -516,37 +504,18 @@ static int __init sch56xx_device_add(int address, const char *name)
struct resource res = {
.start = address,
.end = address + REGION_LENGTH - 1,
+ .name = name,
.flags = IORESOURCE_IO,
};
int err;
- sch56xx_pdev = platform_device_alloc(name, address);
- if (!sch56xx_pdev)
- return -ENOMEM;
-
- res.name = sch56xx_pdev->name;
err = acpi_check_resource_conflict(&res);
if (err)
- goto exit_device_put;
-
- err = platform_device_add_resources(sch56xx_pdev, &res, 1);
- if (err) {
- pr_err("Device resource addition failed\n");
- goto exit_device_put;
- }
-
- err = platform_device_add(sch56xx_pdev);
- if (err) {
- pr_err("Device addition failed\n");
- goto exit_device_put;
- }
-
- return 0;
+ return err;
-exit_device_put:
- platform_device_put(sch56xx_pdev);
+ sch56xx_pdev = platform_device_register_simple(name, -1, &res, 1);
- return err;
+ return PTR_ERR_OR_ZERO(sch56xx_pdev);
}
static int __init sch56xx_init(void)
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 75eb73617cf2..e907d9da0dd5 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -14,6 +14,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
- u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
+void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
+ struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 25aac40f2764..919877970ae3 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
scpi_scale_reading(&value, sensor);
+ /*
+ * Temperature sensor values are treated as signed values based on
+ * observation even though that is not explicitly specified, and
+	 * observation, even though that is not explicitly specified, and
+	 * because an unsigned u64 temperature does not really make practical
+	 * sense, especially when the temperature is below zero degrees Celsius.
+ if (sensor->info.class == TEMPERATURE)
+ return sprintf(buf, "%lld\n", (s64)value);
+
return sprintf(buf, "%llu\n", value);
}
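
A sketch of the sign fix above: the same 64-bit pattern printed as unsigned and reinterpreted as signed (the cast assumes two's complement, as on all Linux-supported architectures):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t raw = 0xffffffffffffd8f0ULL;   /* two's complement -10000 */

            printf("unsigned: %llu\n", (unsigned long long)raw);
            printf("signed:   %lld\n", (long long)(int64_t)raw);
            return 0;
    }
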
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
new file mode 100644
index 000000000000..09c2a0b06444
--- /dev/null
+++ b/drivers/hwmon/sht4x.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) Linumiz 2021
+ *
+ * sht4x.c - Linux hwmon driver for SHT4x Temperature and Humidity sensor
+ *
+ * Author: Navin Sankar Velliangiri <navin@linumiz.com>
+ */
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * Poll intervals (in milliseconds)
+ */
+#define SHT4X_MIN_POLL_INTERVAL 2000
+
+/*
+ * I2C command delays (in microseconds)
+ */
+#define SHT4X_MEAS_DELAY 1000
+#define SHT4X_DELAY_EXTRA 10000
+
+/*
+ * Command Bytes
+ */
+#define SHT4X_CMD_MEASURE_HPM 0b11111101
+#define SHT4X_CMD_RESET 0b10010100
+
+#define SHT4X_CMD_LEN 1
+#define SHT4X_CRC8_LEN 1
+#define SHT4X_WORD_LEN 2
+#define SHT4X_RESPONSE_LENGTH 6
+#define SHT4X_CRC8_POLYNOMIAL 0x31
+#define SHT4X_CRC8_INIT 0xff
+#define SHT4X_MIN_TEMPERATURE -45000
+#define SHT4X_MAX_TEMPERATURE 125000
+#define SHT4X_MIN_HUMIDITY 0
+#define SHT4X_MAX_HUMIDITY 100000
+
+DECLARE_CRC8_TABLE(sht4x_crc8_table);
+
+/**
+ * struct sht4x_data - All the data required to operate an SHT4X chip
+ * @client: the i2c client associated with the SHT4X
+ * @lock: a mutex that is used to prevent parallel access to the i2c client
+ * @valid: true if the following fields are valid
+ * @update_interval: the minimum poll interval
+ * @last_updated: the previous time that the SHT4X was polled
+ * @temperature: the latest temperature value received from the SHT4X
+ * @humidity: the latest humidity value received from the SHT4X
+ */
+struct sht4x_data {
+ struct i2c_client *client;
+ struct mutex lock; /* atomic read data updates */
+ bool valid; /* validity of fields below */
+ long update_interval; /* in milli-seconds */
+ long last_updated; /* in jiffies */
+ s32 temperature;
+ s32 humidity;
+};
+
+/**
+ * sht4x_read_values() - read and parse the raw data from the SHT4X
+ * @data: the struct sht4x_data to use for the lock
+ * Return: 0 if successful, a negative error code if not
+ */
+static int sht4x_read_values(struct sht4x_data *data)
+{
+ int ret = 0;
+ u16 t_ticks, rh_ticks;
+ unsigned long next_update;
+ struct i2c_client *client = data->client;
+ u8 crc;
+ u8 cmd[SHT4X_CMD_LEN] = {SHT4X_CMD_MEASURE_HPM};
+ u8 raw_data[SHT4X_RESPONSE_LENGTH];
+
+ mutex_lock(&data->lock);
+ next_update = data->last_updated +
+ msecs_to_jiffies(data->update_interval);
+
+ if (data->valid && time_before_eq(jiffies, next_update))
+ goto unlock;
+
+ ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ goto unlock;
+
+ usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+
+ ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
+ if (ret != SHT4X_RESPONSE_LENGTH) {
+ if (ret >= 0)
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+ t_ticks = raw_data[0] << 8 | raw_data[1];
+ rh_ticks = raw_data[3] << 8 | raw_data[4];
+
+ crc = crc8(sht4x_crc8_table, &raw_data[0], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
+ if (crc != raw_data[2]) {
+ dev_err(&client->dev, "data integrity check failed\n");
+ ret = -EIO;
+ goto unlock;
+ }
+
+ crc = crc8(sht4x_crc8_table, &raw_data[3], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
+ if (crc != raw_data[5]) {
+ dev_err(&client->dev, "data integrity check failed\n");
+ ret = -EIO;
+ goto unlock;
+ }
+
+ data->temperature = ((21875 * (int32_t)t_ticks) >> 13) - 45000;
+ data->humidity = ((15625 * (int32_t)rh_ticks) >> 13) - 6000;
+ data->last_updated = jiffies;
+ data->valid = true;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int sht4x_interval_write(struct sht4x_data *data, long val)
+{
+ data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, UINT_MAX);
+
+ return 0;
+}
+
+/* sht4x_interval_read() - read the update interval in milliseconds */
+static int sht4x_interval_read(struct sht4x_data *data, long *val)
+{
+ *val = data->update_interval;
+ return 0;
+}
+
+/* sht4x_temperature1_read() - read the temperature in millidegrees */
+static int sht4x_temperature1_read(struct sht4x_data *data, long *val)
+{
+ int ret;
+
+ ret = sht4x_read_values(data);
+ if (ret < 0)
+ return ret;
+
+ *val = data->temperature;
+
+ return 0;
+}
+
+/* sht4x_humidity1_read() - read a relative humidity in millipercent */
+static int sht4x_humidity1_read(struct sht4x_data *data, long *val)
+{
+ int ret;
+
+ ret = sht4x_read_values(data);
+ if (ret < 0)
+ return ret;
+
+ *val = data->humidity;
+
+ return 0;
+}
+
+static umode_t sht4x_hwmon_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ case hwmon_chip:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int sht4x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return sht4x_temperature1_read(data, val);
+ case hwmon_humidity:
+ return sht4x_humidity1_read(data, val);
+ case hwmon_chip:
+ return sht4x_interval_read(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sht4x_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return sht4x_interval_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *sht4x_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL,
+};
+
+static const struct hwmon_ops sht4x_hwmon_ops = {
+ .is_visible = sht4x_hwmon_visible,
+ .read = sht4x_hwmon_read,
+ .write = sht4x_hwmon_write,
+};
+
+static const struct hwmon_chip_info sht4x_chip_info = {
+ .ops = &sht4x_hwmon_ops,
+ .info = sht4x_info,
+};
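+
+/*
+ * Registering with this chip info creates temp1_input (millidegrees
+ * Celsius), humidity1_input (millipercent) and update_interval
+ * (milliseconds) attributes in hwmon sysfs.
+ */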
+
+static int sht4x_probe(struct i2c_client *client,
+ const struct i2c_device_id *sht4x_id)
+{
+ struct device *device = &client->dev;
+ struct device *hwmon_dev;
+ struct sht4x_data *data;
+ u8 cmd[] = {SHT4X_CMD_RESET};
+ int ret;
+
+ /*
+ * We require full I2C support since the SHT4X uses multi-byte reads and
+ * writes as well as multi-byte commands, which are not supported by the
+ * SMBus protocol.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ data = devm_kzalloc(device, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->update_interval = SHT4X_MIN_POLL_INTERVAL;
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ crc8_populate_msb(sht4x_crc8_table, SHT4X_CRC8_POLYNOMIAL);
+
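+ /* Soft-reset the sensor so that measurements start from a known state. */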
+ ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ return ret;
+ if (ret != SHT4X_CMD_LEN)
+ return -EIO;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(device,
+ client->name,
+ data,
+ &sht4x_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id sht4x_id[] = {
+ { "sht4x", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sht4x_id);
+
+static struct i2c_driver sht4x_driver = {
+ .driver = {
+ .name = "sht4x",
+ },
+ .probe = sht4x_probe,
+ .id_table = sht4x_id,
+};
+
+module_i2c_driver(sht4x_driver);
+
+MODULE_AUTHOR("Navin Sankar Velliangiri <navin@linumiz.com>");
+MODULE_DESCRIPTION("Sensirion SHT4x humidity and temperature sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index c2484f15298b..8bd6435c13e8 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -99,11 +99,14 @@
#define POWER_ENABLE 0x19
#define TPS23861_NUM_PORTS 4
+#define TPS23861_GENERAL_MASK_1 0x17
+#define TPS23861_CURRENT_SHUNT_MASK BIT(0)
+
#define TEMPERATURE_LSB 652 /* 0.652 degrees Celsius */
#define VOLTAGE_LSB 3662 /* 3.662 mV */
#define SHUNT_RESISTOR_DEFAULT 255000 /* 255 mOhm */
-#define CURRENT_LSB_255 62260 /* 62.260 uA */
-#define CURRENT_LSB_250 61039 /* 61.039 uA */
+#define CURRENT_LSB_250 62260 /* 62.260 uA */
+#define CURRENT_LSB_255 61039 /* 61.039 uA */
#define RESISTANCE_LSB 110966 /* 11.0966 Ohm*/
#define RESISTANCE_LSB_LOW 157216 /* 15.7216 Ohm*/
@@ -117,6 +120,7 @@ struct tps23861_data {
static struct regmap_config tps23861_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x6f,
};
static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
else
data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
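+
+ /*
+ * Program the CURRENT_SHUNT bit in GENERAL_MASK_1 to match the fitted
+ * resistor: cleared for the default 255 mOhm part, set otherwise, so
+ * that the matching CURRENT_LSB scaling applies.
+ */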
+ if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+ regmap_clear_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+ else
+ regmap_set_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &tps23861_chip_info,
NULL);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 281a65d9b44b..10acece9d7b9 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -647,7 +647,7 @@ config I2C_HIGHLANDER
config I2C_HISI
tristate "HiSilicon I2C controller"
- depends on ARM64 || COMPILE_TEST
+ depends on (ARM64 && ACPI) || COMPILE_TEST
help
Say Y here if you want to have Hisilicon I2C controller support
available on the Kunpeng Server.
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4d12e3da12f0..55a9e93fbfeb 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
*
* Copyright (C) 2004 Patrick Mochel
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 7d62cbda6e06..354cf7e45c4a 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -55,7 +55,7 @@
#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250))
/**
- * altr_i2c_dev - I2C device context
+ * struct altr_i2c_dev - I2C device context
* @base: pointer to register struct
* @msg: pointer to current message
* @msg_len: number of bytes transferred in msg
@@ -172,7 +172,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
}
-/**
+/*
* altr_i2c_transfer - On the last byte to be transmitted, send
* a Stop bit on the last byte.
*/
@@ -185,7 +185,7 @@ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
writel(data, idev->base + ALTR_I2C_TFR_CMD);
}
-/**
+/*
* altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
* transfer. Send a Stop bit on the last byte.
*/
@@ -201,9 +201,8 @@ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
}
}
-/**
+/*
* altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
- * @return: Number of bytes left to transfer.
*/
static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
{
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index c1bbc4caeb5c..66aafa7d1123 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -144,7 +144,7 @@ enum cdns_i2c_mode {
};
/**
- * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
*
* @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
* @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
index 78cfecd1ea76..3ded28632e4c 100644
--- a/drivers/i2c/busses/i2c-cp2615.c
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -138,17 +138,23 @@ cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w)
static int
cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf)
{
- struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
- struct cp2615_i2c_transfer_result *i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
struct usb_device *usbdev = interface_to_usbdev(usbif);
- int res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
- msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+ struct cp2615_iop_msg *msg;
+ struct cp2615_i2c_transfer_result *i2c_r;
+ int res;
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg,
+ sizeof(struct cp2615_iop_msg), NULL, 0);
if (res < 0) {
kfree(msg);
return res;
}
+ i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) {
kfree(msg);
return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 13be1d678c39..9b08bb5df38d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -165,7 +165,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
}
/**
- * i2c_dw_init() - Initialize the designware I2C master hardware
+ * i2c_dw_init_master() - Initialize the designware I2C master hardware
* @dev: device private data
*
* This functions configures and enables the I2C master.
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 843b31a0f752..321b2770feab 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -148,7 +148,7 @@ struct i2c_algo_pch_data {
/**
* struct adapter_info - This structure holds the adapter information for the
- PCH i2c controller
+ * PCH i2c controller
* @pch_data: stores a list of i2c_algo_pch_data
* @pch_i2c_suspended: specifies whether the system is suspended or not
* perhaps with more lines and words.
@@ -358,6 +358,7 @@ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
/**
* pch_i2c_writebytes() - write data to I2C bus in normal mode
* @i2c_adap: Pointer to the struct i2c_adapter.
+ * @msgs: Pointer to the i2c message structure.
* @last: specifies whether last message or not.
* In the case of compound mode it will be 1 for last message,
* otherwise 0.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 99d446763530..04a1e38f2a6f 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
- SMBHSTCNT(priv));
+ outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
- SMBHSTCNT(priv));
+ outb_p(0, SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
@@ -980,6 +978,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
}
out:
+ /* Unlock the SMBus device for use by BIOS/ACPI */
+ outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
+
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
mutex_unlock(&priv->acpi_lock);
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index c8c422e9dda4..5dae7cab7260 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -123,7 +123,6 @@ static int icy_probe(struct zorro_dev *z,
{
struct icy_i2c *i2c;
struct i2c_algo_pcf_data *algo_data;
- struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
.swnode = &icy_ltc2990_node,
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 30d9e89a3db2..dcca9c2396db 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/fsl_devices.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -45,6 +46,7 @@
#define CCR_MTX 0x10
#define CCR_TXAK 0x08
#define CCR_RSTA 0x04
+#define CCR_RSVD 0x02
#define CSR_MCF 0x80
#define CSR_MAAS 0x40
@@ -97,7 +99,7 @@ struct mpc_i2c {
u32 block;
int rc;
int expect_rxack;
-
+ bool has_errata_A004447;
};
struct mpc_i2c_divider {
@@ -136,6 +138,75 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
}
}
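+
+/* Busy-poll the I2C status register for up to 100 us until a bit in @mask is set. */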
+static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
+{
+ void __iomem *addr = i2c->base + MPC_I2C_SR;
+ u8 val;
+
+ return readb_poll_timeout(addr, val, val & mask, 0, 100);
+}
+
+/*
+ * Workaround for Erratum A004447. From the P2040CE Rev Q errata:
+ *
+ * 1. Set up the frequency divider and sampling rate.
+ * 2. I2CCR - a0h
+ * 3. Poll for I2CSR[MBB] to get set.
+ * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to
+ * step 5. If MAL is not set, then go to step 13.
+ * 5. I2CCR - 00h
+ * 6. I2CCR - 22h
+ * 7. I2CCR - a2h
+ * 8. Poll for I2CSR[MBB] to get set.
+ * 9. Issue read to I2CDR.
+ * 10. Poll for I2CSR[MIF] to be set.
+ * 11. I2CCR - 82h
+ * 12. Workaround complete. Skip the next steps.
+ * 13. Issue read to I2CDR.
+ * 14. Poll for I2CSR[MIF] to be set.
+ * 15. I2CCR - 80h
+ */
+static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c)
+{
+ int ret;
+ u32 val;
+
+ writeccr(i2c, CCR_MEN | CCR_MSTA);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+ return;
+ }
+
+ val = readb(i2c->base + MPC_I2C_SR);
+
+ if (val & CSR_MAL) {
+ writeccr(i2c, 0x00);
+ writeccr(i2c, CCR_MSTA | CCR_RSVD);
+ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+ return;
+ }
+ val = readb(i2c->base + MPC_I2C_DR);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+ return;
+ }
+ writeccr(i2c, CCR_MEN | CCR_RSVD);
+ } else {
+ val = readb(i2c->base + MPC_I2C_DR);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+ return;
+ }
+ writeccr(i2c, CCR_MEN);
+ }
+}
+
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -670,7 +741,10 @@ static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
{
struct mpc_i2c *i2c = i2c_get_adapdata(adap);
- mpc_i2c_fixup(i2c);
+ if (i2c->has_errata_A004447)
+ mpc_i2c_fixup_A004447(i2c);
+ else
+ mpc_i2c_fixup(i2c);
return 0;
}
@@ -767,6 +841,9 @@ static int fsl_i2c_probe(struct platform_device *op)
}
dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
+ if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447"))
+ i2c->has_errata_A004447 = true;
+
i2c->adap = mpc_ops;
scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
"MPC adapter (%s)", of_node_full_name(op->dev.of_node));
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 5ddfa4e56ee2..4e9fb6b44436 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -479,6 +479,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
+ u16 intr_stat_reg;
+
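+ /*
+ * Clear the start flag and write the latched interrupt status back to
+ * ack any stale interrupts before reprogramming the controller.
+ */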
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+ intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+ mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
if (i2c->dev_comp->apdma_sync) {
writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index dc77e1c4e80f..a2d12a5b1c34 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -159,7 +159,7 @@ struct i2c_nmk_client {
* @clk_freq: clock frequency for the operation mode
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
- * @timeout Slave response timeout (ms)
+ * @timeout: Slave response timeout (ms)
* @sm: speed mode
* @stop: stop condition.
* @xfer_complete: acknowledge completion for a I2C message.
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 273222e38056..a0af027db04c 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -250,7 +250,7 @@ static irqreturn_t ocores_isr(int irq, void *dev_id)
}
/**
- * Process timeout event
+ * ocores_process_timeout() - Process timeout event
* @i2c: ocores I2C device instance
*/
static void ocores_process_timeout(struct ocores_i2c *i2c)
@@ -264,7 +264,7 @@ static void ocores_process_timeout(struct ocores_i2c *i2c)
}
/**
- * Wait until something change in a given register
+ * ocores_wait() - Wait until something changes in a given register
* @i2c: ocores I2C device instance
* @reg: register to query
* @mask: bitmask to apply on register value
@@ -296,7 +296,7 @@ static int ocores_wait(struct ocores_i2c *i2c,
}
/**
- * Wait until is possible to process some data
+ * ocores_poll_wait() - Wait until it is possible to process some data
* @i2c: ocores I2C device instance
*
* Used when the device is in polling mode (interrupts disabled).
@@ -334,7 +334,7 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
}
/**
- * It handles an IRQ-less transfer
+ * ocores_process_polling() - Handle an IRQ-less transfer
* @i2c: ocores I2C device instance
*
* Even if IRQ are disabled, the I2C OpenCore IP behavior is exactly the same
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 8c4ec7f13f5a..50f21cdbe90d 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -138,7 +138,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_start - start a device
* @slave_addr: slave address
- * @adap: pointer to adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Generate a START signal in the desired mode.
*/
@@ -194,7 +194,7 @@ static int i2c_pnx_start(unsigned char slave_addr,
/**
* i2c_pnx_stop - stop a device
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Generate a STOP signal to terminate the master transaction.
*/
@@ -223,7 +223,7 @@ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_master_xmit - transmit data to slave
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Sends one byte of data to the slave
*/
@@ -293,7 +293,7 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_master_rcv - receive data from slave
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Reads one byte data from the slave
*/
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 214b4c913a13..6d635a7c104c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -100,7 +100,7 @@ static const struct geni_i2c_err_log gi2c_log[] = {
[GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
[NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
[GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
- [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unepxected start/stop"},
+ [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
[ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
[GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
return 0;
}
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+ /* Make client i2c transfers start failing */
+ i2c_mark_adapter_suspended(&gi2c->adap);
+}
+
static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
{
int ret;
@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
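+
+ /* Reject new transfers from now on; undone in geni_i2c_resume_noirq(). */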
+ i2c_mark_adapter_suspended(&gi2c->adap);
+
if (!gi2c->suspended) {
geni_i2c_runtime_suspend(dev);
pm_runtime_disable(dev);
@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
return 0;
}
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_resumed(&gi2c->adap);
+ return 0;
+}
+
static const struct dev_pm_ops geni_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
NULL)
};
@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
static struct platform_driver geni_i2c_driver = {
.probe = geni_i2c_probe,
.remove = geni_i2c_remove,
+ .shutdown = geni_i2c_shutdown,
.driver = {
.name = "geni_i2c",
.pm = &geni_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
index a39f7d092797..66dfa211e736 100644
--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
}
- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
if (ret) {
dev_err(&adapter->dev, "failure sending STOP\n");
return -EREMOTEIO;
@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
* Set bus frequency. The frequency is:
* 120,000,000 / ( 16 + 2 * div * 4^prescale).
* Using dev = 52, prescale = 0 give 100KHz */
- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
NULL, 0);
if (ret) {
dev_err(&interface->dev, "failure sending bit rate");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ab928613afba..4d82761e1585 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -480,7 +480,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* forces us to send a new START
* when we change direction
*/
+ dev_dbg(i2c->dev,
+ "missing START before write->read\n");
s3c24xx_i2c_stop(i2c, -EINVAL);
+ break;
}
goto retry_write;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 3ae6ca21a02c..2d2e630fd438 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
- { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index faa81a95551f..88482316d22a 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -524,7 +524,7 @@ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
}
/**
- * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read
+ * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
* @i2c_dev: Controller's private data
*/
static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
@@ -558,7 +558,7 @@ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
}
/**
- * st_i2c_isr() - Interrupt routine
+ * st_i2c_isr_thread() - Interrupt routine
* @irq: interrupt number
* @data: Controller's private data
*/
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4933fc8ce3fd..eebce7ecef25 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -313,7 +313,7 @@ static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev)
}
/**
- * stm32f4_i2c_write_ byte() - Write a byte in the data register
+ * stm32f4_i2c_write_byte() - Write a byte in the data register
* @i2c_dev: Controller's private data
* @byte: Data to write in the register
*/
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index 3680d608698b..ec0c7cad4240 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -65,7 +65,7 @@ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
*out |= SERIALI2C_RECV_LEN;
}
-/**
+/*
* The serialized I2C format is simply the following:
* [addr little-endian][flags little-endian][len little-endian][data if write]
* [addr little-endian][flags little-endian][len little-endian][data if write]
@@ -109,7 +109,7 @@ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
request->xfer.data_size = pos;
}
-/**
+/*
* The data in the BPMP -> CPU direction is composed of sequential blocks for
* those messages that have I2C_M_RD. So, for example, if you have:
*
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 8ceaa88dd78f..6f0aa0ed3241 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -259,8 +259,8 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
*/
void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
+ struct acpi_device *adev;
acpi_status status;
- acpi_handle handle;
if (!has_acpi_companion(&adap->dev))
return;
@@ -275,11 +275,11 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
if (!adap->dev.parent)
return;
- handle = ACPI_HANDLE(adap->dev.parent);
- if (!handle)
+ adev = ACPI_COMPANION(adap->dev.parent);
+ if (!adev)
return;
- acpi_walk_dep_device_list(handle);
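+ /*
+ * Tell the ACPI core that devices that list this adapter in _DEP can
+ * now be enumerated.
+ */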
+ acpi_dev_clear_dependencies(adev);
}
static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6ef38a8ee95c..cb64fe649390 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -526,7 +526,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
return put_user(funcs, (compat_ulong_t __user *)arg);
case I2C_RDWR: {
struct i2c_rdwr_ioctl_data32 rdwr_arg;
- struct i2c_msg32 *p;
+ struct i2c_msg32 __user *p;
struct i2c_msg *rdwr_pa;
int i;
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 6dc88902c189..1c78657631f4 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -34,7 +34,7 @@ struct i2c_arbitrator_data {
};
-/**
+/*
* i2c_arbitrator_select - claim the I2C bus
*
* Use the GPIO-based signalling protocol; return -EBUSY if we fail.
@@ -77,7 +77,7 @@ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
return -EBUSY;
}
-/**
+/*
* i2c_arbitrator_deselect - release the I2C bus
*
* Release the I2C bus using the GPIO-based signalling protocol.
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
deleted file mode 100644
index 19abf11c84c8..000000000000
--- a/drivers/ide/Kconfig
+++ /dev/null
@@ -1,849 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IDE ATA ATAPI Block device driver configuration
-#
-
-# Select HAVE_IDE if IDE is supported
-config HAVE_IDE
- bool
-
-menuconfig IDE
- tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
- depends on HAVE_IDE
- depends on BLOCK
- select BLK_SCSI_REQUEST
- help
- If you say Y here, your kernel will be able to manage ATA/(E)IDE and
- ATAPI units. The most common cases are IDE hard drives and ATAPI
- CD-ROM drives.
-
- This subsystem is currently in maintenance mode with only bug fix
- changes applied. Users of ATA hardware are encouraged to migrate to
- the newer ATA subsystem ("Serial ATA (prod) and Parallel ATA
- (experimental) drivers") which is more actively maintained.
-
- To compile this driver as a module, choose M here: the
- module will be called ide-core.
-
- For further information, please read <file:Documentation/ide/ide.rst>.
-
- If unsure, say N.
-
-if IDE
-
-comment "Please see Documentation/ide/ide.rst for help/info on IDE drives"
-
-config IDE_XFER_MODE
- bool
-
-config IDE_TIMINGS
- bool
- select IDE_XFER_MODE
-
-config IDE_ATAPI
- bool
-
-config IDE_LEGACY
- bool
-
-config BLK_DEV_IDE_SATA
- bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
- default n
- help
- There are two drivers for Serial ATA controllers.
-
- The main driver, "libata", uses the SCSI subsystem
- and supports most modern SATA controllers. In order to use it
- you may take a look at "Serial ATA (prod) and Parallel ATA
- (experimental) drivers".
-
- The IDE driver (which you are currently configuring) supports
- a few first-generation SATA controllers.
-
- In order to eliminate conflicts between the two subsystems,
- this config option enables the IDE driver's SATA support.
- Normally this is disabled, as it is preferred that libata
- supports SATA controllers, and this (IDE) driver supports
- PATA controllers.
-
- If unsure, say N.
-
-config IDE_GD
- tristate "generic ATA/ATAPI disk support"
- default y
- help
- Support for ATA/ATAPI disks (including ATAPI floppy drives).
-
- To compile this driver as a module, choose M here.
- The module will be called ide-gd_mod.
-
- If unsure, say Y.
-
-config IDE_GD_ATA
- bool "ATA disk support"
- depends on IDE_GD
- default y
- help
- This will include support for ATA hard disks.
-
- If unsure, say Y.
-
-config IDE_GD_ATAPI
- bool "ATAPI floppy support"
- depends on IDE_GD
- select IDE_ATAPI
- help
- This will include support for ATAPI floppy drives
- (i.e. Iomega ZIP or MKE LS-120).
-
- For information about jumper settings and the question
- of when a ZIP drive uses a partition table, see
- <http://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
-
- If unsure, say N.
-
-config BLK_DEV_IDECS
- tristate "PCMCIA IDE support"
- depends on PCMCIA
- help
- Support for Compact Flash cards, outboard IDE disks, tape drives,
- and CD-ROM drives connected through a PCMCIA card.
-
-config BLK_DEV_DELKIN
- tristate "Cardbus IDE support (Delkin/ASKA/Workbit)"
- depends on CARDBUS && PCI
- help
- Support for Delkin, ASKA, and Workbit Cardbus CompactFlash
- Adapters. This may also work for similar SD and XD adapters.
-
-config BLK_DEV_IDECD
- tristate "Include IDE/ATAPI CDROM support"
- depends on BLK_DEV
- select IDE_ATAPI
- select CDROM
- help
- If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is
- a newer protocol used by IDE CD-ROM and TAPE drives, similar to the
- SCSI protocol. Most new CD-ROM drives use ATAPI, including the
- NEC-260, Mitsumi FX400, Sony 55E, and just about all non-SCSI
- double(2X) or better speed drives.
-
- If you say Y here, the CD-ROM drive will be identified at boot time
- along with other IDE devices, as "hdb" or "hdc", or something
- similar (check the boot messages with dmesg). If this is your only
- CD-ROM drive, you can say N to all other CD-ROM options, but be sure
- to say Y or M to "ISO 9660 CD-ROM file system support".
-
- To compile this driver as a module, choose M here: the
- module will be called ide-cd.
-
-config BLK_DEV_IDECD_VERBOSE_ERRORS
- bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
- depends on BLK_DEV_IDECD
- default y
- help
- Turn this on to have the driver print out the meanings of the
- ATAPI error codes. This will use up additional 8kB of kernel-space
- memory, though.
-
-config BLK_DEV_IDETAPE
- tristate "Include IDE/ATAPI TAPE support"
- select IDE_ATAPI
- help
- If you have an IDE tape drive using the ATAPI protocol, say Y.
- ATAPI is a newer protocol used by IDE tape and CD-ROM drives,
- similar to the SCSI protocol. If you have an SCSI tape drive
- however, you can say N here.
-
- You should also say Y if you have an OnStream DI-30 tape drive; this
- will not work with the SCSI protocol, until there is support for the
- SC-30 and SC-50 versions.
-
- If you say Y here, the tape drive will be identified at boot time
- along with other IDE devices, as "hdb" or "hdc", or something
- similar, and will be mapped to a character device such as "ht0"
- (check the boot messages with dmesg). Be sure to consult the
- <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.rst>
- files for usage information.
-
- To compile this driver as a module, choose M here: the
- module will be called ide-tape.
-
-config BLK_DEV_IDEACPI
- bool "IDE ACPI support"
- depends on ACPI
- help
- Implement ACPI support for generic IDE devices. On modern
- machines ACPI support is required to properly handle ACPI S3 states.
-
-config IDE_TASK_IOCTL
- bool "IDE Taskfile Access"
- help
- This is a direct raw access to the media. It is a complex but
- elegant solution to test and validate the domain of the hardware and
- perform below the driver data recovery if needed. This is the most
- basic form of media-forensics.
-
- If you are unsure, say N here.
-
-config IDE_PROC_FS
- bool "legacy /proc/ide/ support"
- depends on IDE && PROC_FS
- default y
- help
- This option enables support for the various files in
- /proc/ide. In Linux 2.6 this has been superseded by
- files in sysfs but many legacy applications rely on this.
-
- If unsure say Y.
-
-comment "IDE chipset support/bugfixes"
-
-config IDE_GENERIC
- tristate "generic/default IDE chipset support"
- depends on ALPHA || X86 || IA64 || MIPS || ARCH_RPC
- default ARM && ARCH_RPC
- help
- This is the generic IDE driver. This driver attaches to the
- fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
- so on). Please note that if this driver is built into the
- kernel or loaded before other ATA (IDE or libata) drivers
- and the controller is located at legacy ports, this driver
- may grab those ports and thus can prevent the controller
- specific driver from attaching.
-
- Also, currently, IDE generic doesn't allow IRQ sharing
- meaning that the IRQs it grabs won't be available to other
- controllers sharing those IRQs which usually makes drivers
- for those controllers fail. Generally, it's not a good idea
- to load IDE generic driver on modern systems.
-
- If unsure, say N.
-
-config BLK_DEV_PLATFORM
- tristate "Platform driver for IDE interfaces"
- help
- This is the platform IDE driver, used mostly for Memory Mapped
- IDE devices, like Compact Flashes running in True IDE mode.
-
- If unsure, say N.
-
-config BLK_DEV_CMD640
- tristate "CMD640 chipset bugfix/support"
- depends on X86
- select IDE_TIMINGS
- help
- The CMD-Technologies CMD640 IDE chip is used on many common 486 and
- Pentium motherboards, usually in combination with a "Neptune" or
- "SiS" chipset. Unfortunately, it has a number of rather nasty
- design flaws that can cause severe data corruption under many common
- conditions. Say Y here to include code which tries to automatically
- detect and correct the problems under Linux. This option also
- enables access to the secondary IDE ports in some CMD640 based
- systems.
-
- This driver will work automatically in PCI based systems (most new
- systems have PCI slots). But if your system uses VESA local bus
- (VLB) instead of PCI, you must also supply a kernel boot parameter
- to enable the CMD640 bugfix/support: "cmd640.probe_vlb". (Try "man
- bootparam" or see the documentation of your boot loader about how to
- pass options to the kernel.)
-
- The CMD640 chip is also used on add-in cards by Acculogic, and on
- the "CSA-6400E PCI to IDE controller" that some people have. For
- details, read <file:Documentation/ide/ide.rst>.
-
-config BLK_DEV_CMD640_ENHANCED
- bool "CMD640 enhanced support"
- depends on BLK_DEV_CMD640
- help
- This option includes support for setting/autotuning PIO modes and
- prefetch on CMD640 IDE interfaces. For details, read
- <file:Documentation/ide/ide.rst>. If you have a CMD640 IDE interface
- and your BIOS does not already do this for you, then say Y here.
- Otherwise say N.
-
-config BLK_DEV_IDEPNP
- tristate "PNP EIDE support"
- depends on PNP
- help
- If you have a PnP (Plug and Play) compatible EIDE card and
- would like the kernel to automatically detect and activate
- it, say Y here.
-
-config BLK_DEV_IDEDMA_SFF
- bool
-
-if PCI
-
-comment "PCI IDE chipsets support"
-
-config BLK_DEV_IDEPCI
- bool
-
-config IDEPCI_PCIBUS_ORDER
- bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)"
- depends on IDE=y && BLK_DEV_IDEPCI
- default y
- help
- Probe IDE PCI devices in the order in which they appear on the
- PCI bus (i.e. 00:1f.1 PCI device before 02:01.0 PCI device)
- instead of the order in which IDE PCI host drivers are loaded.
-
- Please note that this method of assuring stable naming of
- IDE devices is unreliable and use other means for achieving
- it (i.e. udev).
-
- If in doubt, say N.
-
-# TODO: split it on per host driver config options (or module parameters)
-config BLK_DEV_OFFBOARD
- bool "Boot off-board chipsets first support (DEPRECATED)"
- depends on BLK_DEV_IDEPCI && (BLK_DEV_AEC62XX || BLK_DEV_GENERIC || BLK_DEV_HPT366 || BLK_DEV_PDC202XX_NEW || BLK_DEV_PDC202XX_OLD || BLK_DEV_TC86C001)
- help
- Normally, IDE controllers built into the motherboard (on-board
- controllers) are assigned to ide0 and ide1 while those on add-in PCI
- cards (off-board controllers) are relegated to ide2 and ide3.
- Answering Y here will allow you to reverse the situation, with
- off-board controllers on ide0/1 and on-board controllers on ide2/3.
- This can improve the usability of some boot managers such as lilo
- when booting from a drive on an off-board controller.
-
- Note that, if you do this, the order of the hd* devices will be
- rearranged which may require modification of fstab and other files.
-
- Please also note that this method of assuring stable naming of
- IDE devices is unreliable and use other means for achieving it
- (i.e. udev).
-
- If in doubt, say N.
-
-config BLK_DEV_GENERIC
- tristate "Generic PCI IDE Chipset Support"
- select BLK_DEV_IDEPCI
- help
- This option provides generic support for various PCI IDE Chipsets
- which otherwise might not be supported.
-
-config BLK_DEV_OPTI621
- tristate "OPTi 82C621 chipset enhanced support"
- select BLK_DEV_IDEPCI
- help
- This is a driver for the OPTi 82C621 EIDE controller.
- Please read the comments at the top of <file:drivers/ide/opti621.c>.
-
-config BLK_DEV_RZ1000
- tristate "RZ1000 chipset bugfix/support"
- depends on X86
- select BLK_DEV_IDEPCI
- help
- The PC-Technologies RZ1000 IDE chip is used on many common 486 and
- Pentium motherboards, usually along with the "Neptune" chipset.
- Unfortunately, it has a rather nasty design flaw that can cause
- severe data corruption under many conditions. Say Y here to include
- code which automatically detects and corrects the problem under
- Linux. This may slow disk throughput by a few percent, but at least
- things will operate 100% reliably.
-
-config BLK_DEV_IDEDMA_PCI
- bool
- select BLK_DEV_IDEPCI
- select BLK_DEV_IDEDMA_SFF
-
-config BLK_DEV_AEC62XX
- tristate "AEC62XX chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for Acard AEC62xx (Artop ATP8xx)
- IDE controllers. This allows the kernel to change PIO, DMA and UDMA
- speeds and to configure the chip to optimum performance.
-
-config BLK_DEV_ALI15X3
- tristate "ALI M15x3 chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C
- onboard chipsets. It also tests for Simplex mode and enables
- normal dual channel support.
-
- Please read the comments at the top of
- <file:drivers/ide/alim15x3.c>.
-
- If unsure, say N.
-
-config BLK_DEV_AMD74XX
- tristate "AMD and nVidia IDE support"
- depends on !ARM
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for AMD-7xx and AMD-8111 chips
- and also for the nVidia nForce chip. This allows the kernel to
- change PIO, DMA and UDMA speeds and to configure the chip to
- optimum performance.
-
-config BLK_DEV_ATIIXP
- tristate "ATI IXP chipset IDE support"
- depends on X86
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for ATI IXP chipset.
- This allows the kernel to change PIO, DMA and UDMA speeds
- and to configure the chip to optimum performance.
-
- Say Y here if you have an ATI IXP chipset IDE controller.
-
-config BLK_DEV_CMD64X
- tristate "CMD64{3|6|8|9} chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- Say Y here if you have an IDE controller which uses any of these
- chipsets: CMD643, CMD646, or CMD648.
-
-config BLK_DEV_TRIFLEX
- tristate "Compaq Triflex IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- Say Y here if you have a Compaq Triflex IDE controller, such
- as those commonly found on Compaq Pentium-Pro systems
-
-config BLK_DEV_CY82C693
- tristate "CY82C693 chipset support"
- depends on ALPHA
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds detection and support for the CY82C693 chipset
- used on Digital's PC-Alpha 164SX boards.
-
-config BLK_DEV_CS5520
- tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
- 5510/5520 chipset. This will automatically be detected and
- configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5530
- tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
- will automatically be detected and configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5535
- tristate "AMD CS5535 chipset support"
- depends on X86_32
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for UDMA on the NSC/AMD CS5535 companion chipset.
- This will automatically be detected and configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5536
- tristate "CS5536 chipset support"
- depends on X86_32
- select BLK_DEV_IDEDMA_PCI
- help
- This option enables support for the AMD CS5536
- companion chip used with the Geode LX processor family.
-
- If unsure, say N.
-
-config BLK_DEV_HPT366
- tristate "HPT36X/37X chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- HPT366 is an Ultra DMA chipset for ATA-66.
- HPT368 is an Ultra DMA chipset for ATA-66 RAID Based.
- HPT370 is an Ultra DMA chipset for ATA-100.
- HPT372 is an Ultra DMA chipset for ATA-100.
- HPT374 is an Ultra DMA chipset for ATA-100.
-
- This driver adds up to 4 more EIDE devices sharing a single
- interrupt.
-
- The HPT366 chipset in its current form is bootable. One solution
- for this problem are special LILO commands for redirecting the
- reference to device 0x80. The other solution is to say Y to "Boot
- off-board chipsets first support" (CONFIG_BLK_DEV_OFFBOARD) unless
- your mother board has the chipset natively mounted. Regardless one
- should use the fore mentioned option and call at LILO.
-
- This driver requires dynamic tuning of the chipset during the
- ide-probe at boot. It is reported to support DVD II drives, by the
- manufacturer.
-
-config BLK_DEV_JMICRON
- tristate "JMicron JMB36x support"
- select BLK_DEV_IDEDMA_PCI
- help
- Basic support for the JMicron ATA controllers. For full support
- use the libata drivers.
-
-config BLK_DEV_SC1200
- tristate "National SCx200 chipset support"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the on-board IDE controller on the
- National SCx200 series of embedded x86 "Geode" systems.
-
-config BLK_DEV_PIIX
- tristate "Intel PIIX/ICH chipsets support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for Intel PIIX and ICH chips.
- This allows the kernel to change PIO, DMA and UDMA speeds and to
- configure the chip to optimum performance.
-
-config BLK_DEV_IT8172
- tristate "IT8172 IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the IDE controller on the
- IT8172 System Controller.
-
-config BLK_DEV_IT8213
- tristate "IT8213 IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the ITE 8213 IDE controller.
-
-config BLK_DEV_IT821X
- tristate "IT821X IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the ITE 8211 IDE controller and the
- IT 8212 IDE RAID controller in both RAID and pass-through mode.
-
-config BLK_DEV_NS87415
- tristate "NS87415 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds detection and support for the NS87415 chip
- (used mainly on SPARC64 and PA-RISC machines).
-
- Please read the comments at the top of <file:drivers/ide/ns87415.c>.
-
-config BLK_DEV_PDC202XX_OLD
- tristate "PROMISE PDC202{46|62|65|67} support"
- select BLK_DEV_IDEDMA_PCI
- help
- Promise Ultra33 or PDC20246
- Promise Ultra66 or PDC20262
- Promise Ultra100 or PDC20265/PDC20267/PDC20268
-
- This driver adds up to 4 more EIDE devices sharing a single
- interrupt. This add-on card is a bootable PCI UDMA controller. Since
- multiple cards can be installed and there are BIOS ROM problems that
- happen if the BIOS revisions of all installed cards (three-max) do
- not match, the driver attempts to do dynamic tuning of the chipset
- at boot-time for max-speed. Ultra33 BIOS 1.25 or newer is required
- for more than one card.
-
- Please read the comments at the top of
- <file:drivers/ide/pdc202xx_old.c>.
-
- If unsure, say N.
-
-config BLK_DEV_PDC202XX_NEW
- tristate "PROMISE PDC202{68|69|70|71|75|76|77} support"
- select BLK_DEV_IDEDMA_PCI
-
-config BLK_DEV_SVWKS
- tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds PIO/(U)DMA support for the ServerWorks OSB4/CSB5
- chipsets.
-
-config BLK_DEV_SIIMAGE
- tristate "Silicon Image chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds PIO/(U)DMA support for the SI CMD680 and SII
- 3112 (Serial ATA) chips.
-
-config BLK_DEV_SIS5513
- tristate "SiS5513 chipset support"
- depends on X86
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for SIS5513 chipset family based
- mainboards.
-
- The following chipsets are supported:
- ATA16: SiS5511, SiS5513
- ATA33: SiS5591, SiS5597, SiS5598, SiS5600
- ATA66: SiS530, SiS540, SiS620, SiS630, SiS640
- ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
- SiS745, SiS750
-
- Please read the comments at the top of <file:drivers/ide/sis5513.c>.
-
-config BLK_DEV_SL82C105
- tristate "Winbond SL82c105 support"
- depends on (PPC || ARM)
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- If you have a Winbond SL82c105 IDE controller, say Y here to enable
- special configuration for this chip. This is common on various CHRP
- motherboards, but could be used elsewhere. If in doubt, say Y.
-
-config BLK_DEV_SLC90E66
- tristate "SLC90E66 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for Victory66 SouthBridges for
- SMsC with Intel NorthBridges. This is an Ultra66 based chipset.
- The nice thing about it is that you can mix Ultra/DMA/PIO devices
- and it will handle timing cycles. Since this is an improved
- look-a-like to the PIIX4 it should be a nice addition.
-
- Please read the comments at the top of
- <file:drivers/ide/slc90e66.c>.
-
-config BLK_DEV_TRM290
- tristate "Tekram TRM290 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for bus master DMA transfers
- using the Tekram TRM290 PCI IDE chip. Volunteers are
- needed for further tweaking and development.
- Please read the comments at the top of <file:drivers/ide/trm290.c>.
-
-config BLK_DEV_VIA82CXXX
- tristate "VIA82CXXX chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for VIA BusMastering IDE chips.
- This allows the kernel to change PIO, DMA and UDMA speeds and to
- configure the chip to optimum performance.
-
-config BLK_DEV_TC86C001
- tristate "Toshiba TC86C001 support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for Toshiba TC86C001 GOKU-S chip.
-
-endif
-
-# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
-config BLK_DEV_IDE_PMAC
- tristate "PowerMac on-board IDE support"
- depends on PPC_PMAC
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver provides support for the on-board IDE controller on
- most of the recent Apple Power Macintoshes and PowerBooks.
- If unsure, say Y.
-
-config BLK_DEV_IDE_PMAC_ATA100FIRST
- bool "Probe on-board ATA/100 (Kauai) first"
- depends on BLK_DEV_IDE_PMAC
- help
- This option will cause the ATA/100 controller found in UniNorth2
- based machines (Windtunnel PowerMac, Aluminium PowerBooks, ...)
- to be probed before the ATA/66 and ATA/33 controllers. Without
- these, those machine used to have the hard disk on hdc and the
- CD-ROM on hda. This option changes this to more natural hda for
- hard disk and hdc for CD-ROM.
-
-config BLK_DEV_IDE_TX4938
- tristate "TX4938 internal IDE support"
- depends on SOC_TX4938
- select IDE_TIMINGS
-
-config BLK_DEV_IDE_TX4939
- tristate "TX4939 internal IDE support"
- depends on SOC_TX4939
- select BLK_DEV_IDEDMA_SFF
-
-config BLK_DEV_IDE_ICSIDE
- tristate "ICS IDE interface support"
- depends on ARM && ARCH_ACORN
- help
- On Acorn systems, say Y here if you wish to use the ICS IDE
- interface card. This is not required for ICS partition support.
- If you are unsure, say N to this.
-
-config BLK_DEV_IDEDMA_ICS
- bool "ICS DMA support"
- depends on BLK_DEV_IDE_ICSIDE
- help
- Say Y here if you want to add DMA (Direct Memory Access) support to
- the ICS IDE driver.
-
-config BLK_DEV_IDE_RAPIDE
- tristate "RapIDE interface support"
- depends on ARM && ARCH_ACORN
- help
- Say Y here if you want to support the Yellowstone RapIDE controller
- manufactured for use with Acorn computers.
-
-config BLK_DEV_GAYLE
- tristate "Amiga Gayle IDE interface support"
- depends on AMIGA
- help
- This is the IDE driver for the Amiga Gayle IDE interface. It supports
- both the `A1200 style' and `A4000 style' of the Gayle IDE interface,
- This includes on-board IDE interfaces on some Amiga models (A600,
- A1200, A4000, and A4000T), and IDE interfaces on the Zorro expansion
- bus (M-Tech E-Matrix 530 expansion card).
-
- It also provides support for the so-called `IDE doublers' (made
- by various manufacturers, e.g. Eyetech) that can be connected to
- the on-board IDE interface of some Amiga models. Using such an IDE
- doubler, you can connect up to four instead of two IDE devices to
- the Amiga's on-board IDE interface. The feature is enabled at kernel
- runtime using the "gayle.doubler" kernel boot parameter.
-
- Say Y if you have an Amiga with a Gayle IDE interface and want to use
- IDE devices (hard disks, CD-ROM drives, etc.) that are connected to
- it.
-
- Note that you also have to enable Zorro bus support if you want to
- use Gayle IDE interfaces on the Zorro expansion bus.
-
-config BLK_DEV_BUDDHA
- tristate "Buddha/Catweasel/X-Surf IDE interface support"
- depends on ZORRO
- help
- This is the IDE driver for the IDE interfaces on the Buddha, Catweasel
- and X-Surf expansion boards. It supports up to two interfaces on the
- Buddha, three on the Catweasel and two on the X-Surf.
-
- Say Y if you have a Buddha or Catweasel expansion board and want to
- use IDE devices (hard disks, CD-ROM drives, etc.) that are connected
- to one of its IDE interfaces.
-
-config BLK_DEV_FALCON_IDE
- tristate "Falcon IDE interface support"
- depends on ATARI
- help
- This is the IDE driver for the on-board IDE interface on the Atari
- Falcon. Say Y if you have a Falcon and want to use IDE devices (hard
- disks, CD-ROM drives, etc.) that are connected to the on-board IDE
- interface.
-
-config BLK_DEV_MAC_IDE
- tristate "Macintosh Quadra/Powerbook IDE interface support"
- depends on MAC
- help
- This is the IDE driver for the on-board IDE interface on some m68k
- Macintosh models, namely Quadra/Centris 630, Performa 588 and
- Powerbook 150. The IDE interface on the Powerbook 190 is not
- supported by this driver and requires BLK_DEV_PLATFORM or
- PATA_PLATFORM.
-
- Say Y if you have such an Macintosh model and want to use IDE
- devices (hard disks, CD-ROM drives, etc.) that are connected to the
- on-board IDE interface.
-
-config BLK_DEV_Q40IDE
- tristate "Q40/Q60 IDE interface support"
- depends on Q40
- help
- Enable the on-board IDE controller in the Q40/Q60. This should
- normally be on; disable it only if you are running a custom hard
- drive subsystem through an expansion card.
-
-config BLK_DEV_PALMCHIP_BK3710
- tristate "Palmchip bk3710 IDE controller support"
- depends on ARCH_DAVINCI
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_SFF
- help
- Say Y here if you want to support the onchip IDE controller on the
- TI DaVinci SoC
-
-# no isa -> no vlb
-if ISA && (ALPHA || X86 || MIPS)
-
-comment "Other IDE chipsets support"
-comment "Note: most of these also require special kernel boot parameters"
-
-config BLK_DEV_4DRIVES
- tristate "Generic 4 drives/port support"
- help
- Certain older chipsets, including the Tekram 690CD, use a single set
- of I/O ports at 0x1f0 to control up to four drives, instead of the
- customary two drives per port. Support for this can be enabled at
- runtime using the "ide-4drives.probe" kernel boot parameter if you
- say Y here.
-
-config BLK_DEV_ALI14XX
- tristate "ALI M14xx support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "ali14xx.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
- I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/ali14xx.c> for more info.
-
-config BLK_DEV_DTC2278
- tristate "DTC-2278 support"
- select IDE_XFER_MODE
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "dtc2278.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the DTC-2278 card, and permits faster I/O speeds to be set as
- well. See the <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/dtc2278.c> files for more info.
-
-config BLK_DEV_HT6560B
- tristate "Holtek HT6560B support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "ht6560b.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the Holtek card, and permits faster I/O speeds to be set as well.
- See the <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/ht6560b.c> files for more info.
-
-config BLK_DEV_QD65XX
- tristate "QDI QD65xx support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "qd65xx.probe" kernel
- boot parameter. It permits faster I/O speeds to be set. See the
- <file:Documentation/ide/ide.rst> and <file:drivers/ide/qd65xx.c>
- for more info.
-
-config BLK_DEV_UMC8672
- tristate "UMC-8672 support"
- select IDE_XFER_MODE
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "umc8672.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the UMC-8672, and permits faster I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/umc8672.c> for more info.
-
-endif
-
-config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_ICS
- select IDE_XFER_MODE
-
-endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
deleted file mode 100644
index 2605b3cdaf47..000000000000
--- a/drivers/ide/Makefile
+++ /dev/null
@@ -1,111 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# link order is important here
-#
-
-ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
- ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
- ide-io-std.o ide-eh.o
-
-# core IDE code
-ide-core-$(CONFIG_IDE_XFER_MODE) += ide-pio-blacklist.o ide-xfer-mode.o
-ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
-ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o
-ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
-ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
-ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o
-ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
-ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
-ide-core-$(CONFIG_IDE_LEGACY) += ide-legacy.o
-
-obj-$(CONFIG_IDE) += ide-core.o
-
-obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
-obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
-obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
-obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
-obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
-
-obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
-obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
-obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
-obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
-obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
-
-obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
-obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
-obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
-obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
-obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
-obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
-obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
-obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
-obj-$(CONFIG_BLK_DEV_CS5536) += cs5536.o
-obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
-obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
-obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
-obj-$(CONFIG_BLK_DEV_IT8172) += it8172.o
-obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
-obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
-obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
-obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
-obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
-obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
-obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
-obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
-obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
-obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
-obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
-obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
-obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
-obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
-obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
-obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
-
-# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
-
-obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o
-
-obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
-
-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
-
-obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
-obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
-
-ide-gd_mod-y += ide-gd.o
-ide-cd_mod-y += ide-cd.o ide-cd_ioctl.o ide-cd_verbose.o
-
-ifeq ($(CONFIG_IDE_GD_ATA), y)
- ide-gd_mod-y += ide-disk.o ide-disk_ioctl.o
-ifeq ($(CONFIG_IDE_PROC_FS), y)
- ide-gd_mod-y += ide-disk_proc.o
-endif
-endif
-
-ifeq ($(CONFIG_IDE_GD_ATAPI), y)
- ide-gd_mod-y += ide-floppy.o ide-floppy_ioctl.o
-ifeq ($(CONFIG_IDE_PROC_FS), y)
- ide-gd_mod-y += ide-floppy_proc.o
-endif
-endif
-
-obj-$(CONFIG_IDE_GD) += ide-gd_mod.o
-obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o
-obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o
-
-obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o
-
-obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o
-
-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
-obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
-obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
-
-obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
-obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
deleted file mode 100644
index 4c959ce41ba9..000000000000
--- a/drivers/ide/aec62xx.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "aec62xx"
-
-struct chipset_bus_clock_list_entry {
- u8 xfer_speed;
- u8 chipset_settings;
- u8 ultra_settings;
-};
-
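-/*
- * Mode-to-register lookup tables, one for each PCI bus clock family
- * (<= 33 MHz vs faster); each table ends with a zero xfer_speed sentinel.
- */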
-static const struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
- { XFER_UDMA_6, 0x31, 0x07 },
- { XFER_UDMA_5, 0x31, 0x06 },
- { XFER_UDMA_4, 0x31, 0x05 },
- { XFER_UDMA_3, 0x31, 0x04 },
- { XFER_UDMA_2, 0x31, 0x03 },
- { XFER_UDMA_1, 0x31, 0x02 },
- { XFER_UDMA_0, 0x31, 0x01 },
-
- { XFER_MW_DMA_2, 0x31, 0x00 },
- { XFER_MW_DMA_1, 0x31, 0x00 },
- { XFER_MW_DMA_0, 0x0a, 0x00 },
- { XFER_PIO_4, 0x31, 0x00 },
- { XFER_PIO_3, 0x33, 0x00 },
- { XFER_PIO_2, 0x08, 0x00 },
- { XFER_PIO_1, 0x0a, 0x00 },
- { XFER_PIO_0, 0x00, 0x00 },
- { 0, 0x00, 0x00 }
-};
-
-static const struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
- { XFER_UDMA_6, 0x41, 0x06 },
- { XFER_UDMA_5, 0x41, 0x05 },
- { XFER_UDMA_4, 0x41, 0x04 },
- { XFER_UDMA_3, 0x41, 0x03 },
- { XFER_UDMA_2, 0x41, 0x02 },
- { XFER_UDMA_1, 0x41, 0x01 },
- { XFER_UDMA_0, 0x41, 0x01 },
-
- { XFER_MW_DMA_2, 0x41, 0x00 },
- { XFER_MW_DMA_1, 0x42, 0x00 },
- { XFER_MW_DMA_0, 0x7a, 0x00 },
- { XFER_PIO_4, 0x41, 0x00 },
- { XFER_PIO_3, 0x43, 0x00 },
- { XFER_PIO_2, 0x78, 0x00 },
- { XFER_PIO_1, 0x7a, 0x00 },
- { XFER_PIO_0, 0x70, 0x00 },
- { 0, 0x00, 0x00 }
-};
-
-/*
- * TODO: active tuning and correction of cards without a BIOS.
- */
-static u8 pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
-{
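- /* linear scan; falls back to the terminating sentinel's value */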
- for ( ; chipset_table->xfer_speed ; chipset_table++)
- if (chipset_table->xfer_speed == speed) {
- return chipset_table->chipset_settings;
- }
- return chipset_table->chipset_settings;
-}
-
-static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
-{
- for ( ; chipset_table->xfer_speed ; chipset_table++)
- if (chipset_table->xfer_speed == speed) {
- return chipset_table->ultra_settings;
- }
- return chipset_table->ultra_settings;
-}
-
-static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
- u16 d_conf = 0;
- u8 ultra = 0, ultra_conf = 0;
- u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
- const u8 speed = drive->dma_mode;
- unsigned long flags;
-
- local_irq_save(flags);
- /* 0x40|(2*drive->dn): Active, 0x41|(2*drive->dn): Recovery */
- pci_read_config_word(dev, 0x40|(2*drive->dn), &d_conf);
- tmp0 = pci_bus_clock_list(speed, bus_clock);
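- /* spread the active/recovery nibbles of tmp0 across the 16-bit timing word */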
- d_conf = ((tmp0 & 0xf0) << 4) | (tmp0 & 0xf);
- pci_write_config_word(dev, 0x40|(2*drive->dn), d_conf);
-
- tmp1 = 0x00;
- tmp2 = 0x00;
- pci_read_config_byte(dev, 0x54, &ultra);
- tmp1 = ((0x00 << (2*drive->dn)) | (ultra & ~(3 << (2*drive->dn))));
- ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
- tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn))));
- pci_write_config_byte(dev, 0x54, tmp2);
- local_irq_restore(flags);
-}
-
-static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
- u8 unit = drive->dn & 1;
- u8 tmp1 = 0, tmp2 = 0;
- u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
- const u8 speed = drive->dma_mode;
- unsigned long flags;
-
- local_irq_save(flags);
- /* high 4-bits: Active, low 4-bits: Recovery */
- pci_read_config_byte(dev, 0x40|drive->dn, &drive_conf);
- drive_conf = pci_bus_clock_list(speed, bus_clock);
- pci_write_config_byte(dev, 0x40|drive->dn, drive_conf);
-
- pci_read_config_byte(dev, (0x44|hwif->channel), &ultra);
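- /* each drive's UDMA setting is a 3-bit field; 'unit' selects the low or high nibble */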
- tmp1 = ((0x00 << (4*unit)) | (ultra & ~(7 << (4*unit))));
- ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
- tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit))));
- pci_write_config_byte(dev, (0x44|hwif->channel), tmp2);
- local_irq_restore(flags);
-}
-
-static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- hwif->port_ops->set_dma_mode(hwif, drive);
-}
-
-static int init_chipset_aec62xx(struct pci_dev *dev)
-{
- /* These are necessary to get AEC6280 Macintosh cards to work */
- if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
- (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) {
- u8 reg49h = 0, reg4ah = 0;
- /* Clear reset and test bits. */
- pci_read_config_byte(dev, 0x49, &reg49h);
- pci_write_config_byte(dev, 0x49, reg49h & ~0x30);
- /* Enable chip interrupt output. */
- pci_read_config_byte(dev, 0x4a, &reg4ah);
- pci_write_config_byte(dev, 0x4a, reg4ah & ~0x01);
- /* Enable burst mode. */
- pci_read_config_byte(dev, 0x4a, &reg4ah);
- pci_write_config_byte(dev, 0x4a, reg4ah | 0x80);
- }
-
- return 0;
-}
-
-static u8 atp86x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- pci_read_config_byte(dev, 0x49, &ata66);
-
- return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops atp850_port_ops = {
- .set_pio_mode = aec_set_pio_mode,
- .set_dma_mode = aec6210_set_mode,
-};
-
-static const struct ide_port_ops atp86x_port_ops = {
- .set_pio_mode = aec_set_pio_mode,
- .set_dma_mode = aec6260_set_mode,
- .cable_detect = atp86x_cable_detect,
-};
-
-static const struct ide_port_info aec62xx_chipsets[] = {
- { /* 0: AEC6210 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp850_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_NO_DSC |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
- { /* 1: AEC6260 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 2: AEC6260R */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_NON_BOOTABLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 3: AEC6280 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 4: AEC6280R */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-/**
- * aec62xx_init_one - called when an AEC62xx is found
- * @dev: the aec62xx device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- *
- * NOTE: since we're going to modify the 'name' field for AEC-6[26]80[R]
- * chips, pass a local copy of 'struct ide_port_info' down the call chain.
- */
-
-static int aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct chipset_bus_clock_list_entry *bus_clock;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- int err;
-
- if (bus_speed <= 33)
- bus_clock = aec6xxx_33_base;
- else
- bus_clock = aec6xxx_34_base;
-
- err = pci_enable_device(dev);
- if (err)
- return err;
-
- d = aec62xx_chipsets[idx];
-
- if (idx == 3 || idx == 4) {
- unsigned long dma_base = pci_resource_start(dev, 4);
-
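- /* bit 4 of the bus-master status port indicates a
- * UDMA133-capable (AEC6880-class) board */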
- if (inb(dma_base + 2) & 0x10) {
- printk(KERN_INFO DRV_NAME " %s: AEC6880%s card detected"
- "\n", pci_name(dev), (idx == 4) ? "R" : "");
- d.udma_mask = ATA_UDMA6;
- }
- }
-
- err = ide_pci_init_one(dev, &d, (void *)bus_clock);
- if (err)
- pci_disable_device(dev);
-
- return err;
-}
-
-static void aec62xx_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id aec62xx_pci_tbl[] = {
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
-
-static struct pci_driver aec62xx_pci_driver = {
- .name = "AEC62xx_IDE",
- .id_table = aec62xx_pci_tbl,
- .probe = aec62xx_init_one,
- .remove = aec62xx_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init aec62xx_ide_init(void)
-{
- return ide_pci_register_driver(&aec62xx_pci_driver);
-}
-
-static void __exit aec62xx_ide_exit(void)
-{
- pci_unregister_driver(&aec62xx_pci_driver);
-}
-
-module_init(aec62xx_ide_init);
-module_exit(aec62xx_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
deleted file mode 100644
index 3268931c2c7a..000000000000
--- a/drivers/ide/ali14xx.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996 Linus Torvalds & author (see below)
- */
-
-/*
- * ALI M14xx chipset EIDE controller
- *
- * Works for ALI M1439/1443/1445/1487/1489 chipsets.
- *
- * Adapted from code developed by derekn@vw.ece.cmu.edu. -ml
- * Derek's notes follow:
- *
- * I think the code should be pretty understandable,
- * but I'll be happy to (try to) answer questions.
- *
- * The critical part is in the setupDrive function. The initRegisters
- * function doesn't seem to be necessary, but the DOS driver does it, so
- * I threw it in.
- *
- * I've only tested this on my system, which only has one disk. I posted
- * it to comp.sys.linux.hardware, so maybe some other people will try it
- * out.
- *
- * Derek Noonburg (derekn@ece.cmu.edu)
- * 95-sep-26
- *
- * Update 96-jul-13:
- *
- * I've since upgraded to two disks and a CD-ROM, with no trouble, and
- * I've also heard from several others who have used it successfully.
- * This driver appears to work with both the 1443/1445 and the 1487/1489
- * chipsets. I've added support for PIO mode 4 for the 1487. This
- * seems to work just fine on the 1443 also, although I'm not sure it's
- * advertised as supporting mode 4. (I've been running a WDC AC21200 in
- * mode 4 for a while now with no trouble.) -Derek
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "ali14xx"
-
-/* port addresses for auto-detection */
-#define ALI_NUM_PORTS 4
-static const int ports[ALI_NUM_PORTS] __initconst =
- { 0x074, 0x0f4, 0x034, 0x0e4 };
-
-/* register initialization data */
-typedef struct { u8 reg, data; } RegInitializer;
-
-static const RegInitializer initData[] __initconst = {
- {0x01, 0x0f}, {0x02, 0x00}, {0x03, 0x00}, {0x04, 0x00},
- {0x05, 0x00}, {0x06, 0x00}, {0x07, 0x2b}, {0x0a, 0x0f},
- {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00},
- {0x29, 0x00}, {0x2a, 0x00}, {0x2f, 0x00}, {0x2b, 0x00},
- {0x2c, 0x00}, {0x2d, 0x00}, {0x2e, 0x00}, {0x30, 0x00},
- {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0xff},
- {0x35, 0x03}, {0x00, 0x00}
-};
-
-/* timing parameter registers for each drive */
-static struct { u8 reg1, reg2, reg3, reg4; } regTab[4] = {
- {0x03, 0x26, 0x04, 0x27}, /* drive 0 */
- {0x05, 0x28, 0x06, 0x29}, /* drive 1 */
- {0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */
- {0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */
-};
-
-static int basePort; /* base port address */
-static int regPort; /* port for register number */
-static int dataPort; /* port for register data */
-static u8 regOn; /* output to base port to access registers */
-static u8 regOff; /* output to base port to close registers */
-
-/*------------------------------------------------------------------------*/
-
-/*
- * Read a controller register.
- */
-static inline u8 inReg(u8 reg)
-{
- outb_p(reg, regPort);
- return inb(dataPort);
-}
-
-/*
- * Write a controller register.
- */
-static void outReg(u8 data, u8 reg)
-{
- outb_p(reg, regPort);
- outb_p(data, dataPort);
-}
-
-static DEFINE_SPINLOCK(ali14xx_lock);
-
-/*
- * Set PIO mode for the specified drive.
- * This function computes timing parameters
- * and sets controller registers accordingly.
- */
-static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int driveNum;
- int time1, time2;
- u8 param1, param2, param3, param4;
- unsigned long flags;
- int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
-
- /* calculate timing, according to PIO mode */
- time1 = ide_pio_cycle_time(drive, pio);
- time2 = t->active;
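- /* convert nanoseconds to VLB clock counts, rounding up */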
- param3 = param1 = (time2 * bus_speed + 999) / 1000;
- param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1;
- if (pio < 3) {
- param3 += 8;
- param4 += 8;
- }
- printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n",
- drive->name, pio, time1, time2, param1, param2, param3, param4);
-
- /* stuff timing parameters into controller registers */
- driveNum = (drive->hwif->index << 1) + (drive->dn & 1);
- spin_lock_irqsave(&ali14xx_lock, flags);
- outb_p(regOn, basePort);
- outReg(param1, regTab[driveNum].reg1);
- outReg(param2, regTab[driveNum].reg2);
- outReg(param3, regTab[driveNum].reg3);
- outReg(param4, regTab[driveNum].reg4);
- outb_p(regOff, basePort);
- spin_unlock_irqrestore(&ali14xx_lock, flags);
-}
-
-/*
- * Auto-detect the IDE controller port.
- */
-static int __init findPort(void)
-{
- int i;
- u8 t;
- unsigned long flags;
-
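- /* probe each candidate base port: the chip echoes the key value
- * written to it, and register 0 must read back ID nibble 0x5 */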
- local_irq_save(flags);
- for (i = 0; i < ALI_NUM_PORTS; ++i) {
- basePort = ports[i];
- regOff = inb(basePort);
- for (regOn = 0x30; regOn <= 0x33; ++regOn) {
- outb_p(regOn, basePort);
- if (inb(basePort) == regOn) {
- regPort = basePort + 4;
- dataPort = basePort + 8;
- t = inReg(0) & 0xf0;
- outb_p(regOff, basePort);
- local_irq_restore(flags);
- if (t != 0x50)
- return 0;
- return 1; /* success */
- }
- }
- outb_p(regOff, basePort);
- }
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Initialize controller registers with default values.
- */
-static int __init initRegisters(void)
-{
- const RegInitializer *p;
- u8 t;
- unsigned long flags;
-
- local_irq_save(flags);
- outb_p(regOn, basePort);
- for (p = initData; p->reg != 0; ++p)
- outReg(p->data, p->reg);
- outb_p(0x01, regPort);
- t = inb(regPort) & 0x01;
- outb_p(regOff, basePort);
- local_irq_restore(flags);
- return t;
-}
-
-static const struct ide_port_ops ali14xx_port_ops = {
- .set_pio_mode = ali14xx_set_pio_mode,
-};
-
-static const struct ide_port_info ali14xx_port_info = {
- .name = DRV_NAME,
- .chipset = ide_ali14xx,
- .port_ops = &ali14xx_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init ali14xx_probe(void)
-{
- printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
- basePort, regOn);
-
- /* initialize controller registers */
- if (!initRegisters()) {
- printk(KERN_ERR "ali14xx: Chip initialization failed.\n");
- return 1;
- }
-
- return ide_legacy_device_add(&ali14xx_port_info, 0);
-}
-
-static bool probe_ali14xx;
-
-module_param_named(probe, probe_ali14xx, bool, 0);
-MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
-
-static int __init ali14xx_init(void)
-{
- if (probe_ali14xx == 0)
- goto out;
-
- /* auto-detect IDE controller port */
- if (findPort()) {
- if (ali14xx_probe())
- return -ENODEV;
- return 0;
- }
- printk(KERN_ERR "ali14xx: not found.\n");
-out:
- return -ENODEV;
-}
-
-module_init(ali14xx_init);
-
-MODULE_AUTHOR("see local file");
-MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
deleted file mode 100644
index 3265970aee34..000000000000
--- a/drivers/ide/alim15x3.c
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * Copyright (C) 1998-2000 Michel Aubry, Maintainer
- * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
- * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
- *
- * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
- * May be copied or modified under the terms of the GNU General Public License
- * Copyright (C) 2002 Alan Cox
- * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
- * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * (U)DMA capable version of ali 1533/1543(C), 1535(D)
- *
- **********************************************************************
- * 9/7/99 --Parts from the above author are included and need to be
- * converted into standard interface, once I finish the thought.
- *
- * Recent changes
- * Don't use LBA48 mode on ALi <= 0xC4
- * Don't poke 0x79 with a non ALi northbridge
- * Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang)
- * Allow UDMA6 on revisions > 0xC4
- *
- * Documentation
- * Chipset documentation available under NDA only
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "alim15x3"
-
-/*
- * ALi devices are not hot-pluggable, otherwise these static values
- * would need to go. They ought to go away anyway.
- */
-
-static u8 m5229_revision;
-static u8 chip_is_1543c_e;
-static struct pci_dev *isa_dev;
-
-static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int pio_fifo = 0x54 + hwif->channel;
- u8 fifo;
- int shift = 4 * (drive->dn & 1);
-
- pci_read_config_byte(pdev, pio_fifo, &fifo);
- fifo &= ~(0x0F << shift);
- fifo |= (on << shift);
- pci_write_config_byte(pdev, pio_fifo, fifo);
-}
-
-static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
- struct ide_timing *t, u8 ultra)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int port = hwif->channel ? 0x5c : 0x58;
- int udmat = 0x56 + hwif->channel;
- u8 unit = drive->dn & 1, udma;
- int shift = 4 * unit;
-
- /* Set up the UDMA */
- pci_read_config_byte(dev, udmat, &udma);
- udma &= ~(0x0F << shift);
- udma |= ultra << shift;
- pci_write_config_byte(dev, udmat, udma);
-
- if (t == NULL)
- return;
-
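- /* fields are programmed modulo their width: a count of 8 (or 16) is encoded as 0 */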
- t->setup = clamp_val(t->setup, 1, 8) & 7;
- t->act8b = clamp_val(t->act8b, 1, 8) & 7;
- t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
- t->active = clamp_val(t->active, 1, 8) & 7;
- t->recover = clamp_val(t->recover, 1, 16) & 15;
-
- pci_write_config_byte(dev, port, t->setup);
- pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
- pci_write_config_byte(dev, port + unit + 2,
- (t->active << 4) | t->recover);
-}
-
-/**
- * ali_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Program the controller for the given PIO mode.
- */
-
-static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- unsigned long T = 1000000 / bus_speed; /* PCI clock based */
- struct ide_timing t;
-
- ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
- if (pair) {
- struct ide_timing p;
-
- ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- }
- }
-
- /*
- * PIO mode => ATA FIFO on, ATAPI FIFO off
- */
- ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);
-
- ali_program_timings(hwif, drive, &t, 0);
-}
-
-/**
- * ali_udma_filter - compute UDMA mask
- * @drive: IDE device
- *
- * Return available UDMA modes.
- *
- * The actual rules for the ALi are:
- * No UDMA on revisions <= 0x20
- * Disk only for revisions < 0xC2
- * Not WDC drives on M1543C-E (?)
- */
-
-static u8 ali_udma_filter(ide_drive_t *drive)
-{
- if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
- if (drive->media != ide_disk)
- return 0;
- if (chip_is_1543c_e &&
- strstr((char *)&drive->id[ATA_ID_PROD], "WDC "))
- return 0;
- }
-
- return drive->hwif->ultra_mask;
-}
-
-/**
- * ali_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Configure the hardware for the desired IDE transfer mode.
- */
-
-static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- unsigned long T = 1000000 / bus_speed; /* PCI clock based */
- const u8 speed = drive->dma_mode;
- u8 tmpbyte = 0x00;
- struct ide_timing t;
-
- if (speed < XFER_UDMA_0) {
- ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
- if (pair) {
- struct ide_timing p;
-
- ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode,
- &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- }
- }
- ali_program_timings(hwif, drive, &t, 0);
- } else {
- ali_program_timings(hwif, drive, NULL,
- udma_timing[speed - XFER_UDMA_0]);
- if (speed >= XFER_UDMA_3) {
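- /* modes above UDMA2 additionally need bit 0 of register 0x4b set */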
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- tmpbyte |= 1;
- pci_write_config_byte(dev, 0x4b, tmpbyte);
- }
- }
-}
-
-/**
- * ali_dma_check - DMA check
- * @drive: target device
- * @cmd: command
- *
- * Returns 1 if the DMA cannot be performed, zero on success.
- */
-
-static int ali_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (m5229_revision < 0xC2 && drive->media != ide_disk) {
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- return 1; /* try PIO instead of DMA */
- }
- return 0;
-}
-
-/**
- * init_chipset_ali15x3 - Initialise an ALi IDE controller
- * @dev: PCI device
- *
- * This function initializes the ALI IDE controller and where
- * appropriate also sets up the 1533 southbridge.
- */
-
-static int init_chipset_ali15x3(struct pci_dev *dev)
-{
- unsigned long flags;
- u8 tmpbyte;
- struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0));
-
- m5229_revision = dev->revision;
-
- isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
- local_irq_save(flags);
-
- if (m5229_revision < 0xC2) {
- /*
- * revision 0x20 (1543-E, 1543-F)
- * revision 0xC0, 0xC1 (1543C-C, 1543C-D, 1543C-E)
- * clear CD-ROM DMA write bit, m5229, 0x4b, bit 7
- */
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- /*
- * clear bit 7
- */
- pci_write_config_byte(dev, 0x4b, tmpbyte & 0x7F);
- /*
- * check m1533 register 0x5e, bits 1-4 == 1001, i.e. (value & 0x1e) == 0x12
- */
- if (m5229_revision >= 0x20 && isa_dev) {
- pci_read_config_byte(isa_dev, 0x5e, &tmpbyte);
- chip_is_1543c_e = ((tmpbyte & 0x1e) == 0x12) ? 1 : 0;
- }
- goto out;
- }
-
- /*
- * 1543C-B?, 1535, 1535D, 1553
- * Note 1: not all motherboards support this detection
- * Note 2: if no UDMA66 device is attached, the detection may give a
- * false result; but in that case we will not program the device
- * for Ultra 66, so the detection result does not matter
- */
-
- /*
- * enable "Cable Detection", m5229, 0x4b, bit3
- */
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- pci_write_config_byte(dev, 0x4b, tmpbyte | 0x08);
-
- /*
- * We should only tune the 1533 enable if we are using an ALi
- * north bridge. Some zany boxes have no device at 0:0.0 at all,
- * in which case we cannot tell; but an ALi north bridge always
- * sits at 0:0.0, so finding a different vendor there means this
- * is not an ALi platform.
- */
- if (north && north->vendor != PCI_VENDOR_ID_AL)
- goto out;
-
- if (m5229_revision < 0xC5 && isa_dev)
- {
- /*
- * set south-bridge's enable bit, m1533, 0x79
- */
-
- pci_read_config_byte(isa_dev, 0x79, &tmpbyte);
- if (m5229_revision == 0xC2) {
- /*
- * 1543C-B0 (m1533, 0x79, bit 2)
- */
- pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x04);
- } else if (m5229_revision >= 0xC3) {
- /*
- * 1553/1535 (m1533, 0x79, bit 1)
- */
- pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x02);
- }
- }
-
-out:
- /*
- * CD_ROM DMA on (m5229, 0x53, bit0)
- * Enable this bit even if we want to use PIO.
- * PIO FIFO off (m5229, 0x53, bit1)
- * The hardware uses registers 0x54 and 0x55 to control the PIO FIFO.
- * (Not on later devices, it seems.)
- *
- * 0x53 changes meaning on later revs - we must not touch
- * bit 1 on them. Need to check if 0x20 is the right break point.
- */
- if (m5229_revision >= 0x20) {
- pci_read_config_byte(dev, 0x53, &tmpbyte);
-
- if (m5229_revision <= 0x20)
- tmpbyte = (tmpbyte & (~0x02)) | 0x01;
- else if (m5229_revision == 0xc7 || m5229_revision == 0xc8)
- tmpbyte |= 0x03;
- else
- tmpbyte |= 0x01;
-
- pci_write_config_byte(dev, 0x53, tmpbyte);
- }
- local_irq_restore(flags);
- pci_dev_put(north);
- pci_dev_put(isa_dev);
- return 0;
-}
-
-/*
- * Cable special cases
- */
-
-static const struct dmi_system_id cable_dmi_table[] = {
- {
- .ident = "HP Pavilion N5430",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
- },
- },
- {
- .ident = "Toshiba Satellite S1800-814",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
- },
- },
- { }
-};
-
-static int ali_cable_override(struct pci_dev *pdev)
-{
- /* Fujitsu P2000 */
- if (pdev->subsystem_vendor == 0x10CF &&
- pdev->subsystem_device == 0x10AF)
- return 1;
-
- /* Mitac 8317 (Winbook-A) and relatives */
- if (pdev->subsystem_vendor == 0x1071 &&
- pdev->subsystem_device == 0x8317)
- return 1;
-
- /* Systems by DMI */
- if (dmi_check_system(cable_dmi_table))
- return 1;
-
- return 0;
-}
-
-/**
- * ali_cable_detect - cable detection
- * @hwif: IDE interface
- *
- * This checks if the controller and the cable are capable
- * of UDMA66 transfers. It doesn't check the drives.
- */
-
-static u8 ali_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 cbl = ATA_CBL_PATA40, tmpbyte;
-
- if (m5229_revision >= 0xC2) {
- /*
- * m5229 80-pin cable detection (from Host View)
- *
- * 0x4a bit0 is 0 => primary channel has 80-pin
- * 0x4a bit1 is 0 => secondary channel has 80-pin
- *
- * Certain laptops use short but suitable cables
- * and don't implement the detect logic.
- */
- if (ali_cable_override(dev))
- cbl = ATA_CBL_PATA40_SHORT;
- else {
- pci_read_config_byte(dev, 0x4a, &tmpbyte);
- if ((tmpbyte & (1 << hwif->channel)) == 0)
- cbl = ATA_CBL_PATA80;
- }
- }
-
- return cbl;
-}
-
-#ifndef CONFIG_SPARC64
-/**
- * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
- * @hwif: interface to configure
- *
- * Obtain the IRQ tables for an ALi based IDE solution on the PC
- * class platforms. This part of the code isn't applicable to the
- * Sparc systems.
- */
-
-static void init_hwif_ali15x3(ide_hwif_t *hwif)
-{
- u8 ideic, inmir;
- s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
- 1, 11, 0, 12, 0, 14, 0, 15 };
- int irq = -1;
-
- if (isa_dev) {
- /*
- * read IDE interface control
- */
- pci_read_config_byte(isa_dev, 0x58, &ideic);
-
- /* bit0, bit1 */
- ideic = ideic & 0x03;
-
- /* get IRQ for IDE Controller */
- if ((hwif->channel && ideic == 0x03) ||
- (!hwif->channel && !ideic)) {
- /*
- * get SIRQ1 routing table
- */
- pci_read_config_byte(isa_dev, 0x44, &inmir);
- inmir = inmir & 0x0f;
- irq = irq_routing_table[inmir];
- } else if (hwif->channel && !(ideic & 0x01)) {
- /*
- * get SIRQ2 routing table
- */
- pci_read_config_byte(isa_dev, 0x75, &inmir);
- inmir = inmir & 0x0f;
- irq = irq_routing_table[inmir];
- }
- if (irq >= 0)
- hwif->irq = irq;
- }
-}
-#else
-#define init_hwif_ali15x3 NULL
-#endif /* CONFIG_SPARC64 */
-
-/**
- * init_dma_ali15x3 - set up DMA on ALi15x3
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Set up the DMA functionality on the ALi 15x3.
- */
-
-static int init_dma_ali15x3(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = ide_pci_dma_base(hwif, d);
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- if (!hwif->channel)
- outb(inb(base + 2) & 0x60, base + 2);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- return 0;
-}
-
-static const struct ide_port_ops ali_port_ops = {
- .set_pio_mode = ali_set_pio_mode,
- .set_dma_mode = ali_set_dma_mode,
- .udma_filter = ali_udma_filter,
- .cable_detect = ali_cable_detect,
-};
-
-static const struct ide_dma_ops ali_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_check = ali_dma_check,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info ali15x3_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_ali15x3,
- .init_hwif = init_hwif_ali15x3,
- .init_dma = init_dma_ali15x3,
- .port_ops = &ali_port_ops,
- .dma_ops = &sff_dma_ops,
- .pio_mask = ATA_PIO5,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-/**
- * alim15x3_init_one - set up an ALi15x3 IDE controller
- * @dev: PCI device to set up
- *
- * Perform the actual set up for an ALi15x3 that has been found by the
- * hot plug layer.
- */
-
-static int alim15x3_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct ide_port_info d = ali15x3_chipset;
- u8 rev = dev->revision, idx = id->driver_data;
-
- /* don't use LBA48 DMA on ALi devices before rev 0xC5 */
- if (rev <= 0xC4)
- d.host_flags |= IDE_HFLAG_NO_LBA48_DMA;
-
- if (rev >= 0x20) {
- if (rev == 0x20)
- d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
-
- if (rev < 0xC2)
- d.udma_mask = ATA_UDMA2;
- else if (rev == 0xC2 || rev == 0xC3)
- d.udma_mask = ATA_UDMA4;
- else if (rev == 0xC4)
- d.udma_mask = ATA_UDMA5;
- else
- d.udma_mask = ATA_UDMA6;
-
- d.dma_ops = &ali_dma_ops;
- } else {
- d.host_flags |= IDE_HFLAG_NO_DMA;
-
- d.mwdma_mask = d.swdma_mask = 0;
- }
-
- if (idx == 0)
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-
-static const struct pci_device_id alim15x3_pci_tbl[] = {
- { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), 0 },
- { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl);
-
-static struct pci_driver alim15x3_pci_driver = {
- .name = "ALI15x3_IDE",
- .id_table = alim15x3_pci_tbl,
- .probe = alim15x3_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init ali15x3_ide_init(void)
-{
- return ide_pci_register_driver(&alim15x3_pci_driver);
-}
-
-static void __exit ali15x3_ide_exit(void)
-{
- pci_unregister_driver(&alim15x3_pci_driver);
-}
-
-module_init(ali15x3_ide_init);
-module_exit(ali15x3_ide_exit);
-
-MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
deleted file mode 100644
index 7340597a373e..000000000000
--- a/drivers/ide/amd74xx.c
+++ /dev/null
@@ -1,343 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
- * IDE driver for Linux.
- *
- * Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Based on the work of:
- * Andre Hedrick
- */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "amd74xx"
-
-enum {
- AMD_IDE_CONFIG = 0x41,
- AMD_CABLE_DETECT = 0x42,
- AMD_DRIVE_TIMING = 0x48,
- AMD_8BIT_TIMING = 0x4e,
- AMD_ADDRESS_SETUP = 0x4c,
- AMD_UDMA_TIMING = 0x50,
-};
-
-static unsigned int amd_80w;
-static unsigned int amd_clock;
-
-static char *amd_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
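-/* maps a UDMA cycle count to the chipset's UDMA timing register encoding */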
-static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 };
-
-static inline u8 amd_offset(struct pci_dev *dev)
-{
- return (dev->vendor == PCI_VENDOR_ID_NVIDIA) ? 0x10 : 0;
-}
-
-/*
- * amd_set_speed() writes timing values to the chipset registers
- */
-
-static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
- struct ide_timing *timing)
-{
- u8 t = 0, offset = amd_offset(dev);
-
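- /* address setup packs all four drives into one byte, two bits
- * each, with drive 0 in the most significant pair */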
- pci_read_config_byte(dev, AMD_ADDRESS_SETUP + offset, &t);
- t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
- pci_write_config_byte(dev, AMD_ADDRESS_SETUP + offset, t);
-
- pci_write_config_byte(dev, AMD_8BIT_TIMING + offset + (1 - (dn >> 1)),
- ((clamp_val(timing->act8b, 1, 16) - 1) << 4) | (clamp_val(timing->rec8b, 1, 16) - 1));
-
- pci_write_config_byte(dev, AMD_DRIVE_TIMING + offset + (3 - dn),
- ((clamp_val(timing->active, 1, 16) - 1) << 4) | (clamp_val(timing->recover, 1, 16) - 1));
-
- switch (udma_mask) {
- case ATA_UDMA2: t = timing->udma ? (0xc0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
- case ATA_UDMA4: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 2, 10)]) : 0x03; break;
- case ATA_UDMA5: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 10)]) : 0x03; break;
- case ATA_UDMA6: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 15)]) : 0x03; break;
- default: return;
- }
-
- if (timing->udma)
- pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
-}
-
-/*
- * amd_set_drive() computes timing values and configures the chipset
- * to a desired transfer mode. It also can be called by upper layers.
- */
-
-static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *peer = ide_get_pair_dev(drive);
- struct ide_timing t, p;
- int T, UT;
- u8 udma_mask = hwif->ultra_mask;
- const u8 speed = drive->dma_mode;
-
- T = 1000000000 / amd_clock;
- UT = (udma_mask == ATA_UDMA2) ? T : (T / 2);
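- /* the UDMA state machine runs at double clock (half period) except on UDMA2-only parts */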
-
- ide_timing_compute(drive, speed, &t, T, UT);
-
- if (peer) {
- ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
- ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
- }
-
- if (speed == XFER_UDMA_5 && amd_clock <= 33333) t.udma = 1;
- if (speed == XFER_UDMA_6 && amd_clock <= 33333) t.udma = 15;
-
- amd_set_speed(dev, drive->dn, udma_mask, &t);
-}
-
-/*
- * amd_set_pio_mode() is a callback from upper layers for PIO-only tuning.
- */
-
-static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- amd_set_drive(hwif, drive);
-}
-
-static void amd7409_cable_detect(struct pci_dev *dev)
-{
- /* no host side cable detection */
- amd_80w = 0x03;
-}
-
-static void amd7411_cable_detect(struct pci_dev *dev)
-{
- int i;
- u32 u = 0;
- u8 t = 0, offset = amd_offset(dev);
-
- pci_read_config_byte(dev, AMD_CABLE_DETECT + offset, &t);
- pci_read_config_dword(dev, AMD_UDMA_TIMING + offset, &u);
- amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
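- /* if the BIOS already enabled UDMA > 2 on a channel, the cable
- * must be 80-wire even if the detect bits disagree */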
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 4) && !(amd_80w & (1 << (1 - (i >> 4))))) {
- printk(KERN_WARNING DRV_NAME " %s: BIOS didn't set "
- "cable bits correctly. Enabling workaround.\n",
- pci_name(dev));
- amd_80w |= (1 << (1 - (i >> 4)));
- }
-}
-
-/*
- * The initialization callback. Initialize drive independent registers.
- */
-
-static int init_chipset_amd74xx(struct pci_dev *dev)
-{
- u8 t = 0, offset = amd_offset(dev);
-
-/*
- * Check 80-wire cable presence.
- */
-
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
- ; /* no UDMA > 2 */
- else if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_VIPER_7409)
- amd7409_cable_detect(dev);
- else
- amd7411_cable_detect(dev);
-
-/*
- * Take care of prefetch & postwrite.
- */
-
- pci_read_config_byte(dev, AMD_IDE_CONFIG + offset, &t);
- /*
- * Check for broken FIFO support.
- */
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
- t &= 0x0f;
- else
- t |= 0xf0;
- pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t);
-
- return 0;
-}
-
-static u8 amd_cable_detect(ide_hwif_t *hwif)
-{
- if ((amd_80w >> hwif->channel) & 1)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops amd_port_ops = {
- .set_pio_mode = amd_set_pio_mode,
- .set_dma_mode = amd_set_drive,
- .cable_detect = amd_cable_detect,
-};
-
-#define IDE_HFLAGS_AMD \
- (IDE_HFLAG_PIO_NO_BLACKLIST | \
- IDE_HFLAG_POST_SET_MODE | \
- IDE_HFLAG_IO_32BIT | \
- IDE_HFLAG_UNMASK_IRQS)
-
-#define DECLARE_AMD_DEV(swdma, udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_amd74xx, \
- .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
- .port_ops = &amd_port_ops, \
- .host_flags = IDE_HFLAGS_AMD, \
- .pio_mask = ATA_PIO5, \
- .swdma_mask = swdma, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-#define DECLARE_NV_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_amd74xx, \
- .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
- .port_ops = &amd_port_ops, \
- .host_flags = IDE_HFLAGS_AMD, \
- .pio_mask = ATA_PIO5, \
- .swdma_mask = ATA_SWDMA2, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info amd74xx_chipsets[] = {
- /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
- /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
- /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
- /* 3: AMD8111 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA6),
-
- /* 4: NFORCE */ DECLARE_NV_DEV(ATA_UDMA5),
- /* 5: >= NFORCE2 */ DECLARE_NV_DEV(ATA_UDMA6),
-
- /* 6: AMD5536 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
-};
-
-static int amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = amd74xx_chipsets[idx];
-
- /*
- * Check for bad SWDMA and incorrectly wired Serenade mainboards.
- */
- if (idx == 1) {
- if (dev->revision <= 7)
- d.swdma_mask = 0;
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
- } else if (idx == 3) {
- if (dev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
- dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
- d.udma_mask = ATA_UDMA5;
- }
-
- /*
- * It seems that on some nVidia controllers using AltStatus
- * register can be unreliable so default to Status register
- * if the device is in Compatibility Mode.
- */
- if (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
- ide_pci_is_in_compatibility_mode(dev))
- d.host_flags |= IDE_HFLAG_BROKEN_ALTSTATUS;
-
- printk(KERN_INFO "%s %s: UDMA%s controller\n",
- d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]);
-
- /*
- * Determine the system bus clock.
- */
- amd_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
-
- switch (amd_clock) {
- case 33000: amd_clock = 33333; break;
- case 37000: amd_clock = 37500; break;
- case 41000: amd_clock = 41666; break;
- }
-
- if (amd_clock < 20000 || amd_clock > 50000) {
- printk(KERN_WARNING "%s: User given PCI clock speed impossible"
- " (%d), using 33 MHz instead.\n",
- d.name, amd_clock);
- amd_clock = 33333;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id amd74xx_pci_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 2 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 2 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 3 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 4 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 5 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), 5 },
-#endif
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 5 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), 5 },
-#endif
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 5 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 6 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
-
-static struct pci_driver amd74xx_pci_driver = {
- .name = "AMD_IDE",
- .id_table = amd74xx_pci_tbl,
- .probe = amd74xx_probe,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init amd74xx_ide_init(void)
-{
- return ide_pci_register_driver(&amd74xx_pci_driver);
-}
-
-static void __exit amd74xx_ide_exit(void)
-{
- pci_unregister_driver(&amd74xx_pci_driver);
-}
-
-module_init(amd74xx_ide_init);
-module_exit(amd74xx_ide_exit);
-
-MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("AMD PCI IDE driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
deleted file mode 100644
index e08b0aac08b9..000000000000
--- a/drivers/ide/atiixp.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
- * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "atiixp"
-
-#define ATIIXP_IDE_PIO_TIMING 0x40
-#define ATIIXP_IDE_MDMA_TIMING 0x44
-#define ATIIXP_IDE_PIO_CONTROL 0x48
-#define ATIIXP_IDE_PIO_MODE 0x4a
-#define ATIIXP_IDE_UDMA_CONTROL 0x54
-#define ATIIXP_IDE_UDMA_MODE 0x56
-
-struct atiixp_ide_timing {
- u8 command_width;
- u8 recover_width;
-};
-
-static struct atiixp_ide_timing pio_timing[] = {
- { 0x05, 0x0d },
- { 0x04, 0x07 },
- { 0x03, 0x04 },
- { 0x02, 0x02 },
- { 0x02, 0x00 },
-};
-
-static struct atiixp_ide_timing mdma_timing[] = {
- { 0x07, 0x07 },
- { 0x02, 0x01 },
- { 0x02, 0x00 },
-};
-
-static DEFINE_SPINLOCK(atiixp_lock);
-
-/**
- * atiixp_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode.
- */
-
-static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
- int timing_shift = (drive->dn ^ 1) * 8;
- u32 pio_timing_data;
- u16 pio_mode_data;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
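- /* each timing register keeps one byte per drive, with drive 0 in
- * the high byte - hence the (dn ^ 1) shift */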
- spin_lock_irqsave(&atiixp_lock, flags);
-
- pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
- pio_mode_data &= ~(0x07 << (drive->dn * 4));
- pio_mode_data |= (pio << (drive->dn * 4));
- pci_write_config_word(dev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
-
- pci_read_config_dword(dev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
- pio_timing_data &= ~(0xff << timing_shift);
- pio_timing_data |= (pio_timing[pio].recover_width << timing_shift) |
- (pio_timing[pio].command_width << (timing_shift + 4));
- pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
-
- spin_unlock_irqrestore(&atiixp_lock, flags);
-}
-
-/**
- * atiixp_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Set an ATIIXP host controller to the desired DMA mode. This involves
- * programming the right timing data into the PCI configuration space.
- */
-
-static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
- int timing_shift = (drive->dn ^ 1) * 8;
- u32 tmp32;
- u16 tmp16;
- u16 udma_ctl = 0;
- const u8 speed = drive->dma_mode;
-
- spin_lock_irqsave(&atiixp_lock, flags);
-
- pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);
-
- if (speed >= XFER_UDMA_0) {
- pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
- tmp16 &= ~(0x07 << (drive->dn * 4));
- tmp16 |= ((speed & 0x07) << (drive->dn * 4));
- pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);
-
- udma_ctl |= (1 << drive->dn);
- } else if (speed >= XFER_MW_DMA_0) {
- u8 i = speed & 0x03;
-
- pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
- tmp32 &= ~(0xff << timing_shift);
- tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
- (mdma_timing[i].command_width << (timing_shift + 4));
- pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
-
- udma_ctl &= ~(1 << drive->dn);
- }
-
- pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);
-
- spin_unlock_irqrestore(&atiixp_lock, flags);
-}
-
-static u8 atiixp_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u8 udma_mode = 0, ch = hwif->channel;
-
- pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode);
-
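- /* UDMA4 or faster configured on either drive implies an 80-wire cable */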
- if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops atiixp_port_ops = {
- .set_pio_mode = atiixp_set_pio_mode,
- .set_dma_mode = atiixp_set_dma_mode,
- .cable_detect = atiixp_cable_detect,
-};
-
-static const struct ide_port_info atiixp_pci_info[] = {
- { /* 0: IXP200/300/400/700 */
- .name = DRV_NAME,
- .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
- .port_ops = &atiixp_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 1: IXP600 */
- .name = DRV_NAME,
- .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
- .port_ops = &atiixp_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
-};
-
-/**
- * atiixp_init_one - called when an ATIIXP is found
- * @dev: the atiixp device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
-}
-
-static const struct pci_device_id atiixp_pci_tbl[] = {
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
-
-static struct pci_driver atiixp_pci_driver = {
- .name = "ATIIXP_IDE",
- .id_table = atiixp_pci_tbl,
- .probe = atiixp_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init atiixp_ide_init(void)
-{
- return ide_pci_register_driver(&atiixp_pci_driver);
-}
-
-static void __exit atiixp_ide_exit(void)
-{
- pci_unregister_driver(&atiixp_pci_driver);
-}
-
-module_init(atiixp_ide_init);
-module_exit(atiixp_ide_exit);
-
-MODULE_AUTHOR("HUI YU");
-MODULE_DESCRIPTION("PCI driver module for ATI IXP IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
deleted file mode 100644
index 46eaf58d881b..000000000000
--- a/drivers/ide/buddha.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Amiga Buddha, Catweasel and X-Surf IDE Driver
- *
- * Copyright (C) 1997, 2001 by Geert Uytterhoeven and others
- *
- * This driver was written based on the specifications in README.buddha and
- * the X-Surf info from Inside_XSurf.txt available at
- * http://www.jschoenfeld.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * TODO:
- * - test it :-)
- * - tune the timings using the speed-register
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-
-
- /*
- * The Buddha has 2 IDE interfaces, the Catweasel has 3 and the X-Surf has 2
- */
-
-#define BUDDHA_NUM_HWIFS 2
-#define CATWEASEL_NUM_HWIFS 3
-#define XSURF_NUM_HWIFS 2
-
-#define MAX_NUM_HWIFS 3
-
- /*
- * Bases of the IDE interfaces (relative to the board address)
- */
-
-#define BUDDHA_BASE1 0x800
-#define BUDDHA_BASE2 0xa00
-#define BUDDHA_BASE3 0xc00
-
-#define XSURF_BASE1 0xb000 /* 2.5" Interface */
-#define XSURF_BASE2 0xd000 /* 3.5" Interface */
-
-static u_int buddha_bases[CATWEASEL_NUM_HWIFS] __initdata = {
- BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3
-};
-
-static u_int xsurf_bases[XSURF_NUM_HWIFS] __initdata = {
- XSURF_BASE1, XSURF_BASE2
-};
-
- /*
- * Offsets from one of the above bases
- */
-
-#define BUDDHA_CONTROL 0x11a
-
- /*
- * Other registers
- */
-
-#define BUDDHA_IRQ1 0xf00 /* MSB = 1, Harddisk is source of */
-#define BUDDHA_IRQ2 0xf40 /* interrupt */
-#define BUDDHA_IRQ3 0xf80
-
-#define XSURF_IRQ1 0x7e
-#define XSURF_IRQ2 0x7e
-
-static int buddha_irqports[CATWEASEL_NUM_HWIFS] __initdata = {
- BUDDHA_IRQ1, BUDDHA_IRQ2, BUDDHA_IRQ3
-};
-
-static int xsurf_irqports[XSURF_NUM_HWIFS] __initdata = {
- XSURF_IRQ1, XSURF_IRQ2
-};
-
-#define BUDDHA_IRQ_MR 0xfc0 /* master interrupt enable */
-
-
- /*
- * Board information
- */
-
-typedef enum BuddhaType_Enum {
- BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
-} BuddhaType;
-
-static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };
-
- /*
- * Check and acknowledge the interrupt status
- */
-
-static int buddha_test_irq(ide_hwif_t *hwif)
-{
- unsigned char ch;
-
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & 0x80))
- return 0;
- return 1;
-}
-
-static void xsurf_clear_irq(ide_drive_t *drive)
-{
- /*
- * X-Surf needs 0 written to IRQ register to ensure ISA bit A11 stays at 0
- */
- z_writeb(0, drive->hwif->io_ports.irq_addr);
-}
-
-static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
- unsigned long ctl, unsigned long irq_port)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
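- /* the remaining task-file registers sit 4 bytes apart, at offset 2
- * within each longword */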
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 2 + i * 4;
-
- hw->io_ports.ctl_addr = ctl;
- hw->io_ports.irq_addr = irq_port;
-
- hw->irq = IRQ_AMIGA_PORTS;
-}
-
-static const struct ide_port_ops buddha_port_ops = {
- .test_irq = buddha_test_irq,
-};
-
-static const struct ide_port_ops xsurf_port_ops = {
- .clear_irq = xsurf_clear_irq,
- .test_irq = buddha_test_irq,
-};
-
-static const struct ide_port_info buddha_port_info = {
- .port_ops = &buddha_port_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
- /*
- * Probe for a Buddha or Catweasel IDE interface
- */
-
-static int __init buddha_init(void)
-{
- struct zorro_dev *z = NULL;
- u_long buddha_board = 0;
- BuddhaType type;
- int buddha_num_hwifs, i;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- unsigned long board;
- struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
- struct ide_port_info d = buddha_port_info;
-
- if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
- buddha_num_hwifs = BUDDHA_NUM_HWIFS;
- type = BOARD_BUDDHA;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) {
- buddha_num_hwifs = CATWEASEL_NUM_HWIFS;
- type = BOARD_CATWEASEL;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF) {
- buddha_num_hwifs = XSURF_NUM_HWIFS;
- type = BOARD_XSURF;
- d.port_ops = &xsurf_port_ops;
- } else
- continue;
-
- board = z->resource.start;
-
- if (type != BOARD_XSURF) {
- if (!request_mem_region(board+BUDDHA_BASE1, 0x800, "IDE"))
- continue;
- } else {
- if (!request_mem_region(board+XSURF_BASE1, 0x1000, "IDE"))
- continue;
- if (!request_mem_region(board+XSURF_BASE2, 0x1000, "IDE"))
- goto fail_base2;
- if (!request_mem_region(board+XSURF_IRQ1, 0x8, "IDE")) {
- release_mem_region(board+XSURF_BASE2, 0x1000);
-fail_base2:
- release_mem_region(board+XSURF_BASE1, 0x1000);
- continue;
- }
- }
- buddha_board = (unsigned long)ZTWO_VADDR(board);
-
- /* write to BUDDHA_IRQ_MR to enable the board IRQ */
- /* X-Surf doesn't have this. IRQs are always on */
- if (type != BOARD_XSURF)
- z_writeb(0, buddha_board+BUDDHA_IRQ_MR);
-
- printk(KERN_INFO "ide: %s IDE controller\n",
- buddha_board_name[type]);
-
- for (i = 0; i < buddha_num_hwifs; i++) {
- unsigned long base, ctl, irq_port;
-
- if (type != BOARD_XSURF) {
- base = buddha_board + buddha_bases[i];
- ctl = base + BUDDHA_CONTROL;
- irq_port = buddha_board + buddha_irqports[i];
- } else {
- base = buddha_board + xsurf_bases[i];
- /* X-Surf has no CS1* (Control/AltStat) */
- ctl = 0;
- irq_port = buddha_board + xsurf_irqports[i];
- }
-
- buddha_setup_ports(&hw[i], base, ctl, irq_port);
-
- hws[i] = &hw[i];
- }
-
- ide_host_add(&d, hws, i, NULL);
- }
-
- return 0;
-}
-
-module_init(buddha_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
deleted file mode 100644
index f48decb9fac4..000000000000
--- a/drivers/ide/cmd640.c
+++ /dev/null
@@ -1,848 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1996 Linus Torvalds & authors (see below)
- */
-
-/*
- * Original authors: abramov@cecmow.enet.dec.com (Igor Abramov)
- * mlord@pobox.com (Mark Lord)
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This file provides support for the advanced features and bugs
- * of IDE interfaces using the CMD Technologies 0640 IDE interface chip.
- *
- * These chips are basically fucked by design, and getting this driver
- * to work on every motherboard design that uses this screwed chip seems
- * bloody well impossible. However, we're still trying.
- *
- * Version 0.97 worked for everybody.
- *
- * User feedback is essential. Many thanks to the beta test team:
- *
- * A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com,
- * bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz,
- * chrisc@dbass.demon.co.uk, dalecki@namu26.Num.Math.Uni-Goettingen.de,
- * derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de,
- * flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net,
- * j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net,
- * kerouac@ssnet.com, meskes@informatik.rwth-aachen.de, hzoli@cs.elte.hu,
- * peter@udgaard.isgtec.com, phil@tazenda.demon.co.uk, roadcapw@cfw.com,
- * s0033las@sun10.vsz.bme.hu, schaffer@tam.cornell.edu, sjd@slip.net,
- * steve@ei.org, ulrpeg@bigcomm.gun.de, ism@tardis.ed.ac.uk, mack@cray.com
- * liug@mama.indstate.edu, and others.
- *
- * Version 0.01 Initial version, hacked out of ide.c,
- * and #include'd rather than compiled separately.
- * This will get cleaned up in a subsequent release.
- *
- * Version 0.02 Fixes for vlb initialization code, enable prefetch
- * for versions 'B' and 'C' of chip by default,
- * some code cleanup.
- *
- * Version 0.03 Added reset of secondary interface,
- * and black list for devices which are not compatible
- * with prefetch mode. Separate function for setting
- * prefetch is added, possibly it will be called some
- * day from ioctl processing code.
- *
- * Version 0.04 Now configs/compiles separate from ide.c
- *
- * Version 0.05 Major rewrite of interface timing code.
- * Added new function cmd640_set_mode to set PIO mode
- * from ioctl call. New drives added to black list.
- *
- * Version 0.06 More code cleanup. Prefetch is enabled only for
- * detected hard drives, not included in prefetch
- * black list.
- *
- * Version 0.07 Changed to more conservative drive tuning policy.
- * Unknown drives, which report PIO < 4 are set to
- * (reported_PIO - 1) if it is supported, or to PIO0.
- * List of known drives extended by info provided by
- * CMD at their ftp site.
- *
- * Version 0.08 Added autotune/noautotune support.
- *
- * Version 0.09 Try to be smarter about 2nd port enabling.
- * Version 0.10 Be nice and don't reset 2nd port.
- * Version 0.11 Try to handle more weird situations.
- *
- * Version 0.12 Lots of bug fixes from Laszlo Peter
- * irq unmasking disabled for reliability.
- * try to be even smarter about the second port.
- * tidy up source code formatting.
- * Version 0.13 permit irq unmasking again.
- * Version 0.90 massive code cleanup, some bugs fixed.
- * defaults all drives to PIO mode0, prefetch off.
- * autotune is OFF by default, with compile time flag.
- * prefetch can be turned OFF/ON using "hdparm -p8/-p9"
- * (requires hdparm-3.1 or newer)
- * Version 0.91 first release to linux-kernel list.
- * Version 0.92 move initial reg dump to separate callable function
- * change "readahead" to "prefetch" to avoid confusion
- * Version 0.95 respect original BIOS timings unless autotuning.
- * tons of code cleanup and rearrangement.
- * added CONFIG_BLK_DEV_CMD640_ENHANCED option
- * prevent use of unmask when prefetch is on
- * Version 0.96 prevent use of io_32bit when prefetch is off
- * Version 0.97 fix VLB secondary interface for sjd@slip.net
- * other minor tune-ups: 0.96 was very good.
- * Version 0.98 ignore PCI version when disabled by BIOS
- * Version 0.99 display setup/active/recovery clocks with PIO mode
- * Version 1.00 Mmm.. cannot depend on PCMD_ENA in all systems
- * Version 1.01 slow/fast devsel can be selected with "hdparm -p6/-p7"
- * ("fast" is necessary for 32bit I/O in some systems)
- * Version 1.02 fix bug that resulted in slow "setup times"
- * (patch courtesy of Zoltan Hidvegi)
- */
-
-#define CMD640_PREFETCH_MASKS 1
-
-/*#define CMD640_DUMP_REGS */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cmd640"
-
-static bool cmd640_vlb;
-
-/*
- * CMD640 specific registers definition.
- */
-
-#define VID 0x00
-#define DID 0x02
-#define PCMD 0x04
-#define PCMD_ENA 0x01
-#define PSTTS 0x06
-#define REVID 0x08
-#define PROGIF 0x09
-#define SUBCL 0x0a
-#define BASCL 0x0b
-#define BaseA0 0x10
-#define BaseA1 0x14
-#define BaseA2 0x18
-#define BaseA3 0x1c
-#define INTLINE 0x3c
-#define INPINE 0x3d
-
-#define CFR 0x50
-#define CFR_DEVREV 0x03
-#define CFR_IDE01INTR 0x04
-#define CFR_DEVID 0x18
-#define CFR_AT_VESA_078h 0x20
-#define CFR_DSA1 0x40
-#define CFR_DSA0 0x80
-
-#define CNTRL 0x51
-#define CNTRL_DIS_RA0 0x40
-#define CNTRL_DIS_RA1 0x80
-#define CNTRL_ENA_2ND 0x08
-
-#define CMDTIM 0x52
-#define ARTTIM0 0x53
-#define DRWTIM0 0x54
-#define ARTTIM1 0x55
-#define DRWTIM1 0x56
-#define ARTTIM23 0x57
-#define ARTTIM23_DIS_RA2 0x04
-#define ARTTIM23_DIS_RA3 0x08
-#define ARTTIM23_IDE23INTR 0x10
-#define DRWTIM23 0x58
-#define BRST 0x59
-
-/*
- * Registers and masks for easy access by drive index:
- */
-static u8 prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23};
-static u8 prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3};
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
-static u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
-static u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM23, DRWTIM23};
-
-/*
- * Current cmd640 timing values for each drive.
- * The defaults for each are the slowest possible timings.
- */
-static u8 setup_counts[4] = {4, 4, 4, 4}; /* Address setup count (in clocks) */
-static u8 active_counts[4] = {16, 16, 16, 16}; /* Active count (encoded) */
-static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
-
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
-static DEFINE_SPINLOCK(cmd640_lock);
-
-/*
- * Interface to access cmd640x registers
- */
-static unsigned int cmd640_key;
-static void (*__put_cmd640_reg)(u16 reg, u8 val);
-static u8 (*__get_cmd640_reg)(u16 reg);
-
-/*
- * This is read from the CFR reg, and is used in several places.
- */
-static unsigned int cmd640_chip_version;
-
-/*
- * The CMD640x chip does not support DWORD config write cycles, but some
- * of the BIOSes use them to implement the config services.
- * Therefore, we must use direct IO instead.
- */
-
-/* PCI method 1 access */
-
-static void put_cmd640_reg_pci1(u16 reg, u8 val)
-{
- outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
- outb_p(val, (reg & 3) | 0xcfc);
-}
-
-static u8 get_cmd640_reg_pci1(u16 reg)
-{
- outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
- return inb_p((reg & 3) | 0xcfc);
-}
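An illustrative sketch (not part of the original driver): under PCI
configuration mechanism #1, the dword written to 0xcf8 encodes bit 31 =
enable, bits 23:16 = bus, 15:11 = device, 10:8 = function and 7:2 =
register. That is why probe_for_cmd640_pci1() below advances cmd640_key
in steps of 0x800, one device slot at a time on bus 0:

static u32 conf1_addr(u8 bus, u8 dev, u8 fn, u8 reg)
{
	return 0x80000000u | (bus << 16) | (dev << 11) | (fn << 8) | (reg & 0xfc);
}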
-
-/* PCI method 2 access (from CMD datasheet) */
-
-static void put_cmd640_reg_pci2(u16 reg, u8 val)
-{
- outb_p(0x10, 0xcf8);
- outb_p(val, cmd640_key + reg);
- outb_p(0, 0xcf8);
-}
-
-static u8 get_cmd640_reg_pci2(u16 reg)
-{
- u8 b;
-
- outb_p(0x10, 0xcf8);
- b = inb_p(cmd640_key + reg);
- outb_p(0, 0xcf8);
- return b;
-}
-
-/* VLB access */
-
-static void put_cmd640_reg_vlb(u16 reg, u8 val)
-{
- outb_p(reg, cmd640_key);
- outb_p(val, cmd640_key + 4);
-}
-
-static u8 get_cmd640_reg_vlb(u16 reg)
-{
- outb_p(reg, cmd640_key);
- return inb_p(cmd640_key + 4);
-}
-
-static u8 get_cmd640_reg(u16 reg)
-{
- unsigned long flags;
- u8 b;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- b = __get_cmd640_reg(reg);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return b;
-}
-
-static void put_cmd640_reg(u16 reg, u8 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- __put_cmd640_reg(reg, val);
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-static int __init match_pci_cmd640_device(void)
-{
- const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
- unsigned int i;
- for (i = 0; i < 4; i++) {
- if (get_cmd640_reg(i) != ven_dev[i])
- return 0;
- }
-#ifdef STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT
- if ((get_cmd640_reg(PCMD) & PCMD_ENA) == 0) {
- printk("ide: cmd640 on PCI disabled by BIOS\n");
- return 0;
- }
-#endif /* STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT */
- return 1; /* success */
-}
-
-/*
- * Probe for CMD640x -- pci method 1
- */
-static int __init probe_for_cmd640_pci1(void)
-{
- __get_cmd640_reg = get_cmd640_reg_pci1;
- __put_cmd640_reg = put_cmd640_reg_pci1;
- for (cmd640_key = 0x80000000;
- cmd640_key <= 0x8000f800;
- cmd640_key += 0x800) {
- if (match_pci_cmd640_device())
- return 1; /* success */
- }
- return 0;
-}
-
-/*
- * Probe for CMD640x -- pci method 2
- */
-static int __init probe_for_cmd640_pci2(void)
-{
- __get_cmd640_reg = get_cmd640_reg_pci2;
- __put_cmd640_reg = put_cmd640_reg_pci2;
- for (cmd640_key = 0xc000; cmd640_key <= 0xcf00; cmd640_key += 0x100) {
- if (match_pci_cmd640_device())
- return 1; /* success */
- }
- return 0;
-}
-
-/*
- * Probe for CMD640x -- vlb
- */
-static int __init probe_for_cmd640_vlb(void)
-{
- u8 b;
-
- __get_cmd640_reg = get_cmd640_reg_vlb;
- __put_cmd640_reg = put_cmd640_reg_vlb;
- cmd640_key = 0x178;
- b = get_cmd640_reg(CFR);
- if (b == 0xff || b == 0x00 || (b & CFR_AT_VESA_078h)) {
- cmd640_key = 0x78;
- b = get_cmd640_reg(CFR);
- if (b == 0xff || b == 0x00 || !(b & CFR_AT_VESA_078h))
- return 0;
- }
- return 1; /* success */
-}
-
-/*
- * Returns 1 if an IDE interface/drive exists at 0x170,
- * and 0 otherwise.
- */
-static int __init secondary_port_responding(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
-
- outb_p(0x0a, 0x176); /* select drive0 */
- udelay(100);
- if ((inb_p(0x176) & 0x1f) != 0x0a) {
- outb_p(0x1a, 0x176); /* select drive1 */
- udelay(100);
- if ((inb_p(0x176) & 0x1f) != 0x1a) {
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0; /* nothing responded */
- }
- }
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1; /* success */
-}
-
-#ifdef CMD640_DUMP_REGS
-/*
- * Dump out all cmd640 registers. May be called from ide.c
- */
-static void cmd640_dump_regs(void)
-{
- unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
-
- /* Dump current state of chip registers */
- printk("ide: cmd640 internal register dump:");
- for (; reg <= 0x59; reg++) {
- if (!(reg & 0x0f))
- printk("\n%04x:", reg);
- printk(" %02x", get_cmd640_reg(reg));
- }
- printk("\n");
-}
-#endif
-
-static void __set_prefetch_mode(ide_drive_t *drive, int mode)
-{
- if (mode) { /* want prefetch on? */
-#if CMD640_PREFETCH_MASKS
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
-#endif
- drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT;
- } else {
- drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK;
- drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
- drive->io_32bit = 0;
- }
-}
-
-#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
-/*
- * Check whether prefetch is on for a drive,
- * and initialize the unmask flags for safe operation.
- */
-static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
-{
- u8 b = get_cmd640_reg(prefetch_regs[index]);
-
- __set_prefetch_mode(drive, (b & prefetch_masks[index]) ? 0 : 1);
-}
-#else
-
-/*
- * Sets prefetch mode for a drive.
- */
-static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode)
-{
- unsigned long flags;
- int reg = prefetch_regs[index];
- u8 b;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- b = __get_cmd640_reg(reg);
- __set_prefetch_mode(drive, mode);
- if (mode)
- b &= ~prefetch_masks[index]; /* enable prefetch */
- else
- b |= prefetch_masks[index]; /* disable prefetch */
- __put_cmd640_reg(reg, b);
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-/*
- * Dump out current drive clocks settings
- */
-static void display_clocks(unsigned int index)
-{
- u8 active_count, recovery_count;
-
- active_count = active_counts[index];
- if (active_count == 1)
- ++active_count;
- recovery_count = recovery_counts[index];
- if (active_count > 3 && recovery_count == 1)
- ++recovery_count;
- if (cmd640_chip_version > 1)
- recovery_count += 1; /* cmd640b uses (count + 1)*/
- printk(", clocks=%d/%d/%d\n", setup_counts[index], active_count, recovery_count);
-}
-
-/*
- * Pack active and recovery counts into single byte representation
- * used by controller
- */
-static inline u8 pack_nibbles(u8 upper, u8 lower)
-{
- return ((upper & 0x0f) << 4) | (lower & 0x0f);
-}
-
-/*
- * This routine writes the prepared setup/active/recovery counts
- * for a drive into the cmd640 chipset registers to activate them.
- */
-static void program_drive_counts(ide_drive_t *drive, unsigned int index)
-{
- unsigned long flags;
- u8 setup_count = setup_counts[index];
- u8 active_count = active_counts[index];
- u8 recovery_count = recovery_counts[index];
-
- /*
- * Set up address setup count and drive read/write timing registers.
- * Primary interface has individual count/timing registers for
- * each drive. Secondary interface has one common set of registers,
- * so we merge the timings, using the slowest value for each timing.
- */
- if (index > 1) {
- ide_drive_t *peer = ide_get_pair_dev(drive);
- unsigned int mate = index ^ 1;
-
- if (peer) {
- if (setup_count < setup_counts[mate])
- setup_count = setup_counts[mate];
- if (active_count < active_counts[mate])
- active_count = active_counts[mate];
- if (recovery_count < recovery_counts[mate])
- recovery_count = recovery_counts[mate];
- }
- }
-
- /*
- * Convert setup_count to internal chipset representation
- */
- switch (setup_count) {
- case 4: setup_count = 0x00; break;
- case 3: setup_count = 0x80; break;
- case 1:
- case 2: setup_count = 0x40; break;
- default: setup_count = 0xc0; /* case 5 */
- }
-
- /*
- * Now that everything is ready, program the new timings
- */
- spin_lock_irqsave(&cmd640_lock, flags);
- /*
- * Program the address_setup clocks into ARTTIM reg,
- * and then the active/recovery counts into the DRWTIM reg
- * (this converts counts of 16 into counts of zero -- okay).
- */
- setup_count |= __get_cmd640_reg(arttim_regs[index]) & 0x3f;
- __put_cmd640_reg(arttim_regs[index], setup_count);
- __put_cmd640_reg(drwtim_regs[index], pack_nibbles(active_count, recovery_count));
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-/*
- * Set a specific pio_mode for a drive
- */
-static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
- u8 pio_mode, unsigned int cycle_time)
-{
- struct ide_timing *t;
- int setup_time, active_time, recovery_time, clock_time;
- u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
- int bus_speed;
-
- if (cmd640_vlb)
- bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
- else
- bus_speed = ide_pci_clk ? ide_pci_clk : 33;
-
- if (pio_mode > 5)
- pio_mode = 5;
-
- t = ide_timing_find_mode(XFER_PIO_0 + pio_mode);
- setup_time = t->setup;
- active_time = t->active;
-
- recovery_time = cycle_time - (setup_time + active_time);
- clock_time = 1000 / bus_speed;
- cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
-
- setup_count = DIV_ROUND_UP(setup_time, clock_time);
-
- active_count = DIV_ROUND_UP(active_time, clock_time);
- if (active_count < 2)
- active_count = 2; /* minimum allowed by cmd640 */
-
- recovery_count = DIV_ROUND_UP(recovery_time, clock_time);
- recovery_count2 = cycle_count - (setup_count + active_count);
- if (recovery_count2 > recovery_count)
- recovery_count = recovery_count2;
- if (recovery_count < 2)
- recovery_count = 2; /* minimum allowed by cmd640 */
- if (recovery_count > 17) {
- active_count += recovery_count - 17;
- recovery_count = 17;
- }
- if (active_count > 16)
- active_count = 16; /* maximum allowed by cmd640 */
- if (cmd640_chip_version > 1)
- recovery_count -= 1; /* cmd640b uses (count + 1)*/
- if (recovery_count > 16)
- recovery_count = 16; /* maximum allowed by cmd640 */
-
- setup_counts[index] = setup_count;
- active_counts[index] = active_count;
- recovery_counts[index] = recovery_count;
-
- /*
- * In a perfect world, we might set the drive pio mode here
- * (using WIN_SETFEATURE) before continuing.
- *
- * But we do not, because:
- * 1) this is the wrong place to do it (the proper place is do_special() in ide.c)
- * 2) in practice this is rarely, if ever, necessary
- */
- program_drive_counts(drive, index);
-}
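A worked example of the arithmetic above, assuming a 33 MHz PCI clock and
the generic PIO4 figures of 25 ns setup, 70 ns active and a 120 ns cycle:

	clock_time     = 1000 / 33            = 30 ns
	setup_count    = DIV_ROUND_UP(25, 30) = 1
	active_count   = DIV_ROUND_UP(70, 30) = 3
	recovery_count = DIV_ROUND_UP(120 - (25 + 70), 30) = 1,
	                 raised to the chip minimum of 2

i.e. 1/3/2 clocks, before the cmd640b "count + 1" adjustment.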
-
-static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned int index = 0, cycle_time;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 b;
-
- switch (pio) {
- case 6: /* set fast-devsel off */
- case 7: /* set fast-devsel on */
- b = get_cmd640_reg(CNTRL) & ~0x27;
- if (pio & 1)
- b |= 0x27;
- put_cmd640_reg(CNTRL, b);
- printk("%s: %sabled cmd640 fast host timing (devsel)\n",
- drive->name, (pio & 1) ? "en" : "dis");
- return;
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- set_prefetch_mode(drive, index, pio & 1);
- printk("%s: %sabled cmd640 prefetch\n",
- drive->name, (pio & 1) ? "en" : "dis");
- return;
- }
-
- cycle_time = ide_pio_cycle_time(drive, pio);
- cmd640_set_mode(drive, index, pio, cycle_time);
-
- printk("%s: selected cmd640 PIO mode%d (%dns)",
- drive->name, pio, cycle_time);
-
- display_clocks(index);
-}
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
-static void __init cmd640_init_dev(ide_drive_t *drive)
-{
- unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1);
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- /*
- * Reset timing to the slowest speed and turn off prefetch.
- * This way, the drive identify code has a better chance.
- */
- setup_counts[i] = 4; /* max possible */
- active_counts[i] = 16; /* max possible */
- recovery_counts[i] = 16; /* max possible */
- program_drive_counts(drive, i);
- set_prefetch_mode(drive, i, 0);
- printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i);
-#else
- /*
- * Set the drive unmask flags to match the prefetch setting.
- */
- check_prefetch(drive, i);
- printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n",
- i, (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) ? "off" : "on");
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-}
-
-static int cmd640_test_irq(ide_hwif_t *hwif)
-{
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
- CFR_IDE01INTR;
- u8 irq_stat = get_cmd640_reg(irq_reg);
-
- return (irq_stat & irq_mask) ? 1 : 0;
-}
-
-static const struct ide_port_ops cmd640_port_ops = {
- .init_dev = cmd640_init_dev,
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- .set_pio_mode = cmd640_set_pio_mode,
-#endif
- .test_irq = cmd640_test_irq,
-};
-
-static int pci_conf1(void)
-{
- unsigned long flags;
- u32 tmp;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- outb(0x01, 0xCFB);
- tmp = inl(0xCF8);
- outl(0x80000000, 0xCF8);
- if (inl(0xCF8) == 0x80000000) {
- outl(tmp, 0xCF8);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1;
- }
- outl(tmp, 0xCF8);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0;
-}
-
-static int pci_conf2(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- outb(0x00, 0xCFB);
- outb(0x00, 0xCF8);
- outb(0x00, 0xCFA);
- if (inb(0xCF8) == 0x00 && inb(0xCF8) == 0x00) {
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1;
- }
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0;
-}
-
-static const struct ide_port_info cmd640_port_info __initconst = {
- .chipset = ide_cmd640,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_ABUSE_FAST_DEVSEL,
- .port_ops = &cmd640_port_ops,
- .pio_mask = ATA_PIO5,
-};
-
-static int __init cmd640x_init_one(unsigned long base, unsigned long ctl)
-{
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- return 0;
-}
-
-/*
- * Probe for a cmd640 chipset, and initialize it if found.
- */
-static int __init cmd640x_init(void)
-{
- int second_port_cmd640 = 0, rc;
- const char *bus_type, *port2;
- u8 b, cfr;
- struct ide_hw hw[2], *hws[2];
-
- if (cmd640_vlb && probe_for_cmd640_vlb()) {
- bus_type = "VLB";
- } else {
- cmd640_vlb = 0;
- /* Find out what kind of PCI probing is supported otherwise
- Justin Gibbs will sulk.. */
- if (pci_conf1() && probe_for_cmd640_pci1())
- bus_type = "PCI (type1)";
- else if (pci_conf2() && probe_for_cmd640_pci2())
- bus_type = "PCI (type2)";
- else
- return 0;
- }
- /*
- * Undocumented magic (there is no 0x5b reg in specs)
- */
- put_cmd640_reg(0x5b, 0xbd);
- if (get_cmd640_reg(0x5b) != 0xbd) {
- printk(KERN_ERR "ide: cmd640 init failed: wrong value in reg 0x5b\n");
- return 0;
- }
- put_cmd640_reg(0x5b, 0);
-
-#ifdef CMD640_DUMP_REGS
- cmd640_dump_regs();
-#endif
-
- /*
- * Documented magic begins here
- */
- cfr = get_cmd640_reg(CFR);
- cmd640_chip_version = cfr & CFR_DEVREV;
- if (cmd640_chip_version == 0) {
- printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
- return 0;
- }
-
- rc = cmd640x_init_one(0x1f0, 0x3f6);
- if (rc)
- return rc;
-
- rc = cmd640x_init_one(0x170, 0x376);
- if (rc) {
- release_region(0x3f6, 1);
- release_region(0x1f0, 8);
- return rc;
- }
-
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
-	printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x\n",
-	       'a' + cmd640_chip_version - 1, bus_type, cfr);
-
- /*
- * Initialize data for primary port
- */
- hws[0] = &hw[0];
-
- /*
- * Ensure compatibility by always using the slowest timings
- * for access to the drive's command register block,
- * and reset the prefetch burstsize to default (512 bytes).
- *
- * Maybe we need a way to NOT do these on *some* systems?
- */
- put_cmd640_reg(CMDTIM, 0);
- put_cmd640_reg(BRST, 0x40);
-
- b = get_cmd640_reg(CNTRL);
-
- /*
- * Try to enable the secondary interface, if not already enabled
- */
- if (secondary_port_responding()) {
- if ((b & CNTRL_ENA_2ND)) {
- second_port_cmd640 = 1;
- port2 = "okay";
- } else if (cmd640_vlb) {
- second_port_cmd640 = 1;
- port2 = "alive";
- } else
- port2 = "not cmd640";
- } else {
- put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */
- if (secondary_port_responding()) {
- second_port_cmd640 = 1;
- port2 = "enabled";
- } else {
- put_cmd640_reg(CNTRL, b); /* restore original setting */
- port2 = "not responding";
- }
- }
-
- /*
- * Initialize data for secondary cmd640 port, if enabled
- */
- if (second_port_cmd640)
- hws[1] = &hw[1];
-
- printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
- second_port_cmd640 ? "" : "not ", port2);
-
-#ifdef CMD640_DUMP_REGS
- cmd640_dump_regs();
-#endif
-
- return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1,
- NULL);
-}
-
-module_param_named(probe_vlb, cmd640_vlb, bool, 0);
-MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset");
-
-module_init(cmd640x_init);
-
-MODULE_LICENSE("GPL");
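For reference, the VLB probe above was opt-in. Assuming the standard
module-parameter syntax, it would be enabled with

	modprobe cmd640 probe_vlb=1

or with cmd640.probe_vlb=1 on the kernel command line when built in.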
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
deleted file mode 100644
index 943bf944bf72..000000000000
--- a/drivers/ide/cmd64x.c
+++ /dev/null
@@ -1,452 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
- * Due to massive hardware bugs, UltraDMA is only supported
- * on the 646U2 and not on the 646U.
- *
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- *
- * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cmd64x"
-
-/*
- * CMD64x specific registers definition.
- */
-#define CFR 0x50
-#define CFR_INTR_CH0 0x04
-
-#define CMDTIM 0x52
-#define ARTTIM0 0x53
-#define DRWTIM0 0x54
-#define ARTTIM1 0x55
-#define DRWTIM1 0x56
-#define ARTTIM23 0x57
-#define ARTTIM23_DIS_RA2 0x04
-#define ARTTIM23_DIS_RA3 0x08
-#define ARTTIM23_INTR_CH1 0x10
-#define DRWTIM2 0x58
-#define BRST 0x59
-#define DRWTIM3 0x5b
-
-#define BMIDECR0 0x70
-#define MRDMODE 0x71
-#define MRDMODE_INTR_CH0 0x04
-#define MRDMODE_INTR_CH1 0x08
-#define UDIDETCR0 0x73
-#define DTPR0 0x74
-#define BMIDECR1 0x78
-#define BMIDECSR 0x79
-#define UDIDETCR1 0x7B
-#define DTPR1 0x7C
-
-static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- const unsigned long T = 1000000 / bus_speed;
- static const u8 recovery_values[] =
- {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
- static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
- static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
- static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
- struct ide_timing t;
- u8 arttim = 0;
-
- if (drive->dn >= ARRAY_SIZE(drwtim_regs))
- return;
-
- ide_timing_compute(drive, mode, &t, T, 0);
-
- /*
- * In case we've got too long recovery phase, try to lengthen
- * the active phase
- */
- if (t.recover > 16) {
- t.active += t.recover - 16;
- t.recover = 16;
- }
- if (t.active > 16) /* shouldn't actually happen... */
- t.active = 16;
-
- /*
- * Convert values to internal chipset representation
- */
- t.recover = recovery_values[t.recover];
- t.active &= 0x0f;
-
- /* Program the active/recovery counts into the DRWTIM register */
- pci_write_config_byte(dev, drwtim_regs[drive->dn],
- (t.active << 4) | t.recover);
-
- /*
- * The primary channel has individual address setup timing registers
- * for each drive and the hardware selects the slowest timing itself.
- * The secondary channel has one common register and we have to select
- * the slowest address setup timing ourselves.
- */
- if (hwif->channel) {
- ide_drive_t *pair = ide_get_pair_dev(drive);
-
- if (pair) {
- struct ide_timing tp;
-
- ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
- ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode,
- &tp, T, 0);
- ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
- }
- }
- }
-
- if (t.setup > 5) /* shouldn't actually happen... */
- t.setup = 5;
-
- /*
- * Program the address setup clocks into the ARTTIM registers.
- * Avoid clearing the secondary channel's interrupt bit.
- */
- (void) pci_read_config_byte (dev, arttim_regs[drive->dn], &arttim);
- if (hwif->channel)
- arttim &= ~ARTTIM23_INTR_CH1;
- arttim &= ~0xc0;
- arttim |= setup_values[t.setup];
- (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
-}
-
-/*
- * Attempts to set drive's PIO mode.
- * Special cases are 8: prefetch off, 9: prefetch on (both never worked)
- */
-
-static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * Filter out the prefetch control values
- * to prevent PIO5 from being programmed
- */
- if (pio == 8 || pio == 9)
- return;
-
- cmd64x_program_timings(drive, XFER_PIO_0 + pio);
-}
-
-static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 unit = drive->dn & 0x01;
- u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_byte(dev, pciU, &regU);
- regU &= ~(unit ? 0xCA : 0x35);
-
-	switch (speed) {
- case XFER_UDMA_5:
- regU |= unit ? 0x0A : 0x05;
- break;
- case XFER_UDMA_4:
- regU |= unit ? 0x4A : 0x15;
- break;
- case XFER_UDMA_3:
- regU |= unit ? 0x8A : 0x25;
- break;
- case XFER_UDMA_2:
- regU |= unit ? 0x42 : 0x11;
- break;
- case XFER_UDMA_1:
- regU |= unit ? 0x82 : 0x21;
- break;
- case XFER_UDMA_0:
- regU |= unit ? 0xC2 : 0x31;
- break;
- case XFER_MW_DMA_2:
- case XFER_MW_DMA_1:
- case XFER_MW_DMA_0:
- cmd64x_program_timings(drive, speed);
- break;
- }
-
- pci_write_config_byte(dev, pciU, regU);
-}
-
-static void cmd648_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = pci_resource_start(dev, 4);
- u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
- MRDMODE_INTR_CH0;
- u8 mrdmode = inb(base + 1);
-
- /* clear the interrupt bit */
- outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
- base + 1);
-}
-
-static void cmd64x_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
- CFR_INTR_CH0;
- u8 irq_stat = 0;
-
- (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
- /* clear the interrupt bit */
- (void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask);
-}
-
-static int cmd648_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = pci_resource_start(dev, 4);
- u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
- MRDMODE_INTR_CH0;
- u8 mrdmode = inb(base + 1);
-
- pr_debug("%s: mrdmode: 0x%02x irq_mask: 0x%02x\n",
- hwif->name, mrdmode, irq_mask);
-
- return (mrdmode & irq_mask) ? 1 : 0;
-}
-
-static int cmd64x_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
- CFR_INTR_CH0;
- u8 irq_stat = 0;
-
- (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
-
- pr_debug("%s: irq_stat: 0x%02x irq_mask: 0x%02x\n",
- hwif->name, irq_stat, irq_mask);
-
- return (irq_stat & irq_mask) ? 1 : 0;
-}
-
-/*
- * ASUS P55T2P4D with CMD646 chipset revision 0x01 requires the old
- * event order for DMA transfers.
- */
-
-static int cmd646_1_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- /* get DMA status */
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* read DMA command state */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- /* clear the INTR & ERROR bits */
- outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
- /* verify good DMA status */
- return (dma_stat & 7) != 4;
-}
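An illustrative sketch (not part of the original driver): the final test
above checks the three low SFF-8038i bus-master status bits, where bit 0
is active, bit 1 is error and bit 2 is interrupt. Only the pattern 0x4,
interrupt raised with no error and the engine idle, is a clean completion:

static bool bm_dma_completed_cleanly(u8 dma_stat)
{
	return (dma_stat & 7) == 4;	/* INTR set, ERR and ACTIVE clear */
}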
-
-static int init_chipset_cmd64x(struct pci_dev *dev)
-{
- u8 mrdmode = 0;
-
- /* Set a good latency timer and cache line size value. */
- (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
- /* FIXME: pci_set_master() to ensure a good latency timer value */
-
- /*
- * Enable interrupts, select MEMORY READ LINE for reads.
- *
- * NOTE: although not mentioned in the PCI0646U specs,
- * bits 0-1 are write only and won't be read back as
- * set or not -- PCI0646U2 specs clarify this point.
- */
- (void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
- mrdmode &= ~0x30;
- (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
-
- return 0;
-}
-
-static u8 cmd64x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- switch (dev->device) {
- case PCI_DEVICE_ID_CMD_648:
- case PCI_DEVICE_ID_CMD_649:
- pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
- return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- default:
- return ATA_CBL_PATA40;
- }
-}
-
-static const struct ide_port_ops cmd64x_port_ops = {
- .set_pio_mode = cmd64x_set_pio_mode,
- .set_dma_mode = cmd64x_set_dma_mode,
- .clear_irq = cmd64x_clear_irq,
- .test_irq = cmd64x_test_irq,
- .cable_detect = cmd64x_cable_detect,
-};
-
-static const struct ide_port_ops cmd648_port_ops = {
- .set_pio_mode = cmd64x_set_pio_mode,
- .set_dma_mode = cmd64x_set_dma_mode,
- .clear_irq = cmd648_clear_irq,
- .test_irq = cmd648_test_irq,
- .cable_detect = cmd64x_cable_detect,
-};
-
-static const struct ide_dma_ops cmd646_rev1_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = cmd646_1_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info cmd64x_chipsets[] = {
- { /* 0: CMD643 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
- .port_ops = &cmd64x_port_ops,
- .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
- IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_SERIALIZE,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = 0x00, /* no udma */
- },
- { /* 1: CMD646 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_SERIALIZE,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
- { /* 2: CMD648 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 3: CMD649 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-static int cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = cmd64x_chipsets[idx];
-
- if (idx == 1) {
- /*
- * UltraDMA only supported on PCI646U and PCI646U2, which
- * correspond to revisions 0x03, 0x05 and 0x07 respectively.
- * Actually, although the CMD tech support people won't
- * tell me the details, the 0x03 revision cannot support
- * UDMA correctly without hardware modifications, and even
- * then it only works with Quantum disks due to some
- * hold time assumptions in the 646U part which are fixed
- * in the 646U2.
- *
- * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
- */
- if (dev->revision < 5) {
- d.udma_mask = 0x00;
- /*
- * The original PCI0646 didn't have the primary
- * channel enable bit, it appeared starting with
- * PCI0646U (i.e. revision ID 3).
- */
- if (dev->revision < 3) {
- d.enablebits[0].reg = 0;
- d.port_ops = &cmd64x_port_ops;
- if (dev->revision == 1)
- d.dma_ops = &cmd646_rev1_dma_ops;
- }
- }
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id cmd64x_pci_tbl[] = {
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 2 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 3 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl);
-
-static struct pci_driver cmd64x_pci_driver = {
- .name = "CMD64x_IDE",
- .id_table = cmd64x_pci_tbl,
- .probe = cmd64x_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cmd64x_ide_init(void)
-{
- return ide_pci_register_driver(&cmd64x_pci_driver);
-}
-
-static void __exit cmd64x_ide_exit(void)
-{
- pci_unregister_driver(&cmd64x_pci_driver);
-}
-
-module_init(cmd64x_ide_init);
-module_exit(cmd64x_ide_exit);
-
-MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
deleted file mode 100644
index 89a4ff100b7a..000000000000
--- a/drivers/ide/cs5520.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * IDE tuning and bus mastering support for the CS5510/CS5520
- * chipsets
- *
- * The CS5510/CS5520 are slightly unusual devices. Unlike the
- * typical IDE controllers they do bus mastering with the drive in
- * PIO mode and smarter silicon.
- *
- * The practical upshot of this is that we must always tune the
- * drive for the right PIO mode. We must also ignore all the blacklists
- * and the drive bus mastering DMA information.
- *
- * *** This driver is strictly experimental ***
- *
- * (c) Copyright Red Hat Inc 2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-
-#define DRV_NAME "cs5520"
-
-struct pio_clocks
-{
- int address;
- int assert;
- int recovery;
-};
-
-static struct pio_clocks cs5520_pio_clocks[] = {
- {3, 6, 11},
- {2, 5, 6},
- {1, 4, 3},
- {1, 3, 2},
- {1, 2, 1}
-};
-
-static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int controller = drive->dn > 1 ? 1 : 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* 8bit CAT/CRT - 8bit command timing for channel */
- pci_write_config_byte(pdev, 0x62 + controller,
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
-
- /* 0x64 - 16bit Primary, 0x68 - 16bit Secondary */
-
- /* FIXME: should these use address ? */
- /* Data read timing */
- pci_write_config_byte(pdev, 0x64 + 4*controller + (drive->dn&1),
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
- /* Write command timing */
- pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1),
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
-}
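A worked example of the packing above: for PIO0 the table gives assert = 6
and recovery = 11, so each timing register is written with
(11 << 4) | 6 = 0xb6. As the FIXME notes, the 'address' column is never
actually programmed.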
-
-static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- printk(KERN_ERR "cs55x0: bad ide timing.\n");
-
- drive->pio_mode = XFER_PIO_0 + 0;
- cs5520_set_pio_mode(hwif, drive);
-}
-
-static const struct ide_port_ops cs5520_port_ops = {
- .set_pio_mode = cs5520_set_pio_mode,
- .set_dma_mode = cs5520_set_dma_mode,
-};
-
-static const struct ide_port_info cyrix_chipset = {
- .name = DRV_NAME,
- .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
- .port_ops = &cs5520_port_ops,
- .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_CS5520,
- .pio_mask = ATA_PIO4,
-};
-
-/*
- * The 5510/5520 are a bit weird. They don't quite set up the way
- * the PCI helper layer expects so we must do much of the set up
- * work longhand.
- */
-
-static int cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &cyrix_chipset;
- struct ide_hw hw[2], *hws[] = { NULL, NULL };
-
- ide_setup_pci_noise(dev, d);
-
-	/* We must not grab the entire device: it has 'ISA' space in its
-	 * BARs too, and we would upset other parts of the kernel.
- */
- if (pci_enable_device_io(dev)) {
- printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
- return -ENODEV;
- }
- pci_set_master(dev);
- if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "%s: No suitable DMA available.\n",
- d->name);
- return -ENODEV;
- }
-
- /*
- * Now the chipset is configured we can let the core
- * do all the device setup for us
- */
-
- ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
- hw[0].irq = 14;
- hw[1].irq = 15;
-
- return ide_host_add(d, hws, 2, NULL);
-}
-
-static const struct pci_device_id cs5520_pci_tbl[] = {
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), 0 },
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cs5520_pci_tbl);
-
-static struct pci_driver cs5520_pci_driver = {
- .name = "Cyrix_IDE",
- .id_table = cs5520_pci_tbl,
- .probe = cs5520_init_one,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5520_ide_init(void)
-{
- return ide_pci_register_driver(&cs5520_pci_driver);
-}
-
-module_init(cs5520_ide_init);
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for Cyrix 5510/5520 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
deleted file mode 100644
index 65371599b976..000000000000
--- a/drivers/ide/cs5530.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2000 Mark Lord <mlord@pobox.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor.
- *
- * Documentation:
- * CS5530 documentation available from National Semiconductor.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cs5530"
-
-/*
- * Here are the standard PIO mode 0-4 timings for each "format".
- * Format-0 uses fast data reg timings, with slower command reg timings.
- * Format-1 uses fast timings for all registers, but won't work with all drives.
- */
-static unsigned int cs5530_pio_timings[2][5] = {
- {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
-};
-
-/*
- * After chip reset, the PIO timings are set to 0x0000e132, which is not valid.
- */
-#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
-#define CS5530_BASEREG(hwif) (((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))
-
-/**
- * cs5530_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Handles setting of PIO mode for the chipset.
- *
- * The init_hwif_cs5530() routine guarantees that all drives
- * will have valid default PIO timings set up before we get here.
- */
-
-static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long basereg = CS5530_BASEREG(hwif);
- unsigned int format = (inl(basereg + 4) >> 31) & 1;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
-}
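A worked example of the lookup above: with the format bit clear in drive
0's config register, a drive in PIO0 gets 0x00009172 written at
basereg + 0 (drive 0) or basereg + 8 (drive 1), a value that
CS5530_BAD_PIO() treats as valid, unlike the post-reset 0x0000e132.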
-
-/**
- * cs5530_udma_filter - UDMA filter
- * @drive: drive
- *
- * cs5530_udma_filter() does UDMA mask filtering for the given drive
- * taking into the consideration capabilities of the mate device.
- *
- * The CS5530 specifies that two drives sharing a cable cannot mix
- * UDMA/MDMA. It has to be one or the other, for the pair, though
- * different timings can still be chosen for each drive. We could
- * set the appropriate timing bits on the fly, but that might be
- * a bit confusing. So, for now we statically handle this requirement
- * by looking at our mate drive to see what it is capable of, before
- * choosing a mode for our own drive.
- *
- * Note: This relies on the fact we never fail from UDMA to MWDMA2
- * but instead drop to PIO.
- */
-
-static u8 cs5530_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_drive_t *mate = ide_get_pair_dev(drive);
- u16 *mateid;
- u8 mask = hwif->ultra_mask;
-
- if (mate == NULL)
- goto out;
- mateid = mate->id;
-
- if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) {
- if ((mateid[ATA_ID_FIELD_VALID] & 4) &&
- (mateid[ATA_ID_UDMA_MODES] & 7))
- goto out;
- if (mateid[ATA_ID_MWDMA_MODES] & 7)
- mask = 0;
- }
-out:
- return mask;
-}
-
-static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long basereg;
- unsigned int reg, timings = 0;
-
- switch (drive->dma_mode) {
- case XFER_UDMA_0: timings = 0x00921250; break;
- case XFER_UDMA_1: timings = 0x00911140; break;
- case XFER_UDMA_2: timings = 0x00911030; break;
- case XFER_MW_DMA_0: timings = 0x00077771; break;
- case XFER_MW_DMA_1: timings = 0x00012121; break;
- case XFER_MW_DMA_2: timings = 0x00002020; break;
- }
- basereg = CS5530_BASEREG(hwif);
- reg = inl(basereg + 4); /* get drive0 config register */
- timings |= reg & 0x80000000; /* preserve PIO format bit */
-	if ((drive->dn & 1) == 0) {	/* are we configuring drive0? */
- outl(timings, basereg + 4); /* write drive0 config register */
- } else {
- if (timings & 0x00100000)
- reg |= 0x00100000; /* enable UDMA timings for both drives */
- else
- reg &= ~0x00100000; /* disable UDMA timings for both drives */
- outl(reg, basereg + 4); /* write drive0 config register */
- outl(timings, basereg + 12); /* write drive1 config register */
- }
-}
-
-/**
- * init_chipset_5530 - set up 5530 bridge
- * @dev: PCI device
- *
- * Initialize the cs5530 bridge for reliable IDE DMA operation.
- */
-
-static int init_chipset_cs5530(struct pci_dev *dev)
-{
- struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
-
- if (pci_resource_start(dev, 4) == 0)
- return -EFAULT;
-
- dev = NULL;
- while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
- switch (dev->device) {
- case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
- master_0 = pci_dev_get(dev);
- break;
- case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
- cs5530_0 = pci_dev_get(dev);
- break;
- }
- }
- if (!master_0) {
- printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
- goto out;
- }
- if (!cs5530_0) {
- printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
- goto out;
- }
-
- /*
- * Enable BusMaster and MemoryWriteAndInvalidate for the cs5530:
- * --> OR 0x14 into 16-bit PCI COMMAND reg of function 0 of the cs5530
- */
-
- pci_set_master(cs5530_0);
- pci_try_set_mwi(cs5530_0);
-
- /*
- * Set PCI CacheLineSize to 16-bytes:
- * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
- */
-
- pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
-
- /*
- * Disable trapping of UDMA register accesses (Win98 hack):
- * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
- */
-
- pci_write_config_word(cs5530_0, 0xd0, 0x5006);
-
- /*
- * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
- * The other settings are what is necessary to get the register
- * into a sane state for IDE DMA operation.
- */
-
- pci_write_config_byte(master_0, 0x40, 0x1e);
-
- /*
- * Set max PCI burst size (16-bytes seems to work best):
- * 16bytes: set bit-1 at 0x41 (reg value of 0x16)
- * all others: clear bit-1 at 0x41, and do:
- * 128bytes: OR 0x00 at 0x41
- * 256bytes: OR 0x04 at 0x41
- * 512bytes: OR 0x08 at 0x41
- * 1024bytes: OR 0x0c at 0x41
- */
-
- pci_write_config_byte(master_0, 0x41, 0x14);
-
- /*
- * These settings are necessary to get the chip
- * into a sane state for IDE DMA operation.
- */
-
- pci_write_config_byte(master_0, 0x42, 0x00);
- pci_write_config_byte(master_0, 0x43, 0xc1);
-
-out:
- pci_dev_put(master_0);
- pci_dev_put(cs5530_0);
- return 0;
-}
-
-/**
- * init_hwif_cs5530 - initialise an IDE channel
- * @hwif: IDE to initialize
- *
- * This gets invoked by the IDE driver once for each channel. It
- * performs channel-specific pre-initialization before drive probing.
- */
-
-static void init_hwif_cs5530 (ide_hwif_t *hwif)
-{
- unsigned long basereg;
- u32 d0_timings;
-
- basereg = CS5530_BASEREG(hwif);
- d0_timings = inl(basereg + 0);
- if (CS5530_BAD_PIO(d0_timings))
- outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
- if (CS5530_BAD_PIO(inl(basereg + 8)))
- outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
-}
-
-static const struct ide_port_ops cs5530_port_ops = {
- .set_pio_mode = cs5530_set_pio_mode,
- .set_dma_mode = cs5530_set_dma_mode,
- .udma_filter = cs5530_udma_filter,
-};
-
-static const struct ide_port_info cs5530_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_cs5530,
- .init_hwif = init_hwif_cs5530,
- .port_ops = &cs5530_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_POST_SET_MODE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &cs5530_chipset, NULL);
-}
-
-static const struct pci_device_id cs5530_pci_tbl[] = {
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cs5530_pci_tbl);
-
-static struct pci_driver cs5530_pci_driver = {
- .name = "CS5530 IDE",
- .id_table = cs5530_pci_tbl,
- .probe = cs5530_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5530_ide_init(void)
-{
- return ide_pci_register_driver(&cs5530_pci_driver);
-}
-
-static void __exit cs5530_ide_exit(void)
-{
- pci_unregister_driver(&cs5530_pci_driver);
-}
-
-module_init(cs5530_ide_init);
-module_exit(cs5530_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
deleted file mode 100644
index 70fdbe3161f8..000000000000
--- a/drivers/ide/cs5535.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * History:
- * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
- * - Reworked tuneproc, set_drive, misc mods to prep for mainline
- * - Work was sponsored by CIS (M) Sdn Bhd.
- * Ported to Kernel 2.6.11 on June 26, 2005 by
- * Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
- * Alexander Kiausch <alex.kiausch@t-online.de>
- * Originally developed by AMD for 2.4/2.6
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor/AMD.
- *
- * Documentation:
- * CS5535 documentation available from AMD
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "cs5535"
-
-#define MSR_ATAC_BASE 0x51300000
-#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
-#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
-#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
-#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
-#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
-#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
-#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
-#define ATAC_RESET (MSR_ATAC_BASE+0x10)
-#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
-#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
-#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
-#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
-#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
-#define ATAC_BM0_CMD_PRIM 0x00
-#define ATAC_BM0_STS_PRIM 0x02
-#define ATAC_BM0_PRD 0x04
-#define CS5535_CABLE_DETECT 0x48
-
-/* Format I PIO settings. We separate out cmd and data for safer timings */
-
-static unsigned int cs5535_pio_cmd_timings[5] =
-{ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 };
-static unsigned int cs5535_pio_dta_timings[5] =
-{ 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 };
-
-static unsigned int cs5535_mwdma_timings[3] =
-{ 0x7F0FFFF3, 0x7F035352, 0x7f024241 };
-
-static unsigned int cs5535_udma_timings[5] =
-{ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061 };
-
-/* Macros to check if the register is the reset value - reset value is an
- invalid timing and indicates the register has not been set previously */
-
-#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL) == 0x00009172 )
-#define CS5535_BAD_DMA(timings) ( (timings & 0x000FFFFF) == 0x00077771 )
-
-/****
- * cs5535_set_speed - Configure the chipset to the new speed
- * @drive: Drive to set up
- * @speed: desired speed
- *
- * cs5535_set_speed() configures the chipset to a new speed.
- */
-static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
-{
- u32 reg = 0, dummy;
- u8 unit = drive->dn & 1;
-
- /* Set the PIO timings */
- if (speed < XFER_SW_DMA_0) {
- ide_drive_t *pair = ide_get_pair_dev(drive);
- u8 cmd, pioa;
-
- cmd = pioa = speed - XFER_PIO_0;
-
- if (pair) {
- u8 piob = pair->pio_mode - XFER_PIO_0;
-
- if (piob < cmd)
- cmd = piob;
- }
-
- /* Write the speed of the current drive */
- reg = (cs5535_pio_cmd_timings[cmd] << 16) |
- cs5535_pio_dta_timings[pioa];
- wrmsr(unit ? ATAC_CH0D1_PIO : ATAC_CH0D0_PIO, reg, 0);
-
-		/* And if necessary, change the speed of the other drive */
- rdmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, dummy);
-
- if (((reg >> 16) & cs5535_pio_cmd_timings[cmd]) !=
- cs5535_pio_cmd_timings[cmd]) {
- reg &= 0x0000FFFF;
- reg |= cs5535_pio_cmd_timings[cmd] << 16;
- wrmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, 0);
- }
-
- /* Set bit 31 of the DMA register for PIO format 1 timings */
- rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
- wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA,
- reg | 0x80000000UL, 0);
- } else {
- rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
-
- reg &= 0x80000000UL; /* Preserve the PIO format bit */
-
- if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_4)
- reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
- else
- return;
-
- wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, 0);
- }
-}
-
-/**
- * cs5535_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Programs the chipset for DMA mode.
- */
-
-static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- cs5535_set_speed(drive, drive->dma_mode);
-}
-
-/**
- * cs5535_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * A callback from the upper layers for PIO-only tuning.
- */
-
-static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- cs5535_set_speed(drive, drive->pio_mode);
-}
-
-static u8 cs5535_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 bit;
-
- /* if a 80 wire cable was detected */
- pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
-
- return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops cs5535_port_ops = {
- .set_pio_mode = cs5535_set_pio_mode,
- .set_dma_mode = cs5535_set_dma_mode,
- .cable_detect = cs5535_cable_detect,
-};
-
-static const struct ide_port_info cs5535_chipset = {
- .name = DRV_NAME,
- .port_ops = &cs5535_port_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
-};
-
-static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &cs5535_chipset, NULL);
-}
-
-static const struct pci_device_id cs5535_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, cs5535_pci_tbl);
-
-static struct pci_driver cs5535_pci_driver = {
- .name = "CS5535_IDE",
- .id_table = cs5535_pci_tbl,
- .probe = cs5535_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5535_ide_init(void)
-{
- return ide_pci_register_driver(&cs5535_pci_driver);
-}
-
-static void __exit cs5535_ide_exit(void)
-{
- pci_unregister_driver(&cs5535_pci_driver);
-}
-
-module_init(cs5535_ide_init);
-module_exit(cs5535_ide_exit);
-
-MODULE_AUTHOR("AMD");
-MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
deleted file mode 100644
index 8b5ca145191b..000000000000
--- a/drivers/ide/cs5536.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CS5536 PATA support
- * (C) 2007 Martin K. Petersen <mkp@mkp.net>
- * (C) 2009 Bartlomiej Zolnierkiewicz
- *
- * Documentation:
- *	Available from the AMD web site.
- *
- * The IDE timing registers for the CS5536 live in the Geode Machine
- * Specific Register file and not PCI config space. Most BIOSes
- * virtualize the PCI registers so the chip looks like a standard IDE
- * controller. Unfortunately not all implementations get this right.
- * In particular some have problems with unaligned accesses to the
- * virtualized PCI registers. This driver always does full dword
- * writes to work around the issue. Also, in case of a bad BIOS this
- * driver can be loaded with the "msr=1" parameter which forces using
- * the Machine Specific Registers to configure the device.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <asm/msr.h>
-
-#define DRV_NAME "cs5536"
-
-enum {
- MSR_IDE_CFG = 0x51300010,
- PCI_IDE_CFG = 0x40,
-
- CFG = 0,
- DTC = 2,
- CAST = 3,
- ETC = 4,
-
- IDE_CFG_CHANEN = (1 << 1),
- IDE_CFG_CABLE = (1 << 17) | (1 << 16),
-
- IDE_D0_SHIFT = 24,
- IDE_D1_SHIFT = 16,
- IDE_DRV_MASK = 0xff,
-
- IDE_CAST_D0_SHIFT = 6,
- IDE_CAST_D1_SHIFT = 4,
- IDE_CAST_DRV_MASK = 0x3,
-
- IDE_CAST_CMD_SHIFT = 24,
- IDE_CAST_CMD_MASK = 0xff,
-
- IDE_ETC_UDMA_MASK = 0xc0,
-};
-
-static int use_msr;
-
-static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
-{
- if (unlikely(use_msr)) {
- u32 dummy;
-
- rdmsr(MSR_IDE_CFG + reg, *val, dummy);
- return 0;
- }
-
- return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
-}
-
-static int cs5536_write(struct pci_dev *pdev, int reg, int val)
-{
- if (unlikely(use_msr)) {
- wrmsr(MSR_IDE_CFG + reg, val, 0);
- return 0;
- }
-
- return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
-}
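
The two access paths index differently: MSRs step by one register, while the virtualized PCI registers are packed as dwords. For example, reaching the DTC register (reg = 2) means MSR 0x51300010 + 2 = 0x51300012 in MSR mode, but PCI config offset 0x40 + 2 * 4 = 0x48 otherwise; the PCI path always uses full dword accesses, matching the workaround described in the header comment.
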
-
-static void cs5536_program_dtc(ide_drive_t *drive, u8 tim)
-{
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
- int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- u32 dtc;
-
- cs5536_read(pdev, DTC, &dtc);
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= tim << dshift;
- cs5536_write(pdev, DTC, dtc);
-}
-
-/**
- * cs5536_cable_detect - detect cable type
- * @hwif: Port to detect on
- *
- * Perform cable detection for an ATA66-capable cable.
- *
- * Returns a cable type.
- */
-
-static u8 cs5536_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u32 cfg;
-
- cs5536_read(pdev, CFG, &cfg);
-
- if (cfg & IDE_CFG_CABLE)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-/**
- * cs5536_set_pio_mode - PIO timing setup
- * @hwif: ATA port
- * @drive: ATA device
- */
-
-static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 drv_timings[5] = {
- 0x98, 0x55, 0x32, 0x21, 0x20,
- };
-
- static const u8 addr_timings[5] = {
- 0x2, 0x1, 0x0, 0x0, 0x0,
- };
-
- static const u8 cmd_timings[5] = {
- 0x99, 0x92, 0x90, 0x22, 0x20,
- };
-
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u32 cast;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 cmd_pio = pio;
-
- if (pair)
- cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);
-
- timings &= (IDE_DRV_MASK << 8);
- timings |= drv_timings[pio];
- ide_set_drivedata(drive, (void *)timings);
-
- cs5536_program_dtc(drive, drv_timings[pio]);
-
- cs5536_read(pdev, CAST, &cast);
-
- cast &= ~(IDE_CAST_DRV_MASK << cshift);
- cast |= addr_timings[pio] << cshift;
-
- cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
- cast |= cmd_timings[cmd_pio] << IDE_CAST_CMD_SHIFT;
-
- cs5536_write(pdev, CAST, cast);
-}
-
-/**
- * cs5536_set_dma_mode - DMA timing setup
- * @hwif: ATA port
- * @drive: ATA device
- */
-
-static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 udma_timings[6] = {
- 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
- };
-
- static const u8 mwdma_timings[3] = {
- 0x67, 0x21, 0x20,
- };
-
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u32 etc;
- const u8 mode = drive->dma_mode;
-
- cs5536_read(pdev, ETC, &etc);
-
- if (mode >= XFER_UDMA_0) {
- etc &= ~(IDE_DRV_MASK << dshift);
- etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
- } else { /* MWDMA */
- etc &= ~(IDE_ETC_UDMA_MASK << dshift);
- timings &= IDE_DRV_MASK;
- timings |= mwdma_timings[mode - XFER_MW_DMA_0] << 8;
- ide_set_drivedata(drive, (void *)timings);
- }
-
- cs5536_write(pdev, ETC, etc);
-}
-
-static void cs5536_dma_start(ide_drive_t *drive)
-{
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
-
- if (drive->current_speed < XFER_UDMA_0 &&
- (timings >> 8) != (timings & IDE_DRV_MASK))
- cs5536_program_dtc(drive, timings >> 8);
-
- ide_dma_start(drive);
-}
-
-static int cs5536_dma_end(ide_drive_t *drive)
-{
- int ret = ide_dma_end(drive);
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
-
- if (drive->current_speed < XFER_UDMA_0 &&
- (timings >> 8) != (timings & IDE_DRV_MASK))
- cs5536_program_dtc(drive, timings & IDE_DRV_MASK);
-
- return ret;
-}
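
The drive's private data word acts as a two-slot timing cache: cs5536_set_pio_mode() keeps the PIO DTC timing in bits 0-7 and cs5536_set_dma_mode() keeps the MWDMA timing in bits 8-15, so the two hooks above can flip the DTC register around each transfer without recomputing anything. A minimal sketch of that packing, reusing the driver's IDE_DRV_MASK (helper names are hypothetical):

	/* Sketch: drivedata layout assumed by cs5536_dma_start()/_end() --
	 * bits 0..7 hold the PIO timing, bits 8..15 the MWDMA timing. */
	#define DTC_PIO(d)	((d) & IDE_DRV_MASK)
	#define DTC_MWDMA(d)	(((d) >> 8) & IDE_DRV_MASK)

	static unsigned long pack_dtc_timings(u8 pio_tim, u8 mwdma_tim)
	{
		return pio_tim | ((unsigned long)mwdma_tim << 8);
	}
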
-
-static const struct ide_port_ops cs5536_port_ops = {
- .set_pio_mode = cs5536_set_pio_mode,
- .set_dma_mode = cs5536_set_dma_mode,
- .cable_detect = cs5536_cable_detect,
-};
-
-static const struct ide_dma_ops cs5536_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = cs5536_dma_start,
- .dma_end = cs5536_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info cs5536_info = {
- .name = DRV_NAME,
- .port_ops = &cs5536_port_ops,
- .dma_ops = &cs5536_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
-};
-
-/**
- * cs5536_init_one
- * @dev: PCI device
- * @id: Entry in match table
- */
-
-static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- u32 cfg;
-
- if (use_msr)
- printk(KERN_INFO DRV_NAME ": Using MSR regs instead of PCI\n");
-
- cs5536_read(dev, CFG, &cfg);
-
- if ((cfg & IDE_CFG_CHANEN) == 0) {
- printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
- return -ENODEV;
- }
-
- return ide_pci_init_one(dev, &cs5536_info, NULL);
-}
-
-static const struct pci_device_id cs5536_pci_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
- { },
-};
-
-static struct pci_driver cs5536_pci_driver = {
- .name = DRV_NAME,
- .id_table = cs5536_pci_tbl,
- .probe = cs5536_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-module_pci_driver(cs5536_pci_driver);
-
-MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);
-
-module_param_named(msr, use_msr, int, 0644);
-MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
deleted file mode 100644
index bc01660ee8fd..000000000000
--- a/drivers/ide/cy82c693.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
- * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
- *
- * CYPRESS CY82C693 chipset IDE controller
- *
- * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cy82c693"
-
-/*
- * NOTE: the value for busmaster timeout is tricky and I got it by
- * trial and error! Using too low a value causes DMA timeouts and
- * hurts IDE performance, while too high a value makes audio playback
- * stutter.
- * If you know a better value or how to calculate it, please let me know.
- */
-
-/* twice the value written in cy82c693ub datasheet */
-#define BUSMASTER_TIMEOUT 0x50
-/*
- * the value above was tested on my machine and it seems to work okay
- */
-
-/* here are the offset definitions for the registers */
-#define CY82_IDE_CMDREG 0x04
-#define CY82_IDE_ADDRSETUP 0x48
-#define CY82_IDE_MASTER_IOR 0x4C
-#define CY82_IDE_MASTER_IOW 0x4D
-#define CY82_IDE_SLAVE_IOR 0x4E
-#define CY82_IDE_SLAVE_IOW 0x4F
-#define CY82_IDE_MASTER_8BIT 0x50
-#define CY82_IDE_SLAVE_8BIT 0x51
-
-#define CY82_INDEX_PORT 0x22
-#define CY82_DATA_PORT 0x23
-
-#define CY82_INDEX_CHANNEL0 0x30
-#define CY82_INDEX_CHANNEL1 0x31
-#define CY82_INDEX_TIMEOUT 0x32
-
-/*
- * set DMA mode for a specific channel of the CY82C693
- */
-
-static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 mode = drive->dma_mode;
- u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
-
- index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
-
- data = (mode & 3) | (single << 2);
-
- outb(index, CY82_INDEX_PORT);
- outb(data, CY82_DATA_PORT);
-
- /*
-	 * note: below we set the value of the Bus Master IDE TimeOut Register.
-	 * I'm not absolutely sure what this does, but it solved my problem
-	 * with IDE DMA and sound, so I can now play sound and use
-	 * my IDE driver at the same time :-)
- *
- * If you know the correct (best) value for this register please
- * let me know - ASK
- */
-
- data = BUSMASTER_TIMEOUT;
- outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
- outb(data, CY82_DATA_PORT);
-}
-
-static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- const unsigned long T = 1000000 / bus_speed;
- unsigned int addrCtrl;
- struct ide_timing t;
- u8 time_16, time_8;
-
- /* select primary or secondary channel */
- if (drive->dn > 1) { /* drive is on the secondary channel */
- dev = pci_get_slot(dev->bus, dev->devfn+1);
- if (!dev) {
- printk(KERN_ERR "%s: tune_drive: "
- "Cannot find secondary interface!\n",
- drive->name);
- return;
- }
- }
-
- ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
-
- time_16 = clamp_val(t.recover - 1, 0, 15) |
- (clamp_val(t.active - 1, 0, 15) << 4);
- time_8 = clamp_val(t.act8b - 1, 0, 15) |
- (clamp_val(t.rec8b - 1, 0, 15) << 4);
-
- /* now let's write the clocks registers */
- if ((drive->dn & 1) == 0) {
- /*
- * set master drive
- * address setup control register
- * is 32 bit !!!
- */
- pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
-
- addrCtrl &= (~0xF);
- addrCtrl |= clamp_val(t.setup - 1, 0, 15);
- pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
-
- /* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
- pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
- } else {
- /*
- * set slave drive
- * address setup control register
- * is 32 bit !!!
- */
- pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
-
- addrCtrl &= (~0xF0);
- addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
- pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
-
- /* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
- }
- if (drive->dn > 1)
- pci_dev_put(dev);
-}
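
Each 8-bit clock register written above packs two 4-bit cycle counts, stored as cycles minus one and clamped to 15: for time_16, recover sits in the low nibble and active in the high nibble (time_8 uses the opposite order for the 8-bit act8b/rec8b counts). A minimal sketch with a hypothetical helper name:

	/* Sketch of the nibble packing used for time_16 above. */
	static u8 pack_cy82_clocks(u8 active, u8 recover)
	{
		return clamp_val(recover - 1, 0, 15) |
		       (clamp_val(active - 1, 0, 15) << 4);
	}

For instance, four active and two recovery clocks encode as 0x31.
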
-
-static void init_iops_cy82c693(ide_hwif_t *hwif)
-{
- static ide_hwif_t *primary;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (PCI_FUNC(dev->devfn) == 1)
- primary = hwif;
- else {
- hwif->mate = primary;
- hwif->channel = 1;
- }
-}
-
-static const struct ide_port_ops cy82c693_port_ops = {
- .set_pio_mode = cy82c693_set_pio_mode,
- .set_dma_mode = cy82c693_set_dma_mode,
-};
-
-static const struct ide_port_info cy82c693_chipset = {
- .name = DRV_NAME,
- .init_iops = init_iops_cy82c693,
- .port_ops = &cy82c693_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int cy82c693_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct pci_dev *dev2;
- int ret = -ENODEV;
-
-	/* CY82C693 is more than just an IDE controller.
-	   Function 1 is the primary IDE channel, function 2 the secondary. */
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
- PCI_FUNC(dev->devfn) == 1) {
- dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
- ret = ide_pci_init_two(dev, dev2, &cy82c693_chipset, NULL);
- if (ret)
- pci_dev_put(dev2);
- }
- return ret;
-}
-
-static void cy82c693_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
-}
-
-static const struct pci_device_id cy82c693_pci_tbl[] = {
- { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cy82c693_pci_tbl);
-
-static struct pci_driver cy82c693_pci_driver = {
- .name = "Cypress_IDE",
- .id_table = cy82c693_pci_tbl,
- .probe = cy82c693_init_one,
- .remove = cy82c693_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cy82c693_ide_init(void)
-{
- return ide_pci_register_driver(&cy82c693_pci_driver);
-}
-
-static void __exit cy82c693_ide_exit(void)
-{
- pci_unregister_driver(&cy82c693_pci_driver);
-}
-
-module_init(cy82c693_ide_init);
-module_exit(cy82c693_ide_exit);
-
-MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
deleted file mode 100644
index 300daabaa575..000000000000
--- a/drivers/ide/delkin_cb.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Created 20 Oct 2004 by Mark Lord
- *
- * Basic support for Delkin/ASKA/Workbit Cardbus CompactFlash adapter
- *
- * Modeled after the 16-bit PCMCIA driver: ide-cs.c
- *
- * This is slightly peculiar, in that it is a PCI driver,
- * but is NOT an IDE PCI driver -- the IDE layer does not directly
- * support hot insertion/removal of PCI interfaces, so this driver
- * is unable to use the IDE PCI interfaces. Instead, it uses the
- * same interfaces as the ide-cs (PCMCIA) driver uses.
- * On the plus side, the driver is also smaller/simpler this way.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-/*
- * No chip documentation has yet been found,
- * so these configuration values were pulled from
- * a running Win98 system using "debug".
- * This gives around 3MByte/second read performance,
- * which is about 2/3 of what the chip is capable of.
- *
- * There is also a 4KByte mmio region on the card,
- * but its purpose has yet to be reverse-engineered.
- */
-static const u8 setup[] = {
- 0x00, 0x05, 0xbe, 0x01, 0x20, 0x8f, 0x00, 0x00,
- 0xa4, 0x1f, 0xb3, 0x1b, 0x00, 0x00, 0x00, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
-};
-
-static const struct ide_port_ops delkin_cb_port_ops = {
- .quirkproc = ide_undecoded_slave,
-};
-
-static int delkin_cb_init_chipset(struct pci_dev *dev)
-{
- unsigned long base = pci_resource_start(dev, 0);
- int i;
-
- outb(0x02, base + 0x1e); /* set nIEN to block interrupts */
- inb(base + 0x17); /* read status to clear interrupts */
-
- for (i = 0; i < sizeof(setup); ++i) {
- if (setup[i])
- outb(setup[i], base + i);
- }
-
- return 0;
-}
-
-static const struct ide_port_info delkin_cb_port_info = {
- .port_ops = &delkin_cb_port_ops,
- .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_UNMASK_IRQS |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .init_chipset = delkin_cb_init_chipset,
- .chipset = ide_pci,
-};
-
-static int delkin_cb_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_host *host;
- unsigned long base;
- int rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- rc = pci_enable_device(dev);
- if (rc) {
- printk(KERN_ERR "delkin_cb: pci_enable_device failed (%d)\n", rc);
- return rc;
- }
- rc = pci_request_regions(dev, "delkin_cb");
- if (rc) {
- printk(KERN_ERR "delkin_cb: pci_request_regions failed (%d)\n", rc);
- pci_disable_device(dev);
- return rc;
- }
- base = pci_resource_start(dev, 0);
-
- delkin_cb_init_chipset(dev);
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
- hw.irq = dev->irq;
- hw.dev = &dev->dev;
-
- rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
- if (rc)
- goto out_disable;
-
- pci_set_drvdata(dev, host);
-
- return 0;
-
-out_disable:
- pci_release_regions(dev);
- pci_disable_device(dev);
- return rc;
-}
-
-static void
-delkin_cb_remove (struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- ide_host_remove(host);
-
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-#ifdef CONFIG_PM
-static int delkin_cb_suspend(struct pci_dev *dev, pm_message_t state)
-{
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
- return 0;
-}
-
-static int delkin_cb_resume(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- int rc;
-
- pci_set_power_state(dev, PCI_D0);
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_restore_state(dev);
- pci_set_master(dev);
-
- if (host->init_chipset)
- host->init_chipset(dev);
-
- return 0;
-}
-#else
-#define delkin_cb_suspend NULL
-#define delkin_cb_resume NULL
-#endif
-
-static struct pci_device_id delkin_cb_pci_tbl[] = {
- { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
-
-static struct pci_driver delkin_cb_pci_driver = {
- .name = "Delkin-ASKA-Workbit Cardbus IDE",
- .id_table = delkin_cb_pci_tbl,
- .probe = delkin_cb_probe,
- .remove = delkin_cb_remove,
- .suspend = delkin_cb_suspend,
- .resume = delkin_cb_resume,
-};
-
-module_pci_driver(delkin_cb_pci_driver);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
deleted file mode 100644
index 714e8cd0fa49..000000000000
--- a/drivers/ide/dtc2278.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996 Linus Torvalds & author (see below)
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "dtc2278"
-
-/*
- * Changing this #undef to #define may solve start-up problems in some systems.
- */
-#undef ALWAYS_SET_DTC2278_PIO_MODE
-
-/*
- * From: andy@cercle.cts.com (Dyan Wile)
- *
- * Below is a patch for DTC-2278 and similar software-programmable controllers.
- * The code enables the secondary IDE controller and the PIO4 (3?) timings on
- * the primary (EIDE). You will probably have to enable 32-bit support to
- * get the full speed. You had better disable disk interrupts (hdparm -u0
- * /dev/hd..) for the drives connected to the EIDE interface. (I get my
- * filesystem corrupted with -u1, but under heavy disk load only :-)
- *
- * This card is now forced to use the "serialize" feature,
- * and irq-unmasking is disallowed. If io_32bit is enabled,
- * it must be done for BOTH drives on each interface.
- *
- * This code was written for the DTC2278E, but might work with any of these:
- *
- * DTC2278S has only a single IDE interface.
- * DTC2278D has two IDE interfaces and is otherwise identical to the S version.
- * DTC2278E also has serial ports and a printer port
- * DTC2278EB: has onboard BIOS, and "works like a charm" -- Kent Bradford <kent@theory.caltech.edu>
- *
- * There may be a fourth controller type. The S and D versions use the
- * Winbond chip, and I think the E version does also.
- *
- */
-
-static void sub22 (char b, char c)
-{
- int i;
-
- for(i = 0; i < 3; ++i) {
- inb(0x3f6);
- outb_p(b,0xb0);
- inb(0x3f6);
- outb_p(c,0xb4);
- inb(0x3f6);
- if(inb(0xb4) == c) {
- outb_p(7,0xb0);
- inb(0x3f6);
- return; /* success */
- }
- }
-}
-
-static DEFINE_SPINLOCK(dtc2278_lock);
-
-static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long flags;
-
- if (drive->pio_mode >= XFER_PIO_3) {
- spin_lock_irqsave(&dtc2278_lock, flags);
- /*
- * This enables PIO mode4 (3?) on the first interface
- */
- sub22(1,0xc3);
- sub22(0,0xa0);
- spin_unlock_irqrestore(&dtc2278_lock, flags);
- } else {
-		/* We don't know how to set it back again...
-		   Actually we do - there is a data sheet available for the
-		   Winbond, but does anyone actually care? */
- }
-}
-
-static const struct ide_port_ops dtc2278_port_ops = {
- .set_pio_mode = dtc2278_set_pio_mode,
-};
-
-static const struct ide_port_info dtc2278_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_dtc2278,
- .port_ops = &dtc2278_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_UNMASK_IRQS |
- IDE_HFLAG_IO_32BIT |
- /* disallow ->io_32bit changes */
- IDE_HFLAG_NO_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_DTC2278,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init dtc2278_probe(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- /*
- * This enables the second interface
- */
- outb_p(4,0xb0);
- inb(0x3f6);
- outb_p(0x20,0xb4);
- inb(0x3f6);
-#ifdef ALWAYS_SET_DTC2278_PIO_MODE
- /*
- * This enables PIO mode4 (3?) on the first interface
- * and may solve start-up problems for some people.
- */
- sub22(1,0xc3);
- sub22(0,0xa0);
-#endif
- local_irq_restore(flags);
-
- return ide_legacy_device_add(&dtc2278_port_info, 0);
-}
-
-static bool probe_dtc2278;
-
-module_param_named(probe, probe_dtc2278, bool, 0);
-MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
-
-static int __init dtc2278_init(void)
-{
- if (probe_dtc2278 == 0)
- return -ENODEV;
-
- if (dtc2278_probe()) {
- printk(KERN_ERR "dtc2278: ide interfaces already in use!\n");
- return -EBUSY;
- }
- return 0;
-}
-
-module_init(dtc2278_init);
-
-MODULE_AUTHOR("See Local File");
-MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
deleted file mode 100644
index bb86d84558d9..000000000000
--- a/drivers/ide/falconide.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Atari Falcon IDE Driver
- *
- * Created 12 Jul 1997 by Geert Uytterhoeven
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-
-#include <asm/setup.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_stdma.h>
-#include <asm/ide.h>
-
-#define DRV_NAME "falconide"
-
- /*
- * Offsets from base address
- */
-
-#define ATA_HD_CONTROL 0x39
-
- /*
- * falconide_intr_lock is used to obtain access to the IDE interrupt,
- * which is shared between several drivers.
- */
-
-static int falconide_intr_lock;
-
-static void falconide_release_lock(void)
-{
- if (falconide_intr_lock == 0) {
- printk(KERN_ERR "%s: bug\n", __func__);
- return;
- }
- falconide_intr_lock = 0;
- stdma_release();
-}
-
-static void falconide_get_lock(irq_handler_t handler, void *data)
-{
- if (falconide_intr_lock == 0) {
- stdma_lock(handler, data);
- falconide_intr_lock = 1;
- }
-}
-
-static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_insw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_outsw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
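
Because the Falcon's IDE data bus is wired byte-swapped, the two helpers above choose between a plain word transfer for filesystem payloads and a swapping transfer for everything else, so that register-level data (identify blocks, taskfile payloads) comes out in the expected byte order. What the swapping variant effectively does to each 16-bit word, as an illustration only (swap_word() is a hypothetical helper, not a kernel API):

	static inline u16 swap_word(u16 w)
	{
		return (w << 8) | (w >> 8);
	}
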
-
-/* Atari has a byte-swapped IDE interface */
-static const struct ide_tp_ops falconide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = falconide_input_data,
- .output_data = falconide_output_data,
-};
-
-static const struct ide_port_info falconide_port_info = {
- .get_lock = falconide_get_lock,
- .release_lock = falconide_release_lock,
- .tp_ops = &falconide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-static void __init falconide_setup_ports(struct ide_hw *hw, unsigned long base)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 1 + i * 4;
-
- hw->io_ports.ctl_addr = base + ATA_HD_CONTROL;
-
- hw->irq = IRQ_MFP_IDE;
-}
-
- /*
- * Probe for a Falcon IDE interface
- */
-
-static int __init falconide_init(struct platform_device *pdev)
-{
- struct resource *res;
- struct ide_host *host;
- struct ide_hw hw, *hws[] = { &hw };
- unsigned long base;
- int rc;
-
- dev_info(&pdev->dev, "Atari Falcon IDE controller\n");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), DRV_NAME)) {
- dev_err(&pdev->dev, "resources busy\n");
- return -EBUSY;
- }
-
- base = (unsigned long)res->start;
-
- falconide_setup_ports(&hw, base);
-
- host = ide_host_alloc(&falconide_port_info, hws, 1);
- if (host == NULL) {
- rc = -ENOMEM;
- goto err;
- }
-
- falconide_get_lock(NULL, NULL);
- rc = ide_host_register(host, &falconide_port_info, hws);
- falconide_release_lock();
-
- if (rc)
- goto err_free;
-
- platform_set_drvdata(pdev, host);
- return 0;
-err_free:
- ide_host_free(host);
-err:
- release_mem_region(res->start, resource_size(res));
- return rc;
-}
-
-static int falconide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
-
- return 0;
-}
-
-static struct platform_driver ide_falcon_driver = {
- .remove = falconide_remove,
- .driver = {
- .name = "atari-falcon-ide",
- },
-};
-
-module_platform_driver_probe(ide_falcon_driver, falconide_init);
-
-MODULE_AUTHOR("Geert Uytterhoeven");
-MODULE_DESCRIPTION("low-level driver for Atari Falcon IDE");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atari-falcon-ide");
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
deleted file mode 100644
index 901e6ebfeb96..000000000000
--- a/drivers/ide/gayle.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Amiga Gayle IDE Driver
- *
- * Created 9 Jul 1997 by Geert Uytterhoeven
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/zorro.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <asm/setup.h>
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#include <asm/amigayle.h>
-
-
- /*
- * Offsets from one of the above bases
- */
-
-#define GAYLE_CONTROL 0x101a
-
- /*
- * These are at different offsets from the base
- */
-
-#define GAYLE_IRQ_4000	0xdd3020	/* MSB = 1 if the hard disk is the */
-#define GAYLE_IRQ_1200	0xda9000	/* interrupt source */
-
-
- /*
- * Offset of the secondary port for IDE doublers
- * Note that GAYLE_CONTROL is NOT available then!
- */
-
-#define GAYLE_NEXT_PORT 0x1000
-
-#define GAYLE_NUM_HWIFS 2
-#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
- GAYLE_NUM_HWIFS-1)
-#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
-
-static bool ide_doubler;
-module_param_named(doubler, ide_doubler, bool, 0);
-MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
-
- /*
- * Check and acknowledge the interrupt status
- */
-
-static int gayle_test_irq(ide_hwif_t *hwif)
-{
- unsigned char ch;
-
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & GAYLE_IRQ_IDE))
- return 0;
- return 1;
-}
-
-static void gayle_a1200_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- (void)z_readb(hwif->io_ports.status_addr);
- z_writeb(0x7c, hwif->io_ports.irq_addr);
-}
-
-static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
- unsigned long ctl, unsigned long irq_port)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 2 + i * 4;
-
- hw->io_ports.ctl_addr = ctl;
- hw->io_ports.irq_addr = irq_port;
-
- hw->irq = IRQ_AMIGA_PORTS;
-}
-
-static const struct ide_port_ops gayle_a4000_port_ops = {
- .test_irq = gayle_test_irq,
-};
-
-static const struct ide_port_ops gayle_a1200_port_ops = {
- .clear_irq = gayle_a1200_clear_irq,
- .test_irq = gayle_test_irq,
-};
-
-static const struct ide_port_info gayle_port_info = {
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
- /*
- * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
- */
-
-static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct gayle_ide_platform_data *pdata;
- unsigned long base, ctrlport, irqport;
- unsigned int i;
- int error;
- struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
- struct ide_port_info d = gayle_port_info;
- struct ide_host *host;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!request_mem_region(res->start, resource_size(res), "IDE"))
- return -EBUSY;
-
- pdata = dev_get_platdata(&pdev->dev);
- pr_info("ide: Gayle IDE controller (A%u style%s)\n",
- pdata->explicit_ack ? 1200 : 4000,
- ide_doubler ? ", IDE doubler" : "");
-
- base = (unsigned long)ZTWO_VADDR(pdata->base);
- ctrlport = 0;
- irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
- if (pdata->explicit_ack)
- d.port_ops = &gayle_a1200_port_ops;
- else
- d.port_ops = &gayle_a4000_port_ops;
-
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
- if (GAYLE_HAS_CONTROL_REG)
- ctrlport = base + GAYLE_CONTROL;
-
- gayle_setup_ports(&hw[i], base, ctrlport, irqport);
- hws[i] = &hw[i];
- }
-
- error = ide_host_add(&d, hws, i, &host);
- if (error)
- goto out;
-
- platform_set_drvdata(pdev, host);
- return 0;
-
-out:
- release_mem_region(res->start, resource_size(res));
- return error;
-}
-
-static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- ide_host_remove(host);
- release_mem_region(res->start, resource_size(res));
- return 0;
-}
-
-static struct platform_driver amiga_gayle_ide_driver = {
- .remove = __exit_p(amiga_gayle_ide_remove),
- .driver = {
- .name = "amiga-gayle-ide",
- },
-};
-
-module_platform_driver_probe(amiga_gayle_ide_driver, amiga_gayle_ide_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:amiga-gayle-ide");
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
deleted file mode 100644
index 50c9a41467c8..000000000000
--- a/drivers/ide/hpt366.c
+++ /dev/null
@@ -1,1545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
- * Portions Copyright (C) 2001 Sun Microsystems, Inc.
- * Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
- *
- * Thanks to HighPoint Technologies for their assistance, and hardware.
- * Special Thanks to Jon Burchmore in San Diego for the deep pockets, his
- * donation of an ABit BP6 mainboard, processor, and memory accelerated
- * development and support.
- *
- *
- * HighPoint has its own drivers (open source except for the RAID part)
- * available from http://www.highpoint-tech.com/USA_new/service_support.htm
- * This may be useful to anyone wanting to work on this driver; however, do
- * not trust them too much since the code tends to become less and less
- * meaningful as time passes... :-/
- *
- * Note that final HPT370 support was done by force extraction of GPL.
- *
- * - add function for getting/setting power status of drive
- * - the HPT370's state machine can get confused. reset it before each dma
- * xfer to prevent that from happening.
- * - reset state engine whenever we get an error.
- * - check for busmaster state at end of dma.
- * - use new highpoint timings.
- * - detect bus speed using highpoint register.
- * - use pll if we don't have a clock table. added a 66MHz table that's
- * just 2x the 33MHz table.
- * - removed turnaround. NOTE: we never want to switch between pll and
- * pci clocks as the chip can glitch in those cases. the highpoint
- * approved workaround slows everything down too much to be useful. in
- * addition, we would have to serialize access to each chip.
- * Adrian Sun <a.sun@sun.com>
- *
- * add drive timings for 66MHz PCI bus,
- * fix ATA Cable signal detection, fix incorrect /proc info
- * add /proc display for per-drive PIO/DMA/UDMA mode and
- * per-channel ATA-33/66 Cable detect.
- * Duncan Laurie <void@sun.com>
- *
- * fixup /proc output for multiple controllers
- * Tim Hockin <thockin@sun.com>
- *
- * On hpt366:
- * Reset the hpt366 on error, reset on dma
- * Fix disabling Fast Interrupt hpt366.
- * Mike Waychison <crlf@sun.com>
- *
- * Added support for 372N clocking and clock switching. The 372N needs
- * different clocks on read/write. This requires overloading rw_disk and
- * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
- * keeping me sane.
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * - fix the clock turnaround code: it was writing to the wrong ports when
- * called for the secondary channel, caching the current clock mode per-
- * channel caused the cached register value to get out of sync with the
- * actual one, the channels weren't serialized, the turnaround shouldn't
- * be done on 66 MHz PCI bus
- * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used
- * does not allow for this speed anyway
- * - avoid touching disabled channels (e.g. HPT371/N are single channel chips,
- * their primary channel is kind of virtual, it isn't tied to any pins)
- * - fix/remove bad/unused timing tables and use one set of tables for the whole
- * HPT37x chip family; save space by introducing the separate transfer mode
- * table in which the mode lookup is done
- * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives
- * the wrong PCI frequency since DPLL has already been calibrated by BIOS;
- * read it only from the function 0 of HPT374 chips
- * - fix the hotswap code: it caused RESET- to glitch when tristating the bus,
- * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
- * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
- * they tamper with its fields
- * - pass to the init_setup handlers a copy of the ide_pci_device_t structure
- * since they may tamper with its fields
- * - prefix the driver startup messages with the real chip name
- * - claim the extra 240 bytes of I/O space for all chips
- * - optimize the UltraDMA filtering and the drive list lookup code
- * - use pci_get_slot() to get to the function 1 of HPT36x/374
- * - cache offset of the channel's misc. control registers (MCRs) being used
- * throughout the driver
- * - only touch the relevant MCR when detecting the cable type on HPT374's
- * function 1
- * - rename all the register related variables consistently
- * - move all the interrupt twiddling code from the speedproc handlers into
- * init_hwif_hpt366(), also grouping all the DMA related code together there
- * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and
- * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
- * when setting an UltraDMA mode
- * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
- * the best possible one
- * - clean up DMA timeout handling for HPT370
- * - switch to using the enumeration type to differ between the numerous chip
- * variants, matching PCI device/revision ID with the chip type early, at the
- * init_setup stage
- * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies,
- * stop duplicating it for each channel by storing the pointer in the pci_dev
- * structure: first, at the init_setup stage, point it to a static "template"
- * with only the chip type and its specific base DPLL frequency, the highest
- * UltraDMA mode, and the chip settings table pointer filled, then, at the
- * init_chipset stage, allocate per-chip instance and fill it with the rest
- * of the necessary information
- * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
- * switch to calculating PCI clock frequency based on the chip's base DPLL
- * frequency
- * - switch to using the DPLL clock and enable UltraATA/133 mode by default on
- * anything newer than HPT370/A (except HPT374 that is not capable of this
- * mode according to the manual)
- * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(),
- * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
- * unify HPT36x/37x timing setup code and the speedproc handlers by joining
- * the register setting lists into the table indexed by the clock selected
- * - set the correct hwif->ultra_mask for each individual chip
- * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
- * - stop resetting HPT370's state machine before each DMA transfer as that has
- * caused more harm than good
- * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-#define DRV_NAME "hpt366"
-
-/* various tuning parameters */
-#undef HPT_RESET_STATE_ENGINE
-#undef HPT_DELAY_INTERRUPT
-
-static const char *bad_ata100_5[] = {
- "IBM-DTLA-307075",
- "IBM-DTLA-307060",
- "IBM-DTLA-307045",
- "IBM-DTLA-307030",
- "IBM-DTLA-307020",
- "IBM-DTLA-307015",
- "IBM-DTLA-305040",
- "IBM-DTLA-305030",
- "IBM-DTLA-305020",
- "IC35L010AVER07-0",
- "IC35L020AVER07-0",
- "IC35L030AVER07-0",
- "IC35L040AVER07-0",
- "IC35L060AVER07-0",
- "WDC AC310200R",
- NULL
-};
-
-static const char *bad_ata66_4[] = {
- "IBM-DTLA-307075",
- "IBM-DTLA-307060",
- "IBM-DTLA-307045",
- "IBM-DTLA-307030",
- "IBM-DTLA-307020",
- "IBM-DTLA-307015",
- "IBM-DTLA-305040",
- "IBM-DTLA-305030",
- "IBM-DTLA-305020",
- "IC35L010AVER07-0",
- "IC35L020AVER07-0",
- "IC35L030AVER07-0",
- "IC35L040AVER07-0",
- "IC35L060AVER07-0",
- "WDC AC310200R",
- "MAXTOR STM3320620A",
- NULL
-};
-
-static const char *bad_ata66_3[] = {
- "WDC AC310200R",
- NULL
-};
-
-static const char *bad_ata33[] = {
- "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
- "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
- "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
- "Maxtor 90510D4",
- "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
- "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
- "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
- NULL
-};
-
-static u8 xfer_speeds[] = {
- XFER_UDMA_6,
- XFER_UDMA_5,
- XFER_UDMA_4,
- XFER_UDMA_3,
- XFER_UDMA_2,
- XFER_UDMA_1,
- XFER_UDMA_0,
-
- XFER_MW_DMA_2,
- XFER_MW_DMA_1,
- XFER_MW_DMA_0,
-
- XFER_PIO_4,
- XFER_PIO_3,
- XFER_PIO_2,
- XFER_PIO_1,
- XFER_PIO_0
-};
-
-/* Key for bus clock timings
- * 36x 37x
- * bits bits
- * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
- * cycles = value + 1
- * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
- * cycles = value + 1
- * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
- * register access.
- * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
- * register access.
- * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
- * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock.
- * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and
- * MW DMA xfer.
- * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for
- * task file register access.
- * 28 28 UDMA enable.
- * 29 29 DMA enable.
- * 30 30 PIO MST enable. If set, the chip is in bus master mode during
- * PIO xfer.
- * 31 31 FIFO enable.
- */
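
A minimal sketch of how a 36x timing word decomposes according to the key above (the 37x layout widens several fields, so its shifts differ); the struct and helper are illustrative only:

	struct hpt36x_timing {
		u8 data_high, data_low, cmd_high, cmd_low;
		u8 udma_cycle, pre_high, cmd_pre_high;
		u8 udma_en, dma_en, pio_mst_en, fifo_en;
	};

	static void hpt36x_unpack(u32 t, struct hpt36x_timing *o)
	{
		o->data_high	= t & 0xf;		/* cycles = value + 1 */
		o->data_low	= (t >> 4) & 0xf;	/* cycles = value + 1 */
		o->cmd_high	= (t >> 8) & 0xf;
		o->cmd_low	= (t >> 12) & 0xf;
		o->udma_cycle	= (t >> 16) & 0x7;
		o->pre_high	= (t >> 19) & 0x7;
		o->cmd_pre_high	= (t >> 22) & 0x7;
		o->udma_en	= (t >> 28) & 1;
		o->dma_en	= (t >> 29) & 1;
		o->pio_mst_en	= (t >> 30) & 1;
		o->fifo_en	= (t >> 31) & 1;
	}
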
-
-static u32 forty_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x900fd943,
- /* XFER_UDMA_5 */ 0x900fd943,
- /* XFER_UDMA_4 */ 0x900fd943,
- /* XFER_UDMA_3 */ 0x900ad943,
- /* XFER_UDMA_2 */ 0x900bd943,
- /* XFER_UDMA_1 */ 0x9008d943,
- /* XFER_UDMA_0 */ 0x9008d943,
-
- /* XFER_MW_DMA_2 */ 0xa008d943,
- /* XFER_MW_DMA_1 */ 0xa010d955,
- /* XFER_MW_DMA_0 */ 0xa010d9fc,
-
- /* XFER_PIO_4 */ 0xc008d963,
- /* XFER_PIO_3 */ 0xc010d974,
- /* XFER_PIO_2 */ 0xc010d997,
- /* XFER_PIO_1 */ 0xc010d9c7,
- /* XFER_PIO_0 */ 0xc018d9d9
-};
-
-static u32 thirty_three_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x90c9a731,
- /* XFER_UDMA_5 */ 0x90c9a731,
- /* XFER_UDMA_4 */ 0x90c9a731,
- /* XFER_UDMA_3 */ 0x90cfa731,
- /* XFER_UDMA_2 */ 0x90caa731,
- /* XFER_UDMA_1 */ 0x90cba731,
- /* XFER_UDMA_0 */ 0x90c8a731,
-
- /* XFER_MW_DMA_2 */ 0xa0c8a731,
- /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */
- /* XFER_MW_DMA_0 */ 0xa0c8a797,
-
- /* XFER_PIO_4 */ 0xc0c8a731,
- /* XFER_PIO_3 */ 0xc0c8a742,
- /* XFER_PIO_2 */ 0xc0d0a753,
- /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */
- /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */
-};
-
-static u32 twenty_five_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x90c98521,
- /* XFER_UDMA_5 */ 0x90c98521,
- /* XFER_UDMA_4 */ 0x90c98521,
- /* XFER_UDMA_3 */ 0x90cf8521,
- /* XFER_UDMA_2 */ 0x90cf8521,
- /* XFER_UDMA_1 */ 0x90cb8521,
- /* XFER_UDMA_0 */ 0x90cb8521,
-
- /* XFER_MW_DMA_2 */ 0xa0ca8521,
- /* XFER_MW_DMA_1 */ 0xa0ca8532,
- /* XFER_MW_DMA_0 */ 0xa0ca8575,
-
- /* XFER_PIO_4 */ 0xc0ca8521,
- /* XFER_PIO_3 */ 0xc0ca8532,
- /* XFER_PIO_2 */ 0xc0ca8542,
- /* XFER_PIO_1 */ 0xc0d08572,
- /* XFER_PIO_0 */ 0xc0d08585
-};
-
-/*
- * The following are the new timing tables with PIO mode data/taskfile transfer
- * overclocking fixed...
- */
-
-/* This table is taken from the HPT370 data manual rev. 1.02 */
-static u32 thirty_three_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x16455031, /* 0x16655031 ?? */
- /* XFER_UDMA_5 */ 0x16455031,
- /* XFER_UDMA_4 */ 0x16455031,
- /* XFER_UDMA_3 */ 0x166d5031,
- /* XFER_UDMA_2 */ 0x16495031,
- /* XFER_UDMA_1 */ 0x164d5033,
- /* XFER_UDMA_0 */ 0x16515097,
-
- /* XFER_MW_DMA_2 */ 0x26515031,
- /* XFER_MW_DMA_1 */ 0x26515033,
- /* XFER_MW_DMA_0 */ 0x26515097,
-
- /* XFER_PIO_4 */ 0x06515021,
- /* XFER_PIO_3 */ 0x06515022,
- /* XFER_PIO_2 */ 0x06515033,
- /* XFER_PIO_1 */ 0x06915065,
- /* XFER_PIO_0 */ 0x06d1508a
-};
-
-static u32 fifty_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x1a861842,
- /* XFER_UDMA_5 */ 0x1a861842,
- /* XFER_UDMA_4 */ 0x1aae1842,
- /* XFER_UDMA_3 */ 0x1a8e1842,
- /* XFER_UDMA_2 */ 0x1a0e1842,
- /* XFER_UDMA_1 */ 0x1a161854,
- /* XFER_UDMA_0 */ 0x1a1a18ea,
-
- /* XFER_MW_DMA_2 */ 0x2a821842,
- /* XFER_MW_DMA_1 */ 0x2a821854,
- /* XFER_MW_DMA_0 */ 0x2a8218ea,
-
- /* XFER_PIO_4 */ 0x0a821842,
- /* XFER_PIO_3 */ 0x0a821843,
- /* XFER_PIO_2 */ 0x0a821855,
- /* XFER_PIO_1 */ 0x0ac218a8,
- /* XFER_PIO_0 */ 0x0b02190c
-};
-
-static u32 sixty_six_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x1c86fe62,
- /* XFER_UDMA_5 */ 0x1caefe62, /* 0x1c8afe62 */
- /* XFER_UDMA_4 */ 0x1c8afe62,
- /* XFER_UDMA_3 */ 0x1c8efe62,
- /* XFER_UDMA_2 */ 0x1c92fe62,
- /* XFER_UDMA_1 */ 0x1c9afe62,
- /* XFER_UDMA_0 */ 0x1c82fe62,
-
- /* XFER_MW_DMA_2 */ 0x2c82fe62,
- /* XFER_MW_DMA_1 */ 0x2c82fe66,
- /* XFER_MW_DMA_0 */ 0x2c82ff2e,
-
- /* XFER_PIO_4 */ 0x0c82fe62,
- /* XFER_PIO_3 */ 0x0c82fe84,
- /* XFER_PIO_2 */ 0x0c82fea6,
- /* XFER_PIO_1 */ 0x0d02ff26,
- /* XFER_PIO_0 */ 0x0d42ff7f
-};
-
-#define HPT371_ALLOW_ATA133_6 1
-#define HPT302_ALLOW_ATA133_6 1
-#define HPT372_ALLOW_ATA133_6 1
-#define HPT370_ALLOW_ATA100_5 0
-#define HPT366_ALLOW_ATA66_4 1
-#define HPT366_ALLOW_ATA66_3 1
-
-/* Supported ATA clock frequencies */
-enum ata_clock {
- ATA_CLOCK_25MHZ,
- ATA_CLOCK_33MHZ,
- ATA_CLOCK_40MHZ,
- ATA_CLOCK_50MHZ,
- ATA_CLOCK_66MHZ,
- NUM_ATA_CLOCKS
-};
-
-struct hpt_timings {
- u32 pio_mask;
- u32 dma_mask;
- u32 ultra_mask;
- u32 *clock_table[NUM_ATA_CLOCKS];
-};
-
-/*
- * Hold all the HighPoint chip information in one place.
- */
-
-struct hpt_info {
- char *chip_name; /* Chip name */
- u8 chip_type; /* Chip type */
- u8 udma_mask; /* Allowed UltraDMA modes mask. */
- u8 dpll_clk; /* DPLL clock in MHz */
- u8 pci_clk; /* PCI clock in MHz */
- struct hpt_timings *timings; /* Chipset timing data */
- u8 clock; /* ATA clock selected */
-};
-
-/* Supported HighPoint chips */
-enum {
- HPT36x,
- HPT370,
- HPT370A,
- HPT374,
- HPT372,
- HPT372A,
- HPT302,
- HPT371,
- HPT372N,
- HPT302N,
- HPT371N
-};
-
-static struct hpt_timings hpt36x_timings = {
- .pio_mask = 0xc1f8ffff,
- .dma_mask = 0x303800ff,
- .ultra_mask = 0x30070000,
- .clock_table = {
- [ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x,
- [ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x,
- [ATA_CLOCK_40MHZ] = forty_base_hpt36x,
- [ATA_CLOCK_50MHZ] = NULL,
- [ATA_CLOCK_66MHZ] = NULL
- }
-};
-
-static struct hpt_timings hpt37x_timings = {
- .pio_mask = 0xcfc3ffff,
- .dma_mask = 0x31c001ff,
- .ultra_mask = 0x303c0000,
- .clock_table = {
- [ATA_CLOCK_25MHZ] = NULL,
- [ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x,
- [ATA_CLOCK_40MHZ] = NULL,
- [ATA_CLOCK_50MHZ] = fifty_base_hpt37x,
- [ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x
- }
-};
-
-static const struct hpt_info hpt36x = {
- .chip_name = "HPT36x",
- .chip_type = HPT36x,
- .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
- .dpll_clk = 0, /* no DPLL */
- .timings = &hpt36x_timings
-};
-
-static const struct hpt_info hpt370 = {
- .chip_name = "HPT370",
- .chip_type = HPT370,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt370a = {
- .chip_name = "HPT370A",
- .chip_type = HPT370A,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt374 = {
- .chip_name = "HPT374",
- .chip_type = HPT374,
- .udma_mask = ATA_UDMA5,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372 = {
- .chip_name = "HPT372",
- .chip_type = HPT372,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 55,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372a = {
- .chip_name = "HPT372A",
- .chip_type = HPT372A,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt302 = {
- .chip_name = "HPT302",
- .chip_type = HPT302,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt371 = {
- .chip_name = "HPT371",
- .chip_type = HPT371,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372n = {
- .chip_name = "HPT372N",
- .chip_type = HPT372N,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt302n = {
- .chip_name = "HPT302N",
- .chip_type = HPT302N,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt371n = {
- .chip_name = "HPT371N",
- .chip_type = HPT371N,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static bool check_in_drive_list(ide_drive_t *drive, const char **list)
-{
- return match_string(list, -1, (char *)&drive->id[ATA_ID_PROD]) >= 0;
-}
-
-static struct hpt_info *hpt3xx_get_info(struct device *dev)
-{
- struct ide_host *host = dev_get_drvdata(dev);
- struct hpt_info *info = (struct hpt_info *)host->host_priv;
-
- return dev == host->dev[1] ? info + 1 : info;
-}
-
-/*
- * The Marvell bridge chips used on the HighPoint SATA cards do not seem
- * to support UltraDMA modes 1, 2, and 3, nor any MWDMA modes...
- */
-
-static u8 hpt3xx_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 mask = hwif->ultra_mask;
-
- switch (info->chip_type) {
- case HPT36x:
- if (!HPT366_ALLOW_ATA66_4 ||
- check_in_drive_list(drive, bad_ata66_4))
- mask = ATA_UDMA3;
-
- if (!HPT366_ALLOW_ATA66_3 ||
- check_in_drive_list(drive, bad_ata66_3))
- mask = ATA_UDMA2;
- break;
- case HPT370:
- if (!HPT370_ALLOW_ATA100_5 ||
- check_in_drive_list(drive, bad_ata100_5))
- mask = ATA_UDMA4;
- break;
- case HPT370A:
- if (!HPT370_ALLOW_ATA100_5 ||
- check_in_drive_list(drive, bad_ata100_5))
- return ATA_UDMA4;
- fallthrough;
- case HPT372 :
- case HPT372A:
- case HPT372N:
- case HPT374 :
- if (ata_id_is_sata(drive->id))
- mask &= ~0x0e;
- fallthrough;
- default:
- return mask;
- }
-
- return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask;
-}
-
-static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-
- switch (info->chip_type) {
- case HPT372 :
- case HPT372A:
- case HPT372N:
- case HPT374 :
- if (ata_id_is_sata(drive->id))
- return 0x00;
- fallthrough;
- default:
- return 0x07;
- }
-}
-
-static u32 get_speed_setting(u8 speed, struct hpt_info *info)
-{
- int i;
-
- /*
-	 * Look up the transfer mode table to get the index into
- * the timing table.
- *
- * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used.
- */
- for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
- if (xfer_speeds[i] == speed)
- break;
-
- return info->timings->clock_table[info->clock][i];
-}
-
-static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- struct hpt_timings *t = info->timings;
- u8 itr_addr = 0x40 + (drive->dn * 4);
- u32 old_itr = 0;
- const u8 speed = drive->dma_mode;
- u32 new_itr = get_speed_setting(speed, info);
- u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask :
- (speed < XFER_UDMA_0 ? t->dma_mask :
- t->ultra_mask);
-
- pci_read_config_dword(dev, itr_addr, &old_itr);
- new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask);
- /*
- * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
- * to avoid problems handling I/O errors later
- */
- new_itr &= ~0xc0000000;
-
- pci_write_config_dword(dev, itr_addr, new_itr);
-}
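
The itr_mask above is what keeps a mode change from clobbering unrelated timing bits: each speed class may only touch its own bit group, taken from the per-family hpt_timings masks. The same selection, restated as a hypothetical helper:

	static u32 hpt_mode_mask(const struct hpt_timings *t, u8 speed)
	{
		if (speed < XFER_MW_DMA_0)
			return t->pio_mask;		/* PIO modes */
		return speed < XFER_UDMA_0 ? t->dma_mask	/* MWDMA */
					   : t->ultra_mask;	/* UDMA */
	}
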
-
-static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- hpt3xx_set_mode(hwif, drive);
-}
-
-static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-
- if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
- return;
-
- if (info->chip_type >= HPT370) {
- u8 scr1 = 0;
-
- pci_read_config_byte(dev, 0x5a, &scr1);
- if (((scr1 & 0x10) >> 4) != mask) {
- if (mask)
- scr1 |= 0x10;
- else
- scr1 &= ~0x10;
- pci_write_config_byte(dev, 0x5a, scr1);
- }
- } else if (mask)
- disable_irq(hwif->irq);
- else
- enable_irq(hwif->irq);
-}
-
-/*
- * This is specific to the HPT366 UDMA chipset
- * by HighPoint|Triones Technologies, Inc.
- */
-static void hpt366_dma_lost_irq(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
-
- pci_read_config_byte(dev, 0x50, &mcr1);
- pci_read_config_byte(dev, 0x52, &mcr3);
- pci_read_config_byte(dev, 0x5a, &scr1);
- printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
- drive->name, __func__, mcr1, mcr3, scr1);
- if (scr1 & 0x10)
- pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
- ide_dma_lost_irq(drive);
-}
-
-static void hpt370_clear_engine(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- pci_write_config_byte(dev, hwif->select_data, 0x37);
- udelay(10);
-}
-
-static void hpt370_irq_timeout(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 bfifo = 0;
- u8 dma_cmd;
-
- pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
- printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
-
- /* get DMA command mode */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- hpt370_clear_engine(drive);
-}
-
-static void hpt370_dma_start(ide_drive_t *drive)
-{
-#ifdef HPT_RESET_STATE_ENGINE
- hpt370_clear_engine(drive);
-#endif
- ide_dma_start(drive);
-}
-
-static int hpt370_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
-
- if (dma_stat & ATA_DMA_ACTIVE) {
- /* wait a little */
- udelay(20);
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- if (dma_stat & ATA_DMA_ACTIVE)
- hpt370_irq_timeout(drive);
- }
- return ide_dma_end(drive);
-}
-
-/* returns 1 if DMA IRQ issued, 0 otherwise */
-static int hpt374_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 bfifo = 0;
- u8 dma_stat;
-
- pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
- if (bfifo & 0x1FF) {
-// printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
- return 0;
- }
-
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* return 1 if INTR asserted */
- if (dma_stat & ATA_DMA_INTR)
- return 1;
-
- return 0;
-}
-
-static int hpt374_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 mcr = 0, mcr_addr = hwif->select_data;
- u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- pci_read_config_byte(dev, 0x6a, &bwsr);
- pci_read_config_byte(dev, mcr_addr, &mcr);
- if (bwsr & mask)
- pci_write_config_byte(dev, mcr_addr, mcr | 0x30);
- return ide_dma_end(drive);
-}
-
-/**
- * hpt3xxn_set_clock - perform clock switching dance
- * @hwif: hwif to switch
- * @mode: clocking mode (0x21 for write, 0x23 otherwise)
- *
- * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
- */
-
-static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
-{
- unsigned long base = hwif->extra_base;
- u8 scr2 = inb(base + 0x6b);
-
- if ((scr2 & 0x7f) == mode)
- return;
-
- /* Tristate the bus */
- outb(0x80, base + 0x63);
- outb(0x80, base + 0x67);
-
- /* Switch clock and reset channels */
- outb(mode, base + 0x6b);
- outb(0xc0, base + 0x69);
-
- /*
- * Reset the state machines.
- * NOTE: avoid accidentally enabling the disabled channels.
- */
- outb(inb(base + 0x60) | 0x32, base + 0x60);
- outb(inb(base + 0x64) | 0x32, base + 0x64);
-
- /* Complete reset */
- outb(0x00, base + 0x69);
-
- /* Reconnect channels to bus */
- outb(0x00, base + 0x63);
- outb(0x00, base + 0x67);
-}
-
-/**
- * hpt3xxn_rw_disk - prepare for I/O
- * @drive: drive for command
- * @rq: block request structure
- *
- * This is called when a disk I/O is issued to HPT3xxN.
- * We need it because of the clock switching.
- */
-
-static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
-{
- hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
-}
-
-/**
- * hpt37x_calibrate_dpll - calibrate the DPLL
- * @dev: PCI device
- * @f_low: value programmed into the low half of the DPLL register
- * @f_high: value programmed into the high half of the DPLL register
- *
- * Perform a calibration cycle on the DPLL.
- * Returns 1 if this succeeds, 0 otherwise.
- */
-static int hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high)
-{
- u32 dpll = (f_high << 16) | f_low | 0x100;
- u8 scr2;
- int i;
-
- pci_write_config_dword(dev, 0x5c, dpll);
-
- /* Wait for oscillator ready */
- for (i = 0; i < 0x5000; ++i) {
- udelay(50);
- pci_read_config_byte(dev, 0x5b, &scr2);
- if (scr2 & 0x80)
- break;
- }
- /* See if it stays ready (bail out if it isn't ready yet) */
- for (i = 0; i < 0x1000; ++i) {
- pci_read_config_byte(dev, 0x5b, &scr2);
- /* DPLL destabilized? */
- if (!(scr2 & 0x80))
- return 0;
- }
- /* Turn off tuning, we have the DPLL set */
- pci_read_config_dword(dev, 0x5c, &dpll);
- pci_write_config_dword(dev, 0x5c, (dpll & ~0x100));
- return 1;
-}
-
-static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct hpt_info *info = host->host_priv + (&dev->dev == host->dev[1]);
- u8 chip_type = info->chip_type;
- u8 new_mcr, old_mcr = 0;
-
- /*
- * Disable the "fast interrupt" prediction. Don't hold off
- * on interrupts. (== 0x01 despite what the docs say)
- */
- pci_read_config_byte(dev, mcr_addr + 1, &old_mcr);
-
- if (chip_type >= HPT374)
- new_mcr = old_mcr & ~0x07;
- else if (chip_type >= HPT370) {
- new_mcr = old_mcr;
- new_mcr &= ~0x02;
-#ifdef HPT_DELAY_INTERRUPT
- new_mcr &= ~0x01;
-#else
- new_mcr |= 0x01;
-#endif
- } else /* HPT366 and HPT368 */
- new_mcr = old_mcr & ~0x80;
-
- if (new_mcr != old_mcr)
- pci_write_config_byte(dev, mcr_addr + 1, new_mcr);
-}
-
-static int init_chipset_hpt366(struct pci_dev *dev)
-{
- unsigned long io_base = pci_resource_start(dev, 4);
- struct hpt_info *info = hpt3xx_get_info(&dev->dev);
- const char *name = DRV_NAME;
- u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */
- u8 chip_type;
- enum ata_clock clock;
-
- chip_type = info->chip_type;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
- pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
- pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
-
- /*
- * First, try to estimate the PCI clock frequency...
- */
- if (chip_type >= HPT370) {
- u8 scr1 = 0;
- u16 f_cnt = 0;
- u32 temp = 0;
-
- /* Interrupt force enable. */
- pci_read_config_byte(dev, 0x5a, &scr1);
- if (scr1 & 0x10)
- pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
-
- /*
- * HighPoint does this for HPT372A.
- * NOTE: This register is only writeable via I/O space.
- */
- if (chip_type == HPT372A)
- outb(0x0e, io_base + 0x9c);
-
- /*
- * Default to PCI clock. Make sure MA15/16 are set to output
- * to prevent drives having problems with 40-pin cables.
- */
- pci_write_config_byte(dev, 0x5b, 0x23);
-
- /*
- * We'll have to read f_CNT value in order to determine
- * the PCI clock frequency according to the following ratio:
- *
- * f_CNT = Fpci * 192 / Fdpll
- *
- * First try reading the register in which the HighPoint BIOS
- * saves f_CNT value before reprogramming the DPLL from its
- * default setting (which differs for the various chips).
- *
- * NOTE: This register is only accessible via I/O space;
- * HPT374 BIOS only saves it for the function 0, so we have to
- * always read it from there -- no need to check the result of
- * pci_get_slot() for the function 0 as the whole device has
- * been already "pinned" (via function 1) in init_setup_hpt374()
- */
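- /*
- * For example, assuming the BIOS saved an f_CNT of 96 and the chip
- * uses a 66 MHz DPLL base, the ratio above gives
- * Fpci = f_CNT * Fdpll / 192 = 96 * 66 / 192 = 33 MHz, which is how
- * pci_clk is derived from f_cnt and dpll_clk below.
- */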
- if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
- struct pci_dev *dev1 = pci_get_slot(dev->bus,
- dev->devfn - 1);
- unsigned long io_base = pci_resource_start(dev1, 4);
-
- temp = inl(io_base + 0x90);
- pci_dev_put(dev1);
- } else
- temp = inl(io_base + 0x90);
-
- /*
- * In case the signature check fails, we'll have to
- * resort to reading the f_CNT register itself in hopes
- * that nobody has touched the DPLL yet...
- */
- if ((temp & 0xFFFFF000) != 0xABCDE000) {
- int i;
-
- printk(KERN_WARNING "%s %s: no clock data saved by "
- "BIOS\n", name, pci_name(dev));
-
- /* Calculate the average value of f_CNT. */
- for (temp = i = 0; i < 128; i++) {
- pci_read_config_word(dev, 0x78, &f_cnt);
- temp += f_cnt & 0x1ff;
- mdelay(1);
- }
- f_cnt = temp / 128;
- } else
- f_cnt = temp & 0x1ff;
-
- dpll_clk = info->dpll_clk;
- pci_clk = (f_cnt * dpll_clk) / 192;
-
- /* Clamp PCI clock to bands. */
- if (pci_clk < 40)
- pci_clk = 33;
- else if (pci_clk < 45)
- pci_clk = 40;
- else if (pci_clk < 55)
- pci_clk = 50;
- else
- pci_clk = 66;
-
- printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, "
- "assuming %d MHz PCI\n", name, pci_name(dev),
- dpll_clk, f_cnt, pci_clk);
- } else {
- u32 itr1 = 0;
-
- pci_read_config_dword(dev, 0x40, &itr1);
-
- /* Detect PCI clock by looking at cmd_high_time. */
- switch ((itr1 >> 8) & 0x0f) {
- case 0x09:
- pci_clk = 40;
- break;
- case 0x05:
- pci_clk = 25;
- break;
- case 0x07:
- default:
- pci_clk = 33;
- break;
- }
- }
-
- /* Let's assume we'll use PCI clock for the ATA clock... */
- switch (pci_clk) {
- case 25:
- clock = ATA_CLOCK_25MHZ;
- break;
- case 33:
- default:
- clock = ATA_CLOCK_33MHZ;
- break;
- case 40:
- clock = ATA_CLOCK_40MHZ;
- break;
- case 50:
- clock = ATA_CLOCK_50MHZ;
- break;
- case 66:
- clock = ATA_CLOCK_66MHZ;
- break;
- }
-
- /*
- * Only try the DPLL if we don't have a table for the PCI clock that
- * we are running at for HPT370/A; always use it for anything newer...
- *
- * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI.
- * We also don't like using the DPLL because this causes glitches
- * on PRST-/SRST- when the state engine gets reset...
- */
- if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) {
- u16 f_low, delta = pci_clk < 50 ? 2 : 4;
- int adjust;
-
- /*
- * Select 66 MHz DPLL clock only if UltraATA/133 mode is
- * supported/enabled, use 50 MHz DPLL clock otherwise...
- */
- if (info->udma_mask == ATA_UDMA6) {
- dpll_clk = 66;
- clock = ATA_CLOCK_66MHZ;
- } else if (dpll_clk) { /* HPT36x chips don't have DPLL */
- dpll_clk = 50;
- clock = ATA_CLOCK_50MHZ;
- }
-
- if (info->timings->clock_table[clock] == NULL) {
- printk(KERN_ERR "%s %s: unknown bus timing!\n",
- name, pci_name(dev));
- return -EIO;
- }
-
- /* Select the DPLL clock. */
- pci_write_config_byte(dev, 0x5b, 0x21);
-
- /*
- * Adjust the DPLL based upon PCI clock, enable it,
- * and wait for stabilization...
- */
- f_low = (pci_clk * 48) / dpll_clk;
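- /*
- * E.g. assuming 33 MHz PCI and a 66 MHz DPLL target, this yields
- * f_low = (33 * 48) / 66 = 24; the calibration window is then
- * [f_low, f_low + delta], with delta = 2 below 50 MHz PCI.
- */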
-
- for (adjust = 0; adjust < 8; adjust++) {
- if (hpt37x_calibrate_dpll(dev, f_low, f_low + delta))
- break;
-
- /*
- * See if it'll settle at a fractionally different clock
- */
- if (adjust & 1)
- f_low -= adjust >> 1;
- else
- f_low += adjust >> 1;
- }
- if (adjust == 8) {
- printk(KERN_ERR "%s %s: DPLL did not stabilize!\n",
- name, pci_name(dev));
- return -EIO;
- }
-
- printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n",
- name, pci_name(dev), dpll_clk);
- } else {
- /* Mark the fact that we're not using the DPLL. */
- dpll_clk = 0;
-
- printk(KERN_INFO "%s %s: using %d MHz PCI clock\n",
- name, pci_name(dev), pci_clk);
- }
-
- /* Store the clock frequencies. */
- info->dpll_clk = dpll_clk;
- info->pci_clk = pci_clk;
- info->clock = clock;
-
- if (chip_type >= HPT370) {
- u8 mcr1, mcr4;
-
- /*
- * Reset the state engines.
- * NOTE: Avoid accidentally enabling the disabled channels.
- */
- pci_read_config_byte (dev, 0x50, &mcr1);
- pci_read_config_byte (dev, 0x54, &mcr4);
- pci_write_config_byte(dev, 0x50, (mcr1 | 0x32));
- pci_write_config_byte(dev, 0x54, (mcr4 | 0x32));
- udelay(100);
- }
-
- /*
- * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
- * the MISC. register to stretch the UltraDMA Tss timing.
- * NOTE: This register is only writeable via I/O space.
- */
- if (chip_type == HPT371N && clock == ATA_CLOCK_66MHZ)
- outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c);
-
- hpt3xx_disable_fast_irq(dev, 0x50);
- hpt3xx_disable_fast_irq(dev, 0x54);
-
- return 0;
-}
-
-static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 chip_type = info->chip_type;
- u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02;
-
- /*
- * The HPT37x uses the CBLID pins as outputs for MA15/MA16
- * address lines to access an external EEPROM. To read valid
- * cable detect state the pins must be enabled as inputs.
- */
- if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
- /*
- * HPT374 PCI function 1
- * - set bit 15 of reg 0x52 to enable TCBLID as input
- * - set bit 15 of reg 0x56 to enable FCBLID as input
- */
- u8 mcr_addr = hwif->select_data + 2;
- u16 mcr;
-
- pci_read_config_word(dev, mcr_addr, &mcr);
- pci_write_config_word(dev, mcr_addr, mcr | 0x8000);
- /* Debounce, then read cable ID register */
- udelay(10);
- pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_word(dev, mcr_addr, mcr);
- } else if (chip_type >= HPT370) {
- /*
- * HPT370/372 and 374 pcifn 0
- * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs
- */
- u8 scr2 = 0;
-
- pci_read_config_byte(dev, 0x5b, &scr2);
- pci_write_config_byte(dev, 0x5b, scr2 & ~1);
- /* Debounce, then read cable ID register */
- udelay(10);
- pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_byte(dev, 0x5b, scr2);
- } else
- pci_read_config_byte(dev, 0x5a, &scr1);
-
- return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static void init_hwif_hpt366(ide_hwif_t *hwif)
-{
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 chip_type = info->chip_type;
-
- /* Cache the channel's MISC. control registers' offset */
- hwif->select_data = hwif->channel ? 0x54 : 0x50;
-
- /*
- * HPT3xxN chips have some complications:
- *
- * - on 33 MHz PCI we must clock switch
- * - on 66 MHz PCI we must NOT use the PCI clock
- */
- if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) {
- /*
- * Clock is shared between the channels,
- * so we'll have to serialize them... :-(
- */
- hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
- hwif->rw_disk = &hpt3xxn_rw_disk;
- }
-}
-
-static int init_dma_hpt366(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags, base = ide_pci_dma_base(hwif, d);
- u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- dma_old = inb(base + 2);
-
- local_irq_save(flags);
-
- dma_new = dma_old;
- pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
- pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma);
-
- if (masterdma & 0x30) dma_new |= 0x20;
- if ( slavedma & 0x30) dma_new |= 0x40;
- if (dma_new != dma_old)
- outb(dma_new, base + 2);
-
- local_irq_restore(flags);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- return 0;
-}
-
-static void hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
-{
- if (dev2->irq != dev->irq) {
- /* FIXME: we need a core pci_set_interrupt() */
- dev2->irq = dev->irq;
- printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt "
- "fixed\n", pci_name(dev2));
- }
-}
-
-static void hpt371_init(struct pci_dev *dev)
-{
- u8 mcr1 = 0;
-
- /*
- * HPT371 chips physically have only one channel, the secondary one,
- * but the primary channel registers do exist! Go figure...
- * So, we manually disable the non-existing channel here
- * (if the BIOS hasn't done this already).
- */
- pci_read_config_byte(dev, 0x50, &mcr1);
- if (mcr1 & 0x04)
- pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
-}
-
-static int hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
-{
- u8 mcr1 = 0, pin1 = 0, pin2 = 0;
-
- /*
- * Now we'll have to force both channels enabled if
- * at least one of them has been enabled by BIOS...
- */
- pci_read_config_byte(dev, 0x50, &mcr1);
- if (mcr1 & 0x30)
- pci_write_config_byte(dev, 0x50, mcr1 | 0x30);
-
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1);
- pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
-
- if (pin1 != pin2 && dev->irq == dev2->irq) {
- printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, "
- "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2);
- return 1;
- }
-
- return 0;
-}
-
-#define IDE_HFLAGS_HPT3XX \
- (IDE_HFLAG_NO_ATAPI_DMA | \
- IDE_HFLAG_OFF_BOARD)
-
-static const struct ide_port_ops hpt3xx_port_ops = {
- .set_pio_mode = hpt3xx_set_pio_mode,
- .set_dma_mode = hpt3xx_set_mode,
- .maskproc = hpt3xx_maskproc,
- .mdma_filter = hpt3xx_mdma_filter,
- .udma_filter = hpt3xx_udma_filter,
- .cable_detect = hpt3xx_cable_detect,
-};
-
-static const struct ide_dma_ops hpt37x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = hpt374_dma_end,
- .dma_test_irq = hpt374_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_dma_ops hpt370_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = hpt370_dma_start,
- .dma_end = hpt370_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = hpt370_irq_timeout,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_dma_ops hpt36x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = hpt366_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info hpt366_chipsets[] = {
- { /* 0: HPT36x */
- .name = DRV_NAME,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- /*
- * HPT36x chips have one channel per function and have
- * both channel enable bits located differently and visible
- * to both functions -- really stupid design decision... :-(
- * Bit 4 is for the primary channel, bit 5 for the secondary.
- */
- .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
- .port_ops = &hpt3xx_port_ops,
- .dma_ops = &hpt36x_dma_ops,
- .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- },
- { /* 1: HPT3xx */
- .name = DRV_NAME,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .port_ops = &hpt3xx_port_ops,
- .dma_ops = &hpt37x_dma_ops,
- .host_flags = IDE_HFLAGS_HPT3XX,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- }
-};
-
-/**
- * hpt366_init_one - called when an HPT366 is found
- * @dev: the hpt366 device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-static int hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct hpt_info *info = NULL;
- struct hpt_info *dyn_info;
- struct pci_dev *dev2 = NULL;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- u8 rev = dev->revision;
- int ret;
-
- if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1))
- return -ENODEV;
-
- switch (idx) {
- case 0:
- if (rev < 3)
- info = &hpt36x;
- else {
- switch (min_t(u8, rev, 6)) {
- case 3: info = &hpt370; break;
- case 4: info = &hpt370a; break;
- case 5: info = &hpt372; break;
- case 6: info = &hpt372n; break;
- }
- idx++;
- }
- break;
- case 1:
- info = (rev > 1) ? &hpt372n : &hpt372a;
- break;
- case 2:
- info = (rev > 1) ? &hpt302n : &hpt302;
- break;
- case 3:
- hpt371_init(dev);
- info = (rev > 1) ? &hpt371n : &hpt371;
- break;
- case 4:
- info = &hpt374;
- break;
- case 5:
- info = &hpt372n;
- break;
- }
-
- printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name);
-
- d = hpt366_chipsets[min_t(u8, idx, 1)];
-
- d.udma_mask = info->udma_mask;
-
- /* fixup ->dma_ops for HPT370/HPT370A */
- if (info == &hpt370 || info == &hpt370a)
- d.dma_ops = &hpt370_dma_ops;
-
- if (info == &hpt36x || info == &hpt374)
- dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
-
- dyn_info = kcalloc(dev2 ? 2 : 1, sizeof(*dyn_info), GFP_KERNEL);
- if (dyn_info == NULL) {
- printk(KERN_ERR "%s %s: out of memory!\n",
- d.name, pci_name(dev));
- pci_dev_put(dev2);
- return -ENOMEM;
- }
-
- /*
- * Copy everything from a static "template" structure
- * to just allocated per-chip hpt_info structure.
- */
- memcpy(dyn_info, info, sizeof(*dyn_info));
-
- if (dev2) {
- memcpy(dyn_info + 1, info, sizeof(*dyn_info));
-
- if (info == &hpt374)
- hpt374_init(dev, dev2);
- else {
- if (hpt36x_init(dev, dev2))
- d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
- }
-
- ret = ide_pci_init_two(dev, dev2, &d, dyn_info);
- if (ret < 0) {
- pci_dev_put(dev2);
- kfree(dyn_info);
- }
- return ret;
- }
-
- ret = ide_pci_init_one(dev, &d, dyn_info);
- if (ret < 0)
- kfree(dyn_info);
-
- return ret;
-}
-
-static void hpt366_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct hpt_info *info = host->host_priv;
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
- kfree(info);
-}
-
-static const struct pci_device_id hpt366_pci_tbl[] = {
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), 3 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), 4 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), 5 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl);
-
-static struct pci_driver hpt366_pci_driver = {
- .name = "HPT366_IDE",
- .id_table = hpt366_pci_tbl,
- .probe = hpt366_init_one,
- .remove = hpt366_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init hpt366_ide_init(void)
-{
- return ide_pci_register_driver(&hpt366_pci_driver);
-}
-
-static void __exit hpt366_ide_exit(void)
-{
- pci_unregister_driver(&hpt366_pci_driver);
-}
-
-module_init(hpt366_ide_init);
-module_exit(hpt366_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
deleted file mode 100644
index 743bc3693ac8..000000000000
--- a/drivers/ide/ht6560b.c
+++ /dev/null
@@ -1,383 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-2000 Linus Torvalds & author (see below)
- */
-
-/*
- * HT-6560B EIDE-controller support
- * To activate controller support use kernel parameter "ide0=ht6560b".
- * Use hdparm utility to enable PIO mode support.
- *
- * Author: Mikko Ala-Fossi <maf@iki.fi>
- * Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl>
- *
- */
-
-#define DRV_NAME "ht6560b"
-#define HT6560B_VERSION "v0.08"
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-/* #define DEBUG */ /* remove comments for DEBUG messages */
-
-/*
- * The special i/o-port that the HT-6560B uses for configuration:
- * bit0 (0x01): "1" selects secondary interface
- * bit2 (0x04): "1" enables FIFO function
- * bit5 (0x20): "1" enables prefetched data read function (???)
- *
- * The special i/o-port that the HT-6560A uses for configuration:
- * bit0 (0x01): "1" selects secondary interface
- * bit1 (0x02): "1" enables prefetched data read function
- * bit2 (0x04): "0" enables multi-master system (?)
- * bit3 (0x08): "1" 3 cycle time, "0" 2 cycle time (?)
- */
-#define HT_CONFIG_PORT 0x3e6
-
-static inline u8 HT_CONFIG(ide_drive_t *drive)
-{
- return ((unsigned long)ide_get_drivedata(drive) & 0xff00) >> 8;
-}
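-/*
- * The per-drive data word packs the config byte into bits 15..8 and
- * the timing byte into bits 7..0, e.g. (HT_CONFIG_DEFAULT << 8) |
- * HT_TIMING_DEFAULT, as set up in ht6560b_init_dev() below.
- */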
-
-/*
- * FIFO + PREFETCH (both a/b-model)
- */
-#define HT_CONFIG_DEFAULT 0x1c /* no prefetch */
-/* #define HT_CONFIG_DEFAULT 0x3c */ /* with prefetch */
-#define HT_SECONDARY_IF 0x01
-#define HT_PREFETCH_MODE 0x20
-
-/*
- * ht6560b Timing values:
- *
- * I reviewed some assembler source listings of htide drivers and found
- * out how they set up those cycle time interfacing values, as Holtek
- * calls them. IDESETUP.COM, which is supplied with the drivers, figures
- * out optimal values and feeds them to the drivers. I found out that
- * they use the Select register to feed timings to the IDE board right
- * after interface switching. After that it was quite easy to add code
- * to ht6560b.c.
- *
- * IDESETUP.COM gave me the values 0x24, 0x45, 0xaa, 0xff, which worked
- * fine for hda and hdc. But hdb needed higher values to work, so I guess
- * that sometimes it is necessary to give a higher value than IDESETUP
- * gives. [see cmd640.c for an extreme example of this. -ml]
- *
- * Perhaps I should explain something about these timing values:
- * The high nibble of the value is the Recovery Time (rt) and the low
- * nibble is the Active Time (at). The minimum value 2 is the fastest
- * and the maximum value 15 is the slowest. The default should be 15
- * for both. So 0x24 means 2 for rt and 4 for at. Each of the drives
- * should have both values, and IDESETUP automatically gives rt=15 at=15
- * for CD-ROMs and similar drives. If a value is too small there will be
- * all sorts of failures.
- *
- * The timing byte consists of
- * High nibble: Recovery Cycle Time (rt)
- * The valid values range from 2 to 15. The default is 15.
- *
- * Low nibble: Active Cycle Time (at)
- * The valid values range from 2 to 15. The default is 15.
- *
- * You can obtain optimized timing values by running Holtek IDESETUP.COM
- * for DOS. DOS drivers get their timing values from the command line,
- * where the first value is the Recovery Time and the second value is
- * the Active Time for each drive. A smaller value gives higher speed.
- * In case of failures you should probably fall back to a higher value.
- */
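-/*
- * Decoding the example value 0x24 mentioned above: rt = 0x24 >> 4 = 2
- * (recovery) and at = 0x24 & 0x0f = 4 (active), i.e. a fast recovery
- * time combined with a moderate active time.
- */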
-static inline u8 HT_TIMING(ide_drive_t *drive)
-{
- return (unsigned long)ide_get_drivedata(drive) & 0x00ff;
-}
-
-#define HT_TIMING_DEFAULT 0xff
-
-/*
- * This routine handles interface switching for the peculiar hardware design
- * on the F.G.I./Holtek HT-6560B VLB IDE interface.
- * The HT-6560B can only enable one IDE port at a time, and requires a
- * silly sequence (below) whenever we switch between primary and secondary.
- */
-
-/*
- * This routine is invoked from ide.c to prepare for access to a given drive.
- */
-static void ht6560b_dev_select(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
- static u8 current_select = 0;
- static u8 current_timing = 0;
- u8 select, timing;
-
- local_irq_save(flags);
-
- select = HT_CONFIG(drive);
- timing = HT_TIMING(drive);
-
- /*
- * Need to enforce prefetch sometimes because otherwise
- * it'll hang (hard).
- */
- if (drive->media != ide_disk ||
- (drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- select |= HT_PREFETCH_MODE;
-
- if (select != current_select || timing != current_timing) {
- current_select = select;
- current_timing = timing;
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- outb(select, HT_CONFIG_PORT);
- /*
- * Set timing for this drive:
- */
- outb(timing, hwif->io_ports.device_addr);
- (void)inb(hwif->io_ports.status_addr);
-#ifdef DEBUG
- printk("ht6560b: %s: select=%#x timing=%#x\n",
- drive->name, select, timing);
-#endif
- }
- local_irq_restore(flags);
-
- outb(drive->select | ATA_DEVICE_OBS, hwif->io_ports.device_addr);
-}
-
-/*
- * Autodetection and initialization of ht6560b
- */
-static int __init try_to_init_ht6560b(void)
-{
- u8 orig_value;
- int i;
-
- /* Autodetect ht6560b */
- if ((orig_value = inb(HT_CONFIG_PORT)) == 0xff)
- return 0;
-
- for (i = 3; i > 0; i--) {
- outb(0x00, HT_CONFIG_PORT);
- if (!((~inb(HT_CONFIG_PORT)) & 0x3f)) {
- outb(orig_value, HT_CONFIG_PORT);
- return 0;
- }
- }
- outb(0x00, HT_CONFIG_PORT);
- if ((~inb(HT_CONFIG_PORT)) & 0x3f) {
- outb(orig_value, HT_CONFIG_PORT);
- return 0;
- }
- /*
- * Ht6560b autodetected
- */
- outb(HT_CONFIG_DEFAULT, HT_CONFIG_PORT);
- outb(HT_TIMING_DEFAULT, 0x1f6); /* Select register */
- (void)inb(0x1f7); /* Status register */
-
- printk("ht6560b " HT6560B_VERSION
- ": chipset detected and initialized"
-#ifdef DEBUG
- " with debug enabled"
-#endif
- "\n"
- );
- return 1;
-}
-
-static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
-{
- int active_time, recovery_time;
- int active_cycles, recovery_cycles;
- int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
-
- if (pio) {
- unsigned int cycle_time;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- /*
- * Just like opti621.c we try to calculate the
- * actual cycle time for recovery and activity
- * according to the system bus speed.
- */
- active_time = t->active;
- recovery_time = cycle_time - active_time - t->setup;
- /*
- * Cycle times should be VESA bus cycles
- */
- active_cycles = (active_time * bus_speed + 999) / 1000;
- recovery_cycles = (recovery_time * bus_speed + 999) / 1000;
- /*
- * Upper and lower limits
- */
- if (active_cycles < 2) active_cycles = 2;
- if (recovery_cycles < 2) recovery_cycles = 2;
- if (active_cycles > 15) active_cycles = 15;
- if (recovery_cycles > 15) recovery_cycles = 0; /* 0==16 */
-
-#ifdef DEBUG
- printk("ht6560b: drive %s setting pio=%d recovery=%d (%dns) active=%d (%dns)\n", drive->name, pio, recovery_cycles, recovery_time, active_cycles, active_time);
-#endif
-
- return (u8)((recovery_cycles << 4) | active_cycles);
- } else {
-
-#ifdef DEBUG
- printk("ht6560b: drive %s setting pio=0\n", drive->name);
-#endif
-
- return HT_TIMING_DEFAULT; /* default setting */
- }
-}
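-/*
- * To illustrate the rounding above (the numbers are hypothetical):
- * a 165 ns active time on a 50 MHz VLB clock becomes
- * (165 * 50 + 999) / 1000 = 9 cycles, i.e. nanosecond times are
- * rounded up to whole bus cycles before being clamped to 2..15.
- */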
-
-static DEFINE_SPINLOCK(ht6560b_lock);
-
-/*
- * Enable/Disable so called prefetch mode
- */
-static void ht_set_prefetch(ide_drive_t *drive, u8 state)
-{
- unsigned long flags, config;
- int t = HT_PREFETCH_MODE << 8;
-
- spin_lock_irqsave(&ht6560b_lock, flags);
-
- config = (unsigned long)ide_get_drivedata(drive);
-
- /*
- * Prefetch mode and IRQ unmasking seem to conflict
- */
- if (state) {
- config |= t; /* enable prefetch mode */
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
- } else {
- config &= ~t; /* disable prefetch mode */
- drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK;
- }
-
- ide_set_drivedata(drive, (void *)config);
-
- spin_unlock_irqrestore(&ht6560b_lock, flags);
-
-#ifdef DEBUG
- printk("ht6560b: drive %s prefetch mode %sabled\n", drive->name, (state ? "en" : "dis"));
-#endif
-}
-
-static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long flags, config;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 timing;
-
- switch (pio) {
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- ht_set_prefetch(drive, pio & 1);
- return;
- }
-
- timing = ht_pio2timings(drive, pio);
-
- spin_lock_irqsave(&ht6560b_lock, flags);
- config = (unsigned long)ide_get_drivedata(drive);
- config &= 0xff00;
- config |= timing;
- ide_set_drivedata(drive, (void *)config);
- spin_unlock_irqrestore(&ht6560b_lock, flags);
-
-#ifdef DEBUG
- printk("ht6560b: drive %s tuned to pio mode %#x timing=%#x\n", drive->name, pio, timing);
-#endif
-}
-
-static void __init ht6560b_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- /* Setting default configurations for drives. */
- unsigned long t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
-
- if (hwif->channel)
- t |= (HT_SECONDARY_IF << 8);
-
- ide_set_drivedata(drive, (void *)t);
-}
-
-static bool probe_ht6560b;
-
-module_param_named(probe, probe_ht6560b, bool, 0);
-MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
-
-static const struct ide_tp_ops ht6560b_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ht6560b_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops ht6560b_port_ops = {
- .init_dev = ht6560b_init_dev,
- .set_pio_mode = ht6560b_set_pio_mode,
-};
-
-static const struct ide_port_info ht6560b_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_ht6560b,
- .tp_ops = &ht6560b_tp_ops,
- .port_ops = &ht6560b_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init ht6560b_init(void)
-{
- if (probe_ht6560b == 0)
- return -ENODEV;
-
- if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
- printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
- __func__);
- return -ENODEV;
- }
-
- if (!try_to_init_ht6560b()) {
- printk(KERN_NOTICE "%s: HBA not found\n", __func__);
- goto release_region;
- }
-
- return ide_legacy_device_add(&ht6560b_port_info, 0);
-
-release_region:
- release_region(HT_CONFIG_PORT, 1);
- return -ENODEV;
-}
-
-module_init(ht6560b_init);
-
-MODULE_AUTHOR("See Local File");
-MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
deleted file mode 100644
index 329c7e4bc9d0..000000000000
--- a/drivers/ide/icside.c
+++ /dev/null
@@ -1,692 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 1996-2004 Russell King.
- *
- * Please note that this platform does not support 32-bit IDE IO.
- */
-
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/errno.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/scatterlist.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/ecard.h>
-
-#define DRV_NAME "icside"
-
-#define ICS_IDENT_OFFSET 0x2280
-
-#define ICS_ARCIN_V5_INTRSTAT 0x0000
-#define ICS_ARCIN_V5_INTROFFSET 0x0004
-#define ICS_ARCIN_V5_IDEOFFSET 0x2800
-#define ICS_ARCIN_V5_IDEALTOFFSET 0x2b80
-#define ICS_ARCIN_V5_IDESTEPPING 6
-
-#define ICS_ARCIN_V6_IDEOFFSET_1 0x2000
-#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
-#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
-#define ICS_ARCIN_V6_IDEALTOFFSET_1 0x2380
-#define ICS_ARCIN_V6_IDEOFFSET_2 0x3000
-#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
-#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
-#define ICS_ARCIN_V6_IDEALTOFFSET_2 0x3380
-#define ICS_ARCIN_V6_IDESTEPPING 6
-
-struct cardinfo {
- unsigned int dataoffset;
- unsigned int ctrloffset;
- unsigned int stepping;
-};
-
-static struct cardinfo icside_cardinfo_v5 = {
- .dataoffset = ICS_ARCIN_V5_IDEOFFSET,
- .ctrloffset = ICS_ARCIN_V5_IDEALTOFFSET,
- .stepping = ICS_ARCIN_V5_IDESTEPPING,
-};
-
-static struct cardinfo icside_cardinfo_v6_1 = {
- .dataoffset = ICS_ARCIN_V6_IDEOFFSET_1,
- .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_1,
- .stepping = ICS_ARCIN_V6_IDESTEPPING,
-};
-
-static struct cardinfo icside_cardinfo_v6_2 = {
- .dataoffset = ICS_ARCIN_V6_IDEOFFSET_2,
- .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_2,
- .stepping = ICS_ARCIN_V6_IDESTEPPING,
-};
-
-struct icside_state {
- unsigned int channel;
- unsigned int enabled;
- void __iomem *irq_port;
- void __iomem *ioc_base;
- unsigned int sel;
- unsigned int type;
- struct ide_host *host;
-};
-
-#define ICS_TYPE_A3IN 0
-#define ICS_TYPE_A3USER 1
-#define ICS_TYPE_V6 3
-#define ICS_TYPE_V5 15
-#define ICS_TYPE_NOTYPE ((unsigned int)-1)
-
-/* ---------------- Version 5 PCB Support Functions --------------------- */
-/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
- * Purpose : enable interrupts from card
- */
-static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
-}
-
-/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
- * Purpose : disable interrupts from card
- */
-static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
-}
-
-static const expansioncard_ops_t icside_ops_arcin_v5 = {
- .irqenable = icside_irqenable_arcin_v5,
- .irqdisable = icside_irqdisable_arcin_v5,
-};
-
-
-/* ---------------- Version 6 PCB Support Functions --------------------- */
-/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
- * Purpose : enable interrupts from card
- */
-static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
- void __iomem *base = state->irq_port;
-
- state->enabled = 1;
-
- switch (state->channel) {
- case 0:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
-}
-
-/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
- * Purpose : disable interrupts from card
- */
-static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- state->enabled = 0;
-
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
-}
-
-/* Prototype: icside_irqprobe(struct expansion_card *ec)
- * Purpose : detect an active interrupt from card
- */
-static int icside_irqpending_arcin_v6(struct expansion_card *ec)
-{
- struct icside_state *state = ec->irq_data;
-
- return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
- readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
-}
-
-static const expansioncard_ops_t icside_ops_arcin_v6 = {
- .irqenable = icside_irqenable_arcin_v6,
- .irqdisable = icside_irqdisable_arcin_v6,
- .irqpending = icside_irqpending_arcin_v6,
-};
-
-/*
- * Handle routing of interrupts. This is called before
- * we write the command to the drive.
- */
-static void icside_maskproc(ide_drive_t *drive, int mask)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned long flags;
-
- local_irq_save(flags);
-
- state->channel = hwif->channel;
-
- if (state->enabled && !mask) {
- switch (hwif->channel) {
- case 0:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
- } else {
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- }
-
- local_irq_restore(flags);
-}
-
-static const struct ide_port_ops icside_v6_no_dma_port_ops = {
- .maskproc = icside_maskproc,
-};
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
-/*
- * SG-DMA support.
- *
- * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
- * There is only one DMA controller per card, which means that only
- * one drive can be accessed at one time. NOTE! We do not enforce that
- * here, but we rely on the main IDE driver spotting that both
- * interfaces use the same IRQ, which should guarantee this.
- */
-
-/*
- * Configure the IOMD to give the appropriate timings for the transfer
- * mode being requested. We take the advice of the ATA standards and
- * calculate the cycle time based on the transfer mode and the EIDE
- * MW DMA specs that the drive provides in the IDENTIFY command.
- *
- * We have the following IOMD DMA modes to choose from:
- *
- * Type Active Recovery Cycle
- * A 250 (250) 312 (550) 562 (800)
- * B 187 250 437
- * C 125 (125) 125 (375) 250 (500)
- * D 62 125 187
- *
- * (figures in brackets are actual measured timings)
- *
- * However, we also need to take care of the read/write active and
- * recovery timings:
- *
- * Read Write
- * Mode Active -- Recovery -- Cycle IOMD type
- * MW0 215 50 215 480 A
- * MW1 80 50 50 150 C
- * MW2 70 25 25 120 C
- */
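-/*
- * As a rough illustration, the peak rate reported by
- * icside_set_dma_mode() below is 2000 / cycle_time MB/s: a 250 ns
- * MW1/MW2 cycle reports 8 MB/s, a 480 ns MW0 cycle reports 4 MB/s.
- */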
-static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long cycle_time = 0;
- int use_dma_info = 0;
- const u8 xfer_mode = drive->dma_mode;
-
- switch (xfer_mode) {
- case XFER_MW_DMA_2:
- cycle_time = 250;
- use_dma_info = 1;
- break;
-
- case XFER_MW_DMA_1:
- cycle_time = 250;
- use_dma_info = 1;
- break;
-
- case XFER_MW_DMA_0:
- cycle_time = 480;
- break;
-
- case XFER_SW_DMA_2:
- case XFER_SW_DMA_1:
- case XFER_SW_DMA_0:
- cycle_time = 480;
- break;
- }
-
- /*
- * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
- * take care to note the values in the ID...
- */
- if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
- cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];
-
- ide_set_drivedata(drive, (void *)cycle_time);
-
- printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
- drive->name, ide_xfer_verbose(xfer_mode),
- 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
-}
-
-static const struct ide_port_ops icside_v6_port_ops = {
- .set_dma_mode = icside_set_dma_mode,
- .maskproc = icside_maskproc,
-};
-
-static void icside_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static int icside_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
-
- disable_dma(ec->dma);
-
- return get_dma_residue(ec->dma) != 0;
-}
-
-static void icside_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
-
- /* We cannot enable DMA on both channels simultaneously. */
- BUG_ON(dma_channel_active(ec->dma));
- enable_dma(ec->dma);
-}
-
-static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned int dma_mode;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- dma_mode = DMA_MODE_WRITE;
- else
- dma_mode = DMA_MODE_READ;
-
- /*
- * We cannot enable DMA on both channels.
- */
- BUG_ON(dma_channel_active(ec->dma));
-
- /*
- * Ensure that we have the right interrupt routed.
- */
- icside_maskproc(drive, 0);
-
- /*
- * Route the DMA signals to the correct interface.
- */
- writeb(state->sel | hwif->channel, state->ioc_base);
-
- /*
- * Select the correct timing for this drive.
- */
- set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));
-
- /*
- * Tell the DMA engine about the SG table and
- * data direction.
- */
- set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
- set_dma_mode(ec->dma, dma_mode);
-
- return 0;
-}
-
-static int icside_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
-
- return readb(state->irq_port +
- (hwif->channel ?
- ICS_ARCIN_V6_INTRSTAT_2 :
- ICS_ARCIN_V6_INTRSTAT_1)) & 1;
-}
-
-static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- hwif->dmatable_cpu = NULL;
- hwif->dmatable_dma = 0;
-
- return 0;
-}
-
-static const struct ide_dma_ops icside_v6_dma_ops = {
- .dma_host_set = icside_dma_host_set,
- .dma_setup = icside_dma_setup,
- .dma_start = icside_dma_start,
- .dma_end = icside_dma_end,
- .dma_test_irq = icside_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
-};
-#endif
-
-static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- return -EOPNOTSUPP;
-}
-
-static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
- struct cardinfo *info, struct expansion_card *ec)
-{
- unsigned long port = (unsigned long)base + info->dataoffset;
-
- hw->io_ports.data_addr = port;
- hw->io_ports.error_addr = port + (1 << info->stepping);
- hw->io_ports.nsect_addr = port + (2 << info->stepping);
- hw->io_ports.lbal_addr = port + (3 << info->stepping);
- hw->io_ports.lbam_addr = port + (4 << info->stepping);
- hw->io_ports.lbah_addr = port + (5 << info->stepping);
- hw->io_ports.device_addr = port + (6 << info->stepping);
- hw->io_ports.status_addr = port + (7 << info->stepping);
- hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset;
-
- hw->irq = ec->irq;
- hw->dev = &ec->dev;
-}
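-/*
- * With the stepping of 6 used by these cards, consecutive taskfile
- * registers are spaced 1 << 6 = 0x40 bytes apart: data at the base
- * offset, error at +0x40, nsect at +0x80, and so on up to status
- * at +0x1c0.
- */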
-
-static const struct ide_port_info icside_v5_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_acorn,
-};
-
-static int icside_register_v5(struct icside_state *state,
- struct expansion_card *ec)
-{
- void __iomem *base;
- struct ide_host *host;
- struct ide_hw hw, *hws[] = { &hw };
- int ret;
-
- base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
- if (!base)
- return -ENOMEM;
-
- state->irq_port = base;
-
- ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
- ec->irqmask = 1;
-
- ecard_setirq(ec, &icside_ops_arcin_v5, state);
-
- /*
- * Be on the safe side - disable interrupts
- */
- icside_irqdisable_arcin_v5(ec, 0);
-
- icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
-
- host = ide_host_alloc(&icside_v5_port_info, hws, 1);
- if (host == NULL)
- return -ENODEV;
-
- state->host = host;
-
- ecard_set_drvdata(ec, state);
-
- ret = ide_host_register(host, &icside_v5_port_info, hws);
- if (ret)
- goto err_free;
-
- return 0;
-err_free:
- ide_host_free(host);
- ecard_set_drvdata(ec, NULL);
- return ret;
-}
-
-static const struct ide_port_info icside_v6_port_info = {
- .init_dma = icside_dma_off_init,
- .port_ops = &icside_v6_no_dma_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
- .mwdma_mask = ATA_MWDMA2,
- .swdma_mask = ATA_SWDMA2,
- .chipset = ide_acorn,
-};
-
-static int icside_register_v6(struct icside_state *state,
- struct expansion_card *ec)
-{
- void __iomem *ioc_base, *easi_base;
- struct ide_host *host;
- unsigned int sel = 0;
- int ret;
- struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
- struct ide_port_info d = icside_v6_port_info;
-
- ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (!ioc_base) {
- ret = -ENOMEM;
- goto out;
- }
-
- easi_base = ioc_base;
-
- if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
- easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
- if (!easi_base) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Enable access to the EASI region.
- */
- sel = 1 << 5;
- }
-
- writeb(sel, ioc_base);
-
- ecard_setirq(ec, &icside_ops_arcin_v6, state);
-
- state->irq_port = easi_base;
- state->ioc_base = ioc_base;
- state->sel = sel;
-
- /*
- * Be on the safe side - disable interrupts
- */
- icside_irqdisable_arcin_v6(ec, 0);
-
- icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
- icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
-
- host = ide_host_alloc(&d, hws, 2);
- if (host == NULL)
- return -ENODEV;
-
- state->host = host;
-
- ecard_set_drvdata(ec, state);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
- if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
- d.init_dma = icside_dma_init;
- d.port_ops = &icside_v6_port_ops;
- d.dma_ops = &icside_v6_dma_ops;
- }
-#endif
-
- ret = ide_host_register(host, &d, hws);
- if (ret)
- goto err_free;
-
- return 0;
-err_free:
- ide_host_free(host);
- if (d.dma_ops)
- free_dma(ec->dma);
- ecard_set_drvdata(ec, NULL);
-out:
- return ret;
-}
-
-static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
-{
- struct icside_state *state;
- void __iomem *idmem;
- int ret;
-
- ret = ecard_request_resources(ec);
- if (ret)
- goto out;
-
- state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
- if (!state) {
- ret = -ENOMEM;
- goto release;
- }
-
- state->type = ICS_TYPE_NOTYPE;
-
- idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (idmem) {
- unsigned int type;
-
- type = readb(idmem + ICS_IDENT_OFFSET) & 1;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
- ecardm_iounmap(ec, idmem);
-
- state->type = type;
- }
-
- switch (state->type) {
- case ICS_TYPE_A3IN:
- dev_warn(&ec->dev, "A3IN unsupported\n");
- ret = -ENODEV;
- break;
-
- case ICS_TYPE_A3USER:
- dev_warn(&ec->dev, "A3USER unsupported\n");
- ret = -ENODEV;
- break;
-
- case ICS_TYPE_V5:
- ret = icside_register_v5(state, ec);
- break;
-
- case ICS_TYPE_V6:
- ret = icside_register_v6(state, ec);
- break;
-
- default:
- dev_warn(&ec->dev, "unknown interface type\n");
- ret = -ENODEV;
- break;
- }
-
- if (ret == 0)
- goto out;
-
- kfree(state);
- release:
- ecard_release_resources(ec);
- out:
- return ret;
-}
-
-static void icside_remove(struct expansion_card *ec)
-{
- struct icside_state *state = ecard_get_drvdata(ec);
-
- switch (state->type) {
- case ICS_TYPE_V5:
- /* FIXME: tell IDE to stop using the interface */
-
- /* Disable interrupts */
- icside_irqdisable_arcin_v5(ec, 0);
- break;
-
- case ICS_TYPE_V6:
- /* FIXME: tell IDE to stop using the interface */
- if (ec->dma != NO_DMA)
- free_dma(ec->dma);
-
- /* Disable interrupts */
- icside_irqdisable_arcin_v6(ec, 0);
-
- /* Reset the ROM pointer/EASI selection */
- writeb(0, state->ioc_base);
- break;
- }
-
- ecard_set_drvdata(ec, NULL);
-
- kfree(state);
- ecard_release_resources(ec);
-}
-
-static void icside_shutdown(struct expansion_card *ec)
-{
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned long flags;
-
- /*
- * Disable interrupts from this card. We need to do
- * this before disabling EASI since we may be accessing
- * this register via that region.
- */
- local_irq_save(flags);
- ec->ops->irqdisable(ec, 0);
- local_irq_restore(flags);
-
- /*
- * Reset the ROM pointer so that we can read the ROM
- * after a soft reboot. This also disables access to
- * the IDE taskfile via the EASI region.
- */
- if (state->ioc_base)
- writeb(0, state->ioc_base);
-}
-
-static const struct ecard_id icside_ids[] = {
- { MANU_ICS, PROD_ICS_IDE },
- { MANU_ICS2, PROD_ICS2_IDE },
- { 0xffff, 0xffff }
-};
-
-static struct ecard_driver icside_driver = {
- .probe = icside_probe,
- .remove = icside_remove,
- .shutdown = icside_shutdown,
- .id_table = icside_ids,
- .drv = {
- .name = "icside",
- },
-};
-
-static int __init icside_init(void)
-{
- return ecard_register_driver(&icside_driver);
-}
-
-static void __exit icside_exit(void)
-{
- ecard_remove_driver(&icside_driver);
-}
-
-MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ICS IDE driver");
-
-module_init(icside_init);
-module_exit(icside_exit);
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
deleted file mode 100644
index 06c6215e0cbe..000000000000
--- a/drivers/ide/ide-4drives.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "ide-4drives"
-
-static bool probe_4drives;
-
-module_param_named(probe, probe_4drives, bool, 0);
-MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
-
-static void ide_4drives_init_dev(ide_drive_t *drive)
-{
- if (drive->hwif->channel)
- drive->select ^= 0x20;
-}
-
-static const struct ide_port_ops ide_4drives_port_ops = {
- .init_dev = ide_4drives_init_dev,
-};
-
-static const struct ide_port_info ide_4drives_port_info = {
- .port_ops = &ide_4drives_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
- IDE_HFLAG_4DRIVES,
- .chipset = ide_4drives,
-};
-
-static int __init ide_4drives_init(void)
-{
- unsigned long base = 0x1f0, ctl = 0x3f6;
- struct ide_hw hw, *hws[] = { &hw, &hw };
-
- if (probe_4drives == 0)
- return -ENODEV;
-
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw, base, ctl);
- hw.irq = 14;
-
- return ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
-}
-
-module_init(ide_4drives_init);
-
-MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("generic IDE chipset with 4 drives/port support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
deleted file mode 100644
index 05e18d658141..000000000000
--- a/drivers/ide/ide-acpi.c
+++ /dev/null
@@ -1,622 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Provides ACPI support for IDE drives.
- *
- * Copyright (C) 2005 Intel Corp.
- * Copyright (C) 2005 Randy Dunlap
- * Copyright (C) 2006 SUSE Linux Products GmbH
- * Copyright (C) 2006 Hannes Reinecke
- */
-
-#include <linux/acpi.h>
-#include <linux/ata.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ide.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
-#include <linux/module.h>
-
-#define REGS_PER_GTF 7
-
-struct GTM_buffer {
- u32 PIO_speed0;
- u32 DMA_speed0;
- u32 PIO_speed1;
- u32 DMA_speed1;
- u32 GTM_flags;
-};
-
-struct ide_acpi_drive_link {
- acpi_handle obj_handle;
- u8 idbuff[512];
-};
-
-struct ide_acpi_hwif_link {
- ide_hwif_t *hwif;
- acpi_handle obj_handle;
- struct GTM_buffer gtm;
- struct ide_acpi_drive_link master;
- struct ide_acpi_drive_link slave;
-};
-
-#undef DEBUGGING
-/* note: adds function name and KERN_DEBUG */
-#ifdef DEBUGGING
-#define DEBPRINT(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
-#else
-#define DEBPRINT(fmt, args...) do {} while (0)
-#endif /* DEBUGGING */
-
-static bool ide_noacpi;
-module_param_named(noacpi, ide_noacpi, bool, 0);
-MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
-
-static bool ide_acpigtf;
-module_param_named(acpigtf, ide_acpigtf, bool, 0);
-MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
-
-static bool ide_acpionboot;
-module_param_named(acpionboot, ide_acpionboot, bool, 0);
-MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
-
-static bool ide_noacpi_psx;
-static int no_acpi_psx(const struct dmi_system_id *id)
-{
- ide_noacpi_psx = true;
- printk(KERN_NOTICE "%s detected - disabling ACPI _PSx.\n", id->ident);
- return 0;
-}
-
-static const struct dmi_system_id ide_acpi_dmi_table[] = {
- /* Bug 9673. */
- /* We should check if this is because ACPI NVS isn't saved/restored. */
- {
- .callback = no_acpi_psx,
- .ident = "HP nx9005",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies Ltd."),
- DMI_MATCH(DMI_BIOS_VERSION, "KAM1.60")
- },
- },
-
- { } /* terminate list */
-};
-
-int ide_acpi_init(void)
-{
- dmi_check_system(ide_acpi_dmi_table);
- return 0;
-}
-
-bool ide_port_acpi(ide_hwif_t *hwif)
-{
- return ide_noacpi == 0 && hwif->acpidata;
-}
-
-static acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
-{
- struct acpi_device *adev;
-
- if (!handle || acpi_bus_get_device(handle, &adev))
- return NULL;
-
- adev = acpi_find_child_device(adev, addr, false);
- return adev ? adev->handle : NULL;
-}
-
-/**
- * ide_get_dev_handle - finds acpi_handle and PCI device.function
- * @dev: device to locate
- * @handle: returned acpi_handle for @dev
- * @pcidevfn: return PCI device.func for @dev
- *
- * Returns the ACPI object handle to the corresponding PCI device.
- *
- * Returns 0 on success, <0 on error.
- */
-static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
- u64 *pcidevfn)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- unsigned int bus, devnum, func;
- u64 addr;
- acpi_handle dev_handle;
- acpi_status status;
- struct acpi_device_info *dinfo = NULL;
- int ret = -ENODEV;
-
- bus = pdev->bus->number;
- devnum = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- /* ACPI _ADR encoding for PCI bus: */
- addr = (u64)(devnum << 16 | func);
-
- DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
-
- dev_handle = ACPI_HANDLE(dev);
- if (!dev_handle) {
- DEBPRINT("no acpi handle for device\n");
- goto err;
- }
-
- status = acpi_get_object_info(dev_handle, &dinfo);
- if (ACPI_FAILURE(status)) {
- DEBPRINT("get_object_info for device failed\n");
- goto err;
- }
- if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
- dinfo->address == addr) {
- *pcidevfn = addr;
- *handle = dev_handle;
- } else {
- DEBPRINT("get_object_info for device has wrong "
- " address: %llu, should be %u\n",
- dinfo ? (unsigned long long)dinfo->address : -1ULL,
- (unsigned int)addr);
- goto err;
- }
-
- DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n",
- devnum, func, (unsigned long long)addr, *handle);
- ret = 0;
-err:
- kfree(dinfo);
- return ret;
-}
-
-/**
- * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif
- * @hwif: device to locate
- *
- * Retrieves the object handle for a given hwif.
- *
- * Returns handle on success, 0 on error.
- */
-static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
-{
- struct device *dev = hwif->gendev.parent;
- acpi_handle dev_handle;
- u64 pcidevfn;
- acpi_handle chan_handle;
- int err;
-
- DEBPRINT("ENTER: device %s\n", hwif->name);
-
- if (!dev) {
- DEBPRINT("no PCI device for %s\n", hwif->name);
- return NULL;
- }
-
- err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn);
- if (err < 0) {
- DEBPRINT("ide_get_dev_handle failed (%d)\n", err);
- return NULL;
- }
-
- /* get child objects of dev_handle == channel objects,
- * + _their_ children == drive objects */
- /* channel is hwif->channel */
- chan_handle = acpi_get_child(dev_handle, hwif->channel);
- DEBPRINT("chan adr=%d: handle=0x%p\n",
- hwif->channel, chan_handle);
-
- return chan_handle;
-}
-
-/**
- * do_drive_get_GTF - get the drive bootup default taskfile settings
- * @drive: the drive for which the taskfile settings should be retrieved
- * @gtf_length: number of bytes of _GTF data returned at @gtf_address
- * @gtf_address: buffer containing _GTF taskfile arrays
- *
- * The _GTF method has no input parameters.
- * It returns a variable number of register set values (registers
- * hex 1F1..1F7, taskfiles).
- * The <variable number> is not known in advance, so have ACPI-CA
- * allocate the buffer as needed and return it, then free it later.
- *
- * The returned @gtf_length and @gtf_address are only valid if the
- * function return value is 0.
- */
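-/*
- * For example, a hypothetical 14-byte _GTF buffer holds two 7-byte
- * (REGS_PER_GTF) register sets, which do_drive_set_taskfiles() below
- * replays as two taskfile commands.
- */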
-static int do_drive_get_GTF(ide_drive_t *drive,
- unsigned int *gtf_length, unsigned long *gtf_address,
- unsigned long *obj_loc)
-{
- acpi_status status;
- struct acpi_buffer output;
- union acpi_object *out_obj;
- int err = -ENODEV;
-
- *gtf_length = 0;
- *gtf_address = 0UL;
- *obj_loc = 0UL;
-
- if (!drive->acpidata->obj_handle) {
- DEBPRINT("No ACPI object found for %s\n", drive->name);
- goto out;
- }
-
- /* Setting up output buffer */
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
-
- /* _GTF has no input parameters */
- err = -EIO;
- status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF",
- NULL, &output);
- if (ACPI_FAILURE(status)) {
- printk(KERN_DEBUG
- "%s: Run _GTF error: status = 0x%x\n",
- __func__, status);
- goto out;
- }
-
- if (!output.length || !output.pointer) {
- DEBPRINT("Run _GTF: "
- "length or ptr is NULL (0x%llx, 0x%p)\n",
- (unsigned long long)output.length,
- output.pointer);
- goto out;
- }
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- DEBPRINT("Run _GTF: error: "
- "expected object type of ACPI_TYPE_BUFFER, "
- "got 0x%x\n", out_obj->type);
- err = -ENOENT;
- kfree(output.pointer);
- goto out;
- }
-
- if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
- out_obj->buffer.length % REGS_PER_GTF) {
- printk(KERN_ERR
- "%s: unexpected GTF length (%d) or addr (0x%p)\n",
- __func__, out_obj->buffer.length,
- out_obj->buffer.pointer);
- err = -ENOENT;
- kfree(output.pointer);
- goto out;
- }
-
- *gtf_length = out_obj->buffer.length;
- *gtf_address = (unsigned long)out_obj->buffer.pointer;
- *obj_loc = (unsigned long)out_obj;
- DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
- *gtf_length, *gtf_address, *obj_loc);
- err = 0;
-out:
- return err;
-}
-
-/**
- * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
- * @drive: the drive to which the taskfile command should be sent
- * @gtf_length: total number of bytes of _GTF taskfiles
- * @gtf_address: location of _GTF taskfile arrays
- *
- * Write {gtf_address, length gtf_length} in groups of
- * REGS_PER_GTF bytes.
- */
-static int do_drive_set_taskfiles(ide_drive_t *drive,
- unsigned int gtf_length,
- unsigned long gtf_address)
-{
- int rc = 0, err;
- int gtf_count = gtf_length / REGS_PER_GTF;
- int ix;
-
- DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n",
- gtf_length, gtf_length, gtf_count, gtf_address);
-
- /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */
- for (ix = 0; ix < gtf_count; ix++) {
- u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF);
- struct ide_cmd cmd;
-
- DEBPRINT("(0x1f1-1f7): "
- "hex: %02x %02x %02x %02x %02x %02x %02x\n",
- gtf[0], gtf[1], gtf[2],
- gtf[3], gtf[4], gtf[5], gtf[6]);
-
- if (!ide_acpigtf) {
- DEBPRINT("_GTF execution disabled\n");
- continue;
- }
-
- /* convert GTF to taskfile */
- memset(&cmd, 0, sizeof(cmd));
- memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF);
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- err = ide_no_data_taskfile(drive, &cmd);
- if (err) {
- printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
- __func__, err);
- rc = err;
- }
- }
-
- return rc;
-}
-
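-/*
- * Illustrative sketch, not part of the original driver: each
- * REGS_PER_GTF-byte group returned by _GTF is copied straight onto
- * the taskfile starting at the feature register (see the memcpy()
- * in do_drive_set_taskfiles() above), so one group can be pictured
- * as the hypothetical layout below, covering ATA registers
- * 0x1f1..0x1f7 in order.
- */
-#if 0 /* documentation only */
-struct gtf_reg_group {
- u8 feature; /* 0x1f1 */
- u8 nsect; /* 0x1f2 */
- u8 lbal; /* 0x1f3 */
- u8 lbam; /* 0x1f4 */
- u8 lbah; /* 0x1f5 */
- u8 device; /* 0x1f6 */
- u8 command; /* 0x1f7 */
-};
-#endif
-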
-/**
- * ide_acpi_exec_tfs - get then write drive taskfile settings
- * @drive: the drive for which the taskfile settings should be
- * written.
- *
- * According to the ACPI spec this should be called after _STM
- * has been evaluated for the interface. Some ACPI vendors interpret
- * that as a hard requirement and modify the taskfile according
- * to the Identify Drive information passed down with _STM.
- * So one should really make sure to call this only after _STM has
- * been executed.
- */
-int ide_acpi_exec_tfs(ide_drive_t *drive)
-{
- int ret;
- unsigned int gtf_length;
- unsigned long gtf_address;
- unsigned long obj_loc;
-
- DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
-
- ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
- if (ret < 0) {
- DEBPRINT("get_GTF error (%d)\n", ret);
- return ret;
- }
-
- DEBPRINT("call set_taskfiles, drive=%s\n", drive->name);
-
- ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address);
- kfree((void *)obj_loc);
- if (ret < 0) {
- DEBPRINT("set_taskfiles error (%d)\n", ret);
- }
-
- DEBPRINT("ret=%d\n", ret);
-
- return ret;
-}
-
-/**
- * ide_acpi_get_timing - get the channel (controller) timings
- * @hwif: target IDE interface (channel)
- *
- * This function executes the _GTM ACPI method for the target channel.
- */
-void ide_acpi_get_timing(ide_hwif_t *hwif)
-{
- acpi_status status;
- struct acpi_buffer output;
- union acpi_object *out_obj;
-
- /* Setting up output buffer for _GTM */
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
-
- /* _GTM has no input parameters */
- status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM",
- NULL, &output);
-
- DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n",
- status, output.pointer,
- (unsigned long long)output.length);
-
- if (ACPI_FAILURE(status)) {
- DEBPRINT("Run _GTM error: status = 0x%x\n", status);
- return;
- }
-
- if (!output.length || !output.pointer) {
- DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n",
- (unsigned long long)output.length,
- output.pointer);
- kfree(output.pointer);
- return;
- }
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- DEBPRINT("Run _GTM: error: "
- "expected object type of ACPI_TYPE_BUFFER, "
- "got 0x%x\n", out_obj->type);
- kfree(output.pointer);
- return;
- }
-
- if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
- out_obj->buffer.length != sizeof(struct GTM_buffer)) {
- printk(KERN_ERR
- "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
- "addr (0x%p)\n",
- __func__, out_obj->buffer.length,
- sizeof(struct GTM_buffer), out_obj->buffer.pointer);
- kfree(output.pointer);
- return;
- }
-
- memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer,
- sizeof(struct GTM_buffer));
-
- DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%zx\n",
- out_obj->buffer.pointer, out_obj->buffer.length,
- sizeof(struct GTM_buffer));
-
- DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- hwif->acpidata->gtm.PIO_speed0,
- hwif->acpidata->gtm.DMA_speed0,
- hwif->acpidata->gtm.PIO_speed1,
- hwif->acpidata->gtm.DMA_speed1,
- hwif->acpidata->gtm.GTM_flags);
-
- kfree(output.pointer);
-}
-
-/**
- * ide_acpi_push_timing - set the channel (controller) timings
- * @hwif: target IDE interface (channel)
- *
- * This function executes the _STM ACPI method for the target channel.
- *
- * _STM requires Identify Drive data, which has to be passed as an argument.
- * Unfortunately drive->id is a mangled version which we can't readily
- * use; hence we'll get the information afresh.
- */
-void ide_acpi_push_timing(ide_hwif_t *hwif)
-{
- acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[3];
- struct ide_acpi_drive_link *master = &hwif->acpidata->master;
- struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
-
- /* Give the GTM buffer + drive Identify data to the channel via the
- * _STM method: */
- /* setup input parameters buffer for _STM */
- input.count = 3;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = sizeof(struct GTM_buffer);
- in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
- in_params[1].type = ACPI_TYPE_BUFFER;
- in_params[1].buffer.length = ATA_ID_WORDS * 2;
- in_params[1].buffer.pointer = (u8 *)&master->idbuff;
- in_params[2].type = ACPI_TYPE_BUFFER;
- in_params[2].buffer.length = ATA_ID_WORDS * 2;
- in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
- /* Output buffer: _STM has no output */
-
- status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM",
- &input, NULL);
-
- if (ACPI_FAILURE(status)) {
- DEBPRINT("Run _STM error: status = 0x%x\n", status);
- }
- DEBPRINT("_STM status: %d\n", status);
-}
-
-/**
- * ide_acpi_set_state - set the channel power state
- * @hwif: target IDE interface
- * @on: state, on/off
- *
- * This function executes the _PS0/_PS3 ACPI method to set the power state.
- * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
- */
-void ide_acpi_set_state(ide_hwif_t *hwif, int on)
-{
- ide_drive_t *drive;
- int i;
-
- if (ide_noacpi_psx)
- return;
-
- DEBPRINT("ENTER:\n");
-
- /* power up the channel first and then the drives; vice versa for power off */
- if (on)
- acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- if (drive->acpidata->obj_handle)
- acpi_bus_set_power(drive->acpidata->obj_handle,
- on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
- }
-
- if (!on)
- acpi_bus_set_power(hwif->acpidata->obj_handle,
- ACPI_STATE_D3_COLD);
-}
-
-/**
- * ide_acpi_init_port - initialize the ACPI link for an IDE interface
- * @hwif: target IDE interface (channel)
- *
- * The ACPI spec is not quite clear about when the drive identify
- * buffer should be obtained. Calling IDENTIFY DEVICE during shutdown
- * is not the best of ideas as the drive might already have been put
- * to sleep. And obviously we can't call it during resume.
- * So we get the information during startup; but this means that
- * any changes during run-time will be lost after resume.
- */
-void ide_acpi_init_port(ide_hwif_t *hwif)
-{
- hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL);
- if (!hwif->acpidata)
- return;
-
- hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif);
- if (!hwif->acpidata->obj_handle) {
- DEBPRINT("no ACPI object for %s found\n", hwif->name);
- kfree(hwif->acpidata);
- hwif->acpidata = NULL;
- }
-}
-
-void ide_acpi_port_init_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i, err;
-
- if (hwif->acpidata == NULL)
- return;
-
- /*
- * The ACPI spec mandates that we send information
- * for both drives, regardless of whether they are connected
- * or not.
- */
- hwif->devices[0]->acpidata = &hwif->acpidata->master;
- hwif->devices[1]->acpidata = &hwif->acpidata->slave;
-
- /* get _ADR info for each device */
- ide_port_for_each_present_dev(i, drive, hwif) {
- acpi_handle dev_handle;
-
- DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
- drive->name, hwif->channel, drive->dn & 1);
-
- /* TBD: could also check ACPI object VALID bits */
- dev_handle = acpi_get_child(hwif->acpidata->obj_handle,
- drive->dn & 1);
-
- DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle);
-
- drive->acpidata->obj_handle = dev_handle;
- }
-
- /* send IDENTIFY for each device */
- ide_port_for_each_present_dev(i, drive, hwif) {
- err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff);
- if (err)
- DEBPRINT("identify device %s failed (%d)\n",
- drive->name, err);
- }
-
- if (ide_noacpi || ide_acpionboot == 0) {
- DEBPRINT("ACPI methods disabled on boot\n");
- return;
- }
-
- /* ACPI _PS0 before _STM */
- ide_acpi_set_state(hwif, 1);
- /*
- * ACPI requires us to call _STM on startup
- */
- ide_acpi_get_timing(hwif);
- ide_acpi_push_timing(hwif);
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_acpi_exec_tfs(drive);
- }
-}
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
deleted file mode 100644
index a1ce9f5ac3aa..000000000000
--- a/drivers/ide/ide-atapi.c
+++ /dev/null
@@ -1,756 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ATAPI support.
- */
-
-#include <linux/kernel.h>
-#include <linux/cdrom.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-
-#include <scsi/scsi.h>
-
-#define DRV_NAME "ide-atapi"
-#define PFX DRV_NAME ": "
-
-#ifdef DEBUG
-#define debug_log(fmt, args...) \
- printk(KERN_INFO "ide: " fmt, ## args)
-#else
-#define debug_log(fmt, args...) do {} while (0)
-#endif
-
-#define ATAPI_MIN_CDB_BYTES 12
-
-static inline int dev_is_idecd(ide_drive_t *drive)
-{
- return drive->media == ide_cdrom || drive->media == ide_optical;
-}
-
-/*
- * Check whether we can support a device,
- * based on the ATAPI IDENTIFY command results.
- */
-int ide_check_atapi_device(ide_drive_t *drive, const char *s)
-{
- u16 *id = drive->id;
- u8 gcw[2], protocol, device_type, removable, drq_type, packet_size;
-
- *((u16 *)&gcw) = id[ATA_ID_CONFIG];
-
- protocol = (gcw[1] & 0xC0) >> 6;
- device_type = gcw[1] & 0x1F;
- removable = (gcw[0] & 0x80) >> 7;
- drq_type = (gcw[0] & 0x60) >> 5;
- packet_size = gcw[0] & 0x03;
-
-#ifdef CONFIG_PPC
- /* kludge for Apple PowerBook internal zip */
- if (drive->media == ide_floppy && device_type == 5 &&
- !strstr((char *)&id[ATA_ID_PROD], "CD-ROM") &&
- strstr((char *)&id[ATA_ID_PROD], "ZIP"))
- device_type = 0;
-#endif
-
- if (protocol != 2)
- printk(KERN_ERR "%s: %s: protocol (0x%02x) is not ATAPI\n",
- s, drive->name, protocol);
- else if ((drive->media == ide_floppy && device_type != 0) ||
- (drive->media == ide_tape && device_type != 1))
- printk(KERN_ERR "%s: %s: invalid device type (0x%02x)\n",
- s, drive->name, device_type);
- else if (removable == 0)
- printk(KERN_ERR "%s: %s: the removable flag is not set\n",
- s, drive->name);
- else if (drive->media == ide_floppy && drq_type == 3)
- printk(KERN_ERR "%s: %s: sorry, DRQ type (0x%02x) not "
- "supported\n", s, drive->name, drq_type);
- else if (packet_size != 0)
- printk(KERN_ERR "%s: %s: packet size (0x%02x) is not 12 "
- "bytes\n", s, drive->name, packet_size);
- else
- return 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_check_atapi_device);
-
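-/*
- * For orientation (an illustrative sketch, not from the original
- * file): ide_check_atapi_device() above decodes the IDENTIFY general
- * configuration word (word 0); the gcw[] byte accesses assume a
- * little-endian id[] buffer. An endian-neutral equivalent of the
- * same decode would be:
- */
-#if 0 /* illustration only */
- u16 cfg = id[ATA_ID_CONFIG];
- u8 protocol = (cfg >> 14) & 0x3; /* 2 == ATAPI */
- u8 device_type = (cfg >> 8) & 0x1f; /* 5 == CD-ROM */
- u8 removable = (cfg >> 7) & 0x1;
- u8 drq_type = (cfg >> 5) & 0x3;
- u8 packet_size = cfg & 0x3; /* 0 == 12-byte CDBs */
-#endif
-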
-void ide_init_pc(struct ide_atapi_pc *pc)
-{
- memset(pc, 0, sizeof(*pc));
-}
-EXPORT_SYMBOL_GPL(ide_init_pc);
-
-/*
- * Add a special packet command request to the tail of the request queue,
- * and wait for it to be serviced.
- */
-int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
- struct ide_atapi_pc *pc, void *buf, unsigned int bufflen)
-{
- struct request *rq;
- int error;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- ide_req(rq)->special = pc;
-
- if (buf && bufflen) {
- error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
- GFP_NOIO);
- if (error)
- goto put_req;
- }
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
- if (drive->media == ide_tape)
- scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
- blk_execute_rq(disk, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-put_req:
- blk_put_request(rq);
- return error;
-}
-EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
-
-int ide_do_test_unit_ready(ide_drive_t *drive, struct gendisk *disk)
-{
- struct ide_atapi_pc pc;
-
- ide_init_pc(&pc);
- pc.c[0] = TEST_UNIT_READY;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_do_test_unit_ready);
-
-int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start)
-{
- struct ide_atapi_pc pc;
-
- ide_init_pc(&pc);
- pc.c[0] = START_STOP;
- pc.c[4] = start;
-
- if (drive->media == ide_tape)
- pc.flags |= PC_FLAG_WAIT_FOR_DSC;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_do_start_stop);
-
-int ide_set_media_lock(ide_drive_t *drive, struct gendisk *disk, int on)
-{
- struct ide_atapi_pc pc;
-
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
- return 0;
-
- ide_init_pc(&pc);
- pc.c[0] = ALLOW_MEDIUM_REMOVAL;
- pc.c[4] = on;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_set_media_lock);
-
-void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = REQUEST_SENSE;
- if (drive->media == ide_floppy) {
- pc->c[4] = 255;
- pc->req_xfer = 18;
- } else {
- pc->c[4] = 20;
- pc->req_xfer = 20;
- }
-}
-EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq)
-{
- struct request_sense *sense = &drive->sense_data;
- struct request *sense_rq;
- struct scsi_request *req;
- unsigned int cmd_len, sense_len;
- int err;
-
- switch (drive->media) {
- case ide_floppy:
- cmd_len = 255;
- sense_len = 18;
- break;
- case ide_tape:
- cmd_len = 20;
- sense_len = 20;
- break;
- default:
- cmd_len = 18;
- sense_len = 18;
- }
-
- BUG_ON(sense_len > sizeof(*sense));
-
- if (ata_sense_request(rq) || drive->sense_rq_armed)
- return;
-
- sense_rq = drive->sense_rq;
- if (!sense_rq) {
- sense_rq = blk_mq_alloc_request(drive->queue, REQ_OP_DRV_IN,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
- drive->sense_rq = sense_rq;
- }
- req = scsi_req(sense_rq);
-
- memset(sense, 0, sizeof(*sense));
-
- scsi_req_init(req);
-
- err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
- GFP_NOIO);
- if (unlikely(err)) {
- if (printk_ratelimit())
- printk(KERN_WARNING PFX "%s: failed to map sense "
- "buffer\n", drive->name);
- blk_mq_free_request(sense_rq);
- drive->sense_rq = NULL;
- return;
- }
-
- sense_rq->rq_disk = rq->rq_disk;
- sense_rq->cmd_flags = REQ_OP_DRV_IN;
- ide_req(sense_rq)->type = ATA_PRIV_SENSE;
-
- req->cmd[0] = GPCMD_REQUEST_SENSE;
- req->cmd[4] = cmd_len;
- if (drive->media == ide_tape)
- req->cmd[13] = REQ_IDETAPE_PC1;
-
- drive->sense_rq_armed = true;
-}
-EXPORT_SYMBOL_GPL(ide_prep_sense);
-
-int ide_queue_sense_rq(ide_drive_t *drive, void *special)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *sense_rq;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- /* deferred failure from ide_prep_sense() */
- if (!drive->sense_rq_armed) {
- printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
- drive->name);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return -ENOMEM;
- }
-
- sense_rq = drive->sense_rq;
- ide_req(sense_rq)->special = special;
- drive->sense_rq_armed = false;
-
- drive->hwif->rq = NULL;
-
- ide_insert_request_head(drive, sense_rq);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
-
-/*
- * Called when an error was detected during the last packet command.
- * We queue a request sense packet command at the head of the request
- * queue.
- */
-void ide_retry_pc(ide_drive_t *drive)
-{
- struct request *failed_rq = drive->hwif->rq;
- struct request *sense_rq = drive->sense_rq;
- struct ide_atapi_pc *pc = &drive->request_sense_pc;
-
- (void)ide_read_error(drive);
-
- /* init pc from sense_rq */
- ide_init_pc(pc);
- memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
-
- if (drive->media == ide_tape)
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
-
- /*
- * Push back the failed request and put request sense on top
- * of it. The failed command will be retried after sense data
- * is acquired.
- */
- drive->hwif->rq = NULL;
- ide_requeue_and_plug(drive, failed_rq);
- if (ide_queue_sense_rq(drive, pc))
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
-}
-EXPORT_SYMBOL_GPL(ide_retry_pc);
-
-int ide_cd_expiry(ide_drive_t *drive)
-{
- struct request *rq = drive->hwif->rq;
- unsigned long wait = 0;
-
- debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
-
- /*
- * Some commands are *slow* and normally take a long time to complete.
- * Usually we can use the ATAPI "disconnect" to bypass this, but not all
- * commands/drives support that. Let ide_timer_expiry keep polling us
- * for these.
- */
- switch (scsi_req(rq)->cmd[0]) {
- case GPCMD_BLANK:
- case GPCMD_FORMAT_UNIT:
- case GPCMD_RESERVE_RZONE_TRACK:
- case GPCMD_CLOSE_TRACK:
- case GPCMD_FLUSH_CACHE:
- wait = ATAPI_WAIT_PC;
- break;
- default:
- if (!(rq->rq_flags & RQF_QUIET))
- printk(KERN_INFO PFX "cmd 0x%x timed out\n",
- scsi_req(rq)->cmd[0]);
- wait = 0;
- break;
- }
- return wait;
-}
-EXPORT_SYMBOL_GPL(ide_cd_expiry);
-
-int ide_cd_get_xferlen(struct request *rq)
-{
- switch (req_op(rq)) {
- default:
- return 32768;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- return blk_rq_bytes(rq);
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_PC:
- case ATA_PRIV_SENSE:
- return blk_rq_bytes(rq);
- default:
- return 0;
- }
- }
-}
-EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
-
-void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT |
- IDE_VALID_LBAM | IDE_VALID_LBAH);
-
- *bcount = (tf.lbah << 8) | tf.lbam;
- *ireason = tf.nsect & 3;
-}
-EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
-
-/*
- * Check the contents of the interrupt reason register and attempt to recover if
- * there are problems.
- *
- * Returns:
- * - 0 if everything's ok
- * - 1 if the request has to be terminated.
- */
-int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
- int ireason, int rw)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- debug_log("ireason: 0x%x, rw: 0x%x\n", ireason, rw);
-
- if (ireason == (!rw << 1))
- return 0;
- else if (ireason == (rw << 1)) {
- printk(KERN_ERR PFX "%s: %s: wrong transfer direction!\n",
- drive->name, __func__);
-
- if (dev_is_idecd(drive))
- ide_pad_transfer(drive, rw, len);
- } else if (!rw && ireason == ATAPI_COD) {
- if (dev_is_idecd(drive)) {
- /*
- * Some drives (e.g. ASUS) seem to tell us that status
- * info is available. Just read it and ignore it.
- */
- (void)hwif->tp_ops->read_status(hwif);
- return 0;
- }
- } else {
- if (ireason & ATAPI_COD)
- printk(KERN_ERR PFX "%s: CoD != 0 in %s\n", drive->name,
- __func__);
-
- /* drive wants a command packet, or invalid ireason... */
- printk(KERN_ERR PFX "%s: %s: bad interrupt reason 0x%02x\n",
- drive->name, __func__, ireason);
- }
-
- if (dev_is_idecd(drive) && ata_pc_request(rq))
- rq->rq_flags |= RQF_FAILED;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(ide_check_ireason);
-
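-/*
- * Quick reference (illustrative, not part of the original file): in
- * the interrupt reason register, bit 0 is CoD (1 == command packet,
- * 0 == data) and bit 1 is IO (1 == to host, 0 == to device), giving:
- *
- * (IO,CoD) == (0,0) host writes data to the device
- * (IO,CoD) == (1,0) host reads data from the device
- * (IO,CoD) == (0,1) device expects the command packet
- * (IO,CoD) == (1,1) status phase
- *
- * During a data transfer ide_check_ireason() above therefore expects
- * CoD == 0 with IO matching the transfer direction.
- */
-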
-/*
- * This is the usual interrupt handler which will be called during a packet
- * command. We will transfer some of the data (as requested by the drive)
- * and will re-point interrupt handler to us.
- */
-static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
-{
- struct ide_atapi_pc *pc = drive->pc;
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq = hwif->rq;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- unsigned int timeout, done;
- u16 bcount;
- u8 stat, ireason, dsc = 0;
- u8 write = !!(pc->flags & PC_FLAG_WRITING);
-
- debug_log("Enter %s - interrupt handler\n", __func__);
-
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
-
- /* Clear the interrupt */
- stat = tp_ops->read_status(hwif);
-
- if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- int rc;
-
- drive->waiting_for_dma = 0;
- rc = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
-
- if (rc || (drive->media == ide_tape && (stat & ATA_ERR))) {
- if (drive->media == ide_floppy)
- printk(KERN_ERR PFX "%s: DMA %s error\n",
- drive->name, rq_data_dir(pc->rq)
- ? "write" : "read");
- pc->flags |= PC_FLAG_DMA_ERROR;
- } else
- scsi_req(rq)->resid_len = 0;
- debug_log("%s: DMA finished\n", drive->name);
- }
-
- /* No more interrupts */
- if ((stat & ATA_DRQ) == 0) {
- int uptodate;
- blk_status_t error;
-
- debug_log("Packet command completed, %d bytes transferred\n",
- blk_rq_bytes(rq));
-
- pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
-
- local_irq_enable_in_hardirq();
-
- if (drive->media == ide_tape &&
- (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
- stat &= ~ATA_ERR;
-
- if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
- /* Error detected */
- debug_log("%s: I/O error\n", drive->name);
-
- if (drive->media != ide_tape)
- scsi_req(pc->rq)->result++;
-
- if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
- printk(KERN_ERR PFX "%s: I/O error in request "
- "sense command\n", drive->name);
- return ide_do_reset(drive);
- }
-
- debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
-
- /* Retry operation */
- ide_retry_pc(drive);
-
- /* queued, but not started */
- return ide_stopped;
- }
- pc->error = 0;
-
- if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
- dsc = 1;
-
- /*
- * ->pc_callback() might change rq->data_len for
- * residual count, cache total length.
- */
- done = blk_rq_bytes(rq);
-
- /* Command finished - Call the callback function */
- uptodate = drive->pc_callback(drive, dsc);
-
- if (uptodate == 0)
- drive->failed_pc = NULL;
-
- if (ata_misc_request(rq)) {
- scsi_req(rq)->result = 0;
- error = BLK_STS_OK;
- } else {
-
- if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
- if (scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- error = uptodate ? BLK_STS_OK : BLK_STS_IOERR;
- }
-
- ide_complete_rq(drive, error, blk_rq_bytes(rq));
- return ide_stopped;
- }
-
- if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
- printk(KERN_ERR PFX "%s: The device wants to issue more "
- "interrupts in DMA mode\n", drive->name);
- ide_dma_off(drive);
- return ide_do_reset(drive);
- }
-
- /* Get the number of bytes to transfer on this interrupt. */
- ide_read_bcount_and_ireason(drive, &bcount, &ireason);
-
- if (ide_check_ireason(drive, rq, bcount, ireason, write))
- return ide_do_reset(drive);
-
- done = min_t(unsigned int, bcount, cmd->nleft);
- ide_pio_bytes(drive, cmd, write, done);
-
- /* Update transferred byte count */
- scsi_req(rq)->resid_len -= done;
-
- bcount -= done;
-
- if (bcount)
- ide_pad_transfer(drive, write, bcount);
-
- debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
-
- /* And set the interrupt handler again */
- ide_set_handler(drive, ide_pc_intr, timeout);
- return ide_started;
-}
-
-static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf,
- u16 bcount, u8 dma)
-{
- cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO;
- cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM |
- IDE_VALID_FEATURE | valid_tf;
- cmd->tf.command = ATA_CMD_PACKET;
- cmd->tf.feature = dma; /* Use PIO/DMA */
- cmd->tf.lbam = bcount & 0xff;
- cmd->tf.lbah = (bcount >> 8) & 0xff;
-}
-
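-/*
- * Worked example (illustrative, not from the original file): for the
- * helper above, a byte count of 0xfc00 (63 KiB) is split across the
- * cylinder registers as lbam == 0x00 and lbah == 0xfc; the device
- * answers through the same registers with how many bytes it will
- * transfer per interrupt.
- */
-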
-static u8 ide_read_ireason(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT);
-
- return tf.nsect & 3;
-}
-
-static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
-{
- int retries = 100;
-
- while (retries-- && ((ireason & ATAPI_COD) == 0 ||
- (ireason & ATAPI_IO))) {
- printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing "
- "a packet command, retrying\n", drive->name);
- udelay(100);
- ireason = ide_read_ireason(drive);
- if (retries == 0) {
- printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing"
- " a packet command, ignoring\n",
- drive->name);
- ireason |= ATAPI_COD;
- ireason &= ~ATAPI_IO;
- }
- }
-
- return ireason;
-}
-
-static int ide_delayed_transfer_pc(ide_drive_t *drive)
-{
- /* Send the actual packet */
- drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12);
-
- /* Timeout for the packet command */
- return WAIT_FLOPPY_CMD;
-}
-
-static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
-{
- struct ide_atapi_pc *pc;
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- ide_expiry_t *expiry;
- unsigned int timeout;
- int cmd_len;
- ide_startstop_t startstop;
- u8 ireason;
-
- if (ide_wait_stat(&startstop, drive, ATA_DRQ, ATA_BUSY, WAIT_READY)) {
- printk(KERN_ERR PFX "%s: Strange, packet command initiated yet "
- "DRQ isn't asserted\n", drive->name);
- return startstop;
- }
-
- if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
- if (drive->dma)
- drive->waiting_for_dma = 1;
- }
-
- if (dev_is_idecd(drive)) {
- /* ATAPI commands get padded out to 12 bytes minimum */
- cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
- if (cmd_len < ATAPI_MIN_CDB_BYTES)
- cmd_len = ATAPI_MIN_CDB_BYTES;
-
- timeout = rq->timeout;
- expiry = ide_cd_expiry;
- } else {
- pc = drive->pc;
-
- cmd_len = ATAPI_MIN_CDB_BYTES;
-
- /*
- * If necessary schedule the packet transfer to occur 'timeout'
- * milliseconds later in ide_delayed_transfer_pc() after the
- * device says it's ready for a packet.
- */
- if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
- timeout = drive->pc_delay;
- expiry = &ide_delayed_transfer_pc;
- } else {
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
- expiry = NULL;
- }
-
- ireason = ide_read_ireason(drive);
- if (drive->media == ide_tape)
- ireason = ide_wait_ireason(drive, ireason);
-
- if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) {
- printk(KERN_ERR PFX "%s: (IO,CoD) != (0,1) while "
- "issuing a packet command\n", drive->name);
-
- return ide_do_reset(drive);
- }
- }
-
- hwif->expiry = expiry;
-
- /* Set the interrupt routine */
- ide_set_handler(drive,
- (dev_is_idecd(drive) ? drive->irq_handler
- : ide_pc_intr),
- timeout);
-
- /* Send the actual packet */
- if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
- hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
-
- /* Begin DMA, if necessary */
- if (dev_is_idecd(drive)) {
- if (drive->dma)
- hwif->dma_ops->dma_start(drive);
- } else {
- if (pc->flags & PC_FLAG_DMA_OK) {
- pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
- hwif->dma_ops->dma_start(drive);
- }
- }
-
- return ide_started;
-}
-
-ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- struct ide_atapi_pc *pc;
- ide_hwif_t *hwif = drive->hwif;
- ide_expiry_t *expiry = NULL;
- struct request *rq = hwif->rq;
- unsigned int timeout, bytes;
- u16 bcount;
- u8 valid_tf;
- u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT);
-
- if (dev_is_idecd(drive)) {
- valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL;
- bcount = ide_cd_get_xferlen(rq);
- expiry = ide_cd_expiry;
- timeout = ATAPI_WAIT_PC;
-
- if (drive->dma)
- drive->dma = !ide_dma_prepare(drive, cmd);
- } else {
- pc = drive->pc;
-
- valid_tf = IDE_VALID_DEVICE;
- bytes = blk_rq_bytes(rq);
- bcount = ((drive->media == ide_tape) ? bytes
- : min_t(unsigned int,
- bytes, 63 * 1024));
-
- /* We haven't transferred any data yet */
- scsi_req(rq)->resid_len = bcount;
-
- if (pc->flags & PC_FLAG_DMA_ERROR) {
- pc->flags &= ~PC_FLAG_DMA_ERROR;
- ide_dma_off(drive);
- }
-
- if (pc->flags & PC_FLAG_DMA_OK)
- drive->dma = !ide_dma_prepare(drive, cmd);
-
- if (!drive->dma)
- pc->flags &= ~PC_FLAG_DMA_OK;
-
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
- }
-
- ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma);
-
- (void)do_rw_taskfile(drive, cmd);
-
- if (drq_int) {
- if (drive->dma)
- drive->waiting_for_dma = 0;
- hwif->expiry = expiry;
- }
-
- ide_execute_command(drive, cmd, ide_transfer_pc, timeout);
-
- return drq_int ? ide_started : ide_transfer_pc(drive);
-}
-EXPORT_SYMBOL_GPL(ide_issue_pc);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
deleted file mode 100644
index cffbcc27a34c..000000000000
--- a/drivers/ide/ide-cd.c
+++ /dev/null
@@ -1,1858 +0,0 @@
-/*
- * ATAPI CD-ROM driver.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2005, 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * See Documentation/cdrom/ide-cd.rst for usage information.
- *
- * Suggestions are welcome. Patches that work are more welcome though. ;-)
- *
- * Documentation:
- * Mt. Fuji (SFF8090 version 4) and ATAPI (SFF-8020i rev 2.6) standards.
- *
- * For historical changelog please see:
- * Documentation/ide/ChangeLog.ide-cd.1994-2004
- */
-
-#define DRV_NAME "ide-cd"
-#define PFX DRV_NAME ": "
-
-#define IDECD_VERSION "5.00"
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched/task_stack.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-
-/* For SCSI -> ATAPI command conversion */
-#include <scsi/scsi.h>
-
-#include <linux/io.h>
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-
-#include "ide-cd.h"
-
-static DEFINE_MUTEX(ide_cd_mutex);
-static DEFINE_MUTEX(idecd_ref_mutex);
-
-static void ide_cd_release(struct device *);
-
-static struct cdrom_info *ide_cd_get(struct gendisk *disk)
-{
- struct cdrom_info *cd = NULL;
-
- mutex_lock(&idecd_ref_mutex);
- cd = ide_drv_g(disk, cdrom_info);
- if (cd) {
- if (ide_device_get(cd->drive))
- cd = NULL;
- else
- get_device(&cd->dev);
-
- }
- mutex_unlock(&idecd_ref_mutex);
- return cd;
-}
-
-static void ide_cd_put(struct cdrom_info *cd)
-{
- ide_drive_t *drive = cd->drive;
-
- mutex_lock(&idecd_ref_mutex);
- put_device(&cd->dev);
- ide_device_put(drive);
- mutex_unlock(&idecd_ref_mutex);
-}
-
-/*
- * Generic packet command support and error handling routines.
- */
-
-/* Mark that we've seen a media change and invalidate our internal buffers. */
-static void cdrom_saw_media_change(ide_drive_t *drive)
-{
- drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
-}
-
-static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
-{
- struct request_sense *sense = &drive->sense_data;
- int log = 0;
-
- if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
- return 0;
-
- ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
-
- switch (sense->sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- break;
- case NOT_READY:
- /*
- * don't care about tray-state messages for e.g. capacity
- * commands, or about in-progress / becoming-ready statuses
- */
- if (sense->asc == 0x3a || sense->asc == 0x04)
- break;
- log = 1;
- break;
- case ILLEGAL_REQUEST:
- /*
- * don't log START_STOP unit with LoEj set, since we cannot
- * reliably check if drive can auto-close
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
- break;
- log = 1;
- break;
- case UNIT_ATTENTION:
- /*
- * Make good and sure we've seen this potential media change.
- * Some drives (e.g. Creative) fail to present the correct sense
- * key in the error register.
- */
- cdrom_saw_media_change(drive);
- break;
- default:
- log = 1;
- break;
- }
- return log;
-}
-
-static void cdrom_analyze_sense_data(ide_drive_t *drive,
- struct request *failed_command)
-{
- struct request_sense *sense = &drive->sense_data;
- struct cdrom_info *info = drive->driver_data;
- unsigned long sector;
- unsigned long bio_sectors;
-
- ide_debug_log(IDE_DBG_SENSE, "error_code: 0x%x, sense_key: 0x%x",
- sense->error_code, sense->sense_key);
-
- if (failed_command)
- ide_debug_log(IDE_DBG_SENSE, "failed cmd: 0x%x",
- failed_command->cmd[0]);
-
- if (!cdrom_log_sense(drive, failed_command))
- return;
-
- /*
- * If a READ TOC is executed for a CD-R or CD-RW medium whose first
- * TOC has not been recorded yet, it will fail with 05/24/00 (which is
- * a confusing error)
- */
- if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
- if (sense->sense_key == 0x05 && sense->asc == 0x24)
- return;
-
- /* current error */
- if (sense->error_code == 0x70) {
- switch (sense->sense_key) {
- case MEDIUM_ERROR:
- case VOLUME_OVERFLOW:
- case ILLEGAL_REQUEST:
- if (!sense->valid)
- break;
- if (failed_command == NULL ||
- blk_rq_is_passthrough(failed_command))
- break;
- sector = (sense->information[0] << 24) |
- (sense->information[1] << 16) |
- (sense->information[2] << 8) |
- (sense->information[3]);
-
- if (queue_logical_block_size(drive->queue) == 2048)
- /* device sector size is 2K */
- sector <<= 2;
-
- bio_sectors = max(bio_sectors(failed_command->bio), 4U);
- sector &= ~(bio_sectors - 1);
-
- /*
- * The SCSI specification allows for the value
- * returned by READ CAPACITY to be up to 75 2K
- * sectors past the last readable block.
- * Therefore, if we hit a medium error within the
- * last 75 2K sectors, we decrease the saved size
- * value.
- */
- if (sector < get_capacity(info->disk) &&
- drive->probed_capacity - sector < 4 * 75)
- set_capacity(info->disk, sector);
- }
- }
-
- ide_cd_log_error(drive->name, failed_command, sense);
-}
-
-static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
-{
- /*
- * For ATA_PRIV_SENSE, "ide_req(rq)->special" points to the original
- * failed request. Also, the sense data should be read
- * directly from rq which might be different from the original
- * sense buffer if it got copied during mapping.
- */
- struct request *failed = ide_req(rq)->special;
- void *sense = bio_data(rq->bio);
-
- if (failed) {
- /*
- * Sense is always read into drive->sense_data, copy back to the
- * original request.
- */
- memcpy(scsi_req(failed)->sense, sense, 18);
- scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
- cdrom_analyze_sense_data(drive, failed);
-
- if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed)))
- BUG();
- } else
- cdrom_analyze_sense_data(drive, NULL);
-}
-
-
-/*
- * Allow the drive 5 seconds to recover; some devices will return NOT_READY
- * while flushing data from cache.
- *
- * returns: 0 failed (write timeout expired)
- * 1 success
- */
-static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
-{
-
- struct cdrom_info *info = drive->driver_data;
-
- if (!scsi_req(rq)->result)
- info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
-
- scsi_req(rq)->result = 1;
-
- if (time_after(jiffies, info->write_timeout))
- return 0;
- else {
- /*
- * take a breather
- */
- blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(drive->queue, 1);
- return 1;
- }
-}
-
-static void ide_cd_free_sense(ide_drive_t *drive)
-{
- if (!drive->sense_rq)
- return;
-
- blk_mq_free_request(drive->sense_rq);
- drive->sense_rq = NULL;
- drive->sense_rq_armed = false;
-}
-
-/*
- * cdrom_decode_status() returns:
- * 0: if the request should be continued.
- * 1: if the request will be going through error recovery.
- * 2: if the request should be ended.
- */
-static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- int err, sense_key, do_end_request = 0;
-
- /* get the IDE error register */
- err = ide_read_error(drive);
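- /* for ATAPI the sense key lives in the high nibble of the error register */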
- sense_key = err >> 4;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, "
- "stat 0x%x",
- rq->cmd[0], rq->cmd_type, err, stat);
-
- if (ata_sense_request(rq)) {
- /*
- * We got an error trying to get sense info from the drive
- * (probably while trying to recover from a former error).
- * Just give up.
- */
- rq->rq_flags |= RQF_FAILED;
- return 2;
- }
-
- /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (blk_rq_is_scsi(rq) && !scsi_req(rq)->result)
- scsi_req(rq)->result = SAM_STAT_CHECK_CONDITION;
-
- if (blk_noretry_request(rq))
- do_end_request = 1;
-
- switch (sense_key) {
- case NOT_READY:
- if (req_op(rq) == REQ_OP_WRITE) {
- if (ide_cd_breathe(drive, rq))
- return 1;
- } else {
- cdrom_saw_media_change(drive);
-
- if (!blk_rq_is_passthrough(rq) &&
- !(rq->rq_flags & RQF_QUIET))
- printk(KERN_ERR PFX "%s: tray open\n",
- drive->name);
- }
- do_end_request = 1;
- break;
- case UNIT_ATTENTION:
- cdrom_saw_media_change(drive);
-
- if (blk_rq_is_passthrough(rq))
- return 0;
-
- /*
- * Arrange to retry the request but be sure to give up if we've
- * retried too many times.
- */
- if (++scsi_req(rq)->result > ERROR_MAX)
- do_end_request = 1;
- break;
- case ILLEGAL_REQUEST:
- /*
- * Don't print error message for this condition -- SFF8090i
- * indicates that 5/24/00 is the correct response to a request
- * to close the tray if the drive doesn't have that capability.
- *
- * cdrom_log_sense() knows this!
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
- break;
- fallthrough;
- case DATA_PROTECT:
- /*
- * No point in retrying after an illegal request or data
- * protect error.
- */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "command error", stat);
- do_end_request = 1;
- break;
- case MEDIUM_ERROR:
- /*
- * No point in re-trying a zillion times on a bad sector.
- * If we got here the error is not correctable.
- */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "media error "
- "(bad sector)", stat);
- do_end_request = 1;
- break;
- case BLANK_CHECK:
- /* disk appears blank? */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "media error (blank)",
- stat);
- do_end_request = 1;
- break;
- default:
- if (blk_rq_is_passthrough(rq))
- break;
- if (err & ~ATA_ABORTED) {
- /* go to the default handler for other errors */
- ide_error(drive, "cdrom_decode_status", stat);
- return 1;
- } else if (++scsi_req(rq)->result > ERROR_MAX)
- /* we've racked up too many retries, abort */
- do_end_request = 1;
- }
-
- if (blk_rq_is_passthrough(rq)) {
- rq->rq_flags |= RQF_FAILED;
- do_end_request = 1;
- }
-
- /*
- * End a request through request sense analysis when we have sense data.
- * We need this in order to perform end of media processing.
- */
- if (do_end_request)
- goto end_request;
-
- /* if we got a CHECK_CONDITION status, queue a request sense command */
- if (stat & ATA_ERR)
- return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
- return 1;
-
-end_request:
- if (stat & ATA_ERR) {
- hwif->rq = NULL;
- return ide_queue_sense_rq(drive, rq) ? 2 : 1;
- } else
- return 2;
-}
-
-static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- struct request *rq = cmd->rq;
-
- ide_debug_log(IDE_DBG_FUNC, "rq->cmd[0]: 0x%x", rq->cmd[0]);
-
- /*
- * Some of the trailing request sense fields are optional,
- * and some drives don't send them. Sigh.
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
- cmd->nleft > 0 && cmd->nleft <= 5)
- cmd->nleft = 0;
-}
-
-int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
- int write, void *buffer, unsigned *bufflen,
- struct scsi_sense_hdr *sshdr, int timeout,
- req_flags_t rq_flags)
-{
- struct cdrom_info *info = drive->driver_data;
- struct scsi_sense_hdr local_sshdr;
- int retries = 10;
- bool failed;
-
- ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
- "rq_flags: 0x%x",
- cmd[0], write, timeout, rq_flags);
-
- if (!sshdr)
- sshdr = &local_sshdr;
-
- /* start of retry loop */
- do {
- struct request *rq;
- int error;
- bool delay = false;
-
- rq = blk_get_request(drive->queue,
- write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
- ide_req(rq)->type = ATA_PRIV_PC;
- rq->rq_flags |= rq_flags;
- rq->timeout = timeout;
- if (buffer) {
- error = blk_rq_map_kern(drive->queue, rq, buffer,
- *bufflen, GFP_NOIO);
- if (error) {
- blk_put_request(rq);
- return error;
- }
- }
-
- blk_execute_rq(info->disk, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-
- if (buffer)
- *bufflen = scsi_req(rq)->resid_len;
- scsi_normalize_sense(scsi_req(rq)->sense,
- scsi_req(rq)->sense_len, sshdr);
-
- /*
- * FIXME: we should probably abort/retry or something in case of
- * failure.
- */
- failed = (rq->rq_flags & RQF_FAILED) != 0;
- if (failed) {
- /*
- * The request failed. Retry if it was due to a unit
- * attention status (usually means media was changed).
- */
- if (sshdr->sense_key == UNIT_ATTENTION)
- cdrom_saw_media_change(drive);
- else if (sshdr->sense_key == NOT_READY &&
- sshdr->asc == 4 && sshdr->ascq != 4) {
- /*
- * The drive is in the process of loading
- * a disk. Retry, but wait a little to give
- * the drive time to complete the load.
- */
- delay = true;
- } else {
- /* otherwise, don't retry */
- retries = 0;
- }
- --retries;
- }
- blk_put_request(rq);
- if (delay)
- ssleep(2);
- } while (failed && retries >= 0);
-
- /* return an error if the command failed */
- return failed ? -EIO : 0;
-}
-
-/*
- * returns true if rq has been completed
- */
-static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- nr_bytes -= cmd->last_xfer_len;
-
- if (nr_bytes > 0) {
- ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
- return true;
- }
-
- return false;
-}
-
-/* standard prep_rq that builds 10 byte cmds */
-static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
-{
- int hard_sect = queue_logical_block_size(q);
- long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
- unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
- struct scsi_request *req = scsi_req(rq);
-
- if (rq_data_dir(rq) == READ)
- req->cmd[0] = GPCMD_READ_10;
- else
- req->cmd[0] = GPCMD_WRITE_10;
-
- /*
- * fill in lba
- */
- req->cmd[2] = (block >> 24) & 0xff;
- req->cmd[3] = (block >> 16) & 0xff;
- req->cmd[4] = (block >> 8) & 0xff;
- req->cmd[5] = block & 0xff;
-
- /*
- * and transfer length
- */
- req->cmd[7] = (blocks >> 8) & 0xff;
- req->cmd[8] = blocks & 0xff;
- req->cmd_len = 10;
- return true;
-}
-
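-/*
- * Worked example (illustrative, not part of the original file): with
- * 2048-byte logical blocks (hard_sect >> 9 == 4), a read covering
- * 512-byte sectors 2048..2055 becomes block 512, count 2, i.e. the
- * 10-byte CDB 28 00 00 00 02 00 00 00 02 00.
- */
-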
-/*
- * Most of the SCSI commands are supported directly by ATAPI devices.
- * This transform handles the few exceptions.
- */
-static bool ide_cdrom_prep_pc(struct request *rq)
-{
- u8 *c = scsi_req(rq)->cmd;
-
- /* transform 6-byte read/write commands to the 10-byte version */
- if (c[0] == READ_6 || c[0] == WRITE_6) {
- c[8] = c[4];
- c[5] = c[3];
- c[4] = c[2];
- c[3] = c[1] & 0x1f;
- c[2] = 0;
- c[1] &= 0xe0;
- c[0] += (READ_10 - READ_6);
- scsi_req(rq)->cmd_len = 10;
- return true;
- }
-
- /*
- * it's silly to pretend we understand the 6-byte versions of MODE
- * SENSE/SELECT; just reject them with ILLEGAL_REQUEST and let the
- * caller take the appropriate action
- */
- if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
- scsi_req(rq)->result = ILLEGAL_REQUEST;
- return false;
- }
-
- return true;
-}
-
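-/*
- * Worked example (illustrative, not part of the original file): the
- * transform above turns the 6-byte READ_6 CDB
- * 08 01 23 45 10 00 (LBA 0x012345, 16 blocks)
- * into the 10-byte READ_10 CDB
- * 28 00 00 01 23 45 00 00 10 00
- * (0x08 + (READ_10 - READ_6) == 0x28, the 21-bit LBA widens to 32
- * bits and the transfer length moves from byte 4 to byte 8).
- */
-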
-static bool ide_cdrom_prep_rq(ide_drive_t *drive, struct request *rq)
-{
- if (!blk_rq_is_passthrough(rq)) {
- scsi_req_init(scsi_req(rq));
-
- return ide_cdrom_prep_fs(drive->queue, rq);
- } else if (blk_rq_is_scsi(rq))
- return ide_cdrom_prep_pc(rq);
-
- return true;
-}
-
-static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq = hwif->rq;
- ide_expiry_t *expiry = NULL;
- int dma_error = 0, dma, thislen, uptodate = 0;
- int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
- int sense = ata_sense_request(rq);
- unsigned int timeout;
- u16 len;
- u8 ireason, stat;
-
- ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write);
-
- /* check for errors */
- dma = drive->dma;
- if (dma) {
- drive->dma = 0;
- drive->waiting_for_dma = 0;
- dma_error = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- if (dma_error) {
- printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name,
- write ? "write" : "read");
- ide_dma_off(drive);
- }
- }
-
- /* check status */
- stat = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(stat, 0, BAD_R_STAT)) {
- rc = cdrom_decode_status(drive, stat);
- if (rc) {
- if (rc == 2)
- goto out_end;
- return ide_stopped;
- }
- }
-
- /* using dma, transfer is complete now */
- if (dma) {
- if (dma_error)
- return ide_error(drive, "dma error", stat);
- uptodate = 1;
- goto out_end;
- }
-
- ide_read_bcount_and_ireason(drive, &len, &ireason);
-
- thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
- if (thislen > len)
- thislen = len;
-
- ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d",
- stat, thislen);
-
- /* If DRQ is clear, the command has completed. */
- if ((stat & ATA_DRQ) == 0) {
- switch (req_op(rq)) {
- default:
- /*
- * If we're not done reading/writing, complain.
- * Otherwise, complete the command normally.
- */
- uptodate = 1;
- if (cmd->nleft > 0) {
- printk(KERN_ERR PFX "%s: %s: data underrun "
- "(%u bytes)\n", drive->name, __func__,
- cmd->nleft);
- if (!write)
- rq->rq_flags |= RQF_FAILED;
- uptodate = 0;
- }
- goto out_end;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- ide_cd_request_sense_fixup(drive, cmd);
-
- uptodate = cmd->nleft ? 0 : 1;
-
- /*
- * suck out the remaining bytes from the drive in an
- * attempt to complete the data xfer. (see BZ#13399)
- */
- if (!(stat & ATA_ERR) && !uptodate && thislen) {
- ide_pio_bytes(drive, cmd, write, thislen);
- uptodate = cmd->nleft ? 0 : 1;
- }
-
- if (!uptodate)
- rq->rq_flags |= RQF_FAILED;
- goto out_end;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- goto out_end;
- }
- }
-
- rc = ide_check_ireason(drive, rq, len, ireason, write);
- if (rc)
- goto out_end;
-
- cmd->last_xfer_len = 0;
-
- ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, "
- "ireason: 0x%x",
- rq->cmd_type, ireason);
-
- /* transfer data */
- while (thislen > 0) {
- int blen = min_t(int, thislen, cmd->nleft);
-
- if (cmd->nleft == 0)
- break;
-
- ide_pio_bytes(drive, cmd, write, blen);
- cmd->last_xfer_len += blen;
-
- thislen -= blen;
- len -= blen;
-
- if (sense && write == 0)
- scsi_req(rq)->sense_len += blen;
- }
-
- /* pad, if necessary */
- if (len > 0) {
- if (blk_rq_is_passthrough(rq) || write == 0)
- ide_pad_transfer(drive, write, len);
- else {
- printk(KERN_ERR PFX "%s: confused, missing data\n",
- drive->name);
- blk_dump_rq_flags(rq, "cdrom_newpc_intr");
- }
- }
-
- switch (req_op(rq)) {
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- timeout = rq->timeout;
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- expiry = ide_cd_expiry;
- fallthrough;
- default:
- timeout = ATAPI_WAIT_PC;
- break;
- }
-
- hwif->expiry = expiry;
- ide_set_handler(drive, cdrom_newpc_intr, timeout);
- return ide_started;
-
-out_end:
- if (blk_rq_is_scsi(rq) && rc == 0) {
- scsi_req(rq)->resid_len = 0;
- blk_mq_end_request(rq, BLK_STS_OK);
- hwif->rq = NULL;
- } else {
- if (sense && uptodate)
- ide_cd_complete_failed_rq(drive, rq);
-
- if (!blk_rq_is_passthrough(rq)) {
- if (cmd->nleft == 0)
- uptodate = 1;
- } else {
- if (uptodate <= 0 && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- if (uptodate == 0 && rq->bio)
- if (ide_cd_error_cmd(drive, cmd))
- return ide_stopped;
-
- /* make sure it's fully ended */
- if (blk_rq_is_passthrough(rq)) {
- scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
- if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- scsi_req(rq)->resid_len += cmd->last_xfer_len;
- }
-
- ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
-
- if (sense && rc == 2)
- ide_error(drive, "request sense failure", stat);
- }
-
- ide_cd_free_sense(drive);
- return ide_stopped;
-}
-
-static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct request_queue *q = drive->queue;
- int write = rq_data_dir(rq) == WRITE;
- unsigned short sectors_per_frame =
- queue_logical_block_size(q) >> SECTOR_SHIFT;
-
- ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
- "secs_per_frame: %u",
- rq->cmd[0], rq->cmd_flags, sectors_per_frame);
-
- if (write) {
- /* disk has become write protected */
- if (get_disk_ro(cd->disk))
- return ide_stopped;
- } else {
- /*
- * We may be retrying this request after an error. Fix up any
- * weirdness which might be present in the request packet.
- */
- ide_cdrom_prep_rq(drive, rq);
- }
-
- /* fs requests *must* be hardware frame aligned */
- if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
- (blk_rq_pos(rq) & (sectors_per_frame - 1)))
- return ide_stopped;
-
- /* use DMA, if possible */
- drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- if (write)
- cd->devinfo.media_written = 1;
-
- rq->timeout = ATAPI_WAIT_PC;
-
- return ide_started;
-}
-
-static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
-{
-
- ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
- rq->cmd[0], rq->cmd_type);
-
- if (blk_rq_is_scsi(rq))
- rq->rq_flags |= RQF_QUIET;
- else
- rq->rq_flags &= ~RQF_FAILED;
-
- drive->dma = 0;
-
- /* sg request */
- if (rq->bio) {
- struct request_queue *q = drive->queue;
- char *buf = bio_data(rq->bio);
- unsigned int alignment;
-
- drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- /*
- * check if dma is safe
- *
- * NOTE! The "len" and "addr" checks should possibly have
- * separate masks.
- */
- alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- if ((unsigned long)buf & alignment
- || blk_rq_bytes(rq) & q->dma_pad_mask
- || object_is_on_stack(buf))
- drive->dma = 0;
- }
-}
-
-static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- struct ide_cmd cmd;
- int uptodate = 0;
- unsigned int nsectors;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
- rq->cmd[0], (unsigned long long)block);
-
- if (drive->debug_mask & IDE_DBG_RQ)
- blk_dump_rq_flags(rq, "ide_cd_do_request");
-
- switch (req_op(rq)) {
- default:
- if (cdrom_start_rw(drive, rq) == ide_stopped)
- goto out_end;
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- handle_pc:
- if (!rq->timeout)
- rq->timeout = ATAPI_WAIT_PC;
- cdrom_do_block_pc(drive, rq);
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_MISC:
- /* right now this can only be a reset... */
- uptodate = 1;
- goto out_end;
- case ATA_PRIV_SENSE:
- case ATA_PRIV_PC:
- goto handle_pc;
- default:
- BUG();
- }
- }
-
- /* prepare sense request for this command */
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
- }
-
- return ide_issue_pc(drive, &cmd);
-out_end:
- nsectors = blk_rq_sectors(rq);
-
- if (nsectors == 0)
- nsectors = 1;
-
- ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9);
-
- return ide_stopped;
-}
-
-/*
- * Ioctl handling.
- *
- * Routines which queue packet commands take as a final argument a pointer to a
- * request_sense struct. If execution of the command results in an error with a
- * CHECK CONDITION status, this structure will be filled with the results of the
- * subsequent request sense command. The pointer can also be NULL, in which case
- * no sense information is returned.
- */
-static void msf_from_bcd(struct atapi_msf *msf)
-{
- msf->minute = bcd2bin(msf->minute);
- msf->second = bcd2bin(msf->second);
- msf->frame = bcd2bin(msf->frame);
-}
-
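-/*
- * For reference (an illustrative note, not from the original file):
- * msf_to_lba() as used below follows the usual CD addressing rule
- * lba = ((minute * 60 + second) * 75 + frame) - 150
- * so the 0m 2s 0f default used further down maps to LBA 0 (150
- * frames == the mandatory 2-second lead-in offset).
- */
-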
-int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi;
- unsigned char cmd[BLK_MAX_CDB];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (!info)
- return -EIO;
-
- cdi = &info->devinfo;
-
- memset(cmd, 0, BLK_MAX_CDB);
- cmd[0] = GPCMD_TEST_UNIT_READY;
-
- /*
- * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
- * instead of supporting the LOAD_UNLOAD opcode.
- */
- cmd[7] = cdi->sanyo_slot % 3;
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET);
-}
-
-static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
- unsigned long *sectors_per_frame)
-{
- struct {
- __be32 lba;
- __be32 blocklen;
- } capbuf;
-
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
- unsigned len = sizeof(capbuf);
- u32 blocklen;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- memset(cmd, 0, BLK_MAX_CDB);
- cmd[0] = GPCMD_READ_CDVD_CAPACITY;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0,
- RQF_QUIET);
- if (stat)
- return stat;
-
- /*
- * Sanity-check the reported block size to make sure the
- * sectors_per_frame we give to the caller won't end up
- * being bogus.
- */
- blocklen = be32_to_cpu(capbuf.blocklen);
- blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT;
- switch (blocklen) {
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
- printk_once(KERN_ERR PFX "%s: weird block size %u; "
- "setting default block size to 2048\n",
- drive->name, blocklen);
- blocklen = 2048;
- break;
- }
-
- *capacity = 1 + be32_to_cpu(capbuf.lba);
- *sectors_per_frame = blocklen >> SECTOR_SHIFT;
-
- ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu",
- *capacity, *sectors_per_frame);
-
- return 0;
-}
-
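-/*
- * Worked example (illustrative, not part of the original file): if
- * READ CD/DVD CAPACITY returns lba == 0x0004ffff and blocklen ==
- * 0x00000800, the helper above reports capacity == 0x50000 frames of
- * 2048 bytes each and sectors_per_frame == 4 (2048 >> SECTOR_SHIFT).
- */
-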
-static int ide_cdrom_read_tocentry(ide_drive_t *drive, int trackno,
- int msf_flag, int format, char *buf, int buflen)
-{
- unsigned char cmd[BLK_MAX_CDB];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cmd[6] = trackno;
- cmd[7] = (buflen >> 8);
- cmd[8] = (buflen & 0xff);
- cmd[9] = (format << 6);
-
- if (msf_flag)
- cmd[1] = 2;
-
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET);
-}
-
-/* Try to read the entire TOC for the disk into our internal buffer. */
-int ide_cd_read_toc(ide_drive_t *drive)
-{
- int stat, ntracks, i;
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
- struct atapi_toc *toc = info->toc;
- struct {
- struct atapi_toc_header hdr;
- struct atapi_toc_entry ent;
- } ms_tmp;
- long last_written;
- unsigned long sectors_per_frame = SECTORS_PER_FRAME;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (toc == NULL) {
- /* try to allocate space */
- toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
- if (toc == NULL) {
- printk(KERN_ERR PFX "%s: No cdrom TOC buffer!\n",
- drive->name);
- return -ENOMEM;
- }
- info->toc = toc;
- }
-
- /*
- * Check to see if the existing data is still valid. If it is,
- * just return.
- */
- (void) cdrom_check_status(drive, NULL);
-
- if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
- return 0;
-
- /* try to get the total cdrom capacity and sector size */
- stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame);
- if (stat)
- toc->capacity = 0x1fffff;
-
- set_capacity(info->disk, toc->capacity * sectors_per_frame);
- /* save a private copy of the TOC capacity for error handling */
- drive->probed_capacity = toc->capacity * sectors_per_frame;
-
- blk_queue_logical_block_size(drive->queue,
- sectors_per_frame << SECTOR_SHIFT);
-
- /* first read just the header, so we know how long the TOC is */
- stat = ide_cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
- sizeof(struct atapi_toc_header));
- if (stat)
- return stat;
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
- toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
- }
-
- ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
- if (ntracks <= 0)
- return -EIO;
- if (ntracks > MAX_TRACKS)
- ntracks = MAX_TRACKS;
-
- /* now read the whole schmeer */
- stat = ide_cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
- (char *)&toc->hdr,
- sizeof(struct atapi_toc_header) +
- (ntracks + 1) *
- sizeof(struct atapi_toc_entry));
-
- if (stat && toc->hdr.first_track > 1) {
- /*
- * CDs with only CDI tracks don't have any TOC entries; despite
- * this, the returned values are
- * first_track == last_track = number of CDI tracks + 1,
- * so this case is indistinguishable from the same layout
- * plus an additional audio track. If we get an error for the
- * regular case, we assume a CDI without additional audio
- * tracks. In this case the readable TOC is empty (CDI tracks
- * are not included) and only holds the Leadout entry.
- *
- * Heiko Eißfeldt.
- */
- ntracks = 0;
- stat = ide_cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
- (char *)&toc->hdr,
- sizeof(struct atapi_toc_header) +
- (ntracks + 1) *
- sizeof(struct atapi_toc_entry));
- if (stat)
- return stat;
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT);
- toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT);
- } else {
- toc->hdr.first_track = CDROM_LEADOUT;
- toc->hdr.last_track = CDROM_LEADOUT;
- }
- }
-
- if (stat)
- return stat;
-
- toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
- toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
- }
-
- for (i = 0; i <= ntracks; i++) {
- if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
- toc->ent[i].track = bcd2bin(toc->ent[i].track);
- msf_from_bcd(&toc->ent[i].addr.msf);
- }
- toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
- toc->ent[i].addr.msf.second,
- toc->ent[i].addr.msf.frame);
- }
-
- if (toc->hdr.first_track != CDROM_LEADOUT) {
- /* read the multisession information */
- stat = ide_cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
- sizeof(ms_tmp));
- if (stat)
- return stat;
-
- toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
- } else {
- ms_tmp.hdr.last_track = CDROM_LEADOUT;
- ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
- toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
- }
-
- if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
- /* re-read multisession information using MSF format */
- stat = ide_cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
- sizeof(ms_tmp));
- if (stat)
- return stat;
-
- msf_from_bcd(&ms_tmp.ent.addr.msf);
- toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
- ms_tmp.ent.addr.msf.second,
- ms_tmp.ent.addr.msf.frame);
- }
-
- toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
-
- /* now try to get the total cdrom capacity */
- stat = cdrom_get_last_written(cdi, &last_written);
- if (!stat && (last_written > toc->capacity)) {
- toc->capacity = last_written;
- set_capacity(info->disk, toc->capacity * sectors_per_frame);
- drive->probed_capacity = toc->capacity * sectors_per_frame;
- }
-
- /* Remember that we've read this stuff. */
- drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
-
- return 0;
-}
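
The *_AS_BCD quirk handling above depends on packed-BCD conversion, one decimal digit per nibble. A minimal sketch of helpers equivalent to the kernel's bcd2bin()/bin2bcd() (reimplemented here as an assumption):

    #include <stdio.h>

    static unsigned bcd2bin(unsigned char v)
    {
            return (v & 0x0f) + (v >> 4) * 10;	/* low digit + high digit */
    }

    static unsigned char bin2bcd(unsigned v)
    {
            return ((v / 10) << 4) | (v % 10);
    }

    int main(void)
    {
            printf("BCD 0x99 -> %u\n", bcd2bin(0x99));	/* 99 */
            printf("42 -> BCD 0x%02x\n", bin2bcd(42));	/* 0x42 */
            return 0;
    }
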
-
-int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
- struct packet_command cgc;
- int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
- size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
- do {
- /* we seem to get stat=0x01,err=0x00 the first time (??) */
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (!stat)
- break;
- } while (--attempts);
- return stat;
-}
-
-void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
-{
- struct cdrom_info *cd = drive->driver_data;
- u16 curspeed, maxspeed;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
- curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
- maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
- } else {
- curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
- maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
- }
-
- ide_debug_log(IDE_DBG_PROBE, "curspeed: %u, maxspeed: %u",
- curspeed, maxspeed);
-
- cd->current_speed = DIV_ROUND_CLOSEST(curspeed, 176);
- cd->max_speed = DIV_ROUND_CLOSEST(maxspeed, 176);
-}
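
The divisor 176 above is the single-speed raw CD transfer rate: 75 frames/s times 2352 bytes is 176400 bytes/s, i.e. about 176 kB/s per "X" (the select-speed path in ide-cd_ioctl.c multiplies by 177, the other rounding of the same constant). A worked sketch of the rounding:

    #include <stdio.h>

    /* Round-to-nearest division, as DIV_ROUND_CLOSEST does for
     * unsigned values. */
    static unsigned kbps_to_x(unsigned kbps)
    {
            return (kbps + 176 / 2) / 176;
    }

    int main(void)
    {
            printf("706 kB/s  -> %uX\n", kbps_to_x(706));	/* 4X */
            printf("8467 kB/s -> %uX\n", kbps_to_x(8467));	/* 48X */
            return 0;
    }
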
-
-#define IDE_CD_CAPABILITIES \
- (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | \
- CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | \
- CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R | \
- CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \
- CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM)
-
-static const struct cdrom_device_ops ide_cdrom_dops = {
- .open = ide_cdrom_open_real,
- .release = ide_cdrom_release_real,
- .drive_status = ide_cdrom_drive_status,
- .check_events = ide_cdrom_check_events_real,
- .tray_move = ide_cdrom_tray_move,
- .lock_door = ide_cdrom_lock_door,
- .select_speed = ide_cdrom_select_speed,
- .get_last_session = ide_cdrom_get_last_session,
- .get_mcn = ide_cdrom_get_mcn,
- .reset = ide_cdrom_reset,
- .audio_ioctl = ide_cdrom_audio_ioctl,
- .capability = IDE_CD_CAPABILITIES,
- .generic_packet = ide_cdrom_packet,
-};
-
-static int ide_cdrom_register(ide_drive_t *drive, int nslots)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *devinfo = &info->devinfo;
-
- ide_debug_log(IDE_DBG_PROBE, "nslots: %d", nslots);
-
- devinfo->ops = &ide_cdrom_dops;
- devinfo->speed = info->current_speed;
- devinfo->capacity = nslots;
- devinfo->handle = drive;
- strcpy(devinfo->name, drive->name);
-
- if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
- devinfo->mask |= CDC_SELECT_SPEED;
-
- return register_cdrom(info->disk, devinfo);
-}
-
-static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
- mechtype_t mechtype;
- int nslots = 1;
-
- ide_debug_log(IDE_DBG_PROBE, "media: 0x%x, atapi_flags: 0x%lx",
- drive->media, drive->atapi_flags);
-
- cdi->mask = (CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R |
- CDC_DVD_RAM | CDC_SELECT_DISC | CDC_PLAY_AUDIO |
- CDC_MO_DRIVE | CDC_RAM);
-
- if (drive->media == ide_optical) {
- cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
- printk(KERN_ERR PFX "%s: ATAPI magneto-optical drive\n",
- drive->name);
- return nslots;
- }
-
- if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
- drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
- cdi->mask &= ~CDC_PLAY_AUDIO;
- return nslots;
- }
-
- /*
- * We have to cheat a little here. The packet will eventually be queued
- * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
- * Since this device hasn't been registered with the Uniform layer yet,
- * it can't do this. Same goes for cdi->ops.
- */
- cdi->handle = drive;
- cdi->ops = &ide_cdrom_dops;
-
- if (ide_cdrom_get_capabilities(drive, buf))
- return 0;
-
- if ((buf[8 + 6] & 0x01) == 0)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- if (buf[8 + 6] & 0x08)
- drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
- if (buf[8 + 3] & 0x01)
- cdi->mask &= ~CDC_CD_R;
- if (buf[8 + 3] & 0x02)
- cdi->mask &= ~(CDC_CD_RW | CDC_RAM);
- if (buf[8 + 2] & 0x38)
- cdi->mask &= ~CDC_DVD;
- if (buf[8 + 3] & 0x20)
- cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
- if (buf[8 + 3] & 0x10)
- cdi->mask &= ~CDC_DVD_R;
- if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
- cdi->mask &= ~CDC_PLAY_AUDIO;
-
- mechtype = buf[8 + 6] >> 5;
- if (mechtype == mechtype_caddy ||
- mechtype == mechtype_popup ||
- (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE))
- cdi->mask |= CDC_CLOSE_TRAY;
-
- if (cdi->sanyo_slot > 0) {
- cdi->mask &= ~CDC_SELECT_DISC;
- nslots = 3;
- } else if (mechtype == mechtype_individual_changer ||
- mechtype == mechtype_cartridge_changer) {
- nslots = cdrom_number_of_slots(cdi);
- if (nslots > 1)
- cdi->mask &= ~CDC_SELECT_DISC;
- }
-
- ide_cdrom_update_speed(drive, buf);
-
- printk(KERN_INFO PFX "%s: ATAPI", drive->name);
-
- /* don't print speed if the drive reported 0 */
- if (cd->max_speed)
- printk(KERN_CONT " %dX", cd->max_speed);
-
- printk(KERN_CONT " %s", (cdi->mask & CDC_DVD) ? "CD-ROM" : "DVD-ROM");
-
- if ((cdi->mask & CDC_DVD_R) == 0 || (cdi->mask & CDC_DVD_RAM) == 0)
- printk(KERN_CONT " DVD%s%s",
- (cdi->mask & CDC_DVD_R) ? "" : "-R",
- (cdi->mask & CDC_DVD_RAM) ? "" : "/RAM");
-
- if ((cdi->mask & CDC_CD_R) == 0 || (cdi->mask & CDC_CD_RW) == 0)
- printk(KERN_CONT " CD%s%s",
- (cdi->mask & CDC_CD_R) ? "" : "-R",
- (cdi->mask & CDC_CD_RW) ? "" : "/RW");
-
- if ((cdi->mask & CDC_SELECT_DISC) == 0)
- printk(KERN_CONT " changer w/%d slots", nslots);
- else
- printk(KERN_CONT " drive");
-
- printk(KERN_CONT ", %dkB Cache\n",
- be16_to_cpup((__be16 *)&buf[8 + 12]));
-
- return nslots;
-}
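
The buf[8 + n] indexing above skips the 8-byte MODE SENSE(10) header that precedes the capabilities page, after which individual feature bits decode per SFF-8020i. A sketch of the same bit tests against a hypothetical captured page (sample bytes invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t buf[28] = { 0 };	/* header + capabilities page */

            buf[8 + 2] = 0x38;	/* DVD-ROM/DVD-R/DVD-RAM read bits */
            buf[8 + 3] = 0x03;	/* CD-R and CD-RW write bits */
            buf[8 + 6] = 0x29;	/* tray mechanism, eject, lock */

            printf("can lock door:  %s\n", (buf[8 + 6] & 0x01) ? "yes" : "no");
            printf("reads DVD:      %s\n", (buf[8 + 2] & 0x38) ? "yes" : "no");
            printf("writes CD-R:    %s\n", (buf[8 + 3] & 0x01) ? "yes" : "no");
            printf("mechanism type: %u\n", buf[8 + 6] >> 5);
            return 0;
    }
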
-
-struct cd_list_entry {
- const char *id_model;
- const char *id_firmware;
- unsigned int cd_flags;
-};
-
-#ifdef CONFIG_IDE_PROC_FS
-static sector_t ide_cdrom_capacity(ide_drive_t *drive)
-{
- unsigned long capacity, sectors_per_frame;
-
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame))
- return 0;
-
- return capacity * sectors_per_frame;
-}
-
-static int idecd_capacity_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_cdrom_capacity(drive));
- return 0;
-}
-
-static ide_proc_entry_t idecd_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, idecd_capacity_proc_show },
- {}
-};
-
-static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
-{
- return idecd_proc;
-}
-
-static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
-{
- return NULL;
-}
-#endif
-
-static const struct cd_list_entry ide_cd_quirks_list[] = {
- /* SCR-3231 doesn't support the SET_CD_SPEED command. */
- { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
- /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
- { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
- IDE_AFLAG_PRE_ATAPI12, },
- /* Vertos 300, some versions of this drive like to talk BCD. */
- { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
- /* Vertos 600 ESD. */
- { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
- /*
- * Sanyo 3 CD changer uses a non-standard command for CD changing
- * (by default standard ATAPI support for CD changers is used).
- */
- { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
- { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
- { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
- /* Stingray 8X CD-ROM. */
- { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
- /*
- * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec-length
- * capabilities mode sense page, but older drives break on it.
- */
- { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
- { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
- /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
- { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
- /*
- * Some drives used by Apple don't advertise audio play
- * but they do support reading TOC & audio datas.
- */
- { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE },
- { "TEAC CD-ROM CD-224E", NULL, IDE_AFLAG_NO_AUTOCLOSE },
- { NULL, NULL, 0 }
-};
-
-static unsigned int ide_cd_flags(u16 *id)
-{
- const struct cd_list_entry *cle = ide_cd_quirks_list;
-
- while (cle->id_model) {
- if (strcmp(cle->id_model, (char *)&id[ATA_ID_PROD]) == 0 &&
- (cle->id_firmware == NULL ||
- strstr((char *)&id[ATA_ID_FW_REV], cle->id_firmware)))
- return cle->cd_flags;
- cle++;
- }
-
- return 0;
-}
-
-static int ide_cdrom_setup(ide_drive_t *drive)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- struct request_queue *q = drive->queue;
- u16 *id = drive->id;
- char *fw_rev = (char *)&id[ATA_ID_FW_REV];
- int nslots;
-
- ide_debug_log(IDE_DBG_PROBE, "enter");
-
- drive->prep_rq = ide_cdrom_prep_rq;
- blk_queue_dma_alignment(q, 31);
- blk_queue_update_dma_pad(q, 15);
-
- drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
-
- if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
- fw_rev[4] == '1' && fw_rev[6] <= '2')
- drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
- IDE_AFLAG_TOCADDR_AS_BCD);
- else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
- fw_rev[4] == '1' && fw_rev[6] <= '2')
- drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
- else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
- /* 3 => use CD in slot 0 */
- cdi->sanyo_slot = 3;
-
- nslots = ide_cdrom_probe_capabilities(drive);
-
- blk_queue_logical_block_size(q, CD_FRAMESIZE);
-
- if (ide_cdrom_register(drive, nslots)) {
- printk(KERN_ERR PFX "%s: %s failed to register device with the"
- " cdrom driver.\n", drive->name, __func__);
- cd->devinfo.handle = NULL;
- return 1;
- }
-
- ide_proc_register_driver(drive, cd->driver);
- return 0;
-}
-
-static void ide_cd_remove(ide_drive_t *drive)
-{
- struct cdrom_info *info = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- ide_proc_unregister_driver(drive, info->driver);
- device_del(&info->dev);
- del_gendisk(info->disk);
-
- mutex_lock(&idecd_ref_mutex);
- put_device(&info->dev);
- mutex_unlock(&idecd_ref_mutex);
-}
-
-static void ide_cd_release(struct device *dev)
-{
- struct cdrom_info *info = to_ide_drv(dev, cdrom_info);
- struct cdrom_device_info *devinfo = &info->devinfo;
- ide_drive_t *drive = info->drive;
- struct gendisk *g = info->disk;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- kfree(info->toc);
- if (devinfo->handle == drive)
- unregister_cdrom(devinfo);
- drive->driver_data = NULL;
- drive->prep_rq = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(info);
-}
-
-static int ide_cd_probe(ide_drive_t *);
-
-static struct ide_driver ide_cdrom_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-cdrom",
- .bus = &ide_bus_type,
- },
- .probe = ide_cd_probe,
- .remove = ide_cd_remove,
- .version = IDECD_VERSION,
- .do_request = ide_cd_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_cd_proc_entries,
- .proc_devsets = ide_cd_proc_devsets,
-#endif
-};
-
-static int idecd_open(struct block_device *bdev, fmode_t mode)
-{
- struct cdrom_info *info;
- int rc = -ENXIO;
-
- if (bdev_check_media_change(bdev)) {
- info = ide_drv_g(bdev->bd_disk, cdrom_info);
-
- ide_cd_read_toc(info->drive);
- }
-
- mutex_lock(&ide_cd_mutex);
- info = ide_cd_get(bdev->bd_disk);
- if (!info)
- goto out;
-
- rc = cdrom_open(&info->devinfo, bdev, mode);
- if (rc < 0)
- ide_cd_put(info);
-out:
- mutex_unlock(&ide_cd_mutex);
- return rc;
-}
-
-static void idecd_release(struct gendisk *disk, fmode_t mode)
-{
- struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
-
- mutex_lock(&ide_cd_mutex);
- cdrom_release(&info->devinfo, mode);
-
- ide_cd_put(info);
- mutex_unlock(&ide_cd_mutex);
-}
-
-static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
-{
- struct packet_command cgc;
- char buffer[16];
- int stat;
- char spindown;
-
- if (copy_from_user(&spindown, (void __user *)arg, sizeof(char)))
- return -EFAULT;
-
- init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
-
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
- if (stat)
- return stat;
-
- buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
- return cdrom_mode_select(cdi, &cgc);
-}
-
-static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
-{
- struct packet_command cgc;
- char buffer[16];
- int stat;
- char spindown;
-
- init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
-
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
- if (stat)
- return stat;
-
- spindown = buffer[11] & 0x0f;
- if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
- return -EFAULT;
- return 0;
-}
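
Both spindown helpers above touch only the low nibble of byte 11 of the CD-ROM mode page, preserving the rest of the byte. A sketch of that read-modify-write (sample values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned char byte11 = 0xa5;	/* upper nibble: unrelated bits */
            unsigned char spindown = 0x02;	/* new timer code to store */

            byte11 = (byte11 & 0xf0) | (spindown & 0x0f);
            printf("byte 11 now 0x%02x, spindown code %u\n",
                   byte11, byte11 & 0x0f);
            return 0;
    }
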
-
-static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
- int err;
-
- switch (cmd) {
- case CDROMSETSPINDOWN:
- return idecd_set_spindown(&info->devinfo, arg);
- case CDROMGETSPINDOWN:
- return idecd_get_spindown(&info->devinfo, arg);
- default:
- break;
- }
-
- err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg);
-
- return err;
-}
-
-static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ide_cd_mutex);
- ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ide_cd_mutex);
-
- return ret;
-}
-
-static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
- void __user *argp = compat_ptr(arg);
- int err;
-
- switch (cmd) {
- case CDROMSETSPINDOWN:
- return idecd_set_spindown(&info->devinfo, (unsigned long)argp);
- case CDROMGETSPINDOWN:
- return idecd_get_spindown(&info->devinfo, (unsigned long)argp);
- default:
- break;
- }
-
- err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd,
- (unsigned long)argp);
-
- return err;
-}
-
-static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ide_cd_mutex);
- ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ide_cd_mutex);
-
- return ret;
-}
-
-static unsigned int idecd_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- return cdrom_check_events(&info->devinfo, clearing);
-}
-
-static const struct block_device_operations idecd_ops = {
- .owner = THIS_MODULE,
- .open = idecd_open,
- .release = idecd_release,
- .ioctl = idecd_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idecd_compat_ioctl : NULL,
- .check_events = idecd_check_events,
-};
-
-/* module options */
-static unsigned long debug_mask;
-module_param(debug_mask, ulong, 0644);
-
-MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
-
-static int ide_cd_probe(ide_drive_t *drive)
-{
- struct cdrom_info *info;
- struct gendisk *g;
-
- ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x",
- drive->driver_req, drive->media);
-
- if (!strstr("ide-cdrom", drive->driver_req))
- goto failed;
-
- if (drive->media != ide_cdrom && drive->media != ide_optical)
- goto failed;
-
- drive->debug_mask = debug_mask;
- drive->irq_handler = cdrom_newpc_intr;
-
- info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
- if (info == NULL) {
- printk(KERN_ERR PFX "%s: Can't allocate a cdrom structure\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk(1 << PARTN_BITS);
- if (!g)
- goto out_free_cd;
-
- ide_init_disk(g, drive);
-
- info->dev.parent = &drive->gendev;
- info->dev.release = ide_cd_release;
- dev_set_name(&info->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&info->dev))
- goto out_free_disk;
-
- info->drive = drive;
- info->driver = &ide_cdrom_driver;
- info->disk = g;
-
- g->private_data = &info->driver;
-
- drive->driver_data = info;
-
- g->minors = 1;
- g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
- if (ide_cdrom_setup(drive)) {
- put_device(&info->dev);
- goto failed;
- }
-
- ide_cd_read_toc(drive);
- g->fops = &idecd_ops;
- g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
- device_add_disk(&drive->gendev, g, NULL);
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_cd:
- kfree(info);
-failed:
- return -ENODEV;
-}
-
-static void __exit ide_cdrom_exit(void)
-{
- driver_unregister(&ide_cdrom_driver.gen_driver);
-}
-
-static int __init ide_cdrom_init(void)
-{
- printk(KERN_INFO DRV_NAME " driver " IDECD_VERSION "\n");
- return driver_register(&ide_cdrom_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-cdrom*");
-MODULE_ALIAS("ide-cd");
-module_init(ide_cdrom_init);
-module_exit(ide_cdrom_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
deleted file mode 100644
index a69dc7f61c4d..000000000000
--- a/drivers/ide/ide-cd.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 1996-98 Erik Andersen
- * Copyright (C) 1998-2000 Jens Axboe
- */
-#ifndef _IDE_CD_H
-#define _IDE_CD_H
-
-#include <linux/cdrom.h>
-#include <asm/byteorder.h>
-
-#define IDECD_DEBUG_LOG 0
-
-#if IDECD_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-#define ATAPI_WAIT_WRITE_BUSY (10 * HZ)
-
-/************************************************************************/
-
-#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT)
-#define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
-
-/* Capabilities Page size including 8 bytes of Mode Page Header */
-#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
-#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
-
-/* Structure of a MSF cdrom address. */
-struct atapi_msf {
- u8 reserved;
- u8 minute;
- u8 second;
- u8 frame;
-};
-
-/* Space to hold the disk TOC. */
-#define MAX_TRACKS 99
-struct atapi_toc_header {
- unsigned short toc_length;
- u8 first_track;
- u8 last_track;
-};
-
-struct atapi_toc_entry {
- u8 reserved1;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u8 adr : 4;
- u8 control : 4;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 control : 4;
- u8 adr : 4;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 track;
- u8 reserved2;
- union {
- unsigned lba;
- struct atapi_msf msf;
- } addr;
-};
-
-struct atapi_toc {
- int last_session_lba;
- int xa_flag;
- unsigned long capacity;
- struct atapi_toc_header hdr;
- struct atapi_toc_entry ent[MAX_TRACKS+1];
- /* One extra for the leadout. */
-};
-
-/* Extra per-device info for cdrom drives. */
-struct cdrom_info {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
-
- /*
- * Buffer for table of contents. NULL if we haven't allocated
- * a TOC buffer for this device yet.
- */
-
- struct atapi_toc *toc;
-
- u8 max_speed; /* Max speed of the drive. */
- u8 current_speed; /* Current speed of the drive. */
-
- /* Per-device info needed by cdrom.c generic driver. */
- struct cdrom_device_info devinfo;
-
- unsigned long write_timeout;
-};
-
-/* ide-cd_verbose.c */
-void ide_cd_log_error(const char *, struct request *, struct request_sense *);
-
-/* ide-cd.c functions used by ide-cd_ioctl.c */
-int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct scsi_sense_hdr *, int, req_flags_t);
-int ide_cd_read_toc(ide_drive_t *);
-int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
-void ide_cdrom_update_speed(ide_drive_t *, u8 *);
-int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *);
-
-/* ide-cd_ioctl.c */
-int ide_cdrom_open_real(struct cdrom_device_info *, int);
-void ide_cdrom_release_real(struct cdrom_device_info *);
-int ide_cdrom_drive_status(struct cdrom_device_info *, int);
-unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *,
- unsigned int clearing, int slot_nr);
-int ide_cdrom_tray_move(struct cdrom_device_info *, int);
-int ide_cdrom_lock_door(struct cdrom_device_info *, int);
-int ide_cdrom_select_speed(struct cdrom_device_info *, int);
-int ide_cdrom_get_last_session(struct cdrom_device_info *,
- struct cdrom_multisession *);
-int ide_cdrom_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
-int ide_cdrom_reset(struct cdrom_device_info *cdi);
-int ide_cdrom_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
-int ide_cdrom_packet(struct cdrom_device_info *, struct packet_command *);
-
-#endif /* _IDE_CD_H */
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
deleted file mode 100644
index 011eab9c69b7..000000000000
--- a/drivers/ide/ide-cd_ioctl.c
+++ /dev/null
@@ -1,468 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * cdrom.c IOCTLs handling for ide-cd driver.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- */
-
-#include <linux/kernel.h>
-#include <linux/cdrom.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-#include <scsi/scsi.h>
-
-#include "ide-cd.h"
-
-/****************************************************************************
- * Other driver requests (open, close, check media change).
- */
-int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
-{
- return 0;
-}
-
-/*
- * Close down the device. Invalidate all cached blocks.
- */
-void ide_cdrom_release_real(struct cdrom_device_info *cdi)
-{
- ide_drive_t *drive = cdi->handle;
-
- if (!cdi->use_count)
- drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
-}
-
-/*
- * TODO: add logic to try the GET_EVENT command first to check for
- * media and tray status. This should be supported by newer CD-R/W
- * and all DVD drives.
- */
-int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
-{
- ide_drive_t *drive = cdi->handle;
- struct media_event_desc med;
- struct scsi_sense_hdr sshdr;
- int stat;
-
- if (slot_nr != CDSL_CURRENT)
- return -EINVAL;
-
- stat = cdrom_check_status(drive, &sshdr);
- if (!stat || sshdr.sense_key == UNIT_ATTENTION)
- return CDS_DISC_OK;
-
- if (!cdrom_get_media_event(cdi, &med)) {
- if (med.media_present)
- return CDS_DISC_OK;
- else if (med.door_open)
- return CDS_TRAY_OPEN;
- else
- return CDS_NO_DISC;
- }
-
- if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04
- && sshdr.ascq == 0x04)
- return CDS_DISC_OK;
-
- /*
- * If not using Mt Fuji extended media tray reports,
- * just return TRAY_OPEN since ATAPI doesn't provide
- * any other way to detect this...
- */
- if (sshdr.sense_key == NOT_READY) {
- if (sshdr.asc == 0x3a && sshdr.ascq == 1)
- return CDS_NO_DISC;
- else
- return CDS_TRAY_OPEN;
- }
- return CDS_DRIVE_NOT_READY;
-}
-
-/*
- * ide-cd always generates a media-changed event if the medium is
- * missing, which makes it impossible to use for proper event reporting,
- * so DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags and the
- * following function is used only to trigger revalidation; its result
- * is never propagated to userland.
- */
-unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr)
-{
- ide_drive_t *drive = cdi->handle;
- int retval;
-
- if (slot_nr == CDSL_CURRENT) {
- (void) cdrom_check_status(drive, NULL);
- retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
- drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
- return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
- } else {
- return 0;
- }
-}
-
-/*
- * Eject the disk if EJECTFLAG is 0.
- * If EJECTFLAG is 1, try to reload the disk.
- */
-static
-int cdrom_eject(ide_drive_t *drive, int ejectflag)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- char loej = 0x02;
- unsigned char cmd[BLK_MAX_CDB];
-
- if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
- return -EDRIVE_CANT_DO_THIS;
-
- /* reload fails on some drives, if the tray is locked */
- if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
- return 0;
-
- /* only tell drive to close tray if open, if it can do that */
- if (ejectflag && (cdi->mask & CDC_CLOSE_TRAY))
- loej = 0;
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_START_STOP_UNIT;
- cmd[4] = loej | (ejectflag != 0);
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-}
-
-/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
-static
-int ide_cd_lockdoor(ide_drive_t *drive, int lockflag)
-{
- struct scsi_sense_hdr sshdr;
- int stat;
-
- /* If the drive cannot lock the door, just pretend. */
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
- stat = 0;
- } else {
- unsigned char cmd[BLK_MAX_CDB];
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cmd[4] = lockflag ? 1 : 0;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
- &sshdr, 0, 0);
- }
-
- /* If we got an illegal field error, the drive
- probably cannot lock the door. */
- if (stat != 0 &&
- sshdr.sense_key == ILLEGAL_REQUEST &&
- (sshdr.asc == 0x24 || sshdr.asc == 0x20)) {
- printk(KERN_ERR "%s: door locking not supported\n",
- drive->name);
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- stat = 0;
- }
-
- /* no medium, that's alright. */
- if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a)
- stat = 0;
-
- if (stat == 0) {
- if (lockflag)
- drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
- else
- drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
- }
-
- return stat;
-}
-
-int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
-{
- ide_drive_t *drive = cdi->handle;
-
- if (position) {
- int stat = ide_cd_lockdoor(drive, 0);
-
- if (stat)
- return stat;
- }
-
- return cdrom_eject(drive, !position);
-}
-
-int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
-{
- ide_drive_t *drive = cdi->handle;
-
- return ide_cd_lockdoor(drive, lock);
-}
-
-/*
- * ATAPI devices are free to select the speed you request or any slower
- * rate. :-( Requesting too fast a speed will _not_ produce an error.
- */
-int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
-{
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
- u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
-
- if (speed == 0)
- speed = 0xffff; /* set to max */
- else
- speed *= 177; /* Nx to kbytes/s */
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_SET_SPEED;
- /* Read Drive speed in kbytes/second MSB/LSB */
- cmd[2] = (speed >> 8) & 0xff;
- cmd[3] = speed & 0xff;
- if ((cdi->mask & (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) !=
- (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) {
- /* Write Drive speed in kbytes/second MSB/LSB */
- cmd[4] = (speed >> 8) & 0xff;
- cmd[5] = speed & 0xff;
- }
-
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-
- if (!ide_cdrom_get_capabilities(drive, buf)) {
- ide_cdrom_update_speed(drive, buf);
- cdi->speed = cd->current_speed;
- }
-
- return 0;
-}
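
The conversion above treats 1x as 177 kB/s (the upward rounding of 176.4) and splits the rate big-endian across cmd[2]/cmd[3], with a requested speed of 0 mapping to 0xffff, i.e. "restore maximum". A worked sketch of the encoding:

    #include <stdio.h>

    int main(void)
    {
            unsigned char cmd[12] = { 0 };
            int speed = 8;	/* request 8X */
            unsigned kbps = speed ? speed * 177 : 0xffff;

            cmd[2] = (kbps >> 8) & 0xff;
            cmd[3] = kbps & 0xff;
            printf("%dX -> %u kB/s (cmd[2]=0x%02x, cmd[3]=0x%02x)\n",
                   speed, kbps, cmd[2], cmd[3]);
            return 0;
    }
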
-
-int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
- struct cdrom_multisession *ms_info)
-{
- struct atapi_toc *toc;
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *info = drive->driver_data;
- int ret;
-
- if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
- ret = ide_cd_read_toc(drive);
- if (ret)
- return ret;
- }
-
- toc = info->toc;
- ms_info->addr.lba = toc->last_session_lba;
- ms_info->xa_flag = toc->xa_flag;
-
- return 0;
-}
-
-int ide_cdrom_get_mcn(struct cdrom_device_info *cdi,
- struct cdrom_mcn *mcn_info)
-{
- ide_drive_t *drive = cdi->handle;
- int stat, mcnlen;
- char buf[24];
- unsigned char cmd[BLK_MAX_CDB];
- unsigned len = sizeof(buf);
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_READ_SUBCHANNEL;
- cmd[1] = 2; /* MSF addressing */
- cmd[2] = 0x40; /* request subQ data */
- cmd[3] = 2; /* format */
- cmd[8] = len;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, buf, &len, NULL, 0, 0);
- if (stat)
- return stat;
-
- mcnlen = sizeof(mcn_info->medium_catalog_number) - 1;
- memcpy(mcn_info->medium_catalog_number, buf + 9, mcnlen);
- mcn_info->medium_catalog_number[mcnlen] = '\0';
-
- return 0;
-}
-
-int ide_cdrom_reset(struct cdrom_device_info *cdi)
-{
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
- struct request *rq;
- int ret;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- rq->rq_flags = RQF_QUIET;
- blk_execute_rq(cd->disk, rq, 0);
- ret = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
- /*
- * A reset will unlock the door. If it was previously locked,
- * lock it again.
- */
- if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
- (void)ide_cd_lockdoor(drive, 1);
-
- return ret;
-}
-
-static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
- struct atapi_toc_entry **ent)
-{
- struct cdrom_info *info = drive->driver_data;
- struct atapi_toc *toc = info->toc;
- int ntracks;
-
- /*
- * Don't serve cached data if the TOC isn't valid.
- */
- if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
- return -EINVAL;
-
- /* Check validity of requested track number. */
- ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
-
- if (toc->hdr.first_track == CDROM_LEADOUT)
- ntracks = 0;
-
- if (track == CDROM_LEADOUT)
- *ent = &toc->ent[ntracks];
- else if (track < toc->hdr.first_track || track > toc->hdr.last_track)
- return -EINVAL;
- else
- *ent = &toc->ent[track - toc->hdr.first_track];
-
- return 0;
-}
-
-static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
-{
- struct cdrom_ti *ti = arg;
- struct atapi_toc_entry *first_toc, *last_toc;
- unsigned long lba_start, lba_end;
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
-
- stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
- if (stat)
- return stat;
-
- stat = ide_cd_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
- if (stat)
- return stat;
-
- if (ti->cdti_trk1 != CDROM_LEADOUT)
- ++last_toc;
- lba_start = first_toc->addr.lba;
- lba_end = last_toc->addr.lba;
-
- if (lba_end <= lba_start)
- return -EINVAL;
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_PLAY_AUDIO_MSF;
- lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
- lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-}
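
The PLAY AUDIO MSF emulation above converts the LBA range back into MSF addresses for the command packet. A standalone sketch of what the kernel's lba_to_msf() helper is assumed to do (the inverse of the msf_to_lba conversion used by the TOC code):

    #include <stdio.h>

    static void lba_to_msf(int lba, unsigned char *m, unsigned char *s,
                           unsigned char *f)
    {
            lba += 150;	/* re-add the 2-second lead-in offset */
            *m = lba / (60 * 75);
            lba %= 60 * 75;
            *s = lba / 75;
            *f = lba % 75;
    }

    int main(void)
    {
            unsigned char m, s, f;

            lba_to_msf(20715, &m, &s, &f);
            printf("LBA 20715 -> %02u:%02u:%02u\n", m, s, f);	/* 04:38:15 */
            return 0;
    }
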
-
-static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_tochdr *tochdr = arg;
- struct atapi_toc *toc;
- int stat;
-
- /* Make sure our saved TOC is valid. */
- stat = ide_cd_read_toc(drive);
- if (stat)
- return stat;
-
- toc = cd->toc;
- tochdr->cdth_trk0 = toc->hdr.first_track;
- tochdr->cdth_trk1 = toc->hdr.last_track;
-
- return 0;
-}
-
-static int ide_cd_read_tocentry(ide_drive_t *drive, void *arg)
-{
- struct cdrom_tocentry *tocentry = arg;
- struct atapi_toc_entry *toce;
- int stat;
-
- stat = ide_cd_get_toc_entry(drive, tocentry->cdte_track, &toce);
- if (stat)
- return stat;
-
- tocentry->cdte_ctrl = toce->control;
- tocentry->cdte_adr = toce->adr;
- if (tocentry->cdte_format == CDROM_MSF) {
- lba_to_msf(toce->addr.lba,
- &tocentry->cdte_addr.msf.minute,
- &tocentry->cdte_addr.msf.second,
- &tocentry->cdte_addr.msf.frame);
- } else
- tocentry->cdte_addr.lba = toce->addr.lba;
-
- return 0;
-}
-
-int ide_cdrom_audio_ioctl(struct cdrom_device_info *cdi,
- unsigned int cmd, void *arg)
-{
- ide_drive_t *drive = cdi->handle;
-
- switch (cmd) {
- /*
- * emulate PLAY_AUDIO_TI command with PLAY_AUDIO_10, since
- * atapi doesn't support it
- */
- case CDROMPLAYTRKIND:
- return ide_cd_fake_play_trkind(drive, arg);
- case CDROMREADTOCHDR:
- return ide_cd_read_tochdr(drive, arg);
- case CDROMREADTOCENTRY:
- return ide_cd_read_tocentry(drive, arg);
- default:
- return -EINVAL;
- }
-}
-
-/* the generic packet interface to cdrom.c */
-int ide_cdrom_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc)
-{
- ide_drive_t *drive = cdi->handle;
- req_flags_t flags = 0;
- unsigned len = cgc->buflen;
-
- if (cgc->timeout <= 0)
- cgc->timeout = ATAPI_WAIT_PC;
-
- /* Here we queue the commands from the uniform CD-ROM
- layer. The packet must be complete, as we do not
- touch it at all. */
-
- if (cgc->sshdr)
- memset(cgc->sshdr, 0, sizeof(*cgc->sshdr));
-
- if (cgc->quiet)
- flags |= RQF_QUIET;
-
- cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
- cgc->data_direction == CGC_DATA_WRITE,
- cgc->buffer, &len,
- cgc->sshdr, cgc->timeout, flags);
- if (!cgc->stat)
- cgc->buflen -= len;
- return cgc->stat;
-}
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
deleted file mode 100644
index 5ecd5b2f03a3..000000000000
--- a/drivers/ide/ide-cd_verbose.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Verbose error logging for ATAPI CD/DVD devices.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- */
-
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <scsi/scsi.h>
-#include "ide-cd.h"
-
-#ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
-void ide_cd_log_error(const char *name, struct request *failed_command,
- struct request_sense *sense)
-{
- /* Suppress printing unit attention and `in progress of becoming ready'
- errors when we're not being verbose. */
- if (sense->sense_key == UNIT_ATTENTION ||
- (sense->sense_key == NOT_READY && (sense->asc == 4 ||
- sense->asc == 0x3a)))
- return;
-
- printk(KERN_ERR "%s: error code: 0x%02x sense_key: 0x%02x "
- "asc: 0x%02x ascq: 0x%02x\n",
- name, sense->error_code, sense->sense_key,
- sense->asc, sense->ascq);
-}
-#else
-/* The generic packet command opcodes for CD/DVD Logical Units,
- * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const struct {
- unsigned short packet_command;
- const char * const text;
-} packet_command_texts[] = {
- { GPCMD_TEST_UNIT_READY, "Test Unit Ready" },
- { GPCMD_REQUEST_SENSE, "Request Sense" },
- { GPCMD_FORMAT_UNIT, "Format Unit" },
- { GPCMD_INQUIRY, "Inquiry" },
- { GPCMD_START_STOP_UNIT, "Start/Stop Unit" },
- { GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, "Prevent/Allow Medium Removal" },
- { GPCMD_READ_FORMAT_CAPACITIES, "Read Format Capacities" },
- { GPCMD_READ_CDVD_CAPACITY, "Read Cd/Dvd Capacity" },
- { GPCMD_READ_10, "Read 10" },
- { GPCMD_WRITE_10, "Write 10" },
- { GPCMD_SEEK, "Seek" },
- { GPCMD_WRITE_AND_VERIFY_10, "Write and Verify 10" },
- { GPCMD_VERIFY_10, "Verify 10" },
- { GPCMD_FLUSH_CACHE, "Flush Cache" },
- { GPCMD_READ_SUBCHANNEL, "Read Subchannel" },
- { GPCMD_READ_TOC_PMA_ATIP, "Read Table of Contents" },
- { GPCMD_READ_HEADER, "Read Header" },
- { GPCMD_PLAY_AUDIO_10, "Play Audio 10" },
- { GPCMD_GET_CONFIGURATION, "Get Configuration" },
- { GPCMD_PLAY_AUDIO_MSF, "Play Audio MSF" },
- { GPCMD_PLAYAUDIO_TI, "Play Audio TrackIndex" },
- { GPCMD_GET_EVENT_STATUS_NOTIFICATION,
- "Get Event Status Notification" },
- { GPCMD_PAUSE_RESUME, "Pause/Resume" },
- { GPCMD_STOP_PLAY_SCAN, "Stop Play/Scan" },
- { GPCMD_READ_DISC_INFO, "Read Disc Info" },
- { GPCMD_READ_TRACK_RZONE_INFO, "Read Track Rzone Info" },
- { GPCMD_RESERVE_RZONE_TRACK, "Reserve Rzone Track" },
- { GPCMD_SEND_OPC, "Send OPC" },
- { GPCMD_MODE_SELECT_10, "Mode Select 10" },
- { GPCMD_REPAIR_RZONE_TRACK, "Repair Rzone Track" },
- { GPCMD_MODE_SENSE_10, "Mode Sense 10" },
- { GPCMD_CLOSE_TRACK, "Close Track" },
- { GPCMD_BLANK, "Blank" },
- { GPCMD_SEND_EVENT, "Send Event" },
- { GPCMD_SEND_KEY, "Send Key" },
- { GPCMD_REPORT_KEY, "Report Key" },
- { GPCMD_LOAD_UNLOAD, "Load/Unload" },
- { GPCMD_SET_READ_AHEAD, "Set Read-ahead" },
- { GPCMD_READ_12, "Read 12" },
- { GPCMD_GET_PERFORMANCE, "Get Performance" },
- { GPCMD_SEND_DVD_STRUCTURE, "Send DVD Structure" },
- { GPCMD_READ_DVD_STRUCTURE, "Read DVD Structure" },
- { GPCMD_SET_STREAMING, "Set Streaming" },
- { GPCMD_READ_CD_MSF, "Read CD MSF" },
- { GPCMD_SCAN, "Scan" },
- { GPCMD_SET_SPEED, "Set Speed" },
- { GPCMD_PLAY_CD, "Play CD" },
- { GPCMD_MECHANISM_STATUS, "Mechanism Status" },
- { GPCMD_READ_CD, "Read CD" },
-};
-
-/* From Table 303 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const char * const sense_key_texts[16] = {
- "No sense data",
- "Recovered error",
- "Not ready",
- "Medium error",
- "Hardware error",
- "Illegal request",
- "Unit attention",
- "Data protect",
- "Blank check",
- "(reserved)",
- "(reserved)",
- "Aborted command",
- "(reserved)",
- "(reserved)",
- "Miscompare",
- "(reserved)",
-};
-
-/* From Table 304 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const struct {
- unsigned long asc_ascq;
- const char * const text;
-} sense_data_texts[] = {
- { 0x000000, "No additional sense information" },
- { 0x000011, "Play operation in progress" },
- { 0x000012, "Play operation paused" },
- { 0x000013, "Play operation successfully completed" },
- { 0x000014, "Play operation stopped due to error" },
- { 0x000015, "No current audio status to return" },
- { 0x010c0a, "Write error - padding blocks added" },
- { 0x011700, "Recovered data with no error correction applied" },
- { 0x011701, "Recovered data with retries" },
- { 0x011702, "Recovered data with positive head offset" },
- { 0x011703, "Recovered data with negative head offset" },
- { 0x011704, "Recovered data with retries and/or CIRC applied" },
- { 0x011705, "Recovered data using previous sector ID" },
- { 0x011800, "Recovered data with error correction applied" },
- { 0x011801, "Recovered data with error correction and retries applied"},
- { 0x011802, "Recovered data - the data was auto-reallocated" },
- { 0x011803, "Recovered data with CIRC" },
- { 0x011804, "Recovered data with L-EC" },
- { 0x015d00, "Failure prediction threshold exceeded"
- " - Predicted logical unit failure" },
- { 0x015d01, "Failure prediction threshold exceeded"
- " - Predicted media failure" },
- { 0x015dff, "Failure prediction threshold exceeded - False" },
- { 0x017301, "Power calibration area almost full" },
- { 0x020400, "Logical unit not ready - cause not reportable" },
- /* Following is misspelled in ATAPI 2.6, _and_ in Mt. Fuji */
- { 0x020401, "Logical unit not ready"
- " - in progress [sic] of becoming ready" },
- { 0x020402, "Logical unit not ready - initializing command required" },
- { 0x020403, "Logical unit not ready - manual intervention required" },
- { 0x020404, "Logical unit not ready - format in progress" },
- { 0x020407, "Logical unit not ready - operation in progress" },
- { 0x020408, "Logical unit not ready - long write in progress" },
- { 0x020600, "No reference position found (media may be upside down)" },
- { 0x023000, "Incompatible medium installed" },
- { 0x023a00, "Medium not present" },
- { 0x025300, "Media load or eject failed" },
- { 0x025700, "Unable to recover table of contents" },
- { 0x030300, "Peripheral device write fault" },
- { 0x030301, "No write current" },
- { 0x030302, "Excessive write errors" },
- { 0x030c00, "Write error" },
- { 0x030c01, "Write error - Recovered with auto reallocation" },
- { 0x030c02, "Write error - auto reallocation failed" },
- { 0x030c03, "Write error - recommend reassignment" },
- { 0x030c04, "Compression check miscompare error" },
- { 0x030c05, "Data expansion occurred during compress" },
- { 0x030c06, "Block not compressible" },
- { 0x030c07, "Write error - recovery needed" },
- { 0x030c08, "Write error - recovery failed" },
- { 0x030c09, "Write error - loss of streaming" },
- { 0x031100, "Unrecovered read error" },
- { 0x031106, "CIRC unrecovered error" },
- { 0x033101, "Format command failed" },
- { 0x033200, "No defect spare location available" },
- { 0x033201, "Defect list update failure" },
- { 0x035100, "Erase failure" },
- { 0x037200, "Session fixation error" },
- { 0x037201, "Session fixation error writin lead-in" },
- { 0x037202, "Session fixation error writin lead-out" },
- { 0x037300, "CD control error" },
- { 0x037302, "Power calibration area is full" },
- { 0x037303, "Power calibration area error" },
- { 0x037304, "Program memory area / RMA update failure" },
- { 0x037305, "Program memory area / RMA is full" },
- { 0x037306, "Program memory area / RMA is (almost) full" },
- { 0x040200, "No seek complete" },
- { 0x040300, "Write fault" },
- { 0x040900, "Track following error" },
- { 0x040901, "Tracking servo failure" },
- { 0x040902, "Focus servo failure" },
- { 0x040903, "Spindle servo failure" },
- { 0x041500, "Random positioning error" },
- { 0x041501, "Mechanical positioning or changer error" },
- { 0x041502, "Positioning error detected by read of medium" },
- { 0x043c00, "Mechanical positioning or changer error" },
- { 0x044000, "Diagnostic failure on component (ASCQ)" },
- { 0x044400, "Internal CD/DVD logical unit failure" },
- { 0x04b600, "Media load mechanism failed" },
- { 0x051a00, "Parameter list length error" },
- { 0x052000, "Invalid command operation code" },
- { 0x052100, "Logical block address out of range" },
- { 0x052102, "Invalid address for write" },
- { 0x052400, "Invalid field in command packet" },
- { 0x052600, "Invalid field in parameter list" },
- { 0x052601, "Parameter not supported" },
- { 0x052602, "Parameter value invalid" },
- { 0x052700, "Write protected media" },
- { 0x052c00, "Command sequence error" },
- { 0x052c03, "Current program area is not empty" },
- { 0x052c04, "Current program area is empty" },
- { 0x053001, "Cannot read medium - unknown format" },
- { 0x053002, "Cannot read medium - incompatible format" },
- { 0x053900, "Saving parameters not supported" },
- { 0x054e00, "Overlapped commands attempted" },
- { 0x055302, "Medium removal prevented" },
- { 0x055500, "System resource failure" },
- { 0x056300, "End of user area encountered on this track" },
- { 0x056400, "Illegal mode for this track or incompatible medium" },
- { 0x056f00, "Copy protection key exchange failure"
- " - Authentication failure" },
- { 0x056f01, "Copy protection key exchange failure - Key not present" },
- { 0x056f02, "Copy protection key exchange failure"
- " - Key not established" },
- { 0x056f03, "Read of scrambled sector without authentication" },
- { 0x056f04, "Media region code is mismatched to logical unit" },
- { 0x056f05, "Drive region must be permanent"
- " / region reset count error" },
- { 0x057203, "Session fixation error - incomplete track in session" },
- { 0x057204, "Empty or partially written reserved track" },
- { 0x057205, "No more RZONE reservations are allowed" },
- { 0x05bf00, "Loss of streaming" },
- { 0x062800, "Not ready to ready transition, medium may have changed" },
- { 0x062900, "Power on, reset or hardware reset occurred" },
- { 0x062a00, "Parameters changed" },
- { 0x062a01, "Mode parameters changed" },
- { 0x062e00, "Insufficient time for operation" },
- { 0x063f00, "Logical unit operating conditions have changed" },
- { 0x063f01, "Microcode has been changed" },
- { 0x065a00, "Operator request or state change input (unspecified)" },
- { 0x065a01, "Operator medium removal request" },
- { 0x0bb900, "Play operation aborted" },
- /* Here we use 0xff for the key (not a valid key) to signify
- * that these can have _any_ key value associated with them... */
- { 0xff0401, "Logical unit is in process of becoming ready" },
- { 0xff0400, "Logical unit not ready, cause not reportable" },
- { 0xff0402, "Logical unit not ready, initializing command required" },
- { 0xff0403, "Logical unit not ready, manual intervention required" },
- { 0xff0500, "Logical unit does not respond to selection" },
- { 0xff0800, "Logical unit communication failure" },
- { 0xff0802, "Logical unit communication parity error" },
- { 0xff0801, "Logical unit communication time-out" },
- { 0xff2500, "Logical unit not supported" },
- { 0xff4c00, "Logical unit failed self-configuration" },
- { 0xff3e00, "Logical unit has not self-configured yet" },
-};
-
-void ide_cd_log_error(const char *name, struct request *failed_command,
- struct request_sense *sense)
-{
- int i;
- const char *s = "bad sense key!";
- char buf[80];
-
- printk(KERN_ERR "ATAPI device %s:\n", name);
- if (sense->error_code == 0x70)
- printk(KERN_CONT " Error: ");
- else if (sense->error_code == 0x71)
- printk(" Deferred Error: ");
- else if (sense->error_code == 0x7f)
- printk(KERN_CONT " Vendor-specific Error: ");
- else
- printk(KERN_CONT " Unknown Error Type: ");
-
- if (sense->sense_key < ARRAY_SIZE(sense_key_texts))
- s = sense_key_texts[sense->sense_key];
-
- printk(KERN_CONT "%s -- (Sense key=0x%02x)\n", s, sense->sense_key);
-
- if (sense->asc == 0x40) {
- sprintf(buf, "Diagnostic failure on component 0x%02x",
- sense->ascq);
- s = buf;
- } else {
- int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts);
- unsigned long key = (sense->sense_key << 16);
-
- key |= (sense->asc << 8);
- if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd))
- key |= sense->ascq;
- s = NULL;
-
- while (hi > lo) {
- mid = (lo + hi) / 2;
- if (sense_data_texts[mid].asc_ascq == key ||
- sense_data_texts[mid].asc_ascq == (0xff0000|key)) {
- s = sense_data_texts[mid].text;
- break;
- } else if (sense_data_texts[mid].asc_ascq > key)
- hi = mid;
- else
- lo = mid + 1;
- }
- }
-
- if (s == NULL) {
- if (sense->asc > 0x80)
- s = "(vendor-specific error)";
- else
- s = "(reserved error code)";
- }
-
- printk(KERN_ERR " %s -- (asc=0x%02x, ascq=0x%02x)\n",
- s, sense->asc, sense->ascq);
-
- if (failed_command != NULL) {
- int lo = 0, mid, hi = ARRAY_SIZE(packet_command_texts);
- s = NULL;
-
- while (hi > lo) {
- mid = (lo + hi) / 2;
- if (packet_command_texts[mid].packet_command ==
- scsi_req(failed_command)->cmd[0]) {
- s = packet_command_texts[mid].text;
- break;
- }
- if (packet_command_texts[mid].packet_command >
- scsi_req(failed_command)->cmd[0])
- hi = mid;
- else
- lo = mid + 1;
- }
-
- printk(KERN_ERR " The failed \"%s\" packet command "
- "was: \n \"", s);
- for (i = 0; i < BLK_MAX_CDB; i++)
- printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
- printk(KERN_CONT "\"\n");
- }
-
- /* The SKSV bit specifies validity of the sense_key_specific
- * in the next two bytes. It is bit 7 of the first byte.
- * In the case of NOT_READY, if SKSV is set the drive can
- * give us nice ETA readings.
- */
- if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) {
- int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100;
-
- printk(KERN_ERR " Command is %02d%% complete\n",
- progress / 0xffff);
- }
-
- if (sense->sense_key == ILLEGAL_REQUEST &&
- (sense->sks[0] & 0x80) != 0) {
- printk(KERN_ERR " Error in %s byte %d",
- (sense->sks[0] & 0x40) != 0 ?
- "command packet" : "command data",
- (sense->sks[1] << 8) + sense->sks[2]);
-
- if ((sense->sks[0] & 0x40) != 0)
- printk(KERN_CONT " bit %d", sense->sks[0] & 0x07);
-
- printk(KERN_CONT "\n");
- }
-}
-#endif
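
The error-text lookup above packs sense_key/asc/ascq into a single 24-bit key and binary-searches the sorted table, also accepting an entry whose top byte is the 0xff "any sense key" wildcard when the probe happens to land on it. A reduced, self-contained version of the same search over a three-entry excerpt:

    #include <stdio.h>

    struct sense_text {
            unsigned long asc_ascq;
            const char *text;
    };

    /* A tiny sorted excerpt of the table above, for demonstration only. */
    static const struct sense_text texts[] = {
            { 0x023a00, "Medium not present" },
            { 0x052000, "Invalid command operation code" },
            { 0xff0401, "Logical unit is in process of becoming ready" },
    };

    static const char *lookup(unsigned sense_key, unsigned asc, unsigned ascq)
    {
            unsigned long key = ((unsigned long)sense_key << 16) |
                                (asc << 8) | ascq;
            int lo = 0, hi = sizeof(texts) / sizeof(texts[0]), mid;

            while (hi > lo) {
                    mid = (lo + hi) / 2;
                    /* exact match, or the 0xff wildcard form of the key */
                    if (texts[mid].asc_ascq == key ||
                        texts[mid].asc_ascq == (0xff0000 | key))
                            return texts[mid].text;
                    if (texts[mid].asc_ascq > key)
                            hi = mid;
                    else
                            lo = mid + 1;
            }
            return "(unknown)";
    }

    int main(void)
    {
            printf("%s\n", lookup(0x02, 0x3a, 0x00));	/* Medium not present */
            printf("%s\n", lookup(0x05, 0x20, 0x00));	/* Invalid command... */
            return 0;
    }
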
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
deleted file mode 100644
index f1e922e2479a..000000000000
--- a/drivers/ide/ide-cs.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*======================================================================
-
- A driver for PCMCIA IDE/ATA disk cards
-
- The contents of this file are subject to the Mozilla Public
- License Version 1.1 (the "License"); you may not use this file
- except in compliance with the License. You may obtain a copy of
- the License at http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS
- IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- implied. See the License for the specific language governing
- rights and limitations under the License.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- Alternatively, the contents of this file may be used under the
- terms of the GNU General Public License version 2 (the "GPL"), in
- which case the provisions of the GPL are applicable instead of the
- above. If you wish to allow the use of your version of this file
- only under the terms of the GPL and not to allow others to use
- your version of this file under the MPL, indicate your decision
- by deleting the provisions above and replace them with the notice
- and other provisions required by the GPL. If you do not delete
- the provisions above, a recipient may use your version of this
- file under either the MPL or the GPL.
-
-======================================================================*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/ide.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-
-#define DRV_NAME "ide-cs"
-
-/*====================================================================*/
-
-/* Module parameters */
-
-MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver");
-MODULE_LICENSE("Dual MPL/GPL");
-
-/*====================================================================*/
-
-typedef struct ide_info_t {
- struct pcmcia_device *p_dev;
- struct ide_host *host;
- int ndev;
-} ide_info_t;
-
-static void ide_release(struct pcmcia_device *);
-static int ide_config(struct pcmcia_device *);
-
-static void ide_detach(struct pcmcia_device *p_dev);
-
-static int ide_probe(struct pcmcia_device *link)
-{
- ide_info_t *info;
-
- dev_dbg(&link->dev, "ide_attach()\n");
-
- /* Create new ide device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->p_dev = link;
- link->priv = info;
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
- CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
-
- return ide_config(link);
-} /* ide_attach */
-
-static void ide_detach(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
-
- dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
-
- ide_release(link);
-
- kfree(info);
-} /* ide_detach */
-
-static const struct ide_port_ops idecs_port_ops = {
- .quirkproc = ide_undecoded_slave,
-};
-
-static const struct ide_port_info idecs_port_info = {
- .port_ops = &idecs_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_pci,
-};
-
-static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
- unsigned long irq, struct pcmcia_device *handle)
-{
- struct ide_host *host;
- ide_hwif_t *hwif;
- int i, rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- if (!request_region(io, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, io, io + 7);
- return NULL;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(io, 8);
- return NULL;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, io, ctl);
- hw.irq = irq;
- hw.dev = &handle->dev;
-
- rc = ide_host_add(&idecs_port_info, hws, 1, &host);
- if (rc)
- goto out_release;
-
- hwif = host->ports[0];
-
- if (hwif->present)
- return host;
-
- /* retry registration in case device is still spinning up */
- for (i = 0; i < 10; i++) {
- msleep(100);
- ide_port_scan(hwif);
- if (hwif->present)
- return host;
- }
-
- return host;
-
-out_release:
- release_region(ctl, 1);
- release_region(io, 8);
- return NULL;
-}
-
-static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
-{
- int *is_kme = priv_data;
-
- if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
- != IO_DATA_PATH_WIDTH_8) {
- pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
- }
- pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
- pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
-
- if (pdev->resource[1]->end) {
- pdev->resource[0]->end = 8;
- pdev->resource[1]->end = (*is_kme) ? 2 : 1;
- } else {
- if (pdev->resource[0]->end < 16)
- return -ENODEV;
- }
-
- return pcmcia_request_io(pdev);
-}
-
-static int ide_config(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
- int ret = 0, is_kme = 0;
- unsigned long io_base, ctl_base;
- struct ide_host *host;
-
- dev_dbg(&link->dev, "ide_config(0x%p)\n", link);
-
- is_kme = ((link->manf_id == MANFID_KME) &&
- ((link->card_id == PRODID_KME_KXLC005_A) ||
- (link->card_id == PRODID_KME_KXLC005_B)));
-
- if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) {
- link->config_flags &= ~CONF_AUTO_CHECK_VCC;
- if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme))
- goto failed; /* No suitable config found */
- }
- io_base = link->resource[0]->start;
- if (link->resource[1]->end)
- ctl_base = link->resource[1]->start;
- else
- ctl_base = link->resource[0]->start + 0x0e;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- /* disable drive interrupts during IDE probe */
- outb(0x02, ctl_base);
-
- /* special setup for KXLC005 card */
- if (is_kme)
- outb(0x81, ctl_base+1);
-
- host = idecs_register(io_base, ctl_base, link->irq, link);
- if (host == NULL && resource_size(link->resource[0]) == 0x20) {
- outb(0x02, ctl_base + 0x10);
- host = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq, link);
- }
-
- if (host == NULL)
- goto failed;
-
- info->ndev = 1;
- info->host = host;
- dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
- 'a' + host->ports[0]->index * 2,
- link->vpp / 10, link->vpp % 10);
-
- return 0;
-
-failed:
- ide_release(link);
- return -ENODEV;
-} /* ide_config */
-
-static void ide_release(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
- struct ide_host *host = info->host;
-
- dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
-
- if (info->ndev) {
- ide_hwif_t *hwif = host->ports[0];
- unsigned long data_addr, ctl_addr;
-
- data_addr = hwif->io_ports.data_addr;
- ctl_addr = hwif->io_ports.ctl_addr;
-
- ide_host_remove(host);
- info->ndev = 0;
-
- release_region(ctl_addr, 1);
- release_region(data_addr, 8);
- }
-
- pcmcia_disable_device(link);
-} /* ide_release */
-
-
-static const struct pcmcia_device_id ide_ids[] = {
- PCMCIA_DEVICE_FUNC_ID(4),
- PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
- PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
- PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
- PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
- PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
- PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
- PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
- PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */
- PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */
- PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
- PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
- PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
- PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
- PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
- PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
- PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
- PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
- PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
- PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
- PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
- PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
- PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
- PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
- PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
- PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
- PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
- PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
- PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
- PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
- PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
- PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
- PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
- PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
- PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
- PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
- PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
- PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
- PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
- PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
- PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
- PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, ide_ids);
-
-static struct pcmcia_driver ide_cs_driver = {
- .owner = THIS_MODULE,
- .name = "ide-cs",
- .probe = ide_probe,
- .remove = ide_detach,
- .id_table = ide_ids,
-};
-
-static int __init init_ide_cs(void)
-{
- return pcmcia_register_driver(&ide_cs_driver);
-}
-
-static void __exit exit_ide_cs(void)
-{
- pcmcia_unregister_driver(&ide_cs_driver);
-}
-
-late_initcall(init_ide_cs);
-module_exit(exit_ide_cs);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
deleted file mode 100644
index ca1d4b3d3878..000000000000
--- a/drivers/ide/ide-devsets.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-
-DEFINE_MUTEX(ide_setting_mtx);
-
-ide_devset_get(io_32bit, io_32bit);
-
-static int set_io_32bit(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
- return -EPERM;
-
- if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
- return -EINVAL;
-
- drive->io_32bit = arg;
-
- return 0;
-}
-
-ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
-
-static int set_ksettings(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
- else
- drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
-
- return 0;
-}
-
-ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
-
-static int set_using_dma(ide_drive_t *drive, int arg)
-{
-#ifdef CONFIG_BLK_DEV_IDEDMA
- int err = -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (ata_id_has_dma(drive->id) == 0)
- goto out;
-
- if (drive->hwif->dma_ops == NULL)
- goto out;
-
- err = 0;
-
- if (arg) {
- if (ide_set_dma(drive))
- err = -EIO;
- } else
- ide_dma_off(drive);
-
-out:
- return err;
-#else
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- return -EPERM;
-#endif
-}
-
-/*
- * Handle HDIO_SET_PIO_MODE ioctl abusers here; eventually this will go away.
- */
-static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
-{
- switch (req_pio) {
- case 202:
- case 201:
- case 200:
- case 102:
- case 101:
- case 100:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
- case 9:
- case 8:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
- case 7:
- case 6:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
- default:
- return 0;
- }
-}
-
-static int set_pio_mode(ide_drive_t *drive, int arg)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (arg < 0 || arg > 255)
- return -EINVAL;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -ENOSYS;
-
- if (set_pio_mode_abuse(drive->hwif, arg)) {
- drive->pio_mode = arg + XFER_PIO_0;
-
- if (arg == 8 || arg == 9) {
- unsigned long flags;
-
- /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
- spin_lock_irqsave(&hwif->lock, flags);
- port_ops->set_pio_mode(hwif, drive);
- spin_unlock_irqrestore(&hwif->lock, flags);
- } else
- port_ops->set_pio_mode(hwif, drive);
- } else {
- int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- ide_set_pio(drive, arg);
-
- if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
- if (keep_dma)
- ide_dma_on(drive);
- }
- }
-
- return 0;
-}
-
-ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
-
-static int set_unmaskirq(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
- return -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- else
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
-
- return 0;
-}
-
-ide_ext_devset_rw_sync(io_32bit, io_32bit);
-ide_ext_devset_rw_sync(keepsettings, ksettings);
-ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
-ide_ext_devset_rw_sync(using_dma, using_dma);
-__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
-
-int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
- int arg)
-{
- struct request_queue *q = drive->queue;
- struct request *rq;
- int ret = 0;
-
- if (!(setting->flags & DS_SYNC))
- return setting->set(drive, arg);
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd_len = 5;
- scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
- *(int *)&scsi_req(rq)->cmd[1] = arg;
- ide_req(rq)->special = setting->set;
-
- blk_execute_rq(NULL, rq, 0);
- ret = scsi_req(rq)->result;
- blk_put_request(rq);
-
- return ret;
-}
-
-ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
-{
- int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;
-
- err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
- if (err)
- scsi_req(rq)->result = err;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
- return ide_stopped;
-}
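
The file just removed is one instance of a pattern used across the subsystem: each setting pairs a getter with a validating setter, and a DS_SYNC flag decides whether the setter may run directly or must be serialized through the request queue as a REQ_DEVSET_EXEC request. A standalone sketch of that dispatch shape, with the queued path collapsed into a direct call (there is no request queue in userspace):

    #include <stdio.h>

    #define DS_SYNC 0x01

    struct devset {
        const char *name;
        unsigned int flags;
        int (*get)(const int *dev);
        int (*set)(int *dev, int arg);
    };

    static int get_flag(const int *dev) { return *dev; }

    static int set_flag(int *dev, int arg)
    {
        if (arg < 0 || arg > 1)
            return -1;                  /* -EINVAL in the driver */
        *dev = arg;
        return 0;
    }

    static int devset_execute(int *dev, const struct devset *s, int arg)
    {
        if (!(s->flags & DS_SYNC))
            return s->set(dev, arg);
        /* The driver would queue a REQ_DEVSET_EXEC request here;
         * this sketch just calls the setter directly. */
        return s->set(dev, arg);
    }

    int main(void)
    {
        int dev = 0;
        const struct devset wcache = { "wcache", DS_SYNC, get_flag, set_flag };

        devset_execute(&dev, &wcache, 1);
        printf("%s = %d\n", wcache.name, wcache.get(&dev));
        return 0;
    }
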
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
deleted file mode 100644
index 8413731c6259..000000000000
--- a/drivers/ide/ide-disk.c
+++ /dev/null
@@ -1,795 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 1998-2002 Linux ATA Development
- * Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/leds.h>
-#include <linux/ide.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/div64.h>
-
-#include "ide-disk.h"
-
-static const u8 ide_rw_cmds[] = {
- ATA_CMD_READ_MULTI,
- ATA_CMD_WRITE_MULTI,
- ATA_CMD_READ_MULTI_EXT,
- ATA_CMD_WRITE_MULTI_EXT,
- ATA_CMD_PIO_READ,
- ATA_CMD_PIO_WRITE,
- ATA_CMD_PIO_READ_EXT,
- ATA_CMD_PIO_WRITE_EXT,
- ATA_CMD_READ,
- ATA_CMD_WRITE,
- ATA_CMD_READ_EXT,
- ATA_CMD_WRITE_EXT,
-};
-
-static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
-{
- u8 index, lba48, write;
-
- lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
- write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
-
- if (dma) {
- cmd->protocol = ATA_PROT_DMA;
- index = 8;
- } else {
- cmd->protocol = ATA_PROT_PIO;
- if (drive->mult_count) {
- cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
- index = 0;
- } else
- index = 4;
- }
-
- cmd->tf.command = ide_rw_cmds[index + lba48 + write];
-}
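
ide_rw_cmds[] is ordered so the opcode can be computed rather than branched on: three groups of four (multi-sector PIO at index 0, single-sector PIO at 4, DMA at 8; in Linux naming ATA_CMD_READ/ATA_CMD_WRITE are the DMA opcodes), each group ordered read, write, read-ext, write-ext, so the final index is base + lba48*2 + write. A standalone sketch of the lookup with the opcodes reduced to their names:

    #include <stdio.h>

    static const char *rw_cmds[] = {
        "ATA_CMD_READ_MULTI", "ATA_CMD_WRITE_MULTI",
        "ATA_CMD_READ_MULTI_EXT", "ATA_CMD_WRITE_MULTI_EXT",
        "ATA_CMD_PIO_READ", "ATA_CMD_PIO_WRITE",
        "ATA_CMD_PIO_READ_EXT", "ATA_CMD_PIO_WRITE_EXT",
        "ATA_CMD_READ", "ATA_CMD_WRITE",
        "ATA_CMD_READ_EXT", "ATA_CMD_WRITE_EXT",
    };

    static const char *pick_cmd(int dma, int multi, int lba48, int write)
    {
        int base = dma ? 8 : (multi ? 0 : 4);

        return rw_cmds[base + lba48 * 2 + write];
    }

    int main(void)
    {
        /* DMA, LBA48, read -> ATA_CMD_READ_EXT (READ DMA EXT) */
        printf("%s\n", pick_cmd(1, 0, 1, 0));
        return 0;
    }
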
-
-/*
- * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
- * using LBA if supported, or CHS otherwise, to address sectors.
- */
-static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 nsectors = (u16)blk_rq_sectors(rq);
- u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
- u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- ide_startstop_t rc;
-
- if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- if (block + blk_rq_sectors(rq) > 1ULL << 28)
- dma = 0;
- else
- lba48 = 0;
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- if (drive->dev_flags & IDE_DFLAG_LBA) {
- if (lba48) {
- pr_debug("%s: LBA=0x%012llx\n", drive->name,
- (unsigned long long)block);
-
- tf->nsect = nsectors & 0xff;
- tf->lbal = (u8) block;
- tf->lbam = (u8)(block >> 8);
- tf->lbah = (u8)(block >> 16);
- tf->device = ATA_LBA;
-
- tf = &cmd.hob;
- tf->nsect = (nsectors >> 8) & 0xff;
- tf->lbal = (u8)(block >> 24);
- if (sizeof(block) != 4) {
- tf->lbam = (u8)((u64)block >> 32);
- tf->lbah = (u8)((u64)block >> 40);
- }
-
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags |= IDE_TFLAG_LBA48;
- } else {
- tf->nsect = nsectors & 0xff;
- tf->lbal = block;
- tf->lbam = block >>= 8;
- tf->lbah = block >>= 8;
- tf->device = ((block >> 8) & 0xf) | ATA_LBA;
- }
- } else {
- unsigned int sect, head, cyl, track;
-
- track = (int)block / drive->sect;
- sect = (int)block % drive->sect + 1;
- head = track % drive->head;
- cyl = track / drive->head;
-
- pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
-
- tf->nsect = nsectors & 0xff;
- tf->lbal = sect;
- tf->lbam = cyl;
- tf->lbah = cyl >> 8;
- tf->device = head;
- }
-
- cmd.tf_flags |= IDE_TFLAG_FS;
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- ide_tf_set_cmd(drive, &cmd, dma);
- cmd.rq = rq;
-
- if (dma == 0) {
- ide_init_sg_cmd(&cmd, nsectors << 9);
- ide_map_sg(drive, &cmd);
- }
-
- rc = do_rw_taskfile(drive, &cmd);
-
- if (rc == ide_stopped && dma) {
- /* fallback to PIO */
- cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
- ide_tf_set_cmd(drive, &cmd, 0);
- ide_init_sg_cmd(&cmd, nsectors << 9);
- rc = do_rw_taskfile(drive, &cmd);
- }
-
- return rc;
-}
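
Three addressings are packed in the function above. For LBA28 the sector number is spread over lbal/lbam/lbah with its top four bits in the device register; for CHS the block number is divided down by sectors-per-track and head count. The original mutates block with >>= as it packs, which is why the device nibble is taken as (block >> 8) & 0xf at the end; the standalone sketch below does the same arithmetic non-destructively (geometry values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ATA_LBA 0x40                 /* LBA bit in the device register */

    static void pack_lba28(uint32_t block, uint8_t *lbal, uint8_t *lbam,
                           uint8_t *lbah, uint8_t *device)
    {
        *lbal   = block & 0xff;
        *lbam   = (block >> 8) & 0xff;
        *lbah   = (block >> 16) & 0xff;
        *device = ((block >> 24) & 0xf) | ATA_LBA;
    }

    static void to_chs(uint32_t block, unsigned spt, unsigned heads,
                       unsigned *cyl, unsigned *head, unsigned *sect)
    {
        unsigned track = block / spt;

        *sect = block % spt + 1;         /* sector numbers are 1-based */
        *head = track % heads;
        *cyl  = track / heads;
    }

    int main(void)
    {
        uint8_t l, m, h, d;
        unsigned c, hd, s;

        pack_lba28(0x0abcdef, &l, &m, &h, &d);
        printf("lba28: lbal=%02x lbam=%02x lbah=%02x dev=%02x\n", l, m, h, d);

        to_chs(1000000, 63, 16, &c, &hd, &s);
        printf("chs: %u/%u/%u\n", c, hd, s);
        return 0;
    }
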
-
-/*
- * 268435455 == 137439 MB or 28bit limit
- * 320173056 == 163929 MB or 48bit addressing
- * 1073741822 == 549756 MB or 48bit addressing fake drive
- */
-
-static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(blk_rq_is_passthrough(rq));
-
- ledtrig_disk_activity(rq_data_dir(rq) == WRITE);
-
- pr_debug("%s: %sing: block=%llu, sectors=%u\n",
- drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, blk_rq_sectors(rq));
-
- if (hwif->rw_disk)
- hwif->rw_disk(drive, rq);
-
- return __ide_do_rw_disk(drive, rq, block);
-}
-
-/*
- * Queries for true maximum capacity of the drive.
- * Returns maximum LBA address (> 0) of the drive, 0 if failed.
- */
-static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u64 addr = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- if (lba48)
- tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
- else
- tf->command = ATA_CMD_READ_NATIVE_MAX;
- tf->device = ATA_LBA;
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- if (lba48) {
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- }
-
- ide_no_data_taskfile(drive, &cmd);
-
- /* if OK, compute maximum address value */
- if (!(tf->status & ATA_ERR))
- addr = ide_get_lba_addr(&cmd, lba48) + 1;
-
- return addr;
-}
-
-/*
- * Sets maximum virtual LBA address of the drive.
- * Returns new maximum virtual LBA address (> 0) or 0 on failure.
- */
-static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u64 addr_set = 0;
-
- addr_req--;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->lbal = (addr_req >> 0) & 0xff;
- tf->lbam = (addr_req >>= 8) & 0xff;
- tf->lbah = (addr_req >>= 8) & 0xff;
- if (lba48) {
- cmd.hob.lbal = (addr_req >>= 8) & 0xff;
- cmd.hob.lbam = (addr_req >>= 8) & 0xff;
- cmd.hob.lbah = (addr_req >>= 8) & 0xff;
- tf->command = ATA_CMD_SET_MAX_EXT;
- } else {
- tf->device = (addr_req >>= 8) & 0x0f;
- tf->command = ATA_CMD_SET_MAX;
- }
- tf->device |= ATA_LBA;
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- if (lba48) {
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- }
-
- ide_no_data_taskfile(drive, &cmd);
-
- /* if OK, compute maximum address value */
- if (!(tf->status & ATA_ERR))
- addr_set = ide_get_lba_addr(&cmd, lba48) + 1;
-
- return addr_set;
-}
-
-static unsigned long long sectors_to_MB(unsigned long long n)
-{
- n <<= 9; /* make it bytes */
- do_div(n, 1000000); /* make it MB */
- return n;
-}
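
The magic numbers in the comment before ide_do_rw_disk() can be checked against sectors_to_MB(): 512-byte sectors, decimal megabytes. One wrinkle: the comment's MB figures are rounded to the nearest MB, while sectors_to_MB() truncates, so 268435455 sectors prints as 137438 MB from the helper but is listed as 137439 MB in the comment. A standalone check:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sectors_to_mb(uint64_t n, int round)
    {
        n <<= 9;                         /* sectors -> bytes */
        if (round)
            n += 500000;                 /* round to the nearest MB */
        return n / 1000000;
    }

    int main(void)
    {
        const uint64_t lbas[] = { 268435455, 320173056, 1073741822 };

        for (int i = 0; i < 3; i++)
            printf("%10llu sectors: %llu MB truncated, %llu MB rounded\n",
                   (unsigned long long)lbas[i],
                   (unsigned long long)sectors_to_mb(lbas[i], 0),
                   (unsigned long long)sectors_to_mb(lbas[i], 1));
        return 0;
    }
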
-
-/*
- * Some disks report total number of sectors instead of
- * maximum sector address. We list them here.
- */
-static const struct drive_list_entry hpa_list[] = {
- { "ST340823A", NULL },
- { "ST320413A", NULL },
- { "ST310211A", NULL },
- { NULL, NULL }
-};
-
-static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
-{
- u64 capacity, set_max;
-
- capacity = drive->capacity64;
- set_max = idedisk_read_native_max_address(drive, lba48);
-
- if (ide_in_drive_list(drive->id, hpa_list)) {
- /*
- * Since we are inclusive with respect to firmware revisions, do this
- * extra check and apply the workaround only when needed.
- */
- if (set_max == capacity + 1)
- set_max--;
- }
-
- return set_max;
-}
-
-static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
-{
- set_max = idedisk_set_max_address(drive, set_max, lba48);
- if (set_max)
- drive->capacity64 = set_max;
-
- return set_max;
-}
-
-static void idedisk_check_hpa(ide_drive_t *drive)
-{
- u64 capacity, set_max;
- int lba48 = ata_id_lba48_enabled(drive->id);
-
- capacity = drive->capacity64;
- set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
-
- if (set_max <= capacity)
- return;
-
- drive->probed_capacity = set_max;
-
- printk(KERN_INFO "%s: Host Protected Area detected.\n"
- "\tcurrent capacity is %llu sectors (%llu MB)\n"
- "\tnative capacity is %llu sectors (%llu MB)\n",
- drive->name,
- capacity, sectors_to_MB(capacity),
- set_max, sectors_to_MB(set_max));
-
- if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
- return;
-
- set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
- if (set_max)
- printk(KERN_INFO "%s: Host Protected Area disabled.\n",
- drive->name);
-}
-
-static int ide_disk_get_capacity(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int lba;
-
- if (ata_id_lba48_enabled(id)) {
- /* drive speaks 48-bit LBA */
- lba = 1;
- drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
- } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
- /* drive speaks 28-bit LBA */
- lba = 1;
- drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- } else {
- /* drive speaks boring old 28-bit CHS */
- lba = 0;
- drive->capacity64 = drive->cyl * drive->head * drive->sect;
- }
-
- drive->probed_capacity = drive->capacity64;
-
- if (lba) {
- drive->dev_flags |= IDE_DFLAG_LBA;
-
- /*
- * If this device supports the Host Protected Area feature set,
- * then we may need to change our opinion about its capacity.
- */
- if (ata_id_hpa_enabled(id))
- idedisk_check_hpa(drive);
- }
-
- /* limit drive capacity to 137GB if LBA48 cannot be used */
- if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
- drive->capacity64 > 1ULL << 28) {
- printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
- "%llu sectors (%llu MB)\n",
- drive->name, (unsigned long long)drive->capacity64,
- sectors_to_MB(drive->capacity64));
- drive->probed_capacity = drive->capacity64 = 1ULL << 28;
- }
-
- if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
- (drive->dev_flags & IDE_DFLAG_LBA48)) {
- if (drive->capacity64 > 1ULL << 28) {
- printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
- " will be used for accessing sectors "
- "> %u\n", drive->name, 1 << 28);
- } else
- drive->dev_flags &= ~IDE_DFLAG_LBA48;
- }
-
- return 0;
-}
-
-static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int lba48 = ata_id_lba48_enabled(id);
-
- if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
- ata_id_hpa_enabled(id) == 0)
- return;
-
- /*
- * according to the spec the SET MAX ADDRESS command shall be
- * immediately preceded by a READ NATIVE MAX ADDRESS command
- */
- if (!ide_disk_hpa_get_native_capacity(drive, lba48))
- return;
-
- if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
- drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
-}
-
-static bool idedisk_prep_rq(ide_drive_t *drive, struct request *rq)
-{
- struct ide_cmd *cmd;
-
- if (req_op(rq) != REQ_OP_FLUSH)
- return true;
-
- if (ide_req(rq)->special) {
- cmd = ide_req(rq)->special;
- memset(cmd, 0, sizeof(*cmd));
- } else {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- }
-
- /* FIXME: map struct ide_taskfile on rq->cmd[] */
- BUG_ON(cmd == NULL);
-
- if (ata_id_flush_ext_enabled(drive->id) &&
- (drive->capacity64 >= (1UL << 28)))
- cmd->tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd->tf.command = ATA_CMD_FLUSH;
- cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd->tf_flags = IDE_TFLAG_DYN;
- cmd->protocol = ATA_PROT_NODATA;
- rq->cmd_flags &= ~REQ_OP_MASK;
- rq->cmd_flags |= REQ_OP_DRV_OUT;
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
- ide_req(rq)->special = cmd;
- cmd->rq = rq;
-
- return true;
-}
-
-ide_devset_get(multcount, mult_count);
-
-/*
- * This is tightly woven into the driver->do_special can not touch.
- * DON'T do it again until a total personality rewrite is committed.
- */
-static int set_multcount(ide_drive_t *drive, int arg)
-{
- struct request *rq;
-
- if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
- return -EINVAL;
-
- if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
- return -EBUSY;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
-
- drive->mult_req = arg;
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- blk_execute_rq(NULL, rq, 0);
- blk_put_request(rq);
-
- return (drive->mult_count == arg) ? 0 : -EIO;
-}
-
-ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
-
-static int set_nowerr(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_NOWERR;
- else
- drive->dev_flags &= ~IDE_DFLAG_NOWERR;
-
- drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
-
- return 0;
-}
-
-static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.feature = feature;
- cmd.tf.nsect = nsect;
- cmd.tf.command = ATA_CMD_SET_FEATURES;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-static void update_flush(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- bool wc = false;
-
- if (drive->dev_flags & IDE_DFLAG_WCACHE) {
- unsigned long long capacity;
- int barrier;
- /*
- * We must avoid issuing commands a drive does not
- * understand or we may crash it. We check flush cache
- * is supported. We also check we have the LBA48 flush
- * cache if the drive capacity is too large. By this
- * time we have trimmed the drive capacity if LBA48 is
- * not available so we don't need to recheck that.
- */
- capacity = ide_gd_capacity(drive);
- barrier = ata_id_flush_enabled(id) &&
- (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
- ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
- capacity <= (1ULL << 28) ||
- ata_id_flush_ext_enabled(id));
-
- printk(KERN_INFO "%s: cache flushes %ssupported\n",
- drive->name, barrier ? "" : "not ");
-
- if (barrier) {
- wc = true;
- drive->prep_rq = idedisk_prep_rq;
- }
- }
-
- blk_queue_write_cache(drive->queue, wc, false);
-}
-
-ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
-
-static int set_wcache(ide_drive_t *drive, int arg)
-{
- int err = 1;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (ata_id_flush_enabled(drive->id)) {
- err = ide_do_setfeature(drive,
- arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
- if (err == 0) {
- if (arg)
- drive->dev_flags |= IDE_DFLAG_WCACHE;
- else
- drive->dev_flags &= ~IDE_DFLAG_WCACHE;
- }
- }
-
- update_flush(drive);
-
- return err;
-}
-
-static int do_idedisk_flushcache(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- if (ata_id_flush_ext_enabled(drive->id))
- cmd.tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd.tf.command = ATA_CMD_FLUSH;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-ide_devset_get(acoustic, acoustic);
-
-static int set_acoustic(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 254)
- return -EINVAL;
-
- ide_do_setfeature(drive,
- arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
-
- drive->acoustic = arg;
-
- return 0;
-}
-
-ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
-
-/*
- * drive->addressing:
- * 0: 28-bit
- * 1: 48-bit
- * 2: 48-bit capable doing 28-bit
- */
-static int set_addressing(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 2)
- return -EINVAL;
-
- if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
- ata_id_lba48_enabled(drive->id) == 0))
- return -EIO;
-
- if (arg == 2)
- arg = 0;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_LBA48;
- else
- drive->dev_flags &= ~IDE_DFLAG_LBA48;
-
- return 0;
-}
-
-ide_ext_devset_rw(acoustic, acoustic);
-ide_ext_devset_rw(address, addressing);
-ide_ext_devset_rw(multcount, multcount);
-ide_ext_devset_rw(wcache, wcache);
-
-ide_ext_devset_rw_sync(nowerr, nowerr);
-
-static int ide_disk_check(ide_drive_t *drive, const char *s)
-{
- return 1;
-}
-
-static void ide_disk_setup(ide_drive_t *drive)
-{
- struct ide_disk_obj *idkp = drive->driver_data;
- struct request_queue *q = drive->queue;
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- unsigned long long capacity;
-
- ide_proc_register_driver(drive, idkp->driver);
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
- return;
-
- if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
- /*
- * Removable disks (e.g. SYQUEST); ignore 'WD' drives
- */
- if (m[0] != 'W' || m[1] != 'D')
- drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
- }
-
- (void)set_addressing(drive, 1);
-
- if (drive->dev_flags & IDE_DFLAG_LBA48) {
- int max_s = 2048;
-
- if (max_s > hwif->rqsize)
- max_s = hwif->rqsize;
-
- blk_queue_max_hw_sectors(q, max_s);
- }
-
- printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
- queue_max_sectors(q) / 2);
-
- if (ata_id_is_ssd(id)) {
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
- }
-
- /* calculate drive capacity, and select LBA if possible */
- ide_disk_get_capacity(drive);
-
- /*
- * if possible, give fdisk access to more of the drive,
- * by correcting bios_cyls:
- */
- capacity = ide_gd_capacity(drive);
-
- if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
- if (ata_id_lba48_enabled(drive->id)) {
- /* compatibility */
- drive->bios_sect = 63;
- drive->bios_head = 255;
- }
-
- if (drive->bios_sect && drive->bios_head) {
- unsigned int cap0 = capacity; /* truncate to 32 bits */
- unsigned int cylsz, cyl;
-
- if (cap0 != capacity)
- drive->bios_cyl = 65535;
- else {
- cylsz = drive->bios_sect * drive->bios_head;
- cyl = cap0 / cylsz;
- if (cyl > 65535)
- cyl = 65535;
- if (cyl > drive->bios_cyl)
- drive->bios_cyl = cyl;
- }
- }
- }
- printk(KERN_INFO "%s: %llu sectors (%llu MB)",
- drive->name, capacity, sectors_to_MB(capacity));
-
- /* Only print cache size when it was specified */
- if (id[ATA_ID_BUF_SIZE])
- printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);
-
- printk(KERN_CONT ", CHS=%d/%d/%d\n",
- drive->bios_cyl, drive->bios_head, drive->bios_sect);
-
- /* write cache enabled? */
- if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
- drive->dev_flags |= IDE_DFLAG_WCACHE;
-
- set_wcache(drive, 1);
-
- if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
- (drive->head == 0 || drive->head > 16))
- printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
- drive->name, drive->head);
-}
-
-static void ide_disk_flush(ide_drive_t *drive)
-{
- if (ata_id_flush_enabled(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
- return;
-
- if (do_idedisk_flushcache(drive))
- printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
-}
-
-static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
-{
- return 0;
-}
-
-static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
- int on)
-{
- struct ide_cmd cmd;
- int ret;
-
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
- return 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- ret = ide_no_data_taskfile(drive, &cmd);
-
- if (ret)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-
- return ret;
-}
-
-const struct ide_disk_ops ide_ata_disk_ops = {
- .check = ide_disk_check,
- .unlock_native_capacity = ide_disk_unlock_native_capacity,
- .get_capacity = ide_disk_get_capacity,
- .setup = ide_disk_setup,
- .flush = ide_disk_flush,
- .init_media = ide_disk_init_media,
- .set_doorlock = ide_disk_set_doorlock,
- .do_request = ide_do_rw_disk,
- .ioctl = ide_disk_ioctl,
- .compat_ioctl = ide_disk_ioctl,
-};
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
deleted file mode 100644
index 0e8cc18bfda6..000000000000
--- a/drivers/ide/ide-disk.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_DISK_H
-#define __IDE_DISK_H
-
-#include "ide-gd.h"
-
-#ifdef CONFIG_IDE_GD_ATA
-/* ide-disk.c */
-extern const struct ide_disk_ops ide_ata_disk_ops;
-ide_decl_devset(address);
-ide_decl_devset(multcount);
-ide_decl_devset(nowerr);
-ide_decl_devset(wcache);
-ide_decl_devset(acoustic);
-
-/* ide-disk_ioctl.c */
-int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
- unsigned long);
-
-#ifdef CONFIG_IDE_PROC_FS
-/* ide-disk_proc.c */
-extern ide_proc_entry_t ide_disk_proc[];
-extern const struct ide_proc_devset ide_disk_settings[];
-#endif
-#else
-#define ide_disk_proc NULL
-#define ide_disk_settings NULL
-#endif
-
-#endif /* __IDE_DISK_H */
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
deleted file mode 100644
index 2c45616cff4f..000000000000
--- a/drivers/ide/ide-disk_ioctl.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/mutex.h>
-
-#include "ide-disk.h"
-
-static DEFINE_MUTEX(ide_disk_ioctl_mutex);
-static const struct ide_ioctl_devset ide_disk_ioctl_settings[] = {
-{ HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, &ide_devset_address },
-{ HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, &ide_devset_multcount },
-{ HDIO_GET_NOWERR, HDIO_SET_NOWERR, &ide_devset_nowerr },
-{ HDIO_GET_WCACHE, HDIO_SET_WCACHE, &ide_devset_wcache },
-{ HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, &ide_devset_acoustic },
-{ 0 }
-};
-
-int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int err;
-
- mutex_lock(&ide_disk_ioctl_mutex);
- err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
- if (err != -EOPNOTSUPP)
- goto out;
-
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-out:
- mutex_unlock(&ide_disk_ioctl_mutex);
- return err;
-}
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
deleted file mode 100644
index 95d239b2f646..000000000000
--- a/drivers/ide/ide-disk_proc.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/seq_file.h>
-
-#include "ide-disk.h"
-
-static int smart_enable(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = ATA_SMART_ENABLE;
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- tf->command = ATA_CMD_SMART;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = sub_cmd;
- tf->nsect = 0x01;
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- tf->command = ATA_CMD_SMART;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_PIO;
-
- return ide_raw_taskfile(drive, &cmd, buf, 1);
-}
-
-static int idedisk_cache_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
-
- if (drive->dev_flags & IDE_DFLAG_ID_READ)
- seq_printf(m, "%i\n", drive->id[ATA_ID_BUF_SIZE] / 2);
- else
- seq_printf(m, "(none)\n");
- return 0;
-}
-
-static int idedisk_capacity_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_gd_capacity(drive));
- return 0;
-}
-
-static int __idedisk_proc_show(struct seq_file *m, ide_drive_t *drive, u8 sub_cmd)
-{
- u8 *buf;
-
- buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- (void)smart_enable(drive);
-
- if (get_smart_data(drive, buf, sub_cmd) == 0) {
- __le16 *val = (__le16 *)buf;
- int i;
-
- for (i = 0; i < SECTOR_SIZE / 2; i++) {
- seq_printf(m, "%04x%c", le16_to_cpu(val[i]),
- (i % 8) == 7 ? '\n' : ' ');
- }
- }
- kfree(buf);
- return 0;
-}
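
The dump format above is 256 little-endian 16-bit words, eight per line. A standalone sketch of the same formatting over a dummy 512-byte sector, reading each word byte-wise so it works regardless of host endianness:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE 512

    int main(void)
    {
        uint8_t buf[SECTOR_SIZE];

        /* dummy "SMART sector": word i holds the value i, little-endian */
        for (int i = 0; i < SECTOR_SIZE / 2; i++) {
            buf[2 * i]     = i & 0xff;
            buf[2 * i + 1] = (i >> 8) & 0xff;
        }

        for (int i = 0; i < SECTOR_SIZE / 2; i++) {
            uint16_t val = buf[2 * i] | (buf[2 * i + 1] << 8);

            printf("%04x%c", val, (i % 8) == 7 ? '\n' : ' ');
        }
        return 0;
    }
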
-
-static int idedisk_sv_proc_show(struct seq_file *m, void *v)
-{
- return __idedisk_proc_show(m, m->private, ATA_SMART_READ_VALUES);
-}
-
-static int idedisk_st_proc_show(struct seq_file *m, void *v)
-{
- return __idedisk_proc_show(m, m->private, ATA_SMART_READ_THRESHOLDS);
-}
-
-ide_proc_entry_t ide_disk_proc[] = {
- { "cache", S_IFREG|S_IRUGO, idedisk_cache_proc_show },
- { "capacity", S_IFREG|S_IRUGO, idedisk_capacity_proc_show },
- { "geometry", S_IFREG|S_IRUGO, ide_geometry_proc_show },
- { "smart_values", S_IFREG|S_IRUSR, idedisk_sv_proc_show },
- { "smart_thresholds", S_IFREG|S_IRUSR, idedisk_st_proc_show },
- {}
-};
-
-ide_devset_rw_field(bios_cyl, bios_cyl);
-ide_devset_rw_field(bios_head, bios_head);
-ide_devset_rw_field(bios_sect, bios_sect);
-ide_devset_rw_field(failures, failures);
-ide_devset_rw_field(lun, lun);
-ide_devset_rw_field(max_failures, max_failures);
-
-const struct ide_proc_devset ide_disk_settings[] = {
- IDE_PROC_DEVSET(acoustic, 0, 254),
- IDE_PROC_DEVSET(address, 0, 2),
- IDE_PROC_DEVSET(bios_cyl, 0, 65535),
- IDE_PROC_DEVSET(bios_head, 0, 255),
- IDE_PROC_DEVSET(bios_sect, 0, 63),
- IDE_PROC_DEVSET(failures, 0, 65535),
- IDE_PROC_DEVSET(lun, 0, 7),
- IDE_PROC_DEVSET(max_failures, 0, 65535),
- IDE_PROC_DEVSET(multcount, 0, 16),
- IDE_PROC_DEVSET(nowerr, 0, 1),
- IDE_PROC_DEVSET(wcache, 0, 1),
- { NULL },
-};
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
deleted file mode 100644
index b7c2c0bd18b5..000000000000
--- a/drivers/ide/ide-dma-sff.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-/**
- * config_drive_for_dma - attempt to activate IDE DMA
- * @drive: the drive to place in DMA mode
- *
- * If the drive supports at least mode 2 DMA or UDMA of any kind
- * then attempt to place it into DMA mode. Drives that are known to
- * support DMA but predate the DMA properties or that are known
- * to have DMA handling bugs are also set up appropriately based
- * on the good/bad drive lists.
- */
-
-int config_drive_for_dma(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
-
- if (drive->media != ide_disk) {
- if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
- return 0;
- }
-
- /*
- * Enable DMA on any drive that has
- * UltraDMA (mode 0/1/2/3/4/5/6) enabled
- */
- if ((id[ATA_ID_FIELD_VALID] & 4) &&
- ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
- return 1;
-
- /*
- * Enable DMA on any drive that has mode2 DMA
- * (multi or single) enabled
- */
- if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
- (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
- return 1;
-
- /* Consult the list of known "good" drives */
- if (ide_dma_good_drive(drive))
- return 1;
-
- return 0;
-}
-
-u8 ide_dma_sff_read_status(ide_hwif_t *hwif)
-{
- unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)addr);
- else
- return inb(addr);
-}
-EXPORT_SYMBOL_GPL(ide_dma_sff_read_status);
-
-static void ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val)
-{
- unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(val, (void __iomem *)addr);
- else
- outb(val, addr);
-}
-
-/**
- * ide_dma_host_set - Enable/disable DMA on a host
- * @drive: drive to control
- * @on: 1 to enable DMA for the drive, 0 to disable it
- *
- * Enable/disable DMA on an IDE controller following generic
- * bus-mastering IDE controller behaviour.
- */
-
-void ide_dma_host_set(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 unit = drive->dn & 1;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- if (on)
- dma_stat |= (1 << (5 + unit));
- else
- dma_stat &= ~(1 << (5 + unit));
-
- ide_dma_sff_write_status(hwif, dma_stat);
-}
-EXPORT_SYMBOL_GPL(ide_dma_host_set);
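
The 1 << (5 + unit) above reflects the bus-master status register layout: bit 5 is "drive 0 DMA capable", bit 6 is "drive 1 DMA capable". A standalone sketch of the read-modify-write on a status byte:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t dma_host_set(uint8_t dma_stat, int unit, int on)
    {
        uint8_t bit = 1u << (5 + (unit & 1));

        return on ? (uint8_t)(dma_stat | bit) : (uint8_t)(dma_stat & ~bit);
    }

    int main(void)
    {
        uint8_t st = 0x04;               /* say, INTR already latched */

        st = dma_host_set(st, 0, 1);     /* enable DMA for the master */
        st = dma_host_set(st, 1, 1);     /* enable DMA for the slave */
        st = dma_host_set(st, 0, 0);     /* disable it for the master */
        printf("status = 0x%02x\n", st); /* 0x44 */
        return 0;
    }
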
-
-/**
- * ide_build_dmatable - build IDE DMA table
- * @drive: drive whose request is being mapped
- * @cmd: command carrying the scatter/gather state
- *
- * ide_build_dmatable() prepares a dma request. We map the command
- * to get the pci bus addresses of the buffers and then build up
- * the PRD table that the IDE layer wants to be fed.
- *
- * Most chipsets correctly interpret a length of 0x0000 as 64KB,
- * but at least one (e.g. CS5530) misinterprets it as zero (!).
- * So we break the 64KB entry into two 32KB entries instead.
- *
- * Returns the number of built PRD entries if all went okay,
- * returns 0 otherwise.
- *
- * May also be invoked from trm290.c
- */
-
-int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- __le32 *table = (__le32 *)hwif->dmatable_cpu;
- unsigned int count = 0;
- int i;
- struct scatterlist *sg;
- u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
-
- for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
- u32 cur_addr, cur_len, xcount, bcount;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- /*
- * Fill in the dma table, without crossing any 64kB boundaries.
- * Most hardware requires 16-bit alignment of all blocks,
- * but the trm290 requires 32-bit alignment.
- */
-
- while (cur_len) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
-
- bcount = 0x10000 - (cur_addr & 0xffff);
- if (bcount > cur_len)
- bcount = cur_len;
- *table++ = cpu_to_le32(cur_addr);
- xcount = bcount & 0xffff;
- if (is_trm290)
- xcount = ((xcount >> 2) - 1) << 16;
- else if (xcount == 0x0000) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
- *table++ = cpu_to_le32(0x8000);
- *table++ = cpu_to_le32(cur_addr + 0x8000);
- xcount = 0x8000;
- }
- *table++ = cpu_to_le32(xcount);
- cur_addr += bcount;
- cur_len -= bcount;
- }
- }
-
- if (count) {
- if (!is_trm290)
- *--table |= cpu_to_le32(0x80000000);
- return count;
- }
-
-use_pio_instead:
- printk(KERN_ERR "%s: %s\n", drive->name,
- count ? "DMA table too small" : "empty DMA table?");
-
- return 0; /* revert to PIO for this request */
-}
-EXPORT_SYMBOL_GPL(ide_build_dmatable);
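
The PRD rules in the comment are easiest to see in isolation: each entry is a little-endian {address, byte count} pair, no entry may cross a 64 KiB boundary, a count of 0x0000 would mean 64 KiB on most chipsets but not all, so a full 64 KiB run is emitted as two 32 KiB entries, and the final entry carries an end-of-table bit. A standalone sketch over one contiguous buffer (DMA addresses are plain integers here, and the trm290 special case is omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define PRD_EOT 0x80000000u

    struct prd { uint32_t addr; uint32_t count; };

    /* Returns the number of PRD entries built, 0 if 'max' is too small. */
    static int build_prd(struct prd *t, int max, uint32_t addr, uint32_t len)
    {
        int n = 0;

        while (len) {
            /* stay inside the current 64 KiB page */
            uint32_t bcount = 0x10000 - (addr & 0xffff);

            if (bcount > len)
                bcount = len;
            if ((bcount & 0xffff) == 0) {   /* full 64 KiB: split in two */
                if (n + 2 > max)
                    return 0;
                t[n++] = (struct prd){ addr, 0x8000 };
                t[n++] = (struct prd){ addr + 0x8000, 0x8000 };
            } else {
                if (n + 1 > max)
                    return 0;
                t[n++] = (struct prd){ addr, bcount & 0xffff };
            }
            addr += bcount;
            len -= bcount;
        }
        if (n)
            t[n - 1].count |= PRD_EOT;      /* mark end of table */
        return n;
    }

    int main(void)
    {
        struct prd t[8];
        int n = build_prd(t, 8, 0x10000, 0x20000);  /* 128 KiB, aligned */

        for (int i = 0; i < n; i++)
            printf("prd[%d]: addr=0x%08x count=0x%08x\n",
                   i, t[i].addr, t[i].count);
        return 0;
    }
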
-
-/**
- * ide_dma_setup - begin a DMA phase
- * @drive: target device
- * @cmd: command
- *
- * Build an IDE DMA PRD (IDE speak for scatter gather table)
- * and then set up the DMA transfer registers for a device
- * that follows generic IDE PCI DMA behaviour. Controllers can
- * override this function if they need to
- *
- * Returns 0 on success. If a PIO fallback is required then 1
- * is returned.
- */
-
-int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
- u8 dma_stat;
-
- /* fall back to pio! */
- if (ide_build_dmatable(drive, cmd) == 0) {
- ide_map_sg(drive, cmd);
- return 1;
- }
-
- /* PRD table */
- if (mmio)
- writel(hwif->dmatable_dma,
- (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
- else
- outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
-
- /* specify r/w */
- if (mmio)
- writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- else
- outb(rw, hwif->dma_base + ATA_DMA_CMD);
-
- /* read DMA status for INTR & ERROR flags */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- /* clear INTR & ERROR flags */
- ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_setup);
-
-/**
- * ide_dma_sff_timer_expiry - handle a DMA timeout
- * @drive: Drive that timed out
- *
- * An IDE DMA transfer timed out. In the event of an error we ask
- * the driver to resolve the problem; if a DMA transfer is still
- * in progress we continue to wait (arguably we need to add a
- * secondary 'I don't care what the drive thinks' timeout here).
- * Finally, if we have an interrupt we let it complete the I/O.
- * But only one time - we clear expiry and if it's still not
- * completed after WAIT_CMD, we error and retry in PIO.
- * This can occur if an interrupt is lost or due to hang or bugs.
- */
-
-int ide_dma_sff_timer_expiry(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
- drive->name, __func__, dma_stat);
-
- if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! */
- return WAIT_CMD;
-
- hwif->expiry = NULL; /* one free ride for now */
-
- if (dma_stat & ATA_DMA_ERR) /* ERROR */
- return -1;
-
- if (dma_stat & ATA_DMA_ACTIVE) /* DMAing */
- return WAIT_CMD;
-
- if (dma_stat & ATA_DMA_INTR) /* Got an Interrupt */
- return WAIT_CMD;
-
- return 0; /* Status is unknown -- reset the bus */
-}
-EXPORT_SYMBOL_GPL(ide_dma_sff_timer_expiry);
-
-void ide_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_cmd;
-
- /* Note that this is done *after* the cmd has
- * been issued to the drive, as per the BM-IDE spec.
- * The Promise Ultra33 doesn't work correctly when
- * we do this part before issuing the drive cmd.
- */
- if (hwif->host_flags & IDE_HFLAG_MMIO) {
- dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- writeb(dma_cmd | ATA_DMA_START,
- (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- } else {
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- }
-}
-EXPORT_SYMBOL_GPL(ide_dma_start);
-
-/* returns nonzero (0x10 | dma_stat) on error, 0 otherwise */
-int ide_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- /* stop DMA */
- if (hwif->host_flags & IDE_HFLAG_MMIO) {
- dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- writeb(dma_cmd & ~ATA_DMA_START,
- (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- } else {
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- }
-
- /* get DMA status */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- /* clear INTR & ERROR bits */
- ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);
-
-#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
-
- /* verify good DMA status */
- if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR)
- return 0x10 | dma_stat;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_end);
-
-/* returns 1 if dma irq issued, 0 otherwise */
-int ide_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_test_irq);
-
-const struct ide_dma_ops sff_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-EXPORT_SYMBOL_GPL(sff_dma_ops);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
deleted file mode 100644
index 6f344654ef22..000000000000
--- a/drivers/ide/ide-dma.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * IDE DMA support (including IDE PCI BM-DMA).
- *
- * Copyright (C) 1995-1998 Mark Lord
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
- */
-
-/*
- * Special thanks to Mark for his six years of work.
- */
-
-/*
- * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
- * fixing the problem with the BIOS on some Acer motherboards.
- *
- * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
- * "TX" chipset compatibility and for providing patches for the "TX" chipset.
- *
- * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
- * at generic DMA -- his patches were referred to when preparing this code.
- *
- * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
- * for supplying a Promise UDMA board & WD UDMA drive for this work!
- */
-
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-
-static const struct drive_list_entry drive_whitelist[] = {
- { "Micropolis 2112A" , NULL },
- { "CONNER CTMA 4000" , NULL },
- { "CONNER CTT8000-A" , NULL },
- { "ST34342A" , NULL },
- { NULL , NULL }
-};
-
-static const struct drive_list_entry drive_blacklist[] = {
- { "WDC AC11000H" , NULL },
- { "WDC AC22100H" , NULL },
- { "WDC AC32500H" , NULL },
- { "WDC AC33100H" , NULL },
- { "WDC AC31600H" , NULL },
- { "WDC AC32100H" , "24.09P07" },
- { "WDC AC23200L" , "21.10N21" },
- { "Compaq CRD-8241B" , NULL },
- { "CRD-8400B" , NULL },
- { "CRD-8480B", NULL },
- { "CRD-8482B", NULL },
- { "CRD-84" , NULL },
- { "SanDisk SDP3B" , NULL },
- { "SanDisk SDP3B-64" , NULL },
- { "SANYO CD-ROM CRD" , NULL },
- { "HITACHI CDR-8" , NULL },
- { "HITACHI CDR-8335" , NULL },
- { "HITACHI CDR-8435" , NULL },
- { "Toshiba CD-ROM XM-6202B" , NULL },
- { "TOSHIBA CD-ROM XM-1702BC", NULL },
- { "CD-532E-A" , NULL },
- { "E-IDE CD-ROM CR-840", NULL },
- { "CD-ROM Drive/F5A", NULL },
- { "WPI CDD-820", NULL },
- { "SAMSUNG CD-ROM SC-148C", NULL },
- { "SAMSUNG CD-ROM SC", NULL },
- { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
- { "_NEC DV5800A", NULL },
- { "SAMSUNG CD-ROM SN-124", "N001" },
- { "Seagate STT20000A", NULL },
- { "CD-ROM CDR_U200", "1.09" },
- { NULL , NULL }
-};
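
Both tables are consumed by ide_in_drive_list(), which lives elsewhere in the subsystem: an entry matches when the model string matches exactly and the firmware field either is NULL (any revision) or occurs within the drive's firmware string. A minimal standalone model of that lookup:

    #include <stdio.h>
    #include <string.h>

    struct drive_list_entry {
        const char *id_model;
        const char *id_firmware;
    };

    static const struct drive_list_entry blacklist[] = {
        { "WDC AC32100H", "24.09P07" },  /* only this revision */
        { "CRD-8400B",    NULL },        /* any revision */
        { NULL, NULL }
    };

    static int in_drive_list(const char *model, const char *fw,
                             const struct drive_list_entry *t)
    {
        for (; t->id_model; t++)
            if (strcmp(t->id_model, model) == 0 &&
                (t->id_firmware == NULL || strstr(fw, t->id_firmware)))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               in_drive_list("WDC AC32100H", "24.09P07", blacklist),  /* 1 */
               in_drive_list("WDC AC32100H", "25.00X99", blacklist),  /* 0 */
               in_drive_list("CRD-8400B",    "1.00",     blacklist)); /* 1 */
        return 0;
    }
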
-
-/**
- * ide_dma_intr - IDE DMA interrupt handler
- * @drive: the drive the interrupt is for
- *
- * Handle an interrupt completing a read/write DMA transfer on an
- * IDE device
- */
-
-ide_startstop_t ide_dma_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- u8 stat = 0, dma_stat = 0;
-
- drive->waiting_for_dma = 0;
- dma_stat = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- stat = hwif->tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
- if (!dma_stat) {
- if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
- ide_finish_cmd(drive, cmd, stat);
- else
- ide_complete_rq(drive, BLK_STS_OK,
- blk_rq_sectors(cmd->rq) << 9);
- return ide_stopped;
- }
- printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
- drive->name, __func__, dma_stat);
- }
- return ide_error(drive, "dma_intr", stat);
-}
-
-int ide_dma_good_drive(ide_drive_t *drive)
-{
- return ide_in_drive_list(drive->id, drive_whitelist);
-}
-
-/**
- * ide_dma_map_sg - map IDE scatter gather for DMA I/O
- * @drive: the drive to map the DMA table for
- * @cmd: command
- *
- * Perform the DMA mapping magic necessary to access the source or
- * target buffers of a request via DMA. The lower layers of the
- * kernel provide the necessary cache management so that we can
- * operate in a portable fashion.
- */
-
-static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
- int i;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- cmd->sg_dma_direction = DMA_TO_DEVICE;
- else
- cmd->sg_dma_direction = DMA_FROM_DEVICE;
-
- i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
- if (i) {
- cmd->orig_sg_nents = cmd->sg_nents;
- cmd->sg_nents = i;
- }
-
- return i;
-}
-
-/**
- * ide_dma_unmap_sg - clean up DMA mapping
- * @drive: The drive to unmap
- * @cmd: command whose scatter/gather mapping to tear down
- *
- * Teardown mappings after DMA has completed. This must be called
- * after the completion of each use of ide_build_dmatable and before
- * the next use of ide_build_dmatable. Failure to do so will cause
- * an oops as only one mapping can be live for each target at a given
- * time.
- */
-
-void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
- cmd->sg_dma_direction);
-}
-EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);
-
-/**
- * ide_dma_off_quietly - Generic DMA kill
- * @drive: drive to control
- *
- * Turn off the current DMA on this IDE controller.
- */
-
-void ide_dma_off_quietly(ide_drive_t *drive)
-{
- drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
-
- drive->hwif->dma_ops->dma_host_set(drive, 0);
-}
-EXPORT_SYMBOL(ide_dma_off_quietly);
-
-/**
- * ide_dma_off - disable DMA on a device
- * @drive: drive to disable DMA on
- *
- * Disable IDE DMA for a device on this IDE controller.
- * Inform the user that DMA has been disabled.
- */
-
-void ide_dma_off(ide_drive_t *drive)
-{
- printk(KERN_INFO "%s: DMA disabled\n", drive->name);
- ide_dma_off_quietly(drive);
-}
-EXPORT_SYMBOL(ide_dma_off);
-
-/**
- * ide_dma_on - Enable DMA on a device
- * @drive: drive to enable DMA on
- *
- * Enable IDE DMA for a device on this IDE controller.
- */
-
-void ide_dma_on(ide_drive_t *drive)
-{
- drive->dev_flags |= IDE_DFLAG_USING_DMA;
-
- drive->hwif->dma_ops->dma_host_set(drive, 1);
-}
-
-int __ide_dma_bad_drive(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- int blacklist = ide_in_drive_list(id, drive_blacklist);
- if (blacklist) {
- printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
- drive->name, (char *)&id[ATA_ID_PROD]);
- return blacklist;
- }
- return 0;
-}
-EXPORT_SYMBOL(__ide_dma_bad_drive);
-
-static const u8 xfer_mode_bases[] = {
- XFER_UDMA_0,
- XFER_MW_DMA_0,
- XFER_SW_DMA_0,
-};
-
-static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
-{
- u16 *id = drive->id;
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- unsigned int mask = 0;
-
- switch (base) {
- case XFER_UDMA_0:
- if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
- break;
- mask = id[ATA_ID_UDMA_MODES];
- if (port_ops && port_ops->udma_filter)
- mask &= port_ops->udma_filter(drive);
- else
- mask &= hwif->ultra_mask;
-
- /*
- * avoid false cable warning from eighty_ninty_three()
- */
- if (req_mode > XFER_UDMA_2) {
- if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
- mask &= 0x07;
- }
- break;
- case XFER_MW_DMA_0:
- mask = id[ATA_ID_MWDMA_MODES];
-
- /* Also look for the CF specific MWDMA modes... */
- if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
- u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;
-
- mask |= ((2 << mode) - 1) << 3;
- }
-
- if (port_ops && port_ops->mdma_filter)
- mask &= port_ops->mdma_filter(drive);
- else
- mask &= hwif->mwdma_mask;
- break;
- case XFER_SW_DMA_0:
- mask = id[ATA_ID_SWDMA_MODES];
- if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
- u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
-
- /*
- * if the mode is valid convert it to the mask
- * (the maximum allowed mode is XFER_SW_DMA_2)
- */
- if (mode <= 2)
- mask = (2 << mode) - 1;
- }
- mask &= hwif->swdma_mask;
- break;
- default:
- BUG();
- break;
- }
-
- return mask;
-}
-
-/**
- * ide_find_dma_mode - compute DMA speed
- * @drive: IDE device
- * @req_mode: requested mode
- *
- * Checks the drive/host capabilities and finds the speed to use for
- * the DMA transfer. The speed is then limited by the requested mode.
- *
- * Returns 0 if the drive/host combination is incapable of DMA transfers
- * or if the requested mode is not a DMA mode.
- */
-
-u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int mask;
- int x, i;
- u8 mode = 0;
-
- if (drive->media != ide_disk) {
- if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
- if (req_mode < xfer_mode_bases[i])
- continue;
- mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
- x = fls(mask) - 1;
- if (x >= 0) {
- mode = xfer_mode_bases[i] + x;
- break;
- }
- }
-
- if (hwif->chipset == ide_acorn && mode == 0) {
- /*
- * is this correct?
- */
- if (ide_dma_good_drive(drive) &&
- drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
- mode = XFER_MW_DMA_1;
- }
-
- mode = min(mode, req_mode);
-
- printk(KERN_INFO "%s: %s mode selected\n", drive->name,
- mode ? ide_xfer_verbose(mode) : "no DMA");
-
- return mode;
-}
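
Mode selection above reduces to bitmask arithmetic: a "supports modes 0..n" word becomes the mask (2 << n) - 1, drive and host masks are intersected, and fls() (1-based index of the highest set bit, 0 for an empty mask) picks the best common mode. A standalone sketch with a portable fls:

    #include <stdio.h>

    /* 1-based index of the most significant set bit; 0 if x == 0. */
    static int fls32(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    /* Modes 0..max_mode supported -> bitmask, as in (2 << mode) - 1. */
    static unsigned int mode_to_mask(int max_mode)
    {
        return (2u << max_mode) - 1;
    }

    int main(void)
    {
        unsigned int drive = mode_to_mask(5);   /* drive: UDMA 0-5 */
        unsigned int host  = mode_to_mask(4);   /* host:  UDMA 0-4 */
        int best = fls32(drive & host) - 1;

        printf("best common mode: %d\n", best); /* 4 */
        return 0;
    }
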
-
-static int ide_tune_dma(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 speed;
-
- if (ata_id_has_dma(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_NODMA))
- return 0;
-
- /* consult the list of known "bad" drives */
- if (__ide_dma_bad_drive(drive))
- return 0;
-
- if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
- return config_drive_for_dma(drive);
-
- speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- if (ide_set_dma_mode(drive, speed))
- return 0;
-
- return 1;
-}
-
-static int ide_dma_check(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if (ide_tune_dma(drive))
- return 0;
-
- /* TODO: always do PIO fallback */
- if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
- return -1;
-
- ide_set_max_pio(drive);
-
- return -1;
-}
-
-int ide_set_dma(ide_drive_t *drive)
-{
- int rc;
-
- /*
- * Force DMAing for the beginning of the check.
- * Some chipsets appear to do interesting
- * things, if not checked and cleared.
- * PARANOIA!!!
- */
- ide_dma_off_quietly(drive);
-
- rc = ide_dma_check(drive);
- if (rc)
- return rc;
-
- ide_dma_on(drive);
-
- return 0;
-}
-
-void ide_check_dma_crc(ide_drive_t *drive)
-{
- u8 mode;
-
- ide_dma_off_quietly(drive);
- drive->crc_count = 0;
- mode = drive->current_speed;
- /*
- * Don't step down into non-Ultra-DMA modes, which lack iCRC. Force
- * the device to PIO and make the user enable SWDMA/MWDMA modes.
- */
- if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
- mode--;
- else
- mode = XFER_PIO_4;
- ide_set_xfer_rate(drive, mode);
- if (drive->current_speed >= XFER_SW_DMA_0)
- ide_dma_on(drive);
-}
-
-void ide_dma_lost_irq(ide_drive_t *drive)
-{
- printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
-}
-EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
-
-/*
- * Un-busy the port etc., and clear any pending DMA status. We want to
- * retry the current request in PIO mode instead of risking tossing it
- * all away.
- */
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_dma_ops *dma_ops = hwif->dma_ops;
- struct ide_cmd *cmd = &hwif->cmd;
- ide_startstop_t ret = ide_stopped;
-
- /*
- * end current dma transaction
- */
-
- if (error < 0) {
- printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
- drive->waiting_for_dma = 0;
- (void)dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- ret = ide_error(drive, "dma timeout error",
- hwif->tp_ops->read_status(hwif));
- } else {
- printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
- if (dma_ops->dma_clear)
- dma_ops->dma_clear(drive);
- printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
- if (dma_ops->dma_test_irq(drive) == 0) {
- ide_dump_status(drive, "DMA timeout",
- hwif->tp_ops->read_status(hwif));
- drive->waiting_for_dma = 0;
- (void)dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- }
- }
-
- /*
- * Disable DMA for now, but remember that we did so because of
- * a timeout -- we'll re-enable it after we finish this next request
- * (or rather the first chunk of it) in PIO.
- */
- drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
- drive->retry_pio++;
- ide_dma_off_quietly(drive);
-
- /*
- * make sure request is sane
- */
- if (hwif->rq)
- scsi_req(hwif->rq)->result = 0;
- return ret;
-}
-
-void ide_release_dma_engine(ide_hwif_t *hwif)
-{
- if (hwif->dmatable_cpu) {
- int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
-
- dma_free_coherent(hwif->dev, prd_size,
- hwif->dmatable_cpu, hwif->dmatable_dma);
- hwif->dmatable_cpu = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(ide_release_dma_engine);
-
-int ide_allocate_dma_engine(ide_hwif_t *hwif)
-{
- int prd_size;
-
- if (hwif->prd_max_nents == 0)
- hwif->prd_max_nents = PRD_ENTRIES;
- if (hwif->prd_ent_size == 0)
- hwif->prd_ent_size = PRD_BYTES;
-
- prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
-
- hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
- &hwif->dmatable_dma,
- GFP_ATOMIC);
- if (hwif->dmatable_cpu == NULL) {
- printk(KERN_ERR "%s: unable to allocate PRD table\n",
- hwif->name);
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
-
-int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;
-
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
- (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
- goto out;
- ide_map_sg(drive, cmd);
- if (ide_dma_map_sg(drive, cmd) == 0)
- goto out_map;
- if (dma_ops->dma_setup(drive, cmd))
- goto out_dma_unmap;
- drive->waiting_for_dma = 1;
- return 0;
-out_dma_unmap:
- ide_dma_unmap_sg(drive, cmd);
-out_map:
- ide_map_sg(drive, cmd);
-out:
- return 1;
-}
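
A note on the mask arithmetic in the deleted ide_get_mode_mask()/ide_find_dma_mode() above: (2 << mode) - 1 expands a single highest-supported mode number into a bitmask of every mode up to and including it, and fls(mask) - 1 inverts that, recovering the highest set bit as a mode index. A minimal standalone sketch of the same arithmetic, with GCC's __builtin_clz standing in for the kernel's fls():

    #include <assert.h>

    /* Kernel-style fls(): index of the highest set bit, 1-based; 0 for 0. */
    static int fls_sketch(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            /* A device reporting highest mode 2 yields mask 0b111 ... */
            unsigned int mode = 2;
            unsigned int mask = (2u << mode) - 1;

            assert(mask == 0x7);
            /* ... and fls(mask) - 1 recovers mode 2, as ide_find_dma_mode() does. */
            assert(fls_sketch(mask) - 1 == (int)mode);
            return 0;
    }
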
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
deleted file mode 100644
index 2f378213e9b5..000000000000
--- a/drivers/ide/ide-eh.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/delay.h>
-
-static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- scsi_req(rq)->result |= ERROR_RESET;
- } else if (stat & ATA_ERR) {
- /* err has a different meaning on cdrom and tape */
- if (err == ATA_ABORTED) {
- if ((drive->dev_flags & IDE_DFLAG_LBA) &&
- /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
- hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
- return ide_stopped;
- } else if ((err & BAD_CRC) == BAD_CRC) {
- /* UDMA crc error, just retry the operation */
- drive->crc_count++;
- } else if (err & (ATA_BBK | ATA_UNC)) {
- /* retries won't help these */
- scsi_req(rq)->result = ERROR_MAX;
- } else if (err & ATA_TRK0NF) {
- /* help it find track zero */
- scsi_req(rq)->result |= ERROR_RECAL;
- }
- }
-
- if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
- (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
- int nsect = drive->mult_count ? drive->mult_count : 1;
-
- ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
- }
-
- if (scsi_req(rq)->result >= ERROR_MAX || blk_noretry_request(rq)) {
- ide_kill_rq(drive, rq);
- return ide_stopped;
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- scsi_req(rq)->result |= ERROR_RESET;
-
- if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
- ++scsi_req(rq)->result;
- return ide_do_reset(drive);
- }
-
- if ((scsi_req(rq)->result & ERROR_RECAL) == ERROR_RECAL)
- drive->special_flags |= IDE_SFLAG_RECALIBRATE;
-
- ++scsi_req(rq)->result;
-
- return ide_stopped;
-}
-
-static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- scsi_req(rq)->result |= ERROR_RESET;
- } else {
- /* add decoding error stuff */
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- /* force an abort */
- hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
-
- if (scsi_req(rq)->result >= ERROR_MAX) {
- ide_kill_rq(drive, rq);
- } else {
- if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
- ++scsi_req(rq)->result;
- return ide_do_reset(drive);
- }
- ++scsi_req(rq)->result;
- }
-
- return ide_stopped;
-}
-
-static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- if (drive->media == ide_disk)
- return ide_ata_error(drive, rq, stat, err);
- return ide_atapi_error(drive, rq, stat, err);
-}
-
-/**
- * ide_error - handle an error on the IDE
- * @drive: drive the error occurred on
- * @msg: message to report
- * @stat: status bits
- *
- * ide_error() takes action based on the error returned by the drive.
- * For normal I/O that may well include retries. We deal with
- * both new-style (taskfile) and old-style command handling here.
- * In the case of taskfile command handling there is work left to
- * do.
- */
-
-ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
-{
- struct request *rq;
- u8 err;
-
- err = ide_dump_status(drive, msg, stat);
-
- rq = drive->hwif->rq;
- if (rq == NULL)
- return ide_stopped;
-
- /* retry only "normal" I/O: */
- if (blk_rq_is_passthrough(rq)) {
- if (ata_taskfile_request(rq)) {
- struct ide_cmd *cmd = ide_req(rq)->special;
-
- if (cmd)
- ide_complete_cmd(drive, cmd, stat, err);
- } else if (ata_pm_request(rq)) {
- scsi_req(rq)->result = 1;
- ide_complete_pm_rq(drive, rq);
- return ide_stopped;
- }
- scsi_req(rq)->result = err;
- ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
- return ide_stopped;
- }
-
- return __ide_error(drive, rq, stat, err);
-}
-EXPORT_SYMBOL_GPL(ide_error);
-
-static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
-{
- struct request *rq = drive->hwif->rq;
-
- if (rq && ata_misc_request(rq) &&
- scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
- if (err <= 0 && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- ide_complete_rq(drive, err, blk_rq_bytes(rq));
- }
-}
-
-/* needed below */
-static ide_startstop_t do_reset1(ide_drive_t *, int);
-
-/*
- * atapi_reset_pollfunc() gets invoked to poll the interface for completion
- * every 50ms during an atapi drive reset operation. If the drive has not yet
- * responded, and we have not yet hit our maximum waiting time, then the timer
- * is restarted for another 50ms.
- */
-static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- u8 stat;
-
- tp_ops->dev_select(drive);
- udelay(10);
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, 0, ATA_BUSY))
- printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
- else {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
- /* continue polling */
- return ide_started;
- }
- /* end of polling */
- hwif->polling = 0;
- printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
- drive->name, stat);
- /* do it the old fashioned way */
- return do_reset1(drive, 1);
- }
- /* done polling */
- hwif->polling = 0;
- ide_complete_drive_reset(drive, BLK_STS_OK);
- return ide_stopped;
-}
-
-static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
-{
- static const char *err_master_vals[] =
- { NULL, "passed", "formatter device error",
- "sector buffer error", "ECC circuitry error",
- "controlling MPU error" };
-
- u8 err_master = err & 0x7f;
-
- printk(KERN_ERR "%s: reset: master: ", hwif->name);
- if (err_master && err_master < 6)
- printk(KERN_CONT "%s", err_master_vals[err_master]);
- else
- printk(KERN_CONT "error (0x%02x?)", err);
- if (err & 0x80)
- printk(KERN_CONT "; slave: failed");
- printk(KERN_CONT "\n");
-}
-
-/*
- * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
- * during an ide reset operation. If the drives have not yet responded,
- * and we have not yet hit our maximum waiting time, then the timer is restarted
- * for another 50ms.
- */
-static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 tmp;
- blk_status_t err = BLK_STS_OK;
-
- if (port_ops && port_ops->reset_poll) {
- err = port_ops->reset_poll(drive);
- if (err) {
- printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
- hwif->name, drive->name);
- goto out;
- }
- }
-
- tmp = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(tmp, 0, ATA_BUSY)) {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &reset_pollfunc, HZ/20);
- /* continue polling */
- return ide_started;
- }
- printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
- hwif->name, tmp);
- drive->failures++;
- err = BLK_STS_IOERR;
- } else {
- tmp = ide_read_error(drive);
-
- if (tmp == 1) {
- printk(KERN_INFO "%s: reset: success\n", hwif->name);
- drive->failures = 0;
- } else {
- ide_reset_report_error(hwif, tmp);
- drive->failures++;
- err = BLK_STS_IOERR;
- }
- }
-out:
- hwif->polling = 0; /* done polling */
- ide_complete_drive_reset(drive, err);
- return ide_stopped;
-}
-
-static void ide_disk_pre_reset(ide_drive_t *drive)
-{
- int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
-
- drive->special_flags =
- legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;
-
- drive->mult_count = 0;
- drive->dev_flags &= ~IDE_DFLAG_PARKED;
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
- (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
- drive->mult_req = 0;
-
- if (drive->mult_req != drive->mult_count)
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
-}
-
-static void pre_reset(ide_drive_t *drive)
-{
- const struct ide_port_ops *port_ops = drive->hwif->port_ops;
-
- if (drive->media == ide_disk)
- ide_disk_pre_reset(drive);
- else
- drive->dev_flags |= IDE_DFLAG_POST_RESET;
-
- if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
- if (drive->crc_count)
- ide_check_dma_crc(drive);
- else
- ide_dma_off(drive);
- }
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
- drive->io_32bit = 0;
- }
- return;
- }
-
- if (port_ops && port_ops->pre_reset)
- port_ops->pre_reset(drive);
-
- if (drive->current_speed != 0xff)
- drive->desired_speed = drive->current_speed;
- drive->current_speed = 0xff;
-}
-
-/*
- * do_reset1() attempts to recover a confused drive by resetting it.
- * Unfortunately, resetting a disk drive actually resets all devices on
- * the same interface, so it can really be thought of as resetting the
- * interface rather than resetting the drive.
- *
- * ATAPI devices have their own reset mechanism which allows them to be
- * individually reset without clobbering other devices on the same interface.
- *
- * Unfortunately, the IDE interface does not generate an interrupt to let
- * us know when the reset operation has finished, so we must poll for this.
- * Equally poor, though, is the fact that this may take a very long time to
- * complete (up to 30 seconds worst case). So, instead of busy-waiting here for it,
- * we set a timer to poll at 50ms intervals.
- */
-static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- const struct ide_port_ops *port_ops;
- ide_drive_t *tdrive;
- unsigned long flags, timeout;
- int i;
- DEFINE_WAIT(wait);
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- /* We must not reset with running handlers */
- BUG_ON(hwif->handler != NULL);
-
- /* For an ATAPI device, first try an ATAPI SRST. */
- if (drive->media != ide_disk && !do_not_try_atapi) {
- pre_reset(drive);
- tp_ops->dev_select(drive);
- udelay(20);
- tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- ndelay(400);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
- }
-
- /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
- do {
- unsigned long now;
-
- prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
- timeout = jiffies;
- ide_port_for_each_present_dev(i, tdrive, hwif) {
- if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
- time_after(tdrive->sleep, timeout))
- timeout = tdrive->sleep;
- }
-
- now = jiffies;
- if (time_before_eq(timeout, now))
- break;
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- timeout = schedule_timeout_uninterruptible(timeout - now);
- spin_lock_irqsave(&hwif->lock, flags);
- } while (timeout);
- finish_wait(&ide_park_wq, &wait);
-
- /*
- * First, reset any device state data we were maintaining
- * for any of the drives on this interface.
- */
- ide_port_for_each_dev(i, tdrive, hwif)
- pre_reset(tdrive);
-
- if (io_ports->ctl_addr == 0) {
- spin_unlock_irqrestore(&hwif->lock, flags);
- ide_complete_drive_reset(drive, BLK_STS_IOERR);
- return ide_stopped;
- }
-
- /*
- * Note that we also set nIEN while resetting the device,
- * to mask unwanted interrupts from the interface during the reset.
- * However, due to the design of PC hardware, this will cause an
- * immediate interrupt due to the edge transition it produces.
- * This single interrupt gives us a "fast poll" for drives that
- * recover from reset very quickly, saving us the first 50ms wait time.
- */
- /* set SRST and nIEN */
- tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS);
- /* more than enough time */
- udelay(10);
- /* clear SRST, leave nIEN (unless device is on the quirk list) */
- tp_ops->write_devctl(hwif,
- ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
- ATA_DEVCTL_OBS);
- /* more than enough time */
- udelay(10);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &reset_pollfunc, HZ/20);
-
- /*
- * Some weird controllers like resetting themselves to a strange
- * state when the disks are reset this way. At least, the Winbond
- * 553 documentation says so.
- */
- port_ops = hwif->port_ops;
- if (port_ops && port_ops->resetproc)
- port_ops->resetproc(drive);
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
-}
-
-/*
- * ide_do_reset() is the entry point to the drive/interface reset code.
- */
-
-ide_startstop_t ide_do_reset(ide_drive_t *drive)
-{
- return do_reset1(drive, 0);
-}
-EXPORT_SYMBOL(ide_do_reset);
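
The reset paths deleted above all share one shape: kick the device, then re-arm a 50ms (HZ/20) handler until BSY clears or a worst-case deadline (WAIT_WORSTCASE) expires. A rough userspace sketch of that deadline-polling pattern follows; the simulated read_status() is a stand-in for the hwif->tp_ops->read_status() hook, not the real API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define ATA_BUSY 0x80

    /* Simulated device: BSY stays set for the first few status reads. */
    static int reads_left = 5;
    static unsigned char read_status(void)
    {
            return reads_left-- > 0 ? ATA_BUSY : 0;
    }

    /* Poll every 50ms until BSY clears or a worst-case deadline passes,
     * mirroring reset_pollfunc()'s re-armed HZ/20 timer. */
    static bool wait_not_busy(int worst_case_secs)
    {
            struct timespec tick = { 0, 50 * 1000 * 1000 };
            time_t deadline = time(NULL) + worst_case_secs;

            while (read_status() & ATA_BUSY) {
                    if (time(NULL) > deadline)
                            return false;   /* give up, as do_reset1() would */
                    nanosleep(&tick, NULL);
            }
            return true;
    }

    int main(void)
    {
            printf("reset %s\n", wait_not_busy(30) ? "complete" : "timed out");
            return 0;
    }
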
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
deleted file mode 100644
index f5a2870aaf54..000000000000
--- a/drivers/ide/ide-floppy.c
+++ /dev/null
@@ -1,551 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IDE ATAPI floppy driver.
- *
- * Copyright (C) 1996-1999 Gadi Oxman <gadio@netvision.net.il>
- * Copyright (C) 2000-2002 Paul Bristow <paul@paulbristow.net>
- * Copyright (C) 2005 Bartlomiej Zolnierkiewicz
- *
- * This driver supports the following IDE floppy drives:
- *
- * LS-120/240 SuperDisk
- * Iomega Zip 100/250
- * Iomega PC Card Clik!/PocketZip
- *
- * For a historical changelog see
- * Documentation/ide/ChangeLog.ide-floppy.1996-2002
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/compat.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-
-#include <scsi/scsi_ioctl.h>
-
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <asm/unaligned.h>
-
-#include "ide-floppy.h"
-
-/*
- * After each failed packet command we issue a request sense command and retry
- * the packet command IDEFLOPPY_MAX_PC_RETRIES times.
- */
-#define IDEFLOPPY_MAX_PC_RETRIES 3
-
-/* format capacities descriptor codes */
-#define CAPACITY_INVALID 0x00
-#define CAPACITY_UNFORMATTED 0x01
-#define CAPACITY_CURRENT 0x02
-#define CAPACITY_NO_CARTRIDGE 0x03
-
-/*
- * The following delay solves a problem with the ATAPI Zip 100 drive, where the
- * BSY bit was apparently being deasserted before the unit was ready to receive data.
- */
-#define IDEFLOPPY_PC_DELAY (HZ/20) /* default delay for ZIP 100 (50ms) */
-
-static int ide_floppy_callback(ide_drive_t *drive, int dsc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- struct request *rq = pc->rq;
- int uptodate = pc->error ? 0 : 1;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (drive->failed_pc == pc)
- drive->failed_pc = NULL;
-
- if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- blk_rq_is_scsi(rq))
- uptodate = 1; /* FIXME */
- else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
-
- u8 *buf = bio_data(rq->bio);
-
- if (!pc->error) {
- floppy->sense_key = buf[2] & 0x0F;
- floppy->asc = buf[12];
- floppy->ascq = buf[13];
- floppy->progress_indication = buf[15] & 0x80 ?
- (u16)get_unaligned((u16 *)&buf[16]) : 0x10000;
-
- if (drive->failed_pc)
- ide_debug_log(IDE_DBG_PC, "pc = %x",
- drive->failed_pc->c[0]);
-
- ide_debug_log(IDE_DBG_SENSE, "sense key = %x, asc = %x,"
- "ascq = %x", floppy->sense_key,
- floppy->asc, floppy->ascq);
- } else
- printk(KERN_ERR PFX "Error in REQUEST SENSE itself - "
- "Aborting request!\n");
- }
-
- if (ata_misc_request(rq))
- scsi_req(rq)->result = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
-
- return uptodate;
-}
-
-static void ide_floppy_report_error(struct ide_disk_obj *floppy,
- struct ide_atapi_pc *pc)
-{
- /* suppress error messages resulting from Medium not present */
- if (floppy->sense_key == 0x02 &&
- floppy->asc == 0x3a &&
- floppy->ascq == 0x00)
- return;
-
- printk(KERN_ERR PFX "%s: I/O error, pc = %2x, key = %2x, "
- "asc = %2x, ascq = %2x\n",
- floppy->drive->name, pc->c[0], floppy->sense_key,
- floppy->asc, floppy->ascq);
-
-}
-
-static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
- struct ide_cmd *cmd,
- struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
-
- if (drive->failed_pc == NULL &&
- pc->c[0] != GPCMD_REQUEST_SENSE)
- drive->failed_pc = pc;
-
- /* Set the current packet command */
- drive->pc = pc;
-
- if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
- unsigned int done = blk_rq_bytes(drive->hwif->rq);
-
- if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
- ide_floppy_report_error(floppy, pc);
-
- /* Giving up */
- pc->error = IDE_DRV_ERROR_GENERAL;
-
- drive->failed_pc = NULL;
- drive->pc_callback(drive, 0);
- ide_complete_rq(drive, BLK_STS_IOERR, done);
- return ide_stopped;
- }
-
- ide_debug_log(IDE_DBG_FUNC, "retry #%d", pc->retries);
-
- pc->retries++;
-
- return ide_issue_pc(drive, cmd);
-}
-
-void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = GPCMD_READ_FORMAT_CAPACITIES;
- pc->c[7] = 255;
- pc->c[8] = 255;
- pc->req_xfer = 255;
-}
-
-/* A mode sense command is used to "sense" floppy parameters. */
-void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
-{
- u16 length = 8; /* sizeof(Mode Parameter Header) = 8 Bytes */
-
- ide_init_pc(pc);
- pc->c[0] = GPCMD_MODE_SENSE_10;
- pc->c[1] = 0;
- pc->c[2] = page_code;
-
- switch (page_code) {
- case IDEFLOPPY_CAPABILITIES_PAGE:
- length += 12;
- break;
- case IDEFLOPPY_FLEXIBLE_DISK_PAGE:
- length += 32;
- break;
- default:
- printk(KERN_ERR PFX "unsupported page code in %s\n", __func__);
- }
- put_unaligned(cpu_to_be16(length), (u16 *) &pc->c[7]);
- pc->req_xfer = length;
-}
-
-static void idefloppy_create_rw_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc, struct request *rq,
- unsigned long sector)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- int block = sector / floppy->bs_factor;
- int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
- int cmd = rq_data_dir(rq);
-
- ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
-
- ide_init_pc(pc);
- pc->c[0] = cmd == READ ? GPCMD_READ_10 : GPCMD_WRITE_10;
- put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
- put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
-
- pc->rq = rq;
- if (cmd == WRITE)
- pc->flags |= PC_FLAG_WRITING;
-
- pc->flags |= PC_FLAG_DMA_OK;
-}
-
-static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
- struct ide_atapi_pc *pc, struct request *rq)
-{
- ide_init_pc(pc);
- memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
- pc->rq = rq;
- if (blk_rq_bytes(rq)) {
- pc->flags |= PC_FLAG_DMA_OK;
- if (rq_data_dir(rq) == WRITE)
- pc->flags |= PC_FLAG_WRITING;
- }
-}
-
-static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
- struct request *rq, sector_t block)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct ide_cmd cmd;
- struct ide_atapi_pc *pc;
-
- ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);
-
- if (drive->debug_mask & IDE_DBG_RQ)
- blk_dump_rq_flags(rq, (rq->rq_disk
- ? rq->rq_disk->disk_name
- : "dev?"));
-
- if (scsi_req(rq)->result >= ERROR_MAX) {
- if (drive->failed_pc) {
- ide_floppy_report_error(floppy, drive->failed_pc);
- drive->failed_pc = NULL;
- } else
- printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
-
- if (ata_misc_request(rq)) {
- scsi_req(rq)->result = 0;
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
- return ide_stopped;
- } else
- goto out_end;
- }
-
- switch (req_op(rq)) {
- default:
- if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
- (blk_rq_sectors(rq) % floppy->bs_factor)) {
- printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
- drive->name);
- goto out_end;
- }
- pc = &floppy->queued_pc;
- idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- pc = &floppy->queued_pc;
- idefloppy_blockpc_cmd(floppy, pc, rq);
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_MISC:
- case ATA_PRIV_SENSE:
- pc = (struct ide_atapi_pc *)ide_req(rq)->special;
- break;
- default:
- BUG();
- }
- }
-
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
- }
-
- pc->rq = rq;
-
- return ide_floppy_issue_pc(drive, &cmd, pc);
-out_end:
- drive->failed_pc = NULL;
- if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
- return ide_stopped;
-}
-
-/*
- * Look at the flexible disk page parameters. We ignore the CHS capacity
- * parameters and use the LBA parameters instead.
- */
-static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive,
- struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- u8 *page, buf[40];
- int capacity, lba_capacity;
- u16 transfer_rate, sector_size, cyls, rpm;
- u8 heads, sectors;
-
- ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_FLEXIBLE_DISK_PAGE);
-
- if (ide_queue_pc_tail(drive, disk, pc, buf, pc->req_xfer)) {
- printk(KERN_ERR PFX "Can't get flexible disk page params\n");
- return 1;
- }
-
- if (buf[3] & 0x80)
- drive->dev_flags |= IDE_DFLAG_WP;
- else
- drive->dev_flags &= ~IDE_DFLAG_WP;
-
- set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP));
-
- page = &buf[8];
-
- transfer_rate = be16_to_cpup((__be16 *)&buf[8 + 2]);
- sector_size = be16_to_cpup((__be16 *)&buf[8 + 6]);
- cyls = be16_to_cpup((__be16 *)&buf[8 + 8]);
- rpm = be16_to_cpup((__be16 *)&buf[8 + 28]);
- heads = buf[8 + 4];
- sectors = buf[8 + 5];
-
- capacity = cyls * heads * sectors * sector_size;
-
- if (memcmp(page, &floppy->flexible_disk_page, 32))
- printk(KERN_INFO PFX "%s: %dkB, %d/%d/%d CHS, %d kBps, "
- "%d sector size, %d rpm\n",
- drive->name, capacity / 1024, cyls, heads,
- sectors, transfer_rate / 8, sector_size, rpm);
-
- memcpy(&floppy->flexible_disk_page, page, 32);
- drive->bios_cyl = cyls;
- drive->bios_head = heads;
- drive->bios_sect = sectors;
- lba_capacity = floppy->blocks * floppy->block_size;
-
- if (capacity < lba_capacity) {
- printk(KERN_NOTICE PFX "%s: The disk reports a capacity of %d "
- "bytes, but the drive only handles %d\n",
- drive->name, lba_capacity, capacity);
- floppy->blocks = floppy->block_size ?
- capacity / floppy->block_size : 0;
- drive->capacity64 = floppy->blocks * floppy->bs_factor;
- }
-
- return 0;
-}
-
-/*
- * Determine if media is present in the floppy drive, and if so, its LBA
- * capacity.
- */
-static int ide_floppy_get_capacity(ide_drive_t *drive)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- struct ide_atapi_pc pc;
- u8 *cap_desc;
- u8 pc_buf[256], header_len, desc_cnt;
- int i, rc = 1, blocks, length;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- drive->bios_cyl = 0;
- drive->bios_head = drive->bios_sect = 0;
- floppy->blocks = 0;
- floppy->bs_factor = 1;
- drive->capacity64 = 0;
-
- ide_floppy_create_read_capacity_cmd(&pc);
- if (ide_queue_pc_tail(drive, disk, &pc, pc_buf, pc.req_xfer)) {
- printk(KERN_ERR PFX "Can't get floppy parameters\n");
- return 1;
- }
- header_len = pc_buf[3];
- cap_desc = &pc_buf[4];
- desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
-
- for (i = 0; i < desc_cnt; i++) {
- unsigned int desc_start = 4 + i*8;
-
- blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
- length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);
-
- ide_debug_log(IDE_DBG_PROBE, "Descriptor %d: %dkB, %d blocks, "
- "%d sector size",
- i, blocks * length / 1024,
- blocks, length);
-
- if (i)
- continue;
- /*
- * The code below is valid only for the 1st descriptor, i.e. i == 0.
- */
-
- switch (pc_buf[desc_start + 4] & 0x03) {
- /* Clik! drive returns this instead of CAPACITY_CURRENT */
- case CAPACITY_UNFORMATTED:
- if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
- /*
- * If it is not a clik drive, break out
- * (maintains previous driver behaviour)
- */
- break;
- fallthrough;
- case CAPACITY_CURRENT:
- /* Normal Zip/LS-120 disks */
- if (memcmp(cap_desc, &floppy->cap_desc, 8))
- printk(KERN_INFO PFX "%s: %dkB, %d blocks, %d "
- "sector size\n",
- drive->name, blocks * length / 1024,
- blocks, length);
- memcpy(&floppy->cap_desc, cap_desc, 8);
-
- if (!length || length % 512) {
- printk(KERN_NOTICE PFX "%s: %d bytes block size"
- " not supported\n", drive->name, length);
- } else {
- floppy->blocks = blocks;
- floppy->block_size = length;
- floppy->bs_factor = length / 512;
- if (floppy->bs_factor != 1)
- printk(KERN_NOTICE PFX "%s: Warning: "
- "non 512 bytes block size not "
- "fully supported\n",
- drive->name);
- drive->capacity64 =
- floppy->blocks * floppy->bs_factor;
- rc = 0;
- }
- break;
- case CAPACITY_NO_CARTRIDGE:
- /*
- * This is a KERN_ERR so it appears on screen
- * for the user to see
- */
- printk(KERN_ERR PFX "%s: No disk in drive\n",
- drive->name);
- break;
- case CAPACITY_INVALID:
- printk(KERN_ERR PFX "%s: Invalid capacity for disk "
- "in drive\n", drive->name);
- break;
- }
- ide_debug_log(IDE_DBG_PROBE, "Descriptor 0 Code: %d",
- pc_buf[desc_start + 4] & 0x03);
- }
-
- /* Clik! disk does not support get_flexible_disk_page */
- if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
- (void) ide_floppy_get_flexible_disk_page(drive, &pc);
-
- return rc;
-}
-
-static void ide_floppy_setup(ide_drive_t *drive)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u16 *id = drive->id;
-
- drive->pc_callback = ide_floppy_callback;
-
- /*
- * We used to check revisions here. At this point, however, I'm giving up.
- * Just assume they are all broken; it's easier.
- *
- * The actual reason for the workarounds was likely a driver bug after
- * all rather than a firmware bug, and the workaround below used to hide
- * it. It should be fixed as of version 1.9, but to be on the safe side
- * we'll leave the limitation below for the 2.2.x tree.
- */
- if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) {
- drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
- /* This value will be visible in the /proc/ide/hdx/settings */
- drive->pc_delay = IDEFLOPPY_PC_DELAY;
- blk_queue_max_hw_sectors(drive->queue, 64);
- }
-
- /*
- * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
- * nasty clicking noises without it, so please don't remove this.
- */
- if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) {
- blk_queue_max_hw_sectors(drive->queue, 64);
- drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
- /* IOMEGA Clik! drives do not support lock/unlock commands */
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- }
-
- (void) ide_floppy_get_capacity(drive);
-
- ide_proc_register_driver(drive, floppy->driver);
-}
-
-static void ide_floppy_flush(ide_drive_t *drive)
-{
-}
-
-static int ide_floppy_init_media(ide_drive_t *drive, struct gendisk *disk)
-{
- int ret = 0;
-
- if (ide_do_test_unit_ready(drive, disk))
- ide_do_start_stop(drive, disk, 1);
-
- ret = ide_floppy_get_capacity(drive);
-
- set_capacity(disk, ide_gd_capacity(drive));
-
- return ret;
-}
-
-const struct ide_disk_ops ide_atapi_disk_ops = {
- .check = ide_check_atapi_device,
- .get_capacity = ide_floppy_get_capacity,
- .setup = ide_floppy_setup,
- .flush = ide_floppy_flush,
- .init_media = ide_floppy_init_media,
- .set_doorlock = ide_set_media_lock,
- .do_request = ide_floppy_do_request,
- .ioctl = ide_floppy_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ide_floppy_compat_ioctl,
-#endif
-};
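
For reference, the flexible disk page parsed by ide_floppy_get_flexible_disk_page() above is plain big-endian data at fixed offsets after the 8-byte mode parameter header. A small sketch of that extraction, assuming the same layout as the deleted code (the geometry values are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Big-endian 16-bit load, equivalent to the kernel's be16_to_cpup(). */
    static uint16_t be16_at(const uint8_t *p)
    {
            return (uint16_t)(p[0] << 8 | p[1]);
    }

    int main(void)
    {
            /* Fake mode-sense reply: 8-byte header, then the flexible disk page. */
            uint8_t buf[40] = { 0 };
            buf[8 + 8] = 0x00; buf[8 + 9] = 80;     /* cylinders = 80 */
            buf[8 + 4] = 2;                         /* heads */
            buf[8 + 5] = 18;                        /* sectors per track */
            buf[8 + 6] = 0x02; buf[8 + 7] = 0x00;   /* sector size = 512 */

            uint16_t cyls = be16_at(&buf[8 + 8]);
            uint16_t sector_size = be16_at(&buf[8 + 6]);
            long capacity = (long)cyls * buf[8 + 4] * buf[8 + 5] * sector_size;

            printf("%ldkB\n", capacity / 1024);     /* 1440kB for this geometry */
            return 0;
    }
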
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
deleted file mode 100644
index 8505a5f58f4e..000000000000
--- a/drivers/ide/ide-floppy.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_FLOPPY_H
-#define __IDE_FLOPPY_H
-
-#include "ide-gd.h"
-
-#ifdef CONFIG_IDE_GD_ATAPI
-/*
- * Pages of the MODE SELECT / MODE SENSE packet commands.
- * See SFF-8070i spec.
- */
-#define IDEFLOPPY_CAPABILITIES_PAGE 0x1b
-#define IDEFLOPPY_FLEXIBLE_DISK_PAGE 0x05
-
-/* IOCTLs used in low-level formatting. */
-#define IDEFLOPPY_IOCTL_FORMAT_SUPPORTED 0x4600
-#define IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY 0x4601
-#define IDEFLOPPY_IOCTL_FORMAT_START 0x4602
-#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603
-
-/* ide-floppy.c */
-extern const struct ide_disk_ops ide_atapi_disk_ops;
-void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
-void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
-
-/* ide-floppy_ioctl.c */
-int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
- unsigned int, unsigned long);
-int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t,
- unsigned int, unsigned long);
-
-#ifdef CONFIG_IDE_PROC_FS
-/* ide-floppy_proc.c */
-extern ide_proc_entry_t ide_floppy_proc[];
-extern const struct ide_proc_devset ide_floppy_settings[];
-#endif
-#else
-#define ide_floppy_proc NULL
-#define ide_floppy_settings NULL
-#endif
-
-#endif /*__IDE_FLOPPY_H */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
deleted file mode 100644
index 39a790ac6cc3..000000000000
--- a/drivers/ide/ide-floppy_ioctl.c
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ide-floppy IOCTLs handling.
- */
-
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/compat.h>
-#include <linux/cdrom.h>
-#include <linux/mutex.h>
-
-#include <asm/unaligned.h>
-
-#include <scsi/scsi_ioctl.h>
-
-#include "ide-floppy.h"
-
-/*
- * Obtain the list of formattable capacities.
- * Very similar to ide_floppy_get_capacity, except that we push the capacity
- * descriptors to userland instead of into our own structures.
- *
- * Userland gives us the following structure:
- *
- * struct idefloppy_format_capacities {
- * int nformats;
- * struct {
- * int nblocks;
- * int blocksize;
- * } formats[];
- * };
- *
- * Userland initializes nformats to the number of allocated formats[] records.
- * On exit, we set nformats to the number of records we've actually initialized.
- */
-
-static DEFINE_MUTEX(ide_floppy_ioctl_mutex);
-static int ide_floppy_get_format_capacities(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- int i, blocks, length, u_array_size, u_index;
- int __user *argp;
- u8 pc_buf[256], header_len, desc_cnt;
-
- if (get_user(u_array_size, arg))
- return -EFAULT;
-
- if (u_array_size <= 0)
- return -EINVAL;
-
- ide_floppy_create_read_capacity_cmd(pc);
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, pc_buf, pc->req_xfer)) {
- printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n");
- return -EIO;
- }
-
- header_len = pc_buf[3];
- desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
-
- u_index = 0;
- argp = arg + 1;
-
- /*
- * We always skip the first capacity descriptor. That's the current
- * capacity. We are interested in the remaining descriptors, the
- * formattable capacities.
- */
- for (i = 1; i < desc_cnt; i++) {
- unsigned int desc_start = 4 + i*8;
-
- if (u_index >= u_array_size)
- break; /* User-supplied buffer too small */
-
- blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
- length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);
-
- if (put_user(blocks, argp))
- return -EFAULT;
-
- ++argp;
-
- if (put_user(length, argp))
- return -EFAULT;
-
- ++argp;
-
- ++u_index;
- }
-
- if (put_user(u_index, arg))
- return -EFAULT;
-
- return 0;
-}
-
-static void ide_floppy_create_format_unit_cmd(struct ide_atapi_pc *pc,
- u8 *buf, int b, int l,
- int flags)
-{
- ide_init_pc(pc);
- pc->c[0] = GPCMD_FORMAT_UNIT;
- pc->c[1] = 0x17;
-
- memset(buf, 0, 12);
- buf[1] = 0xA2;
- /* Default format list header, u8 1: FOV/DCRT/IMM bits set */
-
- if (flags & 1) /* Verify bit on... */
- buf[1] ^= 0x20; /* ... turn off DCRT bit */
- buf[3] = 8;
-
- put_unaligned(cpu_to_be32(b), (unsigned int *)(&buf[4]));
- put_unaligned(cpu_to_be32(l), (unsigned int *)(&buf[8]));
- pc->req_xfer = 12;
- pc->flags |= PC_FLAG_WRITING;
-}
-
-static int ide_floppy_get_sfrp_bit(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 buf[20];
-
- drive->atapi_flags &= ~IDE_AFLAG_SRFP;
-
- ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_CAPABILITIES_PAGE);
- pc->flags |= PC_FLAG_SUPPRESS_ERROR;
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, buf, pc->req_xfer))
- return 1;
-
- if (buf[8 + 2] & 0x40)
- drive->atapi_flags |= IDE_AFLAG_SRFP;
-
- return 0;
-}
-
-static int ide_floppy_format_unit(ide_drive_t *drive, struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 buf[12];
- int blocks, length, flags, err = 0;
-
- if (floppy->openers > 1) {
- /* Don't format if someone is using the disk */
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- return -EBUSY;
- }
-
- drive->dev_flags |= IDE_DFLAG_FORMAT_IN_PROGRESS;
-
- /*
- * Send ATAPI_FORMAT_UNIT to the drive.
- *
- * Userland gives us the following structure:
- *
- * struct idefloppy_format_command {
- * int nblocks;
- * int blocksize;
- * int flags;
- * };
- *
- * flags is a bitmask, currently, the only defined flag is:
- *
- * 0x01 - verify media after format.
- */
- if (get_user(blocks, arg) ||
- get_user(length, arg+1) ||
- get_user(flags, arg+2)) {
- err = -EFAULT;
- goto out;
- }
-
- ide_floppy_get_sfrp_bit(drive, pc);
- ide_floppy_create_format_unit_cmd(pc, buf, blocks, length, flags);
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, buf, pc->req_xfer))
- err = -EIO;
-
-out:
- if (err)
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- return err;
-}
-
-/*
- * Get ATAPI_FORMAT_UNIT progress indication.
- *
- * Userland gives a pointer to an int. The int is set to a progress
- * indicator 0-65536, with 65536=100%.
- *
- * If the drive does not support format progress indication, we just check
- * the dsc bit, and return either 0 or 65536.
- */
-
-static int ide_floppy_get_format_progress(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 sense_buf[18];
- int progress_indication = 0x10000;
-
- if (drive->atapi_flags & IDE_AFLAG_SRFP) {
- ide_create_request_sense_cmd(drive, pc);
- if (ide_queue_pc_tail(drive, floppy->disk, pc, sense_buf,
- pc->req_xfer))
- return -EIO;
-
- if (floppy->sense_key == 2 &&
- floppy->asc == 4 &&
- floppy->ascq == 4)
- progress_indication = floppy->progress_indication;
-
- /* Else assume format_unit has finished, and we're at 0x10000 */
- } else {
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
- u8 stat;
-
- local_irq_save(flags);
- stat = hwif->tp_ops->read_status(hwif);
- local_irq_restore(flags);
-
- progress_indication = ((stat & ATA_DSC) == 0) ? 0 : 0x10000;
- }
-
- if (put_user(progress_indication, arg))
- return -EFAULT;
-
- return 0;
-}
-
-static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned long arg, unsigned int cmd)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- int prevent = (arg && cmd != CDROMEJECT) ? 1 : 0;
-
- if (floppy->openers > 1)
- return -EBUSY;
-
- ide_set_media_lock(drive, disk, prevent);
-
- if (cmd == CDROMEJECT)
- ide_do_start_stop(drive, disk, 2);
-
- return 0;
-}
-
-static int ide_floppy_format_ioctl(ide_drive_t *drive, struct ide_atapi_pc *pc,
- fmode_t mode, unsigned int cmd,
- void __user *argp)
-{
- switch (cmd) {
- case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
- return 0;
- case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
- return ide_floppy_get_format_capacities(drive, pc, argp);
- case IDEFLOPPY_IOCTL_FORMAT_START:
- if (!(mode & FMODE_WRITE))
- return -EPERM;
- return ide_floppy_format_unit(drive, pc, (int __user *)argp);
- case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
- return ide_floppy_get_format_progress(drive, pc, argp);
- default:
- return -ENOTTY;
- }
-}
-
-int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct ide_atapi_pc pc;
- void __user *argp = (void __user *)arg;
- int err;
-
- mutex_lock(&ide_floppy_ioctl_mutex);
- if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
- err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
- goto out;
- }
-
- err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
- if (err != -ENOTTY)
- goto out;
-
- /*
- * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
- * and CDROM_SEND_PACKET (legacy) ioctls
- */
- if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- if (err == -ENOTTY)
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-
-out:
- mutex_unlock(&ide_floppy_ioctl_mutex);
- return err;
-}
-
-#ifdef CONFIG_COMPAT
-int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct ide_atapi_pc pc;
- void __user *argp = compat_ptr(arg);
- int err;
-
- mutex_lock(&ide_floppy_ioctl_mutex);
- if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
- err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
- goto out;
- }
-
- err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
- if (err != -ENOTTY)
- goto out;
-
- /*
- * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
- * and CDROM_SEND_PACKET (legacy) ioctls
- */
- if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- if (err == -ENOTTY)
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-
-out:
- mutex_unlock(&ide_floppy_ioctl_mutex);
- return err;
-}
-#endif
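
The READ FORMAT CAPACITIES reply walked by both ide_floppy_get_capacity() and the ioctl above is a 4-byte header (byte 3 holds the descriptor list length in bytes) followed by 8-byte descriptors: a big-endian 32-bit block count, then at offset 6 a big-endian 16-bit block length. A sketch of that walk which, like the ioctl, skips descriptor 0 (the current capacity); the sample buffer contents are invented:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t be32_at(const uint8_t *p)
    {
            return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 | p[2] << 8 | p[3];
    }

    static uint16_t be16_at(const uint8_t *p)
    {
            return (uint16_t)(p[0] << 8 | p[1]);
    }

    int main(void)
    {
            /* Fake reply: header says two 8-byte descriptors follow. */
            uint8_t buf[256] = { 0, 0, 0, 16 };
            /* Descriptor 1 (a formattable capacity): 2880 blocks of 512 bytes. */
            uint8_t *d = &buf[4 + 1 * 8];
            d[0] = 0; d[1] = 0; d[2] = 0x0b; d[3] = 0x40;   /* 2880 */
            d[6] = 0x02; d[7] = 0x00;                       /* 512 */

            int desc_cnt = buf[3] / 8;
            for (int i = 1; i < desc_cnt; i++) {    /* skip current capacity */
                    const uint8_t *p = &buf[4 + i * 8];
                    printf("format: %u blocks, %u bytes each\n",
                           be32_at(p), be16_at(&p[6]));
            }
            return 0;
    }
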
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
deleted file mode 100644
index 7f697ddb5fe5..000000000000
--- a/drivers/ide/ide-floppy_proc.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/seq_file.h>
-
-#include "ide-floppy.h"
-
-static int idefloppy_capacity_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_gd_capacity(drive));
- return 0;
-}
-
-ide_proc_entry_t ide_floppy_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, idefloppy_capacity_proc_show },
- { "geometry", S_IFREG|S_IRUGO, ide_geometry_proc_show },
- {}
-};
-
-ide_devset_rw_field(bios_cyl, bios_cyl);
-ide_devset_rw_field(bios_head, bios_head);
-ide_devset_rw_field(bios_sect, bios_sect);
-ide_devset_rw_field(ticks, pc_delay);
-
-const struct ide_proc_devset ide_floppy_settings[] = {
- IDE_PROC_DEVSET(bios_cyl, 0, 1023),
- IDE_PROC_DEVSET(bios_head, 0, 255),
- IDE_PROC_DEVSET(bios_sect, 0, 63),
- IDE_PROC_DEVSET(ticks, 0, 255),
- { NULL },
-};
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
deleted file mode 100644
index e2b6c82586ce..000000000000
--- a/drivers/ide/ide-gd.c
+++ /dev/null
@@ -1,432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
-#define IDE_DISK_MINORS (1 << PARTN_BITS)
-#else
-#define IDE_DISK_MINORS 0
-#endif
-
-#include "ide-disk.h"
-#include "ide-floppy.h"
-
-#define IDE_GD_VERSION "1.18"
-
-/* module parameters */
-static DEFINE_MUTEX(ide_gd_mutex);
-static unsigned long debug_mask;
-module_param(debug_mask, ulong, 0644);
-
-static DEFINE_MUTEX(ide_disk_ref_mutex);
-
-static void ide_disk_release(struct device *);
-
-static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
-{
- struct ide_disk_obj *idkp = NULL;
-
- mutex_lock(&ide_disk_ref_mutex);
- idkp = ide_drv_g(disk, ide_disk_obj);
- if (idkp) {
- if (ide_device_get(idkp->drive))
- idkp = NULL;
- else
- get_device(&idkp->dev);
- }
- mutex_unlock(&ide_disk_ref_mutex);
- return idkp;
-}
-
-static void ide_disk_put(struct ide_disk_obj *idkp)
-{
- ide_drive_t *drive = idkp->drive;
-
- mutex_lock(&ide_disk_ref_mutex);
- put_device(&idkp->dev);
- ide_device_put(drive);
- mutex_unlock(&ide_disk_ref_mutex);
-}
-
-sector_t ide_gd_capacity(ide_drive_t *drive)
-{
- return drive->capacity64;
-}
-
-static int ide_gd_probe(ide_drive_t *);
-
-static void ide_gd_remove(ide_drive_t *drive)
-{
- struct ide_disk_obj *idkp = drive->driver_data;
- struct gendisk *g = idkp->disk;
-
- ide_proc_unregister_driver(drive, idkp->driver);
- device_del(&idkp->dev);
- del_gendisk(g);
- drive->disk_ops->flush(drive);
-
- mutex_lock(&ide_disk_ref_mutex);
- put_device(&idkp->dev);
- mutex_unlock(&ide_disk_ref_mutex);
-}
-
-static void ide_disk_release(struct device *dev)
-{
- struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
- struct gendisk *g = idkp->disk;
-
- drive->disk_ops = NULL;
- drive->driver_data = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(idkp);
-}
-
-/*
- * On HPA drives the capacity needs to be
- * reinitialized on resume, otherwise the disk
- * cannot be used and a hard reset is required.
- */
-static void ide_gd_resume(ide_drive_t *drive)
-{
- if (ata_id_hpa_enabled(drive->id))
- (void)drive->disk_ops->get_capacity(drive);
-}
-
-static const struct dmi_system_id ide_coldreboot_table[] = {
- {
- /* Acer TravelMate 66x cuts power during reboot */
- .ident = "Acer TravelMate 660",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
- },
- },
-
- { } /* terminate list */
-};
-
-static void ide_gd_shutdown(ide_drive_t *drive)
-{
-#ifdef CONFIG_ALPHA
- /* On Alpha, halt(8) doesn't actually turn the machine off,
- it puts you into a sort of firmware monitor. Typically,
- it's used to boot another kernel image, so it's not much
- different from reboot(8). Therefore, we don't need to
- spin down the disk in this case, especially since Alpha
- firmware doesn't handle disks in standby mode properly.
- On the other hand, it's reasonably safe to turn the power
- off when the shutdown process reaches the firmware prompt,
- as the firmware initialization takes a rather long time -
- at least 10 seconds, which should be sufficient for
- the disk to expire its write cache. */
- if (system_state != SYSTEM_POWER_OFF) {
-#else
- if (system_state == SYSTEM_RESTART &&
- !dmi_check_system(ide_coldreboot_table)) {
-#endif
- drive->disk_ops->flush(drive);
- return;
- }
-
- printk(KERN_INFO "Shutdown: %s\n", drive->name);
-
- drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive)
-{
- return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc;
-}
-
-static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive)
-{
- return (drive->media == ide_disk) ? ide_disk_settings
- : ide_floppy_settings;
-}
-#endif
-
-static ide_startstop_t ide_gd_do_request(ide_drive_t *drive,
- struct request *rq, sector_t sector)
-{
- return drive->disk_ops->do_request(drive, rq, sector);
-}
-
-static struct ide_driver ide_gd_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-gd",
- .bus = &ide_bus_type,
- },
- .probe = ide_gd_probe,
- .remove = ide_gd_remove,
- .resume = ide_gd_resume,
- .shutdown = ide_gd_shutdown,
- .version = IDE_GD_VERSION,
- .do_request = ide_gd_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_disk_proc_entries,
- .proc_devsets = ide_disk_proc_devsets,
-#endif
-};
-
-static int ide_gd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct ide_disk_obj *idkp;
- ide_drive_t *drive;
- int ret = 0;
-
- idkp = ide_disk_get(disk);
- if (idkp == NULL)
- return -ENXIO;
-
- drive = idkp->drive;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- idkp->openers++;
-
- if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- /* Just in case */
-
- ret = drive->disk_ops->init_media(drive, disk);
-
- /*
- * Allow O_NDELAY to open a drive without a disk, or with an
- * unreadable disk, so that we can get the format capacity
- * of the drive or begin the format - Sam
- */
- if (ret && (mode & FMODE_NDELAY) == 0) {
- ret = -EIO;
- goto out_put_idkp;
- }
-
- if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
- ret = -EROFS;
- goto out_put_idkp;
- }
-
- /*
- * Ignore the return code from door_lock,
- * since the open() has already succeeded,
- * and the door_lock is irrelevant at this point.
- */
- drive->disk_ops->set_doorlock(drive, disk, 1);
- if (__invalidate_device(bdev, true))
- pr_warn("VFS: busy inodes on changed media %s\n",
- bdev->bd_disk->disk_name);
- drive->disk_ops->get_capacity(drive);
- set_capacity(disk, ide_gd_capacity(drive));
- set_bit(GD_NEED_PART_SCAN, &disk->state);
- } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
- ret = -EBUSY;
- goto out_put_idkp;
- }
- return 0;
-
-out_put_idkp:
- idkp->openers--;
- ide_disk_put(idkp);
- return ret;
-}
-
-static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&ide_gd_mutex);
- ret = ide_gd_open(bdev, mode);
- mutex_unlock(&ide_gd_mutex);
-
- return ret;
-}
-
-
-static void ide_gd_release(struct gendisk *disk, fmode_t mode)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- mutex_lock(&ide_gd_mutex);
- if (idkp->openers == 1)
- drive->disk_ops->flush(drive);
-
- if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
- drive->disk_ops->set_doorlock(drive, disk, 0);
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- }
-
- idkp->openers--;
-
- ide_disk_put(idkp);
- mutex_unlock(&ide_gd_mutex);
-}
-
-static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- geo->heads = drive->bios_head;
- geo->sectors = drive->bios_sect;
- geo->cylinders = (u16)drive->bios_cyl; /* truncate */
- return 0;
-}
-
-static void ide_gd_unlock_native_capacity(struct gendisk *disk)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
- const struct ide_disk_ops *disk_ops = drive->disk_ops;
-
- if (disk_ops->unlock_native_capacity)
- disk_ops->unlock_native_capacity(drive);
-}
-
-static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
-}
-
-#ifdef CONFIG_COMPAT
-static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- if (!drive->disk_ops->compat_ioctl)
- return -ENOIOCTLCMD;
-
- return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg);
-}
-#endif
-
-static const struct block_device_operations ide_gd_ops = {
- .owner = THIS_MODULE,
- .open = ide_gd_unlocked_open,
- .release = ide_gd_release,
- .ioctl = ide_gd_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ide_gd_compat_ioctl,
-#endif
- .getgeo = ide_gd_getgeo,
- .unlock_native_capacity = ide_gd_unlock_native_capacity,
-};
-
-static int ide_gd_probe(ide_drive_t *drive)
-{
- const struct ide_disk_ops *disk_ops = NULL;
- struct ide_disk_obj *idkp;
- struct gendisk *g;
-
- /* strstr("foo", "") is non-NULL */
- if (!strstr("ide-gd", drive->driver_req))
- goto failed;
-
-#ifdef CONFIG_IDE_GD_ATA
- if (drive->media == ide_disk)
- disk_ops = &ide_ata_disk_ops;
-#endif
-#ifdef CONFIG_IDE_GD_ATAPI
- if (drive->media == ide_floppy)
- disk_ops = &ide_atapi_disk_ops;
-#endif
- if (disk_ops == NULL)
- goto failed;
-
- if (disk_ops->check(drive, DRV_NAME) == 0) {
- printk(KERN_ERR PFX "%s: not supported by this driver\n",
- drive->name);
- goto failed;
- }
-
- idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
- if (!idkp) {
- printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
- if (!g)
- goto out_free_idkp;
-
- ide_init_disk(g, drive);
-
- idkp->dev.parent = &drive->gendev;
- idkp->dev.release = ide_disk_release;
- dev_set_name(&idkp->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&idkp->dev))
- goto out_free_disk;
-
- idkp->drive = drive;
- idkp->driver = &ide_gd_driver;
- idkp->disk = g;
-
- g->private_data = &idkp->driver;
-
- drive->driver_data = idkp;
- drive->debug_mask = debug_mask;
- drive->disk_ops = disk_ops;
-
- disk_ops->setup(drive);
-
- set_capacity(g, ide_gd_capacity(drive));
-
- g->minors = IDE_DISK_MINORS;
- g->flags |= GENHD_FL_EXT_DEVT;
- if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
- g->flags |= GENHD_FL_REMOVABLE;
- g->fops = &ide_gd_ops;
- g->events = DISK_EVENT_MEDIA_CHANGE;
- device_add_disk(&drive->gendev, g, NULL);
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_idkp:
- kfree(idkp);
-failed:
- return -ENODEV;
-}
-
-static int __init ide_gd_init(void)
-{
- printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n");
- return driver_register(&ide_gd_driver.gen_driver);
-}
-
-static void __exit ide_gd_exit(void)
-{
- driver_unregister(&ide_gd_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-disk*");
-MODULE_ALIAS("ide-disk");
-MODULE_ALIAS("ide:*m-floppy*");
-MODULE_ALIAS("ide-floppy");
-module_init(ide_gd_init);
-module_exit(ide_gd_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
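
ide_gd_open() and ide_gd_release() above maintain a small invariant under ide_gd_mutex: the first opener locks the door and revalidates media, and the last closer flushes and unlocks. A stripped-down sketch of that first-opener/last-closer pattern; the doorlock() and flush() stubs are hypothetical stand-ins for the disk_ops callbacks:

    #include <pthread.h>
    #include <stdio.h>

    struct gd {
            pthread_mutex_t lock;
            int openers;
    };

    /* Stub hooks standing in for the disk_ops door-lock and flush callbacks. */
    static void doorlock(struct gd *g, int on) { (void)g; printf("doorlock %d\n", on); }
    static void flush(struct gd *g) { (void)g; puts("flush"); }

    static void gd_open(struct gd *g)
    {
            pthread_mutex_lock(&g->lock);
            if (++g->openers == 1)
                    doorlock(g, 1);         /* first opener: lock media in place */
            pthread_mutex_unlock(&g->lock);
    }

    static void gd_release(struct gd *g)
    {
            pthread_mutex_lock(&g->lock);
            if (g->openers == 1) {          /* last closer: flush, then unlock */
                    flush(g);
                    doorlock(g, 0);
            }
            g->openers--;
            pthread_mutex_unlock(&g->lock);
    }

    int main(void)
    {
            struct gd g = { PTHREAD_MUTEX_INITIALIZER, 0 };

            gd_open(&g);
            gd_open(&g);            /* second opener: no door action */
            gd_release(&g);
            gd_release(&g);         /* last closer: flush + unlock */
            return 0;
    }
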
diff --git a/drivers/ide/ide-gd.h b/drivers/ide/ide-gd.h
deleted file mode 100644
index af3fe1880e9e..000000000000
--- a/drivers/ide/ide-gd.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_GD_H
-#define __IDE_GD_H
-
-#define DRV_NAME "ide-gd"
-#define PFX DRV_NAME ": "
-
-/* define to see debug info */
-#define IDE_GD_DEBUG_LOG 0
-
-#if IDE_GD_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-struct ide_disk_obj {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
- unsigned int openers; /* protected by BKL for now */
-
- /* used for blk_{fs,pc}_request() requests */
- struct ide_atapi_pc queued_pc;
-
- /* Last error information */
- u8 sense_key, asc, ascq;
-
- int progress_indication;
-
- /* Device information */
- /* Current format */
- int blocks, block_size, bs_factor;
- /* Last format capacity descriptor */
- u8 cap_desc[8];
- /* Copy of the flexible disk page */
- u8 flexible_disk_page[32];
-};
-
-sector_t ide_gd_capacity(ide_drive_t *);
-
-#endif /* __IDE_GD_H */
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
deleted file mode 100644
index 80c0d69b83ac..000000000000
--- a/drivers/ide/ide-generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * generic/default IDE host driver
- *
- * Copyright (C) 2004, 2008-2009 Bartlomiej Zolnierkiewicz
- * This code was split off from ide.c. See it for original copyrights.
- *
- * May be copied or modified under the terms of the GNU General Public License.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-#include <linux/pci_ids.h>
-
-/* FIXME: convert arm to use ide_platform host driver */
-#ifdef CONFIG_ARM
-#include <asm/irq.h>
-#endif
-
-#define DRV_NAME "ide_generic"
-
-static int probe_mask;
-module_param(probe_mask, int, 0);
-MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
-
-static const struct ide_port_info ide_generic_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-#ifdef CONFIG_ARM
-static const u16 legacy_bases[] = { 0x1f0 };
-static const int legacy_irqs[] = { IRQ_HARDDISK };
-#elif defined(CONFIG_ALPHA)
-static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
-static const int legacy_irqs[] = { 14, 15, 11, 10 };
-#else
-static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
-#endif
-
-static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
-{
-#ifdef CONFIG_PCI
- struct pci_dev *p = NULL;
- u16 val;
-
- for_each_pci_dev(p) {
- if (pci_resource_start(p, 0) == 0x1f0)
- *primary = 1;
- if (pci_resource_start(p, 2) == 0x170)
- *secondary = 1;
-
- /* Cyrix CS55{1,2}0 pre SFF MWDMA ATA on the bridge */
- if (p->vendor == PCI_VENDOR_ID_CYRIX &&
- (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
- p->device == PCI_DEVICE_ID_CYRIX_5520))
- *primary = *secondary = 1;
-
- /* Intel MPIIX - PIO ATA on non PCI side of bridge */
- if (p->vendor == PCI_VENDOR_ID_INTEL &&
- p->device == PCI_DEVICE_ID_INTEL_82371MX) {
- pci_read_config_word(p, 0x6C, &val);
- if (val & 0x8000) {
- /* ATA port enabled */
- if (val & 0x4000)
- *secondary = 1;
- else
- *primary = 1;
- }
- }
- }
-#endif
-}
-
-static int __init ide_generic_init(void)
-{
- struct ide_hw hw, *hws[] = { &hw };
- unsigned long io_addr;
- int i, rc = 0, primary = 0, secondary = 0;
-
- ide_generic_check_pci_legacy_iobases(&primary, &secondary);
-
- if (!probe_mask) {
- printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" "
- "module parameter for probing all legacy ISA IDE ports\n");
-
- if (primary == 0)
- probe_mask |= 0x1;
-
- if (secondary == 0)
- probe_mask |= 0x2;
- } else
- printk(KERN_INFO DRV_NAME ": enforcing probing of I/O ports "
- "upon user request\n");
-
- for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
- io_addr = legacy_bases[i];
-
- if ((probe_mask & (1 << i)) && io_addr) {
- if (!request_region(io_addr, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
- "not free.\n",
- DRV_NAME, io_addr, io_addr + 7);
- rc = -EBUSY;
- continue;
- }
-
- if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX "
- "not free.\n",
- DRV_NAME, io_addr + 0x206);
- release_region(io_addr, 8);
- rc = -EBUSY;
- continue;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
-#ifdef CONFIG_IA64
- hw.irq = isa_irq_to_vector(legacy_irqs[i]);
-#else
- hw.irq = legacy_irqs[i];
-#endif
- rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL);
- if (rc) {
- release_region(io_addr + 0x206, 1);
- release_region(io_addr, 8);
- }
- }
- }
-
- return rc;
-}
-
-module_init(ide_generic_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
deleted file mode 100644
index 94bdcf1ea186..000000000000
--- a/drivers/ide/ide-io-std.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-
-#if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \
- defined(CONFIG_PARISC) || defined(CONFIG_PPC) || defined(CONFIG_SPARC)
-#include <asm/ide.h>
-#else
-#include <asm-generic/ide_iops.h>
-#endif
-
-/*
- * Conventional PIO operations for ATA devices
- */
-
-static u8 ide_inb(unsigned long port)
-{
- return (u8) inb(port);
-}
-
-static void ide_outb(u8 val, unsigned long port)
-{
- outb(val, port);
-}
-
-/*
- * MMIO operations, typically used for SATA controllers
- */
-
-static u8 ide_mm_inb(unsigned long port)
-{
- return (u8) readb((void __iomem *) port);
-}
-
-static void ide_mm_outb(u8 value, unsigned long port)
-{
- writeb(value, (void __iomem *) port);
-}
-
-void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
- else
- outb(cmd, hwif->io_ports.command_addr);
-}
-EXPORT_SYMBOL_GPL(ide_exec_command);
-
-u8 ide_read_status(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.status_addr);
- else
- return inb(hwif->io_ports.status_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_status);
-
-u8 ide_read_altstatus(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.ctl_addr);
- else
- return inb(hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_altstatus);
-
-void ide_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
- else
- outb(ctl, hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_write_devctl);
-
-void ide_dev_select(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 select = drive->select | ATA_DEVICE_OBS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(select, (void __iomem *)hwif->io_ports.device_addr);
- else
- outb(select, hwif->io_ports.device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_dev_select);
-
-void ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- void (*tf_outb)(u8 addr, unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (mmio)
- tf_outb = ide_mm_outb;
- else
- tf_outb = ide_outb;
-
- if (valid & IDE_VALID_FEATURE)
- tf_outb(tf->feature, io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf_outb(tf->nsect, io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf_outb(tf->lbal, io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf_outb(tf->lbam, io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf_outb(tf->lbah, io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf_outb(tf->device, io_ports->device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_tf_load);
-
-void ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- u8 (*tf_inb)(unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (mmio)
- tf_inb = ide_mm_inb;
- else
- tf_inb = ide_inb;
-
- if (valid & IDE_VALID_ERROR)
- tf->error = tf_inb(io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf->nsect = tf_inb(io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf->lbal = tf_inb(io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf->lbam = tf_inb(io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf->lbah = tf_inb(io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf->device = tf_inb(io_ports->device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_tf_read);
-
-/*
- * Some localbus EIDE interfaces require a special access sequence
- * when using 32-bit I/O instructions to transfer data. We call this
- * the "vlb_sync" sequence, which consists of three successive reads
- * of the sector count register location, with interrupts disabled
- * to ensure that the reads all happen together.
- */
-static void ata_vlb_sync(unsigned long port)
-{
- (void)inb(port);
- (void)inb(port);
- (void)inb(port);
-}
-
-/*
- * This is used for most PIO data transfers *from* the IDE interface
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd len is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- unsigned int words = (len + 1) >> 1;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (io_32bit) {
- unsigned long flags;
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- words >>= 1;
- if (mmio)
- __ide_mm_insl((void __iomem *)data_addr, buf, words);
- else
- insl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-
- buf += len & ~3;
- words = 1;
- }
-
- if (mmio)
- __ide_mm_insw((void __iomem *)data_addr, buf, words);
- else
- insw(data_addr, buf, words);
-}
-EXPORT_SYMBOL_GPL(ide_input_data);
-
-/*
- * This is used for most PIO data transfers *to* the IDE interface
- */
-void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- unsigned int words = (len + 1) >> 1;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (io_32bit) {
- unsigned long flags;
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- words >>= 1;
- if (mmio)
- __ide_mm_outsl((void __iomem *)data_addr, buf, words);
- else
- outsl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-
- buf += len & ~3;
- words = 1;
- }
-
- if (mmio)
- __ide_mm_outsw((void __iomem *)data_addr, buf, words);
- else
- outsw(data_addr, buf, words);
-}
-EXPORT_SYMBOL_GPL(ide_output_data);
-
-const struct ide_tp_ops default_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
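
Note: every accessor in the file above dispatches on IDE_HFLAG_MMIO between port I/O (inb/outb) and memory-mapped I/O (readb/writeb). A self-contained sketch of that dual-transport idiom, with fake_* and sim_* stubs standing in for the kernel primitives (nothing below is kernel API):

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t fake_io_space[0x400];	/* pretend legacy port space */
	static uint8_t fake_mmio_space[0x400];	/* pretend MMIO window */

	static uint8_t sim_inb(unsigned long port)   { return fake_io_space[port & 0x3ff]; }
	static uint8_t sim_readb(unsigned long addr) { return fake_mmio_space[addr & 0x3ff]; }

	struct fake_hwif {
		unsigned long status_addr;
		int is_mmio;			/* models IDE_HFLAG_MMIO */
	};

	/* One accessor, two transports -- the same shape as ide_read_status(). */
	static uint8_t fake_read_status(const struct fake_hwif *hwif)
	{
		return hwif->is_mmio ? sim_readb(hwif->status_addr)
				     : sim_inb(hwif->status_addr);
	}

	int main(void)
	{
		struct fake_hwif pio  = { 0x1f7, 0 };	/* classic primary status port */
		struct fake_hwif mmio = { 0x107, 1 };

		fake_io_space[0x1f7] = 0x50;		/* DRDY | DSC */
		fake_mmio_space[0x107] = 0x50;
		printf("pio=0x%02x mmio=0x%02x\n",
		       fake_read_status(&pio), fake_read_status(&mmio));
		return 0;
	}
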
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
deleted file mode 100644
index 4867b67b60d6..000000000000
--- a/drivers/ide/ide-io.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- * IDE I/O functions
- *
- * Basic PIO and command management functionality.
- *
- * This code was split off from ide.c. See ide.c for history and original
- * copyrights.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/blkpg.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/reboot.h>
-#include <linux/cdrom.h>
-#include <linux/seq_file.h>
-#include <linux/device.h>
-#include <linux/kmod.h>
-#include <linux/scatterlist.h>
-#include <linux/bitops.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- /*
- * decide whether to re-enable DMA -- 3 is an arbitrary magic number for
- * now; if we time out on DMA more than 3 times, just stay in PIO
- */
- if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
- drive->retry_pio <= 3) {
- drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
- ide_dma_on(drive);
- }
-
- if (!blk_update_request(rq, error, nr_bytes)) {
- if (rq == drive->sense_rq) {
- drive->sense_rq = NULL;
- drive->sense_rq_active = false;
- }
-
- __blk_mq_end_request(rq, error);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(ide_end_rq);
-
-void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
-{
- const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
- struct ide_taskfile *tf = &cmd->tf;
- struct request *rq = cmd->rq;
- u8 tf_cmd = tf->command;
-
- tf->error = err;
- tf->status = stat;
-
- if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
- u8 data[2];
-
- tp_ops->input_data(drive, cmd, data, 2);
-
- cmd->tf.data = data[0];
- cmd->hob.data = data[1];
- }
-
- ide_tf_readback(drive, cmd);
-
- if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
- tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
- if (tf->lbal != 0xc4) {
- printk(KERN_ERR "%s: head unload failed!\n",
- drive->name);
- ide_tf_dump(drive->name, cmd);
- } else
- drive->dev_flags |= IDE_DFLAG_PARKED;
- }
-
- if (rq && ata_taskfile_request(rq)) {
- struct ide_cmd *orig_cmd = ide_req(rq)->special;
-
- if (cmd->tf_flags & IDE_TFLAG_DYN)
- kfree(orig_cmd);
- else if (cmd != orig_cmd)
- memcpy(orig_cmd, cmd, sizeof(*cmd));
- }
-}
-
-int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- int rc;
-
- /*
- * if failfast is set on a request, override number of sectors
- * and complete the whole request right now
- */
- if (blk_noretry_request(rq) && error)
- nr_bytes = blk_rq_sectors(rq) << 9;
-
- rc = ide_end_rq(drive, rq, error, nr_bytes);
- if (rc == 0)
- hwif->rq = NULL;
-
- return rc;
-}
-EXPORT_SYMBOL(ide_complete_rq);
-
-void ide_kill_rq(ide_drive_t *drive, struct request *rq)
-{
- u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
- u8 media = drive->media;
-
- drive->failed_pc = NULL;
-
- if ((media == ide_floppy || media == ide_tape) && drv_req) {
- scsi_req(rq)->result = 0;
- } else {
- if (media == ide_tape)
- scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
- else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
-}
-
-static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->sect;
- tf->lbal = drive->sect;
- tf->lbam = drive->cyl;
- tf->lbah = drive->cyl >> 8;
- tf->device = (drive->head - 1) | drive->select;
- tf->command = ATA_CMD_INIT_DEV_PARAMS;
-}
-
-static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->sect;
- tf->command = ATA_CMD_RESTORE;
-}
-
-static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->mult_req;
- tf->command = ATA_CMD_SET_MULTI;
-}
-
-/**
- * do_special - issue some special commands
- * @drive: drive the command is for
- *
- * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
- * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
- */
-
-static ide_startstop_t do_special(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
- drive->special_flags);
-#endif
- if (drive->media != ide_disk) {
- drive->special_flags = 0;
- drive->mult_req = 0;
- return ide_stopped;
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.protocol = ATA_PROT_NODATA;
-
- if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
- drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
- ide_tf_set_specify_cmd(drive, &cmd.tf);
- } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
- drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
- ide_tf_set_restore_cmd(drive, &cmd.tf);
- } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
- drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
- ide_tf_set_setmult_cmd(drive, &cmd.tf);
- } else
- BUG();
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;
-
- do_rw_taskfile(drive, &cmd);
-
- return ide_started;
-}
-
-void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
- struct request *rq = cmd->rq;
-
- cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
- if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
- last_sg->length +=
- (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
-}
-EXPORT_SYMBOL_GPL(ide_map_sg);
-
-void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
-{
- cmd->nbytes = cmd->nleft = nr_bytes;
- cmd->cursg_ofs = 0;
- cmd->cursg = NULL;
-}
-EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
-
-/**
- * execute_drive_cmd - issue special drive command
- * @drive: the drive to issue the command on
- * @rq: the request structure holding the command
- *
- * execute_drive_cmd() issues a special drive command, usually
- * initiated by ioctl() from the external hdparm program. The
- * command can be a drive command, drive task or taskfile
- * operation. Oddly, you can call it with NULL to wait for all
- * commands to finish. Don't do this, as the behaviour is liable to change.
- */
-
-static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
- struct request *rq)
-{
- struct ide_cmd *cmd = ide_req(rq)->special;
-
- if (cmd) {
- if (cmd->protocol == ATA_PROT_PIO) {
- ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
- ide_map_sg(drive, cmd);
- }
-
- return do_rw_taskfile(drive, cmd);
- }
-
- /*
- * NULL is actually a valid way of waiting for
- * all current requests to be flushed from the queue.
- */
-#ifdef DEBUG
- printk("%s: DRIVE_CMD (null)\n", drive->name);
-#endif
- scsi_req(rq)->result = 0;
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
-
- return ide_stopped;
-}
-
-static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
-{
- u8 cmd = scsi_req(rq)->cmd[0];
-
- switch (cmd) {
- case REQ_PARK_HEADS:
- case REQ_UNPARK_HEADS:
- return ide_do_park_unpark(drive, rq);
- case REQ_DEVSET_EXEC:
- return ide_do_devset(drive, rq);
- case REQ_DRIVE_RESET:
- return ide_do_reset(drive);
- default:
- BUG();
- }
-}
-
-/**
- * start_request - start of I/O and command issuing for IDE
- *
- * start_request() initiates handling of a new I/O request. It
- * accepts commands and I/O (read/write) requests.
- *
- * FIXME: this function needs a rename
- */
-
-static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
-{
- ide_startstop_t startstop;
-
-#ifdef DEBUG
- printk("%s: start_request: current=0x%08lx\n",
- drive->hwif->name, (unsigned long) rq);
-#endif
-
- /* bail early if we've exceeded max_failures */
- if (drive->max_failures && (drive->failures > drive->max_failures)) {
- rq->rq_flags |= RQF_FAILED;
- goto kill_rq;
- }
-
- if (drive->prep_rq && !drive->prep_rq(drive, rq))
- return ide_stopped;
-
- if (ata_pm_request(rq))
- ide_check_pm_state(drive, rq);
-
- drive->hwif->tp_ops->dev_select(drive);
- if (ide_wait_stat(&startstop, drive, drive->ready_stat,
- ATA_BUSY | ATA_DRQ, WAIT_READY)) {
- printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
- return startstop;
- }
-
- if (drive->special_flags == 0) {
- struct ide_driver *drv;
-
- /*
- * We reset the drive so we need to issue a SETFEATURES.
- * Do it _after_ do_special() restored device parameters.
- */
- if (drive->current_speed == 0xff)
- ide_config_drive_speed(drive, drive->desired_speed);
-
- if (ata_taskfile_request(rq))
- return execute_drive_cmd(drive, rq);
- else if (ata_pm_request(rq)) {
- struct ide_pm_state *pm = ide_req(rq)->special;
-#ifdef DEBUG_PM
- printk("%s: start_power_step(step: %d)\n",
- drive->name, pm->pm_step);
-#endif
- startstop = ide_start_power_step(drive, rq);
- if (startstop == ide_stopped &&
- pm->pm_step == IDE_PM_COMPLETED)
- ide_complete_pm_rq(drive, rq);
- return startstop;
- } else if (!rq->rq_disk && ata_misc_request(rq))
- /*
- * TODO: Once all ULDs have been modified to
- * check for specific op codes rather than
- * blindly accepting any special request, the
- * check for ->rq_disk above may be replaced
- * by a more suitable mechanism or even
- * dropped entirely.
- */
- return ide_special_rq(drive, rq);
-
- drv = *(struct ide_driver **)rq->rq_disk->private_data;
-
- return drv->do_request(drive, rq, blk_rq_pos(rq));
- }
- return do_special(drive);
-kill_rq:
- ide_kill_rq(drive, rq);
- return ide_stopped;
-}
-
-/**
- * ide_stall_queue - pause an IDE device
- * @drive: drive to stall
- * @timeout: time to stall for (jiffies)
- *
- * ide_stall_queue() can be used by a drive to give excess bandwidth back
- * to the port by sleeping for timeout jiffies.
- */
-
-void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
-{
- if (timeout > WAIT_WORSTCASE)
- timeout = WAIT_WORSTCASE;
- drive->sleep = timeout + jiffies;
- drive->dev_flags |= IDE_DFLAG_SLEEPING;
-}
-EXPORT_SYMBOL(ide_stall_queue);
-
-static inline int ide_lock_port(ide_hwif_t *hwif)
-{
- if (hwif->busy)
- return 1;
-
- hwif->busy = 1;
-
- return 0;
-}
-
-static inline void ide_unlock_port(ide_hwif_t *hwif)
-{
- hwif->busy = 0;
-}
-
-static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
-{
- int rc = 0;
-
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
- if (rc == 0) {
- if (host->get_lock)
- host->get_lock(ide_intr, hwif);
- }
- }
- return rc;
-}
-
-static inline void ide_unlock_host(struct ide_host *host)
-{
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- if (host->release_lock)
- host->release_lock();
- clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
- }
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
- struct request_queue *q = drive->queue;
-
- /* Use 3ms as that was the old plug delay */
- if (rq) {
- blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(q, 3);
- } else
- blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
-}
-
-blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
- bool local_requeue)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_host *host = hwif->host;
- ide_startstop_t startstop;
-
- if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
- rq->rq_flags |= RQF_DONTPREP;
- ide_req(rq)->special = NULL;
- }
-
- /* HLD do_request() callback might sleep, make sure it's okay */
- might_sleep();
-
- if (ide_lock_host(host, hwif))
- return BLK_STS_DEV_RESOURCE;
-
- spin_lock_irq(&hwif->lock);
-
- if (!ide_lock_port(hwif)) {
- ide_hwif_t *prev_port;
-
- WARN_ON_ONCE(hwif->rq);
-repeat:
- prev_port = hwif->host->cur_port;
- if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
- time_after(drive->sleep, jiffies)) {
- ide_unlock_port(hwif);
- goto plug_device;
- }
-
- if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
- hwif != prev_port) {
- ide_drive_t *cur_dev =
- prev_port ? prev_port->cur_dev : NULL;
-
- /*
- * set nIEN for previous port, drives in the
- * quirk list may not like intr setups/cleanups
- */
- if (cur_dev &&
- (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
- prev_port->tp_ops->write_devctl(prev_port,
- ATA_NIEN |
- ATA_DEVCTL_OBS);
-
- hwif->host->cur_port = hwif;
- }
- hwif->cur_dev = drive;
- drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
-
- /*
- * Sanity: don't accept a request that isn't a PM request
- * if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the blk_fetch_request()
- * above from returning whatever is in the queue. Since we call
- * ide_do_request() ourselves, we end up taking requests while
- * the queue is blocked...
- */
- if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
- ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PM) == 0) {
- /* there should be no pending command at this point */
- ide_unlock_port(hwif);
- goto plug_device;
- }
-
- scsi_req(rq)->resid_len = blk_rq_bytes(rq);
- hwif->rq = rq;
-
- spin_unlock_irq(&hwif->lock);
- startstop = start_request(drive, rq);
- spin_lock_irq(&hwif->lock);
-
- if (startstop == ide_stopped) {
- rq = hwif->rq;
- hwif->rq = NULL;
- if (rq)
- goto repeat;
- ide_unlock_port(hwif);
- goto out;
- }
- } else {
-plug_device:
- if (local_requeue)
- list_add(&rq->queuelist, &drive->rq_list);
- spin_unlock_irq(&hwif->lock);
- ide_unlock_host(host);
- if (!local_requeue)
- ide_requeue_and_plug(drive, rq);
- return BLK_STS_OK;
- }
-
-out:
- spin_unlock_irq(&hwif->lock);
- if (rq == NULL)
- ide_unlock_host(host);
- return BLK_STS_OK;
-}
-
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- ide_drive_t *drive = hctx->queue->queuedata;
- ide_hwif_t *hwif = drive->hwif;
-
- spin_lock_irq(&hwif->lock);
- if (drive->sense_rq_active) {
- spin_unlock_irq(&hwif->lock);
- return BLK_STS_DEV_RESOURCE;
- }
- spin_unlock_irq(&hwif->lock);
-
- blk_mq_start_request(bd->rq);
- return ide_issue_rq(drive, bd->rq, false);
-}
-
-static int drive_is_ready(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat = 0;
-
- if (drive->waiting_for_dma)
- return hwif->dma_ops->dma_test_irq(drive);
-
- if (hwif->io_ports.ctl_addr &&
- (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
- stat = hwif->tp_ops->read_altstatus(hwif);
- else
- /* Note: this may clear a pending IRQ!! */
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY)
- /* drive busy: definitely not interrupting */
- return 0;
-
- /* drive ready: *might* be interrupting */
- return 1;
-}
-
-/**
- * ide_timer_expiry - handle lack of an IDE interrupt
- * @data: timer callback magic (hwif)
- *
- * An IDE command has timed out before the expected drive return
- * occurred. At this point we attempt to clean up the current
- * mess. If the current handler includes an expiry handler then
- * we invoke the expiry handler and, provided it is happy, the
- * work is done. If that fails we apply generic recovery rules
- * invoking the handler and checking the drive DMA status. We
- * have an excessively incestuous relationship with the DMA
- * logic that wants cleaning up.
- */
-
-void ide_timer_expiry (struct timer_list *t)
-{
- ide_hwif_t *hwif = from_timer(hwif, t, timer);
- ide_drive_t *drive;
- ide_handler_t *handler;
- unsigned long flags;
- int wait = -1;
- int plug_device = 0;
- struct request *rq_in_flight;
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- handler = hwif->handler;
-
- if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
- /*
- * Either a marginal timeout occurred
- * (got the interrupt just as timer expired),
- * or we were "sleeping" to give other devices a chance.
- * Either way, we don't really want to complain about anything.
- */
- } else {
- ide_expiry_t *expiry = hwif->expiry;
- ide_startstop_t startstop = ide_stopped;
-
- drive = hwif->cur_dev;
-
- if (expiry) {
- wait = expiry(drive);
- if (wait > 0) { /* continue */
- /* reset timer */
- hwif->timer.expires = jiffies + wait;
- hwif->req_gen_timer = hwif->req_gen;
- add_timer(&hwif->timer);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return;
- }
- }
- hwif->handler = NULL;
- hwif->expiry = NULL;
- /*
- * We need to simulate a real interrupt when invoking
- * the handler() function, which means we need to
- * globally mask the specific IRQ:
- */
- spin_unlock(&hwif->lock);
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
-
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
- if (drive->waiting_for_dma)
- hwif->dma_ops->dma_lost_irq(drive);
- if (hwif->port_ops && hwif->port_ops->clear_irq)
- hwif->port_ops->clear_irq(drive);
-
- printk(KERN_WARNING "%s: lost interrupt\n",
- drive->name);
- startstop = handler(drive);
- } else {
- if (drive->waiting_for_dma)
- startstop = ide_dma_timeout_retry(drive, wait);
- else
- startstop = ide_error(drive, "irq timeout",
- hwif->tp_ops->read_status(hwif));
- }
- /* Disable interrupts again, `handler' might have enabled it */
- spin_lock_irq(&hwif->lock);
- enable_irq(hwif->irq);
- if (startstop == ide_stopped && hwif->polling == 0) {
- rq_in_flight = hwif->rq;
- hwif->rq = NULL;
- ide_unlock_port(hwif);
- plug_device = 1;
- }
- }
- spin_unlock_irqrestore(&hwif->lock, flags);
-
- if (plug_device) {
- ide_unlock_host(hwif->host);
- ide_requeue_and_plug(drive, rq_in_flight);
- }
-}
-
-/**
- * unexpected_intr - handle an unexpected IDE interrupt
- * @irq: interrupt line
- * @hwif: port being processed
- *
- * There's nothing really useful we can do with an unexpected interrupt,
- * other than reading the status register (to clear it), and logging it.
- * There should be no way that an irq can happen before we're ready for it,
- * so we needn't worry much about losing an "important" interrupt here.
- *
- * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
- * the drive enters "idle", "standby", or "sleep" mode, so if the status
- * looks "good", we just ignore the interrupt completely.
- *
- * This routine assumes __cli() is in effect when called.
- *
- * If an unexpected interrupt happens on irq15 while we are handling irq14
- * and if the two interfaces are "serialized" (CMD640), then it looks like
- * we could screw up by interfering with a new request being set up for
- * irq15.
- *
- * In reality, this is a non-issue. The new command is not sent unless
- * the drive is ready to accept one, in which case we know the drive is
- * not trying to interrupt us. And ide_set_handler() is always invoked
- * before completing the issuance of any new drive command, so we will not
- * be accidentally invoked as a result of any valid command completion
- * interrupt.
- */
-
-static void unexpected_intr(int irq, ide_hwif_t *hwif)
-{
- u8 stat = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
- /* Try to not flood the console with msgs */
- static unsigned long last_msgtime, count;
- ++count;
-
- if (time_after(jiffies, last_msgtime + HZ)) {
- last_msgtime = jiffies;
- printk(KERN_ERR "%s: unexpected interrupt, "
- "status=0x%02x, count=%ld\n",
- hwif->name, stat, count);
- }
- }
-}
-
-/**
- * ide_intr - default IDE interrupt handler
- * @irq: interrupt number
- * @dev_id: hwif
- * @regs: unused weirdness from the kernel irq layer
- *
- * This is the default IRQ handler for the IDE layer. You should
- * not need to override it. If you do, be aware that it is subtle
- * in places.
- *
- * hwif is the interface in the group currently performing
- * a command. hwif->cur_dev is the drive and hwif->handler is
- * the IRQ handler to call. As we issue a command the handlers
- * step through multiple states, reassigning the handler to the
- * next step in the process. Unlike a smart SCSI controller IDE
- * expects the main processor to sequence the various transfer
- * stages. We also manage a poll timer to catch up with most
- * timeout situations. There are still a few where the handlers
- * don't ever decide to give up.
- *
- * The handler eventually returns ide_stopped to indicate the
- * request completed. At this point we issue the next request
- * on the port and the process begins again.
- */
-
-irqreturn_t ide_intr (int irq, void *dev_id)
-{
- ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
- struct ide_host *host = hwif->host;
- ide_drive_t *drive;
- ide_handler_t *handler;
- unsigned long flags;
- ide_startstop_t startstop;
- irqreturn_t irq_ret = IRQ_NONE;
- int plug_device = 0;
- struct request *rq_in_flight;
-
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- if (hwif != host->cur_port)
- goto out_early;
- }
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- if (hwif->port_ops && hwif->port_ops->test_irq &&
- hwif->port_ops->test_irq(hwif) == 0)
- goto out;
-
- handler = hwif->handler;
-
- if (handler == NULL || hwif->polling) {
- /*
- * Not expecting an interrupt from this drive.
- * That means this could be:
- * (1) an interrupt from another PCI device
- * sharing the same PCI INT# as us.
- * or (2) a drive just entered sleep or standby mode,
- * and is interrupting to let us know.
- * or (3) a spurious interrupt of unknown origin.
- *
- * For PCI, we cannot tell the difference,
- * so in that case we just ignore it and hope it goes away.
- */
- if ((host->irq_flags & IRQF_SHARED) == 0) {
- /*
- * Probably not a shared PCI interrupt,
- * so we can safely try to do something about it:
- */
- unexpected_intr(irq, hwif);
- } else {
- /*
- * Whack the status register, just in case
- * we have a leftover pending IRQ.
- */
- (void)hwif->tp_ops->read_status(hwif);
- }
- goto out;
- }
-
- drive = hwif->cur_dev;
-
- if (!drive_is_ready(drive))
- /*
- * This happens regularly when we share a PCI IRQ with
- * another device. Unfortunately, it can also happen
- * with some buggy drives that trigger the IRQ before
- * their status register is up to date. Hopefully we have
- * enough advance overhead that the latter isn't a problem.
- */
- goto out;
-
- hwif->handler = NULL;
- hwif->expiry = NULL;
- hwif->req_gen++;
- del_timer(&hwif->timer);
- spin_unlock(&hwif->lock);
-
- if (hwif->port_ops && hwif->port_ops->clear_irq)
- hwif->port_ops->clear_irq(drive);
-
- if (drive->dev_flags & IDE_DFLAG_UNMASK)
- local_irq_enable_in_hardirq();
-
- /* service this interrupt, may set handler for next interrupt */
- startstop = handler(drive);
-
- spin_lock_irq(&hwif->lock);
- /*
- * Note that handler() may have set things up for another
- * interrupt to occur soon, but it cannot happen until
- * we exit from this routine, because it will be the
- * same irq as is currently being serviced here, and Linux
- * won't allow another of the same (on any CPU) until we return.
- */
- if (startstop == ide_stopped && hwif->polling == 0) {
- BUG_ON(hwif->handler);
- rq_in_flight = hwif->rq;
- hwif->rq = NULL;
- ide_unlock_port(hwif);
- plug_device = 1;
- }
- irq_ret = IRQ_HANDLED;
-out:
- spin_unlock_irqrestore(&hwif->lock, flags);
-out_early:
- if (plug_device) {
- ide_unlock_host(hwif->host);
- ide_requeue_and_plug(drive, rq_in_flight);
- }
-
- return irq_ret;
-}
-EXPORT_SYMBOL_GPL(ide_intr);
-
-void ide_pad_transfer(ide_drive_t *drive, int write, int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 buf[4] = { 0 };
-
- while (len > 0) {
- if (write)
- hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
- else
- hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
- len -= 4;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pad_transfer);
-
-void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
-{
- drive->sense_rq_active = true;
- list_add_tail(&rq->queuelist, &drive->rq_list);
- kblockd_schedule_work(&drive->rq_work);
-}
-EXPORT_SYMBOL_GPL(ide_insert_request_head);
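
Note: ide_issue_rq() above serializes at two levels: a per-host IDE_HOST_BUSY bit for serialized hosts, and a per-port busy flag that is only ever tested and flipped while hwif->lock is held. A toy model of the port-level half, with a pthread mutex standing in for the spinlock (all toy_* names are illustrative, not kernel API):

	#include <pthread.h>
	#include <stdio.h>

	struct toy_port {
		pthread_mutex_t lock;	/* stands in for hwif->lock */
		int busy;		/* stands in for hwif->busy */
	};

	/* Same contract as ide_lock_port(): returns 1 if already claimed. */
	static int toy_lock_port(struct toy_port *p)
	{
		if (p->busy)
			return 1;
		p->busy = 1;
		return 0;
	}

	static void toy_unlock_port(struct toy_port *p)
	{
		p->busy = 0;
	}

	int main(void)
	{
		struct toy_port port = { PTHREAD_MUTEX_INITIALIZER, 0 };

		pthread_mutex_lock(&port.lock);
		if (!toy_lock_port(&port))
			printf("port claimed, request would be started\n");
		if (toy_lock_port(&port))
			printf("busy: the request would be requeued and plugged\n");
		toy_unlock_port(&port);
		pthread_mutex_unlock(&port.lock);
		return 0;
	}
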
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
deleted file mode 100644
index 43fbc37d85c3..000000000000
--- a/drivers/ide/ide-ioctls.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IDE ioctls handling.
- */
-
-#include <linux/compat.h>
-#include <linux/export.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-
-static int put_user_long(long val, unsigned long arg)
-{
- if (in_compat_syscall())
- return put_user(val, (compat_long_t __user *)compat_ptr(arg));
-
- return put_user(val, (long __user *)arg);
-}
-
-static const struct ide_ioctl_devset ide_ioctl_settings[] = {
-{ HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
-{ HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings },
-{ HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, &ide_devset_unmaskirq },
-{ HDIO_GET_DMA, HDIO_SET_DMA, &ide_devset_using_dma },
-{ -1, HDIO_SET_PIO_MODE, &ide_devset_pio_mode },
-{ 0 }
-};
-
-int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
- unsigned int cmd, unsigned long arg,
- const struct ide_ioctl_devset *s)
-{
- const struct ide_devset *ds;
- int err = -EOPNOTSUPP;
-
- for (; (ds = s->setting); s++) {
- if (ds->get && s->get_ioctl == cmd)
- goto read_val;
- else if (ds->set && s->set_ioctl == cmd)
- goto set_val;
- }
-
- return err;
-
-read_val:
- mutex_lock(&ide_setting_mtx);
- err = ds->get(drive);
- mutex_unlock(&ide_setting_mtx);
- return err >= 0 ? put_user_long(err, arg) : err;
-
-set_val:
- if (bdev_is_partition(bdev))
- err = -EINVAL;
- else {
- if (!capable(CAP_SYS_ADMIN))
- err = -EACCES;
- else {
- mutex_lock(&ide_setting_mtx);
- err = ide_devset_execute(drive, ds, arg);
- mutex_unlock(&ide_setting_mtx);
- }
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(ide_setting_ioctl);
-
-static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
- void __user *argp)
-{
- u16 *id = NULL;
- int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
- int rc = 0;
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- rc = -ENOMSG;
- goto out;
- }
-
- /* ata_id_to_hd_driveid() relies on 'id' being fully allocated. */
- id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
- if (id == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy(id, drive->id, size);
- ata_id_to_hd_driveid(id);
-
- if (copy_to_user(argp, id, size))
- rc = -EFAULT;
-
- kfree(id);
-out:
- return rc;
-}
-
-static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
- << IDE_NICE_DSC_OVERLAP) |
- (!!(drive->dev_flags & IDE_DFLAG_NICE1)
- << IDE_NICE_1), arg);
-}
-
-static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- if (arg != (arg & ((1 << IDE_NICE_DSC_OVERLAP) | (1 << IDE_NICE_1))))
- return -EPERM;
-
- if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
- (drive->media != ide_tape))
- return -EPERM;
-
- if ((arg >> IDE_NICE_DSC_OVERLAP) & 1)
- drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
- else
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
- if ((arg >> IDE_NICE_1) & 1)
- drive->dev_flags |= IDE_DFLAG_NICE1;
- else
- drive->dev_flags &= ~IDE_DFLAG_NICE1;
-
- return 0;
-}
-
-static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
-{
- u8 *buf = NULL;
- int bufsize = 0, err = 0;
- u8 args[4], xfer_rate = 0;
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- if (NULL == argp) {
- struct request *rq;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
- blk_execute_rq(NULL, rq, 0);
- err = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
-
- return err;
- }
-
- if (copy_from_user(args, argp, 4))
- return -EFAULT;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = args[2];
- if (args[0] == ATA_CMD_SMART) {
- tf->nsect = args[3];
- tf->lbal = args[1];
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- cmd.valid.out.tf = IDE_VALID_OUT_TF;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- } else {
- tf->nsect = args[1];
- cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- }
- tf->command = args[0];
- cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA;
-
- if (args[3]) {
- cmd.tf_flags |= IDE_TFLAG_IO_16BIT;
- bufsize = SECTOR_SIZE * args[3];
- buf = kzalloc(bufsize, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
- }
-
- if (tf->command == ATA_CMD_SET_FEATURES &&
- tf->feature == SETFEATURES_XFER &&
- tf->nsect >= XFER_SW_DMA_0) {
- xfer_rate = ide_find_dma_mode(drive, tf->nsect);
- if (xfer_rate != tf->nsect) {
- err = -EINVAL;
- goto abort;
- }
-
- cmd.tf_flags |= IDE_TFLAG_SET_XFER;
- }
-
- err = ide_raw_taskfile(drive, &cmd, buf, args[3]);
-
- args[0] = tf->status;
- args[1] = tf->error;
- args[2] = tf->nsect;
-abort:
- if (copy_to_user(argp, &args, 4))
- err = -EFAULT;
- if (buf) {
- if (copy_to_user((argp + 4), buf, bufsize))
- err = -EFAULT;
- kfree(buf);
- }
- return err;
-}
-
-static int ide_task_ioctl(ide_drive_t *drive, void __user *p)
-{
- int err = 0;
- u8 args[7];
- struct ide_cmd cmd;
-
- if (copy_from_user(args, p, 7))
- return -EFAULT;
-
- memset(&cmd, 0, sizeof(cmd));
- memcpy(&cmd.tf.feature, &args[1], 6);
- cmd.tf.command = args[0];
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- err = ide_no_data_taskfile(drive, &cmd);
-
- args[0] = cmd.tf.command;
- memcpy(&args[1], &cmd.tf.feature, 6);
-
- if (copy_to_user(p, args, 7))
- err = -EFAULT;
-
- return err;
-}
-
-static int generic_drive_reset(ide_drive_t *drive)
-{
- struct request *rq;
- int ret = 0;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd_len = 1;
- scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
- blk_execute_rq(NULL, rq, 1);
- ret = scsi_req(rq)->result;
- blk_put_request(rq);
- return ret;
-}
-
-int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
- unsigned int cmd, unsigned long arg)
-{
- int err;
- void __user *argp = (void __user *)arg;
-
- if (in_compat_syscall())
- argp = compat_ptr(arg);
-
- err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
- if (err != -EOPNOTSUPP)
- return err;
-
- switch (cmd) {
- case HDIO_OBSOLETE_IDENTITY:
- case HDIO_GET_IDENTITY:
- if (bdev_is_partition(bdev))
- return -EINVAL;
- return ide_get_identity_ioctl(drive, cmd, argp);
- case HDIO_GET_NICE:
- return ide_get_nice_ioctl(drive, arg);
- case HDIO_SET_NICE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return ide_set_nice_ioctl(drive, arg);
-#ifdef CONFIG_IDE_TASK_IOCTL
- case HDIO_DRIVE_TASKFILE:
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- /* missing compat handler for HDIO_DRIVE_TASKFILE */
- if (in_compat_syscall())
- return -ENOTTY;
- if (drive->media == ide_disk)
- return ide_taskfile_ioctl(drive, arg);
- return -ENOMSG;
-#endif
- case HDIO_DRIVE_CMD:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return ide_cmd_ioctl(drive, argp);
- case HDIO_DRIVE_TASK:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return ide_task_ioctl(drive, argp);
- case HDIO_DRIVE_RESET:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return generic_drive_reset(drive);
- case HDIO_GET_BUSSTATE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (put_user_long(BUSSTATE_ON, arg))
- return -EFAULT;
- return 0;
- case HDIO_SET_BUSSTATE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return -EOPNOTSUPP;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(generic_ide_ioctl);
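
As an aside, ide_setting_ioctl() above is table-driven: each entry pairs a get ioctl and a set ioctl with one setting, and -1 marks a missing direction. A compact stand-alone sketch of that dispatch (the command numbers and setting names below are made up, not the real HDIO_* values):

	#include <stdio.h>

	struct toy_setting {
		int get_cmd;		/* -1 means "not readable" */
		int set_cmd;		/* -1 means "not writable" */
		const char *name;
	};

	static const struct toy_setting toy_table[] = {
		{ 0x0309, 0x0324, "io_32bit" },
		{ -1,     0x0327, "pio_mode" },	/* write-only, like pio_mode above */
		{ -1, -1, NULL }
	};

	/* Scan the table and pick a direction, as ide_setting_ioctl() does. */
	static int toy_ioctl(int cmd)
	{
		const struct toy_setting *s;

		for (s = toy_table; s->name; s++) {
			if (s->get_cmd == cmd) {
				printf("read %s\n", s->name);
				return 0;
			}
			if (s->set_cmd == cmd) {
				printf("write %s\n", s->name);
				return 0;
			}
		}
		return -1;	/* the real code returns -EOPNOTSUPP here */
	}

	int main(void)
	{
		toy_ioctl(0x0309);
		toy_ioctl(0x0327);
		if (toy_ioctl(0x9999) < 0)
			printf("unknown cmd falls through to the big switch\n");
		return 0;
	}
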
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
deleted file mode 100644
index f2be127ee96e..000000000000
--- a/drivers/ide/ide-iops.c
+++ /dev/null
@@ -1,536 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/blkpg.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-#include <linux/nmi.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-void SELECT_MASK(ide_drive_t *drive, int mask)
-{
- const struct ide_port_ops *port_ops = drive->hwif->port_ops;
-
- if (port_ops && port_ops->maskproc)
- port_ops->maskproc(drive, mask);
-}
-
-u8 ide_read_error(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);
-
- return tf.error;
-}
-EXPORT_SYMBOL_GPL(ide_read_error);
-
-void ide_fix_driveid(u16 *id)
-{
-#ifndef __LITTLE_ENDIAN
-# ifdef __BIG_ENDIAN
- int i;
-
- for (i = 0; i < 256; i++)
- id[i] = __le16_to_cpu(id[i]);
-# else
-# error "Please fix <asm/byteorder.h>"
-# endif
-#endif
-}
-
-/*
- * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
- * removing leading/trailing blanks and compressing internal blanks.
- * It is primarily used to tidy up the model name/number fields as
- * returned by the ATA_CMD_ID_ATA[PI] commands.
- */
-
-void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
-{
- u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
-
- if (byteswap) {
- /* convert from big-endian to host byte order */
- for (p = s ; p != end ; p += 2)
- be16_to_cpus((u16 *) p);
- }
-
- /* strip leading blanks */
- p = s;
- while (s != end && *s == ' ')
- ++s;
- /* compress internal blanks and strip trailing blanks */
- while (s != end && *s) {
- if (*s++ != ' ' || (s != end && *s && *s != ' '))
- *p++ = *(s-1);
- }
- /* wipe out trailing garbage */
- while (p != end)
- *p++ = '\0';
-}
-EXPORT_SYMBOL(ide_fixstring);
-
-/*
- * This routine busy-waits for the drive status to be not "busy".
- * It then checks the status for all of the "good" bits and none
- * of the "bad" bits, and if all is okay it returns 0. All other
- * cases return error -- caller may then invoke ide_error().
- *
- * This routine should be fixed to not hog the CPU during extra-long waits:
- * busy-wait for the first jiffy or two, then set a timer to wake up at
- * half-second intervals thereafter, until the timeout is reached.
- */
-int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
- unsigned long timeout, u8 *rstat)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- unsigned long flags;
- bool irqs_threaded = force_irqthreads;
- int i;
- u8 stat;
-
- udelay(1); /* spec allows drive 400ns to assert "BUSY" */
- stat = tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY) {
- if (!irqs_threaded) {
- local_save_flags(flags);
- local_irq_enable_in_hardirq();
- }
- timeout += jiffies;
- while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
- if (time_after(jiffies, timeout)) {
- /*
- * One last read after the timeout in case
- * heavy interrupt load made us not make any
- * progress during the timeout..
- */
- stat = tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- break;
-
- if (!irqs_threaded)
- local_irq_restore(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
- if (!irqs_threaded)
- local_irq_restore(flags);
- }
- /*
- * Allow status to settle, then read it again.
- * A few rare drives vastly violate the 400ns spec here,
- * so we'll wait up to 10usec for a "good" status
- * rather than expensively fail things immediately.
- * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
- */
- for (i = 0; i < 10; i++) {
- udelay(1);
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, good, bad)) {
- *rstat = stat;
- return 0;
- }
- }
- *rstat = stat;
- return -EFAULT;
-}
-
-/*
- * In case of error returns error value after doing "*startstop = ide_error()".
- * The caller should return the updated value of "startstop" in this case,
- * "startstop" is unchanged when the function returns 0.
- */
-int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
- u8 bad, unsigned long timeout)
-{
- int err;
- u8 stat;
-
- /* bail early if we've exceeded max_failures */
- if (drive->max_failures && (drive->failures > drive->max_failures)) {
- *startstop = ide_stopped;
- return 1;
- }
-
- err = __ide_wait_stat(drive, good, bad, timeout, &stat);
-
- if (err) {
- char *s = (err == -EBUSY) ? "status timeout" : "status error";
- *startstop = ide_error(drive, s, stat);
- }
-
- return err;
-}
-EXPORT_SYMBOL(ide_wait_stat);
-
-/**
- * ide_in_drive_list - look for drive in black/white list
- * @id: drive identifier
- * @table: list to inspect
- *
- * Look for a drive in the blacklist and whitelist tables.
- * Returns 1 if the drive is found in the table.
- */
-
-int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
-{
- for ( ; table->id_model; table++)
- if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
- (!table->id_firmware ||
- strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_in_drive_list);
-
-/*
- * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
- * Some optical devices with buggy firmware have the same problem.
- */
-static const struct drive_list_entry ivb_list[] = {
- { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
- { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
- { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
- { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
- { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
- { "SAMSUNG SP0822N" , "WA100-10" },
- { NULL , NULL }
-};
-
-/*
- * All hosts that use the 80-conductor ribbon cable must use this check!
- * The name is derived from the upper byte of word 93 and the 80c ribbon.
- */
-u8 eighty_ninty_three(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
- int ivb = ide_in_drive_list(id, ivb_list);
-
- if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
- return 1;
-
- if (ivb)
- printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
- drive->name);
-
- if (ata_id_is_sata(id) && !ivb)
- return 1;
-
- if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
- goto no_80w;
-
- /*
- * FIXME:
- * - change master/slave IDENTIFY order
- * - force bit13 (80c cable present) check also for !ivb devices
- * (unless the slave device is pre-ATA3)
- */
- if (id[ATA_ID_HW_CONFIG] & 0x4000)
- return 1;
-
- if (ivb) {
- const char *model = (char *)&id[ATA_ID_PROD];
-
- if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
- /*
- * These ATAPI devices always report 80c cable
- * so we have to depend on the host in this case.
- */
- if (hwif->cbl == ATA_CBL_PATA80)
- return 1;
- } else {
- /* Depend on the device side cable detection. */
- if (id[ATA_ID_HW_CONFIG] & 0x2000)
- return 1;
- }
- }
-no_80w:
- if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
- return 0;
-
- printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
- "limiting max speed to UDMA33\n",
- drive->name,
- hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
-
- drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;
-
- return 0;
-}
-
-static const char *nien_quirk_list[] = {
- "QUANTUM FIREBALLlct08 08",
- "QUANTUM FIREBALLP KA6.4",
- "QUANTUM FIREBALLP KA9.1",
- "QUANTUM FIREBALLP KX13.6",
- "QUANTUM FIREBALLP KX20.5",
- "QUANTUM FIREBALLP KX27.3",
- "QUANTUM FIREBALLP LM20.4",
- "QUANTUM FIREBALLP LM20.5",
- "FUJITSU MHZ2160BH G2",
- NULL
-};
-
-void ide_check_nien_quirk_list(ide_drive_t *drive)
-{
- const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
-
- for (list = nien_quirk_list; *list != NULL; list++)
- if (strstr(m, *list) != NULL) {
- drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
- return;
- }
-}
-
-int ide_driveid_update(ide_drive_t *drive)
-{
- u16 *id;
- int rc;
-
- id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
- if (id == NULL)
- return 0;
-
- SELECT_MASK(drive, 1);
- rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
- SELECT_MASK(drive, 0);
-
- if (rc)
- goto out_err;
-
- drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
- drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
- drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
- drive->id[ATA_ID_CFA_MODES] = id[ATA_ID_CFA_MODES];
- /* anything more ? */
-
- kfree(id);
-
- return 1;
-out_err:
- if (rc == 2)
- printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
- kfree(id);
- return 0;
-}
-
-int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- struct ide_taskfile tf;
- u16 *id = drive->id, i;
- int error = 0;
- u8 stat;
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
- if (hwif->dma_ops) /* check if host supports DMA */
- hwif->dma_ops->dma_host_set(drive, 0);
-#endif
-
- /* Skip setting PIO flow-control modes on pre-EIDE drives */
- if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
- goto skip;
-
- /*
- * Don't use ide_wait_cmd here - it will
- * attempt to set_geometry and recalibrate,
- * but for some reason these don't work at
- * this point (lost interrupt).
- */
-
- udelay(1);
- tp_ops->dev_select(drive);
- SELECT_MASK(drive, 1);
- udelay(1);
- tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
-
- memset(&tf, 0, sizeof(tf));
- tf.feature = SETFEATURES_XFER;
- tf.nsect = speed;
-
- tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);
-
- tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
-
- if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- error = __ide_wait_stat(drive, drive->ready_stat,
- ATA_BUSY | ATA_DRQ | ATA_ERR,
- WAIT_CMD, &stat);
-
- SELECT_MASK(drive, 0);
-
- if (error) {
- (void) ide_dump_status(drive, "set_drive_speed_status", stat);
- return error;
- }
-
- if (speed >= XFER_SW_DMA_0) {
- id[ATA_ID_UDMA_MODES] &= ~0xFF00;
- id[ATA_ID_MWDMA_MODES] &= ~0x0700;
- id[ATA_ID_SWDMA_MODES] &= ~0x0700;
- if (ata_id_is_cfa(id))
- id[ATA_ID_CFA_MODES] &= ~0x0E00;
- } else if (ata_id_is_cfa(id))
- id[ATA_ID_CFA_MODES] &= ~0x01C0;
-
- skip:
-#ifdef CONFIG_BLK_DEV_IDEDMA
- if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
- hwif->dma_ops->dma_host_set(drive, 1);
- else if (hwif->dma_ops) /* check if host supports DMA */
- ide_dma_off_quietly(drive);
-#endif
-
- if (speed >= XFER_UDMA_0) {
- i = 1 << (speed - XFER_UDMA_0);
- id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
- } else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
- i = speed - XFER_MW_DMA_2;
- id[ATA_ID_CFA_MODES] |= i << 9;
- } else if (speed >= XFER_MW_DMA_0) {
- i = 1 << (speed - XFER_MW_DMA_0);
- id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
- } else if (speed >= XFER_SW_DMA_0) {
- i = 1 << (speed - XFER_SW_DMA_0);
- id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
- } else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
- i = speed - XFER_PIO_4;
- id[ATA_ID_CFA_MODES] |= i << 6;
- }
-
- if (!drive->init_speed)
- drive->init_speed = speed;
- drive->current_speed = speed;
- return error;
-}
-
-/*
- * This should get invoked any time we exit the driver to
- * wait for an interrupt response from a drive. handler() points
- * at the appropriate code to handle the next interrupt, and a
- * timer is started to prevent us from waiting forever in case
- * something goes wrong (see the ide_timer_expiry() handler later on).
- *
- * See also ide_execute_command
- */
-void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
- unsigned int timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- BUG_ON(hwif->handler);
- hwif->handler = handler;
- hwif->timer.expires = jiffies + timeout;
- hwif->req_gen_timer = hwif->req_gen;
- add_timer(&hwif->timer);
-}
-
-void ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
- unsigned int timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
- __ide_set_handler(drive, handler, timeout);
- spin_unlock_irqrestore(&hwif->lock, flags);
-}
-EXPORT_SYMBOL(ide_set_handler);
-
-/**
- * ide_execute_command - execute an IDE command
- * @drive: IDE drive to issue the command against
- * @cmd: command
- * @handler: handler for next phase
- * @timeout: timeout for command
- *
- * Helper function to issue an IDE command. This handles the
- * atomicity requirements, command timing and ensures that the
- * handler and IRQ setup do not race. All IDE command kick-off
- * should go via this function or use equivalent locking.
- */
-
-void ide_execute_command(ide_drive_t *drive, struct ide_cmd *cmd,
- ide_handler_t *handler, unsigned timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
- if ((cmd->protocol != ATAPI_PROT_DMA &&
- cmd->protocol != ATAPI_PROT_PIO) ||
- (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT))
- __ide_set_handler(drive, handler, timeout);
- hwif->tp_ops->exec_command(hwif, cmd->tf.command);
- /*
- * The drive takes 400 ns to respond; we must avoid the IRQ being
- * serviced before that.
- *
- * FIXME: we could skip this delay with care on non shared devices
- */
- ndelay(400);
- spin_unlock_irqrestore(&hwif->lock, flags);
-}
-
-/*
- * ide_wait_not_busy() waits for the currently selected device on the hwif
- * to report a non-busy status, see comments in ide_probe_port().
- */
-int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
-{
- u8 stat = 0;
-
- while (timeout--) {
- /*
- * Turn this into a schedule() sleep once I'm sure
- * about locking issues (2.5 work ?).
- */
- mdelay(1);
- stat = hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- return 0;
- /*
- * Assume a value of 0xff means nothing is connected to
- * the interface and it doesn't implement the pull-down
- * resistor on D7.
- */
- if (stat == 0xff)
- return -ENODEV;
- touch_nmi_watchdog();
- }
- return -EBUSY;
-}
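
Note: __ide_wait_stat() above is a deadline-bounded poll with one defensive re-read after the deadline expires, in case heavy interrupt load stalled the polling loop itself. A user-space sketch of that structure, assuming a fake status source in which bit 0x80 plays ATA_BUSY (fake_status and toy_wait_not_busy are illustrative names):

	#include <stdio.h>
	#include <time.h>

	static unsigned char fake_status(void)
	{
		static int calls;	/* pretend BUSY clears after ~50 reads */
		return ++calls > 50 ? 0x50 : 0xd0;
	}

	/* Poll until not-busy, with a deadline and one last read after it,
	 * mirroring the shape of __ide_wait_stat(). */
	static int toy_wait_not_busy(long timeout_ms, unsigned char *rstat)
	{
		struct timespec one_ms = { 0, 1000 * 1000 };
		unsigned char stat;
		long waited = 0;

		while ((stat = fake_status()) & 0x80) {
			if (waited++ >= timeout_ms) {
				stat = fake_status();	/* one last chance */
				if ((stat & 0x80) == 0)
					break;
				*rstat = stat;
				return -1;	/* -EBUSY in the real code */
			}
			nanosleep(&one_ms, NULL);
		}
		*rstat = stat;
		return 0;
	}

	int main(void)
	{
		unsigned char stat;

		printf("rc=%d status=0x%02x\n",
		       toy_wait_not_busy(100, &stat), stat);
		return 0;
	}
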
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
deleted file mode 100644
index be65b411ab53..000000000000
--- a/drivers/ide/ide-legacy.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-
-static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
- u8 port_no, const struct ide_port_info *d,
- unsigned long config)
-{
- unsigned long base, ctl;
- int irq;
-
- if (port_no == 0) {
- base = 0x1f0;
- ctl = 0x3f6;
- irq = 14;
- } else {
- base = 0x170;
- ctl = 0x376;
- irq = 15;
- }
-
- if (!request_region(base, 8, d->name)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- d->name, base, base + 7);
- return;
- }
-
- if (!request_region(ctl, 1, d->name)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- d->name, ctl);
- release_region(base, 8);
- return;
- }
-
- ide_std_init_ports(hw, base, ctl);
- hw->irq = irq;
- hw->config = config;
-
- hws[port_no] = hw;
-}
-
-int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
-{
- struct ide_hw hw[2], *hws[] = { NULL, NULL };
-
- memset(&hw, 0, sizeof(hw));
-
- if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
- ide_legacy_init_one(hws, &hw[0], 0, d, config);
- ide_legacy_init_one(hws, &hw[1], 1, d, config);
-
- if (hws[0] == NULL && hws[1] == NULL &&
- (d->host_flags & IDE_HFLAG_SINGLE))
- return -ENOENT;
-
- return ide_host_add(d, hws, 2, NULL);
-}
-EXPORT_SYMBOL_GPL(ide_legacy_device_add);
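
The resources hard-coded in ide_legacy_init_one() are the classic PC/AT channel assignments. A table-driven sketch of the same mapping, with the values taken from the code above:

#include <stdio.h>

struct legacy_port {
	unsigned long base, ctl;
	int irq;
};

/* classic PC/AT IDE assignments */
static const struct legacy_port legacy_ports[2] = {
	{ 0x1f0, 0x3f6, 14 },	/* port 0: primary */
	{ 0x170, 0x376, 15 },	/* port 1: secondary */
};

int main(void)
{
	for (int i = 0; i < 2; i++)
		printf("port %d: base 0x%lx, ctl 0x%lx, irq %d\n", i,
		       legacy_ports[i].base, legacy_ports[i].ctl,
		       legacy_ports[i].irq);
	return 0;
}
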
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
deleted file mode 100644
index 7b9f655adbc2..000000000000
--- a/drivers/ide/ide-lib.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-
-u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
-{
- struct ide_taskfile *tf = &cmd->tf;
- u32 high, low;
-
- low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
- if (lba48) {
- tf = &cmd->hob;
- high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
- } else
- high = tf->device & 0xf;
-
- return ((u64)high << 24) | low;
-}
-EXPORT_SYMBOL_GPL(ide_get_lba_addr);
-
-static void ide_dump_sector(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
-
- memset(&cmd, 0, sizeof(cmd));
- if (lba48) {
- cmd.valid.in.tf = IDE_VALID_LBA;
- cmd.valid.in.hob = IDE_VALID_LBA;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- } else
- cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE;
-
- ide_tf_readback(drive, &cmd);
-
- if (lba48 || (tf->device & ATA_LBA))
- printk(KERN_CONT ", LBAsect=%llu",
- (unsigned long long)ide_get_lba_addr(&cmd, lba48));
- else
- printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
- tf->device & 0xf, tf->lbal);
-}
-
-static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
-{
- printk(KERN_CONT "{ ");
- if (err & ATA_ABORTED)
- printk(KERN_CONT "DriveStatusError ");
- if (err & ATA_ICRC)
- printk(KERN_CONT "%s",
- (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
- if (err & ATA_UNC)
- printk(KERN_CONT "UncorrectableError ");
- if (err & ATA_IDNF)
- printk(KERN_CONT "SectorIdNotFound ");
- if (err & ATA_TRK0NF)
- printk(KERN_CONT "TrackZeroNotFound ");
- if (err & ATA_AMNF)
- printk(KERN_CONT "AddrMarkNotFound ");
- printk(KERN_CONT "}");
- if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
- (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
- struct request *rq = drive->hwif->rq;
-
- ide_dump_sector(drive);
-
- if (rq)
- printk(KERN_CONT ", sector=%llu",
- (unsigned long long)blk_rq_pos(rq));
- }
- printk(KERN_CONT "\n");
-}
-
-static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
-{
- printk(KERN_CONT "{ ");
- if (err & ATAPI_ILI)
- printk(KERN_CONT "IllegalLengthIndication ");
- if (err & ATAPI_EOM)
- printk(KERN_CONT "EndOfMedia ");
- if (err & ATA_ABORTED)
- printk(KERN_CONT "AbortedCommand ");
- if (err & ATA_MCR)
- printk(KERN_CONT "MediaChangeRequested ");
- if (err & ATAPI_LFS)
- printk(KERN_CONT "LastFailedSense=0x%02x ",
- (err & ATAPI_LFS) >> 4);
- printk(KERN_CONT "}\n");
-}
-
-/**
- * ide_dump_status - translate ATA/ATAPI error
- * @drive: drive that status applies to
- * @msg: text message to print
- * @stat: status byte to decode
- *
- * Error reporting, in human-readable form (luxurious, but a memory hog).
- * Combines the drive name, message and status byte to provide a
- * user-understandable explanation of the device error.
- */
-
-u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
-{
- u8 err = 0;
-
- printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
- if (stat & ATA_BUSY)
- printk(KERN_CONT "Busy ");
- else {
- if (stat & ATA_DRDY)
- printk(KERN_CONT "DriveReady ");
- if (stat & ATA_DF)
- printk(KERN_CONT "DeviceFault ");
- if (stat & ATA_DSC)
- printk(KERN_CONT "SeekComplete ");
- if (stat & ATA_DRQ)
- printk(KERN_CONT "DataRequest ");
- if (stat & ATA_CORR)
- printk(KERN_CONT "CorrectedError ");
- if (stat & ATA_SENSE)
- printk(KERN_CONT "Sense ");
- if (stat & ATA_ERR)
- printk(KERN_CONT "Error ");
- }
- printk(KERN_CONT "}\n");
- if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
- err = ide_read_error(drive);
- printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
- if (drive->media == ide_disk)
- ide_dump_ata_error(drive, err);
- else
- ide_dump_atapi_error(drive, err);
- }
-
- printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
- drive->name, drive->hwif->cmd.tf.command);
-
- return err;
-}
-EXPORT_SYMBOL(ide_dump_status);
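
ide_get_lba_addr() above assembles the block address from the taskfile registers: bits 0-23 always come from lbah/lbam/lbal, while the upper bits come either from the HOB copies (LBA48) or from the low nibble of the device register (LBA28). A self-contained sketch of that bit layout; the register values in main() are hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint64_t lba_addr(uint8_t lbal, uint8_t lbam, uint8_t lbah,
			 uint8_t device, int lba48,
			 uint8_t hob_lbal, uint8_t hob_lbam, uint8_t hob_lbah)
{
	uint32_t low = ((uint32_t)lbah << 16) | ((uint32_t)lbam << 8) | lbal;
	uint32_t high = lba48 ?
		((uint32_t)hob_lbah << 16) | ((uint32_t)hob_lbam << 8) | hob_lbal :
		(uint32_t)(device & 0xf);

	return ((uint64_t)high << 24) | low;
}

int main(void)
{
	/* hypothetical LBA48 register contents for sector 0x123456789 */
	printf("0x%llx\n", (unsigned long long)
	       lba_addr(0x89, 0x67, 0x45, 0, 1, 0x23, 0x01, 0x00));
	return 0;
}
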
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
deleted file mode 100644
index a80a0f28f7b9..000000000000
--- a/drivers/ide/ide-park.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-#include <linux/jiffies.h>
-#include <linux/blkdev.h>
-
-DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
-
-static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request_queue *q = drive->queue;
- struct request *rq;
- int rc;
-
- timeout += jiffies;
- spin_lock_irq(&hwif->lock);
- if (drive->dev_flags & IDE_DFLAG_PARKED) {
- int reset_timer = time_before(timeout, drive->sleep);
- int start_queue = 0;
-
- drive->sleep = timeout;
- wake_up_all(&ide_park_wq);
- if (reset_timer && del_timer(&hwif->timer))
- start_queue = 1;
- spin_unlock_irq(&hwif->lock);
-
- if (start_queue)
- blk_mq_run_hw_queues(q, true);
- return;
- }
- spin_unlock_irq(&hwif->lock);
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
- scsi_req(rq)->cmd_len = 1;
- ide_req(rq)->type = ATA_PRIV_MISC;
- ide_req(rq)->special = &timeout;
- blk_execute_rq(NULL, rq, 1);
- rc = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
- if (rc)
- goto out;
-
- /*
- * Make sure that *some* command is sent to the drive after the
- * timeout has expired, so power management will be reenabled.
- */
- rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(rq))
- goto out;
-
- scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
- scsi_req(rq)->cmd_len = 1;
- ide_req(rq)->type = ATA_PRIV_MISC;
- spin_lock_irq(&hwif->lock);
- ide_insert_request_head(drive, rq);
- spin_unlock_irq(&hwif->lock);
-
-out:
- return;
-}
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
- drive->sleep = *(unsigned long *)ide_req(rq)->special;
- drive->dev_flags |= IDE_DFLAG_SLEEPING;
- tf->command = ATA_CMD_IDLEIMMEDIATE;
- tf->feature = 0x44;
- tf->lbal = 0x4c;
- tf->lbam = 0x4e;
- tf->lbah = 0x55;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- } else /* cmd == REQ_UNPARK_HEADS */
- tf->command = ATA_CMD_CHK_POWER;
-
- cmd.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
- cmd.protocol = ATA_PROT_NODATA;
-
- cmd.rq = rq;
-
- return do_rw_taskfile(drive, &cmd);
-}
-
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_hwif_t *hwif = drive->hwif;
- unsigned long now;
- unsigned int msecs;
-
- if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
- return -EOPNOTSUPP;
-
- spin_lock_irq(&hwif->lock);
- now = jiffies;
- if (drive->dev_flags & IDE_DFLAG_PARKED &&
- time_after(drive->sleep, now))
- msecs = jiffies_to_msecs(drive->sleep - now);
- else
- msecs = 0;
- spin_unlock_irq(&hwif->lock);
-
- return snprintf(buf, 20, "%u\n", msecs);
-}
-
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
-#define MAX_PARK_TIMEOUT 30000
- ide_drive_t *drive = to_ide_device(dev);
- long int input;
- int rc;
-
- rc = kstrtol(buf, 10, &input);
- if (rc)
- return rc;
- if (input < -2)
- return -EINVAL;
- if (input > MAX_PARK_TIMEOUT) {
- input = MAX_PARK_TIMEOUT;
- rc = -EOVERFLOW;
- }
-
- mutex_lock(&ide_setting_mtx);
- if (input >= 0) {
- if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
- rc = -EOPNOTSUPP;
- else if (input || drive->dev_flags & IDE_DFLAG_PARKED)
- issue_park_cmd(drive, msecs_to_jiffies(input));
- } else {
- if (drive->media == ide_disk)
- switch (input) {
- case -1:
- drive->dev_flags &= ~IDE_DFLAG_NO_UNLOAD;
- break;
- case -2:
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
- break;
- }
- else
- rc = -EOPNOTSUPP;
- }
- mutex_unlock(&ide_setting_mtx);
-
- return rc ? rc : len;
-}
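
ide_park_show() and ide_park_store() back the per-device unload_heads sysfs attribute (see Documentation/admin-guide/laptops/disk-shock-protection.rst for the interface). A minimal user-space sketch that parks the heads for one second; the device path is an assumption and depends on the disk in question:

#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/block/hda/device/unload_heads";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	/*
	 * The value is a timeout in milliseconds, capped at 30000
	 * by the driver; -1 and -2 clear and set the "no unload"
	 * flag instead of issuing a park command.
	 */
	fprintf(f, "%d\n", 1000);
	return fclose(f) ? 1 : 0;
}
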
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
deleted file mode 100644
index 673420db953f..000000000000
--- a/drivers/ide/ide-pci-generic.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
- * Portions (C) Copyright 2002 Red Hat Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "ide_pci_generic"
-
-static bool ide_generic_all; /* Set to claim all devices */
-
-module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
-MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
-
-static void netcell_quirkproc(ide_drive_t *drive)
-{
- /* mark words 85-87 as valid */
- drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
-}
-
-static const struct ide_port_ops netcell_port_ops = {
- .quirkproc = netcell_quirkproc,
-};
-
-#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
- { \
- .name = DRV_NAME, \
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
- extra_flags, \
- .swdma_mask = ATA_SWDMA2, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = ATA_UDMA6, \
- }
-
-static const struct ide_port_info generic_chipsets[] = {
- /* 0: Unknown */
- DECLARE_GENERIC_PCI_DEV(0),
-
- { /* 1: NS87410 */
- .name = DRV_NAME,
- .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- },
-
- /* 2: SAMURAI / HT6565 / HINT_IDE */
- DECLARE_GENERIC_PCI_DEV(0),
- /* 3: UM8673F / UM8886A / UM8886BF */
- DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA),
- /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
- DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
-
- { /* 5: VIA8237SATA */
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_OFF_BOARD,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- },
-
- { /* 6: Revolution */
- .name = DRV_NAME,
- .port_ops = &netcell_port_ops,
- .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
- IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_OFF_BOARD,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- }
-};
-
-/**
- * generic_init_one - called when a PIIX is found
- * @dev: the generic device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &generic_chipsets[id->driver_data];
- int ret = -ENODEV;
-
- /* Don't use the generic entry unless instructed to do so */
- if (id->driver_data == 0 && ide_generic_all == 0)
- goto out;
-
- switch (dev->vendor) {
- case PCI_VENDOR_ID_UMC:
- if (dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
- !(PCI_FUNC(dev->devfn) & 1))
- goto out; /* UM8886A/BF pair */
- break;
- case PCI_VENDOR_ID_OPTI:
- if (dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
- !(PCI_FUNC(dev->devfn) & 1))
- goto out;
- break;
- case PCI_VENDOR_ID_JMICRON:
- if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 &&
- PCI_FUNC(dev->devfn) != 1)
- goto out;
- break;
- case PCI_VENDOR_ID_NS:
- if (dev->device == PCI_DEVICE_ID_NS_87410 &&
- (dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
- goto out;
- break;
- }
-
- if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (!(command & PCI_COMMAND_IO)) {
- printk(KERN_INFO "%s %s: skipping disabled "
- "controller\n", d->name, pci_name(dev));
- goto out;
- }
- }
- ret = ide_pci_init_one(dev, d, NULL);
-out:
- return ret;
-}
-
-static const struct pci_device_id generic_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 },
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 },
- { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 },
- { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 },
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 },
-#endif
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), 4 },
- { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 },
- /*
- * Must come last. If you add entries adjust
- * this table and generic_chipsets[] appropriately.
- */
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
-
-static struct pci_driver generic_pci_driver = {
- .name = "PCI_IDE",
- .id_table = generic_pci_tbl,
- .probe = generic_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init generic_ide_init(void)
-{
- return ide_pci_register_driver(&generic_pci_driver);
-}
-
-static void __exit generic_ide_exit(void)
-{
- pci_unregister_driver(&generic_pci_driver);
-}
-
-module_init(generic_ide_init);
-module_exit(generic_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for generic PCI IDE");
-MODULE_LICENSE("GPL");
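
generic_pci_tbl above uses the standard PCI driver idiom of storing an array index in the driver_data field of each pci_device_id and dereferencing it into a parallel chipset table at probe time. A condensed sketch of the pattern, with simplified types and hypothetical IDs:

#include <stddef.h>
#include <stdio.h>

struct chip_info {
	const char *name;
	int no_dma;
};

static const struct chip_info chipsets[] = {
	{ "generic", 0 },	/* 0: claimed only with all_generic_ide */
	{ "ns87410", 0 },	/* 1 */
	{ "um8886", 1 },	/* 2: DMA-less part */
};

struct id_entry {
	unsigned int vendor, device;
	size_t driver_data;	/* index into chipsets[] */
};

static const struct id_entry id_table[] = {
	{ 0x100b, 0x0002, 1 },	/* hypothetical IDs */
	{ 0x1060, 0x8886, 2 },
};

/* probe: driver_data selects the chipset description */
static void probe(const struct id_entry *id)
{
	const struct chip_info *d = &chipsets[id->driver_data];

	printf("binding %s (DMA %s)\n", d->name, d->no_dma ? "off" : "on");
}

int main(void)
{
	probe(&id_table[0]);
	probe(&id_table[1]);
	return 0;
}
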
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
deleted file mode 100644
index 1fd24798e5c9..000000000000
--- a/drivers/ide/ide-pio-blacklist.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PIO blacklist. Some drives incorrectly report their maximal PIO mode,
- * at least with respect to the CMD640. Here we keep info on some known drives.
- *
- * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION
- * to avoid breaking the fragile cmd640.c support.
- */
-
-#include <linux/string.h>
-#include <linux/ide.h>
-
-static struct ide_pio_info {
- const char *name;
- int pio;
-} ide_pio_blacklist [] = {
- { "Conner Peripherals 540MB - CFS540A", 3 },
-
- { "WDC AC2700", 3 },
- { "WDC AC2540", 3 },
- { "WDC AC2420", 3 },
- { "WDC AC2340", 3 },
- { "WDC AC2250", 0 },
- { "WDC AC2200", 0 },
- { "WDC AC21200", 4 },
- { "WDC AC2120", 0 },
- { "WDC AC2850", 3 },
- { "WDC AC1270", 3 },
- { "WDC AC1170", 1 },
- { "WDC AC1210", 1 },
- { "WDC AC280", 0 },
- { "WDC AC31000", 3 },
- { "WDC AC31200", 3 },
-
- { "Maxtor 7131 AT", 1 },
- { "Maxtor 7171 AT", 1 },
- { "Maxtor 7213 AT", 1 },
- { "Maxtor 7245 AT", 1 },
- { "Maxtor 7345 AT", 1 },
- { "Maxtor 7546 AT", 3 },
- { "Maxtor 7540 AV", 3 },
-
- { "SAMSUNG SHD-3121A", 1 },
- { "SAMSUNG SHD-3122A", 1 },
- { "SAMSUNG SHD-3172A", 1 },
-
- { "ST5660A", 3 },
- { "ST3660A", 3 },
- { "ST3630A", 3 },
- { "ST3655A", 3 },
- { "ST3391A", 3 },
- { "ST3390A", 1 },
- { "ST3600A", 1 },
- { "ST3290A", 0 },
- { "ST3144A", 0 },
- { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive)
- according to Seagate's FIND-ATA program */
-
- { "QUANTUM ELS127A", 0 },
- { "QUANTUM ELS170A", 0 },
- { "QUANTUM LPS240A", 0 },
- { "QUANTUM LPS210A", 3 },
- { "QUANTUM LPS270A", 3 },
- { "QUANTUM LPS365A", 3 },
- { "QUANTUM LPS540A", 3 },
- { "QUANTUM LIGHTNING 540A", 3 },
- { "QUANTUM LIGHTNING 730A", 3 },
-
- { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
- { "QUANTUM FIREBALL_640", 3 },
- { "QUANTUM FIREBALL_1080", 3 },
- { "QUANTUM FIREBALL_1280", 3 },
- { NULL, 0 }
-};
-
-/**
- * ide_scan_pio_blacklist - check for a blacklisted drive
- * @model: Drive model string
- *
- * This routine searches the ide_pio_blacklist for an entry
- * matching the start/whole of the supplied model name.
- *
- * Returns -1 if no match found.
- * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
- */
-
-int ide_scan_pio_blacklist(char *model)
-{
- struct ide_pio_info *p;
-
- for (p = ide_pio_blacklist; p->name != NULL; p++) {
- if (strncmp(p->name, model, strlen(p->name)) == 0)
- return p->pio;
- }
- return -1;
-}
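
ide_scan_pio_blacklist() does a linear scan, treating each table entry as a prefix of the model string; the first match wins. A standalone sketch of the lookup:

#include <stdio.h>
#include <string.h>

struct pio_info {
	const char *name;
	int pio;
};

static const struct pio_info blacklist[] = {
	{ "WDC AC2700", 3 },
	{ "ST3390A", 1 },
	{ NULL, 0 }
};

/* return the forced PIO mode for a model, or -1 if not listed */
static int scan_blacklist(const char *model)
{
	for (const struct pio_info *p = blacklist; p->name; p++)
		if (strncmp(p->name, model, strlen(p->name)) == 0)
			return p->pio;
	return -1;
}

int main(void)
{
	printf("%d\n", scan_blacklist("ST3390A rev 2"));	/* 1 */
	printf("%d\n", scan_blacklist("ST3600N"));		/* -1 */
	return 0;
}
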
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
deleted file mode 100644
index d680b3e3295f..000000000000
--- a/drivers/ide/ide-pm.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-
-int generic_ide_suspend(struct device *dev, pm_message_t mesg)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- struct ide_pm_state rqpm;
- int ret;
-
- if (ide_port_acpi(hwif)) {
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
- }
-
- memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
- ide_req(rq)->special = &rqpm;
- rqpm.pm_step = IDE_PM_START_SUSPEND;
- if (mesg.event == PM_EVENT_PRETHAW)
- mesg.event = PM_EVENT_FREEZE;
- rqpm.pm_state = mesg.event;
-
- blk_execute_rq(NULL, rq, 0);
- ret = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
-
- if (ret == 0 && ide_port_acpi(hwif)) {
- /* call ACPI _PS3 only after both devices are suspended */
- if ((drive->dn & 1) || pair == NULL)
- ide_acpi_set_state(hwif, 0);
- }
-
- return ret;
-}
-
-static int ide_pm_execute_rq(struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- if (unlikely(blk_queue_dying(q))) {
- rq->rq_flags |= RQF_QUIET;
- scsi_req(rq)->result = -ENXIO;
- blk_mq_end_request(rq, BLK_STS_OK);
- return -ENXIO;
- }
- blk_execute_rq(NULL, rq, true);
-
- return scsi_req(rq)->result ? -EIO : 0;
-}
-
-int generic_ide_resume(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- struct ide_pm_state rqpm;
- int err;
-
- blk_mq_start_stopped_hw_queues(drive->queue, true);
-
- if (ide_port_acpi(hwif)) {
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
-
- ide_acpi_exec_tfs(drive);
- }
-
- memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
- ide_req(rq)->type = ATA_PRIV_PM_RESUME;
- ide_req(rq)->special = &rqpm;
- rqpm.pm_step = IDE_PM_START_RESUME;
- rqpm.pm_state = PM_EVENT_ON;
-
- err = ide_pm_execute_rq(rq);
- blk_put_request(rq);
-
- if (err == 0 && dev->driver) {
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (drv->resume)
- drv->resume(drive);
- }
-
- return err;
-}
-
-void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
-
-#ifdef DEBUG_PM
- printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
- drive->name, pm->pm_step);
-#endif
- if (drive->media != ide_disk)
- return;
-
- switch (pm->pm_step) {
- case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
- if (pm->pm_state == PM_EVENT_FREEZE)
- pm->pm_step = IDE_PM_COMPLETED;
- else
- pm->pm_step = IDE_PM_STANDBY;
- break;
- case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- pm->pm_step = IDE_PM_COMPLETED;
- break;
- case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
- pm->pm_step = IDE_PM_IDLE;
- break;
- case IDE_PM_IDLE: /* Resume step 2 (idle)*/
- pm->pm_step = IDE_PM_RESTORE_DMA;
- break;
- }
-}
-
-ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
- struct ide_cmd cmd = { };
-
- switch (pm->pm_step) {
- case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
- if (drive->media != ide_disk)
- break;
- /* Not supported? Switch to next step now. */
- if (ata_id_flush_enabled(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
- ide_complete_power_step(drive, rq);
- return ide_stopped;
- }
- if (ata_id_flush_ext_enabled(drive->id))
- cmd.tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd.tf.command = ATA_CMD_FLUSH;
- goto out_do_tf;
- case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- cmd.tf.command = ATA_CMD_STANDBYNOW1;
- goto out_do_tf;
- case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
- ide_set_max_pio(drive);
- /*
- * skip IDE_PM_IDLE for ATAPI devices
- */
- if (drive->media != ide_disk)
- pm->pm_step = IDE_PM_RESTORE_DMA;
- else
- ide_complete_power_step(drive, rq);
- return ide_stopped;
- case IDE_PM_IDLE: /* Resume step 2 (idle) */
- cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
- goto out_do_tf;
- case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
- /*
- * Right now, all we do is call ide_set_dma(drive),
- * we could be smarter and check for current xfer_speed
- * in struct drive etc...
- */
- if (drive->hwif->dma_ops == NULL)
- break;
- /*
- * TODO: respect IDE_DFLAG_USING_DMA
- */
- ide_set_dma(drive);
- break;
- }
-
- pm->pm_step = IDE_PM_COMPLETED;
-
- return ide_stopped;
-
-out_do_tf:
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_NODATA;
-
- return do_rw_taskfile(drive, &cmd);
-}
-
-/**
- * ide_complete_pm_rq - end the current Power Management request
- * @drive: target drive
- * @rq: request
- *
- * This function cleans up the current PM request and stops the queue
- * if necessary.
- */
-void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
-{
- struct request_queue *q = drive->queue;
- struct ide_pm_state *pm = ide_req(rq)->special;
-
- ide_complete_power_step(drive, rq);
- if (pm->pm_step != IDE_PM_COMPLETED)
- return;
-
-#ifdef DEBUG_PM
- printk("%s: completing PM request, %s\n", drive->name,
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
-#endif
- if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
- blk_mq_stop_hw_queues(q);
- else
- drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-
- drive->hwif->rq = NULL;
-
- blk_mq_end_request(rq, BLK_STS_OK);
-}
-
-void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
-
- if (blk_rq_is_private(rq) &&
- ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
- pm->pm_step == IDE_PM_START_SUSPEND)
- /* Mark drive blocked when starting the suspend sequence. */
- drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (blk_rq_is_private(rq) &&
- ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
- pm->pm_step == IDE_PM_START_RESUME) {
- /*
- * The first thing we do on wakeup is to wait for BSY bit to
- * go away (with a looong timeout) as a drive on this hwif may
- * just be POSTing itself.
- * We do that before even selecting as the "other" device on
- * the bus may be broken enough to walk on our toes at this
- * point.
- */
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- struct request_queue *q = drive->queue;
- int rc;
-#ifdef DEBUG_PM
- printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
- tp_ops->dev_select(drive);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- rc = ide_wait_not_busy(hwif, 100000);
- if (rc)
- printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-
- blk_mq_start_hw_queues(q);
- }
-}
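
ide_complete_power_step() and ide_start_power_step() together walk a short linear state machine: flush cache, then standby on suspend; restore PIO, idle, then restore DMA on resume. A sketch of the step ordering, mirroring the IDE_PM_* constants with the hardware access elided:

#include <stdio.h>

enum pm_step {
	PM_FLUSH_CACHE,	/* suspend 1: FLUSH CACHE (EXT) */
	PM_STANDBY,	/* suspend 2: STANDBY IMMEDIATE */
	PM_RESTORE_PIO,	/* resume 1: re-program PIO timings */
	PM_IDLE,	/* resume 2: IDLE IMMEDIATE */
	PM_RESTORE_DMA,	/* resume 3: re-enable DMA */
	PM_COMPLETED,
};

/*
 * Advance one step; 'freeze' skips STANDBY, as the driver does
 * for PM_EVENT_FREEZE.
 */
static enum pm_step next_step(enum pm_step s, int freeze)
{
	switch (s) {
	case PM_FLUSH_CACHE:
		return freeze ? PM_COMPLETED : PM_STANDBY;
	case PM_STANDBY:
		return PM_COMPLETED;
	case PM_RESTORE_PIO:
		return PM_IDLE;
	case PM_IDLE:
		return PM_RESTORE_DMA;
	default:
		return PM_COMPLETED;
	}
}

int main(void)
{
	for (enum pm_step s = PM_FLUSH_CACHE; s != PM_COMPLETED;
	     s = next_step(s, 0))
		printf("suspend step %d\n", (int)s);
	return 0;
}
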
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
deleted file mode 100644
index fc541f1cf8de..000000000000
--- a/drivers/ide/ide-pnp.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * This file provides autodetection for ISA PnP IDE interfaces.
- * It was tested with the "ESS ES1868 Plug and Play AudioDrive" IDE interface.
- *
- * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
- */
-
-#include <linux/init.h>
-#include <linux/pnp.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#define DRV_NAME "ide-pnp"
-
-/* Add your devices here :)) */
-static const struct pnp_device_id idepnp_devices[] = {
- /* Generic ESDI/IDE/ATA compatible hard disk controller */
- {.id = "PNP0600", .driver_data = 0},
- {.id = ""}
-};
-
-static const struct ide_port_info ide_pnp_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
-{
- struct ide_host *host;
- unsigned long base, ctl;
- int rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
-
- if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
- return -1;
-
- base = pnp_port_start(dev, 0);
- ctl = pnp_port_start(dev, 1);
-
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, base, ctl);
- hw.irq = pnp_irq(dev, 0);
-
- rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
- if (rc)
- goto out;
-
- pnp_set_drvdata(dev, host);
-
- return 0;
-out:
- release_region(ctl, 1);
- release_region(base, 8);
-
- return rc;
-}
-
-static void idepnp_remove(struct pnp_dev *dev)
-{
- struct ide_host *host = pnp_get_drvdata(dev);
-
- ide_host_remove(host);
-
- release_region(pnp_port_start(dev, 1), 1);
- release_region(pnp_port_start(dev, 0), 8);
-}
-
-static struct pnp_driver idepnp_driver = {
- .name = "ide",
- .id_table = idepnp_devices,
- .probe = idepnp_probe,
- .remove = idepnp_remove,
-};
-
-module_pnp_driver(idepnp_driver);
-MODULE_LICENSE("GPL");
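
idepnp_probe() above follows the usual claim-then-unwind resource pattern: regions are requested in order and released in reverse on any later failure. A condensed sketch with the resource API reduced to stubs:

#include <stdio.h>

/* stand-ins for request_region() / release_region() */
static int claim(const char *what)
{
	printf("claim %s\n", what);
	return 1;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int probe(int host_add_fails)
{
	if (!claim("base"))
		return -1;
	if (!claim("ctl"))
		goto err_base;
	if (host_add_fails)	/* e.g. ide_host_add() returning an error */
		goto err_ctl;
	return 0;

err_ctl:
	release("ctl");
err_base:
	release("base");
	return -1;
}

int main(void)
{
	return probe(1) == -1 ? 0 : 1;
}
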
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
deleted file mode 100644
index aefd74c0d862..000000000000
--- a/drivers/ide/ide-probe.c
+++ /dev/null
@@ -1,1623 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This is the IDE probe module, as evolved from hd.c and ide.c.
- *
- * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
- * by Andrea Arcangeli
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/spinlock.h>
-#include <linux/kmod.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-/**
- * generic_id - add a generic drive id
- * @drive: drive to make an ID block for
- *
- * Add a fake id field to the drive we are passed. This allows
- * us to skip a ton of NULL checks (which people always miss)
- * and makes drive properties unconditional outside of this file
- */
-
-static void generic_id(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- id[ATA_ID_CUR_CYLS] = id[ATA_ID_CYLS] = drive->cyl;
- id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head;
- id[ATA_ID_CUR_SECTORS] = id[ATA_ID_SECTORS] = drive->sect;
-}
-
-static void ide_disk_init_chs(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- /* Extract geometry if we did not already have one for the drive */
- if (!drive->cyl || !drive->head || !drive->sect) {
- drive->cyl = drive->bios_cyl = id[ATA_ID_CYLS];
- drive->head = drive->bios_head = id[ATA_ID_HEADS];
- drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
- }
-
- /* Handle logical geometry translation by the drive */
- if (ata_id_current_chs_valid(id)) {
- drive->cyl = id[ATA_ID_CUR_CYLS];
- drive->head = id[ATA_ID_CUR_HEADS];
- drive->sect = id[ATA_ID_CUR_SECTORS];
- }
-
- /* Use physical geometry if what we have still makes no sense */
- if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
- drive->cyl = id[ATA_ID_CYLS];
- drive->head = id[ATA_ID_HEADS];
- drive->sect = id[ATA_ID_SECTORS];
- }
-}
-
-static void ide_disk_init_mult_count(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;
-
- if (max_multsect) {
- if ((max_multsect / 2) > 1)
- id[ATA_ID_MULTSECT] = max_multsect | 0x100;
- else
- id[ATA_ID_MULTSECT] &= ~0x1ff;
-
- drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
-
- if (drive->mult_req)
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- }
-}
-
-static void ide_classify_ata_dev(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- int is_cfa = ata_id_is_cfa(id);
-
- /* CF devices are *not* removable in Linux definition of the term */
- if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
-
- drive->media = ide_disk;
-
- if (!ata_id_has_unload(drive->id))
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
-
- printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
- is_cfa ? "CFA" : "ATA");
-}
-
-static void ide_classify_atapi_dev(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;
-
- printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
- switch (type) {
- case ide_floppy:
- if (!strstr(m, "CD-ROM")) {
- if (!strstr(m, "oppy") &&
- !strstr(m, "poyp") &&
- !strstr(m, "ZIP"))
- printk(KERN_CONT "cdrom or floppy?, assuming ");
- if (drive->media != ide_cdrom) {
- printk(KERN_CONT "FLOPPY");
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
- break;
- }
- }
- /* Early cdrom models used zero */
- type = ide_cdrom;
- fallthrough;
- case ide_cdrom:
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
-#ifdef CONFIG_PPC
- /* kludge for Apple PowerBook internal zip */
- if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
- printk(KERN_CONT "FLOPPY");
- type = ide_floppy;
- break;
- }
-#endif
- printk(KERN_CONT "CD/DVD-ROM");
- break;
- case ide_tape:
- printk(KERN_CONT "TAPE");
- break;
- case ide_optical:
- printk(KERN_CONT "OPTICAL");
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
- break;
- default:
- printk(KERN_CONT "UNKNOWN (type %d)", type);
- break;
- }
-
- printk(KERN_CONT " drive\n");
- drive->media = type;
- /* an ATAPI device ignores DRDY */
- drive->ready_stat = 0;
- if (ata_id_cdb_intr(id))
- drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
- drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
- /* we don't do head unloading on ATAPI devices */
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
-}
-
-/**
- * do_identify - identify a drive
- * @drive: drive to identify
- * @cmd: command used
- * @id: buffer for IDENTIFY data
- *
- * Called when we have issued a drive identify command to
- * read and parse the results. This function is run with
- * interrupts disabled.
- */
-
-static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
-{
- ide_hwif_t *hwif = drive->hwif;
- char *m = (char *)&id[ATA_ID_PROD];
- unsigned long flags;
- int bswap = 1;
-
- /* local CPU only; some systems need this */
- local_irq_save(flags);
- /* read 512 bytes of id info */
- hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore(flags);
-
- drive->dev_flags |= IDE_DFLAG_ID_READ;
-#ifdef DEBUG
- printk(KERN_INFO "%s: dumping identify data\n", drive->name);
- ide_dump_identify((u8 *)id);
-#endif
- ide_fix_driveid(id);
-
- /*
- * ATA_CMD_ID_ATA returns little-endian info,
- * ATA_CMD_ID_ATAPI *usually* returns little-endian info.
- */
- if (cmd == ATA_CMD_ID_ATAPI) {
- if ((m[0] == 'N' && m[1] == 'E') || /* NEC */
- (m[0] == 'F' && m[1] == 'X') || /* Mitsumi */
- (m[0] == 'P' && m[1] == 'i')) /* Pioneer */
- /* Vertos drives may still be weird */
- bswap ^= 1;
- }
-
- ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
- ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
- ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);
-
- /* we depend on this a lot! */
- m[ATA_ID_PROD_LEN - 1] = '\0';
-
- if (strstr(m, "E X A B Y T E N E S T"))
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- else
- drive->dev_flags |= IDE_DFLAG_PRESENT;
-}
-
-/**
- * ide_dev_read_id - send ATA/ATAPI IDENTIFY command
- * @drive: drive to identify
- * @cmd: command to use
- * @id: buffer for IDENTIFY data
- * @irq_ctx: flag set when called from the IRQ context
- *
- * Sends an ATA(PI) IDENTIFY request to a drive and waits for a response.
- *
- * Returns: 0 device was identified
- * 1 device timed-out (no response to identify request)
- * 2 device aborted the command (refused to identify itself)
- */
-
-int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- int use_altstatus = 0, rc;
- unsigned long timeout;
- u8 s = 0, a = 0;
-
- /*
- * Disable device IRQ. Otherwise we'll get spurious interrupts
- * during the identify phase that the IRQ handler isn't expecting.
- */
- if (io_ports->ctl_addr)
- tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
-
- /* take a deep breath */
- if (irq_ctx)
- mdelay(50);
- else
- msleep(50);
-
- if (io_ports->ctl_addr &&
- (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
- a = tp_ops->read_altstatus(hwif);
- s = tp_ops->read_status(hwif);
- if ((a ^ s) & ~ATA_SENSE)
- /* ancient Seagate drives, broken interfaces */
- printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
- "instead of ALTSTATUS(0x%02x)\n",
- drive->name, s, a);
- else
- /* use non-intrusive polling */
- use_altstatus = 1;
- }
-
- /* set the Features register for the ATAPI
- * identify command to be sure of a reply
- */
- if (cmd == ATA_CMD_ID_ATAPI) {
- struct ide_taskfile tf;
-
- memset(&tf, 0, sizeof(tf));
- /* disable DMA & overlap */
- tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE);
- }
-
- /* ask drive for ID */
- tp_ops->exec_command(hwif, cmd);
-
- timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
-
- /* wait for IRQ and ATA_DRQ */
- if (irq_ctx) {
- rc = __ide_wait_stat(drive, ATA_DRQ, BAD_R_STAT, timeout, &s);
- if (rc)
- return 1;
- } else {
- rc = ide_busy_sleep(drive, timeout, use_altstatus);
- if (rc)
- return 1;
-
- msleep(50);
- s = tp_ops->read_status(hwif);
- }
-
- if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
- /* drive returned ID */
- do_identify(drive, cmd, id);
- /* drive responded with ID */
- rc = 0;
- /* clear drive IRQ */
- (void)tp_ops->read_status(hwif);
- } else {
- /* drive refused ID */
- rc = 2;
- }
- return rc;
-}
-
-int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat;
-
- timeout += jiffies;
-
- do {
- msleep(50); /* give drive a breather */
- stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
- : hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- return 0;
- } while (time_before(jiffies, timeout));
-
- printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
-
- return 1; /* drive timed-out */
-}
-
-static u8 ide_read_device(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE);
-
- return tf.device;
-}
-
-/**
- * do_probe - probe an IDE device
- * @drive: drive to probe
- * @cmd: command to use
- *
- * do_probe() has the difficult job of finding a drive if it exists,
- * without getting hung up if it doesn't exist, without trampling on
- * ethernet cards, and without leaving any IRQs dangling to haunt us later.
- *
- * If a drive is "known" to exist (from CMOS or kernel parameters),
- * but does not respond right away, the probe will "hang in there"
- * for the maximum wait time (about 30 seconds), otherwise it will
- * exit much more quickly.
- *
- * Returns: 0 device was identified
- * 1 device timed-out (no response to identify request)
- * 2 device aborted the command (refused to identify itself)
- * 3 bad status from device (possible for ATAPI drives)
- * 4 probe was not attempted because failure was obvious
- */
-
-static int do_probe (ide_drive_t *drive, u8 cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- u16 *id = drive->id;
- int rc;
- u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;
-
- /* avoid waiting for inappropriate probes */
- if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
- return 4;
-
-#ifdef DEBUG
- printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
- drive->name, present, drive->media,
- (cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
-#endif
-
- /* needed for some systems
- * (e.g. crw9624 as drive0 with disk as slave)
- */
- msleep(50);
- tp_ops->dev_select(drive);
- msleep(50);
-
- if (ide_read_device(drive) != drive->select && present == 0) {
- if (drive->dn & 1) {
- /* exit with drive0 selected */
- tp_ops->dev_select(hwif->devices[0]);
- /* allow ATA_BUSY to assert & clear */
- msleep(50);
- }
- /* no i/f present: mmm.. this should be a 4 -ml */
- return 3;
- }
-
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
- present || cmd == ATA_CMD_ID_ATAPI) {
- rc = ide_dev_read_id(drive, cmd, id, 0);
- if (rc)
- /* failed: try again */
- rc = ide_dev_read_id(drive, cmd, id, 0);
-
- stat = tp_ops->read_status(hwif);
-
- if (stat == (ATA_BUSY | ATA_DRDY))
- return 4;
-
- if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
- printk(KERN_ERR "%s: no response (status = 0x%02x), "
- "resetting drive\n", drive->name, stat);
- msleep(50);
- tp_ops->dev_select(drive);
- msleep(50);
- tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
- rc = ide_dev_read_id(drive, cmd, id, 0);
- }
-
- /* ensure drive IRQ is clear */
- stat = tp_ops->read_status(hwif);
-
- if (rc == 1)
- printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
- drive->name, stat);
- } else {
- /* not present or maybe ATAPI */
- rc = 3;
- }
- if (drive->dn & 1) {
- /* exit with drive0 selected */
- tp_ops->dev_select(hwif->devices[0]);
- msleep(50);
- /* ensure drive irq is clear */
- (void)tp_ops->read_status(hwif);
- }
- return rc;
-}
-
-/**
- * probe_for_drive - upper level drive probe
- * @drive: drive to probe for
- *
- * probe_for_drive() tests for the existence of a given drive using do_probe()
- * and presents things to the user as needed.
- *
- * Returns: 0 no device was found
- * 1 device was found
- * (note: IDE_DFLAG_PRESENT might still not be set)
- */
-
-static u8 probe_for_drive(ide_drive_t *drive)
-{
- char *m;
- int rc;
- u8 cmd;
-
- drive->dev_flags &= ~IDE_DFLAG_ID_READ;
-
- m = (char *)&drive->id[ATA_ID_PROD];
- strcpy(m, "UNKNOWN");
-
- /* skip probing? */
- if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
- /* if !(success||timed-out) */
- cmd = ATA_CMD_ID_ATA;
- rc = do_probe(drive, cmd);
- if (rc >= 2) {
- /* look for ATAPI device */
- cmd = ATA_CMD_ID_ATAPI;
- rc = do_probe(drive, cmd);
- }
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return 0;
-
- /* identification failed? */
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- if (drive->media == ide_disk) {
- printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
- drive->name, drive->cyl,
- drive->head, drive->sect);
- } else if (drive->media == ide_cdrom) {
- printk(KERN_INFO "%s: ATAPI cdrom (?)\n", drive->name);
- } else {
- /* nuke it */
- printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- }
- } else {
- if (cmd == ATA_CMD_ID_ATAPI)
- ide_classify_atapi_dev(drive);
- else
- ide_classify_ata_dev(drive);
- }
- }
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return 0;
-
- /* The drive wasn't being helpful. Add generic info only */
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- generic_id(drive);
- return 1;
- }
-
- if (drive->media == ide_disk) {
- ide_disk_init_chs(drive);
- ide_disk_init_mult_count(drive);
- }
-
- return 1;
-}
-
-static void hwif_release_dev(struct device *dev)
-{
- ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
-
- complete(&hwif->gendev_rel_comp);
-}
-
-static int ide_register_port(ide_hwif_t *hwif)
-{
- int ret;
-
- /* register with global device tree */
- dev_set_name(&hwif->gendev, "%s", hwif->name);
- dev_set_drvdata(&hwif->gendev, hwif);
- if (hwif->gendev.parent == NULL)
- hwif->gendev.parent = hwif->dev;
- hwif->gendev.release = hwif_release_dev;
-
- ret = device_register(&hwif->gendev);
- if (ret < 0) {
- printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
- __func__, ret);
- goto out;
- }
-
- hwif->portdev = device_create(ide_port_class, &hwif->gendev,
- MKDEV(0, 0), hwif, "%s", hwif->name);
- if (IS_ERR(hwif->portdev)) {
- ret = PTR_ERR(hwif->portdev);
- device_unregister(&hwif->gendev);
- }
-out:
- return ret;
-}
-
-/**
- * ide_port_wait_ready - wait for port to become ready
- * @hwif: IDE port
- *
- * This is needed on some PPCs and a bunch of BIOS-less embedded
- * platforms. Typical cases are:
- *
- * - The firmware hard-reset the disk before booting the kernel;
- * the drive is still doing its power-on reset sequence, which
- * can take up to 30 seconds.
- *
- * - The firmware does nothing (or no firmware), the device is
- * still in POST state (same as above actually).
- *
- * - Some CD/DVD/Writer combo drives tend to drive the bus during
- * their reset sequence even when they are non-selected slave
- * devices, thus preventing discovery of the main HD.
- *
- * Doing this wait-for-non-busy should not harm any existing
- * configuration and should fix some issues like the above.
- *
- * BenH.
- *
- * Returns 0 on success, error code (< 0) otherwise.
- */
-
-static int ide_port_wait_ready(ide_hwif_t *hwif)
-{
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- ide_drive_t *drive;
- int i, rc;
-
- printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);
-
- /* Let HW settle down a bit from whatever init state we
- * come from */
- mdelay(2);
-
- /* Wait for the BSY bit to go away; the spec timeout is 30 seconds,
- * but I know of at least one disk that takes 31 seconds, so I use 35
- * here to be safe
- */
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- return rc;
-
- /* Now make sure both master & slave are ready */
- ide_port_for_each_dev(i, drive, hwif) {
- /* Ignore disks that we will not probe for later. */
- if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
- (drive->dev_flags & IDE_DFLAG_PRESENT)) {
- tp_ops->dev_select(drive);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- mdelay(2);
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- goto out;
- } else
- printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
- drive->name);
- }
-out:
- /* Exit function with master reselected (let's be sane) */
- if (i)
- tp_ops->dev_select(hwif->devices[0]);
-
- return rc;
-}
-
-/**
- * ide_undecoded_slave - look for bad CF adapters
- * @dev1: slave device
- *
- * Analyse the drives on the interface and attempt to decide if we
- * have the same drive viewed twice. This occurs with crap CF adapters
- * and PCMCIA sometimes.
- */
-
-void ide_undecoded_slave(ide_drive_t *dev1)
-{
- ide_drive_t *dev0 = dev1->hwif->devices[0];
-
- if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return;
-
- /* If the models don't match they are not the same product */
- if (strcmp((char *)&dev0->id[ATA_ID_PROD],
- (char *)&dev1->id[ATA_ID_PROD]))
- return;
-
- /* Serial numbers do not match */
- if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
- (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
- return;
-
- /* No serial number, thankfully very rare for CF */
- if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
- return;
-
- /* Appears to be an IDE flash adapter with decode bugs */
- printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");
-
- dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
-}
-
-EXPORT_SYMBOL_GPL(ide_undecoded_slave);
-
-static int ide_probe_port(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- unsigned int irqd;
- int i, rc = -ENODEV;
-
- BUG_ON(hwif->present);
-
- if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
- (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
- return -EACCES;
-
- /*
- * We must always disable IRQ, as probe_for_drive will assert IRQ, but
- * we'll install our IRQ driver much later...
- */
- irqd = hwif->irq;
- if (irqd)
- disable_irq(hwif->irq);
-
- if (ide_port_wait_ready(hwif) == -EBUSY)
- printk(KERN_DEBUG "%s: Wait for ready failed before probe!\n", hwif->name);
-
- /*
- * Second drive should only exist if first drive was found,
- * but a lot of cdrom drives are configured as single slaves.
- */
- ide_port_for_each_dev(i, drive, hwif) {
- (void) probe_for_drive(drive);
- if (drive->dev_flags & IDE_DFLAG_PRESENT)
- rc = 0;
- }
-
- /*
- * Use cached IRQ number. It might be (and is...) changed by probe
- * code above
- */
- if (irqd)
- enable_irq(irqd);
-
- return rc;
-}
-
-static void ide_port_tune_devices(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_check_nien_quirk_list(drive);
-
- if (port_ops && port_ops->quirkproc)
- port_ops->quirkproc(drive);
- }
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_set_max_pio(drive);
-
- drive->dev_flags |= IDE_DFLAG_NICE1;
-
- if (hwif->dma_ops)
- ide_set_dma(drive);
- }
-}
-
-static void ide_initialize_rq(struct request *rq)
-{
- struct ide_request *req = blk_mq_rq_to_pdu(rq);
-
- req->special = NULL;
- scsi_req_init(&req->sreq);
- req->sreq.sense = req->sense;
-}
-
-static const struct blk_mq_ops ide_mq_ops = {
- .queue_rq = ide_queue_rq,
- .initialize_rq_fn = ide_initialize_rq,
-};
-
-/*
- * init request queue
- */
-static int ide_init_queue(ide_drive_t *drive)
-{
- struct request_queue *q;
- ide_hwif_t *hwif = drive->hwif;
- int max_sectors = 256;
- int max_sg_entries = PRD_ENTRIES;
- struct blk_mq_tag_set *set;
-
- /*
- * Our default setup assumes the normal IDE case:
- * 64K segmenting, standard PRD setup and LBA28.
- * Some drivers then impose their own limits; for
- * LBA48 we could raise the limit, but as yet do not.
- */
-
- set = &drive->tag_set;
- set->ops = &ide_mq_ops;
- set->nr_hw_queues = 1;
- set->queue_depth = 32;
- set->reserved_tags = 1;
- set->cmd_size = sizeof(struct ide_request);
- set->numa_node = hwif_to_node(hwif);
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- if (blk_mq_alloc_tag_set(set))
- return 1;
-
- q = blk_mq_init_queue(set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(set);
- return 1;
- }
-
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-
- q->queuedata = drive;
- blk_queue_segment_boundary(q, 0xffff);
-
- if (hwif->rqsize < max_sectors)
- max_sectors = hwif->rqsize;
- blk_queue_max_hw_sectors(q, max_sectors);
-
-#ifdef CONFIG_PCI
- /* When we have an IOMMU, we may have a problem where pci_map_sg()
- * creates segments that don't completely match our boundary
- * requirements and thus need to be broken up again. Because it
- * doesn't align properly either, we may actually have to break up
- * into more segments than we got in the first place; the worst
- * case is twice as many.
- * This will be fixed once we teach pci_map_sg() about our boundary
- * requirements, hopefully soon. *FIXME*
- */
- max_sg_entries >>= 1;
-#endif /* CONFIG_PCI */
-
- blk_queue_max_segments(q, max_sg_entries);
-
- /* assign drive queue */
- drive->queue = q;
-
- return 0;
-}
-
-static DEFINE_MUTEX(ide_cfg_mtx);
-
-/*
- * For any present drive:
- * - allocate the block device queue
- */
-static int ide_port_setup_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i, j = 0;
-
- mutex_lock(&ide_cfg_mtx);
- ide_port_for_each_present_dev(i, drive, hwif) {
- if (ide_init_queue(drive)) {
- printk(KERN_ERR "ide: failed to init %s\n",
- drive->name);
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- continue;
- }
-
- j++;
- }
- mutex_unlock(&ide_cfg_mtx);
-
- return j;
-}
-
-static void ide_host_enable_irqs(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- /* clear any pending IRQs */
- hwif->tp_ops->read_status(hwif);
-
- /* unmask IRQs */
- if (hwif->io_ports.ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- }
-}
-
-/*
- * This routine sets up the IRQ for an IDE interface.
- */
-static int init_irq (ide_hwif_t *hwif)
-{
- struct ide_io_ports *io_ports = &hwif->io_ports;
- struct ide_host *host = hwif->host;
- irq_handler_t irq_handler = host->irq_handler;
- int sa = host->irq_flags;
-
- if (irq_handler == NULL)
- irq_handler = ide_intr;
-
- if (!host->get_lock)
- if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
- goto out_up;
-
-#if !defined(__mc68000__)
- printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
- io_ports->data_addr, io_ports->status_addr,
- io_ports->ctl_addr, hwif->irq);
-#else
- printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
- io_ports->data_addr, hwif->irq);
-#endif /* __mc68000__ */
- if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
- printk(KERN_CONT " (serialized)");
- printk(KERN_CONT "\n");
-
- return 0;
-out_up:
- return 1;
-}
-
-static void ata_probe(dev_t dev)
-{
- request_module("ide-disk");
- request_module("ide-cd");
- request_module("ide-tape");
- request_module("ide-floppy");
-}
-
-void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int unit = drive->dn & 1;
-
- disk->major = hwif->major;
- disk->first_minor = unit << PARTN_BITS;
- sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
- disk->queue = drive->queue;
-}
-
-EXPORT_SYMBOL_GPL(ide_init_disk);
-
-static void drive_release_dev (struct device *dev)
-{
- ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
-
- ide_proc_unregister_device(drive);
-
- if (drive->sense_rq)
- blk_mq_free_request(drive->sense_rq);
-
- blk_cleanup_queue(drive->queue);
- drive->queue = NULL;
- blk_mq_free_tag_set(&drive->tag_set);
-
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
-
- complete(&drive->gendev_rel_comp);
-}
-
-static int hwif_init(ide_hwif_t *hwif)
-{
- if (!hwif->irq) {
- printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
- return 0;
- }
-
- if (__register_blkdev(hwif->major, hwif->name, ata_probe))
- return 0;
-
- if (!hwif->sg_max_nents)
- hwif->sg_max_nents = PRD_ENTRIES;
-
- hwif->sg_table = kmalloc_array(hwif->sg_max_nents,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!hwif->sg_table) {
- printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
- goto out;
- }
-
- sg_init_table(hwif->sg_table, hwif->sg_max_nents);
-
- if (init_irq(hwif)) {
- printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
- hwif->name, hwif->irq);
- goto out;
- }
-
- return 1;
-
-out:
- unregister_blkdev(hwif->major, hwif->name);
- return 0;
-}
-
-static void hwif_register_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- unsigned int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- struct device *dev = &drive->gendev;
- int ret;
-
- dev_set_name(dev, "%u.%u", hwif->index, i);
- dev_set_drvdata(dev, drive);
- dev->parent = &hwif->gendev;
- dev->bus = &ide_bus_type;
- dev->release = drive_release_dev;
-
- ret = device_register(dev);
- if (ret < 0)
- printk(KERN_WARNING "IDE: %s: device_register error: "
- "%d\n", __func__, ret);
- }
-}
-
-static void ide_port_init_devices(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- drive->dn = i + hwif->channel * 2;
-
- if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
- drive->io_32bit = 1;
- if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
- drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
- if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
-
- drive->pio_mode = XFER_PIO_0;
-
- if (port_ops && port_ops->init_dev)
- port_ops->init_dev(drive);
- }
-}
-
-static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
- const struct ide_port_info *d)
-{
- hwif->channel = port;
-
- hwif->chipset = d->chipset ? d->chipset : ide_pci;
-
- if (d->init_iops)
- d->init_iops(hwif);
-
- /* ->host_flags may be set by ->init_iops (or even earlier...) */
- hwif->host_flags |= d->host_flags;
- hwif->pio_mask = d->pio_mask;
-
- if (d->tp_ops)
- hwif->tp_ops = d->tp_ops;
-
- /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
- if ((hwif->host_flags & IDE_HFLAG_DTC2278) == 0 || hwif->channel == 0)
- hwif->port_ops = d->port_ops;
-
- hwif->swdma_mask = d->swdma_mask;
- hwif->mwdma_mask = d->mwdma_mask;
- hwif->ultra_mask = d->udma_mask;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- int rc;
-
- hwif->dma_ops = d->dma_ops;
-
- if (d->init_dma)
- rc = d->init_dma(hwif, d);
- else
- rc = ide_hwif_setup_dma(hwif, d);
-
- if (rc < 0) {
- printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
-
- hwif->dma_ops = NULL;
- hwif->dma_base = 0;
- hwif->swdma_mask = 0;
- hwif->mwdma_mask = 0;
- hwif->ultra_mask = 0;
- }
- }
-
- if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
- ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
- hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
-
- if (d->max_sectors)
- hwif->rqsize = d->max_sectors;
- else {
- if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
- (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
- hwif->rqsize = 256;
- else
- hwif->rqsize = 65536;
- }
-
- /* call chipset specific routine for each enabled port */
- if (d->init_hwif)
- d->init_hwif(hwif);
-}
-
-static void ide_port_cable_detect(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
- if (hwif->cbl != ATA_CBL_PATA40_SHORT)
- hwif->cbl = port_ops->cable_detect(hwif);
- }
-}
-
-/*
- * Deferred request list insertion handler
- */
-static void drive_rq_insert_work(struct work_struct *work)
-{
- ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- blk_status_t ret;
- LIST_HEAD(list);
-
- blk_mq_quiesce_queue(drive->queue);
-
- ret = BLK_STS_OK;
- spin_lock_irq(&hwif->lock);
- while (!list_empty(&drive->rq_list)) {
- rq = list_first_entry(&drive->rq_list, struct request, queuelist);
- list_del_init(&rq->queuelist);
-
- spin_unlock_irq(&hwif->lock);
- ret = ide_issue_rq(drive, rq, true);
- spin_lock_irq(&hwif->lock);
- }
- spin_unlock_irq(&hwif->lock);
-
- blk_mq_unquiesce_queue(drive->queue);
-
- if (ret != BLK_STS_OK)
- kblockd_schedule_work(&drive->rq_work);
-}
-
-static const u8 ide_hwif_to_major[] =
- { IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
- IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
-
-static void ide_port_init_devices_data(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- u8 j = (hwif->index * MAX_DRIVES) + i;
- u16 *saved_id = drive->id;
-
- memset(drive, 0, sizeof(*drive));
- memset(saved_id, 0, SECTOR_SIZE);
- drive->id = saved_id;
-
- drive->media = ide_disk;
- drive->select = (i << 4) | ATA_DEVICE_OBS;
- drive->hwif = hwif;
- drive->ready_stat = ATA_DRDY;
- drive->bad_wstat = BAD_W_STAT;
- drive->special_flags = IDE_SFLAG_RECALIBRATE |
- IDE_SFLAG_SET_GEOMETRY;
- drive->name[0] = 'h';
- drive->name[1] = 'd';
- drive->name[2] = 'a' + j;
- drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
-
- INIT_LIST_HEAD(&drive->list);
- init_completion(&drive->gendev_rel_comp);
-
- INIT_WORK(&drive->rq_work, drive_rq_insert_work);
- INIT_LIST_HEAD(&drive->rq_list);
- }
-}
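-
-/*
- * Editor's note, a worked example of the naming above (not from the
- * original file): unit i on port hwif->index gets j = index * MAX_DRIVES + i,
- * and with MAX_DRIVES == 2 that maps
- *
- *   ide0 unit 0 -> j = 0 -> "hda"     ide1 unit 0 -> j = 2 -> "hdc"
- *   ide0 unit 1 -> j = 1 -> "hdb"     ide1 unit 1 -> j = 3 -> "hdd"
- *
- * i.e. the classic hda..hdd naming for two legacy ports.
- */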
-
-static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
-{
- /* fill in any non-zero initial values */
- hwif->index = index;
- hwif->major = ide_hwif_to_major[index];
-
- hwif->name[0] = 'i';
- hwif->name[1] = 'd';
- hwif->name[2] = 'e';
- hwif->name[3] = '0' + index;
-
- spin_lock_init(&hwif->lock);
-
- timer_setup(&hwif->timer, ide_timer_expiry, 0);
-
- init_completion(&hwif->gendev_rel_comp);
-
- hwif->tp_ops = &default_tp_ops;
-
- ide_port_init_devices_data(hwif);
-}
-
-static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
-{
- memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
- hwif->irq = hw->irq;
- hwif->dev = hw->dev;
- hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
- hwif->config_data = hw->config;
-}
-
-static unsigned int ide_indexes;
-
-/**
- * ide_find_port_slot - find free port slot
- * @d: IDE port info
- *
- * Return the new port slot index or -ENOENT if we are out of free slots.
- */
-
-static int ide_find_port_slot(const struct ide_port_info *d)
-{
- int idx = -ENOENT;
- u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
- u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
-
- /*
- * Claim an unassigned slot.
- *
- * Give preference to claiming other slots before claiming ide0/ide1,
- * just in case there's another interface yet-to-be-scanned
- * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
- *
- * Unless there is a bootable card that does not use the standard
- * ports 0x1f0/0x170 (the ide0/ide1 defaults).
- */
- mutex_lock(&ide_cfg_mtx);
- if (bootable) {
- if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
- idx = ffz(ide_indexes | i);
- } else {
- if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
- idx = ffz(ide_indexes | 3);
- else if ((ide_indexes & 3) != 3)
- idx = ffz(ide_indexes);
- }
- if (idx >= 0)
- ide_indexes |= (1 << idx);
- mutex_unlock(&ide_cfg_mtx);
-
- return idx;
-}
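-
-/*
- * Editor's note, a worked example of the slot policy above (not from the
- * original file).  Suppose ide0 is already claimed, so ide_indexes == 0x01:
- *
- *   non-bootable card:  idx = ffz(0x01 | 3) = ffz(0x03) = 2  -> ide2,
- *                       keeping ide1 free for a boot interface at 0x170;
- *   bootable card:      idx = ffz(0x01 | 0) = 1              -> ide1.
- *
- * Only when every slot above ide1 is taken does the non-bootable path fall
- * back to ffz(ide_indexes) and claim ide0/ide1 after all.
- */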
-
-static void ide_free_port_slot(int idx)
-{
- mutex_lock(&ide_cfg_mtx);
- ide_indexes &= ~(1 << idx);
- mutex_unlock(&ide_cfg_mtx);
-}
-
-static void ide_port_free_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- kfree(drive->id);
- kfree(drive);
- }
-}
-
-static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
-{
- ide_drive_t *drive;
- int i;
-
- for (i = 0; i < MAX_DRIVES; i++) {
- drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
- if (drive == NULL)
- goto out_nomem;
-
- /*
- * In order to keep things simple we have an id
- * block for all drives at all times. If the device
- * is pre-ATA or refuses ATA/ATAPI IDENTIFY, we
- * will add faked data to this.
- *
- * Also note that 0 everywhere means "can't do X"
- */
- drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
- if (drive->id == NULL)
- goto out_free_drive;
-
- hwif->devices[i] = drive;
- }
- return 0;
-
-out_free_drive:
- kfree(drive);
-out_nomem:
- ide_port_free_devices(hwif);
- return -ENOMEM;
-}
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *d,
- struct ide_hw **hws, unsigned int n_ports)
-{
- struct ide_host *host;
- struct device *dev = hws[0] ? hws[0]->dev : NULL;
- int node = dev ? dev_to_node(dev) : -1;
- int i;
-
- host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
- if (host == NULL)
- return NULL;
-
- for (i = 0; i < n_ports; i++) {
- ide_hwif_t *hwif;
- int idx;
-
- if (hws[i] == NULL)
- continue;
-
- hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
- if (hwif == NULL)
- continue;
-
- if (ide_port_alloc_devices(hwif, node) < 0) {
- kfree(hwif);
- continue;
- }
-
- idx = ide_find_port_slot(d);
- if (idx < 0) {
- printk(KERN_ERR "%s: no free slot for interface\n",
- d ? d->name : "ide");
- ide_port_free_devices(hwif);
- kfree(hwif);
- continue;
- }
-
- ide_init_port_data(hwif, idx);
-
- hwif->host = host;
-
- host->ports[i] = hwif;
- host->n_ports++;
- }
-
- if (host->n_ports == 0) {
- kfree(host);
- return NULL;
- }
-
- host->dev[0] = dev;
-
- if (d) {
- host->init_chipset = d->init_chipset;
- host->get_lock = d->get_lock;
- host->release_lock = d->release_lock;
- host->host_flags = d->host_flags;
- host->irq_flags = d->irq_flags;
- }
-
- return host;
-}
-EXPORT_SYMBOL_GPL(ide_host_alloc);
-
-static void ide_port_free(ide_hwif_t *hwif)
-{
- ide_port_free_devices(hwif);
- ide_free_port_slot(hwif->index);
- kfree(hwif);
-}
-
-static void ide_disable_port(ide_hwif_t *hwif)
-{
- struct ide_host *host = hwif->host;
- int i;
-
- printk(KERN_INFO "%s: disabling port\n", hwif->name);
-
- for (i = 0; i < MAX_HOST_PORTS; i++) {
- if (host->ports[i] == hwif) {
- host->ports[i] = NULL;
- host->n_ports--;
- }
- }
-
- ide_port_free(hwif);
-}
-
-int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
- struct ide_hw **hws)
-{
- ide_hwif_t *hwif, *mate = NULL;
- int i, j = 0;
-
- pr_warn("legacy IDE will be removed in 2021, please switch to libata\n"
- "Report any missing HW support to linux-ide@vger.kernel.org\n");
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL) {
- mate = NULL;
- continue;
- }
-
- ide_init_port_hw(hwif, hws[i]);
- ide_port_apply_params(hwif);
-
- if ((i & 1) && mate) {
- hwif->mate = mate;
- mate->mate = hwif;
- }
-
- mate = (i & 1) ? NULL : hwif;
-
- ide_init_port(hwif, i & 1, d);
- ide_port_cable_detect(hwif);
-
- hwif->port_flags |= IDE_PFLAG_PROBING;
-
- ide_port_init_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- if (ide_probe_port(hwif) == 0)
- hwif->present = 1;
-
- hwif->port_flags &= ~IDE_PFLAG_PROBING;
-
- if ((hwif->host_flags & IDE_HFLAG_4DRIVES) == 0 ||
- hwif->mate == NULL || hwif->mate->present == 0) {
- if (ide_register_port(hwif)) {
- ide_disable_port(hwif);
- continue;
- }
- }
-
- if (hwif->present)
- ide_port_tune_devices(hwif);
- }
-
- ide_host_enable_irqs(host);
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- if (hwif_init(hwif) == 0) {
- printk(KERN_INFO "%s: failed to initialize IDE "
- "interface\n", hwif->name);
- device_unregister(hwif->portdev);
- device_unregister(&hwif->gendev);
- ide_disable_port(hwif);
- continue;
- }
-
- if (hwif->present)
- if (ide_port_setup_devices(hwif) == 0) {
- hwif->present = 0;
- continue;
- }
-
- j++;
-
- ide_acpi_init_port(hwif);
-
- if (hwif->present)
- ide_acpi_port_init_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- ide_sysfs_register_port(hwif);
- ide_proc_register_port(hwif);
-
- if (hwif->present) {
- ide_proc_port_register_devices(hwif);
- hwif_register_devices(hwif);
- }
- }
-
- return j ? 0 : -1;
-}
-EXPORT_SYMBOL_GPL(ide_host_register);
-
-int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
- unsigned int n_ports, struct ide_host **hostp)
-{
- struct ide_host *host;
- int rc;
-
- host = ide_host_alloc(d, hws, n_ports);
- if (host == NULL)
- return -ENOMEM;
-
- rc = ide_host_register(host, d, hws);
- if (rc) {
- ide_host_free(host);
- return rc;
- }
-
- if (hostp)
- *hostp = host;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_host_add);
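-
-/*
- * Editor's sketch (not from the original file): a minimal ide_host_add()
- * caller, as a hypothetical platform driver might write it.  The port/ctl
- * addresses, IRQ and driver name are illustrative assumptions only.
- *
- *   static const struct ide_port_info foo_port_info = {
- *       .name       = "foo-ide",
- *       .host_flags = IDE_HFLAG_NON_BOOTABLE,
- *   };
- *
- *   struct ide_host *host;
- *   struct ide_hw hw, *hws[] = { &hw };
- *
- *   memset(&hw, 0, sizeof(hw));
- *   ide_std_init_ports(&hw, 0x1f0, 0x3f6);   // taskfile + ctl block
- *   hw.irq = 14;
- *   hw.dev = &pdev->dev;
- *
- *   return ide_host_add(&foo_port_info, hws, 1, &host);
- */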
-
-static void __ide_port_unregister_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- device_unregister(&drive->gendev);
- wait_for_completion(&drive->gendev_rel_comp);
- }
-}
-
-void ide_port_unregister_devices(ide_hwif_t *hwif)
-{
- mutex_lock(&ide_cfg_mtx);
- __ide_port_unregister_devices(hwif);
- hwif->present = 0;
- ide_port_init_devices_data(hwif);
- mutex_unlock(&ide_cfg_mtx);
-}
-EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
-
-/**
- * ide_unregister - free an IDE interface
- * @hwif: IDE interface
- *
- * Perform the final unregister of an IDE interface.
- *
- * Locking:
- * The caller must not hold the IDE locks.
- *
- * It is up to the caller to be sure there is no pending I/O here,
- * and that the interface will not be reopened (present/vanishing
- * locking isn't yet done BTW).
- */
-
-static void ide_unregister(ide_hwif_t *hwif)
-{
- mutex_lock(&ide_cfg_mtx);
-
- if (hwif->present) {
- __ide_port_unregister_devices(hwif);
- hwif->present = 0;
- }
-
- ide_proc_unregister_port(hwif);
-
- if (!hwif->host->get_lock)
- free_irq(hwif->irq, hwif);
-
- device_unregister(hwif->portdev);
- device_unregister(&hwif->gendev);
- wait_for_completion(&hwif->gendev_rel_comp);
-
- /*
- * Remove us from the kernel's knowledge
- */
- kfree(hwif->sg_table);
- unregister_blkdev(hwif->major, hwif->name);
-
- ide_release_dma_engine(hwif);
-
- mutex_unlock(&ide_cfg_mtx);
-}
-
-void ide_host_free(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif)
- ide_port_free(hwif);
- }
-
- kfree(host);
-}
-EXPORT_SYMBOL_GPL(ide_host_free);
-
-void ide_host_remove(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif)
- ide_unregister(hwif);
- }
-
- ide_host_free(host);
-}
-EXPORT_SYMBOL_GPL(ide_host_remove);
-
-void ide_port_scan(ide_hwif_t *hwif)
-{
- int rc;
-
- ide_port_apply_params(hwif);
- ide_port_cable_detect(hwif);
-
- hwif->port_flags |= IDE_PFLAG_PROBING;
-
- ide_port_init_devices(hwif);
-
- rc = ide_probe_port(hwif);
-
- hwif->port_flags &= ~IDE_PFLAG_PROBING;
-
- if (rc < 0)
- return;
-
- hwif->present = 1;
-
- ide_port_tune_devices(hwif);
- ide_port_setup_devices(hwif);
- ide_acpi_port_init_devices(hwif);
- hwif_register_devices(hwif);
- ide_proc_port_register_devices(hwif);
-}
-EXPORT_SYMBOL_GPL(ide_port_scan);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
deleted file mode 100644
index 15c17f3781ee..000000000000
--- a/drivers/ide/ide-proc.c
+++ /dev/null
@@ -1,633 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1997-1998 Mark Lord
- * Copyright (C) 2003 Red Hat
- *
- * Some code was moved here from ide.c, see it for original copyrights.
- */
-
-/*
- * This is the /proc/ide/ filesystem implementation.
- *
- * Drive/Driver settings can be retrieved by reading the drive's
- * "settings" files. e.g. "cat /proc/ide0/hda/settings"
- * To write a new value "val" into a specific setting "name", use:
- * echo "name:val" >/proc/ide/ide0/hda/settings
- */
-
-#include <linux/module.h>
-
-#include <linux/uaccess.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/ctype.h>
-#include <linux/ide.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-
-static struct proc_dir_entry *proc_ide_root;
-
-static int ide_imodel_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
- const char *name;
-
- switch (hwif->chipset) {
- case ide_generic: name = "generic"; break;
- case ide_pci: name = "pci"; break;
- case ide_cmd640: name = "cmd640"; break;
- case ide_dtc2278: name = "dtc2278"; break;
- case ide_ali14xx: name = "ali14xx"; break;
- case ide_qd65xx: name = "qd65xx"; break;
- case ide_umc8672: name = "umc8672"; break;
- case ide_ht6560b: name = "ht6560b"; break;
- case ide_4drives: name = "4drives"; break;
- case ide_pmac: name = "mac-io"; break;
- case ide_au1xxx: name = "au1xxx"; break;
- case ide_palm3710: name = "palm3710"; break;
- case ide_acorn: name = "acorn"; break;
- default: name = "(unknown)"; break;
- }
- seq_printf(m, "%s\n", name);
- return 0;
-}
-
-static int ide_mate_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
-
- if (hwif && hwif->mate)
- seq_printf(m, "%s\n", hwif->mate->name);
- else
- seq_printf(m, "(none)\n");
- return 0;
-}
-
-static int ide_channel_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
-
- seq_printf(m, "%c\n", hwif->channel ? '1' : '0');
- return 0;
-}
-
-static int ide_identify_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
- u8 *buf;
-
- if (!drive) {
- seq_putc(m, '\n');
- return 0;
- }
-
- buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (taskfile_lib_get_identify(drive, buf) == 0) {
- __le16 *val = (__le16 *)buf;
- int i;
-
- for (i = 0; i < SECTOR_SIZE / 2; i++) {
- seq_printf(m, "%04x%c", le16_to_cpu(val[i]),
- (i % 8) == 7 ? '\n' : ' ');
- }
- } else
- seq_putc(m, buf[0]);
- kfree(buf);
- return 0;
-}
-
-/**
- * ide_find_setting - find a specific setting
- * @st: setting table pointer
- * @name: setting name
- *
- * Scans the setting table for a matching entry and returns
- * it, or NULL if no entry is found. The caller must hold
- * ide_setting_mtx.
- */
-
-static
-const struct ide_proc_devset *ide_find_setting(const struct ide_proc_devset *st,
- char *name)
-{
- while (st->name) {
- if (strcmp(st->name, name) == 0)
- break;
- st++;
- }
- return st->name ? st : NULL;
-}
-
-/**
- * ide_read_setting - read an IDE setting
- * @drive: drive to read from
- * @setting: drive setting
- *
- * Read a drive setting and return the value. The caller
- * must hold the ide_setting_mtx when making this call.
- *
- * BUGS: the data and the error code share the same return
- * value, so a legitimate setting value of -EINVAL cannot be
- * told apart from an -EINVAL error.
- */
-
-static int ide_read_setting(ide_drive_t *drive,
- const struct ide_proc_devset *setting)
-{
- const struct ide_devset *ds = setting->setting;
- int val = -EINVAL;
-
- if (ds->get)
- val = ds->get(drive);
-
- return val;
-}
-
-/**
- * ide_write_setting - write an IDE setting
- * @drive: drive to write to
- * @setting: drive setting
- * @val: value
- *
- * Write a drive setting if it is possible. The caller
- * must hold the ide_setting_mtx when making this call.
- *
- * FIXME: This should be changed to enqueue a special request
- * to the driver to change settings, and then wait on a sema for completion.
- * The current scheme of polling is kludgy, though safe enough.
- */
-
-static int ide_write_setting(ide_drive_t *drive,
- const struct ide_proc_devset *setting, int val)
-{
- const struct ide_devset *ds = setting->setting;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!ds->set)
- return -EPERM;
- if ((ds->flags & DS_SYNC)
- && (val < setting->min || val > setting->max))
- return -EINVAL;
- return ide_devset_execute(drive, ds, val);
-}
-
-ide_devset_get(xfer_rate, current_speed);
-
-static int set_xfer_rate(ide_drive_t *drive, int arg)
-{
- struct ide_cmd cmd;
-
- if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
- return -EINVAL;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.command = ATA_CMD_SET_FEATURES;
- cmd.tf.feature = SETFEATURES_XFER;
- cmd.tf.nsect = (u8)arg;
- cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- cmd.tf_flags = IDE_TFLAG_SET_XFER;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-ide_devset_rw(current_speed, xfer_rate);
-ide_devset_rw_field(init_speed, init_speed);
-ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
-ide_devset_ro_field(number, dn);
-
-static const struct ide_proc_devset ide_generic_settings[] = {
- IDE_PROC_DEVSET(current_speed, 0, 70),
- IDE_PROC_DEVSET(init_speed, 0, 70),
- IDE_PROC_DEVSET(io_32bit, 0, 1 + (SUPPORT_VLB_SYNC << 1)),
- IDE_PROC_DEVSET(keepsettings, 0, 1),
- IDE_PROC_DEVSET(nice1, 0, 1),
- IDE_PROC_DEVSET(number, 0, 3),
- IDE_PROC_DEVSET(pio_mode, 0, 255),
- IDE_PROC_DEVSET(unmaskirq, 0, 1),
- IDE_PROC_DEVSET(using_dma, 0, 1),
- { NULL },
-};
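-
-/*
- * Editor's usage sketch (not from the original file): the table above is
- * what the /proc "settings" file exposes, so from a shell
- *
- *   cat /proc/ide/ide0/hda/settings            # name/value/min/max/mode
- *   echo "pio_mode:4" > /proc/ide/ide0/hda/settings
- *
- * the write lands in ide_write_setting(), which (for DS_SYNC settings)
- * range-checks the value against the pio_mode entry's 0..255 bounds before
- * calling ide_devset_execute().
- */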
-
-static void proc_ide_settings_warn(void)
-{
- printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
- "obsolete, and will be removed soon!\n");
-}
-
-static int ide_settings_proc_show(struct seq_file *m, void *v)
-{
- const struct ide_proc_devset *setting, *g, *d;
- const struct ide_devset *ds;
- ide_drive_t *drive = (ide_drive_t *) m->private;
- int rc, mul_factor, div_factor;
-
- proc_ide_settings_warn();
-
- mutex_lock(&ide_setting_mtx);
- g = ide_generic_settings;
- d = drive->settings;
- seq_printf(m, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
- seq_printf(m, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
- while (g->name || (d && d->name)) {
- /* read settings in the alphabetical order */
- if (g->name && d && d->name) {
- if (strcmp(d->name, g->name) < 0)
- setting = d++;
- else
- setting = g++;
- } else if (d && d->name) {
- setting = d++;
- } else
- setting = g++;
- mul_factor = setting->mulf ? setting->mulf(drive) : 1;
- div_factor = setting->divf ? setting->divf(drive) : 1;
- seq_printf(m, "%-24s", setting->name);
- rc = ide_read_setting(drive, setting);
- if (rc >= 0)
- seq_printf(m, "%-16d", rc * mul_factor / div_factor);
- else
- seq_printf(m, "%-16s", "write-only");
- seq_printf(m, "%-16d%-16d", (setting->min * mul_factor + div_factor - 1) / div_factor, setting->max * mul_factor / div_factor);
- ds = setting->setting;
- if (ds->get)
- seq_printf(m, "r");
- if (ds->set)
- seq_printf(m, "w");
- seq_printf(m, "\n");
- }
- mutex_unlock(&ide_setting_mtx);
- return 0;
-}
-
-static int ide_settings_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ide_settings_proc_show, PDE_DATA(inode));
-}
-
-#define MAX_LEN 30
-
-static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- ide_drive_t *drive = PDE_DATA(file_inode(file));
- char name[MAX_LEN + 1];
- int for_real = 0, mul_factor, div_factor;
- unsigned long n;
-
- const struct ide_proc_devset *setting;
- char *buf, *s;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- proc_ide_settings_warn();
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
-
- s = buf = (char *)__get_free_page(GFP_USER);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- free_page((unsigned long)buf);
- return -EFAULT;
- }
-
- buf[count] = '\0';
-
- /*
- * Skip over leading whitespace
- */
- while (count && isspace(*s)) {
- --count;
- ++s;
- }
- /*
- * Do one full pass to verify all parameters,
- * then do another to actually write the new settings.
- */
- do {
- char *p = s;
- n = count;
- while (n > 0) {
- unsigned val;
- char *q = p;
-
- while (n > 0 && *p != ':') {
- --n;
- p++;
- }
- if (*p != ':')
- goto parse_error;
- if (p - q > MAX_LEN)
- goto parse_error;
- memcpy(name, q, p - q);
- name[p - q] = 0;
-
- if (n > 0) {
- --n;
- p++;
- } else
- goto parse_error;
-
- val = simple_strtoul(p, &q, 10);
- n -= q - p;
- p = q;
- if (n > 0 && !isspace(*p))
- goto parse_error;
- while (n > 0 && isspace(*p)) {
- --n;
- ++p;
- }
-
- mutex_lock(&ide_setting_mtx);
- /* generic settings first, then driver specific ones */
- setting = ide_find_setting(ide_generic_settings, name);
- if (!setting) {
- if (drive->settings)
- setting = ide_find_setting(drive->settings, name);
- if (!setting) {
- mutex_unlock(&ide_setting_mtx);
- goto parse_error;
- }
- }
- if (for_real) {
- mul_factor = setting->mulf ? setting->mulf(drive) : 1;
- div_factor = setting->divf ? setting->divf(drive) : 1;
- ide_write_setting(drive, setting, val * div_factor / mul_factor);
- }
- mutex_unlock(&ide_setting_mtx);
- }
- } while (!for_real++);
- free_page((unsigned long)buf);
- return count;
-parse_error:
- free_page((unsigned long)buf);
- printk("%s(): parse error\n", __func__);
- return -EINVAL;
-}
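-
-/*
- * Editor's note, a worked example of the two-pass parse above (not from
- * the original file).  For the input
- *
- *   "io_32bit:1 nice1:0\n"
- *
- * pass one (for_real == 0) only looks up both names and validates the
- * syntax; pass two repeats the walk and actually calls ide_write_setting()
- * for each pair.  A typo in the second pair therefore rejects the whole
- * write with -EINVAL before anything is modified.
- */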
-
-static const struct proc_ops ide_settings_proc_ops = {
- .proc_open = ide_settings_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
- .proc_write = ide_settings_proc_write,
-};
-
-int ide_capacity_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%llu\n", (long long)0x7fffffff);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_capacity_proc_show);
-
-int ide_geometry_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
-
- seq_printf(m, "physical %d/%d/%d\n",
- drive->cyl, drive->head, drive->sect);
- seq_printf(m, "logical %d/%d/%d\n",
- drive->bios_cyl, drive->bios_head, drive->bios_sect);
- return 0;
-}
-EXPORT_SYMBOL(ide_geometry_proc_show);
-
-static int ide_dmodel_proc_show(struct seq_file *seq, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) seq->private;
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- seq_printf(seq, "%.40s\n", m[0] ? m : "(none)");
- return 0;
-}
-
-static int ide_driver_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
- struct device *dev = &drive->gendev;
- struct ide_driver *ide_drv;
-
- if (dev->driver) {
- ide_drv = to_ide_driver(dev->driver);
- seq_printf(m, "%s version %s\n",
- dev->driver->name, ide_drv->version);
- } else
- seq_printf(m, "ide-default version 0.9.newide\n");
- return 0;
-}
-
-static int ide_media_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
- const char *media;
-
- switch (drive->media) {
- case ide_disk: media = "disk\n"; break;
- case ide_cdrom: media = "cdrom\n"; break;
- case ide_tape: media = "tape\n"; break;
- case ide_floppy: media = "floppy\n"; break;
- case ide_optical: media = "optical\n"; break;
- default: media = "UNKNOWN\n"; break;
- }
- seq_puts(m, media);
- return 0;
-}
-
-static int ide_media_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ide_media_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations ide_media_proc_fops = {
- .owner = THIS_MODULE,
- .open = ide_media_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static ide_proc_entry_t generic_drive_entries[] = {
- { "driver", S_IFREG|S_IRUGO, ide_driver_proc_show },
- { "identify", S_IFREG|S_IRUSR, ide_identify_proc_show },
- { "media", S_IFREG|S_IRUGO, ide_media_proc_show },
- { "model", S_IFREG|S_IRUGO, ide_dmodel_proc_show },
- {}
-};
-
-static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
-{
- struct proc_dir_entry *ent;
-
- if (!dir || !p)
- return;
- while (p->name != NULL) {
- ent = proc_create_single_data(p->name, p->mode, dir, p->show, data);
- if (!ent) return;
- p++;
- }
-}
-
-static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
-{
- if (!dir || !p)
- return;
- while (p->name != NULL) {
- remove_proc_entry(p->name, dir);
- p++;
- }
-}
-
-void ide_proc_register_driver(ide_drive_t *drive, struct ide_driver *driver)
-{
- mutex_lock(&ide_setting_mtx);
- drive->settings = driver->proc_devsets(drive);
- mutex_unlock(&ide_setting_mtx);
-
- ide_add_proc_entries(drive->proc, driver->proc_entries(drive), drive);
-}
-
-EXPORT_SYMBOL(ide_proc_register_driver);
-
-/**
- * ide_proc_unregister_driver - remove driver specific data
- * @drive: drive
- * @driver: driver
- *
- * Clean up the driver specific /proc files and IDE settings
- * for a given drive.
- *
- * Takes ide_setting_mtx.
- */
-
-void ide_proc_unregister_driver(ide_drive_t *drive, struct ide_driver *driver)
-{
- ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
-
- mutex_lock(&ide_setting_mtx);
- /*
- * ide_setting_mtx protects both the settings list and the use
- * of settings (we cannot take a setting out that is being used).
- */
- drive->settings = NULL;
- mutex_unlock(&ide_setting_mtx);
-}
-EXPORT_SYMBOL(ide_proc_unregister_driver);
-
-void ide_proc_port_register_devices(ide_hwif_t *hwif)
-{
- struct proc_dir_entry *ent;
- struct proc_dir_entry *parent = hwif->proc;
- ide_drive_t *drive;
- char name[64];
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- continue;
-
- drive->proc = proc_mkdir(drive->name, parent);
- if (drive->proc) {
- ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
- proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
- drive->proc, &ide_settings_proc_ops,
- drive);
- }
- sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
- ent = proc_symlink(drive->name, proc_ide_root, name);
- if (!ent) return;
- }
-}
-
-void ide_proc_unregister_device(ide_drive_t *drive)
-{
- if (drive->proc) {
- remove_proc_entry("settings", drive->proc);
- ide_remove_proc_entries(drive->proc, generic_drive_entries);
- remove_proc_entry(drive->name, proc_ide_root);
- remove_proc_entry(drive->name, drive->hwif->proc);
- drive->proc = NULL;
- }
-}
-
-static ide_proc_entry_t hwif_entries[] = {
- { "channel", S_IFREG|S_IRUGO, ide_channel_proc_show },
- { "mate", S_IFREG|S_IRUGO, ide_mate_proc_show },
- { "model", S_IFREG|S_IRUGO, ide_imodel_proc_show },
- {}
-};
-
-void ide_proc_register_port(ide_hwif_t *hwif)
-{
- if (!hwif->proc) {
- hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
-
- if (!hwif->proc)
- return;
-
- ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
- }
-}
-
-void ide_proc_unregister_port(ide_hwif_t *hwif)
-{
- if (hwif->proc) {
- ide_remove_proc_entries(hwif->proc, hwif_entries);
- remove_proc_entry(hwif->name, proc_ide_root);
- hwif->proc = NULL;
- }
-}
-
-static int proc_print_driver(struct device_driver *drv, void *data)
-{
- struct ide_driver *ide_drv = to_ide_driver(drv);
- struct seq_file *s = data;
-
- seq_printf(s, "%s version %s\n", drv->name, ide_drv->version);
-
- return 0;
-}
-
-static int ide_drivers_show(struct seq_file *s, void *p)
-{
- int err;
-
- err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
- if (err < 0)
- printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
- __func__, err);
- return 0;
-}
-
-DEFINE_PROC_SHOW_ATTRIBUTE(ide_drivers);
-
-void proc_ide_create(void)
-{
- proc_ide_root = proc_mkdir("ide", NULL);
-
- if (!proc_ide_root)
- return;
-
- proc_create("drivers", 0, proc_ide_root, &ide_drivers_proc_ops);
-}
-
-void proc_ide_destroy(void)
-{
- remove_proc_entry("drivers", proc_ide_root);
- remove_proc_entry("ide", NULL);
-}
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
deleted file mode 100644
index b0411a1827a3..000000000000
--- a/drivers/ide/ide-scan-pci.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * support for probing IDE PCI devices in the PCI bus order
- *
- * Copyright (c) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (c) 1995-1998 Mark Lord
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-
-/*
- * Module interfaces
- */
-
-static int pre_init = 1; /* Before first ordered IDE scan */
-static LIST_HEAD(ide_pci_drivers);
-
-/**
- * __ide_pci_register_driver - attach IDE driver
- * @driver: pci driver
- * @module: owner module of the driver
- *
- * Registers a driver with the IDE layer. The IDE layer arranges that
- * boot time setup is done in the expected device order and then
- * hands the controllers off to the core PCI code to do the rest of
- * the work.
- *
- * Returns are the same as for pci_register_driver
- */
-
-int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
- const char *mod_name)
-{
- if (!pre_init)
- return __pci_register_driver(driver, module, mod_name);
- driver->driver.owner = module;
- list_add_tail(&driver->node, &ide_pci_drivers);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
-
-/**
- * ide_scan_pcidev - find an IDE driver for a device
- * @dev: PCI device to check
- *
- * Look for an IDE driver to handle the device we are considering.
- * This is only used during boot up to get the ordering correct. After
- * boot up the pci layer takes over the job.
- */
-
-static int __init ide_scan_pcidev(struct pci_dev *dev)
-{
- struct list_head *l;
- struct pci_driver *d;
- int ret;
-
- list_for_each(l, &ide_pci_drivers) {
- d = list_entry(l, struct pci_driver, node);
- if (d->id_table) {
- const struct pci_device_id *id =
- pci_match_id(d->id_table, dev);
-
- if (id != NULL) {
- pci_assign_irq(dev);
- ret = d->probe(dev, id);
- if (ret >= 0) {
- dev->driver = d;
- pci_dev_get(dev);
- return 1;
- }
- }
- }
- }
- return 0;
-}
-
-/**
- * ide_scan_pcibus - perform the initial IDE driver scan
- *
- * Perform the initial scan of the PCI drivers in bus order rather
- * than driver order. After this, all IDE PCI handling follows
- * standard module ordering rather than the traditional boot order.
- */
-
-static int __init ide_scan_pcibus(void)
-{
- struct pci_dev *dev = NULL;
- struct pci_driver *d, *tmp;
-
- pre_init = 0;
- for_each_pci_dev(dev)
- ide_scan_pcidev(dev);
-
- /*
- * Hand the drivers over to the PCI layer now that we
- * are post init.
- */
-
- list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) {
- list_del(&d->node);
- if (__pci_register_driver(d, d->driver.owner,
- d->driver.mod_name))
- printk(KERN_ERR "%s: failed to register %s driver\n",
- __func__, d->driver.mod_name);
- }
-
- return 0;
-}
-device_initcall(ide_scan_pcibus);
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
deleted file mode 100644
index c08a8a0916e2..000000000000
--- a/drivers/ide/ide-sysfs.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-
-char *ide_media_string(ide_drive_t *drive)
-{
- switch (drive->media) {
- case ide_disk:
- return "disk";
- case ide_cdrom:
- return "cdrom";
- case ide_tape:
- return "tape";
- case ide_floppy:
- return "floppy";
- case ide_optical:
- return "optical";
- default:
- return "UNKNOWN";
- }
-}
-
-static ssize_t media_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", ide_media_string(drive));
-}
-static DEVICE_ATTR_RO(media);
-
-static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", drive->name);
-}
-static DEVICE_ATTR_RO(drivename);
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
-}
-static DEVICE_ATTR_RO(modalias);
-
-static ssize_t model_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
-}
-static DEVICE_ATTR_RO(model);
-
-static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
-}
-static DEVICE_ATTR_RO(firmware);
-
-static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
-}
-static DEVICE_ATTR(serial, 0400, serial_show, NULL);
-
-static DEVICE_ATTR(unload_heads, 0644, ide_park_show, ide_park_store);
-
-static struct attribute *ide_attrs[] = {
- &dev_attr_media.attr,
- &dev_attr_drivename.attr,
- &dev_attr_modalias.attr,
- &dev_attr_model.attr,
- &dev_attr_firmware.attr,
- &dev_attr_serial.attr,
- &dev_attr_unload_heads.attr,
- NULL,
-};
-
-static const struct attribute_group ide_attr_group = {
- .attrs = ide_attrs,
-};
-
-const struct attribute_group *ide_dev_groups[] = {
- &ide_attr_group,
- NULL,
-};
-
-static ssize_t store_delete_devices(struct device *portdev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- ide_hwif_t *hwif = dev_get_drvdata(portdev);
-
- if (strncmp(buf, "1", n))
- return -EINVAL;
-
- ide_port_unregister_devices(hwif);
-
- return n;
-};
-
-static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
-
-static ssize_t store_scan(struct device *portdev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- ide_hwif_t *hwif = dev_get_drvdata(portdev);
-
- if (strncmp(buf, "1", n))
- return -EINVAL;
-
- ide_port_unregister_devices(hwif);
- ide_port_scan(hwif);
-
- return n;
-};
-
-static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
-
-static struct device_attribute *ide_port_attrs[] = {
- &dev_attr_delete_devices,
- &dev_attr_scan,
- NULL
-};
-
-int ide_sysfs_register_port(ide_hwif_t *hwif)
-{
- int i, rc;
-
- for (i = 0; ide_port_attrs[i]; i++) {
- rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
- if (rc)
- break;
- }
-
- return rc;
-}
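-
-/*
- * Editor's usage sketch (not from the original file; the exact sysfs path
- * depends on the bus topology): the two port attributes registered above
- * make a manual rescan after swapping drives look like
- *
- *   echo 1 > /sys/devices/.../ide0/delete_devices
- *   echo 1 > /sys/devices/.../ide0/scan
- *
- * and both writes accept only the string "1".
- */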
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
deleted file mode 100644
index fa05e7e7d609..000000000000
--- a/drivers/ide/ide-tape.c
+++ /dev/null
@@ -1,2083 +0,0 @@
-/*
- * IDE ATAPI streaming tape driver.
- *
- * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
- * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
- *
- * This driver was constructed as a student project in the software laboratory
- * of the faculty of electrical engineering in the Technion - Israel's
- * Institute Of Technology, with the guide of Avner Lottem and Dr. Ilana David.
- *
- * It is hereby placed under the terms of the GNU general public license.
- * (See linux/COPYING).
- *
- * For a historical changelog see
- * Documentation/ide/ChangeLog.ide-tape.1995-2002
- */
-
-#define DRV_NAME "ide-tape"
-
-#define IDETAPE_VERSION "1.20"
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <scsi/scsi.h>
-
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <asm/unaligned.h>
-#include <linux/mtio.h>
-
-/* define to see debug info */
-#undef IDETAPE_DEBUG_LOG
-
-#ifdef IDETAPE_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-/**************************** Tunable parameters *****************************/
-/*
- * After each failed packet command we issue a request sense command and retry
- * the packet command IDETAPE_MAX_PC_RETRIES times.
- *
- * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
- */
-#define IDETAPE_MAX_PC_RETRIES 3
-
-/*
- * The following parameter is used to select the point in the internal tape
- * fifo at which we start to refill the buffer. Decreasing it improves the
- * system's latency and interactive response, while a higher value might
- * improve system throughput.
- */
-#define IDETAPE_FIFO_THRESHOLD 2
-
-/*
- * DSC polling parameters.
- *
- * Polling for DSC (a single bit in the status register) is a very important
- * function in ide-tape. There are two cases in which we poll for DSC:
- *
- * 1. Before a read/write packet command, to ensure that we can transfer data
- * from/to the tape's data buffers, without causing an actual media access.
- * In case the tape is not ready yet, we take out our request from the device
- * request queue, so that ide.c could service requests from the other device
- * on the same interface in the meantime.
- *
- * 2. After the successful initialization of a "media access packet command",
- * which is a command that can take a long time to complete (the interval can
- * range from several seconds to even an hour). Again, we postpone our request
- * in the middle to free the bus for the other device. The polling frequency
- * here should be lower than the read/write frequency since those media access
- * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
- * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
- * (5 min), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
- *
- * We also set a timeout for the timer, in case something goes wrong. The
- * timeout should be longer than the maximum execution time of a tape operation.
- */
-
-/* DSC timings. */
-#define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
-#define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
-#define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
-#define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
-#define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
-#define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
-#define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
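-
-/*
- * Editor's note, an example media access polling timeline implied by the
- * values above (not from the original file):
- *
- *   t = 0      command accepted; poll DSC every IDETAPE_DSC_MA_FAST (2 s)
- *   t = 5 min  IDETAPE_DSC_MA_THRESHOLD hit; poll every
- *              IDETAPE_DSC_MA_SLOW (30 s) instead
- *   t = 2 h    IDETAPE_DSC_MA_TIMEOUT: give up; the request is failed or
- *              the drive reset (see idetape_do_request())
- */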
-
-/*************************** End of tunable parameters ***********************/
-
-/* tape directions */
-enum {
- IDETAPE_DIR_NONE = (1 << 0),
- IDETAPE_DIR_READ = (1 << 1),
- IDETAPE_DIR_WRITE = (1 << 2),
-};
-
-/* Tape door status */
-#define DOOR_UNLOCKED 0
-#define DOOR_LOCKED 1
-#define DOOR_EXPLICITLY_LOCKED 2
-
-/* Some defines for the SPACE command */
-#define IDETAPE_SPACE_OVER_FILEMARK 1
-#define IDETAPE_SPACE_TO_EOD 3
-
-/* Some defines for the LOAD UNLOAD command */
-#define IDETAPE_LU_LOAD_MASK 1
-#define IDETAPE_LU_RETENSION_MASK 2
-#define IDETAPE_LU_EOT_MASK 4
-
-/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
-#define IDETAPE_BLOCK_DESCRIPTOR 0
-#define IDETAPE_CAPABILITIES_PAGE 0x2a
-
-/*
- * Most of our global data which we need to save even as we leave the driver due
- * to an interrupt or a timer event is stored in the struct defined below.
- */
-typedef struct ide_tape_obj {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
-
- /* used by REQ_IDETAPE_{READ,WRITE} requests */
- struct ide_atapi_pc queued_pc;
-
- /*
- * DSC polling variables.
- *
- * While polling for DSC we use postponed_rq to postpone the current
- * request so that ide.c will be able to service pending requests on the
- * other device. Note that at most we will have only one DSC (usually
- * data transfer) request in the device request queue.
- */
- bool postponed_rq;
-
- /* The time in which we started polling for DSC */
- unsigned long dsc_polling_start;
- /* Timer used to poll for dsc */
- struct timer_list dsc_timer;
- /* Read/Write dsc polling frequency */
- unsigned long best_dsc_rw_freq;
- unsigned long dsc_poll_freq;
- unsigned long dsc_timeout;
-
- /* Read position information */
- u8 partition;
- /* Current block */
- unsigned int first_frame;
-
- /* Last error information */
- u8 sense_key, asc, ascq;
-
- /* Character device operation */
- unsigned int minor;
- /* device name */
- char name[4];
- /* Current character device data transfer direction */
- u8 chrdev_dir;
-
- /* tape block size, usually 512 or 1024 bytes */
- unsigned short blk_size;
- int user_bs_factor;
-
- /* Copy of the tape's Capabilities and Mechanical Page */
- u8 caps[20];
-
- /*
- * Active data transfer request parameters.
- *
- * At most, there is only one ide-tape originated data transfer request
- * in the device request queue. This allows ide.c to easily service
- * requests from the other device when we postpone our active request.
- */
-
- /* Data buffer size chosen based on the tape's recommendation */
- int buffer_size;
- /* Staging buffer of buffer_size bytes */
- void *buf;
- /* The read/write cursor */
- void *cur;
- /* The number of valid bytes in buf */
- size_t valid;
-
- /* Measures average tape speed */
- unsigned long avg_time;
- int avg_size;
- int avg_speed;
-
- /* the door is currently locked */
- int door_locked;
- /* the tape hardware is write protected */
- char drv_write_prot;
- /* the tape is write protected (hardware or opened as read-only) */
- char write_prot;
-} idetape_tape_t;
-
-static DEFINE_MUTEX(ide_tape_mutex);
-static DEFINE_MUTEX(idetape_ref_mutex);
-
-static DEFINE_MUTEX(idetape_chrdev_mutex);
-
-static struct class *idetape_sysfs_class;
-
-static void ide_tape_release(struct device *);
-
-static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
-
-static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
- unsigned int i)
-{
- struct ide_tape_obj *tape = NULL;
-
- mutex_lock(&idetape_ref_mutex);
-
- if (cdev)
- tape = idetape_devs[i];
- else
- tape = ide_drv_g(disk, ide_tape_obj);
-
- if (tape) {
- if (ide_device_get(tape->drive))
- tape = NULL;
- else
- get_device(&tape->dev);
- }
-
- mutex_unlock(&idetape_ref_mutex);
- return tape;
-}
-
-static void ide_tape_put(struct ide_tape_obj *tape)
-{
- ide_drive_t *drive = tape->drive;
-
- mutex_lock(&idetape_ref_mutex);
- put_device(&tape->dev);
- ide_device_put(drive);
- mutex_unlock(&idetape_ref_mutex);
-}
-
-/*
- * called on each failed packet command retry to analyze the request sense. We
- * currently do not utilize this information.
- */
-static void idetape_analyze_error(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->failed_pc;
- struct request *rq = drive->hwif->rq;
- u8 *sense = bio_data(rq->bio);
-
- tape->sense_key = sense[2] & 0xF;
- tape->asc = sense[12];
- tape->ascq = sense[13];
-
- ide_debug_log(IDE_DBG_FUNC,
- "cmd: 0x%x, sense key = %x, asc = %x, ascq = %x",
- rq->cmd[0], tape->sense_key, tape->asc, tape->ascq);
-
- /* correct remaining bytes to transfer */
- if (pc->flags & PC_FLAG_DMA_ERROR)
- scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
-
- /*
- * If error was the result of a zero-length read or write command,
- * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
- * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
- */
- if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
- /* length == 0 */
- && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
- if (tape->sense_key == 5) {
- /* don't report an error, everything's ok */
- pc->error = 0;
- /* don't retry read/write */
- pc->flags |= PC_FLAG_ABORT;
- }
- }
- if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
- pc->error = IDE_DRV_ERROR_FILEMARK;
- pc->flags |= PC_FLAG_ABORT;
- }
- if (pc->c[0] == WRITE_6) {
- if ((sense[2] & 0x40) || (tape->sense_key == 0xd
- && tape->asc == 0x0 && tape->ascq == 0x2)) {
- pc->error = IDE_DRV_ERROR_EOD;
- pc->flags |= PC_FLAG_ABORT;
- }
- }
- if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
- if (tape->sense_key == 8) {
- pc->error = IDE_DRV_ERROR_EOD;
- pc->flags |= PC_FLAG_ABORT;
- }
- if (!(pc->flags & PC_FLAG_ABORT) &&
- (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
- pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
- }
-}
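-
-/*
- * Editor's note (not from the original file): the offsets above follow the
- * SCSI fixed-format sense data layout:
- *
- *   sense[2] & 0x0f  sense key        sense[3..6]  information (be32)
- *   sense[12]        ASC              sense[13]    ASCQ
- *
- * e.g. a filemark hit during READ_6 shows up as the FM bit, sense[2] & 0x80.
- */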
-
-static void ide_tape_handle_dsc(ide_drive_t *);
-
-static int ide_tape_callback(ide_drive_t *drive, int dsc)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- struct request *rq = drive->hwif->rq;
- int uptodate = pc->error ? 0 : 1;
- int err = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq->cmd[0],
- dsc, err);
-
- if (dsc)
- ide_tape_handle_dsc(drive);
-
- if (drive->failed_pc == pc)
- drive->failed_pc = NULL;
-
- if (pc->c[0] == REQUEST_SENSE) {
- if (uptodate)
- idetape_analyze_error(drive);
- else
- printk(KERN_ERR "ide-tape: Error in REQUEST SENSE "
- "itself - Aborting request!\n");
- } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
- unsigned int blocks =
- (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
-
- tape->avg_size += blocks * tape->blk_size;
-
- if (time_after_eq(jiffies, tape->avg_time + HZ)) {
- tape->avg_speed = tape->avg_size * HZ /
- (jiffies - tape->avg_time) / 1024;
- tape->avg_size = 0;
- tape->avg_time = jiffies;
- }
-
- tape->first_frame += blocks;
-
- if (pc->error) {
- uptodate = 0;
- err = pc->error;
- }
- }
- scsi_req(rq)->result = err;
-
- return uptodate;
-}
-
-/*
- * Postpone the current request so that ide.c will be able to service requests
- * from another device on the same port while we are polling for DSC.
- */
-static void ide_tape_stall_queue(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc_poll_freq: %lu",
- drive->hwif->rq->cmd[0], tape->dsc_poll_freq);
-
- tape->postponed_rq = true;
-
- ide_stall_queue(drive, tape->dsc_poll_freq);
-}
-
-static void ide_tape_handle_dsc(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- /* Media access command */
- tape->dsc_polling_start = jiffies;
- tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
- tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
- /* Allow ide.c to handle other requests */
- ide_tape_stall_queue(drive);
-}
-
-/*
- * Packet Command Interface
- *
- * The current Packet Command is available in drive->pc, and will not change
- * until we finish handling it. Each packet command is associated with a
- * callback function that will be called when the command is finished.
- *
- * The handling is done in the following stages:
- *
- * 1. ide_tape_issue_pc will send the packet command to the drive, and will set
- * the interrupt handler to ide_pc_intr.
- *
- * 2. On each interrupt, ide_pc_intr will be called. This step will be
- * repeated until the device signals us that no more interrupts will be issued.
- *
- * 3. ATAPI tape media access commands return status immediately, while the
- * actual operation completes later. On successful initiation of a media
- * access packet command, the DSC bit will be set when the actual execution
- * of the command is finished. Since the tape drive will not issue an
- * interrupt, we have to poll for this event. In this case, we define the
- * request as a "low priority request" by setting rq_status to
- * IDETAPE_RQ_POSTPONED, setting a timer to poll for DSC, and exiting the
- * driver.
- *
- * ide.c will then give higher priority to requests which originate from the
- * other device, until we change rq_status back to RQ_ACTIVE.
- *
- * 4. When the packet command is finished, it will be checked for errors.
- *
- * 5. In case an error was found, we queue a request sense packet command in
- * front of the request queue and retry the operation up to
- * IDETAPE_MAX_PC_RETRIES times.
- *
- * 6. In case no error was found, or we decided to give up and not to retry
- * again, the callback function will be called and then we will handle the next
- * request.
- */
-
-static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
- struct ide_cmd *cmd,
- struct ide_atapi_pc *pc)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct request *rq = drive->hwif->rq;
-
- if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
- drive->failed_pc = pc;
-
- /* Set the current packet command */
- drive->pc = pc;
-
- if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
- (pc->flags & PC_FLAG_ABORT)) {
-
- /*
- * We will "abort" retrying a packet command in case legitimate
- * error code was received (crossing a filemark, or end of the
- * media, for example).
- */
- if (!(pc->flags & PC_FLAG_ABORT)) {
- if (!(pc->c[0] == TEST_UNIT_READY &&
- tape->sense_key == 2 && tape->asc == 4 &&
- (tape->ascq == 1 || tape->ascq == 8))) {
- printk(KERN_ERR "ide-tape: %s: I/O error, "
- "pc = %2x, key = %2x, "
- "asc = %2x, ascq = %2x\n",
- tape->name, pc->c[0],
- tape->sense_key, tape->asc,
- tape->ascq);
- }
- /* Giving up */
- pc->error = IDE_DRV_ERROR_GENERAL;
- }
-
- drive->failed_pc = NULL;
- drive->pc_callback(drive, 0);
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
- return ide_stopped;
- }
- ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries,
- pc->c[0]);
-
- pc->retries++;
-
- return ide_issue_pc(drive, cmd);
-}
-
-/* A mode sense command is used to "sense" tape parameters. */
-static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
-{
- ide_init_pc(pc);
- pc->c[0] = MODE_SENSE;
- if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
- /* DBD = 1 - Don't return block descriptors */
- pc->c[1] = 8;
- pc->c[2] = page_code;
- /*
- * Changed pc->c[3] to 0 (255 will at best return unused info).
- *
- * For SCSI this byte is defined as subpage instead of high byte
- * of length and some IDE drives seem to interpret it this way
- * and return an error when 255 is used.
- */
- pc->c[3] = 0;
- /* We will just discard data in that case */
- pc->c[4] = 255;
- if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
- pc->req_xfer = 12;
- else if (page_code == IDETAPE_CAPABILITIES_PAGE)
- pc->req_xfer = 24;
- else
- pc->req_xfer = 50;
-}
-
-static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- u8 stat;
-
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_DSC) {
- if (stat & ATA_ERR) {
- /* Error detected */
- if (pc->c[0] != TEST_UNIT_READY)
- printk(KERN_ERR "ide-tape: %s: I/O error, ",
- tape->name);
- /* Retry operation */
- ide_retry_pc(drive);
- return ide_stopped;
- }
- pc->error = 0;
- } else {
- pc->error = IDE_DRV_ERROR_GENERAL;
- drive->failed_pc = NULL;
- }
- drive->pc_callback(drive, 0);
- return ide_stopped;
-}
-
-static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
- struct ide_atapi_pc *pc, struct request *rq,
- u8 opcode)
-{
- unsigned int length = blk_rq_sectors(rq) / (tape->blk_size >> 9);
-
- ide_init_pc(pc);
- put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
- pc->c[1] = 1;
-
- if (blk_rq_bytes(rq) == tape->buffer_size)
- pc->flags |= PC_FLAG_DMA_OK;
-
- if (opcode == READ_6)
- pc->c[0] = READ_6;
- else if (opcode == WRITE_6) {
- pc->c[0] = WRITE_6;
- pc->flags |= PC_FLAG_WRITING;
- }
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
-}
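-
-/*
- * Editor's note, a worked example of the CDB construction above (not from
- * the original file).  For a 64-block fixed READ_6:
- *
- *   put_unaligned(cpu_to_be32(64), &pc->c[1])  -> c[1..4] = 00 00 00 40
- *   pc->c[1] = 1                               -> FIXED bit set
- *
- * leaving the 24-bit transfer length in c[2..4], so the final CDB is
- * 08 01 00 00 40 00.
- */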
-
-static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- struct request *rq, sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = NULL;
- struct ide_cmd cmd;
- struct scsi_request *req = scsi_req(rq);
- u8 stat;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
- req->cmd[0], (unsigned long long)blk_rq_pos(rq),
- blk_rq_sectors(rq));
-
- BUG_ON(!blk_rq_is_private(rq));
- BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
- ide_req(rq)->type != ATA_PRIV_SENSE);
-
- /* Retry a failed packet command */
- if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
- pc = drive->failed_pc;
- goto out;
- }
-
- /*
- * If the tape is still busy, postpone our request and service
- * the other device meanwhile.
- */
- stat = hwif->tp_ops->read_status(hwif);
-
- if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
- (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
-
- if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
- drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
- }
-
- if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
- !(stat & ATA_DSC)) {
- if (!tape->postponed_rq) {
- tape->dsc_polling_start = jiffies;
- tape->dsc_poll_freq = tape->best_dsc_rw_freq;
- tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
- } else if (time_after(jiffies, tape->dsc_timeout)) {
- printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
- tape->name);
- if (req->cmd[13] & REQ_IDETAPE_PC2) {
- idetape_media_access_finished(drive);
- return ide_stopped;
- } else {
- return ide_do_reset(drive);
- }
- } else if (time_after(jiffies,
- tape->dsc_polling_start +
- IDETAPE_DSC_MA_THRESHOLD))
- tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
- ide_tape_stall_queue(drive);
- return ide_stopped;
- } else {
- drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
- tape->postponed_rq = false;
- }
-
- if (req->cmd[13] & REQ_IDETAPE_READ) {
- pc = &tape->queued_pc;
- ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_WRITE) {
- pc = &tape->queued_pc;
- ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_PC1) {
- pc = (struct ide_atapi_pc *)ide_req(rq)->special;
- req->cmd[13] &= ~(REQ_IDETAPE_PC1);
- req->cmd[13] |= REQ_IDETAPE_PC2;
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_PC2) {
- idetape_media_access_finished(drive);
- return ide_stopped;
- }
- BUG();
-
-out:
- /* prepare sense request for this command */
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
-
- return ide_tape_issue_pc(drive, &cmd, pc);
-}
-
-/*
- * Write a filemark if write_filemark=1. Flush the device buffers without
- * writing a filemark otherwise.
- */
-static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc, int write_filemark)
-{
- ide_init_pc(pc);
- pc->c[0] = WRITE_FILEMARKS;
- pc->c[4] = write_filemark;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- int load_attempted = 0;
-
- /* Wait for the tape to become ready */
- set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
- timeout += jiffies;
- while (time_before(jiffies, timeout)) {
- if (ide_do_test_unit_ready(drive, disk) == 0)
- return 0;
- if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
- || (tape->asc == 0x3A)) {
- /* no media */
- if (load_attempted)
- return -ENOMEDIUM;
- ide_do_start_stop(drive, disk, IDETAPE_LU_LOAD_MASK);
- load_attempted = 1;
- /* not about to be ready */
- } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
- (tape->ascq == 1 || tape->ascq == 8)))
- return -EIO;
- msleep(100);
- }
- return -EIO;
-}
-
-static int idetape_flush_tape_buffers(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- int rc;
-
- idetape_create_write_filemark_cmd(drive, &pc, 0);
- rc = ide_queue_pc_tail(drive, tape->disk, &pc, NULL, 0);
- if (rc)
- return rc;
- idetape_wait_ready(drive, 60 * 5 * HZ);
- return 0;
-}
-
-static int ide_tape_read_position(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[20];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- /* prep cmd */
- ide_init_pc(&pc);
- pc.c[0] = READ_POSITION;
- pc.req_xfer = 20;
-
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer))
- return -1;
-
- if (!pc.error) {
- ide_debug_log(IDE_DBG_FUNC, "BOP - %s",
- (buf[0] & 0x80) ? "Yes" : "No");
- ide_debug_log(IDE_DBG_FUNC, "EOP - %s",
- (buf[0] & 0x40) ? "Yes" : "No");
-
- if (buf[0] & 0x4) {
- printk(KERN_INFO "ide-tape: Block location is unknown"
- "to the tape\n");
- clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
- &drive->atapi_flags);
- return -1;
- } else {
- ide_debug_log(IDE_DBG_FUNC, "Block Location: %u",
- be32_to_cpup((__be32 *)&buf[4]));
-
- tape->partition = buf[1];
- tape->first_frame = be32_to_cpup((__be32 *)&buf[4]);
- set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
- &drive->atapi_flags);
- }
- }
-
- return tape->first_frame;
-}
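-
-/*
- * Editor's note (not from the original file): the parsing above matches
- * the 20-byte READ POSITION (short form) data:
- *
- *   buf[0]     flags: 0x80 BOP, 0x40 EOP, 0x04 BPU (position unknown)
- *   buf[1]     partition number
- *   buf[4..7]  first block location, big-endian
- */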
-
-static void idetape_create_locate_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- unsigned int block, u8 partition, int skip)
-{
- ide_init_pc(pc);
- pc->c[0] = POSITION_TO_ELEMENT;
- pc->c[1] = 2;
- put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
- pc->c[8] = partition;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->chrdev_dir != IDETAPE_DIR_READ)
- return;
-
- clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
- tape->valid = 0;
- if (tape->buf != NULL) {
- kfree(tape->buf);
- tape->buf = NULL;
- }
-
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-}
-
-/*
- * Position the tape to the requested block using the LOCATE packet command.
- * A READ POSITION command is then issued to check where we are positioned. Like
- * all higher level operations, we queue the commands at the tail of the request
- * queue and wait for their completion.
- */
-static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
- u8 partition, int skip)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- int ret;
- struct ide_atapi_pc pc;
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- __ide_tape_discard_merge_buffer(drive);
- idetape_wait_ready(drive, 60 * 5 * HZ);
- idetape_create_locate_cmd(drive, &pc, block, partition, skip);
- ret = ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- if (ret)
- return ret;
-
- ret = ide_tape_read_position(drive);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
- int restore_position)
-{
- idetape_tape_t *tape = drive->driver_data;
- int seek, position;
-
- __ide_tape_discard_merge_buffer(drive);
- if (restore_position) {
- position = ide_tape_read_position(drive);
- seek = position > 0 ? position : 0;
- if (idetape_position_tape(drive, seek, 0, 0)) {
- printk(KERN_INFO "ide-tape: %s: position_tape failed in"
- " %s\n", tape->name, __func__);
- return;
- }
- }
-}
-
-/*
- * Generate a read/write request for the block device interface and wait for it
- * to be serviced.
- */
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct request *rq;
- int ret;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, size: %d", cmd, size);
-
- BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
- BUG_ON(size < 0 || size % tape->blk_size);
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd[13] = cmd;
- rq->rq_disk = tape->disk;
- rq->__sector = tape->first_frame;
-
- if (size) {
- ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
- GFP_NOIO);
- if (ret)
- goto out_put;
- }
-
- blk_execute_rq(tape->disk, rq, 0);
-
- /* calculate the number of transferred bytes and update buffer state */
- size -= scsi_req(rq)->resid_len;
- tape->cur = tape->buf;
- if (cmd == REQ_IDETAPE_READ)
- tape->valid = size;
- else
- tape->valid = 0;
-
- ret = size;
- if (scsi_req(rq)->result == IDE_DRV_ERROR_GENERAL)
- ret = -EIO;
-out_put:
- blk_put_request(rq);
- return ret;
-}
-
-static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = INQUIRY;
- pc->c[4] = 254;
- pc->req_xfer = 254;
-}
-
-static void idetape_create_rewind_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = REZERO_UNIT;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = ERASE;
- pc->c[1] = 1;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
-{
- ide_init_pc(pc);
- pc->c[0] = SPACE;
- put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
- pc->c[1] = cmd;
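-	/*
-	 * Note: the big-endian count was written at c[1..4] above and c[1]
-	 * is then overwritten with the space code, leaving the low 24 bits
-	 * of the count in c[2..4] - the layout SPACE expects.
-	 */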
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
- " but we are not writing.\n");
- return;
- }
- if (tape->buf) {
- size_t aligned = roundup(tape->valid, tape->blk_size);
-
- memset(tape->cur, 0, aligned - tape->valid);
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
- kfree(tape->buf);
- tape->buf = NULL;
- }
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-}
-
-static int idetape_init_rw(ide_drive_t *drive, int dir)
-{
- idetape_tape_t *tape = drive->driver_data;
- int rc;
-
- BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
-
- if (tape->chrdev_dir == dir)
- return 0;
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
-
- if (tape->buf || tape->valid) {
- printk(KERN_ERR "ide-tape: valid should be 0 now\n");
- tape->valid = 0;
- }
-
- tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
- if (!tape->buf)
- return -ENOMEM;
- tape->chrdev_dir = dir;
- tape->cur = tape->buf;
-
-	/*
-	 * Issue a zero-length rw command to ensure that the DSC handshake
-	 * is switched from completion mode to buffer available mode. There
-	 * is no point in issuing this if DSC overlap isn't supported; some
-	 * drives (Seagate STT3401A) will return an error.
-	 */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
- : REQ_IDETAPE_WRITE;
-
- rc = idetape_queue_rw_tail(drive, cmd, 0);
- if (rc < 0) {
- kfree(tape->buf);
- tape->buf = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return rc;
- }
- }
-
- return 0;
-}
-
-static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- memset(tape->buf, 0, tape->buffer_size);
-
- while (bcount) {
- unsigned int count = min(tape->buffer_size, bcount);
-
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
- bcount -= count;
- }
-}
-
-/*
- * Rewinds the tape to the Beginning Of the current Partition (BOP). We
- * currently support only one partition.
- */
-static int idetape_rewind_tape(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int ret;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- idetape_create_rewind_cmd(drive, &pc);
- ret = ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- if (ret)
- return ret;
-
- ret = ide_tape_read_position(drive);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-/* mtio.h compatible commands should be issued to the chrdev interface. */
-static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
- unsigned long arg)
-{
- idetape_tape_t *tape = drive->driver_data;
- void __user *argp = (void __user *)arg;
-
- struct idetape_config {
- int dsc_rw_frequency;
- int dsc_media_access_frequency;
- int nr_stages;
- } config;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%04x", cmd);
-
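-	/*
-	 * 0x0340 sets and 0x0350 reads the driver's private DSC tuning
-	 * configuration; these are legacy ide-tape ioctl numbers.
-	 */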
- switch (cmd) {
- case 0x0340:
- if (copy_from_user(&config, argp, sizeof(config)))
- return -EFAULT;
- tape->best_dsc_rw_freq = config.dsc_rw_frequency;
- break;
- case 0x0350:
- memset(&config, 0, sizeof(config));
- config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
- config.nr_stages = 1;
- if (copy_to_user(argp, &config, sizeof(config)))
- return -EFAULT;
- break;
- default:
- return -EIO;
- }
- return 0;
-}
-
-static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
- int mt_count)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int retval, count = 0;
- int sprev = !!(tape->caps[4] & 0x20);
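-	/* caps[4] bit 5: drive supports spacing in the reverse direction */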
-
-
- ide_debug_log(IDE_DBG_FUNC, "mt_op: %d, mt_count: %d", mt_op, mt_count);
-
- if (mt_count == 0)
- return 0;
- if (MTBSF == mt_op || MTBSFM == mt_op) {
- if (!sprev)
- return -EIO;
- mt_count = -mt_count;
- }
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- tape->valid = 0;
- if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
- &drive->atapi_flags))
- ++count;
- ide_tape_discard_merge_buffer(drive, 0);
- }
-
- switch (mt_op) {
- case MTFSF:
- case MTBSF:
- idetape_create_space_cmd(&pc, mt_count - count,
- IDETAPE_SPACE_OVER_FILEMARK);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTFSFM:
- case MTBSFM:
- if (!sprev)
- return -EIO;
- retval = idetape_space_over_filemarks(drive, MTFSF,
- mt_count - count);
- if (retval)
- return retval;
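-		/*
-		 * The spacing above leaves us just past the target filemark;
-		 * step back over it (MTFSFM) or forward over it again
-		 * (MTBSFM) so that we end up on its other side.
-		 */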
- count = (MTBSFM == mt_op ? 1 : -1);
- return idetape_space_over_filemarks(drive, MTFSF, count);
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
- mt_op);
- return -EIO;
- }
-}
-
-/*
- * Our character device read / write functions.
- *
- * The tape is optimized to maximize throughput when it is transferring an
- * integral number of the "continuous transfer limit", which is a parameter of
- * the specific tape (26kB on my particular tape, 32kB for Onstream).
- *
- * As of version 1.3 of the driver, the character device provides an abstract
- * continuous view of the media - any mix of block sizes (even 1 byte) on the
- * same backup/restore procedure is supported. The driver will internally
- * convert the requests to the recommended transfer unit, so that a mismatch
- * between the user's block size and the recommended size will only result in
- * (slightly) increased driver overhead, and will no longer hurt performance.
- * This is not applicable to Onstream.
- */
-static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- size_t done = 0;
- ssize_t ret = 0;
- int rc;
-
- ide_debug_log(IDE_DBG_FUNC, "count %zd", count);
-
- if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
- if (count > tape->blk_size &&
- (count % tape->blk_size) == 0)
- tape->user_bs_factor = count / tape->blk_size;
- }
-
- rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
- if (rc < 0)
- return rc;
-
- while (done < count) {
- size_t todo;
-
- /* refill if staging buffer is empty */
- if (!tape->valid) {
- /* If we are at a filemark, nothing more to read */
- if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
- &drive->atapi_flags))
- break;
- /* read */
- if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
- tape->buffer_size) <= 0)
- break;
- }
-
- /* copy out */
- todo = min_t(size_t, count - done, tape->valid);
- if (copy_to_user(buf + done, tape->cur, todo))
- ret = -EFAULT;
-
- tape->cur += todo;
- tape->valid -= todo;
- done += todo;
- }
-
- if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
- idetape_space_over_filemarks(drive, MTFSF, 1);
- return 0;
- }
-
- return ret ? ret : done;
-}
-
-static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- size_t done = 0;
- ssize_t ret = 0;
- int rc;
-
- /* The drive is write protected. */
- if (tape->write_prot)
- return -EACCES;
-
- ide_debug_log(IDE_DBG_FUNC, "count %zd", count);
-
- /* Initialize write operation */
- rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
- if (rc < 0)
- return rc;
-
- while (done < count) {
- size_t todo;
-
-		/* flush if the staging buffer is full */
-		if (tape->valid == tape->buffer_size &&
-		    idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
-					  tape->buffer_size) <= 0)
-			return done ? done : -EIO;
-
- /* copy in */
- todo = min_t(size_t, count - done,
- tape->buffer_size - tape->valid);
- if (copy_from_user(tape->cur, buf + done, todo))
- ret = -EFAULT;
-
- tape->cur += todo;
- tape->valid += todo;
- done += todo;
- }
-
- return ret ? ret : done;
-}
-
-static int idetape_write_filemark(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct ide_atapi_pc pc;
-
- /* Write a filemark */
- idetape_create_write_filemark_cmd(drive, &pc, 1);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, NULL, 0)) {
- printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
- return -EIO;
- }
- return 0;
-}
-
-/*
- * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
- * requested.
- *
- * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
- * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
- * usually not supported.
- *
- * The following commands are currently not supported:
- *
- * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
- * MT_ST_WRITE_THRESHOLD.
- */
-static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int i, retval;
-
- ide_debug_log(IDE_DBG_FUNC, "MTIOCTOP ioctl: mt_op: %d, mt_count: %d",
- mt_op, mt_count);
-
- switch (mt_op) {
- case MTFSF:
- case MTFSFM:
- case MTBSF:
- case MTBSFM:
- if (!mt_count)
- return 0;
- return idetape_space_over_filemarks(drive, mt_op, mt_count);
- default:
- break;
- }
-
- switch (mt_op) {
- case MTWEOF:
- if (tape->write_prot)
- return -EACCES;
- ide_tape_discard_merge_buffer(drive, 1);
- for (i = 0; i < mt_count; i++) {
- retval = idetape_write_filemark(drive);
- if (retval)
- return retval;
- }
- return 0;
- case MTREW:
- ide_tape_discard_merge_buffer(drive, 0);
- if (idetape_rewind_tape(drive))
- return -EIO;
- return 0;
- case MTLOAD:
- ide_tape_discard_merge_buffer(drive, 0);
- return ide_do_start_stop(drive, disk, IDETAPE_LU_LOAD_MASK);
- case MTUNLOAD:
- case MTOFFL:
- /*
- * If door is locked, attempt to unlock before
- * attempting to eject.
- */
- if (tape->door_locked) {
- if (!ide_set_media_lock(drive, disk, 0))
- tape->door_locked = DOOR_UNLOCKED;
- }
- ide_tape_discard_merge_buffer(drive, 0);
- retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
- if (!retval)
- clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
- &drive->atapi_flags);
- return retval;
- case MTNOP:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_flush_tape_buffers(drive);
- case MTRETEN:
- ide_tape_discard_merge_buffer(drive, 0);
- return ide_do_start_stop(drive, disk,
- IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
- case MTEOM:
- idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTERASE:
- (void)idetape_rewind_tape(drive);
- idetape_create_erase_cmd(&pc);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTSETBLK:
- if (mt_count) {
- if (mt_count < tape->blk_size ||
- mt_count % tape->blk_size)
- return -EIO;
- tape->user_bs_factor = mt_count / tape->blk_size;
- clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
- &drive->atapi_flags);
- } else
- set_bit(ilog2(IDE_AFLAG_DETECT_BS),
- &drive->atapi_flags);
- return 0;
- case MTSEEK:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_position_tape(drive,
- mt_count * tape->user_bs_factor, tape->partition, 0);
- case MTSETPART:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_position_tape(drive, 0, mt_count, 0);
- case MTFSR:
- case MTBSR:
- case MTLOCK:
- retval = ide_set_media_lock(drive, disk, 1);
- if (retval)
- return retval;
- tape->door_locked = DOOR_EXPLICITLY_LOCKED;
- return 0;
- case MTUNLOCK:
- retval = ide_set_media_lock(drive, disk, 0);
- if (retval)
- return retval;
- tape->door_locked = DOOR_UNLOCKED;
- return 0;
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
- mt_op);
- return -EIO;
- }
-}
-
-/*
- * Our character device ioctls. General mtio.h magnetic io commands are
- * supported here, and not in the corresponding block interface. Our own
- * ide-tape ioctls are supported on both interfaces.
- */
-static long do_idetape_chrdev_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- struct mtop mtop;
- struct mtget mtget;
- struct mtpos mtpos;
- int block_offset = 0, position = tape->first_frame;
- void __user *argp = (void __user *)arg;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x", cmd);
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
- if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = tape->valid /
- (tape->blk_size * tape->user_bs_factor);
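-		/*
-		 * tape->valid bytes are still staged in the driver; convert
-		 * them to user blocks and subtract from the drive-reported
-		 * position so MTIOCGET/MTIOCPOS reflect the logical position.
-		 */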
- position = ide_tape_read_position(drive);
- if (position < 0)
- return -EIO;
- }
- switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
- return -EFAULT;
- return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
- case MTIOCGET:
- memset(&mtget, 0, sizeof(struct mtget));
- mtget.mt_type = MT_ISSCSI2;
- mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
- mtget.mt_dsreg =
- ((tape->blk_size * tape->user_bs_factor)
- << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
-
- if (tape->drv_write_prot)
- mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
-
- return put_user_mtget(argp, &mtget);
- case MTIOCPOS:
- mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- return put_user_mtpos(argp, &mtpos);
- default:
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- return idetape_blkdev_ioctl(drive, cmd, arg);
- }
-}
-
-static long idetape_chrdev_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long ret;
- mutex_lock(&ide_tape_mutex);
- ret = do_idetape_chrdev_ioctl(file, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
- return ret;
-}
-
-static long idetape_chrdev_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (cmd == MTIOCPOS32)
- cmd = MTIOCPOS;
- else if (cmd == MTIOCGET32)
- cmd = MTIOCGET;
-
- mutex_lock(&ide_tape_mutex);
- ret = do_idetape_chrdev_ioctl(file, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
- return ret;
-}
-
-/*
- * Do a mode sense page 0 with block descriptor and, if it succeeds, set the
- * tape block size to the reported value.
- */
-static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[12];
-
- idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
- if (tape->blk_size == 0) {
- printk(KERN_WARNING "ide-tape: Cannot deal with zero "
- "block size, assuming 32k\n");
- tape->blk_size = 32768;
- }
- return;
- }
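-	/*
-	 * Parse the mode sense data: buf[2] bit 7 of the header is the
-	 * write-protect flag; the block descriptor starts at buf[4] and
-	 * carries a 24-bit big-endian block length in its bytes 5..7.
-	 */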
- tape->blk_size = (buf[4 + 5] << 16) +
- (buf[4 + 6] << 8) +
- buf[4 + 7];
- tape->drv_write_prot = (buf[2] & 0x80) >> 7;
-
- ide_debug_log(IDE_DBG_FUNC, "blk_size: %d, write_prot: %d",
- tape->blk_size, tape->drv_write_prot);
-}
-
-static int idetape_chrdev_open(struct inode *inode, struct file *filp)
-{
- unsigned int minor = iminor(inode), i = minor & ~0xc0;
- ide_drive_t *drive;
- idetape_tape_t *tape;
- int retval;
-
- if (i >= MAX_HWIFS * MAX_DRIVES)
- return -ENXIO;
-
- mutex_lock(&idetape_chrdev_mutex);
-
- tape = ide_tape_get(NULL, true, i);
- if (!tape) {
- mutex_unlock(&idetape_chrdev_mutex);
- return -ENXIO;
- }
-
- drive = tape->drive;
- filp->private_data = tape;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- /*
- * We really want to do nonseekable_open(inode, filp); here, but some
- * versions of tar incorrectly call lseek on tapes and bail out if that
- * fails. So we disallow pread() and pwrite(), but permit lseeks.
- */
- filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-
-
- if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
- retval = -EBUSY;
- goto out_put_tape;
- }
-
- retval = idetape_wait_ready(drive, 60 * HZ);
- if (retval) {
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
- goto out_put_tape;
- }
-
- ide_tape_read_position(drive);
- if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
- (void)idetape_rewind_tape(drive);
-
- /* Read block size and write protect status from drive. */
- ide_tape_get_bsize_from_bdesc(drive);
-
- /* Set write protect flag if device is opened as read-only. */
- if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
- tape->write_prot = 1;
- else
- tape->write_prot = tape->drv_write_prot;
-
- /* Make sure drive isn't write protected if user wants to write. */
- if (tape->write_prot) {
- if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
- (filp->f_flags & O_ACCMODE) == O_RDWR) {
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- retval = -EROFS;
- goto out_put_tape;
- }
- }
-
- /* Lock the tape drive door so user can't eject. */
- if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
- if (!ide_set_media_lock(drive, tape->disk, 1)) {
- if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
- tape->door_locked = DOOR_LOCKED;
- }
- }
- mutex_unlock(&idetape_chrdev_mutex);
-
- return 0;
-
-out_put_tape:
- ide_tape_put(tape);
-
- mutex_unlock(&idetape_chrdev_mutex);
-
- return retval;
-}
-
-static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_tape_flush_merge_buffer(drive);
- tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
- if (tape->buf != NULL) {
- idetape_pad_zeros(drive, tape->blk_size *
- (tape->user_bs_factor - 1));
- kfree(tape->buf);
- tape->buf = NULL;
- }
- idetape_write_filemark(drive);
- idetape_flush_tape_buffers(drive);
- idetape_flush_tape_buffers(drive);
-}
-
-static int idetape_chrdev_release(struct inode *inode, struct file *filp)
-{
- struct ide_tape_obj *tape = filp->private_data;
- ide_drive_t *drive = tape->drive;
- unsigned int minor = iminor(inode);
-
- mutex_lock(&idetape_chrdev_mutex);
-
- tape = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
- idetape_write_release(drive, minor);
- if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- if (minor < 128)
- ide_tape_discard_merge_buffer(drive, 1);
- }
-
- if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
- &drive->atapi_flags))
- (void) idetape_rewind_tape(drive);
-
- if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
- if (tape->door_locked == DOOR_LOCKED) {
- if (!ide_set_media_lock(drive, tape->disk, 0))
- tape->door_locked = DOOR_UNLOCKED;
- }
- }
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- ide_tape_put(tape);
-
- mutex_unlock(&idetape_chrdev_mutex);
-
- return 0;
-}
-
-static void idetape_get_inquiry_results(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 pc_buf[256];
- char fw_rev[4], vendor_id[8], product_id[16];
-
- idetape_create_inquiry_cmd(&pc);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, pc_buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
- tape->name);
- return;
- }
- memcpy(vendor_id, &pc_buf[8], 8);
- memcpy(product_id, &pc_buf[16], 16);
- memcpy(fw_rev, &pc_buf[32], 4);
-
- ide_fixstring(vendor_id, 8, 0);
- ide_fixstring(product_id, 16, 0);
- ide_fixstring(fw_rev, 4, 0);
-
- printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
- drive->name, tape->name, vendor_id, product_id, fw_rev);
-}
-
-/*
- * Ask the tape about its various parameters. In particular, we will adjust our
- * data transfer buffer size to the recommended value as returned by the tape.
- */
-static void idetape_get_mode_sense_results(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[24], *caps;
- u8 speed, max_speed;
-
- idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
- " some default values\n");
- tape->blk_size = 512;
- put_unaligned(52, (u16 *)&tape->caps[12]);
- put_unaligned(540, (u16 *)&tape->caps[14]);
- put_unaligned(6*52, (u16 *)&tape->caps[16]);
- return;
- }
- caps = buf + 4 + buf[3];
-
- /* convert to host order and save for later use */
- speed = be16_to_cpup((__be16 *)&caps[14]);
- max_speed = be16_to_cpup((__be16 *)&caps[8]);
-
- *(u16 *)&caps[8] = max_speed;
- *(u16 *)&caps[12] = be16_to_cpup((__be16 *)&caps[12]);
- *(u16 *)&caps[14] = speed;
- *(u16 *)&caps[16] = be16_to_cpup((__be16 *)&caps[16]);
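-	/*
-	 * Note: caps[] is patched in place, so from here on the speed and
-	 * buffer-size words are native-endian, matching the later
-	 * *(u16 *)&tape->caps[...] accesses.
-	 */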
-
- if (!speed) {
- printk(KERN_INFO "ide-tape: %s: invalid tape speed "
- "(assuming 650KB/sec)\n", drive->name);
- *(u16 *)&caps[14] = 650;
- }
- if (!max_speed) {
- printk(KERN_INFO "ide-tape: %s: invalid max_speed "
- "(assuming 650KB/sec)\n", drive->name);
- *(u16 *)&caps[8] = 650;
- }
-
- memcpy(&tape->caps, caps, 20);
-
- /* device lacks locking support according to capabilities page */
- if ((caps[6] & 1) == 0)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-
- if (caps[7] & 0x02)
- tape->blk_size = 512;
- else if (caps[7] & 0x04)
- tape->blk_size = 1024;
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-#define ide_tape_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- idetape_tape_t *tape = drive->driver_data; \
- return tape->field; \
-}
-
-#define ide_tape_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- idetape_tape_t *tape = drive->driver_data; \
- tape->field = arg; \
- return 0; \
-}
-
-#define ide_tape_devset_rw_field(_name, _field) \
-ide_tape_devset_get(_name, _field) \
-ide_tape_devset_set(_name, _field) \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_tape_devset_r_field(_name, _field) \
-ide_tape_devset_get(_name, _field) \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-static int mulf_tdsc(ide_drive_t *drive) { return 1000; }
-static int divf_tdsc(ide_drive_t *drive) { return HZ; }
-static int divf_buffer(ide_drive_t *drive) { return 2; }
-static int divf_buffer_size(ide_drive_t *drive) { return 1024; }
-
-ide_devset_rw_flag(dsc_overlap, IDE_DFLAG_DSC_OVERLAP);
-
-ide_tape_devset_rw_field(tdsc, best_dsc_rw_freq);
-
-ide_tape_devset_r_field(avg_speed, avg_speed);
-ide_tape_devset_r_field(speed, caps[14]);
-ide_tape_devset_r_field(buffer, caps[16]);
-ide_tape_devset_r_field(buffer_size, buffer_size);
-
-static const struct ide_proc_devset idetape_settings[] = {
- __IDE_PROC_DEVSET(avg_speed, 0, 0xffff, NULL, NULL),
- __IDE_PROC_DEVSET(buffer, 0, 0xffff, NULL, divf_buffer),
- __IDE_PROC_DEVSET(buffer_size, 0, 0xffff, NULL, divf_buffer_size),
- __IDE_PROC_DEVSET(dsc_overlap, 0, 1, NULL, NULL),
- __IDE_PROC_DEVSET(speed, 0, 0xffff, NULL, NULL),
- __IDE_PROC_DEVSET(tdsc, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX,
- mulf_tdsc, divf_tdsc),
- { NULL },
-};
-#endif
-
-/*
- * The function below is called to:
- *
- * 1. Initialize our various state variables.
- * 2. Ask the tape for its capabilities.
- * 3. Allocate a buffer which will be used for data transfer. The buffer size
- * is chosen based on the recommendation which we received in step 2.
- *
- * Note that at this point ide.c already assigned us an irq, so that we can
- * queue requests here and wait for their completion.
- */
-static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
-{
- unsigned long t;
- int speed;
- u16 *ctl = (u16 *)&tape->caps[12];
-
- ide_debug_log(IDE_DBG_FUNC, "minor: %d", minor);
-
- drive->pc_callback = ide_tape_callback;
-
- drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
-
- if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
- printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
- tape->name);
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
- }
-
- /* Seagate Travan drives do not support DSC overlap. */
- if (strstr((char *)&drive->id[ATA_ID_PROD], "Seagate STT3401"))
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
- tape->minor = minor;
- tape->name[0] = 'h';
- tape->name[1] = 't';
- tape->name[2] = '0' + minor;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- idetape_get_inquiry_results(drive);
- idetape_get_mode_sense_results(drive);
- ide_tape_get_bsize_from_bdesc(drive);
- tape->user_bs_factor = 1;
- tape->buffer_size = *ctl * tape->blk_size;
- while (tape->buffer_size > 0xffff) {
- printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
- *ctl /= 2;
- tape->buffer_size = *ctl * tape->blk_size;
- }
-
- /* select the "best" DSC read/write polling freq */
- speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
-
- t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
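-	/*
-	 * t is the time (in jiffies) the drive needs to consume a FIFO
-	 * threshold's worth of the buffer at its nominal speed (KB/s);
-	 * polling DSC much faster than this only burns CPU, much slower
-	 * risks letting the drive fall out of streaming.
-	 */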
-
- /*
- * Ensure that the number we got makes sense; limit it within
- * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
- */
- tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
- IDETAPE_DSC_RW_MAX);
- printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
- "%ums tDSC%s\n",
- drive->name, tape->name, *(u16 *)&tape->caps[14],
- (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
- tape->buffer_size / 1024,
- jiffies_to_msecs(tape->best_dsc_rw_freq),
- (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
-
- ide_proc_register_driver(drive, tape->driver);
-}
-
-static void ide_tape_remove(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_proc_unregister_driver(drive, tape->driver);
- device_del(&tape->dev);
-
- mutex_lock(&idetape_ref_mutex);
- put_device(&tape->dev);
- mutex_unlock(&idetape_ref_mutex);
-}
-
-static void ide_tape_release(struct device *dev)
-{
- struct ide_tape_obj *tape = to_ide_drv(dev, ide_tape_obj);
- ide_drive_t *drive = tape->drive;
- struct gendisk *g = tape->disk;
-
- BUG_ON(tape->valid);
-
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
- drive->driver_data = NULL;
- device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
- device_destroy(idetape_sysfs_class,
- MKDEV(IDETAPE_MAJOR, tape->minor + 128));
- idetape_devs[tape->minor] = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(tape);
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-static int idetape_name_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
- idetape_tape_t *tape = drive->driver_data;
-
- seq_printf(m, "%s\n", tape->name);
- return 0;
-}
-
-static ide_proc_entry_t idetape_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, ide_capacity_proc_show },
- { "name", S_IFREG|S_IRUGO, idetape_name_proc_show },
- {}
-};
-
-static ide_proc_entry_t *ide_tape_proc_entries(ide_drive_t *drive)
-{
- return idetape_proc;
-}
-
-static const struct ide_proc_devset *ide_tape_proc_devsets(ide_drive_t *drive)
-{
- return idetape_settings;
-}
-#endif
-
-static int ide_tape_probe(ide_drive_t *);
-
-static struct ide_driver idetape_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-tape",
- .bus = &ide_bus_type,
- },
- .probe = ide_tape_probe,
- .remove = ide_tape_remove,
- .version = IDETAPE_VERSION,
- .do_request = idetape_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_tape_proc_entries,
- .proc_devsets = ide_tape_proc_devsets,
-#endif
-};
-
-/* Our character device supporting functions, passed to register_chrdev. */
-static const struct file_operations idetape_fops = {
- .owner = THIS_MODULE,
- .read = idetape_chrdev_read,
- .write = idetape_chrdev_write,
- .unlocked_ioctl = idetape_chrdev_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idetape_chrdev_compat_ioctl : NULL,
- .open = idetape_chrdev_open,
- .release = idetape_chrdev_release,
- .llseek = noop_llseek,
-};
-
-static int idetape_open(struct block_device *bdev, fmode_t mode)
-{
- struct ide_tape_obj *tape;
-
- mutex_lock(&ide_tape_mutex);
- tape = ide_tape_get(bdev->bd_disk, false, 0);
- mutex_unlock(&ide_tape_mutex);
-
- if (!tape)
- return -ENXIO;
-
- return 0;
-}
-
-static void idetape_release(struct gendisk *disk, fmode_t mode)
-{
- struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
-
- mutex_lock(&ide_tape_mutex);
- ide_tape_put(tape);
- mutex_unlock(&ide_tape_mutex);
-}
-
-static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
- ide_drive_t *drive = tape->drive;
- int err;
-
- mutex_lock(&ide_tape_mutex);
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = idetape_blkdev_ioctl(drive, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
-
- return err;
-}
-
-static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
-	if (cmd == 0x0340 || cmd == 0x0350)
- arg = (unsigned long)compat_ptr(arg);
-
- return idetape_ioctl(bdev, mode, cmd, arg);
-}
-
-static const struct block_device_operations idetape_block_ops = {
- .owner = THIS_MODULE,
- .open = idetape_open,
- .release = idetape_release,
- .ioctl = idetape_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idetape_compat_ioctl : NULL,
-};
-
-static int ide_tape_probe(ide_drive_t *drive)
-{
- idetape_tape_t *tape;
- struct gendisk *g;
- int minor;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (!strstr(DRV_NAME, drive->driver_req))
- goto failed;
-
- if (drive->media != ide_tape)
- goto failed;
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) &&
- ide_check_atapi_device(drive, DRV_NAME) == 0) {
- printk(KERN_ERR "ide-tape: %s: not supported by this version of"
- " the driver\n", drive->name);
- goto failed;
- }
- tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
- if (tape == NULL) {
- printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk(1 << PARTN_BITS);
- if (!g)
- goto out_free_tape;
-
- ide_init_disk(g, drive);
-
- tape->dev.parent = &drive->gendev;
- tape->dev.release = ide_tape_release;
- dev_set_name(&tape->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&tape->dev))
- goto out_free_disk;
-
- tape->drive = drive;
- tape->driver = &idetape_driver;
- tape->disk = g;
-
- g->private_data = &tape->driver;
-
- drive->driver_data = tape;
-
- mutex_lock(&idetape_ref_mutex);
- for (minor = 0; idetape_devs[minor]; minor++)
- ;
- idetape_devs[minor] = tape;
- mutex_unlock(&idetape_ref_mutex);
-
- idetape_setup(drive, tape, minor);
-
- device_create(idetape_sysfs_class, &drive->gendev,
- MKDEV(IDETAPE_MAJOR, minor), NULL, "%s", tape->name);
- device_create(idetape_sysfs_class, &drive->gendev,
- MKDEV(IDETAPE_MAJOR, minor + 128), NULL,
- "n%s", tape->name);
-
- g->fops = &idetape_block_ops;
-
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_tape:
- kfree(tape);
-failed:
- return -ENODEV;
-}
-
-static void __exit idetape_exit(void)
-{
- driver_unregister(&idetape_driver.gen_driver);
- class_destroy(idetape_sysfs_class);
- unregister_chrdev(IDETAPE_MAJOR, "ht");
-}
-
-static int __init idetape_init(void)
-{
- int error = 1;
- idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
- if (IS_ERR(idetape_sysfs_class)) {
- idetape_sysfs_class = NULL;
- printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
- error = -EBUSY;
- goto out;
- }
-
- if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
- printk(KERN_ERR "ide-tape: Failed to register chrdev"
- " interface\n");
- error = -EBUSY;
- goto out_free_class;
- }
-
- error = driver_register(&idetape_driver.gen_driver);
- if (error)
- goto out_free_chrdev;
-
- return 0;
-
-out_free_chrdev:
- unregister_chrdev(IDETAPE_MAJOR, "ht");
-out_free_class:
- class_destroy(idetape_sysfs_class);
-out:
- return error;
-}
-
-MODULE_ALIAS("ide:*m-tape*");
-module_init(idetape_init);
-module_exit(idetape_exit);
-MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
-MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
deleted file mode 100644
index 6665fc4724b9..000000000000
--- a/drivers/ide/ide-taskfile.c
+++ /dev/null
@@ -1,668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2001-2002 Klaus Smolin
- * IBM Storage Technology Division
- * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
- *
- * The big, the bad and the ugly.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/nmi.h>
-#include <linux/scatterlist.h>
-#include <linux/uaccess.h>
-
-#include <asm/io.h>
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
-
- /* Be sure we're looking at the low order bytes */
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);
-
- if (cmd->tf_flags & IDE_TFLAG_LBA48) {
- tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);
-
- tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
- }
-}
-
-void ide_tf_dump(const char *s, struct ide_cmd *cmd)
-{
-#ifdef DEBUG
- printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
- "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
- s, cmd->tf.feature, cmd->tf.nsect,
- cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
- cmd->tf.device, cmd->tf.command);
- printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
- s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
-#endif
-}
-
-int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.nsect = 0x01;
- if (drive->media == ide_disk)
- cmd.tf.command = ATA_CMD_ID_ATA;
- else
- cmd.tf.command = ATA_CMD_ID_ATAPI;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_PIO;
-
- return ide_raw_taskfile(drive, &cmd, buf, 1);
-}
-
-static ide_startstop_t task_no_data_intr(ide_drive_t *);
-static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
-static ide_startstop_t task_pio_intr(ide_drive_t *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct ide_taskfile *tf = &cmd->tf;
- ide_handler_t *handler = NULL;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- const struct ide_dma_ops *dma_ops = hwif->dma_ops;
-
- if (orig_cmd->protocol == ATA_PROT_PIO &&
- (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
- drive->mult_count == 0) {
- pr_err("%s: multimode not set!\n", drive->name);
- return ide_stopped;
- }
-
- if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
- orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;
-
- memcpy(cmd, orig_cmd, sizeof(*cmd));
-
- if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
- ide_tf_dump(drive->name, cmd);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
- u8 data[2] = { cmd->tf.data, cmd->hob.data };
-
- tp_ops->output_data(drive, cmd, data, 2);
- }
-
- if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
- u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
- 0xE0 : 0xEF;
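-			/*
-			 * LBA48 keeps only the control bits in the device
-			 * register (0xE0); otherwise 0xEF clears just the
-			 * DEV bit, which drive->select supplies below.
-			 */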
-
- if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
- cmd->tf.device &= HIHI;
- cmd->tf.device |= drive->select;
- }
-
- tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
- tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf);
- }
-
- switch (cmd->protocol) {
- case ATA_PROT_PIO:
- if (cmd->tf_flags & IDE_TFLAG_WRITE) {
- tp_ops->exec_command(hwif, tf->command);
- ndelay(400); /* FIXME */
- return pre_task_out_intr(drive, cmd);
- }
- handler = task_pio_intr;
- fallthrough;
- case ATA_PROT_NODATA:
- if (handler == NULL)
- handler = task_no_data_intr;
- ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
- return ide_started;
- case ATA_PROT_DMA:
- if (ide_dma_prepare(drive, cmd))
- return ide_stopped;
- hwif->expiry = dma_ops->dma_timer_expiry;
- ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
- dma_ops->dma_start(drive);
- fallthrough;
- default:
- return ide_started;
- }
-}
-EXPORT_SYMBOL_GPL(do_rw_taskfile);
-
-static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct ide_taskfile *tf = &cmd->tf;
- int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
- int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
- u8 stat;
-
- local_irq_enable_in_hardirq();
-
- while (1) {
- stat = hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0 || retries-- == 0)
- break;
- udelay(10);
-	}
-
- if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
- if (custom && tf->command == ATA_CMD_SET_MULTI) {
- drive->mult_req = drive->mult_count = 0;
- drive->special_flags |= IDE_SFLAG_RECALIBRATE;
- (void)ide_dump_status(drive, __func__, stat);
- return ide_stopped;
- } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
- if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
- ide_set_handler(drive, &task_no_data_intr,
- WAIT_WORSTCASE);
- return ide_started;
- }
- }
- return ide_error(drive, "task_no_data_intr", stat);
- }
-
- if (custom && tf->command == ATA_CMD_SET_MULTI)
- drive->mult_count = drive->mult_req;
-
- if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
- tf->command == ATA_CMD_CHK_POWER) {
- struct request *rq = hwif->rq;
-
- if (ata_pm_request(rq))
- ide_complete_pm_rq(drive, rq);
- else
- ide_finish_cmd(drive, cmd, stat);
- }
-
- return ide_stopped;
-}
-
-static u8 wait_drive_not_busy(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- int retries;
- u8 stat;
-
-	/*
-	 * The last sector was transferred; wait until the device is ready.
-	 * This can take up to 6 ms on some ATAPI devices, so wait for at
-	 * most 10 ms (1000 retries of 10 us each).
-	 */
- for (retries = 0; retries < 1000; retries++) {
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY)
- udelay(10);
- else
- break;
- }
-
- if (stat & ATA_BUSY)
- pr_err("%s: drive still BUSY!\n", drive->name);
-
- return stat;
-}
-
-void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
- unsigned int write, unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
- struct scatterlist *cursg = cmd->cursg;
- struct page *page;
- unsigned int offset;
- u8 *buf;
-
- if (cursg == NULL)
- cursg = cmd->cursg = sg;
-
- while (len) {
- unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
-
- page = sg_page(cursg);
- offset = cursg->offset + cmd->cursg_ofs;
-
- /* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
- offset %= PAGE_SIZE;
-
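-		/* stay within the single page kmap_atomic() will map */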
- nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));
-
- buf = kmap_atomic(page) + offset;
-
- cmd->nleft -= nr_bytes;
- cmd->cursg_ofs += nr_bytes;
-
- if (cmd->cursg_ofs == cursg->length) {
- cursg = cmd->cursg = sg_next(cmd->cursg);
- cmd->cursg_ofs = 0;
- }
-
- /* do the actual data transfer */
- if (write)
- hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
- else
- hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
-
- kunmap_atomic(buf);
-
- len -= nr_bytes;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pio_bytes);
-
-static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
- unsigned int write)
-{
- unsigned int nr_bytes;
-
- u8 saved_io_32bit = drive->io_32bit;
-
- if (cmd->tf_flags & IDE_TFLAG_FS)
- scsi_req(cmd->rq)->result = 0;
-
- if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
- drive->io_32bit = 0;
-
- touch_softlockup_watchdog();
-
- if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
- nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
- else
- nr_bytes = SECTOR_SIZE;
-
- ide_pio_bytes(drive, cmd, write, nr_bytes);
-
- drive->io_32bit = saved_io_32bit;
-}
-
-static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (cmd->tf_flags & IDE_TFLAG_FS) {
- int nr_bytes = cmd->nbytes - cmd->nleft;
-
- if (cmd->protocol == ATA_PROT_PIO &&
- ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
- if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
- nr_bytes -= drive->mult_count << 9;
- else
- nr_bytes -= SECTOR_SIZE;
- }
-
- if (nr_bytes > 0)
- ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
- }
-}
-
-void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
-{
- struct request *rq = drive->hwif->rq;
- u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
- u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);
-
- ide_complete_cmd(drive, cmd, stat, err);
- scsi_req(rq)->result = err;
-
- if (err == 0 && set_xfer) {
- ide_set_xfer_rate(drive, nsect);
- ide_driveid_update(drive);
- }
-
- ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
-}
-
-/*
- * Handler for command with PIO data phase.
- */
-static ide_startstop_t task_pio_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &drive->hwif->cmd;
- u8 stat = hwif->tp_ops->read_status(hwif);
- u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- if (write == 0) {
- /* Error? */
- if (stat & ATA_ERR)
- goto out_err;
-
- /* Didn't want any data? Odd. */
- if ((stat & ATA_DRQ) == 0) {
- /* Command all done? */
- if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
- goto out_end;
-
- /* Assume it was a spurious irq */
- goto out_wait;
- }
- } else {
- if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
- goto out_err;
-
- /* Deal with unexpected ATA data phase. */
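-		/* DRQ must be set if and only if data remains to transfer. */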
- if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
- goto out_err;
- }
-
- if (write && cmd->nleft == 0)
- goto out_end;
-
- /* Still data left to transfer. */
- ide_pio_datablock(drive, cmd, write);
-
- /* Are we done? Check status and finish transfer. */
- if (write == 0 && cmd->nleft == 0) {
- stat = wait_drive_not_busy(drive);
- if (!OK_STAT(stat, 0, BAD_STAT))
- goto out_err;
-
- goto out_end;
- }
-out_wait:
- /* Still data left to transfer. */
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
- return ide_started;
-out_end:
- if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
- ide_finish_cmd(drive, cmd, stat);
- else
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
- return ide_stopped;
-out_err:
- ide_error_cmd(drive, cmd);
- return ide_error(drive, __func__, stat);
-}
-
-static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
- struct ide_cmd *cmd)
-{
- ide_startstop_t startstop;
-
- if (ide_wait_stat(&startstop, drive, ATA_DRQ,
- drive->bad_wstat, WAIT_DRQ)) {
- pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
- (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
- (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
- return startstop;
- }
-
- if (!force_irqthreads && (drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable();
-
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-
- ide_pio_datablock(drive, cmd, 1);
-
- return ide_started;
-}
-
-int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
- u16 nsect)
-{
- struct request *rq;
- int error;
-
- rq = blk_get_request(drive->queue,
- (cmd->tf_flags & IDE_TFLAG_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
-
-	/*
-	 * (ks) We currently transfer only whole sectors.
-	 * This is sufficient for now, but it would be great to find
-	 * a way to transfer any size, to support special commands
-	 * like READ LONG.
-	 */
- if (nsect) {
- error = blk_rq_map_kern(drive->queue, rq, buf,
- nsect * SECTOR_SIZE, GFP_NOIO);
- if (error)
- goto put_req;
- }
-
- ide_req(rq)->special = cmd;
- cmd->rq = rq;
-
- blk_execute_rq(NULL, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-put_req:
- blk_put_request(rq);
- return error;
-}
-EXPORT_SYMBOL(ide_raw_taskfile);
-
-int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- cmd->protocol = ATA_PROT_NODATA;
-
- return ide_raw_taskfile(drive, cmd, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
-
-#ifdef CONFIG_IDE_TASK_IOCTL
-int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- ide_task_request_t *req_task;
- struct ide_cmd cmd;
- u8 *outbuf = NULL;
- u8 *inbuf = NULL;
- u8 *data_buf = NULL;
- int err = 0;
- int tasksize = sizeof(struct ide_task_request_s);
- unsigned int taskin = 0;
- unsigned int taskout = 0;
- u16 nsect = 0;
- char __user *buf = (char __user *)arg;
-
- req_task = memdup_user(buf, tasksize);
- if (IS_ERR(req_task))
- return PTR_ERR(req_task);
-
- taskout = req_task->out_size;
- taskin = req_task->in_size;
-
- if (taskin > 65536 || taskout > 65536) {
- err = -EINVAL;
- goto abort;
- }
-
- if (taskout) {
- int outtotal = tasksize;
- outbuf = kzalloc(taskout, GFP_KERNEL);
- if (outbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
- if (copy_from_user(outbuf, buf + outtotal, taskout)) {
- err = -EFAULT;
- goto abort;
- }
- }
-
- if (taskin) {
- int intotal = tasksize + taskout;
- inbuf = kzalloc(taskin, GFP_KERNEL);
- if (inbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
- if (copy_from_user(inbuf, buf + intotal, taskin)) {
- err = -EFAULT;
- goto abort;
- }
- }
-
- memset(&cmd, 0, sizeof(cmd));
-
- memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
- memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
-
- cmd.valid.out.tf = IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
- cmd.tf_flags = IDE_TFLAG_IO_16BIT;
-
- if (drive->dev_flags & IDE_DFLAG_LBA48) {
- cmd.tf_flags |= IDE_TFLAG_LBA48;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- }
-
- if (req_task->out_flags.all) {
- cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;
-
- if (req_task->out_flags.b.data)
- cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;
-
- if (req_task->out_flags.b.nsector_hob)
- cmd.valid.out.hob |= IDE_VALID_NSECT;
- if (req_task->out_flags.b.sector_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAL;
- if (req_task->out_flags.b.lcyl_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAM;
- if (req_task->out_flags.b.hcyl_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAH;
-
- if (req_task->out_flags.b.error_feature)
- cmd.valid.out.tf |= IDE_VALID_FEATURE;
- if (req_task->out_flags.b.nsector)
- cmd.valid.out.tf |= IDE_VALID_NSECT;
- if (req_task->out_flags.b.sector)
- cmd.valid.out.tf |= IDE_VALID_LBAL;
- if (req_task->out_flags.b.lcyl)
- cmd.valid.out.tf |= IDE_VALID_LBAM;
- if (req_task->out_flags.b.hcyl)
- cmd.valid.out.tf |= IDE_VALID_LBAH;
- } else {
- cmd.valid.out.tf |= IDE_VALID_OUT_TF;
- if (cmd.tf_flags & IDE_TFLAG_LBA48)
- cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
- }
-
- if (req_task->in_flags.b.data)
- cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;
-
- if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
- /* fixup data phase if needed */
- if (req_task->data_phase == TASKFILE_IN_DMAQ ||
- req_task->data_phase == TASKFILE_IN_DMA)
- cmd.tf_flags |= IDE_TFLAG_WRITE;
- }
-
- cmd.protocol = ATA_PROT_DMA;
-
- switch (req_task->data_phase) {
- case TASKFILE_MULTI_OUT:
- if (!drive->mult_count) {
- /* (hs): give up if multcount is not set */
- pr_err("%s: %s Multimode Write multcount is not set\n",
- drive->name, __func__);
- err = -EPERM;
- goto abort;
- }
- cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- fallthrough;
- case TASKFILE_OUT:
- cmd.protocol = ATA_PROT_PIO;
- fallthrough;
- case TASKFILE_OUT_DMAQ:
- case TASKFILE_OUT_DMA:
- cmd.tf_flags |= IDE_TFLAG_WRITE;
- nsect = taskout / SECTOR_SIZE;
- data_buf = outbuf;
- break;
- case TASKFILE_MULTI_IN:
- if (!drive->mult_count) {
- /* (hs): give up if multcount is not set */
- pr_err("%s: %s Multimode Read multcount is not set\n",
- drive->name, __func__);
- err = -EPERM;
- goto abort;
- }
- cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- fallthrough;
- case TASKFILE_IN:
- cmd.protocol = ATA_PROT_PIO;
- fallthrough;
- case TASKFILE_IN_DMAQ:
- case TASKFILE_IN_DMA:
- nsect = taskin / SECTOR_SIZE;
- data_buf = inbuf;
- break;
- case TASKFILE_NO_DATA:
- cmd.protocol = ATA_PROT_NODATA;
- break;
- default:
- err = -EFAULT;
- goto abort;
- }
-
- if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
- nsect = 0;
- else if (!nsect) {
- nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;
-
- if (!nsect) {
- pr_err("%s: in/out command without data\n",
- drive->name);
- err = -EFAULT;
- goto abort;
- }
- }
-
- err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);
-
- memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
- memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE);
-
- if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
- req_task->in_flags.all == 0) {
- req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
- if (drive->dev_flags & IDE_DFLAG_LBA48)
- req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
- }
-
- if (copy_to_user(buf, req_task, tasksize)) {
- err = -EFAULT;
- goto abort;
- }
- if (taskout) {
- int outtotal = tasksize;
- if (copy_to_user(buf + outtotal, outbuf, taskout)) {
- err = -EFAULT;
- goto abort;
- }
- }
- if (taskin) {
- int intotal = tasksize + taskout;
- if (copy_to_user(buf + intotal, inbuf, taskin)) {
- err = -EFAULT;
- goto abort;
- }
- }
-abort:
- kfree(req_task);
- kfree(outbuf);
- kfree(inbuf);
-
- return err;
-}
-#endif
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
deleted file mode 100644
index cfe78df74b7d..000000000000
--- a/drivers/ide/ide-timings.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 1999-2001 Vojtech Pavlik
- * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
- */
-
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-/*
- * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
- * These were taken from the ATA/ATAPI-6 standard, rev 0a, except
- * for PIO 5, which is a nonstandard extension, and UDMA 6, which
- * is currently supported only by Maxtor drives.
- */
-
-static struct ide_timing ide_timing[] = {
-
- { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
- { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
- { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
- { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
-
- { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
- { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
- { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
-
- { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
- { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
- { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
- { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
- { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
-
- { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
- { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
- { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
-
- { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
- { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
- { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
- { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
-
- { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
- { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
- { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
-
- { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 },
-
- { 0xff }
-};
-
-struct ide_timing *ide_timing_find_mode(u8 speed)
-{
- struct ide_timing *t;
-
- for (t = ide_timing; t->mode != speed; t++)
- if (t->mode == 0xff)
- return NULL;
- return t;
-}
-EXPORT_SYMBOL_GPL(ide_timing_find_mode);
-
-u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
-{
- u16 *id = drive->id;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- u16 cycle = 0;
-
- if (id[ATA_ID_FIELD_VALID] & 2) {
- if (ata_id_has_iordy(drive->id))
- cycle = id[ATA_ID_EIDE_PIO_IORDY];
- else
- cycle = id[ATA_ID_EIDE_PIO];
-
- /* conservative "downgrade" for all pre-ATA2 drives */
- if (pio < 3 && cycle < t->cycle)
- cycle = 0; /* use standard timing */
-
- /* Use the standard timing for the CF specific modes too */
- if (pio > 4 && ata_id_is_cfa(id))
- cycle = 0;
- }
-
- return cycle ? cycle : t->cycle;
-}
-EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
-
-#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
-#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
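-/*
- * ENOUGH() is a ceiling division. EZ() quantizes a timing given in ns to a
- * whole number of clock periods 'unit' (expressed in a 1000x finer scale,
- * hence the multiplication), rounding up so the quantized timing is never
- * shorter than requested.
- */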
-
-static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
- int T, int UT)
-{
- q->setup = EZ(t->setup, T);
- q->act8b = EZ(t->act8b, T);
- q->rec8b = EZ(t->rec8b, T);
- q->cyc8b = EZ(t->cyc8b, T);
- q->active = EZ(t->active, T);
- q->recover = EZ(t->recover, T);
- q->cycle = EZ(t->cycle, T);
- q->udma = EZ(t->udma, UT);
-}
-
-void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
- struct ide_timing *m, unsigned int what)
-{
- if (what & IDE_TIMING_SETUP)
- m->setup = max(a->setup, b->setup);
- if (what & IDE_TIMING_ACT8B)
- m->act8b = max(a->act8b, b->act8b);
- if (what & IDE_TIMING_REC8B)
- m->rec8b = max(a->rec8b, b->rec8b);
- if (what & IDE_TIMING_CYC8B)
- m->cyc8b = max(a->cyc8b, b->cyc8b);
- if (what & IDE_TIMING_ACTIVE)
- m->active = max(a->active, b->active);
- if (what & IDE_TIMING_RECOVER)
- m->recover = max(a->recover, b->recover);
- if (what & IDE_TIMING_CYCLE)
- m->cycle = max(a->cycle, b->cycle);
- if (what & IDE_TIMING_UDMA)
- m->udma = max(a->udma, b->udma);
-}
-EXPORT_SYMBOL_GPL(ide_timing_merge);
-
-int ide_timing_compute(ide_drive_t *drive, u8 speed,
- struct ide_timing *t, int T, int UT)
-{
- u16 *id = drive->id;
- struct ide_timing *s, p;
-
- /*
- * Find the mode.
- */
- s = ide_timing_find_mode(speed);
- if (s == NULL)
- return -EINVAL;
-
- /*
- * Copy the timing from the table.
- */
- *t = *s;
-
- /*
- * If the drive is an EIDE drive, it can tell us it needs extended
- * PIO/MWDMA cycle timing.
- */
- if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
- memset(&p, 0, sizeof(p));
-
- if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
- if (speed <= XFER_PIO_2)
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
- else if ((speed <= XFER_PIO_4) ||
- (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
- } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- p.cycle = id[ATA_ID_EIDE_DMA_MIN];
-
- ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
- }
-
- /*
- * Convert the timing to bus clock counts.
- */
- ide_timing_quantize(t, t, T, UT);
-
- /*
- * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
- * S.M.A.R.T and some other commands. We have to ensure that the
-	 * DMA cycle timing is no faster than the current PIO timing.
- */
- if (speed >= XFER_SW_DMA_0) {
- ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
- ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
- }
-
- /*
- * Lengthen active & recovery time so that cycle time is correct.
- */
- if (t->act8b + t->rec8b < t->cyc8b) {
- t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
- t->rec8b = t->cyc8b - t->act8b;
- }
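-	/*
-	 * Worked example for the PIO 4 8-bit timings: act 70 + rec 25 falls
-	 * short of cyc 120, so the slack of 25 is split - act8b becomes 82
-	 * and rec8b becomes 120 - 82 = 38.
-	 */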
-
- if (t->active + t->recover < t->cycle) {
- t->active += (t->cycle - (t->active + t->recover)) / 2;
- t->recover = t->cycle - t->active;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_timing_compute);
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
deleted file mode 100644
index 0b9709b489b7..000000000000
--- a/drivers/ide/ide-xfer-mode.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-
-static const char *udma_str[] =
- { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
- "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
-static const char *mwdma_str[] =
- { "MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4" };
-static const char *swdma_str[] =
- { "SWDMA0", "SWDMA1", "SWDMA2" };
-static const char *pio_str[] =
- { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6" };
-
-/**
- * ide_xfer_verbose - return IDE mode names
- * @mode: transfer mode
- *
- * Returns a constant string giving the name of the mode
- * requested.
- */
-
-const char *ide_xfer_verbose(u8 mode)
-{
- const char *s;
- u8 i = mode & 0xf;
-
- if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
- s = udma_str[i];
- else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_4)
- s = mwdma_str[i];
- else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
- s = swdma_str[i];
- else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_6)
- s = pio_str[i & 0x7];
- else if (mode == XFER_PIO_SLOW)
- s = "PIO SLOW";
- else
- s = "XFER ERROR";
-
- return s;
-}
-EXPORT_SYMBOL(ide_xfer_verbose);
-
-/**
- * ide_get_best_pio_mode - get PIO mode from drive
- * @drive: drive to consider
- * @mode_wanted: preferred mode
- * @max_mode: highest allowed mode
- *
- * This routine returns the recommended PIO settings for a given drive,
- * based on the drive->id information and the ide_pio_blacklist[].
- *
- * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
- * This is used by most chipset support modules when "auto-tuning".
- */
-
-static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
-{
- u16 *id = drive->id;
- int pio_mode = -1, overridden = 0;
-
- if (mode_wanted != 255)
- return min_t(u8, mode_wanted, max_mode);
-
- if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
- pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
-
- if (pio_mode != -1) {
- printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
- } else {
- pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
- if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
- pio_mode = 2;
- overridden = 1;
- }
-
- if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
- if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 7))
- pio_mode = 4 + min_t(int, 2,
- id[ATA_ID_CFA_MODES] & 7);
- else if (ata_id_has_iordy(id)) {
- if (id[ATA_ID_PIO_MODES] & 7) {
- overridden = 0;
- if (id[ATA_ID_PIO_MODES] & 4)
- pio_mode = 5;
- else if (id[ATA_ID_PIO_MODES] & 2)
- pio_mode = 4;
- else
- pio_mode = 3;
- }
- }
- }
-
- if (overridden)
- printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
- drive->name);
- }
-
- if (pio_mode > max_mode)
- pio_mode = max_mode;
-
- return pio_mode;
-}
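-
-/*
- * Note on the identify-word decoding above: word 51 (ATA_ID_OLD_PIO_MODES)
- * carries the legacy tPIO value, which is only trusted up to mode 2; in
- * word 64 (ATA_ID_PIO_MODES) bit 0 advertises PIO3 and bit 1 PIO4 per
- * ATA-2, while bit 2 (taken as PIO5 here) is a non-standard extension
- * that only some devices report.
- */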
-
-int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio)
-{
- /*
-	 * IORDY may lead to a controller lock-up on certain controllers
- * if the port is not occupied.
- */
- if (pio == 0 && (drive->hwif->port_flags & IDE_PFLAG_PROBING))
- return 0;
- return ata_id_pio_need_iordy(drive->id, pio);
-}
-EXPORT_SYMBOL_GPL(ide_pio_need_iordy);
-
-int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL)
- return -1;
-
- /*
- * TODO: temporary hack for some legacy host drivers that didn't
-	 * set transfer mode on the device in the ->set_pio_mode method...
- */
- if (port_ops->set_dma_mode == NULL) {
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return 0;
- }
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return 0;
- } else {
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return ide_config_drive_speed(drive, mode);
- }
-}
-
-int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL)
- return -1;
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- drive->dma_mode = mode;
- port_ops->set_dma_mode(hwif, drive);
- return 0;
- } else {
- drive->dma_mode = mode;
- port_ops->set_dma_mode(hwif, drive);
- return ide_config_drive_speed(drive, mode);
- }
-}
-EXPORT_SYMBOL_GPL(ide_set_dma_mode);
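-
-/*
- * In both helpers above IDE_HFLAG_POST_SET_MODE only changes the
- * ordering: the device is programmed first via ide_config_drive_speed()
- * and the host timings afterwards, instead of the other way around.
- */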
-
-/* req_pio == "255" for auto-tune */
-void ide_set_pio(ide_drive_t *drive, u8 req_pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 host_pio, pio;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return;
-
- BUG_ON(hwif->pio_mask == 0x00);
-
- host_pio = fls(hwif->pio_mask) - 1;
-
- pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
-
- /*
- * TODO:
- * - report device max PIO mode
- * - check req_pio != 255 against device max PIO mode
- */
- printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
- drive->name, host_pio, req_pio,
- req_pio == 255 ? "(auto-tune)" : "", pio);
-
- (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
-}
-EXPORT_SYMBOL_GPL(ide_set_pio);
-
-/**
- * ide_rate_filter - filter transfer mode
- * @drive: IDE device
- * @speed: desired speed
- *
- * Given the available transfer modes this function returns
- * the best available speed at or below the speed requested.
- *
- * TODO: check device PIO capabilities
- */
-
-static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mode = ide_find_dma_mode(drive, speed);
-
- if (mode == 0) {
- if (hwif->pio_mask)
- mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
- else
- mode = XFER_PIO_4;
- }
-
-/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
-
- return min(speed, mode);
-}
-
-/**
- * ide_set_xfer_rate - set transfer rate
- * @drive: drive to set
- * @rate: speed to attempt to set
- *
- * General helper for setting the speed of an IDE device. This
- * function knows about user enforced limits from the configuration
- * which ->set_pio_mode/->set_dma_mode does not.
- */
-
-int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -1;
-
- rate = ide_rate_filter(drive, rate);
-
- BUG_ON(rate < XFER_PIO_0);
-
- if (rate >= XFER_PIO_0 && rate <= XFER_PIO_6)
- return ide_set_pio_mode(drive, rate);
-
- return ide_set_dma_mode(drive, rate);
-}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
deleted file mode 100644
index 9a9c64fd1032..000000000000
--- a/drivers/ide/ide.c
+++ /dev/null
@@ -1,415 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This is the multiple IDE interface driver, as evolved from hd.c.
- * It supports up to MAX_HWIFS IDE interfaces, on one or more IRQs
- * (usually 14 & 15).
- * There can be up to two drives per interface, as per the ATA-2 spec.
- *
- * ...
- *
- * From hd.c:
- * |
- * | It traverses the request-list, using interrupts to jump between functions.
- * | As nearly all functions can be called within interrupts, we may not sleep.
- * | Special care is recommended. Have Fun!
- * |
- * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
- * |
- * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
- * | in the early extended-partition checks and added DM partitions.
- * |
- * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
- * |
- * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
- * | and general streamlining by Mark Lord (mlord@pobox.com).
- *
- * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
- *
- * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
- * Delman Lee (delman@ieee.org) ("Mr. atdisk2")
- * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
- *
- * This was a rewrite of just about everything from hd.c, though some original
- * code is still sprinkled about. Think of it as a major evolution, with
- * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-
-struct class *ide_port_class;
-
-/**
- * ide_device_get - get an additional reference to an ide_drive_t
- * @drive: device to get a reference to
- *
- * Gets a reference to the ide_drive_t and increments the use count of the
- * underlying LLDD module.
- */
-int ide_device_get(ide_drive_t *drive)
-{
- struct device *host_dev;
- struct module *module;
-
- if (!get_device(&drive->gendev))
- return -ENXIO;
-
- host_dev = drive->hwif->host->dev[0];
- module = host_dev ? host_dev->driver->owner : NULL;
-
- if (module && !try_module_get(module)) {
- put_device(&drive->gendev);
- return -ENXIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_device_get);
-
-/**
- * ide_device_put - release a reference to an ide_drive_t
- * @drive: device to release a reference on
- *
- * Releases a reference to the ide_drive_t and decrements the use count of
- * the underlying LLDD module.
- */
-void ide_device_put(ide_drive_t *drive)
-{
-#ifdef CONFIG_MODULE_UNLOAD
- struct device *host_dev = drive->hwif->host->dev[0];
- struct module *module = host_dev ? host_dev->driver->owner : NULL;
-
- module_put(module);
-#endif
- put_device(&drive->gendev);
-}
-EXPORT_SYMBOL_GPL(ide_device_put);
-
-static int ide_bus_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
-static int ide_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- ide_drive_t *drive = to_ide_device(dev);
-
- add_uevent_var(env, "MEDIA=%s", ide_media_string(drive));
- add_uevent_var(env, "DRIVENAME=%s", drive->name);
- add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive));
- return 0;
-}
-
-static int generic_ide_probe(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- return drv->probe ? drv->probe(drive) : -ENODEV;
-}
-
-static int generic_ide_remove(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (drv->remove)
- drv->remove(drive);
-
- return 0;
-}
-
-static void generic_ide_shutdown(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (dev->driver && drv->shutdown)
- drv->shutdown(drive);
-}
-
-struct bus_type ide_bus_type = {
- .name = "ide",
- .match = ide_bus_match,
- .uevent = ide_uevent,
- .probe = generic_ide_probe,
- .remove = generic_ide_remove,
- .shutdown = generic_ide_shutdown,
- .dev_groups = ide_dev_groups,
- .suspend = generic_ide_suspend,
- .resume = generic_ide_resume,
-};
-
-EXPORT_SYMBOL_GPL(ide_bus_type);
-
-int ide_vlb_clk;
-EXPORT_SYMBOL_GPL(ide_vlb_clk);
-
-module_param_named(vlb_clock, ide_vlb_clk, int, 0);
-MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
-
-int ide_pci_clk;
-EXPORT_SYMBOL_GPL(ide_pci_clk);
-
-module_param_named(pci_clock, ide_pci_clk, int, 0);
-MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
-
-static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
-{
- unsigned int a, b, i, j = 1;
- unsigned int *dev_param_mask = (unsigned int *)kp->arg;
-
- /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
- if (sscanf(s, "%u.%u:%u", &a, &b, &j) != 3 &&
- sscanf(s, "%u.%u", &a, &b) != 2)
- return -EINVAL;
-
- i = a * MAX_DRIVES + b;
-
- if (i >= MAX_HWIFS * MAX_DRIVES || j > 1)
- return -EINVAL;
-
- if (j)
- *dev_param_mask |= (1 << i);
- else
- *dev_param_mask &= ~(1 << i);
-
- return 0;
-}
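-
-/*
- * Example: booting with "ide_core.nodma=0.1" parses controller 0,
- * device 1 and sets the corresponding mask bit, disallowing DMA for
- * the slave device on the first interface; a trailing ":0" clears
- * the bit again.
- */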
-
-static const struct kernel_param_ops param_ops_ide_dev_mask = {
- .set = ide_set_dev_param_mask
-};
-
-#define param_check_ide_dev_mask(name, p) param_check_uint(name, p)
-
-static unsigned int ide_nodma;
-
-module_param_named(nodma, ide_nodma, ide_dev_mask, 0);
-MODULE_PARM_DESC(nodma, "disallow DMA for a device");
-
-static unsigned int ide_noflush;
-
-module_param_named(noflush, ide_noflush, ide_dev_mask, 0);
-MODULE_PARM_DESC(noflush, "disable flush requests for a device");
-
-static unsigned int ide_nohpa;
-
-module_param_named(nohpa, ide_nohpa, ide_dev_mask, 0);
-MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
-
-static unsigned int ide_noprobe;
-
-module_param_named(noprobe, ide_noprobe, ide_dev_mask, 0);
-MODULE_PARM_DESC(noprobe, "skip probing for a device");
-
-static unsigned int ide_nowerr;
-
-module_param_named(nowerr, ide_nowerr, ide_dev_mask, 0);
-MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
-
-static unsigned int ide_cdroms;
-
-module_param_named(cdrom, ide_cdroms, ide_dev_mask, 0);
-MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
-
-struct chs_geom {
- unsigned int cyl;
- u8 head;
- u8 sect;
-};
-
-static unsigned int ide_disks;
-static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
-
-static int ide_set_disk_chs(const char *str, const struct kernel_param *kp)
-{
- unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1;
-
- /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
- /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
- if (sscanf(str, "%u.%u:%u,%u,%u", &a, &b, &c, &h, &s) != 5 &&
- sscanf(str, "%u.%u:%u", &a, &b, &j) != 3)
- return -EINVAL;
-
- i = a * MAX_DRIVES + b;
-
- if (i >= MAX_HWIFS * MAX_DRIVES || j > 1)
- return -EINVAL;
-
- if (c > INT_MAX || h > 255 || s > 255)
- return -EINVAL;
-
- if (j)
- ide_disks |= (1 << i);
- else
- ide_disks &= ~(1 << i);
-
- ide_disks_chs[i].cyl = c;
- ide_disks_chs[i].head = h;
- ide_disks_chs[i].sect = s;
-
- return 0;
-}
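-
-/*
- * Example: "ide_core.chs=0.0:980,16,32" forces the master device on
- * the first interface to be treated as a disk with 980 cylinders,
- * 16 heads and 32 sectors, while "ide_core.chs=0.0:0" drops the
- * forced geometry again.
- */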
-
-module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
-MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
-
-static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
-{
- int i = drive->hwif->index * MAX_DRIVES + unit;
-
- if (ide_nodma & (1 << i)) {
- printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_NODMA;
- }
- if (ide_noflush & (1 << i)) {
- printk(KERN_INFO "ide: disabling flush requests for %s\n",
- drive->name);
- drive->dev_flags |= IDE_DFLAG_NOFLUSH;
- }
- if (ide_nohpa & (1 << i)) {
- printk(KERN_INFO "ide: disabling Host Protected Area for %s\n",
- drive->name);
- drive->dev_flags |= IDE_DFLAG_NOHPA;
- }
- if (ide_noprobe & (1 << i)) {
- printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_NOPROBE;
- }
- if (ide_nowerr & (1 << i)) {
- printk(KERN_INFO "ide: ignoring the ATA_DF bit for %s\n",
- drive->name);
- drive->bad_wstat = BAD_R_STAT;
- }
- if (ide_cdroms & (1 << i)) {
- printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_PRESENT;
- drive->media = ide_cdrom;
- /* an ATAPI device ignores DRDY */
- drive->ready_stat = 0;
- }
- if (ide_disks & (1 << i)) {
- drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
- drive->head = drive->bios_head = ide_disks_chs[i].head;
- drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
-
- printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
- drive->name,
- drive->cyl, drive->head, drive->sect);
-
- drive->dev_flags |= IDE_DFLAG_FORCED_GEOM | IDE_DFLAG_PRESENT;
- drive->media = ide_disk;
- drive->ready_stat = ATA_DRDY;
- }
-}
-
-static unsigned int ide_ignore_cable;
-
-static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp)
-{
- int i, j = 1;
-
- /* controller (ignore) */
- /* controller : 1 (ignore) | 0 (use) */
- if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
- return -EINVAL;
-
- if (i >= MAX_HWIFS || j < 0 || j > 1)
- return -EINVAL;
-
- if (j)
- ide_ignore_cable |= (1 << i);
- else
- ide_ignore_cable &= ~(1 << i);
-
- return 0;
-}
-
-module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
-MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
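-
-/*
- * A port flagged here gets its cable type forced to
- * ATA_CBL_PATA40_SHORT in ide_port_apply_params() below; short
- * 40-wire cables are treated as good enough for the higher UDMA
- * modes, so cable detection no longer caps the speed.
- */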
-
-void ide_port_apply_params(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- if (ide_ignore_cable & (1 << hwif->index)) {
- printk(KERN_INFO "ide: ignoring cable detection for %s\n",
- hwif->name);
- hwif->cbl = ATA_CBL_PATA40_SHORT;
- }
-
- ide_port_for_each_dev(i, drive, hwif)
- ide_dev_apply_params(drive, i);
-}
-
-/*
- * This gets invoked once during initialization to set *everything* up.
- */
-static int __init ide_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
-
- ret = bus_register(&ide_bus_type);
- if (ret < 0) {
- printk(KERN_WARNING "IDE: bus_register error: %d\n", ret);
- return ret;
- }
-
- ide_port_class = class_create(THIS_MODULE, "ide_port");
- if (IS_ERR(ide_port_class)) {
- ret = PTR_ERR(ide_port_class);
- goto out_port_class;
- }
-
- ide_acpi_init();
-
- proc_ide_create();
-
- return 0;
-
-out_port_class:
- bus_unregister(&ide_bus_type);
-
- return ret;
-}
-
-static void __exit ide_exit(void)
-{
- proc_ide_destroy();
-
- class_destroy(ide_port_class);
-
- bus_unregister(&ide_bus_type);
-}
-
-module_init(ide_init);
-module_exit(ide_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
deleted file mode 100644
index 91639fd6c276..000000000000
--- a/drivers/ide/ide_platform.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Platform IDE driver
- *
- * Copyright (C) 2007 MontaVista Software
- *
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/ata_platform.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-static void plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base,
- void __iomem *ctrl,
- struct pata_platform_info *pdata, int irq)
-{
- unsigned long port = (unsigned long)base;
- int i;
-
- hw->io_ports.data_addr = port;
-
- port += (1 << pdata->ioport_shift);
- for (i = 1; i <= 7;
- i++, port += (1 << pdata->ioport_shift))
- hw->io_ports_array[i] = port;
-
- hw->io_ports.ctl_addr = (unsigned long)ctrl;
-
- hw->irq = irq;
-}
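-
-/*
- * With ioport_shift == 0 the task-file registers live at consecutive
- * addresses (base + 0 .. base + 7); a shift of 1 spaces them two
- * bytes apart (base + 0, base + 2, ...), as needed by platforms that
- * wire the device to a wider local bus.
- */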
-
-static const struct ide_port_info platform_ide_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int plat_ide_probe(struct platform_device *pdev)
-{
- struct resource *res_base, *res_alt, *res_irq;
- void __iomem *base, *alt_base;
- struct pata_platform_info *pdata;
- struct ide_host *host;
- int ret = 0, mmio = 0;
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_port_info d = platform_ide_port_info;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- /* get a pointer to the register memory */
- res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
- res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1);
-
- if (!res_base || !res_alt) {
- res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_base || !res_alt) {
- ret = -ENOMEM;
- goto out;
- }
- mmio = 1;
- }
-
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq) {
- ret = -EINVAL;
- goto out;
- }
-
- if (mmio) {
- base = devm_ioremap(&pdev->dev,
- res_base->start, resource_size(res_base));
- alt_base = devm_ioremap(&pdev->dev,
- res_alt->start, resource_size(res_alt));
- } else {
- base = devm_ioport_map(&pdev->dev,
- res_base->start, resource_size(res_base));
- alt_base = devm_ioport_map(&pdev->dev,
- res_alt->start, resource_size(res_alt));
- }
-
- memset(&hw, 0, sizeof(hw));
- plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
- hw.dev = &pdev->dev;
-
- d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- d.irq_flags |= IRQF_SHARED;
-
- if (mmio)
- d.host_flags |= IDE_HFLAG_MMIO;
-
- ret = ide_host_add(&d, hws, 1, &host);
- if (ret)
- goto out;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
-
-out:
- return ret;
-}
-
-static int plat_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = dev_get_drvdata(&pdev->dev);
-
- ide_host_remove(host);
-
- return 0;
-}
-
-static struct platform_driver platform_ide_driver = {
- .driver = {
- .name = "pata_platform",
- },
- .probe = plat_ide_probe,
- .remove = plat_ide_remove,
-};
-
-module_platform_driver(platform_ide_driver);
-
-MODULE_DESCRIPTION("Platform IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pata_platform");
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
deleted file mode 100644
index b6f674ab4fb7..000000000000
--- a/drivers/ide/it8172.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * IT8172 IDE controller support
- *
- * Copyright (C) 2000 MontaVista Software Inc.
- * Copyright (C) 2008 Shane McDonald
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "IT8172"
-
-static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 drive_enables;
- u32 drive_timing;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * The highest value of DIOR/DIOW pulse width and recovery time
- * that can be set in the IT8172 is 8 PCI clock cycles. As a result,
- * it cannot be configured for PIO mode 0. This table sets these
- * parameters to the maximum supported by the IT8172.
- */
- static const u8 timings[] = { 0x3f, 0x3c, 0x1b, 0x12, 0x0a };
-
- pci_read_config_word(dev, 0x40, &drive_enables);
- pci_read_config_dword(dev, 0x44, &drive_timing);
-
- /*
- * Enable port 0x44. The IT8172 spec is confused; it calls
- * this register the "Slave IDE Timing Register", but in fact,
- * it controls timing for both master and slave drives.
- */
- drive_enables |= 0x4000;
-
- drive_enables &= drive->dn ? 0xc006 : 0xc060;
- if (drive->media == ide_disk)
- /* enable prefetch */
- drive_enables |= 0x0004 << (drive->dn * 4);
- if (ide_pio_need_iordy(drive, pio))
- /* enable IORDY sample-point */
- drive_enables |= 0x0002 << (drive->dn * 4);
-
- drive_timing &= drive->dn ? 0x00003f00 : 0x000fc000;
- drive_timing |= timings[pio] << (drive->dn * 6 + 8);
-
- pci_write_config_word(dev, 0x40, drive_enables);
- pci_write_config_dword(dev, 0x44, drive_timing);
-}
-
-static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int u_speed = 0;
- u8 reg48, reg4a;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_byte(dev, 0x4a, &reg4a);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
- u_speed = udma << (drive->dn * 4);
-
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- reg4a &= ~a_speed;
- pci_write_config_byte(dev, 0x4a, reg4a | u_speed);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed);
-
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
-
- it8172_set_pio_mode(hwif, drive);
- }
-}
-
-
-static const struct ide_port_ops it8172_port_ops = {
- .set_pio_mode = it8172_set_pio_mode,
- .set_dma_mode = it8172_set_dma_mode,
-};
-
-static const struct ide_port_info it8172_port_info = {
- .name = DRV_NAME,
- .port_ops = &it8172_port_ops,
- .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4 & ~ATA_PIO0,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int it8172_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
- return -ENODEV; /* IT8172 is more than an IDE controller */
- return ide_pci_init_one(dev, &it8172_port_info, NULL);
-}
-
-static struct pci_device_id it8172_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8172), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, it8172_pci_tbl);
-
-static struct pci_driver it8172_pci_driver = {
- .name = "IT8172_IDE",
- .id_table = it8172_pci_tbl,
- .probe = it8172_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it8172_ide_init(void)
-{
- return ide_pci_register_driver(&it8172_pci_driver);
-}
-
-static void __exit it8172_ide_exit(void)
-{
- pci_unregister_driver(&it8172_pci_driver);
-}
-
-module_init(it8172_ide_init);
-module_exit(it8172_ide_exit);
-
-MODULE_AUTHOR("Steve Longerbeam");
-MODULE_DESCRIPTION("PCI driver module for ITE 8172 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
deleted file mode 100644
index d0bf4430c437..000000000000
--- a/drivers/ide/it8213.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ITE 8213 IDE driver
- *
- * Copyright (C) 2006 Jack Lee
- * Copyright (C) 2006 Alan Cox
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "it8213"
-
-/**
- * it8213_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode.
- */
-
-static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- static DEFINE_SPINLOCK(tune_lock);
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- static const u8 timings[][2] = {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&tune_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media != ide_disk)
- control |= 4; /* ATAPI */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1)
- master_data = master_data | (control << 4);
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data = slave_data & 0xf0;
- slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
- } else {
- master_data &= ~0x3307;
- if (pio > 1)
- master_data = master_data | control;
- master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&tune_lock, flags);
-}
-
-/**
- * it8213_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the ITE chipset for the DMA mode.
- */
-
-static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = 0x40;
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int v_flag = 0x01 << drive->dn;
- int w_flag = 0x10 << drive->dn;
- int u_speed = 0;
- u16 reg4042, reg4a;
- u8 reg48, reg54, reg55;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_read_config_byte(dev, 0x54, &reg54);
- pci_read_config_byte(dev, 0x55, &reg55);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
-
- u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- if (speed >= XFER_UDMA_5)
- pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
- else
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if ((reg4a & a_speed) != u_speed)
- pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
- if (speed > XFER_UDMA_2) {
- if (!(reg54 & v_flag))
- pci_write_config_byte(dev, 0x54, reg54 | v_flag);
- } else
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- if (reg54 & v_flag)
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- if (reg55 & w_flag)
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- it8213_set_pio_mode(hwif, drive);
- }
-}
-
-static u8 it8213_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg42h = 0;
-
- pci_read_config_byte(dev, 0x42, &reg42h);
-
- return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops it8213_port_ops = {
- .set_pio_mode = it8213_set_pio_mode,
- .set_dma_mode = it8213_set_dma_mode,
- .cable_detect = it8213_cable_detect,
-};
-
-static const struct ide_port_info it8213_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80} },
- .port_ops = &it8213_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2_ONLY,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA6,
-};
-
-/**
- * it8213_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an ITE8213 controller. As
- * this device follows the standard interfaces we can use the
- * standard helper functions to do almost all the work for us.
- */
-
-static int it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &it8213_chipset, NULL);
-}
-
-static const struct pci_device_id it8213_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), 0 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
-
-static struct pci_driver it8213_pci_driver = {
- .name = "ITE8213_IDE",
- .id_table = it8213_pci_tbl,
- .probe = it8213_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it8213_ide_init(void)
-{
- return ide_pci_register_driver(&it8213_pci_driver);
-}
-
-static void __exit it8213_ide_exit(void)
-{
- pci_unregister_driver(&it8213_pci_driver);
-}
-
-module_init(it8213_ide_init);
-module_exit(it8213_ide_exit);
-
-MODULE_AUTHOR("Jack Lee, Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
deleted file mode 100644
index 36a64c8ea575..000000000000
--- a/drivers/ide/it821x.c
+++ /dev/null
@@ -1,715 +0,0 @@
-/*
- * Copyright (C) 2004 Red Hat
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- * Based in part on the ITE vendor provided SCSI driver.
- *
- * Documentation:
- * Datasheet is freely available, some other documents under NDA.
- *
- * The ITE8212 isn't exactly a standard IDE controller. It has two
- * modes. In pass-through mode it is an IDE controller. In its smart
- * mode it's actually quite a capable hardware RAID controller disguised
- * as an IDE controller. Smart mode only understands DMA read/write and
- * identify; none of the fancier commands apply. The IT8211 is identical
- * in other respects but lacks the RAID mode.
- *
- * Errata:
- * o Rev 0x10 also requires that master/slave hold the same DMA timings
- * and cannot do ATAPI MWDMA.
- * o The identify data for RAID volumes lacks CHS info (technically OK)
- * but also fails to set the LBA28 and other bits. We fix these in
- * the IDE probe quirk code.
- * o If you write LBA48-sized I/Os (i.e. > 256 sectors) in smart-mode
- * RAID, the controller firmware dies.
- * o Smart mode without RAID doesn't clear all the necessary identify
- * bits to reduce the command set to the one actually used.
- *
- * This has a few impacts on the driver:
- * - In pass-through mode we do all the work you would expect.
- * - In smart mode the clock setup is generally done by the controller,
- * but we must watch the other limits and filter.
- * - There are a few extra vendor commands that actually talk to the
- * controller but only work in PIO with no IRQ.
- *
- * Vendor areas of the identify block in smart mode are used for the
- * timing and policy set up. Each HDD in raid mode also has a serial
- * block on the disk. The hardware extra commands are get/set chip status,
- * rebuild, get rebuild status.
- *
- * In Linux the driver supports pass through mode as if the device was
- * just another IDE controller. If the smart mode is running then
- * volumes are managed by the controller firmware and each IDE "disk"
- * is a raid volume. Even more cute - the controller can do automated
- * hotplug and rebuild.
- *
- * The pass-through controller itself is a little demented. It has the
- * flaw of a single set of PIO/MWDMA timings per channel, so non-UDMA
- * devices restrict each other's performance. It also has a single
- * clock source per channel, so mixed UDMA100/133 performance isn't
- * perfect and we have to pick a clock. Thankfully none of this
- * matters in smart mode. ATAPI DMA is not currently supported.
- *
- * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
- *
- * TODO
- * - ATAPI UDMA is ok but not MWDMA it seems
- * - RAID configuration ioctls
- * - Move to libata once it grows up
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "it821x"
-
-#define QUIRK_VORTEX86 1
-
-struct it821x_dev
-{
- unsigned int smart:1, /* Are we in smart raid mode */
- timing10:1; /* Rev 0x10 */
- u8 clock_mode; /* 0, ATA_50 or ATA_66 */
- u8 want[2][2]; /* Mode/Pri log for master slave */
-	/* We need these for switching the clock when DMA goes on/off.
-	   The high byte is the 66 MHz timing */
- u16 pio[2]; /* Cached PIO values */
- u16 mwdma[2]; /* Cached MWDMA values */
- u16 udma[2]; /* Cached UDMA values (per drive) */
- u16 quirks;
-};
-
-#define ATA_66 0
-#define ATA_50 1
-#define ATA_ANY 2
-
-#define UDMA_OFF 0
-#define MWDMA_OFF 0
-
-/*
- * We allow users to force the card into non-RAID mode without
- * flashing the alternative BIOS. This is also necessary right now
- * for embedded platforms that cannot run a PC BIOS but are using this
- * device.
- */
-
-static int it8212_noraid;
-
-/**
- * it821x_program - program the PIO/MWDMA registers
- * @drive: drive to tune
- * @timing: timing info
- *
- * Program the PIO/MWDMA timing for this channel according to the
- * current clock.
- */
-
-static void it821x_program(ide_drive_t *drive, u16 timing)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int channel = hwif->channel;
- u8 conf;
-
- /* Program PIO/MWDMA timing bits */
- if(itdev->clock_mode == ATA_66)
- conf = timing >> 8;
- else
- conf = timing & 0xFF;
-
- pci_write_config_byte(dev, 0x54 + 4 * channel, conf);
-}
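-
-/*
- * Each cached timing word holds both clock variants: the low byte is
- * the 50 MHz value and the high byte the 66 MHz value, so e.g. the
- * PIO4 entry 0x3121 programs 0x31 when running from the 66 MHz clock
- * and 0x21 when running from the 50 MHz clock.
- */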
-
-/**
- * it821x_program_udma - program the UDMA registers
- * @drive: drive to tune
- * @timing: timing info
- *
- * Program the UDMA timing for this drive according to the
- * current clock.
- */
-
-static void it821x_program_udma(ide_drive_t *drive, u16 timing)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int channel = hwif->channel;
- u8 unit = drive->dn & 1, conf;
-
- /* Program UDMA timing bits */
- if(itdev->clock_mode == ATA_66)
- conf = timing >> 8;
- else
- conf = timing & 0xFF;
-
- if (itdev->timing10 == 0)
- pci_write_config_byte(dev, 0x56 + 4 * channel + unit, conf);
- else {
- pci_write_config_byte(dev, 0x56 + 4 * channel, conf);
- pci_write_config_byte(dev, 0x56 + 4 * channel + 1, conf);
- }
-}
-
-/**
- * it821x_clock_strategy
- * @drive: drive to set up
- *
- * Select between the 50 and 66Mhz base clocks to get the best
- * results for this interface.
- */
-
-static void it821x_clock_strategy(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int clock, altclock, sel = 0;
- u8 unit = drive->dn & 1, v;
-
- if(itdev->want[0][0] > itdev->want[1][0]) {
- clock = itdev->want[0][1];
- altclock = itdev->want[1][1];
- } else {
- clock = itdev->want[1][1];
- altclock = itdev->want[0][1];
- }
-
- /*
- * if both clocks can be used for the mode with the higher priority
- * use the clock needed by the mode with the lower priority
- */
- if (clock == ATA_ANY)
- clock = altclock;
-
- /* Nobody cares - keep the same clock */
- if(clock == ATA_ANY)
- return;
- /* No change */
- if(clock == itdev->clock_mode)
- return;
-
- /* Load this into the controller ? */
- if(clock == ATA_66)
- itdev->clock_mode = ATA_66;
- else {
- itdev->clock_mode = ATA_50;
- sel = 1;
- }
-
- pci_read_config_byte(dev, 0x50, &v);
- v &= ~(1 << (1 + hwif->channel));
- v |= sel << (1 + hwif->channel);
- pci_write_config_byte(dev, 0x50, v);
-
- /*
- * Reprogram the UDMA/PIO of the pair drive for the switch
- * MWDMA will be dealt with by the dma switcher
- */
- if(pair && itdev->udma[1-unit] != UDMA_OFF) {
- it821x_program_udma(pair, itdev->udma[1-unit]);
- it821x_program(pair, itdev->pio[1-unit]);
- }
- /*
- * Reprogram the UDMA/PIO of our drive for the switch.
- * MWDMA will be dealt with by the dma switcher
- */
- if(itdev->udma[unit] != UDMA_OFF) {
- it821x_program_udma(drive, itdev->udma[unit]);
- it821x_program(drive, itdev->pio[unit]);
- }
-}
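-
-/*
- * want[unit][0] is a simple priority (PIO = 1, MWDMA = 2, UDMA = 3)
- * and want[unit][1] the clock that mode prefers; the strategy above
- * lets the higher-priority mode pick the clock, falling back to the
- * other mode's preference when it doesn't care (ATA_ANY).
- */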
-
-/**
- * it821x_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the host to the desired PIO mode, taking into consideration
- * the maximum PIO mode supported by the other device on the cable.
- */
-
-static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 unit = drive->dn & 1, set_pio = pio;
-
-	/* Spec says 89, the reference driver uses 88 */
- static u16 pio_timings[]= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
- static u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
-
- /*
- * Compute the best PIO mode we can for a given device. We must
- * pick a speed that does not cause problems with the other device
- * on the cable.
- */
- if (pair) {
- u8 pair_pio = pair->pio_mode - XFER_PIO_0;
- /* trim PIO to the slowest of the master/slave */
- if (pair_pio < set_pio)
- set_pio = pair_pio;
- }
-
-	/* We prefer the 66 MHz clock for PIO 0-3, don't care for PIO4 */
- itdev->want[unit][1] = pio_want[set_pio];
- itdev->want[unit][0] = 1; /* PIO is lowest priority */
- itdev->pio[unit] = pio_timings[set_pio];
- it821x_clock_strategy(drive);
- it821x_program(drive, itdev->pio[unit]);
-}
-
-/**
- * it821x_tune_mwdma - tune a channel for MWDMA
- * @drive: drive to set up
- * @mode_wanted: the target operating mode
- *
- * Load the timing settings for this device mode into the
- * controller when doing MWDMA in pass-through mode. The caller
- * must cope with the lack of per-device MWDMA/PIO timings and
- * with the shared MWDMA/PIO timing register.
- */
-
-static void it821x_tune_mwdma(ide_drive_t *drive, u8 mode_wanted)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = (void *)ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1, channel = hwif->channel, conf;
-
- static u16 dma[] = { 0x8866, 0x3222, 0x3121 };
- static u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
-
- itdev->want[unit][1] = mwdma_want[mode_wanted];
- itdev->want[unit][0] = 2; /* MWDMA is low priority */
- itdev->mwdma[unit] = dma[mode_wanted];
- itdev->udma[unit] = UDMA_OFF;
-
- /* UDMA bits off - Revision 0x10 do them in pairs */
- pci_read_config_byte(dev, 0x50, &conf);
- if (itdev->timing10)
- conf |= channel ? 0x60: 0x18;
- else
- conf |= 1 << (3 + 2 * channel + unit);
- pci_write_config_byte(dev, 0x50, conf);
-
- it821x_clock_strategy(drive);
- /* FIXME: do we need to program this ? */
- /* it821x_program(drive, itdev->mwdma[unit]); */
-}
-
-/**
- * it821x_tune_udma - tune a channel for UDMA
- * @drive: drive to set up
- * @mode_wanted: the target operating mode
- *
- * Load the timing settings for this device mode into the
- * controller when doing UDMA modes in pass through.
- */
-
-static void it821x_tune_udma(ide_drive_t *drive, u8 mode_wanted)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1, channel = hwif->channel, conf;
-
- static u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
- static u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
-
- itdev->want[unit][1] = udma_want[mode_wanted];
- itdev->want[unit][0] = 3; /* UDMA is high priority */
- itdev->mwdma[unit] = MWDMA_OFF;
- itdev->udma[unit] = udma[mode_wanted];
- if(mode_wanted >= 5)
- itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
-
- /* UDMA on. Again revision 0x10 must do the pair */
- pci_read_config_byte(dev, 0x50, &conf);
- if (itdev->timing10)
- conf &= channel ? 0x9F: 0xE7;
- else
- conf &= ~ (1 << (3 + 2 * channel + unit));
- pci_write_config_byte(dev, 0x50, conf);
-
- it821x_clock_strategy(drive);
- it821x_program_udma(drive, itdev->udma[unit]);
-
-}
-
-/**
- * it821x_dma_start - DMA start hook
- * @drive: drive for DMA
- *
- * The IT821x has a single timing register for MWDMA and for PIO
- * operations. As we flip back and forth we have to reload the
- * clock. In addition the rev 0x10 device only works if the same
- * timing value is loaded into the master and slave UDMA clock
- * so we must also reload that.
- *
- * FIXME: we could figure out in advance if we need to do reloads
- */
-
-static void it821x_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1;
-
- if(itdev->mwdma[unit] != MWDMA_OFF)
- it821x_program(drive, itdev->mwdma[unit]);
- else if(itdev->udma[unit] != UDMA_OFF && itdev->timing10)
- it821x_program_udma(drive, itdev->udma[unit]);
- ide_dma_start(drive);
-}
-
-/**
- * it821x_dma_end - DMA stop hook
- * @drive: drive for DMA stop
- *
- * The IT821x has a single timing register for MWDMA and for PIO
- * operations. As we flip back and forth we have to reload the
- * clock.
- */
-
-static int it821x_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int ret = ide_dma_end(drive);
- u8 unit = drive->dn & 1;
-
- if(itdev->mwdma[unit] != MWDMA_OFF)
- it821x_program(drive, itdev->pio[unit]);
- return ret;
-}
-
-/**
- * it821x_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the ITE chipset for the desired DMA mode.
- */
-
-static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 speed = drive->dma_mode;
-
- /*
- * MWDMA tuning is really hard because our MWDMA and PIO
-	 * timings are kept in the same place, so we switch timings in
-	 * the host DMA start/stop callbacks.
- */
- if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_6)
- it821x_tune_udma(drive, speed - XFER_UDMA_0);
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- it821x_tune_mwdma(drive, speed - XFER_MW_DMA_0);
-}
-
-/**
- * it821x_cable_detect - cable detection
- * @hwif: interface to check
- *
- * Check for the presence of an ATA66 capable cable on the
- * interface. Problematic as it seems some cards don't have
- * the needed logic onboard.
- */
-
-static u8 it821x_cable_detect(ide_hwif_t *hwif)
-{
- /* The reference driver also only does disk side */
- return ATA_CBL_PATA80;
-}
-
-/**
- * it821x_quirkproc - post init callback
- * @drive: drive
- *
- * This callback is run after the drive has been probed but
- * before anything gets attached. It allows drivers to do any
- * final tuning that is needed, or fixups to work around bugs.
- */
-
-static void it821x_quirkproc(ide_drive_t *drive)
-{
- struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
- u16 *id = drive->id;
-
- if (!itdev->smart) {
- /*
- * If we are in pass through mode then not much
- * needs to be done, but we do bother to clear the
- * IRQ mask as we may well be in PIO (eg rev 0x10)
- * for now and we know unmasking is safe on this chipset.
- */
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- } else {
- /*
- * Perform fixups on smart mode. We need to "lose" some
- * capabilities the firmware lacks but does not filter, and
- * also patch up some capability bits that it forgets to set
- * in RAID mode.
- */
-
- /* Check for RAID v native */
- if (strstr((char *)&id[ATA_ID_PROD],
- "Integrated Technology Express")) {
-			/* In RAID mode the identify block is slightly buggy.
-			   We need to set the bits so that the IDE layer knows
-			   LBA28, LBA48 and DMA are valid */
- id[ATA_ID_CAPABILITY] |= (3 << 8); /* LBA28, DMA */
- id[ATA_ID_COMMAND_SET_2] |= 0x0400; /* LBA48 valid */
- id[ATA_ID_CFS_ENABLE_2] |= 0x0400; /* LBA48 on */
- /* Reporting logic */
- printk(KERN_INFO "%s: IT8212 %sRAID %d volume",
- drive->name, id[147] ? "Bootable " : "",
- id[ATA_ID_CSFO]);
- if (id[ATA_ID_CSFO] != 1)
- printk(KERN_CONT "(%dK stripe)", id[146]);
- printk(KERN_CONT ".\n");
- } else {
-			/* Non-RAID volume. Fixups to stop the core code
-			   from doing unsupported things */
- id[ATA_ID_FIELD_VALID] &= 3;
- id[ATA_ID_QUEUE_DEPTH] = 0;
- id[ATA_ID_COMMAND_SET_1] = 0;
- id[ATA_ID_COMMAND_SET_2] &= 0xC400;
- id[ATA_ID_CFSSE] &= 0xC000;
- id[ATA_ID_CFS_ENABLE_1] = 0;
- id[ATA_ID_CFS_ENABLE_2] &= 0xC400;
- id[ATA_ID_CSF_DEFAULT] &= 0xC000;
- id[127] = 0;
- id[ATA_ID_DLF] = 0;
- id[ATA_ID_CSFO] = 0;
- id[ATA_ID_CFA_POWER] = 0;
- printk(KERN_INFO "%s: Performing identify fixups.\n",
- drive->name);
- }
-
- /*
- * Set MWDMA0 mode as enabled/support - just to tell
- * IDE core that DMA is supported (it821x hardware
- * takes care of DMA mode programming).
- */
- if (ata_id_has_dma(id)) {
- id[ATA_ID_MWDMA_MODES] |= 0x0101;
- drive->current_speed = XFER_MW_DMA_0;
- }
- }
-
-}
-
-static const struct ide_dma_ops it821x_pass_through_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = it821x_dma_start,
- .dma_end = it821x_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-/**
- * init_hwif_it821x - set up hwif structs
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure. The IT8212
- * requires several custom handlers so we override the default
- * ide DMA handlers appropriately
- */
-
-static void init_hwif_it821x(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct it821x_dev *itdevs = host->host_priv;
- struct it821x_dev *idev = itdevs + hwif->channel;
- u8 conf;
-
- ide_set_hwifdata(hwif, idev);
-
- pci_read_config_byte(dev, 0x50, &conf);
- if (conf & 1) {
- idev->smart = 1;
- hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
-		/* Long I/Os, although allowed in LBA48 space, cause the
-		   onboard firmware to enter the twilight zone */
- hwif->rqsize = 256;
- }
-
- /* Pull the current clocks from 0x50 also */
- if (conf & (1 << (1 + hwif->channel)))
- idev->clock_mode = ATA_50;
- else
- idev->clock_mode = ATA_66;
-
- idev->want[0][1] = ATA_ANY;
- idev->want[1][1] = ATA_ANY;
-
- /*
- * Not in the docs but according to the reference driver
- * this is necessary.
- */
-
- if (dev->revision == 0x10) {
- idev->timing10 = 1;
- hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- if (idev->smart == 0)
- printk(KERN_WARNING DRV_NAME " %s: revision 0x10, "
- "workarounds activated\n", pci_name(dev));
- }
-
- if (idev->smart == 0) {
- /* MWDMA/PIO clock switching for pass through mode */
- hwif->dma_ops = &it821x_pass_through_dma_ops;
- } else
- hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
-
- if (hwif->dma_base == 0)
- return;
-
- hwif->ultra_mask = ATA_UDMA6;
- hwif->mwdma_mask = ATA_MWDMA2;
-
-	/* Vortex86SX quirk: disable Ultra-DMA modes to work around a BadCRC issue */
- if (idev->quirks & QUIRK_VORTEX86) {
- if (dev->revision == 0x11)
- hwif->ultra_mask = 0;
- }
-}
-
-static void it8212_disable_raid(struct pci_dev *dev)
-{
- /* Reset local CPU, and set BIOS not ready */
- pci_write_config_byte(dev, 0x5E, 0x01);
-
- /* Set to bypass mode, and reset PCI bus */
- pci_write_config_byte(dev, 0x50, 0x00);
- pci_write_config_word(dev, PCI_COMMAND,
- PCI_COMMAND_PARITY | PCI_COMMAND_IO |
- PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
- pci_write_config_word(dev, 0x40, 0xA0F3);
-
- pci_write_config_dword(dev,0x4C, 0x02040204);
- pci_write_config_byte(dev, 0x42, 0x36);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
-}
-
-static int init_chipset_it821x(struct pci_dev *dev)
-{
- u8 conf;
- static char *mode[2] = { "pass through", "smart" };
-
- /* Force the card into bypass mode if so requested */
- if (it8212_noraid) {
- printk(KERN_INFO DRV_NAME " %s: forcing bypass mode\n",
- pci_name(dev));
- it8212_disable_raid(dev);
- }
- pci_read_config_byte(dev, 0x50, &conf);
- printk(KERN_INFO DRV_NAME " %s: controller in %s mode\n",
- pci_name(dev), mode[conf & 1]);
- return 0;
-}
-
-static const struct ide_port_ops it821x_port_ops = {
- /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
- .set_pio_mode = it821x_set_pio_mode,
- .set_dma_mode = it821x_set_dma_mode,
- .quirkproc = it821x_quirkproc,
- .cable_detect = it821x_cable_detect,
-};
-
-static const struct ide_port_info it821x_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_it821x,
- .init_hwif = init_hwif_it821x,
- .port_ops = &it821x_port_ops,
- .pio_mask = ATA_PIO4,
-};
-
-/**
- * it821x_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an ITE821x controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct it821x_dev *itdevs;
- int rc;
-
- itdevs = kcalloc(2, sizeof(*itdevs), GFP_KERNEL);
- if (itdevs == NULL) {
- printk(KERN_ERR DRV_NAME " %s: out of memory\n", pci_name(dev));
- return -ENOMEM;
- }
-
- itdevs->quirks = id->driver_data;
-
- rc = ide_pci_init_one(dev, &it821x_chipset, itdevs);
- if (rc)
- kfree(itdevs);
-
- return rc;
-}
-
-static void it821x_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct it821x_dev *itdevs = host->host_priv;
-
- ide_pci_remove(dev);
- kfree(itdevs);
-}
-
-static const struct pci_device_id it821x_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), 0 },
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), 0 },
- { PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), QUIRK_VORTEX86 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, it821x_pci_tbl);
-
-static struct pci_driver it821x_pci_driver = {
- .name = "ITE821x IDE",
- .id_table = it821x_pci_tbl,
- .probe = it821x_init_one,
- .remove = it821x_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it821x_ide_init(void)
-{
- return ide_pci_register_driver(&it821x_pci_driver);
-}
-
-static void __exit it821x_ide_exit(void)
-{
- pci_unregister_driver(&it821x_pci_driver);
-}
-
-module_init(it821x_ide_init);
-module_exit(it821x_ide_exit);
-
-module_param_named(noraid, it8212_noraid, int, S_IRUGO);
-MODULE_PARM_DESC(noraid, "Force card into bypass mode");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the ITE 821x");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
deleted file mode 100644
index ae6480dcbadf..000000000000
--- a/drivers/ide/jmicron.c
+++ /dev/null
@@ -1,176 +0,0 @@
-
-/*
- * Copyright (C) 2006 Red Hat
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "jmicron"
-
-typedef enum {
- PORT_PATA0 = 0,
- PORT_PATA1 = 1,
- PORT_SATA = 2,
-} port_type;
-
-/**
- * jmicron_cable_detect - cable detection
- * @hwif: IDE port
- *
- * Returns the cable type.
- */
-
-static u8 jmicron_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
-
- u32 control;
- u32 control5;
-
- int port = hwif->channel;
- port_type port_map[2];
-
- pci_read_config_dword(pdev, 0x40, &control);
-
-	/* There are two basic mappings. One has the two SATA ports merged
-	   as master/slave and the secondary channel as PATA; the other has
-	   only the SATA ports mapped */
- if (control & (1 << 23)) {
- port_map[0] = PORT_SATA;
- port_map[1] = PORT_PATA0;
- } else {
- port_map[0] = PORT_SATA;
- port_map[1] = PORT_SATA;
- }
-
- /* The 365/366 may have this bit set to map the second PATA port
- as the internal primary channel */
- pci_read_config_dword(pdev, 0x80, &control5);
- if (control5 & (1<<24))
- port_map[0] = PORT_PATA1;
-
- /* The two ports may then be logically swapped by the firmware */
- if (control & (1 << 22))
- port = port ^ 1;
-
- /*
-	 * Now that we know which physical port we are talking about, we can
- * actually do our cable checking etc. Thankfully we don't need
- * to do the plumbing for other cases.
- */
- switch (port_map[port]) {
- case PORT_PATA0:
- if (control & (1 << 3)) /* 40/80 pin primary */
- return ATA_CBL_PATA40;
- return ATA_CBL_PATA80;
- case PORT_PATA1:
- if (control5 & (1 << 19)) /* 40/80 pin secondary */
- return ATA_CBL_PATA40;
- return ATA_CBL_PATA80;
- case PORT_SATA:
- break;
- }
- /* Avoid bogus "control reaches end of non-void function" */
- return ATA_CBL_PATA80;
-}
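-
-/*
- * Summary of the decode above: bit 23 of config dword 0x40 selects
- * whether the second logical port is PATA0 or a mirror of the SATA
- * port, bit 24 of dword 0x80 can remap PATA1 as the primary channel,
- * and bit 22 of 0x40 swaps the two logical ports.
- */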
-
-static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-}
-
-/**
- * jmicron_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * As the JMicron snoops for timings we don't need to do anything here.
- */
-
-static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-}
-
-static const struct ide_port_ops jmicron_port_ops = {
- .set_pio_mode = jmicron_set_pio_mode,
- .set_dma_mode = jmicron_set_dma_mode,
- .cable_detect = jmicron_cable_detect,
-};
-
-static const struct ide_port_info jmicron_chipset = {
- .name = DRV_NAME,
- .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
- .port_ops = &jmicron_port_ops,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
-};
-
-/**
- * jmicron_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds a Jmicron controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &jmicron_chipset, NULL);
-}
-
-/* All JMB PATA controllers have and will continue to have the same
- * interface. Matching vendor and device class is enough for all
- * current and future controllers if the controller is programmed
- * properly.
- *
- * If libata is configured, jmicron PCI quirk programs the controller
- * into the correct mode. If libata isn't configured, match known
- * device IDs too to maintain backward compatibility.
- */
-static struct pci_device_id jmicron_pci_tbl[] = {
-#if !defined(CONFIG_ATA) && !defined(CONFIG_ATA_MODULE)
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368) },
-#endif
- { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
-
-static struct pci_driver jmicron_pci_driver = {
- .name = "JMicron IDE",
- .id_table = jmicron_pci_tbl,
- .probe = jmicron_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init jmicron_ide_init(void)
-{
- return ide_pci_register_driver(&jmicron_pci_driver);
-}
-
-static void __exit jmicron_ide_exit(void)
-{
- pci_unregister_driver(&jmicron_pci_driver);
-}
-
-module_init(jmicron_ide_init);
-module_exit(jmicron_ide_exit);
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the JMicron in legacy modes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
deleted file mode 100644
index 8d2bf73bc548..000000000000
--- a/drivers/ide/macide.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Macintosh IDE Driver
- *
- * Copyright (C) 1998 by Michael Schmitz
- *
- * This driver was written based on information obtained from the MacOS IDE
- * driver binary by Mikael Forselius
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <asm/macintosh.h>
-
-#define DRV_NAME "mac_ide"
-
-#define IDE_BASE 0x50F1A000 /* Base address of IDE controller */
-
-/*
- * Generic IDE registers as offsets from the base
- * These match MkLinux so they should be correct.
- */
-
-#define IDE_CONTROL 0x38 /* control/altstatus */
-
-/*
- * Mac-specific registers
- */
-
-/*
- * this register is odd; it doesn't seem to do much and it's
- * not word-aligned like virtually every other hardware register
- * on the Mac...
- */
-
-#define IDE_IFR 0x101 /* (0x101) IDE interrupt flags on Quadra:
- *
- * Bit 0+1: some interrupt flags
- * Bit 2+3: some interrupt enable
- * Bit 4: ??
- * Bit 5: IDE interrupt flag (any hwif)
- * Bit 6: maybe IDE interrupt enable (any hwif) ??
- * Bit 7: Any interrupt condition
- */
-
-volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
-
-int macide_test_irq(ide_hwif_t *hwif)
-{
- if (*ide_ifr & 0x20)
- return 1;
- return 0;
-}
-
-static void macide_clear_irq(ide_drive_t *drive)
-{
- *ide_ifr &= ~0x20;
-}
-
-static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
- int irq)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- for (i = 0; i < 8; i++)
- hw->io_ports_array[i] = base + i * 4;
-
- hw->io_ports.ctl_addr = base + IDE_CONTROL;
-
- hw->irq = irq;
-}
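
macide_setup_ports() above encodes the Quadra register layout: the eight taskfile registers sit at 4-byte strides from the base, with the control/altstatus register at base + 0x38. A tiny sketch that prints the resulting addresses (the base is the driver's IDE_BASE constant; purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x50F1A000UL;	/* IDE_BASE */
	int i;

	for (i = 0; i < 8; i++)
		printf("io_ports[%d] = 0x%08lx\n", i, base + i * 4);
	printf("ctl          = 0x%08lx\n", base + 0x38);
	return 0;
}
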
-
-static const struct ide_port_ops macide_port_ops = {
- .clear_irq = macide_clear_irq,
- .test_irq = macide_test_irq,
-};
-
-static const struct ide_port_info macide_port_info = {
- .port_ops = &macide_port_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-static const char *mac_ide_name[] =
- { "Quadra", "Powerbook", "Powerbook Baboon" };
-
-/*
- * Probe for a Macintosh IDE interface
- */
-
-static int mac_ide_probe(struct platform_device *pdev)
-{
- struct resource *mem, *irq;
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_port_info d = macide_port_info;
- struct ide_host *host;
- int rc;
-
- if (!MACH_IS_MAC)
- return -ENODEV;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), DRV_NAME)) {
- dev_err(&pdev->dev, "resources busy\n");
- return -EBUSY;
- }
-
- printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
- mac_ide_name[macintosh_config->ide_type - 1]);
-
- macide_setup_ports(&hw, mem->start, irq->start);
-
- rc = ide_host_add(&d, hws, 1, &host);
- if (rc)
- return rc;
-
- platform_set_drvdata(pdev, host);
- return 0;
-}
-
-static int mac_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-static struct platform_driver mac_ide_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = mac_ide_probe,
- .remove = mac_ide_remove,
-};
-
-module_platform_driver(mac_ide_driver);
-
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
deleted file mode 100644
index 11a672aba6ee..000000000000
--- a/drivers/ide/ns87415.c
+++ /dev/null
@@ -1,350 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1997-1998 Mark Lord <mlord@pobox.com>
- * Copyright (C) 1998 Eddie C. Dost <ecd@skynet.be>
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2004 Grant Grundler <grundler at parisc-linux.org>
- *
- * Inspired by an earlier effort from David S. Miller <davem@redhat.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "ns87415"
-
-#ifdef CONFIG_SUPERIO
-/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
- * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
- * which use the integrated NS87514 cell for CD-ROM support.
- * i.e. we have to support it for CD-ROM installs.
- * See drivers/parisc/superio.c for more gory details.
- */
-#include <asm/superio.h>
-
-#define SUPERIO_IDE_MAX_RETRIES 25
-
-/* Because of a defect in Super I/O, all reads of the PCI DMA status
- * registers, IDE status register and the IDE select register need to be
- * retried
- */
-static u8 superio_ide_inb (unsigned long port)
-{
- u8 tmp;
- int retries = SUPERIO_IDE_MAX_RETRIES;
-
- /* printk(" [ reading port 0x%x with retry ] ", port); */
-
- do {
- tmp = inb(port);
- if (tmp == 0)
- udelay(50);
- } while (tmp == 0 && retries-- > 0);
-
- return tmp;
-}
-
-static u8 superio_read_status(ide_hwif_t *hwif)
-{
- return superio_ide_inb(hwif->io_ports.status_addr);
-}
-
-static u8 superio_dma_sff_read_status(ide_hwif_t *hwif)
-{
- return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
-}
-
-static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf,
- u8 valid)
-{
- struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
- if (valid & IDE_VALID_ERROR)
- tf->error = inb(io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf->nsect = inb(io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf->lbal = inb(io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf->lbam = inb(io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf->lbah = inb(io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf->device = superio_ide_inb(io_ports->device_addr);
-}
-
-static void ns87415_dev_select(ide_drive_t *drive);
-
-static const struct ide_tp_ops superio_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = superio_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ns87415_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = superio_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static void superio_init_iops(struct hwif_s *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u32 dma_stat;
- u8 port = hwif->channel, tmp;
-
- dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
-
- /* Clear error/interrupt, enable dma */
- tmp = superio_ide_inb(dma_stat);
- outb(tmp | 0x66, dma_stat);
-}
-#else
-#define superio_dma_sff_read_status ide_dma_sff_read_status
-#endif
-
-static unsigned int ns87415_count = 0, ns87415_control[MAX_HWIFS] = { 0 };
-
-/*
- * This routine enables or disables (according to IDE_DFLAG_PRESENT)
- * the IRQ associated with the port, and selects either PIO or DMA
- * handshaking for the next I/O operation.
- */
-static void ns87415_prepare_drive (ide_drive_t *drive, unsigned int use_dma)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int bit, other, new, *old = (unsigned int *) hwif->select_data;
- unsigned long flags;
-
- local_irq_save(flags);
- new = *old;
-
- /* Adjust IRQ enable bit */
- bit = 1 << (8 + hwif->channel);
-
- if (drive->dev_flags & IDE_DFLAG_PRESENT)
- new &= ~bit;
- else
- new |= bit;
-
- /* Select PIO or DMA, DMA may only be selected for one drive/channel. */
- bit = 1 << (20 + (drive->dn & 1) + (hwif->channel << 1));
- other = 1 << (20 + (1 - (drive->dn & 1)) + (hwif->channel << 1));
- new = use_dma ? ((new & ~other) | bit) : (new & ~bit);
-
- if (new != *old) {
- unsigned char stat;
-
- /*
- * Don't change DMA engine settings while Write Buffers
- * are busy.
- */
- (void) pci_read_config_byte(dev, 0x43, &stat);
- while (stat & 0x03) {
- udelay(1);
- (void) pci_read_config_byte(dev, 0x43, &stat);
- }
-
- *old = new;
- (void) pci_write_config_dword(dev, 0x40, new);
-
- /*
- * And let things settle...
- */
- udelay(10);
- }
-
- local_irq_restore(flags);
-}
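
The bit arithmetic above is dense, so a pure-function sketch of the same control-word update may help: bits 8 and 9 mask the per-channel IRQ (clear = enabled), and bits 20..23 select DMA handshaking per drive, with at most one drive per channel allowed in DMA mode. ns87415_ctrl() below is a hypothetical helper, not part of the driver:

#include <stdio.h>

static unsigned int ns87415_ctrl(unsigned int old, int channel, int unit,
				 int present, int use_dma)
{
	unsigned int irq_bit = 1u << (8 + channel);
	unsigned int dma_bit = 1u << (20 + unit + (channel << 1));
	unsigned int other   = 1u << (20 + (1 - unit) + (channel << 1));
	unsigned int new = old;

	/* clear the bit to unmask the channel IRQ for a present drive */
	new = present ? (new & ~irq_bit) : (new | irq_bit);
	/* DMA for this drive also forces its channel mate back to PIO */
	new = use_dma ? ((new & ~other) | dma_bit) : (new & ~dma_bit);
	return new;
}

int main(void)
{
	/* drive 0 on channel 1, present, DMA enabled -> 0x00400000 */
	printf("ctrl = 0x%08x\n", ns87415_ctrl(0, 1, 0, 1, 1));
	return 0;
}
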
-
-static void ns87415_dev_select(ide_drive_t *drive)
-{
- ns87415_prepare_drive(drive,
- !!(drive->dev_flags & IDE_DFLAG_USING_DMA));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-static void ns87415_dma_start(ide_drive_t *drive)
-{
- ns87415_prepare_drive(drive, 1);
- ide_dma_start(drive);
-}
-
-static int ns87415_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
- /* get DMA command mode */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- /* from ERRATA: clear the INTR & ERROR bits */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
-
- ns87415_prepare_drive(drive, 0);
-
- /* verify good DMA status */
- return (dma_stat & 7) != 4;
-}
-
-static void init_hwif_ns87415 (ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int ctrl, using_inta;
- u8 progif;
-#ifdef __sparc_v9__
- int timeout;
- u8 stat;
-#endif
-
- /*
-	 * We cannot probe for the IRQ: both ports share a common IRQ on INTA.
-	 * Also, leave the IRQ masked during drive probing, to prevent infinite
-	 * interrupts from a potentially floating INTA.
- *
- * IRQs get unmasked in dev_select() when drive is first used.
- */
- (void) pci_read_config_dword(dev, 0x40, &ctrl);
- (void) pci_read_config_byte(dev, 0x09, &progif);
- /* is irq in "native" mode? */
- using_inta = progif & (1 << (hwif->channel << 1));
- if (!using_inta)
- using_inta = ctrl & (1 << (4 + hwif->channel));
- if (hwif->mate) {
- hwif->select_data = hwif->mate->select_data;
- } else {
- hwif->select_data = (unsigned long)
- &ns87415_control[ns87415_count++];
- ctrl |= (1 << 8) | (1 << 9); /* mask both IRQs */
- if (using_inta)
- ctrl &= ~(1 << 6); /* unmask INTA */
- *((unsigned int *)hwif->select_data) = ctrl;
- (void) pci_write_config_dword(dev, 0x40, ctrl);
-
- /*
- * Set prefetch size to 512 bytes for both ports,
- * but don't turn on/off prefetching here.
- */
- pci_write_config_byte(dev, 0x55, 0xee);
-
-#ifdef __sparc_v9__
- /*
- * XXX: Reset the device, if we don't it will not respond to
- * dev_select() properly during first ide_probe_port().
- */
- timeout = 10000;
- outb(12, hwif->io_ports.ctl_addr);
- udelay(10);
- outb(8, hwif->io_ports.ctl_addr);
- do {
- udelay(50);
- stat = hwif->tp_ops->read_status(hwif);
- if (stat == 0xff)
- break;
- } while ((stat & ATA_BUSY) && --timeout);
-#endif
- }
-
- if (!using_inta)
- hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-
- if (!hwif->dma_base)
- return;
-
- outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-static const struct ide_tp_ops ns87415_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ns87415_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_dma_ops ns87415_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ns87415_dma_start,
- .dma_end = ns87415_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = superio_dma_sff_read_status,
-};
-
-static const struct ide_port_info ns87415_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_ns87415,
- .tp_ops = &ns87415_tp_ops,
- .dma_ops = &ns87415_dma_ops,
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_NO_ATAPI_DMA,
-};
-
-static int ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = ns87415_chipset;
-
-#ifdef CONFIG_SUPERIO
- if (PCI_SLOT(dev->devfn) == 0xE) {
- /* Built-in - assume it's under superio. */
- d.init_iops = superio_init_iops;
- d.tp_ops = &superio_tp_ops;
- }
-#endif
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id ns87415_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
-
-static struct pci_driver ns87415_pci_driver = {
- .name = "NS87415_IDE",
- .id_table = ns87415_pci_tbl,
- .probe = ns87415_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init ns87415_ide_init(void)
-{
- return ide_pci_register_driver(&ns87415_pci_driver);
-}
-
-static void __exit ns87415_ide_exit(void)
-{
- pci_unregister_driver(&ns87415_pci_driver);
-}
-
-module_init(ns87415_ide_init);
-module_exit(ns87415_ide_exit);
-
-MODULE_AUTHOR("Mark Lord, Eddie Dost, Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for NS87415 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
deleted file mode 100644
index c374f82333c6..000000000000
--- a/drivers/ide/opti621.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
- */
-
-/*
- * Authors:
- * Jaromir Koutek <miri@punknet.cz>,
- * Jan Harkes <jaharkes@cwi.nl>,
- * Mark Lord <mlord@pobox.com>
- * Some parts of code are from ali14xx.c and from rz1000.c.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "opti621"
-
-#define READ_REG 0 /* index of Read cycle timing register */
-#define WRITE_REG 1 /* index of Write cycle timing register */
-#define CNTRL_REG 3 /* index of Control register */
-#define STRAP_REG 5 /* index of Strap register */
-#define MISC_REG 6 /* index of Miscellaneous register */
-
-static int reg_base;
-
-static DEFINE_SPINLOCK(opti621_lock);
-
-/* Write value to register reg, base of register
- * is at reg_base (0x1f0 primary, 0x170 secondary,
- * if not changed by PCI configuration).
- * This is from setupvic.exe program.
- */
-static void write_reg(u8 value, int reg)
-{
- inw(reg_base + 1);
- inw(reg_base + 1);
- outb(3, reg_base + 2);
- outb(value, reg_base + reg);
- outb(0x83, reg_base + 2);
-}
-
-/* Read value from register reg, base of register
- * is at reg_base (0x1f0 primary, 0x170 secondary,
- * if not changed by PCI configuration).
- * This is from setupvic.exe program.
- */
-static u8 read_reg(int reg)
-{
- u8 ret = 0;
-
- inw(reg_base + 1);
- inw(reg_base + 1);
- outb(3, reg_base + 2);
- ret = inb(reg_base + reg);
- outb(0x83, reg_base + 2);
-
- return ret;
-}
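
Together these helpers implement the OPTi banked-register protocol: two dummy reads of reg_base + 1, a write of 3 to reg_base + 2 to expose the extended bank, the actual register access, then a write of 0x83 to hide the bank again. A userspace sketch of one such read using the Linux sys/io.h port helpers; it needs ioperm()/root and is purely illustrative, so don't run it against a live controller:

#include <stdio.h>
#include <sys/io.h>

#define MISC_REG 6

int main(void)
{
	int reg_base = 0x1f0;		/* primary port, as in the driver */
	unsigned char v;

	if (ioperm(reg_base, 8, 1)) {
		perror("ioperm");
		return 1;
	}

	inw(reg_base + 1);		/* two dummy reads ... */
	inw(reg_base + 1);
	outb(3, reg_base + 2);		/* ... then unlock the bank */
	v = inb(reg_base + MISC_REG);
	outb(0x83, reg_base + 2);	/* lock it again */

	printf("MISC = 0x%02x\n", v);
	return 0;
}
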
-
-static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *pair = ide_get_pair_dev(drive);
- unsigned long flags;
- unsigned long mode = drive->pio_mode, pair_mode;
- const u8 pio = mode - XFER_PIO_0;
- u8 tim, misc, addr_pio = pio, clk;
-
- /* DRDY is default 2 (by OPTi Databook) */
- static const u8 addr_timings[2][5] = {
- { 0x20, 0x10, 0x00, 0x00, 0x00 }, /* 33 MHz */
- { 0x10, 0x10, 0x00, 0x00, 0x00 }, /* 25 MHz */
- };
- static const u8 data_rec_timings[2][5] = {
- { 0x5b, 0x45, 0x32, 0x21, 0x20 }, /* 33 MHz */
- { 0x48, 0x34, 0x21, 0x10, 0x10 } /* 25 MHz */
- };
-
- ide_set_drivedata(drive, (void *)mode);
-
- if (pair) {
- pair_mode = (unsigned long)ide_get_drivedata(pair);
- if (pair_mode && pair_mode < mode)
- addr_pio = pair_mode - XFER_PIO_0;
- }
-
- spin_lock_irqsave(&opti621_lock, flags);
-
- reg_base = hwif->io_ports.data_addr;
-
- /* allow Register-B */
- outb(0xc0, reg_base + CNTRL_REG);
- /* hmm, setupvic.exe does this ;-) */
- outb(0xff, reg_base + 5);
-	/* if this reads 0xff, the adapter may not exist */
-	(void)inb(reg_base + CNTRL_REG);
-	/* if this reads 0xc0, no interface may exist */
- read_reg(CNTRL_REG);
-
- /* check CLK speed */
- clk = read_reg(STRAP_REG) & 1;
-
- printk(KERN_INFO "%s: CLK = %d MHz\n", hwif->name, clk ? 25 : 33);
-
- tim = data_rec_timings[clk][pio];
- misc = addr_timings[clk][addr_pio];
-
- /* select Index-0/1 for Register-A/B */
- write_reg(drive->dn & 1, MISC_REG);
- /* set read cycle timings */
- write_reg(tim, READ_REG);
- /* set write cycle timings */
- write_reg(tim, WRITE_REG);
-
- /* use Register-A for drive 0 */
- /* use Register-B for drive 1 */
- write_reg(0x85, CNTRL_REG);
-
- /* set address setup, DRDY timings, */
- /* and read prefetch for both drives */
- write_reg(misc, MISC_REG);
-
- spin_unlock_irqrestore(&opti621_lock, flags);
-}
-
-static const struct ide_port_ops opti621_port_ops = {
- .set_pio_mode = opti621_set_pio_mode,
-};
-
-static const struct ide_port_info opti621_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
- .port_ops = &opti621_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &opti621_chipset, NULL);
-}
-
-static const struct pci_device_id opti621_pci_tbl[] = {
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, opti621_pci_tbl);
-
-static struct pci_driver opti621_pci_driver = {
- .name = "Opti621_IDE",
- .id_table = opti621_pci_tbl,
- .probe = opti621_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init opti621_ide_init(void)
-{
- return ide_pci_register_driver(&opti621_pci_driver);
-}
-
-static void __exit opti621_ide_exit(void)
-{
- pci_unregister_driver(&opti621_pci_driver);
-}
-
-module_init(opti621_ide_init);
-module_exit(opti621_ide_exit);
-
-MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
deleted file mode 100644
index d1fe4c13e35c..000000000000
--- a/drivers/ide/palm_bk3710.c
+++ /dev/null
@@ -1,387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Palmchip bk3710 IDE controller
- *
- * Copyright (C) 2006 Texas Instruments.
- * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
- *
- * ----------------------------------------------------------------------------
- *
- * ----------------------------------------------------------------------------
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/ide.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-/* Offset of the primary interface registers */
-#define IDE_PALM_ATA_PRI_REG_OFFSET 0x1F0
-
-/* Primary Control Offset */
-#define IDE_PALM_ATA_PRI_CTL_OFFSET 0x3F6
-
-#define BK3710_BMICP 0x00
-#define BK3710_BMISP 0x02
-#define BK3710_BMIDTP 0x04
-#define BK3710_IDETIMP 0x40
-#define BK3710_IDESTATUS 0x47
-#define BK3710_UDMACTL 0x48
-#define BK3710_MISCCTL 0x50
-#define BK3710_REGSTB 0x54
-#define BK3710_REGRCVR 0x58
-#define BK3710_DATSTB 0x5C
-#define BK3710_DATRCVR 0x60
-#define BK3710_DMASTB 0x64
-#define BK3710_DMARCVR 0x68
-#define BK3710_UDMASTB 0x6C
-#define BK3710_UDMATRP 0x70
-#define BK3710_UDMAENV 0x74
-#define BK3710_IORDYTMP 0x78
-
-static unsigned ideclk_period; /* in nanoseconds */
-
-struct palm_bk3710_udmatiming {
- unsigned int rptime; /* tRP -- Ready to pause time (nsec) */
- unsigned int cycletime; /* tCYCTYP2/2 -- avg Cycle Time (nsec) */
- /* tENV is always a minimum of 20 nsec */
-};
-
-static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
- { 160, 240 / 2 }, /* UDMA Mode 0 */
- { 125, 160 / 2 }, /* UDMA Mode 1 */
- { 100, 120 / 2 }, /* UDMA Mode 2 */
- { 100, 90 / 2 }, /* UDMA Mode 3 */
- { 100, 60 / 2 }, /* UDMA Mode 4 */
- { 85, 40 / 2 }, /* UDMA Mode 5 */
-};
-
-static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
- unsigned int mode)
-{
- u8 tenv, trp, t0;
- u32 val32;
- u16 val16;
-
- /* DMA Data Setup */
- t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
- ideclk_period) - 1;
- tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
- trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
- ideclk_period) - 1;
-
- /* udmastb Ultra DMA Access Strobe Width */
- val32 = readl(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t0 << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMASTB);
-
- /* udmatrp Ultra DMA Ready to Pause Time */
- val32 = readl(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
- val32 |= (trp << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMATRP);
-
-	/* udmaenv Ultra DMA envelope Time */
- val32 = readl(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
- val32 |= (tenv << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMAENV);
-
- /* Enable UDMA for Device */
- val16 = readw(base + BK3710_UDMACTL) | (1 << dev);
- writew(val16, base + BK3710_UDMACTL);
-}
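
Every timing routine in this driver converts a nanosecond figure into interface-clock counts via DIV_ROUND_UP() and then subtracts one before programming the register field. A worked instance, assuming a hypothetical 66 MHz interface clock; in the driver the rate comes from clk_get_rate() at probe time, and the period is deliberately rounded down so programmed timings are never shorter than required:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 66000000UL;		/* assumed clock */
	unsigned long period = 1000000000UL / rate;	/* 15 ns, rounded down */
	unsigned long cycletime = 120 / 2;	/* UDMA mode 2 table entry */
	unsigned long rptime = 100;		/* tRP for UDMA mode 2 */

	printf("t0=%lu trp=%lu tenv=%lu\n",
	       DIV_ROUND_UP(cycletime, period) - 1,
	       DIV_ROUND_UP(rptime, period) - 1,
	       DIV_ROUND_UP(20UL, period) - 1);
	return 0;
}
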
-
-static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
- unsigned short min_cycle,
- unsigned int mode)
-{
- u8 td, tkw, t0;
- u32 val32;
- u16 val16;
- struct ide_timing *t;
- int cycletime;
-
- t = ide_timing_find_mode(mode);
- cycletime = max_t(int, t->cycle, min_cycle);
-
- /* DMA Data Setup */
- t0 = DIV_ROUND_UP(cycletime, ideclk_period);
- td = DIV_ROUND_UP(t->active, ideclk_period);
- tkw = t0 - td - 1;
- td -= 1;
-
- val32 = readl(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (td << (dev ? 8 : 0));
- writel(val32, base + BK3710_DMASTB);
-
- val32 = readl(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (tkw << (dev ? 8 : 0));
- writel(val32, base + BK3710_DMARCVR);
-
- /* Disable UDMA for Device */
- val16 = readw(base + BK3710_UDMACTL) & ~(1 << dev);
- writew(val16, base + BK3710_UDMACTL);
-}
-
-static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
- unsigned int dev, unsigned int cycletime,
- unsigned int mode)
-{
- u8 t2, t2i, t0;
- u32 val32;
- struct ide_timing *t;
-
- t = ide_timing_find_mode(XFER_PIO_0 + mode);
-
- /* PIO Data Setup */
- t0 = DIV_ROUND_UP(cycletime, ideclk_period);
- t2 = DIV_ROUND_UP(t->active, ideclk_period);
-
- t2i = t0 - t2 - 1;
- t2 -= 1;
-
- val32 = readl(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2 << (dev ? 8 : 0));
- writel(val32, base + BK3710_DATSTB);
-
- val32 = readl(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2i << (dev ? 8 : 0));
- writel(val32, base + BK3710_DATRCVR);
-
- if (mate) {
- u8 mode2 = mate->pio_mode - XFER_PIO_0;
-
- if (mode2 < mode)
- mode = mode2;
- }
-
- /* TASKFILE Setup */
- t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
- t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
-
- t2i = t0 - t2 - 1;
- t2 -= 1;
-
- val32 = readl(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2 << (dev ? 8 : 0));
- writel(val32, base + BK3710_REGSTB);
-
- val32 = readl(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2i << (dev ? 8 : 0));
- writel(val32, base + BK3710_REGRCVR);
-}
-
-static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int is_slave = drive->dn & 1;
- void __iomem *base = (void __iomem *)hwif->dma_base;
- const u8 xferspeed = drive->dma_mode;
-
- if (xferspeed >= XFER_UDMA_0) {
- palm_bk3710_setudmamode(base, is_slave,
- xferspeed - XFER_UDMA_0);
- } else {
- palm_bk3710_setdmamode(base, is_slave,
- drive->id[ATA_ID_EIDE_DMA_MIN],
- xferspeed);
- }
-}
-
-static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned int cycle_time;
- int is_slave = drive->dn & 1;
- ide_drive_t *mate;
- void __iomem *base = (void __iomem *)hwif->dma_base;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * Obtain the drive PIO data for tuning the Palm Chip registers
- */
- cycle_time = ide_pio_cycle_time(drive, pio);
- mate = ide_get_pair_dev(drive);
- palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
-}
-
-static void palm_bk3710_chipinit(void __iomem *base)
-{
- /*
- * REVISIT: the ATA reset signal needs to be managed through a
- * GPIO, which means it should come from platform_data. Until
- * we get and use such information, we have to trust that things
- * have been reset before we get here.
- */
-
- /*
- * Program the IDETIMP Register Value based on the following assumptions
- *
- * (ATA_IDETIMP_IDEEN , ENABLE ) |
- * (ATA_IDETIMP_PREPOST1 , DISABLE) |
- * (ATA_IDETIMP_PREPOST0 , DISABLE) |
- *
- * DM6446 silicon rev 2.1 and earlier have no observed net benefit
- * from enabling prefetch/postwrite.
- */
- writew(BIT(15), base + BK3710_IDETIMP);
-
- /*
- * UDMACTL Ultra-ATA DMA Control
- * (ATA_UDMACTL_UDMAP1 , 0 ) |
- * (ATA_UDMACTL_UDMAP0 , 0 )
- *
- */
- writew(0, base + BK3710_UDMACTL);
-
- /*
-	 * MISCCTL Miscellaneous Control Register
- * (ATA_MISCCTL_HWNHLD1P , 1 cycle)
- * (ATA_MISCCTL_HWNHLD0P , 1 cycle)
- * (ATA_MISCCTL_TIMORIDE , 1)
- */
- writel(0x001, base + BK3710_MISCCTL);
-
- /*
- * IORDYTMP IORDY Timer for Primary Register
- * (ATA_IORDYTMP_IORDYTMP , 0xffff )
- */
- writel(0xFFFF, base + BK3710_IORDYTMP);
-
- /*
- * Configure BMISP Register
- * (ATA_BMISP_DMAEN1 , DISABLE ) |
- * (ATA_BMISP_DMAEN0 , DISABLE ) |
- * (ATA_BMISP_IORDYINT , CLEAR) |
- * (ATA_BMISP_INTRSTAT , CLEAR) |
- * (ATA_BMISP_DMAERROR , CLEAR)
- */
- writew(0, base + BK3710_BMISP);
-
- palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
- palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
-}
-
-static u8 palm_bk3710_cable_detect(ide_hwif_t *hwif)
-{
- return ATA_CBL_PATA80;
-}
-
-static int palm_bk3710_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
-
- return 0;
-}
-
-static const struct ide_port_ops palm_bk3710_ports_ops = {
- .set_pio_mode = palm_bk3710_set_pio_mode,
- .set_dma_mode = palm_bk3710_set_dma_mode,
- .cable_detect = palm_bk3710_cable_detect,
-};
-
-static struct ide_port_info palm_bk3710_port_info __initdata = {
- .init_dma = palm_bk3710_init_dma,
- .port_ops = &palm_bk3710_ports_ops,
- .dma_ops = &sff_dma_ops,
- .host_flags = IDE_HFLAG_MMIO,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .chipset = ide_palm3710,
-};
-
-static int __init palm_bk3710_probe(struct platform_device *pdev)
-{
- struct clk *clk;
- struct resource *mem, *irq;
- void __iomem *base;
- unsigned long rate, mem_size;
- int i, rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return -ENODEV;
-
- clk_enable(clk);
- rate = clk_get_rate(clk);
- if (!rate)
- return -EINVAL;
-
- /* NOTE: round *down* to meet minimum timings; we count in clocks */
- ideclk_period = 1000000000UL / rate;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL) {
- printk(KERN_ERR "failed to get memory region resource\n");
- return -ENODEV;
- }
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- printk(KERN_ERR "failed to get IRQ resource\n");
- return -ENODEV;
- }
-
- mem_size = resource_size(mem);
- if (request_mem_region(mem->start, mem_size, "palm_bk3710") == NULL) {
- printk(KERN_ERR "failed to request memory region\n");
- return -EBUSY;
- }
-
- base = ioremap(mem->start, mem_size);
- if (!base) {
- printk(KERN_ERR "failed to map IO memory\n");
- release_mem_region(mem->start, mem_size);
- return -ENOMEM;
- }
-
- /* Configure the Palm Chip controller */
- palm_bk3710_chipinit(base);
-
- memset(&hw, 0, sizeof(hw));
- for (i = 0; i < IDE_NR_PORTS - 2; i++)
- hw.io_ports_array[i] = (unsigned long)
- (base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
- hw.io_ports.ctl_addr = (unsigned long)
- (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
- hw.irq = irq->start;
- hw.dev = &pdev->dev;
-
- palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
- ATA_UDMA5;
-
- /* Register the IDE interface with Linux */
- rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL);
- if (rc)
- goto out;
-
- return 0;
-out:
- printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
- return rc;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:palm_bk3710");
-
-static struct platform_driver platform_bk_driver = {
- .driver = {
- .name = "palm_bk3710",
- },
-};
-
-static int __init palm_bk3710_init(void)
-{
- return platform_driver_probe(&platform_bk_driver, palm_bk3710_probe);
-}
-
-module_init(palm_bk3710_init);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
deleted file mode 100644
index 4fcafb9121e0..000000000000
--- a/drivers/ide/pdc202xx_new.c
+++ /dev/null
@@ -1,557 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Promise TX2/TX4/TX2000/133 IDE driver
- *
- * Split from:
- * linux/drivers/ide/pdc202xx.c Version 0.35 Mar. 30, 2002
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2005-2007 MontaVista Software, Inc.
- * Portions Copyright (C) 1999 Promise Technology, Inc.
- * Author: Frank Tiernan (frankt@promise.com)
- * Released under terms of General Public License
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/ktime.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
-
-#define DRV_NAME "pdc202xx_new"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
-#else
-#define DBG(fmt, args...)
-#endif
-
-static u8 max_dma_rate(struct pci_dev *pdev)
-{
- u8 mode;
-
- switch(pdev->device) {
- case PCI_DEVICE_ID_PROMISE_20277:
- case PCI_DEVICE_ID_PROMISE_20276:
- case PCI_DEVICE_ID_PROMISE_20275:
- case PCI_DEVICE_ID_PROMISE_20271:
- case PCI_DEVICE_ID_PROMISE_20269:
- mode = 4;
- break;
- case PCI_DEVICE_ID_PROMISE_20270:
- case PCI_DEVICE_ID_PROMISE_20268:
- mode = 3;
- break;
- default:
- return 0;
- }
-
- return mode;
-}
-
-/**
- * get_indexed_reg - Get indexed register
- * @hwif: for the port address
- * @index: index of the indexed register
- */
-static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
-{
- u8 value;
-
- outb(index, hwif->dma_base + 1);
- value = inb(hwif->dma_base + 3);
-
- DBG("index[%02X] value[%02X]\n", index, value);
- return value;
-}
-
-/**
- * set_indexed_reg - Set indexed register
- * @hwif: for the port address
- * @index: index of the indexed register
- */
-static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
-{
- outb(index, hwif->dma_base + 1);
- outb(value, hwif->dma_base + 3);
- DBG("index[%02X] value[%02X]\n", index, value);
-}
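
These two helpers drive a classic index/data register pair: writing the index to dma_base + 1 selects which internal register the data port at dma_base + 3 exposes. A self-contained sketch with a plain array standing in for the hardware, doing the same read-modify-write that pdcnew_set_dma_mode() performs later to clear the tHOLD bit for UDMA mode 2:

#include <stdio.h>

static unsigned char regs[0x20];	/* fake indexed register file */
static unsigned char index_latch;

static void out_index(unsigned char idx)  { index_latch = idx; }
static unsigned char in_data(void)        { return regs[index_latch]; }
static void out_data(unsigned char v)     { regs[index_latch] = v; }

int main(void)
{
	regs[0x10] = 0x9a;		/* illustrative value with tHOLD set */

	out_index(0x10);		/* select indexed register 0x10 */
	out_data(in_data() & 0x7f);	/* clear tHOLD (bit 7) */

	printf("reg 0x10 = 0x%02x\n", regs[0x10]);	/* 0x1a */
	return 0;
}
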
-
-/*
- * ATA Timing Tables based on 133 MHz PLL output clock.
- *
- * If the PLL outputs 100 MHz clock, the ASIC hardware will set
- * the timing registers automatically when "set features" command is
- * issued to the device. However, if the PLL output clock is 133 MHz,
- * the following tables must be used.
- */
-static struct pio_timing {
- u8 reg0c, reg0d, reg13;
-} pio_timings [] = {
- { 0xfb, 0x2b, 0xac }, /* PIO mode 0, IORDY off, Prefetch off */
- { 0x46, 0x29, 0xa4 }, /* PIO mode 1, IORDY off, Prefetch off */
- { 0x23, 0x26, 0x64 }, /* PIO mode 2, IORDY off, Prefetch off */
- { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
- { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
-};
-
-static struct mwdma_timing {
- u8 reg0e, reg0f;
-} mwdma_timings [] = {
- { 0xdf, 0x5f }, /* MWDMA mode 0 */
- { 0x6b, 0x27 }, /* MWDMA mode 1 */
- { 0x69, 0x25 }, /* MWDMA mode 2 */
-};
-
-static struct udma_timing {
- u8 reg10, reg11, reg12;
-} udma_timings [] = {
- { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
- { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
- { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
- { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
- { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
- { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
- { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
-};
-
-static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
- const u8 speed = drive->dma_mode;
-
- /*
- * IDE core issues SETFEATURES_XFER to the drive first (thanks to
- * IDE_HFLAG_POST_SET_MODE in ->host_flags). PDC202xx hardware will
- * automatically set the timing registers based on 100 MHz PLL output.
- *
- * As we set up the PLL to output 133 MHz for UltraDMA/133 capable
- * chips, we must override the default register settings...
- */
- if (max_dma_rate(dev) == 4) {
- u8 mode = speed & 0x07;
-
- if (speed >= XFER_UDMA_0) {
- set_indexed_reg(hwif, 0x10 + adj,
- udma_timings[mode].reg10);
- set_indexed_reg(hwif, 0x11 + adj,
- udma_timings[mode].reg11);
- set_indexed_reg(hwif, 0x12 + adj,
- udma_timings[mode].reg12);
- } else {
- set_indexed_reg(hwif, 0x0e + adj,
- mwdma_timings[mode].reg0e);
- set_indexed_reg(hwif, 0x0f + adj,
- mwdma_timings[mode].reg0f);
- }
- } else if (speed == XFER_UDMA_2) {
- /* Set tHOLD bit to 0 if using UDMA mode 2 */
- u8 tmp = get_indexed_reg(hwif, 0x10 + adj);
-
- set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
- }
-}
-
-static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- if (max_dma_rate(dev) == 4) {
- set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
- set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
- set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
- }
-}
-
-static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
-{
- if (get_indexed_reg(hwif, 0x0b) & 0x04)
- return ATA_CBL_PATA40;
- else
- return ATA_CBL_PATA80;
-}
-
-static void pdcnew_reset(ide_drive_t *drive)
-{
- /*
-	 * The reset itself was removed because it duplicated what the
-	 * caller already does; only the warning below remains.
- */
- printk(KERN_WARNING "pdc202xx_new: %s channel reset.\n",
- drive->hwif->channel ? "Secondary" : "Primary");
-}
-
-/**
- * read_counter - Read the byte count registers
- * @dma_base: for the port address
- */
-static long read_counter(u32 dma_base)
-{
- u32 pri_dma_base = dma_base, sec_dma_base = dma_base + 0x08;
- u8 cnt0, cnt1, cnt2, cnt3;
- long count = 0, last;
- int retry = 3;
-
- do {
- last = count;
-
- /* Read the current count */
- outb(0x20, pri_dma_base + 0x01);
- cnt0 = inb(pri_dma_base + 0x03);
- outb(0x21, pri_dma_base + 0x01);
- cnt1 = inb(pri_dma_base + 0x03);
- outb(0x20, sec_dma_base + 0x01);
- cnt2 = inb(sec_dma_base + 0x03);
- outb(0x21, sec_dma_base + 0x01);
- cnt3 = inb(sec_dma_base + 0x03);
-
- count = (cnt3 << 23) | (cnt2 << 15) | (cnt1 << 8) | cnt0;
-
- /*
- * The 30-bit decrementing counter is read in 4 pieces.
-		 * An incorrect value may be read when the most significant
-		 * bytes are changing, hence the retries.
- */
- } while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));
-
- DBG("cnt0[%02X] cnt1[%02X] cnt2[%02X] cnt3[%02X]\n",
- cnt0, cnt1, cnt2, cnt3);
-
- return count;
-}
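
The loop above handles a classic hazard: the 30-bit counter keeps decrementing while its four bytes are read one at a time, so a sample whose pieces straddle a carry can be wildly wrong. The fix is to re-read until two successive samples agree in their high-order bits and are properly ordered. A minimal sketch of just that pattern, with sample() standing in for the four inb() reads:

#include <stdio.h>

static unsigned int counter = 0x3fffffff;

/* pretend the hardware keeps counting down between reads */
static unsigned int sample(void)
{
	return counter -= 97;
}

static unsigned int read_stable(void)
{
	unsigned int last, count = sample();
	int retry = 3;

	do {
		last = count;
		count = sample();
	} while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));

	return count;
}

int main(void)
{
	printf("count = 0x%08x\n", read_stable());
	return 0;
}
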
-
-/**
- * detect_pll_input_clock - Detect the PLL input clock in Hz.
- * @dma_base: for the port address
- * E.g. 16949000 on 33 MHz PCI bus, i.e. half of the PCI clock.
- */
-static long detect_pll_input_clock(unsigned long dma_base)
-{
- ktime_t start_time, end_time;
- long start_count, end_count;
- long pll_input, usec_elapsed;
- u8 scr1;
-
- start_count = read_counter(dma_base);
- start_time = ktime_get();
-
- /* Start the test mode */
- outb(0x01, dma_base + 0x01);
- scr1 = inb(dma_base + 0x03);
- DBG("scr1[%02X]\n", scr1);
- outb(scr1 | 0x40, dma_base + 0x03);
-
- /* Let the counter run for 10 ms. */
- mdelay(10);
-
- end_count = read_counter(dma_base);
- end_time = ktime_get();
-
- /* Stop the test mode */
- outb(0x01, dma_base + 0x01);
- scr1 = inb(dma_base + 0x03);
- DBG("scr1[%02X]\n", scr1);
- outb(scr1 & ~0x40, dma_base + 0x03);
-
- /*
- * Calculate the input clock in Hz
- * (the clock counter is 30 bit wide and counts down)
- */
- usec_elapsed = ktime_us_delta(end_time, start_time);
- pll_input = ((start_count - end_count) & 0x3fffffff) / 10 *
- (10000000 / usec_elapsed);
-
- DBG("start[%ld] end[%ld]\n", start_count, end_count);
-
- return pll_input;
-}
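
Plugging representative numbers into the final computation shows where the 16949000 Hz figure in the kernel-doc comes from: about 169490 ticks of a counter running at half the PCI clock, measured over 10000 us. The counts below are made up for illustration:

#include <stdio.h>

int main(void)
{
	long start_count = 0x20000000;
	long end_count = start_count - 169490;	/* ~10 ms at 16.949 MHz */
	long usec_elapsed = 10000;
	long pll_input;

	pll_input = ((start_count - end_count) & 0x3fffffff) / 10 *
		    (10000000 / usec_elapsed);

	printf("PLL input = %ld Hz\n", pll_input);	/* 16949000 */
	return 0;
}
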
-
-#ifdef CONFIG_PPC_PMAC
-static void apple_kiwi_init(struct pci_dev *pdev)
-{
- struct device_node *np = pci_device_to_OF_node(pdev);
- u8 conf;
-
- if (np == NULL || !of_device_is_compatible(np, "kiwi-root"))
- return;
-
- if (pdev->revision >= 0x03) {
- /* Setup chip magic config stuff (from darwin) */
- pci_read_config_byte (pdev, 0x40, &conf);
- pci_write_config_byte(pdev, 0x40, (conf | 0x01));
- }
-}
-#endif /* CONFIG_PPC_PMAC */
-
-static int init_chipset_pdcnew(struct pci_dev *dev)
-{
- const char *name = DRV_NAME;
- unsigned long dma_base = pci_resource_start(dev, 4);
- unsigned long sec_dma_base = dma_base + 0x08;
- long pll_input, pll_output, ratio;
- int f, r;
- u8 pll_ctl0, pll_ctl1;
-
- if (dma_base == 0)
- return -EFAULT;
-
-#ifdef CONFIG_PPC_PMAC
- apple_kiwi_init(dev);
-#endif
-
- /* Calculate the required PLL output frequency */
- switch(max_dma_rate(dev)) {
- case 4: /* it's 133 MHz for Ultra133 chips */
- pll_output = 133333333;
- break;
- case 3: /* and 100 MHz for Ultra100 chips */
- default:
- pll_output = 100000000;
- break;
- }
-
- /*
- * Detect PLL input clock.
-	 * On some systems, where the PCI bus runs at a non-standard clock
-	 * rate (e.g. 25 or 40 MHz), we have to adjust the cycle time.
-	 * PDC20268 and newer chips employ a PLL circuit to help correct the
-	 * timing register settings.
- */
- pll_input = detect_pll_input_clock(dma_base);
- printk(KERN_INFO "%s %s: PLL input clock is %ld kHz\n",
- name, pci_name(dev), pll_input / 1000);
-
- /* Sanity check */
- if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
- printk(KERN_ERR "%s %s: Bad PLL input clock %ld Hz, giving up!"
- "\n", name, pci_name(dev), pll_input);
- goto out;
- }
-
-#ifdef DEBUG
- DBG("pll_output is %ld Hz\n", pll_output);
-
- /* Show the current clock value of PLL control register
- * (maybe already configured by the BIOS)
- */
- outb(0x02, sec_dma_base + 0x01);
- pll_ctl0 = inb(sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- pll_ctl1 = inb(sec_dma_base + 0x03);
-
- DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-#endif
-
- /*
-	 * Calculate F, R and NO from the required ratio:
-	 * POUT = (F + 2) / ((R + 2) * NO)
- */
- ratio = pll_output / (pll_input / 1000);
- if (ratio < 8600L) { /* 8.6x */
- /* Using NO = 0x01, R = 0x0d */
- r = 0x0d;
- } else if (ratio < 12900L) { /* 12.9x */
- /* Using NO = 0x01, R = 0x08 */
- r = 0x08;
- } else if (ratio < 16100L) { /* 16.1x */
- /* Using NO = 0x01, R = 0x06 */
- r = 0x06;
- } else if (ratio < 64000L) { /* 64x */
- r = 0x00;
- } else {
- /* Invalid ratio */
- printk(KERN_ERR "%s %s: Bad ratio %ld, giving up!\n",
- name, pci_name(dev), ratio);
- goto out;
- }
-
- f = (ratio * (r + 2)) / 1000 - 2;
-
- DBG("F[%d] R[%d] ratio*1000[%ld]\n", f, r, ratio);
-
- if (unlikely(f < 0 || f > 127)) {
- /* Invalid F */
- printk(KERN_ERR "%s %s: F[%d] invalid!\n",
- name, pci_name(dev), f);
- goto out;
- }
-
- pll_ctl0 = (u8) f;
- pll_ctl1 = (u8) r;
-
- DBG("Writing pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-
- outb(0x02, sec_dma_base + 0x01);
- outb(pll_ctl0, sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- outb(pll_ctl1, sec_dma_base + 0x03);
-
-	/* Wait for the PLL circuit to stabilize */
- mdelay(30);
-
-#ifdef DEBUG
- /*
- * Show the current clock value of PLL control register
- */
- outb(0x02, sec_dma_base + 0x01);
- pll_ctl0 = inb(sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- pll_ctl1 = inb(sec_dma_base + 0x03);
-
- DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-#endif
-
- out:
- return 0;
-}
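
The frequency-synthesis arithmetic above can be exercised in isolation. The sketch below runs the same R selection and F computation for POUT = (F + 2) / ((R + 2) * NO), with NO at 0x01 as in the commented branches; the input clock value is illustrative:

#include <stdio.h>

int main(void)
{
	long pll_input = 16949000, pll_output = 133333333;
	long ratio = pll_output / (pll_input / 1000);	/* ratio * 1000 */
	int r, f;

	if (ratio < 8600L)		/* 8.6x */
		r = 0x0d;
	else if (ratio < 12900L)	/* 12.9x */
		r = 0x08;
	else if (ratio < 16100L)	/* 16.1x */
		r = 0x06;
	else				/* up to 64x */
		r = 0x00;

	f = (ratio * (r + 2)) / 1000 - 2;
	printf("ratio*1000=%ld -> F=%d R=%d\n", ratio, f, r);
	return 0;
}
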
-
-static struct pci_dev *pdc20270_get_dev2(struct pci_dev *dev)
-{
- struct pci_dev *dev2;
-
- dev2 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn) + 1,
- PCI_FUNC(dev->devfn)));
-
- if (dev2 &&
- dev2->vendor == dev->vendor &&
- dev2->device == dev->device) {
-
- if (dev2->irq != dev->irq) {
- dev2->irq = dev->irq;
- printk(KERN_INFO DRV_NAME " %s: PCI config space "
- "interrupt fixed\n", pci_name(dev));
- }
-
- return dev2;
- }
-
- return NULL;
-}
-
-static const struct ide_port_ops pdcnew_port_ops = {
- .set_pio_mode = pdcnew_set_pio_mode,
- .set_dma_mode = pdcnew_set_dma_mode,
- .resetproc = pdcnew_reset,
- .cable_detect = pdcnew_cable_detect,
-};
-
-#define DECLARE_PDCNEW_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_pdcnew, \
- .port_ops = &pdcnew_port_ops, \
- .host_flags = IDE_HFLAG_POST_SET_MODE | \
- IDE_HFLAG_ERROR_STOPS_FIFO | \
- IDE_HFLAG_OFF_BOARD, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info pdcnew_chipsets[] = {
- /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
- /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
-};
-
-/**
- * pdc202new_init_one - called when a pdc202xx is found
- * @dev: the pdc202new device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
- struct pci_dev *bridge = dev->bus->self;
-
- if (dev->device == PCI_DEVICE_ID_PROMISE_20270 && bridge &&
- bridge->vendor == PCI_VENDOR_ID_DEC &&
- bridge->device == PCI_DEVICE_ID_DEC_21150) {
- struct pci_dev *dev2;
-
- if (PCI_SLOT(dev->devfn) & 2)
- return -ENODEV;
-
- dev2 = pdc20270_get_dev2(dev);
-
- if (dev2) {
- int ret = ide_pci_init_two(dev, dev2, d, NULL);
- if (ret < 0)
- pci_dev_put(dev2);
- return ret;
- }
- }
-
- if (dev->device == PCI_DEVICE_ID_PROMISE_20276 && bridge &&
- bridge->vendor == PCI_VENDOR_ID_INTEL &&
- (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
- bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
- printk(KERN_INFO DRV_NAME " %s: attached to I2O RAID controller,"
- " skipping\n", pci_name(dev));
- return -ENODEV;
- }
-
- return ide_pci_init_one(dev, d, NULL);
-}
-
-static void pdc202new_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
-}
-
-static const struct pci_device_id pdc202new_pci_tbl[] = {
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl);
-
-static struct pci_driver pdc202new_pci_driver = {
- .name = "Promise_IDE",
- .id_table = pdc202new_pci_tbl,
- .probe = pdc202new_init_one,
- .remove = pdc202new_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init pdc202new_ide_init(void)
-{
- return ide_pci_register_driver(&pdc202new_pci_driver);
-}
-
-static void __exit pdc202new_ide_exit(void)
-{
- pci_unregister_driver(&pdc202new_pci_driver);
-}
-
-module_init(pdc202new_ide_init);
-module_exit(pdc202new_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
-MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
deleted file mode 100644
index 5248ac064e6e..000000000000
--- a/drivers/ide/pdc202xx_old.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Portions Copyright (C) 1999 Promise Technology, Inc.
- * Author: Frank Tiernan (frankt@promise.com)
- * Released under terms of General Public License
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "pdc202xx_old"
-
-static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 drive_pci = 0x60 + (drive->dn << 2);
- const u8 speed = drive->dma_mode;
-
- u8 AP = 0, BP = 0, CP = 0;
- u8 TA = 0, TB = 0, TC = 0;
-
- pci_read_config_byte(dev, drive_pci, &AP);
- pci_read_config_byte(dev, drive_pci + 1, &BP);
- pci_read_config_byte(dev, drive_pci + 2, &CP);
-
- switch(speed) {
- case XFER_UDMA_5:
- case XFER_UDMA_4: TB = 0x20; TC = 0x01; break;
- case XFER_UDMA_2: TB = 0x20; TC = 0x01; break;
- case XFER_UDMA_3:
- case XFER_UDMA_1: TB = 0x40; TC = 0x02; break;
- case XFER_UDMA_0:
- case XFER_MW_DMA_2: TB = 0x60; TC = 0x03; break;
- case XFER_MW_DMA_1: TB = 0x60; TC = 0x04; break;
- case XFER_MW_DMA_0: TB = 0xE0; TC = 0x0F; break;
- case XFER_PIO_4: TA = 0x01; TB = 0x04; break;
- case XFER_PIO_3: TA = 0x02; TB = 0x06; break;
- case XFER_PIO_2: TA = 0x03; TB = 0x08; break;
- case XFER_PIO_1: TA = 0x05; TB = 0x0C; break;
- case XFER_PIO_0:
- default: TA = 0x09; TB = 0x13; break;
- }
-
- if (speed < XFER_SW_DMA_0) {
- /*
- * preserve SYNC_INT / ERDDY_EN bits while clearing
- * Prefetch_EN / IORDY_EN / PA[3:0] bits of register A
- */
- AP &= ~0x3f;
- if (ide_pio_need_iordy(drive, speed - XFER_PIO_0))
- AP |= 0x20; /* set IORDY_EN bit */
- if (drive->media == ide_disk)
- AP |= 0x10; /* set Prefetch_EN bit */
- /* clear PB[4:0] bits of register B */
- BP &= ~0x1f;
- pci_write_config_byte(dev, drive_pci, AP | TA);
- pci_write_config_byte(dev, drive_pci + 1, BP | TB);
- } else {
- /* clear MB[2:0] bits of register B */
- BP &= ~0xe0;
- /* clear MC[3:0] bits of register C */
- CP &= ~0x0f;
- pci_write_config_byte(dev, drive_pci + 1, BP | TB);
- pci_write_config_byte(dev, drive_pci + 2, CP | TC);
- }
-}
-
-static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- pdc202xx_set_mode(hwif, drive);
-}
-
-static int pdc202xx_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long high_16 = pci_resource_start(dev, 4);
- u8 sc1d = inb(high_16 + 0x1d);
-
- if (hwif->channel) {
- /*
- * bit 7: error, bit 6: interrupting,
- * bit 5: FIFO full, bit 4: FIFO empty
- */
- return (sc1d & 0x40) ? 1 : 0;
- } else {
- /*
- * bit 3: error, bit 2: interrupting,
- * bit 1: FIFO full, bit 0: FIFO empty
- */
- return (sc1d & 0x04) ? 1 : 0;
- }
-}
-
-static u8 pdc2026x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
-
- pci_read_config_word(dev, 0x50, &CIS);
-
- return (CIS & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-/*
- * Set the control register to use the 66MHz system
- * clock for UDMA 3/4/5 mode operation when necessary.
- *
- * FIXME: this register is shared by both channels; some locking is needed
- *
- * It may also be possible to leave the 66MHz clock on
- * and readjust the timing parameters.
- */
-static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
-{
- unsigned long clock_reg = hwif->extra_base + 0x01;
- u8 clock = inb(clock_reg);
-
- outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
-}
-
-static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
-{
- unsigned long clock_reg = hwif->extra_base + 0x01;
- u8 clock = inb(clock_reg);
-
- outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
-}
-
-static void pdc2026x_init_hwif(ide_hwif_t *hwif)
-{
- pdc_old_disable_66MHz_clock(hwif);
-}
-
-static void pdc202xx_dma_start(ide_drive_t *drive)
-{
- if (drive->current_speed > XFER_UDMA_2)
- pdc_old_enable_66MHz_clock(drive->hwif);
- if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- unsigned long high_16 = hwif->extra_base - 16;
- unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
- u32 word_count = 0;
- u8 clock = inb(high_16 + 0x11);
-
- outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
- word_count = (blk_rq_sectors(rq) << 8);
- word_count = (rq_data_dir(rq) == READ) ?
- word_count | 0x05000000 :
- word_count | 0x06000000;
- outl(word_count, atapi_reg);
- }
- ide_dma_start(drive);
-}
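
For ATAPI and LBA48 requests the controller wants the transfer length programmed explicitly: a 16-bit-word count (sectors * 256, since a 512-byte sector is 256 words) with a direction nibble in the top byte. A sketch of just that packing; the helper name is hypothetical:

#include <stdio.h>

static unsigned int pdc_word_count(unsigned int sectors, int is_read)
{
	unsigned int wc = sectors << 8;		/* 512 bytes = 256 words */

	return wc | (is_read ? 0x05000000 : 0x06000000);
}

int main(void)
{
	printf("8-sector read  -> 0x%08x\n", pdc_word_count(8, 1));
	printf("8-sector write -> 0x%08x\n", pdc_word_count(8, 0));
	return 0;
}
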
-
-static int pdc202xx_dma_end(ide_drive_t *drive)
-{
- if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
- ide_hwif_t *hwif = drive->hwif;
- unsigned long high_16 = hwif->extra_base - 16;
- unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
- u8 clock = 0;
-
- outl(0, atapi_reg); /* zero out extra */
- clock = inb(high_16 + 0x11);
- outb(clock & ~(hwif->channel ? 0x08:0x02), high_16 + 0x11);
- }
- if (drive->current_speed > XFER_UDMA_2)
- pdc_old_disable_66MHz_clock(drive->hwif);
- return ide_dma_end(drive);
-}
-
-static int init_chipset_pdc202xx(struct pci_dev *dev)
-{
- unsigned long dmabase = pci_resource_start(dev, 4);
- u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
-
- if (dmabase == 0)
- goto out;
-
- udma_speed_flag = inb(dmabase | 0x1f);
- primary_mode = inb(dmabase | 0x1a);
- secondary_mode = inb(dmabase | 0x1b);
- printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
- "Primary %s Mode " \
- "Secondary %s Mode.\n", pci_name(dev),
- (udma_speed_flag & 1) ? "EN" : "DIS",
- (primary_mode & 1) ? "MASTER" : "PCI",
- (secondary_mode & 1) ? "MASTER" : "PCI" );
-
- if (!(udma_speed_flag & 1)) {
- printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
- pci_name(dev), udma_speed_flag,
- (udma_speed_flag|1));
- outb(udma_speed_flag | 1, dmabase | 0x1f);
- printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
- }
-out:
- return 0;
-}
-
-static void pdc202ata4_fixup_irq(struct pci_dev *dev, const char *name)
-{
- if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
- u8 irq = 0, irq2 = 0;
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
- /* 0xbc */
- pci_read_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, &irq2);
- if (irq != irq2) {
- pci_write_config_byte(dev,
- (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */
- printk(KERN_INFO "%s %s: PCI config space interrupt "
- "mirror fixed\n", name, pci_name(dev));
- }
- }
-}
-
-#define IDE_HFLAGS_PDC202XX \
- (IDE_HFLAG_ERROR_STOPS_FIFO | \
- IDE_HFLAG_OFF_BOARD)
-
-static const struct ide_port_ops pdc20246_port_ops = {
- .set_pio_mode = pdc202xx_set_pio_mode,
- .set_dma_mode = pdc202xx_set_mode,
- .test_irq = pdc202xx_test_irq,
-};
-
-static const struct ide_port_ops pdc2026x_port_ops = {
- .set_pio_mode = pdc202xx_set_pio_mode,
- .set_dma_mode = pdc202xx_set_mode,
- .test_irq = pdc202xx_test_irq,
- .cable_detect = pdc2026x_cable_detect,
-};
-
-static const struct ide_dma_ops pdc2026x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = pdc202xx_dma_start,
- .dma_end = pdc202xx_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-#define DECLARE_PDC2026X_DEV(udma, sectors) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_pdc202xx, \
- .init_hwif = pdc2026x_init_hwif, \
- .port_ops = &pdc2026x_port_ops, \
- .dma_ops = &pdc2026x_dma_ops, \
- .host_flags = IDE_HFLAGS_PDC202XX, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- .max_sectors = sectors, \
- }
-
-static const struct ide_port_info pdc202xx_chipsets[] = {
- { /* 0: PDC20246 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_pdc202xx,
- .port_ops = &pdc20246_port_ops,
- .dma_ops = &sff_dma_ops,
- .host_flags = IDE_HFLAGS_PDC202XX,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
-
- /* 1: PDC2026{2,3} */
- DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
- /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
- DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
-};
-
-/**
- * pdc202xx_init_one - called when a PDC202xx is found
- * @dev: the pdc202xx device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int pdc202xx_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- const struct ide_port_info *d;
- u8 idx = id->driver_data;
-
- d = &pdc202xx_chipsets[idx];
-
- if (idx < 2)
- pdc202ata4_fixup_irq(dev, d->name);
-
- if (dev->vendor == PCI_DEVICE_ID_PROMISE_20265) {
- struct pci_dev *bridge = dev->bus->self;
-
- if (bridge &&
- bridge->vendor == PCI_VENDOR_ID_INTEL &&
- (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
- bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
- printk(KERN_INFO DRV_NAME " %s: skipping Promise "
- "PDC20265 attached to I2O RAID controller\n",
- pci_name(dev));
- return -ENODEV;
- }
- }
-
- return ide_pci_init_one(dev, d, NULL);
-}
-
-static const struct pci_device_id pdc202xx_pci_tbl[] = {
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl);
-
-static struct pci_driver pdc202xx_pci_driver = {
- .name = "Promise_Old_IDE",
- .id_table = pdc202xx_pci_tbl,
- .probe = pdc202xx_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init pdc202xx_ide_init(void)
-{
- return ide_pci_register_driver(&pdc202xx_pci_driver);
-}
-
-static void __exit pdc202xx_ide_exit(void)
-{
- pci_unregister_driver(&pdc202xx_pci_driver);
-}
-
-module_init(pdc202xx_ide_init);
-module_exit(pdc202xx_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
deleted file mode 100644
index a671cead6ae7..000000000000
--- a/drivers/ide/piix.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Documentation:
- *
- * Publicly available from the Intel web site. Errata documentation
- * is also publicly available. As an aid to anyone hacking on this
- * driver, the list of relevant errata is below, going back to the
- * PIIX4. Older device documentation is now a bit tricky to find.
- *
- * Errata of note:
- *
- * Unfixable
- * PIIX4 errata #9 - Only on ultra obscure hw
- * ICH3 errata #13 - Not observed to affect real hw
- * by Intel
- *
- * Things we must deal with
- * PIIX4 errata #10 - BM IDE hang with non UDMA
- * (must stop/start dma to recover)
- * 440MX errata #15 - As PIIX4 errata #10
- * PIIX4 errata #15 - Must not read control registers
- * during a PIO transfer
- * 440MX errata #13 - As PIIX4 errata #15
- * ICH2 errata #21 - DMA mode 0 doesn't work right
- * ICH0/1 errata #55 - As ICH2 errata #21
- * ICH2 spec c #9 - Extra operations needed to handle
- * drive hotswap [NOT YET SUPPORTED]
- * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
- * and must be dword aligned
- * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
- *
- * Should have been BIOS fixed:
- * 450NX: errata #19 - DMA hangs on old 450NX
- * 450NX: errata #20 - DMA hangs on old 450NX
- * 450NX: errata #25 - Corruption with DMA on old 450NX
- * ICH3 errata #15 - IDE deadlock under high load
- * (BIOS must set dev 31 fn 0 bit 23)
- * ICH3 errata #18 - Don't use native mode
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "piix"
-
-static int no_piix_dma;
-
-/**
- * piix_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode based upon the settings done by AMI BIOS.
- */
-
-static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = hwif->channel ? 0x42 : 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- static DEFINE_SPINLOCK(tune_lock);
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* ISP RTC */
- static const u8 timings[][2] = {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 },
- };
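- /* Indexed by PIO mode: e.g. PIO 4 uses ISP=2, RTC=3 clocks,
- * written into the timing register fields below. */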
-
- /*
- * Master vs slave is synchronized above us, but the slave register is
- * shared by the two hwifs, so the corner case of two slave timeouts in
- * parallel must be locked.
- */
- spin_lock_irqsave(&tune_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media == ide_disk)
- control |= 4; /* Prefetch, post write */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1) {
- /* Set PPE, IE and TIME */
- master_data |= control << 4;
- }
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data &= hwif->channel ? 0x0f : 0xf0;
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
- (hwif->channel ? 4 : 0);
- } else {
- master_data &= ~0x3307;
- if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data |= control;
- }
- master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&tune_lock, flags);
-}
-
-/**
- * piix_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Set a PIIX host controller to the desired DMA mode. This involves
- * programming the right timing data into the PCI configuration space.
- */
-
-static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = hwif->channel ? 0x42 : 0x40;
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int v_flag = 0x01 << drive->dn;
- int w_flag = 0x10 << drive->dn;
- int u_speed = 0;
- int sitre;
- u16 reg4042, reg4a;
- u8 reg48, reg54, reg55;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- sitre = (reg4042 & 0x4000) ? 1 : 0;
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_read_config_byte(dev, 0x54, &reg54);
- pci_read_config_byte(dev, 0x55, &reg55);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
-
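- /* Cycle-time field: UDMA0 -> 0, UDMA1/3/5 -> 1, UDMA2/4 -> 2;
- * the base clock is selected separately via the v_flag (0x54)
- * and w_flag (0x55) writes below. */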
- u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- if (speed == XFER_UDMA_5) {
- pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
- } else {
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
- }
- if ((reg4a & a_speed) != u_speed)
- pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
- if (speed > XFER_UDMA_2) {
- if (!(reg54 & v_flag))
- pci_write_config_byte(dev, 0x54, reg54 | v_flag);
- } else
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- if (reg54 & v_flag)
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- if (reg55 & w_flag)
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- piix_set_pio_mode(hwif, drive);
- }
-}
-
-/**
- * init_chipset_ich - set up the ICH chipset
- * @dev: PCI device to set up
- *
- * Initialize the PCI device as required. For the ICH this turns
- * out to be nice and simple.
- */
-
-static int init_chipset_ich(struct pci_dev *dev)
-{
- u32 extra = 0;
-
- pci_read_config_dword(dev, 0x54, &extra);
- pci_write_config_dword(dev, 0x54, extra | 0x400);
-
- return 0;
-}
-
-/**
- * ich_clear_irq - clear BMDMA status
- * @drive: IDE drive
- *
- * ICHx controllers set the DMA INTR bit for both DMA and PIO
- * commands, so BMDMA status might need to be cleared even for
- * PIO interrupts to prevent spurious/lost IRQs.
- */
-static void ich_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat;
-
- /*
- * ide_dma_end() needs BMDMA status for error checking.
- * So, skip clearing BMDMA status here and leave it
- * to ide_dma_end() if this is DMA interrupt.
- */
- if (drive->waiting_for_dma || hwif->dma_base == 0)
- return;
-
- /* clear the INTR & ERROR bits */
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* Should we force the bit as well ? */
- outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-struct ich_laptop {
- u16 device;
- u16 subvendor;
- u16 subdevice;
-};
-
-/*
- * List of laptops that use short cables rather than 80 wire
- */
-
-static const struct ich_laptop ich_laptop[] = {
- /* devid, subvendor, subdev */
- { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
- { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
- { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
- { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
- { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
- { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
- { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
- { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
- { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
- { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
- { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
- /* end marker */
- { 0, }
-};
-
-static u8 piix_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- const struct ich_laptop *lap = &ich_laptop[0];
- u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
-
- /* check for specials */
- while (lap->device) {
- if (lap->device == pdev->device &&
- lap->subvendor == pdev->subsystem_vendor &&
- lap->subdevice == pdev->subsystem_device) {
- return ATA_CBL_PATA40_SHORT;
- }
- lap++;
- }
-
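- /* 80c presence bits in reg 0x54: mask 0x30 = primary, 0xc0 = secondary */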
- pci_read_config_byte(pdev, 0x54, &reg54h);
-
- return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-/**
- * init_hwif_piix - fill in the hwif for the PIIX
- * @hwif: IDE interface
- *
- * Set up the ide_hwif_t for the PIIX interface according to the
- * capabilities of the hardware.
- */
-
-static void init_hwif_piix(ide_hwif_t *hwif)
-{
- if (!hwif->dma_base)
- return;
-
- if (no_piix_dma)
- hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
-}
-
-static const struct ide_port_ops piix_port_ops = {
- .set_pio_mode = piix_set_pio_mode,
- .set_dma_mode = piix_set_dma_mode,
- .cable_detect = piix_cable_detect,
-};
-
-static const struct ide_port_ops ich_port_ops = {
- .set_pio_mode = piix_set_pio_mode,
- .set_dma_mode = piix_set_dma_mode,
- .clear_irq = ich_clear_irq,
- .cable_detect = piix_cable_detect,
-};
-
-#define DECLARE_PIIX_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_hwif = init_hwif_piix, \
- .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
- .port_ops = &piix_port_ops, \
- .pio_mask = ATA_PIO4, \
- .swdma_mask = ATA_SWDMA2_ONLY, \
- .mwdma_mask = ATA_MWDMA12_ONLY, \
- .udma_mask = udma, \
- }
-
-#define DECLARE_ICH_DEV(mwdma, udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_ich, \
- .init_hwif = init_hwif_piix, \
- .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
- .port_ops = &ich_port_ops, \
- .pio_mask = ATA_PIO4, \
- .swdma_mask = ATA_SWDMA2_ONLY, \
- .mwdma_mask = mwdma, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info piix_pci_info[] = {
- /* 0: MPIIX */
- { /*
- * MPIIX actually has only a single IDE channel mapped to
- * the primary or secondary ports depending on the value
- * of bit 14 of the IDETIM register at offset 0x6c
- */
- .name = DRV_NAME,
- .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
- .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
- /* This is a painful system; best to let it self-tune for now */
- },
- /* 1: PIIXa/PIIXb/PIIX3 */
- DECLARE_PIIX_DEV(0x00), /* no udma */
- /* 2: PIIX4 */
- DECLARE_PIIX_DEV(ATA_UDMA2),
- /* 3: ICH0 */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
- /* 4: ICH */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
- /* 5: PIIX4 */
- DECLARE_PIIX_DEV(ATA_UDMA4),
- /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
- /* 7: ICH7/7-R, no MWDMA1 */
- DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
-};
-
-/**
- * piix_init_one - called when a PIIX is found
- * @dev: the piix device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
-}
-
-/**
- * piix_check_450nx - Check for problem 450NX setup
- *
- * Check for the presence of 450NX errata #19 and #25. If
- * they are found, disable use of IDE DMA.
- */
-
-static void piix_check_450nx(void)
-{
- struct pci_dev *pdev = NULL;
- u16 cfg;
-
- while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
- /* Look for a 450NX PXB. Check for problem configurations;
- a PCI quirk checks bit 6 already. */
- pci_read_config_word(pdev, 0x41, &cfg);
- /* Only on the original revision: IDE DMA can hang */
- if (pdev->revision == 0x00)
- no_piix_dma = 1;
- /* On all revisions below 5, PXB bus lock must be disabled for IDE */
- else if (cfg & (1 << 14) && pdev->revision < 5)
- no_piix_dma = 2;
- }
- if (no_piix_dma)
- printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
- if (no_piix_dma == 2)
- printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
-}
-
-static const struct pci_device_id piix_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
-#endif
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
-
-static struct pci_driver piix_pci_driver = {
- .name = "PIIX_IDE",
- .id_table = piix_pci_tbl,
- .probe = piix_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init piix_ide_init(void)
-{
- piix_check_450nx();
- return ide_pci_register_driver(&piix_pci_driver);
-}
-
-static void __exit piix_ide_exit(void)
-{
- pci_unregister_driver(&piix_pci_driver);
-}
-
-module_init(piix_ide_init);
-module_exit(piix_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
-MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
deleted file mode 100644
index ea0b064b5f56..000000000000
--- a/drivers/ide/pmac.c
+++ /dev/null
@@ -1,1703 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Support for IDE interfaces on PowerMacs.
- *
- * These IDE interfaces are memory-mapped and have a DBDMA channel
- * for doing DMA.
- *
- * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
- * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * Some code taken from drivers/ide/ide-dma.c:
- *
- * Copyright (c) 1995-1998 Mark Lord
- *
- * TODO: - Use pre-calculated (kauai) timing tables all the time and
- * get rid of the "rounded" tables used previously, so we have the
- * same table format for all controllers and can then just have one
- * big table
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/notifier.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/dbdma.h>
-#include <asm/ide.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/sections.h>
-#include <asm/irq.h>
-#include <asm/mediabay.h>
-
-#define DRV_NAME "ide-pmac"
-
-#undef IDE_PMAC_DEBUG
-
-#define DMA_WAIT_TIMEOUT 50
-
-typedef struct pmac_ide_hwif {
- unsigned long regbase;
- int irq;
- int kind;
- int aapl_bus_id;
- unsigned broken_dma : 1;
- unsigned broken_dma_warn : 1;
- struct device_node* node;
- struct macio_dev *mdev;
- u32 timings[4];
- volatile u32 __iomem *kauai_fcr;
- ide_hwif_t *hwif;
-
- /* These fields duplicate what is in hwif. We currently
- * can't use the hwif ones because of some assumptions that are
- * being made by the generic code about the kind of DMA controller
- * and the format of the DMA table. This will have to be fixed though.
- */
- volatile struct dbdma_regs __iomem * dma_regs;
- struct dbdma_cmd* dma_table_cpu;
-} pmac_ide_hwif_t;
-
-enum {
- controller_ohare, /* OHare based */
- controller_heathrow, /* Heathrow/Paddington */
- controller_kl_ata3, /* KeyLargo ATA-3 */
- controller_kl_ata4, /* KeyLargo ATA-4 */
- controller_un_ata6, /* UniNorth2 ATA-6 */
- controller_k2_ata6, /* K2 ATA-6 */
- controller_sh_ata6, /* Shasta ATA-6 */
-};
-
-static const char* model_name[] = {
- "OHare ATA", /* OHare based */
- "Heathrow ATA", /* Heathrow/Paddington */
- "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
- "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
- "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
- "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
- "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
-};
-
-/*
- * Extra registers, both 32-bit little-endian
- */
-#define IDE_TIMING_CONFIG 0x200
-#define IDE_INTERRUPT 0x300
-
-/* Kauai (U2) ATA has different register setup */
-#define IDE_KAUAI_PIO_CONFIG 0x200
-#define IDE_KAUAI_ULTRA_CONFIG 0x210
-#define IDE_KAUAI_POLL_CONFIG 0x220
-
-/*
- * Timing configuration register definitions
- */
-
-/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
-#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
-#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
-#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
-#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
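-/* e.g. SYSCLK_TICKS(150) = (150 + 29) / 30 = 5 ticks on the 33 MHz cell,
- * and SYSCLK_TICKS_66(150) = (150 + 14) / 15 = 10 ticks on the 66 MHz cell:
- * a ceiling division of nanoseconds by the cell's clock period.
- */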
-
-/* 133 MHz cell, found in Shasta.
- * See comments about the 100 MHz UniNorth 2...
- * Note that PIO_MASK and MDMA_MASK seem to overlap.
- */
-#define TR_133_PIOREG_PIO_MASK 0xff000fff
-#define TR_133_PIOREG_MDMA_MASK 0x00fff800
-#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
-#define TR_133_UDMAREG_UDMA_EN 0x00000001
-
-/* 100 MHz cell, found in UniNorth 2. I don't have much info about
- * this one yet; it appears as a PCI device (106b/0033) on the UniNorth
- * internal PCI bus and its clock is controlled like gem or fw. It
- * appears to be an evolution of the KeyLargo ATA-4 with the timing
- * register extended to two 32-bit registers and a similar DBDMA
- * channel. Other registers seem to exist but I can't tell much
- * about them.
- *
- * So far, I'm using pre-calculated tables for this, extracted from
- * the values used by the Mac OS X driver.
- *
- * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
- * register controls the UDMA timings. At least, it seems bit 0
- * of this one enables UDMA vs. MDMA, and bits 4..7 are the
- * cycle time in units of 10 ns. Bits 8..15 are used but I don't
- * know their meaning yet.
- */
-#define TR_100_PIOREG_PIO_MASK 0xff000fff
-#define TR_100_PIOREG_MDMA_MASK 0x00fff000
-#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
-#define TR_100_UDMAREG_UDMA_EN 0x00000001
-
-
-/* 66 MHz cell, found in KeyLargo. Can do ultra modes 0 to 2 on a
- * 40-conductor cable and up to 4 on an 80-conductor one.
- * Clock unit is 15 ns (66 MHz).
- *
- * Three values can be programmed:
- * - Write data setup, which appears to match the cycle time. They
- * also call it DIOW setup.
- * - Ready-to-pause time (from the spec)
- * - Address setup. That one is weird. I don't see where exactly
- * it fits in UDMA cycles; I got its name from an obscure piece
- * of commented-out code in Darwin. They leave it at 0, and we do
- * as well, despite a comment that would suggest it has a
- * minimum value of 45 ns.
- * Apple also adds 60 ns to the write data setup (or cycle time?) on
- * reads.
- */
-#define TR_66_UDMA_MASK 0xfff00000
-#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
-#define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
-#define TR_66_UDMA_ADDRSETUP_SHIFT 29
-#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
-#define TR_66_UDMA_RDY2PAUS_SHIFT 25
-#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
-#define TR_66_UDMA_WRDATASETUP_SHIFT 21
-#define TR_66_MDMA_MASK 0x000ffc00
-#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
-#define TR_66_MDMA_RECOVERY_SHIFT 15
-#define TR_66_MDMA_ACCESS_MASK 0x00007c00
-#define TR_66_MDMA_ACCESS_SHIFT 10
-#define TR_66_PIO_MASK 0x000003ff
-#define TR_66_PIO_RECOVERY_MASK 0x000003e0
-#define TR_66_PIO_RECOVERY_SHIFT 5
-#define TR_66_PIO_ACCESS_MASK 0x0000001f
-#define TR_66_PIO_ACCESS_SHIFT 0
-
-/* 33 MHz cell, found in OHare, Heathrow (& Paddington) and KeyLargo.
- * Can do PIO & MDMA modes; clock unit is 30 ns (33 MHz).
- *
- * The access time and recovery time can be programmed. Some older
- * Darwin code bases limit OHare to a 150 ns cycle time. I decided to
- * do the same here for safety against broken old hardware ;)
- * The HalfTick bit, when set, adds half a clock (15 ns) to the access
- * time and removes one from recovery. It's not supported on the
- * KeyLargo implementation, AFAIK. The E bit appears to be set for PIO
- * mode 0 and is used to reach the long timings used in that mode.
- */
-#define TR_33_MDMA_MASK 0x003ff800
-#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
-#define TR_33_MDMA_RECOVERY_SHIFT 16
-#define TR_33_MDMA_ACCESS_MASK 0x0000f800
-#define TR_33_MDMA_ACCESS_SHIFT 11
-#define TR_33_MDMA_HALFTICK 0x00200000
-#define TR_33_PIO_MASK 0x000007ff
-#define TR_33_PIO_E 0x00000400
-#define TR_33_PIO_RECOVERY_MASK 0x000003e0
-#define TR_33_PIO_RECOVERY_SHIFT 5
-#define TR_33_PIO_ACCESS_MASK 0x0000001f
-#define TR_33_PIO_ACCESS_SHIFT 0
-
-/*
- * Interrupt register definitions
- */
-#define IDE_INTR_DMA 0x80000000
-#define IDE_INTR_DEVICE 0x40000000
-
-/*
- * FCR Register on Kauai. Not sure what bit 0x4 is ...
- */
-#define KAUAI_FCR_UATA_MAGIC 0x00000004
-#define KAUAI_FCR_UATA_RESET_N 0x00000002
-#define KAUAI_FCR_UATA_ENABLE 0x00000001
-
-/* Rounded Multiword DMA timings
- *
- * I gave up on finding a generic formula for all controller
- * types and instead built tables based on the timing values
- * used by Apple in Darwin's implementation.
- */
-struct mdma_timings_t {
- int accessTime;
- int recoveryTime;
- int cycleTime;
-};
-
-struct mdma_timings_t mdma_timings_33[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 135, 135, 270 },
- { 120, 120, 240 },
- { 105, 105, 210 },
- { 90, 90, 180 },
- { 75, 75, 150 },
- { 75, 45, 120 },
- { 0, 0, 0 }
-};
-
-struct mdma_timings_t mdma_timings_33k[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 150, 150, 300 },
- { 120, 120, 240 },
- { 90, 120, 210 },
- { 90, 90, 180 },
- { 90, 60, 150 },
- { 90, 30, 120 },
- { 0, 0, 0 }
-};
-
-struct mdma_timings_t mdma_timings_66[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 135, 135, 270 },
- { 120, 120, 240 },
- { 105, 105, 210 },
- { 90, 90, 180 },
- { 90, 75, 165 },
- { 75, 45, 120 },
- { 0, 0, 0 }
-};
-
-/* KeyLargo ATA-4 Ultra DMA timings (rounded) */
-struct {
- int addrSetup; /* ??? */
- int rdy2pause;
- int wrDataSetup;
-} kl66_udma_timings[] =
-{
- { 0, 180, 120 }, /* Mode 0 */
- { 0, 150, 90 }, /* 1 */
- { 0, 120, 60 }, /* 2 */
- { 0, 90, 45 }, /* 3 */
- { 0, 90, 30 } /* 4 */
-};
-
-/* UniNorth 2 ATA/100 timings */
-struct kauai_timing {
- int cycle_time;
- u32 timing_reg;
-};
-
-static struct kauai_timing kauai_pio_timings[] =
-{
- { 930 , 0x08000fff },
- { 600 , 0x08000a92 },
- { 383 , 0x0800060f },
- { 360 , 0x08000492 },
- { 330 , 0x0800048f },
- { 300 , 0x080003cf },
- { 270 , 0x080003cc },
- { 240 , 0x0800038b },
- { 239 , 0x0800030c },
- { 180 , 0x05000249 },
- { 120 , 0x04000148 },
- { 0 , 0 },
-};
-
-static struct kauai_timing kauai_mdma_timings[] =
-{
- { 1260 , 0x00fff000 },
- { 480 , 0x00618000 },
- { 360 , 0x00492000 },
- { 270 , 0x0038e000 },
- { 240 , 0x0030c000 },
- { 210 , 0x002cb000 },
- { 180 , 0x00249000 },
- { 150 , 0x00209000 },
- { 120 , 0x00148000 },
- { 0 , 0 },
-};
-
-static struct kauai_timing kauai_udma_timings[] =
-{
- { 120 , 0x000070c0 },
- { 90 , 0x00005d80 },
- { 60 , 0x00004a60 },
- { 45 , 0x00003a50 },
- { 30 , 0x00002a30 },
- { 20 , 0x00002921 },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_pio_timings[] =
-{
- { 930 , 0x08000fff },
- { 600 , 0x0A000c97 },
- { 383 , 0x07000712 },
- { 360 , 0x040003cd },
- { 330 , 0x040003cd },
- { 300 , 0x040003cd },
- { 270 , 0x040003cd },
- { 240 , 0x040003cd },
- { 239 , 0x040003cd },
- { 180 , 0x0400028b },
- { 120 , 0x0400010a },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_mdma_timings[] =
-{
- { 1260 , 0x00fff000 },
- { 480 , 0x00820800 },
- { 360 , 0x00820800 },
- { 270 , 0x00820800 },
- { 240 , 0x00820800 },
- { 210 , 0x00820800 },
- { 180 , 0x00820800 },
- { 150 , 0x0028b000 },
- { 120 , 0x001ca000 },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_udma133_timings[] =
-{
- { 120 , 0x00035901, },
- { 90 , 0x000348b1, },
- { 60 , 0x00033881, },
- { 45 , 0x00033861, },
- { 30 , 0x00033841, },
- { 20 , 0x00033031, },
- { 15 , 0x00033021, },
- { 0 , 0 },
-};
-
-
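-/* Tables are sorted by decreasing cycle time and zero-terminated; return
- * the register value of the tightest entry whose cycle time still covers
- * the request, e.g. a 100 ns request against kauai_udma_timings picks the
- * 120 ns entry.
- */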
-static inline u32
-kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
-{
- int i;
-
- for (i=0; table[i].cycle_time; i++)
- if (cycle_time > table[i+1].cycle_time)
- return table[i].timing_reg;
- BUG();
- return 0;
-}
-
-/* allow up to 256 DBDMA commands per xfer */
-#define MAX_DCMDS 256
-
-/*
- * Wait 1s for the disk to answer on the IDE bus after a hard reset
- * of the device (via GPIO/FCR).
- *
- * Some devices seem to "pollute" the bus even after dropping
- * the BSY bit (typically some combo drives configured as slave on
- * the UDMA bus) after a hard reset. Since we hard reset all drives
- * on KeyLargo ATA66, we have to keep that delay around. I may end
- * up not hard resetting anymore on these and keeping the delay only
- * for older interfaces instead (we have to reset when coming
- * from MacOS...) --BenH.
- */
-#define IDE_WAKEUP_DELAY (1*HZ)
-
-static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
-
-#define PMAC_IDE_REG(x) \
- ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
-
-/*
- * Apply the timings of the proper unit (master/slave) to the shared
- * timing register when selecting that unit. This version is for
- * ASICs with a single timing register
- */
-static void pmac_ide_apply_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (drive->dn & 1)
- writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
- else
- writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
- (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
-}
-
-/*
- * Apply the timings of the proper unit (master/slave) to the shared
- * timing register when selecting that unit. This version is for
- * ASICs with a dual timing register (Kauai)
- */
-static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (drive->dn & 1) {
- writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
- } else {
- writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
- }
- (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
-}
-
-/*
- * Force an update of controller timing values for a given drive
- */
-static void
-pmac_ide_do_update_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (pmif->kind == controller_sh_ata6 ||
- pmif->kind == controller_un_ata6 ||
- pmif->kind == controller_k2_ata6)
- pmac_ide_kauai_apply_timings(drive);
- else
- pmac_ide_apply_timings(drive);
-}
-
-static void pmac_dev_select(ide_drive_t *drive)
-{
- pmac_ide_apply_timings(drive);
-
- writeb(drive->select | ATA_DEVICE_OBS,
- (void __iomem *)drive->hwif->io_ports.device_addr);
-}
-
-static void pmac_kauai_dev_select(ide_drive_t *drive)
-{
- pmac_ide_kauai_apply_timings(drive);
-
- writeb(drive->select | ATA_DEVICE_OBS,
- (void __iomem *)drive->hwif->io_ports.device_addr);
-}
-
-static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
- (void)readl((void __iomem *)(hwif->io_ports.data_addr
- + IDE_TIMING_CONFIG));
-}
-
-static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
- (void)readl((void __iomem *)(hwif->io_ports.data_addr
- + IDE_TIMING_CONFIG));
-}
-
-/*
- * Old tuning function (called on hdparm -p); sets up drive PIO timings
- */
-static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
- u32 *timings, t;
- unsigned accessTicks, recTicks;
- unsigned accessTime, recTime;
- unsigned int cycle_time;
-
- /* which drive is it ? */
- timings = &pmif->timings[drive->dn & 1];
- t = *timings;
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- switch (pmif->kind) {
- case controller_sh_ata6: {
- /* 133Mhz cell */
- u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
- t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
- break;
- }
- case controller_un_ata6:
- case controller_k2_ata6: {
- /* 100Mhz cell */
- u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
- t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
- break;
- }
- case controller_kl_ata4:
- /* 66Mhz cell */
- recTime = cycle_time - tim->active - tim->setup;
- recTime = max(recTime, 150U);
- accessTime = tim->active;
- accessTime = max(accessTime, 150U);
- accessTicks = SYSCLK_TICKS_66(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- recTicks = SYSCLK_TICKS_66(recTime);
- recTicks = min(recTicks, 0x1fU);
- t = (t & ~TR_66_PIO_MASK) |
- (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
- (recTicks << TR_66_PIO_RECOVERY_SHIFT);
- break;
- default: {
- /* 33Mhz cell */
- int ebit = 0;
- recTime = cycle_time - tim->active - tim->setup;
- recTime = max(recTime, 150U);
- accessTime = tim->active;
- accessTime = max(accessTime, 150U);
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- accessTicks = max(accessTicks, 4U);
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = min(recTicks, 0x1fU);
- recTicks = max(recTicks, 5U) - 4;
- if (recTicks > 9) {
- recTicks--; /* guess, but it's only for PIO0, so... */
- ebit = 1;
- }
- t = (t & ~TR_33_PIO_MASK) |
- (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
- (recTicks << TR_33_PIO_RECOVERY_SHIFT);
- if (ebit)
- t |= TR_33_PIO_E;
- break;
- }
- }
-
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
- drive->name, pio, *timings);
-#endif
-
- *timings = t;
- pmac_ide_do_update_timings(drive);
-}
-
-/*
- * Calculate KeyLargo ATA/66 UDMA timings
- */
-static int
-set_timings_udma_ata4(u32 *timings, u8 speed)
-{
- unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
-
- if (speed > XFER_UDMA_4)
- return 1;
-
- rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
- wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
- addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
-
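- /* e.g. UDMA2: rdy2pause 120 ns -> 8 ticks, wrDataSetup 60 ns -> 4 ticks */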
- *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
- (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
- (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
- (addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
- TR_66_UDMA_EN;
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
- speed & 0xf, *timings);
-#endif
-
- return 0;
-}
-
-/*
- * Calculate Kauai ATA/100 UDMA timings
- */
-static int
-set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
-{
- struct ide_timing *t = ide_timing_find_mode(speed);
- u32 tr;
-
- if (speed > XFER_UDMA_5 || t == NULL)
- return 1;
- tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
- *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
- *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
-
- return 0;
-}
-
-/*
- * Calculate Shasta ATA/133 UDMA timings
- */
-static int
-set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
-{
- struct ide_timing *t = ide_timing_find_mode(speed);
- u32 tr;
-
- if (speed > XFER_UDMA_6 || t == NULL)
- return 1;
- tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
- *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
- *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
-
- return 0;
-}
-
-/*
- * Calculate MDMA timings for all cells
- */
-static void
-set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
- u8 speed)
-{
- u16 *id = drive->id;
- int cycleTime, accessTime = 0, recTime = 0;
- unsigned accessTicks, recTicks;
- struct mdma_timings_t* tm = NULL;
- int i;
-
- /* Get default cycle time for mode */
- switch(speed & 0xf) {
- case 0: cycleTime = 480; break;
- case 1: cycleTime = 150; break;
- case 2: cycleTime = 120; break;
- default:
- BUG();
- break;
- }
-
- /* Check if drive provides explicit DMA cycle time */
- if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME])
- cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime);
-
- /* OHare limits according to some old Apple sources */
- if ((intf_type == controller_ohare) && (cycleTime < 150))
- cycleTime = 150;
- /* Get the proper timing array for this controller */
- switch(intf_type) {
- case controller_sh_ata6:
- case controller_un_ata6:
- case controller_k2_ata6:
- break;
- case controller_kl_ata4:
- tm = mdma_timings_66;
- break;
- case controller_kl_ata3:
- tm = mdma_timings_33k;
- break;
- default:
- tm = mdma_timings_33;
- break;
- }
- if (tm != NULL) {
- /* Lookup matching access & recovery times */
- i = -1;
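- /* Tables are sorted by decreasing cycleTime; stop at the
- * tightest entry that still covers the requested time. */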
- for (;;) {
- if (tm[i+1].cycleTime < cycleTime)
- break;
- i++;
- }
- cycleTime = tm[i].cycleTime;
- accessTime = tm[i].accessTime;
- recTime = tm[i].recoveryTime;
-
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
- drive->name, cycleTime, accessTime, recTime);
-#endif
- }
- switch(intf_type) {
- case controller_sh_ata6: {
- /* 133Mhz cell */
- u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
- *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
- *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
- }
- break;
- case controller_un_ata6:
- case controller_k2_ata6: {
- /* 100Mhz cell */
- u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
- *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
- *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
- }
- break;
- case controller_kl_ata4:
- /* 66Mhz cell */
- accessTicks = SYSCLK_TICKS_66(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- accessTicks = max(accessTicks, 0x1U);
- recTicks = SYSCLK_TICKS_66(recTime);
- recTicks = min(recTicks, 0x1fU);
- recTicks = max(recTicks, 0x3U);
- /* Clear out mdma bits and disable udma */
- *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
- (accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_66_MDMA_RECOVERY_SHIFT);
- break;
- case controller_kl_ata3:
- /* 33Mhz cell on KeyLargo */
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = max(accessTicks, 1U);
- accessTicks = min(accessTicks, 0x1fU);
- accessTime = accessTicks * IDE_SYSCLK_NS;
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = max(recTicks, 1U);
- recTicks = min(recTicks, 0x1fU);
- *timings = ((*timings) & ~TR_33_MDMA_MASK) |
- (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
- break;
- default: {
- /* 33Mhz cell on others */
- int halfTick = 0;
- int origAccessTime = accessTime;
- int origRecTime = recTime;
-
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = max(accessTicks, 1U);
- accessTicks = min(accessTicks, 0x1fU);
- accessTime = accessTicks * IDE_SYSCLK_NS;
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = max(recTicks, 2U) - 1;
- recTicks = min(recTicks, 0x1fU);
- recTime = (recTicks + 1) * IDE_SYSCLK_NS;
- if ((accessTicks > 1) &&
- ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
- ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
- halfTick = 1;
- accessTicks--;
- }
- *timings = ((*timings) & ~TR_33_MDMA_MASK) |
- (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
- if (halfTick)
- *timings |= TR_33_MDMA_HALFTICK;
- }
- }
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
- drive->name, speed & 0xf, *timings);
-#endif
-}
-
-static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- int ret = 0;
- u32 *timings, *timings2, tl[2];
- u8 unit = drive->dn & 1;
- const u8 speed = drive->dma_mode;
-
- timings = &pmif->timings[unit];
- timings2 = &pmif->timings[unit+2];
-
- /* Copy timings to local image */
- tl[0] = *timings;
- tl[1] = *timings2;
-
- if (speed >= XFER_UDMA_0) {
- if (pmif->kind == controller_kl_ata4)
- ret = set_timings_udma_ata4(&tl[0], speed);
- else if (pmif->kind == controller_un_ata6
- || pmif->kind == controller_k2_ata6)
- ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
- else if (pmif->kind == controller_sh_ata6)
- ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
- else
- ret = -1;
- } else
- set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-
- if (ret)
- return;
-
- /* Apply timings to controller */
- *timings = tl[0];
- *timings2 = tl[1];
-
- pmac_ide_do_update_timings(drive);
-}
-
-/*
- * Blast some well-known "safe" values into the timing registers at init
- * or wakeup-from-sleep time, before we do the real calculation.
- */
-static void
-sanitize_timings(pmac_ide_hwif_t *pmif)
-{
- unsigned int value, value2 = 0;
-
- switch(pmif->kind) {
- case controller_sh_ata6:
- value = 0x0a820c97;
- value2 = 0x00033031;
- break;
- case controller_un_ata6:
- case controller_k2_ata6:
- value = 0x08618a92;
- value2 = 0x00002921;
- break;
- case controller_kl_ata4:
- value = 0x0008438c;
- break;
- case controller_kl_ata3:
- value = 0x00084526;
- break;
- case controller_heathrow:
- case controller_ohare:
- default:
- value = 0x00074526;
- break;
- }
- pmif->timings[0] = pmif->timings[1] = value;
- pmif->timings[2] = pmif->timings[3] = value2;
-}
-
-static int on_media_bay(pmac_ide_hwif_t *pmif)
-{
- return pmif->mdev && pmif->mdev->media_bay != NULL;
-}
-
-/* Suspend callback; should be called after the child devices
- * have actually been suspended.
- */
-static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
-{
- /* We clear the timings */
- pmif->timings[0] = 0;
- pmif->timings[1] = 0;
-
- disable_irq(pmif->irq);
-
- /* The media bay will handle itself just fine */
- if (on_media_bay(pmif))
- return 0;
-
- /* Kauai has bus control FCRs directly here */
- if (pmif->kauai_fcr) {
- u32 fcr = readl(pmif->kauai_fcr);
- fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
- writel(fcr, pmif->kauai_fcr);
- }
-
- /* Disable the bus on older machines and the cell on kauai */
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id,
- 0);
-
- return 0;
-}
-
-/* Resume callback; should be called before the child devices
- * are resumed.
- */
-static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
-{
- /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
- if (!on_media_bay(pmif)) {
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
- msleep(10);
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
-
- /* Kauai has it different */
- if (pmif->kauai_fcr) {
- u32 fcr = readl(pmif->kauai_fcr);
- fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE;
- writel(fcr, pmif->kauai_fcr);
- }
-
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
- }
-
- /* Sanitize drive timings */
- sanitize_timings(pmif);
-
- enable_irq(pmif->irq);
-
- return 0;
-}
-
-static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct device_node *np = pmif->node;
- const char *cable = of_get_property(np, "cable-type", NULL);
- struct device_node *root = of_find_node_by_path("/");
- const char *model = of_get_property(root, "model", NULL);
-
- of_node_put(root);
- /* Get cable type from device-tree. */
- if (cable && !strncmp(cable, "80-", 3)) {
- /* Some drives fail to detect the 80c cable in a PowerBook; */
- /* these machines use a proprietary short IDE cable anyway. */
- if (!strncmp(model, "PowerBook", 9))
- return ATA_CBL_PATA40_SHORT;
- else
- return ATA_CBL_PATA80;
- }
-
- /*
- * G5s seem to have an incorrect cable type in the device-tree.
- * Let's assume they have an 80-conductor cable; this seems to
- * always be the case unless the user mucked around.
- */
- if (of_device_is_compatible(np, "K2-UATA") ||
- of_device_is_compatible(np, "shasta-ata"))
- return ATA_CBL_PATA80;
-
- return ATA_CBL_PATA40;
-}
-
-static void pmac_ide_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (on_media_bay(pmif)) {
- if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
- drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
- return;
- }
- drive->dev_flags |= IDE_DFLAG_NOPROBE;
- }
-}
-
-static const struct ide_tp_ops pmac_tp_ops = {
- .exec_command = pmac_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = pmac_write_devctl,
-
- .dev_select = pmac_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_tp_ops pmac_ata6_tp_ops = {
- .exec_command = pmac_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = pmac_write_devctl,
-
- .dev_select = pmac_kauai_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops pmac_ide_ata4_port_ops = {
- .init_dev = pmac_ide_init_dev,
- .set_pio_mode = pmac_ide_set_pio_mode,
- .set_dma_mode = pmac_ide_set_dma_mode,
- .cable_detect = pmac_ide_cable_detect,
-};
-
-static const struct ide_port_ops pmac_ide_port_ops = {
- .init_dev = pmac_ide_init_dev,
- .set_pio_mode = pmac_ide_set_pio_mode,
- .set_dma_mode = pmac_ide_set_dma_mode,
-};
-
-static const struct ide_dma_ops pmac_dma_ops;
-
-static const struct ide_port_info pmac_port_info = {
- .name = DRV_NAME,
- .init_dma = pmac_ide_init_dma,
- .chipset = ide_pmac,
- .tp_ops = &pmac_tp_ops,
- .port_ops = &pmac_ide_port_ops,
- .dma_ops = &pmac_dma_ops,
- .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_MMIO |
- IDE_HFLAG_UNMASK_IRQS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-/*
- * Set up, register & probe an IDE channel driven by this driver; this is
- * called by one of the two probe functions (macio or PCI).
- */
-static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
-{
- struct device_node *np = pmif->node;
- const int *bidp;
- struct ide_host *host;
- struct ide_hw *hws[] = { hw };
- struct ide_port_info d = pmac_port_info;
- int rc;
-
- pmif->broken_dma = pmif->broken_dma_warn = 0;
- if (of_device_is_compatible(np, "shasta-ata")) {
- pmif->kind = controller_sh_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA6;
- } else if (of_device_is_compatible(np, "kauai-ata")) {
- pmif->kind = controller_un_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA5;
- } else if (of_device_is_compatible(np, "K2-UATA")) {
- pmif->kind = controller_k2_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA5;
- } else if (of_device_is_compatible(np, "keylargo-ata")) {
- if (of_node_name_eq(np, "ata-4")) {
- pmif->kind = controller_kl_ata4;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA4;
- } else
- pmif->kind = controller_kl_ata3;
- } else if (of_device_is_compatible(np, "heathrow-ata")) {
- pmif->kind = controller_heathrow;
- } else {
- pmif->kind = controller_ohare;
- pmif->broken_dma = 1;
- }
-
- bidp = of_get_property(np, "AAPL,bus-id", NULL);
- pmif->aapl_bus_id = bidp ? *bidp : 0;
-
- /* On Kauai-type controllers, we make sure the FCR is correct */
- if (pmif->kauai_fcr)
- writel(KAUAI_FCR_UATA_MAGIC |
- KAUAI_FCR_UATA_RESET_N |
- KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
-
- /* Make sure we have sane timings */
- sanitize_timings(pmif);
-
- /* If we are on a media bay, wait for it to settle and lock it */
- if (pmif->mdev)
- lock_media_bay(pmif->mdev->media_bay);
-
- host = ide_host_alloc(&d, hws, 1);
- if (host == NULL) {
- rc = -ENOMEM;
- goto bail;
- }
- pmif->hwif = host->ports[0];
-
- if (on_media_bay(pmif)) {
- /* Fixup bus ID for media bay */
- if (!bidp)
- pmif->aapl_bus_id = 1;
- } else if (pmif->kind == controller_ohare) {
- /* The code below is having trouble on some OHare machines
- * (timing related?). Until I can put my hands on one of these
- * units, I'll keep the old way.
- */
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
- } else {
- /* This is necessary to enable IDE when net-booting */
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
- msleep(10);
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
- }
-
- printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
- "bus ID %d%s, irq %d\n", model_name[pmif->kind],
- pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
- on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
-
- rc = ide_host_register(host, &d, hws);
- if (rc)
- pmif->hwif = NULL;
-
- if (pmif->mdev)
- unlock_media_bay(pmif->mdev->media_bay);
-
- bail:
- if (rc && host)
- ide_host_free(host);
- return rc;
-}
-
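-/* Taskfile registers are spaced 0x10 apart in MMIO; the control
- * register sits at base + 0x160.
- */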
-static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
-{
- int i;
-
- for (i = 0; i < 8; ++i)
- hw->io_ports_array[i] = base + i * 0x10;
-
- hw->io_ports.ctl_addr = base + 0x160;
-}
-
-/*
- * Attach to a macio probed interface
- */
-static int pmac_ide_macio_attach(struct macio_dev *mdev,
- const struct of_device_id *match)
-{
- void __iomem *base;
- unsigned long regbase;
- pmac_ide_hwif_t *pmif;
- int irq, rc;
- struct ide_hw hw;
-
- pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
- if (pmif == NULL)
- return -ENOMEM;
-
- if (macio_resource_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no address for %pOF\n",
- mdev->ofdev.dev.of_node);
- rc = -ENXIO;
- goto out_free_pmif;
- }
-
- /* Request memory resource for IO ports */
- if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
- printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%pOF!\n", mdev->ofdev.dev.of_node);
- rc = -EBUSY;
- goto out_free_pmif;
- }
-
- /* XXX This is bogus. Should be fixed in the registry by checking
- * the kind of host interrupt controller, a bit like gatwick
- * fixes in irq.c. That works well enough for the single case
- * where that happens though...
- */
- if (macio_irq_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using "
- "13\n", mdev->ofdev.dev.of_node);
- irq = irq_create_mapping(NULL, 13);
- } else
- irq = macio_irq(mdev, 0);
-
- base = ioremap(macio_resource_start(mdev, 0), 0x400);
- regbase = (unsigned long) base;
-
- pmif->mdev = mdev;
- pmif->node = mdev->ofdev.dev.of_node;
- pmif->regbase = regbase;
- pmif->irq = irq;
- pmif->kauai_fcr = NULL;
-
- if (macio_resource_count(mdev) >= 2) {
- if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
- printk(KERN_WARNING "ide-pmac: can't request DMA "
- "resource for %pOF!\n",
- mdev->ofdev.dev.of_node);
- else
- pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
- } else
- pmif->dma_regs = NULL;
-
- dev_set_drvdata(&mdev->ofdev.dev, pmif);
-
- memset(&hw, 0, sizeof(hw));
- pmac_ide_init_ports(&hw, pmif->regbase);
- hw.irq = irq;
- hw.dev = &mdev->bus->pdev->dev;
- hw.parent = &mdev->ofdev.dev;
-
- rc = pmac_ide_setup_device(pmif, &hw);
- if (rc != 0) {
- /* The interface is released to the common IDE layer */
- dev_set_drvdata(&mdev->ofdev.dev, NULL);
- iounmap(base);
- if (pmif->dma_regs) {
- iounmap(pmif->dma_regs);
- macio_release_resource(mdev, 1);
- }
- macio_release_resource(mdev, 0);
- kfree(pmif);
- }
-
- return rc;
-
-out_free_pmif:
- kfree(pmif);
- return rc;
-}
-
-static int
-pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
-
- if (mesg.event != mdev->ofdev.dev.power.power_state.event
- && (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(pmif);
- if (rc == 0)
- mdev->ofdev.dev.power.power_state = mesg;
- }
-
- return rc;
-}
-
-static int
-pmac_ide_macio_resume(struct macio_dev *mdev)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
-
- if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(pmif);
- if (rc == 0)
- mdev->ofdev.dev.power.power_state = PMSG_ON;
- }
-
- return rc;
-}
-
-/*
- * Attach to a PCI probed interface
- */
-static int pmac_ide_pci_attach(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct device_node *np;
- pmac_ide_hwif_t *pmif;
- void __iomem *base;
- unsigned long rbase, rlen;
- int rc;
- struct ide_hw hw;
-
- np = pci_device_to_OF_node(pdev);
- if (np == NULL) {
- printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
- return -ENODEV;
- }
-
- pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
- if (pmif == NULL)
- return -ENOMEM;
-
- if (pci_enable_device(pdev)) {
- printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
- "%pOF\n", np);
- rc = -ENXIO;
- goto out_free_pmif;
- }
- pci_set_master(pdev);
-
- if (pci_request_regions(pdev, "Kauai ATA")) {
- printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
- "%pOF\n", np);
- rc = -ENXIO;
- goto out_free_pmif;
- }
-
- pmif->mdev = NULL;
- pmif->node = np;
-
- rbase = pci_resource_start(pdev, 0);
- rlen = pci_resource_len(pdev, 0);
-
- base = ioremap(rbase, rlen);
- pmif->regbase = (unsigned long) base + 0x2000;
- pmif->dma_regs = base + 0x1000;
- pmif->kauai_fcr = base;
- pmif->irq = pdev->irq;
-
- pci_set_drvdata(pdev, pmif);
-
- memset(&hw, 0, sizeof(hw));
- pmac_ide_init_ports(&hw, pmif->regbase);
- hw.irq = pdev->irq;
- hw.dev = &pdev->dev;
-
- rc = pmac_ide_setup_device(pmif, &hw);
- if (rc != 0) {
- /* The interface is released to the common IDE layer */
- iounmap(base);
- pci_release_regions(pdev);
- kfree(pmif);
- }
-
- return rc;
-
-out_free_pmif:
- kfree(pmif);
- return rc;
-}
-
-static int
-pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
-{
- pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
- int rc = 0;
-
- if (mesg.event != pdev->dev.power.power_state.event
- && (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(pmif);
- if (rc == 0)
- pdev->dev.power.power_state = mesg;
- }
-
- return rc;
-}
-
-static int
-pmac_ide_pci_resume(struct pci_dev *pdev)
-{
- pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
- int rc = 0;
-
- if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(pmif);
- if (rc == 0)
- pdev->dev.power.power_state = PMSG_ON;
- }
-
- return rc;
-}
-
-#ifdef CONFIG_PMAC_MEDIABAY
-static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
-
- switch(mb_state) {
- case MB_CD:
- if (!pmif->hwif->present)
- ide_port_scan(pmif->hwif);
- break;
- default:
- if (pmif->hwif->present)
- ide_port_unregister_devices(pmif->hwif);
- }
-}
-#endif /* CONFIG_PMAC_MEDIABAY */
-
-
-static struct of_device_id pmac_ide_macio_match[] =
-{
- {
- .name = "IDE",
- },
- {
- .name = "ATA",
- },
- {
- .type = "ide",
- },
- {
- .type = "ata",
- },
- {},
-};
-
-static struct macio_driver pmac_ide_macio_driver =
-{
- .driver = {
- .name = "ide-pmac",
- .owner = THIS_MODULE,
- .of_match_table = pmac_ide_macio_match,
- },
- .probe = pmac_ide_macio_attach,
- .suspend = pmac_ide_macio_suspend,
- .resume = pmac_ide_macio_resume,
-#ifdef CONFIG_PMAC_MEDIABAY
- .mediabay_event = pmac_ide_macio_mb_event,
-#endif
-};
-
-static const struct pci_device_id pmac_ide_pci_match[] = {
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
- {},
-};
-
-static struct pci_driver pmac_ide_pci_driver = {
- .name = "ide-pmac",
- .id_table = pmac_ide_pci_match,
- .probe = pmac_ide_pci_attach,
- .suspend = pmac_ide_pci_suspend,
- .resume = pmac_ide_pci_resume,
-};
-MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
-
-int __init pmac_ide_probe(void)
-{
- int error;
-
- if (!machine_is(powermac))
- return -ENODEV;
-
-#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
- error = pci_register_driver(&pmac_ide_pci_driver);
- if (error)
- goto out;
- error = macio_register_driver(&pmac_ide_macio_driver);
- if (error) {
- pci_unregister_driver(&pmac_ide_pci_driver);
- goto out;
- }
-#else
- error = macio_register_driver(&pmac_ide_macio_driver);
- if (error)
- goto out;
- error = pci_register_driver(&pmac_ide_pci_driver);
- if (error) {
- macio_unregister_driver(&pmac_ide_macio_driver);
- goto out;
- }
-#endif
-out:
- return error;
-}
-
-/*
- * pmac_ide_build_dmatable builds the DBDMA command list
- * for a transfer and sets the DBDMA channel to point to it.
- */
-static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct dbdma_cmd *table;
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- struct scatterlist *sg;
- int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
- int i = cmd->sg_nents, count = 0;
-
- /* DMA table is already aligned */
- table = (struct dbdma_cmd *) pmif->dma_table_cpu;
-
- /* Make sure DMA controller is stopped (necessary ?) */
- writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
- while (readl(&dma->status) & RUN)
- udelay(1);
-
- /* Build DBDMA commands list */
- sg = hwif->sg_table;
- while (i && sg_dma_len(sg)) {
- u32 cur_addr;
- u32 cur_len;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
- if (pmif->broken_dma_warn == 0) {
- printk(KERN_WARNING "%s: DMA on non aligned address, "
- "switching to PIO on Ohare chipset\n", drive->name);
- pmif->broken_dma_warn = 1;
- }
- return 0;
- }
- while (cur_len) {
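- /* req_count is 16-bit, so cap each DBDMA command at 0xfe00 bytes */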
- unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
-
- if (count++ >= MAX_DCMDS) {
- printk(KERN_WARNING "%s: DMA table too small\n",
- drive->name);
- return 0;
- }
- table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
- table->req_count = cpu_to_le16(tc);
- table->phy_addr = cpu_to_le32(cur_addr);
- table->cmd_dep = 0;
- table->xfer_status = 0;
- table->res_count = 0;
- cur_addr += tc;
- cur_len -= tc;
- ++table;
- }
- sg = sg_next(sg);
- i--;
- }
-
- /* convert the last command to an input/output last command */
- if (count) {
- table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
- /* add the stop command to the end of the list */
- memset(table, 0, sizeof(struct dbdma_cmd));
- table->command = cpu_to_le16(DBDMA_STOP);
- mb();
- writel(hwif->dmatable_dma, &dma->cmdptr);
- return 1;
- }
-
- printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
-
- return 0; /* revert to PIO for this request */
-}
-
-/*
- * Prepare a DMA transfer. We build the DMA table, adjust the timings for
- * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
- */
-static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
- u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- if (pmac_ide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- /* Apple adds 60ns to wrDataSetup on reads */
- if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
- writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
- PMAC_IDE_REG(IDE_TIMING_CONFIG));
- (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
- }
-
- return 0;
-}
-
-/*
- * Kick the DMA controller into life after the DMA command has been issued
- * to the drive.
- */
-static void
-pmac_ide_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma;
-
- dma = pmif->dma_regs;
-
- writel((RUN << 16) | RUN, &dma->control);
- /* Make sure it gets to the controller right now */
- (void)readl(&dma->control);
-}
-
-/*
- * After a DMA transfer, make sure the controller is stopped
- */
-static int
-pmac_ide_dma_end (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- u32 dstat;
-
- dstat = readl(&dma->status);
- writel(((RUN|WAKE|DEAD) << 16), &dma->control);
-
-	/* verify good dma status. We don't check for ACTIVE being 0. We should...
-	 * in theory, but with ATAPI devices doing buffer underruns, that would
-	 * cause us to disable DMA, which isn't what we want
-	 */
- return (dstat & (RUN|DEAD)) != RUN;
-}
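
The return expression above treats the transfer as failed unless RUN is still set and DEAD is clear. A hedged sketch of that predicate; the bit values are assumed to match the DBDMA status register definitions (RUN = 0x8000, DEAD = 0x0800), so treat them as assumptions rather than authoritative:

#include <stdio.h>

#define RUN  0x8000u   /* assumed DBDMA status bits */
#define DEAD 0x0800u

/* Non-zero means error: the channel stopped early or died. */
static int dma_end_error(unsigned int dstat)
{
	return (dstat & (RUN | DEAD)) != RUN;
}

int main(void)
{
	printf("%d\n", dma_end_error(RUN));         /* 0: healthy */
	printf("%d\n", dma_end_error(RUN | DEAD));  /* 1: channel died */
	printf("%d\n", dma_end_error(0));           /* 1: stopped early */
	return 0;
}
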
-
-/*
- * Check that the interrupt we got was for us. We can't always know this
- * for sure with these Apple interfaces (well, we could on the recent ones,
- * but that's not implemented yet). On the other hand, we don't have shared
- * interrupts, so it's not really a problem.
- */
-static int
-pmac_ide_dma_test_irq (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- unsigned long status, timeout;
-
-	/* We have two things to deal with here:
-	 *
-	 * - The dbdma won't stop if the command was started
-	 * but completed with an error without transferring all
-	 * data. This happens when bad blocks are met during
-	 * a multi-block transfer.
-	 *
-	 * - The dbdma fifo hasn't yet finished flushing to
-	 * system memory when the disk interrupt occurs.
-	 *
-	 */
-
-	/* If ACTIVE is cleared, the STOP command has passed and the
-	 * transfer is complete.
-	 */
- status = readl(&dma->status);
- if (!(status & ACTIVE))
- return 1;
-
-	/* If dbdma hasn't executed the STOP command yet, the
-	 * active bit is still set. We consider that we aren't
-	 * sharing interrupts (which is hopefully the case with
-	 * those controllers) and so we just try to flush the
-	 * channel of pending data in the fifo
-	 */
- udelay(1);
- writel((FLUSH << 16) | FLUSH, &dma->control);
- timeout = 0;
- for (;;) {
- udelay(1);
- status = readl(&dma->status);
- if ((status & FLUSH) == 0)
- break;
- if (++timeout > 100) {
- printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
- hwif->index);
- break;
- }
- }
- return 1;
-}
-
-static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void
-pmac_ide_dma_lost_irq (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- unsigned long status = readl(&dma->status);
-
- printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
-}
-
-static const struct ide_dma_ops pmac_dma_ops = {
- .dma_host_set = pmac_ide_dma_host_set,
- .dma_setup = pmac_ide_dma_setup,
- .dma_start = pmac_ide_dma_start,
- .dma_end = pmac_ide_dma_end,
- .dma_test_irq = pmac_ide_dma_test_irq,
- .dma_lost_irq = pmac_ide_dma_lost_irq,
-};
-
-/*
- * Allocate the data structures needed for using DMA with an interface
- * and fill the proper list of functions pointers
- */
-static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- /* We won't need pci_dev if we switch to generic consistent
- * DMA routines ...
- */
- if (dev == NULL || pmif->dma_regs == 0)
- return -ENODEV;
- /*
- * Allocate space for the DBDMA commands.
- * The +2 is +1 for the stop command and +1 to allow for
- * aligning the start address to a multiple of 16 bytes.
- */
- pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev,
- (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
- &hwif->dmatable_dma, GFP_KERNEL);
- if (pmif->dma_table_cpu == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA command list\n",
- hwif->name);
- return -ENOMEM;
- }
-
- hwif->sg_max_nents = MAX_DCMDS;
-
- return 0;
-}
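
The sizing rule in the allocation above is worth spelling out: room for MAX_DCMDS data commands, one extra slot for the stop command, and one more so the start address can be rounded up to a 16-byte boundary. A trivial sketch (the MAX_DCMDS value here is hypothetical; use the driver's constant):

#include <stdio.h>

#define MAX_DCMDS      256   /* hypothetical; use the driver's value */
#define DBDMA_CMD_SIZE  16   /* sizeof(struct dbdma_cmd) */

int main(void)
{
	/* +1 for the stop command, +1 slack for aligning the start */
	unsigned int bytes = (MAX_DCMDS + 2) * DBDMA_CMD_SIZE;
	unsigned long raw = 0x1008;                 /* example address */
	unsigned long aligned = (raw + 15) & ~15UL; /* round up to 16 */

	printf("%u bytes, start 0x%lx -> 0x%lx\n", bytes, raw, aligned);
	return 0;
}
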
-
-module_init(pmac_ide_probe);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
deleted file mode 100644
index ecd0a69245f6..000000000000
--- a/drivers/ide/q40ide.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Q40 I/O port IDE Driver
- *
- * (c) Richard Zidlicky
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- *
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#include <asm/ide.h>
-
- /*
- * Bases of the IDE interfaces
- */
-
-#define Q40IDE_NUM_HWIFS 2
-
-#define PCIDE_BASE1 0x1f0
-#define PCIDE_BASE2 0x170
-#define PCIDE_BASE3 0x1e8
-#define PCIDE_BASE4 0x168
-#define PCIDE_BASE5 0x1e0
-#define PCIDE_BASE6 0x160
-
-static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
- PCIDE_BASE1, PCIDE_BASE2, /* PCIDE_BASE3, PCIDE_BASE4 , PCIDE_BASE5,
- PCIDE_BASE6 */
-};
-
-static int q40ide_default_irq(unsigned long base)
-{
- switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- default:
- return 0;
- }
-}
-
-
-/*
- * Addresses are pretranslated for Q40 ISA access.
- */
-static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, int irq)
-{
- memset(hw, 0, sizeof(*hw));
- /* BIG FAT WARNING:
- assumption: only DATA port is ever used in 16 bit mode */
- hw->io_ports.data_addr = Q40_ISA_IO_W(base);
- hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
- hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
- hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
- hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
- hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
- hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
- hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
- hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);
-
- hw->irq = irq;
-}
-
-static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_insw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_outsw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-/* Q40 has a byte-swapped IDE interface */
-static const struct ide_tp_ops q40ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = q40ide_input_data,
- .output_data = q40ide_output_data,
-};
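
The two data helpers above exist because the Q40's IDE interface is wired byte-swapped: filesystem transfers for disks go through the plain MMIO string ops, while everything else is swapped word by word. A sketch of the per-word swap, not the actual m68k raw_insw_swapw() implementation:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	printf("0x%04x\n", swab16(0x1234));  /* prints 0x3412 */
	return 0;
}
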
-
-static const struct ide_port_info q40ide_port_info = {
- .tp_ops = &q40ide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-/*
- * the static array is needed to have the name reported in /proc/ioports,
- * hwif->name unfortunately isn't available yet
- */
-static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
- "ide0", "ide1"
-};
-
-/*
- * Probe for Q40 IDE interfaces
- */
-
-static int __init q40ide_init(void)
-{
- int i;
- struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
-
- if (!MACH_IS_Q40)
- return -ENODEV;
-
- printk(KERN_INFO "ide: Q40 IDE controller\n");
-
- for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
- const char *name = q40_ide_names[i];
-
- if (!request_region(pcide_bases[i], 8, name)) {
- printk("could not reserve ports %lx-%lx for %s\n",
- pcide_bases[i],pcide_bases[i]+8,name);
- continue;
- }
- if (!request_region(pcide_bases[i]+0x206, 1, name)) {
- printk("could not reserve port %lx for %s\n",
- pcide_bases[i]+0x206,name);
- release_region(pcide_bases[i], 8);
- continue;
- }
- q40_ide_setup_ports(&hw[i], pcide_bases[i],
- q40ide_default_irq(pcide_bases[i]));
-
- hws[i] = &hw[i];
- }
-
- return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
-}
-
-module_init(q40ide_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
deleted file mode 100644
index ab79b6289464..000000000000
--- a/drivers/ide/qd65xx.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996-2001 Linus Torvalds & author (see below)
- */
-
-/*
- * Version 0.03 Cleaned auto-tune, added probe
- * Version 0.04 Added second channel tuning
- * Version 0.05 Enhanced tuning ; added qd6500 support
- * Version 0.06 Added dos driver's list
- * Version 0.07 Second channel bug fix
- *
- * QDI QD6500/QD6580 EIDE controller fast support
- *
- * To activate controller support, use "ide0=qd65xx"
- */
-
-/*
- * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <asm/io.h>
-
-#define DRV_NAME "qd65xx"
-
-#include "qd65xx.h"
-
-/*
- * I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580)
- * or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580)
- * -- qd6500 is a single IDE interface
- * -- qd6580 is a dual IDE interface
- *
- * More research on qd6580 being done by willmore@cig.mot.com (David)
- * More Information given by Petr Soucek (petr@ryston.cz)
- * http://www.ryston.cz/petr/vlb
- */
-
-/*
- * base: Timer1
- *
- *
- * base+0x01: Config (R/O)
- *
- * bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500)
- * bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30
- * bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz
- * bit 3: qd6500: 1 = disabled, 0 = enabled
- * qd6580: 1
- * upper nibble:
- * qd6500: 1100
- * qd6580: either 1010 or 0101
- *
- *
- * base+0x02: Timer2 (qd6580 only)
- *
- *
- * base+0x03: Control (qd6580 only)
- *
- * bits 0-3 must always be set 1
- * bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock
- * bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb
- * 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb
- * channel 1 for hdc & hdd
- * bit 1 : 1 = only disks on primary port
- * 0 = disks & ATAPI devices on primary port
- * bit 2-4 : always 0
- * bit 5 : status, but of what ?
- * bit 6 : always set 1 by dos driver
- * bit 7 : set 1 for non-ATAPI devices on primary port
- * (maybe read-ahead and post-write buffer ?)
- */
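
A hypothetical decoder for the config byte described above, just to make the bit layout concrete (the upper-nibble chip check mirrors the QD_CONFIG_* constants defined in qd65xx.h later in this patch):

#include <stdio.h>

static void qd_decode_config(unsigned char config)
{
	printf("IDE baseport:    %s\n", (config & 0x01) ? "0x1f0" : "0x170");
	printf("qd65xx baseport: %s\n", (config & 0x02) ? "0xb0" : "0x30");
	printf("bus speed:       %s\n", (config & 0x04) ? "<=33MHz" : ">33MHz");
	printf("disabled:        %s\n", (config & 0x08) ? "yes" : "no");
	printf("chip:            %s\n",
	       (config & 0xf0) == 0xc0 ? "qd6500" :
	       ((config & 0xf0) == 0xa0 || (config & 0xf0) == 0x50)
			? "qd6580" : "unknown");
}

int main(void)
{
	qd_decode_config(0xc5);  /* qd6500, IDE at 0x1f0, ID3 set */
	return 0;
}
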
-
-static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
-
-/*
- * qd65xx_select:
- *
- * This routine is invoked to prepare for access to a given drive.
- */
-
-static void qd65xx_dev_select(ide_drive_t *drive)
-{
- u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
- (QD_TIMREG(drive) & 0x02);
-
- if (timings[index] != QD_TIMING(drive))
- outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-/*
- * qd6500_compute_timing
- *
- * computes the timing value where
- * lower nibble represents active time, in count of VLB clocks
- * upper nibble represents recovery time, in count of VLB clocks
- */
-
-static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
-{
- int clk = ide_vlb_clk ? ide_vlb_clk : 50;
- u8 act_cyc, rec_cyc;
-
- if (clk <= 33) {
- act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
- rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
- } else {
- act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
- rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
- }
-
- return (rec_cyc << 4) | 0x08 | act_cyc;
-}
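
A worked example of the packing above, re-implemented standalone for illustration only: with the DOS driver's worst-case timings of 175 ns active / 415 ns recovery and a 33 MHz VLB clock, the function yields 0x1b (recovery nibble 1, fixed 0x08 bit, active nibble 3):

#include <stdio.h>

#define IDE_IN(a, b, c) (((a) < (b)) ? (b) : ((a) > (c) ? (c) : (a)))

static unsigned char qd6500_timing(int clk, int active, int recovery)
{
	unsigned char act, rec;

	if (clk <= 33) {
		act = 9 - IDE_IN(active * clk / 1000 + 1, 2, 9);
		rec = 15 - IDE_IN(recovery * clk / 1000 + 1, 0, 15);
	} else {
		act = 8 - IDE_IN(active * clk / 1000 + 1, 1, 8);
		rec = 18 - IDE_IN(recovery * clk / 1000 + 1, 3, 18);
	}
	return (rec << 4) | 0x08 | act;
}

int main(void)
{
	/* worst-case DOS-driver timings at a 33 MHz VLB clock -> 0x1b */
	printf("0x%02x\n", qd6500_timing(33, 175, 415));
	return 0;
}
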
-
-/*
- * qd6580_compute_timing
- *
- * idem for qd6580
- */
-
-static u8 qd6580_compute_timing (int active_time, int recovery_time)
-{
- int clk = ide_vlb_clk ? ide_vlb_clk : 50;
- u8 act_cyc, rec_cyc;
-
- act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
- rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
-
- return (rec_cyc << 4) | act_cyc;
-}
-
-/*
- * qd_find_disk_type
- *
- * tries to find timing from dos driver's table
- */
-
-static int qd_find_disk_type (ide_drive_t *drive,
- int *active_time, int *recovery_time)
-{
- struct qd65xx_timing_s *p;
- char *m = (char *)&drive->id[ATA_ID_PROD];
- char model[ATA_ID_PROD_LEN];
-
- if (*m == 0)
- return 0;
-
- strncpy(model, m, ATA_ID_PROD_LEN);
- ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */
-
- for (p = qd65xx_timing ; p->offset != -1 ; p++) {
- if (!strncmp(p->model, model+p->offset, 4)) {
- printk(KERN_DEBUG "%s: listed !\n", drive->name);
- *active_time = p->active;
- *recovery_time = p->recovery;
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * qd_set_timing:
- *
- * records the timing
- */
-
-static void qd_set_timing (ide_drive_t *drive, u8 timing)
-{
- unsigned long data = (unsigned long)ide_get_drivedata(drive);
-
- data &= 0xff00;
- data |= timing;
- ide_set_drivedata(drive, (void *)data);
-
- printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
-}
-
-static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int active_time = 175;
- int recovery_time = 415; /* worst case values from the dos driver */
-
- /* FIXME: use drive->pio_mode value */
- if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
- (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
- id[ATA_ID_EIDE_PIO] >= 240) {
- printk(KERN_INFO "%s: PIO mode%d\n", drive->name,
- id[ATA_ID_OLD_PIO_MODES] & 0xff);
- active_time = 110;
- recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120;
- }
-
- qd_set_timing(drive, qd6500_compute_timing(drive->hwif,
- active_time, recovery_time));
-}
-
-static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned int cycle_time;
- int active_time = 175;
- int recovery_time = 415; /* worst case values from the dos driver */
- u8 base = (hwif->config_data & 0xff00) >> 8;
-
- if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- switch (pio) {
- case 0: break;
- case 3:
- if (cycle_time >= 110) {
- active_time = 86;
- recovery_time = cycle_time - 102;
- } else
- printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
- break;
- case 4:
- if (cycle_time >= 69) {
- active_time = 70;
- recovery_time = cycle_time - 61;
- } else
- printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
- break;
- default:
- if (cycle_time >= 180) {
- active_time = 110;
- recovery_time = cycle_time - 120;
- } else {
- active_time = t->active;
- recovery_time = cycle_time - active_time;
- }
- }
- printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio);
- }
-
- if (!hwif->channel && drive->media != ide_disk) {
- outb(0x5f, QD_CONTROL_PORT);
- printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO "
- "and post-write buffer on %s.\n",
- drive->name, hwif->name);
- }
-
- qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time));
-}
-
-/*
- * qd_testreg
- *
- * tests if the given port is a register
- */
-
-static int __init qd_testreg(int port)
-{
- unsigned long flags;
- u8 savereg, readreg;
-
- local_irq_save(flags);
- savereg = inb_p(port);
- outb_p(QD_TESTVAL, port); /* safe value */
- readreg = inb_p(port);
- outb(savereg, port);
- local_irq_restore(flags);
-
- if (savereg == QD_TESTVAL) {
- printk(KERN_ERR "Outch ! the probe for qd65xx isn't reliable !\n");
- printk(KERN_ERR "Please contact maintainers to tell about your hardware\n");
- printk(KERN_ERR "Assuming qd65xx is not present.\n");
- return 1;
- }
-
- return (readreg != QD_TESTVAL);
-}
-
-static void __init qd6500_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 base = (hwif->config_data & 0xff00) >> 8;
- u8 config = QD_CONFIG(hwif);
-
- ide_set_drivedata(drive, (void *)QD6500_DEF_DATA);
-}
-
-static void __init qd6580_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long t1, t2;
- u8 base = (hwif->config_data & 0xff00) >> 8;
- u8 config = QD_CONFIG(hwif);
-
- if (hwif->host_flags & IDE_HFLAG_SINGLE) {
- t1 = QD6580_DEF_DATA;
- t2 = QD6580_DEF_DATA2;
- } else
- t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA;
-
- ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1));
-}
-
-static const struct ide_tp_ops qd65xx_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = qd65xx_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops qd6500_port_ops = {
- .init_dev = qd6500_init_dev,
- .set_pio_mode = qd6500_set_pio_mode,
-};
-
-static const struct ide_port_ops qd6580_port_ops = {
- .init_dev = qd6580_init_dev,
- .set_pio_mode = qd6580_set_pio_mode,
-};
-
-static const struct ide_port_info qd65xx_port_info __initconst = {
- .name = DRV_NAME,
- .tp_ops = &qd65xx_tp_ops,
- .chipset = ide_qd65xx,
- .host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-/*
- * qd_probe:
- *
- * looks at the specified baseport and, if a qd chip is found,
- * registers and initialises it; returns 1 if another qd may be probed
- */
-
-static int __init qd_probe(int base)
-{
- int rc;
- u8 config, unit, control;
- struct ide_port_info d = qd65xx_port_info;
-
- config = inb(QD_CONFIG_PORT);
-
- if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
- return -ENODEV;
-
- unit = ! (config & QD_CONFIG_IDE_BASEPORT);
-
- if (unit)
- d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
-
- switch (config & 0xf0) {
- case QD_CONFIG_QD6500:
- if (qd_testreg(base))
- return -ENODEV; /* bad register */
-
- if (config & QD_CONFIG_DISABLED) {
- printk(KERN_WARNING "qd6500 is disabled !\n");
- return -ENODEV;
- }
-
- printk(KERN_NOTICE "qd6500 at %#x\n", base);
- printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
- config, QD_ID3);
-
- d.port_ops = &qd6500_port_ops;
- d.host_flags |= IDE_HFLAG_SINGLE;
- break;
- case QD_CONFIG_QD6580_A:
- case QD_CONFIG_QD6580_B:
- if (qd_testreg(base) || qd_testreg(base + 0x02))
- return -ENODEV; /* bad registers */
-
- control = inb(QD_CONTROL_PORT);
-
- printk(KERN_NOTICE "qd6580 at %#x\n", base);
- printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n",
- config, control, QD_ID3);
-
- outb(QD_DEF_CONTR, QD_CONTROL_PORT);
-
- d.port_ops = &qd6580_port_ops;
- if (control & QD_CONTR_SEC_DISABLED)
- d.host_flags |= IDE_HFLAG_SINGLE;
-
- printk(KERN_INFO "qd6580: %s IDE board\n",
- (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
- break;
- default:
- return -ENODEV;
- }
-
- rc = ide_legacy_device_add(&d, (base << 8) | config);
-
- if (d.host_flags & IDE_HFLAG_SINGLE)
- return (rc == 0) ? 1 : rc;
-
- return rc;
-}
-
-static bool probe_qd65xx;
-
-module_param_named(probe, probe_qd65xx, bool, 0);
-MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
-
-static int __init qd65xx_init(void)
-{
- int rc1, rc2 = -ENODEV;
-
- if (probe_qd65xx == 0)
- return -ENODEV;
-
- rc1 = qd_probe(0x30);
- if (rc1)
- rc2 = qd_probe(0xb0);
-
- if (rc1 < 0 && rc2 < 0)
- return -ENODEV;
-
- return 0;
-}
-
-module_init(qd65xx_init);
-
-MODULE_AUTHOR("Samuel Thibault");
-MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/qd65xx.h b/drivers/ide/qd65xx.h
deleted file mode 100644
index 01a43ab45e0e..000000000000
--- a/drivers/ide/qd65xx.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2000 Linus Torvalds & authors
- */
-
-/*
- * Authors: Petr Soucek <petr@ryston.cz>
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-/* clamps a to the range [b,c] */
-#define IDE_IN(a,b,c) ( ((a)<(b)) ? (b) : ( (a)>(c) ? (c) : (a)) )
-
-#define IDE_IMPLY(a,b) ((!(a)) || (b))
-
-#define QD_TIM1_PORT (base)
-#define QD_CONFIG_PORT (base+0x01)
-#define QD_TIM2_PORT (base+0x02)
-#define QD_CONTROL_PORT (base+0x03)
-
-#define QD_CONFIG_IDE_BASEPORT 0x01
-#define QD_CONFIG_BASEPORT 0x02
-#define QD_CONFIG_ID3 0x04
-#define QD_CONFIG_DISABLED 0x08
-#define QD_CONFIG_QD6500 0xc0
-#define QD_CONFIG_QD6580_A 0xa0
-#define QD_CONFIG_QD6580_B 0x50
-
-#define QD_CONTR_SEC_DISABLED 0x01
-
-#define QD_ID3 ((config & QD_CONFIG_ID3)!=0)
-
-#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff)
-
-static inline u8 QD_TIMING(ide_drive_t *drive)
-{
- return (unsigned long)ide_get_drivedata(drive) & 0x00ff;
-}
-
-static inline u8 QD_TIMREG(ide_drive_t *drive)
-{
- return ((unsigned long)ide_get_drivedata(drive) & 0xff00) >> 8;
-}
-
-#define QD6500_DEF_DATA ((QD_TIM1_PORT<<8) | (QD_ID3 ? 0x0c : 0x08))
-#define QD6580_DEF_DATA ((QD_TIM1_PORT<<8) | (QD_ID3 ? 0x0a : 0x00))
-#define QD6580_DEF_DATA2 ((QD_TIM2_PORT<<8) | (QD_ID3 ? 0x0a : 0x00))
-#define QD_DEF_CONTR (0x40 | ((control & 0x02) ? 0x9f : 0x1f))
-
-#define QD_TESTVAL 0x19 /* safe value */
-
-/* Drive specific timing taken from DOS driver v3.7 */
-
-static struct qd65xx_timing_s {
-	s8 offset;		/* offset from the beginning of Model Number */
- char model[4]; /* 4 chars from Model number, no conversion */
- s16 active; /* active time */
- s16 recovery; /* recovery time */
-} qd65xx_timing [] = {
- { 30, "2040", 110, 225 }, /* Conner CP30204 */
- { 30, "2045", 135, 225 }, /* Conner CP30254 */
- { 30, "1040", 155, 325 }, /* Conner CP30104 */
- { 30, "1047", 135, 265 }, /* Conner CP30174 */
- { 30, "5344", 135, 225 }, /* Conner CP3544 */
- { 30, "01 4", 175, 405 }, /* Conner CP-3104 */
- { 27, "C030", 175, 375 }, /* Conner CP3000 */
- { 8, "PL42", 110, 295 }, /* Quantum LP240 */
- { 8, "PL21", 110, 315 }, /* Quantum LP120 */
- { 8, "PL25", 175, 385 }, /* Quantum LP52 */
- { 4, "PA24", 110, 285 }, /* WD Piranha SP4200 */
- { 6, "2200", 110, 260 }, /* WD Caviar AC2200 */
- { 6, "3204", 110, 235 }, /* WD Caviar AC2340 */
- { 6, "1202", 110, 265 }, /* WD Caviar AC2120 */
- { 0, "DS3-", 135, 315 }, /* Teac SD340 */
- { 8, "KM32", 175, 355 }, /* Toshiba MK234 */
- { 2, "53A1", 175, 355 }, /* Seagate ST351A */
- { 2, "4108", 175, 295 }, /* Seagate ST1480A */
- { 2, "1344", 175, 335 }, /* Seagate ST3144A */
- { 6, "7 12", 110, 225 }, /* Maxtor 7213A */
- { 30, "02F4", 145, 295 }, /* Conner 3204F */
- { 2, "1302", 175, 335 }, /* Seagate ST3120A */
- { 2, "2334", 145, 265 }, /* Seagate ST3243A */
- { 2, "2338", 145, 275 }, /* Seagate ST3283A */
- { 2, "3309", 145, 275 }, /* Seagate ST3390A */
- { 2, "5305", 145, 275 }, /* Seagate ST3550A */
- { 2, "4100", 175, 295 }, /* Seagate ST1400A */
- { 2, "4110", 175, 295 }, /* Seagate ST1401A */
- { 2, "6300", 135, 265 }, /* Seagate ST3600A */
- { 2, "5300", 135, 265 }, /* Seagate ST3500A */
- { 6, "7 31", 135, 225 }, /* Maxtor 7131 AT */
- { 6, "7 43", 115, 265 }, /* Maxtor 7345 AT */
- { 6, "7 42", 110, 255 }, /* Maxtor 7245 AT */
- { 6, "3 04", 135, 265 }, /* Maxtor 340 AT */
- { 6, "61 0", 135, 285 }, /* WD AC160 */
- { 6, "1107", 135, 235 }, /* WD AC1170 */
- { 6, "2101", 110, 220 }, /* WD AC1210 */
- { 6, "4202", 135, 245 }, /* WD AC2420 */
- { 6, "41 0", 175, 355 }, /* WD Caviar 140 */
- { 6, "82 0", 175, 355 }, /* WD Caviar 280 */
- { 8, "PL01", 175, 375 }, /* Quantum LP105 */
- { 8, "PL25", 110, 295 }, /* Quantum LP525 */
- { 10, "4S 2", 175, 385 }, /* Quantum ELS42 */
- { 10, "8S 5", 175, 385 }, /* Quantum ELS85 */
- { 10, "1S72", 175, 385 }, /* Quantum ELS127 */
- { 10, "1S07", 175, 385 }, /* Quantum ELS170 */
- { 8, "ZE42", 135, 295 }, /* Quantum EZ240 */
- { 8, "ZE21", 175, 385 }, /* Quantum EZ127 */
- { 8, "ZE58", 175, 385 }, /* Quantum EZ85 */
- { 8, "ZE24", 175, 385 }, /* Quantum EZ42 */
- { 27, "C036", 155, 325 }, /* Conner CP30064 */
- { 27, "C038", 155, 325 }, /* Conner CP30084 */
- { 6, "2205", 110, 255 }, /* WDC AC2250 */
- { 2, " CHA", 140, 415 }, /* WDC AH series; WDC AH260, WDC */
- { 2, " CLA", 140, 415 }, /* WDC AL series: WDC AL2120, 2170, */
- { 4, "UC41", 140, 415 }, /* WDC CU140 */
- { 6, "1207", 130, 275 }, /* WDC AC2170 */
- { 6, "2107", 130, 275 }, /* WDC AC1270 */
- { 6, "5204", 130, 275 }, /* WDC AC2540 */
- { 30, "3004", 110, 235 }, /* Conner CP30340 */
- { 30, "0345", 135, 255 }, /* Conner CP30544 */
- { 12, "12A3", 175, 320 }, /* MAXTOR LXT-213A */
- { 12, "43A0", 145, 240 }, /* MAXTOR LXT-340A */
- { 6, "7 21", 180, 290 }, /* Maxtor 7120 AT */
- { 6, "7 71", 135, 240 }, /* Maxtor 7170 AT */
- { 12, "45\0000", 110, 205 }, /* MAXTOR MXT-540 */
- { 8, "PL11", 180, 290 }, /* QUANTUM LP110A */
- { 8, "OG21", 150, 275 }, /* QUANTUM GO120 */
- { 12, "42A5", 175, 320 }, /* MAXTOR LXT-245A */
- { 2, "2309", 175, 295 }, /* ST3290A */
- { 2, "3358", 180, 310 }, /* ST3385A */
- { 2, "6355", 180, 310 }, /* ST3655A */
- { 2, "1900", 175, 270 }, /* ST9100A */
- { 2, "1954", 175, 270 }, /* ST9145A */
- { 2, "1909", 175, 270 }, /* ST9190AG */
- { 2, "2953", 175, 270 }, /* ST9235A */
- { 2, "1359", 175, 270 }, /* ST3195A */
- { 24, "3R11", 175, 290 }, /* ALPS ELECTRIC Co.,LTD, DR311C */
- { 0, "2M26", 175, 215 }, /* M262XT-0Ah */
- { 4, "2253", 175, 300 }, /* HP C2235A */
- { 4, "-32A", 145, 245 }, /* H3133-A2 */
- { 30, "0326", 150, 270 }, /* Samsung Electronics 120MB */
- { 30, "3044", 110, 195 }, /* Conner CFA340A */
- { 30, "43A0", 110, 195 }, /* Conner CFA340A */
- { -1, " ", 175, 415 } /* unknown disk name */
-};
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
deleted file mode 100644
index 0ab8b86b7ed7..000000000000
--- a/drivers/ide/rapide.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 1996-2002 Russell King.
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/errno.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/ecard.h>
-
-static const struct ide_port_info rapide_port_info = {
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
- void __iomem *ctrl, unsigned int sz, int irq)
-{
- unsigned long port = (unsigned long)base;
- int i;
-
- for (i = 0; i <= 7; i++) {
- hw->io_ports_array[i] = port;
- port += sz;
- }
- hw->io_ports.ctl_addr = (unsigned long)ctrl;
- hw->irq = irq;
-}
-
-static int rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
-{
- void __iomem *base;
- struct ide_host *host;
- int ret;
- struct ide_hw hw, *hws[] = { &hw };
-
- ret = ecard_request_resources(ec);
- if (ret)
- goto out;
-
- base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
- if (!base) {
- ret = -ENOMEM;
- goto release;
- }
-
- memset(&hw, 0, sizeof(hw));
- rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
- hw.dev = &ec->dev;
-
- ret = ide_host_add(&rapide_port_info, hws, 1, &host);
- if (ret)
- goto release;
-
- ecard_set_drvdata(ec, host);
- goto out;
-
- release:
- ecard_release_resources(ec);
- out:
- return ret;
-}
-
-static void rapide_remove(struct expansion_card *ec)
-{
- struct ide_host *host = ecard_get_drvdata(ec);
-
- ecard_set_drvdata(ec, NULL);
-
- ide_host_remove(host);
-
- ecard_release_resources(ec);
-}
-
-static struct ecard_id rapide_ids[] = {
- { MANU_YELLOWSTONE, PROD_YELLOWSTONE_RAPIDE32 },
- { 0xffff, 0xffff }
-};
-
-static struct ecard_driver rapide_driver = {
- .probe = rapide_probe,
- .remove = rapide_remove,
- .id_table = rapide_ids,
- .drv = {
- .name = "rapide",
- },
-};
-
-static int __init rapide_init(void)
-{
- return ecard_register_driver(&rapide_driver);
-}
-
-static void __exit rapide_exit(void)
-{
- ecard_remove_driver(&rapide_driver);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Yellowstone RAPIDE driver");
-
-module_init(rapide_init);
-module_exit(rapide_exit);
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
deleted file mode 100644
index fce2b7de5a19..000000000000
--- a/drivers/ide/rz1000.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1998 Linus Torvalds & author (see below)
- */
-
-/*
- * Principal Author: mlord@pobox.com (Mark Lord)
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This file provides support for disabling the buggy read-ahead
- * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
- *
- * It is unclear whether this fixes both ports or only the primary port.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "rz1000"
-
-static int rz1000_disable_readahead(struct pci_dev *dev)
-{
- u16 reg;
-
- if (!pci_read_config_word (dev, 0x40, &reg) &&
- !pci_write_config_word(dev, 0x40, reg & 0xdfff)) {
- printk(KERN_INFO "%s: disabled chipset read-ahead "
- "(buggy RZ1000/RZ1001)\n", pci_name(dev));
- return 0;
- } else {
- printk(KERN_INFO "%s: serialized, disabled unmasking "
- "(buggy RZ1000/RZ1001)\n", pci_name(dev));
- return 1;
- }
-}
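
The fix above hinges on a single bit: the AND with 0xdfff clears bit 13 of PCI config word 0x40, which turns off the chip's buggy read-ahead mode. A trivial sketch of the mask (the register value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned short reg = 0x2fff;           /* hypothetical register value */
	unsigned short fixed = reg & 0xdfff;   /* clear bit 13: read-ahead off */

	printf("0x%04x -> 0x%04x\n", reg, fixed);
	return 0;
}
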
-
-static const struct ide_port_info rz1000_chipset = {
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_NO_DMA,
-};
-
-static int rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = rz1000_chipset;
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- if (rz1000_disable_readahead(dev)) {
- d.host_flags |= IDE_HFLAG_SERIALIZE;
- d.host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static void rz1000_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id rz1000_pci_tbl[] = {
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), 0 },
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, rz1000_pci_tbl);
-
-static struct pci_driver rz1000_pci_driver = {
- .name = "RZ1000_IDE",
- .id_table = rz1000_pci_tbl,
- .probe = rz1000_init_one,
- .remove = rz1000_remove,
-};
-
-static int __init rz1000_ide_init(void)
-{
- return ide_pci_register_driver(&rz1000_pci_driver);
-}
-
-static void __exit rz1000_ide_exit(void)
-{
- pci_unregister_driver(&rz1000_pci_driver);
-}
-
-module_init(rz1000_ide_init);
-module_exit(rz1000_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for RZ1000 IDE");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
deleted file mode 100644
index a5b701818405..000000000000
--- a/drivers/ide/sc1200.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor.
- *
- * Documentation:
- * Available from National Semiconductor
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/pm.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "sc1200"
-
-#define SC1200_REV_A 0x00
-#define SC1200_REV_B1 0x01
-#define SC1200_REV_B3 0x02
-#define SC1200_REV_C1 0x03
-#define SC1200_REV_D1 0x04
-
-#define PCI_CLK_33 0x00
-#define PCI_CLK_48 0x01
-#define PCI_CLK_66 0x02
-#define PCI_CLK_33A 0x03
-
-static unsigned short sc1200_get_pci_clock (void)
-{
- unsigned char chip_id, silicon_revision;
- unsigned int pci_clock;
- /*
- * Check the silicon revision, as not all versions of the chip
- * have the register with the fast PCI bus timings.
- */
- chip_id = inb (0x903c);
- silicon_revision = inb (0x903d);
-
- // Read the fast pci clock frequency
- if (chip_id == 0x04 && silicon_revision < SC1200_REV_B1) {
- pci_clock = PCI_CLK_33;
- } else {
- // check clock generator configuration (cfcc)
- // the clock is in bits 8 and 9 of this word
-
- pci_clock = inw (0x901e);
- pci_clock >>= 8;
- pci_clock &= 0x03;
- if (pci_clock == PCI_CLK_33A)
- pci_clock = PCI_CLK_33;
- }
- return pci_clock;
-}
-
-/*
- * Here are the standard PIO mode 0-4 timings for each "format".
- * Format-0 uses fast data reg timings, with slower command reg timings.
- * Format-1 uses fast timings for all registers, but won't work with all drives.
- */
-static const unsigned int sc1200_pio_timings[4][5] =
- {{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
- {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
- {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131}}; // format1, 66Mhz
-
-/*
- * After chip reset, the PIO timings are set to 0x00009172, which is not valid.
- */
-//#define SC1200_BAD_PIO(timings) (((timings)&~0x80000000)==0x00009172)
-
-static void sc1200_tunepio(ide_drive_t *drive, u8 pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- unsigned int basereg = hwif->channel ? 0x50 : 0x40, format = 0;
-
- pci_read_config_dword(pdev, basereg + 4, &format);
- format = (format >> 31) & 1;
- if (format)
- format += sc1200_get_pci_clock();
- pci_write_config_dword(pdev, basereg + ((drive->dn & 1) << 3),
- sc1200_pio_timings[format][pio]);
-}
-
-/*
- * The SC1200 specifies that two drives sharing a cable cannot mix
- * UDMA/MDMA. It has to be one or the other, for the pair, though
- * different timings can still be chosen for each drive. We could
- * set the appropriate timing bits on the fly, but that might be
- * a bit confusing. So, for now we statically handle this requirement
- * by looking at our mate drive to see what it is capable of, before
- * choosing a mode for our own drive.
- */
-static u8 sc1200_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_drive_t *mate = ide_get_pair_dev(drive);
- u16 *mateid;
- u8 mask = hwif->ultra_mask;
-
- if (mate == NULL)
- goto out;
- mateid = mate->id;
-
- if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) {
- if ((mateid[ATA_ID_FIELD_VALID] & 4) &&
- (mateid[ATA_ID_UDMA_MODES] & 7))
- goto out;
- if (mateid[ATA_ID_MWDMA_MODES] & 7)
- mask = 0;
- }
-out:
- return mask;
-}
-
-static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int reg, timings;
- unsigned short pci_clock;
- unsigned int basereg = hwif->channel ? 0x50 : 0x40;
- const u8 mode = drive->dma_mode;
-
- static const u32 udma_timing[3][3] = {
- { 0x00921250, 0x00911140, 0x00911030 },
- { 0x00932470, 0x00922260, 0x00922140 },
- { 0x009436a1, 0x00933481, 0x00923261 },
- };
-
- static const u32 mwdma_timing[3][3] = {
- { 0x00077771, 0x00012121, 0x00002020 },
- { 0x000bbbb2, 0x00024241, 0x00013131 },
- { 0x000ffff3, 0x00035352, 0x00015151 },
- };
-
- pci_clock = sc1200_get_pci_clock();
-
- /*
- * Note that each DMA mode has several timings associated with it.
- * The correct timing depends on the fast PCI clock freq.
- */
-
- if (mode >= XFER_UDMA_0)
- timings = udma_timing[pci_clock][mode - XFER_UDMA_0];
- else
- timings = mwdma_timing[pci_clock][mode - XFER_MW_DMA_0];
-
- if ((drive->dn & 1) == 0) {
- pci_read_config_dword(dev, basereg + 4, &reg);
- timings |= reg & 0x80000000; /* preserve PIO format bit */
- pci_write_config_dword(dev, basereg + 4, timings);
- } else
- pci_write_config_dword(dev, basereg + 12, timings);
-}
-
-/* Replacement for the standard ide_dma_end action in
- * dma_proc.
- *
- * returns 1 on error, 0 otherwise
- */
-static int sc1200_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long dma_base = hwif->dma_base;
- u8 dma_stat;
-
- dma_stat = inb(dma_base+2); /* get DMA status */
-
- if (!(dma_stat & 4))
- printk(" ide_dma_end dma_stat=%0x err=%x newerr=%x\n",
- dma_stat, ((dma_stat&7)!=4), ((dma_stat&2)==2));
-
- outb(dma_stat|0x1b, dma_base+2); /* clear the INTR & ERROR bits */
- outb(inb(dma_base)&~1, dma_base); /* !! DO THIS HERE !! stop DMA */
-
- return (dma_stat & 7) != 4; /* verify good DMA status */
-}
-
-/*
- * sc1200_set_pio_mode() handles setting of PIO modes
- * for both the chipset and drive.
- *
- * All existing BIOSs for this chipset guarantee that all drives
- * will have valid default PIO timings set up before we get here.
- */
-
-static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int mode = -1;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * bad abuse of ->set_pio_mode interface
- */
- switch (pio) {
- case 200: mode = XFER_UDMA_0; break;
- case 201: mode = XFER_UDMA_1; break;
- case 202: mode = XFER_UDMA_2; break;
- case 100: mode = XFER_MW_DMA_0; break;
- case 101: mode = XFER_MW_DMA_1; break;
- case 102: mode = XFER_MW_DMA_2; break;
- }
- if (mode != -1) {
- printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
- ide_dma_off_quietly(drive);
- if (ide_set_dma_mode(drive, mode) == 0 &&
- (drive->dev_flags & IDE_DFLAG_USING_DMA))
- hwif->dma_ops->dma_host_set(drive, 1);
- return;
- }
-
- sc1200_tunepio(drive, pio);
-}
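
The switch above overloads the ->set_pio_mode argument: 200+n requests UDMA mode n and 100+n requests MW DMA mode n, while 0..4 remain genuine PIO modes (the IDE_HFLAG_ABUSE_DMA_MODES host flag below enables this convention). A hedged decoder sketch of that encoding:

#include <stdio.h>

static void decode_pio_arg(int pio)
{
	if (pio >= 200 && pio <= 202)
		printf("UDMA%d\n", pio - 200);
	else if (pio >= 100 && pio <= 102)
		printf("MWDMA%d\n", pio - 100);
	else
		printf("PIO%d\n", pio);
}

int main(void)
{
	decode_pio_arg(201);  /* UDMA1 */
	decode_pio_arg(3);    /* PIO3 */
	return 0;
}
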
-
-#ifdef CONFIG_PM
-struct sc1200_saved_state {
- u32 regs[8];
-};
-
-static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
-{
- printk("SC1200: suspend(%u)\n", state.event);
-
- /*
- * we only save state when going from full power to less
- */
- if (state.event == PM_EVENT_ON) {
- struct ide_host *host = pci_get_drvdata(dev);
- struct sc1200_saved_state *ss = host->host_priv;
- unsigned int r;
-
- /*
- * save timing registers
- * (this may be unnecessary if BIOS also does it)
- */
- for (r = 0; r < 8; r++)
- pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
- }
-
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
- return 0;
-}
-
-static int sc1200_resume (struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct sc1200_saved_state *ss = host->host_priv;
- unsigned int r;
- int i;
-
- i = pci_enable_device(dev);
- if (i)
- return i;
-
- /*
- * restore timing registers
- * (this may be unnecessary if BIOS also does it)
- */
- for (r = 0; r < 8; r++)
- pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
-
- return 0;
-}
-#endif
-
-static const struct ide_port_ops sc1200_port_ops = {
- .set_pio_mode = sc1200_set_pio_mode,
- .set_dma_mode = sc1200_set_dma_mode,
- .udma_filter = sc1200_udma_filter,
-};
-
-static const struct ide_dma_ops sc1200_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = sc1200_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info sc1200_chipset = {
- .name = DRV_NAME,
- .port_ops = &sc1200_port_ops,
- .dma_ops = &sc1200_dma_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_ABUSE_DMA_MODES,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct sc1200_saved_state *ss = NULL;
- int rc;
-
-#ifdef CONFIG_PM
- ss = kmalloc(sizeof(*ss), GFP_KERNEL);
- if (ss == NULL)
- return -ENOMEM;
-#endif
- rc = ide_pci_init_one(dev, &sc1200_chipset, ss);
- if (rc)
- kfree(ss);
-
- return rc;
-}
-
-static const struct pci_device_id sc1200_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), 0},
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sc1200_pci_tbl);
-
-static struct pci_driver sc1200_pci_driver = {
- .name = "SC1200_IDE",
- .id_table = sc1200_pci_tbl,
- .probe = sc1200_init_one,
- .remove = ide_pci_remove,
-#ifdef CONFIG_PM
- .suspend = sc1200_suspend,
- .resume = sc1200_resume,
-#endif
-};
-
-static int __init sc1200_ide_init(void)
-{
- return ide_pci_register_driver(&sc1200_pci_driver);
-}
-
-static void __exit sc1200_ide_exit(void)
-{
- pci_unregister_driver(&sc1200_pci_driver);
-}
-
-module_init(sc1200_ide_init);
-module_exit(sc1200_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for NS SC1200 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
deleted file mode 100644
index 458e72e034b0..000000000000
--- a/drivers/ide/serverworks.c
+++ /dev/null
@@ -1,456 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2000 Michel Aubry
- * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- * Portions copyright (c) 2001 Sun Microsystems
- *
- *
- * RCC/ServerWorks IDE driver for Linux
- *
- * OSB4: `Open South Bridge' IDE Interface (fn 1)
- * supports UDMA mode 2 (33 MB/s)
- *
- * CSB5: `Champion South Bridge' IDE Interface (fn 1)
- * all revisions support UDMA mode 4 (66 MB/s)
- * revision A2.0 and up support UDMA mode 5 (100 MB/s)
- *
- * *** The CSB5 does not provide ANY register ***
- * *** to detect 80-conductor cable presence. ***
- *
- * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
- *
- * HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems. IDE
- * controller same as the CSB6. Single channel ATA100 only.
- *
- * Documentation:
- * Available under NDA only. Errata info very hard to get.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "serverworks"
-
-#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
-#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-
-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
- * can overrun their FIFOs when used with the CSB5 */
-static const char *svwks_bad_ata100[] = {
- "ST320011A",
- "ST340016A",
- "ST360021A",
- "ST380021A",
- NULL
-};
-
-static int check_in_drive_lists (ide_drive_t *drive, const char **list)
-{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- while (*list)
- if (!strcmp(*list++, m))
- return 1;
- return 0;
-}
-
-static u8 svwks_udma_filter(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
- return 0x1f;
- } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
- return 0x07;
- } else {
- u8 btr = 0, mode, mask;
-
- pci_read_config_byte(dev, 0x5A, &btr);
- mode = btr & 0x3;
-
- /* If someone decides to do UDMA133 on CSB5 the same
- issue will bite so be inclusive */
- if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
- mode = 2;
-
- switch(mode) {
- case 3: mask = 0x3f; break;
- case 2: mask = 0x1f; break;
- case 1: mask = 0x07; break;
- default: mask = 0x00; break;
- }
-
- return mask;
- }
-}
-
-static u8 svwks_csb_check (struct pci_dev *dev)
-{
- switch (dev->device) {
- case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
- case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
- return 1;
- default:
- break;
- }
- return 0;
-}
-
-static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
- static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- if (drive->dn >= ARRAY_SIZE(drive_pci))
- return;
-
- pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
-
- if (svwks_csb_check(dev)) {
- u16 csb_pio = 0;
-
- pci_read_config_word(dev, 0x4a, &csb_pio);
-
- csb_pio &= ~(0x0f << (4 * drive->dn));
- csb_pio |= (pio << (4 * drive->dn));
-
- pci_write_config_word(dev, 0x4a, csb_pio);
- }
-}
-
-static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
- static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
- static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- const u8 speed = drive->dma_mode;
- u8 unit = drive->dn & 1;
-
- u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
-
- if (drive->dn >= ARRAY_SIZE(drive_pci2))
- return;
-
- pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
- pci_read_config_byte(dev, 0x54, &ultra_enable);
-
- ultra_timing &= ~(0x0F << (4*unit));
- ultra_enable &= ~(0x01 << drive->dn);
-
- if (speed >= XFER_UDMA_0) {
- dma_timing |= dma_modes[2];
- ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
- ultra_enable |= (0x01 << drive->dn);
- } else if (speed >= XFER_MW_DMA_0)
- dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
-
- pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
- pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
- pci_write_config_byte(dev, 0x54, ultra_enable);
-}
-
-static int init_chipset_svwks(struct pci_dev *dev)
-{
- unsigned int reg;
- u8 btr;
-
- /* force Master Latency Timer value to 64 PCICLKs */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
-
- /* OSB4 : South Bridge and IDE */
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- struct pci_dev *isa_dev =
- pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
- if (isa_dev) {
- pci_read_config_dword(isa_dev, 0x64, &reg);
- reg &= ~0x00002000; /* disable 600ns interrupt mask */
- if(!(reg & 0x00004000))
- printk(KERN_DEBUG DRV_NAME " %s: UDMA not BIOS "
- "enabled.\n", pci_name(dev));
- reg |= 0x00004000; /* enable UDMA/33 support */
- pci_write_config_dword(isa_dev, 0x64, reg);
- pci_dev_put(isa_dev);
- }
- }
-
- /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
- else if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
-
- /* Third Channel Test */
- if (!(PCI_FUNC(dev->devfn) & 1)) {
- struct pci_dev * findev = NULL;
- u32 reg4c = 0;
- findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
- if (findev) {
- pci_read_config_dword(findev, 0x4C, &reg4c);
- reg4c &= ~0x000007FF;
- reg4c |= 0x00000040;
- reg4c |= 0x00000020;
- pci_write_config_dword(findev, 0x4C, reg4c);
- pci_dev_put(findev);
- }
- outb_p(0x06, 0x0c00);
- dev->irq = inb_p(0x0c01);
- } else {
- struct pci_dev * findev = NULL;
- u8 reg41 = 0;
-
- findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
- if (findev) {
- pci_read_config_byte(findev, 0x41, &reg41);
- reg41 &= ~0x40;
- pci_write_config_byte(findev, 0x41, reg41);
- pci_dev_put(findev);
- }
- /*
- * This is a device pin issue on CSB6.
- * Since there will be a future raid mode,
- * early versions of the chipset require the
- * interrupt pin to be set, and it is a compatibility
- * mode issue.
- */
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
- dev->irq = 0;
- }
-// pci_read_config_dword(dev, 0x40, &pioreg)
-// pci_write_config_dword(dev, 0x40, 0x99999999);
-// pci_read_config_dword(dev, 0x44, &dmareg);
-// pci_write_config_dword(dev, 0x44, 0xFFFFFFFF);
- /* setup the UDMA Control register
- *
- * 1. clear bit 6 to enable DMA
- * 2. enable DMA modes with bits 0-1
- * 00 : legacy
- * 01 : udma2
- * 10 : udma2/udma4
- * 11 : udma2/udma4/udma5
- */
- pci_read_config_byte(dev, 0x5A, &btr);
- btr &= ~0x40;
- if (!(PCI_FUNC(dev->devfn) & 1))
- btr |= 0x2;
- else
- btr |= (dev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
- pci_write_config_byte(dev, 0x5A, btr);
- }
- /* Setup HT1000 SouthBridge Controller - Single Channel Only */
- else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
- pci_read_config_byte(dev, 0x5A, &btr);
- btr &= ~0x40;
- btr |= 0x3;
- pci_write_config_byte(dev, 0x5A, btr);
- }
-
- return 0;
-}
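
To make the bit 0-1 encoding in the comment above concrete, a small decoder sketch for the 0x5A UDMA Control register (illustrative only):

#include <stdio.h>

static const char *udma_ctrl_mode(unsigned char btr)
{
	switch (btr & 0x3) {
	case 0:  return "legacy";
	case 1:  return "udma2";
	case 2:  return "udma2/udma4";
	default: return "udma2/udma4/udma5";
	}
}

int main(void)
{
	/* bit 6 clear = DMA enabled; the low bits select the UDMA ceiling */
	printf("%s\n", udma_ctrl_mode(0x03));  /* udma2/udma4/udma5 */
	return 0;
}
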
-
-static u8 ata66_svwks_svwks(ide_hwif_t *hwif)
-{
- return ATA_CBL_PATA80;
-}
-
-/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
- * of the subsystem device ID indicate presence of an 80-pin cable.
- * Bit 15 clear = secondary IDE channel does not have 80-pin cable.
- * Bit 15 set = secondary IDE channel has 80-pin cable.
- * Bit 14 clear = primary IDE channel does not have 80-pin cable.
- * Bit 14 set = primary IDE channel has 80-pin cable.
- */
-static u8 ata66_svwks_dell(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
- dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
- return ((1 << (hwif->channel + 14)) &
- dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- return ATA_CBL_PATA40;
-}
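
A worked example of the Dell subsystem-ID convention documented above (the subsystem value is hypothetical):

#include <stdio.h>

/* channel 0 = primary (bit 14), channel 1 = secondary (bit 15) */
static const char *cable(unsigned int subsys, int channel)
{
	return (subsys & (1u << (channel + 14))) ? "80-wire" : "40-wire";
}

int main(void)
{
	unsigned int subsys = 0x4000;   /* hypothetical: bit 14 set only */

	printf("primary:   %s\n", cable(subsys, 0));  /* 80-wire */
	printf("secondary: %s\n", cable(subsys, 1));  /* 40-wire */
	return 0;
}
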
-
-/* Sun Cobalt Alpine hardware avoids the 80-pin cable
- * detect issue by attaching the drives directly to the board.
- * This check follows the Dell precedent (how scary is that?!)
- *
- * WARNING: this only works on Alpine hardware!
- */
-static u8 ata66_svwks_cobalt(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
- dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
- return ((1 << (hwif->channel + 14)) &
- dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- return ATA_CBL_PATA40;
-}
-
-static u8 svwks_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- /* Server Works */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
- return ata66_svwks_svwks (hwif);
-
- /* Dell PowerEdge */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL)
- return ata66_svwks_dell (hwif);
-
- /* Cobalt Alpine */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
- return ata66_svwks_cobalt (hwif);
-
- /* Per Specified Design by OEM, and ASIC Architect */
- if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
- return ATA_CBL_PATA80;
-
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops osb4_port_ops = {
- .set_pio_mode = svwks_set_pio_mode,
- .set_dma_mode = svwks_set_dma_mode,
-};
-
-static const struct ide_port_ops svwks_port_ops = {
- .set_pio_mode = svwks_set_pio_mode,
- .set_dma_mode = svwks_set_dma_mode,
- .udma_filter = svwks_udma_filter,
- .cable_detect = svwks_cable_detect,
-};
-
-static const struct ide_port_info serverworks_chipsets[] = {
- { /* 0: OSB4 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &osb4_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = 0x00, /* UDMA is problematic on OSB4 */
- },
- { /* 1: CSB5 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 2: CSB6 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 3: CSB6-2 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 4: HT1000 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-/**
- * svwks_init_one - called when a OSB/CSB is found
- * @dev: the svwks device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = serverworks_chipsets[idx];
-
- if (idx == 1)
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
- else if (idx == 2 || idx == 3) {
- if ((PCI_FUNC(dev->devfn) & 1) == 0) {
- if (pci_resource_start(dev, 0) != 0x01f1)
- d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
- d.host_flags |= IDE_HFLAG_SINGLE;
- } else
- d.host_flags &= ~IDE_HFLAG_SINGLE;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id svwks_pci_tbl[] = {
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 1 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 3 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
-
-static struct pci_driver svwks_pci_driver = {
- .name = "Serverworks_IDE",
- .id_table = svwks_pci_tbl,
- .probe = svwks_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init svwks_ide_init(void)
-{
- return ide_pci_register_driver(&svwks_pci_driver);
-}
-
-static void __exit svwks_ide_exit(void)
-{
- pci_unregister_driver(&svwks_pci_driver);
-}
-
-module_init(svwks_ide_init);
-module_exit(svwks_ide_exit);
-
-MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
deleted file mode 100644
index fdc8e813170c..000000000000
--- a/drivers/ide/setup-pci.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 1995-1998 Mark Lord
- * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/io.h>
-
-/**
- * ide_setup_pci_baseregs - place a PCI IDE controller native
- * @dev: PCI device of interface to switch native
- * @name: Name of interface
- *
- * We attempt to place the PCI interface into PCI native mode. If
- * we succeed the BARs are ok and the controller is in PCI mode.
- * Returns 0 on success or an errno code.
- *
- *	FIXME: if we program the interface and then fail to set the BARs,
- *	we don't switch it back to legacy mode. Do we actually care?
- */
-
-static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
-{
- u8 progif = 0;
-
- /*
- * Place both IDE interfaces into PCI "native" mode:
- */
- if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
- (progif & 5) != 5) {
- if ((progif & 0xa) != 0xa) {
- printk(KERN_INFO "%s %s: device not capable of full "
- "native PCI mode\n", name, pci_name(dev));
- return -EOPNOTSUPP;
- }
- printk(KERN_INFO "%s %s: placing both ports into native PCI "
- "mode\n", name, pci_name(dev));
- (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
- if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
- (progif & 5) != 5) {
- printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
- "wanted 0x%04x, got 0x%04x\n",
- name, pci_name(dev), progif | 5, progif);
- return -EOPNOTSUPP;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
-{
- u8 dma_stat = inb(dma_base + 2);
-
- outb(dma_stat & 0x60, dma_base + 2);
- dma_stat = inb(dma_base + 2);
-
- return (dma_stat & 0x80) ? 1 : 0;
-}
-
-/**
- * ide_pci_dma_base - setup BMIBA
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
- */
-
-unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long dma_base = 0;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return hwif->dma_base;
-
- if (hwif->mate && hwif->mate->dma_base) {
- dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
- } else {
- u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;
-
- dma_base = pci_resource_start(dev, baridx);
-
- if (dma_base == 0) {
- printk(KERN_ERR "%s %s: DMA base is invalid\n",
- d->name, pci_name(dev));
- return 0;
- }
- }
-
- if (hwif->channel)
- dma_base += 8;
-
- return dma_base;
-}
-EXPORT_SYMBOL_GPL(ide_pci_dma_base);
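-
-/*
- * Illustrative note, not part of the original driver: the SFF-8038i
- * bus-master block behind BAR4 (BAR2 on CS5520) is 16 bytes, the primary
- * channel using offsets 0-7 and the secondary channel offsets 8-15 --
- * hence the "+ 8" for hwif->channel above.  A minimal sketch of the
- * per-channel register map:
- */
-#if 0	/* standalone example, compilable as plain C */
-static unsigned long bmdma_reg(unsigned long bar, int channel, int reg)
-{
-	/* reg: 0 = command, 2 = status, 4 = PRD table pointer */
-	return bar + (channel ? 8 : 0) + reg;
-}
-#endif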
-
-int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 dma_stat;
-
- if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
- goto out;
-
- if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
- if (ide_pci_clear_simplex(hwif->dma_base, d->name))
- printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
- d->name, pci_name(dev));
- goto out;
- }
-
- /*
- * If the device claims "simplex" DMA, this means that only one of
- * the two interfaces can be trusted with DMA at any point in time
- * (so we should enable DMA only on one of the two interfaces).
- *
- * FIXME: At this point we haven't probed the drives so we can't make
- * the appropriate decision. Really we should defer this problem until
- * we tune the drive and then try to grab DMA ownership if we want to be
- * the DMA end. This has to become dynamic to handle hot-plug.
- */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
- if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
- printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
- d->name, pci_name(dev));
- return -1;
- }
-out:
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
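-
-/*
- * Illustrative note, not part of the original driver: in the SFF-8038i
- * status register read above, bit 7 is the "simplex" flag, while bits 5
- * and 6 are the per-drive "DMA capable" latches -- which is why
- * ide_pci_clear_simplex() writes back only (dma_stat & 0x60) and then
- * re-reads the register to see whether bit 7 actually cleared.
- */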
-
-/*
- * Set up BM-DMA capability (PnP BIOS should have done this)
- */
-int ide_pci_set_master(struct pci_dev *dev, const char *name)
-{
- u16 pcicmd;
-
- pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
-
- if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
- pci_set_master(dev);
-
- if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
- (pcicmd & PCI_COMMAND_MASTER) == 0) {
- printk(KERN_ERR "%s %s: error updating PCICMD\n",
- name, pci_name(dev));
- return -EIO;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_set_master);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
-
-void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
-{
- printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
- d->name, pci_name(dev),
- dev->vendor, dev->device, dev->revision);
-}
-EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
-
-/**
- * ide_pci_enable - do PCI enables
- * @dev: PCI device
- * @bars: PCI BARs mask
- * @d: IDE port info
- *
- * Enable the IDE PCI device. We attempt to enable the device in full
- * but if that fails then we only need IO space. The PCI code should
- * have setup the proper resources for us already for controllers in
- * legacy mode.
- *
- * Returns zero on success or an error code
- */
-
-static int ide_pci_enable(struct pci_dev *dev, int bars,
- const struct ide_port_info *d)
-{
- int ret;
-
- if (pci_enable_device(dev)) {
- ret = pci_enable_device_io(dev);
- if (ret < 0) {
- printk(KERN_WARNING "%s %s: couldn't enable device\n",
- d->name, pci_name(dev));
- goto out;
- }
- printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
- d->name, pci_name(dev));
- }
-
- /*
- * assume all devices can do 32-bit DMA for now, we can add
- * a DMA mask field to the struct ide_port_info if we need it
- * (or let lower level driver set the DMA mask)
- */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- if (ret < 0) {
- printk(KERN_ERR "%s %s: can't set DMA mask\n",
- d->name, pci_name(dev));
- goto out;
- }
-
- ret = pci_request_selected_regions(dev, bars, d->name);
- if (ret < 0)
- printk(KERN_ERR "%s %s: can't reserve resources\n",
- d->name, pci_name(dev));
-out:
- return ret;
-}
-
-/**
- * ide_pci_configure - configure an unconfigured device
- * @dev: PCI device
- * @d: IDE port info
- *
- * Enable and configure the PCI device we have been passed.
- * Returns zero on success or an error code.
- */
-
-static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
-{
- u16 pcicmd = 0;
- /*
- * PnP BIOS was *supposed* to have setup this device, but we
- * can do it ourselves, so long as the BIOS has assigned an IRQ
- * (or possibly the device is using a "legacy header" for IRQs).
- * Maybe the user deliberately *disabled* the device,
- * but we'll eventually ignore it again if no drives respond.
- */
- if (ide_setup_pci_baseregs(dev, d->name) ||
- pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
- printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
- d->name, pci_name(dev));
- return -ENODEV;
- }
- if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
- printk(KERN_ERR "%s %s: error accessing PCI regs\n",
- d->name, pci_name(dev));
- return -EIO;
- }
- if (!(pcicmd & PCI_COMMAND_IO)) {
- printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
- d->name, pci_name(dev));
- return -ENXIO;
- }
- return 0;
-}
-
-/**
- * ide_pci_check_iomem - check a register is I/O
- * @dev: PCI device
- * @d: IDE port info
- * @bar: BAR number
- *
- * Checks if a BAR is configured and points to MMIO space. If so,
- * return an error code. Otherwise return 0.
- */
-
-static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
- int bar)
-{
- ulong flags = pci_resource_flags(dev, bar);
-
- /* Unconfigured ? */
- if (!flags || pci_resource_len(dev, bar) == 0)
- return 0;
-
- /* I/O space */
- if (flags & IORESOURCE_IO)
- return 0;
-
- /* Bad */
- return -EINVAL;
-}
-
-/**
- * ide_hw_configure - configure a struct ide_hw instance
- * @dev: PCI device holding interface
- * @d: IDE port info
- * @port: port number
- * @hw: struct ide_hw instance corresponding to this port
- *
- * Perform the initial set up for the hardware interface structure. This
- * is done per interface port rather than per PCI device. There may be
- * more than one port per device.
- *
- * Returns zero on success or an error code.
- */
-
-static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
- unsigned int port, struct ide_hw *hw)
-{
- unsigned long ctl = 0, base = 0;
-
- if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
- if (ide_pci_check_iomem(dev, d, 2 * port) ||
- ide_pci_check_iomem(dev, d, 2 * port + 1)) {
- printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
- "reported as MEM for port %d!\n",
- d->name, pci_name(dev), port);
- return -EINVAL;
- }
-
- ctl = pci_resource_start(dev, 2*port+1);
- base = pci_resource_start(dev, 2*port);
- } else {
- /* Use default values */
- ctl = port ? 0x374 : 0x3f4;
- base = port ? 0x170 : 0x1f0;
- }
-
- if (!base || !ctl) {
- printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
- d->name, pci_name(dev), port);
- return -EINVAL;
- }
-
- memset(hw, 0, sizeof(*hw));
- hw->dev = &dev->dev;
- ide_std_init_ports(hw, base, ctl | 2);
-
- return 0;
-}
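-
-/*
- * Illustrative note, not part of the original driver: the fallback values
- * above are the ISA-era defaults -- command block at 0x1f0 (primary) or
- * 0x170 (secondary) and control block base at 0x3f4/0x374, with
- * ide_std_init_ports() using ctl | 2 (0x3f6/0x376) as the actual
- * control/altstatus register.
- */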
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-/**
- * ide_hwif_setup_dma - configure DMA interface
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Set up the DMA base for the interface. Enable the master bits as
- * necessary and attempt to bring the device DMA into a ready to use
- * state
- */
-
-int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
- ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
- (dev->class & 0x80))) {
- unsigned long base = ide_pci_dma_base(hwif, d);
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (hwif->dma_ops == NULL)
- hwif->dma_ops = &sff_dma_ops;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
- else
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
- }
-
- return 0;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
-
-/**
- * ide_setup_pci_controller - set up IDE PCI
- * @dev: PCI device
- * @bars: PCI BARs mask
- * @d: IDE port info
- * @noisy: verbose flag
- *
- * Set up the PCI and controller side of the IDE interface. This brings
- * up the PCI side of the device, checks that the device is enabled
- * and enables it if need be
- */
-
-static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
- const struct ide_port_info *d, int noisy)
-{
- int ret;
- u16 pcicmd;
-
- if (noisy)
- ide_setup_pci_noise(dev, d);
-
- ret = ide_pci_enable(dev, bars, d);
- if (ret < 0)
- goto out;
-
- ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
- if (ret < 0) {
- printk(KERN_ERR "%s %s: error accessing PCI regs\n",
- d->name, pci_name(dev));
- goto out_free_bars;
- }
- if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
- ret = ide_pci_configure(dev, d);
- if (ret < 0)
- goto out_free_bars;
- printk(KERN_INFO "%s %s: device enabled (Linux)\n",
- d->name, pci_name(dev));
- }
-
- goto out;
-
-out_free_bars:
- pci_release_selected_regions(dev, bars);
-out:
- return ret;
-}
-
-/**
- * ide_pci_setup_ports - configure ports/devices on PCI IDE
- * @dev: PCI device
- * @d: IDE port info
- * @hw: struct ide_hw instances corresponding to this PCI IDE device
- * @hws: struct ide_hw pointers table to update
- *
- * Scan the interfaces attached to this device and do any
- * necessary per port setup. Attach the devices and ask the
- * generic DMA layer to do its work for us.
- *
- * Normally called automatically from do_ide_setup_pci_device(),
- * but is also used directly as a helper function by some controllers
- * where the chipset setup is not the default PCI IDE one.
- */
-
-void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
- struct ide_hw *hw, struct ide_hw **hws)
-{
- int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
- u8 tmp;
-
- /*
- * Set up the IDE ports
- */
-
- for (port = 0; port < channels; ++port) {
- const struct ide_pci_enablebit *e = &d->enablebits[port];
-
- if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
- (tmp & e->mask) != e->val)) {
- printk(KERN_INFO "%s %s: IDE port disabled\n",
- d->name, pci_name(dev));
- continue; /* port not enabled */
- }
-
- if (ide_hw_configure(dev, d, port, hw + port))
- continue;
-
- *(hws + port) = hw + port;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
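-
-/*
- * Illustrative sketch, not part of the original driver: an enablebits
- * entry encodes "this port is enabled when (config[reg] & mask) == val".
- * The sis5513 driver, for instance, declares
- * { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, i.e. bit 1 of config
- * register 0x4a gates the primary port and bit 2 the secondary one.
- */
-#if 0	/* standalone example, compilable as plain C */
-static int port_enabled(unsigned char cfg_byte,
-			unsigned char mask, unsigned char val)
-{
-	return (cfg_byte & mask) == val;	/* mirrors the test above */
-}
-#endif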
-
-/*
- * ide_setup_pci_device() looks at the primary/secondary interfaces
- * on a PCI IDE device and, if they are enabled, prepares the IDE driver
- * for use with them. This generic code works for most PCI chipsets.
- *
- * One thing that is not standardized is the location of the
- * primary/secondary interface "enable/disable" bits. For chipsets that
- * we "know" about, this information is in the struct ide_port_info;
- * for all other chipsets, we just assume both interfaces are enabled.
- */
-static int do_ide_setup_pci_device(struct pci_dev *dev,
- const struct ide_port_info *d,
- u8 noisy)
-{
- int pciirq, ret;
-
- /*
- * Can we trust the reported IRQ?
- */
- pciirq = dev->irq;
-
- /*
-	 * This allows offboard ide-pci cards to enable a BIOS,
-	 * verify interrupt settings of split-mirror pci-config
-	 * space, place the chipset into init-mode, and/or preserve
-	 * an interrupt if the card does not support native ide.
- */
- ret = d->init_chipset ? d->init_chipset(dev) : 0;
- if (ret < 0)
- goto out;
-
- if (ide_pci_is_in_compatibility_mode(dev)) {
- if (noisy)
- printk(KERN_INFO "%s %s: not 100%% native mode: will "
- "probe irqs later\n", d->name, pci_name(dev));
- pciirq = 0;
- } else if (!pciirq && noisy) {
- printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
- d->name, pci_name(dev), pciirq);
- } else if (noisy) {
- printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
- d->name, pci_name(dev), pciirq);
- }
-
- ret = pciirq;
-out:
- return ret;
-}
-
-int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
- const struct ide_port_info *d, void *priv)
-{
- struct pci_dev *pdev[] = { dev1, dev2 };
- struct ide_host *host;
- int ret, i, n_ports = dev2 ? 4 : 2, bars;
- struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
-
- if (d->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (d->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
- for (i = 0; i < n_ports / 2; i++) {
- ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
- if (ret < 0) {
- if (i == 1)
- pci_release_selected_regions(pdev[0], bars);
- goto out;
- }
-
- ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
- }
-
- host = ide_host_alloc(d, hws, n_ports);
- if (host == NULL) {
- ret = -ENOMEM;
- goto out_free_bars;
- }
-
- host->dev[0] = &dev1->dev;
- if (dev2)
- host->dev[1] = &dev2->dev;
-
- host->host_priv = priv;
- host->irq_flags = IRQF_SHARED;
-
- pci_set_drvdata(pdev[0], host);
- if (dev2)
- pci_set_drvdata(pdev[1], host);
-
- for (i = 0; i < n_ports / 2; i++) {
- ret = do_ide_setup_pci_device(pdev[i], d, !i);
-
- /*
-		 * FIXME: Mom, mom, they stole the helper function to undo
- * do_ide_setup_pci_device() on the first device!
- */
- if (ret < 0)
- goto out_free_bars;
-
- /* fixup IRQ */
- if (ide_pci_is_in_compatibility_mode(pdev[i])) {
- hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
- hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
- } else
- hw[i*2 + 1].irq = hw[i*2].irq = ret;
- }
-
- ret = ide_host_register(host, d, hws);
- if (ret)
- ide_host_free(host);
- else
- goto out;
-
-out_free_bars:
- i = n_ports / 2;
- while (i--)
- pci_release_selected_regions(pdev[i], bars);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ide_pci_init_two);
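-
-/*
- * Illustrative note, not part of the original driver: "bars" is the BAR
- * bitmask handed to pci_request_selected_regions().  (1 << 2) - 1 = 0x3
- * selects BARs 0-1 (single-port chip), (1 << 4) - 1 = 0xf selects BARs
- * 0-3 (two ports), and the DMA-capable case additionally ORs in bit 4
- * (the bus-master BAR), or bit 2 on CS5520.
- */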
-
-int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
- void *priv)
-{
- return ide_pci_init_two(dev, NULL, d, priv);
-}
-EXPORT_SYMBOL_GPL(ide_pci_init_one);
-
-void ide_pci_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
- int bars;
-
- if (host->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (host->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
- ide_host_remove(host);
-
- if (dev2)
- pci_release_selected_regions(dev2, bars);
- pci_release_selected_regions(dev, bars);
-
- if (dev2)
- pci_disable_device(dev2);
- pci_disable_device(dev);
-}
-EXPORT_SYMBOL_GPL(ide_pci_remove);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_suspend);
-
-int ide_pci_resume(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- int rc;
-
- pci_set_power_state(dev, PCI_D0);
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_restore_state(dev);
- pci_set_master(dev);
-
- if (host->init_chipset)
- host->init_chipset(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_resume);
-#endif
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
deleted file mode 100644
index c4b20f350b84..000000000000
--- a/drivers/ide/siimage.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2007-2008 MontaVista Software, Inc.
- * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Documentation for CMD680:
- * http://gkernel.sourceforge.net/specs/sii/sii-0680a-v1.31.pdf.bz2
- *
- * Documentation for SiI 3112:
- * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
- *
- * Errata and other documentation only available under NDA.
- *
- *
- * FAQ Items:
- * If you are using Marvell SATA-IDE adapters with Maxtor drives
- * ensure the system is set up for ATA100/UDMA5, not UDMA6.
- *
- * If you are using WD drives with SATA bridges you must set the
- * drive to "Single". "Master" will hang.
- *
- * If you have strange problems with nVidia chipset systems please
- * see the SI support documentation and update your system BIOS
- * if necessary
- *	if necessary.
- * The Dell DRAC4 has some interesting features including effectively hot
- * unplugging/replugging the virtual CD interface when the DRAC is reset.
- * This often causes drivers/ide/siimage to panic but is ok with the rather
- * smarter code in libata.
- *
- * TODO:
- * - VDMA support
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#define DRV_NAME "siimage"
-
-/**
- * pdev_is_sata - check if device is SATA
- * @pdev: PCI device to check
- *
- * Returns true if this is a SATA controller
- */
-
-static int pdev_is_sata(struct pci_dev *pdev)
-{
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- switch (pdev->device) {
- case PCI_DEVICE_ID_SII_3112:
- case PCI_DEVICE_ID_SII_1210SA:
- return 1;
- case PCI_DEVICE_ID_SII_680:
- return 0;
- }
- BUG();
-#endif
- return 0;
-}
-
-/**
- * is_sata - check if hwif is SATA
- * @hwif: interface to check
- *
- * Returns true if this is a SATA controller
- */
-
-static inline int is_sata(ide_hwif_t *hwif)
-{
- return pdev_is_sata(to_pci_dev(hwif->dev));
-}
-
-/**
- * siimage_selreg - return register base
- * @hwif: interface
- * @r: config offset
- *
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
- * Thankfully this is a configuration operation, so isn't performance
- * critical.
- */
-
-static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
-{
- unsigned long base = (unsigned long)hwif->hwif_data;
-
- base += 0xA0 + r;
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- base += hwif->channel << 6;
- else
- base += hwif->channel << 4;
- return base;
-}
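-
-/*
- * Illustrative sketch, not part of the original driver: the two channels'
- * register blocks are 0x40 bytes apart in MMIO mode but only 0x10 bytes
- * apart in PCI config space, so siimage_selreg() computes, in effect:
- */
-#if 0	/* standalone example, compilable as plain C */
-static unsigned long sii_reg(unsigned long base, int mmio, int channel, int r)
-{
-	return base + 0xA0 + r + ((unsigned long)channel << (mmio ? 6 : 4));
-}
-#endif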
-
-/**
- * siimage_seldev - return register base
- * @drive: IDE device
- * @r: config offset
- *
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
- * including accounting for the unit shift.
- */
-
-static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long base = (unsigned long)hwif->hwif_data;
- u8 unit = drive->dn & 1;
-
- base += 0xA0 + r;
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- base += hwif->channel << 6;
- else
- base += hwif->channel << 4;
-	base |= unit << unit;	/* 0 for the master, 2 for the slave device */
- return base;
-}
-
-static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- u8 tmp = 0;
-
- if (host->host_priv)
- tmp = readb((void __iomem *)addr);
- else
- pci_read_config_byte(dev, addr, &tmp);
-
- return tmp;
-}
-
-static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- u16 tmp = 0;
-
- if (host->host_priv)
- tmp = readw((void __iomem *)addr);
- else
- pci_read_config_word(dev, addr, &tmp);
-
- return tmp;
-}
-
-static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writeb(val, (void __iomem *)addr);
- else
- pci_write_config_byte(dev, addr, val);
-}
-
-static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writew(val, (void __iomem *)addr);
- else
- pci_write_config_word(dev, addr, val);
-}
-
-static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writel(val, (void __iomem *)addr);
- else
- pci_write_config_dword(dev, addr, val);
-}
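-
-/*
- * Illustrative note, not part of the original driver: the sil_ioread/
- * sil_iowrite helpers above dispatch on host->host_priv, which
- * siimage_init_one() sets to the ioremapped BAR5 (or leaves NULL if the
- * MMIO mapping failed), so the same "addr" argument is interpreted either
- * as an MMIO address or as a PCI config-space offset.
- */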
-
-/**
- * sil_pata_udma_filter - compute UDMA mask
- * @drive: IDE device
- *
- * Compute the available UDMA speeds for the device on the interface.
- *
- * For the CMD680 this depends on the clocking mode (scsc), for the
- * SI3112 SATA controller life is a bit simpler.
- */
-
-static u8 sil_pata_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = (unsigned long)hwif->hwif_data;
- u8 scsc, mask = 0;
-
- base += (hwif->host_flags & IDE_HFLAG_MMIO) ? 0x4A : 0x8A;
-
- scsc = sil_ioread8(dev, base);
-
- switch (scsc & 0x30) {
- case 0x10: /* 133 */
- mask = ATA_UDMA6;
- break;
- case 0x20: /* 2xPCI */
- mask = ATA_UDMA6;
- break;
- case 0x00: /* 100 */
- mask = ATA_UDMA5;
- break;
- default: /* Disabled ? */
- BUG();
- }
-
- return mask;
-}
-
-static u8 sil_sata_udma_filter(ide_drive_t *drive)
-{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- return strstr(m, "Maxtor") ? ATA_UDMA5 : ATA_UDMA6;
-}
-
-/**
- * sil_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Load the timing settings for this device mode into the
- * controller.
- */
-
-static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
- static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- u32 speedt = 0;
- u16 speedp = 0;
- unsigned long addr = siimage_seldev(drive, 0x04);
- unsigned long tfaddr = siimage_selreg(hwif, 0x02);
- unsigned long base = (unsigned long)hwif->hwif_data;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 tf_pio = pio;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
- : (mmio ? 0xB4 : 0x80);
- u8 mode = 0;
- u8 unit = drive->dn & 1;
-
- /* trim *taskfile* PIO to the slowest of the master/slave */
- if (pair) {
- u8 pair_pio = pair->pio_mode - XFER_PIO_0;
-
- if (pair_pio < tf_pio)
- tf_pio = pair_pio;
- }
-
- /* cheat for now and use the docs */
- speedp = data_speed[pio];
- speedt = tf_speed[tf_pio];
-
- sil_iowrite16(dev, speedp, addr);
- sil_iowrite16(dev, speedt, tfaddr);
-
- /* now set up IORDY */
- speedp = sil_ioread16(dev, tfaddr - 2);
- speedp &= ~0x200;
-
- mode = sil_ioread8(dev, base + addr_mask);
- mode &= ~(unit ? 0x30 : 0x03);
-
- if (ide_pio_need_iordy(drive, pio)) {
- speedp |= 0x200;
- mode |= unit ? 0x10 : 0x01;
- }
-
- sil_iowrite16(dev, speedp, tfaddr - 2);
- sil_iowrite8(dev, mode, base + addr_mask);
-}
-
-/**
- * sil_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the SiI chipset for the desired DMA mode.
- */
-
-static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
- static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
- static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = (unsigned long)hwif->hwif_data;
- u16 ultra = 0, multi = 0;
- u8 mode = 0, unit = drive->dn & 1;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 scsc = 0, addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
- : (mmio ? 0xB4 : 0x80);
- unsigned long ma = siimage_seldev(drive, 0x08);
- unsigned long ua = siimage_seldev(drive, 0x0C);
- const u8 speed = drive->dma_mode;
-
- scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
- mode = sil_ioread8 (dev, base + addr_mask);
- multi = sil_ioread16(dev, ma);
- ultra = sil_ioread16(dev, ua);
-
- mode &= ~(unit ? 0x30 : 0x03);
- ultra &= ~0x3F;
- scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
-
- scsc = is_sata(hwif) ? 1 : scsc;
-
- if (speed >= XFER_UDMA_0) {
- multi = dma[2];
- ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
- ultra5[speed - XFER_UDMA_0];
- mode |= unit ? 0x30 : 0x03;
- } else {
- multi = dma[speed - XFER_MW_DMA_0];
- mode |= unit ? 0x20 : 0x02;
- }
-
- sil_iowrite8 (dev, mode, base + addr_mask);
- sil_iowrite16(dev, multi, ma);
- sil_iowrite16(dev, ultra, ua);
-}
-
-static int sil_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long addr = siimage_selreg(hwif, 1);
- u8 val = sil_ioread8(dev, addr);
-
- /* Return 1 if INTRQ asserted */
- return (val & 8) ? 1 : 0;
-}
-
-/**
- * siimage_mmio_dma_test_irq - check we caused an IRQ
- * @drive: drive we are testing
- *
- * Check if we caused an IDE DMA interrupt. We may also have caused
- * SATA status interrupts, if so we clean them up and continue.
- */
-
-static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *sata_error_addr
- = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
-
- if (sata_error_addr) {
- unsigned long base = (unsigned long)hwif->hwif_data;
- u32 ext_stat = readl((void __iomem *)(base + 0x10));
- u8 watchdog = 0;
-
- if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
- u32 sata_error = readl(sata_error_addr);
-
- writel(sata_error, sata_error_addr);
- watchdog = (sata_error & 0x00680000) ? 1 : 0;
- printk(KERN_WARNING "%s: sata_error = 0x%08x, "
- "watchdog = %d, %s\n",
- drive->name, sata_error, watchdog, __func__);
- } else
- watchdog = (ext_stat & 0x8000) ? 1 : 0;
-
- ext_stat >>= 16;
- if (!(ext_stat & 0x0404) && !watchdog)
- return 0;
- }
-
- /* return 1 if INTR asserted */
- if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
- return 1;
-
- return 0;
-}
-
-static int siimage_dma_test_irq(ide_drive_t *drive)
-{
- if (drive->hwif->host_flags & IDE_HFLAG_MMIO)
- return siimage_mmio_dma_test_irq(drive);
- else
- return ide_dma_test_irq(drive);
-}
-
-/**
- * sil_sata_reset_poll - wait for SATA reset
- * @drive: drive we are resetting
- *
- * Poll the SATA phy and see whether it has come back from the dead
- * yet.
- */
-
-static blk_status_t sil_sata_reset_poll(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *sata_status_addr
- = (void __iomem *)hwif->sata_scr[SATA_STATUS_OFFSET];
-
- if (sata_status_addr) {
- /* SATA Status is available only when in MMIO mode */
- u32 sata_stat = readl(sata_status_addr);
-
- if ((sata_stat & 0x03) != 0x03) {
- printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
- hwif->name, sata_stat);
- return BLK_STS_IOERR;
- }
- }
-
- return BLK_STS_OK;
-}
-
-/**
- * sil_sata_pre_reset - reset hook
- * @drive: IDE device being reset
- *
- * For the SATA devices we need to handle recalibration/geometry
- * differently
- */
-
-static void sil_sata_pre_reset(ide_drive_t *drive)
-{
- if (drive->media == ide_disk) {
- drive->special_flags &=
- ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
- }
-}
-
-/**
- * init_chipset_siimage - set up an SI device
- * @dev: PCI device
- *
- * Perform the initial PCI set up for this device. Attempt to switch
- * to 133 MHz clocking if the system isn't already set up to do it.
- */
-
-static int init_chipset_siimage(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- void __iomem *ioaddr = host->host_priv;
- unsigned long base, scsc_addr;
- u8 rev = dev->revision, tmp;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
-
- if (ioaddr)
- pci_set_master(dev);
-
- base = (unsigned long)ioaddr;
-
- if (ioaddr && pdev_is_sata(dev)) {
- u32 tmp32, irq_mask;
-
- /* make sure IDE0/1 interrupts are not masked */
- irq_mask = (1 << 22) | (1 << 23);
- tmp32 = readl(ioaddr + 0x48);
- if (tmp32 & irq_mask) {
- tmp32 &= ~irq_mask;
- writel(tmp32, ioaddr + 0x48);
- readl(ioaddr + 0x48); /* flush */
- }
- writel(0, ioaddr + 0x148);
- writel(0, ioaddr + 0x1C8);
- }
-
- sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
- sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
-
- scsc_addr = base ? (base + 0x4A) : 0x8A;
- tmp = sil_ioread8(dev, scsc_addr);
-
- switch (tmp & 0x30) {
- case 0x00:
- /* On 100 MHz clocking, try and switch to 133 MHz */
- sil_iowrite8(dev, tmp | 0x10, scsc_addr);
- break;
-	case 0x30:
-		/* Clocking is disabled, attempt to force 133 MHz clocking. */
-		sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
-		fallthrough;
-	case 0x10:
-		/* On 133 MHz clocking. */
-		break;
- case 0x20:
- /* On PCIx2 clocking. */
- break;
- }
-
- tmp = sil_ioread8(dev, scsc_addr);
-
- sil_iowrite8 (dev, 0x72, base + 0xA1);
- sil_iowrite16(dev, 0x328A, base + 0xA2);
- sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
- sil_iowrite32(dev, 0x43924392, base + 0xA8);
- sil_iowrite32(dev, 0x40094009, base + 0xAC);
- sil_iowrite8 (dev, 0x72, base ? (base + 0xE1) : 0xB1);
- sil_iowrite16(dev, 0x328A, base ? (base + 0xE2) : 0xB2);
- sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
- sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
- sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
-
- if (base && pdev_is_sata(dev)) {
- writel(0xFFFF0000, ioaddr + 0x108);
- writel(0xFFFF0000, ioaddr + 0x188);
- writel(0x00680000, ioaddr + 0x148);
- writel(0x00680000, ioaddr + 0x1C8);
- }
-
- /* report the clocking mode of the controller */
- if (!pdev_is_sata(dev)) {
- static const char *clk_str[] =
- { "== 100", "== 133", "== 2X PCI", "DISABLED!" };
-
- tmp >>= 4;
- printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n",
- pci_name(dev), clk_str[tmp & 3]);
- }
-
- return 0;
-}
-
-/**
- * init_mmio_iops_siimage - set up the iops for MMIO
- * @hwif: interface to set up
- *
- * The basic setup here is fairly simple, we can use standard MMIO
- * operations. However we do have to set the taskfile register offsets
- * by hand as there isn't a standard defined layout for them this time.
- *
- * The hardware supports buffered taskfiles and also some rather nice
- * extended PRD tables. For better SiI3112 support use the libata driver.
- */
-
-static void init_mmio_iops_siimage(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- void *addr = host->host_priv;
- u8 ch = hwif->channel;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long base;
-
- /*
- * Fill in the basic hwif bits
- */
- hwif->host_flags |= IDE_HFLAG_MMIO;
-
- hwif->hwif_data = addr;
-
- /*
- * Now set up the hw. We have to do this ourselves as the
- * MMIO layout isn't the same as the standard port based I/O.
- */
- memset(io_ports, 0, sizeof(*io_ports));
-
- base = (unsigned long)addr;
- if (ch)
- base += 0xC0;
- else
- base += 0x80;
-
- /*
- * The buffered task file doesn't have status/control, so we
- * can't currently use it sanely since we want to use LBA48 mode.
- */
- io_ports->data_addr = base;
- io_ports->error_addr = base + 1;
- io_ports->nsect_addr = base + 2;
- io_ports->lbal_addr = base + 3;
- io_ports->lbam_addr = base + 4;
- io_ports->lbah_addr = base + 5;
- io_ports->device_addr = base + 6;
- io_ports->status_addr = base + 7;
- io_ports->ctl_addr = base + 10;
-
- if (pdev_is_sata(dev)) {
- base = (unsigned long)addr;
- if (ch)
- base += 0x80;
- hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104;
- hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108;
- hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
- }
-
- hwif->irq = dev->irq;
-
- hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
-}
-
-static int is_dev_seagate_sata(ide_drive_t *drive)
-{
- const char *s = (const char *)&drive->id[ATA_ID_PROD];
- unsigned len = strnlen(s, ATA_ID_PROD_LEN);
-
- if ((len > 4) && (!memcmp(s, "ST", 2)))
- if ((!memcmp(s + len - 2, "AS", 2)) ||
- (!memcmp(s + len - 3, "ASL", 3))) {
- printk(KERN_INFO "%s: applying pessimistic Seagate "
- "errata fix\n", drive->name);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * sil_quirkproc - post probe fixups
- * @drive: drive
- *
- * Called after drive probe we use this to decide whether the
- * Seagate fixup must be applied. This used to be in init_iops but
- * that can occur before we know what drives are present.
- */
-
-static void sil_quirkproc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
-	/* Try to raise the rqsize */
- if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
- hwif->rqsize = 128;
-}
-
-/**
- * init_iops_siimage - set up iops
- * @hwif: interface to set up
- *
- * Do the basic setup for the SIIMAGE hardware interface
- * and then do the MMIO setup if we can. This is the first
- * chance we get to set up the hwif, so we use it to get
- * the iops right before they are used.
- */
-
-static void init_iops_siimage(ide_hwif_t *hwif)
-{
- struct ide_host *host = dev_get_drvdata(hwif->dev);
-
- hwif->hwif_data = NULL;
-
- /* Pessimal until we finish probing */
- hwif->rqsize = 15;
-
- if (host->host_priv)
- init_mmio_iops_siimage(hwif);
-}
-
-/**
- * sil_cable_detect - cable detection
- * @hwif: interface to check
- *
- * Check for the presence of an ATA66 capable cable on the interface.
- */
-
-static u8 sil_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long addr = siimage_selreg(hwif, 0);
- u8 ata66 = sil_ioread8(dev, addr);
-
- return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops sil_pata_port_ops = {
- .set_pio_mode = sil_set_pio_mode,
- .set_dma_mode = sil_set_dma_mode,
- .quirkproc = sil_quirkproc,
- .test_irq = sil_test_irq,
- .udma_filter = sil_pata_udma_filter,
- .cable_detect = sil_cable_detect,
-};
-
-static const struct ide_port_ops sil_sata_port_ops = {
- .set_pio_mode = sil_set_pio_mode,
- .set_dma_mode = sil_set_dma_mode,
- .reset_poll = sil_sata_reset_poll,
- .pre_reset = sil_sata_pre_reset,
- .quirkproc = sil_quirkproc,
- .test_irq = sil_test_irq,
- .udma_filter = sil_sata_udma_filter,
- .cable_detect = sil_cable_detect,
-};
-
-static const struct ide_dma_ops sil_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = siimage_dma_test_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-#define DECLARE_SII_DEV(p_ops) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_siimage, \
- .init_iops = init_iops_siimage, \
- .port_ops = p_ops, \
- .dma_ops = &sil_dma_ops, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = ATA_UDMA6, \
- }
-
-static const struct ide_port_info siimage_chipsets[] = {
- /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
- /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
-};
-
-/**
- * siimage_init_one - PCI layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an SiI680 or SiI3112 controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- void __iomem *ioaddr = NULL;
- resource_size_t bar5 = pci_resource_start(dev, 5);
- unsigned long barsize = pci_resource_len(dev, 5);
- int rc;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- u8 BA5_EN;
-
- d = siimage_chipsets[idx];
-
- if (idx) {
- static int first = 1;
-
- if (first) {
- printk(KERN_INFO DRV_NAME ": For full SATA support you "
- "should use the libata sata_sil module.\n");
- first = 0;
- }
-
- d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- }
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_read_config_byte(dev, 0x8A, &BA5_EN);
- if ((BA5_EN & 0x01) || bar5) {
- /*
- * Drop back to PIO if we can't map the MMIO. Some systems
- * seem to get terminally confused in the PCI spaces.
- */
- if (!request_mem_region(bar5, barsize, d.name)) {
- printk(KERN_WARNING DRV_NAME " %s: MMIO ports not "
- "available\n", pci_name(dev));
- } else {
- ioaddr = pci_ioremap_bar(dev, 5);
- if (ioaddr == NULL)
- release_mem_region(bar5, barsize);
- }
- }
-
- rc = ide_pci_init_one(dev, &d, ioaddr);
- if (rc) {
- if (ioaddr) {
- iounmap(ioaddr);
- release_mem_region(bar5, barsize);
- }
- pci_disable_device(dev);
- }
-
- return rc;
-}
-
-static void siimage_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- void __iomem *ioaddr = host->host_priv;
-
- ide_pci_remove(dev);
-
- if (ioaddr) {
- resource_size_t bar5 = pci_resource_start(dev, 5);
- unsigned long barsize = pci_resource_len(dev, 5);
-
- iounmap(ioaddr);
- release_mem_region(bar5, barsize);
- }
-
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id siimage_pci_tbl[] = {
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 1 },
-#endif
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, siimage_pci_tbl);
-
-static struct pci_driver siimage_pci_driver = {
- .name = "SiI_IDE",
- .id_table = siimage_pci_tbl,
- .probe = siimage_init_one,
- .remove = siimage_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init siimage_ide_init(void)
-{
- return ide_pci_register_driver(&siimage_pci_driver);
-}
-
-static void __exit siimage_ide_exit(void)
-{
- pci_unregister_driver(&siimage_pci_driver);
-}
-
-module_init(siimage_ide_init);
-module_exit(siimage_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for SiI IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
deleted file mode 100644
index 1a700bef6c56..000000000000
--- a/drivers/ide/sis5513.c
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
- * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- *
- * Thanks :
- *
- * SiS Taiwan : for direct support and hardware.
- * Daniela Engert : for initial ATA100 advices and numerous others.
- * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
- * for checking code correctness, providing patches.
- *
- *
- * Original tests and design on the SiS620 chipset.
- * ATA100 tests and design on the SiS735 chipset.
- * ATA16/33 support from specs
- * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
- * ATA133 961/962/963 fixes by Vojtech Pavlik <vojtech@suse.cz>
- *
- * Documentation:
- * SiS chipset documentation available under NDA to companies only
- * (not to individuals).
- */
-
-/*
- * The original SiS5513 comes from a SiS5511/55112/5513 chipset. The original
- * SiS5513 was also used in the SiS5596/5513 chipset. Thus if we see a SiS5511
- * or SiS5596, we can assume we see the first MWDMA-16 capable SiS5513 chip.
- *
- * Later SiS chipsets integrated the 5513 functionality into the NorthBridge,
- * starting with SiS5571 and up to SiS745. The PCI ID didn't change, though. We
- * can figure out that we have a more modern and more capable 5513 by looking
- * for the respective NorthBridge IDs.
- *
- * Even later (96x family) SiS chipsets use the MuTIOL link and place the 5513
- * into the SouthBridge. Here we cannot rely on looking up the NorthBridge PCI
- * ID, as the now ATA-133 capable 5513 still has the same PCI ID.
- * Fortunately the 5513 can be 'unmasked' by fiddling with some config space
- * bits, changing its device id to the true one - 5517 for 961 and 5518 for
- * 962/963.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "sis5513"
-
-/* registers layout and init values are chipset family dependent */
-#undef ATA_16
-#define ATA_16 0x01
-#define ATA_33 0x02
-#define ATA_66 0x03
-#define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */
-#define ATA_100 0x05
-#define ATA_133a 0x06 /* SiS961b with 133 support */
-#define ATA_133 0x07 /* SiS962/963 */
-
-static u8 chipset_family;
-
-/*
- * Devices supported
- */
-static const struct {
- const char *name;
- u16 host_id;
- u8 chipset_family;
- u8 flags;
-} SiSHostChipInfo[] = {
- { "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 },
- { "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 },
- { "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 },
- { "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 },
- { "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 },
- { "SiS733", PCI_DEVICE_ID_SI_733, ATA_100 },
- { "SiS635", PCI_DEVICE_ID_SI_635, ATA_100 },
- { "SiS633", PCI_DEVICE_ID_SI_633, ATA_100 },
-
- { "SiS730", PCI_DEVICE_ID_SI_730, ATA_100a },
- { "SiS550", PCI_DEVICE_ID_SI_550, ATA_100a },
-
- { "SiS640", PCI_DEVICE_ID_SI_640, ATA_66 },
- { "SiS630", PCI_DEVICE_ID_SI_630, ATA_66 },
- { "SiS620", PCI_DEVICE_ID_SI_620, ATA_66 },
- { "SiS540", PCI_DEVICE_ID_SI_540, ATA_66 },
- { "SiS530", PCI_DEVICE_ID_SI_530, ATA_66 },
-
- { "SiS5600", PCI_DEVICE_ID_SI_5600, ATA_33 },
- { "SiS5598", PCI_DEVICE_ID_SI_5598, ATA_33 },
- { "SiS5597", PCI_DEVICE_ID_SI_5597, ATA_33 },
- { "SiS5591/2", PCI_DEVICE_ID_SI_5591, ATA_33 },
- { "SiS5582", PCI_DEVICE_ID_SI_5582, ATA_33 },
- { "SiS5581", PCI_DEVICE_ID_SI_5581, ATA_33 },
-
- { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 },
- { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 },
- { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 },
- { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 },
-};
-
-/* Cycle time bits and values vary across chip DMA capabilities.
-   These three arrays hold the register layout and the values to set.
-   Indexed by chipset_family and (dma_mode - XFER_UDMA_0). */
-
-/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */
-static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 };
-static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 };
-static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */
- { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */
- { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific),
- different cycle_time range and offset */
- { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */
- { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */
- { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */
-};
-/* CRC valid setup time varies across the IDE clock settings 33/66/100/133.
-   See the SiS962 data sheet for more detail. */
-static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 2, 1, 1, 0, 0, 0, 0 },
- { 4, 3, 2, 1, 0, 0, 0 },
- { 4, 3, 2, 1, 0, 0, 0 },
- { 6, 4, 3, 1, 1, 1, 0 },
- { 9, 6, 4, 2, 2, 2, 2 },
- { 9, 6, 4, 2, 2, 2, 2 },
-};
-/* Initialize time, Active time, Recovery time vary across
- IDE clock settings. These 3 arrays hold the register value
- for PIO0/1/2/3/4 and DMA0/1/2 mode in order */
-static u8 ini_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 2, 1, 0, 0, 0, 1, 0, 0 },
- { 4, 3, 1, 1, 1, 3, 1, 1 },
- { 4, 3, 1, 1, 1, 3, 1, 1 },
- { 6, 4, 2, 2, 2, 4, 2, 2 },
- { 9, 6, 3, 3, 3, 6, 3, 3 },
- { 9, 6, 3, 3, 3, 6, 3, 3 },
-};
-static u8 act_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 9, 9, 9, 2, 2, 7, 2, 2 },
- { 19, 19, 19, 5, 4, 14, 5, 4 },
- { 19, 19, 19, 5, 4, 14, 5, 4 },
- { 28, 28, 28, 7, 6, 21, 7, 6 },
- { 38, 38, 38, 10, 9, 28, 10, 9 },
- { 38, 38, 38, 10, 9, 28, 10, 9 },
-};
-static u8 rco_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 9, 2, 0, 2, 0, 7, 1, 1 },
- { 19, 5, 1, 5, 2, 16, 3, 2 },
- { 19, 5, 1, 5, 2, 16, 3, 2 },
- { 30, 9, 3, 9, 4, 25, 6, 4 },
- { 40, 12, 4, 12, 5, 34, 12, 5 },
- { 40, 12, 4, 12, 5, 34, 12, 5 },
-};
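-
-/*
- * Illustrative note, not part of the original driver: every array above is
- * indexed first by chipset_family (ATA_16 ... ATA_133) and then by mode:
- * the UDMA tables by (dma_mode - XFER_UDMA_0), and ini/act/rco_time_value
- * by PIO0-4 in slots 0-4 followed by MWDMA0-2 in slots 5-7, matching the
- * "idx = mode - XFER_MW_DMA_0 + 5" computation in
- * sis_ata133_program_timings() below.
- */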
-
-/*
- * Printing configuration
- */
-/* Used for chipset type printing at boot time */
-static char *chipset_capability[] = {
- "ATA", "ATA 16",
- "ATA 33", "ATA 66",
- "ATA 100 (1st gen)", "ATA 100 (2nd gen)",
- "ATA 133 (1st gen)", "ATA 133 (2nd gen)"
-};
-
-/*
- * Configuration functions
- */
-
-static u8 sis_ata133_get_base(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 reg54 = 0;
-
- pci_read_config_dword(dev, 0x54, &reg54);
-
- return ((reg54 & 0x40000000) ? 0x70 : 0x40) + drive->dn * 4;
-}
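-
-/*
- * Illustrative note, not part of the original driver: bit 30 of config
- * dword 0x54 selects between two register banks, so the per-drive timing
- * dwords end up at 0x40/0x44/0x48/0x4c or 0x70/0x74/0x78/0x7c for
- * drives 0-3 (hence drive->dn * 4).
- */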
-
-static void sis_ata16_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u16 t1 = 0;
- u8 drive_pci = 0x40 + drive->dn * 2;
-
- const u16 pio_timings[] = { 0x000, 0x607, 0x404, 0x303, 0x301 };
- const u16 mwdma_timings[] = { 0x008, 0x302, 0x301 };
-
- pci_read_config_word(dev, drive_pci, &t1);
-
- /* clear active/recovery timings */
- t1 &= ~0x070f;
- if (mode >= XFER_MW_DMA_0) {
- if (chipset_family > ATA_16)
- t1 &= ~0x8000; /* disable UDMA */
- t1 |= mwdma_timings[mode - XFER_MW_DMA_0];
- } else
- t1 |= pio_timings[mode - XFER_PIO_0];
-
- pci_write_config_word(dev, drive_pci, t1);
-}
-
-static void sis_ata100_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 t1, drive_pci = 0x40 + drive->dn * 2;
-
- /* timing bits: 7:4 active 3:0 recovery */
- const u8 pio_timings[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
- const u8 mwdma_timings[] = { 0x08, 0x32, 0x31 };
-
- if (mode >= XFER_MW_DMA_0) {
- u8 t2 = 0;
-
- pci_read_config_byte(dev, drive_pci, &t2);
- t2 &= ~0x80; /* disable UDMA */
- pci_write_config_byte(dev, drive_pci, t2);
-
- t1 = mwdma_timings[mode - XFER_MW_DMA_0];
- } else
- t1 = pio_timings[mode - XFER_PIO_0];
-
- pci_write_config_byte(dev, drive_pci + 1, t1);
-}
-
-static void sis_ata133_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 t1 = 0;
- u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
-
- pci_read_config_dword(dev, drive_pci, &t1);
-
- t1 &= 0xc0c00fff;
- clk = (t1 & 0x08) ? ATA_133 : ATA_100;
- if (mode >= XFER_MW_DMA_0) {
- t1 &= ~0x04; /* disable UDMA */
- idx = mode - XFER_MW_DMA_0 + 5;
- } else
- idx = mode - XFER_PIO_0;
- t1 |= ini_time_value[clk][idx] << 12;
- t1 |= act_time_value[clk][idx] << 16;
- t1 |= rco_time_value[clk][idx] << 24;
-
- pci_write_config_dword(dev, drive_pci, t1);
-}
-
-static void sis_program_timings(ide_drive_t *drive, const u8 mode)
-{
- if (chipset_family < ATA_100) /* ATA_16/33/66/100a */
- sis_ata16_program_timings(drive, mode);
- else if (chipset_family < ATA_133) /* ATA_100/133a */
- sis_ata100_program_timings(drive, mode);
- else /* ATA_133 */
- sis_ata133_program_timings(drive, mode);
-}
-
-static void config_drive_art_rwp(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg4bh = 0;
- u8 rw_prefetch = 0;
-
- pci_read_config_byte(dev, 0x4b, &reg4bh);
-
- rw_prefetch = reg4bh & ~(0x11 << drive->dn);
-
- if (drive->media == ide_disk)
- rw_prefetch |= 0x11 << drive->dn;
-
- if (reg4bh != rw_prefetch)
- pci_write_config_byte(dev, 0x4b, rw_prefetch);
-}
-
-static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- config_drive_art_rwp(drive);
- sis_program_timings(drive, drive->pio_mode);
-}
-
-static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 regdw = 0;
- u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
-
- pci_read_config_dword(dev, drive_pci, &regdw);
-
- regdw |= 0x04;
- regdw &= 0xfffff00f;
-	/* check if ATA133 is enabled */
- clk = (regdw & 0x08) ? ATA_133 : ATA_100;
- idx = mode - XFER_UDMA_0;
- regdw |= cycle_time_value[clk][idx] << 4;
- regdw |= cvs_time_value[clk][idx] << 8;
-
- pci_write_config_dword(dev, drive_pci, regdw);
-}
-
-static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family;
-
- pci_read_config_byte(dev, drive_pci + 1, &reg);
-
- /* force the UDMA bit on if we want to use UDMA */
- reg |= 0x80;
- /* clean reg cycle time bits */
- reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]);
- /* set reg cycle time bits */
- reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i];
-
- pci_write_config_byte(dev, drive_pci + 1, reg);
-}
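-
-/*
- * Illustrative worked example, not part of the original driver: the mask
- * expression above carves an n-bit field out of the timing byte at a
- * per-family position.  For an ATA_66 part, cycle_time_range[ATA_66] = 3
- * and cycle_time_offset[ATA_66] = 4, so the cleared field is
- * (0xff >> (8 - 3)) << 4 = 0x07 << 4 = 0x70, i.e. the UDMA cycle time
- * lives in bits 4-6 of that byte.
- */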
-
-static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- if (chipset_family >= ATA_133) /* ATA_133 */
- sis_ata133_program_udma_timings(drive, mode);
- else /* ATA_33/66/100a/100/133a */
- sis_ata33_program_udma_timings(drive, mode);
-}
-
-static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 speed = drive->dma_mode;
-
- if (speed >= XFER_UDMA_0)
- sis_program_udma_timings(drive, speed);
- else
- sis_program_timings(drive, speed);
-}
-
-static u8 sis_ata133_udma_filter(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 regdw = 0;
- u8 drive_pci = sis_ata133_get_base(drive);
-
- pci_read_config_dword(dev, drive_pci, &regdw);
-
-	/* if ATA133 is disabled, we should not set a speed above UDMA5 */
- return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
-}
-
-static int sis_find_family(struct pci_dev *dev)
-{
- struct pci_dev *host;
- int i = 0;
-
- chipset_family = 0;
-
- for (i = 0; i < ARRAY_SIZE(SiSHostChipInfo) && !chipset_family; i++) {
-
- host = pci_get_device(PCI_VENDOR_ID_SI, SiSHostChipInfo[i].host_id, NULL);
-
- if (!host)
- continue;
-
- chipset_family = SiSHostChipInfo[i].chipset_family;
-
- /* Special case for SiS630 : 630S/ET is ATA_100a */
- if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) {
- if (host->revision >= 0x30)
- chipset_family = ATA_100a;
- }
- pci_dev_put(host);
-
- printk(KERN_INFO DRV_NAME " %s: %s %s controller\n",
- pci_name(dev), SiSHostChipInfo[i].name,
- chipset_capability[chipset_family]);
- }
-
- if (!chipset_family) { /* Belongs to pci-quirks */
-
- u32 idemisc;
- u16 trueid;
-
- /* Disable ID masking and register remapping */
- pci_read_config_dword(dev, 0x54, &idemisc);
- pci_write_config_dword(dev, 0x54, (idemisc & 0x7fffffff));
- pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
- pci_write_config_dword(dev, 0x54, idemisc);
-
- if (trueid == 0x5518) {
- printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n",
- pci_name(dev));
- chipset_family = ATA_133;
-
- /* Check for 5513 compatibility mapping
- * We must use this, else the port enabled code will fail,
- * as it expects the enablebits at 0x4a.
- */
- if ((idemisc & 0x40000000) == 0) {
- pci_write_config_dword(dev, 0x54, idemisc | 0x40000000);
- printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n",
- pci_name(dev));
- }
- }
- }
-
- if (!chipset_family) { /* Belongs to pci-quirks */
-
- struct pci_dev *lpc_bridge;
- u16 trueid;
- u8 prefctl;
- u8 idecfg;
-
- pci_read_config_byte(dev, 0x4a, &idecfg);
- pci_write_config_byte(dev, 0x4a, idecfg | 0x10);
- pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
- pci_write_config_byte(dev, 0x4a, idecfg);
-
- if (trueid == 0x5517) { /* SiS 961/961B */
-
-			lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */
-			pci_read_config_byte(dev, 0x49, &prefctl);
-
-			if (lpc_bridge && lpc_bridge->revision == 0x10 &&
-			    (prefctl & 0x80)) {
-				printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n",
-					pci_name(dev));
-				chipset_family = ATA_133a;
-			} else {
-				printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n",
-					pci_name(dev));
-				chipset_family = ATA_100;
-			}
-			/* drop the bridge reference only after its revision was read */
-			pci_dev_put(lpc_bridge);
-		}
- }
-
- return chipset_family;
-}
-
-static int init_chipset_sis5513(struct pci_dev *dev)
-{
-	/* General configuration is done here:
-	   1/ tell IDE channels to operate in compatibility mode only
-	   2/ tell old chips to allow per-drive IDE timings */
-
- u8 reg;
- u16 regw;
-
- switch (chipset_family) {
- case ATA_133:
- /* SiS962 operation mode */
- pci_read_config_word(dev, 0x50, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x50, regw&0xfff7);
- pci_read_config_word(dev, 0x52, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x52, regw&0xfff7);
- break;
- case ATA_133a:
- case ATA_100:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
- /* Set compatibility bit */
- pci_read_config_byte(dev, 0x49, &reg);
- if (!(reg & 0x01))
- pci_write_config_byte(dev, 0x49, reg|0x01);
- break;
- case ATA_100a:
- case ATA_66:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
-
- /* On ATA_66 chips the bit was elsewhere */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x04))
- pci_write_config_byte(dev, 0x52, reg|0x04);
- break;
- case ATA_33:
- /* On ATA_33 we didn't have a single bit to set */
- pci_read_config_byte(dev, 0x09, &reg);
- if ((reg & 0x0f) != 0x00)
- pci_write_config_byte(dev, 0x09, reg&0xf0);
- fallthrough;
- case ATA_16:
- /* force per drive recovery and active timings
- needed on ATA_33 and below chips */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x08))
- pci_write_config_byte(dev, 0x52, reg|0x08);
- break;
- }
-
- return 0;
-}
-
-struct sis_laptop {
- u16 device;
- u16 subvendor;
- u16 subdevice;
-};
-
-static const struct sis_laptop sis_laptop[] = {
- /* devid, subvendor, subdev */
- { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
- { 0x5513, 0x1734, 0x105f }, /* FSC Amilo A1630 */
- { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
- /* end marker */
- { 0, }
-};
-
-static u8 sis_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- const struct sis_laptop *lap = &sis_laptop[0];
- u8 ata66 = 0;
-
- while (lap->device) {
- if (lap->device == pdev->device &&
- lap->subvendor == pdev->subsystem_vendor &&
- lap->subdevice == pdev->subsystem_device)
- return ATA_CBL_PATA40_SHORT;
- lap++;
- }
-
- if (chipset_family >= ATA_133) {
- u16 regw = 0;
- u16 reg_addr = hwif->channel ? 0x52: 0x50;
- pci_read_config_word(pdev, reg_addr, &regw);
- ata66 = (regw & 0x8000) ? 0 : 1;
- } else if (chipset_family >= ATA_66) {
- u8 reg48h = 0;
- u8 mask = hwif->channel ? 0x20 : 0x10;
- pci_read_config_byte(pdev, 0x48, &reg48h);
- ata66 = (reg48h & mask) ? 0 : 1;
- }
-
- return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops sis_port_ops = {
- .set_pio_mode = sis_set_pio_mode,
- .set_dma_mode = sis_set_dma_mode,
- .cable_detect = sis_cable_detect,
-};
-
-static const struct ide_port_ops sis_ata133_port_ops = {
- .set_pio_mode = sis_set_pio_mode,
- .set_dma_mode = sis_set_dma_mode,
- .udma_filter = sis_ata133_udma_filter,
- .cable_detect = sis_cable_detect,
-};
-
-static const struct ide_port_info sis5513_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sis5513,
- .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
- .host_flags = IDE_HFLAG_NO_AUTODMA,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = sis5513_chipset;
- u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- if (sis_find_family(dev) == 0)
- return -ENOTSUPP;
-
- if (chipset_family >= ATA_133)
- d.port_ops = &sis_ata133_port_ops;
- else
- d.port_ops = &sis_port_ops;
-
- d.udma_mask = udma_rates[chipset_family];
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static void sis5513_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id sis5513_pci_tbl[] = {
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5513), 0 },
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5518), 0 },
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_1180), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl);
-
-static struct pci_driver sis5513_pci_driver = {
- .name = "SIS_IDE",
- .id_table = sis5513_pci_tbl,
- .probe = sis5513_init_one,
- .remove = sis5513_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init sis5513_ide_init(void)
-{
- return ide_pci_register_driver(&sis5513_pci_driver);
-}
-
-static void __exit sis5513_ide_exit(void)
-{
- pci_unregister_driver(&sis5513_pci_driver);
-}
-
-module_init(sis5513_ide_init);
-module_exit(sis5513_ide_exit);
-
-MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech Pavlik");
-MODULE_DESCRIPTION("PCI driver module for SIS IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
deleted file mode 100644
index 5c24c420c438..000000000000
--- a/drivers/ide/sl82c105.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SL82C105/Winbond 553 IDE driver
- *
- * Maintainer unknown.
- *
- * Drive tuning added from Rebel.com's kernel sources
- * -- Russell King (15/11/98) linux@arm.linux.org.uk
- *
- * Merge in Russell's HW workarounds, fix various problems
- * with the timing registers setup.
- * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org
- *
- * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "sl82c105"
-
-/*
- * SL82C105 PCI config register 0x40 bits.
- */
-#define CTRL_IDE_IRQB (1 << 30)
-#define CTRL_IDE_IRQA (1 << 28)
-#define CTRL_LEGIRQ (1 << 11)
-#define CTRL_P1F16 (1 << 5)
-#define CTRL_P1EN (1 << 4)
-#define CTRL_P0F16 (1 << 1)
-#define CTRL_P0EN (1 << 0)
-
-/*
- * Convert a PIO mode and cycle time to the required on/off times
- * for the interface. This has protection against runaway timings.
- */
-static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned int cmd_on, cmd_off;
- u8 iordy = 0;
-
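- /* Timings are programmed in 30 ns ticks; round the active/recovery times up. */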
- cmd_on = (t->active + 29) / 30;
- cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30;
-
- if (cmd_on == 0)
- cmd_on = 1;
-
- if (cmd_off == 0)
- cmd_off = 1;
-
- if (ide_pio_need_iordy(drive, pio))
- iordy = 0x40;
-
- return (cmd_on - 1) << 8 | (cmd_off - 1) | iordy;
-}
-
-/*
- * Configure the chipset for PIO mode.
- */
-static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- int reg = 0x44 + drive->dn * 4;
- u16 drv_ctrl;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- drv_ctrl = get_pio_timings(drive, pio);
-
- /*
- * Store the PIO timings so that we can restore them
- * in case DMA will be turned off...
- */
- timings &= 0xffff0000;
- timings |= drv_ctrl;
- ide_set_drivedata(drive, (void *)timings);
-
- pci_write_config_word(dev, reg, drv_ctrl);
- pci_read_config_word (dev, reg, &drv_ctrl);
-
- printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
- ide_xfer_verbose(pio + XFER_PIO_0),
- ide_pio_cycle_time(drive, pio), drv_ctrl);
-}
-
-/*
- * Configure the chipset for DMA mode.
- */
-static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
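- /* drive control words for MWDMA0..2, same on/off-time format as the PIO timings */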
- static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u16 drv_ctrl;
- const u8 speed = drive->dma_mode;
-
- drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
-
- /*
- * Store the DMA timings so that we can actually program
- * them when DMA will be turned on...
- */
- timings &= 0x0000ffff;
- timings |= (unsigned long)drv_ctrl << 16;
- ide_set_drivedata(drive, (void *)timings);
-}
-
-static int sl82c105_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
-
- pci_read_config_dword(dev, 0x40, &val);
-
- return (val & mask) ? 1 : 0;
-}
-
-/*
- * The SL82C105 holds off all IDE interrupts while in DMA mode until
- * all DMA activity is completed. Sometimes this causes problems (eg,
- * when the drive wants to report an error condition).
- *
- * 0x7e is a "chip testing" register. Bit 2 resets the DMA controller
- * state machine. We need to kick this to work around various bugs.
- */
-static inline void sl82c105_reset_host(struct pci_dev *dev)
-{
- u16 val;
-
- pci_read_config_word(dev, 0x7e, &val);
- pci_write_config_word(dev, 0x7e, val | (1 << 2));
- pci_write_config_word(dev, 0x7e, val & ~(1 << 2));
-}
-
-/*
- * If we get an IRQ timeout, it might be that the DMA state machine
- * got confused. Fix from Todd Inglett. Details from Winbond.
- *
- * This function is called when the IDE timer expires, the drive
- * indicates that it is READY, and we were waiting for DMA to complete.
- */
-static void sl82c105_dma_lost_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
- u8 dma_cmd;
-
- printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n");
-
- /*
- * Check the raw interrupt from the drive.
- */
- pci_read_config_dword(dev, 0x40, &val);
- if (val & mask)
- printk(KERN_INFO "sl82c105: drive was requesting IRQ, "
- "but host lost it\n");
-
- /*
- * Was DMA enabled? If so, disable it - we're resetting the
- * host. The IDE layer will be handling the drive for us.
- */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- if (dma_cmd & 1) {
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- printk(KERN_INFO "sl82c105: DMA was enabled\n");
- }
-
- sl82c105_reset_host(dev);
-}
-
-/*
- * ATAPI devices can cause the SL82C105 DMA state machine to go gaga.
- * Winbond recommend that the DMA state machine is reset prior to
- * setting the bus master DMA enable bit.
- *
- * The generic IDE core will have disabled the BMEN bit before this
- * function is called.
- */
-static void sl82c105_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int reg = 0x44 + drive->dn * 4;
-
- pci_write_config_word(dev, reg,
- (unsigned long)ide_get_drivedata(drive) >> 16);
-
- sl82c105_reset_host(dev);
- ide_dma_start(drive);
-}
-
-static void sl82c105_dma_clear(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-
- sl82c105_reset_host(dev);
-}
-
-static int sl82c105_dma_end(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int reg = 0x44 + drive->dn * 4;
- int ret = ide_dma_end(drive);
-
- pci_write_config_word(dev, reg,
- (unsigned long)ide_get_drivedata(drive));
-
- return ret;
-}
-
-/*
- * An ATA reset clears the 16-bit mode bits in the control
- * register, so we need to reprogram them.
- */
-static void sl82c105_resetproc(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 val;
-
- pci_read_config_dword(dev, 0x40, &val);
- val |= (CTRL_P1F16 | CTRL_P0F16);
- pci_write_config_dword(dev, 0x40, val);
-}
-
-/*
- * Return the revision of the Winbond bridge
- * which this function is part of.
- */
-static u8 sl82c105_bridge_revision(struct pci_dev *dev)
-{
- struct pci_dev *bridge;
- u8 rev;
-
- /*
- * The bridge should be part of the same device, but function 0.
- */
- bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
- dev->bus->number,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- if (!bridge)
- return -1;
-
- /*
- * Make sure it is a Winbond 553 and is an ISA bridge.
- */
- if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
- bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
- bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
- pci_dev_put(bridge);
- return -1;
- }
- /*
- * We need function 0's revision, not function 1's; read it
- * before dropping our reference to the bridge.
- */
- rev = bridge->revision;
- pci_dev_put(bridge);
-
- return rev;
-}
-
-/*
- * Enable the PCI device
- *
- * --BenH: It's arch fixup code that should enable channels that
- * have not been enabled by firmware. I decided we can still enable
- * channel 0 here at least, but channel 1 has to be enabled by
- * firmware or arch code. We still set both to 16 bits mode.
- */
-static int init_chipset_sl82c105(struct pci_dev *dev)
-{
- u32 val;
-
- pci_read_config_dword(dev, 0x40, &val);
- val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
- pci_write_config_dword(dev, 0x40, val);
-
- return 0;
-}
-
-static const struct ide_port_ops sl82c105_port_ops = {
- .set_pio_mode = sl82c105_set_pio_mode,
- .set_dma_mode = sl82c105_set_dma_mode,
- .resetproc = sl82c105_resetproc,
- .test_irq = sl82c105_test_irq,
-};
-
-static const struct ide_dma_ops sl82c105_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = sl82c105_dma_start,
- .dma_end = sl82c105_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = sl82c105_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = sl82c105_dma_clear,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info sl82c105_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sl82c105,
- .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
- .port_ops = &sl82c105_port_ops,
- .dma_ops = &sl82c105_dma_ops,
- .host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_UNMASK_IRQS |
- IDE_HFLAG_SERIALIZE_DMA |
- IDE_HFLAG_NO_AUTODMA,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = sl82c105_chipset;
- u8 rev = sl82c105_bridge_revision(dev);
-
- if (rev <= 5) {
- /*
- * Never ever EVER under any circumstances enable
- * DMA when the bridge is this old.
- */
- printk(KERN_INFO DRV_NAME ": Winbond W83C553 bridge "
- "revision %d, BM-DMA disabled\n", rev);
- d.dma_ops = NULL;
- d.mwdma_mask = 0;
- d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id sl82c105_pci_tbl[] = {
- { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sl82c105_pci_tbl);
-
-static struct pci_driver sl82c105_pci_driver = {
- .name = "W82C105_IDE",
- .id_table = sl82c105_pci_tbl,
- .probe = sl82c105_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init sl82c105_ide_init(void)
-{
- return ide_pci_register_driver(&sl82c105_pci_driver);
-}
-
-static void __exit sl82c105_ide_exit(void)
-{
- pci_unregister_driver(&sl82c105_pci_driver);
-}
-
-module_init(sl82c105_ide_init);
-module_exit(sl82c105_ide_exit);
-
-MODULE_DESCRIPTION("PCI driver module for W82C105 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
deleted file mode 100644
index f521d5ebf916..000000000000
--- a/drivers/ide/slc90e66.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * This is a look-alike variation of the ICH0 PIIX4 Ultra-66,
- * but one that keeps the ISA bridge and its slots alive.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "slc90e66"
-
-static DEFINE_SPINLOCK(slc90e66_lock);
-
-static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = hwif->channel ? 0x42 : 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* ISP (IORDY sample point) / RTC (recovery time), in clocks */
- static const u8 timings[][2] = {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&slc90e66_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media == ide_disk)
- control |= 4; /* Prefetch, post write */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1) {
- /* Set PPE, IE and TIME */
- master_data |= control << 4;
- }
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data &= hwif->channel ? 0x0f : 0xf0;
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
- (hwif->channel ? 4 : 0);
- } else {
- master_data &= ~0x3307;
- if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data |= control;
- }
- master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&slc90e66_lock, flags);
-}
-
-static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = hwif->channel ? 0x42 : 0x40;
- int sitre = 0, a_speed = 7 << (drive->dn * 4);
- int u_speed = 0, u_flag = 1 << drive->dn;
- u16 reg4042, reg44, reg48, reg4a;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- sitre = (reg4042 & 0x4000) ? 1 : 0;
- pci_read_config_word(dev, 0x44, &reg44);
- pci_read_config_word(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
-
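- /* 0x48 holds the per-drive UDMA enable bits; 0x4a the per-drive UDMA timing nibbles */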
- if (speed >= XFER_UDMA_0) {
- u_speed = (speed - XFER_UDMA_0) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_word(dev, 0x48, reg48|u_flag);
- if ((reg4a & a_speed) != u_speed) {
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
- }
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- slc90e66_set_pio_mode(hwif, drive);
- }
-}
-
-static u8 slc90e66_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg47 = 0, mask = hwif->channel ? 0x01 : 0x02;
-
- pci_read_config_byte(dev, 0x47, &reg47);
-
- /* bit 1 (primary) / bit 0 (secondary): 0 = 80-wire, 1 = 40-wire */
- return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops slc90e66_port_ops = {
- .set_pio_mode = slc90e66_set_pio_mode,
- .set_dma_mode = slc90e66_set_dma_mode,
- .cable_detect = slc90e66_cable_detect,
-};
-
-static const struct ide_port_info slc90e66_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
- .port_ops = &slc90e66_port_ops,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2_ONLY,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA4,
-};
-
-static int slc90e66_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &slc90e66_chipset, NULL);
-}
-
-static const struct pci_device_id slc90e66_pci_tbl[] = {
- { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, slc90e66_pci_tbl);
-
-static struct pci_driver slc90e66_pci_driver = {
- .name = "SLC90e66_IDE",
- .id_table = slc90e66_pci_tbl,
- .probe = slc90e66_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init slc90e66_ide_init(void)
-{
- return ide_pci_register_driver(&slc90e66_pci_driver);
-}
-
-static void __exit slc90e66_ide_exit(void)
-{
- pci_unregister_driver(&slc90e66_pci_driver);
-}
-
-module_init(slc90e66_ide_init);
-module_exit(slc90e66_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for SLC90E66 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
deleted file mode 100644
index 17e6132b99bf..000000000000
--- a/drivers/ide/tc86c001.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (C) 2002 Toshiba Corporation
- * Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#define DRV_NAME "tc86c001"
-
-static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
- u16 mode, scr = inw(scr_port);
- const u8 speed = drive->dma_mode;
-
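- /*
- * The per-drive system control register keeps the PIO mode in
- * bits 10:8 and the (U)DMA mode in bits 7:4; the mask below
- * selects which field gets updated.
- */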
- switch (speed) {
- case XFER_UDMA_4: mode = 0x00c0; break;
- case XFER_UDMA_3: mode = 0x00b0; break;
- case XFER_UDMA_2: mode = 0x00a0; break;
- case XFER_UDMA_1: mode = 0x0090; break;
- case XFER_UDMA_0: mode = 0x0080; break;
- case XFER_MW_DMA_2: mode = 0x0070; break;
- case XFER_MW_DMA_1: mode = 0x0060; break;
- case XFER_MW_DMA_0: mode = 0x0050; break;
- case XFER_PIO_4: mode = 0x0400; break;
- case XFER_PIO_3: mode = 0x0300; break;
- case XFER_PIO_2: mode = 0x0200; break;
- case XFER_PIO_1: mode = 0x0100; break;
- case XFER_PIO_0:
- default: mode = 0x0000; break;
- }
-
- scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
- scr |= mode;
- outw(scr, scr_port);
-}
-
-static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- tc86c001_set_mode(hwif, drive);
-}
-
-/*
- * HACKITY HACK
- *
- * This is a workaround for the limitation 5 of the TC86C001 IDE controller:
- * if a DMA transfer terminates prematurely, the controller leaves the device's
- * interrupt request (INTRQ) pending and does not generate a PCI interrupt (or
- * set the interrupt bit in the DMA status register), thus no PCI interrupt
- * will occur until a DMA transfer has been successfully completed.
- *
- * We work around this by initiating dummy, zero-length DMA transfer on
- * a DMA timeout expiration. I found no better way to do this with the current
- * IDE core than to temporarily replace a higher level driver's timer expiry
- * handler with our own backing up to that handler in case our recovery fails.
- */
-static int tc86c001_timer_expiry(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_expiry_t *expiry = ide_get_hwifdata(hwif);
- u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
-
- /* Restore a higher level driver's expiry handler first. */
- hwif->expiry = expiry;
-
- if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
- unsigned long sc_base = hwif->config_data;
- unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-
- printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
- "attempting recovery...\n", drive->name);
-
- /* Stop DMA */
- outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
-
- /* Setup the dummy DMA transfer */
- outw(0, sc_base + 0x0a); /* Sector Count */
- outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
-
- /* Start the dummy DMA transfer */
-
- /* clear R_OR_WCTR for write */
- outb(0x00, hwif->dma_base + ATA_DMA_CMD);
- /* set START_STOPBM */
- outb(0x01, hwif->dma_base + ATA_DMA_CMD);
-
- /*
- * If an interrupt was pending, it should come through shortly.
- * If not, a higher level driver's expiry handler should
- * eventually cause some kind of recovery from the DMA stall.
- */
- return WAIT_MIN_SLEEP;
- }
-
- /* Chain to the restored expiry handler if DMA wasn't active. */
- if (likely(expiry != NULL))
- return expiry(drive);
-
- /* If there was no handler, "emulate" that for ide_timer_expiry()... */
- return -1;
-}
-
-static void tc86c001_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long sc_base = hwif->config_data;
- unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- unsigned long nsectors = blk_rq_sectors(hwif->rq);
-
- /*
- * We have to manually load the sector count and size into
- * the appropriate system control registers for DMA to work
- * with LBA48 and ATAPI devices...
- */
- outw(nsectors, sc_base + 0x0a); /* Sector Count */
- outw(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */
-
- /* Install our timeout expiry hook, saving the current handler... */
- ide_set_hwifdata(hwif, hwif->expiry);
- hwif->expiry = &tc86c001_timer_expiry;
-
- ide_dma_start(drive);
-}
-
-static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long sc_base = pci_resource_start(dev, 5);
- u16 scr1 = inw(sc_base + 0x00);
-
- /*
- * System Control 1 Register bit 13 (PDIAGN):
- * 0=80-pin cable, 1=40-pin cable
- */
- return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static void init_hwif_tc86c001(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long sc_base = pci_resource_start(dev, 5);
- u16 scr1 = inw(sc_base + 0x00);
-
- /* System Control 1 Register bit 15 (Soft Reset) set */
- outw(scr1 | 0x8000, sc_base + 0x00);
-
- /* System Control 1 Register bit 14 (FIFO Reset) set */
- outw(scr1 | 0x4000, sc_base + 0x00);
-
- /* System Control 1 Register: reset clear */
- outw(scr1 & ~0xc000, sc_base + 0x00);
-
- /* Store the system control register base for convenience... */
- hwif->config_data = sc_base;
-
- if (!hwif->dma_base)
- return;
-
- /*
- * Sector Count Control Register bits 0 and 1 set:
- * software sets Sector Count Register for master and slave device
- */
- outw(0x0003, sc_base + 0x0c);
-
- /* Sector Count Register limit */
- hwif->rqsize = 0xffff;
-}
-
-static const struct ide_port_ops tc86c001_port_ops = {
- .set_pio_mode = tc86c001_set_pio_mode,
- .set_dma_mode = tc86c001_set_mode,
- .cable_detect = tc86c001_cable_detect,
-};
-
-static const struct ide_dma_ops tc86c001_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = tc86c001_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info tc86c001_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_tc86c001,
- .port_ops = &tc86c001_port_ops,
- .dma_ops = &tc86c001_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
-};
-
-static int tc86c001_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- goto out;
-
- rc = pci_request_region(dev, 5, DRV_NAME);
- if (rc) {
- printk(KERN_ERR DRV_NAME ": system control regs already in use\n");
- goto out_disable;
- }
-
- rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
- if (rc)
- goto out_release;
-
- goto out;
-
-out_release:
- pci_release_region(dev, 5);
-out_disable:
- pci_disable_device(dev);
-out:
- return rc;
-}
-
-static void tc86c001_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_release_region(dev, 5);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id tc86c001_pci_tbl[] = {
- { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE), 0 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
-
-static struct pci_driver tc86c001_pci_driver = {
- .name = "TC86C001",
- .id_table = tc86c001_pci_tbl,
- .probe = tc86c001_init_one,
- .remove = tc86c001_remove,
-};
-
-static int __init tc86c001_ide_init(void)
-{
- return ide_pci_register_driver(&tc86c001_pci_driver);
-}
-
-static void __exit tc86c001_ide_exit(void)
-{
- pci_unregister_driver(&tc86c001_pci_driver);
-}
-
-module_init(tc86c001_ide_init);
-module_exit(tc86c001_ide_exit);
-
-MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
-MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
deleted file mode 100644
index 16ddd0956832..000000000000
--- a/drivers/ide/triflex.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IDE Chipset driver for the Compaq TriFlex IDE controller.
- *
- * Known to work with the Compaq Workstation 5x00 series.
- *
- * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
- * Author: Torben Mathiasen <torben.mathiasen@hp.com>
- *
- * Loosely based on the piix & svwks drivers.
- *
- * Documentation:
- * Not publicly available.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "triflex"
-
-static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 triflex_timings = 0;
- u16 timing = 0;
- u8 channel_offset = hwif->channel ? 0x74 : 0x70, unit = drive->dn & 1;
-
- pci_read_config_dword(dev, channel_offset, &triflex_timings);
-
- switch (drive->dma_mode) {
- case XFER_MW_DMA_2:
- timing = 0x0103;
- break;
- case XFER_MW_DMA_1:
- timing = 0x0203;
- break;
- case XFER_MW_DMA_0:
- timing = 0x0808;
- break;
- case XFER_SW_DMA_2:
- case XFER_SW_DMA_1:
- case XFER_SW_DMA_0:
- timing = 0x0f0f;
- break;
- case XFER_PIO_4:
- timing = 0x0202;
- break;
- case XFER_PIO_3:
- timing = 0x0204;
- break;
- case XFER_PIO_2:
- timing = 0x0404;
- break;
- case XFER_PIO_1:
- timing = 0x0508;
- break;
- case XFER_PIO_0:
- timing = 0x0808;
- break;
- }
-
- triflex_timings &= ~(0xFFFF << (16 * unit));
- triflex_timings |= (timing << (16 * unit));
-
- pci_write_config_dword(dev, channel_offset, triflex_timings);
-}
-
-static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- triflex_set_mode(hwif, drive);
-}
-
-static const struct ide_port_ops triflex_port_ops = {
- .set_pio_mode = triflex_set_pio_mode,
- .set_dma_mode = triflex_set_mode,
-};
-
-static const struct ide_port_info triflex_device = {
- .name = DRV_NAME,
- .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
- .port_ops = &triflex_port_ops,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &triflex_device, NULL);
-}
-
-static const struct pci_device_id triflex_pci_tbl[] = {
- { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
-
-#ifdef CONFIG_PM
-static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- /*
- * We must not disable or power down the device; the APM BIOS
- * refuses to suspend if the IDE interface is not accessible.
- */
- pci_save_state(dev);
- return 0;
-}
-#else
-#define triflex_ide_pci_suspend NULL
-#endif
-
-static struct pci_driver triflex_pci_driver = {
- .name = "TRIFLEX_IDE",
- .id_table = triflex_pci_tbl,
- .probe = triflex_init_one,
- .remove = ide_pci_remove,
- .suspend = triflex_ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init triflex_ide_init(void)
-{
- return ide_pci_register_driver(&triflex_pci_driver);
-}
-
-static void __exit triflex_ide_exit(void)
-{
- pci_unregister_driver(&triflex_pci_driver);
-}
-
-module_init(triflex_ide_init);
-module_exit(triflex_ide_exit);
-
-MODULE_AUTHOR("Torben Mathiasen");
-MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE");
-MODULE_LICENSE("GPL");
-
-
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
deleted file mode 100644
index d550b379b0f1..000000000000
--- a/drivers/ide/trm290.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 1997-1998 Mark Lord
- * Copyright (c) 2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * June 22, 2004 - get rid of check_region
- * - Jesper Juhl
- *
- */
-
-/*
- * This module provides support for the bus-master IDE DMA function
- * of the Tekram TRM290 chip, used on a variety of PCI IDE add-on boards,
- * including a "Precision Instruments" board. The TRM290 pre-dates
- * the sff-8038 standard (ide-dma.c) by a few months, and differs
- * significantly enough to warrant separate routines for some functions,
- * while re-using others from ide-dma.c.
- *
- * EXPERIMENTAL! It works for me (a sample of one).
- *
- * Works reliably for me in DMA mode (READs only);
- * DMA WRITEs are disabled by default (see the #define below).
- *
- * DMA is not enabled automatically for this chipset,
- * but can be turned on manually (with "hdparm -d1") at run time.
- *
- * I need volunteers with "spare" drives for further testing
- * and development, and maybe to help figure out the peculiarities.
- * Even knowing the registers (below), some things behave strangely.
- */
-
-#define TRM290_NO_DMA_WRITES /* DMA writes seem unreliable sometimes */
-
-/*
- * TRM-290 PCI-IDE2 Bus Master Chip
- * ================================
- * The configuration registers are addressed in normal I/O port space
- * and are used as follows:
- *
- * trm290_base depends on jumper settings, and is probed for by ide-dma.c
- *
- * trm290_base+2 when WRITTEN: chiptest register (byte, write-only)
- * bit7 must always be written as "1"
- * bits6-2 undefined
- * bit1 1=legacy_compatible_mode, 0=native_pci_mode
- * bit0 1=test_mode, 0=normal(default)
- *
- * trm290_base+2 when READ: status register (byte, read-only)
- * bits7-2 undefined
- * bit1 channel0 busmaster interrupt status 0=none, 1=asserted
- * bit0 channel0 interrupt status 0=none, 1=asserted
- *
- * trm290_base+3 Interrupt mask register
- * bits7-5 undefined
- * bit4 legacy_header: 1=present, 0=absent
- * bit3 channel1 busmaster interrupt status 0=none, 1=asserted (read only)
- * bit2 channel1 interrupt status 0=none, 1=asserted (read only)
- * bit1 channel1 interrupt mask: 1=masked, 0=unmasked(default)
- * bit0 channel0 interrupt mask: 1=masked, 0=unmasked(default)
- *
- * trm290_base+1 "CPR" Config Pointer Register (byte)
- * bit7 1=autoincrement CPR bits 2-0 after each access of CDR
- * bit6 1=min. 1 wait-state posted write cycle (default), 0=0 wait-state
- * bit5 0=enabled master burst access (default), 1=disable (write only)
- * bit4 PCI DEVSEL# timing select: 1=medium(default), 0=fast
- * bit3 0=primary IDE channel, 1=secondary IDE channel
- * bits2-0 register index for accesses through CDR port
- *
- * trm290_base+0 "CDR" Config Data Register (word)
- * two sets of seven config registers,
- * selected by CPR bit 3 (channel) and CPR bits 2-0 (index 0 to 6),
- * each index defined below:
- *
- * Index-0 Base address register for command block (word)
- * defaults: 0x1f0 for primary, 0x170 for secondary
- *
- * Index-1 general config register (byte)
- * bit7 1=DMA enable, 0=DMA disable
- * bit6 1=activate IDE_RESET, 0=no action (default)
- * bit5 1=enable IORDY, 0=disable IORDY (default)
- * bit4 0=16-bit data port(default), 1=8-bit (XT) data port
- * bit3 interrupt polarity: 1=active_low, 0=active_high(default)
- * bit2 power-saving-mode(?): 1=enable, 0=disable(default) (write only)
- * bit1 bus_master_mode(?): 1=enable, 0=disable(default)
- * bit0 enable_io_ports: 1=enable(default), 0=disable
- *
- * Index-2 read-ahead counter preload bits 0-7 (byte, write only)
- * bits7-0 bits7-0 of readahead count
- *
- * Index-3 read-ahead config register (byte, write only)
- * bit7 1=enable_readahead, 0=disable_readahead(default)
- * bit6 1=clear_FIFO, 0=no_action
- * bit5 undefined
- * bit4 mode4 timing control: 1=enable, 0=disable(default)
- * bit3 undefined
- * bit2 undefined
- * bits1-0 bits9-8 of read-ahead count
- *
- * Index-4 base address register for control block (word)
- * defaults: 0x3f6 for primary, 0x376 for secondary
- *
- * Index-5 data port timings (shared by both drives) (byte)
- * standard PCI "clk" (clock) counts, default value = 0xf5
- *
- * bits7-6 setup time: 00=1clk, 01=2clk, 10=3clk, 11=4clk
- * bits5-3 hold time: 000=1clk, 001=2clk, 010=3clk,
- * 011=4clk, 100=5clk, 101=6clk,
- * 110=8clk, 111=12clk
- * bits2-0 active time: 000=2clk, 001=3clk, 010=4clk,
- * 011=5clk, 100=6clk, 101=8clk,
- * 110=12clk, 111=16clk
- *
- * Index-6 command/control port timings (shared by both drives) (byte)
- * same layout as Index-5, default value = 0xde
- *
- * Suggested CDR programming for PIO mode0 (600ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0xf5,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0xf5,0xde ; secondary
- *
- * Suggested CDR programming for PIO mode3 (180ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0x09,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0x09,0xde ; secondary
- *
- * Suggested CDR programming for PIO mode4 (120ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0x00,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0x00,0xde ; secondary
- *
- */
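-
-/*
- * Illustrative sketch only (not from the original driver): with the
- * CPR auto-increment bit set, the suggested PIO mode0 values above
- * can be loaded into the primary channel's CDR registers back-to-back:
- *
- * outb(0xd0, trm290_base + 1); ; CPR: auto-inc, defaults, primary, index 0
- * outw(0x01f0, trm290_base); ; index 0: command block base
- * outw(0x21, trm290_base); ; index 1: general config
- * outw(0xff, trm290_base); ; index 2: read-ahead preload
- * outw(0x80, trm290_base); ; index 3: read-ahead config
- * outw(0x03f6, trm290_base); ; index 4: control block base
- * outw(0xf5, trm290_base); ; index 5: data port timings
- * outw(0xde, trm290_base); ; index 6: cmd/ctl port timings
- */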
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "trm290"
-
-static void trm290_prepare_drive (ide_drive_t *drive, unsigned int use_dma)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 reg = 0;
- unsigned long flags;
-
- /* select PIO or DMA */
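- /* (0x21 = I/O ports + IORDY enable; 0x82 adds the DMA-enable and bus-master bits, CDR index 1) */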
- reg = use_dma ? (0x21 | 0x82) : (0x21 & ~0x82);
-
- local_irq_save(flags);
-
- if (reg != hwif->select_data) {
- hwif->select_data = reg;
- /* set PIO/DMA */
- outb(0x51 | (hwif->channel << 3), hwif->config_data + 1);
- outw(reg & 0xff, hwif->config_data);
- }
-
- /* enable IRQ if not probing */
- if (drive->dev_flags & IDE_DFLAG_PRESENT) {
- reg = inw(hwif->config_data + 3);
- reg &= 0x13;
- reg &= ~(1 << hwif->channel);
- outw(reg, hwif->config_data + 3);
- }
-
- local_irq_restore(flags);
-}
-
-static void trm290_dev_select(ide_drive_t *drive)
-{
- trm290_prepare_drive(drive, !!(drive->dev_flags & IDE_DFLAG_USING_DMA));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-static int trm290_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (cmd->tf_flags & IDE_TFLAG_WRITE) {
-#ifdef TRM290_NO_DMA_WRITES
- /* always use PIO for writes */
- return 1;
-#endif
- }
- return 0;
-}
-
-static int trm290_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int count, rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 2;
-
- count = ide_build_dmatable(drive, cmd);
- if (count == 0)
- /* try PIO instead of DMA */
- return 1;
-
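- /* load the PRD table address; the low bits select direction (1 = write, 2 = read) */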
- outl(hwif->dmatable_dma | rw, hwif->dma_base);
- /* start DMA */
- outw(count * 2 - 1, hwif->dma_base + 2);
-
- return 0;
-}
-
-static void trm290_dma_start(ide_drive_t *drive)
-{
- trm290_prepare_drive(drive, 1);
-}
-
-static int trm290_dma_end(ide_drive_t *drive)
-{
- u16 status = inw(drive->hwif->dma_base + 2);
-
- trm290_prepare_drive(drive, 0);
-
- return status != 0x00ff;
-}
-
-static int trm290_dma_test_irq(ide_drive_t *drive)
-{
- u16 status = inw(drive->hwif->dma_base + 2);
-
- return status == 0x00ff;
-}
-
-static void trm290_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void init_hwif_trm290(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int cfg_base = pci_resource_start(dev, 4);
- unsigned long flags;
- u8 reg = 0;
-
- if ((dev->class & 5) && cfg_base)
- printk(KERN_INFO DRV_NAME " %s: chip", pci_name(dev));
- else {
- cfg_base = 0x3df0;
- printk(KERN_INFO DRV_NAME " %s: using default", pci_name(dev));
- }
- printk(KERN_CONT " config base at 0x%04x\n", cfg_base);
- hwif->config_data = cfg_base;
- hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, hwif->dma_base, hwif->dma_base + 3);
-
- if (ide_allocate_dma_engine(hwif))
- return;
-
- local_irq_save(flags);
- /* put config reg into first byte of hwif->select_data */
- outb(0x51 | (hwif->channel << 3), hwif->config_data + 1);
- /* select PIO as default */
- hwif->select_data = 0x21;
- outb(hwif->select_data, hwif->config_data);
- /* get IRQ info */
- reg = inb(hwif->config_data + 3);
- /* mask IRQs for both ports */
- reg = (reg & 0x10) | 0x03;
- outb(reg, hwif->config_data + 3);
- local_irq_restore(flags);
-
- if (reg & 0x10)
- /* legacy mode */
- hwif->irq = hwif->channel ? 15 : 14;
-
-#if 1
- {
- /*
- * My trm290-based card doesn't seem to work with all possible values
- * for the control basereg, so this kludge ensures that we use only
- * values that are known to work. Ugh. -ml
- */
- u16 new, old, compat = hwif->channel ? 0x374 : 0x3f4;
- static u16 next_offset = 0;
- u8 old_mask;
-
- outb(0x54 | (hwif->channel << 3), hwif->config_data + 1);
- old = inw(hwif->config_data);
- old &= ~1;
- old_mask = inb(old + 2);
- if (old != compat && old_mask == 0xff) {
- /* leave lower 10 bits untouched */
- compat += (next_offset += 0x400);
- hwif->io_ports.ctl_addr = compat + 2;
- outw(compat | 1, hwif->config_data);
- new = inw(hwif->config_data);
- printk(KERN_INFO "%s: control basereg workaround: "
- "old=0x%04x, new=0x%04x\n",
- hwif->name, old, new & ~1);
- }
- }
-#endif
-}
-
-static const struct ide_tp_ops trm290_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = trm290_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_dma_ops trm290_dma_ops = {
- .dma_host_set = trm290_dma_host_set,
- .dma_setup = trm290_dma_setup,
- .dma_start = trm290_dma_start,
- .dma_end = trm290_dma_end,
- .dma_test_irq = trm290_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_check = trm290_dma_check,
-};
-
-static const struct ide_port_info trm290_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_trm290,
- .tp_ops = &trm290_tp_ops,
- .dma_ops = &trm290_dma_ops,
- .host_flags = IDE_HFLAG_TRM290 |
- IDE_HFLAG_NO_ATAPI_DMA |
-#if 0 /* play it safe for now */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA |
-#endif
- IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_NO_LBA48,
-};
-
-static int trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &trm290_chipset, NULL);
-}
-
-static const struct pci_device_id trm290_pci_tbl[] = {
- { PCI_VDEVICE(TEKRAM, PCI_DEVICE_ID_TEKRAM_DC290), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, trm290_pci_tbl);
-
-static struct pci_driver trm290_pci_driver = {
- .name = "TRM290_IDE",
- .id_table = trm290_pci_tbl,
- .probe = trm290_init_one,
- .remove = ide_pci_remove,
-};
-
-static int __init trm290_ide_init(void)
-{
- return ide_pci_register_driver(&trm290_pci_driver);
-}
-
-static void __exit trm290_ide_exit(void)
-{
- pci_unregister_driver(&trm290_pci_driver);
-}
-
-module_init(trm290_ide_init);
-module_exit(trm290_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Tekram TRM290 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
deleted file mode 100644
index 962eb92501b5..000000000000
--- a/drivers/ide/tx4938ide.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * TX4938 internal IDE driver
- * Based on tx4939ide.c.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/ide.h>
-#include <asm/txx9/tx4938.h>
-
-static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
- unsigned int gbus_clock,
- u8 pio)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- u64 cr = __raw_readq(&tx4938_ebuscptr->cr[ebus_ch]);
- unsigned int sp = (cr >> 4) & 3;
- unsigned int clock = gbus_clock / (4 - sp);
- unsigned int cycle = 1000000000 / clock;
- unsigned int shwt;
- int wt;
-
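- /* cycle is one EBUSC clock period in ns; the fields below are computed in these ticks */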
- /* Minimum DIOx- active time */
- wt = DIV_ROUND_UP(t->act8b, cycle) - 2;
- /* IORDY setup time: 35ns */
- wt = max_t(int, wt, DIV_ROUND_UP(35, cycle));
- /* actual wait-cycle is max(wt & ~1, 1) */
- if (wt > 2 && (wt & 1))
- wt++;
- wt &= ~1;
- /* Address-valid to DIOR/DIOW setup */
- shwt = DIV_ROUND_UP(t->setup, cycle);
-
- /* -DIOx recovery time (SHWT * 4) and cycle time requirement */
- while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
- shwt++;
- if (shwt > 7) {
- pr_warn("tx4938ide: SHWT violation (%d)\n", shwt);
- shwt = 7;
- }
- pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
- ebus_ch, cycle, wt, shwt);
-
- __raw_writeq((cr & ~0x3f007ull) | (wt << 12) | shwt,
- &tx4938_ebuscptr->cr[ebus_ch]);
-}
-
-static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct tx4938ide_platform_info *pdata = dev_get_platdata(hwif->dev);
- u8 safe = drive->pio_mode - XFER_PIO_0;
- ide_drive_t *pair;
-
- pair = ide_get_pair_dev(drive);
- if (pair)
- safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
- tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
-}
-
-#ifdef __BIG_ENDIAN
-
-/* custom iops (independent from SWAP_IO_SPACE) */
-static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--)
- *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static void tx4938ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--) {
- __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
- ptr++;
- }
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static const struct ide_tp_ops tx4938ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = tx4938ide_input_data_swap,
- .output_data = tx4938ide_output_data_swap,
-};
-
-#endif /* __BIG_ENDIAN */
-
-static const struct ide_port_ops tx4938ide_port_ops = {
- .set_pio_mode = tx4938ide_set_pio_mode,
-};
-
-static const struct ide_port_info tx4938ide_port_info __initconst = {
- .port_ops = &tx4938ide_port_ops,
-#ifdef __BIG_ENDIAN
- .tp_ops = &tx4938ide_tp_ops,
-#endif
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO5,
- .chipset = ide_generic,
-};
-
-static int __init tx4938ide_probe(struct platform_device *pdev)
-{
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_host *host;
- struct resource *res;
- struct tx4938ide_platform_info *pdata = dev_get_platdata(&pdev->dev);
- int irq, ret, i;
- unsigned long mapbase, mapctl;
- struct ide_port_info d = tx4938ide_port_info;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), "tx4938ide"))
- return -EBUSY;
- mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
- 8 << pdata->ioport_shift);
- mapctl = (unsigned long)devm_ioremap(&pdev->dev,
- res->start + 0x10000 +
- (6 << pdata->ioport_shift),
- 1 << pdata->ioport_shift);
- if (!mapbase || !mapctl)
- return -EBUSY;
-
- memset(&hw, 0, sizeof(hw));
- if (pdata->ioport_shift) {
- unsigned long port = mapbase;
- unsigned long ctl = mapctl;
-
- hw.io_ports_array[0] = port;
-#ifdef __BIG_ENDIAN
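- /* 8-bit registers sit on the other byte lane within each 16-bit word */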
- port++;
- ctl++;
-#endif
- for (i = 1; i <= 7; i++)
- hw.io_ports_array[i] =
- port + (i << pdata->ioport_shift);
- hw.io_ports.ctl_addr = ctl;
- } else
- ide_std_init_ports(&hw, mapbase, mapctl);
- hw.irq = irq;
- hw.dev = &pdev->dev;
-
- pr_info("TX4938 IDE interface (base %#lx, ctl %#lx, irq %d)\n",
- mapbase, mapctl, hw.irq);
- if (pdata->gbus_clock)
- tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
- else
- d.port_ops = NULL;
- ret = ide_host_add(&d, hws, 1, &host);
- if (!ret)
- platform_set_drvdata(pdev, host);
- return ret;
-}
-
-static int __exit tx4938ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-static struct platform_driver tx4938ide_driver = {
- .driver = {
- .name = "tx4938ide",
- },
- .remove = __exit_p(tx4938ide_remove),
-};
-
-module_platform_driver_probe(tx4938ide_driver, tx4938ide_probe);
-
-MODULE_DESCRIPTION("TX4938 internal IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:tx4938ide");
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
deleted file mode 100644
index b1bbf807bb3d..000000000000
--- a/drivers/ide/tx4939ide.c
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- * TX4939 internal IDE driver
- * Based on RBTX49xx patch from CELF patch archive.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/scatterlist.h>
-
-#include <asm/ide.h>
-
-#define MODNAME "tx4939ide"
-
-/* ATA Shadow Registers (8-bit except for Data which is 16-bit) */
-#define TX4939IDE_Data 0x000
-#define TX4939IDE_Error_Feature 0x001
-#define TX4939IDE_Sec 0x002
-#define TX4939IDE_LBA0 0x003
-#define TX4939IDE_LBA1 0x004
-#define TX4939IDE_LBA2 0x005
-#define TX4939IDE_DevHead 0x006
-#define TX4939IDE_Stat_Cmd 0x007
-#define TX4939IDE_AltStat_DevCtl 0x402
-/* H/W DMA Registers */
-#define TX4939IDE_DMA_Cmd 0x800 /* 8-bit */
-#define TX4939IDE_DMA_Stat 0x802 /* 8-bit */
-#define TX4939IDE_PRD_Ptr 0x804 /* 32-bit */
-/* ATA100 CORE Registers (16-bit) */
-#define TX4939IDE_Sys_Ctl 0xc00
-#define TX4939IDE_Xfer_Cnt_1 0xc08
-#define TX4939IDE_Xfer_Cnt_2 0xc0a
-#define TX4939IDE_Sec_Cnt 0xc10
-#define TX4939IDE_Start_Lo_Addr 0xc18
-#define TX4939IDE_Start_Up_Addr 0xc20
-#define TX4939IDE_Add_Ctl 0xc28
-#define TX4939IDE_Lo_Burst_Cnt 0xc30
-#define TX4939IDE_Up_Burst_Cnt 0xc38
-#define TX4939IDE_PIO_Addr 0xc88
-#define TX4939IDE_H_Rst_Tim 0xc90
-#define TX4939IDE_Int_Ctl 0xc98
-#define TX4939IDE_Pkt_Cmd 0xcb8
-#define TX4939IDE_Bxfer_Cnt_Hi 0xcc0
-#define TX4939IDE_Bxfer_Cnt_Lo 0xcc8
-#define TX4939IDE_Dev_TErr 0xcd0
-#define TX4939IDE_Pkt_Xfer_Ctl 0xcd8
-#define TX4939IDE_Start_TAddr 0xce0
-
-/* bits for Int_Ctl */
-#define TX4939IDE_INT_ADDRERR 0x80
-#define TX4939IDE_INT_REACHMUL 0x40
-#define TX4939IDE_INT_DEVTIMING 0x20
-#define TX4939IDE_INT_UDMATERM 0x10
-#define TX4939IDE_INT_TIMER 0x08
-#define TX4939IDE_INT_BUSERR 0x04
-#define TX4939IDE_INT_XFEREND 0x02
-#define TX4939IDE_INT_HOST 0x01
-
-#define TX4939IDE_IGNORE_INTS \
- (TX4939IDE_INT_ADDRERR | TX4939IDE_INT_REACHMUL | \
- TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_UDMATERM | \
- TX4939IDE_INT_TIMER | TX4939IDE_INT_XFEREND)
-
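-/*
- * On big-endian kernels the registers appear on swapped byte lanes
- * within each 64-bit word, so fix up the offset per access width.
- */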
-#ifdef __BIG_ENDIAN
-#define tx4939ide_swizzlel(a) ((a) ^ 4)
-#define tx4939ide_swizzlew(a) ((a) ^ 6)
-#define tx4939ide_swizzleb(a) ((a) ^ 7)
-#else
-#define tx4939ide_swizzlel(a) (a)
-#define tx4939ide_swizzlew(a) (a)
-#define tx4939ide_swizzleb(a) (a)
-#endif
-
-static u16 tx4939ide_readw(void __iomem *base, u32 reg)
-{
- return __raw_readw(base + tx4939ide_swizzlew(reg));
-}
-static u8 tx4939ide_readb(void __iomem *base, u32 reg)
-{
- return __raw_readb(base + tx4939ide_swizzleb(reg));
-}
-static void tx4939ide_writel(u32 val, void __iomem *base, u32 reg)
-{
- __raw_writel(val, base + tx4939ide_swizzlel(reg));
-}
-static void tx4939ide_writew(u16 val, void __iomem *base, u32 reg)
-{
- __raw_writew(val, base + tx4939ide_swizzlew(reg));
-}
-static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
-{
- __raw_writeb(val, base + tx4939ide_swizzleb(reg));
-}
-
-#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
-
-static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int is_slave = drive->dn;
- u32 mask, val;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 safe = pio;
- ide_drive_t *pair;
-
- pair = ide_get_pair_dev(drive);
- if (pair)
- safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
- /*
- * Update Command Transfer Mode for master/slave and Data
- * Transfer Mode for this drive.
- */
- mask = is_slave ? 0x07f00000 : 0x000007f0;
- val = ((safe << 8) | (pio << 4)) << (is_slave ? 16 : 0);
- hwif->select_data = (hwif->select_data & ~mask) | val;
- /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
-}
-
-static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- u32 mask, val;
- const u8 mode = drive->dma_mode;
-
- /* Update Data Transfer Mode for this drive. */
- if (mode >= XFER_UDMA_0)
- val = mode - XFER_UDMA_0 + 8;
- else
- val = mode - XFER_MW_DMA_0 + 5;
- if (drive->dn) {
- mask = 0x00f00000;
- val <<= 20;
- } else {
- mask = 0x000000f0;
- val <<= 4;
- }
- hwif->select_data = (hwif->select_data & ~mask) | val;
- /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
-}
-
-static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
-
- if (ctl & TX4939IDE_INT_BUSERR) {
- /* reset FIFO */
- u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
-
- tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
- /* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
- ndelay(270);
- tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
- }
- if (ctl & (TX4939IDE_INT_ADDRERR |
- TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_BUSERR))
- pr_err("%s: Error interrupt %#x (%s%s%s )\n",
- hwif->name, ctl,
- ctl & TX4939IDE_INT_ADDRERR ? " Address-Error" : "",
- ctl & TX4939IDE_INT_DEVTIMING ? " DEV-Timing" : "",
- ctl & TX4939IDE_INT_BUSERR ? " Bus-Error" : "");
- return ctl;
-}
-
-static void tx4939ide_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif;
- void __iomem *base;
- u16 ctl;
-
- /*
- * tx4939ide_dma_test_irq() and tx4939ide_dma_end() do all the
- * work for the DMA case.
- */
- if (drive->waiting_for_dma)
- return;
- hwif = drive->hwif;
- base = TX4939IDE_BASE(hwif);
- ctl = tx4939ide_check_error_ints(hwif);
- tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
-}
-
-static u8 tx4939ide_cable_detect(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- return tx4939ide_readw(base, TX4939IDE_Sys_Ctl) & 0x2000 ?
- ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-#ifdef __BIG_ENDIAN
-static void tx4939ide_dma_host_set(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 unit = drive->dn;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u8 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
-
- if (on)
- dma_stat |= (1 << (5 + unit));
- else
- dma_stat &= ~(1 << (5 + unit));
-
- tx4939ide_writeb(dma_stat, base, TX4939IDE_DMA_Stat);
-}
-#else
-#define tx4939ide_dma_host_set ide_dma_host_set
-#endif
-
-static u8 tx4939ide_clear_dma_status(void __iomem *base)
-{
- u8 dma_stat;
-
- /* read DMA status for INTR & ERROR flags */
- dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
- /* clear INTR & ERROR flags */
- tx4939ide_writeb(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, base,
- TX4939IDE_DMA_Stat);
- /* recover intmask cleared by writing to bit2 of DMA_Stat */
- tx4939ide_writew(TX4939IDE_IGNORE_INTS << 8, base, TX4939IDE_Int_Ctl);
- return dma_stat;
-}
-
-#ifdef __BIG_ENDIAN
-/* custom ide_build_dmatable to handle swapped layout */
-static int tx4939ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- u32 *table = (u32 *)hwif->dmatable_cpu;
- unsigned int count = 0;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
- u32 cur_addr, cur_len, bcount;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- /*
- * Fill in the DMA table, without crossing any 64kB boundaries.
- */
-
- while (cur_len) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
-
- bcount = 0x10000 - (cur_addr & 0xffff);
- if (bcount > cur_len)
- bcount = cur_len;
- /*
- * This workaround for zero count seems required.
- * (standard ide_build_dmatable does it too)
- */
- if (bcount == 0x10000)
- bcount = 0x8000;
- *table++ = bcount & 0xffff;
- *table++ = cur_addr;
- cur_addr += bcount;
- cur_len -= bcount;
- }
- }
-
- if (count) {
- *(table - 2) |= 0x80000000;
- return count;
- }
-
-use_pio_instead:
- printk(KERN_ERR "%s: %s\n", drive->name,
- count ? "DMA table too small" : "empty DMA table?");
-
- return 0; /* revert to PIO for this request */
-}
-#else
-#define tx4939ide_build_dmatable ide_build_dmatable
-#endif
-
-static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
-
- /* fall back to PIO! */
- if (tx4939ide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- /* PRD table */
- tx4939ide_writel(hwif->dmatable_dma, base, TX4939IDE_PRD_Ptr);
-
- /* specify r/w */
- tx4939ide_writeb(rw, base, TX4939IDE_DMA_Cmd);
-
- /* clear INTR & ERROR flags */
- tx4939ide_clear_dma_status(base);
-
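- /* per-transfer unit: one sector, counted in 16-bit words */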
- tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
- TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
-
- tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
-
- return 0;
-}
-
-static int tx4939ide_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat, dma_cmd;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
-
- /* get DMA command mode */
- dma_cmd = tx4939ide_readb(base, TX4939IDE_DMA_Cmd);
- /* stop DMA */
- tx4939ide_writeb(dma_cmd & ~ATA_DMA_START, base, TX4939IDE_DMA_Cmd);
-
- /* read and clear the INTR & ERROR bits */
- dma_stat = tx4939ide_clear_dma_status(base);
-
-#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
-
- /* verify good DMA status */
- if ((dma_stat & CHECK_DMA_MASK) == 0 &&
- (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) ==
- (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST))
- /* INT_IDE lost... bug? */
- return 0;
- return ((dma_stat & CHECK_DMA_MASK) !=
- ATA_DMA_INTR) ? 0x10 | dma_stat : 0;
-}
-
-/* returns 1 if DMA IRQ issued, 0 otherwise */
-static int tx4939ide_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl, ide_int;
- u8 dma_stat, stat;
- int found = 0;
-
- ctl = tx4939ide_check_error_ints(hwif);
- ide_int = ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST);
- switch (ide_int) {
- case TX4939IDE_INT_HOST:
- /* On error, XFEREND might not be asserted. */
- stat = tx4939ide_readb(base, TX4939IDE_AltStat_DevCtl);
- if ((stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) == ATA_ERR)
- found = 1;
- else
- /* Wait for XFEREND (Mask HOST and unmask XFEREND) */
- ctl &= ~TX4939IDE_INT_XFEREND << 8;
- ctl |= ide_int << 8;
- break;
- case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND:
- dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
- if (!(dma_stat & ATA_DMA_INTR))
- pr_warn("%s: weird interrupt status. "
- "DMA_Stat %#02x int_ctl %#04x\n",
- hwif->name, dma_stat, ctl);
- found = 1;
- break;
- }
- /*
- * Do not clear XFEREND, HOST now. They will be cleared by
- * clearing bit2 of DMA_Stat.
- */
- ctl &= ~ide_int;
- tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
- return found;
-}
-
-#ifdef __BIG_ENDIAN
-static u8 tx4939ide_dma_sff_read_status(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- return tx4939ide_readb(base, TX4939IDE_DMA_Stat);
-}
-#else
-#define tx4939ide_dma_sff_read_status ide_dma_sff_read_status
-#endif
-
-static void tx4939ide_init_hwif(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- /* Soft Reset */
- tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
- /* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
- ndelay(450);
- tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
- /* mask some interrupts and clear all interrupts */
- tx4939ide_writew((TX4939IDE_IGNORE_INTS << 8) | 0xff, base,
- TX4939IDE_Int_Ctl);
-
- tx4939ide_writew(0x0008, base, TX4939IDE_Lo_Burst_Cnt);
- tx4939ide_writew(0, base, TX4939IDE_Up_Burst_Cnt);
-}
-
-static int tx4939ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- hwif->dma_base =
- hwif->extra_base + tx4939ide_swizzleb(TX4939IDE_DMA_Cmd);
- /*
- * Note that we cannot use ATA_DMA_TABLE_OFS, ATA_DMA_STATUS
- * for big endian.
- */
- return ide_allocate_dma_engine(hwif);
-}
-
-static void tx4939ide_tf_load_fixup(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 sysctl = hwif->select_data >> (drive->dn ? 16 : 0);
-
- /*
- * Fix ATA100 CORE System Control Register. (The write to the
- * Device/Head register may write wrong data to the System
- * Control Register)
- * While Sys_Ctl is written here, dev_select() is not needed.
- */
- tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
-}
-
-static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf,
- u8 valid)
-{
- ide_tf_load(drive, tf, valid);
-
- if (valid & IDE_VALID_DEVICE)
- tx4939ide_tf_load_fixup(drive);
-}
-
-#ifdef __BIG_ENDIAN
-
-/* custom iops (independent from SWAP_IO_SPACE) */
-static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--)
- *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--) {
- __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
- ptr++;
- }
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static const struct ide_tp_ops tx4939ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = tx4939ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = tx4939ide_input_data_swap,
- .output_data = tx4939ide_output_data_swap,
-};
-
-#else /* __LITTLE_ENDIAN */
-
-static const struct ide_tp_ops tx4939ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = tx4939ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-#endif /* __LITTLE_ENDIAN */
-
-static const struct ide_port_ops tx4939ide_port_ops = {
- .set_pio_mode = tx4939ide_set_pio_mode,
- .set_dma_mode = tx4939ide_set_dma_mode,
- .clear_irq = tx4939ide_clear_irq,
- .cable_detect = tx4939ide_cable_detect,
-};
-
-static const struct ide_dma_ops tx4939ide_dma_ops = {
- .dma_host_set = tx4939ide_dma_host_set,
- .dma_setup = tx4939ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = tx4939ide_dma_end,
- .dma_test_irq = tx4939ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = tx4939ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info tx4939ide_port_info __initconst = {
- .init_hwif = tx4939ide_init_hwif,
- .init_dma = tx4939ide_init_dma,
- .port_ops = &tx4939ide_port_ops,
- .dma_ops = &tx4939ide_dma_ops,
- .tp_ops = &tx4939ide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- .chipset = ide_generic,
-};
-
-static int __init tx4939ide_probe(struct platform_device *pdev)
-{
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_host *host;
- struct resource *res;
- int irq, ret;
- unsigned long mapbase;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), MODNAME))
- return -EBUSY;
- mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!mapbase)
- return -EBUSY;
- memset(&hw, 0, sizeof(hw));
- hw.io_ports.data_addr =
- mapbase + tx4939ide_swizzlew(TX4939IDE_Data);
- hw.io_ports.error_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Error_Feature);
- hw.io_ports.nsect_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Sec);
- hw.io_ports.lbal_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA0);
- hw.io_ports.lbam_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA1);
- hw.io_ports.lbah_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA2);
- hw.io_ports.device_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_DevHead);
- hw.io_ports.command_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Stat_Cmd);
- hw.io_ports.ctl_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_AltStat_DevCtl);
- hw.irq = irq;
- hw.dev = &pdev->dev;
-
- pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
- host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
- if (!host)
- return -ENOMEM;
- /* use extra_base for the base address of all the registers */
- host->ports[0]->extra_base = mapbase;
- ret = ide_host_register(host, &tx4939ide_port_info, hws);
- if (ret) {
- ide_host_free(host);
- return ret;
- }
- platform_set_drvdata(pdev, host);
- return 0;
-}
-
-static int __exit tx4939ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int tx4939ide_resume(struct platform_device *dev)
-{
- struct ide_host *host = platform_get_drvdata(dev);
- ide_hwif_t *hwif = host->ports[0];
-
- tx4939ide_init_hwif(hwif);
- return 0;
-}
-#else
-#define tx4939ide_resume NULL
-#endif
-
-static struct platform_driver tx4939ide_driver = {
- .driver = {
- .name = MODNAME,
- },
- .remove = __exit_p(tx4939ide_remove),
- .resume = tx4939ide_resume,
-};
-
-module_platform_driver_probe(tx4939ide_driver, tx4939ide_probe);
-
-MODULE_DESCRIPTION("TX4939 internal IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:tx4939ide");
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
deleted file mode 100644
index cf996f788292..000000000000
--- a/drivers/ide/umc8672.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
- */
-
-/*
- * Principal Author/Maintainer: PODIEN@hml2.atlas.de (Wolfram Podien)
- *
- * This file provides support for the advanced features
- * of the UMC 8672 IDE interface.
- *
- * Version 0.01 Initial version, hacked out of ide.c,
- * and #include'd rather than compiled separately.
- * This will get cleaned up in a subsequent release.
- *
- * Version 0.02 now configs/compiles separate from ide.c -ml
- * Version 0.03 enhanced auto-tune, fix display bug
- * Version 0.05 replace sti() with restore_flags() -ml
- * add detection of possible race condition -ml
- */
-
-/*
- * VLB Controller Support from
- * Wolfram Podien
- * Rohoefe 3
- * D28832 Achim
- * Germany
- *
- * To enable UMC8672 support there must be a lilo line like
- * append="ide0=umc8672"...
- * To set the speed according to the abilities of the hardware there must be a
- * line like
- * #define UMC_DRIVE0 11
- * at the beginning of the driver, which sets the speed of drive 0 to 11 (such
- * lines are already present). Allowed speed values are 0 - 11; these are
- * the results from the DOS speed test program supplied by UMC. 11 is the
- * highest speed (about PIO mode 3).
- */
-#define REALLY_SLOW_IO /* some systems can safely undef this */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "umc8672"
-
-/*
- * Default speeds. These can be changed with "auto-tune" and/or hdparm.
- */
-#define UMC_DRIVE0 1 /* DOS measured drive speeds */
-#define UMC_DRIVE1 1 /* 0 to 11 allowed */
-#define UMC_DRIVE2 1 /* 11 = Fastest Speed */
-#define UMC_DRIVE3 1 /* In case of crash reduce speed */
-
-static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};
-static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */
-
-/* 0 1 2 3 4 5 6 7 8 9 10 11 */
-static const u8 speedtab [3][12] = {
- {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
- {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
- {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
-};
-
-static void out_umc(char port, char wert)
-{
- outb_p(port, 0x108);
- outb_p(wert, 0x109);
-}
-
-static inline u8 in_umc(char port)
-{
- outb_p(port, 0x108);
- return inb_p(0x109);
-}
-
-static void umc_set_speeds(u8 speeds[])
-{
- int i, tmp;
-
- outb_p(0x5A, 0x108); /* enable umc */
-
- out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
- out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
- tmp = 0;
- for (i = 3; i >= 0; i--)
- tmp = (tmp << 2) | speedtab[1][speeds[i]];
- out_umc(0xdc, tmp);
- for (i = 0; i < 4; i++) {
- out_umc(0xd0 + i, speedtab[2][speeds[i]]);
- out_umc(0xd8 + i, speedtab[2][speeds[i]]);
- }
- outb_p(0xa5, 0x108); /* disable umc */
-
- printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
- speeds[0], speeds[1], speeds[2], speeds[3]);
-}
-
-static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_hwif_t *mate = hwif->mate;
- unsigned long flags;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
- drive->name, pio, pio_to_umc[pio]);
- if (mate)
- spin_lock_irqsave(&mate->lock, flags);
- if (mate && mate->handler) {
- printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
- } else {
- current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
- umc_set_speeds(current_speeds);
- }
- if (mate)
- spin_unlock_irqrestore(&mate->lock, flags);
-}
-
-static const struct ide_port_ops umc8672_port_ops = {
- .set_pio_mode = umc_set_pio_mode,
-};
-
-static const struct ide_port_info umc8672_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_umc8672,
- .port_ops = &umc8672_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init umc8672_probe(void)
-{
- unsigned long flags;
-
- if (!request_region(0x108, 2, "umc8672")) {
- printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
- return 1;
- }
- local_irq_save(flags);
- outb_p(0x5A, 0x108); /* enable umc */
- if (in_umc (0xd5) != 0xa0) {
- local_irq_restore(flags);
- printk(KERN_ERR "umc8672: not found\n");
- release_region(0x108, 2);
- return 1;
- }
- outb_p(0xa5, 0x108); /* disable umc */
-
- umc_set_speeds(current_speeds);
- local_irq_restore(flags);
-
- return ide_legacy_device_add(&umc8672_port_info, 0);
-}
-
-static bool probe_umc8672;
-
-module_param_named(probe, probe_umc8672, bool, 0);
-MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
-
-static int __init umc8672_init(void)
-{
- if (probe_umc8672 == 0)
- goto out;
-
- if (umc8672_probe() == 0)
- return 0;
-out:
- return -ENODEV;
-}
-
-module_init(umc8672_init);
-
-MODULE_AUTHOR("Wolfram Podien");
-MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
-MODULE_LICENSE("GPL");
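Worth noting while it is being removed: the umc8672 driver never probed automatically. Its "probe" module parameter had to be set explicitly, e.g. "modprobe umc8672 probe=1" when built as a module, or "umc8672.probe=1" on the kernel command line when built in.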
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
deleted file mode 100644
index 63a3aca506fc..000000000000
--- a/drivers/ide/via82cxxx.c
+++ /dev/null
@@ -1,532 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VIA IDE driver for Linux. Supported southbridges:
- *
- * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
- * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
- * vt8235, vt8237, vt8237a
- *
- * Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Based on the work of:
- * Michel Aubry
- * Jeff Garzik
- * Andre Hedrick
- *
- * Documentation:
- * Obsolete device documentation publicly available from via.com.tw
- * Current device documentation available under NDA only
- */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/dmi.h>
-
-#ifdef CONFIG_PPC_CHRP
-#include <asm/processor.h>
-#endif
-
-#define DRV_NAME "via82cxxx"
-
-#define VIA_IDE_ENABLE 0x40
-#define VIA_IDE_CONFIG 0x41
-#define VIA_FIFO_CONFIG 0x43
-#define VIA_MISC_1 0x44
-#define VIA_MISC_2 0x45
-#define VIA_MISC_3 0x46
-#define VIA_DRIVE_TIMING 0x48
-#define VIA_8BIT_TIMING 0x4e
-#define VIA_ADDRESS_SETUP 0x4c
-#define VIA_UDMA_TIMING 0x50
-
-#define VIA_BAD_PREQ 0x01 /* Crashes if PREQ# till DDACK# set */
-#define VIA_BAD_CLK66 0x02 /* 66 MHz clock doesn't work correctly */
-#define VIA_SET_FIFO 0x04 /* Needs to have FIFO split set */
-#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
-#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
-#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
-#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */
-
-enum {
- VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */
-};
-
-/*
- * VIA SouthBridge chips.
- */
-
-static struct via_isa_bridge {
- char *name;
- u16 id;
- u8 rev_min;
- u8 rev_max;
- u8 udma_mask;
- u8 flags;
-} via_isa_bridges[] = {
- { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
- { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, },
- { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, },
- { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, },
- { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, },
- { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, },
- { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
- { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, },
- { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
- { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
- { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
- { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { NULL }
-};
-
-static unsigned int via_clock;
-static char *via_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
-
-struct via82cxxx_dev
-{
- struct via_isa_bridge *via_config;
- unsigned int via_80w;
-};
-
-/**
- * via_set_speed - write timing registers
- * @dev: PCI device
- * @dn: device
- * @timing: IDE timing data to use
- *
- * via_set_speed writes timing values to the chipset registers
- */
-
-static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- u8 t;
-
- if (~vdev->via_config->flags & VIA_BAD_AST) {
- pci_read_config_byte(dev, VIA_ADDRESS_SETUP, &t);
- t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
- pci_write_config_byte(dev, VIA_ADDRESS_SETUP, t);
- }
-
- pci_write_config_byte(dev, VIA_8BIT_TIMING + (1 - (dn >> 1)),
- ((clamp_val(timing->act8b, 1, 16) - 1) << 4) | (clamp_val(timing->rec8b, 1, 16) - 1));
-
- pci_write_config_byte(dev, VIA_DRIVE_TIMING + (3 - dn),
- ((clamp_val(timing->active, 1, 16) - 1) << 4) | (clamp_val(timing->recover, 1, 16) - 1));
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA2: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
- case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
- case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- }
-
- /* Set UDMA unless device is not UDMA capable */
- if (vdev->via_config->udma_mask) {
- u8 udma_etc;
-
- pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc);
-
- /* clear transfer mode bit */
- udma_etc &= ~0x20;
-
- if (timing->udma) {
- /* preserve 80-wire cable detection bit */
- udma_etc &= 0x10;
- udma_etc |= t;
- }
-
- pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc);
- }
-}
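The shift arithmetic in via_set_speed() above is dense, so here is a stand-alone model of just the VIA_ADDRESS_SETUP packing, as a sketch (pack_address_setup() and the example values are not part of the driver): each drive dn (0..3) owns a 2-bit field at bit position (3 - dn) * 2 holding the clamped setup time minus one.

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the VIA_ADDRESS_SETUP byte: 2 bits per drive,
     * drive 0 in bits 7:6 down to drive 3 in bits 1:0. */
    static uint8_t pack_address_setup(uint8_t reg, int dn, int setup)
    {
            int shift = (3 - dn) << 1;

            if (setup < 1)          /* clamp_val(timing->setup, 1, 4) */
                    setup = 1;
            if (setup > 4)
                    setup = 4;
            reg &= ~(3 << shift);   /* clear this drive's field */
            reg |= (setup - 1) << shift;
            return reg;
    }

    int main(void)
    {
            printf("%#x\n", pack_address_setup(0, 0, 4));   /* 0xc0 */
            printf("%#x\n", pack_address_setup(0, 3, 2));   /* 0x1  */
            return 0;
    }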
-
-/**
- * via_set_drive - configure transfer mode
- * @hwif: port
- * @drive: Drive to set up
- *
- * via_set_drive() computes timing values and configures the chipset for
- * the desired transfer mode. It can also be called by upper layers.
- */
-
-static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *peer = ide_get_pair_dev(drive);
- struct ide_host *host = dev_get_drvdata(hwif->dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- struct ide_timing t, p;
- unsigned int T, UT;
- const u8 speed = drive->dma_mode;
-
- T = 1000000000 / via_clock;
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA2: UT = T; break;
- case ATA_UDMA4: UT = T/2; break;
- case ATA_UDMA5: UT = T/3; break;
- case ATA_UDMA6: UT = T/4; break;
- default: UT = T;
- }
-
- ide_timing_compute(drive, speed, &t, T, UT);
-
- if (peer) {
- ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
- ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
- }
-
- via_set_speed(hwif, drive->dn, &t);
-}
-
-/**
- * via_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * A callback from the upper layers for PIO-only tuning.
- */
-
-static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- via_set_drive(hwif, drive);
-}
-
-static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
-{
- struct via_isa_bridge *via_config;
-
- for (via_config = via_isa_bridges;
- via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++)
- if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
- !!(via_config->flags & VIA_BAD_ID),
- via_config->id, NULL))) {
-
- if ((*isa)->revision >= via_config->rev_min &&
- (*isa)->revision <= via_config->rev_max)
- break;
- pci_dev_put(*isa);
- }
-
- return via_config;
-}
-
-/*
- * Check and handle 80-wire cable presence
- */
-static void via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
-{
- int i;
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA4:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> (i & 16)) & 8) &&
- ((u >> i) & 0x20) &&
- (((u >> i) & 7) < 2)) {
- /*
- * 2x PCI clock and
- * UDMA w/ < 3T/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
-
- case ATA_UDMA5:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 0x10) ||
- (((u >> i) & 0x20) &&
- (((u >> i) & 7) < 4))) {
- /* BIOS 80-wire bit or
- * UDMA w/ < 60ns/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
-
- case ATA_UDMA6:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 0x10) ||
- (((u >> i) & 0x20) &&
- (((u >> i) & 7) < 6))) {
- /* BIOS 80-wire bit or
- * UDMA w/ < 60ns/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
- }
-}
-
-/**
- * init_chipset_via82cxxx - initialization handler
- * @dev: PCI device
- *
- * The initialization callback. Here we determine the IDE chip type
- * and initialize its drive independent registers.
- */
-
-static int init_chipset_via82cxxx(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- struct via_isa_bridge *via_config = vdev->via_config;
- u8 t, v;
- u32 u;
-
- /*
- * Detect cable and configure Clk66
- */
- pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
-
- via_cable_detect(vdev, u);
-
- if (via_config->udma_mask == ATA_UDMA4) {
- /* Enable Clk66 */
- pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
- } else if (via_config->flags & VIA_BAD_CLK66) {
- /* Would cause trouble on 596a and 686 */
- pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008);
- }
-
- /*
- * Check whether interfaces are enabled.
- */
-
- pci_read_config_byte(dev, VIA_IDE_ENABLE, &v);
-
- /*
- * Set up FIFO sizes and thresholds.
- */
-
- pci_read_config_byte(dev, VIA_FIFO_CONFIG, &t);
-
- /* Disable PREQ# till DDACK# */
- if (via_config->flags & VIA_BAD_PREQ) {
- /* Would crash on 586b rev 41 */
- t &= 0x7f;
- }
-
- /* Fix FIFO split between channels */
- if (via_config->flags & VIA_SET_FIFO) {
- t &= (t & 0x9f);
- switch (v & 3) {
- case 2: t |= 0x00; break; /* 16 on primary */
- case 1: t |= 0x60; break; /* 16 on secondary */
- case 3: t |= 0x20; break; /* 8 pri 8 sec */
- }
- }
-
- pci_write_config_byte(dev, VIA_FIFO_CONFIG, t);
-
- return 0;
-}
-
-/*
- * Cable special cases
- */
-
-static const struct dmi_system_id cable_dmi_table[] = {
- {
- .ident = "Acer Ferrari 3400",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
- },
- },
- { }
-};
-
-static int via_cable_override(struct pci_dev *pdev)
-{
- /* Systems by DMI */
- if (dmi_check_system(cable_dmi_table))
- return 1;
-
- /* Arima W730-K8/Targa Visionary 811/... */
- if (pdev->subsystem_vendor == 0x161F &&
- pdev->subsystem_device == 0x2032)
- return 1;
-
- return 0;
-}
-
-static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(pdev);
- struct via82cxxx_dev *vdev = host->host_priv;
-
- if (via_cable_override(pdev))
- return ATA_CBL_PATA40_SHORT;
-
- if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0)
- return ATA_CBL_SATA;
-
- if ((vdev->via_80w >> hwif->channel) & 1)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops via_port_ops = {
- .set_pio_mode = via_set_pio_mode,
- .set_dma_mode = via_set_drive,
- .cable_detect = via82cxxx_cable_detect,
-};
-
-static const struct ide_port_info via82cxxx_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_via82cxxx,
- .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
- .port_ops = &via_port_ops,
- .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_IO_32BIT,
- .pio_mask = ATA_PIO5,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct pci_dev *isa = NULL;
- struct via_isa_bridge *via_config;
- struct via82cxxx_dev *vdev;
- int rc;
- u8 idx = id->driver_data;
- struct ide_port_info d;
-
- d = via82cxxx_chipset;
-
- /*
- * Find the ISA bridge and check we know what it is.
- */
- via_config = via_config_find(&isa);
-
- /*
- * Print the boot message.
- */
- printk(KERN_INFO DRV_NAME " %s: VIA %s (rev %02x) IDE %sDMA%s\n",
- pci_name(dev), via_config->name, isa->revision,
- via_config->udma_mask ? "U" : "MW",
- via_dma[via_config->udma_mask ?
- (fls(via_config->udma_mask) - 1) : 0]);
-
- pci_dev_put(isa);
-
- /*
- * Determine system bus clock.
- */
- via_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
-
- switch (via_clock) {
- case 33000: via_clock = 33333; break;
- case 37000: via_clock = 37500; break;
- case 41000: via_clock = 41666; break;
- }
-
- if (via_clock < 20000 || via_clock > 50000) {
- printk(KERN_WARNING DRV_NAME ": User given PCI clock speed "
- "impossible (%d), using 33 MHz instead.\n", via_clock);
- via_clock = 33333;
- }
-
- if (idx == 1)
- d.enablebits[1].reg = d.enablebits[0].reg = 0;
- else
- d.host_flags |= IDE_HFLAG_NO_AUTODMA;
-
- if (idx == VIA_IDFLAG_SINGLE)
- d.host_flags |= IDE_HFLAG_SINGLE;
-
- if ((via_config->flags & VIA_NO_UNMASK) == 0)
- d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
-
- d.udma_mask = via_config->udma_mask;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev) {
- printk(KERN_ERR DRV_NAME " %s: out of memory :(\n",
- pci_name(dev));
- return -ENOMEM;
- }
-
- vdev->via_config = via_config;
-
- rc = ide_pci_init_one(dev, &d, vdev);
- if (rc)
- kfree(vdev);
-
- return rc;
-}
-
-static void via_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
-
- ide_pci_remove(dev);
- kfree(vdev);
-}
-
-static const struct pci_device_id via_pci_tbl[] = {
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, via_pci_tbl);
-
-static struct pci_driver via_pci_driver = {
- .name = "VIA_IDE",
- .id_table = via_pci_tbl,
- .probe = via_init_one,
- .remove = via_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init via_ide_init(void)
-{
- return ide_pci_register_driver(&via_pci_driver);
-}
-
-static void __exit via_ide_exit(void)
-{
- pci_unregister_driver(&via_pci_driver);
-}
-
-module_init(via_ide_init);
-module_exit(via_ide_exit);
-
-MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for VIA IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ec1b9d306ba6..e6c543b5ee1d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1484,6 +1484,36 @@ static void __init sklh_idle_state_table_update(void)
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
+/**
+ * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
+ * idle states table.
+ */
+static void __init skx_idle_state_table_update(void)
+{
+ unsigned long long msr;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+
+ /*
+ * 000b: C0/C1 (no package C-state support)
+ * 001b: C2
+ * 010b: C6 (non-retention)
+ * 011b: C6 (retention)
+ * 111b: No Package C state limits.
+ */
+ if ((msr & 0x7) < 2) {
+ /*
+ * Use the CC6 + PC0 latency, and 3 times that
+ * latency for target_residency, if PC6 is
+ * disabled in the BIOS. This is consistent with
+ * how the intel_idle driver uses _CST to set
+ * target_residency.
+ */
+ skx_cstates[2].exit_latency = 92;
+ skx_cstates[2].target_residency = 276;
+ }
+}
+
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
@@ -1515,6 +1545,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
case INTEL_FAM6_SKYLAKE:
sklh_idle_state_table_update();
break;
+ case INTEL_FAM6_SKYLAKE_X:
+ skx_idle_state_table_update();
+ break;
}
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
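The limit field decoded above can be checked from userspace too; a minimal sketch, assuming the msr driver is loaded and root privileges (0xe2 is MSR_PKG_CST_CONFIG_CONTROL):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t msr;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* read the 8-byte MSR at register offset 0xe2 */
            if (fd < 0 || pread(fd, &msr, sizeof(msr), 0xe2) != sizeof(msr)) {
                    perror("msr");
                    return 1;
            }
            printf("pkg C-state limit: %llu -> PC6 %s\n",
                   (unsigned long long)(msr & 0x7),
                   (msr & 0x7) < 2 ? "disabled" : "reachable");
            close(fd);
            return 0;
    }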
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 9d3952b4674f..a27db78ea13e 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -771,6 +771,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
if (ret)
goto err;
+ if (channel >= indio_dev->num_channels) {
+ dev_err(indio_dev->dev.parent,
+ "Channel index >= number of channels\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
ret = of_property_read_u32_array(child, "diff-channels",
ain, 2);
if (ret)
@@ -850,6 +857,11 @@ static int ad7124_setup(struct ad7124_state *st)
return ret;
}
+static void ad7124_reg_disable(void *r)
+{
+ regulator_disable(r);
+}
+
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -895,17 +907,20 @@ static int ad7124_probe(struct spi_device *spi)
ret = regulator_enable(st->vref[i]);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
+ st->vref[i]);
+ if (ret)
+ return ret;
}
st->mclk = devm_clk_get(&spi->dev, "mclk");
- if (IS_ERR(st->mclk)) {
- ret = PTR_ERR(st->mclk);
- goto error_regulator_disable;
- }
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
- goto error_regulator_disable;
+ return ret;
ret = ad7124_soft_reset(st);
if (ret < 0)
@@ -935,11 +950,6 @@ error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_clk_disable_unprepare:
clk_disable_unprepare(st->mclk);
-error_regulator_disable:
- for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
- if (!IS_ERR_OR_NULL(st->vref[i]))
- regulator_disable(st->vref[i]);
- }
return ret;
}
@@ -948,17 +958,11 @@ static int ad7124_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7124_state *st = iio_priv(indio_dev);
- int i;
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
clk_disable_unprepare(st->mclk);
- for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
- if (!IS_ERR_OR_NULL(st->vref[i]))
- regulator_disable(st->vref[i]);
- }
-
return 0;
}
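The ad7124 hunks above swap hand-rolled unwinding for device-managed cleanup; a minimal sketch of the pattern, with hypothetical example_* names:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static void example_reg_disable(void *reg)
    {
            regulator_disable(reg);
    }

    static int example_enable_managed(struct device *dev, struct regulator *reg)
    {
            int ret = regulator_enable(reg);

            if (ret)
                    return ret;
            /* the disable now runs automatically on any later probe
             * failure and on device removal, so no error labels needed */
            return devm_add_action_or_reset(dev, example_reg_disable, reg);
    }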
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 2ed580521d81..1141cc13a124 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
+ int ret;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ?\n");
@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
goto error_disable_avdd;
}
- voltage_uv = regulator_get_voltage(st->avdd);
-
- if (voltage_uv > 0) {
- st->int_vref_mv = voltage_uv / 1000;
- } else {
- ret = voltage_uv;
+ ret = regulator_get_voltage(st->avdd);
+ if (ret < 0) {
dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
goto error_disable_avdd;
}
+ st->int_vref_mv = ret / 1000;
spi_set_drvdata(spi, indio_dev);
st->chip_info = of_device_get_match_data(&spi->dev);
@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
return 0;
error_disable_clk:
- clk_disable_unprepare(st->mclk);
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2)
+ clk_disable_unprepare(st->mclk);
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_dvdd:
@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
struct ad7192_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- clk_disable_unprepare(st->mclk);
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2)
+ clk_disable_unprepare(st->mclk);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
regulator_disable(st->dvdd);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index c945f1349623..60f21fed6dcb 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -167,6 +167,10 @@ struct ad7768_state {
* transfer buffers to live in their own cache lines.
*/
union {
+ struct {
+ __be32 chan;
+ s64 timestamp;
+ } scan;
__be32 d32;
u8 d8[2];
} data ____cacheline_aligned;
@@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
mutex_lock(&st->lock);
- ret = spi_read(st->spi, &st->data.d32, 3);
+ ret = spi_read(st->spi, &st->data.scan.chan, 3);
if (ret < 0)
goto err_unlock;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
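The new scan member exists so the buffer handed to iio_push_to_buffers_with_timestamp() has both room for and correct alignment of the trailing timestamp. The usual shape of such a layout, as a sketch using kernel types (example_scan is hypothetical):

    #include <linux/types.h>

    struct example_scan {
            __be32 chan;                    /* sample word read over SPI */
            s64 timestamp __aligned(8);     /* filled in by the IIO core */
    };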
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 5e980a06258e..440ef4c7be07 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
id &= AD7793_ID_MASK;
if (id != st->chip_info->id) {
+ ret = -ENODEV;
dev_err(&st->sd.spi->dev, "device ID query failed\n");
goto out;
}
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 9a649745cd0a..069b561ee768 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -59,8 +59,10 @@ struct ad7923_state {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * Ensure rx_buf can be directly used in iio_push_to_buffers_with_timestamp.
+ * Length = 8 channels + 4 extra slots for the 8-byte timestamp.
*/
- __be16 rx_buf[4] ____cacheline_aligned;
+ __be16 rx_buf[12] ____cacheline_aligned;
__be16 tx_buf[4];
};
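A quick check of the new rx_buf length: 8 channels at 2 bytes each is 16 bytes, and the s64 timestamp needs 8 more bytes starting on an 8-byte boundary, hence 24 bytes, i.e. twelve __be16 slots:

    #include <stdint.h>

    _Static_assert(12 * sizeof(uint16_t) ==
                   8 * sizeof(uint16_t) + sizeof(int64_t),
                   "8 samples plus an aligned 8-byte timestamp need 24 bytes");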
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
index 7ab2ccf90863..8107f7bbbe3c 100644
--- a/drivers/iio/dac/ad5770r.c
+++ b/drivers/iio/dac/ad5770r.c
@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
device_for_each_child_node(&st->spi->dev, child) {
ret = fwnode_property_read_u32(child, "num", &num);
if (ret)
- return ret;
- if (num >= AD5770R_MAX_CHANNELS)
- return -EINVAL;
+ goto err_child_out;
+ if (num >= AD5770R_MAX_CHANNELS) {
+ ret = -EINVAL;
+ goto err_child_out;
+ }
ret = fwnode_property_read_u32_array(child,
"adi,range-microamp",
tmp, 2);
if (ret)
- return ret;
+ goto err_child_out;
min = tmp[0] / 1000;
max = tmp[1] / 1000;
ret = ad5770r_store_output_range(st, min, max, num);
if (ret)
- return ret;
+ goto err_child_out;
}
+ return 0;
+
+err_child_out:
+ fwnode_handle_put(child);
return ret;
}
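The ad5770r fix works because device_for_each_child_node() takes a reference on each child it yields, so any early exit from the loop must drop that reference. A minimal sketch of the rule (example_parse_children is hypothetical):

    #include <linux/property.h>

    static int example_parse_children(struct device *dev)
    {
            struct fwnode_handle *child;
            u32 num;
            int ret;

            device_for_each_child_node(dev, child) {
                    ret = fwnode_property_read_u32(child, "num", &num);
                    if (ret) {
                            /* balance the reference the iterator holds */
                            fwnode_handle_put(child);
                            return ret;
                    }
            }
            return 0;
    }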
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 1a20c6b88e7d..645461c70454 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
if (ret < 0) {
dev_err(dev, "failed to read temp: %d\n", ret);
+ fxas21002c_pm_put(data);
goto data_unlock;
}
@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
&axis_be, sizeof(axis_be));
if (ret < 0) {
dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+ fxas21002c_pm_put(data);
goto data_unlock;
}
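Both fxas21002c hunks restore the same invariant: a successful runtime-PM get must be paired with a put on every exit path, errors included. A sketch using the generic runtime-PM API (the driver itself wraps this in its own pm helpers; do_read() is hypothetical):

    #include <linux/pm_runtime.h>

    static int example_read(struct device *dev)
    {
            int ret = pm_runtime_resume_and_get(dev);

            if (ret < 0)
                    return ret;
            ret = do_read(dev);                 /* hypothetical bus access */
            pm_runtime_put_autosuspend(dev);    /* also on the error path */
            return ret;
    }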
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d5e15a8c870d..64e4be1cbec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
goto err_free_attr;
}
+ if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
err = -EINVAL;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index b496f30ce066..364f69cd620f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1423,7 +1423,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
struct i40e_qv_info *iw_qvinfo;
u32 ceq_idx;
u32 i;
- u32 size;
+ size_t size;
if (!ldev->msix_count) {
i40iw_pr_err("No MSI-X vectors\n");
@@ -1433,8 +1433,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
iwdev->msix_count = ldev->msix_count;
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
- size += sizeof(struct i40e_qvlist_info);
- size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
+ size += struct_size(iw_qvlist, qv_info, iwdev->msix_count);
iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!iwdev->iw_msixtbl)
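struct_size() computes sizeof(*q) plus n trailing elements of a flexible array member, saturating instead of wrapping on overflow, which is why it replaces the open-coded sum above. A sketch with a hypothetical example_qvlist:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_qvlist {
            u32 num_vectors;
            struct { u32 v_idx; } qv_info[];    /* flexible array member */
    };

    static struct example_qvlist *example_alloc(u32 n)
    {
            struct example_qvlist *q;

            /* sizeof(*q) + n * sizeof(q->qv_info[0]), overflow-checked */
            q = kzalloc(struct_size(q, qv_info, n), GFP_KERNEL);
            if (q)
                    q->num_vectors = n;
            return q;
    }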
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 22898d97ecbd..230a6ae0ab5a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
- if (!mlx4_is_slave(dev->dev))
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
resp.response_length += sizeof(resp.hca_core_clock_offset);
- if (!err && !mlx4_is_slave(dev->dev)) {
+ if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
}
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
- if (!rdma_is_port_valid(qp->device, flow_attr->port))
- return ERR_PTR(-EINVAL);
-
if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index eb92cefffd77..9ce01f729673 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
- struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) {
- cqe = get_cqe(cq, i);
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
- init_cq_frag_buf(cq, &cq->buf);
+ init_cq_frag_buf(&cq->buf);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto ex;
- init_cq_frag_buf(cq, cq->resize_buf);
+ init_cq_frag_buf(cq->resize_buf);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 61475b571531..7af4df7a6823 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
+ struct mm_struct *mm;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if ((current->mm == page->mm) &&
+ (page->user_virt == (virt & PAGE_MASK)))
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
kfree(page);
goto out;
}
+ mmgrab(current->mm);
+ page->mm = current->mm;
list_add(&page->list, &context->db_page_list);
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
+ mmdrop(db->u.user_page->mm);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 2fc6a60c4e77..5fbc0a8454b9 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto free_ucmd;
}
- if (flow_attr->port > dev->num_ports ||
- (flow_attr->flags &
- ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ if (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
err = -EINVAL;
goto free_ucmd;
}
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+ err = -EINVAL;
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -2280,6 +2285,7 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
u8 ft_type, u8 dv_prt,
void *in, size_t len)
{
+ struct mlx5_pkt_reformat_params reformat_params;
enum mlx5_flow_namespace_type namespace;
u8 prm_prt;
int ret;
@@ -2292,9 +2298,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
if (ret)
return ret;
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = prm_prt;
+ reformat_params.size = len;
+ reformat_params.data = in;
maction->flow_action_raw.pkt_reformat =
- mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
- in, namespace);
+ mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
+ namespace);
if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
return ret;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4388afeff251..425423dfac72 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
mlx5_ib_can_load_pas_with_umr(dev, 0))
- ent->limit = dev->mdev->profile->mr_cache[i].limit;
+ ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
spin_lock_irq(&ent->lock);
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5r_deref_wait_odp_mkey(&mr->mmkey);
if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
- xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
- NULL, GFP_KERNEL);
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, NULL, GFP_KERNEL);
if (mr->mtt_mr) {
rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 782b2af8f211..1338c11fd121 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1559,12 +1559,16 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
}
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
- param = (struct mlx5_eq_param){
- .irq_index = 0,
+ param = (struct mlx5_eq_param) {
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+ if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
eq->core = mlx5_eq_create_generic(dev->mdev, &param);
+ free_cpumask_var(param.affinity);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
goto err_wq;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index d5a90a66b45c..5b05cf3837da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.kind = "ipoib",
+ .netns_refund = true,
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
.priv_size = sizeof(struct ipoib_dev_priv),
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 32d15809ae58..40a070a2e7f5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -67,9 +67,6 @@ config KEYBOARD_AMIGA
To compile this driver as a module, choose M here: the
module will be called amikbd.
-config ATARI_KBD_CORE
- bool
-
config KEYBOARD_APPLESPI
tristate "Apple SPI keyboard and trackpad"
depends on ACPI && EFI
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index c682b028f0a2..4f53d3c57e69 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -178,51 +178,6 @@ static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_LEVEL_HIGH,
};
-/*
- * Those tablets have their coordinates origin at the bottom right
- * of the tablet, as if rotated 180 degrees
- */
-static const struct dmi_system_id rotated_screen[] = {
-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
- {
- .ident = "Teclast X89",
- .matches = {
- /* tPAD is too generic, also match on bios date */
- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
- DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
- DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
- },
- },
- {
- .ident = "Teclast X98 Pro",
- .matches = {
- /*
- * Only match the BIOS date, because the manufacturer's
- * BIOS does not report the board name at all
- * (sometimes)...
- */
- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
- DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
- },
- },
- {
- .ident = "WinBook TW100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
- }
- },
- {
- .ident = "WinBook TW700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
- },
- },
-#endif
- {}
-};
-
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
@@ -1123,13 +1078,6 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
ABS_MT_POSITION_Y, ts->prop.max_y);
}
- if (dmi_check_system(rotated_screen)) {
- ts->prop.invert_x = true;
- ts->prop.invert_y = true;
- dev_dbg(&ts->client->dev,
- "Applying '180 degrees rotated screen' quirk\n");
- }
-
if (dmi_check_system(nine_bytes_report)) {
ts->contact_size = 9;
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index d1591a28b743..8f385f9c2dd3 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <asm/div64.h>
@@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
}
mutex_unlock(&bcm_voter_lock);
+ of_node_put(node);
return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
@@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
{ .compatible = "qcom,bcm-voter" },
{ }
};
+MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
static struct platform_driver qcom_icc_bcm_voter_driver = {
.probe = qcom_icc_bcm_voter_probe,
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 80e8e1916dd1..3ac42bbdefc6 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -884,7 +884,7 @@ static inline u64 build_inv_address(u64 address, size_t size)
* The msb-bit must be clear on the address. Just set all the
* lower bits.
*/
- address |= 1ull << (msb_diff - 1);
+ address |= (1ull << msb_diff) - 1;
}
/* Clear bits 11:0 */
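The one-line change above flips the mask semantics; a worked case with msb_diff = 3 shows why only the new form matches the "set all the lower bits" comment:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long msb_diff = 3;

            printf("old: %#llx\n", 1ull << (msb_diff - 1));  /* 0x4: one bit  */
            printf("new: %#llx\n", (1ull << msb_diff) - 1);  /* 0x7: bits 2:0 */
            return 0;
    }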
@@ -1714,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+ else
+ set_dma_ops(dev, NULL);
}
static void amd_iommu_release_device(struct device *dev)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 1757ac1e1623..84057cb9596c 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1142,7 +1142,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
if (err)
- goto err_unmap;
+ goto err_sysfs;
}
drhd->iommu = iommu;
@@ -1150,6 +1150,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
+err_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 708f430af1c4..be35284a2016 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
struct device *dev,
u32 pasid)
{
- int flags = PASID_FLAG_SUPERVISOR_MODE;
struct dma_pte *pgd = domain->pgd;
int agaw, level;
+ int flags = 0;
/*
* Skip top levels of page tables for iommu which has
@@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (level != 4 && level != 5)
return -EINVAL;
- flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ if (pasid != PASID_RID2PASID)
+ flags |= PASID_FLAG_SUPERVISOR_MODE;
+ if (level == 5)
+ flags |= PASID_FLAG_FL5LP;
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
flags |= PASID_FLAG_PAGE_SNOOP;
@@ -4606,6 +4609,8 @@ static int auxiliary_link_device(struct dmar_domain *domain,
if (!sinfo) {
sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ if (!sinfo)
+ return -ENOMEM;
sinfo->domain = domain;
sinfo->pdev = dev;
list_add(&sinfo->link_phys, &info->subdevices);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 72646bafc52f..72dc84821dad 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -699,7 +699,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
*/
- pasid_set_sre(pte);
+ if (pasid != PASID_RID2PASID)
+ pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 7c02481a81b4..c6e5ee4d9cef 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1136,6 +1136,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_iommu_drv = {
.driver.name = KBUILD_MODNAME,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 62543a4eccc0..4d5924e9f766 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -415,7 +415,7 @@ config GOLDFISH_PIC
for Goldfish based virtual platforms.
config QCOM_PDC
- bool "QCOM PDC"
+ tristate "QCOM PDC"
depends on ARCH_QCOM
select IRQ_DOMAIN_HIERARCHY
help
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 0b85d9a3fbff..552aa04ff063 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -66,8 +66,9 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int cascade_irq, combiner_irq;
+ unsigned int combiner_irq;
unsigned long status;
+ int ret;
chained_irq_enter(chip, desc);
@@ -80,12 +81,9 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc)
goto out;
combiner_irq = chip_data->hwirq_offset + __ffs(status);
- cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
-
- if (unlikely(!cascade_irq))
+ ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
+ if (unlikely(ret))
handle_bad_irq(desc);
- else
- generic_handle_irq(cascade_irq);
out:
chained_irq_exit(chip, desc);
@@ -179,10 +177,8 @@ static void __init combiner_init(void __iomem *combiner_base,
nr_irq = max_nr * IRQ_IN_COMBINER;
combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
- if (!combiner_data) {
- pr_warn("%s: could not allocate combiner data\n", __func__);
+ if (!combiner_data)
return;
- }
combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
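This and the irqchip hunks that follow all apply one conversion: the two-step irq_find_mapping() + generic_handle_irq() sequence becomes a single generic_handle_domain_irq() call, which resolves the hwirq within the domain and returns an error when no mapping exists. The shape of a converted chained handler, as a sketch (read_pending_bits() is hypothetical):

    #include <linux/irq.h>
    #include <linux/irqchip/chained_irq.h>

    static void example_chained_handler(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct irq_domain *domain = irq_desc_get_handler_data(desc);
            unsigned long pending;
            int bit;

            chained_irq_enter(chip, desc);
            pending = read_pending_bits();      /* hypothetical register read */
            for_each_set_bit(bit, &pending, 32)
                    generic_handle_domain_irq(domain, bit);
            chained_irq_exit(chip, desc);
    }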
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index 0b0a73739756..886de028a901 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -111,7 +111,6 @@ static void al_fic_irq_handler(struct irq_desc *desc)
struct irq_chip *irqchip = irq_desc_get_chip(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
unsigned long pending;
- unsigned int irq;
u32 hwirq;
chained_irq_enter(irqchip, desc);
@@ -119,10 +118,8 @@ static void al_fic_irq_handler(struct irq_desc *desc)
pending = readl_relaxed(fic->base + AL_FIC_CAUSE);
pending &= ~gc->mask_cache;
- for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) {
- irq = irq_find_mapping(domain, hwirq);
- generic_handle_irq(irq);
- }
+ for_each_set_bit(hwirq, &pending, NR_FIC_IRQS)
+ generic_handle_domain_irq(domain, hwirq);
chained_irq_exit(irqchip, desc);
}
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index c179e27062fd..b8c06bd8659e 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -50,6 +50,7 @@
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/limits.h>
#include <linux/of_address.h>
@@ -787,6 +788,12 @@ static int aic_init_cpu(unsigned int cpu)
return 0;
}
+static struct gic_kvm_info vgic_info __initdata = {
+ .type = GIC_V3,
+ .no_maint_irq_mask = true,
+ .no_hw_deactivation = true,
+};
+
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
int i;
@@ -843,6 +850,8 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
"irqchip/apple-aic/ipi:starting",
aic_init_cpu, NULL);
+ vgic_set_kvm_info(&vgic_info);
+
pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 32938dfc0e46..7557ab551295 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -582,20 +582,19 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
for (msinr = PCI_MSI_DOORBELL_START;
msinr < PCI_MSI_DOORBELL_END; msinr++) {
- int irq;
+ unsigned int irq;
if (!(msimask & BIT(msinr)))
continue;
- if (is_chained) {
- irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
- msinr - PCI_MSI_DOORBELL_START);
- generic_handle_irq(irq);
- } else {
- irq = msinr - PCI_MSI_DOORBELL_START;
+ irq = msinr - PCI_MSI_DOORBELL_START;
+
+ if (is_chained)
+ generic_handle_domain_irq(armada_370_xp_msi_inner_domain,
+ irq);
+ else
handle_domain_irq(armada_370_xp_msi_inner_domain,
irq, regs);
- }
}
}
#else
@@ -606,7 +605,6 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long irqmap, irqn, irqsrc, cpuid;
- unsigned int cascade_irq;
chained_irq_enter(chip, desc);
@@ -628,8 +626,7 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
continue;
}
- cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
- generic_handle_irq(cascade_irq);
+ generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 8d591c179f81..a47db16ff960 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -34,14 +34,12 @@ static void aspeed_i2c_ic_irq_handler(struct irq_desc *desc)
struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long bit, status;
- unsigned int bus_irq;
chained_irq_enter(chip, desc);
status = readl(i2c_ic->base);
- for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) {
- bus_irq = irq_find_mapping(i2c_ic->irq_domain, bit);
- generic_handle_irq(bus_irq);
- }
+ for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS)
+ generic_handle_domain_irq(i2c_ic->irq_domain, bit);
+
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index c90a3346b985..f3c6855a4cef 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -44,7 +44,6 @@ struct aspeed_scu_ic {
static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
{
- unsigned int irq;
unsigned int sts;
unsigned long bit;
unsigned long enabled;
@@ -74,9 +73,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
max = scu_ic->num_irqs + bit;
for_each_set_bit_from(bit, &status, max) {
- irq = irq_find_mapping(scu_ic->irq_domain,
- bit - scu_ic->irq_shift);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index 3d641bb6f3f1..92f001a5ff8d 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -50,7 +50,7 @@ static void ath79_misc_irq_handler(struct irq_desc *desc)
while (pending) {
int bit = __ffs(pending);
- generic_handle_irq(irq_linear_revmap(domain, bit));
+ generic_handle_domain_irq(domain, bit);
pending &= ~BIT(bit);
}
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index a1e004af23e7..adc1556ed332 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -254,7 +254,7 @@ static void bcm2836_chained_handle_irq(struct irq_desc *desc)
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- generic_handle_irq(irq_linear_revmap(intc.domain, hwirq));
+ generic_handle_domain_irq(intc.domain, hwirq);
}
IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 25c9a9c06e41..501facdb4570 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -161,7 +161,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
if (mbox_val) {
int hwirq = ffs(mbox_val) - 1;
- generic_handle_irq(irq_find_mapping(ipi_domain, hwirq));
+ generic_handle_domain_irq(ipi_domain, hwirq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 9dc9bf8cdcc4..a035c385ca7a 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -145,10 +145,8 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc)
~cpu->mask_cache[idx];
raw_spin_unlock_irqrestore(&intc->lock, flags);
- for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- generic_handle_irq(irq_find_mapping(intc->domain,
- base + hwirq));
- }
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
+ generic_handle_domain_irq(intc->domain, base + hwirq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index ad59656ccc28..f23d7651ea84 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -74,10 +74,8 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
data->irq_map_mask[idx];
irq_gc_unlock(gc);
- for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- generic_handle_irq(irq_find_mapping(b->domain,
- base + hwirq));
- }
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
+ generic_handle_domain_irq(b->domain, base + hwirq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index cdd6a42d4efa..8e0911561f2d 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -110,7 +110,7 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
do {
irq = ffs(status) - 1;
status &= ~(1 << irq);
- generic_handle_irq(irq_linear_revmap(b->domain, irq));
+ generic_handle_domain_irq(b->domain, irq);
} while (status);
out:
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 54b09d6c407c..a67266e44491 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -62,9 +62,8 @@ static void dw_apb_ictl_handle_irq_cascaded(struct irq_desc *desc)
while (stat) {
u32 hwirq = ffs(stat) - 1;
- u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
+ generic_handle_domain_irq(d, gc->irq_base + hwirq);
- generic_handle_irq(virq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index f47b41dfd023..a610821c8ff2 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -12,19 +12,6 @@
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-static const struct gic_kvm_info *gic_kvm_info;
-
-const struct gic_kvm_info *gic_get_kvm_info(void)
-{
- return gic_kvm_info;
-}
-
-void gic_set_kvm_info(const struct gic_kvm_info *info)
-{
- BUG_ON(gic_kvm_info != NULL);
- gic_kvm_info = info;
-}
-
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data)
{
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index ccba8b0fe0f5..27e3d4ed4f32 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -28,6 +28,4 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
-void gic_set_kvm_info(const struct gic_kvm_info *info);
-
#endif /* _IRQ_GIC_COMMON_H */
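
With gic_get_kvm_info()/gic_set_kvm_info() gone from the common irqchip code, the GIC drivers hand their probe data straight to the vGIC through vgic_set_kvm_info() (see the gic-v3 and gic hunks below, and the AIC hunk above). The consumer side is not part of this excerpt; a sketch of what it plausibly looks like, mirroring the BUG_ON guard that was just deleted — the kmemdup() copy is an assumption, inferred from the producer structs being marked __initdata:

	/* somewhere in KVM's vGIC code (assumed) */
	static const struct gic_kvm_info *kvm_info;

	void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
	{
		BUG_ON(kvm_info);
		kvm_info = kmemdup(info, sizeof(*info), GFP_KERNEL);	/* own copy, so the
						 * irqchip's struct may live in __initdata */
		BUG_ON(!kvm_info);
	}
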
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
index 1337ceceb59b..b60e1853593f 100644
--- a/drivers/irqchip/irq-gic-pm.c
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -30,10 +30,8 @@ static int gic_runtime_resume(struct device *dev)
int ret;
ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks);
- if (ret) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
+ if (ret)
return ret;
- }
/*
* On the very first resume, the pointer to chip_pm->chip_data
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 4116b48e60af..be9ea6fd6f8b 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -323,10 +323,8 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
struct v2m_data *v2m;
v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
- if (!v2m) {
- pr_err("Failed to allocate struct v2m_data.\n");
+ if (!v2m)
return -ENOMEM;
- }
INIT_LIST_HEAD(&v2m->entry);
v2m->fwnode = fwnode;
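
This hunk, and the matching ones in the ITS, imgpdc, imx-gpcv2 and sun4i drivers further down, drop messages on allocation failure: the slab/page allocators already dump a detailed warning (with backtrace) when an allocation without __GFP_NOWARN fails, so a driver-level pr_err() adds nothing. The resulting idiom:

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m)
		return -ENOMEM;	/* no message: the allocator already warned */
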
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2e6923c2c8a8..ba39668c3e08 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4895,10 +4895,8 @@ static int its_init_vpe_domain(void)
entries = roundup_pow_of_two(nr_cpu_ids);
vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
GFP_KERNEL);
- if (!vpe_proxy.vpes) {
- pr_err("ITS: Can't allocate GICv4 proxy device array\n");
+ if (!vpe_proxy.vpes)
return -ENOMEM;
- }
/* Use the last possible DevID */
devid = GENMASK(device_ids(its) - 1, 0);
@@ -5314,10 +5312,8 @@ static void __init acpi_table_parse_srat_its(void)
its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
GFP_KERNEL);
- if (!its_srat_maps) {
- pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+ if (!its_srat_maps)
return;
- }
acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 37a23aa6de37..e0f4debe64e1 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL(gic_nonsecure_priorities);
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;
-static struct gic_kvm_info gic_v3_kvm_info;
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);
#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
nmi_exit();
}
+static u32 do_read_iar(struct pt_regs *regs)
+{
+ u32 iar;
+
+ if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
+ u64 pmr;
+
+ /*
+ * We were in a context with IRQs disabled. However, the
+ * entry code has set PMR to a value that allows any
+ * interrupt to be acknowledged, and not just NMIs. This can
+ * lead to surprising effects if the NMI has been retired in
+	 * the meantime, and there is an IRQ pending. The IRQ
+ * would then be taken in NMI context, something that nobody
+ * wants to debug twice.
+ *
+ * Until we sort this, drop PMR again to a level that will
+ * actually only allow NMIs before reading IAR, and then
+ * restore it to what it was.
+ */
+ pmr = gic_read_pmr();
+ gic_pmr_mask_irqs();
+ isb();
+
+ iar = gic_read_iar();
+
+ gic_write_pmr(pmr);
+ } else {
+ iar = gic_read_iar();
+ }
+
+ return iar;
+}
+
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
- irqnr = gic_read_iar();
+ irqnr = do_read_iar(regs);
/* Check for special IDs first */
if ((irqnr >= 1020 && irqnr <= 1023))
@@ -1852,7 +1886,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
- gic_set_kvm_info(&gic_v3_kvm_info);
+ vgic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
@@ -2168,7 +2202,7 @@ static void __init gic_acpi_setup_kvm_info(void)
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
- gic_set_kvm_info(&gic_v3_kvm_info);
+ vgic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init
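
The new do_read_iar() above closes a pseudo-NMI window: in an IRQs-disabled context the entry code has already opened PMR enough to acknowledge anything, so a bare IAR read could ack a regular IRQ while the CPU is logically in NMI context. An annotated condensation of the sequence (same logic as the code above, comments added):

	pmr = gic_read_pmr();	/* save the entry-code PMR */
	gic_pmr_mask_irqs();	/* raise the mask so only NMIs can be acked */
	isb();			/* make the PMR write take effect before... */
	iar = gic_read_iar();	/* ...the acknowledge read */
	gic_write_pmr(pmr);	/* put the entry-code value back */
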
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b1d9c22caf2e..d329ec3d64d8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -119,7 +119,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
-static struct gic_kvm_info gic_v2_kvm_info;
+static struct gic_kvm_info gic_v2_kvm_info __initdata;
static DEFINE_PER_CPU(u32, sgi_intid);
@@ -375,8 +375,9 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
{
struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int cascade_irq, gic_irq;
+ unsigned int gic_irq;
unsigned long status;
+ int ret;
chained_irq_enter(chip, desc);
@@ -386,14 +387,10 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
if (gic_irq == GICC_INT_SPURIOUS)
goto out;
- cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
- if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
+ isb();
+ ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
+ if (unlikely(ret))
handle_bad_irq(desc);
- } else {
- isb();
- generic_handle_irq(cascade_irq);
- }
-
out:
chained_irq_exit(chip, desc);
}
@@ -1451,7 +1448,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
return;
if (static_branch_likely(&supports_deactivate_key))
- gic_set_kvm_info(&gic_v2_kvm_info);
+ vgic_set_kvm_info(&gic_v2_kvm_info);
}
int __init
@@ -1618,7 +1615,7 @@ static void __init gic_acpi_setup_kvm_info(void)
gic_v2_kvm_info.maint_irq = irq;
- gic_set_kvm_info(&gic_v2_kvm_info);
+ vgic_set_kvm_info(&gic_v2_kvm_info);
}
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
index 4f021530e7f3..513f6edbbe95 100644
--- a/drivers/irqchip/irq-goldfish-pic.c
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -34,15 +34,14 @@ static void goldfish_pic_cascade(struct irq_desc *desc)
{
struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
struct irq_chip *host_chip = irq_desc_get_chip(desc);
- u32 pending, hwirq, virq;
+ u32 pending, hwirq;
chained_irq_enter(host_chip, desc);
pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
while (pending) {
hwirq = __fls(pending);
- virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(gfpic->irq_domain, hwirq);
pending &= ~(1 << hwirq);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b6f6aa7b2862..b70ce0d3c092 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -333,13 +333,11 @@ static void i8259_irq_dispatch(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
int hwirq = i8259_poll();
- unsigned int irq;
if (hwirq < 0)
return;
- irq = irq_linear_revmap(domain, hwirq);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(domain, hwirq);
}
int __init i8259_of_init(struct device_node *node, struct device_node *parent)
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
index f0996820077a..0732a0e9af62 100644
--- a/drivers/irqchip/irq-idt3243x.c
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -28,7 +28,7 @@ static void idt_irq_dispatch(struct irq_desc *desc)
{
struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc);
struct irq_chip *host_chip = irq_desc_get_chip(desc);
- u32 pending, hwirq, virq;
+ u32 pending, hwirq;
chained_irq_enter(host_chip, desc);
@@ -36,9 +36,7 @@ static void idt_irq_dispatch(struct irq_desc *desc)
pending &= ~idtpic->gc->mask_cache;
while (pending) {
hwirq = __fls(pending);
- virq = irq_linear_revmap(idtpic->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(idtpic->irq_domain, hwirq);
pending &= ~(1 << hwirq);
}
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 698d07f48fed..5831be454673 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -223,7 +223,7 @@ static void pdc_intc_perip_isr(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct pdc_intc_priv *priv;
- unsigned int i, irq_no;
+ unsigned int i;
priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
@@ -237,14 +237,13 @@ static void pdc_intc_perip_isr(struct irq_desc *desc)
found:
/* pass on the interrupt */
- irq_no = irq_linear_revmap(priv->domain, i);
- generic_handle_irq(irq_no);
+ generic_handle_domain_irq(priv->domain, i);
}
static void pdc_intc_syswake_isr(struct irq_desc *desc)
{
struct pdc_intc_priv *priv;
- unsigned int syswake, irq_no;
+ unsigned int syswake;
unsigned int status;
priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
@@ -258,9 +257,7 @@ static void pdc_intc_syswake_isr(struct irq_desc *desc)
if (!(status & 1))
continue;
- irq_no = irq_linear_revmap(priv->domain,
- syswake_to_hwirq(syswake));
- generic_handle_irq(irq_no);
+ generic_handle_domain_irq(priv->domain, syswake_to_hwirq(syswake));
}
}
@@ -316,10 +313,8 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Allocate driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "cannot allocate device data\n");
+ if (!priv)
return -ENOMEM;
- }
raw_spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -356,10 +351,8 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Get peripheral IRQ numbers */
priv->perip_irqs = devm_kcalloc(&pdev->dev, 4, priv->nr_perips,
GFP_KERNEL);
- if (!priv->perip_irqs) {
- dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
+ if (!priv->perip_irqs)
return -ENOMEM;
- }
for (i = 0; i < priv->nr_perips; ++i) {
irq = platform_get_irq(pdev, 1 + i);
if (irq < 0)
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 7031ef44de4f..5b5a365dbd5e 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -228,10 +228,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
}
cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
- if (!cd) {
- pr_err("%pOF: kzalloc failed!\n", node);
+ if (!cd)
return -ENOMEM;
- }
raw_spin_lock_init(&cd->rlock);
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index 7709f9712cb3..e86ff743e98c 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -182,18 +182,15 @@ static void imx_intmux_irq_handler(struct irq_desc *desc)
struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
irqchip_data[idx]);
unsigned long irqstat;
- int pos, virq;
+ int pos;
chained_irq_enter(irq_desc_get_chip(desc), desc);
/* read the interrupt source pending status of this channel */
irqstat = readl_relaxed(data->regs + CHANIPR(idx));
- for_each_set_bit(pos, &irqstat, 32) {
- virq = irq_find_mapping(irqchip_data->domain, pos);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(pos, &irqstat, 32)
+ generic_handle_domain_irq(irqchip_data->domain, pos);
chained_irq_exit(irq_desc_get_chip(desc), desc);
}
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 1edf7692a790..8d91a02593fc 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -122,7 +122,7 @@ static void imx_irqsteer_irq_handler(struct irq_desc *desc)
for (i = 0; i < 2; i++, hwirq += 32) {
int idx = imx_irqsteer_get_reg_index(data, hwirq);
unsigned long irqmap;
- int pos, virq;
+ int pos;
if (hwirq >= data->reg_num * 32)
break;
@@ -130,11 +130,8 @@ static void imx_irqsteer_irq_handler(struct irq_desc *desc)
irqmap = readl_relaxed(data->regs +
CHANSTATUS(idx, data->reg_num));
- for_each_set_bit(pos, &irqmap, 32) {
- virq = irq_find_mapping(data->domain, pos + hwirq);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(pos, &irqmap, 32)
+ generic_handle_domain_irq(data->domain, pos + hwirq);
}
chained_irq_exit(irq_desc_get_chip(desc), desc);
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index b938d1d04d96..34a7d261b710 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -38,7 +38,7 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
irq_reg &= ~irq_mask;
for_each_set_bit(i, (unsigned long *)&irq_reg, 32)
- generic_handle_irq(irq_linear_revmap(domain, i));
+ generic_handle_domain_irq(domain, i);
chained_irq_exit(irq_chip, desc);
}
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index ea36bb00be80..cee839ca627e 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -49,8 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
while (pending) {
int bit = __fls(pending);
- irq = irq_linear_revmap(domain, bit + (i * 32));
- generic_handle_irq(irq);
+ generic_handle_domain_irq(domain, bit + (i * 32));
pending &= ~BIT(bit);
}
}
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 8118ebe80b09..d47c8041e5bc 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -89,7 +89,7 @@ static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
struct keystone_irq_device *kirq = keystone_irq;
unsigned long wa_lock_flags;
unsigned long pending;
- int src, virq;
+ int src, err;
dev_dbg(kirq->dev, "start irq %d\n", irq);
@@ -104,16 +104,14 @@ static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
for (src = 0; src < KEYSTONE_N_IRQ; src++) {
if (BIT(src) & pending) {
- virq = irq_find_mapping(kirq->irqd, src);
- dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n",
- src, virq);
- if (!virq)
- dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
- src, virq);
raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
- generic_handle_irq(virq);
+ err = generic_handle_domain_irq(kirq->irqd, src);
raw_spin_unlock_irqrestore(&kirq->wa_lock,
wa_lock_flags);
+
+ if (err)
+ dev_warn_ratelimited(kirq->dev, "spurious irq detected hwirq %d\n",
+ src);
}
}
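
The keystone hunk shows the other payoff of the new helper: its return value (an error when the hwirq has no mapping) replaces the manual irq_find_mapping() NULL check for spurious-interrupt detection, and the warning becomes ratelimited in the process. The same pattern appears in the mvebu-sei, pruss and PLIC hunks below:

	err = generic_handle_domain_irq(kirq->irqd, src);
	if (err)	/* no mapping for this hwirq: spurious */
		dev_warn_ratelimited(kirq->dev,
				     "spurious irq detected hwirq %d\n", src);
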
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
index 1b801c4fb026..f4abdf156de7 100644
--- a/drivers/irqchip/irq-loongson-htpic.c
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -48,7 +48,7 @@ static void htpic_irq_dispatch(struct irq_desc *desc)
break;
}
- generic_handle_irq(irq_linear_revmap(priv->domain, bit));
+ generic_handle_domain_irq(priv->domain, bit);
pending &= ~BIT(bit);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 6392aafb9a63..60a335d7e64e 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -47,8 +47,8 @@ static void htvec_irq_dispatch(struct irq_desc *desc)
while (pending) {
int bit = __ffs(pending);
- generic_handle_irq(irq_linear_revmap(priv->htvec_domain, bit +
- VEC_COUNT_PER_REG * i));
+ generic_handle_domain_irq(priv->htvec_domain,
+ bit + VEC_COUNT_PER_REG * i);
pending &= ~BIT(bit);
handled = true;
}
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 8ccb30421806..649c58391618 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -73,7 +73,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
while (pending) {
int bit = __ffs(pending);
- generic_handle_irq(irq_find_mapping(gc->domain, bit));
+ generic_handle_domain_irq(gc->domain, bit);
pending &= ~BIT(bit);
}
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 7d9b388afe64..5e6f6e25f2ae 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -141,7 +141,7 @@ static void lpc32xx_sic_handler(struct irq_desc *desc)
while (hwirq) {
irq = __ffs(hwirq);
hwirq &= ~BIT(irq);
- generic_handle_irq(irq_find_mapping(ic->domain, irq));
+ generic_handle_domain_irq(ic->domain, irq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 61dbfda08527..55322da51c56 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -194,7 +194,7 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
struct ls_scfg_msi *msi_data = msir->msi_data;
unsigned long val;
- int pos, size, virq, hwirq;
+ int pos, size, hwirq;
chained_irq_enter(irq_desc_get_chip(desc), desc);
@@ -206,9 +206,7 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
for_each_set_bit_from(pos, &val, size) {
hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
msir->srs;
- virq = irq_find_mapping(msi_data->parent, hwirq);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi_data->parent, hwirq);
}
chained_irq_exit(irq_desc_get_chip(desc), desc);
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 353111a10413..77a3f7dfaaf0 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -50,7 +50,7 @@ static void ls1x_chained_handle_irq(struct irq_desc *desc)
while (pending) {
int bit = __ffs(pending);
- generic_handle_irq(irq_find_mapping(priv->domain, bit));
+ generic_handle_domain_irq(priv->domain, bit);
pending &= ~BIT(bit);
}
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 2cb45c6b8501..f565317a3da3 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -273,6 +273,12 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
#ifdef CONFIG_ACPI
+static const struct acpi_device_id mbigen_acpi_match[] = {
+ { "HISI0152", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
+
static int mbigen_acpi_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
@@ -369,12 +375,6 @@ static const struct of_device_id mbigen_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mbigen_of_match);
-static const struct acpi_device_id mbigen_acpi_match[] = {
- { "HISI0152", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
-
static struct platform_driver mbigen_platform_driver = {
.driver = {
.name = "Hisilicon MBIGEN-V2",
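
The mbigen change is a pure move: the ACPI match table now sits above mbigen_acpi_create_domain() inside the same #ifdef CONFIG_ACPI region, so it is in scope where the probe path refers to it (C requires a declaration before use). How the table is hooked up is not visible in this excerpt; the conventional wiring, with the .acpi_match_table and .probe lines assumed rather than shown here, is:

	static struct platform_driver mbigen_platform_driver = {
		.driver = {
			.name			= "Hisilicon MBIGEN-V2",
			.of_match_table		= mbigen_of_match,
			.acpi_match_table	= ACPI_PTR(mbigen_acpi_match),
		},
		.probe	= mbigen_device_probe,	/* name assumed */
	};
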
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 215885962bb0..b146e069bf5b 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
@@ -147,7 +148,7 @@ int gic_get_c0_fdc_int(void)
static void gic_handle_shared_int(bool chained)
{
- unsigned int intr, virq;
+ unsigned int intr;
unsigned long *pcpu_mask;
DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -164,12 +165,12 @@ static void gic_handle_shared_int(bool chained)
bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
for_each_set_bit(intr, pending, gic_shared_intrs) {
- virq = irq_linear_revmap(gic_irq_domain,
- GIC_SHARED_TO_HWIRQ(intr));
if (chained)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
else
- do_IRQ(virq);
+ do_IRQ(irq_find_mapping(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr)));
}
}
@@ -307,7 +308,7 @@ static struct irq_chip gic_edge_irq_controller = {
static void gic_handle_local_int(bool chained)
{
unsigned long pending, masked;
- unsigned int intr, virq;
+ unsigned int intr;
pending = read_gic_vl_pend();
masked = read_gic_vl_mask();
@@ -315,12 +316,12 @@ static void gic_handle_local_int(bool chained)
bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
- virq = irq_linear_revmap(gic_irq_domain,
- GIC_LOCAL_TO_HWIRQ(intr));
if (chained)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
else
- do_IRQ(virq);
+ do_IRQ(irq_find_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr)));
}
}
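
The MIPS GIC split is instructive about the conversion's limits: only the chained path can use generic_handle_domain_irq(); the root-interrupt path still goes through do_IRQ(), which wants a Linux virq and does its own irq_enter()/irq_exit(), so an explicit irq_find_mapping() survives there. Schematically:

	if (chained)
		/* already inside another handler's context */
		generic_handle_domain_irq(gic_irq_domain, hwirq);
	else
		/* root entry: do_IRQ() needs the virq, so the lookup stays */
		do_IRQ(irq_find_mapping(gic_irq_domain, hwirq));
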
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 8235d98650c1..4d0c3532dbe7 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -107,7 +107,7 @@ static void ocelot_irq_handler(struct irq_desc *desc)
while (reg) {
u32 hwirq = __fls(reg);
- generic_handle_irq(irq_find_mapping(d, hwirq));
+ generic_handle_domain_irq(d, hwirq);
reg &= ~(BIT(hwirq));
}
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index eec63951129a..dc1cee4b0fe1 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -91,15 +91,12 @@ static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc)
struct mvebu_pic *pic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long irqmap, irqn;
- unsigned int cascade_irq;
irqmap = readl_relaxed(pic->base + PIC_CAUSE);
chained_irq_enter(chip, desc);
- for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
- cascade_irq = irq_find_mapping(pic->domain, irqn);
- generic_handle_irq(cascade_irq);
- }
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG)
+ generic_handle_domain_irq(pic->domain, irqn);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 3a7b7a7f20ca..4ecef6d83777 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -337,17 +337,12 @@ static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
unsigned long hwirq;
- unsigned int virq;
+ int err;
hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
- virq = irq_find_mapping(sei->sei_domain, hwirq);
- if (likely(virq)) {
- generic_handle_irq(virq);
- continue;
- }
-
- dev_warn(sei->dev,
- "Spurious IRQ detected (hwirq %lu)\n", hwirq);
+ err = generic_handle_domain_irq(sei->sei_domain, hwirq);
+ if (unlikely(err))
+ dev_warn(sei->dev, "Spurious IRQ detected (hwirq %lu)\n", hwirq);
}
}
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index f747e2209ea9..b31c4cff4d3a 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -40,9 +40,7 @@ static struct irq_domain *nvic_irq_domain;
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
- unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);
-
- handle_IRQ(irq, regs);
+ handle_domain_irq(nvic_irq_domain, hwirq, regs);
}
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index c4b5ffb61954..b6868f7b805a 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -117,7 +117,7 @@ static void orion_bridge_irq_handler(struct irq_desc *desc)
while (stat) {
u32 hwirq = __fls(stat);
- generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
+ generic_handle_domain_irq(d, gc->irq_base + hwirq);
stat &= ~(1 << hwirq);
}
}
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 0c4c8ed7064e..89c23a1566dc 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -124,13 +124,10 @@ static void partition_handle_irq(struct irq_desc *desc)
break;
}
- if (unlikely(hwirq == part->nr_parts)) {
+ if (unlikely(hwirq == part->nr_parts))
handle_bad_irq(desc);
- } else {
- unsigned int irq;
- irq = irq_find_mapping(part->domain, hwirq);
- generic_handle_irq(irq);
- }
+ else
+ generic_handle_domain_irq(part->domain, hwirq);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index 92fb5780dc10..fa8d89b02ec0 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -488,8 +488,7 @@ static void pruss_intc_irq_handler(struct irq_desc *desc)
while (true) {
u32 hipir;
- unsigned int virq;
- int hwirq;
+ int hwirq, err;
/* get highest priority pending PRUSS system event */
hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq));
@@ -497,16 +496,14 @@ static void pruss_intc_irq_handler(struct irq_desc *desc)
break;
hwirq = hipir & GENMASK(9, 0);
- virq = irq_find_mapping(intc->domain, hwirq);
+ err = generic_handle_domain_irq(intc->domain, hwirq);
/*
* NOTE: manually ACK any system events that do not have a
* handler mapped yet
*/
- if (WARN_ON_ONCE(!virq))
+ if (WARN_ON_ONCE(err))
pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
- else
- generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index b57c67dfab5b..fd9f275592d2 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -85,7 +85,7 @@ static void realtek_irq_dispatch(struct irq_desc *desc)
goto out;
}
domain = irq_desc_get_handler_data(desc);
- generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+ generic_handle_domain_irq(domain, __ffs(pending));
out:
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 11abc09ef76c..07a6d8b42b63 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -115,7 +115,7 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
if (ioread32(p->iomem + DETECT_STATUS) & bit) {
iowrite32(bit, p->iomem + DETECT_STATUS);
irqc_dbg(i, "demux2");
- generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
+ generic_handle_domain_irq(p->irq_domain, i->hw_irq);
return IRQ_HANDLED;
}
return IRQ_NONE;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 97d4d04b0a80..cf74cfa82045 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -233,13 +233,11 @@ static void plic_handle_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
while ((hwirq = readl(claim))) {
- int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
-
- if (unlikely(irq <= 0))
+ int err = generic_handle_domain_irq(handler->priv->irqdomain,
+ hwirq);
+ if (unlikely(err))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
hwirq);
- else
- generic_handle_irq(irq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 4704f2ee5797..33c76710f845 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -257,7 +257,7 @@ static void stm32_irq_handler(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int virq, nbanks = domain->gc->num_chips;
+ unsigned int nbanks = domain->gc->num_chips;
struct irq_chip_generic *gc;
unsigned long pending;
int n, i, irq_base = 0;
@@ -268,11 +268,9 @@ static void stm32_irq_handler(struct irq_desc *desc)
gc = irq_get_domain_generic_chip(domain, irq_base);
while ((pending = stm32_exti_pending(gc))) {
- for_each_set_bit(n, &pending, IRQS_PER_BANK) {
- virq = irq_find_mapping(domain, irq_base + n);
- generic_handle_irq(virq);
- }
- }
+ for_each_set_bit(n, &pending, IRQS_PER_BANK)
+ generic_handle_domain_irq(domain, irq_base + n);
+ }
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 9ea94456b178..8a315d6a3399 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -147,10 +147,8 @@ static int __init sun4i_ic_of_init(struct device_node *node,
struct device_node *parent)
{
irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
- if (!irq_ic_data) {
- pr_err("kzalloc failed!\n");
+ if (!irq_ic_data)
return -ENOMEM;
- }
irq_ic_data->enable_reg_offset = SUN4I_IRQ_ENABLE_REG_OFFSET;
irq_ic_data->mask_reg_offset = SUN4I_IRQ_MASK_REG_OFFSET;
@@ -164,10 +162,8 @@ static int __init suniv_ic_of_init(struct device_node *node,
struct device_node *parent)
{
irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
- if (!irq_ic_data) {
- pr_err("kzalloc failed!\n");
+ if (!irq_ic_data)
return -ENOMEM;
- }
irq_ic_data->enable_reg_offset = SUNIV_IRQ_ENABLE_REG_OFFSET;
irq_ic_data->mask_reg_offset = SUNIV_IRQ_MASK_REG_OFFSET;
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 9f2bd0c5d289..21d49791f855 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -88,10 +88,9 @@ static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int virq = irq_find_mapping(domain, 0);
chained_irq_enter(chip, desc);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(domain, 0);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 9a63b02b8176..8a0e69298e83 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -91,7 +91,7 @@ static void tb10x_irq_cascade(struct irq_desc *desc)
struct irq_domain *domain = irq_desc_get_handler_data(desc);
unsigned int irq = irq_desc_get_irq(desc);
- generic_handle_irq(irq_find_mapping(domain, irq));
+ generic_handle_domain_irq(domain, irq);
}
static int __init of_tb10x_init_irq(struct device_node *ictl,
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index ca1f593f4d13..97f454ec376b 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -147,7 +147,7 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
struct ti_sci_inta_vint_desc *vint_desc;
struct ti_sci_inta_irq_domain *inta;
struct irq_domain *domain;
- unsigned int virq, bit;
+ unsigned int bit;
unsigned long val;
vint_desc = irq_desc_get_handler_data(desc);
@@ -159,11 +159,8 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
VINT_STATUS_MASKED_OFFSET);
- for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
- virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT)
+ generic_handle_domain_irq(domain, vint_desc->events[bit].hwirq);
chained_irq_exit(irq_desc_get_chip(desc), desc);
}
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 2325fb3c482b..34337a61b1ef 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -79,10 +79,9 @@ static void ts4800_ic_chained_handle_irq(struct irq_desc *desc)
do {
unsigned int bit = __ffs(status);
- int irq = irq_find_mapping(data->domain, bit);
+ generic_handle_domain_irq(data->domain, bit);
status &= ~(1 << bit);
- generic_handle_irq(irq);
} while (status);
out:
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index f1386733d3bc..75be350cf82f 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -85,7 +85,7 @@ static void fpga_irq_handle(struct irq_desc *desc)
unsigned int irq = ffs(status) - 1;
status &= ~(1 << irq);
- generic_handle_irq(irq_find_mapping(f->domain, irq));
+ generic_handle_domain_irq(f->domain, irq);
} while (status);
out:
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 62f3d29f9042..1e1f2d115257 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -225,7 +225,7 @@ static void vic_handle_irq_cascaded(struct irq_desc *desc)
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
hwirq = ffs(stat) - 1;
- generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
+ generic_handle_domain_irq(vic->domain, hwirq);
}
chained_irq_exit(host_chip, desc);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 8cd1bfc73057..356a59755d63 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -110,20 +110,6 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
-static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
-{
- unsigned int irq = 0;
- u32 hwirq;
-
- hwirq = xintc_read(irqc, IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(irqc->root_domain, hwirq);
-
- pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
unsigned int xintc_get_irq(void)
{
unsigned int irq = -1;
@@ -164,15 +150,16 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xintc_irq_chip *irqc;
- u32 pending;
irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc);
do {
- pending = xintc_get_irq_local(irqc);
- if (pending == 0)
+ u32 hwirq = xintc_read(irqc, IVR);
+
+ if (hwirq == -1U)
break;
- generic_handle_irq(pending);
+
+ generic_handle_domain_irq(irqc->root_domain, hwirq);
} while (true);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index aa54bfcb0433..18e696dc7f4d 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -53,7 +53,6 @@ static void combiner_handle_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
for (reg = 0; reg < combiner->nregs; reg++) {
- int virq;
int hwirq;
u32 bit;
u32 status;
@@ -70,10 +69,7 @@ static void combiner_handle_irq(struct irq_desc *desc)
bit = __ffs(status);
status &= ~(1 << bit);
hwirq = irq_nr(reg, bit);
- virq = irq_find_mapping(combiner->domain, hwirq);
- if (virq > 0)
- generic_handle_irq(virq);
-
+ generic_handle_domain_irq(combiner->domain, hwirq);
}
}
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 5dc63c20b67e..32d59202d408 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -11,9 +11,11 @@
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -459,4 +461,8 @@ fail:
return ret;
}
-IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
+IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
+MODULE_LICENSE("GPL v2");
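
The PDC conversion swaps IRQCHIP_DECLARE() — built-in only, matched from the flattened device tree — for the IRQCHIP_PLATFORM_DRIVER_* macros, which expand to a platform driver and so allow the irqchip to be built as a module; that is why module.h and of_irq.h join the includes and the MODULE_* tags appear. The macro family composes like this (hypothetical driver and compatibles, for illustration only):

	IRQCHIP_PLATFORM_DRIVER_BEGIN(my_intc)
	IRQCHIP_MATCH("vendor,my-intc", my_intc_init)		/* compatible -> init fn */
	IRQCHIP_MATCH("vendor,my-intc-v2", my_intc_v2_init)
	IRQCHIP_PLATFORM_DRIVER_END(my_intc)
	MODULE_DESCRIPTION("Example modular irqchip driver");
	MODULE_LICENSE("GPL v2");
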
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 56bd2e9db6ed..e501cb03f211 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2342,7 +2342,7 @@ static void __exit
HFC_cleanup(void)
{
if (timer_pending(&hfc_tl))
- del_timer(&hfc_tl);
+ del_timer_sync(&hfc_tl);
pci_unregister_driver(&hfc_driver);
}
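
The hfcpci fix is about teardown ordering: del_timer() only deactivates a pending timer and returns immediately, so a callback already executing on another CPU can still be running while pci_unregister_driver() tears the driver down behind it. del_timer_sync() additionally waits for a running callback to finish. (The surviving timer_pending() check is harmless but redundant — del_timer_sync() copes with an inactive timer — though that cleanup is outside this fix.) The safe exit shape:

	static void __exit demo_cleanup(void)
	{
		del_timer_sync(&hfc_tl);	/* deactivate AND wait for the handler */
		pci_unregister_driver(&hfc_driver);
	}
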
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index ee925b58bbce..2a1ddd47a096 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
card->typ = NETJET_S_TJ300;
card->base = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 40588692cec7..e11ca6bbc7f4 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -17,9 +17,6 @@
#include "dsp.h"
#include "dsp_hwec.h"
-/* uncomment for debugging */
-/*#define PIPELINE_DEBUG*/
-
struct dsp_pipeline_entry {
struct mISDN_dsp_element *elem;
void *p;
@@ -104,10 +101,6 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
}
}
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
-#endif
-
return 0;
err2:
@@ -129,10 +122,6 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
list_for_each_entry_safe(entry, n, &dsp_elements, list)
if (entry->elem == elem) {
device_unregister(&entry->dev);
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: %s unregistered\n",
- __func__, elem->name);
-#endif
return;
}
printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
@@ -145,10 +134,6 @@ int dsp_pipeline_module_init(void)
if (IS_ERR(elements_class))
return PTR_ERR(elements_class);
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__);
-#endif
-
dsp_hwec_init();
return 0;
@@ -168,10 +153,6 @@ void dsp_pipeline_module_exit(void)
__func__, entry->elem->name);
kfree(entry);
}
-
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
-#endif
}
int dsp_pipeline_init(struct dsp_pipeline *pipeline)
@@ -181,10 +162,6 @@ int dsp_pipeline_init(struct dsp_pipeline *pipeline)
INIT_LIST_HEAD(&pipeline->list);
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__);
-#endif
-
return 0;
}
@@ -210,15 +187,11 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
return;
_dsp_pipeline_destroy(pipeline);
-
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__);
-#endif
}
int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
{
- int incomplete = 0, found = 0;
+ int found = 0;
char *dup, *tok, *name, *args;
struct dsp_element_entry *entry, *n;
struct dsp_pipeline_entry *pipeline_entry;
@@ -251,7 +224,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
printk(KERN_ERR "%s: failed to add "
"entry to pipeline: %s (out of "
"memory)\n", __func__, elem->name);
- incomplete = 1;
goto _out;
}
pipeline_entry->elem = elem;
@@ -268,20 +240,12 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
if (pipeline_entry->p) {
list_add_tail(&pipeline_entry->
list, &pipeline->list);
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: created "
- "instance of %s%s%s\n",
- __func__, name, args ?
- " with args " : "", args ?
- args : "");
-#endif
} else {
printk(KERN_ERR "%s: failed "
"to add entry to pipeline: "
"%s (new() returned NULL)\n",
__func__, elem->name);
kfree(pipeline_entry);
- incomplete = 1;
}
}
found = 1;
@@ -290,11 +254,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
if (found)
found = 0;
- else {
+ else
printk(KERN_ERR "%s: element not found, skipping: "
"%s\n", __func__, name);
- incomplete = 1;
- }
}
_out:
@@ -303,10 +265,6 @@ _out:
else
pipeline->inuse = 0;
-#ifdef PIPELINE_DEBUG
- printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n",
- __func__, incomplete ? " incomplete" : "", cfg);
-#endif
kfree(dup);
return 0;
}
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 40a948c08a0b..cf8a75494833 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -305,7 +305,6 @@ static int __nvm_config_extended(struct nvm_dev *dev,
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct nvm_ioctl_create_extended e;
- struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
struct nvm_target *t;
@@ -370,24 +369,16 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_t;
}
- tdisk = alloc_disk(0);
+ tdisk = blk_alloc_disk(dev->q->node);
if (!tdisk) {
ret = -ENOMEM;
goto err_dev;
}
- tqueue = blk_alloc_queue(dev->q->node);
- if (!tqueue) {
- ret = -ENOMEM;
- goto err_disk;
- }
-
strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
- tdisk->flags = GENHD_FL_EXT_DEVT;
tdisk->major = 0;
tdisk->first_minor = 0;
tdisk->fops = tt->bops;
- tdisk->queue = tqueue;
targetdata = tt->init(tgt_dev, tdisk, create->flags);
if (IS_ERR(targetdata)) {
@@ -396,14 +387,14 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
}
tdisk->private_data = targetdata;
- tqueue->queuedata = targetdata;
+ tdisk->queue->queuedata = targetdata;
mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
if (dev->geo.mdts) {
mdts = min_t(u32, dev->geo.mdts,
(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
}
- blk_queue_max_hw_sectors(tqueue, mdts);
+ blk_queue_max_hw_sectors(tdisk->queue, mdts);
set_capacity(tdisk, tt->capacity(targetdata));
add_disk(tdisk);
@@ -428,10 +419,7 @@ err_sysfs:
if (tt->exit)
tt->exit(targetdata, true);
err_init:
- blk_cleanup_queue(tqueue);
- tdisk->queue = NULL;
-err_disk:
- put_disk(tdisk);
+ blk_cleanup_disk(tdisk);
err_dev:
nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
@@ -445,10 +433,8 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
struct nvm_tgt_type *tt = t->type;
struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
del_gendisk(tdisk);
- blk_cleanup_queue(q);
if (tt->sysfs_exit)
tt->sysfs_exit(tdisk);
@@ -457,7 +443,7 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
tt->exit(tdisk->private_data, graceful);
nvm_remove_tgt_dev(t->dev, 1);
- put_disk(tdisk);
+ blk_cleanup_disk(tdisk);
module_put(t->type->owner);
list_del(&t->list);
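
The lightnvm hunks are part of the block layer's move to combined gendisk/queue allocation: blk_alloc_disk(node) returns a gendisk with its request_queue already embedded (allocated on the given NUMA node), and blk_cleanup_disk() tears both down, replacing the alloc_disk() + blk_alloc_queue() pair, the separate blk_cleanup_queue()/put_disk() teardown, and the GENHD_FL_EXT_DEVT flag. The new shape of a bio-based target's setup, condensed from the hunk above:

	tdisk = blk_alloc_disk(dev->q->node);	/* disk + embedded queue */
	if (!tdisk)
		return -ENOMEM;

	tdisk->fops = tt->bops;
	tdisk->queue->queuedata = targetdata;	/* the queue now hangs off the disk */
	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	/* on error or removal: one call instead of cleanup_queue + put_disk */
	blk_cleanup_disk(tdisk);
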
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 68de2c6af727..b4b780ea2ac8 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -160,6 +160,18 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config POLARFIRE_SOC_MAILBOX
+ tristate "PolarFire SoC (MPFS) Mailbox"
+ depends on HAS_IOMEM
+ depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ help
+ This driver adds support for the PolarFire SoC (MPFS) mailbox controller.
+
+	  To compile this driver as a module, choose M here. The
+ module will be called mailbox-mpfs.
+
+ If unsure, say N.
+
config QCOM_APCS_IPC
tristate "Qualcomm APCS IPC driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 7194fa92c787..c2089f04887e 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -41,6 +41,8 @@ obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
+obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index b7fbf276eb62..22243cabe056 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -122,10 +122,8 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
mhu->base = devm_ioremap_resource(dev, &adev->res);
- if (IS_ERR(mhu->base)) {
- dev_err(dev, "ioremap failed\n");
+ if (IS_ERR(mhu->base))
return PTR_ERR(mhu->base);
- }
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
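
This hunk, and the bcm-flexrm, bcm-pdc and bcm2835 ones that follow, delete another class of redundant logging: devm_ioremap_resource() already prints a descriptive error naming the resource when it fails, so the caller only needs to propagate the code:

	mhu->base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(mhu->base))
		return PTR_ERR(mhu->base);	/* the helper already logged why */
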
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index b4f33dc399a0..78073ad1f2f1 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1523,7 +1523,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
- dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
goto fail;
}
regs_end = mbox->regs + resource_size(iomem);
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 5b375985f7b8..8d3a4c1fe761 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -1577,7 +1577,6 @@ static int pdc_probe(struct platform_device *pdev)
pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
if (IS_ERR(pdcs->pdc_reg_vbase)) {
err = PTR_ERR(pdcs->pdc_reg_vbase);
- dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
goto cleanup_ring_pool;
}
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 39761d190545..86b7ce3549c5 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -157,7 +157,6 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
- dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
return ret;
}
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index 53f4bc2488c5..395ddc250828 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2017-2018 Hisilicon Limited.
+// Copyright (c) 2017-2018 HiSilicon Limited.
// Copyright (c) 2017-2018 Linaro Limited.
#include <linux/bitops.h>
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index cc236ac7a0b5..560cd09538b1 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -2,7 +2,7 @@
/*
* Hisilicon's Hi6220 mailbox driver
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
*
* Author: Leo Yan <leo.yan@linaro.org>
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 2543c7b6948b..0ce75c6b36b6 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -15,20 +15,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
-#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
-#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
-#define IMX_MU_xSR_BRDIP BIT(9)
-
-/* General Purpose Interrupt Enable */
-#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
-/* Receive Interrupt Enable */
-#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
-/* Transmit Interrupt Enable */
-#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
-/* General Purpose Interrupt Request */
-#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
-
#define IMX_MU_CHANS 16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
@@ -41,6 +27,21 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
+enum imx_mu_xcr {
+ IMX_MU_GIER,
+ IMX_MU_GCR,
+ IMX_MU_TCR,
+ IMX_MU_RCR,
+ IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+ IMX_MU_SR,
+ IMX_MU_GSR,
+ IMX_MU_TSR,
+ IMX_MU_RSR,
+};
+
struct imx_sc_rpc_msg_max {
struct imx_sc_rpc_msg hdr;
u32 data[7];
@@ -67,21 +68,41 @@ struct imx_mu_priv {
struct clk *clk;
int irq;
- u32 xcr;
+ u32 xcr[4];
bool side_b;
};
+enum imx_mu_type {
+ IMX_MU_V1,
+ IMX_MU_V2,
+};
+
struct imx_mu_dcfg {
int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
void (*init)(struct imx_mu_priv *priv);
- u32 xTR[4]; /* Transmit Registers */
- u32 xRR[4]; /* Receive Registers */
- u32 xSR; /* Status Register */
- u32 xCR; /* Control Register */
+ enum imx_mu_type type;
+ u32 xTR; /* Transmit Register0 */
+ u32 xRR; /* Receive Register0 */
+ u32 xSR[4]; /* Status Registers */
+ u32 xCR[4]; /* Control Registers */
};
+#define IMX_MU_xSR_GIPn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+
+
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct imx_mu_priv, mbox);
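
The i.MX mailbox rework above replaces the per-channel xTR[]/xRR[] offset tables with a single base offset addressed as base + idx * 4, and parameterises every status/control bit macro on the MU type, because V2 blocks number these bits up from bit 0 while the V1 layout counted down from the top of a four-channel field. A worked expansion, assuming BIT(n) == 1UL << n:

	/* Receive-full bit for channel 1:
	 *   V1: IMX_MU_xSR_RFn(IMX_MU_V1, 1) == BIT(24 + (3 - 1)) == BIT(26)
	 *   V2: IMX_MU_xSR_RFn(IMX_MU_V2, 1) == BIT(1)
	 *
	 * Transmit register for channel 2 on i.MX6SX (xTR base 0x0):
	 *   old: dcfg->xTR[2] == 0x8
	 *   new: dcfg->xTR + 2 * 4 == 0x8	(same offset, now computed)
	 */
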
@@ -97,16 +118,16 @@ static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
return ioread32(priv->base + offs);
}
-static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
+static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&priv->xcr_lock, flags);
- val = imx_mu_read(priv, priv->dcfg->xCR);
+ val = imx_mu_read(priv, priv->dcfg->xCR[type]);
val &= ~clr;
val |= set;
- imx_mu_write(priv, val, priv->dcfg->xCR);
+ imx_mu_write(priv, val, priv->dcfg->xCR[type]);
spin_unlock_irqrestore(&priv->xcr_lock, flags);
return val;
@@ -120,11 +141,11 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
switch (cp->type) {
case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
break;
case IMX_MU_TYPE_TXDB:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
tasklet_schedule(&cp->txdb_tasklet);
break;
default:
@@ -140,7 +161,7 @@ static int imx_mu_generic_rx(struct imx_mu_priv *priv,
{
u32 dat;
- dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
+ dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
mbox_chan_received_data(cp->chan, (void *)&dat);
return 0;
@@ -172,20 +193,20 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
}
for (i = 0; i < 4 && i < msg->hdr.size; i++)
- imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
for (; i < msg->hdr.size; i++) {
- ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
xsr,
- xsr & IMX_MU_xSR_TEn(i % 4),
+ xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % 4),
0, 100);
if (ret) {
dev_err(priv->dev, "Send data index: %d timeout\n", i);
return ret;
}
- imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
}
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
@@ -203,8 +224,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
int i, ret;
u32 xsr;
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
- *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR);
if (msg.hdr.size > sizeof(msg) / 4) {
dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
@@ -212,16 +233,16 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
}
for (i = 1; i < msg.hdr.size; i++) {
- ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
- xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
+ xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0, 100);
if (ret) {
dev_err(priv->dev, "timeout read idx %d\n", i);
return ret;
}
- *data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
mbox_chan_received_data(cp->chan, (void *)&msg);
return 0;
@@ -241,36 +262,45 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct imx_mu_con_priv *cp = chan->con_priv;
u32 val, ctrl;
- ctrl = imx_mu_read(priv, priv->dcfg->xCR);
- val = imx_mu_read(priv, priv->dcfg->xSR);
-
switch (cp->type) {
case IMX_MU_TYPE_TX:
- val &= IMX_MU_xSR_TEn(cp->idx) &
- (ctrl & IMX_MU_xCR_TIEn(cp->idx));
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
+ val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RX:
- val &= IMX_MU_xSR_RFn(cp->idx) &
- (ctrl & IMX_MU_xCR_RIEn(cp->idx));
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
+ val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RXDB:
- val &= IMX_MU_xSR_GIPn(cp->idx) &
- (ctrl & IMX_MU_xCR_GIEn(cp->idx));
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
default:
- break;
+ dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
+ cp->type);
+ return IRQ_NONE;
}
if (!val)
return IRQ_NONE;
- if (val == IMX_MU_xSR_TEn(cp->idx)) {
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_TX)) {
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
mbox_chan_txdone(chan, 0);
- } else if (val == IMX_MU_xSR_RFn(cp->idx)) {
+ } else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_RX)) {
priv->dcfg->rx(priv, cp);
- } else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
- imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
+ } else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_RXDB)) {
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xSR[IMX_MU_GSR]);
mbox_chan_received_data(chan, NULL);
} else {
dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
@@ -317,10 +347,10 @@ static int imx_mu_startup(struct mbox_chan *chan)
switch (cp->type) {
case IMX_MU_TYPE_RX:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
break;
case IMX_MU_TYPE_RXDB:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
+ imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
break;
default:
break;
@@ -342,13 +372,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
switch (cp->type) {
case IMX_MU_TYPE_TX:
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RX:
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
break;
case IMX_MU_TYPE_RXDB:
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
+ imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
default:
break;
@@ -444,7 +474,8 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
return;
/* Set default MU configuration */
- imx_mu_write(priv, 0, priv->dcfg->xCR);
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
static void imx_mu_init_scu(struct imx_mu_priv *priv)
@@ -466,7 +497,8 @@ static void imx_mu_init_scu(struct imx_mu_priv *priv)
priv->mbox.of_xlate = imx_mu_scu_xlate;
/* Set default MU configuration */
- imx_mu_write(priv, 0, priv->dcfg->xCR);
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -564,35 +596,47 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.tx = imx_mu_generic_tx,
.rx = imx_mu_generic_rx,
.init = imx_mu_init_generic,
- .xTR = {0x0, 0x4, 0x8, 0xc},
- .xRR = {0x10, 0x14, 0x18, 0x1c},
- .xSR = 0x20,
- .xCR = 0x24,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {0x20, 0x20, 0x20, 0x20},
+ .xCR = {0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
.tx = imx_mu_generic_tx,
.rx = imx_mu_generic_rx,
.init = imx_mu_init_generic,
- .xTR = {0x20, 0x24, 0x28, 0x2c},
- .xRR = {0x40, 0x44, 0x48, 0x4c},
- .xSR = 0x60,
- .xCR = 0x64,
+ .xTR = 0x20,
+ .xRR = 0x40,
+ .xSR = {0x60, 0x60, 0x60, 0x60},
+ .xCR = {0x64, 0x64, 0x64, 0x64},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .type = IMX_MU_V2,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
.tx = imx_mu_scu_tx,
.rx = imx_mu_scu_rx,
.init = imx_mu_init_scu,
- .xTR = {0x0, 0x4, 0x8, 0xc},
- .xRR = {0x10, 0x14, 0x18, 0x1c},
- .xSR = 0x20,
- .xCR = 0x24,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {0x20, 0x20, 0x20, 0x20},
+ .xCR = {0x24, 0x24, 0x24, 0x24},
};
static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
@@ -601,9 +645,12 @@ MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int i;
- if (!priv->clk)
- priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+ if (!priv->clk) {
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
+ }
return 0;
}
@@ -611,6 +658,7 @@ static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int i;
/*
* ONLY restore MU when context lost, the TIE could
@@ -620,8 +668,10 @@ static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
* send failed, may lead to system freeze. This issue
* is observed by testing freeze mode suspend.
*/
- if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
- imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+ if (!imx_mu_read(priv, priv->dcfg->xCR[0]) && !priv->clk) {
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
+ }
return 0;
}
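The imx-mailbox rework above replaces the per-index register tables (xTR[4], xRR[4]) with a single base offset plus a fixed 4-byte stride, and splits the combined status/control registers into per-function arrays so that the MU V2 layout on i.MX8ULP fits the same driver. A minimal standalone sketch of the resulting offset arithmetic follows; the layout values mirror the imx6sx and imx8ulp dcfg entries above, while the function names and main() are illustrative only.

/*
 * Sketch of the reworked register addressing: one base offset per
 * register bank plus a 4-byte stride, instead of a per-index table.
 */
#include <stdio.h>
#include <stdint.h>

struct mu_layout {
	const char *name;
	uint32_t xTR;	/* base of transmit registers */
	uint32_t xRR;	/* base of receive registers */
};

/* TRn/RRn live at a fixed 4-byte stride from the bank base */
static uint32_t tr_offset(const struct mu_layout *l, int idx)
{
	return l->xTR + idx * 4;
}

static uint32_t rr_offset(const struct mu_layout *l, int idx)
{
	return l->xRR + idx * 4;
}

int main(void)
{
	const struct mu_layout layouts[] = {
		{ "imx6sx (V1)",  0x0,   0x10  },
		{ "imx8ulp (V2)", 0x200, 0x280 },
	};

	for (size_t i = 0; i < 2; i++)
		for (int idx = 0; idx < 4; idx++)
			printf("%-12s TR%d=0x%03x RR%d=0x%03x\n",
			       layouts[i].name,
			       idx, (unsigned)tr_offset(&layouts[i], idx),
			       idx, (unsigned)rr_offset(&layouts[i], idx));
	return 0;
}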
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
new file mode 100644
index 000000000000..0d6e2231a2c7
--- /dev/null
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
+ *
+ * Copyright (c) 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <soc/microchip/mpfs.h>
+
+#define SERVICES_CR_OFFSET 0x50u
+#define SERVICES_SR_OFFSET 0x54u
+#define MAILBOX_REG_OFFSET 0x800u
+#define MSS_SYS_MAILBOX_DATA_OFFSET 0u
+#define SCB_MASK_WIDTH 16u
+
+/* SCBCTRL service control register */
+
+#define SCB_CTRL_REQ (0)
+#define SCB_CTRL_REQ_MASK BIT(SCB_CTRL_REQ)
+
+#define SCB_CTRL_BUSY (1)
+#define SCB_CTRL_BUSY_MASK BIT(SCB_CTRL_BUSY)
+
+#define SCB_CTRL_ABORT (2)
+#define SCB_CTRL_ABORT_MASK BIT(SCB_CTRL_ABORT)
+
+#define SCB_CTRL_NOTIFY (3)
+#define SCB_CTRL_NOTIFY_MASK BIT(SCB_CTRL_NOTIFY)
+
+#define SCB_CTRL_POS (16)
+#define SCB_CTRL_MASK GENMASK_ULL(SCB_CTRL_POS + SCB_MASK_WIDTH, SCB_CTRL_POS)
+
+/* SCBCTRL service status register */
+
+#define SCB_STATUS_REQ (0)
+#define SCB_STATUS_REQ_MASK BIT(SCB_STATUS_REQ)
+
+#define SCB_STATUS_BUSY (1)
+#define SCB_STATUS_BUSY_MASK BIT(SCB_STATUS_BUSY)
+
+#define SCB_STATUS_ABORT (2)
+#define SCB_STATUS_ABORT_MASK BIT(SCB_STATUS_ABORT)
+
+#define SCB_STATUS_NOTIFY (3)
+#define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
+
+#define SCB_STATUS_POS (16)
+#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS)
+
+struct mpfs_mbox {
+ struct mbox_controller controller;
+ struct device *dev;
+ int irq;
+ void __iomem *mbox_base;
+ void __iomem *int_reg;
+ struct mbox_chan chans[1];
+ struct mpfs_mss_response *response;
+ u16 resp_offset;
+};
+
+static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + SERVICES_SR_OFFSET);
+
+ return status & SCB_STATUS_BUSY_MASK;
+}
+
+static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_msg *msg = data;
+ u32 tx_trigger;
+ u16 opt_sel;
+ u32 val = 0u;
+
+ mbox->response = msg->response;
+ mbox->resp_offset = msg->resp_offset;
+
+ if (mpfs_mbox_busy(mbox))
+ return -EBUSY;
+
+ if (msg->cmd_data_size) {
+ u32 index;
+ u8 extra_bits = msg->cmd_data_size & 3;
+ u32 *word_buf = (u32 *)msg->cmd_data;
+
+ for (index = 0; index < (msg->cmd_data_size / 4); index++)
+ writel_relaxed(word_buf[index],
+ mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ if (extra_bits) {
+ u8 i;
+ u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4);
+ u8 *byte_buf = msg->cmd_data + byte_off;
+
+ val = readl_relaxed(mbox->mbox_base +
+ MAILBOX_REG_OFFSET + index * 0x4);
+
+ for (i = 0u; i < extra_bits; i++) {
+ val &= ~(0xffu << (i * 8u));
+ val |= (byte_buf[i] << (i * 8u));
+ }
+
+ writel_relaxed(val,
+ mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ }
+ }
+
+ opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
+ tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
+ tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
+ writel_relaxed(tx_trigger, mbox->mbox_base + SERVICES_CR_OFFSET);
+
+ return 0;
+}
+
+static void mpfs_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_response *response = mbox->response;
+ u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
+ u32 i;
+
+ if (!response->resp_msg) {
+ dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
+ return;
+ }
+
+ if (!mpfs_mbox_busy(mbox)) {
+ for (i = 0; i < num_words; i++) {
+ response->resp_msg[i] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_REG_OFFSET
+ + mbox->resp_offset + i * 0x4);
+ }
+ }
+
+ mbox_chan_received_data(chan, response);
+}
+
+static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+
+ writel_relaxed(0, mbox->int_reg);
+
+ mpfs_mbox_rx_data(chan);
+
+ mbox_chan_txdone(chan, 0);
+ return IRQ_HANDLED;
+}
+
+static int mpfs_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, mpfs_mbox_inbox_isr, 0, "mpfs-mailbox", chan);
+ if (ret)
+ dev_err(mbox->dev, "failed to register mailbox interrupt:%d\n", ret);
+
+ return ret;
+}
+
+static void mpfs_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops mpfs_mbox_ops = {
+ .send_data = mpfs_mbox_send_data,
+ .startup = mpfs_mbox_startup,
+ .shutdown = mpfs_mbox_shutdown,
+};
+
+static int mpfs_mbox_probe(struct platform_device *pdev)
+{
+ struct mpfs_mbox *mbox;
+ struct resource *regs;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+ mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
+ if (IS_ERR(mbox->int_reg))
+ return PTR_ERR(mbox->int_reg);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ mbox->dev = &pdev->dev;
+
+ mbox->chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = mbox->chans;
+ mbox->controller.ops = &mpfs_mbox_ops;
+ mbox->controller.txdone_irq = true;
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Registering MPFS mailbox controller failed\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "Registered MPFS mailbox controller driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_mbox_of_match[] = {
+ {.compatible = "microchip,polarfire-soc-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
+
+static struct platform_driver mpfs_mbox_driver = {
+ .driver = {
+ .name = "mpfs-mailbox",
+ .of_match_table = mpfs_mbox_of_match,
+ },
+ .probe = mpfs_mbox_probe,
+};
+module_platform_driver(mpfs_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("MPFS mailbox controller driver");
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 5665b6ea8119..67a42b514429 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -180,7 +180,7 @@ static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}
-static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
+static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
@@ -188,7 +188,11 @@ static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
data.sta = sta;
data.data = cb->data;
- cb->cb(data);
+ data.pkt = task->pkt;
+ if (cb->cb)
+ cb->cb(data);
+
+ mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
}
@@ -244,10 +248,10 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
curr_task = task;
if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
- cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
+ cmdq_task_exec_done(task, 0);
kfree(task);
} else if (err) {
- cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ cmdq_task_exec_done(task, -ENOEXEC);
cmdq_task_handle_error(curr_task);
kfree(task);
}
@@ -415,7 +419,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
- cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ cmdq_task_exec_done(task, -ECONNABORTED);
kfree(task);
}
@@ -452,11 +456,13 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
cb = &task->pkt->async_cb;
- if (cb->cb) {
- data.sta = CMDQ_CB_ERROR;
- data.data = cb->data;
+ data.sta = -ECONNABORTED;
+ data.data = cb->data;
+ data.pkt = task->pkt;
+ if (cb->cb)
cb->cb(data);
- }
+
+ mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
}
@@ -519,10 +525,8 @@ static int cmdq_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cmdq->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(cmdq->base)) {
- dev_err(dev, "failed to ioremap gce\n");
+ if (IS_ERR(cmdq->base))
return PTR_ERR(cmdq->base);
- }
cmdq->irq = platform_get_irq(pdev, 0);
if (cmdq->irq < 0)
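With the mtk-cmdq change above, the callback status moves from the driver-private enum cmdq_cb_status to plain negative errnos in data.sta, and completions are additionally delivered through mbox_chan_received_data(). A hedged sketch of what a client-side callback looks like after this change; the callback name is hypothetical, but the cmdq_cb_data fields and errno values match the hunks above.

/*
 * Hypothetical CMDQ client callback after the enum -> errno change:
 * data.sta is 0 on success, -ENOEXEC on an execution error, or
 * -ECONNABORTED when the channel was shut down or flushed.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

static void my_pkt_done(struct cmdq_cb_data data)
{
	if (!data.sta) {
		pr_debug("cmdq pkt %p completed\n", data.pkt);
		return;
	}

	if (data.sta == -ECONNABORTED)
		pr_warn("cmdq pkt %p aborted by flush/shutdown\n", data.pkt);
	else
		pr_err("cmdq pkt %p failed: %d\n", data.pkt, data.sta);
}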
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f25324d03842..03bdc96dc457 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -57,6 +57,10 @@ static const struct qcom_apcs_ipc_data sdm660_apcs_data = {
.offset = 8, .clk_name = NULL
};
+static const struct qcom_apcs_ipc_data sm6125_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
@@ -132,7 +136,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (apcs_data->clk_name) {
apcs->clk = platform_device_register_data(&pdev->dev,
apcs_data->clk_name,
- PLATFORM_DEVID_NONE,
+ PLATFORM_DEVID_AUTO,
NULL, 0);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
@@ -158,6 +162,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
@@ -166,6 +171,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &sm6125_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 2d13c72944c6..584700cd1585 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -155,6 +155,11 @@ static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
return 0;
}
+static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
+{
+ chan->con_priv = NULL;
+}
+
static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *ph)
{
@@ -184,6 +189,7 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
.send_data = qcom_ipcc_mbox_send_data,
+ .shutdown = qcom_ipcc_mbox_shutdown,
};
static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index f2014385d48b..0602e82a9516 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -47,7 +47,7 @@ config MD_AUTODETECT
If unsure, say Y.
config MD_LINEAR
- tristate "Linear (append) mode"
+ tristate "Linear (append) mode (deprecated)"
depends on BLK_DEV_MD
help
If you say Y here, then your multiple devices driver will be able to
@@ -158,7 +158,7 @@ config MD_RAID456
If unsure, say Y.
config MD_MULTIPATH
- tristate "Multipath I/O support"
+ tristate "Multipath I/O support (deprecated)"
depends on BLK_DEV_MD
help
MD_MULTIPATH provides a simple multi-path personality for use
@@ -169,7 +169,7 @@ config MD_MULTIPATH
If unsure, say N.
config MD_FAULTY
- tristate "Faulty test module for MD"
+ tristate "Faulty test module for MD (deprecated)"
depends on BLK_DEV_MD
help
The "faulty" module allows for a block device that occasionally returns
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0a4551e165ab..5fc989a6d452 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -364,7 +364,6 @@ struct cached_dev {
/* The rest of this all shows up in sysfs */
unsigned int sequential_cutoff;
- unsigned int readahead;
unsigned int io_disable:1;
unsigned int verify:1;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 29c231758293..6d1de889baeb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
- unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ unsigned int size_limit;
s->cache_missed = 1;
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
- s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
- reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_bdev->bd_disk) -
- bio_end_sector(bio));
-
- s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+ /* Limit to the maximum valid replace-key size and cache_bio bvec count */
+ size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+ (1 << KEY_SIZE_BITS) - 1);
+ s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
s->iop.replace_key = KEY(s->iop.inode,
bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+ miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+ &s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- if (reada)
- bch_mark_cache_readahead(s->iop.c, s->d);
-
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
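With readahead removed, the cache-miss insert size in cached_dev_cache_miss() is clamped by two hard limits: the number of bvecs a single bio can carry and the largest size a bkey can encode. A standalone sketch of that clamp is below; BIO_MAX_VECS = 256, PAGE_SECTORS = 8 (4 KiB pages) and KEY_SIZE_BITS = 16 are assumed values, so check them against the tree you build from.

/*
 * Sketch of the insert_bio_sectors clamp: min3(size_limit, sectors,
 * bio_sectors), where size_limit caps both bvec count and key size.
 */
#include <stdio.h>

#define BIO_MAX_VECS	256u	/* assumed */
#define PAGE_SECTORS	8u	/* assumed: 4 KiB pages */
#define KEY_SIZE_BITS	16	/* assumed */

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned int size_limit = BIO_MAX_VECS * PAGE_SECTORS;	/* 2048 */

	if (size_limit > (1u << KEY_SIZE_BITS) - 1)
		size_limit = (1u << KEY_SIZE_BITS) - 1;

	/* e.g. a 3000-sector miss arriving as a 1024-sector bio */
	printf("insert = %u sectors\n", min3u(size_limit, 3000u, 1024u));
	return 0;
}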
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 503aafe188dc..4c7ee5fedb9d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
@@ -64,7 +63,6 @@ SHOW(bch_stats)
DIV_SAFE(var(cache_hits) * 100,
var(cache_hits) + var(cache_misses)));
- var_print(cache_readaheads);
var_print(cache_miss_collisions);
sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
&sysfs_cache_bypass_hits,
&sysfs_cache_bypass_misses,
&sysfs_cache_hit_ratio,
- &sysfs_cache_readaheads,
&sysfs_cache_miss_collisions,
&sysfs_bypassed,
NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
acc->total.cache_misses = 0;
acc->total.cache_bypass_hits = 0;
acc->total.cache_bypass_misses = 0;
- acc->total.cache_readaheads = 0;
acc->total.cache_miss_collisions = 0;
acc->total.sectors_bypassed = 0;
}
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
scale_stat(&stats->cache_misses);
scale_stat(&stats->cache_bypass_hits);
scale_stat(&stats->cache_bypass_misses);
- scale_stat(&stats->cache_readaheads);
scale_stat(&stats->cache_miss_collisions);
scale_stat(&stats->sectors_bypassed);
}
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
move_stat(cache_misses);
move_stat(cache_bypass_hits);
move_stat(cache_bypass_misses);
- move_stat(cache_readaheads);
move_stat(cache_miss_collisions);
move_stat(sectors_bypassed);
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
mark_cache_stats(&c->accounting.collector, hit, bypass);
}
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
- atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index abfaabf7e7fc..ca4f435f7216 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -7,7 +7,6 @@ struct cache_stat_collector {
atomic_t cache_misses;
atomic_t cache_bypass_hits;
atomic_t cache_bypass_misses;
- atomic_t cache_readaheads;
atomic_t cache_miss_collisions;
atomic_t sectors_bypassed;
};
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bea8c4429ae8..185246a0d855 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -890,13 +890,9 @@ static void bcache_device_free(struct bcache_device *d)
if (disk_added)
del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
-
+ blk_cleanup_disk(disk);
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- if (disk_added)
- put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -946,7 +942,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err;
- d->disk = alloc_disk(BCACHE_MINORS);
+ d->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!d->disk)
goto err;
@@ -955,14 +951,11 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->major = bcache_major;
d->disk->first_minor = idx_to_first_minor(idx);
+ d->disk->minors = BCACHE_MINORS;
d->disk->fops = ops;
d->disk->private_data = d;
- q = blk_alloc_queue(NUMA_NO_NODE);
- if (!q)
- return -ENOMEM;
-
- d->disk->queue = q;
+ q = d->disk->queue;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index cc89f3156d1a..05ac1d6fbbf3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
-rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
var_printf(partial_stripes_expensive, "%u");
var_hprint(sequential_cutoff);
- var_hprint(readahead);
sysfs_print(running, atomic_read(&dc->running));
sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(sequential_cutoff,
dc->sequential_cutoff,
0, UINT_MAX);
- d_strtoi_h(readahead);
if (attr == &sysfs_clear_stats)
bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_running,
&sysfs_state,
&sysfs_label,
- &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_bypass_torture_test,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9c3bc3711b33..0dbd48cbdff9 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -530,7 +530,6 @@ static const struct blk_mq_ops dm_mq_ops = {
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
- struct request_queue *q;
struct dm_target *immutable_tgt;
int err;
@@ -557,12 +556,10 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
if (err)
goto out_kfree_tag_set;
- q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
+ err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ if (err)
goto out_tag_set;
- }
-
+ elevator_init_mq(md->queue);
return 0;
out_tag_set:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 420a12b42708..2c5f9e585211 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1694,13 +1694,13 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
del_gendisk(md->disk);
- put_disk(md->disk);
}
- if (md->queue) {
+ if (md->queue)
dm_queue_destroy_keyslot_manager(md->queue);
- blk_cleanup_queue(md->queue);
- }
+
+ if (md->disk)
+ blk_cleanup_disk(md->disk);
cleanup_srcu_struct(&md->io_barrier);
@@ -1763,13 +1763,10 @@ static struct mapped_device *alloc_dev(int minor)
* established. If request-based table is loaded: blk-mq will
* override accordingly.
*/
- md->queue = blk_alloc_queue(numa_node_id);
- if (!md->queue)
- goto bad;
-
- md->disk = alloc_disk_node(1, md->numa_node_id);
+ md->disk = blk_alloc_disk(md->numa_node_id);
if (!md->disk)
goto bad;
+ md->queue = md->disk->queue;
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
@@ -1782,6 +1779,7 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->major = _major;
md->disk->first_minor = minor;
+ md->disk->minors = 1;
md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md;
@@ -2230,7 +2228,7 @@ static bool md_in_flight_bios(struct mapped_device *md)
return sum != 0;
}
-static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
+static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
{
int r = 0;
DEFINE_WAIT(wait);
@@ -2253,7 +2251,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state
return r;
}
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
{
int r = 0;
@@ -2380,7 +2378,7 @@ static void unlock_fs(struct mapped_device *md)
* are being added to md->deferred list.
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, long task_state,
+ unsigned suspend_flags, unsigned int task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index ea3130e11680..e29c6298ef5c 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2616,7 +2616,7 @@ static struct attribute *md_bitmap_attrs[] = {
&max_backlog_used.attr,
NULL
};
-struct attribute_group md_bitmap_group = {
+const struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index fda4cb3f936f..c0dc6f2ef4a3 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -357,7 +357,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Fault injection personality for MD");
+MODULE_DESCRIPTION("Fault injection personality for MD (deprecated)");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 63ed8329a98d..1ff51647a682 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -312,7 +312,7 @@ static void linear_exit (void)
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Linear device concatenation personality for MD");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 776bbe542db5..e7d6486f090f 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -471,7 +471,7 @@ static void __exit multipath_exit (void)
module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("simple multi-path personality for MD");
+MODULE_DESCRIPTION("simple multi-path personality for MD (deprecated)");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 49f897fbb89b..ae8fe54ea358 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -441,30 +441,6 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-struct md_io {
- struct mddev *mddev;
- bio_end_io_t *orig_bi_end_io;
- void *orig_bi_private;
- struct block_device *orig_bi_bdev;
- unsigned long start_time;
-};
-
-static void md_end_io(struct bio *bio)
-{
- struct md_io *md_io = bio->bi_private;
- struct mddev *mddev = md_io->mddev;
-
- bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
-
- bio->bi_end_io = md_io->orig_bi_end_io;
- bio->bi_private = md_io->orig_bi_private;
-
- mempool_free(md_io, &mddev->md_io_pool);
-
- if (bio->bi_end_io)
- bio->bi_end_io(bio);
-}
-
static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
@@ -489,21 +465,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_end_io != md_end_io) {
- struct md_io *md_io;
-
- md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
- md_io->mddev = mddev;
- md_io->orig_bi_end_io = bio->bi_end_io;
- md_io->orig_bi_private = bio->bi_private;
- md_io->orig_bi_bdev = bio->bi_bdev;
-
- bio->bi_end_io = md_end_io;
- bio->bi_private = md_io;
-
- md_io->start_time = bio_start_io_acct(bio);
- }
-
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
@@ -824,7 +785,7 @@ out_free_new:
return ERR_PTR(error);
}
-static struct attribute_group md_redundancy_group;
+static const struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
{
@@ -841,7 +802,7 @@ void mddev_unlock(struct mddev *mddev)
* test it under the same mutex to ensure its correct value
* is seen.
*/
- struct attribute_group *to_remove = mddev->to_remove;
+ const struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
@@ -2379,7 +2340,15 @@ int md_integrity_register(struct mddev *mddev)
bdev_get_integrity(reference->bdev));
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
- if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
+ if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
+ (mddev->level != 1 && mddev->level != 10 &&
+ bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
+ /*
+ * No need to handle the failure of bioset_integrity_create here:
+ * this function is called from md_run() -> pers->run(), and on
+ * failure md_run() calls bioset_exit(), which in turn calls
+ * bioset_integrity_free().
+ */
pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
@@ -5538,7 +5507,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_degraded.attr,
NULL,
};
-static struct attribute_group md_redundancy_group = {
+static const struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
@@ -5598,17 +5567,16 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
- if (mddev->gendisk)
+ if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
- if (mddev->queue)
- blk_cleanup_queue(mddev->queue);
- if (mddev->gendisk)
- put_disk(mddev->gendisk);
+ blk_cleanup_disk(mddev->gendisk);
+ }
percpu_ref_exit(&mddev->writes_pending);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- mempool_exit(&mddev->md_io_pool);
+ if (mddev->level != 1 && mddev->level != 10)
+ bioset_exit(&mddev->io_acct_set);
kfree(mddev);
}
@@ -5705,26 +5673,14 @@ static int md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
- error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
- sizeof(struct md_io));
- if (error)
- goto abort;
-
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!mddev->queue)
+ disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (!disk)
goto abort;
- blk_set_stacking_limits(&mddev->queue->limits);
-
- disk = alloc_disk(1 << shift);
- if (!disk) {
- blk_cleanup_queue(mddev->queue);
- mddev->queue = NULL;
- goto abort;
- }
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
+ disk->minors = 1 << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
@@ -5733,7 +5689,9 @@ static int md_alloc(dev_t dev, char *name)
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
- disk->queue = mddev->queue;
+
+ mddev->queue = disk->queue;
+ blk_set_stacking_limits(&mddev->queue->limits);
blk_queue_write_cache(mddev->queue, true, true);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
@@ -5907,7 +5865,14 @@ int md_run(struct mddev *mddev)
if (!bioset_initialized(&mddev->sync_set)) {
err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
- return err;
+ goto exit_bio_set;
+ }
+ if (mddev->level != 1 && mddev->level != 10 &&
+ !bioset_initialized(&mddev->io_acct_set)) {
+ err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
+ offsetof(struct md_io_acct, bio_clone), 0);
+ if (err)
+ goto exit_sync_set;
}
spin_lock(&pers_lock);
@@ -6035,6 +6000,7 @@ int md_run(struct mddev *mddev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -6084,8 +6050,12 @@ bitmap_abort:
module_put(pers->owner);
md_bitmap_destroy(mddev);
abort:
- bioset_exit(&mddev->bio_set);
+ if (mddev->level != 1 && mddev->level != 10)
+ bioset_exit(&mddev->io_acct_set);
+exit_sync_set:
bioset_exit(&mddev->sync_set);
+exit_bio_set:
+ bioset_exit(&mddev->bio_set);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -6309,6 +6279,8 @@ void md_stop(struct mddev *mddev)
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
+ if (mddev->level != 1 && mddev->level != 10)
+ bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -8613,6 +8585,41 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+static void md_end_io_acct(struct bio *bio)
+{
+ struct md_io_acct *md_io_acct = bio->bi_private;
+ struct bio *orig_bio = md_io_acct->orig_bio;
+
+ orig_bio->bi_status = bio->bi_status;
+
+ bio_end_io_acct(orig_bio, md_io_acct->start_time);
+ bio_put(bio);
+ bio_endio(orig_bio);
+}
+
+/*
+ * Used by personalities that don't already clone the bio and thus can't
+ * easily add the timestamp to their extended bio structure.
+ */
+void md_account_bio(struct mddev *mddev, struct bio **bio)
+{
+ struct md_io_acct *md_io_acct;
+ struct bio *clone;
+
+ if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+ return;
+
+ clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+ md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
+ md_io_acct->orig_bio = *bio;
+ md_io_acct->start_time = bio_start_io_acct(*bio);
+
+ clone->bi_end_io = md_end_io_acct;
+ clone->bi_private = md_io_acct;
+ *bio = clone;
+}
+EXPORT_SYMBOL_GPL(md_account_bio);
+
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
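The key trick behind md_account_bio() above: io_acct_set is initialized with a front pad of offsetof(struct md_io_acct, bio_clone), so every bio cloned from it is the trailing bio_clone member of a surrounding md_io_acct, and the end_io handler can walk back to the accounting state with container_of(). A minimal standalone model of that recovery, with stub types:

/*
 * Model of the container_of() recovery used by md_end_io_acct():
 * the clone handed to the end_io callback is embedded at the end of
 * md_io_acct, so subtracting the member offset yields the container.
 */
#include <stddef.h>
#include <stdio.h>

struct bio { int dummy; };	/* stand-in for the real struct bio */

struct md_io_acct {
	struct bio *orig_bio;
	unsigned long start_time;
	struct bio bio_clone;	/* must stay the last member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct md_io_acct acct = { .start_time = 42 };
	struct bio *clone = &acct.bio_clone;	/* what end_io sees */

	struct md_io_acct *back =
		container_of(clone, struct md_io_acct, bio_clone);
	printf("recovered start_time = %lu\n", back->start_time);
	return 0;
}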
diff --git a/drivers/md/md.h b/drivers/md/md.h
index fb7eab58cfd5..832547cf038f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -395,10 +395,10 @@ struct mddev {
* that we are never stopping an array while it is open.
* 'reconfig_mutex' protects all other reconfiguration.
* These locks are separate due to conflicting interactions
- * with bdev->bd_mutex.
+ * with disk->open_mutex.
* Lock ordering is:
- * reconfig_mutex -> bd_mutex
- * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
+ * reconfig_mutex -> disk->open_mutex
+ * disk->open_mutex -> open_mutex: e.g. __blkdev_get -> md_open
*/
struct mutex open_mutex;
struct mutex reconfig_mutex;
@@ -481,13 +481,13 @@ struct mddev {
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
- struct attribute_group *to_remove;
+ const struct attribute_group *to_remove;
struct bio_set bio_set;
struct bio_set sync_set; /* for sync operations like
* metadata and bitmap writes
*/
- mempool_t md_io_pool;
+ struct bio_set io_acct_set; /* for raid0 and raid5 io accounting */
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
@@ -613,7 +613,7 @@ struct md_sysfs_entry {
ssize_t (*show)(struct mddev *, char *);
ssize_t (*store)(struct mddev *, const char *, size_t);
};
-extern struct attribute_group md_bitmap_group;
+extern const struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
@@ -684,6 +684,12 @@ struct md_thread {
void *private;
};
+struct md_io_acct {
+ struct bio *orig_bio;
+ unsigned long start_time;
+ struct bio bio_clone;
+};
+
#define THREAD_WAKEUP 0
static inline void safe_put_page(struct page *p)
@@ -715,6 +721,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
+void md_account_bio(struct mddev *mddev, struct bio **bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e5d7411cba9b..62c8b6adac70 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -546,6 +546,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
bio = split;
}
+ if (bio->bi_pool != &mddev->bio_set)
+ md_account_bio(mddev, &bio);
+
orig_sector = sector;
zone = find_zone(mddev->private, &sector);
switch (conf->layout) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ced076ba560e..51f2547c2007 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -300,6 +300,8 @@ static void call_bio_endio(struct r1bio *r1_bio)
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
bio->bi_status = BLK_STS_IOERR;
+ if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ bio_end_io_acct(bio, r1_bio->start_time);
bio_endio(bio);
}
@@ -1210,7 +1212,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
int max_sectors;
int rdisk;
- bool print_msg = !!r1_bio;
+ bool r1bio_existed = !!r1_bio;
char b[BDEVNAME_SIZE];
/*
@@ -1220,7 +1222,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
*/
gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
- if (print_msg) {
+ if (r1bio_existed) {
/* Need to get the block device name carefully */
struct md_rdev *rdev;
rcu_read_lock();
@@ -1252,7 +1254,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (rdisk < 0) {
/* couldn't find anywhere to read from */
- if (print_msg) {
+ if (r1bio_existed) {
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
mdname(mddev),
b,
@@ -1263,7 +1265,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
}
mirror = conf->mirrors + rdisk;
- if (print_msg)
+ if (r1bio_existed)
pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
mdname(mddev),
(unsigned long long)r1_bio->sector,
@@ -1292,6 +1294,9 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
r1_bio->read_disk = rdisk;
+ if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ r1_bio->start_time = bio_start_io_acct(bio);
+
read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
r1_bio->bios[rdisk] = read_bio;
@@ -1461,6 +1466,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->sectors = max_sectors;
}
+ if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ r1_bio->start_time = bio_start_io_acct(bio);
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index b7eb09e8c025..ccf10e59b116 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -158,6 +158,7 @@ struct r1bio {
sector_t sector;
int sectors;
unsigned long state;
+ unsigned long start_time;
struct mddev *mddev;
/*
* original bio going to /dev/mdx
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 13f5e6b2a73d..16977e8e075d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -297,6 +297,8 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
bio->bi_status = BLK_STS_IOERR;
+ if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ bio_end_io_acct(bio, r10_bio->start_time);
bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
@@ -1184,6 +1186,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
}
slot = r10_bio->read_slot;
+ if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ r10_bio->start_time = bio_start_io_acct(bio);
read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio;
@@ -1483,6 +1487,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->master_bio = bio;
}
+ if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ r10_bio->start_time = bio_start_io_acct(bio);
atomic_set(&r10_bio->remaining, 1);
md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 1461fd55311b..c34bb196790e 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -124,6 +124,7 @@ struct r10bio {
sector_t sector; /* virtual sector number */
int sectors;
unsigned long state;
+ unsigned long start_time;
struct mddev *mddev;
/*
* original bio going to /dev/mdx
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 841e1c1aa5e6..b8436e4930ed 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- WARN_ON_ONCE(bio->bi_bdev->bd_partno);
-
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
@@ -5364,11 +5362,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf,
*/
static void raid5_align_endio(struct bio *bi)
{
- struct bio* raid_bi = bi->bi_private;
+ struct md_io_acct *md_io_acct = bi->bi_private;
+ struct bio *raid_bi = md_io_acct->orig_bio;
struct mddev *mddev;
struct r5conf *conf;
struct md_rdev *rdev;
blk_status_t error = bi->bi_status;
+ unsigned long start_time = md_io_acct->start_time;
bio_put(bi);
@@ -5380,6 +5380,8 @@ static void raid5_align_endio(struct bio *bi)
rdev_dec_pending(rdev, conf->mddev);
if (!error) {
+ if (blk_queue_io_stat(raid_bi->bi_bdev->bd_disk->queue))
+ bio_end_io_acct(raid_bi, start_time);
bio_endio(raid_bi);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_quiescent);
@@ -5398,6 +5400,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
struct md_rdev *rdev;
sector_t sector, end_sector, first_bad;
int bad_sectors, dd_idx;
+ struct md_io_acct *md_io_acct;
+ bool did_inc;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("%s: non aligned\n", __func__);
@@ -5427,29 +5431,46 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
- bio_set_dev(align_bio, rdev->bdev);
- align_bio->bi_end_io = raid5_align_endio;
- align_bio->bi_private = raid_bio;
- align_bio->bi_iter.bi_sector = sector;
-
- raid_bio->bi_next = (void *)rdev;
-
- if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+ if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
&bad_sectors)) {
- bio_put(align_bio);
+ bio_put(raid_bio);
rdev_dec_pending(rdev, mddev);
return 0;
}
+ align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+ md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
+ raid_bio->bi_next = (void *)rdev;
+ if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
+ md_io_acct->start_time = bio_start_io_acct(raid_bio);
+ md_io_acct->orig_bio = raid_bio;
+
+ bio_set_dev(align_bio, rdev->bdev);
+ align_bio->bi_end_io = raid5_align_endio;
+ align_bio->bi_private = md_io_acct;
+ align_bio->bi_iter.bi_sector = sector;
+
/* No reshape active, so we can trust rdev->data_offset */
align_bio->bi_iter.bi_sector += rdev->data_offset;
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
- conf->device_lock);
- atomic_inc(&conf->active_aligned_reads);
- spin_unlock_irq(&conf->device_lock);
+ did_inc = false;
+ if (conf->quiesce == 0) {
+ atomic_inc(&conf->active_aligned_reads);
+ did_inc = true;
+ }
+ /* need a memory barrier to detect the race with raid5_quiesce() */
+ if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) {
+ /* quiesce is in progress, so we need to undo io activation and wait
+ * for it to finish
+ */
+ if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads))
+ wake_up(&conf->wait_for_quiescent);
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+ conf->device_lock);
+ atomic_inc(&conf->active_aligned_reads);
+ spin_unlock_irq(&conf->device_lock);
+ }
if (mddev->gendisk)
trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
@@ -5798,6 +5819,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ md_account_bio(mddev, &bi);
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
int previous;
@@ -6930,7 +6952,7 @@ static struct attribute *raid5_attrs[] = {
&ppl_write_hint.attr,
NULL,
};
-static struct attribute_group raid5_attrs_group = {
+static const struct attribute_group raid5_attrs_group = {
.name = NULL,
.attrs = raid5_attrs,
};
@@ -8336,7 +8358,10 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
* active stripes can drain
*/
r5c_flush_cache(conf, INT_MAX);
- conf->quiesce = 2;
+ /* need a memory barrier to make sure read_one_chunk() sees
+ * quiesce started and reverts to slow (locked) path.
+ */
+ smp_store_release(&conf->quiesce, 2);
wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 2a3e7ffefe0a..028a09a7531e 100644
--- a/drivers/media/cec/platform/s5p/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
@@ -35,10 +35,13 @@ MODULE_PARM_DESC(debug, "debug level (0-2)");
static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
+ int ret;
struct s5p_cec_dev *cec = cec_get_drvdata(adap);
if (enable) {
- pm_runtime_get_sync(cec->dev);
+ ret = pm_runtime_resume_and_get(cec->dev);
+ if (ret < 0)
+ return ret;
s5p_cec_reset(cec);
@@ -51,7 +54,7 @@ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
} else {
s5p_cec_mask_tx_interrupts(cec);
s5p_cec_mask_rx_interrupts(cec);
- pm_runtime_disable(cec->dev);
+ pm_runtime_put(cec->dev);
}
return 0;
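The s5p-cec fix illustrates two related corrections: pm_runtime_resume_and_get() already drops the usage count on failure (unlike pm_runtime_get_sync(), which leaves it raised), and the disable path must pair the get with pm_runtime_put() rather than pm_runtime_disable(). A hedged sketch of the canonical pattern; my_enable/my_disable are placeholders.

/*
 * Canonical runtime-PM pattern the change adopts: no pm_runtime_put()
 * is needed on the error path, because resume_and_get() has already
 * balanced the usage count when it fails.
 */
#include <linux/pm_runtime.h>

static int my_enable(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced */

	/* ... hardware setup ... */
	return 0;
}

static void my_disable(struct device *dev)
{
	/* pair the successful get; don't disable runtime PM entirely */
	pm_runtime_put(dev);
}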
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 4ea03b7899a8..0f6bde0f793e 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -13,6 +13,10 @@ config VIDEO_TVEEPROM
tristate
depends on I2C
+config TTPCI_EEPROM
+ tristate
+ depends on I2C
+
config CYPRESS_FIRMWARE
tristate
depends on USB
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index b71e4b62eea5..55b5a1900124 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -3,3 +3,4 @@ obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/ videobuf2/
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
+obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 410cc3ac6f94..bceaf91faa15 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -908,7 +908,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
void *buffer, size_t size)
{
struct sms_firmware *firmware = (struct sms_firmware *) buffer;
- struct sms_msg_data4 *msg;
+ struct sms_msg_data5 *msg;
u32 mem_address, calc_checksum = 0;
u32 i, *ptr;
u8 *payload = firmware->payload;
@@ -989,24 +989,20 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
goto exit_fw_download;
if (coredev->mode == DEVICE_MODE_NONE) {
- struct sms_msg_data *trigger_msg =
- (struct sms_msg_data *) msg;
-
pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n");
SMS_INIT_MSG(&msg->x_msg_header,
MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
- sizeof(struct sms_msg_hdr) +
- sizeof(u32) * 5);
+ sizeof(*msg));
- trigger_msg->msg_data[0] = firmware->start_address;
+ msg->msg_data[0] = firmware->start_address;
/* Entry point */
- trigger_msg->msg_data[1] = 6; /* Priority */
- trigger_msg->msg_data[2] = 0x200; /* Stack size */
- trigger_msg->msg_data[3] = 0; /* Parameter */
- trigger_msg->msg_data[4] = 4; /* Task ID */
+ msg->msg_data[1] = 6; /* Priority */
+ msg->msg_data[2] = 0x200; /* Stack size */
+ msg->msg_data[3] = 0; /* Parameter */
+ msg->msg_data[4] = 4; /* Task ID */
- rc = smscore_sendrequest_and_wait(coredev, trigger_msg,
- trigger_msg->x_msg_header.msg_length,
+ rc = smscore_sendrequest_and_wait(coredev, msg,
+ msg->x_msg_header.msg_length,
&coredev->trigger_done);
} else {
SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ,
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index 4a6b9f4c44ac..f8789ee0d554 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -624,9 +624,9 @@ struct sms_msg_data2 {
u32 msg_data[2];
};
-struct sms_msg_data4 {
+struct sms_msg_data5 {
struct sms_msg_hdr x_msg_header;
- u32 msg_data[4];
+ u32 msg_data[5];
};
struct sms_data_download {
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index cd5bafe9a3ac..f80caaa333da 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -26,8 +26,8 @@ Copyright (C) 2006-2008, Uri Shkolnik
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static struct list_head g_smsdvb_clients;
-static struct mutex g_smsdvb_clientslock;
+static LIST_HEAD(g_smsdvb_clients);
+static DEFINE_MUTEX(g_smsdvb_clientslock);
static u32 sms_to_guard_interval_table[] = {
[0] = GUARD_INTERVAL_1_32,
@@ -1212,6 +1212,10 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
return 0;
media_graph_error:
+ mutex_lock(&g_smsdvb_clientslock);
+ list_del(&client->entry);
+ mutex_unlock(&g_smsdvb_clientslock);
+
smsdvb_debugfs_release(client);
client_error:
@@ -1236,9 +1240,6 @@ static int __init smsdvb_module_init(void)
{
int rc;
- INIT_LIST_HEAD(&g_smsdvb_clients);
- mutex_init(&g_smsdvb_clientslock);
-
smsdvb_debugfs_register();
rc = smscore_register_hotplug(smsdvb_hotplug);
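The smsdvb change swaps runtime initialization for the LIST_HEAD()/DEFINE_MUTEX() static initializers, which are ready before any code runs and so close the window where a hotplug callback could fire before module_init() had set up the lock. A short sketch of the two forms side by side; the my_* names are placeholders.

/*
 * Compile-time vs runtime initialization, as in the smsdvb change.
 * The static forms need no setup in module_init().
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* before: must be initialized by hand in module_init() */
static struct list_head my_clients_runtime;
static struct mutex my_lock_runtime;

/* after: fully initialized at compile time */
static LIST_HEAD(my_clients);
static DEFINE_MUTEX(my_lock);

static int __init my_module_init(void)
{
	/* only needed for the runtime variants */
	INIT_LIST_HEAD(&my_clients_runtime);
	mutex_init(&my_lock_runtime);
	return 0;
}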
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/common/ttpci-eeprom.c
index ef8746684d31..ef8746684d31 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/common/ttpci-eeprom.c
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.h b/drivers/media/common/ttpci-eeprom.h
index ee741867ba47..ee741867ba47 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.h
+++ b/drivers/media/common/ttpci-eeprom.h
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
index 381158320a90..ce879f6f8f82 100644
--- a/drivers/media/common/videobuf2/frame_vector.c
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -64,7 +64,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
do {
unsigned long *nums = frame_vector_pfns(vec);
- vma = find_vma_intersection(mm, start, start + 1);
+ vma = vma_lookup(mm, start);
if (!vma)
break;
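vma_lookup(mm, addr) is the readable spelling of find_vma_intersection(mm, addr, addr + 1): it returns the VMA only if addr falls inside it, whereas bare find_vma() can return a VMA that starts above addr. The standalone model below captures that containment check with a trivial stand-in VMA; it is not kernel code.

/*
 * Model of vma_lookup(): return the VMA containing addr, or NULL.
 */
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static struct vma *lookup(struct vma *v, unsigned long addr)
{
	return (addr >= v->vm_start && addr < v->vm_end) ? v : NULL;
}

int main(void)
{
	struct vma v = { .vm_start = 0x1000, .vm_end = 0x2000 };

	printf("%p %p\n", (void *)lookup(&v, 0x1800),
	       (void *)lookup(&v, 0x2000));	/* hit, then NULL */
	return 0;
}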
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 7e96f67c60ba..2988bb38ceb1 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -939,6 +939,20 @@ void vb2_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
+int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
+{
+ if (type == q->type)
+ return 0;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ q->type = type;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_queue_change_type);
+
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
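The new vb2_queue_change_type() helper lets a driver retarget a single vb2 queue (for example between capture and output) but refuses with -EBUSY while buffers are allocated. A hedged usage sketch from a hypothetical VIDIOC_REQBUFS handler; my_ctx and its fields are invented, while vb2_queue_change_type() and vb2_reqbufs() are the real helpers.

/*
 * Sketch: flip a vb2 queue's type from reqbufs, only while idle.
 */
#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>

struct my_ctx { struct vb2_queue queue; };

static int my_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *rb)
{
	struct my_ctx *ctx = priv;
	int ret;

	/* fails with -EBUSY if buffers are still allocated */
	ret = vb2_queue_change_type(&ctx->queue, rb->type);
	if (ret)
		return ret;

	return vb2_reqbufs(&ctx->queue, rb);
}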
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index f14a872d1268..5d5a48475a54 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -720,7 +720,7 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
secfeed,
dvb_dmxdev_section_callback);
- if (ret < 0) {
+ if (!*secfeed) {
pr_err("DVB (%s): could not alloc feed\n",
__func__);
return ret;
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index b7e4a3371176..15a08d8c69ef 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1386,6 +1386,7 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
err = -EINVAL;
goto out_unlock;
}
+ slot = array_index_nospec(slot, ca->slot_count);
info->type = CA_CI_LINK;
info->flags = 0;
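The dvb_ca_en50221 fix applies the standard Spectre-v1 hardening: after the bounds check, array_index_nospec() clamps the index so a speculatively executed out-of-bounds load cannot leak data. The sketch below shows the generic pattern; my_lookup and its parameters are placeholders, while array_index_nospec() is the real kernel helper.

/*
 * Spectre-v1 pattern: validate the index, then pin it with
 * array_index_nospec() before using it, so the CPU cannot speculate
 * past the bounds check with the raw value.
 */
#include <linux/nospec.h>
#include <linux/errno.h>

static int my_lookup(unsigned int idx, const int *my_table,
		     unsigned int my_count, int *out)
{
	if (idx >= my_count)
		return -EINVAL;

	idx = array_index_nospec(idx, my_count);
	*out = my_table[idx];
	return 0;
}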
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index a6915582d1a6..258637d762d6 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -23,6 +23,7 @@
#include <linux/poll.h>
#include <linux/semaphore.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/list.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
@@ -1063,107 +1064,97 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
return 0;
}
-#define _DTV_CMD(n, s, b) \
-[n] = { \
- .name = #n, \
- .cmd = n, \
- .set = s,\
- .buffer = b \
-}
-
-struct dtv_cmds_h {
- char *name; /* A display name for debugging purposes */
+#define _DTV_CMD(n) \
+ [n] = #n
- __u32 cmd; /* A unique ID */
-
- /* Flags */
- __u32 set:1; /* Either a set or get property */
- __u32 buffer:1; /* Does this property use the buffer? */
- __u32 reserved:30; /* Align */
-};
-
-static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
- _DTV_CMD(DTV_TUNE, 1, 0),
- _DTV_CMD(DTV_CLEAR, 1, 0),
+static char *dtv_cmds[DTV_MAX_COMMAND + 1] = {
+ _DTV_CMD(DTV_TUNE),
+ _DTV_CMD(DTV_CLEAR),
/* Set */
- _DTV_CMD(DTV_FREQUENCY, 1, 0),
- _DTV_CMD(DTV_BANDWIDTH_HZ, 1, 0),
- _DTV_CMD(DTV_MODULATION, 1, 0),
- _DTV_CMD(DTV_INVERSION, 1, 0),
- _DTV_CMD(DTV_DISEQC_MASTER, 1, 1),
- _DTV_CMD(DTV_SYMBOL_RATE, 1, 0),
- _DTV_CMD(DTV_INNER_FEC, 1, 0),
- _DTV_CMD(DTV_VOLTAGE, 1, 0),
- _DTV_CMD(DTV_TONE, 1, 0),
- _DTV_CMD(DTV_PILOT, 1, 0),
- _DTV_CMD(DTV_ROLLOFF, 1, 0),
- _DTV_CMD(DTV_DELIVERY_SYSTEM, 1, 0),
- _DTV_CMD(DTV_HIERARCHY, 1, 0),
- _DTV_CMD(DTV_CODE_RATE_HP, 1, 0),
- _DTV_CMD(DTV_CODE_RATE_LP, 1, 0),
- _DTV_CMD(DTV_GUARD_INTERVAL, 1, 0),
- _DTV_CMD(DTV_TRANSMISSION_MODE, 1, 0),
- _DTV_CMD(DTV_INTERLEAVING, 1, 0),
-
- _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 1, 0),
- _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 1, 0),
- _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 1, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 1, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
-
- _DTV_CMD(DTV_STREAM_ID, 1, 0),
- _DTV_CMD(DTV_DVBT2_PLP_ID_LEGACY, 1, 0),
- _DTV_CMD(DTV_SCRAMBLING_SEQUENCE_INDEX, 1, 0),
- _DTV_CMD(DTV_LNA, 1, 0),
+ _DTV_CMD(DTV_FREQUENCY),
+ _DTV_CMD(DTV_BANDWIDTH_HZ),
+ _DTV_CMD(DTV_MODULATION),
+ _DTV_CMD(DTV_INVERSION),
+ _DTV_CMD(DTV_DISEQC_MASTER),
+ _DTV_CMD(DTV_SYMBOL_RATE),
+ _DTV_CMD(DTV_INNER_FEC),
+ _DTV_CMD(DTV_VOLTAGE),
+ _DTV_CMD(DTV_TONE),
+ _DTV_CMD(DTV_PILOT),
+ _DTV_CMD(DTV_ROLLOFF),
+ _DTV_CMD(DTV_DELIVERY_SYSTEM),
+ _DTV_CMD(DTV_HIERARCHY),
+ _DTV_CMD(DTV_CODE_RATE_HP),
+ _DTV_CMD(DTV_CODE_RATE_LP),
+ _DTV_CMD(DTV_GUARD_INTERVAL),
+ _DTV_CMD(DTV_TRANSMISSION_MODE),
+ _DTV_CMD(DTV_INTERLEAVING),
+
+ _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION),
+ _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING),
+ _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID),
+ _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX),
+ _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT),
+ _DTV_CMD(DTV_ISDBT_LAYER_ENABLED),
+ _DTV_CMD(DTV_ISDBT_LAYERA_FEC),
+ _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION),
+ _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT),
+ _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING),
+ _DTV_CMD(DTV_ISDBT_LAYERB_FEC),
+ _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION),
+ _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT),
+ _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING),
+ _DTV_CMD(DTV_ISDBT_LAYERC_FEC),
+ _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION),
+ _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT),
+ _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING),
+
+ _DTV_CMD(DTV_STREAM_ID),
+ _DTV_CMD(DTV_DVBT2_PLP_ID_LEGACY),
+ _DTV_CMD(DTV_SCRAMBLING_SEQUENCE_INDEX),
+ _DTV_CMD(DTV_LNA),
/* Get */
- _DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
- _DTV_CMD(DTV_API_VERSION, 0, 0),
-
- _DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
-
- _DTV_CMD(DTV_ATSCMH_PARADE_ID, 1, 0),
- _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
-
- _DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
- _DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
- _DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
- _DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
- _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
+ _DTV_CMD(DTV_DISEQC_SLAVE_REPLY),
+ _DTV_CMD(DTV_API_VERSION),
+
+ _DTV_CMD(DTV_ENUM_DELSYS),
+
+ _DTV_CMD(DTV_ATSCMH_PARADE_ID),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE),
+
+ _DTV_CMD(DTV_ATSCMH_FIC_VER),
+ _DTV_CMD(DTV_ATSCMH_NOG),
+ _DTV_CMD(DTV_ATSCMH_TNOG),
+ _DTV_CMD(DTV_ATSCMH_SGN),
+ _DTV_CMD(DTV_ATSCMH_PRC),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC),
+ _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D),
/* Statistics API */
- _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH, 0, 0),
- _DTV_CMD(DTV_STAT_CNR, 0, 0),
- _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT, 0, 0),
- _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT, 0, 0),
- _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT, 0, 0),
- _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT, 0, 0),
- _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT, 0, 0),
- _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH),
+ _DTV_CMD(DTV_STAT_CNR),
+ _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT),
+ _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT),
+ _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT),
+ _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT),
+ _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT),
+ _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT),
};
+static char *dtv_cmd_name(u32 cmd)
+{
+ cmd = array_index_nospec(cmd, DTV_MAX_COMMAND);
+ return dtv_cmds[cmd];
+}
+
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
@@ -1346,6 +1337,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
struct file *file)
{
int ncaps;
+ unsigned int len = 1;
switch (tvp->cmd) {
case DTV_ENUM_DELSYS:
@@ -1355,6 +1347,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
ncaps++;
}
tvp->u.buffer.len = ncaps;
+ len = ncaps;
break;
case DTV_FREQUENCY:
tvp->u.data = c->frequency;
@@ -1532,27 +1525,51 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
/* Fill quality measures */
case DTV_STAT_SIGNAL_STRENGTH:
tvp->u.st = c->strength;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_CNR:
tvp->u.st = c->cnr;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_PRE_ERROR_BIT_COUNT:
tvp->u.st = c->pre_bit_error;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_PRE_TOTAL_BIT_COUNT:
tvp->u.st = c->pre_bit_count;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_POST_ERROR_BIT_COUNT:
tvp->u.st = c->post_bit_error;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_POST_TOTAL_BIT_COUNT:
tvp->u.st = c->post_bit_count;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_ERROR_BLOCK_COUNT:
tvp->u.st = c->block_error;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
case DTV_STAT_TOTAL_BLOCK_COUNT:
tvp->u.st = c->block_count;
+ if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32))
+ tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32);
+ len = tvp->u.buffer.len;
break;
default:
dev_dbg(fe->dvb->device,
@@ -1561,18 +1578,13 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return -EINVAL;
}
- if (!dtv_cmds[tvp->cmd].buffer)
- dev_dbg(fe->dvb->device,
- "%s: GET cmd 0x%08x (%s) = 0x%08x\n",
- __func__, tvp->cmd, dtv_cmds[tvp->cmd].name,
- tvp->u.data);
- else
- dev_dbg(fe->dvb->device,
- "%s: GET cmd 0x%08x (%s) len %d: %*ph\n",
- __func__,
- tvp->cmd, dtv_cmds[tvp->cmd].name,
- tvp->u.buffer.len,
- tvp->u.buffer.len, tvp->u.buffer.data);
+ if (len < 1)
+ len = 1;
+
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) len %d: %*ph\n",
+ __func__, tvp->cmd, dtv_cmd_name(tvp->cmd),
+ tvp->u.buffer.len, tvp->u.buffer.len, tvp->u.buffer.data);
return 0;
}
@@ -1870,7 +1882,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
else
dev_dbg(fe->dvb->device,
"%s: SET cmd 0x%08x (%s) to 0x%08x\n",
- __func__, cmd, dtv_cmds[cmd].name, data);
+ __func__, cmd, dtv_cmd_name(cmd), data);
switch (cmd) {
case DTV_CLEAR:
/*
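The slimmed-down table above relies on the preprocessor's stringification operator inside a designated initializer. A self-contained sketch of the same pattern (enum and names hypothetical):

    #define CMD_NAME(n) [n] = #n

    enum { CMD_TUNE, CMD_CLEAR, CMD_MAX };

    static const char *cmd_names[CMD_MAX] = {
            CMD_NAME(CMD_TUNE),     /* expands to [CMD_TUNE] = "CMD_TUNE" */
            CMD_NAME(CMD_CLEAR),
    };

Entries not listed are implicitly NULL, which is why the dtv_cmd_name() helper above can simply index the array once the command is clamped.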
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 89620da983ba..dddebea644bb 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/nospec.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
#include <linux/uio.h>
@@ -1462,14 +1463,20 @@ static int dvb_net_do_ioctl(struct file *file,
struct net_device *netdev;
struct dvb_net_priv *priv_data;
struct dvb_net_if *dvbnetif = parg;
+ int if_num = dvbnetif->if_num;
- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num]) {
+ if (if_num >= DVB_NET_DEVICES_MAX) {
ret = -EINVAL;
goto ioctl_error;
}
+ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
- netdev = dvbnet->device[dvbnetif->if_num];
+ if (!dvbnet->state[if_num]) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
+
+ netdev = dvbnet->device[if_num];
priv_data = netdev_priv(netdev);
dvbnetif->pid=priv_data->pid;
@@ -1522,14 +1529,20 @@ static int dvb_net_do_ioctl(struct file *file,
struct net_device *netdev;
struct dvb_net_priv *priv_data;
struct __dvb_net_if_old *dvbnetif = parg;
+ int if_num = dvbnetif->if_num;
+
+ if (if_num >= DVB_NET_DEVICES_MAX) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
+ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num]) {
+ if (!dvbnet->state[if_num]) {
ret = -EINVAL;
goto ioctl_error;
}
- netdev = dvbnet->device[dvbnetif->if_num];
+ netdev = dvbnet->device[if_num];
priv_data = netdev_priv(netdev);
dvbnetif->pid=priv_data->pid;
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 3862ddc86ec4..795d9bfaba5c 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -506,6 +506,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
break;
if (minor == MAX_DVB_MINORS) {
+ list_del (&dvbdev->list_head);
kfree(dvbdevfops);
kfree(dvbdev);
up_write(&minor_rwsem);
@@ -526,6 +527,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
__func__);
dvb_media_device_free(dvbdev);
+ list_del (&dvbdev->list_head);
kfree(dvbdevfops);
kfree(dvbdev);
mutex_unlock(&dvbdev_register_lock);
@@ -541,6 +543,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
dvb_media_device_free(dvbdev);
+ list_del (&dvbdev->list_head);
kfree(dvbdevfops);
kfree(dvbdev);
return PTR_ERR(clsdev);
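The list_del() calls added above all undo a list_add() performed earlier in dvb_register_device(); without them, a freed node would remain on the adapter's device list. The general idiom, sketched with hypothetical types and helpers:

    static int register_dev_sketch(struct my_adapter *adap, struct my_dev *dev)
    {
            int err;

            list_add_tail(&dev->list_head, &adap->device_list);

            err = allocate_minor(dev);      /* hypothetical helper */
            if (err)
                    goto err_unlink;

            return 0;

    err_unlink:
            list_del(&dev->list_head);      /* must precede kfree(dev) */
            kfree(dev);
            return err;
    }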
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 3468b07b62fe..2c1ed98d43c5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -323,18 +323,6 @@ config DVB_TDA10071
comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
-config DVB_SP8870
- tristate "Spase sp8870 based"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
-
- This driver needs external firmware. Please use the command
- "<kerneldir>/scripts/get_dvb_firmware sp8870" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
-
config DVB_SP887X
tristate "Spase sp887x based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index b9f47d68e14e..d32e4c0be576 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_DVB_PLL) += dvb-pll.o
obj-$(CONFIG_DVB_STV0299) += stv0299.o
obj-$(CONFIG_DVB_STB0899) += stb0899.o
obj-$(CONFIG_DVB_STB6100) += stb6100.o
-obj-$(CONFIG_DVB_SP8870) += sp8870.o
obj-$(CONFIG_DVB_CX22700) += cx22700.o
obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
obj-$(CONFIG_DVB_CX24110) += cx24110.o
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h
index d62412f71c88..232b3b0d68c8 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h
@@ -75,9 +75,9 @@ TYPEDEFS
u16 result_len;
/*< result length in byte */
u16 *parameter;
- /*< General purpous param */
+ /*< General purpose param */
u16 *result;
- /*< General purpous param */};
+ /*< General purpose param */};
/*============================================================================*/
/*============================================================================*/
@@ -131,7 +131,7 @@ TYPEDEFS
DRXJ_CFG_MAX /* dummy, never to be used */};
/*
-* /struct enum drxj_cfg_smart_ant_io * smart antenna i/o.
+* /enum drxj_cfg_smart_ant_io * smart antenna i/o.
*/
enum drxj_cfg_smart_ant_io {
DRXJ_SMT_ANT_OUTPUT = 0,
@@ -139,7 +139,7 @@ enum drxj_cfg_smart_ant_io {
};
/*
-* /struct struct drxj_cfg_smart_ant * Set smart antenna.
+* /struct drxj_cfg_smart_ant * Set smart antenna.
*/
struct drxj_cfg_smart_ant {
enum drxj_cfg_smart_ant_io io;
@@ -159,7 +159,7 @@ struct drxj_agc_status {
/* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
/*
-* /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
+* /enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
*/
enum drxj_agc_ctrl_mode {
DRX_AGC_CTRL_AUTO = 0,
@@ -167,7 +167,7 @@ struct drxj_agc_status {
DRX_AGC_CTRL_OFF};
/*
-* /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
+* /struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
*/
struct drxj_cfg_agc {
enum drx_standard standard; /* standard for which these settings apply */
@@ -183,7 +183,7 @@ struct drxj_agc_status {
/* DRXJ_CFG_PRE_SAW */
/*
-* /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
+* /struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
*/
struct drxj_cfg_pre_saw {
enum drx_standard standard; /* standard to which these settings apply */
@@ -193,7 +193,7 @@ struct drxj_agc_status {
/* DRXJ_CFG_AFE_GAIN */
/*
-* /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
+* /struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
*/
struct drxj_cfg_afe_gain {
enum drx_standard standard; /* standard to which these settings apply */
@@ -220,14 +220,14 @@ struct drxj_agc_status {
};
/*
-* /struct struct drxj_cfg_vsb_misc * symbol error rate
+* /struct drxj_cfg_vsb_misc * symbol error rate
*/
struct drxj_cfg_vsb_misc {
u32 symb_error;
/*< symbol error rate sps */};
/*
-* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
+* /enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
*
*/
enum drxj_mpeg_start_width {
@@ -235,7 +235,7 @@ struct drxj_agc_status {
DRXJ_MPEG_START_WIDTH_8CLKCYC};
/*
-* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
+* /enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
*
*/
enum drxj_mpeg_output_clock_rate {
@@ -261,7 +261,7 @@ struct drxj_agc_status {
enum drxj_mpeg_start_width mpeg_start_width; /*< set MPEG output start width */};
/*
-* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
+* /enum drxj_xtal_freq * Supported external crystal reference frequency.
*/
enum drxj_xtal_freq {
DRXJ_XTAL_FREQ_RSVD,
@@ -270,14 +270,15 @@ struct drxj_agc_status {
DRXJ_XTAL_FREQ_4MHZ};
/*
-* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
+* /enum drxj_xtal_freq * Supported external crystal reference frequency.
*/
enum drxji2c_speed {
DRXJ_I2C_SPEED_400KBPS,
DRXJ_I2C_SPEED_100KBPS};
/*
-* /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc...
+* /struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal
+* reference frequency, I2C speed, etc...
*/
struct drxj_cfg_hw_cfg {
enum drxj_xtal_freq xtal_freq;
@@ -364,7 +365,7 @@ struct drxj_cfg_oob_misc {
DRXJ_SIF_ATTENUATION_9DB};
/*
-* /struct struct drxj_cfg_atv_output * SIF attenuation setting.
+* /struct drxj_cfg_atv_output * SIF attenuation setting.
*
*/
struct drxj_cfg_atv_output {
@@ -453,10 +454,10 @@ struct drxj_cfg_atv_output {
enum drxuio_mode uio_gpio_mode; /*< current mode of ASEL pin */
enum drxuio_mode uio_irqn_mode; /*< current mode of IRQN pin */
- /* IQM fs frequecy shift and inversion */
+ /* IQM fs frequency shift and inversion */
u32 iqm_fs_rate_ofs; /*< frequency shifter setting after setchannel */
bool pos_image; /*< True: positive image */
- /* IQM RC frequecy shift */
+ /* IQM RC frequency shift */
u32 iqm_rc_rate_ofs; /*< frequency shifter setting after setchannel */
/* ATV configuration */
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index 83030643aba7..a246db683cdf 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -224,7 +224,9 @@ static int mxl692_validate_fw_header(struct mxl692_dev *dev,
u32 ix, temp;
__be32 *local_buf = NULL;
u8 temp_cksum = 0;
- const u8 fw_hdr[] = { 0x4D, 0x31, 0x10, 0x02, 0x40, 0x00, 0x00, 0x80 };
+ static const u8 fw_hdr[] = {
+ 0x4D, 0x31, 0x10, 0x02, 0x40, 0x00, 0x00, 0x80
+ };
if (memcmp(buffer, fw_hdr, 8) != 0) {
status = -EINVAL;
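Marking fw_hdr as static const matters here: a plain const array local to a function is typically rebuilt (copied onto the stack) on every call, while static const places a single copy in read-only data. As a sketch:

    void f(void)
    {
            const u8 on_stack[] = { 0x4D, 0x31 };         /* usually copied per call */
            static const u8 in_rodata[] = { 0x4D, 0x31 }; /* one shared copy */
    }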
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index ef6feb299d46..1a2f0d2adadf 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1130,8 +1130,6 @@ static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
f->fmt.sdr.pixelformat = dev->pixelformat;
f->fmt.sdr.buffersize = dev->buffersize;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
-
return 0;
}
@@ -1149,7 +1147,6 @@ static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
if (vb2_is_busy(q))
return -EBUSY;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
dev->pixelformat = formats[i].pixelformat;
@@ -1177,7 +1174,6 @@ static int rtl2832_sdr_try_fmt_sdr_cap(struct file *file, void *priv,
dev_dbg(&pdev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 462c0e059754..588f8eb95984 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -217,6 +217,7 @@ config VIDEO_ADV7180
depends on GPIOLIB && VIDEO_V4L2 && I2C
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
help
Support for the Analog Devices ADV7180 video decoder.
@@ -534,6 +535,7 @@ config VIDEO_ADV7175
config VIDEO_ADV7343
tristate "ADV7343 video encoder"
depends on I2C
+ select V4L2_ASYNC
help
Support for Analog Devices I2C bus based ADV7343 encoder.
@@ -652,6 +654,7 @@ config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
select REGMAP_I2C
+ select V4L2_ASYNC
help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
@@ -668,6 +671,7 @@ menu "Miscellaneous helper chips"
config VIDEO_THS7303
tristate "THS7303/53 Video Amplifier"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for TI THS7303/53 video amplifier
@@ -738,6 +742,17 @@ config VIDEO_HI556
To compile this driver as a module, choose M here: the
module will be called hi556.
+config VIDEO_IMX208
+ tristate "Sony IMX208 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX208 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx208.
+
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2
@@ -1341,6 +1356,7 @@ config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
depends on GPIOLIB && I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
+ select V4L2_ASYNC
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
@@ -1350,6 +1366,7 @@ config VIDEO_AK7375
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
help
This is a driver for the AK7375 camera lens voice coil.
AK7375 is a 12 bit DAC with 120mA output current sink
@@ -1361,6 +1378,7 @@ config VIDEO_DW9714
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
help
This is a driver for the DW9714 camera lens voice coil.
DW9714 is a 10 bit DAC with 120mA output current sink
@@ -1384,6 +1402,7 @@ config VIDEO_DW9807_VCM
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
help
This is a driver for the DW9807 camera lens voice coil.
DW9807 is a 10 bit DAC with 100mA output current sink
@@ -1399,6 +1418,7 @@ config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
+ select V4L2_ASYNC
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1408,6 +1428,7 @@ config VIDEO_LM3560
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
select REGMAP_I2C
+ select V4L2_ASYNC
help
This is a driver for the lm3560 dual flash controllers. It controls
flash, torch LEDs.
@@ -1417,6 +1438,7 @@ config VIDEO_LM3646
depends on I2C && VIDEO_V4L2
select MEDIA_CONTROLLER
select REGMAP_I2C
+ select V4L2_ASYNC
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 0c067beca066..1168fa6b84ed 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -116,6 +116,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
+obj-$(CONFIG_VIDEO_IMX208) += imx208.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX219) += imx219.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index e4e8fda51ad8..714e31f993e1 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -250,7 +250,7 @@ static int adv7170_s_routing(struct v4l2_subdev *sd,
}
static int adv7170_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(adv7170_codes))
@@ -261,7 +261,7 @@ static int adv7170_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7170_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -284,7 +284,7 @@ static int adv7170_get_fmt(struct v4l2_subdev *sd,
}
static int adv7170_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 0cdd8e033197..1813f67f0fe1 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -288,7 +288,7 @@ static int adv7175_s_routing(struct v4l2_subdev *sd,
}
static int adv7175_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(adv7175_codes))
@@ -299,7 +299,7 @@ static int adv7175_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7175_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -322,7 +322,7 @@ static int adv7175_get_fmt(struct v4l2_subdev *sd,
}
static int adv7175_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 44bb6fe85644..fa5bc55bc944 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -633,7 +633,7 @@ static void adv7180_exit_controls(struct adv7180_state *state)
}
static int adv7180_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -699,13 +699,13 @@ static int adv7180_set_field_mode(struct adv7180_state *state)
}
static int adv7180_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
} else {
adv7180_mbus_fmt(sd, &format->format);
format->format.field = state->field;
@@ -715,7 +715,7 @@ static int adv7180_get_pad_format(struct v4l2_subdev *sd,
}
static int adv7180_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
@@ -742,7 +742,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
adv7180_set_power(state, true);
}
} else {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
*framefmt = format->format;
}
@@ -750,14 +750,14 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
}
static int adv7180_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
};
- return adv7180_set_pad_format(sd, cfg, &fmt);
+ return adv7180_set_pad_format(sd, sd_state, &fmt);
}
static int adv7180_get_mbus_config(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 8bcd632c081a..92cafdea3f1f 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -409,7 +409,7 @@ static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int adv7183_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -420,7 +420,7 @@ static int adv7183_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7183_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7183 *decoder = to_adv7183(sd);
@@ -443,12 +443,12 @@ static int adv7183_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
decoder->fmt = *fmt;
else
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
static int adv7183_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7183 *decoder = to_adv7183(sd);
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index 4052cf67bf16..02eabe10ab97 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -331,7 +331,7 @@ static int adv748x_afe_propagate_pixelrate(struct adv748x_afe *afe)
}
static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -343,7 +343,7 @@ static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv748x_afe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
@@ -354,7 +354,8 @@ static int adv748x_afe_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_afe_fill_format(afe, &sdformat->format);
@@ -365,7 +366,7 @@ static int adv748x_afe_get_format(struct v4l2_subdev *sd,
}
static int adv748x_afe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *mbusformat;
@@ -375,9 +376,9 @@ static int adv748x_afe_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return adv748x_afe_get_format(sd, cfg, sdformat);
+ return adv748x_afe_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index fa9278a08fde..589e9644fcdc 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -141,26 +141,26 @@ static const struct v4l2_subdev_video_ops adv748x_csi2_video_ops = {
static struct v4l2_mbus_framefmt *
adv748x_csi2_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
return &tx->format;
}
static int adv748x_csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
struct adv748x_state *state = tx->state;
struct v4l2_mbus_framefmt *mbusformat;
- mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ mbusformat = adv748x_csi2_get_pad_format(sd, sd_state, sdformat->pad,
sdformat->which);
if (!mbusformat)
return -EINVAL;
@@ -175,7 +175,7 @@ static int adv748x_csi2_get_format(struct v4l2_subdev *sd,
}
static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
@@ -183,7 +183,7 @@ static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mbusformat;
int ret = 0;
- mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ mbusformat = adv748x_csi2_get_pad_format(sd, sd_state, sdformat->pad,
sdformat->which);
if (!mbusformat)
return -EINVAL;
@@ -193,7 +193,7 @@ static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
if (sdformat->pad == ADV748X_CSI2_SOURCE) {
const struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = adv748x_csi2_get_pad_format(sd, cfg,
+ sink_fmt = adv748x_csi2_get_pad_format(sd, sd_state,
ADV748X_CSI2_SINK,
sdformat->which);
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index c557f8fdf11a..52fa7bd75660 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -409,7 +409,7 @@ static int adv748x_hdmi_propagate_pixelrate(struct adv748x_hdmi *hdmi)
}
static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -421,7 +421,7 @@ static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
@@ -431,7 +431,8 @@ static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_hdmi_fill_format(hdmi, &sdformat->format);
@@ -442,7 +443,7 @@ static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
}
static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *mbusformat;
@@ -451,9 +452,9 @@ static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return adv748x_hdmi_get_format(sd, cfg, sdformat);
+ return adv748x_hdmi_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 5fc6c06edda1..41f4e749a859 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1216,7 +1216,7 @@ static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
}
static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad != 0)
@@ -1247,7 +1247,7 @@ static void adv7511_fill_format(struct adv7511_state *state,
}
static int adv7511_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1261,7 +1261,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
format->format.colorspace = fmt->colorspace;
format->format.ycbcr_enc = fmt->ycbcr_enc;
@@ -1279,7 +1279,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
}
static int adv7511_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1316,7 +1316,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
fmt->colorspace = format->format.colorspace;
fmt->ycbcr_enc = format->format.ycbcr_enc;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 3049aa2fd0f0..122e1fdccd96 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1833,7 +1833,7 @@ static int adv76xx_s_routing(struct v4l2_subdev *sd,
}
static int adv76xx_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct adv76xx_state *state = to_state(sd);
@@ -1913,7 +1913,7 @@ static void adv76xx_setup_format(struct adv76xx_state *state)
}
static int adv76xx_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv76xx_state *state = to_state(sd);
@@ -1926,7 +1926,7 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -1936,7 +1936,7 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
}
static int adv76xx_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct adv76xx_state *state = to_state(sd);
@@ -1956,7 +1956,7 @@ static int adv76xx_get_selection(struct v4l2_subdev *sd,
}
static int adv76xx_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv76xx_state *state = to_state(sd);
@@ -1975,7 +1975,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index ff10af757b99..7f8acbdf0db4 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -98,12 +98,12 @@ struct adv7842_state {
v4l2_std_id norm;
struct {
- u8 edid[256];
+ u8 edid[512];
u32 blocks;
u32 present;
} hdmi_edid;
struct {
- u8 edid[256];
+ u8 edid[128];
u32 blocks;
u32 present;
} vga_edid;
@@ -720,6 +720,9 @@ static int edid_write_vga_segment(struct v4l2_subdev *sd)
v4l2_dbg(2, debug, sd, "%s: write EDID on VGA port\n", __func__);
+ if (!state->vga_edid.present)
+ return 0;
+
/* HPA disable on port A and B */
io_write_and_or(sd, 0x20, 0xcf, 0x00);
@@ -763,7 +766,7 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
struct adv7842_state *state = to_state(sd);
const u8 *edid = state->hdmi_edid.edid;
u32 blocks = state->hdmi_edid.blocks;
- int spa_loc;
+ unsigned int spa_loc;
u16 pa, parent_pa;
int err = 0;
int i;
@@ -796,12 +799,14 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
pa = (edid[spa_loc] << 8) | edid[spa_loc + 1];
}
- /* edid segment pointer '0' for HDMI ports */
- rep_write_and_or(sd, 0x77, 0xef, 0x00);
- for (i = 0; !err && i < blocks * 128; i += I2C_SMBUS_BLOCK_MAX)
+ for (i = 0; !err && i < blocks * 128; i += I2C_SMBUS_BLOCK_MAX) {
+ /* set edid segment pointer for HDMI ports */
+ if (i % 256 == 0)
+ rep_write_and_or(sd, 0x77, 0xef, i >= 256 ? 0x10 : 0x00);
err = i2c_smbus_write_i2c_block_data(state->i2c_edid, i,
I2C_SMBUS_BLOCK_MAX, edid + i);
+ }
if (err)
return err;
@@ -1988,7 +1993,7 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
}
static int adv7842_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(adv7842_formats))
@@ -2064,7 +2069,7 @@ static void adv7842_setup_format(struct adv7842_state *state)
}
static int adv7842_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7842_state *state = to_state(sd);
@@ -2092,7 +2097,7 @@ static int adv7842_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -2102,7 +2107,7 @@ static int adv7842_get_format(struct v4l2_subdev *sd,
}
static int adv7842_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7842_state *state = to_state(sd);
@@ -2112,7 +2117,7 @@ static int adv7842_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (state->mode == ADV7842_MODE_SDP)
- return adv7842_get_format(sd, cfg, format);
+ return adv7842_get_format(sd, sd_state, format);
info = adv7842_format_info(state, format->format.code);
if (info == NULL)
@@ -2124,7 +2129,7 @@ static int adv7842_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
@@ -2491,9 +2496,17 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return 0;
}
+/*
+ * If the VGA_EDID_ENABLE bit is set (Repeater Map 0x7f, bit 7), then
+ * the first two blocks of the EDID are for the HDMI ports, and the first block

+ * of segment 1 (i.e. the third block of the EDID) is for VGA.
+ * So if a VGA EDID is installed, then the maximum size of the HDMI EDID
+ * is 2 blocks.
+ */
static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
{
struct adv7842_state *state = to_state(sd);
+ unsigned int max_blocks = e->pad == ADV7842_EDID_PORT_VGA ? 1 : 4;
int err = 0;
memset(e->reserved, 0, sizeof(e->reserved));
@@ -2502,8 +2515,12 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
return -EINVAL;
if (e->start_block != 0)
return -EINVAL;
- if (e->blocks > 2) {
- e->blocks = 2;
+ if (e->pad < ADV7842_EDID_PORT_VGA && state->vga_edid.blocks)
+ max_blocks = 2;
+ if (e->pad == ADV7842_EDID_PORT_VGA && state->hdmi_edid.blocks > 2)
+ return -EBUSY;
+ if (e->blocks > max_blocks) {
+ e->blocks = max_blocks;
return -E2BIG;
}
@@ -2514,20 +2531,20 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
switch (e->pad) {
case ADV7842_EDID_PORT_VGA:
- memset(&state->vga_edid.edid, 0, 256);
+ memset(state->vga_edid.edid, 0, sizeof(state->vga_edid.edid));
state->vga_edid.blocks = e->blocks;
state->vga_edid.present = e->blocks ? 0x1 : 0x0;
if (e->blocks)
- memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
+ memcpy(state->vga_edid.edid, e->edid, 128);
err = edid_write_vga_segment(sd);
break;
case ADV7842_EDID_PORT_A:
case ADV7842_EDID_PORT_B:
- memset(&state->hdmi_edid.edid, 0, 256);
+ memset(state->hdmi_edid.edid, 0, sizeof(state->hdmi_edid.edid));
state->hdmi_edid.blocks = e->blocks;
if (e->blocks) {
state->hdmi_edid.present |= 0x04 << e->pad;
- memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
+ memcpy(state->hdmi_edid.edid, e->edid, 128 * e->blocks);
} else {
state->hdmi_edid.present &= ~(0x04 << e->pad);
adv7842_s_detect_tx_5v_ctrl(sd);
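Given the EDID layout described in the comment before adv7842_set_edid() above, the block budget can be summarized in one hypothetical helper (a sketch, not driver code):

    static unsigned int hdmi_edid_block_limit(const struct adv7842_state *state)
    {
            /* A VGA EDID occupies the first block of segment 1 (block 3),
             * so an installed VGA EDID caps the HDMI EDID at 2 blocks. */
            return state->vga_edid.blocks ? 2 : 4;
    }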
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
index e1f94ee0f48f..40b1a4aa846c 100644
--- a/drivers/media/i2c/ak7375.c
+++ b/drivers/media/i2c/ak7375.c
@@ -87,15 +87,7 @@ static const struct v4l2_ctrl_ops ak7375_vcm_ctrl_ops = {
static int ak7375_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int ret;
-
- ret = pm_runtime_get_sync(sd->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sd->dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(sd->dev);
}
static int ak7375_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
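pm_runtime_resume_and_get(), used above, packages the pm_runtime_get_sync() plus pm_runtime_put_noidle() error handling that each caller previously open-coded. Roughly:

    /* Sketch of the helper's behaviour, not its exact implementation. */
    static inline int resume_and_get_sketch(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev); /* bumps usage count even on failure */

            if (ret < 0) {
                    pm_runtime_put_noidle(dev); /* drop the count again */
                    return ret;
            }
            return 0;   /* the "already active" (1) case is folded to 0 */
    }

Folding the positive return value into 0 is also why ccs_pm_get_init() below keeps the open-coded form: it must distinguish the already-active case.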
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 1adaf470c75a..dc569d5a4d9d 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -91,7 +91,7 @@ static int ak881x_s_register(struct v4l2_subdev *sd,
#endif
static int ak881x_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -111,7 +111,7 @@ static int ak881x_fill_fmt(struct v4l2_subdev *sd,
}
static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
@@ -122,7 +122,7 @@ static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ak881x_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 9dc3f45da3dc..a9403a227c6b 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -1880,21 +1880,33 @@ static int ccs_pm_get_init(struct ccs_sensor *sensor)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
+ /*
+	 * We can't use pm_runtime_resume_and_get() here, as the driver
+	 * relies on the returned value to detect whether the device was
+	 * already active or not.
+ */
rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- pm_runtime_put_noidle(&client->dev);
+ if (rval < 0)
+ goto error;
- return rval;
- } else if (!rval) {
- rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->
- ctrl_handler);
- if (rval)
- return rval;
+ /* Device was already active, so don't set controls */
+ if (rval == 1)
+ return 0;
- return v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
- }
+ /* Restore V4L2 controls to the previously suspended device */
+ rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto error;
+
+ rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto error;
+ /* Keep PM runtime usage_count incremented on success */
return 0;
+error:
+ pm_runtime_put(&client->dev);
+ return rval;
}
static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
@@ -1932,7 +1944,7 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
}
static int ccs_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
@@ -1985,13 +1997,13 @@ static u32 __ccs_get_mbus_code(struct v4l2_subdev *subdev, unsigned int pad)
}
static int __ccs_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(subdev, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(subdev, sd_state,
fmt->pad);
} else {
struct v4l2_rect *r;
@@ -2011,21 +2023,21 @@ static int __ccs_get_format(struct v4l2_subdev *subdev,
}
static int ccs_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __ccs_get_format(subdev, cfg, fmt);
+ rval = __ccs_get_format(subdev, sd_state, fmt);
mutex_unlock(&sensor->mutex);
return rval;
}
static void ccs_get_crop_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect **crops,
struct v4l2_rect **comps, int which)
{
@@ -2042,24 +2054,25 @@ static void ccs_get_crop_compose(struct v4l2_subdev *subdev,
if (crops) {
for (i = 0; i < subdev->entity.num_pads; i++)
crops[i] = v4l2_subdev_get_try_crop(subdev,
- cfg, i);
+ sd_state,
+ i);
}
if (comps)
- *comps = v4l2_subdev_get_try_compose(subdev, cfg,
+ *comps = v4l2_subdev_get_try_compose(subdev, sd_state,
CCS_PAD_SINK);
}
}
/* Changes require propagation only on sink pad. */
static void ccs_propagate(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg, int which,
+ struct v4l2_subdev_state *sd_state, int which,
int target)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
struct v4l2_rect *comp, *crops[CCS_PADS];
- ccs_get_crop_compose(subdev, cfg, crops, &comp, which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp, which);
switch (target) {
case V4L2_SEL_TGT_CROP:
@@ -2099,7 +2112,7 @@ static const struct ccs_csi_data_format
}
static int ccs_set_format_source(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -2110,7 +2123,7 @@ static int ccs_set_format_source(struct v4l2_subdev *subdev,
unsigned int i;
int rval;
- rval = __ccs_get_format(subdev, cfg, fmt);
+ rval = __ccs_get_format(subdev, sd_state, fmt);
if (rval)
return rval;
@@ -2152,7 +2165,7 @@ static int ccs_set_format_source(struct v4l2_subdev *subdev,
}
static int ccs_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -2164,7 +2177,7 @@ static int ccs_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == ssd->source_pad) {
int rval;
- rval = ccs_set_format_source(subdev, cfg, fmt);
+ rval = ccs_set_format_source(subdev, sd_state, fmt);
mutex_unlock(&sensor->mutex);
@@ -2186,7 +2199,7 @@ static int ccs_set_format(struct v4l2_subdev *subdev,
CCS_LIM(sensor, MIN_Y_OUTPUT_SIZE),
CCS_LIM(sensor, MAX_Y_OUTPUT_SIZE));
- ccs_get_crop_compose(subdev, cfg, crops, NULL, fmt->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, NULL, fmt->which);
crops[ssd->sink_pad]->left = 0;
crops[ssd->sink_pad]->top = 0;
@@ -2194,7 +2207,7 @@ static int ccs_set_format(struct v4l2_subdev *subdev,
crops[ssd->sink_pad]->height = fmt->format.height;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
ssd->sink_fmt = *crops[ssd->sink_pad];
- ccs_propagate(subdev, cfg, fmt->which, V4L2_SEL_TGT_CROP);
+ ccs_propagate(subdev, sd_state, fmt->which, V4L2_SEL_TGT_CROP);
mutex_unlock(&sensor->mutex);
@@ -2246,7 +2259,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
}
static void ccs_set_compose_binner(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -2294,7 +2307,7 @@ static void ccs_set_compose_binner(struct v4l2_subdev *subdev,
* result.
*/
static void ccs_set_compose_scaler(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -2409,25 +2422,25 @@ static void ccs_set_compose_scaler(struct v4l2_subdev *subdev,
}
/* We're only called on source pads. This function sets scaling. */
static int ccs_set_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
struct ccs_subdev *ssd = to_ccs_subdev(subdev);
struct v4l2_rect *comp, *crops[CCS_PADS];
- ccs_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
sel->r.top = 0;
sel->r.left = 0;
if (ssd == sensor->binner)
- ccs_set_compose_binner(subdev, cfg, sel, crops, comp);
+ ccs_set_compose_binner(subdev, sd_state, sel, crops, comp);
else
- ccs_set_compose_scaler(subdev, cfg, sel, crops, comp);
+ ccs_set_compose_scaler(subdev, sd_state, sel, crops, comp);
*comp = sel->r;
- ccs_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
+ ccs_propagate(subdev, sd_state, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return ccs_pll_blanking_update(sensor);
@@ -2474,7 +2487,7 @@ static int __ccs_sel_supported(struct v4l2_subdev *subdev,
}
static int ccs_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -2482,7 +2495,7 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
struct v4l2_rect *src_size, *crops[CCS_PADS];
struct v4l2_rect _r;
- ccs_get_crop_compose(subdev, cfg, crops, NULL, sel->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, NULL, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
if (sel->pad == ssd->sink_pad)
@@ -2493,16 +2506,18 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
if (sel->pad == ssd->sink_pad) {
_r.left = 0;
_r.top = 0;
- _r.width = v4l2_subdev_get_try_format(subdev, cfg,
+ _r.width = v4l2_subdev_get_try_format(subdev,
+ sd_state,
sel->pad)
->width;
- _r.height = v4l2_subdev_get_try_format(subdev, cfg,
+ _r.height = v4l2_subdev_get_try_format(subdev,
+ sd_state,
sel->pad)
->height;
src_size = &_r;
} else {
src_size = v4l2_subdev_get_try_compose(
- subdev, cfg, ssd->sink_pad);
+ subdev, sd_state, ssd->sink_pad);
}
}
@@ -2520,7 +2535,7 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
*crops[sel->pad] = sel->r;
if (ssd != sensor->pixel_array && sel->pad == CCS_PAD_SINK)
- ccs_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_CROP);
+ ccs_propagate(subdev, sd_state, sel->which, V4L2_SEL_TGT_CROP);
return 0;
}
@@ -2534,7 +2549,7 @@ static void ccs_get_native_size(struct ccs_subdev *ssd, struct v4l2_rect *r)
}
static int __ccs_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -2547,13 +2562,14 @@ static int __ccs_get_selection(struct v4l2_subdev *subdev,
if (ret)
return ret;
- ccs_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
+ ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
sink_fmt = ssd->sink_fmt;
} else {
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(subdev, cfg, ssd->sink_pad);
+ v4l2_subdev_get_try_format(subdev, sd_state,
+ ssd->sink_pad);
sink_fmt.left = 0;
sink_fmt.top = 0;
@@ -2584,21 +2600,21 @@ static int __ccs_get_selection(struct v4l2_subdev *subdev,
}
static int ccs_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __ccs_get_selection(subdev, cfg, sel);
+ rval = __ccs_get_selection(subdev, sd_state, sel);
mutex_unlock(&sensor->mutex);
return rval;
}
static int ccs_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -2622,10 +2638,10 @@ static int ccs_set_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- ret = ccs_set_crop(subdev, cfg, sel);
+ ret = ccs_set_crop(subdev, sd_state, sel);
break;
case V4L2_SEL_TGT_COMPOSE:
- ret = ccs_set_compose(subdev, cfg, sel);
+ ret = ccs_set_compose(subdev, sd_state, sel);
break;
default:
ret = -EINVAL;
@@ -3016,9 +3032,9 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
for (i = 0; i < ssd->npads; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, i);
+ v4l2_subdev_get_try_format(sd, fh->state, i);
struct v4l2_rect *try_crop =
- v4l2_subdev_get_try_crop(sd, fh->pad, i);
+ v4l2_subdev_get_try_crop(sd, fh->state, i);
struct v4l2_rect *try_comp;
ccs_get_native_size(ssd, try_crop);
@@ -3031,7 +3047,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
if (ssd != sensor->pixel_array)
continue;
- try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i);
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
*try_comp = *try_crop;
}
@@ -3089,12 +3105,9 @@ static int __maybe_unused ccs_suspend(struct device *dev)
bool streaming = sensor->streaming;
int rval;
- rval = pm_runtime_get_sync(dev);
- if (rval < 0) {
- pm_runtime_put_noidle(dev);
-
- return -EAGAIN;
- }
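+	/* pm_runtime_resume_and_get() drops the usage count itself on failure */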
+ rval = pm_runtime_resume_and_get(dev);
+ if (rval < 0)
+ return rval;
if (sensor->streaming)
ccs_stop_streaming(sensor);
diff --git a/drivers/media/i2c/ccs/ccs-limits.c b/drivers/media/i2c/ccs/ccs-limits.c
index f5511789ac83..4969fa425317 100644
--- a/drivers/media/i2c/ccs/ccs-limits.c
+++ b/drivers/media/i2c/ccs/ccs-limits.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Copyright (C) 2019--2020 Intel Corporation */
+/*
+ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
+ * do not modify.
+ */
#include "ccs-limits.h"
#include "ccs-regs.h"
diff --git a/drivers/media/i2c/ccs/ccs-limits.h b/drivers/media/i2c/ccs/ccs-limits.h
index 1efa43c23a2e..551d3ee9d04e 100644
--- a/drivers/media/i2c/ccs/ccs-limits.h
+++ b/drivers/media/i2c/ccs/ccs-limits.h
@@ -1,5 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/* Copyright (C) 2019--2020 Intel Corporation */
+/*
+ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
+ * do not modify.
+ */
#ifndef __CCS_LIMITS_H__
#define __CCS_LIMITS_H__
diff --git a/drivers/media/i2c/ccs/ccs-regs.h b/drivers/media/i2c/ccs/ccs-regs.h
index 4b3e5df2121f..6ce84c5ecf20 100644
--- a/drivers/media/i2c/ccs/ccs-regs.h
+++ b/drivers/media/i2c/ccs/ccs-regs.h
@@ -1,5 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/* Copyright (C) 2019--2020 Intel Corporation */
+/*
+ * Generated by Documentation/driver-api/media/drivers/ccs/mk-ccs-regs;
+ * do not modify.
+ */
#ifndef __CCS_REGS_H__
#define __CCS_REGS_H__
@@ -202,7 +206,7 @@
#define CCS_R_OP_PIX_CLK_DIV (0x0308 | CCS_FL_16BIT)
#define CCS_R_OP_SYS_CLK_DIV (0x030a | CCS_FL_16BIT)
#define CCS_R_OP_PRE_PLL_CLK_DIV (0x030c | CCS_FL_16BIT)
-#define CCS_R_OP_PLL_MULTIPLIER (0x031e | CCS_FL_16BIT)
+#define CCS_R_OP_PLL_MULTIPLIER (0x030e | CCS_FL_16BIT)
#define CCS_R_PLL_MODE 0x0310
#define CCS_PLL_MODE_SHIFT 0U
#define CCS_PLL_MODE_MASK 0x1
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e2e935f78986..dc31944c7d5b 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -1746,7 +1746,7 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
/* ----------------------------------------------------------------------- */
static int cx25840_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 3f0b082f863f..c8b4292512dc 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -85,15 +85,7 @@ static const struct v4l2_ctrl_ops dw9714_vcm_ctrl_ops = {
static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int rval;
-
- rval = pm_runtime_get_sync(sd->dev);
- if (rval < 0) {
- pm_runtime_put_noidle(sd->dev);
- return rval;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(sd->dev);
}
static int dw9714_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index 8b8cb4b077b5..c086580efac7 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -374,15 +374,7 @@ static const struct v4l2_ctrl_ops dw9768_ctrl_ops = {
static int dw9768_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int ret;
-
- ret = pm_runtime_get_sync(sd->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sd->dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(sd->dev);
}
static int dw9768_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
index 438a44b76da8..95e06f13bc9e 100644
--- a/drivers/media/i2c/dw9807-vcm.c
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -130,15 +130,7 @@ static const struct v4l2_ctrl_ops dw9807_vcm_ctrl_ops = {
static int dw9807_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int rval;
-
- rval = pm_runtime_get_sync(sd->dev);
- if (rval < 0) {
- pm_runtime_put_noidle(sd->dev);
- return rval;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(sd->dev);
}
static int dw9807_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index bb3eac5e005e..c7b91c0c03b5 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -882,7 +882,7 @@ out:
*/
#define MAX_FMTS 4
static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct et8ek8_reglist **list =
@@ -920,7 +920,7 @@ static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct et8ek8_reglist **list =
@@ -958,7 +958,7 @@ static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
}
static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct et8ek8_reglist **list =
@@ -990,12 +990,13 @@ static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&sensor->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -1004,13 +1005,14 @@ __et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
}
static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
struct v4l2_mbus_framefmt *format;
- format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ format = __et8ek8_get_pad_format(sensor, sd_state, fmt->pad,
+ fmt->which);
if (!format)
return -EINVAL;
@@ -1020,14 +1022,15 @@ static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
}
static int et8ek8_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
struct v4l2_mbus_framefmt *format;
struct et8ek8_reglist *reglist;
- format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ format = __et8ek8_get_pad_format(sensor, sd_state, fmt->pad,
+ fmt->which);
if (!format)
return -EINVAL;
@@ -1327,7 +1330,7 @@ static int et8ek8_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct et8ek8_reglist *reglist;
reglist = et8ek8_reglist_find_type(&meta_reglist, ET8EK8_REGLIST_MODE);
- format = __et8ek8_get_pad_format(sensor, fh->pad, 0,
+ format = __et8ek8_get_pad_format(sensor, fh->state, 0,
V4L2_SUBDEV_FORMAT_TRY);
et8ek8_reglist_to_mbus(reglist, format);
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 6f05c1138e3b..8db1cbedc1fd 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -813,9 +813,8 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&hi556->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
mutex_unlock(&hi556->mutex);
return ret;
}
@@ -876,7 +875,7 @@ error:
}
static int hi556_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct hi556 *hi556 = to_hi556(sd);
@@ -891,7 +890,7 @@ static int hi556_set_format(struct v4l2_subdev *sd,
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
hi556->cur_mode = mode;
__v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
@@ -918,14 +917,15 @@ static int hi556_set_format(struct v4l2_subdev *sd,
}
static int hi556_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct hi556 *hi556 = to_hi556(sd);
mutex_lock(&hi556->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd,
+ sd_state,
fmt->pad);
else
hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
@@ -936,7 +936,7 @@ static int hi556_get_format(struct v4l2_subdev *sd,
}
static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -948,7 +948,7 @@ static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
}
static int hi556_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -971,7 +971,7 @@ static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&hi556->mutex);
return 0;
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
new file mode 100644
index 000000000000..6f3d9c1b5879
--- /dev/null
+++ b/drivers/media/i2c/imx208.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2021 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <asm/unaligned.h>
+
+#define IMX208_REG_MODE_SELECT 0x0100
+#define IMX208_MODE_STANDBY 0x00
+#define IMX208_MODE_STREAMING 0x01
+
+/* Chip ID */
+#define IMX208_REG_CHIP_ID 0x0000
+#define IMX208_CHIP_ID 0x0208
+
+/* V_TIMING internal */
+#define IMX208_REG_VTS 0x0340
+#define IMX208_VTS_60FPS 0x0472
+#define IMX208_VTS_BINNING 0x0239
+#define IMX208_VTS_60FPS_MIN 0x0458
+#define IMX208_VTS_BINNING_MIN 0x0230
+#define IMX208_VTS_MAX 0xffff
+
+/* HBLANK control - read only */
+#define IMX208_PPL_384MHZ 2248
+#define IMX208_PPL_96MHZ 2248
+
+/* Exposure control */
+#define IMX208_REG_EXPOSURE 0x0202
+#define IMX208_EXPOSURE_MIN 4
+#define IMX208_EXPOSURE_STEP 1
+#define IMX208_EXPOSURE_DEFAULT 0x190
+#define IMX208_EXPOSURE_MAX 65535
+
+/* Analog gain control */
+#define IMX208_REG_ANALOG_GAIN 0x0204
+#define IMX208_ANA_GAIN_MIN 0
+#define IMX208_ANA_GAIN_MAX 0x00e0
+#define IMX208_ANA_GAIN_STEP 1
+#define IMX208_ANA_GAIN_DEFAULT 0x0
+
+/* Digital gain control */
+#define IMX208_REG_GR_DIGITAL_GAIN 0x020e
+#define IMX208_REG_R_DIGITAL_GAIN 0x0210
+#define IMX208_REG_B_DIGITAL_GAIN 0x0212
+#define IMX208_REG_GB_DIGITAL_GAIN 0x0214
+#define IMX208_DIGITAL_GAIN_SHIFT 8
+
+/* Orientation */
+#define IMX208_REG_ORIENTATION_CONTROL 0x0101
+
+/* Test Pattern Control */
+#define IMX208_REG_TEST_PATTERN_MODE 0x0600
+#define IMX208_TEST_PATTERN_DISABLE 0x0
+#define IMX208_TEST_PATTERN_SOLID_COLOR 0x1
+#define IMX208_TEST_PATTERN_COLOR_BARS 0x2
+#define IMX208_TEST_PATTERN_GREY_COLOR 0x3
+#define IMX208_TEST_PATTERN_PN9 0x4
+#define IMX208_TEST_PATTERN_FIX_1 0x100
+#define IMX208_TEST_PATTERN_FIX_2 0x101
+#define IMX208_TEST_PATTERN_FIX_3 0x102
+#define IMX208_TEST_PATTERN_FIX_4 0x103
+#define IMX208_TEST_PATTERN_FIX_5 0x104
+#define IMX208_TEST_PATTERN_FIX_6 0x105
+
+/* OTP Access */
+#define IMX208_OTP_BASE 0x3500
+#define IMX208_OTP_SIZE 40
+
+struct imx208_reg {
+ u16 address;
+ u8 val;
+};
+
+struct imx208_reg_list {
+ u32 num_of_regs;
+ const struct imx208_reg *regs;
+};
+
+/* Link frequency config */
+struct imx208_link_freq_config {
+ u32 pixels_per_line;
+
+ /* PLL registers for this link frequency */
+ struct imx208_reg_list reg_list;
+};
+
+/* Mode: resolution and related config & values */
+struct imx208_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts_def;
+ u32 vts_min;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct imx208_reg_list reg_list;
+};
+
+static const struct imx208_reg pll_ctrl_reg[] = {
+ {0x0305, 0x02},
+ {0x0307, 0x50},
+ {0x303C, 0x3C},
+};
+
+static const struct imx208_reg mode_1936x1096_60fps_regs[] = {
+ {0x0340, 0x04},
+ {0x0341, 0x72},
+ {0x0342, 0x04},
+ {0x0343, 0x64},
+ {0x034C, 0x07},
+ {0x034D, 0x90},
+ {0x034E, 0x04},
+ {0x034F, 0x48},
+ {0x0381, 0x01},
+ {0x0383, 0x01},
+ {0x0385, 0x01},
+ {0x0387, 0x01},
+ {0x3048, 0x00},
+ {0x3050, 0x01},
+ {0x30D5, 0x00},
+ {0x3301, 0x00},
+ {0x3318, 0x62},
+ {0x0202, 0x01},
+ {0x0203, 0x90},
+ {0x0205, 0x00},
+};
+
+static const struct imx208_reg mode_968_548_60fps_regs[] = {
+ {0x0340, 0x02},
+ {0x0341, 0x39},
+ {0x0342, 0x08},
+ {0x0343, 0xC8},
+ {0x034C, 0x03},
+ {0x034D, 0xC8},
+ {0x034E, 0x02},
+ {0x034F, 0x24},
+ {0x0381, 0x01},
+ {0x0383, 0x03},
+ {0x0385, 0x01},
+ {0x0387, 0x03},
+ {0x3048, 0x01},
+ {0x3050, 0x02},
+ {0x30D5, 0x03},
+ {0x3301, 0x10},
+ {0x3318, 0x75},
+ {0x0202, 0x01},
+ {0x0203, 0x90},
+ {0x0205, 0x00},
+};
+
+static const s64 imx208_discrete_digital_gain[] = {
+ 1, 2, 4, 8, 16,
+};
+
+static const char * const imx208_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Color",
+ "100% Color Bar",
+ "Fade to Grey Color Bar",
+ "PN9",
+ "Fixed Pattern1",
+ "Fixed Pattern2",
+ "Fixed Pattern3",
+ "Fixed Pattern4",
+ "Fixed Pattern5",
+ "Fixed Pattern6"
+};
+
+static const int imx208_test_pattern_val[] = {
+ IMX208_TEST_PATTERN_DISABLE,
+ IMX208_TEST_PATTERN_SOLID_COLOR,
+ IMX208_TEST_PATTERN_COLOR_BARS,
+ IMX208_TEST_PATTERN_GREY_COLOR,
+ IMX208_TEST_PATTERN_PN9,
+ IMX208_TEST_PATTERN_FIX_1,
+ IMX208_TEST_PATTERN_FIX_2,
+ IMX208_TEST_PATTERN_FIX_3,
+ IMX208_TEST_PATTERN_FIX_4,
+ IMX208_TEST_PATTERN_FIX_5,
+ IMX208_TEST_PATTERN_FIX_6,
+};
+
+/* Configurations for supported link frequencies */
+#define IMX208_MHZ (1000 * 1000ULL)
+#define IMX208_LINK_FREQ_384MHZ (384ULL * IMX208_MHZ)
+#define IMX208_LINK_FREQ_96MHZ (96ULL * IMX208_MHZ)
+
+#define IMX208_DATA_RATE_DOUBLE 2
+#define IMX208_NUM_OF_LANES 2
+#define IMX208_PIXEL_BITS 10
+
+enum {
+ IMX208_LINK_FREQ_384MHZ_INDEX,
+ IMX208_LINK_FREQ_96MHZ_INDEX,
+};
+
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 2; bits per pixel => 10
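+ * e.g. at the 384 MHz link frequency: 384 MHz * 2 * 2 / 10 = 153.6 Mpixels/s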
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= IMX208_DATA_RATE_DOUBLE * IMX208_NUM_OF_LANES;
+ do_div(f, IMX208_PIXEL_BITS);
+
+ return f;
+}
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ [IMX208_LINK_FREQ_384MHZ_INDEX] = IMX208_LINK_FREQ_384MHZ,
+ [IMX208_LINK_FREQ_96MHZ_INDEX] = IMX208_LINK_FREQ_96MHZ,
+};
+
+/* Link frequency configs */
+static const struct imx208_link_freq_config link_freq_configs[] = {
+ [IMX208_LINK_FREQ_384MHZ_INDEX] = {
+ .pixels_per_line = IMX208_PPL_384MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(pll_ctrl_reg),
+ .regs = pll_ctrl_reg,
+ }
+ },
+ [IMX208_LINK_FREQ_96MHZ_INDEX] = {
+ .pixels_per_line = IMX208_PPL_96MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(pll_ctrl_reg),
+ .regs = pll_ctrl_reg,
+ }
+ },
+};
+
+/* Mode configs */
+static const struct imx208_mode supported_modes[] = {
+ {
+ .width = 1936,
+ .height = 1096,
+ .vts_def = IMX208_VTS_60FPS,
+ .vts_min = IMX208_VTS_60FPS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1936x1096_60fps_regs),
+ .regs = mode_1936x1096_60fps_regs,
+ },
+ .link_freq_index = IMX208_LINK_FREQ_384MHZ_INDEX,
+ },
+ {
+ .width = 968,
+ .height = 548,
+ .vts_def = IMX208_VTS_BINNING,
+ .vts_min = IMX208_VTS_BINNING_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_968_548_60fps_regs),
+ .regs = mode_968_548_60fps_regs,
+ },
+ .link_freq_index = IMX208_LINK_FREQ_96MHZ_INDEX,
+ },
+};
+
+struct imx208 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ /* Current mode */
+ const struct imx208_mode *cur_mode;
+
+	/*
+	 * Mutex for serialized access:
+	 * protects sensor set pad format and start/stop streaming, and
+	 * access to the sensor's V4L2 controls.
+	 */
+ struct mutex imx208_mx;
+
+ /* Streaming on/off */
+ bool streaming;
+
+ /* OTP data */
+ bool otp_read;
+ char otp_data[IMX208_OTP_SIZE];
+};
+
+static inline struct imx208 *to_imx208(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx208, sd);
+}
+
+/* Get bayer order based on flip setting. */
+static u32 imx208_get_format_code(struct imx208 *imx208)
+{
+ /*
+	 * Only one bayer order is supported at a time.
+	 * Which one depends on the flip settings.
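+	 * e.g. codes[0][0] (no flips) yields SRGGB10, while codes[1][1]
+	 * (both flips) yields SBGGR10.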
+ */
+ static const u32 codes[2][2] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, },
+ };
+
+ return codes[imx208->vflip->val][imx208->hflip->val];
+}
+
+/* Read registers up to 4 at a time */
+static int imx208_read_reg(struct imx208 *imx208, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
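+	/*
+	 * The read landed in the tail of data_buf, so this fetch returns
+	 * the register value zero-extended to 32 bits.
+	 */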
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int imx208_write_reg(struct imx208 *imx208, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
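+	/*
+	 * Left-align val so that, e.g. for len == 2, its two significant
+	 * bytes land in buf[2..3], right after the register address.
+	 */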
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int imx208_write_regs(struct imx208 *imx208,
+ const struct imx208_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < len; i++) {
+ ret = imx208_write_reg(imx208, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Open sub-device */
+static int imx208_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
+
+ /* Initialize try_fmt */
+ try_fmt->width = supported_modes[0].width;
+ try_fmt->height = supported_modes[0].height;
+ try_fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx208_update_digital_gain(struct imx208 *imx208, u32 len, u32 val)
+{
+ int ret;
+
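+	/* e.g. menu index 1 selects gain x2, i.e. register value 2 << 8 = 0x200 */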
+ val = imx208_discrete_digital_gain[val] << IMX208_DIGITAL_GAIN_SHIFT;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_GR_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_GB_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_R_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ return imx208_write_reg(imx208, IMX208_REG_B_DIGITAL_GAIN, 2, val);
+}
+
+static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx208 *imx208 =
+ container_of(ctrl->handler, struct imx208, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+
+ /*
+	 * Applying V4L2 control values only happens
+ * when power is up for streaming
+ */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = imx208_write_reg(imx208, IMX208_REG_ANALOG_GAIN,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = imx208_write_reg(imx208, IMX208_REG_EXPOSURE,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx208_update_digital_gain(imx208, 2, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update VTS that meets expected vertical blanking */
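+		/* e.g. the 1096-line mode with the default 42-line vblank yields VTS 1138 (0x0472) */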
+ ret = imx208_write_reg(imx208, IMX208_REG_VTS, 2,
+ imx208->cur_mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx208_write_reg(imx208, IMX208_REG_TEST_PATTERN_MODE,
+ 2, imx208_test_pattern_val[ctrl->val]);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
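+		/* bit 0 carries hflip, bit 1 vflip, matching the shift below */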
+ ret = imx208_write_reg(imx208, IMX208_REG_ORIENTATION_CONTROL,
+ 1,
+ imx208->hflip->val |
+ imx208->vflip->val << 1);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx208_ctrl_ops = {
+ .s_ctrl = imx208_set_ctrl,
+};
+
+static const struct v4l2_ctrl_config imx208_digital_gain_control = {
+ .ops = &imx208_ctrl_ops,
+ .id = V4L2_CID_DIGITAL_GAIN,
+ .name = "Digital Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(imx208_discrete_digital_gain) - 1,
+ .step = 0,
+ .def = 0,
+ .menu_skip_mask = 0,
+ .qmenu_int = imx208_discrete_digital_gain,
+};
+
+static int imx208_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = imx208_get_format_code(imx208);
+
+ return 0;
+}
+
+static int imx208_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != imx208_get_format_code(imx208))
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void imx208_mode_to_pad_format(struct imx208 *imx208,
+ const struct imx208_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = imx208_get_format_code(imx208);
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int __imx208_get_pad_format(struct imx208 *imx208,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&imx208->sd,
+ sd_state,
+ fmt->pad);
+ else
+ imx208_mode_to_pad_format(imx208, imx208->cur_mode, fmt);
+
+ return 0;
+}
+
+static int imx208_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ mutex_lock(&imx208->imx208_mx);
+ ret = __imx208_get_pad_format(imx208, sd_state, fmt);
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int imx208_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ const struct imx208_mode *mode;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
+
+ mutex_lock(&imx208->imx208_mx);
+
+ fmt->format.code = imx208_get_format_code(imx208);
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width, height,
+ fmt->format.width, fmt->format.height);
+ imx208_mode_to_pad_format(imx208, mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ } else {
+ imx208->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(imx208->link_freq, mode->link_freq_index);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(imx208->pixel_rate, pixel_rate);
+ /* Update limits and set FPS to default */
+ vblank_def = imx208->cur_mode->vts_def -
+ imx208->cur_mode->height;
+ vblank_min = imx208->cur_mode->vts_min -
+ imx208->cur_mode->height;
+ __v4l2_ctrl_modify_range(imx208->vblank, vblank_min,
+ IMX208_VTS_MAX - imx208->cur_mode->height,
+ 1, vblank_def);
+ __v4l2_ctrl_s_ctrl(imx208->vblank, vblank_def);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - imx208->cur_mode->width;
+ __v4l2_ctrl_modify_range(imx208->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&imx208->imx208_mx);
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx208_start_streaming(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ const struct imx208_reg_list *reg_list;
+ int ret, link_freq_index;
+
+ /* Setup PLL */
+ link_freq_index = imx208->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &imx208->cur_mode->reg_list;
+ ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(imx208->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /* set stream on register */
+ return imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
+ 1, IMX208_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int imx208_stop_streaming(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+
+ /* set stream off register */
+ ret = imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
+ 1, IMX208_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+
+ /*
+ * Return success even if it was an error, as there is nothing the
+ * caller can do about it.
+ */
+ return 0;
+}
+
+static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&imx208->imx208_mx);
+ if (imx208->streaming == enable) {
+ mutex_unlock(&imx208->imx208_mx);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0)
+ goto err_rpm_put;
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = imx208_start_streaming(imx208);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ imx208_stop_streaming(imx208);
+ pm_runtime_put(&client->dev);
+ }
+
+ imx208->streaming = enable;
+ mutex_unlock(&imx208->imx208_mx);
+
+ /* vflip and hflip cannot change during streaming */
+ v4l2_ctrl_grab(imx208->vflip, enable);
+ v4l2_ctrl_grab(imx208->hflip, enable);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int __maybe_unused imx208_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (imx208->streaming)
+ imx208_stop_streaming(imx208);
+
+ return 0;
+}
+
+static int __maybe_unused imx208_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ if (imx208->streaming) {
+ ret = imx208_start_streaming(imx208);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx208_stop_streaming(imx208);
+ imx208->streaming = 0;
+
+ return ret;
+}
+
+/* Verify chip ID */
+static int imx208_identify_module(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+ u32 val;
+
+ ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
+ 2, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX208_CHIP_ID);
+ return ret;
+ }
+
+ if (val != IMX208_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ IMX208_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops imx208_video_ops = {
+ .s_stream = imx208_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx208_pad_ops = {
+ .enum_mbus_code = imx208_enum_mbus_code,
+ .get_fmt = imx208_get_pad_format,
+ .set_fmt = imx208_set_pad_format,
+ .enum_frame_size = imx208_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops imx208_subdev_ops = {
+ .video = &imx208_video_ops,
+ .pad = &imx208_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops imx208_internal_ops = {
+ .open = imx208_open,
+};
+
+static int imx208_read_otp(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { IMX208_OTP_BASE >> 8, IMX208_OTP_BASE & 0xff };
+ int ret = 0;
+
+ mutex_lock(&imx208->imx208_mx);
+
+ if (imx208->otp_read)
+ goto out_unlock;
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto out_unlock;
+ }
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from registers */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(imx208->otp_data);
+ msgs[1].buf = imx208->otp_data;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs)) {
+ imx208->otp_read = true;
+ ret = 0;
+ }
+
+ pm_runtime_put(&client->dev);
+
+out_unlock:
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static ssize_t otp_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ ret = imx208_read_otp(imx208);
+ if (ret)
+ return ret;
+
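+	/* sysfs clamps off and count to the attribute size, keeping this copy in bounds */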
+ memcpy(buf, &imx208->otp_data[off], count);
+ return count;
+}
+
+static const BIN_ATTR_RO(otp, IMX208_OTP_SIZE);
+
+/* Initialize control handlers */
+static int imx208_init_controls(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &imx208->ctrl_handler;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ mutex_init(&imx208->imx208_mx);
+ ctrl_hdlr->lock = &imx208->imx208_mx;
+ imx208->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &imx208_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+
+ if (imx208->link_freq)
+ imx208->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min =
+ link_freq_to_pixel_rate(link_freq_menu_items[ARRAY_SIZE(link_freq_menu_items) - 1]);
+ /* By default, PIXEL_RATE is read only */
+ imx208->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
+
+ vblank_def = imx208->cur_mode->vts_def - imx208->cur_mode->height;
+ vblank_min = imx208->cur_mode->vts_min - imx208->cur_mode->height;
+ imx208->vblank =
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_VBLANK,
+ vblank_min,
+ IMX208_VTS_MAX - imx208->cur_mode->height, 1,
+ vblank_def);
+
+ imx208->hblank =
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_HBLANK,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width,
+ 1,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width);
+
+ if (imx208->hblank)
+ imx208->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = imx208->cur_mode->vts_def - 8;
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_EXPOSURE,
+ IMX208_EXPOSURE_MIN, exposure_max,
+ IMX208_EXPOSURE_STEP, IMX208_EXPOSURE_DEFAULT);
+
+ imx208->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ imx208->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX208_ANA_GAIN_MIN, IMX208_ANA_GAIN_MAX,
+ IMX208_ANA_GAIN_STEP, IMX208_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_custom(ctrl_hdlr, &imx208_digital_gain_control, NULL);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx208_test_pattern_menu) - 1,
+ 0, 0, imx208_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ imx208->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static void imx208_free_controls(struct imx208 *imx208)
+{
+ v4l2_ctrl_handler_free(imx208->sd.ctrl_handler);
+}
+
+static int imx208_probe(struct i2c_client *client)
+{
+ struct imx208 *imx208;
+ int ret;
+ u32 val = 0;
+
+ device_property_read_u32(&client->dev, "clock-frequency", &val);
+ if (val != 19200000) {
+ dev_err(&client->dev,
+ "Unsupported clock-frequency %u. Expected 19200000.\n",
+ val);
+ return -EINVAL;
+ }
+
+ imx208 = devm_kzalloc(&client->dev, sizeof(*imx208), GFP_KERNEL);
+ if (!imx208)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
+
+ /* Check module identity */
+ ret = imx208_identify_module(imx208);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto error_probe;
+ }
+
+ /* Set default mode to max resolution */
+ imx208->cur_mode = &supported_modes[0];
+
+ ret = imx208_init_controls(imx208);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto error_probe;
+ }
+
+ /* Initialize subdev */
+ imx208->sd.internal_ops = &imx208_internal_ops;
+ imx208->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx208->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ imx208->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx208->sd.entity, 1, &imx208->pad);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&imx208->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ ret = device_create_bin_file(&client->dev, &bin_attr_otp);
+ if (ret) {
+ dev_err(&client->dev, "sysfs otp creation failed\n");
+ goto error_async_subdev;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_async_subdev:
+ v4l2_async_unregister_subdev(&imx208->sd);
+
+error_media_entity:
+ media_entity_cleanup(&imx208->sd.entity);
+
+error_handler_free:
+ imx208_free_controls(imx208);
+
+error_probe:
+ mutex_destroy(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int imx208_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+
+ device_remove_bin_file(&client->dev, &bin_attr_otp);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ imx208_free_controls(imx208);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx208->imx208_mx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx208_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx208_suspend, imx208_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id imx208_acpi_ids[] = {
+ { "INT3478" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, imx208_acpi_ids);
+#endif
+
+static struct i2c_driver imx208_i2c_driver = {
+ .driver = {
+ .name = "imx208",
+ .pm = &imx208_pm_ops,
+ .acpi_match_table = ACPI_PTR(imx208_acpi_ids),
+ },
+ .probe_new = imx208_probe,
+ .remove = imx208_remove,
+};
+
+module_i2c_driver(imx208_i2c_driver);
+
+MODULE_AUTHOR("Yeh, Andy <andy.yeh@intel.com>");
+MODULE_AUTHOR("Chen, Ping-chung <ping-chung.chen@intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Sony IMX208 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index e8b281e432e8..83c1737abeec 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -474,7 +474,7 @@ static int __maybe_unused imx214_power_off(struct device *dev)
}
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -486,7 +486,7 @@ static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx214_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != IMX214_MBUS_CODE)
@@ -534,13 +534,13 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
static struct v4l2_mbus_framefmt *
__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&imx214->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&imx214->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->fmt;
default:
@@ -549,13 +549,14 @@ __imx214_get_pad_format(struct imx214 *imx214,
}
static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct imx214 *imx214 = to_imx214(sd);
mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, cfg, format->pad,
+ format->format = *__imx214_get_pad_format(imx214, sd_state,
+ format->pad,
format->which);
mutex_unlock(&imx214->mutex);
@@ -563,12 +564,13 @@ static int imx214_get_format(struct v4l2_subdev *sd,
}
static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_pad_config *cfg,
+__imx214_get_pad_crop(struct imx214 *imx214,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx214->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&imx214->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->crop;
default:
@@ -577,7 +579,7 @@ __imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_pad_config *cfg,
}
static int imx214_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct imx214 *imx214 = to_imx214(sd);
@@ -587,7 +589,8 @@ static int imx214_set_format(struct v4l2_subdev *sd,
mutex_lock(&imx214->mutex);
- __crop = __imx214_get_pad_crop(imx214, cfg, format->pad, format->which);
+ __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
+ format->which);
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
@@ -597,7 +600,7 @@ static int imx214_set_format(struct v4l2_subdev *sd,
__crop->width = mode->width;
__crop->height = mode->height;
- __format = __imx214_get_pad_format(imx214, cfg, format->pad,
+ __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
format->which);
__format->width = __crop->width;
__format->height = __crop->height;
@@ -617,7 +620,7 @@ static int imx214_set_format(struct v4l2_subdev *sd,
}
static int imx214_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imx214 *imx214 = to_imx214(sd);
@@ -626,22 +629,22 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, cfg, sel->pad,
+ sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
sel->which);
mutex_unlock(&imx214->mutex);
return 0;
}
static int imx214_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = imx214_modes[0].width;
fmt.format.height = imx214_modes[0].height;
- imx214_set_format(subdev, cfg, &fmt);
+ imx214_set_format(subdev, sd_state, &fmt);
return 0;
}
@@ -776,11 +779,9 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
if (enable) {
- ret = pm_runtime_get_sync(imx214->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(imx214->dev);
+ ret = pm_runtime_resume_and_get(imx214->dev);
+ if (ret < 0)
return ret;
- }
ret = imx214_start_streaming(imx214);
if (ret < 0)
@@ -810,7 +811,7 @@ static int imx214_g_frame_interval(struct v4l2_subdev *subdev,
}
static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
const struct imx214_mode *mode;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 1054ffedaefd..e10af3f74b38 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -689,7 +689,7 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx219 *imx219 = to_imx219(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
struct v4l2_rect *try_crop;
mutex_lock(&imx219->mutex);
@@ -702,7 +702,7 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
try_fmt->field = V4L2_FIELD_NONE;
/* Initialize try_crop rectangle. */
- try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ try_crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
try_crop->top = IMX219_PIXEL_ARRAY_TOP;
try_crop->left = IMX219_PIXEL_ARRAY_LEFT;
try_crop->width = IMX219_PIXEL_ARRAY_WIDTH;
@@ -803,7 +803,7 @@ static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
};
static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -819,7 +819,7 @@ static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx219_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -863,12 +863,13 @@ static void imx219_update_pad_format(struct imx219 *imx219,
}
static int __imx219_get_pad_format(struct imx219 *imx219,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(&imx219->sd, cfg, fmt->pad);
+ v4l2_subdev_get_try_format(&imx219->sd, sd_state,
+ fmt->pad);
/* update the code which could change due to vflip or hflip: */
try_fmt->code = imx219_get_format_code(imx219, try_fmt->code);
fmt->format = *try_fmt;
@@ -882,21 +883,21 @@ static int __imx219_get_pad_format(struct imx219 *imx219,
}
static int imx219_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx219 *imx219 = to_imx219(sd);
int ret;
mutex_lock(&imx219->mutex);
- ret = __imx219_get_pad_format(imx219, cfg, fmt);
+ ret = __imx219_get_pad_format(imx219, sd_state, fmt);
mutex_unlock(&imx219->mutex);
return ret;
}
static int imx219_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -922,7 +923,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else if (imx219->mode != mode ||
imx219->fmt.code != fmt->format.code) {
@@ -979,12 +980,13 @@ static int imx219_set_framefmt(struct imx219 *imx219)
}
static const struct v4l2_rect *
-__imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg,
+__imx219_get_pad_crop(struct imx219 *imx219,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx219->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&imx219->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx219->mode->crop;
}
@@ -993,7 +995,7 @@ __imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg,
}
static int imx219_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
@@ -1001,7 +1003,7 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
struct imx219 *imx219 = to_imx219(sd);
mutex_lock(&imx219->mutex);
- sel->r = *__imx219_get_pad_crop(imx219, cfg, sel->pad,
+ sel->r = *__imx219_get_pad_crop(imx219, sd_state, sel->pad,
sel->which);
mutex_unlock(&imx219->mutex);
@@ -1035,11 +1037,9 @@ static int imx219_start_streaming(struct imx219 *imx219)
const struct imx219_reg_list *reg_list;
int ret;
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
return ret;
- }
/* Apply default values of current mode */
reg_list = &imx219->mode->reg_list;
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index a017ec4e0f50..7ab9e5f9f267 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -710,7 +710,7 @@ static int imx258_write_regs(struct imx258 *imx258,
static int imx258_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
/* Initialize try_fmt */
try_fmt->width = supported_modes[0].width;
@@ -820,7 +820,7 @@ static const struct v4l2_ctrl_ops imx258_ctrl_ops = {
};
static int imx258_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order(GRBG) is supported */
@@ -833,7 +833,7 @@ static int imx258_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx258_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -860,11 +860,12 @@ static void imx258_update_pad_format(const struct imx258_mode *mode,
}
static int __imx258_get_pad_format(struct imx258 *imx258,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&imx258->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&imx258->sd,
+ sd_state,
fmt->pad);
else
imx258_update_pad_format(imx258->cur_mode, fmt);
@@ -873,21 +874,21 @@ static int __imx258_get_pad_format(struct imx258 *imx258,
}
static int imx258_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
int ret;
mutex_lock(&imx258->mutex);
- ret = __imx258_get_pad_format(imx258, cfg, fmt);
+ ret = __imx258_get_pad_format(imx258, sd_state, fmt);
mutex_unlock(&imx258->mutex);
return ret;
}
static int imx258_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
@@ -909,7 +910,7 @@ static int imx258_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx258_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx258->cur_mode = mode;
@@ -1039,11 +1040,9 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto err_unlock;
- }
/*
* Apply default & customized values
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index cdccaab3043a..0dce92872176 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -996,7 +996,7 @@ static int imx274_binning_goodness(struct stimx274 *imx274,
* Must be called with imx274->lock locked.
*
* @imx274: The device object
- * @cfg: The pad config we are editing for TRY requests
+ * @sd_state: The subdev state we are editing for TRY requests
* @which: V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY from the caller
* @width: Input-output parameter: set to the desired width before
* the call, contains the chosen value after returning successfully
@@ -1005,7 +1005,7 @@ static int imx274_binning_goodness(struct stimx274 *imx274,
* available (when called from set_fmt)
*/
static int __imx274_change_compose(struct stimx274 *imx274,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which,
u32 *width,
u32 *height,
@@ -1019,8 +1019,8 @@ static int __imx274_change_compose(struct stimx274 *imx274,
int best_goodness = INT_MIN;
if (which == V4L2_SUBDEV_FORMAT_TRY) {
- cur_crop = &cfg->try_crop;
- tgt_fmt = &cfg->try_fmt;
+ cur_crop = &sd_state->pads->try_crop;
+ tgt_fmt = &sd_state->pads->try_fmt;
} else {
cur_crop = &imx274->crop;
tgt_fmt = &imx274->format;
@@ -1061,7 +1061,7 @@ static int __imx274_change_compose(struct stimx274 *imx274,
/**
* imx274_get_fmt - Get the pad format
* @sd: Pointer to V4L2 Sub device structure
- * @cfg: Pointer to sub device pad information structure
+ * @sd_state: Pointer to sub device state structure
* @fmt: Pointer to pad level media bus format
*
* This function is used to get the pad format information.
@@ -1069,7 +1069,7 @@ static int __imx274_change_compose(struct stimx274 *imx274,
* Return: 0 on success
*/
static int imx274_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -1083,7 +1083,7 @@ static int imx274_get_fmt(struct v4l2_subdev *sd,
/**
* imx274_set_fmt - This is used to set the pad format
* @sd: Pointer to V4L2 Sub device structure
- * @cfg: Pointer to sub device pad information structure
+ * @sd_state: Pointer to sub device state structure
* @format: Pointer to pad level media bus format
*
* This function is used to set the pad format.
@@ -1091,7 +1091,7 @@ static int imx274_get_fmt(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int imx274_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1100,7 +1100,7 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&imx274->lock);
- err = __imx274_change_compose(imx274, cfg, format->which,
+ err = __imx274_change_compose(imx274, sd_state, format->which,
&fmt->width, &fmt->height, 0);
if (err)
@@ -1113,7 +1113,7 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
*/
fmt->field = V4L2_FIELD_NONE;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
else
imx274->format = *fmt;
@@ -1124,7 +1124,7 @@ out:
}
static int imx274_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -1144,8 +1144,8 @@ static int imx274_get_selection(struct v4l2_subdev *sd,
}
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- src_crop = &cfg->try_crop;
- src_fmt = &cfg->try_fmt;
+ src_crop = &sd_state->pads->try_crop;
+ src_fmt = &sd_state->pads->try_fmt;
} else {
src_crop = &imx274->crop;
src_fmt = &imx274->format;
@@ -1179,7 +1179,7 @@ static int imx274_get_selection(struct v4l2_subdev *sd,
}
static int imx274_set_selection_crop(struct stimx274 *imx274,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *tgt_crop;
@@ -1216,7 +1216,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
sel->r = new_crop;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
- tgt_crop = &cfg->try_crop;
+ tgt_crop = &sd_state->pads->try_crop;
else
tgt_crop = &imx274->crop;
@@ -1230,7 +1230,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
/* if crop size changed then reset the output image size */
if (size_changed)
- __imx274_change_compose(imx274, cfg, sel->which,
+ __imx274_change_compose(imx274, sd_state, sel->which,
&new_crop.width, &new_crop.height,
sel->flags);
@@ -1240,7 +1240,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
}
static int imx274_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -1249,13 +1249,13 @@ static int imx274_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->target == V4L2_SEL_TGT_CROP)
- return imx274_set_selection_crop(imx274, cfg, sel);
+ return imx274_set_selection_crop(imx274, sd_state, sel);
if (sel->target == V4L2_SEL_TGT_COMPOSE) {
int err;
mutex_lock(&imx274->lock);
- err = __imx274_change_compose(imx274, cfg, sel->which,
+ err = __imx274_change_compose(imx274, sd_state, sel->which,
&sel->r.width, &sel->r.height,
sel->flags);
mutex_unlock(&imx274->lock);
@@ -1441,9 +1441,8 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
mutex_lock(&imx274->lock);
if (on) {
- ret = pm_runtime_get_sync(&imx274->client->dev);
+ ret = pm_runtime_resume_and_get(&imx274->client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&imx274->client->dev);
mutex_unlock(&imx274->lock);
return ret;
}
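The hunk above is one instance of a conversion repeated across these sensor drivers. A minimal sketch of the pattern, with an illustrative function name not tied to any single driver:

#include <linux/pm_runtime.h>

/*
 * pm_runtime_resume_and_get() decrements the usage count itself when
 * resume fails, unlike pm_runtime_get_sync(), so the error path no
 * longer needs a compensating pm_runtime_put_noidle().
 */
static int example_stream_on(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;     /* usage count already balanced */

        /* ... power-dependent stream setup goes here ... */

        return 0;       /* the matching pm_runtime_put() belongs in stream-off */
}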
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 6319a42057d2..bf7a6c37ca5d 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -516,7 +516,7 @@ static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
};
static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(imx290_formats))
@@ -528,7 +528,7 @@ static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx290_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct imx290 *imx290 = to_imx290(sd);
@@ -550,7 +550,7 @@ static int imx290_enum_frame_size(struct v4l2_subdev *sd,
}
static int imx290_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx290 *imx290 = to_imx290(sd);
@@ -559,7 +559,7 @@ static int imx290_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&imx290->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, sd_state,
fmt->pad);
else
framefmt = &imx290->current_format;
@@ -596,8 +596,8 @@ static u64 imx290_calc_pixel_rate(struct imx290 *imx290)
}
static int imx290_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct imx290 *imx290 = to_imx290(sd);
const struct imx290_mode *mode;
@@ -624,7 +624,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ format = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
} else {
format = &imx290->current_format;
imx290->current_mode = mode;
@@ -646,15 +646,15 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
}
static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { 0 };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = 1920;
fmt.format.height = 1080;
- imx290_set_fmt(subdev, cfg, &fmt);
+ imx290_set_fmt(subdev, sd_state, &fmt);
return 0;
}
@@ -764,11 +764,9 @@ static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
int ret = 0;
if (enable) {
- ret = pm_runtime_get_sync(imx290->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(imx290->dev);
+ ret = pm_runtime_resume_and_get(imx290->dev);
+ if (ret < 0)
goto unlock_and_return;
- }
ret = imx290_start_streaming(imx290);
if (ret) {
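All of the signature changes in this file follow the same shape; here is a sketch of the new pad-operation prototype with a hypothetical driver name, assuming the v4l2_subdev_state API this series introduces:

#include <media/v4l2-subdev.h>

static int example_get_fmt(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *sd_state,
                           struct v4l2_subdev_format *fmt)
{
        struct v4l2_mbus_framefmt *mf;

        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
                /* TRY data now lives in the subdev state, not in a bare
                 * pad config; the accessor takes sd_state directly. */
                mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
                fmt->format = *mf;
                return 0;
        }

        /* ACTIVE formats stay in driver-private state, as before. */
        return 0;
}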
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 38540323a156..dba0854ab5aa 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1860,7 +1860,7 @@ static int imx319_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx319 *imx319 = to_imx319(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&imx319->mutex);
@@ -1947,7 +1947,7 @@ static const struct v4l2_ctrl_ops imx319_ctrl_ops = {
};
static int imx319_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -1963,7 +1963,7 @@ static int imx319_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx319_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -1997,14 +1997,14 @@ static void imx319_update_pad_format(struct imx319 *imx319,
}
static int imx319_do_get_pad_format(struct imx319 *imx319,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &imx319->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx319_update_pad_format(imx319, imx319->cur_mode, fmt);
@@ -2014,14 +2014,14 @@ static int imx319_do_get_pad_format(struct imx319 *imx319,
}
static int imx319_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx319 *imx319 = to_imx319(sd);
int ret;
mutex_lock(&imx319->mutex);
- ret = imx319_do_get_pad_format(imx319, cfg, fmt);
+ ret = imx319_do_get_pad_format(imx319, sd_state, fmt);
mutex_unlock(&imx319->mutex);
return ret;
@@ -2029,7 +2029,7 @@ static int imx319_get_pad_format(struct v4l2_subdev *sd,
static int
imx319_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -2055,7 +2055,7 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx319_update_pad_format(imx319, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx319->cur_mode = mode;
@@ -2141,11 +2141,9 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto err_unlock;
- }
/*
* Apply default & customized values
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 047aa7658d21..062125501788 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -497,13 +497,13 @@ static const struct v4l2_ctrl_ops imx334_ctrl_ops = {
/**
* imx334_enum_mbus_code() - Enumerate V4L2 sub-device mbus codes
* @sd: pointer to imx334 V4L2 sub-device structure
- * @cfg: V4L2 sub-device pad configuration
+ * @sd_state: V4L2 sub-device state
* @code: V4L2 sub-device code enumeration that needs to be filled
*
* Return: 0 if successful, error code otherwise.
*/
static int imx334_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -517,13 +517,13 @@ static int imx334_enum_mbus_code(struct v4l2_subdev *sd,
/**
* imx334_enum_frame_size() - Enumerate V4L2 sub-device frame sizes
* @sd: pointer to imx334 V4L2 sub-device structure
- * @cfg: V4L2 sub-device pad configuration
+ * @sd_state: V4L2 sub-device state
* @fsize: V4L2 sub-device size enumeration that needs to be filled
*
* Return: 0 if successful, error code otherwise.
*/
static int imx334_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fsize)
{
if (fsize->index > 0)
@@ -564,13 +564,13 @@ static void imx334_fill_pad_format(struct imx334 *imx334,
/**
* imx334_get_pad_format() - Get subdevice pad format
* @sd: pointer to imx334 V4L2 sub-device structure
- * @cfg: V4L2 sub-device pad configuration
+ * @sd_state: V4L2 sub-device state
* @fmt: V4L2 sub-device format that needs to be set
*
* Return: 0 if successful, error code otherwise.
*/
static int imx334_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx334 *imx334 = to_imx334(sd);
@@ -580,7 +580,7 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx334_fill_pad_format(imx334, imx334->cur_mode, fmt);
@@ -594,13 +594,13 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
/**
* imx334_set_pad_format() - Set subdevice pad format
* @sd: pointer to imx334 V4L2 sub-device structure
- * @cfg: V4L2 sub-device pad configuration
+ * @sd_state: V4L2 sub-device state
* @fmt: V4L2 sub-device format that needs to be set
*
* Return: 0 if successful, error code otherwise.
*/
static int imx334_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx334 *imx334 = to_imx334(sd);
@@ -615,7 +615,7 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ret = imx334_update_controls(imx334, mode);
@@ -631,20 +631,20 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
/**
* imx334_init_pad_cfg() - Initialize sub-device pad configuration
* @sd: pointer to imx334 V4L2 sub-device structure
- * @cfg: V4L2 sub-device pad configuration
+ * @sd_state: V4L2 sub-device state
*
* Return: 0 if successful, error code otherwise.
*/
static int imx334_init_pad_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct imx334 *imx334 = to_imx334(sd);
struct v4l2_subdev_format fmt = { 0 };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
imx334_fill_pad_format(imx334, &supported_mode, &fmt);
- return imx334_set_pad_format(sd, cfg, &fmt);
+ return imx334_set_pad_format(sd, sd_state, &fmt);
}
/**
@@ -717,9 +717,9 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(imx334->dev);
- if (ret)
- goto error_power_off;
+ ret = pm_runtime_resume_and_get(imx334->dev);
+ if (ret < 0)
+ goto error_unlock;
ret = imx334_start_streaming(imx334);
if (ret)
@@ -737,6 +737,7 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
error_power_off:
pm_runtime_put(imx334->dev);
+error_unlock:
mutex_unlock(&imx334->mutex);
return ret;
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index ccedcd4c520a..cb51c81786bd 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1161,7 +1161,7 @@ static int imx355_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx355 *imx355 = to_imx355(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&imx355->mutex);
@@ -1248,7 +1248,7 @@ static const struct v4l2_ctrl_ops imx355_ctrl_ops = {
};
static int imx355_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1264,7 +1264,7 @@ static int imx355_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx355_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1298,14 +1298,14 @@ static void imx355_update_pad_format(struct imx355 *imx355,
}
static int imx355_do_get_pad_format(struct imx355 *imx355,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &imx355->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx355_update_pad_format(imx355, imx355->cur_mode, fmt);
@@ -1315,14 +1315,14 @@ static int imx355_do_get_pad_format(struct imx355 *imx355,
}
static int imx355_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx355 *imx355 = to_imx355(sd);
int ret;
mutex_lock(&imx355->mutex);
- ret = imx355_do_get_pad_format(imx355, cfg, fmt);
+ ret = imx355_do_get_pad_format(imx355, sd_state, fmt);
mutex_unlock(&imx355->mutex);
return ret;
@@ -1330,7 +1330,7 @@ static int imx355_get_pad_format(struct v4l2_subdev *sd,
static int
imx355_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1356,7 +1356,7 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx355_update_pad_format(imx355, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx355->cur_mode = mode;
@@ -1442,11 +1442,9 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto err_unlock;
- }
/*
* Apply default & customized values
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index e8119ad0bc71..92376592455e 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -678,8 +678,8 @@ static int zilog_tx(struct rc_dev *rcdev, unsigned int *txbuf,
goto out_unlock;
}
- i = i2c_master_recv(ir->tx_c, buf, 1);
- if (i != 1) {
+ ret = i2c_master_recv(ir->tx_c, buf, 1);
+ if (ret != 1) {
dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret);
ret = -EIO;
goto out_unlock;
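The fix above is subtle enough to spell out: the receive count used to land in 'i' while the error message printed a stale 'ret'. i2c_master_recv() returns the number of bytes read or a negative errno, so capturing it in 'ret' keeps the log accurate before it is overwritten with -EIO:

        ret = i2c_master_recv(ir->tx_c, buf, 1);
        if (ret != 1) {
                /* 'ret' now holds the real errno (or short count) */
                dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret);
                ret = -EIO;
                goto out_unlock;
        }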
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 21666d705e37..e29be0242f07 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -539,17 +539,19 @@ static int __find_resolution(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which,
enum m5mols_restype type)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&info->sd, cfg, 0) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&info->sd,
+ sd_state, 0) : NULL;
return &info->ffmt[type];
}
-static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int m5mols_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -558,7 +560,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
mutex_lock(&info->lock);
- format = __find_format(info, cfg, fmt->which, info->res_type);
+ format = __find_format(info, sd_state, fmt->which, info->res_type);
if (format)
fmt->format = *format;
else
@@ -568,7 +570,8 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return ret;
}
-static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int m5mols_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -582,7 +585,7 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
if (ret < 0)
return ret;
- sfmt = __find_format(info, cfg, fmt->which, type);
+ sfmt = __find_format(info, sd_state, fmt->which, type);
if (!sfmt)
return 0;
@@ -648,7 +651,7 @@ static int m5mols_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code || code->index >= SIZE_DEFAULT_FFMT)
@@ -909,7 +912,9 @@ static const struct v4l2_subdev_core_ops m5mols_core_ops = {
*/
static int m5mols_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
*format = m5mols_default_ffmt[0];
return 0;
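The fh->pad to fh->state switch seen here (and in the imx319/imx355 hunks above) follows from the same API change; a sketch of an .open() handler under the new API, with assumed default format values:

static int example_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
        /* The file handle now carries a full v4l2_subdev_state. */
        struct v4l2_mbus_framefmt *try_fmt =
                v4l2_subdev_get_try_format(sd, fh->state, 0);

        try_fmt->width = 1280;                          /* assumed defaults */
        try_fmt->height = 720;
        try_fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;        /* assumed */
        try_fmt->field = V4L2_FIELD_NONE;

        return 0;
}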
diff --git a/drivers/media/i2c/max9271.c b/drivers/media/i2c/max9271.c
index c495582dcff6..ff86c8c4ea61 100644
--- a/drivers/media/i2c/max9271.c
+++ b/drivers/media/i2c/max9271.c
@@ -80,6 +80,18 @@ static int max9271_pclk_detect(struct max9271_device *dev)
return -EIO;
}
+void max9271_wake_up(struct max9271_device *dev)
+{
+ /*
+ * Use the chip default address as this function has to be called
+ * before any other one.
+ */
+ dev->client->addr = MAX9271_DEFAULT_ADDR;
+ i2c_smbus_read_byte(dev->client);
+ usleep_range(5000, 8000);
+}
+EXPORT_SYMBOL_GPL(max9271_wake_up);
+
int max9271_set_serial_link(struct max9271_device *dev, bool enable)
{
int ret;
@@ -106,7 +118,10 @@ int max9271_set_serial_link(struct max9271_device *dev, bool enable)
* Short delays here appear to show bit-errors in the writes following.
* Therefore a conservative delay seems best here.
*/
- max9271_write(dev, 0x04, val);
+ ret = max9271_write(dev, 0x04, val);
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 8000);
return 0;
@@ -118,7 +133,7 @@ int max9271_configure_i2c(struct max9271_device *dev, u8 i2c_config)
int ret;
ret = max9271_write(dev, 0x0d, i2c_config);
- if (ret)
+ if (ret < 0)
return ret;
/* The delay required after an I2C bus configuration change is not
@@ -143,7 +158,10 @@ int max9271_set_high_threshold(struct max9271_device *dev, bool enable)
* Enable or disable reverse channel high threshold to increase
* immunity to power supply noise.
*/
- max9271_write(dev, 0x08, enable ? ret | BIT(0) : ret & ~BIT(0));
+ ret = max9271_write(dev, 0x08, enable ? ret | BIT(0) : ret & ~BIT(0));
+ if (ret < 0)
+ return ret;
+
usleep_range(2000, 2500);
return 0;
@@ -152,6 +170,8 @@ EXPORT_SYMBOL_GPL(max9271_set_high_threshold);
int max9271_configure_gmsl_link(struct max9271_device *dev)
{
+ int ret;
+
/*
* Configure the GMSL link:
*
@@ -162,16 +182,24 @@ int max9271_configure_gmsl_link(struct max9271_device *dev)
*
* TODO: Make the GMSL link configuration parametric.
*/
- max9271_write(dev, 0x07, MAX9271_DBL | MAX9271_HVEN |
- MAX9271_EDC_1BIT_PARITY);
+ ret = max9271_write(dev, 0x07, MAX9271_DBL | MAX9271_HVEN |
+ MAX9271_EDC_1BIT_PARITY);
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 8000);
/*
* Adjust spread spectrum to +4% and auto-detect pixel clock
* and serial link rate.
*/
- max9271_write(dev, 0x02, MAX9271_SPREAD_SPECT_4 | MAX9271_R02_RES |
- MAX9271_PCLK_AUTODETECT | MAX9271_SERIAL_AUTODETECT);
+ ret = max9271_write(dev, 0x02,
+ MAX9271_SPREAD_SPECT_4 | MAX9271_R02_RES |
+ MAX9271_PCLK_AUTODETECT |
+ MAX9271_SERIAL_AUTODETECT);
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 8000);
return 0;
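Every hunk in this file applies the same hardening: writes over the reverse control channel can fail, so their results are now checked before the settle delay instead of being discarded. The shape, condensed from the code above:

        ret = max9271_write(dev, 0x04, val);
        if (ret < 0)
                return ret;

        /* Register writes need time to settle on the control channel. */
        usleep_range(5000, 8000);

        return 0;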
diff --git a/drivers/media/i2c/max9271.h b/drivers/media/i2c/max9271.h
index d78fb21441e9..dc5e4e70ba6f 100644
--- a/drivers/media/i2c/max9271.h
+++ b/drivers/media/i2c/max9271.h
@@ -86,6 +86,15 @@ struct max9271_device {
};
/**
+ * max9271_wake_up() - Wake up the serializer by issuing an i2c transaction
+ * @dev: The max9271 device
+ *
+ * This function shall be called before any other interaction with the
+ * serializer.
+ */
+void max9271_wake_up(struct max9271_device *dev);
+
+/**
* max9271_set_serial_link() - Enable/disable serial link
* @dev: The max9271 device
* @enable: Serial link enable/disable flag
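A hypothetical caller sketch for the new helper (the 'serializer' member name is assumed, not taken from any specific driver): wake the device at its default address before any other configuration:

        max9271_wake_up(&priv->serializer);

        /* Only after the wake-up is the register interface reliable. */
        ret = max9271_set_serial_link(&priv->serializer, false);
        if (ret)
                return ret;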
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 6fd4d59fcc72..1aa2c58fd38c 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -113,6 +113,7 @@
#define MAX9286_REV_TRF(n) ((n) << 4)
#define MAX9286_REV_AMP(n) ((((n) - 30) / 10) << 1) /* in mV */
#define MAX9286_REV_AMP_X BIT(0)
+#define MAX9286_REV_AMP_HIGH 170
/* Register 0x3f */
#define MAX9286_EN_REV_CFG BIT(6)
#define MAX9286_REV_FLEN(n) ((n) - 20)
@@ -163,7 +164,9 @@ struct max9286_priv {
unsigned int mux_channel;
bool mux_open;
- u32 reverse_channel_mv;
+ /* The initial reverse control channel amplitude. */
+ u32 init_rev_chan_mv;
+ u32 rev_chan_mv;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixelrate;
@@ -287,9 +290,8 @@ static int max9286_i2c_mux_select(struct i2c_mux_core *muxc, u32 chan)
priv->mux_channel = chan;
- max9286_i2c_mux_configure(priv,
- MAX9286_FWDCCEN(chan) |
- MAX9286_REVCCEN(chan));
+ max9286_i2c_mux_configure(priv, MAX9286_FWDCCEN(chan) |
+ MAX9286_REVCCEN(chan));
return 0;
}
@@ -341,8 +343,15 @@ static void max9286_configure_i2c(struct max9286_priv *priv, bool localack)
static void max9286_reverse_channel_setup(struct max9286_priv *priv,
unsigned int chan_amplitude)
{
+ u8 chan_config;
+
+ if (priv->rev_chan_mv == chan_amplitude)
+ return;
+
+ priv->rev_chan_mv = chan_amplitude;
+
/* Reverse channel transmission time: default to 1. */
- u8 chan_config = MAX9286_REV_TRF(1);
+ chan_config = MAX9286_REV_TRF(1);
/*
* Reverse channel setup.
@@ -547,9 +556,9 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
subdev->name, src_pad, index);
/*
- * We can only register v4l2_async_notifiers, which do not provide a
- * means to register a complete callback. bound_sources allows us to
- * identify when all remote serializers have completed their probe.
+ * As we register subdev notifiers we won't get a .complete() callback
+ * here, so we have to use bound_sources to identify when all remote
+ * serializers have probed.
*/
if (priv->bound_sources != priv->source_mask)
return 0;
@@ -559,19 +568,13 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
* channels:
*
* - Increase the reverse channel amplitude to compensate for the
- * remote ends high threshold, if not done already
+ * remote end's high threshold
* - Verify all configuration links are properly detected
* - Disable auto-ack as communication on the control channel is now
* stable.
*/
- if (priv->reverse_channel_mv < 170)
- max9286_reverse_channel_setup(priv, 170);
+ max9286_reverse_channel_setup(priv, MAX9286_REV_AMP_HIGH);
max9286_check_config_link(priv, priv->source_mask);
-
- /*
- * Re-configure I2C with local acknowledge disabled after cameras have
- * probed.
- */
max9286_configure_i2c(priv, false);
return max9286_set_pixelrate(priv);
@@ -712,7 +715,7 @@ static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
}
static int max9286_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -725,12 +728,12 @@ static int max9286_enum_mbus_code(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *
max9286_get_pad_format(struct max9286_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &priv->fmt[pad];
default:
@@ -739,7 +742,7 @@ max9286_get_pad_format(struct max9286_priv *priv,
}
static int max9286_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct max9286_priv *priv = sd_to_max9286(sd);
@@ -760,7 +763,8 @@ static int max9286_set_fmt(struct v4l2_subdev *sd,
break;
}
- cfg_fmt = max9286_get_pad_format(priv, cfg, format->pad, format->which);
+ cfg_fmt = max9286_get_pad_format(priv, sd_state, format->pad,
+ format->which);
if (!cfg_fmt)
return -EINVAL;
@@ -772,7 +776,7 @@ static int max9286_set_fmt(struct v4l2_subdev *sd,
}
static int max9286_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct max9286_priv *priv = sd_to_max9286(sd);
@@ -788,7 +792,7 @@ static int max9286_get_fmt(struct v4l2_subdev *sd,
if (pad == MAX9286_SRC_PAD)
pad = __ffs(priv->bound_sources);
- cfg_fmt = max9286_get_pad_format(priv, cfg, pad, format->which);
+ cfg_fmt = max9286_get_pad_format(priv, sd_state, pad, format->which);
if (!cfg_fmt)
return -EINVAL;
@@ -832,7 +836,7 @@ static int max9286_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
unsigned int i;
for (i = 0; i < MAX9286_N_SINKS; i++) {
- format = v4l2_subdev_get_try_format(subdev, fh->pad, i);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, i);
max9286_init_format(format);
}
@@ -972,7 +976,7 @@ static int max9286_setup(struct max9286_priv *priv)
* only. This should be disabled after the mux is initialised.
*/
max9286_configure_i2c(priv, true);
- max9286_reverse_channel_setup(priv, priv->reverse_channel_mv);
+ max9286_reverse_channel_setup(priv, priv->init_rev_chan_mv);
/*
* Enable GMSL links, mask unused ones and autodetect link
@@ -1237,9 +1241,9 @@ static int max9286_parse_dt(struct max9286_priv *priv)
if (of_property_read_u32(dev->of_node,
"maxim,reverse-channel-microvolt",
&reverse_channel_microvolt))
- priv->reverse_channel_mv = 170;
+ priv->init_rev_chan_mv = 170;
else
- priv->reverse_channel_mv = reverse_channel_microvolt / 1000U;
+ priv->init_rev_chan_mv = reverse_channel_microvolt / 1000U;
priv->route_mask = priv->source_mask;
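For the arithmetic above: the "maxim,reverse-channel-microvolt" DT property is specified in microvolts while the driver tracks millivolts, and a missing property falls back to 170 mV, matching MAX9286_REV_AMP_HIGH. A condensed sketch:

        u32 uv;

        if (of_property_read_u32(dev->of_node,
                                 "maxim,reverse-channel-microvolt", &uv))
                priv->init_rev_chan_mv = 170;           /* default, in mV */
        else
                priv->init_rev_chan_mv = uv / 1000U;    /* 100000 uV -> 100 mV */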
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index ff212335326a..4a1410ebb4c8 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -188,7 +188,7 @@ static int ml86v7667_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int ml86v7667_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -200,7 +200,7 @@ static int ml86v7667_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ml86v7667_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ml86v7667_priv *priv = to_ml86v7667(sd);
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 3b0ba8ed5233..c9f0bd997ea7 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -217,9 +217,9 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
goto done;
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0)
- goto put_unlock;
+ goto unlock;
ret = mt9m001_apply_selection(sd);
if (ret)
@@ -247,13 +247,14 @@ done:
put_unlock:
pm_runtime_put(&client->dev);
+unlock:
mutex_unlock(&mt9m001->mutex);
return ret;
}
static int mt9m001_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -294,7 +295,7 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd,
}
static int mt9m001_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -319,7 +320,7 @@ static int mt9m001_get_selection(struct v4l2_subdev *sd,
}
static int mt9m001_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -330,7 +331,7 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mf;
return 0;
}
@@ -376,7 +377,7 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
}
static int mt9m001_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -410,7 +411,7 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9m001_s_fmt(sd, fmt, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -656,12 +657,12 @@ static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
};
static int mt9m001_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
try_fmt->width = MT9M001_MAX_WIDTH;
try_fmt->height = MT9M001_MAX_HEIGHT;
@@ -676,7 +677,7 @@ static int mt9m001_init_cfg(struct v4l2_subdev *sd,
}
static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -834,6 +835,10 @@ static int mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
+ /*
+ * As it increments RPM usage_count even on errors, we don't need to
+ * check the returned code here.
+ */
pm_runtime_get_sync(&client->dev);
v4l2_async_unregister_subdev(&mt9m001->subdev);
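The comment added above captures a non-obvious asymmetry with the streaming paths: in .remove() the unchecked pm_runtime_get_sync() is fine because the usage count rises even on failure. A sketch of the resulting teardown ordering (abridged and assumed typical for these sensor drivers):

        pm_runtime_get_sync(&client->dev);      /* count rises even on error */

        v4l2_async_unregister_subdev(&mt9m001->subdev);
        /* ... media entity and control handler cleanup ... */

        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
        pm_runtime_put_noidle(&client->dev);    /* balances the get above */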
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 5a4c0f9d1eee..ba0c0ea91c95 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -304,7 +304,7 @@ static int mt9m032_setup_pll(struct mt9m032 *sensor)
*/
static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -315,7 +315,7 @@ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index != 0 || fse->code != MEDIA_BUS_FMT_Y8_1X8)
@@ -332,18 +332,19 @@ static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
/**
* __mt9m032_get_pad_crop() - get crop rect
* @sensor: pointer to the sensor struct
- * @cfg: v4l2_subdev_pad_config for getting the try crop rect from
+ * @sd_state: v4l2_subdev_state for getting the try crop rect from
* @which: select try or active crop rect
*
* Returns a pointer to the current active or fh-relative try crop rect
*/
static struct v4l2_rect *
-__mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
+__mt9m032_get_pad_crop(struct mt9m032 *sensor,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&sensor->subdev, cfg, 0);
+ return v4l2_subdev_get_try_crop(&sensor->subdev, sd_state, 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->crop;
default:
@@ -354,18 +355,20 @@ __mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cf
/**
* __mt9m032_get_pad_format() - get format
* @sensor: pointer to the sensor struct
- * @cfg: v4l2_subdev_pad_config for getting the try format from
+ * @sd_state: v4l2_subdev_state for getting the try format from
* @which: select try or active format
*
* Returns a pointer to the current active or fh-relative try format
*/
static struct v4l2_mbus_framefmt *
-__mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
+__mt9m032_get_pad_format(struct mt9m032 *sensor,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&sensor->subdev, cfg, 0);
+ return v4l2_subdev_get_try_format(&sensor->subdev, sd_state,
+ 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -374,20 +377,20 @@ __mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *
}
static int mt9m032_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
mutex_lock(&sensor->lock);
- fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, sd_state, fmt->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -401,7 +404,7 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
}
/* Scaling is not supported, the format is thus fixed. */
- fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, sd_state, fmt->which);
ret = 0;
done:
@@ -410,7 +413,7 @@ done:
}
static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -419,14 +422,14 @@ static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
return -EINVAL;
mutex_lock(&sensor->lock);
- sel->r = *__mt9m032_get_pad_crop(sensor, cfg, sel->which);
+ sel->r = *__mt9m032_get_pad_crop(sensor, sd_state, sel->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -462,13 +465,14 @@ static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9m032_get_pad_crop(sensor, cfg, sel->which);
+ __crop = __mt9m032_get_pad_crop(sensor, sd_state, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- format = __mt9m032_get_pad_format(sensor, cfg, sel->which);
+ format = __mt9m032_get_pad_format(sensor, sd_state,
+ sel->which);
format->width = rect.width;
format->height = rect.height;
}
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 0e11734f75aa..91a44359bcd3 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -449,7 +449,7 @@ static int mt9m111_reset(struct mt9m111 *mt9m111)
}
static int mt9m111_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -493,7 +493,7 @@ static int mt9m111_set_selection(struct v4l2_subdev *sd,
}
static int mt9m111_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -518,7 +518,7 @@ static int mt9m111_get_selection(struct v4l2_subdev *sd,
}
static int mt9m111_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -529,7 +529,7 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format = *mf;
return 0;
#else
@@ -624,7 +624,7 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
}
static int mt9m111_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -678,7 +678,7 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -1100,7 +1100,7 @@ static int mt9m111_s_frame_interval(struct v4l2_subdev *sd,
}
static int mt9m111_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(mt9m111_colour_fmts))
@@ -1119,11 +1119,11 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
}
static int mt9m111_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
format->width = MT9M111_MAX_WIDTH;
format->height = MT9M111_MAX_HEIGHT;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index a633b934d93e..6eb88ef99783 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -470,7 +470,7 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -483,7 +483,7 @@ static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -501,12 +501,14 @@ static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_mbus_framefmt *
-__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
+__mt9p031_get_pad_format(struct mt9p031 *mt9p031,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9p031->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9p031->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->format;
default:
@@ -515,12 +517,14 @@ __mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, u32 which)
+__mt9p031_get_pad_crop(struct mt9p031 *mt9p031,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9p031->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9p031->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->crop;
default:
@@ -529,18 +533,18 @@ __mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *c
}
static int mt9p031_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- fmt->format = *__mt9p031_get_pad_format(mt9p031, cfg, fmt->pad,
+ fmt->format = *__mt9p031_get_pad_format(mt9p031, sd_state, fmt->pad,
fmt->which);
return 0;
}
static int mt9p031_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -551,7 +555,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9p031_get_pad_crop(mt9p031, cfg, format->pad,
+ __crop = __mt9p031_get_pad_crop(mt9p031, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -567,7 +571,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9p031_get_pad_format(mt9p031, cfg, format->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -578,7 +582,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
}
static int mt9p031_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -586,12 +590,13 @@ static int mt9p031_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
+ sel->r = *__mt9p031_get_pad_crop(mt9p031, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9p031_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -621,13 +626,15 @@ static int mt9p031_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
+ __crop = __mt9p031_get_pad_crop(mt9p031, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9p031_get_pad_format(mt9p031, cfg, sel->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -942,13 +949,13 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9P031_COLUMN_START_DEF;
crop->top = MT9P031_ROW_START_DEF;
crop->width = MT9P031_WINDOW_WIDTH_DEF;
crop->height = MT9P031_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
format->code = MEDIA_BUS_FMT_Y12_1X12;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 2e96ff5234b4..b651ee4a26e8 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -252,12 +252,14 @@ e_power:
*/
static struct v4l2_mbus_framefmt *
-__mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
+__mt9t001_get_pad_format(struct mt9t001 *mt9t001,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9t001->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9t001->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->format;
default:
@@ -266,12 +268,14 @@ __mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
+__mt9t001_get_pad_crop(struct mt9t001 *mt9t001,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9t001->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9t001->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->crop;
default:
@@ -335,7 +339,7 @@ static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -346,7 +350,7 @@ static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -361,18 +365,19 @@ static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9t001_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
- format->format = *__mt9t001_get_pad_format(mt9t001, cfg, format->pad,
+ format->format = *__mt9t001_get_pad_format(mt9t001, sd_state,
+ format->pad,
format->which);
return 0;
}
static int mt9t001_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -383,7 +388,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9t001_get_pad_crop(mt9t001, cfg, format->pad,
+ __crop = __mt9t001_get_pad_crop(mt9t001, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -399,7 +404,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9t001_get_pad_format(mt9t001, cfg, format->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -410,7 +415,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
}
static int mt9t001_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -418,12 +423,13 @@ static int mt9t001_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
+ sel->r = *__mt9t001_get_pad_crop(mt9t001, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9t001_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -455,13 +461,15 @@ static int mt9t001_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
+ __crop = __mt9t001_get_pad_crop(mt9t001, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9t001_get_pad_format(mt9t001, cfg, sel->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -798,13 +806,13 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9T001_COLUMN_START_DEF;
crop->top = MT9T001_ROW_START_DEF;
crop->width = MT9T001_WINDOW_WIDTH_DEF + 1;
crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index ae3c336eadf5..8d2e3caa9b28 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -872,8 +872,8 @@ static int mt9t112_set_params(struct mt9t112_priv *priv,
}
static int mt9t112_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
@@ -897,7 +897,7 @@ static int mt9t112_get_selection(struct v4l2_subdev *sd,
}
static int mt9t112_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -912,7 +912,7 @@ static int mt9t112_set_selection(struct v4l2_subdev *sd,
}
static int mt9t112_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -953,7 +953,7 @@ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
}
static int mt9t112_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -982,13 +982,13 @@ static int mt9t112_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9t112_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
static int mt9t112_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 46ef74a2ca36..7699e64e1127 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -327,7 +327,7 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
}
static int mt9v011_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -338,7 +338,7 @@ static int mt9v011_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mt9v011_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -358,7 +358,7 @@ static int mt9v011_set_fmt(struct v4l2_subdev *sd,
set_res(sd);
} else {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
}
return 0;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 5bd3ae82992f..4cfdd3dfbd42 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -349,12 +349,14 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
*/
static struct v4l2_mbus_framefmt *
-__mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
+__mt9v032_get_pad_format(struct mt9v032 *mt9v032,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9v032->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9v032->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->format;
default:
@@ -363,12 +365,14 @@ __mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9v032_get_pad_crop(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
+__mt9v032_get_pad_crop(struct mt9v032 *mt9v032,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9v032->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9v032->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->crop;
default:
@@ -425,7 +429,7 @@ static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -438,7 +442,7 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -457,12 +461,13 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9v032_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- format->format = *__mt9v032_get_pad_format(mt9v032, cfg, format->pad,
+ format->format = *__mt9v032_get_pad_format(mt9v032, sd_state,
+ format->pad,
format->which);
return 0;
}
@@ -492,7 +497,7 @@ static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
}
static int mt9v032_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -503,7 +508,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9v032_get_pad_crop(mt9v032, cfg, format->pad,
+ __crop = __mt9v032_get_pad_crop(mt9v032, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -519,7 +524,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
hratio = mt9v032_calc_ratio(__crop->width, width);
vratio = mt9v032_calc_ratio(__crop->height, height);
- __format = __mt9v032_get_pad_format(mt9v032, cfg, format->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -536,7 +541,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
}
static int mt9v032_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -544,12 +549,13 @@ static int mt9v032_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
+ sel->r = *__mt9v032_get_pad_crop(mt9v032, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9v032_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -581,13 +587,15 @@ static int mt9v032_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int,
rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
+ __crop = __mt9v032_get_pad_crop(mt9v032, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9v032_get_pad_format(mt9v032, cfg, sel->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -922,13 +930,13 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9V032_COLUMN_START_DEF;
crop->top = MT9V032_ROW_START_DEF;
crop->width = MT9V032_WINDOW_WIDTH_DEF;
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
if (mt9v032->model->color)
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 97c7527b74ed..2dc4a0f24ce8 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -791,16 +791,16 @@ static int mt9v111_g_frame_interval(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
struct mt9v111_dev *mt9v111,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
- return v4l2_subdev_get_try_format(&mt9v111->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9v111->sd, sd_state, pad);
#else
- return &cfg->try_fmt;
+ return &sd_state->pads->try_fmt;
#endif
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v111->fmt;
@@ -810,7 +810,7 @@ static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
}
static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > ARRAY_SIZE(mt9v111_formats) - 1)
@@ -822,7 +822,7 @@ static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
unsigned int i;
@@ -845,7 +845,7 @@ static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
}
static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad || fse->index >= ARRAY_SIZE(mt9v111_frame_sizes))
@@ -860,7 +860,7 @@ static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9v111_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
@@ -869,7 +869,8 @@ static int mt9v111_get_format(struct v4l2_subdev *subdev,
return -EINVAL;
mutex_lock(&mt9v111->stream_mutex);
- format->format = *__mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->format = *__mt9v111_get_pad_format(mt9v111, sd_state,
+ format->pad,
format->which);
mutex_unlock(&mt9v111->stream_mutex);
@@ -877,7 +878,7 @@ static int mt9v111_get_format(struct v4l2_subdev *subdev,
}
static int mt9v111_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
@@ -925,7 +926,7 @@ static int mt9v111_set_format(struct v4l2_subdev *subdev,
new_fmt.height = mt9v111_frame_sizes[idx].height;
/* Update the device (or pad) format if it has changed. */
- __fmt = __mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ __fmt = __mt9v111_get_pad_format(mt9v111, sd_state, format->pad,
format->which);
/* Format hasn't changed, stop here. */
@@ -954,9 +955,9 @@ done:
}
static int mt9v111_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
- cfg->try_fmt = mt9v111_def_fmt;
+ sd_state->pads->try_fmt = mt9v111_def_fmt;
return 0;
}
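As the mt9v111_init_cfg() hunk shows, drivers that touched cfg->try_fmt directly now go through the state's per-pad array; for a single-pad sensor, sd_state->pads points at pad 0. A sketch, with my_sensor_def_fmt as a hypothetical default format:

#include <media/v4l2-subdev.h>

static const struct v4l2_mbus_framefmt my_sensor_def_fmt = {
	.width	= 640,
	.height	= 480,
	.code	= MEDIA_BUS_FMT_UYVY8_2X8,
	.field	= V4L2_FIELD_NONE,
};

static int my_sensor_init_cfg(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state)
{
	/* sd_state->pads is the per-pad config array; pad 0 for a sensor. */
	sd_state->pads->try_fmt = my_sensor_def_fmt;

	return 0;
}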
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 87d76a7f691a..f3ac379ef34a 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -488,7 +488,7 @@ unlock:
}
static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(noon010_formats))
@@ -499,15 +499,15 @@ static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
}
static int noon010_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
}
return 0;
@@ -539,7 +539,8 @@ static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
return &noon010_formats[i];
}
-static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int noon010_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
@@ -554,8 +555,8 @@ static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = fmt->format;
}
return 0;
@@ -637,7 +638,9 @@ static int noon010_log_status(struct v4l2_subdev *sd)
static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
mf->width = noon010_sizes[0].width;
mf->height = noon010_sizes[0].height;
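The internal open() callback changes the same way: the file handle no longer carries a bare pad config, so the try format is reached through fh->state. A sketch under the same assumptions (default sizes hypothetical):

#include <media/v4l2-subdev.h>

static int my_sensor_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *mf =
		v4l2_subdev_get_try_format(sd, fh->state, 0);

	/* Seed the per-file-handle TRY format with the sensor default. */
	mf->width = 640;
	mf->height = 480;
	mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
	mf->field = V4L2_FIELD_NONE;

	return 0;
}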
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index c47b1d45d8fd..a3ce5500d355 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -295,7 +295,7 @@ static void ov02a10_fill_fmt(const struct ov02a10_mode *mode,
}
static int ov02a10_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
@@ -315,7 +315,7 @@ static int ov02a10_set_fmt(struct v4l2_subdev *sd,
ov02a10_fill_fmt(ov02a10->cur_mode, mbus_fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- frame_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ frame_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
else
frame_fmt = &ov02a10->fmt;
@@ -327,7 +327,7 @@ out_unlock:
}
static int ov02a10_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
@@ -336,7 +336,8 @@ static int ov02a10_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov02a10->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
} else {
fmt->format = ov02a10->fmt;
mbus_fmt->code = ov02a10->fmt.code;
@@ -349,7 +350,7 @@ static int ov02a10_get_fmt(struct v4l2_subdev *sd,
}
static int ov02a10_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
@@ -363,7 +364,7 @@ static int ov02a10_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov02a10_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -511,7 +512,7 @@ static int __ov02a10_stop_stream(struct ov02a10 *ov02a10)
}
static int ov02a10_entity_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
@@ -521,7 +522,7 @@ static int ov02a10_entity_init_cfg(struct v4l2_subdev *sd,
}
};
- ov02a10_set_fmt(sd, cfg, &fmt);
+ ov02a10_set_fmt(sd, sd_state, &fmt);
return 0;
}
@@ -540,11 +541,9 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
if (on) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto unlock_and_return;
- }
ret = __ov02a10_start_stream(ov02a10);
if (ret) {
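The s_stream hunks in this series all make the same runtime-PM conversion: pm_runtime_resume_and_get() drops the usage count itself when the resume fails, so the explicit pm_runtime_put_noidle() in the error path goes away. A before/after sketch:

#include <linux/pm_runtime.h>

/* Before: get_sync() leaves the usage count raised even on failure. */
static int stream_on_old(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}

/* After: resume_and_get() has already dropped the count on failure. */
static int stream_on_new(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;
	return 0;
}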

diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 4a2885ff0cbe..7fc70af53e45 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1150,7 +1150,7 @@ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov13858 *ov13858 = to_ov13858(sd);
struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
- fh->pad,
+ fh->state,
0);
mutex_lock(&ov13858->mutex);
@@ -1275,7 +1275,7 @@ static const struct v4l2_ctrl_ops ov13858_ctrl_ops = {
};
static int ov13858_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one Bayer order (GRBG) is supported */
@@ -1288,7 +1288,7 @@ static int ov13858_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov13858_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1315,14 +1315,14 @@ static void ov13858_update_pad_format(const struct ov13858_mode *mode,
}
static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &ov13858->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov13858_update_pad_format(ov13858->cur_mode, fmt);
@@ -1332,14 +1332,14 @@ static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
}
static int ov13858_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov13858 *ov13858 = to_ov13858(sd);
int ret;
mutex_lock(&ov13858->mutex);
- ret = ov13858_do_get_pad_format(ov13858, cfg, fmt);
+ ret = ov13858_do_get_pad_format(ov13858, sd_state, fmt);
mutex_unlock(&ov13858->mutex);
return ret;
@@ -1347,7 +1347,7 @@ static int ov13858_get_pad_format(struct v4l2_subdev *sd,
static int
ov13858_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov13858 *ov13858 = to_ov13858(sd);
@@ -1371,7 +1371,7 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov13858_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ov13858->cur_mode = mode;
@@ -1472,11 +1472,9 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto err_unlock;
- }
/*
* Apply default & customized values
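ov13858 splits get_fmt into a locked wrapper and a do_get helper that dispatches on fmt->which; only the signatures change in this patch. A condensed single-function sketch of that dispatch, with the my_sensor names hypothetical:

#include <linux/mutex.h>
#include <media/v4l2-subdev.h>

struct my_sensor {
	struct v4l2_subdev sd;
	struct mutex lock;
	struct v4l2_mbus_framefmt active_fmt;
};

static int my_sensor_get_fmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_format *fmt)
{
	struct my_sensor *sensor = container_of(sd, struct my_sensor, sd);

	mutex_lock(&sensor->lock);
	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
							  fmt->pad);
	else
		fmt->format = sensor->active_fmt;
	mutex_unlock(&sensor->lock);

	return 0;
}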
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 4a4bd5b665a1..4b75da55b260 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -913,7 +913,7 @@ err:
}
static int ov2640_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -925,7 +925,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mf;
return 0;
#else
@@ -946,7 +946,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
}
static int ov2640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -996,7 +996,7 @@ static int ov2640_set_fmt(struct v4l2_subdev *sd,
/* select format */
priv->cfmt_code = mf->code;
} else {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
}
out:
mutex_unlock(&priv->lock);
@@ -1005,11 +1005,11 @@ out:
}
static int ov2640_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
const struct ov2640_win_size *win =
ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
@@ -1026,7 +1026,7 @@ static int ov2640_init_cfg(struct v4l2_subdev *sd,
}
static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov2640_codes))
@@ -1037,7 +1037,7 @@ static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2640_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
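ov2640 keeps its CONFIG_VIDEO_V4L2_SUBDEV_API guards: when the subdev API is compiled out there is no try-format storage to consult, so TRY requests fail. A sketch of the guarded TRY path, with the ACTIVE branch elided:

#include <media/v4l2-subdev.h>

static int my_sensor_get_fmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_format *format)
{
	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
		format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
		return 0;
#else
		return -EINVAL;
#endif
	}
	/* ... fill format->format from the ACTIVE state here ... */
	return 0;
}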
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 42f64175a6df..13ded5b2aa66 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -204,6 +204,7 @@ struct ov2659 {
struct i2c_client *client;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *link_frequency;
+ struct clk *clk;
const struct ov2659_framesize *frame_size;
struct sensor_register *format_ctrl_regs;
struct ov2659_pll_ctrl pll;
@@ -979,7 +980,7 @@ static int ov2659_init(struct v4l2_subdev *sd, u32 val)
*/
static int ov2659_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -995,7 +996,7 @@ static int ov2659_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1021,7 +1022,7 @@ static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd,
}
static int ov2659_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1033,7 +1034,7 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
mutex_lock(&ov2659->lock);
fmt->format = *mf;
mutex_unlock(&ov2659->lock);
@@ -1083,7 +1084,7 @@ static void __ov2659_try_frame_size(struct v4l2_mbus_framefmt *mf,
}
static int ov2659_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1113,7 +1114,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
#endif
} else {
@@ -1186,11 +1187,9 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
goto unlock;
}
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto unlock;
- }
ret = ov2659_init(sd, 0);
if (!ret)
@@ -1270,6 +1269,8 @@ static int ov2659_power_off(struct device *dev)
gpiod_set_value(ov2659->pwdn_gpio, 1);
+ clk_disable_unprepare(ov2659->clk);
+
return 0;
}
@@ -1278,9 +1279,17 @@ static int ov2659_power_on(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2659 *ov2659 = to_ov2659(sd);
+ int ret;
dev_dbg(&client->dev, "%s:\n", __func__);
+ ret = clk_prepare_enable(ov2659->clk);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable clock\n",
+ __func__);
+ return ret;
+ }
+
gpiod_set_value(ov2659->pwdn_gpio, 0);
if (ov2659->resetb_gpio) {
@@ -1302,7 +1311,7 @@ static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
dev_dbg(&client->dev, "%s:\n", __func__);
@@ -1368,8 +1377,7 @@ static int ov2659_detect(struct v4l2_subdev *sd)
id = OV265X_ID(pid, ver);
if (id != OV2659_ID) {
dev_err(&client->dev,
- "Sensor detection failed (%04X, %d)\n",
- id, ret);
+ "Sensor detection failed (%04X)\n", id);
ret = -ENODEV;
} else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
@@ -1425,7 +1433,6 @@ static int ov2659_probe(struct i2c_client *client)
const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
struct v4l2_subdev *sd;
struct ov2659 *ov2659;
- struct clk *clk;
int ret;
if (!pdata) {
@@ -1440,11 +1447,11 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->pdata = pdata;
ov2659->client = client;
- clk = devm_clk_get(&client->dev, "xvclk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ov2659->clk = devm_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(ov2659->clk))
+ return PTR_ERR(ov2659->clk);
- ov2659->xvclk_frequency = clk_get_rate(clk);
+ ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
if (ov2659->xvclk_frequency < 6000000 ||
ov2659->xvclk_frequency > 27000000)
return -EINVAL;
@@ -1506,7 +1513,9 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->frame_size = &ov2659_framesizes[2];
ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
- ov2659_power_on(&client->dev);
+ ret = ov2659_power_on(&client->dev);
+ if (ret < 0)
+ goto error;
ret = ov2659_detect(sd);
if (ret < 0)
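Besides the state conversion, the ov2659 hunks gate the sensor clock in the power helpers: the probe-time clk local moves into the driver struct so power_on()/power_off() can prepare and unprepare it, and probe now checks power-on's return value instead of ignoring it. A sketch of that shape (names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

struct my_sensor {
	struct clk *clk;
};

static int my_sensor_power_on(struct device *dev, struct my_sensor *sensor)
{
	int ret = clk_prepare_enable(sensor->clk);

	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}
	/* ... deassert power-down / reset GPIOs here ... */
	return 0;
}

static void my_sensor_power_off(struct device *dev, struct my_sensor *sensor)
{
	/* ... assert power-down GPIO here ... */
	clk_disable_unprepare(sensor->clk);
}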
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 178dfe985a25..906c711f6821 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -645,7 +645,7 @@ unlock:
}
static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -659,7 +659,7 @@ static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2680_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -673,7 +673,8 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
+ format->pad);
#else
ret = -EINVAL;
#endif
@@ -690,7 +691,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -721,7 +722,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- try_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *try_fmt;
#endif
goto unlock;
@@ -743,22 +744,22 @@ unlock:
}
static int ov2680_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.width = 800,
.height = 600,
}
};
- return ov2680_set_fmt(sd, cfg, &fmt);
+ return ov2680_set_fmt(sd, sd_state, &fmt);
}
static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
@@ -775,7 +776,7 @@ static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
}
static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct v4l2_fract tpf;
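ov2680 (like ov5645 and ov7251 below) implements init_cfg by synthesizing a format request and routing it through its own set_fmt; a NULL state selects the ACTIVE path, which is why .which is derived from sd_state. A sketch:

#include <media/v4l2-subdev.h>

static int my_sensor_set_fmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_format *format);

static int my_sensor_init_cfg(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state)
{
	struct v4l2_subdev_format fmt = {
		/* A NULL state means "reset the ACTIVE format". */
		.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
				  : V4L2_SUBDEV_FORMAT_ACTIVE,
		.format = {
			.width = 800,
			.height = 600,
		},
	};

	return my_sensor_set_fmt(sd, sd_state, &fmt);
}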
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 49a2dcedb347..b6e010ea3249 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -328,7 +328,7 @@ static void ov2685_fill_fmt(const struct ov2685_mode *mode,
}
static int ov2685_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -341,7 +341,7 @@ static int ov2685_set_fmt(struct v4l2_subdev *sd,
}
static int ov2685_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -353,7 +353,7 @@ static int ov2685_get_fmt(struct v4l2_subdev *sd,
}
static int ov2685_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(supported_modes))
@@ -365,7 +365,7 @@ static int ov2685_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2685_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
@@ -456,11 +456,10 @@ static int ov2685_s_stream(struct v4l2_subdev *sd, int on)
goto unlock_and_return;
if (on) {
- ret = pm_runtime_get_sync(&ov2685->client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&ov2685->client->dev);
+ if (ret < 0)
goto unlock_and_return;
- }
+
ret = __v4l2_ctrl_handler_setup(&ov2685->ctrl_handler);
if (ret) {
pm_runtime_put(&client->dev);
@@ -494,7 +493,7 @@ static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov2685->mutex);
- try_fmt = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0);
/* Initialize try_fmt */
ov2685_fill_fmt(&supported_modes[0], try_fmt);
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 0f3f17f3c426..599369a3d192 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -751,9 +751,8 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&ov2740->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
mutex_unlock(&ov2740->mutex);
return ret;
}
@@ -811,7 +810,7 @@ exit:
}
static int ov2740_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2740 *ov2740 = to_ov2740(sd);
@@ -826,7 +825,7 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov2740->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
@@ -851,14 +850,15 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
}
static int ov2740_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2740 *ov2740 = to_ov2740(sd);
mutex_lock(&ov2740->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd,
+ sd_state,
fmt->pad);
else
ov2740_update_pad_format(ov2740->cur_mode, &fmt->format);
@@ -869,7 +869,7 @@ static int ov2740_get_format(struct v4l2_subdev *sd,
}
static int ov2740_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -881,7 +881,7 @@ static int ov2740_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -904,7 +904,7 @@ static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov2740->mutex);
return 0;
@@ -1049,9 +1049,8 @@ static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
goto exit;
}
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
goto exit;
}
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5b9cc71df473..f6e1e51e0375 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2227,7 +2227,7 @@ find_mode:
}
static int ov5640_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -2239,7 +2239,7 @@ static int ov5640_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg,
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
format->pad);
else
fmt = &sensor->fmt;
@@ -2285,7 +2285,7 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
}
static int ov5640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -2310,7 +2310,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
goto out;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
else
fmt = &sensor->fmt;
@@ -2818,7 +2818,7 @@ free_ctrls:
}
static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad != 0)
@@ -2838,7 +2838,7 @@ static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
static int ov5640_enum_frame_interval(
struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -2924,7 +2924,7 @@ out:
}
static int ov5640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad != 0)
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index a6c17d15d754..368fa21e675e 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -837,7 +837,7 @@ static const struct v4l2_ctrl_ops ov5645_ctrl_ops = {
};
static int ov5645_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -849,7 +849,7 @@ static int ov5645_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5645_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
@@ -868,13 +868,13 @@ static int ov5645_enum_frame_size(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__ov5645_get_pad_format(struct ov5645 *ov5645,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov5645->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ov5645->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->fmt;
default:
@@ -883,23 +883,25 @@ __ov5645_get_pad_format(struct ov5645 *ov5645,
}
static int ov5645_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5645 *ov5645 = to_ov5645(sd);
- format->format = *__ov5645_get_pad_format(ov5645, cfg, format->pad,
+ format->format = *__ov5645_get_pad_format(ov5645, sd_state,
+ format->pad,
format->which);
return 0;
}
static struct v4l2_rect *
-__ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg,
+__ov5645_get_pad_crop(struct ov5645 *ov5645,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5645->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&ov5645->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->crop;
default:
@@ -908,7 +910,7 @@ __ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg,
}
static int ov5645_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -917,8 +919,8 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
const struct ov5645_mode_info *new_mode;
int ret;
- __crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad,
- format->which);
+ __crop = __ov5645_get_pad_crop(ov5645, sd_state, format->pad,
+ format->which);
new_mode = v4l2_find_nearest_size(ov5645_mode_info_data,
ARRAY_SIZE(ov5645_mode_info_data),
@@ -942,8 +944,8 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
ov5645->current_mode = new_mode;
}
- __format = __ov5645_get_pad_format(ov5645, cfg, format->pad,
- format->which);
+ __format = __ov5645_get_pad_format(ov5645, sd_state, format->pad,
+ format->which);
__format->width = __crop->width;
__format->height = __crop->height;
__format->code = MEDIA_BUS_FMT_UYVY8_2X8;
@@ -956,21 +958,21 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
}
static int ov5645_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { 0 };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = 1920;
fmt.format.height = 1080;
- ov5645_set_format(subdev, cfg, &fmt);
+ ov5645_set_format(subdev, sd_state, &fmt);
return 0;
}
static int ov5645_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -978,7 +980,7 @@ static int ov5645_get_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__ov5645_get_pad_crop(ov5645, cfg, sel->pad,
+ sel->r = *__ov5645_get_pad_crop(ov5645, sd_state, sel->pad,
sel->which);
return 0;
}
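ov5645's set_format picks the closest supported mode with v4l2_find_nearest_size() before writing the crop and format back through the state-aware helpers. A reduced sketch of just the mode lookup (the mode table is hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>
#include <media/v4l2-common.h>

struct my_mode {
	u32 width;
	u32 height;
};

static const struct my_mode my_modes[] = {
	{ 1280,  960 },
	{ 1920, 1080 },
	{ 2592, 1944 },
};

static const struct my_mode *my_find_mode(u32 req_width, u32 req_height)
{
	/* Nearest entry by width/height distance; field names, then values. */
	return v4l2_find_nearest_size(my_modes, ARRAY_SIZE(my_modes),
				      width, height, req_width, req_height);
}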
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 1cefa15729ce..d346d18ce629 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -856,12 +856,13 @@ static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
};
static const struct v4l2_rect *
-__ov5647_get_pad_crop(struct ov5647 *ov5647, struct v4l2_subdev_pad_config *cfg,
+__ov5647_get_pad_crop(struct ov5647 *ov5647,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5647->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&ov5647->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5647->mode->crop;
}
@@ -882,20 +883,20 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0)
goto error_unlock;
ret = ov5647_stream_on(sd);
if (ret < 0) {
dev_err(&client->dev, "stream start failed: %d\n", ret);
- goto error_unlock;
+ goto error_pm;
}
} else {
ret = ov5647_stream_off(sd);
if (ret < 0) {
dev_err(&client->dev, "stream stop failed: %d\n", ret);
- goto error_unlock;
+ goto error_pm;
}
pm_runtime_put(&client->dev);
}
@@ -905,8 +906,9 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
-error_unlock:
+error_pm:
pm_runtime_put(&client->dev);
+error_unlock:
mutex_unlock(&sensor->lock);
return ret;
@@ -917,7 +919,7 @@ static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
};
static int ov5647_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -929,7 +931,7 @@ static int ov5647_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5647_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct v4l2_mbus_framefmt *fmt;
@@ -948,7 +950,7 @@ static int ov5647_enum_frame_size(struct v4l2_subdev *sd,
}
static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -958,7 +960,8 @@ static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
switch (format->which) {
case V4L2_SUBDEV_FORMAT_TRY:
- sensor_format = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ sensor_format = v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
break;
default:
sensor_format = &sensor->mode->format;
@@ -972,7 +975,7 @@ static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
}
static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -986,7 +989,7 @@ static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
/* Update the sensor mode and apply it at streamon time. */
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, format->pad) = mode->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = mode->format;
} else {
int exposure_max, exposure_def;
int hblank, vblank;
@@ -1019,7 +1022,7 @@ static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
}
static int ov5647_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
@@ -1027,7 +1030,7 @@ static int ov5647_get_selection(struct v4l2_subdev *sd,
struct ov5647 *sensor = to_sensor(sd);
mutex_lock(&sensor->lock);
- sel->r = *__ov5647_get_pad_crop(sensor, cfg, sel->pad,
+ sel->r = *__ov5647_get_pad_crop(sensor, sd_state, sel->pad,
sel->which);
mutex_unlock(&sensor->lock);
@@ -1103,8 +1106,8 @@ static int ov5647_detect(struct v4l2_subdev *sd)
static int ov5647_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
crop->left = OV5647_PIXEL_ARRAY_LEFT;
crop->top = OV5647_PIXEL_ARRAY_TOP;
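The ov5647 s_stream change above is worth calling out: with pm_runtime_resume_and_get(), a failed resume must not be followed by pm_runtime_put(), so the single error label splits in two, one that drops the PM reference and one that only unlocks. A sketch of that control flow:

#include <linux/mutex.h>
#include <linux/pm_runtime.h>

static int my_stream_on(struct device *dev, struct mutex *lock,
			int (*start)(void))
{
	int ret;

	mutex_lock(lock);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto error_unlock;	/* no PM reference to drop */

	ret = start();
	if (ret < 0)
		goto error_pm;		/* resume succeeded: drop it */

	mutex_unlock(lock);
	return 0;

error_pm:
	pm_runtime_put(dev);
error_unlock:
	mutex_unlock(lock);
	return ret;
}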
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 3ecb4a3e8773..947d437ed0ef 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2132,11 +2132,9 @@ static int ov5648_s_stream(struct v4l2_subdev *subdev, int enable)
int ret;
if (enable) {
- ret = pm_runtime_get_sync(sensor->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sensor->dev);
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
return ret;
- }
}
mutex_lock(&sensor->mutex);
@@ -2190,7 +2188,7 @@ static const struct v4l2_subdev_video_ops ov5648_subdev_video_ops = {
/* Subdev Pad Operations */
static int ov5648_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code_enum)
{
if (code_enum->index >= ARRAY_SIZE(ov5648_mbus_codes))
@@ -2219,7 +2217,7 @@ static void ov5648_mbus_format_fill(struct v4l2_mbus_framefmt *mbus_format,
}
static int ov5648_get_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
@@ -2228,7 +2226,7 @@ static int ov5648_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(&sensor->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, config,
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, sd_state,
format->pad);
else
ov5648_mbus_format_fill(mbus_format, sensor->state.mbus_code,
@@ -2240,7 +2238,7 @@ static int ov5648_get_fmt(struct v4l2_subdev *subdev,
}
static int ov5648_set_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
@@ -2281,7 +2279,7 @@ static int ov5648_set_fmt(struct v4l2_subdev *subdev,
ov5648_mbus_format_fill(mbus_format, mbus_code, mode);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, config, format->pad) =
+ *v4l2_subdev_get_try_format(subdev, sd_state, format->pad) =
*mbus_format;
else if (sensor->state.mode != mode ||
sensor->state.mbus_code != mbus_code)
@@ -2294,7 +2292,7 @@ complete:
}
static int ov5648_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *size_enum)
{
const struct ov5648_mode *mode;
@@ -2311,7 +2309,7 @@ static int ov5648_enum_frame_size(struct v4l2_subdev *subdev,
}
static int ov5648_enum_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *interval_enum)
{
const struct ov5648_mode *mode = NULL;
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index dee7df8dd100..49189926afd6 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1937,7 +1937,7 @@ static int ov5670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5670 *ov5670 = to_ov5670(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&ov5670->mutex);
@@ -2153,7 +2153,7 @@ error:
}
static int ov5670_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order GRBG is supported */
@@ -2166,7 +2166,7 @@ static int ov5670_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5670_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -2193,11 +2193,12 @@ static void ov5670_update_pad_format(const struct ov5670_mode *mode,
}
static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd,
+ sd_state,
fmt->pad);
else
ov5670_update_pad_format(ov5670->cur_mode, fmt);
@@ -2206,21 +2207,21 @@ static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
}
static int ov5670_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5670 *ov5670 = to_ov5670(sd);
int ret;
mutex_lock(&ov5670->mutex);
- ret = ov5670_do_get_pad_format(ov5670, cfg, fmt);
+ ret = ov5670_do_get_pad_format(ov5670, sd_state, fmt);
mutex_unlock(&ov5670->mutex);
return ret;
}
static int ov5670_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5670 *ov5670 = to_ov5670(sd);
@@ -2238,7 +2239,7 @@ static int ov5670_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov5670_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov5670->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5670->link_freq, mode->link_freq_index);
@@ -2347,11 +2348,9 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
goto unlock_and_return;
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto unlock_and_return;
- }
ret = ov5670_start_streaming(ov5670);
if (ret)
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index dea32859459a..da5850b7ad07 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -863,9 +863,8 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&ov5675->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
mutex_unlock(&ov5675->mutex);
return ret;
}
@@ -924,7 +923,7 @@ static int __maybe_unused ov5675_resume(struct device *dev)
}
static int ov5675_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5675 *ov5675 = to_ov5675(sd);
@@ -939,7 +938,7 @@ static int ov5675_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov5675->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5675->link_freq, mode->link_freq_index);
@@ -965,14 +964,15 @@ static int ov5675_set_format(struct v4l2_subdev *sd,
}
static int ov5675_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5675 *ov5675 = to_ov5675(sd);
mutex_lock(&ov5675->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd,
+ sd_state,
fmt->pad);
else
ov5675_update_pad_format(ov5675->cur_mode, &fmt->format);
@@ -983,7 +983,7 @@ static int ov5675_get_format(struct v4l2_subdev *sd,
}
static int ov5675_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -995,7 +995,7 @@ static int ov5675_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5675_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1018,7 +1018,7 @@ static int ov5675_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov5675->mutex);
return 0;
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 09bee57a241d..439385938a51 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -806,7 +806,7 @@ ov5695_find_best_fit(struct v4l2_subdev_format *fmt)
}
static int ov5695_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -822,7 +822,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
#endif
} else {
ov5695->cur_mode = mode;
@@ -841,7 +841,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
}
static int ov5695_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -850,7 +850,8 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov5695->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
#else
mutex_unlock(&ov5695->mutex);
return -EINVAL;
@@ -867,7 +868,7 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
}
static int ov5695_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -878,7 +879,7 @@ static int ov5695_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5695_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -946,11 +947,9 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on)
goto unlock_and_return;
if (on) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto unlock_and_return;
- }
ret = __ov5695_start_stream(ov5695);
if (ret) {
@@ -1054,7 +1053,7 @@ static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5695 *ov5695 = to_ov5695(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
const struct ov5695_mode *def_mode = &supported_modes[0];
mutex_lock(&ov5695->mutex);
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 85dd13694bd2..f67412150b16 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -467,7 +467,7 @@ static int ov6650_s_power(struct v4l2_subdev *sd, int on)
}
static int ov6650_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -492,7 +492,7 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
}
static int ov6650_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -535,7 +535,7 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
}
static int ov6650_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -550,9 +550,9 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
/* update media bus format code and frame size */
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf->width = cfg->try_fmt.width;
- mf->height = cfg->try_fmt.height;
- mf->code = cfg->try_fmt.code;
+ mf->width = sd_state->pads->try_fmt.width;
+ mf->height = sd_state->pads->try_fmt.height;
+ mf->code = sd_state->pads->try_fmt.code;
} else {
mf->width = priv->rect.width >> priv->half_scale;
@@ -668,7 +668,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
}
static int ov6650_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -701,15 +701,15 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
/* store media bus format code and frame size in the pad state */
- cfg->try_fmt.width = mf->width;
- cfg->try_fmt.height = mf->height;
- cfg->try_fmt.code = mf->code;
+ sd_state->pads->try_fmt.width = mf->width;
+ sd_state->pads->try_fmt.height = mf->height;
+ sd_state->pads->try_fmt.code = mf->code;
/* return default mbus frame format updated with pad config */
*mf = ov6650_def_fmt;
- mf->width = cfg->try_fmt.width;
- mf->height = cfg->try_fmt.height;
- mf->code = cfg->try_fmt.code;
+ mf->width = sd_state->pads->try_fmt.width;
+ mf->height = sd_state->pads->try_fmt.height;
+ mf->code = sd_state->pads->try_fmt.code;
} else {
/* apply new media bus format code and frame size */
@@ -728,7 +728,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
}
static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov6650_codes))
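ov6650 never kept a full TRY format, only width, height and code, so its set_fmt writes those three fields straight into the pad state and then mirrors them back over the driver default. A sketch of that TRY branch, with my_def_fmt as a hypothetical default:

#include <media/v4l2-subdev.h>

static const struct v4l2_mbus_framefmt my_def_fmt = {
	.width	= 352,
	.height	= 288,
	.code	= MEDIA_BUS_FMT_YUYV8_2X8,
	.field	= V4L2_FIELD_NONE,
};

static void my_store_try_fmt(struct v4l2_subdev_state *sd_state,
			     struct v4l2_mbus_framefmt *mf)
{
	/* Remember only what this driver actually negotiates... */
	sd_state->pads->try_fmt.width = mf->width;
	sd_state->pads->try_fmt.height = mf->height;
	sd_state->pads->try_fmt.code = mf->code;

	/* ...and report the default format updated with those fields. */
	*mf = my_def_fmt;
	mf->width = sd_state->pads->try_fmt.width;
	mf->height = sd_state->pads->try_fmt.height;
	mf->code = sd_state->pads->try_fmt.code;
}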
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 0c10203f822b..ebb299f207e5 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -898,7 +898,7 @@ static const struct v4l2_ctrl_ops ov7251_ctrl_ops = {
};
static int ov7251_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -910,7 +910,7 @@ static int ov7251_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov7251_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != MEDIA_BUS_FMT_Y10_1X10)
@@ -928,7 +928,7 @@ static int ov7251_enum_frame_size(struct v4l2_subdev *subdev,
}
static int ov7251_enum_frame_ival(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
unsigned int index = fie->index;
@@ -950,13 +950,13 @@ static int ov7251_enum_frame_ival(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__ov7251_get_pad_format(struct ov7251 *ov7251,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov7251->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ov7251->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->fmt;
default:
@@ -965,13 +965,14 @@ __ov7251_get_pad_format(struct ov7251 *ov7251,
}
static int ov7251_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7251 *ov7251 = to_ov7251(sd);
mutex_lock(&ov7251->lock);
- format->format = *__ov7251_get_pad_format(ov7251, cfg, format->pad,
+ format->format = *__ov7251_get_pad_format(ov7251, sd_state,
+ format->pad,
format->which);
mutex_unlock(&ov7251->lock);
@@ -979,12 +980,13 @@ static int ov7251_get_format(struct v4l2_subdev *sd,
}
static struct v4l2_rect *
-__ov7251_get_pad_crop(struct ov7251 *ov7251, struct v4l2_subdev_pad_config *cfg,
+__ov7251_get_pad_crop(struct ov7251 *ov7251,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov7251->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&ov7251->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->crop;
default:
@@ -1027,7 +1029,7 @@ ov7251_find_mode_by_ival(struct ov7251 *ov7251, struct v4l2_fract *timeperframe)
}
static int ov7251_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1038,7 +1040,8 @@ static int ov7251_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov7251->lock);
- __crop = __ov7251_get_pad_crop(ov7251, cfg, format->pad, format->which);
+ __crop = __ov7251_get_pad_crop(ov7251, sd_state, format->pad,
+ format->which);
new_mode = v4l2_find_nearest_size(ov7251_mode_info_data,
ARRAY_SIZE(ov7251_mode_info_data),
@@ -1077,7 +1080,7 @@ static int ov7251_set_format(struct v4l2_subdev *sd,
ov7251->current_mode = new_mode;
}
- __format = __ov7251_get_pad_format(ov7251, cfg, format->pad,
+ __format = __ov7251_get_pad_format(ov7251, sd_state, format->pad,
format->which);
__format->width = __crop->width;
__format->height = __crop->height;
@@ -1098,24 +1101,24 @@ exit:
}
static int ov7251_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.width = 640,
.height = 480
}
};
- ov7251_set_format(subdev, cfg, &fmt);
+ ov7251_set_format(subdev, sd_state, &fmt);
return 0;
}
static int ov7251_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1124,7 +1127,7 @@ static int ov7251_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&ov7251->lock);
- sel->r = *__ov7251_get_pad_crop(ov7251, cfg, sel->pad,
+ sel->r = *__ov7251_get_pad_crop(ov7251, sd_state, sel->pad,
sel->which);
mutex_unlock(&ov7251->lock);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index d2df811b1a40..196746423116 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -960,7 +960,7 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
static int ov7670_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= N_OV7670_FMTS)
@@ -1105,7 +1105,7 @@ static int ov7670_apply_fmt(struct v4l2_subdev *sd)
* Set a format.
*/
static int ov7670_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
@@ -1122,7 +1122,8 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
if (ret)
return ret;
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
*mbus_fmt = format->format;
#endif
return 0;
@@ -1144,7 +1145,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
}
static int ov7670_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
@@ -1154,7 +1155,7 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
return 0;
#else
@@ -1202,7 +1203,7 @@ static int ov7670_s_frame_interval(struct v4l2_subdev *sd,
static int ov7670_frame_rates[] = { 30, 15, 10, 5, 1 };
static int ov7670_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct ov7670_info *info = to_state(sd);
@@ -1241,7 +1242,7 @@ static int ov7670_enum_frame_interval(struct v4l2_subdev *sd,
* Frame size enumeration
*/
static int ov7670_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ov7670_info *info = to_state(sd);
@@ -1724,7 +1725,7 @@ static void ov7670_get_default_format(struct v4l2_subdev *sd,
static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
ov7670_get_default_format(sd, format);
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index d94cf2d39c2a..78602a2f70b0 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1157,7 +1157,7 @@ ov772x_set_fmt_error:
}
static int ov772x_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov772x_priv *priv = to_ov772x(sd);
@@ -1179,7 +1179,7 @@ static int ov772x_get_selection(struct v4l2_subdev *sd,
}
static int ov772x_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -1198,7 +1198,7 @@ static int ov772x_get_fmt(struct v4l2_subdev *sd,
}
static int ov772x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov772x_priv *priv = to_ov772x(sd);
@@ -1222,7 +1222,7 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
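[Note: ov772x is the one driver here that stores the try format by poking the pad-config member directly rather than going through a helper, so the conversion rewrites cfg->try_fmt as sd_state->pads->try_fmt; pads is the per-pad config array inside struct v4l2_subdev_state, and indexing it at [0] is implicit because this subdev exposes a single pad. A hedged sketch of the equivalent helper-based form, should the direct access ever be cleaned up:

	/* equivalent helper form, assuming a single-pad subdev (pad 0) */
	*v4l2_subdev_get_try_format(sd, sd_state, 0) = *mf;
]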
@@ -1320,7 +1320,7 @@ static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
};
static int ov772x_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->pad || fie->index >= ARRAY_SIZE(ov772x_frame_intervals))
@@ -1338,7 +1338,7 @@ static int ov772x_enum_frame_interval(struct v4l2_subdev *sd,
}
static int ov772x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov772x_cfmts))
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 47a9003d29d6..2539cfee85c8 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -624,11 +624,9 @@ static int ov7740_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
goto err_unlock;
- }
ret = ov7740_start_streaming(ov7740);
if (ret)
@@ -709,7 +707,7 @@ static const struct ov7740_pixfmt ov7740_formats[] = {
#define N_OV7740_FMTS ARRAY_SIZE(ov7740_formats)
static int ov7740_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= N_OV7740_FMTS)
@@ -721,7 +719,7 @@ static int ov7740_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov7740_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->pad)
@@ -740,7 +738,7 @@ static int ov7740_enum_frame_interval(struct v4l2_subdev *sd,
}
static int ov7740_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad)
@@ -803,7 +801,7 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
}
static int ov7740_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -825,7 +823,8 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto error;
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
*mbus_fmt = format->format;
#endif
mutex_unlock(&ov7740->mutex);
@@ -848,7 +847,7 @@ error:
}
static int ov7740_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -860,7 +859,7 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov7740->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
ret = 0;
#else
@@ -905,7 +904,7 @@ static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&ov7740->mutex);
ov7740_get_default_format(sd, format);
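[Note: alongside the state conversion, ov7740_set_stream() above (and ov8856_set_stream() below) switches from the pm_runtime_get_sync() + pm_runtime_put_noidle() error dance to pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. The pattern being removed and its replacement:

	/* before: get_sync() leaves the count raised even on failure */
	ret = pm_runtime_get_sync(&client->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&client->dev);
		goto err_unlock;
	}

	/* after: resume_and_get() cleans up after itself on failure */
	ret = pm_runtime_resume_and_get(&client->dev);
	if (ret < 0)
		goto err_unlock;
]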
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index e3af3ea277af..88e19f30d376 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -18,8 +18,6 @@
#define OV8856_REG_VALUE_16BIT 2
#define OV8856_REG_VALUE_24BIT 3
-#define OV8856_LINK_FREQ_360MHZ 360000000ULL
-#define OV8856_LINK_FREQ_180MHZ 180000000ULL
#define OV8856_SCLK 144000000ULL
#define OV8856_XVCLK_19_2 19200000
#define OV8856_DATA_LANES 4
@@ -78,6 +76,29 @@
#define OV8856_TEST_PATTERN_ENABLE BIT(7)
#define OV8856_TEST_PATTERN_BAR_SHIFT 2
+#define NUM_REGS 7
+#define NUM_MODE_REGS 187
+#define NUM_MODE_REGS_2 200
+
+/* Flip Mirror Controls from sensor */
+#define OV8856_REG_FORMAT1 0x3820
+#define OV8856_REG_FORMAT2 0x3821
+#define OV8856_REG_FORMAT1_OP_1 BIT(1)
+#define OV8856_REG_FORMAT1_OP_2 BIT(2)
+#define OV8856_REG_FORMAT1_OP_3 BIT(6)
+#define OV8856_REG_FORMAT2_OP_1 BIT(1)
+#define OV8856_REG_FORMAT2_OP_2 BIT(2)
+#define OV8856_REG_FORMAT2_OP_3 BIT(6)
+#define OV8856_REG_FLIP_OPT_1 0x376b
+#define OV8856_REG_FLIP_OPT_2 0x5001
+#define OV8856_REG_FLIP_OPT_3 0x502e
+#define OV8856_REG_MIRROR_OPT_1 0x5004
+#define OV8856_REG_FLIP_OP_0 BIT(0)
+#define OV8856_REG_FLIP_OP_1 BIT(1)
+#define OV8856_REG_FLIP_OP_2 BIT(2)
+#define OV8856_REG_MIRROR_OP_1 BIT(1)
+#define OV8856_REG_MIRROR_OP_2 BIT(2)
+
#define to_ov8856(_sd) container_of(_sd, struct ov8856, sd)
static const char * const ov8856_supply_names[] = {
@@ -86,11 +107,6 @@ static const char * const ov8856_supply_names[] = {
"dvdd", /* Digital core power */
};
-enum {
- OV8856_LINK_FREQ_720MBPS,
- OV8856_LINK_FREQ_360MBPS,
-};
-
struct ov8856_reg {
u16 address;
u8 val;
@@ -126,891 +142,1242 @@ struct ov8856_mode {
/* Sensor register settings for this resolution */
const struct ov8856_reg_list reg_list;
+
+ /* Number of data lanes */
+ u8 data_lanes;
};
-static const struct ov8856_reg mipi_data_rate_720mbps[] = {
- {0x0103, 0x01},
- {0x0100, 0x00},
- {0x0302, 0x4b},
- {0x0303, 0x01},
- {0x030b, 0x02},
- {0x030d, 0x4b},
- {0x031e, 0x0c},
+struct ov8856_mipi_data_rates {
+ const struct ov8856_reg regs_0[NUM_REGS];
+ const struct ov8856_reg regs_1[NUM_REGS];
};
-static const struct ov8856_reg mipi_data_rate_360mbps[] = {
- {0x0103, 0x01},
- {0x0100, 0x00},
- {0x0302, 0x4b},
- {0x0303, 0x03},
- {0x030b, 0x02},
- {0x030d, 0x4b},
- {0x031e, 0x0c},
+static const struct ov8856_mipi_data_rates mipi_data_rate_lane_2 = {
+ /* mipi_data_rate_1440mbps */
+ {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x43},
+ {0x0303, 0x00},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c}
+ },
+ /* mipi_data_rate_720mbps */
+ {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x4b},
+ {0x0303, 0x01},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c}
+ }
};
-static const struct ov8856_reg mode_3280x2464_regs[] = {
- {0x3000, 0x20},
- {0x3003, 0x08},
- {0x300e, 0x20},
- {0x3010, 0x00},
- {0x3015, 0x84},
- {0x3018, 0x72},
- {0x3021, 0x23},
- {0x3033, 0x24},
- {0x3500, 0x00},
- {0x3501, 0x9a},
- {0x3502, 0x20},
- {0x3503, 0x08},
- {0x3505, 0x83},
- {0x3508, 0x01},
- {0x3509, 0x80},
- {0x350c, 0x00},
- {0x350d, 0x80},
- {0x350e, 0x04},
- {0x350f, 0x00},
- {0x3510, 0x00},
- {0x3511, 0x02},
- {0x3512, 0x00},
- {0x3600, 0x72},
- {0x3601, 0x40},
- {0x3602, 0x30},
- {0x3610, 0xc5},
- {0x3611, 0x58},
- {0x3612, 0x5c},
- {0x3613, 0xca},
- {0x3614, 0x20},
- {0x3628, 0xff},
- {0x3629, 0xff},
- {0x362a, 0xff},
- {0x3633, 0x10},
- {0x3634, 0x10},
- {0x3635, 0x10},
- {0x3636, 0x10},
- {0x3663, 0x08},
- {0x3669, 0x34},
- {0x366e, 0x10},
- {0x3706, 0x86},
- {0x370b, 0x7e},
- {0x3714, 0x23},
- {0x3730, 0x12},
- {0x3733, 0x10},
- {0x3764, 0x00},
- {0x3765, 0x00},
- {0x3769, 0x62},
- {0x376a, 0x2a},
- {0x376b, 0x30},
- {0x3780, 0x00},
- {0x3781, 0x24},
- {0x3782, 0x00},
- {0x3783, 0x23},
- {0x3798, 0x2f},
- {0x37a1, 0x60},
- {0x37a8, 0x6a},
- {0x37ab, 0x3f},
- {0x37c2, 0x04},
- {0x37c3, 0xf1},
- {0x37c9, 0x80},
- {0x37cb, 0x16},
- {0x37cc, 0x16},
- {0x37cd, 0x16},
- {0x37ce, 0x16},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x06},
- {0x3804, 0x0c},
- {0x3805, 0xdf},
- {0x3806, 0x09},
- {0x3807, 0xa7},
- {0x3808, 0x0c},
- {0x3809, 0xd0},
- {0x380a, 0x09},
- {0x380b, 0xa0},
- {0x380c, 0x07},
- {0x380d, 0x88},
- {0x380e, 0x09},
- {0x380f, 0xb8},
- {0x3810, 0x00},
- {0x3811, 0x00},
- {0x3812, 0x00},
- {0x3813, 0x01},
- {0x3814, 0x01},
- {0x3815, 0x01},
- {0x3816, 0x00},
- {0x3817, 0x00},
- {0x3818, 0x00},
- {0x3819, 0x10},
- {0x3820, 0x80},
- {0x3821, 0x46},
- {0x382a, 0x01},
- {0x382b, 0x01},
- {0x3830, 0x06},
- {0x3836, 0x02},
- {0x3862, 0x04},
- {0x3863, 0x08},
- {0x3cc0, 0x33},
- {0x3d85, 0x17},
- {0x3d8c, 0x73},
- {0x3d8d, 0xde},
- {0x4001, 0xe0},
- {0x4003, 0x40},
- {0x4008, 0x00},
- {0x4009, 0x0b},
- {0x400a, 0x00},
- {0x400b, 0x84},
- {0x400f, 0x80},
- {0x4010, 0xf0},
- {0x4011, 0xff},
- {0x4012, 0x02},
- {0x4013, 0x01},
- {0x4014, 0x01},
- {0x4015, 0x01},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4048, 0x00},
- {0x4049, 0x80},
- {0x4041, 0x03},
- {0x404c, 0x20},
- {0x404d, 0x00},
- {0x404e, 0x20},
- {0x4203, 0x80},
- {0x4307, 0x30},
- {0x4317, 0x00},
- {0x4503, 0x08},
- {0x4601, 0x80},
- {0x4800, 0x44},
- {0x4816, 0x53},
- {0x481b, 0x58},
- {0x481f, 0x27},
- {0x4837, 0x16},
- {0x483c, 0x0f},
- {0x484b, 0x05},
- {0x5000, 0x57},
- {0x5001, 0x0a},
- {0x5004, 0x04},
- {0x502e, 0x03},
- {0x5030, 0x41},
- {0x5780, 0x14},
- {0x5781, 0x0f},
- {0x5782, 0x44},
- {0x5783, 0x02},
- {0x5784, 0x01},
- {0x5785, 0x01},
- {0x5786, 0x00},
- {0x5787, 0x04},
- {0x5788, 0x02},
- {0x5789, 0x0f},
- {0x578a, 0xfd},
- {0x578b, 0xf5},
- {0x578c, 0xf5},
- {0x578d, 0x03},
- {0x578e, 0x08},
- {0x578f, 0x0c},
- {0x5790, 0x08},
- {0x5791, 0x04},
- {0x5792, 0x00},
- {0x5793, 0x52},
- {0x5794, 0xa3},
- {0x5795, 0x02},
- {0x5796, 0x20},
- {0x5797, 0x20},
- {0x5798, 0xd5},
- {0x5799, 0xd5},
- {0x579a, 0x00},
- {0x579b, 0x50},
- {0x579c, 0x00},
- {0x579d, 0x2c},
- {0x579e, 0x0c},
- {0x579f, 0x40},
- {0x57a0, 0x09},
- {0x57a1, 0x40},
- {0x59f8, 0x3d},
- {0x5a08, 0x02},
- {0x5b00, 0x02},
- {0x5b01, 0x10},
- {0x5b02, 0x03},
- {0x5b03, 0xcf},
- {0x5b05, 0x6c},
- {0x5e00, 0x00}
+static const struct ov8856_mipi_data_rates mipi_data_rate_lane_4 = {
+ /* mipi_data_rate_720mbps */
+ {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x4b},
+ {0x0303, 0x01},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c}
+ },
+ /* mipi_data_rate_360mbps */
+ {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x4b},
+ {0x0303, 0x03},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c}
+ }
};
-static const struct ov8856_reg mode_3264x2448_regs[] = {
- {0x0103, 0x01},
- {0x0302, 0x3c},
- {0x0303, 0x01},
- {0x031e, 0x0c},
- {0x3000, 0x20},
- {0x3003, 0x08},
- {0x300e, 0x20},
- {0x3010, 0x00},
- {0x3015, 0x84},
- {0x3018, 0x72},
- {0x3021, 0x23},
- {0x3033, 0x24},
- {0x3500, 0x00},
- {0x3501, 0x9a},
- {0x3502, 0x20},
- {0x3503, 0x08},
- {0x3505, 0x83},
- {0x3508, 0x01},
- {0x3509, 0x80},
- {0x350c, 0x00},
- {0x350d, 0x80},
- {0x350e, 0x04},
- {0x350f, 0x00},
- {0x3510, 0x00},
- {0x3511, 0x02},
- {0x3512, 0x00},
- {0x3600, 0x72},
- {0x3601, 0x40},
- {0x3602, 0x30},
- {0x3610, 0xc5},
- {0x3611, 0x58},
- {0x3612, 0x5c},
- {0x3613, 0xca},
- {0x3614, 0x60},
- {0x3628, 0xff},
- {0x3629, 0xff},
- {0x362a, 0xff},
- {0x3633, 0x10},
- {0x3634, 0x10},
- {0x3635, 0x10},
- {0x3636, 0x10},
- {0x3663, 0x08},
- {0x3669, 0x34},
- {0x366d, 0x00},
- {0x366e, 0x10},
- {0x3706, 0x86},
- {0x370b, 0x7e},
- {0x3714, 0x23},
- {0x3730, 0x12},
- {0x3733, 0x10},
- {0x3764, 0x00},
- {0x3765, 0x00},
- {0x3769, 0x62},
- {0x376a, 0x2a},
- {0x376b, 0x30},
- {0x3780, 0x00},
- {0x3781, 0x24},
- {0x3782, 0x00},
- {0x3783, 0x23},
- {0x3798, 0x2f},
- {0x37a1, 0x60},
- {0x37a8, 0x6a},
- {0x37ab, 0x3f},
- {0x37c2, 0x04},
- {0x37c3, 0xf1},
- {0x37c9, 0x80},
- {0x37cb, 0x16},
- {0x37cc, 0x16},
- {0x37cd, 0x16},
- {0x37ce, 0x16},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x0c},
- {0x3804, 0x0c},
- {0x3805, 0xdf},
- {0x3806, 0x09},
- {0x3807, 0xa3},
- {0x3808, 0x0c},
- {0x3809, 0xc0},
- {0x380a, 0x09},
- {0x380b, 0x90},
- {0x380c, 0x07},
- {0x380d, 0x8c},
- {0x380e, 0x09},
- {0x380f, 0xb2},
- {0x3810, 0x00},
- {0x3811, 0x04},
- {0x3812, 0x00},
- {0x3813, 0x01},
- {0x3814, 0x01},
- {0x3815, 0x01},
- {0x3816, 0x00},
- {0x3817, 0x00},
- {0x3818, 0x00},
- {0x3819, 0x10},
- {0x3820, 0x80},
- {0x3821, 0x46},
- {0x382a, 0x01},
- {0x382b, 0x01},
- {0x3830, 0x06},
- {0x3836, 0x02},
- {0x3862, 0x04},
- {0x3863, 0x08},
- {0x3cc0, 0x33},
- {0x3d85, 0x17},
- {0x3d8c, 0x73},
- {0x3d8d, 0xde},
- {0x4001, 0xe0},
- {0x4003, 0x40},
- {0x4008, 0x00},
- {0x4009, 0x0b},
- {0x400a, 0x00},
- {0x400b, 0x84},
- {0x400f, 0x80},
- {0x4010, 0xf0},
- {0x4011, 0xff},
- {0x4012, 0x02},
- {0x4013, 0x01},
- {0x4014, 0x01},
- {0x4015, 0x01},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4048, 0x00},
- {0x4049, 0x80},
- {0x4041, 0x03},
- {0x404c, 0x20},
- {0x404d, 0x00},
- {0x404e, 0x20},
- {0x4203, 0x80},
- {0x4307, 0x30},
- {0x4317, 0x00},
- {0x4502, 0x50},
- {0x4503, 0x08},
- {0x4601, 0x80},
- {0x4800, 0x44},
- {0x4816, 0x53},
- {0x481b, 0x50},
- {0x481f, 0x27},
- {0x4823, 0x3c},
- {0x482b, 0x00},
- {0x4831, 0x66},
- {0x4837, 0x16},
- {0x483c, 0x0f},
- {0x484b, 0x05},
- {0x5000, 0x77},
- {0x5001, 0x0a},
- {0x5003, 0xc8},
- {0x5004, 0x04},
- {0x5006, 0x00},
- {0x5007, 0x00},
- {0x502e, 0x03},
- {0x5030, 0x41},
- {0x5780, 0x14},
- {0x5781, 0x0f},
- {0x5782, 0x44},
- {0x5783, 0x02},
- {0x5784, 0x01},
- {0x5785, 0x01},
- {0x5786, 0x00},
- {0x5787, 0x04},
- {0x5788, 0x02},
- {0x5789, 0x0f},
- {0x578a, 0xfd},
- {0x578b, 0xf5},
- {0x578c, 0xf5},
- {0x578d, 0x03},
- {0x578e, 0x08},
- {0x578f, 0x0c},
- {0x5790, 0x08},
- {0x5791, 0x04},
- {0x5792, 0x00},
- {0x5793, 0x52},
- {0x5794, 0xa3},
- {0x5795, 0x02},
- {0x5796, 0x20},
- {0x5797, 0x20},
- {0x5798, 0xd5},
- {0x5799, 0xd5},
- {0x579a, 0x00},
- {0x579b, 0x50},
- {0x579c, 0x00},
- {0x579d, 0x2c},
- {0x579e, 0x0c},
- {0x579f, 0x40},
- {0x57a0, 0x09},
- {0x57a1, 0x40},
- {0x59f8, 0x3d},
- {0x5a08, 0x02},
- {0x5b00, 0x02},
- {0x5b01, 0x10},
- {0x5b02, 0x03},
- {0x5b03, 0xcf},
- {0x5b05, 0x6c},
- {0x5e00, 0x00},
- {0x5e10, 0xfc}
+static const struct ov8856_reg lane_2_mode_3280x2464[] = {
+ /* 3280x2464 resolution */
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x32},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x9a},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x50},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x10},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x23},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x06},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa7},
+ {0x3808, 0x0c},
+ {0x3809, 0xd0},
+ {0x380a, 0x09},
+ {0x380b, 0xa0},
+ {0x380c, 0x07},
+ {0x380d, 0x88},
+ {0x380e, 0x09},
+ {0x380f, 0xb8},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3837, 0x10},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x14},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x0c},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5795, 0x02},
+ {0x5796, 0x20},
+ {0x5797, 0x20},
+ {0x5798, 0xd5},
+ {0x5799, 0xd5},
+ {0x579a, 0x00},
+ {0x579b, 0x50},
+ {0x579c, 0x00},
+ {0x579d, 0x2c},
+ {0x579e, 0x0c},
+ {0x579f, 0x40},
+ {0x57a0, 0x09},
+ {0x57a1, 0x40},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
};
-static const struct ov8856_reg mode_1640x1232_regs[] = {
- {0x3000, 0x20},
- {0x3003, 0x08},
- {0x300e, 0x20},
- {0x3010, 0x00},
- {0x3015, 0x84},
- {0x3018, 0x72},
- {0x3021, 0x23},
- {0x3033, 0x24},
- {0x3500, 0x00},
- {0x3501, 0x4c},
- {0x3502, 0xe0},
- {0x3503, 0x08},
- {0x3505, 0x83},
- {0x3508, 0x01},
- {0x3509, 0x80},
- {0x350c, 0x00},
- {0x350d, 0x80},
- {0x350e, 0x04},
- {0x350f, 0x00},
- {0x3510, 0x00},
- {0x3511, 0x02},
- {0x3512, 0x00},
- {0x3600, 0x72},
- {0x3601, 0x40},
- {0x3602, 0x30},
- {0x3610, 0xc5},
- {0x3611, 0x58},
- {0x3612, 0x5c},
- {0x3613, 0xca},
- {0x3614, 0x20},
- {0x3628, 0xff},
- {0x3629, 0xff},
- {0x362a, 0xff},
- {0x3633, 0x10},
- {0x3634, 0x10},
- {0x3635, 0x10},
- {0x3636, 0x10},
- {0x3663, 0x08},
- {0x3669, 0x34},
- {0x366e, 0x08},
- {0x3706, 0x86},
- {0x370b, 0x7e},
- {0x3714, 0x27},
- {0x3730, 0x12},
- {0x3733, 0x10},
- {0x3764, 0x00},
- {0x3765, 0x00},
- {0x3769, 0x62},
- {0x376a, 0x2a},
- {0x376b, 0x30},
- {0x3780, 0x00},
- {0x3781, 0x24},
- {0x3782, 0x00},
- {0x3783, 0x23},
- {0x3798, 0x2f},
- {0x37a1, 0x60},
- {0x37a8, 0x6a},
- {0x37ab, 0x3f},
- {0x37c2, 0x14},
- {0x37c3, 0xf1},
- {0x37c9, 0x80},
- {0x37cb, 0x16},
- {0x37cc, 0x16},
- {0x37cd, 0x16},
- {0x37ce, 0x16},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x06},
- {0x3804, 0x0c},
- {0x3805, 0xdf},
- {0x3806, 0x09},
- {0x3807, 0xa7},
- {0x3808, 0x06},
- {0x3809, 0x68},
- {0x380a, 0x04},
- {0x380b, 0xd0},
- {0x380c, 0x0e},
- {0x380d, 0xec},
- {0x380e, 0x04},
- {0x380f, 0xe8},
- {0x3810, 0x00},
- {0x3811, 0x00},
- {0x3812, 0x00},
- {0x3813, 0x01},
- {0x3814, 0x03},
- {0x3815, 0x01},
- {0x3816, 0x00},
- {0x3817, 0x00},
- {0x3818, 0x00},
- {0x3819, 0x10},
- {0x3820, 0x90},
- {0x3821, 0x67},
- {0x382a, 0x03},
- {0x382b, 0x01},
- {0x3830, 0x06},
- {0x3836, 0x02},
- {0x3862, 0x04},
- {0x3863, 0x08},
- {0x3cc0, 0x33},
- {0x3d85, 0x17},
- {0x3d8c, 0x73},
- {0x3d8d, 0xde},
- {0x4001, 0xe0},
- {0x4003, 0x40},
- {0x4008, 0x00},
- {0x4009, 0x05},
- {0x400a, 0x00},
- {0x400b, 0x84},
- {0x400f, 0x80},
- {0x4010, 0xf0},
- {0x4011, 0xff},
- {0x4012, 0x02},
- {0x4013, 0x01},
- {0x4014, 0x01},
- {0x4015, 0x01},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4048, 0x00},
- {0x4049, 0x80},
- {0x4041, 0x03},
- {0x404c, 0x20},
- {0x404d, 0x00},
- {0x404e, 0x20},
- {0x4203, 0x80},
- {0x4307, 0x30},
- {0x4317, 0x00},
- {0x4503, 0x08},
- {0x4601, 0x80},
- {0x4800, 0x44},
- {0x4816, 0x53},
- {0x481b, 0x58},
- {0x481f, 0x27},
- {0x4837, 0x16},
- {0x483c, 0x0f},
- {0x484b, 0x05},
- {0x5000, 0x57},
- {0x5001, 0x0a},
- {0x5004, 0x04},
- {0x502e, 0x03},
- {0x5030, 0x41},
- {0x5780, 0x14},
- {0x5781, 0x0f},
- {0x5782, 0x44},
- {0x5783, 0x02},
- {0x5784, 0x01},
- {0x5785, 0x01},
- {0x5786, 0x00},
- {0x5787, 0x04},
- {0x5788, 0x02},
- {0x5789, 0x0f},
- {0x578a, 0xfd},
- {0x578b, 0xf5},
- {0x578c, 0xf5},
- {0x578d, 0x03},
- {0x578e, 0x08},
- {0x578f, 0x0c},
- {0x5790, 0x08},
- {0x5791, 0x04},
- {0x5792, 0x00},
- {0x5793, 0x52},
- {0x5794, 0xa3},
- {0x5795, 0x00},
- {0x5796, 0x10},
- {0x5797, 0x10},
- {0x5798, 0x73},
- {0x5799, 0x73},
- {0x579a, 0x00},
- {0x579b, 0x28},
- {0x579c, 0x00},
- {0x579d, 0x16},
- {0x579e, 0x06},
- {0x579f, 0x20},
- {0x57a0, 0x04},
- {0x57a1, 0xa0},
- {0x59f8, 0x3d},
- {0x5a08, 0x02},
- {0x5b00, 0x02},
- {0x5b01, 0x10},
- {0x5b02, 0x03},
- {0x5b03, 0xcf},
- {0x5b05, 0x6c},
- {0x5e00, 0x00}
+static const struct ov8856_reg lane_2_mode_1640x1232[] = {
+ /* 1640x1232 resolution */
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x32},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x4c},
+ {0x3502, 0xe0},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x50},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x08},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x27},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xaf},
+ {0x3808, 0x06},
+ {0x3809, 0x68},
+ {0x380a, 0x04},
+ {0x380b, 0xd0},
+ {0x380c, 0x0c},
+ {0x380d, 0x60},
+ {0x380e, 0x05},
+ {0x380f, 0xea},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x90},
+ {0x3821, 0x67},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3837, 0x10},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x14},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5795, 0x00},
+ {0x5796, 0x10},
+ {0x5797, 0x10},
+ {0x5798, 0x73},
+ {0x5799, 0x73},
+ {0x579a, 0x00},
+ {0x579b, 0x28},
+ {0x579c, 0x00},
+ {0x579d, 0x16},
+ {0x579e, 0x06},
+ {0x579f, 0x20},
+ {0x57a0, 0x04},
+ {0x57a1, 0xa0},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
};
-static const struct ov8856_reg mode_1632x1224_regs[] = {
- {0x0103, 0x01},
- {0x0302, 0x3c},
- {0x0303, 0x01},
- {0x031e, 0x0c},
- {0x3000, 0x20},
- {0x3003, 0x08},
- {0x300e, 0x20},
- {0x3010, 0x00},
- {0x3015, 0x84},
- {0x3018, 0x72},
- {0x3021, 0x23},
- {0x3033, 0x24},
- {0x3500, 0x00},
- {0x3501, 0x4c},
- {0x3502, 0xe0},
- {0x3503, 0x08},
- {0x3505, 0x83},
- {0x3508, 0x01},
- {0x3509, 0x80},
- {0x350c, 0x00},
- {0x350d, 0x80},
- {0x350e, 0x04},
- {0x350f, 0x00},
- {0x3510, 0x00},
- {0x3511, 0x02},
- {0x3512, 0x00},
- {0x3600, 0x72},
- {0x3601, 0x40},
- {0x3602, 0x30},
- {0x3610, 0xc5},
- {0x3611, 0x58},
- {0x3612, 0x5c},
- {0x3613, 0xca},
- {0x3614, 0x60},
- {0x3628, 0xff},
- {0x3629, 0xff},
- {0x362a, 0xff},
- {0x3633, 0x10},
- {0x3634, 0x10},
- {0x3635, 0x10},
- {0x3636, 0x10},
- {0x3663, 0x08},
- {0x3669, 0x34},
- {0x366d, 0x00},
- {0x366e, 0x08},
- {0x3706, 0x86},
- {0x370b, 0x7e},
- {0x3714, 0x27},
- {0x3730, 0x12},
- {0x3733, 0x10},
- {0x3764, 0x00},
- {0x3765, 0x00},
- {0x3769, 0x62},
- {0x376a, 0x2a},
- {0x376b, 0x30},
- {0x3780, 0x00},
- {0x3781, 0x24},
- {0x3782, 0x00},
- {0x3783, 0x23},
- {0x3798, 0x2f},
- {0x37a1, 0x60},
- {0x37a8, 0x6a},
- {0x37ab, 0x3f},
- {0x37c2, 0x14},
- {0x37c3, 0xf1},
- {0x37c9, 0x80},
- {0x37cb, 0x16},
- {0x37cc, 0x16},
- {0x37cd, 0x16},
- {0x37ce, 0x16},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x0c},
- {0x3804, 0x0c},
- {0x3805, 0xdf},
- {0x3806, 0x09},
- {0x3807, 0xa3},
- {0x3808, 0x06},
- {0x3809, 0x60},
- {0x380a, 0x04},
- {0x380b, 0xc8},
- {0x380c, 0x07},
- {0x380d, 0x8c},
- {0x380e, 0x09},
- {0x380f, 0xb2},
- {0x3810, 0x00},
- {0x3811, 0x02},
- {0x3812, 0x00},
- {0x3813, 0x01},
- {0x3814, 0x03},
- {0x3815, 0x01},
- {0x3816, 0x00},
- {0x3817, 0x00},
- {0x3818, 0x00},
- {0x3819, 0x10},
- {0x3820, 0x80},
- {0x3821, 0x47},
- {0x382a, 0x03},
- {0x382b, 0x01},
- {0x3830, 0x06},
- {0x3836, 0x02},
- {0x3862, 0x04},
- {0x3863, 0x08},
- {0x3cc0, 0x33},
- {0x3d85, 0x17},
- {0x3d8c, 0x73},
- {0x3d8d, 0xde},
- {0x4001, 0xe0},
- {0x4003, 0x40},
- {0x4008, 0x00},
- {0x4009, 0x05},
- {0x400a, 0x00},
- {0x400b, 0x84},
- {0x400f, 0x80},
- {0x4010, 0xf0},
- {0x4011, 0xff},
- {0x4012, 0x02},
- {0x4013, 0x01},
- {0x4014, 0x01},
- {0x4015, 0x01},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4048, 0x00},
- {0x4049, 0x80},
- {0x4041, 0x03},
- {0x404c, 0x20},
- {0x404d, 0x00},
- {0x404e, 0x20},
- {0x4203, 0x80},
- {0x4307, 0x30},
- {0x4317, 0x00},
- {0x4502, 0x50},
- {0x4503, 0x08},
- {0x4601, 0x80},
- {0x4800, 0x44},
- {0x4816, 0x53},
- {0x481b, 0x50},
- {0x481f, 0x27},
- {0x4823, 0x3c},
- {0x482b, 0x00},
- {0x4831, 0x66},
- {0x4837, 0x16},
- {0x483c, 0x0f},
- {0x484b, 0x05},
- {0x5000, 0x77},
- {0x5001, 0x0a},
- {0x5003, 0xc8},
- {0x5004, 0x04},
- {0x5006, 0x00},
- {0x5007, 0x00},
- {0x502e, 0x03},
- {0x5030, 0x41},
- {0x5795, 0x00},
- {0x5796, 0x10},
- {0x5797, 0x10},
- {0x5798, 0x73},
- {0x5799, 0x73},
- {0x579a, 0x00},
- {0x579b, 0x28},
- {0x579c, 0x00},
- {0x579d, 0x16},
- {0x579e, 0x06},
- {0x579f, 0x20},
- {0x57a0, 0x04},
- {0x57a1, 0xa0},
- {0x5780, 0x14},
- {0x5781, 0x0f},
- {0x5782, 0x44},
- {0x5783, 0x02},
- {0x5784, 0x01},
- {0x5785, 0x01},
- {0x5786, 0x00},
- {0x5787, 0x04},
- {0x5788, 0x02},
- {0x5789, 0x0f},
- {0x578a, 0xfd},
- {0x578b, 0xf5},
- {0x578c, 0xf5},
- {0x578d, 0x03},
- {0x578e, 0x08},
- {0x578f, 0x0c},
- {0x5790, 0x08},
- {0x5791, 0x04},
- {0x5792, 0x00},
- {0x5793, 0x52},
- {0x5794, 0xa3},
- {0x59f8, 0x3d},
- {0x5a08, 0x02},
- {0x5b00, 0x02},
- {0x5b01, 0x10},
- {0x5b02, 0x03},
- {0x5b03, 0xcf},
- {0x5b05, 0x6c},
- {0x5e00, 0x00},
- {0x5e10, 0xfc}
+static const struct ov8856_reg lane_4_mode_3280x2464[] = {
+ /* 3280x2464 resolution */
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x9a},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x20},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x10},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x23},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x06},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa7},
+ {0x3808, 0x0c},
+ {0x3809, 0xd0},
+ {0x380a, 0x09},
+ {0x380b, 0xa0},
+ {0x380c, 0x07},
+ {0x380d, 0x88},
+ {0x380e, 0x09},
+ {0x380f, 0xb8},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x02},
+ {0x5796, 0x20},
+ {0x5797, 0x20},
+ {0x5798, 0xd5},
+ {0x5799, 0xd5},
+ {0x579a, 0x00},
+ {0x579b, 0x50},
+ {0x579c, 0x00},
+ {0x579d, 0x2c},
+ {0x579e, 0x0c},
+ {0x579f, 0x40},
+ {0x57a0, 0x09},
+ {0x57a1, 0x40},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
};
-static const char * const ov8856_test_pattern_menu[] = {
- "Disabled",
- "Standard Color Bar",
- "Top-Bottom Darker Color Bar",
- "Right-Left Darker Color Bar",
- "Bottom-Top Darker Color Bar"
+static const struct ov8856_reg lane_4_mode_1640x1232[] = {
+ /* 1640x1232 resolution */
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x4c},
+ {0x3502, 0xe0},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x20},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x08},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x27},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xaf},
+ {0x3808, 0x06},
+ {0x3809, 0x68},
+ {0x380a, 0x04},
+ {0x380b, 0xd0},
+ {0x380c, 0x0e},
+ {0x380d, 0xec},
+ {0x380e, 0x04},
+ {0x380f, 0xe8},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x90},
+ {0x3821, 0x67},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x00},
+ {0x5796, 0x10},
+ {0x5797, 0x10},
+ {0x5798, 0x73},
+ {0x5799, 0x73},
+ {0x579a, 0x00},
+ {0x579b, 0x28},
+ {0x579c, 0x00},
+ {0x579d, 0x16},
+ {0x579e, 0x06},
+ {0x579f, 0x20},
+ {0x57a0, 0x04},
+ {0x57a1, 0xa0},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
};
-static const s64 link_freq_menu_items[] = {
- OV8856_LINK_FREQ_360MHZ,
- OV8856_LINK_FREQ_180MHZ
+static const struct ov8856_reg lane_4_mode_3264x2448[] = {
+ /* 3264x2448 resolution */
+ {0x0103, 0x01},
+ {0x0302, 0x3c},
+ {0x0303, 0x01},
+ {0x031e, 0x0c},
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x9a},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x60},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366d, 0x00},
+ {0x366e, 0x10},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x23},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa3},
+ {0x3808, 0x0c},
+ {0x3809, 0xc0},
+ {0x380a, 0x09},
+ {0x380b, 0x90},
+ {0x380c, 0x07},
+ {0x380d, 0x8c},
+ {0x380e, 0x09},
+ {0x380f, 0xb2},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4502, 0x50},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x50},
+ {0x481f, 0x27},
+ {0x4823, 0x3c},
+ {0x482b, 0x00},
+ {0x4831, 0x66},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x77},
+ {0x5001, 0x0a},
+ {0x5003, 0xc8},
+ {0x5004, 0x04},
+ {0x5006, 0x00},
+ {0x5007, 0x00},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x02},
+ {0x5796, 0x20},
+ {0x5797, 0x20},
+ {0x5798, 0xd5},
+ {0x5799, 0xd5},
+ {0x579a, 0x00},
+ {0x579b, 0x50},
+ {0x579c, 0x00},
+ {0x579d, 0x2c},
+ {0x579e, 0x0c},
+ {0x579f, 0x40},
+ {0x57a0, 0x09},
+ {0x57a1, 0x40},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00},
+ {0x5e10, 0xfc}
};
-static const struct ov8856_link_freq_config link_freq_configs[] = {
- [OV8856_LINK_FREQ_720MBPS] = {
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
- .regs = mipi_data_rate_720mbps,
- }
- },
- [OV8856_LINK_FREQ_360MBPS] = {
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps),
- .regs = mipi_data_rate_360mbps,
- }
- }
+static const struct ov8856_reg lane_4_mode_1632x1224[] = {
+ /* 1632x1224 resolution */
+ {0x0103, 0x01},
+ {0x0302, 0x3c},
+ {0x0303, 0x01},
+ {0x031e, 0x0c},
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x4c},
+ {0x3502, 0xe0},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x60},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366d, 0x00},
+ {0x366e, 0x08},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x27},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa3},
+ {0x3808, 0x06},
+ {0x3809, 0x60},
+ {0x380a, 0x04},
+ {0x380b, 0xc8},
+ {0x380c, 0x07},
+ {0x380d, 0x8c},
+ {0x380e, 0x09},
+ {0x380f, 0xb2},
+ {0x3810, 0x00},
+ {0x3811, 0x02},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x47},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4502, 0x50},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x50},
+ {0x481f, 0x27},
+ {0x4823, 0x3c},
+ {0x482b, 0x00},
+ {0x4831, 0x66},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x77},
+ {0x5001, 0x0a},
+ {0x5003, 0xc8},
+ {0x5004, 0x04},
+ {0x5006, 0x00},
+ {0x5007, 0x00},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5795, 0x00},
+ {0x5796, 0x10},
+ {0x5797, 0x10},
+ {0x5798, 0x73},
+ {0x5799, 0x73},
+ {0x579a, 0x00},
+ {0x579b, 0x28},
+ {0x579c, 0x00},
+ {0x579d, 0x16},
+ {0x579e, 0x06},
+ {0x579f, 0x20},
+ {0x57a0, 0x04},
+ {0x57a1, 0xa0},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00},
+ {0x5e10, 0xfc}
};
-static const struct ov8856_mode supported_modes[] = {
- {
- .width = 3280,
- .height = 2464,
- .hts = 1928,
- .vts_def = 2488,
- .vts_min = 2488,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
- .regs = mode_3280x2464_regs,
- },
- .link_freq_index = OV8856_LINK_FREQ_720MBPS,
- },
- {
- .width = 3264,
- .height = 2448,
- .hts = 1932,
- .vts_def = 2482,
- .vts_min = 2482,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs),
- .regs = mode_3264x2448_regs,
- },
- .link_freq_index = OV8856_LINK_FREQ_720MBPS,
- },
- {
- .width = 1640,
- .height = 1232,
- .hts = 3820,
- .vts_def = 1256,
- .vts_min = 1256,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs),
- .regs = mode_1640x1232_regs,
- },
- .link_freq_index = OV8856_LINK_FREQ_360MBPS,
- },
- {
- .width = 1632,
- .height = 1224,
- .hts = 1932,
- .vts_def = 2482,
- .vts_min = 2482,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_1632x1224_regs),
- .regs = mode_1632x1224_regs,
- },
- .link_freq_index = OV8856_LINK_FREQ_360MBPS,
- }
+static const char * const ov8856_test_pattern_menu[] = {
+ "Disabled",
+ "Standard Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Bottom-Top Darker Color Bar"
};
struct ov8856 {
@@ -1037,20 +1404,173 @@ struct ov8856 {
/* Streaming on/off */
bool streaming;
+
+ /* number of data lanes in use */
+ u8 nlanes;
+
+ const struct ov8856_lane_cfg *priv_lane;
+ u8 modes_size;
+};
+
+struct ov8856_lane_cfg {
+ const s64 link_freq_menu_items[2];
+ const struct ov8856_link_freq_config link_freq_configs[2];
+ const struct ov8856_mode supported_modes[4];
+};
+
+static const struct ov8856_lane_cfg lane_cfg_2 = {
+ {
+ 720000000,
+ 360000000,
+ },
+ {{
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_lane_2.regs_0),
+ .regs = mipi_data_rate_lane_2.regs_0,
+ }
+ },
+ {
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_lane_2.regs_1),
+ .regs = mipi_data_rate_lane_2.regs_1,
+ }
+ }},
+ {{
+ .width = 3280,
+ .height = 2464,
+ .hts = 1928,
+ .vts_def = 2488,
+ .vts_min = 2488,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_2_mode_3280x2464),
+ .regs = lane_2_mode_3280x2464,
+ },
+ .link_freq_index = 0,
+ .data_lanes = 2,
+ },
+ {
+ .width = 1640,
+ .height = 1232,
+ .hts = 3168,
+ .vts_def = 1514,
+ .vts_min = 1514,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_2_mode_1640x1232),
+ .regs = lane_2_mode_1640x1232,
+ },
+ .link_freq_index = 1,
+ .data_lanes = 2,
+ }}
};
-static u64 to_pixel_rate(u32 f_index)
+static const struct ov8856_lane_cfg lane_cfg_4 = {
+ {
+ 360000000,
+ 180000000,
+ },
+ {{
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_lane_4.regs_0),
+ .regs = mipi_data_rate_lane_4.regs_0,
+ }
+ },
+ {
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mipi_data_rate_lane_4.regs_1),
+ .regs = mipi_data_rate_lane_4.regs_1,
+ }
+ }},
+ {{
+ .width = 3280,
+ .height = 2464,
+ .hts = 1928,
+ .vts_def = 2488,
+ .vts_min = 2488,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_4_mode_3280x2464),
+ .regs = lane_4_mode_3280x2464,
+ },
+ .link_freq_index = 0,
+ .data_lanes = 4,
+ },
+ {
+ .width = 1640,
+ .height = 1232,
+ .hts = 3820,
+ .vts_def = 1256,
+ .vts_min = 1256,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_4_mode_1640x1232),
+ .regs = lane_4_mode_1640x1232,
+ },
+ .link_freq_index = 1,
+ .data_lanes = 4,
+ },
+ {
+ .width = 3264,
+ .height = 2448,
+ .hts = 1932,
+ .vts_def = 2482,
+ .vts_min = 2482,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_4_mode_3264x2448),
+ .regs = lane_4_mode_3264x2448,
+ },
+ .link_freq_index = 0,
+ .data_lanes = 4,
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .hts = 1932,
+ .vts_def = 2482,
+ .vts_min = 2482,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(lane_4_mode_1632x1224),
+ .regs = lane_4_mode_1632x1224,
+ },
+ .link_freq_index = 1,
+ .data_lanes = 4,
+ }}
+};
+
+static unsigned int ov8856_modes_num(const struct ov8856 *ov8856)
{
- u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV8856_DATA_LANES;
+ unsigned int i, count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ov8856->priv_lane->supported_modes); i++) {
+ if (ov8856->priv_lane->supported_modes[i].width == 0)
+ break;
+ count++;
+ }
+
+ return count;
+}
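[Note: supported_modes is a fixed-size array of four entries in struct ov8856_lane_cfg, while lane_cfg_2 below populates only two of them; ov8856_modes_num() therefore counts up to the first entry with a zero width. This relies on the remaining entries of a partially initialized static const aggregate being zero-filled, which C guarantees, e.g.:

	/* illustrative: entries [2] and [3] are implicitly all-zero */
	static const struct ov8856_mode modes[4] = {
		{ .width = 3280, .height = 2464 },
		{ .width = 1640, .height = 1232 },
	};
	/* an ov8856_modes_num()-style scan stops at modes[2].width == 0 */
]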
+
+static u64 to_rate(const s64 *link_freq_menu_items,
+ u32 f_index, u8 nlanes)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * nlanes;
do_div(pixel_rate, OV8856_RGB_DEPTH);
return pixel_rate;
}
-static u64 to_pixels_per_line(u32 hts, u32 f_index)
+static u64 to_pixels_per_line(const s64 *link_freq_menu_items, u32 hts,
+ u32 f_index, u8 nlanes)
{
- u64 ppl = hts * to_pixel_rate(f_index);
+ u64 ppl = hts * to_rate(link_freq_menu_items, f_index, nlanes);
do_div(ppl, OV8856_SCLK);
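[Note: to_rate() generalizes the old to_pixel_rate() by taking the lane count, since the link-frequency tables now differ per lane configuration. Plugging in the menu values above, and assuming OV8856_RGB_DEPTH is the 10-bit raw depth used elsewhere in this driver:

	/*
	 * pixel_rate = link_freq * 2 (DDR) * nlanes / bit depth
	 * 2 lanes @ 720 MHz: 720000000 * 2 * 2 / 10 = 288000000 pix/s
	 * 4 lanes @ 360 MHz: 360000000 * 2 * 4 / 10 = 288000000 pix/s
	 */

so the two- and four-lane configurations land on the same pixel rate for their link-frequency index 0 modes.]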
@@ -1152,6 +1672,93 @@ static int ov8856_test_pattern(struct ov8856 *ov8856, u32 pattern)
OV8856_REG_VALUE_08BIT, pattern);
}
+static int ov8856_set_ctrl_hflip(struct ov8856 *ov8856, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_MIRROR_OPT_1,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MIRROR_OPT_1,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val & ~OV8856_REG_MIRROR_OP_2 :
+ val | OV8856_REG_MIRROR_OP_2);
+
+ if (ret)
+ return ret;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_FORMAT2,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ return ov8856_write_reg(ov8856, OV8856_REG_FORMAT2,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val & ~OV8856_REG_FORMAT2_OP_1 &
+ ~OV8856_REG_FORMAT2_OP_2 &
+ ~OV8856_REG_FORMAT2_OP_3 :
+ val | OV8856_REG_FORMAT2_OP_1 |
+ OV8856_REG_FORMAT2_OP_2 |
+ OV8856_REG_FORMAT2_OP_3);
+}
+
+static int ov8856_set_ctrl_vflip(struct ov8856 *ov8856, u8 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_FLIP_OPT_1,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_FLIP_OPT_1,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val | OV8856_REG_FLIP_OP_1 |
+ OV8856_REG_FLIP_OP_2 :
+ val & ~OV8856_REG_FLIP_OP_1 &
+ ~OV8856_REG_FLIP_OP_2);
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_FLIP_OPT_2,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_FLIP_OPT_2,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val | OV8856_REG_FLIP_OP_2 :
+ val & ~OV8856_REG_FLIP_OP_2);
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_FLIP_OPT_3,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_FLIP_OPT_3,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val & ~OV8856_REG_FLIP_OP_0 &
+ ~OV8856_REG_FLIP_OP_1 :
+ val | OV8856_REG_FLIP_OP_0 |
+ OV8856_REG_FLIP_OP_1);
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_FORMAT1,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ return ov8856_write_reg(ov8856, OV8856_REG_FORMAT1,
+ OV8856_REG_VALUE_08BIT,
+ ctrl_val ? val | OV8856_REG_FORMAT1_OP_1 |
+ OV8856_REG_FORMAT1_OP_3 |
+ OV8856_REG_FORMAT1_OP_2 :
+ val & ~OV8856_REG_FORMAT1_OP_1 &
+ ~OV8856_REG_FORMAT1_OP_3 &
+ ~OV8856_REG_FORMAT1_OP_2);
+}
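[Review note: in ov8856_set_ctrl_vflip() above, only the reads and the final write have their return value acted on; the three intermediate ov8856_write_reg() calls assign ret but fall through to the next read regardless. If that is not intentional, each write would want the same guard already used after the reads:

	ret = ov8856_write_reg(ov8856, OV8856_REG_FLIP_OPT_1,
			       OV8856_REG_VALUE_08BIT, ...);
	if (ret)
		return ret;

Left as an observation; the hunk is reproduced as posted.]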
+
static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov8856 *ov8856 = container_of(ctrl->handler,
@@ -1201,6 +1808,14 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov8856_test_pattern(ov8856, ctrl->val);
break;
+ case V4L2_CID_HFLIP:
+ ret = ov8856_set_ctrl_hflip(ov8856, ctrl->val);
+ break;
+
+ case V4L2_CID_VFLIP:
+ ret = ov8856_set_ctrl_vflip(ov8856, ctrl->val);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -1229,23 +1844,32 @@ static int ov8856_init_controls(struct ov8856 *ov8856)
ctrl_hdlr->lock = &ov8856->mutex;
ov8856->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov8856_ctrl_ops,
V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(link_freq_menu_items) - 1,
- 0, link_freq_menu_items);
+ ARRAY_SIZE
+ (ov8856->priv_lane->link_freq_menu_items)
+ - 1,
+ 0, ov8856->priv_lane->link_freq_menu_items);
if (ov8856->link_freq)
ov8856->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ov8856->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
V4L2_CID_PIXEL_RATE, 0,
- to_pixel_rate(OV8856_LINK_FREQ_720MBPS),
- 1,
- to_pixel_rate(OV8856_LINK_FREQ_720MBPS));
+ to_rate(ov8856->priv_lane->link_freq_menu_items,
+ 0,
+ ov8856->cur_mode->data_lanes), 1,
+ to_rate(ov8856->priv_lane->link_freq_menu_items,
+ 0,
+ ov8856->cur_mode->data_lanes));
ov8856->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
V4L2_CID_VBLANK,
ov8856->cur_mode->vts_min - ov8856->cur_mode->height,
OV8856_VTS_MAX - ov8856->cur_mode->height, 1,
- ov8856->cur_mode->vts_def - ov8856->cur_mode->height);
- h_blank = to_pixels_per_line(ov8856->cur_mode->hts,
- ov8856->cur_mode->link_freq_index) - ov8856->cur_mode->width;
+ ov8856->cur_mode->vts_def -
+ ov8856->cur_mode->height);
+ h_blank = to_pixels_per_line(ov8856->priv_lane->link_freq_menu_items,
+ ov8856->cur_mode->hts,
+ ov8856->cur_mode->link_freq_index,
+ ov8856->cur_mode->data_lanes) -
+ ov8856->cur_mode->width;
ov8856->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
V4L2_CID_HBLANK, h_blank, h_blank, 1,
h_blank);
@@ -1268,6 +1892,10 @@ static int ov8856_init_controls(struct ov8856 *ov8856)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov8856_test_pattern_menu) - 1,
0, 0, ov8856_test_pattern_menu);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
if (ctrl_hdlr->error)
return ctrl_hdlr->error;
@@ -1292,7 +1920,8 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
int link_freq_index, ret;
link_freq_index = ov8856->cur_mode->link_freq_index;
- reg_list = &link_freq_configs[link_freq_index].reg_list;
+ reg_list = &ov8856->priv_lane->link_freq_configs[link_freq_index].reg_list;
+
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
dev_err(&client->dev, "failed to set plls");
@@ -1340,9 +1969,8 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&ov8856->mutex);
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
mutex_unlock(&ov8856->mutex);
return ret;
}
@@ -1455,27 +2083,29 @@ static int __maybe_unused ov8856_resume(struct device *dev)
}
static int ov8856_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov8856 *ov8856 = to_ov8856(sd);
const struct ov8856_mode *mode;
s32 vblank_def, h_blank;
- mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes), width,
- height, fmt->format.width,
+ mode = v4l2_find_nearest_size(ov8856->priv_lane->supported_modes,
+ ov8856->modes_size,
+ width, height, fmt->format.width,
fmt->format.height);
mutex_lock(&ov8856->mutex);
ov8856_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov8856->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index);
__v4l2_ctrl_s_ctrl_int64(ov8856->pixel_rate,
- to_pixel_rate(mode->link_freq_index));
+ to_rate(ov8856->priv_lane->link_freq_menu_items,
+ mode->link_freq_index,
+ ov8856->cur_mode->data_lanes));
/* Update limits and set FPS to default */
vblank_def = mode->vts_def - mode->height;
@@ -1484,8 +2114,11 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
OV8856_VTS_MAX - mode->height, 1,
vblank_def);
__v4l2_ctrl_s_ctrl(ov8856->vblank, vblank_def);
- h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
- mode->width;
+ h_blank = to_pixels_per_line(ov8856->priv_lane->link_freq_menu_items,
+ mode->hts,
+ mode->link_freq_index,
+ ov8856->cur_mode->data_lanes)
+ - mode->width;
__v4l2_ctrl_modify_range(ov8856->hblank, h_blank, h_blank, 1,
h_blank);
}
@@ -1496,14 +2129,15 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
}
static int ov8856_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd,
+ sd_state,
fmt->pad);
else
ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
@@ -1514,7 +2148,7 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
}
static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one Bayer order (GRBG) is supported */
@@ -1527,18 +2161,20 @@ static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
+ struct ov8856 *ov8856 = to_ov8856(sd);
+
+ if (fse->index >= ov8856->modes_size)
return -EINVAL;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
return -EINVAL;
- fse->min_width = supported_modes[fse->index].width;
+ fse->min_width = ov8856->priv_lane->supported_modes[fse->index].width;
fse->max_width = fse->min_width;
- fse->min_height = supported_modes[fse->index].height;
+ fse->min_height = ov8856->priv_lane->supported_modes[fse->index].height;
fse->max_height = fse->min_height;
return 0;
@@ -1549,8 +2185,8 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ ov8856_update_pad_format(&ov8856->priv_lane->supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov8856->mutex);
return 0;
@@ -1696,29 +2332,40 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (ret)
return ret;
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV8856_DATA_LANES) {
+ /* Get number of data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
dev_err(dev, "number of CSI2 data lanes %d is not supported",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto check_hwcfg_error;
}
+ dev_dbg(dev, "Using %u data lanes\n", ov8856->cur_mode->data_lanes);
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes == 2)
+ ov8856->priv_lane = &lane_cfg_2;
+ else
+ ov8856->priv_lane = &lane_cfg_4;
+
+ ov8856->modes_size = ov8856_modes_num(ov8856);
+
if (!bus_cfg.nr_of_link_frequencies) {
dev_err(dev, "no link frequencies defined");
ret = -EINVAL;
goto check_hwcfg_error;
}
- for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (i = 0; i < ARRAY_SIZE(ov8856->priv_lane->link_freq_menu_items); i++) {
for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
- if (link_freq_menu_items[i] ==
- bus_cfg.link_frequencies[j])
+ if (ov8856->priv_lane->link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
break;
}
if (j == bus_cfg.nr_of_link_frequencies) {
dev_err(dev, "no link frequency %lld supported",
- link_freq_menu_items[i]);
+ ov8856->priv_lane->link_freq_menu_items[i]);
ret = -EINVAL;
goto check_hwcfg_error;
}
@@ -1777,7 +2424,7 @@ static int ov8856_probe(struct i2c_client *client)
}
mutex_init(&ov8856->mutex);
- ov8856->cur_mode = &supported_modes[0];
+ ov8856->cur_mode = &ov8856->priv_lane->supported_modes[0];
ret = ov8856_init_controls(ov8856);
if (ret) {
dev_err(&client->dev, "failed to init controls: %d", ret);
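A note on the recurring pattern in this and the following conversions: the
struct v4l2_subdev_pad_config argument of the subdev pad operations is
replaced by struct v4l2_subdev_state, which wraps the per-pad try data.
A minimal sketch of a converted pad operation, using a hypothetical
driver name "foo":

	static int foo_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
	{
		if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
			/* Try formats now live in the state object. */
			fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
								  fmt->pad);
			return 0;
		}

		/* Otherwise fill fmt->format from the active configuration. */
		return 0;
	}

The conversion is mechanical: v4l2_subdev_get_try_format(),
v4l2_subdev_get_try_crop() and v4l2_subdev_get_try_compose() all take the
state where they previously took the pad config.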
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index 9ecf180635ee..ce50f3ea87b8 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2497,11 +2497,9 @@ static int ov8865_s_stream(struct v4l2_subdev *subdev, int enable)
int ret;
if (enable) {
- ret = pm_runtime_get_sync(sensor->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sensor->dev);
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
return ret;
- }
}
mutex_lock(&sensor->mutex);
@@ -2544,7 +2542,7 @@ static const struct v4l2_subdev_video_ops ov8865_subdev_video_ops = {
/* Subdev Pad Operations */
static int ov8865_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code_enum)
{
if (code_enum->index >= ARRAY_SIZE(ov8865_mbus_codes))
@@ -2573,7 +2571,7 @@ static void ov8865_mbus_format_fill(struct v4l2_mbus_framefmt *mbus_format,
}
static int ov8865_get_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
@@ -2582,7 +2580,7 @@ static int ov8865_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(&sensor->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, config,
+ *mbus_format = *v4l2_subdev_get_try_format(subdev, sd_state,
format->pad);
else
ov8865_mbus_format_fill(mbus_format, sensor->state.mbus_code,
@@ -2594,7 +2592,7 @@ static int ov8865_get_fmt(struct v4l2_subdev *subdev,
}
static int ov8865_set_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
@@ -2635,7 +2633,7 @@ static int ov8865_set_fmt(struct v4l2_subdev *subdev,
ov8865_mbus_format_fill(mbus_format, mbus_code, mode);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, config, format->pad) =
+ *v4l2_subdev_get_try_format(subdev, sd_state, format->pad) =
*mbus_format;
else if (sensor->state.mode != mode ||
sensor->state.mbus_code != mbus_code)
@@ -2648,7 +2646,7 @@ complete:
}
static int ov8865_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *size_enum)
{
const struct ov8865_mode *mode;
@@ -2665,7 +2663,7 @@ static int ov8865_enum_frame_size(struct v4l2_subdev *subdev,
}
static int ov8865_enum_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *interval_enum)
{
const struct ov8865_mode *mode = NULL;
@@ -2691,7 +2689,7 @@ static int ov8865_enum_frame_interval(struct v4l2_subdev *subdev,
}
}
- if (mode_index == ARRAY_SIZE(ov8865_modes) || !mode)
+ if (mode_index == ARRAY_SIZE(ov8865_modes))
return -EINVAL;
interval_enum->interval = mode->frame_interval;
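The s_stream() changes in ov8856 above and in ov8865/ov9734 follow the
kernel-wide move from pm_runtime_get_sync() to pm_runtime_resume_and_get().
The latter drops the usage count itself when the resume fails, so the
pm_runtime_put_noidle() call disappears from the error path. A minimal
sketch, assuming a typical I2C subdev driver:

	static int foo_s_stream(struct v4l2_subdev *sd, int enable)
	{
		struct i2c_client *client = v4l2_get_subdevdata(sd);
		int ret;

		if (enable) {
			/*
			 * No pm_runtime_put_noidle() needed on failure:
			 * pm_runtime_resume_and_get() already balanced
			 * the usage count.
			 */
			ret = pm_runtime_resume_and_get(&client->dev);
			if (ret < 0)
				return ret;
		}

		/* ... start or stop streaming ... */
		return 0;
	}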
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index d36b04c49628..0bab8c2cf160 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -519,7 +519,7 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
}
static int ov9640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -547,13 +547,13 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return ov9640_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov9640_codes))
@@ -565,7 +565,7 @@ static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov9640_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 4fe68aa55789..c313e11a9754 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1070,7 +1070,7 @@ static void ov965x_get_default_format(struct v4l2_mbus_framefmt *mf)
}
static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(ov965x_formats))
@@ -1081,7 +1081,7 @@ static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(ov965x_formats);
@@ -1167,14 +1167,14 @@ static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
}
static int ov965x_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov965x *ov965x = to_ov965x(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
return 0;
}
@@ -1212,7 +1212,7 @@ static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
}
static int ov965x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
unsigned int index = ARRAY_SIZE(ov965x_formats);
@@ -1234,8 +1234,9 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov965x->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
*mf = fmt->format;
}
} else {
@@ -1364,7 +1365,7 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
ov965x_get_default_format(mf);
return 0;
@@ -1479,8 +1480,8 @@ static int ov965x_detect_sensor(struct v4l2_subdev *sd)
if (ov965x->id == OV9650_ID || ov965x->id == OV9652_ID) {
v4l2_info(sd, "Found OV%04X sensor\n", ov965x->id);
} else {
- v4l2_err(sd, "Sensor detection failed (%04X, %d)\n",
- ov965x->id, ret);
+ v4l2_err(sd, "Sensor detection failed (%04X)\n",
+ ov965x->id);
ret = -ENODEV;
}
}
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index b7309a551cae..af50c66cf5ce 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -644,9 +644,8 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
}
if (enable) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
mutex_unlock(&ov9734->mutex);
return ret;
}
@@ -706,7 +705,7 @@ exit:
}
static int ov9734_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov9734 *ov9734 = to_ov9734(sd);
@@ -721,7 +720,7 @@ static int ov9734_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov9734->mutex);
ov9734_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov9734->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov9734->link_freq, mode->link_freq_index);
@@ -747,14 +746,15 @@ static int ov9734_set_format(struct v4l2_subdev *sd,
}
static int ov9734_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov9734 *ov9734 = to_ov9734(sd);
mutex_lock(&ov9734->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov9734->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov9734->sd,
+ sd_state,
fmt->pad);
else
ov9734_update_pad_format(ov9734->cur_mode, &fmt->format);
@@ -765,7 +765,7 @@ static int ov9734_get_format(struct v4l2_subdev *sd,
}
static int ov9734_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -777,7 +777,7 @@ static int ov9734_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov9734_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -800,7 +800,7 @@ static int ov9734_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov9734->mutex);
ov9734_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov9734->mutex);
return 0;
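The .open() conversions (fh->pad becoming fh->state) are part of the same
rework: struct v4l2_subdev_fh now carries the file handle's subdev state
rather than a bare pad-config array. A sketch of the resulting pattern,
with a hypothetical default format:

	static int foo_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
	{
		/* Seed this file handle's try format with the default mode. */
		struct v4l2_mbus_framefmt *try_fmt =
			v4l2_subdev_get_try_format(sd, fh->state, 0);

		*try_fmt = foo_default_fmt;	/* hypothetical default */
		return 0;
	}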
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 90eb73f0e6e9..025a610de893 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -312,7 +312,7 @@ static const struct ov10635_reg {
struct rdacm20_device {
struct device *dev;
- struct max9271_device *serializer;
+ struct max9271_device serializer;
struct i2c_client *sensor;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -399,11 +399,11 @@ static int rdacm20_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rdacm20_device *dev = sd_to_rdacm20(sd);
- return max9271_set_serial_link(dev->serializer, enable);
+ return max9271_set_serial_link(&dev->serializer, enable);
}
static int rdacm20_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -415,7 +415,7 @@ static int rdacm20_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rdacm20_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -455,12 +455,10 @@ static int rdacm20_initialize(struct rdacm20_device *dev)
unsigned int retry = 3;
int ret;
- /* Verify communication with the MAX9271: ping to wakeup. */
- dev->serializer->client->addr = MAX9271_DEFAULT_ADDR;
- i2c_smbus_read_byte(dev->serializer->client);
+ max9271_wake_up(&dev->serializer);
/* Serial link disabled during config as it needs a valid pixel clock. */
- ret = max9271_set_serial_link(dev->serializer, false);
+ ret = max9271_set_serial_link(&dev->serializer, false);
if (ret)
return ret;
@@ -468,38 +466,48 @@ static int rdacm20_initialize(struct rdacm20_device *dev)
* Ensure that we have a good link configuration before attempting to
* identify the device.
*/
- max9271_configure_i2c(dev->serializer, MAX9271_I2CSLVSH_469NS_234NS |
- MAX9271_I2CSLVTO_1024US |
- MAX9271_I2CMSTBT_105KBPS);
-
- max9271_configure_gmsl_link(dev->serializer);
-
- ret = max9271_verify_id(dev->serializer);
- if (ret < 0)
- return ret;
-
- ret = max9271_set_address(dev->serializer, dev->addrs[0]);
- if (ret < 0)
+ ret = max9271_configure_i2c(&dev->serializer,
+ MAX9271_I2CSLVSH_469NS_234NS |
+ MAX9271_I2CSLVTO_1024US |
+ MAX9271_I2CMSTBT_105KBPS);
+ if (ret)
return ret;
- dev->serializer->client->addr = dev->addrs[0];
/*
- * Reset the sensor by cycling the OV10635 reset signal connected to the
- * MAX9271 GPIO1 and verify communication with the OV10635.
+ * Hold OV10635 in reset during max9271 configuration. The reset signal
+ * has to be asserted for at least 200 microseconds.
*/
- ret = max9271_enable_gpios(dev->serializer, MAX9271_GPIO1OUT);
+ ret = max9271_enable_gpios(&dev->serializer, MAX9271_GPIO1OUT);
+ if (ret)
+ return ret;
+
+ ret = max9271_clear_gpios(&dev->serializer, MAX9271_GPIO1OUT);
if (ret)
return ret;
+ usleep_range(200, 500);
- ret = max9271_clear_gpios(dev->serializer, MAX9271_GPIO1OUT);
+ ret = max9271_configure_gmsl_link(&dev->serializer);
if (ret)
return ret;
- usleep_range(10000, 15000);
- ret = max9271_set_gpios(dev->serializer, MAX9271_GPIO1OUT);
+ ret = max9271_verify_id(&dev->serializer);
+ if (ret < 0)
+ return ret;
+
+ ret = max9271_set_address(&dev->serializer, dev->addrs[0]);
+ if (ret < 0)
+ return ret;
+ dev->serializer.client->addr = dev->addrs[0];
+
+ /*
+ * Release the OV10635 from reset and initialize it. The image sensor
+ * requires at least 2048 XVCLK cycles (85 microseconds at 24 MHz)
+ * before it becomes available. Stay safe and wait up to 500 microseconds.
+ */
+ ret = max9271_set_gpios(&dev->serializer, MAX9271_GPIO1OUT);
if (ret)
return ret;
- usleep_range(10000, 15000);
+ usleep_range(100, 500);
again:
ret = ov10635_read16(dev, OV10635_PID);
@@ -539,9 +547,21 @@ again:
if (ret)
return ret;
- dev_info(dev->dev, "Identified MAX9271 + OV10635 device\n");
+ dev_info(dev->dev, "Identified RDACM20 camera module\n");
- return 0;
+ /*
+ * Set reverse channel high threshold to increase noise immunity.
+ *
+ * This should be compensated by increasing the reverse channel
+ * amplitude on the remote deserializer side.
+ *
+ * TODO Inspect the embedded MCU programming sequence to make sure
+ * there are no conflicts with the configuration applied here.
+ *
+ * TODO Clarify the embedded MCU startup delay to avoid write
+ * collisions on the I2C bus.
+ */
+ return max9271_set_high_threshold(&dev->serializer, true);
}
static int rdacm20_probe(struct i2c_client *client)
@@ -554,13 +574,7 @@ static int rdacm20_probe(struct i2c_client *client)
if (!dev)
return -ENOMEM;
dev->dev = &client->dev;
-
- dev->serializer = devm_kzalloc(&client->dev, sizeof(*dev->serializer),
- GFP_KERNEL);
- if (!dev->serializer)
- return -ENOMEM;
-
- dev->serializer->client = client;
+ dev->serializer.client = client;
ret = of_property_read_u32_array(client->dev.of_node, "reg",
dev->addrs, 2);
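A quick check of the new delays in rdacm20_initialize(): 2048 XVCLK cycles
at 24 MHz is 2048 / 24,000,000 s ≈ 85.3 µs, matching the 85 µs quoted in
the comment, so usleep_range(100, 500) covers the sensor's start-up
requirement with margin while replacing the former 10-15 ms sleeps.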
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 179d107f494c..12ec5467ed1e 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -69,6 +69,7 @@
#define OV490_ISP_VSIZE_LOW 0x80820062
#define OV490_ISP_VSIZE_HIGH 0x80820063
+#define OV10640_PID_TIMEOUT 20
#define OV10640_ID_HIGH 0xa6
#define OV10640_CHIP_ID 0x300a
#define OV10640_PIXEL_RATE 55000000
@@ -281,7 +282,7 @@ static int rdacm21_s_stream(struct v4l2_subdev *sd, int enable)
}
static int rdacm21_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -293,7 +294,7 @@ static int rdacm21_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rdacm21_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -329,30 +330,51 @@ static const struct v4l2_subdev_ops rdacm21_subdev_ops = {
.pad = &rdacm21_subdev_pad_ops,
};
-static int ov10640_initialize(struct rdacm21_device *dev)
+static void ov10640_power_up(struct rdacm21_device *dev)
{
- u8 val;
-
- /* Power-up OV10640 by setting RESETB and PWDNB pins high. */
+ /* Enable GPIO0#0 (reset) and GPIO1#0 (pwdn) as output lines. */
ov490_write_reg(dev, OV490_GPIO_SEL0, OV490_GPIO0);
ov490_write_reg(dev, OV490_GPIO_SEL1, OV490_SPWDN0);
ov490_write_reg(dev, OV490_GPIO_DIRECTION0, OV490_GPIO0);
ov490_write_reg(dev, OV490_GPIO_DIRECTION1, OV490_SPWDN0);
+
+ /* Power up OV10640 and then reset it. */
+ ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE1, OV490_SPWDN0);
+ usleep_range(1500, 3000);
+
+ ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, 0x00);
+ usleep_range(1500, 3000);
ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_GPIO0);
- ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_SPWDN0);
usleep_range(3000, 5000);
+}
- /* Read OV10640 ID to test communications. */
- ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
- ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
- ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
-
- /* Trigger SCCB slave transaction and give it some time to complete. */
- ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
- usleep_range(1000, 1500);
+static int ov10640_check_id(struct rdacm21_device *dev)
+{
+ unsigned int i;
+ u8 val;
- ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
- if (val != OV10640_ID_HIGH) {
+ /* Read OV10640 ID to test communications. */
+ for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR,
+ OV490_SCCB_SLAVE_READ);
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH,
+ OV10640_CHIP_ID >> 8);
+ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW,
+ OV10640_CHIP_ID & 0xff);
+
+ /*
+ * Trigger SCCB slave transaction and give it some time
+ * to complete.
+ */
+ ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
+ usleep_range(1000, 1500);
+
+ ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
+ if (val == OV10640_ID_HIGH)
+ break;
+ usleep_range(1000, 1500);
+ }
+ if (i == OV10640_PID_TIMEOUT) {
dev_err(dev->dev, "OV10640 ID mismatch: (0x%02x)\n", val);
return -ENODEV;
}
@@ -368,6 +390,8 @@ static int ov490_initialize(struct rdacm21_device *dev)
unsigned int i;
int ret;
+ ov10640_power_up(dev);
+
/*
* Read OV490 Id to test communications. Give it up to 40msec to
* exit from reset.
@@ -405,7 +429,7 @@ static int ov490_initialize(struct rdacm21_device *dev)
return -ENODEV;
}
- ret = ov10640_initialize(dev);
+ ret = ov10640_check_id(dev);
if (ret)
return ret;
@@ -450,10 +474,7 @@ static int rdacm21_initialize(struct rdacm21_device *dev)
{
int ret;
- /* Verify communication with the MAX9271: ping to wakeup. */
- dev->serializer.client->addr = MAX9271_DEFAULT_ADDR;
- i2c_smbus_read_byte(dev->serializer.client);
- usleep_range(3000, 5000);
+ max9271_wake_up(&dev->serializer);
/* Enable reverse channel and disable the serial link. */
ret = max9271_set_serial_link(&dev->serializer, false);
@@ -472,7 +493,10 @@ static int rdacm21_initialize(struct rdacm21_device *dev)
if (ret)
return ret;
- /* Enable GPIO1 and hold OV490 in reset during max9271 configuration. */
+ /*
+ * Enable GPIO1 and hold OV490 in reset during max9271 configuration.
+ * The reset signal has to be asserted for at least 250 microseconds.
+ */
ret = max9271_enable_gpios(&dev->serializer, MAX9271_GPIO1OUT);
if (ret)
return ret;
@@ -480,6 +504,7 @@ static int rdacm21_initialize(struct rdacm21_device *dev)
ret = max9271_clear_gpios(&dev->serializer, MAX9271_GPIO1OUT);
if (ret)
return ret;
+ usleep_range(250, 500);
ret = max9271_configure_gmsl_link(&dev->serializer);
if (ret)
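Worst-case timing of the new ov10640_check_id() poll loop: each of the
OV10640_PID_TIMEOUT (20) attempts performs two usleep_range(1000, 1500)
sleeps when the ID does not match, i.e. 2-3 ms per attempt plus the
register accesses, so the sensor gets roughly 40-60 ms to report its ID
before the probe fails with -ENODEV.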
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index 4cc51e001874..2e4018c26912 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -488,7 +488,7 @@ static int reg_write_multiple(struct i2c_client *client,
}
static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts))
@@ -541,7 +541,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h);
static int rj54n1_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -578,7 +578,7 @@ static int rj54n1_set_selection(struct v4l2_subdev *sd,
}
static int rj54n1_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -603,7 +603,7 @@ static int rj54n1_get_selection(struct v4l2_subdev *sd,
}
static int rj54n1_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -973,7 +973,7 @@ static int rj54n1_reg_init(struct i2c_client *client)
}
static int rj54n1_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -1009,7 +1009,7 @@ static int rj54n1_set_fmt(struct v4l2_subdev *sd,
&mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 5b4c4a3547c9..e2b88c5e4f98 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -817,7 +817,7 @@ static const struct s5c73m3_frame_size *s5c73m3_find_frame_size(
}
static void s5c73m3_oif_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
@@ -844,8 +844,8 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
*fs = state->oif_pix_size[RES_ISP];
else
*fs = s5c73m3_find_frame_size(
- v4l2_subdev_get_try_format(sd, cfg,
- OIF_ISP_PAD),
+ v4l2_subdev_get_try_format(sd, sd_state,
+ OIF_ISP_PAD),
RES_ISP);
break;
}
@@ -854,7 +854,7 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
}
static void s5c73m3_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
@@ -946,7 +946,7 @@ static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -984,7 +984,7 @@ static int s5c73m3_oif_get_pad_code(int pad, int index)
}
static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
@@ -992,7 +992,8 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
return 0;
}
@@ -1018,7 +1019,7 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -1026,7 +1027,8 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
return 0;
}
@@ -1056,7 +1058,7 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1066,10 +1068,10 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_try_format(state, cfg, fmt, &frame_size);
+ s5c73m3_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
} else {
switch (fmt->pad) {
@@ -1095,7 +1097,7 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1105,13 +1107,14 @@ static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_oif_try_format(state, cfg, fmt, &frame_size);
+ s5c73m3_oif_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
if (fmt->pad == OIF_ISP_PAD) {
- mf = v4l2_subdev_get_try_format(sd, cfg, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ OIF_SOURCE_PAD);
mf->width = fmt->format.width;
mf->height = fmt->format.height;
}
@@ -1183,7 +1186,7 @@ static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
}
static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const int codes[] = {
@@ -1199,7 +1202,7 @@ static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
int ret;
@@ -1214,7 +1217,7 @@ static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int idx;
@@ -1241,7 +1244,7 @@ static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
}
static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -1259,7 +1262,7 @@ static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
if (fse->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg,
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
OIF_ISP_PAD);
w = mf->width;
@@ -1315,11 +1318,11 @@ static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
@@ -1330,15 +1333,15 @@ static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_SOURCE_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
return 0;
@@ -1386,7 +1389,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
s5c73m3_gpio_deassert(state, STBY);
usleep_range(100, 200);
- s5c73m3_gpio_deassert(state, RST);
+ s5c73m3_gpio_deassert(state, RSET);
usleep_range(50, 100);
return 0;
@@ -1401,7 +1404,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
{
int i, ret;
- if (s5c73m3_gpio_assert(state, RST))
+ if (s5c73m3_gpio_assert(state, RSET))
usleep_range(10, 50);
if (s5c73m3_gpio_assert(state, STBY))
@@ -1606,7 +1609,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
state->mclk_frequency = pdata->mclk_frequency;
state->gpio[STBY] = pdata->gpio_stby;
- state->gpio[RST] = pdata->gpio_reset;
+ state->gpio[RSET] = pdata->gpio_reset;
return 0;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index ef7e85b34263..c3fcfdd3ea66 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -353,7 +353,7 @@ struct s5c73m3_ctrls {
enum s5c73m3_gpio_id {
STBY,
- RST,
+ RSET,
GPIO_NUM,
};
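The RST -> RSET rename here and in the s5k4ecgx/s5k5baf/s5k6aa hunks below
is purely mechanical; it presumably sidesteps a collision with an
identically named RST macro leaked by some architecture or platform
headers, which would otherwise break the enum. A hypothetical illustration
of the conflict:

	#define RST 0x10	/* hypothetical define from an unrelated header */

	enum s5c73m3_gpio_id {
		STBY,
		RST,		/* expands to 0x10: no longer an identifier */
		GPIO_NUM,
	};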
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index b2d53417badf..af9a305242cd 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -173,7 +173,7 @@ static const char * const s5k4ecgx_supply_names[] = {
enum s5k4ecgx_gpio_id {
STBY,
- RST,
+ RSET,
GPIO_NUM,
};
@@ -476,7 +476,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level))
usleep_range(30, 50);
- if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level))
+ if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level))
usleep_range(30, 50);
return 0;
@@ -484,7 +484,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
static int __s5k4ecgx_power_off(struct s5k4ecgx *priv)
{
- if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level))
+ if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level))
usleep_range(30, 50);
if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level))
@@ -525,7 +525,7 @@ static int s5k4ecgx_try_frame_size(struct v4l2_mbus_framefmt *mf,
}
static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k4ecgx_formats))
@@ -535,15 +535,16 @@ static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
}
return 0;
@@ -575,7 +576,8 @@ static const struct s5k4ecgx_pixfmt *s5k4ecgx_try_fmt(struct v4l2_subdev *sd,
return &s5k4ecgx_formats[i];
}
-static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
@@ -590,8 +592,8 @@ static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_confi
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = fmt->format;
}
return 0;
@@ -686,7 +688,9 @@ static int s5k4ecgx_registered(struct v4l2_subdev *sd)
*/
static int s5k4ecgx_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
mf->width = s5k4ecgx_prev_sizes[0].size.width;
mf->height = s5k4ecgx_prev_sizes[0].size.height;
@@ -872,7 +876,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
int ret;
priv->gpio[STBY].gpio = -EINVAL;
- priv->gpio[RST].gpio = -EINVAL;
+ priv->gpio[RSET].gpio = -EINVAL;
ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY");
@@ -891,7 +895,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
s5k4ecgx_free_gpios(priv);
return ret;
}
- priv->gpio[RST] = *gpio;
+ priv->gpio[RSET] = *gpio;
if (gpio_is_valid(gpio->gpio))
gpio_set_value(gpio->gpio, 0);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 6e702b57c37d..6a5dceb699a8 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -235,7 +235,7 @@ struct s5k5baf_gpio {
enum s5k5baf_gpio_id {
STBY,
- RST,
+ RSET,
NUM_GPIOS,
};
@@ -969,7 +969,7 @@ static int s5k5baf_power_on(struct s5k5baf *state)
s5k5baf_gpio_deassert(state, STBY);
usleep_range(50, 100);
- s5k5baf_gpio_deassert(state, RST);
+ s5k5baf_gpio_deassert(state, RSET);
return 0;
err_reg_dis:
@@ -987,7 +987,7 @@ static int s5k5baf_power_off(struct s5k5baf *state)
state->apply_cfg = 0;
state->apply_crop = 0;
- s5k5baf_gpio_assert(state, RST);
+ s5k5baf_gpio_assert(state, RSET);
s5k5baf_gpio_assert(state, STBY);
if (!IS_ERR(state->clock))
@@ -1180,7 +1180,7 @@ static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
@@ -1199,7 +1199,7 @@ static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
}
static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad == PAD_CIS) {
@@ -1217,7 +1217,7 @@ static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i;
@@ -1274,15 +1274,16 @@ static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
return pixfmt;
}
-static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct s5k5baf *state = to_s5k5baf(sd);
const struct s5k5baf_pixfmt *pixfmt;
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1304,8 +1305,9 @@ static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}
-static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
struct s5k5baf *state = to_s5k5baf(sd);
@@ -1315,7 +1317,7 @@ static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
mf->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = *mf;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = *mf;
return 0;
}
@@ -1367,7 +1369,7 @@ static int s5k5baf_is_bound_target(u32 target)
}
static int s5k5baf_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
enum selection_rect rtype;
@@ -1387,9 +1389,11 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
if (rtype == R_COMPOSE)
- sel->r = *v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
+ sel->pad);
else
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
+ sel->pad);
return 0;
}
@@ -1458,7 +1462,7 @@ static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
}
static int s5k5baf_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
static enum selection_rect rtype;
@@ -1479,9 +1483,12 @@ static int s5k5baf_set_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
rects = (struct v4l2_rect * []) {
&s5k5baf_cis_rect,
- v4l2_subdev_get_try_crop(sd, cfg, PAD_CIS),
- v4l2_subdev_get_try_compose(sd, cfg, PAD_CIS),
- v4l2_subdev_get_try_crop(sd, cfg, PAD_OUT)
+ v4l2_subdev_get_try_crop(sd, sd_state,
+ PAD_CIS),
+ v4l2_subdev_get_try_compose(sd, sd_state,
+ PAD_CIS),
+ v4l2_subdev_get_try_crop(sd, sd_state,
+ PAD_OUT)
};
s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
return 0;
@@ -1699,22 +1706,22 @@ static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_CIS);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_CIS);
s5k5baf_try_cis_format(mf);
if (s5k5baf_is_cis_subdev(sd))
return 0;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_OUT);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_OUT);
mf->colorspace = s5k5baf_formats[0].colorspace;
mf->code = s5k5baf_formats[0].code;
mf->width = s5k5baf_cis_rect.width;
mf->height = s5k5baf_cis_rect.height;
mf->field = V4L2_FIELD_NONE;
- *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_compose(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_OUT) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_compose(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->state, PAD_OUT) = s5k5baf_cis_rect;
return 0;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index f26c168ef942..b97dd6149e90 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -99,7 +99,7 @@ static const struct v4l2_mbus_framefmt *find_sensor_format(
}
static int s5k6a3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6a3_formats))
@@ -123,17 +123,18 @@ static void s5k6a3_try_format(struct v4l2_mbus_framefmt *mf)
}
static struct v4l2_mbus_framefmt *__s5k6a3_get_format(
- struct s5k6a3 *sensor, struct v4l2_subdev_pad_config *cfg,
+ struct s5k6a3 *sensor, struct v4l2_subdev_state *sd_state,
u32 pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&sensor->subdev,
+ sd_state, pad) : NULL;
return &sensor->format;
}
static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
@@ -141,7 +142,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
s5k6a3_try_format(&fmt->format);
- mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, sd_state, fmt->pad, fmt->which);
if (mf) {
mutex_lock(&sensor->lock);
*mf = fmt->format;
@@ -151,13 +152,13 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
}
static int s5k6a3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, sd_state, fmt->pad, fmt->which);
mutex_lock(&sensor->lock);
fmt->format = *mf;
@@ -173,7 +174,9 @@ static const struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
static int s5k6a3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
*format = s5k6a3_formats[0];
format->width = S5K6A3_DEFAULT_WIDTH;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 038e38500760..105a4b7d8354 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -177,7 +177,7 @@ static const char * const s5k6aa_supply_names[] = {
enum s5k6aa_gpio_id {
STBY,
- RST,
+ RSET,
GPIO_NUM,
};
@@ -841,7 +841,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
ret = s5k6aa->s_power(1);
usleep_range(4000, 5000);
- if (s5k6aa_gpio_deassert(s5k6aa, RST))
+ if (s5k6aa_gpio_deassert(s5k6aa, RSET))
msleep(20);
return ret;
@@ -851,7 +851,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa)
{
int ret;
- if (s5k6aa_gpio_assert(s5k6aa, RST))
+ if (s5k6aa_gpio_assert(s5k6aa, RSET))
usleep_range(100, 150);
if (s5k6aa->s_power) {
@@ -997,7 +997,7 @@ static int s5k6aa_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1024,7 +1024,7 @@ static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
}
static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6aa_formats))
@@ -1035,7 +1035,7 @@ static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(s5k6aa_formats);
@@ -1057,14 +1057,15 @@ static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
}
static struct v4l2_rect *
-__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa, struct v4l2_subdev_pad_config *cfg,
+__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
return &s5k6aa->ccd_rect;
WARN_ON(which != V4L2_SUBDEV_FORMAT_TRY);
- return v4l2_subdev_get_try_crop(&s5k6aa->sd, cfg, 0);
+ return v4l2_subdev_get_try_crop(&s5k6aa->sd, sd_state, 0);
}
static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
@@ -1088,7 +1089,8 @@ static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
mf->field = V4L2_FIELD_NONE;
}
-static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k6aa_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1097,7 +1099,7 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
memset(fmt->reserved, 0, sizeof(fmt->reserved));
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
return 0;
}
@@ -1109,7 +1111,8 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}
-static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k6aa_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1122,8 +1125,8 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
s5k6aa_try_format(s5k6aa, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
- crop = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ crop = v4l2_subdev_get_try_crop(sd, sd_state, 0);
} else {
if (s5k6aa->streaming) {
ret = -EBUSY;
@@ -1163,7 +1166,7 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
}
static int s5k6aa_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1175,7 +1178,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
memset(sel->reserved, 0, sizeof(sel->reserved));
mutex_lock(&s5k6aa->lock);
- rect = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
+ rect = __s5k6aa_get_crop_rect(s5k6aa, sd_state, sel->which);
sel->r = *rect;
mutex_unlock(&s5k6aa->lock);
@@ -1186,7 +1189,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
}
static int s5k6aa_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1198,13 +1201,13 @@ static int s5k6aa_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&s5k6aa->lock);
- crop_r = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
+ crop_r = __s5k6aa_get_crop_rect(s5k6aa, sd_state, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
mf = &s5k6aa->preset->mbus_fmt;
s5k6aa->apply_crop = 1;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
}
v4l_bound_align_image(&sel->r.width, mf->width,
S5K6AA_WIN_WIDTH_MAX, 1,
@@ -1425,8 +1428,10 @@ static int s5k6aa_initialize_ctrls(struct s5k6aa *s5k6aa)
*/
static int s5k6aa_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
format->colorspace = s5k6aa_formats[0].colorspace;
format->code = s5k6aa_formats[0].code;
@@ -1510,7 +1515,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
int ret;
s5k6aa->gpio[STBY].gpio = -EINVAL;
- s5k6aa->gpio[RST].gpio = -EINVAL;
+ s5k6aa->gpio[RSET].gpio = -EINVAL;
gpio = &pdata->gpio_stby;
if (gpio_is_valid(gpio->gpio)) {
@@ -1533,7 +1538,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
if (ret < 0)
return ret;
- s5k6aa->gpio[RST] = *gpio;
+ s5k6aa->gpio[RSET] = *gpio;
}
return 0;
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index ecb491d5f2ab..d1e0716bdfff 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)
/* ---------------------------------------------------------------------- */
-static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct saa6588 *s = to_saa6588(sd);
struct saa6588_command *a = arg;
@@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops saa6588_core_ops = {
- .ioctl = saa6588_ioctl,
+ .command = saa6588_command,
};
static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {
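The saa6588 hunk swaps the core .ioctl callback for .command. Both exist
in struct v4l2_subdev_core_ops; as I read the split, .command is meant for
in-kernel driver-to-driver messages while .ioctl serves private userspace
ioctls. The RDS commands here are issued by bridge drivers such as bttv
and saa7134, so the caller side would now look something like this
(command codes assumed from include/media/i2c/saa6588.h):

	struct saa6588_command cmd = { /* filled by the bridge driver */ };

	v4l2_subdev_call(sd, core, command, SAA6588_CMD_READ, &cmd);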
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 6171ced809bb..a7f043cad149 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -543,7 +543,7 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
}
static int saa6752hs_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f = &format->format;
@@ -563,7 +563,7 @@ static int saa6752hs_get_fmt(struct v4l2_subdev *sd,
}
static int saa6752hs_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f = &format->format;
@@ -595,7 +595,7 @@ static int saa6752hs_set_fmt(struct v4l2_subdev *sd,
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *f;
+ sd_state->pads->try_fmt = *f;
return 0;
}
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 88dc6baac639..a958bbc2c33d 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1167,7 +1167,7 @@ static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
}
static int saa711x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index ba103a6a1875..adf905360171 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -980,7 +980,7 @@ static int saa717x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regi
#endif
static int saa717x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 46924024faa8..19c0252df2f1 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -468,7 +468,7 @@ static int sr030pc30_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int sr030pc30_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code || code->pad ||
@@ -480,7 +480,7 @@ static int sr030pc30_enum_mbus_code(struct v4l2_subdev *sd,
}
static int sr030pc30_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf;
@@ -525,7 +525,7 @@ static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd,
/* Return nearest media bus frame format. */
static int sr030pc30_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct sr030pc30_info *info = sd ? to_sr030pc30(sd) : NULL;
@@ -541,7 +541,7 @@ static int sr030pc30_set_fmt(struct v4l2_subdev *sd,
fmt = try_fmt(sd, mf);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
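Drivers that used the old single-pad shortcut cfg->try_fmt (ov9640,
rj54n1cb0c, saa6752hs and sr030pc30 in this series) now reach the same
storage through the state: sd_state->pads points at the array of per-pad
data, so for a one-pad sensor

	sd_state->pads->try_fmt		/* i.e. sd_state->pads[0].try_fmt */

is the direct replacement for cfg->try_fmt.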
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 7f07ef56fbbd..f630b88cbfaa 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -643,7 +643,7 @@ out:
}
static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -670,7 +670,7 @@ static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mipid02_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
@@ -687,7 +687,8 @@ static int mipid02_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&bridge->sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(&bridge->sd, sd_state,
+ format->pad);
else
fmt = &bridge->fmt;
@@ -704,7 +705,7 @@ static int mipid02_get_fmt(struct v4l2_subdev *sd,
}
static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -718,11 +719,11 @@ static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
if (format->which != V4L2_SUBDEV_FORMAT_TRY)
return;
- *v4l2_subdev_get_try_format(sd, cfg, format->pad) = format->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format;
}
static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -731,7 +732,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
format->format.code = get_fmt_code(format->format.code);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
else
fmt = &bridge->fmt;
@@ -739,7 +740,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
}
static int mipid02_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -762,9 +763,9 @@ static int mipid02_set_fmt(struct v4l2_subdev *sd,
}
if (format->pad == MIPID02_SOURCE)
- mipid02_set_fmt_source(sd, cfg, format);
+ mipid02_set_fmt_source(sd, sd_state, format);
else
- mipid02_set_fmt_sink(sd, cfg, format);
+ mipid02_set_fmt_sink(sd, sd_state, format);
error:
mutex_unlock(&bridge->lock);
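
Multi-pad bridges such as mipid02 reach the try format through the v4l2_subdev_get_try_format() helper rather than indexing the pads array by hand, so only the state argument changes in this conversion. A hedged sketch of the common get_fmt shape (struct example_dev, to_example_dev() and its fmt field are hypothetical):

    #include <media/v4l2-subdev.h>

    struct example_dev {
            struct v4l2_subdev sd;
            struct v4l2_mbus_framefmt fmt;  /* the active format */
    };

    static inline struct example_dev *to_example_dev(struct v4l2_subdev *sd)
    {
            return container_of(sd, struct example_dev, sd);
    }

    static int example_get_fmt(struct v4l2_subdev *sd,
                               struct v4l2_subdev_state *sd_state,
                               struct v4l2_subdev_format *format)
    {
            struct example_dev *dev = to_example_dev(sd);
            struct v4l2_mbus_framefmt *fmt;

            if (format->which == V4L2_SUBDEV_FORMAT_TRY)
                    fmt = v4l2_subdev_get_try_format(sd, sd_state,
                                                     format->pad);
            else
                    fmt = &dev->fmt;

            format->format = *fmt;
            return 0;
    }
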
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 1b309bb743c7..3205cd8298dd 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1649,7 +1649,7 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
/* --------------- PAD OPS --------------- */
static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->index) {
@@ -1666,7 +1666,7 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tc358743_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tc358743_state *state = to_state(sd);
@@ -1702,13 +1702,13 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd,
}
static int tc358743_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tc358743_state *state = to_state(sd);
u32 code = format->format.code; /* is overwritten by get_fmt */
- int ret = tc358743_get_fmt(sd, cfg, format);
+ int ret = tc358743_get_fmt(sd, sd_state, format);
format->format.code = code;
@@ -1974,6 +1974,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
bps_pr_lane = 2 * endpoint.link_frequencies[0];
if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
+ ret = -EINVAL;
goto disable_clk;
}
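
The tc358743 hunk just above is a plain bug fix folded into the series: the link-frequency check jumped to the shared unwind label without setting ret, so probing could report success for an unsupported configuration. The rule it restores, sketched with hypothetical stub helpers:

    #include <linux/device.h>
    #include <linux/errno.h>

    static int example_clk_enable(struct device *dev) { return 0; }  /* stub */
    static void example_clk_disable(struct device *dev) { }          /* stub */
    static bool example_link_rate_ok(struct device *dev)             /* stub */
    {
            return false;
    }

    static int example_probe_of(struct device *dev)
    {
            int ret;

            ret = example_clk_enable(dev);
            if (ret)
                    return ret;

            if (!example_link_rate_ok(dev)) {
                    ret = -EINVAL;  /* must be set before the goto */
                    goto disable_clk;
            }
            return 0;

    disable_clk:
            example_clk_disable(dev);
            return ret;
    }
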
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 89bb7e6dc7a4..91e6db847bb5 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1718,19 +1718,19 @@ static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
*/
static int tda1997x_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct tda1997x_state *state = to_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
mf->code = state->mbus_codes[0];
return 0;
}
static int tda1997x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct tda1997x_state *state = to_state(sd);
@@ -1762,7 +1762,7 @@ static void tda1997x_fill_format(struct tda1997x_state *state,
}
static int tda1997x_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tda1997x_state *state = to_state(sd);
@@ -1775,7 +1775,7 @@ static int tda1997x_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else
format->format.code = state->mbus_code;
@@ -1784,7 +1784,7 @@ static int tda1997x_get_format(struct v4l2_subdev *sd,
}
static int tda1997x_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tda1997x_state *state = to_state(sd);
@@ -1809,7 +1809,7 @@ static int tda1997x_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
*fmt = format->format;
} else {
int ret = tda1997x_setup_format(state, format->format.code);
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index a7fbe5b400c2..cee60f945036 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -853,13 +853,13 @@ static const struct v4l2_ctrl_ops tvp514x_ctrl_ops = {
/**
* tvp514x_enum_mbus_code() - V4L2 decoder interface handler for enum_mbus_code
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
* Enumerates mbus codes supported
*/
static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 pad = code->pad;
@@ -880,13 +880,13 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
/**
* tvp514x_get_pad_format() - V4L2 decoder interface handler for get pad format
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: subdev state
* @format: pointer to v4l2_subdev_format structure
*
* Retrieves pad format which is active or tried based on requirement
*/
static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -912,13 +912,13 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
/**
* tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: subdev state
* @fmt: pointer to v4l2_subdev_format structure
*
* Set pad format for the output pad
*/
static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index e26e3f544054..30c63552556d 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1027,7 +1027,7 @@ static void tvp5150_set_default(v4l2_std_id std, struct v4l2_rect *crop)
static struct v4l2_rect *
tvp5150_get_pad_crop(struct tvp5150 *decoder,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
@@ -1035,7 +1035,7 @@ tvp5150_get_pad_crop(struct tvp5150 *decoder,
return &decoder->rect;
case V4L2_SUBDEV_FORMAT_TRY:
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- return v4l2_subdev_get_try_crop(&decoder->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&decoder->sd, sd_state, pad);
#else
return ERR_PTR(-EINVAL);
#endif
@@ -1045,7 +1045,7 @@ tvp5150_get_pad_crop(struct tvp5150 *decoder,
}
static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f;
@@ -1104,7 +1104,7 @@ static void tvp5150_set_hw_selection(struct v4l2_subdev *sd,
}
static int tvp5150_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1138,7 +1138,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
sel->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad, sel->which);
+ crop = tvp5150_get_pad_crop(decoder, sd_state, sel->pad, sel->which);
if (IS_ERR(crop))
return PTR_ERR(crop);
@@ -1156,7 +1156,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
}
static int tvp5150_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd);
@@ -1180,7 +1180,7 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd,
sel->r.height = TVP5150_V_MAX_OTHERS;
return 0;
case V4L2_SEL_TGT_CROP:
- crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad,
+ crop = tvp5150_get_pad_crop(decoder, sd_state, sel->pad,
sel->which);
if (IS_ERR(crop))
return PTR_ERR(crop);
@@ -1208,7 +1208,7 @@ static int tvp5150_get_mbus_config(struct v4l2_subdev *sd,
V4L2 subdev pad ops
****************************************************************************/
static int tvp5150_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std;
@@ -1229,7 +1229,7 @@ static int tvp5150_init_cfg(struct v4l2_subdev *sd,
}
static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
@@ -1240,7 +1240,7 @@ static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1448,11 +1448,9 @@ static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
TVP5150_MISC_CTL_CLOCK_OE;
if (enable) {
- ret = pm_runtime_get_sync(sd->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sd->dev);
+ ret = pm_runtime_resume_and_get(sd->dev);
+ if (ret < 0)
return ret;
- }
tvp5150_enable(sd);
@@ -1675,15 +1673,7 @@ err:
static int tvp5150_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- int ret;
-
- ret = pm_runtime_get_sync(sd->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(sd->dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(sd->dev);
}
static int tvp5150_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
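
The tvp5150 hunks also pick up the pm_runtime_resume_and_get() conversion that recurs through this series. pm_runtime_get_sync() bumps the usage counter even when the resume fails, so every caller had to remember a pm_runtime_put_noidle() on the error path; the newer helper drops the reference itself on failure, shrinking each call site to a plain error check. A sketch of the idiom:

    #include <linux/pm_runtime.h>

    static int example_resume_use(struct device *dev)
    {
            int ret;

            /* old idiom, kept for contrast: the usage count leaks
             * unless the caller undoes it:
             *      ret = pm_runtime_get_sync(dev);
             *      if (ret < 0) {
             *              pm_runtime_put_noidle(dev);
             *              return ret;
             *      }
             */

            ret = pm_runtime_resume_and_get(dev);  /* drops ref on failure */
            if (ret < 0)
                    return ret;

            /* ... use the device ... */

            pm_runtime_put(dev);
            return 0;
    }
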
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index ada4ec5ef782..2de18833b07b 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -797,7 +797,8 @@ static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
* Enumerate supported digital video formats for pad.
*/
static int
-tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Check requested format index is within range */
@@ -818,7 +819,8 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cf
* get video format for pad.
*/
static int
-tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tvp7002 *tvp7002 = to_tvp7002(sd);
@@ -841,10 +843,11 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cf
* set video format for pad.
*/
static int
-tvp7002_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return tvp7002_get_pad_format(sd, cfg, fmt);
+ return tvp7002_get_pad_format(sd, sd_state, fmt);
}
/* V4L2 core operation handlers */
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index a25a350b0ddc..09f5b3986928 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -720,7 +720,7 @@ tw9910_set_fmt_error:
}
static int tw9910_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -746,7 +746,7 @@ static int tw9910_get_selection(struct v4l2_subdev *sd,
}
static int tw9910_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -797,7 +797,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
}
static int tw9910_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -829,7 +829,7 @@ static int tw9910_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return tw9910_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -886,7 +886,7 @@ static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
};
static int tw9910_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0465832a4090..de12f38f347c 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -286,11 +286,9 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
__le16 buf;
int tmp;
- tmp = pm_runtime_get_sync(regmap_get_device(data->regmap));
- if (tmp < 0) {
- pm_runtime_put_noidle(regmap_get_device(data->regmap));
+ tmp = pm_runtime_resume_and_get(regmap_get_device(data->regmap));
+ if (tmp < 0)
return tmp;
- }
tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2);
pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
@@ -512,11 +510,9 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
if (data->kthread_vid_cap)
return 0;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto error_del_list;
- }
ret = data->chip->setup(data);
if (ret)
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index c292c92e37b9..29003dec6f2d 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -546,7 +546,7 @@ static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int vs6624_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(vs6624_formats))
@@ -557,7 +557,7 @@ static int vs6624_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vs6624_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -587,7 +587,7 @@ static int vs6624_set_fmt(struct v4l2_subdev *sd,
fmt->colorspace = vs6624_formats[index].colorspace;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
@@ -637,7 +637,7 @@ static int vs6624_set_fmt(struct v4l2_subdev *sd,
}
static int vs6624_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vs6624 *sensor = to_vs6624(sd);
diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
index 119037f0e686..2b7af42ba59c 100644
--- a/drivers/media/mc/Makefile
+++ b/drivers/media/mc/Makefile
@@ -3,7 +3,7 @@
mc-objs := mc-device.o mc-devnode.o mc-entity.o \
mc-request.o
-ifeq ($(CONFIG_USB),y)
+ifneq ($(CONFIG_USB),)
mc-objs += mc-dev-allocator.o
endif
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 678b99771cfa..f40f41977142 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -323,7 +323,7 @@ static void media_graph_walk_iter(struct media_graph *graph)
return;
}
- /* Get the entity in the other end of the link . */
+ /* Get the entity at the other end of the link. */
next = media_entity_other(entity, link);
/* Has the entity already been visited? */
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index c0782fd96c59..addb8f2d8939 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -414,7 +414,8 @@ int media_request_object_bind(struct media_request *req,
spin_lock_irqsave(&req->lock, flags);
- if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
goto unlock;
obj->req = req;
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 78dd35c9b65d..90972d6952f1 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -300,7 +300,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
}
if (astat & BT878_ARISCI) {
bt->finished_block = (stat & BT878_ARISCS) >> 28;
- tasklet_schedule(&bt->tasklet);
+ if (bt->tasklet.callback)
+ tasklet_schedule(&bt->tasklet);
break;
}
count++;
@@ -477,6 +478,9 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
btwrite(0, BT878_AINT_MASK);
bt878_num++;
+ if (!bt->tasklet.func)
+ tasklet_disable(&bt->tasklet);
+
return 0;
fail2:
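
The bt878 interrupt handler used to schedule its tasklet unconditionally, but the handler is only bound when the companion dvb-bt8xx module attaches; with nothing bound, scheduling would dereference a NULL callback. The fix guards the schedule and parks the tasklet at probe time when no handler is set. The guard, sketched with illustrative names:

    #include <linux/interrupt.h>

    /* hypothetical consumer binding a handler with the modern API */
    static void example_handler(struct tasklet_struct *t)
    {
            /* drain the finished DMA block here */
    }

    static void example_attach(struct tasklet_struct *t)
    {
            tasklet_setup(t, example_handler);
    }

    /* IRQ path: schedule only when a handler is actually bound */
    static void example_irq(struct tasklet_struct *t)
    {
            if (t->callback)
                    tasklet_schedule(t);
    }
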
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 1f62a9d8ea1d..0e9df8b35ac6 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3179,7 +3179,7 @@ static int radio_release(struct file *file)
btv->radio_user--;
- bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);
if (btv->radio_user == 0)
btv->has_radio_tuner = 0;
@@ -3260,7 +3260,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;
radio_enable(btv);
- bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);
return cmd.result;
}
@@ -3281,7 +3281,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.instance = file;
cmd.event_list = wait;
cmd.poll_mask = res;
- bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);
return cmd.poll_mask;
}
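
The bttv changes switch the SAA6588 RDS calls from the subdev core .ioctl op to the dedicated .command op, which has the same signature but is reserved for in-kernel commands. A hedged sketch of the receiving side (the example_ names are illustrative; SAA6588_CMD_READ is the command used above):

    #include <media/v4l2-subdev.h>
    #include <media/i2c/saa6588.h>

    static long example_command(struct v4l2_subdev *sd, unsigned int cmd,
                                void *arg)
    {
            switch (cmd) {
            case SAA6588_CMD_READ:
                    /* copy decoded RDS data into the caller's buffer */
                    return 0;
            default:
                    return -ENOIOCTLCMD;
            }
    }

    static const struct v4l2_subdev_core_ops example_core_ops = {
            .command = example_command,
    };
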
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 839503e654f4..16af58f2f93c 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -667,6 +667,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
return -ENOMEM;
cobalt->pci_dev = pci_dev;
cobalt->instance = i;
+ mutex_init(&cobalt->pci_lock);
retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev);
if (retval) {
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index bca68572b324..12c33e035904 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -251,6 +251,8 @@ struct cobalt {
int instance;
struct pci_dev *pci_dev;
struct v4l2_device v4l2_dev;
+ /* serialize PCI access in cobalt_s_bit_sysctrl() */
+ struct mutex pci_lock;
void __iomem *bar0, *bar1;
@@ -320,10 +322,13 @@ static inline u32 cobalt_g_sysctrl(struct cobalt *cobalt)
static inline void cobalt_s_bit_sysctrl(struct cobalt *cobalt,
int bit, int val)
{
- u32 ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
+ u32 ctrl;
+ mutex_lock(&cobalt->pci_lock);
+ ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
cobalt_write_bar1(cobalt, COBALT_SYS_CTRL_BASE,
(ctrl & ~(1UL << bit)) | (val << bit));
+ mutex_unlock(&cobalt->pci_lock);
}
static inline u32 cobalt_g_sysstat(struct cobalt *cobalt)
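
cobalt_s_bit_sysctrl() performs a read-modify-write on a shared control register; two concurrent callers can interleave between the read and the write and silently drop each other's update, hence the new pci_lock around the whole sequence. The general shape of the fix, with an in-memory array standing in for the real MMIO window:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_dev {
            struct mutex reg_lock;  /* serializes register read-modify-write */
            u32 regs[16];           /* stand-in for the real MMIO window */
    };

    static inline void example_s_bit(struct example_dev *dev, int reg,
                                     int bit, int val)
    {
            u32 ctrl;

            mutex_lock(&dev->reg_lock);
            ctrl = dev->regs[reg];
            dev->regs[reg] = (ctrl & ~(1U << bit)) | ((u32)val << bit);
            mutex_unlock(&dev->reg_lock);
    }
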
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 11cfe35fd730..76e5a504df8c 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -930,7 +930,7 @@ static int cx18_av_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int cx18_av_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index c83814c052d3..29fb1311e443 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -357,8 +357,8 @@ static int dsp_buffer_free(struct cx88_audio_dev *chip)
cx88_alsa_dma_unmap(chip);
cx88_alsa_dma_free(chip->buf);
if (risc->cpu)
- pci_free_consistent(chip->pci, risc->size,
- risc->cpu, risc->dma);
+ dma_free_coherent(&chip->pci->dev, risc->size, risc->cpu,
+ risc->dma);
kfree(chip->buf);
chip->buf = NULL;
@@ -868,7 +868,7 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
return err;
}
- err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
if (err) {
dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n", core->name);
cx88_core_put(core, pci);
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index fa4ca002ed19..d5da3bd5695d 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -685,7 +685,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
+ risc->dma);
memset(risc, 0, sizeof(*risc));
}
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 48c8a3429542..89d4d5a3ba34 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -152,7 +152,8 @@ int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
instructions += 4;
risc->size = instructions * 8;
risc->dma = 0;
- risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
+ risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
+ GFP_KERNEL);
if (!risc->cpu)
return -ENOMEM;
@@ -190,7 +191,8 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
instructions += 3;
risc->size = instructions * 8;
risc->dma = 0;
- risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
+ risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
+ GFP_KERNEL);
if (!risc->cpu)
return -ENOMEM;
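
The cx88 hunks in this series are a mechanical move off the legacy pci_* DMA wrappers onto the generic DMA API, which the kernel was removing tree-wide. The correspondence, sketched for a struct pci_dev *pci:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *example_alloc(struct pci_dev *pci, size_t size,
                               dma_addr_t *dma)
    {
            /* was: pci_zalloc_consistent(pci, size, dma) */
            return dma_alloc_coherent(&pci->dev, size, dma, GFP_KERNEL);
            /* dma_alloc_coherent() already returns zeroed memory */
    }

    static void example_free(struct pci_dev *pci, size_t size, void *cpu,
                             dma_addr_t dma)
    {
            /* was: pci_free_consistent(pci, size, cpu, dma) */
            dma_free_coherent(&pci->dev, size, cpu, dma);
    }

    static int example_set_mask(struct pci_dev *pci)
    {
            /* was: pci_set_dma_mask(pci, DMA_BIT_MASK(32)) */
            return dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
    }
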
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 202ff9e8c257..2087f2491c42 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -103,7 +103,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
+ risc->dma);
memset(risc, 0, sizeof(*risc));
}
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index a3edb548afde..680e1e3fe89b 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -226,8 +226,8 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
dev->ts_packet_size, dev->ts_packet_count, 0);
if (rc) {
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size,
- risc->cpu, risc->dma);
+ dma_free_coherent(&dev->pci->dev, risc->size,
+ risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
return rc;
}
@@ -386,7 +386,7 @@ static int cx8802_init_common(struct cx8802_dev *dev)
if (pci_enable_device(dev->pci))
return -EIO;
pci_set_master(dev->pci);
- err = pci_set_dma_mask(dev->pci, DMA_BIT_MASK(32));
+ err = dma_set_mask(&dev->pci->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("Oops: no 32bit PCI DMA ???\n");
return -EIO;
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 58489ea0c1da..a075788c64d4 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -159,7 +159,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
+ risc->dma);
memset(risc, 0, sizeof(*risc));
}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 8cffdacf6007..c17ad9f7d822 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -492,7 +492,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx88_riscmem *risc = &buf->risc;
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
+ risc->dma);
memset(risc, 0, sizeof(*risc));
}
@@ -1288,7 +1289,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
(unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("Oops: no 32bit PCI DMA ???\n");
goto fail_core;
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index e8511787c1e4..4657e99df033 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -173,14 +173,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
+ if (!adev->status.enabled) {
+ acpi_dev_put(adev);
continue;
+ }
if (bridge->n_sensors >= CIO2_NUM_PORTS) {
+ acpi_dev_put(adev);
dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
- cio2_bridge_unregister_sensors(bridge);
- ret = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
sensor = &bridge->sensors[bridge->n_sensors];
@@ -228,7 +229,6 @@ err_free_swnodes:
software_node_unregister_nodes(sensor->swnodes);
err_put_adev:
acpi_dev_put(sensor->adev);
-err_out:
return ret;
}
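
In cio2_bridge_connect_sensor() the iterator hands the loop body a counted reference on each matched ACPI device, so any path that leaves the body without storing the device must drop that reference; the old code leaked it on both early exits. A hedged sketch of the rule the fix follows (count_enabled() and the limit are hypothetical):

    #include <linux/acpi.h>
    #include <linux/errno.h>

    static int count_enabled(const char *hid, int limit)
    {
            struct acpi_device *adev;
            int n = 0;

            for_each_acpi_dev_match(adev, hid, NULL, -1) {
                    if (!adev->status.enabled) {
                            acpi_dev_put(adev);     /* drop before skipping */
                            continue;
                    }
                    if (n >= limit) {
                            acpi_dev_put(adev);     /* drop before bailing */
                            return -EINVAL;
                    }
                    n++;
            }
            return n;
    }
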
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index fecef85bd62e..47db0ee0fcbf 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -975,10 +975,9 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
cio2->cur_queue = q;
atomic_set(&q->frame_sequence, 0);
- r = pm_runtime_get_sync(&cio2->pci_dev->dev);
+ r = pm_runtime_resume_and_get(&cio2->pci_dev->dev);
if (r < 0) {
dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
- pm_runtime_put_noidle(&cio2->pci_dev->dev);
return r;
}
@@ -1200,11 +1199,11 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
};
/* Initialize try_fmt */
- format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
*format = fmt_default;
/* same as sink */
- format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
*format = fmt_default;
return 0;
@@ -1218,7 +1217,7 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
* return -EINVAL or zero on success
*/
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
@@ -1226,7 +1225,8 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&q->subdev_lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
else
fmt->format = q->subdev_fmt;
@@ -1243,7 +1243,7 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
@@ -1256,10 +1256,10 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
* source always propagates from sink
*/
if (fmt->pad == CIO2_PAD_SOURCE)
- return cio2_subdev_get_fmt(sd, cfg, fmt);
+ return cio2_subdev_get_fmt(sd, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
else
mbus = &q->subdev_fmt;
@@ -1284,7 +1284,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(formats))
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index c729e54692c4..e70502902b73 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -29,18 +29,6 @@ config VIDEO_IVTV
To compile this driver as a module, choose M here: the
module will be called ivtv.
-config VIDEO_IVTV_DEPRECATED_IOCTLS
- bool "enable the DVB ioctls abuse on ivtv driver"
- depends on VIDEO_IVTV
- help
- Enable the usage of the a DVB set of ioctls that were abused by
- IVTV driver for a while.
-
- Those ioctls were not needed for a long time, as IVTV implements
- the proper V4L2 ioctls since kernel 3.3.
-
- If unsure, say N.
-
config VIDEO_IVTV_ALSA
tristate "Conexant cx23415/cx23416 ALSA interface for PCM audio capture"
depends on VIDEO_IVTV && SND
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index e5efe525ad7b..4cf92dee6527 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -57,8 +57,6 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-#include <linux/dvb/video.h>
-#include <linux/dvb/audio.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 35dccb31174c..da19b2e95e6c 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -23,11 +23,6 @@
#include <media/i2c/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-event.h>
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
-#include <linux/compat.h>
-#include <linux/dvb/audio.h>
-#include <linux/dvb/video.h>
-#endif
u16 ivtv_service2vbi(int type)
{
@@ -1606,38 +1601,11 @@ static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder
return ivtv_video_command(itv, id, dec, true);
}
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
-static __inline__ void warn_deprecated_ioctl(const char *name)
-{
- pr_warn_once("warning: the %s ioctl is deprecated. Don't use it, as it will be removed soon\n",
- name);
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_video_event {
- __s32 type;
- /* unused, make sure to use atomic time for y2038 if it ever gets used */
- compat_long_t timestamp;
- union {
- video_size_t size;
- unsigned int frame_rate; /* in frames per 1000sec */
- unsigned char vsync_field; /* unknown/odd/even/progressive */
- } u;
-};
-#define VIDEO_GET_EVENT32 _IOR('o', 28, struct compat_video_event)
-#endif
-
-#endif
-
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
- int nonblocking = filp->f_flags & O_NONBLOCK;
- unsigned long iarg = (unsigned long)arg;
-#endif
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
@@ -1669,169 +1637,6 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
return ivtv_passthrough_mode(itv, *(int *)arg != 0);
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
- case VIDEO_GET_PTS: {
- s64 *pts = arg;
- s64 frame;
-
- warn_deprecated_ioctl("VIDEO_GET_PTS");
- if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
- *pts = s->dma_pts;
- break;
- }
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_g_pts_frame(itv, pts, &frame);
- }
-
- case VIDEO_GET_FRAME_COUNT: {
- s64 *frame = arg;
- s64 pts;
-
- warn_deprecated_ioctl("VIDEO_GET_FRAME_COUNT");
- if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
- *frame = 0;
- break;
- }
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_g_pts_frame(itv, &pts, frame);
- }
-
- case VIDEO_PLAY: {
- struct v4l2_decoder_cmd dc;
-
- warn_deprecated_ioctl("VIDEO_PLAY");
- memset(&dc, 0, sizeof(dc));
- dc.cmd = V4L2_DEC_CMD_START;
- return ivtv_video_command(itv, id, &dc, 0);
- }
-
- case VIDEO_STOP: {
- struct v4l2_decoder_cmd dc;
-
- warn_deprecated_ioctl("VIDEO_STOP");
- memset(&dc, 0, sizeof(dc));
- dc.cmd = V4L2_DEC_CMD_STOP;
- dc.flags = V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY;
- return ivtv_video_command(itv, id, &dc, 0);
- }
-
- case VIDEO_FREEZE: {
- struct v4l2_decoder_cmd dc;
-
- warn_deprecated_ioctl("VIDEO_FREEZE");
- memset(&dc, 0, sizeof(dc));
- dc.cmd = V4L2_DEC_CMD_PAUSE;
- return ivtv_video_command(itv, id, &dc, 0);
- }
-
- case VIDEO_CONTINUE: {
- struct v4l2_decoder_cmd dc;
-
- warn_deprecated_ioctl("VIDEO_CONTINUE");
- memset(&dc, 0, sizeof(dc));
- dc.cmd = V4L2_DEC_CMD_RESUME;
- return ivtv_video_command(itv, id, &dc, 0);
- }
-
- case VIDEO_COMMAND:
- case VIDEO_TRY_COMMAND: {
- /* Note: struct v4l2_decoder_cmd has the same layout as
- struct video_command */
- struct v4l2_decoder_cmd *dc = arg;
- int try = (cmd == VIDEO_TRY_COMMAND);
-
- if (try)
- warn_deprecated_ioctl("VIDEO_TRY_COMMAND");
- else
- warn_deprecated_ioctl("VIDEO_COMMAND");
- return ivtv_video_command(itv, id, dc, try);
- }
-
-#ifdef CONFIG_COMPAT
- case VIDEO_GET_EVENT32:
-#endif
- case VIDEO_GET_EVENT: {
-#ifdef CONFIG_COMPAT
- struct compat_video_event *ev32 = arg;
-#endif
- struct video_event *ev = arg;
- DEFINE_WAIT(wait);
-
- warn_deprecated_ioctl("VIDEO_GET_EVENT");
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- memset(ev, 0, sizeof(*ev));
- set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
-
- while (1) {
- if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
- ev->type = VIDEO_EVENT_DECODER_STOPPED;
- else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) {
- unsigned char vsync_field;
-
- ev->type = VIDEO_EVENT_VSYNC;
- vsync_field = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ?
- VIDEO_VSYNC_FIELD_ODD : VIDEO_VSYNC_FIELD_EVEN;
- if (itv->output_mode == OUT_UDMA_YUV &&
- (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) ==
- IVTV_YUV_MODE_PROGRESSIVE) {
- vsync_field = VIDEO_VSYNC_FIELD_PROGRESSIVE;
- }
-#ifdef CONFIG_COMPAT
- if (cmd == VIDEO_GET_EVENT32)
- ev32->u.vsync_field = vsync_field;
- else
-#endif
- ev->u.vsync_field = vsync_field;
- }
- if (ev->type)
- return 0;
- if (nonblocking)
- return -EAGAIN;
- /* Wait for event. Note that serialize_lock is locked,
- so to allow other processes to access the driver while
- we are waiting unlock first and later lock again. */
- mutex_unlock(&itv->serialize_lock);
- prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE);
- if (!test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags) &&
- !test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags))
- schedule();
- finish_wait(&itv->event_waitq, &wait);
- mutex_lock(&itv->serialize_lock);
- if (signal_pending(current)) {
- /* return if a signal was received */
- IVTV_DEBUG_INFO("User stopped wait for event\n");
- return -EINTR;
- }
- }
- break;
- }
-
- case VIDEO_SELECT_SOURCE:
- warn_deprecated_ioctl("VIDEO_SELECT_SOURCE");
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX);
-
- case AUDIO_SET_MUTE:
- warn_deprecated_ioctl("AUDIO_SET_MUTE");
- itv->speed_mute_audio = iarg;
- return 0;
-
- case AUDIO_CHANNEL_SELECT:
- warn_deprecated_ioctl("AUDIO_CHANNEL_SELECT");
- if (iarg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1);
-
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
- warn_deprecated_ioctl("AUDIO_BILINGUAL_CHANNEL_SELECT");
- if (iarg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1);
-#endif
default:
return -EINVAL;
}
@@ -1846,17 +1651,6 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
if (!valid_prio) {
switch (cmd) {
case IVTV_IOC_PASSTHROUGH_MODE:
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
- case VIDEO_PLAY:
- case VIDEO_STOP:
- case VIDEO_FREEZE:
- case VIDEO_CONTINUE:
- case VIDEO_COMMAND:
- case VIDEO_SELECT_SOURCE:
- case AUDIO_SET_MUTE:
- case AUDIO_CHANNEL_SELECT:
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
-#endif
return -EBUSY;
}
}
@@ -1874,21 +1668,6 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
case IVTV_IOC_DMA_FRAME:
case IVTV_IOC_PASSTHROUGH_MODE:
-#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
- case VIDEO_GET_PTS:
- case VIDEO_GET_FRAME_COUNT:
- case VIDEO_GET_EVENT:
- case VIDEO_PLAY:
- case VIDEO_STOP:
- case VIDEO_FREEZE:
- case VIDEO_CONTINUE:
- case VIDEO_COMMAND:
- case VIDEO_TRY_COMMAND:
- case VIDEO_SELECT_SOURCE:
- case AUDIO_SET_MUTE:
- case AUDIO_CHANNEL_SELECT:
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
-#endif
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index efb757d5168a..47158ab3956b 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1031,7 +1031,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->media_dev = kzalloc(sizeof(*dev->media_dev), GFP_KERNEL);
if (!dev->media_dev) {
err = -ENOMEM;
- goto fail0;
+ goto err_free_dev;
}
media_device_pci_init(dev->media_dev, pci_dev, dev->name);
dev->v4l2_dev.mdev = dev->media_dev;
@@ -1039,13 +1039,13 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
- goto fail0;
+ goto err_free_dev;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
- goto fail1;
+ goto err_v4l2_unregister;
}
/* pci quirks */
@@ -1095,7 +1095,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
- goto fail1;
+ goto err_v4l2_unregister;
}
/* board config */
@@ -1129,7 +1129,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = -EBUSY;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
dev->name,(unsigned long long)pci_resource_start(pci_dev,0));
- goto fail1;
+ goto err_v4l2_unregister;
}
dev->lmmio = ioremap(pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
@@ -1138,7 +1138,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = -EIO;
pr_err("%s: can't ioremap() MMIO memory\n",
dev->name);
- goto fail2;
+ goto err_release_mem_reg;
}
/* initialize hardware #1 */
@@ -1151,7 +1151,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
- goto fail3;
+ goto err_iounmap;
}
/* wait a bit, register i2c bus */
@@ -1217,7 +1217,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_info("%s: can't register video device\n",
dev->name);
- goto fail4;
+ goto err_unregister_video;
}
pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
@@ -1234,7 +1234,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto err_unregister_video;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
@@ -1248,7 +1248,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto err_unregister_video;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
@@ -1259,7 +1259,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = v4l2_mc_create_media_graph(dev->media_dev);
if (err) {
pr_err("failed to create media graph\n");
- goto fail4;
+ goto err_unregister_video;
}
#endif
/* everything worked */
@@ -1277,25 +1277,28 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
*/
#ifdef CONFIG_MEDIA_CONTROLLER
err = media_device_register(dev->media_dev);
- if (err)
- goto fail4;
+ if (err) {
+ media_device_cleanup(dev->media_dev);
+ goto err_unregister_video;
+ }
#endif
return 0;
- fail4:
+err_unregister_video:
saa7134_unregister_video(dev);
+ list_del(&dev->devlist);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
- fail3:
+err_iounmap:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
- fail2:
+err_release_mem_reg:
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
- fail1:
+err_v4l2_unregister:
v4l2_device_unregister(&dev->v4l2_dev);
- fail0:
+err_free_dev:
#ifdef CONFIG_MEDIA_CONTROLLER
kfree(dev->media_dev);
#endif
@@ -1524,7 +1527,6 @@ static struct pci_driver saa7134_pci_driver = {
static int __init saa7134_init(void)
{
- INIT_LIST_HEAD(&saa7134_devlist);
pr_info("saa7130/34: v4l2 driver version %s loaded\n",
SAA7134_VERSION);
return pci_register_driver(&saa7134_pci_driver);
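
Dropping INIT_LIST_HEAD() from saa7134_init() is safe assuming the list is declared with the LIST_HEAD() macro, which produces a valid empty list at build time; redoing the initialization at module init was redundant. The same file's probe unwind path also gains descriptive labels (err_free_dev, err_v4l2_unregister, ...) in place of the numbered fail labels. Minimal illustration of the static initializer:

    #include <linux/list.h>

    static LIST_HEAD(example_devlist);  /* already a valid empty list */

    /* no INIT_LIST_HEAD(&example_devlist) needed in the init path */
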
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 76a37fbd8458..aafbb34765b0 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -138,12 +138,15 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
- saa_call_all(dev, pad, set_fmt, &pad_cfg, &format);
+ saa_call_all(dev, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(&f->fmt.pix, &format.format);
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
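
Callers that negotiated TRY formats by passing a bare pad config now wrap it in a stack-allocated state, as empress_try_fmt_vid_cap() does above; the pads pointer simply aims at the local config, so a callee's sd_state->pads->try_fmt accesses keep working. The caller-side shim, sketched for a single-pad negotiation:

    #include <media/v4l2-subdev.h>

    static int example_try_fmt(struct v4l2_subdev *sd)
    {
            struct v4l2_subdev_pad_config pad_cfg = {};
            struct v4l2_subdev_state pad_state = {
                    .pads = &pad_cfg,
            };
            struct v4l2_subdev_format format = {
                    .which = V4L2_SUBDEV_FORMAT_TRY,
            };

            /* the pad op sees a one-pad state backed by the local config */
            return v4l2_subdev_call(sd, pad, set_fmt, &pad_state, &format);
    }
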
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index aa0895d2d735..9e0c442abc76 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -871,7 +871,7 @@ void saa7134_enable_i2s(struct saa7134_dev *dev)
switch (dev->pci->device) {
case PCI_DEVICE_ID_PHILIPS_SAA7133:
case PCI_DEVICE_ID_PHILIPS_SAA7135:
- /* Set I2S format (SONY)  */
+ /* Set I2S format (SONY) */
saa_writeb(SAA7133_I2S_AUDIO_CONTROL, 0x00);
/* Start I2S */
saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x11);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 0f9d6b9edb90..374c8e1087de 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1181,7 +1181,7 @@ static int video_release(struct file *file)
saa_call_all(dev, tuner, standby);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
mutex_unlock(&dev->lock);
return 0;
@@ -1200,7 +1200,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;
mutex_lock(&dev->lock);
- saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
mutex_unlock(&dev->lock);
return cmd.result;
@@ -1216,7 +1216,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.event_list = wait;
cmd.poll_mask = 0;
mutex_lock(&dev->lock);
- saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
mutex_unlock(&dev->lock);
return rc | cmd.poll_mask;
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig
index 8a362ee9105f..65a6832a6b96 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/media/pci/ttpci/Kconfig
@@ -1,56 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config DVB_AV7110_IR
- bool
- depends on RC_CORE=y || RC_CORE = DVB_AV7110
- default DVB_AV7110
-
-config DVB_AV7110
- tristate "AV7110 cards"
- depends on DVB_CORE && PCI && I2C
- select TTPCI_EEPROM
- select VIDEO_SAA7146_VV
- depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
- select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA8083 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_SP8870 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_STV0297 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_L64781 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for SAA7146 and AV7110 based DVB cards as produced
- by Fujitsu-Siemens, Technotrend, Hauppauge and others.
-
- This driver only supports the fullfeatured cards with
- onboard MPEG2 decoder.
-
- This driver needs an external firmware. Please use the script
- "<kerneldir>/scripts/get_dvb_firmware av7110" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware
- or /lib/firmware (depending on configuration of firmware hotplug).
-
- Alternatively, you can download the file and use the kernel's
- EXTRA_FIRMWARE configuration option to build it into your
- kernel image by adding the filename to the EXTRA_FIRMWARE
- configuration option string.
-
- Say Y if you own such a card and want to use it.
-
-config DVB_AV7110_OSD
- bool "AV7110 OSD support"
- depends on DVB_AV7110
- default y if DVB_AV7110=y || DVB_AV7110=m
- help
- The AV7110 firmware provides some code to generate an OnScreenDisplay
- on the video output. This is kind of nonstandard and not guaranteed to
- be maintained.
-
- Anyway, some popular DVB software like VDR uses this OSD to render
- its menus, so say Y if you want to use this software.
-
- All other people say N.
-
config DVB_BUDGET_CORE
tristate "SAA7146 DVB cards (aka Budget, Nova-PCI)"
depends on DVB_CORE && PCI && I2C
@@ -136,25 +84,3 @@ config DVB_BUDGET_AV
To compile this driver as a module, choose M here: the
module will be called budget-av.
-
-config DVB_BUDGET_PATCH
- tristate "AV7110 cards with Budget Patch"
- depends on DVB_BUDGET_CORE && I2C
- depends on DVB_AV7110
- select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_TDA8083 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for Budget Patch (full TS) modification on
- SAA7146+AV7110 based cards (DVB-S cards). This
- driver doesn't use onboard MPEG2 decoder. The
- card is driven in Budget-only mode. Card is
- required to have loaded firmware to tune properly.
- Firmware can be loaded by insertion and removal of
- standard AV7110 driver prior to loading this
- driver.
-
- Say Y if you own such a card and want to use it.
-
- To compile this driver as a module, choose M here: the
- module will be called budget-patch.
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 9b44c479fcdd..b0708f6e40cc 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -1,22 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel SAA7146 FULL TS DVB device driver
-# and the AV7110 DVB device driver
#
-dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o dvb_filter.o
-
-ifdef CONFIG_DVB_AV7110_IR
-dvb-ttpci-objs += av7110_ir.o
-endif
-
-obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o
obj-$(CONFIG_DVB_BUDGET_CORE) += budget-core.o
obj-$(CONFIG_DVB_BUDGET) += budget.o
obj-$(CONFIG_DVB_BUDGET_AV) += budget-av.o
obj-$(CONFIG_DVB_BUDGET_CI) += budget-ci.o
-obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-patch.o
-obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/common
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index d405eea5c37f..5d5796f24469 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -180,7 +180,8 @@ static void vpeirq(struct tasklet_struct *t)
u32 count;
/* Ensure streamed PCI data is synced to CPU */
- pci_dma_sync_sg_for_cpu(budget->dev->pci, budget->pt.slist, budget->pt.nents, PCI_DMA_FROMDEVICE);
+ dma_sync_sg_for_cpu(&budget->dev->pci->dev, budget->pt.slist,
+ budget->pt.nents, DMA_FROM_DEVICE);
/* nearest lower position divisible by 188 */
newdma -= newdma % 188;
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index a7463daf39f1..bd87432e6cde 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -8,7 +8,6 @@
#include <media/demux.h>
#include <media/dvb_demux.h>
#include <media/dmxdev.h>
-#include "dvb_filter.h"
#include <media/dvb_net.h>
#include <linux/module.h>
@@ -28,6 +27,7 @@ extern int budget_debug;
__func__, ##arg); \
} while (0)
+#define TS_SIZE 188
struct budget_info {
char *name;
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
index a74f30f2f78e..a26a439c4dc0 100644
--- a/drivers/media/pci/tw5864/tw5864-reg.h
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -289,13 +289,13 @@
/* OSD enable bit for each channel */
#define TW5864_DSP_OSD_ENABLE 0x0228
-/* 0x0280 ~ 0x029c – Motion Vector for 1st 4x4 Block, e.g., 80 (X), 84 (Y) */
+/* 0x0280 ~ 0x029c - Motion Vector for 1st 4x4 Block, e.g., 80 (X), 84 (Y) */
#define TW5864_ME_MV_VEC1 0x0280
-/* 0x02a0 ~ 0x02bc – Motion Vector for 2nd 4x4 Block, e.g., A0 (X), A4 (Y) */
+/* 0x02a0 ~ 0x02bc - Motion Vector for 2nd 4x4 Block, e.g., A0 (X), A4 (Y) */
#define TW5864_ME_MV_VEC2 0x02a0
-/* 0x02c0 ~ 0x02dc – Motion Vector for 3rd 4x4 Block, e.g., C0 (X), C4 (Y) */
+/* 0x02c0 ~ 0x02dc - Motion Vector for 3rd 4x4 Block, e.g., C0 (X), C4 (Y) */
#define TW5864_ME_MV_VEC3 0x02c0
-/* 0x02e0 ~ 0x02fc – Motion Vector for 4th 4x4 Block, e.g., E0 (X), E4 (Y) */
+/* 0x02e0 ~ 0x02fc - Motion Vector for 4th 4x4 Block, e.g., E0 (X), E4 (Y) */
#define TW5864_ME_MV_VEC4 0x02e0
/*
@@ -462,13 +462,13 @@
#define TW5864_VLC_BUF 0x100c
/* Define controls in register TW5864_VLC_BUF */
-/* VLC BK0 full status, write ‘1’ to clear */
+/* VLC BK0 full status, write '1' to clear */
#define TW5864_VLC_BK0_FULL BIT(0)
-/* VLC BK1 full status, write ‘1’ to clear */
+/* VLC BK1 full status, write '1' to clear */
#define TW5864_VLC_BK1_FULL BIT(1)
-/* VLC end slice status, write ‘1’ to clear */
+/* VLC end slice status, write '1' to clear */
#define TW5864_VLC_END_SLICE BIT(2)
-/* VLC Buffer overflow status, write ‘1’ to clear */
+/* VLC Buffer overflow status, write '1' to clear */
#define TW5864_DSP_RD_OF BIT(3)
/* VLC string length in either buffer 0 or 1 at end of frame */
#define TW5864_VLC_STREAM_LEN_SHIFT 4
@@ -476,7 +476,7 @@
/* [15:0] Total coefficient number in a frame */
#define TW5864_TOTAL_COEF_NO 0x1010
-/* [0] VLC Encoder Interrupt. Write ‘1’ to clear */
+/* [0] VLC Encoder Interrupt. Write '1' to clear */
#define TW5864_VLC_DSP_INTR 0x1014
/* [31:0] VLC stream CRC checksum */
#define TW5864_VLC_STREAM_CRC 0x1018
@@ -494,7 +494,7 @@
*/
#define TW5864_VLC_RD_BRST BIT(1)
-/* 0x2000 ~ 0x2ffc -- H264 Stream Memory Map */
+/* 0x2000 ~ 0x2ffc - H264 Stream Memory Map */
/*
* A word is 4 bytes. I.e.,
* VLC_STREAM_MEM[0] address: 0x2000
@@ -506,7 +506,7 @@
#define TW5864_VLC_STREAM_MEM_MAX_OFFSET 0x3ff
#define TW5864_VLC_STREAM_MEM(offset) (TW5864_VLC_STREAM_MEM_START + 4 * offset)
-/* 0x4000 ~ 0x4ffc -- Audio Register Map */
+/* 0x4000 ~ 0x4ffc - Audio Register Map */
/* [31:0] config 1ms cnt = Realtime clk/1000 */
#define TW5864_CFG_1MS_CNT 0x4000
@@ -688,10 +688,10 @@
/*
* [1:0]
- * 2’b00 phase set to 180 degree
- * 2’b01 phase set to 270 degree
- * 2’b10 phase set to 0 degree
- * 2’b11 phase set to 90 degree
+ * 2'b00 phase set to 180 degree
+ * 2'b01 phase set to 270 degree
+ * 2'b10 phase set to 0 degree
+ * 2'b11 phase set to 90 degree
*/
#define TW5864_I2C_PHASE_CFG 0x800c
@@ -826,7 +826,7 @@
/* SPLL_IREF, SPLL_LPX4, SPLL_CPX4, SPLL_PD, SPLL_DBG */
#define TW5864_SPLL 0x8028
-/* 0x8800 ~ 0x88fc -- Interrupt Register Map */
+/* 0x8800 ~ 0x88fc - Interrupt Register Map */
/*
* Trigger mode of interrupt source 0 ~ 15
* 1 Edge trigger mode
@@ -909,7 +909,7 @@
#define TW5864_INTR_I2C_DONE BIT(25)
#define TW5864_INTR_AD BIT(26)
-/* 0x9000 ~ 0x920c -- Video Capture (VIF) Register Map */
+/* 0x9000 ~ 0x920c - Video Capture (VIF) Register Map */
/*
* H264EN_CH_STATUS[n] Status of Vsync synchronized H264EN_CH_EN (Read Only)
* 1 Channel Enabled
@@ -1009,7 +1009,7 @@
/* GPIO Output Enable of Group n */
#define TW5864_GPIO_OEN (0xff << 8)
-/* 0xa000 ~ 0xa8ff – DDR Controller Register Map */
+/* 0xa000 ~ 0xa8ff - DDR Controller Register Map */
/* DDR Controller A */
/*
* [2:0] Data valid counter after read command to DDR. This is the delay value
@@ -1111,7 +1111,7 @@
*/
#define TW5864_DDR_B_OFFSET 0x0800
-/* 0xb004 ~ 0xb018 – HW version/ARB12 Register Map */
+/* 0xb004 ~ 0xb018 - HW version/ARB12 Register Map */
/* [15:0] Default is C013 */
#define TW5864_HW_VERSION 0xb004
@@ -1145,7 +1145,7 @@
/* ARB12 maximum value of time out counter (default 15"h1FF) */
#define TW5864_ARB12_TIME_OUT_CNT 0x7fff
-/* 0xb800 ~ 0xb80c -- Indirect Access Register Map */
+/* 0xb800 ~ 0xb80c - Indirect Access Register Map */
/*
* Spec says:
* In order to access the indirect register space, the following procedure is
@@ -1177,7 +1177,7 @@
/* [31:0] Data used to read/write indirect register space */
#define TW5864_IND_DATA 0xb804
-/* 0xc000 ~ 0xc7fc -- Preview Register Map */
+/* 0xc000 ~ 0xc7fc - Preview Register Map */
/* Mostly skipped this section. */
/*
* [15:0] Status of Vsync Synchronized PCI_PV_CH_EN (Read Only)
@@ -1192,12 +1192,12 @@
*/
#define TW5864_PCI_PV_CH_EN 0xc004
-/* 0xc800 ~ 0xc804 -- JPEG Capture Register Map */
+/* 0xc800 ~ 0xc804 - JPEG Capture Register Map */
/* Skipped. */
-/* 0xd000 ~ 0xd0fc -- JPEG Control Register Map */
+/* 0xd000 ~ 0xd0fc - JPEG Control Register Map */
/* Skipped. */
-/* 0xe000 ~ 0xfc04 – Motion Vector Register Map */
+/* 0xe000 ~ 0xfc04 - Motion Vector Register Map */
/* ME Motion Vector data (Four Byte Each) 0xe000 ~ 0xe7fc */
#define TW5864_ME_MV_VEC_START 0xe000
@@ -1231,7 +1231,7 @@
*/
#define TW5864_MPI_DDR_SEL2 BIT(15)
-/* 0x18000 ~ 0x181fc – PCI Master/Slave Control Map */
+/* 0x18000 ~ 0x181fc - PCI Master/Slave Control Map */
#define TW5864_PCI_INTR_STATUS 0x18000
/* Define controls in register TW5864_PCI_INTR_STATUS */
/* vlc done */
@@ -1400,11 +1400,11 @@
#define TW5864_VLC_STREAM_BASE_ADDR 0x18080
/* MV stream base address */
#define TW5864_MV_STREAM_BASE_ADDR 0x18084
-/* 0x180a0 – 0x180bc: audio burst base address. Skipped. */
-/* 0x180c0 ~ 0x180dc – JPEG Push Mode Buffer Base Address. Skipped. */
-/* 0x18100 – 0x1817c: preview burst base address. Skipped. */
+/* 0x180a0 ~ 0x180bc: audio burst base address. Skipped. */
+/* 0x180c0 ~ 0x180dc: JPEG Push Mode Buffer Base Address. Skipped. */
+/* 0x18100 ~ 0x1817c: preview burst base address. Skipped. */
-/* 0x80000 ~ 0x87fff -- DDR Burst RW Register Map */
+/* 0x80000 ~ 0x87fff - DDR Burst RW Register Map */
#define TW5864_DDR_CTL 0x80000
/* Define controls in register TW5864_DDR_CTL */
#define TW5864_BRST_LENGTH_SHIFT 2
@@ -1516,7 +1516,7 @@
* Vertical Sharpness Control. Writable.
* 0 = None (default)
* 7 = Highest
- * **Note: VSHP must be set to ‘0’ if COMB = 0
+ * **Note: VSHP must be set to '0' if COMB = 0
*/
#define TW5864_INDIR_VIN_1_VSHP 0x07
@@ -1595,7 +1595,7 @@
#define TW5864_INDIR_VIN_9_CNTRST(channel) (0x009 + channel * 0x010)
/*
- * These bits control the brightness. They have value of –128 to 127 in 2's
+ * These bits control the brightness. They have value of -128 to 127 in 2's
* complement form. Positive value increases brightness. A value 0 has no
* effect on the data. The default is 00h.
*/
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index eedc14aafb32..73ce083c2fc6 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
+obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel/
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32/
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.c b/drivers/media/platform/allegro-dvt/nal-h264.c
index 94dd9266d850..0ab2fcbee1b9 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.c
+++ b/drivers/media/platform/allegro-dvt/nal-h264.c
@@ -25,7 +25,7 @@
#include "nal-rbsp.h"
/*
- * See Rec. ITU-T H.264 (04/2017) Table 7-1 – NAL unit type codes, syntax
+ * See Rec. ITU-T H.264 (04/2017) Table 7-1 - NAL unit type codes, syntax
* element categories, and NAL unit type classes
*/
enum nal_unit_type {
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.c b/drivers/media/platform/allegro-dvt/nal-hevc.c
index 5db540c69bfe..15a352e45831 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.c
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.c
@@ -25,7 +25,7 @@
#include "nal-rbsp.h"
/*
- * See Rec. ITU-T H.265 (02/2018) Table 7-1 – NAL unit type codes and NAL unit
+ * See Rec. ITU-T H.265 (02/2018) Table 7-1 - NAL unit type codes and NAL unit
* type classes
*/
enum nal_unit_type {
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 6cdc77dda0e4..1c9cb9e05fdf 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1021,7 +1021,9 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
if (ret)
return ret;
- pm_runtime_get_sync(vpfe->pdev);
+ ret = pm_runtime_resume_and_get(vpfe->pdev);
+ if (ret < 0)
+ return ret;
vpfe_config_enable(&vpfe->ccdc, 1);
@@ -2443,7 +2445,11 @@ static int vpfe_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* for now just enable it here instead of waiting for the open */
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Unable to resume device.\n");
+ goto probe_out_v4l2_unregister;
+ }
vpfe_ccdc_config_defaults(ccdc);
@@ -2530,6 +2536,11 @@ static int vpfe_suspend(struct device *dev)
/* only do full suspend if streaming has started */
if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ /*
+ * Ignore RPM resume errors here, as it is already too late.
+ * A check like this should happen earlier, either at
+ * open() or just before streaming starts.
+ */
pm_runtime_get_sync(dev);
vpfe_config_enable(ccdc, 1);
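All of the pm_runtime conversions in this series follow the same shape. A minimal sketch of the pattern (dev stands for any struct device here):

/*
 * Unlike pm_runtime_get_sync(), pm_runtime_resume_and_get() drops the
 * usage-count reference itself on failure, so the error path needs no
 * matching pm_runtime_put().
 */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
	return ret;		/* device not resumed, refcount already balanced */

/* ... touch the hardware ... */

pm_runtime_put(dev);		/* balance the successful get */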
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 1850fe7f9360..99b51213f871 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -12,6 +12,17 @@ config VIDEO_ATMEL_ISC
This module makes the ATMEL Image Sensor Controller available
as a v4l2 device.
+config VIDEO_ATMEL_XISC
+ tristate "ATMEL eXtended Image Sensor Controller (XISC) support"
+ depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
+ depends on ARCH_AT91 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ select V4L2_FWNODE
+ help
+ This module makes the ATMEL eXtended Image Sensor Controller
+ available as a v4l2 device.
+
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_V4L2 && OF
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
index 2dba38994a70..c5c01556c653 100644
--- a/drivers/media/platform/atmel/Makefile
+++ b/drivers/media/platform/atmel/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
+atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
+obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index fe3ec8d0eaee..19daa49bf604 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -45,179 +45,6 @@ module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
"Sensor is preferred to output the specified format (1-on 0-off), default 1");
-/* This is a list of the formats that the ISC can *output* */
-const struct isc_format controller_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_ARGB444,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB555,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- },
- {
- .fourcc = V4L2_PIX_FMT_ABGR32,
- },
- {
- .fourcc = V4L2_PIX_FMT_XBGR32,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUV422P,
- },
- {
- .fourcc = V4L2_PIX_FMT_GREY,
- },
- {
- .fourcc = V4L2_PIX_FMT_Y10,
- },
-};
-
-/* This is a list of formats that the ISC can receive as *input* */
-struct isc_format formats_list[] = {
- {
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGBRG8,
- .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_GBGB,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGRBG8,
- .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_GRGR,
- },
- {
- .fourcc = V4L2_PIX_FMT_SRGGB8,
- .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- .cfa_baycfg = ISC_BAY_CFG_RGRG,
- },
- {
- .fourcc = V4L2_PIX_FMT_SBGGR10,
- .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- .cfa_baycfg = ISC_BAY_CFG_RGRG,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGBRG10,
- .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- .cfa_baycfg = ISC_BAY_CFG_GBGB,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGRBG10,
- .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- .cfa_baycfg = ISC_BAY_CFG_GRGR,
- },
- {
- .fourcc = V4L2_PIX_FMT_SRGGB10,
- .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- .cfa_baycfg = ISC_BAY_CFG_RGRG,
- },
- {
- .fourcc = V4L2_PIX_FMT_SBGGR12,
- .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
- .cfa_baycfg = ISC_BAY_CFG_BGBG,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGBRG12,
- .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
- .cfa_baycfg = ISC_BAY_CFG_GBGB,
- },
- {
- .fourcc = V4L2_PIX_FMT_SGRBG12,
- .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
- .cfa_baycfg = ISC_BAY_CFG_GRGR,
- },
- {
- .fourcc = V4L2_PIX_FMT_SRGGB12,
- .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
- .cfa_baycfg = ISC_BAY_CFG_RGRG,
- },
- {
- .fourcc = V4L2_PIX_FMT_GREY,
- .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
- },
- {
- .fourcc = V4L2_PIX_FMT_Y10,
- .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
- .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
- },
-
-};
-
-/* Gamma table with gamma 1/2.2 */
-const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
- /* 0 --> gamma 1/1.8 */
- { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
- 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
- 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
- 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
- 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
- 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
- 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
- 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
- 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
- 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
- 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
-
- /* 1 --> gamma 1/2 */
- { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
- 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
- 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
- 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
- 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
- 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
- 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
- 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
- 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
- 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
- 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
-
- /* 2 --> gamma 1/2.2 */
- { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
- 0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
- 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
- 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
- 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
- 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
- 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
- 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
- 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
- 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
- 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
-};
-
#define ISC_IS_FORMAT_RAW(mbus_code) \
(((mbus_code) & 0xf000) == 0x3000)
@@ -294,9 +121,13 @@ static int isc_wait_clk_stable(struct clk_hw *hw)
static int isc_clk_prepare(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
+ int ret;
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_get_sync(isc_clk->dev);
+ if (isc_clk->id == ISC_ISPCK) {
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
+ }
return isc_wait_clk_stable(hw);
}
@@ -319,8 +150,8 @@ static int isc_clk_enable(struct clk_hw *hw)
unsigned long flags;
unsigned int status;
- dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
- __func__, isc_clk->div, isc_clk->parent_id);
+ dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
+ __func__, id, isc_clk->div, isc_clk->parent_id);
spin_lock_irqsave(&isc_clk->lock, flags);
regmap_update_bits(regmap, ISC_CLKCFG,
@@ -353,9 +184,13 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 status;
+ int ret;
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_get_sync(isc_clk->dev);
+ if (isc_clk->id == ISC_ISPCK) {
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
+ }
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
@@ -635,16 +470,20 @@ static void isc_start_dma(struct isc_device *isc)
ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
- regmap_write(regmap, ISC_DAD0, addr0);
+ regmap_write(regmap, ISC_DAD0 + isc->offsets.dma, addr0);
switch (isc->config.fourcc) {
case V4L2_PIX_FMT_YUV420:
- regmap_write(regmap, ISC_DAD1, addr0 + (sizeimage * 2) / 3);
- regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 5) / 6);
+ regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
+ addr0 + (sizeimage * 2) / 3);
+ regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
+ addr0 + (sizeimage * 5) / 6);
break;
case V4L2_PIX_FMT_YUV422P:
- regmap_write(regmap, ISC_DAD1, addr0 + sizeimage / 2);
- regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 3) / 4);
+ regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
+ addr0 + sizeimage / 2);
+ regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
+ addr0 + (sizeimage * 3) / 4);
break;
default:
break;
@@ -652,7 +491,8 @@ static void isc_start_dma(struct isc_device *isc)
dctrl_dview = isc->config.dctrl_dview;
- regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
+ regmap_write(regmap, ISC_DCTRL + isc->offsets.dma,
+ dctrl_dview | ISC_DCTRL_IE_IS);
spin_lock(&isc->awb_lock);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
spin_unlock(&isc->awb_lock);
@@ -683,21 +523,16 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
- gamma = &isc_gamma_table[ctrls->gamma_index][0];
+ gamma = &isc->gamma_table[ctrls->gamma_index][0];
regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES);
regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES);
regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES);
- /* Convert RGB to YUV */
- regmap_write(regmap, ISC_CSC_YR_YG, 0x42 | (0x81 << 16));
- regmap_write(regmap, ISC_CSC_YB_OY, 0x19 | (0x10 << 16));
- regmap_write(regmap, ISC_CSC_CBR_CBG, 0xFDA | (0xFB6 << 16));
- regmap_write(regmap, ISC_CSC_CBB_OCB, 0x70 | (0x80 << 16));
- regmap_write(regmap, ISC_CSC_CRR_CRG, 0x70 | (0xFA2 << 16));
- regmap_write(regmap, ISC_CSC_CRB_OCR, 0xFEE | (0x80 << 16));
-
- regmap_write(regmap, ISC_CBC_BRIGHT, ctrls->brightness);
- regmap_write(regmap, ISC_CBC_CONTRAST, ctrls->contrast);
+ isc->config_dpc(isc);
+ isc->config_csc(isc);
+ isc->config_cbc(isc);
+ isc->config_cc(isc);
+ isc->config_gam(isc);
}
static int isc_update_profile(struct isc_device *isc)
@@ -728,12 +563,13 @@ static void isc_set_histogram(struct isc_device *isc, bool enable)
struct isc_ctrls *ctrls = &isc->ctrls;
if (enable) {
- regmap_write(regmap, ISC_HIS_CFG,
+ regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
ISC_HIS_CFG_MODE_GR |
(isc->config.sd_format->cfa_baycfg
<< ISC_HIS_CFG_BAYSEL_SHIFT) |
ISC_HIS_CFG_RAR);
- regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
+ regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
+ ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_GR;
isc_update_profile(isc);
@@ -742,7 +578,8 @@ static void isc_set_histogram(struct isc_device *isc, bool enable)
ctrls->hist_stat = HIST_ENABLED;
} else {
regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
- regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_DIS);
+ regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
+ ISC_HIS_CTRL_DIS);
ctrls->hist_stat = HIST_DISABLED;
}
@@ -751,28 +588,25 @@ static void isc_set_histogram(struct isc_device *isc, bool enable)
static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
- u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
+ u32 pfe_cfg0, dcfg, mask, pipeline;
struct isc_subdev_entity *subdev = isc->current_subdev;
pfe_cfg0 = isc->config.sd_format->pfe_cfg0_bps;
- rlp_mode = isc->config.rlp_cfg_mode;
pipeline = isc->config.bits_pipeline;
- dcfg = isc->config.dcfg_imode |
- ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+ dcfg = isc->config.dcfg_imode | isc->dcfg;
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
ISC_PFE_CFG0_MODE_MASK | ISC_PFE_CFG0_CCIR_CRC |
- ISC_PFE_CFG0_CCIR656;
+ ISC_PFE_CFG0_CCIR656 | ISC_PFE_CFG0_MIPI;
regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
- regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
- rlp_mode);
+ isc->config_rlp(isc);
- regmap_write(regmap, ISC_DCFG, dcfg);
+ regmap_write(regmap, ISC_DCFG + isc->offsets.dma, dcfg);
/* Set the pipeline */
isc_set_pipeline(isc, pipeline);
@@ -807,7 +641,12 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_start_stream;
}
- pm_runtime_get_sync(isc->dev);
+ ret = pm_runtime_resume_and_get(isc->dev);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev, "RPM resume failed: %d\n",
+ ret);
+ goto err_pm_get;
+ }
ret = isc_configure(isc);
if (unlikely(ret))
@@ -838,7 +677,7 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
err_configure:
pm_runtime_put_sync(isc->dev);
-
+err_pm_get:
v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
err_start_stream:
@@ -938,7 +777,7 @@ static int isc_querycap(struct file *file, void *priv,
{
struct isc_device *isc = video_drvdata(file);
- strscpy(cap->driver, ATMEL_ISC_NAME, sizeof(cap->driver));
+ strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
strscpy(cap->card, "Atmel Image Sensor Controller", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", isc->v4l2_dev.name);
@@ -949,25 +788,25 @@ static int isc_querycap(struct file *file, void *priv,
static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
+ struct isc_device *isc = video_drvdata(file);
u32 index = f->index;
u32 i, supported_index;
- if (index < ARRAY_SIZE(controller_formats)) {
- f->pixelformat = controller_formats[index].fourcc;
+ if (index < isc->controller_formats_size) {
+ f->pixelformat = isc->controller_formats[index].fourcc;
return 0;
}
- index -= ARRAY_SIZE(controller_formats);
+ index -= isc->controller_formats_size;
- i = 0;
supported_index = 0;
- for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
- if (!ISC_IS_FORMAT_RAW(formats_list[i].mbus_code) ||
- !formats_list[i].sd_support)
+ for (i = 0; i < isc->formats_list_size; i++) {
+ if (!ISC_IS_FORMAT_RAW(isc->formats_list[i].mbus_code) ||
+ !isc->formats_list[i].sd_support)
continue;
if (supported_index == index) {
- f->pixelformat = formats_list[i].fourcc;
+ f->pixelformat = isc->formats_list[i].fourcc;
return 0;
}
supported_index++;
@@ -1016,6 +855,8 @@ static int isc_try_validate_formats(struct isc_device *isc)
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
ret = 0;
yuv = true;
break;
@@ -1030,6 +871,7 @@ static int isc_try_validate_formats(struct isc_device *isc)
break;
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y16:
ret = 0;
grey = true;
break;
@@ -1060,6 +902,8 @@ static int isc_try_validate_formats(struct isc_device *isc)
*/
static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
{
+ isc->try_config.rlp_cfg_mode = 0;
+
switch (isc->try_config.fourcc) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
@@ -1126,7 +970,19 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.bpp = 16;
break;
case V4L2_PIX_FMT_YUYV:
- isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_YUYV;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_UYVY;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_VYUY;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
@@ -1137,8 +993,11 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
break;
+ case V4L2_PIX_FMT_Y16:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10 | ISC_RLP_CFG_LSH;
+ fallthrough;
case V4L2_PIX_FMT_Y10:
- isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10;
+ isc->try_config.rlp_cfg_mode |= ISC_RLP_CFG_MODE_DATY10;
isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 16;
@@ -1172,7 +1031,8 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
- WB_ENABLE | GAM_ENABLES;
+ WB_ENABLE | GAM_ENABLES | DPC_BLCENABLE |
+ CC_ENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
@@ -1181,8 +1041,9 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
- CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
- SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE;
+ CSC_ENABLE | GAM_ENABLES | WB_ENABLE |
+ SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE |
+ DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
@@ -1192,39 +1053,49 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
- SUB422_ENABLE | CBC_ENABLE;
+ SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
/* if sensor format is RAW, we convert inside ISC */
if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
- SUB422_ENABLE | CBC_ENABLE;
+ SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
case V4L2_PIX_FMT_GREY:
- if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ case V4L2_PIX_FMT_Y16:
/* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
isc->try_config.bits_pipeline = CFA_ENABLE |
CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
- CBC_ENABLE;
+ CBC_ENABLE | DPC_BLCENABLE;
} else {
isc->try_config.bits_pipeline = 0x0;
}
break;
default:
- isc->try_config.bits_pipeline = 0x0;
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+ isc->try_config.bits_pipeline = WB_ENABLE | DPC_BLCENABLE;
+ else
+ isc->try_config.bits_pipeline = 0x0;
}
+
+ /* Tune the pipeline to the product-specific variant */
+ isc->adapt_pipeline(isc);
+
return 0;
}
static void isc_try_fse(struct isc_device *isc,
- struct v4l2_subdev_pad_config *pad_cfg)
+ struct v4l2_subdev_state *sd_state)
{
int ret;
struct v4l2_subdev_frame_size_enum fse = {};
@@ -1240,17 +1111,17 @@ static void isc_try_fse(struct isc_device *isc,
fse.which = V4L2_SUBDEV_FORMAT_TRY;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
- pad_cfg, &fse);
+ sd_state, &fse);
/*
* Attempt to obtain format size from subdev. If not available,
* just use the maximum ISC can receive.
*/
if (ret) {
- pad_cfg->try_crop.width = ISC_MAX_SUPPORT_WIDTH;
- pad_cfg->try_crop.height = ISC_MAX_SUPPORT_HEIGHT;
+ sd_state->pads->try_crop.width = isc->max_width;
+ sd_state->pads->try_crop.height = isc->max_height;
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
}
@@ -1261,6 +1132,9 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1324,10 +1198,10 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
isc->try_config.sd_format = sd_fmt;
/* Limit to Atmel ISC hardware capabilities */
- if (pixfmt->width > ISC_MAX_SUPPORT_WIDTH)
- pixfmt->width = ISC_MAX_SUPPORT_WIDTH;
- if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
- pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
+ if (pixfmt->width > isc->max_width)
+ pixfmt->width = isc->max_width;
+ if (pixfmt->height > isc->max_height)
+ pixfmt->height = isc->max_height;
/*
* The mbus format is the one the subdev outputs.
@@ -1358,16 +1232,22 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
goto isc_try_fmt_err;
/* Obtain frame sizes if possible to have crop requirements ready */
- isc_try_fse(isc, &pad_cfg);
+ isc_try_fse(isc, &pad_state);
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
goto isc_try_fmt_subdev_err;
v4l2_fill_pix_format(pixfmt, &format.format);
+ /* Limit to Atmel ISC hardware capabilities */
+ if (pixfmt->width > isc->max_width)
+ pixfmt->width = isc->max_width;
+ if (pixfmt->height > isc->max_height)
+ pixfmt->height = isc->max_height;
+
pixfmt->field = V4L2_FIELD_NONE;
pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp) >> 3;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
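As a worked example of the geometry computed above: a 640x480 YUYV try-format has bpp = 16, so bytesperline = (640 * 16) >> 3 = 1280 and sizeimage = 1280 * 480 = 614400 bytes.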
@@ -1403,6 +1283,12 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
if (ret < 0)
return ret;
+ /* Limit to Atmel ISC hardware capabilities */
+ if (f->fmt.pix.width > isc->max_width)
+ f->fmt.pix.width = isc->max_width;
+ if (f->fmt.pix.height > isc->max_height)
+ f->fmt.pix.height = isc->max_height;
+
isc->fmt = *f;
if (isc->try_config.sd_format && isc->config.sd_format &&
@@ -1496,8 +1382,8 @@ static int isc_enum_framesizes(struct file *file, void *fh,
if (isc->user_formats[i]->fourcc == fsize->pixel_format)
ret = 0;
- for (i = 0; i < ARRAY_SIZE(controller_formats); i++)
- if (controller_formats[i].fourcc == fsize->pixel_format)
+ for (i = 0; i < isc->controller_formats_size; i++)
+ if (isc->controller_formats[i].fourcc == fsize->pixel_format)
ret = 0;
if (ret)
@@ -1533,8 +1419,8 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
if (isc->user_formats[i]->fourcc == fival->pixel_format)
ret = 0;
- for (i = 0; i < ARRAY_SIZE(controller_formats); i++)
- if (controller_formats[i].fourcc == fival->pixel_format)
+ for (i = 0; i < isc->controller_formats_size; i++)
+ if (isc->controller_formats[i].fourcc == fival->pixel_format)
ret = 0;
if (ret)
@@ -1704,7 +1590,8 @@ static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
*min = 0;
*max = HIST_ENTRIES;
- regmap_bulk_read(regmap, ISC_HIS_ENTRY, hist_entry, HIST_ENTRIES);
+ regmap_bulk_read(regmap, ISC_HIS_ENTRY + isc->offsets.his_entry,
+ hist_entry, HIST_ENTRIES);
*hist_count = 0;
/*
@@ -1809,6 +1696,7 @@ static void isc_awb_work(struct work_struct *w)
u32 baysel;
unsigned long flags;
u32 min, max;
+ int ret;
/* streaming is not active anymore */
if (isc->stop)
@@ -1831,7 +1719,9 @@ static void isc_awb_work(struct work_struct *w)
ctrls->hist_id = hist_id;
baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
- pm_runtime_get_sync(isc->dev);
+ ret = pm_runtime_resume_and_get(isc->dev);
+ if (ret < 0)
+ return;
/*
* only update if we have all the required histograms and controls
@@ -1860,7 +1750,8 @@ static void isc_awb_work(struct work_struct *w)
ctrls->awb = ISC_WB_NONE;
}
}
- regmap_write(regmap, ISC_HIS_CFG, hist_id | baysel | ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
+ hist_id | baysel | ISC_HIS_CFG_RAR);
isc_update_profile(isc);
/* if awb has been disabled, we don't need to start another histogram */
if (ctrls->awb)
@@ -2065,12 +1956,14 @@ static int isc_ctrl_init(struct isc_device *isc)
if (ret < 0)
return ret;
+ /* Initialize product-specific controls. For example, contrast. */
+ isc->config_ctrls(isc, ops);
+
ctrls->brightness = 0;
- ctrls->contrast = 256;
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
- v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
- v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, isc->gamma_max, 1,
+ isc->gamma_max);
isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1, 1);
@@ -2138,12 +2031,13 @@ static void isc_async_unbind(struct v4l2_async_notifier *notifier,
v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
-static struct isc_format *find_format_by_code(unsigned int code, int *index)
+static struct isc_format *find_format_by_code(struct isc_device *isc,
+ unsigned int code, int *index)
{
- struct isc_format *fmt = &formats_list[0];
+ struct isc_format *fmt = &isc->formats_list[0];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
+ for (i = 0; i < isc->formats_list_size; i++) {
if (fmt->mbus_code == code) {
*index = i;
return fmt;
@@ -2160,7 +2054,7 @@ static int isc_formats_init(struct isc_device *isc)
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
unsigned int num_fmts, i, j;
- u32 list_size = ARRAY_SIZE(formats_list);
+ u32 list_size = isc->formats_list_size;
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
@@ -2170,7 +2064,7 @@ static int isc_formats_init(struct isc_device *isc)
NULL, &mbus_code)) {
mbus_code.index++;
- fmt = find_format_by_code(mbus_code.code, &i);
+ fmt = find_format_by_code(isc, mbus_code.code, &i);
if (!fmt) {
v4l2_warn(&isc->v4l2_dev, "Mbus code %x not supported\n",
mbus_code.code);
@@ -2191,7 +2085,7 @@ static int isc_formats_init(struct isc_device *isc)
if (!isc->user_formats)
return -ENOMEM;
- fmt = &formats_list[0];
+ fmt = &isc->formats_list[0];
for (i = 0, j = 0; i < list_size; i++) {
if (fmt->sd_support)
isc->user_formats[j++] = fmt;
@@ -2287,7 +2181,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
}
/* Register video device */
- strscpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
+ strscpy(vdev->name, "microchip-isc", sizeof(vdev->name));
vdev->release = video_device_release_empty;
vdev->fops = &isc_fops;
vdev->ioctl_ops = &isc_ioctl_ops;
@@ -2338,8 +2232,14 @@ int isc_pipeline_init(struct isc_device *isc)
struct regmap_field *regs;
unsigned int i;
- /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+ /*
+ * DPCEN-->GDCEN-->BLCEN-->WB-->CFA-->CC-->
+ * GAM-->VHXS-->CSC-->CBC-->SUB422-->SUB420
+ */
const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
+ REG_FIELD(ISC_DPC_CTRL, 0, 0),
+ REG_FIELD(ISC_DPC_CTRL, 1, 1),
+ REG_FIELD(ISC_DPC_CTRL, 2, 2),
REG_FIELD(ISC_WB_CTRL, 0, 0),
REG_FIELD(ISC_CFA_CTRL, 0, 0),
REG_FIELD(ISC_CC_CTRL, 0, 0),
@@ -2347,10 +2247,11 @@ int isc_pipeline_init(struct isc_device *isc)
REG_FIELD(ISC_GAM_CTRL, 1, 1),
REG_FIELD(ISC_GAM_CTRL, 2, 2),
REG_FIELD(ISC_GAM_CTRL, 3, 3),
- REG_FIELD(ISC_CSC_CTRL, 0, 0),
- REG_FIELD(ISC_CBC_CTRL, 0, 0),
- REG_FIELD(ISC_SUB422_CTRL, 0, 0),
- REG_FIELD(ISC_SUB420_CTRL, 0, 0),
+ REG_FIELD(ISC_VHXS_CTRL, 0, 0),
+ REG_FIELD(ISC_CSC_CTRL + isc->offsets.csc, 0, 0),
+ REG_FIELD(ISC_CBC_CTRL + isc->offsets.cbc, 0, 0),
+ REG_FIELD(ISC_SUB422_CTRL + isc->offsets.sub422, 0, 0),
+ REG_FIELD(ISC_SUB420_CTRL + isc->offsets.sub420, 0, 0),
};
for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
@@ -2365,7 +2266,7 @@ int isc_pipeline_init(struct isc_device *isc)
}
/* regmap configuration */
-#define ATMEL_ISC_REG_MAX 0xbfc
+#define ATMEL_ISC_REG_MAX 0xd5c
const struct regmap_config isc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index f1e160ed4351..d06b72228d4f 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -26,6 +26,7 @@
#define ISC_PFE_CFG0_PPOL_LOW BIT(2)
#define ISC_PFE_CFG0_CCIR656 BIT(9)
#define ISC_PFE_CFG0_CCIR_CRC BIT(10)
+#define ISC_PFE_CFG0_MIPI BIT(14)
#define ISC_PFE_CFG0_MODE_PROGRESSIVE (0x0 << 4)
#define ISC_PFE_CFG0_MODE_MASK GENMASK(6, 4)
@@ -90,6 +91,46 @@
#define ISC_INT_DDONE BIT(8)
#define ISC_INT_HISDONE BIT(12)
+/* ISC DPC Control Register */
+#define ISC_DPC_CTRL 0x40
+
+#define ISC_DPC_CTRL_DPCEN BIT(0)
+#define ISC_DPC_CTRL_GDCEN BIT(1)
+#define ISC_DPC_CTRL_BLCEN BIT(2)
+
+/* ISC DPC Config Register */
+#define ISC_DPC_CFG 0x44
+
+#define ISC_DPC_CFG_BAYSEL_SHIFT 0
+
+#define ISC_DPC_CFG_EITPOL BIT(4)
+
+#define ISC_DPC_CFG_TA_ENABLE BIT(14)
+#define ISC_DPC_CFG_TC_ENABLE BIT(13)
+#define ISC_DPC_CFG_TM_ENABLE BIT(12)
+
+#define ISC_DPC_CFG_RE_MODE BIT(17)
+
+#define ISC_DPC_CFG_GDCCLP_SHIFT 20
+#define ISC_DPC_CFG_GDCCLP_MASK GENMASK(22, 20)
+
+#define ISC_DPC_CFG_BLOFF_SHIFT 24
+#define ISC_DPC_CFG_BLOFF_MASK GENMASK(31, 24)
+
+#define ISC_DPC_CFG_BAYCFG_SHIFT 0
+#define ISC_DPC_CFG_BAYCFG_MASK GENMASK(1, 0)
+/* ISC DPC Threshold Median Register */
+#define ISC_DPC_THRESHM 0x48
+
+/* ISC DPC Threshold Closest Register */
+#define ISC_DPC_THRESHC 0x4C
+
+/* ISC DPC Threshold Average Register */
+#define ISC_DPC_THRESHA 0x50
+
+/* ISC DPC Status Register */
+#define ISC_DPC_SR 0x54
+
/* ISC White Balance Control Register */
#define ISC_WB_CTRL 0x00000058
@@ -144,6 +185,8 @@
/* ISC Gamma Correction Control Register */
#define ISC_GAM_CTRL 0x00000094
+#define ISC_GAM_CTRL_BIPART BIT(4)
+
/* ISC Gamma Correction Blue Entry Register */
#define ISC_GAM_BENTRY 0x00000098
@@ -153,6 +196,38 @@
/* ISC Gamma Correction Red Entry Register */
#define ISC_GAM_RENTRY 0x00000298
+/* ISC VHXS Control Register */
+#define ISC_VHXS_CTRL 0x398
+
+/* ISC VHXS Source Size Register */
+#define ISC_VHXS_SS 0x39C
+
+/* ISC VHXS Destination Size Register */
+#define ISC_VHXS_DS 0x3A0
+
+/* ISC Vertical Factor Register */
+#define ISC_VXS_FACT 0x3a4
+
+/* ISC Horizontal Factor Register */
+#define ISC_HXS_FACT 0x3a8
+
+/* ISC Vertical Config Register */
+#define ISC_VXS_CFG 0x3ac
+
+/* ISC Horizontal Config Register */
+#define ISC_HXS_CFG 0x3b0
+
+/* ISC Vertical Tap Register */
+#define ISC_VXS_TAP 0x3b4
+
+/* ISC Horizontal Tap Register */
+#define ISC_HXS_TAP 0x434
+
+/* Offset for CSC register specific to sama5d2 product */
+#define ISC_SAMA5D2_CSC_OFFSET 0
+/* Offset for CSC register specific to sama7g5 product */
+#define ISC_SAMA7G5_CSC_OFFSET 0x11c
+
/* Color Space Conversion Control Register */
#define ISC_CSC_CTRL 0x00000398
@@ -174,6 +249,11 @@
/* Color Space Conversion CRB OCR Register */
#define ISC_CSC_CRB_OCR 0x000003b0
+/* Offset for CBC register specific to sama5d2 product */
+#define ISC_SAMA5D2_CBC_OFFSET 0
+/* Offset for CBC register specific to sama7g5 product */
+#define ISC_SAMA7G5_CBC_OFFSET 0x11c
+
/* Contrast And Brightness Control Register */
#define ISC_CBC_CTRL 0x000003b4
@@ -188,12 +268,30 @@
#define ISC_CBC_CONTRAST 0x000003c0
#define ISC_CBC_CONTRAST_MASK GENMASK(11, 0)
+/* Hue Register */
+#define ISC_CBCHS_HUE 0x4e0
+/* Saturation Register */
+#define ISC_CBCHS_SAT 0x4e4
+
+/* Offset for SUB422 register specific to sama5d2 product */
+#define ISC_SAMA5D2_SUB422_OFFSET 0
+/* Offset for SUB422 register specific to sama7g5 product */
+#define ISC_SAMA7G5_SUB422_OFFSET 0x124
+
/* Subsampling 4:4:4 to 4:2:2 Control Register */
#define ISC_SUB422_CTRL 0x000003c4
+/* Offset for SUB420 register specific to sama5d2 product */
+#define ISC_SAMA5D2_SUB420_OFFSET 0
+/* Offset for SUB420 register specific to sama7g5 product */
+#define ISC_SAMA7G5_SUB420_OFFSET 0x124
/* Subsampling 4:2:2 to 4:2:0 Control Register */
#define ISC_SUB420_CTRL 0x000003cc
+/* Offset for RLP register specific to sama5d2 product */
+#define ISC_SAMA5D2_RLP_OFFSET 0
+/* Offset for RLP register specific to sama7g5 product */
+#define ISC_SAMA7G5_RLP_OFFSET 0x124
/* Rounding, Limiting and Packing Configuration Register */
#define ISC_RLP_CFG 0x000003d0
@@ -210,8 +308,22 @@
#define ISC_RLP_CFG_MODE_ARGB32 0xa
#define ISC_RLP_CFG_MODE_YYCC 0xb
#define ISC_RLP_CFG_MODE_YYCC_LIMITED 0xc
+#define ISC_RLP_CFG_MODE_YCYC 0xd
#define ISC_RLP_CFG_MODE_MASK GENMASK(3, 0)
+#define ISC_RLP_CFG_LSH BIT(5)
+
+#define ISC_RLP_CFG_YMODE_YUYV (3 << 6)
+#define ISC_RLP_CFG_YMODE_YVYU (2 << 6)
+#define ISC_RLP_CFG_YMODE_VYUY (0 << 6)
+#define ISC_RLP_CFG_YMODE_UYVY (1 << 6)
+
+#define ISC_RLP_CFG_YMODE_MASK GENMASK(7, 6)
+
+/* Offset for HIS register specific to sama5d2 product */
+#define ISC_SAMA5D2_HIS_OFFSET 0
+/* Offset for HIS register specific to sama7g5 product */
+#define ISC_SAMA7G5_HIS_OFFSET 0x124
/* Histogram Control Register */
#define ISC_HIS_CTRL 0x000003d4
@@ -233,6 +345,11 @@
#define ISC_HIS_CFG_RAR BIT(8)
+/* Offset for DMA register specific to sama5d2 product */
+#define ISC_SAMA5D2_DMA_OFFSET 0
+/* Offset for DMA register specific to sama7g5 product */
+#define ISC_SAMA7G5_DMA_OFFSET 0x13c
+
/* DMA Configuration Register */
#define ISC_DCFG 0x000003e0
#define ISC_DCFG_IMODE_PACKED8 0x0
@@ -248,13 +365,15 @@
#define ISC_DCFG_YMBSIZE_BEATS4 (0x1 << 4)
#define ISC_DCFG_YMBSIZE_BEATS8 (0x2 << 4)
#define ISC_DCFG_YMBSIZE_BEATS16 (0x3 << 4)
-#define ISC_DCFG_YMBSIZE_MASK GENMASK(5, 4)
+#define ISC_DCFG_YMBSIZE_BEATS32 (0x4 << 4)
+#define ISC_DCFG_YMBSIZE_MASK GENMASK(6, 4)
#define ISC_DCFG_CMBSIZE_SINGLE (0x0 << 8)
#define ISC_DCFG_CMBSIZE_BEATS4 (0x1 << 8)
#define ISC_DCFG_CMBSIZE_BEATS8 (0x2 << 8)
#define ISC_DCFG_CMBSIZE_BEATS16 (0x3 << 8)
-#define ISC_DCFG_CMBSIZE_MASK GENMASK(9, 8)
+#define ISC_DCFG_CMBSIZE_BEATS32 (0x4 << 8)
+#define ISC_DCFG_CMBSIZE_MASK GENMASK(10, 8)
/* DMA Control Register */
#define ISC_DCTRL 0x000003e4
@@ -278,6 +397,16 @@
/* DMA Address 2 Register */
#define ISC_DAD2 0x000003fc
+/* Offset for version register specific to sama5d2 product */
+#define ISC_SAMA5D2_VERSION_OFFSET 0
+/* Offset for version register specific to sama7g5 product */
+#define ISC_SAMA7G5_VERSION_OFFSET 0x13c
+/* Version Register */
+#define ISC_VERSION 0x0000040c
+
+/* Offset for HIS entry register specific to sama5d2 product */
+#define ISC_SAMA5D2_HIS_ENTRY_OFFSET 0
+/* Offset for HIS entry register specific to sama7g5 product */
+#define ISC_SAMA7G5_HIS_ENTRY_OFFSET 0x14c
/* Histogram Entry */
#define ISC_HIS_ENTRY 0x00000410
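These offsets compose with the sama5d2-era base addresses: on sama7g5 the histogram entries start at ISC_HIS_ENTRY + ISC_SAMA7G5_HIS_ENTRY_OFFSET = 0x410 + 0x14c = 0x55c, while the sama5d2 offsets are all 0 so the legacy addresses are unchanged.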
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index fab8eca58d93..19cc60dfcbe0 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -10,9 +10,6 @@
*/
#ifndef _ATMEL_ISC_H_
-#define ISC_MAX_SUPPORT_WIDTH 2592
-#define ISC_MAX_SUPPORT_HEIGHT 1944
-
#define ISC_CLK_MAX_DIV 255
enum isc_clk_id {
@@ -71,17 +68,21 @@ struct isc_format {
};
/* Pipeline bitmap */
-#define WB_ENABLE BIT(0)
-#define CFA_ENABLE BIT(1)
-#define CC_ENABLE BIT(2)
-#define GAM_ENABLE BIT(3)
-#define GAM_BENABLE BIT(4)
-#define GAM_GENABLE BIT(5)
-#define GAM_RENABLE BIT(6)
-#define CSC_ENABLE BIT(7)
-#define CBC_ENABLE BIT(8)
-#define SUB422_ENABLE BIT(9)
-#define SUB420_ENABLE BIT(10)
+#define DPC_DPCENABLE BIT(0)
+#define DPC_GDCENABLE BIT(1)
+#define DPC_BLCENABLE BIT(2)
+#define WB_ENABLE BIT(3)
+#define CFA_ENABLE BIT(4)
+#define CC_ENABLE BIT(5)
+#define GAM_ENABLE BIT(6)
+#define GAM_BENABLE BIT(7)
+#define GAM_GENABLE BIT(8)
+#define GAM_RENABLE BIT(9)
+#define VHXS_ENABLE BIT(10)
+#define CSC_ENABLE BIT(11)
+#define CBC_ENABLE BIT(12)
+#define SUB422_ENABLE BIT(13)
+#define SUB420_ENABLE BIT(14)
#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
@@ -145,7 +146,31 @@ struct isc_ctrls {
u32 hist_minmax[HIST_BAYER][2];
};
-#define ISC_PIPE_LINE_NODE_NUM 11
+#define ISC_PIPE_LINE_NODE_NUM 15
+
+/*
+ * struct isc_reg_offsets - ISC device register offsets
+ * @csc: Offset for the CSC register
+ * @cbc: Offset for the CBC register
+ * @sub422: Offset for the SUB422 register
+ * @sub420: Offset for the SUB420 register
+ * @rlp: Offset for the RLP register
+ * @his: Offset for the HIS related registers
+ * @dma: Offset for the DMA related registers
+ * @version: Offset for the version register
+ * @his_entry: Offset for the HIS entries registers
+ */
+struct isc_reg_offsets {
+ u32 csc;
+ u32 cbc;
+ u32 sub422;
+ u32 sub420;
+ u32 rlp;
+ u32 his;
+ u32 dma;
+ u32 version;
+ u32 his_entry;
+};
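A minimal sketch of how the shared code consumes these offsets, mirroring the regmap accesses changed earlier in this patch:

/* Common code stays product-agnostic by adding the per-product offset. */
regmap_write(isc->regmap, ISC_DCFG + isc->offsets.dma, dcfg);
regmap_write(isc->regmap, ISC_HIS_CTRL + isc->offsets.his, ISC_HIS_CTRL_EN);
/* sama5d2 fills every offset with 0; sama7g5 supplies its shifted map. */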
/*
* struct isc_device - ISC device driver data/config struct
@@ -153,6 +178,7 @@ struct isc_ctrls {
* @hclock: Hclock clock input (refer datasheet)
* @ispck: iscpck clock (refer datasheet)
* @isc_clks: ISC clocks
+ * @dcfg: DMA master configuration, architecture dependent
*
* @dev: Registered device driver
* @v4l2_dev: v4l2 registered device
@@ -187,12 +213,46 @@ struct isc_ctrls {
*
* @current_subdev: current subdevice: the sensor
* @subdev_entities: list of subdevice entitites
+ *
+ * @gamma_table: pointer to the table with gamma values, has
+ * gamma_max sets of GAMMA_ENTRIES entries each
+ * @gamma_max: maximum number of sets inside the gamma_table
+ *
+ * @max_width: maximum frame width, dependent on the internal RAM
+ * @max_height: maximum frame height, dependent on the internal RAM
+ *
+ * @config_dpc: pointer to a function that initializes product
+ * specific DPC module
+ * @config_csc: pointer to a function that initializes product
+ * specific CSC module
+ * @config_cbc: pointer to a function that initializes product
+ * specific CBC module
+ * @config_cc: pointer to a function that initializes product
+ * specific CC module
+ * @config_gam: pointer to a function that initializes product
+ * specific GAMMA module
+ * @config_rlp: pointer to a function that initializes product
+ * specific RLP module
+ * @config_ctrls: pointer to a function that initializes product
+ * specific v4l2 controls.
+ *
+ * @adapt_pipeline: pointer to a function that adapts the pipeline bits
+ * to the product specific pipeline
+ *
+ * @offsets: struct holding the product specific register offsets
+ * @controller_formats: pointer to the array of possible formats that the
+ * controller can output
+ * @formats_list: pointer to the array of possible formats that can
+ * be used as an input to the controller
+ * @controller_formats_size: size of controller_formats array
+ * @formats_list_size: size of formats_list array
*/
struct isc_device {
struct regmap *regmap;
struct clk *hclock;
struct clk *ispck;
struct isc_clk isc_clks[2];
+ u32 dcfg;
struct device *dev;
struct v4l2_device v4l2_dev;
@@ -245,16 +305,36 @@ struct isc_device {
struct v4l2_ctrl *gr_off_ctrl;
struct v4l2_ctrl *gb_off_ctrl;
};
-};
-#define GAMMA_MAX 2
#define GAMMA_ENTRIES 64
+ /* pointer to the defined gamma table */
+ const u32 (*gamma_table)[GAMMA_ENTRIES];
+ u32 gamma_max;
+
+ u32 max_width;
+ u32 max_height;
+
+ struct {
+ void (*config_dpc)(struct isc_device *isc);
+ void (*config_csc)(struct isc_device *isc);
+ void (*config_cbc)(struct isc_device *isc);
+ void (*config_cc)(struct isc_device *isc);
+ void (*config_gam)(struct isc_device *isc);
+ void (*config_rlp)(struct isc_device *isc);
-#define ATMEL_ISC_NAME "atmel-isc"
+ void (*config_ctrls)(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops);
+
+ void (*adapt_pipeline)(struct isc_device *isc);
+ };
+
+ struct isc_reg_offsets offsets;
+ const struct isc_format *controller_formats;
+ struct isc_format *formats_list;
+ u32 controller_formats_size;
+ u32 formats_list_size;
+};
-extern struct isc_format formats_list[];
-extern const struct isc_format controller_formats[];
-extern const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES];
extern const struct regmap_config isc_regmap_config;
extern const struct v4l2_async_notifier_operations isc_async_ops;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index e392b3efe363..095d80c4f59e 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -422,7 +422,9 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct frame_buffer *buf, *node;
int ret;
- pm_runtime_get_sync(isi->dev);
+ ret = pm_runtime_resume_and_get(isi->dev);
+ if (ret < 0)
+ return ret;
/* Enable stream on the sub device */
ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 1);
@@ -555,7 +557,7 @@ static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
}
static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
- struct v4l2_subdev_pad_config *pad_cfg)
+ struct v4l2_subdev_state *sd_state)
{
int ret;
struct v4l2_subdev_frame_size_enum fse = {
@@ -564,17 +566,17 @@ static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
};
ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
- pad_cfg, &fse);
+ sd_state, &fse);
/*
* Attempt to obtain format size from subdev. If not available,
* just use the maximum ISI can receive.
*/
if (ret) {
- pad_cfg->try_crop.width = MAX_SUPPORT_WIDTH;
- pad_cfg->try_crop.height = MAX_SUPPORT_HEIGHT;
+ sd_state->pads->try_crop.width = MAX_SUPPORT_WIDTH;
+ sd_state->pads->try_crop.height = MAX_SUPPORT_HEIGHT;
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
}
@@ -584,6 +586,9 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
const struct isi_format *isi_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -601,10 +606,10 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
- isi_try_fse(isi, isi_fmt, &pad_cfg);
+ isi_try_fse(isi, isi_fmt, &pad_state);
ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
@@ -782,9 +787,10 @@ static int isi_enum_frameintervals(struct file *file, void *fh,
return 0;
}
-static void isi_camera_set_bus_param(struct atmel_isi *isi)
+static int isi_camera_set_bus_param(struct atmel_isi *isi)
{
u32 cfg1 = 0;
+ int ret;
/* set bus param for ISI */
if (isi->pdata.hsync_act_low)
@@ -801,12 +807,16 @@ static void isi_camera_set_bus_param(struct atmel_isi *isi)
cfg1 |= ISI_CFG1_THMASK_BEATS_16;
/* Enable PM and peripheral clock before operate isi registers */
- pm_runtime_get_sync(isi->dev);
+ ret = pm_runtime_resume_and_get(isi->dev);
+ if (ret < 0)
+ return ret;
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
isi_writel(isi, ISI_CFG1, cfg1);
pm_runtime_put(isi->dev);
+
+ return 0;
}
/* -----------------------------------------------------------------------*/
@@ -1085,7 +1095,11 @@ static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
dev_err(isi->dev, "No supported mediabus format found\n");
return ret;
}
- isi_camera_set_bus_param(isi);
+ ret = isi_camera_set_bus_param(isi);
+ if (ret) {
+ dev_err(isi->dev, "Can't wake up device\n");
+ return ret;
+ }
ret = isi_set_default_fmt(isi);
if (ret) {
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index 61d9885765f4..925aa80a139b 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -49,10 +49,262 @@
#include "atmel-isc-regs.h"
#include "atmel-isc.h"
-#define ISC_MAX_SUPPORT_WIDTH 2592
-#define ISC_MAX_SUPPORT_HEIGHT 1944
+#define ISC_SAMA5D2_MAX_SUPPORT_WIDTH 2592
+#define ISC_SAMA5D2_MAX_SUPPORT_HEIGHT 1944
-#define ISC_CLK_MAX_DIV 255
+#define ISC_SAMA5D2_PIPELINE \
+ (WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
+ CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
+
+/* This is a list of the formats that the ISC can *output* */
+static const struct isc_format sama5d2_controller_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ },
+};
+
+/* This is a list of formats that the ISC can receive as *input* */
+static struct isc_format sama5d2_formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ },
+
+};
+
+static void isc_sama5d2_config_csc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Convert RGB to YUV */
+ regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
+ 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
+ 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
+ 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
+ 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
+ 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
+ 0xFEE | (0x80 << 16));
+}
+
+static void isc_sama5d2_config_cbc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc,
+ isc->ctrls.brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc,
+ isc->ctrls.contrast);
+}
+
+static void isc_sama5d2_config_cc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure each register at the neutral fixed point 1.0 or 0.0 */
+ regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
+ regmap_write(regmap, ISC_CC_RB_OR, 0);
+ regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
+ regmap_write(regmap, ISC_CC_GB_OG, 0);
+ regmap_write(regmap, ISC_CC_BR_BG, 0);
+ regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
+}
+
+static void isc_sama5d2_config_ctrls(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
+ ctrls->contrast = 256;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
+}
+
+static void isc_sama5d2_config_dpc(struct isc_device *isc)
+{
+ /* This module is not present on the sama5d2 pipeline */
+}
+
+static void isc_sama5d2_config_gam(struct isc_device *isc)
+{
+ /* No specific gamma configuration */
+}
+
+static void isc_sama5d2_config_rlp(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 rlp_mode = isc->config.rlp_cfg_mode;
+
+ regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
+ ISC_RLP_CFG_MODE_MASK, rlp_mode);
+}
+
+static void isc_sama5d2_adapt_pipeline(struct isc_device *isc)
+{
+ isc->try_config.bits_pipeline &= ISC_SAMA5D2_PIPELINE;
+}
+
+/* Gamma table with gamma 1/2.2 */
+static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
+ /* 0 --> gamma 1/1.8 */
+ { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
+ 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
+ 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
+ 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
+ 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
+ 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
+ 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
+ 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
+ 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
+ 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
+ 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
+
+ /* 1 --> gamma 1/2 */
+ { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
+ 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
+ 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
+ 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
+ 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
+ 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
+ 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
+ 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
+ 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
+ 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
+ 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
+
+ /* 2 --> gamma 1/2.2 */
+ { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
+ 0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
+ 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
+ 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
+ 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
+ 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
+ 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
+ 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
+ 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
+ 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
+ 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
+};
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
@@ -118,6 +370,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
struct isc_subdev_entity *subdev_entity;
int irq;
int ret;
+ u32 ver;
isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
if (!isc)
@@ -143,13 +396,47 @@ static int atmel_isc_probe(struct platform_device *pdev)
return irq;
ret = devm_request_irq(dev, irq, isc_interrupt, 0,
- ATMEL_ISC_NAME, isc);
+ "atmel-sama5d2-isc", isc);
if (ret < 0) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
irq, ret);
return ret;
}
+ isc->gamma_table = isc_sama5d2_gamma_table;
+ isc->gamma_max = 2;
+
+ isc->max_width = ISC_SAMA5D2_MAX_SUPPORT_WIDTH;
+ isc->max_height = ISC_SAMA5D2_MAX_SUPPORT_HEIGHT;
+
+ isc->config_dpc = isc_sama5d2_config_dpc;
+ isc->config_csc = isc_sama5d2_config_csc;
+ isc->config_cbc = isc_sama5d2_config_cbc;
+ isc->config_cc = isc_sama5d2_config_cc;
+ isc->config_gam = isc_sama5d2_config_gam;
+ isc->config_rlp = isc_sama5d2_config_rlp;
+ isc->config_ctrls = isc_sama5d2_config_ctrls;
+
+ isc->adapt_pipeline = isc_sama5d2_adapt_pipeline;
+
+ isc->offsets.csc = ISC_SAMA5D2_CSC_OFFSET;
+ isc->offsets.cbc = ISC_SAMA5D2_CBC_OFFSET;
+ isc->offsets.sub422 = ISC_SAMA5D2_SUB422_OFFSET;
+ isc->offsets.sub420 = ISC_SAMA5D2_SUB420_OFFSET;
+ isc->offsets.rlp = ISC_SAMA5D2_RLP_OFFSET;
+ isc->offsets.his = ISC_SAMA5D2_HIS_OFFSET;
+ isc->offsets.dma = ISC_SAMA5D2_DMA_OFFSET;
+ isc->offsets.version = ISC_SAMA5D2_VERSION_OFFSET;
+ isc->offsets.his_entry = ISC_SAMA5D2_HIS_ENTRY_OFFSET;
+
+ isc->controller_formats = sama5d2_controller_formats;
+ isc->controller_formats_size = ARRAY_SIZE(sama5d2_controller_formats);
+ isc->formats_list = sama5d2_formats_list;
+ isc->formats_list_size = ARRAY_SIZE(sama5d2_formats_list);
+
+ /* sama5d2-isc - 8 bits per beat */
+ isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -241,6 +528,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_request_idle(dev);
+ regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
+ dev_info(dev, "Microchip ISC version %x\n", ver);
+
return 0;
cleanup_subdev:
@@ -319,7 +609,7 @@ static struct platform_driver atmel_isc_driver = {
.probe = atmel_isc_probe,
.remove = atmel_isc_remove,
.driver = {
- .name = ATMEL_ISC_NAME,
+ .name = "atmel-sama5d2-isc",
.pm = &atmel_isc_dev_pm_ops,
.of_match_table = of_match_ptr(atmel_isc_of_match),
},
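
The probe changes above are the heart of the refactor: everything SoC-specific, register offsets, pipeline hooks, format tables, gamma tables, DMA beat size, is injected into struct isc_device fields, so the shared atmel-isc-base code stays product-neutral. A condensed sketch of the resulting dispatch in the common code; isc_apply_pipeline_config() is a hypothetical stand-in name for the real caller:

	/*
	 * Common code dispatches blindly through the hooks the SoC probe
	 * installed; it never tests which product it is running on.
	 */
	static void isc_apply_pipeline_config(struct isc_device *isc)
	{
		isc->config_dpc(isc);
		isc->config_csc(isc);
		isc->config_cbc(isc);
		isc->config_cc(isc);
		isc->config_gam(isc);
		isc->config_rlp(isc);
	}
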
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
new file mode 100644
index 000000000000..f2785131ff56
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip eXtended Image Sensor Controller (XISC) driver
+ *
+ * Copyright (C) 2019-2021 Microchip Technology, Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ * Sensor-->PFE-->DPC-->WB-->CFA-->CC-->GAM-->VHXS-->CSC-->CBHS-->SUB-->RLP-->DMA-->HIS
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * DPC: Defective Pixel Correction with black offset correction, green disparity
+ * correction and defective pixel correction (3 modules total)
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ * VHXS: Vertical and Horizontal Scaler
+ * CSC: Programmable color space conversion
+ * CBHS: Contrast Brightness Hue and Saturation control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ * and packing of the incoming data
+ * DMA: This module performs DMA master accesses to write frames to external RAM
+ * HIS: Histogram module performs statistic counters on the frames
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+#define ISC_SAMA7G5_MAX_SUPPORT_WIDTH 3264
+#define ISC_SAMA7G5_MAX_SUPPORT_HEIGHT 2464
+
+#define ISC_SAMA7G5_PIPELINE \
+ (WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
+ CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
+
+/* This is a list of the formats that the ISC can *output* */
+static const struct isc_format sama7g5_controller_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y16,
+ },
+};
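
The output formats above carry only a fourcc; presumably the shared base code derives bit depth and plane layout. A minimal userspace sketch that lists them back through VIDIOC_ENUM_FMT; the /dev/video0 node name is an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_fmtdesc fmt;
		int fd = open("/dev/video0", O_RDWR);	/* assumed node */

		if (fd < 0)
			return 1;

		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
			printf("%.4s\n", (char *)&fmt.pixelformat);
			fmt.index++;
		}
		close(fd);
		return 0;
	}
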
+
+/* This is a list of formats that the ISC can receive as *input* */
+static struct isc_format sama7g5_formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ },
+};
+
+static void isc_sama7g5_config_csc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Convert RGB to YUV */
+ regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
+ 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
+ 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
+ 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
+ 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
+ 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
+ 0xFEE | (0x80 << 16));
+}
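
Each write above packs two color-space-conversion coefficients into one register, low halfword plus high halfword shifted up. Values such as 0xFDA and 0xFB6 read as small negative coefficients in two's complement, consistent with a BT.601-style RGB-to-YCbCr matrix. A hypothetical packing helper, assuming 12-bit signed fields:

	/*
	 * Pack two signed CSC coefficients the way the writes above do.
	 * The 12-bit field width is an assumption from the magnitudes.
	 */
	static u32 isc_csc_pack(s32 lo, s32 hi)
	{
		return (lo & 0xfff) | ((hi & 0xfff) << 16);
	}

For instance, isc_csc_pack(0x42, 0x81) reproduces the ISC_CSC_YR_YG word, and isc_csc_pack(-0x26, -0x4a) yields 0xFDA | (0xFB6 << 16).
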
+
+static void isc_sama7g5_config_cbc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure what is set via v4l2 ctrls */
+ regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc, isc->ctrls.brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc, isc->ctrls.contrast);
+ /* Configure Hue and Saturation as neutral midpoint */
+ regmap_write(regmap, ISC_CBCHS_HUE, 0);
+ regmap_write(regmap, ISC_CBCHS_SAT, (1 << 4));
+}
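
Writing (1 << 4) as neutral saturation suggests the CBHS saturation field carries four fractional bits, making 16 a gain of 1.0, with hue 0 as the midpoint. A small conversion helper under that fixed-point assumption; the helper name is illustrative:

	/*
	 * Map a percentage gain onto the assumed 4-fractional-bit field;
	 * 100% -> 16, i.e. the neutral value written above.
	 */
	static u32 isc_sat_to_reg(unsigned int percent)
	{
		return (percent << 4) / 100;
	}
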
+
+static void isc_sama7g5_config_cc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure each register at the neutral fixed point 1.0 or 0.0 */
+ regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
+ regmap_write(regmap, ISC_CC_RB_OR, 0);
+ regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
+ regmap_write(regmap, ISC_CC_GB_OG, 0);
+ regmap_write(regmap, ISC_CC_BR_BG, 0);
+ regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
+}
+
+static void isc_sama7g5_config_ctrls(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
+ ctrls->contrast = 16;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 16);
+}
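
Both ISC variants register V4L2_CID_CONTRAST over the same -2048..2047 range but with different neutral defaults, 256 on sama5d2 and 16 here, which tracks the different fixed-point scaling of their contrast blocks. A hedged userspace sketch that programs the control; the device node path is an assumption:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	static int set_contrast(const char *node, int value)
	{
		struct v4l2_control ctrl = {
			.id = V4L2_CID_CONTRAST,
			.value = value,	/* -2048..2047; 16 is neutral here */
		};
		int fd = open(node, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
		close(fd);
		return ret;
	}
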
+
+static void isc_sama7g5_config_dpc(struct isc_device *isc)
+{
+ u32 bay_cfg = isc->config.sd_format->cfa_baycfg;
+ struct regmap *regmap = isc->regmap;
+
+ regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BLOFF_MASK,
+ (64 << ISC_DPC_CFG_BLOFF_SHIFT));
+ regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BAYCFG_MASK,
+ (bay_cfg << ISC_DPC_CFG_BAYCFG_SHIFT));
+}
+
+static void isc_sama7g5_config_gam(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ regmap_update_bits(regmap, ISC_GAM_CTRL, ISC_GAM_CTRL_BIPART,
+ ISC_GAM_CTRL_BIPART);
+}
+
+static void isc_sama7g5_config_rlp(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 rlp_mode = isc->config.rlp_cfg_mode;
+
+ regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
+ ISC_RLP_CFG_MODE_MASK | ISC_RLP_CFG_LSH |
+ ISC_RLP_CFG_YMODE_MASK, rlp_mode);
+}
+
+static void isc_sama7g5_adapt_pipeline(struct isc_device *isc)
+{
+ isc->try_config.bits_pipeline &= ISC_SAMA7G5_PIPELINE;
+}
+
+/* Gamma table with a bipartite (two-segment) gamma curve */
+static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
+ /* index 0 --> gamma bipartite */
+ {
+ 0x980, 0x4c0320, 0x650260, 0x7801e0, 0x8701a0, 0x940180,
+ 0xa00160, 0xab0120, 0xb40120, 0xbd0120, 0xc60100, 0xce0100,
+ 0xd600e0, 0xdd00e0, 0xe400e0, 0xeb00c0, 0xf100c0, 0xf700c0,
+ 0xfd00c0, 0x10300a0, 0x10800c0, 0x10e00a0, 0x11300a0, 0x11800a0,
+ 0x11d00a0, 0x12200a0, 0x12700a0, 0x12c0080, 0x13000a0, 0x1350080,
+ 0x13900a0, 0x13e0080, 0x1420076, 0x17d0062, 0x1ae0054, 0x1d8004a,
+ 0x1fd0044, 0x21f003e, 0x23e003a, 0x25b0036, 0x2760032, 0x28f0030,
+ 0x2a7002e, 0x2be002c, 0x2d4002c, 0x2ea0028, 0x2fe0028, 0x3120026,
+ 0x3250024, 0x3370024, 0x3490022, 0x35a0022, 0x36b0020, 0x37b0020,
+ 0x38b0020, 0x39b001e, 0x3aa001e, 0x3b9001c, 0x3c7001c, 0x3d5001c,
+ 0x3e3001c, 0x3f1001c, 0x3ff001a, 0x40c001a },
+};
+
+static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn = NULL;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret;
+ bool mipi_mode;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
+
+ while (1) {
+ struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
+
+ epn = of_graph_get_next_endpoint(np, epn);
+ if (!epn)
+ return 0;
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
+ GFP_KERNEL);
+ if (!subdev_entity) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdev_entity->epn = epn;
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
+
+ if (mipi_mode)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_MIPI;
+
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+ of_node_put(epn);
+
+ return ret;
+}
+
+static int microchip_xisc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ struct resource *res;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+ u32 ver;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, isc_interrupt, 0,
+ "microchip-sama7g5-xisc", isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ isc->gamma_table = isc_sama7g5_gamma_table;
+ isc->gamma_max = 0;
+
+ isc->max_width = ISC_SAMA7G5_MAX_SUPPORT_WIDTH;
+ isc->max_height = ISC_SAMA7G5_MAX_SUPPORT_HEIGHT;
+
+ isc->config_dpc = isc_sama7g5_config_dpc;
+ isc->config_csc = isc_sama7g5_config_csc;
+ isc->config_cbc = isc_sama7g5_config_cbc;
+ isc->config_cc = isc_sama7g5_config_cc;
+ isc->config_gam = isc_sama7g5_config_gam;
+ isc->config_rlp = isc_sama7g5_config_rlp;
+ isc->config_ctrls = isc_sama7g5_config_ctrls;
+
+ isc->adapt_pipeline = isc_sama7g5_adapt_pipeline;
+
+ isc->offsets.csc = ISC_SAMA7G5_CSC_OFFSET;
+ isc->offsets.cbc = ISC_SAMA7G5_CBC_OFFSET;
+ isc->offsets.sub422 = ISC_SAMA7G5_SUB422_OFFSET;
+ isc->offsets.sub420 = ISC_SAMA7G5_SUB420_OFFSET;
+ isc->offsets.rlp = ISC_SAMA7G5_RLP_OFFSET;
+ isc->offsets.his = ISC_SAMA7G5_HIS_OFFSET;
+ isc->offsets.dma = ISC_SAMA7G5_DMA_OFFSET;
+ isc->offsets.version = ISC_SAMA7G5_VERSION_OFFSET;
+ isc->offsets.his_entry = ISC_SAMA7G5_HIS_ENTRY_OFFSET;
+
+ isc->controller_formats = sama7g5_controller_formats;
+ isc->controller_formats_size = ARRAY_SIZE(sama7g5_controller_formats);
+ isc->formats_list = sama7g5_formats_list;
+ isc->formats_list_size = ARRAY_SIZE(sama7g5_formats_list);
+
+ /* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
+ isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
+
+ ret = isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ /* ispck should be greater than or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto unprepare_clk;
+ }
+
+ ret = xisc_parse_dt(dev, isc);
+ if (ret) {
+ dev_err(dev, "fail to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ struct v4l2_async_subdev *asd;
+
+ v4l2_async_notifier_init(&subdev_entity->notifier);
+
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &subdev_entity->notifier,
+ of_fwnode_handle(subdev_entity->epn),
+ struct v4l2_async_subdev);
+
+ of_node_put(subdev_entity->epn);
+ subdev_entity->epn = NULL;
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto cleanup_subdev;
+ }
+
+ subdev_entity->notifier.ops = &isc_async_ops;
+
+ ret = v4l2_async_notifier_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
+ if (ret) {
+ dev_err(dev, "fail to register async notifier\n");
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_request_idle(dev);
+
+ regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
+ dev_info(dev, "Microchip XISC version %x\n", ver);
+
+ return 0;
+
+cleanup_subdev:
+ isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
+ isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static int microchip_xisc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ isc_clk_cleanup(isc);
+
+ return 0;
+}
+
+static int __maybe_unused xisc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused xisc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret)
+ clk_disable_unprepare(isc->hclock);
+
+ return ret;
+}
+
+static const struct dev_pm_ops microchip_xisc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xisc_runtime_suspend, xisc_runtime_resume, NULL)
+};
+
+static const struct of_device_id microchip_xisc_of_match[] = {
+ { .compatible = "microchip,sama7g5-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, microchip_xisc_of_match);
+
+static struct platform_driver microchip_xisc_driver = {
+ .probe = microchip_xisc_probe,
+ .remove = microchip_xisc_remove,
+ .driver = {
+ .name = "microchip-sama7g5-xisc",
+ .pm = &microchip_xisc_dev_pm_ops,
+ .of_match_table = of_match_ptr(microchip_xisc_of_match),
+ },
+};
+
+module_platform_driver(microchip_xisc_driver);
+
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Microchip-XISC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index c68a3eac62cd..f2b4ddd31177 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -282,6 +282,7 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
struct resource *res;
unsigned char i;
u32 dev_cfg;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
csi2rx->base = devm_ioremap_resource(&pdev->dev, res);
@@ -315,7 +316,12 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
return -EINVAL;
}
- clk_prepare_enable(csi2rx->p_clk);
+ ret = clk_prepare_enable(csi2rx->p_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
+ return ret;
+ }
+
dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
clk_disable_unprepare(csi2rx->p_clk);
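
Both Cadence fixes in this series close the same hole: clk_prepare_enable() can fail, and the old code ignored that before reading a register that needs the clock running. The corrected pattern, condensed into a sketch; the helper name and parameters are illustrative:

	static int read_hw_cfg(struct device *dev, struct clk *pclk,
			       void __iomem *cfg_reg, u32 *cfg)
	{
		int ret;

		/* The enable itself can fail and must be checked. */
		ret = clk_prepare_enable(pclk);
		if (ret) {
			dev_err(dev, "couldn't enable peripheral clock\n");
			return ret;
		}

		*cfg = readl(cfg_reg);	/* clock held only for the read */
		clk_disable_unprepare(pclk);
		return 0;
	}
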
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index e4d08acfbb49..5a67fba73ddd 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -156,7 +156,7 @@ static const struct csi2tx_fmt *csi2tx_get_fmt_from_mbus(u32 mbus)
}
static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(csi2tx_formats))
@@ -169,20 +169,20 @@ static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__csi2tx_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(subdev, cfg,
+ return v4l2_subdev_get_try_format(subdev, sd_state,
fmt->pad);
return &csi2tx->pad_fmts[fmt->pad];
}
static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct v4l2_mbus_framefmt *format;
@@ -191,7 +191,7 @@ static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
if (fmt->pad == CSI2TX_PAD_SOURCE)
return -EINVAL;
- format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ format = __csi2tx_get_pad_format(subdev, sd_state, fmt);
if (!format)
return -EINVAL;
@@ -201,7 +201,7 @@ static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
}
static int csi2tx_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct v4l2_mbus_framefmt *src_format = &fmt->format;
@@ -214,7 +214,7 @@ static int csi2tx_set_pad_format(struct v4l2_subdev *subdev,
if (!csi2tx_get_fmt_from_mbus(fmt->format.code))
src_format = &fmt_default;
- dst_format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ dst_format = __csi2tx_get_pad_format(subdev, sd_state, fmt);
if (!dst_format)
return -EINVAL;
@@ -436,6 +436,7 @@ static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
struct resource *res;
unsigned int i;
u32 dev_cfg;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
csi2tx->base = devm_ioremap_resource(&pdev->dev, res);
@@ -454,7 +455,12 @@ static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
return PTR_ERR(csi2tx->esc_clk);
}
- clk_prepare_enable(csi2tx->p_clk);
+ ret = clk_prepare_enable(csi2tx->p_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't prepare and enable p_clk\n");
+ return ret;
+ }
+
dev_cfg = readl(csi2tx->base + CSI2TX_DEVICE_CONFIG_REG);
clk_disable_unprepare(csi2tx->p_clk);
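
The cfg to sd_state renames here belong to a tree-wide conversion from struct v4l2_subdev_pad_config to the richer struct v4l2_subdev_state: the pad-op signatures change, the try-format accessors take the state, and callers that used a bare pad config now wrap it. The marvell-ccic hunk further down shows the caller side; condensed:

	/* A one-pad try state wrapping a stack pad config. */
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg,
	};
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	int ret;

	ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
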
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index bd666c858fa1..0e312b0842d7 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1935,7 +1935,7 @@ int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
if (name && parent) {
buf->blob.data = buf->vaddr;
buf->blob.size = size;
- buf->dentry = debugfs_create_blob(name, 0644, parent,
+ buf->dentry = debugfs_create_blob(name, 0444, parent,
&buf->blob);
}
@@ -2660,7 +2660,7 @@ static int coda_open(struct file *file)
ctx->use_vdoa = false;
/* Power up and upload firmware if necessary */
- ret = pm_runtime_get_sync(dev->dev);
+ ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
goto err_pm_get;
@@ -2668,7 +2668,7 @@ static int coda_open(struct file *file)
ret = clk_prepare_enable(dev->clk_per);
if (ret)
- goto err_pm_get;
+ goto err_clk_enable;
ret = clk_prepare_enable(dev->clk_ahb);
if (ret)
@@ -2707,8 +2707,9 @@ err_ctx_init:
clk_disable_unprepare(dev->clk_ahb);
err_clk_ahb:
clk_disable_unprepare(dev->clk_per);
-err_pm_get:
+err_clk_enable:
pm_runtime_put_sync(dev->dev);
+err_pm_get:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
err_coda_name_init:
@@ -3232,7 +3233,7 @@ static int coda_probe(struct platform_device *pdev)
memset(dev->iram.vaddr, 0, dev->iram.size);
dev->iram.blob.data = dev->iram.vaddr;
dev->iram.blob.size = dev->iram.size;
- dev->iram.dentry = debugfs_create_blob("iram", 0644,
+ dev->iram.dentry = debugfs_create_blob("iram", 0444,
dev->debugfs_root,
&dev->iram.blob);
}
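
The pm_runtime_get_sync() to pm_runtime_resume_and_get() conversions running through the rest of this series all fix the same subtlety: get_sync() raises the usage count even when the resume fails, so every error path needed a balancing put, while resume_and_get() drops its reference on failure by itself. The same coda patch also drops the writable bit from the debugfs blobs (0644 to 0444), since blob files only implement read. A before/after sketch:

	/*
	 * Before: the usage count rises even when resume fails, so every
	 * error path must rebalance it explicitly.
	 */
	static int open_old(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}
		return 0;
	}

	/* After: the helper drops its own reference on failure. */
	static int open_new(struct device *dev)
	{
		return pm_runtime_resume_and_get(dev);
	}
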
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index d19bad997f30..bf3c3e76b921 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -47,7 +47,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)
ret = v4l2_subdev_call(vpbe_dev->venc,
core,
- ioctl,
+ command,
VENC_GET_FLD,
&val);
if (ret < 0) {
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 8caa084e5704..bde241c26d79 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
return ret;
}
-static long venc_ioctl(struct v4l2_subdev *sd,
- unsigned int cmd,
- void *arg)
+static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
u32 val;
@@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_core_ops venc_core_ops = {
- .ioctl = venc_ioctl,
+ .command = venc_command,
};
static const struct v4l2_subdev_video_ops venc_video_ops = {
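
The VENC change moves a driver-internal query off the subdev core .ioctl op, which is meant for userspace-defined private ioctls, onto .command, the hook for in-kernel cross-driver calls; the vpbe_display.c hunk above is the matching caller update. A minimal .command implementation sketch; venc_read_field() is a hypothetical stand-in:

	static long my_venc_command(struct v4l2_subdev *sd, unsigned int cmd,
				    void *arg)
	{
		switch (cmd) {
		case VENC_GET_FLD:	/* driver-private command code */
			*(u32 *)arg = venc_read_field(sd);	/* stand-in */
			return 0;
		default:
			return -ENOTTY;
		}
	}

	static const struct v4l2_subdev_core_ops my_core_ops = {
		.command = my_venc_command,
	};
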
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 8d2e165bf7de..c034e25dd9aa 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -99,7 +99,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* vpif_buffer_queue_setup : Callback function for buffer setup.
* @vq: vb2_queue ptr
* @nbuffers: ptr to number of buffers requested by application
- * @nplanes:: contains number of distinct video planes needed to hold a frame
+ * @nplanes: contains number of distinct video planes needed to hold a frame
* @sizes: contains the size (in bytes) of each plane.
* @alloc_devs: ptr to allocation context
*
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index e5f61d9b221d..59f6b782e104 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -101,7 +101,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* vpif_buffer_queue_setup : Callback function for buffer setup.
* @vq: vb2_queue ptr
* @nbuffers: ptr to number of buffers requested by application
- * @nplanes:: contains number of distinct video planes needed to hold a frame
+ * @nplanes: contains number of distinct video planes needed to hold a frame
* @sizes: contains the size (in bytes) of each plane.
* @alloc_devs: ptr to allocation context
*
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 9f41c2e7097a..f49f3322f835 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1210,18 +1210,19 @@ static int gsc_remove(struct platform_device *pdev)
struct gsc_dev *gsc = platform_get_drvdata(pdev);
int i;
- pm_runtime_get_sync(&pdev->dev);
-
gsc_unregister_m2m_device(gsc);
v4l2_device_unregister(&gsc->v4l2_dev);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
- for (i = 0; i < gsc->num_clocks; i++)
- clk_disable_unprepare(gsc->clock[i]);
- pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ for (i = 0; i < gsc->num_clocks; i++)
+ clk_disable_unprepare(gsc->clock[i]);
+
+ pm_runtime_set_suspended(&pdev->dev);
+
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 27a3c92c73bc..f1cf847d1cc2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -56,10 +56,8 @@ static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct gsc_ctx *ctx = q->drv_priv;
- int ret;
- ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
- return ret > 0 ? 0 : ret;
+ return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
}
static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 13c838d3f947..7ff4024003f4 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -478,11 +478,9 @@ static int fimc_capture_open(struct file *file)
goto unlock;
set_bit(ST_CAPT_BUSY, &fimc->state);
- ret = pm_runtime_get_sync(&fimc->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_sync(&fimc->pdev->dev);
+ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
+ if (ret < 0)
goto unlock;
- }
ret = v4l2_fh_open(file);
if (ret) {
@@ -1456,7 +1454,7 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
}
static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct fimc_fmt *fmt;
@@ -1469,7 +1467,7 @@ static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1478,7 +1476,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1510,7 +1508,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1533,7 +1531,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = V4L2_COLORSPACE_JPEG;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
return 0;
}
@@ -1576,7 +1574,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1603,10 +1601,10 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
@@ -1632,7 +1630,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1650,10 +1648,10 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 972d9601d236..1b24f5bfc4af 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -828,9 +828,9 @@ static int fimc_is_probe(struct platform_device *pdev)
goto err_irq;
}
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto err_pm;
+ goto err_irq;
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 612b9872afc8..83688a7982f7 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -275,7 +275,7 @@ static int isp_video_open(struct file *file)
if (ret < 0)
goto unlock;
- ret = pm_runtime_get_sync(&isp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&isp->pdev->dev);
if (ret < 0)
goto rel_fh;
@@ -293,7 +293,6 @@ static int isp_video_open(struct file *file)
if (!ret)
goto unlock;
rel_fh:
- pm_runtime_put_noidle(&isp->pdev->dev);
v4l2_fh_release(file);
unlock:
mutex_unlock(&isp->video_lock);
@@ -306,17 +305,20 @@ static int isp_video_release(struct file *file)
struct fimc_is_video *ivc = &isp->video_capture;
struct media_entity *entity = &ivc->ve.vdev.entity;
struct media_device *mdev = entity->graph_obj.mdev;
+ bool is_singular_file;
mutex_lock(&isp->video_lock);
- if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
+ is_singular_file = v4l2_fh_is_singular_file(file);
+
+ if (is_singular_file && ivc->streaming) {
media_pipeline_stop(entity);
ivc->streaming = 0;
}
_vb2_fop_release(file, NULL);
- if (v4l2_fh_is_singular_file(file)) {
+ if (is_singular_file) {
fimc_pipeline_call(&ivc->ve, close);
mutex_lock(&mdev->graph_mutex);
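
The release fix caches v4l2_fh_is_singular_file() before _vb2_fop_release() runs, because that helper tears down the file-handle state, so asking again afterwards no longer answers for the handle being closed. The pattern, condensed; the teardown helper name is hypothetical:

	static int release_fixed(struct file *file)
	{
		/* Query per-handle state before the handle goes away. */
		bool is_singular_file = v4l2_fh_is_singular_file(file);

		_vb2_fop_release(file, NULL);

		if (is_singular_file)
			finalize_capture_pipeline();	/* stand-in teardown */
		return 0;
	}
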
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index a77c49b18511..855235bea46d 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -106,7 +106,7 @@ static const struct media_entity_operations fimc_is_subdev_media_ops = {
};
static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -119,14 +119,14 @@ static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *mf = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
return 0;
}
@@ -156,7 +156,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
}
static void __isp_subdev_try_format(struct fimc_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
@@ -172,8 +172,9 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
} else {
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- format = v4l2_subdev_get_try_format(&isp->subdev, cfg,
- FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_get_try_format(&isp->subdev,
+ sd_state,
+ FIMC_ISP_SD_PAD_SINK);
else
format = &isp->sink_fmt;
@@ -191,7 +192,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
}
static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
@@ -203,10 +204,10 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
__func__, fmt->pad, mf->code, mf->width, mf->height);
mutex_lock(&isp->subdev_lock);
- __isp_subdev_try_format(isp, cfg, fmt);
+ __isp_subdev_try_format(isp, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
/* Propagate format to the source pads */
@@ -217,8 +218,10 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
for (pad = FIMC_ISP_SD_PAD_SRC_FIFO;
pad < FIMC_ISP_SD_PADS_NUM; pad++) {
format.pad = pad;
- __isp_subdev_try_format(isp, cfg, &format);
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ __isp_subdev_try_format(isp, sd_state,
+ &format);
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ pad);
*mf = format.format;
}
}
@@ -230,7 +233,8 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
isp->sink_fmt = *mf;
format.pad = FIMC_ISP_SD_PAD_SRC_DMA;
- __isp_subdev_try_format(isp, cfg, &format);
+ __isp_subdev_try_format(isp, sd_state,
+ &format);
isp->src_fmt = format.format;
__is_set_frame_size(is, &isp->src_fmt);
@@ -304,11 +308,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
pr_debug("on: %d\n", on);
if (on) {
- ret = pm_runtime_get_sync(&is->pdev->dev);
- if (ret < 0) {
- pm_runtime_put(&is->pdev->dev);
+ ret = pm_runtime_resume_and_get(&is->pdev->dev);
+ if (ret < 0)
return ret;
- }
+
set_bit(IS_ST_PWR_ON, &is->state);
ret = fimc_is_start_firmware(is);
@@ -371,15 +374,18 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
.field = V4L2_FIELD_NONE,
};
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SINK);
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_FIFO);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SRC_FIFO);
fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_DMA);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SRC_DMA);
*format = fmt;
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index fe20af3a7178..aaa3af0493ce 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -469,9 +469,9 @@ static int fimc_lite_open(struct file *file)
}
set_bit(ST_FLITE_IN_USE, &fimc->state);
- ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
if (ret < 0)
- goto err_pm;
+ goto err_in_use;
ret = v4l2_fh_open(file);
if (ret < 0)
@@ -499,6 +499,7 @@ static int fimc_lite_open(struct file *file)
v4l2_fh_release(file);
err_pm:
pm_runtime_put_sync(&fimc->pdev->dev);
+err_in_use:
clear_bit(ST_FLITE_IN_USE, &fimc->state);
unlock:
mutex_unlock(&fimc->lock);
@@ -549,7 +550,7 @@ static const struct v4l2_file_operations fimc_lite_fops = {
*/
static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct flite_drvdata *dd = fimc->dd;
@@ -573,14 +574,16 @@ static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
struct v4l2_rect *rect;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev, cfg,
- FLITE_SD_PAD_SINK);
+ sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev,
+ sd_state,
+ FLITE_SD_PAD_SINK);
mf->code = sink_fmt->code;
mf->colorspace = sink_fmt->colorspace;
- rect = v4l2_subdev_get_try_crop(&fimc->subdev, cfg,
- FLITE_SD_PAD_SINK);
+ rect = v4l2_subdev_get_try_crop(&fimc->subdev,
+ sd_state,
+ FLITE_SD_PAD_SINK);
} else {
mf->code = sink->fmt->mbus_code;
mf->colorspace = sink->fmt->colorspace;
@@ -1001,7 +1004,7 @@ static const struct media_entity_operations fimc_lite_subdev_media_ops = {
};
static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -1015,16 +1018,16 @@ static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad)
+ struct v4l2_subdev_state *sd_state, unsigned int pad)
{
if (pad != FLITE_SD_PAD_SINK)
pad = FLITE_SD_PAD_SOURCE_DMA;
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
}
static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1032,7 +1035,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
struct flite_frame *f = &fimc->inp_frame;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1055,7 +1058,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1077,17 +1080,18 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
return -EBUSY;
}
- ffmt = fimc_lite_subdev_try_fmt(fimc, cfg, fmt);
+ ffmt = fimc_lite_subdev_try_fmt(fimc, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *src_fmt;
- mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, sd_state, fmt->pad);
*mf = fmt->format;
if (fmt->pad == FLITE_SD_PAD_SINK) {
unsigned int pad = FLITE_SD_PAD_SOURCE_DMA;
- src_fmt = __fimc_lite_subdev_get_try_fmt(sd, cfg, pad);
+ src_fmt = __fimc_lite_subdev_get_try_fmt(sd, sd_state,
+ pad);
*src_fmt = *mf;
}
@@ -1115,7 +1119,7 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1127,7 +1131,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
return 0;
}
@@ -1150,7 +1154,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1164,7 +1168,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
fimc_lite_try_crop(fimc, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
spin_lock_irqsave(&fimc->slock, flags);
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index c9704a147e5c..df8e2aa454d8 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -73,17 +73,14 @@ static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
struct fimc_ctx *ctx = q->drv_priv;
- int ret;
- ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
- return ret > 0 ? 0 : ret;
+ return pm_runtime_resume_and_get(&ctx->fimc_dev->pdev->dev);
}
static void stop_streaming(struct vb2_queue *q)
{
struct fimc_ctx *ctx = q->drv_priv;
-
fimc_m2m_shutdown(ctx);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
pm_runtime_put(&ctx->fimc_dev->pdev->dev);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 13d192ba4aa6..3b8a24bb724c 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -512,11 +512,9 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
if (!fmd->pmf)
return -ENXIO;
- ret = pm_runtime_get_sync(fmd->pmf);
- if (ret < 0) {
- pm_runtime_put(fmd->pmf);
+ ret = pm_runtime_resume_and_get(fmd->pmf);
+ if (ret < 0)
return ret;
- }
fmd->num_sensors = 0;
@@ -1286,13 +1284,11 @@ static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
static int cam_clk_prepare(struct clk_hw *hw)
{
struct cam_clk *camclk = to_cam_clk(hw);
- int ret;
if (camclk->fmd->pmf == NULL)
return -ENODEV;
- ret = pm_runtime_get_sync(camclk->fmd->pmf);
- return ret < 0 ? ret : 0;
+ return pm_runtime_resume_and_get(camclk->fmd->pmf);
}
static void cam_clk_unprepare(struct clk_hw *hw)
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 1aac167abb17..32b23329b033 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -494,7 +494,7 @@ static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
struct device *dev = &state->pdev->dev;
if (on)
- return pm_runtime_get_sync(dev);
+ return pm_runtime_resume_and_get(dev);
return pm_runtime_put_sync(dev);
}
@@ -509,11 +509,9 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
s5pcsis_clear_counters(state);
- ret = pm_runtime_get_sync(&state->pdev->dev);
- if (ret && ret != 1) {
- pm_runtime_put_noidle(&state->pdev->dev);
+ ret = pm_runtime_resume_and_get(&state->pdev->dev);
+ if (ret < 0)
return ret;
- }
}
mutex_lock(&state->lock);
@@ -535,11 +533,11 @@ unlock:
if (!enable)
pm_runtime_put(&state->pdev->dev);
- return ret == 1 ? 0 : ret;
+ return ret;
}
static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5pcsis_formats))
@@ -567,23 +565,25 @@ static struct csis_pix_format const *s5pcsis_try_format(
}
static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
- struct csis_state *state, struct v4l2_subdev_pad_config *cfg,
+ struct csis_state *state, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&state->sd, cfg, 0) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&state->sd,
+ sd_state, 0) : NULL;
return &state->format;
}
-static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, cfg, fmt->which);
+ mf = __s5pcsis_get_format(state, sd_state, fmt->which);
if (fmt->pad == CSIS_PAD_SOURCE) {
if (mf) {
@@ -604,13 +604,14 @@ static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}
-static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, cfg, fmt->which);
+ mf = __s5pcsis_get_format(state, sd_state, fmt->which);
if (!mf)
return -EINVAL;
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
index 03b9264af068..755138063ee6 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
@@ -62,7 +62,7 @@
#include "mxc-jpeg-hw.h"
#include "mxc-jpeg.h"
-static struct mxc_jpeg_fmt mxc_formats[] = {
+static const struct mxc_jpeg_fmt mxc_formats[] = {
{
.name = "JPEG",
.fourcc = V4L2_PIX_FMT_JPEG,
@@ -341,7 +341,7 @@ static inline struct mxc_jpeg_ctx *mxc_jpeg_fh_to_ctx(struct v4l2_fh *fh)
return container_of(fh, struct mxc_jpeg_ctx, fh);
}
-static int enum_fmt(struct mxc_jpeg_fmt *mxc_formats, int n,
+static int enum_fmt(const struct mxc_jpeg_fmt *mxc_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
@@ -368,13 +368,13 @@ static int enum_fmt(struct mxc_jpeg_fmt *mxc_formats, int n,
return 0;
}
-static struct mxc_jpeg_fmt *mxc_jpeg_find_format(struct mxc_jpeg_ctx *ctx,
- u32 pixelformat)
+static const struct mxc_jpeg_fmt *mxc_jpeg_find_format(struct mxc_jpeg_ctx *ctx,
+ u32 pixelformat)
{
unsigned int k;
for (k = 0; k < MXC_JPEG_NUM_FORMATS; k++) {
- struct mxc_jpeg_fmt *fmt = &mxc_formats[k];
+ const struct mxc_jpeg_fmt *fmt = &mxc_formats[k];
if (fmt->fourcc == pixelformat)
return fmt;
@@ -1536,7 +1536,7 @@ static int mxc_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
MXC_JPEG_FMT_TYPE_RAW);
}
-static int mxc_jpeg_try_fmt(struct v4l2_format *f, struct mxc_jpeg_fmt *fmt,
+static int mxc_jpeg_try_fmt(struct v4l2_format *f, const struct mxc_jpeg_fmt *fmt,
struct mxc_jpeg_ctx *ctx, int q_type)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -1612,7 +1612,7 @@ static int mxc_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
- struct mxc_jpeg_fmt *fmt;
+ const struct mxc_jpeg_fmt *fmt;
u32 fourcc = f->fmt.pix_mp.pixelformat;
int q_type = (jpeg->mode == MXC_JPEG_DECODE) ?
@@ -1643,7 +1643,7 @@ static int mxc_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
- struct mxc_jpeg_fmt *fmt;
+ const struct mxc_jpeg_fmt *fmt;
u32 fourcc = f->fmt.pix_mp.pixelformat;
int q_type = (jpeg->mode == MXC_JPEG_ENCODE) ?
@@ -1890,7 +1890,7 @@ static const struct v4l2_file_operations mxc_jpeg_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
-static struct v4l2_m2m_ops mxc_jpeg_m2m_ops = {
+static const struct v4l2_m2m_ops mxc_jpeg_m2m_ops = {
.device_run = mxc_jpeg_device_run,
};
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
index 7697de490d2e..4c210852e876 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
@@ -51,7 +51,7 @@ enum mxc_jpeg_mode {
* @flags: flags describing format applicability
*/
struct mxc_jpeg_fmt {
- char *name;
+ const char *name;
u32 fourcc;
enum v4l2_jpeg_chroma_subsampling subsampling;
int nc;
@@ -74,14 +74,14 @@ struct mxc_jpeg_desc {
} __packed;
struct mxc_jpeg_q_data {
- struct mxc_jpeg_fmt *fmt;
- u32 sizeimage[MXC_JPEG_MAX_PLANES];
- u32 bytesperline[MXC_JPEG_MAX_PLANES];
- int w;
- int w_adjusted;
- int h;
- int h_adjusted;
- unsigned int sequence;
+ const struct mxc_jpeg_fmt *fmt;
+ u32 sizeimage[MXC_JPEG_MAX_PLANES];
+ u32 bytesperline[MXC_JPEG_MAX_PLANES];
+ int w;
+ int w_adjusted;
+ int h;
+ int h_adjusted;
+ unsigned int sequence;
};
struct mxc_jpeg_ctx {
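
Constifying mxc_formats moves the table into rodata, but the qualifier must propagate through every pointer that can reach it, the lookup helper, the try-fmt paths, and the q_data field, or the build warns about discarded qualifiers. The shape of the propagation, condensed:

	/* Lookup helper after the constification: const in, const out. */
	static const struct mxc_jpeg_fmt *find_format(u32 fourcc)
	{
		unsigned int i;

		for (i = 0; i < MXC_JPEG_NUM_FORMATS; i++)
			if (mxc_formats[i].fourcc == fourcc)
				return &mxc_formats[i];
		return NULL;
	}
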
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index baac86f3d153..9aa374fa8b36 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -486,6 +486,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
struct cafe_camera *cam;
struct mcam_camera *mcam;
struct v4l2_async_subdev *asd;
+ struct i2c_client *i2c_dev;
/*
* Start putting together one of our big camera structures.
@@ -561,11 +562,16 @@ static int cafe_pci_probe(struct pci_dev *pdev,
clkdev_create(mcam->mclk, "xclk", "%d-%04x",
i2c_adapter_id(cam->i2c_adapter), ov7670_info.addr);
- if (!IS_ERR(i2c_new_client_device(cam->i2c_adapter, &ov7670_info))) {
- cam->registered = 1;
- return 0;
+ i2c_dev = i2c_new_client_device(cam->i2c_adapter, &ov7670_info);
+ if (IS_ERR(i2c_dev)) {
+ ret = PTR_ERR(i2c_dev);
+ goto out_mccic_shutdown;
}
+ cam->registered = 1;
+ return 0;
+
+out_mccic_shutdown:
mccic_shutdown(mcam);
out_smbus_shutdown:
cafe_smbus_shutdown(cam);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 141bf5d97a04..070a0f3fc337 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -918,6 +918,7 @@ static int mclk_enable(struct clk_hw *hw)
struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
int mclk_src;
int mclk_div;
+ int ret;
/*
* Clock the sensor appropriately. Controller clock should
@@ -931,7 +932,9 @@ static int mclk_enable(struct clk_hw *hw)
mclk_div = 2;
}
- pm_runtime_get_sync(cam->dev);
+ ret = pm_runtime_resume_and_get(cam->dev);
+ if (ret < 0)
+ return ret;
clk_enable(cam->clk[0]);
mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
mcam_ctlr_power_up(cam);
@@ -1347,6 +1350,9 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
struct mcam_format_struct *f;
struct v4l2_pix_format *pix = &fmt->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1355,7 +1361,7 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
f = mcam_find_format(pix->pixelformat);
pix->pixelformat = f->pixelformat;
v4l2_fill_mbus_format(&format.format, pix, f->mbus_code);
- ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(pix, &format.format);
pix->bytesperline = pix->width * f->bpp;
switch (f->pixelformat) {
@@ -1611,7 +1617,9 @@ static int mcam_v4l_open(struct file *filp)
ret = sensor_call(cam, core, s_power, 1);
if (ret)
goto out;
- pm_runtime_get_sync(cam->dev);
+ ret = pm_runtime_resume_and_get(cam->dev);
+ if (ret < 0)
+ goto out;
__mcam_cam_reset(cam);
mcam_set_config_needed(cam, 1);
}
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 88a23bce569d..a89c7b206eef 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -920,7 +920,7 @@ static void mtk_jpeg_enc_device_run(void *priv)
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- ret = pm_runtime_get_sync(jpeg->dev);
+ ret = pm_runtime_resume_and_get(jpeg->dev);
if (ret < 0)
goto enc_end;
@@ -973,7 +973,7 @@ static void mtk_jpeg_dec_device_run(void *priv)
return;
}
- ret = pm_runtime_get_sync(jpeg->dev);
+ ret = pm_runtime_resume_and_get(jpeg->dev);
if (ret < 0)
goto dec_end;
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index ace4528cdc5e..f14779e7596e 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -391,12 +391,12 @@ static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
struct mtk_mdp_ctx *ctx = q->drv_priv;
int ret;
- ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
+ ret = pm_runtime_resume_and_get(&ctx->mdp_dev->pdev->dev);
if (ret < 0)
- mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
+ mtk_mdp_dbg(1, "[%d] pm_runtime_resume_and_get failed:%d",
ctx->id, ret);
- return 0;
+ return ret;
}
static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 147dfef1638d..f87dc47d9e63 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -126,7 +126,9 @@ static int fops_vcodec_open(struct file *file)
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
- mtk_vcodec_dec_pw_on(&dev->pm);
+ ret = mtk_vcodec_dec_pw_on(&dev->pm);
+ if (ret < 0)
+ goto err_load_fw;
/*
* Does nothing if firmware was already loaded.
*/
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index ddee7046ce42..6038db96f71c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -88,13 +88,15 @@ void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
put_device(dev->pm.larbvdec);
}
-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
{
int ret;
- ret = pm_runtime_get_sync(pm->dev);
+ ret = pm_runtime_resume_and_get(pm->dev);
if (ret)
- mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
+ mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
+
+ return ret;
}
void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
index 872d8bf8cfaf..280aeaefdb65 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -12,7 +12,7 @@
int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
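
Turning mtk_vcodec_dec_pw_on() from void into int is a three-file edit: the helper in dec_pm.c, the prototype in dec_pm.h above, and the caller in fops_vcodec_open(), which can now unwind through its existing error label. Reduced to a sketch with illustrative names and a trimmed body:

#include <linux/pm_runtime.h>
#include <linux/printk.h>

struct example_pm {
        struct device *dev;
};

/* was: void example_pw_on(struct example_pm *pm) */
int example_pw_on(struct example_pm *pm)
{
        int ret = pm_runtime_resume_and_get(pm->dev);

        if (ret)
                pr_err("resume failed: %d\n", ret);

        return ret;     /* callers can now bail out on failure */
}
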
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index d03cca95e99b..c6c7672fecfb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -25,7 +25,7 @@
#define MTK_V4L2_BENCHMARK 0
#define WAIT_INTR_TIMEOUT_MS 1000
-/**
+/*
* enum mtk_hw_reg_idx - MTK hw register base index
*/
enum mtk_hw_reg_idx {
@@ -49,7 +49,7 @@ enum mtk_hw_reg_idx {
NUM_MAX_VCODEC_REG_BASE
};
-/**
+/*
* enum mtk_instance_type - The type of an MTK Vcodec instance.
*/
enum mtk_instance_type {
@@ -74,7 +74,7 @@ enum mtk_instance_state {
MTK_STATE_ABORT = 4,
};
-/**
+/*
* enum mtk_encode_param - General encoding parameters type
*/
enum mtk_encode_param {
@@ -92,7 +92,7 @@ enum mtk_fmt_type {
MTK_FMT_FRAME = 2,
};
-/**
+/*
* struct mtk_video_fmt - Structure used to store information about pixelformats
*/
struct mtk_video_fmt {
@@ -102,7 +102,7 @@ struct mtk_video_fmt {
u32 flags;
};
-/**
+/*
* struct mtk_codec_framesizes - Structure used to store information about
* framesizes
*/
@@ -111,7 +111,7 @@ struct mtk_codec_framesizes {
struct v4l2_frmsize_stepwise stepwise;
};
-/**
+/*
* enum mtk_q_type - Type of queue
*/
enum mtk_q_type {
@@ -119,7 +119,7 @@ enum mtk_q_type {
MTK_Q_DATA_DST = 1,
};
-/**
+/*
* struct mtk_q_data - Structure used to store information about queue
*/
struct mtk_q_data {
@@ -168,7 +168,7 @@ struct mtk_enc_params {
unsigned int force_intra;
};
-/**
+/*
* struct mtk_vcodec_clk_info - Structure used to store clock name
*/
struct mtk_vcodec_clk_info {
@@ -176,7 +176,7 @@ struct mtk_vcodec_clk_info {
struct clk *vcodec_clk;
};
-/**
+/*
* struct mtk_vcodec_clk - Structure used to store vcodec clock information
*/
struct mtk_vcodec_clk {
@@ -184,7 +184,7 @@ struct mtk_vcodec_clk {
int clk_num;
};
-/**
+/*
* struct mtk_vcodec_pm - Power management data structure
*/
struct mtk_vcodec_pm {
@@ -255,6 +255,7 @@ struct vdec_pic_info {
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @decoded_frame_cnt: number of decoded frames
* @lock: protect variables accessed by V4L2 threads and worker thread such as
* mtk_video_dec_buf.
*/
@@ -302,6 +303,7 @@ struct mtk_vcodec_ctx {
enum mtk_chip {
MTK_MT8173,
MTK_MT8183,
+ MTK_MT8192,
};
/**
@@ -310,7 +312,7 @@ enum mtk_chip {
* @chip: chip this encoder is compatible with
*
* @uses_ext: whether the encoder uses the extended firmware messaging format
- * @min_birate: minimum supported encoding bitrate
+ * @min_bitrate: minimum supported encoding bitrate
* @max_bitrate: maximum supported encoding bitrate
* @capture_formats: array of supported capture formats
* @num_capture_formats: number of entries in capture_formats
@@ -347,10 +349,12 @@ struct mtk_vcodec_enc_pdata {
* @curr_ctx: The context that is waiting for codec hardware
*
* @reg_base: Mapped address of MTK Vcodec registers.
+ * @venc_pdata: encoder IC-specific data
*
* @fw_handler: used to communicate with the firmware.
* @id_counter: used to identify current opened instance
*
+ * @decode_workqueue: decode work queue
* @encode_workqueue: encode work queue
*
* @int_cond: used to identify interrupt condition happen
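
The /** to /* demotions in this header are not cosmetic: a comment opened with /** is picked up by scripts/kernel-doc, which warns (with W=1) when struct members or enum values are left undocumented. Blocks that do not describe every member are demoted to plain comments, while complete ones, like those gaining @decoded_frame_cnt and @venc_pdata above, stay in kernel-doc form. A minimal example of each, with an illustrative struct:

#include <linux/types.h>

/**
 * struct example_fmt - pixel format description
 * @fourcc:     V4L2 FOURCC identifying the format
 * @num_planes: number of memory planes
 */
struct example_fmt {
        u32 fourcc;
        u32 num_planes;
};

/* A single leading '*' keeps this comment out of kernel-doc. */
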
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 4831052f475d..416f356af363 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -19,23 +19,30 @@
#define MTK_VENC_MIN_W 160U
#define MTK_VENC_MIN_H 128U
-#define MTK_VENC_MAX_W 1920U
-#define MTK_VENC_MAX_H 1088U
+#define MTK_VENC_HD_MAX_W 1920U
+#define MTK_VENC_HD_MAX_H 1088U
+#define MTK_VENC_4K_MAX_W 3840U
+#define MTK_VENC_4K_MAX_H 2176U
+
#define DFT_CFG_WIDTH MTK_VENC_MIN_W
#define DFT_CFG_HEIGHT MTK_VENC_MIN_H
#define MTK_MAX_CTRLS_HINT 20
#define MTK_DEFAULT_FRAMERATE_NUM 1001
#define MTK_DEFAULT_FRAMERATE_DENOM 30000
+#define MTK_VENC_4K_CAPABILITY_ENABLE BIT(0)
static void mtk_venc_worker(struct work_struct *work);
-static const struct v4l2_frmsize_stepwise mtk_venc_framesizes = {
- MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
- MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16,
+static const struct v4l2_frmsize_stepwise mtk_venc_hd_framesizes = {
+ MTK_VENC_MIN_W, MTK_VENC_HD_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_HD_MAX_H, 16,
};
-#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_venc_framesizes)
+static const struct v4l2_frmsize_stepwise mtk_venc_4k_framesizes = {
+ MTK_VENC_MIN_W, MTK_VENC_4K_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_4K_MAX_H, 16,
+};
static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
{
@@ -151,17 +158,22 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(fh);
if (fsize->index != 0)
return -EINVAL;
fmt = mtk_venc_find_format(fsize->pixel_format,
- fh_to_ctx(fh)->dev->venc_pdata);
+ ctx->dev->venc_pdata);
if (!fmt)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = mtk_venc_framesizes;
+
+ if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE)
+ fsize->stepwise = mtk_venc_4k_framesizes;
+ else
+ fsize->stepwise = mtk_venc_hd_framesizes;
return 0;
}
@@ -248,7 +260,7 @@ static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
/* V4L2 specification suggests the driver corrects the format struct if any of
* the dimensions is unsupported
*/
-static int vidioc_try_fmt(struct v4l2_format *f,
+static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -260,13 +272,22 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
int tmp_w, tmp_h;
+ unsigned int max_width, max_height;
+
+ if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) {
+ max_width = MTK_VENC_4K_MAX_W;
+ max_height = MTK_VENC_4K_MAX_H;
+ } else {
+ max_width = MTK_VENC_HD_MAX_W;
+ max_height = MTK_VENC_HD_MAX_H;
+ }
pix_fmt_mp->height = clamp(pix_fmt_mp->height,
MTK_VENC_MIN_H,
- MTK_VENC_MAX_H);
+ max_height);
pix_fmt_mp->width = clamp(pix_fmt_mp->width,
MTK_VENC_MIN_W,
- MTK_VENC_MAX_W);
+ max_width);
/* find the next closest rectangle: width aligned to 16, height
* aligned to 32, size aligned to 64
@@ -275,16 +296,16 @@ static int vidioc_try_fmt(struct v4l2_format *f,
tmp_h = pix_fmt_mp->height;
v4l_bound_align_image(&pix_fmt_mp->width,
MTK_VENC_MIN_W,
- MTK_VENC_MAX_W, 4,
+ max_width, 4,
&pix_fmt_mp->height,
MTK_VENC_MIN_H,
- MTK_VENC_MAX_H, 5, 6);
+ max_height, 5, 6);
if (pix_fmt_mp->width < tmp_w &&
- (pix_fmt_mp->width + 16) <= MTK_VENC_MAX_W)
+ (pix_fmt_mp->width + 16) <= max_width)
pix_fmt_mp->width += 16;
if (pix_fmt_mp->height < tmp_h &&
- (pix_fmt_mp->height + 32) <= MTK_VENC_MAX_H)
+ (pix_fmt_mp->height + 32) <= max_height)
pix_fmt_mp->height += 32;
mtk_v4l2_debug(0,
@@ -405,7 +426,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
}
q_data->fmt = fmt;
- ret = vidioc_try_fmt(f, q_data->fmt);
+ ret = vidioc_try_fmt(ctx, f, q_data->fmt);
if (ret)
return ret;
@@ -443,7 +464,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret, i;
const struct mtk_video_fmt *fmt;
- struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq) {
@@ -468,20 +488,13 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
f->fmt.pix.pixelformat = fmt->fourcc;
}
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- MTK_VENC_MIN_H,
- MTK_VENC_MAX_H);
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- MTK_VENC_MIN_W,
- MTK_VENC_MAX_W);
-
- q_data->visible_width = f->fmt.pix_mp.width;
- q_data->visible_height = f->fmt.pix_mp.height;
- q_data->fmt = fmt;
- ret = vidioc_try_fmt(f, q_data->fmt);
+ ret = vidioc_try_fmt(ctx, f, fmt);
if (ret)
return ret;
+ q_data->fmt = fmt;
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
@@ -553,7 +566,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
f->fmt.pix_mp.quantization = ctx->quantization;
f->fmt.pix_mp.xfer_func = ctx->xfer_func;
- return vidioc_try_fmt(f, fmt);
+ return vidioc_try_fmt(ctx, f, fmt);
}
static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
@@ -575,7 +588,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
- return vidioc_try_fmt(f, fmt);
+ return vidioc_try_fmt(ctx, f, fmt);
}
static int vidioc_venc_g_selection(struct file *file, void *priv,
@@ -1179,16 +1192,16 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
v4l_bound_align_image(&q_data->coded_width,
MTK_VENC_MIN_W,
- MTK_VENC_MAX_W, 4,
+ MTK_VENC_HD_MAX_W, 4,
&q_data->coded_height,
MTK_VENC_MIN_H,
- MTK_VENC_MAX_H, 5, 6);
+ MTK_VENC_HD_MAX_H, 5, 6);
if (q_data->coded_width < DFT_CFG_WIDTH &&
- (q_data->coded_width + 16) <= MTK_VENC_MAX_W)
+ (q_data->coded_width + 16) <= MTK_VENC_HD_MAX_W)
q_data->coded_width += 16;
if (q_data->coded_height < DFT_CFG_HEIGHT &&
- (q_data->coded_height + 32) <= MTK_VENC_MAX_H)
+ (q_data->coded_height + 32) <= MTK_VENC_HD_MAX_H)
q_data->coded_height += 32;
q_data->sizeimage[0] =
@@ -1218,6 +1231,12 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
{
const struct v4l2_ctrl_ops *ops = &mtk_vcodec_enc_ctrl_ops;
struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl;
+ u8 h264_max_level;
+
+ if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE)
+ h264_max_level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+ else
+ h264_max_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
v4l2_ctrl_handler_init(handler, MTK_MAX_CTRLS_HINT);
@@ -1248,8 +1267,9 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
- V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
- 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ h264_max_level,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
if (handler->error) {
mtk_v4l2_err("Init control handler fail %d",
handler->error);
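
The encoder hunks replace compile-time maxima with limits derived from an enc_capability bit, both for VIDIOC_ENUM_FRAMESIZES and for the H.264 level ceiling handed to v4l2_ctrl_new_std_menu(). A sketch of the framesize half; the flag name and tables are illustrative, the stepwise values mirror the hunks:

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/videodev2.h>

#define EXAMPLE_4K_CAP  BIT(0)

static const struct v4l2_frmsize_stepwise example_hd_sizes = {
        .min_width = 160,  .max_width = 1920, .step_width = 16,
        .min_height = 128, .max_height = 1088, .step_height = 16,
};

static const struct v4l2_frmsize_stepwise example_4k_sizes = {
        .min_width = 160,  .max_width = 3840, .step_width = 16,
        .min_height = 128, .max_height = 2176, .step_height = 16,
};

static int example_enum_framesizes(u32 enc_capability,
                                   struct v4l2_frmsizeenum *fsize)
{
        if (fsize->index != 0)
                return -EINVAL;

        fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
        fsize->stepwise = (enc_capability & EXAMPLE_4K_CAP) ?
                example_4k_sizes : example_hd_sizes;
        return 0;
}
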
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 7d7b8cfc2cc5..45d1870c83dd 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -361,6 +361,9 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
+ if (of_get_property(pdev->dev.of_node, "dma-ranges", NULL))
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
+
ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, 1);
if (ret) {
mtk_v4l2_err("Failed to register video device");
@@ -422,12 +425,26 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.core_id = VENC_SYS,
};
+static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
+ .chip = MTK_MT8192,
+ .uses_ext = true,
+ /* MT8192 supports the same capture formats as MT8183 */
+ .capture_formats = mtk_video_formats_capture_mt8183,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
+ /* MT8192 supports the same output formats as MT8173 */
+ .output_formats = mtk_video_formats_output_mt8173,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .min_bitrate = 64,
+ .max_bitrate = 100000000,
+ .core_id = VENC_SYS,
+};
static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc",
.data = &mt8173_avc_pdata},
{.compatible = "mediatek,mt8173-vcodec-enc-vp8",
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
+ {.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
{},
};
MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
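
Adding MT8192 needs no new probe logic because each OF match entry carries per-SoC data in .data; probe fetches it with of_device_get_match_data(). The dma-ranges test widens the DMA mask only on SoCs whose device tree declares such a window. A sketch under those assumptions; the pdata layout is illustrative:

#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_pdata {
        unsigned int min_bitrate;       /* illustrative field */
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_pdata *pdata =
                of_device_get_match_data(&pdev->dev);

        if (!pdata)
                return -ENODEV;

        /* Widen the mask only where the DT provides a dma-ranges window. */
        if (of_get_property(pdev->dev.of_node, "dma-ranges", NULL))
                dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));

        return 0;
}
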
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
index 47a1c1c0fd04..68e8d5cb16d7 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -7,7 +7,7 @@
#ifndef _VDEC_IPI_MSG_H_
#define _VDEC_IPI_MSG_H_
-/**
+/*
* enum vdec_ipi_msgid - message id between AP and VPU
* @AP_IPIMSG_XXX : AP to VPU cmd message id
* @VPU_IPIMSG_XXX_ACK : VPU ack AP cmd message id
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index d0123dfc5f93..b6a4f2074fa5 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -215,6 +215,10 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
return 41;
case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
default:
mtk_vcodec_debug(inst, "unsupported level %d", level);
return 31;
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
index 5f53d4255c36..587a2cf15b76 100644
--- a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
@@ -12,7 +12,7 @@
#define AP_IPIMSG_VENC_BASE 0xC000
#define VPU_IPIMSG_VENC_BASE 0xD000
-/**
+/*
* enum venc_ipi_msg_id - message id between AP and VPU
* (ipi stands for inter-processor interrupt)
* @AP_IPIMSG_ENC_XXX: AP to VPU cmd message id
@@ -111,7 +111,7 @@ struct venc_ap_ipi_msg_deinit {
uint32_t vpu_inst_addr;
};
-/**
+/*
* enum venc_ipi_msg_status - VPU ack AP cmd status
*/
enum venc_ipi_msg_status {
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c8a56271b259..ec290dde59cf 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -821,13 +821,11 @@ static int mtk_vpu_probe(struct platform_device *pdev)
return -ENOMEM;
vpu->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
- vpu->reg.tcm = devm_ioremap_resource(dev, res);
+ vpu->reg.tcm = devm_platform_ioremap_resource_byname(pdev, "tcm");
if (IS_ERR((__force void *)vpu->reg.tcm))
return PTR_ERR((__force void *)vpu->reg.tcm);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_reg");
- vpu->reg.cfg = devm_ioremap_resource(dev, res);
+ vpu->reg.cfg = devm_platform_ioremap_resource_byname(pdev, "cfg_reg");
if (IS_ERR((__force void *)vpu->reg.cfg))
return PTR_ERR((__force void *)vpu->reg.cfg);
@@ -987,6 +985,12 @@ static int mtk_vpu_suspend(struct device *dev)
return ret;
}
+ if (!vpu_running(vpu)) {
+ vpu_clock_disable(vpu);
+ clk_unprepare(vpu->clk);
+ return 0;
+ }
+
mutex_lock(&vpu->vpu_mutex);
/* disable vpu timer interrupt */
vpu_cfg_writel(vpu, vpu_cfg_readl(vpu, VPU_INT_STATUS) | VPU_IDLE_STATE,
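
devm_platform_ioremap_resource_byname() folds the platform_get_resource_byname() plus devm_ioremap_resource() pair into a single call with identical error semantics. The before and after, reduced to a sketch with an illustrative resource name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map_regs(struct platform_device *pdev,
                            void __iomem **base)
{
        /*
         * Old two-step form:
         *   res   = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
         *   *base = devm_ioremap_resource(&pdev->dev, res);
         */
        *base = devm_platform_ioremap_resource_byname(pdev, "cfg");

        return PTR_ERR_OR_ZERO(*base);
}
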
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 4e8905ef362f..108b5e9f82cb 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -29,7 +29,8 @@
#define CCDC_MIN_HEIGHT 32
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ccdc_fmts[] = {
@@ -1936,21 +1937,25 @@ static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccdc->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ccdc->subdev, sd_state,
+ pad);
else
return &ccdc->formats[pad];
}
static struct v4l2_rect *
-__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_crop(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&ccdc->subdev, cfg, CCDC_PAD_SOURCE_OF);
+ return v4l2_subdev_get_try_crop(&ccdc->subdev, sd_state,
+ CCDC_PAD_SOURCE_OF);
else
return &ccdc->crop;
}
@@ -1963,7 +1968,8 @@ __ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
* @fmt: Format
*/
static void
-ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ccdc_try_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1999,7 +2005,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
case CCDC_PAD_SOURCE_OF:
pixelcode = fmt->code;
field = fmt->field;
- *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ which);
/* In SYNC mode the bridge converts YUV formats from 2X8 to
* 1X16. In BT.656 no such conversion occurs. As we don't know
@@ -2024,7 +2031,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
}
/* Hardcode the output size to the crop rectangle size. */
- crop = __ccdc_get_crop(ccdc, cfg, which);
+ crop = __ccdc_get_crop(ccdc, sd_state, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -2041,7 +2048,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
break;
case CCDC_PAD_SOURCE_VP:
- *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ which);
/* The video port interface truncates the data to 10 bits. */
info = omap3isp_video_format_info(fmt->code);
@@ -2118,7 +2126,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
* return -EINVAL or zero on success
*/
static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2133,7 +2141,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
break;
case CCDC_PAD_SOURCE_OF:
- format = __ccdc_get_format(ccdc, cfg, code->pad,
+ format = __ccdc_get_format(ccdc, sd_state, code->pad,
code->which);
if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
@@ -2164,7 +2172,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccdc_get_format(ccdc, cfg, code->pad,
+ format = __ccdc_get_format(ccdc, sd_state, code->pad,
code->which);
/* A pixel code equal to 0 means that the video port doesn't
@@ -2184,7 +2192,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2196,7 +2204,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -2206,7 +2214,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -2224,7 +2232,8 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2240,12 +2249,13 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
break;
default:
@@ -2266,7 +2276,8 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2285,17 +2296,19 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
return 0;
}
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK, sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
- *__ccdc_get_crop(ccdc, cfg, sel->which) = sel->r;
+ *__ccdc_get_crop(ccdc, sd_state, sel->which) = sel->r;
/* Update the source format. */
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, sel->which);
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF,
+ sel->which);
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
+ sel->which);
return 0;
}
@@ -2309,13 +2322,14 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2332,24 +2346,25 @@ static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccdc_try_format(ccdc, cfg, fmt->pad, &fmt->format, fmt->which);
+ ccdc_try_format(ccdc, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCDC_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __ccdc_get_crop(ccdc, cfg, fmt->which);
+ crop = __ccdc_get_crop(ccdc, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2358,16 +2373,16 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
ccdc_try_crop(ccdc, &fmt->format, crop);
/* Update the source formats. */
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF,
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format,
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
fmt->which);
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_VP,
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_VP, format,
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -2454,7 +2469,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccdc_set_format(sd, fh ? fh->pad : NULL, &format);
+ ccdc_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
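
Every omap3isp conversion that follows is the same mechanical substitution: pad operations take a struct v4l2_subdev_state instead of a bare struct v4l2_subdev_pad_config, and TRY formats and crops are looked up through the state. The recurring helper shape, sketched with an illustrative wrapper around the real v4l2_subdev_get_try_format():

#include <media/v4l2-subdev.h>

static struct v4l2_mbus_framefmt *
example_get_format(struct v4l2_subdev *sd,
                   struct v4l2_subdev_state *sd_state,
                   struct v4l2_mbus_framefmt *active_fmts,
                   unsigned int pad,
                   enum v4l2_subdev_format_whence which)
{
        if (which == V4L2_SUBDEV_FORMAT_TRY)
                return v4l2_subdev_get_try_format(sd, sd_state, pad);

        /* ACTIVE formats still live in the driver's own array. */
        return &active_fmts[pad];
}
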
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index d0a49cdfd22d..acb58b6ddba1 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -618,11 +618,13 @@ static const unsigned int ccp2_fmts[] = {
* return format structure or NULL on error
*/
static struct v4l2_mbus_framefmt *
-__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, enum v4l2_subdev_format_whence which)
+__ccp2_get_format(struct isp_ccp2_device *ccp2,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccp2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ccp2->subdev, sd_state,
+ pad);
else
return &ccp2->formats[pad];
}
@@ -636,7 +638,8 @@ __ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *c
* @which : wanted subdev format
*/
static void ccp2_try_format(struct isp_ccp2_device *ccp2,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -670,7 +673,8 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
* When the CCP2 write-to-memory feature is added, this
* should be changed accordingly.
*/
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK, which);
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
break;
@@ -688,7 +692,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
* return -EINVAL or zero on success
*/
static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -703,8 +707,8 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK,
- code->which);
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
+ code->which);
code->code = format->code;
}
@@ -712,7 +716,7 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -724,7 +728,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -734,7 +738,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -748,13 +752,14 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int ccp2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -769,25 +774,27 @@ static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* @fmt : pointer to v4l2 subdev format structure
* returns zero
*/
-static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int ccp2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccp2_try_format(ccp2, cfg, fmt->pad, &fmt->format, fmt->which);
+ ccp2_try_format(ccp2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCP2_PAD_SINK) {
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SOURCE,
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- ccp2_try_format(ccp2, cfg, CCP2_PAD_SOURCE, format, fmt->which);
+ ccp2_try_format(ccp2, sd_state, CCP2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -812,7 +819,7 @@ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccp2_set_format(sd, fh ? fh->pad : NULL, &format);
+ ccp2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index fd493c5e4e24..6302e0c94034 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -827,17 +827,20 @@ static const struct isp_video_operations csi2_ispvideo_ops = {
*/
static struct v4l2_mbus_framefmt *
-__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+__csi2_get_format(struct isp_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
else
return &csi2->formats[pad];
}
static void
-csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+csi2_try_format(struct isp_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -867,7 +870,8 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -893,7 +897,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -906,7 +910,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
code->which);
switch (code->index) {
case 0:
@@ -930,7 +934,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -942,7 +946,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -952,7 +956,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -966,13 +970,14 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -987,25 +992,27 @@ static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -1030,7 +1037,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh ? fh->pad : NULL, &format);
+ csi2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 607b7685c982..53aedec7990d 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1679,21 +1679,25 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+__preview_get_format(struct isp_prev_device *prev,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&prev->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&prev->subdev, sd_state,
+ pad);
else
return &prev->formats[pad];
}
static struct v4l2_rect *
-__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+__preview_get_crop(struct isp_prev_device *prev,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&prev->subdev, cfg, PREV_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&prev->subdev, sd_state,
+ PREV_PAD_SINK);
else
return &prev->crop;
}
@@ -1729,7 +1733,8 @@ static const unsigned int preview_output_fmts[] = {
* engine limits and the format and crop rectangles on other pads.
*/
static void preview_try_format(struct isp_prev_device *prev,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1770,7 +1775,8 @@ static void preview_try_format(struct isp_prev_device *prev,
case PREV_PAD_SOURCE:
pixelcode = fmt->code;
- *fmt = *__preview_get_format(prev, cfg, PREV_PAD_SINK, which);
+ *fmt = *__preview_get_format(prev, sd_state, PREV_PAD_SINK,
+ which);
switch (pixelcode) {
case MEDIA_BUS_FMT_YUYV8_1X16:
@@ -1788,7 +1794,7 @@ static void preview_try_format(struct isp_prev_device *prev,
* is not supported yet, hardcode the output size to the crop
* rectangle size.
*/
- crop = __preview_get_crop(prev, cfg, which);
+ crop = __preview_get_crop(prev, sd_state, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -1862,7 +1868,7 @@ static void preview_try_crop(struct isp_prev_device *prev,
* return -EINVAL or zero on success
*/
static int preview_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -1886,7 +1892,7 @@ static int preview_enum_mbus_code(struct v4l2_subdev *sd,
}
static int preview_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1898,7 +1904,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1908,7 +1914,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1926,7 +1932,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1942,13 +1948,13 @@ static int preview_get_selection(struct v4l2_subdev *sd,
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __preview_get_format(prev, cfg, PREV_PAD_SINK,
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
sel->which);
preview_try_crop(prev, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ sel->r = *__preview_get_crop(prev, sd_state, sel->which);
break;
default:
@@ -1969,7 +1975,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1988,17 +1994,20 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ sel->r = *__preview_get_crop(prev, sd_state, sel->which);
return 0;
}
- format = __preview_get_format(prev, cfg, PREV_PAD_SINK, sel->which);
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
+ sel->which);
preview_try_crop(prev, format, &sel->r);
- *__preview_get_crop(prev, cfg, sel->which) = sel->r;
+ *__preview_get_crop(prev, sd_state, sel->which) = sel->r;
/* Update the source format. */
- format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE, sel->which);
- preview_try_format(prev, cfg, PREV_PAD_SOURCE, format, sel->which);
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
+ sel->which);
+ preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
+ sel->which);
return 0;
}
@@ -2010,13 +2019,14 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int preview_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2031,24 +2041,25 @@ static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int preview_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- preview_try_format(prev, cfg, fmt->pad, &fmt->format, fmt->which);
+ preview_try_format(prev, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == PREV_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __preview_get_crop(prev, cfg, fmt->which);
+ crop = __preview_get_crop(prev, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2057,9 +2068,9 @@ static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
preview_try_crop(prev, &fmt->format, crop);
/* Update the source format. */
- format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE,
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
fmt->which);
- preview_try_format(prev, cfg, PREV_PAD_SOURCE, format,
+ preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
fmt->which);
}
@@ -2086,7 +2097,7 @@ static int preview_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- preview_set_format(sd, fh ? fh->pad : NULL, &format);
+ preview_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 78d9dd7ea2da..ed2fb0c7a57e 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -114,11 +114,12 @@ static const struct isprsz_coef filter_coefs = {
* return the format structure for the requested pad
*/
static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+__resizer_get_format(struct isp_res_device *res,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&res->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&res->subdev, sd_state, pad);
else
return &res->formats[pad];
}
@@ -130,11 +131,13 @@ __resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *
* @which : wanted subdev crop rectangle
*/
static struct v4l2_rect *
-__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+__resizer_get_crop(struct isp_res_device *res,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&res->subdev, cfg, RESZ_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&res->subdev, sd_state,
+ RESZ_PAD_SINK);
else
return &res->crop.request;
}
@@ -1220,7 +1223,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1231,9 +1234,9 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
if (sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
sel->which);
- format_source = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format_source = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
sel->which);
switch (sel->target) {
@@ -1248,7 +1251,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__resizer_get_crop(res, cfg, sel->which);
+ sel->r = *__resizer_get_crop(res, sd_state, sel->which);
resizer_calc_ratios(res, &sel->r, format_source, &ratio);
break;
@@ -1273,7 +1276,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1287,9 +1290,9 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
sel->which);
- format_source = *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format_source = *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
sel->which);
dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1307,7 +1310,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
* stored the mangled rectangle.
*/
resizer_try_crop(format_sink, &format_source, &sel->r);
- *__resizer_get_crop(res, cfg, sel->which) = sel->r;
+ *__resizer_get_crop(res, sd_state, sel->which) = sel->r;
resizer_calc_ratios(res, &sel->r, &format_source, &ratio);
dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1317,7 +1320,8 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
format_source.width, format_source.height);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
+ sel->which) =
format_source;
return 0;
}
@@ -1328,7 +1332,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
*/
spin_lock_irqsave(&res->lock, flags);
- *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, sel->which) =
format_source;
res->ratio = ratio;
@@ -1371,7 +1375,8 @@ static unsigned int resizer_max_in_width(struct isp_res_device *res)
* @which : wanted subdev format
*/
static void resizer_try_format(struct isp_res_device *res,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1392,10 +1397,11 @@ static void resizer_try_format(struct isp_res_device *res,
break;
case RESZ_PAD_SOURCE:
- format = __resizer_get_format(res, cfg, RESZ_PAD_SINK, which);
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
+ which);
fmt->code = format->code;
- crop = *__resizer_get_crop(res, cfg, which);
+ crop = *__resizer_get_crop(res, sd_state, which);
resizer_calc_ratios(res, &crop, fmt, &ratio);
break;
}
@@ -1412,7 +1418,7 @@ static void resizer_try_format(struct isp_res_device *res,
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1427,7 +1433,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
code->which);
code->code = format->code;
}
@@ -1436,7 +1442,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1448,7 +1454,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1458,7 +1464,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1472,13 +1478,14 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int resizer_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1493,33 +1500,34 @@ static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int resizer_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- resizer_try_format(res, cfg, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(res, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == RESZ_PAD_SINK) {
/* reset crop rectangle */
- crop = __resizer_get_crop(res, cfg, fmt->which);
+ crop = __resizer_get_crop(res, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
crop->height = fmt->format.height;
/* Propagate the format from sink to source */
- format = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- resizer_try_format(res, cfg, RESZ_PAD_SOURCE, format,
+ resizer_try_format(res, sd_state, RESZ_PAD_SOURCE, format,
fmt->which);
}
@@ -1570,7 +1578,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh ? fh->pad : NULL, &format);
+ resizer_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index dd510ee9b58a..ec4c010644ca 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1792,6 +1792,9 @@ static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
const struct pxa_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1816,7 +1819,7 @@ static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
v4l2_fill_mbus_format(mf, pix, xlate->code);
- ret = sensor_call(pcdev, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(pcdev, pad, set_fmt, &pad_state, &format);
if (ret < 0)
return ret;
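
Bridge drivers that used to hand a bare pad config to a TRY set_fmt now wrap it in an on-stack state, as both try_fmt_vid_cap hunks above do. A minimal sketch; the subdev handle is assumed to come from the bridge driver:

#include <media/v4l2-subdev.h>

static int example_try_fmt(struct v4l2_subdev *sd,
                           struct v4l2_subdev_format *format)
{
        struct v4l2_subdev_pad_config pad_cfg = {};
        struct v4l2_subdev_state pad_state = {
                .pads = &pad_cfg,       /* one pad suffices for a TRY call */
        };

        format->which = V4L2_SUBDEV_FORMAT_TRY;
        return v4l2_subdev_call(sd, pad, set_fmt, &pad_state, format);
}
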
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index cc11fbfdae13..a1637b78568b 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -156,11 +156,9 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
ret = regulator_enable(csid->vdda);
if (ret < 0) {
@@ -247,12 +245,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__csid_get_format(struct csid_device *csid,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csid->subdev, sd_state,
+ pad);
return &csid->fmt[pad];
}
@@ -266,7 +265,7 @@ __csid_get_format(struct csid_device *csid,
* @which: wanted subdev format
*/
static void csid_try_format(struct csid_device *csid,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -299,7 +298,7 @@ static void csid_try_format(struct csid_device *csid,
/* keep pad formats in sync */
u32 code = fmt->code;
- *fmt = *__csid_get_format(csid, cfg,
+ *fmt = *__csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK, which);
fmt->code = csid->ops->src_pad_code(csid, fmt->code, 0, code);
} else {
@@ -333,7 +332,7 @@ static void csid_try_format(struct csid_device *csid,
* return -EINVAL or zero on success
*/
static int csid_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
@@ -347,7 +346,7 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
if (csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = __csid_get_format(csid, cfg,
+ sink_fmt = __csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK,
code->which);
@@ -374,7 +373,7 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csid_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
@@ -386,7 +385,7 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -396,7 +395,7 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -412,13 +411,13 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csid_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -436,26 +435,26 @@ static int csid_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csid_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which);
+ csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_CSID_PAD_SINK) {
- format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC,
+ format = __csid_get_format(csid, sd_state, MSM_CSID_PAD_SRC,
fmt->which);
*format = fmt->format;
- csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format,
+ csid_try_format(csid, sd_state, MSM_CSID_PAD_SRC, format,
fmt->which);
}
@@ -484,7 +483,7 @@ static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return csid_set_format(sd, fh ? fh->pad : NULL, &format);
+ return csid_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
@@ -566,8 +565,7 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
/* Memory */
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
- csid->base = devm_ioremap_resource(dev, r);
+ csid->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(csid->base))
return PTR_ERR(csid->base);
@@ -584,14 +582,13 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSID_NAME, csid->id);
ret = devm_request_irq(dev, csid->irq, csid->ops->isr,
- IRQF_TRIGGER_RISING, csid->irq_name, csid);
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
+ csid->irq_name, csid);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
- disable_irq(csid->irq);
-
/* Clocks */
csid->nclocks = 0;
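The pm_runtime hunks in these files follow one pattern: pm_runtime_get_sync() raises the usage count even when the resume fails, so every error path had to drop it by hand, while pm_runtime_resume_and_get() drops it internally on failure and returns 0 or a negative error. Before and after, schematically:

    /* before: the failed get still took a reference */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        pm_runtime_put_sync(dev);
        return ret;
    }

    /* after: nothing to undo on failure */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return ret;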
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index b3c3bf19e522..24eec16197e7 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -197,11 +197,9 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
if (on) {
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
ret = csiphy_set_clock_rates(csiphy);
if (ret < 0) {
@@ -340,12 +338,13 @@ static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__csiphy_get_format(struct csiphy_device *csiphy,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csiphy->subdev, sd_state,
+ pad);
return &csiphy->fmt[pad];
}
@@ -359,7 +358,7 @@ __csiphy_get_format(struct csiphy_device *csiphy,
* @which: wanted subdev format
*/
static void csiphy_try_format(struct csiphy_device *csiphy,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -389,7 +388,8 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
case MSM_CSIPHY_PAD_SRC:
/* Set and return a format same as sink pad */
- *fmt = *__csiphy_get_format(csiphy, cfg, MSM_CSID_PAD_SINK,
+ *fmt = *__csiphy_get_format(csiphy, sd_state,
+ MSM_CSIPHY_PAD_SINK,
which);
break;
@@ -404,7 +404,7 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
* return -EINVAL or zero on success
*/
static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
@@ -419,7 +419,8 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK,
+ format = __csiphy_get_format(csiphy, sd_state,
+ MSM_CSIPHY_PAD_SINK,
code->which);
code->code = format->code;
@@ -436,7 +437,7 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
@@ -448,7 +449,7 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -458,7 +459,7 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -474,13 +475,13 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csiphy_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -498,26 +499,29 @@ static int csiphy_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csiphy_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csiphy_try_format(csiphy, cfg, fmt->pad, &fmt->format, fmt->which);
+ csiphy_try_format(csiphy, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_CSIPHY_PAD_SINK) {
- format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC,
+ format = __csiphy_get_format(csiphy, sd_state,
+ MSM_CSIPHY_PAD_SRC,
fmt->which);
*format = fmt->format;
- csiphy_try_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, format,
+ csiphy_try_format(csiphy, sd_state, MSM_CSIPHY_PAD_SRC,
+ format,
fmt->which);
}
@@ -547,7 +551,7 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
}
};
- return csiphy_set_format(sd, fh ? fh->pad : NULL, &format);
+ return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
@@ -591,16 +595,14 @@ int msm_csiphy_subdev_init(struct camss *camss,
/* Memory */
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
- csiphy->base = devm_ioremap_resource(dev, r);
+ csiphy->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(csiphy->base))
return PTR_ERR(csiphy->base);
if (camss->version == CAMSS_8x16 ||
camss->version == CAMSS_8x96) {
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res->reg[1]);
- csiphy->base_clk_mux = devm_ioremap_resource(dev, r);
+ csiphy->base_clk_mux =
+ devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
if (IS_ERR(csiphy->base_clk_mux))
return PTR_ERR(csiphy->base_clk_mux);
} else {
@@ -621,14 +623,13 @@ int msm_csiphy_subdev_init(struct camss *camss,
dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr,
- IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy);
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
+ csiphy->irq_name, csiphy);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
- disable_irq(csiphy->irq);
-
/* Clocks */
csiphy->nclocks = 0;
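The IRQ hunks close a small window: with plain devm_request_irq() the line is enabled between registration and the explicit disable_irq(), so the handler can fire before the driver is ready. IRQF_NO_AUTOEN (available since v5.12) requests the line already disabled; a later enable_irq() arms it. Schematically:

    /* before: IRQ is briefly live, then disabled */
    ret = devm_request_irq(dev, irq, my_isr, IRQF_TRIGGER_RISING,
                           name, priv);
    if (ret < 0)
        return ret;
    disable_irq(irq);

    /* after: requested in the disabled state, enable_irq() later */
    ret = devm_request_irq(dev, irq, my_isr,
                           IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
                           name, priv);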
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 37611c8861da..ba5d65f6ef34 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -372,11 +372,9 @@ static int ispif_set_power(struct v4l2_subdev *sd, int on)
goto exit;
}
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto exit;
- }
ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
if (ret < 0) {
@@ -876,12 +874,13 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__ispif_get_format(struct ispif_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&line->subdev, sd_state,
+ pad);
return &line->fmt[pad];
}
@@ -895,7 +894,7 @@ __ispif_get_format(struct ispif_line *line,
* @which: wanted subdev format
*/
static void ispif_try_format(struct ispif_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -925,7 +924,7 @@ static void ispif_try_format(struct ispif_line *line,
case MSM_ISPIF_PAD_SRC:
/* Set and return a format same as sink pad */
- *fmt = *__ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ *fmt = *__ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SINK,
which);
break;
@@ -942,7 +941,7 @@ static void ispif_try_format(struct ispif_line *line,
* return -EINVAL or zero on success
*/
static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
@@ -957,7 +956,8 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ format = __ispif_get_format(line, sd_state,
+ MSM_ISPIF_PAD_SINK,
code->which);
code->code = format->code;
@@ -974,7 +974,7 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int ispif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
@@ -986,7 +986,7 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -996,7 +996,7 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1012,13 +1012,13 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int ispif_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1036,26 +1036,26 @@ static int ispif_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int ispif_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ispif_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ ispif_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_ISPIF_PAD_SINK) {
- format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SRC,
+ format = __ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SRC,
fmt->which);
*format = fmt->format;
- ispif_try_format(line, cfg, MSM_ISPIF_PAD_SRC, format,
+ ispif_try_format(line, sd_state, MSM_ISPIF_PAD_SRC, format,
fmt->which);
}
@@ -1084,7 +1084,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return ispif_set_format(sd, fh ? fh->pad : NULL, &format);
+ return ispif_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
@@ -1143,13 +1143,11 @@ int msm_ispif_subdev_init(struct camss *camss,
/* Memory */
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
- ispif->base = devm_ioremap_resource(dev, r);
+ ispif->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(ispif->base))
return PTR_ERR(ispif->base);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]);
- ispif->base_clk_mux = devm_ioremap_resource(dev, r);
+ ispif->base_clk_mux = devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
if (IS_ERR(ispif->base_clk_mux))
return PTR_ERR(ispif->base_clk_mux);
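devm_platform_ioremap_resource_byname() folds the resource lookup and the mapping into one call, so the intermediate struct resource disappears from the caller. With a hypothetical resource name:

    struct resource *r;
    void __iomem *base;

    /* before: two steps */
    r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "my_reg");
    base = devm_ioremap_resource(&pdev->dev, r);

    /* after: one call */
    base = devm_platform_ioremap_resource_byname(pdev, "my_reg");
    if (IS_ERR(base))
        return PTR_ERR(base);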
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 15695fd466c4..e0f3a36f3f3f 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -584,9 +584,9 @@ static int vfe_get(struct vfe_device *vfe)
if (ret < 0)
goto error_pm_domain;
- ret = pm_runtime_get_sync(vfe->camss->dev);
+ ret = pm_runtime_resume_and_get(vfe->camss->dev);
if (ret < 0)
- goto error_pm_runtime_get;
+ goto error_domain_off;
ret = vfe_set_clock_rates(vfe);
if (ret < 0)
@@ -620,6 +620,7 @@ error_reset:
error_pm_runtime_get:
pm_runtime_put_sync(vfe->camss->dev);
+error_domain_off:
vfe->ops->pm_domain_off(vfe);
error_pm_domain:
@@ -762,12 +763,13 @@ static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__vfe_get_format(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&line->subdev, sd_state,
+ pad);
return &line->fmt[pad];
}
@@ -782,11 +784,11 @@ __vfe_get_format(struct vfe_line *line,
*/
static struct v4l2_rect *
__vfe_get_compose(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&line->subdev, cfg,
+ return v4l2_subdev_get_try_compose(&line->subdev, sd_state,
MSM_VFE_PAD_SINK);
return &line->compose;
@@ -802,11 +804,11 @@ __vfe_get_compose(struct vfe_line *line,
*/
static struct v4l2_rect *
__vfe_get_crop(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&line->subdev, cfg,
+ return v4l2_subdev_get_try_crop(&line->subdev, sd_state,
MSM_VFE_PAD_SRC);
return &line->crop;
@@ -821,7 +823,7 @@ __vfe_get_crop(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_format(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -853,14 +855,15 @@ static void vfe_try_format(struct vfe_line *line,
/* Set and return a format same as sink pad */
code = fmt->code;
- *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+ *fmt = *__vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
+ which);
fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
if (line->id == VFE_LINE_PIX) {
struct v4l2_rect *rect;
- rect = __vfe_get_crop(line, cfg, which);
+ rect = __vfe_get_crop(line, sd_state, which);
fmt->width = rect->width;
fmt->height = rect->height;
@@ -880,13 +883,13 @@ static void vfe_try_format(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_compose(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+ fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, which);
if (rect->width > fmt->width)
rect->width = fmt->width;
@@ -919,13 +922,13 @@ static void vfe_try_compose(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_crop(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_rect *compose;
- compose = __vfe_get_compose(line, cfg, which);
+ compose = __vfe_get_compose(line, sd_state, which);
if (rect->width > compose->width)
rect->width = compose->width;
@@ -963,7 +966,7 @@ static void vfe_try_crop(struct vfe_line *line,
* return -EINVAL or zero on success
*/
static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -976,7 +979,7 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
} else {
struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
+ sink_fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
code->which);
code->code = vfe_src_pad_code(line, sink_fmt->code,
@@ -997,7 +1000,7 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1009,7 +1012,7 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1019,7 +1022,7 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1035,13 +1038,13 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1051,7 +1054,7 @@ static int vfe_get_format(struct v4l2_subdev *sd,
}
static int vfe_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel);
/*
@@ -1063,17 +1066,17 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ vfe_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == MSM_VFE_PAD_SINK) {
@@ -1081,11 +1084,11 @@ static int vfe_set_format(struct v4l2_subdev *sd,
int ret;
/* Propagate the format from sink to source */
- format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
+ format = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SRC,
fmt->which);
*format = fmt->format;
- vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
+ vfe_try_format(line, sd_state, MSM_VFE_PAD_SRC, format,
fmt->which);
if (line->id != VFE_LINE_PIX)
@@ -1097,7 +1100,7 @@ static int vfe_set_format(struct v4l2_subdev *sd,
sel.target = V4L2_SEL_TGT_COMPOSE;
sel.r.width = fmt->format.width;
sel.r.height = fmt->format.height;
- ret = vfe_set_selection(sd, cfg, &sel);
+ ret = vfe_set_selection(sd, sd_state, &sel);
if (ret < 0)
return ret;
}
@@ -1114,7 +1117,7 @@ static int vfe_set_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1130,7 +1133,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
fmt.pad = sel->pad;
fmt.which = sel->which;
- ret = vfe_get_format(sd, cfg, &fmt);
+ ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
@@ -1140,7 +1143,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
sel->r.height = fmt.format.height;
break;
case V4L2_SEL_TGT_COMPOSE:
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1152,7 +1155,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
else if (sel->pad == MSM_VFE_PAD_SRC)
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1162,7 +1165,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
sel->r.height = rect->height;
break;
case V4L2_SEL_TGT_CROP:
- rect = __vfe_get_crop(line, cfg, sel->which);
+ rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1184,7 +1187,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1198,11 +1201,11 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
sel->pad == MSM_VFE_PAD_SINK) {
struct v4l2_subdev_selection crop = { 0 };
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
- vfe_try_compose(line, cfg, &sel->r, sel->which);
+ vfe_try_compose(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source crop selection */
@@ -1210,28 +1213,28 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
crop.pad = MSM_VFE_PAD_SRC;
crop.target = V4L2_SEL_TGT_CROP;
crop.r = *rect;
- ret = vfe_set_selection(sd, cfg, &crop);
+ ret = vfe_set_selection(sd, sd_state, &crop);
} else if (sel->target == V4L2_SEL_TGT_CROP &&
sel->pad == MSM_VFE_PAD_SRC) {
struct v4l2_subdev_format fmt = { 0 };
- rect = __vfe_get_crop(line, cfg, sel->which);
+ rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
- vfe_try_crop(line, cfg, &sel->r, sel->which);
+ vfe_try_crop(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source pad format width and height */
fmt.which = sel->which;
fmt.pad = MSM_VFE_PAD_SRC;
- ret = vfe_get_format(sd, cfg, &fmt);
+ ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
fmt.format.width = rect->width;
fmt.format.height = rect->height;
- ret = vfe_set_format(sd, cfg, &fmt);
+ ret = vfe_set_format(sd, sd_state, &fmt);
} else {
ret = -EINVAL;
}
@@ -1261,7 +1264,7 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
+ return vfe_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
@@ -1301,8 +1304,7 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
/* Memory */
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
- vfe->base = devm_ioremap_resource(dev, r);
+ vfe->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(vfe->base)) {
dev_err(dev, "could not map memory\n");
return PTR_ERR(vfe->base);
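The vfe_get() hunk also fixes its unwind ordering: when pm_runtime_resume_and_get() fails there is no usage count to drop, so the jump must land below pm_runtime_put_sync() and only power the domain back off. Condensed from the hunk above, with the surrounding helpers abbreviated:

    ret = vfe->ops->pm_domain_on(vfe);
    if (ret < 0)
        return ret;

    ret = pm_runtime_resume_and_get(vfe->camss->dev);
    if (ret < 0)
        goto error_domain_off;      /* skip the put */

    ret = vfe_set_clock_rates(vfe);
    if (ret < 0)
        goto error_pm_runtime_get;

    return 0;

error_pm_runtime_get:
    pm_runtime_put_sync(vfe->camss->dev);   /* undo the successful get */
error_domain_off:
    vfe->ops->pm_domain_off(vfe);
    return ret;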
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 54bac7ec14c5..91b15842c555 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -78,22 +78,32 @@ static const struct hfi_core_ops venus_core_ops = {
.event_notify = venus_event_notify,
};
+#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10
+
static void venus_sys_error_handler(struct work_struct *work)
{
struct venus_core *core =
container_of(work, struct venus_core, work.work);
- int ret = 0;
-
- pm_runtime_get_sync(core->dev);
+ int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS;
+ const char *err_msg = "";
+ bool failed = false;
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0) {
+ err_msg = "resume runtime PM";
+ max_attempts = 0;
+ failed = true;
+ }
hfi_core_deinit(core, true);
- dev_warn(core->dev, "system error has occurred, starting recovery!\n");
-
mutex_lock(&core->lock);
- while (pm_runtime_active(core->dev_dec) || pm_runtime_active(core->dev_enc))
+ for (i = 0; i < max_attempts; i++) {
+ if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
+ break;
msleep(10);
+ }
venus_shutdown(core);
@@ -101,31 +111,55 @@ static void venus_sys_error_handler(struct work_struct *work)
pm_runtime_put_sync(core->dev);
- while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
+ for (i = 0; i < max_attempts; i++) {
+ if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
+ break;
usleep_range(1000, 1500);
+ }
hfi_reinit(core);
- pm_runtime_get_sync(core->dev);
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0) {
+ err_msg = "resume runtime PM";
+ failed = true;
+ }
- ret |= venus_boot(core);
- ret |= hfi_core_resume(core, true);
+ ret = venus_boot(core);
+ if (ret && !failed) {
+ err_msg = "boot Venus";
+ failed = true;
+ }
+
+ ret = hfi_core_resume(core, true);
+ if (ret && !failed) {
+ err_msg = "resume HFI";
+ failed = true;
+ }
enable_irq(core->irq);
mutex_unlock(&core->lock);
- ret |= hfi_core_init(core);
+ ret = hfi_core_init(core);
+ if (ret && !failed) {
+ err_msg = "init HFI";
+ failed = true;
+ }
pm_runtime_put_sync(core->dev);
- if (ret) {
+ if (failed) {
disable_irq_nosync(core->irq);
- dev_warn(core->dev, "recovery failed (%d)\n", ret);
+ dev_warn_ratelimited(core->dev,
+ "System error has occurred, recovery failed to %s\n",
+ err_msg);
schedule_delayed_work(&core->work, msecs_to_jiffies(10));
return;
}
+ dev_warn(core->dev, "system error has occurred (recovered)\n");
+
mutex_lock(&core->lock);
core->sys_error = false;
mutex_unlock(&core->lock);
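The recovery rework above replaces two unbounded busy-waits: if a decoder or encoder device never went runtime-inactive, the old while loop spun forever inside the error handler. The new form gives up after RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS tries, and skips the wait entirely (max_attempts = 0) when the initial resume already failed:

    /* before: no exit if the devices stay active */
    while (pm_runtime_active(core->dev_dec) ||
           pm_runtime_active(core->dev_enc))
        msleep(10);

    /* after: bounded */
    for (i = 0; i < max_attempts; i++) {
        if (!pm_runtime_active(core->dev_dec) &&
            !pm_runtime_active(core->dev_enc))
            break;
        msleep(10);
    }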
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 745f226a523f..8df2d497d706 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -155,7 +155,6 @@ struct venus_core {
struct clk *vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX];
struct icc_path *video_path;
struct icc_path *cpucfg_path;
- struct opp_table *opp_table;
bool has_opp_table;
struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX];
struct device_link *opp_dl_venus;
@@ -293,6 +292,7 @@ struct clock_data {
unsigned long freq;
unsigned long vpp_freq;
unsigned long vsp_freq;
+ unsigned long low_power_freq;
};
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
@@ -316,6 +316,10 @@ struct venus_ts_metadata {
struct v4l2_timecode tc;
};
+enum venus_inst_modes {
+ VENUS_LOW_POWER = BIT(0),
+};
+
/**
* struct venus_inst - holds per instance parameters
*
@@ -445,6 +449,7 @@ struct venus_inst {
unsigned int pic_struct;
bool next_buf_last;
bool drain_active;
+ enum venus_inst_modes flags;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index b813d6dba481..1fe6d463dc99 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -595,8 +595,7 @@ static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
params.dec.is_secondary_output =
inst->opb_buftype == HFI_BUFFER_OUTPUT2;
params.dec.is_interlaced =
- inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE ?
- true : false;
+ inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE;
} else {
params.width = inst->out_width;
params.height = inst->out_height;
@@ -1627,6 +1626,8 @@ int venus_helper_session_init(struct venus_inst *inst)
session_type);
inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec,
session_type);
+ inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(version, codec,
+ session_type);
return 0;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 11a8347e5f5c..f51024786991 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
+#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/hash.h>
@@ -27,7 +28,7 @@ void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable)
{
struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
- pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
@@ -39,7 +40,7 @@ void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
{
struct hfi_debug_config *hfi;
- pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
@@ -50,7 +51,7 @@ void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
{
- pkt->hdr.size = sizeof(*pkt) + sizeof(u32);
+ pkt->hdr.size = struct_size(pkt, data, 2);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
@@ -116,7 +117,7 @@ void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable)
{
struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
- pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
@@ -1226,6 +1227,17 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hdr10);
break;
}
+ case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
+ struct hfi_conceal_color_v4 *color = prop_data;
+ u32 *in = pdata;
+
+ color->conceal_color_8bit = *in & 0xff;
+ color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8;
+ color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16;
+ color->conceal_color_10bit = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
+ break;
+ }
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
@@ -1279,17 +1291,6 @@ pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
break;
}
- case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
- struct hfi_conceal_color_v4 *color = prop_data;
- u32 *in = pdata;
-
- color->conceal_color_8bit = *in & 0xff;
- color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8;
- color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16;
- color->conceal_color_10bit = *in;
- pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
- break;
- }
default:
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
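The struct_size() conversions above pair with the data[1] -> data[] change in hfi_cmds.h below: once data is a flexible array it no longer contributes to sizeof(*pkt), so the packet size is written as header plus an explicit element count, with overflow checking for free. A reduced, self-contained illustration (demo_pkt is hypothetical, mirroring hfi_sys_set_property_pkt):

    #include <linux/overflow.h>
    #include <linux/types.h>

    struct demo_pkt {
        u32 size;
        u32 num_properties;
        u32 data[];         /* flexible array: excluded from sizeof() */
    };

    static void demo_pkt_fill(struct demo_pkt *pkt, u32 prop_id, u32 value)
    {
        /* header + two payload words, computed with overflow checks */
        pkt->size = struct_size(pkt, data, 2);
        pkt->num_properties = 1;
        pkt->data[0] = prop_id;
        pkt->data[1] = value;
    }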
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index 83705e237f1c..327ed90a2788 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -68,7 +68,7 @@ struct hfi_sys_release_resource_pkt {
struct hfi_sys_set_property_pkt {
struct hfi_pkt_hdr hdr;
u32 num_properties;
- u32 data[1];
+ u32 data[];
};
struct hfi_sys_get_property_pkt {
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 63cd347a62da..b0a9beb4163c 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -415,9 +415,6 @@
#define HFI_BUFFER_MODE_RING 0x1000002
#define HFI_BUFFER_MODE_DYNAMIC 0x1000003
-#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1
-#define HFI_VENC_PERFMODE_POWER_SAVE 0x2
-
/*
* HFI_PROPERTY_SYS_COMMON_START
* HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000
@@ -848,6 +845,13 @@ struct hfi_framesize {
u32 height;
};
+#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1
+#define HFI_VENC_PERFMODE_POWER_SAVE 0x2
+
+struct hfi_perf_mode {
+ u32 video_perf_mode;
+};
+
#define VIDC_CORE_ID_DEFAULT 0
#define VIDC_CORE_ID_1 1
#define VIDC_CORE_ID_2 2
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index a2d436d407b2..d9fde66f6fa8 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -251,11 +251,11 @@ sys_get_prop_image_version(struct device *dev,
req_bytes = pkt->hdr.size - sizeof(*pkt);
- if (req_bytes < VER_STR_SZ || !pkt->data[1] || pkt->num_properties > 1)
+ if (req_bytes < VER_STR_SZ || !pkt->data[0] || pkt->num_properties > 1)
/* bad packet */
return;
- img_ver = (u8 *)&pkt->data[1];
+ img_ver = pkt->data;
dev_dbg(dev, VDBGL "F/W version: %s\n", img_ver);
@@ -277,7 +277,7 @@ static void hfi_sys_property_info(struct venus_core *core,
return;
}
- switch (pkt->data[0]) {
+ switch (pkt->property) {
case HFI_PROPERTY_SYS_IMAGE_VERSION:
sys_get_prop_image_version(dev, pkt);
break;
@@ -338,7 +338,7 @@ session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
/* bad packet */
return HFI_ERR_SESSION_INVALID_PARAMETER;
- hfi = (struct hfi_profile_level *)&pkt->data[1];
+ hfi = (struct hfi_profile_level *)&pkt->data[0];
profile_level->profile = hfi->profile;
profile_level->level = hfi->level;
@@ -355,11 +355,11 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
- if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[1])
+ if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[0])
/* bad packet */
return HFI_ERR_SESSION_INVALID_PARAMETER;
- buf_req = (struct hfi_buffer_requirements *)&pkt->data[1];
+ buf_req = (struct hfi_buffer_requirements *)&pkt->data[0];
if (!buf_req)
return HFI_ERR_SESSION_INVALID_PARAMETER;
@@ -391,7 +391,7 @@ static void hfi_session_prop_info(struct venus_core *core,
goto done;
}
- switch (pkt->data[0]) {
+ switch (pkt->property) {
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
memset(hprop->bufreq, 0, sizeof(hprop->bufreq));
error = session_get_prop_buf_req(pkt, hprop->bufreq);
@@ -404,7 +404,7 @@ static void hfi_session_prop_info(struct venus_core *core,
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
break;
default:
- dev_dbg(dev, VDBGM "unknown property id:%x\n", pkt->data[0]);
+ dev_dbg(dev, VDBGM "unknown property id:%x\n", pkt->property);
return;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h
index 526d9f5b487b..510513697335 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.h
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.h
@@ -113,7 +113,8 @@ struct hfi_msg_sys_ping_ack_pkt {
struct hfi_msg_sys_property_info_pkt {
struct hfi_pkt_hdr hdr;
u32 num_properties;
- u32 data[1];
+ u32 property;
+ u8 data[];
};
struct hfi_msg_session_load_resources_done_pkt {
@@ -233,7 +234,8 @@ struct hfi_msg_session_parse_sequence_header_done_pkt {
struct hfi_msg_session_property_info_pkt {
struct hfi_session_hdr_pkt shdr;
u32 num_properties;
- u32 data[1];
+ u32 property;
+ u8 data[];
};
struct hfi_msg_session_release_resources_done_pkt {
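Splitting the old u32 data[1] into a named property field plus a u8 data[] payload is what lets the hfi_msgs.c hunks above read pkt->property instead of pkt->data[0] and start the payload at data[0] instead of data[1]. The shape of the change, reduced:

    /* before: ID and payload shared one array */
    struct old_info_pkt {
        u32 num_properties;
        u32 data[1];    /* data[0] = property ID, payload from data[1] */
    };

    /* after: ID is a named field, data[] is payload only */
    struct new_info_pkt {
        u32 num_properties;
        u32 property;
        u8 data[];
    };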
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index 8f47804e973f..f5b4e1f4764f 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -50,6 +50,22 @@ hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session
return freq;
}
+unsigned long
+hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_type)
+{
+ const struct hfi_platform *plat;
+ unsigned long freq = 0;
+
+ plat = hfi_platform_get(version);
+ if (!plat)
+ return 0;
+
+ if (plat->codec_lp_freq)
+ freq = plat->codec_lp_freq(session_type, codec);
+
+ return freq;
+}
+
u8 hfi_platform_num_vpp_pipes(enum hfi_version version)
{
const struct hfi_platform *plat;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index 3819bb2b36bd..2dbe608c53af 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -43,11 +43,13 @@ struct hfi_platform_codec_freq_data {
u32 session_type;
unsigned long vpp_freq;
unsigned long vsp_freq;
+ unsigned long low_power_freq;
};
struct hfi_platform {
unsigned long (*codec_vpp_freq)(u32 session_type, u32 codec);
unsigned long (*codec_vsp_freq)(u32 session_type, u32 codec);
+ unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
u8 (*num_vpp_pipes)(void);
@@ -63,5 +65,7 @@ unsigned long hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 code
u32 session_type);
unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec,
u32 session_type);
+unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
+ u32 session_type);
u8 hfi_platform_num_vpp_pipes(enum hfi_version version);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v4.c b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
index 3848bb6d7408..3f7f5277a50e 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v4.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
@@ -262,14 +262,14 @@ static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
}
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
- { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
- { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
};
static const struct hfi_platform_codec_freq_data *
@@ -311,9 +311,21 @@ static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
return 0;
}
+static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->low_power_freq;
+
+ return 0;
+}
+
const struct hfi_platform hfi_plat_v4 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
+ .codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
};
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index dd1a03911b6c..d8243b22568a 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -262,14 +262,14 @@ static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
}
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 25 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 25 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 60 },
- { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 25 },
- { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 25 },
- { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 25 },
- { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 60 },
- { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 60 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 25, 320 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 25, 320 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 60, 320 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 60, 200 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 60, 200 },
};
static const struct hfi_platform_codec_freq_data *
@@ -311,6 +311,17 @@ static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
return 0;
}
+static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+{
+ const struct hfi_platform_codec_freq_data *data;
+
+ data = get_codec_freq_data(session_type, codec);
+ if (data)
+ return data->low_power_freq;
+
+ return 0;
+}
+
static u8 num_vpp_pipes(void)
{
return 4;
@@ -319,6 +330,7 @@ static u8 num_vpp_pipes(void)
const struct hfi_platform hfi_plat_v6 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
+ .codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
.num_vpp_pipes = num_vpp_pipes,
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index c7e1ebec47ee..3e2345eb47f7 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -300,16 +300,15 @@ static int core_get_v1(struct venus_core *core)
if (ret)
return ret;
- core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
- if (IS_ERR(core->opp_table))
- return PTR_ERR(core->opp_table);
+ ret = devm_pm_opp_set_clkname(core->dev, "core");
+ if (ret)
+ return ret;
return 0;
}
static void core_put_v1(struct venus_core *core)
{
- dev_pm_opp_put_clkname(core->opp_table);
}
static int core_power_v1(struct venus_core *core, int on)
@@ -524,8 +523,50 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
return 0;
}
+static inline int power_save_mode_enable(struct venus_inst *inst,
+ bool enable)
+{
+ struct venc_controls *enc_ctr = &inst->controls.enc;
+ const u32 ptype = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
+ u32 venc_mode;
+ int ret = 0;
+
+ if (inst->session_type != VIDC_SESSION_TYPE_ENC)
+ return 0;
+
+ if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ enable = false;
+
+ venc_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE :
+ HFI_VENC_PERFMODE_MAX_QUALITY;
+
+ ret = hfi_session_set_property(inst, ptype, &venc_mode);
+ if (ret)
+ return ret;
+
+ inst->flags = enable ? inst->flags | VENUS_LOW_POWER :
+ inst->flags & ~VENUS_LOW_POWER;
+
+ return ret;
+}
+
+static int move_core_to_power_save_mode(struct venus_core *core,
+ u32 core_id)
+{
+ struct venus_inst *inst = NULL;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == core_id &&
+ inst->session_type == VIDC_SESSION_TYPE_ENC)
+ power_save_mode_enable(inst, true);
+ }
+ mutex_unlock(&core->lock);
+ return 0;
+}
+
static void
-min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load)
+min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load, bool low_power)
{
u32 mbs_per_sec, load, core1_load = 0, core2_load = 0;
u32 cores_max = core_num_max(inst);
@@ -543,7 +584,14 @@ min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load)
if (inst_pos->state != INST_START)
continue;
- vpp_freq = inst_pos->clk_data.vpp_freq;
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC)
+ vpp_freq = inst_pos->clk_data.vpp_freq;
+ else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vpp_freq = low_power ? inst_pos->clk_data.low_power_freq :
+ inst_pos->clk_data.vpp_freq;
+ else
+ continue;
+
coreid = inst_pos->clk_data.core_id;
mbs_per_sec = load_per_instance(inst_pos);
@@ -575,9 +623,11 @@ static int decide_core(struct venus_inst *inst)
{
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct venus_core *core = inst->core;
- u32 min_coreid, min_load, inst_load;
+ u32 min_coreid, min_load, cur_inst_load;
+ u32 min_lp_coreid, min_lp_load, cur_inst_lp_load;
struct hfi_videocores_usage_type cu;
unsigned long max_freq;
+ int ret = 0;
if (legacy_binding) {
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
@@ -591,23 +641,43 @@ static int decide_core(struct venus_inst *inst)
if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT)
return 0;
- inst_load = load_per_instance(inst);
- inst_load *= inst->clk_data.vpp_freq;
- max_freq = core->res->freq_tbl[0].freq;
+ cur_inst_load = load_per_instance(inst);
+ cur_inst_load *= inst->clk_data.vpp_freq;
+ /* TODO: divide this inst->load by work_route */
- min_loaded_core(inst, &min_coreid, &min_load);
+ cur_inst_lp_load = load_per_instance(inst);
+ cur_inst_lp_load *= inst->clk_data.low_power_freq;
+ /* TODO: divide this inst->load by work_route */
- if ((inst_load + min_load) > max_freq) {
- dev_warn(core->dev, "HW is overloaded, needed: %u max: %lu\n",
- inst_load, max_freq);
+ max_freq = core->res->freq_tbl[0].freq;
+
+ min_loaded_core(inst, &min_coreid, &min_load, false);
+ min_loaded_core(inst, &min_lp_coreid, &min_lp_load, true);
+
+ if (cur_inst_load + min_load <= max_freq) {
+ inst->clk_data.core_id = min_coreid;
+ cu.video_core_enable_mask = min_coreid;
+ } else if (cur_inst_lp_load + min_load <= max_freq) {
+ /* Move current instance to LP and return */
+ inst->clk_data.core_id = min_coreid;
+ cu.video_core_enable_mask = min_coreid;
+ power_save_mode_enable(inst, true);
+ } else if (cur_inst_lp_load + min_lp_load <= max_freq) {
+ /* Move all instances to LP mode and return */
+ inst->clk_data.core_id = min_lp_coreid;
+ cu.video_core_enable_mask = min_lp_coreid;
+ move_core_to_power_save_mode(core, min_lp_coreid);
+ } else {
+ dev_warn(core->dev, "HW can't support this load");
return -EINVAL;
}
- inst->clk_data.core_id = min_coreid;
- cu.video_core_enable_mask = min_coreid;
-
done:
- return hfi_session_set_property(inst, ptype, &cu);
+ ret = hfi_session_set_property(inst, ptype, &cu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int acquire_core(struct venus_inst *inst)
@@ -788,7 +858,6 @@ static int venc_power_v4(struct device *dev, int on)
static int vcodec_domains_get(struct venus_core *core)
{
int ret;
- struct opp_table *opp_table;
struct device **opp_virt_dev;
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
@@ -811,11 +880,9 @@ skip_pmdomains:
return 0;
/* Attach the power domain for setting performance state */
- opp_table = dev_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = devm_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev);
+ if (ret)
goto opp_attach_err;
- }
core->opp_pmdomain = *opp_virt_dev;
core->opp_dl_venus = device_link_add(dev, core->opp_pmdomain,
@@ -824,13 +891,11 @@ skip_pmdomains:
DL_FLAG_STATELESS);
if (!core->opp_dl_venus) {
ret = -ENODEV;
- goto opp_dl_add_err;
+ goto opp_attach_err;
}
return 0;
-opp_dl_add_err:
- dev_pm_opp_detach_genpd(core->opp_table);
opp_attach_err:
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
if (IS_ERR_OR_NULL(core->pmdomains[i]))
@@ -861,8 +926,6 @@ skip_pmdomains:
if (core->opp_dl_venus)
device_link_del(core->opp_dl_venus);
-
- dev_pm_opp_detach_genpd(core->opp_table);
}
static int core_resets_reset(struct venus_core *core)
@@ -941,45 +1004,33 @@ static int core_get_v4(struct venus_core *core)
if (legacy_binding)
return 0;
- core->opp_table = dev_pm_opp_set_clkname(dev, "core");
- if (IS_ERR(core->opp_table))
- return PTR_ERR(core->opp_table);
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
if (core->res->opp_pmdomain) {
- ret = dev_pm_opp_of_add_table(dev);
+ ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
core->has_opp_table = true;
} else if (ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
- dev_pm_opp_put_clkname(core->opp_table);
return ret;
}
}
ret = vcodec_domains_get(core);
- if (ret) {
- if (core->has_opp_table)
- dev_pm_opp_of_remove_table(dev);
- dev_pm_opp_put_clkname(core->opp_table);
+ if (ret)
return ret;
- }
return 0;
}
static void core_put_v4(struct venus_core *core)
{
- struct device *dev = core->dev;
-
if (legacy_binding)
return;
vcodec_domains_put(core);
-
- if (core->has_opp_table)
- dev_pm_opp_of_remove_table(dev);
- dev_pm_opp_put_clkname(core->opp_table);
-
}
static int core_power_v4(struct venus_core *core, int on)
@@ -990,9 +1041,8 @@ static int core_power_v4(struct venus_core *core, int on)
if (on == POWER_ON) {
if (pmctrl) {
- ret = pm_runtime_get_sync(pmctrl);
- if (ret < 0) {
- pm_runtime_put_noidle(pmctrl);
+ ret = pm_runtime_resume_and_get(pmctrl);
+ if (ret < 0)
return ret;
- }
}
@@ -1026,7 +1076,7 @@ static int core_power_v4(struct venus_core *core, int on)
static unsigned long calculate_inst_freq(struct venus_inst *inst,
unsigned long filled_len)
{
- unsigned long vpp_freq = 0, vsp_freq = 0;
+ unsigned long vpp_freq_per_mb = 0, vpp_freq = 0, vsp_freq = 0;
u32 fps = (u32)inst->fps;
u32 mbs_per_sec;
@@ -1035,7 +1085,12 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
if (inst->state != INST_START)
return 0;
- vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ?
+ inst->clk_data.low_power_freq :
+ inst->clk_data.vpp_freq;
+
+ vpp_freq = mbs_per_sec * vpp_freq_per_mb;
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index ddb7cd39424e..198e47eb63f4 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -568,10 +568,10 @@ static int vdec_pm_get(struct venus_inst *inst)
int ret;
mutex_lock(&core->pm_lock);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
mutex_unlock(&core->pm_lock);
- return ret < 0 ? ret : 0;
+ return ret;
}
static int vdec_pm_put(struct venus_inst *inst, bool autosuspend)
@@ -601,7 +601,7 @@ static int vdec_pm_get_put(struct venus_inst *inst)
mutex_lock(&core->pm_lock);
if (pm_runtime_suspended(dev)) {
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 4a7291f934b6..8dd49d4f124c 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1205,9 +1205,9 @@ static int venc_open(struct file *file)
venus_helper_init_instance(inst);
- ret = pm_runtime_get_sync(core->dev_enc);
+ ret = pm_runtime_resume_and_get(core->dev_enc);
if (ret < 0)
- goto err_put_sync;
+ goto err_free;
ret = venc_ctrl_init(inst);
if (ret)
@@ -1252,6 +1252,7 @@ err_ctrl_deinit:
venc_ctrl_deinit(inst);
err_put_sync:
pm_runtime_put_sync(core->dev_enc);
+err_free:
kfree(inst);
return ret;
}
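Note the retargeted goto in venc_open(): with pm_runtime_resume_and_get() a failed get takes no usage count, so jumping to err_put_sync would drop a reference that was never taken. The unwind, condensed from the hunk above:

    ret = pm_runtime_resume_and_get(core->dev_enc);
    if (ret < 0)
        goto err_free;      /* nothing to put */

    ret = venc_ctrl_init(inst);
    if (ret)
        goto err_put_sync;

    /* ... */
    return 0;

err_put_sync:
    pm_runtime_put_sync(core->dev_enc);
err_free:
    kfree(inst);
    return ret;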
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 5c03318ae07b..eb59a3ba6d0f 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -96,18 +96,10 @@ EXPORT_SYMBOL_GPL(rcar_fcp_get_device);
*/
int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
- int ret;
-
if (!fcp)
return 0;
- ret = pm_runtime_get_sync(fcp->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(fcp->dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(fcp->dev);
}
EXPORT_SYMBOL_GPL(rcar_fcp_enable);
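
rcar_fcp_enable() is the cleanest illustration of the pm_runtime conversion repeated throughout this series: pm_runtime_resume_and_get() drops its own usage-count reference when the resume fails, so the pm_runtime_put_noidle() that every pm_runtime_get_sync() error path needed simply disappears. The two patterns side by side (dev is any runtime-PM-enabled struct device):

	/* Old: get_sync() raises the usage count even on failure,
	 * so the caller must drop it by hand. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* New: resume_and_get() is self-balancing on failure and
	 * returns 0 (never 1) on success. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;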
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index cb3025992817..33957cc9118c 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -1363,6 +1363,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a77961",
+ .data = &rcar_info_r8a7796,
+ },
+ {
.compatible = "renesas,vin-r8a77965",
.data = &rcar_info_r8a77965,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index e06cd512aba2..e28eff039688 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -320,10 +320,12 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .bpp = 16 },
{ .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .bpp = 16 },
{ .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .bpp = 20 },
+ { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .bpp = 10 },
{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .datatype = 0x2a, .bpp = 8 },
{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .datatype = 0x2a, .bpp = 8 },
{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .datatype = 0x2a, .bpp = 8 },
{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .datatype = 0x2a, .bpp = 8 },
+ { .code = MEDIA_BUS_FMT_Y8_1X8, .datatype = 0x2a, .bpp = 8 },
};
static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
@@ -406,10 +408,17 @@ static void rcsi2_enter_standby(struct rcar_csi2 *priv)
pm_runtime_put(priv->dev);
}
-static void rcsi2_exit_standby(struct rcar_csi2 *priv)
+static int rcsi2_exit_standby(struct rcar_csi2 *priv)
{
- pm_runtime_get_sync(priv->dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0)
+ return ret;
+
reset_control_deassert(priv->rstc);
+
+ return 0;
}
static int rcsi2_wait_phy_start(struct rcar_csi2 *priv,
@@ -657,7 +666,9 @@ static int rcsi2_start(struct rcar_csi2 *priv)
{
int ret;
- rcsi2_exit_standby(priv);
+ ret = rcsi2_exit_standby(priv);
+ if (ret < 0)
+ return ret;
ret = rcsi2_start_receiver(priv);
if (ret) {
@@ -708,7 +719,7 @@ out:
}
static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
@@ -720,7 +731,7 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
priv->mf = format->format;
} else {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
*framefmt = format->format;
}
@@ -728,7 +739,7 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
}
static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
@@ -736,7 +747,7 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
return 0;
}
@@ -1112,6 +1123,11 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
.num_channels = 4,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a77961 = {
+ .hsfreqrange = hsfreqrange_m3w_h3es1,
+ .num_channels = 4,
+};
+
static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
@@ -1165,6 +1181,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a7796,
},
{
+ .compatible = "renesas,r8a77961-csi2",
+ .data = &rcar_csi2_info_r8a77961,
+ },
+ {
.compatible = "renesas,r8a77965-csi2",
.data = &rcar_csi2_info_r8a77965,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index f30dafbdf61c..f5f722ab1d4e 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1458,11 +1458,9 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
u32 vnmc;
int ret;
- ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(vin->dev);
+ ret = pm_runtime_resume_and_get(vin->dev);
+ if (ret < 0)
return ret;
- }
/* Make register writes take effect immediately. */
vnmc = rvin_read(vin, VNMC_REG);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 457a65bf6b66..cca15a10c0b3 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -243,7 +243,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
- struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_state *sd_state;
struct v4l2_subdev_format format = {
.which = which,
.pad = vin->parallel.source_pad,
@@ -252,8 +252,8 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
u32 width, height;
int ret;
- pad_cfg = v4l2_subdev_alloc_pad_config(sd);
- if (pad_cfg == NULL)
+ sd_state = v4l2_subdev_alloc_state(sd);
+ if (sd_state == NULL)
return -ENOMEM;
if (!rvin_format_from_pixel(vin, pix->pixelformat))
@@ -266,7 +266,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
width = pix->width;
height = pix->height;
- ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+ ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
ret = 0;
@@ -288,7 +288,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
rvin_format_align(vin, pix);
done:
- v4l2_subdev_free_pad_config(pad_cfg);
+ v4l2_subdev_free_state(sd_state);
return ret;
}
@@ -870,11 +870,9 @@ static int rvin_open(struct file *file)
struct rvin_dev *vin = video_drvdata(file);
int ret;
- ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(vin->dev);
+ ret = pm_runtime_resume_and_get(vin->dev);
+ if (ret < 0)
return ret;
- }
ret = mutex_lock_interruptible(&vin->lock);
if (ret)
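
rvin_try_format() shows the mechanical shape of the pad-config conversion for TRY negotiation: the throwaway v4l2_subdev_pad_config allocation becomes a throwaway v4l2_subdev_state. A condensed sketch of the call sequence, error handling trimmed:

	struct v4l2_subdev_state *sd_state;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	int ret;

	/* Replaces v4l2_subdev_alloc_pad_config(sd). */
	sd_state = v4l2_subdev_alloc_state(sd);
	if (sd_state == NULL)
		return -ENOMEM;

	ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);

	/* Replaces v4l2_subdev_free_pad_config(pad_cfg). */
	v4l2_subdev_free_state(sd_state);
	return ret;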
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 01c1fbb97bf6..89aac60066d9 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2117,9 +2117,7 @@ static int fdp1_open(struct file *file)
if (ctx->hdl.error) {
ret = ctx->hdl.error;
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
ctx->fh.ctrl_handler = &ctx->hdl;
@@ -2133,20 +2131,27 @@ static int fdp1_open(struct file *file)
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
-
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
/* Perform any power management required */
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto error_pm;
v4l2_fh_add(&ctx->fh);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ return 0;
+
+error_pm:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_ctx:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
done:
mutex_unlock(&fdp1->dev_mutex);
return ret;
@@ -2351,7 +2356,9 @@ static int fdp1_probe(struct platform_device *pdev)
/* Power up the cells to read HW */
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto disable_pm;
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
@@ -2380,6 +2387,9 @@ static int fdp1_probe(struct platform_device *pdev)
return 0;
+disable_pm:
+ pm_runtime_disable(fdp1->dev);
+
release_m2m:
v4l2_m2m_release(fdp1->m2m_dev);
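
The fdp1_probe() fix relies on the usual fall-through label ladder: each label undoes one setup step and then falls into the next, so a resume failure after pm_runtime_enable() both disables runtime PM and releases the already-registered m2m device. Schematically:

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(fdp1->dev);
	if (ret < 0)
		goto disable_pm;
	/* ... read hw_version, register video devices ... */
	return 0;

disable_pm:
	pm_runtime_disable(fdp1->dev);
	/* falls through: m2m was set up before runtime PM */
release_m2m:
	v4l2_m2m_release(fdp1->m2m_dev);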
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index a7c198c17deb..f57158bf2b11 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -42,7 +42,7 @@
/*
* Align JPEG header end to cache line to make sure we will not have any issues
- * with cache; additionally to requerment (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ * with cache; in addition to the requirement (33.3.27 R01UH0501EJ0100 Rev.1.00)
*/
#define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES))
#define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */
@@ -121,7 +121,7 @@
#define JCCMD_JEND (1 << 2)
#define JCCMD_JSRT (1 << 0)
-/* JPEG code quantanization table number register */
+/* JPEG code quantization table number register */
#define JCQTN 0x0c
#define JCQTN_SHIFT(t) (((t) - 1) << 1)
@@ -1644,7 +1644,7 @@ static int jpu_probe(struct platform_device *pdev)
goto device_register_rollback;
}
- /* fill in qantization and Huffman tables for encoder */
+ /* fill in quantization and Huffman tables for encoder */
for (i = 0; i < JPU_MAX_QUALITY; i++)
jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index cd137101d41e..f432032c7084 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -794,6 +794,9 @@ static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt
struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
const struct ceu_fmt *ceu_fmt;
u32 mbus_code_old;
u32 mbus_code;
@@ -850,13 +853,13 @@ static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt
* time.
*/
sd_format.format.code = mbus_code;
- ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_cfg, &sd_format);
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_state, &sd_format);
if (ret) {
if (ret == -EINVAL) {
/* fallback */
sd_format.format.code = mbus_code_old;
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
- &pad_cfg, &sd_format);
+ &pad_state, &sd_format);
}
if (ret)
@@ -1099,10 +1102,10 @@ static int ceu_open(struct file *file)
mutex_lock(&ceudev->mlock);
/* Causes soft-reset and sensor power-on on first open */
- pm_runtime_get_sync(ceudev->dev);
+ ret = pm_runtime_resume_and_get(ceudev->dev);
mutex_unlock(&ceudev->mlock);
- return 0;
+ return ret;
}
static int ceu_release(struct file *file)
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index bf9a75b75083..81508ed5abf3 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -79,9 +79,8 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
struct rockchip_rga *rga = ctx->rga;
int ret;
- ret = pm_runtime_get_sync(rga->dev);
+ ret = pm_runtime_resume_and_get(rga->dev);
if (ret < 0) {
- pm_runtime_put_noidle(rga->dev);
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
return ret;
}
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 9d122429706e..bf3fd71ec3af 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -866,7 +866,9 @@ static int rga_probe(struct platform_device *pdev)
goto unreg_video_dev;
}
- pm_runtime_get_sync(rga->dev);
+ ret = pm_runtime_resume_and_get(rga->dev);
+ if (ret < 0)
+ goto unreg_video_dev;
rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 5f6c9d1623e4..60cd2200e7ae 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -830,8 +830,8 @@ static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
}
/*
- * Most of registers inside rockchip ISP1 have shadow register since
- * they must be not be changed during processing a frame.
+ * Most registers inside the Rockchip ISP1 have a shadow register since
+ * they must not be changed while processing a frame.
* Usually, each sub-module updates its shadow register after
* processing the last pixel of a frame.
*/
@@ -847,14 +847,14 @@ static void rkisp1_cap_stream_enable(struct rkisp1_capture *cap)
spin_lock_irq(&cap->buf.lock);
rkisp1_set_next_buf(cap);
cap->ops->enable(cap);
- /* It's safe to config ACTIVE and SHADOW regs for the
+ /* It's safe to configure ACTIVE and SHADOW registers for the
* first stream. But when the second one is starting, do NOT
- * force update because it also update the first one.
+ * force update because it also updates the first one.
*
- * The latter case would drop one more buf(that is 2) since
- * there's not buf in shadow when the second FE received. This's
- * also required because the second FE maybe corrupt especially
- * when run at 120fps.
+ * The latter case would drop one more buffer (that is, 2) since
+ * there is no buffer in a shadow register when the second FE is
+ * received. This is also required because the second FE may be
+ * corrupted, especially when running at 120fps.
*/
if (!other->is_streaming) {
/* force cfg update */
@@ -1003,9 +1003,8 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
if (ret)
goto err_pipeline_stop;
- ret = pm_runtime_get_sync(cap->rkisp1->dev);
+ ret = pm_runtime_resume_and_get(cap->rkisp1->dev);
if (ret < 0) {
- pm_runtime_put_noidle(cap->rkisp1->dev);
dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
goto err_destroy_dummy;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 2e5b57e3aedc..d596bc040005 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -208,24 +208,30 @@ static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd)
static struct v4l2_mbus_framefmt *
rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = isp->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&isp->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_format(&isp->sd, isp->pad_cfg, pad);
+ return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = isp->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&isp->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_crop(&isp->sd, isp->pad_cfg, pad);
+ return v4l2_subdev_get_try_crop(&isp->sd, &state, pad);
}
/* ----------------------------------------------------------------------------
@@ -561,7 +567,7 @@ static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
*/
static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
unsigned int i, dir;
@@ -601,7 +607,7 @@ static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct rkisp1_isp_mbus_info *mbus_info;
@@ -634,37 +640,37 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
}
static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop, *src_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
- sink_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
- src_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_crop = *sink_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_PARAMS);
- src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_STATS);
sink_fmt->width = 0;
sink_fmt->height = 0;
@@ -676,7 +682,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
}
static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -684,9 +690,9 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *src_crop;
- src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
src_fmt->code = format->code;
@@ -717,17 +723,17 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
}
static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *sink_crop;
struct v4l2_rect *src_crop;
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO,
which);
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
@@ -740,21 +746,23 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
*r = *src_crop;
/* Propagate to out format */
- src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_fmt(isp, cfg, src_fmt, which);
+ rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt, which);
}
static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_rect *sink_crop, *src_crop;
struct v4l2_mbus_framefmt *sink_fmt;
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_crop->left = ALIGN(r->left, 2);
@@ -766,13 +774,13 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
*r = *sink_crop;
/* Propagate to out crop */
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_crop(isp, cfg, src_crop, which);
+ rkisp1_isp_set_src_crop(isp, sd_state, src_crop, which);
}
static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -780,7 +788,8 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_fmt->code = format->code;
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
@@ -801,36 +810,40 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
*format = *sink_fmt;
/* Propagate to in crop */
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
- rkisp1_isp_set_sink_crop(isp, cfg, sink_crop, which);
+ rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop, which);
}
static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
mutex_lock(&isp->ops_lock);
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
+ fmt->which);
mutex_unlock(&isp->ops_lock);
return 0;
}
static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which);
+ rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format,
+ fmt->which);
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_fmt(isp, cfg, &fmt->format, fmt->which);
+ rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format,
+ fmt->which);
else
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad,
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&isp->ops_lock);
@@ -838,7 +851,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
}
static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
@@ -854,20 +867,20 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
struct v4l2_mbus_framefmt *fmt;
- fmt = rkisp1_isp_get_pad_fmt(isp, cfg, sel->pad,
+ fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, sel->pad,
sel->which);
sel->r.height = fmt->height;
sel->r.width = fmt->width;
sel->r.left = 0;
sel->r.top = 0;
} else {
- sel->r = *rkisp1_isp_get_pad_crop(isp, cfg,
- RKISP1_ISP_PAD_SINK_VIDEO,
- sel->which);
+ sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ sel->which);
}
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, sel->pad,
+ sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state, sel->pad,
sel->which);
break;
default:
@@ -878,7 +891,7 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
}
static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_device *rkisp1 =
@@ -893,9 +906,9 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&isp->ops_lock);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which);
+ rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r, sel->which);
else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which);
+ rkisp1_isp_set_src_crop(isp, sd_state, &sel->r, sel->which);
else
ret = -EINVAL;
@@ -1037,6 +1050,9 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
+ struct v4l2_subdev_state state = {
+ .pads = rkisp1->isp.pad_cfg
+ };
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
struct v4l2_subdev *sd = &isp->sd;
@@ -1069,7 +1085,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
goto err_cleanup_media_entity;
}
- rkisp1_isp_init_config(sd, rkisp1->isp.pad_cfg);
+ rkisp1_isp_init_config(sd, &state);
return 0;
err_cleanup_media_entity:
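
Where the rkisp1 helpers need ACTIVE formats, which the driver keeps in its own pad_cfg array, the conversion wraps that array in a temporary on-stack v4l2_subdev_state instead of allocating one; the returned pointer lands in the long-lived pad_cfg storage, not in the stack object. A stripped-down sketch (the helper name is illustrative, field names follow the driver):

static struct v4l2_mbus_framefmt *
isp_pad_fmt(struct rkisp1_isp *isp, struct v4l2_subdev_state *sd_state,
	    unsigned int pad, u32 which)
{
	/* Wrap the driver-owned pads so the TRY helper can serve
	 * ACTIVE lookups as well. */
	struct v4l2_subdev_state state = {
		.pads = isp->pad_cfg
	};

	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);

	return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
}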
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index b6beddd988d0..529c6e21815f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -1258,7 +1258,10 @@ void rkisp1_params_configure(struct rkisp1_params *params,
rkisp1_params_config_parameter(params);
}
-/* Not called when the camera active, thus not isr protection. */
+/*
+ * Not called when the camera is active, therefore there is no need to acquire
+ * a lock.
+ */
void rkisp1_params_disable(struct rkisp1_params *params)
{
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index 79deed8adcea..2070f4b06705 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -180,24 +180,30 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
static struct v4l2_mbus_framefmt *
rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&rsz->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&rsz->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_format(&rsz->sd, rsz->pad_cfg, pad);
+ return v4l2_subdev_get_try_format(&rsz->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&rsz->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&rsz->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_crop(&rsz->sd, rsz->pad_cfg, pad);
+ return v4l2_subdev_get_try_crop(&rsz->sd, &state, pad);
}
/* ----------------------------------------------------------------------------
@@ -451,12 +457,15 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
*/
static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_subdev_pad_config dummy_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &dummy_cfg
+ };
u32 pad = code->pad;
int ret;
@@ -481,7 +490,7 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
/* supported mbus codes on the sink pad are the same as isp src pad */
code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
- &dummy_cfg, code);
+ &pad_state, code);
/* restore pad */
code->pad = pad;
@@ -490,24 +499,27 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SRC);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_RSZ_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_FMT;
- sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
*src_fmt = *sink_fmt;
/* NOTE: there is no crop in the source pad, only in the sink */
@@ -516,15 +528,17 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
}
static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_isp_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
+ which);
sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
@@ -543,7 +557,7 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r,
unsigned int which)
{
@@ -551,8 +565,10 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
which);
/* Do not crop for MP bayer raw data */
@@ -579,7 +595,7 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -587,9 +603,12 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
+ which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
which);
if (rsz->id == RKISP1_SELFPATH)
sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
@@ -617,24 +636,25 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
*format = *sink_fmt;
/* Update sink crop */
- rkisp1_rsz_set_sink_crop(rsz, cfg, sink_crop, which);
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
}
static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
mutex_lock(&rsz->ops_lock);
- fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which);
+ fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, sd_state, fmt->pad,
+ fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
@@ -642,16 +662,18 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&rsz->ops_lock);
if (fmt->pad == RKISP1_RSZ_PAD_SINK)
- rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which);
+ rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format,
+ fmt->which);
else
- rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which);
+ rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format,
+ fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
@@ -665,7 +687,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
mutex_lock(&rsz->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ mf_sink = rkisp1_rsz_get_pad_fmt(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
sel->which);
sel->r.height = mf_sink->height;
sel->r.width = mf_sink->width;
@@ -673,7 +696,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
sel->r.top = 0;
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sel->r = *rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
sel->which);
break;
default:
@@ -685,7 +709,7 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
}
static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
@@ -698,7 +722,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&rsz->ops_lock);
- rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which);
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r, sel->which);
mutex_unlock(&rsz->ops_lock);
return 0;
@@ -764,6 +788,9 @@ static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
static const char * const dev_names[] = {
RKISP1_RSZ_MP_DEV_NAME,
RKISP1_RSZ_SP_DEV_NAME
@@ -802,7 +829,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
goto err_cleanup_media_entity;
}
- rkisp1_rsz_init_config(sd, rsz->pad_cfg);
+ rkisp1_rsz_init_config(sd, &state);
return 0;
err_cleanup_media_entity:
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 9ca49af29542..140854ab4dd8 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -547,7 +547,7 @@ static int s3c_camif_open(struct file *file)
if (ret < 0)
goto unlock;
- ret = pm_runtime_get_sync(camif->dev);
+ ret = pm_runtime_resume_and_get(camif->dev);
if (ret < 0)
goto err_pm;
@@ -1199,7 +1199,7 @@ static const u32 camif_mbus_formats[] = {
*/
static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(camif_mbus_formats))
@@ -1210,14 +1210,14 @@ static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1278,7 +1278,7 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
}
static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1306,7 +1306,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
__camif_subdev_try_format(camif, mf, fmt->pad);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
mutex_unlock(&camif->lock);
return 0;
@@ -1345,7 +1345,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1358,7 +1358,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
return 0;
}
@@ -1432,7 +1432,7 @@ static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
}
static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1446,7 +1446,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
__camif_try_crop(camif, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
unsigned int i;
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 4c3c00d59c92..e1d51fd3e700 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -460,9 +460,9 @@ static int s3c_camif_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto err_pm;
+ goto err_disable;
ret = camif_media_dev_init(camif);
if (ret < 0)
@@ -502,6 +502,7 @@ err_sens:
camif_unregister_media_entities(camif);
err_pm:
pm_runtime_put(dev);
+err_disable:
pm_runtime_disable(dev);
camif_clk_put(camif);
err_clk:
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 15bcb7f6e113..1cb5eaabf340 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -276,6 +276,9 @@ static int g2d_release(struct file *file)
struct g2d_dev *dev = video_drvdata(file);
struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ mutex_lock(&dev->mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
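
The g2d change moves the m2m context release to the front of the file-release path and takes dev->mutex around it, so no queued job can still be running while the rest of the context is torn down. The resulting order, with the trailing steps assumed from the usual m2m release pattern:

	mutex_lock(&dev->mutex);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);	/* stop jobs first, locked */
	mutex_unlock(&dev->mutex);

	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);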
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 026111505f5a..d402e456f27d 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2566,11 +2566,8 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
- int ret;
-
- ret = pm_runtime_get_sync(ctx->jpeg->dev);
- return ret > 0 ? 0 : ret;
+ return pm_runtime_resume_and_get(ctx->jpeg->dev);
}
static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
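
One subtlety in the s5p-jpeg hunk: pm_runtime_get_sync() may return 1 when the device was already powered, which is what the old "ret > 0 ? 0 : ret" was normalising. pm_runtime_resume_and_get() returns exactly 0 on success, so its result can be passed straight back:

	/* get_sync(): 0 or 1 on success, hence the old normalisation.
	 * resume_and_get(): 0 on success, negative errno on failure. */
	return pm_runtime_resume_and_get(ctx->jpeg->dev);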
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index a92a9ca6e87e..c1d3bda8385b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -172,6 +172,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
.maximum = 16383,
+ .step = 1,
.default_value = 0,
},
{
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 62d2320a7218..88b7d33c9197 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -78,11 +78,9 @@ int s5p_mfc_power_on(void)
{
int i, ret = 0;
- ret = pm_runtime_get_sync(pm->device);
- if (ret < 0) {
- pm_runtime_put_noidle(pm->device);
+ ret = pm_runtime_resume_and_get(pm->device);
+ if (ret < 0)
return ret;
- }
/* clock control */
for (i = 0; i < pm->num_clocks; i++) {
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4ac48441f22c..ca4310e26c49 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1133,7 +1133,11 @@ static int sh_vou_open(struct file *file)
if (v4l2_fh_is_singular_file(file) &&
vou_dev->status == SH_VOU_INITIALISING) {
/* First open */
- pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
+ err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
+ if (err < 0) {
+ v4l2_fh_release(file);
+ goto done_open;
+ }
err = sh_vou_hw_init(vou_dev);
if (err < 0) {
pm_runtime_put(vou_dev->v4l2_dev.dev);
diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/sti/bdisp/Makefile
index caf7ccd193ea..39ade0a34723 100644
--- a/drivers/media/platform/sti/bdisp/Makefile
+++ b/drivers/media/platform/sti/bdisp/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_STI_BDISP) := bdisp.o
+obj-$(CONFIG_VIDEO_STI_BDISP) += bdisp.o
bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 060ca85f64d5..6413cd279125 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -499,7 +499,7 @@ static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct bdisp_ctx *ctx = q->drv_priv;
struct vb2_v4l2_buffer *buf;
- int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
+ int ret = pm_runtime_resume_and_get(ctx->bdisp_dev->dev);
if (ret < 0) {
dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
@@ -1318,7 +1318,6 @@ static int bdisp_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bdisp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(bdisp->regs)) {
- dev_err(dev, "failed to get regs\n");
ret = PTR_ERR(bdisp->regs);
goto err_wq;
}
@@ -1364,10 +1363,10 @@ static int bdisp_probe(struct platform_device *pdev)
/* Power management */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to set PM\n");
- goto err_pm;
+ goto err_remove;
}
/* Filters */
@@ -1395,6 +1394,7 @@ err_filter:
bdisp_hw_free_filters(bdisp->dev);
err_pm:
pm_runtime_put(dev);
+err_remove:
bdisp_debugfs_remove(bdisp);
v4l2_device_unregister(&bdisp->v4l2_dev);
err_clk:
diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
index 92b37e216f00..32412fa4c632 100644
--- a/drivers/media/platform/sti/delta/Makefile
+++ b/drivers/media/platform/sti/delta/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
+obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) += st-delta.o
st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
# MJPEG support
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index c691b3d81549..c887a31ebb54 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -954,10 +954,8 @@ static void delta_run_work(struct work_struct *work)
/* enable the hardware */
if (!dec->pm) {
ret = delta_get_sync(ctx);
- if (ret) {
- delta_put_autosuspend(ctx);
+ if (ret)
goto err;
- }
}
/* decode this access unit */
@@ -1009,7 +1007,6 @@ static void delta_run_work(struct work_struct *work)
dev_err(delta->dev,
"%s NULL decoded frame\n",
ctx->name);
- ret = -EIO;
goto out;
}
@@ -1277,9 +1274,9 @@ int delta_get_sync(struct delta_ctx *ctx)
int ret = 0;
/* enable the hardware */
- ret = pm_runtime_get_sync(delta->dev);
+ ret = pm_runtime_resume_and_get(delta->dev);
if (ret < 0) {
- dev_err(delta->dev, "%s pm_runtime_get_sync failed (%d)\n",
+ dev_err(delta->dev, "%s pm_runtime_resume_and_get failed (%d)\n",
__func__, ret);
return ret;
}
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
index 74b41ec52f97..b5a5478bdd01 100644
--- a/drivers/media/platform/sti/hva/Makefile
+++ b/drivers/media/platform/sti/hva/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
+obj-$(CONFIG_VIDEO_STI_HVA) += st-hva.o
st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index f59811e27f51..30fb1aa4a351 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -130,8 +130,7 @@ static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
ctx_id = (hva->sts_reg & 0xFF00) >> 8;
if (ctx_id >= HVA_MAX_INSTANCES) {
dev_err(dev, "%s %s: bad context identifier: %d\n",
- ctx->name, __func__, ctx_id);
- ctx->hw_err = true;
+ HVA_PREFIX, __func__, ctx_id);
goto out;
}
@@ -270,9 +269,8 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
struct device *dev = hva_to_dev(hva);
unsigned long int version;
- if (pm_runtime_get_sync(dev) < 0) {
+ if (pm_runtime_resume_and_get(dev) < 0) {
dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
- pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return -EFAULT;
}
@@ -386,10 +384,10 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
- goto err_pm;
+ goto err_clk;
}
/* check IP hardware version */
@@ -462,6 +460,7 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
u8 client_id = ctx->id;
int ret;
u32 reg = 0;
+ bool got_pm = false;
mutex_lock(&hva->protect_mutex);
@@ -469,12 +468,13 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
enable_irq(hva->irq_its);
enable_irq(hva->irq_err);
- if (pm_runtime_get_sync(dev) < 0) {
+ if (pm_runtime_resume_and_get(dev) < 0) {
dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
ctx->sys_errors++;
ret = -EFAULT;
goto out;
}
+ got_pm = true;
reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
switch (cmd) {
@@ -537,7 +537,8 @@ out:
dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
}
- pm_runtime_put_autosuspend(dev);
+ if (got_pm)
+ pm_runtime_put_autosuspend(dev);
mutex_unlock(&hva->protect_mutex);
return ret;
@@ -553,9 +554,8 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
mutex_lock(&hva->protect_mutex);
- if (pm_runtime_get_sync(dev) < 0) {
+ if (pm_runtime_resume_and_get(dev) < 0) {
seq_puts(s, "Cannot wake up IP\n");
- pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return;
}
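
hva_hw_execute_task() funnels every failure through a single out: label, before and after the PM reference is taken, so the new got_pm flag is what keeps the final put balanced. The shape of the guard, trimmed to the PM handling:

	bool got_pm = false;
	int ret = 0;

	if (pm_runtime_resume_and_get(dev) < 0) {
		ret = -EFAULT;
		goto out;	/* no reference was taken */
	}
	got_pm = true;

	/* ... program the command, wait for completion ... */
out:
	if (got_pm)
		pm_runtime_put_autosuspend(dev);	/* drop only what we took */
	return ret;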
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d9b4ad0abf0c..d914ccef9831 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -600,7 +600,7 @@ static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
}
static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
- struct v4l2_subdev_pad_config *pad_cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct media_entity *entity = &dcmi->source->entity;
@@ -642,7 +642,7 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
format->format.width, format->format.height);
fmt.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to set format 0x%x %ux%u on \"%s\":%d pad (%d)\n",
__func__, format->format.code,
@@ -723,11 +723,11 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
u32 val = 0;
int ret;
- ret = pm_runtime_get_sync(dcmi->dev);
+ ret = pm_runtime_resume_and_get(dcmi->dev);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
__func__, ret);
- goto err_pm_put;
+ goto err_unlocked;
}
ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
@@ -848,6 +848,7 @@ err_media_pipeline_stop:
err_pm_put:
pm_runtime_put(dcmi->dev);
+err_unlocked:
spin_lock_irq(&dcmi->irqlock);
/*
* Return all buffers to vb2 in QUEUED state.
@@ -977,6 +978,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
struct dcmi_framesize sd_fsize;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1012,7 +1016,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
@@ -1162,6 +1166,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
.which = V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
int ret;
sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
@@ -1175,7 +1182,7 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
ret = v4l2_subdev_call(dcmi->source, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
index 4785faddf630..3872027ed2fa 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -206,9 +206,9 @@ static int sun4i_csi_open(struct file *file)
if (ret)
return ret;
- ret = pm_runtime_get_sync(csi->dev);
+ ret = pm_runtime_resume_and_get(csi->dev);
if (ret < 0)
- goto err_pm_put;
+ goto err_unlock;
ret = v4l2_pipeline_pm_get(&csi->vdev.entity);
if (ret)
@@ -227,6 +227,8 @@ err_pipeline_pm_put:
err_pm_put:
pm_runtime_put(csi->dev);
+
+err_unlock:
mutex_unlock(&csi->lock);
return ret;
@@ -269,25 +271,26 @@ static const struct v4l2_mbus_framefmt sun4i_csi_pad_fmt_default = {
};
static int sun4i_csi_subdev_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(subdev, cfg, CSI_SUBDEV_SINK);
+ fmt = v4l2_subdev_get_try_format(subdev, sd_state, CSI_SUBDEV_SINK);
*fmt = sun4i_csi_pad_fmt_default;
return 0;
}
static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
+ fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -297,14 +300,15 @@ static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
}
static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
+ fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -323,7 +327,7 @@ static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
static int
sun4i_csi_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *mbus)
{
if (mbus->index >= ARRAY_SIZE(sun4i_csi_formats))
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 3181d0781b61..07b2161392d2 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -481,8 +481,10 @@ static int sun6i_video_open(struct file *file)
goto fh_release;
/* check if already powered */
- if (!v4l2_fh_is_singular_file(file))
+ if (!v4l2_fh_is_singular_file(file)) {
+ ret = -EBUSY;
goto unlock;
+ }
ret = sun6i_csi_set_power(video->csi, true);
if (ret < 0)
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index 3f81dd17755c..fbcca59a0517 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -494,7 +494,7 @@ static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count)
struct device *dev = ctx->dev->dev;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable module\n");
diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti-vpe/cal-camerarx.c
index cbe6114908de..124a4e2bdefe 100644
--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
+++ b/drivers/media/platform/ti-vpe/cal-camerarx.c
@@ -586,12 +586,12 @@ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
static struct v4l2_mbus_framefmt *
cal_camerarx_get_pad_format(struct cal_camerarx *phy,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&phy->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&phy->subdev, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &phy->formats[pad];
default:
@@ -611,7 +611,7 @@ static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
}
static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -623,7 +623,7 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = cal_camerarx_get_pad_format(phy, cfg,
+ fmt = cal_camerarx_get_pad_format(phy, sd_state,
CAL_CAMERARX_PAD_SINK,
code->which);
code->code = fmt->code;
@@ -639,7 +639,7 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -652,7 +652,7 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
if (fse->pad == CAL_CAMERARX_PAD_SOURCE) {
struct v4l2_mbus_framefmt *fmt;
- fmt = cal_camerarx_get_pad_format(phy, cfg,
+ fmt = cal_camerarx_get_pad_format(phy, sd_state,
CAL_CAMERARX_PAD_SINK,
fse->which);
if (fse->code != fmt->code)
@@ -679,20 +679,21 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
struct v4l2_mbus_framefmt *fmt;
- fmt = cal_camerarx_get_pad_format(phy, cfg, format->pad, format->which);
+ fmt = cal_camerarx_get_pad_format(phy, sd_state, format->pad,
+ format->which);
format->format = *fmt;
return 0;
}
static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -702,7 +703,7 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* No transcoding, source and sink formats must match. */
if (format->pad == CAL_CAMERARX_PAD_SOURCE)
- return cal_camerarx_sd_get_fmt(sd, cfg, format);
+ return cal_camerarx_sd_get_fmt(sd, sd_state, format);
/*
* Default to the first format if the requested media bus code isn't
@@ -727,11 +728,13 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
format->format.code = fmtinfo->code;
/* Store the format and propagate it to the source pad. */
- fmt = cal_camerarx_get_pad_format(phy, cfg, CAL_CAMERARX_PAD_SINK,
+ fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ CAL_CAMERARX_PAD_SINK,
format->which);
*fmt = format->format;
- fmt = cal_camerarx_get_pad_format(phy, cfg, CAL_CAMERARX_PAD_SOURCE,
+ fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ CAL_CAMERARX_PAD_SOURCE,
format->which);
*fmt = format->format;
@@ -742,11 +745,11 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format format = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = CAL_CAMERARX_PAD_SINK,
.format = {
.width = 640,
@@ -760,7 +763,7 @@ static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
},
};
- return cal_camerarx_sd_set_fmt(sd, cfg, &format);
+ return cal_camerarx_sd_set_fmt(sd, sd_state, &format);
}
static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
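The cal-camerarx.c changes above show the shape of most of this series: in the v5.14 subdev rework, the `struct v4l2_subdev_pad_config *cfg` argument of every pad operation became a `struct v4l2_subdev_state *sd_state`, and helpers such as v4l2_subdev_get_try_format() take the state instead. The hunks that follow are largely this mechanical rename plus re-wrapped argument lists. A sketch of the per-driver dispatch helper that recurs throughout, with struct my_subdev and its fields as stand-ins:

    #include <media/v4l2-subdev.h>

    struct my_subdev {
        struct v4l2_subdev sd;
        struct v4l2_mbus_framefmt formats[2];    /* ACTIVE format per pad */
    };

    static struct v4l2_mbus_framefmt *
    my_get_pad_format(struct my_subdev *priv,
                      struct v4l2_subdev_state *sd_state,
                      unsigned int pad, u32 which)
    {
        switch (which) {
        case V4L2_SUBDEV_FORMAT_TRY:
            /* TRY storage now lives in the state object */
            return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
        case V4L2_SUBDEV_FORMAT_ACTIVE:
            return &priv->formats[pad];
        default:
            return NULL;
        }
    }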
diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
index 7b7436a355ee..15fb5360cf13 100644
--- a/drivers/media/platform/ti-vpe/cal-video.c
+++ b/drivers/media/platform/ti-vpe/cal-video.c
@@ -700,7 +700,9 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
- pm_runtime_get_sync(ctx->cal->dev);
+ ret = pm_runtime_resume_and_get(ctx->cal->dev);
+ if (ret < 0)
+ goto error_pipeline;
cal_ctx_set_dma_addr(ctx, addr);
cal_ctx_start(ctx);
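Unlike the probe-time conversions, this hunk adds a check that simply was not there before: a failed pm_runtime_get_sync() was ignored and streaming started against suspended hardware. When .start_streaming() bails out, vb2 expects the driver to hand back every buffer it already owns in the QUEUED state, which is the kind of unwind the error_pipeline label leads to. A rough sketch, with the my_* names hypothetical:

    static int my_start_streaming(struct vb2_queue *vq, unsigned int count)
    {
        struct my_ctx *ctx = vb2_get_drv_priv(vq);
        int ret;

        ret = pm_runtime_resume_and_get(ctx->dev);
        if (ret < 0)
            goto err_release_buffers;

        /* program DMA addresses and start capture here */
        return 0;

    err_release_buffers:
        /* hypothetical helper: return queued buffers to vb2 */
        my_return_all_buffers(ctx, VB2_BUF_STATE_QUEUED);
        return ret;
    }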
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 2e2bef91b2b0..76fe7a8b33f6 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1024,7 +1024,7 @@ static int cal_probe(struct platform_device *pdev)
/* Read the revision and hardware info to verify hardware access. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
goto error_pm_runtime;
@@ -1098,10 +1098,11 @@ static int cal_remove(struct platform_device *pdev)
{
struct cal_dev *cal = platform_get_drvdata(pdev);
unsigned int i;
+ int ret;
cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME);
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
cal_media_unregister(cal);
@@ -1115,7 +1116,8 @@ static int cal_remove(struct platform_device *pdev)
for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_destroy(cal->phy[i]);
- pm_runtime_put_sync(&pdev->dev);
+ if (ret >= 0)
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
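The cal_remove() change above is the subtle half of the resume_and_get() conversion: since the helper holds a usage count only on success, the matching pm_runtime_put_sync() must be skipped when the resume failed, otherwise the count underflows. Teardown itself still runs either way. A condensed sketch:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int my_remove(struct platform_device *pdev)
    {
        int ret;

        ret = pm_runtime_resume_and_get(&pdev->dev);

        /* unregister and tear down regardless of the resume result */

        if (ret >= 0)
            pm_runtime_put_sync(&pdev->dev);    /* pair only a successful get */
        pm_runtime_disable(&pdev->dev);

        return 0;
    }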
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 10251b787674..5b1c5d96a407 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -2471,11 +2471,9 @@ static int vpe_runtime_get(struct platform_device *pdev)
dev_dbg(&pdev->dev, "vpe_runtime_get\n");
- r = pm_runtime_get_sync(&pdev->dev);
+ r = pm_runtime_resume_and_get(&pdev->dev);
WARN_ON(r < 0);
- if (r)
- pm_runtime_put_noidle(&pdev->dev);
- return r < 0 ? r : 0;
+ return r;
}
static void vpe_runtime_put(struct platform_device *pdev)
@@ -2580,7 +2578,7 @@ static int vpe_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = vpe_runtime_get(pdev);
- if (ret)
+ if (ret < 0)
goto rel_m2m;
/* Perform clk enable followed by reset */
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index ed0ad68c5c48..3655573e8581 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -844,6 +844,9 @@ static int viacam_do_try_fmt(struct via_camera *cam,
{
int ret;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -852,7 +855,7 @@ static int viacam_do_try_fmt(struct via_camera *cam,
upix->pixelformat = f->pixelformat;
viacam_fmt_pre(upix, spix);
v4l2_fill_mbus_format(&format.format, spix, f->mbus_code);
- ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(spix, &format.format);
viacam_fmt_post(upix, spix);
return ret;
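via-camera is a caller of subdev pad ops rather than an implementer, so it sees the other side of the state rework: code that used to hand a bare v4l2_subdev_pad_config to set_fmt for TRY negotiation now wraps it in an on-stack v4l2_subdev_state whose .pads member points at that config. A hedged sketch of the idiom; my_try_sensor_fmt() is illustrative:

    #include <media/v4l2-subdev.h>

    static int my_try_sensor_fmt(struct v4l2_subdev *sensor,
                                 struct v4l2_mbus_framefmt *mbus_fmt)
    {
        struct v4l2_subdev_pad_config pad_cfg = {};
        struct v4l2_subdev_state pad_state = {
            .pads = &pad_cfg,    /* single-pad TRY storage on the stack */
        };
        struct v4l2_subdev_format format = {
            .which = V4L2_SUBDEV_FORMAT_TRY,
        };
        int ret;

        format.format = *mbus_fmt;
        ret = v4l2_subdev_call(sensor, pad, set_fmt, &pad_state, &format);
        *mbus_fmt = format.format;    /* what the sensor would accept */
        return ret;
    }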
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 133122e38515..905005e271ca 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -140,14 +140,14 @@ static const struct v4l2_subdev_video_ops video_mux_subdev_video_ops = {
static struct v4l2_mbus_framefmt *
__video_mux_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &vmux->format_mbus[pad];
default:
@@ -156,14 +156,15 @@ __video_mux_get_pad_format(struct v4l2_subdev *sd,
}
static int video_mux_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
mutex_lock(&vmux->lock);
- sdformat->format = *__video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->format = *__video_mux_get_pad_format(sd, sd_state,
+ sdformat->pad,
sdformat->which);
mutex_unlock(&vmux->lock);
@@ -172,7 +173,7 @@ static int video_mux_get_format(struct v4l2_subdev *sd,
}
static int video_mux_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
@@ -180,12 +181,13 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
struct media_pad *pad = &vmux->pads[sdformat->pad];
u16 source_pad = sd->entity.num_pads - 1;
- mbusformat = __video_mux_get_pad_format(sd, cfg, sdformat->pad,
- sdformat->which);
+ mbusformat = __video_mux_get_pad_format(sd, sd_state, sdformat->pad,
+ sdformat->which);
if (!mbusformat)
return -EINVAL;
- source_mbusformat = __video_mux_get_pad_format(sd, cfg, source_pad,
+ source_mbusformat = __video_mux_get_pad_format(sd, sd_state,
+ source_pad,
sdformat->which);
if (!source_mbusformat)
return -EINVAL;
@@ -310,7 +312,7 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
}
static int video_mux_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
struct v4l2_mbus_framefmt *mbusformat;
@@ -319,7 +321,7 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&vmux->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, i);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
*mbusformat = video_mux_format_mbus_default;
}
@@ -362,7 +364,7 @@ static int video_mux_async_register(struct video_mux *vmux,
for (i = 0; i < num_input_pads; i++) {
struct v4l2_async_subdev *asd;
- struct fwnode_handle *ep;
+ struct fwnode_handle *ep, *remote_ep;
ep = fwnode_graph_get_endpoint_by_id(
dev_fwnode(vmux->subdev.dev), i, 0,
@@ -370,6 +372,14 @@ static int video_mux_async_register(struct video_mux *vmux,
if (!ep)
continue;
+ /* Skip dangling endpoints for backwards compatibility */
+ remote_ep = fwnode_graph_get_remote_endpoint(ep);
+ if (!remote_ep) {
+ fwnode_handle_put(ep);
+ continue;
+ }
+ fwnode_handle_put(remote_ep);
+
asd = v4l2_async_notifier_add_fwnode_remote_subdev(
&vmux->notifier, ep, struct v4l2_async_subdev);
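The video-mux hunk above is the one behavioural change in that file: an endpoint described in the device tree but wired to nothing no longer produces an async notifier entry that can never complete. The fwnode refcounting is the easy part to get wrong, so here is a sketch of the loop with the puts spelled out; FWNODE_GRAPH_ENDPOINT_NEXT and the registration step are assumptions about the surrounding code:

    static void my_register_inputs(struct device *dev,
                                   unsigned int num_input_pads)
    {
        struct fwnode_handle *ep, *remote_ep;
        unsigned int i;

        for (i = 0; i < num_input_pads; i++) {
            ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
                                                 FWNODE_GRAPH_ENDPOINT_NEXT);
            if (!ep)
                continue;

            remote_ep = fwnode_graph_get_remote_endpoint(ep);
            if (!remote_ep) {
                fwnode_handle_put(ep);    /* drop the local ref as well */
                continue;                 /* dangling link: nothing to bind */
            }
            fwnode_handle_put(remote_ep); /* only needed as an existence test */

            /* register an async subdev for ep here, then drop it */
            fwnode_handle_put(ep);
        }
    }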
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c
index 2d86c718a5cf..89385b4cabe5 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/vsp1/vsp1_brx.c
@@ -65,7 +65,7 @@ static const struct v4l2_ctrl_ops brx_ctrl_ops = {
*/
static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -73,12 +73,12 @@ static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int brx_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
@@ -97,14 +97,14 @@ static int brx_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_compose(&brx->entity.subdev, cfg, pad);
+ return v4l2_subdev_get_try_compose(&brx->entity.subdev, sd_state, pad);
}
static void brx_try_format(struct vsp1_brx *brx,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -119,7 +119,7 @@ static void brx_try_format(struct vsp1_brx *brx,
default:
/* The BRx can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&brx->entity, config,
+ format = vsp1_entity_get_pad_format(&brx->entity, sd_state,
BRX_PAD_SINK(0));
fmt->code = format->code;
break;
@@ -132,17 +132,18 @@ static void brx_try_format(struct vsp1_brx *brx,
}
static int brx_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -181,11 +182,11 @@ done:
}
static int brx_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
if (sel->pad == brx->entity.source_pad)
return -EINVAL;
@@ -199,7 +200,7 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
return 0;
case V4L2_SEL_TGT_COMPOSE:
- config = vsp1_entity_get_pad_config(&brx->entity, cfg,
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
sel->which);
if (!config)
return -EINVAL;
@@ -215,11 +216,11 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
}
static int brx_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
int ret = 0;
@@ -232,7 +233,8 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
index a47b23bf5abf..c5217fee24f1 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -123,27 +123,28 @@ static const unsigned int clu_codes[] = {
};
static int clu_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, clu_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, clu_codes,
ARRAY_SIZE(clu_codes));
}
static int clu_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, CLU_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ CLU_MIN_SIZE,
CLU_MIN_SIZE, CLU_MAX_SIZE,
CLU_MAX_SIZE);
}
static int clu_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, clu_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, clu_codes,
ARRAY_SIZE(clu_codes),
CLU_MIN_SIZE, CLU_MIN_SIZE,
CLU_MAX_SIZE, CLU_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index aa66e4f5f3f3..de442d6c9926 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -559,15 +559,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
*/
int vsp1_device_get(struct vsp1_device *vsp1)
{
- int ret;
-
- ret = pm_runtime_get_sync(vsp1->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(vsp1->dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_resume_and_get(vsp1->dev);
}
/*
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index aa9d2286056e..6f51e5c75543 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -103,7 +103,7 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
/**
* vsp1_entity_get_pad_config - Get the pad configuration for an entity
* @entity: the entity
- * @cfg: the TRY pad configuration
+ * @sd_state: the TRY state
* @which: configuration selector (ACTIVE or TRY)
*
* When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
@@ -114,9 +114,9 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
* and simply returned when requested. The ACTIVE configuration comes from the
* entity structure.
*/
-struct v4l2_subdev_pad_config *
+struct v4l2_subdev_state *
vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
@@ -124,14 +124,14 @@ vsp1_entity_get_pad_config(struct vsp1_entity *entity,
return entity->config;
case V4L2_SUBDEV_FORMAT_TRY:
default:
- return cfg;
+ return sd_state;
}
}
/**
* vsp1_entity_get_pad_format - Get a pad format from storage for an entity
* @entity: the entity
- * @cfg: the configuration storage
+ * @sd_state: the state storage
* @pad: the pad number
*
* Return the format stored in the given configuration for an entity's pad. The
@@ -139,16 +139,16 @@ vsp1_entity_get_pad_config(struct vsp1_entity *entity,
*/
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&entity->subdev, sd_state, pad);
}
/**
* vsp1_entity_get_pad_selection - Get a pad selection from storage for entity
* @entity: the entity
- * @cfg: the configuration storage
+ * @sd_state: the state storage
* @pad: the pad number
* @target: the selection target
*
@@ -158,14 +158,16 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
*/
struct v4l2_rect *
vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, unsigned int target)
{
switch (target) {
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_compose(&entity->subdev, sd_state,
+ pad);
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&entity->subdev, sd_state,
+ pad);
default:
return NULL;
}
@@ -180,7 +182,7 @@ vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
* function can be used as a handler for the subdev pad::init_cfg operation.
*/
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format format;
unsigned int pad;
@@ -189,10 +191,10 @@ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
memset(&format, 0, sizeof(format));
format.pad = pad;
- format.which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ format.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
: V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
+ v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
}
return 0;
@@ -208,13 +210,13 @@ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
* a direct drop-in for the operation handler.
*/
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
- config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
if (!config)
return -EINVAL;
@@ -239,7 +241,7 @@ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
* the sink pad.
*/
int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code,
const unsigned int *codes, unsigned int ncodes)
{
@@ -251,7 +253,7 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
code->code = codes[code->index];
} else {
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
/*
@@ -261,7 +263,8 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- config = vsp1_entity_get_pad_config(entity, cfg, code->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state,
+ code->which);
if (!config)
return -EINVAL;
@@ -290,17 +293,17 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
* source pad size identical to the sink pad.
*/
int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fse->which);
if (!config)
return -EINVAL;
@@ -353,14 +356,14 @@ done:
* source pad.
*/
int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const unsigned int *codes, unsigned int ncodes,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
unsigned int i;
@@ -368,7 +371,7 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
mutex_lock(&entity->lock);
- config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -672,7 +675,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
* Allocate the pad configuration to store formats and selection
* rectangles.
*/
- entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
+ entity->config = v4l2_subdev_alloc_state(&entity->subdev);
if (entity->config == NULL) {
media_entity_cleanup(&entity->subdev.entity);
return -ENOMEM;
@@ -687,6 +690,6 @@ void vsp1_entity_destroy(struct vsp1_entity *entity)
entity->ops->destroy(entity);
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
- v4l2_subdev_free_pad_config(entity->config);
+ v4l2_subdev_free_state(entity->config);
media_entity_cleanup(&entity->subdev.entity);
}
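vsp1 is unusual in that each entity privately allocates a subdev state to hold its ACTIVE formats and selection rectangles, so the alloc/free helper renames surface here too. A sketch mirroring the init/destroy pairing this patch keeps, with the my_entity_* names illustrative:

    static int my_entity_init(struct vsp1_entity *entity)
    {
        /* one state object per entity as ACTIVE storage;
         * formerly v4l2_subdev_alloc_pad_config() */
        entity->config = v4l2_subdev_alloc_state(&entity->subdev);
        if (entity->config == NULL)
            return -ENOMEM;
        return 0;
    }

    static void my_entity_destroy(struct vsp1_entity *entity)
    {
        /* formerly v4l2_subdev_free_pad_config() */
        v4l2_subdev_free_state(entity->config);
    }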
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index a1ceb37bb837..f22724439cdc 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -115,7 +115,7 @@ struct vsp1_entity {
unsigned int sink_pad;
struct v4l2_subdev subdev;
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct mutex lock; /* Protects the pad config */
};
@@ -136,20 +136,20 @@ int vsp1_entity_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
-struct v4l2_subdev_pad_config *
+struct v4l2_subdev_state *
vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which);
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad);
struct v4l2_rect *
vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, unsigned int target);
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *sd_state);
void vsp1_entity_route_setup(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
@@ -173,20 +173,20 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt);
int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const unsigned int *codes, unsigned int ncodes,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height);
int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code,
const unsigned int *codes, unsigned int ncodes);
int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse,
unsigned int min_w, unsigned int min_h,
unsigned int max_w, unsigned int max_h);
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
index a91e142bcb94..5e5013d2cd2a 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -170,7 +170,7 @@ static const struct vb2_ops histo_video_queue_qops = {
*/
static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
@@ -180,28 +180,30 @@ static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
return 0;
}
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, histo->formats,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code,
+ histo->formats,
histo->num_formats);
}
static int histo_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad != HISTO_PAD_SINK)
return -EINVAL;
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HISTO_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HISTO_MIN_SIZE,
HISTO_MIN_SIZE, HISTO_MAX_SIZE,
HISTO_MAX_SIZE);
}
static int histo_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -211,7 +213,8 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -256,15 +259,15 @@ done:
}
static int histo_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
- struct v4l2_subdev_selection *sel)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
/* The crop rectangle must be inside the input frame. */
- format = vsp1_entity_get_pad_format(&histo->entity, config,
+ format = vsp1_entity_get_pad_format(&histo->entity, sd_state,
HISTO_PAD_SINK);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -274,11 +277,11 @@ static int histo_set_crop(struct v4l2_subdev *subdev,
format->height - sel->r.top);
/* Set the crop rectangle and reset the compose rectangle. */
- selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad, V4L2_SEL_TGT_CROP);
*selection = sel->r;
- selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad,
V4L2_SEL_TGT_COMPOSE);
*selection = sel->r;
@@ -287,7 +290,7 @@ static int histo_set_crop(struct v4l2_subdev *subdev,
}
static int histo_set_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
@@ -303,7 +306,8 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
sel->r.left = 0;
sel->r.top = 0;
- crop = vsp1_entity_get_pad_selection(&histo->entity, config, sel->pad,
+ crop = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad,
V4L2_SEL_TGT_CROP);
/*
@@ -329,7 +333,7 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
ratio = 1 << (crop->height * 2 / sel->r.height / 3);
sel->r.height = crop->height / ratio;
- compose = vsp1_entity_get_pad_selection(&histo->entity, config,
+ compose = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad,
V4L2_SEL_TGT_COMPOSE);
*compose = sel->r;
@@ -338,11 +342,11 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
}
static int histo_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
int ret;
if (sel->pad != HISTO_PAD_SINK)
@@ -350,7 +354,8 @@ static int histo_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -369,7 +374,7 @@ done:
}
static int histo_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->pad == HISTO_PAD_SOURCE) {
@@ -381,19 +386,19 @@ static int histo_get_format(struct v4l2_subdev *subdev,
return 0;
}
- return vsp1_subdev_get_pad_format(subdev, cfg, fmt);
+ return vsp1_subdev_get_pad_format(subdev, sd_state, fmt);
}
static int histo_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
if (fmt->pad != HISTO_PAD_SINK)
- return histo_get_format(subdev, cfg, fmt);
+ return histo_get_format(subdev, sd_state, fmt);
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt,
histo->formats, histo->num_formats,
HISTO_MIN_SIZE, HISTO_MIN_SIZE,
HISTO_MAX_SIZE, HISTO_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index d5ebd9d08c8a..361a870380c2 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -34,7 +34,7 @@ static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
*/
static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
@@ -52,26 +52,28 @@ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HSIT_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HSIT_MIN_SIZE,
HSIT_MIN_SIZE, HSIT_MAX_SIZE,
HSIT_MAX_SIZE);
}
static int hsit_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&hsit->entity.lock);
- config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&hsit->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 14ed5d7bd061..6a6857ac9327 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -40,27 +40,28 @@ static const unsigned int lif_codes[] = {
};
static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lif_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lif_codes,
ARRAY_SIZE(lif_codes));
}
static int lif_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LIF_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LIF_MIN_SIZE,
LIF_MIN_SIZE, LIF_MAX_SIZE,
LIF_MAX_SIZE);
}
static int lif_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lif_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lif_codes,
ARRAY_SIZE(lif_codes),
LIF_MIN_SIZE, LIF_MIN_SIZE,
LIF_MAX_SIZE, LIF_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 9f88842d7048..ac6802a325f5 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -99,27 +99,28 @@ static const unsigned int lut_codes[] = {
};
static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lut_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lut_codes,
ARRAY_SIZE(lut_codes));
}
static int lut_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LUT_MIN_SIZE,
LUT_MIN_SIZE, LUT_MAX_SIZE,
LUT_MAX_SIZE);
}
static int lut_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lut_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lut_codes,
ARRAY_SIZE(lut_codes),
LUT_MIN_SIZE, LUT_MIN_SIZE,
LUT_MAX_SIZE, LUT_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 049bdd958e56..22a82d218152 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -17,9 +17,9 @@
#define RWPF_MIN_HEIGHT 1
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
- struct v4l2_subdev_pad_config *config)
+ struct v4l2_subdev_state *sd_state)
{
- return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, config,
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, sd_state,
RWPF_PAD_SINK);
}
@@ -28,7 +28,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
*/
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -46,28 +46,30 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, RWPF_MIN_WIDTH,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ RWPF_MIN_WIDTH,
RWPF_MIN_HEIGHT, rwpf->max_width,
rwpf->max_height);
}
static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -128,11 +130,11 @@ done:
}
static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -145,7 +147,8 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -176,11 +179,11 @@ done:
}
static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -197,7 +200,8 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 2f3582590618..eac5c04c2239 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -84,6 +84,6 @@ int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
- struct v4l2_subdev_pad_config *config);
+ struct v4l2_subdev_state *sd_state);
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 2b65457ee12f..b614a2aea461 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -106,7 +106,7 @@ static const struct v4l2_ctrl_config sru_intensity_control = {
*/
static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -114,20 +114,21 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int sru_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fse->which);
if (!config)
return -EINVAL;
@@ -164,7 +165,7 @@ done:
}
static void sru_try_format(struct vsp1_sru *sru,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -184,7 +185,7 @@ static void sru_try_format(struct vsp1_sru *sru,
case SRU_PAD_SOURCE:
/* The SRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&sru->entity, config,
+ format = vsp1_entity_get_pad_format(&sru->entity, sd_state,
SRU_PAD_SINK);
fmt->code = format->code;
@@ -216,17 +217,18 @@ static void sru_try_format(struct vsp1_sru *sru,
}
static int sru_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&sru->entity.lock);
- config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 5fc04c082d1a..1c290cda005a 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -111,7 +111,7 @@ static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
*/
static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -119,20 +119,21 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int uds_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fse->which);
if (!config)
return -EINVAL;
@@ -164,7 +165,7 @@ done:
}
static void uds_try_format(struct vsp1_uds *uds,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -184,7 +185,7 @@ static void uds_try_format(struct vsp1_uds *uds,
case UDS_PAD_SOURCE:
/* The UDS scales but can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&uds->entity, config,
+ format = vsp1_entity_get_pad_format(&uds->entity, sd_state,
UDS_PAD_SINK);
fmt->code = format->code;
@@ -200,17 +201,18 @@ static void uds_try_format(struct vsp1_uds *uds,
}
static int uds_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&uds->entity.lock);
- config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_uif.c b/drivers/media/platform/vsp1/vsp1_uif.c
index 467d1072577b..83d7f17df80e 100644
--- a/drivers/media/platform/vsp1/vsp1_uif.c
+++ b/drivers/media/platform/vsp1/vsp1_uif.c
@@ -54,38 +54,39 @@ static const unsigned int uif_codes[] = {
};
static int uif_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, uif_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, uif_codes,
ARRAY_SIZE(uif_codes));
}
static int uif_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, UIF_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ UIF_MIN_SIZE,
UIF_MIN_SIZE, UIF_MAX_SIZE,
UIF_MAX_SIZE);
}
static int uif_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, uif_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, uif_codes,
ARRAY_SIZE(uif_codes),
UIF_MIN_SIZE, UIF_MIN_SIZE,
UIF_MAX_SIZE, UIF_MAX_SIZE);
}
static int uif_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -94,7 +95,8 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -127,11 +129,11 @@ done:
}
static int uif_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
int ret = 0;
@@ -142,7 +144,8 @@ static int uif_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index fff7ddec6745..b1baf9d7b6ec 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -681,12 +681,13 @@ stream_done:
static struct v4l2_mbus_framefmt *
__xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xcsi2rxss->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&xcsi2rxss->subdev,
+ sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xcsi2rxss->format;
default:
@@ -697,7 +698,7 @@ __xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
/**
* xcsi2rxss_init_cfg - Initialise the pad format config to default
* @sd: Pointer to V4L2 Sub device structure
- * @cfg: Pointer to sub device pad information structure
+ * @sd_state: Pointer to sub device state structure
*
* This function is used to initialize the pad format with the default
* values.
@@ -705,7 +706,7 @@ __xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
* Return: 0 on success
*/
static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
struct v4l2_mbus_framefmt *format;
@@ -713,7 +714,7 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&xcsi2rxss->lock);
for (i = 0; i < XCSI_MEDIA_PADS; i++) {
- format = v4l2_subdev_get_try_format(sd, cfg, i);
+ format = v4l2_subdev_get_try_format(sd, sd_state, i);
*format = xcsi2rxss->default_format;
}
mutex_unlock(&xcsi2rxss->lock);
@@ -724,7 +725,7 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
/**
* xcsi2rxss_get_format - Get the pad format
* @sd: Pointer to V4L2 Sub device structure
- * @cfg: Pointer to sub device pad information structure
+ * @sd_state: Pointer to sub device state structure
* @fmt: Pointer to pad level media bus format
*
* This function is used to get the pad format information.
@@ -732,13 +733,14 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
mutex_lock(&xcsi2rxss->lock);
- fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, cfg, fmt->pad,
+ fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, sd_state,
+ fmt->pad,
fmt->which);
mutex_unlock(&xcsi2rxss->lock);
@@ -748,7 +750,7 @@ static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
/**
* xcsi2rxss_set_format - This is used to set the pad format
* @sd: Pointer to V4L2 Sub device structure
- * @cfg: Pointer to sub device pad information structure
+ * @sd_state: Pointer to sub device state structure
* @fmt: Pointer to pad level media bus format
*
* This function is used to set the pad format. Since the pad format is fixed
@@ -759,7 +761,7 @@ static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
@@ -773,7 +775,7 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
* CSI format cannot be changed at runtime.
* Ensure that the format to set is copied over to the CSI pad format
*/
- __format = __xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ __format = __xcsi2rxss_get_pad_format(xcsi2rxss, sd_state,
fmt->pad, fmt->which);
/* only sink pad format can be updated */
@@ -811,7 +813,7 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
* Return: -EINVAL or zero on success
*/
static int xcsi2rxss_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct xcsi2rxss_state *state = to_xcsi2rxssstate(sd);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 2a56201cb853..338c3661d809 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -26,7 +26,6 @@
#include "xilinx-vip.h"
#include "xilinx-vipp.h"
-#define XVIP_DMA_DEF_FORMAT V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH 1920
#define XVIP_DMA_DEF_HEIGHT 1080
@@ -549,8 +548,6 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
* requested format isn't supported.
*/
info = xvip_get_format_by_fourcc(pix->pixelformat);
- if (IS_ERR(info))
- info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
pix->pixelformat = info->fourcc;
pix->field = V4L2_FIELD_NONE;
@@ -660,7 +657,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
INIT_LIST_HEAD(&dma->queued_bufs);
spin_lock_init(&dma->queued_lock);
- dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
+ dma->fmtinfo = xvip_get_format_by_fourcc(V4L2_PIX_FMT_YUYV);
dma->format.pixelformat = dma->fmtinfo->fourcc;
dma->format.colorspace = V4L2_COLORSPACE_SRGB;
dma->format.field = V4L2_FIELD_NONE;
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index ed01bedb5db6..0f2d5a0edf0c 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -251,12 +251,13 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
static struct v4l2_mbus_framefmt *
__xtpg_get_pad_format(struct xtpg_device *xtpg,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xtpg->xvip.subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&xtpg->xvip.subdev,
+ sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xtpg->formats[pad];
default:
@@ -265,25 +266,26 @@ __xtpg_get_pad_format(struct xtpg_device *xtpg,
}
static int xtpg_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xtpg_device *xtpg = to_tpg(subdev);
- fmt->format = *__xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+ fmt->format = *__xtpg_get_pad_format(xtpg, sd_state, fmt->pad,
+ fmt->which);
return 0;
}
static int xtpg_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xtpg_device *xtpg = to_tpg(subdev);
struct v4l2_mbus_framefmt *__format;
u32 bayer_phase;
- __format = __xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+ __format = __xtpg_get_pad_format(xtpg, sd_state, fmt->pad, fmt->which);
/* In two pads mode the source pad format is always identical to the
* sink pad format.
@@ -306,7 +308,8 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
/* Propagate the format to the source pad. */
if (xtpg->npads == 2) {
- __format = __xtpg_get_pad_format(xtpg, cfg, 1, fmt->which);
+ __format = __xtpg_get_pad_format(xtpg, sd_state, 1,
+ fmt->which);
*__format = fmt->format;
}
@@ -318,12 +321,12 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
*/
static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -351,11 +354,11 @@ static int xtpg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct xtpg_device *xtpg = to_tpg(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
*format = xtpg->default_format;
if (xtpg->npads == 2) {
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 1);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 1);
*format = xtpg->default_format;
}
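The last xtpg hunk touches the other place TRY storage moved: the per-filehandle pad config that subdev .open() handlers used to reach through fh->pad is now a full subdev state at fh->state. A sketch of an open handler seeding TRY formats; my_open() and default_format are illustrative:

    static int my_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
    {
        struct v4l2_mbus_framefmt *try_fmt;

        /* the file handle now carries a v4l2_subdev_state */
        try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0);
        *try_fmt = default_format;    /* hypothetical driver default */

        return 0;
    }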
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 6ad61b08a31a..425a32dd5d19 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -70,8 +70,8 @@ EXPORT_SYMBOL_GPL(xvip_get_format_by_code);
* @fourcc: the format 4CC
*
* Return: a pointer to the format information structure corresponding to the
- * given V4L2 format @fourcc, or ERR_PTR if no corresponding format can be
- * found.
+ * given V4L2 format @fourcc. If not found, return a pointer to the first
+ * available format (V4L2_PIX_FMT_YUYV).
*/
const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
{
@@ -84,7 +84,7 @@ const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
return format;
}
- return ERR_PTR(-EINVAL);
+ return &xvip_video_formats[0];
}
EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
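Together with the xilinx-dma.c hunks above, this changes the contract of xvip_get_format_by_fourcc(): instead of ERR_PTR(-EINVAL) it now falls back to the first table entry, so callers no longer need IS_ERR() handling and the XVIP_DMA_DEF_FORMAT fallback in __xvip_dma_try_format() could be dropped. The patched function, condensed:

    const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(xvip_video_formats); i++)
            if (xvip_video_formats[i].fourcc == fourcc)
                return &xvip_video_formats[i];

        /* unknown fourcc: fall back to the first entry (YUYV)
         * instead of returning ERR_PTR(-EINVAL) */
        return &xvip_video_formats[0];
    }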
@@ -234,7 +234,7 @@ EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
/**
* xvip_enum_mbus_code - Enumerate the media format code
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: returning media bus code
*
* Enumerate the media bus code of the subdevice. Return the corresponding
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
* is not valid.
*/
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct v4l2_mbus_framefmt *format;
@@ -260,7 +260,7 @@ int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, code->pad);
code->code = format->code;
@@ -271,7 +271,7 @@ EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
/**
* xvip_enum_frame_size - Enumerate the media bus frame size
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: returning media bus frame size
*
* This function is a drop-in implementation of the subdev enum_frame_size pad
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
* if the index or the code is not valid.
*/
int xvip_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
@@ -295,7 +295,7 @@ int xvip_enum_frame_size(struct v4l2_subdev *subdev,
if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index a528a32ea1dc..d0b0e0600952 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -125,10 +125,10 @@ const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt);
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code);
int xvip_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse);
static inline u32 xvip_read(struct xvip_device *xvip, u32 addr)
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index a7dfe5f55c18..433f9642786d 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -110,7 +110,7 @@ static long radio_si4713_default(struct file *file, void *p,
ioctl, cmd, arg);
}
-static struct v4l2_ioctl_ops radio_si4713_ioctl_ops = {
+static const struct v4l2_ioctl_ops radio_si4713_ioctl_ops = {
.vidioc_querycap = radio_si4713_querycap,
.vidioc_g_modulator = radio_si4713_g_modulator,
.vidioc_s_modulator = radio_si4713_s_modulator,
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index f016b35c2b17..d0a8326b75c2 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -19,7 +19,6 @@ source "drivers/media/rc/keymaps/Kconfig"
config LIRC
bool "LIRC user interface"
- depends on RC_CORE
help
Enable this option to enable the Linux Infrared Remote
Control user interface (e.g. /dev/lirc*). This interface
@@ -41,12 +40,10 @@ config BPF_LIRC_MODE2
menuconfig RC_DECODERS
bool "Remote controller decoders"
- depends on RC_CORE
if RC_DECODERS
config IR_NEC_DECODER
tristate "Enable IR raw decoder for the NEC protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -55,7 +52,6 @@ config IR_NEC_DECODER
config IR_RC5_DECODER
tristate "Enable IR raw decoder for the RC-5 protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -64,7 +60,6 @@ config IR_RC5_DECODER
config IR_RC6_DECODER
tristate "Enable IR raw decoder for the RC6 protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -73,7 +68,6 @@ config IR_RC6_DECODER
config IR_JVC_DECODER
tristate "Enable IR raw decoder for the JVC protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -82,7 +76,6 @@ config IR_JVC_DECODER
config IR_SONY_DECODER
tristate "Enable IR raw decoder for the Sony protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -91,7 +84,6 @@ config IR_SONY_DECODER
config IR_SANYO_DECODER
tristate "Enable IR raw decoder for the Sanyo protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -101,7 +93,6 @@ config IR_SANYO_DECODER
config IR_SHARP_DECODER
tristate "Enable IR raw decoder for the Sharp protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -111,7 +102,6 @@ config IR_SHARP_DECODER
config IR_MCE_KBD_DECODER
tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -121,7 +111,6 @@ config IR_MCE_KBD_DECODER
config IR_XMP_DECODER
tristate "Enable IR raw decoder for the XMP protocol"
- depends on RC_CORE
select BITREVERSE
help
@@ -130,7 +119,6 @@ config IR_XMP_DECODER
config IR_IMON_DECODER
tristate "Enable IR raw decoder for the iMON protocol"
- depends on RC_CORE
help
Enable this option if you have an iMON PAD or Antec Veris infrared
remote control and would like to use it with a raw IR
@@ -138,7 +126,6 @@ config IR_IMON_DECODER
config IR_RCMM_DECODER
tristate "Enable IR raw decoder for the RC-MM protocol"
- depends on RC_CORE
help
Enable this option when you have an IR remote using the RC-MM
protocol and need the software decoder. The driver supports 12,
@@ -153,15 +140,12 @@ endif #RC_DECODERS
menuconfig RC_DEVICES
bool "Remote Controller devices"
- depends on RC_CORE
if RC_DEVICES
config RC_ATI_REMOTE
tristate "ATI / X10 based USB RF remote controls"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use an X10 based USB remote control.
These are RF remotes with USB receivers.
@@ -179,7 +163,6 @@ config RC_ATI_REMOTE
config IR_ENE
tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
depends on PNP || COMPILE_TEST
- depends on RC_CORE
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by ENE.
@@ -192,7 +175,6 @@ config IR_ENE
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
- depends on RC_CORE
depends on OF || COMPILE_TEST
help
Say Y here if you want to use the Hisilicon hix5hd2 remote control.
@@ -203,9 +185,7 @@ config IR_HIX5HD2
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use a SoundGraph iMON (aka Antec Veris)
IR Receiver and/or LCD/VFD/VGA display.
@@ -215,9 +195,7 @@ config IR_IMON
config IR_IMON_RAW
tristate "SoundGraph iMON Receiver (early raw IR models)"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use a SoundGraph iMON IR Receiver,
early raw models.
@@ -227,9 +205,7 @@ config IR_IMON_RAW
config IR_MCEUSB
tristate "Windows Media Center Ed. eHome Infrared Transceiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use a Windows Media Center Edition
eHome Infrared Transceiver.
@@ -240,7 +216,6 @@ config IR_MCEUSB
config IR_ITE_CIR
tristate "ITE Tech Inc. IT8712/IT8512 Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
- depends on RC_CORE
help
Say Y here to enable support for integrated infrared receivers
/transceivers made by ITE Tech Inc. These are found in
@@ -253,7 +228,6 @@ config IR_ITE_CIR
config IR_FINTEK
tristate "Fintek Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
- depends on RC_CORE
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Fintek. This chip is found on assorted
@@ -264,7 +238,6 @@ config IR_FINTEK
config IR_MESON
tristate "Amlogic Meson IR remote receiver"
- depends on RC_CORE
depends on ARCH_MESON || COMPILE_TEST
help
Say Y if you want to use the IR remote receiver available
@@ -275,7 +248,6 @@ config IR_MESON
config IR_MTK
tristate "Mediatek IR remote receiver"
- depends on RC_CORE
depends on ARCH_MEDIATEK || COMPILE_TEST
help
Say Y if you want to use the IR remote receiver available
@@ -287,7 +259,6 @@ config IR_MTK
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
- depends on RC_CORE
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Nuvoton (formerly Winbond). This chip is
@@ -299,11 +270,9 @@ config IR_NUVOTON
config IR_REDRAT3
tristate "RedRat3 IR Transceiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
+ depends on USB
select NEW_LEDS
select LEDS_CLASS
- select USB
help
Say Y here if you want to use a RedRat3 Infrared Transceiver.
@@ -322,9 +291,7 @@ config IR_SPI
config IR_STREAMZAP
tristate "Streamzap PC Remote IR Receiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use a Streamzap PC Remote
Infrared Receiver.
@@ -335,7 +302,6 @@ config IR_STREAMZAP
config IR_WINBOND_CIR
tristate "Winbond IR remote control"
depends on (X86 && PNP) || COMPILE_TEST
- depends on RC_CORE
select NEW_LEDS
select LEDS_CLASS
select BITREVERSE
@@ -350,9 +316,7 @@ config IR_WINBOND_CIR
config IR_IGORPLUGUSB
tristate "IgorPlug-USB IR Receiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use the IgorPlug-USB IR Receiver by
Igor Cesko. This device is included on the Fit-PC2.
@@ -365,9 +329,7 @@ config IR_IGORPLUGUSB
config IR_IGUANA
tristate "IguanaWorks USB IR Transceiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
help
Say Y here if you want to use the IguanaWorks USB IR Transceiver.
Both infrared receive and send are supported. If you want to
@@ -381,9 +343,7 @@ config IR_IGUANA
config IR_TTUSBIR
tristate "TechnoTrend USB IR Receiver"
- depends on USB_ARCH_HAS_HCD
- depends on RC_CORE
- select USB
+ depends on USB
select NEW_LEDS
select LEDS_CLASS
help
@@ -407,7 +367,6 @@ source "drivers/media/rc/img-ir/Kconfig"
config RC_LOOPBACK
tristate "Remote Control Loopback Driver"
- depends on RC_CORE
help
Say Y here if you want support for the remote control loopback
driver which allows TX data to be sent back as RX data.
@@ -420,7 +379,6 @@ config RC_LOOPBACK
config IR_GPIO_CIR
tristate "GPIO IR remote control"
- depends on RC_CORE
depends on (OF && GPIOLIB) || COMPILE_TEST
help
Say Y if you want to use a GPIO-based IR receiver.
@@ -430,7 +388,6 @@ config IR_GPIO_CIR
config IR_GPIO_TX
tristate "GPIO IR Bit Banging Transmitter"
- depends on RC_CORE
depends on LIRC
depends on (OF && GPIOLIB) || COMPILE_TEST
help
@@ -442,7 +399,6 @@ config IR_GPIO_TX
config IR_PWM_TX
tristate "PWM IR transmitter"
- depends on RC_CORE
depends on LIRC
depends on PWM
depends on OF || COMPILE_TEST
@@ -455,7 +411,6 @@ config IR_PWM_TX
config RC_ST
tristate "ST remote control receiver"
- depends on RC_CORE
depends on ARCH_STI || COMPILE_TEST
help
Say Y here if you want support for the ST remote control driver
@@ -466,7 +421,6 @@ config RC_ST
config IR_SUNXI
tristate "SUNXI IR remote control"
- depends on RC_CORE
depends on ARCH_SUNXI || COMPILE_TEST
help
Say Y if you want to use the sunXi internal IR controller
@@ -476,7 +430,6 @@ config IR_SUNXI
config IR_SERIAL
tristate "Homebrew Serial Port Receiver"
- depends on RC_CORE
help
Say Y if you want to use Homebrew Serial Port Receivers and
Transceivers.
@@ -492,28 +445,15 @@ config IR_SERIAL_TRANSMITTER
config IR_SIR
tristate "Built-in SIR IrDA port"
- depends on RC_CORE
help
Say Y if you want to use an IrDA SIR port transceiver.
To compile this driver as a module, choose M here: the module will
be called sir-ir.
-config IR_TANGO
- tristate "Sigma Designs SMP86xx IR decoder"
- depends on RC_CORE
- depends on ARCH_TANGO || COMPILE_TEST
- help
- Adds support for the HW IR decoder embedded on Sigma Designs
- Tango-based systems (SMP86xx, SMP87xx).
- The HW decoder supports NEC, RC-5, RC-6 IR protocols.
- When compiled as a module, look for tango-ir.
-
config RC_XBOX_DVD
tristate "Xbox DVD Movie Playback Kit"
- depends on RC_CORE
- depends on USB_ARCH_HAS_HCD
- select USB
+ depends on USB
help
Say Y here if you want to use the Xbox DVD Movie Playback Kit.
These are IR remotes with USB receivers for the Original Xbox (2001).
@@ -523,8 +463,7 @@ config RC_XBOX_DVD
config IR_TOY
tristate "Infrared Toy and IR Droid"
- depends on RC_CORE
- depends on USB_ARCH_HAS_HCD
+ depends on USB
help
Say Y here if you want to use the Infrared Toy or IR Droid, USB
versions.
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index f31002288f7c..692e9b6b203f 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -48,6 +48,5 @@ obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
-obj-$(CONFIG_IR_TANGO) += tango-ir.o
obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
obj-$(CONFIG_IR_TOY) += ir_toy.o
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 3fe3edd80876..afae0afe3f81 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -326,7 +326,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
}
if (attr->query.prog_cnt != 0 && prog_ids && cnt)
- ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
+ ret = bpf_prog_array_copy_to_user(progs, prog_ids,
+ attr->query.prog_cnt);
unlock:
mutex_unlock(&ir_raw_handler_lock);
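
The one-liner above relies on the copy helper's contract (hedged summary of the bpf core of this era): bpf_prog_array_copy_to_user() copies at most the requested number of program ids and returns -ENOSPC when the array holds more. Passing attr->query.prog_cnt, the capacity userspace declared, therefore lets userspace detect truncation:

	/* prog_cnt is the user-declared buffer capacity, not the kernel count */
	ret = bpf_prog_array_copy_to_user(progs, prog_ids,
					  attr->query.prog_cnt);
	/* ret == -ENOSPC => more programs are attached than fit */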
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index a7962ca2ac8e..2ca4e86c7b9f 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -780,7 +780,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
/*
* These are the sysfs functions to handle the association on the iMON 2.4G LT.
*/
-static ssize_t show_associate_remote(struct device *d,
+static ssize_t associate_remote_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -800,7 +800,7 @@ static ssize_t show_associate_remote(struct device *d,
return strlen(buf);
}
-static ssize_t store_associate_remote(struct device *d,
+static ssize_t associate_remote_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -822,7 +822,7 @@ static ssize_t store_associate_remote(struct device *d,
/*
* sysfs functions to control internal imon clock
*/
-static ssize_t show_imon_clock(struct device *d,
+static ssize_t imon_clock_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct imon_context *ictx = dev_get_drvdata(d);
@@ -848,7 +848,7 @@ static ssize_t show_imon_clock(struct device *d,
return len;
}
-static ssize_t store_imon_clock(struct device *d,
+static ssize_t imon_clock_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -895,11 +895,8 @@ exit:
}
-static DEVICE_ATTR(imon_clock, S_IWUSR | S_IRUGO, show_imon_clock,
- store_imon_clock);
-
-static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote,
- store_associate_remote);
+static DEVICE_ATTR_RW(imon_clock);
+static DEVICE_ATTR_RW(associate_remote);
static struct attribute *imon_display_sysfs_entries[] = {
&dev_attr_imon_clock.attr,
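
The show/store renames exist only to satisfy the naming contract of DEVICE_ATTR_RW(). Simplified sketch of the macros (from include/linux/device.h and include/linux/sysfs.h of this era):

	#define DEVICE_ATTR_RW(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
	/* __ATTR_RW(_name) expands to __ATTR(_name, 0644,
	 * _name##_show, _name##_store), so the callbacks must be named
	 * <attr>_show / <attr>_store for the token pasting to resolve. */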
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index ce7a40b10828..4b4294d77555 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -167,7 +167,7 @@ struct ite_dev {
* hardware data obtained from:
*
* IT8712F
- * Environment Control – Low Pin Count Input / Output
+ * Environment Control - Low Pin Count Input / Output
* (EC - LPC I/O)
* Preliminary Specification V0. 81
*/
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 50b2833dbe4f..5fe5c9e1a46d 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-budget-ci-old.o \
rc-cinergy-1400.o \
rc-cinergy.o \
+ rc-ct-90405.o \
rc-d680-dmb.o \
rc-delock-61959.o \
rc-dib0700-nec.o \
@@ -100,7 +101,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-reddo.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
- rc-tango.o \
rc-tanix-tx3mini.o \
rc-tanix-tx5max.o \
rc-tbs-nec.o \
diff --git a/drivers/media/rc/keymaps/rc-ct-90405.c b/drivers/media/rc/keymaps/rc-ct-90405.c
new file mode 100644
index 000000000000..8914c83c9d9f
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-ct-90405.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Toshiba CT-90405 remote controller keytable
+ *
+ * Copyright (C) 2021 Alexander Voronov <avv.0@ya.ru>
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table ct_90405[] = {
+ { 0x4014, KEY_SWITCHVIDEOMODE },
+ { 0x4012, KEY_POWER },
+ { 0x4044, KEY_TV },
+ { 0x40be43, KEY_3D_MODE },
+ { 0x400c, KEY_SUBTITLE },
+ { 0x4001, KEY_NUMERIC_1 },
+ { 0x4002, KEY_NUMERIC_2 },
+ { 0x4003, KEY_NUMERIC_3 },
+ { 0x4004, KEY_NUMERIC_4 },
+ { 0x4005, KEY_NUMERIC_5 },
+ { 0x4006, KEY_NUMERIC_6 },
+ { 0x4007, KEY_NUMERIC_7 },
+ { 0x4008, KEY_NUMERIC_8 },
+ { 0x4009, KEY_NUMERIC_9 },
+ { 0x4062, KEY_AUDIO_DESC },
+ { 0x4000, KEY_NUMERIC_0 },
+ { 0x401a, KEY_VOLUMEUP },
+ { 0x401e, KEY_VOLUMEDOWN },
+ { 0x4016, KEY_INFO },
+ { 0x4010, KEY_MUTE },
+ { 0x401b, KEY_CHANNELUP },
+ { 0x401f, KEY_CHANNELDOWN },
+ { 0x40da, KEY_VENDOR },
+ { 0x4066, KEY_PLAYER },
+ { 0x4017, KEY_TEXT },
+ { 0x4047, KEY_LIST },
+ { 0x4073, KEY_PAGEUP },
+ { 0x4045, KEY_PROGRAM },
+ { 0x4043, KEY_EXIT },
+ { 0x4074, KEY_PAGEDOWN },
+ { 0x4064, KEY_BACK },
+ { 0x405b, KEY_MENU },
+ { 0x4019, KEY_UP },
+ { 0x4040, KEY_RIGHT },
+ { 0x401d, KEY_DOWN },
+ { 0x4042, KEY_LEFT },
+ { 0x4021, KEY_OK },
+ { 0x4053, KEY_REWIND },
+ { 0x4067, KEY_PLAY },
+ { 0x400d, KEY_FASTFORWARD },
+ { 0x4054, KEY_PREVIOUS },
+ { 0x4068, KEY_STOP },
+ { 0x406a, KEY_PAUSE },
+ { 0x4015, KEY_NEXT },
+ { 0x4048, KEY_RED },
+ { 0x4049, KEY_GREEN },
+ { 0x404a, KEY_YELLOW },
+ { 0x404b, KEY_BLUE },
+ { 0x406f, KEY_RECORD }
+};
+
+static struct rc_map_list ct_90405_map = {
+ .map = {
+ .scan = ct_90405,
+ .size = ARRAY_SIZE(ct_90405),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_CT_90405,
+ }
+};
+
+static int __init init_rc_map_ct_90405(void)
+{
+ return rc_map_register(&ct_90405_map);
+}
+
+static void __exit exit_rc_map_ct_90405(void)
+{
+ rc_map_unregister(&ct_90405_map);
+}
+
+module_init(init_rc_map_ct_90405)
+module_exit(exit_rc_map_ct_90405)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Voronov <avv.0@ya.ru>");
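
For the new keymap to be selectable by name, RC_MAP_CT_90405 also needs a matching define in include/media/rc-map.h; that companion hunk is not shown in this excerpt, but presumably reads:

	#define RC_MAP_CT_90405          "rc-ct-90405"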
diff --git a/drivers/media/rc/keymaps/rc-tango.c b/drivers/media/rc/keymaps/rc-tango.c
deleted file mode 100644
index 2b9cef6ef5b5..000000000000
--- a/drivers/media/rc/keymaps/rc-tango.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sigma Designs
- */
-
-#include <linux/module.h>
-#include <media/rc-map.h>
-
-static struct rc_map_table tango_table[] = {
- { 0x4cb4a, KEY_POWER },
- { 0x4cb48, KEY_FILE },
- { 0x4cb0f, KEY_SETUP },
- { 0x4cb4d, KEY_SUSPEND },
- { 0x4cb4e, KEY_VOLUMEUP },
- { 0x4cb44, KEY_EJECTCD },
- { 0x4cb13, KEY_TV },
- { 0x4cb51, KEY_MUTE },
- { 0x4cb52, KEY_VOLUMEDOWN },
-
- { 0x4cb41, KEY_NUMERIC_1 },
- { 0x4cb03, KEY_NUMERIC_2 },
- { 0x4cb42, KEY_NUMERIC_3 },
- { 0x4cb45, KEY_NUMERIC_4 },
- { 0x4cb07, KEY_NUMERIC_5 },
- { 0x4cb46, KEY_NUMERIC_6 },
- { 0x4cb55, KEY_NUMERIC_7 },
- { 0x4cb17, KEY_NUMERIC_8 },
- { 0x4cb56, KEY_NUMERIC_9 },
- { 0x4cb1b, KEY_NUMERIC_0 },
- { 0x4cb59, KEY_DELETE },
- { 0x4cb5a, KEY_CAPSLOCK },
-
- { 0x4cb47, KEY_BACK },
- { 0x4cb05, KEY_SWITCHVIDEOMODE },
- { 0x4cb06, KEY_UP },
- { 0x4cb43, KEY_LEFT },
- { 0x4cb01, KEY_RIGHT },
- { 0x4cb0a, KEY_DOWN },
- { 0x4cb02, KEY_ENTER },
- { 0x4cb4b, KEY_INFO },
- { 0x4cb09, KEY_HOME },
-
- { 0x4cb53, KEY_MENU },
- { 0x4cb12, KEY_PREVIOUS },
- { 0x4cb50, KEY_PLAY },
- { 0x4cb11, KEY_NEXT },
- { 0x4cb4f, KEY_TITLE },
- { 0x4cb0e, KEY_REWIND },
- { 0x4cb4c, KEY_STOP },
- { 0x4cb0d, KEY_FORWARD },
- { 0x4cb57, KEY_MEDIA_REPEAT },
- { 0x4cb16, KEY_ANGLE },
- { 0x4cb54, KEY_PAUSE },
- { 0x4cb15, KEY_SLOW },
- { 0x4cb5b, KEY_TIME },
- { 0x4cb1a, KEY_AUDIO },
- { 0x4cb58, KEY_SUBTITLE },
- { 0x4cb19, KEY_ZOOM },
-
- { 0x4cb5f, KEY_RED },
- { 0x4cb1e, KEY_GREEN },
- { 0x4cb5c, KEY_YELLOW },
- { 0x4cb1d, KEY_BLUE },
-};
-
-static struct rc_map_list tango_map = {
- .map = {
- .scan = tango_table,
- .size = ARRAY_SIZE(tango_table),
- .rc_proto = RC_PROTO_NECX,
- .name = RC_MAP_TANGO,
- }
-};
-
-static int __init init_rc_map_tango(void)
-{
- return rc_map_register(&tango_map);
-}
-
-static void __exit exit_rc_map_tango(void)
-{
- rc_map_unregister(&tango_map);
-}
-
-module_init(init_rc_map_tango)
-module_exit(exit_rc_map_tango)
-
-MODULE_AUTHOR("Sigma Designs");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 3237fef5d502..d79d1e3996b2 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -157,8 +157,9 @@ static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void st_rc_hardware_init(struct st_rc_device *dev)
+static int st_rc_hardware_init(struct st_rc_device *dev)
{
+ int ret;
int baseclock, freqdiff;
unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
unsigned int rx_sampling_freq_div;
@@ -166,7 +167,12 @@ static void st_rc_hardware_init(struct st_rc_device *dev)
/* Enable the IP */
reset_control_deassert(dev->rstc);
- clk_prepare_enable(dev->sys_clock);
+ ret = clk_prepare_enable(dev->sys_clock);
+ if (ret) {
+ dev_err(dev->dev, "Failed to prepare/enable system clock\n");
+ return ret;
+ }
+
baseclock = clk_get_rate(dev->sys_clock);
/* IRB input pins are inverted internally from high to low. */
@@ -184,6 +190,8 @@ static void st_rc_hardware_init(struct st_rc_device *dev)
}
writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD);
+
+ return 0;
}
static int st_rc_remove(struct platform_device *pdev)
@@ -287,7 +295,9 @@ static int st_rc_probe(struct platform_device *pdev)
rc_dev->dev = dev;
platform_set_drvdata(pdev, rc_dev);
- st_rc_hardware_init(rc_dev);
+ ret = st_rc_hardware_init(rc_dev);
+ if (ret)
+ goto err;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* rx sampling rate is 10Mhz */
@@ -359,6 +369,7 @@ static int st_rc_suspend(struct device *dev)
static int st_rc_resume(struct device *dev)
{
+ int ret;
struct st_rc_device *rc_dev = dev_get_drvdata(dev);
struct rc_dev *rdev = rc_dev->rdev;
@@ -367,7 +378,10 @@ static int st_rc_resume(struct device *dev)
rc_dev->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(dev);
- st_rc_hardware_init(rc_dev);
+ ret = st_rc_hardware_init(rc_dev);
+ if (ret)
+ return ret;
+
if (rdev->users) {
writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN);
writel(0x01, rc_dev->rx_base + IRB_RX_EN);
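
The st_rc hunks apply a single pattern: clk_prepare_enable() can fail, so its result is now checked and propagated from both probe and resume. Minimal sketch of the pattern, including the teardown that keeps the prepare/enable count balanced (the pairing is standard clk API usage, not shown in this hunk):

	ret = clk_prepare_enable(dev->sys_clock);
	if (ret)
		return ret;			/* propagate to probe/resume */
	...
	clk_disable_unprepare(dev->sys_clock);	/* matching remove/suspend path */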
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
deleted file mode 100644
index b8eb5bc4d9be..000000000000
--- a/drivers/media/rc/tango-ir.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
- */
-
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <media/rc-core.h>
-
-#define DRIVER_NAME "tango-ir"
-
-#define IR_NEC_CTRL 0x00
-#define IR_NEC_DATA 0x04
-#define IR_CTRL 0x08
-#define IR_RC5_CLK_DIV 0x0c
-#define IR_RC5_DATA 0x10
-#define IR_INT 0x14
-
-#define NEC_TIME_BASE 560
-#define RC5_TIME_BASE 1778
-
-#define RC6_CTRL 0x00
-#define RC6_CLKDIV 0x04
-#define RC6_DATA0 0x08
-#define RC6_DATA1 0x0c
-#define RC6_DATA2 0x10
-#define RC6_DATA3 0x14
-#define RC6_DATA4 0x18
-
-#define RC6_CARRIER 36000
-#define RC6_TIME_BASE 16
-
-#define NEC_CAP(n) ((n) << 24)
-#define GPIO_SEL(n) ((n) << 16)
-#define DISABLE_NEC (BIT(4) | BIT(8))
-#define ENABLE_RC5 (BIT(0) | BIT(9))
-#define ENABLE_RC6 (BIT(0) | BIT(7))
-#define ACK_IR_INT (BIT(0) | BIT(1))
-#define ACK_RC6_INT (BIT(31))
-
-#define NEC_ANY (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32)
-
-struct tango_ir {
- void __iomem *rc5_base;
- void __iomem *rc6_base;
- struct rc_dev *rc;
- struct clk *clk;
-};
-
-static void tango_ir_handle_nec(struct tango_ir *ir)
-{
- u32 v, code;
- enum rc_proto proto;
-
- v = readl_relaxed(ir->rc5_base + IR_NEC_DATA);
- if (!v) {
- rc_repeat(ir->rc);
- return;
- }
-
- code = ir_nec_bytes_to_scancode(v, v >> 8, v >> 16, v >> 24, &proto);
- rc_keydown(ir->rc, proto, code, 0);
-}
-
-static void tango_ir_handle_rc5(struct tango_ir *ir)
-{
- u32 data, field, toggle, addr, cmd, code;
-
- data = readl_relaxed(ir->rc5_base + IR_RC5_DATA);
- if (data & BIT(31))
- return;
-
- field = data >> 12 & 1;
- toggle = data >> 11 & 1;
- addr = data >> 6 & 0x1f;
- cmd = (data & 0x3f) | (field ^ 1) << 6;
-
- code = RC_SCANCODE_RC5(addr, cmd);
- rc_keydown(ir->rc, RC_PROTO_RC5, code, toggle);
-}
-
-static void tango_ir_handle_rc6(struct tango_ir *ir)
-{
- u32 data0, data1, toggle, mode, addr, cmd, code;
-
- data0 = readl_relaxed(ir->rc6_base + RC6_DATA0);
- data1 = readl_relaxed(ir->rc6_base + RC6_DATA1);
-
- mode = data0 >> 1 & 7;
- if (mode != 0)
- return;
-
- toggle = data0 & 1;
- addr = data0 >> 16;
- cmd = data1;
-
- code = RC_SCANCODE_RC6_0(addr, cmd);
- rc_keydown(ir->rc, RC_PROTO_RC6_0, code, toggle);
-}
-
-static irqreturn_t tango_ir_irq(int irq, void *dev_id)
-{
- struct tango_ir *ir = dev_id;
- unsigned int rc5_stat;
- unsigned int rc6_stat;
-
- rc5_stat = readl_relaxed(ir->rc5_base + IR_INT);
- writel_relaxed(rc5_stat, ir->rc5_base + IR_INT);
-
- rc6_stat = readl_relaxed(ir->rc6_base + RC6_CTRL);
- writel_relaxed(rc6_stat, ir->rc6_base + RC6_CTRL);
-
- if (!(rc5_stat & 3) && !(rc6_stat & BIT(31)))
- return IRQ_NONE;
-
- if (rc5_stat & BIT(0))
- tango_ir_handle_rc5(ir);
-
- if (rc5_stat & BIT(1))
- tango_ir_handle_nec(ir);
-
- if (rc6_stat & BIT(31))
- tango_ir_handle_rc6(ir);
-
- return IRQ_HANDLED;
-}
-
-static int tango_change_protocol(struct rc_dev *dev, u64 *rc_type)
-{
- struct tango_ir *ir = dev->priv;
- u32 rc5_ctrl = DISABLE_NEC;
- u32 rc6_ctrl = 0;
-
- if (*rc_type & NEC_ANY)
- rc5_ctrl = 0;
-
- if (*rc_type & RC_PROTO_BIT_RC5)
- rc5_ctrl |= ENABLE_RC5;
-
- if (*rc_type & RC_PROTO_BIT_RC6_0)
- rc6_ctrl = ENABLE_RC6;
-
- writel_relaxed(rc5_ctrl, ir->rc5_base + IR_CTRL);
- writel_relaxed(rc6_ctrl, ir->rc6_base + RC6_CTRL);
-
- return 0;
-}
-
-static int tango_ir_probe(struct platform_device *pdev)
-{
- const char *map_name = RC_MAP_TANGO;
- struct device *dev = &pdev->dev;
- struct rc_dev *rc;
- struct tango_ir *ir;
- u64 clkrate, clkdiv;
- int irq, err;
- u32 val;
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
-
- ir = devm_kzalloc(dev, sizeof(*ir), GFP_KERNEL);
- if (!ir)
- return -ENOMEM;
-
- ir->rc5_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ir->rc5_base))
- return PTR_ERR(ir->rc5_base);
-
- ir->rc6_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(ir->rc6_base))
- return PTR_ERR(ir->rc6_base);
-
- ir->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ir->clk))
- return PTR_ERR(ir->clk);
-
- rc = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
- if (!rc)
- return -ENOMEM;
-
- of_property_read_string(dev->of_node, "linux,rc-map-name", &map_name);
-
- rc->device_name = DRIVER_NAME;
- rc->driver_name = DRIVER_NAME;
- rc->input_phys = DRIVER_NAME "/input0";
- rc->map_name = map_name;
- rc->allowed_protocols = NEC_ANY | RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_0;
- rc->change_protocol = tango_change_protocol;
- rc->priv = ir;
- ir->rc = rc;
-
- err = clk_prepare_enable(ir->clk);
- if (err)
- return err;
-
- clkrate = clk_get_rate(ir->clk);
-
- clkdiv = clkrate * NEC_TIME_BASE;
- do_div(clkdiv, 1000000);
-
- val = NEC_CAP(31) | GPIO_SEL(12) | clkdiv;
- writel_relaxed(val, ir->rc5_base + IR_NEC_CTRL);
-
- clkdiv = clkrate * RC5_TIME_BASE;
- do_div(clkdiv, 1000000);
-
- writel_relaxed(DISABLE_NEC, ir->rc5_base + IR_CTRL);
- writel_relaxed(clkdiv, ir->rc5_base + IR_RC5_CLK_DIV);
- writel_relaxed(ACK_IR_INT, ir->rc5_base + IR_INT);
-
- clkdiv = clkrate * RC6_TIME_BASE;
- do_div(clkdiv, RC6_CARRIER);
-
- writel_relaxed(ACK_RC6_INT, ir->rc6_base + RC6_CTRL);
- writel_relaxed((clkdiv >> 2) << 18 | clkdiv, ir->rc6_base + RC6_CLKDIV);
-
- err = devm_request_irq(dev, irq, tango_ir_irq, IRQF_SHARED,
- dev_name(dev), ir);
- if (err)
- goto err_clk;
-
- err = devm_rc_register_device(dev, rc);
- if (err)
- goto err_clk;
-
- platform_set_drvdata(pdev, ir);
- return 0;
-
-err_clk:
- clk_disable_unprepare(ir->clk);
- return err;
-}
-
-static int tango_ir_remove(struct platform_device *pdev)
-{
- struct tango_ir *ir = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(ir->clk);
- return 0;
-}
-
-static const struct of_device_id tango_ir_dt_ids[] = {
- { .compatible = "sigma,smp8642-ir" },
- { }
-};
-MODULE_DEVICE_TABLE(of, tango_ir_dt_ids);
-
-static struct platform_driver tango_ir_driver = {
- .probe = tango_ir_probe,
- .remove = tango_ir_remove,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = tango_ir_dt_ids,
- },
-};
-module_platform_driver(tango_ir_driver);
-
-MODULE_DESCRIPTION("SMP86xx IR decoder driver");
-MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 931ec0727cd3..e5094fff04c5 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -147,7 +147,7 @@ static int cxd2880_spi_read_ts(struct spi_device *spi,
ret = spi_sync(spi, &message);
if (ret)
- pr_err("spi_write_then_read failed\n");
+ pr_err("spi_sync failed\n");
return ret;
}
@@ -401,7 +401,7 @@ static int cxd2880_start_feed(struct dvb_demux_feed *feed)
dvb_spi,
"cxd2880_ts_read");
if (IS_ERR(dvb_spi->cxd2880_ts_read_thread)) {
- pr_err("kthread_run failed/\n");
+ pr_err("kthread_run failed\n");
kfree(dvb_spi->ts_buf);
dvb_spi->ts_buf = NULL;
memset(&dvb_spi->filter_config, 0,
@@ -448,7 +448,7 @@ static int cxd2880_stop_feed(struct dvb_demux_feed *feed)
* in dvb_spi->all_pid_feed_count.
*/
if (dvb_spi->all_pid_feed_count <= 0) {
- pr_err("PID %d not found.\n", feed->pid);
+ pr_err("PID %d not found\n", feed->pid);
return -EINVAL;
}
dvb_spi->all_pid_feed_count--;
@@ -485,7 +485,7 @@ static int cxd2880_stop_feed(struct dvb_demux_feed *feed)
ret_stop = kthread_stop(dvb_spi->cxd2880_ts_read_thread);
if (ret_stop) {
- pr_err("'kthread_stop failed. (%d)\n", ret_stop);
+ pr_err("kthread_stop failed. (%d)\n", ret_stop);
ret = ret_stop;
}
kfree(dvb_spi->ts_buf);
@@ -512,7 +512,7 @@ cxd2880_spi_probe(struct spi_device *spi)
struct cxd2880_config config;
if (!spi) {
- pr_err("invalid arg.\n");
+ pr_err("invalid arg\n");
return -EINVAL;
}
@@ -596,7 +596,7 @@ cxd2880_spi_probe(struct spi_device *spi)
ret = dvb_spi->demux.dmx.connect_frontend(&dvb_spi->demux.dmx,
&dvb_spi->dmx_fe);
if (ret < 0) {
- pr_err("dvb_register_frontend() failed\n");
+ pr_err("connect_frontend() failed\n");
goto fail_fe_conn;
}
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index a24624353f9e..d714fe50afe5 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -624,11 +624,6 @@ static void device_work(struct work_struct *w)
curr_ctx = container_of(w, struct vim2m_ctx, work_run.work);
- if (!curr_ctx) {
- pr_err("Instance released before the end of transaction\n");
- return;
- }
-
vim2m_dev = curr_ctx->dev;
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
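
The deleted check was dead code: curr_ctx comes from container_of() on the work pointer, which is plain pointer arithmetic and can never yield NULL. Simplified sketch of the macro (the kernel version adds type checking):

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))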
diff --git a/drivers/media/test-drivers/vimc/vimc-debayer.c b/drivers/media/test-drivers/vimc/vimc-debayer.c
index c3f6fef34f68..2d06cdbacc76 100644
--- a/drivers/media/test-drivers/vimc/vimc-debayer.c
+++ b/drivers/media/test-drivers/vimc/vimc-debayer.c
@@ -150,17 +150,17 @@ static bool vimc_deb_src_code_is_valid(u32 code)
}
static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = sink_fmt_default;
for (i = 1; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = sink_fmt_default;
mf->code = vdeb->src_code;
}
@@ -169,7 +169,7 @@ static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (VIMC_IS_SRC(code->pad)) {
@@ -188,7 +188,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
@@ -213,14 +213,14 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
/* Get the current sink format */
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ *v4l2_subdev_get_try_format(sd, sd_state, 0) :
vdeb->sink_fmt;
/* Set the right code for the source pad */
@@ -251,7 +251,7 @@ static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
@@ -266,8 +266,8 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
sink_fmt = &vdeb->sink_fmt;
src_code = &vdeb->src_code;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- src_code = &v4l2_subdev_get_try_format(sd, cfg, 1)->code;
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ src_code = &v4l2_subdev_get_try_format(sd, sd_state, 1)->code;
}
/*
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 121fa7d62a2e..06880dd0b6ac 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -84,20 +84,20 @@ static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
}
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf;
struct v4l2_rect *r;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = sink_fmt_default;
- r = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ r = v4l2_subdev_get_try_crop(sd, sd_state, 0);
*r = crop_rect_default;
for (i = 1; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = sink_fmt_default;
mf->width = mf->width * sca_mult;
mf->height = mf->height * sca_mult;
@@ -107,7 +107,7 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 mbus_code = vimc_mbus_code_by_index(code->index);
@@ -128,7 +128,7 @@ static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct vimc_pix_map *vpix;
@@ -156,7 +156,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -164,8 +164,8 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
/* Get the current sink format */
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
} else {
format->format = vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
@@ -201,7 +201,7 @@ static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -216,8 +216,8 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
sink_fmt = &vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
}
/*
@@ -254,7 +254,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
}
static int vimc_sca_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -268,8 +268,8 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
sink_fmt = &vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
}
switch (sel->target) {
@@ -287,7 +287,7 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
}
static int vimc_sca_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -305,8 +305,8 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
crop_rect = &vsca->crop_rect;
sink_fmt = &vsca->sink_fmt;
} else {
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
}
switch (sel->target) {
diff --git a/drivers/media/test-drivers/vimc/vimc-sensor.c b/drivers/media/test-drivers/vimc/vimc-sensor.c
index ba5db5a150b4..74ab79cadb5d 100644
--- a/drivers/media/test-drivers/vimc/vimc-sensor.c
+++ b/drivers/media/test-drivers/vimc/vimc-sensor.c
@@ -42,14 +42,14 @@ static const struct v4l2_mbus_framefmt fmt_default = {
};
static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = fmt_default;
}
@@ -57,7 +57,7 @@ static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 mbus_code = vimc_mbus_code_by_index(code->index);
@@ -71,7 +71,7 @@ static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct vimc_pix_map *vpix;
@@ -93,14 +93,14 @@ static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sen_device *vsen =
container_of(sd, struct vimc_sen_device, sd);
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) :
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) :
vsen->mbus_format;
return 0;
@@ -146,7 +146,7 @@ static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
@@ -159,7 +159,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
mf = &vsen->mbus_format;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
}
/* Set the new format */
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index ca0ebf6ad9cc..d2bd2653cf54 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -656,6 +656,46 @@ static const struct v4l2_file_operations vivid_radio_fops = {
.unlocked_ioctl = video_ioctl2,
};
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int r;
+
+ /*
+ * Sliced and raw VBI capture share the same queue so we must
+ * change the type.
+ */
+ if (p->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ||
+ p->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ r = vb2_queue_change_type(vdev->queue, p->type);
+ if (r)
+ return r;
+ }
+
+ return vb2_ioctl_reqbufs(file, priv, p);
+}
+
+static int vidioc_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int r;
+
+ /*
+ * Sliced and raw VBI capture share the same queue so we must
+ * change the type.
+ */
+ if (p->format.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ||
+ p->format.type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ r = vb2_queue_change_type(vdev->queue, p->format.type);
+ if (r)
+ return r;
+ }
+
+ return vb2_ioctl_create_bufs(file, priv, p);
+}
+
static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
@@ -717,8 +757,8 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_g_fbuf = vidioc_g_fbuf,
.vidioc_s_fbuf = vidioc_s_fbuf,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_create_bufs = vidioc_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
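
Both wrappers depend on vb2_queue_change_type(). As assumed by this hunk, the helper retypes the queue only while it is idle (sketch of the videobuf2-v4l2 implementation of this era):

	int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
	{
		if (type == q->type)
			return 0;
		if (vb2_is_busy(q))
			return -EBUSY;
		q->type = type;
		return 0;
	}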
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index cdff6cd264d0..1e3c4f5a9413 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -429,7 +429,6 @@ struct vivid_dev {
u32 vbi_cap_seq_start;
u32 vbi_cap_seq_count;
bool vbi_cap_streaming;
- bool stream_sliced_vbi_cap;
u32 meta_cap_seq_start;
u32 meta_cap_seq_count;
bool meta_cap_streaming;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index c0dc609c1358..9da730ccfa94 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -752,7 +752,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vbi_cap);
- if (dev->stream_sliced_vbi_cap)
+ if (vbi_cap_buf->vb.vb2_buf.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
else
vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index a1e52708b7ca..265db2114671 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -455,7 +455,6 @@ int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
f->fmt.sdr.pixelformat = dev->sdr_pixelformat;
f->fmt.sdr.buffersize = dev->sdr_buffersize;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
return 0;
}
@@ -468,7 +467,6 @@ int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
if (vb2_is_busy(q))
return -EBUSY;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
dev->sdr_pixelformat = formats[i].pixelformat;
@@ -488,7 +486,6 @@ int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
{
int i;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
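
Presumed justification for dropping the memsets (hedged; the core change is not part of this excerpt): the v4l2-ioctl layer already zeroes the tail of the SDR format around every call, e.g. in the S/TRY_FMT handlers:

	/* v4l2-ioctl.c clears everything past the last used field */
	CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);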
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
index 1a9348eea781..b65b02eeeb97 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
@@ -255,10 +255,8 @@ int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
if (ret)
return ret;
- if (dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (f->type != V4L2_BUF_TYPE_VBI_CAPTURE && vb2_is_busy(&dev->vb_vbi_cap_q))
return -EBUSY;
- dev->stream_sliced_vbi_cap = false;
- dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_VBI_CAPTURE;
return 0;
}
@@ -322,11 +320,9 @@ int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format
if (ret)
return ret;
- if (!dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE && vb2_is_busy(&dev->vb_vbi_cap_q))
return -EBUSY;
dev->service_set_cap = vbi->service_set;
- dev->stream_sliced_vbi_cap = true;
- dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
return 0;
}
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 00feadb217d8..f97153df3c84 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -1,10 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-# This Kconfig option is also used by the legacy av7110 driver
-config TTPCI_EEPROM
- tristate
- depends on I2C
-
if USB && MEDIA_SUPPORT
menuconfig MEDIA_USB_SUPPORT
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 751703db06f5..7a81be7970b2 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -632,7 +632,6 @@ static int airspy_g_fmt_sdr_cap(struct file *file, void *priv,
f->fmt.sdr.pixelformat = s->pixelformat;
f->fmt.sdr.buffersize = s->buffersize;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
return 0;
}
@@ -647,7 +646,6 @@ static int airspy_s_fmt_sdr_cap(struct file *file, void *priv,
if (vb2_is_busy(q))
return -EBUSY;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
s->pixelformat = formats[i].pixelformat;
@@ -670,7 +668,6 @@ static int airspy_try_fmt_sdr_cap(struct file *file, void *priv,
{
int i;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index a8a72d5fbd12..caefac07af92 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -199,8 +199,8 @@ static int au0828_media_device_init(struct au0828_dev *dev,
struct media_device *mdev;
mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
- if (!mdev)
- return -ENOMEM;
+ if (IS_ERR(mdev))
+ return PTR_ERR(mdev);
dev->media_dev = mdev;
#endif
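
The original NULL test silently accepted an error pointer: media_device_usb_allocate() reports failure with ERR_PTR(), never NULL (consistent with the fix above), so the consumer pattern is:

	mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);	/* e.g. -ENOMEM */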
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index 50835f5f7512..57b7f1ea68da 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -429,6 +429,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
int cpia2_do_command(struct camera_data *cam,
unsigned int command,
unsigned char direction, unsigned char param);
+void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf);
struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
int cpia2_init_camera(struct camera_data *cam);
int cpia2_allocate_buffers(struct camera_data *cam);
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index e747548ab286..b5a2d06fb356 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -2167,6 +2167,18 @@ static void reset_camera_struct(struct camera_data *cam)
*
* cpia2_init_camera_struct
*
+ * Deinitialize camera struct
+ *****************************************************************************/
+void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf)
+{
+ v4l2_device_unregister(&cam->v4l2_dev);
+ kfree(cam);
+}
+
+/******************************************************************************
+ *
+ * cpia2_init_camera_struct
+ *
* Initializes camera struct, does not call reset to fill in defaults.
*****************************************************************************/
struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf)
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 3ab80a7b4498..76aac06f9fb8 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -844,15 +844,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
ret = set_alternate(cam, USBIF_CMDONLY);
if (ret < 0) {
ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret);
- kfree(cam);
- return ret;
+ goto alt_err;
}
if((ret = cpia2_init_camera(cam)) < 0) {
ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
- kfree(cam);
- return ret;
+ goto alt_err;
}
LOG(" CPiA Version: %d.%02d (%d.%d)\n",
cam->params.version.firmware_revision_hi,
@@ -872,11 +870,14 @@ static int cpia2_usb_probe(struct usb_interface *intf,
ret = cpia2_register_camera(cam);
if (ret < 0) {
ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
- kfree(cam);
- return ret;
+ goto alt_err;
}
return 0;
+
+alt_err:
+ cpia2_deinit_camera_struct(cam, intf);
+ return ret;
}
/******************************************************************************
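
The probe error paths previously leaked the v4l2_device registration, since a bare kfree(cam) skipped v4l2_device_unregister(). The new alt_err label funnels every late failure through cpia2_deinit_camera_struct(), giving the usual single-exit unwind shape (sketch; some_step stands in for the three failing calls):

	ret = some_step(cam);
	if (ret < 0)
		goto alt_err;
	...
	return 0;
alt_err:
	cpia2_deinit_camera_struct(cam, intf);	/* v4l2_device_unregister + kfree */
	return ret;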
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 69d5c628a797..926ecfc9b64a 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -140,10 +140,10 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct camera_data *cam = video_drvdata(file);
- int noblock = file->f_flags&O_NONBLOCK;
+ int noblock = file->f_flags & O_NONBLOCK;
ssize_t ret;
- if(!cam)
+ if (!cam)
return -EINVAL;
if (mutex_lock_interruptible(&cam->v4l2_lock))
@@ -153,7 +153,6 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
return ret;
}
-
/******************************************************************************
*
* cpia2_v4l_poll
@@ -170,7 +169,6 @@ static __poll_t cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait
return res;
}
-
static int sync(struct camera_data *cam, int frame_nr)
{
struct framebuf *frame = &cam->buffers[frame_nr];
@@ -247,8 +245,8 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
break;
}
- if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
- memset(vc->bus_info,0, sizeof(vc->bus_info));
+ if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) < 0)
+ memset(vc->bus_info, 0, sizeof(vc->bus_info));
return 0;
}
@@ -289,7 +287,7 @@ static int cpia2_s_input(struct file *file, void *fh, unsigned int i)
*****************************************************************************/
static int cpia2_enum_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_fmtdesc *f)
+ struct v4l2_fmtdesc *f)
{
if (f->index > 1)
return -EINVAL;
@@ -310,13 +308,13 @@ static int cpia2_enum_fmt_vid_cap(struct file *file, void *fh,
*****************************************************************************/
static int cpia2_try_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG &&
f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG)
- return -EINVAL;
+ return -EINVAL;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.bytesperline = 0;
@@ -371,19 +369,20 @@ static int cpia2_try_fmt_vid_cap(struct file *file, void *fh,
*****************************************************************************/
static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
int err, frame;
err = cpia2_try_fmt_vid_cap(file, _fh, f);
- if(err != 0)
+ if (err != 0)
return err;
cam->pixelformat = f->fmt.pix.pixelformat;
/* NOTE: This should be set to 1 for MJPEG, but some apps don't handle
- * the missing Huffman table properly. */
+ * the missing Huffman table properly.
+ */
cam->params.compression.inhibit_htables = 0;
/*f->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG;*/
@@ -421,7 +420,7 @@ static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh,
*****************************************************************************/
static int cpia2_g_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
@@ -547,9 +546,8 @@ static const struct {
};
static int cpia2_enum_framesizes(struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize)
+ struct v4l2_frmsizeenum *fsize)
{
-
if (fsize->pixel_format != V4L2_PIX_FMT_MJPEG &&
fsize->pixel_format != V4L2_PIX_FMT_JPEG)
return -EINVAL;
@@ -563,7 +561,7 @@ static int cpia2_enum_framesizes(struct file *file, void *fh,
}
static int cpia2_enum_frameintervals(struct file *file, void *fh,
- struct v4l2_frmivalenum *fival)
+ struct v4l2_frmivalenum *fival)
{
struct camera_data *cam = video_drvdata(file);
int max = ARRAY_SIZE(framerate_controls) - 1;
@@ -665,19 +663,18 @@ static int cpia2_g_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompres
parms->quality = 80; // TODO: Can this be made meaningful?
parms->jpeg_markers = V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI;
- if(!cam->params.compression.inhibit_htables) {
+ if (!cam->params.compression.inhibit_htables)
parms->jpeg_markers |= V4L2_JPEG_MARKER_DHT;
- }
parms->APPn = cam->APPn;
parms->APP_len = cam->APP_len;
- if(cam->APP_len > 0) {
+ if (cam->APP_len > 0) {
memcpy(parms->APP_data, cam->APP_data, cam->APP_len);
parms->jpeg_markers |= V4L2_JPEG_MARKER_APP;
}
parms->COM_len = cam->COM_len;
- if(cam->COM_len > 0) {
+ if (cam->COM_len > 0) {
memcpy(parms->COM_data, cam->COM_data, cam->COM_len);
parms->jpeg_markers |= JPEG_MARKER_COM;
}
@@ -698,7 +695,7 @@ static int cpia2_g_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompres
*****************************************************************************/
static int cpia2_s_jpegcomp(struct file *file, void *fh,
- const struct v4l2_jpegcompression *parms)
+ const struct v4l2_jpegcompression *parms)
{
struct camera_data *cam = video_drvdata(file);
@@ -708,9 +705,9 @@ static int cpia2_s_jpegcomp(struct file *file, void *fh,
cam->params.compression.inhibit_htables =
!(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT);
- if(parms->APP_len != 0) {
- if(parms->APP_len > 0 &&
- parms->APP_len <= sizeof(cam->APP_data) &&
+ if (parms->APP_len != 0) {
+ if (parms->APP_len > 0 &&
+ parms->APP_len <= sizeof(cam->APP_data) &&
parms->APPn >= 0 && parms->APPn <= 15) {
cam->APPn = parms->APPn;
cam->APP_len = parms->APP_len;
@@ -724,9 +721,9 @@ static int cpia2_s_jpegcomp(struct file *file, void *fh,
cam->APP_len = 0;
}
- if(parms->COM_len != 0) {
- if(parms->COM_len > 0 &&
- parms->COM_len <= sizeof(cam->COM_data)) {
+ if (parms->COM_len != 0) {
+ if (parms->COM_len > 0 &&
+ parms->COM_len <= sizeof(cam->COM_data)) {
cam->COM_len = parms->COM_len;
memcpy(cam->COM_data, parms->COM_data, parms->COM_len);
} else {
@@ -751,8 +748,8 @@ static int cpia2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers
{
struct camera_data *cam = video_drvdata(file);
- if(req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- req->memory != V4L2_MEMORY_MMAP)
+ if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ req->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
DBG("REQBUFS requested:%d returning:%d\n", req->count, cam->num_frames);
@@ -774,8 +771,8 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct camera_data *cam = video_drvdata(file);
- if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->index >= cam->num_frames)
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ buf->index >= cam->num_frames)
return -EINVAL;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@@ -783,7 +780,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->memory = V4L2_MEMORY_MMAP;
- if(cam->mmapped)
+ if (cam->mmapped)
buf->flags = V4L2_BUF_FLAG_MAPPED;
else
buf->flags = 0;
@@ -806,8 +803,8 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
}
DBG("QUERYBUF index:%d offset:%d flags:%d seq:%d bytesused:%d\n",
- buf->index, buf->m.offset, buf->flags, buf->sequence,
- buf->bytesused);
+ buf->index, buf->m.offset, buf->flags, buf->sequence,
+ buf->bytesused);
return 0;
}
@@ -824,14 +821,14 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct camera_data *cam = video_drvdata(file);
- if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->memory != V4L2_MEMORY_MMAP ||
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ buf->memory != V4L2_MEMORY_MMAP ||
buf->index >= cam->num_frames)
return -EINVAL;
DBG("QBUF #%d\n", buf->index);
- if(cam->buffers[buf->index].status == FRAME_READY)
+ if (cam->buffers[buf->index].status == FRAME_READY)
cam->buffers[buf->index].status = FRAME_EMPTY;
return 0;
@@ -849,9 +846,10 @@ static int find_earliest_filled_buffer(struct camera_data *cam)
{
int i;
int found = -1;
- for (i=0; i<cam->num_frames; i++) {
- if(cam->buffers[i].status == FRAME_READY) {
- if(found < 0) {
+
+ for (i = 0; i < cam->num_frames; i++) {
+ if (cam->buffers[i].status == FRAME_READY) {
+ if (found < 0) {
found = i;
} else {
/* find which buffer is earlier */
@@ -876,22 +874,23 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct camera_data *cam = video_drvdata(file);
int frame;
- if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->memory != V4L2_MEMORY_MMAP)
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ buf->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
frame = find_earliest_filled_buffer(cam);
- if(frame < 0 && file->f_flags&O_NONBLOCK)
+ if (frame < 0 && file->f_flags & O_NONBLOCK)
return -EAGAIN;
- if(frame < 0) {
+ if (frame < 0) {
/* Wait for a frame to become available */
- struct framebuf *cb=cam->curbuff;
+ struct framebuf *cb = cam->curbuff;
+
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!video_is_registered(&cam->vdev) ||
- (cb=cam->curbuff)->status == FRAME_READY);
+ (cb = cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
@@ -900,7 +899,6 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
frame = cb->num;
}
-
buf->index = frame;
buf->bytesused = cam->buffers[buf->index].length;
buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
@@ -931,7 +929,7 @@ static int cpia2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (!cam->streaming) {
ret = cpia2_usb_stream_start(cam,
- cam->params.camera_state.stream_mode);
+ cam->params.camera_state.stream_mode);
if (!ret)
v4l2_ctrl_grab(cam->usb_alt, true);
}
@@ -969,7 +967,7 @@ static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
return -ERESTARTSYS;
retval = cpia2_remap_buffer(cam, area);
- if(!retval)
+ if (!retval)
cam->stream_fh = file->private_data;
mutex_unlock(&cam->v4l2_lock);
return retval;
@@ -1080,39 +1078,42 @@ int cpia2_register_camera(struct camera_data *cam)
v4l2_ctrl_handler_init(hdl, 12);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_BRIGHTNESS,
- cam->params.pnp_id.device_type == DEVICE_STV_672 ? 1 : 0,
- 255, 1, DEFAULT_BRIGHTNESS);
+ V4L2_CID_BRIGHTNESS,
+ cam->params.pnp_id.device_type == DEVICE_STV_672 ? 1 : 0,
+ 255, 1, DEFAULT_BRIGHTNESS);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, DEFAULT_CONTRAST);
+ V4L2_CID_CONTRAST, 0, 255, 1, DEFAULT_CONTRAST);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_SATURATION, 0, 255, 1, DEFAULT_SATURATION);
+ V4L2_CID_SATURATION, 0, 255, 1, DEFAULT_SATURATION);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_JPEG_ACTIVE_MARKER, 0,
- V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
- V4L2_JPEG_ACTIVE_MARKER_DHT);
+ V4L2_CID_JPEG_ACTIVE_MARKER, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT);
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY, 1,
- 100, 1, 100);
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1,
+ 100, 1, 100);
cpia2_usb_alt.def = alternate;
cam->usb_alt = v4l2_ctrl_new_custom(hdl, &cpia2_usb_alt, NULL);
/* VP5 Only */
if (cam->params.pnp_id.device_type != DEVICE_STV_672)
v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
/* Flicker control only valid for 672 */
if (cam->params.pnp_id.device_type == DEVICE_STV_672)
v4l2_ctrl_new_std_menu(hdl, &cpia2_ctrl_ops,
- V4L2_CID_POWER_LINE_FREQUENCY,
- V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ 0, 0);
/* Light control only valid for the QX5 Microscope */
if (cam->params.pnp_id.product == 0x151) {
cam->top_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0);
+ V4L2_CID_ILLUMINATORS_1,
+ 0, 1, 1, 0);
cam->bottom_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
- V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
+ V4L2_CID_ILLUMINATORS_2,
+ 0, 1, 1, 0);
v4l2_ctrl_cluster(2, &cam->top_light);
}
@@ -1159,28 +1160,28 @@ void cpia2_unregister_camera(struct camera_data *cam)
*****************************************************************************/
static void __init check_parameters(void)
{
- if(buffer_size < PAGE_SIZE) {
+ if (buffer_size < PAGE_SIZE) {
buffer_size = PAGE_SIZE;
LOG("buffer_size too small, setting to %d\n", buffer_size);
- } else if(buffer_size > 1024*1024) {
+ } else if (buffer_size > 1024 * 1024) {
/* arbitrary upper limit */
- buffer_size = 1024*1024;
+ buffer_size = 1024 * 1024;
LOG("buffer_size ridiculously large, setting to %d\n",
buffer_size);
} else {
- buffer_size += PAGE_SIZE-1;
- buffer_size &= ~(PAGE_SIZE-1);
+ buffer_size += PAGE_SIZE - 1;
+ buffer_size &= ~(PAGE_SIZE - 1);
}
- if(num_buffers < 1) {
+ if (num_buffers < 1) {
num_buffers = 1;
LOG("num_buffers too small, setting to %d\n", num_buffers);
- } else if(num_buffers > VIDEO_MAX_FRAME) {
+ } else if (num_buffers > VIDEO_MAX_FRAME) {
num_buffers = VIDEO_MAX_FRAME;
LOG("num_buffers too large, setting to %d\n", num_buffers);
}
- if(alternate < USBIF_ISO_1 || alternate > USBIF_ISO_6) {
+ if (alternate < USBIF_ISO_1 || alternate > USBIF_ISO_6) {
alternate = DEFAULT_ALT;
LOG("alternate specified is invalid, using %d\n", alternate);
}
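
The buffer_size rounding above uses the standard power-of-two round-up trick: add PAGE_SIZE - 1, then mask off the low bits. A minimal standalone sketch (user-space C, assuming 4 KiB pages; names are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;	/* stand-in for PAGE_SIZE */
	unsigned long n = 5000;

	n += page - 1;			/* 9095 */
	n &= ~(page - 1);		/* 8192: next multiple of page */
	printf("%lu\n", n);
	return 0;
}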
@@ -1197,7 +1198,6 @@ static void __init check_parameters(void)
/************ Module Stuff ***************/
-
/******************************************************************************
*
* cpia2_init/module_init
@@ -1211,7 +1211,6 @@ static int __init cpia2_init(void)
return cpia2_usb_init();
}
-
/******************************************************************************
*
* cpia2_exit/module_exit
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 1b6d4e4c52ca..fe4d886442a4 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -1122,11 +1122,6 @@ static int lme2510_powerup(struct dvb_usb_device *d, int onoff)
return ret;
}
-static int lme2510_get_adapter_count(struct dvb_usb_device *d)
-{
- return 1;
-}
-
static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
{
struct lme2510_state *st = d->priv;
@@ -1211,12 +1206,12 @@ static struct dvb_usb_device_properties lme2510_props = {
.frontend_attach = dm04_lme2510_frontend_attach,
.tuner_attach = dm04_lme2510_tuner,
.get_stream_config = lme2510_get_stream_config,
- .get_adapter_count = lme2510_get_adapter_count,
.streaming_ctrl = lme2510_streaming_ctrl,
.get_rc_config = lme2510_get_rc_config,
.exit = lme2510_exit,
+ .num_adapters = 1,
.adapter = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER|
@@ -1227,8 +1222,6 @@ static struct dvb_usb_device_properties lme2510_props = {
.stream =
DVB_USB_STREAM_BULK(0x86, 10, 4096),
},
- {
- }
},
};
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 97ed17a141bb..83705730e37e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -612,8 +612,9 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
{
struct rtl28xxu_dev *dev = d_to_priv(d);
+ u8 buf[1];
int ret;
- struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
+ struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
dev_dbg(&d->intf->dev, "\n");
@@ -1776,7 +1777,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
ir_raw_event_store_with_filter(d->rc_dev, &ev);
}
- /* 'flush' ir_raw_event_store_with_filter() */
+ /* 'flush' ir_raw_event_store_with_filter() */
ir_raw_event_handle(d->rc_dev);
exit:
return ret;
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 28e4806a87cd..c22514948db2 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -83,4 +83,4 @@ obj-$(CONFIG_DVB_USB_TECHNISAT_USB2) += dvb-usb-technisat-usb2.o
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends/
# due to tuner-xc3028
ccflags-y += -I$(srctree)/drivers/media/tuners
-ccflags-y += -I$(srctree)/drivers/media/pci/ttpci
+ccflags-y += -I$(srctree)/drivers/media/common
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 969a7ec71dff..23f1093d28f8 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -29,10 +29,8 @@ struct cinergyt2_state {
unsigned char data[64];
};
-/* We are missing a release hook with usb_device data */
-static struct dvb_usb_device *cinergyt2_usb_device;
-
-static struct dvb_usb_device_properties cinergyt2_properties;
+/* Forward declaration */
+static const struct dvb_usb_device_properties cinergyt2_properties;
static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
{
@@ -78,13 +76,12 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
+ if (adap->fe_adap[0].fe)
+ adap->fe_adap[0].fe->ops.release(adap->fe_adap[0].fe);
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
}
mutex_unlock(&d->data_mutex);
- /* Copy this pointer as we are gonna need it in the release phase */
- cinergyt2_usb_device = adap->dev;
-
return ret;
}
@@ -203,7 +200,7 @@ static struct usb_device_id cinergyt2_usb_table[] = {
MODULE_DEVICE_TABLE(usb, cinergyt2_usb_table);
-static struct dvb_usb_device_properties cinergyt2_properties = {
+static const struct dvb_usb_device_properties cinergyt2_properties = {
.size_of_priv = sizeof(struct cinergyt2_state),
.num_adapters = 1,
.adapter = {
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 761992ad05e2..7707de7bae7c 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -1947,7 +1947,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.size_of_priv = sizeof(struct cxusb_state),
- .num_adapters = 2,
+ .num_adapters = 1,
.adapter = {
{
.num_frontends = 1,
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index fba06932a9e0..1c13e493322c 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
struct dtv5100_state *st = d->priv;
+ unsigned int pipe;
u8 request;
u8 type;
u16 value;
@@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
switch (wlen) {
case 1:
/* write { reg }, read { value } */
+ pipe = usb_rcvctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
DTV5100_TUNER_READ);
type = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -41,6 +43,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
break;
case 2:
/* write { reg, value } */
+ pipe = usb_sndctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
DTV5100_TUNER_WRITE);
type = USB_TYPE_VENDOR | USB_DIR_OUT;
@@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
- return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
+ return usb_control_msg(d->udev, pipe, request,
type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,
/* initialize non qt1010/zl10353 part? */
for (i = 0; dtv5100_init[i].request; i++) {
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
dtv5100_init[i].request,
USB_TYPE_VENDOR | USB_DIR_OUT,
dtv5100_init[i].value,
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index ba9292e2a587..c1e0dccb7408 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -4065,15 +4065,15 @@ static int em28xx_usb_probe(struct usb_interface *intf,
dev->dev_next->dvb_max_pkt_size_isoc = dev->dvb_max_pkt_size_isoc_ts2;
dev->dev_next->dvb_alt_isoc = dev->dvb_alt_isoc;
- /* Configuare hardware to support TS2*/
+ /* Configure hardware to support TS2*/
if (dev->dvb_xfer_bulk) {
- /* The ep4 and ep5 are configuared for BULK */
+ /* The ep4 and ep5 are configured for BULK */
em28xx_write_reg(dev, 0x0b, 0x96);
mdelay(100);
em28xx_write_reg(dev, 0x0b, 0x80);
mdelay(100);
} else {
- /* The ep4 and ep5 are configuared for ISO */
+ /* The ep4 and ep5 are configured for ISO */
em28xx_write_reg(dev, 0x0b, 0x96);
mdelay(100);
em28xx_write_reg(dev, 0x0b, 0x82);
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 5aa15a7a49de..59529cbf9cd0 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -720,7 +720,8 @@ static int em28xx_ir_init(struct em28xx *dev)
dev->board.has_ir_i2c = 0;
dev_warn(&dev->intf->dev,
"No i2c IR remote control device found.\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto ref_put;
}
}
@@ -735,7 +736,7 @@ static int em28xx_ir_init(struct em28xx *dev)
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
if (!ir)
- return -ENOMEM;
+ goto ref_put;
rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc)
goto error;
@@ -839,6 +840,9 @@ error:
dev->ir = NULL;
rc_free_device(rc);
kfree(ir);
+ref_put:
+ em28xx_shutdown_buttons(dev);
+ kref_put(&dev->ref, em28xx_free_device);
return err;
}
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index b9e45124673b..c742cc88fac5 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -398,7 +398,7 @@ static int s2250_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int s2250_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index d93d384286c1..46ed95483e22 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -365,8 +365,9 @@ struct sd {
static const struct v4l2_pix_format mode[] = {
{160, 120, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE,
/* The sizeimage is trial and error, as with low framerates
- the camera will pad out usb frames, making the image
- data larger then strictly necessary */
+ * the camera will pad out usb frames, making the image
+ * data larger than strictly necessary
+ */
.bytesperline = 160,
.sizeimage = 65536,
.colorspace = V4L2_COLORSPACE_SRGB,
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
index 2c05ea2598e7..ce4ee8bc75c8 100644
--- a/drivers/media/usb/gspca/gl860/gl860.c
+++ b/drivers/media/usb/gspca/gl860/gl860.c
@@ -561,8 +561,8 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
len, 400 + 200 * (len > 1));
memcpy(pdata, gspca_dev->usb_buf, len);
} else {
- r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- req, pref, val, index, NULL, len, 400);
+ gspca_err(gspca_dev, "zero-length read request\n");
+ r = -EINVAL;
}
}
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index cd6776c3163b..bffa94e76da5 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -614,7 +614,7 @@ static const struct ov_i2c_regvals norm_3620b[] = {
/*
* From the datasheet: "Note that after writing to register COMH
* (0x12) to change the sensor mode, registers related to the
- * sensor’s cropping window will be reset back to their default
+ * sensor's cropping window will be reset back to their default
* values."
*
* "wait 4096 external clock ... to make sure the sensor is
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 949111070971..32504ebcfd4d 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -116,7 +116,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
}
ret = usb_control_msg(gspca_dev->dev,
- usb_sndctrlpipe(gspca_dev->dev, 0),
+ usb_rcvctrlpipe(gspca_dev->dev, 0),
USB_REQ_SYNCH_FRAME, /* request */
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
SQ905_PING, 0, gspca_dev->usb_buf, 1,
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index ace3da40006e..971dee0a56da 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -242,6 +242,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
gspca_err(gspca_dev, "reg_r: buffer overflow\n");
return;
}
+ if (len == 0) {
+ gspca_err(gspca_dev, "reg_r: zero-length read\n");
+ return;
+ }
if (gspca_dev->usb_err < 0)
return;
ret = usb_control_msg(gspca_dev->dev,
@@ -250,7 +254,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index,
- len ? gspca_dev->usb_buf : NULL, len,
+ gspca_dev->usb_buf, len,
500);
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
@@ -727,7 +731,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case MegaImageVI:
reg_w_riv(gspca_dev, 0xf0, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
- reg_r(gspca_dev, 0xf0, 4, 0);
+ reg_w_riv(gspca_dev, 0xf0, 4, 0);
spca504B_WaitCmdStatus(gspca_dev);
break;
default:
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index cec841ad7495..3e535be2c520 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -929,7 +929,6 @@ static int hackrf_s_fmt_sdr(struct file *file, void *priv,
if (vb2_is_busy(q))
return -EBUSY;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < NUM_FORMATS; i++) {
if (f->fmt.sdr.pixelformat == formats[i].pixelformat) {
dev->pixelformat = formats[i].pixelformat;
@@ -955,7 +954,6 @@ static int hackrf_g_fmt_sdr(struct file *file, void *priv,
dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
(char *)&dev->pixelformat);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = dev->pixelformat;
f->fmt.sdr.buffersize = dev->buffersize;
@@ -971,7 +969,6 @@ static int hackrf_try_fmt_sdr(struct file *file, void *priv,
dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 63882a5248ae..71de6b4c4e4c 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -912,7 +912,6 @@ static int msi2500_g_fmt_sdr_cap(struct file *file, void *priv,
f->fmt.sdr.pixelformat = dev->pixelformat;
f->fmt.sdr.buffersize = dev->buffersize;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
return 0;
}
@@ -930,7 +929,6 @@ static int msi2500_s_fmt_sdr_cap(struct file *file, void *priv,
if (vb2_is_busy(q))
return -EBUSY;
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
dev->pixelformat = formats[i].pixelformat;
@@ -957,7 +955,6 @@ static int msi2500_try_fmt_sdr_cap(struct file *file, void *priv,
dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index f4a727918e35..d38dee1792e4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2676,9 +2676,8 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
pvr2_stream_destroy(hdw->vid_stream);
hdw->vid_stream = NULL;
}
- pvr2_i2c_core_done(hdw);
v4l2_device_unregister(&hdw->v4l2_dev);
- pvr2_hdw_remove_usb_stuff(hdw);
+ pvr2_hdw_disconnect(hdw);
mutex_lock(&pvr2_unit_mtx);
do {
if ((hdw->unit_number >= 0) &&
@@ -2705,6 +2704,7 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
{
pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
LOCK_TAKE(hdw->big_lock);
+ pvr2_i2c_core_done(hdw);
LOCK_TAKE(hdw->ctl_lock);
pvr2_hdw_remove_usb_stuff(hdw);
LOCK_GIVE(hdw->ctl_lock);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 4af55e2478be..3b0e4ed75d99 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -767,8 +767,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
if (fmt == NULL)
return -EINVAL;
- field = f->fmt.pix.field;
-
dprintk(vc->dev, 50, "%s NTSC: %d suggested width: %d, height: %d\n",
__func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index a852ee5f7ac9..bfda46a36dc5 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -324,10 +324,10 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (!b)
return -ENOMEM;
- if ((result = mutex_lock_interruptible(&dec->usb_mutex))) {
- kfree(b);
+ result = mutex_lock_interruptible(&dec->usb_mutex);
+ if (result) {
printk("%s: Failed to lock usb mutex.\n", __func__);
- return result;
+ goto err_free;
}
b[0] = 0xaa;
@@ -349,9 +349,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: command bulk message failed: error %d\n",
__func__, result);
- mutex_unlock(&dec->usb_mutex);
- kfree(b);
- return result;
+ goto err;
}
result = usb_bulk_msg(dec->udev, dec->result_pipe, b,
@@ -360,9 +358,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: result bulk message failed: error %d\n",
__func__, result);
- mutex_unlock(&dec->usb_mutex);
- kfree(b);
- return result;
+ goto err;
} else {
if (debug) {
printk(KERN_DEBUG "%s: result: %*ph\n",
@@ -373,12 +369,14 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
*result_length = b[3];
if (cmd_result && b[3] > 0)
memcpy(cmd_result, &b[4], b[3]);
+ }
- mutex_unlock(&dec->usb_mutex);
+err:
+ mutex_unlock(&dec->usb_mutex);
- kfree(b);
- return 0;
- }
+err_free:
+ kfree(b);
+ return result;
}
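
The reworked error handling above funnels all exits through cleanup labels. Note that the lock-failure path must skip the unlock, since mutex_lock_interruptible() returns without acquiring the mutex; hence the separate err_free label. A minimal sketch of the pattern with hypothetical names, unwinding in reverse acquisition order:

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(lock);		/* hypothetical */

static int do_work(void)
{
	char *b = kzalloc(64, GFP_KERNEL);
	int ret;

	if (!b)
		return -ENOMEM;
	ret = mutex_lock_interruptible(&lock);
	if (ret)
		goto err_free;		/* lock not taken: skip the unlock */
	/* ... use b under the lock ... */
	mutex_unlock(&lock);
err_free:
	kfree(b);
	return ret;
}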
static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a777b389a66e..e16464606b14 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -127,10 +127,37 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
+ static const struct usb_device_id elgato_cam_link_4k = {
+ USB_DEVICE(0x0fd9, 0x0066)
+ };
struct uvc_format *format = NULL;
struct uvc_frame *frame = NULL;
unsigned int i;
+ /*
+ * The response of the Elgato Cam Link 4K is incorrect: The second byte
+ * contains bFormatIndex (instead of being the second byte of bmHint).
+ * The first byte is always zero. The third byte is always 1.
+ *
+ * The UVC 1.5 class specification defines the first five bits in the
+ * bmHint bitfield. The remaining bits are reserved and should be zero.
+ * Therefore a valid bmHint will be less than 32.
+ *
+ * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix.
+ * MCU: 20.02.19, FPGA: 67
+ */
+ if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
+ ctrl->bmHint > 255) {
+ u8 corrected_format_index = ctrl->bmHint >> 8;
+
+ uvc_dbg(stream->dev, VIDEO,
+ "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
+ ctrl->bmHint, ctrl->bFormatIndex,
+ 1, corrected_format_index);
+ ctrl->bmHint = 1;
+ ctrl->bFormatIndex = corrected_format_index;
+ }
+
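To see the arithmetic, here is a standalone toy model of the corrupted response (hypothetical values, not driver code): the device puts bFormatIndex in the high byte of the little-endian bmHint word, and since valid hints are below 32, anything above 255 must be corrupt.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t bmHint = 0x0200;	/* broken reply: bFormatIndex 2 in byte 1 */
	uint8_t bFormatIndex = 1;	/* third byte of the reply, always 1 */

	if (bmHint > 255) {		/* valid hints are < 32 */
		bFormatIndex = bmHint >> 8;
		bmHint = 1;		/* keep only the dwFrameInterval hint bit */
	}
	printf("bmHint=0x%04x bFormatIndex=%u\n", bmHint, bFormatIndex);
	return 0;
}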
for (i = 0; i < stream->nformats; ++i) {
if (stream->format[i].index == ctrl->bFormatIndex) {
format = &stream->format[i];
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1ef611e08323..538a330046ec 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1032,6 +1032,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
DBG("submitting URB %p\n", pipe_info->stream_urb);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
+ usb_free_urb(pipe_info->stream_urb);
printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n");
return retval;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index bf49f83cb86f..02dc1787e953 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -62,6 +62,7 @@ config V4L2_FLASH_LED_CLASS
tristate "V4L2 flash API for LED flash class devices"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on LEDS_CLASS_FLASH
+ select V4L2_ASYNC
help
Say Y here to enable V4L2 flash API support for LED flash
class drivers.
@@ -70,6 +71,10 @@ config V4L2_FLASH_LED_CLASS
config V4L2_FWNODE
tristate
+ select V4L2_ASYNC
+
+config V4L2_ASYNC
+ tristate
# Used by drivers that need Videobuf modules
config VIDEOBUF_GEN
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index e4cd589b99a5..66a78c556c98 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -6,16 +6,18 @@
tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
- v4l2-event.o v4l2-ctrls.o v4l2-subdev.o \
- v4l2-async.o v4l2-common.o
+ v4l2-event.o v4l2-subdev.o v4l2-common.o \
+ v4l2-ctrls-core.o v4l2-ctrls-api.o \
+ v4l2-ctrls-request.o v4l2-ctrls-defs.o
videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
videodev-$(CONFIG_SPI) += v4l2-spi.o
videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
-obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
+obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index e638aa8aecb7..cd9e78c63791 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -854,8 +854,27 @@ static int pending_subdevs_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
-void v4l2_async_debug_init(struct dentry *debugfs_dir)
+static struct dentry *v4l2_async_debugfs_dir;
+
+static int __init v4l2_async_init(void)
{
- debugfs_create_file("pending_async_subdevices", 0444, debugfs_dir, NULL,
+ v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
+ debugfs_create_file("pending_async_subdevices", 0444,
+ v4l2_async_debugfs_dir, NULL,
&pending_subdevs_fops);
+
+ return 0;
+}
+
+static void __exit v4l2_async_exit(void)
+{
+ debugfs_remove_recursive(v4l2_async_debugfs_dir);
}
+
+subsys_initcall(v4l2_async_init);
+module_exit(v4l2_async_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 0ca75f6784c5..47aff3b19742 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1244,6 +1244,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
if (!file->f_op->unlocked_ioctl)
return ret;
+ if (!video_is_registered(vdev))
+ return -ENODEV;
+
if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
ret = file->f_op->unlocked_ioctl(file, cmd,
(unsigned long)compat_ptr(arg));
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
new file mode 100644
index 000000000000..db9baa0bd05f
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework uAPI implementation:
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Internal temporary helper struct, one for each v4l2_ext_control */
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
+ /*
+ * v4l2_ext_control index of the next control belonging to the
+ * same cluster, or 0 if there isn't any.
+ */
+ u32 next;
+};
+
+/*
+ * Helper functions to copy control payload data from kernel space to
+ * user space and vice versa.
+ */
+
+/* Helper function: copy the given control value back to the caller */
+static int ptr_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ u32 len;
+
+ if (ctrl->is_ptr && !ctrl->is_string)
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
+ -EFAULT : 0;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ptr.p_char);
+ if (c->size < len + 1) {
+ c->size = ctrl->elem_size;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ptr.p_char, len + 1) ?
+ -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = *ptr.p_s64;
+ break;
+ default:
+ c->value = *ptr.p_s32;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the current control value back to the caller */
+static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_cur);
+}
+
+/* Helper function: copy the new control value back to the caller */
+static int new_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
+/* Helper function: copy the initial control value back to the caller */
+static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ int idx;
+
+ for (idx = 0; idx < ctrl->elems; idx++)
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the caller-provided value to the given control value */
+static int user_to_ptr(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ int ret;
+ u32 size;
+
+ ctrl->is_new = 1;
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int idx;
+
+ ret = copy_from_user(ptr.p, c->ptr, c->size) ? -EFAULT : 0;
+ if (ret || !ctrl->is_array)
+ return ret;
+ for (idx = c->size / ctrl->elem_size; idx < ctrl->elems; idx++)
+ ctrl->type_ops->init(ctrl, idx, ptr);
+ return 0;
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ *ptr.p_s64 = c->value64;
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ size = c->size;
+ if (size == 0)
+ return -ERANGE;
+ if (size > ctrl->maximum + 1)
+ size = ctrl->maximum + 1;
+ ret = copy_from_user(ptr.p_char, c->string, size) ? -EFAULT : 0;
+ if (!ret) {
+ char last = ptr.p_char[size - 1];
+
+ ptr.p_char[size - 1] = 0;
+ /*
+ * If the string was longer than ctrl->maximum,
+ * then return an error.
+ */
+ if (strlen(ptr.p_char) == ctrl->maximum && last)
+ return -ERANGE;
+ }
+ return ret;
+ default:
+ *ptr.p_s32 = c->value;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ return user_to_ptr(c, ctrl, ctrl->p_new);
+}
+
+/*
+ * VIDIOC_G/TRY/S_EXT_CTRLS implementation
+ */
+
+/*
+ * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
+ *
+ * It is not a fully atomic operation, just best-effort only. After all, if
+ * multiple controls have to be set through multiple i2c writes (for example)
+ * then some initial writes may succeed while others fail, leaving the
+ * system in an inconsistent state. The question is how much effort you are
+ * willing to spend on trying to make something atomic that really isn't.
+ *
+ * From the point of view of an application the main requirement is that
+ * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
+ * error should be returned without actually affecting any controls.
+ *
+ * If all the values are correct, then it is acceptable to just give up
+ * in case of low-level errors.
+ *
+ * It is important though that the application can tell when only a partial
+ * configuration was done. The way we do that is through the error_idx field
+ * of struct v4l2_ext_controls: if that is equal to the count field then no
+ * controls were affected. Otherwise all controls before that index were
+ * successful in performing their 'get' or 'set' operation, the control at
+ * the given index failed, and you don't know what happened with the controls
+ * after the failed one. If they were part of a control cluster, they
+ * could have been successfully processed (if a cluster member was encountered
+ * at index < error_idx), they could have failed (if a cluster member was at
+ * error_idx), or they may not have been processed yet (if the first cluster
+ * member appeared after error_idx).
+ *
+ * It is all fairly theoretical, though. In practice all you can do is to
+ * bail out. If error_idx == count, then it is an application bug. If
+ * error_idx < count then it is only an application bug if the error code was
+ * EBUSY. That usually means that something started streaming just when you
+ * tried to set the controls. In all other cases it is a driver/hardware
+ * problem and all you can do is to retry or bail out.
+ *
+ * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
+ * never modifies controls the error_idx is just set to whatever control
+ * has an invalid value.
+ */
+
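A user-space sketch of the error_idx convention just described (fd and the filled-in controls array are placeholders):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: interpret error_idx after a failed VIDIOC_S_EXT_CTRLS. */
static int apply_ctrls(int fd, struct v4l2_ext_controls *cs)
{
	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, cs) == 0)
		return 0;
	if (cs->error_idx == cs->count)
		return -1;	/* validation failed: nothing was applied */
	/*
	 * Controls before error_idx were applied; the state of the rest
	 * is unknown, so all the caller can reasonably do is bail out.
	 */
	return -1;
}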
+/*
+ * Prepare for the extended g/s/try functions.
+ * Find the controls in the control array and do some basic checks.
+ */
+static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool get)
+{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
+ u32 i;
+
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+ u32 id = c->id & V4L2_CTRL_ID_MASK;
+
+ cs->error_idx = i;
+
+ if (cs->which &&
+ cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
+ cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
+ V4L2_CTRL_ID2WHICH(id) != cs->which) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
+ /*
+ * Old-style private controls are not allowed for
+ * extended controls.
+ */
+ if (id >= V4L2_CID_PRIVATE_BASE) {
+ dprintk(vdev,
+ "old-style private controls not allowed\n");
+ return -EINVAL;
+ }
+ ref = find_ref_lock(hdl, id);
+ if (!ref) {
+ dprintk(vdev, "cannot find control id 0x%x\n", id);
+ return -EINVAL;
+ }
+ h->ref = ref;
+ ctrl = ref->ctrl;
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
+ dprintk(vdev, "control id 0x%x is disabled\n", id);
+ return -EINVAL;
+ }
+
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int tot_size = ctrl->elems * ctrl->elem_size;
+
+ if (c->size < tot_size) {
+ /*
+ * In the get case the application first
+ * queries to obtain the size of the control.
+ */
+ if (get) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ dprintk(vdev,
+ "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
+ id, c->size, tot_size);
+ return -EFAULT;
+ }
+ c->size = tot_size;
+ }
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ /*
+ * Initially set next to 0, meaning that there is no other
+ * control in this helper array belonging to the same
+ * cluster.
+ */
+ h->next = 0;
+ }
+
+ /*
+ * We are done if there were no controls that belong to a multi-
+ * control cluster.
+ */
+ if (!have_clusters)
+ return 0;
+
+ /*
+ * The code below figures out in O(n) time which controls in the list
+ * belong to the same cluster.
+ */
+
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(hdl->lock);
+
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = NULL;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /*
+ * If the mref->helper is set, then it points to an earlier
+ * helper that belongs to the same cluster.
+ */
+ if (mref->helper) {
+ /*
+ * Set the next field of mref->helper to the current
+ * index: this means that the earlier helper now
+ * points to the next helper in the same cluster.
+ */
+ mref->helper->next = i;
+ /*
+ * mref should be set only for the first helper in the
+ * cluster, clear the others.
+ */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
+ }
+ mutex_unlock(hdl->lock);
+ return 0;
+}
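
The 'next' chaining built above is an index-linked list per cluster: each master ref temporarily remembers the most recent helper of its cluster, and earlier helpers are pointed at later ones. A standalone toy model of the same O(n) grouping (hypothetical cluster ids; last[] plays the role of mref->helper):

#include <stdio.h>

int main(void)
{
	int cluster[6] = { 1, 2, 1, 3, 2, 1 };	/* hypothetical cluster ids */
	int next[6] = { 0 };			/* 0 means "no further member" */
	int last[4] = { -1, -1, -1, -1 };	/* most recent index per cluster */
	int i;

	for (i = 0; i < 6; i++) {
		if (last[cluster[i]] >= 0)
			next[last[cluster[i]]] = i;	/* chain earlier member to us */
		last[cluster[i]] = i;
	}
	for (i = 0; i < 6; i++)
		printf("next[%d] = %d\n", i, next[i]);	/* 2 4 5 0 0 0 */
	return 0;
}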
+
+/*
+ * Handles the corner case where cs->count == 0. It checks whether the
+ * specified control class exists. If that class ID is 0, then it checks
+ * whether there are any controls at all.
+ */
+static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
+{
+ if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
+ which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return 0;
+ return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
+}
+
+/*
+ * Get extended controls. Allocates the helpers array if needed.
+ *
+ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
+ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
+ * completed, and in that case valid_p_req is true for all controls.
+ */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ int ret;
+ int i, j;
+ bool is_default, is_request;
+
+ is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
+ is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+
+ cs->error_idx = cs->count;
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
+ cs->error_idx = cs->count;
+
+ for (i = 0; !ret && i < cs->count; i++)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ ret = -EACCES;
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ bool is_volatile = false;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ master = helpers[i].mref->ctrl;
+ cs->error_idx = i;
+
+ v4l2_ctrl_lock(master);
+
+ /*
+ * g_volatile_ctrl will update the new control values.
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
+ * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
+ * it is v4l2_ctrl_request_complete() that copies the
+ * volatile controls at the time of request completion
+ * to the request, so you don't want to do that again.
+ */
+ if (!is_default && !is_request &&
+ ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ (master->has_volatiles && !is_cur_manual(master)))) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ is_volatile = true;
+ }
+
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ break;
+ }
+
+ /*
+ * Copy the default value (if is_default is true), the
+ * request value (if is_request is true and p_req is valid),
+ * the new volatile value (if is_volatile is true) or the
+ * current value.
+ */
+ do {
+ struct v4l2_ctrl_ref *ref = helpers[idx].ref;
+
+ if (is_default)
+ ret = def_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_request && ref->valid_p_req)
+ ret = req_to_user(cs->controls + idx, ref);
+ else if (is_volatile)
+ ret = new_to_user(cs->controls + idx, ref->ctrl);
+ else
+ ret = cur_to_user(cs->controls + idx, ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs);
+
+ return v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+}
+EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool set)
+{
+ unsigned int i;
+ int ret = 0;
+
+ cs->error_idx = cs->count;
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
+ union v4l2_ctrl_ptr p_new;
+
+ cs->error_idx = i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ dprintk(vdev,
+ "control id 0x%x is read-only\n",
+ ctrl->id);
+ return -EACCES;
+ }
+ /*
+ * This test is also done in try_set_control_cluster() which
+ * is called in atomic context, so that has the final say,
+ * but it makes sense to do an up-front check as well. Once
+ * an error occurs in try_set_control_cluster() some other
+ * controls may have been set already and we want to do a
+ * best-effort to avoid that.
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
+ dprintk(vdev,
+ "control id 0x%x is grabbed, cannot set\n",
+ ctrl->id);
+ return -EBUSY;
+ }
+ /*
+ * Skip validation for now if the payload needs to be copied
+ * from userspace into kernelspace. We'll validate those later.
+ */
+ if (ctrl->is_ptr)
+ continue;
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ p_new.p_s64 = &cs->controls[i].value64;
+ else
+ p_new.p_s32 = &cs->controls[i].value;
+ ret = validate_new(ctrl, p_new);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Try or try-and-set controls */
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned int i, j;
+ int ret;
+
+ cs->error_idx = cs->count;
+
+ /* Default value cannot be changed */
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
+ dprintk(vdev, "%s: cannot change default value\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl) {
+ dprintk(vdev, "%s: invalid null control handler\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, vdev, set);
+ if (ret && set)
+ cs->error_idx = cs->count;
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ /* Pick an initial non-manual value */
+ s32 new_auto_val = master->manual_mode_value + 1;
+ u32 tmp_idx = idx;
+
+ do {
+ /*
+ * Check if the auto control is part of the
+ * list, and remember the new value.
+ */
+ if (helpers[tmp_idx].ref->ctrl == master)
+ new_auto_val = cs->controls[tmp_idx].value;
+ tmp_idx = helpers[tmp_idx].next;
+ } while (tmp_idx);
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ /*
+ * Copy the new caller-supplied control values.
+ * user_to_new() sets 'is_new' to 1.
+ */
+ do {
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
+
+ ret = user_to_new(cs->controls + idx, ctrl);
+ if (!ret && ctrl->is_ptr) {
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ dprintk(vdev,
+ "failed to validate control %s (%d)\n",
+ v4l2_ctrl_get_name(ctrl->id), ret);
+ }
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set);
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
+}
+EXPORT_SYMBOL(v4l2_try_ext_ctrls);
+
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
+}
+EXPORT_SYMBOL(v4l2_s_ext_ctrls);
+
+/*
+ * VIDIOC_G/S_CTRL implementation
+ */
+
+/* Helper function to get a single control */
+static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret = 0;
+ int i;
+
+ /* Compound controls are not supported. The new_to_user() and
+ * cur_to_user() calls below would need to be modified not to access
+ * userspace memory when called from get_ctrl().
+ */
+ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ return -EACCES;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ new_to_user(c, ctrl);
+ } else {
+ cur_to_user(c, ctrl);
+ }
+ v4l2_ctrl_unlock(master);
+ return ret;
+}
+
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c;
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+ ret = get_ctrl(ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ctrl);
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
+ int i;
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 0;
+
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ return ret;
+
+ /*
+ * For autoclusters with volatiles that are switched from auto to
+ * manual mode we have to update the current volatile values since
+ * those will become the initial manual values after such a switch.
+ */
+ if (master->is_auto && master->has_volatiles && ctrl == master &&
+ !is_cur_manual(master) && ctrl->val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+
+ ctrl->is_new = 1;
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret;
+
+ v4l2_ctrl_lock(ctrl);
+ user_to_new(c, ctrl);
+ ret = set_ctrl(fh, ctrl, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c = { control->id };
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ c.value = control->value;
+ ret = set_ctrl_lock(fh, ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_s_ctrl);
+
+/*
+ * Helper functions for drivers to get/set controls.
+ */
+
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return 0;
+ c.value = 0;
+ get_ctrl(ctrl, &c);
+ return c.value;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
+
+s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return 0;
+ c.value64 = 0;
+ get_ctrl(ctrl, &c);
+ return c.value64;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return -EINVAL;
+ ctrl->val = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
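These __-prefixed helpers assume the handler lock is already held, hence the lockdep assertion. A driver-side sketch of the convention (this mirrors what the unlocked v4l2_ctrl_s_ctrl() inline wrapper does; the function name is illustrative):

#include <media/v4l2-ctrls.h>

static int example_set(struct v4l2_ctrl *ctrl, s32 val)
{
	int ret;

	v4l2_ctrl_lock(ctrl);		/* take the handler lock ... */
	ret = __v4l2_ctrl_s_ctrl(ctrl, val);	/* ... then use the __ variant */
	v4l2_ctrl_unlock(ctrl);
	return ret;
}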
+
+int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return -EINVAL;
+ *ctrl->p_new.p_s64 = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
+ return -EINVAL;
+ strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+
+int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type, const void *p)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != type))
+ return -EINVAL;
+ memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
+
+/*
+ * Modify the range of a control.
+ */
+int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ bool value_changed;
+ bool range_changed = false;
+ int ret;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->is_array)
+ return -EINVAL;
+ ret = check_range(ctrl->type, min, max, step, def);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ctrl->minimum != min || ctrl->maximum != max ||
+ ctrl->step != step || ctrl->default_value != def) {
+ range_changed = true;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ }
+ cur_to_new(ctrl);
+ if (validate_new(ctrl, ctrl->p_new)) {
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ *ctrl->p_new.p_s64 = def;
+ else
+ *ctrl->p_new.p_s32 = def;
+ }
+
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+ else
+ value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+ if (value_changed)
+ ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ else if (range_changed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ return ret;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
+
+/* Implement VIDIOC_QUERY_EXT_CTRL */
+int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
+{
+ const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ u32 id = qc->id & V4L2_CTRL_ID_MASK;
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+
+ if (!hdl)
+ return -EINVAL;
+
+ mutex_lock(hdl->lock);
+
+ /* Try to find it */
+ ref = find_ref(hdl, id);
+
+ if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
+ bool is_compound;
+ /* Match any control that is not hidden */
+ unsigned int mask = 1;
+ bool match = false;
+
+ if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
+ /* Match any hidden control */
+ match = true;
+ } else if ((qc->id & next_flags) == next_flags) {
+ /* Match any control, compound or not */
+ mask = 0;
+ }
+
+ /* Find the next control with ID > qc->id */
+
+ /* Did we reach the end of the control list? */
+ if (id >= node2id(hdl->ctrl_refs.prev)) {
+ ref = NULL; /* Yes, so there is no next control */
+ } else if (ref) {
+ /*
+ * We found a control with the given ID, so just get
+ * the next valid one in the list.
+ */
+ list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
+ is_compound = ref->ctrl->is_array ||
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ } else {
+ /*
+ * No control with the given ID exists, so start
+ * searching for the next largest ID. We know there
+ * is one, otherwise the first 'if' above would have
+ * been true.
+ */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ is_compound = ref->ctrl->is_array ||
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ }
+ }
+ mutex_unlock(hdl->lock);
+
+ if (!ref)
+ return -EINVAL;
+
+ ctrl = ref->ctrl;
+ memset(qc, 0, sizeof(*qc));
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
+ strscpy(qc->name, ctrl->name, sizeof(qc->name));
+ qc->flags = user_flags(ctrl);
+ qc->type = ctrl->type;
+ qc->elem_size = ctrl->elem_size;
+ qc->elems = ctrl->elems;
+ qc->nr_of_dims = ctrl->nr_of_dims;
+ memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
+ qc->minimum = ctrl->minimum;
+ qc->maximum = ctrl->maximum;
+ qc->default_value = ctrl->default_value;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+ ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qc->step = 1;
+ else
+ qc->step = ctrl->step;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl);
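+
+/*
+ * Illustrative userspace pattern (not part of this patch): controls are
+ * enumerated by OR-ing the flags back into the returned ID after each
+ * successful call:
+ *
+ *	qec.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ *	while (!ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qec))
+ *		qec.id |= V4L2_CTRL_FLAG_NEXT_CTRL |
+ *			  V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ */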
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ qc->id = qec.id;
+ qc->type = qec.type;
+ qc->flags = qec.flags;
+ strscpy(qc->name, qec.name, sizeof(qc->name));
+ switch (qc->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_STRING:
+ case V4L2_CTRL_TYPE_BITMASK:
+ qc->minimum = qec.minimum;
+ qc->maximum = qec.maximum;
+ qc->step = qec.step;
+ qc->default_value = qec.default_value;
+ break;
+ default:
+ qc->minimum = 0;
+ qc->maximum = 0;
+ qc->step = 0;
+ qc->default_value = 0;
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYMENU */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 i = qm->index;
+
+ ctrl = v4l2_ctrl_find(hdl, qm->id);
+ if (!ctrl)
+ return -EINVAL;
+
+ qm->reserved = 0;
+ /* Sanity checks */
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (!ctrl->qmenu)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (!ctrl->qmenu_int)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
+ /* Use mask to see if this menu item should be skipped */
+ if (ctrl->menu_skip_mask & (1ULL << i))
+ return -EINVAL;
+ /* Empty menu items should also be skipped */
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_querymenu);
+
+/*
+ * VIDIOC_LOG_STATUS helpers
+ */
+
+int v4l2_ctrl_log_status(struct file *file, void *fh)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
+ v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
+ vfd->v4l2_dev->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_log_status);
+
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
+/*
+ * VIDIOC_(UN)SUBSCRIBE_EVENT implementation
+ */
+
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev,
+ unsigned int elems)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return -EINVAL;
+
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL))
+ send_initial_event(sev->fh, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return 0;
+}
+
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
+
+int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_CTRL)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
+/*
+ * poll helper
+ */
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ return EPOLLPRI;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_poll);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
new file mode 100644
index 000000000000..c4b5082849b6
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -0,0 +1,1946 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework core implementation.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#include "v4l2-ctrls-priv.h"
+
+static const union v4l2_ctrl_ptr ptr_null;
+
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl,
+ u32 changes)
+{
+ memset(ev, 0, sizeof(*ev));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = user_flags(ctrl);
+ if (ctrl->is_ptr)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+     ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ v4l2_event_queue_fh(fh, &ev);
+}
+
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
+static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr1,
+ union v4l2_ctrl_ptr ptr2)
+{
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ return false;
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ /* strings are always 0-terminated */
+ return !strcmp(ptr1.p_char + idx, ptr2.p_char + idx);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ptr1.p_s64[idx] == ptr2.p_s64[idx];
+ case V4L2_CTRL_TYPE_U8:
+ return ptr1.p_u8[idx] == ptr2.p_u8[idx];
+ case V4L2_CTRL_TYPE_U16:
+ return ptr1.p_u16[idx] == ptr2.p_u16[idx];
+ case V4L2_CTRL_TYPE_U32:
+ return ptr1.p_u32[idx] == ptr2.p_u32[idx];
+ default:
+ if (ctrl->is_int)
+ return ptr1.p_s32[idx] == ptr2.p_s32[idx];
+ idx *= ctrl->elem_size;
+ return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
+ ctrl->elem_size);
+ }
+}
+
+/* Default intra MPEG-2 quantisation coefficients, from the specification. */
+static const u8 mpeg2_intra_quant_matrix[64] = {
+ 8, 16, 16, 19, 16, 19, 22, 22,
+ 22, 22, 22, 22, 26, 24, 26, 27,
+ 27, 27, 26, 26, 26, 26, 27, 27,
+ 27, 29, 29, 29, 34, 34, 34, 29,
+ 29, 29, 27, 27, 29, 29, 32, 32,
+ 34, 34, 37, 38, 37, 35, 35, 34,
+ 35, 38, 38, 40, 40, 40, 48, 48,
+ 46, 46, 56, 56, 58, 69, 69, 83
+};
+
+static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ /* 4:2:0 */
+ p_mpeg2_sequence->chroma_format = 1;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ /* interlaced top field */
+ p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD;
+ p_mpeg2_picture->picture_coding_type =
+ V4L2_MPEG2_PIC_CODING_TYPE_I;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ p_mpeg2_quant = p;
+
+ memcpy(p_mpeg2_quant->intra_quantiser_matrix,
+ mpeg2_intra_quant_matrix,
+ ARRAY_SIZE(mpeg2_intra_quant_matrix));
+ /*
+ * The default non-intra MPEG-2 quantisation
+ * coefficients are all 16, as per the specification.
+ */
+ memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16,
+ sizeof(p_mpeg2_quant->non_intra_quantiser_matrix));
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+ p_vp8_frame->num_dct_parts = 1;
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ p_fwht_params->version = V4L2_FWHT_VERSION;
+ p_fwht_params->width = 1280;
+ p_fwht_params->height = 720;
+ p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
+ (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
+ break;
+ }
+}
+
+static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ memset(ptr.p_char + idx, ' ', ctrl->minimum);
+ ptr.p_char[idx + ctrl->minimum] = '\0';
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ ptr.p_s64[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ ptr.p_u8[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ ptr.p_u16[idx] = ctrl->default_value;
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ ptr.p_u32[idx] = ctrl->default_value;
+ break;
+ default:
+ std_init_compound(ctrl, idx, ptr);
+ break;
+ }
+}
+
+static void std_log(const struct v4l2_ctrl *ctrl)
+{
+ union v4l2_ctrl_ptr ptr = ctrl->p_cur;
+
+ if (ctrl->is_array) {
+ unsigned int i;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ pr_cont("[%u]", ctrl->dims[i]);
+ pr_cont(" ");
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ pr_cont("%d", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ pr_cont("%s", *ptr.p_s32 ? "true" : "false");
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ pr_cont("0x%08x", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ pr_cont("%lld", *ptr.p_s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ pr_cont("%s", ptr.p_char);
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ pr_cont("%u", (unsigned)*ptr.p_u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ pr_cont("%u", (unsigned)*ptr.p_u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ pr_cont("%u", (unsigned)*ptr.p_u32);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ pr_cont("H264_SPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ pr_cont("H264_PPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ pr_cont("H264_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ pr_cont("H264_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ pr_cont("H264_DECODE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ pr_cont("H264_PRED_WEIGHTS");
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ pr_cont("FWHT_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ pr_cont("VP8_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ pr_cont("HDR10_CLL_INFO");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ pr_cont("HDR10_MASTERING_DISPLAY");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ pr_cont("MPEG2_QUANTISATION");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ pr_cont("MPEG2_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ pr_cont("MPEG2_PICTURE");
+ break;
+ default:
+ pr_cont("unknown type %d", ctrl->type);
+ break;
+ }
+}
+
+/*
+ * Round towards the closest legal value. Be careful when we are
+ * close to the maximum range of the control type to prevent
+ * wrap-arounds.
+ */
+#define ROUND_TO_RANGE(val, offset_type, ctrl) \
+({ \
+ offset_type offset; \
+ if ((ctrl)->maximum >= 0 && \
+ val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
+ val = (ctrl)->maximum; \
+ else \
+ val += (s32)((ctrl)->step / 2); \
+ val = clamp_t(typeof(val), val, \
+ (ctrl)->minimum, (ctrl)->maximum); \
+ offset = (val) - (ctrl)->minimum; \
+ offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
+ val = (ctrl)->minimum + offset; \
+ 0; \
+})
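+
+/*
+ * Worked example: with minimum 0, maximum 255 and step 4, a value of 6
+ * is first biased by step / 2 (to 8), clamped to the range and then
+ * snapped down to a multiple of the step, yielding 8; a value of 5
+ * ends up at 4. The macro always evaluates to 0, so callers can write
+ * 'return ROUND_TO_RANGE(...)'.
+ */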
+
+/* Validate a new control */
+
+#define zero_padding(s) \
+ memset(&(s).padding, 0, sizeof((s).padding))
+#define zero_reserved(s) \
+ memset(&(s).reserved, 0, sizeof((s).reserved))
+
+/*
+ * Compound controls validation requires setting unused fields/flags to zero
+ * in order to properly detect unchanged controls with std_equal's memcmp.
+ */
+static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_sps *p_h264_sps;
+ struct v4l2_ctrl_h264_pps *p_h264_pps;
+ struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
+ struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
+ struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
+ struct v4l2_area *area;
+ void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ switch (p_mpeg2_sequence->chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ switch (p_mpeg2_picture->intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_structure) {
+ case V4L2_MPEG2_PIC_TOP_FIELD:
+ case V4L2_MPEG2_PIC_BOTTOM_FIELD:
+ case V4L2_MPEG2_PIC_FRAME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_I:
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_reserved(*p_mpeg2_picture);
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ break;
+
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ if (p_fwht_params->version < V4L2_FWHT_VERSION)
+ return -EINVAL;
+ if (!p_fwht_params->width || !p_fwht_params->height)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SPS:
+ p_h264_sps = p;
+
+ /* Some syntax elements are only conditionally valid */
+ if (p_h264_sps->pic_order_cnt_type != 0) {
+ p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0;
+ } else if (p_h264_sps->pic_order_cnt_type != 1) {
+ p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0;
+ p_h264_sps->offset_for_non_ref_pic = 0;
+ p_h264_sps->offset_for_top_to_bottom_field = 0;
+ memset(&p_h264_sps->offset_for_ref_frame, 0,
+ sizeof(p_h264_sps->offset_for_ref_frame));
+ }
+
+ if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) {
+ p_h264_sps->chroma_format_idc = 1;
+ p_h264_sps->bit_depth_luma_minus8 = 0;
+ p_h264_sps->bit_depth_chroma_minus8 = 0;
+
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
+
+ if (p_h264_sps->chroma_format_idc < 3)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
+ }
+
+ if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
+
+ /*
+ * The chroma 4:2:2 format requires at least the High 4:2:2 profile.
+ *
+ * The H264 specification and well-known parser implementations
+ * use profile-idc values directly, as that is clearer and
+ * less ambiguous. We do the same here.
+ */
+ if (p_h264_sps->profile_idc < 122 &&
+ p_h264_sps->chroma_format_idc > 1)
+ return -EINVAL;
+ /* The chroma 4:4:4 format requires at least the High 4:4:4 profile */
+ if (p_h264_sps->profile_idc < 244 &&
+ p_h264_sps->chroma_format_idc > 2)
+ return -EINVAL;
+ if (p_h264_sps->chroma_format_idc > 3)
+ return -EINVAL;
+
+ if (p_h264_sps->bit_depth_luma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->bit_depth_chroma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_frame_num_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->pic_order_cnt_type > 2)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PPS:
+ p_h264_pps = p;
+
+ if (p_h264_pps->num_slice_groups_minus1 > 7)
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l0_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l1_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->weighted_bipred_idc > 2)
+ return -EINVAL;
+ /*
+ * pic_init_qp_minus26 shall be in the range of
+ * -(26 + QpBdOffset_y) to +25, inclusive,
+ * where QpBdOffset_y is 6 * bit_depth_luma_minus8
+ */
+ if (p_h264_pps->pic_init_qp_minus26 < -62 ||
+ p_h264_pps->pic_init_qp_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->pic_init_qs_minus26 < -26 ||
+ p_h264_pps->pic_init_qs_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->chroma_qp_index_offset < -12 ||
+ p_h264_pps->chroma_qp_index_offset > 12)
+ return -EINVAL;
+ if (p_h264_pps->second_chroma_qp_index_offset < -12 ||
+ p_h264_pps->second_chroma_qp_index_offset > 12)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ p_h264_pred_weights = p;
+
+ if (p_h264_pred_weights->luma_log2_weight_denom > 7)
+ return -EINVAL;
+ if (p_h264_pred_weights->chroma_log2_weight_denom > 7)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ p_h264_slice_params = p;
+
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->flags &=
+ ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
+
+ if (p_h264_slice_params->colour_plane_id > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->cabac_init_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->disable_deblocking_filter_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 ||
+ p_h264_slice_params->slice_alpha_c0_offset_div2 > 6)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_beta_offset_div2 < -6 ||
+ p_h264_slice_params->slice_beta_offset_div2 > 6)
+ return -EINVAL;
+
+ if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I ||
+ p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI)
+ p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0;
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0;
+
+ if (p_h264_slice_params->num_ref_idx_l0_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_slice_params->num_ref_idx_l1_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ zero_reserved(*p_h264_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ p_h264_dec_params = p;
+
+ if (p_h264_dec_params->nal_ref_idc > 3)
+ return -EINVAL;
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ struct v4l2_h264_dpb_entry *dpb_entry =
+ &p_h264_dec_params->dpb[i];
+
+ zero_reserved(*dpb_entry);
+ }
+ zero_reserved(*p_h264_dec_params);
+ break;
+
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+
+ switch (p_vp8_frame->num_dct_parts) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_padding(p_vp8_frame->segment);
+ zero_padding(p_vp8_frame->lf);
+ zero_padding(p_vp8_frame->quant);
+ zero_padding(p_vp8_frame->entropy);
+ zero_padding(p_vp8_frame->coder_state);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+
+ zero_padding(*p_hevc_pps);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ p_hevc_decode_params = p;
+
+ if (p_hevc_decode_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < p_hevc_decode_params->num_active_dpb_entries;
+ i++) {
+ struct v4l2_hevc_dpb_entry *dpb_entry =
+ &p_hevc_decode_params->dpb[i];
+
+ zero_padding(*dpb_entry);
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ p_hevc_slice_params = p;
+
+ zero_padding(p_hevc_slice_params->pred_weight_table);
+ zero_padding(*p_hevc_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ p_hdr10_mastering = p;
+
+ for (i = 0; i < 3; ++i) {
+ if (p_hdr10_mastering->display_primaries_x[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
+ p_hdr10_mastering->display_primaries_x[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
+ p_hdr10_mastering->display_primaries_y[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
+ p_hdr10_mastering->display_primaries_y[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
+ return -EINVAL;
+ }
+
+ if (p_hdr10_mastering->white_point_x <
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW ||
+ p_hdr10_mastering->white_point_x >
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH ||
+ p_hdr10_mastering->white_point_y <
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW ||
+ p_hdr10_mastering->white_point_y >
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH)
+ return -EINVAL;
+
+ if (p_hdr10_mastering->max_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW ||
+ p_hdr10_mastering->max_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MAX_LUMA_HIGH ||
+ p_hdr10_mastering->min_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MIN_LUMA_LOW ||
+ p_hdr10_mastering->min_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ /* The following restriction comes from the ITU-T Rec. H.265 specification */
+ if (p_hdr10_mastering->max_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW &&
+ p_hdr10_mastering->min_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ break;
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ size_t len;
+ u64 offset;
+ s64 val;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ /*
+ * We can't use the ROUND_TO_RANGE macro here because the
+ * 64-bit division has to go through do_div().
+ */
+ val = ptr.p_s64[idx];
+ if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
+ val = ctrl->maximum;
+ else
+ val += (s64)(ctrl->step / 2);
+ val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
+ offset = val - ctrl->minimum;
+ do_div(offset, ctrl->step);
+ ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
+ return 0;
+ case V4L2_CTRL_TYPE_U8:
+ return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
+ case V4L2_CTRL_TYPE_U16:
+ return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
+ case V4L2_CTRL_TYPE_U32:
+ return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = !!ptr.p_s32[idx];
+ return 0;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
+ return -ERANGE;
+ if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
+ (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BITMASK:
+ ptr.p_s32[idx] &= ctrl->maximum;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ return 0;
+
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ len = strlen(ptr.p_char + idx);
+ if (len < ctrl->minimum)
+ return -ERANGE;
+ if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
+ return -ERANGE;
+ return 0;
+
+ default:
+ return std_validate_compound(ctrl, idx, ptr);
+ }
+}
+
+static const struct v4l2_ctrl_type_ops std_type_ops = {
+ .equal = std_equal,
+ .init = std_init,
+ .log = std_log,
+ .validate = std_validate,
+};
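+
+/*
+ * These are the fallback type operations: v4l2_ctrl_new() below installs
+ * them for any control created without explicit v4l2_ctrl_type_ops.
+ */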
+
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (!ctrl)
+ return;
+ if (!notify) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
+/* Copy one value to another. */
+static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to)
+{
+ if (ctrl == NULL)
+ return;
+ memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
+}
+
+/* Copy the new value to the current value. */
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ bool changed;
+
+ if (ctrl == NULL)
+ return;
+
+ /* has_changed is set by cluster_changed */
+ changed = ctrl->has_changed;
+ if (changed)
+ ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur);
+
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
+ ctrl->flags &=
+ ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
+ if (!is_cur_manual(ctrl->cluster[0])) {
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ctrl->cluster[0]->has_volatiles)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ fh = NULL;
+ }
+ if (changed || ch_flags) {
+ /*
+ * If a control was changed that was not one of the controls
+ * modified by the application, then send the event to all.
+ */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
+ }
+}
+
+/* Copy the current value to the new value */
+void cur_to_new(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
+}
+
+/* Copy the new value to the request value */
+void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+ ref->valid_p_req = true;
+}
+
+/* Copy the current value to the request value */
+void cur_to_req(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
+ ref->valid_p_req = true;
+}
+
+/* Copy the request value to the new value */
+void req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ if (ref->valid_p_req)
+ ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
+ else
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+}
+
+/* Control range checking */
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ fallthrough;
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (step == 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /*
+ * Note: step == menu_skip_mask for menu controls,
+ * so here we check if the default value is masked out.
+ */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
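+
+/*
+ * Example: since step doubles as menu_skip_mask for menu controls, a
+ * menu control with mask BIT(2) cannot use 2 as its default value:
+ * check_range() rejects it with -EINVAL.
+ */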
+
+/* Validate a new control */
+int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
+{
+ unsigned int idx;
+ int err = 0;
+
+ for (idx = 0; !err && idx < ctrl->elems; idx++)
+ err = ctrl->type_ops->validate(ctrl, idx, p_new);
+ return err;
+}
+
+/* Set the handler's error code if it wasn't set earlier already */
+static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
+{
+ if (hdl->error == 0)
+ hdl->error = err;
+ return err;
+}
+
+/* Initialize the handler */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint,
+ struct lock_class_key *key, const char *name)
+{
+ mutex_init(&hdl->_lock);
+ hdl->lock = &hdl->_lock;
+ lockdep_set_class_and_name(hdl->lock, key, name);
+ INIT_LIST_HEAD(&hdl->ctrls);
+ INIT_LIST_HEAD(&hdl->ctrl_refs);
+ hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
+ hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
+ sizeof(hdl->buckets[0]),
+ GFP_KERNEL | __GFP_ZERO);
+ hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ v4l2_ctrl_handler_init_request(hdl);
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
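+
+/*
+ * Note: drivers normally call this through the v4l2_ctrl_handler_init()
+ * macro, which supplies the lockdep key and name, e.g. (illustrative):
+ *
+ *	v4l2_ctrl_handler_init(&priv->hdl, 4);
+ */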
+
+/* Free all controls and control refs */
+void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_ref *ref, *next_ref;
+ struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
+
+ if (hdl == NULL || hdl->buckets == NULL)
+ return;
+
+ v4l2_ctrl_handler_free_request(hdl);
+
+ mutex_lock(hdl->lock);
+ /* Free all nodes */
+ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ /* Free all controls owned by the handler */
+ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
+ list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
+ kvfree(ctrl);
+ }
+ kvfree(hdl->buckets);
+ hdl->buckets = NULL;
+ hdl->cached = NULL;
+ hdl->error = 0;
+ mutex_unlock(hdl->lock);
+ mutex_destroy(&hdl->_lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_free);
+
+/*
+ * For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
+ * be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
+ * with applications that do not use the NEXT_CTRL flag.
+ *
+ * We just find the n-th private user control. It's O(N), but that should
+ * not be an issue in this particular case.
+ */
+static struct v4l2_ctrl_ref *find_private_ref(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+
+ id -= V4L2_CID_PRIVATE_BASE;
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ /*
+ * Search for private user controls that are compatible with
+ * VIDIOC_G/S_CTRL.
+ */
+ if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
+ V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
+ if (!ref->ctrl->is_int)
+ continue;
+ if (id == 0)
+ return ref;
+ id--;
+ }
+ }
+ return NULL;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+ int bucket;
+
+ id &= V4L2_CTRL_ID_MASK;
+
+ /* Old-style private controls need special handling */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return find_private_ref(hdl, id);
+ bucket = id % hdl->nr_of_buckets;
+
+ /* Simple optimization: cache the last control found */
+ if (hdl->cached && hdl->cached->ctrl->id == id)
+ return hdl->cached;
+
+ /* Not in cache, search the hash */
+ ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
+ while (ref && ref->ctrl->id != id)
+ ref = ref->next;
+
+ if (ref)
+ hdl->cached = ref; /* cache it! */
+ return ref;
+}
+
+/* Find a control with the given ID. Take the handler's lock first. */
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = NULL;
+
+ if (hdl) {
+ mutex_lock(hdl->lock);
+ ref = find_ref(hdl, id);
+ mutex_unlock(hdl->lock);
+ }
+ return ref;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return ref ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_find);
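+
+/*
+ * Lookups are O(1) on average: find_ref() hashes the ID into a bucket
+ * (id % nr_of_buckets) and caches the last hit, so repeated queries for
+ * the same control avoid the bucket walk entirely.
+ */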
+
+/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
+{
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl_ref *new_ref;
+ u32 id = ctrl->id;
+ u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
+ int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
+
+ /*
+ * Automatically add the control class if it is not yet present and
+ * the new control is not a compound control.
+ */
+ if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
+ id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
+ if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
+ return hdl->error;
+
+ if (hdl->error)
+ return hdl->error;
+
+ if (allocate_req)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
+ if (!new_ref)
+ return handler_set_err(hdl, -ENOMEM);
+ new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(hdl->lock);
+
+ /*
+ * Add immediately at the end of the list if the list is empty, or if
+ * the last element in the list has a lower ID.
+ * This ensures that when elements are added in ascending order the
+ * insertion is an O(1) operation.
+ */
+ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
+ list_add_tail(&new_ref->node, &hdl->ctrl_refs);
+ goto insert_in_hash;
+ }
+
+ /* Find insert position in sorted list */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->ctrl->id < id)
+ continue;
+ /* Don't add duplicates */
+ if (ref->ctrl->id == id) {
+ kfree(new_ref);
+ goto unlock;
+ }
+ list_add(&new_ref->node, ref->node.prev);
+ break;
+ }
+
+insert_in_hash:
+ /* Insert the control node in the hash */
+ new_ref->next = hdl->buckets[bucket];
+ hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
+ if (ctrl->handler == hdl) {
+ /*
+ * By default each control starts in a cluster of its own.
+ * new_ref->ctrl is basically a cluster array with one
+ * element, so that's perfect to use as the cluster pointer.
+ * But only do this for the handler that owns the control.
+ */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
+
+unlock:
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/* Add a new control */
+static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ const struct v4l2_ctrl_type_ops *type_ops,
+ u32 id, const char *name, enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def,
+ const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
+{
+ struct v4l2_ctrl *ctrl;
+ unsigned int sz_extra;
+ unsigned int nr_of_dims = 0;
+ unsigned int elems = 1;
+ bool is_array;
+ unsigned int tot_ctrl_size;
+ unsigned int idx;
+ void *data;
+ int err;
+
+ if (hdl->error)
+ return NULL;
+
+ while (dims && dims[nr_of_dims]) {
+ elems *= dims[nr_of_dims];
+ nr_of_dims++;
+ if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
+ break;
+ }
+ is_array = nr_of_dims > 0;
+
+ /* Prefill elem_size for all types handled by std_type_ops */
+ switch ((u32)type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ elem_size = sizeof(s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ elem_size = max + 1;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ elem_size = sizeof(u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ elem_size = sizeof(u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ elem_size = sizeof(u32);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation);
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_fwht_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_sps);
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pps);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp8_frame);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
+ default:
+ if (type < V4L2_CTRL_COMPOUND_TYPES)
+ elem_size = sizeof(s32);
+ break;
+ }
+ tot_ctrl_size = elem_size * elems;
+
+ /* Sanity checks */
+ if (id == 0 || name == NULL || !elem_size ||
+ id >= V4L2_CID_PRIVATE_BASE ||
+ (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
+ return NULL;
+ }
+ if (is_array &&
+ (type == V4L2_CTRL_TYPE_BUTTON ||
+ type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ sz_extra = 0;
+ if (type == V4L2_CTRL_TYPE_BUTTON)
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ else if (type == V4L2_CTRL_TYPE_INTEGER64 ||
+ type == V4L2_CTRL_TYPE_STRING ||
+ type >= V4L2_CTRL_COMPOUND_TYPES ||
+ is_array)
+ sz_extra += 2 * tot_ctrl_size;
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
+ ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -ENOMEM);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
+ ctrl->handler = hdl;
+ ctrl->ops = ops;
+ ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
+ ctrl->id = id;
+ ctrl->name = name;
+ ctrl->type = type;
+ ctrl->flags = flags;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
+ ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
+ ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
+ ctrl->is_array = is_array;
+ ctrl->elems = elems;
+ ctrl->nr_of_dims = nr_of_dims;
+ if (nr_of_dims)
+ memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
+ ctrl->elem_size = elem_size;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
+ ctrl->priv = priv;
+ ctrl->cur.val = ctrl->val = def;
+ data = &ctrl[1];
+
+ if (!ctrl->is_int) {
+ ctrl->p_new.p = data;
+ ctrl->p_cur.p = data + tot_ctrl_size;
+ } else {
+ ctrl->p_new.p = &ctrl->val;
+ ctrl->p_cur.p = &ctrl->cur.val;
+ }
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
+ for (idx = 0; idx < elems; idx++) {
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
+ ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
+ }
+
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
+ kvfree(ctrl);
+ return NULL;
+ }
+ mutex_lock(hdl->lock);
+ list_add_tail(&ctrl->node, &hdl->ctrls);
+ mutex_unlock(hdl->lock);
+ return ctrl;
+}
+
+struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_config *cfg, void *priv)
+{
+ bool is_menu;
+ struct v4l2_ctrl *ctrl;
+ const char *name = cfg->name;
+ const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
+ enum v4l2_ctrl_type type = cfg->type;
+ u32 flags = cfg->flags;
+ s64 min = cfg->min;
+ s64 max = cfg->max;
+ u64 step = cfg->step;
+ s64 def = cfg->def;
+
+ if (name == NULL)
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
+ type, min, max,
+ is_menu ? cfg->menu_skip_mask : step, def,
+ cfg->dims, cfg->elem_size,
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
+ if (ctrl)
+ ctrl->is_private = cfg->is_private;
+ return ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_custom);
+
+/* Helper function for standard non-menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s64 min, s64 max, u64 step, s64 def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
+ type >= V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std);
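+
+/*
+ * Hypothetical driver usage: a brightness control with range 0..255,
+ * step 1 and default 128 would be created as
+ *
+ *	v4l2_ctrl_new_std(hdl, &my_ctrl_ops, V4L2_CID_BRIGHTNESS,
+ *			  0, 255, 1, 128);
+ *
+ * where my_ctrl_ops stands in for the driver's v4l2_ctrl_ops.
+ */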
+
+/* Helper function for standard menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u64 mask, u8 _def)
+{
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+ u64 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, qmenu_int, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
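+
+/*
+ * Illustrative use (not from this patch): the power line frequency menu
+ * with no skipped items and a 50 Hz default would be created as
+ *
+ *	v4l2_ctrl_new_std_menu(hdl, &my_ctrl_ops,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ */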
+
+/* Helper function for standard menu controls with driver defined menu */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
+ u64 mask, u8 _def, const char * const *qmenu)
+{
+ enum v4l2_ctrl_type type;
+ const char *name;
+ u32 flags;
+ u64 step;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+
+ /*
+ * v4l2_ctrl_new_std_menu_items() should only be called for
+ * standard controls without a standard menu.
+ */
+ if (v4l2_ctrl_get_menu(id)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, NULL, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
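+
+/*
+ * Illustrative use (not from this patch): a stateless decoder can supply
+ * a compound default via v4l2_ctrl_ptr_create():
+ *
+ *	static const struct v4l2_ctrl_fwht_params fwht_params = { ... };
+ *
+ *	v4l2_ctrl_new_std_compound(hdl, &my_ctrl_ops,
+ *				   V4L2_CID_STATELESS_FWHT_PARAMS,
+ *				   v4l2_ctrl_ptr_create((void *)&fwht_params));
+ */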
+
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ u64 step;
+ s64 max = _max;
+ s64 def = _def;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, 0, def, NULL, 0,
+ flags, NULL, qmenu_int, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
+
+/* Add the controls from another handler to our own. */
+int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *add,
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
+{
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ /* Do nothing if either handler is NULL or if they are the same */
+ if (!hdl || !add || hdl == add)
+ return 0;
+ if (hdl->error)
+ return hdl->error;
+ mutex_lock(add->lock);
+ list_for_each_entry(ref, &add->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+
+ /* Skip handler-private controls. */
+ if (ctrl->is_private)
+ continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
+ /* Filter any unwanted controls */
+ if (filter && !filter(ctrl))
+ continue;
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
+ if (ret)
+ break;
+ }
+ mutex_unlock(add->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_handler);
+
+bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
+{
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
+ return true;
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+ return true;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
+
+/* Cluster controls */
+void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
+{
+ bool has_volatiles = false;
+ int i;
+
+ /* The first control is the master control and it must not be NULL */
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
+
+ for (i = 0; i < ncontrols; i++) {
+ if (controls[i]) {
+ controls[i]->cluster = controls;
+ controls[i]->ncontrols = ncontrols;
+ if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
+ has_volatiles = true;
+ }
+ }
+ controls[0]->has_volatiles = has_volatiles;
+}
+EXPORT_SYMBOL(v4l2_ctrl_cluster);
+
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag = 0;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
+ master->is_auto = true;
+ master->has_volatiles = set_volatile;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ if (!is_cur_manual(master))
+ flag = V4L2_CTRL_FLAG_INACTIVE |
+ (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i])
+ controls[i]->flags |= flag;
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
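+
+/*
+ * Illustrative use (not from this patch): an autogain/gain pair is
+ * typically clustered through two consecutive pointers in the driver's
+ * state structure:
+ *
+ *	priv->autogain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ *					   0, 1, 1, 1);
+ *	priv->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ *				       0, 255, 1, 64);
+ *	v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
+ *
+ * The controls array is stored in each control's cluster pointer, so it
+ * must stay valid for as long as the controls exist.
+ */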
+
+/*
+ * Obtain the current volatile values of an autocluster and mark them
+ * as new.
+ */
+void update_from_auto_cluster(struct v4l2_ctrl *master)
+{
+ int i;
+
+ for (i = 1; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ if (!call_op(master, g_volatile_ctrl))
+ for (i = 1; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 1;
+}
+
+/*
+ * Return non-zero if one or more of the controls in the cluster has a new
+ * value that differs from the current value.
+ */
+static int cluster_changed(struct v4l2_ctrl *master)
+{
+ bool changed = false;
+ unsigned int idx;
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+ bool ctrl_changed = false;
+
+ if (!ctrl)
+ continue;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) {
+ changed = true;
+ ctrl_changed = true;
+ }
+
+ /*
+ * Set has_changed to false to avoid generating
+ * the event V4L2_EVENT_CTRL_CH_VALUE
+ */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ctrl->has_changed = false;
+ continue;
+ }
+
+ for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
+ ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
+ ctrl->p_cur, ctrl->p_new);
+ ctrl->has_changed = ctrl_changed;
+ changed |= ctrl->has_changed;
+ }
+ return changed;
+}
+
+/*
+ * Core function that calls try/s_ctrl and ensures that the new value is
+ * copied to the current value on a set.
+ * Must be called with ctrl->handler->lock held.
+ */
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
+{
+ bool update_flag;
+ int ret;
+ int i;
+
+ /*
+ * Go through the cluster and either validate the new value or
+ * (if no new value was set), copy the current value to the new
+ * value, ensuring a consistent view for the control ops when
+ * called.
+ */
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (!ctrl)
+ continue;
+
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
+ continue;
+ }
+ /*
+ * Check again: it may have changed since the
+ * previous check in try_or_set_ext_ctrls().
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ }
+
+ ret = call_op(master, try_ctrl);
+
+ /* Don't set if there is no change */
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+
+ for (i = 0; i < master->ncontrols; i++) {
+ /*
+ * If we switch from auto to manual mode, and this cluster
+ * contains volatile controls, then all non-master controls
+ * have to be marked as changed. The 'new' value contains
+ * the volatile value (obtained by update_from_auto_cluster),
+ * which now has to become the current value.
+ */
+ if (i && update_flag && is_new_manual(master) &&
+ master->has_volatiles && master->cluster[i])
+ master->cluster[i]->has_changed = true;
+
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
+ return 0;
+}
+
+/* Activate/deactivate a control. */
+void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
+{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ if (inactive)
+ /* set V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_set_bit(4, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(v4l2_ctrl_activate);
+
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (grabbed)
+ /* set V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_set_bit(1, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_grab);
+
+/* Call s_ctrl for all controls owned by the handler */
+int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (hdl == NULL)
+ return 0;
+
+ lockdep_assert_held(hdl->lock);
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ ctrl->done = false;
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node) {
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int i;
+
+ /* Skip if this control was already handled by a cluster. */
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
+ }
+ }
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
+
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ int ret;
+
+ if (hdl == NULL)
+ return 0;
+
+ mutex_lock(hdl->lock);
+ ret = __v4l2_ctrl_handler_setup(hdl);
+ mutex_unlock(hdl->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
+
+/* Log the control name and value */
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ const char *prefix, const char *colon)
+{
+ if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+ return;
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return;
+
+ pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+
+ ctrl->type_ops->log(ctrl);
+
+ if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ V4L2_CTRL_FLAG_GRABBED |
+ V4L2_CTRL_FLAG_VOLATILE)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ pr_cont(" inactive");
+ if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ pr_cont(" grabbed");
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
+ pr_cont(" volatile");
+ }
+ pr_cont("\n");
+}
+
+/* Log all controls owned by the handler */
+void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ const char *prefix)
+{
+ struct v4l2_ctrl *ctrl;
+ const char *colon = "";
+ int len;
+
+ if (!hdl)
+ return;
+ if (!prefix)
+ prefix = "";
+ len = strlen(prefix);
+ if (len && prefix[len - 1] != ' ')
+ colon = ": ";
+ mutex_lock(hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
+ mutex_unlock(hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+
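+/*
+ * Register controls for the device properties (orientation and rotation)
+ * parsed from the firmware node. Both controls are optional: each is only
+ * created when the corresponding property is set, and hdl->error is
+ * returned so a failed creation is reported to the caller.
+ */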
+int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ctrl_ops,
+ const struct v4l2_fwnode_device_properties *p)
+{
+ if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
+ u32 orientation_ctrl;
+
+ switch (p->orientation) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
+ break;
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
+ break;
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
+ orientation_ctrl))
+ return hdl->error;
+ }
+
+ if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
+ if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ p->rotation, p->rotation, 1,
+ p->rotation))
+ return hdl->error;
+ }
+
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
new file mode 100644
index 000000000000..b6344bbf1e00
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework control definitions.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#include <linux/export.h>
+#include <media/v4l2-ctrls.h>
+
+/*
+ * Returns NULL or a character pointer array containing the menu for
+ * the given control ID. The pointer array ends with a NULL pointer.
+ * An empty string signifies a menu entry that is invalid. This allows
+ * drivers to disable certain options if they are not supported.
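+ *
+ * A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	const char * const *menu =
+ *		v4l2_ctrl_get_menu(V4L2_CID_MPEG_AUDIO_ENCODING);
+ *
+ *	for (; menu && *menu; menu++)
+ *		if (**menu)
+ *			pr_info("entry: %s\n", *menu);
+ *
+ * The inner check skips empty strings, which mark invalid entries.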
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id)
+{
+ static const char * const mpeg_audio_sampling_freq[] = {
+ "44.1 kHz",
+ "48 kHz",
+ "32 kHz",
+ NULL
+ };
+ static const char * const mpeg_audio_encoding[] = {
+ "MPEG-1/2 Layer I",
+ "MPEG-1/2 Layer II",
+ "MPEG-1/2 Layer III",
+ "MPEG-2/4 AAC",
+ "AC-3",
+ NULL
+ };
+ static const char * const mpeg_audio_l1_bitrate[] = {
+ "32 kbps",
+ "64 kbps",
+ "96 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "288 kbps",
+ "320 kbps",
+ "352 kbps",
+ "384 kbps",
+ "416 kbps",
+ "448 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l2_bitrate[] = {
+ "32 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_ac3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ "448 kbps",
+ "512 kbps",
+ "576 kbps",
+ "640 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_mode[] = {
+ "Stereo",
+ "Joint Stereo",
+ "Dual",
+ "Mono",
+ NULL
+ };
+ static const char * const mpeg_audio_mode_extension[] = {
+ "Bound 4",
+ "Bound 8",
+ "Bound 12",
+ "Bound 16",
+ NULL
+ };
+ static const char * const mpeg_audio_emphasis[] = {
+ "No Emphasis",
+ "50/15 us",
+ "CCITT J17",
+ NULL
+ };
+ static const char * const mpeg_audio_crc[] = {
+ "No CRC",
+ "16-bit CRC",
+ NULL
+ };
+ static const char * const mpeg_audio_dec_playback[] = {
+ "Auto",
+ "Stereo",
+ "Left",
+ "Right",
+ "Mono",
+ "Swapped Stereo",
+ NULL
+ };
+ static const char * const mpeg_video_encoding[] = {
+ "MPEG-1",
+ "MPEG-2",
+ "MPEG-4 AVC",
+ NULL
+ };
+ static const char * const mpeg_video_aspect[] = {
+ "1x1",
+ "4x3",
+ "16x9",
+ "2.21x1",
+ NULL
+ };
+ static const char * const mpeg_video_bitrate_mode[] = {
+ "Variable Bitrate",
+ "Constant Bitrate",
+ "Constant Quality",
+ NULL
+ };
+ static const char * const mpeg_stream_type[] = {
+ "MPEG-2 Program Stream",
+ "MPEG-2 Transport Stream",
+ "MPEG-1 System Stream",
+ "MPEG-2 DVD-compatible Stream",
+ "MPEG-1 VCD-compatible Stream",
+ "MPEG-2 SVCD-compatible Stream",
+ NULL
+ };
+ static const char * const mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private Packet, IVTV Format",
+ NULL
+ };
+ static const char * const camera_power_line_frequency[] = {
+ "Disabled",
+ "50 Hz",
+ "60 Hz",
+ "Auto",
+ NULL
+ };
+ static const char * const camera_exposure_auto[] = {
+ "Auto Mode",
+ "Manual Mode",
+ "Shutter Priority Mode",
+ "Aperture Priority Mode",
+ NULL
+ };
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ "Matrix",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
+ static const char * const colorfx[] = {
+ "None",
+ "Black & White",
+ "Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
+ "Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
+ NULL
+ };
+ static const char * const tune_emphasis[] = {
+ "None",
+ "50 Microseconds",
+ "75 Microseconds",
+ NULL,
+ };
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ "5.2",
+ "6.0",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Stereo High",
+ "Multiview High",
+ "Constrained High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const h264_fp_arrangement_type[] = {
+ "Checkerboard",
+ "Column",
+ "Row",
+ "Side by Side",
+ "Top Bottom",
+ "Temporal",
+ NULL,
+ };
+ static const char * const h264_fmo_map_type[] = {
+ "Interleaved Slices",
+ "Scattered Slices",
+ "Foreground with Leftover",
+ "Box Out",
+ "Raster Scan",
+ "Wipe Scan",
+ "Explicit",
+ NULL,
+ };
+ static const char * const h264_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const h264_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const h264_hierarchical_coding_type[] = {
+ "Hier Coding B",
+ "Hier Coding P",
+ NULL,
+ };
+ static const char * const mpeg_mpeg2_level[] = {
+ "Low",
+ "Main",
+ "High 1440",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg2_profile[] = {
+ "Simple",
+ "Main",
+ "SNR Scalable",
+ "Spatially Scalable",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Advanced Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
+ NULL,
+ };
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_level[] = {
+ "1",
+ "1.1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
+
+ static const char * const jpeg_chroma_subsampling[] = {
+ "4:4:4",
+ "4:2:2",
+ "4:2:0",
+ "4:1:1",
+ "4:1:0",
+ "Gray",
+ NULL,
+ };
+ static const char * const dv_tx_mode[] = {
+ "DVI-D",
+ "HDMI",
+ NULL,
+ };
+ static const char * const dv_rgb_range[] = {
+ "Automatic",
+ "RGB Limited Range (16-235)",
+ "RGB Full Range (0-255)",
+ NULL,
+ };
+ static const char * const dv_it_content_type[] = {
+ "Graphics",
+ "Photo",
+ "Cinema",
+ "Game",
+ "No IT Content",
+ NULL,
+ };
+ static const char * const detect_md_mode[] = {
+ "Disabled",
+ "Global",
+ "Threshold Grid",
+ "Region Grid",
+ NULL,
+ };
+
+ static const char * const hevc_profile[] = {
+ "Main",
+ "Main Still Picture",
+ "Main 10",
+ NULL,
+ };
+ static const char * const hevc_level[] = {
+ "1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+ static const char * const hevc_hierarchical_coding_type[] = {
+ "B",
+ "P",
+ NULL,
+ };
+ static const char * const hevc_refresh_type[] = {
+ "None",
+ "CRA",
+ "IDR",
+ NULL,
+ };
+ static const char * const hevc_size_of_length_field[] = {
+ "0",
+ "1",
+ "2",
+ "4",
+ NULL,
+ };
+ static const char * const hevc_tier[] = {
+ "Main",
+ "High",
+ NULL,
+ };
+ static const char * const hevc_loop_filter_mode[] = {
+ "Disabled",
+ "Enabled",
+ "Disabled at slice boundary",
+ "NULL",
+ };
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const camera_orientation[] = {
+ "Front",
+ "Back",
+ "External",
+ NULL,
+ };
+ static const char * const mpeg_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ return mpeg_audio_sampling_freq;
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ return mpeg_audio_encoding;
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ return mpeg_audio_l1_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ return mpeg_audio_l2_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ return mpeg_audio_l3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ return mpeg_audio_ac3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ return mpeg_audio_mode;
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ return mpeg_audio_mode_extension;
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ return mpeg_audio_emphasis;
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ return mpeg_audio_crc;
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ return mpeg_audio_dec_playback;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return mpeg_video_encoding;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return mpeg_video_aspect;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return mpeg_video_bitrate_mode;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return camera_power_line_frequency;
+ case V4L2_CID_EXPOSURE_AUTO:
+ return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
+ case V4L2_CID_COLORFX:
+ return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return mpeg_video_frame_skip;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ return h264_fp_arrangement_type;
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ return h264_fmo_map_type;
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ return h264_decode_mode;
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ return h264_start_code;
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ return h264_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ return mpeg_mpeg2_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ return mpeg2_profile;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ return vp9_level;
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ return jpeg_chroma_subsampling;
+ case V4L2_CID_DV_TX_MODE:
+ return dv_tx_mode;
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ return dv_rgb_range;
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ return dv_it_content_type;
+ case V4L2_CID_DETECT_MD_MODE:
+ return detect_md_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ return hevc_profile;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ return hevc_level;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+ return hevc_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ return hevc_refresh_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ return hevc_size_of_length_field;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ return hevc_tier;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ return hevc_loop_filter_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ return hevc_start_code;
+ case V4L2_CID_CAMERA_ORIENTATION:
+ return camera_orientation;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_menu);
+
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); })
+/*
+ * Returns NULL or an s64 array containing the menu for the given
+ * control ID. The total number of menu items is returned in @len.
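+ *
+ * A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	u32 len;
+ *	const s64 *menu =
+ *		v4l2_ctrl_get_int_menu(V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
+ *				       &len);
+ *
+ *	if (menu)
+ *		pr_info("first of %u entries: %lld\n", len, menu[0]);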
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
+
+/* Return the control name. */
+const char *v4l2_ctrl_get_name(u32 id)
+{
+ switch (id) {
+ /* USER controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_USER_CLASS: return "User Controls";
+ case V4L2_CID_BRIGHTNESS: return "Brightness";
+ case V4L2_CID_CONTRAST: return "Contrast";
+ case V4L2_CID_SATURATION: return "Saturation";
+ case V4L2_CID_HUE: return "Hue";
+ case V4L2_CID_AUDIO_VOLUME: return "Volume";
+ case V4L2_CID_AUDIO_BALANCE: return "Balance";
+ case V4L2_CID_AUDIO_BASS: return "Bass";
+ case V4L2_CID_AUDIO_TREBLE: return "Treble";
+ case V4L2_CID_AUDIO_MUTE: return "Mute";
+ case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
+ case V4L2_CID_BLACK_LEVEL: return "Black Level";
+ case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
+ case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
+ case V4L2_CID_RED_BALANCE: return "Red Balance";
+ case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
+ case V4L2_CID_GAMMA: return "Gamma";
+ case V4L2_CID_EXPOSURE: return "Exposure";
+ case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
+ case V4L2_CID_GAIN: return "Gain";
+ case V4L2_CID_HFLIP: return "Horizontal Flip";
+ case V4L2_CID_VFLIP: return "Vertical Flip";
+ case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
+ case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
+ case V4L2_CID_SHARPNESS: return "Sharpness";
+ case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
+ case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_COLOR_KILLER: return "Color Killer";
+ case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
+ case V4L2_CID_ROTATE: return "Rotate";
+ case V4L2_CID_BG_COLOR: return "Background Color";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
+ case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
+
+ /*
+ * Codec controls
+ *
+ * The MPEG controls apply to all codecs; the 'MPEG' part
+ * of the define is historical.
+ *
+ * Keep the order of the 'case's the same as in videodev2.h!
+ */
+ case V4L2_CID_CODEC_CLASS: return "Codec Controls";
+ case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
+ case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
+ case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
+ case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
+ case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
+ case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
+ case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
+ case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
+ case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
+ case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
+ case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
+ case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable";
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
+ return "H264 Set QP Value for HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ return "H264 Constrained Intra Pred";
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color";
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count";
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index";
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames";
+ case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
+ case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
+
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
+
+ /* HEVC controls */
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
+ case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID";
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing";
+ case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
+ case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
+
+ /* CAMERA controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
+ case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
+ case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
+ case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
+ case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
+ case V4L2_CID_PAN_RESET: return "Pan, Reset";
+ case V4L2_CID_TILT_RESET: return "Tilt, Reset";
+ case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
+ case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
+ case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
+ case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
+ case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
+ case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
+ case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
+ case V4L2_CID_PRIVACY: return "Privacy";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
+ case V4L2_CID_PAN_SPEED: return "Pan, Speed";
+ case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
+ case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
+ case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
+
+ /* FM Radio Modulator controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
+ case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
+ case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
+ case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
+ case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
+ case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
+ case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
+ case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
+ case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
+ case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
+ case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+
+ /* Flash controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
+
+ /* JPEG encoder controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
+ case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
+ case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+
+ /* Image source controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+ case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
+ case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+
+ /* Image processing controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+ case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
+ case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
+
+ /* DV controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DV_CLASS: return "Digital Video Controls";
+ case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
+ case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
+ case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
+ case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
+ case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
+ case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
+ case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
+
+ case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
+ case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
+ case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
+ case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
+
+ /* Detection controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DETECT_CLASS: return "Detection Controls";
+ case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
+ case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
+
+ /* Stateless Codec controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls";
+ case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode";
+ case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code";
+ case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set";
+ case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set";
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters";
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters";
+ case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters";
+ case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters";
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header";
+ case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header";
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
+
+ /* Colorimetry controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls";
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info";
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_name);
+
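+/*
+ * Fill in the name, type and flags (and, for some IDs, the range and
+ * default) for a control. Only a subset of *min/*max/*step/*def is
+ * written for many IDs, so callers should initialize them first.
+ * An illustrative caller (hypothetical, not part of this patch):
+ *
+ *	const char *name;
+ *	enum v4l2_ctrl_type type;
+ *	s64 min = 0, max = 255, def = 128;
+ *	u64 step = 1;
+ *	u32 flags;
+ *
+ *	v4l2_ctrl_fill(V4L2_CID_BRIGHTNESS, &name, &type,
+ *		       &min, &max, &step, &def, &flags);
+ */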
+void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
+{
+ *name = v4l2_ctrl_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HUE_AUTO:
+ case V4L2_CID_CHROMA_AGC:
+ case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_AUTOBRIGHTNESS:
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_PRIVACY:
+ case V4L2_CID_AUDIO_LIMITER_ENABLED:
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
+ case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ case V4L2_CID_RDS_TX_MONO_STEREO:
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
+ case V4L2_CID_RDS_TX_COMPRESSED:
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY:
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ case V4L2_CID_ROTATE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
+ case V4L2_CID_DO_WHITE_BALANCE:
+ *type = V4L2_CTRL_TYPE_BUTTON;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_DV_TX_MODE:
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_DEINTERLACING_MODE:
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ case V4L2_CID_DETECT_MD_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ *type = V4L2_CTRL_TYPE_MENU;
+ break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ *type = V4L2_CTRL_TYPE_STRING;
+ break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_USER_CLASS:
+ case V4L2_CID_CAMERA_CLASS:
+ case V4L2_CID_CODEC_CLASS:
+ case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
+ case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
+ case V4L2_CID_DV_CLASS:
+ case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
+ case V4L2_CID_DETECT_CLASS:
+ case V4L2_CID_CODEC_STATELESS_CLASS:
+ case V4L2_CID_COLORIMETRY_CLASS:
+ *type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ /* You can neither read nor write these */
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_BG_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ /* Max is the largest RGB888 value, i.e. 2^24 - 1 */
+ *max = 0xFFFFFF;
+ break;
+ case V4L2_CID_FLASH_FAULT:
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x1ffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x7fffffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *min = 0;
+ /* Default is 8-bit black: luma is 16, chroma is 128 */
+ *def = 0x8000800010LL;
+ *max = 0xffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_PIXEL_RATE:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_DETECT_MD_REGION_GRID:
+ *type = V4L2_CTRL_TYPE_U8;
+ break;
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+ *type = V4L2_CTRL_TYPE_U16;
+ break;
+ case V4L2_CID_RDS_TX_ALT_FREQS:
+ *type = V4L2_CTRL_TYPE_U32;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_PICTURE:
+ *type = V4L2_CTRL_TYPE_MPEG2_PICTURE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION;
+ break;
+ case V4L2_CID_STATELESS_FWHT_PARAMS:
+ *type = V4L2_CTRL_TYPE_FWHT_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_SPS:
+ *type = V4L2_CTRL_TYPE_H264_SPS;
+ break;
+ case V4L2_CID_STATELESS_H264_PPS:
+ *type = V4L2_CTRL_TYPE_H264_PPS;
+ break;
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
+ break;
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS:
+ *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
+ break;
+ case V4L2_CID_STATELESS_VP8_FRAME:
+ *type = V4L2_CTRL_TYPE_VP8_FRAME;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
+ *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
+ *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
+ case V4L2_CID_RDS_TX_DEVIATION:
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION:
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN:
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ case V4L2_CID_PILOT_TONE_FREQUENCY:
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_RF_GAIN:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ case V4L2_CID_TILT_RELATIVE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
+ case V4L2_CID_ZOOM_RELATIVE:
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_RDS_RX_PTY:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_CAMERA_SENSOR_ROTATION:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_fill);
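+
+/*
+ * A minimal usage sketch (hypothetical local variables): callers pass
+ * a pointer for every field and receive the canonical metadata for a
+ * control ID:
+ *
+ *   const char *name;
+ *   enum v4l2_ctrl_type type;
+ *   s64 min, max, def;
+ *   u64 step;
+ *   u32 flags;
+ *
+ *   v4l2_ctrl_fill(V4L2_CID_BRIGHTNESS, &name, &type,
+ *                  &min, &max, &step, &def, &flags);
+ */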
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
new file mode 100644
index 000000000000..d4bf2c716f97
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 controls framework private header.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#ifndef _V4L2_CTRLS_PRIV_H_
+#define _V4L2_CTRLS_PRIV_H_
+
+#define dprintk(vdev, fmt, arg...) do { \
+ if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
+ printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \
+ __func__, video_device_node_name(vdev), ##arg); \
+} while (0)
+
+#define has_op(master, op) \
+ ((master)->ops && (master)->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? (master)->ops->op(master) : 0)
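+
+/*
+ * Example: for a control whose ops implement g_volatile_ctrl,
+ * call_op(master, g_volatile_ctrl) invokes it; when the op is absent,
+ * the macro evaluates to 0 so callers need no NULL checks.
+ */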
+
+static inline u32 node2id(struct list_head *node)
+{
+ return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
+}
+
+/*
+ * Small helper function to determine if the autocluster is set to manual
+ * mode.
+ */
+static inline bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/*
+ * Small helper function to determine if the autocluster will be set to manual
+ * mode.
+ */
+static inline bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
+
+static inline u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
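+
+/*
+ * Compound (pointer-type) controls carry a payload; userspace learns
+ * about this via V4L2_CTRL_FLAG_HAS_PAYLOAD, which is derived here
+ * rather than stored in ctrl->flags.
+ */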
+
+/* v4l2-ctrls-core.c */
+void cur_to_new(struct v4l2_ctrl *ctrl);
+void cur_to_req(struct v4l2_ctrl_ref *ref);
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags);
+void new_to_req(struct v4l2_ctrl_ref *ref);
+void req_to_new(struct v4l2_ctrl_ref *ref);
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl);
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes);
+int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new);
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req);
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id);
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id);
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def);
+void update_from_auto_cluster(struct v4l2_ctrl *master);
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags);
+
+/* v4l2-ctrls-api.c */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev);
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set);
+
+/* v4l2-ctrls-request.c */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl);
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl);
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs);
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set);
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
new file mode 100644
index 000000000000..7d098f287fd9
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework Request API implementation.
+ *
+ * Copyright (C) 2018-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Initialize the request-related fields in a control handler */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl)
+{
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_init(&hdl->req_obj);
+}
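+
+/*
+ * This runs from the main handler's regular init path, so the fields
+ * above are valid even for drivers that never use the Request API.
+ */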
+
+/* Free the request-related fields in a control handler */
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ /*
+ * Do nothing if this isn't the main handler or the main
+ * handler is not used in any request.
+ *
+ * The main handler can be identified by having a NULL ops pointer in
+ * the request object.
+ */
+ if (hdl->req_obj.ops || list_empty(&hdl->requests))
+ return;
+
+ /*
+ * If the main handler is freed and it is used by handler objects in
+ * outstanding requests, then unbind and put those objects before
+ * freeing the main handler.
+ */
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+}
+
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
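+
+/*
+ * These ops tie a per-request control handler object to the media
+ * request lifecycle: ->queue runs when the request is queued,
+ * ->unbind when the object is removed from the request and
+ * ->release when the last reference to the object is put.
+ */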
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
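+
+/*
+ * Note that a control only shows up as part of a request if its ref
+ * carries a valid request value (valid_p_req); cloning the handler
+ * into a request does not by itself mark any control as set.
+ */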
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret) {
+ mutex_lock(from->lock);
+ list_add_tail(&hdl->requests, &from->requests);
+ mutex_unlock(from->lock);
+ }
+ }
+ return ret;
+}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ /*
+ * If there are no controls in this completed request,
+ * then that can only happen if:
+ *
+ * 1) no controls were present in the queued request, and
+ * 2) v4l2_ctrl_request_complete() could not allocate a
+ * control handler object to store the completed state in.
+ *
+ * So return ENOMEM to indicate that there was an out-of-memory
+ * error.
+ */
+ if (!set)
+ return ERR_PTR(-ENOMEM);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(new_hdl);
+ kfree(new_hdl);
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ return ret;
+}
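+
+/*
+ * A minimal userspace sketch of how this path is reached (hypothetical
+ * file descriptors, error handling omitted):
+ *
+ *   struct v4l2_ext_controls ctrls = {
+ *           .which = V4L2_CTRL_WHICH_REQUEST_VAL,
+ *           .request_fd = req_fd,
+ *           .count = 1,
+ *           .controls = &ctrl,
+ *   };
+ *
+ *   ioctl(video_fd, VIDIOC_G_EXT_CTRLS, &ctrls);
+ */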
+
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev) {
+ dprintk(vdev, "%s: missing media device\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->request_fd < 0) {
+ dprintk(vdev, "%s: invalid request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return -EINVAL;
+ }
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(vdev, "%s: cannot find request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return PTR_ERR(req);
+ }
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ dprintk(vdev, "%s: cannot lock request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ dprintk(vdev,
+ "%s: cannot find request object for request fd %d\n",
+ video_device_node_name(vdev),
+ cs->request_fd);
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+
+ return ret;
+}
+
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj) {
+ int ret;
+
+ /* Create a new handler object so the driver can return controls in this request */
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return;
+
+ ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, hdl, main_hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+ return;
+ }
+ hdl->request_is_queued = true;
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->valid_p_req)
+ continue;
+
+ /* Copy the current control value into the request */
+ v4l2_ctrl_lock(ctrl);
+ cur_to_req(ref);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ mutex_lock(main_hdl->lock);
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
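+
+/*
+ * Drivers typically call v4l2_ctrl_request_complete() once they are
+ * done processing a request (for instance from a vb2
+ * .buf_request_complete op), so that the control state read back from
+ * the completed request reflects the hardware at completion time.
+ */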
+
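+/*
+ * Apply the request's control values to the hardware: the counterpart
+ * of v4l2_ctrl_request_complete() above, called by drivers when they
+ * start processing a queued request.
+ */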
+int v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ if (!req || !main_hdl)
+ return 0;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return -EBUSY;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return 0;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return -EBUSY;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+ * Skip read-only controls: they cannot be set through a request.
+ */
+ if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->valid_p_req) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ req_to_new(r);
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to determine whether the cluster will be switched
+ * to manual mode. If so, we must copy the current volatile
+ * values first, since those become the new manual values
+ * (which may then be overwritten by explicit new values from
+ * this set of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ ret = try_or_set_cluster(NULL, master, true, 0);
+ v4l2_ctrl_unlock(master);
+
+ if (ret)
+ break;
+ }
+
+ media_request_object_put(obj);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
deleted file mode 100644
index 0d7fe1bd975a..000000000000
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ /dev/null
@@ -1,5035 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- V4L2 controls framework implementation.
-
- Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
-
- */
-
-#define pr_fmt(fmt) "v4l2-ctrls: " fmt
-
-#include <linux/ctype.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-fwnode.h>
-#include <media/v4l2-ioctl.h>
-
-#define dprintk(vdev, fmt, arg...) do { \
- if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
- printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \
- __func__, video_device_node_name(vdev), ##arg); \
-} while (0)
-
-#define has_op(master, op) \
- (master->ops && master->ops->op)
-#define call_op(master, op) \
- (has_op(master, op) ? master->ops->op(master) : 0)
-
-static const union v4l2_ctrl_ptr ptr_null;
-
-/* Internal temporary helper struct, one for each v4l2_ext_control */
-struct v4l2_ctrl_helper {
- /* Pointer to the control reference of the master control */
- struct v4l2_ctrl_ref *mref;
- /* The control ref corresponding to the v4l2_ext_control ID field. */
- struct v4l2_ctrl_ref *ref;
- /* v4l2_ext_control index of the next control belonging to the
- same cluster, or 0 if there isn't any. */
- u32 next;
-};
-
-/* Small helper function to determine if the autocluster is set to manual
- mode. */
-static bool is_cur_manual(const struct v4l2_ctrl *master)
-{
- return master->is_auto && master->cur.val == master->manual_mode_value;
-}
-
-/* Same as above, but this checks against the new value instead of the
- current value. */
-static bool is_new_manual(const struct v4l2_ctrl *master)
-{
- return master->is_auto && master->val == master->manual_mode_value;
-}
-
-/* Returns NULL or a character pointer array containing the menu for
- the given control ID. The pointer array ends with a NULL pointer.
- An empty string signifies a menu entry that is invalid. This allows
- drivers to disable certain options if it is not supported. */
-const char * const *v4l2_ctrl_get_menu(u32 id)
-{
- static const char * const mpeg_audio_sampling_freq[] = {
- "44.1 kHz",
- "48 kHz",
- "32 kHz",
- NULL
- };
- static const char * const mpeg_audio_encoding[] = {
- "MPEG-1/2 Layer I",
- "MPEG-1/2 Layer II",
- "MPEG-1/2 Layer III",
- "MPEG-2/4 AAC",
- "AC-3",
- NULL
- };
- static const char * const mpeg_audio_l1_bitrate[] = {
- "32 kbps",
- "64 kbps",
- "96 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "288 kbps",
- "320 kbps",
- "352 kbps",
- "384 kbps",
- "416 kbps",
- "448 kbps",
- NULL
- };
- static const char * const mpeg_audio_l2_bitrate[] = {
- "32 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- NULL
- };
- static const char * const mpeg_audio_l3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- NULL
- };
- static const char * const mpeg_audio_ac3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- "448 kbps",
- "512 kbps",
- "576 kbps",
- "640 kbps",
- NULL
- };
- static const char * const mpeg_audio_mode[] = {
- "Stereo",
- "Joint Stereo",
- "Dual",
- "Mono",
- NULL
- };
- static const char * const mpeg_audio_mode_extension[] = {
- "Bound 4",
- "Bound 8",
- "Bound 12",
- "Bound 16",
- NULL
- };
- static const char * const mpeg_audio_emphasis[] = {
- "No Emphasis",
- "50/15 us",
- "CCITT J17",
- NULL
- };
- static const char * const mpeg_audio_crc[] = {
- "No CRC",
- "16-bit CRC",
- NULL
- };
- static const char * const mpeg_audio_dec_playback[] = {
- "Auto",
- "Stereo",
- "Left",
- "Right",
- "Mono",
- "Swapped Stereo",
- NULL
- };
- static const char * const mpeg_video_encoding[] = {
- "MPEG-1",
- "MPEG-2",
- "MPEG-4 AVC",
- NULL
- };
- static const char * const mpeg_video_aspect[] = {
- "1x1",
- "4x3",
- "16x9",
- "2.21x1",
- NULL
- };
- static const char * const mpeg_video_bitrate_mode[] = {
- "Variable Bitrate",
- "Constant Bitrate",
- "Constant Quality",
- NULL
- };
- static const char * const mpeg_stream_type[] = {
- "MPEG-2 Program Stream",
- "MPEG-2 Transport Stream",
- "MPEG-1 System Stream",
- "MPEG-2 DVD-compatible Stream",
- "MPEG-1 VCD-compatible Stream",
- "MPEG-2 SVCD-compatible Stream",
- NULL
- };
- static const char * const mpeg_stream_vbi_fmt[] = {
- "No VBI",
- "Private Packet, IVTV Format",
- NULL
- };
- static const char * const camera_power_line_frequency[] = {
- "Disabled",
- "50 Hz",
- "60 Hz",
- "Auto",
- NULL
- };
- static const char * const camera_exposure_auto[] = {
- "Auto Mode",
- "Manual Mode",
- "Shutter Priority Mode",
- "Aperture Priority Mode",
- NULL
- };
- static const char * const camera_exposure_metering[] = {
- "Average",
- "Center Weighted",
- "Spot",
- "Matrix",
- NULL
- };
- static const char * const camera_auto_focus_range[] = {
- "Auto",
- "Normal",
- "Macro",
- "Infinity",
- NULL
- };
- static const char * const colorfx[] = {
- "None",
- "Black & White",
- "Sepia",
- "Negative",
- "Emboss",
- "Sketch",
- "Sky Blue",
- "Grass Green",
- "Skin Whiten",
- "Vivid",
- "Aqua",
- "Art Freeze",
- "Silhouette",
- "Solarization",
- "Antique",
- "Set Cb/Cr",
- NULL
- };
- static const char * const auto_n_preset_white_balance[] = {
- "Manual",
- "Auto",
- "Incandescent",
- "Fluorescent",
- "Fluorescent H",
- "Horizon",
- "Daylight",
- "Flash",
- "Cloudy",
- "Shade",
- NULL,
- };
- static const char * const camera_iso_sensitivity_auto[] = {
- "Manual",
- "Auto",
- NULL
- };
- static const char * const scene_mode[] = {
- "None",
- "Backlight",
- "Beach/Snow",
- "Candle Light",
- "Dusk/Dawn",
- "Fall Colors",
- "Fireworks",
- "Landscape",
- "Night",
- "Party/Indoor",
- "Portrait",
- "Sports",
- "Sunset",
- "Text",
- NULL
- };
- static const char * const tune_emphasis[] = {
- "None",
- "50 Microseconds",
- "75 Microseconds",
- NULL,
- };
- static const char * const header_mode[] = {
- "Separate Buffer",
- "Joined With 1st Frame",
- NULL,
- };
- static const char * const multi_slice[] = {
- "Single",
- "Max Macroblocks",
- "Max Bytes",
- NULL,
- };
- static const char * const entropy_mode[] = {
- "CAVLC",
- "CABAC",
- NULL,
- };
- static const char * const mpeg_h264_level[] = {
- "1",
- "1b",
- "1.1",
- "1.2",
- "1.3",
- "2",
- "2.1",
- "2.2",
- "3",
- "3.1",
- "3.2",
- "4",
- "4.1",
- "4.2",
- "5",
- "5.1",
- "5.2",
- "6.0",
- "6.1",
- "6.2",
- NULL,
- };
- static const char * const h264_loop_filter[] = {
- "Enabled",
- "Disabled",
- "Disabled at Slice Boundary",
- NULL,
- };
- static const char * const h264_profile[] = {
- "Baseline",
- "Constrained Baseline",
- "Main",
- "Extended",
- "High",
- "High 10",
- "High 422",
- "High 444 Predictive",
- "High 10 Intra",
- "High 422 Intra",
- "High 444 Intra",
- "CAVLC 444 Intra",
- "Scalable Baseline",
- "Scalable High",
- "Scalable High Intra",
- "Stereo High",
- "Multiview High",
- "Constrained High",
- NULL,
- };
- static const char * const vui_sar_idc[] = {
- "Unspecified",
- "1:1",
- "12:11",
- "10:11",
- "16:11",
- "40:33",
- "24:11",
- "20:11",
- "32:11",
- "80:33",
- "18:11",
- "15:11",
- "64:33",
- "160:99",
- "4:3",
- "3:2",
- "2:1",
- "Extended SAR",
- NULL,
- };
- static const char * const h264_fp_arrangement_type[] = {
- "Checkerboard",
- "Column",
- "Row",
- "Side by Side",
- "Top Bottom",
- "Temporal",
- NULL,
- };
- static const char * const h264_fmo_map_type[] = {
- "Interleaved Slices",
- "Scattered Slices",
- "Foreground with Leftover",
- "Box Out",
- "Raster Scan",
- "Wipe Scan",
- "Explicit",
- NULL,
- };
- static const char * const h264_decode_mode[] = {
- "Slice-Based",
- "Frame-Based",
- NULL,
- };
- static const char * const h264_start_code[] = {
- "No Start Code",
- "Annex B Start Code",
- NULL,
- };
- static const char * const h264_hierarchical_coding_type[] = {
- "Hier Coding B",
- "Hier Coding P",
- NULL,
- };
- static const char * const mpeg_mpeg2_level[] = {
- "Low",
- "Main",
- "High 1440",
- "High",
- NULL,
- };
- static const char * const mpeg2_profile[] = {
- "Simple",
- "Main",
- "SNR Scalable",
- "Spatially Scalable",
- "High",
- NULL,
- };
- static const char * const mpeg_mpeg4_level[] = {
- "0",
- "0b",
- "1",
- "2",
- "3",
- "3b",
- "4",
- "5",
- NULL,
- };
- static const char * const mpeg4_profile[] = {
- "Simple",
- "Advanced Simple",
- "Core",
- "Simple Scalable",
- "Advanced Coding Efficiency",
- NULL,
- };
-
- static const char * const vpx_golden_frame_sel[] = {
- "Use Previous Frame",
- "Use Previous Specific Frame",
- NULL,
- };
- static const char * const vp8_profile[] = {
- "0",
- "1",
- "2",
- "3",
- NULL,
- };
- static const char * const vp9_profile[] = {
- "0",
- "1",
- "2",
- "3",
- NULL,
- };
- static const char * const vp9_level[] = {
- "1",
- "1.1",
- "2",
- "2.1",
- "3",
- "3.1",
- "4",
- "4.1",
- "5",
- "5.1",
- "5.2",
- "6",
- "6.1",
- "6.2",
- NULL,
- };
-
- static const char * const flash_led_mode[] = {
- "Off",
- "Flash",
- "Torch",
- NULL,
- };
- static const char * const flash_strobe_source[] = {
- "Software",
- "External",
- NULL,
- };
-
- static const char * const jpeg_chroma_subsampling[] = {
- "4:4:4",
- "4:2:2",
- "4:2:0",
- "4:1:1",
- "4:1:0",
- "Gray",
- NULL,
- };
- static const char * const dv_tx_mode[] = {
- "DVI-D",
- "HDMI",
- NULL,
- };
- static const char * const dv_rgb_range[] = {
- "Automatic",
- "RGB Limited Range (16-235)",
- "RGB Full Range (0-255)",
- NULL,
- };
- static const char * const dv_it_content_type[] = {
- "Graphics",
- "Photo",
- "Cinema",
- "Game",
- "No IT Content",
- NULL,
- };
- static const char * const detect_md_mode[] = {
- "Disabled",
- "Global",
- "Threshold Grid",
- "Region Grid",
- NULL,
- };
-
- static const char * const hevc_profile[] = {
- "Main",
- "Main Still Picture",
- "Main 10",
- NULL,
- };
- static const char * const hevc_level[] = {
- "1",
- "2",
- "2.1",
- "3",
- "3.1",
- "4",
- "4.1",
- "5",
- "5.1",
- "5.2",
- "6",
- "6.1",
- "6.2",
- NULL,
- };
- static const char * const hevc_hierarchial_coding_type[] = {
- "B",
- "P",
- NULL,
- };
- static const char * const hevc_refresh_type[] = {
- "None",
- "CRA",
- "IDR",
- NULL,
- };
- static const char * const hevc_size_of_length_field[] = {
- "0",
- "1",
- "2",
- "4",
- NULL,
- };
- static const char * const hevc_tier[] = {
- "Main",
- "High",
- NULL,
- };
- static const char * const hevc_loop_filter_mode[] = {
- "Disabled",
- "Enabled",
- "Disabled at slice boundary",
- NULL,
- };
- static const char * const hevc_decode_mode[] = {
- "Slice-Based",
- "Frame-Based",
- NULL,
- };
- static const char * const hevc_start_code[] = {
- "No Start Code",
- "Annex B Start Code",
- NULL,
- };
- static const char * const camera_orientation[] = {
- "Front",
- "Back",
- "External",
- NULL,
- };
- static const char * const mpeg_video_frame_skip[] = {
- "Disabled",
- "Level Limit",
- "VBV/CPB Limit",
- NULL,
- };
-
- switch (id) {
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- return mpeg_audio_sampling_freq;
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- return mpeg_audio_encoding;
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- return mpeg_audio_l1_bitrate;
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- return mpeg_audio_l2_bitrate;
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- return mpeg_audio_l3_bitrate;
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- return mpeg_audio_ac3_bitrate;
- case V4L2_CID_MPEG_AUDIO_MODE:
- return mpeg_audio_mode;
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- return mpeg_audio_mode_extension;
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- return mpeg_audio_emphasis;
- case V4L2_CID_MPEG_AUDIO_CRC:
- return mpeg_audio_crc;
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
- return mpeg_audio_dec_playback;
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- return mpeg_video_encoding;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- return mpeg_video_aspect;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- return mpeg_video_bitrate_mode;
- case V4L2_CID_MPEG_STREAM_TYPE:
- return mpeg_stream_type;
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- return mpeg_stream_vbi_fmt;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- return camera_power_line_frequency;
- case V4L2_CID_EXPOSURE_AUTO:
- return camera_exposure_auto;
- case V4L2_CID_EXPOSURE_METERING:
- return camera_exposure_metering;
- case V4L2_CID_AUTO_FOCUS_RANGE:
- return camera_auto_focus_range;
- case V4L2_CID_COLORFX:
- return colorfx;
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- return auto_n_preset_white_balance;
- case V4L2_CID_ISO_SENSITIVITY_AUTO:
- return camera_iso_sensitivity_auto;
- case V4L2_CID_SCENE_MODE:
- return scene_mode;
- case V4L2_CID_TUNE_PREEMPHASIS:
- return tune_emphasis;
- case V4L2_CID_TUNE_DEEMPHASIS:
- return tune_emphasis;
- case V4L2_CID_FLASH_LED_MODE:
- return flash_led_mode;
- case V4L2_CID_FLASH_STROBE_SOURCE:
- return flash_strobe_source;
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- return header_mode;
- case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
- return mpeg_video_frame_skip;
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
- return multi_slice;
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
- return entropy_mode;
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- return mpeg_h264_level;
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- return h264_loop_filter;
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- return h264_profile;
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
- return vui_sar_idc;
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
- return h264_fp_arrangement_type;
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
- return h264_fmo_map_type;
- case V4L2_CID_STATELESS_H264_DECODE_MODE:
- return h264_decode_mode;
- case V4L2_CID_STATELESS_H264_START_CODE:
- return h264_start_code;
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
- return h264_hierarchical_coding_type;
- case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
- return mpeg_mpeg2_level;
- case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
- return mpeg2_profile;
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- return mpeg_mpeg4_level;
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- return mpeg4_profile;
- case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
- return vpx_golden_frame_sel;
- case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- return vp8_profile;
- case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
- return vp9_profile;
- case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
- return vp9_level;
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
- return jpeg_chroma_subsampling;
- case V4L2_CID_DV_TX_MODE:
- return dv_tx_mode;
- case V4L2_CID_DV_TX_RGB_RANGE:
- case V4L2_CID_DV_RX_RGB_RANGE:
- return dv_rgb_range;
- case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
- case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
- return dv_it_content_type;
- case V4L2_CID_DETECT_MD_MODE:
- return detect_md_mode;
- case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
- return hevc_profile;
- case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- return hevc_level;
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
- return hevc_hierarchial_coding_type;
- case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
- return hevc_refresh_type;
- case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
- return hevc_size_of_length_field;
- case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
- return hevc_tier;
- case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
- return hevc_loop_filter_mode;
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
- return hevc_decode_mode;
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
- return hevc_start_code;
- case V4L2_CID_CAMERA_ORIENTATION:
- return camera_orientation;
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_menu);
-
-#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); arr; })
-/*
- * Returns NULL or an s64 type array containing the menu for given
- * control ID. The total number of the menu items is returned in @len.
- */
-const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
-{
- static const s64 qmenu_int_vpx_num_partitions[] = {
- 1, 2, 4, 8,
- };
-
- static const s64 qmenu_int_vpx_num_ref_frames[] = {
- 1, 2, 3,
- };
-
- switch (id) {
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
- return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
- return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
- default:
- *len = 0;
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
-
-/* Return the control name. */
-const char *v4l2_ctrl_get_name(u32 id)
-{
- switch (id) {
- /* USER controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_USER_CLASS: return "User Controls";
- case V4L2_CID_BRIGHTNESS: return "Brightness";
- case V4L2_CID_CONTRAST: return "Contrast";
- case V4L2_CID_SATURATION: return "Saturation";
- case V4L2_CID_HUE: return "Hue";
- case V4L2_CID_AUDIO_VOLUME: return "Volume";
- case V4L2_CID_AUDIO_BALANCE: return "Balance";
- case V4L2_CID_AUDIO_BASS: return "Bass";
- case V4L2_CID_AUDIO_TREBLE: return "Treble";
- case V4L2_CID_AUDIO_MUTE: return "Mute";
- case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
- case V4L2_CID_BLACK_LEVEL: return "Black Level";
- case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
- case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
- case V4L2_CID_RED_BALANCE: return "Red Balance";
- case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
- case V4L2_CID_GAMMA: return "Gamma";
- case V4L2_CID_EXPOSURE: return "Exposure";
- case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
- case V4L2_CID_GAIN: return "Gain";
- case V4L2_CID_HFLIP: return "Horizontal Flip";
- case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
- case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
- case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
- case V4L2_CID_SHARPNESS: return "Sharpness";
- case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
- case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
- case V4L2_CID_COLOR_KILLER: return "Color Killer";
- case V4L2_CID_COLORFX: return "Color Effects";
- case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
- case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
- case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background Color";
- case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
- case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
- case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
- case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
- case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
-
- /* Codec controls */
- /* The MPEG controls are applicable to all codec controls
- * and the 'MPEG' part of the define is historical */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_CODEC_CLASS: return "Codec Controls";
- case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
- case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
- case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
- case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
- case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
- case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
- case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
- case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
- case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
- case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
- case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
- case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
- case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
- case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
- case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
- case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
- case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
- case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
- case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
- case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
- case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
- case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
- case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
- case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
- case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
- case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
- case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
- case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay";
- case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable";
- case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters";
- case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
- case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
- case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
- case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
- case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
- case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
- return "H264 Set QP Value for HC Layers";
- case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
- return "H264 Constrained Intra Pred";
- case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
- case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
- case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
- case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
- case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
- case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
- case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
- case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
- case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
- case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
- case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color";
- case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
- case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
- case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
- case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
- case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
- case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
- case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count";
- case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index";
- case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames";
- case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
- case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
- case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
- case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
-
- /* VPX controls */
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
- case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
- case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
- case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
- case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
- case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
- case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
- case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
- case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
-
- /* HEVC controls */
- case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
- case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
- case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
- case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution";
- case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth";
- case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type";
- case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction";
- case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding";
- case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront";
- case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate";
- case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB";
- case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID";
- case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing";
- case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split";
- case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction";
- case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs";
- case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode";
- case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR";
- case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset";
- case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset";
- case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
- case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
- case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
- case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
- case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
- case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
-
- /* CAMERA controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
- case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
- case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
- case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
- case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
- case V4L2_CID_PAN_RESET: return "Pan, Reset";
- case V4L2_CID_TILT_RESET: return "Tilt, Reset";
- case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
- case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
- case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
- case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
- case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
- case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
- case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
- case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
- case V4L2_CID_PRIVACY: return "Privacy";
- case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
- case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
- case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
- case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
- case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
- case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
- case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
- case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
- case V4L2_CID_SCENE_MODE: return "Scene Mode";
- case V4L2_CID_3A_LOCK: return "3A Lock";
- case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
- case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
- case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
- case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
- case V4L2_CID_PAN_SPEED: return "Pan, Speed";
- case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
- case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
- case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
- case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
-
- /* FM Radio Modulator controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
- case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
- case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
- case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
- case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
- case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
- case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
- case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
- case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
- case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
- case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
- case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
- case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
- case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
- case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
- case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
- case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
- case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
- case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
- case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
- case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
- case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
-
- /* Flash controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_FLASH_CLASS: return "Flash Controls";
- case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
- case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
- case V4L2_CID_FLASH_STROBE: return "Strobe";
- case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
- case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
- case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
- case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
- case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
- case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
- case V4L2_CID_FLASH_FAULT: return "Faults";
- case V4L2_CID_FLASH_CHARGE: return "Charge";
- case V4L2_CID_FLASH_READY: return "Ready to Strobe";
-
- /* JPEG encoder controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
- case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
- case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
- case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
-
- /* Image source controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
- case V4L2_CID_VBLANK: return "Vertical Blanking";
- case V4L2_CID_HBLANK: return "Horizontal Blanking";
- case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
- case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
- case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
- case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
- case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
-
- /* Image processing controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
- case V4L2_CID_LINK_FREQ: return "Link Frequency";
- case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
- case V4L2_CID_TEST_PATTERN: return "Test Pattern";
- case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
- case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
-
- /* DV controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_DV_CLASS: return "Digital Video Controls";
- case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
- case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
- case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
- case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
- case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
- case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
- case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
- case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
- case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
-
- case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
- case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
- case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
- case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
- case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
- case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
- case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
- case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
- case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
- case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
- case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
- case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
- case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
- case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
- case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
- case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
- case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
- case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
- case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
- case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
-
- /* Detection controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_DETECT_CLASS: return "Detection Controls";
- case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
- case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
- case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
- case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
-
- /* Stateless Codec controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls";
- case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode";
- case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code";
- case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set";
- case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set";
- case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
- case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
- case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters";
- case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters";
- case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters";
- case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters";
-
- /* Colorimetry controls */
- /* Keep the order of the 'case's the same as in v4l2-controls.h! */
- case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls";
- case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info";
- case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display";
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_name);
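v4l2_ctrl_get_name() simply maps a control ID onto the name table above and returns NULL for unknown IDs. A minimal driver-side sketch (my_drv_print_ctrl is a hypothetical helper, not part of this patch):

#include <media/v4l2-ctrls.h>

static void my_drv_print_ctrl(u32 id)
{
	const char *name = v4l2_ctrl_get_name(id);

	/* NULL means the ID is not one of the standard controls above */
	pr_info("control 0x%08x: %s\n", id, name ? name : "unknown");
}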
-
-void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
- s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
-{
- *name = v4l2_ctrl_get_name(id);
- *flags = 0;
-
- switch (id) {
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_LOUDNESS:
- case V4L2_CID_AUTO_WHITE_BALANCE:
- case V4L2_CID_AUTOGAIN:
- case V4L2_CID_HFLIP:
- case V4L2_CID_VFLIP:
- case V4L2_CID_HUE_AUTO:
- case V4L2_CID_CHROMA_AGC:
- case V4L2_CID_COLOR_KILLER:
- case V4L2_CID_AUTOBRIGHTNESS:
- case V4L2_CID_MPEG_AUDIO_MUTE:
- case V4L2_CID_MPEG_VIDEO_MUTE:
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
- case V4L2_CID_MPEG_VIDEO_PULLDOWN:
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
- case V4L2_CID_FOCUS_AUTO:
- case V4L2_CID_PRIVACY:
- case V4L2_CID_AUDIO_LIMITER_ENABLED:
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
- case V4L2_CID_PILOT_TONE_ENABLED:
- case V4L2_CID_ILLUMINATORS_1:
- case V4L2_CID_ILLUMINATORS_2:
- case V4L2_CID_FLASH_STROBE_STATUS:
- case V4L2_CID_FLASH_CHARGE:
- case V4L2_CID_FLASH_READY:
- case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
- case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
- case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
- case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
- case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
- case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
- case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
- case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
- case V4L2_CID_WIDE_DYNAMIC_RANGE:
- case V4L2_CID_IMAGE_STABILIZATION:
- case V4L2_CID_RDS_RECEPTION:
- case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
- case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
- case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
- case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
- case V4L2_CID_RF_TUNER_PLL_LOCK:
- case V4L2_CID_RDS_TX_MONO_STEREO:
- case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
- case V4L2_CID_RDS_TX_COMPRESSED:
- case V4L2_CID_RDS_TX_DYNAMIC_PTY:
- case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
- case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
- case V4L2_CID_RDS_TX_MUSIC_SPEECH:
- case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
- case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
- case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
- case V4L2_CID_RDS_RX_MUSIC_SPEECH:
- *type = V4L2_CTRL_TYPE_BOOLEAN;
- *min = 0;
- *max = *step = 1;
- break;
- case V4L2_CID_ROTATE:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- break;
- case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
- case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
- case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
- *type = V4L2_CTRL_TYPE_INTEGER;
- break;
- case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
- *type = V4L2_CTRL_TYPE_INTEGER;
- break;
- case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
- break;
- case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
- *type = V4L2_CTRL_TYPE_BITMASK;
- *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
- break;
- case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
- case V4L2_CID_PAN_RESET:
- case V4L2_CID_TILT_RESET:
- case V4L2_CID_FLASH_STROBE:
- case V4L2_CID_FLASH_STROBE_STOP:
- case V4L2_CID_AUTO_FOCUS_START:
- case V4L2_CID_AUTO_FOCUS_STOP:
- case V4L2_CID_DO_WHITE_BALANCE:
- *type = V4L2_CTRL_TYPE_BUTTON;
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
- V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
- *min = *max = *step = *def = 0;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- case V4L2_CID_MPEG_AUDIO_CRC:
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_STREAM_TYPE:
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- case V4L2_CID_EXPOSURE_AUTO:
- case V4L2_CID_AUTO_FOCUS_RANGE:
- case V4L2_CID_COLORFX:
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- case V4L2_CID_TUNE_PREEMPHASIS:
- case V4L2_CID_FLASH_LED_MODE:
- case V4L2_CID_FLASH_STROBE_SOURCE:
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
- case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
- case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
- case V4L2_CID_ISO_SENSITIVITY_AUTO:
- case V4L2_CID_EXPOSURE_METERING:
- case V4L2_CID_SCENE_MODE:
- case V4L2_CID_DV_TX_MODE:
- case V4L2_CID_DV_TX_RGB_RANGE:
- case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
- case V4L2_CID_DV_RX_RGB_RANGE:
- case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
- case V4L2_CID_TEST_PATTERN:
- case V4L2_CID_DEINTERLACING_MODE:
- case V4L2_CID_TUNE_DEEMPHASIS:
- case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
- case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
- case V4L2_CID_DETECT_MD_MODE:
- case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
- case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
- case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
- case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
- case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
- case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
- case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
- case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
- case V4L2_CID_STATELESS_H264_DECODE_MODE:
- case V4L2_CID_STATELESS_H264_START_CODE:
- case V4L2_CID_CAMERA_ORIENTATION:
- *type = V4L2_CTRL_TYPE_MENU;
- break;
- case V4L2_CID_LINK_FREQ:
- *type = V4L2_CTRL_TYPE_INTEGER_MENU;
- break;
- case V4L2_CID_RDS_TX_PS_NAME:
- case V4L2_CID_RDS_TX_RADIO_TEXT:
- case V4L2_CID_RDS_RX_PS_NAME:
- case V4L2_CID_RDS_RX_RADIO_TEXT:
- *type = V4L2_CTRL_TYPE_STRING;
- break;
- case V4L2_CID_ISO_SENSITIVITY:
- case V4L2_CID_AUTO_EXPOSURE_BIAS:
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
- case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
- *type = V4L2_CTRL_TYPE_INTEGER_MENU;
- break;
- case V4L2_CID_USER_CLASS:
- case V4L2_CID_CAMERA_CLASS:
- case V4L2_CID_CODEC_CLASS:
- case V4L2_CID_FM_TX_CLASS:
- case V4L2_CID_FLASH_CLASS:
- case V4L2_CID_JPEG_CLASS:
- case V4L2_CID_IMAGE_SOURCE_CLASS:
- case V4L2_CID_IMAGE_PROC_CLASS:
- case V4L2_CID_DV_CLASS:
- case V4L2_CID_FM_RX_CLASS:
- case V4L2_CID_RF_TUNER_CLASS:
- case V4L2_CID_DETECT_CLASS:
- case V4L2_CID_CODEC_STATELESS_CLASS:
- case V4L2_CID_COLORIMETRY_CLASS:
- *type = V4L2_CTRL_TYPE_CTRL_CLASS;
- /* You can neither read nor write these */
- *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
- *min = *max = *step = *def = 0;
- break;
- case V4L2_CID_BG_COLOR:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *step = 1;
- *min = 0;
- /* The maximum is the largest RGB888 value, i.e. 2^24 - 1 */
- *max = 0xFFFFFF;
- break;
- case V4L2_CID_FLASH_FAULT:
- case V4L2_CID_JPEG_ACTIVE_MARKER:
- case V4L2_CID_3A_LOCK:
- case V4L2_CID_AUTO_FOCUS_STATUS:
- case V4L2_CID_DV_TX_HOTPLUG:
- case V4L2_CID_DV_TX_RXSENSE:
- case V4L2_CID_DV_TX_EDID_PRESENT:
- case V4L2_CID_DV_RX_POWER_PRESENT:
- *type = V4L2_CTRL_TYPE_BITMASK;
- break;
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- case V4L2_CID_MPEG_VIDEO_DEC_PTS:
- *type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
- *min = *def = 0;
- *max = 0x1ffffffffLL;
- *step = 1;
- break;
- case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
- *type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
- *min = *def = 0;
- *max = 0x7fffffffffffffffLL;
- *step = 1;
- break;
- case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
- *type = V4L2_CTRL_TYPE_INTEGER64;
- *min = 0;
- /* default is 8-bit black: luma is 16, chroma is 128 */
- *def = 0x8000800010LL;
- *max = 0xffffffffffffLL;
- *step = 1;
- break;
- case V4L2_CID_PIXEL_RATE:
- *type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- case V4L2_CID_DETECT_MD_REGION_GRID:
- *type = V4L2_CTRL_TYPE_U8;
- break;
- case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
- *type = V4L2_CTRL_TYPE_U16;
- break;
- case V4L2_CID_RDS_TX_ALT_FREQS:
- *type = V4L2_CTRL_TYPE_U32;
- break;
- case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS:
- *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS;
- break;
- case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:
- *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION;
- break;
- case V4L2_CID_STATELESS_FWHT_PARAMS:
- *type = V4L2_CTRL_TYPE_FWHT_PARAMS;
- break;
- case V4L2_CID_STATELESS_H264_SPS:
- *type = V4L2_CTRL_TYPE_H264_SPS;
- break;
- case V4L2_CID_STATELESS_H264_PPS:
- *type = V4L2_CTRL_TYPE_H264_PPS;
- break;
- case V4L2_CID_STATELESS_H264_SCALING_MATRIX:
- *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
- break;
- case V4L2_CID_STATELESS_H264_SLICE_PARAMS:
- *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
- break;
- case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
- *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
- break;
- case V4L2_CID_STATELESS_H264_PRED_WEIGHTS:
- *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
- break;
- case V4L2_CID_STATELESS_VP8_FRAME:
- *type = V4L2_CTRL_TYPE_VP8_FRAME;
- break;
- case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
- *type = V4L2_CTRL_TYPE_HEVC_SPS;
- break;
- case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
- *type = V4L2_CTRL_TYPE_HEVC_PPS;
- break;
- case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
- *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
- break;
- case V4L2_CID_UNIT_CELL_SIZE:
- *type = V4L2_CTRL_TYPE_AREA;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
- *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO;
- break;
- case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
- *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY;
- break;
- default:
- *type = V4L2_CTRL_TYPE_INTEGER;
- break;
- }
- switch (id) {
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- case V4L2_CID_MPEG_STREAM_TYPE:
- *flags |= V4L2_CTRL_FLAG_UPDATE;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- case V4L2_CID_HUE:
- case V4L2_CID_RED_BALANCE:
- case V4L2_CID_BLUE_BALANCE:
- case V4L2_CID_GAMMA:
- case V4L2_CID_SHARPNESS:
- case V4L2_CID_CHROMA_GAIN:
- case V4L2_CID_RDS_TX_DEVIATION:
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
- case V4L2_CID_AUDIO_LIMITER_DEVIATION:
- case V4L2_CID_AUDIO_COMPRESSION_GAIN:
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
- case V4L2_CID_PILOT_TONE_DEVIATION:
- case V4L2_CID_PILOT_TONE_FREQUENCY:
- case V4L2_CID_TUNE_POWER_LEVEL:
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- case V4L2_CID_RF_TUNER_RF_GAIN:
- case V4L2_CID_RF_TUNER_LNA_GAIN:
- case V4L2_CID_RF_TUNER_MIXER_GAIN:
- case V4L2_CID_RF_TUNER_IF_GAIN:
- case V4L2_CID_RF_TUNER_BANDWIDTH:
- case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
- *flags |= V4L2_CTRL_FLAG_SLIDER;
- break;
- case V4L2_CID_PAN_RELATIVE:
- case V4L2_CID_TILT_RELATIVE:
- case V4L2_CID_FOCUS_RELATIVE:
- case V4L2_CID_IRIS_RELATIVE:
- case V4L2_CID_ZOOM_RELATIVE:
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
- V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
- break;
- case V4L2_CID_FLASH_STROBE_STATUS:
- case V4L2_CID_AUTO_FOCUS_STATUS:
- case V4L2_CID_FLASH_READY:
- case V4L2_CID_DV_TX_HOTPLUG:
- case V4L2_CID_DV_TX_RXSENSE:
- case V4L2_CID_DV_TX_EDID_PRESENT:
- case V4L2_CID_DV_RX_POWER_PRESENT:
- case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
- case V4L2_CID_RDS_RX_PTY:
- case V4L2_CID_RDS_RX_PS_NAME:
- case V4L2_CID_RDS_RX_RADIO_TEXT:
- case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
- case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
- case V4L2_CID_RDS_RX_MUSIC_SPEECH:
- case V4L2_CID_CAMERA_ORIENTATION:
- case V4L2_CID_CAMERA_SENSOR_ROTATION:
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- case V4L2_CID_RF_TUNER_PLL_LOCK:
- *flags |= V4L2_CTRL_FLAG_VOLATILE;
- break;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_fill);
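A sketch of letting v4l2_ctrl_fill() supply the standard metadata for a known CID. Note that the boolean branch above sets *type, *min, *max and *step but never touches *def, so the caller has to seed it (the chosen CID and seed value are ours):

	const char *name;
	enum v4l2_ctrl_type type;
	s64 min, max, def = 0;	/* only some branches override def */
	u64 step;
	u32 flags;

	v4l2_ctrl_fill(V4L2_CID_AUDIO_MUTE, &name, &type,
		       &min, &max, &step, &def, &flags);
	/* type == V4L2_CTRL_TYPE_BOOLEAN, min == 0, max == step == 1 */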
-
-static u32 user_flags(const struct v4l2_ctrl *ctrl)
-{
- u32 flags = ctrl->flags;
-
- if (ctrl->is_ptr)
- flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
-
- return flags;
-}
-
-static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
-{
- memset(ev, 0, sizeof(*ev));
- ev->type = V4L2_EVENT_CTRL;
- ev->id = ctrl->id;
- ev->u.ctrl.changes = changes;
- ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = user_flags(ctrl);
- if (ctrl->is_ptr)
- ev->u.ctrl.value64 = 0;
- else
- ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
- ev->u.ctrl.minimum = ctrl->minimum;
- ev->u.ctrl.maximum = ctrl->maximum;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU
- || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
- ev->u.ctrl.step = 1;
- else
- ev->u.ctrl.step = ctrl->step;
- ev->u.ctrl.default_value = ctrl->default_value;
-}
-
-static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
-{
- struct v4l2_event ev;
- struct v4l2_subscribed_event *sev;
-
- if (list_empty(&ctrl->ev_subs))
- return;
- fill_event(&ev, ctrl, changes);
-
- list_for_each_entry(sev, &ctrl->ev_subs, node)
- if (sev->fh != fh ||
- (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
- v4l2_event_queue_fh(sev->fh, &ev);
-}
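The V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK test above means a file handle normally does not receive events for changes it made itself, unless it asked for them. The corresponding userspace subscription, as a hedged sketch (fd is an already-open V4L2 device):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_CTRL,
		.id = V4L2_CID_BRIGHTNESS,
		.flags = V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK,
	};

	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);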
-
-static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr1,
- union v4l2_ctrl_ptr ptr2)
-{
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BUTTON:
- return false;
- case V4L2_CTRL_TYPE_STRING:
- idx *= ctrl->elem_size;
- /* strings are always 0-terminated */
- return !strcmp(ptr1.p_char + idx, ptr2.p_char + idx);
- case V4L2_CTRL_TYPE_INTEGER64:
- return ptr1.p_s64[idx] == ptr2.p_s64[idx];
- case V4L2_CTRL_TYPE_U8:
- return ptr1.p_u8[idx] == ptr2.p_u8[idx];
- case V4L2_CTRL_TYPE_U16:
- return ptr1.p_u16[idx] == ptr2.p_u16[idx];
- case V4L2_CTRL_TYPE_U32:
- return ptr1.p_u32[idx] == ptr2.p_u32[idx];
- default:
- if (ctrl->is_int)
- return ptr1.p_s32[idx] == ptr2.p_s32[idx];
- idx *= ctrl->elem_size;
- return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
- ctrl->elem_size);
- }
-}
-
-static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
-{
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
- struct v4l2_ctrl_vp8_frame *p_vp8_frame;
- struct v4l2_ctrl_fwht_params *p_fwht_params;
- void *p = ptr.p + idx * ctrl->elem_size;
-
- if (ctrl->p_def.p_const)
- memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
- else
- memset(p, 0, ctrl->elem_size);
-
- /*
- * The cast is needed to get rid of a gcc warning complaining that
- * V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS is not part of the
- * v4l2_ctrl_type enum.
- */
- switch ((u32)ctrl->type) {
- case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
- p_mpeg2_slice_params = p;
- /* 4:2:0 */
- p_mpeg2_slice_params->sequence.chroma_format = 1;
- /* interlaced top field */
- p_mpeg2_slice_params->picture.picture_structure = 1;
- p_mpeg2_slice_params->picture.picture_coding_type =
- V4L2_MPEG2_PICTURE_CODING_TYPE_I;
- break;
- case V4L2_CTRL_TYPE_VP8_FRAME:
- p_vp8_frame = p;
- p_vp8_frame->num_dct_parts = 1;
- break;
- case V4L2_CTRL_TYPE_FWHT_PARAMS:
- p_fwht_params = p;
- p_fwht_params->version = V4L2_FWHT_VERSION;
- p_fwht_params->width = 1280;
- p_fwht_params->height = 720;
- p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
- (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
- break;
- }
-}
-
-static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
-{
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_STRING:
- idx *= ctrl->elem_size;
- memset(ptr.p_char + idx, ' ', ctrl->minimum);
- ptr.p_char[idx + ctrl->minimum] = '\0';
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- ptr.p_s64[idx] = ctrl->default_value;
- break;
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_BITMASK:
- case V4L2_CTRL_TYPE_BOOLEAN:
- ptr.p_s32[idx] = ctrl->default_value;
- break;
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_CTRL_CLASS:
- ptr.p_s32[idx] = 0;
- break;
- case V4L2_CTRL_TYPE_U8:
- ptr.p_u8[idx] = ctrl->default_value;
- break;
- case V4L2_CTRL_TYPE_U16:
- ptr.p_u16[idx] = ctrl->default_value;
- break;
- case V4L2_CTRL_TYPE_U32:
- ptr.p_u32[idx] = ctrl->default_value;
- break;
- default:
- std_init_compound(ctrl, idx, ptr);
- break;
- }
-}
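To make the STRING branch concrete (example length ours):

	/*
	 * ctrl->minimum == 4:
	 *   p_char -> { ' ', ' ', ' ', ' ', '\0' }
	 * i.e. the shortest string the length check in std_validate()
	 * below will accept.
	 */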
-
-static void std_log(const struct v4l2_ctrl *ctrl)
-{
- union v4l2_ctrl_ptr ptr = ctrl->p_cur;
-
- if (ctrl->is_array) {
- unsigned i;
-
- for (i = 0; i < ctrl->nr_of_dims; i++)
- pr_cont("[%u]", ctrl->dims[i]);
- pr_cont(" ");
- }
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- pr_cont("%d", *ptr.p_s32);
- break;
- case V4L2_CTRL_TYPE_BOOLEAN:
- pr_cont("%s", *ptr.p_s32 ? "true" : "false");
- break;
- case V4L2_CTRL_TYPE_MENU:
- pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
- break;
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
- break;
- case V4L2_CTRL_TYPE_BITMASK:
- pr_cont("0x%08x", *ptr.p_s32);
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- pr_cont("%lld", *ptr.p_s64);
- break;
- case V4L2_CTRL_TYPE_STRING:
- pr_cont("%s", ptr.p_char);
- break;
- case V4L2_CTRL_TYPE_U8:
- pr_cont("%u", (unsigned)*ptr.p_u8);
- break;
- case V4L2_CTRL_TYPE_U16:
- pr_cont("%u", (unsigned)*ptr.p_u16);
- break;
- case V4L2_CTRL_TYPE_U32:
- pr_cont("%u", (unsigned)*ptr.p_u32);
- break;
- case V4L2_CTRL_TYPE_H264_SPS:
- pr_cont("H264_SPS");
- break;
- case V4L2_CTRL_TYPE_H264_PPS:
- pr_cont("H264_PPS");
- break;
- case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
- pr_cont("H264_SCALING_MATRIX");
- break;
- case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
- pr_cont("H264_SLICE_PARAMS");
- break;
- case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
- pr_cont("H264_DECODE_PARAMS");
- break;
- case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
- pr_cont("H264_PRED_WEIGHTS");
- break;
- case V4L2_CTRL_TYPE_FWHT_PARAMS:
- pr_cont("FWHT_PARAMS");
- break;
- case V4L2_CTRL_TYPE_VP8_FRAME:
- pr_cont("VP8_FRAME");
- break;
- case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
- pr_cont("HDR10_CLL_INFO");
- break;
- case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
- pr_cont("HDR10_MASTERING_DISPLAY");
- break;
- default:
- pr_cont("unknown type %d", ctrl->type);
- break;
- }
-}
-
-/*
- * Round towards the closest legal value. Be careful when we are
- * close to the maximum range of the control type to prevent
- * wrap-arounds.
- */
-#define ROUND_TO_RANGE(val, offset_type, ctrl) \
-({ \
- offset_type offset; \
- if ((ctrl)->maximum >= 0 && \
- val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
- val = (ctrl)->maximum; \
- else \
- val += (s32)((ctrl)->step / 2); \
- val = clamp_t(typeof(val), val, \
- (ctrl)->minimum, (ctrl)->maximum); \
- offset = (val) - (ctrl)->minimum; \
- offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
- val = (ctrl)->minimum + offset; \
- 0; \
-})
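A worked pass through the macro with numbers of our own choosing, minimum = 0, maximum = 10, step = 4, val = 7:

	/*
	 *   7 < 10 - 4/2, so take the else branch: val += 4/2  ->  9
	 *   clamp(9, 0, 10)                                    ->  9
	 *   offset = 9 - 0 = 9; 4 * (9 / 4) = 8
	 *   val = 0 + 8                                        ->  8
	 * 7 rounds to 8, the nearest multiple of the step.
	 */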
-
-/* Validate a new control */
-
-#define zero_padding(s) \
- memset(&(s).padding, 0, sizeof((s).padding))
-#define zero_reserved(s) \
- memset(&(s).reserved, 0, sizeof((s).reserved))
-
-/*
- * Compound control validation requires setting unused fields/flags to zero
- * in order to properly detect unchanged controls with std_equal's memcmp.
- */
-static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
-{
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
- struct v4l2_ctrl_vp8_frame *p_vp8_frame;
- struct v4l2_ctrl_fwht_params *p_fwht_params;
- struct v4l2_ctrl_h264_sps *p_h264_sps;
- struct v4l2_ctrl_h264_pps *p_h264_pps;
- struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
- struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
- struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
- struct v4l2_ctrl_hevc_sps *p_hevc_sps;
- struct v4l2_ctrl_hevc_pps *p_hevc_pps;
- struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
- struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
- struct v4l2_area *area;
- void *p = ptr.p + idx * ctrl->elem_size;
- unsigned int i;
-
- switch ((u32)ctrl->type) {
- case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
- p_mpeg2_slice_params = p;
-
- switch (p_mpeg2_slice_params->sequence.chroma_format) {
- case 1: /* 4:2:0 */
- case 2: /* 4:2:2 */
- case 3: /* 4:4:4 */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
- case 0: /* 8 bits */
- case 1: /* 9 bits */
- case 2: /* 10 bits */
- case 3: /* 11 bits */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.picture_structure) {
- case 1: /* interlaced top field */
- case 2: /* interlaced bottom field */
- case 3: /* progressive */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.picture_coding_type) {
- case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
- case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- break;
- default:
- return -EINVAL;
- }
-
- break;
-
- case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
- break;
-
- case V4L2_CTRL_TYPE_FWHT_PARAMS:
- p_fwht_params = p;
- if (p_fwht_params->version < V4L2_FWHT_VERSION)
- return -EINVAL;
- if (!p_fwht_params->width || !p_fwht_params->height)
- return -EINVAL;
- break;
-
- case V4L2_CTRL_TYPE_H264_SPS:
- p_h264_sps = p;
-
- /* Some syntax elements are only conditionally valid */
- if (p_h264_sps->pic_order_cnt_type != 0) {
- p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0;
- } else if (p_h264_sps->pic_order_cnt_type != 1) {
- p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0;
- p_h264_sps->offset_for_non_ref_pic = 0;
- p_h264_sps->offset_for_top_to_bottom_field = 0;
- memset(&p_h264_sps->offset_for_ref_frame, 0,
- sizeof(p_h264_sps->offset_for_ref_frame));
- }
-
- if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) {
- p_h264_sps->chroma_format_idc = 1;
- p_h264_sps->bit_depth_luma_minus8 = 0;
- p_h264_sps->bit_depth_chroma_minus8 = 0;
-
- p_h264_sps->flags &=
- ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
-
- if (p_h264_sps->chroma_format_idc < 3)
- p_h264_sps->flags &=
- ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
- }
-
- if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
- p_h264_sps->flags &=
- ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
-
- /*
- * Chroma 4:2:2 format requires at least the High 4:2:2 profile.
- *
- * The H264 specification and well-known parser implementations
- * use profile-idc values directly, as that is clearer and
- * less ambiguous. We do the same here.
- */
- if (p_h264_sps->profile_idc < 122 &&
- p_h264_sps->chroma_format_idc > 1)
- return -EINVAL;
- /* Chroma 4:4:4 format requires at least the High 4:4:4 profile */
- if (p_h264_sps->profile_idc < 244 &&
- p_h264_sps->chroma_format_idc > 2)
- return -EINVAL;
- if (p_h264_sps->chroma_format_idc > 3)
- return -EINVAL;
-
- if (p_h264_sps->bit_depth_luma_minus8 > 6)
- return -EINVAL;
- if (p_h264_sps->bit_depth_chroma_minus8 > 6)
- return -EINVAL;
- if (p_h264_sps->log2_max_frame_num_minus4 > 12)
- return -EINVAL;
- if (p_h264_sps->pic_order_cnt_type > 2)
- return -EINVAL;
- if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12)
- return -EINVAL;
- if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN)
- return -EINVAL;
- break;
-
- case V4L2_CTRL_TYPE_H264_PPS:
- p_h264_pps = p;
-
- if (p_h264_pps->num_slice_groups_minus1 > 7)
- return -EINVAL;
- if (p_h264_pps->num_ref_idx_l0_default_active_minus1 >
- (V4L2_H264_REF_LIST_LEN - 1))
- return -EINVAL;
- if (p_h264_pps->num_ref_idx_l1_default_active_minus1 >
- (V4L2_H264_REF_LIST_LEN - 1))
- return -EINVAL;
- if (p_h264_pps->weighted_bipred_idc > 2)
- return -EINVAL;
- /*
- * pic_init_qp_minus26 shall be in the range of
- * -(26 + QpBdOffset_y) to +25, inclusive,
- * where QpBdOffset_y is 6 * bit_depth_luma_minus8
- */
- if (p_h264_pps->pic_init_qp_minus26 < -62 ||
- p_h264_pps->pic_init_qp_minus26 > 25)
- return -EINVAL;
- if (p_h264_pps->pic_init_qs_minus26 < -26 ||
- p_h264_pps->pic_init_qs_minus26 > 25)
- return -EINVAL;
- if (p_h264_pps->chroma_qp_index_offset < -12 ||
- p_h264_pps->chroma_qp_index_offset > 12)
- return -EINVAL;
- if (p_h264_pps->second_chroma_qp_index_offset < -12 ||
- p_h264_pps->second_chroma_qp_index_offset > 12)
- return -EINVAL;
- break;
-
- case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
- break;
-
- case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
- p_h264_pred_weights = p;
-
- if (p_h264_pred_weights->luma_log2_weight_denom > 7)
- return -EINVAL;
- if (p_h264_pred_weights->chroma_log2_weight_denom > 7)
- return -EINVAL;
- break;
-
- case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
- p_h264_slice_params = p;
-
- if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
- p_h264_slice_params->flags &=
- ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
-
- if (p_h264_slice_params->colour_plane_id > 2)
- return -EINVAL;
- if (p_h264_slice_params->cabac_init_idc > 2)
- return -EINVAL;
- if (p_h264_slice_params->disable_deblocking_filter_idc > 2)
- return -EINVAL;
- if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 ||
- p_h264_slice_params->slice_alpha_c0_offset_div2 > 6)
- return -EINVAL;
- if (p_h264_slice_params->slice_beta_offset_div2 < -6 ||
- p_h264_slice_params->slice_beta_offset_div2 > 6)
- return -EINVAL;
-
- if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I ||
- p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI)
- p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0;
- if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
- p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0;
-
- if (p_h264_slice_params->num_ref_idx_l0_active_minus1 >
- (V4L2_H264_REF_LIST_LEN - 1))
- return -EINVAL;
- if (p_h264_slice_params->num_ref_idx_l1_active_minus1 >
- (V4L2_H264_REF_LIST_LEN - 1))
- return -EINVAL;
- zero_reserved(*p_h264_slice_params);
- break;
-
- case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
- p_h264_dec_params = p;
-
- if (p_h264_dec_params->nal_ref_idc > 3)
- return -EINVAL;
- for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
- struct v4l2_h264_dpb_entry *dpb_entry =
- &p_h264_dec_params->dpb[i];
-
- zero_reserved(*dpb_entry);
- }
- zero_reserved(*p_h264_dec_params);
- break;
-
- case V4L2_CTRL_TYPE_VP8_FRAME:
- p_vp8_frame = p;
-
- switch (p_vp8_frame->num_dct_parts) {
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
- zero_padding(p_vp8_frame->segment);
- zero_padding(p_vp8_frame->lf);
- zero_padding(p_vp8_frame->quant);
- zero_padding(p_vp8_frame->entropy);
- zero_padding(p_vp8_frame->coder_state);
- break;
-
- case V4L2_CTRL_TYPE_HEVC_SPS:
- p_hevc_sps = p;
-
- if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
- p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
- p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
- p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
- p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
- }
-
- if (!(p_hevc_sps->flags &
- V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
- p_hevc_sps->num_long_term_ref_pics_sps = 0;
- break;
-
- case V4L2_CTRL_TYPE_HEVC_PPS:
- p_hevc_pps = p;
-
- if (!(p_hevc_pps->flags &
- V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
- p_hevc_pps->diff_cu_qp_delta_depth = 0;
-
- if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
- p_hevc_pps->num_tile_columns_minus1 = 0;
- p_hevc_pps->num_tile_rows_minus1 = 0;
- memset(&p_hevc_pps->column_width_minus1, 0,
- sizeof(p_hevc_pps->column_width_minus1));
- memset(&p_hevc_pps->row_height_minus1, 0,
- sizeof(p_hevc_pps->row_height_minus1));
-
- p_hevc_pps->flags &=
- ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
- }
-
- if (p_hevc_pps->flags &
- V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
- p_hevc_pps->pps_beta_offset_div2 = 0;
- p_hevc_pps->pps_tc_offset_div2 = 0;
- }
-
- zero_padding(*p_hevc_pps);
- break;
-
- case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
- p_hevc_slice_params = p;
-
- if (p_hevc_slice_params->num_active_dpb_entries >
- V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
- return -EINVAL;
-
- zero_padding(p_hevc_slice_params->pred_weight_table);
-
- for (i = 0; i < p_hevc_slice_params->num_active_dpb_entries;
- i++) {
- struct v4l2_hevc_dpb_entry *dpb_entry =
- &p_hevc_slice_params->dpb[i];
-
- zero_padding(*dpb_entry);
- }
-
- zero_padding(*p_hevc_slice_params);
- break;
-
- case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
- break;
-
- case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
- p_hdr10_mastering = p;
-
- for (i = 0; i < 3; ++i) {
- if (p_hdr10_mastering->display_primaries_x[i] <
- V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
- p_hdr10_mastering->display_primaries_x[i] >
- V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
- p_hdr10_mastering->display_primaries_y[i] <
- V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
- p_hdr10_mastering->display_primaries_y[i] >
- V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
- return -EINVAL;
- }
-
- if (p_hdr10_mastering->white_point_x <
- V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW ||
- p_hdr10_mastering->white_point_x >
- V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH ||
- p_hdr10_mastering->white_point_y <
- V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW ||
- p_hdr10_mastering->white_point_y >
- V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH)
- return -EINVAL;
-
- if (p_hdr10_mastering->max_display_mastering_luminance <
- V4L2_HDR10_MASTERING_MAX_LUMA_LOW ||
- p_hdr10_mastering->max_display_mastering_luminance >
- V4L2_HDR10_MASTERING_MAX_LUMA_HIGH ||
- p_hdr10_mastering->min_display_mastering_luminance <
- V4L2_HDR10_MASTERING_MIN_LUMA_LOW ||
- p_hdr10_mastering->min_display_mastering_luminance >
- V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
- return -EINVAL;
-
- /* The following restriction comes from the ITU-T Rec. H.265 spec */
- if (p_hdr10_mastering->max_display_mastering_luminance ==
- V4L2_HDR10_MASTERING_MAX_LUMA_LOW &&
- p_hdr10_mastering->min_display_mastering_luminance ==
- V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
- return -EINVAL;
-
- break;
-
- case V4L2_CTRL_TYPE_AREA:
- area = p;
- if (!area->width || !area->height)
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
-{
- size_t len;
- u64 offset;
- s64 val;
-
- switch ((u32)ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
- case V4L2_CTRL_TYPE_INTEGER64:
- /*
- * We can't use the ROUND_TO_RANGE macro here because the
- * u64 division needs special care (hence the do_div() below).
- */
- val = ptr.p_s64[idx];
- if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
- val = ctrl->maximum;
- else
- val += (s64)(ctrl->step / 2);
- val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
- offset = val - ctrl->minimum;
- do_div(offset, ctrl->step);
- ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
- return 0;
- case V4L2_CTRL_TYPE_U8:
- return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
- case V4L2_CTRL_TYPE_U16:
- return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
- case V4L2_CTRL_TYPE_U32:
- return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- ptr.p_s32[idx] = !!ptr.p_s32[idx];
- return 0;
-
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
- return -ERANGE;
- if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
- (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
- return -EINVAL;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
- ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
- return -EINVAL;
- return 0;
-
- case V4L2_CTRL_TYPE_BITMASK:
- ptr.p_s32[idx] &= ctrl->maximum;
- return 0;
-
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_CTRL_CLASS:
- ptr.p_s32[idx] = 0;
- return 0;
-
- case V4L2_CTRL_TYPE_STRING:
- idx *= ctrl->elem_size;
- len = strlen(ptr.p_char + idx);
- if (len < ctrl->minimum)
- return -ERANGE;
- if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
- return -ERANGE;
- return 0;
-
- default:
- return std_validate_compound(ctrl, idx, ptr);
- }
-}
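A concrete reading of the menu branch above, with values of our own choosing:

	/*
	 * MENU control, minimum 0, maximum 3, menu_skip_mask 0x4:
	 *   index 4 -> -ERANGE (above maximum)
	 *   index 2 -> -EINVAL (BIT_ULL(2) is set in menu_skip_mask)
	 *   index 1 -> 0, provided qmenu[1] is a non-empty string
	 */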
-
-static const struct v4l2_ctrl_type_ops std_type_ops = {
- .equal = std_equal,
- .init = std_init,
- .log = std_log,
- .validate = std_validate,
-};
-
-/* Helper function: copy the given control value back to the caller */
-static int ptr_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl,
- union v4l2_ctrl_ptr ptr)
-{
- u32 len;
-
- if (ctrl->is_ptr && !ctrl->is_string)
- return copy_to_user(c->ptr, ptr.p_const, c->size) ?
- -EFAULT : 0;
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_STRING:
- len = strlen(ptr.p_char);
- if (c->size < len + 1) {
- c->size = ctrl->elem_size;
- return -ENOSPC;
- }
- return copy_to_user(c->string, ptr.p_char, len + 1) ?
- -EFAULT : 0;
- case V4L2_CTRL_TYPE_INTEGER64:
- c->value64 = *ptr.p_s64;
- break;
- default:
- c->value = *ptr.p_s32;
- break;
- }
- return 0;
-}
-
-/* Helper function: copy the current control value back to the caller */
-static int cur_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- return ptr_to_user(c, ctrl, ctrl->p_cur);
-}
-
-/* Helper function: copy the new control value back to the caller */
-static int new_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- return ptr_to_user(c, ctrl, ctrl->p_new);
-}
-
-/* Helper function: copy the request value back to the caller */
-static int req_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl_ref *ref)
-{
- return ptr_to_user(c, ref->ctrl, ref->p_req);
-}
-
- /* Helper function: copy the default control value back to the caller */
-static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
-{
- int idx;
-
- for (idx = 0; idx < ctrl->elems; idx++)
- ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
-
- return ptr_to_user(c, ctrl, ctrl->p_new);
-}
-
- /* Helper function: copy the caller-provided value to the given control value */
-static int user_to_ptr(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl,
- union v4l2_ctrl_ptr ptr)
-{
- int ret;
- u32 size;
-
- ctrl->is_new = 1;
- if (ctrl->is_ptr && !ctrl->is_string) {
- unsigned idx;
-
- ret = copy_from_user(ptr.p, c->ptr, c->size) ? -EFAULT : 0;
- if (ret || !ctrl->is_array)
- return ret;
- for (idx = c->size / ctrl->elem_size; idx < ctrl->elems; idx++)
- ctrl->type_ops->init(ctrl, idx, ptr);
- return 0;
- }
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER64:
- *ptr.p_s64 = c->value64;
- break;
- case V4L2_CTRL_TYPE_STRING:
- size = c->size;
- if (size == 0)
- return -ERANGE;
- if (size > ctrl->maximum + 1)
- size = ctrl->maximum + 1;
- ret = copy_from_user(ptr.p_char, c->string, size) ? -EFAULT : 0;
- if (!ret) {
- char last = ptr.p_char[size - 1];
-
- ptr.p_char[size - 1] = 0;
- /* If the string was longer than ctrl->maximum,
- then return an error. */
- if (strlen(ptr.p_char) == ctrl->maximum && last)
- return -ERANGE;
- }
- return ret;
- default:
- *ptr.p_s32 = c->value;
- break;
- }
- return 0;
-}
-
- /* Helper function: copy the caller-provided value as the new control value */
-static int user_to_new(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- return user_to_ptr(c, ctrl, ctrl->p_new);
-}
-
- /* Copy one control value to another. */
-static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
- union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to)
-{
- if (ctrl == NULL)
- return;
- memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
-}
-
-/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
-{
- bool changed;
-
- if (ctrl == NULL)
- return;
-
- /* has_changed is set by cluster_changed */
- changed = ctrl->has_changed;
- if (changed)
- ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur);
-
- if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
- /* Note: CH_FLAGS is only set for auto clusters. */
- ctrl->flags &=
- ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
- if (!is_cur_manual(ctrl->cluster[0])) {
- ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
- if (ctrl->cluster[0]->has_volatiles)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- }
- fh = NULL;
- }
- if (changed || ch_flags) {
- /* If a control was changed that was not one of the controls
- modified by the application, then send the event to all. */
- if (!ctrl->is_new)
- fh = NULL;
- send_event(fh, ctrl,
- (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
- if (ctrl->call_notify && changed && ctrl->handler->notify)
- ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
- }
-}
-
-/* Copy the current value to the new value */
-static void cur_to_new(struct v4l2_ctrl *ctrl)
-{
- if (ctrl == NULL)
- return;
- ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
-}
-
-/* Copy the new value to the request value */
-static void new_to_req(struct v4l2_ctrl_ref *ref)
-{
- if (!ref)
- return;
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
- ref->valid_p_req = true;
-}
-
-/* Copy the current value to the request value */
-static void cur_to_req(struct v4l2_ctrl_ref *ref)
-{
- if (!ref)
- return;
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
- ref->valid_p_req = true;
-}
-
-/* Copy the request value to the new value */
-static void req_to_new(struct v4l2_ctrl_ref *ref)
-{
- if (!ref)
- return;
- if (ref->valid_p_req)
- ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
- else
- ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
-}
-
- /* Return non-zero if one or more of the controls in the cluster have a new
-    value that differs from the current value. */
-static int cluster_changed(struct v4l2_ctrl *master)
-{
- bool changed = false;
- unsigned idx;
- int i;
-
- for (i = 0; i < master->ncontrols; i++) {
- struct v4l2_ctrl *ctrl = master->cluster[i];
- bool ctrl_changed = false;
-
- if (ctrl == NULL)
- continue;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE)
- changed = ctrl_changed = true;
-
- /*
- * Set has_changed to false to avoid generating
- * the event V4L2_EVENT_CTRL_CH_VALUE
- */
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
- ctrl->has_changed = false;
- continue;
- }
-
- for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
- ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
- ctrl->p_cur, ctrl->p_new);
- ctrl->has_changed = ctrl_changed;
- changed |= ctrl->has_changed;
- }
- return changed;
-}
-
-/* Control range checking */
-static int check_range(enum v4l2_ctrl_type type,
- s64 min, s64 max, u64 step, s64 def)
-{
- switch (type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- if (step != 1 || max > 1 || min < 0)
- return -ERANGE;
- fallthrough;
- case V4L2_CTRL_TYPE_U8:
- case V4L2_CTRL_TYPE_U16:
- case V4L2_CTRL_TYPE_U32:
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_INTEGER64:
- if (step == 0 || min > max || def < min || def > max)
- return -ERANGE;
- return 0;
- case V4L2_CTRL_TYPE_BITMASK:
- if (step || min || !max || (def & ~max))
- return -ERANGE;
- return 0;
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (min > max || def < min || def > max)
- return -ERANGE;
- /* Note: step == menu_skip_mask for menu controls.
- So here we check if the default value is masked out. */
- if (step && ((1 << def) & step))
- return -EINVAL;
- return 0;
- case V4L2_CTRL_TYPE_STRING:
- if (min > max || min < 0 || step < 1 || def)
- return -ERANGE;
- return 0;
- default:
- return 0;
- }
-}
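A worked example of the boolean fallthrough above (values ours):

	/*
	 * check_range(V4L2_CTRL_TYPE_BOOLEAN, 0, 1, 1, 2) == -ERANGE:
	 * the boolean checks pass (step 1, range [0, 1]) and control
	 * falls through to the integer checks, where def == 2 > max.
	 */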
-
-/* Validate a new control */
-static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
-{
- unsigned idx;
- int err = 0;
-
- for (idx = 0; !err && idx < ctrl->elems; idx++)
- err = ctrl->type_ops->validate(ctrl, idx, p_new);
- return err;
-}
-
-static inline u32 node2id(struct list_head *node)
-{
- return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
-}
-
-/* Set the handler's error code if it wasn't set earlier already */
-static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
-{
- if (hdl->error == 0)
- hdl->error = err;
- return err;
-}
-
-/* Initialize the handler */
-int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
- unsigned nr_of_controls_hint,
- struct lock_class_key *key, const char *name)
-{
- mutex_init(&hdl->_lock);
- hdl->lock = &hdl->_lock;
- lockdep_set_class_and_name(hdl->lock, key, name);
- INIT_LIST_HEAD(&hdl->ctrls);
- INIT_LIST_HEAD(&hdl->ctrl_refs);
- INIT_LIST_HEAD(&hdl->requests);
- INIT_LIST_HEAD(&hdl->requests_queued);
- hdl->request_is_queued = false;
- hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
- hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
- sizeof(hdl->buckets[0]),
- GFP_KERNEL | __GFP_ZERO);
- hdl->error = hdl->buckets ? 0 : -ENOMEM;
- media_request_object_init(&hdl->req_obj);
- return hdl->error;
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
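Drivers normally reach this through the v4l2_ctrl_handler_init() wrapper macro from media/v4l2-ctrls.h, which supplies the lockdep key and name. A minimal sketch; with a hint of 4 the hash gets 1 + 4/8 = 1 bucket:

	struct v4l2_ctrl_handler hdl;

	v4l2_ctrl_handler_init(&hdl, 4);
	if (hdl.error)		/* -ENOMEM if the bucket array failed */
		return hdl.error;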
-
-/* Free all controls and control refs */
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
-{
- struct v4l2_ctrl_ref *ref, *next_ref;
- struct v4l2_ctrl *ctrl, *next_ctrl;
- struct v4l2_subscribed_event *sev, *next_sev;
-
- if (hdl == NULL || hdl->buckets == NULL)
- return;
-
- /*
- * If the main handler is freed and it is used by handler objects in
- * outstanding requests, then unbind and put those objects before
- * freeing the main handler.
- *
- * The main handler can be identified by having a NULL ops pointer in
- * the request object.
- */
- if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
- struct v4l2_ctrl_handler *req, *next_req;
-
- list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
- media_request_object_unbind(&req->req_obj);
- media_request_object_put(&req->req_obj);
- }
- }
- mutex_lock(hdl->lock);
- /* Free all nodes */
- list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
- list_del(&ref->node);
- kfree(ref);
- }
- /* Free all controls owned by the handler */
- list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
- list_del(&ctrl->node);
- list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
- list_del(&sev->node);
- kvfree(ctrl);
- }
- kvfree(hdl->buckets);
- hdl->buckets = NULL;
- hdl->cached = NULL;
- hdl->error = 0;
- mutex_unlock(hdl->lock);
- mutex_destroy(&hdl->_lock);
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_free);
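A hedged sketch of the usual init/teardown pairing in a driver. Because errors are sticky (see handler_set_err() above), a whole series of v4l2_ctrl_new_std() calls needs only one check at the end; and since v4l2_ctrl_handler_free() resets hdl->error to 0, the error must be saved before freeing (my_dev and my_ctrl_ops are hypothetical):

	static int my_init_controls(struct my_dev *dev)
	{
		struct v4l2_ctrl_handler *hdl = &dev->hdl;
		int err;

		v4l2_ctrl_handler_init(hdl, 2);
		v4l2_ctrl_new_std(hdl, &my_ctrl_ops, V4L2_CID_BRIGHTNESS,
				  0, 255, 1, 128);
		v4l2_ctrl_new_std(hdl, &my_ctrl_ops, V4L2_CID_CONTRAST,
				  0, 255, 1, 64);

		err = hdl->error;	/* sticky: the first failure wins */
		if (err)
			v4l2_ctrl_handler_free(hdl);
		return err;
	}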
-
-/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
- be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
- with applications that do not use the NEXT_CTRL flag.
-
- We just find the n-th private user control. It's O(N), but that should not
- be an issue in this particular case. */
-static struct v4l2_ctrl_ref *find_private_ref(
- struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref;
-
- id -= V4L2_CID_PRIVATE_BASE;
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- /* Search for private user controls that are compatible with
- VIDIOC_G/S_CTRL. */
- if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
- V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
- if (!ref->ctrl->is_int)
- continue;
- if (id == 0)
- return ref;
- id--;
- }
- }
- return NULL;
-}
-
-/* Find a control with the given ID. */
-static struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref;
- int bucket;
-
- id &= V4L2_CTRL_ID_MASK;
-
- /* Old-style private controls need special handling */
- if (id >= V4L2_CID_PRIVATE_BASE)
- return find_private_ref(hdl, id);
- bucket = id % hdl->nr_of_buckets;
-
- /* Simple optimization: cache the last control found */
- if (hdl->cached && hdl->cached->ctrl->id == id)
- return hdl->cached;
-
- /* Not in cache, search the hash */
- ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
- while (ref && ref->ctrl->id != id)
- ref = ref->next;
-
- if (ref)
- hdl->cached = ref; /* cache it! */
- return ref;
-}
-
-/* Find a control with the given ID. Take the handler's lock first. */
-static struct v4l2_ctrl_ref *find_ref_lock(
- struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref = NULL;
-
- if (hdl) {
- mutex_lock(hdl->lock);
- ref = find_ref(hdl, id);
- mutex_unlock(hdl->lock);
- }
- return ref;
-}
-
-/* Find a control with the given ID. */
-struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
-
- return ref ? ref->ctrl : NULL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_find);
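Sketch of a typical lookup from elsewhere in a driver (hdl assumed populated; v4l2_ctrl_s_ctrl() is the standard setter and takes the handler lock itself):

	struct v4l2_ctrl *gain = v4l2_ctrl_find(hdl, V4L2_CID_GAIN);

	if (gain)
		v4l2_ctrl_s_ctrl(gain, 16);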
-
-/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
-static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl,
- struct v4l2_ctrl_ref **ctrl_ref,
- bool from_other_dev, bool allocate_req)
-{
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl_ref *new_ref;
- u32 id = ctrl->id;
- u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
- int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
- unsigned int size_extra_req = 0;
-
- if (ctrl_ref)
- *ctrl_ref = NULL;
-
- /*
- * Automatically add the control class if it is not yet present and
- * the new control is not a compound control.
- */
- if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
- id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
- if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
- return hdl->error;
-
- if (hdl->error)
- return hdl->error;
-
- if (allocate_req)
- size_extra_req = ctrl->elems * ctrl->elem_size;
- new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
- if (!new_ref)
- return handler_set_err(hdl, -ENOMEM);
- new_ref->ctrl = ctrl;
- new_ref->from_other_dev = from_other_dev;
- if (size_extra_req)
- new_ref->p_req.p = &new_ref[1];
-
- INIT_LIST_HEAD(&new_ref->node);
-
- mutex_lock(hdl->lock);
-
- /* Add immediately at the end of the list if the list is empty, or if
- the last element in the list has a lower ID.
- This ensures that when elements are added in ascending order the
- insertion is an O(1) operation. */
- if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
- list_add_tail(&new_ref->node, &hdl->ctrl_refs);
- goto insert_in_hash;
- }
-
- /* Find insert position in sorted list */
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- if (ref->ctrl->id < id)
- continue;
- /* Don't add duplicates */
- if (ref->ctrl->id == id) {
- kfree(new_ref);
- goto unlock;
- }
- list_add(&new_ref->node, ref->node.prev);
- break;
- }
-
-insert_in_hash:
- /* Insert the control node in the hash */
- new_ref->next = hdl->buckets[bucket];
- hdl->buckets[bucket] = new_ref;
- if (ctrl_ref)
- *ctrl_ref = new_ref;
- if (ctrl->handler == hdl) {
- /* By default each control starts in a cluster of its own.
- * new_ref->ctrl is basically a cluster array with one
- * element, so that's perfect to use as the cluster pointer.
- * But only do this for the handler that owns the control.
- */
- ctrl->cluster = &new_ref->ctrl;
- ctrl->ncontrols = 1;
- }
-
-unlock:
- mutex_unlock(hdl->lock);
- return 0;
-}
-
-/* Add a new control */
-static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- const struct v4l2_ctrl_type_ops *type_ops,
- u32 id, const char *name, enum v4l2_ctrl_type type,
- s64 min, s64 max, u64 step, s64 def,
- const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
- u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
- void *priv)
-{
- struct v4l2_ctrl *ctrl;
- unsigned sz_extra;
- unsigned nr_of_dims = 0;
- unsigned elems = 1;
- bool is_array;
- unsigned tot_ctrl_size;
- unsigned idx;
- void *data;
- int err;
-
- if (hdl->error)
- return NULL;
-
- while (dims && dims[nr_of_dims]) {
- elems *= dims[nr_of_dims];
- nr_of_dims++;
- if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
- break;
- }
- is_array = nr_of_dims > 0;
-
- /* Prefill elem_size for all types handled by std_type_ops */
- switch ((u32)type) {
- case V4L2_CTRL_TYPE_INTEGER64:
- elem_size = sizeof(s64);
- break;
- case V4L2_CTRL_TYPE_STRING:
- elem_size = max + 1;
- break;
- case V4L2_CTRL_TYPE_U8:
- elem_size = sizeof(u8);
- break;
- case V4L2_CTRL_TYPE_U16:
- elem_size = sizeof(u16);
- break;
- case V4L2_CTRL_TYPE_U32:
- elem_size = sizeof(u32);
- break;
- case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
- elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params);
- break;
- case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
- elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization);
- break;
- case V4L2_CTRL_TYPE_FWHT_PARAMS:
- elem_size = sizeof(struct v4l2_ctrl_fwht_params);
- break;
- case V4L2_CTRL_TYPE_H264_SPS:
- elem_size = sizeof(struct v4l2_ctrl_h264_sps);
- break;
- case V4L2_CTRL_TYPE_H264_PPS:
- elem_size = sizeof(struct v4l2_ctrl_h264_pps);
- break;
- case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
- elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
- break;
- case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
- elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
- break;
- case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
- elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
- break;
- case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
- elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
- break;
- case V4L2_CTRL_TYPE_VP8_FRAME:
- elem_size = sizeof(struct v4l2_ctrl_vp8_frame);
- break;
- case V4L2_CTRL_TYPE_HEVC_SPS:
- elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
- break;
- case V4L2_CTRL_TYPE_HEVC_PPS:
- elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
- break;
- case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
- elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
- break;
- case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
- elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info);
- break;
- case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
- elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
- break;
- case V4L2_CTRL_TYPE_AREA:
- elem_size = sizeof(struct v4l2_area);
- break;
- default:
- if (type < V4L2_CTRL_COMPOUND_TYPES)
- elem_size = sizeof(s32);
- break;
- }
- tot_ctrl_size = elem_size * elems;
-
- /* Sanity checks */
- if (id == 0 || name == NULL || !elem_size ||
- id >= V4L2_CID_PRIVATE_BASE ||
- (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
- (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
- err = check_range(type, min, max, step, def);
- if (err) {
- handler_set_err(hdl, err);
- return NULL;
- }
- if (is_array &&
- (type == V4L2_CTRL_TYPE_BUTTON ||
- type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
-
- sz_extra = 0;
- if (type == V4L2_CTRL_TYPE_BUTTON)
- flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
- V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
- else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
- flags |= V4L2_CTRL_FLAG_READ_ONLY;
- else if (type == V4L2_CTRL_TYPE_INTEGER64 ||
- type == V4L2_CTRL_TYPE_STRING ||
- type >= V4L2_CTRL_COMPOUND_TYPES ||
- is_array)
- sz_extra += 2 * tot_ctrl_size;
-
- if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
- sz_extra += elem_size;
-
- ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
- if (ctrl == NULL) {
- handler_set_err(hdl, -ENOMEM);
- return NULL;
- }
-
- INIT_LIST_HEAD(&ctrl->node);
- INIT_LIST_HEAD(&ctrl->ev_subs);
- ctrl->handler = hdl;
- ctrl->ops = ops;
- ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
- ctrl->id = id;
- ctrl->name = name;
- ctrl->type = type;
- ctrl->flags = flags;
- ctrl->minimum = min;
- ctrl->maximum = max;
- ctrl->step = step;
- ctrl->default_value = def;
- ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
- ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
- ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
- ctrl->is_array = is_array;
- ctrl->elems = elems;
- ctrl->nr_of_dims = nr_of_dims;
- if (nr_of_dims)
- memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
- ctrl->elem_size = elem_size;
- if (type == V4L2_CTRL_TYPE_MENU)
- ctrl->qmenu = qmenu;
- else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
- ctrl->qmenu_int = qmenu_int;
- ctrl->priv = priv;
- ctrl->cur.val = ctrl->val = def;
- data = &ctrl[1];
-
- if (!ctrl->is_int) {
- ctrl->p_new.p = data;
- ctrl->p_cur.p = data + tot_ctrl_size;
- } else {
- ctrl->p_new.p = &ctrl->val;
- ctrl->p_cur.p = &ctrl->cur.val;
- }
-
- if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
- ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
- memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
- }
-
- for (idx = 0; idx < elems; idx++) {
- ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
- ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
- }
-
- if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
- kvfree(ctrl);
- return NULL;
- }
- mutex_lock(hdl->lock);
- list_add_tail(&ctrl->node, &hdl->ctrls);
- mutex_unlock(hdl->lock);
- return ctrl;
-}
-
-struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_config *cfg, void *priv)
-{
- bool is_menu;
- struct v4l2_ctrl *ctrl;
- const char *name = cfg->name;
- const char * const *qmenu = cfg->qmenu;
- const s64 *qmenu_int = cfg->qmenu_int;
- enum v4l2_ctrl_type type = cfg->type;
- u32 flags = cfg->flags;
- s64 min = cfg->min;
- s64 max = cfg->max;
- u64 step = cfg->step;
- s64 def = cfg->def;
-
- if (name == NULL)
- v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
- &def, &flags);
-
- is_menu = (type == V4L2_CTRL_TYPE_MENU ||
- type == V4L2_CTRL_TYPE_INTEGER_MENU);
- if (is_menu)
- WARN_ON(step);
- else
- WARN_ON(cfg->menu_skip_mask);
- if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
- qmenu = v4l2_ctrl_get_menu(cfg->id);
- } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
-
- ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
- type, min, max,
- is_menu ? cfg->menu_skip_mask : step, def,
- cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, cfg->p_def, priv);
- if (ctrl)
- ctrl->is_private = cfg->is_private;
- return ctrl;
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_custom);
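(For reference, a minimal driver-side sketch of v4l2_ctrl_new_custom(); the CID,
ops and state names below are illustrative assumptions, not taken from this file.
Since .name is set, v4l2_ctrl_fill() is skipped, so .type must be filled in by
the caller.)

    #define MYDRV_CID_GLOW (V4L2_CID_USER_BASE + 0x1000)  /* hypothetical CID */

    static const struct v4l2_ctrl_config mydrv_glow_cfg = {
        .ops  = &mydrv_ctrl_ops,  /* assumed driver control ops */
        .id   = MYDRV_CID_GLOW,
        .name = "Glow Strength",
        .type = V4L2_CTRL_TYPE_INTEGER,
        .min  = 0,
        .max  = 100,
        .step = 1,
        .def  = 50,
    };

    v4l2_ctrl_new_custom(&mydrv->hdl, &mydrv_glow_cfg, NULL);
    if (mydrv->hdl.error)
        return mydrv->hdl.error;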
-
-/* Helper function for standard non-menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, s64 min, s64 max, u64 step, s64 def)
-{
- const char *name;
- enum v4l2_ctrl_type type;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type == V4L2_CTRL_TYPE_MENU ||
- type == V4L2_CTRL_TYPE_INTEGER_MENU ||
- type >= V4L2_CTRL_COMPOUND_TYPES) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
- min, max, step, def, NULL, 0,
- flags, NULL, NULL, ptr_null, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std);
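(A minimal sketch of the usual registration pattern, assuming a driver 'state'
struct holding the handler and a 'mydrv_ctrl_ops'; errors accumulate in
hdl->error, so one check after a batch of *_new() calls suffices.)

    v4l2_ctrl_handler_init(&state->hdl, 2);
    v4l2_ctrl_new_std(&state->hdl, &mydrv_ctrl_ops,
                      V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
    v4l2_ctrl_new_std(&state->hdl, &mydrv_ctrl_ops,
                      V4L2_CID_CONTRAST, 0, 127, 1, 64);
    if (state->hdl.error) {
        int err = state->hdl.error;

        v4l2_ctrl_handler_free(&state->hdl);
        return err;
    }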
-
-/* Helper function for standard menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, u8 _max, u64 mask, u8 _def)
-{
- const char * const *qmenu = NULL;
- const s64 *qmenu_int = NULL;
- unsigned int qmenu_int_len = 0;
- const char *name;
- enum v4l2_ctrl_type type;
- s64 min;
- s64 max = _max;
- s64 def = _def;
- u64 step;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
-
- if (type == V4L2_CTRL_TYPE_MENU)
- qmenu = v4l2_ctrl_get_menu(id);
- else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
- qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
-
- if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
- 0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, ptr_null, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
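(A hedged usage sketch: for menu controls the 'step' slot of v4l2_ctrl_new()
carries the skip mask instead, and a zero mask exposes every menu item. The
handler and ops names are assumptions.)

    v4l2_ctrl_new_std_menu(&state->hdl, &mydrv_ctrl_ops,
                           V4L2_CID_POWER_LINE_FREQUENCY,
                           V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
                           0, V4L2_CID_POWER_LINE_FREQUENCY_50HZ);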
-
-/* Helper function for standard menu controls with driver defined menu */
-struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
- u64 mask, u8 _def, const char * const *qmenu)
-{
- enum v4l2_ctrl_type type;
- const char *name;
- u32 flags;
- u64 step;
- s64 min;
- s64 max = _max;
- s64 def = _def;
-
- /* v4l2_ctrl_new_std_menu_items() should only be called for
- standard controls without a standard menu. */
- if (v4l2_ctrl_get_menu(id)) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
- 0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, ptr_null, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
-
-/* Helper function for standard compound controls */
-struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops, u32 id,
- const union v4l2_ctrl_ptr p_def)
-{
- const char *name;
- enum v4l2_ctrl_type type;
- u32 flags;
- s64 min, max, step, def;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type < V4L2_CTRL_COMPOUND_TYPES) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
- min, max, step, def, NULL, 0,
- flags, NULL, NULL, p_def, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
-
-/* Helper function for standard integer menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
-{
- const char *name;
- enum v4l2_ctrl_type type;
- s64 min;
- u64 step;
- s64 max = _max;
- s64 def = _def;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
- 0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, ptr_null, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
-
-/* Add the controls from another handler to our own. */
-int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl_handler *add,
- bool (*filter)(const struct v4l2_ctrl *ctrl),
- bool from_other_dev)
-{
- struct v4l2_ctrl_ref *ref;
- int ret = 0;
-
- /* Do nothing if either handler is NULL or if they are the same */
- if (!hdl || !add || hdl == add)
- return 0;
- if (hdl->error)
- return hdl->error;
- mutex_lock(add->lock);
- list_for_each_entry(ref, &add->ctrl_refs, node) {
- struct v4l2_ctrl *ctrl = ref->ctrl;
-
- /* Skip handler-private controls. */
- if (ctrl->is_private)
- continue;
- /* And control classes */
- if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- continue;
- /* Filter any unwanted controls */
- if (filter && !filter(ctrl))
- continue;
- ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
- if (ret)
- break;
- }
- mutex_unlock(add->lock);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_add_handler);
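(Sketch of a bridge driver inheriting subdev controls; 'dev' and 'sd' are assumed
driver state. Passing v4l2_ctrl_radio_filter keeps only the radio-relevant
controls, while a NULL filter inherits everything that is neither
handler-private nor a control class.)

    err = v4l2_ctrl_add_handler(&dev->radio_ctrl_handler, sd->ctrl_handler,
                                v4l2_ctrl_radio_filter,
                                true /* controls come from another device */);
    if (err)
        return err;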
-
-bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
-{
- if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
- return true;
- if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
- return true;
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return true;
- default:
- break;
- }
- return false;
-}
-EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
-
-/* Cluster controls */
-void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
-{
- bool has_volatiles = false;
- int i;
-
- /* The first control is the master control and it must not be NULL */
- if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
- return;
-
- for (i = 0; i < ncontrols; i++) {
- if (controls[i]) {
- controls[i]->cluster = controls;
- controls[i]->ncontrols = ncontrols;
- if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
- has_volatiles = true;
- }
- }
- controls[0]->has_volatiles = has_volatiles;
-}
-EXPORT_SYMBOL(v4l2_ctrl_cluster);
-
-void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
- u8 manual_val, bool set_volatile)
-{
- struct v4l2_ctrl *master = controls[0];
- u32 flag = 0;
- int i;
-
- v4l2_ctrl_cluster(ncontrols, controls);
- WARN_ON(ncontrols <= 1);
- WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
- WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
- master->is_auto = true;
- master->has_volatiles = set_volatile;
- master->manual_mode_value = manual_val;
- master->flags |= V4L2_CTRL_FLAG_UPDATE;
-
- if (!is_cur_manual(master))
- flag = V4L2_CTRL_FLAG_INACTIVE |
- (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
-
- for (i = 1; i < ncontrols; i++)
- if (controls[i])
- controls[i]->flags |= flag;
-}
-EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
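(A sketch of the classic autogain/gain pairing. Note that each control keeps
pointing into the array passed here, so the array must outlive the controls —
typically it lives in the driver state, as assumed below.)

    state->gain_ctrls[0] = state->autogain;  /* master: V4L2_CID_AUTOGAIN */
    state->gain_ctrls[1] = state->gain;      /* volatile while in auto mode */
    v4l2_ctrl_auto_cluster(2, state->gain_ctrls,
                           0 /* manual mode value: autogain off */, true);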
-
-/* Activate/deactivate a control. */
-void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
-{
- /* invert since the actual flag is called 'inactive' */
- bool inactive = !active;
- bool old;
-
- if (ctrl == NULL)
- return;
-
- if (inactive)
- /* set V4L2_CTRL_FLAG_INACTIVE */
- old = test_and_set_bit(4, &ctrl->flags);
- else
- /* clear V4L2_CTRL_FLAG_INACTIVE */
- old = test_and_clear_bit(4, &ctrl->flags);
- if (old != inactive)
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
-}
-EXPORT_SYMBOL(v4l2_ctrl_activate);
-
-void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
-{
- bool old;
-
- if (ctrl == NULL)
- return;
-
- lockdep_assert_held(ctrl->handler->lock);
-
- if (grabbed)
- /* set V4L2_CTRL_FLAG_GRABBED */
- old = test_and_set_bit(1, &ctrl->flags);
- else
- /* clear V4L2_CTRL_FLAG_GRABBED */
- old = test_and_clear_bit(1, &ctrl->flags);
- if (old != grabbed)
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
-}
-EXPORT_SYMBOL(__v4l2_ctrl_grab);
-
-/* Log the control name and value */
-static void log_ctrl(const struct v4l2_ctrl *ctrl,
- const char *prefix, const char *colon)
-{
- if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
- return;
- if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- return;
-
- pr_info("%s%s%s: ", prefix, colon, ctrl->name);
-
- ctrl->type_ops->log(ctrl);
-
- if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
- V4L2_CTRL_FLAG_GRABBED |
- V4L2_CTRL_FLAG_VOLATILE)) {
- if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
- pr_cont(" inactive");
- if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
- pr_cont(" grabbed");
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
- pr_cont(" volatile");
- }
- pr_cont("\n");
-}
-
-/* Log all controls owned by the handler */
-void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
- const char *prefix)
-{
- struct v4l2_ctrl *ctrl;
- const char *colon = "";
- int len;
-
- if (hdl == NULL)
- return;
- if (prefix == NULL)
- prefix = "";
- len = strlen(prefix);
- if (len && prefix[len - 1] != ' ')
- colon = ": ";
- mutex_lock(hdl->lock);
- list_for_each_entry(ctrl, &hdl->ctrls, node)
- if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
- log_ctrl(ctrl, prefix, colon);
- mutex_unlock(hdl->lock);
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
-
-int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
-{
- v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
-
-/* Call s_ctrl for all controls owned by the handler */
-int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
-{
- struct v4l2_ctrl *ctrl;
- int ret = 0;
-
- if (hdl == NULL)
- return 0;
-
- lockdep_assert_held(hdl->lock);
-
- list_for_each_entry(ctrl, &hdl->ctrls, node)
- ctrl->done = false;
-
- list_for_each_entry(ctrl, &hdl->ctrls, node) {
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int i;
-
- /* Skip if this control was already handled by a cluster.
- Skip button controls and read-only controls. */
- if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
- continue;
-
- for (i = 0; i < master->ncontrols; i++) {
- if (master->cluster[i]) {
- cur_to_new(master->cluster[i]);
- master->cluster[i]->is_new = 1;
- master->cluster[i]->done = true;
- }
- }
- ret = call_op(master, s_ctrl);
- if (ret)
- break;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
-
-int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
-{
- int ret;
-
- if (hdl == NULL)
- return 0;
-
- mutex_lock(hdl->lock);
- ret = __v4l2_ctrl_handler_setup(hdl);
- mutex_unlock(hdl->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
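(Typically called once the hardware is powered and ready — e.g. at the end of
probe or from an s_power/runtime-resume path — to push every control's current
value to the device. A sketch with assumed names:)

    err = v4l2_ctrl_handler_setup(&state->hdl);
    if (err)
        dev_err(&client->dev, "failed to apply controls (%d)\n", err);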
-
-/* Implement VIDIOC_QUERY_EXT_CTRL */
-int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
-{
- const unsigned next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
- u32 id = qc->id & V4L2_CTRL_ID_MASK;
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl *ctrl;
-
- if (hdl == NULL)
- return -EINVAL;
-
- mutex_lock(hdl->lock);
-
- /* Try to find it */
- ref = find_ref(hdl, id);
-
- if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
- bool is_compound;
- /* Match any control that is not hidden */
- unsigned mask = 1;
- bool match = false;
-
- if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
- /* Match any hidden control */
- match = true;
- } else if ((qc->id & next_flags) == next_flags) {
- /* Match any control, compound or not */
- mask = 0;
- }
-
- /* Find the next control with ID > qc->id */
-
- /* Did we reach the end of the control list? */
- if (id >= node2id(hdl->ctrl_refs.prev)) {
- ref = NULL; /* Yes, so there is no next control */
- } else if (ref) {
- /* We found a control with the given ID, so just get
- the next valid one in the list. */
- list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
- is_compound = ref->ctrl->is_array ||
- ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
- if (id < ref->ctrl->id &&
- (is_compound & mask) == match)
- break;
- }
- if (&ref->node == &hdl->ctrl_refs)
- ref = NULL;
- } else {
- /* No control with the given ID exists, so start
- searching for the next largest ID. We know there
- is one, otherwise the first 'if' above would have
- been true. */
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- is_compound = ref->ctrl->is_array ||
- ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
- if (id < ref->ctrl->id &&
- (is_compound & mask) == match)
- break;
- }
- if (&ref->node == &hdl->ctrl_refs)
- ref = NULL;
- }
- }
- mutex_unlock(hdl->lock);
-
- if (!ref)
- return -EINVAL;
-
- ctrl = ref->ctrl;
- memset(qc, 0, sizeof(*qc));
- if (id >= V4L2_CID_PRIVATE_BASE)
- qc->id = id;
- else
- qc->id = ctrl->id;
- strscpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->flags = user_flags(ctrl);
- qc->type = ctrl->type;
- qc->elem_size = ctrl->elem_size;
- qc->elems = ctrl->elems;
- qc->nr_of_dims = ctrl->nr_of_dims;
- memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
- qc->minimum = ctrl->minimum;
- qc->maximum = ctrl->maximum;
- qc->default_value = ctrl->default_value;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
-     ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
- qc->step = 1;
- else
- qc->step = ctrl->step;
- return 0;
-}
-EXPORT_SYMBOL(v4l2_query_ext_ctrl);
-
-/* Implement VIDIOC_QUERYCTRL */
-int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
-{
- struct v4l2_query_ext_ctrl qec = { qc->id };
- int rc;
-
- rc = v4l2_query_ext_ctrl(hdl, &qec);
- if (rc)
- return rc;
-
- qc->id = qec.id;
- qc->type = qec.type;
- qc->flags = qec.flags;
- strscpy(qc->name, qec.name, sizeof(qc->name));
- switch (qc->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- case V4L2_CTRL_TYPE_STRING:
- case V4L2_CTRL_TYPE_BITMASK:
- qc->minimum = qec.minimum;
- qc->maximum = qec.maximum;
- qc->step = qec.step;
- qc->default_value = qec.default_value;
- break;
- default:
- qc->minimum = 0;
- qc->maximum = 0;
- qc->step = 0;
- qc->default_value = 0;
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(v4l2_queryctrl);
-
-/* Implement VIDIOC_QUERYMENU */
-int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
-{
- struct v4l2_ctrl *ctrl;
- u32 i = qm->index;
-
- ctrl = v4l2_ctrl_find(hdl, qm->id);
- if (!ctrl)
- return -EINVAL;
-
- qm->reserved = 0;
- /* Sanity checks */
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_MENU:
- if (ctrl->qmenu == NULL)
- return -EINVAL;
- break;
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (ctrl->qmenu_int == NULL)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (i < ctrl->minimum || i > ctrl->maximum)
- return -EINVAL;
-
- /* Use mask to see if this menu item should be skipped */
- if (ctrl->menu_skip_mask & (1ULL << i))
- return -EINVAL;
- /* Empty menu items should also be skipped */
- if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
- if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
- return -EINVAL;
- strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
- } else {
- qm->value = ctrl->qmenu_int[i];
- }
- return 0;
-}
-EXPORT_SYMBOL(v4l2_querymenu);
-
-static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_handler *from)
-{
- struct v4l2_ctrl_ref *ref;
- int err = 0;
-
- if (WARN_ON(!hdl || hdl == from))
- return -EINVAL;
-
- if (hdl->error)
- return hdl->error;
-
- WARN_ON(hdl->lock != &hdl->_lock);
-
- mutex_lock(from->lock);
- list_for_each_entry(ref, &from->ctrl_refs, node) {
- struct v4l2_ctrl *ctrl = ref->ctrl;
- struct v4l2_ctrl_ref *new_ref;
-
- /* Skip refs inherited from other devices */
- if (ref->from_other_dev)
- continue;
- err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
- if (err)
- break;
- }
- mutex_unlock(from->lock);
- return err;
-}
-
-static void v4l2_ctrl_request_queue(struct media_request_object *obj)
-{
- struct v4l2_ctrl_handler *hdl =
- container_of(obj, struct v4l2_ctrl_handler, req_obj);
- struct v4l2_ctrl_handler *main_hdl = obj->priv;
-
- mutex_lock(main_hdl->lock);
- list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
- hdl->request_is_queued = true;
- mutex_unlock(main_hdl->lock);
-}
-
-static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
-{
- struct v4l2_ctrl_handler *hdl =
- container_of(obj, struct v4l2_ctrl_handler, req_obj);
- struct v4l2_ctrl_handler *main_hdl = obj->priv;
-
- mutex_lock(main_hdl->lock);
- list_del_init(&hdl->requests);
- if (hdl->request_is_queued) {
- list_del_init(&hdl->requests_queued);
- hdl->request_is_queued = false;
- }
- mutex_unlock(main_hdl->lock);
-}
-
-static void v4l2_ctrl_request_release(struct media_request_object *obj)
-{
- struct v4l2_ctrl_handler *hdl =
- container_of(obj, struct v4l2_ctrl_handler, req_obj);
-
- v4l2_ctrl_handler_free(hdl);
- kfree(hdl);
-}
-
-static const struct media_request_object_ops req_ops = {
- .queue = v4l2_ctrl_request_queue,
- .unbind = v4l2_ctrl_request_unbind,
- .release = v4l2_ctrl_request_release,
-};
-
-struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
- struct v4l2_ctrl_handler *parent)
-{
- struct media_request_object *obj;
-
- if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
- req->state != MEDIA_REQUEST_STATE_QUEUED))
- return NULL;
-
- obj = media_request_object_find(req, &req_ops, parent);
- if (obj)
- return container_of(obj, struct v4l2_ctrl_handler, req_obj);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
-
-struct v4l2_ctrl *
-v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
-
- return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
-
-static int v4l2_ctrl_request_bind(struct media_request *req,
- struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl_handler *from)
-{
- int ret;
-
- ret = v4l2_ctrl_request_clone(hdl, from);
-
- if (!ret) {
- ret = media_request_object_bind(req, &req_ops,
- from, false, &hdl->req_obj);
- if (!ret) {
- mutex_lock(from->lock);
- list_add_tail(&hdl->requests, &from->requests);
- mutex_unlock(from->lock);
- }
- }
- return ret;
-}
-
-/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
-
- It is not a fully atomic operation, just best-effort. After all, if
- multiple controls have to be set through multiple i2c writes (for
- example), then some initial writes may succeed while others fail,
- leaving the system in an inconsistent state. The question is how much
- effort you are willing to spend on trying to make something atomic that
- really isn't.
-
- From the point of view of an application the main requirement is that
- when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
- error should be returned without actually affecting any controls.
-
- If all the values are correct, then it is acceptable to just give up
- in case of low-level errors.
-
- It is important though that the application can tell when only a partial
- configuration was done. The way we do that is through the error_idx field
- of struct v4l2_ext_controls: if that is equal to the count field then no
- controls were affected. Otherwise all controls before that index were
- successful in performing their 'get' or 'set' operation, the control at
- the given index failed, and you don't know what happened with the controls
- after the failed one: if they were part of a control cluster they could
- have been successfully processed (if a cluster member was encountered at
- index < error_idx), they could have failed (if a cluster member was at
- error_idx), or they may not have been processed yet (if the first cluster
- member appeared after error_idx).
-
- It is all fairly theoretical, though. In practice all you can do is to
- bail out. If error_idx == count, then it is an application bug. If
- error_idx < count then it is only an application bug if the error code was
- EBUSY. That usually means that something started streaming just when you
- tried to set the controls. In all other cases it is a driver/hardware
- problem and all you can do is to retry or bail out.
-
- Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
- never modifies controls the error_idx is just set to whatever control
- has an invalid value.
- */
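(A userspace-side sketch of the error_idx convention just described; 'fd' and a
populated 'ctrls' are assumed, and headers are omitted.)

    struct v4l2_ext_controls ctrls = { /* which/count/controls filled in */ };

    if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
        if (ctrls.error_idx == ctrls.count)
            fprintf(stderr, "validation failed, no control was touched\n");
        else
            fprintf(stderr, "control %u failed; earlier controls were applied\n",
                    ctrls.error_idx);
    }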
-
-/* Prepare for the extended g/s/try functions.
- Find the controls in the control array and do some basic checks. */
-static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct v4l2_ctrl_helper *helpers,
- struct video_device *vdev,
- bool get)
-{
- struct v4l2_ctrl_helper *h;
- bool have_clusters = false;
- u32 i;
-
- for (i = 0, h = helpers; i < cs->count; i++, h++) {
- struct v4l2_ext_control *c = &cs->controls[i];
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl *ctrl;
- u32 id = c->id & V4L2_CTRL_ID_MASK;
-
- cs->error_idx = i;
-
- if (cs->which &&
- cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
- cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
- V4L2_CTRL_ID2WHICH(id) != cs->which) {
- dprintk(vdev,
- "invalid which 0x%x or control id 0x%x\n",
- cs->which, id);
- return -EINVAL;
- }
-
- /* Old-style private controls are not allowed for
- extended controls */
- if (id >= V4L2_CID_PRIVATE_BASE) {
- dprintk(vdev,
- "old-style private controls not allowed\n");
- return -EINVAL;
- }
- ref = find_ref_lock(hdl, id);
- if (ref == NULL) {
- dprintk(vdev, "cannot find control id 0x%x\n", id);
- return -EINVAL;
- }
- h->ref = ref;
- ctrl = ref->ctrl;
- if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
- dprintk(vdev, "control id 0x%x is disabled\n", id);
- return -EINVAL;
- }
-
- if (ctrl->cluster[0]->ncontrols > 1)
- have_clusters = true;
- if (ctrl->cluster[0] != ctrl)
- ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
- if (ctrl->is_ptr && !ctrl->is_string) {
- unsigned tot_size = ctrl->elems * ctrl->elem_size;
-
- if (c->size < tot_size) {
- /*
- * In the get case the application first
- * queries to obtain the size of the control.
- */
- if (get) {
- c->size = tot_size;
- return -ENOSPC;
- }
- dprintk(vdev,
- "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
- id, c->size, tot_size);
- return -EFAULT;
- }
- c->size = tot_size;
- }
- /* Store the ref to the master control of the cluster */
- h->mref = ref;
- /* Initially set next to 0, meaning that there is no other
- control in this helper array belonging to the same
- cluster */
- h->next = 0;
- }
-
- /* We are done if there were no controls that belong to a multi-
- control cluster. */
- if (!have_clusters)
- return 0;
-
- /* The code below figures out in O(n) time which controls in the list
- belong to the same cluster. */
-
- /* This has to be done with the handler lock taken. */
- mutex_lock(hdl->lock);
-
- /* First zero the helper field in the master control references */
- for (i = 0; i < cs->count; i++)
- helpers[i].mref->helper = NULL;
- for (i = 0, h = helpers; i < cs->count; i++, h++) {
- struct v4l2_ctrl_ref *mref = h->mref;
-
- /* If the mref->helper is set, then it points to an earlier
- helper that belongs to the same cluster. */
- if (mref->helper) {
- /* Set the next field of mref->helper to the current
- index: this means that that earlier helper now
- points to the next helper in the same cluster. */
- mref->helper->next = i;
- /* mref should be set only for the first helper in the
- cluster, clear the others. */
- h->mref = NULL;
- }
- /* Point the mref helper to the current helper struct. */
- mref->helper = h;
- }
- mutex_unlock(hdl->lock);
- return 0;
-}
-
-/* Handles the corner case where cs->count == 0. It checks whether the
- specified control class exists. If 'which' is 0, V4L2_CTRL_WHICH_DEF_VAL
- or V4L2_CTRL_WHICH_REQUEST_VAL, the check always succeeds. */
-static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
-{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
- which == V4L2_CTRL_WHICH_REQUEST_VAL)
- return 0;
- return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
-}
-
-/*
- * Get extended controls. Allocates the helpers array if needed.
- *
- * Note that v4l2_g_ext_ctrls_common() with 'which' set to
- * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
- * completed, and in that case valid_p_req is true for all controls.
- */
-static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct video_device *vdev)
-{
- struct v4l2_ctrl_helper helper[4];
- struct v4l2_ctrl_helper *helpers = helper;
- int ret;
- int i, j;
- bool is_default, is_request;
-
- is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
- is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
-
- cs->error_idx = cs->count;
- cs->which = V4L2_CTRL_ID2WHICH(cs->which);
-
- if (hdl == NULL)
- return -EINVAL;
-
- if (cs->count == 0)
- return class_check(hdl, cs->which);
-
- if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
- if (helpers == NULL)
- return -ENOMEM;
- }
-
- ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
- cs->error_idx = cs->count;
-
- for (i = 0; !ret && i < cs->count; i++)
- if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
- ret = -EACCES;
-
- for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *master;
- bool is_volatile = false;
- u32 idx = i;
-
- if (helpers[i].mref == NULL)
- continue;
-
- master = helpers[i].mref->ctrl;
- cs->error_idx = i;
-
- v4l2_ctrl_lock(master);
-
- /*
- * g_volatile_ctrl will update the new control values.
- * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
- * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
- * it is v4l2_ctrl_request_complete() that copies the
- * volatile controls at the time of request completion
- * to the request, so you don't want to do that again.
- */
- if (!is_default && !is_request &&
- ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
- (master->has_volatiles && !is_cur_manual(master)))) {
- for (j = 0; j < master->ncontrols; j++)
- cur_to_new(master->cluster[j]);
- ret = call_op(master, g_volatile_ctrl);
- is_volatile = true;
- }
-
- if (ret) {
- v4l2_ctrl_unlock(master);
- break;
- }
-
- /*
- * Copy the default value (if is_default is true), the
- * request value (if is_request is true and p_req is valid),
- * the new volatile value (if is_volatile is true) or the
- * current value.
- */
- do {
- struct v4l2_ctrl_ref *ref = helpers[idx].ref;
-
- if (is_default)
- ret = def_to_user(cs->controls + idx, ref->ctrl);
- else if (is_request && ref->valid_p_req)
- ret = req_to_user(cs->controls + idx, ref);
- else if (is_volatile)
- ret = new_to_user(cs->controls + idx, ref->ctrl);
- else
- ret = cur_to_user(cs->controls + idx, ref->ctrl);
- idx = helpers[idx].next;
- } while (!ret && idx);
-
- v4l2_ctrl_unlock(master);
- }
-
- if (cs->count > ARRAY_SIZE(helper))
- kvfree(helpers);
- return ret;
-}
-
-static struct media_request_object *
-v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
- struct media_request *req, bool set)
-{
- struct media_request_object *obj;
- struct v4l2_ctrl_handler *new_hdl;
- int ret;
-
- if (IS_ERR(req))
- return ERR_CAST(req);
-
- if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
- return ERR_PTR(-EBUSY);
-
- obj = media_request_object_find(req, &req_ops, hdl);
- if (obj)
- return obj;
- if (!set)
- return ERR_PTR(-ENOENT);
-
- new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
- if (!new_hdl)
- return ERR_PTR(-ENOMEM);
-
- obj = &new_hdl->req_obj;
- ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
- if (!ret)
- ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
- if (ret) {
- kfree(new_hdl);
-
- return ERR_PTR(ret);
- }
-
- media_request_object_get(obj);
- return obj;
-}
-
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
- struct media_device *mdev, struct v4l2_ext_controls *cs)
-{
- struct media_request_object *obj = NULL;
- struct media_request *req = NULL;
- int ret;
-
- if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
- if (!mdev || cs->request_fd < 0)
- return -EINVAL;
-
- req = media_request_get_by_fd(mdev, cs->request_fd);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
- media_request_put(req);
- return -EACCES;
- }
-
- ret = media_request_lock_for_access(req);
- if (ret) {
- media_request_put(req);
- return ret;
- }
-
- obj = v4l2_ctrls_find_req_obj(hdl, req, false);
- if (IS_ERR(obj)) {
- media_request_unlock_for_access(req);
- media_request_put(req);
- return PTR_ERR(obj);
- }
-
- hdl = container_of(obj, struct v4l2_ctrl_handler,
- req_obj);
- }
-
- ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
-
- if (obj) {
- media_request_unlock_for_access(req);
- media_request_object_put(obj);
- media_request_put(req);
- }
- return ret;
-}
-EXPORT_SYMBOL(v4l2_g_ext_ctrls);
-
-/* Helper function to get a single control */
-static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
-{
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret = 0;
- int i;
-
- /* Compound controls are not supported. The new_to_user() and
- * cur_to_user() calls below would need to be modified not to access
- * userspace memory when called from get_ctrl().
- */
- if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
- return -EINVAL;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
- return -EACCES;
-
- v4l2_ctrl_lock(master);
- /* g_volatile_ctrl will update the current control values */
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
- ret = call_op(master, g_volatile_ctrl);
- new_to_user(c, ctrl);
- } else {
- cur_to_user(c, ctrl);
- }
- v4l2_ctrl_unlock(master);
- return ret;
-}
-
-int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
- struct v4l2_ext_control c;
- int ret;
-
- if (ctrl == NULL || !ctrl->is_int)
- return -EINVAL;
- ret = get_ctrl(ctrl, &c);
- control->value = c.value;
- return ret;
-}
-EXPORT_SYMBOL(v4l2_g_ctrl);
-
-s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(!ctrl->is_int))
- return 0;
- c.value = 0;
- get_ctrl(ctrl, &c);
- return c.value;
-}
-EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
-
-s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
- return 0;
- c.value64 = 0;
- get_ctrl(ctrl, &c);
- return c.value64;
-}
-EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
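(These two helpers are for in-kernel readers of simple controls; a sketch, with
an assumed 'state' holding the control pointers:)

    s32 brightness = v4l2_ctrl_g_ctrl(state->brightness);
    s64 pixel_rate = v4l2_ctrl_g_ctrl_int64(state->pixel_rate);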
-
-
-/* Core function that calls try/s_ctrl and ensures that the new value is
- copied to the current value on a set.
- Must be called with ctrl->handler->lock held. */
-static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
- bool set, u32 ch_flags)
-{
- bool update_flag;
- int ret;
- int i;
-
- /* Go through the cluster and either validate the new value or,
- if no new value was set, copy the current value to the new
- value, ensuring a consistent view for the control ops when
- called. */
- for (i = 0; i < master->ncontrols; i++) {
- struct v4l2_ctrl *ctrl = master->cluster[i];
-
- if (ctrl == NULL)
- continue;
-
- if (!ctrl->is_new) {
- cur_to_new(ctrl);
- continue;
- }
- /* Check again: it may have changed since the
- previous check in try_or_set_ext_ctrls(). */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
- return -EBUSY;
- }
-
- ret = call_op(master, try_ctrl);
-
- /* Don't set if there is no change */
- if (ret || !set || !cluster_changed(master))
- return ret;
- ret = call_op(master, s_ctrl);
- if (ret)
- return ret;
-
- /* If OK, then make the new values permanent. */
- update_flag = is_cur_manual(master) != is_new_manual(master);
-
- for (i = 0; i < master->ncontrols; i++) {
- /*
- * If we switch from auto to manual mode, and this cluster
- * contains volatile controls, then all non-master controls
- * have to be marked as changed. The 'new' value contains
- * the volatile value (obtained by update_from_auto_cluster),
- * which now has to become the current value.
- */
- if (i && update_flag && is_new_manual(master) &&
- master->has_volatiles && master->cluster[i])
- master->cluster[i]->has_changed = true;
-
- new_to_cur(fh, master->cluster[i], ch_flags |
- ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
- }
- return 0;
-}
-
-/* Validate controls. */
-static int validate_ctrls(struct v4l2_ext_controls *cs,
- struct v4l2_ctrl_helper *helpers,
- struct video_device *vdev,
- bool set)
-{
- unsigned i;
- int ret = 0;
-
- cs->error_idx = cs->count;
- for (i = 0; i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
- union v4l2_ctrl_ptr p_new;
-
- cs->error_idx = i;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
- dprintk(vdev,
- "control id 0x%x is read-only\n",
- ctrl->id);
- return -EACCES;
- }
- /* This test is also done in try_or_set_cluster() which
- is called in atomic context, so that has the final say,
- but it makes sense to do an up-front check as well. Once
- an error occurs in try_or_set_cluster() some other
- controls may have been set already and we want to make a
- best effort to avoid that. */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
- dprintk(vdev,
- "control id 0x%x is grabbed, cannot set\n",
- ctrl->id);
- return -EBUSY;
- }
- /*
- * Skip validation for now if the payload needs to be copied
- * from userspace into kernelspace. We'll validate those later.
- */
- if (ctrl->is_ptr)
- continue;
- if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
- p_new.p_s64 = &cs->controls[i].value64;
- else
- p_new.p_s32 = &cs->controls[i].value;
- ret = validate_new(ctrl, p_new);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/* Obtain the current volatile values of an autocluster and mark them
- as new. */
-static void update_from_auto_cluster(struct v4l2_ctrl *master)
-{
- int i;
-
- for (i = 1; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
- if (!call_op(master, g_volatile_ctrl))
- for (i = 1; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->is_new = 1;
-}
-
-/* Try or try-and-set controls */
-static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
- struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct video_device *vdev, bool set)
-{
- struct v4l2_ctrl_helper helper[4];
- struct v4l2_ctrl_helper *helpers = helper;
- unsigned i, j;
- int ret;
-
- cs->error_idx = cs->count;
-
- /* Default value cannot be changed */
- if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
- dprintk(vdev, "%s: cannot change default value\n",
- video_device_node_name(vdev));
- return -EINVAL;
- }
-
- cs->which = V4L2_CTRL_ID2WHICH(cs->which);
-
- if (hdl == NULL) {
- dprintk(vdev, "%s: invalid null control handler\n",
- video_device_node_name(vdev));
- return -EINVAL;
- }
-
- if (cs->count == 0)
- return class_check(hdl, cs->which);
-
- if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
- if (!helpers)
- return -ENOMEM;
- }
- ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
- if (!ret)
- ret = validate_ctrls(cs, helpers, vdev, set);
- if (ret && set)
- cs->error_idx = cs->count;
- for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *master;
- u32 idx = i;
-
- if (helpers[i].mref == NULL)
- continue;
-
- cs->error_idx = i;
- master = helpers[i].mref->ctrl;
- v4l2_ctrl_lock(master);
-
- /* Reset the 'is_new' flags of the cluster */
- for (j = 0; j < master->ncontrols; j++)
- if (master->cluster[j])
- master->cluster[j]->is_new = 0;
-
- /* For volatile autoclusters that are currently in auto mode
- we need to discover whether the cluster will be switched to
- manual mode. If so, then we have to copy the current volatile
- values first since those will become the new manual values
- (which may be overwritten by explicit new values from this set
- of controls). */
- if (master->is_auto && master->has_volatiles &&
- !is_cur_manual(master)) {
- /* Pick an initial non-manual value */
- s32 new_auto_val = master->manual_mode_value + 1;
- u32 tmp_idx = idx;
-
- do {
- /* Check if the auto control is part of the
- list, and remember the new value. */
- if (helpers[tmp_idx].ref->ctrl == master)
- new_auto_val = cs->controls[tmp_idx].value;
- tmp_idx = helpers[tmp_idx].next;
- } while (tmp_idx);
- /* If the new value == the manual value, then copy
- the current volatile values. */
- if (new_auto_val == master->manual_mode_value)
- update_from_auto_cluster(master);
- }
-
- /* Copy the new caller-supplied control values.
- user_to_new() sets 'is_new' to 1. */
- do {
- struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
-
- ret = user_to_new(cs->controls + idx, ctrl);
- if (!ret && ctrl->is_ptr) {
- ret = validate_new(ctrl, ctrl->p_new);
- if (ret)
- dprintk(vdev,
- "failed to validate control %s (%d)\n",
- v4l2_ctrl_get_name(ctrl->id), ret);
- }
- idx = helpers[idx].next;
- } while (!ret && idx);
-
- if (!ret)
- ret = try_or_set_cluster(fh, master,
- !hdl->req_obj.req && set, 0);
- if (!ret && hdl->req_obj.req && set) {
- for (j = 0; j < master->ncontrols; j++) {
- struct v4l2_ctrl_ref *ref =
- find_ref(hdl, master->cluster[j]->id);
-
- new_to_req(ref);
- }
- }
-
- /* Copy the new values back to userspace. */
- if (!ret) {
- idx = i;
- do {
- ret = new_to_user(cs->controls + idx,
- helpers[idx].ref->ctrl);
- idx = helpers[idx].next;
- } while (!ret && idx);
- }
- v4l2_ctrl_unlock(master);
- }
-
- if (cs->count > ARRAY_SIZE(helper))
- kvfree(helpers);
- return ret;
-}
-
-static int try_set_ext_ctrls(struct v4l2_fh *fh,
- struct v4l2_ctrl_handler *hdl,
- struct video_device *vdev,
- struct media_device *mdev,
- struct v4l2_ext_controls *cs, bool set)
-{
- struct media_request_object *obj = NULL;
- struct media_request *req = NULL;
- int ret;
-
- if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
- if (!mdev) {
- dprintk(vdev, "%s: missing media device\n",
- video_device_node_name(vdev));
- return -EINVAL;
- }
-
- if (cs->request_fd < 0) {
- dprintk(vdev, "%s: invalid request fd %d\n",
- video_device_node_name(vdev), cs->request_fd);
- return -EINVAL;
- }
-
- req = media_request_get_by_fd(mdev, cs->request_fd);
- if (IS_ERR(req)) {
- dprintk(vdev, "%s: cannot find request fd %d\n",
- video_device_node_name(vdev), cs->request_fd);
- return PTR_ERR(req);
- }
-
- ret = media_request_lock_for_update(req);
- if (ret) {
- dprintk(vdev, "%s: cannot lock request fd %d\n",
- video_device_node_name(vdev), cs->request_fd);
- media_request_put(req);
- return ret;
- }
-
- obj = v4l2_ctrls_find_req_obj(hdl, req, set);
- if (IS_ERR(obj)) {
- dprintk(vdev,
- "%s: cannot find request object for request fd %d\n",
- video_device_node_name(vdev),
- cs->request_fd);
- media_request_unlock_for_update(req);
- media_request_put(req);
- return PTR_ERR(obj);
- }
- hdl = container_of(obj, struct v4l2_ctrl_handler,
- req_obj);
- }
-
- ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
- if (ret)
- dprintk(vdev,
- "%s: try_set_ext_ctrls_common failed (%d)\n",
- video_device_node_name(vdev), ret);
-
- if (obj) {
- media_request_unlock_for_update(req);
- media_request_object_put(obj);
- media_request_put(req);
- }
-
- return ret;
-}
-
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
- struct video_device *vdev,
- struct media_device *mdev,
- struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
-}
-EXPORT_SYMBOL(v4l2_try_ext_ctrls);
-
-int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
- struct v4l2_ctrl_handler *hdl,
- struct video_device *vdev,
- struct media_device *mdev,
- struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
-}
-EXPORT_SYMBOL(v4l2_s_ext_ctrls);
-
-/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
-{
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret;
- int i;
-
- /* Reset the 'is_new' flags of the cluster */
- for (i = 0; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->is_new = 0;
-
- ret = validate_new(ctrl, ctrl->p_new);
- if (ret)
- return ret;
-
- /* For autoclusters with volatiles that are switched from auto to
- manual mode we have to update the current volatile values since
- those will become the initial manual values after such a switch. */
- if (master->is_auto && master->has_volatiles && ctrl == master &&
- !is_cur_manual(master) && ctrl->val == master->manual_mode_value)
- update_from_auto_cluster(master);
-
- ctrl->is_new = 1;
- return try_or_set_cluster(fh, master, true, ch_flags);
-}
-
-/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
-{
- int ret;
-
- v4l2_ctrl_lock(ctrl);
- user_to_new(c, ctrl);
- ret = set_ctrl(fh, ctrl, 0);
- if (!ret)
- cur_to_user(c, ctrl);
- v4l2_ctrl_unlock(ctrl);
- return ret;
-}
-
-int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_control *control)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
- struct v4l2_ext_control c = { control->id };
- int ret;
-
- if (ctrl == NULL || !ctrl->is_int)
- return -EINVAL;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
- return -EACCES;
-
- c.value = control->value;
- ret = set_ctrl_lock(fh, ctrl, &c);
- control->value = c.value;
- return ret;
-}
-EXPORT_SYMBOL(v4l2_s_ctrl);
-
-int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
-{
- lockdep_assert_held(ctrl->handler->lock);
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(!ctrl->is_int))
- return -EINVAL;
- ctrl->val = val;
- return set_ctrl(NULL, ctrl, 0);
-}
-EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
-
-int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
-{
- lockdep_assert_held(ctrl->handler->lock);
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
- return -EINVAL;
- *ctrl->p_new.p_s64 = val;
- return set_ctrl(NULL, ctrl, 0);
-}
-EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
-
-int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
-{
- lockdep_assert_held(ctrl->handler->lock);
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
- return -EINVAL;
- strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
- return set_ctrl(NULL, ctrl, 0);
-}
-EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
-
-int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
- enum v4l2_ctrl_type type, const void *p)
-{
- lockdep_assert_held(ctrl->handler->lock);
-
- /* It's a driver bug if this happens. */
- if (WARN_ON(ctrl->type != type))
- return -EINVAL;
- memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
- return set_ctrl(NULL, ctrl, 0);
-}
-EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
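(All four __v4l2_ctrl_s_ctrl*() variants assume the handler lock is held, which
is what the locked inline wrappers in include/media/v4l2-ctrls.h provide;
roughly:)

    static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
    {
        int ret;

        v4l2_ctrl_lock(ctrl);
        ret = __v4l2_ctrl_s_ctrl(ctrl, val);
        v4l2_ctrl_unlock(ctrl);
        return ret;
    }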
-
-void v4l2_ctrl_request_complete(struct media_request *req,
- struct v4l2_ctrl_handler *main_hdl)
-{
- struct media_request_object *obj;
- struct v4l2_ctrl_handler *hdl;
- struct v4l2_ctrl_ref *ref;
-
- if (!req || !main_hdl)
- return;
-
- /*
- * Note that it is valid if nothing was found. It means
- * that this request doesn't have any controls and so just
- * wants to leave the controls unchanged.
- */
- obj = media_request_object_find(req, &req_ops, main_hdl);
- if (!obj)
- return;
- hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
-
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- struct v4l2_ctrl *ctrl = ref->ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
- unsigned int i;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
- v4l2_ctrl_lock(master);
- /* g_volatile_ctrl will update the current control values */
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
- call_op(master, g_volatile_ctrl);
- new_to_req(ref);
- v4l2_ctrl_unlock(master);
- continue;
- }
- if (ref->valid_p_req)
- continue;
-
- /* Copy the current control value into the request */
- v4l2_ctrl_lock(ctrl);
- cur_to_req(ref);
- v4l2_ctrl_unlock(ctrl);
- }
-
- mutex_lock(main_hdl->lock);
- WARN_ON(!hdl->request_is_queued);
- list_del_init(&hdl->requests_queued);
- hdl->request_is_queued = false;
- mutex_unlock(main_hdl->lock);
- media_request_object_complete(obj);
- media_request_object_put(obj);
-}
-EXPORT_SYMBOL(v4l2_ctrl_request_complete);
-
-int v4l2_ctrl_request_setup(struct media_request *req,
- struct v4l2_ctrl_handler *main_hdl)
-{
- struct media_request_object *obj;
- struct v4l2_ctrl_handler *hdl;
- struct v4l2_ctrl_ref *ref;
- int ret = 0;
-
- if (!req || !main_hdl)
- return 0;
-
- if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
- return -EBUSY;
-
- /*
- * Note that it is valid if nothing was found. It means
- * that this request doesn't have any controls and so just
- * wants to leave the controls unchanged.
- */
- obj = media_request_object_find(req, &req_ops, main_hdl);
- if (!obj)
- return 0;
- if (obj->completed) {
- media_request_object_put(obj);
- return -EBUSY;
- }
- hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
-
- list_for_each_entry(ref, &hdl->ctrl_refs, node)
- ref->req_done = false;
-
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- struct v4l2_ctrl *ctrl = ref->ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
- bool have_new_data = false;
- int i;
-
- /*
- * Skip if this control was already handled by a cluster.
- * Skip button controls and read-only controls.
- */
- if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
- continue;
-
- v4l2_ctrl_lock(master);
- for (i = 0; i < master->ncontrols; i++) {
- if (master->cluster[i]) {
- struct v4l2_ctrl_ref *r =
- find_ref(hdl, master->cluster[i]->id);
-
- if (r->valid_p_req) {
- have_new_data = true;
- break;
- }
- }
- }
- if (!have_new_data) {
- v4l2_ctrl_unlock(master);
- continue;
- }
-
- for (i = 0; i < master->ncontrols; i++) {
- if (master->cluster[i]) {
- struct v4l2_ctrl_ref *r =
- find_ref(hdl, master->cluster[i]->id);
-
- req_to_new(r);
- master->cluster[i]->is_new = 1;
- r->req_done = true;
- }
- }
- /*
- * For volatile autoclusters that are currently in auto mode
- * we need to discover if it will be set to manual mode.
- * If so, then we have to copy the current volatile values
- * first since those will become the new manual values (which
- * may be overwritten by explicit new values from this set
- * of controls).
- */
- if (master->is_auto && master->has_volatiles &&
- !is_cur_manual(master)) {
- s32 new_auto_val = *master->p_new.p_s32;
-
- /*
- * If the new value == the manual value, then copy
- * the current volatile values.
- */
- if (new_auto_val == master->manual_mode_value)
- update_from_auto_cluster(master);
- }
-
- ret = try_or_set_cluster(NULL, master, true, 0);
- v4l2_ctrl_unlock(master);
-
- if (ret)
- break;
- }
-
- media_request_object_put(obj);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_request_setup);
-
-void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
-{
- if (ctrl == NULL)
- return;
- if (notify == NULL) {
- ctrl->call_notify = 0;
- return;
- }
- if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
- return;
- ctrl->handler->notify = notify;
- ctrl->handler->notify_priv = priv;
- ctrl->call_notify = 1;
-}
-EXPORT_SYMBOL(v4l2_ctrl_notify);
-
-int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
- s64 min, s64 max, u64 step, s64 def)
-{
- bool value_changed;
- bool range_changed = false;
- int ret;
-
- lockdep_assert_held(ctrl->handler->lock);
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_INTEGER64:
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- case V4L2_CTRL_TYPE_BITMASK:
- case V4L2_CTRL_TYPE_U8:
- case V4L2_CTRL_TYPE_U16:
- case V4L2_CTRL_TYPE_U32:
- if (ctrl->is_array)
- return -EINVAL;
- ret = check_range(ctrl->type, min, max, step, def);
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
- }
- if ((ctrl->minimum != min) || (ctrl->maximum != max) ||
- (ctrl->step != step) || ctrl->default_value != def) {
- range_changed = true;
- ctrl->minimum = min;
- ctrl->maximum = max;
- ctrl->step = step;
- ctrl->default_value = def;
- }
- cur_to_new(ctrl);
- if (validate_new(ctrl, ctrl->p_new)) {
- if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
- *ctrl->p_new.p_s64 = def;
- else
- *ctrl->p_new.p_s32 = def;
- }
-
- if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
- value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
- else
- value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
- if (value_changed)
- ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
- else if (range_changed)
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
- return ret;
-}
-EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
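(Drivers normally use the locked v4l2_ctrl_modify_range() wrapper from the
header; a sketch of shrinking an exposure range after a mode change, with
assumed names:)

    err = v4l2_ctrl_modify_range(state->exposure, 1, new_max_exposure, 1,
                                 new_max_exposure / 2);
    if (err)
        return err;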
-
-static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
-
- if (ctrl == NULL)
- return -EINVAL;
-
- v4l2_ctrl_lock(ctrl);
- list_add_tail(&sev->node, &ctrl->ev_subs);
- if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
- (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
- struct v4l2_event ev;
- u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
-
- if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
- changes |= V4L2_EVENT_CTRL_CH_VALUE;
- fill_event(&ev, ctrl, changes);
- /* Mark the queue as active, allowing this initial
- event to be accepted. */
- sev->elems = elems;
- v4l2_event_queue_fh(sev->fh, &ev);
- }
- v4l2_ctrl_unlock(ctrl);
- return 0;
-}
-
-static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
-
- if (ctrl == NULL)
- return;
-
- v4l2_ctrl_lock(ctrl);
- list_del(&sev->node);
- v4l2_ctrl_unlock(ctrl);
-}
-
-void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
-{
- u32 old_changes = old->u.ctrl.changes;
-
- old->u.ctrl = new->u.ctrl;
- old->u.ctrl.changes |= old_changes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_replace);
-
-void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
-{
- new->u.ctrl.changes |= old->u.ctrl.changes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_merge);
-
-const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
- .add = v4l2_ctrl_add_event,
- .del = v4l2_ctrl_del_event,
- .replace = v4l2_ctrl_replace,
- .merge = v4l2_ctrl_merge,
-};
-EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
-
-int v4l2_ctrl_log_status(struct file *file, void *fh)
-{
- struct video_device *vfd = video_devdata(file);
- struct v4l2_fh *vfh = file->private_data;
-
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
- v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
- vfd->v4l2_dev->name);
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_log_status);
-
-int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
-{
- if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
- return -EINVAL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
-
-int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- if (!sd->ctrl_handler)
- return -EINVAL;
- return v4l2_ctrl_subscribe_event(fh, sub);
-}
-EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-
-__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- return EPOLLPRI;
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_poll);
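(v4l2_ctrl_poll() is meant to be plugged straight into the file operations of a
device node whose only events are control events, e.g. a radio device; a sketch
with an assumed driver name:)

    static const struct v4l2_file_operations mydrv_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
        .poll           = v4l2_ctrl_poll,
        .unlocked_ioctl = video_ioctl2,
    };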
-
-int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ctrl_ops,
- const struct v4l2_fwnode_device_properties *p)
-{
- if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
- u32 orientation_ctrl;
-
- switch (p->orientation) {
- case V4L2_FWNODE_ORIENTATION_FRONT:
- orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
- break;
- case V4L2_FWNODE_ORIENTATION_BACK:
- orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
- break;
- case V4L2_FWNODE_ORIENTATION_EXTERNAL:
- orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
- break;
- default:
- return -EINVAL;
- }
- if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
- V4L2_CID_CAMERA_ORIENTATION,
- V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
- orientation_ctrl))
- return hdl->error;
- }
-
- if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
- if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
- V4L2_CID_CAMERA_SENSOR_ROTATION,
- p->rotation, p->rotation, 1,
- p->rotation))
- return hdl->error;
- }
-
- return hdl->error;
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
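(Sketch of typical sensor-probe usage, pairing this helper with
v4l2_fwnode_device_parse(); 'sensor' and its ctrl ops are assumed names.)

    struct v4l2_fwnode_device_properties props;

    err = v4l2_fwnode_device_parse(dev, &props);
    if (!err)
        err = v4l2_ctrl_new_fwnode_properties(&sensor->hdl,
                                              &sensor_ctrl_ops, &props);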
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 7d0edf3530be..d03ace324db0 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -39,8 +39,6 @@
__func__, ##arg); \
} while (0)
-static struct dentry *v4l2_debugfs_dir;
-
/*
* sysfs stuff
*/
@@ -520,9 +518,8 @@ static int get_index(struct video_device *vdev)
return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
}
-#define SET_VALID_IOCTL(ops, cmd, op) \
- if (ops->op) \
- set_bit(_IOC_NR(cmd), valid_ioctls)
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ do { if ((ops)->op) set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
/* This determines which ioctls are actually implemented in the driver.
It's a one-time thing which simplifies video_ioctl2 as it can just do
@@ -1121,8 +1118,6 @@ static int __init videodev_init(void)
return -EIO;
}
- v4l2_debugfs_dir = debugfs_create_dir("video4linux", NULL);
- v4l2_async_debug_init(v4l2_debugfs_dir);
return 0;
}
@@ -1130,7 +1125,6 @@ static void __exit videodev_exit(void)
{
dev_t dev = MKDEV(VIDEO_MAJOR, 0);
- debugfs_remove_recursive(v4l2_debugfs_dir);
class_unregister(&video_class);
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
}
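
Note: wrapping SET_VALID_IOCTL() in do { ... } while (0) is the standard macro-hygiene fix: a macro that expands to a bare if silently mis-binds a following else. A contrived caller showing the hazard (do_something_else() is hypothetical):

	if (cond)
		SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
	else
		do_something_else();	/* with the old macro, this else
					 * would pair with the macro's
					 * internal if, not with cond */
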
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index caad58bde326..c5ce9f11ad7b 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/export.h>
-static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
+static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
idx += sev->first;
return idx >= sev->elems ? idx - sev->elems : idx;
@@ -221,12 +221,12 @@ static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
}
int v4l2_event_subscribe(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_event_subscription *sub, unsigned int elems,
const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
- unsigned i;
+ unsigned int i;
int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 684574f58e82..90eec79ee995 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -96,6 +96,7 @@ int v4l2_fh_release(struct file *filp)
v4l2_fh_del(fh);
v4l2_fh_exit(fh);
kfree(fh);
+ filp->private_data = NULL;
}
return 0;
}
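
Note: clearing filp->private_data once the file handle is freed means any late access trips over a NULL pointer instead of silently reading freed memory. A defensive caller can then bail out cleanly, e.g. (hypothetical fragment):

	struct v4l2_fh *fh = filp->private_data;

	if (!fh)	/* handle already released */
		return 0;
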
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 2673f51aafa4..05d5db3d85e5 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -3072,8 +3072,8 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
static unsigned int video_translate_cmd(unsigned int cmd)
{
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
-#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT_TIME32:
return VIDIOC_DQEVENT;
case VIDIOC_QUERYBUF_TIME32:
@@ -3084,8 +3084,8 @@ static unsigned int video_translate_cmd(unsigned int cmd)
return VIDIOC_DQBUF;
case VIDIOC_PREPARE_BUF_TIME32:
return VIDIOC_PREPARE_BUF;
-#endif
}
+#endif
if (in_compat_syscall())
return v4l2_compat_translate_cmd(cmd);
@@ -3124,10 +3124,12 @@ static int video_get_user(void __user *arg, void *parg,
if (copy_from_user(parg, (void __user *)arg, n))
err = -EFAULT;
} else if (in_compat_syscall()) {
+ memset(parg, 0, n);
err = v4l2_compat_get_user(arg, parg, cmd);
} else {
+ memset(parg, 0, n);
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
-#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_QUERYBUF_TIME32:
case VIDIOC_QBUF_TIME32:
case VIDIOC_DQBUF_TIME32:
@@ -3140,23 +3142,23 @@ static int video_get_user(void __user *arg, void *parg,
*vb = (struct v4l2_buffer) {
.index = vb32.index,
- .type = vb32.type,
- .bytesused = vb32.bytesused,
- .flags = vb32.flags,
- .field = vb32.field,
- .timestamp.tv_sec = vb32.timestamp.tv_sec,
- .timestamp.tv_usec = vb32.timestamp.tv_usec,
- .timecode = vb32.timecode,
- .sequence = vb32.sequence,
- .memory = vb32.memory,
- .m.userptr = vb32.m.userptr,
- .length = vb32.length,
- .request_fd = vb32.request_fd,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.userptr = vb32.m.userptr,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
};
break;
}
-#endif
}
+#endif
}
/* zero out anything we don't copy from userspace */
@@ -3181,8 +3183,8 @@ static int video_put_user(void __user *arg, void *parg,
if (in_compat_syscall())
return v4l2_compat_put_user(arg, parg, cmd);
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
switch (cmd) {
-#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT_TIME32: {
struct v4l2_event *ev = parg;
struct v4l2_event_time32 ev32;
@@ -3230,8 +3232,8 @@ static int video_put_user(void __user *arg, void *parg,
return -EFAULT;
break;
}
-#endif
}
+#endif
return 0;
}
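
Note: the added memset(parg, 0, n) calls are a hardening fix: the compat and TIME32 conversion paths fill only some members of the kernel-side struct, and whatever is left uninitialized could later be copied back to userspace. The general pattern, sketched with the v4l2_buffer case above:

	struct v4l2_buffer vb;

	memset(&vb, 0, sizeof(vb));	/* padding and untouched fields
					 * must not leak kernel memory */
	vb.index = vb32.index;		/* then fill known fields only */
	vb.type  = vb32.type;
	/* ...remaining members copied as in the hunk above... */
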
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 956dafab43d4..5d27a27cc2f2 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -26,19 +26,21 @@
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
- if (sd->entity.num_pads) {
- fh->pad = v4l2_subdev_alloc_pad_config(sd);
- if (fh->pad == NULL)
- return -ENOMEM;
- }
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_alloc_state(sd);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ fh->state = state;
return 0;
}
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
- v4l2_subdev_free_pad_config(fh->pad);
- fh->pad = NULL;
+ v4l2_subdev_free_state(fh->state);
+ fh->state = NULL;
}
static int subdev_open(struct file *file)
@@ -146,63 +148,63 @@ static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
return 0;
}
-static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_state_pads(u32 which, struct v4l2_subdev_state *state)
{
- if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
+ if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
return -EINVAL;
return 0;
}
static inline int check_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
if (!format)
return -EINVAL;
return check_which(format->which) ? : check_pad(sd, format->pad) ? :
- check_cfg(format->which, cfg);
+ check_state_pads(format->which, state);
}
static int call_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- return check_format(sd, cfg, format) ? :
- sd->ops->pad->get_fmt(sd, cfg, format);
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->get_fmt(sd, state, format);
}
static int call_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- return check_format(sd, cfg, format) ? :
- sd->ops->pad->set_fmt(sd, cfg, format);
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->set_fmt(sd, state, format);
}
static int call_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code)
return -EINVAL;
return check_which(code->which) ? : check_pad(sd, code->pad) ? :
- check_cfg(code->which, cfg) ? :
- sd->ops->pad->enum_mbus_code(sd, cfg, code);
+ check_state_pads(code->which, state) ? :
+ sd->ops->pad->enum_mbus_code(sd, state, code);
}
static int call_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (!fse)
return -EINVAL;
return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
- check_cfg(fse->which, cfg) ? :
- sd->ops->pad->enum_frame_size(sd, cfg, fse);
+ check_state_pads(fse->which, state) ? :
+ sd->ops->pad->enum_frame_size(sd, state, fse);
}
static inline int check_frame_interval(struct v4l2_subdev *sd,
@@ -229,42 +231,42 @@ static int call_s_frame_interval(struct v4l2_subdev *sd,
}
static int call_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (!fie)
return -EINVAL;
return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
- check_cfg(fie->which, cfg) ? :
- sd->ops->pad->enum_frame_interval(sd, cfg, fie);
+ check_state_pads(fie->which, state) ? :
+ sd->ops->pad->enum_frame_interval(sd, state, fie);
}
static inline int check_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
if (!sel)
return -EINVAL;
return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
- check_cfg(sel->which, cfg);
+ check_state_pads(sel->which, state);
}
static int call_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
- return check_selection(sd, cfg, sel) ? :
- sd->ops->pad->get_selection(sd, cfg, sel);
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->get_selection(sd, state, sel);
}
static int call_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
- return check_selection(sd, cfg, sel) ? :
- sd->ops->pad->set_selection(sd, cfg, sel);
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->set_selection(sd, state, sel);
}
static inline int check_edid(struct v4l2_subdev *sd,
@@ -428,30 +430,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
- case VIDIOC_DQEVENT_TIME32: {
- struct v4l2_event_time32 *ev32 = arg;
- struct v4l2_event ev = { };
-
- if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
- return -ENOIOCTLCMD;
-
- rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
-
- *ev32 = (struct v4l2_event_time32) {
- .type = ev.type,
- .pending = ev.pending,
- .sequence = ev.sequence,
- .timestamp.tv_sec = ev.timestamp.tv_sec,
- .timestamp.tv_nsec = ev.timestamp.tv_nsec,
- .id = ev.id,
- };
-
- memcpy(&ev32->u, &ev.u, sizeof(ev.u));
- memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
-
- return rval;
- }
-
case VIDIOC_SUBSCRIBE_EVENT:
return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
@@ -506,7 +484,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
- return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->pad, format);
+ return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->state, format);
}
case VIDIOC_SUBDEV_S_FMT: {
@@ -517,7 +495,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
- return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
+ return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->state, format);
}
case VIDIOC_SUBDEV_G_CROP: {
@@ -531,7 +509,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh->pad, &sel);
+ sd, pad, get_selection, subdev_fh->state, &sel);
crop->rect = sel.r;
@@ -553,7 +531,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.r = crop->rect;
rval = v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh->pad, &sel);
+ sd, pad, set_selection, subdev_fh->state, &sel);
crop->rect = sel.r;
@@ -564,7 +542,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_mbus_code_enum *code = arg;
memset(code->reserved, 0, sizeof(code->reserved));
- return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->state,
code);
}
@@ -572,7 +550,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_frame_size_enum *fse = arg;
memset(fse->reserved, 0, sizeof(fse->reserved));
- return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->state,
fse);
}
@@ -597,7 +575,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_frame_interval_enum *fie = arg;
memset(fie->reserved, 0, sizeof(fie->reserved));
- return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->state,
fie);
}
@@ -606,7 +584,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh->pad, sel);
+ sd, pad, get_selection, subdev_fh->state, sel);
}
case VIDIOC_SUBDEV_S_SELECTION: {
@@ -617,7 +595,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh->pad, sel);
+ sd, pad, set_selection, subdev_fh->state, sel);
}
case VIDIOC_G_EDID: {
@@ -892,35 +870,51 @@ int v4l2_subdev_link_validate(struct media_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
-struct v4l2_subdev_pad_config *
-v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd)
+struct v4l2_subdev_state *v4l2_subdev_alloc_state(struct v4l2_subdev *sd)
{
- struct v4l2_subdev_pad_config *cfg;
+ struct v4l2_subdev_state *state;
int ret;
- if (!sd->entity.num_pads)
- return NULL;
-
- cfg = kvmalloc_array(sd->entity.num_pads, sizeof(*cfg),
- GFP_KERNEL | __GFP_ZERO);
- if (!cfg)
- return NULL;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
- ret = v4l2_subdev_call(sd, pad, init_cfg, cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- kvfree(cfg);
- return NULL;
+ if (sd->entity.num_pads) {
+ state->pads = kvmalloc_array(sd->entity.num_pads,
+ sizeof(*state->pads),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!state->pads) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
- return cfg;
+ ret = v4l2_subdev_call(sd, pad, init_cfg, state);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto err;
+
+ return state;
+
+err:
+ if (state && state->pads)
+ kvfree(state->pads);
+
+ kfree(state);
+
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_pad_config);
+EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_state);
-void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg)
+void v4l2_subdev_free_state(struct v4l2_subdev_state *state)
{
- kvfree(cfg);
+ if (!state)
+ return;
+
+ kvfree(state->pads);
+ kfree(state);
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_free_pad_config);
+EXPORT_SYMBOL_GPL(v4l2_subdev_free_state);
+
#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
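
Note: the v4l2_subdev_pad_config -> v4l2_subdev_state rename is mechanical here, but it touches every pad-operation signature in the tree. A minimal sketch of a driver-side pad op after the change, assuming a driver named foo and that the try-format helper takes the state, as in this series:

static int foo_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *state,
		       struct v4l2_subdev_format *format)
{
	struct foo *foo = to_foo(sd);	/* hypothetical container_of wrapper */

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		format->format = *v4l2_subdev_get_try_format(sd, state,
							     format->pad);
	else
		format->format = foo->cur_fmt;

	return 0;
}
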
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 8dd0562de287..f75e5eedeee0 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -423,7 +423,6 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
videobuf_queue_unlock(q);
kfree(map);
}
- return;
}
/*
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 8004dd64d09a..4a4573fa7b0f 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -129,7 +129,7 @@ static int msb_sg_compare_to_buffer(struct scatterlist *sg,
 * Each zone consists of 512 eraseblocks; 494 of them are used in the
 * first zone and 496 in every following zone.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
-*/
+ */
static int msb_get_zone_from_lba(int lba)
{
if (lba < 494)
@@ -348,8 +348,9 @@ again:
switch (msb->state) {
case MSB_RP_SEND_BLOCK_ADDRESS:
/* msb_write_regs sometimes "fails" because it needs to update
- the reg window, and thus it returns request for that.
- Then we stay in this state and retry */
+ * the reg window, and thus it returns a request for that.
+ * Then we stay in this state and retry.
+ */
if (!msb_write_regs(msb,
offsetof(struct ms_register, param),
sizeof(struct ms_param_register),
@@ -368,7 +369,8 @@ again:
case MSB_RP_SEND_INT_REQ:
msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
 /* If we don't actually need to send the int read request (only in
- serial mode), then just fall through */
+ * serial mode), then just fall through.
+ */
if (msb_read_int_reg(msb, -1))
return 0;
fallthrough;
@@ -702,7 +704,8 @@ static int h_msb_parallel_switch(struct memstick_dev *card,
case MSB_PS_SWICH_HOST:
/* Set parallel interface on our side + send a dummy request
- to see if card responds */
+ * to see if the card responds.
+ */
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
msb->state = MSB_PS_CONFIRM;
@@ -821,6 +824,7 @@ static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
int error, try;
+
if (msb->read_only)
return -EROFS;
@@ -997,6 +1001,7 @@ static int msb_write_block(struct msb_data *msb,
u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
int error, current_try = 1;
+
BUG_ON(sg->length < msb->page_size);
if (msb->read_only)
@@ -1045,11 +1050,12 @@ static int msb_write_block(struct msb_data *msb,
error = msb_run_state_machine(msb, h_msb_write_block);
/* Sector we just wrote to is assumed erased since its pba
- was erased. If it wasn't erased, write will succeed
- and will just clear the bits that were set in the block
- thus test that what we have written,
- matches what we expect.
- We do trust the blocks that we erased */
+ * was erased. If it wasn't erased, the write will succeed
+ * and will just clear the bits that were set in the block;
+ * thus we test that what we have written
+ * matches what we expect.
+ * We do trust the blocks that we erased.
+ */
if (!error && (verify_writes ||
!test_bit(pba, msb->erased_blocks_bitmap)))
error = msb_verify_block(msb, pba, sg, offset);
@@ -1493,6 +1499,7 @@ static int msb_ftl_scan(struct msb_data *msb)
static void msb_cache_flush_timer(struct timer_list *t)
{
struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
+
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
@@ -1673,7 +1680,8 @@ static int msb_cache_read(struct msb_data *msb, int lba,
 * This table content isn't that important;
 * one could put different values here, provided that they still
 * cover the whole disk.
- * 64 MB entry is what windows reports for my 64M memstick */
+ * The 64 MB entry is what Windows reports for my 64M memstick.
+ */
static const struct chs_entry chs_table[] = {
 /* size sectors cylinders heads */
@@ -1706,8 +1714,9 @@ static int msb_init_card(struct memstick_dev *card)
return error;
 /* Due to a bug in the JMicron driver written by Alex Dubov,
- its serial mode barely works,
- so we switch to parallel mode right away */
+ * its serial mode barely works,
+ * so we switch to parallel mode right away.
+ */
if (host->caps & MEMSTICK_CAP_PAR4)
msb_switch_to_parallel(msb);
@@ -2033,6 +2042,7 @@ static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
static int msb_check_card(struct memstick_dev *card)
{
struct msb_data *msb = memstick_get_drvdata(card);
+
return (msb->card_dead == 0);
}
@@ -2110,21 +2120,17 @@ static int msb_init_disk(struct memstick_dev *card)
if (msb->disk_id < 0)
return msb->disk_id;
- msb->disk = alloc_disk(0);
- if (!msb->disk) {
- rc = -ENOMEM;
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (rc)
goto out_release_id;
- }
- msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(msb->queue)) {
- rc = PTR_ERR(msb->queue);
- msb->queue = NULL;
- goto out_put_disk;
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ if (IS_ERR(msb->disk)) {
+ rc = PTR_ERR(msb->disk);
+ goto out_free_tag_set;
}
-
- msb->queue->queuedata = card;
+ msb->queue = msb->disk->queue;
blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
@@ -2135,8 +2141,6 @@ static int msb_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
msb->disk->fops = &msb_bdops;
msb->disk->private_data = msb;
- msb->disk->queue = msb->queue;
- msb->disk->flags |= GENHD_FL_EXT_DEVT;
capacity = msb->pages_in_block * msb->logical_block_count;
capacity *= (msb->page_size / 512);
@@ -2156,8 +2160,8 @@ static int msb_init_disk(struct memstick_dev *card)
dbg("Disk added");
return 0;
-out_put_disk:
- put_disk(msb->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
mutex_lock(&msb_disk_lock);
idr_remove(&msb_disk_idr, msb->disk_id);
@@ -2333,6 +2337,7 @@ static struct memstick_driver msb_driver = {
static int __init msb_init(void)
{
int rc = memstick_register_driver(&msb_driver);
+
if (rc)
pr_err("failed to register memstick driver (error %d)\n", rc);
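
Note: this follows the new blk-mq allocation idiom: set up the tag set first, let blk_mq_alloc_disk() create the gendisk and its request queue together (with the queuedata passed at allocation time), and unwind errors with blk_mq_free_tag_set(). Condensed from the hunks above:

	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_release_id;

	msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
	if (IS_ERR(msb->disk)) {
		rc = PTR_ERR(msb->disk);
		goto out_free_tag_set;
	}
	msb->queue = msb->disk->queue;	/* the queue now comes with the disk */
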
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index cf7fe0d58ee7..22778d0e24f5 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1205,21 +1205,17 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (disk_id < 0)
return disk_id;
- msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT);
- if (!msb->disk) {
- rc = -ENOMEM;
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (rc)
goto out_release_id;
- }
- msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &mspro_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(msb->queue)) {
- rc = PTR_ERR(msb->queue);
- msb->queue = NULL;
- goto out_put_disk;
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ if (IS_ERR(msb->disk)) {
+ rc = PTR_ERR(msb->disk);
+ goto out_free_tag_set;
}
-
- msb->queue->queuedata = card;
+ msb->queue = msb->disk->queue;
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
@@ -1228,10 +1224,10 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->disk->major = major;
msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
+ msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
msb->disk->fops = &ms_block_bdops;
msb->usage_count = 1;
msb->disk->private_data = msb;
- msb->disk->queue = msb->queue;
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
@@ -1247,8 +1243,8 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->active = 1;
return 0;
-out_put_disk:
- put_disk(msb->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
mutex_lock(&mspro_block_disk_lock);
idr_remove(&mspro_block_disk_idr, disk_id);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 102dbb8080da..29271ad4728a 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -799,9 +799,9 @@ static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
return 0;
err_out:
- memstick_free_host(msh);
pm_runtime_disable(ms_dev(host));
pm_runtime_put_noidle(ms_dev(host));
+ memstick_free_host(msh);
return err;
}
@@ -828,9 +828,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
}
mutex_unlock(&host->host_mutex);
- memstick_remove_host(msh);
- memstick_free_host(msh);
-
/* Balance possible unbalanced usage count
* e.g. unconditional module removal
*/
@@ -838,10 +835,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
pm_runtime_put(ms_dev(host));
pm_runtime_disable(ms_dev(host));
- platform_set_drvdata(pdev, NULL);
-
+ memstick_remove_host(msh);
dev_dbg(ms_dev(host),
": Realtek USB Memstick controller has been removed\n");
+ memstick_free_host(msh);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
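
Note: the reordering makes the teardown paths unwind in reverse probe order: the runtime-PM calls run first, while they can still dereference the host, and memstick_free_host() runs last because nothing may touch msh after it. The remove() tail now reads, condensed:

	pm_runtime_put(ms_dev(host));
	pm_runtime_disable(ms_dev(host));
	memstick_remove_host(msh);
	memstick_free_host(msh);	/* frees msh; must be last */
	platform_set_drvdata(pdev, NULL);
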
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 5c7f2b100191..99c4e1a80ae0 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1499,24 +1499,6 @@ config MFD_TPS65217
This driver can also be built as a module. If so, the module
will be called tps65217.
-config MFD_TPS68470
- bool "TI TPS68470 Power Management / LED chips"
- depends on ACPI && PCI && I2C=y
- depends on I2C_DESIGNWARE_PLATFORM=y
- select MFD_CORE
- select REGMAP_I2C
- help
- If you say yes here you get support for the TPS68470 series of
- Power Management / LED chips.
-
- These include voltage regulators, LEDs and other features
- that are often used in portable devices.
-
- This option is a bool as it provides an ACPI operation
- region, which must be available before any of the devices
- using this are probed. This option also configures the
- designware-i2c driver to be built-in, for the same reason.
-
config MFD_TI_LP873X
tristate "TI LP873X Power Management IC"
depends on I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 4f6d2b8a5f76..8b322d89a0c5 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -105,7 +105,6 @@ obj-$(CONFIG_MFD_TPS65910) += tps65910.o
obj-$(CONFIG_MFD_TPS65912) += tps65912-core.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
-obj-$(CONFIG_MFD_TPS68470) += tps68470.o
obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
index c73ec78f255b..99b9c113f964 100644
--- a/drivers/mfd/ioc3.c
+++ b/drivers/mfd/ioc3.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index db734f2831ff..83f3ffbdbb4c 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -5,6 +5,8 @@
#include <linux/interrupt.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/core.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -13,7 +15,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static struct irq_top_t mt6358_ints[] = {
+#define MTK_PMIC_REG_WIDTH 16
+
+static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
MT6358_TOP_GEN(PSC),
@@ -24,6 +28,31 @@ static struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(MISC),
};
+static const struct irq_top_t mt6359_ints[] = {
+ MT6359_TOP_GEN(BUCK),
+ MT6359_TOP_GEN(LDO),
+ MT6359_TOP_GEN(PSC),
+ MT6359_TOP_GEN(SCK),
+ MT6359_TOP_GEN(BM),
+ MT6359_TOP_GEN(HK),
+ MT6359_TOP_GEN(AUD),
+ MT6359_TOP_GEN(MISC),
+};
+
+static struct pmic_irq_data mt6358_irqd = {
+ .num_top = ARRAY_SIZE(mt6358_ints),
+ .num_pmic_irqs = MT6358_IRQ_NR,
+ .top_int_status_reg = MT6358_TOP_INT_STATUS0,
+ .pmic_ints = mt6358_ints,
+};
+
+static struct pmic_irq_data mt6359_irqd = {
+ .num_top = ARRAY_SIZE(mt6359_ints),
+ .num_pmic_irqs = MT6359_IRQ_NR,
+ .top_int_status_reg = MT6359_TOP_INT_STATUS0,
+ .pmic_ints = mt6359_ints,
+};
+
static void pmic_irq_enable(struct irq_data *data)
{
unsigned int hwirq = irqd_to_hwirq(data);
@@ -62,15 +91,15 @@ static void pmic_irq_sync_unlock(struct irq_data *data)
/* Find out the IRQ group */
top_gp = 0;
while ((top_gp + 1) < irqd->num_top &&
- i >= mt6358_ints[top_gp + 1].hwirq_base)
+ i >= irqd->pmic_ints[top_gp + 1].hwirq_base)
top_gp++;
/* Find the IRQ registers */
- gp_offset = i - mt6358_ints[top_gp].hwirq_base;
- int_regs = gp_offset / MT6358_REG_WIDTH;
- shift = gp_offset % MT6358_REG_WIDTH;
- en_reg = mt6358_ints[top_gp].en_reg +
- (mt6358_ints[top_gp].en_reg_shift * int_regs);
+ gp_offset = i - irqd->pmic_ints[top_gp].hwirq_base;
+ int_regs = gp_offset / MTK_PMIC_REG_WIDTH;
+ shift = gp_offset % MTK_PMIC_REG_WIDTH;
+ en_reg = irqd->pmic_ints[top_gp].en_reg +
+ (irqd->pmic_ints[top_gp].en_reg_shift * int_regs);
regmap_update_bits(chip->regmap, en_reg, BIT(shift),
irqd->enable_hwirq[i] << shift);
@@ -95,10 +124,11 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
unsigned int irq_status, sta_reg, status;
unsigned int hwirq, virq;
int i, j, ret;
+ struct pmic_irq_data *irqd = chip->irq_data;
- for (i = 0; i < mt6358_ints[top_gp].num_int_regs; i++) {
- sta_reg = mt6358_ints[top_gp].sta_reg +
- mt6358_ints[top_gp].sta_reg_shift * i;
+ for (i = 0; i < irqd->pmic_ints[top_gp].num_int_regs; i++) {
+ sta_reg = irqd->pmic_ints[top_gp].sta_reg +
+ irqd->pmic_ints[top_gp].sta_reg_shift * i;
ret = regmap_read(chip->regmap, sta_reg, &irq_status);
if (ret) {
@@ -114,8 +144,8 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
do {
j = __ffs(status);
- hwirq = mt6358_ints[top_gp].hwirq_base +
- MT6358_REG_WIDTH * i + j;
+ hwirq = irqd->pmic_ints[top_gp].hwirq_base +
+ MTK_PMIC_REG_WIDTH * i + j;
virq = irq_find_mapping(chip->irq_domain, hwirq);
if (virq)
@@ -131,12 +161,12 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
static irqreturn_t mt6358_irq_handler(int irq, void *data)
{
struct mt6397_chip *chip = data;
- struct pmic_irq_data *mt6358_irq_data = chip->irq_data;
+ struct pmic_irq_data *irqd = chip->irq_data;
unsigned int bit, i, top_irq_status = 0;
int ret;
ret = regmap_read(chip->regmap,
- mt6358_irq_data->top_int_status_reg,
+ irqd->top_int_status_reg,
&top_irq_status);
if (ret) {
dev_err(chip->dev,
@@ -144,8 +174,8 @@ static irqreturn_t mt6358_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- for (i = 0; i < mt6358_irq_data->num_top; i++) {
- bit = BIT(mt6358_ints[i].top_offset);
+ for (i = 0; i < irqd->num_top; i++) {
+ bit = BIT(irqd->pmic_ints[i].top_offset);
if (top_irq_status & bit) {
mt6358_irq_sp_handler(chip, i);
top_irq_status &= ~bit;
@@ -180,17 +210,22 @@ int mt6358_irq_init(struct mt6397_chip *chip)
int i, j, ret;
struct pmic_irq_data *irqd;
- irqd = devm_kzalloc(chip->dev, sizeof(*irqd), GFP_KERNEL);
- if (!irqd)
- return -ENOMEM;
+ switch (chip->chip_id) {
+ case MT6358_CHIP_ID:
+ chip->irq_data = &mt6358_irqd;
+ break;
- chip->irq_data = irqd;
+ case MT6359_CHIP_ID:
+ chip->irq_data = &mt6359_irqd;
+ break;
- mutex_init(&chip->irqlock);
- irqd->top_int_status_reg = MT6358_TOP_INT_STATUS0;
- irqd->num_pmic_irqs = MT6358_IRQ_NR;
- irqd->num_top = ARRAY_SIZE(mt6358_ints);
+ default:
+ dev_err(chip->dev, "unsupported chip: 0x%x\n", chip->chip_id);
+ return -ENODEV;
+ }
+ mutex_init(&chip->irqlock);
+ irqd = chip->irq_data;
irqd->enable_hwirq = devm_kcalloc(chip->dev,
irqd->num_pmic_irqs,
sizeof(*irqd->enable_hwirq),
@@ -207,10 +242,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
/* Disable all interrupts for initializing */
for (i = 0; i < irqd->num_top; i++) {
- for (j = 0; j < mt6358_ints[i].num_int_regs; j++)
+ for (j = 0; j < irqd->pmic_ints[i].num_int_regs; j++)
regmap_write(chip->regmap,
- mt6358_ints[i].en_reg +
- mt6358_ints[i].en_reg_shift * j, 0);
+ irqd->pmic_ints[i].en_reg +
+ irqd->pmic_ints[i].en_reg_shift * j, 0);
}
chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
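
Note: hiding the per-chip tables behind struct pmic_irq_data makes this interrupt code data-driven; the shared handler only ever dereferences irqd->pmic_ints. Supporting a further PMIC would, roughly, need just one more table and one switch case (the MT63XX names below are hypothetical):

static struct pmic_irq_data mt63xx_irqd = {
	.num_top		= ARRAY_SIZE(mt63xx_ints),
	.num_pmic_irqs		= MT63XX_IRQ_NR,
	.top_int_status_reg	= MT63XX_TOP_INT_STATUS0,
	.pmic_ints		= mt63xx_ints,
};
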
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 7518d74c3b4c..9a615f75fbde 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -13,9 +13,11 @@
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
#define MT6323_RTC_BASE 0x8000
@@ -99,6 +101,17 @@ static const struct mfd_cell mt6358_devs[] = {
},
};
+static const struct mfd_cell mt6359_devs[] = {
+ { .name = "mt6359-regulator", },
+ {
+ .name = "mt6359-rtc",
+ .num_resources = ARRAY_SIZE(mt6358_rtc_resources),
+ .resources = mt6358_rtc_resources,
+ .of_compatible = "mediatek,mt6358-rtc",
+ },
+ { .name = "mt6359-sound", },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -149,6 +162,14 @@ static const struct chip_data mt6358_core = {
.irq_init = mt6358_irq_init,
};
+static const struct chip_data mt6359_core = {
+ .cid_addr = MT6359_SWCID,
+ .cid_shift = 8,
+ .cells = mt6359_devs,
+ .cell_size = ARRAY_SIZE(mt6359_devs),
+ .irq_init = mt6358_irq_init,
+};
+
static const struct chip_data mt6397_core = {
.cid_addr = MT6397_CID,
.cid_shift = 0,
@@ -219,6 +240,9 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
+ .compatible = "mediatek,mt6359",
+ .data = &mt6359_core,
+ }, {
.compatible = "mediatek,mt6397",
.data = &mt6397_core,
}, {
diff --git a/drivers/mfd/tps68470.c b/drivers/mfd/tps68470.c
deleted file mode 100644
index 4a4df4ffd18c..000000000000
--- a/drivers/mfd/tps68470.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * TPS68470 chip Parent driver
- *
- * Copyright (C) 2017 Intel Corporation
- *
- * Authors:
- * Rajmohan Mani <rajmohan.mani@intel.com>
- * Tianshu Qiu <tian.shu.qiu@intel.com>
- * Jian Xu Zheng <jian.xu.zheng@intel.com>
- * Yuning Pu <yuning.pu@intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tps68470.h>
-#include <linux/regmap.h>
-
-static const struct mfd_cell tps68470s[] = {
- { .name = "tps68470-gpio" },
- { .name = "tps68470_pmic_opregion" },
-};
-
-static const struct regmap_config tps68470_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = TPS68470_REG_MAX,
-};
-
-static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
-{
- unsigned int version;
- int ret;
-
- /* Force software reset */
- ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
- if (ret)
- return ret;
-
- ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
- if (ret) {
- dev_err(dev, "Failed to read revision register: %d\n", ret);
- return ret;
- }
-
- dev_info(dev, "TPS68470 REVID: 0x%x\n", version);
-
- return 0;
-}
-
-static int tps68470_probe(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct regmap *regmap;
- int ret;
-
- regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "devm_regmap_init_i2c Error %ld\n",
- PTR_ERR(regmap));
- return PTR_ERR(regmap);
- }
-
- i2c_set_clientdata(client, regmap);
-
- ret = tps68470_chip_init(dev, regmap);
- if (ret < 0) {
- dev_err(dev, "TPS68470 Init Error %d\n", ret);
- return ret;
- }
-
- ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, tps68470s,
- ARRAY_SIZE(tps68470s), NULL, 0, NULL);
- if (ret < 0) {
- dev_err(dev, "devm_mfd_add_devices failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct acpi_device_id tps68470_acpi_ids[] = {
- {"INT3472"},
- {},
-};
-
-static struct i2c_driver tps68470_driver = {
- .driver = {
- .name = "tps68470",
- .acpi_match_table = tps68470_acpi_ids,
- },
- .probe_new = tps68470_probe,
-};
-builtin_i2c_driver(tps68470_driver);
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index a07674ed0596..4c5621b17a6f 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
pcr->ic_version = rtl8411_get_ic_version(pcr);
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 39a6a7ecc32e..29f5414072bf 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 8200af22b529..4bcfbc9afbac 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 781a86def59a..ffc128278613 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 89e6f124ca5c..c748eaf1ec1f 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index b2676e7f5027..53f3a1f45c4a 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 080a7d67a8e1..9b42b20a3e5a 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 6c64dade8e1a..1fd4e0e50730 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = 0x00;
pcr->sd30_drive_sel_3v3 = 0x00;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 273311184669..baf83594a01d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
if (pcr->aspm_enabled == enable)
return;
- if (pcr->aspm_en & 0x02)
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
- else
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ enable ? pcr->aspm_en : 0);
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_en & 0x02)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ else
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ }
if (!enable && (pcr->aspm_en & 0x02))
mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ if (pcr->aspm_mode == ASPM_MODE_REG)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
int err;
+ u16 cfg_val;
+ u8 val;
spin_lock_init(&pcr->lock);
mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
if (!pcr->slots)
return -ENOMEM;
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+ if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+ pcr->aspm_enabled = true;
+ else
+ pcr->aspm_enabled = false;
+
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
- u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
- rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
- if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
- pcr->aspm_enabled = false;
- else
- pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
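
Note: the rework splits ASPM handling into two flavours: ASPM_MODE_CFG toggles the standard PCIe Link Control bits through config space, while ASPM_MODE_REG keeps the old vendor-register path. Reading back the initial state therefore moves into rtsx_pci_init_chip(), where aspm_mode is already known; the CFG-mode readback reduces to:

	u16 lnkctl;

	pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &lnkctl);
	pcr->aspm_enabled = !!(lnkctl & PCI_EXP_LNKCTL_ASPM_L1);
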
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 64d33e368509..67c5b452dd35 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -101,8 +101,9 @@
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
- if (verbose > 1) \
+ if (verbose > 1) { \
printk(KERN_INFO a); \
+ } \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 0e8254d0cf0b..a164896dc6d4 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -463,7 +463,7 @@ void lkdtm_DOUBLE_FAULT(void)
#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
- if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
/* Reset the keys of current task */
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
@@ -477,8 +477,8 @@ noinline void lkdtm_CORRUPT_PAC(void)
#define CORRUPT_PAC_ITERATE 10
int i;
- if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
- pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");
if (!system_supports_address_auth()) {
pr_err("FAIL: CPU lacks pointer authentication feature\n");
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index a98f6b895af7..aab3ebfa9fc4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_request_autosuspend(dev->dev);
+
list_move_tail(&cb->list, &cl->rd_pending);
return 0;
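
Note: the two added calls are the usual runtime-PM autosuspend idiom: refresh the last-busy timestamp, then queue an asynchronous suspend request that fires only after the autosuspend delay elapses. In a generic driver the pair typically closes an I/O section, sketched below with hypothetical foo_* names:

static int foo_do_io(struct foo_dev *fdev)
{
	int ret;

	ret = pm_runtime_resume_and_get(fdev->dev);
	if (ret < 0)
		return ret;

	ret = foo_transfer(fdev);	/* the actual work */

	pm_runtime_mark_last_busy(fdev->dev);
	pm_request_autosuspend(fdev->dev);	/* async, honours the delay */
	return ret;
}
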
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 723825524ea0..d7ef61e602ed 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -49,8 +49,8 @@ struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
struct vm_area_struct *vma;
- vma = find_vma(current->mm, vaddr);
- if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
+ vma = vma_lookup(current->mm, vaddr);
+ if (vma && vma->vm_ops == &gru_vm_ops)
return vma;
return NULL;
}
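
Note: vma_lookup() returns a VMA only if it actually contains the address, which is exactly the open-coded vm_start check being dropped here. Its behaviour is equivalent to:

	struct vm_area_struct *vma = find_vma(mm, addr);

	/* find_vma() returns the first VMA that ends above addr, which
	 * may also start above it; reject that case. */
	if (vma && addr < vma->vm_start)
		vma = NULL;
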
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 689eb9afeeed..88f4c215caa6 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for media (i.e., flash cards)
*
@@ -1004,6 +1005,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+ break;
+ }
+ fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
@@ -1014,6 +1021,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
+ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ mmc_cmdq_enable(card);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1159,7 +1168,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
+ ret = mmc_flush_cache(card->host);
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f194940c5974..b039dcff17f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1582,7 +1582,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
{
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
- bool use_r1b_resp = false;
+ bool use_r1b_resp;
int err;
mmc_retune_hold(card->host);
@@ -1650,23 +1650,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
busy_timeout = mmc_erase_timeout(card, arg, qty);
- /*
- * If the host controller supports busy signalling and the timeout for
- * the erase operation does not exceed the max_busy_timeout, we should
- * use R1B response. Or we need to prevent the host from doing hw busy
- * detection, which is done by converting to a R1 response instead.
- * Note, some hosts requires R1B, which also means they are on their own
- * when it comes to deal with the busy timeout.
- */
- if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
- card->host->max_busy_timeout &&
- busy_timeout > card->host->max_busy_timeout) {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout;
- use_r1b_resp = true;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
@@ -1687,7 +1671,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
goto out;
/* Let's poll to find out when the erase operation completes. */
- err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
+ err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
out:
mmc_retune_release(card->host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index db3c9c68875d..0c4de2030b3f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -30,6 +30,7 @@ struct mmc_bus_ops {
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
+ int (*flush_cache)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -172,4 +173,12 @@ static inline bool mmc_cache_enabled(struct mmc_host *host)
return false;
}
+static inline int mmc_flush_cache(struct mmc_host *host)
+{
+ if (host->bus_ops->flush_cache)
+ return host->bus_ops->flush_cache(host);
+
+ return 0;
+}
+
#endif
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9ec84c86c46a..3fdbc801e64a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -26,6 +26,7 @@
static DECLARE_FAULT_ATTR(fail_default_attr);
static char *fail_request;
module_param(fail_request, charp, 0);
+MODULE_PARM_DESC(fail_request, "default fault injection attributes");
#endif /* CONFIG_FAIL_MMC_REQUEST */
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0b0577990ddc..eda4a1892c33 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -388,6 +388,9 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_NO_SD;
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ if (device_property_read_bool(dev, "no-mmc-hs400"))
+ host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
+ MMC_CAP2_HS400_ES);
/* Must be after "non-removable" check */
if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8674c3e0c02c..838726b68ff3 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -28,6 +28,7 @@
#define DEFAULT_CMD6_TIMEOUT_MS 500
#define MIN_CACHE_EN_TIMEOUT_MS 1600
+#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -1905,11 +1906,20 @@ static int mmc_can_sleep(struct mmc_card *card)
return card->ext_csd.rev >= 3;
}
+static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
+{
+ struct mmc_host *host = cb_data;
+
+ *busy = host->ops->card_busy(host);
+ return 0;
+}
+
static int mmc_sleep(struct mmc_host *host)
{
struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ bool use_r1b_resp;
int err;
/* Re-tuning can't be done once the card is deselected */
@@ -1922,35 +1932,27 @@ static int mmc_sleep(struct mmc_host *host)
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
cmd.arg |= 1 << 15;
-
- /*
- * If the max_busy_timeout of the host is specified, validate it against
- * the sleep cmd timeout. A failure means we need to prevent the host
- * from doing hw busy detection, which is done by converting to a R1
- * response instead of a R1B. Note, some hosts requires R1B, which also
- * means they are on their own when it comes to deal with the busy
- * timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout)) {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out_release;
/*
- * If the host does not wait while the card signals busy, then we will
- * will have to wait the sleep/awake timeout. Note, we cannot use the
- * SEND_STATUS command to poll the status because that command (and most
- * others) is invalid while the card sleeps.
+ * If the host does not wait while the card signals busy, then we can
+ * try to poll, but only if the host supports HW polling, as the
+ * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
+ * to wait the sleep/awake timeout.
*/
- if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ goto out_release;
+
+ if (!host->ops->card_busy) {
mmc_delay(timeout_ms);
+ goto out_release;
+ }
+
+ err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
@@ -2035,6 +2037,25 @@ static bool _mmc_cache_enabled(struct mmc_host *host)
host->card->ext_csd.cache_ctrl & 1;
}
+/*
+ * Flush the internal cache of the eMMC to non-volatile storage.
+ */
+static int _mmc_flush_cache(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(host), err);
+ }
+
+ return err;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@@ -2046,7 +2067,7 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
if (mmc_card_suspended(host->card))
goto out;
- err = mmc_flush_cache(host->card);
+ err = _mmc_flush_cache(host);
if (err)
goto out;
@@ -2187,7 +2208,7 @@ static int _mmc_hw_reset(struct mmc_host *host)
* In the case of recovery, we can't expect flushing the cache to work
* always, but we have a go and ignore errors.
*/
- mmc_flush_cache(host->card);
+ _mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
mmc_can_reset(card)) {
@@ -2215,6 +2236,7 @@ static const struct mmc_bus_ops mmc_ops = {
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
+ .flush_cache = _mmc_flush_cache,
};
/*
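
Note: the mmc_ops.c hunks below complete the busy-polling rework: __mmc_poll_for_busy() now takes a callback plus opaque data, CMD13-based users wrap their state in struct mmc_busy_data via mmc_poll_for_busy(), and callers such as mmc_sleep() can poll ->card_busy() directly. The callback contract, sketched with a hypothetical foo_busy_cb:

static int foo_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_host *host = cb_data;

	*busy = host->ops->card_busy(host);
	return 0;	/* a non-zero return aborts the poll loop */
}

/* usage: err = __mmc_poll_for_busy(card, timeout_ms, &foo_busy_cb, host); */
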
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 5756781fef37..973756ed4016 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -20,7 +20,6 @@
#include "mmc_ops.h"
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
-#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
static const u8 tuning_blk_pattern_4bit[] = {
@@ -53,6 +52,12 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+struct mmc_busy_data {
+ struct mmc_card *card;
+ bool retry_crc_err;
+ enum mmc_busy_cmd busy_cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -246,9 +251,8 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
* NOTE: void *buf, caller for the buf is required to use DMA-capable
* buffer or on-stack buffer (with some overhead in callee).
*/
-static int
-mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
- u32 opcode, void *buf, unsigned len)
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
@@ -259,7 +263,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
mrq.data = &data;
cmd.opcode = opcode;
- cmd.arg = 0;
+ cmd.arg = args;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
@@ -305,7 +309,7 @@ static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
if (!cxd_tmp)
return -ENOMEM;
- ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
+ ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
if (ret)
goto err;
@@ -353,7 +357,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!ext_csd)
return -ENOMEM;
- err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
512);
if (err)
kfree(ext_csd);
@@ -424,10 +428,10 @@ int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
return mmc_switch_status_error(card->host, status);
}
-static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd, bool *busy)
+static int mmc_busy_cb(void *cb_data, bool *busy)
{
- struct mmc_host *host = card->host;
+ struct mmc_busy_data *data = cb_data;
+ struct mmc_host *host = data->card->host;
u32 status = 0;
int err;
@@ -436,22 +440,23 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
- err = mmc_send_status(card, &status);
- if (retry_crc_err && err == -EILSEQ) {
+ err = mmc_send_status(data->card, &status);
+ if (data->retry_crc_err && err == -EILSEQ) {
*busy = true;
return 0;
}
if (err)
return err;
- switch (busy_cmd) {
+ switch (data->busy_cmd) {
case MMC_BUSY_CMD6:
- err = mmc_switch_status_error(card->host, status);
+ err = mmc_switch_status_error(host, status);
break;
case MMC_BUSY_ERASE:
err = R1_STATUS(status) ? -EIO : 0;
break;
case MMC_BUSY_HPI:
+ case MMC_BUSY_EXTR_SINGLE:
break;
default:
err = -EINVAL;
@@ -464,9 +469,9 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
-static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd)
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data)
{
struct mmc_host *host = card->host;
int err;
@@ -475,16 +480,6 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /*
- * In cases when not allowed to poll by using CMD13 or because we aren't
- * capable of polling by using ->card_busy(), then rely on waiting the
- * stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- return 0;
- }
-
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
/*
@@ -493,7 +488,7 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
expired = time_after(jiffies, timeout);
- err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+ err = (*busy_cb)(cb_data, &busy);
if (err)
return err;
@@ -516,9 +511,36 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd)
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
+{
+ struct mmc_busy_data cb_data;
+
+ cb_data.card = card;
+ cb_data.retry_crc_err = retry_crc_err;
+ cb_data.busy_cmd = busy_cmd;
+
+ return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
+}
+
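With the refactoring above, __mmc_poll_for_busy() is driven by a caller-supplied callback, which lets bus code poll card-specific busy indications (the SD power-off-notify path later in this patch is the in-tree user). A minimal sketch of a custom callback, with hypothetical names:

struct my_busy_data {
	struct mmc_card *card;
};

static int my_busy_cb(void *cb_data, bool *busy)
{
	struct my_busy_data *data = cb_data;
	u32 status;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	/* Treat "not ready for data" as still busy. */
	*busy = !(status & R1_READY_FOR_DATA);
	return 0;
}

/* ... then: err = __mmc_poll_for_busy(card, timeout_ms, &my_busy_cb, &data); */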
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms)
{
- return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+ /*
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to an R1 response
+ * instead of an R1B. Note, some hosts require R1B, which also means
+ * they are on their own when it comes to dealing with the busy timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout)) {
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ return false;
+ }
+
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ cmd->busy_timeout = timeout_ms;
+ return true;
}
/**
@@ -543,7 +565,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
- bool use_r1b_resp = true;
+ bool use_r1b_resp;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -554,29 +576,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout_ms = card->ext_csd.generic_cmd6_time;
}
- /*
- * If the max_busy_timeout of the host is specified, make sure it's
- * enough to fit the used timeout_ms. In case it's not, let's instruct
- * the host to avoid HW busy detection, by converting to a R1 response
- * instead of a R1B. Note, some hosts requires R1B, which also means
- * they are on their own when it comes to deal with the busy timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_CMD_AC;
- if (use_r1b_resp) {
- cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, retries);
if (err)
@@ -587,9 +592,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_host_is_spi(host))
goto out_tim;
+ /*
+ * If the host doesn't support HW polling via the ->card_busy() ops and
+ * polling by using CMD13 isn't allowed either, then we need to rely on
+ * waiting the stated timeout to be sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ goto out_tim;
+ }
+
/* Let's try to poll to find out when the command is completed. */
- err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
- MMC_BUSY_CMD6);
+ err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
if (err)
goto out;
@@ -686,7 +700,7 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
struct mmc_command cmd = {};
@@ -709,7 +723,7 @@ int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
return mmc_wait_for_cmd(host, &cmd, 0);
}
-EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
@@ -813,28 +827,17 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
{
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
struct mmc_host *host = card->host;
- bool use_r1b_resp = true;
+ bool use_r1b_resp = false;
struct mmc_command cmd = {};
int err;
cmd.opcode = card->ext_csd.hpi_cmd;
cmd.arg = card->rca << 16 | 1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- /*
- * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
- * In case it doesn't, let's instruct the host to avoid HW busy
- * detection, by using a R1 response instead of R1B.
- */
- if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
- use_r1b_resp = false;
-
- if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout_ms;
- } else {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- use_r1b_resp = false;
- }
+ if (cmd.opcode == MMC_STOP_TRANSMISSION)
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
+ busy_timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
@@ -848,7 +851,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
return 0;
/* Let's poll to find out when the HPI request completes. */
- return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
+ return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
@@ -961,26 +964,6 @@ void mmc_run_bkops(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_run_bkops);
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
- int err = 0;
-
- if (mmc_cache_enabled(card->host)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1,
- MMC_CACHE_FLUSH_TIMEOUT_MS);
- if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(card->host), err);
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 7bc1cfb0654c..41ab4f573a31 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -14,10 +14,12 @@ enum mmc_busy_cmd {
MMC_BUSY_CMD6,
MMC_BUSY_ERASE,
MMC_BUSY_HPI,
+ MMC_BUSY_EXTR_SINGLE,
};
struct mmc_host;
struct mmc_card;
+struct mmc_command;
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
@@ -25,6 +27,8 @@ int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_set_relative_addr(struct mmc_card *card);
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
int mmc_send_status(struct mmc_card *card, u32 *status);
@@ -35,15 +39,19 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms);
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd);
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool send_status, bool retry_crc_err, unsigned int retries);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms);
void mmc_run_bkops(struct mmc_card *card);
-int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 2c48d6504101..4646b7a03db6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -66,6 +66,14 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
+
+struct sd_busy_data {
+ struct mmc_card *card;
+ u8 *reg_buf;
+};
+
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -222,7 +230,9 @@ static int mmc_decode_scr(struct mmc_card *card)
else
card->erased_byte = 0x0;
- if (scr->sda_spec3)
+ if (scr->sda_spec4)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 4);
+ else if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
@@ -847,11 +857,13 @@ try_again:
return err;
/*
- * In case CCS and S18A in the response is set, start Signal Voltage
- * Switch procedure. SPI mode doesn't support CMD11.
+ * In case the S18A bit is set in the response, let's start the signal
+ * voltage switch procedure. SPI mode doesn't support CMD11.
+ * Note that, according to the spec, the S18A bit is not valid unless
+ * the CCS bit is set as well. We deliberately deviate from the spec with
+ * regard to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr &&
- ((*rocr & 0x41000000) == 0x41000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -994,6 +1006,380 @@ static bool mmc_sd_card_using_v18(struct mmc_card *card)
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
+static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
+ u8 reg_data)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ /*
+ * Arguments of CMD49:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] MW - mask write mode (0 = disable).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte).
+ */
+ cmd.arg = fno << 27 | page << 18 | offset << 9;
+
+ /* The first byte in the buffer is the data to be written. */
+ reg_buf[0] = reg_data;
+
+ data.flags = MMC_DATA_WRITE;
+ data.blksz = 512;
+ data.blocks = 1;
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, reg_buf, 512);
+
+ cmd.opcode = SD_WRITE_EXTR_SINGLE;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mmc_set_data_timeout(&data, card);
+ mmc_wait_for_req(host, &mrq);
+
+ kfree(reg_buf);
+
+ /*
+ * Note that the SD card is allowed to signal busy on DAT0 for up to 1s
+ * after CMD49. However, we leave this to be managed by the caller.
+ */
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
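To make the CMD49 bit packing above concrete, a hedged helper that builds the same argument (hypothetical; the driver open-codes the expression):

static u32 sd_ext_write_arg(u8 fno, u8 page, u16 offset)
{
	/* MIO = 0 (memory), MW = 0 (mask write off), length = 0 (1 byte) */
	return (u32)fno << 27 | (u32)page << 18 | (u32)offset << 9;
}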
+static int sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset, u16 len, u8 *reg_buf)
+{
+ u32 cmd_args;
+
+ /*
+ * Command arguments of CMD48:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] reserved (0).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte, 1ff = 512 bytes).
+ */
+ cmd_args = fno << 27 | page << 18 | offset << 9 | (len - 1);
+
+ return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
+ cmd_args, reg_buf, 512);
+}
+
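Note that the 9-bit length field is encoded as len - 1: a 1-byte read uses 0x000 and a full 512-byte read uses 0x1ff. A hedged helper mirroring the packing (hypothetical):

static u32 sd_ext_read_arg(u8 fno, u8 page, u16 offset, u16 len)
{
	/* len must be 1..512; 1 encodes to 0x000, 512 to 0x1ff */
	return (u32)fno << 27 | (u32)page << 18 | (u32)offset << 9 |
	       (len - 1);
}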
+static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /* Read the extension register for power management function. */
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PM func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PM revision consists of 4 bits. */
+ card->ext_power.rev = reg_buf[0] & 0xf;
+
+ /* Power Off Notification support at bit 4. */
+ if (reg_buf[1] & BIT(4))
+ card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
+
+ /* Power Sustenance support at bit 5. */
+ if (reg_buf[1] & BIT(5))
+ card->ext_power.feature_support |= SD_EXT_POWER_SUSTENANCE;
+
+ /* Power Down Mode support at bit 6. */
+ if (reg_buf[1] & BIT(6))
+ card->ext_power.feature_support |= SD_EXT_POWER_DOWN_MODE;
+
+ card->ext_power.fno = fno;
+ card->ext_power.page = page;
+ card->ext_power.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PERF func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PERF revision. */
+ card->ext_perf.rev = reg_buf[0];
+
+ /* FX_EVENT support at bit 0. */
+ if (reg_buf[1] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_FX_EVENT;
+
+ /* Card initiated self-maintenance support at bit 0. */
+ if (reg_buf[2] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CARD_MAINT;
+
+ /* Host initiated self-maintenance support at bit 1. */
+ if (reg_buf[2] & BIT(1))
+ card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
+
+ /* Cache support at bit 0. */
+ if (reg_buf[4] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
+
+ /* Command queue support indicated via queue depth bits (0 to 4). */
+ if (reg_buf[6] & 0x1f)
+ card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
+
+ card->ext_perf.fno = fno;
+ card->ext_perf.page = page;
+ card->ext_perf.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg(struct mmc_card *card, u8 *gen_info_buf,
+ u16 *next_ext_addr)
+{
+ u8 num_regs, fno, page;
+ u16 sfc, offset, ext = *next_ext_addr;
+ u32 reg_addr;
+
+ /*
+ * Parse only one register set per extension, as that is sufficient to
+ * support the standard functions. This means another 48 bytes in the
+ * buffer must be available.
+ */
+ if (ext + 48 > 512)
+ return -EFAULT;
+
+ /* Standard Function Code */
+ memcpy(&sfc, &gen_info_buf[ext], 2);
+
+ /* Address to the next extension. */
+ memcpy(next_ext_addr, &gen_info_buf[ext + 40], 2);
+
+ /* Number of registers for this extension. */
+ num_regs = gen_info_buf[ext + 42];
+
+ /* We support only one register per extension. */
+ if (num_regs != 1)
+ return 0;
+
+ /* Extension register address. */
+ memcpy(&reg_addr, &gen_info_buf[ext + 44], 4);
+
+ /* 9 bits (0 to 8) contain the offset address. */
+ offset = reg_addr & 0x1ff;
+
+ /* 8 bits (9 to 16) contain the page number. */
+ page = reg_addr >> 9 & 0xff;
+
+ /* 4 bits (18 to 21) contain the function number. */
+ fno = reg_addr >> 18 & 0xf;
+
+ /* Standard Function Code for power management. */
+ if (sfc == 0x1)
+ return sd_parse_ext_reg_power(card, fno, page, offset);
+
+ /* Standard Function Code for performance enhancement. */
+ if (sfc == 0x2)
+ return sd_parse_ext_reg_perf(card, fno, page, offset);
+
+ return 0;
+}
+
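The fixed offsets used by sd_parse_ext_reg() correspond to this on-media layout of one extension entry (an illustrative packed struct, assuming the in-between bytes are reserved; the driver deliberately uses memcpy() instead):

struct sd_ext_entry {			/* illustrative only */
	__le16 sfc;			/* standard function code */
	u8 reserved0[38];
	__le16 next_ext_addr;		/* offset of the next extension */
	u8 num_reg_sets;		/* number of register sets */
	u8 reserved1;
	__le32 reg_set_addr;		/* [8:0] offset, [16:9] page, [21:18] fno */
} __packed;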
+static int sd_read_ext_regs(struct mmc_card *card)
+{
+ int err, i;
+ u8 num_ext, *gen_info_buf;
+ u16 rev, len, next_ext_addr;
+
+ if (mmc_host_is_spi(card->host))
+ return 0;
+
+ if (!(card->scr.cmds & SD_SCR_CMD48_SUPPORT))
+ return 0;
+
+ gen_info_buf = kzalloc(512, GFP_KERNEL);
+ if (!gen_info_buf)
+ return -ENOMEM;
+
+ /*
+ * Read 512 bytes of general info, which is found at function number 0,
+ * at page 0 and with no offset.
+ */
+ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ if (err) {
+ pr_warn("%s: error %d reading general info of SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* General info structure revision. */
+ memcpy(&rev, &gen_info_buf[0], 2);
+
+ /* Length of general info in bytes. */
+ memcpy(&len, &gen_info_buf[2], 2);
+
+ /* Number of extensions to be found. */
+ num_ext = gen_info_buf[4];
+
+ /* We support revision 0, but limit it to 512 bytes for simplicity. */
+ if (rev != 0 || len > 512) {
+ pr_warn("%s: non-supported SD ext reg layout\n",
+ mmc_hostname(card->host));
+ goto out;
+ }
+
+ /*
+ * Parse the extension registers. The first extension should start
+ * immediately after the general info header (16 bytes).
+ */
+ next_ext_addr = 16;
+ for (i = 0; i < num_ext; i++) {
+ err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
+ if (err) {
+ pr_warn("%s: error %d parsing SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+ }
+
+out:
+ kfree(gen_info_buf);
+ return err;
+}
+
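Likewise, the first 16 bytes read by sd_read_ext_regs() form the general information header (an illustrative sketch; field widths are inferred from the memcpy() offsets above):

struct sd_ext_gen_info_hdr {		/* illustrative only */
	__le16 rev;			/* structure revision; only 0 is supported */
	__le16 len;			/* length of general info in bytes */
	u8 num_ext;			/* number of extensions that follow */
	u8 reserved[11];		/* pads the header to 16 bytes */
} __packed;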
+static bool sd_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_perf.feature_enabled & SD_EXT_PERF_CACHE;
+}
+
+static int sd_flush_cache(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u8 *reg_buf, fno, page;
+ u16 offset;
+ int err;
+
+ if (!sd_cache_enabled(host))
+ return 0;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Flush Cache at bit 0 in the performance enhancement register at
+ * 261 bytes offset.
+ */
+ fno = card->ext_perf.fno;
+ page = card->ext_perf.page;
+ offset = card->ext_perf.offset + 261;
+
+ err = sd_write_ext_reg(card, fno, page, offset, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
+ /*
+ * Read back the Flush Cache bit. The card shall reset it to confirm
+ * that it has completed the flushing of the cache.
+ */
+ err = sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ if (reg_buf[0] & BIT(0))
+ err = -ETIMEDOUT;
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_enable_cache(struct mmc_card *card)
+{
+ u8 *reg_buf;
+ int err;
+
+ card->ext_perf.feature_enabled &= ~SD_EXT_PERF_CACHE;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Cache Enable at bit 0 in the performance enhancement register at
+ * 260 bytes offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ card->ext_perf.offset + 260, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Enable bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (!err)
+ card->ext_perf.feature_enabled |= SD_EXT_PERF_CACHE;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -1142,6 +1528,20 @@ retry:
}
}
+ if (!oldcard) {
+ /* Read/parse the extension registers. */
+ err = sd_read_ext_regs(card);
+ if (err)
+ goto free_card;
+ }
+
+ /* Enable internal SD cache if supported. */
+ if (card->ext_perf.feature_support & SD_EXT_PERF_CACHE) {
+ err = sd_enable_cache(card);
+ if (err)
+ goto free_card;
+ }
+
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
@@ -1213,21 +1613,84 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
+static int sd_can_poweroff_notify(struct mmc_card *card)
+{
+ return card->ext_power.feature_support & SD_EXT_POWER_OFF_NOTIFY;
+}
+
+static int sd_busy_poweroff_notify_cb(void *cb_data, bool *busy)
+{
+ struct sd_busy_data *data = cb_data;
+ struct mmc_card *card = data->card;
+ int err;
+
+ /*
+ * Read the status register for the power management function. It's at
+ * one byte offset and is one byte long. The Power Off Notification
+ * Ready is bit 0.
+ */
+ err = sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 1, 1, data->reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading status reg of PM func\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+
+ *busy = !(data->reg_buf[0] & BIT(0));
+ return 0;
+}
+
+static int sd_poweroff_notify(struct mmc_card *card)
+{
+ struct sd_busy_data cb_data;
+ u8 *reg_buf;
+ int err;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set the Power Off Notification bit in the power management settings
+ * register at 2 bytes offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 2, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Power Off Notify bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ cb_data.card = card;
+ cb_data.reg_buf = reg_buf;
+ err = __mmc_poll_for_busy(card, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ &sd_busy_poweroff_notify_cb, &cb_data);
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
static int _mmc_sd_suspend(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int err = 0;
mmc_claim_host(host);
- if (mmc_card_suspended(host->card))
+ if (mmc_card_suspended(card))
goto out;
- if (!mmc_host_is_spi(host))
+ if (sd_can_poweroff_notify(card))
+ err = sd_poweroff_notify(card);
+ else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
if (!err) {
mmc_power_off(host);
- mmc_card_set_suspended(host->card);
+ mmc_card_set_suspended(card);
}
out:
@@ -1331,6 +1794,8 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
.hw_reset = mmc_sd_hw_reset,
+ .cache_enabled = sd_cache_enabled,
+ .flush_cache = sd_flush_cache,
};
/*
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index d61ff811218c..ef8d1dce5af1 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -17,6 +17,7 @@
#include "core.h"
#include "sd_ops.h"
+#include "mmc_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
@@ -309,43 +310,18 @@ int mmc_app_send_scr(struct mmc_card *card)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {};
- struct mmc_command cmd = {};
- struct mmc_data data = {};
- struct scatterlist sg;
+ u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
value &= 0xF;
+ cmd_args = mode << 31 | 0x00FFFFFF;
+ cmd_args &= ~(0xF << (group * 4));
+ cmd_args |= value << (group * 4);
- mrq.cmd = &cmd;
- mrq.data = &data;
-
- cmd.opcode = SD_SWITCH;
- cmd.arg = mode << 31 | 0x00FFFFFF;
- cmd.arg &= ~(0xF << (group * 4));
- cmd.arg |= value << (group * 4);
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- data.blksz = 64;
- data.blocks = 1;
- data.flags = MMC_DATA_READ;
- data.sg = &sg;
- data.sg_len = 1;
-
- sg_init_one(&sg, resp, 64);
-
- mmc_set_data_timeout(&data, card);
-
- mmc_wait_for_req(card->host, &mrq);
-
- if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- return 0;
+ return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
+ 64);
}
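As a worked example of the packing above (with the in-tree convention that group 0 selects the access mode), switching to high-speed in set mode gives:

/*
 * mode = 1, group = 0, value = 0x1:
 *
 *	cmd_args  = 1 << 31 | 0x00FFFFFF;	-> 0x80ffffff
 *	cmd_args &= ~(0xF << (0 * 4));		-> 0x80fffff0
 *	cmd_args |= 0x1 << (0 * 4);		-> 0x80fffff1
 */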
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 3eb94ac2712e..68edf7a615be 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -937,11 +937,9 @@ static void mmc_sdio_detect(struct mmc_host *host)
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
- err = pm_runtime_get_sync(&host->card->dev);
- if (err < 0) {
- pm_runtime_put_noidle(&host->card->dev);
+ err = pm_runtime_resume_and_get(&host->card->dev);
+ if (err < 0)
goto out;
- }
}
mmc_claim_host(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index a4d4c757eea0..561184fa7eb9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -412,7 +412,7 @@ config MMC_SDHCI_MILBEAUT
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
- depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BCM_IPROC || ARCH_BRCMSTB || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF || ACPI
default ARCH_BCM_IPROC
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 93b0432bb601..38559a956330 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -45,17 +45,23 @@ static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
return desc + cq_host->task_desc_len;
}
+static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
+}
+
static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
- return cq_host->trans_desc_dma_base +
- (cq_host->mmc->max_segs * tag *
- cq_host->trans_desc_len);
+ size_t offset = get_trans_desc_offset(cq_host, tag);
+
+ return cq_host->trans_desc_dma_base + offset;
}
static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
- return cq_host->trans_desc_base +
- (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+ size_t offset = get_trans_desc_offset(cq_host, tag);
+
+ return cq_host->trans_desc_base + offset;
}
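The new helper centralizes the offset arithmetic; for example, assuming trans_desc_len = 16 bytes and max_segs = 128 (illustrative values, not taken from this hunk):

/*
 *	tag 0 -> offset 0
 *	tag 1 -> offset 16 * 128 = 2048
 *	data_size for cqe_qdepth = 32 -> 16 * 128 * 32 = 65536 bytes
 */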
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
@@ -146,7 +152,7 @@ static void cqhci_dumpregs(struct cqhci_host *cq_host)
}
/*
- * The allocated descriptor table for task, link & transfer descritors
+ * The allocated descriptor table for task, link & transfer descriptors
* looks like:
* |----------|
* |task desc | |->|----------|
@@ -194,8 +200,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
- cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- cq_host->mmc->cqe_qdepth;
+ cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 73731cd3ba23..9901208be797 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -17,7 +17,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/clk.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3c636edbb46..0db17bcc9c16 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -674,7 +674,7 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
cmdat |= JZ_MMC_CMDAT_WRITE;
if (host->use_dma) {
/*
- * The 4780's MMC controller has integrated DMA ability
+ * The JZ4780's MMC controller has integrated DMA ability
* in addition to being able to use the external DMA
* controller. It moves DMA control bits to a separate
* register. The DMA_SEL bit chooses the external
@@ -866,7 +866,7 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
writew(div, host->base + JZ_REG_MMC_CLKRT);
if (real_rate > 25000000) {
- if (host->version >= JZ_MMC_X1000) {
+ if (host->version >= JZ_MMC_JZ4780) {
writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
JZ_MMC_LPM_LOW_POWER_MODE_EN,
@@ -959,6 +959,7 @@ static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
+ { .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
@@ -1013,7 +1014,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
- dev_err(&pdev->dev, "Failed to ioremap base memory\n");
goto err_free_host;
}
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 016a6106151a..3f28eb4d17fe 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -165,6 +165,7 @@ struct meson_host {
unsigned int bounce_buf_size;
void *bounce_buf;
+ void __iomem *bounce_iomem_buf;
dma_addr_t bounce_dma_addr;
struct sd_emmc_desc *descs;
dma_addr_t descs_dma_addr;
@@ -745,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}
+/* Local sg copy to/from the bounce buffer, using memcpy_toio/fromio when dram_access_quirk is set */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+ size_t buflen, bool to_buffer)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct scatterlist *sgl = data->sg;
+ unsigned int nents = data->sg_len;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_buffer)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ /* When dram_access_quirk is set, the bounce buffer is an iomem mapping */
+ if (host->dram_access_quirk) {
+ if (to_buffer)
+ memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+ else
+ memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ } else {
+ if (to_buffer)
+ memcpy(host->bounce_buf + offset, miter.addr, len);
+ else
+ memcpy(miter.addr, host->bounce_buf + offset, len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+}
+
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct meson_host *host = mmc_priv(mmc);
@@ -788,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_to_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
dma_wmb();
}
@@ -958,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_from_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
}
next_cmd = meson_mmc_get_next_command(cmd);
@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
* instead of the DDR memory
*/
host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
- host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+ host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
} else {
/* data bounce buffer */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 9776a03a10f5..65c65bb5737f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -504,7 +504,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
/* else: R1 (most commands) */
}
- dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
+ dev_dbg(&host->spi->dev, " CMD%d, resp %s\n",
cmd->opcode, maptype(cmd));
/* send command, leaving chipselect active */
@@ -928,8 +928,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
while (length) {
t->len = min(length, blk_size);
- dev_dbg(&host->spi->dev,
- " mmc_spi: %s block, %d bytes\n",
+ dev_dbg(&host->spi->dev, " %s block, %d bytes\n",
(direction == DMA_TO_DEVICE) ? "write" : "read",
t->len);
@@ -974,7 +973,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
int tmp;
const unsigned statlen = sizeof(scratch->status);
- dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
+ dev_dbg(&spi->dev, " STOP_TRAN\n");
/* Tweak the per-block message we set up earlier by morphing
* it to hold single buffer with the token followed by some
@@ -1175,7 +1174,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
canpower = host->pdata && host->pdata->setpower;
- dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
+ dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
mmc_powerstring(ios->power_mode),
ios->vdd,
canpower ? ", can switch" : "");
@@ -1248,8 +1247,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->spi->max_speed_hz = ios->clock;
status = spi_setup(host->spi);
- dev_dbg(&host->spi->dev,
- "mmc_spi: clock to %d Hz, %d\n",
+ dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
host->spi->max_speed_hz, status);
}
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 898ed1b023df..4dfc246c5f95 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -724,10 +724,8 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
}
-static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
+static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
{
- struct mmc_data *data = mrq->data;
-
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
@@ -735,10 +733,8 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
}
}
-static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
+static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
{
- struct mmc_data *data = mrq->data;
-
if (data->host_cookie & MSDC_ASYNC_FLAG)
return;
@@ -1140,7 +1136,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
msdc_track_cmd_data(host, mrq->cmd, mrq->data);
if (mrq->data)
- msdc_unprepare_data(host, mrq);
+ msdc_unprepare_data(host, mrq->data);
if (host->error)
msdc_reset_hw(host);
mmc_request_done(mmc_from_priv(host), mrq);
@@ -1311,7 +1307,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
if (mrq->data)
- msdc_prepare_data(host, mrq);
+ msdc_prepare_data(host, mrq->data);
/* if SBC is required, we have HW option and SW option.
* if HW option is enabled, and SBC does not have "special" flags,
@@ -1332,7 +1328,7 @@ static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
if (!data)
return;
- msdc_prepare_data(host, mrq);
+ msdc_prepare_data(host, data);
data->host_cookie |= MSDC_ASYNC_FLAG;
}
@@ -1340,19 +1336,18 @@ static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
{
struct msdc_host *host = mmc_priv(mmc);
- struct mmc_data *data;
+ struct mmc_data *data = mrq->data;
- data = mrq->data;
if (!data)
return;
+
if (data->host_cookie) {
data->host_cookie &= ~MSDC_ASYNC_FLAG;
- msdc_unprepare_data(host, mrq);
+ msdc_unprepare_data(host, data);
}
}
-static void msdc_data_xfer_next(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_data *data)
+static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
{
if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
!mrq->sbc)
@@ -1411,7 +1406,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
(int)data->error, data->bytes_xfered);
}
- msdc_data_xfer_next(host, mrq, data);
+ msdc_data_xfer_next(host, mrq);
done = true;
}
return done;
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 9d480a05f655..3629550528b6 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,8 +22,8 @@
MODULE_LICENSE("GPL");
struct of_mmc_spi {
- int detect_irq;
struct mmc_spi_platform_data pdata;
+ int detect_irq;
};
static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 635bf31a6735..e49ca0f7fe9a 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
+ int cmd_error;
+
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
- if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+ if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
set_bit(i, priv->taps);
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
set_bit(i, priv->smpcmp);
+
+ if (cmd_error)
+ mmc_send_abort_tuning(mmc, opcode);
}
ret = renesas_sdhi_select_tuning(host);
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+ { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
{ .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 0ca6f6d30b75..8d5929a32d34 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1578,17 +1578,12 @@ static int s3cmci_probe(struct platform_device *pdev)
goto probe_iounmap;
}
- if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
+ if (request_irq(host->irq, s3cmci_irq, IRQF_NO_AUTOEN, DRIVER_NAME, host)) {
dev_err(&pdev->dev, "failed to request mci interrupt.\n");
ret = -ENOENT;
goto probe_iounmap;
}
- /* We get spurious interrupts even when we have set the IMSK
- * register to ignore everything, so use disable_irq() to make
- * ensure we don't lock the system with un-serviceable requests. */
-
- disable_irq(host->irq);
host->irq_state = false;
/* Depending on the dma state, get a DMA channel to use. */
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c3fbf8c825c4..8fe65f172a61 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -822,6 +822,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT8-B's microSD slot always reports the card as being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index b991cf0e60c5..72c0bf0c1887 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -324,11 +324,6 @@ static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
return data->socdata == &esdhc_imx53_data;
}
-static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
-{
- return data->socdata == &usdhc_imx6q_data;
-}
-
static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
@@ -427,9 +422,6 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
| FIELD_PREP(SDHCI_RETUNING_MODE_MASK,
SDHCI_TUNING_MODE_3);
- if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
- val |= SDHCI_SUPPORT_HS400;
-
/*
* Do not advertise faster UHS modes if there are no
* pinctrl states for 100MHz/200MHz.
@@ -1591,7 +1583,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
- host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+ host->mmc->caps2 |= MMC_CAP2_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
@@ -1628,6 +1620,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
+ /*
+ * Set up the wakeup capability here and let the user decide whether
+ * to enable this wakeup through the sysfs interface.
+ */
+ if ((host->mmc->pm_caps & MMC_PM_KEEP_POWER) &&
+ (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ))
+ device_set_wakeup_capable(&pdev->dev, true);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index ddeaf8e1f72f..cce390fe9cf3 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -286,11 +286,35 @@ static const struct sdhci_iproc_data bcm2711_data = {
.mmc_caps = MMC_CAP_3_3V_DDR,
};
+static const struct sdhci_pltfm_data sdhci_bcm7211a0_pltfm_data = {
+ .quirks = SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_BROKEN_DMA |
+ SDHCI_QUIRK_BROKEN_ADMA,
+ .ops = &sdhci_iproc_ops,
+};
+
+#define BCM7211A0_BASE_CLK_MHZ 100
+static const struct sdhci_iproc_data bcm7211a0_data = {
+ .pdata = &sdhci_bcm7211a0_pltfm_data,
+ .caps = ((BCM7211A0_BASE_CLK_MHZ / 2) << SDHCI_TIMEOUT_CLK_SHIFT) |
+ (BCM7211A0_BASE_CLK_MHZ << SDHCI_CLOCK_BASE_SHIFT) |
+ ((0x2 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D,
+};
+
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
{ .compatible = "brcm,bcm2711-emmc2", .data = &bcm2711_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
{ .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
+ { .compatible = "brcm,bcm7211a0-sdhci", .data = &bcm7211a0_data },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
@@ -384,6 +408,11 @@ err:
return ret;
}
+static void sdhci_iproc_shutdown(struct platform_device *pdev)
+{
+ sdhci_pltfm_suspend(&pdev->dev);
+}
+
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
@@ -394,6 +423,7 @@ static struct platform_driver sdhci_iproc_driver = {
},
.probe = sdhci_iproc_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_iproc_shutdown,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index d001c51074a0..6e4e132903a6 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -31,6 +31,11 @@
#define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0)
#define ASPEED_SDC_PHASE_MAX 31
+/* SDIO{10,20} */
+#define ASPEED_SDC_CAP1_1_8V (0 * 32 + 26)
+/* SDIO{14,24} */
+#define ASPEED_SDC_CAP2_SDR104 (1 * 32 + 1)
+
struct aspeed_sdc {
struct clk *clk;
struct resource *res;
@@ -72,6 +77,37 @@ struct aspeed_sdhci {
const struct aspeed_sdhci_phase_desc *phase_desc;
};
+/*
+ * This function sets the mirror register for updating the
+ * capabilities of the current slot.
+ *
+ * slot | capability | caps_reg | mirror_reg
+ * -----|-------------|----------|------------
+ * 0 | CAP1_1_8V | SDIO140 | SDIO10
+ * 0 | CAP2_SDR104 | SDIO144 | SDIO14
+ * 1 | CAP1_1_8V | SDIO240 | SDIO20
+ * 1 | CAP2_SDR104 | SDIO244 | SDIO24
+ */
+static void aspeed_sdc_set_slot_capability(struct sdhci_host *host, struct aspeed_sdc *sdc,
+ int capability, bool enable, u8 slot)
+{
+ u32 mirror_reg_offset;
+ u32 cap_val;
+ u8 cap_reg;
+
+ if (slot > 1)
+ return;
+
+ cap_reg = capability / 32;
+ cap_val = sdhci_readl(host, 0x40 + (cap_reg * 4));
+ if (enable)
+ cap_val |= BIT(capability % 32);
+ else
+ cap_val &= ~BIT(capability % 32);
+ mirror_reg_offset = ((slot + 1) * 0x10) + (cap_reg * 4);
+ writel(cap_val, sdc->regs + mirror_reg_offset);
+}
+
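Following the table above, a worked example for slot 0 with ASPEED_SDC_CAP2_SDR104 (bit index 1 * 32 + 1 = 33):

/*
 *	cap_reg           = 33 / 32            = 1  -> reads host reg 0x44
 *	capability % 32   = 1                       -> BIT(1) in CAP2
 *	mirror_reg_offset = (0 + 1) * 0x10 + 4 = 0x14 (SDIO14)
 */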
static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
struct aspeed_sdhci *sdhci,
bool bus8)
@@ -150,7 +186,7 @@ static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
tap = div_u64(phase_period_ps, prop_delay_ps);
if (tap > ASPEED_SDHCI_NR_TAPS) {
- dev_warn(dev,
+ dev_dbg(dev,
"Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
tap = ASPEED_SDHCI_NR_TAPS;
@@ -328,6 +364,7 @@ static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev,
static int aspeed_sdhci_probe(struct platform_device *pdev)
{
const struct aspeed_sdhci_pdata *aspeed_pdata;
+ struct device_node *np = pdev->dev.of_node;
struct sdhci_pltfm_host *pltfm_host;
struct aspeed_sdhci *dev;
struct sdhci_host *host;
@@ -372,6 +409,17 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ if (of_property_read_bool(np, "mmc-hs200-1_8v") ||
+ of_property_read_bool(np, "sd-uhs-sdr104")) {
+ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP1_1_8V,
+ true, slot);
+ }
+
+ if (of_property_read_bool(np, "sd-uhs-sdr104")) {
+ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP2_SDR104,
+ true, slot);
+ }
+
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk))
return PTR_ERR(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 7893fd3599b6..8f4d1f003f65 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1173,10 +1173,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
* as part of pm_runtime_get_sync.
*/
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
dev_err(dev, "pm_runtime_get_sync failed\n");
- pm_runtime_put_noidle(dev);
goto err_rpm_disable;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 061618aa247f..4fd99c1e82ba 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -94,7 +94,7 @@
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
-#define GLI_9763E_CFG2_L1DLY_MID 0x50
+#define GLI_9763E_CFG2_L1DLY_MID 0x54
#define PCIE_GLI_9763E_MMC_CTRL 0x960
#define GLI_9763E_HS400_SLOW BIT(3)
@@ -847,7 +847,7 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG2, &value);
value &= ~GLI_9763E_CFG2_L1DLY;
- /* set ASPM L1 entry delay to 20us */
+ /* set ASPM L1 entry delay to 21us */
value |= FIELD_PREP(GLI_9763E_CFG2_L1DLY, GLI_9763E_CFG2_L1DLY_MID);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG2, value);
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 5dc36efff47f..11e375579cfb 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -393,6 +393,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
+ .write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
.get_max_clock = sdhci_sprd_get_max_clock,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bf238ade1602..6aaf5c3ce34c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2680,7 +2680,7 @@ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
sdhci_end_tuning(host);
- mmc_abort_tuning(host->mmc, opcode);
+ mmc_send_abort_tuning(host->mmc, opcode);
}
EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0770c036e2ff..c35ed4be75b7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -201,8 +201,10 @@
#define SDHCI_CAPABILITIES 0x40
#define SDHCI_TIMEOUT_CLK_MASK GENMASK(5, 0)
+#define SDHCI_TIMEOUT_CLK_SHIFT 0
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define SDHCI_CLOCK_BASE_MASK GENMASK(13, 8)
+#define SDHCI_CLOCK_BASE_SHIFT 8
#define SDHCI_CLOCK_V3_BASE_MASK GENMASK(15, 8)
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 1fad6e442688..f654afbe8e83 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -809,11 +809,9 @@ static int sdhci_am654_probe(struct platform_device *pdev)
/* Clocks are enabled using pm_runtime */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
goto pm_runtime_disable;
- }
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 615f3d008af1..b9b79b1089a0 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1801,6 +1801,7 @@ static int usdhi6_probe(struct platform_device *pdev)
version = usdhi6_read(host, USDHI6_VERSION);
if ((version & 0xfff) != 0xa0d) {
+ ret = -EPERM;
dev_err(dev, "Version not recognized %x\n", version);
goto e_clk_off;
}
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a1d098560099..c32df5530b94 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -857,6 +857,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
{
BUG_ON(intmask == 0);
+ if (!host->data)
+ return;
+
if (intmask & VIA_CRDR_SDSTS_DT)
host->data->error = -ETIMEDOUT;
else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 739cf63ef6e2..4950d10d3a19 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2279,7 +2279,7 @@ static int vub300_probe(struct usb_interface *interface,
if (retval < 0)
goto error5;
retval =
- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index fb8e12d590a1..6ce4bc57f919 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -30,11 +30,9 @@ static void blktrans_dev_release(struct kref *kref)
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
- dev->disk->private_data = NULL;
- blk_cleanup_queue(dev->rq);
+ blk_cleanup_disk(dev->disk);
blk_mq_free_tag_set(dev->tag_set);
kfree(dev->tag_set);
- put_disk(dev->disk);
list_del(&dev->list);
kfree(dev);
}
@@ -354,7 +352,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->devnum > (MINORMASK >> tr->part_bits) ||
(tr->part_bits && new->devnum >= 27 * 26)) {
mutex_unlock(&blktrans_ref_mutex);
- goto error1;
+ return ret;
}
list_add_tail(&new->list, &tr->devs);
@@ -366,17 +364,29 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (!tr->writesect)
new->readonly = 1;
- /* Create gendisk */
ret = -ENOMEM;
- gd = alloc_disk(1 << tr->part_bits);
+ new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+ if (!new->tag_set)
+ goto out_list_del;
- if (!gd)
- goto error2;
+ ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (ret)
+ goto out_kfree_tag_set;
+
+ /* Create gendisk */
+ gd = blk_mq_alloc_disk(new->tag_set, new);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto out_free_tag_set;
+ }
new->disk = gd;
+ new->rq = new->disk->queue;
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->minors = 1 << tr->part_bits;
gd->fops = &mtd_block_ops;
if (tr->part_bits)
@@ -398,22 +408,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
- new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
- if (!new->tag_set)
- goto error3;
-
- new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
- if (IS_ERR(new->rq)) {
- ret = PTR_ERR(new->rq);
- new->rq = NULL;
- goto error4;
- }
-
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
- new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
@@ -437,13 +434,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
WARN_ON(ret);
}
return 0;
-error4:
+
+out_free_tag_set:
+ blk_mq_free_tag_set(new->tag_set);
+out_kfree_tag_set:
kfree(new->tag_set);
-error3:
- put_disk(new->disk);
-error2:
+out_list_del:
list_del(&new->list);
-error1:
return ret;
}
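For reference, the allocation pattern this hunk converts to, reduced to its essentials (a sketch under the block-layer API used here; error handling and the surrounding mtd specifics are elided):

/* allocate a tag set, then disk + queue in one call */
tag_set = kzalloc(sizeof(*tag_set), GFP_KERNEL);
ret = blk_mq_alloc_sq_tag_set(tag_set, &mtd_mq_ops, 2,
			      BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
gd = blk_mq_alloc_disk(tag_set, new);
new->rq = gd->queue;		/* the queue comes with the disk */

/* on teardown: */
blk_cleanup_disk(gd);		/* tears down disk and queue together */
blk_mq_free_tag_set(tag_set);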
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index a3ae8778f6a9..e13d42c0acb0 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -423,13 +423,13 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
- cxt->dev.total_size = mtd->size;
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
- cxt->dev.read = mtdpstore_read;
- cxt->dev.write = mtdpstore_write;
- cxt->dev.erase = mtdpstore_erase;
- cxt->dev.panic_write = mtdpstore_panic_write;
+ cxt->dev.zone.read = mtdpstore_read;
+ cxt->dev.zone.write = mtdpstore_write;
+ cxt->dev.zone.erase = mtdpstore_erase;
+ cxt->dev.zone.panic_write = mtdpstore_panic_write;
+ cxt->dev.zone.total_size = mtd->size;
ret = register_pstore_device(&cxt->dev);
if (ret) {
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 6edf78c16fc8..df40927e5678 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>
@@ -240,6 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
return 0;
}
+static int cs553x_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static struct cs553x_nand_controller *controllers[4];
static int cs553x_attach_chip(struct nand_chip *chip)
@@ -251,7 +261,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 3;
chip->ecc.hwctl = cs_enable_hwecc;
chip->ecc.calculate = cs_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = cs553x_ecc_correct;
chip->ecc.strength = 1;
return 0;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index bf695255b43a..a3e66155ae40 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -432,6 +433,15 @@ static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
return 0;
}
+static int fsmc_correct_ecc1(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/* Count the number of 0's in buff up to a max of max_bits */
static int count_written_bits(u8 *buff, int size, int max_bits)
{
@@ -917,7 +927,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = rawnand_sw_hamming_correct;
+ nand->ecc.correct = fsmc_correct_ecc1;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 6b7269cfb7d8..d7dfc6fd85ca 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#define LPC32XX_MODNAME "lpc32xx-nand"
@@ -345,6 +346,18 @@ static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
}
/*
+ * Corrects the data
+ */
+static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
+/*
* Read a single byte from NAND device
*/
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
@@ -802,7 +815,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = lpc32xx_nand_ecc_correct;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 338d6b1a189e..98d5a94c3a24 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,6 +22,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -100,6 +101,15 @@ static int ndfc_calculate_ecc(struct nand_chip *chip,
return 0;
}
+static int ndfc_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/*
* Speedups for buffer read/write/verify
*
@@ -145,7 +155,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
chip->legacy.write_buf = ndfc_write_buf;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = ndfc_correct_ecc;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..2f1fe464e663 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sharpsl.h>
@@ -96,6 +97,15 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
return readb(sharpsl->io + ECCCNTR) != 0;
}
+static int sharpsl_nand_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static int sharpsl_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
@@ -106,7 +116,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
chip->ecc.strength = 1;
chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
chip->ecc.calculate = sharpsl_nand_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = sharpsl_nand_correct_ecc;
return 0;
}
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index de8e919d0ebe..6d93dd31969b 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
@@ -292,11 +293,12 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
int r0, r1;
/* assume ecc.size = 512 and ecc.bytes = 6 */
- r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
+ r0 = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (r0 < 0)
return r0;
- r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
- calc_ecc + 3);
+ r1 = ecc_sw_hamming_correct(buf + 256, read_ecc + 3, calc_ecc + 3,
+ chip->ecc.size, false);
if (r1 < 0)
return r1;
return r0 + r1;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 1a9449e53bf9..b8894ac27073 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
@@ -193,8 +194,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
- stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
- calc_ecc);
+ stat = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (stat < 0)
return stat;
corrected += stat;
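
All six raw NAND conversions above share one shape: instead of rawnand_sw_hamming_correct(), which derives its step size and byte order from software-ECC engine state that these on-host setups do not initialize (a hedged reading of the series), the driver calls ecc_sw_hamming_correct() directly with both pinned explicitly. An annotated sketch of that shared callback; only the comments are new:

#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>

static int demo_ecc_correct(struct nand_chip *chip, unsigned char *buf,
			    unsigned char *read_ecc, unsigned char *calc_ecc)
{
	/*
	 * buf: one ECC step worth of data, fixed up in place
	 * read_ecc/calc_ecc: 3-byte code read from OOB vs. recomputed
	 * chip->ecc.size: data bytes covered per code (256 or 512 here)
	 * false: use the non-Smartmedia ECC byte order
	 */
	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
				      chip->ecc.size, false);
}
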
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 17f63f95f4a2..3131fae0c715 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -473,20 +473,26 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+static int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
- unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
- do {
- ret = spinand_read_status(spinand, &status);
- if (ret)
- return ret;
+ ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
+ initial_delay_us,
+ poll_delay_us,
+ SPINAND_WAITRDY_TIMEOUT_MS);
+ if (ret)
+ return ret;
- if (!(status & STATUS_BUSY))
- goto out;
- } while (time_before(jiffies, timeo));
+ status = *spinand->scratchbuf;
+ if (!(status & STATUS_BUSY))
+ goto out;
/*
* Extra read, just in case the STATUS_READY bit has changed
@@ -526,7 +532,10 @@ static int spinand_reset_op(struct spinand_device *spinand)
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -549,7 +558,10 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
@@ -585,7 +597,10 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
if (!ret && (status & STATUS_PROG_FAILED))
return -EIO;
@@ -768,7 +783,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
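
The spinand rework delegates the busy-wait to spi_mem_poll_status(), so a controller that can poll the status register in hardware may sleep instead of hammering the bus, with a per-operation initial delay, poll interval, and timeout; when the controller has no such support, the core falls back to software polling. That fallback is morally a poll-with-timeout loop; a hedged sketch using the generic <linux/iopoll.h> helper, where demo_read_status() and DEMO_BUSY are invented stand-ins:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define DEMO_BUSY	BIT(0)

static u8 demo_read_status(void *chip)
{
	return 0;	/* stand-in: a real driver reads a register here */
}

static int demo_wait_ready(void *chip)
{
	u8 status;

	/* Re-read every 20us until BUSY clears; -ETIMEDOUT after 400ms. */
	return read_poll_timeout(demo_read_status, status,
				 !(status & DEMO_BUSY),
				 20, 400000, false, chip);
}
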
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index 0fd8d2a0db97..192190c42fc8 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -57,20 +57,22 @@ static int parse_fixed_partitions(struct mtd_info *master,
if (!mtd_node)
return 0;
- ofpart_node = of_get_child_by_name(mtd_node, "partitions");
- if (!ofpart_node && !master->parent) {
- /*
- * We might get here even when ofpart isn't used at all (e.g.,
- * when using another parser), so don't be louder than
- * KERN_DEBUG
- */
- pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
- master->name, mtd_node);
+ if (!master->parent) { /* Master */
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node);
+ ofpart_node = mtd_node;
+ dedicated = false;
+ }
+ } else { /* Partition */
ofpart_node = mtd_node;
- dedicated = false;
}
- if (!ofpart_node)
- return 0;
of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
if (dedicated && !of_id) {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index e1a2ae21dfd3..e003b4b44ffa 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -394,53 +394,46 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->vol_id = vi->vol_id;
dev->leb_size = vi->usable_leb_size;
+ dev->tag_set.ops = &ubiblock_mq_ops;
+ dev->tag_set.queue_depth = 64;
+ dev->tag_set.numa_node = NUMA_NO_NODE;
+ dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
+ dev->tag_set.driver_data = dev;
+ dev->tag_set.nr_hw_queues = 1;
+
+ ret = blk_mq_alloc_tag_set(&dev->tag_set);
+ if (ret) {
+ pr_err("ubiblock: blk_mq_alloc_tag_set failed\n");
+ goto out_free_dev;
+ }
+
/* Initialize the gendisk of this ubiblock device */
- gd = alloc_disk(1);
- if (!gd) {
- pr_err("UBI: block: alloc_disk failed\n");
- ret = -ENODEV;
- goto out_free_dev;
+ gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto out_free_tags;
}
gd->fops = &ubiblock_ops;
gd->major = ubiblock_major;
+ gd->minors = 1;
gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
if (gd->first_minor < 0) {
dev_err(disk_to_dev(gd),
"block: dynamic minor allocation failed");
ret = -ENODEV;
- goto out_put_disk;
+ goto out_cleanup_disk;
}
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
dev->gd = gd;
- dev->tag_set.ops = &ubiblock_mq_ops;
- dev->tag_set.queue_depth = 64;
- dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
- dev->tag_set.driver_data = dev;
- dev->tag_set.nr_hw_queues = 1;
-
- ret = blk_mq_alloc_tag_set(&dev->tag_set);
- if (ret) {
- dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
- goto out_remove_minor;
- }
-
- dev->rq = blk_mq_init_queue(&dev->tag_set);
- if (IS_ERR(dev->rq)) {
- dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
- ret = PTR_ERR(dev->rq);
- goto out_free_tags;
- }
+ dev->rq = gd->queue;
blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
- dev->rq->queuedata = dev;
- dev->gd->queue = dev->rq;
-
/*
* Create one workqueue per volume (per registered block device).
* Remember workqueues are cheap, they're not threads.
@@ -448,7 +441,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq) {
ret = -ENOMEM;
- goto out_free_queue;
+ goto out_remove_minor;
}
list_add_tail(&dev->list, &ubiblock_devices);
@@ -460,14 +453,12 @@ int ubiblock_create(struct ubi_volume_info *vi)
mutex_unlock(&devices_mutex);
return 0;
-out_free_queue:
- blk_cleanup_queue(dev->rq);
-out_free_tags:
- blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
idr_remove(&ubiblock_minor_idr, gd->first_minor);
-out_put_disk:
- put_disk(dev->gd);
+out_cleanup_disk:
+ blk_cleanup_disk(dev->gd);
+out_free_tags:
+ blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
kfree(dev);
out_unlock:
@@ -483,11 +474,10 @@ static void ubiblock_cleanup(struct ubiblock *dev)
/* Flush pending work */
destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
- blk_cleanup_queue(dev->rq);
- blk_mq_free_tag_set(&dev->tag_set);
dev_info(disk_to_dev(dev->gd), "released");
+ blk_cleanup_disk(dev->gd);
+ blk_mq_free_tag_set(&dev->tag_set);
idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
- put_disk(dev->gd);
}
int ubiblock_remove(struct ubi_volume_info *vi)
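
This conversion (like the mtd_blkdevs one earlier) centres on blk_mq_alloc_disk(), which allocates the request queue and the gendisk together once the tag set exists, collapsing the old alloc_disk() plus blk_mq_init_queue() pair and two error labels with them. A minimal sketch of the resulting create/destroy order; struct demo_dev and the function names are illustrative only:

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/genhd.h>

struct demo_dev {
	struct blk_mq_tag_set tag_set;	/* filled in by the caller */
	struct gendisk *gd;
};

static int demo_create_disk(struct demo_dev *dev)
{
	struct gendisk *gd;
	int ret;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	/* One call allocates queue and disk; the second argument is
	 * stored as the queue's queuedata. */
	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(gd)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(gd);
	}

	dev->gd = gd;
	return 0;
}

static void demo_destroy_disk(struct demo_dev *dev)
{
	/* Teardown mirrors creation: disk (and queue) first, tags last. */
	blk_cleanup_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
}
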
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 74dc8e249faa..6977f8248df7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -262,17 +262,17 @@ config GENEVE
will be called geneve.
config BAREUDP
- tristate "Bare UDP Encapsulation"
- depends on INET
- depends on IPV6 || !IPV6
- select NET_UDP_TUNNEL
- select GRO_CELLS
- help
- This adds a bare UDP tunnel module for tunnelling different
- kinds of traffic like MPLS, IP, etc. inside a UDP tunnel.
-
- To compile this driver as a module, choose M here: the module
- will be called bareudp.
+ tristate "Bare UDP Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ help
+ This adds a bare UDP tunnel module for tunnelling different
+ kinds of traffic like MPLS, IP, etc. inside a UDP tunnel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bareudp.
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
@@ -431,6 +431,7 @@ config VSOCKMON
config MHI_NET
tristate "MHI network driver"
depends on MHI_BUS
+ select WWAN
help
This is the network driver for MHI bus. It can be used with
QCOM based WWAN modems (like SDX55). Say Y or M.
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index ba8e70a8e312..f0695d68c47e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
break;
}
+ dev->base_addr = ioaddr;
+
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
goto err_out;
}
- dev->base_addr = ioaddr;
-
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
@@ -609,12 +609,12 @@ static int cops_nodeid (struct net_device *dev, int nodeid)
if(lp->board == DAYNA)
{
- /* Empty any pending adapter responses. */
+ /* Empty any pending adapter responses. */
while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
{
outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Kick any packets waiting. */
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev); /* Kick any packets waiting. */
schedule();
}
@@ -630,13 +630,13 @@ static int cops_nodeid (struct net_device *dev, int nodeid)
while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
{
outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */
- cops_rx(dev); /* Kick out packets waiting. */
+ cops_rx(dev); /* Kick out packets waiting. */
schedule();
}
/* Not sure what Tangent does if nodeid picked is used. */
if(nodeid == 0) /* Seed. */
- nodeid = jiffies&0xFF; /* Get a random try */
+ nodeid = jiffies&0xFF; /* Get a random try */
outb(2, ioaddr); /* Command length LSB */
outb(0, ioaddr); /* Command length MSB */
outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */
@@ -651,13 +651,13 @@ static int cops_nodeid (struct net_device *dev, int nodeid)
if(lp->board == DAYNA)
{
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
}
if(lp->board == TANGENT)
{
if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
+ cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
}
schedule();
}
@@ -719,16 +719,16 @@ static irqreturn_t cops_interrupt(int irq, void *dev_id)
{
do {
outb(0, ioaddr + COPS_CLEAR_INT);
- status=inb(ioaddr+DAYNA_CARD_STATUS);
- if((status&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev);
- netif_wake_queue(dev);
+ status=inb(ioaddr+DAYNA_CARD_STATUS);
+ if((status&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev);
+ netif_wake_queue(dev);
} while(++boguscount < 20);
}
else
{
do {
- status=inb(ioaddr+TANG_CARD_STATUS);
+ status=inb(ioaddr+TANG_CARD_STATUS);
if(status & TANG_RX_READY)
cops_rx(dev);
if(status & TANG_TX_READY)
@@ -855,7 +855,7 @@ static void cops_timeout(struct net_device *dev, unsigned int txqueue)
if(lp->board==TANGENT)
{
if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
+ printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
@@ -897,7 +897,7 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
outb(LAP_WRITE, ioaddr);
if(lp->board == DAYNA) /* Check the transmit buffer again. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
+ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
outsb(ioaddr, skb->data, skb->len); /* Send out the data. */
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index c6f73aa3700c..69c270885ff0 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -584,11 +584,13 @@ loop:
printk("%02x ",ltdmacbuf[i]);
printk("\n");
}
+
handlecommand(dev);
- if(0xfa==inb_p(base+6)) {
- /* we timed out, so return */
- goto done;
- }
+
+ if (0xfa == inb_p(base + 6)) {
+ /* we timed out, so return */
+ goto done;
+ }
} else {
/* we don't seem to have a command */
if (!mboxinuse[0]) {
@@ -935,10 +937,10 @@ static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
static int __init ltpc_probe_dma(int base, int dma)
{
int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
- unsigned long timeout;
- unsigned long f;
+ unsigned long timeout;
+ unsigned long f;
- if (want & 1) {
+ if (want & 1) {
if (request_dma(1,"ltpc")) {
want &= ~1;
} else {
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index edfad93e7b68..a7ee0af1af90 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -133,6 +133,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb->dev = bareudp->dev;
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 3455f2cc13f2..22e5632089ac 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -104,6 +104,7 @@ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
index = SLAVE_TLB_INFO(slave).head;
while (index != TLB_NULL_INDEX) {
u32 next_index = tx_hash_table[index].next;
+
tlb_init_table_entry(&tx_hash_table[index], save_load);
index = next_index;
}
@@ -228,7 +229,7 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
{
struct slave *tx_slave;
- /* We don't need to disable softirq here, becase
+ /* We don't need to disable softirq here, because
* tlb_choose_channel() is only called by bond_alb_xmit()
* which already has softirq disabled.
*/
@@ -608,7 +609,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
client_info->ip_src = arp->ip_src;
client_info->ip_dst = arp->ip_dst;
- /* arp->mac_dst is broadcast for arp reqeusts.
+ /* arp->mac_dst is broadcast for arp requests.
* will be updated with clients actual unicast mac address
* upon receiving an arp reply.
*/
@@ -628,6 +629,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
+
bond_info->rx_hashtbl_used_head = hash_index;
client_info->used_next = prev_tbl_head;
if (prev_tbl_head != RLB_NULL_INDEX) {
@@ -830,9 +832,10 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
while (index != RLB_NULL_INDEX) {
struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
u32 next_index = entry->src_next;
+
if (entry->ip_src == arp->ip_src &&
!ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
- rlb_delete_table_entry(bond, index);
+ rlb_delete_table_entry(bond, index);
index = next_index;
}
spin_unlock_bh(&bond->mode_lock);
@@ -1268,7 +1271,7 @@ unwind:
return res;
}
-/************************ exported alb funcions ************************/
+/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
{
@@ -1547,7 +1550,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
/* If updating current_active, use all currently
- * user mac addreses (!strict_match). Otherwise, only
+ * user mac addresses (!strict_match). Otherwise, only
* use mac of the slave device.
* In RLB mode, we always use strict matches.
*/
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index f3f86ef68ae0..4f9b4a18c74c 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -88,9 +88,8 @@ void bond_create_debugfs(void)
{
bonding_debug_root = debugfs_create_dir("bonding", NULL);
- if (!bonding_debug_root) {
+ if (!bonding_debug_root)
pr_warn("Warning: Cannot create bonding directory in debugfs\n");
- }
}
void bond_destroy_debugfs(void)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 20bbda1b36e1..0ff7567bd04f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -620,7 +620,7 @@ static int bond_check_dev_link(struct bonding *bond,
*/
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
+ strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
@@ -1013,9 +1013,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
- if (bond_uses_primary(bond)) {
+ if (bond_uses_primary(bond))
slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
- }
}
}
@@ -1526,6 +1525,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
slave->bond = bond;
slave->dev = slave_dev;
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
@@ -1538,7 +1538,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
return NULL;
}
}
- INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
@@ -1601,6 +1600,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
int link_reporting;
int res = 0, i;
+ if (slave_dev->flags & IFF_MASTER &&
+ !netif_is_bond_master(slave_dev)) {
+ NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
+ netdev_err(bond_dev,
+ "Error: Device with IFF_MASTER cannot be enslaved\n");
+ return -EPERM;
+ }
+
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
@@ -2272,6 +2279,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+
bond_fill_ifbond(bond, info);
}
@@ -4202,16 +4210,16 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
slave_id = prandom_u32();
break;
case 1:
- slave_id = bond->rr_tx_counter;
+ slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
break;
default:
reciprocal_packets_per_slave =
bond->params.reciprocal_packets_per_slave;
- slave_id = reciprocal_divide(bond->rr_tx_counter,
+ slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
+ slave_id = reciprocal_divide(slave_id,
reciprocal_packets_per_slave);
break;
}
- bond->rr_tx_counter++;
return slave_id;
}
@@ -4849,8 +4857,12 @@ static const struct device_type bond_type = {
static void bond_destructor(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+
if (bond->wq)
destroy_workqueue(bond->wq);
+
+ if (bond->rr_tx_counter)
+ free_percpu(bond->rr_tx_counter);
}
void bond_setup(struct net_device *bond_dev)
@@ -5329,10 +5341,8 @@ static int bond_check_params(struct bond_params *params)
(struct reciprocal_value) { 0 };
}
- if (primary) {
- strncpy(params->primary, primary, IFNAMSIZ);
- params->primary[IFNAMSIZ - 1] = 0;
- }
+ if (primary)
+ strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
@@ -5351,6 +5361,15 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter) {
+ destroy_workqueue(bond->wq);
+ bond->wq = NULL;
+ return -ENOMEM;
+ }
+ }
+
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
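
The round-robin counter becomes per-CPU: this_cpu_inc_return() bumps a CPU-local u32 with no shared cacheline, trading strict global round-robin for approximate distribution under concurrent transmitters. The pattern in isolation, stripped of the bonding specifics (demo_* names are invented):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

static u32 __percpu *demo_counter;

static int demo_init(void)
{
	demo_counter = alloc_percpu(u32);
	if (!demo_counter)
		return -ENOMEM;
	return 0;
}

static u32 demo_next_id(void)
{
	/* Atomic w.r.t. this CPU only; no lock, no cacheline bouncing. */
	return this_cpu_inc_return(*demo_counter);
}

static void demo_exit(void)
{
	free_percpu(demo_counter);
}
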
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f0f9138e967f..0561ece1ba45 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -598,7 +598,7 @@ static int bond_fill_info(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
- bond->params.resend_igmp))
+ bond->params.resend_igmp))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c9d3604ae129..0cf25de6f46d 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -705,7 +705,7 @@ out:
int __bond_opt_set_notify(struct bonding *bond,
unsigned int option, struct bond_opt_value *val)
{
- int ret = -ENOENT;
+ int ret;
ASSERT_RTNL();
@@ -1206,8 +1206,7 @@ static int bond_option_primary_set(struct bonding *bond,
RCU_INIT_POINTER(bond->primary_slave, NULL);
bond_select_active_slave(bond);
}
- strncpy(bond->params.primary, primary, IFNAMSIZ);
- bond->params.primary[IFNAMSIZ - 1] = 0;
+ strscpy_pad(bond->params.primary, primary, IFNAMSIZ);
netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved yet\n",
primary);
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 56d34be5e797..0fb1da361bb1 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -112,6 +112,7 @@ static void bond_info_show_master(struct seq_file *seq)
/* ARP information */
if (bond->params.arp_interval > 0) {
int printed = 0;
+
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2d615a93685e..5f9e9a240226 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -385,6 +385,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
@@ -496,6 +497,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
+
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.aggregator_id);
@@ -516,6 +518,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
+
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.ports);
@@ -536,6 +539,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
+
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.actor_key);
@@ -556,6 +560,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
+
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.partner_key);
@@ -576,6 +581,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
+
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
count = sprintf(buf, "%pM\n", ad_info.partner_system);
}
@@ -660,6 +666,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index d17482395a4d..4ffbfd534f18 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
rtnl_lock();
result = register_netdevice(dev);
if (result) {
+ tty_kref_put(tty);
rtnl_unlock();
free_netdev(dev);
return -ENODEV;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 106f089eb2a8..91230894692d 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -315,7 +315,7 @@ exit:
case 0:
++cfv->stats.rx_napi_complete;
- /* Really out of patckets? (stolen from virtio_net)*/
+ /* Really out of packets? (stolen from virtio_net)*/
napi_complete(napi);
if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
napi_schedule_prep(napi)) {
@@ -463,7 +463,7 @@ static int cfv_netdev_close(struct net_device *netdev)
vringh_notify_disable_kern(cfv->vr_rx);
napi_disable(&cfv->napi);
- /* Release any TX buffers on both used and avilable rings */
+ /* Release any TX buffers on both used and available rings */
cfv_release_used_buf(cfv->vq_tx);
spin_lock_irqsave(&cfv->tx_lock, flags);
while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
@@ -497,7 +497,7 @@ static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
if (unlikely(!buf_info))
goto err;
- /* Make the IP header aligned in tbe buffer */
+ /* Make the IP header aligned in the buffer */
hdr_ofs = cfv->tx_hr + info->hdr_len;
pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 9ad9b39f480e..04d0bb3ffe89 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -169,7 +169,7 @@ static const struct can_bittiming_const at91_bittiming_const = {
};
#define AT91_IS(_model) \
-static inline int at91_is_sam##_model(const struct at91_priv *priv) \
+static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \
{ \
return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index e6a94c948531..6fa3b2b9e4b9 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,10 @@
#
obj-$(CONFIG_CAN_C_CAN) += c_can.o
+
+c_can-objs :=
+c_can-objs += c_can_ethtool.o
+c_can-objs += c_can_main.o
+
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 06045f610f0e..4247ff80a29c 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -205,7 +205,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
- u32 rxmasked;
u32 dlc[];
};
@@ -219,4 +218,6 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
+void c_can_set_ethtool_ops(struct net_device *dev);
+
#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
new file mode 100644
index 000000000000..cd5f07fca2a5
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021, Dario Binacchi <dariobin@libero.it>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+static void c_can_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct c_can_priv *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->device);
+
+ strscpy(info->driver, "c_can", sizeof(info->driver));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static void c_can_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct c_can_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = priv->msg_obj_num;
+ ring->tx_max_pending = priv->msg_obj_num;
+ ring->rx_pending = priv->msg_obj_rx_num;
+ ring->tx_pending = priv->msg_obj_tx_num;
+}
+
+static const struct ethtool_ops c_can_ethtool_ops = {
+ .get_drvinfo = c_can_get_drvinfo,
+ .get_ringparam = c_can_get_ringparam,
+};
+
+void c_can_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &c_can_ethtool_ops;
+}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can_main.c
index 313793f6922d..7588f70ca0fe 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -599,7 +599,6 @@ static int c_can_chip_config(struct net_device *dev)
/* Clear all internal status */
atomic_set(&priv->tx_active, 0);
- priv->rxmasked = 0;
priv->tx_dir = 0;
/* set bittiming params */
@@ -1335,6 +1334,7 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
+ c_can_set_ethtool_ops(dev);
err = register_candev(dev);
if (!err)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 3cf6de21d19c..bba2a449ac70 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -83,44 +83,25 @@ enum m_can_reg {
#define MRAM_CFG_LEN 8
/* Core Release Register (CREL) */
-#define CREL_REL_SHIFT 28
-#define CREL_REL_MASK (0xF << CREL_REL_SHIFT)
-#define CREL_STEP_SHIFT 24
-#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT)
-#define CREL_SUBSTEP_SHIFT 20
-#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT)
+#define CREL_REL_MASK GENMASK(31, 28)
+#define CREL_STEP_MASK GENMASK(27, 24)
+#define CREL_SUBSTEP_MASK GENMASK(23, 20)
/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC BIT(23)
-#define DBTP_DBRP_SHIFT 16
-#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT)
-#define DBTP_DTSEG1_SHIFT 8
-#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT)
-#define DBTP_DTSEG2_SHIFT 4
-#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT)
-#define DBTP_DSJW_SHIFT 0
-#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)
+#define DBTP_DBRP_MASK GENMASK(20, 16)
+#define DBTP_DTSEG1_MASK GENMASK(12, 8)
+#define DBTP_DTSEG2_MASK GENMASK(7, 4)
+#define DBTP_DSJW_MASK GENMASK(3, 0)
/* Transmitter Delay Compensation Register (TDCR) */
-#define TDCR_TDCO_SHIFT 8
-#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT)
-#define TDCR_TDCF_SHIFT 0
-#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT)
+#define TDCR_TDCO_MASK GENMASK(14, 8)
+#define TDCR_TDCF_MASK GENMASK(6, 0)
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
-/* CC Control Register(CCCR) */
-#define CCCR_CMR_MASK 0x3
-#define CCCR_CMR_SHIFT 10
-#define CCCR_CMR_CANFD 0x1
-#define CCCR_CMR_CANFD_BRS 0x2
-#define CCCR_CMR_CAN 0x3
-#define CCCR_CME_MASK 0x3
-#define CCCR_CME_SHIFT 8
-#define CCCR_CME_CAN 0
-#define CCCR_CME_CANFD 0x1
-#define CCCR_CME_CANFD_BRS 0x2
+/* CC Control Register (CCCR) */
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
#define CCCR_DAR BIT(6)
@@ -130,24 +111,31 @@ enum m_can_reg {
#define CCCR_ASM BIT(2)
#define CCCR_CCE BIT(1)
#define CCCR_INIT BIT(0)
-#define CCCR_CANFD 0x10
+/* for version 3.0.x */
+#define CCCR_CMR_MASK GENMASK(11, 10)
+#define CCCR_CMR_CANFD 0x1
+#define CCCR_CMR_CANFD_BRS 0x2
+#define CCCR_CMR_CAN 0x3
+#define CCCR_CME_MASK GENMASK(9, 8)
+#define CCCR_CME_CAN 0
+#define CCCR_CME_CANFD 0x1
+#define CCCR_CME_CANFD_BRS 0x2
/* for version >=3.1.x */
#define CCCR_EFBI BIT(13)
#define CCCR_PXHD BIT(12)
#define CCCR_BRSE BIT(9)
#define CCCR_FDOE BIT(8)
-/* only for version >=3.2.x */
+/* for version >=3.2.x */
#define CCCR_NISO BIT(15)
+/* for version >=3.3.x */
+#define CCCR_WMM BIT(11)
+#define CCCR_UTSU BIT(10)
/* Nominal Bit Timing & Prescaler Register (NBTP) */
-#define NBTP_NSJW_SHIFT 25
-#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT)
-#define NBTP_NBRP_SHIFT 16
-#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT)
-#define NBTP_NTSEG1_SHIFT 8
-#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT)
-#define NBTP_NTSEG2_SHIFT 0
-#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT)
+#define NBTP_NSJW_MASK GENMASK(31, 25)
+#define NBTP_NBRP_MASK GENMASK(24, 16)
+#define NBTP_NTSEG1_MASK GENMASK(15, 8)
+#define NBTP_NTSEG2_MASK GENMASK(6, 0)
/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK GENMASK(19, 16)
@@ -159,20 +147,18 @@ enum m_can_reg {
/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK GENMASK(15, 0)
-/* Error Counter Register(ECR) */
+/* Error Counter Register (ECR) */
#define ECR_RP BIT(15)
-#define ECR_REC_SHIFT 8
-#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT)
-#define ECR_TEC_SHIFT 0
-#define ECR_TEC_MASK 0xff
+#define ECR_REC_MASK GENMASK(14, 8)
+#define ECR_TEC_MASK GENMASK(7, 0)
-/* Protocol Status Register(PSR) */
+/* Protocol Status Register (PSR) */
#define PSR_BO BIT(7)
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
-#define PSR_LEC_MASK 0x7
+#define PSR_LEC_MASK GENMASK(2, 0)
-/* Interrupt Register(IR) */
+/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
/* Renamed bits for versions > 3.1.x */
@@ -221,6 +207,7 @@ enum m_can_reg {
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
+
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
@@ -237,58 +224,47 @@ enum m_can_reg {
#define ILE_EINT0 BIT(0)
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
-#define RXFC_FWM_SHIFT 24
-#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
-#define RXFC_FS_SHIFT 16
-#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
+#define RXFC_FWM_MASK GENMASK(30, 24)
+#define RXFC_FS_MASK GENMASK(22, 16)
/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL BIT(25)
#define RXFS_FF BIT(24)
-#define RXFS_FPI_SHIFT 16
-#define RXFS_FPI_MASK 0x3f0000
-#define RXFS_FGI_SHIFT 8
-#define RXFS_FGI_MASK 0x3f00
-#define RXFS_FFL_MASK 0x7f
+#define RXFS_FPI_MASK GENMASK(21, 16)
+#define RXFS_FGI_MASK GENMASK(13, 8)
+#define RXFS_FFL_MASK GENMASK(6, 0)
/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
-#define M_CAN_RXESC_8BYTES 0x0
-#define M_CAN_RXESC_64BYTES 0x777
+#define RXESC_RBDS_MASK GENMASK(10, 8)
+#define RXESC_F1DS_MASK GENMASK(6, 4)
+#define RXESC_F0DS_MASK GENMASK(2, 0)
+#define RXESC_64B 0x7
-/* Tx Buffer Configuration(TXBC) */
-#define TXBC_NDTB_SHIFT 16
-#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT)
-#define TXBC_TFQS_SHIFT 24
-#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT)
+/* Tx Buffer Configuration (TXBC) */
+#define TXBC_TFQS_MASK GENMASK(29, 24)
+#define TXBC_NDTB_MASK GENMASK(21, 16)
/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF BIT(21)
-#define TXFQS_TFQPI_SHIFT 16
-#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT)
-#define TXFQS_TFGI_SHIFT 8
-#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT)
-#define TXFQS_TFFL_SHIFT 0
-#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT)
+#define TXFQS_TFQPI_MASK GENMASK(20, 16)
+#define TXFQS_TFGI_MASK GENMASK(12, 8)
+#define TXFQS_TFFL_MASK GENMASK(5, 0)
-/* Tx Buffer Element Size Configuration(TXESC) */
-#define TXESC_TBDS_8BYTES 0x0
-#define TXESC_TBDS_64BYTES 0x7
+/* Tx Buffer Element Size Configuration (TXESC) */
+#define TXESC_TBDS_MASK GENMASK(2, 0)
+#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
-#define TXEFC_EFS_SHIFT 16
-#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT)
+#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL BIT(25)
#define TXEFS_EFF BIT(24)
-#define TXEFS_EFGI_SHIFT 8
-#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT)
-#define TXEFS_EFFL_SHIFT 0
-#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT)
+#define TXEFS_EFGI_MASK GENMASK(12, 8)
+#define TXEFS_EFFL_MASK GENMASK(5, 0)
/* Tx Event FIFO Acknowledge (TXEFA) */
-#define TXEFA_EFAI_SHIFT 0
-#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT)
+#define TXEFA_EFAI_MASK GENMASK(4, 0)
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE 4
@@ -324,13 +300,12 @@ enum m_can_reg {
#define TX_BUF_EFC BIT(23)
#define TX_BUF_FDF BIT(21)
#define TX_BUF_BRS BIT(20)
-#define TX_BUF_MM_SHIFT 24
-#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT)
+#define TX_BUF_MM_MASK GENMASK(31, 24)
+#define TX_BUF_DLC_MASK GENMASK(19, 16)
/* Tx event FIFO Element */
/* E1 */
-#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
-#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
+#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
@@ -449,8 +424,8 @@ static void m_can_clean(struct net_device *net)
net->stats.tx_errors++;
if (cdev->version > 30)
- putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
- TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);
+ putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
can_free_echo_skb(cdev->net, putidx, NULL);
cdev->tx_skb = NULL;
@@ -490,7 +465,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
int i;
/* calculate the fifo get index for where to read data */
- fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
+ fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
if (dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
@@ -663,8 +638,8 @@ static int __m_can_get_berr_counter(const struct net_device *dev,
unsigned int ecr;
ecr = m_can_read(cdev, M_CAN_ECR);
- bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
- bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
+ bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
+ bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);
return 0;
}
@@ -1004,24 +979,23 @@ static void m_can_echo_tx_event(struct net_device *dev)
m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get Tx Event fifo element count */
- txe_count = (m_can_txefs & TXEFS_EFFL_MASK) >> TXEFS_EFFL_SHIFT;
+ txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
/* retrieve get index */
- fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >>
- TXEFS_EFGI_SHIFT;
+ fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */
txe = m_can_txe_fifo_read(cdev, fgi, 4);
- msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
/* ack txe element */
- m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
- (fgi << TXEFA_EFAI_SHIFT)));
+ m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
+ fgi));
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
@@ -1147,8 +1121,10 @@ static int m_can_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
- reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
- (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
+ reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
+ FIELD_PREP(NBTP_NSJW_MASK, sjw) |
+ FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
+ FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_NBTP, reg_btp);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
@@ -1185,13 +1161,13 @@ static int m_can_set_bittiming(struct net_device *dev)
reg_btp |= DBTP_TDC;
m_can_write(cdev, M_CAN_TDCR,
- tdco << TDCR_TDCO_SHIFT);
+ FIELD_PREP(TDCR_TDCO_MASK, tdco));
}
- reg_btp |= (brp << DBTP_DBRP_SHIFT) |
- (sjw << DBTP_DSJW_SHIFT) |
- (tseg1 << DBTP_DTSEG1_SHIFT) |
- (tseg2 << DBTP_DTSEG2_SHIFT);
+ reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+ FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+ FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+ FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
@@ -1217,44 +1193,50 @@ static void m_can_chip_config(struct net_device *dev)
m_can_config_endisable(cdev, true);
/* RX Buffer/FIFO Element Size 64 bytes data field */
- m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
+ m_can_write(cdev, M_CAN_RXESC,
+ FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
+ FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
+ FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));
/* Accept Non-matching Frames Into FIFO 0 */
m_can_write(cdev, M_CAN_GFC, 0x0);
if (cdev->version == 30) {
/* only support one Tx Buffer currently */
- m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
+ m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
cdev->mcfg[MRAM_TXB].off);
} else {
/* TX FIFO is used for newer IP Core versions */
m_can_write(cdev, M_CAN_TXBC,
- (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
- (cdev->mcfg[MRAM_TXB].off));
+ FIELD_PREP(TXBC_TFQS_MASK,
+ cdev->mcfg[MRAM_TXB].num) |
+ cdev->mcfg[MRAM_TXB].off);
}
/* support 64 bytes payload */
- m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES);
+ m_can_write(cdev, M_CAN_TXESC,
+ FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));
/* TX Event FIFO */
if (cdev->version == 30) {
- m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
+ m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFS_MASK, 1) |
cdev->mcfg[MRAM_TXE].off);
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
- ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
- & TXEFC_EFS_MASK) |
+ FIELD_PREP(TXEFC_EFS_MASK,
+ cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
}
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
- (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
+ FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
m_can_write(cdev, M_CAN_RXF1C,
- (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
+ FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
cdev->mcfg[MRAM_RXF1].off);
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1264,11 +1246,11 @@ static void m_can_chip_config(struct net_device *dev)
/* Version 3.0.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
- (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
- (CCCR_CME_MASK << CCCR_CME_SHIFT));
+ FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
+ FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
- cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);
} else {
/* Version 3.1.x or 3.2.x */
@@ -1372,8 +1354,8 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
* Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
*/
crel_reg = m_can_read(cdev, M_CAN_CREL);
- rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
- step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
+ rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
+ step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
if (rel == 3) {
/* M_CAN v3.x.y: create return value */
@@ -1593,16 +1575,16 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
- cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+ cccr &= ~CCCR_CMR_MASK;
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
- cccr |= CCCR_CMR_CANFD_BRS <<
- CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK,
+ CCCR_CMR_CANFD_BRS);
else
- cccr |= CCCR_CMR_CANFD <<
- CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK,
+ CCCR_CMR_CANFD);
} else {
- cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
}
m_can_write(cdev, M_CAN_CCCR, cccr);
}
@@ -1629,8 +1611,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
/* get put index for frame */
- putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
- >> TXFQS_TFQPI_SHIFT);
+ putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
/* Write ID Field to FIFO Element */
m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
@@ -1648,9 +1630,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
* sending the correct echo frame
*/
m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
- ((putidx << TX_BUF_MM_SHIFT) &
- TX_BUF_MM_MASK) |
- (can_fd_len2dlc(cf->len) << 16) |
+ FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ FIELD_PREP(TX_BUF_DLC_MASK,
+ can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC);
for (i = 0; i < cf->len; i += 4)
@@ -1810,11 +1792,11 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
- (RXFC_FS_MASK >> RXFC_FS_SHIFT);
+ FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
- (RXFC_FS_MASK >> RXFC_FS_SHIFT);
+ FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
@@ -1824,7 +1806,7 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
- (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
+ FIELD_MAX(TXBC_NDTB_MASK);
dev_dbg(cdev->dev,
"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 00847cbaf7b6..d08718e98e11 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return err;
}
- /* start network queue (echo_skb array is empty) */
- netif_start_queue(ndev);
+ /* wake network queue up (echo_skb array is empty) */
+ netif_wake_queue(ndev);
return 0;
}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index c44f3411e561..cfc1325aad10 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -239,7 +239,6 @@ static int softing_handle_1(struct softing *card)
DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
/* timestamp */
tmp_u32 = le32_to_cpup((void *)ptr);
- ptr += 4;
ktime = softing_raw2ktime(card, tmp_u32);
++netdev->stats.rx_errors;
@@ -276,7 +275,6 @@ static int softing_handle_1(struct softing *card)
ktime = softing_raw2ktime(card, tmp_u32);
if (!(msg.can_id & CAN_RTR_FLAG))
memcpy(&msg.data[0], ptr, 8);
- ptr += 8;
/* update socket */
if (cmd & CMD_ACK) {
/* acknowledge, was tx msg */
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 6f5d6d04a8b9..dd17b8c53e1c 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -871,7 +871,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_BERR_REPORTING;
if (of_id)
- priv->model = (enum hi3110_model)of_id->data;
+ priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 173c6614086f..0579ab74f728 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1330,7 +1330,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
if (match)
- priv->model = (enum mcp251x_model)match;
+ priv->model = (enum mcp251x_model)(uintptr_t)match;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index e0ae00e34c7b..47c3f408a799 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -560,7 +560,7 @@ mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
-static inline int
+static inline int __maybe_unused
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
const u8 mode_req)
{
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 3deb9f1cd292..f959215c9d53 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -76,7 +76,9 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
+ - Kvaser Hybrid Pro CAN/LIN
- Kvaser Hybrid Pro 2xCAN/LIN
- Kvaser Memorator 2xHS v2
- Kvaser Memorator Pro 2xHS v2
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5af69787d9d5..0a37af4a3fa4 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf)
if (dev) {
unregister_netdev(dev->netdev);
- free_candev(dev->netdev);
unlink_all_urbs(dev);
@@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf)
kfree(dev->intr_in_buffer);
kfree(dev->tx_msg_buffer);
+
+ free_candev(dev->netdev);
}
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 90ebcae13409..0cc0fc866a2a 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -79,16 +79,18 @@
#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
#define USB_MEMO_2HS_PRODUCT_ID 265
#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
-#define USB_HYBRID_CANLIN_PRODUCT_ID 267
+#define USB_HYBRID_2CANLIN_PRODUCT_ID 267
#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
-#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 270
#define USB_U100_PRODUCT_ID 273
#define USB_U100P_PRODUCT_ID 274
#define USB_U100S_PRODUCT_ID 275
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
+#define USB_HYBRID_CANLIN_PRODUCT_ID 277
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
#define USB_HYDRA_PRODUCT_ID_END \
- USB_USBCAN_PRO_4HS_PRODUCT_ID
+ USB_HYBRID_PRO_CANLIN_PRODUCT_ID
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
@@ -187,14 +189,16 @@ static const struct usb_device_id kvaser_usb_table[] = {
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
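
The Kconfig entries, the #defines, the device table, and USB_HYDRA_PRODUCT_ID_END all move in lockstep because the driver appears to classify devices by product-ID range, so the end marker must always name the highest hydra ID. A hedged sketch of that style of range check (names and values illustrative):

    #include <linux/mod_devicetable.h>

    #define DEMO_HYDRA_PRODUCT_ID_START	261	/* illustrative */
    #define DEMO_HYDRA_PRODUCT_ID_END	278	/* bump when adding IDs */

    static bool demo_is_hydra(const struct usb_device_id *id)
    {
    	return id->idProduct >= DEMO_HYDRA_PRODUCT_ID_START &&
    	       id->idProduct <= DEMO_HYDRA_PRODUCT_ID_END;
    }
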
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 029e77dfa773..a45865bd7254 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -82,6 +82,8 @@ struct mcba_priv {
bool can_ka_first_pass;
bool can_speed_check;
atomic_t free_ctx_cnt;
+ void *rxbuf[MCBA_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
};
/* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- GFP_KERNEL, &urb->transfer_dma);
+ GFP_KERNEL, &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
if (err) {
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- buf, urb->transfer_dma);
+ buf, buf_dma);
usb_free_urb(urb);
break;
}
+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
static void mcba_urb_unlink(struct mcba_priv *priv)
{
+ int i;
+
usb_kill_anchored_urbs(&priv->rx_submitted);
+
+ for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&priv->tx_submitted);
}
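
The mcba_usb change fixes a memory leak: the coherent RX buffers were allocated per URB, but urb->transfer_dma is gone once the URB is released, so nothing could free them on close. Recording each buffer and its DMA handle in the private struct lets the teardown path release them. A condensed sketch of the alloc/record/free pairing, with hypothetical names and sizes:

    #include <linux/usb.h>

    #define DEMO_RX_URBS	20
    #define DEMO_RX_SIZE	19

    struct demo_priv {
    	struct usb_device *udev;
    	void *rxbuf[DEMO_RX_URBS];
    	dma_addr_t rxbuf_dma[DEMO_RX_URBS];
    };

    static int demo_alloc_rx(struct demo_priv *priv, int i)
    {
    	dma_addr_t dma;
    	void *buf;

    	buf = usb_alloc_coherent(priv->udev, DEMO_RX_SIZE, GFP_KERNEL, &dma);
    	if (!buf)
    		return -ENOMEM;

    	/* Keep both pointers: usb_free_coherent() needs the DMA handle. */
    	priv->rxbuf[i] = buf;
    	priv->rxbuf_dma[i] = dma;
    	return 0;
    }

    static void demo_free_rx(struct demo_priv *priv)
    {
    	int i;

    	for (i = 0; i < DEMO_RX_URBS; i++)
    		usb_free_coherent(priv->udev, DEMO_RX_SIZE,
    				  priv->rxbuf[i], priv->rxbuf_dma[i]);
    }
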
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3ca6b394dd5f..b23e3488695b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -728,6 +728,13 @@ static u16 b53_default_pvid(struct b53_device *dev)
return 0;
}
+static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -748,9 +755,20 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
- b53_for_each_port(dev, i)
+ /* Create an untagged VLAN entry for the default PVID in case
+ * CONFIG_VLAN_8021Q is disabled and there are no calls to
+ * dsa_slave_vlan_rx_add_vid() to create the default VLAN
+ * entry. Do this only when the tagging protocol is not
+	 * DSA_TAG_PROTO_NONE.
+ */
+ b53_for_each_port(dev, i) {
+ v = &dev->vlans[def_vid];
+ v->members |= BIT(i);
+ if (!b53_vlan_port_needs_forced_tagged(ds, i))
+ v->untag = v->members;
b53_write16(dev, B53_VLAN_PAGE,
B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ }
/* Upon initial call we have not set-up any VLANs, but upon
* system resume, we need to restore all VLAN entries.
@@ -1084,6 +1102,11 @@ static int b53_setup(struct dsa_switch *ds)
unsigned int port;
int ret;
+	/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set,
+	 * which forces the CPU port to be tagged in all VLANs.
+ */
+ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
@@ -1477,7 +1500,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
untagged = true;
vl->members |= BIT(port);
- if (untagged && !dsa_is_cpu_port(ds, port))
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
@@ -1514,7 +1537,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
- if (untagged && !dsa_is_cpu_port(ds, port))
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vlan->vid, vl);
@@ -2660,7 +2683,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
- ds->untag_bridge_pvid = true;
dev->vlan_enabled = true;
/* Let DSA handle the case were multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index aaa12d73784e..3f4249de70c5 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -632,8 +632,7 @@ static int b53_srab_remove(struct platform_device *pdev)
struct b53_srab_priv *priv = dev->priv;
b53_srab_intr_set(priv, false);
- if (dev)
- b53_switch_remove(dev);
+ b53_switch_remove(dev);
return 0;
}
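
The b53_srab hunk removes a NULL check that could never fail: dev->priv is dereferenced two lines earlier, so by the time "if (dev)" runs the pointer is known valid (and static analyzers flag the pattern as check-after-use). The anti-pattern in isolation, with hypothetical names:

    #include <linux/types.h>

    struct demo_priv;
    struct demo_dev {
    	struct demo_priv *priv;
    };

    static void demo_intr_set(struct demo_priv *priv, bool en);
    static void demo_switch_remove(struct demo_dev *dev);

    static int demo_remove(struct demo_dev *dev)
    {
    	struct demo_priv *priv = dev->priv;	/* dev dereferenced here */

    	demo_intr_set(priv, false);

    	if (dev)	/* dead check: a NULL dev would already have crashed */
    		demo_switch_remove(dev);

    	return 0;
    }
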
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9150038b60cb..3b018fcf4412 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl;
+ u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
-
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 4d78219da253..9fdcc4bde480 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -927,7 +927,6 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
/* Read table */
for (i = 0; i < hellcreek->fdb_entries; ++i) {
- unsigned char null_addr[ETH_ALEN] = { 0 };
struct hellcreek_fdb_entry entry = { 0 };
/* Read entry */
@@ -937,7 +936,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
/* Check valid */
- if (!memcmp(entry.mac, null_addr, ETH_ALEN))
+ if (is_zero_ether_addr(entry.mac))
continue;
/* Check port mask */
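
The hellcreek hunk replaces a memcmp() against a zeroed stack array with is_zero_ether_addr() from <linux/etherdevice.h>, which reads better and avoids rebuilding a 6-byte null address on every loop iteration. Minimal usage sketch:

    #include <linux/etherdevice.h>

    static bool demo_fdb_entry_valid(const u8 *mac)
    {
    	/* is_zero_ether_addr() returns true for 00:00:00:00:00:00;
    	 * an all-zero MAC marks an empty FDB slot here.
    	 */
    	return !is_zero_ether_addr(mac);
    }
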
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index ad509a57a945..560f6843bb65 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -6,6 +6,7 @@
* Tristram Ha <Tristram.Ha@microchip.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
@@ -15,8 +16,10 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
+#include <linux/phylink.h>
#include "ksz_common.h"
#include "ksz8795_reg.h"
@@ -727,92 +730,114 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
u8 restart, speed, ctrl, link;
const u8 *regs = ksz8->regs;
int processed = true;
+ u8 val1, val2;
u16 data = 0;
u8 p = phy;
switch (reg) {
- case PHY_REG_CTRL:
+ case MII_BMCR:
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
if (restart & PORT_PHY_LOOPBACK)
- data |= PHY_LOOPBACK;
+ data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
- data |= PHY_SPEED_100MBIT;
+ data |= BMCR_SPEED100;
if (ksz_is_ksz88x3(dev)) {
if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= PHY_AUTO_NEG_ENABLE;
+ data |= BMCR_ANENABLE;
} else {
if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= PHY_AUTO_NEG_ENABLE;
+ data |= BMCR_ANENABLE;
}
if (restart & PORT_POWER_DOWN)
- data |= PHY_POWER_DOWN;
+ data |= BMCR_PDOWN;
if (restart & PORT_AUTO_NEG_RESTART)
- data |= PHY_AUTO_NEG_RESTART;
+ data |= BMCR_ANRESTART;
if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= PHY_FULL_DUPLEX;
+ data |= BMCR_FULLDPLX;
if (speed & PORT_HP_MDIX)
- data |= PHY_HP_MDIX;
+ data |= KSZ886X_BMCR_HP_MDIX;
if (restart & PORT_FORCE_MDIX)
- data |= PHY_FORCE_MDIX;
+ data |= KSZ886X_BMCR_FORCE_MDI;
if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= PHY_AUTO_MDIX_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
if (restart & PORT_TX_DISABLE)
- data |= PHY_TRANSMIT_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
if (restart & PORT_LED_OFF)
- data |= PHY_LED_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_LED;
break;
- case PHY_REG_STATUS:
+ case MII_BMSR:
ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
- data = PHY_100BTX_FD_CAPABLE |
- PHY_100BTX_CAPABLE |
- PHY_10BT_FD_CAPABLE |
- PHY_10BT_CAPABLE |
- PHY_AUTO_NEG_CAPABLE;
+ data = BMSR_100FULL |
+ BMSR_100HALF |
+ BMSR_10FULL |
+ BMSR_10HALF |
+ BMSR_ANEGCAPABLE;
if (link & PORT_AUTO_NEG_COMPLETE)
- data |= PHY_AUTO_NEG_ACKNOWLEDGE;
+ data |= BMSR_ANEGCOMPLETE;
if (link & PORT_STAT_LINK_GOOD)
- data |= PHY_LINK_STATUS;
+ data |= BMSR_LSTATUS;
break;
- case PHY_REG_ID_1:
+ case MII_PHYSID1:
data = KSZ8795_ID_HI;
break;
- case PHY_REG_ID_2:
+ case MII_PHYSID2:
if (ksz_is_ksz88x3(dev))
data = KSZ8863_ID_LO;
else
data = KSZ8795_ID_LO;
break;
- case PHY_REG_AUTO_NEGOTIATION:
+ case MII_ADVERTISE:
ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
- data = PHY_AUTO_NEG_802_3;
+ data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
- data |= PHY_AUTO_NEG_SYM_PAUSE;
+ data |= ADVERTISE_PAUSE_CAP;
if (ctrl & PORT_AUTO_NEG_100BTX_FD)
- data |= PHY_AUTO_NEG_100BTX_FD;
+ data |= ADVERTISE_100FULL;
if (ctrl & PORT_AUTO_NEG_100BTX)
- data |= PHY_AUTO_NEG_100BTX;
+ data |= ADVERTISE_100HALF;
if (ctrl & PORT_AUTO_NEG_10BT_FD)
- data |= PHY_AUTO_NEG_10BT_FD;
+ data |= ADVERTISE_10FULL;
if (ctrl & PORT_AUTO_NEG_10BT)
- data |= PHY_AUTO_NEG_10BT;
+ data |= ADVERTISE_10HALF;
break;
- case PHY_REG_REMOTE_CAPABILITY:
+ case MII_LPA:
ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
- data = PHY_AUTO_NEG_802_3;
+ data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
- data |= PHY_AUTO_NEG_SYM_PAUSE;
+ data |= LPA_PAUSE_CAP;
if (link & PORT_REMOTE_100BTX_FD)
- data |= PHY_AUTO_NEG_100BTX_FD;
+ data |= LPA_100FULL;
if (link & PORT_REMOTE_100BTX)
- data |= PHY_AUTO_NEG_100BTX;
+ data |= LPA_100HALF;
if (link & PORT_REMOTE_10BT_FD)
- data |= PHY_AUTO_NEG_10BT_FD;
+ data |= LPA_10FULL;
if (link & PORT_REMOTE_10BT)
- data |= PHY_AUTO_NEG_10BT;
- if (data & ~PHY_AUTO_NEG_802_3)
- data |= PHY_REMOTE_ACKNOWLEDGE_NOT;
+ data |= LPA_10HALF;
+ if (data & ~LPA_SLCT)
+ data |= LPA_LPACK;
+ break;
+ case PHY_REG_LINK_MD:
+ ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (val1 & PORT_START_CABLE_DIAG)
+ data |= PHY_START_CABLE_DIAG;
+
+ if (val1 & PORT_CABLE_10M_SHORT)
+ data |= PHY_CABLE_10M_SHORT;
+
+ data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
+ FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
+
+ data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
+ (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
+ FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
+ break;
+ case PHY_REG_PHY_CTRL:
+ ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (link & PORT_MDIX_STATUS)
+ data |= KSZ886X_CTRL_MDIX_STAT;
break;
default:
processed = false;
@@ -830,14 +855,14 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
u8 p = phy;
switch (reg) {
- case PHY_REG_CTRL:
+ case MII_BMCR:
/* Do not support PHY reset function. */
- if (val & PHY_RESET)
+ if (val & BMCR_RESET)
break;
ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
data = speed;
- if (val & PHY_HP_MDIX)
+ if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
@@ -846,12 +871,12 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
- if ((val & PHY_AUTO_NEG_ENABLE))
+ if ((val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_ENABLE;
else
data &= ~PORT_AUTO_NEG_ENABLE;
} else {
- if (!(val & PHY_AUTO_NEG_ENABLE))
+ if (!(val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_DISABLE;
else
data &= ~PORT_AUTO_NEG_DISABLE;
@@ -861,11 +886,11 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_DISABLE;
}
- if (val & PHY_SPEED_100MBIT)
+ if (val & BMCR_SPEED100)
data |= PORT_FORCE_100_MBIT;
else
data &= ~PORT_FORCE_100_MBIT;
- if (val & PHY_FULL_DUPLEX)
+ if (val & BMCR_FULLDPLX)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
@@ -873,38 +898,38 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
data = restart;
- if (val & PHY_LED_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
else
data &= ~PORT_LED_OFF;
- if (val & PHY_TRANSMIT_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
data |= PORT_TX_DISABLE;
else
data &= ~PORT_TX_DISABLE;
- if (val & PHY_AUTO_NEG_RESTART)
+ if (val & BMCR_ANRESTART)
data |= PORT_AUTO_NEG_RESTART;
else
data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & PHY_POWER_DOWN)
+ if (val & BMCR_PDOWN)
data |= PORT_POWER_DOWN;
else
data &= ~PORT_POWER_DOWN;
- if (val & PHY_AUTO_MDIX_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
data |= PORT_AUTO_MDIX_DISABLE;
else
data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & PHY_FORCE_MDIX)
+ if (val & KSZ886X_BMCR_FORCE_MDI)
data |= PORT_FORCE_MDIX;
else
data &= ~PORT_FORCE_MDIX;
- if (val & PHY_LOOPBACK)
+ if (val & BMCR_LOOPBACK)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
if (data != restart)
ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
break;
- case PHY_REG_AUTO_NEGOTIATION:
+ case MII_ADVERTISE:
ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
@@ -912,19 +937,23 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
PORT_AUTO_NEG_100BTX |
PORT_AUTO_NEG_10BT_FD |
PORT_AUTO_NEG_10BT);
- if (val & PHY_AUTO_NEG_SYM_PAUSE)
+ if (val & ADVERTISE_PAUSE_CAP)
data |= PORT_AUTO_NEG_SYM_PAUSE;
- if (val & PHY_AUTO_NEG_100BTX_FD)
+ if (val & ADVERTISE_100FULL)
data |= PORT_AUTO_NEG_100BTX_FD;
- if (val & PHY_AUTO_NEG_100BTX)
+ if (val & ADVERTISE_100HALF)
data |= PORT_AUTO_NEG_100BTX;
- if (val & PHY_AUTO_NEG_10BT_FD)
+ if (val & ADVERTISE_10FULL)
data |= PORT_AUTO_NEG_10BT_FD;
- if (val & PHY_AUTO_NEG_10BT)
+ if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
if (data != ctrl)
ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
break;
+ case PHY_REG_LINK_MD:
+ if (val & PHY_START_CABLE_DIAG)
+ ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true);
+ break;
default:
break;
}
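
These ksz8795 hunks drop the driver-private PHY bit names in favour of the generic MII definitions from <linux/mii.h> (MII_BMCR, BMCR_*, BMSR_*, ADVERTISE_*, LPA_*), with the remaining chip-specific bits renamed to KSZ886X_* in <linux/micrel_phy.h>. A small sketch of decoding a BMCR value with the generic names (the print is illustrative):

    #include <linux/mii.h>
    #include <linux/printk.h>

    static void demo_decode_bmcr(u16 bmcr)
    {
    	bool aneg = bmcr & BMCR_ANENABLE;	/* autoneg enabled */
    	bool fast = bmcr & BMCR_SPEED100;	/* forced 100 Mbit/s */
    	bool fdx  = bmcr & BMCR_FULLDPLX;	/* forced full duplex */

    	pr_info("aneg=%d speed100=%d fdx=%d\n", aneg, fast, fdx);
    }
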
@@ -941,6 +970,18 @@ static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
}
+static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+
+ return 0;
+}
+
static void ksz8_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *buf)
{
@@ -1419,11 +1460,66 @@ static int ksz8_setup(struct dsa_switch *ds)
return 0;
}
+static void ksz8_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct ksz_device *dev = ds->priv;
+
+ if (port == dev->cpu_port) {
+ if (state->interface != PHY_INTERFACE_MODE_RMII &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_NA)
+ goto unsupported;
+ } else {
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_NA)
+ goto unsupported;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+
+ /* Silicon Errata Sheet (DS80000830A):
+ * "Port 1 does not respond to received flow control PAUSE frames"
+ * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
+ * switches.
+ */
+ if (!ksz_is_ksz88x3(dev) || port)
+ phylink_set(mask, Pause);
+
+ /* Asym pause is not supported on KSZ8863 and KSZ8873 */
+ if (!ksz_is_ksz88x3(dev))
+ phylink_set(mask, Asym_Pause);
+
+	/* Only 10M and 100M are supported */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
+ phy_modes(state->interface), port);
+}
+
static const struct dsa_switch_ops ksz8_switch_ops = {
.get_tag_protocol = ksz8_get_tag_protocol,
+ .get_phy_flags = ksz8_sw_get_phy_flags,
.setup = ksz8_setup,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
+ .phylink_validate = ksz8_validate,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
.get_strings = ksz8_get_strings,
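
ksz8_validate() above follows the classic phylink validate pattern (from before the mac_capabilities era): reject unsupported interface modes, build a mask of the link modes the port can do, then AND the mask into both supported and state->advertising. The skeleton in isolation, assuming a 10/100-only port:

    #include <linux/bitmap.h>
    #include <linux/phylink.h>

    static void demo_validate(unsigned long *supported,
    			  struct phylink_link_state *state)
    {
    	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

    	phylink_set_port_modes(mask);
    	phylink_set(mask, Autoneg);
    	phylink_set(mask, 10baseT_Half);
    	phylink_set(mask, 10baseT_Full);
    	phylink_set(mask, 100baseT_Half);
    	phylink_set(mask, 100baseT_Full);

    	/* Constrain both what we report and what we advertise. */
    	bitmap_and(supported, supported, mask,
    		   __ETHTOOL_LINK_MODE_MASK_NBITS);
    	bitmap_and(state->advertising, state->advertising, mask,
    		   __ETHTOOL_LINK_MODE_MASK_NBITS);
    }
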
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index c2e52c40a54c..a32355624f31 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -249,7 +249,7 @@
#define REG_PORT_4_LINK_MD_CTRL 0x4A
#define PORT_CABLE_10M_SHORT BIT(7)
-#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_M GENMASK(6, 5)
#define PORT_CABLE_DIAG_RESULT_S 5
#define PORT_CABLE_STAT_NORMAL 0
#define PORT_CABLE_STAT_OPEN 1
@@ -744,68 +744,6 @@
#define PORT_ACL_FORCE_DLR_MISS BIT(0)
-#ifndef PHY_REG_CTRL
-#define PHY_REG_CTRL 0
-
-#define PHY_RESET BIT(15)
-#define PHY_LOOPBACK BIT(14)
-#define PHY_SPEED_100MBIT BIT(13)
-#define PHY_AUTO_NEG_ENABLE BIT(12)
-#define PHY_POWER_DOWN BIT(11)
-#define PHY_MII_DISABLE BIT(10)
-#define PHY_AUTO_NEG_RESTART BIT(9)
-#define PHY_FULL_DUPLEX BIT(8)
-#define PHY_COLLISION_TEST_NOT BIT(7)
-#define PHY_HP_MDIX BIT(5)
-#define PHY_FORCE_MDIX BIT(4)
-#define PHY_AUTO_MDIX_DISABLE BIT(3)
-#define PHY_REMOTE_FAULT_DISABLE BIT(2)
-#define PHY_TRANSMIT_DISABLE BIT(1)
-#define PHY_LED_DISABLE BIT(0)
-
-#define PHY_REG_STATUS 1
-
-#define PHY_100BT4_CAPABLE BIT(15)
-#define PHY_100BTX_FD_CAPABLE BIT(14)
-#define PHY_100BTX_CAPABLE BIT(13)
-#define PHY_10BT_FD_CAPABLE BIT(12)
-#define PHY_10BT_CAPABLE BIT(11)
-#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6)
-#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5)
-#define PHY_REMOTE_FAULT BIT(4)
-#define PHY_AUTO_NEG_CAPABLE BIT(3)
-#define PHY_LINK_STATUS BIT(2)
-#define PHY_JABBER_DETECT_NOT BIT(1)
-#define PHY_EXTENDED_CAPABILITY BIT(0)
-
-#define PHY_REG_ID_1 2
-#define PHY_REG_ID_2 3
-
-#define PHY_REG_AUTO_NEGOTIATION 4
-
-#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15)
-#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13)
-#define PHY_AUTO_NEG_SYM_PAUSE BIT(10)
-#define PHY_AUTO_NEG_100BT4 BIT(9)
-#define PHY_AUTO_NEG_100BTX_FD BIT(8)
-#define PHY_AUTO_NEG_100BTX BIT(7)
-#define PHY_AUTO_NEG_10BT_FD BIT(6)
-#define PHY_AUTO_NEG_10BT BIT(5)
-#define PHY_AUTO_NEG_SELECTOR 0x001F
-#define PHY_AUTO_NEG_802_3 0x0001
-
-#define PHY_REG_REMOTE_CAPABILITY 5
-
-#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15)
-#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14)
-#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13)
-#define PHY_REMOTE_SYM_PAUSE BIT(10)
-#define PHY_REMOTE_100BTX_FD BIT(8)
-#define PHY_REMOTE_100BTX BIT(7)
-#define PHY_REMOTE_10BT_FD BIT(6)
-#define PHY_REMOTE_10BT BIT(5)
-#endif
-
#define KSZ8795_ID_HI 0x0022
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
@@ -815,13 +753,14 @@
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
+#define PHY_CABLE_DIAG_RESULT_M GENMASK(14, 13)
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT BIT(12)
-#define PHY_CABLE_FAULT_COUNTER 0x01FF
+#define PHY_CABLE_FAULT_COUNTER_M GENMASK(8, 0)
#define PHY_REG_PHY_CTRL 0x1F
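
Defining PORT_CABLE_DIAG_RESULT_M and PHY_CABLE_FAULT_COUNTER_M with GENMASK() is what enables the FIELD_GET()/FIELD_PREP() repacking in the LINK_MD handler above: the macros derive the shift from the mask itself, so the separate _S shift constants become unnecessary. The idiom in isolation:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define SRC_RESULT_M	GENMASK(6, 5)	/* field in the 8-bit port reg */
    #define DST_RESULT_M	GENMASK(14, 13)	/* field in the 16-bit PHY reg */

    static u16 demo_repack(u8 port_reg)
    {
    	/* FIELD_GET extracts the field, FIELD_PREP re-places it; the
    	 * shift amounts come from the GENMASK definitions themselves.
    	 */
    	return FIELD_PREP(DST_RESULT_M, FIELD_GET(SRC_RESULT_M, port_reg));
    }
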
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 55e5d479acce..854e25f43fa7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 96f7c9eede35..93136f7e69f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -10,6 +10,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -596,18 +597,14 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
- struct mt7530_priv *priv = ds->priv;
-
return mdiobus_read_nested(priv->bus, port, regnum);
}
-static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
+static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
u16 val)
{
- struct mt7530_priv *priv = ds->priv;
-
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
@@ -785,9 +782,8 @@ out:
}
static int
-mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
+mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
- struct mt7530_priv *priv = ds->priv;
int devad;
int ret;
@@ -803,10 +799,9 @@ mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
}
static int
-mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
+mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
u16 data)
{
- struct mt7530_priv *priv = ds->priv;
int devad;
int ret;
@@ -822,6 +817,22 @@ mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
return ret;
}
+static int
+mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+{
+ struct mt7530_priv *priv = bus->priv;
+
+ return priv->info->phy_read(priv, port, regnum);
+}
+
+static int
+mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = bus->priv;
+
+ return priv->info->phy_write(priv, port, regnum, val);
+}
+
static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -1262,14 +1273,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- /* The real fabric path would be decided on the membership in the
- * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
- * means potential VLAN can be consisting of certain subset of all
- * ports.
- */
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.
@@ -1828,6 +1831,210 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
+static irqreturn_t
+mt7530_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mt7530_priv *priv = dev_id;
+ bool handled = false;
+ u32 val;
+ int p;
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
+ mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & val) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(priv->irq_domain, p);
+ handle_nested_irq(irq);
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void
+mt7530_irq_mask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable &= ~BIT(d->hwirq);
+}
+
+static void
+mt7530_irq_unmask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable |= BIT(d->hwirq);
+}
+
+static void
+mt7530_irq_bus_lock(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static struct irq_chip mt7530_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irq_mask = mt7530_irq_mask,
+ .irq_unmask = mt7530_irq_unmask,
+ .irq_bus_lock = mt7530_irq_bus_lock,
+ .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
+};
+
+static int
+mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mt7530_irq_domain_ops = {
+ .map = mt7530_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void
+mt7530_setup_mdio_irq(struct mt7530_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int p;
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & ds->phys_mii_mask) {
+ unsigned int irq;
+
+ irq = irq_create_mapping(priv->irq_domain, p);
+ ds->slave_mii_bus->irq[p] = irq;
+ }
+ }
+}
+
+static int
+mt7530_setup_irq(struct mt7530_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_property_read_bool(np, "interrupt-controller")) {
+ dev_info(dev, "no interrupt support\n");
+ return 0;
+ }
+
+ priv->irq = of_irq_get(np, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
+ return priv->irq ? : -EINVAL;
+ }
+
+ priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
+ &mt7530_irq_domain_ops, priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /* This register must be set for MT7530 to properly fire interrupts */
+ if (priv->id != ID_MT7531)
+ mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
+
+ ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
+ IRQF_ONESHOT, KBUILD_MODNAME, priv);
+ if (ret) {
+ irq_domain_remove(priv->irq_domain);
+ dev_err(dev, "failed to request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+mt7530_free_mdio_irq(struct mt7530_priv *priv)
+{
+ int p;
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & priv->ds->phys_mii_mask) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(priv->irq_domain, p);
+ irq_dispose_mapping(irq);
+ }
+ }
+}
+
+static void
+mt7530_free_irq_common(struct mt7530_priv *priv)
+{
+ free_irq(priv->irq, priv);
+ irq_domain_remove(priv->irq_domain);
+}
+
+static void
+mt7530_free_irq(struct mt7530_priv *priv)
+{
+ mt7530_free_mdio_irq(priv);
+ mt7530_free_irq_common(priv);
+}
+
+static int
+mt7530_setup_mdio(struct mt7530_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ static int idx;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ ds->slave_mii_bus = bus;
+ bus->priv = priv;
+ bus->name = KBUILD_MODNAME "-mii";
+ snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
+ bus->read = mt753x_phy_read;
+ bus->write = mt753x_phy_write;
+ bus->parent = dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ if (priv->irq)
+ mt7530_setup_mdio_irq(priv);
+
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(dev, "failed to register MDIO bus: %d\n", ret);
+ if (priv->irq)
+ mt7530_free_mdio_irq(priv);
+ }
+
+ return ret;
+}
+
static int
mt7530_setup(struct dsa_switch *ds)
{
@@ -2791,24 +2998,20 @@ static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
+ int ret = priv->info->sw_setup(ds);
- return priv->info->sw_setup(ds);
-}
-
-static int
-mt753x_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- struct mt7530_priv *priv = ds->priv;
+ if (ret)
+ return ret;
- return priv->info->phy_read(ds, port, regnum);
-}
+ ret = mt7530_setup_irq(priv);
+ if (ret)
+ return ret;
-static int
-mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
- struct mt7530_priv *priv = ds->priv;
+ ret = mt7530_setup_mdio(priv);
+ if (ret && priv->irq)
+ mt7530_free_irq_common(priv);
- return priv->info->phy_write(ds, port, regnum, val);
+ return ret;
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
@@ -2845,8 +3048,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
.get_strings = mt7530_get_strings,
- .phy_read = mt753x_phy_read,
- .phy_write = mt753x_phy_write,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
.set_ageing_time = mt7530_set_ageing_time,
@@ -3029,6 +3230,9 @@ mt7530_remove(struct mdio_device *mdiodev)
dev_err(priv->dev, "Failed to disable io pwr: %d\n",
ret);
+ if (priv->irq)
+ mt7530_free_irq(priv);
+
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
}
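
The new mt7530 interrupt code is the standard irq_chip-over-a-slow-bus split: irq_mask()/irq_unmask() only edit a cached enable word, while the expensive MDIO register write is deferred to irq_bus_sync_unlock(), which the IRQ core invokes with the chip's bus lock held. A reduced sketch of that split, assuming a demo_mii_write() accessor and a register layout like the one above:

    #include <linux/irq.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    #define DEMO_SYS_INT_EN	0x7008	/* mirrors MT7530_SYS_INT_EN above */

    struct demo_priv {
    	u32 irq_enable;
    	struct mutex bus_lock;
    };

    static void demo_mii_write(struct demo_priv *priv, u32 reg, u32 val);

    /* Fast path: runs under the irq_chip's bus lock; just edit the cache. */
    static void demo_irq_mask(struct irq_data *d)
    {
    	struct demo_priv *priv = irq_data_get_irq_chip_data(d);

    	priv->irq_enable &= ~BIT(d->hwirq);
    }

    /* Slow path: flush the cached enable mask to the hardware once,
     * after all mask/unmask edits in this critical section.
     */
    static void demo_irq_bus_sync_unlock(struct irq_data *d)
    {
    	struct demo_priv *priv = irq_data_get_irq_chip_data(d);

    	demo_mii_write(priv, DEMO_SYS_INT_EN, priv->irq_enable);
    	mutex_unlock(&priv->bus_lock);
    }
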
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 0204da486f3a..334d610a503d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -7,6 +7,7 @@
#define __MT7530_H
#define MT7530_NUM_PORTS 7
+#define MT7530_NUM_PHYS 5
#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
@@ -393,6 +394,12 @@ enum mt7531_sgmii_force_duplex {
#define SYS_CTRL_SW_RST BIT(1)
#define SYS_CTRL_REG_RST BIT(0)
+/* Register for system interrupt */
+#define MT7530_SYS_INT_EN 0x7008
+
+/* Register for system interrupt status */
+#define MT7530_SYS_INT_STS 0x700c
+
/* Register for PHY Indirect Access Control */
#define MT7531_PHY_IAC 0x701C
#define MT7531_PHY_ACS_ST BIT(31)
@@ -714,6 +721,8 @@ static const char *p5_intf_modes(unsigned int p5_interface)
}
}
+struct mt7530_priv;
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
@@ -738,8 +747,8 @@ struct mt753x_info {
enum mt753x_id id;
int (*sw_setup)(struct dsa_switch *ds);
- int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
- int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
+ int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
@@ -773,6 +782,10 @@ struct mt753x_info {
* registers
* @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
+ *
+ * @irq: IRQ number of the switch
+ * @irq_domain: IRQ domain of the switch irq_chip
+ * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
*/
struct mt7530_priv {
struct device *dev;
@@ -794,6 +807,9 @@ struct mt7530_priv {
struct mt7530_port ports[MT7530_NUM_PORTS];
	/* protect register accesses among processes */
struct mutex reg_mutex;
+ int irq;
+ struct irq_domain *irq_domain;
+ u32 irq_enable;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eca285aaf72f..961fa6b75cad 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1618,9 +1618,6 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- if (!vid)
- return -EOPNOTSUPP;
-
/* DSA and CPU ports have to be members of multiple vlans */
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
return 0;
@@ -2109,6 +2106,9 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
u8 member;
int err;
+ if (!vlan->vid)
+ return 0;
+
err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
if (err)
return err;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index ce607fbaaa3a..a2a15919b960 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -940,6 +940,8 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+
/* Undo the effects of felix_phylink_mac_link_down:
* enable MAC module
*/
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2473bebe48e6..f966a253d1c7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
- /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
- * guard band to be implemented for nonschedule queues to schedule
- * queues transition.
+ /* Enable guard band. The switch will schedule frames without taking
+ * their length into account. Thus we'll always need to enable the
+ * guard band which reserves the time of a maximum sized frame at the
+ * end of the time window.
+ *
+ * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+ * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+ * operate on the port number.
*/
- ocelot_rmw(ocelot,
- QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 84f93a874d50..deae923c8b7a 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1206,6 +1206,11 @@ static int seville_probe(struct platform_device *pdev)
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Invalid resource\n");
+ goto err_alloc_felix;
+ }
felix->switch_base = res->start;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index cdaf9f85a2cb..1f63f50f73f1 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/of_net.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
#include <linux/mdio.h>
@@ -88,26 +89,26 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
*page = regaddr & 0x3ff;
}
-static u32
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
- u32 val;
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret >= 0) {
- val = ret;
+ *val = ret;
ret = bus->read(bus, phy_id, regnum + 1);
- val |= ret << 16;
+ *val |= ret << 16;
}
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit register\n");
+ *val = 0;
return ret;
}
- return val;
+ return 0;
}
static void
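
The first qca8k hunk converts the 32-bit MII read from returning the register value, where a negative error was indistinguishable from valid data, to returning an errno with the value handed back through a pointer; the rest of the series threads that convention through every accessor. The shape in isolation, with hypothetical names:

    #include <linux/types.h>

    struct demo_dev;

    /* After the conversion: errno in the return value, data through an
     * out-parameter, so callers can tell failure apart from a register
     * that happens to read back as a large value.
     */
    static int demo_read(struct demo_dev *dev, u32 reg, u32 *val);

    static int demo_caller(struct demo_dev *dev)
    {
    	u32 val;
    	int ret;

    	ret = demo_read(dev, 0x10, &val);
    	if (ret < 0)
    		return ret;	/* propagate instead of consuming junk */

    	return val & 0xff;
    }
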
@@ -127,82 +128,110 @@ qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
"failed to write qca8k 32bit register\n");
}
-static void
+static int
qca8k_set_page(struct mii_bus *bus, u16 page)
{
+ int ret;
+
if (page == qca8k_current_page)
- return;
+ return 0;
- if (bus->write(bus, 0x18, 0, page) < 0)
+ ret = bus->write(bus, 0x18, 0, page);
+ if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to set qca8k page\n");
+ return ret;
+ }
+
qca8k_current_page = page;
+ usleep_range(1000, 2000);
+ return 0;
}
-static u32
-qca8k_read(struct qca8k_priv *priv, u32 reg)
+static int
+qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
- u32 val;
+ int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- qca8k_set_page(priv->bus, page);
- val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+ ret = qca8k_set_page(bus, page);
+ if (ret < 0)
+ goto exit;
- mutex_unlock(&priv->bus->mdio_lock);
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
- return val;
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
}
-static void
+static int
qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
+ int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(bus, page);
+ if (ret < 0)
+ goto exit;
- qca8k_set_page(priv->bus, page);
- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
- mutex_unlock(&priv->bus->mdio_lock);
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
}
-static u32
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val)
+static int
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
- u32 ret;
+ u32 val;
+ int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(bus, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ if (ret < 0)
+ goto exit;
- qca8k_set_page(priv->bus, page);
- ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
- ret &= ~mask;
- ret |= val;
- qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret);
+ val &= ~mask;
+ val |= write_val;
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
- mutex_unlock(&priv->bus->mdio_lock);
+exit:
+ mutex_unlock(&bus->mdio_lock);
return ret;
}
-static void
+static int
qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
{
- qca8k_rmw(priv, reg, 0, val);
+ return qca8k_rmw(priv, reg, 0, val);
}
-static void
+static int
qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
{
- qca8k_rmw(priv, reg, val, 0);
+ return qca8k_rmw(priv, reg, val, 0);
}
static int
@@ -210,9 +239,7 @@ qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- *val = qca8k_read(priv, reg);
-
- return 0;
+ return qca8k_read(priv, reg, val);
}
static int
@@ -220,9 +247,7 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- qca8k_write(priv, reg, val);
-
- return 0;
+ return qca8k_write(priv, reg, val);
}
static const struct regmap_range qca8k_readable_ranges[] = {
@@ -262,32 +287,36 @@ static struct regmap_config qca8k_regmap_config = {
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(20);
+ int ret, ret1;
+ u32 val;
- /* loop until the busy flag has cleared */
- do {
- u32 val = qca8k_read(priv, reg);
- int busy = val & mask;
+ ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
+ 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ priv, reg, &val);
- if (!busy)
- break;
- cond_resched();
- } while (!time_after_eq(jiffies, timeout));
+ /* Check if qca8k_read has failed for a different reason
+ * before returning -ETIMEDOUT
+ */
+ if (ret < 0 && ret1 < 0)
+ return ret1;
- return time_after_eq(jiffies, timeout);
+ return ret;
}
-static void
+static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
- u32 reg[4];
- int i;
+ u32 reg[4], val;
+ int i, ret;
/* load the ARL table into an array */
- for (i = 0; i < 4; i++)
- reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
+ for (i = 0; i < 4; i++) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
+ if (ret < 0)
+ return ret;
+
+ reg[i] = val;
+ }
/* vid - 83:72 */
fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
@@ -302,6 +331,8 @@ qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
fdb->mac[5] = reg[0] & 0xff;
+
+ return 0;
}
static void
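
qca8k_busy_wait() above replaces an open-coded jiffies loop with read_poll_timeout() from <linux/iopoll.h>. Note the slightly unusual usage: the polled function returns a status code (captured in ret1) and writes the register through &val, so the completion condition is expressed on val while errors are examined afterwards. A minimal sketch of the same shape, assuming a demo_read(dev, reg, &val) accessor:

    #include <linux/iopoll.h>
    #include <linux/types.h>

    #define DEMO_TIMEOUT_US	(20 * USEC_PER_MSEC)

    struct demo_dev;
    static int demo_read(struct demo_dev *dev, u32 reg, u32 *val);

    static int demo_busy_wait(struct demo_dev *dev, u32 reg, u32 mask)
    {
    	int ret, ret1;
    	u32 val;

    	/* ret1 = demo_read(dev, reg, &val) is re-evaluated until the
    	 * busy bit clears or DEMO_TIMEOUT_US elapses (ret = -ETIMEDOUT).
    	 */
    	ret = read_poll_timeout(demo_read, ret1, !(val & mask),
    				0, DEMO_TIMEOUT_US, false,
    				dev, reg, &val);

    	/* Prefer the accessor's own error over a bare timeout. */
    	if (ret < 0 && ret1 < 0)
    		return ret1;

    	return ret;
    }
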
@@ -334,6 +365,7 @@ static int
qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
{
u32 reg;
+ int ret;
/* Set the command and FDB index */
reg = QCA8K_ATU_FUNC_BUSY;
@@ -344,15 +376,20 @@ qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
}
/* Write the function register triggering the table access */
- qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
/* wait for completion */
- if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))
- return -1;
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_FDB_LOAD) {
- reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
if (reg & QCA8K_ATU_FUNC_FULL)
return -1;
}
@@ -367,10 +404,10 @@ qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret >= 0)
- qca8k_fdb_read(priv, fdb);
+ if (ret < 0)
+ return ret;
- return ret;
+ return qca8k_fdb_read(priv, fdb);
}
static int
@@ -412,6 +449,7 @@ static int
qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
{
u32 reg;
+ int ret;
/* Set the command and VLAN index */
reg = QCA8K_VTU_FUNC1_BUSY;
@@ -419,15 +457,20 @@ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
reg |= vid << QCA8K_VTU_FUNC1_VID_S;
/* Write the function register triggering the table access */
- qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
/* wait for completion */
- if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY))
- return -ETIMEDOUT;
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_VLAN_LOAD) {
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1);
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
if (reg & QCA8K_VTU_FUNC1_FULL)
return -ENOMEM;
}
@@ -453,7 +496,9 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
if (ret < 0)
goto out;
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
if (untagged)
@@ -463,7 +508,9 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
- qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
out:
@@ -484,7 +531,9 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
if (ret < 0)
goto out;
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
@@ -504,7 +553,9 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
if (del) {
ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
} else {
- qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
}
@@ -514,15 +565,29 @@ out:
return ret;
}
-static void
+static int
qca8k_mib_init(struct qca8k_priv *priv)
{
+ int ret;
+
mutex_lock(&priv->reg_mutex);
- qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
mutex_unlock(&priv->reg_mutex);
+ return ret;
}
static void
@@ -556,54 +621,109 @@ qca8k_port_to_phy(int port)
}
static int
-qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
- u32 phy, val;
+ u16 r1, r2, page;
+ u32 val;
+ int ret, ret1;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ bus, 0x10 | r2, r1, &val);
+
+ /* Check if qca8k_read has failed for a different reason
+	 * before returning -ETIMEDOUT
+ */
+ if (ret < 0 && ret1 < 0)
+ return ret1;
+
+ return ret;
+}
+
+static int
+qca8k_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+	struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
- /* callee is responsible for not passing bad ports,
- * but we still would like to make spills impossible.
- */
- phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
- qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(bus, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
- return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY);
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+
+exit:
+	/* Even if busy_wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
}
static int
-qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+qca8k_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
- u32 phy, val;
+	struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
- /* callee is responsible for not passing bad ports,
- * but we still would like to make spills impossible.
- */
- phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
- qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(bus, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
- if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY))
- return -ETIMEDOUT;
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+
+exit:
+	/* Even if busy_wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
- val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
- QCA8K_MDIO_MASTER_DATA_MASK);
+ mutex_unlock(&bus->mdio_lock);
+
+ if (ret >= 0)
+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
- return val;
+ return ret;
}
static int
@@ -611,7 +731,14 @@ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
- return qca8k_mdio_write(priv, port, regnum, data);
+	/* Use the legacy port-to-PHY mapping when the devicetree does
+	 * not describe which PHY each port maps to
+	 */
+ if (priv->legacy_phy_port_mapping)
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_mdio_write(priv->bus, port, regnum, data);
}
static int
@@ -620,7 +747,14 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
struct qca8k_priv *priv = ds->priv;
int ret;
- ret = qca8k_mdio_read(priv, port, regnum);
+	/* Use the legacy port-to-PHY mapping when the devicetree does
+	 * not describe which PHY each port maps to
+	 */
+ if (priv->legacy_phy_port_mapping)
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ ret = qca8k_mdio_read(priv->bus, port, regnum);
if (ret < 0)
return 0xffff;
@@ -629,14 +763,44 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
}
static int
+qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct mii_bus *bus;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = (void *)priv;
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_mdio_read;
+ bus->write = qca8k_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
+ ds->index);
+
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+}
+
+static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port;
+ struct device_node *ports, *port, *mdio;
+ phy_interface_t mode;
int err;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
+
+ if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
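
qca8k_mdio_register() above is the stock recipe for exposing a switch's internal PHYs as a devres-managed slave MDIO bus described by an "mdio" devicetree child node. A condensed sketch of the same steps (callback and bus names hypothetical):

    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    static int demo_mdio_read(struct mii_bus *bus, int phy, int regnum);
    static int demo_mdio_write(struct mii_bus *bus, int phy, int regnum,
    			   u16 val);

    static int demo_mdio_register(struct device *dev, struct device_node *mdio,
    			      void *priv_data, u32 phy_mask)
    {
    	struct mii_bus *bus;

    	bus = devm_mdiobus_alloc(dev);	/* freed automatically on detach */
    	if (!bus)
    		return -ENOMEM;

    	bus->priv = priv_data;
    	bus->name = "demo slave mii";
    	bus->read = demo_mdio_read;
    	bus->write = demo_mdio_write;
    	snprintf(bus->id, MII_BUS_ID_SIZE, "demo-0");
    	bus->parent = dev;
    	bus->phy_mask = phy_mask;	/* set bits = addresses to skip */

    	/* Registers the bus and binds PHYs from the mdio OF node;
    	 * unregistration is handled by devres.
    	 */
    	return devm_of_mdiobus_register(dev, bus, mdio);
    }
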
@@ -650,7 +814,10 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
if (!dsa_is_user_port(priv->ds, reg))
continue;
- if (of_property_read_bool(port, "phy-handle"))
+ of_get_phy_mode(port, &mode);
+
+ if (of_property_read_bool(port, "phy-handle") &&
+ mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
@@ -683,13 +850,89 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
* a dt-overlay and driver reload changed the configuration
*/
- qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
- return 0;
+ return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ }
+
+	/* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ err = qca8k_mdio_register(priv, mdio);
+ if (err)
+ of_node_put(mdio);
+
+ return err;
}
+	/* If a mapping can't be found, fall back to the legacy mapping
+	 * based on qca8k_port_to_phy()
+	 */
+ priv->legacy_phy_port_mapping = true;
priv->ops.phy_read = qca8k_phy_read;
priv->ops.phy_write = qca8k_phy_write;
+
+ return 0;
+}
+
+static int
+qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
+{
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 val;
+
+ /* CPU port is already checked */
+ dp = dsa_to_port(priv->ds, 0);
+
+ port_dn = dp->dn;
+
+ /* Check if port 0 is set to the correct type */
+ of_get_phy_mode(port_dn, &mode);
+ if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
+ mode != PHY_INTERFACE_MODE_RGMII_RXID &&
+ mode != PHY_INTERFACE_MODE_RGMII_TXID) {
+ return 0;
+ }
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
+ val = 2;
+ else
+			/* Switch regs accept values in ns, so convert ps to ns */
+ val = val / 1000;
+
+ if (val > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ val = 3;
+ }
+
+ priv->rgmii_rx_delay = val;
+ /* Stop here if we need to check only for rx delay */
+ if (mode != PHY_INTERFACE_MODE_RGMII_ID)
+ break;
+
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
+ val = 1;
+ else
+			/* Switch regs accept values in ns, so convert ps to ns */
+ val = val / 1000;
+
+ if (val > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ val = 3;
+ }
+
+ priv->rgmii_tx_delay = val;
+ break;
+ default:
+ return 0;
+ }
+
return 0;
}
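
qca8k_setup_of_rgmii_delay() reads the standard rx-internal-delay-ps / tx-internal-delay-ps properties, converts picoseconds to the nanosecond granularity of the switch registers, and clamps to the hardware maximum. The property-with-default idiom it relies on, in isolation:

    #include <linux/of.h>
    #include <linux/types.h>

    #define DEMO_MAX_DELAY_NS	3

    static u32 demo_read_delay_ns(struct device_node *np, const char *prop,
    			      u32 default_ns)
    {
    	u32 val;

    	/* of_property_read_u32() leaves val untouched and returns an
    	 * error when the property is absent, so a default is easy.
    	 */
    	if (of_property_read_u32(np, prop, &val))
    		return default_ns;

    	val /= 1000;			/* property is in ps, regs take ns */
    	if (val > DEMO_MAX_DELAY_NS)
    		val = DEMO_MAX_DELAY_NS;

    	return val;
    }
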
@@ -698,10 +941,11 @@ qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int ret, i;
+ u32 mask;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
- pr_err("port 0 is not the CPU port\n");
+ dev_err(priv->dev, "port 0 is not the CPU port");
return -EINVAL;
}
@@ -711,76 +955,163 @@ qca8k_setup(struct dsa_switch *ds)
priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
&qca8k_regmap_config);
if (IS_ERR(priv->regmap))
- pr_warn("regmap initialization failed");
+ dev_warn(priv->dev, "regmap initialization failed");
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
+ ret = qca8k_setup_of_rgmii_delay(priv);
+ if (ret)
+ return ret;
+
/* Enable CPU Port */
- qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
/* Enable MIB counters */
- qca8k_mib_init(priv);
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
/* Enable QCA header mode on the cpu port */
- qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
/* Disable forwarding by default on all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+ }
/* Disable MAC by default on all ports */
for (i = 1; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
/* Forward all unknown frames to CPU port for Linux processing */
- qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ if (ret)
+ return ret;
/* Setup connection between CPU port & user ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
}
/* Individual user ports get connected to CPU port only */
if (dsa_is_user_port(ds, i)) {
int shift = 16 * (i % 2);
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(QCA8K_CPU_PORT));
+ if (ret)
+ return ret;
/* Enable ARP Auto-learning by default */
- qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
+ ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
/* For port based vlans to work we need to set the
* default egress vid
*/
- qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
- qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ 0xfff << shift,
+ QCA8K_PORT_VID_DEF << shift);
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
}
}
+ /* Port 5 of the qca8337 has problems under flood conditions. The
+ * original legacy driver had specific buffer and priority settings
+ * for the different ports, suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load.
+ * This problem is limited to the qca8337; other qca8k switches are not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ switch (i) {
+ /* The 2 CPU ports and port 5 require different
+ * priorities than the other ports.
+ */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+ }
+
+ /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
+ mask);
+ }
+
/* Setup our port MTUs to match power on defaults */
for (i = 0; i < QCA8K_NUM_PORTS; i++)
priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
- qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
/* Flush the FDB table */
qca8k_fdb_flush(priv);
@@ -797,11 +1128,14 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
{
struct qca8k_priv *priv = ds->priv;
u32 reg, val;
+ int ret;
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
return;
@@ -817,6 +1151,8 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
return;
@@ -840,16 +1176,22 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
break;
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
/* RGMII_ID needs internal delay. This is enabled through
* PORT5_PAD_CTRL for all ports, rather than individual port
* registers
*/
qca8k_write(priv, reg,
QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ /* QCA8337 requires the RGMII RX delay to be set */
+ if (priv->switch_id == QCA8K_ID_QCA8337)
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
@@ -857,7 +1199,9 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
/* Enable/disable SerDes auto-negotiation as necessary */
- val = qca8k_read(priv, QCA8K_REG_PWS);
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return;
if (phylink_autoneg_inband(mode))
val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
@@ -865,7 +1209,9 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_PWS, val);
/* Configure the SGMII parameters */
- val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL);
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return;
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
@@ -903,6 +1249,8 @@ qca8k_phylink_validate(struct dsa_switch *ds, int port,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
goto unsupported;
break;
@@ -913,13 +1261,16 @@ qca8k_phylink_validate(struct dsa_switch *ds, int port,
case 5:
/* Internal PHY */
if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII)
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL)
goto unsupported;
break;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
goto unsupported;
@@ -955,8 +1306,11 @@ qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
+ int ret;
- reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port));
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0)
+ return ret;
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
@@ -1057,18 +1411,27 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
const struct qca8k_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = qca8k_read(priv, reg);
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
if (mib->size == 2) {
- hi = qca8k_read(priv, reg + 4);
- data[i] |= hi << 32;
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
}
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
}
}
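
A standalone sketch of the 64-bit MIB counter assembly above; mib_counter() is a hypothetical name. It only illustrates why the high word must be widened to a 64-bit type before shifting — shifting a 32-bit value left by 32 would truncate the result (and is undefined behaviour in C):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mib_counter(uint32_t lo, uint32_t hi)
    {
    	/* widen both halves, then combine: hi:lo */
    	return (uint64_t)lo | ((uint64_t)hi << 32);
    }

    int main(void)
    {
    	/* prints 200000001 */
    	printf("%llx\n", (unsigned long long)mib_counter(0x1, 0x2));
    	return 0;
    }
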
@@ -1087,17 +1450,22 @@ qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
u32 reg;
+ int ret;
mutex_lock(&priv->reg_mutex);
- reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
if (eee->eee_enabled)
reg |= lpi_en;
else
reg &= ~lpi_en;
- qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
- mutex_unlock(&priv->reg_mutex);
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
- return 0;
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
}
static int
@@ -1141,7 +1509,7 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask = BIT(QCA8K_CPU_PORT);
- int i;
+ int i, ret;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
if (dsa_to_port(ds, i)->bridge_dev != br)
@@ -1149,17 +1517,20 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/
- qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
+ ret = qca8k_reg_set(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
if (i != port)
port_mask |= BIT(i);
}
+
/* Add all other ports to this ports portvlan mask */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
- return 0;
+ return ret;
}
static void
@@ -1223,9 +1594,7 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
mtu = priv->port_mtu[i];
/* Include L2 header / FCS length */
- qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
-
- return 0;
+ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
}
static int
@@ -1298,18 +1667,19 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
+ int ret;
if (vlan_filtering) {
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
}
- return 0;
+ return ret;
}
static int
@@ -1320,7 +1690,7 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
- int ret = 0;
+ int ret;
ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
if (ret) {
@@ -1331,14 +1701,17 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
if (pvid) {
int shift = 16 * (port % 2);
- qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
- qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ 0xfff << shift, vlan->vid << shift);
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
}
- return 0;
+ return ret;
}
static int
@@ -1346,7 +1719,7 @@ qca8k_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct qca8k_priv *priv = ds->priv;
- int ret = 0;
+ int ret;
ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
@@ -1355,6 +1728,22 @@ qca8k_port_vlan_del(struct dsa_switch *ds, int port,
return ret;
}
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Communicate the switch revision to the internal PHY driver.
+ * Based on the switch revision, different values need to be
+ * written to the dbg and mmd registers of the PHY.
+ * The first 2 bits are used to communicate the switch revision
+ * to the PHY driver.
+ */
+ if (port > 0 && port < 6)
+ return priv->switch_revision;
+
+ return 0;
+}
+
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -1388,13 +1777,44 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
+ .get_phy_flags = qca8k_get_phy_flags,
};
+static int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ const struct qca8k_match_data *data;
+ u32 val;
+ u8 id;
+ int ret;
+
+ /* Get the expected switch ID from the compatible string */
+ data = of_device_get_match_data(priv->dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
+ if (id != data->id) {
+ dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
+
+ return 0;
+}
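
A standalone sketch of the MASK_CTRL decode performed above, under the assumption of an example register value of 0x1302 (not read from real hardware). The masks mirror the GENMASK-based macros added to qca8k.h: bits 15:8 hold the device ID, bits 7:0 the revision:

    #include <stdint.h>
    #include <stdio.h>

    #define DEVICE_ID_MASK 0xff00u	/* bits 15:8, like GENMASK(15, 8) */
    #define REV_ID_MASK    0x00ffu	/* bits 7:0,  like GENMASK(7, 0)  */

    int main(void)
    {
    	uint32_t val = 0x1302;	/* example MASK_CTRL value */
    	uint8_t id  = (val & DEVICE_ID_MASK) >> 8;
    	uint8_t rev = val & REV_ID_MASK;

    	printf("id=0x%02x rev=%u\n", id, rev);	/* id=0x13 (QCA8337), rev=2 */
    	return 0;
    }
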
+
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv;
- u32 id;
+ int ret;
/* allocate the private data struct so that we can probe the switches
* ID register
@@ -1420,12 +1840,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
- /* read the switches ID register */
- id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
- id >>= QCA8K_MASK_CTRL_ID_S;
- id &= QCA8K_MASK_CTRL_ID_M;
- if (id != QCA8K_ID_QCA8337)
- return -ENODEV;
+ /* Check the detected switch id */
+ ret = qca8k_read_switch_id(priv);
+ if (ret)
+ return ret;
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
@@ -1490,9 +1908,18 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
+static const struct qca8k_match_data qca832x = {
+ .id = QCA8K_ID_QCA8327,
+};
+
+static const struct qca8k_match_data qca833x = {
+ .id = QCA8K_ID_QCA8337,
+};
+
static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8334" },
- { .compatible = "qca,qca8337" },
+ { .compatible = "qca,qca8327", .data = &qca832x },
+ { .compatible = "qca,qca8334", .data = &qca833x },
+ { .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
};
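
A standalone analogue (hypothetical names, userspace C) of the compatible-to-match-data mapping the table above sets up. In the kernel, the lookup is done by the OF core and retrieved with of_device_get_match_data(), as in qca8k_read_switch_id() above; this sketch only shows the shape of that mapping:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct match_data { unsigned char id; };

    static const struct match_data qca832x = { .id = 0x12 };
    static const struct match_data qca833x = { .id = 0x13 };

    static const struct {
    	const char *compatible;
    	const struct match_data *data;
    } table[] = {
    	{ "qca,qca8327", &qca832x },
    	{ "qca,qca8334", &qca833x },
    	{ "qca,qca8337", &qca833x },
    };

    static const struct match_data *match(const char *compatible)
    {
    	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
    		if (!strcmp(table[i].compatible, compatible))
    			return table[i].data;
    	return NULL;
    }

    int main(void)
    {
    	printf("0x%02x\n", match("qca,qca8334")->id);	/* 0x13 */
    	return 0;
    }
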
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 7ca4b93e0bb5..ed3b05ad6745 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -15,9 +15,13 @@
#define QCA8K_NUM_PORTS 7
#define QCA8K_MAX_MTU 9000
+#define PHY_ID_QCA8327 0x004dd034
+#define QCA8K_ID_QCA8327 0x12
#define PHY_ID_QCA8337 0x004dd036
#define QCA8K_ID_QCA8337 0x13
+#define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
#define QCA8K_NUM_FDB_RECORDS 2048
#define QCA8K_CPU_PORT 0
@@ -26,18 +30,19 @@
/* Global control registers */
#define QCA8K_REG_MASK_CTRL 0x000
-#define QCA8K_MASK_CTRL_ID_M 0xff
-#define QCA8K_MASK_CTRL_ID_S 8
+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) \
- ((0x8 + (x & 0x3)) << 22)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
- ((0x10 + (x & 0x3)) << 20)
-#define QCA8K_MAX_DELAY 3
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
@@ -164,6 +169,36 @@
#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
+
+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
+
+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
/* Pkt edit registers */
#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
@@ -211,7 +246,16 @@ struct ar8xxx_port_status {
int enabled;
};
+struct qca8k_match_data {
+ u8 id;
+};
+
struct qca8k_priv {
+ u8 switch_id;
+ u8 switch_revision;
+ u8 rgmii_tx_delay;
+ u8 rgmii_rx_delay;
+ bool legacy_phy_port_mapping;
struct regmap *regmap;
struct mii_bus *bus;
struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 5e83b365f17a..b29d41e5e1e7 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -3,11 +3,12 @@ config NET_DSA_SJA1105
tristate "NXP SJA1105 Ethernet switch family support"
depends on NET_DSA && SPI
select NET_DSA_TAG_SJA1105
+ select PCS_XPCS
select PACKING
select CRC32
help
- This is the driver for the NXP SJA1105 automotive Ethernet switch
- family. These are 5-port devices and are managed over an SPI
+ This is the driver for the NXP SJA1105 (5-port) and SJA1110 (10-port)
+ automotive Ethernet switch family. These are managed over an SPI
interface. Probing is handled based on OF bindings and so is the
linkage to PHYLINK. The driver supports the following revisions:
- SJA1105E (Gen. 1, No TT-Ethernet)
@@ -16,6 +17,10 @@ tristate "NXP SJA1105 Ethernet switch family support"
- SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
- SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
- SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+ - SJA1110A (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 10 ports)
+ - SJA1110B (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 9 ports)
+ - SJA1110C (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 7 ports)
+ - SJA1110D (Gen. 3, SGMII, TT-Ethernet, no 100base-TX PHY, 7 ports)
config NET_DSA_SJA1105_PTP
bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index a860e3a910be..40d69e6c0bae 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
sja1105-objs := \
sja1105_spi.o \
sja1105_main.o \
+ sja1105_mdio.o \
sja1105_flower.o \
sja1105_ethtool.o \
sja1105_devlink.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index f9e87fb33da0..221c7abdef0e 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -13,14 +13,12 @@
#include <linux/mutex.h>
#include "sja1105_static_config.h"
-#define SJA1105_NUM_PORTS 5
-#define SJA1105_NUM_TC 8
#define SJA1105ET_FDB_BIN_SIZE 4
/* The hardware value is in multiples of 10 ms.
* The passed parameter is in multiples of 1 ms.
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
-#define SJA1105_NUM_L2_POLICERS 45
+#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
typedef enum {
SPI_READ = 0,
@@ -30,6 +28,14 @@ typedef enum {
#include "sja1105_tas.h"
#include "sja1105_ptp.h"
+enum sja1105_stats_area {
+ MAC,
+ HL1,
+ HL2,
+ ETHER,
+ __MAX_SJA1105_STATS_AREA,
+};
+
/* Keeps the different addresses between E/T and P/Q/R/S */
struct sja1105_regs {
u64 device_id;
@@ -39,7 +45,6 @@ struct sja1105_regs {
u64 rgu;
u64 vl_status;
u64 config;
- u64 sgmii;
u64 rmii_pll1;
u64 ptppinst;
u64 ptppindur;
@@ -49,23 +54,41 @@ struct sja1105_regs {
u64 ptpclkcorp;
u64 ptpsyncts;
u64 ptpschtm;
- u64 ptpegr_ts[SJA1105_NUM_PORTS];
- u64 pad_mii_tx[SJA1105_NUM_PORTS];
- u64 pad_mii_rx[SJA1105_NUM_PORTS];
- u64 pad_mii_id[SJA1105_NUM_PORTS];
- u64 cgu_idiv[SJA1105_NUM_PORTS];
- u64 mii_tx_clk[SJA1105_NUM_PORTS];
- u64 mii_rx_clk[SJA1105_NUM_PORTS];
- u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
- u64 mii_ext_rx_clk[SJA1105_NUM_PORTS];
- u64 rgmii_tx_clk[SJA1105_NUM_PORTS];
- u64 rmii_ref_clk[SJA1105_NUM_PORTS];
- u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS];
- u64 mac[SJA1105_NUM_PORTS];
- u64 mac_hl1[SJA1105_NUM_PORTS];
- u64 mac_hl2[SJA1105_NUM_PORTS];
- u64 ether_stats[SJA1105_NUM_PORTS];
- u64 qlevel[SJA1105_NUM_PORTS];
+ u64 ptpegr_ts[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_tx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_MAX_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_MAX_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 stats[__MAX_SJA1105_STATS_AREA][SJA1105_MAX_NUM_PORTS];
+ u64 mdio_100base_tx;
+ u64 mdio_100base_t1;
+ u64 pcs_base[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1105_mdio_private {
+ struct sja1105_private *priv;
+};
+
+enum {
+ SJA1105_SPEED_AUTO,
+ SJA1105_SPEED_10MBPS,
+ SJA1105_SPEED_100MBPS,
+ SJA1105_SPEED_1000MBPS,
+ SJA1105_SPEED_2500MBPS,
+ SJA1105_SPEED_MAX,
+};
+
+enum sja1105_internal_phy_t {
+ SJA1105_NO_PHY = 0,
+ SJA1105_PHY_BASE_TX,
+ SJA1105_PHY_BASE_T1,
};
struct sja1105_info {
@@ -85,6 +108,10 @@ struct sja1105_info {
*/
int ptpegr_ts_bytes;
int num_cbs_shapers;
+ int max_frame_mem;
+ int num_ports;
+ bool multiple_cascade_ports;
+ enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
@@ -104,7 +131,20 @@ struct sja1105_info {
const unsigned char *addr, u16 vid);
void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
+ bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ int (*clocking_setup)(struct sja1105_private *priv);
+ int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
+ int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*disable_microcontroller)(struct sja1105_private *priv);
const char *name;
+ bool supports_mii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_sgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_2500basex[SJA1105_MAX_NUM_PORTS];
+ enum sja1105_internal_phy_t internal_phy[SJA1105_MAX_NUM_PORTS];
+ const u64 port_speed[SJA1105_SPEED_MAX];
};
enum sja1105_key_type {
@@ -202,20 +242,23 @@ enum sja1105_vlan_state {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
+ bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
+ bool fixed_link[SJA1105_MAX_NUM_PORTS];
bool best_effort_vlan_filtering;
unsigned long learn_ena;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
+ size_t max_xfer_len;
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
struct list_head dsa_8021q_vlans;
struct list_head bridge_vlans;
struct sja1105_flow_block flow_block;
- struct sja1105_port ports[SJA1105_NUM_PORTS];
+ struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
@@ -224,6 +267,10 @@ struct sja1105_private {
enum sja1105_vlan_state vlan_state;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
+ struct mii_bus *mdio_base_t1;
+ struct mii_bus *mdio_base_tx;
+ struct mii_bus *mdio_pcs;
+ struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
@@ -253,6 +300,14 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+/* From sja1105_mdio.c */
+int sja1105_mdiobus_register(struct dsa_switch *ds);
+void sja1105_mdiobus_unregister(struct dsa_switch *ds);
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
void sja1105_devlink_teardown(struct dsa_switch *ds);
@@ -286,6 +341,10 @@ extern const struct sja1105_info sja1105p_info;
extern const struct sja1105_info sja1105q_info;
extern const struct sja1105_info sja1105r_info;
extern const struct sja1105_info sja1105s_info;
+extern const struct sja1105_info sja1110a_info;
+extern const struct sja1105_info sja1110b_info;
+extern const struct sja1105_info sja1110c_info;
+extern const struct sja1105_info sja1110d_info;
/* From sja1105_clocking.c */
@@ -301,16 +360,11 @@ typedef enum {
XMII_MODE_SGMII = 3,
} sja1105_phy_interface_t;
-typedef enum {
- SJA1105_SPEED_10MBPS = 3,
- SJA1105_SPEED_100MBPS = 2,
- SJA1105_SPEED_1000MBPS = 1,
- SJA1105_SPEED_AUTO = 0,
-} sja1105_speed_t;
-
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
+int sja1110_setup_rgmii_delay(const void *ctx, int port);
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
int sja1105_clocking_setup(struct sja1105_private *priv);
+int sja1110_disable_microcontroller(struct sja1105_private *priv);
/* From sja1105_ethtool.c */
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
@@ -331,6 +385,18 @@ enum sja1105_iotag {
SJA1105_S_TAG = 1, /* Outer VLAN header */
};
+enum sja1110_vlan_type {
+ SJA1110_VLAN_INVALID = 0,
+ SJA1110_VLAN_C_TAG = 1, /* Single inner VLAN tag */
+ SJA1110_VLAN_S_TAG = 2, /* Single outer VLAN tag */
+ SJA1110_VLAN_D_TAG = 3, /* Double tagged, use outer tag for lookup */
+};
+
+enum sja1110_shaper_type {
+ SJA1110_LEAKY_BUCKET_SHAPER = 0,
+ SJA1110_CBS_SHAPER = 1,
+};
+
u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
int sja1105et_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 2a9b8a6a5306..387a1f2f161c 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -6,6 +6,8 @@
#include "sja1105.h"
#define SJA1105_SIZE_CGU_CMD 4
+#define SJA1110_BASE_MCSS_CLK SJA1110_CGU_ADDR(0x70)
+#define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
@@ -61,6 +63,12 @@ struct sja1105_cgu_pll_ctrl {
u64 pd;
};
+struct sja1110_cgu_outclk {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
enum {
CLKSRC_MII0_TX_CLK = 0x00,
CLKSRC_MII0_RX_CLK = 0x01,
@@ -110,6 +118,9 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
struct sja1105_cgu_idiv idiv;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
+ return 0;
+
if (enabled && factor != 1 && factor != 10) {
dev_err(dev, "idiv factor must be 1 or 10\n");
return -ERANGE;
@@ -159,6 +170,9 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
+ if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
if (role == XMII_MAC)
clksrc = mac_clk_sources[port];
else
@@ -188,6 +202,9 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_MII4_RX_CLK,
};
+ if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_rx_clk.clksrc = clk_sources[port];
mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -212,6 +229,9 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_IDIV4,
};
+ if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_ext_tx_clk.clksrc = clk_sources[port];
mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -236,6 +256,9 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_IDIV4,
};
+ if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_ext_rx_clk.clksrc = clk_sources[port];
mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -313,14 +336,17 @@ sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
}
static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
- int port, sja1105_speed_t speed)
+ int port, u64 speed)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl txc;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
- if (speed == SJA1105_SPEED_1000MBPS) {
+ if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
clksrc = CLKSRC_PLL0;
} else {
int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
@@ -368,6 +394,9 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
struct sja1105_cfg_pad_mii pad_mii_tx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload */
pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
/* high noise/high speed */
@@ -394,6 +423,9 @@ static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
struct sja1105_cfg_pad_mii pad_mii_rx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload */
pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
/* non-Schmitt (default) */
@@ -437,6 +469,35 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
+static void
+sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+ u64 range = 4;
+
+ /* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
+ * 0 = 2.5MHz
+ * 1 = 25MHz
+ * 2 = 50MHz
+ * 3 = 125MHz
+ * 4 = Automatically determined by port speed.
+ * There's no point in defining a structure different than the one for
+ * SJA1105, so just hardcode the frequency range to automatic, just as
+ * before.
+ */
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op);
+ sja1105_packing(buf, &range, 20, 18, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op);
+ sja1105_packing(buf, &range, 4, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
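A standalone sketch of the MSB-first bitfield convention used by the sja1105_packing() calls above: a field occupies bits start..end (inclusive, start >= end) of a command word. pack_field() is a hypothetical helper and ignores the buffer-endianness handling done by the real packing code; it composes the same rxc_delay and range fields as sja1110_cfg_pad_mii_id_packing():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_field(uint32_t word, uint32_t val, int start, int end)
    {
    	int width = start - end + 1;
    	uint32_t mask = ((width == 32) ? 0xffffffffu
    					: ((1u << width) - 1)) << end;

    	return (word & ~mask) | ((val << end) & mask);
    }

    int main(void)
    {
    	uint32_t word = 0;

    	word = pack_field(word, 0x12, 25, 21);	/* rxc_delay */
    	word = pack_field(word, 4, 20, 18);	/* rxc range = automatic */
    	printf("0x%08x\n", word);		/* 0x02500000 */
    	return 0;
    }
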
/* Valid range in degrees is an integer between 73.8 and 101.7 */
static u64 sja1105_rgmii_delay(u64 phase)
{
@@ -495,40 +556,65 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
packed_buf, SJA1105_SIZE_CGU_CMD);
}
+int sja1110_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_pd = 1;
+
+ if (priv->rgmii_rx_delay[port]) {
+ pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 0;
+ }
+
+ if (priv->rgmii_tx_delay[port]) {
+ pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 0;
+ }
+
+ sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
struct sja1105_mac_config_entry *mac;
- sja1105_speed_t speed;
+ u64 speed;
int rc;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
speed = mac[port].speed;
- dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
+ dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
port, speed);
- switch (speed) {
- case SJA1105_SPEED_1000MBPS:
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
/* 1000Mbps, IDIV disabled (125 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, false, 1);
- break;
- case SJA1105_SPEED_100MBPS:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 1);
- break;
- case SJA1105_SPEED_10MBPS:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 10);
- break;
- case SJA1105_SPEED_AUTO:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
/* Skip CGU configuration if there is no speed available
* (e.g. link is not established yet)
*/
dev_dbg(dev, "Speed not available, skipping CGU config\n");
return 0;
- default:
+ } else {
rc = -EINVAL;
}
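
A standalone sketch of the speed indirection introduced above: the enum is now a generation-independent index, and each switch generation supplies its own hardware encoding through a port_speed[] table. The values below are the SJA1105 ones from the removed sja1105_speed_t enum; the enum and table names here are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    enum { SPEED_AUTO, SPEED_10, SPEED_100, SPEED_1000, SPEED_MAX };

    /* SJA1105's encoding; another generation would supply its own table */
    static const uint64_t sja1105_port_speed[SPEED_MAX] = {
    	[SPEED_AUTO] = 0,
    	[SPEED_10]   = 3,
    	[SPEED_100]  = 2,
    	[SPEED_1000] = 1,
    };

    int main(void)
    {
    	uint64_t speed = 1;	/* raw value from a MAC config entry */

    	if (speed == sja1105_port_speed[SPEED_1000])
    		printf("1000Mbps\n");
    	return 0;
    }
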
@@ -546,14 +632,9 @@ static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
dev_err(dev, "Failed to configure Tx pad registers\n");
return rc;
}
+
if (!priv->info->setup_rgmii_delay)
return 0;
- /* The role has no hardware effect for RGMII. However we use it as
- * a proxy for this interface being a MAC-to-MAC connection, with
- * the RGMII internal delays needing to be applied by us.
- */
- if (role == XMII_MAC)
- return 0;
return priv->info->setup_rgmii_delay(priv, port);
}
@@ -572,6 +653,9 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
CLKSRC_MII4_TX_CLK,
};
+ if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
ref_clk.clksrc = clk_sources[port];
ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -589,6 +673,9 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
struct sja1105_cgu_mii_ctrl ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
ext_tx_clk.clksrc = CLKSRC_PLL1;
ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -607,6 +694,9 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
struct device *dev = priv->ds->dev;
int rc;
+ if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
+ return 0;
+
/* PLL1 must be enabled and output 50 Mhz.
* This is done by writing first 0x0A010941 to
* the PLL_1_C register and then deasserting
@@ -721,12 +811,52 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
int sja1105_clocking_setup(struct sja1105_private *priv)
{
+ struct dsa_switch *ds = priv->ds;
int port, rc;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
rc = sja1105_clocking_setup_port(priv, port);
if (rc < 0)
return rc;
}
return 0;
}
+
+static void
+sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op);
+ sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &outclk->pd, 0, 0, size, op);
+}
+
+int sja1110_disable_microcontroller(struct sja1105_private *priv)
+{
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1110_cgu_outclk outclk_6_c = {
+ .clksrc = 0x3,
+ .pd = true,
+ };
+ struct sja1110_cgu_outclk outclk_7_c = {
+ .clksrc = 0x5,
+ .pd = true,
+ };
+ int rc;
+
+ /* Power down the BASE_TIMER_CLK to disable the watchdog timer */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc)
+ return rc;
+
+ /* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index b777d3f37573..56fead68ea9f 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -78,6 +78,9 @@
* on its ENTRY portion, as a result of a SPI write command.
* Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
* this.
+ * OP_VALID_ANYWAY: Reading some tables through the dynamic config
+ * interface is possible even if the VALIDENT bit is not
+ * set in the writeback. So don't error out in that case.
* - .max_entry_count: The number of entries, counting from zero, that can be
* reconfigured through the dynamic interface. If a static
* table can be reconfigured at all dynamically, this
@@ -103,6 +106,9 @@
#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+#define SJA1110_SIZE_VL_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_POLICING_ENTRY)
+
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
SJA1105_SIZE_DYN_CMD
@@ -112,9 +118,15 @@
#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+#define SJA1110_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_ENTRY)
+
#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+#define SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_VLAN_LOOKUP_ENTRY)
+
#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
@@ -130,12 +142,18 @@
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+#define SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_GENERAL_PARAMS_ENTRY)
+
#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
@@ -148,8 +166,17 @@
#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+#define SJA1110_SIZE_XMII_PARAMS_DYN_CMD \
+ SJA1110_SIZE_XMII_PARAMS_ENTRY
+
+#define SJA1110_SIZE_L2_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_POLICING_ENTRY)
+
+#define SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD \
+ SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY
+
#define SJA1105_MAX_DYN_CMD_SIZE \
- SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD
+ SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD
struct sja1105_dyn_cmd {
bool search;
@@ -167,9 +194,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
+/* Command and entry overlap */
static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
@@ -179,6 +207,33 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
+static void
+sja1110_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -191,11 +246,23 @@ static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
}
static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
+sja1110_vl_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static void
+sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op, int entry_size)
{
- u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
+ u8 *p = buf + entry_size;
u64 hostcmd;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
@@ -250,6 +317,24 @@ sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
}
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+ return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+}
+
+static void
+sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+ return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+}
+
/* The switch is so quirky that it makes our command/entry abstraction
* crumble apart.
*
@@ -308,6 +393,18 @@ sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
}
+static size_t sja1110_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1110_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
static void
sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -419,6 +516,39 @@ sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
}
+/* In SJA1110 there is no gap between the command and the data, yay... */
+static void
+sja1110_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u64 type_entry = 0;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ /* Hack: treat 'vlanid' field of struct sja1105_vlan_lookup_entry as
+ * cmd->index.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, op);
+
+ /* But the VALIDENT bit has disappeared; now we are supposed to
+ * invalidate an entry through the TYPE_ENTRY field of the entry.
+ * This is a hack that transforms the non-zero value of the TYPE_ENTRY
+ * field into a VALIDENT bit.
+ */
+ if (op == PACK && !cmd->valident) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, PACK);
+ } else if (op == UNPACK) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, UNPACK);
+ cmd->valident = !!type_entry;
+ }
+}
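
A simplified standalone sketch of the TYPE_ENTRY/VALIDENT translation above (hypothetical helper names). In the real entry, TYPE_ENTRY carries one of the SJA1110_VLAN_* tag types; the driver only zeroes it on invalidation and reports !!type_entry as a synthetic valident on reads:

    #include <stdint.h>
    #include <stdio.h>

    /* any non-zero TYPE_ENTRY marks the entry valid; invalidation writes 0 */
    static uint64_t type_entry_for(int valident)
    {
    	return valident ? 1 : 0;	/* simplified: real entries carry a tag type */
    }

    static int valident_from(uint64_t type_entry)
    {
    	return type_entry != 0;
    }

    int main(void)
    {
    	printf("%d %d\n",
    	       valident_from(type_entry_for(0)),	/* 0: invalidated */
    	       valident_from(type_entry_for(1)));	/* 1: valid */
    	return 0;
    }
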
+
static void
sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -433,6 +563,19 @@ sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -487,6 +630,19 @@ sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void
sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -519,6 +675,18 @@ sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
}
static void
+sja1110_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -553,6 +721,18 @@ sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -578,6 +758,20 @@ sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->index, 5, 0, size, op);
}
+static void
+sja1110_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -617,6 +811,18 @@ static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->index, 3, 0, size, op);
}
+static void sja1110_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 7, 0, size, op);
+}
+
static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -632,16 +838,50 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u64 entry_type = SJA1110_CBS_SHAPER;
+
+ sja1105_packing(buf, &entry_type, 159, 159, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 151, 120, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 119, 88, size, op);
+ sja1105_packing(buf, &entry->send_slope, 87, 56, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 55, 24, size, op);
+ return size;
+}
+
+static void sja1110_dummy_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+}
+
+static void
+sja1110_l2_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_POLICING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 6, 0, size, op);
+}
+
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
#define OP_SEARCH BIT(3)
+#define OP_VALID_ANYWAY BIT(4)
/* SJA1105E/T: First generation */
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -658,7 +898,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105et_mgmt_route_entry_packing,
.cmd_packing = sja1105et_mgmt_route_cmd_packing,
- .access = (OP_READ | OP_WRITE),
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x20,
@@ -725,7 +965,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
@@ -742,7 +982,7 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105pqrs_mgmt_route_entry_packing,
.cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
- .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
@@ -813,6 +1053,122 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
},
};
+/* SJA1110: Third generation */
+const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1110_vl_lookup_entry_packing,
+ .cmd_packing = sja1110_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x124),
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .entry_packing = sja1110_vl_policing_entry_packing,
+ .cmd_packing = sja1110_vl_policing_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ .packed_size = SJA1110_SIZE_VL_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x310),
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1110_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x8c),
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1110_vlan_lookup_entry_packing,
+ .cmd_packing = sja1110_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xb4),
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1110_l2_forwarding_entry_packing,
+ .cmd_packing = sja1110_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xa8),
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1110_mac_config_entry_packing,
+ .cmd_packing = sja1110_mac_config_cmd_packing,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x134),
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1110_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x158),
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2000C),
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1110_general_params_entry_packing,
+ .cmd_packing = sja1110_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xe8),
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1110_retagging_entry_packing,
+ .cmd_packing = sja1110_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xdc),
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1110_cbs_entry_packing,
+ .cmd_packing = sja1110_cbs_cmd_packing,
+ .max_entry_count = SJA1110_MAX_CBS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xc4),
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .entry_packing = sja1110_xmii_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_XMII_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x3c),
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .entry_packing = sja1110_l2_policing_entry_packing,
+ .cmd_packing = sja1110_l2_policing_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2fc),
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .entry_packing = sja1110_l2_forwarding_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x20000),
+ },
+};
+
/* Provides read access to the settings through the dynamic interface
* of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
@@ -896,11 +1252,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
cmd = (struct sja1105_dyn_cmd) {0};
ops->cmd_packing(packed_buf, &cmd, UNPACK);
- /* UM10944: [valident] will always be found cleared
- * during a read access with MGMTROUTE set.
- * So don't error out in that case.
- */
- if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
+
+ if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
cpu_relax();
} while (cmd.valid && --retries);
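
The hunk above replaces the hard-coded BLK_IDX_MGMT_ROUTE exception with a per-block OP_VALID_ANYWAY flag, so sja1105_dynamic_config_read() no longer needs to know which block it is serving: each entry in the ops tables declares the behaviour itself. A minimal, self-contained sketch of the pattern (the flag and field names follow this patch; the rest, including main(), is illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(x)          (1U << (x))
#define OP_READ         BIT(0)
#define OP_WRITE        BIT(1)
#define OP_DEL          BIT(2)
#define OP_SEARCH       BIT(3)
#define OP_VALID_ANYWAY BIT(4) /* reads may return entries with VALIDENT=0 */

struct dyn_cmd {
	bool valid;    /* hardware still owns the command */
	bool valident; /* the returned entry holds valid data */
};

struct dyn_ops {
	unsigned int access;
};

/* Mirrors the check added above: VALIDENT=0 is only an error for
 * blocks that do not advertise OP_VALID_ANYWAY.
 */
static int check_read_result(const struct dyn_ops *ops,
			     const struct dyn_cmd *cmd)
{
	if (!cmd->valident && !(ops->access & OP_VALID_ANYWAY))
		return -ENOENT;

	return 0;
}

int main(void)
{
	struct dyn_ops mgmt = { .access = OP_READ | OP_WRITE | OP_VALID_ANYWAY };
	struct dyn_ops fdb  = { .access = OP_READ | OP_WRITE | OP_DEL | OP_SEARCH };
	struct dyn_cmd cmd  = { .valid = false, .valident = false };

	printf("mgmt route: %d\n", check_read_result(&mgmt, &cmd)); /* 0 */
	printf("fdb entry:  %d\n", check_read_result(&fdb, &cmd));  /* -ENOENT */
	return 0;
}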
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 28d4eb5efb8b..a1472f80a059 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -36,5 +36,6 @@ struct sja1105_mgmt_entry {
extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN];
#endif
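
This one-line extern completes the set of per-family dynamic-config ops tables (E/T, P/Q/R/S, SJA1110). Callers reach these through a per-chip info pointer rather than branching on device ID at every call site; the standalone sketch below shows that dispatch shape (chip_info and its field are stand-ins, not the driver's real sja1105_info layout):

#include <stdio.h>

enum blk_idx { BLK_IDX_L2_LOOKUP, BLK_IDX_MAX_DYN };

struct dyn_table_ops {
	const char *name;
};

/* stand-ins for the three exported per-family arrays */
static const struct dyn_table_ops et_ops[BLK_IDX_MAX_DYN]    = { { "sja1105et" } };
static const struct dyn_table_ops pqrs_ops[BLK_IDX_MAX_DYN]  = { { "sja1105pqrs" } };
static const struct dyn_table_ops s1110_ops[BLK_IDX_MAX_DYN] = { { "sja1110" } };

/* each family's info points at its own table; callers only dereference
 * info->dyn_ops, so a new switch generation is one array plus one
 * pointer, with no extra branches on the hot path
 */
struct chip_info {
	const struct dyn_table_ops *dyn_ops;
};

int main(void)
{
	const struct chip_info infos[] = {
		{ et_ops }, { pqrs_ops }, { s1110_ops },
	};
	size_t i;

	for (i = 0; i < sizeof(infos) / sizeof(infos[0]); i++)
		printf("%s\n", infos[i].dyn_ops[BLK_IDX_L2_LOOKUP].name);

	return 0;
}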
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 9133a831ec79..decc6c931dc1 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -3,552 +3,627 @@
*/
#include "sja1105.h"
-#define SJA1105_SIZE_MAC_AREA (0x02 * 4)
-#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
-#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
-#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
-#define SJA1105_SIZE_ETHER_AREA (0x17 * 4)
-
-struct sja1105_port_status_mac {
- u64 n_runt;
- u64 n_soferr;
- u64 n_alignerr;
- u64 n_miierr;
- u64 typeerr;
- u64 sizeerr;
- u64 tctimeout;
- u64 priorerr;
- u64 nomaster;
- u64 memov;
- u64 memerr;
- u64 invtyp;
- u64 intcyov;
- u64 domerr;
- u64 pcfbagdrop;
- u64 spcprior;
- u64 ageprior;
- u64 portdrop;
- u64 lendrop;
- u64 bagdrop;
- u64 policeerr;
- u64 drpnona664err;
- u64 spcerr;
- u64 agedrp;
-};
-
-struct sja1105_port_status_hl1 {
- u64 n_n664err;
- u64 n_vlanerr;
- u64 n_unreleased;
- u64 n_sizeerr;
- u64 n_crcerr;
- u64 n_vlnotfound;
- u64 n_ctpolerr;
- u64 n_polerr;
- u64 n_rxfrmsh;
- u64 n_rxfrm;
- u64 n_rxbytesh;
- u64 n_rxbyte;
- u64 n_txfrmsh;
- u64 n_txfrm;
- u64 n_txbytesh;
- u64 n_txbyte;
+enum sja1105_counter_index {
+ __SJA1105_COUNTER_UNUSED,
+ /* MAC */
+ N_RUNT,
+ N_SOFERR,
+ N_ALIGNERR,
+ N_MIIERR,
+ TYPEERR,
+ SIZEERR,
+ TCTIMEOUT,
+ PRIORERR,
+ NOMASTER,
+ MEMOV,
+ MEMERR,
+ INVTYP,
+ INTCYOV,
+ DOMERR,
+ PCFBAGDROP,
+ SPCPRIOR,
+ AGEPRIOR,
+ PORTDROP,
+ LENDROP,
+ BAGDROP,
+ POLICEERR,
+ DRPNONA664ERR,
+ SPCERR,
+ AGEDRP,
+ /* HL1 */
+ N_N664ERR,
+ N_VLANERR,
+ N_UNRELEASED,
+ N_SIZEERR,
+ N_CRCERR,
+ N_VLNOTFOUND,
+ N_CTPOLERR,
+ N_POLERR,
+ N_RXFRM,
+ N_RXBYTE,
+ N_TXFRM,
+ N_TXBYTE,
+ /* HL2 */
+ N_QFULL,
+ N_PART_DROP,
+ N_EGR_DISABLED,
+ N_NOT_REACH,
+ __MAX_SJA1105ET_PORT_COUNTER,
+ /* P/Q/R/S only */
+ /* ETHER */
+ N_DROPS_NOLEARN = __MAX_SJA1105ET_PORT_COUNTER,
+ N_DROPS_NOROUTE,
+ N_DROPS_ILL_DTAG,
+ N_DROPS_DTAG,
+ N_DROPS_SOTAG,
+ N_DROPS_SITAG,
+ N_DROPS_UTAG,
+ N_TX_BYTES_1024_2047,
+ N_TX_BYTES_512_1023,
+ N_TX_BYTES_256_511,
+ N_TX_BYTES_128_255,
+ N_TX_BYTES_65_127,
+ N_TX_BYTES_64,
+ N_TX_MCAST,
+ N_TX_BCAST,
+ N_RX_BYTES_1024_2047,
+ N_RX_BYTES_512_1023,
+ N_RX_BYTES_256_511,
+ N_RX_BYTES_128_255,
+ N_RX_BYTES_65_127,
+ N_RX_BYTES_64,
+ N_RX_MCAST,
+ N_RX_BCAST,
+ __MAX_SJA1105PQRS_PORT_COUNTER,
};
-struct sja1105_port_status_hl2 {
- u64 n_qfull;
- u64 n_part_drop;
- u64 n_egr_disabled;
- u64 n_not_reach;
- u64 qlevel_hwm[8]; /* Only for P/Q/R/S */
- u64 qlevel[8]; /* Only for P/Q/R/S */
+struct sja1105_port_counter {
+ enum sja1105_stats_area area;
+ const char name[ETH_GSTRING_LEN];
+ int offset;
+ int start;
+ int end;
+ bool is_64bit;
};
-struct sja1105_port_status_ether {
- u64 n_drops_nolearn;
- u64 n_drops_noroute;
- u64 n_drops_ill_dtag;
- u64 n_drops_dtag;
- u64 n_drops_sotag;
- u64 n_drops_sitag;
- u64 n_drops_utag;
- u64 n_tx_bytes_1024_2047;
- u64 n_tx_bytes_512_1023;
- u64 n_tx_bytes_256_511;
- u64 n_tx_bytes_128_255;
- u64 n_tx_bytes_65_127;
- u64 n_tx_bytes_64;
- u64 n_tx_mcast;
- u64 n_tx_bcast;
- u64 n_rx_bytes_1024_2047;
- u64 n_rx_bytes_512_1023;
- u64 n_rx_bytes_256_511;
- u64 n_rx_bytes_128_255;
- u64 n_rx_bytes_65_127;
- u64 n_rx_bytes_64;
- u64 n_rx_mcast;
- u64 n_rx_bcast;
-};
-
-struct sja1105_port_status {
- struct sja1105_port_status_mac mac;
- struct sja1105_port_status_hl1 hl1;
- struct sja1105_port_status_hl2 hl2;
- struct sja1105_port_status_ether ether;
+static const struct sja1105_port_counter sja1105_port_counters[] = {
+ /* MAC-Level Diagnostic Counters */
+ [N_RUNT] = {
+ .area = MAC,
+ .name = "n_runt",
+ .offset = 0,
+ .start = 31,
+ .end = 24,
+ },
+ [N_SOFERR] = {
+ .area = MAC,
+ .name = "n_soferr",
+ .offset = 0x0,
+ .start = 23,
+ .end = 16,
+ },
+ [N_ALIGNERR] = {
+ .area = MAC,
+ .name = "n_alignerr",
+ .offset = 0x0,
+ .start = 15,
+ .end = 8,
+ },
+ [N_MIIERR] = {
+ .area = MAC,
+ .name = "n_miierr",
+ .offset = 0x0,
+ .start = 7,
+ .end = 0,
+ },
+ /* MAC-Level Diagnostic Flags */
+ [TYPEERR] = {
+ .area = MAC,
+ .name = "typeerr",
+ .offset = 0x1,
+ .start = 27,
+ .end = 27,
+ },
+ [SIZEERR] = {
+ .area = MAC,
+ .name = "sizeerr",
+ .offset = 0x1,
+ .start = 26,
+ .end = 26,
+ },
+ [TCTIMEOUT] = {
+ .area = MAC,
+ .name = "tctimeout",
+ .offset = 0x1,
+ .start = 25,
+ .end = 25,
+ },
+ [PRIORERR] = {
+ .area = MAC,
+ .name = "priorerr",
+ .offset = 0x1,
+ .start = 24,
+ .end = 24,
+ },
+ [NOMASTER] = {
+ .area = MAC,
+ .name = "nomaster",
+ .offset = 0x1,
+ .start = 23,
+ .end = 23,
+ },
+ [MEMOV] = {
+ .area = MAC,
+ .name = "memov",
+ .offset = 0x1,
+ .start = 22,
+ .end = 22,
+ },
+ [MEMERR] = {
+ .area = MAC,
+ .name = "memerr",
+ .offset = 0x1,
+ .start = 21,
+ .end = 21,
+ },
+ [INVTYP] = {
+ .area = MAC,
+ .name = "invtyp",
+ .offset = 0x1,
+ .start = 19,
+ .end = 19,
+ },
+ [INTCYOV] = {
+ .area = MAC,
+ .name = "intcyov",
+ .offset = 0x1,
+ .start = 18,
+ .end = 18,
+ },
+ [DOMERR] = {
+ .area = MAC,
+ .name = "domerr",
+ .offset = 0x1,
+ .start = 17,
+ .end = 17,
+ },
+ [PCFBAGDROP] = {
+ .area = MAC,
+ .name = "pcfbagdrop",
+ .offset = 0x1,
+ .start = 16,
+ .end = 16,
+ },
+ [SPCPRIOR] = {
+ .area = MAC,
+ .name = "spcprior",
+ .offset = 0x1,
+ .start = 15,
+ .end = 12,
+ },
+ [AGEPRIOR] = {
+ .area = MAC,
+ .name = "ageprior",
+ .offset = 0x1,
+ .start = 11,
+ .end = 8,
+ },
+ [PORTDROP] = {
+ .area = MAC,
+ .name = "portdrop",
+ .offset = 0x1,
+ .start = 6,
+ .end = 6,
+ },
+ [LENDROP] = {
+ .area = MAC,
+ .name = "lendrop",
+ .offset = 0x1,
+ .start = 5,
+ .end = 5,
+ },
+ [BAGDROP] = {
+ .area = MAC,
+ .name = "bagdrop",
+ .offset = 0x1,
+ .start = 4,
+ .end = 4,
+ },
+ [POLICEERR] = {
+ .area = MAC,
+ .name = "policeerr",
+ .offset = 0x1,
+ .start = 3,
+ .end = 3,
+ },
+ [DRPNONA664ERR] = {
+ .area = MAC,
+ .name = "drpnona664err",
+ .offset = 0x1,
+ .start = 2,
+ .end = 2,
+ },
+ [SPCERR] = {
+ .area = MAC,
+ .name = "spcerr",
+ .offset = 0x1,
+ .start = 1,
+ .end = 1,
+ },
+ [AGEDRP] = {
+ .area = MAC,
+ .name = "agedrp",
+ .offset = 0x1,
+ .start = 0,
+ .end = 0,
+ },
+ /* High-Level Diagnostic Counters */
+ [N_N664ERR] = {
+ .area = HL1,
+ .name = "n_n664err",
+ .offset = 0xF,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLANERR] = {
+ .area = HL1,
+ .name = "n_vlanerr",
+ .offset = 0xE,
+ .start = 31,
+ .end = 0,
+ },
+ [N_UNRELEASED] = {
+ .area = HL1,
+ .name = "n_unreleased",
+ .offset = 0xD,
+ .start = 31,
+ .end = 0,
+ },
+ [N_SIZEERR] = {
+ .area = HL1,
+ .name = "n_sizeerr",
+ .offset = 0xC,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CRCERR] = {
+ .area = HL1,
+ .name = "n_crcerr",
+ .offset = 0xB,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLNOTFOUND] = {
+ .area = HL1,
+ .name = "n_vlnotfound",
+ .offset = 0xA,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CTPOLERR] = {
+ .area = HL1,
+ .name = "n_ctpolerr",
+ .offset = 0x9,
+ .start = 31,
+ .end = 0,
+ },
+ [N_POLERR] = {
+ .area = HL1,
+ .name = "n_polerr",
+ .offset = 0x8,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RXFRM] = {
+ .area = HL1,
+ .name = "n_rxfrm",
+ .offset = 0x6,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_RXBYTE] = {
+ .area = HL1,
+ .name = "n_rxbyte",
+ .offset = 0x4,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXFRM] = {
+ .area = HL1,
+ .name = "n_txfrm",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXBYTE] = {
+ .area = HL1,
+ .name = "n_txbyte",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_QFULL] = {
+ .area = HL2,
+ .name = "n_qfull",
+ .offset = 0x3,
+ .start = 31,
+ .end = 0,
+ },
+ [N_PART_DROP] = {
+ .area = HL2,
+ .name = "n_part_drop",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ },
+ [N_EGR_DISABLED] = {
+ .area = HL2,
+ .name = "n_egr_disabled",
+ .offset = 0x1,
+ .start = 31,
+ .end = 0,
+ },
+ [N_NOT_REACH] = {
+ .area = HL2,
+ .name = "n_not_reach",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ },
+ /* Ether Stats */
+ [N_DROPS_NOLEARN] = {
+ .area = ETHER,
+ .name = "n_drops_nolearn",
+ .offset = 0x16,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_NOROUTE] = {
+ .area = ETHER,
+ .name = "n_drops_noroute",
+ .offset = 0x15,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_ILL_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_ill_dtag",
+ .offset = 0x14,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_dtag",
+ .offset = 0x13,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SOTAG] = {
+ .area = ETHER,
+ .name = "n_drops_sotag",
+ .offset = 0x12,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SITAG] = {
+ .area = ETHER,
+ .name = "n_drops_sitag",
+ .offset = 0x11,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_UTAG] = {
+ .area = ETHER,
+ .name = "n_drops_utag",
+ .offset = 0x10,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_1024_2047",
+ .offset = 0x0F,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_512_1023",
+ .offset = 0x0E,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_256_511",
+ .offset = 0x0D,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_128_255",
+ .offset = 0x0C,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_65_127",
+ .offset = 0x0B,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_64",
+ .offset = 0x0A,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_MCAST] = {
+ .area = ETHER,
+ .name = "n_tx_mcast",
+ .offset = 0x09,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BCAST] = {
+ .area = ETHER,
+ .name = "n_tx_bcast",
+ .offset = 0x08,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_1024_2047",
+ .offset = 0x07,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_512_1023",
+ .offset = 0x06,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_256_511",
+ .offset = 0x05,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_128_255",
+ .offset = 0x04,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_65_127",
+ .offset = 0x03,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_64",
+ .offset = 0x02,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_MCAST] = {
+ .area = ETHER,
+ .name = "n_rx_mcast",
+ .offset = 0x01,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BCAST] = {
+ .area = ETHER,
+ .name = "n_rx_bcast",
+ .offset = 0x00,
+ .start = 31,
+ .end = 0,
+ },
};
-static void
-sja1105_port_status_mac_unpack(void *buf,
- struct sja1105_port_status_mac *status)
-{
- /* Make pointer arithmetic work on 4 bytes */
- u32 *p = buf;
-
- sja1105_unpack(p + 0x0, &status->n_runt, 31, 24, 4);
- sja1105_unpack(p + 0x0, &status->n_soferr, 23, 16, 4);
- sja1105_unpack(p + 0x0, &status->n_alignerr, 15, 8, 4);
- sja1105_unpack(p + 0x0, &status->n_miierr, 7, 0, 4);
- sja1105_unpack(p + 0x1, &status->typeerr, 27, 27, 4);
- sja1105_unpack(p + 0x1, &status->sizeerr, 26, 26, 4);
- sja1105_unpack(p + 0x1, &status->tctimeout, 25, 25, 4);
- sja1105_unpack(p + 0x1, &status->priorerr, 24, 24, 4);
- sja1105_unpack(p + 0x1, &status->nomaster, 23, 23, 4);
- sja1105_unpack(p + 0x1, &status->memov, 22, 22, 4);
- sja1105_unpack(p + 0x1, &status->memerr, 21, 21, 4);
- sja1105_unpack(p + 0x1, &status->invtyp, 19, 19, 4);
- sja1105_unpack(p + 0x1, &status->intcyov, 18, 18, 4);
- sja1105_unpack(p + 0x1, &status->domerr, 17, 17, 4);
- sja1105_unpack(p + 0x1, &status->pcfbagdrop, 16, 16, 4);
- sja1105_unpack(p + 0x1, &status->spcprior, 15, 12, 4);
- sja1105_unpack(p + 0x1, &status->ageprior, 11, 8, 4);
- sja1105_unpack(p + 0x1, &status->portdrop, 6, 6, 4);
- sja1105_unpack(p + 0x1, &status->lendrop, 5, 5, 4);
- sja1105_unpack(p + 0x1, &status->bagdrop, 4, 4, 4);
- sja1105_unpack(p + 0x1, &status->policeerr, 3, 3, 4);
- sja1105_unpack(p + 0x1, &status->drpnona664err, 2, 2, 4);
- sja1105_unpack(p + 0x1, &status->spcerr, 1, 1, 4);
- sja1105_unpack(p + 0x1, &status->agedrp, 0, 0, 4);
-}
-
-static void
-sja1105_port_status_hl1_unpack(void *buf,
- struct sja1105_port_status_hl1 *status)
-{
- /* Make pointer arithmetic work on 4 bytes */
- u32 *p = buf;
-
- sja1105_unpack(p + 0xF, &status->n_n664err, 31, 0, 4);
- sja1105_unpack(p + 0xE, &status->n_vlanerr, 31, 0, 4);
- sja1105_unpack(p + 0xD, &status->n_unreleased, 31, 0, 4);
- sja1105_unpack(p + 0xC, &status->n_sizeerr, 31, 0, 4);
- sja1105_unpack(p + 0xB, &status->n_crcerr, 31, 0, 4);
- sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31, 0, 4);
- sja1105_unpack(p + 0x9, &status->n_ctpolerr, 31, 0, 4);
- sja1105_unpack(p + 0x8, &status->n_polerr, 31, 0, 4);
- sja1105_unpack(p + 0x7, &status->n_rxfrmsh, 31, 0, 4);
- sja1105_unpack(p + 0x6, &status->n_rxfrm, 31, 0, 4);
- sja1105_unpack(p + 0x5, &status->n_rxbytesh, 31, 0, 4);
- sja1105_unpack(p + 0x4, &status->n_rxbyte, 31, 0, 4);
- sja1105_unpack(p + 0x3, &status->n_txfrmsh, 31, 0, 4);
- sja1105_unpack(p + 0x2, &status->n_txfrm, 31, 0, 4);
- sja1105_unpack(p + 0x1, &status->n_txbytesh, 31, 0, 4);
- sja1105_unpack(p + 0x0, &status->n_txbyte, 31, 0, 4);
- status->n_rxfrm += status->n_rxfrmsh << 32;
- status->n_rxbyte += status->n_rxbytesh << 32;
- status->n_txfrm += status->n_txfrmsh << 32;
- status->n_txbyte += status->n_txbytesh << 32;
-}
-
-static void
-sja1105_port_status_hl2_unpack(void *buf,
- struct sja1105_port_status_hl2 *status)
-{
- /* Make pointer arithmetic work on 4 bytes */
- u32 *p = buf;
-
- sja1105_unpack(p + 0x3, &status->n_qfull, 31, 0, 4);
- sja1105_unpack(p + 0x2, &status->n_part_drop, 31, 0, 4);
- sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31, 0, 4);
- sja1105_unpack(p + 0x0, &status->n_not_reach, 31, 0, 4);
-}
-
-static void
-sja1105pqrs_port_status_qlevel_unpack(void *buf,
- struct sja1105_port_status_hl2 *status)
-{
- /* Make pointer arithmetic work on 4 bytes */
- u32 *p = buf;
- int i;
-
- for (i = 0; i < 8; i++) {
- sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4);
- sja1105_unpack(p + i, &status->qlevel[i], 8, 0, 4);
- }
-}
-
-static void
-sja1105pqrs_port_status_ether_unpack(void *buf,
- struct sja1105_port_status_ether *status)
-{
- /* Make pointer arithmetic work on 4 bytes */
- u32 *p = buf;
-
- sja1105_unpack(p + 0x16, &status->n_drops_nolearn, 31, 0, 4);
- sja1105_unpack(p + 0x15, &status->n_drops_noroute, 31, 0, 4);
- sja1105_unpack(p + 0x14, &status->n_drops_ill_dtag, 31, 0, 4);
- sja1105_unpack(p + 0x13, &status->n_drops_dtag, 31, 0, 4);
- sja1105_unpack(p + 0x12, &status->n_drops_sotag, 31, 0, 4);
- sja1105_unpack(p + 0x11, &status->n_drops_sitag, 31, 0, 4);
- sja1105_unpack(p + 0x10, &status->n_drops_utag, 31, 0, 4);
- sja1105_unpack(p + 0x0F, &status->n_tx_bytes_1024_2047, 31, 0, 4);
- sja1105_unpack(p + 0x0E, &status->n_tx_bytes_512_1023, 31, 0, 4);
- sja1105_unpack(p + 0x0D, &status->n_tx_bytes_256_511, 31, 0, 4);
- sja1105_unpack(p + 0x0C, &status->n_tx_bytes_128_255, 31, 0, 4);
- sja1105_unpack(p + 0x0B, &status->n_tx_bytes_65_127, 31, 0, 4);
- sja1105_unpack(p + 0x0A, &status->n_tx_bytes_64, 31, 0, 4);
- sja1105_unpack(p + 0x09, &status->n_tx_mcast, 31, 0, 4);
- sja1105_unpack(p + 0x08, &status->n_tx_bcast, 31, 0, 4);
- sja1105_unpack(p + 0x07, &status->n_rx_bytes_1024_2047, 31, 0, 4);
- sja1105_unpack(p + 0x06, &status->n_rx_bytes_512_1023, 31, 0, 4);
- sja1105_unpack(p + 0x05, &status->n_rx_bytes_256_511, 31, 0, 4);
- sja1105_unpack(p + 0x04, &status->n_rx_bytes_128_255, 31, 0, 4);
- sja1105_unpack(p + 0x03, &status->n_rx_bytes_65_127, 31, 0, 4);
- sja1105_unpack(p + 0x02, &status->n_rx_bytes_64, 31, 0, 4);
- sja1105_unpack(p + 0x01, &status->n_rx_mcast, 31, 0, 4);
- sja1105_unpack(p + 0x00, &status->n_rx_bcast, 31, 0, 4);
-}
-
-static int
-sja1105pqrs_port_status_get_ether(struct sja1105_private *priv,
- struct sja1105_port_status_ether *ether,
- int port)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_ETHER_AREA] = {0};
- int rc;
-
- /* Ethernet statistics area */
- rc = sja1105_xfer_buf(priv, SPI_READ, regs->ether_stats[port],
- packed_buf, SJA1105_SIZE_ETHER_AREA);
- if (rc < 0)
- return rc;
-
- sja1105pqrs_port_status_ether_unpack(packed_buf, ether);
-
- return 0;
-}
-
-static int sja1105_port_status_get_mac(struct sja1105_private *priv,
- struct sja1105_port_status_mac *status,
- int port)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_MAC_AREA] = {0};
- int rc;
-
- /* MAC area */
- rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf,
- SJA1105_SIZE_MAC_AREA);
- if (rc < 0)
- return rc;
-
- sja1105_port_status_mac_unpack(packed_buf, status);
-
- return 0;
-}
-
-static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
- struct sja1105_port_status_hl1 *status,
- int port)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
- int rc;
-
- rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf,
- SJA1105_SIZE_HL1_AREA);
- if (rc < 0)
- return rc;
-
- sja1105_port_status_hl1_unpack(packed_buf, status);
-
- return 0;
-}
-
-static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
- struct sja1105_port_status_hl2 *status,
- int port)
+static int sja1105_port_counter_read(struct sja1105_private *priv, int port,
+ enum sja1105_counter_index idx, u64 *ctr)
{
- const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
+ const struct sja1105_port_counter *c = &sja1105_port_counters[idx];
+ size_t size = c->is_64bit ? 8 : 4;
+ u8 buf[8] = {0};
+ u64 regs;
int rc;
- rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf,
- SJA1105_SIZE_HL2_AREA);
- if (rc < 0)
- return rc;
-
- sja1105_port_status_hl2_unpack(packed_buf, status);
-
- /* Code below is strictly P/Q/R/S specific. */
- if (priv->info->device_id == SJA1105E_DEVICE_ID ||
- priv->info->device_id == SJA1105T_DEVICE_ID)
- return 0;
+ regs = priv->info->regs->stats[c->area][port];
- rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf,
- SJA1105_SIZE_QLEVEL_AREA);
- if (rc < 0)
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs + c->offset, buf, size);
+ if (rc)
return rc;
- sja1105pqrs_port_status_qlevel_unpack(packed_buf, status);
+ sja1105_unpack(buf, ctr, c->start, c->end, size);
return 0;
}
-static int sja1105_port_status_get(struct sja1105_private *priv,
- struct sja1105_port_status *status,
- int port)
-{
- int rc;
-
- rc = sja1105_port_status_get_mac(priv, &status->mac, port);
- if (rc < 0)
- return rc;
- rc = sja1105_port_status_get_hl1(priv, &status->hl1, port);
- if (rc < 0)
- return rc;
- rc = sja1105_port_status_get_hl2(priv, &status->hl2, port);
- if (rc < 0)
- return rc;
-
- if (priv->info->device_id == SJA1105E_DEVICE_ID ||
- priv->info->device_id == SJA1105T_DEVICE_ID)
- return 0;
-
- return sja1105pqrs_port_status_get_ether(priv, &status->ether, port);
-}
-
-static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
- /* MAC-Level Diagnostic Counters */
- "n_runt",
- "n_soferr",
- "n_alignerr",
- "n_miierr",
- /* MAC-Level Diagnostic Flags */
- "typeerr",
- "sizeerr",
- "tctimeout",
- "priorerr",
- "nomaster",
- "memov",
- "memerr",
- "invtyp",
- "intcyov",
- "domerr",
- "pcfbagdrop",
- "spcprior",
- "ageprior",
- "portdrop",
- "lendrop",
- "bagdrop",
- "policeerr",
- "drpnona664err",
- "spcerr",
- "agedrp",
- /* High-Level Diagnostic Counters */
- "n_n664err",
- "n_vlanerr",
- "n_unreleased",
- "n_sizeerr",
- "n_crcerr",
- "n_vlnotfound",
- "n_ctpolerr",
- "n_polerr",
- "n_rxfrm",
- "n_rxbyte",
- "n_txfrm",
- "n_txbyte",
- "n_qfull",
- "n_part_drop",
- "n_egr_disabled",
- "n_not_reach",
-};
-
-static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
- /* Queue Levels */
- "qlevel_hwm_0",
- "qlevel_hwm_1",
- "qlevel_hwm_2",
- "qlevel_hwm_3",
- "qlevel_hwm_4",
- "qlevel_hwm_5",
- "qlevel_hwm_6",
- "qlevel_hwm_7",
- "qlevel_0",
- "qlevel_1",
- "qlevel_2",
- "qlevel_3",
- "qlevel_4",
- "qlevel_5",
- "qlevel_6",
- "qlevel_7",
- /* Ether Stats */
- "n_drops_nolearn",
- "n_drops_noroute",
- "n_drops_ill_dtag",
- "n_drops_dtag",
- "n_drops_sotag",
- "n_drops_sitag",
- "n_drops_utag",
- "n_tx_bytes_1024_2047",
- "n_tx_bytes_512_1023",
- "n_tx_bytes_256_511",
- "n_tx_bytes_128_255",
- "n_tx_bytes_65_127",
- "n_tx_bytes_64",
- "n_tx_mcast",
- "n_tx_bcast",
- "n_rx_bytes_1024_2047",
- "n_rx_bytes_512_1023",
- "n_rx_bytes_256_511",
- "n_rx_bytes_128_255",
- "n_rx_bytes_65_127",
- "n_rx_bytes_64",
- "n_rx_mcast",
- "n_rx_bcast",
-};
-
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port_status *status;
- int rc, i, k = 0;
-
- status = kzalloc(sizeof(*status), GFP_KERNEL);
- if (!status)
- goto out;
-
- rc = sja1105_port_status_get(priv, status, port);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to read port %d counters: %d\n",
- port, rc);
- goto out;
- }
- memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
- data[k++] = status->mac.n_runt;
- data[k++] = status->mac.n_soferr;
- data[k++] = status->mac.n_alignerr;
- data[k++] = status->mac.n_miierr;
- data[k++] = status->mac.typeerr;
- data[k++] = status->mac.sizeerr;
- data[k++] = status->mac.tctimeout;
- data[k++] = status->mac.priorerr;
- data[k++] = status->mac.nomaster;
- data[k++] = status->mac.memov;
- data[k++] = status->mac.memerr;
- data[k++] = status->mac.invtyp;
- data[k++] = status->mac.intcyov;
- data[k++] = status->mac.domerr;
- data[k++] = status->mac.pcfbagdrop;
- data[k++] = status->mac.spcprior;
- data[k++] = status->mac.ageprior;
- data[k++] = status->mac.portdrop;
- data[k++] = status->mac.lendrop;
- data[k++] = status->mac.bagdrop;
- data[k++] = status->mac.policeerr;
- data[k++] = status->mac.drpnona664err;
- data[k++] = status->mac.spcerr;
- data[k++] = status->mac.agedrp;
- data[k++] = status->hl1.n_n664err;
- data[k++] = status->hl1.n_vlanerr;
- data[k++] = status->hl1.n_unreleased;
- data[k++] = status->hl1.n_sizeerr;
- data[k++] = status->hl1.n_crcerr;
- data[k++] = status->hl1.n_vlnotfound;
- data[k++] = status->hl1.n_ctpolerr;
- data[k++] = status->hl1.n_polerr;
- data[k++] = status->hl1.n_rxfrm;
- data[k++] = status->hl1.n_rxbyte;
- data[k++] = status->hl1.n_txfrm;
- data[k++] = status->hl1.n_txbyte;
- data[k++] = status->hl2.n_qfull;
- data[k++] = status->hl2.n_part_drop;
- data[k++] = status->hl2.n_egr_disabled;
- data[k++] = status->hl2.n_not_reach;
+ enum sja1105_counter_index max_ctr, i;
+ int rc, k = 0;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
- goto out;
-
- memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
- sizeof(u64));
- for (i = 0; i < 8; i++) {
- data[k++] = status->hl2.qlevel_hwm[i];
- data[k++] = status->hl2.qlevel[i];
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to read port %d counters: %d\n",
+ port, rc);
+ break;
+ }
}
- data[k++] = status->ether.n_drops_nolearn;
- data[k++] = status->ether.n_drops_noroute;
- data[k++] = status->ether.n_drops_ill_dtag;
- data[k++] = status->ether.n_drops_dtag;
- data[k++] = status->ether.n_drops_sotag;
- data[k++] = status->ether.n_drops_sitag;
- data[k++] = status->ether.n_drops_utag;
- data[k++] = status->ether.n_tx_bytes_1024_2047;
- data[k++] = status->ether.n_tx_bytes_512_1023;
- data[k++] = status->ether.n_tx_bytes_256_511;
- data[k++] = status->ether.n_tx_bytes_128_255;
- data[k++] = status->ether.n_tx_bytes_65_127;
- data[k++] = status->ether.n_tx_bytes_64;
- data[k++] = status->ether.n_tx_mcast;
- data[k++] = status->ether.n_tx_bcast;
- data[k++] = status->ether.n_rx_bytes_1024_2047;
- data[k++] = status->ether.n_rx_bytes_512_1023;
- data[k++] = status->ether.n_rx_bytes_256_511;
- data[k++] = status->ether.n_rx_bytes_128_255;
- data[k++] = status->ether.n_rx_bytes_65_127;
- data[k++] = status->ether.n_rx_bytes_64;
- data[k++] = status->ether.n_rx_mcast;
- data[k++] = status->ether.n_rx_bcast;
-out:
- kfree(status);
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
struct sja1105_private *priv = ds->priv;
- u8 *p = data;
- int i;
+ enum sja1105_counter_index max_ctr, i;
+ char *p = data;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(sja1105_port_stats); i++) {
- strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- if (priv->info->device_id == SJA1105E_DEVICE_ID ||
- priv->info->device_id == SJA1105T_DEVICE_ID)
- return;
- for (i = 0; i < ARRAY_SIZE(sja1105pqrs_extra_port_stats); i++) {
- strlcpy(p, sja1105pqrs_extra_port_stats[i],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
}
}
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- int count = ARRAY_SIZE(sja1105_port_stats);
struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ int sset_count = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- if (priv->info->device_id == SJA1105PR_DEVICE_ID ||
- priv->info->device_id == SJA1105QS_DEVICE_ID)
- count += ARRAY_SIZE(sja1105pqrs_extra_port_stats);
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ sset_count++;
+ }
- return count;
+ return sset_count;
}
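
The ethtool rewrite above folds four hand-unrolled unpack helpers and two string arrays into one descriptor table, so the stats, strings and sset-count paths all walk the same data. Below is a self-contained sketch of the three ingredients: a sparse designated-initializer table (holes such as __SJA1105_COUNTER_UNUSED stay zero), an MSB-first bit-range extract in the style of the start/end fields above, and a count that skips unnamed slots. The word layout and values are invented for the example:

#include <stdint.h>
#include <stdio.h>

struct counter_desc {
	const char *name; /* NULL in a hole left by the designated initializer */
	int offset;       /* 32-bit word offset inside the status area */
	int start, end;   /* bit range, MSB-first, as in the table above */
};

enum { CTR_UNUSED, N_RUNT, N_SOFERR, MAX_CTR };

static const struct counter_desc counters[MAX_CTR] = {
	[N_RUNT]   = { .name = "n_runt",   .offset = 0, .start = 31, .end = 24 },
	[N_SOFERR] = { .name = "n_soferr", .offset = 0, .start = 23, .end = 16 },
};

/* extract bits [start:end], inclusive, start >= end, from one word */
static uint64_t extract(uint32_t word, int start, int end)
{
	int width = start - end + 1;
	uint64_t mask = (width >= 32) ? 0xffffffffULL : (1ULL << width) - 1;

	return (word >> end) & mask;
}

int main(void)
{
	const uint32_t status_area[1] = { 0xAABBCCDD };
	int i, sset_count = 0;

	for (i = 0; i < MAX_CTR; i++) {
		const struct counter_desc *c = &counters[i];

		if (!c->name) /* skip the enum hole, like the strlen() check */
			continue;

		sset_count++;
		printf("%s = %llu\n", c->name,
		       (unsigned long long)extract(status_area[c->offset],
						   c->start, c->end));
	}

	printf("sset count = %d\n", sset_count); /* 2 */
	return 0;
}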
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 973761132fc3..6c10ffa968ce 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -35,6 +35,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
bool new_rule = false;
unsigned long p;
int rc;
@@ -59,7 +60,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
- if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
+ if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
NL_SET_ERR_MSG_MOD(extack,
"Port already has a broadcast policer");
rc = -EEXIST;
@@ -71,8 +72,8 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
/* Make the broadcast policers of all ports attached to this block
* point to the newly allocated policer
*/
- for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
policing[bcast].sharindx = rule->bcast_pol.sharindx;
}
@@ -143,7 +144,7 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
/* Make the policers for traffic class @tc of all ports attached to
* this block point to the newly allocated policer
*/
- for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
int index = (p * SJA1105_NUM_TC) + tc;
policing[index].sharindx = rule->tc_pol.sharindx;
@@ -435,7 +436,7 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (rule->type == SJA1105_RULE_BCAST_POLICER) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
old_sharindx = policing[bcast].sharindx;
policing[bcast].sharindx = port;
@@ -486,7 +487,7 @@ void sja1105_flower_setup(struct dsa_switch *ds)
INIT_LIST_HEAD(&priv->flow_block.rules);
- for (port = 0; port < SJA1105_NUM_PORTS; port++)
+ for (port = 0; port < ds->num_ports; port++)
priv->flow_block.l2_policer_used[port] = true;
}
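
These flower hunks swap the compile-time SJA1105_NUM_PORTS for the runtime ds->num_ports in the policer index arithmetic. The L2 policing table layout that arithmetic implies, sketched with an assumed 5-port, 8-traffic-class configuration:

#include <stdio.h>

#define NUM_TC 8

/* per-(port, tc) policers occupy [0, num_ports * NUM_TC), one broadcast
 * policer per port follows, and SJA1110 appends one multicast policer
 * per port after that
 */
static int tc_policer(int port, int tc)
{
	return port * NUM_TC + tc;
}

static int bcast_policer(int num_ports, int port)
{
	return num_ports * NUM_TC + port;
}

static int mcast_policer(int num_ports, int port)
{
	return num_ports * (NUM_TC + 1) + port;
}

int main(void)
{
	int num_ports = 5; /* illustrative SJA1105-class figure */

	printf("port 2, tc 3 -> %d\n", tc_policer(2, 3));            /* 19 */
	printf("port 2 bcast -> %d\n", bcast_policer(num_ports, 2)); /* 42 */
	printf("port 2 mcast -> %d\n", mcast_policer(num_ports, 2)); /* 47 */
	return 0;
}

With 5 ports the table holds only 5 * 8 + 5 = 45 rows, so the multicast indices 45..49 do not exist; that is why the setup loop in sja1105_init_l2_policing() later in this patch must compare mcast strictly against max_entry_count before writing.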
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 405024b637d6..4f0545605f6b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -16,16 +16,17 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
-#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
@@ -56,14 +57,6 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
return !!(l2_fwd[from].reach_port & BIT(to));
}
-/* Structure used to temporarily transport device tree
- * settings into sja1105_setup
- */
-struct sja1105_dt_port {
- phy_interface_t phy_mode;
- sja1105_mii_role_t role;
-};
-
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry default_mac = {
@@ -79,7 +72,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
/* Always put the MAC speed in automatic mode, where it can be
* adjusted at runtime by PHYLINK.
*/
- .speed = SJA1105_SPEED_AUTO,
+ .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
/* No static correction for 1-step 1588 events */
.tp_delin = 0,
.tp_delout = 0,
@@ -106,6 +99,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
.ingress = false,
};
struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i;
@@ -117,16 +111,16 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_NUM_PORTS,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_NUM_PORTS;
+ table->entry_count = table->ops->max_entry_count;
mac = table->entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
if (i == dsa_upstream_port(priv->ds, i)) {
/* STP doesn't get called for CPU port, so we need to
@@ -141,26 +135,11 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
return 0;
}
-static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
-{
- if (priv->info->part_no != SJA1105R_PART_NO &&
- priv->info->part_no != SJA1105S_PART_NO)
- return false;
-
- if (port != SJA1105_SGMII_PORT)
- return false;
-
- if (dsa_is_unused_port(priv->ds, port))
- return false;
-
- return true;
-}
-
-static int sja1105_init_mii_settings(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_init_mii_settings(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct sja1105_xmii_params_entry *mii;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i;
@@ -172,51 +151,81 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
/* Override table based on PHYLINK DT bindings */
- table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
mii = table->entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
+ sja1105_mii_role_t role = XMII_MAC;
+
if (dsa_is_unused_port(priv->ds, i))
continue;
- switch (ports[i].phy_mode) {
+ switch (priv->phy_mode[i]) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
+ mii->special[i] = true;
+
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ role = XMII_PHY;
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
+ if (!priv->info->supports_mii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_MII;
break;
+ case PHY_INTERFACE_MODE_REVRMII:
+ role = XMII_PHY;
+ fallthrough;
case PHY_INTERFACE_MODE_RMII:
+ if (!priv->info->supports_rmii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!priv->info->supports_rgmii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_SGMII:
- if (!sja1105_supports_sgmii(priv, i))
- return -EINVAL;
+ if (!priv->info->supports_sgmii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (!priv->info->supports_2500basex[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+unsupported:
default:
- dev_err(dev, "Unsupported PHY mode %s!\n",
- phy_modes(ports[i].phy_mode));
+ dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
+ phy_modes(priv->phy_mode[i]), i);
+ return -EINVAL;
}
- /* Even though the SerDes port is able to drive SGMII autoneg
- * like a PHY would, from the perspective of the XMII tables,
- * the SGMII port should always be put in MAC mode.
- */
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
- mii->phy_mac[i] = XMII_MAC;
- else
- mii->phy_mac[i] = ports[i].role;
+ mii->phy_mac[i] = role;
}
return 0;
}
@@ -265,8 +274,6 @@ static int sja1105_init_static_fdb(struct sja1105_private *priv)
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
- struct sja1105_table *table;
- u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
/* Learned FDB entries are forgotten after 300 seconds */
.maxage = SJA1105_AGEING_TIME_MS(300000),
@@ -274,8 +281,6 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
/* And the P/Q/R/S equivalent setting: */
.start_dynspc = 0,
- .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
- max_fdb_entries, max_fdb_entries, },
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
/* This selects between Independent VLAN Learning (IVL) and
@@ -299,6 +304,23 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.owr_dyn = true,
.drpnolearn = true,
};
+ struct dsa_switch *ds = priv->ds;
+ int port, num_used_ports = 0;
+ struct sja1105_table *table;
+ u64 max_fdb_entries;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (!dsa_is_unused_port(ds, port))
+ num_used_ports++;
+
+ max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
+ }
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
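
The maxaddrp change above sizes the per-port FDB partition from the number of ports actually in use instead of a compile-time constant. A small sketch of the arithmetic (1024 matching SJA1105_MAX_L2_LOOKUP_COUNT; the port mix is an assumed example):

#include <stdio.h>

#define MAX_L2_LOOKUP_COUNT 1024 /* FDB size, as in the driver */

int main(void)
{
	/* example: an 11-port switch with 3 ports left unused in DT */
	int num_ports = 11, num_used_ports = 8;
	unsigned long long max_fdb_entries = MAX_L2_LOOKUP_COUNT / num_used_ports;
	int port;

	for (port = 0; port < num_ports; port++) {
		int unused = port >= num_used_ports; /* stand-in for the DT check */

		printf("port %d: maxaddrp = %llu\n", port,
		       unused ? 0ULL : max_fdb_entries); /* 1024 / 8 = 128 */
	}

	return 0;
}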
@@ -307,12 +329,12 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
@@ -321,26 +343,30 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
struct sja1105_vlan_lookup_entry pvid = {
+ .type_entry = SJA1110_VLAN_D_TAG,
.ving_mirr = 0,
.vegr_mirr = 0,
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
- .vlanid = 1,
+ .vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- /* The static VLAN table will only contain the initial pvid of 1.
- * All other VLANs are to be configured through dynamic entries,
- * and kept in the static configuration table as backing memory.
- */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
@@ -353,9 +379,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
- /* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit as untagged.
- */
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
@@ -366,15 +389,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
- * transmitted as untagged.
- */
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
- v->vid = 1;
+ v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
@@ -388,6 +408,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_entry *l2fwd;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i, j;
@@ -398,19 +419,22 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
+ table->entry_count = table->ops->max_entry_count;
l2fwd = table->entries;
 /* First num_ports entries define the forwarding rules */
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
unsigned int upstream = dsa_upstream_port(priv->ds, i);
+ if (dsa_is_unused_port(ds, i))
+ continue;
+
for (j = 0; j < SJA1105_NUM_TC; j++)
l2fwd[i].vlan_pmap[j] = j;
@@ -432,24 +456,66 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
l2fwd[upstream].bc_domain |= BIT(i);
l2fwd[upstream].fl_domain |= BIT(i);
}
+
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
*/
- for (i = 0; i < SJA1105_NUM_TC; i++)
- for (j = 0; j < SJA1105_NUM_PORTS; j++)
- l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
+ for (i = 0; i < SJA1105_NUM_TC; i++) {
+ for (j = 0; j < ds->num_ports; j++) {
+ if (dsa_is_unused_port(ds, j))
+ continue;
+
+ l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
+ }
+
+ l2fwd[ds->num_ports + i].type_egrpcp2outputq = true;
+ }
+
+ return 0;
+}
+
+static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
+{
+ struct sja1110_pcp_remapping_entry *pcp_remap;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
+
+ /* Nothing to do for SJA1105 */
+ if (!table->ops->max_entry_count)
+ return 0;
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ pcp_remap = table->entries;
+
+ /* Repeat the configuration done for vlan_pmap */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ pcp_remap[port].egrpcp[tc] = tc;
+ }
return 0;
}
static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
- struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
- /* Disallow dynamic reconfiguration of vlan_pmap */
- .max_dynp = 0,
- /* Use a single memory partition for all ingress queues */
- .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
- };
+ struct sja1105_l2_forwarding_params_entry *l2fwd_params;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
@@ -459,16 +525,20 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
- ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
- default_l2fwd_params;
+ l2fwd_params = table->entries;
+
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ l2fwd_params->max_dynp = 0;
+ /* Use a single memory partition for all ingress queues */
+ l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
return 0;
}
@@ -477,16 +547,14 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int max_mem = priv->info->max_frame_mem;
struct sja1105_table *table;
- int max_mem;
/* VLAN retagging is implemented using a loopback port that consumes
* frame buffers. That leaves less for us.
*/
if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
- max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
- else
- max_mem = SJA1105_MAX_FRAME_MEMORY;
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
l2_fwd_params = table->entries;
@@ -508,6 +576,60 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
+/* SJA1110 TDMACONFIGIDX values:
+ *
+ *      | 100 Mbps ports |  1Gbps ports  | 2.5Gbps ports | Disabled ports
+ * -----+----------------+---------------+---------------+---------------
+ *   0  |   0, [5:10]    |     [1:2]     |     [3:4]     |     retag
+ *   1  |0, [5:10], retag|     [1:2]     |     [3:4]     |       -
+ *   2  |   0, [5:10]    |  [1:3], retag |       4       |       -
+ *   3  |   0, [5:10]    |[1:2], 4, retag|       3       |       -
+ *   4  |  0, 2, [5:10]  |    1, retag   |     [3:4]     |       -
+ *   5  |  0, 1, [5:10]  |    2, retag   |     [3:4]     |       -
+ *  14  |   0, [5:10]    |  [1:4], retag |       -       |       -
+ *  15  |     [5:10]     |  [0:4], retag |       -       |       -
+ */
+static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ bool port_1_is_base_tx;
+ bool port_3_is_2500;
+ bool port_4_is_2500;
+ u64 tdmaconfigidx;
+
+ if (priv->info->device_id != SJA1110_DEVICE_ID)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ /* All the settings below are "as opposed to SGMII", which is the
+ * other pinmuxing option.
+ */
+ port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
+ port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
+ port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
+
+ if (port_1_is_base_tx)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 5;
+ else if (port_3_is_2500 && port_4_is_2500)
+ /* Retagging port will operate at 100 Mbps */
+ tdmaconfigidx = 1;
+ else if (port_3_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 3;
+ else if (port_4_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 2;
+ else
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 14;
+
+ general_params->tdmaconfigidx = tdmaconfigidx;
+}
+
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
@@ -531,17 +653,9 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
 * received on host_port itself would be dropped, except
* by installing a temporary 'management route'
*/
- .host_port = dsa_upstream_port(priv->ds, 0),
+ .host_port = priv->ds->num_ports,
/* Default to an invalid value */
- .mirr_port = SJA1105_NUM_PORTS,
- /* Link-local traffic received on casc_port will be forwarded
- * to host_port without embedding the source port and device ID
- * info in the destination MAC address (presumably because it
- * is a cascaded port and a downstream SJA switch already did
- * that). Default to an invalid port (to disable the feature)
- * and overwrite this if we find any DSA (cascaded) ports.
- */
- .casc_port = SJA1105_NUM_PORTS,
+ .mirr_port = priv->ds->num_ports,
/* No TTEthernet */
.vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
@@ -553,8 +667,22 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
*/
.tpid = ETH_P_SJA1105,
.tpid2 = ETH_P_SJA1105,
+ /* Enable the TTEthernet engine on SJA1110 */
+ .tte_en = true,
+ /* Set up the EtherType for control packets on SJA1110 */
+ .header_type = ETH_P_SJA1110,
};
+ struct sja1105_general_params_entry *general_params;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_cpu_port(ds, port)) {
+ default_general_params.host_port = port;
+ break;
+ }
+ }
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -563,16 +691,32 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
+
+ general_params = table->entries;
/* This table only has a single entry */
- ((struct sja1105_general_params_entry *)table->entries)[0] =
- default_general_params;
+ general_params[0] = default_general_params;
+
+ sja1110_select_tdmaconfigidx(priv);
+
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address, and no RX timestamps will be
+ * taken either (presumably because it is a cascaded port and a
+ * downstream SJA switch already did that).
+ * To disable the feature, we need to do different things depending on
+ * switch generation. On SJA1105 we need to set an invalid port, while
+ * on SJA1110, which supports multiple cascaded ports, this field is a
+ * bitmask so it must be left zero.
+ */
+ if (!priv->info->multiple_cascade_ports)
+ general_params->casc_port = ds->num_ports;
return 0;
}
@@ -590,12 +734,12 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
avb = table->entries;
@@ -662,6 +806,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int port, tc;
@@ -673,27 +818,31 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
+ table->entry_count = table->ops->max_entry_count;
policing = table->entries;
/* Setup shared indices for the matchall policers */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+ for (port = 0; port < ds->num_ports; port++) {
+ int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
for (tc = 0; tc < SJA1105_NUM_TC; tc++)
policing[port * SJA1105_NUM_TC + tc].sharindx = port;
policing[bcast].sharindx = port;
+ /* Only SJA1110 has multicast policers; mcast must stay strictly
+ * below the entry count, otherwise this write would run past the
+ * end of the table on switches without them
+ */
+ if (mcast < table->ops->max_entry_count)
+ policing[mcast].sharindx = port;
}
/* Setup the matchall policer parameters */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
if (dsa_is_cpu_port(priv->ds, port))
@@ -708,8 +857,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
-static int sja1105_static_config_load(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_static_config_load(struct sja1105_private *priv)
{
int rc;
@@ -724,7 +872,7 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_mac_settings(priv);
if (rc < 0)
return rc;
- rc = sja1105_init_mii_settings(priv, ports);
+ rc = sja1105_init_mii_settings(priv);
if (rc < 0)
return rc;
rc = sja1105_init_static_fdb(priv);
@@ -751,37 +899,39 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_avb_params(priv);
if (rc < 0)
return rc;
+ rc = sja1110_init_pcp_remapping(priv);
+ if (rc < 0)
+ return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
- const struct sja1105_dt_port *ports)
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
{
- int i;
+ struct dsa_switch *ds = priv->ds;
+ int port;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (ports[i].role == XMII_MAC)
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->fixed_link[port])
continue;
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
- ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[i] = true;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
+ priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_rx_delay[port] = true;
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[i] = true;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
+ priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_tx_delay[port] = true;
- if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
- !priv->info->setup_rgmii_delay)
+ if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
+ !priv->info->setup_rgmii_delay)
return -EINVAL;
}
return 0;
}
static int sja1105_parse_ports_node(struct sja1105_private *priv,
- struct sja1105_dt_port *ports,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
@@ -810,7 +960,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
of_node_put(child);
return -ENODEV;
}
- ports[index].phy_mode = phy_mode;
phy_node = of_parse_phandle(child, "phy-handle", 0);
if (!phy_node) {
@@ -823,25 +972,18 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
/* phy-handle is missing, but fixed-link isn't.
* So it's a fixed link. Default to PHY role.
*/
- ports[index].role = XMII_PHY;
+ priv->fixed_link[index] = true;
} else {
- /* phy-handle present => put port in MAC role */
- ports[index].role = XMII_MAC;
of_node_put(phy_node);
}
- /* The MAC/PHY role can be overridden with explicit bindings */
- if (of_property_read_bool(child, "sja1105,role-mac"))
- ports[index].role = XMII_MAC;
- else if (of_property_read_bool(child, "sja1105,role-phy"))
- ports[index].role = XMII_PHY;
+ priv->phy_mode[index] = phy_mode;
}
return 0;
}
-static int sja1105_parse_dt(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_parse_dt(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct device_node *switch_node = dev->of_node;
@@ -849,113 +991,41 @@ static int sja1105_parse_dt(struct sja1105_private *priv,
int rc;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
return -ENODEV;
}
- rc = sja1105_parse_ports_node(priv, ports, ports_node);
+ rc = sja1105_parse_ports_node(priv, ports_node);
of_node_put(ports_node);
return rc;
}
-static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u32 val;
- int rc;
-
- rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
- NULL);
- if (rc < 0)
- return rc;
-
- return val;
-}
-
-static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
- u16 pcs_val)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u32 val = pcs_val;
- int rc;
-
- rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
- NULL);
- if (rc < 0)
- return rc;
-
- return val;
-}
-
-static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
- bool an_enabled, bool an_master)
-{
- u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
-
- /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
- * stop the clock during LPI mode, make the MAC reconfigure
- * autonomously after PCS autoneg is done, flush the internal FIFOs.
- */
- sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
- SJA1105_DC1_CLOCK_STOP_EN |
- SJA1105_DC1_MAC_AUTO_SW |
- SJA1105_DC1_INIT);
- /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
- sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
- /* AUTONEG_CONTROL: Use SGMII autoneg */
- if (an_master)
- ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
- sja1105_sgmii_write(priv, SJA1105_AC, ac);
- /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
- * sja1105_sgmii_pcs_force_speed must be called later for the link
- * to become operational.
- */
- if (an_enabled)
- sja1105_sgmii_write(priv, MII_BMCR,
- BMCR_ANENABLE | BMCR_ANRESTART);
-}
-
-static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
- int speed)
+/* Convert link speed from SJA1105 to ethtool encoding */
+static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
+ u64 speed)
{
- int pcs_speed;
-
- switch (speed) {
- case SPEED_1000:
- pcs_speed = BMCR_SPEED1000;
- break;
- case SPEED_100:
- pcs_speed = BMCR_SPEED100;
- break;
- case SPEED_10:
- pcs_speed = BMCR_SPEED10;
- break;
- default:
- dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
- return;
- }
- sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
+ if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
+ return SPEED_10;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
+ return SPEED_100;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
+ return SPEED_1000;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
+ return SPEED_2500;
+ return SPEED_UNKNOWN;
}
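The lookup above replaces the global sja1105_speed[] array removed just below: each device family now declares its raw speed encoding in priv->info->port_speed[], and the reverse mapping simply compares against it. A standalone sketch using the SJA1105 values defined later in this patch; note that on these parts the unsupported 2.5G entry aliases AUTO (both 0), so the sketch stops at 1000:

#include <stdint.h>

#define SPEED_UNKNOWN (-1)

enum { SPD_AUTO, SPD_10, SPD_100, SPD_1000, SPD_2500, SPD_CNT };

/* SJA1105 E/T and P/Q/R/S raw encoding, as listed later in this patch */
static const uint64_t port_speed[SPD_CNT] = {
        [SPD_AUTO] = 0, [SPD_10] = 3, [SPD_100] = 2,
        [SPD_1000] = 1, [SPD_2500] = 0, /* not supported */
};

static int speed_to_ethtool(uint64_t raw)
{
        if (raw == port_speed[SPD_10])
                return 10;
        if (raw == port_speed[SPD_100])
                return 100;
        if (raw == port_speed[SPD_1000])
                return 1000;
        /* 2.5G aliases AUTO in this table, so report it as unknown */
        return SPEED_UNKNOWN;
}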
-/* Convert link speed from SJA1105 to ethtool encoding */
-static int sja1105_speed[] = {
- [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
- [SJA1105_SPEED_10MBPS] = SPEED_10,
- [SJA1105_SPEED_100MBPS] = SPEED_100,
- [SJA1105_SPEED_1000MBPS] = SPEED_1000,
-};
-
/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
int speed_mbps)
{
- struct sja1105_xmii_params_entry *mii;
struct sja1105_mac_config_entry *mac;
struct device *dev = priv->ds->dev;
- sja1105_phy_interface_t phy_mode;
- sja1105_speed_t speed;
+ u64 speed;
int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
@@ -965,7 +1035,6 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* reasonable approximation for both E/T and P/Q/R/S.
*/
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
switch (speed_mbps) {
case SPEED_UNKNOWN:
@@ -976,16 +1045,19 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* ok for power consumption in case AN will never complete -
* otherwise PHYLINK should come back with a new update.
*/
- speed = SJA1105_SPEED_AUTO;
+ speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
break;
case SPEED_10:
- speed = SJA1105_SPEED_10MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
break;
case SPEED_100:
- speed = SJA1105_SPEED_100MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
break;
case SPEED_1000:
- speed = SJA1105_SPEED_1000MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ break;
+ case SPEED_2500:
+ speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
break;
default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
@@ -999,8 +1071,10 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* Actually for the SGMII port, the MAC is fixed at 1 Gbps and
* we need to configure the PCS only (if even that).
*/
- if (sja1105_supports_sgmii(priv, port))
- mac[port].speed = SJA1105_SPEED_1000MBPS;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
else
mac[port].speed = speed;
@@ -1018,8 +1092,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* the clock setup does interrupt the clock signal for a certain time
* which causes trouble for all PHYs relying on this signal.
*/
- phy_mode = mii->xmii_mode[port];
- if (phy_mode != XMII_MODE_RGMII)
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
return 0;
return sja1105_clocking_setup_port(priv, port);
@@ -1035,35 +1108,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
phy_interface_t interface)
{
- struct sja1105_xmii_params_entry *mii;
- sja1105_phy_interface_t phy_mode;
-
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
- phy_mode = mii->xmii_mode[port];
-
- switch (interface) {
- case PHY_INTERFACE_MODE_MII:
- return (phy_mode != XMII_MODE_MII);
- case PHY_INTERFACE_MODE_RMII:
- return (phy_mode != XMII_MODE_RMII);
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (phy_mode != XMII_MODE_RGMII);
- case PHY_INTERFACE_MODE_SGMII:
- return (phy_mode != XMII_MODE_SGMII);
- default:
- return true;
- }
+ return priv->phy_mode[port] != interface;
}
static void sja1105_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- bool is_sgmii = sja1105_supports_sgmii(priv, port);
+ struct dw_xpcs *xpcs;
if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
@@ -1071,14 +1125,10 @@ static void sja1105_mac_config(struct dsa_switch *ds, int port,
return;
}
- if (phylink_autoneg_inband(mode) && !is_sgmii) {
- dev_err(ds->dev, "In-band AN not supported!\n");
- return;
- }
+ xpcs = priv->xpcs[port];
- if (is_sgmii)
- sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
- false);
+ if (xpcs)
+ phylink_set_pcs(dp->pl, &xpcs->pcs);
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1099,9 +1149,6 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_adjust_port_config(priv, port, speed);
- if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
- sja1105_sgmii_pcs_force_speed(priv, speed);
-
sja1105_inhibit_tx(priv, BIT(port), false);
}
@@ -1140,44 +1187,16 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
phylink_set(mask, 1000baseT_Full);
+ if (priv->info->supports_2500basex[port]) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct sja1105_private *priv = ds->priv;
- int ais;
-
- /* Read the vendor-specific AUTONEG_INTR_STATUS register */
- ais = sja1105_sgmii_read(priv, SJA1105_AIS);
- if (ais < 0)
- return ais;
-
- switch (SJA1105_AIS_SPEED(ais)) {
- case 0:
- state->speed = SPEED_10;
- break;
- case 1:
- state->speed = SPEED_100;
- break;
- case 2:
- state->speed = SPEED_1000;
- break;
- default:
- dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
- SJA1105_AIS_SPEED(ais));
- }
- state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
- state->an_complete = SJA1105_AIS_COMPLETE(ais);
- state->link = SJA1105_AIS_LINK_STATUS(ais);
-
- return 0;
-}
-
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested)
@@ -1636,7 +1655,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
/* Add this port to the forwarding matrix of the
* other ports in the same bridge, and vice versa.
*/
@@ -1799,6 +1818,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
{
int rc = 0, i;
+ /* The credit-based shapers are only allocated if
+ * CONFIG_NET_SCH_CBS is enabled.
+ */
+ if (!priv->cbs)
+ return 0;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++) {
struct sja1105_cbs_entry *cbs = &priv->cbs[i];
@@ -1834,12 +1859,12 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
+ int speed_mbps[SJA1105_MAX_NUM_PORTS];
+ u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
struct sja1105_mac_config_entry *mac;
- int speed_mbps[SJA1105_NUM_PORTS];
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
s64 t12, t34;
- u16 bmcr = 0;
int rc, i;
s64 now;
@@ -1852,29 +1877,38 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- speed_mbps[i] = sja1105_speed[mac[i].speed];
- mac[i].speed = SJA1105_SPEED_AUTO;
- }
+ for (i = 0; i < ds->num_ports; i++) {
+ u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
- if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
- bmcr = sja1105_sgmii_read(priv, MII_BMCR);
+ speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
+ mac[i].speed);
+ mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
+
+ if (priv->xpcs[i])
+ bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ }
/* No PTP operations can run right now */
mutex_lock(&priv->ptp_data.lock);
rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
- if (rc < 0)
- goto out_unlock_ptp;
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
/* Reset switch and send updated static configuration */
rc = sja1105_static_config_upload(priv);
- if (rc < 0)
- goto out_unlock_ptp;
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
- if (rc < 0)
- goto out_unlock_ptp;
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
@@ -1889,7 +1923,6 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
__sja1105_ptp_adjtime(ds, now);
-out_unlock_ptp:
mutex_unlock(&priv->ptp_data.lock);
dev_info(priv->ds->dev,
@@ -1900,32 +1933,48 @@ out_unlock_ptp:
* For these interfaces there is no dynamic configuration
* needed, since the PLLs have the same settings at all speeds.
*/
- rc = sja1105_clocking_setup(priv);
- if (rc < 0)
- goto out;
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct dw_xpcs *xpcs = priv->xpcs[i];
+ unsigned int mode;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
if (rc < 0)
goto out;
- }
- if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
- bool an_enabled = !!(bmcr & BMCR_ANENABLE);
+ if (!xpcs)
+ continue;
+
+ if (bmcr[i] & BMCR_ANENABLE)
+ mode = MLO_AN_INBAND;
+ else if (priv->fixed_link[i])
+ mode = MLO_AN_FIXED;
+ else
+ mode = MLO_AN_PHY;
- sja1105_sgmii_pcs_config(priv, an_enabled, false);
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ if (rc < 0)
+ goto out;
- if (!an_enabled) {
+ if (!phylink_autoneg_inband(mode)) {
int speed = SPEED_UNKNOWN;
- if (bmcr & BMCR_SPEED1000)
+ if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else if (bmcr[i] & BMCR_SPEED1000)
speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
+ else if (bmcr[i] & BMCR_SPEED100)
speed = SPEED_100;
else
speed = SPEED_10;
- sja1105_sgmii_pcs_force_speed(priv, speed);
+ xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
}
}
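The loop above replays each XPCS's state after the destructive static config upload: the BMCR snapshot taken before the reset selects between in-band AN, fixed-link, and PHY-driven modes, plus the forced speed when autoneg is off. A standalone sketch of just that decision (BMCR bit values copied from the standard MII register layout; the helper names are illustrative):

#include <stdbool.h>

#define BMCR_ANENABLE  0x1000
#define BMCR_SPEED100  0x2000
#define BMCR_SPEED1000 0x0040

enum an_mode { AN_INBAND, AN_FIXED, AN_PHY };

/* Pick the phylink mode to reprogram one port's PCS with, from the
 * BMCR saved before the reset and the DT fixed-link flag.
 */
static enum an_mode pcs_restore_mode(unsigned short bmcr, bool fixed_link)
{
        if (bmcr & BMCR_ANENABLE)
                return AN_INBAND;
        return fixed_link ? AN_FIXED : AN_PHY;
}

/* Forced speed when autoneg is off; 2500base-x implies its speed */
static int pcs_forced_speed(unsigned short bmcr, bool is_2500basex)
{
        if (is_2500basex)
                return 2500;
        if (bmcr & BMCR_SPEED1000)
                return 1000;
        return (bmcr & BMCR_SPEED100) ? 100 : 10;
}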
@@ -2033,7 +2082,9 @@ static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_SJA1105;
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->tag_proto;
}
static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
@@ -2273,6 +2324,7 @@ sja1105_build_bridge_vlans(struct sja1105_private *priv,
new_vlan[match].vlan_bc |= BIT(v->port);
if (!v->untagged)
new_vlan[match].tag_port |= BIT(v->port);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
}
return 0;
@@ -2295,6 +2347,7 @@ sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
new_vlan[match].vlan_bc |= BIT(v->port);
if (!v->untagged)
new_vlan[match].tag_port |= BIT(v->port);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
}
return 0;
@@ -2355,6 +2408,7 @@ static int sja1105_build_subvlans(struct sja1105_private *priv,
new_vlan[match].tag_port |= BIT(v->port);
/* But it's always tagged towards the CPU */
new_vlan[match].tag_port |= BIT(upstream);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
/* The Retagging Table generates packet *clones* with
* the new VLAN. This is a very odd hardware quirk
@@ -2522,6 +2576,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
if (!tmp->untagged)
new_vlan[match].tag_port |= BIT(tmp->port);
new_vlan[match].tag_port |= BIT(upstream);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
/* Deny egress of @rx_vid towards our front-panel port.
* This will force the switch to drop it, and we'll see
* only the re-retagged packets (having the original,
@@ -2612,7 +2667,7 @@ out:
static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
{
- u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
+ u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
struct sja1105_retagging_entry *new_retagging;
struct sja1105_vlan_lookup_entry *new_vlan;
struct sja1105_table *table;
@@ -2817,11 +2872,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
- list_for_each_entry(v, vlan_list, list)
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid)
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
/* Already added */
- return 0;
+ if (v->untagged == untagged && v->pvid == pvid)
+ /* Nothing changed */
+ return 0;
+
+ /* It's the same VLAN, but some of the flags changed
+ * and the user did not bother to delete it first.
+ * Update it and trigger sja1105_build_vlan_table.
+ */
+ v->untagged = untagged;
+ v->pvid = pvid;
+ return 1;
+ }
+ }
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
@@ -2948,11 +3014,10 @@ static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
*/
static int sja1105_setup(struct dsa_switch *ds)
{
- struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
struct sja1105_private *priv = ds->priv;
int rc;
- rc = sja1105_parse_dt(priv, ports);
+ rc = sja1105_parse_dt(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
return rc;
@@ -2961,7 +3026,7 @@ static int sja1105_setup(struct dsa_switch *ds)
/* Error out early if internal delays are required through DT
* and we can't apply them.
*/
- rc = sja1105_parse_rgmii_delays(priv, ports);
+ rc = sja1105_parse_rgmii_delays(priv);
if (rc < 0) {
dev_err(ds->dev, "RGMII delay not supported\n");
return rc;
@@ -2972,18 +3037,42 @@ static int sja1105_setup(struct dsa_switch *ds)
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
}
+
+ rc = sja1105_mdiobus_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
+ ERR_PTR(rc));
+ goto out_ptp_clock_unregister;
+ }
+
+ if (priv->info->disable_microcontroller) {
+ rc = priv->info->disable_microcontroller(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to disable microcontroller: %pe\n",
+ ERR_PTR(rc));
+ goto out_mdiobus_unregister;
+ }
+ }
+
/* Create and send configuration down to device */
- rc = sja1105_static_config_load(priv, ports);
+ rc = sja1105_static_config_load(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
- return rc;
+ goto out_mdiobus_unregister;
}
+
/* Configure the CGU (PHY link modes and speeds) */
- rc = sja1105_clocking_setup(priv);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
- return rc;
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to configure MII clocking: %pe\n",
+ ERR_PTR(rc));
+ goto out_static_config_free;
+ }
}
+
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
* EtherType is.
@@ -3003,7 +3092,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
- return rc;
+ goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
@@ -3012,6 +3101,19 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_mdiobus_unregister:
+ sja1105_mdiobus_unregister(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
return rc;
}
@@ -3022,7 +3124,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
struct sja1105_bridge_vlan *v, *n;
int port;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
struct sja1105_port *sp = &priv->ports[port];
if (!dsa_is_user_port(ds, port))
@@ -3225,6 +3327,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
{
struct sja1105_general_params_entry *general_params;
struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
bool already_enabled;
u64 new_mirr_port;
@@ -3235,7 +3338,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ already_enabled = (general_params->mirr_port != ds->num_ports);
if (already_enabled && enabled && general_params->mirr_port != to) {
dev_err(priv->ds->dev,
"Delete mirroring rules towards port %llu first\n",
@@ -3249,7 +3352,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
int port;
/* Anybody still referencing mirr_port? */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (mac[port].ing_mirr || mac[port].egr_mirr) {
keep = true;
break;
@@ -3257,7 +3360,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
}
/* Unset already_enabled for next time */
if (!keep)
- new_mirr_port = SJA1105_NUM_PORTS;
+ new_mirr_port = ds->num_ports;
}
if (new_mirr_port != general_params->mirr_port) {
general_params->mirr_port = new_mirr_port;
@@ -3468,7 +3571,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_validate = sja1105_phylink_validate,
- .phylink_mac_link_state = sja1105_mac_pcs_get_state,
.phylink_mac_config = sja1105_mac_config,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
@@ -3563,6 +3665,7 @@ static int sja1105_probe(struct spi_device *spi)
struct sja1105_tagger_data *tagger_data;
struct device *dev = &spi->dev;
struct sja1105_private *priv;
+ size_t max_xfer, max_msg;
struct dsa_switch *ds;
int rc, port;
@@ -3596,6 +3699,33 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
}
+ /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
+ * a small one for the message header and another one for the current
+ * chunk of the packed buffer.
+ * Check that the restrictions imposed by the SPI controller are
+ * respected: the chunk buffer is smaller than the max transfer size,
+ * and the total length of the chunk plus its message header is smaller
+ * than the max message size.
+ * We do this at probe time, since the maximum transfer size is a
+ * runtime invariant.
+ */
+ max_xfer = spi_max_transfer_size(spi);
+ max_msg = spi_max_message_size(spi);
+
+ /* We need to send at least one 64-bit word of SPI payload per message
+ * in order to be able to make useful progress.
+ */
+ if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
+ dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
+ return -EINVAL;
+ }
+
+ priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
+ if (priv->max_xfer_len > max_xfer)
+ priv->max_xfer_len = max_xfer;
+ if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
+ priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
+
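Worked example of the clamping above, assuming a hypothetical controller that caps transfers at 64 bytes and messages at 256 bytes; the tighter of the two limits wins:

#include <stdio.h>

#define SPI_MSG_HEADER 4        /* SJA1105_SIZE_SPI_MSG_HEADER */
#define SPI_MSG_MAXLEN (64 * 4) /* SJA1105_SIZE_SPI_MSG_MAXLEN */

int main(void)
{
        size_t max_xfer = 64, max_msg = 256; /* assumed controller limits */
        size_t len = SPI_MSG_MAXLEN;

        if (len > max_xfer)
                len = max_xfer;
        if (len > max_msg - SPI_MSG_HEADER)
                len = max_msg - SPI_MSG_HEADER;

        printf("max_xfer_len = %zu\n", len); /* 64: transfer limit wins */
        return 0;
}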
priv->info = of_device_get_match_data(dev);
/* Detect hardware device */
@@ -3612,7 +3742,7 @@ static int sja1105_probe(struct spi_device *spi)
return -ENOMEM;
ds->dev = dev;
- ds->num_ports = SJA1105_NUM_PORTS;
+ ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
@@ -3646,12 +3776,14 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
- if (!priv->cbs)
- return -ENOMEM;
+ if (!priv->cbs) {
+ rc = -ENOMEM;
+ goto out_unregister_switch;
+ }
}
/* Connections between dsa_port and sja1105_port */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
struct sja1105_port *sp = &priv->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *slave;
@@ -3672,7 +3804,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
- goto out;
+ goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
@@ -3682,7 +3814,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
-out:
+
+out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3691,6 +3824,10 @@ out:
kthread_destroy_worker(sp->xmit_worker);
}
+
+out_unregister_switch:
+ dsa_unregister_switch(ds);
+
return rc;
}
@@ -3709,6 +3846,10 @@ static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
+ { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
+ { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
+ { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
new file mode 100644
index 000000000000..19aea8fb76f6
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021, NXP Semiconductors
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/of_mdio.h>
+#include "sja1105.h"
+
+#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
+
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return 0xffff;
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1105_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1105_XPCS_ID & GENMASK(15, 0);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ tmp = val;
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return -EINVAL;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1110_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1110_XPCS_ID & GENMASK(15, 0);
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that offset can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
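A worked example of the banked addressing above: the (MMD, register) pair is flattened into one address whose upper bits select the bank, written to SJA1110_PCS_BANK_REG, and whose low byte is the offset within the bank. Standalone arithmetic only; in practice the offset is added to regs->pcs_base[phy]:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t mmd = 31;     /* MDIO_MMD_VEND2 */
        uint16_t reg = 0x0001;
        uint32_t addr = ((uint32_t)mmd << 16) | reg;
        uint32_t bank = addr >> 8;
        uint32_t offset = addr & 0xff; /* 0xff itself is reserved */

        printf("addr=0x%06x bank=0x%04x offset=0x%02x\n", addr, bank, offset);
        /* prints: addr=0x1f0001 bank=0x1f00 offset=0x01 */
        return 0;
}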
+
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that offset can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+}
+
+enum sja1105_mdio_opcode {
+ SJA1105_C45_ADDR = 0,
+ SJA1105_C22 = 1,
+ SJA1105_C45_DATA = 2,
+ SJA1105_C45_DATA_AUTOINC = 3,
+};
+
+static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
+ int phy, enum sja1105_mdio_opcode op,
+ int xad)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
+}
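A worked example of the encoding above: the PHY address lands in bits 11:7, the opcode in bits 6:5, and the XAD field (Clause 22 register, or Clause 45 devad) in bits 4:0, all OR-ed onto the controller base. The base below is the SJA1110 value from this patch; the port and register are made up for illustration:

#include <stdio.h>
#include <stdint.h>

enum { C45_ADDR = 0, C22 = 1, C45_DATA = 2, C45_DATA_AUTOINC = 3 };

int main(void)
{
        uint64_t base = 0x1c1000; /* regs->mdio_100base_t1 on SJA1110 */
        int phy = 3, op = C22, xad = 2;
        uint64_t addr = base | (phy << 7) | (op << 5) | (xad << 0);

        printf("SPI addr = 0x%llx\n", (unsigned long long)addr);
        /* 0x1c1000 | 0x180 | 0x20 | 0x2 = 0x1c11a2 */
        return 0;
}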
+
+static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+ }
+
+ /* Clause 22 read */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ tmp = val & 0xffff;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+ }
+
+ /* Clause 22 write */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ tmp = val & 0xffff;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+}
+
+static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_find_compatible_node(mdio_node, NULL,
+ "nxp,sja1110-base-tx-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-TX MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-tx",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_tx_mdio_read;
+ bus->write = sja1105_base_tx_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_tx = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_tx_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_tx)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_tx);
+ mdiobus_free(priv->mdio_base_tx);
+ priv->mdio_base_tx = NULL;
+}
+
+static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_find_compatible_node(mdio_node, NULL,
+ "nxp,sja1110-base-t1-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-T1 MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_t1_mdio_read;
+ bus->write = sja1105_base_t1_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_t1 = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_t1_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_t1)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_t1);
+ mdiobus_free(priv->mdio_base_t1);
+ priv->mdio_base_t1 = NULL;
+}
+
+static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct dsa_switch *ds = priv->ds;
+ struct mii_bus *bus;
+ int rc = 0;
+ int port;
+
+ if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ return 0;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "SJA1105 PCS MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(ds->dev));
+ bus->read = priv->info->pcs_mdio_read;
+ bus->write = priv->info->pcs_mdio_write;
+ bus->parent = ds->dev;
+ /* There is no PHY on this MDIO bus => mask out all PHY addresses
+ * from auto probing.
+ */
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = mdiobus_register(bus);
+ if (rc) {
+ mdiobus_free(bus);
+ return rc;
+ }
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ if (priv->phy_mode[port] != PHY_INTERFACE_MODE_SGMII &&
+ priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
+ continue;
+
+ mdiodev = mdio_device_create(bus, port);
+ if (IS_ERR(mdiodev)) {
+ rc = PTR_ERR(mdiodev);
+ goto out_pcs_free;
+ }
+
+ xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
+ if (IS_ERR(xpcs)) {
+ rc = PTR_ERR(xpcs);
+ goto out_pcs_free;
+ }
+
+ priv->xpcs[port] = xpcs;
+ }
+
+ priv->mdio_pcs = bus;
+
+ return 0;
+
+out_pcs_free:
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ if (!priv->mdio_pcs)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(priv->mdio_pcs);
+ mdiobus_free(priv->mdio_pcs);
+ priv->mdio_pcs = NULL;
+}
+
+int sja1105_mdiobus_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device_node *switch_node = ds->dev->of_node;
+ struct device_node *mdio_node;
+ int rc;
+
+ rc = sja1105_mdiobus_pcs_register(priv);
+ if (rc)
+ return rc;
+
+ mdio_node = of_get_child_by_name(switch_node, "mdios");
+ if (!mdio_node)
+ return 0;
+
+ if (!of_device_is_available(mdio_node))
+ goto out_put_mdio_node;
+
+ if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
+ if (rc)
+ goto err_put_mdio_node;
+ }
+
+ if (regs->mdio_100base_t1 != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_t1_register(priv, mdio_node);
+ if (rc)
+ goto err_free_base_tx_mdiobus;
+ }
+
+out_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return 0;
+
+err_free_base_tx_mdiobus:
+ sja1105_mdiobus_base_tx_unregister(priv);
+err_put_mdio_node:
+ of_node_put(mdio_node);
+ sja1105_mdiobus_pcs_unregister(priv);
+
+ return rc;
+}
+
+void sja1105_mdiobus_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_mdiobus_base_t1_unregister(priv);
+ sja1105_mdiobus_base_tx_unregister(priv);
+ sja1105_mdiobus_pcs_unregister(priv);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 0bc566b9e958..691f6dd7e669 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -79,6 +79,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
priv->tagger_data.stampable_skb = NULL;
}
ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -397,7 +398,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
*shwt = (struct skb_shared_hwtstamps) {0};
- ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = SJA1105_SKB_CB(skb)->tstamp;
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
@@ -413,9 +414,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
return -1;
}
-/* Called from dsa_skb_defer_rx_timestamp */
-bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
@@ -431,6 +430,89 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return true;
}
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ts = SJA1105_SKB_CB(skb)->tstamp;
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+
+ /* Don't defer */
+ return false;
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->rxtstamp(ds, port, skb);
+}
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
+
+/* In addition to cloning the skb, which is done by the common
+ * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
+ * packet to the TX timestamping queue.
+ */
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_port *sp = &priv->ports[port];
+ u8 ts_id;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ spin_lock(&sp->data->meta_lock);
+
+ ts_id = sp->data->ts_id;
+ /* Deal automatically with 8-bit wraparound */
+ sp->data->ts_id++;
+
+ SJA1105_SKB_CB(clone)->ts_id = ts_id;
+
+ spin_unlock(&sp->data->meta_lock);
+
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+}
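On SJA1110, the TX timestamp arrives later in a meta frame that carries only the 8-bit timestamp ID, so the clone queued above is matched by ID in sja1110_process_meta_tstamp() and completed then. A toy standalone model of that matching discipline, using a fixed four-slot table instead of an skb queue; correctness relies on fewer than 256 timestamps being in flight at once:

#include <stdio.h>
#include <stdint.h>

struct pending { uint8_t ts_id; int in_use; };

static struct pending slots[4];
static uint8_t next_ts_id;

/* TX path: hand out an ID; the u8 counter wraps around on its own */
static uint8_t txtstamp_enqueue(int slot)
{
        slots[slot].ts_id = next_ts_id++;
        slots[slot].in_use = 1;
        return slots[slot].ts_id;
}

/* Meta frame path: find and complete the matching clone */
static int meta_tstamp_complete(uint8_t ts_id)
{
        for (int i = 0; i < 4; i++) {
                if (slots[i].in_use && slots[i].ts_id == ts_id) {
                        slots[i].in_use = 0;
                        return i;
                }
        }
        return -1; /* the WARN_ON(!skb_match) case above */
}

int main(void)
{
        uint8_t id = txtstamp_enqueue(0);
        printf("completed slot %d\n", meta_tstamp_complete(id)); /* 0 */
        return 0;
}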
+
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
* the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
@@ -449,6 +531,9 @@ void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
return;
SJA1105_SKB_CB(skb)->clone = clone;
+
+ if (priv->info->txtstamp)
+ priv->info->txtstamp(ds, port, skb);
}
static int sja1105_ptp_reset(struct dsa_switch *ds)
@@ -865,7 +950,10 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_per_out = 1,
};
+ /* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
+ /* Only used on SJA1110 */
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -890,6 +978,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 34f97f58a355..3c874bb4c17b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -75,7 +75,12 @@ struct sja1105_ptp_cmd {
struct sja1105_ptp_data {
struct timer_list extts_timer;
+ /* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
@@ -122,6 +127,10 @@ int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
sja1105_spi_rw_mode_t rw);
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+
#else
struct sja1105_ptp_cmd;
@@ -184,6 +193,10 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1105_hwtstamp_set NULL
+#define sja1105_rxtstamp NULL
+#define sja1110_rxtstamp NULL
+#define sja1110_txtstamp NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_sgmii.h b/drivers/net/dsa/sja1105/sja1105_sgmii.h
deleted file mode 100644
index 24d9bc046e70..000000000000
--- a/drivers/net/dsa/sja1105/sja1105_sgmii.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright 2020, NXP Semiconductors
- */
-#ifndef _SJA1105_SGMII_H
-#define _SJA1105_SGMII_H
-
-#define SJA1105_SGMII_PORT 4
-
-/* DIGITAL_CONTROL_1 (address 1f8000h) */
-#define SJA1105_DC1 0x8000
-#define SJA1105_DC1_VS_RESET BIT(15)
-#define SJA1105_DC1_REMOTE_LOOPBACK BIT(14)
-#define SJA1105_DC1_EN_VSMMD1 BIT(13)
-#define SJA1105_DC1_POWER_SAVE BIT(11)
-#define SJA1105_DC1_CLOCK_STOP_EN BIT(10)
-#define SJA1105_DC1_MAC_AUTO_SW BIT(9)
-#define SJA1105_DC1_INIT BIT(8)
-#define SJA1105_DC1_TX_DISABLE BIT(4)
-#define SJA1105_DC1_AUTONEG_TIMER_OVRR BIT(3)
-#define SJA1105_DC1_BYP_POWERUP BIT(1)
-#define SJA1105_DC1_PHY_MODE_CONTROL BIT(0)
-
-/* DIGITAL_CONTROL_2 register (address 1f80E1h) */
-#define SJA1105_DC2 0x80e1
-#define SJA1105_DC2_TX_POL_INV_DISABLE BIT(4)
-#define SJA1105_DC2_RX_POL_INV BIT(0)
-
-/* DIGITAL_ERROR_CNT register (address 1f80E2h) */
-#define SJA1105_DEC 0x80e2
-#define SJA1105_DEC_ICG_EC_ENA BIT(4)
-#define SJA1105_DEC_CLEAR_ON_READ BIT(0)
-
-/* AUTONEG_CONTROL register (address 1f8001h) */
-#define SJA1105_AC 0x8001
-#define SJA1105_AC_MII_CONTROL BIT(8)
-#define SJA1105_AC_SGMII_LINK BIT(4)
-#define SJA1105_AC_PHY_MODE BIT(3)
-#define SJA1105_AC_AUTONEG_MODE(x) (((x) << 1) & GENMASK(2, 1))
-#define SJA1105_AC_AUTONEG_MODE_SGMII SJA1105_AC_AUTONEG_MODE(2)
-
-/* AUTONEG_INTR_STATUS register (address 1f8002h) */
-#define SJA1105_AIS 0x8002
-#define SJA1105_AIS_LINK_STATUS(x) (!!((x) & BIT(4)))
-#define SJA1105_AIS_SPEED(x) (((x) & GENMASK(3, 2)) >> 2)
-#define SJA1105_AIS_DUPLEX_MODE(x) (!!((x) & BIT(1)))
-#define SJA1105_AIS_COMPLETE(x) (!!((x) & BIT(0)))
-
-/* DEBUG_CONTROL register (address 1f8005h) */
-#define SJA1105_DC 0x8005
-#define SJA1105_DC_SUPPRESS_LOS BIT(4)
-#define SJA1105_DC_RESTART_SYNC BIT(0)
-
-#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index f7a1514f81e8..08cc5dbf2fa6 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -7,10 +7,6 @@
#include <linux/packing.h>
#include "sja1105.h"
-#define SJA1105_SIZE_RESET_CMD 4
-#define SJA1105_SIZE_SPI_MSG_HEADER 4
-#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
-
struct sja1105_chunk {
u8 *buf;
size_t len;
@@ -29,13 +25,6 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
sja1105_pack(buf, &msg->address, 24, 4, size);
}
-#define sja1105_hdr_xfer(xfers, chunk) \
- ((xfers) + 2 * (chunk))
-#define sja1105_chunk_xfer(xfers, chunk) \
- ((xfers) + 2 * (chunk) + 1)
-#define sja1105_hdr_buf(hdr_bufs, chunk) \
- ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
-
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
* address reg_addr, taking @len bytes from *buf
@@ -46,41 +35,25 @@ static int sja1105_xfer(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
size_t len, struct ptp_system_timestamp *ptp_sts)
{
- struct sja1105_chunk chunk = {
- .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
- .reg_addr = reg_addr,
- .buf = buf,
- };
+ u8 hdr_buf[SJA1105_SIZE_SPI_MSG_HEADER] = {0};
struct spi_device *spi = priv->spidev;
- struct spi_transfer *xfers;
+ struct spi_transfer xfers[2] = {0};
+ struct spi_transfer *chunk_xfer;
+ struct spi_transfer *hdr_xfer;
+ struct sja1105_chunk chunk;
int num_chunks;
int rc, i = 0;
- u8 *hdr_bufs;
- num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ num_chunks = DIV_ROUND_UP(len, priv->max_xfer_len);
- /* One transfer for each message header, one for each message
- * payload (chunk).
- */
- xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
- GFP_KERNEL);
- if (!xfers)
- return -ENOMEM;
+ chunk.reg_addr = reg_addr;
+ chunk.buf = buf;
+ chunk.len = min_t(size_t, len, priv->max_xfer_len);
- /* Packed buffers for the num_chunks SPI message headers,
- * stored as a contiguous array
- */
- hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
- GFP_KERNEL);
- if (!hdr_bufs) {
- kfree(xfers);
- return -ENOMEM;
- }
+ hdr_xfer = &xfers[0];
+ chunk_xfer = &xfers[1];
for (i = 0; i < num_chunks; i++) {
- struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
- struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
- u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
struct spi_transfer *ptp_sts_xfer;
struct sja1105_spi_message msg;
@@ -127,21 +100,16 @@ static int sja1105_xfer(const struct sja1105_private *priv,
chunk.buf += chunk.len;
chunk.reg_addr += chunk.len / 4;
chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
- SJA1105_SIZE_SPI_MSG_MAXLEN);
+ priv->max_xfer_len);
- /* De-assert the chip select after each chunk. */
- if (chunk.len)
- chunk_xfer->cs_change = 1;
+ rc = spi_sync_transfer(spi, xfers, 2);
+ if (rc < 0) {
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+ return rc;
+ }
}
- rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
- if (rc < 0)
- dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
-
- kfree(hdr_bufs);
- kfree(xfers);
-
- return rc;
+ return 0;
}
int sja1105_xfer_buf(const struct sja1105_private *priv,
@@ -209,28 +177,34 @@ static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
- const int size = SJA1105_SIZE_RESET_CMD;
- u64 cold_rst = 1;
+ u32 cold_reset = BIT(3);
- sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
-
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
- SJA1105_SIZE_RESET_CMD);
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
}
static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
- const int size = SJA1105_SIZE_RESET_CMD;
- u64 cold_rst = 1;
+ u32 cold_reset = BIT(2);
- sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
- SJA1105_SIZE_RESET_CMD);
+static int sja1110_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 switch_reset = BIT(20);
+
+ /* Only reset the switch core.
+ * A full cold reset would re-enable the BASE_MCSS_CLOCK PLL which
+ * would turn on the microcontroller, potentially letting it execute
+ * code which could interfere with our configuration.
+ */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
}
int sja1105_inhibit_tx(const struct sja1105_private *priv,
@@ -311,7 +285,8 @@ int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
char *final_header_ptr;
int crc_len;
- valid = sja1105_static_config_check_valid(config);
+ valid = sja1105_static_config_check_valid(config,
+ priv->info->max_frame_mem);
if (valid != SJA1105_CONFIG_OK) {
dev_err(&priv->spidev->dev,
sja1105_static_config_error_msg[valid]);
@@ -339,10 +314,10 @@ int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
int sja1105_static_config_upload(struct sja1105_private *priv)
{
- unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
struct sja1105_static_config *config = &priv->static_config;
const struct sja1105_regs *regs = priv->info->regs;
struct device *dev = &priv->spidev->dev;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_status status;
int rc, retries = RETRIES;
u8 *config_buf;
@@ -363,7 +338,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
* Tx on all ports and waiting for current packet to drain.
* Otherwise, the PHY will see an unterminated Ethernet packet.
*/
- rc = sja1105_inhibit_tx(priv, port_bitmap, true);
+ rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
rc = -ENXIO;
@@ -433,7 +408,7 @@ out:
return rc;
}
-static struct sja1105_regs sja1105et_regs = {
+static const struct sja1105_regs sja1105et_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
@@ -446,9 +421,9 @@ static struct sja1105_regs sja1105et_regs = {
.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
- .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
- .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
- .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
/* UM10944.pdf, Table 78, CGU Register overview */
.mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
.mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
@@ -465,9 +440,11 @@ static struct sja1105_regs sja1105et_regs = {
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
.ptpclkcorp = 0x1D,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
};
-static struct sja1105_regs sja1105pqrs_regs = {
+static const struct sja1105_regs sja1105pqrs_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
@@ -479,13 +456,12 @@ static struct sja1105_regs sja1105pqrs_regs = {
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
- .sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
- .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
- .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
- .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
- .ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
/* UM11040.pdf, Table 114 */
.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
@@ -494,7 +470,6 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
- .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
.ptppinst = 0x15,
@@ -504,6 +479,95 @@ static struct sja1105_regs sja1105pqrs_regs = {
.ptpclkrate = 0x1B,
.ptpclkcorp = 0x1E,
.ptpsyncts = 0x1F,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1110_regs = {
+ .device_id = SJA1110_SPI_ADDR(0x0),
+ .prod_id = SJA1110_ACU_ADDR(0xf00),
+ .status = SJA1110_SPI_ADDR(0x4),
+ .port_control = SJA1110_SPI_ADDR(0x50), /* actually INHIB_TX */
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = SJA1110_RGU_ADDR(0x100), /* Reset Control Register 0 */
+ /* Ports 2 and 3 are capable of xMII, but there isn't anything to
+ * configure in the CGU/ACU for them.
+ */
+ .pad_mii_tx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_rx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_id = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1110_ACU_ADDR(0x18), SJA1110_ACU_ADDR(0x28),
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .rmii_pll1 = SJA1105_RSV_ADDR,
+ .cgu_idiv = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208, 0x20a,
+ 0x20c, 0x20e, 0x210, 0x212, 0x214},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440, 0x450,
+ 0x460, 0x470, 0x480, 0x490, 0x4a0},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640, 0x650,
+ 0x660, 0x670, 0x680, 0x690, 0x6a0},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460, 0x1478,
+ 0x1490, 0x14a8, 0x14c0, 0x14d8, 0x14f0},
+ .mii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rgmii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ref_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .ptpschtm = SJA1110_SPI_ADDR(0x54),
+ .ptppinst = SJA1110_SPI_ADDR(0x5c),
+ .ptppindur = SJA1110_SPI_ADDR(0x64),
+ .ptp_control = SJA1110_SPI_ADDR(0x68),
+ .ptpclkval = SJA1110_SPI_ADDR(0x6c),
+ .ptpclkrate = SJA1110_SPI_ADDR(0x74),
+ .ptpclkcorp = SJA1110_SPI_ADDR(0x80),
+ .ptpsyncts = SJA1110_SPI_ADDR(0x84),
+ .mdio_100base_tx = 0x1c2400,
+ .mdio_100base_t1 = 0x1c1000,
+ .pcs_base = {SJA1105_RSV_ADDR, 0x1c1400, 0x1c1800, 0x1c1c00, 0x1c2000,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
};
const struct sja1105_info sja1105e_info = {
@@ -512,15 +576,30 @@ const struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105E",
};
@@ -530,15 +609,30 @@ const struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105T",
};
@@ -548,16 +642,31 @@ const struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105P",
};
@@ -567,16 +676,31 @@ const struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105Q",
};
@@ -586,16 +710,34 @@ const struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
.name = "SJA1105R",
};
@@ -606,14 +748,236 @@ const struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
.name = "SJA1105S",
};
+
+const struct sja1105_info sja1110a_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110A_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, true},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1},
+ .name = "SJA1110A",
+};
+
+const struct sja1105_info sja1110b_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110B_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY},
+ .name = "SJA1110B",
+};
+
+const struct sja1105_info sja1110c_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110C_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110C",
+};
+
+const struct sja1105_info sja1110d_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110D_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, false, true, false, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110D",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index a8efb7fac395..7a422ef4deb6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -180,6 +180,43 @@ size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->vllupformat, 447, 447, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 446, 446, size, op);
+ sja1105_packing(buf, &entry->switchid, 445, 442, size, op);
+ sja1105_packing(buf, &entry->hostprio, 441, 439, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 438, 391, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 390, 343, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 342, 295, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 294, 247, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 246, 246, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 245, 245, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 244, 244, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 243, 243, size, op);
+ sja1105_packing(buf, &entry->casc_port, 242, 232, size, op);
+ sja1105_packing(buf, &entry->host_port, 231, 228, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 227, 224, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 223, 192, size, op);
+ sja1105_packing(buf, &entry->vlmask, 191, 160, size, op);
+ sja1105_packing(buf, &entry->tpid2, 159, 144, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 143, 143, size, op);
+ sja1105_packing(buf, &entry->tpid, 142, 127, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 126, 126, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 125, 114, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 113, 111, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
+ sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
+ sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
+ sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
+ return size;
+}
+
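+/* Illustrative note (not part of the original patch): sja1105_packing()
+ * addresses fields by MSB-first bit positions within a packed buffer of
+ * the given size, with start >= end. A hypothetical round-trip of the
+ * 4-bit SWITCHID field at bits 445..442 might look like:
+ *
+ *	u8 buf[SJA1110_SIZE_GENERAL_PARAMS_ENTRY] = {0};
+ *	u64 switchid = 5;
+ *
+ *	sja1105_packing(buf, &switchid, 445, 442, sizeof(buf), PACK);
+ *	switchid = 0;
+ *	sja1105_packing(buf, &switchid, 445, 442, sizeof(buf), UNPACK);
+ */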
static size_t
sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -195,6 +232,20 @@ sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 5; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 10, offset + 0, size, op);
+ return size;
+}
+
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -211,6 +262,27 @@ size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ int offset, i;
+
+ if (entry->type_egrpcp2outputq) {
+ for (i = 0, offset = 31; i < SJA1110_NUM_PORTS;
+ i++, offset += 3) {
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ }
+ } else {
+ sja1105_packing(buf, &entry->bc_domain, 63, 53, size, op);
+ sja1105_packing(buf, &entry->reach_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 41, 31, size, op);
+ }
+ return size;
+}
+
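+/* Illustrative note (not part of the original patch): on the SJA1110, an
+ * L2 Forwarding row is decoded one of two ways: as a per-port forwarding
+ * domain (bc_domain/reach_port/fl_domain) or, when type_egrpcp2outputq is
+ * set, as a per-port remapping of VLAN PCP values to egress output queues
+ * through vlan_pmap[].
+ */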
static size_t
sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -249,6 +321,28 @@ size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 70; i < SJA1110_NUM_PORTS; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 69, 55, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 54, 45, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 44, 34, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 32, 32, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 31, 31, size, op);
+ sja1105_packing(buf, &entry->use_static, 30, 30, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->learn_once, 28, 28, size, op);
+ return size;
+}
+
size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -291,6 +385,36 @@ size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->trap, 168, 168, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 167, 156, size, op);
+ sja1105_packing(buf, &entry->takets, 155, 155, size, op);
+ sja1105_packing(buf, &entry->mirr, 154, 154, size, op);
+ sja1105_packing(buf, &entry->retag, 153, 153, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 168, 168, size, op);
+ sja1105_packing(buf, &entry->age, 167, 153, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 152, 152, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 151, 140, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 139, 92, size, op);
+ sja1105_packing(buf, &entry->mask_srcport, 91, 88, size, op);
+ sja1105_packing(buf, &entry->iotag, 87, 87, size, op);
+ sja1105_packing(buf, &entry->vlanid, 86, 75, size, op);
+ sja1105_packing(buf, &entry->macaddr, 74, 27, size, op);
+ sja1105_packing(buf, &entry->srcport, 26, 23, size, op);
+ sja1105_packing(buf, &entry->destports, 22, 12, size, op);
+ sja1105_packing(buf, &entry->enfport, 11, 11, size, op);
+ sja1105_packing(buf, &entry->index, 10, 1, size, op);
+ return size;
+}
+
static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -305,6 +429,20 @@ static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 57, size, op);
+ sja1105_packing(buf, &entry->smax, 56, 39, size, op);
+ sja1105_packing(buf, &entry->rate, 38, 21, size, op);
+ sja1105_packing(buf, &entry->maxlen, 20, 10, size, op);
+ sja1105_packing(buf, &entry->partition, 9, 7, size, op);
+ return size;
+}
+
static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -373,6 +511,40 @@ size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->speed, 98, 96, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 95, 80, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 79, 64, size, op);
+ sja1105_packing(buf, &entry->maxage, 63, 56, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 55, 53, size, op);
+ sja1105_packing(buf, &entry->vlanid, 52, 41, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 37, 37, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->retag, 33, 33, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 32, 32, size, op);
+ sja1105_packing(buf, &entry->egress, 31, 31, size, op);
+ sja1105_packing(buf, &entry->ingress, 30, 30, size, op);
+ sja1105_packing(buf, &entry->ifg, 10, 5, size, op);
+ return size;
+}
+
static size_t
sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -398,6 +570,19 @@ sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1110_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 63, 61, size, op);
+ sja1105_packing(buf, &entry->delta, 60, 43, size, op);
+ sja1105_packing(buf, &entry->address, 42, 31, size, op);
+ return size;
+}
+
static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -411,6 +596,19 @@ static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 0; i < 8; i++, offset += 12)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 11, offset + 0, size, op);
+ return size;
+}
+
static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -430,6 +628,25 @@ static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 95, 84, size, op);
+ sja1105_packing(buf, &entry->winend, 83, 83, size, op);
+ sja1105_packing(buf, &entry->winst, 82, 82, size, op);
+ sja1105_packing(buf, &entry->destports, 81, 71, size, op);
+ sja1105_packing(buf, &entry->setvalid, 70, 70, size, op);
+ sja1105_packing(buf, &entry->txen, 69, 69, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 68, 68, size, op);
+ sja1105_packing(buf, &entry->resmedia, 67, 60, size, op);
+ sja1105_packing(buf, &entry->vlindex, 59, 48, size, op);
+ sja1105_packing(buf, &entry->delta, 47, 30, size, op);
+ return size;
+}
+
static size_t
sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -445,6 +662,21 @@ sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1110_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 7, 7, size, op);
+ return size;
+}
+
static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -458,6 +690,19 @@ static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 14, size, op);
+ return size;
+}
+
size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -492,6 +737,40 @@ size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 82, 35, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 34, 23, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 18, 16, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 50, 35, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ }
+ return size;
+}
+
static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -508,6 +787,22 @@ static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 40, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -523,6 +818,22 @@ size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+
+ sja1105_packing(buf, &entry->ving_mirr, 95, 85, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 84, 74, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 73, 63, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 62, 52, size, op);
+ sja1105_packing(buf, &entry->tag_port, 51, 41, size, op);
+ sja1105_packing(buf, &entry->type_entry, 40, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -539,6 +850,24 @@ static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 20; i < SJA1110_NUM_PORTS; i++, offset += 4) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ sja1105_packing(buf, &entry->special[i],
+ offset + 3, offset + 3, size, op);
+ }
+ return size;
+}
+
size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -555,6 +884,36 @@ size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 53, size, op);
+ sja1105_packing(buf, &entry->ing_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 41, 30, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 29, 18, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 17, 17, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 16, 16, size, op);
+ sja1105_packing(buf, &entry->destports, 15, 5, size, op);
+ return size;
+}
+
+static size_t sja1110_pcp_remapping_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1110_pcp_remapping_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_PCP_REMAPPING_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < SJA1105_NUM_TC; i++, offset += 3)
+ sja1105_packing(buf, &entry->egrpcp[i],
+ offset + 2, offset + 0, size, op);
+
+ return size;
+}
+
size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -619,6 +978,7 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
[BLK_IDX_RETAGGING] = BLKID_RETAGGING,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+ [BLK_IDX_PCP_REMAPPING] = BLKID_PCP_REMAPPING,
};
const char *sja1105_static_config_error_msg[] = {
@@ -657,11 +1017,11 @@ const char *sja1105_static_config_error_msg[] = {
};
static sja1105_config_valid_t
-static_config_check_memory_size(const struct sja1105_table *tables)
+static_config_check_memory_size(const struct sja1105_table *tables, int max_mem)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
- int i, max_mem, mem = 0;
+ int i, mem = 0;
l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
@@ -675,9 +1035,7 @@ static_config_check_memory_size(const struct sja1105_table *tables)
}
if (tables[BLK_IDX_RETAGGING].entry_count)
- max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
- else
- max_mem = SJA1105_MAX_FRAME_MEMORY;
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
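+	/* Illustrative note (not part of the original patch): for the SJA1105
+	 * this is equivalent to the old logic, since 929 - 19 == 910, i.e.
+	 * SJA1105_MAX_FRAME_MEMORY minus the retagging overhead equals the
+	 * removed SJA1105_MAX_FRAME_MEMORY_RETAGGING value.
+	 */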
if (mem > max_mem)
return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
@@ -686,15 +1044,15 @@ static_config_check_memory_size(const struct sja1105_table *tables)
}
sja1105_config_valid_t
-sja1105_static_config_check_valid(const struct sja1105_static_config *config)
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem)
{
const struct sja1105_table *tables = config->tables;
#define IS_FULL(blk_idx) \
(tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
if (tables[BLK_IDX_SCHEDULE].entry_count) {
- if (config->device_id != SJA1105T_DEVICE_ID &&
- config->device_id != SJA1105QS_DEVICE_ID)
+ if (!tables[BLK_IDX_SCHEDULE].ops->max_entry_count)
return SJA1105_TTETHERNET_NOT_SUPPORTED;
if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
@@ -754,7 +1112,7 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config)
if (!IS_FULL(BLK_IDX_XMII_PARAMS))
return SJA1105_MISSING_XMII_TABLE;
- return static_config_check_memory_size(tables);
+ return static_config_check_memory_size(tables, max_mem);
#undef IS_FULL
}
@@ -1401,6 +1759,130 @@ const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
},
};
+/* SJA1110: Third generation */
+const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1110_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1110_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1110_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1110_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1110_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1110_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1110_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1110_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1110_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1110_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1110_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1110_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1110_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1110_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1110_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1110_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1110_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1110_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1110_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+ [BLK_IDX_PCP_REMAPPING] = {
+ .packing = sja1110_pcp_remapping_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1110_pcp_remapping_entry),
+ .packed_entry_size = SJA1110_SIZE_PCP_REMAPPING_ENTRY,
+ .max_entry_count = SJA1110_MAX_PCP_REMAPPING_COUNT,
+ },
+};
+
int sja1105_static_config_init(struct sja1105_static_config *config,
const struct sja1105_table_ops *static_ops,
u64 device_id)
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index bc7606899289..bce0f5c03d0b 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -9,19 +9,30 @@
#include <linux/types.h>
#include <asm/types.h>
+#define SJA1105_NUM_PORTS 5
+#define SJA1110_NUM_PORTS 11
+#define SJA1105_MAX_NUM_PORTS SJA1110_NUM_PORTS
+#define SJA1105_NUM_TC 8
+
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
#define SJA1105_SIZE_DEVICE_ID 4
#define SJA1105_SIZE_TABLE_HEADER 12
#define SJA1105_SIZE_SCHEDULE_ENTRY 8
+#define SJA1110_SIZE_SCHEDULE_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 8
#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
#define SJA1105_SIZE_VL_POLICING_ENTRY 8
#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
#define SJA1105_SIZE_L2_POLICING_ENTRY 8
#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1110_SIZE_VLAN_LOOKUP_ENTRY 12
#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
#define SJA1105_SIZE_RETAGGING_ENTRY 8
#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1110_SIZE_XMII_PARAMS_ENTRY 8
#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
@@ -32,11 +43,15 @@
#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
#define SJA1105ET_SIZE_CBS_ENTRY 16
#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1110_SIZE_L2_LOOKUP_ENTRY 24
#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY 28
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1110_SIZE_GENERAL_PARAMS_ENTRY 56
#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
#define SJA1105PQRS_SIZE_CBS_ENTRY 20
+#define SJA1110_SIZE_PCP_REMAPPING_ENTRY 4
/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
@@ -59,6 +74,7 @@ enum {
BLKID_GENERAL_PARAMS = 0x11,
BLKID_RETAGGING = 0x12,
BLKID_CBS = 0x13,
+ BLKID_PCP_REMAPPING = 0x1C,
BLKID_XMII_PARAMS = 0x4E,
};
@@ -83,6 +99,7 @@ enum sja1105_blk_idx {
BLK_IDX_RETAGGING,
BLK_IDX_CBS,
BLK_IDX_XMII_PARAMS,
+ BLK_IDX_PCP_REMAPPING,
BLK_IDX_MAX,
/* Fake block indices that are only valid for dynamic access */
BLK_IDX_MGMT_ROUTE,
@@ -91,15 +108,22 @@ enum sja1105_blk_idx {
};
#define SJA1105_MAX_SCHEDULE_COUNT 1024
+#define SJA1110_MAX_SCHEDULE_COUNT 4096
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1110_MAX_VL_LOOKUP_COUNT 4096
#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1110_MAX_VL_POLICING_COUNT 4096
#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
+#define SJA1110_MAX_VL_FORWARDING_COUNT 4096
#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1110_MAX_L2_POLICING_COUNT 110
#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1110_MAX_L2_FORWARDING_COUNT 19
#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1110_MAX_MAC_CONFIG_COUNT 11
#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
@@ -111,21 +135,40 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_AVB_PARAMS_COUNT 1
#define SJA1105ET_MAX_CBS_COUNT 10
#define SJA1105PQRS_MAX_CBS_COUNT 16
+#define SJA1110_MAX_CBS_COUNT 80
+#define SJA1110_MAX_PCP_REMAPPING_COUNT 11
#define SJA1105_MAX_FRAME_MEMORY 929
-#define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910
+#define SJA1110_MAX_FRAME_MEMORY 1820
+#define SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD 19
#define SJA1105_VL_FRAME_MEMORY 100
#define SJA1105E_DEVICE_ID 0x9C00000Cull
#define SJA1105T_DEVICE_ID 0x9E00030Eull
#define SJA1105PR_DEVICE_ID 0xAF00030Eull
#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+#define SJA1110_DEVICE_ID 0xB700030Full
#define SJA1105ET_PART_NO 0x9A83
#define SJA1105P_PART_NO 0x9A84
#define SJA1105Q_PART_NO 0x9A85
#define SJA1105R_PART_NO 0x9A86
#define SJA1105S_PART_NO 0x9A87
+#define SJA1110A_PART_NO 0x1110
+#define SJA1110B_PART_NO 0x1111
+#define SJA1110C_PART_NO 0x1112
+#define SJA1110D_PART_NO 0x1113
+
+#define SJA1110_ACU 0x1c4400
+#define SJA1110_RGU 0x1c6000
+#define SJA1110_CGU 0x1c6400
+
+#define SJA1110_SPI_ADDR(x) ((x) / 4)
+#define SJA1110_ACU_ADDR(x) (SJA1110_ACU + SJA1110_SPI_ADDR(x))
+#define SJA1110_CGU_ADDR(x) (SJA1110_CGU + SJA1110_SPI_ADDR(x))
+#define SJA1110_RGU_ADDR(x) (SJA1110_RGU + SJA1110_SPI_ADDR(x))
+
+#define SJA1105_RSV_ADDR 0xffffffffffffffffull
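+
+/* Illustrative note (not part of the original patch): the switch's address
+ * space is word-addressed over SPI, so SJA1110_SPI_ADDR() converts a byte
+ * offset into a 4-byte word offset before the block base is added, e.g.
+ * SJA1110_ACU_ADDR(0x18) == 0x1c4400 + 0x18 / 4 == 0x1c4406 and
+ * SJA1110_RGU_ADDR(0x100) == 0x1c6000 + 0x100 / 4 == 0x1c6040.
+ */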
struct sja1105_schedule_entry {
u64 winstindex;
@@ -171,6 +214,10 @@ struct sja1105_general_params_entry {
u64 egrmirrpcp;
u64 egrmirrdei;
u64 replay_port;
+ /* SJA1110 only */
+ u64 tte_en;
+ u64 tdmaconfigidx;
+ u64 header_type;
};
struct sja1105_schedule_entry_points_entry {
@@ -191,6 +238,7 @@ struct sja1105_vlan_lookup_entry {
u64 vlan_bc;
u64 tag_port;
u64 vlanid;
+ u64 type_entry; /* SJA1110 only */
};
struct sja1105_l2_lookup_entry {
@@ -203,11 +251,17 @@ struct sja1105_l2_lookup_entry {
u64 mask_iotag;
u64 mask_vlanid;
u64 mask_macaddr;
+ u64 mask_srcport;
u64 iotag;
+ u64 srcport;
u64 lockeds;
union {
/* LOCKEDS=1: Static FDB entries */
struct {
+ /* TSREG is deprecated in SJA1110; TRAP is supported only
+ * in SJA1110.
+ */
+ u64 trap;
u64 tsreg;
u64 mirrvlan;
u64 takets;
@@ -223,7 +277,7 @@ struct sja1105_l2_lookup_entry {
};
struct sja1105_l2_lookup_params_entry {
- u64 maxaddrp[5]; /* P/Q/R/S only */
+ u64 maxaddrp[SJA1105_MAX_NUM_PORTS]; /* P/Q/R/S only */
u64 start_dynspc; /* P/Q/R/S only */
u64 drpnolearn; /* P/Q/R/S only */
u64 use_static; /* P/Q/R/S only */
@@ -241,7 +295,9 @@ struct sja1105_l2_forwarding_entry {
u64 bc_domain;
u64 reach_port;
u64 fl_domain;
- u64 vlan_pmap[8];
+ /* This is actually max(SJA1105_NUM_TC, SJA1105_MAX_NUM_PORTS) */
+ u64 vlan_pmap[SJA1105_MAX_NUM_PORTS];
+ bool type_egrpcp2outputq;
};
struct sja1105_l2_forwarding_params_entry {
@@ -296,8 +352,8 @@ struct sja1105_retagging_entry {
};
struct sja1105_cbs_entry {
- u64 port;
- u64 prio;
+ u64 port; /* Not used for SJA1110 */
+ u64 prio; /* Not used for SJA1110 */
u64 credit_hi;
u64 credit_lo;
u64 send_slope;
@@ -305,8 +361,19 @@ struct sja1105_cbs_entry {
};
struct sja1105_xmii_params_entry {
- u64 phy_mac[5];
- u64 xmii_mode[5];
+ u64 phy_mac[SJA1105_MAX_NUM_PORTS];
+ u64 xmii_mode[SJA1105_MAX_NUM_PORTS];
+ /* The SJA1110 insists on being a snowflake and requires SGMII,
+ * 2500base-x and internal MII ports connected to the 100base-TX PHY to
+ * set this bit. We set it unconditionally from the high-level logic,
+ * and only sja1110_xmii_params_entry_packing writes it to the static
+ * config. I have no better name for it than "special".
+ */
+ u64 special[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1110_pcp_remapping_entry {
+ u64 egrpcp[SJA1105_NUM_TC];
};
enum {
@@ -387,6 +454,7 @@ extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX];
size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
void
@@ -412,7 +480,8 @@ typedef enum {
extern const char *sja1105_static_config_error_msg[];
sja1105_config_valid_t
-sja1105_static_config_check_valid(const struct sja1105_static_config *config);
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem);
void
sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
int sja1105_static_config_init(struct sja1105_static_config *config,
@@ -433,23 +502,47 @@ void sja1105_packing(void *buf, u64 *val, int start, int end,
/* Common implementations for the static and dynamic configs */
size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 31d8acff1f01..e6153848a950 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -27,7 +27,7 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
tas_data->enabled = false;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
offload = tas_data->offload[port];
@@ -164,6 +164,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct sja1105_schedule_entry *schedule;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int schedule_start_idx;
s64 entry_point_delta;
@@ -207,7 +208,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
}
/* Figure out the dimensioning of the problem */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (tas_data->offload[port]) {
num_entries += tas_data->offload[port]->num_entries;
num_cycles++;
@@ -269,7 +270,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
/* Relative base time */
s64 rbt;
@@ -468,6 +469,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
size_t num_entries = gating_cfg->num_entries;
struct tc_taprio_qopt_offload *dummy;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_gate_entry *e;
bool conflict;
int i = 0;
@@ -491,7 +493,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
if (port != -1) {
conflict = sja1105_tas_check_conflicts(priv, port, dummy);
} else {
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
conflict = sja1105_tas_check_conflicts(priv, port,
dummy);
if (conflict)
@@ -554,7 +556,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
}
}
- for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
+ for (other_port = 0; other_port < ds->num_ports; other_port++) {
if (other_port == port)
continue;
@@ -885,7 +887,7 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
cancel_work_sync(&priv->tas_data.tas_work);
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
continue;
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0c173ff51751..c05bd07e8221 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -39,7 +39,7 @@ struct sja1105_gating_config {
};
struct sja1105_tas_data {
- struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ struct tc_taprio_qopt_offload *offload[SJA1105_MAX_NUM_PORTS];
struct sja1105_gating_config gating_cfg;
enum sja1105_tas_state state;
enum sja1105_ptp_op last_op;
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index ffc4042b4502..f6e13e6c6a18 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -386,7 +386,7 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
if (rule->type != SJA1105_RULE_VL)
continue;
- for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
+ for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
vl_lookup[k].port = port;
vl_lookup[k].macaddr = rule->key.vl.dmac;
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index fde6e99274b6..130abb0f1438 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -79,6 +79,9 @@ static const struct xrs700x_mib xrs700x_mibs[] = {
XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};
+static const u8 eth_hsrsup_addr[ETH_ALEN] = {
+ 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
+
static void xrs700x_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -329,6 +332,54 @@ static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
return 0;
}
+/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
+ * range and forwards to the CPU port without discarding duplicates.
+ * This is required to correctly populate the HSR/PRP node_table.
+ * Leave the policy disabled; it will be enabled as needed.
+ */
+static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
+ int fwdport)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare 40 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
+ if (ret)
+ return ret;
+
+ /* Match the HSR/PRP supervision destination 01:15:4e:00:01:XX */
+ for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
+ eth_hsrsup_addr[i] |
+ (eth_hsrsup_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
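+	/* Illustrative note (not part of the original patch): the 40-bit
+	 * compare length programmed above matches only the first five octets,
+	 * 01:15:4e:00:01, so every supervision address 01:15:4e:00:01:XX in
+	 * the range hits this inbound policy filter.
+	 */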
+ /* Mirror HSR/PRP supervision to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
+ if (ret)
+ return ret;
+
+ if (fwdport >= 0)
+ val |= BIT(fwdport);
+
+ /* Allow must be set to prevent duplicate discard */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
bool cpu_port = dsa_is_cpu_port(ds, port);
@@ -511,6 +562,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct net_device *slave;
int ret, i, hsr_pair[2];
enum hsr_version ver;
+ bool fwd = false;
ret = hsr_get_version(hsr, &ver);
if (ret)
@@ -556,6 +608,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
if (ver == HSR_V1) {
val &= ~BIT(partner->index);
val &= ~BIT(port);
+ fwd = true;
}
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
@@ -565,6 +618,23 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+ /* Enable the inbound policy which allows HSR/PRP supervision forwarding
+ * to the CPU port without discarding duplicates. Continue to
+ * forward to redundant ports when in HSR mode while discarding
+ * duplicates.
+ */
+ ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);
+
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
@@ -611,6 +681,14 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+ /* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
+ * which allows HSR/PRP supervision forwarding to the CPU port without
+ * discarding duplicates.
+ */
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);
+
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
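A minimal sketch (illustrative only, not part of the patch) of what the 40-bit destination-MAC compare configured in xrs700x_port_add_hsrsup_ipf() buys: comparing five of the six octets leaves the final octet as a wildcard, which covers exactly the HSR/PRP supervision range 01:15:4e:00:01:00..ff.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static const uint8_t hsrsup_prefix[6] = { 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00 };

static bool is_hsr_supervision_dmac(const uint8_t dmac[6])
{
	/* 40 bits = 5 octets; the sixth octet is a wildcard */
	return memcmp(dmac, hsrsup_prefix, 5) == 0;
}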
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 741c67e546d4..7d7d3ffe25c3 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1464,7 +1464,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if (pdev) {
vp->pm_state_valid = 1;
pci_save_state(pdev);
- acpi_set_WOL(dev);
+ acpi_set_WOL(dev);
}
retval = register_netdev(dev);
if (retval == 0)
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 2488bfdb9133..8c321dfc7b3b 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -767,7 +767,7 @@ module_pcmcia_driver(axnet_cs_driver);
Paul Gortmaker : tweak ANK's above multicast changes a bit.
Paul Gortmaker : update packet statistics for v2.1.x
Alan Cox : support arbitrary stupid port mappings on the
- 68K Macintosh. Support >16bit I/O spaces
+ 68K Macintosh. Support >16bit I/O spaces
Paul Gortmaker : add kmod support for auto-loading of the 8390
module by all drivers that require it.
Alan Cox : Spinlocking work, added 'BUG_83C690'
@@ -1091,7 +1091,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
long e8390_base;
int interrupts, nr_serviced = 0, i;
struct ei_device *ei_local;
- int handled = 0;
+ int handled = 0;
unsigned long flags;
e8390_base = dev->base_addr;
@@ -1587,12 +1587,12 @@ static void do_set_multicast_list(struct net_device *dev)
}
outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
- if(dev->flags&IFF_PROMISC)
- outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
+ if(dev->flags&IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
- outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
- else
- outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+ outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
}
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9d3b1e0e425c..cac036706382 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1527,7 +1527,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff),
PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68),
PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997),
- PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8),
PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96),
PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96),
PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224),
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 3fe3b4dfa7c5..1d8ed7357b7f 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -347,11 +347,11 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
idev))) {
/* Avoid already found cards from previous calls */
if (pnp_device_attach(idev) < 0)
- continue;
+ continue;
if (pnp_activate_dev(idev) < 0) {
__again:
- pnp_device_detach(idev);
- continue;
+ pnp_device_detach(idev);
+ continue;
}
/* if no io and irq, search for next */
if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 1f0670cd3ea3..fbbd7f22c142 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
/* New style probing API */
dev = alloc_ei_netdev();
if (!dev)
- return -ENOMEM;
+ return -ENOMEM;
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index d77fafbc1530..c560ad06f0be 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev)
mdiobus_unregister(greth->mdio);
unregister_netdev(ndev);
- free_netdev(ndev);
of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
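+ /* greth is carved out of ndev's private area, so unmap the registers
+ * it references before free_netdev() releases that memory; the old
+ * order was a use-after-free.
+ */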
+ free_netdev(ndev);
+
return 0;
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1a7e4df9b3e9..9dc12b13061f 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1883,16 +1883,16 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
}
}
- if (ACE_IS_TIGON_I(ap)) {
- struct cmd cmd;
- cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
- cmd.code = 0;
- cmd.idx = 0;
- ace_issue_cmd(ap->regs, &cmd);
- } else {
- writel(0, &((ap->regs)->RxJumboPrd));
- wmb();
- }
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(ap->regs, &cmd);
+ } else {
+ writel(0, &((ap->regs)->RxJumboPrd));
+ wmb();
+ }
ap->jumbo = 0;
ap->rx_jumbo_skbprd = 0;
@@ -2489,9 +2489,9 @@ restart:
}
}
- wmb();
- ap->tx_prd = idx;
- ace_set_txprd(regs, ap, idx);
+ wmb();
+ ap->tx_prd = idx;
+ ace_set_txprd(regs, ap, idx);
if (flagsize & BD_FLG_COAL_NOW) {
netif_stop_queue(dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4164eacc5c28..f5ec35fa4c63 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -1042,8 +1042,6 @@ enum ena_admin_aenq_group {
};
enum ena_admin_aenq_notification_syndrome {
- ENA_ADMIN_SUSPEND = 0,
- ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 764852ead1d6..ab413fc1f68e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1979,7 +1979,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version !=
+ ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index c3be751e7379..3d6f0a466a9e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -151,11 +151,14 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
return 0;
/* bounce buffer was used, so write it and get a new one */
- if (pkt_ctrl->idx) {
+ if (likely(pkt_ctrl->idx)) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
@@ -185,8 +188,11 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
if (!pkt_ctrl->descs_left_in_line) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
@@ -406,8 +412,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push))
+ !buffer_to_push)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Push header wasn't provided in LLQ mode\n");
return -EINVAL;
+ }
rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
@@ -423,6 +432,9 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
+ if (rc)
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
}
@@ -482,8 +494,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to update sq tail\n");
return rc;
+ }
desc = get_sq_desc(io_sq);
if (unlikely(!desc))
@@ -512,8 +527,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to update sq tail of the last descriptor\n");
return rc;
+ }
rc = ena_com_close_bounce_buffer(io_sq);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 2fe7ccee55b2..27dae632efcb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -233,10 +233,13 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- if (sset != ETH_SS_STATS)
- return -EOPNOTSUPP;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ena_get_sw_stats_count(adapter) +
+ ena_get_hw_stats_count(adapter);
+ }
- return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
+ return -EOPNOTSUPP;
}
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
@@ -314,10 +317,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- if (sset != ETH_SS_STATS)
- return;
-
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ switch (sset) {
+ case ETH_SS_STATS:
+ ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ break;
+ }
}
static int ena_get_link_ksettings(struct net_device *netdev,
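The if-to-switch conversions above are about extensibility rather than behavior; a second stringset would become a one-case addition, e.g. (hedged sketch, private-flags support is hypothetical and not part of this patch):

	switch (sset) {
	case ETH_SS_STATS:
		return ena_get_sw_stats_count(adapter) +
		       ena_get_hw_stats_count(adapter);
	case ETH_SS_PRIV_FLAGS:			/* hypothetical */
		return ENA_PRIV_FLAGS_COUNT;	/* hypothetical constant */
	}

	return -EOPNOTSUPP;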
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 881f88754bf6..0e43000614ab 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -35,9 +35,6 @@ MODULE_LICENSE("GPL");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static struct ena_aenq_handlers aenq_handlers;
@@ -89,6 +86,12 @@ static void ena_increase_stat(u64 *statp, u64 cnt,
u64_stats_update_end(syncp);
}
+static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
+{
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
+}
+
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -147,7 +150,7 @@ static int ena_xmit_common(struct net_device *dev,
netif_dbg(adapter, tx_queued, dev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
- ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+ ena_ring_tx_doorbell(ring);
}
/* prepare the packet's descriptors to dma engine */
@@ -197,7 +200,6 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
int ret;
xdp_ring = ena_napi->xdp_ring;
- xdp_ring->first_interrupt = ena_napi->first_interrupt;
xdp_budget = budget;
@@ -229,6 +231,7 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
xdp_ring->tx_stats.napi_comp += napi_comp_call;
xdp_ring->tx_stats.tx_poll++;
u64_stats_update_end(&xdp_ring->syncp);
+ xdp_ring->tx_stats.last_napi_jiffies = jiffies;
return ret;
}
@@ -236,36 +239,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
struct ena_tx_buffer *tx_info,
struct xdp_frame *xdpf,
- void **push_hdr,
- u32 *push_len)
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
- dma_addr_t dma = 0;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
u32 size;
tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
size = tx_info->xdpf->len;
- ena_buf = tx_info->bufs;
- /* llq push buffer */
- *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
- *push_hdr = tx_info->xdpf->data;
+ if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
- if (size - *push_len > 0) {
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
+
+ if (size > 0) {
dma = dma_map_single(xdp_ring->dev,
- *push_hdr + *push_len,
- size - *push_len,
+ data,
+ size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
goto error_report_dma_error;
- tx_info->map_linear_data = 1;
- tx_info->num_of_bufs = 1;
- }
+ tx_info->map_linear_data = 0;
- ena_buf->paddr = dma;
- ena_buf->len = size;
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
return 0;
@@ -274,10 +289,6 @@ error_report_dma_error:
&xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
- xdp_return_frame_rx_napi(tx_info->xdpf);
- tx_info->xdpf = NULL;
- tx_info->num_of_bufs = 0;
-
return -EINVAL;
}
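The signature change pays off at the call site, roughly (a condensed sketch of the hunks below):

	/* before: two out-params, caller assembles the tx context */
	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.header_len = push_len;

	/* after: the map routine fills struct ena_com_tx_ctx itself */
	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);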
@@ -289,8 +300,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
u16 next_to_use, req_id;
- void *push_hdr;
- u32 push_len;
int rc;
next_to_use = xdp_ring->next_to_use;
@@ -298,15 +307,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
return rc;
- ena_tx_ctx.ena_bufs = tx_info->bufs;
- ena_tx_ctx.push_header = push_hdr;
- ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
ena_tx_ctx.req_id = req_id;
- ena_tx_ctx.header_len = push_len;
rc = ena_xmit_common(dev,
xdp_ring,
@@ -316,14 +321,12 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
xdpf->len);
if (rc)
goto error_unmap_dma;
- /* trigger the dma engine. ena_com_write_sq_doorbell()
- * has a mb
+
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * issues a memory barrier internally.
*/
- if (flags & XDP_XMIT_FLUSH) {
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
- &xdp_ring->syncp);
- }
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(xdp_ring);
return rc;
@@ -364,11 +367,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
}
/* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH) {
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
- &xdp_ring->syncp);
- }
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(xdp_ring);
spin_unlock(&xdp_ring->xdp_tx_lock);
@@ -383,9 +383,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
u32 verdict = XDP_PASS;
struct xdp_frame *xdpf;
u64 *xdp_stat;
- int qid;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
if (!xdp_prog)
@@ -404,8 +402,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
}
/* Find xmit queue */
- qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
- xdp_ring = &rx_ring->adapter->tx_ring[qid];
+ xdp_ring = rx_ring->xdp_ring;
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock);
@@ -443,8 +440,6 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
- rcu_read_unlock();
-
return verdict;
}
@@ -532,7 +527,7 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
} else {
ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = 0;
+ rx_ring->rx_headroom = NET_SKB_PAD;
}
}
}
@@ -681,7 +676,6 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
ring->cpu = 0;
- ring->first_interrupt = false;
ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -724,7 +718,9 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
rxr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
rxr->empty_rx_queue = 0;
+ rxr->rx_headroom = NET_SKB_PAD;
adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
}
}
}
@@ -978,47 +974,66 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i);
}
-static int ena_alloc_rx_page(struct ena_ring *rx_ring,
- struct ena_rx_buffer *rx_info, gfp_t gfp)
+static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+ dma_addr_t *dma)
{
- int headroom = rx_ring->rx_headroom;
- struct ena_com_buf *ena_buf;
struct page *page;
- dma_addr_t dma;
- /* restore page offset value in case it has been changed by device */
- rx_info->page_offset = headroom;
-
- /* if previous allocated page is not used */
- if (unlikely(rx_info->page))
- return 0;
-
- page = alloc_page(gfp);
- if (unlikely(!page)) {
+ /* dev_alloc_page() allocates the page on the NUMA node the executing
+ * code is running on.
+ */
+ page = dev_alloc_page();
+ if (!page) {
ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
&rx_ring->syncp);
- return -ENOMEM;
+ return ERR_PTR(-ENOSPC);
}
/* To enable NIC-side port-mirroring, AKA SPAN port,
* we make the buffer readable from the nic as well
*/
- dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+ *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
&rx_ring->syncp);
-
__free_page(page);
- return -EIO;
+ return ERR_PTR(-EIO);
}
+
+ return page;
+}
+
+static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ int headroom = rx_ring->rx_headroom;
+ struct ena_com_buf *ena_buf;
+ struct page *page;
+ dma_addr_t dma;
+ int tailroom;
+
+ /* restore page offset value in case it has been changed by device */
+ rx_info->page_offset = headroom;
+
+ /* if previous allocated page is not used */
+ if (unlikely(rx_info->page))
+ return 0;
+
+ /* We handle DMA here */
+ page = ena_alloc_map_page(rx_ring, &dma);
+ if (unlikely(IS_ERR(page)))
+ return PTR_ERR(page);
+
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Allocate page %p, rx_info %p\n", page, rx_info);
+ tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
rx_info->page = page;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma + headroom;
- ena_buf->len = ENA_PAGE_SIZE - headroom;
+ ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
return 0;
}
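The headroom/tailroom split above decides how much of each page the device may fill. On a typical x86-64 configuration the arithmetic works out as below (a sketch; the concrete values are assumptions for illustration, not taken from the patch):

	/* ENA_PAGE_SIZE = 4096
	 * headroom      = XDP_PACKET_HEADROOM (256) with XDP attached,
	 *                 NET_SKB_PAD (usually 64) otherwise
	 * tailroom      = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	 *                 320 on x86-64
	 * usable length = 4096 - 256 - 320 = 3520 bytes (XDP case)
	 */
	ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;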
@@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_info = &rx_ring->rx_buffer_info[req_id];
- rc = ena_alloc_rx_page(rx_ring, rx_info,
- GFP_ATOMIC | __GFP_COMP);
+ rc = ena_alloc_rx_buffer(rx_ring, rx_info);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate buffer for rx queue %d\n",
@@ -1384,21 +1398,23 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts;
}
-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
+static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
{
struct sk_buff *skb;
- if (frags)
- skb = napi_get_frags(rx_ring->napi);
- else
+ if (!first_frag)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_copybreak);
+ else
+ skb = build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
&rx_ring->syncp);
+
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Failed to allocate skb. frags: %d\n", frags);
+ "Failed to allocate skb. first_frag %s\n",
+ first_frag ? "provided" : "not provided");
return NULL;
}
@@ -1410,10 +1426,12 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u32 descs,
u16 *next_to_clean)
{
- struct sk_buff *skb;
struct ena_rx_buffer *rx_info;
u16 len, req_id, buf = 0;
- void *va;
+ struct sk_buff *skb;
+ void *page_addr;
+ u32 page_offset;
+ void *data_addr;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
@@ -1431,12 +1449,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info, rx_info->page);
/* save virt address of first buffer */
- va = page_address(rx_info->page) + rx_info->page_offset;
+ page_addr = page_address(rx_info->page);
+ page_offset = rx_info->page_offset;
+ data_addr = page_addr + page_offset;
- prefetch(va);
+ prefetch(data_addr);
if (len <= rx_ring->rx_copybreak) {
- skb = ena_alloc_skb(rx_ring, false);
+ skb = ena_alloc_skb(rx_ring, NULL);
if (unlikely(!skb))
return NULL;
@@ -1449,7 +1469,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
dma_unmap_addr(&rx_info->ena_buf, paddr),
len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, va, len);
+ skb_copy_to_linear_data(skb, data_addr, len);
dma_sync_single_for_device(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
len,
@@ -1463,16 +1483,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return skb;
}
- skb = ena_alloc_skb(rx_ring, true);
+ ena_unmap_rx_buff(rx_ring, rx_info);
+
+ skb = ena_alloc_skb(rx_ring, page_addr);
if (unlikely(!skb))
return NULL;
- do {
- ena_unmap_rx_buff(rx_ring, rx_info);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, ENA_PAGE_SIZE);
+ /* Populate skb's linear part */
+ skb_reserve(skb, page_offset);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ do {
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
@@ -1491,6 +1513,12 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
+
+ ena_unmap_rx_buff(rx_ring, rx_info);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ rx_info->page_offset, len, ENA_PAGE_SIZE);
+
} while (1);
return skb;
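For reference, the canonical build_skb() receive sequence the rewritten path follows (a minimal sketch under the patch's assumptions: the buffer is already DMA-unmapped and the payload starts at page_offset):

	skb = build_skb(page_addr, ENA_PAGE_SIZE);	/* head spans the page */
	if (unlikely(!skb))
		return NULL;
	skb_reserve(skb, page_offset);		/* skip headroom */
	skb_put(skb, len);			/* linear part = first buffer */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);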
@@ -1703,14 +1731,12 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
skb_record_rx_queue(skb, rx_ring->qid);
- if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
- total_len += rx_ring->ena_bufs[0].len;
+ if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
rx_copybreak_pkt++;
- napi_gro_receive(napi, skb);
- } else {
- total_len += skb->len;
- napi_gro_frags(napi);
- }
+
+ total_len += skb->len;
+
+ napi_gro_receive(napi, skb);
res_budget--;
} while (likely(res_budget));
@@ -1922,9 +1948,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring = ena_napi->tx_ring;
rx_ring = ena_napi->rx_ring;
- tx_ring->first_interrupt = ena_napi->first_interrupt;
- rx_ring->first_interrupt = ena_napi->first_interrupt;
-
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -1979,6 +2002,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring->tx_stats.tx_poll++;
u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
return ret;
}
@@ -2003,7 +2028,8 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- ena_napi->first_interrupt = true;
+ /* Used to check HW health */
+ WRITE_ONCE(ena_napi->first_interrupt, true);
WRITE_ONCE(ena_napi->interrupts_masked, true);
smp_wmb(); /* write interrupts_masked before calling napi */
@@ -3089,14 +3115,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
- /* trigger the dma engine. ena_com_write_sq_doorbell()
- * has a mb
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * issues a memory barrier internally.
*/
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
- &tx_ring->syncp);
- }
+ ena_ring_tx_doorbell(tx_ring);
return NETDEV_TX_OK;
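Both the XDP and regular TX paths now share the same batching rule: the doorbell (which carries a memory barrier and a device MMIO write) is deferred while netdev_xmit_more() promises further packets, so a burst costs one doorbell instead of one per packet:

	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		ena_ring_tx_doorbell(tx_ring);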
@@ -3346,7 +3369,7 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
llq_feature_mask = 1 << ENA_ADMIN_LLQ;
if (!(ena_dev->supported_features & llq_feature_mask)) {
- dev_err(&pdev->dev,
+ dev_warn(&pdev->dev,
"LLQ is not supported Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
@@ -3657,7 +3680,9 @@ static void ena_fw_reset_device(struct work_struct *work)
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
struct ena_ring *rx_ring)
{
- if (likely(rx_ring->first_interrupt))
+ struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
+
+ if (likely(READ_ONCE(ena_napi->first_interrupt)))
return 0;
if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
@@ -3681,6 +3706,10 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
+ struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ unsigned int time_since_last_napi;
+ unsigned int missing_tx_comp_to;
+ bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
u32 missed_tx = 0;
@@ -3694,8 +3723,10 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
/* no pending Tx at this location */
continue;
- if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
- 2 * adapter->missing_tx_completion_to))) {
+ is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to);
+
+ if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
/* If after graceful period interrupt is still not
* received, we schedule a reset
*/
@@ -3708,12 +3739,17 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
return -EIO;
}
- if (unlikely(time_is_before_jiffies(last_jiffies +
- adapter->missing_tx_completion_to))) {
- if (!tx_buf->print_once)
+ is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
+ adapter->missing_tx_completion_to);
+
+ if (unlikely(is_tx_comp_time_expired)) {
+ if (!tx_buf->print_once) {
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
- tx_ring->qid, i);
+ "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
+ tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ }
tx_buf->print_once = 1;
missed_tx++;
@@ -4244,7 +4280,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ena_dev = ena_dev;
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ adapter->msg_enable = DEFAULT_MSG_ENABLE;
ena_dev->net_device = netdev;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 74af15d62ee1..0c39fc2fa345 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -55,12 +55,6 @@
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
-/* limit the buffer size to 600 bytes to handle MTU changes from very
- * small to very large, in which case the number of buffers per packet
- * could exceed ENA_PKT_MAX_BUFS
- */
-#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600
-
#define ENA_MIN_MTU 128
#define ENA_NAME_MAX_LEN 20
@@ -135,12 +129,12 @@ struct ena_irq {
};
struct ena_napi {
- struct napi_struct napi ____cacheline_aligned;
+ u8 first_interrupt ____cacheline_aligned;
+ u8 interrupts_masked;
+ struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
struct ena_ring *xdp_ring;
- bool first_interrupt;
- bool interrupts_masked;
u32 qid;
struct dim dim;
};
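The reshuffle above is deliberate: the two flags written from hard-IRQ context get their own cache line ahead of the NAPI state, and every cross-context access goes through the *_ONCE() accessors. Condensed from the hunks above:

	/* IRQ handler (producer) */
	WRITE_ONCE(ena_napi->first_interrupt, true);

	/* health-check path (consumer, different context) */
	if (likely(READ_ONCE(ena_napi->first_interrupt)))
		return 0;	/* an interrupt arrived; queue looks alive */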
@@ -212,6 +206,7 @@ struct ena_stats_tx {
u64 llq_buffer_copy;
u64 missed_tx;
u64 unmask_interrupt;
+ u64 last_napi_jiffies;
};
struct ena_stats_rx {
@@ -259,6 +254,10 @@ struct ena_ring {
struct bpf_prog *xdp_bpf_prog;
struct xdp_rxq_info xdp_rxq;
spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */
+ /* Used for rx queues only to point to the xdp tx ring, to
+ * which traffic should be redirected from this rx ring.
+ */
+ struct ena_ring *xdp_ring;
u16 next_to_use;
u16 next_to_clean;
@@ -271,7 +270,6 @@ struct ena_ring {
/* The maximum header length the device can handle */
u8 tx_max_header_size;
- bool first_interrupt;
bool disable_meta_caching;
u16 no_interrupt_event_cnt;
@@ -414,11 +412,6 @@ enum ena_xdp_errors_t {
ENA_XDP_NO_ENOUGH_QUEUES,
};
-static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
-{
- return adapter->xdp_first_ring != 0;
-}
-
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
return !!adapter->xdp_bpf_prog;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 4a1220cc6f10..9cac5aa75a73 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -19,14 +19,14 @@ Module Name:
Abstract:
- AMD8111 based 10/100 Ethernet Controller Driver.
+ AMD8111 based 10/100 Ethernet Controller Driver.
Environment:
Kernel Mode
Revision History:
- 3.0.0
+ 3.0.0
Initial Revision.
3.0.1
1. Dynamic interrupt coalescing.
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 493f154eccf4..37da79da5f5e 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -10,14 +10,14 @@ Module Name:
Abstract:
- AMD8111 based 10/100 Ethernet Controller driver definitions.
+ AMD8111 based 10/100 Ethernet Controller driver definitions.
Environment:
Kernel Mode
Revision History:
- 3.0.0
+ 3.0.0
Initial Revision.
3.0.1
*/
@@ -692,7 +692,7 @@ enum coal_type{
};
enum coal_mode{
- RX_INTR_COAL,
+ RX_INTR_COAL,
TX_INTR_COAL,
DISABLE_COAL,
ENABLE_COAL,
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index c1eab916438f..36f54d13a2eb 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -706,7 +706,7 @@ static void lance_init_ring( struct net_device *dev )
CHECK_OFFSET(offset);
MEM->tx_head[i].base = offset;
MEM->tx_head[i].flag = TMD1_OWN_HOST;
- MEM->tx_head[i].base_hi = 0;
+ MEM->tx_head[i].base_hi = 0;
MEM->tx_head[i].length = 0;
MEM->tx_head[i].misc = 0;
offset += PKT_BUF_SZ;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 7282ce55ffb8..493b0cefcc2a 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -937,7 +937,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
static void lance_load_multicast(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index aff44241988c..2178e6b89dbd 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -780,7 +780,7 @@ lance_open(struct net_device *dev)
outw(0x0002, ioaddr+LANCE_ADDR);
/* Only touch autoselect bit. */
outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
- }
+ }
if (lance_debug > 1)
printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
@@ -812,7 +812,7 @@ lance_open(struct net_device *dev)
* We used to clear the InitDone bit, 0x0100, here but Mark Stockton
* reports that doing so triggers a bug in the '974.
*/
- outw(0x0042, ioaddr+LANCE_DATA);
+ outw(0x0042, ioaddr+LANCE_DATA);
if (lance_debug > 2)
printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index c38edf6f03a3..5c1cfb0c4a42 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -193,7 +193,7 @@ static struct card {
.vendor_id = ni_vendor,
.cardname = "ni6510",
.config = 0x1,
- },
+ },
{
.id0 = NI65_EB_ID0,
.id1 = NI65_EB_ID1,
@@ -204,7 +204,7 @@ static struct card {
.vendor_id = ni_vendor,
.cardname = "ni6510 EtherBlaster",
.config = 0x2,
- },
+ },
{
.id0 = NE2100_ID0,
.id1 = NE2100_ID1,
@@ -1232,15 +1232,15 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
- dev_ni65 = ni65_probe(-1);
+ dev_ni65 = ni65_probe(-1);
return PTR_ERR_OR_ZERO(dev_ni65);
}
void __exit cleanup_module(void)
{
- unregister_netdev(dev_ni65);
- cleanup_card(dev_ni65);
- free_netdev(dev_ni65);
+ unregister_netdev(dev_ni65);
+ cleanup_card(dev_ni65);
+ free_netdev(dev_ni65);
}
#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 11c0b13edd30..4019cab87505 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -541,7 +541,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
if(++ct > 500)
{
pr_err("reset failed, card removed?\n");
- return -1;
+ return -1;
}
udelay(1);
}
@@ -585,11 +585,11 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
ct = 0;
while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
{
- if(++ ct > 500)
- {
+ if(++ ct > 500)
+ {
pr_err("ADDRCHG timeout, card removed?\n");
- return -1;
- }
+ return -1;
+ }
}
/* Set PADR register */
for (i = 0; i < ETH_ALEN; i++)
@@ -655,7 +655,7 @@ static int nmclan_config(struct pcmcia_device *link)
}
if(mace_init(lp, ioaddr, dev->dev_addr) == -1)
- goto failed;
+ goto failed;
/* The if_port symbol can be set when the module is loaded */
if (if_port <= 2)
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 00ae1081254d..f8d7a9387a56 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -150,7 +150,7 @@ struct lance_memory {
struct lance_private {
volatile unsigned short *iobase;
struct lance_memory *mem;
- int new_rx, new_tx; /* The next free ring entry */
+ int new_rx, new_tx; /* The next free ring entry */
int old_tx, old_rx; /* ring entry to be processed */
/* These two must be longs for set_bit() */
long tx_full;
@@ -465,7 +465,7 @@ static void lance_init_ring( struct net_device *dev )
for( i = 0; i < TX_RING_SIZE; i++ ) {
MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
MEM->tx_head[i].flag = 0;
- MEM->tx_head[i].base_hi =
+ MEM->tx_head[i].base_hi =
(dvma_vtob(MEM->tx_data[i])) >>16;
MEM->tx_head[i].length = 0;
MEM->tx_head[i].misc = 0;
@@ -581,8 +581,8 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
AREG = CSR0;
- DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
- dev->name, DREG ));
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
#ifdef CONFIG_SUN3X
/* this weirdness doesn't appear on sun3... */
@@ -636,8 +636,8 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate send poll. */
REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
AREG = CSR0;
- DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
- dev->name, DREG ));
+ DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
+ dev->name, DREG ));
dev_kfree_skb(skb);
lp->lock = 0;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 1e4e402f07d7..a989d2df59ad 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -477,26 +477,26 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
config = bmread(dev, RXCFG);
bmwrite(dev, RXCFG, (config & ~RxMACEnable));
config = bmread(dev, TXCFG);
- bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
- /* disable rx and tx dma */
+ /* disable rx and tx dma */
rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
- /* free some skb's */
- for (i=0; i<N_RX_RING; i++) {
- if (bp->rx_bufs[i] != NULL) {
- dev_kfree_skb(bp->rx_bufs[i]);
- bp->rx_bufs[i] = NULL;
- }
- }
- for (i = 0; i<N_TX_RING; i++) {
+ /* free some skb's */
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = 0; i<N_TX_RING; i++) {
if (bp->tx_bufs[i] != NULL) {
dev_kfree_skb(bp->tx_bufs[i]);
bp->tx_bufs[i] = NULL;
}
}
}
- pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
return 0;
}
@@ -510,9 +510,9 @@ static int bmac_resume(struct macio_dev *mdev)
bmac_reset_and_enable(dev);
enable_irq(dev->irq);
- enable_irq(bp->tx_dma_intr);
- enable_irq(bp->rx_dma_intr);
- netif_device_attach(dev);
+ enable_irq(bp->tx_dma_intr);
+ enable_irq(bp->rx_dma_intr);
+ netif_device_attach(dev);
return 0;
}
@@ -1599,7 +1599,7 @@ static int bmac_remove(struct macio_dev *mdev)
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(dev->irq, dev);
free_irq(bp->tx_dma_intr, dev);
free_irq(bp->rx_dma_intr, dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 9e5006e59215..4b80e3a52a19 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -364,9 +364,9 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->iac, 0);
if (mp->port_aaui)
- out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
+ out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
else
- out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
+ out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
static void __mace_set_address(struct net_device *dev, void *addr)
@@ -378,9 +378,9 @@ static void __mace_set_address(struct net_device *dev, void *addr)
/* load up the hardware address */
if (mp->chipid == BROKEN_ADDRCHG_REV)
- out_8(&mb->iac, PHYADDR);
+ out_8(&mb->iac, PHYADDR);
else {
- out_8(&mb->iac, ADDRCHG | PHYADDR);
+ out_8(&mb->iac, ADDRCHG | PHYADDR);
while ((in_8(&mb->iac) & ADDRCHG) != 0)
;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
index f5fba8b8cdea..a47e2710487e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
@@ -91,7 +91,7 @@ struct aq_macsec_txsc {
u32 hw_sc_idx;
unsigned long tx_sa_idx_busy;
const struct macsec_secy *sw_secy;
- u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
struct aq_macsec_tx_sc_stats stats;
struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
};
@@ -101,7 +101,7 @@ struct aq_macsec_rxsc {
unsigned long rx_sa_idx_busy;
const struct macsec_secy *sw_secy;
const struct macsec_rx_sc *sw_rxsc;
- u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
};
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 48ecdf15eddc..1c9ca3bcb871 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* emac-rockchip.c - Rockchip EMAC specific glue layer
*
* Copyright (C) 2014 Romain Perier <romain.perier@gmail.com>
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 9d0e74f6b089..693006c5a498 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -137,6 +137,8 @@ struct alx_priv {
/* protects hw.stats */
spinlock_t stats_lock;
+
+ struct mutex mtx;
};
extern const struct ethtool_ops alx_ethtool_ops;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 2f4eabf652e8..b716adacd815 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -163,8 +163,10 @@ static int alx_get_link_ksettings(struct net_device *netdev,
}
}
+ mutex_lock(&alx->mtx);
cmd->base.speed = hw->link_speed;
cmd->base.duplex = hw->duplex;
+ mutex_unlock(&alx->mtx);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
@@ -181,8 +183,7 @@ static int alx_set_link_ksettings(struct net_device *netdev,
struct alx_hw *hw = &alx->hw;
u32 adv_cfg;
u32 advertising;
-
- ASSERT_RTNL();
+ int ret;
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
@@ -200,7 +201,12 @@ static int alx_set_link_ksettings(struct net_device *netdev,
}
hw->adv_cfg = adv_cfg;
- return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+
+ mutex_lock(&alx->mtx);
+ ret = alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+ mutex_unlock(&alx->mtx);
+
+ return ret;
}
static void alx_get_pauseparam(struct net_device *netdev,
@@ -209,10 +215,12 @@ static void alx_get_pauseparam(struct net_device *netdev,
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
+ mutex_lock(&alx->mtx);
pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG &&
hw->adv_cfg & ADVERTISED_Autoneg);
pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX);
pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX);
+ mutex_unlock(&alx->mtx);
}
@@ -232,7 +240,7 @@ static int alx_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
fc |= ALX_FC_ANEG;
- ASSERT_RTNL();
+ mutex_lock(&alx->mtx);
/* restart auto-neg for auto-mode */
if (hw->adv_cfg & ADVERTISED_Autoneg) {
@@ -245,8 +253,10 @@ static int alx_set_pauseparam(struct net_device *netdev,
if (reconfig_phy) {
err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
- if (err)
+ if (err) {
+ mutex_unlock(&alx->mtx);
return err;
+ }
}
/* flow control on mac */
@@ -254,6 +264,7 @@ static int alx_set_pauseparam(struct net_device *netdev,
alx_cfg_mac_flowcontrol(hw, fc);
hw->flowctrl = fc;
+ mutex_unlock(&alx->mtx);
return 0;
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b3d74332ed33..11ef1fbe7aee 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (c) 2013, 2021 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -1091,8 +1091,9 @@ static int alx_init_sw(struct alx_priv *alx)
ALX_MAC_CTRL_RXFC_EN |
ALX_MAC_CTRL_TXFC_EN |
7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+ mutex_init(&alx->mtx);
- return err;
+ return 0;
}
@@ -1122,6 +1123,8 @@ static void alx_halt(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ lockdep_assert_held(&alx->mtx);
+
alx_netif_stop(alx);
hw->link_speed = SPEED_UNKNOWN;
hw->duplex = DUPLEX_UNKNOWN;
@@ -1147,6 +1150,8 @@ static void alx_configure(struct alx_priv *alx)
static void alx_activate(struct alx_priv *alx)
{
+ lockdep_assert_held(&alx->mtx);
+
/* hardware setting lost, restore it */
alx_reinit_rings(alx);
alx_configure(alx);
@@ -1161,7 +1166,7 @@ static void alx_activate(struct alx_priv *alx)
static void alx_reinit(struct alx_priv *alx)
{
- ASSERT_RTNL();
+ lockdep_assert_held(&alx->mtx);
alx_halt(alx);
alx_activate(alx);
@@ -1249,6 +1254,8 @@ out_disable_adv_intr:
static void __alx_stop(struct alx_priv *alx)
{
+ lockdep_assert_held(&alx->mtx);
+
alx_free_irq(alx);
cancel_work_sync(&alx->link_check_wk);
@@ -1284,6 +1291,8 @@ static void alx_check_link(struct alx_priv *alx)
int old_speed;
int err;
+ lockdep_assert_held(&alx->mtx);
+
/* clear PHY internal interrupt status, otherwise the main
* interrupt status will be asserted forever
*/
@@ -1338,12 +1347,24 @@ reset:
static int alx_open(struct net_device *netdev)
{
- return __alx_open(netdev_priv(netdev), false);
+ struct alx_priv *alx = netdev_priv(netdev);
+ int ret;
+
+ mutex_lock(&alx->mtx);
+ ret = __alx_open(alx, false);
+ mutex_unlock(&alx->mtx);
+
+ return ret;
}
static int alx_stop(struct net_device *netdev)
{
- __alx_stop(netdev_priv(netdev));
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ mutex_lock(&alx->mtx);
+ __alx_stop(alx);
+ mutex_unlock(&alx->mtx);
+
return 0;
}
@@ -1353,18 +1374,18 @@ static void alx_link_check(struct work_struct *work)
alx = container_of(work, struct alx_priv, link_check_wk);
- rtnl_lock();
+ mutex_lock(&alx->mtx);
alx_check_link(alx);
- rtnl_unlock();
+ mutex_unlock(&alx->mtx);
}
static void alx_reset(struct work_struct *work)
{
struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
- rtnl_lock();
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
- rtnl_unlock();
+ mutex_unlock(&alx->mtx);
}
static int alx_tpd_req(struct sk_buff *skb)
@@ -1771,6 +1792,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap;
}
+ mutex_lock(&alx->mtx);
+
alx_reset_pcie(hw);
phy_configured = alx_phy_configured(hw);
@@ -1781,7 +1804,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = alx_reset_mac(hw);
if (err) {
dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
- goto out_unmap;
+ goto out_unlock;
}
/* setup link to put it in a known good starting state */
@@ -1791,7 +1814,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"failed to configure PHY speed/duplex (err=%d)\n",
err);
- goto out_unmap;
+ goto out_unlock;
}
}
@@ -1824,9 +1847,11 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!alx_get_phy_info(hw)) {
dev_err(&pdev->dev, "failed to identify PHY\n");
err = -EIO;
- goto out_unmap;
+ goto out_unlock;
}
+ mutex_unlock(&alx->mtx);
+
INIT_WORK(&alx->link_check_wk, alx_link_check);
INIT_WORK(&alx->reset_wk, alx_reset);
netif_carrier_off(netdev);
@@ -1843,12 +1868,15 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+out_unlock:
+ mutex_unlock(&alx->mtx);
out_unmap:
iounmap(hw->hw_addr);
out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
+ pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1869,6 +1897,8 @@ static void alx_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
+ mutex_destroy(&alx->mtx);
+
free_netdev(alx->dev);
}
@@ -1880,7 +1910,11 @@ static int alx_suspend(struct device *dev)
if (!netif_running(alx->dev))
return 0;
netif_device_detach(alx->dev);
+
+ mutex_lock(&alx->mtx);
__alx_stop(alx);
+ mutex_unlock(&alx->mtx);
+
return 0;
}
@@ -1890,20 +1924,23 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ mutex_lock(&alx->mtx);
alx_reset_phy(hw);
- if (!netif_running(alx->dev))
- return 0;
+ if (!netif_running(alx->dev)) {
+ err = 0;
+ goto unlock;
+ }
- rtnl_lock();
err = __alx_open(alx, true);
- rtnl_unlock();
if (err)
- return err;
+ goto unlock;
netif_device_attach(alx->dev);
- return 0;
+unlock:
+ mutex_unlock(&alx->mtx);
+ return err;
}
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
@@ -1922,7 +1959,7 @@ static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "pci error detected\n");
- rtnl_lock();
+ mutex_lock(&alx->mtx);
if (netif_running(netdev)) {
netif_device_detach(netdev);
@@ -1934,7 +1971,7 @@ static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
else
pci_disable_device(pdev);
- rtnl_unlock();
+ mutex_unlock(&alx->mtx);
return rc;
}
@@ -1947,7 +1984,7 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
dev_info(&pdev->dev, "pci error slot reset\n");
- rtnl_lock();
+ mutex_lock(&alx->mtx);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
@@ -1960,7 +1997,7 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
if (!alx_reset_mac(hw))
rc = PCI_ERS_RESULT_RECOVERED;
out:
- rtnl_unlock();
+ mutex_unlock(&alx->mtx);
return rc;
}
@@ -1972,14 +2009,14 @@ static void alx_pci_error_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "pci error resume\n");
- rtnl_lock();
+ mutex_lock(&alx->mtx);
if (netif_running(netdev)) {
alx_activate(alx);
netif_device_attach(netdev);
}
- rtnl_unlock();
+ mutex_unlock(&alx->mtx);
}
static const struct pci_error_handlers alx_err_handlers = {
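The net effect of the series on alx locking, condensed (names as in the patch): paths that used to piggyback on the global RTNL lock now serialize on the driver-private mutex, so PCI error recovery and link work no longer contend with unrelated rtnetlink activity.

static void alx_reinit(struct alx_priv *alx)
{
	lockdep_assert_held(&alx->mtx);	/* was: ASSERT_RTNL() */

	alx_halt(alx);
	alx_activate(alx);
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	mutex_lock(&alx->mtx);		/* was: rtnl_lock() */
	alx_reinit(alx);
	mutex_unlock(&alx->mtx);	/* was: rtnl_unlock() */
}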
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 28ae5c16831e..43d821fe7a54 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -63,7 +63,7 @@
#define AT_MAX_RECEIVE_QUEUE 4
#define AT_DEF_RECEIVE_QUEUE 1
-#define AT_MAX_TRANSMIT_QUEUE 2
+#define AT_MAX_TRANSMIT_QUEUE 4
#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
@@ -241,6 +241,8 @@ struct atl1c_tpd_ext_desc {
#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \
((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6)
+#define RRS_MT_PROT_ID_TCPUDP BIT(19)
+
struct atl1c_recv_ret_status {
__le32 word0;
__le32 rss_hash;
@@ -289,11 +291,7 @@ enum atl1c_nic_type {
athr_l2c_b2,
athr_l1d,
athr_l1d_2,
-};
-
-enum atl1c_trans_queue {
- atl1c_trans_normal = 0,
- atl1c_trans_high = 1
+ athr_mt,
};
struct atl1c_hw_stats {
@@ -472,13 +470,16 @@ struct atl1c_buffer {
/* transmit packet descriptor (tpd) ring */
struct atl1c_tpd_ring {
+ struct atl1c_adapter *adapter;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
+ u16 num;
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
struct atl1c_buffer *buffer_info;
+ struct napi_struct napi;
};
/* receive free descriptor (rfd) ring */
@@ -494,27 +495,30 @@ struct atl1c_rfd_ring {
/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
+ struct atl1c_adapter *adapter;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
+ u16 num;
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
u16 next_to_clean;
+ struct napi_struct napi;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
};
/* board specific private data structure */
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- struct napi_struct napi;
- struct napi_struct tx_napi;
- struct page *rx_page;
- unsigned int rx_page_offset;
unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
u16 rx_buffer_len;
+ unsigned int tx_queue_count;
+ unsigned int rx_queue_count;
unsigned long flags;
#define __AT_TESTING 0x0001
@@ -540,8 +544,8 @@ struct atl1c_adapter {
/* All Descriptor memory */
struct atl1c_ring_header ring_header;
struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
- struct atl1c_rfd_ring rfd_ring;
- struct atl1c_rrd_ring rrd_ring;
+ struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
+ struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
u32 bd_number; /* board number;*/
};
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 140358dcf61e..7dff20350865 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -636,6 +636,23 @@ int atl1c_phy_init(struct atl1c_hw *hw)
return 0;
}
+bool atl1c_get_link_status(struct atl1c_hw *hw)
+{
+ u16 phy_data;
+
+ if (hw->nic_type == athr_mt) {
+ u32 spd;
+
+ AT_READ_REG(hw, REG_MT_SPEED, &spd);
+ return !!spd;
+ }
+
+ /* MII_BMSR must be read twice */
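+ /* (LSTATUS in BMSR is latched low per IEEE 802.3: the first read
+ * returns and clears any sticky link-fail indication, the second
+ * reflects the live link state.)
+ */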
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ return !!(phy_data & BMSR_LSTATUS);
+}
+
/*
* Detects the current speed and duplex settings of the hardware.
*
@@ -648,6 +665,15 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
int err;
u16 phy_data;
+ if (hw->nic_type == athr_mt) {
+ u32 spd;
+
+ AT_READ_REG(hw, REG_MT_SPEED, &spd);
+ *speed = spd;
+ *duplex = FULL_DUPLEX;
+ return 0;
+ }
+
/* Read PHY Specific Status Register (17) */
err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data);
if (err)
@@ -686,15 +712,12 @@ int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
u16 save_autoneg_advertised;
- u16 phy_data;
u16 mii_lpa_data;
u16 speed = SPEED_0;
u16 duplex = FULL_DUPLEX;
int i;
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
- if (phy_data & BMSR_LSTATUS) {
+ if (atl1c_get_link_status(hw)) {
atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
if (mii_lpa_data & LPA_10FULL)
autoneg_advertised = ADVERTISED_10baseT_Full;
@@ -717,9 +740,7 @@ int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
if (mii_lpa_data) {
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
mdelay(100);
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
- if (phy_data & BMSR_LSTATUS) {
+ if (atl1c_get_link_status(hw)) {
if (atl1c_get_speed_and_duplex(hw, &speed,
&duplex) != 0)
dev_dbg(&pdev->dev,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index ce1a123dce2c..c567c920628f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -26,6 +26,7 @@ void atl1c_phy_disable(struct atl1c_hw *hw);
void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr);
int atl1c_phy_reset(struct atl1c_hw *hw);
int atl1c_read_mac_addr(struct atl1c_hw *hw);
+bool atl1c_get_link_status(struct atl1c_hw *hw);
int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr);
void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value);
@@ -527,15 +528,24 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define REG_RX_BASE_ADDR_HI 0x1540
#define REG_TX_BASE_ADDR_HI 0x1544
#define REG_RFD0_HEAD_ADDR_LO 0x1550
+#define REG_RFD1_HEAD_ADDR_LO 0x1554
+#define REG_RFD2_HEAD_ADDR_LO 0x1558
+#define REG_RFD3_HEAD_ADDR_LO 0x155C
#define REG_RFD_RING_SIZE 0x1560
#define RFD_RING_SIZE_MASK 0x0FFF
#define REG_RX_BUF_SIZE 0x1564
#define RX_BUF_SIZE_MASK 0xFFFF
#define REG_RRD0_HEAD_ADDR_LO 0x1568
+#define REG_RRD1_HEAD_ADDR_LO 0x156C
+#define REG_RRD2_HEAD_ADDR_LO 0x1570
+#define REG_RRD3_HEAD_ADDR_LO 0x1574
#define REG_RRD_RING_SIZE 0x1578
#define RRD_RING_SIZE_MASK 0x0FFF
#define REG_TPD_PRI1_ADDR_LO 0x157C
#define REG_TPD_PRI0_ADDR_LO 0x1580
+#define REG_TPD_PRI2_ADDR_LO 0x1F10
+#define REG_TPD_PRI3_ADDR_LO 0x1F14
+
#define REG_TPD_RING_SIZE 0x1584
#define TPD_RING_SIZE_MASK 0xFFFF
@@ -654,15 +664,26 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
/* Mail box */
#define MB_RFDX_PROD_IDX_MASK 0xFFFF
#define REG_MB_RFD0_PROD_IDX 0x15E0
+#define REG_MB_RFD1_PROD_IDX 0x15E4
+#define REG_MB_RFD2_PROD_IDX 0x15E8
+#define REG_MB_RFD3_PROD_IDX 0x15EC
#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
+#define REG_TPD_PRI3_PIDX 0x1F18
+#define REG_TPD_PRI2_PIDX 0x1F1A
+#define REG_TPD_PRI3_CIDX 0x1F1C
+#define REG_TPD_PRI2_CIDX 0x1F1E
+
#define REG_MB_RFD01_CONS_IDX 0x15F8
#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
+#define REG_MB_RFD23_CONS_IDX 0x15FC
+#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
+#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
/* Interrupt Status Register */
#define REG_ISR 0x1600
@@ -686,7 +707,7 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
/* GPHY low power state interrupt */
#define ISR_GPHY_LPW 0x00002000
#define ISR_TXQ_TO_RST 0x00004000
-#define ISR_TX_PKT 0x00008000
+#define ISR_TX_PKT_0 0x00008000
#define ISR_RX_PKT_0 0x00010000
#define ISR_RX_PKT_1 0x00020000
#define ISR_RX_PKT_2 0x00040000
@@ -698,6 +719,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define ISR_NFERR_DETECTED 0x01000000
#define ISR_CERR_DETECTED 0x02000000
#define ISR_PHY_LINKDOWN 0x04000000
+#define ISR_TX_PKT_1 0x10000000
+#define ISR_TX_PKT_2 0x20000000
+#define ISR_TX_PKT_3 0x40000000
#define ISR_DIS_INT 0x80000000
/* Interrupt Mask Register */
@@ -712,11 +736,15 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
ISR_TXQ_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_GPHY |\
- ISR_TX_PKT |\
- ISR_RX_PKT_0 |\
ISR_GPHY_LPW |\
ISR_PHY_LINKDOWN)
+#define ISR_TX_PKT ( \
+ ISR_TX_PKT_0 | \
+ ISR_TX_PKT_1 | \
+ ISR_TX_PKT_2 | \
+ ISR_TX_PKT_3)
+
#define ISR_RX_PKT (\
ISR_RX_PKT_0 |\
ISR_RX_PKT_1 |\
@@ -764,6 +792,14 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
+#define REG_MT_MAGIC 0x1F00
+#define REG_MT_MODE 0x1F04
+#define REG_MT_SPEED 0x1F08
+#define REG_MT_VERSION 0x1F0C
+
+#define MT_MAGIC 0xaabb1234
+#define MT_MODE_4Q BIT(0)
+
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c6263cf8d3c0..1c6246a5dc22 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -36,18 +36,51 @@ MODULE_AUTHOR("Qualcomm Atheros Inc.");
MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
+struct atl1c_qregs {
+ u16 tpd_addr_lo;
+ u16 tpd_prod;
+ u16 tpd_cons;
+ u16 rfd_addr_lo;
+ u16 rrd_addr_lo;
+ u16 rfd_prod;
+ u32 tx_isr;
+ u32 rx_isr;
+};
+
+static struct atl1c_qregs atl1c_qregs[AT_MAX_TRANSMIT_QUEUE] = {
+ {
+ REG_TPD_PRI0_ADDR_LO, REG_TPD_PRI0_PIDX, REG_TPD_PRI0_CIDX,
+ REG_RFD0_HEAD_ADDR_LO, REG_RRD0_HEAD_ADDR_LO,
+ REG_MB_RFD0_PROD_IDX, ISR_TX_PKT_0, ISR_RX_PKT_0
+ },
+ {
+ REG_TPD_PRI1_ADDR_LO, REG_TPD_PRI1_PIDX, REG_TPD_PRI1_CIDX,
+ REG_RFD1_HEAD_ADDR_LO, REG_RRD1_HEAD_ADDR_LO,
+ REG_MB_RFD1_PROD_IDX, ISR_TX_PKT_1, ISR_RX_PKT_1
+ },
+ {
+ REG_TPD_PRI2_ADDR_LO, REG_TPD_PRI2_PIDX, REG_TPD_PRI2_CIDX,
+ REG_RFD2_HEAD_ADDR_LO, REG_RRD2_HEAD_ADDR_LO,
+ REG_MB_RFD2_PROD_IDX, ISR_TX_PKT_2, ISR_RX_PKT_2
+ },
+ {
+ REG_TPD_PRI3_ADDR_LO, REG_TPD_PRI3_PIDX, REG_TPD_PRI3_CIDX,
+ REG_RFD3_HEAD_ADDR_LO, REG_RRD3_HEAD_ADDR_LO,
+ REG_MB_RFD3_PROD_IDX, ISR_TX_PKT_3, ISR_RX_PKT_3
+ },
+};
+
static int atl1c_stop_mac(struct atl1c_hw *hw);
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
static void atl1c_start_mac(struct atl1c_adapter *adapter);
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
- int *work_done, int work_to_do);
static int atl1c_up(struct atl1c_adapter *adapter);
static void atl1c_down(struct atl1c_adapter *adapter);
static int atl1c_reset_mac(struct atl1c_hw *hw);
static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
static int atl1c_configure(struct atl1c_adapter *adapter);
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode);
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode);
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
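
The atl1c_qregs[] table above turns per-queue register offsets into data, so ring code can be parameterized by a queue index instead of duplicating near-identical register writes. A self-contained sketch of the idiom, with hypothetical offsets and names:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: per-queue register map indexed by queue number; the offsets
 * below are made up for illustration.
 */
struct eg_qregs {
	u16 prod_idx;	/* producer index register offset */
	u16 cons_idx;	/* consumer index register offset */
};

static const struct eg_qregs eg_qregs[4] = {
	{ 0x100, 0x104 },
	{ 0x110, 0x114 },
	{ 0x120, 0x124 },
	{ 0x130, 0x134 },
};

static void eg_ring_doorbell(void __iomem *base, u32 queue, u16 prod)
{
	/* One helper serves every queue; only the table row differs. */
	writew(prod, base + eg_qregs[queue].prod_idx);
}
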
@@ -232,15 +265,14 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err;
unsigned long flags;
- u16 speed, duplex, phy_data;
+ u16 speed, duplex;
+ bool link;
spin_lock_irqsave(&adapter->mdio_lock, flags);
- /* MII_BMSR must read twise */
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
- atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ link = atl1c_get_link_status(hw);
spin_unlock_irqrestore(&adapter->mdio_lock, flags);
- if ((phy_data & BMSR_LSTATUS) == 0) {
+ if (!link) {
/* link down */
netif_carrier_off(netdev);
hw->hibernate = true;
@@ -284,16 +316,13 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- u16 phy_data;
- u16 link_up;
+ bool link;
spin_lock(&adapter->mdio_lock);
- atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
- atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ link = atl1c_get_link_status(&adapter->hw);
spin_unlock(&adapter->mdio_lock);
- link_up = phy_data & BMSR_LSTATUS;
/* notify upper layer link down ASAP */
- if (!link_up) {
+ if (!link) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
netif_carrier_off(netdev);
@@ -436,7 +465,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
}
/**
- * atl1c_set_mac - Change the Ethernet Address of the NIC
+ * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
*
@@ -478,6 +507,9 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
@@ -487,8 +519,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
- if (netdev->mtu > MAX_TSO_FRAME_SIZE)
- features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ if (hw->nic_type != athr_mt) {
+ if (netdev->mtu > MAX_TSO_FRAME_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
return features;
}
@@ -515,9 +549,12 @@ static void atl1c_set_max_mtu(struct net_device *netdev)
case athr_l1d:
case athr_l1d_2:
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ case athr_mt:
+ netdev->max_mtu = 9500;
break;
- /* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
+ /* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
default:
netdev->max_mtu = ETH_DATA_LEN;
break;
@@ -642,29 +679,26 @@ static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
return 0;
}
-static void atl1c_set_mac_type(struct atl1c_hw *hw)
+static enum atl1c_nic_type atl1c_get_mac_type(struct pci_dev *pdev,
+ u8 __iomem *hw_addr)
{
- switch (hw->device_id) {
+ switch (pdev->device) {
case PCI_DEVICE_ID_ATTANSIC_L2C:
- hw->nic_type = athr_l2c;
- break;
+ return athr_l2c;
case PCI_DEVICE_ID_ATTANSIC_L1C:
- hw->nic_type = athr_l1c;
- break;
+ return athr_l1c;
case PCI_DEVICE_ID_ATHEROS_L2C_B:
- hw->nic_type = athr_l2c_b;
- break;
+ return athr_l2c_b;
case PCI_DEVICE_ID_ATHEROS_L2C_B2:
- hw->nic_type = athr_l2c_b2;
- break;
+ return athr_l2c_b2;
case PCI_DEVICE_ID_ATHEROS_L1D:
- hw->nic_type = athr_l1d;
- break;
+ return athr_l1d;
case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
- hw->nic_type = athr_l1d_2;
- break;
+ if (readl(hw_addr + REG_MT_MAGIC) == MT_MAGIC)
+ return athr_mt;
+ return athr_l1d_2;
default:
- break;
+ return athr_l1c;
}
}
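
atl1c_get_mac_type() now takes the PCI device and the mapped BAR instead of the hw struct, because the MT variant reuses the L1D_2_0 device ID and can only be told apart by a magic value in MMIO space. Roughly, the probe-time check looks like this (constants mirror REG_MT_MAGIC and MT_MAGIC defined in atl1c_hw.h above):

#include <linux/io.h>

#define EG_REG_MAGIC	0x1F00		/* mirrors REG_MT_MAGIC */
#define EG_MAGIC	0xaabb1234	/* mirrors MT_MAGIC */

/* Sketch: distinguish a variant hidden behind a shared PCI device ID
 * by probing a magic register before the rest of init runs.
 */
static bool eg_is_mt_variant(u8 __iomem *hw_addr)
{
	return readl(hw_addr + EG_REG_MAGIC) == EG_MAGIC;
}
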
@@ -672,7 +706,6 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
{
u32 link_ctrl_data;
- atl1c_set_mac_type(hw);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
@@ -763,14 +796,14 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 revision;
-
+ int i;
adapter->wol = 0;
device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->tpd_ring[0].count = 1024;
- adapter->rfd_ring.count = 512;
+ adapter->rfd_ring[0].count = 512;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
@@ -788,6 +821,10 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_patch_assign(hw);
hw->intr_mask = IMR_NORMAL_MASK;
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ hw->intr_mask |= atl1c_qregs[i].tx_isr;
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ hw->intr_mask |= atl1c_qregs[i].rx_isr;
hw->phy_configured = false;
hw->preamble_len = 7;
hw->max_frame_size = adapter->netdev->mtu;
@@ -847,12 +884,12 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
- * @type: type of transmit queue
+ * @queue: idx of transmit queue
*/
static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -875,11 +912,12 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
+ * @queue: idx of receive queue
*/
-static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
+static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
int j;
@@ -902,26 +940,28 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
{
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_buffer *buffer_info;
int i, j;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ for (i = 0; i < adapter->tx_queue_count; i++) {
tpd_ring[i].next_to_use = 0;
atomic_set(&tpd_ring[i].next_to_clean, 0);
buffer_info = tpd_ring[i].buffer_info;
for (j = 0; j < tpd_ring->count; j++)
ATL1C_SET_BUFFER_STATE(&buffer_info[i],
- ATL1C_BUFFER_FREE);
- }
- rfd_ring->next_to_use = 0;
- rfd_ring->next_to_clean = 0;
- rrd_ring->next_to_use = 0;
- rrd_ring->next_to_clean = 0;
- for (j = 0; j < rfd_ring->count; j++) {
- buffer_info = &rfd_ring->buffer_info[j];
- ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ ATL1C_BUFFER_FREE);
+ }
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ rfd_ring[i].next_to_use = 0;
+ rfd_ring[i].next_to_clean = 0;
+ rrd_ring[i].next_to_use = 0;
+ rrd_ring[i].next_to_clean = 0;
+ for (j = 0; j < rfd_ring[i].count; j++) {
+ buffer_info = &rfd_ring[i].buffer_info[j];
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ }
}
}
@@ -934,25 +974,29 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
+ int i;
dma_free_coherent(&pdev->dev, adapter->ring_header.size,
adapter->ring_header.desc, adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tdp_ring.buffer_info,
- * it contain rfd_ring.buffer_info, do not double free */
+ * it contains rfd_ring.buffer_info, do not double free
+ */
if (adapter->tpd_ring[0].buffer_info) {
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
- if (adapter->rx_page) {
- put_page(adapter->rx_page);
- adapter->rx_page = NULL;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ if (adapter->rrd_ring[i].rx_page) {
+ put_page(adapter->rrd_ring[i].rx_page);
+ adapter->rrd_ring[i].rx_page = NULL;
+ }
}
}
/**
- * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
+ * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
* Return 0 on success, negative on failure
@@ -961,37 +1005,46 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_ring_header *ring_header = &adapter->ring_header;
+ int tqc = adapter->tx_queue_count;
+ int rqc = adapter->rx_queue_count;
int size;
int i;
int count = 0;
- int rx_desc_count = 0;
u32 offset = 0;
- rrd_ring->count = rfd_ring->count;
- for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
+ /* Even though only one tpd queue is actually used, the "high"
+ * priority tpd queue also gets initialized
+ */
+ if (tqc == 1)
+ tqc = 2;
+
+ for (i = 1; i < tqc; i++)
tpd_ring[i].count = tpd_ring[0].count;
- /* 2 tpd queue, one high priority queue,
- * another normal priority queue */
- size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
- rfd_ring->count);
+ size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc +
+ rfd_ring->count * rqc);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
if (unlikely(!tpd_ring->buffer_info))
goto err_nomem;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
- tpd_ring[i].buffer_info =
- (tpd_ring->buffer_info + count);
+ for (i = 0; i < tqc; i++) {
+ tpd_ring[i].adapter = adapter;
+ tpd_ring[i].num = i;
+ tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
- rfd_ring->buffer_info =
- (tpd_ring->buffer_info + count);
- count += rfd_ring->count;
- rx_desc_count += rfd_ring->count;
+ for (i = 0; i < rqc; i++) {
+ rrd_ring[i].adapter = adapter;
+ rrd_ring[i].num = i;
+ rrd_ring[i].count = rfd_ring[0].count;
+ rfd_ring[i].count = rfd_ring[0].count;
+ rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
+ count += rfd_ring->count;
+ }
/*
* real ring DMA buffer
@@ -999,9 +1052,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* additional bytes tacked onto the end.
*/
ring_header->size = size =
- sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
- sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
- sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
+ sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
+ sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
+ sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
8 * 4;
ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
@@ -1014,25 +1067,28 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
tpd_ring[0].dma = roundup(ring_header->dma, 8);
offset = tpd_ring[0].dma - ring_header->dma;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ for (i = 0; i < tqc; i++) {
tpd_ring[i].dma = ring_header->dma + offset;
- tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
+ tpd_ring[i].desc = (u8 *)ring_header->desc + offset;
tpd_ring[i].size =
sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
offset += roundup(tpd_ring[i].size, 8);
}
- /* init RFD ring */
- rfd_ring->dma = ring_header->dma + offset;
- rfd_ring->desc = (u8 *) ring_header->desc + offset;
- rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
- offset += roundup(rfd_ring->size, 8);
+ for (i = 0; i < rqc; i++) {
+ /* init RFD ring */
+ rfd_ring[i].dma = ring_header->dma + offset;
+ rfd_ring[i].desc = (u8 *)ring_header->desc + offset;
+ rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
+ rfd_ring[i].count;
+ offset += roundup(rfd_ring[i].size, 8);
- /* init RRD ring */
- rrd_ring->dma = ring_header->dma + offset;
- rrd_ring->desc = (u8 *) ring_header->desc + offset;
- rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
- rrd_ring->count;
- offset += roundup(rrd_ring->size, 8);
+ /* init RRD ring */
+ rrd_ring[i].dma = ring_header->dma + offset;
+ rrd_ring[i].desc = (u8 *)ring_header->desc + offset;
+ rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
+ rrd_ring[i].count;
+ offset += roundup(rrd_ring[i].size, 8);
+ }
return 0;
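
Ring setup keeps the original single dma_alloc_coherent() block and simply carves more slices out of it, rounding each ring up to 8 bytes as before. A standalone sketch of the layout arithmetic, assuming equally sized rings:

#include <linux/dma-mapping.h>
#include <linux/math.h>

/* Sketch: carve N equally sized rings out of one coherent allocation,
 * keeping each ring 8-byte aligned as the hardware expects.
 */
static int eg_alloc_rings(struct device *dev, int nrings, size_t ring_bytes,
			  void *desc[], dma_addr_t dma[])
{
	size_t total = nrings * roundup(ring_bytes, 8) + 8;
	dma_addr_t base_dma;
	size_t off;
	void *base;
	int i;

	base = dma_alloc_coherent(dev, total, &base_dma, GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* Align the first ring, then step by the rounded ring size. */
	off = roundup(base_dma, 8) - base_dma;
	for (i = 0; i < nrings; i++) {
		desc[i] = base + off;
		dma[i] = base_dma + off;
		off += roundup(ring_bytes, 8);
	}
	return 0;
}
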
@@ -1044,31 +1100,34 @@ err_nomem:
static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ int i;
+ int tx_queue_count = adapter->tx_queue_count;
+
+ if (tx_queue_count == 1)
+ tx_queue_count = 2;
/* TPD */
AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
- (u32)((tpd_ring[atl1c_trans_normal].dma &
- AT_DMA_HI_ADDR_MASK) >> 32));
+ (u32)((tpd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
/* just enable normal priority TX queue */
- AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
- (u32)(tpd_ring[atl1c_trans_normal].dma &
- AT_DMA_LO_ADDR_MASK));
- AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
- (u32)(tpd_ring[atl1c_trans_high].dma &
- AT_DMA_LO_ADDR_MASK));
+ for (i = 0; i < tx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].tpd_addr_lo,
+ (u32)(tpd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
(u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
/* RFD */
AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
- (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
- AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
- (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
+ (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].rfd_addr_lo,
+ (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
rfd_ring->count & RFD_RING_SIZE_MASK);
@@ -1076,8 +1135,10 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
/* RRD */
- AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
- (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].rrd_addr_lo,
+ (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
(rrd_ring->count & RRD_RING_SIZE_MASK));
@@ -1358,7 +1419,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
}
/**
- * atl1c_configure - Configure Transmit&Receive Unit after Reset
+ * atl1c_configure_mac - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
* Configure the Tx /Rx unit of the MAC after a reset.
@@ -1430,14 +1491,28 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
+ int i;
+
+ if (adapter->hw.nic_type == athr_mt) {
+ u32 mode;
+
+ AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode);
+ if (adapter->rx_queue_count == 4)
+ mode |= MT_MODE_4Q;
+ else
+ mode &= ~MT_MODE_4Q;
+ AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode);
+ }
atl1c_init_ring_ptrs(adapter);
atl1c_set_multi(netdev);
atl1c_restore_vlan(adapter);
- num = atl1c_alloc_rx_buffer(adapter, false);
- if (unlikely(num == 0))
- return -ENOMEM;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ num = atl1c_alloc_rx_buffer(adapter, i, false);
+ if (unlikely(num == 0))
+ return -ENOMEM;
+ }
if (atl1c_configure_mac(adapter))
return -EIO;
@@ -1533,9 +1608,11 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static int atl1c_clean_tx(struct napi_struct *napi, int budget)
{
- struct atl1c_adapter *adapter =
- container_of(napi, struct atl1c_adapter, tx_napi);
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal];
+ struct atl1c_tpd_ring *tpd_ring =
+ container_of(napi, struct atl1c_tpd_ring, napi);
+ struct atl1c_adapter *adapter = tpd_ring->adapter;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(napi->dev, tpd_ring->num);
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1543,7 +1620,8 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
unsigned int total_bytes = 0, total_packets = 0;
unsigned long flags;
- AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean);
+ AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons,
+ &hw_next_to_clean);
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1557,17 +1635,15 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
- netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
- if (netif_queue_stopped(adapter->netdev) &&
- netif_carrier_ok(adapter->netdev)) {
- netif_wake_queue(adapter->netdev);
- }
+ if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev))
+ netif_tx_wake_queue(txq);
if (total_packets < budget) {
napi_complete_done(napi, total_packets);
spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
- adapter->hw.intr_mask |= ISR_TX_PKT;
+ adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
return total_packets;
@@ -1575,6 +1651,38 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
return budget;
}
+static void atl1c_intr_rx_tx(struct atl1c_adapter *adapter, u32 status)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 intr_mask;
+ int i;
+
+ spin_lock(&hw->intr_mask_lock);
+ intr_mask = hw->intr_mask;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ if (!(status & atl1c_qregs[i].rx_isr))
+ continue;
+ if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) {
+ intr_mask &= ~atl1c_qregs[i].rx_isr;
+ __napi_schedule(&adapter->rrd_ring[i].napi);
+ }
+ }
+ for (i = 0; i < adapter->tx_queue_count; ++i) {
+ if (!(status & atl1c_qregs[i].tx_isr))
+ continue;
+ if (napi_schedule_prep(&adapter->tpd_ring[i].napi)) {
+ intr_mask &= ~atl1c_qregs[i].tx_isr;
+ __napi_schedule(&adapter->tpd_ring[i].napi);
+ }
+ }
+
+ if (hw->intr_mask != intr_mask) {
+ hw->intr_mask = intr_mask;
+ AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ }
+ spin_unlock(&hw->intr_mask_lock);
+}
+
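
atl1c_intr_rx_tx() folds what used to be two copies of the schedule-and-mask sequence into loops over the per-queue ISR bits: each ring embeds its own napi_struct, and a queue's interrupt source stays masked until its poll routine finishes. The core pattern, sketched with a hypothetical ring structure:

#include <linux/netdevice.h>

/* Sketch: per-ring NAPI embedded in the ring struct and recovered via
 * container_of() in the poll callback. All names are illustrative.
 */
struct eg_ring {
	struct napi_struct napi;
	int num;
};

static int eg_poll(struct napi_struct *napi, int budget)
{
	struct eg_ring *ring = container_of(napi, struct eg_ring, napi);
	int done = 0;

	/* ... clean up to 'budget' descriptors on ring->num ... */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static bool eg_irq_kick(struct eg_ring *ring)
{
	/* True means the poll was armed; the caller should then also
	 * clear this queue's bit in the interrupt mask register.
	 */
	if (napi_schedule_prep(&ring->napi)) {
		__napi_schedule(&ring->napi);
		return true;
	}
	return false;
}
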
/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
@@ -1605,24 +1713,8 @@ static irqreturn_t atl1c_intr(int irq, void *data)
atl1c_clear_phy_int(adapter);
/* Ack ISR */
AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
- if (status & ISR_RX_PKT) {
- if (likely(napi_schedule_prep(&adapter->napi))) {
- spin_lock(&hw->intr_mask_lock);
- hw->intr_mask &= ~ISR_RX_PKT;
- AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- spin_unlock(&hw->intr_mask_lock);
- __napi_schedule(&adapter->napi);
- }
- }
- if (status & ISR_TX_PKT) {
- if (napi_schedule_prep(&adapter->tx_napi)) {
- spin_lock(&hw->intr_mask_lock);
- hw->intr_mask &= ~ISR_TX_PKT;
- AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- spin_unlock(&hw->intr_mask_lock);
- __napi_schedule(&adapter->tx_napi);
- }
- }
+ if (status & (ISR_RX_PKT | ISR_TX_PKT))
+ atl1c_intr_rx_tx(adapter, status);
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
@@ -1659,6 +1751,11 @@ static irqreturn_t atl1c_intr(int irq, void *data)
static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
{
+ if (adapter->hw.nic_type == athr_mt) {
+ if (prrs->word3 & RRS_MT_PROT_ID_TCPUDP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
/*
* The pid field in RRS is not correct sometimes, so we
* cannot figure out if the packet is fragmented or not,
@@ -1668,44 +1765,47 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
}
static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
- bool napi_mode)
+ u32 queue, bool napi_mode)
{
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct sk_buff *skb;
struct page *page;
if (adapter->rx_frag_size > PAGE_SIZE) {
if (likely(napi_mode))
- return napi_alloc_skb(&adapter->napi,
+ return napi_alloc_skb(&rrd_ring->napi,
adapter->rx_buffer_len);
else
return netdev_alloc_skb_ip_align(adapter->netdev,
adapter->rx_buffer_len);
}
- page = adapter->rx_page;
+ page = rrd_ring->rx_page;
if (!page) {
- adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(GFP_ATOMIC);
if (unlikely(!page))
return NULL;
- adapter->rx_page_offset = 0;
+ rrd_ring->rx_page = page;
+ rrd_ring->rx_page_offset = 0;
}
- skb = build_skb(page_address(page) + adapter->rx_page_offset,
+ skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- adapter->rx_page_offset += adapter->rx_frag_size;
- if (adapter->rx_page_offset >= PAGE_SIZE)
- adapter->rx_page = NULL;
+ rrd_ring->rx_page_offset += adapter->rx_frag_size;
+ if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+ rrd_ring->rx_page = NULL;
else
get_page(page);
}
return skb;
}
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode)
{
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
@@ -1724,7 +1824,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter, napi_mode);
+ skb = atl1c_alloc_skb(adapter, queue, napi_mode);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
@@ -1766,8 +1866,8 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
/* TODO: update mailbox here */
wmb();
rfd_ring->next_to_use = rfd_next_to_use;
- AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
- rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
+ AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod,
+ rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
}
return num_alloc;
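
atl1c_alloc_skb() now keeps one partially used page per RRD ring instead of one per adapter, so receive queues no longer share page state. The underlying trick is unchanged: hand fixed-size slices of a page to build_skb() and take an extra page reference while unconsumed slices remain. A standalone sketch (the caller is assumed to pass a frag size that already includes skb_shared_info overhead):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Sketch: split one page into fixed-size frags for build_skb(). */
struct eg_page_pool {
	struct page *page;
	unsigned int offset;
};

static struct sk_buff *eg_alloc_frag_skb(struct eg_page_pool *pool,
					 unsigned int frag_size)
{
	struct sk_buff *skb;

	if (!pool->page) {
		pool->page = alloc_page(GFP_ATOMIC);
		if (!pool->page)
			return NULL;
		pool->offset = 0;
	}

	skb = build_skb(page_address(pool->page) + pool->offset, frag_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	pool->offset += frag_size;
	if (pool->offset >= PAGE_SIZE)
		pool->page = NULL;	/* skb now owns the last reference */
	else
		get_page(pool->page);	/* keep a reference for the pool */

	return skb;
}
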
@@ -1805,22 +1905,33 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
rfd_ring->next_to_clean = rfd_index;
}
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
- int *work_done, int work_to_do)
+/**
+ * atl1c_clean_rx - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: limit of packets to clean
+ */
+static int atl1c_clean_rx(struct napi_struct *napi, int budget)
{
+ struct atl1c_rrd_ring *rrd_ring =
+ container_of(napi, struct atl1c_rrd_ring, napi);
+ struct atl1c_adapter *adapter = rrd_ring->adapter;
u16 rfd_num, rfd_index;
- u16 count = 0;
u16 length;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num];
struct sk_buff *skb;
struct atl1c_recv_ret_status *rrs;
struct atl1c_buffer *buffer_info;
+ int work_done = 0;
+ unsigned long flags;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
while (1) {
- if (*work_done >= work_to_do)
+ if (work_done >= budget)
break;
rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
@@ -1874,38 +1985,18 @@ rrs_checked:
vlan = le16_to_cpu(vlan);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
}
- napi_gro_receive(&adapter->napi, skb);
+ napi_gro_receive(napi, skb);
- (*work_done)++;
- count++;
+ work_done++;
}
- if (count)
- atl1c_alloc_rx_buffer(adapter, true);
-}
-
-/**
- * atl1c_clean - NAPI Rx polling callback
- * @napi: napi info
- * @budget: limit of packets to clean
- */
-static int atl1c_clean(struct napi_struct *napi, int budget)
-{
- struct atl1c_adapter *adapter =
- container_of(napi, struct atl1c_adapter, napi);
- int work_done = 0;
- unsigned long flags;
-
- /* Keep link state information with original netdev */
- if (!netif_carrier_ok(adapter->netdev))
- goto quit_polling;
- /* just enable one RXQ */
- atl1c_clean_rx_irq(adapter, &work_done, budget);
+ if (work_done)
+ atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true);
if (work_done < budget) {
quit_polling:
napi_complete_done(napi, work_done);
spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
- adapter->hw.intr_mask |= ISR_RX_PKT;
+ adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
}
@@ -1929,9 +2020,9 @@ static void atl1c_netpoll(struct net_device *netdev)
}
#endif
-static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
+static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
u16 next_to_use = 0;
u16 next_to_clean = 0;
@@ -1949,9 +2040,9 @@ static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_tran
* there is enough tpd to use
*/
static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
struct atl1c_tpd_desc *tpd_desc;
u16 next_to_use = 0;
@@ -1993,7 +2084,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
static int atl1c_tso_csum(struct atl1c_adapter *adapter,
struct sk_buff *skb,
struct atl1c_tpd_desc **tpd,
- enum atl1c_trans_queue type)
+ u32 queue)
{
struct pci_dev *pdev = adapter->pdev;
unsigned short offload_type;
@@ -2038,7 +2129,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*(struct atl1c_tpd_ext_desc **)(tpd);
memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
- *tpd = atl1c_get_tpd(adapter, type);
+ *tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
@@ -2090,9 +2181,9 @@ check_sum:
static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
struct atl1c_tpd_desc *first_tpd,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue];
struct atl1c_buffer *buffer_info;
struct atl1c_tpd_desc *tpd;
u16 first_index, index;
@@ -2111,8 +2202,8 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
}
static int atl1c_tx_map(struct atl1c_adapter *adapter,
- struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
- enum atl1c_trans_queue type)
+ struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
+ u32 queue)
{
struct atl1c_tpd_desc *use_tpd = NULL;
struct atl1c_buffer *buffer_info = NULL;
@@ -2152,7 +2243,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
if (mapped_len == 0)
use_tpd = tpd;
else {
- use_tpd = atl1c_get_tpd(adapter, type);
+ use_tpd = atl1c_get_tpd(adapter, queue);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
}
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2174,7 +2265,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- use_tpd = atl1c_get_tpd(adapter, type);
+ use_tpd = atl1c_get_tpd(adapter, queue);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2207,23 +2298,22 @@ err_dma:
return -1;
}
-static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
- struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
+static void atl1c_tx_queue(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
- u16 reg;
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
- reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
- AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
+ AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod,
+ tpd_ring->next_to_use);
}
static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- u16 tpd_req;
+ u32 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue);
struct atl1c_tpd_desc *tpd;
- enum atl1c_trans_queue type = atl1c_trans_normal;
+ u16 tpd_req;
if (test_bit(__AT_DOWN, &adapter->flags)) {
dev_kfree_skb_any(skb);
@@ -2232,16 +2322,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
tpd_req = atl1c_cal_tpd_req(skb);
- if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ if (atl1c_tpd_avail(adapter, queue) < tpd_req) {
/* not enough descriptors, just stop queue */
- netif_stop_queue(netdev);
+ atl1c_tx_queue(adapter, queue);
+ netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
}
- tpd = atl1c_get_tpd(adapter, type);
+ tpd = atl1c_get_tpd(adapter, queue);
/* do TSO and check sum */
- if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
+ if (atl1c_tso_csum(adapter, skb, &tpd, queue) != 0) {
+ atl1c_tx_queue(adapter, queue);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2259,15 +2351,17 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
- if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+ if (atl1c_tx_map(adapter, skb, tpd, queue) < 0) {
netif_info(adapter, tx_done, adapter->netdev,
"tx-skb dropped due to dma error\n");
/* roll back tpd/buffer */
- atl1c_tx_rollback(adapter, tpd, type);
+ atl1c_tx_rollback(adapter, tpd, queue);
dev_kfree_skb_any(skb);
} else {
- netdev_sent_queue(adapter->netdev, skb->len);
- atl1c_tx_queue(adapter, skb, tpd, type);
+ bool more = netdev_xmit_more();
+
+ if (__netdev_tx_sent_queue(txq, skb->len, more))
+ atl1c_tx_queue(adapter, queue);
}
return NETDEV_TX_OK;
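
The transmit path switches to the per-txq byte-queue-limit helpers, and __netdev_tx_sent_queue() lets the doorbell write in atl1c_tx_queue() be deferred while the stack indicates more frames are coming (netdev_xmit_more()). Sketched in isolation, with a hypothetical doorbell callback:

#include <linux/netdevice.h>

/* Sketch: defer the doorbell across an xmit_more burst. The
 * ring_doorbell callback is a placeholder for the device kick.
 */
static netdev_tx_t eg_xmit(struct sk_buff *skb, struct net_device *netdev,
			   void (*ring_doorbell)(struct net_device *, u32))
{
	u32 queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue);

	/* ... map the skb and fill descriptors for 'queue' here ... */

	/* Returns true when the hardware should be kicked now, i.e. at
	 * the end of a burst or when BQL stops the queue.
	 */
	if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
		ring_doorbell(netdev, queue);

	return NETDEV_TX_OK;
}
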
@@ -2321,16 +2415,19 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
{
+ int i;
/* release tx-pending skbs and reset tx/rx ring index */
- atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
- atl1c_clean_tx_ring(adapter, atl1c_trans_high);
- atl1c_clean_rx_ring(adapter);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ atl1c_clean_tx_ring(adapter, i);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ atl1c_clean_rx_ring(adapter, i);
}
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
+ int i;
netif_carrier_off(netdev);
@@ -2344,20 +2441,24 @@ static int atl1c_up(struct atl1c_adapter *adapter)
atl1c_check_link_status(adapter);
clear_bit(__AT_DOWN, &adapter->flags);
- napi_enable(&adapter->napi);
- napi_enable(&adapter->tx_napi);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ napi_enable(&adapter->tpd_ring[i].napi);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ napi_enable(&adapter->rrd_ring[i].napi);
atl1c_irq_enable(adapter);
netif_start_queue(netdev);
return err;
err_up:
- atl1c_clean_rx_ring(adapter);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ atl1c_clean_rx_ring(adapter, i);
return err;
}
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ int i;
atl1c_del_timer(adapter);
adapter->work_event = 0; /* clear all event */
@@ -2365,8 +2466,10 @@ static void atl1c_down(struct atl1c_adapter *adapter)
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
netif_carrier_off(netdev);
- napi_disable(&adapter->napi);
- napi_disable(&adapter->tx_napi);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ napi_disable(&adapter->tpd_ring[i].napi);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ napi_disable(&adapter->rrd_ring[i].napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
/* disable ASPM if device inactive */
@@ -2551,8 +2654,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct atl1c_adapter *adapter;
static int cards_found;
-
+ u8 __iomem *hw_addr;
+ enum atl1c_nic_type nic_type;
+ u32 queue_count = 1;
int err = 0;
+ int i;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
@@ -2585,7 +2691,18 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
+ hw_addr = pci_ioremap_bar(pdev, 0);
+ if (!hw_addr) {
+ err = -EIO;
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_ioremap;
+ }
+
+ nic_type = atl1c_get_mac_type(pdev, hw_addr);
+ if (nic_type == athr_mt)
+ queue_count = 4;
+
+ netdev = alloc_etherdev_mq(sizeof(struct atl1c_adapter), queue_count);
if (netdev == NULL) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -2601,13 +2718,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.adapter = adapter;
+ adapter->hw.nic_type = nic_type;
adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
- adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
- if (!adapter->hw.hw_addr) {
- err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
- goto err_ioremap;
- }
+ adapter->hw.hw_addr = hw_addr;
+ adapter->tx_queue_count = queue_count;
+ adapter->rx_queue_count = queue_count;
/* init mii data */
adapter->mii.dev = netdev;
@@ -2616,8 +2731,12 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
dev_set_threaded(netdev, true);
- netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
+ atl1c_clean_rx, 64);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx, 64);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2670,11 +2789,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_reset:
err_register:
err_sw_init:
- iounmap(adapter->hw.hw_addr);
err_init_netdev:
-err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ iounmap(hw_addr);
+err_ioremap:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
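
Probe ordering changes because alloc_etherdev_mq() needs the queue count, the queue count depends on the MT detection, and that detection needs register access; BAR0 is therefore mapped first, and the error labels unwind in the new order. A compressed sketch of that ordering (eg_ names and the priv layout are placeholders):

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/pci.h>

#define EG_REG_MAGIC	0x1F00		/* mirrors REG_MT_MAGIC */
#define EG_MAGIC	0xaabb1234	/* mirrors MT_MAGIC */

struct eg_adapter {
	u8 __iomem *hw_addr;
};

/* Sketch: the queue count feeds alloc_etherdev_mq(), so BAR0 must be
 * mapped and the variant detected before the netdev exists.
 */
static int eg_probe(struct pci_dev *pdev)
{
	struct net_device *netdev;
	u8 __iomem *hw_addr;
	u32 queue_count;

	hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw_addr)
		return -EIO;

	queue_count = readl(hw_addr + EG_REG_MAGIC) == EG_MAGIC ? 4 : 1;

	netdev = alloc_etherdev_mq(sizeof(struct eg_adapter), queue_count);
	if (!netdev) {
		iounmap(hw_addr);	/* unwind in reverse order */
		return -ENOMEM;
	}
	((struct eg_adapter *)netdev_priv(netdev))->hw_addr = hw_addr;
	return 0;
}
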
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index ff9f96de74b8..2eb0a2ab69f6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -357,7 +357,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
}
/**
- * atl1e_set_mac - Change the Ethernet Address of the NIC
+ * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
*
@@ -787,7 +787,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
}
/**
- * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
+ * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
* Return 0 on success, negative on failure
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index eaf96d002fa5..c67201a13cf5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1011,7 +1011,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
- * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
+ * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
* Return 0 on success, negative on failure
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index cb88ffb8f12f..1a02ca600b71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -206,6 +206,7 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
+ imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
select NET_DEVLINK
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index b455b60a5434..ad2655efe423 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1556,8 +1556,8 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV4UDP_HLEN);
- bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
- bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
/* Raw ethernet II magic packet pattern - pattern 1 */
memset(pwol_pattern, 0, B44_PATTERN_SIZE);
@@ -1565,9 +1565,9 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
ETH_HLEN);
- bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
B44_PATTERN_BASE + B44_PATTERN_SIZE);
- bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
B44_PMASK_BASE + B44_PMASK_SIZE);
/* Ipv6 magic packet pattern - pattern 2 */
@@ -1576,9 +1576,9 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV6UDP_HLEN);
- bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
- bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
kfree(pwol_pattern);
@@ -1631,9 +1631,9 @@ static void b44_setup_wol(struct b44 *bp)
val = br32(bp, B44_DEVCTRL);
bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
- } else {
- b44_setup_pseudo_magicp(bp);
- }
+ } else {
+ b44_setup_pseudo_magicp(bp);
+ }
b44_setup_wol_pci(bp);
}
@@ -1757,7 +1757,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_cam_write(bp, zero, i);
bw32(bp, B44_RXCONFIG, val);
- val = br32(bp, B44_CAM_CTRL);
+ val = br32(bp, B44_CAM_CTRL);
bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
}
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 60d908507f51..02a569500234 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -174,9 +174,6 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
if (!ring->slots)
goto err_free_buf_descs;
- ring->read_idx = 0;
- ring->write_idx = 0;
-
return 0;
err_free_buf_descs:
@@ -304,6 +301,9 @@ static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
(uint32_t)ring->dma_addr);
+
+ ring->read_idx = 0;
+ ring->write_idx = 0;
}
static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 9834b77cf4b6..4ab5bf64d353 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,7 +172,6 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
- struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -206,21 +205,15 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
- if (regs) {
- bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
+ bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+ else
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
- }
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
- if (regs) {
- bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
- regs);
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
- }
+ bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
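
The bgmac change collapses platform_get_resource_byname() plus devm_ioremap_resource() into a single devm_platform_ioremap_resource_byname() call. The helper returns an ERR_PTR rather than NULL on failure, so callers test with IS_ERR(), as sketched below:

#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: one-call mapping of a named platform MEM resource. */
static int eg_map_regs(struct platform_device *pdev, void __iomem **base)
{
	*base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
	if (IS_ERR(*base))
		return PTR_ERR(*base);	/* ERR_PTR, never NULL, on failure */
	return 0;
}
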
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c0986096c701..bee6cfad9fc6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -572,7 +572,7 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
}
if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
- ret = -EBUSY;
+ ret = -EBUSY;
else
ret = 0;
@@ -3599,7 +3599,7 @@ bnx2_set_rx_mode(struct net_device *dev)
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
0xffffffff);
- }
+ }
sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
}
else {
@@ -4674,7 +4674,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
if (addr == page_end-4) {
cmd_flags = BNX2_NVM_COMMAND_LAST;
- }
+ }
rc = bnx2_nvram_write_dword(bp, addr,
&flash_buffer[i], cmd_flags);
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 281b1c2e04a7..2acbc73dcd18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13586,7 +13586,7 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
}
/**
- * bnx2x_get_num_none_def_sbs - return the number of none default SBs
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
* @pdev: pci device
* @cnic_cnt: count
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6cd1523ad9e5..542c69822649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4152,7 +4152,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
/*************************** Credit handling **********************************/
/**
- * atomic_add_ifless - add if the result is less than a given value.
+ * __atomic_add_ifless - add if the result is less than a given value.
*
* @v: pointer of type atomic_t
* @a: the amount to add to v...
@@ -4180,7 +4180,7 @@ static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
}
/**
- * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
+ * __atomic_dec_ifmoe - dec if the result is more or equal than a given value.
*
* @v: pointer of type atomic_t
* @a: the amount to dec from v...
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d21f085044cd..27943b0446c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
- if (iov->total == 0)
+ if (iov->total == 0) {
+ err = -EINVAL;
goto failed;
+ }
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 3a716c015415..966d5722c5e2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -504,7 +504,6 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
/* VF side vfpf channel functions */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
int bnx2x_vfpf_release(struct bnx2x *bp);
-int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index cb97ec56fdec..2b8ae687b3c1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2985844634c8..f56245eeef7b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -49,6 +49,8 @@
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
@@ -63,6 +65,7 @@
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
@@ -282,7 +285,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
- idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -417,12 +421,25 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb->no_fcs)) {
- lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
- goto normal_tx;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
+ atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
+ if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else {
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+ }
+ }
}
- if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ if (unlikely(skb->no_fcs))
+ lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
+ !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
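
bnxt reserves one of a small pool of hardware TX-timestamp slots per PTP packet: atomic_dec_if_positive() either claims a slot or leaves the counter alone, and every later failure path returns the slot with atomic_inc(). The push-mode fast path is also skipped whenever lflags is set, presumably because the push descriptor format cannot carry those flags. The reservation idiom in isolation:

#include <linux/atomic.h>
#include <linux/skbuff.h>

/* Sketch: claim one of a limited pool of hardware timestamp slots.
 * 'tx_avail' starts at the pool size; failure paths must atomic_inc()
 * it back, as the patch does in tx_dma_error and on parse failure.
 */
static bool eg_claim_ts_slot(atomic_t *tx_avail, struct sk_buff *skb)
{
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	/* Decrements only if the counter stays non-negative; returns
	 * the old value minus one either way.
	 */
	if (atomic_dec_if_positive(tx_avail) < 0)
		return false;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	return true;
}
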
@@ -589,6 +606,8 @@ normal_tx:
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* Sync BD data before updating doorbell */
wmb();
@@ -618,6 +637,9 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
+ if (BNXT_TX_PTP_IS_SET(lflags))
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+
last_frag = i;
/* start back at beginning and unmap skb */
@@ -652,6 +674,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
+ bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -678,12 +701,21 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb_frag_size(&skb_shinfo(skb)->frags[j]),
PCI_DMA_TODEVICE);
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (!bnxt_get_tx_ts_p5(bp, skb))
+ compl_deferred = true;
+ else
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+ }
+ }
next_tx_int:
cons = NEXT_TX(cons);
tx_bytes += skb->len;
- dev_kfree_skb_any(skb);
+ if (!compl_deferred)
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
@@ -1705,9 +1737,9 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 *data_ptr, agg_bufs, cmp_type;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ u32 flags, misc;
void *data;
int rc = 0;
- u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1805,7 +1837,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
goto next_rx_no_len;
}
- len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+ len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
@@ -1882,6 +1915,24 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
+ if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+ RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+ u64 ns, ts;
+
+ if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ memset(skb_hwtstamps(skb), 0,
+ sizeof(*skb_hwtstamps(skb)));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ }
+ }
+ }
bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
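
For P5 receive timestamps, the raw cycle count from the completion is converted to nanoseconds with timecounter_cyc2time() under ptp_lock, then stored in the skb as a ktime. The conversion step, sketched with an explicit lock parameter:

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/timecounter.h>

/* Sketch: stamp an skb from a raw cycle-counter value. 'tc' is the
 * timecounter the driver's PTP code keeps in sync; 'lock' guards it.
 */
static void eg_rx_hwtstamp(struct sk_buff *skb, struct timecounter *tc,
			   spinlock_t *lock, u64 cycles)
{
	u64 ns;

	spin_lock_bh(lock);
	ns = timecounter_cyc2time(tc, cycles);
	spin_unlock_bh(lock);

	memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb)));
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
}
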
@@ -2183,6 +2234,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
bnxt_async_event_process(bp,
(struct hwrm_async_event_cmpl *)txcmp);
+ break;
default:
break;
@@ -6932,17 +6984,10 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
if (!rmem->nr_pages)
return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
-
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -7314,7 +7359,7 @@ skip_rdma:
entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -7397,6 +7442,56 @@ hwrm_func_resc_qcaps_exit:
return rc;
}
+/* bp->hwrm_cmd_lock already held. */
+static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_mac_ptp_qcfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u8 flags;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10801) {
+ rc = -ENODEV;
+ goto no_ptp;
+ }
+
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto no_ptp;
+
+ flags = resp->flags;
+ if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ rc = -ENODEV;
+ goto no_ptp;
+ }
+ if (!ptp) {
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return -ENOMEM;
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+ }
+ if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+ ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
+ ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
+ } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
+ ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
+ } else {
+ rc = -ENODEV;
+ goto no_ptp;
+ }
+ return 0;
+
+no_ptp:
+ kfree(ptp);
+ bp->ptp_cfg = NULL;
+ return rc;
+}
+
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -7468,6 +7563,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+ __bnxt_hwrm_ptp_qcfg(bp);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -10026,6 +10123,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
+ bnxt_ptp_start(bp);
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10341,6 +10439,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
+ case SIOCSHWTSTAMP:
+ return bnxt_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return bnxt_hwtstamp_get(dev, ifr);
+
default:
/* do nothing */
break;
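Both new ioctl cases follow the standard hwtstamp_config contract: SIOCSHWTSTAMP programs the TX type and RX filter, SIOCGHWTSTAMP reads the active configuration back. A userspace sketch (the interface name is illustrative; the set ioctl needs CAP_NET_ADMIN):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* fd: any socket; this lands in bnxt_hwtstamp_set() via the case above */
static int hwtstamp_on(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}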
@@ -10785,37 +10889,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 2 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
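+The two-header and 64-byte caps mirror what the chip's parser can skip past; the per-header lengths come from ipv6_optlen() and ipv6_authlen(), which encode the RFC 8200 rule for generic extension headers and the RFC 4302 rule for AH. A stand-alone illustration of that arithmetic (NEXTHDR_AUTH is 51):
+
+/* hdrlen is the 8-bit length field of the extension header itself */
+static int ext_hdr_bytes(unsigned char nexthdr, unsigned char hdrlen)
+{
+	if (nexthdr == 51)		/* NEXTHDR_AUTH: units of 4 bytes */
+		return (hdrlen + 2) * 4;
+	return (hdrlen + 1) * 8;	/* generic: units of 8 bytes */
+}
+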
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
static netdev_features_t bnxt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- struct bnxt *bp;
- __be16 udp_port;
- u8 l4_proto = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
features = vlan_features_check(skb, features);
- if (!skb->encapsulation)
- return features;
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
- default:
- return features;
}
-
- if (l4_proto != IPPROTO_UDP)
- return features;
-
- bp = netdev_priv(dev);
- /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
- udp_port = udp_hdr(skb)->dest;
- if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
- return features;
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
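Returning features with NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK cleared makes the stack fall back to software checksumming and segmentation for this one skb instead of disabling the offloads device-wide. The same shape in a hypothetical minimal driver (the foo_ names are invented for illustration):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>

static netdev_features_t foo_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	/* keep offloads only for traffic the hardware can parse */
	if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
	    ip_hdr(skb)->protocol == IPPROTO_TCP)
		return features;
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}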
@@ -11668,6 +11860,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
static int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -11682,6 +11876,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
+ rc = bnxt_probe_phy(bp, false);
+ if (rc)
+ return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
@@ -12464,6 +12661,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
+
+ bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -12484,6 +12683,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
@@ -13045,6 +13246,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc);
}
+ if (bnxt_ptp_init(bp)) {
+ netdev_warn(dev, "PTP initialization failed.\n");
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
bnxt_inv_fw_health_reg(bp);
bnxt_dl_register(bp);
@@ -13073,6 +13279,9 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98e0cef4532c..bcf8d00b8c80 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -89,6 +89,8 @@ struct tx_bd_ext {
#define TX_BD_CFA_META_KEY_VLAN (1 << 28)
};
+#define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP))
+
struct rx_bd {
__le32 rx_bd_len_flags_type;
#define RX_BD_TYPE (0x3f << 0)
@@ -159,6 +161,7 @@ struct rx_cmp {
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
#define RX_CMP_FLAGS_UNUSED (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
#define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
#define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
@@ -240,7 +243,7 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
- __le32 rx_cmp_unused3;
+ __le32 rx_cmp_timestamp;
};
#define RX_CMP_L2_ERRORS \
@@ -1362,6 +1365,9 @@ struct bnxt_test_info {
#define BNXT_GRC_REG_CHIP_NUM 0x48
#define BNXT_GRC_REG_BASE 0x260000
+#define BNXT_TS_REG_TIMESYNC_TS0_LOWER 0x640180c
+#define BNXT_TS_REG_TIMESYNC_TS0_UPPER 0x6401810
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1457,6 +1463,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
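+BNXT_SET_CTX_PAGE_ATTR centralizes the pg_size encoding that bnxt_hwrm_set_pg_attr() used to open-code (incorrectly for 64K pages). The chosen codes are the FUNC_BACKING_STORE_CFG_REQ_*_PG_SIZE_PG_{4K,8K,64K} values defined further down in this header. A stand-alone demo of the selection logic, with those constants inlined:
+
+#include <stdio.h>
+
+#define PG_4K	(0x0 << 4)
+#define PG_8K	(0x1 << 4)
+#define PG_64K	(0x2 << 4)
+
+#define SET_CTX_PAGE_ATTR(attr, page_size)	\
+do {						\
+	if ((page_size) == 0x2000)		\
+		attr = PG_8K;			\
+	else if ((page_size) == 0x10000)	\
+		attr = PG_64K;			\
+	else					\
+		attr = PG_4K;			\
+} while (0)
+
+int main(void)
+{
+	long sizes[] = { 0x1000, 0x2000, 0x10000 };
+	unsigned char attr;
+
+	for (int i = 0; i < 3; i++) {
+		SET_CTX_PAGE_ATTR(attr, sizes[i]);
+		printf("page size 0x%lx -> pg_attr 0x%02x\n", sizes[i], attr);
+	}
+	return 0;
+}
+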
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -2032,6 +2048,8 @@ struct bnxt {
struct bpf_prog *xdp_prog;
+ struct bnxt_ptp_cfg *ptp_cfg;
+
/* devlink interface and vf-rep structs */
struct devlink *dl;
struct devlink_port dl_port;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c664ec52ebcf..786ca51e669b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -19,9 +19,13 @@
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
@@ -3926,6 +3930,35 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
return 0;
}
+static int bnxt_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp;
+
+ ptp = bp->ptp_cfg;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ info->phc_index = -1;
+ if (!ptp)
+ return 0;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (ptp->ptp_clock)
+ info->phc_index = ptp_clock_index(ptp->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+ return 0;
+}
+
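+bnxt_get_ts_info() backs `ethtool -T <dev>`; the same data is available programmatically through the ETHTOOL_GET_TS_INFO ioctl (sketch; the interface name is illustrative):
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
+
+int main(void)
+{
+	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
+	struct ifreq ifr;
+	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+	memset(&ifr, 0, sizeof(ifr));
+	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+	ifr.ifr_data = (void *)&info;
+	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
+		printf("phc %d tx_types 0x%x rx_filters 0x%x\n",
+		       info.phc_index, info.tx_types, info.rx_filters);
+	return 0;
+}
+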
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -4172,6 +4205,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
+ .get_ts_info = bnxt_get_ts_info,
.reset = bnxt_reset,
.set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 6199f125bc13..3fc6781c5b98 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -189,6 +189,8 @@ struct cmd_nums {
#define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
#define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
#define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -250,6 +252,8 @@ struct cmd_nums {
#define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
#define HWRM_FW_STATE_UNQUIESCE 0xd8UL
#define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -305,6 +309,8 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -356,6 +362,12 @@ struct cmd_nums {
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
#define HWRM_FUNC_SPD_CFG 0x19aUL
#define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -373,6 +385,10 @@ struct cmd_nums {
#define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
#define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
#define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -385,6 +401,7 @@ struct cmd_nums {
#define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
#define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
#define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
@@ -399,6 +416,7 @@ struct cmd_nums {
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
#define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
#define HWRM_TF_TCAM_SET 0x2f8UL
#define HWRM_TF_TCAM_GET 0x2f9UL
#define HWRM_TF_TCAM_MOVE 0x2faUL
@@ -427,6 +445,16 @@ struct cmd_nums {
#define HWRM_DBG_QCAPS 0xff20UL
#define HWRM_DBG_QCFG 0xff21UL
#define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
#define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
@@ -471,6 +499,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -502,8 +531,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 16
-#define HWRM_VERSION_STR "1.10.2.16"
+#define HWRM_VERSION_RSVD 47
+#define HWRM_VERSION_STR "1.10.2.47"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -604,7 +633,8 @@ struct hwrm_ver_get_output {
__le16 roce_fw_build;
__le16 roce_fw_patch;
__le16 max_ext_req_len;
- u8 unused_1[5];
+ __le16 max_req_timeout;
+ u8 unused_1[3];
u8 valid;
};
@@ -725,7 +755,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -919,6 +952,8 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
@@ -1074,6 +1109,223 @@ struct hwrm_async_event_cmpl_echo_request {
__le32 event_data1;
};
+/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_master {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+};
+
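+event_data2 of the PHC_MASTER event packs the master and secondary function IDs into the low and high 16 bits. A plausible decode using the masks above (the driver-side handler for this event is not part of this patch):
+
+static void decode_phc_master(struct hwrm_async_event_cmpl_phc_master *cmpl)
+{
+	u32 data2 = le32_to_cpu(cmpl->event_data2);
+	u16 master_fid, sec_fid;
+
+	master_fid = (data2 & ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK) >>
+		     ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT;
+	sec_fid = (data2 & ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK) >>
+		  ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT;
+	/* master_fid/sec_fid identify the current PHC master and secondary */
+}
+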
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
+
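+The PPS event splits a 48-bit timestamp across event_data1 (lower 32 bits) and bits 4..19 of event_data2 (upper 16 bits). One plausible reassembly, again not wired up in this patch:
+
+static u64 pps_event_ts(struct hwrm_async_event_cmpl_pps_timestamp *cmpl)
+{
+	u32 data2 = le32_to_cpu(cmpl->event_data2);
+	u64 upper = (data2 & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
+		    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;
+
+	return (upper << 32) | le32_to_cpu(cmpl->event_data1);
+}
+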
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1302,7 +1554,7 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0[2];
+ __le16 max_msix_vfs;
__le32 flags_ext;
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
@@ -1320,6 +1572,14 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1342,7 +1602,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:768b/96B) */
+/* hwrm_func_qcfg_output (size:832b/104B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1366,6 +1626,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1374,7 +1635,7 @@ struct hwrm_func_qcfg_output {
__le16 alloc_rx_rings;
__le16 alloc_l2_ctx;
__le16 alloc_vnics;
- __le16 mtu;
+ __le16 admin_mtu;
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
@@ -1383,6 +1644,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
u8 port_pf_cnt;
@@ -1463,11 +1725,35 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
#define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
#define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 unused_2[6];
+ u8 unused_2[3];
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ u8 unused_3;
u8 valid;
};
-/* hwrm_func_cfg_input (size:768b/96B) */
+/* hwrm_func_cfg_input (size:832b/104B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1504,7 +1790,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
@@ -1530,7 +1816,11 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
#define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
#define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
- __le16 mtu;
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ __le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
__le16 num_cmpl_rings;
@@ -1615,7 +1905,30 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
#define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
#define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
- u8 unused_0[4];
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1777,14 +2090,15 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -2047,7 +2361,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2085,6 +2399,8 @@ struct hwrm_func_backing_store_qcaps_output {
#define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
#define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
#define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
u8 qp_init_offset;
u8 srq_init_offset;
u8 cq_init_offset;
@@ -2093,7 +2409,13 @@ struct hwrm_func_backing_store_qcaps_output {
u8 stat_init_offset;
u8 mrav_init_offset;
u8 tqm_fp_rings_count_ext;
- u8 rsvd[5];
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ u8 rsvd[7];
u8 valid;
};
@@ -2120,7 +2442,7 @@ struct tqm_fp_ring_cfg {
__le64 tqm_ring_page_dir;
};
-/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2150,6 +2472,8 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2508,6 +2832,45 @@ struct hwrm_func_backing_store_cfg_input {
u8 ring10_unused[3];
__le32 tqm_ring10_num_entries;
__le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ u8 rsvd[2];
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2634,6 +2997,212 @@ struct hwrm_func_echo_response_output {
u8 valid;
};
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_res_local_ts;
+ __le64 ptm_pmstr_ts;
+ __le32 ptm_mstr_prop_dly;
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3156,6 +3725,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -3188,8 +3758,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
- __s32 ptp_freq_adj_ppb;
- u8 unused_1[4];
+ __le32 ptp_freq_adj_ppb;
+ __le32 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3221,16 +3791,17 @@ struct hwrm_port_mac_ptp_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
struct hwrm_port_mac_ptp_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -3247,6 +3818,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le32 tx_ts_reg_off_seq_id;
__le32 tx_ts_reg_off_fifo;
__le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
u8 unused_1[7];
u8 valid;
};
@@ -3647,7 +4220,7 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_ts_query_input (size:192b/24B) */
+/* hwrm_port_ts_query_input (size:256b/32B) */
struct hwrm_port_ts_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3662,6 +4235,11 @@ struct hwrm_port_ts_query_input {
#define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
__le16 port_id;
u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
};
/* hwrm_port_ts_query_output (size:192b/24B) */
@@ -4215,7 +4793,8 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
@@ -5467,6 +6046,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -7224,6 +7804,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
u8 unused_0[3];
u8 valid;
};
@@ -7914,11 +8495,14 @@ struct hwrm_temp_monitor_query_output {
u8 phy_temp;
u8 om_temp;
u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- u8 unused_0[3];
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
u8 valid;
};
@@ -8109,6 +8693,7 @@ struct hwrm_dbg_qcaps_output {
#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
#define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
u8 unused_1[3];
u8 valid;
};
@@ -8632,10 +9217,11 @@ struct hwrm_nvm_install_update_output {
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
u8 unused_0[7];
};
@@ -8876,6 +9462,7 @@ struct fw_status_reg {
#define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
#define FW_STATUS_REG_SHUTDOWN 0x100000UL
#define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
};
/* hcomm_status (size:64b/8B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
new file mode 100644
index 000000000000..f698b6bd4ff8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -0,0 +1,473 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include <linux/timekeeping.h>
+#include <linux/ptp_classify.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ptp.h"
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+
+ ptp_class = ptp_classify_raw(skb);
+
+ switch (ptp_class & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ case PTP_CLASS_V2:
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *seq_id = ntohs(hdr->sequence_id);
+ return 0;
+ default:
+ return -ERANGE;
+ }
+}
+
+static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ u64 ns = timespec64_to_ns(ts);
+
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
+}
+
+/* Caller holds ptp_lock */
+static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ ptp_read_system_prets(sts);
+ ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ return ns;
+}
+
+static void bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return;
+ spin_lock_bh(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ ptp->current_time = bnxt_refclk_read(bp, NULL);
+ spin_unlock_bh(&ptp->ptp_lock);
+}
+
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+{
+ struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_ts_query_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1);
+ req.flags = cpu_to_le32(flags);
+ if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
+ PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
+ req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+ req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ }
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *ts = le64_to_cpu(resp->ptp_msg_ts);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ u64 ns, cycles;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ cycles = bnxt_refclk_read(ptp->bp, sts);
+ ns = timecounter_cyc2time(&ptp->tc, cycles);
+ spin_unlock_bh(&ptp->ptp_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
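
timecounter_cyc2time() folds the raw 48-bit PHC reading into a monotonic nanosecond value. With the cyclecounter parameters set up later in bnxt_ptp_init() (mult = 1, shift = 0, 48-bit mask), the conversion reduces to a wrap-safe masked add; a minimal sketch of just that special case follows, with an illustrative helper name rather than a kernel API:

#include <linux/timecounter.h>	/* CYCLECOUNTER_MASK() */

static u64 bnxt_sketch_cyc2time(u64 nsec_base, u64 cycle_last, u64 cycles)
{
	/* Masking the delta keeps the math correct across counter wrap */
	u64 delta = (cycles - cycle_last) & CYCLECOUNTER_MASK(48);

	return nsec_base + delta;	/* mult = 1, shift = 0: 1 ns per tick */
}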
+static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_adjtime(&ptp->tc, delta);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
+}
+
+static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct hwrm_port_mac_cfg_input req = {0};
+ struct bnxt *bp = ptp->bp;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+ req.ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_err(ptp->bp->dev,
+ "ptp adjfreq failed. rc = %d\n", rc);
+ return rc;
+}
+
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_cfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 flags = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+ if (ptp->rx_filter)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (ptp->tx_tstamp_en)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+ struct bnxt_ptp_cfg *ptp;
+ u16 old_rxctl;
+ int old_rx_filter, rc;
+ u8 old_tx_tstamp_en;
+
+ ptp = bp->ptp_cfg;
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+ return -EFAULT;
+
+ if (stmpconf.flags)
+ return -EINVAL;
+
+ if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+ stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ return -ERANGE;
+
+ old_rx_filter = ptp->rx_filter;
+ old_rxctl = ptp->rxctl;
+ old_tx_tstamp_en = ptp->tx_tstamp_en;
+ switch (stmpconf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp->rxctl = 0;
+ ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ ptp->rxctl = BNXT_PTP_MSG_SYNC;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp->rxctl = BNXT_PTP_MSG_DELAY_REQ;
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ ptp->tx_tstamp_en = 1;
+ else
+ ptp->tx_tstamp_en = 0;
+
+ rc = bnxt_hwrm_ptp_cfg(bp);
+ if (rc)
+ goto ts_set_err;
+
+ stmpconf.rx_filter = ptp->rx_filter;
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+
+ts_set_err:
+ ptp->rx_filter = old_rx_filter;
+ ptp->rxctl = old_rxctl;
+ ptp->tx_tstamp_en = old_tx_tstamp_en;
+ return rc;
+}
+
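bnxt_hwtstamp_set() is reached through the standard SIOCSHWTSTAMP ioctl. A hedged user-space sketch of how a PTP daemon would enable TX stamping and V2 event RX filtering on this driver (interface name and function name are assumptions); note the driver may widen the requested filter, e.g. L2_EVENT becomes HWTSTAMP_FILTER_PTP_V2_EVENT, and writes the effective value back:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_hw_timestamps(const char *ifname)	/* e.g. "eth0" (assumption) */
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
	};
	struct ifreq ifr = {};
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket will do */
	if (fd < 0)
		return -1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	rc = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return rc;	/* on success, cfg.rx_filter holds the effective filter */
}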
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+ struct bnxt_ptp_cfg *ptp;
+
+ ptp = bp->ptp_cfg;
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ stmpconf.rx_filter = ptp->rx_filter;
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
+static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
+{
+ u32 reg_base = *reg_arr & BNXT_GRC_BASE_MASK;
+ u32 win_off;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((reg_arr[i] & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ }
+ win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+ writel(reg_base, bp->bar0 + win_off);
+ return 0;
+}
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 *reg_arr;
+ int rc, i;
+
+ reg_arr = ptp->refclk_regs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
+ if (rc)
+ return rc;
+ for (i = 0; i < 2; i++)
+ ptp->refclk_mapped_regs[i] = BNXT_PTP_GRC_WIN_BASE +
+ (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+ writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ (BNXT_PTP_GRC_WIN - 1) * 4);
+}
+
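bnxt_map_regs() programs one of the chip's GRC remap windows: the aligned base of the PHC registers is written into the window-select register, after which that register page appears in BAR0 at the window's fixed offset (window 5 maps at 0x5000 here, per BNXT_PTP_GRC_WIN_BASE). A hedged sketch of the address math the driver then relies on; the page granularity is an assumption encoded in BNXT_GRC_OFFSET_MASK, and the helper name is illustrative:

/* BAR0 offset of a GRC register once its page is mapped in window 5;
 * mirrors the refclk_mapped_regs[] computation in bnxt_map_ptp_regs().
 */
static u32 bnxt_sketch_grc_window_addr(u32 grc_reg)
{
	return BNXT_PTP_GRC_WIN_BASE + (grc_reg & BNXT_GRC_OFFSET_MASK);
}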
+static u64 bnxt_cc_read(const struct cyclecounter *cc)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+
+ return bnxt_refclk_read(ptp->bp, NULL);
+}
+
+static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct skb_shared_hwtstamps timestamp;
+ u64 ts = 0, ns = 0;
+ int rc;
+
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ if (!rc) {
+ memset(&timestamp, 0, sizeof(timestamp));
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ } else {
+ netdev_err(bp->dev, "TS query for TX timer failed, rc = %d\n",
+ rc);

+ }
+
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ atomic_inc(&ptp->tx_avail);
+}
+
+static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ unsigned long now = jiffies;
+ struct bnxt *bp = ptp->bp;
+
+ if (ptp->tx_skb)
+ bnxt_stamp_tx_skb(bp, ptp->tx_skb);
+
+ if (!time_after_eq(now, ptp->next_period))
+ return ptp->next_period - now;
+
+ bnxt_ptp_get_current_time(bp);
+ ptp->next_period = now + HZ;
+ return HZ;
+}
+
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->tx_skb) {
+ netdev_err(bp->dev, "deferring skb: one SKB is still outstanding\n");
+ return -EBUSY;
+ }
+ ptp->tx_skb = skb;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ return 0;
+}
+
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 time;
+
+ if (!ptp)
+ return -ENODEV;
+
+ BNXT_READ_TIME64(ptp, time, ptp->old_time);
+ *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
+ if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
+ *ts += BNXT_LO_TIMER_MASK + 1;
+
+ return 0;
+}
+
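The reconstruction above splices the 32-bit timestamp carried in the RX completion onto the upper 16 bits of the latest full PHC snapshot (old_time), adding one carry of 2^32 ticks when the packet's low word is smaller than the snapshot's, i.e. the low counter wrapped after the snapshot was taken. A worked example using the masks from bnxt_ptp.h:

/* old_time = 0x1234ffffff00 (hi 0x1234, lo 0xffffff00), and the
 * completion reports pkt_ts = 0x00000010. pkt_ts < lo, so the low
 * counter wrapped since the snapshot:
 *
 *	ts  = 0x123400000000 | 0x00000010	-> 0x123400000010
 *	ts += BNXT_LO_TIMER_MASK + 1		-> 0x123500000010
 */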
+void bnxt_ptp_start(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ spin_lock_bh(&ptp->ptp_lock);
+ ptp->current_time = bnxt_refclk_read(bp, NULL);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ }
+}
+
+static const struct ptp_clock_info bnxt_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "bnxt clock",
+ .max_adj = BNXT_MAX_PHC_DRIFT,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = bnxt_ptp_adjfreq,
+ .adjtime = bnxt_ptp_adjtime,
+ .do_aux_work = bnxt_ptp_ts_aux_work,
+ .gettimex64 = bnxt_ptp_gettimex,
+ .settime64 = bnxt_ptp_settime,
+ .enable = bnxt_ptp_enable,
+};
+
+int bnxt_ptp_init(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ rc = bnxt_map_ptp_regs(bp);
+ if (rc)
+ return rc;
+
+ atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+ spin_lock_init(&ptp->ptp_lock);
+
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ ptp->ptp_info = bnxt_ptp_caps;
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
+ if (IS_ERR(ptp->ptp_clock)) {
+ int err = PTR_ERR(ptp->ptp_clock);
+
+ ptp->ptp_clock = NULL;
+ bnxt_unmap_ptp_regs(bp);
+ return err;
+ }
+
+ return 0;
+}
+
+void bnxt_ptp_clear(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return;
+
+ if (ptp->ptp_clock)
+ ptp_clock_unregister(ptp->ptp_clock);
+
+ ptp->ptp_clock = NULL;
+ if (ptp->tx_skb) {
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ }
+ bnxt_unmap_ptp_regs(bp);
+}
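
Once bnxt_ptp_init() registers the clock, it is exposed to user space as a /dev/ptpN character device. A hedged sketch of reading it directly with clock_gettime(); FD_TO_CLOCKID is the standard dynamic-clock encoding used by the kernel's testptp example, and the device path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);	/* assumption: first PHC */

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("phc time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}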
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
new file mode 100644
index 000000000000..6b6245750e20
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -0,0 +1,81 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_PTP_H
+#define BNXT_PTP_H
+
+#define BNXT_PTP_GRC_WIN 5
+#define BNXT_PTP_GRC_WIN_BASE 0x5000
+
+#define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
+#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+
+#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
+ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+
+struct bnxt_ptp_cfg {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ /* serialize timecounter access */
+ spinlock_t ptp_lock;
+ struct sk_buff *tx_skb;
+ u64 current_time;
+ u64 old_time;
+ unsigned long next_period;
+ u16 tx_seqid;
+ struct bnxt *bp;
+ atomic_t tx_avail;
+#define BNXT_MAX_TX_TS 1
+ u16 rxctl;
+#define BNXT_PTP_MSG_SYNC (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
+#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
+ BNXT_PTP_MSG_DELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_RESP)
+ u8 tx_tstamp_en:1;
+ int rx_filter;
+
+ u32 refclk_regs[2];
+ u32 refclk_mapped_regs[2];
+};
+
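+/* A 64-bit load is not single-copy atomic on 32-bit hosts, so take
+ * ptp_lock there to avoid a torn read of the shared time value; on
+ * 64-bit hosts READ_ONCE() is sufficient.
+ */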
+#if BITS_PER_LONG == 32
+#define BNXT_READ_TIME64(ptp, dst, src) \
+do { \
+ spin_lock_bh(&(ptp)->ptp_lock); \
+ (dst) = (src); \
+ spin_unlock_bh(&(ptp)->ptp_lock); \
+} while (0)
+#else
+#define BNXT_READ_TIME64(ptp, dst, src) \
+ ((dst) = READ_ONCE(src))
+#endif
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_ptp_start(struct bnxt *bp);
+int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_clear(struct bnxt *bp);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index eb00a219aa51..7fa881e1cd80 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -632,7 +632,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
@@ -645,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
- req.mtu = cpu_to_le16(mtu);
+ req.admin_mtu = cpu_to_le16(mtu);
req.num_rsscos_ctxs = cpu_to_le16(1);
req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index ec9564e584e0..bee6e091a997 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -138,9 +138,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
- rcu_read_lock();
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- rcu_read_unlock();
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fcca023f22e5..41f7f078cd27 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: mdio-bcm-unimac");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5335244e4577..89d16c587bb7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -423,6 +423,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
int id, ret;
pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
memset(&res, 0, sizeof(res));
memset(&ppd, 0, sizeof(ppd));
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 06f221c44802..eeb05e31713f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -82,7 +82,7 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w
+ * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w
*
* @cee: Pointer to the CEE module
* @status: Return status from the f/w
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6bc7d41d519b..7d2fe13a52f8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -4652,8 +4655,7 @@ static int macb_probe(struct platform_device *pdev)
struct macb *bp;
int err, val;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 353393dea639..8b7b59908a1a 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* DOC: Cadence GEM PCI wrapper.
*
* Copyright (C) 2016 Cadence Design Systems - https://www.cadence.com
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 283918aeb741..5c368a9cbbbc 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* 1588 PTP support for Cadence GEM device.
*
* Copyright (C) 2017 Cadence Design Systems - https://www.cadence.com
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index bbb453c6a5f7..b6a066404f4b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -711,7 +711,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
}
/**
- * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
* @dev: net device structure
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers.
@@ -859,7 +859,7 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
}
/**
- * xgmac_tx:
+ * xgmac_tx_complete:
* @priv: private driver structure
* Description: it reclaims resources after transmission completes.
*/
@@ -1040,7 +1040,7 @@ static int xgmac_open(struct net_device *dev)
}
/**
- * xgmac_release - close entry point of the driver
+ * xgmac_stop - close entry point of the driver
* @dev : device pointer.
* Description:
* This is the stop entry point of the driver.
@@ -1812,7 +1812,7 @@ err_alloc:
}
/**
- * xgmac_dvr_remove
+ * xgmac_remove
* @pdev: platform device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
* changes the link status, releases the DMA descriptor rings,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c33b4e837515..e2b290135fd9 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -555,9 +555,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
orig_data = xdp.data;
- rcu_read_lock();
action = bpf_prog_run_xdp(prog, &xdp);
- rcu_read_unlock();
len = xdp.data_end - xdp.data;
/* Check if XDP program has changed headers */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 0c783aadf393..c36fed9c3d73 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -594,9 +594,6 @@ static void bgx_lmac_handler(struct net_device *netdev)
struct phy_device *phydev;
int link_changed = 0;
- if (!lmac)
- return;
-
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index f80fbd81b609..6d682b7c7aac 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -178,7 +178,7 @@ struct sge_txq { /* state for an SGE Tx queue */
unsigned int token; /* WR token */
dma_addr_t phys_addr; /* physical address of the ring */
struct sk_buff_head sendq; /* List of backpressured offload packets */
- struct tasklet_struct qresume_tsk; /* restarts the queue */
+ struct work_struct qresume_task; /* restarts the queue */
unsigned int cntxt_id; /* SGE context id for the Tx q */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 1bd7d89666c4..b706f2fbe4f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -770,4 +770,6 @@ int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
+
+extern struct workqueue_struct *cxgb3_wq;
#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 84ad7261e243..57f210c53afc 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1273,14 +1273,14 @@ static int cxgb_up(struct adapter *adap)
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
- } else if ((err = request_irq(adap->pdev->irq,
- t3_intr_handler(adap,
- adap->sge.qs[0].rspq.
- polling),
- (adap->flags & USING_MSI) ?
- 0 : IRQF_SHARED,
- adap->name, adap)))
- goto irq_err;
+ } else {
+ err = request_irq(adap->pdev->irq,
+ t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->name, adap);
+ if (err)
+ goto irq_err;
+ }
enable_all_napi(adap);
t3_sge_start(adap);
@@ -3098,8 +3098,9 @@ static void set_nqsets(struct adapter *adap)
nqsets = num_cpus;
if (nqsets < 1 || hwports == 4)
nqsets = 1;
- } else
+ } else {
nqsets = 1;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 1cc3c51eff71..cb5c79c43bc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -665,7 +665,7 @@ static void t3_reset_qset(struct sge_qset *q)
/**
- * free_qset - free the resources of an SGE queue set
+ * t3_free_qset - free the resources of an SGE queue set
* @adapter: the adapter owning the queue set
* @q: the queue set
*
@@ -1256,7 +1256,7 @@ static inline void t3_stop_tx_queue(struct netdev_queue *txq,
}
/**
- * eth_xmit - add a packet to the Ethernet Tx queue
+ * t3_eth_xmit - add a packet to the Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
@@ -1518,14 +1518,15 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
/**
* restart_ctrlq - restart a suspended control queue
- * @t: pointer to the tasklet associated with this handler
+ * @w: pointer to the work associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(struct tasklet_struct *t)
+static void restart_ctrlq(struct work_struct *w)
{
struct sk_buff *skb;
- struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_CTRL].qresume_task);
struct sge_txq *q = &qs->txq[TXQ_CTRL];
spin_lock(&q->lock);
@@ -1736,14 +1737,15 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
/**
* restart_offloadq - restart a suspended offload queue
- * @t: pointer to the tasklet associated with this handler
+ * @w: pointer to the work associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_offloadq(struct tasklet_struct *t)
+static void restart_offloadq(struct work_struct *w)
{
struct sk_buff *skb;
- struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_OFLD].qresume_task);
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
@@ -1998,13 +2000,17 @@ static void restart_tx(struct sge_qset *qs)
should_restart_tx(&qs->txq[TXQ_OFLD]) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
qs->txq[TXQ_OFLD].restarts++;
- tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+
+ /* The work can be quite lengthy, so we use the driver's own workqueue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
}
if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_CTRL]) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs->txq[TXQ_CTRL].restarts++;
- tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+
+ /* The work can be quite lengthy, so we use the driver's own workqueue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
}
}
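
The queue-restart handlers can run for a while and must be able to sleep, which is why the conversion moves them off tasklets (softirq context) onto the driver-private cxgb3_wq. A hedged sketch of how such a queue is typically created at module init; the real allocation lives in cxgb3_main.c and may use different flags:

#include <linux/workqueue.h>

struct workqueue_struct *cxgb3_wq;

static int sketch_wq_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress for the restart
	 * work even under memory pressure; ordering is not needed here.
	 */
	cxgb3_wq = alloc_workqueue("cxgb3", WQ_MEM_RECLAIM, 0);
	return cxgb3_wq ? 0 : -ENOMEM;
}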
@@ -3085,8 +3091,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
skb_queue_head_init(&q->txq[i].sendq);
}
- tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
- tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
+ INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
+ INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
@@ -3276,11 +3282,11 @@ void t3_sge_start(struct adapter *adap)
*
* Can be invoked from interrupt context e.g. error handler.
*
- * Note that this function cannot disable the restart of tasklets as
+ * Note that this function cannot disable the restart of work items as
* it cannot wait if called from interrupt context, however the
- * tasklets will have no effect since the doorbells are disabled. The
+ * work items will have no effect since the doorbells are disabled. The
* driver will call t3_sge_stop() later from process context, at
- * which time the tasklets will be stopped if they are still running.
+ * which time the work items will be stopped if they are still running.
*/
void t3_sge_stop_dma(struct adapter *adap)
{
@@ -3292,7 +3298,7 @@ void t3_sge_stop_dma(struct adapter *adap)
* @adap: the adapter
*
* Called from process context. Disables the DMA engine and any
- * pending queue restart tasklets.
+ * pending queue restart work items.
*/
void t3_sge_stop(struct adapter *adap)
{
@@ -3303,8 +3309,8 @@ void t3_sge_stop(struct adapter *adap)
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *qs = &adap->sge.qs[i];
- tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
- tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+ cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
+ cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
}
}
@@ -3371,7 +3377,7 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
q->coalesce_usecs = 5;
q->rspq_size = 1024;
q->fl_size = 1024;
- q->jumbo_size = 512;
+ q->jumbo_size = 512;
q->txq_size[TXQ_ETH] = 1024;
q->txq_size[TXQ_OFLD] = 1024;
q->txq_size[TXQ_CTRL] = 256;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 12fcf84d67ad..163efab27e9b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -106,8 +106,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
if (!list_empty(&ctbl->ce_free_head)) {
ce = list_first_entry(&ctbl->ce_free_head,
struct clip_entry, list);
- list_del(&ce->list);
- INIT_LIST_HEAD(&ce->list);
+ list_del_init(&ce->list);
spin_lock_init(&ce->lock);
refcount_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
@@ -179,8 +178,7 @@ found:
write_lock_bh(&ctbl->lock);
spin_lock_bh(&ce->lock);
if (refcount_dec_and_test(&ce->refcnt)) {
- list_del(&ce->list);
- INIT_LIST_HEAD(&ce->list);
+ list_del_init(&ce->list);
list_add_tail(&ce->list, &ctbl->ce_free_head);
atomic_inc(&ctbl->nfree);
if (v6)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 314f8d806723..9058f09f921e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 61ea3ec5c3fc..83ed10ac8660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
return ret;
}
- spin_lock_bh(&adap->win0_lock);
+ /* We have to RESET the chip/firmware because we need the
+ * chip in an uninitialized state for loading the new PHY image.
+ * Otherwise, the running firmware will only store the PHY
+ * image in local RAM, which will be lost after the next reset.
+ */
+ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+ ret);
+ return ret;
+ }
+
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
- spin_unlock_bh(&adap->win0_lock);
- if (ret)
- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
u32 ftid)
{
struct tid_info *t = &adap->tids;
- struct filter_entry *f;
- if (ftid < t->nhpftids)
- f = &adap->tids.hpftid_tab[ftid];
- else if (ftid < t->nftids)
- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
- else
- f = lookup_tid(&adap->tids, ftid);
+ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+ return &t->hpftid_tab[ftid - t->hpftid_base];
- return f;
+ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+ return &t->ftid_tab[ftid - t->ftid_base];
+
+ return lookup_tid(t, ftid);
}
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
filter_id = filter_info->loc_array[cmd->fs.location];
f = cxgb4_get_filter_entry(adapter, filter_id);
+ if (f->fs.prio)
+ filter_id -= adapter->tids.hpftid_base;
+ else if (!f->fs.hash)
+ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
if (ret)
goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ if (fs.prio)
+ tid += adapter->tids.hpftid_base;
+ else if (!fs.hash)
+ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
filter_info->loc_array[cmd->fs.location] = tid;
set_bit(cmd->fs.location, filter_info->bmap);
filter_info->in_use++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index bc581b149b11..6260b3bebd2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- (u64)f->fs.nat_lip[0] << 25, 1);
+ (u64)f->fs.nat_lip[0] << 24, 1);
}
}
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
cxgb4_del_filter(dev, f->tid, &f->fs);
}
- sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ sb = adapter->tids.stid_base;
for (i = 0; i < sb; i++) {
f = (struct filter_entry *)adapter->tids.tid_tab[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6264bc66a4fc..9a2b166d651e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
return err;
}
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3894,7 +3894,6 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
.ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
-#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -3909,6 +3908,7 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
+#endif
static void notify_fatal_err(struct work_struct *work)
{
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
/* Load PHY Firmware onto adapter.
*/
- spin_lock_bh(&adap->win0_lock);
ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
(u8 *)phyf->data, phyf->size);
- spin_unlock_bh(&adap->win0_lock);
if (ret < 0)
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
-ret);
@@ -6480,9 +6478,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
direction);
- cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
mutex_unlock(&uld_mutex);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 70dbee89118e..5bf117d2179f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -446,7 +446,7 @@ void cxgb4_ptp_init(struct adapter *adapter)
}
/**
- * cxgb4_ptp_remove - disable PTP device and stop the overflow check
+ * cxgb4_ptp_stop - disable PTP device and stop the overflow check
* @adapter: board private structure
*
* Stop the PTP support.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 1b88bd1c2dbe..dd9be229819a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
if (!ch_flower)
return -ENOENT;
+ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+
ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
&ch_flower->fs, ch_flower->filter_id);
if (ret)
- goto err;
+ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d\n",
+ ch_flower->filter_id, ret);
- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
- adap->flower_ht_params);
- if (ret) {
- netdev_err(dev, "Flow remove from rhashtable failed");
- goto err;
- }
kfree_rcu(ch_flower, rcu);
-
-err:
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 6c259de96f96..338b04f339b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
* down before configuring tc params.
*/
if (netif_running(dev)) {
- cxgb_close(dev);
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
needs_bring_up = true;
}
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
}
out:
- if (needs_bring_up)
- cxgb_open(dev);
+ if (needs_bring_up) {
+ netif_tx_start_all_queues(dev);
+ netif_carrier_on(dev);
+ }
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e5f2edb70cf..6a099cb34b12 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
if (!eosw_txq)
return -ENOMEM;
+ if (!(adap->flags & CXGB4_FW_OK)) {
+ /* Don't stall caller when access to FW is lost */
+ complete(&eosw_txq->completion);
+ return -EIO;
+ }
+
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9428ef1f04a8..6606fb8b3e42 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
* @addr: the start address to write
* @n: length of data to write in bytes
* @data: the data to write
+ * @byte_oriented: whether to store data as bytes or as words
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address. All the data must be written to the same page.
+ * If @byte_oriented is set, the write data is stored as a byte stream
+ * (i.e. it matches what is on disk); otherwise it is stored big-endian.
*/
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
- unsigned int n, const u8 *data)
+ unsigned int n, const u8 *data, bool byte_oriented)
{
- int ret;
- u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
+ u32 buf[64];
+ int ret;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
- for (left = n; left; left -= c) {
+ for (left = n; left; left -= c, data += c) {
c = min(left, 4U);
- for (val = 0, i = 0; i < c; ++i)
- val = (val << 8) + *data++;
+ for (val = 0, i = 0; i < c; ++i) {
+ if (byte_oriented)
+ val = (val << 8) + data[i];
+ else
+ val = (val << 8) + data[c - i - 1];
+ }
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+ byte_oriented);
if (ret)
return ret;
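
The byte-packing loop earlier in t4_write_flash() folds up to four input bytes into the 32-bit word handed to sf1_write(). With @byte_oriented set, the first buffer byte lands in the most significant position, preserving the on-flash byte stream; otherwise the bytes are taken in reverse so a little-endian word image is emitted big-endian. A standalone sketch of just that packing step, with an illustrative helper name:

static u32 sketch_pack_flash_word(const u8 *data, unsigned int c,
				  bool byte_oriented)
{
	u32 val = 0;
	unsigned int i;

	for (i = 0; i < c; i++)		/* c <= 4 */
		val = (val << 8) + (byte_oriented ? data[i]
						  : data[c - i - 1]);
	return val;
}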
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
if (ret)
goto out;
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
if (ret)
goto out;
}
- ret = t4_write_flash(adap,
- fw_start + offsetof(struct fw_hdr, fw_ver),
- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+ true);
out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
/* Copy the supplied PHY Firmware image to the adapter memory location
* allocated by the adapter firmware.
*/
+ spin_lock_bh(&adap->win0_lock);
ret = t4_memory_rw(adap, win, mtype, maddr,
phy_fw_size, (__be32 *)phy_fw_data,
T4_MEMORY_WRITE);
+ spin_unlock_bh(&adap->win0_lock);
if (ret)
return ret;
@@ -6983,7 +6993,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
}
/**
- * t4_init_cmd - ask FW to initialize the device
+ * t4_early_init - ask FW to initialize the device
* @adap: the adapter
* @mbox: mailbox to use for the FW command
*
@@ -7782,7 +7792,6 @@ int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
int idx, bool sleep_ok)
{
struct fw_vi_mac_exact *p;
- u8 addr[] = {0, 0, 0, 0, 0, 0};
struct fw_vi_mac_cmd c;
int ret = 0;
u32 exact;
@@ -7799,7 +7808,7 @@ int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
p = c.u.exact;
p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(idx));
- memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ eth_zero_addr(p->macaddr);
ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
return ret;
}
@@ -10208,7 +10217,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
n = size - i;
else
n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, true);
if (ret)
goto out;
@@ -10224,7 +10233,7 @@ out:
}
/**
- * t4_set_vf_mac - Set MAC address for the specified VF
+ * t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF
* @naddr: the number of MAC addresses
@@ -10677,13 +10686,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
boot_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+ false);
if (ret)
goto out;
}
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
- (const u8 *)header);
+ (const u8 *)header, false);
out:
if (ret)
@@ -10758,7 +10768,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < size; i += SF_PAGE_SIZE) {
n = min_t(u32, size - i, SF_PAGE_SIZE);
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, false);
if (ret)
goto out;
@@ -10770,7 +10780,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < npad; i++) {
u8 data = 0;
- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+ false);
if (ret)
goto out;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 95657da0aa4b..7bc80eeb2c21 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -954,7 +954,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
}
/**
- * check_ring_tx_db - check and potentially ring a TX queue's doorbell
+ * ring_tx_db - check and potentially ring a TX queue's doorbell
* @adapter: the adapter
* @tq: the TX queue
* @n: number of new descriptors to give to HW
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index ef3f1e92632f..59683f79959c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
}
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
* @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_uld_ctx *u_ctx;
if (!tx_info)
return;
+ u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx && u_ctx->detach)
+ return;
/* clear l2t entry */
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->tid != -1) {
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
}
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_open);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (tx_ctx->chcr_info)
goto out;
+ if (u_ctx && u_ctx->detach)
+ goto out;
+
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
if (!tx_info)
goto out;
@@ -569,6 +581,8 @@ free_tid:
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
+
put_module:
/* release module refcount */
module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
+ struct tls_context *tls_ctx;
struct tid_info *t;
+ int ret = 0;
tid = GET_TID(p);
status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+ /* Adding tid */
+ tls_ctx = tls_get_ctx(tx_info->sk);
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx) {
+ ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+ GFP_NOWAIT);
+ if (ret < 0) {
+ pr_err("%s: Failed to allocate tid XA entry = %d\n",
+ __func__, tx_info->tid);
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ goto out;
+ }
+ }
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
} else {
tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+out:
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);
- return 0;
+ return ret;
}
/*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
goto out;
}
u_ctx->lldi = *lldi;
+ u_ctx->detach = false;
+ xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
out:
return u_ctx;
}
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+
+ /* clear tid */
+ if (tx_info->tid != -1)
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+ struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ unsigned long index;
+
+ xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+ tx_info = tx_ctx->chcr_info;
+ clear_conn_resources(tx_info);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
+ }
+}
+
static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_DETACH:
pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
+ u_ctx->detach = true;
list_del(&u_ctx->entry);
+ ch_ktls_reset_all_conn(u_ctx);
+ xa_destroy(&u_ctx->tid_list);
mutex_unlock(&dev_mutex);
break;
default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
adap = pci_get_drvdata(u_ctx->lldi.pdev);
memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
list_del(&u_ctx->entry);
+ xa_destroy(&u_ctx->tid_list);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 18b3b1f02415..10572dc55365 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
+ struct xarray tid_list;
+ bool detach;
};
static inline struct chcr_ktls_ofld_ctx_tx *
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 19dc7dc054a2..bcad69c48074 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -2134,7 +2134,7 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
sk->sk_err = ETIMEDOUT;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 188d871f6b8c..c320cc8ca68d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1564,8 +1564,10 @@ found_ok_skb:
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(thdr->type), &thdr->type);
- if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
- return -EIO;
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+ copied = -EIO;
+ break;
+ }
/* don't send tls header, skip copy */
goto skip_copy;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8df6f081f244..c2ebb3388789 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2356,8 +2356,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
- struct resource *gmacres;
- struct resource *dmares;
struct device *parent;
unsigned int id;
int irq;
@@ -2390,24 +2388,18 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
- dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!dmares) {
- dev_err(dev, "no DMA resource\n");
- return -ENODEV;
- }
- port->dma_base = devm_ioremap_resource(dev, dmares);
- if (IS_ERR(port->dma_base))
+ port->dma_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(port->dma_base)) {
+ dev_err(dev, "get DMA address failed\n");
return PTR_ERR(port->dma_base);
+ }
/* GMAC config memory */
- gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!gmacres) {
- dev_err(dev, "no GMAC resource\n");
- return -ENODEV;
- }
- port->gmac_base = devm_ioremap_resource(dev, gmacres);
- if (IS_ERR(port->gmac_base))
+ port->gmac_base = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(port->gmac_base)) {
+ dev_err(dev, "get GMAC address failed\n");
return PTR_ERR(port->gmac_base);
+ }
/* Interrupt */
irq = platform_get_irq(pdev, 0);
@@ -2502,10 +2494,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
if (ret)
goto unprepare;
- netdev_info(netdev,
- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
- port->irq, &dmares->start,
- &gmacres->start);
return 0;
unprepare:
@@ -2544,17 +2532,13 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
unsigned int retry = 5;
- struct resource *res;
u32 val;
/* Global registers */
geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
if (!geth)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- geth->base = devm_ioremap_resource(dev, res);
+ geth->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(geth->base))
return PTR_ERR(geth->base);
geth->dev = dev;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index b018195f0243..117c26fa5909 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -832,8 +832,8 @@ static struct net_device_stats *de_get_stats(struct net_device *dev)
/* The chip only need report frame silently dropped. */
spin_lock_irq(&de->lock);
- if (netif_running(dev) && netif_device_present(dev))
- __de_get_stats(de);
+ if (netif_running(dev) && netif_device_present(dev))
+ __de_get_stats(de);
spin_unlock_irq(&de->lock);
return &dev->stats;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 683e328b5461..b125d7faefdf 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -396,7 +396,7 @@
<earl@exis.net>.
Updated the PCI interface to conform with the latest
version. I hope nothing is broken...
- Add TX done interrupt modification from suggestion
+ Add TX done interrupt modification from suggestion
by <Austin.Donnelly@cl.cam.ac.uk>.
Fix is_anc_capable() bug reported by
<Austin.Donnelly@cl.cam.ac.uk>.
@@ -1499,7 +1499,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
netif_stop_queue(dev);
load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
- lp->stats.tx_bytes += skb->len;
+ lp->stats.tx_bytes += skb->len;
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
@@ -1651,7 +1651,7 @@ de4x5_rx(struct net_device *dev)
/* Update stats */
lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
+ lp->stats.rx_bytes += pkt_len;
}
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 87a27fe2992d..c763b692e164 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -518,7 +518,7 @@ static void dmfe_remove_one(struct pci_dev *pdev)
DMFE_DBUG(0, "dmfe_remove_one()", 0);
- if (dev) {
+ if (dev) {
unregister_netdev(dev);
pci_iounmap(db->pdev, db->ioaddr);
@@ -567,10 +567,10 @@ static int dmfe_open(struct net_device *dev)
/* CR6 operation mode decision */
if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
(db->chip_revision >= 0x30) ) {
- db->cr6_data |= DMFE_TXTH_256;
+ db->cr6_data |= DMFE_TXTH_256;
db->cr0_data = CR0_DEFAULT;
db->dm910x_chk_mode=4; /* Enter the normal mode */
- } else {
+ } else {
db->cr6_data |= CR6_SFT; /* Store & Forward mode */
db->cr0_data = 0;
db->dm910x_chk_mode = 1; /* Enter the check mode */
@@ -903,7 +903,7 @@ static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
}
}
- txptr = txptr->next_tx_desc;
+ txptr = txptr->next_tx_desc;
}/* End of while */
/* Update TX remove pointer to next */
@@ -1121,7 +1121,7 @@ static void dmfe_timer(struct timer_list *t)
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
unsigned char tmp_cr12;
- unsigned long flags;
+ unsigned long flags;
int link_ok, link_ok_phy;
@@ -1217,7 +1217,7 @@ static void dmfe_timer(struct timer_list *t)
if (link_ok_phy != link_ok) {
DMFE_DBUG (0, "PHY and chip report different link status", 0);
link_ok = link_ok | link_ok_phy;
- }
+ }
if ( !link_ok && netif_carrier_ok(dev)) {
/* Link Failed */
@@ -1699,14 +1699,14 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
}
- /* Write new capability to Phyxcer Reg4 */
+ /* Write new capability to Phyxcer Reg4 */
if ( !(phy_reg & 0x01e0)) {
phy_reg|=db->PHY_reg4;
db->media_mode|=DMFE_AUTO;
}
dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
- /* Restart Auto-Negotiation */
+ /* Restart Auto-Negotiation */
if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
if ( !db->chip_type )
@@ -1754,7 +1754,7 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
}
dmfe_phy_write(db->ioaddr,
db->phy_addr, 0, phy_reg, db->chip_id);
- if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
+ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
mdelay(20);
dmfe_phy_write(db->ioaddr,
db->phy_addr, 0, phy_reg, db->chip_id);
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 412adaa7fdf8..72a09156b48b 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -351,7 +351,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
- add_timer(&tp->timer);
+ add_timer(&tp->timer);
}
return;
@@ -375,7 +375,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
- add_timer(&tp->timer);
+ add_timer(&tp->timer);
}
return;
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 815907259048..0ed598dc7569 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -478,7 +478,6 @@ void t21142_lnk_change(struct net_device *dev, int csr5);
void pnic2_lnk_change(struct net_device *dev, int csr5);
void pnic2_timer(struct timer_list *t);
void pnic2_start_nway(struct net_device *dev);
-void pnic2_lnk_change(struct net_device *dev, int csr5);
/* eeprom.c */
void tulip_parse_eeprom(struct net_device *dev);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 13e73ed15ef0..d67ef7d02d6b 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -780,7 +780,7 @@ static void uli526x_free_tx_pkt(struct net_device *dev,
}
}
- txptr = txptr->next_tx_desc;
+ txptr = txptr->next_tx_desc;
}/* End of while */
/* Update TX remove pointer to next */
@@ -1015,7 +1015,7 @@ static void uli526x_timer(struct timer_list *t)
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
- unsigned long flags;
+ unsigned long flags;
u8 tmp_cr12 = 0;
u32 tmp_cr8;
@@ -1535,14 +1535,14 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
}
- /* Write new capability to Phyxcer Reg4 */
+ /* Write new capability to Phyxcer Reg4 */
if ( !(phy_reg & 0x01e0)) {
phy_reg|=db->PHY_reg4;
db->media_mode|=ULI526X_AUTO;
}
phy->write(db, db->phy_addr, 4, phy_reg);
- /* Restart Auto-Negotiation */
+ /* Restart Auto-Negotiation */
phy->write(db, db->phy_addr, 0, 0x1200);
udelay(50);
}
@@ -1550,7 +1550,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
/*
* Process op-mode
- AUTO mode : PHY controller in Auto-negotiation Mode
+ AUTO mode : PHY controller in Auto-negotiation Mode
* Force mode: PHY controller in force mode with HUB
* N-way force capability with SWITCH
*/
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 514df170ec5d..f6ff1f76eacb 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -36,7 +36,7 @@
power management.
support for big endian descriptors
Copyright (C) 2001 Manfred Spraul
- * ethtool support (jgarzik)
+ * ethtool support (jgarzik)
* Replace some MII-related magic numbers with constants (jgarzik)
TODO:
@@ -1479,7 +1479,7 @@ static int netdev_close(struct net_device *dev)
np->cur_rx, np->dirty_rx);
}
- /* Stop the chip's Tx and Rx processes. */
+ /* Stop the chip's Tx and Rx processes. */
spin_lock_irq(&np->lock);
netif_device_detach(dev);
update_csr6(dev, 0);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index ce61f79f3b7c..ee0ca712dd1c 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1847,20 +1847,20 @@ static int netdev_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
- for (i = 2000; i > 0; i--) {
- if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
break;
mdelay(1);
- }
+ }
- iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
ioaddr + ASIC_HI_WORD(ASICCtrl));
- for (i = 2000; i > 0; i--) {
+ for (i = 2000; i > 0; i--) {
if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
break;
mdelay(1);
- }
+ }
#ifdef __i386__
if (netif_msg_hw(np)) {
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46b0dbab8aad..7c992172933b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
struct ec_bhf_priv *priv = netdev_priv(net_dev);
unregister_netdev(net_dev);
- free_netdev(net_dev);
pci_iounmap(dev, priv->dma_io);
pci_iounmap(dev, priv->io);
+
+ free_netdev(net_dev);
+
pci_release_regions(dev);
pci_clear_master(dev);
pci_disable_device(dev);
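The reordering above fixes a use-after-free: the private area returned by netdev_priv() is carved out of the net_device allocation, so the iomapped pointers stored there must be consumed before free_netdev() releases the memory. A hedged sketch of the safe teardown order, with example_priv and example_remove() as hypothetical names:

#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_priv {
	void __iomem *io;
	void __iomem *dma_io;
};

static void example_remove(struct pci_dev *dev)
{
	struct net_device *net_dev = pci_get_drvdata(dev);
	struct example_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	/* priv lives inside net_dev's allocation: drain it first */
	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);

	free_netdev(net_dev);		/* priv dangles from here on */

	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);
}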
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 701c12c9e033..649c5c429bd7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- spin_lock_bh(&adapter->mcc_cq_lock);
+ spin_lock(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
- spin_unlock_bh(&adapter->mcc_cq_lock);
+ spin_unlock(&adapter->mcc_cq_lock);
return status;
}
@@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
+ local_bh_disable();
status = be_process_mcc(adapter);
+ local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
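With be_process_mcc() taking the plain spinlock, bottom-half exclusion becomes the caller's responsibility: NAPI callers already run with BHs disabled, while the polling and worker paths wrap the call explicitly, as the be_main.c hunk below shows. A minimal sketch of the new calling convention (fragment only):

	/* Outside BH context, bracket the call manually; this is what
	 * spin_lock_bh()/spin_unlock_bh() used to provide implicitly.
	 */
	local_bh_disable();
	status = be_process_mcc(adapter);
	local_bh_enable();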
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6eba29d8e99..361c1c87c183 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5501,7 +5501,9 @@ static void be_worker(struct work_struct *work)
* mcc completions
*/
if (!netif_running(adapter->netdev)) {
+ local_bh_disable();
be_process_mcc(adapter);
+ local_bh_enable();
goto reschedule;
}
@@ -5897,6 +5899,7 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
+ pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index e3954d8835e7..f9a288a6ec8c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -607,7 +607,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
- if (!priv->irq) {
+ if (priv->irq < 0) {
dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
@@ -630,8 +630,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
out_netif_api:
netif_napi_del(&priv->napi);
out_netdev:
- if (err)
- free_netdev(ndev);
+ free_netdev(ndev);
return err;
}
@@ -642,8 +641,8 @@ static s32 nps_enet_remove(struct platform_device *pdev)
struct nps_enet_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
- free_netdev(ndev);
netif_napi_del(&priv->napi);
+ free_netdev(ndev);
return 0;
}
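Two separate fixes land in the nps_enet hunks above: platform_get_irq() reports failure with a negative errno rather than 0, so the test must be "irq < 0"; and netif_napi_del() must run before free_netdev() because the NAPI struct is embedded in the netdev's private area. A hedged fragment of the corrected IRQ lookup:

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* negative errno, e.g. -EPROBE_DEFER or -ENXIO */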
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 04421aec2dfd..11dbbfd38770 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1830,14 +1830,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
if (np && of_get_property(np, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
dev_err(&pdev->dev, "NCSI stack not enabled\n");
+ err = -EINVAL;
goto err_phy_connect;
}
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
- if (!priv->ndev)
+ if (!priv->ndev) {
+ err = -EINVAL;
goto err_phy_connect;
+ }
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
@@ -1856,6 +1859,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
&ftgmac100_adjust_link);
if (!phy) {
dev_err(&pdev->dev, "Failed to connect to phy\n");
+ err = -EINVAL;
goto err_phy_connect;
}
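The three err assignments added above close a classic probe bug: jumping to a shared error label without setting the return code first lets a stale 0 leak out, so probe reports success for a half-initialized device. A hedged fragment of the corrected shape:

	priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
	if (!priv->ndev) {
		err = -EINVAL;		/* previously missing on this path */
		goto err_phy_connect;
	}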
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 0908771aa9ac..0f141c14d72d 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -144,7 +144,7 @@ struct chip_info {
};
static const struct chip_info skel_netdrv_tbl[] = {
- { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
};
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 177c020bf34a..e6826561cf11 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2558,13 +2558,9 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
u32 xdp_act;
int err;
- rcu_read_lock();
-
xdp_prog = READ_ONCE(priv->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
+ if (!xdp_prog)
return XDP_PASS;
- }
xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
&dpaa_fq->xdp_rxq);
@@ -2638,8 +2634,6 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
break;
}
- rcu_read_unlock();
-
return xdp_act;
}
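Dropping rcu_read_lock()/rcu_read_unlock() around the XDP program here (and in the dpaa2 hunk below) relies on the guarantee that softirq/NAPI context, where local_bh_disable() is in effect, already constitutes an RCU read-side critical section, so the program pointer cannot be freed under the reader. A hedged sketch, with example_priv a hypothetical stand-in:

#include <linux/filter.h>

struct example_priv {
	struct bpf_prog *xdp_prog;
};

/* Runs from NAPI poll: BHs are disabled, which is itself an RCU
 * read-side critical section, so no explicit rcu_read_lock() is needed.
 */
static u32 example_run_xdp(struct example_priv *priv, struct xdp_buff *xdp)
{
	struct bpf_prog *prog = READ_ONCE(priv->xdp_prog);

	if (!prog)
		return XDP_PASS;

	return bpf_prog_run_xdp(prog, xdp);
}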
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index b87db0846e10..8356af4631fd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -121,10 +121,14 @@ DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
+ struct fsl_mc_device *dpni_dev;
struct dentry *dir;
+ char name[10];
/* Create a directory for the interface */
- dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ snprintf(name, 10, "dpni.%d", dpni_dev->obj_desc.id);
+ dir = debugfs_create_dir(name, dpaa2_dbg_root);
priv->dbg.dir = dir;
/* per-cpu stats file */
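Naming the directory after the immutable MC object ("dpni.N") rather than the netdev name keeps the debugfs path stable when udev renames the interface. A hedged restatement of the lookup-and-format step:

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	snprintf(name, sizeof(name), "dpni.%d", dpni_dev->obj_desc.id);
	dir = debugfs_create_dir(name, dpaa2_dbg_root);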
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index e0c3c58e2ac7..973352393bd4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -352,8 +352,6 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
u32 xdp_act = XDP_PASS;
int err, offset;
- rcu_read_lock();
-
xdp_prog = READ_ONCE(ch->xdp.prog);
if (!xdp_prog)
goto out;
@@ -414,7 +412,6 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
ch->xdp.res |= xdp_act;
out:
- rcu_read_unlock();
return xdp_act;
}
@@ -4164,10 +4161,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
if (dpaa2_eth_is_type_phy(priv)) {
err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ if (err && err != -EPROBE_DEFER)
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
+ if (err)
goto err_close_mac;
- }
}
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ccaf7e35abeb..ae6d382d8735 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/acpi.h>
+#include <linux/property.h>
+
#include "dpaa2-eth.h"
#include "dpaa2-mac.h"
@@ -34,39 +37,51 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
-/* Caller must call of_node_put on the returned value */
-static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
+ u16 dpmac_id)
{
- struct device_node *dpmacs, *dpmac = NULL;
- u32 id;
+ struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct device_node *dpmacs = NULL;
int err;
+ u32 id;
- dpmacs = of_find_node_by_name(NULL, "dpmacs");
- if (!dpmacs)
- return NULL;
+ fwnode = dev_fwnode(dev->parent);
+ if (is_of_node(fwnode)) {
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+ parent = of_fwnode_handle(dpmacs);
+ } else if (is_acpi_node(fwnode)) {
+ parent = fwnode;
+ }
- while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
- err = of_property_read_u32(dpmac, "reg", &id);
+ fwnode_for_each_child_node(parent, child) {
+ err = -EINVAL;
+ if (is_acpi_device_node(child))
+ err = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id);
+ else if (is_of_node(child))
+ err = of_property_read_u32(to_of_node(child), "reg", &id);
if (err)
continue;
- if (id == dpmac_id)
- break;
- }
+ if (id == dpmac_id) {
+ of_node_put(dpmacs);
+ return child;
+ }
+ }
of_node_put(dpmacs);
-
- return dpmac;
+ return NULL;
}
-static int dpaa2_mac_get_if_mode(struct device_node *node,
+static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
struct dpmac_attr attr)
{
phy_interface_t if_mode;
int err;
- err = of_get_phy_mode(node, &if_mode);
- if (!err)
- return if_mode;
+ err = fwnode_get_phy_mode(dpmac_node);
+ if (err > 0)
+ return err;
err = phy_mode(attr.eth_if, &if_mode);
if (!err)
@@ -235,26 +250,27 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
};
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
- struct device_node *dpmac_node, int id)
+ struct fwnode_handle *dpmac_node,
+ int id)
{
struct mdio_device *mdiodev;
- struct device_node *node;
+ struct fwnode_handle *node;
- node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
- if (!node) {
+ node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
+ if (IS_ERR(node)) {
/* do not error out on old DTS files */
netdev_warn(mac->net_dev, "pcs-handle node not found\n");
return 0;
}
- if (!of_device_is_available(node)) {
+ if (!fwnode_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
- of_node_put(node);
+ fwnode_handle_put(node);
return -ENODEV;
}
- mdiodev = of_mdio_find_device(node);
- of_node_put(node);
+ mdiodev = fwnode_mdio_find_device(node);
+ fwnode_handle_put(node);
if (!mdiodev)
return -EPROBE_DEFER;
@@ -283,36 +299,33 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
- struct device_node *dpmac_node;
+ struct fwnode_handle *dpmac_node;
struct phylink *phylink;
int err;
mac->if_link_type = mac->attr.link_type;
- dpmac_node = dpaa2_mac_get_node(mac->attr.id);
+ dpmac_node = mac->fw_node;
if (!dpmac_node) {
netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
return -ENODEV;
}
err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
- if (err < 0) {
- err = -EINVAL;
- goto err_put_node;
- }
+ if (err < 0)
+ return -EINVAL;
mac->if_mode = err;
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
*/
- if (of_phy_is_fixed_link(dpmac_node) &&
+ if (of_phy_is_fixed_link(to_of_node(dpmac_node)) &&
(mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
netdev_err(net_dev, "RGMII delay not supported\n");
- err = -EINVAL;
- goto err_put_node;
+ return -EINVAL;
}
if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
@@ -320,14 +333,14 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
- goto err_put_node;
+ return err;
}
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
phylink = phylink_create(&mac->phylink_config,
- of_fwnode_handle(dpmac_node), mac->if_mode,
+ dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
@@ -338,22 +351,18 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
if (mac->pcs)
phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
- err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+ err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
- netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+ netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
goto err_phylink_destroy;
}
- of_node_put(dpmac_node);
-
return 0;
err_phylink_destroy:
phylink_destroy(mac->phylink);
err_pcs_destroy:
dpaa2_pcs_destroy(mac);
-err_put_node:
- of_node_put(dpmac_node);
return err;
}
@@ -388,6 +397,12 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ /* Find the device node representing the MAC device and link the device
+ * behind the associated netdev to it.
+ */
+ mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ net_dev->dev.of_node = to_of_node(mac->fw_node);
+
return 0;
err_close_dpmac:
@@ -400,6 +415,8 @@ void dpaa2_mac_close(struct dpaa2_mac *mac)
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ if (mac->fw_node)
+ fwnode_handle_put(mac->fw_node);
}
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 13d42dd58ec9..7842cbb2207a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -24,6 +24,7 @@ struct dpaa2_mac {
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
struct lynx_pcs *pcs;
+ struct fwnode_handle *fw_node;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
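The dpaa2-mac conversion above systematically swaps OF-only helpers (of_get_next_child, of_get_phy_mode, of_parse_phandle, of_mdio_find_device, phylink_of_phy_connect) for their fwnode equivalents so one code path serves both DT and ACPI firmware. A hedged sketch of the shared child-by-id lookup, with example_find_child() a hypothetical name:

#include <linux/acpi.h>
#include <linux/property.h>

static struct fwnode_handle *example_find_child(struct fwnode_handle *parent,
						u32 wanted)
{
	struct fwnode_handle *child;
	u32 id;

	fwnode_for_each_child_node(parent, child) {
		if (is_acpi_device_node(child)) {
			/* ACPI: the id comes from the child's _ADR */
			if (acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id))
				continue;
		} else if (fwnode_property_read_u32(child, "reg", &id)) {
			continue;	/* DT: the id comes from "reg" */
		}
		if (id == wanted)
			return child;	/* caller must fwnode_handle_put() */
	}

	return NULL;
}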
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 05de37c3b64c..f3d12d0714fb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1625,7 +1625,7 @@ static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
return 0;
}
-static int dpaa2_switch_port_attr_set(struct net_device *netdev,
+static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index 8b356c485507..ee1468e3eaa3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -99,15 +99,13 @@ EXPORT_SYMBOL(enetc_ierb_register_pf);
static int enetc_ierb_probe(struct platform_device *pdev)
{
struct enetc_ierb *ierb;
- struct resource *res;
void __iomem *regs;
ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL);
if (!ierb)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 31274325159a..c84f6c226743 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <asm/unaligned.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/fsl/enetc_mdio.h>
@@ -17,15 +18,15 @@ static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
- *(u32 *)addr = upper;
- *(u16 *)(addr + 4) = lower;
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
}
static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
const u8 *addr)
{
- u32 upper = *(const u32 *)addr;
- u16 lower = *(const u16 *)(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+ u16 lower = get_unaligned_le16(addr + 4);
__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
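Casting a 6-byte MAC address buffer to u32/u16 assumes alignment the caller never promised and bakes in host byte order; the put_unaligned_le*/get_unaligned_le* helpers make both explicit. A self-contained sketch of what the store helper does, assuming nothing beyond standard C:

#include <stdint.h>
#include <string.h>

/* Behaves like put_unaligned_le32(): little-endian byte order,
 * no alignment requirement on the destination pointer.
 */
static inline void put_unaligned_le32_demo(uint32_t val, uint8_t *p)
{
	uint8_t tmp[4] = {
		(uint8_t)val, (uint8_t)(val >> 8),
		(uint8_t)(val >> 16), (uint8_t)(val >> 24)
	};

	memcpy(p, tmp, sizeof(tmp));
}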
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index af699f2ad095..4577226d3c6a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -465,8 +465,13 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct streamid_conf *si_conf;
u16 data_size;
dma_addr_t dma;
+ int port;
int err;
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
if (sid->index >= priv->psfp_cap.max_streamid)
return -EINVAL;
@@ -499,7 +504,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf = &cbd.sid_set;
/* Only one port supported for one entry, set itself */
- si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ si_conf->iports = cpu_to_le32(1 << port);
si_conf->id_type = 1;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -524,7 +529,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf->en = 0x80;
si_conf->stream_handle = cpu_to_le32(sid->handle);
- si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ si_conf->iports = cpu_to_le32(1 << port);
si_conf->id_type = sid->filtertype;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -567,6 +572,11 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
{
struct enetc_cbd cbd = {.cmd = 0};
struct sfi_conf *sfi_config;
+ int port;
+
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
cbd.index = cpu_to_le16(sfi->index);
cbd.cls = BDCR_CMD_STREAM_FILTER;
@@ -586,8 +596,7 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
}
sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
- sfi_config->input_ports =
- cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ sfi_config->input_ports = cpu_to_le32(1 << port);
/* The priority value which may be matched against the
* frame’s priority value to determine a match for this entry.
@@ -1548,7 +1557,7 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct flow_block_offload *f = type_data;
- int err;
+ int port, err;
err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
enetc_setup_tc_block_cb,
@@ -1558,10 +1567,18 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
switch (f->command) {
case FLOW_BLOCK_BIND:
- set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ set_bit(port, &epsfp.dev_bitmap);
break;
case FLOW_BLOCK_UNBIND:
- clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ clear_bit(port, &epsfp.dev_bitmap);
if (!epsfp.dev_bitmap)
clean_psfp_all();
break;
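enetc_pf_to_port() can return a negative value for an unrecognized PF, and that value previously flowed unchecked into a shift and into set_bit()/clear_bit(); validating it once up front avoids an undefined shift count and out-of-range bitmap writes. The guard pattern repeated through the hunks above:

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;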
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0602d5d5d2ee..2e002e4b4b4a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -467,6 +467,11 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+/* i.MX6SX ENET IP supports multiple queues (3 queues); use this quirk to
+ * represent this ENET IP.
+ */
+#define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f2065f9d02e6..8aea707a65a7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -76,6 +76,8 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME "fec"
+static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
+
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
@@ -122,7 +124,7 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
- FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -421,6 +423,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
ebdp->cbd_bdu = 0;
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
@@ -954,7 +957,7 @@ fec_restart(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
@@ -1165,7 +1168,7 @@ fec_stop(struct net_device *ndev)
* instead of reset MAC itself.
*/
if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -1662,7 +1665,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
}
/* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1688,8 @@ static void fec_get_mac(struct net_device *ndev)
ret = of_get_mac_address(np, tmpaddr);
if (!ret)
iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
}
}
@@ -1723,7 +1728,7 @@ static void fec_get_mac(struct net_device *ndev)
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
- return;
+ return 0;
}
memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1736,8 @@ static void fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+ return 0;
}
/* ------------------------------------------------------------------------- */
@@ -2566,7 +2573,7 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(tx_itr, fep->hwp + FEC_TXIC1);
writel(rx_itr, fep->hwp + FEC_RXIC1);
writel(tx_itr, fep->hwp + FEC_TXIC2);
@@ -3235,10 +3242,40 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
+static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+{
+ struct vlan_ethhdr *vhdr;
+ unsigned short vlan_TCI = 0;
+
+ if (skb->protocol == htons(ETH_P_ALL)) {
+ vhdr = (struct vlan_ethhdr *)(skb->data);
+ vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+ }
+
+ return vlan_TCI;
+}
+
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 vlan_tag;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return netdev_pick_tx(ndev, skb, NULL);
+
+ vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+ if (!vlan_tag)
+ return vlan_tag;
+
+ return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_select_queue = fec_enet_select_queue,
.ndo_set_rx_mode = set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
@@ -3290,7 +3327,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3298,11 +3337,15 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
/* Get the Ethernet address */
- fec_get_mac(ndev);
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
@@ -3361,7 +3404,7 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
fep->tx_align = 0;
fep->rx_align = 0x3f;
}
@@ -3376,6 +3419,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
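In fec_enet_select_queue() above, the PCP (priority) field is the top 3 bits of the 16-bit VLAN TCI, so "tci >> 13" yields 0..7 and the 8-entry table collapses those priorities onto the three i.MX queues. A self-contained sketch of the mapping with a worked value:

#include <stdint.h>

static const uint16_t pri_to_queue[8] = { 0, 0, 1, 1, 1, 2, 2, 2 };

static uint16_t pick_queue(uint16_t tci)
{
	/* e.g. TCI 0xa001: PCP = 0xa001 >> 13 = 5, so queue 2 */
	return pri_to_queue[tci >> 13];
}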
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1753807cbf97..d71eac7e1924 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ if (!fep->cycle_speed) {
+ fep->cycle_speed = NSEC_PER_SEC;
+ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+ }
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f2945abdb041..9646483137c4 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -274,32 +274,44 @@ static void gfar_configure_coalescing_all(struct gfar_private *priv)
gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
-static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
- unsigned long tx_packets = 0, tx_bytes = 0;
int i;
for (i = 0; i < priv->num_rx_queues; i++) {
- rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
- rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_dropped = rx_dropped;
-
for (i = 0; i < priv->num_tx_queues; i++) {
- tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
- tx_packets += priv->tx_queue[i]->stats.tx_packets;
+ stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+ unsigned long flags;
+ u32 rdrp, car, car_before;
+ u64 rdrp_offset;
+
+ spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ do {
+ car_before = car;
+ rdrp = gfar_read(&rmon->rdrp);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ } while (car != car_before);
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ rdrp_offset = priv->rmon_overflow.rdrp;
+ spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
- return &dev->stats;
+ stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
+ }
}
/* Set the appropriate hash bit for the given addr */
@@ -390,7 +402,8 @@ static void gfar_ints_enable(struct gfar_private *priv)
for (i = 0; i < priv->num_grps; i++) {
struct gfar __iomem *regs = priv->gfargrp[i].regs;
/* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ gfar_write(&regs->imask,
+ IMASK_DEFAULT | priv->rmon_overflow.imask);
}
}
@@ -2298,7 +2311,7 @@ static irqreturn_t gfar_receive(int irq, void *grp_id)
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
- imask &= IMASK_RX_DISABLED;
+ imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_rx);
@@ -2322,7 +2335,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
if (likely(napi_schedule_prep(&grp->napi_tx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
- imask &= IMASK_TX_DISABLED;
+ imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_tx);
@@ -2693,6 +2706,18 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
+ if (events & IEVENT_MSRO) {
+ struct rmon_mib __iomem *rmon = &regs->rmon;
+ u32 car;
+
+ spin_lock(&priv->rmon_overflow.lock);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ spin_unlock(&priv->rmon_overflow.lock);
+ }
if (events & IEVENT_BSY) {
dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
@@ -3109,11 +3134,14 @@ static void gfar_hw_init(struct gfar_private *priv)
/* Zero out the rmon mib registers if it has them */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
- memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+ memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
/* Mask off the CAM interrupts */
gfar_write(&regs->rmon.cam1, 0xffffffff);
gfar_write(&regs->rmon.cam2, 0xffffffff);
+ /* Clear the CAR registers (w1c style) */
+ gfar_write(&regs->rmon.car1, 0xffffffff);
+ gfar_write(&regs->rmon.car2, 0xffffffff);
}
/* Initialize ECNTRL */
@@ -3157,7 +3185,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_get_stats = gfar_get_stats,
+ .ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -3267,6 +3295,14 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_hw_init(priv);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+
+ spin_lock_init(&priv->rmon_overflow.lock);
+ priv->rmon_overflow.imask = IMASK_MSRO;
+ gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
+ }
+
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
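The gfar_get_stats64() hunk above pairs a 16-bit hardware drop counter (RDRP) with a software overflow count driven by the carry (CAR) bit. Reading carry, counter, then carry again, and retrying until the carry is stable, guarantees the (overflow, counter) pair is coherent even if the counter wraps mid-read. A self-contained sketch of that snapshot loop, with the register layout and bit position assumed for illustration:

#include <stdint.h>

struct rmon_regs {
	volatile uint32_t car1;	/* carry bits, write-1-to-clear */
	volatile uint32_t rdrp;	/* 16-bit drop counter */
};

static uint64_t read_rx_dropped(struct rmon_regs *r, uint64_t *overflows)
{
	uint32_t car, car_before, rdrp;

	car = r->car1 & 1;		/* assume the RDR carry is bit 0 */
	do {
		car_before = car;
		rdrp = r->rdrp;
		car = r->car1 & 1;
	} while (car != car_before);	/* retry if a wrap raced us */

	if (car) {
		(*overflows)++;
		r->car1 = 1;		/* acknowledge the carry */
	}

	return rdrp + (*overflows << 16);
}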
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5ea47df93e5e..ca5e14f908fe 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -445,6 +445,60 @@ struct ethtool_rx_list {
#define RQFPR_PER 0x00000002
#define RQFPR_EER 0x00000001
+/* CAR1 bits */
+#define CAR1_C164 0x80000000
+#define CAR1_C1127 0x40000000
+#define CAR1_C1255 0x20000000
+#define CAR1_C1511 0x10000000
+#define CAR1_C11K 0x08000000
+#define CAR1_C1MAX 0x04000000
+#define CAR1_C1MGV 0x02000000
+#define CAR1_C1REJ 0x00020000
+#define CAR1_C1RBY 0x00010000
+#define CAR1_C1RPK 0x00008000
+#define CAR1_C1RFC 0x00004000
+#define CAR1_C1RMC 0x00002000
+#define CAR1_C1RBC 0x00001000
+#define CAR1_C1RXC 0x00000800
+#define CAR1_C1RXP 0x00000400
+#define CAR1_C1RXU 0x00000200
+#define CAR1_C1RAL 0x00000100
+#define CAR1_C1RFL 0x00000080
+#define CAR1_C1RCD 0x00000040
+#define CAR1_C1RCS 0x00000020
+#define CAR1_C1RUN 0x00000010
+#define CAR1_C1ROV 0x00000008
+#define CAR1_C1RFR 0x00000004
+#define CAR1_C1RJB 0x00000002
+#define CAR1_C1RDR 0x00000001
+
+/* CAM1 bits */
+#define CAM1_M164 0x80000000
+#define CAM1_M1127 0x40000000
+#define CAM1_M1255 0x20000000
+#define CAM1_M1511 0x10000000
+#define CAM1_M11K 0x08000000
+#define CAM1_M1MAX 0x04000000
+#define CAM1_M1MGV 0x02000000
+#define CAM1_M1REJ 0x00020000
+#define CAM1_M1RBY 0x00010000
+#define CAM1_M1RPK 0x00008000
+#define CAM1_M1RFC 0x00004000
+#define CAM1_M1RMC 0x00002000
+#define CAM1_M1RBC 0x00001000
+#define CAM1_M1RXC 0x00000800
+#define CAM1_M1RXP 0x00000400
+#define CAM1_M1RXU 0x00000200
+#define CAM1_M1RAL 0x00000100
+#define CAM1_M1RFL 0x00000080
+#define CAM1_M1RCD 0x00000040
+#define CAM1_M1RCS 0x00000020
+#define CAM1_M1RUN 0x00000010
+#define CAM1_M1ROV 0x00000008
+#define CAM1_M1RFR 0x00000004
+#define CAM1_M1RJB 0x00000002
+#define CAM1_M1RDR 0x00000001
+
/* TxBD status field bits */
#define TXBD_READY 0x8000
#define TXBD_PADCRC 0x4000
@@ -609,6 +663,15 @@ struct rmon_mib
u32 cam2; /* 0x.73c - Carry Mask Register Two */
};
+struct rmon_overflow {
+ /* lock for synchronization of the rdrp field of this struct, and
+ * CAR1/CAR2 registers
+ */
+ spinlock_t lock;
+ u32 imask;
+ u64 rdrp;
+};
+
struct gfar_extra_stats {
atomic64_t rx_alloc_err;
atomic64_t rx_large;
@@ -913,8 +976,8 @@ enum {
* Per TX queue stats
*/
struct tx_q_stats {
- unsigned long tx_packets;
- unsigned long tx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
/**
@@ -963,9 +1026,9 @@ struct gfar_priv_tx_q {
* Per RX queue stats
*/
struct rx_q_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long rx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
};
struct gfar_rx_buff {
@@ -1096,6 +1159,7 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+ struct rmon_overflow rmon_overflow;
/* PHY stuff */
phy_interface_t interface;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index e0936510fa34..0acfafb73db1 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3590,10 +3590,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = kmalloc(sizeof(*ug_info), GFP_KERNEL);
+ ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
if (ug_info == NULL)
return -ENOMEM;
- memcpy(ug_info, &ugeth_primary_info, sizeof(*ug_info));
ug_info->uf_info.ucc_num = ucc_num;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index bfa2826c5545..0b68852379da 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -2,6 +2,7 @@
* QorIQ 10G MDIO Controller
*
* Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2021 NXP
*
* Authors: Andy Fleming <afleming@freescale.com>
* Timur Tabi <timur@freescale.com>
@@ -11,15 +12,17 @@
* kind, whether express or implied.
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/phy.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
+#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
/* Number of microseconds to wait for a register to respond */
#define TIMEOUT 1000
@@ -243,10 +246,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static int xgmac_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct mii_bus *bus;
- struct resource *res;
+ struct fwnode_handle *fwnode;
struct mdio_fsl_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
int ret;
/* In DPAA-1, MDIO is one of the many FMan sub-devices. The FMan
@@ -279,13 +282,22 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ /* For both the ACPI and DT cases, the endianness of the MDIO
+ * controller needs to be specified using the "little-endian" property.
+ */
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
- ret = of_mdiobus_register(bus, np);
+ fwnode = pdev->dev.fwnode;
+ if (is_of_node(fwnode))
+ ret = of_mdiobus_register(bus, to_of_node(fwnode));
+ else if (is_acpi_node(fwnode))
+ ret = acpi_mdiobus_register(bus, fwnode);
+ else
+ ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
goto err_registration;
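The probe now dispatches on the fwnode type so one driver registers the MDIO bus from either a DT node or an ACPI table; acpi_mdiobus_register() comes from the <linux/acpi_mdio.h> header added to the include list above. The dispatch, restated as a fragment:

	fwnode = dev_fwnode(&pdev->dev);
	if (is_of_node(fwnode))
		ret = of_mdiobus_register(bus, to_of_node(fwnode));
	else if (is_acpi_node(fwnode))
		ret = acpi_mdiobus_register(bus, fwnode);
	else
		ret = -EINVAL;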
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0c0504950d8..62c0bed82ced 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -812,9 +812,9 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
if (length < ETH_ZLEN)
{
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
}
netif_stop_queue(dev);
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index b8f04d052fda..8641a00f8e63 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
- depends on PCI_MSI
+ depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
help
This driver supports Google Virtual NIC (gVNIC)
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 3354ce40eb97..b9a6be76531b 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,4 +1,4 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_rx.o gve_ethtool.o gve_adminq.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index daf07c0f790b..1d3188e8e3b3 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#ifndef _GVE_H_
@@ -11,7 +11,9 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
+
#include "gve_desc.h"
+#include "gve_desc_dqo.h"
#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
@@ -40,6 +42,11 @@
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
+/* PTYPEs are always 10 bits. */
+#define GVE_NUM_PTYPES 1024
+
+#define GVE_RX_BUFFER_SIZE_DQO 2048
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -51,7 +58,8 @@ struct gve_rx_desc_queue {
struct gve_rx_slot_page_info {
struct page *page;
void *page_address;
- u8 page_offset; /* flipped to second half? */
+ u32 page_offset; /* offset to write to in page */
+ int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
u8 can_flip;
};
@@ -76,17 +84,117 @@ struct gve_rx_data_queue {
struct gve_priv;
-/* An RX ring that contains a power-of-two sized desc and data ring. */
+/* RX buffer queue for posting buffers to HW.
+ * Each RX (completion) queue has a corresponding buffer queue.
+ */
+struct gve_rx_buf_queue_dqo {
+ struct gve_rx_desc_dqo *desc_ring;
+ dma_addr_t bus;
+ u32 head; /* Pointer to start cleaning buffers at. */
+ u32 tail; /* Last posted buffer index + 1 */
+ u32 mask; /* Mask for indices to the size of the ring */
+};
+
+/* RX completion queue to receive packets from HW. */
+struct gve_rx_compl_queue_dqo {
+ struct gve_rx_compl_desc_dqo *desc_ring;
+ dma_addr_t bus;
+
+ /* Number of slots which did not have a buffer posted yet. We should not
+ * post more buffers than the queue size to avoid HW overrunning the
+ * queue.
+ */
+ int num_free_slots;
+
+ /* HW uses a "generation bit" to notify SW of new descriptors. When a
+ * descriptor's generation bit is different from the current generation,
+ * that descriptor is ready to be consumed by SW.
+ */
+ u8 cur_gen_bit;
+
+ /* Pointer into desc_ring where the next completion descriptor will be
+ * received.
+ */
+ u32 head;
+ u32 mask; /* Mask for indices to the size of the ring */
+};
+
+/* Stores state for tracking buffers posted to HW */
+struct gve_rx_buf_state_dqo {
+ /* The page posted to HW. */
+ struct gve_rx_slot_page_info page_info;
+
+ /* The DMA address corresponding to `page_info`. */
+ dma_addr_t addr;
+
+ /* Last offset into the page when it only had a single reference, at
+ * which point every other offset is free to be reused.
+ */
+ u32 last_single_ref_offset;
+
+ /* Linked list index to next element in the list, or -1 if none */
+ s16 next;
+};
+
+/* `head` and `tail` are indices into an array, or -1 if empty. */
+struct gve_index_list {
+ s16 head;
+ s16 tail;
+};
+
+/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
- struct gve_rx_desc_queue desc;
- struct gve_rx_data_queue data;
+ union {
+ /* GQI fields */
+ struct {
+ struct gve_rx_desc_queue desc;
+ struct gve_rx_data_queue data;
+
+ /* threshold for posting new buffs and descs */
+ u32 db_threshold;
+ };
+
+ /* DQO fields. */
+ struct {
+ struct gve_rx_buf_queue_dqo bufq;
+ struct gve_rx_compl_queue_dqo complq;
+
+ struct gve_rx_buf_state_dqo *buf_states;
+ u16 num_buf_states;
+
+ /* Linked list of gve_rx_buf_state_dqo. Index into
+ * buf_states, or -1 if empty.
+ */
+ s16 free_buf_states;
+
+ /* Linked list of gve_rx_buf_state_dqo. Indexes into
+ * buf_states, or -1 if empty.
+ *
+ * This list contains buf_states which are pointing to
+ * valid buffers.
+ *
+ * We use a FIFO here in order to increase the
+ * probability that buffers can be reused by increasing
+ * the time between usages.
+ */
+ struct gve_index_list recycled_buf_states;
+
+ /* Linked list of gve_rx_buf_state_dqo. Indexes into
+ * buf_states, or -1 if empty.
+ *
+ * This list contains buf_states which have buffers
+ * which cannot be reused yet.
+ */
+ struct gve_index_list used_buf_states;
+ } dqo;
+ };
+
u64 rbytes; /* free-running bytes received */
u64 rpackets; /* free-running packets received */
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
- u32 db_threshold; /* threshold for posting new buffs and descs */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
@@ -97,6 +205,10 @@ struct gve_rx_ring {
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
dma_addr_t q_resources_bus; /* dma address for the queue resources */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
+
+ /* head and tail of skb chain for the current packet or NULL if none */
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
};
/* A TX desc ring entry */
@@ -137,23 +249,161 @@ struct gve_tx_fifo {
struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
-/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
+/* TX descriptor for DQO format */
+union gve_tx_desc_dqo {
+ struct gve_tx_pkt_desc_dqo pkt;
+ struct gve_tx_tso_context_desc_dqo tso_ctx;
+ struct gve_tx_general_context_desc_dqo general_ctx;
+};
+
+enum gve_packet_state {
+ /* Packet is in free list, available to be allocated.
+ * This should always be zero since state is not explicitly initialized.
+ */
+ GVE_PACKET_STATE_UNALLOCATED,
+ /* Packet is expecting a regular data completion or miss completion */
+ GVE_PACKET_STATE_PENDING_DATA_COMPL,
+ /* Packet has received a miss completion and is expecting a
+ * re-injection completion.
+ */
+ GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
+ /* No valid completion received within the specified timeout. */
+ GVE_PACKET_STATE_TIMED_OUT_COMPL,
+};
+
+struct gve_tx_pending_packet_dqo {
+ struct sk_buff *skb; /* skb for this packet */
+
+ /* 0th element corresponds to the linear portion of `skb`, should be
+ * unmapped with `dma_unmap_single`.
+ *
+ * All others correspond to `skb`'s frags and should be unmapped with
+ * `dma_unmap_page`.
+ */
+ struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+ u16 num_bufs;
+
+ /* Linked list index to next element in the list, or -1 if none */
+ s16 next;
+
+ /* Linked list index to prev element in the list, or -1 if none.
+ * Used for tracking either outstanding miss completions or prematurely
+ * freed packets.
+ */
+ s16 prev;
+
+ /* Identifies the current state of the packet as defined in
+ * `enum gve_packet_state`.
+ */
+ u8 state;
+
+ /* If packet is an outstanding miss completion, then the packet is
+ * freed if the corresponding re-injection completion is not received
+ * before kernel jiffies exceeds timeout_jiffies.
+ */
+ unsigned long timeout_jiffies;
+};
+
+/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
/* Cacheline 0 -- Accessed & dirtied during transmit */
- struct gve_tx_fifo tx_fifo;
- u32 req; /* driver tracked head pointer */
- u32 done; /* driver tracked tail pointer */
+ union {
+ /* GQI fields */
+ struct {
+ struct gve_tx_fifo tx_fifo;
+ u32 req; /* driver tracked head pointer */
+ u32 done; /* driver tracked tail pointer */
+ };
+
+ /* DQO fields. */
+ struct {
+ /* Linked list of gve_tx_pending_packet_dqo. Index into
+ * pending_packets, or -1 if empty.
+ *
+ * This is a consumer list owned by the TX path. When it
+ * runs out, the producer list is stolen from the
+ * completion handling path
+ * (dqo_compl.free_pending_packets).
+ */
+ s16 free_pending_packets;
+
+ /* Cached value of `dqo_compl.hw_tx_head` */
+ u32 head;
+ u32 tail; /* Last posted buffer index + 1 */
+
+ /* Index of the last descriptor with "report event" bit
+ * set.
+ */
+ u32 last_re_idx;
+ } dqo_tx;
+ };
/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
- __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
+ union {
+ /* GQI fields */
+ struct {
+ /* NIC tail pointer */
+ __be32 last_nic_done;
+ };
+
+ /* DQO fields. */
+ struct {
+ u32 head; /* Last read on compl_desc */
+
+ /* Tracks the current gen bit of compl_q */
+ u8 cur_gen_bit;
+
+ /* Linked list of gve_tx_pending_packet_dqo. Index into
+ * pending_packets, or -1 if empty.
+ *
+ * This is the producer list, owned by the completion
+ * handling path. When the consumer list
+ * (dqo_tx.free_pending_packets) runs out, this list
+ * will be stolen.
+ */
+ atomic_t free_pending_packets;
+
+ /* Last TX ring index fetched by HW */
+ atomic_t hw_tx_head;
+
+ /* List to track pending packets which received a miss
+ * completion but not a corresponding reinjection.
+ */
+ struct gve_index_list miss_completions;
+
+ /* List to track pending packets that were completed
+ * before receiving a valid completion because they
+ * reached a specified timeout.
+ */
+ struct gve_index_list timed_out_completions;
+ } dqo_compl;
+ } ____cacheline_aligned;
u64 pkt_done; /* free-running - total packets completed */
u64 bytes_done; /* free-running - total bytes completed */
u64 dropped_pkt; /* free-running - total packets dropped */
u64 dma_mapping_error; /* count of dma mapping errors */
/* Cacheline 2 -- Read-mostly fields */
- union gve_tx_desc *desc ____cacheline_aligned;
- struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
+ union {
+ /* GQI fields */
+ struct {
+ union gve_tx_desc *desc;
+
+ /* Maps 1:1 to a desc */
+ struct gve_tx_buffer_state *info;
+ };
+
+ /* DQO fields. */
+ struct {
+ union gve_tx_desc_dqo *tx_ring;
+ struct gve_tx_compl_desc *compl_ring;
+
+ struct gve_tx_pending_packet_dqo *pending_packets;
+ s16 num_pending_packets;
+
+ u32 complq_mask; /* complq size is complq_mask + 1 */
+ } dqo;
+ } ____cacheline_aligned;
struct netdev_queue *netdev_txq;
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
struct device *dev;
@@ -167,6 +417,7 @@ struct gve_tx_ring {
u32 ntfy_id; /* notification block index */
dma_addr_t bus; /* dma address of the descr ring */
dma_addr_t q_resources_bus; /* dma address of the queue resources */
+ dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
@@ -194,6 +445,31 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
+struct gve_options_dqo_rda {
+ u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
+ u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
+};
+
+struct gve_ptype {
+ u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
+ u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
+};
+
+struct gve_ptype_lut {
+ struct gve_ptype ptypes[GVE_NUM_PTYPES];
+};
+
+/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
+ * when the entire configure_device_resources command is zeroed out and the
+ * queue_format is not specified.
+ */
+enum gve_queue_format {
+ GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
+ GVE_GQI_RDA_FORMAT = 0x1,
+ GVE_GQI_QPL_FORMAT = 0x2,
+ GVE_DQO_RDA_FORMAT = 0x3,
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -216,7 +492,6 @@ struct gve_priv {
u64 num_registered_pages; /* num pages registered with NIC */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
@@ -251,6 +526,7 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_get_ptype_map_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -275,6 +551,14 @@ struct gve_priv {
/* Gvnic device link speed from hypervisor. */
u64 link_speed;
+
+ struct gve_options_dqo_rda options_dqo_rda;
+ struct gve_ptype_lut *ptype_lut_dqo;
+
+ /* Must be a power of two. */
+ int data_buffer_size_dqo;
+
+ enum gve_queue_format queue_format;
};
enum gve_service_task_flags_bit {
@@ -454,14 +738,20 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
*/
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
- return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ return 0;
+
+ return priv->tx_cfg.num_queues;
}
/* Returns the number of rx queue page lists
*/
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
- return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ return 0;
+
+ return priv->rx_cfg.num_queues;
}
/* Returns a pointer to the next available tx qpl in the list of qpls
@@ -515,6 +805,12 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
return DMA_FROM_DEVICE;
}
+static inline bool gve_is_gqi(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_GQI_RDA_FORMAT ||
+ priv->queue_format == GVE_GQI_QPL_FORMAT;
+}
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -525,14 +821,14 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings(struct gve_priv *priv);
+void gve_tx_free_rings_gqi(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings(struct gve_priv *priv);
+void gve_rx_free_rings_gqi(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat);
/* Reset */
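
The dqo_tx/dqo_compl split above deliberately keeps the producer and consumer ends of the pending-packet free list on separate cachelines. A minimal sketch of the refill path the comment describes, assuming `dqo_tx.free_pending_packets` is the consumer head named in that comment and that `struct gve_tx_pending_packet_dqo` links free entries through a `next` index (both assumptions of this sketch, since neither field is shown in this hunk):

    /* Illustrative only -- not part of this patch. */
    static s16 gve_alloc_pending_packet_idx(struct gve_tx_ring *tx)
    {
    	s16 index = tx->dqo_tx.free_pending_packets;

    	if (unlikely(index == -1)) {
    		/* Consumer list ran out: steal the producer list in one
    		 * atomic swap; the completion path restarts from -1 (empty).
    		 */
    		tx->dqo_tx.free_pending_packets =
    			atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
    		index = tx->dqo_tx.free_pending_packets;
    		if (index == -1)
    			return -1;
    	}

    	/* Pop the head off the consumer list. */
    	tx->dqo_tx.free_pending_packets =
    		tx->dqo.pending_packets[index].next;
    	return index;
    }

The swap is the only point of contention between the two paths, which is what lets both lists otherwise stay lock-free.
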
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 53864f200599..5bb56b454541 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#include <linux/etherdevice.h>
@@ -18,6 +18,8 @@
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
struct gve_device_option *option)
@@ -33,28 +35,81 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
- struct gve_device_option *option)
+ struct gve_device_option *option,
+ struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+ struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda)
{
+ u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
u16 option_id = be16_to_cpu(option->option_id);
+ /* If the length or feature mask doesn't match, continue without
+ * enabling the feature.
+ */
switch (option_id) {
- case GVE_DEV_OPT_ID_RAW_ADDRESSING:
- /* If the length or feature mask doesn't match,
- * continue without enabling the feature.
- */
- if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
- option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
- dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
- GVE_DEV_OPT_LEN_RAW_ADDRESSING,
- cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
- option_length, option->feat_mask);
- priv->raw_addressing = 0;
- } else {
- dev_info(&priv->pdev->dev,
- "Raw addressing device option enabled.\n");
- priv->raw_addressing = 1;
+ case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+ if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Raw Addressing",
+ GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ dev_info(&priv->pdev->dev,
+ "Gqi raw addressing device option enabled.\n");
+ priv->queue_format = GVE_GQI_RDA_FORMAT;
+ break;
+ case GVE_DEV_OPT_ID_GQI_RDA:
+ if (option_length < sizeof(**dev_op_gqi_rda) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_gqi_rda)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+ }
+ *dev_op_gqi_rda = (void *)(option + 1);
+ break;
+ case GVE_DEV_OPT_ID_GQI_QPL:
+ if (option_length < sizeof(**dev_op_gqi_qpl) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_gqi_qpl)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+ }
+ *dev_op_gqi_qpl = (void *)(option + 1);
+ break;
+ case GVE_DEV_OPT_ID_DQO_RDA:
+ if (option_length < sizeof(**dev_op_dqo_rda) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_dqo_rda)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
}
+ *dev_op_dqo_rda = (void *)(option + 1);
break;
default:
/* If we don't recognize the option just continue
@@ -65,6 +120,39 @@ void gve_parse_device_option(struct gve_priv *priv,
}
}
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+ struct gve_device_descriptor *descriptor,
+ struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+ struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+ const int num_options = be16_to_cpu(descriptor->num_device_options);
+ struct gve_device_option *dev_opt;
+ int i;
+
+ /* The options struct directly follows the device descriptor. */
+ dev_opt = (void *)(descriptor + 1);
+ for (i = 0; i < num_options; i++) {
+ struct gve_device_option *next_opt;
+
+ next_opt = gve_get_next_option(descriptor, dev_opt);
+ if (!next_opt) {
+ dev_err(&priv->dev->dev,
+ "options exceed device_descriptor's total length.\n");
+ return -EINVAL;
+ }
+
+ gve_parse_device_option(priv, descriptor, dev_opt,
+ dev_op_gqi_rda, dev_op_gqi_qpl,
+ dev_op_dqo_rda);
+ dev_opt = next_opt;
+ }
+
+ return 0;
+}
+
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -88,6 +176,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_get_ptype_map_cnt = 0;
/* Setup Admin queue with the device */
iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -293,6 +382,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_GET_PTYPE_MAP:
+ priv->adminq_get_ptype_map_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -305,7 +397,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
* The caller is also responsible for making sure there are no commands
* waiting to be executed.
*/
-static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+static int gve_adminq_execute_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
{
u32 tail, head;
int err;
@@ -350,6 +443,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
.irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
.ntfy_blk_msix_base_idx =
cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
+ .queue_format = priv->queue_format,
};
return gve_adminq_execute_cmd(priv, &cmd);
@@ -369,27 +463,32 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd;
- u32 qpl_id;
- int err;
- qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
.queue_id = cpu_to_be32(queue_index),
- .reserved = 0,
.queue_resources_addr =
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
- .queue_page_list_id = cpu_to_be32(qpl_id),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
};
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+ if (gve_is_gqi(priv)) {
+ u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+ GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
+
+ cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ } else {
+ cmd.create_tx_queue.tx_ring_size =
+ cpu_to_be16(priv->tx_desc_cnt);
+ cmd.create_tx_queue.tx_comp_ring_addr =
+ cpu_to_be64(tx->complq_bus_dqo);
+ cmd.create_tx_queue.tx_comp_ring_size =
+ cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
+ }
- return 0;
+ return gve_adminq_issue_cmd(priv, &cmd);
}
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
@@ -410,28 +509,41 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
- u32 qpl_id;
- int err;
- qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
- .index = cpu_to_be32(queue_index),
- .reserved = 0,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
- .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
- .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
- .queue_page_list_id = cpu_to_be32(qpl_id),
};
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+ if (gve_is_gqi(priv)) {
+ u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+ GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
+
+ cmd.create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->desc.bus);
+ cmd.create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->data.data_bus);
+ cmd.create_rx_queue.index = cpu_to_be32(queue_index);
+ cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ } else {
+ cmd.create_rx_queue.rx_ring_size =
+ cpu_to_be16(priv->rx_desc_cnt);
+ cmd.create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->dqo.complq.bus);
+ cmd.create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->dqo.bufq.bus);
+ cmd.create_rx_queue.packet_buffer_size =
+ cpu_to_be16(priv->data_buffer_size_dqo);
+ cmd.create_rx_queue.rx_buff_ring_size =
+ cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
+ cmd.create_rx_queue.enable_rsc =
+ !!(priv->dev->features & NETIF_F_LRO);
+ }
- return 0;
+ return gve_adminq_issue_cmd(priv, &cmd);
}
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -512,17 +624,51 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
return gve_adminq_kick_and_wait(priv);
}
+static int gve_set_desc_cnt(struct gve_priv *priv,
+ struct gve_device_descriptor *descriptor)
+{
+ priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+ if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
+ dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
+ priv->tx_desc_cnt);
+ return -EINVAL;
+ }
+ priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+ if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
+ < PAGE_SIZE) {
+ dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
+ priv->rx_desc_cnt);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+gve_set_desc_cnt_dqo(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor,
+ const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+{
+ priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+ priv->options_dqo_rda.tx_comp_ring_entries =
+ be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
+ priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+ priv->options_dqo_rda.rx_buff_ring_entries =
+ be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
+
+ return 0;
+}
+
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+ struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+ struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
struct gve_device_descriptor *descriptor;
- struct gve_device_option *dev_opt;
union gve_adminq_command cmd;
dma_addr_t descriptor_bus;
- u16 num_options;
int err = 0;
u8 *mac;
u16 mtu;
- int i;
memset(&cmd, 0, sizeof(cmd));
descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -540,21 +686,41 @@ int gve_adminq_describe_device(struct gve_priv *priv)
if (err)
goto free_device_descriptor;
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
- err = -EINVAL;
+ err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+ &dev_op_gqi_qpl, &dev_op_dqo_rda);
+ if (err)
goto free_device_descriptor;
+
+ /* If the GQI_RAW_ADDRESSING option did not already select the GqiRda
+ * queue format, choose one in priority order: DqoRda, GqiRda, GqiQpl.
+ * GqiQpl is the default.
+ */
+ if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
+ } else if (dev_op_dqo_rda) {
+ priv->queue_format = GVE_DQO_RDA_FORMAT;
+ dev_info(&priv->pdev->dev,
+ "Driver is running with DQO RDA queue format.\n");
+ } else if (dev_op_gqi_rda) {
+ priv->queue_format = GVE_GQI_RDA_FORMAT;
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
+ } else {
+ priv->queue_format = GVE_GQI_QPL_FORMAT;
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI QPL queue format.\n");
}
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
- < PAGE_SIZE ||
- priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
- < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
- err = -EINVAL;
- goto free_device_descriptor;
+ if (gve_is_gqi(priv)) {
+ err = gve_set_desc_cnt(priv, descriptor);
+ } else {
+ /* DQO supports LRO. */
+ priv->dev->hw_features |= NETIF_F_LRO;
+ err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
}
+ if (err)
+ goto free_device_descriptor;
+
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
mtu = be16_to_cpu(descriptor->mtu);
@@ -570,32 +736,16 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
- if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
+
+ if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
priv->rx_data_slot_cnt);
priv->rx_desc_cnt = priv->rx_data_slot_cnt;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
- dev_opt = (void *)(descriptor + 1);
-
- num_options = be16_to_cpu(descriptor->num_device_options);
- for (i = 0; i < num_options; i++) {
- struct gve_device_option *next_opt;
-
- next_opt = gve_get_next_option(descriptor, dev_opt);
- if (!next_opt) {
- dev_err(&priv->dev->dev,
- "options exceed device_descriptor's total length.\n");
- err = -EINVAL;
- goto free_device_descriptor;
- }
-
- gve_parse_device_option(priv, descriptor, dev_opt);
- dev_opt = next_opt;
- }
free_device_descriptor:
- dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+ dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
descriptor_bus);
return err;
}
@@ -701,3 +851,41 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
link_speed_region_bus);
return err;
}
+
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+ struct gve_ptype_lut *ptype_lut)
+{
+ struct gve_ptype_map *ptype_map;
+ union gve_adminq_command cmd;
+ dma_addr_t ptype_map_bus;
+ int err = 0;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
+ &ptype_map_bus, GFP_KERNEL);
+ if (!ptype_map)
+ return -ENOMEM;
+
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
+ cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
+ .ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
+ .ptype_map_addr = cpu_to_be64(ptype_map_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto err;
+
+ /* Populate ptype_lut. */
+ for (i = 0; i < GVE_NUM_PTYPES; i++) {
+ ptype_lut->ptypes[i].l3_type =
+ ptype_map->ptypes[i].l3_type;
+ ptype_lut->ptypes[i].l4_type =
+ ptype_map->ptypes[i].l4_type;
+ }
+err:
+ dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
+ ptype_map_bus);
+ return err;
+}
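
gve_set_desc_cnt() above rejects any ring whose descriptor array would be smaller than a page, because queue memory is allocated and registered in whole pages. A worked sketch of the bound, assuming 4 KiB pages and the 16-byte GQI TX descriptor (both assumptions of this example):

    /* Illustrative only: mirrors the lower bound in gve_set_desc_cnt(). */
    static bool gve_tx_ring_size_ok(u16 tx_queue_entries)
    {
    	/* 256 entries * 16 bytes = 4096, so anything below 256 entries
    	 * fails on a 4 KiB-page system and describe_device errors out.
    	 */
    	return (size_t)tx_queue_entries * sizeof(union gve_tx_desc) >=
    	       PAGE_SIZE;
    }
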
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index d320c2ffd87c..47c3d8f313fc 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#ifndef _GVE_ADMINQ_H
@@ -22,7 +22,8 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
GVE_ADMINQ_REPORT_STATS = 0xC,
- GVE_ADMINQ_REPORT_LINK_SPEED = 0xD
+ GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
+ GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
};
/* Admin queue status codes */
@@ -82,14 +83,54 @@ static_assert(sizeof(struct gve_device_descriptor) == 40);
struct gve_device_option {
__be16 option_id;
__be16 option_length;
- __be32 feat_mask;
+ __be32 required_features_mask;
};
static_assert(sizeof(struct gve_device_option) == 8);
-#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
-#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
-#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
+struct gve_device_option_gqi_rda {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
+
+struct gve_device_option_gqi_qpl {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
+
+struct gve_device_option_dqo_rda {
+ __be32 supported_features_mask;
+ __be16 tx_comp_ring_entries;
+ __be16 rx_buff_ring_entries;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+
+/* Terminology:
+ *
+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
+ * mapped and read/updated by the device.
+ *
+ * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
+ * the device for read/write and data is copied from/to SKBs.
+ */
+enum gve_dev_opt_id {
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+};
+
+enum gve_dev_opt_req_feat_mask {
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+};
+
+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
struct gve_adminq_configure_device_resources {
__be64 counter_array;
@@ -98,9 +139,11 @@ struct gve_adminq_configure_device_resources {
__be32 num_irq_dbs;
__be32 irq_db_stride;
__be32 ntfy_blk_msix_base_idx;
+ u8 queue_format;
+ u8 padding[7];
};
-static_assert(sizeof(struct gve_adminq_configure_device_resources) == 32);
+static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
struct gve_adminq_register_page_list {
__be32 page_list_id;
@@ -125,9 +168,13 @@ struct gve_adminq_create_tx_queue {
__be64 tx_ring_addr;
__be32 queue_page_list_id;
__be32 ntfy_id;
+ __be64 tx_comp_ring_addr;
+ __be16 tx_ring_size;
+ __be16 tx_comp_ring_size;
+ u8 padding[4];
};
-static_assert(sizeof(struct gve_adminq_create_tx_queue) == 32);
+static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
struct gve_adminq_create_rx_queue {
__be32 queue_id;
@@ -138,10 +185,14 @@ struct gve_adminq_create_rx_queue {
__be64 rx_desc_ring_addr;
__be64 rx_data_ring_addr;
__be32 queue_page_list_id;
- u8 padding[4];
+ __be16 rx_ring_size;
+ __be16 packet_buffer_size;
+ __be16 rx_buff_ring_size;
+ u8 enable_rsc;
+ u8 padding[5];
};
-static_assert(sizeof(struct gve_adminq_create_rx_queue) == 48);
+static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
/* Queue resources that are shared with the device */
struct gve_queue_resources {
@@ -226,6 +277,41 @@ enum gve_stat_names {
RX_DROPS_INVALID_CHECKSUM = 68,
};
+enum gve_l3_type {
+ /* Must be zero so zero initialized LUT is unknown. */
+ GVE_L3_TYPE_UNKNOWN = 0,
+ GVE_L3_TYPE_OTHER,
+ GVE_L3_TYPE_IPV4,
+ GVE_L3_TYPE_IPV6,
+};
+
+enum gve_l4_type {
+ /* Must be zero so zero initialized LUT is unknown. */
+ GVE_L4_TYPE_UNKNOWN = 0,
+ GVE_L4_TYPE_OTHER,
+ GVE_L4_TYPE_TCP,
+ GVE_L4_TYPE_UDP,
+ GVE_L4_TYPE_ICMP,
+ GVE_L4_TYPE_SCTP,
+};
+
+/* These are control path types for PTYPE which are the same as the data path
+ * types.
+ */
+struct gve_ptype_entry {
+ u8 l3_type;
+ u8 l4_type;
+};
+
+struct gve_ptype_map {
+ struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+};
+
+struct gve_adminq_get_ptype_map {
+ __be64 ptype_map_len;
+ __be64 ptype_map_addr;
+};
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -243,6 +329,7 @@ union gve_adminq_command {
struct gve_adminq_set_driver_parameter set_driver_param;
struct gve_adminq_report_stats report_stats;
struct gve_adminq_report_link_speed report_link_speed;
+ struct gve_adminq_get_ptype_map get_ptype_map;
};
};
u8 reserved[64];
@@ -271,4 +358,9 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_report_link_speed(struct gve_priv *priv);
+
+struct gve_ptype_lut;
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+ struct gve_ptype_lut *ptype_lut);
+
#endif /* _GVE_ADMINQ_H */
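
The 10-bit `packet_type` in each DQO RX completion indexes the 1024-entry LUT that gve_adminq_get_ptype_map_dqo() fills. A sketch, with a hypothetical helper name, of how an RX path could turn a LUT entry into an skb hash type:

    /* Illustrative only -- the helper name is hypothetical. */
    static void gve_rx_set_hash(struct sk_buff *skb, u32 hash,
    			    struct gve_ptype ptype)
    {
    	enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;

    	/* UNKNOWN is 0 in both enums, so a zeroed LUT is safely inert. */
    	if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
    		hash_type = PKT_HASH_TYPE_L4;
    	else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
    		hash_type = PKT_HASH_TYPE_L3;

    	skb_set_hash(skb, hash, hash_type);
    }
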
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
new file mode 100644
index 000000000000..e8fe9adef7f2
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+/* GVE DQO Descriptor formats */
+
+#ifndef _GVE_DESC_DQO_H_
+#define _GVE_DESC_DQO_H_
+
+#include <linux/build_bug.h>
+
+#define GVE_TX_MAX_HDR_SIZE_DQO 255
+#define GVE_TX_MIN_TSO_MSS_DQO 88
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#error "Only little endian supported"
+#endif
+
+/* Basic TX descriptor (DTYPE 0x0C) */
+struct gve_tx_pkt_desc_dqo {
+ __le64 buf_addr;
+
+ /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
+ u8 dtype: 5;
+
+ /* Denotes the last descriptor of a packet. */
+ u8 end_of_packet: 1;
+ u8 checksum_offload_enable: 1;
+
+ /* If set, will generate a descriptor completion for this descriptor. */
+ u8 report_event: 1;
+ u8 reserved0;
+ __le16 reserved1;
+
+ /* The TX completion associated with this packet will contain this tag.
+ */
+ __le16 compl_tag;
+ u16 buf_size: 14;
+ u16 reserved2: 2;
+} __packed;
+static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16);
+
+#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
+#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
+
+/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
+#define GVE_TX_MAX_DATA_DESCS 10
+
+/* Min gap between tail and head to avoid cacheline overlap */
+#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4
+
+/* "report_event" on TX packet descriptors may only be reported on the last
+ * descriptor of a TX packet, and they must be spaced apart with at least this
+ * value.
+ */
+#define GVE_TX_MIN_RE_INTERVAL 32
+
+struct gve_tx_context_cmd_dtype {
+ u8 dtype: 5;
+ u8 tso: 1;
+ u8 reserved1: 2;
+
+ u8 reserved2;
+};
+
+static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2);
+
+/* TX Native TSO Context DTYPE (0x05)
+ *
+ * "flex" fields allow the driver to send additional packet context to HW.
+ */
+struct gve_tx_tso_context_desc_dqo {
+ /* The L4 payload bytes that should be segmented. */
+ u32 tso_total_len: 24;
+ u32 flex10: 8;
+
+ /* Max segment size in TSO excluding headers. */
+ u16 mss: 14;
+ u16 reserved: 2;
+
+ u8 header_len; /* Header length to use for TSO offload */
+ u8 flex11;
+ struct gve_tx_context_cmd_dtype cmd_dtype;
+ u8 flex0;
+ u8 flex5;
+ u8 flex6;
+ u8 flex7;
+ u8 flex8;
+ u8 flex9;
+} __packed;
+static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16);
+
+#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
+
+/* General context descriptor for sending metadata. */
+struct gve_tx_general_context_desc_dqo {
+ u8 flex4;
+ u8 flex5;
+ u8 flex6;
+ u8 flex7;
+ u8 flex8;
+ u8 flex9;
+ u8 flex10;
+ u8 flex11;
+ struct gve_tx_context_cmd_dtype cmd_dtype;
+ u16 reserved;
+ u8 flex0;
+ u8 flex1;
+ u8 flex2;
+ u8 flex3;
+} __packed;
+static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16);
+
+#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4
+
+/* Logical structure of metadata which is packed into context descriptor flex
+ * fields.
+ */
+struct gve_tx_metadata_dqo {
+ union {
+ struct {
+ u8 version;
+
+ /* If `skb->l4_hash` is set, this value should be
+ * derived from `skb->hash`.
+ *
+ * A zero value means no l4_hash was associated with the
+ * skb.
+ */
+ u16 path_hash: 15;
+
+ /* Should be set to 1 if the flow associated with the
+ * skb had a rehash from the TCP stack.
+ */
+ u16 rehash_event: 1;
+ } __packed;
+ u8 bytes[12];
+ };
+} __packed;
+static_assert(sizeof(struct gve_tx_metadata_dqo) == 12);
+
+#define GVE_TX_METADATA_VERSION_DQO 0
+
+/* TX completion descriptor */
+struct gve_tx_compl_desc {
+ /* For types 0-4 this is the TX queue ID associated with this
+ * completion.
+ */
+ u16 id: 11;
+
+ /* See: GVE_COMPL_TYPE_DQO* */
+ u16 type: 3;
+ u16 reserved0: 1;
+
+ /* Flipped by HW to notify the descriptor is populated. */
+ u16 generation: 1;
+ union {
+ /* For descriptor completions, this is the last index fetched
+ * by HW + 1.
+ */
+ __le16 tx_head;
+
+ /* For packet completions, this is the completion tag set on the
+ * TX packet descriptors.
+ */
+ __le16 completion_tag;
+ };
+ __le32 reserved1;
+} __packed;
+static_assert(sizeof(struct gve_tx_compl_desc) == 8);
+
+#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
+#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
+#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
+#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */
+
+/* Descriptor to post buffers to HW on buffer queue. */
+struct gve_rx_desc_dqo {
+ __le16 buf_id; /* ID returned in Rx completion descriptor */
+ __le16 reserved0;
+ __le32 reserved1;
+ __le64 buf_addr; /* DMA address of the buffer */
+ __le64 header_buf_addr;
+ __le64 reserved2;
+} __packed;
+static_assert(sizeof(struct gve_rx_desc_dqo) == 32);
+
+/* Descriptor for HW to notify SW of new packets received on RX queue. */
+struct gve_rx_compl_desc_dqo {
+ /* Must be 1 */
+ u8 rxdid: 4;
+ u8 reserved0: 4;
+
+ /* Packet originated from this system rather than the network. */
+ u8 loopback: 1;
+ /* Set when IPv6 packet contains a destination options header or routing
+ * header.
+ */
+ u8 ipv6_ex_add: 1;
+ /* Invalid packet was received. */
+ u8 rx_error: 1;
+ u8 reserved1: 5;
+
+ u16 packet_type: 10;
+ u16 ip_hdr_err: 1;
+ u16 udp_len_err: 1;
+ u16 raw_cs_invalid: 1;
+ u16 reserved2: 3;
+
+ u16 packet_len: 14;
+ /* Flipped by HW to notify the descriptor is populated. */
+ u16 generation: 1;
+ /* Should be zero. */
+ u16 buffer_queue_id: 1;
+
+ u16 header_len: 10;
+ u16 rsc: 1;
+ u16 split_header: 1;
+ u16 reserved3: 4;
+
+ u8 descriptor_done: 1;
+ u8 end_of_packet: 1;
+ u8 header_buffer_overflow: 1;
+ u8 l3_l4_processed: 1;
+ u8 csum_ip_err: 1;
+ u8 csum_l4_err: 1;
+ u8 csum_external_ip_err: 1;
+ u8 csum_external_udp_err: 1;
+
+ u8 status_error1;
+
+ __le16 reserved5;
+ __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
+
+ union {
+ /* Packet checksum. */
+ __le16 raw_cs;
+ /* Segment length for RSC packets. */
+ __le16 rsc_seg_len;
+ };
+ __le32 hash;
+ __le32 reserved6;
+ __le64 reserved7;
+} __packed;
+
+static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
+
+/* Ringing the doorbell too often can hurt performance.
+ *
+ * HW requires this value to be at least 8.
+ */
+#define GVE_RX_BUF_THRESH_DQO 32
+
+#endif /* _GVE_DESC_DQO_H_ */
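
The `generation` bits above are how software detects new completions without a head pointer from HW: the device flips the bit it writes on every pass over the ring. A minimal polling sketch using the dqo_compl fields added to struct gve_tx_ring earlier in this patch (a simplification, not the driver's actual cleaning loop):

    /* Illustrative only. `cur_gen_bit` holds the value HW writes during
     * the current pass over the completion ring.
     */
    static bool gve_tx_compl_ready(const struct gve_tx_ring *tx)
    {
    	const struct gve_tx_compl_desc *desc =
    		&tx->dqo.compl_ring[tx->dqo_compl.head];

    	return desc->generation == tx->dqo_compl.cur_gen_bit;
    }

    static void gve_tx_compl_advance(struct gve_tx_ring *tx)
    {
    	tx->dqo_compl.head = (tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
    	/* HW flips the bit each wrap, so flip the expectation whenever
    	 * head wraps back to slot 0.
    	 */
    	tx->dqo_compl.cur_gen_bit ^= (tx->dqo_compl.head == 0);
    }
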
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
new file mode 100644
index 000000000000..836042364124
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_DQO_H_
+#define _GVE_DQO_H_
+
+#include "gve_adminq.h"
+
+#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
+#define GVE_ITR_CLEAR_PBA_BIT_DQO BIT(1)
+#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
+
+#define GVE_ITR_INTERVAL_DQO_SHIFT 5
+#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
+
+#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
+#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+
+/* Timeout in seconds to wait for a reinjection completion after receiving
+ * its corresponding miss completion.
+ */
+#define GVE_REINJECT_COMPL_TIMEOUT 1
+
+/* Timeout in seconds to deallocate the completion tag for a packet that was
+ * prematurely freed because no valid completion arrived. This should be large
+ * enough to rule out the possibility of receiving the corresponding valid
+ * completion after this interval.
+ */
+#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
+
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_tx_free_rings_dqo(struct gve_priv *priv);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct napi_struct *napi);
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+
+static inline void
+gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
+ const struct gve_queue_resources *q_resources, u32 val)
+{
+ u64 index;
+
+ index = be32_to_cpu(q_resources->db_index);
+ iowrite32(val, &priv->db_bar2[index]);
+}
+
+/* Builds register value to write to DQO IRQ doorbell to enable with specified
+ * ratelimit.
+ */
+static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+{
+ u32 result = GVE_ITR_ENABLE_BIT_DQO;
+
+ /* Interval has 2us granularity. */
+ ratelimit_us >>= 1;
+
+ ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
+ result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+
+ return result;
+}
+
+static inline void
+gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
+ const struct gve_notify_block *block, u32 val)
+{
+ u32 index = be32_to_cpu(block->irq_db_index);
+
+ iowrite32(val, &priv->db_bar2[index]);
+}
+
+#endif /* _GVE_DQO_H_ */
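
A worked example of the ITR encoding above, for the value gve_turnup() writes to a TX IRQ doorbell; the result is pure arithmetic over the macros, not a separately documented register layout:

    /* Illustrative only.
     * GVE_TX_IRQ_RATELIMIT_US_DQO = 50:
     *   50 >> 1                          = 25    (2us granularity)
     *   25 & GVE_ITR_INTERVAL_DQO_MASK   = 25
     *   25 << GVE_ITR_INTERVAL_DQO_SHIFT = 0x320
     *   0x320 | GVE_ITR_ENABLE_BIT_DQO   = 0x321
     */
    static u32 gve_example_tx_itr_val(void)
    {
    	return gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
    }
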
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5fb05cf36b49..716e6240305d 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#include <linux/ethtool.h>
@@ -311,8 +311,16 @@ gve_get_ethtool_stats(struct net_device *netdev,
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
struct gve_tx_ring *tx = &priv->tx[ring];
- data[i++] = tx->req;
- data[i++] = tx->done;
+ if (gve_is_gqi(priv)) {
+ data[i++] = tx->req;
+ data[i++] = tx->done;
+ } else {
+ /* DQO doesn't currently support
+ * posted/completed descriptor counts.
+ */
+ data[i++] = 0;
+ data[i++] = 0;
+ }
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -453,11 +461,16 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
+ {
+ u32 max_copybreak = gve_is_gqi(priv) ?
+ (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+
len = *(u32 *)value;
- if (len > PAGE_SIZE / 2)
+ if (len > max_copybreak)
return -EINVAL;
priv->rx_copybreak = len;
return 0;
+ }
default:
return -EOPNOTSUPP;
}
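
The tunable change above makes the copybreak ceiling depend on the queue format. The same rule as a standalone sketch (the helper is assumed, not part of the patch):

    /* Illustrative only: GQI packs two buffers per page, so copybreak is
     * capped at half a page; DQO caps it at the posted buffer size chosen
     * in gve_open().
     */
    static u32 gve_max_rx_copybreak(const struct gve_priv *priv)
    {
    	return gve_is_gqi(priv) ? PAGE_SIZE / 2 :
    				  priv->data_buffer_size_dqo;
    }
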
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7302498c6df3..867e87af3432 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#include <linux/cpumask.h>
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
+#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
@@ -26,6 +27,16 @@
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
+static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (gve_is_gqi(priv))
+ return gve_tx(skb, dev);
+ else
+ return gve_tx_dqo(skb, dev);
+}
+
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -155,6 +166,15 @@ static irqreturn_t gve_intr(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t gve_intr_dqo(int irq, void *arg)
+{
+ struct gve_notify_block *block = arg;
+
+ /* Interrupts are automatically masked */
+ napi_schedule_irqoff(&block->napi);
+ return IRQ_HANDLED;
+}
+
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@@ -180,7 +200,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
/* Double check we have no extra work.
* Ensure unmask synchronizes with checking for work.
*/
- dma_rmb();
+ mb();
if (block->tx)
reschedule |= gve_tx_poll(block, -1);
if (block->rx)
@@ -191,6 +211,54 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
+static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+{
+ struct gve_notify_block *block =
+ container_of(napi, struct gve_notify_block, napi);
+ struct gve_priv *priv = block->priv;
+ bool reschedule = false;
+ int work_done = 0;
+
+ /* Clear PCI MSI-X Pending Bit Array (PBA)
+ *
+ * This bit is set if an interrupt event occurs while the vector is
+ * masked. If this bit is set and we reenable the interrupt, it will
+ * fire again. Since we're just about to poll the queue state, we don't
+ * need it to fire again.
+ *
+ * Under high softirq load, it's possible that the interrupt condition
+ * is triggered twice before we get the chance to process it.
+ */
+ gve_write_irq_doorbell_dqo(priv, block,
+ GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
+
+ if (block->tx)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
+ if (block->rx) {
+ work_done = gve_rx_poll_dqo(block, budget);
+ reschedule |= work_done == budget;
+ }
+
+ if (reschedule)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done))) {
+ /* Enable interrupts again.
+ *
+ * We don't need to repoll afterwards because HW supports the
+ * PCI MSI-X PBA feature.
+ *
+ * Another interrupt would be triggered if a new event came in
+ * since the last one.
+ */
+ gve_write_irq_doorbell_dqo(priv, block,
+ GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
+ }
+
+ return work_done;
+}
+
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
@@ -220,6 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int vecs_left = new_num_ntfy_blks % 2;
priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
vecs_per_type);
priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -263,7 +332,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
name, i);
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
- gve_intr, 0, block->name, block);
+ gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
+ 0, block->name, block);
if (err) {
dev_err(&priv->pdev->dev,
"Failed to receive msix vector %d\n", i);
@@ -300,20 +370,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (priv->msix_vectors) {
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
}
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
priv->ntfy_blocks = NULL;
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
@@ -343,6 +415,22 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = -ENXIO;
goto abort_with_stats_report;
}
+
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
+ GFP_KERNEL);
+ if (!priv->ptype_lut_dqo) {
+ err = -ENOMEM;
+ goto abort_with_stats_report;
+ }
+ err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to get ptype map: err=%d\n", err);
+ goto abort_with_ptype_lut;
+ }
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -351,12 +439,17 @@ static int gve_setup_device_resources(struct gve_priv *priv)
"Failed to report stats: err=%d\n", err);
gve_set_device_resources_ok(priv);
return 0;
+
+abort_with_ptype_lut:
+ kvfree(priv->ptype_lut_dqo);
+ priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+
return err;
}
@@ -383,17 +476,22 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_trigger_reset(priv);
}
}
+
+ kvfree(priv->ptype_lut_dqo);
+ priv->ptype_lut_dqo = NULL;
+
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
gve_clear_device_resources_ok(priv);
}
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
+static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int))
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
+ netif_napi_add(priv->dev, &block->napi, gve_poll,
NAPI_POLL_WEIGHT);
}
@@ -473,31 +571,75 @@ static int gve_create_rings(struct gve_priv *priv)
netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
priv->rx_cfg.num_queues);
- /* Rx data ring has been prefilled with packet buffers at queue
- * allocation time.
- * Write the doorbell to provide descriptor slots and packet buffers
- * to the NIC.
- */
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_write_doorbell(priv, &priv->rx[i]);
+ if (gve_is_gqi(priv)) {
+ /* Rx data ring has been prefilled with packet buffers at queue
+ * allocation time.
+ *
+ * Write the doorbell to provide descriptor slots and packet
+ * buffers to the NIC.
+ */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_write_doorbell(priv, &priv->rx[i]);
+ } else {
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ /* Post buffers and ring doorbell. */
+ gve_rx_post_buffers_dqo(&priv->rx[i]);
+ }
+ }
return 0;
}
+static void add_napi_init_sync_stats(struct gve_priv *priv,
+ int (*napi_poll)(struct napi_struct *napi,
+ int budget))
+{
+ int i;
+
+ /* Add tx napi & init sync stats */
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+
+ u64_stats_init(&priv->tx[i].statss);
+ priv->tx[i].ntfy_id = ntfy_idx;
+ gve_add_napi(priv, ntfy_idx, napi_poll);
+ }
+ /* Add rx napi & init sync stats */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+
+ u64_stats_init(&priv->rx[i].statss);
+ priv->rx[i].ntfy_id = ntfy_idx;
+ gve_add_napi(priv, ntfy_idx, napi_poll);
+ }
+}
+
+static void gve_tx_free_rings(struct gve_priv *priv)
+{
+ if (gve_is_gqi(priv)) {
+ gve_tx_free_rings_gqi(priv);
+ } else {
+ gve_tx_free_rings_dqo(priv);
+ }
+}
+
static int gve_alloc_rings(struct gve_priv *priv)
{
- int ntfy_idx;
int err;
- int i;
/* Setup tx rings */
priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
- err = gve_tx_alloc_rings(priv);
+
+ if (gve_is_gqi(priv))
+ err = gve_tx_alloc_rings(priv);
+ else
+ err = gve_tx_alloc_rings_dqo(priv);
if (err)
goto free_tx;
+
/* Setup rx rings */
priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
GFP_KERNEL);
@@ -505,21 +647,18 @@ static int gve_alloc_rings(struct gve_priv *priv)
err = -ENOMEM;
goto free_tx_queue;
}
- err = gve_rx_alloc_rings(priv);
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_rings(priv);
+ else
+ err = gve_rx_alloc_rings_dqo(priv);
if (err)
goto free_rx;
- /* Add tx napi & init sync stats*/
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- u64_stats_init(&priv->tx[i].statss);
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_add_napi(priv, ntfy_idx);
- }
- /* Add rx napi & init sync stats*/
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- u64_stats_init(&priv->rx[i].statss);
- ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
- gve_add_napi(priv, ntfy_idx);
- }
+
+ if (gve_is_gqi(priv))
+ add_napi_init_sync_stats(priv, gve_napi_poll);
+ else
+ add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
return 0;
@@ -557,6 +696,14 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
+static void gve_rx_free_rings(struct gve_priv *priv)
+{
+ if (gve_is_gqi(priv))
+ gve_rx_free_rings_gqi(priv);
+ else
+ gve_rx_free_rings_dqo(priv);
+}
+
static void gve_free_rings(struct gve_priv *priv)
{
int ntfy_idx;
@@ -678,7 +825,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
int err;
/* Raw addressing means no QPLs */
- if (priv->raw_addressing)
+ if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return 0;
priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
@@ -722,7 +869,7 @@ static void gve_free_qpls(struct gve_priv *priv)
int i;
/* Raw addressing means no QPLs */
- if (priv->raw_addressing)
+ if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
@@ -756,6 +903,7 @@ static int gve_open(struct net_device *dev)
err = gve_alloc_qpls(priv);
if (err)
return err;
+
err = gve_alloc_rings(priv);
if (err)
goto free_qpls;
@@ -770,9 +918,17 @@ static int gve_open(struct net_device *dev)
err = gve_register_qpls(priv);
if (err)
goto reset;
+
+ if (!gve_is_gqi(priv)) {
+ /* Hard code this for now. This may be tuned in the future for
+ * performance.
+ */
+ priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+ }
err = gve_create_rings(priv);
if (err)
goto reset;
+
gve_set_device_rings_ok(priv);
if (gve_get_report_stats(priv))
@@ -921,14 +1077,26 @@ static void gve_turnup(struct gve_priv *priv)
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
napi_enable(&block->napi);
- iowrite32be(0, gve_irq_doorbell(priv, block));
+ if (gve_is_gqi(priv)) {
+ iowrite32be(0, gve_irq_doorbell(priv, block));
+ } else {
+ u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
+
+ gve_write_irq_doorbell_dqo(priv, block, val);
+ }
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
napi_enable(&block->napi);
- iowrite32be(0, gve_irq_doorbell(priv, block));
+ if (gve_is_gqi(priv)) {
+ iowrite32be(0, gve_irq_doorbell(priv, block));
+ } else {
+ u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
+
+ gve_write_irq_doorbell_dqo(priv, block, val);
+ }
}
gve_set_napi_enabled(priv);
@@ -942,12 +1110,49 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
priv->tx_timeo_cnt++;
}
+static int gve_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ const netdev_features_t orig_features = netdev->features;
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err;
+
+ if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
+ netdev->features ^= NETIF_F_LRO;
+ if (netif_carrier_ok(netdev)) {
+ /* To make this process as simple as possible we
+ * teardown the device, set the new configuration,
+ * and then bring the device up again.
+ */
+ err = gve_close(netdev);
+ /* We have already tried to reset in close, just fail
+ * at this point.
+ */
+ if (err)
+ goto err;
+
+ err = gve_open(netdev);
+ if (err)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ /* Revert the change on error. */
+ netdev->features = orig_features;
+ netif_err(priv, drv, netdev,
+ "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
+ return err;
+}
+
static const struct net_device_ops gve_netdev_ops = {
- .ndo_start_xmit = gve_tx,
+ .ndo_start_xmit = gve_start_xmit,
.ndo_open = gve_open,
.ndo_stop = gve_close,
.ndo_get_stats64 = gve_get_stats,
.ndo_tx_timeout = gve_tx_timeout,
+ .ndo_set_features = gve_set_features,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -991,6 +1196,15 @@ void gve_handle_report_stats(struct gve_priv *priv)
/* tx stats */
if (priv->tx) {
for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ u32 last_completion = 0;
+ u32 tx_frames = 0;
+
+ /* DQO doesn't currently support these metrics. */
+ if (gve_is_gqi(priv)) {
+ last_completion = priv->tx[idx].done;
+ tx_frames = priv->tx[idx].req;
+ }
+
do {
start = u64_stats_fetch_begin(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
@@ -1007,7 +1221,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_FRAMES_SENT),
- .value = cpu_to_be64(priv->tx[idx].req),
+ .value = cpu_to_be64(tx_frames),
.queue_id = cpu_to_be32(idx),
};
stats[stats_idx++] = (struct stats) {
@@ -1017,7 +1231,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
- .value = cpu_to_be64(priv->tx[idx].done),
+ .value = cpu_to_be64(last_completion),
.queue_id = cpu_to_be32(idx),
};
}
@@ -1085,7 +1299,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (skip_describe_device)
goto setup_device;
- priv->raw_addressing = false;
+ priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
/* Get the initial information we need from the device */
err = gve_adminq_describe_device(priv);
if (err) {
@@ -1093,7 +1307,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
"Could not get device information: err=%d\n", err);
goto err;
}
- if (priv->dev->max_mtu > PAGE_SIZE) {
+ if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
@@ -1292,8 +1506,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gve_write_version(&reg_bar->driver_version);
/* Get max queues to alloc etherdev */
- max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
- max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
+ max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+ max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
/* Alloc and setup the netdev and priv */
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
@@ -1304,7 +1518,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
- /* advertise features */
+
+ /* Set default and supported features.
+ *
+ * Features might be set in other locations as well (such as
+ * `gve_adminq_describe_device`).
+ */
dev->hw_features = NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_SG;
dev->hw_features |= NETIF_F_HW_CSUM;
@@ -1349,6 +1568,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_wq;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
+ dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
gve_clear_probe_in_progress(priv);
queue_work(priv->gve_wq, &priv->service_task);
return 0;
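
gve_set_features() above runs when userspace toggles offloads, e.g. `ethtool -K <iface> lro on`. A condensed sketch of its LRO round-trip, under a hypothetical helper name:

    /* Illustrative only: apply the bit, bounce the device, revert the
     * feature set if the bounce fails.
     */
    static int gve_toggle_lro(struct net_device *netdev)
    {
    	const netdev_features_t orig_features = netdev->features;
    	int err = 0;

    	netdev->features ^= NETIF_F_LRO;
    	if (netif_carrier_ok(netdev)) {
    		err = gve_close(netdev);
    		if (!err)
    			err = gve_open(netdev);
    		if (err)
    			netdev->features = orig_features;
    	}
    	return err;
    }
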
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index bf123fe524c4..bb8261368250 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -1,21 +1,14 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_utils.h"
#include <linux/etherdevice.h>
-static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
- struct gve_notify_block *block =
- &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
-
- block->rx = NULL;
-}
-
static void gve_rx_free_buffer(struct device *dev,
struct gve_rx_slot_page_info *page_info,
union gve_rx_data_slot *data_slot)
@@ -137,16 +130,6 @@ alloc_err:
return err;
}
-static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
- u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- struct gve_rx_ring *rx = &priv->rx[queue_idx];
-
- block->rx = rx;
- rx->ntfy_id = ntfy_idx;
-}
-
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
@@ -165,7 +148,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1;
- rx->data.raw_addressing = priv->raw_addressing;
+ rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -255,7 +238,7 @@ int gve_rx_alloc_rings(struct gve_priv *priv)
return err;
}
-void gve_rx_free_rings(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv)
{
int i;
@@ -279,27 +262,6 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
- struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info,
- u16 len)
-{
- struct sk_buff *skb = napi_alloc_skb(napi, len);
- void *va = page_info->page_address + GVE_RX_PAD +
- (page_info->page_offset ? PAGE_SIZE / 2 : 0);
-
- if (unlikely(!skb))
- return NULL;
-
- __skb_put(skb, len);
-
- skb_copy_to_linear_data(skb, va, len);
-
- skb->protocol = eth_type_trans(skb, dev);
-
- return skb;
-}
-
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
@@ -310,7 +272,7 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
return NULL;
skb_add_rx_frag(skb, 0, page_info->page,
- (page_info->page_offset ? PAGE_SIZE / 2 : 0) +
+ page_info->page_offset +
GVE_RX_PAD, len, PAGE_SIZE / 2);
return skb;
@@ -321,7 +283,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
/* "flip" to other packet buffer on this page */
- page_info->page_offset ^= 0x1;
+ page_info->page_offset ^= PAGE_SIZE / 2;
*(slot_addr) ^= offset;
}
@@ -388,7 +350,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
- skb = gve_rx_copy(netdev, napi, page_info, len);
+ skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
if (skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -430,7 +392,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
if (len <= priv->rx_copybreak) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
rx->rx_copybreak_pkt++;
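
The gve_rx.c hunks above turn `page_offset` from a 0/1 flag into a byte offset, so the half-page flip becomes a single XOR applied to both the CPU-side and device-side copies of the offset. A sketch of the resulting address math (helper names are illustrative):

    /* Each 4 KiB page hosts two packet buffers; flipping toggles between
     * offset 0 and PAGE_SIZE / 2 on both copies at once.
     */
    static void *gve_rx_buf_va(const struct gve_rx_slot_page_info *page_info)
    {
    	return page_info->page_address + page_info->page_offset + GVE_RX_PAD;
    }

    static void gve_rx_flip_example(struct gve_rx_slot_page_info *page_info,
    				__be64 *slot_addr)
    {
    	page_info->page_offset ^= PAGE_SIZE / 2;	/* CPU-side copy */
    	*slot_addr ^= cpu_to_be64(PAGE_SIZE / 2);	/* device-side copy */
    }
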
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
new file mode 100644
index 000000000000..77bb8227f89b
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_dqo.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
+{
+ return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
+}
+
+static void gve_free_page_dqo(struct gve_priv *priv,
+ struct gve_rx_buf_state_dqo *bs)
+{
+ page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
+ gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+ DMA_FROM_DEVICE);
+ bs->page_info.page = NULL;
+}
+
+static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = rx->dqo.free_buf_states;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from free list */
+ rx->dqo.free_buf_states = buf_state->next;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ return buf_state;
+}
+
+static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ return buf_state->next == buffer_id;
+}
+
+static void gve_free_buf_state(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = rx->dqo.free_buf_states;
+ rx->dqo.free_buf_states = buffer_id;
+}
+
+static struct gve_rx_buf_state_dqo *
+gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = list->head;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from list */
+ list->head = buf_state->next;
+ if (buf_state->next == -1)
+ list->tail = -1;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ return buf_state;
+}
+
+static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
+ struct gve_index_list *list,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = -1;
+
+ if (list->head == -1) {
+ list->head = buffer_id;
+ list->tail = buffer_id;
+ } else {
+ int tail = list->tail;
+
+ rx->dqo.buf_states[tail].next = buffer_id;
+ list->tail = buffer_id;
+ }
+}
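
The list plumbing above is index-based rather than pointer-based: each buf_state carries an s16 `next` index into the same array, -1 terminates a list, and a state whose `next` equals its own index is marked allocated (which is what gve_buf_state_is_allocated() tests). A minimal userspace model of those invariants — array size and names are illustrative, not from the driver:

    #include <assert.h>
    #include <stdio.h>

    #define NUM_STATES 8            /* illustrative size */

    struct buf_state { short next; };

    static struct buf_state states[NUM_STATES];
    static short free_head = 0;     /* models rx->dqo.free_buf_states */

    static short alloc_state(void)
    {
            short id = free_head;

            if (id == -1)
                    return -1;
            free_head = states[id].next;
            states[id].next = id;   /* points to itself: allocated */
            return id;
    }

    static void free_state(short id)
    {
            states[id].next = free_head;
            free_head = id;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < NUM_STATES - 1; i++)
                    states[i].next = i + 1;
            states[NUM_STATES - 1].next = -1;

            short a = alloc_state();
            assert(states[a].next == a);    /* allocated marker */
            free_state(a);
            assert(free_head == a);
            printf("ok\n");
            return 0;
    }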
+
+static struct gve_rx_buf_state_dqo *
+gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ int i;
+
+ /* Recycled buf states are immediately usable. */
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
+ if (likely(buf_state))
+ return buf_state;
+
+ if (unlikely(rx->dqo.used_buf_states.head == -1))
+ return NULL;
+
+ /* Used buf states are only usable when ref count reaches 0, which means
+ * no SKBs refer to them.
+ *
+ * Search a limited number before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+ if (gve_buf_ref_cnt(buf_state) == 0)
+ return buf_state;
+
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ }
+
+ /* If there are no free buf states discard an entry from
+ * `used_buf_states` so it can be used.
+ */
+ if (unlikely(rx->dqo.free_buf_states == -1)) {
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+ if (gve_buf_ref_cnt(buf_state) == 0)
+ return buf_state;
+
+ gve_free_page_dqo(rx->gve, buf_state);
+ gve_free_buf_state(rx, buf_state);
+ }
+
+ return NULL;
+}
+
+static int gve_alloc_page_dqo(struct gve_priv *priv,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ int err;
+
+ err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
+ &buf_state->addr, DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+ buf_state->page_info.page_offset = 0;
+ buf_state->page_info.page_address =
+ page_address(buf_state->page_info.page);
+ buf_state->last_single_ref_offset = 0;
+
+ /* The page already has 1 ref. */
+ page_ref_add(buf_state->page_info.page, INT_MAX - 1);
+ buf_state->page_info.pagecnt_bias = INT_MAX;
+
+ return 0;
+}
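
The INT_MAX bias set up here is a reference-count amortization: the driver grabs nearly the whole refcount space once at page allocation, then hands references to the stack by decrementing its private pagecnt_bias, so gve_buf_ref_cnt() above (page_count minus bias) counts only the references the networking stack still holds. A hedged arithmetic sketch, with plain integers standing in for the atomic page refcount:

    #include <assert.h>

    int main(void)
    {
            long page_refs = 1;              /* fresh page holds one ref */
            long pagecnt_bias;

            /* gve_alloc_page_dqo(): take INT_MAX refs in one shot */
            page_refs += 2147483647L - 1;    /* INT_MAX - 1 */
            pagecnt_bias = 2147483647L;      /* INT_MAX */

            /* No skb holds the page yet */
            assert(page_refs - pagecnt_bias == 0);

            /* Handing the page to an skb costs one unit of bias,
             * not an atomic page_ref operation.
             */
            pagecnt_bias--;                  /* gve_dec_pagecnt_bias() */
            assert(page_refs - pagecnt_bias == 1);

            /* skb freed: the stack drops its page reference */
            page_refs--;
            assert(page_refs - pagecnt_bias == 0);  /* safe to recycle */
            return 0;
    }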
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ size_t completion_queue_slots;
+ size_t buffer_queue_slots;
+ size_t size;
+ int i;
+
+ completion_queue_slots = rx->dqo.complq.mask + 1;
+ buffer_queue_slots = rx->dqo.bufq.mask + 1;
+
+ gve_rx_remove_from_block(priv, idx);
+
+ if (rx->q_resources) {
+ dma_free_coherent(hdev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+ }
+
+ for (i = 0; i < rx->dqo.num_buf_states; i++) {
+ struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+ if (bs->page_info.page)
+ gve_free_page_dqo(priv, bs);
+ }
+
+ if (rx->dqo.bufq.desc_ring) {
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+ dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
+ rx->dqo.bufq.bus);
+ rx->dqo.bufq.desc_ring = NULL;
+ }
+
+ if (rx->dqo.complq.desc_ring) {
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
+ rx->dqo.complq.bus);
+ rx->dqo.complq.desc_ring = NULL;
+ }
+
+ kvfree(rx->dqo.buf_states);
+ rx->dqo.buf_states = NULL;
+
+ netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ size_t size;
+ int i;
+
+ const u32 buffer_queue_slots =
+ priv->options_dqo_rda.rx_buff_ring_entries;
+ const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+ netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
+
+ memset(rx, 0, sizeof(*rx));
+ rx->gve = priv;
+ rx->q_num = idx;
+ rx->dqo.bufq.mask = buffer_queue_slots - 1;
+ rx->dqo.complq.num_free_slots = completion_queue_slots;
+ rx->dqo.complq.mask = completion_queue_slots - 1;
+ rx->skb_head = NULL;
+ rx->skb_tail = NULL;
+
+ rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+ rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
+ sizeof(rx->dqo.buf_states[0]),
+ GFP_KERNEL);
+ if (!rx->dqo.buf_states)
+ return -ENOMEM;
+
+ /* Set up linked list of buffer IDs */
+ for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+ rx->dqo.buf_states[i].next = i + 1;
+
+ rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+ rx->dqo.recycled_buf_states.head = -1;
+ rx->dqo.recycled_buf_states.tail = -1;
+ rx->dqo.used_buf_states.head = -1;
+ rx->dqo.used_buf_states.tail = -1;
+
+ /* Allocate RX completion queue */
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ rx->dqo.complq.desc_ring =
+ dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
+ if (!rx->dqo.complq.desc_ring)
+ goto err;
+
+ /* Allocate RX buffer queue */
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+ rx->dqo.bufq.desc_ring =
+ dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
+ if (!rx->dqo.bufq.desc_ring)
+ goto err;
+
+ rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
+ &rx->q_resources_bus, GFP_KERNEL);
+ if (!rx->q_resources)
+ goto err;
+
+ gve_rx_add_to_block(priv, idx);
+
+ return 0;
+
+err:
+ gve_rx_free_ring_dqo(priv, idx);
+ return -ENOMEM;
+}
+
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
+{
+ const struct gve_rx_ring *rx = &priv->rx[queue_idx];
+ u64 index = be32_to_cpu(rx->q_resources->db_index);
+
+ iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
+}
+
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ err = gve_rx_alloc_ring_dqo(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to alloc rx ring=%d: err=%d\n",
+ i, err);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ gve_rx_free_ring_dqo(priv, i);
+
+ return err;
+}
+
+void gve_rx_free_rings_dqo(struct gve_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_free_ring_dqo(priv, i);
+}
+
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
+{
+ struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+ struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+ struct gve_priv *priv = rx->gve;
+ u32 num_avail_slots;
+ u32 num_full_slots;
+ u32 num_posted = 0;
+
+ num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
+ num_avail_slots = bufq->mask - num_full_slots;
+
+ num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
+ while (num_posted < num_avail_slots) {
+ struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
+ struct gve_rx_buf_state_dqo *buf_state;
+
+ buf_state = gve_get_recycled_buf_state(rx);
+ if (unlikely(!buf_state)) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ break;
+
+ if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+ gve_free_buf_state(rx, buf_state);
+ break;
+ }
+ }
+
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr = cpu_to_le64(buf_state->addr +
+ buf_state->page_info.page_offset);
+
+ bufq->tail = (bufq->tail + 1) & bufq->mask;
+ complq->num_free_slots--;
+ num_posted++;
+
+ if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
+ gve_rx_write_doorbell_dqo(priv, rx->q_num);
+ }
+
+ rx->fill_cnt += num_posted;
+}
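
The occupancy arithmetic in gve_rx_post_buffers_dqo() depends on the queue sizes being powers of two: with mask = size - 1, (tail - head) & mask stays correct even when the free-running u32 indices wrap, and one slot is deliberately kept empty (num_avail_slots uses mask, not mask + 1) so a full ring is distinguishable from an empty one. A standalone check, with an arbitrary 256-entry ring:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint32_t mask = 256 - 1;   /* ring of 256 slots */
            uint32_t head = 4294967290u;     /* near UINT32_MAX */
            uint32_t tail = head + 10;       /* wraps past zero */

            uint32_t num_full = (tail - head) & mask;
            uint32_t num_avail = mask - num_full;  /* one slot kept empty */

            assert(num_full == 10);
            assert(num_avail == 245);
            return 0;
    }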
+
+static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ const int data_buffer_size = priv->data_buffer_size_dqo;
+ int pagecount;
+
+ /* Can't reuse if we only fit one buffer per page */
+ if (data_buffer_size * 2 > PAGE_SIZE)
+ goto mark_used;
+
+ pagecount = gve_buf_ref_cnt(buf_state);
+
+ /* Record the offset when we have a single remaining reference.
+ *
+ * When this happens, we know all of the other offsets of the page are
+ * usable.
+ */
+ if (pagecount == 1) {
+ buf_state->last_single_ref_offset =
+ buf_state->page_info.page_offset;
+ }
+
+ /* Use the next buffer-sized chunk in the page. */
+ buf_state->page_info.page_offset += data_buffer_size;
+ buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
+
+ /* If we wrap around to the same offset without ever dropping to 1
+ * reference, then we don't know if this offset was ever freed.
+ */
+ if (buf_state->page_info.page_offset ==
+ buf_state->last_single_ref_offset) {
+ goto mark_used;
+ }
+
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ return;
+
+mark_used:
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+}
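
The recycling heuristic above advances through the page one data_buffer_size stride at a time and wraps with & (PAGE_SIZE - 1). last_single_ref_offset remembers where the driver last saw itself as the page's sole user; getting back to that offset without another single-reference sighting means some intermediate offset may still be referenced, so the buffer is parked on the used list instead. A worked trace under assumed values (4 KiB page, 2 KiB buffers):

    #include <assert.h>

    int main(void)
    {
            const unsigned page_size = 4096, buf_size = 2048;
            unsigned offset = 0, last_single_ref_offset = 0;

            /* pagecount == 1 observed at offset 0: record it */
            last_single_ref_offset = offset;

            /* advance one buffer-sized chunk: 0 -> 2048 */
            offset = (offset + buf_size) & (page_size - 1);
            assert(offset != last_single_ref_offset); /* recyclable */

            /* advance again with no new single-ref sighting: 2048 -> 0 */
            offset = (offset + buf_size) & (page_size - 1);
            assert(offset == last_single_ref_offset); /* wrap: mark used */
            return 0;
    }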
+
+static void gve_rx_skb_csum(struct sk_buff *skb,
+ const struct gve_rx_compl_desc_dqo *desc,
+ struct gve_ptype ptype)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* HW did not identify and process L3 and L4 headers. */
+ if (unlikely(!desc->l3_l4_processed))
+ return;
+
+ if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
+ if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
+ return;
+ } else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
+ /* Checksum should be skipped if this flag is set. */
+ if (unlikely(desc->ipv6_ex_add))
+ return;
+ }
+
+ if (unlikely(desc->csum_l4_err))
+ return;
+
+ switch (ptype.l4_type) {
+ case GVE_L4_TYPE_TCP:
+ case GVE_L4_TYPE_UDP:
+ case GVE_L4_TYPE_ICMP:
+ case GVE_L4_TYPE_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+}
+
+static void gve_rx_skb_hash(struct sk_buff *skb,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_ptype ptype)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;
+
+ if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
+ hash_type = PKT_HASH_TYPE_L4;
+ else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
+ hash_type = PKT_HASH_TYPE_L3;
+
+ skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+}
+
+static void gve_rx_free_skb(struct gve_rx_ring *rx)
+{
+ if (!rx->skb_head)
+ return;
+
+ dev_kfree_skb_any(rx->skb_head);
+ rx->skb_head = NULL;
+ rx->skb_tail = NULL;
+}
+
+/* Chains multiple skbs for a single rx packet.
+ * Returns 0 if buffer is appended, -1 otherwise.
+ */
+static int gve_rx_append_frags(struct napi_struct *napi,
+ struct gve_rx_buf_state_dqo *buf_state,
+ u16 buf_len, struct gve_rx_ring *rx,
+ struct gve_priv *priv)
+{
+ int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+
+ if (unlikely(num_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(napi, 0);
+ if (!skb)
+ return -1;
+
+ skb_shinfo(rx->skb_tail)->frag_list = skb;
+ rx->skb_tail = skb;
+ num_frags = 0;
+ }
+ if (rx->skb_tail != rx->skb_head) {
+ rx->skb_head->len += buf_len;
+ rx->skb_head->data_len += buf_len;
+ rx->skb_head->truesize += priv->data_buffer_size_dqo;
+ }
+
+ skb_add_rx_frag(rx->skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset,
+ buf_len, priv->data_buffer_size_dqo);
+ gve_dec_pagecnt_bias(&buf_state->page_info);
+
+ return 0;
+}
+
+/* Returns 0 if descriptor is completed successfully.
+ * Returns -EINVAL if descriptor is invalid.
+ * Returns -ENOMEM if data cannot be copied to skb.
+ */
+static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ int queue_idx)
+{
+ const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+ const bool eop = compl_desc->end_of_packet != 0;
+ struct gve_rx_buf_state_dqo *buf_state;
+ struct gve_priv *priv = rx->gve;
+ u16 buf_len;
+
+ if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
+ net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
+ priv->dev->name, buffer_id);
+ return -EINVAL;
+ }
+ buf_state = &rx->dqo.buf_states[buffer_id];
+ if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
+ net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
+ priv->dev->name, buffer_id);
+ return -EINVAL;
+ }
+
+ if (unlikely(compl_desc->rx_error)) {
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+ buf_state);
+ return -EINVAL;
+ }
+
+ buf_len = compl_desc->packet_len;
+
+ /* Page might not have been used for a while and was likely last written
+ * by a different thread.
+ */
+ prefetch(buf_state->page_info.page);
+
+ /* Sync the portion of dma buffer for CPU to read. */
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ buf_state->page_info.page_offset,
+ buf_len, DMA_FROM_DEVICE);
+
+ /* Append to current skb if one exists. */
+ if (rx->skb_head) {
+ if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
+ priv) != 0)) {
+ goto error;
+ }
+
+ gve_try_recycle_buf(priv, rx, buf_state);
+ return 0;
+ }
+
+ /* Prefetch the payload header. */
+ prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
+#if L1_CACHE_BYTES < 128
+ prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
+ L1_CACHE_BYTES);
+#endif
+
+ if (eop && buf_len <= priv->rx_copybreak) {
+ rx->skb_head = gve_rx_copy(priv->dev, napi,
+ &buf_state->page_info, buf_len, 0);
+ if (unlikely(!rx->skb_head))
+ goto error;
+ rx->skb_tail = rx->skb_head;
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
+
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+ buf_state);
+ return 0;
+ }
+
+ rx->skb_head = napi_get_frags(napi);
+ if (unlikely(!rx->skb_head))
+ goto error;
+ rx->skb_tail = rx->skb_head;
+
+ skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+ buf_state->page_info.page_offset, buf_len,
+ priv->data_buffer_size_dqo);
+ gve_dec_pagecnt_bias(&buf_state->page_info);
+
+ gve_try_recycle_buf(priv, rx, buf_state);
+ return 0;
+
+error:
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ return -ENOMEM;
+}
+
+static int gve_rx_complete_rsc(struct sk_buff *skb,
+ const struct gve_rx_compl_desc_dqo *desc,
+ struct gve_ptype ptype)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ /* Only TCP is supported right now. */
+ if (ptype.l4_type != GVE_L4_TYPE_TCP)
+ return -EINVAL;
+
+ switch (ptype.l3_type) {
+ case GVE_L3_TYPE_IPV4:
+ shinfo->gso_type = SKB_GSO_TCPV4;
+ break;
+ case GVE_L3_TYPE_IPV6:
+ shinfo->gso_type = SKB_GSO_TCPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
+ return 0;
+}
+
+/* Returns 0 if skb is completed successfully, -1 otherwise. */
+static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
+ const struct gve_rx_compl_desc_dqo *desc,
+ netdev_features_t feat)
+{
+ struct gve_ptype ptype =
+ rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
+ int err;
+
+ skb_record_rx_queue(rx->skb_head, rx->q_num);
+
+ if (feat & NETIF_F_RXHASH)
+ gve_rx_skb_hash(rx->skb_head, desc, ptype);
+
+ if (feat & NETIF_F_RXCSUM)
+ gve_rx_skb_csum(rx->skb_head, desc, ptype);
+
+ /* RSC packets must set gso_size; otherwise the TCP stack will complain
+ * that packets are larger than the MTU.
+ */
+ if (desc->rsc) {
+ err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+ if (err < 0)
+ return err;
+ }
+
+ if (skb_headlen(rx->skb_head) == 0)
+ napi_gro_frags(napi);
+ else
+ napi_gro_receive(napi, rx->skb_head);
+
+ return 0;
+}
+
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+{
+ struct napi_struct *napi = &block->napi;
+ netdev_features_t feat = napi->dev->features;
+
+ struct gve_rx_ring *rx = block->rx;
+ struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+
+ u32 work_done = 0;
+ u64 bytes = 0;
+ int err;
+
+ while (work_done < budget) {
+ struct gve_rx_compl_desc_dqo *compl_desc =
+ &complq->desc_ring[complq->head];
+ u32 pkt_bytes;
+
+ /* No more new packets */
+ if (compl_desc->generation == complq->cur_gen_bit)
+ break;
+
+ /* Prefetch the next two descriptors. */
+ prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
+ prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);
+
+ /* Do not read data until we own the descriptor */
+ dma_rmb();
+
+ err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ if (err < 0) {
+ gve_rx_free_skb(rx);
+ u64_stats_update_begin(&rx->statss);
+ if (err == -ENOMEM)
+ rx->rx_skb_alloc_fail++;
+ else if (err == -EINVAL)
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ complq->head = (complq->head + 1) & complq->mask;
+ complq->num_free_slots++;
+
+ /* When the ring wraps, the generation bit is flipped. */
+ complq->cur_gen_bit ^= (complq->head == 0);
+
+ /* Receiving a completion means we have space to post another
+ * buffer on the buffer queue.
+ */
+ {
+ struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+
+ bufq->head = (bufq->head + 1) & bufq->mask;
+ }
+
+ /* Free running counter of completed descriptors */
+ rx->cnt++;
+
+ if (!rx->skb_head)
+ continue;
+
+ if (!compl_desc->end_of_packet)
+ continue;
+
+ work_done++;
+ pkt_bytes = rx->skb_head->len;
+ /* The Ethernet header (first ETH_HLEN bytes) is snipped off
+ * by eth_type_trans.
+ */
+ if (skb_headlen(rx->skb_head))
+ pkt_bytes += ETH_HLEN;
+
+ /* gve_rx_complete_skb() will consume skb if successful */
+ if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+ gve_rx_free_skb(rx);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
+ continue;
+ }
+
+ bytes += pkt_bytes;
+ rx->skb_head = NULL;
+ rx->skb_tail = NULL;
+ }
+
+ gve_rx_post_buffers_dqo(rx);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += work_done;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+
+ return work_done;
+}
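
Descriptor ownership in the completion ring is communicated by a generation bit rather than a head pointer: hardware writes each pass over the ring with alternating generation values, and the driver flips its expected cur_gen_bit every time head wraps to 0, so a descriptor whose generation still matches the expectation has not been written this pass. A tiny model of the wrap/flip bookkeeping (4-entry ring chosen for brevity):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint32_t mask = 4 - 1;  /* tiny completion ring */
            uint32_t head = 0;
            uint8_t cur_gen_bit = 0;
            int i;

            /* Consume exactly one full ring's worth of completions. */
            for (i = 0; i < 4; i++) {
                    head = (head + 1) & mask;
                    cur_gen_bit ^= (head == 0);   /* flip on wrap */
            }

            /* After one wrap the expected generation bit has flipped,
             * so stale descriptors from the previous pass (still
             * carrying generation 0) no longer look "new".
             */
            assert(head == 0 && cur_gen_bit == 1);
            return 0;
    }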
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6938f3a939d6..665ac795a1ad 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
*/
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
@@ -131,14 +132,6 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
atomic_add(bytes, &fifo->available);
}
-static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
- struct gve_notify_block *block =
- &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
-
- block->tx = NULL;
-}
-
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
@@ -174,16 +167,6 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
-static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
- int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- struct gve_tx_ring *tx = &priv->tx[queue_idx];
-
- block->tx = tx;
- tx->ntfy_id = ntfy_idx;
-}
-
static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_tx_ring *tx = &priv->tx[idx];
@@ -208,14 +191,15 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->desc)
goto abort_with_info;
- tx->raw_addressing = priv->raw_addressing;
+ tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+ if (!tx->tx_fifo.qpl)
+ goto abort_with_desc;
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ goto abort_with_qpl;
}
tx->q_resources =
@@ -236,6 +220,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+ if (!tx->raw_addressing)
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -269,7 +256,7 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
return err;
}
-void gve_tx_free_rings(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv)
{
int i;
@@ -589,7 +576,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
struct gve_tx_ring *tx;
int nsegs;
- WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
if (unlikely(gve_maybe_stop_tx(tx, skb))) {
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
new file mode 100644
index 000000000000..05ddb6a75c38
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include "gve_dqo.h"
+#include <linux/tcp.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+/* Returns true if a gve_tx_pending_packet_dqo object is available. */
+static bool gve_has_pending_packet(struct gve_tx_ring *tx)
+{
+ /* Check TX path's list. */
+ if (tx->dqo_tx.free_pending_packets != -1)
+ return true;
+
+ /* Check completion handler's list. */
+ if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
+ return true;
+
+ return false;
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_alloc_pending_packet(struct gve_tx_ring *tx)
+{
+ struct gve_tx_pending_packet_dqo *pending_packet;
+ s16 index;
+
+ index = tx->dqo_tx.free_pending_packets;
+
+ /* No pending_packets available, try to steal the list from the
+ * completion handler.
+ */
+ if (unlikely(index == -1)) {
+ tx->dqo_tx.free_pending_packets =
+ atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
+ index = tx->dqo_tx.free_pending_packets;
+
+ if (unlikely(index == -1))
+ return NULL;
+ }
+
+ pending_packet = &tx->dqo.pending_packets[index];
+
+ /* Remove pending_packet from free list */
+ tx->dqo_tx.free_pending_packets = pending_packet->next;
+ pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
+
+ return pending_packet;
+}
+
+static void
+gve_free_pending_packet(struct gve_tx_ring *tx,
+ struct gve_tx_pending_packet_dqo *pending_packet)
+{
+ s16 index = pending_packet - tx->dqo.pending_packets;
+
+ pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
+ while (true) {
+ s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);
+
+ pending_packet->next = old_head;
+ if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
+ old_head, index) == old_head) {
+ break;
+ }
+ }
+}
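
The two free lists here form a single-producer hand-off: the TX path consumes from its private dqo_tx list with no atomics, while completion-side frees push onto dqo_compl.free_pending_packets with the cmpxchg loop above (a Treiber-stack push); when the private list runs dry, gve_alloc_pending_packet() steals the entire atomic list in one xchg. A userspace C11 sketch of the same pattern — sizes and names are illustrative:

    #include <assert.h>
    #include <stdatomic.h>

    #define NUM_PKTS 4

    struct pending { short next; };

    static struct pending pkts[NUM_PKTS];
    static _Atomic short compl_free_head = -1; /* completion side's list */

    /* Completion path: push one index with a CAS loop. */
    static void push_free(short index)
    {
            short old_head = atomic_load(&compl_free_head);

            do {
                    pkts[index].next = old_head;
            } while (!atomic_compare_exchange_weak(&compl_free_head,
                                                   &old_head, index));
    }

    /* TX path: steal the whole list in one exchange. */
    static short steal_all(void)
    {
            return atomic_exchange(&compl_free_head, -1);
    }

    int main(void)
    {
            push_free(2);
            push_free(0);
            short head = steal_all();
            assert(head == 0 && pkts[0].next == 2 && pkts[2].next == -1);
            assert(atomic_load(&compl_free_head) == -1);
            return 0;
    }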
+
+/* gve_tx_clean_pending_packets - Cleans up all pending tx requests and buffers.
+ */
+static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
+{
+ int i;
+
+ for (i = 0; i < tx->dqo.num_pending_packets; i++) {
+ struct gve_tx_pending_packet_dqo *cur_state =
+ &tx->dqo.pending_packets[i];
+ int j;
+
+ for (j = 0; j < cur_state->num_bufs; j++) {
+ struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
+
+ if (j == 0) {
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len),
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len),
+ DMA_TO_DEVICE);
+ }
+ }
+ if (cur_state->skb) {
+ dev_consume_skb_any(cur_state->skb);
+ cur_state->skb = NULL;
+ }
+ }
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ size_t bytes;
+
+ gve_tx_remove_from_block(priv, idx);
+
+ if (tx->q_resources) {
+ dma_free_coherent(hdev, sizeof(*tx->q_resources),
+ tx->q_resources, tx->q_resources_bus);
+ tx->q_resources = NULL;
+ }
+
+ if (tx->dqo.compl_ring) {
+ bytes = sizeof(tx->dqo.compl_ring[0]) *
+ (tx->dqo.complq_mask + 1);
+ dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
+ tx->complq_bus_dqo);
+ tx->dqo.compl_ring = NULL;
+ }
+
+ if (tx->dqo.tx_ring) {
+ bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+ dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
+ tx->dqo.tx_ring = NULL;
+ }
+
+ kvfree(tx->dqo.pending_packets);
+ tx->dqo.pending_packets = NULL;
+
+ netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ int num_pending_packets;
+ size_t bytes;
+ int i;
+
+ memset(tx, 0, sizeof(*tx));
+ tx->q_num = idx;
+ tx->dev = &priv->pdev->dev;
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
+
+ /* Queue sizes must be a power of 2 */
+ tx->mask = priv->tx_desc_cnt - 1;
+ tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1;
+
+ /* The max number of pending packets determines the maximum number of
+ * descriptors which may be written to the completion queue.
+ *
+ * We must set the number small enough to make sure we never overrun the
+ * completion queue.
+ */
+ num_pending_packets = tx->dqo.complq_mask + 1;
+
+ /* Reserve space for descriptor completions, which will be reported at
+ * most every GVE_TX_MIN_RE_INTERVAL packets.
+ */
+ num_pending_packets -=
+ (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;
+
+ /* Each packet may have at most 2 buffer completions if it receives both
+ * a miss and reinjection completion.
+ */
+ num_pending_packets /= 2;
+
+ tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
+ tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
+ sizeof(tx->dqo.pending_packets[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.pending_packets)
+ goto err;
+
+ /* Set up linked list of pending packets */
+ for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
+ tx->dqo.pending_packets[i].next = i + 1;
+
+ tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
+ atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+ tx->dqo_compl.miss_completions.head = -1;
+ tx->dqo_compl.miss_completions.tail = -1;
+ tx->dqo_compl.timed_out_completions.head = -1;
+ tx->dqo_compl.timed_out_completions.tail = -1;
+
+ bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+ tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
+ if (!tx->dqo.tx_ring)
+ goto err;
+
+ bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
+ tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
+ &tx->complq_bus_dqo,
+ GFP_KERNEL);
+ if (!tx->dqo.compl_ring)
+ goto err;
+
+ tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
+ &tx->q_resources_bus, GFP_KERNEL);
+ if (!tx->q_resources)
+ goto err;
+
+ gve_tx_add_to_block(priv, idx);
+
+ return 0;
+
+err:
+ gve_tx_free_ring_dqo(priv, idx);
+ return -ENOMEM;
+}
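
The sizing logic above is easiest to see with numbers. Assuming, purely for illustration, a 512-entry completion ring and a report-event interval of 32 packets (the real value of GVE_TX_MIN_RE_INTERVAL lives in the driver headers): 512 slots minus 512/32 = 16 reserved for descriptor completions leaves 496, and halving that for the worst case of a miss completion plus a reinjection completion per packet caps num_pending_packets at 248 — always clamped to S16_MAX so a completion tag fits in an s16 index.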
+
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ err = gve_tx_alloc_ring_dqo(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to alloc tx ring=%d: err=%d\n",
+ i, err);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ gve_tx_free_ring_dqo(priv, i);
+
+ return err;
+}
+
+void gve_tx_free_rings_dqo(struct gve_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ struct gve_tx_ring *tx = &priv->tx[i];
+
+ gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_clean_pending_packets(tx);
+
+ gve_tx_free_ring_dqo(priv, i);
+ }
+}
+
+/* Returns the number of slots available in the ring */
+static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
+{
+ u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;
+
+ return tx->mask - num_used;
+}
+
+/* Stops the queue if the number of available descriptors is less than 'count'.
+ * Return: 0 if stop is not required.
+ */
+static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count)
+{
+ if (likely(gve_has_pending_packet(tx) &&
+ num_avail_tx_slots(tx) >= count))
+ return 0;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ if (likely(gve_has_pending_packet(tx) &&
+ num_avail_tx_slots(tx) >= count))
+ return 0;
+
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+
+ /* Sync with restarting queue in `gve_tx_poll_dqo()` */
+ mb();
+
+ /* After stopping the queue, check again whether we can transmit, to
+ * avoid a TOCTOU race with the completion path.
+ */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ if (likely(!gve_has_pending_packet(tx) ||
+ num_avail_tx_slots(tx) < count))
+ return -EBUSY;
+
+ netif_tx_start_queue(tx->netdev_txq);
+ tx->wake_queue++;
+ return 0;
+}
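
The double check at the end closes a classic lost-wakeup window: if the completion path frees descriptors after the first capacity check but before netif_tx_stop_queue(), its netif_tx_wake_queue() call can fire while the queue is still running and be a no-op. Re-reading hw_tx_head after the full barrier pairs with the barrier in gve_tx_poll_dqo(), so either this path sees the new space and restarts the queue itself, or the cleaner sees the stopped queue and wakes it.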
+
+static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
+ struct gve_tx_metadata_dqo *metadata)
+{
+ memset(metadata, 0, sizeof(*metadata));
+ metadata->version = GVE_TX_METADATA_VERSION_DQO;
+
+ if (skb->l4_hash) {
+ u16 path_hash = skb->hash ^ (skb->hash >> 16);
+
+ path_hash &= (1 << 15) - 1;
+ if (unlikely(path_hash == 0))
+ path_hash = ~path_hash;
+
+ metadata->path_hash = path_hash;
+ }
+}
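
The metadata path hash folds the kernel's 32-bit L4 flow hash into a smaller field: XOR the two halves together, mask to 15 bits, and remap 0 (which would presumably read as "no hash") to a non-zero value. A quick standalone check of the folding:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t hash = 0x12345678;
            uint16_t path_hash = (uint16_t)(hash ^ (hash >> 16));

            path_hash &= (1u << 15) - 1;   /* fold to 15 bits */
            if (path_hash == 0)            /* avoid the "no hash" value */
                    path_hash = (uint16_t)~path_hash;

            assert(path_hash == ((0x5678 ^ 0x1234) & 0x7fff));
            return 0;
    }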
+
+static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
+ struct sk_buff *skb, u32 len, u64 addr,
+ s16 compl_tag, bool eop, bool is_gso)
+{
+ const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
+
+ while (len > 0) {
+ struct gve_tx_pkt_desc_dqo *desc =
+ &tx->dqo.tx_ring[*desc_idx].pkt;
+ u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
+ bool cur_eop = eop && cur_len == len;
+
+ *desc = (struct gve_tx_pkt_desc_dqo){
+ .buf_addr = cpu_to_le64(addr),
+ .dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
+ .end_of_packet = cur_eop,
+ .checksum_offload_enable = checksum_offload_en,
+ .compl_tag = cpu_to_le16(compl_tag),
+ .buf_size = cur_len,
+ };
+
+ addr += cur_len;
+ len -= cur_len;
+ *desc_idx = (*desc_idx + 1) & tx->mask;
+ }
+}
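
Because a single packet descriptor can carry at most GVE_TX_MAX_BUF_SIZE_DQO bytes, the loop above may emit several descriptors for one DMA mapping, setting end_of_packet only on the final chunk of the final buffer. A worked count, assuming a 16 KiB per-descriptor limit (illustrative — the real constant is defined in the driver headers):

    #include <assert.h>

    int main(void)
    {
            const unsigned max_buf = 16384;  /* assumed chunk limit */
            unsigned len = 40000, descs = 0;

            while (len > 0) {
                    unsigned cur = len < max_buf ? len : max_buf;

                    len -= cur;
                    descs++;
            }
            assert(descs == 3);   /* 16384 + 16384 + 7232 */
            return 0;
    }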
+
+/* Validates and prepares `skb` for TSO.
+ *
+ * Returns header length, or < 0 if invalid.
+ */
+static int gve_prep_tso(struct sk_buff *skb)
+{
+ struct tcphdr *tcp;
+ int header_len;
+ u32 paylen;
+ int err;
+
+ /* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
+ * of the TSO to be <= 262143.
+ *
+ * However, we don't validate these because:
+ * - Hypervisor enforces a limit of 9K MTU
+ * - Kernel will not produce a TSO larger than 64k
+ */
+
+ if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+ return -1;
+
+ /* Needed because we will modify header. */
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ tcp = tcp_hdr(skb);
+
+ /* Remove payload length from checksum. */
+ paylen = skb->len - skb_transport_offset(skb);
+
+ switch (skb_shinfo(skb)->gso_type) {
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+ csum_replace_by_diff(&tcp->check,
+ (__force __wsum)htonl(paylen));
+
+ /* Compute length of segmentation header. */
+ header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+ return -EINVAL;
+
+ return header_len;
+}
+
+static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
+ const struct sk_buff *skb,
+ const struct gve_tx_metadata_dqo *metadata,
+ int header_len)
+{
+ *desc = (struct gve_tx_tso_context_desc_dqo){
+ .header_len = header_len,
+ .cmd_dtype = {
+ .dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
+ .tso = 1,
+ },
+ .flex0 = metadata->bytes[0],
+ .flex5 = metadata->bytes[5],
+ .flex6 = metadata->bytes[6],
+ .flex7 = metadata->bytes[7],
+ .flex8 = metadata->bytes[8],
+ .flex9 = metadata->bytes[9],
+ .flex10 = metadata->bytes[10],
+ .flex11 = metadata->bytes[11],
+ };
+ desc->tso_total_len = skb->len - header_len;
+ desc->mss = skb_shinfo(skb)->gso_size;
+}
+
+static void
+gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
+ const struct gve_tx_metadata_dqo *metadata)
+{
+ *desc = (struct gve_tx_general_context_desc_dqo){
+ .flex0 = metadata->bytes[0],
+ .flex1 = metadata->bytes[1],
+ .flex2 = metadata->bytes[2],
+ .flex3 = metadata->bytes[3],
+ .flex4 = metadata->bytes[4],
+ .flex5 = metadata->bytes[5],
+ .flex6 = metadata->bytes[6],
+ .flex7 = metadata->bytes[7],
+ .flex8 = metadata->bytes[8],
+ .flex9 = metadata->bytes[9],
+ .flex10 = metadata->bytes[10],
+ .flex11 = metadata->bytes[11],
+ .cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
+ };
+}
+
+/* Returns 0 on success, or < 0 on error.
+ *
+ * Before this function is called, the caller must ensure
+ * gve_has_pending_packet(tx) returns true.
+ */
+static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
+ struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const bool is_gso = skb_is_gso(skb);
+ u32 desc_idx = tx->dqo_tx.tail;
+
+ struct gve_tx_pending_packet_dqo *pending_packet;
+ struct gve_tx_metadata_dqo metadata;
+ s16 completion_tag;
+ int i;
+
+ pending_packet = gve_alloc_pending_packet(tx);
+ pending_packet->skb = skb;
+ pending_packet->num_bufs = 0;
+ completion_tag = pending_packet - tx->dqo.pending_packets;
+
+ gve_extract_tx_metadata_dqo(skb, &metadata);
+ if (is_gso) {
+ int header_len = gve_prep_tso(skb);
+
+ if (unlikely(header_len < 0))
+ goto err;
+
+ gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
+ skb, &metadata, header_len);
+ desc_idx = (desc_idx + 1) & tx->mask;
+ }
+
+ gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
+ &metadata);
+ desc_idx = (desc_idx + 1) & tx->mask;
+
+ /* Note: HW requires that the size of a non-TSO packet be within the
+ * range of [17, 9728].
+ *
+ * We don't double check because
+ * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
+ * - Hypervisor won't allow MTU larger than 9216.
+ */
+
+ /* Map the linear portion of skb */
+ {
+ struct gve_tx_dma_buf *buf =
+ &pending_packet->bufs[pending_packet->num_bufs];
+ u32 len = skb_headlen(skb);
+ dma_addr_t addr;
+
+ addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dev, addr)))
+ goto err;
+
+ dma_unmap_len_set(buf, len, len);
+ dma_unmap_addr_set(buf, dma, addr);
+ ++pending_packet->num_bufs;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+ completion_tag,
+ /*eop=*/shinfo->nr_frags == 0, is_gso);
+ }
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct gve_tx_dma_buf *buf =
+ &pending_packet->bufs[pending_packet->num_bufs];
+ const skb_frag_t *frag = &shinfo->frags[i];
+ bool is_eop = i == (shinfo->nr_frags - 1);
+ u32 len = skb_frag_size(frag);
+ dma_addr_t addr;
+
+ addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dev, addr)))
+ goto err;
+
+ dma_unmap_len_set(buf, len, len);
+ dma_unmap_addr_set(buf, dma, addr);
+ ++pending_packet->num_bufs;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+ completion_tag, is_eop, is_gso);
+ }
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+ {
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ if (unlikely(last_report_event_interval >=
+ GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < pending_packet->num_bufs; i++) {
+ struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
+
+ if (i == 0) {
+ dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len),
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ }
+ }
+
+ pending_packet->skb = NULL;
+ pending_packet->num_bufs = 0;
+ gve_free_pending_packet(tx, pending_packet);
+
+ return -1;
+}
+
+static int gve_num_descs_per_buf(size_t size)
+{
+ return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
+}
+
+static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int num_descs;
+ int i;
+
+ num_descs = gve_num_descs_per_buf(skb_headlen(skb));
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);
+
+ num_descs += gve_num_descs_per_buf(frag_size);
+ }
+
+ return num_descs;
+}
+
+/* Returns true if HW is capable of sending TSO represented by `skb`.
+ *
+ * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
+ * - The header is counted as one buffer for every single segment.
+ * - A buffer which is split between two segments is counted for both.
+ * - If a buffer contains both header and payload, it is counted as two buffers.
+ */
+static bool gve_can_send_tso(const struct sk_buff *skb)
+{
+ const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
+ const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int gso_size = shinfo->gso_size;
+ int cur_seg_num_bufs;
+ int cur_seg_size;
+ int i;
+
+ cur_seg_size = skb_headlen(skb) - header_len;
+ cur_seg_num_bufs = cur_seg_size > 0;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ if (cur_seg_size >= gso_size) {
+ cur_seg_size %= gso_size;
+ cur_seg_num_bufs = cur_seg_size > 0;
+ }
+
+ if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+ return false;
+
+ cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+ }
+
+ return true;
+}
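
The per-segment buffer counting above deliberately over-counts at boundaries: a frag that straddles two TSO segments is charged to both, and max_bufs_per_seg reserves one descriptor of the GVE_TX_MAX_DATA_DESCS budget for the repeated header. A worked trace on an assumed layout (1400-byte MSS, no payload in the linear area, frags of 2000, 1400 and 500 bytes):

    #include <assert.h>

    int main(void)
    {
            const int gso_size = 1400;
            const int frags[] = { 2000, 1400, 500 };
            int cur_seg_size = 0;        /* headlen minus header */
            int cur_seg_num_bufs = 0;
            int max_bufs_seen = 0;
            int i;

            for (i = 0; i < 3; i++) {
                    if (cur_seg_size >= gso_size) {
                            cur_seg_size %= gso_size;
                            cur_seg_num_bufs = cur_seg_size > 0;
                    }
                    ++cur_seg_num_bufs;
                    if (cur_seg_num_bufs > max_bufs_seen)
                            max_bufs_seen = cur_seg_num_bufs;
                    cur_seg_size += frags[i];
            }

            /* frag 0 fills segment 1 and starts segment 2; frag 1
             * finishes segment 2 and starts segment 3; frag 2 ends the
             * packet: segments 2 and 3 each touch two buffers.
             */
            assert(max_bufs_seen == 2);
            return 0;
    }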
+
+/* Attempt to transmit specified SKB.
+ *
+ * Returns 0 if the SKB was transmitted or dropped.
+ * Returns -1 if there is not currently enough space to transmit the SKB.
+ */
+static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
+{
+ int num_buffer_descs;
+ int total_num_descs;
+
+ if (skb_is_gso(skb)) {
+ /* If TSO doesn't meet HW requirements, attempt to linearize the
+ * packet.
+ */
+ if (unlikely(!gve_can_send_tso(skb) &&
+ skb_linearize(skb) < 0)) {
+ net_err_ratelimited("%s: Failed to transmit TSO packet\n",
+ priv->dev->name);
+ goto drop;
+ }
+
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ } else {
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+
+ if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
+ if (unlikely(skb_linearize(skb) < 0))
+ goto drop;
+
+ num_buffer_descs = 1;
+ }
+ }
+
+ /* Metadata + (optional TSO) + data descriptors. */
+ total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
+ GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) {
+ return -1;
+ }
+
+ if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0))
+ goto drop;
+
+ netdev_tx_sent_queue(tx->netdev_txq, skb->len);
+ skb_tx_timestamp(skb);
+ return 0;
+
+drop:
+ tx->dropped_pkt++;
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+/* Transmit a given skb and ring the doorbell. */
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[skb_get_queue_mapping(skb)];
+ if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
+ /* We need to ring the txq doorbell -- we have stopped the Tx
+ * queue for want of resources, but prior calls to gve_tx()
+ * may have added descriptors without ringing the doorbell.
+ */
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
+ return NETDEV_TX_OK;
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ return NETDEV_TX_OK;
+}
+
+static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
+ struct gve_tx_pending_packet_dqo *pending_packet)
+{
+ s16 old_tail, index;
+
+ index = pending_packet - tx->dqo.pending_packets;
+ old_tail = list->tail;
+ list->tail = index;
+ if (old_tail == -1)
+ list->head = index;
+ else
+ tx->dqo.pending_packets[old_tail].next = index;
+
+ pending_packet->next = -1;
+ pending_packet->prev = old_tail;
+}
+
+static void remove_from_list(struct gve_tx_ring *tx,
+ struct gve_index_list *list,
+ struct gve_tx_pending_packet_dqo *pending_packet)
+{
+ s16 prev_index, next_index;
+
+ prev_index = pending_packet->prev;
+ next_index = pending_packet->next;
+
+ if (prev_index == -1) {
+ /* Node is head */
+ list->head = next_index;
+ } else {
+ tx->dqo.pending_packets[prev_index].next = next_index;
+ }
+ if (next_index == -1) {
+ /* Node is tail */
+ list->tail = prev_index;
+ } else {
+ tx->dqo.pending_packets[next_index].prev = prev_index;
+ }
+}
+
+static void gve_unmap_packet(struct device *dev,
+ struct gve_tx_pending_packet_dqo *pending_packet)
+{
+ struct gve_tx_dma_buf *buf;
+ int i;
+
+ /* SKB linear portion is guaranteed to be mapped */
+ buf = &pending_packet->bufs[0];
+ dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ for (i = 1; i < pending_packet->num_bufs; i++) {
+ buf = &pending_packet->bufs[i];
+ dma_unmap_page(dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ }
+ pending_packet->num_bufs = 0;
+}
+
+/* Completion types and expected behavior:
+ * No Miss compl + Packet compl = Packet completed normally.
+ * Miss compl + Re-inject compl = Packet completed normally.
+ * No Miss compl + Re-inject compl = Skipped, i.e. packet not completed.
+ * Miss compl + Packet compl = Skipped, i.e. packet not completed.
+ */
+static void gve_handle_packet_completion(struct gve_priv *priv,
+ struct gve_tx_ring *tx, bool is_napi,
+ u16 compl_tag, u64 *bytes, u64 *pkts,
+ bool is_reinjection)
+{
+ struct gve_tx_pending_packet_dqo *pending_packet;
+
+ if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+ net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+ priv->dev->name, (int)compl_tag);
+ return;
+ }
+
+ pending_packet = &tx->dqo.pending_packets[compl_tag];
+
+ if (unlikely(is_reinjection)) {
+ if (unlikely(pending_packet->state ==
+ GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
+ net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
+ priv->dev->name, (int)compl_tag);
+ /* Packet was already completed as a result of timeout,
+ * so just remove from list and free pending packet.
+ */
+ remove_from_list(tx,
+ &tx->dqo_compl.timed_out_completions,
+ pending_packet);
+ gve_free_pending_packet(tx, pending_packet);
+ return;
+ }
+ if (unlikely(pending_packet->state !=
+ GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
+ /* No outstanding miss completion but packet allocated
+ * implies packet receives a re-injection completion
+ * without a prior miss completion. Return without
+ * completing the packet.
+ */
+ net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
+ priv->dev->name, (int)compl_tag);
+ return;
+ }
+ remove_from_list(tx, &tx->dqo_compl.miss_completions,
+ pending_packet);
+ } else {
+ /* Packet is allocated but not a pending data completion. */
+ if (unlikely(pending_packet->state !=
+ GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+ net_err_ratelimited("%s: No pending data completion: %d\n",
+ priv->dev->name, (int)compl_tag);
+ return;
+ }
+ }
+ gve_unmap_packet(tx->dev, pending_packet);
+
+ *bytes += pending_packet->skb->len;
+ (*pkts)++;
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+}
+
+static void gve_handle_miss_completion(struct gve_priv *priv,
+ struct gve_tx_ring *tx, u16 compl_tag,
+ u64 *bytes, u64 *pkts)
+{
+ struct gve_tx_pending_packet_dqo *pending_packet;
+
+ if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+ net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+ priv->dev->name, (int)compl_tag);
+ return;
+ }
+
+ pending_packet = &tx->dqo.pending_packets[compl_tag];
+ if (unlikely(pending_packet->state !=
+ GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+ net_err_ratelimited("%s: Unexpected packet state: %d for completion tag : %d\n",
+ priv->dev->name, (int)pending_packet->state,
+ (int)compl_tag);
+ return;
+ }
+
+ pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
+ /* jiffies can wrap around, but the time comparison helpers handle overflow. */
+ pending_packet->timeout_jiffies =
+ jiffies +
+ msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
+ MSEC_PER_SEC);
+ add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
+
+ *bytes += pending_packet->skb->len;
+ (*pkts)++;
+}
+
+static void remove_miss_completions(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
+{
+ struct gve_tx_pending_packet_dqo *pending_packet;
+ s16 next_index;
+
+ next_index = tx->dqo_compl.miss_completions.head;
+ while (next_index != -1) {
+ pending_packet = &tx->dqo.pending_packets[next_index];
+ next_index = pending_packet->next;
+ /* Break early because packets should timeout in order. */
+ if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+ break;
+
+ remove_from_list(tx, &tx->dqo_compl.miss_completions,
+ pending_packet);
+ /* Unmap buffers and free the skb but do not release the pending
+ * packet, i.e. the completion tag is not freed, so the driver
+ * can take appropriate action if a corresponding valid
+ * completion is received later.
+ */
+ gve_unmap_packet(tx->dev, pending_packet);
+ /* This indicates the packet was dropped. */
+ dev_kfree_skb_any(pending_packet->skb);
+ pending_packet->skb = NULL;
+ tx->dropped_pkt++;
+ net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
+ priv->dev->name,
+ (int)(pending_packet - tx->dqo.pending_packets));
+
+ pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
+ pending_packet->timeout_jiffies =
+ jiffies +
+ msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
+ MSEC_PER_SEC);
+ /* Maintain pending packet in another list so the packet can be
+ * unallocated at a later time.
+ */
+ add_to_list(tx, &tx->dqo_compl.timed_out_completions,
+ pending_packet);
+ }
+}
+
+static void remove_timed_out_completions(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
+{
+ struct gve_tx_pending_packet_dqo *pending_packet;
+ s16 next_index;
+
+ next_index = tx->dqo_compl.timed_out_completions.head;
+ while (next_index != -1) {
+ pending_packet = &tx->dqo.pending_packets[next_index];
+ next_index = pending_packet->next;
+ /* Break early because packets should timeout in order. */
+ if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+ break;
+
+ remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
+ pending_packet);
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct napi_struct *napi)
+{
+ u64 reinject_compl_bytes = 0;
+ u64 reinject_compl_pkts = 0;
+ int num_descs_cleaned = 0;
+ u64 miss_compl_bytes = 0;
+ u64 miss_compl_pkts = 0;
+ u64 pkt_compl_bytes = 0;
+ u64 pkt_compl_pkts = 0;
+
+ /* Limit in order to avoid blocking for too long */
+ while (!napi || pkt_compl_pkts < napi->weight) {
+ struct gve_tx_compl_desc *compl_desc =
+ &tx->dqo.compl_ring[tx->dqo_compl.head];
+ u16 type;
+
+ if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
+ break;
+
+ /* Prefetch the next descriptor. */
+ prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
+ tx->dqo.complq_mask]);
+
+ /* Do not read data until we own the descriptor */
+ dma_rmb();
+ type = compl_desc->type;
+
+ if (type == GVE_COMPL_TYPE_DQO_DESC) {
+ /* This is the last descriptor fetched by HW plus one */
+ u16 tx_head = le16_to_cpu(compl_desc->tx_head);
+
+ atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
+ } else if (type == GVE_COMPL_TYPE_DQO_PKT) {
+ u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+ gve_handle_packet_completion(priv, tx, !!napi,
+ compl_tag,
+ &pkt_compl_bytes,
+ &pkt_compl_pkts,
+ /*is_reinjection=*/false);
+ } else if (type == GVE_COMPL_TYPE_DQO_MISS) {
+ u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+ gve_handle_miss_completion(priv, tx, compl_tag,
+ &miss_compl_bytes,
+ &miss_compl_pkts);
+ } else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
+ u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+ gve_handle_packet_completion(priv, tx, !!napi,
+ compl_tag,
+ &reinject_compl_bytes,
+ &reinject_compl_pkts,
+ /*is_reinjection=*/true);
+ }
+
+ tx->dqo_compl.head =
+ (tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
+ /* Flip the generation bit when we wrap around */
+ tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
+ num_descs_cleaned++;
+ }
+
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
+
+ remove_miss_completions(priv, tx);
+ remove_timed_out_completions(priv, tx);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
+ tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
+ u64_stats_update_end(&tx->statss);
+ return num_descs_cleaned;
+}
+
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ if (do_clean) {
+ int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
+ &block->napi);
+
+ /* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
+ mb();
+
+ if (netif_tx_queue_stopped(tx->netdev_txq) &&
+ num_descs_cleaned > 0) {
+ tx->wake_queue++;
+ netif_tx_wake_queue(tx->netdev_txq);
+ }
+ }
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
new file mode 100644
index 000000000000..93f3dcbeeea9
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+ block->tx = NULL;
+}
+
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ struct gve_tx_ring *tx = &priv->tx[queue_idx];
+
+ block->tx = tx;
+ tx->ntfy_id = ntfy_idx;
+}
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+ block->rx = NULL;
+}
+
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+ u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ struct gve_rx_ring *rx = &priv->rx[queue_idx];
+
+ block->rx = rx;
+ rx->ntfy_id = ntfy_idx;
+}
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len,
+ u16 pad)
+{
+ struct sk_buff *skb = napi_alloc_skb(napi, len);
+ void *va = page_info->page_address + pad +
+ page_info->page_offset;
+
+ if (unlikely(!skb))
+ return NULL;
+
+ __skb_put(skb, len);
+
+ skb_copy_to_linear_data(skb, va, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ return skb;
+}
+
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
+{
+ page_info->pagecnt_bias--;
+ if (page_info->pagecnt_bias == 0) {
+ int pagecount = page_count(page_info->page);
+
+ /* If we have run out of bias - set it back up to INT_MAX
+ * minus the existing refs.
+ */
+ page_info->pagecnt_bias = INT_MAX - pagecount;
+
+ /* Set pagecount back up to max. */
+ page_ref_add(page_info->page, INT_MAX - pagecount);
+ }
+}
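
The refill branch above restores the invariant used throughout the RX path — references held outside the driver equal page_count minus pagecnt_bias — by topping the page refcount back up and recomputing the bias as INT_MAX minus whatever the stack still holds. An arithmetic sketch with plain integers (the kernel code uses the atomic page refcount):

    #include <assert.h>

    int main(void)
    {
            const long imax = 2147483647L;  /* INT_MAX */
            long page_refs = 42;            /* stack holds 41 refs + ours */
            long bias = 1;

            /* gve_dec_pagecnt_bias() spending the last unit of bias: */
            bias--;
            if (bias == 0) {
                    long pagecount = page_refs;

                    bias = imax - pagecount;        /* replenished share */
                    page_refs += imax - pagecount;  /* page_ref_add() */
            }

            /* Outstanding stack references survive the refill. */
            assert(page_refs - bias == 42);
            return 0;
    }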
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
new file mode 100644
index 000000000000..79595940b351
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_UTILS_H
+#define _GVE_UTILS_H
+
+#include <linux/etherdevice.h>
+
+#include "gve.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len,
+ u16 pad);
+
+/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+
+#endif /* _GVE_UTILS_H */
+
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 44f9279cdde1..bb062b02fb85 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -102,6 +102,7 @@ config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
default m
depends on PCI_MSI
+ imply PTP_1588_CLOCK
help
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -130,6 +131,7 @@ config HNS3_ENET
default m
depends on 64BIT && PCI
depends on INET
+ select DIMLIB
help
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index c615fbf9094e..75e4ec569da8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -462,8 +462,6 @@ static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
default:
break;
}
-
- return;
}
static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f4cf569a2599..f41379de2186 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -111,7 +111,7 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_is_adjust_link - check is need change mac speed and duplex register
+ *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need to change
*@mac_cb: mac device
*@speed: phy device speed
*@duplex:phy device duplex
@@ -374,7 +374,7 @@ static void hns_mac_param_get(struct mac_params *param,
}
/**
- * hns_mac_queue_config_bc_en - set broadcast rx&tx enable
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
* @mac_cb: mac device
* @port_num: queue number
* @vlan_id: vlan id`
@@ -597,7 +597,7 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
}
/**
- * hns_mac_set_autoneg - set rx & tx pause parameter
+ * hns_mac_set_pauseparam - set rx & tx pause parameter
* @mac_cb: mac control block
* @rx_en: rx enable or not
* @tx_en: tx enable or not
@@ -914,8 +914,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
}
} else if (is_acpi_node(mac_cb->fw_port)) {
ret = hns_mac_register_phy(mac_cb);
- /*
- * Mac can work well if there is phy or not.If the port don't
+ /* Mac can work well with or without a phy. If the port doesn't
* connect with phy, the return value will be ignored. Only
* when there is phy but can't find mdio bus, the return value
* will be handled.
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index c2a60612f503..fcaf5132b865 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -227,7 +227,7 @@ hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
}
/**
- * hns_ppe_qid_cfg - config ppe qid
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
* @dsaf_dev: dsa fabric id
* @qid_cfg: value array
*/
@@ -613,7 +613,7 @@ static void hns_dsaf_tbl_tcam_data_cfg(
}
/**
- * dsaf_tbl_tcam_mcast_cfg - tbl
+ * hns_dsaf_tbl_tcam_mcast_cfg - tbl
* @dsaf_dev: dsa fabric id
* @mcast: addr
*/
@@ -1213,7 +1213,7 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
}
/**
- * hns_dsaf_tbl_tcam_init - INT
+ * hns_dsaf_comm_init - INT
* @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
@@ -2111,7 +2111,7 @@ static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
}
/**
- * dsaf_pfc_unit_cnt - set pfc unit count
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
* @dsaf_dev: dsa fabric id
* @mac_id: id in use
* @rate: value array
@@ -2142,7 +2142,7 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
}
/**
- * dsaf_port_work_rate_cfg - fifo
+ * hns_dsaf_port_work_rate_cfg - fifo
* @dsaf_dev: dsa fabric id
* @mac_id: mac contrl block
* @rate_mode: value array
@@ -2738,7 +2738,7 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port,
}
/**
- *hns_dsaf_get_sset_count - get dsaf regs count
+ *hns_dsaf_get_regs_count - get dsaf regs count
*return dsaf regs count
*/
int hns_dsaf_get_regs_count(void)
@@ -2949,7 +2949,7 @@ int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
}
/**
- * dsaf_probe - probo dsaf dev
+ * hns_dsaf_probe - probe dsaf dev
* @pdev: dasf platform device
* return 0 - success , negative --fail
*/
@@ -3004,7 +3004,7 @@ free_dev:
}
/**
- * dsaf_remove - remove dsaf dev
+ * hns_dsaf_remove - remove dsaf dev
* @pdev: dasf platform device
*/
static int hns_dsaf_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 325e81d30cfd..23d9cbf262c3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -56,31 +56,31 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
}
static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
- u32 link, u32 port, u32 act)
+ u32 link, u32 port, u32 act)
{
- union acpi_object *obj;
- union acpi_object obj_args[3], argv4;
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
- obj_args[0].integer.type = ACPI_TYPE_INTEGER;
- obj_args[0].integer.value = link;
- obj_args[1].integer.type = ACPI_TYPE_INTEGER;
- obj_args[1].integer.value = port;
- obj_args[2].integer.type = ACPI_TYPE_INTEGER;
- obj_args[2].integer.value = act;
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = link;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = act;
- argv4.type = ACPI_TYPE_PACKAGE;
- argv4.package.count = 3;
- argv4.package.elements = obj_args;
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
- if (!obj) {
- dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
- link, port, act);
- return;
- }
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+ link, port, act);
+ return;
+ }
- ACPI_FREE(obj);
+ ACPI_FREE(obj);
}
static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
@@ -151,15 +151,15 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
}
static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data)
+ u16 speed, int data)
{
- if (!mac_cb) {
- pr_err("cpld_led_set mac_cb is null!\n");
- return;
- }
+ if (!mac_cb) {
+ pr_err("cpld_led_set mac_cb is null!\n");
+ return;
+ }
- hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
- link_status, mac_cb->mac_id, data);
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ link_status, mac_cb->mac_id, data);
}
static void cpld_led_reset(struct hns_mac_cb *mac_cb)
@@ -174,16 +174,16 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
{
- if (!mac_cb) {
- pr_err("cpld_led_reset mac_cb is null!\n");
- return;
- }
+ if (!mac_cb) {
+ pr_err("cpld_led_reset mac_cb is null!\n");
+ return;
+ }
- if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
- return;
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
- hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
- 0, mac_cb->mac_id, 0);
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ 0, mac_cb->mac_id, 0);
}
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
@@ -351,7 +351,7 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
}
/**
- * hns_dsaf_srst_chns - reset dsaf channels
+ * hns_dsaf_srst_chns_acpi - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
* @msk: xbar channels mask value:
* @dereset: false - request reset , true - drop reset
@@ -501,7 +501,7 @@ static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
}
/**
- * hns_mac_get_sds_mode - get phy ifterface form serdes mode
+ * hns_mac_get_phy_if - get phy interface from serdes mode
* @mac_cb: mac control block
* retuen phy interface
*/
@@ -521,7 +521,7 @@ static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
reg = HNS_MAC_HILINK4_REG;
else
reg = HNS_MAC_HILINK3_REG;
- } else{
+ } else {
if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
reg = HNS_MAC_HILINK4V2_REG;
else
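
Note: the re-indented ACPI helper above follows the standard _DSM calling convention: the integer arguments are wrapped in an ACPI_TYPE_PACKAGE passed as the method's fourth argument, and the returned object must be freed by the caller. Stripped to its essentials (demo_* names are illustrative):

#include <linux/acpi.h>

static void demo_dsm_set_led(struct device *dev, const guid_t *guid,
			     u64 func, u64 link, u64 port, u64 act)
{
	union acpi_object args[3], argv4, *obj;
	u64 vals[] = { link, port, act };
	int i;

	for (i = 0; i < ARRAY_SIZE(args); i++) {
		args[i].integer.type = ACPI_TYPE_INTEGER;
		args[i].integer.value = vals[i];
	}

	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = ARRAY_SIZE(args);
	argv4.package.elements = args;

	/* revision 0, function index 'func'; free whatever comes back */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, 0, func, &argv4);
	if (obj)
		ACPI_FREE(obj);
}
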
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index ff03cafccb66..a7eb87da4e70 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -296,7 +296,7 @@ int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
}
/**
- * ppe_init_hw - init ppe
+ * hns_ppe_init_hw - init ppe
* @ppe_cb: ppe device
*/
static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
@@ -343,7 +343,7 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
}
/**
- * ppe_uninit_hw - uninit ppe
+ * hns_ppe_uninit_hw - uninit ppe
* @ppe_cb: ppe device
*/
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
@@ -382,7 +382,7 @@ void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
}
/**
- * hns_ppe_reset - reinit ppe/rcb hw
+ * hns_ppe_reset_common - reinit ppe/rcb hw
* @dsaf_dev: dasf device
* @ppe_common_index: the index
* return void
@@ -455,7 +455,7 @@ int hns_ppe_get_regs_count(void)
}
/**
- * ppe_get_strings - get ppe srting
+ * hns_ppe_get_strings - get ppe string
* @ppe_cb: ppe device
* @stringset: string set type
* @data: output string
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 5d5dc6942232..e2ff3ca198d1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -913,7 +913,7 @@ int hns_rcb_get_common_regs_count(void)
}
/**
- *rcb_get_sset_count - rcb ring regs count
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
*return regs count
*/
int hns_rcb_get_ring_regs_count(void)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index be52acd448f9..401fef5f1d07 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -104,7 +104,7 @@ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
}
/**
- * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac
+ * hns_xgmac_lf_rf_insert - insert lf rf control for xgmac
* @mac_drv: mac driver
* @mode: inserf rf or lf
*/
@@ -115,7 +115,7 @@ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode)
}
/**
- * hns_xgmac__lf_rf_control_init - initial the lf rf control register
+ * hns_xgmac_lf_rf_control_init - initialize the lf rf control register
* @mac_drv: mac driver
*/
static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5e349c0bdecc..ad534f9e41ab 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -770,7 +770,7 @@ static u32 smooth_alg(u32 new_param, u32 old_param)
}
/**
- * hns_nic_adp_coalesce - self adapte coalesce according to rx rate
+ * hns_nic_adpt_coalesce - self-adapt coalesce according to rx rate
* @ring_data: pointer to hns_nic_ring_data
**/
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index da48c05435ea..7e62dcff2426 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -192,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
}
/**
- *hns_nic_set_link_settings - implement ethtool set link ksettings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
*@cmd: ethtool_link_ksettings
*retuen 0 - success , negative --fail
@@ -827,7 +827,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
}
/**
- * get_ethtool_stats - get detail statistics.
+ * hns_get_ethtool_stats - get detailed statistics.
* @netdev: net device
* @stats: statistics info.
* @data: statistics data.
@@ -885,7 +885,7 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
}
/**
- * get_strings: Return a set of strings that describe the requested objects
+ * hns_get_strings: Return a set of strings that describe the requested objects
* @netdev: net device
* @stringset: string set ID.
* @data: objects data.
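
Note: the helpers renamed above are only ever reached through the driver's ethtool_ops table, which is why their kernel-doc headers must carry the static symbol names. Illustrative wiring, assuming the callback names used in this file:

static const struct ethtool_ops demo_hns_ethtool_ops = {
	.get_link_ksettings	= hns_nic_get_link_ksettings,
	.set_link_ksettings	= hns_nic_set_link_ksettings,
	.get_strings		= hns_get_strings,
	.get_ethtool_stats	= hns_get_ethtool_stats,
};
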
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index a2c17af57fde..0a6cda309b24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -20,7 +20,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
- HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_BASIC_INFO, /* (VF -> PF) get basic info */
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
@@ -69,6 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
+ HCLGE_MBX_ENABLE_VLAN_FILTER,
};
enum hclge_mbx_tbl_cfg_subcode {
@@ -85,6 +86,13 @@ struct hclge_ring_chain_param {
u8 int_gl_index;
};
+struct hclge_basic_info {
+ u8 hw_tc_map;
+ u8 rsv;
+ u16 mbx_api_version;
+ u32 pf_caps;
+};
+
struct hclgevf_mbx_resp_status {
struct mutex mbx_mutex; /* protects against contending sync cmd resp */
u32 origin_mbx_msg;
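
Note: HCLGE_MBX_GET_BASIC_INFO replaces the narrower HCLGE_MBX_GET_TCINFO query, letting the VF fetch the TC map, the mailbox API version and the PF capability word in one round trip. A hedged sketch of testing a pf_caps bit (the helper name is illustrative):

/* pf_caps carries HNAE3_PF_CAP_BITS; bit 0 is the VLAN-filter-modify cap. */
static bool demo_pf_can_modify_vlan_fltr(const struct hclge_basic_info *info)
{
	return info->pf_caps & BIT(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B);
}
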
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 1d2189047781..e0b7c3c44e7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -91,6 +91,10 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_STASH_B,
HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
HNAE3_DEV_SUPPORT_PAUSE_B,
+ HNAE3_DEV_SUPPORT_RAS_IMP_B,
+ HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -126,6 +130,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_dev_phy_imp_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+#define hnae3_dev_ras_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, (hdev)->ae_dev->caps)
+
#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
@@ -141,18 +148,17 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
+#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+
+enum HNAE3_PF_CAP_BITS {
+ HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
+};
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
-enum hns_desc_type {
- DESC_TYPE_UNKNOWN,
- DESC_TYPE_SKB,
- DESC_TYPE_FRAGLIST_SKB,
- DESC_TYPE_PAGE,
-};
-
struct hnae3_handle;
struct hnae3_queue {
@@ -234,7 +240,6 @@ enum hnae3_reset_type {
HNAE3_FUNC_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
- HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
HNAE3_MAX_RESET,
};
@@ -246,6 +251,52 @@ enum hnae3_port_base_vlan_state {
HNAE3_PORT_BASE_VLAN_NOCHANGE,
};
+enum hnae3_dbg_cmd {
+ HNAE3_DBG_CMD_TM_NODES,
+ HNAE3_DBG_CMD_TM_PRI,
+ HNAE3_DBG_CMD_TM_QSET,
+ HNAE3_DBG_CMD_TM_MAP,
+ HNAE3_DBG_CMD_TM_PG,
+ HNAE3_DBG_CMD_TM_PORT,
+ HNAE3_DBG_CMD_TC_SCH_INFO,
+ HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_BUF_CFG,
+ HNAE3_DBG_CMD_DEV_INFO,
+ HNAE3_DBG_CMD_TX_BD,
+ HNAE3_DBG_CMD_RX_BD,
+ HNAE3_DBG_CMD_MAC_UC,
+ HNAE3_DBG_CMD_MAC_MC,
+ HNAE3_DBG_CMD_MNG_TBL,
+ HNAE3_DBG_CMD_LOOPBACK,
+ HNAE3_DBG_CMD_PTP_INFO,
+ HNAE3_DBG_CMD_INTERRUPT_INFO,
+ HNAE3_DBG_CMD_RESET_INFO,
+ HNAE3_DBG_CMD_IMP_INFO,
+ HNAE3_DBG_CMD_NCL_CONFIG,
+ HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ HNAE3_DBG_CMD_REG_SSU,
+ HNAE3_DBG_CMD_REG_IGU_EGU,
+ HNAE3_DBG_CMD_REG_RPU,
+ HNAE3_DBG_CMD_REG_NCSI,
+ HNAE3_DBG_CMD_REG_RTC,
+ HNAE3_DBG_CMD_REG_PPP,
+ HNAE3_DBG_CMD_REG_RCB,
+ HNAE3_DBG_CMD_REG_TQP,
+ HNAE3_DBG_CMD_REG_MAC,
+ HNAE3_DBG_CMD_REG_DCB,
+ HNAE3_DBG_CMD_VLAN_CONFIG,
+ HNAE3_DBG_CMD_QUEUE_MAP,
+ HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ HNAE3_DBG_CMD_FD_TCAM,
+ HNAE3_DBG_CMD_FD_COUNTER,
+ HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ HNAE3_DBG_CMD_SERV_INFO,
+ HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_UNKNOWN,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -470,6 +521,12 @@ struct hnae3_ae_dev {
* Check if any cls flower rule exist
* dbg_read_cmd
* Execute debugfs read command.
+ * set_tx_hwts_info
+ * Save information for 1588 tx packet
+ * get_rx_hwts
+ * Get 1588 rx hwstamp
+ * get_ts_info
+ * Get phc info
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -585,7 +642,7 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
- void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
+ int (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
@@ -622,8 +679,7 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+ int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
@@ -656,6 +712,12 @@ struct hnae3_ae_ops {
struct ethtool_link_ksettings *cmd);
int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
const struct ethtool_link_ksettings *cmd);
+ bool (*set_tx_hwts_info)(struct hnae3_handle *handle,
+ struct sk_buff *skb);
+ void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+ int (*get_ts_info)(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
};
struct hnae3_dcb_ops {
@@ -700,6 +762,7 @@ struct hnae3_knic_private_info {
u16 rx_buf_len;
u16 num_tx_desc;
u16 num_rx_desc;
+ u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
@@ -738,7 +801,6 @@ struct hnae3_roce_private_info {
#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
-#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
@@ -786,10 +848,6 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
-#define HNAE3_DBG_TM_NODES "tm_nodes"
-#define HNAE3_DBG_TM_PRI "tm_priority"
-#define HNAE3_DBG_TM_QSET "tm_qset"
-
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
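
Note: each new HNAE3_DEV_SUPPORT_*_B bit gets a matching test_bit() wrapper, and feature paths key off it once at init time. For instance, a hedged sketch of how the RXD advanced layout capability could gate the state bit consulted by the debugfs dump below:

if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
	set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
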
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d702bd0c7c1..532523069d74 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -5,46 +5,539 @@
#include <linux/device.h>
#include "hnae3.h"
+#include "hns3_debugfs.h"
#include "hns3_enet.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_WRITE_LEN 1024
-
static struct dentry *hns3_dbgfs_root;
-static int hns3_dbg_queue_info(struct hnae3_handle *h,
- const char *cmd_buf)
+static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
+ {
+ .name = "tm"
+ },
+ {
+ .name = "tx_bd_info"
+ },
+ {
+ .name = "rx_bd_info"
+ },
+ {
+ .name = "mac_list"
+ },
+ {
+ .name = "reg"
+ },
+ {
+ .name = "queue"
+ },
+ {
+ .name = "fd"
+ },
+ /* keep common at the bottom and add new directory above */
+ {
+ .name = "common"
+ },
+};
+
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd);
+static int hns3_dbg_common_file_init(struct hnae3_handle *handle,
+ unsigned int cmd);
+
+static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ {
+ .name = "tm_nodes",
+ .cmd = HNAE3_DBG_CMD_TM_NODES,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_priority",
+ .cmd = HNAE3_DBG_CMD_TM_PRI,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_qset",
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_map",
+ .cmd = HNAE3_DBG_CMD_TM_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_pg",
+ .cmd = HNAE3_DBG_CMD_TM_PG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_port",
+ .cmd = HNAE3_DBG_CMD_TM_PORT,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tc_sch_info",
+ .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_pause_cfg",
+ .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_pri_map",
+ .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_buf_cfg",
+ .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "dev_info",
+ .cmd = HNAE3_DBG_CMD_DEV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tx_bd_queue",
+ .cmd = HNAE3_DBG_CMD_TX_BD,
+ .dentry = HNS3_DBG_DENTRY_TX_BD,
+ .buf_len = HNS3_DBG_READ_LEN_4MB,
+ .init = hns3_dbg_bd_file_init,
+ },
+ {
+ .name = "rx_bd_queue",
+ .cmd = HNAE3_DBG_CMD_RX_BD,
+ .dentry = HNS3_DBG_DENTRY_RX_BD,
+ .buf_len = HNS3_DBG_READ_LEN_4MB,
+ .init = hns3_dbg_bd_file_init,
+ },
+ {
+ .name = "uc",
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dentry = HNS3_DBG_DENTRY_MAC,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mc",
+ .cmd = HNAE3_DBG_CMD_MAC_MC,
+ .dentry = HNS3_DBG_DENTRY_MAC,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mng_tbl",
+ .cmd = HNAE3_DBG_CMD_MNG_TBL,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "loopback",
+ .cmd = HNAE3_DBG_CMD_LOOPBACK,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "interrupt_info",
+ .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "reset_info",
+ .cmd = HNAE3_DBG_CMD_RESET_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "imp_info",
+ .cmd = HNAE3_DBG_CMD_IMP_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ncl_config",
+ .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mac_tnl_status",
+ .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "bios_common",
+ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ssu",
+ .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "igu_egu",
+ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rpu",
+ .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ncsi",
+ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rtc",
+ .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ppp",
+ .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rcb",
+ .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tqp",
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mac",
+ .cmd = HNAE3_DBG_CMD_REG_MAC,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "dcb",
+ .cmd = HNAE3_DBG_CMD_REG_DCB,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "queue_map",
+ .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rx_queue_info",
+ .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tx_queue_info",
+ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "fd_tcam",
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dentry = HNS3_DBG_DENTRY_FD,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "service_task_info",
+ .cmd = HNAE3_DBG_CMD_SERV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "vlan_config",
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ptp_info",
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "fd_counter",
+ .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+ .dentry = HNS3_DBG_DENTRY_FD,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "umv_info",
+ .cmd = HNAE3_DBG_CMD_UMV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+};
+
+static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
+ {
+ .name = "support FD",
+ .cap_bit = HNAE3_DEV_SUPPORT_FD_B,
+ }, {
+ .name = "support GRO",
+ .cap_bit = HNAE3_DEV_SUPPORT_GRO_B,
+ }, {
+ .name = "support FEC",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_B,
+ }, {
+ .name = "support UDP GSO",
+ .cap_bit = HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ }, {
+ .name = "support PTP",
+ .cap_bit = HNAE3_DEV_SUPPORT_PTP_B,
+ }, {
+ .name = "support INT QL",
+ .cap_bit = HNAE3_DEV_SUPPORT_INT_QL_B,
+ }, {
+ .name = "support HW TX csum",
+ .cap_bit = HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
+ }, {
+ .name = "support UDP tunnel csum",
+ .cap_bit = HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
+ }, {
+ .name = "support TX push",
+ .cap_bit = HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ }, {
+ .name = "support imp-controlled PHY",
+ .cap_bit = HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ }, {
+ .name = "support imp-controlled RAS",
+ .cap_bit = HNAE3_DEV_SUPPORT_RAS_IMP_B,
+ }, {
+ .name = "support rxd advanced layout",
+ .cap_bit = HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ }, {
+ .name = "support port vlan bypass",
+ .cap_bit = HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ }, {
+ .name = "support modify vlan filter state",
+ .cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }
+};
+
+static void hns3_dbg_fill_content(char *content, u16 len,
+ const struct hns3_dbg_item *items,
+ const char **result, u16 size)
+{
+ char *pos = content;
+ u16 i;
+
+ memset(content, ' ', len);
+ for (i = 0; i < size; i++) {
+ if (result)
+ strncpy(pos, result[i], strlen(result[i]));
+ else
+ strncpy(pos, items[i].name, strlen(items[i].name));
+
+ pos += strlen(items[i].name) + items[i].interval;
+ }
+
+ *pos++ = '\n';
+ *pos++ = '\0';
+}
+
+static const struct hns3_dbg_item tx_spare_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "COPYBREAK", 2 },
+ { "LEN", 7 },
+ { "NTU", 4 },
+ { "NTC", 4 },
+ { "LTC", 4 },
+ { "DMA", 17 },
+};
+
+static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
+ int len, u32 ring_num, int *pos)
+{
+ char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ char *result[ARRAY_SIZE(tx_spare_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ u32 i, j;
+
+ if (!tx_spare) {
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx spare buffer is not enabled\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
+ hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
+ NULL, ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < ring_num; i++) {
+ j = 0;
+ sprintf(result[j++], "%8u", i);
+ sprintf(result[j++], "%9u", ring->tx_copybreak);
+ sprintf(result[j++], "%3u", tx_spare->len);
+ sprintf(result[j++], "%3u", tx_spare->next_to_use);
+ sprintf(result[j++], "%3u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%pad", &tx_spare->dma);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_spare_info_items,
+ (const char **)result,
+ ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static const struct hns3_dbg_item rx_queue_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "BD_NUM", 2 },
+ { "BD_LEN", 2 },
+ { "TAIL", 2 },
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+ { "PKTNUM", 2 },
+ { "COPYBREAK", 2 },
+ { "RING_EN", 2 },
+ { "RX_RING_EN", 2 },
+ { "BASE_ADDR", 10 },
+};
+
+static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
+ struct hnae3_ae_dev *ae_dev, char **result,
+ u32 index)
{
+ u32 base_add_l, base_add_h;
+ u32 j = 0;
+
+ sprintf(result[j++], "%8u", index);
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG));
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG));
+
+ sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG));
+
+ sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG));
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG));
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ sprintf(result[j++], "%9u", ring->rx_copybreak);
+
+ sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG) ? "on" : "off");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
+ sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG) ? "on" : "off");
+ else
+ sprintf(result[j++], "%10s", "NA");
+
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+}
+
+static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
+ char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ char *result[ARRAY_SIZE(rx_queue_info_items)];
struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- u32 base_add_l, base_add_h;
- u32 queue_num, queue_max;
- u32 value, i;
- int cnt;
+ int pos = 0;
+ u32 i;
if (!priv->ring) {
dev_err(&h->pdev->dev, "priv->ring is NULL\n");
return -EFAULT;
}
- queue_max = h->kinfo.num_tqps;
- cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
- if (cnt)
- queue_num = 0;
- else
- queue_max = queue_num + 1;
+ for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
+ result[i] = &data_str[i][0];
- dev_info(&h->pdev->dev, "queue info\n");
-
- if (queue_num >= h->kinfo.num_tqps) {
- dev_err(&h->pdev->dev,
- "Queue number(%u) is out of range(0-%u)\n", queue_num,
- h->kinfo.num_tqps - 1);
- return -EINVAL;
- }
-
- for (i = queue_num; i < queue_max; i++) {
+ hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
+ NULL, ARRAY_SIZE(rx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
* that the following code is executed within 100ms.
@@ -54,491 +547,524 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
- base_add_h, base_add_l);
+ hns3_dump_rx_queue_info(ring, ae_dev, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ rx_queue_info_items,
+ (const char **)result,
+ ARRAY_SIZE(rx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
+static const struct hns3_dbg_item tx_queue_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "BD_NUM", 2 },
+ { "TC", 2 },
+ { "TAIL", 2 },
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+ { "OFFSET", 2 },
+ { "PKTNUM", 2 },
+ { "RING_EN", 2 },
+ { "TX_RING_EN", 2 },
+ { "BASE_ADDR", 10 },
+};
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
+static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
+ struct hnae3_ae_dev *ae_dev, char **result,
+ u32 index)
+{
+ u32 base_add_l, base_add_h;
+ u32 j = 0;
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
+ sprintf(result[j++], "%8u", index);
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG));
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
+ sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG));
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
+ sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG));
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
+ sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG));
- ring = &priv->ring[i];
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
- base_add_h, base_add_l);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n", i, value);
-
- value = readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG);
- dev_info(&h->pdev->dev, "TX/RX(%u) RING EN: %s\n", i,
- value ? "enable" : "disable");
-
- if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) {
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING EN: %s\n", i,
- value ? "enable" : "disable");
-
- value = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG);
- dev_info(&h->pdev->dev, "RX(%u) RING EN: %s\n", i,
- value ? "enable" : "disable");
- }
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG));
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG));
+
+ sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+
+ sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG) ? "on" : "off");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
+ sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG) ? "on" : "off");
+ else
+ sprintf(result[j++], "%10s", "NA");
+
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+}
+
+static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
+ char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
+ NULL, ARRAY_SIZE(tx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ /* Each cycle needs to determine whether the instance is reset,
+ * to prevent reference to invalid memory. And need to ensure
+ * that the following code is executed within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
- dev_info(&h->pdev->dev, "\n");
+ ring = &priv->ring[i];
+ hns3_dump_tx_queue_info(ring, ae_dev, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_queue_info_items,
+ (const char **)result,
+ ARRAY_SIZE(tx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
+ hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
+
return 0;
}
-static int hns3_dbg_queue_map(struct hnae3_handle *h)
+static const struct hns3_dbg_item queue_map_items[] = {
+ { "local_queue_id", 2 },
+ { "global_queue_id", 2 },
+ { "vector_id", 2 },
+};
+
+static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
{
+ char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(queue_map_items)];
struct hns3_nic_priv *priv = h->priv;
- int i;
+ char content[HNS3_DBG_INFO_LEN];
+ int pos = 0;
+ int j;
+ u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
- dev_info(&h->pdev->dev,
- "local queue id | global queue id | vector id\n");
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- u16 global_qid;
+ for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
+ result[i] = &data_str[i][0];
- global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
+ hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
+ NULL, ARRAY_SIZE(queue_map_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
-
- dev_info(&h->pdev->dev,
- " %4d %4u %4d\n",
- i, global_qid, priv->ring[i].tqp_vector->vector_irq);
+ j = 0;
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u",
+ h->ae_algo->ops->get_global_queue_id(h, i));
+ sprintf(result[j++], "%u",
+ priv->ring[i].tqp_vector->vector_irq);
+ hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
+ (const char **)result,
+ ARRAY_SIZE(queue_map_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
return 0;
}
-static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
+static const struct hns3_dbg_item rx_bd_info_items[] = {
+ { "BD_IDX", 3 },
+ { "L234_INFO", 2 },
+ { "PKT_LEN", 3 },
+ { "SIZE", 4 },
+ { "RSS_HASH", 4 },
+ { "FD_ID", 2 },
+ { "VLAN_TAG", 2 },
+ { "O_DM_VLAN_ID_FB", 2 },
+ { "OT_VLAN_TAG", 2 },
+ { "BD_BASE_INFO", 2 },
+ { "PTYPE", 2 },
+ { "HW_CSUM", 2 },
+};
+
+static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
+ struct hns3_desc *desc, char **result, int idx)
{
- struct hns3_nic_priv *priv = h->priv;
- struct hns3_desc *rx_desc, *tx_desc;
- struct device *dev = &h->pdev->dev;
- struct hns3_enet_ring *ring;
- u32 tx_index, rx_index;
- u32 q_num, value;
- dma_addr_t addr;
- u16 mss_hw_csum;
- u32 l234info;
- int cnt;
-
- cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
- if (cnt == 2) {
- rx_index = tx_index;
- } else if (cnt != 1) {
- dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
- return -EINVAL;
+ unsigned int j = 0;
+
+ sprintf(result[j++], "%5d", idx);
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
+ sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
+ HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S));
+ sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ } else {
+ sprintf(result[j++], "NA");
+ sprintf(result[j++], "NA");
}
+}
+
+static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_nic_priv *priv = d->handle->priv;
+ char *result[ARRAY_SIZE(rx_bd_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ struct hns3_desc *desc;
+ unsigned int i;
+ int pos = 0;
- if (q_num >= h->kinfo.num_tqps) {
- dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
- h->kinfo.num_tqps - 1);
+ if (d->qid >= d->handle->kinfo.num_tqps) {
+ dev_err(&d->handle->pdev->dev,
+ "queue%u is not in use\n", d->qid);
return -EINVAL;
}
- ring = &priv->ring[q_num];
- value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
- tx_index = (cnt == 1) ? value : tx_index;
+ for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
+ result[i] = &data_str[i][0];
- if (tx_index >= ring->desc_num) {
- dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
- ring->desc_num - 1);
- return -EINVAL;
- }
+ pos += scnprintf(buf + pos, len - pos,
+ "Queue %u rx bd info:\n", d->qid);
+ hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
+ NULL, ARRAY_SIZE(rx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
- tx_desc = &ring->desc[tx_index];
- addr = le64_to_cpu(tx_desc->addr);
- mss_hw_csum = le16_to_cpu(tx_desc->tx.mss_hw_csum);
- dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
- dev_info(dev, "(TX)addr: %pad\n", &addr);
- dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
- dev_info(dev, "(TX)send_size: %u\n",
- le16_to_cpu(tx_desc->tx.send_size));
-
- if (mss_hw_csum & BIT(HNS3_TXD_HW_CS_B)) {
- u32 offset = le32_to_cpu(tx_desc->tx.ol_type_vlan_len_msec);
- u32 start = le32_to_cpu(tx_desc->tx.type_cs_vlan_tso_len);
-
- dev_info(dev, "(TX)csum start: %u\n",
- hnae3_get_field(start,
- HNS3_TXD_CSUM_START_M,
- HNS3_TXD_CSUM_START_S));
- dev_info(dev, "(TX)csum offset: %u\n",
- hnae3_get_field(offset,
- HNS3_TXD_CSUM_OFFSET_M,
- HNS3_TXD_CSUM_OFFSET_S));
- } else {
- dev_info(dev, "(TX)vlan_tso: %u\n",
- tx_desc->tx.type_cs_vlan_tso);
- dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
- dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
- dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
- dev_info(dev, "(TX)vlan_msec: %u\n",
- tx_desc->tx.ol_type_vlan_msec);
- dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
- dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
- dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
- }
+ ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ for (i = 0; i < ring->desc_num; i++) {
+ desc = &ring->desc[i];
- dev_info(dev, "(TX)vlan_tag: %u\n",
- le16_to_cpu(tx_desc->tx.outer_vlan_tag));
- dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
- dev_info(dev, "(TX)paylen_ol4cs: %u\n",
- le32_to_cpu(tx_desc->tx.paylen_ol4cs));
- dev_info(dev, "(TX)vld_ra_ri: %u\n",
- le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
- dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum);
-
- ring = &priv->ring[q_num + h->kinfo.num_tqps];
- value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
- rx_index = (cnt == 1) ? value : tx_index;
- rx_desc = &ring->desc[rx_index];
-
- addr = le64_to_cpu(rx_desc->addr);
- l234info = le32_to_cpu(rx_desc->rx.l234_info);
- dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
- dev_info(dev, "(RX)addr: %pad\n", &addr);
- dev_info(dev, "(RX)l234_info: %u\n", l234info);
-
- if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
- u32 lo, hi;
-
- lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
- HNS3_RXD_L2_CSUM_L_S);
- hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
- HNS3_RXD_L2_CSUM_H_S);
- dev_info(dev, "(RX)csum: %u\n", lo | hi << 8);
+ hns3_dump_rx_bd_info(priv, desc, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ rx_bd_info_items, (const char **)result,
+ ARRAY_SIZE(rx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
- dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
- dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
- dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
- dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
- dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
- dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
- le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
- dev_info(dev, "(RX)ot_vlan_tag: %u\n",
- le16_to_cpu(rx_desc->rx.ot_vlan_tag));
- dev_info(dev, "(RX)bd_base_info: %u\n",
- le32_to_cpu(rx_desc->rx.bd_base_info));
-
return 0;
}
-static void hns3_dbg_help(struct hnae3_handle *h)
-{
-#define HNS3_DBG_BUF_LEN 256
-
- char printf_buf[HNS3_DBG_BUF_LEN];
-
- dev_info(&h->pdev->dev, "available commands\n");
- dev_info(&h->pdev->dev, "queue info <number>\n");
- dev_info(&h->pdev->dev, "queue map\n");
- dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
- dev_info(&h->pdev->dev, "dev capability\n");
- dev_info(&h->pdev->dev, "dev spec\n");
-
- if (!hns3_is_phys_func(h->pdev))
- return;
-
- dev_info(&h->pdev->dev, "dump fd tcam\n");
- dev_info(&h->pdev->dev, "dump tc\n");
- dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
- dev_info(&h->pdev->dev, "dump tm\n");
- dev_info(&h->pdev->dev, "dump qos pause cfg\n");
- dev_info(&h->pdev->dev, "dump qos pri map\n");
- dev_info(&h->pdev->dev, "dump qos buf cfg\n");
- dev_info(&h->pdev->dev, "dump mng tbl\n");
- dev_info(&h->pdev->dev, "dump reset info\n");
- dev_info(&h->pdev->dev, "dump m7 info\n");
- dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
- dev_info(&h->pdev->dev, "dump mac tnl status\n");
- dev_info(&h->pdev->dev, "dump loopback\n");
- dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
- dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
- dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
- dev_info(&h->pdev->dev, "dump intr\n");
-
- memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
- HNS3_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf),
- " [igu egu <port_id>] [rpu <tc_queue_num>]",
- HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
- strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
- HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
- dev_info(&h->pdev->dev, "%s", printf_buf);
-
- memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
- HNS3_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
- HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
- dev_info(&h->pdev->dev, "%s", printf_buf);
-}
+static const struct hns3_dbg_item tx_bd_info_items[] = {
+ { "BD_IDX", 5 },
+ { "ADDRESS", 2 },
+ { "VLAN_TAG", 2 },
+ { "SIZE", 2 },
+ { "T_CS_VLAN_TSO", 2 },
+ { "OT_VLAN_TAG", 3 },
+ { "TV", 2 },
+ { "OLT_VLAN_LEN", 2},
+ { "PAYLEN_OL4CS", 2},
+ { "BD_FE_SC_VLD", 2},
+ { "MSS_HW_CSUM", 0},
+};
-static void hns3_dbg_dev_caps(struct hnae3_handle *h)
+static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
+ struct hns3_desc *desc, char **result, int idx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- unsigned long *caps;
-
- caps = ae_dev->caps;
-
- dev_info(&h->pdev->dev, "support FD: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support GRO: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support FEC: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support PTP: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support INT QL: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support HW TX csum: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no");
- dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
- "yes" : "no");
- dev_info(&h->pdev->dev, "support PAUSE: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps) ?
- "yes" : "no");
- dev_info(&h->pdev->dev, "support imp-controlled PHY: %s\n",
- test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, caps) ? "yes" : "no");
+ unsigned int j = 0;
+
+ sprintf(result[j++], "%6d", idx);
+ sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%#x",
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%10u",
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
+ sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
+ sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
}
-static void hns3_dbg_dev_specs(struct hnae3_handle *h)
+static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
- struct hnae3_knic_private_info *kinfo = &h->kinfo;
- struct hns3_nic_priv *priv = h->priv;
-
- dev_info(priv->dev, "MAC entry num: %u\n", dev_specs->mac_entry_num);
- dev_info(priv->dev, "MNG entry num: %u\n", dev_specs->mng_entry_num);
- dev_info(priv->dev, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- dev_info(priv->dev, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- dev_info(priv->dev, "RSS key size: %u\n", dev_specs->rss_key_size);
- dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
- dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
- dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
-
- dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
- dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
- dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
- dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
- dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
- dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
- dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
-}
+ char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_nic_priv *priv = d->handle->priv;
+ char *result[ARRAY_SIZE(tx_bd_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ struct hns3_desc *desc;
+ unsigned int i;
+ int pos = 0;
-static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- int uncopy_bytes;
- char *buf;
- int len;
+ if (d->qid >= d->handle->kinfo.num_tqps) {
+ dev_err(&d->handle->pdev->dev,
+ "queue%u is not in use\n", d->qid);
+ return -EINVAL;
+ }
- if (*ppos != 0)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
+ result[i] = &data_str[i][0];
- if (count < HNS3_DBG_READ_LEN)
- return -ENOSPC;
+ pos += scnprintf(buf + pos, len - pos,
+ "Queue %u tx bd info:\n", d->qid);
+ hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
+ NULL, ARRAY_SIZE(tx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
- buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ ring = &priv->ring[d->qid];
+ for (i = 0; i < ring->desc_num; i++) {
+ desc = &ring->desc[i];
- len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
- uncopy_bytes = copy_to_user(buffer, buf, len);
+ hns3_dump_tx_bd_info(priv, desc, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_bd_info_items, (const char **)result,
+ ARRAY_SIZE(tx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
- kfree(buf);
+ return 0;
+}
- if (uncopy_bytes)
- return -EFAULT;
+static void
+hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ static const char * const str[] = {"no", "yes"};
+ unsigned long *caps = ae_dev->caps;
+ u32 i, state;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
+ state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
+ hns3_dbg_cap[i].name, str[state]);
+ }
- return (*ppos = len);
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
}
-static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+static void
+hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
{
- int ret = 0;
-
- if (strncmp(cmd_buf, "help", 4) == 0)
- hns3_dbg_help(handle);
- else if (strncmp(cmd_buf, "queue info", 10) == 0)
- ret = hns3_dbg_queue_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "queue map", 9) == 0)
- ret = hns3_dbg_queue_map(handle);
- else if (strncmp(cmd_buf, "bd info", 7) == 0)
- ret = hns3_dbg_bd_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "dev capability", 14) == 0)
- hns3_dbg_dev_caps(handle);
- else if (strncmp(cmd_buf, "dev spec", 8) == 0)
- hns3_dbg_dev_specs(handle);
- else if (handle->ae_algo->ops->dbg_run_cmd)
- ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
- else
- ret = -EOPNOTSUPP;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
- return ret;
+ *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
+ dev_specs->mac_entry_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
+ dev_specs->mng_entry_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
+ dev_specs->rss_ind_tbl_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
+ dev_specs->rss_key_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
+ kinfo->rss_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
+ kinfo->req_rss_size);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "Task queue pairs numbers: %u\n",
+ kinfo->num_tqps);
+ *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
+ kinfo->rx_buf_len);
+ *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
+ kinfo->num_tx_desc);
+ *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
+ kinfo->num_rx_desc);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
+ dev_specs->int_ql_max);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
+ dev_specs->max_int_gl);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
+ dev_specs->max_tm_rate);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
+ dev_specs->max_qset_num);
}
-static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
- size_t count, loff_t *ppos)
+static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
{
- struct hnae3_handle *handle = filp->private_data;
- struct hns3_nic_priv *priv = handle->priv;
- char *cmd_buf, *cmd_buf_tmp;
- int uncopied_bytes;
- int ret;
+ int pos = 0;
- if (*ppos != 0)
- return 0;
+ hns3_dbg_dev_caps(h, buf, len, &pos);
- /* Judge if the instance is being reset. */
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
- return 0;
+ hns3_dbg_dev_specs(h, buf, len, &pos);
- if (count > HNS3_DBG_WRITE_LEN)
- return -ENOSPC;
+ return 0;
+}
- cmd_buf = kzalloc(count + 1, GFP_KERNEL);
- if (!cmd_buf)
- return count;
+static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle,
+ const unsigned char *name, u32 *index)
+{
+ u32 i;
- uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
- if (uncopied_bytes) {
- kfree(cmd_buf);
- return -EFAULT;
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+ if (!strncmp(name, hns3_dbg_cmd[i].name,
+ strlen(hns3_dbg_cmd[i].name))) {
+ *index = i;
+ return 0;
+ }
}
- cmd_buf[count] = '\0';
+ dev_err(&handle->pdev->dev, "unknown command(%s)\n", name);
+ return -EINVAL;
+}
- cmd_buf_tmp = strchr(cmd_buf, '\n');
- if (cmd_buf_tmp) {
- *cmd_buf_tmp = '\0';
- count = cmd_buf_tmp - cmd_buf + 1;
- }
+static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
+ {
+ .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
+ .dbg_dump = hns3_dbg_queue_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_DEV_INFO,
+ .dbg_dump = hns3_dbg_dev_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TX_BD,
+ .dbg_dump_bd = hns3_dbg_tx_bd_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RX_BD,
+ .dbg_dump_bd = hns3_dbg_rx_bd_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ .dbg_dump = hns3_dbg_rx_queue_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ .dbg_dump = hns3_dbg_tx_queue_info,
+ },
+};
- ret = hns3_dbg_check_cmd(handle, cmd_buf);
- if (ret)
- hns3_dbg_help(handle);
+static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
+ enum hnae3_dbg_cmd cmd, char *buf, int len)
+{
+ const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
+ const struct hns3_dbg_func *cmd_func;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
+ if (cmd == hns3_dbg_cmd_func[i].cmd) {
+ cmd_func = &hns3_dbg_cmd_func[i];
+ if (cmd_func->dbg_dump)
+ return cmd_func->dbg_dump(dbg_data->handle, buf,
+ len);
+ else
+ return cmd_func->dbg_dump_bd(dbg_data, buf,
+ len);
+ }
+ }
- kfree(cmd_buf);
- cmd_buf = NULL;
+ if (!ops->dbg_read_cmd)
+ return -EOPNOTSUPP;
- return count;
+ return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
}
static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct hnae3_handle *handle = filp->private_data;
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_dbg_data *dbg_data = filp->private_data;
+ struct hnae3_handle *handle = dbg_data->handle;
struct hns3_nic_priv *priv = handle->priv;
- char *cmd_buf, *read_buf;
ssize_t size = 0;
- int ret = 0;
-
- read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
+ char **save_buf;
+ char *read_buf;
+ u32 index;
+ int ret;
- cmd_buf = filp->f_path.dentry->d_iname;
+ ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname,
+ &index);
+ if (ret)
+ return ret;
- if (ops->dbg_read_cmd)
- ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
- HNS3_DBG_READ_LEN);
+ save_buf = &hns3_dbg_cmd[index].buf;
- if (ret) {
- dev_info(priv->dev, "unknown command\n");
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
+ ret = -EBUSY;
goto out;
}
+ if (*save_buf) {
+ read_buf = *save_buf;
+ } else {
+ read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ /* save the buffer addr until the last read operation */
+ *save_buf = read_buf;
+ }
+
+	/* get the data ready for the first read */
+ if (!*ppos) {
+ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+ read_buf, hns3_dbg_cmd[index].buf_len);
+ if (ret)
+ goto out;
+ }
+
size = simple_read_from_buffer(buffer, count, ppos, read_buf,
strlen(read_buf));
+ if (size > 0)
+ return size;
out:
- kfree(read_buf);
- return size;
-}
+	/* free the buffer on the last read or on error */
+ if (*save_buf) {
+ kvfree(*save_buf);
+ *save_buf = NULL;
+ }
-static const struct file_operations hns3_dbg_cmd_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = hns3_dbg_cmd_read,
- .write = hns3_dbg_cmd_write,
-};
+ return ret;
+}
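
[Editor's note] The read path above builds the dump buffer on the first read (*ppos == 0), caches it in hns3_dbg_cmd[index].buf across partial reads, and frees it once the reader hits EOF or an error occurs. A minimal userspace sketch of the same fill-once, read-many pattern; the buffer size and dump text are illustrative, not driver values:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *save_buf;	/* cached across reads, like hns3_dbg_cmd[i].buf */

	static long dbg_read(char *dst, size_t count, long *ppos)
	{
		size_t len;

		if (!save_buf) {	/* first read: build the dump */
			save_buf = malloc(4096);
			if (!save_buf)
				return -1;
			snprintf(save_buf, 4096, "dev capability:\nhw tx csum: yes\n");
		}

		len = strlen(save_buf);
		if ((size_t)*ppos >= len) {	/* EOF: last read, drop the cache */
			free(save_buf);
			save_buf = NULL;
			return 0;
		}

		if (count > len - (size_t)*ppos)
			count = len - (size_t)*ppos;
		memcpy(dst, save_buf + *ppos, count);	/* cf. simple_read_from_buffer() */
		*ppos += count;
		return (long)count;
	}

	int main(void)
	{
		char chunk[8];
		long pos = 0, n;

		while ((n = dbg_read(chunk, sizeof(chunk) - 1, &pos)) > 0) {
			chunk[n] = '\0';
			fputs(chunk, stdout);
		}
		return 0;
	}
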
static const struct file_operations hns3_dbg_fops = {
.owner = THIS_MODULE,
@@ -546,29 +1072,108 @@ static const struct file_operations hns3_dbg_fops = {
.read = hns3_dbg_read,
};
-void hns3_dbg_init(struct hnae3_handle *handle)
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
+{
+ struct dentry *entry_dir;
+ struct hns3_dbg_data *data;
+ u16 max_queue_num;
+ unsigned int i;
+
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ max_queue_num = hns3_get_max_available_channels(handle);
+ data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < max_queue_num; i++) {
+ char name[HNS3_DBG_FILE_NAME_LEN];
+
+ data[i].handle = handle;
+ data[i].qid = i;
+ sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
+ debugfs_create_file(name, 0400, entry_dir, &data[i],
+ &hns3_dbg_fops);
+ }
+
+ return 0;
+}
+
+static int
+hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+{
+ struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
+
+ data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = handle;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
+ data, &hns3_dbg_fops);
+
+ return 0;
+}
+
+int hns3_dbg_init(struct hnae3_handle *handle)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const char *name = pci_name(handle->pdev);
- struct dentry *entry_dir;
+ int ret;
+ u32 i;
+
+ hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
+ debugfs_create_dir(name, hns3_dbgfs_root);
+ handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
+
+ for (i = 0; i < HNS3_DBG_DENTRY_COMMON; i++)
+ hns3_dbg_dentry[i].dentry =
+ debugfs_create_dir(hns3_dbg_dentry[i].name,
+ handle->hnae3_dbgfs);
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+ if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
+ (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO &&
+ !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)))
+ continue;
- handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+ if (!hns3_dbg_cmd[i].init) {
+ dev_err(&handle->pdev->dev,
+ "cmd %s lack of init func\n",
+ hns3_dbg_cmd[i].name);
+ ret = -EINVAL;
+ goto out;
+ }
- debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
- &hns3_dbg_cmd_fops);
+ ret = hns3_dbg_cmd[i].init(handle, i);
+ if (ret) {
+ dev_err(&handle->pdev->dev, "failed to init cmd %s\n",
+ hns3_dbg_cmd[i].name);
+ goto out;
+ }
+ }
- entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
- if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
- debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
- &hns3_dbg_fops);
- debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
- &hns3_dbg_fops);
- debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
- &hns3_dbg_fops);
+ return 0;
+
+out:
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+ return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+ if (hns3_dbg_cmd[i].buf) {
+ kvfree(hns3_dbg_cmd[i].buf);
+ hns3_dbg_cmd[i].buf = NULL;
+ }
+
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
new file mode 100644
index 000000000000..f3766ff38bb7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HNS3_DEBUGFS_H
+#define __HNS3_DEBUGFS_H
+
+#define HNS3_DBG_READ_LEN 65536
+#define HNS3_DBG_READ_LEN_128KB 0x20000
+#define HNS3_DBG_READ_LEN_1MB 0x100000
+#define HNS3_DBG_READ_LEN_4MB 0x400000
+#define HNS3_DBG_WRITE_LEN 1024
+
+#define HNS3_DBG_DATA_STR_LEN 32
+#define HNS3_DBG_INFO_LEN 256
+#define HNS3_DBG_ITEM_NAME_LEN 32
+#define HNS3_DBG_FILE_NAME_LEN 16
+
+struct hns3_dbg_item {
+ char name[HNS3_DBG_ITEM_NAME_LEN];
+	u16 interval; /* number of blanks after the item */
+};
+
+struct hns3_dbg_data {
+ struct hnae3_handle *handle;
+ u16 qid;
+};
+
+enum hns3_dbg_dentry_type {
+ HNS3_DBG_DENTRY_TM,
+ HNS3_DBG_DENTRY_TX_BD,
+ HNS3_DBG_DENTRY_RX_BD,
+ HNS3_DBG_DENTRY_MAC,
+ HNS3_DBG_DENTRY_REG,
+ HNS3_DBG_DENTRY_QUEUE,
+ HNS3_DBG_DENTRY_FD,
+ HNS3_DBG_DENTRY_COMMON,
+};
+
+struct hns3_dbg_dentry_info {
+ const char *name;
+ struct dentry *dentry;
+};
+
+struct hns3_dbg_cmd_info {
+ const char *name;
+ enum hnae3_dbg_cmd cmd;
+ enum hns3_dbg_dentry_type dentry;
+ u32 buf_len;
+ char *buf;
+ int (*init)(struct hnae3_handle *handle, unsigned int cmd);
+};
+
+struct hns3_dbg_func {
+ enum hnae3_dbg_cmd cmd;
+ int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
+ int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
+};
+
+struct hns3_dbg_cap_info {
+ const char *name;
+ enum HNAE3_DEV_CAP_BITS cap_bit;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 783fdaf8f8d6..cdb5f14fb6bc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -53,6 +53,19 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
+static unsigned int tx_spare_buf_size;
+module_param(tx_spare_buf_size, uint, 0400);
+MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
+
+static unsigned int tx_sgl = 1;
+module_param(tx_sgl, uint, 0600);
+MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+
+#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
+ sizeof(struct sg_table))
+#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\
+ dma_get_cache_alignment())
+
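
[Editor's note] HNS3_SGL_SIZE() above lays an sg_table header and nfrag scatterlist entries out back to back in the tx spare buffer, and HNS3_MAX_SGL_SIZE rounds the worst case up to the DMA cache alignment. A hedged userspace sketch of the arithmetic, with stand-in structure layouts, an assumed 64-byte alignment, and an illustrative frag count:

	#include <stdio.h>

	/* stand-ins for the kernel structures; sizes are illustrative */
	struct scatterlist { unsigned long page_link; unsigned int offset, length; unsigned long long dma_address; };
	struct sg_table { struct scatterlist *sgl; unsigned int nents, orig_nents; };

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
	#define SGL_SIZE(nfrag)	(sizeof(struct sg_table) + sizeof(struct scatterlist) * (nfrag))

	int main(void)
	{
		size_t align = 64;		/* assumed dma_get_cache_alignment() */
		unsigned int max_tso_bd = 18;	/* illustrative stand-in for HNS3_MAX_TSO_BD_NUM */

		printf("sgl for %u frags: %zu bytes, %zu aligned\n", max_tso_bd,
		       SGL_SIZE(max_tso_bd), ALIGN_UP(SGL_SIZE(max_tso_bd), align));
		return 0;
	}
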
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
@@ -91,11 +104,284 @@ static const struct pci_device_id hns3_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
+#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
+ { ptype, \
+ l, \
+ CHECKSUM_##s, \
+ HNS3_L3_TYPE_##t, \
+ 1 }
+
+#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
+ { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
+
+static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
+ HNS3_RX_PTYPE_UNUSED_ENTRY(0),
+ HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
+ HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
+ HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
+ HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
+ HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(9),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(10),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(11),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(12),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(13),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(14),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(15),
+ HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(26),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(27),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(28),
+ HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(38),
+ HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(46),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(47),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(48),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(49),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(50),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(51),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(52),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(53),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(54),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(55),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(56),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(57),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(58),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(59),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(60),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(61),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(62),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(63),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(64),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(65),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(66),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(67),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(68),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(69),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(70),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(71),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(72),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(73),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(74),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(75),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(76),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(77),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(78),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(79),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(80),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(81),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(82),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(83),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(84),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(85),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(86),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(87),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(88),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(89),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(90),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(91),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(92),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(93),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(94),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(95),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(96),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(97),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(98),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(99),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(100),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(101),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(102),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(103),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(104),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(105),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(106),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(107),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(108),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(109),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(110),
+ HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(120),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(121),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(122),
+ HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(132),
+ HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(140),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(141),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(142),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(143),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(144),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(145),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(146),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(147),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(148),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(149),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(150),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(151),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(152),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(153),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(154),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(155),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(156),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(157),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(158),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(159),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(160),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(161),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(162),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(163),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(164),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(165),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(166),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(167),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(168),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(169),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(170),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(171),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(172),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(173),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(174),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(175),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(176),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(177),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(178),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(179),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(180),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(181),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(182),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(183),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(184),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(185),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(186),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(187),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(188),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(189),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(190),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(191),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(192),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(193),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(194),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(195),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(196),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(197),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(198),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(199),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(200),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(201),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(202),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(203),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(204),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(205),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(206),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(207),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(208),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(209),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(210),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(211),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(212),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(213),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(214),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(215),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(216),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(217),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(218),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(219),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(220),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(221),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(222),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(223),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(224),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(225),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(226),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(227),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(228),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(229),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(230),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(231),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(232),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(233),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(234),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(235),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(236),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(237),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(238),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(239),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(240),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(241),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(242),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(243),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(244),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(245),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(246),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(247),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(248),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(249),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(250),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(251),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(252),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(253),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(254),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(255),
+};
+
+#define HNS3_INVALID_PTYPE \
+ ARRAY_SIZE(hns3_rx_ptype_tbl)
+
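
[Editor's note] The table above maps every 8-bit hardware packet type straight to checksum metadata, with unused slots filled by explicit placeholder entries and ARRAY_SIZE() reused as an out-of-range "no ptype" sentinel. A small userspace sketch of that dense-lookup-plus-sentinel idiom; the entries and field values below are illustrative, not the driver's:

	#include <stdio.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	struct rx_ptype { unsigned char valid, csum_level; };

	static const struct rx_ptype tbl[256] = {
		[19] = { .valid = 1, .csum_level = 0 },	/* e.g. plain IPv4/TCP */
		[41] = { .valid = 1, .csum_level = 1 },	/* e.g. tunneled IPv6/TCP */
	};

	#define INVALID_PTYPE	ARRAY_SIZE(tbl)	/* out-of-band "no ptype" marker */

	int main(void)
	{
		unsigned int ptype = 41;	/* would come from the rx descriptor */

		if (ptype != INVALID_PTYPE && tbl[ptype].valid)
			printf("csum_level %u\n", tbl[ptype].csum_level);
		return 0;
	}
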
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule_irqoff(&tqp_vector->napi);
+ tqp_vector->event_cnt++;
return IRQ_HANDLED;
}
@@ -199,6 +485,8 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
+ cancel_work_sync(&tqp_vector->rx_group.dim.work);
+ cancel_work_sync(&tqp_vector->tx_group.dim.work);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
@@ -264,22 +552,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
- /* initialize the configuration for interrupt coalescing.
- * 1. GL (Interrupt Gap Limiter)
- * 2. RL (Interrupt Rate Limiter)
- * 3. QL (Interrupt Quantity Limiter)
- *
- * Default: enable interrupt coalescing self-adaptive and GL
- */
- tx_coal->adapt_enable = 1;
- rx_coal->adapt_enable = 1;
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
- tx_coal->int_gl = HNS3_INT_GL_50K;
- rx_coal->int_gl = HNS3_INT_GL_50K;
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
- rx_coal->flow_level = HNS3_FLOW_LOW;
- tx_coal->flow_level = HNS3_FLOW_LOW;
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
@@ -294,8 +577,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
- tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
- rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
}
}
@@ -362,7 +645,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
return 0;
}
-static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
+u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
u16 alloc_tqps, max_rss_size, rss_size;
@@ -638,13 +921,10 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
u8 flags = 0;
- if (netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC)
flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
- } else {
- flags |= HNAE3_VLAN_FLTR;
- if (netdev->flags & IFF_ALLMULTI)
- flags |= HNAE3_USER_MPE;
- }
+ else if (netdev->flags & IFF_ALLMULTI)
+ flags = HNAE3_USER_MPE;
return flags;
}
@@ -674,23 +954,202 @@ void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
ops->request_update_promisc_mode(handle);
}
-void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- bool last_state;
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc, ntu;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
- h->ae_algo->ops->enable_vlan_filter) {
- last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
- if (enable != last_state) {
- netdev_info(netdev,
- "%s vlan filter\n",
- enable ? "enable" : "disable");
- h->ae_algo->ops->enable_vlan_filter(h, enable);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+	 * hns3_tx_spare_update() called in the tx desc cleaning process.
+ */
+ ntc = smp_load_acquire(&tx_spare->last_to_clean);
+ ntu = tx_spare->next_to_use;
+
+ if (ntc > ntu)
+ return ntc - ntu - 1;
+
+	/* The free tx buffer is divided into two parts, so pick the
+ * larger one.
+ */
+ return (ntc > (tx_spare->len - ntu) ? ntc :
+ (tx_spare->len - ntu)) - 1;
+}
+
+static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (!tx_spare ||
+ tx_spare->last_to_clean == tx_spare->next_to_clean)
+ return;
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+	 * hns3_tx_spare_space() called in the xmit process.
+ */
+ smp_store_release(&tx_spare->last_to_clean,
+ tx_spare->next_to_clean);
+}
+
+static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ u32 len = skb->len <= ring->tx_copybreak ? skb->len :
+ skb_headlen(skb);
+
+ if (len > ring->tx_copybreak)
+ return false;
+
+ if (ALIGN(len, dma_get_cache_alignment()) > space) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_spare_full++;
+ u64_stats_update_end(&ring->syncp);
+ return false;
+ }
+
+ return true;
+}
+
+static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ if (skb->len <= ring->tx_copybreak || !tx_sgl ||
+ (!skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < tx_sgl))
+ return false;
+
+ if (space < HNS3_MAX_SGL_SIZE) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_spare_full++;
+ u64_stats_update_end(&ring->syncp);
+ return false;
+ }
+
+ return true;
+}
+
+static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare;
+ struct page *page;
+ u32 alloc_size;
+ dma_addr_t dma;
+ int order;
+
+ alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
+ ring->tqp->handle->kinfo.tx_spare_buf_size;
+ if (!alloc_size)
+ return;
+
+ order = get_order(alloc_size);
+ tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
+ GFP_KERNEL);
+ if (!tx_spare) {
+		/* The driver still works without the tx spare buffer */
+ dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
+ return;
+ }
+
+ page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
+ GFP_KERNEL, order);
+ if (!page) {
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ return;
+ }
+
+ dma = dma_map_page(ring_to_dev(ring), page, 0,
+ PAGE_SIZE << order, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring_to_dev(ring), dma)) {
+ dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
+ put_page(page);
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ return;
+ }
+
+ tx_spare->dma = dma;
+ tx_spare->buf = page_address(page);
+ tx_spare->len = PAGE_SIZE << order;
+ ring->tx_spare = tx_spare;
+}
+
+/* Use hns3_tx_spare_space() to make sure there is enough buffer space
+ * before calling the function below to allocate a tx buffer.
+ */
+static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
+ unsigned int size, dma_addr_t *dma,
+ u32 *cb_len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntu = tx_spare->next_to_use;
+
+ size = ALIGN(size, dma_get_cache_alignment());
+ *cb_len = size;
+
+	/* The tx spare buffer wraps around here when the space left at
+	 * the end of the freed tx buffer is not enough for this request.
+ */
+ if (ntu + size > tx_spare->len) {
+ *cb_len += (tx_spare->len - ntu);
+ ntu = 0;
+ }
+
+ tx_spare->next_to_use = ntu + size;
+ if (tx_spare->next_to_use == tx_spare->len)
+ tx_spare->next_to_use = 0;
+
+ *dma = tx_spare->dma + ntu;
+
+ return tx_spare->buf + ntu;
+}
+
+static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (len > tx_spare->next_to_use) {
+ len -= tx_spare->next_to_use;
+ tx_spare->next_to_use = tx_spare->len - len;
+ } else {
+ tx_spare->next_to_use -= len;
+ }
+}
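
[Editor's note] hns3_tx_spare_space(), hns3_tx_spare_alloc() and hns3_tx_spare_rollback() together implement a contiguous-only circular allocator: when the free space is split by the wrap point only the larger region counts, and a request that does not fit in the tail charges the tail remainder to the allocation (cb_len) so a rollback can undo it by length alone. A compact userspace sketch under those rules, keeping one byte unused to tell full from empty; names and sizes are illustrative:

	#include <stdio.h>

	struct spare {
		unsigned int len;		/* total buffer length */
		unsigned int next_to_use;	/* producer offset */
		unsigned int last_to_clean;	/* consumer offset */
	};

	static unsigned int spare_space(const struct spare *s)
	{
		unsigned int ntc = s->last_to_clean, ntu = s->next_to_use;

		if (ntc > ntu)			/* single contiguous free region */
			return ntc - ntu - 1;

		/* free space wraps: only the larger contiguous piece is usable */
		return (ntc > s->len - ntu ? ntc : s->len - ntu) - 1;
	}

	static unsigned int spare_alloc(struct spare *s, unsigned int size,
					unsigned int *cb_len)
	{
		unsigned int ntu = s->next_to_use;

		*cb_len = size;
		if (ntu + size > s->len) {	/* charge the unusable tail */
			*cb_len += s->len - ntu;
			ntu = 0;
		}
		s->next_to_use = ntu + size;
		if (s->next_to_use == s->len)
			s->next_to_use = 0;
		return ntu;			/* offset of the data */
	}

	int main(void)
	{
		struct spare s = { .len = 4096, .next_to_use = 4000, .last_to_clean = 300 };
		unsigned int cb_len;

		printf("space %u\n", spare_space(&s));		/* 299: wrapped free space */
		printf("data at %u, cb_len %u\n",
		       spare_alloc(&s, 128, &cb_len), cb_len);	/* 0, 224 (128 + 96 tail) */
		return 0;
	}
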
+
+static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc = tx_spare->next_to_clean;
+ u32 len = cb->length;
+
+ tx_spare->next_to_clean += len;
+
+ if (tx_spare->next_to_clean >= tx_spare->len) {
+ tx_spare->next_to_clean -= tx_spare->len;
+
+ if (tx_spare->next_to_clean) {
+ ntc = 0;
+ len = tx_spare->next_to_clean;
}
}
+
+ /* This tx spare buffer is only really reclaimed after calling
+ * hns3_tx_spare_update(), so it is still safe to use the info in
+ * the tx buffer to do the dma sync or sg unmapping after
+ * tx_spare->next_to_clean is moved forword.
+ */
+ if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
+ dma_addr_t dma = tx_spare->dma + ntc;
+
+ dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
+ DMA_TO_DEVICE);
+ } else {
+ struct sg_table *sgt = tx_spare->buf + ntc;
+
+ dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ }
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
@@ -846,8 +1305,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.udp->dest == htons(4790))))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -924,8 +1381,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +1426,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +1451,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -1168,40 +1623,14 @@ out_hw_tx_csum:
return 0;
}
-static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- unsigned int size, enum hns_desc_type type)
+static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
+ unsigned int size)
{
#define HNS3_LIKELY_BD_NUM 1
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- struct device *dev = ring_to_dev(ring);
- skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
- dma_addr_t dma;
-
- if (type == DESC_TYPE_FRAGLIST_SKB ||
- type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else {
- frag = (skb_frag_t *)priv;
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- }
-
- if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- return -ENOMEM;
- }
-
- desc_cb->priv = priv;
- desc_cb->length = size;
- desc_cb->dma = dma;
- desc_cb->type = type;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1237,6 +1666,52 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return frag_buf_num;
}
+static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
+ unsigned int type)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
+ unsigned int size;
+ dma_addr_t dma;
+
+ if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ size = skb_headlen(skb);
+ if (!size)
+ return 0;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else if (type & DESC_TYPE_BOUNCE_HEAD) {
+ /* Head data has been filled in hns3_handle_tx_bounce(),
+ * just return 0 here.
+ */
+ return 0;
+ } else {
+ skb_frag_t *frag = (skb_frag_t *)priv;
+
+ size = skb_frag_size(frag);
+ if (!size)
+ return 0;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ return hns3_fill_desc(ring, dma, size);
+}
+
static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num)
{
@@ -1460,6 +1935,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
for (i = 0; i < ring->desc_num; i++) {
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct hns3_desc_cb *desc_cb;
memset(desc, 0, sizeof(*desc));
@@ -1470,52 +1946,44 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
- if (!ring->desc_cb[ring->next_to_use].dma)
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+
+ if (!desc_cb->dma)
continue;
/* unmap the descriptor dma address */
- if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
- ring->desc_cb[ring->next_to_use].type ==
- DESC_TYPE_FRAGLIST_SKB)
- dma_unmap_single(dev,
- ring->desc_cb[ring->next_to_use].dma,
- ring->desc_cb[ring->next_to_use].length,
- DMA_TO_DEVICE);
- else if (ring->desc_cb[ring->next_to_use].length)
- dma_unmap_page(dev,
- ring->desc_cb[ring->next_to_use].dma,
- ring->desc_cb[ring->next_to_use].length,
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
+ dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
+ DMA_TO_DEVICE);
+ else if (desc_cb->type &
+ (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
+ hns3_tx_spare_rollback(ring, desc_cb->length);
+ else if (desc_cb->length)
+ dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
DMA_TO_DEVICE);
- ring->desc_cb[ring->next_to_use].length = 0;
- ring->desc_cb[ring->next_to_use].dma = 0;
- ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
+ desc_cb->length = 0;
+ desc_cb->dma = 0;
+ desc_cb->type = DESC_TYPE_UNKNOWN;
}
}
static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, enum hns_desc_type type)
+ struct sk_buff *skb, unsigned int type)
{
- unsigned int size = skb_headlen(skb);
struct sk_buff *frag_skb;
int i, ret, bd_num = 0;
- if (size) {
- ret = hns3_fill_desc(ring, skb, size, type);
- if (unlikely(ret < 0))
- return ret;
+ ret = hns3_map_and_fill_desc(ring, skb, type);
+ if (unlikely(ret < 0))
+ return ret;
- bd_num += ret;
- }
+ bd_num += ret;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- size = skb_frag_size(frag);
- if (!size)
- continue;
-
- ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
if (unlikely(ret < 0))
return ret;
@@ -1555,6 +2023,153 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
+static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
+ struct hns3_desc *desc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!(h->ae_algo->ops->set_tx_hwts_info &&
+ h->ae_algo->ops->set_tx_hwts_info(h, skb)))
+ return;
+
+ desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
+}
+
+static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ unsigned int type = DESC_TYPE_BOUNCE_HEAD;
+ unsigned int size = skb_headlen(skb);
+ dma_addr_t dma;
+ int bd_num = 0;
+ u32 cb_len;
+ void *buf;
+ int ret;
+
+ if (skb->len <= ring->tx_copybreak) {
+ size = skb->len;
+ type = DESC_TYPE_BOUNCE_ALL;
+ }
+
+	/* hns3_can_use_tx_bounce() has already been called to ensure that
+	 * the function below can always return a tx buffer.
+ */
+ buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
+
+ ret = skb_copy_bits(skb, 0, buf, size);
+ if (unlikely(ret < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.copy_bits_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ bd_num += hns3_fill_desc(ring, dma, size);
+
+ if (type == DESC_TYPE_BOUNCE_HEAD) {
+ ret = hns3_fill_skb_to_desc(ring, skb,
+ DESC_TYPE_BOUNCE_HEAD);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ dma_sync_single_for_device(ring_to_dev(ring), dma, size,
+ DMA_TO_DEVICE);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_bounce++;
+ u64_stats_update_end(&ring->syncp);
+ return bd_num;
+}
+
+static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_table *sgt;
+ int i, bd_num = 0;
+ dma_addr_t dma;
+ u32 cb_len;
+ int nents;
+
+ if (skb_has_frag_list(skb))
+ nfrag = HNS3_MAX_TSO_BD_NUM;
+
+	/* hns3_can_use_tx_sgl() has already been called to ensure that
+	 * the function below can always return a tx buffer.
+ */
+ sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
+ &dma, &cb_len);
+
+	/* the scatterlist array immediately follows the sg table */
+ sgt->sgl = (struct scatterlist *)(sgt + 1);
+ sg_init_table(sgt->sgl, nfrag);
+ nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ if (unlikely(nents < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.skb2sgl_err++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ sgt->orig_nents = nents;
+ sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!sgt->nents)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.map_sg_err++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = DESC_TYPE_SGL_SKB;
+
+ for (i = 0; i < sgt->nents; i++)
+ bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
+ sg_dma_len(sgt->sgl + i));
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_sgl++;
+ u64_stats_update_end(&ring->syncp);
+
+ return bd_num;
+}
+
+static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ u32 space;
+
+ if (!ring->tx_spare)
+ goto out;
+
+ space = hns3_tx_spare_space(ring);
+
+ if (hns3_can_use_tx_sgl(ring, skb, space))
+ return hns3_handle_tx_sgl(ring, skb);
+
+ if (hns3_can_use_tx_bounce(ring, skb, space))
+ return hns3_handle_tx_bounce(ring, skb);
+
+out:
+ return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+}
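
[Editor's note] hns3_handle_desc_filling() above triages each skb: batch a many-frag skb through one scatter-gather mapping when the spare buffer has room, copy a small skb into the bounce area, and otherwise fall back to mapping the head and each frag separately. A simplified standalone predicate capturing that order; the thresholds are illustrative stand-ins for tx_copybreak, tx_sgl and HNS3_MAX_SGL_SIZE, and the real checks in hns3_can_use_tx_sgl()/hns3_can_use_tx_bounce() are somewhat finer-grained:

	#include <stdio.h>

	enum tx_path { TX_SGL, TX_BOUNCE, TX_DESC };

	static enum tx_path pick_path(unsigned int skb_len, unsigned int nfrags,
				      unsigned int space, unsigned int copybreak,
				      unsigned int sgl_min, unsigned int sgl_size)
	{
		if (skb_len > copybreak && nfrags >= sgl_min && space >= sgl_size)
			return TX_SGL;		/* one dma_map_sg() for all frags */
		if (skb_len <= copybreak && skb_len <= space)
			return TX_BOUNCE;	/* memcpy into the spare buffer */
		return TX_DESC;			/* map head + each frag separately */
	}

	int main(void)
	{
		/* small two-frag skb with plenty of spare space -> 1 (TX_BOUNCE) */
		printf("%d\n", pick_path(128, 2, 4096, 256, 8, 1088));
		return 0;
	}
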
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1601,16 +2216,22 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* zero, which is unlikely, and 'ret > 0' means how many tx desc
* need to be notified to the hw.
*/
- ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ ret = hns3_handle_desc_filling(ring, skb);
if (unlikely(ret <= 0))
goto fill_err;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
+
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
trace_hns3_tx_desc(ring, pre_ntu);
+ skb_tx_timestamp(skb);
+
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
@@ -1714,6 +2335,14 @@ static int hns3_nic_set_features(struct net_device *netdev,
return -EINVAL;
}
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
+ if (ret)
+ return ret;
+ }
+
netdev->features = features;
return 0;
}
@@ -1789,6 +2418,9 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_drop += ring->stats.tx_tso_err;
tx_drop += ring->stats.over_max_recursion;
tx_drop += ring->stats.hw_limitation;
+ tx_drop += ring->stats.copy_bits_err;
+ tx_drop += ring->stats.skb2sgl_err;
+ tx_drop += ring->stats.map_sg_err;
tx_errors += ring->stats.sw_err_cnt;
tx_errors += ring->stats.tx_vlan_err;
tx_errors += ring->stats.tx_l4_proto_err;
@@ -1796,6 +2428,9 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_errors += ring->stats.tx_tso_err;
tx_errors += ring->stats.over_max_recursion;
tx_errors += ring->stats.hw_limitation;
+ tx_errors += ring->stats.copy_bits_err;
+ tx_errors += ring->stats.skb2sgl_err;
+ tx_errors += ring->stats.map_sg_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -2559,6 +3194,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HW_TC;
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2586,7 +3224,8 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
static void hns3_free_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb, int budget)
{
- if (cb->type == DESC_TYPE_SKB)
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
napi_consume_skb(cb->priv, budget);
else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
@@ -2607,12 +3246,15 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else if (cb->length)
+ else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
+ else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB))
+ hns3_tx_spare_reclaim_cb(ring, cb);
}
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
@@ -2764,7 +3406,9 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
desc_cb = &ring->desc_cb[ntc];
- if (desc_cb->type == DESC_TYPE_SKB) {
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
+ DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB)) {
(*pkts)++;
(*bytes) += desc_cb->send_bytes;
}
@@ -2787,6 +3431,9 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
+
+ hns3_tx_spare_update(ring);
+
return true;
}
@@ -2878,7 +3525,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
- return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+ return page_count(cb->priv) == cb->pagecnt_bias;
}
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
@@ -2886,40 +3533,74 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
+ u32 frag_size = size - pull_len;
+ bool reused;
- desc_cb->pagecnt_bias--;
- skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
- size - pull_len, truesize);
+	/* Avoid re-using a remote or pfmemalloc page */
+ if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
+ goto out;
- /* Avoid re-using remote and pfmemalloc pages, or the stack is still
- * using the page when page_offset rollback to zero, flag default
- * unreuse
+ reused = hns3_can_reuse_page(desc_cb);
+
+ /* Rx page can be reused when:
+ * 1. Rx page is only owned by the driver when page_offset
+ * is zero, which means 0 @ truesize will be used by
+	 *    the stack after skb_add_rx_frag() is called, and the rest
+	 *    of the rx page can be reused by the driver.
+	 * Or
+	 * 2. Rx page is only owned by the driver when page_offset
+	 *    is non-zero, which means page_offset @ truesize will
+	 *    be used by the stack after skb_add_rx_frag() is called,
+	 *    and 0 @ truesize can be reused by the driver.
*/
- if (!dev_page_is_reusable(desc_cb->priv) ||
- (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
- __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
- return;
- }
+ if ((!desc_cb->page_offset && reused) ||
+ ((desc_cb->page_offset + truesize + truesize) <=
+ hns3_page_size(ring) && desc_cb->page_offset)) {
+ desc_cb->page_offset += truesize;
+ desc_cb->reuse_flag = 1;
+ } else if (desc_cb->page_offset && reused) {
+ desc_cb->page_offset = 0;
+ desc_cb->reuse_flag = 1;
+ } else if (frag_size <= ring->rx_copybreak) {
+ void *frag = napi_alloc_frag(frag_size);
- /* Move offset up to the next cache line */
- desc_cb->page_offset += truesize;
+ if (unlikely(!frag)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.frag_alloc_err++;
+ u64_stats_update_end(&ring->syncp);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ goto out;
+ }
- if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
- desc_cb->reuse_flag = 1;
- } else if (hns3_can_reuse_page(desc_cb)) {
desc_cb->reuse_flag = 1;
- desc_cb->page_offset = 0;
- } else if (desc_cb->pagecnt_bias) {
- __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.frag_alloc++;
+ u64_stats_update_end(&ring->syncp);
return;
}
+out:
+ desc_cb->pagecnt_bias--;
+
if (unlikely(!desc_cb->pagecnt_bias)) {
page_ref_add(desc_cb->priv, USHRT_MAX);
desc_cb->pagecnt_bias = USHRT_MAX;
}
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+
+ if (unlikely(!desc_cb->reuse_flag))
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
}
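
[Editor's note] The reuse logic above leans on the pagecnt_bias trick: the driver takes USHRT_MAX page references in one bulk operation and pays them out to the stack by decrementing a local bias, so hns3_can_reuse_page() can test sole ownership with a single page_count() == pagecnt_bias comparison. A userspace model of the accounting; the bulk reference grab at buffer-alloc time is assumed (only the replenish path is visible in this hunk):

	#include <stdio.h>
	#include <limits.h>

	struct page { unsigned int refcount; };	/* stand-in for struct page */

	static void bias_init(struct page *p, unsigned int *bias)
	{
		p->refcount += USHRT_MAX - 1;	/* one bulk grab at alloc time... */
		*bias = USHRT_MAX;		/* ...paid out by decrementing bias */
	}

	static void give_frag_to_stack(unsigned int *bias)
	{
		(*bias)--;	/* the stack now owns one of the pre-taken refs */
	}

	static int can_reuse(const struct page *p, unsigned int bias)
	{
		/* every handed-out ref came back: the driver is the sole owner */
		return p->refcount == bias;
	}

	int main(void)
	{
		struct page pg = { .refcount = 1 };	/* ref from page allocation */
		unsigned int bias;

		bias_init(&pg, &bias);
		give_frag_to_stack(&bias);
		printf("reusable: %d\n", can_reuse(&pg, bias));	/* 0: stack holds a ref */
		pg.refcount--;					/* stack frees the frag */
		printf("reusable: %d\n", can_reuse(&pg, bias));	/* 1 */
		return 0;
	}
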
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
@@ -2980,51 +3661,31 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
-static void hns3_checksum_complete(struct hns3_enet_ring *ring,
- struct sk_buff *skb, u32 l234info)
+static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 ptype, u16 csum)
{
- u32 lo, hi;
+ if (ptype == HNS3_INVALID_PTYPE ||
+ hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
+ return false;
u64_stats_update_begin(&ring->syncp);
ring->stats.csum_complete++;
u64_stats_update_end(&ring->syncp);
skb->ip_summed = CHECKSUM_COMPLETE;
- lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
- HNS3_RXD_L2_CSUM_L_S);
- hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
- HNS3_RXD_L2_CSUM_H_S);
- skb->csum = csum_unfold((__force __sum16)(lo | hi << 8));
+ skb->csum = csum_unfold((__force __sum16)csum);
+
+ return true;
}
-static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- u32 l234info, u32 bd_base_info, u32 ol_info)
+static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
+ u32 ol_info, u32 ptype)
{
- struct net_device *netdev = ring_to_netdev(ring);
int l3_type, l4_type;
int ol4_type;
- skb->ip_summed = CHECKSUM_NONE;
-
- skb_checksum_none_assert(skb);
-
- if (!(netdev->features & NETIF_F_RXCSUM))
- return;
-
- if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
- hns3_checksum_complete(ring, skb, l234info);
- return;
- }
-
- /* check if hardware has done checksum */
- if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- return;
-
- if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
- BIT(HNS3_RXD_OL3E_B) |
- BIT(HNS3_RXD_OL4E_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l3l4_csum_err++;
- u64_stats_update_end(&ring->syncp);
+ if (ptype != HNS3_INVALID_PTYPE) {
+ skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
+ skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
return;
}
@@ -3054,6 +3715,45 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
}
}
+static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ u32 l234info, u32 bd_base_info, u32 ol_info,
+ u16 csum)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 ptype = HNS3_INVALID_PTYPE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
+ ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ if (hns3_checksum_complete(ring, skb, ptype, csum))
+ return;
+
+ /* check if hardware has done checksum */
+ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
+ return;
+
+ if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ BIT(HNS3_RXD_OL3E_B) |
+ BIT(HNS3_RXD_OL4E_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.l3l4_csum_err++;
+ u64_stats_update_end(&ring->syncp);
+
+ return;
+ }
+
+ hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
+}
+
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
if (skb_has_frag_list(skb))
@@ -3235,8 +3935,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
- u32 bd_base_info, u32 ol_info)
+ u32 bd_base_info, u32 ol_info, u16 csum)
{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
u32 l3_type;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
@@ -3244,7 +3946,8 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
HNS3_RXD_GRO_SIZE_S);
/* if there is no HW GRO, do not set gro params */
if (!skb_shinfo(skb)->gso_size) {
- hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
+ csum);
return 0;
}
@@ -3252,7 +3955,16 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
- l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
+ } else {
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ }
+
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -3285,6 +3997,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
struct hns3_desc *desc;
unsigned int len;
int pre_ntc, ret;
+ u16 csum;
/* bdinfo handled below is only valid on the last BD of the
* current packet, and ring->next_to_clean indicates the first
@@ -3296,6 +4009,16 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
ol_info = le32_to_cpu(desc->rx.ol_info);
+ csum = le16_to_cpu(desc->csum);
+
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -3328,7 +4051,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
/* This is needed in order to enable forwarding support */
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
- bd_base_info, ol_info);
+ bd_base_info, ol_info, csum);
if (unlikely(ret)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.rx_err_cnt++;
@@ -3467,139 +4190,30 @@ out:
return recv_pkts;
}
-static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
+static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
-#define HNS3_RX_LOW_BYTE_RATE 10000
-#define HNS3_RX_MID_BYTE_RATE 20000
-#define HNS3_RX_ULTRA_PACKET_RATE 40
-
- enum hns3_flow_level_range new_flow_level;
- struct hns3_enet_tqp_vector *tqp_vector;
- int packets_per_msecs, bytes_per_msecs;
- u32 time_passed_ms;
-
- tqp_vector = ring_group->ring->tqp_vector;
- time_passed_ms =
- jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
- if (!time_passed_ms)
- return false;
-
- do_div(ring_group->total_packets, time_passed_ms);
- packets_per_msecs = ring_group->total_packets;
-
- do_div(ring_group->total_bytes, time_passed_ms);
- bytes_per_msecs = ring_group->total_bytes;
-
- new_flow_level = ring_group->coal.flow_level;
-
- /* Simple throttlerate management
- * 0-10MB/s lower (50000 ints/s)
- * 10-20MB/s middle (20000 ints/s)
- * 20-1249MB/s high (18000 ints/s)
- * > 40000pps ultra (8000 ints/s)
- */
- switch (new_flow_level) {
- case HNS3_FLOW_LOW:
- if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
- new_flow_level = HNS3_FLOW_MID;
- break;
- case HNS3_FLOW_MID:
- if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
- new_flow_level = HNS3_FLOW_HIGH;
- else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
- new_flow_level = HNS3_FLOW_LOW;
- break;
- case HNS3_FLOW_HIGH:
- case HNS3_FLOW_ULTRA:
- default:
- if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
- new_flow_level = HNS3_FLOW_MID;
- break;
- }
-
- if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
- &tqp_vector->rx_group == ring_group)
- new_flow_level = HNS3_FLOW_ULTRA;
-
- ring_group->total_bytes = 0;
- ring_group->total_packets = 0;
- ring_group->coal.flow_level = new_flow_level;
-
- return true;
-}
-
-static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
-{
- struct hns3_enet_tqp_vector *tqp_vector;
- u16 new_int_gl;
-
- if (!ring_group->ring)
- return false;
-
- tqp_vector = ring_group->ring->tqp_vector;
- if (!tqp_vector->last_jiffies)
- return false;
-
- if (ring_group->total_packets == 0) {
- ring_group->coal.int_gl = HNS3_INT_GL_50K;
- ring_group->coal.flow_level = HNS3_FLOW_LOW;
- return true;
- }
-
- if (!hns3_get_new_flow_lvl(ring_group))
- return false;
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct dim_sample sample = {};
- new_int_gl = ring_group->coal.int_gl;
- switch (ring_group->coal.flow_level) {
- case HNS3_FLOW_LOW:
- new_int_gl = HNS3_INT_GL_50K;
- break;
- case HNS3_FLOW_MID:
- new_int_gl = HNS3_INT_GL_20K;
- break;
- case HNS3_FLOW_HIGH:
- new_int_gl = HNS3_INT_GL_18K;
- break;
- case HNS3_FLOW_ULTRA:
- new_int_gl = HNS3_INT_GL_8K;
- break;
- default:
- break;
- }
+ if (!rx_group->coal.adapt_enable)
+ return;
- if (new_int_gl != ring_group->coal.int_gl) {
- ring_group->coal.int_gl = new_int_gl;
- return true;
- }
- return false;
+ dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
+ rx_group->total_bytes, &sample);
+ net_dim(&rx_group->dim, sample);
}
-static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
- struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
- bool rx_update, tx_update;
+ struct dim_sample sample = {};

- /* update param every 1000ms */
- if (time_before(jiffies,
- tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
+ if (!tx_group->coal.adapt_enable)
return;
- if (rx_group->coal.adapt_enable) {
- rx_update = hns3_get_new_int_gl(rx_group);
- if (rx_update)
- hns3_set_vector_coalesce_rx_gl(tqp_vector,
- rx_group->coal.int_gl);
- }
-
- if (tx_group->coal.adapt_enable) {
- tx_update = hns3_get_new_int_gl(tx_group);
- if (tx_update)
- hns3_set_vector_coalesce_tx_gl(tqp_vector,
- tx_group->coal.int_gl);
- }
-
- tqp_vector->last_jiffies = jiffies;
+ dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
+ tx_group->total_bytes, &sample);
+ net_dim(&tx_group->dim, sample);
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
@@ -3644,7 +4258,9 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (napi_complete(napi) &&
likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
- hns3_update_new_int_gl(tqp_vector);
+ hns3_update_rx_int_coalesce(tqp_vector);
+ hns3_update_tx_int_coalesce(tqp_vector);
+
hns3_mask_vector_irq(tqp_vector, 1);
}
@@ -3775,6 +4391,54 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
}
}
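+/* Worker scheduled by net_dim: program the RX GL (usec) value chosen
+ * by DIM, and the QL (frames) value as well when it fits within the
+ * device's int_ql_max.
+ */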
+static void hns3_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
+ tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
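+/* TX counterpart of hns3_rx_dim_work() */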
+static void hns3_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
+ tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
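+/* Hook up the DIM work functions; both groups start in EQE mode, i.e.
+ * the moderation period restarts from each completion event.
+ */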
+static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
+ tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
+ tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3788,6 +4452,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[i];
hns3_vector_coalesce_init_hw(tqp_vector, priv);
tqp_vector->num_tqps = 0;
+ hns3_nic_init_dim(tqp_vector);
}
for (i = 0; i < h->kinfo.num_tqps; i++) {
@@ -3844,6 +4509,34 @@ map_ring_fail:
return ret;
}
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable self-adaptive interrupt coalescing and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3955,10 +4648,13 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
ring->queue_index = q->tqp_index;
+ ring->tx_copybreak = priv->tx_copybreak;
+ ring->last_to_use = 0;
} else {
ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
ring->queue_index = q->tqp_index;
+ ring->rx_copybreak = priv->rx_copybreak;
}
hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
@@ -3972,7 +4668,6 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
- ring->last_to_use = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -4032,6 +4727,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
ret = hns3_alloc_ring_buffers(ring);
if (ret)
goto out_with_desc;
+ } else {
+ hns3_init_tx_spare_buffer(ring);
}
return 0;
@@ -4054,9 +4751,18 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->next_to_use = 0;
ring->last_to_use = 0;
ring->pending_buf = 0;
- if (ring->skb) {
+ if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
dev_kfree_skb_any(ring->skb);
ring->skb = NULL;
+ } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
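+ /* tear down the DMA-mapped bounce pages set up by
+ * hns3_init_tx_spare_buffer()
+ */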
+ dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
+ DMA_TO_DEVICE);
+ free_pages((unsigned long)tx_spare->buf,
+ get_order(tx_spare->len));
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ ring->tx_spare = NULL;
}
}
@@ -4295,6 +5001,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_get_ring_cfg;
}
+ hns3_nic_init_coal_cfg(priv);
+
ret = hns3_nic_alloc_vector_data(priv);
if (ret) {
ret = -ENOMEM;
@@ -4317,12 +5025,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ret)
goto out_init_phy;
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(priv->dev, "probe register netdev fail!\n");
- goto out_reg_netdev_fail;
- }
-
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
@@ -4343,29 +5045,43 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dcbnl_setup(handle);
- hns3_dbg_init(handle);
+ ret = hns3_dbg_init(handle);
+ if (ret) {
+ dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
+ ret);
+ goto out_client_start;
+ }
netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
+ if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
+ set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
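+ /* register_netdev() is deliberately moved here, after debugfs init
+ * and the INITED state bit, so no callback can run against a
+ * half-initialized device
+ */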
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
out_init_irq_fail:
- unregister_netdev(netdev);
-out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
@@ -4571,31 +5287,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
- /* ethtool only support setting and querying one coal
- * configuration for now, so save the vector 0' coal
- * configuration here in order to restore it.
- */
- memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +5345,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_put_ring;
- hns3_restore_coal(priv);
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_dealloc_vector;
@@ -4721,8 +5410,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_uninit_vector_data(priv);
- hns3_store_coal(priv);
-
hns3_nic_dealloc_vector_data(priv);
hns3_uninit_all_ring(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index daa04aeb0942..15af3d93857b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -4,6 +4,7 @@
#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H
+#include <linux/dim.h>
#include <linux/if_vlan.h>
#include "hnae3.h"
@@ -19,6 +20,7 @@ enum hns3_nic_state {
HNS3_NIC_STATE_SERVICE_SCHED,
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
+ HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
HNS3_NIC_STATE_MAX
};
@@ -82,12 +84,6 @@ enum hns3_nic_state {
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
-#define HNS3_RXD_L2_CSUM_B 15
-#define HNS3_RXD_L2_CSUM_L_S 4
-#define HNS3_RXD_L2_CSUM_L_M (0xff << HNS3_RXD_L2_CSUM_L_S)
-#define HNS3_RXD_L2_CSUM_H_S 24
-#define HNS3_RXD_L2_CSUM_H_M (0xff << HNS3_RXD_L2_CSUM_H_S)
-
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -114,6 +110,9 @@ enum hns3_nic_state {
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
+#define HNS3_RXD_PTYPE_S 4
+#define HNS3_RXD_PTYPE_M GENMASK(11, 4)
+
#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
@@ -123,8 +122,9 @@ enum hns3_nic_state {
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
-#define HNS3_RXD_TSIND_S 12
-#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+#define HNS3_RXD_TSIDX_S 12
+#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S)
+#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
@@ -238,7 +238,14 @@ enum hns3_pkt_tun_type {
/* hardware spec ring buffer format */
struct __packed hns3_desc {
- __le64 addr;
+ union {
+ __le64 addr;
+ __le16 csum;
+ struct {
+ __le32 ts_nsec;
+ __le32 ts_sec;
+ };
+ };
union {
struct {
__le16 vlan_tag;
@@ -292,6 +299,16 @@ struct __packed hns3_desc {
};
};
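+/* how a TX buffer is attached to its descriptor: mapped directly from
+ * the skb/frag, copied into the tx_spare bounce buffer, or sent via a
+ * scatter-gather list built from the skb
+ */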
+enum hns3_desc_type {
+ DESC_TYPE_UNKNOWN = 0,
+ DESC_TYPE_SKB = 1 << 0,
+ DESC_TYPE_FRAGLIST_SKB = 1 << 1,
+ DESC_TYPE_PAGE = 1 << 2,
+ DESC_TYPE_BOUNCE_ALL = 1 << 3,
+ DESC_TYPE_BOUNCE_HEAD = 1 << 4,
+ DESC_TYPE_SGL_SKB = 1 << 5,
+};
+
struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
@@ -366,6 +383,14 @@ enum hns3_pkt_ol4type {
HNS3_OL4_TYPE_UNKNOWN
};
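+/* one entry of the RX packet type table, presumably indexed by the
+ * HNS3_RXD_PTYPE_M field of the advanced RX descriptor layout; it
+ * carries the parsed L3 type and checksum treatment for that ptype
+ */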
+struct hns3_rx_ptype {
+ u32 ptype:8;
+ u32 csum_level:2;
+ u32 ip_summed:2;
+ u32 l3_type:4;
+ u32 valid:1;
+};
+
struct ring_stats {
u64 sw_err_cnt;
u64 seg_pkt_cnt;
@@ -383,6 +408,12 @@ struct ring_stats {
u64 tx_tso_err;
u64 over_max_recursion;
u64 hw_limitation;
+ u64 tx_bounce;
+ u64 tx_spare_full;
+ u64 copy_bits_err;
+ u64 tx_sgl;
+ u64 skb2sgl_err;
+ u64 map_sg_err;
};
struct {
u64 rx_pkts;
@@ -396,10 +427,22 @@ struct ring_stats {
u64 csum_complete;
u64 rx_multicast;
u64 non_reuse_pg;
+ u64 frag_alloc_err;
+ u64 frag_alloc;
};
+ __le16 csum;
};
};
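+/* per-ring DMA-mapped staging area for the TX bounce/copybreak path;
+ * small skbs are copied here (see tx_copybreak) instead of being
+ * DMA-mapped individually
+ */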
+struct hns3_tx_spare {
+ dma_addr_t dma;
+ void *buf;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 last_to_clean;
+ u32 len;
+};
+
struct hns3_enet_ring {
struct hns3_desc *desc; /* dma map address space */
struct hns3_desc_cb *desc_cb;
@@ -422,18 +465,29 @@ struct hns3_enet_ring {
* next_to_use
*/
int next_to_clean;
- union {
- int last_to_use; /* last idx used by xmit */
- u32 pull_len; /* memcpy len for current rx packet */
- };
- u32 frag_num;
- void *va; /* first buffer address for current packet */
-
u32 flag; /* ring attribute */
int pending_buf;
- struct sk_buff *skb;
- struct sk_buff *tail_skb;
+ union {
+ /* for Tx ring */
+ struct {
+ u32 fd_qb_tx_sample;
+ int last_to_use; /* last idx used by xmit */
+ u32 tx_copybreak;
+ struct hns3_tx_spare *tx_spare;
+ };
+
+ /* for Rx ring */
+ struct {
+ u32 pull_len; /* memcpy len for current rx packet */
+ u32 rx_copybreak;
+ u32 frag_num;
+ /* first buffer address for current packet */
+ unsigned char *va;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
+ };
+ };
} ____cacheline_internodealigned_in_smp;
enum hns3_flow_level_range {
@@ -472,6 +526,7 @@ struct hns3_enet_ring_group {
u64 total_packets; /* total packets processed this group */
u16 count;
struct hns3_enet_coalesce coal;
+ struct dim dim;
};
struct hns3_enet_tqp_vector {
@@ -493,7 +548,7 @@ struct hns3_enet_tqp_vector {
char name[HNAE3_INT_NAME_LEN];
- unsigned long last_jiffies;
+ u64 event_cnt;
} ____cacheline_internodealigned_in_smp;
struct hns3_nic_priv {
@@ -516,6 +571,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
+ u32 tx_copybreak;
+ u32 rx_copybreak;
};
union l3_hdr_info {
@@ -631,7 +688,6 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
-void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
@@ -640,9 +696,10 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
-void hns3_dbg_init(struct hnae3_handle *handle);
+int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
+u16 hns3_get_max_available_channels(struct hnae3_handle *h);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b48faf769b1c..82061ab6930f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -46,6 +46,12 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("tso_err", tx_tso_err),
HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
HNS3_TQP_STAT("hw_limitation", hw_limitation),
+ HNS3_TQP_STAT("bounce", tx_bounce),
+ HNS3_TQP_STAT("spare_full", tx_spare_full),
+ HNS3_TQP_STAT("copy_bits_err", copy_bits_err),
+ HNS3_TQP_STAT("sgl", tx_sgl),
+ HNS3_TQP_STAT("skb2sgl_err", skb2sgl_err),
+ HNS3_TQP_STAT("map_sg_err", map_sg_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -65,6 +71,8 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("csum_complete", csum_complete),
HNS3_TQP_STAT("multicast", rx_multicast),
HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
+ HNS3_TQP_STAT("frag_alloc_err", frag_alloc_err),
+ HNS3_TQP_STAT("frag_alloc", frag_alloc),
};
#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags)
@@ -88,7 +96,6 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -110,14 +117,11 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
return ret;
- if (en) {
+ if (en)
h->ae_algo->ops->set_promisc_mode(h, true, true);
- } else {
+ else
/* recover promisc mode before loopback test */
hns3_request_update_promisc_mode(h);
- vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(ndev, vlan_filter_enable);
- }
return ret;
}
@@ -1134,50 +1138,32 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
- struct ethtool_coalesce *cmd)
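+/* the driver now keeps one device-wide coalesce configuration in
+ * hns3_nic_priv, so report that copy instead of reading it back from
+ * vector 0's ring groups
+ */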
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
{
- struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
struct hnae3_handle *h = priv->ae_handle;
- u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
- if (queue >= queue_num) {
- netdev_err(netdev,
- "Invalid queue value %u! Queue max id=%u\n",
- queue, queue_num - 1);
- return -EINVAL;
- }
-
- tx_vector = priv->ring[queue].tqp_vector;
- rx_vector = priv->ring[queue_num + queue].tqp_vector;
-
- cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.adapt_enable;
- cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.adapt_enable;
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;

- cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
- cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
- cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
- cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
return 0;
}
-static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
-{
- return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1292,19 +1278,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
- ret = hns3_check_ql_coalesce_param(netdev, cmd);
- if (ret)
- return ret;
-
- if (cmd->use_adaptive_tx_coalesce == 1 ||
- cmd->use_adaptive_rx_coalesce == 1) {
- netdev_info(netdev,
- "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
- cmd->use_adaptive_tx_coalesce,
- cmd->use_adaptive_rx_coalesce);
- }
-
- return 0;
+ return hns3_check_ql_coalesce_param(netdev, cmd);
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1324,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
@@ -1364,6 +1341,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
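+ /* cache the user's settings in hns3_nic_priv so they survive a
+ * reset; this replaces the removed hns3_store_coal() and
+ * hns3_restore_coal() pair
+ */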
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
@@ -1614,12 +1600,77 @@ static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
return 0;
}
+static int hns3_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ /* all the tx rings have the same tx_copybreak */
+ *(u32 *)data = priv->tx_copybreak;
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
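+/* e.g. "ethtool --set-tunable <dev> tx-copybreak <bytes>" ends up
+ * here; the value is mirrored into every TX (or RX) ring, the RX rings
+ * being the second half of priv->ring
+ */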
+static int hns3_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i, ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ priv->tx_copybreak = *(u32 *)data;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ priv->ring[i].tx_copybreak = priv->tx_copybreak;
+
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+
+ for (i = h->kinfo.num_tqps; i < h->kinfo.num_tqps * 2; i++)
+ priv->ring[i].rx_copybreak = priv->rx_copybreak;
+
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_TX_USECS_HIGH | \
ETHTOOL_COALESCE_MAX_FRAMES)
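+/* report PTP timestamping capabilities when the ae_algo implements
+ * get_ts_info, otherwise fall back to the stack's software-only
+ * timestamp info
+ */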
+static int hns3_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (handle->ae_algo->ops->get_ts_info)
+ return handle->ae_algo->ops->get_ts_info(handle, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
@@ -1646,6 +1697,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_priv_flags = hns3_get_priv_flags,
.set_priv_flags = hns3_set_priv_flags,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1684,6 +1737,9 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
.set_priv_flags = hns3_set_priv_flags,
+ .get_ts_info = hns3_get_ts_info,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 6c28c8f6292c..a685392dbfe9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 76a482456f1f..887297e37cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -178,7 +178,8 @@ static bool hclge_is_special_opcode(u16 opcode)
HCLGE_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_QUERY_CLEAR_PF_RAS_INT,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -386,6 +387,14 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B))
+ set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
+ set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) {
+ set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
+ }
}
static __le32 hclge_build_api_caps(void)
@@ -469,7 +478,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
struct hclge_desc desc;
u32 compat = 0;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
req = (struct hclge_firmware_compat_cmd *)desc.data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index c6fc22e29581..18bde77ef944 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -130,6 +130,10 @@ enum hclge_opcode_type {
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
@@ -236,6 +240,7 @@ enum hclge_opcode_type {
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
/* Flow Director commands */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
@@ -243,6 +248,7 @@ enum hclge_opcode_type {
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_CNT_OP = 0x1205,
HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
/* MDIO command */
@@ -267,10 +273,10 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
- /* M7 stats command */
- HCLGE_OPC_M7_STATS_BD = 0x7012,
- HCLGE_OPC_M7_STATS_INFO = 0x7013,
- HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
+ /* IMP stats command */
+ HCLGE_OPC_IMP_STATS_BD = 0x7012,
+ HCLGE_OPC_IMP_STATS_INFO = 0x7013,
+ HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
@@ -292,6 +298,8 @@ enum hclge_opcode_type {
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
@@ -389,8 +397,11 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_CAP_RAS_IMP_B = 12,
HCLGE_CAP_FEC_B = 13,
HCLGE_CAP_PAUSE_B = 14,
+ HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
};
enum HCLGE_API_CAP_BITS {
@@ -526,10 +537,14 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
+#define HCLGE_CFG_VLAN_FLTR_CAP_S 8
+#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
#define HCLGE_CFG_PF_RSS_SIZE_S 0
#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0)
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4)
#define HCLGE_CFG_CMD_CNT 4
@@ -810,6 +825,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
+#define HCLGE_INGRESS_BYPASS_B 0
+struct hclge_port_vlan_filter_bypass_cmd {
+ u8 bypass_state;
+ u8 rsv1[3];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
#define HCLGE_SWITCH_ALW_LPBK_B 1U
#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
@@ -1087,6 +1110,14 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
+struct hclge_fd_ad_cnt_read_cmd {
+ u8 rsv0[4];
+ __le16 index;
+ u8 rsv1[2];
+ __le64 cnt;
+ u8 rsv2[8];
+};
+
#define HCLGE_FD_USER_DEF_OFT_S 0
#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
#define HCLGE_FD_USER_DEF_EN_B 15
@@ -1100,7 +1131,7 @@ struct hclge_fd_user_def_cfg_cmd {
u8 rsv[12];
};
-struct hclge_get_m7_bd_cmd {
+struct hclge_get_imp_bd_cmd {
__le32 bd_num;
u8 rsv[20];
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 85d306459e36..288788186ecc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -4,74 +4,110 @@
#include <linux/device.h>
#include "hclge_debugfs.h"
+#include "hclge_err.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
+static const char * const state_str[] = { "off", "on" };
+static const char * const hclge_mac_state_str[] = {
+ "TO_ADD", "TO_DEL", "ACTIVE"
+};
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
- { .reg_type = "bios common",
+ { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
.offset = HCLGE_DBG_DFX_BIOS_OFFSET,
.cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_0[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
.offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_1[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
.offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
- { .reg_type = "ssu",
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
.dfx_msg = &hclge_dbg_ssu_reg_2[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
.offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
.cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
- { .reg_type = "igu egu",
+ { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dfx_msg = &hclge_dbg_igu_egu_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
.offset = HCLGE_DBG_DFX_IGU_OFFSET,
.cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
- { .reg_type = "rpu",
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
.dfx_msg = &hclge_dbg_rpu_reg_0[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
.offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
.cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
- { .reg_type = "rpu",
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
.dfx_msg = &hclge_dbg_rpu_reg_1[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
.offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
.cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
- { .reg_type = "ncsi",
+ { .cmd = HNAE3_DBG_CMD_REG_NCSI,
.dfx_msg = &hclge_dbg_ncsi_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
.offset = HCLGE_DBG_DFX_NCSI_OFFSET,
.cmd = HCLGE_OPC_DFX_NCSI_REG } },
- { .reg_type = "rtc",
+ { .cmd = HNAE3_DBG_CMD_REG_RTC,
.dfx_msg = &hclge_dbg_rtc_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
.offset = HCLGE_DBG_DFX_RTC_OFFSET,
.cmd = HCLGE_OPC_DFX_RTC_REG } },
- { .reg_type = "ppp",
+ { .cmd = HNAE3_DBG_CMD_REG_PPP,
.dfx_msg = &hclge_dbg_ppp_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
.offset = HCLGE_DBG_DFX_PPP_OFFSET,
.cmd = HCLGE_OPC_DFX_PPP_REG } },
- { .reg_type = "rcb",
+ { .cmd = HNAE3_DBG_CMD_REG_RCB,
.dfx_msg = &hclge_dbg_rcb_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
.offset = HCLGE_DBG_DFX_RCB_OFFSET,
.cmd = HCLGE_OPC_DFX_RCB_REG } },
- { .reg_type = "tqp",
+ { .cmd = HNAE3_DBG_CMD_REG_TQP,
.dfx_msg = &hclge_dbg_tqp_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
.offset = HCLGE_DBG_DFX_TQP_OFFSET,
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
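+/* build one line of debugfs output: the buffer is pre-filled with
+ * spaces, then either the item names (header) or one row of results is
+ * written at each column offset (name length plus interval), keeping
+ * the columns aligned
+ */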
+static void hclge_dbg_fill_content(char *content, u16 len,
+ const struct hclge_dbg_item *items,
+ const char **result, u16 size)
+{
+ char *pos = content;
+ u16 i;
+
+ memset(content, ' ', len);
+ for (i = 0; i < size; i++) {
+ if (result)
+ strncpy(pos, result[i], strlen(result[i]));
+ else
+ strncpy(pos, items[i].name, strlen(items[i].name));
+ pos += strlen(items[i].name) + items[i].interval;
+ }
+ *pos++ = '\n';
+ *pos++ = '\0';
+}
+
+static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
+{
+ if (id)
+ sprintf(buf, "vf%u", id - 1);
+ else
+ sprintf(buf, "pf");
+
+ return buf;
+}
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
+ int *bd_num)
{
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
@@ -81,13 +117,21 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get dfx bdnum fail, ret = %d\n", ret);
+ "failed to get dfx bd_num, offset = %d, ret = %d\n",
+ offset, ret);
return ret;
}
entries_per_desc = ARRAY_SIZE(desc[0].data);
index = offset % entries_per_desc;
- return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
+
+ *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
+ if (!(*bd_num)) {
+ dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -114,66 +158,108 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
return ret;
}
-static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- const struct hclge_dbg_reg_type_info *reg_info,
- const char *cmd_buf)
+static int
+hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
{
-#define IDX_OFFSET 1
-
- const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
+ u32 index, entry, i, cnt;
+ int bd_num, min_num, ret;
struct hclge_desc *desc;
- int entries_per_desc;
- int bd_num, buf_len;
- int index = 0;
- int min_num;
- int ret, i;
- if (*s) {
- ret = kstrtouint(s, 0, &index);
- index = (ret != 0) ? 0 : index;
- }
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
+
+ for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
+ cnt++, dfx_message->message);
+
+ for (i = 0; i < cnt; i++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
- bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
- if (bd_num <= 0) {
- dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
- reg_msg->offset, bd_num);
- return;
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
+ for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
+ dfx_message = reg_info->dfx_msg;
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
+ reg_msg->cmd);
+ if (ret)
+ break;
+
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
+ desc++;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
+ le32_to_cpu(desc->data[entry]));
+ }
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
+ kfree(desc_src);
+ return ret;
+}
+
+static int
+hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
+{
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_desc *desc_src;
+ int bd_num, min_num, ret;
+ struct hclge_desc *desc;
+ u32 entry, i;
+
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)
- return;
+ return -ENOMEM;
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
if (ret) {
- kfree(desc_src);
- return;
+ kfree(desc);
+ return ret;
}
- entries_per_desc = ARRAY_SIZE(desc->data);
- min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
- desc = desc_src;
- for (i = 0; i < min_num; i++) {
- if (i > 0 && (i % entries_per_desc) == 0)
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
desc++;
- if (dfx_message->flag)
- dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[i % entries_per_desc]));
+ if (!dfx_message->flag)
+ continue;
- dfx_message++;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
+ return 0;
}
-static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -186,43 +272,51 @@ static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac enable status, ret = %d\n", ret);
- return;
+ return ret;
}
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
- dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
- dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
- dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
- dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
- dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
- dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
- dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
- dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
- dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
- dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
- dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
- dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
- dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
-}
-
-static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "mac_rx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en,
+ HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
+ *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "mac_tx_under_min_err_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "mac_tx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en,
+ HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -234,17 +328,21 @@ static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac frame size, ret = %d\n", ret);
- return;
+ return ret;
}
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
+ req->min_frm_size);
+
+ return 0;
}
-static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -260,543 +358,540 @@ static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump mac speed duplex, ret = %d\n", ret);
- return;
+ return ret;
}
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- dev_info(&hdev->pdev->dev, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
+ *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup,
+ HCLGE_MAC_DUPLEX_SHIFT));
+ return 0;
}
-static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
+static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
{
- hclge_dbg_dump_mac_enable_status(hdev);
+ int pos = 0;
+ int ret;
- hclge_dbg_dump_mac_frame_size(hdev);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
- hclge_dbg_dump_mac_speed_duplex(hdev);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
}
-static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
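+/* dump one row per qset: the roce/nic masks, shaping-pass and
+ * back-pressure bits reported by HCLGE_OPC_QSET_DFX_STS
+ */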
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
{
- struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
- enum hclge_opcode_type cmd;
- int rq_id, pri_id, qset_id;
- int port_id, nq_id, pg_id;
- struct hclge_desc desc[2];
-
- int cnt, ret;
-
- cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
- &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
- if (cnt != 6) {
- dev_err(&hdev->pdev->dev,
- "dump dcb: bad command parameter, cnt=%d\n", cnt);
- return;
- }
+ struct hclge_desc desc;
+ u16 qset_id, qset_num;
+ int ret;
- cmd = HCLGE_OPC_QSET_DFX_STS;
- ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
- goto err_dcb_cmd_send;
+ return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
- dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
- dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ for (qset_id = 0; qset_id < qset_num; qset_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
+ HCLGE_OPC_QSET_DFX_STS);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_PRI_DFX_STS;
- ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
- dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%04u %#x %#x %#x %#x\n",
+ qset_id, bitmap->bit0, bitmap->bit1,
+ bitmap->bit2, bitmap->bit3);
+ }
- cmd = HCLGE_OPC_PG_DFX_STS;
- ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+ return 0;
+}
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
- dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_desc desc;
+ u8 pri_id, pri_num;
+ int ret;
- cmd = HCLGE_OPC_PORT_DFX_STS;
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
- goto err_dcb_cmd_send;
-
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
- dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
- dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
+ return ret;
- cmd = HCLGE_OPC_SCH_NQ_CNT;
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ for (pri_id = 0; pri_id < pri_num; pri_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
+ HCLGE_OPC_PRI_DFX_STS);
+ if (ret)
+ return ret;
- dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
- cmd = HCLGE_OPC_SCH_RQ_CNT;
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pri_id, bitmap->bit0, bitmap->bit1,
+ bitmap->bit2);
+ }
- dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ return 0;
+}
- cmd = HCLGE_OPC_TM_INTERNAL_STS;
- ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
- if (ret)
- goto err_dcb_cmd_send;
-
- dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
- le32_to_cpu(desc[0].data[3]));
- dev_info(dev, "tx_private_waterline: 0x%x\n",
- le32_to_cpu(desc[0].data[4]));
- dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
- dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
-
- cmd = HCLGE_OPC_TM_INTERNAL_CNT;
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_desc desc;
+ u8 pg_id;
+ int ret;
- dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
+ HCLGE_OPC_PG_DFX_STS);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
- if (ret)
- goto err_dcb_cmd_send;
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
- dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
- dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
- le32_to_cpu(desc[0].data[4]));
- dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
- le32_to_cpu(desc[0].data[5]));
- return;
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pg_id, bitmap->bit0, bitmap->bit1,
+ bitmap->bit2);
+ }
-err_dcb_cmd_send:
- dev_err(&hdev->pdev->dev,
- "failed to dump dcb dfx, cmd = %#x, ret = %d\n",
- cmd, ret);
+ return 0;
}
-static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
{
- const struct hclge_dbg_reg_type_info *reg_info;
- bool has_dump = false;
- int i;
+ struct hclge_desc desc;
+ u16 nq_id;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
- reg_info = &hclge_dbg_reg_info[i];
- if (!strncmp(cmd_buf, reg_info->reg_type,
- strlen(reg_info->reg_type))) {
- hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
- has_dump = true;
- }
- }
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_NQ_CNT);
+ if (ret)
+ return ret;
- if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
- hclge_dbg_dump_mac(hdev);
- has_dump = true;
- }
+ *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
+ nq_id, le32_to_cpu(desc.data[1]));
- if (strncmp(cmd_buf, "dcb", 3) == 0) {
- hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
- has_dump = true;
- }
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_RQ_CNT);
+ if (ret)
+ return ret;
- if (!has_dump) {
- dev_info(&hdev->pdev->dev, "unknown command\n");
- return;
+ *pos += scnprintf(buf + *pos, len - *pos,
+ " %#x\n",
+ le32_to_cpu(desc.data[1]));
}
-}
-static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
-{
- if (flag)
- dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
- index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
- else
- dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
+ return 0;
}
-static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
{
- struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_dbg_bitmap_cmd *bitmap;
struct hclge_desc desc;
- int i, ret;
-
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tc\n");
- return;
- }
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+ u8 port_id = 0;
+ int ret;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
- return;
- }
+ ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
+ HCLGE_OPC_PORT_DFX_STS);
+ if (ret)
+ return ret;
- ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
- dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
- hdev->tm_info.num_tc);
- dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
+ bitmap->bit0);
+ *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
+ bitmap->bit1);
- for (i = 0; i < HNAE3_MAX_TC; i++)
- hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
+ return 0;
}
-static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
{
- struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
- struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
- struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc;
+ struct hclge_desc desc[2];
+ u8 port_id = 0;
int ret;
- cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_CNT);
if (ret)
- goto err_tm_pg_cmd_send;
+ return ret;
- pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
- dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
- le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
- cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
+ HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
- goto err_tm_pg_cmd_send;
-
- pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
- dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
- le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
- dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag);
- dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n",
- le32_to_cpu(pg_shap_cfg_cmd->pg_rate));
-
- cmd = HCLGE_OPC_TM_PORT_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
+ le32_to_cpu(desc[1].data[0]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
+ le32_to_cpu(desc[1].data[1]));
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
+ return 0;
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret)
- goto err_tm_pg_cmd_send;
+ return ret;
- port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
- le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
- dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag);
- dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n",
- le32_to_cpu(port_shap_cfg_cmd->port_rate));
+ *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
- cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
if (ret)
- goto err_tm_pg_cmd_send;
+ return ret;
- dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
if (ret)
- goto err_tm_pg_cmd_send;
+ return ret;
- dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
if (ret)
- goto err_tm_pg_cmd_send;
+ return ret;
- dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
- le32_to_cpu(desc.data[0]));
+ return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+}
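
[Editor's note] The conversion above drops per-line dev_info() logging in favor of one caller-supplied buffer walked by a pos cursor that every sub-dump advances. A minimal userspace sketch of that cursor discipline, with snprintf standing in for the kernel's scnprintf (scnprintf returns only what was actually written, so the hand clamping below is unnecessary in the driver); all names are illustrative:

    #include <stdio.h>

    static void append(char *buf, int len, int *pos, const char *tag,
                       unsigned int val)
    {
        if (*pos >= len - 1)
            return;                 /* buffer full, silently stop */
        *pos += snprintf(buf + *pos, len - *pos, "%s: %#x\n", tag, val);
        if (*pos > len - 1)
            *pos = len - 1;         /* snprintf reports would-be length */
    }

    int main(void)
    {
        char buf[128];
        int pos = 0;

        append(buf, sizeof(buf), &pos, "qset", 0x3);
        append(buf, sizeof(buf), &pos, "pri", 0x7);
        fputs(buf, stdout);
        return 0;
    }
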
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tm mapping\n");
- return;
+static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
+ enum hnae3_dbg_cmd cmd, char *buf, int len)
+{
+ const struct hclge_dbg_reg_type_info *reg_info;
+ int pos = 0, ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (cmd == reg_info->cmd) {
+ if (cmd == HNAE3_DBG_CMD_REG_TQP)
+ return hclge_dbg_dump_reg_tqp(hdev, reg_info,
+ buf, len, &pos);
+
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
+ len, &pos);
+ if (ret)
+ break;
+ }
}
- cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_pg_cmd_send;
-
- bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
- bp_to_qs_map_cmd->tc_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
- bp_to_qs_map_cmd->qs_group_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
- le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
- return;
-
-err_tm_pg_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
- cmd, ret);
-}
-
-static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
-{
- struct hclge_priority_weight_cmd *priority_weight;
- struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
- struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
- struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
- struct hclge_pri_shapping_cmd *shap_cfg_cmd;
- struct hclge_pg_weight_cmd *pg_weight;
- struct hclge_qs_weight_cmd *qs_weight;
- enum hclge_opcode_type cmd;
+ return ret;
+}
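
[Editor's note] hclge_dbg_dump_reg_cmd() above resolves a debugfs command by scanning the hclge_dbg_reg_info table, special-casing the TQP dump. A hedged sketch of that table dispatch, with invented names and a trivial handler:

    #include <stdio.h>

    struct reg_dump { int cmd; int (*dump)(char *buf, int len); };

    static int dump_bios(char *buf, int len)
    {
        return snprintf(buf, len, "bios common reg\n");
    }

    static const struct reg_dump table[] = {
        { 1, dump_bios },
    };

    static int dump_reg_cmd(int cmd, char *buf, int len)
    {
        unsigned int i;
        int ret = 0;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].cmd == cmd) {
                ret = table[i].dump(buf, len);
                if (ret < 0)
                    break;          /* stop on the first failure */
            }
        return ret;
    }

    int main(void)
    {
        char buf[32];

        if (dump_reg_cmd(1, buf, sizeof(buf)) >= 0)
            fputs(buf, stdout);
        return 0;
    }
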
+
+static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
+ char *sch_mode_str;
+ int pos = 0;
int ret;
+ u8 i;
- cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
+ ret);
+ return ret;
+ }
- pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tm\n");
- dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
- pg_to_pri_map->pg_id);
- dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
- pg_to_pri_map->pri_bit_map);
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
-
- qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
- le16_to_cpu(qs_to_pri_map->qs_id));
- dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
- qs_to_pri_map->priority);
- dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
- qs_to_pri_map->link_vld);
-
- cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
+ hdev->tm_info.num_tc);
+ pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
+ pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
+ i, sch_mode_str,
+ hdev->tm_info.pg_info[0].tc_dwrr[i]);
+ }
- nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
- le16_to_cpu(nq_to_qs_map->nq_id));
- dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
- le16_to_cpu(nq_to_qs_map->qset_id));
+ return 0;
+}
- cmd = HCLGE_OPC_TM_PG_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+static const struct hclge_dbg_item tm_pg_items[] = {
+ { "ID", 2 },
+ { "PRI_MAP", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
- pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
- dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
+ char **result, u8 *index)
+{
+ sprintf(result[(*index)++], "%3u", para->ir_b);
+ sprintf(result[(*index)++], "%3u", para->ir_u);
+ sprintf(result[(*index)++], "%3u", para->ir_s);
+ sprintf(result[(*index)++], "%3u", para->bs_b);
+ sprintf(result[(*index)++], "%3u", para->bs_s);
+ sprintf(result[(*index)++], "%3u", para->flag);
+ sprintf(result[(*index)++], "%6u", para->rate);
+}
- cmd = HCLGE_OPC_TM_QS_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
+ u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ int pos = 0;
+ int ret;
- qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
- le16_to_cpu(qs_weight->qs_id));
- dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+ for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
+ result[i] = &data_str[i][0];
- cmd = HCLGE_OPC_TM_PRI_WEIGHT;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ NULL, ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
- priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
- dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
+ if (ret)
+ return ret;
- cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
-
- shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
- dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
- le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
- dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag);
- dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n",
- le32_to_cpu(shap_cfg_cmd->pri_rate));
-
- cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_cmd_send;
+ ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
+ if (ret)
+ return ret;
- shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
- dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
- le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
- dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag);
- dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n",
- le32_to_cpu(shap_cfg_cmd->pri_rate));
+ ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
+ if (ret)
+ return ret;
- hclge_dbg_dump_tm_pg(hdev);
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
- return;
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%02u", pg_id);
+ sprintf(result[j++], "0x%02x", pri_bit_map);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
-err_tm_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
- cmd, ret);
+ return 0;
}
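
[Editor's note] The tm_pg dump is driven entirely by the tm_pg_items[] table: the same { name, interval } entries format both the header and each staged row through hclge_dbg_fill_content(). That helper is defined elsewhere in the driver, so the sketch below only approximates its apparent contract (a NULL result prints the header; each cell is padded to strlen(name) plus the interval):

    #include <stdio.h>
    #include <string.h>

    struct dbg_item { const char *name; unsigned int interval; };

    static void fill_content(char *out, size_t len,
                             const struct dbg_item *items,
                             const char **result, unsigned int size)
    {
        size_t pos = 0;
        unsigned int i;

        out[0] = '\0';
        for (i = 0; i < size && pos < len - 1; i++) {
            int width = (int)(strlen(items[i].name) + items[i].interval);

            pos += snprintf(out + pos, len - pos, "%-*s", width,
                            result ? result[i] : items[i].name);
            if (pos > len - 1)
                pos = len - 1;
        }
        snprintf(out + pos, len - pos, "\n");
    }

    int main(void)
    {
        static const struct dbg_item items[] = { { "ID", 2 }, { "DWRR", 0 } };
        const char *row[] = { "03", "100" };
        char line[64];

        fill_content(line, sizeof(line), items, NULL, 2);   /* header */
        fputs(line, stdout);
        fill_content(line, sizeof(line), items, row, 2);    /* one row */
        fputs(line, stdout);
        return 0;
    }
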
-static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
{
- struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
- struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
- u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
- struct hclge_qs_to_pri_link_cmd *map;
- struct hclge_tqp_tx_queue_tc_cmd *tc;
- u16 group_id, queue_id, qset_id;
- enum hclge_opcode_type cmd;
- u8 grp_num, pri_id, tc_id;
- struct hclge_desc desc;
- u16 qs_id_l;
- u16 qs_id_h;
+ struct hclge_tm_shaper_para shaper_para;
+ int pos = 0;
int ret;
- u32 i;
- ret = kstrtou16(cmd_buf, 0, &queue_id);
- queue_id = (ret != 0) ? 0 : queue_id;
-
- cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
- nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
- goto err_tm_map_cmd_send;
- qset_id = le16_to_cpu(nq_to_qs_map->qset_id);
-
- /* convert qset_id to the following format, drop the vld bit
- * | qs_id_h | vld | qs_id_l |
- * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
- * \ \ / /
- * \ \ / /
- * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 |
- */
- qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK,
- HCLGE_TM_QS_ID_L_S);
- qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
- HCLGE_TM_QS_ID_H_EXT_S);
- qset_id = 0;
- hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
- qs_id_l);
- hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
- qs_id_h);
-
- cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
- map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- map->qs_id = cpu_to_le16(qset_id);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_map_cmd_send;
- pri_id = map->priority;
+ return ret;
- cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
- tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- tc->queue_id = cpu_to_le16(queue_id);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_map_cmd_send;
- tc_id = tc->tc_id & 0x7;
+ pos += scnprintf(buf + pos, len - pos,
+ "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
- dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
- dev_info(&hdev->pdev->dev, "%04u | %04u | %02u | %02u\n",
- queue_id, qset_id, pri_id, tc_id);
+ return 0;
+}
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports tm mapping\n");
- return;
- }
+static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
+ char *buf, int len)
+{
+ u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
+ struct hclge_bp_to_qs_map_cmd *map;
+ struct hclge_desc desc;
+ int pos = 0;
+ u8 group_id;
+ u8 grp_num;
+ u16 i = 0;
+ int ret;
grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
- cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
- bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
for (group_id = 0; group_id < grp_num; group_id++) {
- hclge_cmd_setup_basic_desc(&desc, cmd, true);
- bp_to_qs_map_cmd->tc_id = tc_id;
- bp_to_qs_map_cmd->qs_group_id = group_id;
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
+ true);
+ map->tc_id = tc_id;
+ map->qs_group_id = group_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- goto err_tm_map_cmd_send;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get bp to qset map, ret = %d\n",
+ ret);
+ return ret;
+ }
- qset_mapping[group_id] =
- le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
+ qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
-
- i = 0;
+ pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[(u32)(i + 7)],
- qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
- qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
- qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
qset_mapping[i]);
i += 8;
}
- return;
+ return pos;
+}
+
+static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+{
+ u16 queue_id;
+ u16 qset_id;
+ u8 link_vld;
+ int pos = 0;
+ u8 pri_id;
+ u8 tc_id;
+ int ret;
+
+ for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
+ ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
+ &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ continue;
+
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ }
-err_tm_map_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
- cmd, ret);
+ return 0;
}
static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
@@ -833,8 +928,8 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
{
- struct hclge_pri_shaper_para c_shaper_para;
- struct hclge_pri_shaper_para p_shaper_para;
+ struct hclge_tm_shaper_para c_shaper_para;
+ struct hclge_tm_shaper_para p_shaper_para;
u8 pri_num, sch_mode, weight;
char *sch_mode_str;
int pos = 0;
@@ -896,19 +991,42 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static const struct hclge_dbg_item tm_qset_items[] = {
+ { "ID", 4 },
+ { "MAP_PRI", 2 },
+ { "LINK_VLD", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "IR_B", 2 },
+ { "IR_U", 2 },
+ { "IR_S", 2 },
+ { "BS_B", 2 },
+ { "BS_S", 2 },
+ { "FLAG", 2 },
+ { "RATE(Mbps)", 0 }
+};
+
static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
{
+ char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
u8 priority, link_vld, sch_mode, weight;
- char *sch_mode_str;
+ struct hclge_tm_shaper_para shaper_para;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u16 qset_num, i;
int ret, pos;
- u16 qset_num;
- u16 i;
+ u8 j;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n");
+ for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
+ result[i] = &data_str[i][0];
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ NULL, ARRAY_SIZE(tm_qset_items));
+ pos = scnprintf(buf, len, "%s", content);
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -923,280 +1041,326 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
+ ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
+ if (ret)
+ return ret;
+
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %1u %4s %3u\n",
- i, priority, link_vld, sch_mode_str, weight);
+
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4u", priority);
+ sprintf(result[j++], "%4u", link_vld);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_qset_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
return 0;
}
-static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
+ int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
- ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos pause, ret = %d\n", ret);
+ return ret;
}
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
- dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
+ return 0;
}
-static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
+ int len)
{
+#define HCLGE_DBG_TC_MASK 0x0F
+#define HCLGE_DBG_TC_BIT_WIDTH 4
+
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
+ int pos = 0;
+ u8 *pri_tc;
+ u8 tc, i;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
-
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "dump qos pri map fail, ret = %d\n", ret);
- return;
+ "failed to dump qos pri map, ret = %d\n", ret);
+ return ret;
}
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump qos pri map\n");
- dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
- dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
- dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
- dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
- dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
- dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
- dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
- dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
- dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+
+ pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
+ pri_map->vlan_pri);
+ pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+
+ pri_tc = (u8 *)pri_map;
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
+ tc &= HCLGE_DBG_TC_MASK;
+ pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ }
+
+ return 0;
}
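
[Editor's note] Per the two #defines at the top of hclge_dbg_dump_qos_pri_map(), each byte of the mapping carries two 4-bit TC entries: even priorities in the low nibble, odd ones in the high nibble. A self-checking extraction of that layout (the map contents are invented for the test):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t pri_to_tc(const uint8_t *map, unsigned int pri)
    {
        /* two entries per byte: shift by 0 or 4, then mask a nibble */
        return (map[pri >> 1] >> ((pri & 1) * 4)) & 0x0F;
    }

    int main(void)
    {
        /* pri0 -> tc2, pri1 -> tc5, pri2 -> tc1, pri3 -> tc7 */
        static const uint8_t map[] = { 0x52, 0x71 };

        assert(pri_to_tc(map, 0) == 2);
        assert(pri_to_tc(map, 1) == 5);
        assert(pri_to_tc(map, 2) == 1);
        assert(pri_to_tc(map, 3) == 7);
        return 0;
    }
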
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
+ int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tx buf, ret = %d\n", ret);
return ret;
+ }
- dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ pos += scnprintf(buf + pos, len - pos,
+ "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return 0;
+ return pos;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
+ int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv buf, ret = %d\n", ret);
return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
- dev_info(&hdev->pdev->dev, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return 0;
+ return pos;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
+ int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common wl, ret = %d\n", ret);
return ret;
+ }
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return 0;
+ return pos;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
+ int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx global pkt cnt, ret = %d\n", ret);
return ret;
+ }
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return 0;
+ return pos;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
+ int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv wl buf, ret = %d\n", ret);
return ret;
+ }
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return 0;
+ return pos;
}
-static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
+ char *buf, int len)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
+ int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common threshold, ret = %d\n", ret);
return ret;
+ }
- dev_info(&hdev->pdev->dev, "\n");
+ pos += scnprintf(buf + pos, len - pos, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- dev_info(&hdev->pdev->dev,
+ pos += scnprintf(buf + pos, len - pos,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return 0;
+ return pos;
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
{
- enum hclge_opcode_type cmd;
+ int pos = 0;
int ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev);
- if (ret)
- goto err_qos_cmd_send;
-
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
- if (ret)
- goto err_qos_cmd_send;
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
- if (ret)
- goto err_qos_cmd_send;
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
- if (ret)
- goto err_qos_cmd_send;
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- dev_info(&hdev->pdev->dev, "\n");
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
- if (ret)
- goto err_qos_cmd_send;
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
- if (ret)
- goto err_qos_cmd_send;
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
- return;
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
-err_qos_cmd_send:
- dev_err(&hdev->pdev->dev,
- "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
+ return 0;
}
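
[Editor's note] Note the convention this chain relies on: unlike the *pos out-parameter style used by the DCB dumps, each qos-buf sub-dump returns the byte count it wrote (or a negative errno), and the aggregator advances its cursor only on success. A compact userspace sketch of that contract, with -EIO standing in for a firmware failure:

    #include <errno.h>
    #include <stdio.h>

    static int dump_part(char *buf, int len, int fail)
    {
        if (fail)
            return -EIO;            /* error: negative errno */
        return snprintf(buf, len, "part ok\n");  /* success: bytes written */
    }

    static int dump_all(char *buf, int len)
    {
        int pos = 0;
        int ret;

        ret = dump_part(buf + pos, len - pos, 0);
        if (ret < 0)
            return ret;             /* propagate the first failure */
        pos += ret;

        ret = dump_part(buf + pos, len - pos, 0);
        if (ret < 0)
            return ret;
        pos += ret;

        return 0;
    }

    int main(void)
    {
        char buf[64];

        if (!dump_all(buf, sizeof(buf)))
            fputs(buf, stdout);
        return 0;
    }
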
-static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
+static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_mac_ethertype_idx_rd_cmd *req0;
- char printf_buf[HCLGE_DBG_BUF_LEN];
struct hclge_desc desc;
u32 msg_egress_port;
+ int pos = 0;
int ret, i;
- dev_info(&hdev->pdev->dev, "mng tab:\n");
- memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
- strncat(printf_buf,
- "entry|mac_addr |mask|ether|mask|vlan|mask",
- HCLGE_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf),
- "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
- HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
-
- dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ pos += scnprintf(buf + pos, len - pos,
+ "entry mac_addr mask ether ");
+ pos += scnprintf(buf + pos, len - pos,
+ "mask vlan mask i_map i_dir e_type ");
+ pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
@@ -1207,52 +1371,53 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "call hclge_cmd_send fail, ret = %d\n", ret);
- return;
+ "failed to dump manage table, ret = %d\n", ret);
+ return ret;
}
if (!req0->resp_code)
continue;
- memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
- snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
- "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- le16_to_cpu(req0->index),
- req0->mac_addr[0], req0->mac_addr[1],
- req0->mac_addr[2], req0->mac_addr[3],
- req0->mac_addr[4], req0->mac_addr[5]);
-
- snprintf(printf_buf + strlen(printf_buf),
- HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%x |%04x |%x |%04x|%x |%02x |%02x |",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- snprintf(printf_buf + strlen(printf_buf),
- HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%x |%x |%02x |%04x|%x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
-
- dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
+
+ return 0;
}
-static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
- bool sel_x, u32 loc)
+#define HCLGE_DBG_TCAM_BUF_SIZE 256
+
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ char *tcam_buf,
+ struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
+ int pos = 0;
int ret, i;
u32 *req;
@@ -1266,31 +1431,35 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
- req1->stage = stage;
+ req1->stage = tcam_msg.stage;
req1->xy_sel = sel_x ? 1 : 0;
- req1->index = cpu_to_le32(loc);
+ req1->index = cpu_to_le32(tcam_msg.loc);
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
return ret;
- dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
- sel_x ? "x" : "y", loc);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
+ tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
/* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
/* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", *req++);
return ret;
}
@@ -1308,265 +1477,348 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
}
spin_unlock_bh(&hdev->fd_rule_lock);
- if (cnt != hdev->hclge_fd_rule_num)
+ if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
return -EINVAL;
return cnt;
}
-static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
{
+ u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
+ char *tcam_buf;
+ int pos = 0;
if (!hnae3_dev_fd_supported(hdev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
- return;
+ return -EOPNOTSUPP;
}
- if (!hdev->hclge_fd_rule_num ||
- !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
- return;
+ if (!hdev->hclge_fd_rule_num || !rule_num)
+ return 0;
- rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
- sizeof(u16), GFP_KERNEL);
+ rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
if (!rule_locs)
- return;
+ return -ENOMEM;
+
+ tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
+ if (!tcam_buf) {
+ kfree(rule_locs);
+ return -ENOMEM;
+ }
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
- if (rule_cnt <= 0) {
+ if (rule_cnt < 0) {
+ ret = rule_cnt;
dev_err(&hdev->pdev->dev,
- "failed to get rule number, ret = %d\n", rule_cnt);
- kfree(rule_locs);
- return;
+ "failed to get rule number, ret = %d\n", ret);
+ goto out;
}
+ ret = 0;
for (i = 0; i < rule_cnt; i++) {
- ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ tcam_msg.stage = HCLGE_FD_STAGE_1;
+ tcam_msg.loc = rule_locs[i];
+
+ ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
- kfree(rule_locs);
- return;
+ goto out;
}
- ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
+
+ ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
- kfree(rule_locs);
- return;
+ goto out;
}
+
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
+out:
+ kfree(tcam_buf);
kfree(rule_locs);
+ return ret;
}
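
[Editor's note] The TCAM dump now owns two heap buffers, so every failure path funnels through a single out: label that releases both. A stripped-down illustration of that single-exit cleanup (sizes and error codes are placeholders):

    #include <errno.h>
    #include <stdlib.h>

    static int dump_tcam_like(int fail_mid)
    {
        unsigned short *locs;
        char *scratch;
        int ret = 0;

        locs = calloc(16, sizeof(*locs));
        if (!locs)
            return -ENOMEM;

        scratch = calloc(256, 1);
        if (!scratch) {
            free(locs);             /* nothing else held yet */
            return -ENOMEM;
        }

        if (fail_mid) {
            ret = -EIO;
            goto out;               /* both buffers still freed below */
        }

        scratch[0] = 'x';           /* stands in for the real key dump */

    out:
        free(scratch);
        free(locs);
        return ret;
    }

    int main(void)
    {
        return dump_tcam_like(0);
    }
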
-void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
-{
- dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
- hdev->rst_stats.pf_rst_cnt);
- dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
- hdev->rst_stats.flr_rst_cnt);
- dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
- hdev->rst_stats.global_rst_cnt);
- dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
- hdev->rst_stats.imp_rst_cnt);
- dev_info(&hdev->pdev->dev, "reset done count: %u\n",
- hdev->rst_stats.reset_done_cnt);
- dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
- hdev->rst_stats.hw_reset_done_cnt);
- dev_info(&hdev->pdev->dev, "reset count: %u\n",
- hdev->rst_stats.reset_cnt);
- dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
- hdev->rst_stats.reset_fail_cnt);
- dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
- dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
- dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
- dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
- dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
- dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
- dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
-}
-
-static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
-{
- dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
- hdev->last_serv_processed);
- dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
- hdev->serv_processed_cnt);
-}
-
-static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
-{
- dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
- dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
- dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
- dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
-}
-
-static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
-{
- struct hclge_desc *desc_src, *desc_tmp;
- struct hclge_get_m7_bd_cmd *req;
+static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_fd_ad_cnt_read_cmd *req;
+ char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- u32 bd_num, buf_len;
- int ret, i;
+ int pos = 0;
+ int ret;
+ u64 cnt;
+ u8 i;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "func_id\thit_times\n");
+
+ for (i = 0; i < func_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
+ req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
+ req->index = cpu_to_le16(i);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+ cnt = le64_to_cpu(req->cnt);
+ hclge_dbg_get_func_id_str(str_id, i);
+ pos += scnprintf(buf + pos, len - pos,
+ "%s\t%llu\n", str_id, cnt);
+ }
+
+ return 0;
+}
+
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
+ hdev->rst_stats.pf_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
+ hdev->rst_stats.imp_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
+ hdev->rst_stats.reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
+ hdev->rst_stats.reset_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
+ hdev->rst_stats.reset_fail_cnt);
+ pos += scnprintf(buf + pos, len - pos,
+ "vector0 interrupt enable status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
+ pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
+ pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
+ pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
+ hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG));
+ pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
+ pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+ pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
+ hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ unsigned long rem_nsec;
+ int pos = 0;
+ u64 lc;
+
+ lc = local_clock();
+ rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
+
+ pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ pos += scnprintf(buf + pos, len - pos,
+ "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+
+ return 0;
+}
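
[Editor's note] local_clock() yields nanoseconds; do_div() divides its first argument in place and hands back the remainder, which the dump then scales to microseconds. A userspace equivalent of that split, assuming HCLGE_BILLION_NANO_SECONDS is 10^9 as its name suggests:

    #include <assert.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t ns = 5123456789ULL;        /* 5.123456789 s */
        uint64_t rem = ns % NSEC_PER_SEC;   /* do_div() remainder */

        ns /= NSEC_PER_SEC;                 /* do_div() quotient */
        assert(ns == 5);
        assert(rem / 1000 == 123456);       /* printed as [5.123456] */
        return 0;
    }
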
+
+static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
+ hdev->num_nic_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
+ hdev->num_roce_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
+ hdev->num_msi_used);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
+ hdev->num_msi_left);
+
+ return 0;
+}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
+ char *buf, int len, u32 bd_num)
+{
+#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
+
+ struct hclge_desc *desc_index = desc_src;
+ u32 offset = 0;
+ int pos = 0;
+ u32 i, j;
- req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+
+ for (i = 0; i < bd_num; i++) {
+ j = 0;
+ while (j < HCLGE_DESC_DATA_LEN - 1) {
+ pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
+ offset);
+ pos += scnprintf(buf + pos, len - pos, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
+ offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
+ }
+ desc_index++;
+ }
+}
+
+static int
+hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_get_imp_bd_cmd *req;
+ struct hclge_desc *desc_src;
+ struct hclge_desc desc;
+ u32 bd_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
+
+ req = (struct hclge_get_imp_bd_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get firmware statistics bd number failed, ret = %d\n",
+ "failed to get imp statistics bd number, ret = %d\n",
ret);
- return;
+ return ret;
}
bd_num = le32_to_cpu(req->bd_num);
- buf_len = sizeof(struct hclge_desc) * bd_num;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)
- return;
+ return -ENOMEM;
- desc_tmp = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
- HCLGE_OPC_M7_STATS_INFO);
+ ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
+ HCLGE_OPC_IMP_STATS_INFO);
if (ret) {
kfree(desc_src);
dev_err(&hdev->pdev->dev,
- "get firmware statistics failed, ret = %d\n", ret);
- return;
+ "failed to get imp statistics, ret = %d\n", ret);
+ return ret;
}
- for (i = 0; i < bd_num; i++) {
- dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
- le32_to_cpu(desc_tmp->data[0]),
- le32_to_cpu(desc_tmp->data[1]),
- le32_to_cpu(desc_tmp->data[2]));
- dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
- le32_to_cpu(desc_tmp->data[3]),
- le32_to_cpu(desc_tmp->data[4]),
- le32_to_cpu(desc_tmp->data[5]));
-
- desc_tmp++;
- }
+ hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
kfree(desc_src);
+
+ return 0;
}
#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
-static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
- struct hclge_desc *desc, int *offset,
- int *length)
+static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
+ char *buf, int *len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
- int i;
- int j;
+ int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
+ int i, j;
for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
if (i == 0 && j == 0)
continue;
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- *offset,
- le32_to_cpu(desc[i].data[j]));
- *offset += sizeof(u32);
- *length -= sizeof(u32);
- if (*length <= 0)
+ *pos += scnprintf(buf + *pos, *len - *pos,
+ "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
+
+ offset += sizeof(u32);
+ *index -= sizeof(u32);
+
+ if (*index <= 0)
return;
}
}
}
-/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
- * @hdev: pointer to struct hclge_dev
- * @cmd_buf: string that contains offset and length
- */
-static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int
+hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
{
-#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
-#define HCLGE_NCL_CONFIG_PARAM_NUM 2
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
- int offset;
- int length;
- int data0;
+ int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
+ int pos = 0;
+ u32 data0;
int ret;
- ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
- dev_err(&hdev->pdev->dev,
- "Too few parameters, num = %d.\n", ret);
- return;
- }
-
- if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev,
- "Invalid input, offset = %d, length = %d.\n",
- offset, length);
- return;
- }
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
- dev_info(&hdev->pdev->dev, "offset | data\n");
-
- while (length > 0) {
- data0 = offset;
- if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ while (index > 0) {
+ data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
+ if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
else
- data0 |= length << 16;
+ data0 |= (u32)index << 16;
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
HCLGE_OPC_QUERY_NCL_CONFIG);
if (ret)
- return;
+ return ret;
- hclge_ncl_config_data_print(hdev, desc, &offset, &length);
+ hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
}
+
+ return 0;
}
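
[Editor's note] The NCL request above packs the file offset into the low 16 bits of data0 and the chunk length into the high 16 bits (116 bytes per command, from HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD = 20 + 24 * 4). A self-checking sketch of that packing, inferred from the code rather than from hardware documentation:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t pack_ncl_req(uint16_t offset, uint16_t length)
    {
        return (uint32_t)offset | ((uint32_t)length << 16);
    }

    int main(void)
    {
        uint32_t data0 = pack_ncl_req(0x0040, 116);

        assert((data0 & 0xffff) == 0x0040);     /* offset field */
        assert((data0 >> 16) == 116);           /* length field */
        return 0;
    }
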
-static void hclge_dbg_dump_loopback(struct hclge_dev *hdev)
+static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
+ int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+ pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
+ hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to dump app loopback status, ret = %d\n", ret);
- return;
+ return ret;
}
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- dev_info(&hdev->pdev->dev, "app loopback: %s\n",
- loopback_en ? "on" : "off");
+ pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
+ state_str[loopback_en]);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1574,247 +1826,647 @@ static void hclge_dbg_dump_loopback(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev,
"failed to dump common loopback status, ret = %d\n",
ret);
- return;
+ return ret;
}
loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
- loopback_en ? "on" : "off");
+ pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
+ state_str[loopback_en]);
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
- dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
- loopback_en ? "on" : "off");
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
+ state_str[loopback_en]);
if (phydev) {
- dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
- phydev->loopback_enabled ? "on" : "off");
+ loopback_en = phydev->loopback_enabled;
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
- loopback_en ? "on" : "off");
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
}
+
+ return 0;
}
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
+static int
+hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
{
-#define HCLGE_BILLION_NANO_SECONDS 1000000000
-
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
+ int pos = 0;
- dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_item mac_list_items[] = {
+ { "FUNC_ID", 2 },
+ { "MAC_ADDR", 12 },
+ { "STATE", 2 },
+};
+
+static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
+ bool is_unicast)
+{
+ char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
+ char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
+ result[i] = &data_str[i][0];
+
+ pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
+ is_unicast ? "UC" : "MC");
+ hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
+ NULL, ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
+ vport = &hdev->vport[func_id];
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+ spin_lock_bh(&vport->mac_list_lock);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ i = 0;
+ result[i++] = hclge_dbg_get_func_id_str(str_id,
+ func_id);
+ sprintf(result[i++], "%pM", mac_node->mac_addr);
+ sprintf(result[i++], "%5s",
+ hclge_mac_state_str[mac_node->state]);
+ hclge_dbg_fill_content(content, sizeof(content),
+ mac_list_items,
+ (const char **)result,
+ ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+ spin_unlock_bh(&vport->mac_list_lock);
}
}
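
[Editor's note] The mac-list walk uses list_for_each_entry_safe() under mac_list_lock; _safe caches the next node up front, which is presumably defensive here since the dump itself frees nothing. A userspace analogue of that deletion-safe traversal:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    int main(void)
    {
        struct node *c = malloc(sizeof(*c));
        struct node *b = malloc(sizeof(*b));
        struct node *head = malloc(sizeof(*head));
        struct node *cur, *tmp;

        head->val = 1; head->next = b;
        b->val = 2;    b->next = c;
        c->val = 3;    c->next = NULL;

        for (cur = head; cur; cur = tmp) {
            tmp = cur->next;        /* saved before cur can go away */
            printf("%d\n", cur->val);
            free(cur);              /* safe: tmp already captured */
        }
        return 0;
    }
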
-static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
{
- struct hclge_qs_shapping_cmd *shap_cfg_cmd;
- u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ u8 func_num = pci_num_vf(hdev->pdev) + 1;
+ struct hclge_vport *vport;
+ int pos = 0;
+ u8 i;
+
+ pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
+ hdev->num_alloc_vport);
+ pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
+ hdev->max_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
+ hdev->wanted_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
+ hdev->priv_umv_size);
+
+ mutex_lock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
+ hdev->share_umv_size);
+ for (i = 0; i < func_num; i++) {
+ vport = &hdev->vport[i];
+ pos += scnprintf(buf + pos, len - pos,
+ "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
+
+static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
+{
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
struct hclge_desc desc;
- u32 shapping_para;
- u32 rate;
+ u16 bmap_index;
+ u8 rx_cfg;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
- shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
- shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "qs%u failed to get tx_rate, ret=%d\n",
- qsid, ret);
- return;
+ "failed to get vport%u rxvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
}
- shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
- ir_b = hclge_tm_get_field(shapping_para, IR_B);
- ir_u = hclge_tm_get_field(shapping_para, IR_U);
- ir_s = hclge_tm_get_field(shapping_para, IR_S);
- bs_b = hclge_tm_get_field(shapping_para, BS_B);
- bs_s = hclge_tm_get_field(shapping_para, BS_S);
- rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
+ rx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
+ vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
+ vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
+ vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
+ vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
+ vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
- dev_info(&hdev->pdev->dev,
- "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n",
- qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate);
+ return 0;
}
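
[Editor's note] Both vlan cfg reads address a single function the same way: vf_offset picks a block of VFs, bmap_index a byte inside that block's bitmap, and the final modulo a bit inside the byte. The worked sketch below assumes HCLGE_VF_NUM_PER_CMD is 64 and HCLGE_VF_NUM_PER_BYTE is 8, matching the driver's constants:

    #include <assert.h>
    #include <stdint.h>

    #define VF_NUM_PER_CMD  64
    #define VF_NUM_PER_BYTE 8

    struct vf_sel {
        uint8_t vf_offset;      /* which 64-VF block */
        uint8_t bmap_index;     /* which byte inside the block's bitmap */
        uint8_t bit;            /* which bit inside that byte */
    };

    static struct vf_sel select_vf(unsigned int vf_id)
    {
        struct vf_sel sel;

        sel.vf_offset = vf_id / VF_NUM_PER_CMD;
        sel.bmap_index = vf_id % VF_NUM_PER_CMD / VF_NUM_PER_BYTE;
        sel.bit = vf_id % VF_NUM_PER_BYTE;
        return sel;
    }

    int main(void)
    {
        struct vf_sel sel = select_vf(70);      /* VF 70 */

        assert(sel.vf_offset == 1);     /* second 64-VF block */
        assert(sel.bmap_index == 0);    /* 70 % 64 == 6 -> byte 0 */
        assert(sel.bit == 6);           /* bit 6 of that byte */
        return 0;
    }
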
-static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
{
- struct hnae3_knic_private_info *kinfo;
- struct hclge_vport *vport;
- int vport_id, i;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 tx_cfg;
+ int ret;
- for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
- vport = &hdev->vport[vport_id];
- kinfo = &vport->nic.kinfo;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
- dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u txvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
- u16 qsid = vport->qs_offset + i;
+ tx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
- hclge_dbg_dump_qs_shaper_single(hdev, qsid);
- }
- }
+ vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
+ vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
+ vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
+ vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
+ vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
+ vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
+ vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
+
+ return 0;
}
-static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
- const char *cmd_buf)
+static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
+ u8 vlan_type, u8 vf_id,
+ struct hclge_desc *desc)
{
- u16 qsid;
+ struct hclge_vlan_filter_ctrl_cmd *req;
int ret;
- ret = kstrtou16(cmd_buf, 0, &qsid);
- if (ret) {
- hclge_dbg_dump_qs_shaper_all(hdev);
- return;
- }
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
- if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
- dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
- qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);
- return;
- }
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
- hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ return ret;
}
-static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
- bool is_unicast)
+static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
+ u8 vf_id, u8 *vlan_fe)
{
- struct hclge_mac_node *mac_node, *tmp;
- struct hclge_vport *vport;
- struct list_head *list;
- u32 func_id;
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ *vlan_fe = req->vlan_fe;
+
+ return 0;
+}
+
+static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
+ u8 vf_id, u8 *bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
int ret;
- ret = kstrtouint(cmd_buf, 0, &func_id);
- if (ret < 0) {
+ if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
dev_err(&hdev->pdev->dev,
- "dump mac list: bad command string, ret = %d\n", ret);
- return -EINVAL;
+ "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+ return ret;
}
- if (func_id >= hdev->num_alloc_vport) {
- dev_err(&hdev->pdev->dev,
- "function id(%u) is out of range(0-%u)\n", func_id,
- hdev->num_alloc_vport - 1);
- return -EINVAL;
+ *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
+
+ return 0;
+}
+
+static const struct hclge_dbg_item vlan_filter_items[] = {
+ { "FUNC_ID", 2 },
+ { "I_VF_VLAN_FILTER", 2 },
+ { "E_VF_VLAN_FILTER", 2 },
+ { "PORT_VLAN_FILTER_BYPASS", 0 }
+};
+
+static const struct hclge_dbg_item vlan_offload_items[] = {
+ { "FUNC_ID", 2 },
+ { "PVID", 4 },
+ { "ACCEPT_TAG1", 2 },
+ { "ACCEPT_TAG2", 2 },
+ { "ACCEPT_UNTAG1", 2 },
+ { "ACCEPT_UNTAG2", 2 },
+ { "INSERT_TAG1", 2 },
+ { "INSERT_TAG2", 2 },
+ { "SHIFT_TAG", 2 },
+ { "STRIP_TAG1", 2 },
+ { "STRIP_TAG2", 2 },
+ { "DROP_TAG1", 2 },
+ { "DROP_TAG2", 2 },
+ { "PRI_ONLY_TAG1", 2 },
+ { "PRI_ONLY_TAG2", 0 }
+};
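
Reviewer note: hclge_dbg_fill_content() (defined earlier in this file) renders one row from these item tables, writing either the header names or a result column, each followed by "interval" blanks. A plausible sketch of such a helper, not the driver's exact implementation:

#include <stdio.h>

struct dbg_item {
	char name[32];
	unsigned short interval;	/* number of blank spaces after the item */
};

static void fill_content(char *content, int len, const struct dbg_item *items,
			 const char **result, int size)
{
	int pos = 0, n, i;

	content[0] = '\0';
	for (i = 0; i < size; i++) {
		const char *src = result ? result[i] : items[i].name;

		/* the value followed by items[i].interval padding blanks */
		n = snprintf(content + pos, len - pos, "%s%*s",
			     src, items[i].interval, "");
		if (n < 0 || n >= len - pos)
			break;	/* truncated: stop rather than overrun */
		pos += n;
	}
	snprintf(content + pos, len - pos, "\n");
}

int main(void)
{
	const struct dbg_item items[] = { { "FUNC_ID", 2 }, { "PVID", 4 } };
	const char *row[] = { "pf", "100" };
	char line[64];

	fill_content(line, sizeof(line), items, NULL, 2);	/* header row */
	fputs(line, stdout);
	fill_content(line, sizeof(line), items, row, 2);	/* data row */
	fputs(line, stdout);
	return 0;
}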
+
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_filter_items)];
+ u8 i, j, vlan_fe, bypass, ingress, egress;
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ int ret;
+
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
+ &vlan_fe);
+ if (ret)
+ return ret;
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+
+ *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
+ state_str[ingress]);
+ *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
+ state_str[egress]);
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
+ NULL, ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
+ &vlan_fe);
+ if (ret)
+ return ret;
+
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+ ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
+ if (ret)
+ return ret;
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = state_str[ingress];
+ result[j++] = state_str[egress];
+ result[j++] =
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_filter_items, result,
+ ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
}
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
+ return 0;
+}
- vport = &hdev->vport[func_id];
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_offload_items)];
+ char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_dbg_vlan_cfg vlan_cfg;
+ int ret;
+ u8 i, j;
- list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+ hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
+ NULL, ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
- func_id, is_unicast ? "uc" : "mc");
- dev_info(&hdev->pdev->dev, "mac address state\n");
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
- spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
- list_for_each_entry_safe(mac_node, tmp, list, node) {
- dev_info(&hdev->pdev->dev, "%pM %d\n",
- mac_node->mac_addr, mac_node->state);
+ sprintf(str_pvid, "%u", vlan_cfg.pvid);
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = str_pvid;
+ result[j++] = state_str[vlan_cfg.accept_tag1];
+ result[j++] = state_str[vlan_cfg.accept_tag2];
+ result[j++] = state_str[vlan_cfg.accept_untag1];
+ result[j++] = state_str[vlan_cfg.accept_untag2];
+ result[j++] = state_str[vlan_cfg.insert_tag1];
+ result[j++] = state_str[vlan_cfg.insert_tag2];
+ result[j++] = state_str[vlan_cfg.shift_tag];
+ result[j++] = state_str[vlan_cfg.strip_tag1];
+ result[j++] = state_str[vlan_cfg.strip_tag2];
+ result[j++] = state_str[vlan_cfg.drop_tag1];
+ result[j++] = state_str[vlan_cfg.drop_tag2];
+ result[j++] = state_str[vlan_cfg.pri_only1];
+ result[j++] = state_str[vlan_cfg.pri_only2];
+
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_offload_items, result,
+ ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
}
- spin_unlock_bh(&vport->mac_list_lock);
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+ u32 sw_cfg = ptp->ptp_cfg;
+ unsigned int tx_start;
+ unsigned int last_rx;
+ int pos = 0;
+ u32 hw_cfg;
+ int ret;
+
+ pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
+ ptp->info.name);
+ pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
+ "yes" : "no");
+
+ last_rx = jiffies_to_msecs(ptp->last_rx);
+ pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+
+ tx_start = jiffies_to_msecs(ptp->tx_start);
+ pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
+ pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
+ ptp->tx_skipped);
+ pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
+ ptp->tx_timeout);
+ pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
+ ptp->last_tx_seqid);
+
+ ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
+ sw_cfg, hw_cfg);
+
+ pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
+static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
-#define DUMP_REG "dump reg"
-#define DUMP_TM_MAP "dump tm map"
-#define DUMP_LOOPBACK "dump loopback"
-#define DUMP_INTERRUPT "dump intr"
+ hclge_dbg_dump_mac_list(hdev, buf, len, true);
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ return 0;
+}
- if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
- hclge_dbg_fd_tcam(hdev);
- } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
- hclge_dbg_dump_tc(hdev);
- } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
- hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
- } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
- hclge_dbg_dump_tm(hdev);
- } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
- hclge_dbg_dump_qos_pause_cfg(hdev);
- } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
- hclge_dbg_dump_qos_pri_map(hdev);
- } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
- hclge_dbg_dump_qos_buf_cfg(hdev);
- } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
- hclge_dbg_dump_mng_table(hdev);
- } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
- hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
- } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
- hclge_dbg_dump_rst_info(hdev);
- } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
- hclge_dbg_dump_serv_info(hdev);
- } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
- hclge_dbg_get_m7_stats_info(hdev);
- } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
- hclge_dbg_dump_ncl_config(hdev,
- &cmd_buf[sizeof("dump ncl_config")]);
- } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
- hclge_dbg_dump_mac_tnl_status(hdev);
- } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
- strlen(DUMP_LOOPBACK)) == 0) {
- hclge_dbg_dump_loopback(hdev);
- } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
- hclge_dbg_dump_qs_shaper(hdev,
- &cmd_buf[sizeof("dump qs shaper")]);
- } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
- hclge_dbg_dump_mac_list(hdev,
- &cmd_buf[sizeof("dump uc mac list")],
- true);
- } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
- hclge_dbg_dump_mac_list(hdev,
- &cmd_buf[sizeof("dump mc mac list")],
- false);
- } else if (strncmp(cmd_buf, DUMP_INTERRUPT,
- strlen(DUMP_INTERRUPT)) == 0) {
- hclge_dbg_dump_interrupt(hdev);
- } else {
- dev_info(&hdev->pdev->dev, "unknown command\n");
- return -EINVAL;
- }
+static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+{
+ hclge_dbg_dump_mac_list(hdev, buf, len, false);
return 0;
}
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
+ {
+ .cmd = HNAE3_DBG_CMD_TM_NODES,
+ .dbg_dump = hclge_dbg_dump_tm_nodes,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PRI,
+ .dbg_dump = hclge_dbg_dump_tm_pri,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dbg_dump = hclge_dbg_dump_tm_qset,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_MAP,
+ .dbg_dump = hclge_dbg_dump_tm_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PG,
+ .dbg_dump = hclge_dbg_dump_tm_pg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PORT,
+ .dbg_dump = hclge_dbg_dump_tm_port,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
+ .dbg_dump = hclge_dbg_dump_tc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dbg_dump = hclge_dbg_dump_mac_uc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_MC,
+ .dbg_dump = hclge_dbg_dump_mac_mc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MNG_TBL,
+ .dbg_dump = hclge_dbg_dump_mng_table,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_LOOPBACK,
+ .dbg_dump = hclge_dbg_dump_loopback,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dbg_dump = hclge_dbg_dump_ptp_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
+ .dbg_dump = hclge_dbg_dump_interrupt,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RESET_INFO,
+ .dbg_dump = hclge_dbg_dump_rst_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_IMP_INFO,
+ .dbg_dump = hclge_dbg_get_imp_stats_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
+ .dbg_dump = hclge_dbg_dump_ncl_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_MAC,
+ .dbg_dump = hclge_dbg_dump_mac,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_DCB,
+ .dbg_dump = hclge_dbg_dump_dcb,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dbg_dump = hclge_dbg_dump_fd_tcam,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_SERV_INFO,
+ .dbg_dump = hclge_dbg_dump_serv_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dbg_dump = hclge_dbg_dump_vlan_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+ .dbg_dump = hclge_dbg_dump_fd_counter,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_UMV_INFO,
+ .dbg_dump = hclge_dbg_dump_umv_info,
+ },
+};
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
char *buf, int len)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ const struct hclge_dbg_func *cmd_func;
struct hclge_dev *hdev = vport->back;
+ u32 i;
- if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
- strlen(HNAE3_DBG_TM_NODES)) == 0)
- return hclge_dbg_dump_tm_nodes(hdev, buf, len);
- else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
- strlen(HNAE3_DBG_TM_PRI)) == 0)
- return hclge_dbg_dump_tm_pri(hdev, buf, len);
- else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
- strlen(HNAE3_DBG_TM_QSET)) == 0)
- return hclge_dbg_dump_tm_qset(hdev, buf, len);
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
+ if (cmd == hclge_dbg_cmd_func[i].cmd) {
+ cmd_func = &hclge_dbg_cmd_func[i];
+ if (cmd_func->dbg_dump)
+ return cmd_func->dbg_dump(hdev, buf, len);
+ else
+ return cmd_func->dbg_dump_reg(hdev, cmd, buf,
+ len);
+ }
+ }
+ dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
return -EINVAL;
}
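
Reviewer note: the old strncmp() chain is replaced by a table keyed on enum hnae3_dbg_cmd, with two callback shapes (plain dump vs. register dump). The same table-driven dispatch in miniature, under invented names:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum dbg_cmd { DBG_CMD_FOO, DBG_CMD_BAR };

struct dbg_func {
	enum dbg_cmd cmd;
	int (*dump)(char *buf, int len);
};

static int dump_foo(char *buf, int len) { return snprintf(buf, len, "foo\n") < 0 ? -1 : 0; }
static int dump_bar(char *buf, int len) { return snprintf(buf, len, "bar\n") < 0 ? -1 : 0; }

static const struct dbg_func funcs[] = {
	{ DBG_CMD_FOO, dump_foo },
	{ DBG_CMD_BAR, dump_bar },
};

static int read_cmd(enum dbg_cmd cmd, char *buf, int len)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(funcs); i++)
		if (cmd == funcs[i].cmd)
			return funcs[i].dump(buf, len);
	return -22;	/* -EINVAL */
}

int main(void)
{
	char buf[16];

	if (!read_cmd(DBG_CMD_BAR, buf, sizeof(buf)))
		fputs(buf, stdout);
	return 0;
}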
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index ca2ab6cf84d9..c526591a7240 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include "hclge_cmd.h"
-#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
@@ -70,6 +69,11 @@ struct hclge_dbg_reg_common_msg {
enum hclge_opcode_type cmd;
};
+struct hclge_dbg_tcam_msg {
+ u8 stage;
+ u32 loc;
+};
+
#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
@@ -78,11 +82,18 @@ struct hclge_dbg_dfx_message {
#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
struct hclge_dbg_reg_type_info {
- const char *reg_type;
+ enum hnae3_dbg_cmd cmd;
const struct hclge_dbg_dfx_message *dfx_msg;
struct hclge_dbg_reg_common_msg reg_msg;
};
+struct hclge_dbg_func {
+ enum hnae3_dbg_cmd cmd;
+ int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
+ int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
+};
+
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
@@ -723,4 +734,36 @@ static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "RCB_CFG_TX_RING_EBDNUM"},
};
+#define HCLGE_DBG_INFO_LEN 256
+#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
+#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
+#define HCLGE_DBG_ID_LEN 16
+#define HCLGE_DBG_ITEM_NAME_LEN 32
+#define HCLGE_DBG_DATA_STR_LEN 32
+#define HCLGE_DBG_TM_INFO_LEN 256
+
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+struct hclge_dbg_item {
+ char name[HCLGE_DBG_ITEM_NAME_LEN];
+ u16 interval; /* number of blank spaces after the item */
+};
+
+struct hclge_dbg_vlan_cfg {
+ u16 pvid;
+ u8 accept_tag1;
+ u8 accept_tag2;
+ u8 accept_untag1;
+ u8 accept_untag2;
+ u8 insert_tag1;
+ u8 insert_tag2;
+ u8 shift_tag;
+ u8 strip_tag1;
+ u8 strip_tag2;
+ u8 drop_tag1;
+ u8 drop_tag2;
+ u8 pri_only1;
+ u8 pri_only2;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 8223d699cd94..ec9a7f8bc3fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -631,6 +631,134 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
+static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
+ {
+ .module_id = MODULE_NONE,
+ .msg = "MODULE_NONE"
+ }, {
+ .module_id = MODULE_BIOS_COMMON,
+ .msg = "MODULE_BIOS_COMMON"
+ }, {
+ .module_id = MODULE_GE,
+ .msg = "MODULE_GE"
+ }, {
+ .module_id = MODULE_IGU_EGU,
+ .msg = "MODULE_IGU_EGU"
+ }, {
+ .module_id = MODULE_LGE,
+ .msg = "MODULE_LGE"
+ }, {
+ .module_id = MODULE_NCSI,
+ .msg = "MODULE_NCSI"
+ }, {
+ .module_id = MODULE_PPP,
+ .msg = "MODULE_PPP"
+ }, {
+ .module_id = MODULE_QCN,
+ .msg = "MODULE_QCN"
+ }, {
+ .module_id = MODULE_RCB_RX,
+ .msg = "MODULE_RCB_RX"
+ }, {
+ .module_id = MODULE_RTC,
+ .msg = "MODULE_RTC"
+ }, {
+ .module_id = MODULE_SSU,
+ .msg = "MODULE_SSU"
+ }, {
+ .module_id = MODULE_TM,
+ .msg = "MODULE_TM"
+ }, {
+ .module_id = MODULE_RCB_TX,
+ .msg = "MODULE_RCB_TX"
+ }, {
+ .module_id = MODULE_TXDMA,
+ .msg = "MODULE_TXDMA"
+ }, {
+ .module_id = MODULE_MASTER,
+ .msg = "MODULE_MASTER"
+ }, {
+ .module_id = MODULE_ROCEE_TOP,
+ .msg = "MODULE_ROCEE_TOP"
+ }, {
+ .module_id = MODULE_ROCEE_TIMER,
+ .msg = "MODULE_ROCEE_TIMER"
+ }, {
+ .module_id = MODULE_ROCEE_MDB,
+ .msg = "MODULE_ROCEE_MDB"
+ }, {
+ .module_id = MODULE_ROCEE_TSP,
+ .msg = "MODULE_ROCEE_TSP"
+ }, {
+ .module_id = MODULE_ROCEE_TRP,
+ .msg = "MODULE_ROCEE_TRP"
+ }, {
+ .module_id = MODULE_ROCEE_SCC,
+ .msg = "MODULE_ROCEE_SCC"
+ }, {
+ .module_id = MODULE_ROCEE_CAEP,
+ .msg = "MODULE_ROCEE_CAEP"
+ }, {
+ .module_id = MODULE_ROCEE_GEN_AC,
+ .msg = "MODULE_ROCEE_GEN_AC"
+ }, {
+ .module_id = MODULE_ROCEE_QMM,
+ .msg = "MODULE_ROCEE_QMM"
+ }, {
+ .module_id = MODULE_ROCEE_LSAN,
+ .msg = "MODULE_ROCEE_LSAN"
+ }
+};
+
+static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
+ {
+ .type_id = NONE_ERROR,
+ .msg = "none_error"
+ }, {
+ .type_id = FIFO_ERROR,
+ .msg = "fifo_error"
+ }, {
+ .type_id = MEMORY_ERROR,
+ .msg = "memory_error"
+ }, {
+ .type_id = POISON_ERROR,
+ .msg = "poison_error"
+ }, {
+ .type_id = MSIX_ECC_ERROR,
+ .msg = "msix_ecc_error"
+ }, {
+ .type_id = TQP_INT_ECC_ERROR,
+ .msg = "tqp_int_ecc_error"
+ }, {
+ .type_id = PF_ABNORMAL_INT_ERROR,
+ .msg = "pf_abnormal_int_error"
+ }, {
+ .type_id = MPF_ABNORMAL_INT_ERROR,
+ .msg = "mpf_abnormal_int_error"
+ }, {
+ .type_id = COMMON_ERROR,
+ .msg = "common_error"
+ }, {
+ .type_id = PORT_ERROR,
+ .msg = "port_error"
+ }, {
+ .type_id = ETS_ERROR,
+ .msg = "ets_error"
+ }, {
+ .type_id = NCSI_ERROR,
+ .msg = "ncsi_error"
+ }, {
+ .type_id = GLB_ERROR,
+ .msg = "glb_error"
+ }, {
+ .type_id = ROCEE_NORMAL_ERR,
+ .msg = "rocee_normal_error"
+ }, {
+ .type_id = ROCEE_OVF_ERR,
+ .msg = "rocee_ovf_error"
+ }
+};
+
static void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts, unsigned long *reset_requests)
@@ -1611,11 +1739,27 @@ static const struct hclge_hw_blk hw_blk[] = {
{ /* sentinel */ }
};
+static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+
+ if (enable)
+ reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+ else
+ reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+}
+
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
int ret = 0;
+ hclge_config_all_msix_error(hdev, state);
+
while (module->name) {
if (module->config_err_int) {
ret = module->config_err_int(hdev, state);
@@ -1876,11 +2020,8 @@ static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
- struct hclge_mac_tnl_stats mac_tnl_stats;
- struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
- u32 status;
int ret;
/* query the number of bds for the MSIx int status */
@@ -1903,16 +2044,45 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
if (ret)
goto msi_error;
+ ret = hclge_handle_mac_tnl(hdev);
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "failed to handle msix error during dev init\n");
+ return -EAGAIN;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+int hclge_handle_mac_tnl(struct hclge_dev *hdev)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ u32 status;
+ int ret;
+
 /* query and clear mac tnl interrupts */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
- goto msi_error;
+ dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret);
+ return ret;
}
- status = le32_to_cpu(desc->data[0]);
+ status = le32_to_cpu(desc.data[0]);
if (status) {
/* When mac tnl interrupt occurs, we record current time and
* register status here in a fifo, then clear the status. So
@@ -1924,33 +2094,15 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
- dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ dev_err(dev, "failed to clear mac tnl int, ret = %d.\n",
+ ret);
}
-msi_error:
- kfree(desc);
-out:
return ret;
}
-int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
- unsigned long *reset_requests)
-{
- struct device *dev = &hdev->pdev->dev;
-
- if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
- dev_err(dev,
- "Can't handle - MSIx error reported during dev init\n");
- return 0;
- }
-
- return hclge_handle_all_hw_msix_error(hdev, reset_requests);
-}
-
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_DESC_NO_DATA_LEN 8
-
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
@@ -1999,3 +2151,207 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
msi_error:
kfree(desc);
}
+
+bool hclge_find_error_source(struct hclge_dev *hdev)
+{
+ u32 msix_src_flag, hw_err_src_flag;
+
+ msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_VECTOR0_REG_MSIX_MASK;
+
+ hw_err_src_flag = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG) &
+ HCLGE_RAS_REG_ERR_MASK;
+
+ return msix_src_flag || hw_err_src_flag;
+}
+
+void hclge_handle_occurred_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ if (hclge_find_error_source(hdev))
+ hclge_handle_error_info_log(ae_dev);
+}
+
+static void
+hclge_handle_error_type_reg_log(struct device *dev,
+ struct hclge_mod_err_info *mod_info,
+ struct hclge_type_reg_err_info *type_reg_info)
+{
+#define HCLGE_ERR_TYPE_MASK 0x7F
+#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
+
+ u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ u8 index_module = MODULE_NONE;
+ u8 index_type = NONE_ERROR;
+
+ mod_id = mod_info->mod_id;
+ type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
+ is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET;
+
+ total_module = ARRAY_SIZE(hclge_hw_module_id_st);
+ total_type = ARRAY_SIZE(hclge_hw_type_id_st);
+
+ for (i = 0; i < total_module; i++) {
+ if (mod_id == hclge_hw_module_id_st[i].module_id) {
+ index_module = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < total_type; i++) {
+ if (type_id == hclge_hw_type_id_st[i].type_id) {
+ index_type = i;
+ break;
+ }
+ }
+
+ if (index_module != MODULE_NONE && index_type != NONE_ERROR)
+ dev_err(dev,
+ "found %s %s, is %s error.\n",
+ hclge_hw_module_id_st[index_module].msg,
+ hclge_hw_type_id_st[index_type].msg,
+ is_ras ? "ras" : "msix");
+ else
+ dev_err(dev,
+ "unknown module[%u] or type[%u].\n", mod_id, type_id);
+
+ dev_err(dev, "reg_value:\n");
+ for (i = 0; i < type_reg_info->reg_num; i++)
+ dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+}
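
Reviewer note: type_id packs the error type in bits 0-6 and an "is RAS" flag in bit 7, which the masks above peel apart before the table lookups. A worked decode on a hypothetical raw value:

#include <stdio.h>

#define ERR_TYPE_MASK		0x7F
#define ERR_TYPE_IS_RAS_OFFSET	7

int main(void)
{
	unsigned char raw = 0x81;	/* hypothetical: bit 7 set, type 1 (fifo_error) */
	unsigned char type_id = raw & ERR_TYPE_MASK;		/* 1 */
	unsigned char is_ras = raw >> ERR_TYPE_IS_RAS_OFFSET;	/* 1 */

	printf("type %u, %s error\n", type_id, is_ras ? "ras" : "msix");
	return 0;
}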
+
+static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
+ const u32 *buf, u32 buf_size)
+{
+ struct hclge_type_reg_err_info *type_reg_info;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_mod_err_info *mod_info;
+ struct hclge_sum_err_info *sum_info;
+ u8 mod_num, err_num, i;
+ u32 offset = 0;
+
+ sum_info = (struct hclge_sum_err_info *)&buf[offset++];
+ if (sum_info->reset_type &&
+ sum_info->reset_type != HNAE3_NONE_RESET)
+ set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req);
+ mod_num = sum_info->mod_num;
+
+ while (mod_num--) {
+ if (offset >= buf_size) {
+ dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+ mod_info = (struct hclge_mod_err_info *)&buf[offset++];
+ err_num = mod_info->err_num;
+
+ for (i = 0; i < err_num; i++) {
+ if (offset >= buf_size) {
+ dev_err(dev,
+ "The offset(%u) exceeds buf size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+
+ type_reg_info = (struct hclge_type_reg_err_info *)
+ &buf[offset++];
+ hclge_handle_error_type_reg_log(dev, mod_info,
+ type_reg_info);
+
+ offset += type_reg_info->reg_num;
+ }
+ }
+}
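
Reviewer note: the parser above walks a flat u32 stream: one summary word, then per-module words, each followed by reg_num raw registers, with every advance bounds-checked against buf_size. A self-contained sketch of the same walk over a simplified, hypothetical layout:

#include <stdio.h>
#include <stdint.h>

/* hypothetical layout: word0 = module count; per module, one word holds the
 * register count, followed by that many register values.
 */
static void walk(const uint32_t *buf, uint32_t buf_size)
{
	uint32_t offset = 0, mod_num, reg_num, i;

	if (offset >= buf_size)
		return;
	mod_num = buf[offset++];
	while (mod_num--) {
		if (offset >= buf_size)
			return;		/* malformed stream: stop early */
		reg_num = buf[offset++];
		if (reg_num > buf_size - offset)
			return;		/* record would run past the buffer */
		for (i = 0; i < reg_num; i++)
			printf("0x%08x\n", buf[offset + i]);
		offset += reg_num;
	}
}

int main(void)
{
	const uint32_t stream[] = { 1, 2, 0xdeadbeef, 0x12345678 };

	walk(stream, 4);
	return 0;
}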
+
+static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *bd_num = le32_to_cpu(desc_bd.data[0]);
+ if (!(*bd_num)) {
+ dev_err(dev, "The value of bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_query_all_err_info(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(dev, "failed to query error info, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev)
+{
+ u32 bd_num, desc_len, buf_len, buf_size, i;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ u32 *buf;
+ int ret;
+
+ ret = hclge_query_all_err_bd_num(hdev, &bd_num);
+ if (ret)
+ goto out;
+
+ desc_len = bd_num * sizeof(struct hclge_desc);
+ desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hclge_query_all_err_info(hdev, desc, bd_num);
+ if (ret)
+ goto err_desc;
+
+ buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN;
+ buf_size = buf_len / sizeof(u32);
+
+ desc_data = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_desc;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ memcpy(desc_data, &desc[0].data[0], buf_len);
+ for (i = 0; i < buf_size; i++)
+ buf[i] = le32_to_cpu(desc_data[i]);
+
+ hclge_handle_error_module_log(ae_dev, buf, buf_size);
+ kfree(buf);
+
+err_buf_alloc:
+ kfree(desc_data);
+err_desc:
+ kfree(desc);
+out:
+ return ret;
+}
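
Reviewer note: the allocation chain above uses the usual kernel goto-unwind so each early exit frees exactly what has been allocated so far. The same idiom in a userspace-sized sketch:

#include <stdlib.h>
#include <string.h>

static int build(char **out, size_t n)
{
	char *desc, *buf;
	int ret = 0;

	desc = calloc(n, 1);
	if (!desc)
		return -12;	/* -ENOMEM */

	buf = calloc(n, 1);
	if (!buf) {
		ret = -12;
		goto err_free_desc;	/* unwind only what exists so far */
	}

	memcpy(buf, desc, n);	/* stand-in for the real copy/convert step */
	*out = buf;		/* ownership passes to the caller */

err_free_desc:
	free(desc);		/* the scratch buffer is freed on every path */
	return ret;
}

int main(void)
{
	char *buf;

	if (!build(&buf, 32))
		free(buf);
	return 0;
}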
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index d647f3c84134..07987fb8332e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -15,6 +15,8 @@
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+#define HCLGE_RAS_REG_ERR_MASK \
+ (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK)
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
@@ -107,6 +109,10 @@
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
+#define HCLGE_DESC_DATA_MAX 8
+#define HCLGE_REG_NUM_MAX 256
+#define HCLGE_DESC_NO_DATA_LEN 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -114,6 +120,56 @@ enum hclge_err_int_type {
HCLGE_ERR_INT_RAS_FE = 3,
};
+enum hclge_mod_name_list {
+ MODULE_NONE = 0,
+ MODULE_BIOS_COMMON = 1,
+ MODULE_GE = 2,
+ MODULE_IGU_EGU = 3,
+ MODULE_LGE = 4,
+ MODULE_NCSI = 5,
+ MODULE_PPP = 6,
+ MODULE_QCN = 7,
+ MODULE_RCB_RX = 8,
+ MODULE_RTC = 9,
+ MODULE_SSU = 10,
+ MODULE_TM = 11,
+ MODULE_RCB_TX = 12,
+ MODULE_TXDMA = 13,
+ MODULE_MASTER = 14,
+ /* add new MODULE NAME for NIC here in order */
+ MODULE_ROCEE_TOP = 40,
+ MODULE_ROCEE_TIMER = 41,
+ MODULE_ROCEE_MDB = 42,
+ MODULE_ROCEE_TSP = 43,
+ MODULE_ROCEE_TRP = 44,
+ MODULE_ROCEE_SCC = 45,
+ MODULE_ROCEE_CAEP = 46,
+ MODULE_ROCEE_GEN_AC = 47,
+ MODULE_ROCEE_QMM = 48,
+ MODULE_ROCEE_LSAN = 49,
+ /* add new MODULE NAME for RoCEE here in order */
+};
+
+enum hclge_err_type_list {
+ NONE_ERROR = 0,
+ FIFO_ERROR = 1,
+ MEMORY_ERROR = 2,
+ POISON_ERROR = 3,
+ MSIX_ECC_ERROR = 4,
+ TQP_INT_ECC_ERROR = 5,
+ PF_ABNORMAL_INT_ERROR = 6,
+ MPF_ABNORMAL_INT_ERROR = 7,
+ COMMON_ERROR = 8,
+ PORT_ERROR = 9,
+ ETS_ERROR = 10,
+ NCSI_ERROR = 11,
+ GLB_ERROR = 12,
+ /* add new ERROR TYPE for NIC here in order */
+ ROCEE_NORMAL_ERR = 40,
+ ROCEE_OVF_ERR = 41,
+ /* add new ERROR TYPE for ROCEE here in order */
+};
+
struct hclge_hw_blk {
u32 msk;
const char *name;
@@ -126,11 +182,44 @@ struct hclge_hw_error {
enum hnae3_reset_type reset_level;
};
+struct hclge_hw_module_id {
+ enum hclge_mod_name_list module_id;
+ const char *msg;
+};
+
+struct hclge_hw_type_id {
+ enum hclge_err_type_list type_id;
+ const char *msg;
+};
+
+struct hclge_sum_err_info {
+ u8 reset_type;
+ u8 mod_num;
+ u8 rsv[2];
+};
+
+struct hclge_mod_err_info {
+ u8 mod_id;
+ u8 err_num;
+ u8 rsv[2];
+};
+
+struct hclge_type_reg_err_info {
+ u8 type_id;
+ u8 reg_num;
+ u8 rsv[2];
+ u32 hclge_reg[HCLGE_REG_NUM_MAX];
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
+bool hclge_find_error_source(struct hclge_dev *hdev);
+void hclge_handle_occurred_error(struct hclge_dev *hdev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_mac_tnl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6304aed49f22..dd3354a57c62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1279,6 +1279,7 @@ static u32 hclge_get_max_speed(u16 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define HCLGE_TX_SPARE_SIZE_UNIT 4096
#define SPEED_ABILITY_EXT_SHIFT 8
struct hclge_cfg_param_cmd *req;
@@ -1334,6 +1335,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
HCLGE_CFG_SPEED_ABILITY_EXT_S);
cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+ cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_VLAN_FLTR_CAP_M,
+ HCLGE_CFG_VLAN_FLTR_CAP_S);
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
@@ -1354,6 +1359,15 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1U << cfg->pf_rss_size_max :
cfg->vf_rss_size_max;
+
+ /* The unit of the tx spare buffer size queried from configuration
+ * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
+ * needed here.
+ */
+ cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
+ cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
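
Reviewer note: the spare-buffer field is stored in 4 KiB units, so the decode is a single multiply; e.g. a raw field value of 2 yields 8192 bytes:

#include <stdio.h>

int main(void)
{
	unsigned int raw = 2;		/* hypothetical value decoded from the config-file field */
	unsigned int bytes = raw * 4096;/* HCLGE_TX_SPARE_SIZE_UNIT */

	printf("%u units -> %u bytes\n", raw, bytes);	/* 2 units -> 8192 bytes */
	return 0;
}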
/* hclge_get_cfg: query the static parameter from flash
@@ -1513,6 +1527,7 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_cfg cfg;
unsigned int i;
int ret;
@@ -1534,6 +1549,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
+ hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+ if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
@@ -1729,6 +1747,7 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
kinfo->num_rx_desc = num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
+ kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
@@ -1843,6 +1862,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -2835,6 +2855,14 @@ static void hclge_reset_task_schedule(struct hclge_dev *hdev)
hclge_wq, &hdev->service_task, 0);
}
+static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
+}
+
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
@@ -3291,11 +3319,13 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ hw_err_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -3323,10 +3353,15 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- /* check for vector0 msix event source */
- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- *clearval = msix_src_reg;
+ /* check for vector0 msix event and hardware error event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
+ hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
return HCLGE_VECTOR0_EVENT_ERR;
+
+ /* check for vector0 ptp event source */
+ if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
+ *clearval = msix_src_reg;
+ return HCLGE_VECTOR0_EVENT_PTP;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
@@ -3338,9 +3373,8 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* print other vector0 event source */
dev_info(&hdev->pdev->dev,
- "CMDQ INT status:0x%x, other INT status:0x%x\n",
- cmdq_src_reg, msix_src_reg);
- *clearval = msix_src_reg;
+ "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
+ cmdq_src_reg, hw_err_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -3349,6 +3383,7 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
@@ -3377,6 +3412,7 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ unsigned long flags;
u32 clearval = 0;
u32 event_cause;
@@ -3386,21 +3422,16 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
/* vector 0 interrupt is shared with reset and mailbox source events.*/
switch (event_cause) {
case HCLGE_VECTOR0_EVENT_ERR:
- /* we do not know what type of reset is required now. This could
- * only be decided after we fetch the type of errors which
- * caused this event. Therefore, we will do below for now:
- * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
- * have defered type of reset to be used.
- * 2. Schedule the reset service task.
- * 3. When service task receives HNAE3_UNKNOWN_RESET type it
- * will fetch the correct type of reset. This would be done
- * by first decoding the types of errors.
- */
- set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
- fallthrough;
+ hclge_errhand_task_schedule(hdev);
+ break;
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
+ case HCLGE_VECTOR0_EVENT_PTP:
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ hclge_ptp_clean_tx_hwts(hdev);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+ break;
case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
* 1. Either we are not handling any mbx task and we are not
@@ -3421,15 +3452,11 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_clear_event_cause(hdev, event_cause, clearval);
- /* Enable interrupt if it is not cause by reset. And when
- * clearval equal to 0, it means interrupt status may be
- * cleared by hardware before driver reads status register.
- * For this case, vector0 interrupt also should be enabled.
- */
- if (!clearval ||
- event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ /* Enable interrupt if it is not caused by a reset or error event */
+ if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX ||
+ event_cause == HCLGE_VECTOR0_EVENT_OTHER)
hclge_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
@@ -3786,28 +3813,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
- /* first, resolve any unknown reset type to the known type(s) */
- if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
- u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_MISC_VECTOR_INT_STS);
- /* we will intentionally ignore any errors from this function
- * as we will end up in *some* reset request in any case
- */
- if (hclge_handle_hw_msix_error(hdev, addr))
- dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
- msix_sts_reg);
-
- clear_bit(HNAE3_UNKNOWN_RESET, addr);
- /* We defered the clearing of the error event which caused
- * interrupt since it was not posssible to do that in
- * interrupt context (and this is the reason we introduced
- * new UNKNOWN reset type). Now, the errors have been
- * handled and cleared in hardware we can safely enable
- * interrupts. This is an exception to the norm.
- */
- hclge_enable_vector(&hdev->misc_vector, true);
- }
-
/* return the highest priority reset level amongst all */
if (test_bit(HNAE3_IMP_RESET, addr)) {
rst_level = HNAE3_IMP_RESET;
@@ -3936,6 +3941,21 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
return ret;
}
+static void hclge_show_rst_info(struct hclge_dev *hdev)
+{
+ char *buf;
+
+ buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
+
+ dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
+
+ kfree(buf);
+}
+
static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3966,7 +3986,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev, "Reset fail!\n");
- hclge_dbg_dump_rst_info(hdev);
+ hclge_show_rst_info(hdev);
set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
@@ -4241,6 +4261,68 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
+static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_type;
+
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_type);
+ }
+
+ if (hdev->default_reset_request && ae_dev->ops->reset_event)
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+
+ /* enable interrupt after error handling completes */
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_handle_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->hw_err_reset_req = 0;
+
+ if (hclge_find_error_source(hdev)) {
+ hclge_handle_error_info_log(ae_dev);
+ hclge_handle_mac_tnl(hdev);
+ }
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_misc_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct device *dev = &hdev->pdev->dev;
+ u32 msix_sts_reg;
+
+ msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ if (hclge_handle_hw_msix_error
+ (hdev, &hdev->default_reset_request))
+ dev_info(dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+ }
+
+ hclge_handle_hw_ras_error(ae_dev);
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_errhand_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_err_recovery(hdev);
+ else
+ hclge_misc_err_recovery(hdev);
+}
+
static void hclge_reset_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
@@ -4319,19 +4401,43 @@ out:
hclge_task_schedule(hdev, delta);
}
+static void hclge_ptp_service_task(struct hclge_dev *hdev)
+{
+ unsigned long flags;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
+ !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
+ !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
+ return;
+
+ /* to avoid racing with the irq handler */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+
+ /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
+ * handler may handle it just before spin_lock_irqsave().
+ */
+ if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
+ hclge_ptp_clean_tx_hwts(hdev);
+
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+}
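
Reviewer note: the task peeks at HCLGE_STATE_PTP_TX_HANDLING cheaply, then re-checks under the lock because the irq handler may have consumed the work in between. The same check-lock-recheck shape with pthreads (in the kernel the flag is an atomic bit, which is what makes the unlocked peek safe; here it is only an advisory optimization, correctness comes from the locked re-check):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool pending;	/* set by the "irq" side, cleared by the worker */

static void do_work(void) { /* stand-in for hclge_ptp_clean_tx_hwts() */ }

static void service_task(void)
{
	if (!pending)		/* cheap unlocked peek */
		return;

	pthread_mutex_lock(&lock);
	if (pending) {		/* re-check: the other side may have won the race */
		pending = false;
		do_work();
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pending = true;
	service_task();
	return 0;
}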
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task.work);
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
+ hclge_ptp_service_task(hdev);
hclge_mailbox_service_task(hdev);
hclge_periodic_service_task(hdev);
- /* Handle reset and mbx again in case periodical task delays the
- * handling by calling hclge_task_schedule() in
+ /* Handle error recovery, reset and mbx again in case periodical task
+ * delays the handling by calling hclge_task_schedule() in
* hclge_periodic_service_task().
*/
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
hclge_mailbox_service_task(hdev);
}
@@ -5168,9 +5274,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}
static void hclge_sync_fd_state(struct hclge_dev *hdev)
@@ -5895,8 +6000,14 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
ad_data.queue_id = rule->queue_id;
}
- ad_data.use_counter = false;
- ad_data.counter_id = 0;
+ if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
+ ad_data.use_counter = true;
+ ad_data.counter_id = rule->vf_id %
+ hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
+ } else {
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+ }
ad_data.use_next_stage = false;
ad_data.next_input_key = 0;
@@ -8035,6 +8146,7 @@ int hclge_vport_start(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
if (test_bit(vport->vport_id, hdev->vport_config_block)) {
@@ -8776,8 +8888,7 @@ static bool hclge_sync_from_add_list(struct list_head *add_list,
kfree(mac_node);
} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
mac_node->state = HCLGE_MAC_TO_DEL;
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
} else {
list_del(&mac_node->node);
kfree(mac_node);
@@ -8806,8 +8917,7 @@ static void hclge_sync_from_del_list(struct list_head *del_list,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
}
}
}
@@ -8851,8 +8961,7 @@ static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
@@ -8934,8 +9043,7 @@ static void hclge_build_del_list(struct list_head *list,
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
- list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, tmp_del_list);
+ list_move_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -9030,8 +9138,7 @@ static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
list_del(&mac_node->node);
@@ -9360,12 +9467,41 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return hclge_ptp_get_cfg(hdev, ifr);
+ case SIOCSHWTSTAMP:
+ return hclge_ptp_set_cfg(hdev, ifr);
+ default:
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
+ }
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
+static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
+ bool bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+ hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
+ bypass_en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
u8 fe_type, bool filter_en, u8 vf_id)
{
@@ -9399,37 +9535,99 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return ret;
}
-#define HCLGE_FILTER_TYPE_VF 0
-#define HCLGE_FILTER_TYPE_PORT 1
-#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
-#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
-#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
-#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
- | HCLGE_FILTER_FE_ROCE_EGRESS_B)
-#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
- | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ enable, vport->vport_id);
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable,
+ vport->vport_id);
+ if (ret)
+ return ret;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+ ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
+ !enable);
+ else if (!vport->vport_id)
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS,
+ enable, 0);
+
+ return ret;
+}
-static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS, enable, 0);
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, enable, 0);
- } else {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B, enable,
- 0);
+ if (vport->vport_id) {
+ if (vport->port_base_vlan_cfg.state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return true;
+
+ if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
+ return false;
+ } else if (handle->netdev_flags & HNAE3_USER_UPE) {
+ return false;
}
- if (enable)
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
- else
- handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
+
+ if (!vport->req_vlan_fltr_en)
+ return false;
+
+ /* for compatibility with older devices, always enable the vlan filter */
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ return true;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+ if (vlan->vlan_id != 0)
+ return true;
+
+ return false;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool need_en;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ vport->req_vlan_fltr_en = request_en;
+
+ need_en = hclge_need_enable_vport_vlan_filter(vport);
+ if (need_en == vport->cur_vlan_fltr_en) {
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ ret = hclge_set_vport_vlan_filter(vport, need_en);
+ if (ret) {
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+
+ vport->cur_vlan_fltr_en = need_en;
+
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
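
Reviewer note: hclge_enable_vport_vlan_filter() caches the last value pushed to hardware (cur_vlan_fltr_en) and bails out under the mutex when nothing would change, so repeated identical requests cost no firmware commands. The same pattern in miniature:

#include <pthread.h>
#include <stdbool.h>

struct port {
	pthread_mutex_t lock;
	bool cur_en;	/* last state actually written to "hardware" */
};

static int hw_write(struct port *p, bool en) { (void)p; (void)en; return 0; }

static int set_filter(struct port *p, bool want_en)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (want_en != p->cur_en) {	/* skip redundant hardware writes */
		ret = hw_write(p, want_en);
		if (!ret)
			p->cur_en = want_en;	/* cache only on success */
	}
	pthread_mutex_unlock(&p->lock);
	return ret;
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, false };

	set_filter(&p, true);	/* writes to hardware */
	set_filter(&p, true);	/* no-op */
	return 0;
}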
+
+static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
@@ -9709,7 +9907,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
u16 port_base_vlan_state,
- u16 vlan_tag)
+ u16 vlan_tag, u8 qos)
{
int ret;
@@ -9723,7 +9921,8 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.accept_tag1 =
ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
vport->txvlan_cfg.insert_tag1_en = true;
- vport->txvlan_cfg.default_tag1 = vlan_tag;
+ vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
+ vlan_tag;
}
vport->txvlan_cfg.accept_untag1 = true;
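
Reviewer note: default_tag1 now carries the 802.1Q priority as well as the VID; with VLAN_PRIO_SHIFT being 13, qos 5 on vlan 100 packs to 0xa064:

#include <stdio.h>

#define VLAN_PRIO_SHIFT	13	/* PCP field sits above the 12-bit VID */

int main(void)
{
	unsigned short qos = 5, vid = 100;
	unsigned short tag = (qos << VLAN_PRIO_SHIFT) | vid;

	printf("tci = 0x%04x\n", tag);	/* 0xa064: prio 5, vid 100 */
	return 0;
}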
@@ -9822,6 +10021,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
@@ -9837,8 +10037,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
-
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -9852,13 +10050,15 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_alloc_vport; i++) {
u16 vlan_tag;
+ u8 qos;
vport = &hdev->vport[i];
vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ qos = vport->port_base_vlan_cfg.vlan_info.qos;
ret = hclge_vlan_offload_cfg(vport,
vport->port_base_vlan_cfg.state,
- vlan_tag);
+ vlan_tag, qos);
if (ret)
return ret;
}
@@ -10033,7 +10233,6 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
hclge_restore_mac_table_common(vport);
hclge_restore_vport_vlan_table(vport);
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
}
@@ -10060,6 +10259,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
+static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
+}
+
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
u16 port_base_vlan_state,
struct hclge_vlan_info *new_info,
@@ -10070,6 +10277,10 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
hclge_rm_vport_all_vlan_table(vport, false);
+ /* force clear VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter_hw(hdev,
htons(new_info->vlan_proto),
vport->vport_id,
@@ -10077,6 +10288,11 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ /* force add VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
+ if (ret)
+ return ret;
+
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
true);
@@ -10086,6 +10302,18 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
return hclge_add_vport_all_vlan_table(vport);
}
+static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
+ const struct hclge_vlan_info *old_cfg)
+{
+ if (new_cfg->vlan_tag != old_cfg->vlan_tag)
+ return true;
+
+ if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
+ return true;
+
+ return false;
+}
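A few illustrative inputs for the helper above (sketch; the second condition exists because a qos-only transition through zero changes whether VLAN 0 must be force-added or force-cleared):

/* new (tag/qos)  old (tag/qos)  -> need update?
 * 100/0          200/0          -> yes (tags differ)
 * 0/3            0/5            -> no  (neither qos is 0)
 * 0/5            0/0            -> yes (old qos was 0)
 */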
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
@@ -10096,10 +10324,14 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
- ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
+ vlan_info->qos);
if (ret)
return ret;
+ if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
+ goto out;
+
if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
/* add new VLAN tag */
ret = hclge_set_vlan_filter_hw(hdev,
@@ -10111,15 +10343,23 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
return ret;
/* remove old VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(old_vlan_info->vlan_proto),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret)
+ if (old_vlan_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_vlan_info->vlan_tag, ret);
return ret;
+ }
- goto update;
+ goto out;
}
ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
@@ -10127,38 +10367,38 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (ret)
return ret;
- /* update state only when disable/enable port based VLAN */
+out:
vport->port_base_vlan_cfg.state = state;
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
-update:
- vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
- vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
- vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+ vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ hclge_set_vport_vlan_fltr_change(vport);
return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
enum hnae3_port_base_vlan_state state,
- u16 vlan)
+ u16 vlan, u8 qos)
{
if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
- if (!vlan)
- return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_ENABLE;
- } else {
- if (!vlan)
- return HNAE3_PORT_BASE_VLAN_DISABLE;
- else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+ if (!vlan && !qos)
return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_MODIFY;
+
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
}
+
+ if (!vlan && !qos)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+
+ if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
+ vport->port_base_vlan_cfg.vlan_info.qos == qos)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
}
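An illustrative decision table for the helper above (sketch):

/* current state   requested vlan/qos  -> result
 * DISABLE         0 / 0               -> NOCHANGE
 * DISABLE         anything else       -> ENABLE
 * ENABLE          0 / 0               -> DISABLE
 * ENABLE          same tag and qos    -> NOCHANGE
 * ENABLE          tag or qos differs  -> MODIFY
 */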
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
@@ -10186,7 +10426,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
- vlan);
+ vlan, qos);
if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
return 0;
@@ -10209,8 +10449,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
vport->vport_id, state,
- vlan, qos,
- ntohs(proto));
+ &vlan_info);
return 0;
}
@@ -10280,9 +10519,37 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
set_bit(vlan_id, vport->vlan_del_fail_bmap);
}
+
+ hclge_set_vport_vlan_fltr_change(vport);
+
return ret;
}
+static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state))
+ continue;
+
+ ret = hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to sync vlan filter state for vport%u, ret = %d\n",
+ vport->vport_id, ret);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ return;
+ }
+ }
+}
+
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT 60
@@ -10305,6 +10572,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
@@ -10314,6 +10582,8 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
VLAN_N_VID);
}
}
+
+ hclge_sync_vlan_fltr_state(hdev);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -10807,6 +11077,8 @@ static void hclge_info_show(struct hclge_dev *hdev)
hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ dev_info(dev, "Default tx spare buffer size: %u\n",
+ hdev->tx_spare_buf_size);
dev_info(dev, "PF info end.\n");
}
@@ -11167,6 +11439,18 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
}
}
+static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -11309,6 +11593,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11325,7 +11613,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_resetting_state(hdev);
/* Log and clear the hw errors that have already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* request delayed reset for the error recovery because an immediate
* global reset on a PF affecting pending initialization of other PFs
@@ -11339,6 +11630,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
}
+ hclge_init_rxd_adv_layout(hdev);
+
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -11471,10 +11764,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
u32 new_trusted = enable ? 1 : 0;
- bool en_bc_pmc;
- int ret;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
@@ -11483,18 +11773,9 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
if (vport->vf_info.trusted == new_trusted)
return 0;
- /* Disable promisc mode for VF if it is not trusted any more. */
- if (!enable && vport->vf_info.promisc_enable) {
- en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
- ret = hclge_set_vport_promisc_mode(vport, false, false,
- en_bc_pmc);
- if (ret)
- return ret;
- vport->vf_info.promisc_enable = 0;
- hclge_inform_vf_promisc_info(vport);
- }
-
vport->vf_info.trusted = new_trusted;
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -11687,8 +11968,15 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ return ret;
+
/* Log and clear the hw errors that have already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
@@ -11720,6 +12008,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
return ret;
+ hclge_init_rxd_adv_layout(hdev);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -11735,6 +12025,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_ptp_uninit(hdev);
+ hclge_uninit_rxd_adv_layout(hdev);
hclge_uninit_mac_table(hdev);
hclge_del_all_fd_entries(hdev);
@@ -12385,21 +12677,50 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
u8 tmp_flags;
int ret;
+ u16 i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
- if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
if (!ret) {
- clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
- hclge_enable_vlan_filter(handle,
- tmp_flags & HNAE3_VLAN_FLTR);
+ clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ }
+ }
+
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ bool uc_en = false;
+ bool mc_en = false;
+ bool bc_en;
+
+ vport = &hdev->vport[i];
+
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ continue;
+
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0;
+ mc_en = vport->vf_info.request_mc_en > 0;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
+
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return;
}
+ hclge_set_vport_vlan_fltr_change(vport);
}
}
@@ -12578,7 +12899,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_run_cmd = hclge_dbg_run_cmd,
.dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
@@ -12602,6 +12922,9 @@ static const struct hnae3_ae_ops hclge_ops = {
.cls_flower_active = hclge_is_cls_flower_active,
.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
+ .set_tx_hwts_info = hclge_ptp_set_tx_info,
+ .get_rx_hwts = hclge_ptp_get_rx_hwts,
+ .get_ts_info = hclge_ptp_get_ts_info,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ff1d47308c2d..3d3352491dba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -10,6 +10,7 @@
#include <linux/kfifo.h>
#include "hclge_cmd.h"
+#include "hclge_ptp.h"
#include "hnae3.h"
#define HCLGE_MOD_VERSION "1.0"
@@ -53,6 +54,7 @@
/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
#define HCLGE_GRO_EN_REG 0x28000
+#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG 0x80000
@@ -147,6 +149,8 @@
#define HCLGE_MAX_QSET_NUM 1024
+#define HCLGE_DBG_RESET_INFO_LEN 1024
+
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -175,6 +179,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_FUN_RST_ING_B 0
/* Vector0 register bits define */
+#define HCLGE_VECTOR0_REG_PTP_INT_B 0
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
@@ -187,6 +192,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
+#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -218,14 +224,16 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
- HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
HCLGE_STATE_FD_CLEAR_ALL,
HCLGE_STATE_FD_USER_DEF_CHANGED,
+ HCLGE_STATE_PTP_EN,
+ HCLGE_STATE_PTP_TX_HANDLING,
HCLGE_STATE_MAX
};
@@ -233,6 +241,7 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
HCLGE_VECTOR0_EVENT_ERR,
+ HCLGE_VECTOR0_EVENT_PTP,
HCLGE_VECTOR0_EVENT_OTHER,
};
@@ -319,6 +328,22 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+
+enum hclge_vlan_fltr_cap {
+ HCLGE_VLAN_FLTR_DEF,
+ HCLGE_VLAN_FLTR_CAN_MDF,
+};
enum hclge_link_fail_code {
HCLGE_LF_NORMAL,
HCLGE_LF_REF_CLOCK_LOST,
@@ -349,6 +374,7 @@ struct hclge_tc_info {
struct hclge_cfg {
u8 tc_num;
+ u8 vlan_fliter_cap;
u16 tqp_desc_num;
u16 rx_buf_len;
u16 vf_rss_size_max;
@@ -358,6 +384,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
+ u32 tx_spare_buf_size;
u16 speed_ability;
u16 umv_space;
};
@@ -757,9 +784,14 @@ struct hclge_mac_tnl_stats {
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
- u8 is_kill;
- u16 vlan;
- u16 proto;
+ union {
+ struct {
+ u8 is_kill;
+ u16 vlan;
+ u16 proto;
+ };
+ u8 enable;
+ };
};
#pragma pack()
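Since union members share offset 0, the new 'enable' field aliases the first byte of the anonymous struct (the old 'is_kill' slot), which is why the mailbox handlers below can read msg_cmd->enable for the HCLGE_MBX_VLAN_RX_OFF_CFG and HCLGE_MBX_ENABLE_VLAN_FILTER subcodes. An illustrative compile-time check (sketch):

/* illustrative sketch: both fields start at offset 0 */
BUILD_BUG_ON(offsetof(struct hclge_vf_vlan_cfg, enable) !=
	     offsetof(struct hclge_vf_vlan_cfg, is_kill));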
@@ -817,6 +849,7 @@ struct hclge_dev {
u16 alloc_rss_size; /* Allocated RSS task queue */
u16 vf_rss_size_max; /* HW defined VF max RSS task queue */
u16 pf_rss_size_max; /* HW defined PF max RSS task queue */
+ u32 tx_spare_buf_size; /* HW defined TX spare buffer size */
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
@@ -909,6 +942,7 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct hclge_ptp *ptp;
};
/* VPort level vlan tag configuration for TX direction */
@@ -949,6 +983,8 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -969,7 +1005,9 @@ struct hclge_vf_info {
u32 spoofchk;
u32 max_tx_rate;
u32 trusted;
- u16 promisc_enable;
+ u8 request_uc_en;
+ u8 request_mc_en;
+ u8 request_bc_en;
};
struct hclge_vport {
@@ -988,6 +1026,8 @@ struct hclge_vport {
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ bool req_vlan_fltr_en;
+ bool cur_vlan_fltr_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
@@ -1059,8 +1099,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
@@ -1080,14 +1119,15 @@ void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto);
+ u16 state,
+ struct hclge_vlan_info *vlan_info);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
-void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 8e5f9dc8791d..e10a2c36b706 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -231,19 +231,15 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
}
-static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *req)
+static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_bc = req->msg.en_bc ? true : false;
- bool en_uc = req->msg.en_uc ? true : false;
- bool en_mc = req->msg.en_mc ? true : false;
struct hnae3_handle *handle = &vport->nic;
- int ret;
+ struct hclge_dev *hdev = vport->back;
- if (!vport->vf_info.trusted) {
- en_uc = false;
- en_mc = false;
- }
+ vport->vf_info.request_uc_en = req->msg.en_uc;
+ vport->vf_info.request_mc_en = req->msg.en_mc;
+ vport->vf_info.request_bc_en = req->msg.en_bc;
if (req->msg.en_limit_promisc)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
@@ -251,22 +247,8 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
&handle->priv_flags);
- ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
-
- vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
-
- return ret;
-}
-
-void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
-{
- u8 dest_vfid = (u8)vport->vport_id;
- u8 msg_data[2];
-
- memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
-
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
@@ -336,17 +318,17 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto)
+ u16 state,
+ struct hclge_vlan_info *vlan_info)
{
#define MSG_DATA_SIZE 8
u8 msg_data[MSG_DATA_SIZE];
memcpy(&msg_data[0], &state, sizeof(u16));
- memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
- memcpy(&msg_data[4], &qos, sizeof(u16));
- memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+ memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
+ memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
+ memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
@@ -359,49 +341,35 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
#define HCLGE_MBX_VLAN_STATE_OFFSET 0
#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
- int status = 0;
msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
- if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
- struct hnae3_handle *handle = &vport->nic;
- u16 vlan, proto;
- bool is_kill;
-
- is_kill = !!msg_cmd->is_kill;
- vlan = msg_cmd->vlan;
- proto = msg_cmd->proto;
- status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
- vlan, is_kill);
- } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
- struct hnae3_handle *handle = &vport->nic;
- bool en = msg_cmd->is_kill ? true : false;
-
- status = hclge_en_hw_strip_rxvtag(handle, en);
- } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
- struct hclge_vlan_info *vlan_info;
- u16 *state;
-
- state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
- vlan_info = (struct hclge_vlan_info *)
- &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
- status = hclge_update_port_base_vlan_cfg(vport, *state,
- vlan_info);
- } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+ switch (msg_cmd->subcode) {
+ case HCLGE_MBX_VLAN_FILTER:
+ return hclge_set_vlan_filter(handle,
+ cpu_to_be16(msg_cmd->proto),
+ msg_cmd->vlan, msg_cmd->is_kill);
+ case HCLGE_MBX_VLAN_RX_OFF_CFG:
+ return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
+ case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
/* the VF does not need to know about the port-based VLAN state
* on HNAE3_DEVICE_VERSION_V3 devices, so always report it as
* disabled if the VF queries it on such devices.
*/
resp_msg->data[0] =
- ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
+ hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
HNAE3_PORT_BASE_VLAN_DISABLE :
vport->port_base_vlan_cfg.state;
resp_msg->len = sizeof(u8);
+ return 0;
+ case HCLGE_MBX_ENABLE_VLAN_FILTER:
+ return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
+ default:
+ return 0;
}
-
- return status;
}
static int hclge_set_vf_alive(struct hclge_vport *vport,
@@ -418,16 +386,23 @@ static int hclge_set_vf_alive(struct hclge_vport *vport,
return ret;
}
-static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
- struct hclge_respond_to_vf_msg *resp_msg)
+static void hclge_get_basic_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
+ struct hclge_basic_info *basic_info;
unsigned int i;
+ basic_info = (struct hclge_basic_info *)resp_msg->data;
for (i = 0; i < kinfo->tc_info.num_tc; i++)
- resp_msg->data[0] |= BIT(i);
+ basic_info->hw_tc_map |= BIT(i);
- resp_msg->len = sizeof(u8);
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ hnae3_set_bit(basic_info->pf_caps,
+ HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
static void hclge_get_vf_queue_info(struct hclge_vport *vport,
@@ -710,7 +685,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
unsigned int flag;
int ret = 0;
- memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +712,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
trace_hclge_pf_mbx_get(hdev, req);
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
@@ -748,11 +725,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req);
break;
case HCLGE_MBX_SET_PROMISC_MODE:
- ret = hclge_set_vf_promisc_mode(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF promisc mode\n",
- ret);
+ hclge_set_vf_promisc_mode(vport, req);
break;
case HCLGE_MBX_SET_UNICAST:
ret = hclge_set_vf_uc_mac_addr(vport, req);
@@ -788,8 +761,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_GET_QDEPTH:
hclge_get_vf_queue_depth(vport, &resp_msg);
break;
- case HCLGE_MBX_GET_TCINFO:
- hclge_get_vf_tcinfo(vport, &resp_msg);
+ case HCLGE_MBX_GET_BASIC_INFO:
+ hclge_get_basic_info(vport, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_STATUS:
ret = hclge_push_vf_link_status(vport);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
new file mode 100644
index 000000000000..3b1f84502e36
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021 Hisilicon Limited.
+
+#include <linux/skbuff.h>
+#include "hclge_main.h"
+#include "hnae3.h"
+
+static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ u64 adj_val, adj_base, diff;
+ unsigned long flags;
+ bool is_neg = false;
+ u32 quo, numerator;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ is_neg = true;
+ }
+
+ adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+ adj_val = adj_base * ppb;
+ diff = div_u64(adj_val, 1000000000ULL);
+
+ if (is_neg)
+ adj_val = adj_base - diff;
+ else
+ adj_val = adj_base + diff;
+
+ /* The clock cycle is defined by three parts: quotient, numerator
+ * and denominator. For example, for a 2.5 ns cycle the quotient is
+ * 2, the denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and the
+ * numerator is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+ */
+ quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
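A quick numeric check of the adjfreq math above (illustrative sketch, using HCLGE_PTP_CYCLE_ADJ_BASE = 2 and HCLGE_PTP_CYCLE_ADJ_UNIT = 100000000):

/* ppb = 100:
 * adj_base = 2 * 100000000               = 200000000
 * diff     = adj_base * 100 / 1000000000 = 20
 * adj_val  = 200000000 + 20              = 200000020
 * quo = 2, numerator = 20  ->  cycle = 2.0000002 ns
 */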
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+ test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+ ptp->tx_skipped++;
+ return false;
+ }
+
+ ptp->tx_start = jiffies;
+ ptp->tx_skb = skb_get(skb);
+ ptp->tx_cnt++;
+
+ return true;
+}
+
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
+{
+ struct sk_buff *skb = hdev->ptp->tx_skb;
+ struct skb_shared_hwtstamps hwts;
+ u32 hi, lo;
+ u64 ns;
+
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
+ HCLGE_PTP_TX_TS_NSEC_MASK;
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
+ HCLGE_PTP_TX_TS_SEC_H_MASK;
+ hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
+ HCLGE_PTP_TX_TS_SEQID_REG);
+
+ if (skb) {
+ hdev->ptp->tx_skb = NULL;
+ hdev->ptp->tx_cleaned++;
+
+ ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
+ hwts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &hwts);
+ dev_kfree_skb_any(skb);
+ }
+
+ clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
+}
+
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned long flags;
+ u64 ns = nsec;
+ u32 sec_h;
+
+ if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ return;
+
+ /* The BD does not have enough space for the upper 16 bits of the
+ * seconds value, and this part does not change frequently, so read
+ * it from the register instead.
+ */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ hdev->ptp->last_rx = jiffies;
+ hdev->ptp->rx_cnt++;
+}
+
+static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ u32 hi, lo;
+ u64 ns;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hclge_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
+ writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
+ /* synchronize the time of phc */
+ writel(HCLGE_PTP_TIME_SYNC_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ bool is_neg = false;
+ u32 adj_val = 0;
+
+ if (delta < 0) {
+ adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
+ delta = -delta;
+ is_neg = true;
+ }
+
+ if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
+ struct timespec64 ts;
+ s64 ns;
+
+ hclge_ptp_gettimex(ptp, &ts, NULL);
+ ns = timespec64_to_ns(&ts);
+ ns = is_neg ? ns - delta : ns + delta;
+ ts = ns_to_timespec64(ns);
+ return hclge_ptp_settime(ptp, &ts);
+ }
+
+ adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(HCLGE_PTP_TIME_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_ptp_int_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_int_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to %s ptp interrupt, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query ptp config, ret = %d\n", ret);
+ return ret;
+ }
+
+ *cfg = le32_to_cpu(req->cfg);
+
+ return 0;
+}
+
+static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
+ req->cfg = cpu_to_le32(cfg);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to config ptp, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ break;
+ case HWTSTAMP_TX_ON:
+ set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_TX_EN_B;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ int rx_filter = cfg->rx_filter;
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ cfg->rx_filter = rx_filter;
+
+ return 0;
+}
+
+static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
+ struct hwtstamp_config *cfg)
+{
+ unsigned long flags = hdev->ptp->flags;
+ u32 ptp_cfg = 0;
+ int ret;
+
+ if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
+ ptp_cfg |= HCLGE_PTP_EN_B;
+
+ ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_cfg(hdev, ptp_cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->flags = flags;
+ hdev->ptp->ptp_cfg = ptp_cfg;
+
+ return 0;
+}
+
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->ts_cfg = cfg;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
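hclge_ptp_get_cfg() and hclge_ptp_set_cfg() back the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls. A minimal userspace sketch (the interface name "eth0" and the filter choice are assumptions; error handling elided):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int enable_hw_tstamp(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	return ioctl(fd, SIOCSHWTSTAMP, &ifr); /* driver may rewrite cfg.rx_filter */
}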
+
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (hdev->ptp->clock)
+ info->phc_index = ptp_clock_index(hdev->ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
+}
+
+static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp;
+
+ ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->hdev = hdev;
+ snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
+ HCLGE_DRIVER_NAME);
+ ptp->info.owner = THIS_MODULE;
+ ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
+ ptp->info.n_ext_ts = 0;
+ ptp->info.pps = 0;
+ ptp->info.adjfreq = hclge_ptp_adjfreq;
+ ptp->info.adjtime = hclge_ptp_adjtime;
+ ptp->info.gettimex64 = hclge_ptp_gettimex;
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+ "%d failed to register ptp clock, ret = %ld\n",
+ ptp->info.n_alarm, PTR_ERR(ptp->clock));
+ return -ENODEV;
+ } else if (!ptp->clock) {
+ dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
+ return 0;
+}
+
+static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
+{
+ ptp_clock_unregister(hdev->ptp->clock);
+ hdev->ptp->clock = NULL;
+ devm_kfree(&hdev->pdev->dev, hdev->ptp);
+ hdev->ptp = NULL;
+}
+
+int hclge_ptp_init(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct timespec64 ts;
+ int ret;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
+ return 0;
+
+ if (!hdev->ptp) {
+ ret = hclge_ptp_create_clock(hdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_ptp_int_en(hdev, true);
+ if (ret)
+ goto out;
+
+ set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init freq, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts mode, ret = %d\n", ret);
+ goto out;
+ }
+
+ ktime_get_real_ts64(&ts);
+ ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts time, ret = %d\n", ret);
+ goto out;
+ }
+
+ set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ dev_info(&hdev->pdev->dev, "phc initializes ok!\n");
+
+ return 0;
+
+out:
+ hclge_ptp_destroy_clock(hdev);
+
+ return ret;
+}
+
+void hclge_ptp_uninit(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!ptp)
+ return;
+
+ hclge_ptp_int_en(hdev, false);
+ clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
+ dev_err(&hdev->pdev->dev, "failed to disable phc\n");
+
+ if (ptp->tx_skb) {
+ struct sk_buff *skb = ptp->tx_skb;
+
+ ptp->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ }
+
+ hclge_ptp_destroy_clock(hdev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
new file mode 100644
index 000000000000..5a202b775471
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HCLGE_PTP_H
+#define __HCLGE_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/types.h>
+
+#define HCLGE_PTP_REG_OFFSET 0x29000
+
+#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
+#define HCLGE_PTP_TX_TS_NSEC_REG 0x4
+#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8
+#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC
+#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TX_TS_CNT_REG 0x30
+
+#define HCLGE_PTP_TIME_SEC_H_REG 0x50
+#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TIME_SEC_L_REG 0x54
+#define HCLGE_PTP_TIME_NSEC_REG 0x58
+#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
+#define HCLGE_PTP_TIME_SYNC_REG 0x5C
+#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
+#define HCLGE_PTP_TIME_ADJ_REG 0x60
+#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
+#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_DEN_REG 0x68
+#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
+#define HCLGE_PTP_CYCLE_CFG_REG 0x70
+#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0)
+#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74
+#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
+#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
+
+#define HCLGE_PTP_CYCLE_ADJ_BASE 2
+#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
+#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
+#define HCLGE_PTP_SEC_H_OFFSET 32u
+#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
+
+#define HCLGE_PTP_FLAG_EN 0
+#define HCLGE_PTP_FLAG_TX_EN 1
+#define HCLGE_PTP_FLAG_RX_EN 2
+
+struct hclge_ptp {
+ struct hclge_dev *hdev;
+ struct ptp_clock *clock;
+ struct sk_buff *tx_skb;
+ unsigned long flags;
+ void __iomem *io_base;
+ struct ptp_clock_info info;
+ struct hwtstamp_config ts_cfg;
+ spinlock_t lock; /* protects ptp registers */
+ u32 ptp_cfg;
+ u32 last_tx_seqid;
+ unsigned long tx_start;
+ unsigned long tx_cnt;
+ unsigned long tx_skipped;
+ unsigned long tx_cleaned;
+ unsigned long last_rx;
+ unsigned long rx_cnt;
+ unsigned long tx_timeout;
+};
+
+struct hclge_ptp_int_cmd {
+#define HCLGE_PTP_INT_EN_B BIT(0)
+
+ u8 int_en;
+ u8 rsvd[23];
+};
+
+enum hclge_ptp_udp_type {
+ HCLGE_PTP_UDP_NOT_TYPE,
+ HCLGE_PTP_UDP_P13F_TYPE,
+ HCLGE_PTP_UDP_P140_TYPE,
+ HCLGE_PTP_UDP_FULL_TYPE,
+};
+
+enum hclge_ptp_msg_type {
+ HCLGE_PTP_MSG_TYPE_V2_L2,
+ HCLGE_PTP_MSG_TYPE_V2,
+ HCLGE_PTP_MSG_TYPE_V2_EVENT,
+};
+
+enum hclge_ptp_msg0_type {
+ HCLGE_PTP_MSG0_V2_DELAY_REQ = 1,
+ HCLGE_PTP_MSG0_V2_PDELAY_REQ,
+ HCLGE_PTP_MSG0_V2_DELAY_RESP,
+ HCLGE_PTP_MSG0_V2_EVENT = 0xF,
+};
+
+#define HCLGE_PTP_MSG1_V2_DEFAULT 1
+
+struct hclge_ptp_cfg_cmd {
+#define HCLGE_PTP_EN_B BIT(0)
+#define HCLGE_PTP_TX_EN_B BIT(1)
+#define HCLGE_PTP_RX_EN_B BIT(2)
+#define HCLGE_PTP_UDP_EN_SHIFT 3
+#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3)
+#define HCLGE_PTP_MSG_TYPE_SHIFT 8
+#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8)
+#define HCLGE_PTP_MSG1_SHIFT 16
+#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16)
+#define HCLGE_PTP_MSG0_SHIFT 24
+#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24)
+
+ __le32 cfg;
+ u8 rsvd[20];
+};
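For illustration, the cfg word that hclge_ptp_set_rx_mode() builds for the PTPv2 event filters (PTP enabled, RX-only) decodes as follows (sketch):

/* HCLGE_PTP_EN_B | HCLGE_PTP_RX_EN_B
 * | (HCLGE_PTP_UDP_FULL_TYPE   << HCLGE_PTP_UDP_EN_SHIFT)
 * | (HCLGE_PTP_MSG_TYPE_V2     << HCLGE_PTP_MSG_TYPE_SHIFT)
 * | (HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT)
 * | (HCLGE_PTP_MSG0_V2_EVENT   << HCLGE_PTP_MSG0_SHIFT)
 * = 0x1 | 0x4 | 0x18 | 0x100 | 0x10000 | 0xf000000 = 0x0f01011d
 */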
+
+static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
+{
+ struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info);
+
+ return ptp->hdev;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev);
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_init(struct hclge_dev *hdev);
+void hclge_ptp_uninit(struct hclge_dev *hdev);
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index ebb962bad451..78d5bf1ea561 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1733,6 +1733,36 @@ int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
return 0;
}
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset %u shaper, ret = %d\n", qset_id,
+ ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
+ return 0;
+}
+
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
@@ -1775,7 +1805,7 @@ int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
enum hclge_opcode_type cmd,
- struct hclge_pri_shaper_para *para)
+ struct hclge_tm_shaper_para *para)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
@@ -1807,3 +1837,186 @@ int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
return 0;
}
+
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
+{
+ struct hclge_nq_to_qs_link_cmd *map;
+ struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
+ int ret;
+
+ map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
+ map->nq_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to qset map, ret = %d\n", ret);
+ return ret;
+ }
+ *qset_id = le16_to_cpu(map->qset_id);
+
+ /* convert qset_id to the following format, dropping the vld bit
+ *            | qs_id_h | vld | qs_id_l |
+ * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
+ *             \         \   /         /
+ *              \         \ /         /
+ * qset_id:   | 15 | 14 ~ 10 |  9 ~ 0  |
+ */
+ qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
+ HCLGE_TM_QS_ID_H_EXT_S);
+ *qset_id = 0;
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
+ qs_id_h);
+ return 0;
+}
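A worked example of the repack above (illustrative sketch):

/* raw qset_id = 0x1405:
 *   qs_id_h (bits 15~11) = 0x2, vld (bit 10) = 1, qs_id_l (bits 9~0) = 0x5
 *   repacked = (0x2 << 10) | 0x5 = 0x0805   (vld bit dropped)
 */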
+
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
+{
+#define HCLGE_TM_TC_MASK 0x7
+
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ struct hclge_desc desc;
+ int ret;
+
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
+ tc->queue_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to tc map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
+ return 0;
+}
+
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map)
+{
+ struct hclge_pg_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
+ map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ map->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg to pri map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *pri_bit_map = map->pri_bit_map;
+ return 0;
+}
+
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
+{
+ struct hclge_pg_weight_cmd *pg_weight_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
+ pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
+ pg_weight_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = pg_weight_cmd->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
+ desc.data[0] = cpu_to_le32(pg_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = (u8)le32_to_cpu(desc.data[1]);
+ return 0;
+}
+
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
+ return 0;
+}
+
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get port shaper, ret = %d\n", ret);
+ return ret;
+ }
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = port_shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index b25d76023af0..2ee9b795f71d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -199,14 +199,14 @@ struct hclge_tm_nodes_cmd {
__le16 queue_num;
};
-struct hclge_pri_shaper_para {
+struct hclge_tm_shaper_para {
+ u32 rate;
u8 ir_b;
u8 ir_u;
u8 ir_s;
u8 bs_b;
u8 bs_s;
u8 flag;
- u32 rate;
};
#define hclge_tm_set_field(dest, string, val) \
@@ -237,9 +237,22 @@ int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
u8 *link_vld);
int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para);
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
enum hclge_opcode_type cmd,
- struct hclge_pri_shaper_para *para);
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map);
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight);
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode);
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d8c5c5810b99..bd19a2d89f6c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -359,6 +359,8 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B))
+ set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
}
static __le32 hclgevf_build_api_caps(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index c6dc11b32aa7..202feb70dba5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -159,6 +159,7 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
};
enum HCLGEVF_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0db51ef15ef6..52eaf82b7cd7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -243,23 +243,31 @@ static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
}
}
-static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ struct hclge_basic_info *basic_info;
struct hclge_vf_to_pf_msg send_msg;
- u8 resp_msg;
+ unsigned long caps;
int status;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
- status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
- "VF request to get TC info from PF failed %d",
- status);
+ "failed to get basic info from pf, ret = %d", status);
return status;
}
- hdev->hw_tc_map = resp_msg;
+ basic_info = (struct hclge_basic_info *)resp_msg;
+
+ hdev->hw_tc_map = basic_info->hw_tc_map;
+ hdev->mbx_api_version = basic_info->mbx_api_version;
+ caps = basic_info->pf_caps;
+ if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
return 0;
}
@@ -1528,8 +1536,7 @@ static void hclgevf_sync_from_add_list(struct list_head *add_list,
kfree(mac_node);
} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
mac_node->state = HCLGEVF_MAC_TO_DEL;
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
} else {
list_del(&mac_node->node);
kfree(mac_node);
@@ -1554,8 +1561,7 @@ static void hclgevf_sync_from_del_list(struct list_head *del_list,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
}
}
}
@@ -1591,8 +1597,7 @@ static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGEVF_MAC_TO_DEL:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGEVF_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
@@ -1642,6 +1647,22 @@ static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}
+static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct hclge_vf_to_pf_msg send_msg;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_ENABLE_VLAN_FILTER);
+ send_msg.data[0] = enable ? 1 : 0;
+
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
@@ -2466,6 +2487,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ ret = hclgevf_get_basic_info(hdev);
+ if (ret)
+ return ret;
+
/* get current port based vlan state from PF */
ret = hclgevf_get_port_base_vlan_filter_state(hdev);
if (ret)
@@ -2481,12 +2506,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_get_pf_media_type(hdev);
- if (ret)
- return ret;
-
- /* get tc configuration from PF */
- return hclgevf_get_tc_info(hdev);
+ return hclgevf_get_pf_media_type(hdev);
}
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
@@ -3242,6 +3262,18 @@ static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
+}
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -3279,6 +3311,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ hclgevf_init_rxd_adv_layout(hdev);
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -3379,6 +3413,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hclgevf_init_rxd_adv_layout(hdev);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@ -3405,6 +3441,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
struct hclge_vf_to_pf_msg send_msg;
hclgevf_state_uninit(hdev);
+ hclgevf_uninit_rxd_adv_layout(hdev);
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
@@ -3784,6 +3821,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_vlan_filter = hclgevf_enable_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
.set_default_reset_request = hclgevf_set_def_reset_request,
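Several hunks above replace a list_del()/list_add_tail() pair with list_move_tail(). The kernel helper is defined as exactly that pair, so the substitution is purely a readability cleanup. A self-contained sketch of the equivalence, using a minimal stand-in for <linux/list.h>:

#include <stdio.h>

/* Minimal circular doubly-linked list mimicking <linux/list.h>; a sketch,
 * not the kernel implementation.
 */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* list_move_tail() is exactly list_del() + list_add_tail(), which is why
 * the hunks above can swap the two-call sequence for the single helper
 * with no behavioural change.
 */
static void list_move_tail(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add_tail(e, head);
}

int main(void)
{
	struct list_head src, dst, node;

	INIT_LIST_HEAD(&src);
	INIT_LIST_HEAD(&dst);
	list_add_tail(&node, &src);
	list_move_tail(&node, &dst);
	printf("moved: %d\n", dst.next == &node && src.next == &src);
	return 0;
}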
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 265c9b0b4728..d7d02848d674 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -47,6 +47,7 @@
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
+#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
/* bar registers for rcb */
#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
@@ -284,6 +285,7 @@ struct hclgevf_dev {
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
+ u16 mbx_api_version;
u16 num_tqps; /* num task queue pairs of this VF */
u16 alloc_rss_size; /* allocated RSS task queue */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index dc024ef521c0..162d3c330dec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1663,7 +1663,6 @@ static void hinic_diag_test(struct net_device *netdev,
err = hinic_port_link_state(nic_dev, &link_state);
if (!err && link_state == HINIC_LINK_STATE_UP)
netif_carrier_on(netdev);
-
}
static int hinic_set_phys_id(struct net_device *netdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 5a6bbee819cd..307a6d4af993 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -223,7 +223,7 @@ static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
- if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
CMDQ_WQE_HEADER(wqe)->saved_data |=
HINIC_SAVED_DATA_SET(1, ARM);
else
@@ -594,7 +594,7 @@ static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
}
/**
- * cmdq_arm_ceq_handler - cmdq completion event handler for sync command
+ * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
* @cmdq: the cmdq of the command
* @cons_idx: the consumer index to update the error code for
* @errcode: the error code
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0c74f6674634..428108eb10d2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -48,7 +48,7 @@ enum io_status {
};
/**
- * get_capability - convert device capabilities to NIC capabilities
+ * parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
* @dev_cap: device capabilities from FW
*
@@ -92,7 +92,7 @@ static int parse_capability(struct hinic_hwdev *hwdev,
}
/**
- * get_cap_from_fw - get device capabilities from FW
+ * get_capability - get device capabilities from FW
* @pfhwdev: the PF HW device to get capabilities for
*
* Return 0 - Success, negative - Failure
@@ -257,7 +257,7 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
- if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) {
+ if (err || out_size != sizeof(fw_ctxt) || fw_ctxt.status) {
dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 0x%x, out size: 0x%x\n",
err, fw_ctxt.status, out_size);
return -EIO;
@@ -346,7 +346,7 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
}
/**
- * clear_io_resource - set the IO resources as not active in the NIC
+ * clear_io_resources - set the IO resources as not active in the NIC
* @hwdev: the NIC HW device
*
* Return 0 - Success, negative - Failure
@@ -424,7 +424,7 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
&cmd_base_qpn, sizeof(cmd_base_qpn),
&cmd_base_qpn, &out_size);
- if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) {
+ if (err || out_size != sizeof(cmd_base_qpn) || cmd_base_qpn.status) {
dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
err, cmd_base_qpn.status, out_size);
return -EIO;
@@ -605,8 +605,8 @@ static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
hwif = hwdev->hwif;
pdev = hwif->pdev;
- if ((cmd < HINIC_MGMT_MSG_CMD_BASE) ||
- (cmd >= HINIC_MGMT_MSG_CMD_MAX)) {
+ if (cmd < HINIC_MGMT_MSG_CMD_BASE ||
+ cmd >= HINIC_MGMT_MSG_CMD_MAX) {
dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
return;
}
@@ -619,7 +619,7 @@ static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
HINIC_CB_ENABLED,
HINIC_CB_ENABLED | HINIC_CB_RUNNING);
- if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler))
+ if (cb_state == HINIC_CB_ENABLED && nic_cb->handler)
nic_cb->handler(nic_cb->handle, buf_in,
in_size, buf_out, out_size);
else
@@ -1090,7 +1090,7 @@ struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
}
/**
- * hinic_hwdev_get_sq - get RQ
+ * hinic_hwdev_get_rq - get RQ
* @hwdev: the NIC HW device
* @i: the position of the RQ
*
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 19942fef99d9..d3fc05a07fdb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -254,8 +254,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
HINIC_EQE_ENABLED,
HINIC_EQE_ENABLED |
HINIC_EQE_RUNNING);
- if ((eqe_state == HINIC_EQE_ENABLED) &&
- (hwe_cb->hwe_handler))
+ if (eqe_state == HINIC_EQE_ENABLED &&
+ hwe_cb->hwe_handler)
hwe_cb->hwe_handler(hwe_cb->handle,
aeqe_curr->data, size);
else
@@ -299,7 +299,7 @@ static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
HINIC_EQE_ENABLED,
HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
- if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
+ if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler)
ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
else
dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index cab38ff0713c..0428faa68e80 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -334,7 +334,7 @@ static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
}
/**
- * dma_attr_table_init - initialize the default dma attributes
+ * dma_attr_init - initialize the default dma attributes
* @hwif: the HW interface of a pci function device
**/
static void dma_attr_init(struct hinic_hwif *hwif)
@@ -395,7 +395,7 @@ static void __print_selftest_reg(struct hinic_hwif *hwif)
/**
* hinic_init_hwif - initialize the hw interface
* @hwif: the HW interface of a pci function device
- * @pdev: the pci device for acessing PCI resources
+ * @pdev: the pci device for accessing PCI resources
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 4ef4008e65bd..a6e43d686293 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -137,7 +137,7 @@ static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
&out_param);
- if ((err) || (out_param != 0)) {
+ if (err || out_param != 0) {
dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
err = -EFAULT;
}
@@ -181,7 +181,7 @@ static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
&out_param);
- if ((err) || (out_param != 0)) {
+ if (err || out_param != 0) {
dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
err = -EFAULT;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 817173f1fbb7..ebc77771f5da 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -294,7 +294,7 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
- if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) {
+ if (buf_out && recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE) {
memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
*out_size = recv_msg->msg_len;
}
@@ -411,7 +411,7 @@ static void recv_mgmt_msg_work_handler(struct work_struct *work)
HINIC_MGMT_CB_ENABLED,
HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
- if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
+ if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb)
mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
mgmt_work->msg, mgmt_work->msg_len,
buf_out, &out_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index dcba4d009bad..336248aa2e48 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -894,7 +894,7 @@ struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
}
/**
- * hinic_put_wqe - release the ci for new wqes
+ * hinic_rq_put_wqe - release the ci for new wqes
* @rq: recv queue
* @cons_idx: consumer index of the wqe
* @wqe_size: the size of the wqe
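The hinic hunks in this series mostly repair kernel-doc headers whose names drifted from the functions they document (cmdq_arm_ceq_handler vs cmdq_sync_cmd_handler, hinic_put_wqe vs hinic_rq_put_wqe, and so on); scripts/kernel-doc and W=1 builds warn on such mismatches. A compilable sketch of a conforming block, with an illustrative function rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

struct recv_queue { int dummy; };	/* illustrative stand-in */

/**
 * example_rq_put_wqe - release the ci for new wqes
 * @rq: recv queue
 * @cons_idx: consumer index of the wqe
 * @wqe_size: the size of the wqe
 *
 * The name on the first kernel-doc line must match the function it sits
 * on; scripts/kernel-doc and W=1 builds flag any drift, which is what the
 * hinic renames above fix.
 **/
static void example_rq_put_wqe(struct recv_queue *rq, uint16_t cons_idx,
			       unsigned int wqe_size)
{
	(void)rq; (void)cons_idx; (void)wqe_size;
}

int main(void)
{
	struct recv_queue rq = { 0 };

	example_rq_put_wqe(&rq, 0, 0);
	puts("ok");
	return 0;
}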
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 5dc3743f8091..7f0f1aa3cedd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -89,6 +89,7 @@ static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
return (((idx) >> ((wq)->wqebbs_per_page_shift))
& ((wq)->num_q_pages - 1));
}
+
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 9a9b09401d01..405ee4d2d2b1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -172,7 +172,6 @@ static int create_txqs(struct hinic_dev *nic_dev)
"Failed to add SQ%d debug\n", i);
goto err_add_sq_dbg;
}
-
}
return 0;
@@ -233,7 +232,7 @@ static void free_txqs(struct hinic_dev *nic_dev)
}
/**
- * create_txqs - Create the Logical Rx Queues of specific NIC device
+ * create_rxqs - Create the Logical Rx Queues of specific NIC device
* @nic_dev: the specific NIC device
*
* Return 0 - Success, negative - Failure
@@ -289,7 +288,7 @@ err_init_rxq:
}
/**
- * free_txqs - Free the Logical Rx Queues of specific NIC device
+ * free_rxqs - Free the Logical Rx Queues of specific NIC device
* @nic_dev: the specific NIC device
**/
static void free_rxqs(struct hinic_dev *nic_dev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index eb97f2d6b1ad..28ae6f1201a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -128,7 +128,7 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
&port_mac_cmd, sizeof(port_mac_cmd),
&port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
+ if (err || out_size != sizeof(port_mac_cmd) || port_mac_cmd.status) {
dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
err, port_mac_cmd.status, out_size);
return -EFAULT;
@@ -263,7 +263,7 @@ int hinic_port_link_state(struct hinic_dev *nic_dev,
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
&link_cmd, sizeof(link_cmd),
&link_cmd, &out_size);
- if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) {
+ if (err || out_size != sizeof(link_cmd) || link_cmd.status) {
dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
err, link_cmd.status, out_size);
return -EINVAL;
@@ -297,7 +297,7 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
&port_state, sizeof(port_state),
&port_state, &out_size);
- if (err || (out_size != sizeof(port_state)) || port_state.status) {
+ if (err || out_size != sizeof(port_state) || port_state.status) {
dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
err, port_state.status, out_size);
return -EFAULT;
@@ -329,7 +329,7 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
&func_state, sizeof(func_state),
&func_state, &out_size);
- if (err || (out_size != sizeof(func_state)) || func_state.status) {
+ if (err || out_size != sizeof(func_state) || func_state.status) {
dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n",
err, func_state.status, out_size);
return -EFAULT;
@@ -359,7 +359,7 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
port_cap, sizeof(*port_cap),
port_cap, &out_size);
- if (err || (out_size != sizeof(*port_cap)) || port_cap->status) {
+ if (err || out_size != sizeof(*port_cap) || port_cap->status) {
dev_err(&pdev->dev,
"Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n",
err, port_cap->status, out_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index cce08647b9b2..fed3b6bc0d76 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -118,6 +118,7 @@ static void rx_csum(struct hinic_rxq *rxq, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
}
+
/**
* rx_alloc_skb - allocate skb and map it to dma address
* @rxq: rx queue
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 710c4ff7bc0e..c5bdb0d374ef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -660,7 +660,7 @@ static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
}
/**
- * free_all_rx_skbs - free all skbs in tx queue
+ * free_all_tx_skbs - free all skbs in tx queue
* @txq: tx queue
**/
static void free_all_tx_skbs(struct hinic_txq *txq)
@@ -717,7 +717,7 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
/* Reading a WQEBB to get real WQE size and consumer index. */
sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
- if ((!sq_wqe) ||
+ if (!sq_wqe ||
(((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
break;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ea55314b209d..d5df131b183c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2618,10 +2618,8 @@ static int ehea_restart_qps(struct net_device *dev)
u16 dummy16 = 0;
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
- if (!cb0) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cb0)
+ return -ENOMEM;
for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
@@ -2641,6 +2639,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2653,6 +2652,7 @@ static int ehea_restart_qps(struct net_device *dev)
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
netdev_err(dev, "modify_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2661,6 +2661,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (2)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2867,14 +2868,14 @@ out:
return ret;
}
-static ssize_t ehea_show_port_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t log_port_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
return sprintf(buf, "%d", port->logical_port_id);
}
-static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);
+static DEVICE_ATTR_RO(log_port_id);
static void logical_port_release(struct device *dev)
{
@@ -3113,7 +3114,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
return NULL;
}
-static ssize_t ehea_probe_port(struct device *dev,
+static ssize_t probe_port_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3168,9 +3169,9 @@ static ssize_t ehea_probe_port(struct device *dev,
return (ssize_t) count;
}
-static ssize_t ehea_remove_port(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t remove_port_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
@@ -3203,8 +3204,8 @@ static ssize_t ehea_remove_port(struct device *dev,
return (ssize_t) count;
}
-static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
-static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);
+static DEVICE_ATTR_WO(probe_port);
+static DEVICE_ATTR_WO(remove_port);
static int ehea_create_device_sysfs(struct platform_device *dev)
{
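The ehea conversion works because DEVICE_ATTR_RO()/DEVICE_ATTR_WO() token-paste the attribute name to find their callbacks (<name>_show for 0444, <name>_store for 0200), which is why ehea_show_port_id() and friends are renamed first. A simplified sketch of the mechanism; the real macros in include/linux/device.h carry more fields:

#include <stdio.h>

/* Simplified stand-ins: the point is that the callback name is derived by
 * token pasting, so the function must be called <attr>_show.
 */
struct device_attribute {
	const char *name;
	int mode;
	const char *(*show)(void);
};

#define __ATTR_RO(_name) \
	{ .name = #_name, .mode = 0444, .show = _name##_show }
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

static const char *log_port_id_show(void)
{
	return "7";	/* the driver formats port->logical_port_id here */
}

DEVICE_ATTR_RO(log_port_id);

int main(void)
{
	printf("%s (mode %o) -> %s\n", dev_attr_log_port_id.name,
	       dev_attr_log_port_id.mode, dev_attr_log_port_id.show());
	return 0;
}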
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index aa9f651288d5..09d3ac374b2d 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -77,7 +77,7 @@ struct emac_regs {
struct {
u32 rsvd1;
u32 revid;
- u32 rsvd2[2];
+ u32 rsvd2[2];
u32 iaht1; /* Reset, R */
u32 iaht2; /* Reset, R */
u32 iaht3; /* Reset, R */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7fea9ae60f13..737ba85e409f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1285,36 +1285,41 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
iph_proto = iph6->nexthdr;
}
- /* In OVS environment, when a flow is not cached, specifically for a
- * new TCP connection, the first packet information is passed up
+ /* When CSO is enabled the TCP checksum may have been set to NULL by
+ * the sender given that we zeroed out TCP checksum field in
+ * transmit path (refer ibmveth_start_xmit routine). In this case set
+ * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will
+ * then be recalculated by the destination NIC (CSO must be enabled
+ * on the destination NIC).
+ *
+ * In an OVS environment, when a flow is not cached, specifically for a
+ * new TCP connection, the first packet information is passed up to
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * Given that we zeroed out TCP checksum field in transmit path
- * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
- * OVS computed checksum will be incorrect w/o TCP pseudo checksum
- * in the packet. This leads to OVS dropping the packet and hence
- * TCP retransmissions are seen.
- *
- * So, re-compute TCP pseudo header checksum.
+ * So, re-compute TCP pseudo header checksum when configured for
+ * trunk mode.
*/
- if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+ if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
-
- tcphdrlen = skb->len - iphlen;
-
- /* Recompute TCP pseudo header checksum */
- if (skb_proto == ETH_P_IP)
- tcph->check = ~csum_tcpudp_magic(iph->saddr,
+ if (tcph->check == 0x0000) {
+ /* Recompute TCP pseudo header checksum */
+ if (adapter->is_active_trunk) {
+ tcphdrlen = skb->len - iphlen;
+ if (skb_proto == ETH_P_IP)
+ tcph->check =
+ ~csum_tcpudp_magic(iph->saddr,
iph->daddr, tcphdrlen, iph_proto, 0);
- else if (skb_proto == ETH_P_IPV6)
- tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check =
+ ~csum_ipv6_magic(&iph6->saddr,
&iph6->daddr, tcphdrlen, iph_proto, 0);
-
- /* Setup SKB fields for checksum offload */
- skb_partial_csum_set(skb, iphlen,
- offsetof(struct tcphdr, check));
- skb_reset_network_header(skb);
+ }
+ /* Setup SKB fields for checksum offload */
+ skb_partial_csum_set(skb, iphlen,
+ offsetof(struct tcphdr, check));
+ skb_reset_network_header(skb);
+ }
}
}
@@ -1799,8 +1804,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
kobj);
- struct net_device *netdev = dev_get_drvdata(
- container_of(kobj->parent, struct device, kobj));
+ struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
long rc;
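The reworked ibmveth comment refers to seeding tcph->check with the TCP pseudo-header checksum so a CHECKSUM_PARTIAL packet can be completed later by OVS or a destination NIC. A sketch of that seed computation, assuming host-order IPv4 addresses for simplicity; the kernel's csum_tcpudp_magic() returns the one's complement of this folded sum, so storing ~csum_tcpudp_magic(...) yields the value computed below:

#include <stdint.h>
#include <stdio.h>

/* Fold the 32-bit pseudo-header sum with end-around carry; the result is
 * the CHECKSUM_PARTIAL seed, not the final complemented checksum.
 */
static uint16_t tcp_pseudo_seed(uint32_t saddr, uint32_t daddr,
				uint16_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	while (sum >> 16)			/* end-around carry fold */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, 40-byte TCP segment (proto 6) */
	printf("0x%04x\n", tcp_pseudo_seed(0xc0000201, 0xc0000202, 40, 6));
	return 0;
}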
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5788bb956d73..374a75d4faea 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -95,7 +95,7 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
-static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
+static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -141,6 +143,29 @@ static const struct ibmvnic_stat ibmvnic_stats[] = {
{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
+static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
+ crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
+static int send_version_xchg(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.version_exchange.first = IBMVNIC_CRQ_CMD;
+ crq.version_exchange.cmd = VERSION_EXCHANGE;
+ crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
unsigned long length, unsigned long *number,
unsigned long *irq)
@@ -209,12 +234,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
mutex_lock(&adapter->fw_lock);
adapter->fw_done_rc = 0;
reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr,
- ltb->size, ltb->map_id);
+
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
if (rc) {
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return rc;
+ dev_err(dev, "send_request_map failed, rc = %d\n", rc);
+ goto out;
}
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
@@ -222,20 +246,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
dev_err(dev,
"Long term map request aborted or timed out,rc = %d\n",
rc);
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return rc;
+ goto out;
}
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
+ rc = -1;
+ goto out;
+ }
+ rc = 0;
+out:
+ if (rc) {
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return -1;
+ ltb->buff = NULL;
}
mutex_unlock(&adapter->fw_lock);
- return 0;
+ return rc;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
@@ -255,14 +282,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ ltb->buff = NULL;
+ ltb->map_id = 0;
}
-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
{
- if (!ltb->buff)
- return -EINVAL;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
memset(ltb->buff, 0, ltb->size);
+
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+
+ reinit_completion(&adapter->fw_done);
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_info(dev,
+ "Reset failed, long term map request timed out or aborted\n");
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ if (adapter->fw_done_rc) {
+ dev_info(dev,
+ "Reset failed, attempting to free and reallocate buffer\n");
+ free_long_term_buff(adapter, ltb);
+ mutex_unlock(&adapter->fw_lock);
+ return alloc_long_term_buff(adapter, ltb, ltb->size);
+ }
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -298,7 +355,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
rx_scrq = adapter->rx_scrq[pool->index];
ind_bufp = &rx_scrq->ind_buf;
- for (i = 0; i < count; ++i) {
+
+ /* netdev_alloc_skb() could have failed after we saved a few skbs
+ * in the indir_buf and we would not have sent them to VIOS yet.
+ * To account for them, start the loop at ind_bufp->index rather
+ * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
+ * be 0.
+ */
+ for (i = ind_bufp->index; i < count; ++i) {
skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
dev_err(dev, "Couldn't replenish rx buff\n");
@@ -484,7 +548,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
rx_pool->size *
rx_pool->buff_size);
} else {
- rc = reset_long_term_buff(&rx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter,
+ &rx_pool->long_term_buff);
}
if (rc)
@@ -607,11 +672,12 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_tx_pool *tx_pool)
{
int rc, i;
- rc = reset_long_term_buff(&tx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
if (rc)
return rc;
@@ -638,10 +704,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
- rc = reset_one_tx_pool(&adapter->tso_pool[i]);
+ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+ rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
return rc;
- rc = reset_one_tx_pool(&adapter->tx_pool[i]);
+ rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
if (rc)
return rc;
}
@@ -734,8 +801,11 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tso_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
- if (!adapter->tso_pool)
+ if (!adapter->tso_pool) {
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
return -1;
+ }
adapter->num_active_tx_pools = tx_subcrqs;
@@ -846,9 +916,10 @@ static const char *adapter_state_to_string(enum vnic_state state)
return "REMOVING";
case VNIC_REMOVED:
return "REMOVED";
- default:
- return "UNKNOWN";
+ case VNIC_DOWN:
+ return "DOWN";
}
+ return "UNKNOWN";
}
static int ibmvnic_login(struct net_device *netdev)
@@ -1180,6 +1251,11 @@ static int __ibmvnic_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
+ if (prev_state == VNIC_CLOSED) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+ }
+
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1502,7 +1578,8 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @txbuff: tx buffer
+ * @skb: tx socket buffer
+ * @indir_arr: indirect array
* @num_entries: number of descriptors to be sent
* @hdr_field: bit field determining which headers will be sent
*
@@ -1583,7 +1660,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
ind_bufp->index = 0;
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+ !test_bit(0, &adapter->resetting)) {
netif_wake_subqueue(adapter->netdev, queue_num);
netdev_dbg(adapter->netdev, "Started queue %d\n",
queue_num);
@@ -1676,7 +1754,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1946,9 +2023,10 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
return "TIMEOUT";
case VNIC_RESET_CHANGE_PARAM:
return "CHANGE_PARAM";
- default:
- return "UNKNOWN";
+ case VNIC_RESET_PASSIVE_INIT:
+ return "PASSIVE_INIT";
}
+ return "UNKNOWN";
}
/*
@@ -2085,10 +2163,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED) {
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
rc = 0;
goto out;
}
@@ -2214,10 +2292,10 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc)
goto out;
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED)
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
goto out;
rc = ibmvnic_login(netdev);
@@ -2270,6 +2348,76 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
return rwi;
}
+/**
+ * do_passive_init - complete probing when partner device is detected.
+ * @adapter: ibmvnic_adapter struct
+ *
+ * If the ibmvnic device does not have a partner device to communicate with at boot
+ * and that partner device comes online at a later time, this function is called
+ * to complete the initialization process of ibmvnic device.
+ * Caller is expected to hold rtnl_lock().
+ *
+ * Returns non-zero if sub-CRQs are not initialized properly, leaving the device
+ * in the down state.
+ * Returns 0 upon success and the device is in PROBED state.
+ */
+
+static int do_passive_init(struct ibmvnic_adapter *adapter)
+{
+ unsigned long timeout = msecs_to_jiffies(30000);
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ netdev_dbg(netdev, "Partner device found, probing.\n");
+
+ adapter->state = VNIC_PROBING;
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+ adapter->crq.active = true;
+
+ rc = send_crq_init_complete(adapter);
+ if (rc)
+ goto out;
+
+ rc = send_version_xchg(adapter);
+ if (rc)
+ netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
+
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
+ goto init_failed;
+ }
+
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+
+ adapter->state = VNIC_PROBED;
+ netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
+
+ return 0;
+
+init_failed:
+ release_sub_crqs(adapter, 1);
+out:
+ adapter->state = VNIC_DOWN;
+ return rc;
+}
+
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
@@ -2306,7 +2454,13 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (adapter->force_reset_recovery) {
+ if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
+ rtnl_lock();
+ rc = do_passive_init(adapter);
+ rtnl_unlock();
+ if (!rc)
+ netif_carrier_on(adapter->netdev);
+ } else if (adapter->force_reset_recovery) {
/* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
@@ -2402,8 +2556,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- list_for_each(entry, &adapter->rwi_list) {
- tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
reset_reason_to_string(reason));
@@ -3140,6 +3293,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
i);
+ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
if (adapter->tx_scrq[i]->irq) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
@@ -3213,7 +3367,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && rc != H_FUNCTION)
+ if (rc && (rc != H_FUNCTION))
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -3776,18 +3930,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
return 0;
}
-static int send_version_xchg(struct ibmvnic_adapter *adapter)
-{
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.version_exchange.first = IBMVNIC_CRQ_CMD;
- crq.version_exchange.cmd = VERSION_EXCHANGE;
- crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
-
- return ibmvnic_send_crq(adapter, &crq);
-}
-
struct vnic_login_client_data {
u8 type;
__be16 len;
@@ -3820,21 +3962,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
vlcd->type = 1;
len = strlen(os_name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, os_name, len);
+ strscpy(vlcd->name, os_name, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, utsname()->nodename, len);
+ strscpy(vlcd->name, utsname()->nodename, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, adapter->netdev->name, len);
+ strscpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
@@ -4301,7 +4443,7 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq,
complete:
if (adapter->fw_version[0] == '\0')
- strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
+ strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
complete(&adapter->fw_done);
}
@@ -4907,7 +5049,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
+ if (adapter->state == VNIC_DOWN)
+ rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
+ else
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
if (rc && rc != -EBUSY) {
/* We were unable to schedule the failover
* reset either because the adapter was still
@@ -5330,6 +5477,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ bool init_success;
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -5376,6 +5524,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ init_success = false;
do {
rc = init_crq_queue(adapter);
if (rc) {
@@ -5385,10 +5534,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
rc = ibmvnic_reset_init(adapter, false);
- if (rc && rc != EAGAIN)
- goto ibmvnic_init_fail;
} while (rc == EAGAIN);
+ /* We are ignoring the error from ibmvnic_reset_init() assuming that the
+ * partner is not ready. CRQ is not active. When the partner becomes
+ * ready, we will do the passive init reset.
+ */
+
+ if (!rc)
+ init_success = true;
+
rc = init_stats_buffers(adapter);
if (rc)
goto ibmvnic_init_fail;
@@ -5397,10 +5552,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (rc)
goto ibmvnic_stats_fail;
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
- netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
- netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
-
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
goto ibmvnic_dev_file_err;
@@ -5413,7 +5564,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
dev_info(&dev->dev, "ibmvnic registered\n");
- adapter->state = VNIC_PROBED;
+ if (init_success) {
+ adapter->state = VNIC_PROBED;
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+ } else {
+ adapter->state = VNIC_DOWN;
+ }
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index c1d39a748546..22df602323bc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -851,14 +851,16 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED};
+ VNIC_REMOVED,
+ VNIC_DOWN};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT,
- VNIC_RESET_CHANGE_PARAM};
+ VNIC_RESET_CHANGE_PARAM,
+ VNIC_RESET_PASSIVE_INIT};
struct ibmvnic_rwi {
enum ibmvnic_reset_reason reset_reason;
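Dropping the default: label from adapter_state_to_string() and reset_reason_to_string() pairs with the new VNIC_DOWN and VNIC_RESET_PASSIVE_INIT enumerators: with every enumerator listed, -Wswitch flags any future value the switch forgets, while the fall-through return still handles out-of-range input. A small sketch of the idiom, with an abbreviated illustrative enum:

#include <stdio.h>

enum vnic_state { VNIC_PROBING = 1, VNIC_OPEN, VNIC_REMOVED, VNIC_DOWN };

/* No default: label, so -Wswitch warns if an enumerator is ever missing;
 * the return after the switch still covers out-of-range values.
 */
static const char *state_to_string(enum vnic_state s)
{
	switch (s) {
	case VNIC_PROBING: return "PROBING";
	case VNIC_OPEN:    return "OPEN";
	case VNIC_REMOVED: return "REMOVED";
	case VNIC_DOWN:    return "DOWN";
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", state_to_string(VNIC_DOWN));
	return 0;
}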
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index c1d155690341..82744a7501c7 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,6 +241,7 @@ config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
imply PTP_1588_CLOCK
depends on PCI
+ select AUXILIARY_BUS
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
@@ -294,9 +295,11 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ select AUXILIARY_BUS
select DIMLIB
select NET_DEVLINK
select PLDMFW
+ imply PTP_1588_CLOCK
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index f8d78af76d7d..1b0958bd24f6 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1395,7 +1395,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
u8 phy_type;
int without_mii;
- phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+ phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
switch (phy_type) {
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
@@ -1515,7 +1515,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -2269,9 +2269,9 @@ static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
- (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
- !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+ (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
+ !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
+ ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
}
static int e100_up(struct nic *nic)
@@ -2926,7 +2926,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Wol magic packet can be enabled from eeprom */
if ((nic->mac >= mac_82558_D101_A4) &&
- (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+ (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
nic->flags |= wol_magic;
device_set_wakeup_enable(&pdev->dev, true);
}
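The e100 fixes convert EEPROM words with le16_to_cpu() before testing flag bits; the words are stored little-endian, so on big-endian hosts the untranslated bit tests were hitting the wrong byte. A portable sketch of the conversion (a stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian on-disk word into host order regardless of the
 * host's endianness, which is what le16_to_cpu() guarantees in the kernel.
 */
static uint16_t le16_to_cpu_sketch(const uint8_t bytes[2])
{
	return (uint16_t)bytes[0] | ((uint16_t)bytes[1] << 8);
}

int main(void)
{
	const uint8_t raw[2] = { 0x34, 0x12 }; /* stored LE: value 0x1234 */
	printf("0x%04x\n", le16_to_cpu_sketch(raw)); /* 0x1234 on any host */
	return 0;
}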
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index f976e9daa3d8..3c51ee94fa00 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -513,7 +513,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = e1000_write_eeprom(hw, first_word,
last_word - first_word + 1, eeprom_buff);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 19cf36360933..1042e79a1397 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2522,7 +2522,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* turn it on. For compatibility with a TBI link
* partner, we will store bad packets. Some
* frames have an additional byte on the end and
- * will look like CRC errors to to the hardware.
+ * will look like CRC errors to the hardware.
*/
if (!hw->tbi_compatibility_on) {
hw->tbi_compatibility_on = true;
@@ -2723,7 +2723,7 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
* e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
* @hw: Struct containing variables accessed by shared code
*
- * Bits are shifted in in MSB to LSB order.
+ * Bits are shifted in MSB to LSB order.
*/
static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 042de276e632..c2a109126c27 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -5245,7 +5245,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 590ad110d383..cf7b3887da1d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4639,7 +4639,7 @@ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
* ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
- * register, so the the bus width is hard coded.
+ * register, so the bus width is hard coded.
**/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 88e9035b75cf..d150dade06cf 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5223,18 +5223,20 @@ static void e1000_watchdog_task(struct work_struct *work)
pm_runtime_resume(netdev->dev.parent);
/* Checking if MAC is in DMoff state*/
- pcim_state = er32(STATUS);
- while (pcim_state & E1000_STATUS_PCIM_STATE) {
- if (tries++ == dmoff_exit_timeout) {
- e_dbg("Error in exiting dmoff\n");
- break;
- }
- usleep_range(10000, 20000);
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
pcim_state = er32(STATUS);
-
- /* Checking if MAC exited DMoff state */
- if (!(pcim_state & E1000_STATUS_PCIM_STATE))
- e1000_phy_hw_reset(&adapter->hw);
+ while (pcim_state & E1000_STATUS_PCIM_STATE) {
+ if (tries++ == dmoff_exit_timeout) {
+ e_dbg("Error in exiting dmoff\n");
+ break;
+ }
+ usleep_range(10000, 20000);
+ pcim_state = er32(STATUS);
+
+ /* Checking if MAC exited DMoff state */
+ if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+ e1000_phy_hw_reset(&adapter->hw);
+ }
}
/* update snapshot of PHY registers on LSC */
@@ -7118,7 +7120,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 1db35b2c7750..0f0efee5fc8e 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2978,7 +2978,7 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* @data: pointer to the data to be read or written
* @read: determines if operation is read or write
*
- * Reads the PHY register at offset and stores the retreived information
+ * Reads the PHY register at offset and stores the retrieved information
* in data. Assumes semaphore already acquired. Note that the procedure
* to access these regs uses the address port and data port to read/write.
* These accesses done with PHY address 2 and without using pages.
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 9e3103fae723..dbcae92bb18d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1370,7 +1370,6 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
struct fm10k_hw *hw = &interface->hw;
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 eicr;
- s32 err = 0;
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1386,15 +1385,16 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
- err = mbx->ops.process(hw, mbx);
+ s32 err = mbx->ops.process(hw, mbx);
+
+ if (err == FM10K_ERR_RESET_REQUESTED)
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
+
/* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
- if (err == FM10K_ERR_RESET_REQUESTED)
- set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
-
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 85d3dd3a3339..b9417dc0007c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -870,6 +870,8 @@ struct i40e_netdev_priv {
struct i40e_vsi *vsi;
};
+extern struct ida i40e_client_ida;
+
/* struct that defines an interrupt vector */
struct i40e_q_vector {
struct i40e_vsi *vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 32f3facbed1a..e07ed065d3a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -12,6 +12,7 @@ static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
+DEFINE_IDA(i40e_client_ida);
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
@@ -275,6 +276,57 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
}
+static void i40e_auxiliary_dev_release(struct device *dev)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev =
+ container_of(dev, struct i40e_auxiliary_device, aux_dev.dev);
+
+ ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id);
+ kfree(i40e_aux_dev);
+}
+
+static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev;
+ struct pci_dev *pdev = ldev->pcidev;
+ struct auxiliary_device *aux_dev;
+ int ret;
+
+ i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL);
+ if (!i40e_aux_dev)
+ return -ENOMEM;
+
+ i40e_aux_dev->ldev = ldev;
+
+ aux_dev = &i40e_aux_dev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = i40e_auxiliary_dev_release;
+ ldev->aux_dev = aux_dev;
+
+ ret = ida_alloc(&i40e_client_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+ aux_dev->id = ret;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ ida_free(&i40e_client_ida, aux_dev->id);
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ auxiliary_device_uninit(aux_dev);
+ return ret;
+ }
+
+ return ret;
+}
+
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
@@ -286,9 +338,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- if (!registered_client || pf->cinst)
- return;
-
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return;
@@ -308,11 +357,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->lan_info.fw_build = pf->hw.aq.fw_build;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
- if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
- kfree(cdev);
- cdev = NULL;
- return;
- }
+ if (i40e_client_get_params(vsi, &cdev->lan_info.params))
+ goto free_cdev;
mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@@ -324,7 +370,17 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->client = registered_client;
pf->cinst = cdev;
- i40e_client_update_msix_info(pf);
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+ if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp"))
+ goto free_cdev;
+
+ return;
+
+free_cdev:
+ kfree(cdev);
+ pf->cinst = NULL;
}
/**
@@ -345,7 +401,7 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client = registered_client;
+ struct i40e_client *client;
struct i40e_client_instance *cdev;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
@@ -359,9 +415,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
- if (!client || !cdev)
+ if (!cdev || !cdev->client)
return;
+ client = cdev->client;
+
/* Here we handle client opens. If the client is down, and
* the netdev is registered, then open the client.
*/
@@ -423,16 +481,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.bus_id,
pf->hw.bus.device, pf->hw.bus.func);
- /* If a client has already been registered, we need to add an instance
- * of it to our new LAN device.
- */
- if (registered_client)
- i40e_client_add_instance(pf);
+ i40e_client_add_instance(pf);
- /* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients if
- * they can be launched at probe time.
- */
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -449,9 +499,13 @@ out:
**/
int i40e_lan_del_device(struct i40e_pf *pf)
{
+ struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev;
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
/* First, remove any client instance. */
i40e_client_del_instance(pf);
@@ -579,7 +633,7 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
u32 v_idx, i, reg_idx, reg;
ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
- qvlist_info->num_vectors - 1), GFP_KERNEL);
+ qvlist_info->num_vectors), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
@@ -732,6 +786,42 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return err;
}
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ pf->cinst->client = client;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_register);
+
+void i40e_client_device_unregister(struct i40e_info *ldev)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev)
+ return;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+ usleep_range(500, 1000);
+
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
+
+ pf->cinst->client = NULL;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
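For context, a consumer of the "i40e.iwarp" auxiliary device created above would pair these two exports with an auxiliary_driver. A minimal sketch follows; the real consumer is the irdma driver, and everything named ex_* (including the client ops struct) is illustrative, not taken from this patch:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static struct i40e_client ex_client;	/* hypothetical; ops filled in elsewhere */

static int ex_iwarp_probe(struct auxiliary_device *aux_dev,
			  const struct auxiliary_device_id *id)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	/* hand the client ops to the LAN driver; this kicks the subtask */
	i40e_client_device_register(i40e_adev->ldev, &ex_client);
	return 0;
}

static void ex_iwarp_remove(struct auxiliary_device *aux_dev)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_unregister(i40e_adev->ldev);
}

static const struct auxiliary_device_id ex_iwarp_id_table[] = {
	{ .name = "i40e.iwarp" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, ex_iwarp_id_table);

static struct auxiliary_driver ex_iwarp_driver = {
	.probe = ex_iwarp_probe,
	.remove = ex_iwarp_remove,
	.id_table = ex_iwarp_id_table,
};
module_auxiliary_driver(ex_iwarp_driver);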
+
+/* Retain these legacy global registration/unregistration calls until i40iw
+ * is removed from the kernel. The irdma unified driver does not use these
+ * exported symbols.
+ */
/**
* i40e_register_client - Register an i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 67cb0b47416a..b4d3fed0d2f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -552,9 +552,9 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
* ENDIF
*/
-/* macro to make the table lines short */
+/* macro to make the table lines short; use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- { PTYPE, \
+ [PTYPE] = { \
1, \
I40E_RX_PTYPE_OUTER_##OUTER_IP, \
I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -565,16 +565,15 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
I40E_RX_PTYPE_INNER_PROT_##I, \
I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
- { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros make the table fit but are terse */
#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
/* L2 Packet types */
I40E_PTT_UNUSED_ENTRY(0),
I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@@ -780,118 +779,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */
- I40E_PTT_UNUSED_ENTRY(154),
- I40E_PTT_UNUSED_ENTRY(155),
- I40E_PTT_UNUSED_ENTRY(156),
- I40E_PTT_UNUSED_ENTRY(157),
- I40E_PTT_UNUSED_ENTRY(158),
- I40E_PTT_UNUSED_ENTRY(159),
-
- I40E_PTT_UNUSED_ENTRY(160),
- I40E_PTT_UNUSED_ENTRY(161),
- I40E_PTT_UNUSED_ENTRY(162),
- I40E_PTT_UNUSED_ENTRY(163),
- I40E_PTT_UNUSED_ENTRY(164),
- I40E_PTT_UNUSED_ENTRY(165),
- I40E_PTT_UNUSED_ENTRY(166),
- I40E_PTT_UNUSED_ENTRY(167),
- I40E_PTT_UNUSED_ENTRY(168),
- I40E_PTT_UNUSED_ENTRY(169),
-
- I40E_PTT_UNUSED_ENTRY(170),
- I40E_PTT_UNUSED_ENTRY(171),
- I40E_PTT_UNUSED_ENTRY(172),
- I40E_PTT_UNUSED_ENTRY(173),
- I40E_PTT_UNUSED_ENTRY(174),
- I40E_PTT_UNUSED_ENTRY(175),
- I40E_PTT_UNUSED_ENTRY(176),
- I40E_PTT_UNUSED_ENTRY(177),
- I40E_PTT_UNUSED_ENTRY(178),
- I40E_PTT_UNUSED_ENTRY(179),
-
- I40E_PTT_UNUSED_ENTRY(180),
- I40E_PTT_UNUSED_ENTRY(181),
- I40E_PTT_UNUSED_ENTRY(182),
- I40E_PTT_UNUSED_ENTRY(183),
- I40E_PTT_UNUSED_ENTRY(184),
- I40E_PTT_UNUSED_ENTRY(185),
- I40E_PTT_UNUSED_ENTRY(186),
- I40E_PTT_UNUSED_ENTRY(187),
- I40E_PTT_UNUSED_ENTRY(188),
- I40E_PTT_UNUSED_ENTRY(189),
-
- I40E_PTT_UNUSED_ENTRY(190),
- I40E_PTT_UNUSED_ENTRY(191),
- I40E_PTT_UNUSED_ENTRY(192),
- I40E_PTT_UNUSED_ENTRY(193),
- I40E_PTT_UNUSED_ENTRY(194),
- I40E_PTT_UNUSED_ENTRY(195),
- I40E_PTT_UNUSED_ENTRY(196),
- I40E_PTT_UNUSED_ENTRY(197),
- I40E_PTT_UNUSED_ENTRY(198),
- I40E_PTT_UNUSED_ENTRY(199),
-
- I40E_PTT_UNUSED_ENTRY(200),
- I40E_PTT_UNUSED_ENTRY(201),
- I40E_PTT_UNUSED_ENTRY(202),
- I40E_PTT_UNUSED_ENTRY(203),
- I40E_PTT_UNUSED_ENTRY(204),
- I40E_PTT_UNUSED_ENTRY(205),
- I40E_PTT_UNUSED_ENTRY(206),
- I40E_PTT_UNUSED_ENTRY(207),
- I40E_PTT_UNUSED_ENTRY(208),
- I40E_PTT_UNUSED_ENTRY(209),
-
- I40E_PTT_UNUSED_ENTRY(210),
- I40E_PTT_UNUSED_ENTRY(211),
- I40E_PTT_UNUSED_ENTRY(212),
- I40E_PTT_UNUSED_ENTRY(213),
- I40E_PTT_UNUSED_ENTRY(214),
- I40E_PTT_UNUSED_ENTRY(215),
- I40E_PTT_UNUSED_ENTRY(216),
- I40E_PTT_UNUSED_ENTRY(217),
- I40E_PTT_UNUSED_ENTRY(218),
- I40E_PTT_UNUSED_ENTRY(219),
-
- I40E_PTT_UNUSED_ENTRY(220),
- I40E_PTT_UNUSED_ENTRY(221),
- I40E_PTT_UNUSED_ENTRY(222),
- I40E_PTT_UNUSED_ENTRY(223),
- I40E_PTT_UNUSED_ENTRY(224),
- I40E_PTT_UNUSED_ENTRY(225),
- I40E_PTT_UNUSED_ENTRY(226),
- I40E_PTT_UNUSED_ENTRY(227),
- I40E_PTT_UNUSED_ENTRY(228),
- I40E_PTT_UNUSED_ENTRY(229),
-
- I40E_PTT_UNUSED_ENTRY(230),
- I40E_PTT_UNUSED_ENTRY(231),
- I40E_PTT_UNUSED_ENTRY(232),
- I40E_PTT_UNUSED_ENTRY(233),
- I40E_PTT_UNUSED_ENTRY(234),
- I40E_PTT_UNUSED_ENTRY(235),
- I40E_PTT_UNUSED_ENTRY(236),
- I40E_PTT_UNUSED_ENTRY(237),
- I40E_PTT_UNUSED_ENTRY(238),
- I40E_PTT_UNUSED_ENTRY(239),
-
- I40E_PTT_UNUSED_ENTRY(240),
- I40E_PTT_UNUSED_ENTRY(241),
- I40E_PTT_UNUSED_ENTRY(242),
- I40E_PTT_UNUSED_ENTRY(243),
- I40E_PTT_UNUSED_ENTRY(244),
- I40E_PTT_UNUSED_ENTRY(245),
- I40E_PTT_UNUSED_ENTRY(246),
- I40E_PTT_UNUSED_ENTRY(247),
- I40E_PTT_UNUSED_ENTRY(248),
- I40E_PTT_UNUSED_ENTRY(249),
-
- I40E_PTT_UNUSED_ENTRY(250),
- I40E_PTT_UNUSED_ENTRY(251),
- I40E_PTT_UNUSED_ENTRY(252),
- I40E_PTT_UNUSED_ENTRY(253),
- I40E_PTT_UNUSED_ENTRY(254),
- I40E_PTT_UNUSED_ENTRY(255)
+ [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
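The designated initializers introduced above drop the redundant first column (the ptype itself) and let the compiler place each entry by index; a standalone sketch of the two forms used, including the GNU range syntax that replaces the hundred-odd unused-entry lines:

struct ex_decoded {
	unsigned int known:1;
	unsigned int payload_layer:3;
};

static const struct ex_decoded ex_table[256] = {
	[1]           = { .known = 1, .payload_layer = 2 },	/* pinned slot */
	[154 ... 255] = { 0 },	/* GNU extension: zero-fill a whole range */
};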
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index ccd5b9486ea9..3e822bad4851 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
if (ethtool_link_ksettings_test_link_mode(&safe_ks,
supported,
Autoneg) &&
- hw->phy.link_info.phy_type !=
- I40E_PHY_TYPE_10GBASE_T) {
+ hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
err = -EINVAL;
goto done;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 704e474879c5..861e59a350bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -32,7 +32,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
@@ -8703,6 +8703,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
} else {
err = -EINVAL;
@@ -10569,7 +10571,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
#endif /* CONFIG_I40E_DCB */
if (!lock_acquired)
rtnl_lock();
- ret = i40e_setup_pf_switch(pf, reinit);
+ ret = i40e_setup_pf_switch(pf, reinit, true);
if (ret)
goto end_unlock;
@@ -14627,10 +14629,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
* @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the RTNL lock is already held
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
u16 flags = 0;
int ret;
@@ -14732,9 +14735,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
+ if (!lock_acquired)
+ rtnl_lock();
+
/* repopulate tunnel port filters */
udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ if (!lock_acquired)
+ rtnl_unlock();
+
return ret;
}
@@ -15528,7 +15537,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
}
#endif
- err = i40e_setup_pf_switch(pf, false);
+ err = i40e_setup_pf_switch(pf, false, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -16270,6 +16279,7 @@ static void __exit i40e_exit_module(void)
{
pci_unregister_driver(&i40e_driver);
destroy_workqueue(i40e_wq);
+ ida_destroy(&i40e_client_ida);
i40e_dbg_exit();
}
module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index f1f6fc3744e9..7b971b205d36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -11,13 +11,14 @@
* operate with the nanosecond field directly without fear of overflow.
*
* Much like the 82599, the update period is dependent upon the link speed:
- * At 40Gb link or no link, the period is 1.6ns.
- * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
+ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
* At 1Gb link, the period is multiplied by 20. (32ns)
* 1588 functionality is not supported at 100Mbps.
*/
#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
#define I40E_PTP_10GB_INCVAL_MULT 2
+#define I40E_PTP_5GB_INCVAL_MULT 2
#define I40E_PTP_1GB_INCVAL_MULT 20
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
@@ -465,6 +466,9 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
case I40E_LINK_SPEED_10GB:
mult = I40E_PTP_10GB_INCVAL_MULT;
break;
+ case I40E_LINK_SPEED_5GB:
+ mult = I40E_PTP_5GB_INCVAL_MULT;
+ break;
case I40E_LINK_SPEED_1GB:
mult = I40E_PTP_1GB_INCVAL_MULT;
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index de70c16ef619..38eb8151ee9a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2298,7 +2298,6 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
struct bpf_prog *xdp_prog;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
@@ -2313,15 +2312,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
@@ -2329,7 +2333,6 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
break;
}
xdp_out:
- rcu_read_unlock();
return result;
}
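The out_failure label above sits inside the XDP_ABORTED case so that failed XDP_TX and XDP_REDIRECT verdicts jump into the same tracepoint-and-drop path. A compilable toy version of that goto-into-a-case idiom, with made-up verdict values:

static int ex_handle_verdict(int act, int failed)
{
	int result = 0;

	switch (act) {
	case 1:			/* stands in for XDP_TX */
		if (failed)
			goto out_failure;
		result = 1;
		break;
	default:		/* unknown verdicts fall through... */
	case 2:			/* ...to the ABORTED-style case */
out_failure:
		/* single spot where the exception trace would fire */
		result = -1;
		break;
	}
	return result;
}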
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c81109a63e90..36a4ca1ffb1a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -804,7 +804,6 @@ enum i40e_rx_l2_ptype {
};
struct i40e_rx_ptype_decoded {
- u32 ptype:8;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 46d884417c63..e7e778ca074c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -153,7 +153,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
struct bpf_prog *xdp_prog;
u32 act;
- rcu_read_lock();
/* NB! xdp_prog will always be !NULL, due to the fact that
* this path is enabled by setting an XDP program.
*/
@@ -162,9 +161,9 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
- rcu_read_unlock();
- return result;
+ if (err)
+ goto out_failure;
+ return I40E_XDP_REDIR;
}
switch (act) {
@@ -173,18 +172,20 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
}
- rcu_read_unlock();
return result;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 8547fc8fdfd6..e9cc7f6ddc46 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -522,9 +522,9 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* ENDIF
*/
-/* macro to make the table lines short */
+/* macro to make the table lines short; use explicit indexing with [PTYPE] */
#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- { PTYPE, \
+ [PTYPE] = { \
1, \
IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -535,16 +535,15 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
IAVF_RX_PTYPE_INNER_PROT_##I, \
IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
- { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros make the table fit but are terse */
#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
/* L2 Packet types */
IAVF_PTT_UNUSED_ENTRY(0),
IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@@ -750,118 +749,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */
- IAVF_PTT_UNUSED_ENTRY(154),
- IAVF_PTT_UNUSED_ENTRY(155),
- IAVF_PTT_UNUSED_ENTRY(156),
- IAVF_PTT_UNUSED_ENTRY(157),
- IAVF_PTT_UNUSED_ENTRY(158),
- IAVF_PTT_UNUSED_ENTRY(159),
-
- IAVF_PTT_UNUSED_ENTRY(160),
- IAVF_PTT_UNUSED_ENTRY(161),
- IAVF_PTT_UNUSED_ENTRY(162),
- IAVF_PTT_UNUSED_ENTRY(163),
- IAVF_PTT_UNUSED_ENTRY(164),
- IAVF_PTT_UNUSED_ENTRY(165),
- IAVF_PTT_UNUSED_ENTRY(166),
- IAVF_PTT_UNUSED_ENTRY(167),
- IAVF_PTT_UNUSED_ENTRY(168),
- IAVF_PTT_UNUSED_ENTRY(169),
-
- IAVF_PTT_UNUSED_ENTRY(170),
- IAVF_PTT_UNUSED_ENTRY(171),
- IAVF_PTT_UNUSED_ENTRY(172),
- IAVF_PTT_UNUSED_ENTRY(173),
- IAVF_PTT_UNUSED_ENTRY(174),
- IAVF_PTT_UNUSED_ENTRY(175),
- IAVF_PTT_UNUSED_ENTRY(176),
- IAVF_PTT_UNUSED_ENTRY(177),
- IAVF_PTT_UNUSED_ENTRY(178),
- IAVF_PTT_UNUSED_ENTRY(179),
-
- IAVF_PTT_UNUSED_ENTRY(180),
- IAVF_PTT_UNUSED_ENTRY(181),
- IAVF_PTT_UNUSED_ENTRY(182),
- IAVF_PTT_UNUSED_ENTRY(183),
- IAVF_PTT_UNUSED_ENTRY(184),
- IAVF_PTT_UNUSED_ENTRY(185),
- IAVF_PTT_UNUSED_ENTRY(186),
- IAVF_PTT_UNUSED_ENTRY(187),
- IAVF_PTT_UNUSED_ENTRY(188),
- IAVF_PTT_UNUSED_ENTRY(189),
-
- IAVF_PTT_UNUSED_ENTRY(190),
- IAVF_PTT_UNUSED_ENTRY(191),
- IAVF_PTT_UNUSED_ENTRY(192),
- IAVF_PTT_UNUSED_ENTRY(193),
- IAVF_PTT_UNUSED_ENTRY(194),
- IAVF_PTT_UNUSED_ENTRY(195),
- IAVF_PTT_UNUSED_ENTRY(196),
- IAVF_PTT_UNUSED_ENTRY(197),
- IAVF_PTT_UNUSED_ENTRY(198),
- IAVF_PTT_UNUSED_ENTRY(199),
-
- IAVF_PTT_UNUSED_ENTRY(200),
- IAVF_PTT_UNUSED_ENTRY(201),
- IAVF_PTT_UNUSED_ENTRY(202),
- IAVF_PTT_UNUSED_ENTRY(203),
- IAVF_PTT_UNUSED_ENTRY(204),
- IAVF_PTT_UNUSED_ENTRY(205),
- IAVF_PTT_UNUSED_ENTRY(206),
- IAVF_PTT_UNUSED_ENTRY(207),
- IAVF_PTT_UNUSED_ENTRY(208),
- IAVF_PTT_UNUSED_ENTRY(209),
-
- IAVF_PTT_UNUSED_ENTRY(210),
- IAVF_PTT_UNUSED_ENTRY(211),
- IAVF_PTT_UNUSED_ENTRY(212),
- IAVF_PTT_UNUSED_ENTRY(213),
- IAVF_PTT_UNUSED_ENTRY(214),
- IAVF_PTT_UNUSED_ENTRY(215),
- IAVF_PTT_UNUSED_ENTRY(216),
- IAVF_PTT_UNUSED_ENTRY(217),
- IAVF_PTT_UNUSED_ENTRY(218),
- IAVF_PTT_UNUSED_ENTRY(219),
-
- IAVF_PTT_UNUSED_ENTRY(220),
- IAVF_PTT_UNUSED_ENTRY(221),
- IAVF_PTT_UNUSED_ENTRY(222),
- IAVF_PTT_UNUSED_ENTRY(223),
- IAVF_PTT_UNUSED_ENTRY(224),
- IAVF_PTT_UNUSED_ENTRY(225),
- IAVF_PTT_UNUSED_ENTRY(226),
- IAVF_PTT_UNUSED_ENTRY(227),
- IAVF_PTT_UNUSED_ENTRY(228),
- IAVF_PTT_UNUSED_ENTRY(229),
-
- IAVF_PTT_UNUSED_ENTRY(230),
- IAVF_PTT_UNUSED_ENTRY(231),
- IAVF_PTT_UNUSED_ENTRY(232),
- IAVF_PTT_UNUSED_ENTRY(233),
- IAVF_PTT_UNUSED_ENTRY(234),
- IAVF_PTT_UNUSED_ENTRY(235),
- IAVF_PTT_UNUSED_ENTRY(236),
- IAVF_PTT_UNUSED_ENTRY(237),
- IAVF_PTT_UNUSED_ENTRY(238),
- IAVF_PTT_UNUSED_ENTRY(239),
-
- IAVF_PTT_UNUSED_ENTRY(240),
- IAVF_PTT_UNUSED_ENTRY(241),
- IAVF_PTT_UNUSED_ENTRY(242),
- IAVF_PTT_UNUSED_ENTRY(243),
- IAVF_PTT_UNUSED_ENTRY(244),
- IAVF_PTT_UNUSED_ENTRY(245),
- IAVF_PTT_UNUSED_ENTRY(246),
- IAVF_PTT_UNUSED_ENTRY(247),
- IAVF_PTT_UNUSED_ENTRY(248),
- IAVF_PTT_UNUSED_ENTRY(249),
-
- IAVF_PTT_UNUSED_ENTRY(250),
- IAVF_PTT_UNUSED_ENTRY(251),
- IAVF_PTT_UNUSED_ENTRY(252),
- IAVF_PTT_UNUSED_ENTRY(253),
- IAVF_PTT_UNUSED_ENTRY(254),
- IAVF_PTT_UNUSED_ENTRY(255)
+ [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index de9fda78b43a..9f1f523807c4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -370,7 +370,6 @@ enum iavf_rx_l2_ptype {
};
struct iavf_rx_ptype_decoded {
- u32 ptype:8;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 07fe857e9e3a..4f538cdf42c1 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -22,12 +22,14 @@ ice-y := ice_main.o \
ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_idc.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index e35db3ff583b..a450343fbb92 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
+#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
@@ -55,8 +56,10 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -72,12 +75,15 @@
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-#define ICE_AQ_LEN 64
+#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
+#define ICE_SBQ_LEN 64
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
+#define ICE_RDMA_NUM_AEQ_MSIX 4
+#define ICE_MIN_RDMA_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -88,8 +94,9 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
+#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
-#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
+#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
@@ -203,9 +210,9 @@ enum ice_pf_state {
ICE_NEEDS_RESTART,
ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
- ICE_PFR_REQ, /* set by driver and peers */
- ICE_CORER_REQ, /* set by driver and peers */
- ICE_GLOBR_REQ, /* set by driver and peers */
+ ICE_PFR_REQ, /* set by driver */
+ ICE_CORER_REQ, /* set by driver */
+ ICE_GLOBR_REQ, /* set by driver */
ICE_CORER_RECV, /* set by OICR handler */
ICE_GLOBR_RECV, /* set by OICR handler */
ICE_EMPR_RECV, /* set by OICR handler */
@@ -222,6 +229,7 @@ enum ice_pf_state {
ICE_STATE_NOMINAL_CHECK_BITS,
ICE_ADMINQ_EVENT_PENDING,
ICE_MAILBOXQ_EVENT_PENDING,
+ ICE_SIDEBANDQ_EVENT_PENDING,
ICE_MDD_EVENT_PENDING,
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
@@ -332,9 +340,11 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
+ u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -373,17 +383,22 @@ struct ice_q_vector {
enum ice_pf_flags {
ICE_FLAG_FLTR_SYNC,
+ ICE_FLAG_RDMA_ENA,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
+ ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
+ ICE_FLAG_PTP, /* PTP is enabled by software */
+ ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
+ ICE_FLAG_MOD_POWER_UNSUPPORTED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -439,12 +454,17 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
+ struct ice_ptp ptp;
+ u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
+ u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
struct hlist_head aq_wait_list;
wait_queue_head_t aq_wait_queue;
+ wait_queue_head_t reset_wait_queue;
+
u32 hw_csum_rx_error;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
@@ -471,6 +491,8 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ struct auxiliary_device *adev;
+ int aux_idx;
u32 sw_int_count;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
@@ -547,15 +569,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
+ struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
- qid -= ring->vsi->num_xdp_txq;
+ qid -= vsi->num_xdp_txq;
- if (!ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
- return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
}
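The new af_xdp_zc_qps bitmap gates this lookup so only queues with an attached XSK buffer pool report one. A sketch of the producer side, assuming the bitmap is allocated with the VSI and updated from the XSK pool setup path elsewhere in this series:

	/* in the XSK pool enable/disable path (assumed location) */
	if (pool)
		set_bit(qid, vsi->af_xdp_zc_qps);
	else
		clear_bit(qid, vsi->af_xdp_zc_qps);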
/**
@@ -636,6 +659,9 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+int ice_plug_aux_dev(struct ice_pf *pf);
+void ice_unplug_aux_dev(struct ice_pf *pf);
+int ice_init_rdma(struct ice_pf *pf);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
@@ -660,4 +686,25 @@ int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+/**
+ * ice_set_rdma_cap - enable RDMA support
+ * @pf: PF struct
+ */
+static inline void ice_set_rdma_cap(struct ice_pf *pf)
+{
+ if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
+ set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ ice_plug_aux_dev(pf);
+ }
+}
+
+/**
+ * ice_clear_rdma_cap - disable RDMA support
+ * @pf: PF struct
+ */
+static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+{
+ ice_unplug_aux_dev(pf);
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+}
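A hedged sketch of how these two helpers would pair up at a capability-refresh point, such as after a reset (the exact call site is not in this hunk):

	if (pf->hw.func_caps.common_cap.rdma)
		ice_set_rdma_cap(pf);
	else
		ice_clear_rdma_cap(pf);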
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5cdfe406af84..21b4c7cd6f05 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -108,6 +108,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
+#define ICE_AQC_CAPS_1588 0x0046
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
@@ -115,6 +116,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define ICE_AQC_CAPS_RDMA 0x0051
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@@ -1122,7 +1124,9 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
- u8 reserved1;
+ u8 link_cfg_err;
+#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@@ -1165,7 +1169,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
/* External Device Power Ability */
u8 power_desc;
-#define ICE_AQ_PWR_CLASS_M 0x3
+#define ICE_AQ_PWR_CLASS_M 0x3F
#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
#define ICE_AQ_LINK_PWR_BASET_HIGH 1
#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
@@ -1608,6 +1612,15 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
+/* Sideband Control Interface Commands */
+/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
+struct ice_aqc_neigh_dev_req {
+ __le16 sb_data_len;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
@@ -1684,6 +1697,36 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
+/* Add Tx RDMA Queue Set (indirect 0x0C33) */
+struct ice_aqc_add_rdma_qset {
+ u8 num_qset_grps;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each Qset entry for the Add Tx RDMA Queue Set
+ * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset.
+ */
+struct ice_aqc_add_tx_rdma_qset_entry {
+ __le16 tx_qset_id;
+ u8 rsvd[2];
+ __le32 qset_teid;
+ struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add Tx RDMA Queue Set (0x0C33)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_add_rdma_qset_data is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_add_rdma_qset_data {
+ __le32 parent_teid;
+ __le16 num_qsets;
+ u8 rsvd[2];
+ struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
+};
+
/* Configure Firmware Logging Command (indirect 0xFF09)
* Logging Information Read Response (indirect 0xFF10)
* Note: The 0xFF10 command has no input parameters.
@@ -1810,6 +1853,30 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+/* Driver Shared Parameters (direct, 0x0C90) */
+struct ice_aqc_driver_shared_params {
+ u8 set_or_get_op;
+#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
+#define ICE_AQC_DRIVER_PARAM_SET 0
+#define ICE_AQC_DRIVER_PARAM_GET 1
+ u8 param_indx;
+#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
+ u8 rsvd[2];
+ __le32 param_val;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_driver_params {
+ /* OS clock index for PTP timer Domain 0 */
+ ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
+ /* OS clock index for PTP timer Domain 1 */
+ ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
+
+ /* Add new parameters above */
+ ICE_AQC_DRIVER_PARAM_MAX = 16,
+};
+
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
@@ -1878,13 +1945,16 @@ struct ice_aq_desc {
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2025,15 +2095,21 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
+ /* Sideband Control Interface commands */
+ ice_aqc_opc_neighbour_device_request = 0x0C00,
+
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ ice_aqc_opc_driver_shared_params = 0x0C90,
+
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index f39cd16403ed..80ed76f0cace 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -52,12 +52,12 @@ bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
-#define ice_sync_arfs_fltrs(pf) do {} while (0)
-#define ice_init_arfs(vsi) do {} while (0)
-#define ice_clear_arfs(vsi) do {} while (0)
-#define ice_remove_arfs(pf) do {} while (0)
-#define ice_free_cpu_rx_rmap(vsi) do {} while (0)
-#define ice_rebuild_arfs(pf) do {} while (0)
+static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
+static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
+static inline void ice_init_arfs(struct ice_vsi *vsi) { }
+static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
+static inline void ice_remove_arfs(struct ice_pf *pf) { }
+static inline void ice_rebuild_arfs(struct ice_pf *pf) { }
static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5985a7e5ca8a..c36057efc7ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -287,6 +287,15 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* make sure the context is associated with the right VSI */
tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ /* Restrict Tx timestamps to the PF VSI */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->tsyn_ena = 1;
+ break;
+ default:
+ break;
+ }
+
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
@@ -319,11 +328,9 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_ring *ring)
{
- struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
- u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
@@ -339,48 +346,6 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
- ring->rx_buf_len = vsi->rx_buf_len;
-
- if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
-
- ring->xsk_pool = ice_xsk_pool(ring);
- if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
-
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- chain_len = 1;
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_XSK_BUFF_POOL,
- NULL);
- if (err)
- return err;
- xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
-
- dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
- ring->q_index);
- } else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
-
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err)
- return err;
- }
- }
/* Receive Queue Base Address.
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
@@ -415,6 +380,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
*/
rlan_ctx.showiv = 0;
+ /* For AF_XDP ZC, we disallow packets to span multiple
+ * buffers, thus letting us skip that
+ * handling in the fast-path.
+ */
+ if (ring->xsk_pool)
+ chain_len = 1;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
@@ -431,14 +402,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
* of same priority
*/
if (vsi->type != ICE_VSI_VF)
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
+ ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
+ false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -458,6 +430,66 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_rxq - Configure an Rx queue
+ * @ring: the ring being configured
+ *
+ * Return 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxq(struct ice_ring *ring)
+{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
+ int err;
+
+ ring->rx_buf_len = ring->vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index, ring->q_vector->napi.napi_id);
+
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+ return err;
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index, ring->q_vector->napi.napi_id);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ err = ice_setup_rx_ctx(ring);
+ if (err) {
+ dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ ring->q_index, err);
+ return err;
+ }
+
if (ring->xsk_pool) {
bool ok;
@@ -470,9 +502,13 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
}
ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
- if (!ok)
+ if (!ok) {
+ u16 pf_q = ring->vsi->rxq_map[ring->q_index];
+
dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 44efdb627043..20e1c29aa68a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_setup_rx_ctx(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e93b1e40f627..2fb81e359cdf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -58,6 +59,17 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_e810 - check whether the device is E810 based
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the device is E810 based, false if not.
+ */
+bool ice_is_e810(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_E810;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -424,6 +436,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
li->an_info = link_data.an_info;
li->ext_info = link_data.ext_info;
li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
@@ -454,6 +467,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
(unsigned long long)li->phy_type_high);
ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
@@ -1062,7 +1076,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
GLNVM_ULD_POR_DONE_1_M |\
GLNVM_ULD_PCIER_DONE_2_M)
- uld_mask = ICE_RESET_DONE_MASK;
+ uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
+ GLNVM_ULD_PE_DONE_M : 0);
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
@@ -1289,6 +1304,64 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc,
+ buf, buf_size, cd));
+}
+
+/**
+ * ice_sbq_rw_reg - Fill and send a Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+{
+ struct ice_sbq_cmd_desc desc = {0};
+ struct ice_sbq_msg_req msg = {0};
+ u16 msg_len;
+ int status;
+
+ msg_len = sizeof(msg);
+
+ msg.dest_dev = in->dest_dev;
+ msg.opcode = in->opcode;
+ msg.flags = ICE_SBQ_MSG_FLAGS;
+ msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+ msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
+ msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
+
+ if (in->opcode)
+ msg.data = cpu_to_le32(in->data);
+ else
+ /* data read comes back in completion, so shorten the struct by
+ * sizeof(msg.data)
+ */
+ msg_len -= sizeof(msg.data);
+
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
+ desc.param0.cmd_len = cpu_to_le16(msg_len);
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ if (!status && !in->opcode)
+ in->data = le32_to_cpu
+ (((struct ice_sbq_msg_cmpl *)&msg)->data);
+ return status;
+}
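A usage sketch for the wrapper above, reading one sideband register. The destination id and register offset are placeholders; opcode 0 selects a read, and the value comes back in msg.data:

	struct ice_sbq_msg_input msg = { 0 };
	int err;

	msg.dest_dev = EX_SBQ_DEST_PHY;	/* hypothetical destination id */
	msg.opcode = 0;			/* 0 == read, nonzero == write */
	msg.msg_addr_low = 0x10;	/* example register offset */
	msg.msg_addr_high = 0;

	err = ice_sbq_rw_reg(hw, &msg);
	if (!err)
		pr_debug("sideband read: 0x%08x\n", msg.data);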
+
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -1938,6 +2011,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
+ case ICE_AQC_CAPS_RDMA:
+ caps->rdma = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1971,6 +2048,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
caps->maxtc = 4;
ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
+ if (caps->rdma) {
+ ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
+ caps->rdma = 0;
+ }
+
+ /* print message only when processing device capabilities
+ * during initialization.
+ */
+ if (caps == &hw->dev_caps.common_cap)
+ dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
}
}
@@ -2017,6 +2104,48 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
}
/**
+ * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_1588.
+ */
+static void
+ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_func_info *info = &func_p->ts_func_info;
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
+ func_p->common_cap.ieee_1588 = info->ena;
+
+ info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
+ info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
+ info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
+ info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
+
+ info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+
+ ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
+ func_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
+ info->src_tmr_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
+ info->tmr_ena);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
+ info->tmr_index_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
+ info->tmr_index_assoc);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
+ info->clk_freq);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
+ info->clk_src);
+}
+
+/**
* ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
@@ -2082,6 +2211,9 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
@@ -2155,6 +2287,57 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_1588 for device capabilities.
+ */
+static void
+ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
+ u32 logical_id = le32_to_cpu(cap->logical_id);
+ u32 phys_id = le32_to_cpu(cap->phys_id);
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
+ dev_p->common_cap.ieee_1588 = info->ena;
+
+ info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
+ info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
+ info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
+
+ info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
+ info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+
+ info->ena_ports = logical_id;
+ info->tmr_own_map = phys_id;
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
+ dev_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
+ info->tmr0_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
+ info->tmr0_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
+ info->tmr0_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
+ info->tmr1_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
+ info->tmr1_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
+ info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
+ info->ena_ports);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
+ info->tmr_own_map);
+}
+
+/**
* ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2215,6 +2398,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
@@ -3635,6 +3821,52 @@ do_aq:
return status;
}
+/**
+ * ice_aq_add_rdma_qsets
+ * @hw: pointer to the hardware structure
+ * @num_qset_grps: Number of RDMA Qset groups
+ * @qset_list: list of Qset groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx RDMA Qsets (0x0C33)
+ */
+static int
+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
+ struct ice_aqc_add_rdma_qset_data *qset_list,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_rdma_qset_data *list;
+ struct ice_aqc_add_rdma_qset *cmd;
+ struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
+
+ cmd = &desc.params.add_rdma_qset;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
+
+ if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
+ return -EINVAL;
+
+ for (i = 0, list = qset_list; i < num_qset_grps; i++) {
+ u16 num_qsets = le16_to_cpu(list->num_qsets);
+
+ sum_size += struct_size(list, rdma_qsets, num_qsets);
+ list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
+ num_qsets);
+ }
+
+ if (buf_size != sum_size)
+ return -EINVAL;
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qset_grps = num_qset_grps;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
+ buf_size, cd));
+}
+
/* End of FW Admin Queue command wrappers */
/**
@@ -4133,6 +4365,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_rdmaqs: max RDMA queues array per TC
+ *
+ * This function adds/updates the VSI RDMA queues per TC.
+ */
+int
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_rdmaqs)
+{
+ return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
+ max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA));
+}
+
+/**
+ * ice_ena_vsi_rdma_qset
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @rdma_qset: pointer to RDMA Qset
+ * @num_qsets: number of RDMA Qsets
+ * @qset_teid: pointer to Qset node TEIDs
+ *
+ * This function adds RDMA Qsets
+ */
+int
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_aqc_add_rdma_qset_data *buf;
+ struct ice_sched_node *parent;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u16 i, buf_size;
+ int ret;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+ hw = pi->hw;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ buf_size = struct_size(buf, rdma_qsets, num_qsets);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ mutex_lock(&pi->sched_lock);
+
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+ ICE_SCHED_NODE_OWNER_RDMA);
+ if (!parent) {
+ ret = -EINVAL;
+ goto rdma_error_exit;
+ }
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+
+ buf->num_qsets = cpu_to_le16(num_qsets);
+ for (i = 0; i < num_qsets; i++) {
+ buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
+ buf->rdma_qsets[i].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->rdma_qsets[i].info.generic = 0;
+ buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ }
+ ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
+ if (ret) {
+ ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
+ goto rdma_error_exit;
+ }
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ for (i = 0; i < num_qsets; i++) {
+ node.node_teid = buf->rdma_qsets[i].qset_teid;
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (status) {
+ ret = ice_status_to_errno(status);
+ break;
+ }
+ qset_teid[i] = le32_to_cpu(node.node_teid);
+ }
+rdma_error_exit:
+ mutex_unlock(&pi->sched_lock);
+ kfree(buf);
+ return ret;
+}
+
+/**
+ * ice_dis_vsi_rdma_qset - free RDMA resources
+ * @pi: port_info struct
+ * @count: number of RDMA Qsets to free
+ * @qset_teid: TEID of Qset node
+ * @q_id: list of queue IDs being disabled
+ */
+int
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+ u16 *q_id)
+{
+ struct ice_aqc_dis_txq_item *qg_list;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+ u16 qg_size;
+ int i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+
+ hw = pi->hw;
+
+ qg_size = struct_size(qg_list, q_id, 1);
+ qg_list = kzalloc(qg_size, GFP_KERNEL);
+ if (!qg_list)
+ return -ENOMEM;
+
+ mutex_lock(&pi->sched_lock);
+
+ for (i = 0; i < count; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
+ if (!node)
+ continue;
+
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] =
+ cpu_to_le16(q_id[i] |
+ ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
+
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
+ ICE_NO_RESET, 0, NULL);
+ if (status)
+ break;
+
+ ice_free_sched_node(pi, node);
+ }
+
+ mutex_unlock(&pi->sched_lock);
+ kfree(qg_list);
+ return ice_status_to_errno(status);
+}
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -4304,6 +4692,81 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_set_driver_param - Set driver parameter to share via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to set
+ * @value: the value to set the parameter to
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the value of one of the software defined parameters. All PFs connected
+ * to this device can read the value using ice_aq_get_driver_param.
+ *
+ * Note that firmware provides no synchronization or locking, and will not
+ * save the parameter value during a device reset. It is expected that
+ * a single PF will write the parameter value, while all other PFs will only
+ * read it.
+ */
+int
+ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
+ cmd->param_indx = idx;
+ cmd->param_val = cpu_to_le32(value);
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_driver_param - Get driver parameter shared via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to read
+ * @value: storage to return the shared parameter
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the value of one of the software defined parameters.
+ *
+ * Note that firmware provides no synchronization or locking. It is expected
+ * that only a single PF will write a given parameter.
+ */
+int
+ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
+ cmd->param_indx = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = le32_to_cpu(cmd->param_val);
+
+ return 0;
+}
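A hedged end-to-end sketch of the set/get pair: the PF owning PTP timer 0 publishes its POSIX clock index and sibling PFs read it back. pf->ptp.clock and the calling context are assumptions from elsewhere in this series:

	u32 clk_idx;

	/* owner PF, once its PTP clock is registered */
	ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
				ptp_clock_index(pf->ptp.clock), NULL);

	/* any sibling PF on the same device */
	if (!ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
				     &clk_idx, NULL))
		dev_dbg(ice_hw_to_dev(hw), "shared PTP clock index %u\n",
			clk_idx);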
+
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 7a9d2dfb21a2..fb16070f02e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -40,6 +40,8 @@ enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+bool ice_is_sbq_supported(struct ice_hw *hw);
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -97,6 +99,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_e810(struct ice_hw *hw);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -147,6 +150,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
+int
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_rdmaqs);
+int
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
+int
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+ u16 *q_id);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
@@ -164,6 +176,7 @@ void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -173,6 +186,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
+int
+ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 value, struct ice_sq_cd *cd);
+int
+ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 87b33bdd4960..03bdb125be36 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -52,6 +52,19 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
}
/**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->sbq;
+
+ ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -609,6 +622,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_SB:
+ ice_sb_init_regs(hw);
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
@@ -646,6 +663,32 @@ init_ctrlq_free_sq:
}
/**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the sideband control queue interface is
+ * supported for the device, false otherwise
+ */
+bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+ /* The device sideband queue is only supported on devices with the
+ * generic MAC type.
+ */
+ return hw->mac_type == ICE_MAC_GENERIC;
+}
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ */
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+ if (ice_is_sbq_supported(hw))
+ return &hw->sbq;
+ return &hw->adminq;
+}
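
A minimal sketch of the resulting caller pattern (the ice_sbq_send helper name is assumed for illustration); callers never need to know whether the device has a dedicated sideband queue:

static enum ice_status
ice_sbq_send(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
	     u16 buf_size)
{
	/* &hw->sbq on ICE_MAC_GENERIC devices, &hw->adminq otherwise */
	struct ice_ctl_q_info *cq = ice_get_sbq(hw);

	return ice_sq_send_cmd(hw, cq, desc, buf, buf_size, NULL);
}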
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
@@ -662,6 +705,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
@@ -685,6 +731,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PHY Sideband */
+ if (ice_is_sbq_supported(hw))
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -724,6 +773,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status)
return status;
+ /* sideband control queue (SBQ) interface is not supported on some
+ * devices. Initialize it if supported; otherwise fall back to the admin
+ * queue interface
+ */
+ if (ice_is_sbq_supported(hw)) {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+ if (status)
+ return status;
+ }
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -759,6 +817,8 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_init_ctrlq_locks(&hw->sbq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
@@ -791,6 +851,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
ice_shutdown_all_ctrlq(hw);
ice_destroy_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_destroy_ctrlq_locks(&hw->sbq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index fe75871e48ca..c07e9cc9fc6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -9,6 +9,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -29,6 +30,7 @@ enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
+ ICE_CTL_Q_SB,
};
/* Control Queue timeout settings - max delay 1s */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index df02cffdf209..926cf748c5ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -275,6 +275,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
int ret = ICE_DCB_NO_HW_CHG;
+ struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -313,6 +314,17 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
+ /* Notify AUX drivers about impending change to TCs */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
+
+ set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
@@ -640,6 +652,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ struct iidc_event *event;
u8 tc_map = 0;
int v, ret;
@@ -675,6 +688,14 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
+
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 35c21d9ae009..261b6e2ed7bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -60,7 +60,7 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
#else
-#define ice_dcb_rebuild(pf) do {} while (0)
+static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
@@ -113,11 +113,12 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
-#define ice_update_dcb_stats(pf) do {} while (0)
-#define ice_pf_dcb_recfg(pf) do {} while (0)
-#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
-#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
-#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
-#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0)
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
+static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
+static inline void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
index 6c630a362293..eac2f34bdcdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
@@ -11,9 +11,10 @@ void
ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg);
#else
-#define ice_dcbnl_setup(vsi) do {} while (0)
-#define ice_dcbnl_set_all(vsi) do {} while (0)
-#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
+static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { }
+static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { }
+static inline void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg) { }
#endif /* CONFIG_DCB */
-
#endif /* _ICE_DCB_NL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index cf685eeea198..91b545ab8b8f 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -276,6 +276,12 @@ static int ice_devlink_info_get(struct devlink *devlink,
size_t i;
int err;
+ err = ice_wait_for_reset(pf, 10 * HZ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
+ return err;
+ }
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -283,6 +289,9 @@ static int ice_devlink_info_get(struct devlink *devlink,
/* discover capabilities first */
status = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (status) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
err = -EIO;
goto out_free_ctx;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d9ddd0bcf65f..d95a5daca114 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
-
- /* Autoneg PHY types */
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ks->base.port = PORT_TP;
break;
case ICE_MEDIA_BACKPLANE:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ /* Set supported and advertised autoneg */
+ if (ice_is_phy_caps_an_enabled(caps)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ }
+
done:
kfree(caps);
return err;
@@ -3234,6 +3195,31 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return 0;
}
+static int
+ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ /* only report timestamping if PTP is enabled */
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ice_get_ptp_clock_index(pf);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
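
The tx_types and rx_filters bits reported here are what user space negotiates through the SIOCSHWTSTAMP ioctl; a minimal user-space sketch under that assumption (error handling elided):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,	  /* from BIT(HWTSTAMP_TX_ON) */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* from BIT(HWTSTAMP_FILTER_ALL) */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* the kernel writes back the granted config on success */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}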
+
/**
* ice_get_max_txq - return the maximum number of Tx queues for in a PF
* @pf: PF structure
@@ -3501,13 +3487,9 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf;
-
if (!rc->ring)
return -EINVAL;
- pf = rc->ring->vsi->back;
-
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
@@ -3519,7 +3501,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
return -EINVAL;
}
@@ -4029,7 +4011,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ice_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index dcec0360ce55..f8601d5b0b19 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -702,6 +702,16 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
}
err = pldmfw_flash_image(&priv.context, fw);
+ if (err == -ENOENT) {
+ dev_err(dev, "Firmware image has no record matching this device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
+ } else if (err) {
+ /* Do not set a generic extended ACK message here. A more
+ * specific message may already have been set by one of our
+ * ops.
+ */
+ dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ }
ice_release_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index de38a0fc9665..76021d977b60 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -31,6 +31,7 @@
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
@@ -51,6 +52,54 @@
#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PF_SB_ARQBAH 0x0022FF00
+#define PF_SB_ARQBAH_ARQBAH_S 0
+#define PF_SB_ARQBAH_ARQBAH_M ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ARQBAL 0x0022FE80
+#define PF_SB_ARQBAL_ARQBAL_LSB_S 0
+#define PF_SB_ARQBAL_ARQBAL_LSB_M ICE_M(0x3F, 0)
+#define PF_SB_ARQBAL_ARQBAL_S 6
+#define PF_SB_ARQBAL_ARQBAL_M ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ARQH 0x00230000
+#define PF_SB_ARQH_ARQH_S 0
+#define PF_SB_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN 0x0022FF80
+#define PF_SB_ARQLEN_ARQLEN_S 0
+#define PF_SB_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN_ARQVFE_S 28
+#define PF_SB_ARQLEN_ARQVFE_M BIT(28)
+#define PF_SB_ARQLEN_ARQOVFL_S 29
+#define PF_SB_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_SB_ARQLEN_ARQCRIT_S 30
+#define PF_SB_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_SB_ARQLEN_ARQENABLE_S 31
+#define PF_SB_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_SB_ARQT 0x00230080
+#define PF_SB_ARQT_ARQT_S 0
+#define PF_SB_ARQT_ARQT_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQBAH 0x0022FC80
+#define PF_SB_ATQBAH_ATQBAH_S 0
+#define PF_SB_ATQBAH_ATQBAH_M ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ATQBAL 0x0022FC00
+#define PF_SB_ATQBAL_ATQBAL_S 6
+#define PF_SB_ATQBAL_ATQBAL_M ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ATQH 0x0022FD80
+#define PF_SB_ATQH_ATQH_S 0
+#define PF_SB_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN 0x0022FD00
+#define PF_SB_ATQLEN_ATQLEN_S 0
+#define PF_SB_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN_ATQVFE_S 28
+#define PF_SB_ATQLEN_ATQVFE_M BIT(28)
+#define PF_SB_ATQLEN_ATQOVFL_S 29
+#define PF_SB_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_SB_ATQLEN_ATQCRIT_S 30
+#define PF_SB_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_SB_ATQLEN_ATQENABLE_S 31
+#define PF_SB_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_SB_ATQT 0x0022FE00
+#define PF_SB_ATQT_ATQT_S 0
+#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -89,6 +138,10 @@
#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4
#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M ICE_M(0x3, 4)
#define GLGEN_CLKSTAT_SRC 0x000B826C
+#define GLGEN_GPIO_CTL(_i) (0x000880C8 + ((_i) * 4))
+#define GLGEN_GPIO_CTL_PIN_DIR_M BIT(4)
+#define GLGEN_GPIO_CTL_PIN_FUNC_S 8
+#define GLGEN_GPIO_CTL_PIN_FUNC_M ICE_M(0xF, 8)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
@@ -110,8 +163,6 @@
#define VPGEN_VFRSTAT_VFRD_M BIT(0)
#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
-#define PFHMC_ERRORDATA 0x00520500
-#define PFHMC_ERRORINFO 0x00520400
#define GLINT_CTL 0x0016CC54
#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
#define GLINT_CTL_ITR_GRAN_200_S 16
@@ -155,11 +206,14 @@
#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_TSYN_TX_M BIT(11)
+#define PFINT_OICR_TSYN_EVNT_M BIT(12)
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
#define PFINT_OICR_GRST_M BIT(20)
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_PUSH_M BIT(27)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_SWINT_M BIT(31)
@@ -169,6 +223,9 @@
#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR_ENA 0x0016C900
+#define PFINT_SB_CTL 0x0016B600
+#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
@@ -382,6 +439,36 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4))
+#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4))
+#define GLTSYN_AUX_OUT_0_OUT_ENA_M BIT(0)
+#define GLTSYN_AUX_OUT_0_OUTMOD_M ICE_M(0x3, 1)
+#define GLTSYN_CLKO_0(_i) (0x000889B8 + ((_i) * 4))
+#define GLTSYN_CMD 0x00088810
+#define GLTSYN_CMD_SYNC 0x00088814
+#define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4))
+#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
+#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4))
+#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4))
+#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
+#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
+#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
+#define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4))
+#define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4))
+#define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4))
+#define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4))
+#define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4))
+#define GLTSYN_STAT_EVENT0_M BIT(0)
+#define GLTSYN_STAT_EVENT1_M BIT(1)
+#define GLTSYN_STAT_EVENT2_M BIT(2)
+#define GLTSYN_SYNC_DLAY 0x00088818
+#define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4))
+#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
+#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
+#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFTSYN_SEM 0x00088880
+#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
new file mode 100644
index 000000000000..1f2afdf6cd48
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+/* Inter-Driver Communication */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
+ * @pf: pointer to PF struct
+ *
+ * This function has to be called with a device_lock on the
+ * pf->adev->dev to avoid race conditions.
+ */
+static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+{
+ struct auxiliary_device *adev;
+
+ adev = pf->adev;
+ if (!adev || !adev->dev.driver)
+ return NULL;
+
+ return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
+ adrv.driver);
+}
+
+/**
+ * ice_send_event_to_aux - send event to RDMA AUX driver
+ * @pf: pointer to PF struct
+ * @event: event struct
+ */
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+{
+ struct iidc_auxiliary_drv *iadrv;
+
+ if (!pf->adev)
+ return;
+
+ device_lock(&pf->adev->dev);
+ iadrv = ice_get_auxiliary_drv(pf);
+ if (iadrv && iadrv->event_handler)
+ iadrv->event_handler(pf, event);
+ device_unlock(&pf->adev->dev);
+}
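
The calling convention, as used by the DCB paths elsewhere in this patch, is: heap-allocate an event, set one bit in its type bitmap, send, free. A hypothetical wrapper (the name and the enum iidc_event_type parameter are assumptions) makes the pattern explicit:

static void ice_notify_aux(struct ice_pf *pf, enum iidc_event_type type)
{
	struct iidc_event *event;

	/* GFP_KERNEL is fine: events are sent from process context */
	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return;

	set_bit(type, event->type);
	ice_send_event_to_aux(pf, event);
	kfree(event);
}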
+
+/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
+/**
+ * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
+ * @pf: PF struct
+ * @qset: Resource to be allocated
+ */
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+{
+ u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_vsi *vsi;
+ struct device *dev;
+ u32 qset_teid;
+ u16 qs_handle;
+ int status;
+ int i;
+
+ if (WARN_ON(!pf || !qset))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EINVAL;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi) {
+ dev_err(dev, "RDMA QSet invalid VSI\n");
+ return -EINVAL;
+ }
+
+ ice_for_each_traffic_class(i)
+ max_rdmaqs[i] = 0;
+
+ max_rdmaqs[qset->tc]++;
+ qs_handle = qset->qs_handle;
+
+ status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_rdmaqs);
+ if (status) {
+ dev_err(dev, "Failed VSI RDMA Qset config\n");
+ return status;
+ }
+
+ status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
+ &qs_handle, 1, &qset_teid);
+ if (status) {
+ dev_err(dev, "Failed VSI RDMA Qset enable\n");
+ return status;
+ }
+ vsi->qset_handle[qset->tc] = qset->qs_handle;
+ qset->teid = qset_teid;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
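
A rough sketch of the consumer side, with field names taken from struct iidc_rdma_qset_params as used above (vsi_num and the surrounding RDMA context are assumed to be in scope):

	struct iidc_rdma_qset_params qset = {};
	int err;

	qset.qs_handle = 0;		/* RDMA qset handle */
	qset.tc = 0;			/* traffic class for the leaf node */
	qset.vport_id = vsi_num;	/* VSI the qset hangs off */

	err = ice_add_rdma_qset(pf, &qset);
	if (err)
		return err;
	/* qset.teid now holds the scheduler TEID that ice_del_rdma_qset()
	 * needs at teardown
	 */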
+
+/**
+ * ice_del_rdma_qset - Delete leaf node for RDMA Qset
+ * @pf: PF struct
+ * @qset: Resource to be freed
+ */
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+{
+ struct ice_vsi *vsi;
+ u32 teid;
+ u16 q_id;
+
+ if (WARN_ON(!pf || !qset))
+ return -EINVAL;
+
+ vsi = ice_find_vsi(pf, qset->vport_id);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
+ return -EINVAL;
+ }
+
+ q_id = qset->qs_handle;
+ teid = qset->teid;
+
+ vsi->qset_handle[qset->tc] = 0;
+
+ return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
+}
+EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
+
+/**
+ * ice_rdma_request_reset - accept request from RDMA to perform a reset
+ * @pf: struct for PF
+ * @reset_type: type of reset
+ */
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+{
+ enum ice_reset_req reset;
+
+ if (WARN_ON(!pf))
+ return -EINVAL;
+
+ switch (reset_type) {
+ case IIDC_PFR:
+ reset = ICE_RESET_PFR;
+ break;
+ case IIDC_CORER:
+ reset = ICE_RESET_CORER;
+ break;
+ case IIDC_GLOBR:
+ reset = ICE_RESET_GLOBR;
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
+ return -EINVAL;
+ }
+
+ return ice_schedule_reset(pf, reset);
+}
+EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
+
+/**
+ * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
+ * @pf: pointer to struct for PF
+ * @vsi_id: VSI HW idx to update filter on
+ * @enable: bool whether to enable or disable filters
+ */
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+{
+ struct ice_vsi *vsi;
+ int status;
+
+ if (WARN_ON(!pf))
+ return -EINVAL;
+
+ vsi = ice_find_vsi(pf, vsi_id);
+ if (!vsi)
+ return -EINVAL;
+
+ status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
+ enable ? "en" : "dis");
+ } else {
+ if (enable)
+ vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ else
+ vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
+
+/**
+ * ice_get_qos_params - parse QoS params for RDMA consumption
+ * @pf: pointer to PF struct
+ * @qos: set of QoS values
+ */
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+}
+EXPORT_SYMBOL_GPL(ice_get_qos_params);
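
Each user priority occupies a 3-bit field in PRTDCB_TUP2TC, which the (up2tc >> (i * 3)) & 0x7 expression above unpacks; a worked example with a made-up register value:

	u32 up2tc = 0x6D2240;	/* hypothetical PRTDCB_TUP2TC contents */
	unsigned int i;

	/* decodes as UP 0-7 -> TC 0, 0, 1, 1, 2, 2, 3, 3 */
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		pr_info("UP %u -> TC %u\n", i, (up2tc >> (i * 3)) & 0x7);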
+
+/**
+ * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static int ice_reserve_rdma_qvector(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ int index;
+
+ index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
+ ICE_RES_RDMA_VEC_ID);
+ if (index < 0)
+ return index;
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ pf->rdma_base_vector = (u16)index;
+ }
+ return 0;
+}
+
+/**
+ * ice_adev_release - function to be mapped to AUX dev's release op
+ * @dev: pointer to device to free
+ */
+static void ice_adev_release(struct device *dev)
+{
+ struct iidc_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ kfree(iadev);
+}
+
+/**
+ * ice_plug_aux_dev - allocate and register AUX device
+ * @pf: pointer to PF struct
+ */
+int ice_plug_aux_dev(struct ice_pf *pf)
+{
+ struct iidc_auxiliary_dev *iadev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ pf->adev = adev;
+ iadev->pf = pf;
+
+ adev->id = pf->aux_idx;
+ adev->dev.release = ice_adev_release;
+ adev->dev.parent = &pf->pdev->dev;
+ adev->name = IIDC_RDMA_ROCE_NAME;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ pf->adev = NULL;
+ kfree(iadev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ pf->adev = NULL;
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return 0;
+}
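
On the consumer side (upstream this is the irdma driver), an auxiliary driver binds by matching "ice." plus the device name; a heavily trimmed sketch with placeholder my_rdma_* names:

#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc.h>

static int my_rdma_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iadev;

	/* recover the PF handle the ice driver stashed at plug time */
	iadev = container_of(adev, struct iidc_auxiliary_dev, adev);
	if (!iadev->pf)
		return -EINVAL;
	return 0;
}

static void my_rdma_remove(struct auxiliary_device *adev)
{
	/* undo whatever probe set up */
}

static void my_rdma_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	/* react to IIDC_EVENT_* bits set in event->type */
}

static const struct auxiliary_device_id my_rdma_id_table[] = {
	{ .name = "ice." IIDC_RDMA_ROCE_NAME },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, my_rdma_id_table);

static struct iidc_auxiliary_drv my_rdma_adrv = {
	/* ice_get_auxiliary_drv() container_of()s back to this wrapper to
	 * find event_handler, so the embedding type matters
	 */
	.adrv = {
		.id_table = my_rdma_id_table,
		.probe = my_rdma_probe,
		.remove = my_rdma_remove,
	},
	.event_handler = my_rdma_event_handler,
};
module_auxiliary_driver(my_rdma_adrv.adrv);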
+
+/**
+ * ice_unplug_aux_dev - unregister and free AUX device
+ * @pf: pointer to PF struct
+ */
+void ice_unplug_aux_dev(struct ice_pf *pf)
+{
+ if (!pf->adev)
+ return;
+
+ auxiliary_device_delete(pf->adev);
+ auxiliary_device_uninit(pf->adev);
+ pf->adev = NULL;
+}
+
+/**
+ * ice_init_rdma - initializes PF for RDMA use
+ * @pf: ptr to ice_pf
+ */
+int ice_init_rdma(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int ret;
+
+ /* Reserve vector resources */
+ ret = ice_reserve_rdma_qvector(pf);
+ if (ret < 0) {
+ dev_err(dev, "failed to reserve vectors for RDMA\n");
+ return ret;
+ }
+
+ return ice_plug_aux_dev(pf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
new file mode 100644
index 000000000000..b7796b8aecbd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _ICE_IDC_INT_H_
+#define _ICE_IDC_INT_H_
+
+#include <linux/net/intel/iidc.h>
+#include "ice.h"
+
+struct ice_pf;
+
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+
+#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4599fc3b4ed8..37c18c66b5c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -172,6 +172,7 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
}
ice_clear_sriov_cap(pf);
+ ice_clear_rdma_cap(pf);
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
@@ -222,6 +223,7 @@ ice_lag_unlink(struct ice_lag *lag,
}
ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 21329ed3087e..80736e0ec0dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -161,7 +161,6 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
struct ice_rx_ptype_decoded {
- u32 ptype:10;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:2;
@@ -606,9 +605,32 @@ struct ice_tlan_ctx {
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
-/* macro to make the table lines short */
+/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * Rows are indexed explicitly by PTYPE ([PTYPE] = { ... }), so each entry is
+ * guaranteed to land at the index matching its packet type; PTYPEs without a
+ * row are zero-initialized and therefore decode as unknown.
+ *
+ * Typical workflow:
+ *
+ * IF NOT ice_ptype_lkup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum ice_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- { PTYPE, \
+ [PTYPE] = { \
1, \
ICE_RX_PTYPE_OUTER_##OUTER_IP, \
ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -619,18 +641,18 @@ struct ice_tlan_ctx {
ICE_RX_PTYPE_INNER_PROT_##I, \
ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
+static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
/* L2 Packet types */
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(2),
ICE_PTT_UNUSED_ENTRY(3),
ICE_PTT_UNUSED_ENTRY(4),
ICE_PTT_UNUSED_ENTRY(5),
@@ -744,7 +766,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* Non Tunneled IPv6 */
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(91),
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -832,118 +854,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */
- ICE_PTT_UNUSED_ENTRY(154),
- ICE_PTT_UNUSED_ENTRY(155),
- ICE_PTT_UNUSED_ENTRY(156),
- ICE_PTT_UNUSED_ENTRY(157),
- ICE_PTT_UNUSED_ENTRY(158),
- ICE_PTT_UNUSED_ENTRY(159),
-
- ICE_PTT_UNUSED_ENTRY(160),
- ICE_PTT_UNUSED_ENTRY(161),
- ICE_PTT_UNUSED_ENTRY(162),
- ICE_PTT_UNUSED_ENTRY(163),
- ICE_PTT_UNUSED_ENTRY(164),
- ICE_PTT_UNUSED_ENTRY(165),
- ICE_PTT_UNUSED_ENTRY(166),
- ICE_PTT_UNUSED_ENTRY(167),
- ICE_PTT_UNUSED_ENTRY(168),
- ICE_PTT_UNUSED_ENTRY(169),
-
- ICE_PTT_UNUSED_ENTRY(170),
- ICE_PTT_UNUSED_ENTRY(171),
- ICE_PTT_UNUSED_ENTRY(172),
- ICE_PTT_UNUSED_ENTRY(173),
- ICE_PTT_UNUSED_ENTRY(174),
- ICE_PTT_UNUSED_ENTRY(175),
- ICE_PTT_UNUSED_ENTRY(176),
- ICE_PTT_UNUSED_ENTRY(177),
- ICE_PTT_UNUSED_ENTRY(178),
- ICE_PTT_UNUSED_ENTRY(179),
-
- ICE_PTT_UNUSED_ENTRY(180),
- ICE_PTT_UNUSED_ENTRY(181),
- ICE_PTT_UNUSED_ENTRY(182),
- ICE_PTT_UNUSED_ENTRY(183),
- ICE_PTT_UNUSED_ENTRY(184),
- ICE_PTT_UNUSED_ENTRY(185),
- ICE_PTT_UNUSED_ENTRY(186),
- ICE_PTT_UNUSED_ENTRY(187),
- ICE_PTT_UNUSED_ENTRY(188),
- ICE_PTT_UNUSED_ENTRY(189),
-
- ICE_PTT_UNUSED_ENTRY(190),
- ICE_PTT_UNUSED_ENTRY(191),
- ICE_PTT_UNUSED_ENTRY(192),
- ICE_PTT_UNUSED_ENTRY(193),
- ICE_PTT_UNUSED_ENTRY(194),
- ICE_PTT_UNUSED_ENTRY(195),
- ICE_PTT_UNUSED_ENTRY(196),
- ICE_PTT_UNUSED_ENTRY(197),
- ICE_PTT_UNUSED_ENTRY(198),
- ICE_PTT_UNUSED_ENTRY(199),
-
- ICE_PTT_UNUSED_ENTRY(200),
- ICE_PTT_UNUSED_ENTRY(201),
- ICE_PTT_UNUSED_ENTRY(202),
- ICE_PTT_UNUSED_ENTRY(203),
- ICE_PTT_UNUSED_ENTRY(204),
- ICE_PTT_UNUSED_ENTRY(205),
- ICE_PTT_UNUSED_ENTRY(206),
- ICE_PTT_UNUSED_ENTRY(207),
- ICE_PTT_UNUSED_ENTRY(208),
- ICE_PTT_UNUSED_ENTRY(209),
-
- ICE_PTT_UNUSED_ENTRY(210),
- ICE_PTT_UNUSED_ENTRY(211),
- ICE_PTT_UNUSED_ENTRY(212),
- ICE_PTT_UNUSED_ENTRY(213),
- ICE_PTT_UNUSED_ENTRY(214),
- ICE_PTT_UNUSED_ENTRY(215),
- ICE_PTT_UNUSED_ENTRY(216),
- ICE_PTT_UNUSED_ENTRY(217),
- ICE_PTT_UNUSED_ENTRY(218),
- ICE_PTT_UNUSED_ENTRY(219),
-
- ICE_PTT_UNUSED_ENTRY(220),
- ICE_PTT_UNUSED_ENTRY(221),
- ICE_PTT_UNUSED_ENTRY(222),
- ICE_PTT_UNUSED_ENTRY(223),
- ICE_PTT_UNUSED_ENTRY(224),
- ICE_PTT_UNUSED_ENTRY(225),
- ICE_PTT_UNUSED_ENTRY(226),
- ICE_PTT_UNUSED_ENTRY(227),
- ICE_PTT_UNUSED_ENTRY(228),
- ICE_PTT_UNUSED_ENTRY(229),
-
- ICE_PTT_UNUSED_ENTRY(230),
- ICE_PTT_UNUSED_ENTRY(231),
- ICE_PTT_UNUSED_ENTRY(232),
- ICE_PTT_UNUSED_ENTRY(233),
- ICE_PTT_UNUSED_ENTRY(234),
- ICE_PTT_UNUSED_ENTRY(235),
- ICE_PTT_UNUSED_ENTRY(236),
- ICE_PTT_UNUSED_ENTRY(237),
- ICE_PTT_UNUSED_ENTRY(238),
- ICE_PTT_UNUSED_ENTRY(239),
-
- ICE_PTT_UNUSED_ENTRY(240),
- ICE_PTT_UNUSED_ENTRY(241),
- ICE_PTT_UNUSED_ENTRY(242),
- ICE_PTT_UNUSED_ENTRY(243),
- ICE_PTT_UNUSED_ENTRY(244),
- ICE_PTT_UNUSED_ENTRY(245),
- ICE_PTT_UNUSED_ENTRY(246),
- ICE_PTT_UNUSED_ENTRY(247),
- ICE_PTT_UNUSED_ENTRY(248),
- ICE_PTT_UNUSED_ENTRY(249),
-
- ICE_PTT_UNUSED_ENTRY(250),
- ICE_PTT_UNUSED_ENTRY(251),
- ICE_PTT_UNUSED_ENTRY(252),
- ICE_PTT_UNUSED_ENTRY(253),
- ICE_PTT_UNUSED_ENTRY(254),
- ICE_PTT_UNUSED_ENTRY(255),
+ [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
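
The decode flow from the comment above, expressed as code (the helper name is illustrative; the field and enum names follow the decoder struct and the ICE_PTT() macro):

static bool ice_rx_ptype_is_tunneled(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	/* "IF NOT known THEN packet is unknown" */
	if (!decoded.known)
		return false;

	/* only outer-IP rows carry meaningful tunnel/inner-protocol fields */
	if (decoded.outer_ip != ICE_RX_PTYPE_OUTER_IP)
		return false;

	return decoded.tunnel_type != ICE_RX_PTYPE_TUNNEL_NONE;
}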
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 82e2ce23df3d..dde9802c6c72 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
+ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_zc_qps;
+
return 0;
+err_zc_qps:
+ devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -163,12 +169,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
+ } else {
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -177,12 +184,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
+ } else {
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
}
}
@@ -194,6 +202,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
+ if (vf->num_req_qs)
+ vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +298,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
+ if (vsi->af_xdp_zc_qps) {
+ bitmap_free(vsi->af_xdp_zc_qps);
+ vsi->af_xdp_zc_qps = NULL;
+ }
/* free the ring and vector containers */
if (vsi->q_vectors) {
devm_kfree(dev, vsi->q_vectors);
@@ -617,6 +631,17 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
+ * ice_is_aux_ena - check if AUX devices/drivers are supported
+ * @pf: pointer to the PF struct
+ *
+ * returns true if AUX devices/drivers are supported, false otherwise
+ */
+bool ice_is_aux_ena(struct ice_pf *pf)
+{
+ return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+}
+
+/**
* ice_vsi_clean_rss_flow_fld - Delete RSS configuration
* @vsi: the VSI being cleaned up
*
@@ -1180,11 +1205,11 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- struct ice_vf *vf;
int i;
ice_for_each_vf(pf, i) {
- vf = &pf->vf[i];
+ struct ice_vf *vf = &pf->vf[i];
+
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
break;
@@ -1273,6 +1298,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
+ ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->tx_rings[i], ring);
@@ -1650,9 +1676,11 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
* @pf_q: index of the Rx queue in the PF's queue space
* @rxdid: flexible descriptor RXDID
* @prio: priority for the RXDID for this queue
+ * @ena_ts: true to enable timestamp and false to disable timestamp
*/
void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -1667,9 +1695,40 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (ena_ts)
+ /* Enable TimeSync on this queue */
+ regval |= QRXFLXP_CNTXT_TS_M;
+
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ int err;
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+ kfree(qg_buf);
+ return err;
+}
+
/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
@@ -1687,15 +1746,11 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
- for (i = 0; i < vsi->num_rxq; i++) {
- int err;
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
- i, err);
+ if (err)
return err;
- }
}
return 0;
@@ -1705,12 +1760,13 @@ setup_rings:
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1722,7 +1778,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
qg_buf->num_txqs = 1;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
goto err_cfg_txqs;
@@ -1742,7 +1798,7 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -1757,7 +1813,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
int ret;
int i;
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
if (ret)
return ret;
@@ -1997,17 +2053,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings)
+ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
{
u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
struct ice_txq_meta txq_meta = { };
int status;
@@ -2035,7 +2092,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -2044,7 +2101,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
*/
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}
/**
@@ -2203,7 +2260,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, ice_stat_str(status));
}
@@ -2818,11 +2875,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
* cleared in the same manner.
*/
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- struct ice_vf *vf;
int i;
ice_for_each_vf(pf, i) {
- vf = &pf->vf[i];
+ struct ice_vf *vf = &pf->vf[i];
+
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
break;
}
@@ -3182,6 +3239,34 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(ICE_GLOBR_REQ, state);
}
+/**
+ * ice_wait_for_reset - Wait for driver to finish reset and rebuild
+ * @pf: pointer to the PF structure
+ * @timeout: length of time to wait, in jiffies
+ *
+ * Wait (sleep) for a short time until the driver finishes cleaning up from
+ * a device reset. The caller must be able to sleep. Use this to delay
+ * operations that could fail while the driver is cleaning up after a device
+ * reset.
+ *
+ * Returns 0 on success, -EBUSY if the reset is not finished within the
+ * timeout, and -ERESTARTSYS if the thread was interrupted.
+ */
+int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
+{
+ long ret;
+
+ ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
+ !ice_is_reset_in_progress(pf->state),
+ timeout);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+ else
+ return 0;
+}
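
Callers gate operations that would race with a rebuild on this helper, as the devlink info handler added earlier in this patch does; the three outcomes in sketch form:

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err == -EBUSY)
		return err;	/* rebuild still running after 10 seconds */
	if (err)
		return err;	/* -ERESTARTSYS: interrupted by a signal */
	/* reset finished; safe to touch rebuilt state */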
+
#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
@@ -3316,13 +3401,22 @@ int ice_status_to_errno(enum ice_status err)
case ICE_ERR_DOES_NOT_EXIST:
return -ENOENT;
case ICE_ERR_OUT_OF_RANGE:
- return -ENOTTY;
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_EMPTY:
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return -EIO;
case ICE_ERR_PARAM:
+ case ICE_ERR_INVAL_SIZE:
return -EINVAL;
case ICE_ERR_NO_MEMORY:
return -ENOMEM;
case ICE_ERR_MAX_LIMIT:
return -EAGAIN;
+ case ICE_ERR_RESET_ONGOING:
+ return -EBUSY;
+ case ICE_ERR_AQ_FULL:
+ return -ENOSPC;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 511c2316c40c..d5a28bf0fc2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -12,6 +12,10 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
@@ -73,9 +77,11 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
+int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
@@ -102,7 +108,7 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-
+bool ice_is_aux_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..ef8d1815af56 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,6 +13,12 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
+/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
+ * ice tracepoint functions. This must be done exactly once across the
+ * ice driver.
+ */
+#define CREATE_TRACE_POINTS
+#include "ice_trace.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -35,6 +41,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
+static DEFINE_IDA(ice_aux_ida);
+
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -454,6 +462,8 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
+ ice_unplug_aux_dev(pf);
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
@@ -467,6 +477,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -499,6 +512,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PFR_REQ, pf->state);
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
return;
}
@@ -511,6 +525,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf, reset_type);
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
ice_reset_all_vfs(pf, true);
}
}
@@ -561,6 +576,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_PFR_REQ, pf->state);
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
ice_reset_all_vfs(pf, true);
}
@@ -858,6 +874,38 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
}
/**
+ * ice_check_module_power - check module power level
+ * @pf: pointer to PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * check module power level returned by a previous call to aq_get_link_info
+ * and print error messages if module power level is not supported
+ */
+static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
+{
+ /* if module power level is supported, clear the flag */
+ if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
+ ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
+ clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ return;
+ }
+
+ /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do
+ */
+ if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
+ return;
+
+ if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
+ dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
+ set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
+ dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
+ set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ }
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -892,6 +940,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
pi->lport, ice_stat_str(status),
ice_aq_str(pi->hw->adminq.sq_last_status));
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
*/
@@ -1190,6 +1240,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ qtype = "Sideband";
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
@@ -1364,6 +1418,34 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_sbq_subtask - clean the Sideband Queue rings
+ * @pf: board private structure
+ */
+static void ice_clean_sbq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ /* Nothing to do here if the sideband queue is not supported */
+ if (!ice_is_sbq_supported(hw)) {
+ clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+ return;
+ }
+
+ if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
+ return;
+
+ clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->sbq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
+
+ ice_flush(hw);
+}
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -2006,6 +2088,8 @@ static void ice_check_media_subtask(struct ice_pf *pf)
if (err)
return;
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
ice_init_phy_user_cfg(pi);
@@ -2063,6 +2147,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
+ ice_clean_sbq_subtask(pf);
ice_sync_arfs_fltrs(pf);
ice_flush_fdir_ctx(pf);
@@ -2078,6 +2163,7 @@ static void ice_service_task(struct work_struct *work)
test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
+ test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -2096,6 +2182,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+ hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+ hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+ hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
@@ -2118,6 +2208,8 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
+ ice_unplug_aux_dev(pf);
+
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2556,6 +2648,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+ struct netdev_bpf *xdp)
+{
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Please provide working DDP firmware package in order to use XDP\n"
+ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_xdp - implements XDP handler
* @dev: netdevice
* @xdp: XDP command
@@ -2608,6 +2714,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
+ PFINT_OICR_PE_PUSH_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
@@ -2633,6 +2740,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
dev = ice_pf_to_dev(pf);
set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+ set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -2678,8 +2786,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
- * We also make note of which reset happened so that peer
- * devices/drivers can be informed.
*/
if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
@@ -2706,11 +2812,36 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
}
- if (oicr & PFINT_OICR_HMC_ERR_M) {
- ena_mask &= ~PFINT_OICR_HMC_ERR_M;
- dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
- rd32(hw, PFHMC_ERRORINFO),
- rd32(hw, PFHMC_ERRORDATA));
+ if (oicr & PFINT_OICR_TSYN_TX_M) {
+ ena_mask &= ~PFINT_OICR_TSYN_TX_M;
+ ice_ptp_process_ts(pf);
+ }
+
+ if (oicr & PFINT_OICR_TSYN_EVNT_M) {
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
+
+ /* Save EVENTs from GTSYN register */
+ pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
+ GLTSYN_STAT_EVENT1_M |
+ GLTSYN_STAT_EVENT2_M);
+ ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
+ kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
+ }
+
+#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
+ if (oicr & ICE_AUX_CRIT_ERR) {
+ struct iidc_event *event;
+
+ ena_mask &= ~ICE_AUX_CRIT_ERR;
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ event->reg = oicr;
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/* Report any remaining unexpected interrupts */
@@ -2720,8 +2851,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a critical error is pending there is no choice but to
* reset the device.
*/
- if (oicr & (PFINT_OICR_PE_CRITERR_M |
- PFINT_OICR_PCI_EXCEPTION_M |
+ if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
set_bit(ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
@@ -2749,6 +2879,9 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
wr32(hw, PFINT_MBX_CTL,
rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL,
+ rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
+
/* disable Control queue Interrupt causes */
wr32(hw, PFINT_OICR_CTL,
rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
@@ -2803,6 +2936,11 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
+ /* This enables Sideband queue Interrupt causes */
+ val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+ PFINT_SB_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, val);
+
ice_flush(hw);
}
@@ -2972,7 +3110,6 @@ static void ice_set_netdev_features(struct net_device *netdev)
*/
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
@@ -2992,7 +3129,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -3266,6 +3403,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
bitmap_free(pf->avail_rxqs);
pf->avail_rxqs = NULL;
}
+
+ if (pf->ptp.clock)
+ ptp_clock_unregister(pf->ptp.clock);
}
/**
@@ -3276,6 +3416,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
{
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ if (func_caps->common_cap.rdma) {
+ set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3306,6 +3452,10 @@ static void ice_set_pf_caps(struct ice_pf *pf)
func_caps->fd_fltr_best_effort);
}
+ clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
+ if (func_caps->common_cap.ieee_1588)
+ set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
@@ -3325,6 +3475,8 @@ static int ice_init_pf(struct ice_pf *pf)
spin_lock_init(&pf->aq_wait_lock);
init_waitqueue_head(&pf->aq_wait_queue);
+ init_waitqueue_head(&pf->reset_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
@@ -3355,11 +3507,12 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, v_left, v_actual, v_other, v_budget = 0;
struct device *dev = ice_pf_to_dev(pf);
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ num_cpus = num_online_cpus();
/* reserve for LAN miscellaneous handler */
needed = ICE_MIN_LAN_OICR_MSIX;
@@ -3381,13 +3534,23 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_other = v_budget;
/* reserve vectors for LAN traffic */
- needed = min_t(int, num_online_cpus(), v_left);
+ needed = num_cpus;
if (v_left < needed)
goto no_hw_vecs_left_err;
pf->num_lan_msix = needed;
v_budget += needed;
v_left -= needed;
+ /* reserve vectors for RDMA auxiliary driver */
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_rdma_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
@@ -3417,16 +3580,46 @@ static int ice_ena_msix_range(struct ice_pf *pf)
err = -ERANGE;
goto msix_err;
} else {
- int v_traffic = v_actual - v_other;
+ int v_remain = v_actual - v_other;
+ int v_rdma = 0, v_min_rdma = 0;
+
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ /* Need at least 1 interrupt in addition to
+ * AEQ MSIX
+ */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+ v_min_rdma = ICE_MIN_RDMA_MSIX;
+ }
if (v_actual == ICE_MIN_MSIX ||
- v_traffic < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
+ dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- else
- pf->num_lan_msix = v_traffic;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining
+ * vectors to LAN MSIX
+ */
+ pf->num_rdma_msix = v_min_rdma;
+ pf->num_lan_msix = v_remain - v_min_rdma;
+ } else {
+ /* Split remaining MSIX with RDMA after
+ * accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
+
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
}
}
@@ -3441,6 +3634,7 @@ no_hw_vecs_left_err:
needed, v_left);
err = -ERANGE;
exit_err:
+ pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
return err;
}
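
The three-way split above is easier to see with numbers. Below is a minimal standalone sketch of the same decision, assuming placeholder values for ICE_MIN_LAN_TXRX_MSIX, ICE_MIN_RDMA_MSIX, and ICE_RDMA_NUM_AEQ_MSIX (the real constants live in the driver headers; the v_actual == ICE_MIN_MSIX check is omitted):

#include <stdio.h>

#define ICE_MIN_LAN_TXRX_MSIX	1	/* assumed value */
#define ICE_MIN_RDMA_MSIX	2	/* assumed value */
#define ICE_RDMA_NUM_AEQ_MSIX	4	/* assumed value */

static void split_vectors(int v_remain)
{
	int v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;	/* AEQ vectors plus one */
	int num_rdma, num_lan;

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		num_rdma = 0;	/* not enough vectors: RDMA is disabled */
		num_lan = ICE_MIN_LAN_TXRX_MSIX;
	} else if (v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		num_rdma = ICE_MIN_RDMA_MSIX;	/* RDMA gets its minimum */
		num_lan = v_remain - ICE_MIN_RDMA_MSIX;
	} else {
		/* split evenly after reserving the AEQ vectors for RDMA */
		num_rdma = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
			   ICE_RDMA_NUM_AEQ_MSIX;
		num_lan = v_remain - num_rdma;
	}

	printf("v_remain=%2d -> lan=%2d rdma=%2d\n", v_remain, num_lan, num_rdma);
}

int main(void)
{
	split_vectors(2);	/* too few vectors: RDMA disabled */
	split_vectors(6);	/* minimum RDMA, remainder to LAN */
	split_vectors(20);	/* even split after the AEQ reservation */
	return 0;
}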
@@ -4204,6 +4398,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_link_dflt_override(pf->hw.port_info);
+ ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
+
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
ICE_AQ_MEDIA_AVAILABLE) {
@@ -4242,6 +4438,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
/* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
@@ -4268,8 +4466,29 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
+ if (ice_is_aux_ena(pf)) {
+ pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
+ if (pf->aux_idx < 0) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ err = -ENOMEM;
+ goto err_netdev_reg;
+ }
+
+ err = ice_init_rdma(pf);
+ if (err) {
+ dev_err(dev, "Failed to initialize RDMA: %d\n", err);
+ err = -EIO;
+ goto err_init_aux_unroll;
+ }
+ } else {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ }
+
return 0;
+err_init_aux_unroll:
+ pf->adev = NULL;
+ ida_free(&ice_aux_ida, pf->aux_idx);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@@ -4379,13 +4598,17 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
- set_bit(ICE_DOWN, pf->state);
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
+ ice_unplug_aux_dev(pf);
+ ida_free(&ice_aux_ida, pf->aux_idx);
+ set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
@@ -4538,6 +4761,8 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
+ ice_unplug_aux_dev(pf);
+
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
if (!disabled)
@@ -5270,6 +5495,7 @@ static void ice_tx_dim_work(struct work_struct *work)
itr = tx_profile[dim->profile_ix].itr;
intrl = tx_profile[dim->profile_ix].intrl;
+ ice_trace(tx_dim_work, q_vector, dim);
ice_write_itr(rc, itr);
ice_write_intrl(q_vector, intrl);
@@ -5294,6 +5520,7 @@ static void ice_rx_dim_work(struct work_struct *work)
itr = rx_profile[dim->profile_ix].itr;
intrl = rx_profile[dim->profile_ix].intrl;
+ ice_trace(rx_dim_work, q_vector, dim);
ice_write_itr(rc, itr);
ice_write_intrl(q_vector, intrl);
@@ -5437,7 +5664,6 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
- struct ice_ring *ring;
u64 pkts, bytes;
int i;
@@ -5461,7 +5687,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
- ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+
ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
@@ -6128,6 +6355,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_clear_pxe_mode(hw);
+ ret = ice_init_nvm(hw);
+ if (ret) {
+ dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
@@ -6169,6 +6402,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
+ /* If the PF previously had enabled PTP, PTP init needs to happen before
+ * the VSI rebuild. If not, this causes the PTP link status events to
+ * fail.
+ */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -6208,6 +6448,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+
+ ice_plug_aux_dev(pf);
return;
err_vsi_rebuild:
@@ -6246,7 +6488,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct iidc_event *event;
u8 count = 0;
+ int err = 0;
if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
@@ -6279,27 +6523,59 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
+
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- int err;
-
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- return err;
+ goto event_after;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- return err;
+ goto event_after;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
- return 0;
+event_after:
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+
+ return err;
+}
+
+/**
+ * ice_do_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ */
+static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ice_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return ice_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -6818,6 +7094,8 @@ int ice_open_internal(struct net_device *netdev)
return -EIO;
}
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
@@ -6937,6 +7215,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
@@ -6950,6 +7229,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
+ .ndo_do_ioctl = ice_do_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
new file mode 100644
index 000000000000..5d5207b56ca9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -0,0 +1,1558 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+#define E810_OUT_PROP_DELAY_NS 1
+
+/**
+ * ice_set_tx_tstamp - Enable or disable Tx timestamping
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamps are enabled or disabled
+ */
+static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
+{
+ struct ice_vsi *vsi;
+ u32 val;
+ u16 i;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Set the timestamp enable flag for all the Tx rings */
+	ice_for_each_txq(vsi, i) {
+ if (!vsi->tx_rings[i])
+ continue;
+ vsi->tx_rings[i]->ptp_tx = on;
+ }
+
+ /* Configure the Tx timestamp interrupt */
+ val = rd32(&pf->hw, PFINT_OICR_ENA);
+ if (on)
+ val |= PFINT_OICR_TSYN_TX_M;
+ else
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(&pf->hw, PFINT_OICR_ENA, val);
+}
+
+/**
+ * ice_set_rx_tstamp - Enable or disable Rx timestamping
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamps are enabled or disabled
+ */
+static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
+{
+ struct ice_vsi *vsi;
+ u16 i;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Set the timestamp flag for all the Rx rings */
+ ice_for_each_rxq(vsi, i) {
+ if (!vsi->rx_rings[i])
+ continue;
+ vsi->rx_rings[i]->ptp_rx = on;
+ }
+}
+
+/**
+ * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * @pf: Board private structure
+ * @ena: bool value to enable or disable time stamp
+ *
+ * This function will configure timestamping during PTP initialization
+ * and deinitialization
+ */
+static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+{
+ ice_set_tx_tstamp(pf, ena);
+ ice_set_rx_tstamp(pf, ena);
+
+ if (ena) {
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
+ } else {
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ }
+}
+
+/**
+ * ice_get_ptp_clock_index - Get the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Determine the clock index of the PTP clock associated with this device. If
+ * this is the PF controlling the clock, just use the local access to the
+ * clock device pointer.
+ *
+ * Otherwise, read from the driver shared parameters to determine the clock
+ * index value.
+ *
+ * Returns: the index of the PTP clock associated with this device, or -1 if
+ * there is no associated clock.
+ */
+int ice_get_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ u32 value;
+ int err;
+
+ /* Use the ptp_clock structure if we're the main PF */
+ if (pf->ptp.clock)
+ return ptp_clock_index(pf->ptp.clock);
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
+ if (err) {
+ dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ return -1;
+ }
+
+ /* The PTP clock index is an integer, and will be between 0 and
+ * INT_MAX. The highest bit of the driver shared parameter is used to
+ * indicate whether or not the currently stored clock index is valid.
+ */
+ if (!(value & PTP_SHARED_CLK_IDX_VALID))
+ return -1;
+
+ return value & ~PTP_SHARED_CLK_IDX_VALID;
+}
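
A compact sketch of the shared-parameter encoding described in the comment above; defining PTP_SHARED_CLK_IDX_VALID as the top bit of the 32-bit word is an assumption made for illustration:

#include <stdint.h>
#include <stdio.h>

#define PTP_SHARED_CLK_IDX_VALID (1u << 31)	/* assumed: highest bit marks a valid index */

static uint32_t encode_clk_idx(int idx)
{
	return (uint32_t)idx | PTP_SHARED_CLK_IDX_VALID;
}

static int decode_clk_idx(uint32_t value)
{
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;	/* no clock index stored */
	return (int)(value & ~PTP_SHARED_CLK_IDX_VALID);
}

int main(void)
{
	printf("%d\n", decode_clk_idx(encode_clk_idx(3)));	/* prints 3 */
	printf("%d\n", decode_clk_idx(0));			/* prints -1 */
	return 0;
}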
+
+/**
+ * ice_set_ptp_clock_index - Set the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Set the PTP clock index for this device into the shared driver parameters,
+ * so that other PFs associated with this device can read it.
+ *
+ * If the PF is unable to store the clock index, it will log an error, but
+ * will continue operating PTP.
+ */
+static void ice_set_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ u32 value;
+ int err;
+
+ if (!pf->ptp.clock)
+ return;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ value = (u32)ptp_clock_index(pf->ptp.clock);
+ if (value > INT_MAX) {
+ dev_err(dev, "PTP Clock index is too large to store\n");
+ return;
+ }
+ value |= PTP_SHARED_CLK_IDX_VALID;
+
+ err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
+ if (err) {
+ dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_clear_ptp_clock_index - Clear the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Clear the PTP clock index for this device. Must be called when
+ * unregistering the PTP clock, in order to ensure other PFs stop reporting
+ * a clock object that no longer exists.
+ */
+static void ice_clear_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ int err;
+
+ /* Do not clear the index if we don't own the timer */
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
+ if (err) {
+ dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_ptp_read_src_clk_reg - Read the source clock register
+ * @pf: Board private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ */
+static u64
+ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 hi, lo, lo2;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
+ lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+
+ if (lo2 < lo) {
+ /* if TIME_L rolled over read TIME_L again and update
+ * system timestamps
+ */
+ ptp_read_system_prets(sts);
+ lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+ ptp_read_system_postts(sts);
+ hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
+ }
+
+ return ((u64)hi << 32) | lo;
+}
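
The lo/hi/lo2 sequence guards against the low word wrapping between the two register reads. A contrived standalone simulation of the same pattern, where the fake clock advances on every high-word read (rd_lo/rd_hi are hypothetical stand-ins for the GLTSYN_TIME_L/H reads):

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_clock;	/* simulated 64-bit PHC */

static uint32_t rd_lo(void) { return (uint32_t)fake_clock; }

static uint32_t rd_hi(void)
{
	fake_clock += 5;	/* the clock keeps ticking between reads */
	return (uint32_t)(fake_clock >> 32);
}

static uint64_t read_split_clock(void)
{
	uint32_t lo = rd_lo();
	uint32_t hi = rd_hi();
	uint32_t lo2 = rd_lo();

	if (lo2 < lo) {
		/* the low word wrapped between the reads; sample again */
		lo = rd_lo();
		hi = rd_hi();
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	fake_clock = 0xFFFFFFFDULL;	/* three ticks before a low-word wrap */
	/* without the guard this would combine into 0x1FFFFFFFD */
	printf("0x%llx\n", (unsigned long long)read_split_clock());
	return 0;
}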
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old. It must also be called whenever the PHC
+ * time has been changed.
+ */
+static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ u64 systime;
+ int i;
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+}
+
+/**
+ * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ * Note that the captured timestamp values may be 40 bits, but the lower
+ * 8 bits are sub-nanoseconds and generally discarded.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ */
+static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_tstamp value
+ */
+ delta = (in_tstamp - phc_time_lo);
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > (U32_MAX / 2)) {
+ /* reverse the delta calculation here */
+ delta = (phc_time_lo - in_tstamp);
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
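
A standalone sketch of the extension arithmetic with made-up values, exercising both the forward case and the wrapped (backward) case:

#include <stdint.h>
#include <stdio.h>

/* mirror of the extension logic above, for experimentation */
static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_time_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_time_lo;

	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (uint32_t)(phc_time_lo - in_tstamp);
	return cached_phc_time + delta;
}

int main(void)
{
	/* timestamp captured 1000 ns after the cached PHC time */
	printf("%llu\n", (unsigned long long)
	       extend_32b_ts(0x500000000ULL, 0x000003E8));
	/* timestamp captured 1000 ns before the cached PHC time: the
	 * delta looks huge, so the backward branch is taken
	 */
	printf("%llu\n", (unsigned long long)
	       extend_32b_ts(0x500000000ULL, 0xFFFFFC18));
	return 0;
}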
+
+/**
+ * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @pf: Board private structure
+ * @in_tstamp: Ingress/egress 40b timestamp value
+ *
+ * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
+ * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
+ *
+ * *--------------------------------------------------------------*
+ * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
+ * *--------------------------------------------------------------*
+ *
+ * The low bit is an indicator of whether the timestamp is valid. The next
+ * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
+ * and the remaining 32 bits are the lower 32 bits of the PHC timer.
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
+ * algorithm.
+ */
+static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
+{
+ const u64 mask = GENMASK_ULL(31, 0);
+
+ return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
+ (in_tstamp >> 8) & mask);
+}
+
+/**
+ * ice_ptp_read_time - Read the time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * This function reads the source clock registers and stores them in a timespec.
+ * However, since the registers are 64 bits of nanoseconds, we must convert the
+ * result to a timespec before we can return.
+ */
+static void
+ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+
+ *ts = ns_to_timespec64(time_ns);
+}
+
+/**
+ * ice_ptp_write_init - Set PHC time to provided value
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the PHC time to the specified time provided in the timespec.
+ */
+static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
+{
+ u64 ns = timespec64_to_ns(ts);
+ struct ice_hw *hw = &pf->hw;
+
+ return ice_ptp_init_time(hw, ns);
+}
+
+/**
+ * ice_ptp_write_adj - Adjust PHC clock time atomically
+ * @pf: Board private structure
+ * @adj: Adjustment in nanoseconds
+ *
+ * Perform an atomic adjustment of the PHC time by the specified number of
+ * nanoseconds.
+ */
+static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ return ice_ptp_adj_clock(hw, adj);
+}
+
+/**
+ * ice_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ */
+static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ u64 freq, divisor = 1000000ULL;
+ struct ice_hw *hw = &pf->hw;
+ s64 incval, diff;
+ int neg_adj = 0;
+ int err;
+
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ /* handle overflow by scaling down the scaled_ppm and
+ * the divisor, losing some precision
+ */
+ scaled_ppm >>= 2;
+ divisor >>= 2;
+ }
+
+ freq = (incval * (u64)scaled_ppm) >> 16;
+ diff = div_u64(freq, divisor);
+
+ if (neg_adj)
+ incval -= diff;
+ else
+ incval += diff;
+
+ err = ice_ptp_write_incval_locked(hw, incval);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
+ err);
+ return -EIO;
+ }
+
+ return 0;
+}
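
To make the scaled_ppm units concrete: a 1 ppm request arrives as scaled_ppm = 65536 (16.16 fixed point), and the adjustment works out to one millionth of the base increment. A minimal sketch with a placeholder incval (the real base is ICE_PTP_NOMINAL_INCVAL_E810, and the overflow-scaling loop is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t incval = 0x100000000LL;	/* placeholder base increment */
	long scaled_ppm = 65536;	/* exactly 1 ppm in 16.16 fixed point */
	uint64_t freq, divisor = 1000000ULL;
	int64_t diff;

	freq = ((uint64_t)incval * (uint64_t)scaled_ppm) >> 16;
	diff = (int64_t)(freq / divisor);

	/* a +1 ppm adjustment adds one millionth of the base increment */
	printf("diff = %lld (incval/1e6 = %lld)\n",
	       (long long)diff, (long long)(incval / 1000000));
	return 0;
}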
+
+/**
+ * ice_ptp_extts_work - Workqueue task function
+ * @work: external timestamp work structure
+ *
+ * Service for PTP external clock event
+ */
+static void ice_ptp_extts_work(struct kthread_work *work)
+{
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ struct ptp_clock_event event;
+ struct ice_hw *hw = &pf->hw;
+ u8 chan, tmr_idx;
+ u32 hi, lo;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* Event time is captured by one of the two matched registers
+ * GLTSYN_EVNT_L: 32 LSB of sampled time event
+ * GLTSYN_EVNT_H: 32 MSB of sampled time event
+ * Event is defined in GLTSYN_EVNT_0 register
+ */
+ for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ /* Check if channel is enabled */
+ if (pf->ptp.ext_ts_irq & (1 << chan)) {
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (((u64)hi) << 32) | lo;
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+
+ /* Fire event */
+ ptp_clock_event(pf->ptp.clock, &event);
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ }
+ }
+}
+
+/**
+ * ice_ptp_cfg_extts - Configure EXTTS pin and channel
+ * @pf: Board private structure
+ * @ena: true to enable; false to disable
+ * @chan: GPIO channel (0-3)
+ * @gpio_pin: GPIO pin
+ * @extts_flags: request flags from the ptp_extts_request.flags
+ */
+static int
+ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+ unsigned int extts_flags)
+{
+ u32 func, aux_reg, gpio_reg, irq_reg;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+
+ if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
+ return -EINVAL;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ irq_reg = rd32(hw, PFINT_OICR_ENA);
+
+ if (ena) {
+ /* Enable the interrupt */
+ irq_reg |= PFINT_OICR_TSYN_EVNT_M;
+ aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
+
+#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
+#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
+
+ /* set event level to requested edge */
+ if (extts_flags & PTP_FALLING_EDGE)
+ aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
+ if (extts_flags & PTP_RISING_EDGE)
+ aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
+
+ /* Write GPIO CTL reg.
+ * 0x1 is input sampled by EVENT register(channel)
+ * + num_in_channels * tmr_idx
+ */
+ func = 1 + chan + (tmr_idx * 3);
+ gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
+ GLGEN_GPIO_CTL_PIN_FUNC_M);
+ pf->ptp.ext_ts_chan |= (1 << chan);
+ } else {
+ /* clear the values we set to reset defaults */
+ aux_reg = 0;
+ gpio_reg = 0;
+ pf->ptp.ext_ts_chan &= ~(1 << chan);
+ if (!pf->ptp.ext_ts_chan)
+ irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
+ }
+
+ wr32(hw, PFINT_OICR_ENA, irq_reg);
+ wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
+ wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+ * @pf: Board private structure
+ * @chan: GPIO channel (0-3)
+ * @config: desired periodic clk configuration. NULL will disable channel
+ * @store: If set to true the values will be stored
+ *
+ * Configure the internal clock generator modules to generate a clock wave of
+ * the specified period.
+ */
+static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
+ struct ice_perout_channel *config, bool store)
+{
+ u64 current_time, period, start_time, phase;
+ struct ice_hw *hw = &pf->hw;
+ u32 func, val, gpio_pin;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* 0. Reset mode & out_en in AUX_OUT */
+ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
+
+ /* If we're disabling the output, clear out CLKO and TGT and keep
+ * output level low
+ */
+ if (!config || !config->ena) {
+ wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
+ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
+ wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
+
+ val = GLGEN_GPIO_CTL_PIN_DIR_M;
+ gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
+ wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+ /* Store the value if requested */
+ if (store)
+ memset(&pf->ptp.perout_channels[chan], 0,
+ sizeof(struct ice_perout_channel));
+
+ return 0;
+ }
+ period = config->period;
+ start_time = config->start_time;
+ div64_u64_rem(start_time, period, &phase);
+ gpio_pin = config->gpio_pin;
+
+ /* 1. Write clkout with half of required period value */
+ if (period & 0x1) {
+ dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
+ goto err;
+ }
+
+ period >>= 1;
+
+ /* For proper operation, the GLTSYN_CLKO must be larger than clock tick
+ */
+#define MIN_PULSE 3
+ if (period <= MIN_PULSE || period > U32_MAX) {
+ dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
+ MIN_PULSE * 2);
+ goto err;
+ }
+
+ wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
+
+ /* Allow time for programming before start_time is hit */
+ current_time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* if start time is in the past start the timer at the nearest second
+ * maintaining phase
+ */
+ if (start_time < current_time)
+		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
+ NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+
+ start_time -= E810_OUT_PROP_DELAY_NS;
+
+ /* 2. Write TARGET time */
+ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
+ wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
+
+ /* 3. Write AUX_OUT register */
+ val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
+ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
+
+ /* 4. write GPIO CTL reg */
+ func = 8 + chan + (tmr_idx * 4);
+ val = GLGEN_GPIO_CTL_PIN_DIR_M |
+ ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+ wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+ /* Store the value if requested */
+ if (store) {
+ memcpy(&pf->ptp.perout_channels[chan], config,
+ sizeof(struct ice_perout_channel));
+ pf->ptp.perout_channels[chan].start_time = phase;
+ }
+
+ return 0;
+err:
+ dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
+ return -EFAULT;
+}
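
A worked sketch of the "restart at the next second while keeping phase" computation above, with made-up nanosecond values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t period = 500000000ULL;		/* 0.5 s output period */
	uint64_t start_time = 1250000000ULL;	/* requested start, already past */
	uint64_t current_time = 9100000000ULL;	/* "now" as read from the PHC */
	uint64_t phase = start_time % period;	/* 250000000 ns */

	if (start_time < current_time)
		start_time = (current_time + NSEC_PER_SEC - 1) / NSEC_PER_SEC
			     * NSEC_PER_SEC + phase;

	/* prints 10250000000: the 10 s boundary plus the 0.25 s phase */
	printf("new start: %llu\n", (unsigned long long)start_time);
	return 0;
}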
+
+/**
+ * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int
+ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_perout_channel clk_cfg = {0};
+ unsigned int chan;
+ u32 gpio_pin;
+ int err;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ chan = rq->perout.index;
+ if (chan == PPS_CLK_GEN_CHAN)
+ clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ else
+ clk_cfg.gpio_pin = chan;
+
+ clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
+ rq->perout.period.nsec);
+ clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
+ rq->perout.start.nsec);
+ clk_cfg.ena = !!on;
+
+ err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ chan = rq->extts.index;
+ gpio_pin = chan;
+
+ err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
+ rq->extts.flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/**
+ * ice_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ */
+static int
+ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+
+ if (!ice_ptp_lock(hw)) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
+ return -EBUSY;
+ }
+
+ ice_ptp_read_time(pf, ts, sts);
+ ice_ptp_unlock(hw);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ */
+static int
+ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct timespec64 ts64 = *ts;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ err = ice_ptp_write_init(pf, &ts64);
+ ice_ptp_unlock(hw);
+
+ if (!err)
+ ice_ptp_update_cached_phctime(pf);
+
+exit:
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ */
+static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+
+ then = ns_to_timespec64(delta);
+ ice_ptp_gettimex64(info, &now, NULL);
+ now = timespec64_add(now, then);
+
+ return ice_ptp_settime64(info, (const struct timespec64 *)&now);
+}
+
+/**
+ * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ */
+static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN) {
+ dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
+ return ice_ptp_adjtime_nonatomic(info, delta);
+ }
+
+ if (!ice_ptp_lock(hw)) {
+ dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
+ return -EBUSY;
+ }
+
+ err = ice_ptp_write_adj(pf, delta);
+
+ ice_ptp_unlock(hw);
+
+ if (err) {
+ dev_err(dev, "PTP failed to adjust time, err %d\n", err);
+ return err;
+ }
+
+ ice_ptp_update_cached_phctime(pf);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Copy the timestamping config to user buffer
+ */
+int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return -EIO;
+
+ config = &pf->ptp.tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @pf: Board private structure
+ * @config: hwtstamp settings requested or saved
+ */
+static int
+ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ice_set_tx_tstamp(pf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ ice_set_tx_tstamp(pf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ice_set_rx_tstamp(pf, false);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_ts_config - ioctl interface to control the timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Get the user config and store it
+ */
+int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return -EAGAIN;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ice_ptp_set_timestamp_mode(pf, &config);
+ if (err)
+ return err;
+
+ /* Save these settings for future reference */
+ pf->ptp.tstamp_config = config;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
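
For reference, a minimal userspace sketch that drives this path through the standard net_tstamp UAPI; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&cfg;

	/* request timestamps; the driver writes back what it granted */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter = %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}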
+
+/**
+ * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The driver receives a notification in the receive descriptor with the
+ * timestamp.
+ * The timestamp is in ns, so we must convert the result first.
+ */
+void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+{
+ u32 ts_high;
+ u64 ts_ns;
+
+ /* Populate timesync data into skb */
+ if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
+ struct skb_shared_hwtstamps *hwtstamps;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
+ * cached PHC value, rather than accessing the PF. This also
+ * allows us to simply pass the upper 32bits of nanoseconds
+ * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
+ * it would just discard these bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ }
+}
+
+/**
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * @info: PTP clock capabilities
+ */
+static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+{
+ info->n_per_out = E810_N_PER_OUT;
+ info->n_ext_ts = E810_N_EXT_TS;
+}
+
+/**
+ * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E810 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for e810
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->enable = ice_ptp_gpio_enable_e810;
+
+ ice_ptp_setup_pins_e810(info);
+}
+
+/**
+ * ice_ptp_set_caps - Set PTP capabilities
+ * @pf: Board private structure
+ */
+static void ice_ptp_set_caps(struct ice_pf *pf)
+{
+ struct ptp_clock_info *info = &pf->ptp.info;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
+ dev_driver_string(dev), dev_name(dev));
+ info->owner = THIS_MODULE;
+ info->max_adj = 999999999;
+ info->adjtime = ice_ptp_adjtime;
+ info->adjfine = ice_ptp_adjfine;
+ info->gettimex64 = ice_ptp_gettimex64;
+ info->settime64 = ice_ptp_settime64;
+
+ ice_ptp_set_funcs_e810(pf, info);
+}
+
+/**
+ * ice_ptp_create_clock - Create PTP clock device for userspace
+ * @pf: Board private structure
+ *
+ * This function creates a new PTP clock device, but only if one does not
+ * already exist. It returns an error if it cannot create one, and success if
+ * a device already exists. Should be used by ice_ptp_init to create the clock
+ * initially, and to prevent global resets from creating new clock devices.
+ */
+static long ice_ptp_create_clock(struct ice_pf *pf)
+{
+ struct ptp_clock_info *info;
+ struct ptp_clock *clock;
+ struct device *dev;
+
+ /* No need to create a clock device if we already have one */
+ if (pf->ptp.clock)
+ return 0;
+
+ ice_ptp_set_caps(pf);
+
+ info = &pf->ptp.info;
+ dev = ice_pf_to_dev(pf);
+
+ /* Allocate memory for kernel pins interface */
+ if (info->n_pins) {
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config),
+ GFP_KERNEL);
+ if (!info->pin_config) {
+ info->n_pins = 0;
+ return -ENOMEM;
+ }
+ }
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ pf->ptp.clock = clock;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
+ * @work: pointer to the kthread_work struct
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 2) clear the timestamp valid bit in the PHY register
+ * 3) unlock the index by clearing the associated in_use bit
+ * 4) extend the 40b timestamp value to get a 64bit timestamp
+ * 5) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, then re-queue the work. This
+ * may cause us to effectively poll even when not strictly necessary. We do this
+ * because it's possible a new timestamp was requested around the same time as
+ * the interrupt. In some cases hardware might not interrupt us again when the
+ * timestamp is captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this work function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the work item. If a Tx thread
+ * starts a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
+{
+ struct ice_ptp_port *ptp_port;
+ struct ice_ptp_tx *tx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u8 idx;
+
+ tx = container_of(work, struct ice_ptp_tx, work);
+ if (!tx->init)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+ hw = &pf->hw;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ /* Check if the timestamp is valid */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ continue;
+
+ /* clear the timestamp register, so that it won't show valid
+ * again when re-used.
+ */
+ ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ kthread_queue_work(pf->ptp.kworker, &tx->work);
+ spin_unlock(&tx->lock);
+}
+
+/**
+ * ice_ptp_request_ts - Request an available Tx timestamp index
+ * @tx: the PTP Tx timestamp tracker to request from
+ * @skb: the SKB to associate with this timestamp request
+ */
+s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+{
+ u8 idx;
+
+ /* Check if this tracker is initialized */
+ if (!tx->init)
+ return -1;
+
+ spin_lock(&tx->lock);
+ /* Find and set the first available index */
+ idx = find_first_zero_bit(tx->in_use, tx->len);
+ if (idx < tx->len) {
+ /* We got a valid index that no other thread could have set. Store
+ * a reference to the skb and the start time to allow discarding old
+ * requests.
+ */
+ set_bit(idx, tx->in_use);
+ tx->tstamps[idx].start = jiffies;
+ tx->tstamps[idx].skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
+ spin_unlock(&tx->lock);
+
+	/* Return the appropriate PHY timestamp register index, or -1 if no
+	 * indexes were available.
+ */
+ if (idx >= tx->len)
+ return -1;
+ else
+ return idx + tx->quad_offset;
+}
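
A hypothetical transmit-path excerpt showing how a caller might consume this helper; the function name and the descriptor hand-off are assumptions, not part of this patch:

/* Hypothetical transmit-path excerpt; not part of this patch. */
static void example_request_tx_tstamp(struct ice_pf *pf, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		s8 idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);

		if (idx < 0)
			return;	/* no free slot: transmit without a timestamp */

		/* idx would be written into the Tx descriptor here so the
		 * PHY latches the completion time into that register
		 */
	}
}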
+
+/**
+ * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * @pf: Board private structure
+ *
+ * Queue work required to process the PTP Tx timestamps outside of interrupt
+ * context.
+ */
+void ice_ptp_process_ts(struct ice_pf *pf)
+{
+ if (pf->ptp.port.tx.init)
+ kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
+}
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call this
+ * directly; use ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+ kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ }
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ kthread_cancel_work_sync(&tx->work);
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+	bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ice_ptp_periodic_work(struct kthread_work *work)
+{
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
+ ice_ptp_update_cached_phctime(pf);
+
+ ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+ msecs_to_jiffies(500));
+}
+
+/**
+ * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
+ * @pf: Board private structure
+ *
+ * Setup and initialize a PTP clock device that represents the device hardware
+ * clock. Save the clock index for other functions connected to the same
+ * hardware resource.
+ */
+static int ice_ptp_init_owner(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ u8 src_idx;
+ int err;
+
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Clear some HW residue and enable source clock */
+ src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Enable PHY time sync */
+ err = ice_ptp_init_phy_e810(hw);
+ if (err)
+ goto err_exit;
+
+ /* Clear event status indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err_exit;
+ }
+
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err_exit;
+ }
+
+ ts = ktime_to_timespec64(ktime_get_real());
+ /* Write the initial Time value to PHY and LAN */
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err_exit;
+ }
+
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
+
+ /* Ensure we have a clock device */
+ err = ice_ptp_create_clock(pf);
+ if (err)
+ goto err_clk;
+
+ /* Store the PTP clock index for other PFs */
+ ice_set_ptp_clock_index(pf);
+
+ return 0;
+
+err_clk:
+ pf->ptp.clock = NULL;
+err_exit:
+ dev_err(dev, "PTP failed to register clock, err %d\n", err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets the device up for PTP support. On its first run it
+ * creates a clock device; on later runs, such as after a reset, it
+ * reconfigures the device without creating a new clock.
+ */
+void ice_ptp_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ /* PTP is currently only supported on E810 devices */
+ if (!ice_is_e810(hw))
+ return;
+
+ /* Check if this PF owns the source timer */
+ if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ err = ice_ptp_init_owner(pf);
+ if (err)
+ return;
+ }
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ /* Initialize the PTP port Tx timestamp tracker */
+ ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
+ kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ err = PTR_ERR(kworker);
+ goto err_kworker;
+ }
+ pf->ptp.kworker = kworker;
+
+ set_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
+
+ dev_info(dev, "PTP init successful\n");
+ return;
+
+err_kworker:
+ /* If we registered a PTP clock, release it */
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ dev_err(dev, "PTP failed %d\n", err);
+}
+
+/**
+ * ice_ptp_release - Disable the driver/HW support and unregister the clock
+ * @pf: Board private structure
+ *
+ * This function undoes the initialization work: it disables timestamping,
+ * releases the Tx tracker, stops the periodic work, and unregisters the clock
+ */
+void ice_ptp_release(struct ice_pf *pf)
+{
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+
+ kthread_cancel_delayed_work_sync(&pf->ptp.work);
+
+ if (pf->ptp.kworker) {
+ kthread_destroy_worker(pf->ptp.kworker);
+ pf->ptp.kworker = NULL;
+ }
+
+ if (!pf->ptp.clock)
+ return;
+
+ ice_clear_ptp_clock_index(pf);
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+
+ dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
new file mode 100644
index 000000000000..e1c787bd5b96
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_H_
+#define _ICE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/kthread.h>
+
+#include "ice_ptp_hw.h"
+
+enum ice_ptp_pin {
+ GPIO_20 = 0,
+ GPIO_21,
+ GPIO_22,
+ GPIO_23,
+ NUM_ICE_PTP_PIN
+};
+
+struct ice_perout_channel {
+ bool ena;
+ u32 gpio_pin;
+ u64 period;
+ u64 start_time;
+};
+
+/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
+ * is stored in a buffer of registers. Depending on the specific hardware,
+ * this buffer might be shared across multiple PHY ports.
+ *
+ * On transmit of a packet to be timestamped, software is responsible for
+ * selecting an open index. Hardware makes no attempt to lock or prevent
+ * re-use of an index for multiple packets.
+ *
+ * To handle this, timestamp indexes must be tracked by software to ensure
+ * that an index is not re-used for multiple transmitted packets. The
+ * structures and functions declared in this file track the available Tx
+ * register indexes, as well as provide storage for the SKB pointers.
+ *
+ * To allow multiple ports to access the shared register block independently,
+ * the blocks are split up so that indexes are assigned to each port based on
+ * hardware logical port number.
+ */
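
ice_ptp_request_ts(), declared further down in this header, is the routine that picks an open index; its body is not part of this hunk. A hedged sketch of the reservation logic the comment above implies — find a clear bit under the lock, claim it, and record the SKB and start time — using the structures defined below:

static s8 example_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	spin_lock(&tx->lock);

	/* claim the first free slot, if any */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
	}

	spin_unlock(&tx->lock);

	if (idx >= tx->len)
		return -1;

	/* translate the slot into a real index within the quad */
	return idx + tx->quad_offset;
}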
+
+/**
+ * struct ice_tx_tstamp - Tracking for a single Tx timestamp
+ * @skb: pointer to the SKB for this timestamp request
+ * @start: jiffies when the timestamp was first requested
+ *
+ * This structure tracks a single timestamp request. The SKB pointer is
+ * provided when initiating a request. The start time is used to ensure that
+ * we discard old requests that were not fulfilled within a 2 second time
+ * window.
+ */
+struct ice_tx_tstamp {
+ struct sk_buff *skb;
+ unsigned long start;
+};
+
+/**
+ * struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
+ * @work: work function to handle processing of Tx timestamps
+ * @lock: lock to prevent concurrent write to in_use bitmap
+ * @tstamps: array of len to store outstanding requests
+ * @in_use: bitmap of len to indicate which slots are in use
+ * @quad: which quad the timestamps are captured in
+ * @quad_offset: offset into the quad's timestamp block used to compute the real index
+ * @len: length of the tstamps and in_use fields
+ * @init: if true, the tracker is initialized
+ */
+struct ice_ptp_tx {
+ struct kthread_work work;
+ spinlock_t lock; /* lock protecting in_use bitmap */
+ struct ice_tx_tstamp *tstamps;
+ unsigned long *in_use;
+ u8 quad;
+ u8 quad_offset;
+ u8 len;
+ u8 init;
+};
+
+/* Quad and port information for initializing timestamp blocks */
+#define INDEX_PER_QUAD 64
+#define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD)
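
For shared-quad hardware the split is simple arithmetic. A worked illustration, assuming ICE_PORTS_PER_QUAD is 4 (it is defined elsewhere in the driver):

/* Illustration only (assumes ICE_PORTS_PER_QUAD == 4):
 *
 *   INDEX_PER_PORT = 64 / 4 = 16
 *   quad_offset    = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT
 *   len            = INDEX_PER_PORT
 *
 * E810 does not share quads this way: ice_ptp_init_tx_e810() gives each
 * lport a full block, with quad_offset = 0 and len = INDEX_PER_QUAD.
 */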
+
+/**
+ * struct ice_ptp_port - data used to initialize an external port for PTP
+ *
+ * This structure contains PTP data related to the external ports. Currently
+ * it is used for tracking the Tx timestamps of a port. In the future this
+ * structure will also hold information for the E822 port initialization
+ * logic.
+ *
+ * @tx: Tx timestamp tracking for this port
+ */
+struct ice_ptp_port {
+ struct ice_ptp_tx tx;
+};
+
+#define GLTSYN_TGT_H_IDX_MAX 4
+
+/**
+ * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @port: data for the PHY port initialization procedure
+ * @work: delayed work function for periodic tasks
+ * @extts_work: work function for handling external timestamp events
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @ext_ts_chan: the external timestamp channel in use
+ * @ext_ts_irq: the external timestamp IRQ in use
+ * @kworker: kwork thread for handling periodic work
+ * @perout_channels: periodic output data
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @tstamp_config: hardware timestamping configuration
+ */
+struct ice_ptp {
+ struct ice_ptp_port port;
+ struct kthread_delayed_work work;
+ struct kthread_work extts_work;
+ u64 cached_phc_time;
+ u8 ext_ts_chan;
+ u8 ext_ts_irq;
+ struct kthread_worker *kworker;
+ struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct hwtstamp_config tstamp_config;
+};
+
+#define __ptp_port_to_ptp(p) \
+ container_of((p), struct ice_ptp, port)
+#define ptp_port_to_pf(p) \
+ container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp)
+
+#define __ptp_info_to_ptp(i) \
+ container_of((i), struct ice_ptp, info)
+#define ptp_info_to_pf(i) \
+ container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
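
These helpers exist so the ptp_clock_info callbacks, which only receive a struct ptp_clock_info pointer, can walk back to the owning PF. A hedged sketch of such a callback — not the driver's actual implementation, and ignoring the full s64 range handling a real adjtime needs:

static int example_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	/* ice_ptp_adj_clock() is declared in ice_ptp_hw.h */
	return ice_ptp_adj_clock(&pf->hw, (s32)delta);
}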
+
+#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define ICE_PTP_TS_VALID BIT(0)
+
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H_IDX_MAX 3
+
+/* Pin definitions for PTP PPS out */
+#define PPS_CLK_GEN_CHAN 3
+#define PPS_CLK_SRC_CHAN 2
+#define PPS_PIN_INDEX 5
+#define TIME_SYNC_PIN_INDEX 4
+#define E810_N_EXT_TS 3
+#define E810_N_PER_OUT 4
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+struct ice_pf;
+int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_get_ptp_clock_index(struct ice_pf *pf);
+
+s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_process_ts(struct ice_pf *pf);
+
+void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_init(struct ice_pf *pf);
+void ice_ptp_release(struct ice_pf *pf);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
+{
+ return -1;
+}
+
+static inline s8
+ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+{
+ return -1;
+}
+
+static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_init(struct ice_pf *pf) { }
+static inline void ice_ptp_release(struct ice_pf *pf) { }
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
new file mode 100644
index 000000000000..3eca0e4eab0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_ptp_hw.h"
+
+/* Low level functions for interacting with and managing the device clock used
+ * for the Precision Time Protocol.
+ *
+ * The ice hardware represents the current time using three registers:
+ *
+ * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
+ * +---------------+ +---------------+ +---------------+
+ * | 32 bits | | 32 bits | | 32 bits |
+ * +---------------+ +---------------+ +---------------+
+ *
+ * The registers are incremented every clock tick using a 40bit increment
+ * value defined over two registers:
+ *
+ * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
+ * +---------------+ +---------------+
+ * | 8 bits | | 32 bits |
+ * +---------------+ +---------------+
+ *
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
+ * registers every clock source tick. Depending on the specific device
+ * configuration, the clock source frequency could be one of a number of
+ * values.
+ *
+ * For E810 devices, the increment frequency is 812.5 MHz.
+ *
+ * The hardware captures timestamps in the PHY for incoming packets, and for
+ * outgoing packets on request. To support this, the PHY maintains a timer
+ * that matches the lower 64 bits of the global source timer.
+ *
+ * In order to ensure that the PHY timers and the source timer are equivalent,
+ * shadow registers are used to prepare the desired initial values. A special
+ * sync command is issued to trigger copying from the shadow registers into
+ * the appropriate source and PHY registers simultaneously.
+ */
+
+/**
+ * ice_get_ptp_src_clock_index - determine source clock index
+ * @hw: pointer to HW struct
+ *
+ * Determine the source clock index currently in use, based on device
+ * capabilities reported during initialization.
+ */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.tmr_index_assoc;
+}
+
+/* E810 functions
+ *
+ * The following functions operate on the E810 series devices which use
+ * a separate external PHY.
+ */
+
+/**
+ * ice_read_phy_reg_e810 - Read register from external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to read from
+ * @val: On return, the value read from the PHY
+ *
+ * Read a register from the external PHY on the E810 device.
+ */
+static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int status;
+
+ msg.msg_addr_low = lower_16_bits(addr);
+ msg.msg_addr_high = upper_16_bits(addr);
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ status = ice_sbq_rw_reg(hw, &msg);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
+ status);
+ return status;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e810 - Write register on external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to write to
+ * @val: the value to write to the PHY
+ *
+ * Write a value to a register of the external PHY on the E810 device.
+ */
+static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int status;
+
+ msg.msg_addr_low = lower_16_bits(addr);
+ msg.msg_addr_high = upper_16_bits(addr);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.dest_dev = rmn_0;
+ msg.data = val;
+
+ status = ice_sbq_rw_reg(hw, &msg);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static int
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ u32 lo_addr, hi_addr, lo, hi;
+ int status;
+
+ lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+ status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ /* For E810 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+
+ return 0;
+}
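
As a concrete example of the combine above:

/* e.g. hi = 0x5A, lo = 0x89ABCDEF:
 *   tstamp = (0x5Aull << TS_HIGH_S) | 0x89ABCDEF = 0x5A89ABCDEF (40 bits)
 */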
+
+/**
+ * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the timestamp block of the
+ * external PHY on the E810 device.
+ */
+static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
+{
+ u32 lo_addr, hi_addr;
+ int status;
+
+ lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+ status = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * @hw: pointer to HW struct
+ *
+ * Enable the timesync PTP functionality for the external PHY connected to
+ * this function.
+ */
+int ice_ptp_init_phy_e810(struct ice_hw *hw)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (status)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
+ status);
+
+ return status;
+}
+
+/**
+ * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for
+ * setting the initial clock time. The time will not actually be programmed
+ * until the driver issues an INIT_TIME command.
+ *
+ * The time value is the upper 32 bits of the PHY timer, usually in units of
+ * nominal nanoseconds.
+ */
+static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Prepare the PHY port for an atomic adjustment by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
+ * is completed by issuing an ADJ_TIME sync command.
+ *
+ * The adjustment value only contains the portion used for the upper 32 bits
+ * of the PHY timer, usually in units of nominal nanoseconds. Negative
+ * adjustments are supported using two's complement arithmetic.
+ */
+static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Adjustments are represented as signed 2's complement values in
+ * nanoseconds. Sub-nanosecond adjustment is not supported.
+ */
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
+ * completed by issuing an INIT_INCVAL command.
+ */
+static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
+{
+ u32 high, low;
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ low = lower_32_bits(incval);
+ high = upper_32_bits(incval);
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_H, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ */
+static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ int status;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ }
+
+ /* Read, modify, write */
+ status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
+ return status;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK_E810;
+ val |= cmd_val;
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+/* Device agnostic functions
+ *
+ * The following functions implement useful behavior to hide the differences
+ * between E810 and other devices. They call the device-specific
+ * implementations where necessary.
+ *
+ * Currently, the driver only supports E810, but future work will enable
+ * support for E822-based devices.
+ */
+
+/**
+ * ice_ptp_lock - Acquire PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Acquire the global PTP hardware semaphore lock. Returns true if the lock
+ * was acquired, false otherwise.
+ *
+ * The PFTSYN_SEM register sets the busy bit on read, returning the previous
+ * value. If software sees the busy bit cleared, this means that this function
+ * acquired the lock (and the busy bit is now set). If software sees the busy
+ * bit set, it means that another function acquired the lock.
+ *
+ * Software must clear the busy bit with a write to release the lock for other
+ * functions when done.
+ */
+bool ice_ptp_lock(struct ice_hw *hw)
+{
+ u32 hw_lock;
+ int i;
+
+#define MAX_TRIES 5
+
+ for (i = 0; i < MAX_TRIES; i++) {
+ hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
+ if (!hw_lock)
+ break;
+
+ /* Somebody is holding the lock */
+ usleep_range(10000, 20000);
+ }
+
+ return !hw_lock;
+}
+
+/**
+ * ice_ptp_unlock - Release PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Release the global PTP hardware semaphore lock. This is done by writing to
+ * the PFTSYN_SEM register.
+ */
+void ice_ptp_unlock(struct ice_hw *hw)
+{
+ wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the command to issue
+ *
+ * Prepare the source timer and PHY timers and then trigger the requested
+ * command. This causes the shadow registers previously written in preparation
+ * for the command to be synchronously applied to both the source and PHY
+ * timers.
+ */
+static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ int status;
+
+ /* First, prepare the source timer */
+ ice_ptp_src_cmd(hw, cmd);
+
+ /* Next, prepare the ports */
+ status = ice_ptp_port_cmd_e810(hw, cmd);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
+ cmd, status);
+ return status;
+ }
+
+ /* Write the sync command register to drive both source and PHY timer commands
+ * synchronously
+ */
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_time - Initialize device time to provided value
+ * @hw: pointer to HW struct
+ * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
+ *
+ * Initialize the device to the specified time provided. This requires a three
+ * step process:
+ *
+ * 1) write the new init time to the source timer shadow registers
+ * 2) write the new init time to the PHY timer shadow registers
+ * 3) issue an init_time timer command to synchronously switch both the source
+ * and port timers to the new init time value at the next clock cycle.
+ */
+int ice_ptp_init_time(struct ice_hw *hw, u64 time)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Source timers */
+ wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
+ wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
+
+ /* PHY timers */
+ /* Fill Rx and Tx ports and send msg to PHY */
+ status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, INIT_TIME);
+}
+
+/**
+ * ice_ptp_write_incval - Program PHC with new increment value
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program the PHC with a new increment value. This requires a three-step
+ * process:
+ *
+ * 1) Write the increment value to the source timer shadow registers
+ * 2) Write the increment value to the PHY timer shadow registers
+ * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
+ * source and port timers to the new increment value at the next clock
+ * cycle.
+ */
+int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Shadow Adjust */
+ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
+
+ status = ice_ptp_prep_phy_incval_e810(hw, incval);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
+}
+
+/**
+ * ice_ptp_write_incval_locked - Program new incval while holding semaphore
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program a new PHC incval while holding the PTP semaphore.
+ */
+int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
+{
+ int status;
+
+ if (!ice_ptp_lock(hw))
+ return -EBUSY;
+
+ status = ice_ptp_write_incval(hw, incval);
+
+ ice_ptp_unlock(hw);
+
+ return status;
+}
+
+/**
+ * ice_ptp_adj_clock - Adjust PHC clock time atomically
+ * @hw: pointer to HW struct
+ * @adj: Adjustment in nanoseconds
+ *
+ * Perform an atomic adjustment of the PHC time by the specified number of
+ * nanoseconds. This requires a three-step process:
+ *
+ * 1) Write the adjustment to the source timer shadow registers
+ * 2) Write the adjustment to the PHY timer shadow registers
+ * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
+ * both the source and port timers at the next clock cycle.
+ */
+int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
+ * For an ADJ_TIME command, this set of registers represents the value
+ * to add to the clock time. It supports subtraction by interpreting
+ * the value as a 2's complement integer.
+ */
+ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
+ wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
+
+ status = ice_ptp_prep_phy_adj_e810(hw, adj);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, ADJ_TIME);
+}
+
+/**
+ * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block.
+ */
+int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
+{
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+}
+
+/**
+ * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the timestamp block.
+ */
+int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
+{
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
new file mode 100644
index 000000000000..55a414e87018
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_HW_H_
+#define _ICE_PTP_HW_H_
+
+enum ice_ptp_tmr_cmd {
+ INIT_TIME,
+ INIT_INCVAL,
+ ADJ_TIME,
+ ADJ_TIME_AT_TIME,
+ READ_TIME
+};
+
+/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
+ * the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
+ */
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
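
The constant can be derived from the 812.5 MHz clock, assuming the 8.32 fixed-point nanosecond increment format described in ice_ptp_hw.c:

/* One 812.5 MHz tick lasts 10^9 / 812,500,000 = 16/13 ns, so:
 *
 *   incval = 16 * 2^32 / 13 = 68719476736 / 13
 *          = 5286113595 = 0x13B13B13B
 *
 * which matches ICE_PTP_NOMINAL_INCVAL_E810 above. Or, computed at
 * runtime (div64_u64() is from linux/math64.h): */
u64 incval = div64_u64(1000000000ULL << 32, 812500000ULL);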
+
+/* Device agnostic functions */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
+bool ice_ptp_lock(struct ice_hw *hw);
+void ice_ptp_unlock(struct ice_hw *hw);
+int ice_ptp_init_time(struct ice_hw *hw, u64 time);
+int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
+int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
+int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
+int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+
+/* E810 family functions */
+int ice_ptp_init_phy_e810(struct ice_hw *hw);
+
+#define PFTSYN_SEM_BYTES 4
+
+/* PHY timer commands */
+#define SEL_CPK_SRC 8
+
+/* Time Sync command Definitions */
+#define GLTSYN_CMD_INIT_TIME BIT(0)
+#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_ADJ_TIME BIT(2)
+#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
+#define GLTSYN_CMD_READ_TIME BIT(7)
+
+#define TS_CMD_MASK_E810 0xFF
+#define SYNC_EXEC_CMD 0x3
+
+/* E810 timesync enable register */
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
+
+/* E810 shadow init time registers */
+#define ETH_GLTSYN_SHTIME_0(_i) (0x03000368 + ((_i) * 32))
+#define ETH_GLTSYN_SHTIME_L(_i) (0x0300036C + ((_i) * 32))
+
+/* E810 shadow time adjust registers */
+#define ETH_GLTSYN_SHADJ_L(_i) (0x03000378 + ((_i) * 32))
+#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
+
+/* E810 timer command register */
+#define ETH_GLTSYN_CMD 0x03000344
+
+/* Source timer incval macros */
+#define INCVAL_HIGH_M 0xFF
+
+/* Timestamp block macros */
+#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_S 32
+
+#define BYTES_PER_IDX_ADDR_L_U 8
+
+/* External PHY timestamp address */
+#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
+ ((idx) * BYTES_PER_IDX_ADDR_L_U))
+
+#define LOW_TX_MEMORY_BANK_START 0x03090000
+#define HIGH_TX_MEMORY_BANK_START 0x03090004
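
Plugging illustrative values into TS_EXT() shows the layout: 4 KB per port, 8 bytes per index:

/* e.g. the low 32 bits of timestamp index 2 on lport 1 sit at:
 *
 *   TS_EXT(LOW_TX_MEMORY_BANK_START, 1, 2)
 *     = 0x03090000 + 0x1000 * 1 + 2 * 8
 *     = 0x03091010
 *
 * with the matching high byte at the same offset from 0x03090004.
 */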
+
+#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
new file mode 100644
index 000000000000..ead75fe2bcda
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+ /* Sideband Queue commands */
+ ice_sbq_opc_neigh_dev_req = 0x0C00,
+ ice_sbq_opc_neigh_dev_ev = 0x0C01
+};
+
+/* Sideband Queue descriptor: indirect, non-posted
+ * command format
+ */
+struct ice_sbq_cmd_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+
+ /* Opaque message data */
+ __le32 cookie_high;
+ __le32 cookie_low;
+
+ union {
+ __le16 cmd_len;
+ __le16 cmpl_len;
+ } param0;
+
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_sbq_evt_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+ u8 data[24];
+};
+
+enum ice_sbq_msg_dev {
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06
+};
+
+enum ice_sbq_msg_opcode {
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS 0x40
+#define ICE_SBQ_MSG_SBE_FBE 0x0F
+
+struct ice_sbq_msg_req {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ u8 sbe_fbe;
+ u8 func_id;
+ __le16 msg_addr_low;
+ __le32 msg_addr_high;
+ __le32 data;
+};
+
+struct ice_sbq_msg_cmpl {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ __le32 data;
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+ u8 dest_dev;
+ u8 opcode;
+ u16 msg_addr_low;
+ u32 msg_addr_high;
+ u32 data;
+};
+#endif /* _ICE_SBQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f097637e405..9f07b6641705 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -596,6 +596,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
}
/**
+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate RDMA queue contexts */
+ if (!vsi_ctx->rdma_q_ctx[tc]) {
+ vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+ new_numqs,
+ sizeof(*q_ctx),
+ GFP_KERNEL);
+ if (!vsi_ctx->rdma_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+ return 0;
+ }
+ /* the number of queues has increased, so resize the queue contexts */
+ if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
+
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
+ prev_num * sizeof(*q_ctx));
+ devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
+ vsi_ctx->rdma_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+ }
+ return 0;
+}
+
+/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
* @opcode: opcode for add, query, or remove profile(s)
@@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
- prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ else
+ prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
/* num queues are not changed or less than the previous number */
if (new_numqs <= prev_numqs)
return status;
- status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
- if (status)
- return status;
+ if (owner == ICE_SCHED_NODE_OWNER_LAN) {
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
+ return status;
+ } else {
+ status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
+ return status;
+ }
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
@@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
new_num_nodes, owner);
if (status)
return status;
- vsi_ctx->sched.max_lanq[tc] = new_numqs;
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
+ else
+ vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
return 0;
}
@@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* recreate the child nodes all the time in these cases.
*/
vsi_ctx->sched.max_lanq[tc] = 0;
+ vsi_ctx->sched.max_rdmaq[tc] = 0;
}
/* update the VSI child nodes */
@@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
+ else
+ vsi_ctx->sched.max_rdmaq[i] = 0;
}
status = 0;
@@ -2686,8 +2745,8 @@ static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
- struct ice_sched_agg_vsi_info *agg_vsi_info;
- struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+ struct ice_sched_agg_info *agg_info, *old_agg_info;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u8 tc;
@@ -2697,6 +2756,20 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
return ICE_ERR_PARAM;
+ /* If the VSI is already part of another aggregator then update
+ * its VSI info list
+ */
+ old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+ if (old_agg_info && old_agg_info != agg_info) {
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+ &old_agg_info->agg_vsi_list,
+ list_entry)
+ if (old_agg_vsi_info->vsi_handle == vsi_handle)
+ break;
+ }
+
/* check if entry already exist */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info) {
@@ -2721,6 +2794,12 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
break;
set_bit(tc, agg_vsi_info->tc_bitmap);
+ if (old_agg_vsi_info)
+ clear_bit(tc, old_agg_vsi_info->tc_bitmap);
+ }
+ if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
+ list_del(&old_agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
}
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 357d3073d814..3b6c1420aa7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include "ice_lib.h"
#include "ice_switch.h"
#define ICE_ETH_DA_OFFSET 0
@@ -302,6 +303,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
vsi->lan_q_ctx[i] = NULL;
}
+ if (vsi->rdma_q_ctx[i]) {
+ devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+ vsi->rdma_q_ctx[i] = NULL;
+ }
}
}
@@ -423,6 +428,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
+ * @hw: pointer to HW struct
+ * @vsi_handle: VSI SW index
+ * @enable: boolean for enable/disable
+ */
+int
+ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+
+ ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!ctx)
+ return -EIO;
+
+ if (enable)
+ ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ else
+ ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+
+ return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+}
+
+/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the HW struct
* @vsi_list_id: VSI list ID returned or used for lookup
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 8b4f9d35c860..c5db8d56133f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -26,6 +26,8 @@ struct ice_vsi_ctx {
u8 vf_num;
u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
+ u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@@ -223,6 +225,8 @@ enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int
+ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
@@ -243,7 +247,6 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
-bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
new file mode 100644
index 000000000000..9bc0b8fdfc77
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Intel Corporation. */
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for ice will be "ice".
+ *
+ * This file is named ice_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ice
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_ICE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _ICE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/* ice_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_ice_example(args...)
+ *
+ * ... as:
+ *
+ * ice_trace(example, args...)
+ *
+ * ... to resolve to the PF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace points should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, ice_trace_enabled(trace_name) wraps references to
+ * trace_ice_<trace_name>_enabled() functions.
+ * @trace_name: name of tracepoint
+ */
+#define _ICE_TRACE_NAME(trace_name) (trace_##ice##_##trace_name)
+#define ICE_TRACE_NAME(trace_name) _ICE_TRACE_NAME(trace_name)
+
+#define ice_trace(trace_name, args...) ICE_TRACE_NAME(trace_name)(args)
+
+#define ice_trace_enabled(trace_name) ICE_TRACE_NAME(trace_name##_enabled)()
+
+/* This is for events common to PF. Corresponding versions will be named
+ * trace_ice_*. The ice_trace() macro above will select the right trace point
+ * name for the driver.
+ */
+
+/* Begin tracepoints */
+
+/* Global tracepoints */
+
+/* Events related to DIM, q_vectors and ring containers */
+DECLARE_EVENT_CLASS(ice_rx_dim_template,
+ TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+ TP_ARGS(q_vector, dim),
+ TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+ __field(struct dim *, dim)
+ __string(devname, q_vector->rx.ring->netdev->name)),
+
+ TP_fast_assign(__entry->q_vector = q_vector;
+ __entry->dim = dim;
+ __assign_str(devname, q_vector->rx.ring->netdev->name);),
+
+ TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+ __get_str(devname),
+ __entry->q_vector->rx.ring->q_index,
+ __entry->dim->state,
+ __entry->dim->profile_ix,
+ __entry->dim->tune_state,
+ __entry->dim->steps_right,
+ __entry->dim->steps_left,
+ __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_rx_dim_template, ice_rx_dim_work,
+ TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+ TP_ARGS(q_vector, dim)
+);
+
+DECLARE_EVENT_CLASS(ice_tx_dim_template,
+ TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+ TP_ARGS(q_vector, dim),
+ TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+ __field(struct dim *, dim)
+ __string(devname, q_vector->tx.ring->netdev->name)),
+
+ TP_fast_assign(__entry->q_vector = q_vector;
+ __entry->dim = dim;
+ __assign_str(devname, q_vector->tx.ring->netdev->name);),
+
+ TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+ __get_str(devname),
+ __entry->q_vector->tx.ring->q_index,
+ __entry->dim->state,
+ __entry->dim->profile_ix,
+ __entry->dim->tune_state,
+ __entry->dim->steps_right,
+ __entry->dim->steps_left,
+ __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
+ TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+ TP_ARGS(q_vector, dim)
+);
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(ice_tx_template,
+ TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+ struct ice_tx_buf *buf),
+
+ TP_ARGS(ring, desc, buf),
+ TP_STRUCT__entry(__field(void *, ring)
+ __field(void *, desc)
+ __field(void *, buf)
+ __string(devname, ring->netdev->name)),
+
+ TP_fast_assign(__entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);),
+
+ TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ __entry->ring, __entry->desc, __entry->buf)
+);
+
+#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_template, name, \
+ TP_PROTO(struct ice_ring *ring, \
+ struct ice_tx_desc *desc, \
+ struct ice_tx_buf *buf), \
+ TP_ARGS(ring, desc, buf))
+
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
+
+DECLARE_EVENT_CLASS(ice_rx_template,
+ TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+
+ TP_ARGS(ring, desc),
+
+ TP_STRUCT__entry(__field(void *, ring)
+ __field(void *, desc)
+ __string(devname, ring->netdev->name)),
+
+ TP_fast_assign(__entry->ring = ring;
+ __entry->desc = desc;
+ __assign_str(devname, ring->netdev->name);),
+
+ TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ __entry->ring, __entry->desc)
+);
+DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
+ TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_ARGS(ring, desc)
+);
+
+DECLARE_EVENT_CLASS(ice_rx_indicate_template,
+ TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(__field(void *, ring)
+ __field(void *, desc)
+ __field(void *, skb)
+ __string(devname, ring->netdev->name)),
+
+ TP_fast_assign(__entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);),
+
+ TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ __entry->ring, __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
+ TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ struct sk_buff *skb),
+ TP_ARGS(ring, desc, skb)
+);
+
+DECLARE_EVENT_CLASS(ice_xmit_template,
+ TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+
+ TP_ARGS(ring, skb),
+
+ TP_STRUCT__entry(__field(void *, ring)
+ __field(void *, skb)
+ __string(devname, ring->netdev->name)),
+
+ TP_fast_assign(__entry->ring = ring;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);),
+
+ TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ __entry->skb, __entry->ring)
+);
+
+#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_xmit_template, name, \
+ TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+ TP_ARGS(ring, skb))
+
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+
+/* End tracepoints */
+
+#endif /* _ICE_TRACE_H_ */
+/* This must be outside ifdef _ICE_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ../../drivers/net/ethernet/intel/ice/ice_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index e2b4b29ea207..6ee8e0032d52 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -10,6 +10,7 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
+#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
@@ -224,6 +225,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
smp_rmb(); /* prevent any other reads prior to eop_desc */
+ ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
@@ -254,6 +256,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
i++;
@@ -272,6 +275,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
dma_unmap_len_set(tx_buf, len, 0);
}
}
+ ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
/* move us one more past the eop_desc for start of next pkt */
tx_buf++;
@@ -523,7 +527,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct ice_ring *xdp_ring;
- int err;
+ int err, result;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +536,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- return ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
+ return result;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -1076,7 +1086,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 stat_err_bits;
int rx_buf_pgcnt;
u16 vlan_tag = 0;
- u8 rx_ptype;
+ u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1096,6 +1106,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ ice_trace(clean_rx_irq, rx_ring, rx_desc);
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
@@ -1129,15 +1140,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
+ if (!xdp_prog)
goto construct_skb;
- }
xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
- rcu_read_unlock();
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
@@ -1201,6 +1208,7 @@ construct_skb:
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
@@ -2131,6 +2139,41 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
}
/**
+ * ice_tstamp - set up context descriptor for hardware timestamp
+ * @tx_ring: pointer to the Tx ring to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @first: Tx buffer
+ * @off: Tx offload parameters
+ */
+static void
+ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+ s8 idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return;
+
+ if (!tx_ring->ptp_tx)
+ return;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return;
+
+ /* Grab an open timestamp slot */
+ idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
+ if (idx < 0)
+ return;
+
+ off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
+ ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
+ first->tx_flags |= ICE_TX_FLAGS_TSYN;
+}
+
+/**
* ice_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -2143,9 +2186,12 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
+ ice_trace(xmit_frame_ring, tx_ring, skb);
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2189,13 +2235,17 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
+ ice_tstamp(tx_ring, skb, first, &offload);
+
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
u16 i = tx_ring->next_to_use;
@@ -2216,6 +2266,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
return NETDEV_TX_OK;
out_drop:
+ ice_trace(xmit_frame_ring_drop, tx_ring, skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c5a92ac787d6..1e46e80f3d6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -118,6 +118,7 @@ static inline int ice_skb_pad(void)
* freed instead of returned like skb packets.
*/
#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
@@ -311,6 +312,10 @@ struct ice_ring {
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
+ struct ice_ptp_tx *tx_tstamps;
+ u64 cached_phctime;
+ u8 ptp_rx:1;
+ u8 ptp_tx:1;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 207f6ee3a7f6..171397dcf00a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -38,10 +38,23 @@ void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
* ice_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
- * Returns a hash type to be used by skb_set_hash
+ * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
+ * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
+ * Rx desc.
*/
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
+ struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+ if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
+ return PKT_HASH_TYPE_L2;
+
return PKT_HASH_TYPE_NONE;
}
@@ -54,7 +67,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
*/
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 rx_ptype)
+ struct sk_buff *skb, u16 rx_ptype)
{
struct ice_32b_rx_flex_desc_nic *nic_mdid;
u32 hash;
@@ -81,7 +94,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*/
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+ union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
u16 rx_status0, rx_status1;
@@ -167,7 +180,7 @@ checksum_fail:
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
+ struct sk_buff *skb, u16 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@ -175,6 +188,9 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+
+ if (rx_ring->ptp_rx)
+ ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 58ff58f0f972..05ac30752902 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -53,7 +53,7 @@ void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype);
+ struct sk_buff *skb, u16 ptype);
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4474dd6a7ba1..d33d1906103c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -14,6 +14,7 @@
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
+#include "ice_sbq_cmd.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -45,8 +46,10 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
+#define ICE_DBG_RDMA BIT_ULL(15)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
+#define ICE_DBG_PTP BIT_ULL(19)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
@@ -63,7 +66,7 @@ enum ice_aq_res_ids {
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 5000
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
@@ -146,6 +149,7 @@ struct ice_link_status {
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
+ u8 link_cfg_err;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
@@ -262,6 +266,8 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
+ u8 ieee_1588;
+ u8 rdma;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -273,6 +279,54 @@ struct ice_hw_common_caps {
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
+/* IEEE 1588 TIME_SYNC specific info */
+/* Function specific definitions */
+#define ICE_TS_FUNC_ENA_M BIT(0)
+#define ICE_TS_SRC_TMR_OWND_M BIT(1)
+#define ICE_TS_TMR_ENA_M BIT(2)
+#define ICE_TS_TMR_IDX_OWND_S 4
+#define ICE_TS_TMR_IDX_OWND_M BIT(4)
+#define ICE_TS_CLK_FREQ_S 16
+#define ICE_TS_CLK_FREQ_M ICE_M(0x7, ICE_TS_CLK_FREQ_S)
+#define ICE_TS_CLK_SRC_S 20
+#define ICE_TS_CLK_SRC_M BIT(20)
+#define ICE_TS_TMR_IDX_ASSOC_S 24
+#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+
+struct ice_ts_func_info {
+ /* Function specific info */
+ u32 clk_freq;
+ u8 clk_src;
+ u8 tmr_index_assoc;
+ u8 ena;
+ u8 tmr_index_owned;
+ u8 src_tmr_owned;
+ u8 tmr_ena;
+};
+
+/* Device specific definitions */
+#define ICE_TS_TMR0_OWNR_M 0x7
+#define ICE_TS_TMR0_OWND_M BIT(3)
+#define ICE_TS_TMR1_OWNR_S 4
+#define ICE_TS_TMR1_OWNR_M ICE_M(0x7, ICE_TS_TMR1_OWNR_S)
+#define ICE_TS_TMR1_OWND_M BIT(7)
+#define ICE_TS_DEV_ENA_M BIT(24)
+#define ICE_TS_TMR0_ENA_M BIT(25)
+#define ICE_TS_TMR1_ENA_M BIT(26)
+
+struct ice_ts_dev_info {
+ /* Device specific info */
+ u32 ena_ports;
+ u32 tmr_own_map;
+ u32 tmr0_owner;
+ u32 tmr1_owner;
+ u8 tmr0_owned;
+ u8 tmr1_owned;
+ u8 ena;
+ u8 tmr0_ena;
+ u8 tmr1_ena;
+};
+
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
@@ -281,6 +335,7 @@ struct ice_hw_func_caps {
u32 guar_num_vsi;
u32 fd_fltr_guar; /* Number of filters guaranteed */
u32 fd_fltr_best_effort; /* Number of best effort filters */
+ struct ice_ts_func_info ts_func_info;
};
/* Device wide capabilities */
@@ -289,6 +344,7 @@ struct ice_hw_dev_caps {
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_flow_director_fltr; /* Number of FD filters available */
+ struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
};
@@ -440,6 +496,7 @@ struct ice_sched_node {
u8 tc_num;
u8 owner;
#define ICE_SCHED_NODE_OWNER_LAN 0
+#define ICE_SCHED_NODE_OWNER_RDMA 2
};
/* Access Macros for Tx Sched Elements data */
@@ -511,6 +568,7 @@ struct ice_sched_vsi_info {
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+ u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -749,6 +807,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info sbq;
struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
@@ -784,6 +843,14 @@ struct ice_hw {
u8 ucast_shared; /* true if VSIs can share unicast addr */
+#define ICE_PHY_PER_NAC 1
+#define ICE_MAX_QUAD 2
+#define ICE_NUM_QUAD_TYPE 2
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PHY_0_LAST_QUAD 1
+#define ICE_PORTS_PER_PHY 8
+#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 active_track_id;
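
The new ICE_TS_* definitions follow the driver's usual pattern of extracting bit-fields from capability dwords with ICE_M()-style masks and shifts. A small self-contained example of that pattern (the mask values mirror ICE_TS_FUNC_ENA_M and ICE_TS_CLK_FREQ_M, but the caps word itself is fabricated):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the diff's mask style: ICE_M(m, s) == (m) << (s). */
#define ICE_M(m, s)        ((m) << (s))
#define TS_FUNC_ENA_M      (1u << 0)
#define TS_CLK_FREQ_S      16
#define TS_CLK_FREQ_M      ICE_M(0x7u, TS_CLK_FREQ_S)

int main(void)
{
	uint32_t cap = 0;                  /* pretend this came from a caps dword */

	cap |= TS_FUNC_ENA_M;              /* function enabled */
	cap |= ICE_M(0x3u, TS_CLK_FREQ_S); /* clock-frequency code 3 */

	uint32_t ena = !!(cap & TS_FUNC_ENA_M);
	uint32_t clk_freq = (cap & TS_CLK_FREQ_M) >> TS_CLK_FREQ_S;

	printf("ena=%u clk_freq=%u\n", ena, clk_freq); /* ena=1 clk_freq=3 */
	return 0;
}
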
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a1d22d2aa0bd..2826570dab51 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
- * in the case of VFR. If this is done for PFR, it can mess up VF
- * resets because the VF driver may already have started cleanup
- * by the time we get here.
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR but not by
+ * VFR/VFLR, so the driver needs to clear them in those cases. If this
+ * is done for PFR, it can mess up VF resets because the VF driver may
+ * already have started cleanup by the time we get here.
*/
- if (!is_pfr)
+ if (!is_pfr) {
wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+ }
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -937,16 +939,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
vf->num_mac++;
- if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status) {
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
- &vf->dflt_lan_addr.addr[0], vf->vf_id,
+ &vf->hw_lan_addr.addr[0], vf->vf_id,
ice_stat_str(status));
return ice_status_to_errno(status);
}
vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
}
return 0;
@@ -1685,7 +1689,6 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- vsi = ice_get_vf_vsi(vf);
if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
dev_err(dev, "disabling promiscuous mode failed\n");
}
@@ -1698,7 +1701,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_ctrl_vsi_release(vf);
ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi_with_release(vf);
+
+ if (ice_vf_rebuild_vsi_with_release(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+ return false;
+ }
+
ice_vf_post_vsi_rebuild(vf);
/* if the VF has been reset allow it to come up again */
@@ -2379,7 +2387,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->dflt_lan_addr.addr);
+ vf->hw_lan_addr.addr);
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
@@ -3535,10 +3543,9 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- u16 num_rxq = 0, num_txq = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- int i;
+ int i, q_idx;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3576,18 +3583,31 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure the selected "q_idx" is in the valid range of queues
+ * for the selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
/* copy Tx queue info from VF into VSI */
if (qpi->txq.ring_len > 0) {
- num_txq++;
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
}
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- num_rxq++;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -3604,27 +3624,20 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- }
- vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
- */
- if (vf->port_vlan_info)
- vsi->max_frame += VLAN_HLEN;
- }
-
- /* VF can request to configure less than allocated queues or default
- * allocated queues. So update the VSI with new number
- */
- vsi->num_txq = num_txq;
- vsi->num_rxq = num_rxq;
- /* All queues of VF VSI are in TC 0 */
- vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
- vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is not
+ * expected to account for it in the MTU calculation
+ */
+ if (vf->port_vlan_info)
+ vsi->max_frame += VLAN_HLEN;
- if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
error_param:
/* send the response to the VF */
@@ -3660,19 +3673,95 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
}
/**
+ * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ */
+static u8
+ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
+}
+
+/**
+ * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ */
+static bool
+ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
+}
+
+/**
+ * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ */
+static bool
+ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
+}
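
These helpers classify a virtchnl_ether_addr by its type bits, so the PF can distinguish a legacy VF driver (which never sets a type) from a newer one that marks its primary MAC explicitly. A hypothetical sketch of the same classification (the mask and type values are illustrative, not the real VIRTCHNL constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical values; the real ones live in the virtchnl header. */
#define VC_ETHER_ADDR_TYPE_MASK 0x3
#define VC_ETHER_ADDR_LEGACY    0
#define VC_ETHER_ADDR_PRIMARY   1

struct vc_ether_addr {
	uint8_t addr[6];
	uint8_t type;
};

static uint8_t addr_type(const struct vc_ether_addr *a)
{
	return a->type & VC_ETHER_ADDR_TYPE_MASK;
}

int main(void)
{
	struct vc_ether_addr a = { .type = VC_ETHER_ADDR_PRIMARY };

	if (addr_type(&a) == VC_ETHER_ADDR_PRIMARY)
		puts("primary MAC: update dev_lan_addr and hw_lan_addr");
	else if (addr_type(&a) == VC_ETHER_ADDR_LEGACY)
		puts("legacy VF driver: only set MACs if still zero");
	return 0;
}
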
+
+/**
+ * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ */
+static void
+ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return;
+
+ /* Only allow a legacy VF driver to set the device and hardware MAC if
+ * the hardware MAC is still zero, and allow a new VF driver to set them
+ * if the type was correctly specified over VIRTCHNL.
+ */
+ if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
+ is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ ice_is_vc_addr_primary(vc_ether_addr)) {
+ ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ }
+
+ /* hardware and device MACs are already set, but it's possible that the
+ * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
+ * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
+ * away for the legacy VF driver case as it will be updated in the
+ * delete flow for this case
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr)) {
+ ether_addr_copy(vf->legacy_last_added_umac.addr,
+ mac_addr);
+ vf->legacy_last_added_umac.time_modified = jiffies;
+ }
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
- * @mac_addr: MAC address to add
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
*/
static int
-ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
enum ice_status status;
- /* default unicast MAC already added */
- if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ /* device MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
@@ -3691,12 +3780,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EIO;
}
- /* Set the default LAN address to the latest unicast MAC address added
- * by the VF. The default LAN address is reported by the PF via
- * ndo_get_vf_config.
- */
- if (is_unicast_ether_addr(mac_addr))
- ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
+ ice_vfhw_mac_add(vf, vc_ether_addr);
vf->num_mac++;
@@ -3704,19 +3788,65 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
}
/**
+ * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
+ * @last_added_umac: structure used to check expiration
+ */
+static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
+{
+#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
+ return time_is_before_jiffies(last_added_umac->time_modified +
+ ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
+}
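
ice_is_legacy_umac_expired() gives the legacy MAC-update workaround a 3-second window based on jiffies. A userspace analog of the same expiry check, using CLOCK_MONOTONIC in place of jiffies:

#include <stdio.h>
#include <time.h>

#define MAC_CHANGE_EXPIRE_NS (3000LL * 1000 * 1000)  /* 3 s, as in the diff */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Analog of ice_is_legacy_umac_expired(): true once 3 s have passed. */
static int expired(long long time_modified_ns)
{
	return now_ns() > time_modified_ns + MAC_CHANGE_EXPIRE_NS;
}

int main(void)
{
	long long stamp = now_ns();

	printf("expired immediately? %d\n", expired(stamp)); /* prints 0 */
	return 0;
}
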
+
+/**
+ * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
+ */
+static void
+ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr) ||
+ !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ return;
+
+ /* Allow the device MAC to be repopulated in the add flow, but don't
+ * clear the hardware MAC (i.e. hw_lan_addr.addr) here: it is meant to
+ * be persistent across VM reboot and driver unload/load, which would
+ * not work if it were cleared here.
+ */
+ eth_zero_addr(vf->dev_lan_addr.addr);
+
+ /* only update the cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee the order or type of MAC messages coming
+ * from the VF driver
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr) &&
+ !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
+ ether_addr_copy(vf->dev_lan_addr.addr,
+ vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr,
+ vf->legacy_last_added_umac.addr);
+ }
+}
+
+/**
* ice_vc_del_mac_addr - attempt to delete the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
- * @mac_addr: MAC address to delete
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
*/
static int
-ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
enum ice_status status;
if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
@@ -3730,8 +3860,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EIO;
}
- if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
- eth_zero_addr(vf->dflt_lan_addr.addr);
+ ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
@@ -3750,7 +3879,8 @@ static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
int (*ice_vc_cfg_mac)
- (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
+ (struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *virtchnl_ether_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
@@ -3799,7 +3929,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
is_zero_ether_addr(mac_addr))
continue;
- result = ice_vc_cfg_mac(vf, vsi, mac_addr);
+ result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
if (result == -EEXIST || result == -ENOENT) {
continue;
} else if (result) {
@@ -4437,7 +4567,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
return -EBUSY;
ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
/* VF configuration for VLAN and applicable QoS */
ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
@@ -4513,7 +4643,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
vf = &pf->vf[vf_id];
/* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
+ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr.addr, mac))
return 0;
ret = ice_check_vf_ready_for_cfg(vf);
@@ -4529,7 +4660,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ ether_addr_copy(vf->dev_lan_addr.addr, mac);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac);
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
@@ -4682,7 +4814,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dflt_lan_addr.addr,
+ vf->dev_lan_addr.addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
@@ -4726,7 +4858,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dflt_lan_addr.addr);
+ vf->dev_lan_addr.addr);
}
}
}
@@ -4816,7 +4948,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
if (pf_vsi)
dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dflt_lan_addr.addr[0],
+ &vf->dev_lan_addr.addr[0],
pf_vsi->netdev->dev_addr);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index d800ed83d6c3..842cb077df86 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -58,6 +58,11 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
/* VF MDD events print structure */
struct ice_mdd_vf_events {
u16 count; /* total count of Rx|Tx events */
@@ -78,7 +83,9 @@ struct ice_vf {
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dflt_lan_addr;
+ struct virtchnl_ether_addr dev_lan_addr;
+ struct virtchnl_ether_addr hw_lan_addr;
+ struct ice_time_mac legacy_last_added_umac;
DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
u16 port_vlan_info; /* Port VLAN ID and QoS */
@@ -151,16 +158,18 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#else /* CONFIG_PCI_IOV */
-#define ice_process_vflr_event(pf) do {} while (0)
-#define ice_free_vfs(pf) do {} while (0)
-#define ice_vc_process_vf_msg(pf, event) do {} while (0)
-#define ice_vc_notify_link_state(pf) do {} while (0)
-#define ice_vc_notify_reset(pf) do {} while (0)
-#define ice_set_vf_state_qs_dis(vf) do {} while (0)
-#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
-#define ice_print_vfs_mdd_events(pf) do {} while (0)
-#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
-#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index faa7b8d96adb..5a9f61deeb38 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
- err = ice_setup_rx_ctx(rx_ring);
+ err = ice_vsi_cfg_rxq(rx_ring);
if (err)
goto free_buf;
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
+ clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
return 0;
}
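
The pool enable/disable paths now record which queues run in AF_XDP zero-copy mode by setting and clearing a bit per queue id in vsi->af_xdp_zc_qps. A minimal sketch of such a bitmap, assuming a fixed queue count:

#include <stdio.h>
#include <limits.h>

#define MAX_QUEUES 64
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long zc_qps[MAX_QUEUES / BITS_PER_WORD + 1];

static void set_qid(unsigned int qid)
{
	zc_qps[qid / BITS_PER_WORD] |= 1UL << (qid % BITS_PER_WORD);
}

static void clear_qid(unsigned int qid)
{
	zc_qps[qid / BITS_PER_WORD] &= ~(1UL << (qid % BITS_PER_WORD));
}

static int test_qid(unsigned int qid)
{
	return !!(zc_qps[qid / BITS_PER_WORD] & (1UL << (qid % BITS_PER_WORD)));
}

int main(void)
{
	set_qid(5);                   /* pool enabled on queue 5 */
	printf("%d", test_qid(5));    /* 1 */
	clear_qid(5);                 /* pool disabled */
	printf("%d\n", test_qid(5));  /* 0 */
	return 0;
}
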
@@ -463,7 +466,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
struct ice_ring *xdp_ring;
u32 act;
- rcu_read_lock();
/* ZC path is enabled only when an XDP program is set,
 * so it cannot be NULL here
 */
@@ -473,9 +475,9 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- rcu_read_unlock();
- return result;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
}
switch (act) {
@@ -484,11 +486,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -496,7 +501,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
break;
}
- rcu_read_unlock();
return result;
}
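
The reworked verdict handling funnels both a failed xdp_do_redirect() and a consumed XDP_TX into a single out_failure label, so every failure records an XDP trace exception before the buffer is dropped. A compilable sketch of that control flow, with stub helpers standing in for the real XDP APIs:

#include <stdio.h>

enum { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };
enum { RES_PASS, RES_CONSUMED, RES_TX, RES_REDIR };

/* Hypothetical stand-ins for the driver's helpers. */
static int do_redirect(void) { return -1; }   /* pretend redirect failed */
static int xmit_xdp(void)    { return RES_CONSUMED; }
static void trace_exception(int act) { printf("xdp exception, act=%d\n", act); }

static int run_xdp(int act)
{
	int result = RES_PASS;

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = xmit_xdp();
		if (result == RES_CONSUMED)
			goto out_failure;  /* e.g. Tx ring full */
		break;
	case XDP_REDIRECT:
		if (do_redirect())
			goto out_failure;
		result = RES_REDIR;
		break;
	default:
	case XDP_ABORTED:
out_failure:
		trace_exception(act);
		/* fall through */
	case XDP_DROP:
		result = RES_CONSUMED;
		break;
	}
	return result;
}

int main(void)
{
	printf("result=%d\n", run_xdp(XDP_REDIRECT)); /* traces, then 1 */
	return 0;
}
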
@@ -521,7 +525,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
- u8 rx_ptype;
+ u16 rx_ptype;
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fad783690134..ea208808623a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
-#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
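
Replacing the do {} while (0) stubs with empty static inline functions keeps the CONFIG-off build type-checking its arguments. A short demonstration of the difference (clean_ring_macro/clean_ring_inline are illustrative names, not driver symbols):

#include <stdio.h>

struct ring { int id; };

/* Old style: arguments are never type-checked or even evaluated. */
#define clean_ring_macro(r) do {} while (0)

/* New style: the compiler still checks that 'r' is a struct ring *. */
static inline void clean_ring_inline(struct ring *r) { (void)r; }

int main(void)
{
	struct ring r = { 1 };

	clean_ring_macro("not a ring");   /* silently accepted */
	clean_ring_inline(&r);            /* passing "not a ring" would not compile */
	printf("ok\n");
	return 0;
}
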
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 50863fd87d53..cbe92fd23a70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2756,6 +2756,7 @@ out:
return ret_val;
}
+#ifdef CONFIG_IGB_HWMON
static const u8 e1000_emc_temp_data[4] = {
E1000_EMC_INTERNAL_DATA,
E1000_EMC_DIODE1_DATA,
@@ -2769,7 +2770,6 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
-#ifdef CONFIG_IGB_HWMON
/**
* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7bda8c5edea5..2d3daf022651 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb);
+ ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7545da216d8b..636a1b1fb7e1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -831,7 +831,7 @@ static int igb_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 038a9fd1af44..7e6435dc7e80 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -356,7 +356,7 @@ static void igb_dump(struct igb_adapter *adapter)
struct igb_reg_info *reginfo;
struct igb_ring *tx_ring;
union e1000_adv_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 { __le64 a; __le64 b; } *u0;
struct igb_ring *rx_ring;
union e1000_adv_rx_desc *rx_desc;
u32 staterr;
@@ -2643,7 +2643,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
}
input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = match.key->vlan_priority;
+ input->filter.vlan_tci =
+ (__force __be16)match.key->vlan_priority;
}
}
@@ -6275,12 +6276,12 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
cmd_type |= len | IGB_TXD_DCMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
- olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
+ olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
/* 82575 requires a unique index per ring */
if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
@@ -8280,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
- xdp->data += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -8336,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -8387,7 +8381,6 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
struct bpf_prog *xdp_prog;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
case XDP_TX:
result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IGB_XDP_REDIR;
- else
- result = IGB_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IGB_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -8420,7 +8415,6 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
}
xdp_out:
- rcu_read_unlock();
return ERR_PTR(-result);
}
@@ -8597,7 +8591,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
@@ -8682,7 +8676,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer;
+ ktime_t timestamp = 0;
+ int pkt_offset = 0;
unsigned int size;
+ void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8699,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ pktbuf, &timestamp);
+
+ pkt_offset += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
/* retrieve a buffer from the ring */
if (!skb) {
- unsigned int offset = igb_rx_offset(rx_ring);
- unsigned char *hard_start;
+ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
- hard_start = page_address(rx_buffer->page) +
- rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8739,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
} else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+ timestamp);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- &xdp, rx_desc);
+ &xdp, timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ba61fe9bfaf4..0011b15e678c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
*
 * This function is meant to retrieve a timestamp from the first buffer of an
 * incoming frame. The value is stored in little-endian format starting at
 * byte 8.
*
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
**/
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb)
+ ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
- return IGB_RET_PTP_DISABLED;
+ return 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
/* check reserved dwords are zero, be/le doesn't matter for zero */
if (regval[0])
- return IGB_RET_PTP_INVALID;
+ return 0;
- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[1]));
+ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
break;
}
}
- skb_hwtstamps(skb)->hwtstamp =
- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
- return 0;
+ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+ return IGB_TS_HDR_LEN;
}
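
igb_ptp_rx_pktstamp() now returns the length of the timestamp header it consumed (or 0) and writes the timestamp through a ktime_t pointer instead of touching the skb directly. A userspace sketch of the same parse, assuming a 16-byte header whose first 8 reserved bytes must be zero and whose value starts at byte 8, as the comment above describes:

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 64-bit load, standing in for le64_to_cpu(). */
static uint64_t le64_load(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Returns the header length consumed, or 0 if no valid timestamp. */
static int rx_pktstamp(const uint8_t *buf, uint64_t *tstamp)
{
	if (le64_load(buf) != 0)      /* reserved bytes must be zero */
		return 0;
	*tstamp = le64_load(buf + 8); /* value starts at byte 8 */
	return 16;                    /* assumed timestamp header length */
}

int main(void)
{
	uint8_t hdr[16] = { 0 };
	uint64_t ts, raw = 0x1122334455667788ULL;

	for (int i = 0; i < 8; i++)
		hdr[8 + i] = (uint8_t)(raw >> (8 * i)); /* store little-endian */

	if (rx_pktstamp(hdr, &ts))
		printf("ts=0x%llx\n", (unsigned long long)ts);
	return 0;
}
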
/**
@@ -1134,12 +1131,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
| E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
+ wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT));
wr32(E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
+ wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index fb3fbcb13331..1bbe9862a758 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct net_device *netdev,
struct sk_buff *skb,
- u32 status, u16 vlan)
+ u32 status, __le16 vlan)
{
u16 vid;
if (status & E1000_RXD_STAT_VP) {
if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
(status & E1000_RXDEXT_STATERR_LB))
- vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
@@ -2056,7 +2056,7 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* MSS L4LEN IDX */
mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index c71b0d7dbcee..ba9bb3132d5d 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -35,31 +35,31 @@ struct e1000_hw;
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
- u64 pkt_addr; /* Packet buffer address */
- u64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
- u32 data;
+ __le32 data;
struct {
- u16 pkt_info; /* RSS/Packet type */
+ __le16 pkt_info; /* RSS/Packet type */
/* Split Header, hdr buffer length */
- u16 hdr_info;
+ __le16 hdr_info;
} hs_rss;
} lo_dword;
union {
- u32 rss; /* RSS Hash */
+ __le32 rss; /* RSS Hash */
struct {
- u16 ip_id; /* IP id */
- u16 csum; /* Packet Checksum */
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
- u32 status_error; /* ext status/error */
- u16 length; /* Packet length */
- u16 vlan; /* VLAN tag */
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
@@ -70,14 +70,14 @@ union e1000_adv_rx_desc {
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
- u64 buffer_addr; /* Address of descriptor's data buf */
- u32 cmd_type_len;
- u32 olinfo_status;
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
} read;
struct {
- u64 rsvd; /* Reserved */
- u32 nxtseq_seed;
- u32 status;
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
} wb;
};
@@ -94,10 +94,10 @@ union e1000_adv_tx_desc {
/* Context descriptors */
struct e1000_adv_tx_context_desc {
- u32 vlan_macip_lens;
- u32 seqnum_seed;
- u32 type_tucmd_mlhl;
- u32 mss_l4len_idx;
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
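
The descriptor fields gain __le16/__le32/__le64 annotations so sparse can flag missing byte-order conversions; nothing changes at runtime. The loopback VLAN case earlier in this patch is the interesting one: the tag arrives byte-swapped, so it needs a big-endian load. A host-side sketch, assuming a little-endian host, with hand-rolled stand-ins for le16_to_cpu()/be16_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for le16_to_cpu()/be16_to_cpu() on a little-endian host. */
static uint16_t le16_to_cpu_sketch(uint16_t v) { return v; }
static uint16_t be16_to_cpu_sketch(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* VLAN field as written back by the NIC: little-endian normally,
	 * byte-swapped for local-loopback packets on these parts.
	 */
	uint16_t wire = 0x0064;   /* VLAN ID 100 stored little-endian */

	printf("normal: %u\n",   le16_to_cpu_sketch(wire));   /* 100 */
	printf("loopback: %u\n", be16_to_cpu_sketch(wire));   /* 25600 */
	return 0;
}
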
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 25871351730b..9e0bbb2e55e3 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -118,6 +118,7 @@ struct igc_ring {
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
/* Board specific private data structure */
@@ -255,6 +256,11 @@ bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
+void igc_disable_rx_ring(struct igc_ring *ring);
+void igc_enable_rx_ring(struct igc_ring *ring);
+void igc_disable_tx_ring(struct igc_ring *ring);
+void igc_enable_tx_ring(struct igc_ring *ring);
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
@@ -366,6 +372,7 @@ extern char igc_driver_name[];
/* VLAN info */
#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGC_TX_FLAGS_VLAN_SHIFT 16
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
@@ -390,8 +397,6 @@ enum igc_tx_flags {
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
-
- IGC_TX_FLAGS_XDP = 0x100,
};
enum igc_boards {
@@ -408,12 +413,19 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
union {
struct sk_buff *skb;
struct xdp_frame *xdpf;
@@ -428,14 +440,19 @@ struct igc_tx_buffer {
};
struct igc_rx_buffer {
- dma_addr_t dma;
- struct page *page;
+ union {
+ struct {
+ dma_addr_t dma;
+ struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
+ __u32 page_offset;
#else
- __u16 page_offset;
+ __u16 page_offset;
#endif
- __u16 pagecnt_bias;
+ __u16 pagecnt_bias;
+ };
+ struct xdp_buff *xdp;
+ };
};
struct igc_q_vector {
@@ -521,7 +538,8 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_RX_SCTP_CSUM,
IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
IGC_RING_FLAG_TX_CTX_IDX,
- IGC_RING_FLAG_TX_DETECT_HANG
+ IGC_RING_FLAG_TX_DETECT_HANG,
+ IGC_RING_FLAG_AF_XDP_ZC,
};
#define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ea627ce52525..ce530f5fd7bd 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -78,9 +78,11 @@ union igc_adv_rx_desc {
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 0103dda32f39..c3a5a5518790 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -94,12 +94,13 @@
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
-#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -128,7 +129,6 @@
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
@@ -323,6 +323,9 @@
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+
+#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 495bed47ed0a..c09c95cc5f70 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -112,7 +112,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 { __le64 a; __le64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 9722449d7633..fa4171860623 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -554,7 +554,7 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
@@ -765,35 +765,22 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
IGC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
}
/* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
break;
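
ethtool_sprintf() formats into the current ETH_GSTRING_LEN-sized slot and advances the cursor, replacing the repeated sprintf()-plus-pointer-bump pattern above. A minimal userspace imitation of that helper:

#include <stdio.h>
#include <stdarg.h>

#define GSTRING_LEN 32   /* ETH_GSTRING_LEN is 32 in the kernel */

/* Sketch of ethtool_sprintf(): format into the current slot, then
 * advance the cursor by one fixed-size slot.
 */
static void gstring_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, GSTRING_LEN, fmt, args);
	va_end(args);
	*data += GSTRING_LEN;
}

int main(void)
{
	char buf[4 * GSTRING_LEN];
	char *p = buf;

	for (unsigned int i = 0; i < 2; i++) {
		gstring_sprintf(&p, "tx_queue_%u_packets", i);
		gstring_sprintf(&p, "tx_queue_%u_bytes", i);
	}
	printf("%s | %s\n", buf, buf + GSTRING_LEN);
	return 0;
}
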
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 069471b7ffb0..95323095094d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -11,7 +11,7 @@
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
-
+#include <net/xdp_sock_drv.h>
#include <net/ipv6.h>
#include "igc.h"
@@ -111,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_phy_copper_base(&adapter->hw);
+ /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
+ wr32(IGC_VET, ETH_P_8021Q);
+
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
@@ -171,6 +174,14 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
+static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
+{
+ dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+ dma_unmap_len(buf, len), DMA_TO_DEVICE);
+
+ dma_unmap_len_set(buf, len, 0);
+}
+
/**
* igc_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
@@ -179,20 +190,27 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union igc_adv_tx_desc *eop_desc, *tx_desc;
- if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ switch (tx_buffer->type) {
+ case IGC_TX_BUFFER_TYPE_XSK:
+ xsk_frames++;
+ break;
+ case IGC_TX_BUFFER_TYPE_XDP:
xdp_return_frame(tx_buffer->xdpf);
- else
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ case IGC_TX_BUFFER_TYPE_SKB:
dev_kfree_skb_any(tx_buffer->skb);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ default:
+ netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+ break;
+ }
/* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch;
@@ -211,10 +229,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
/* move us one more past the eop_desc for start of next pkt */
@@ -226,6 +241,9 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
}
}
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
@@ -346,11 +364,7 @@ static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
return err;
}
-/**
- * igc_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- */
-static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
@@ -383,12 +397,39 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
if (i == rx_ring->count)
i = 0;
}
+}
+
+static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
+{
+ struct igc_rx_buffer *bi;
+ u16 i;
+
+ for (i = 0; i < ring->count; i++) {
+ bi = &ring->rx_buffer_info[i];
+ if (!bi->xdp)
+ continue;
+
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+ }
+}
- clear_ring_uses_large_buffer(rx_ring);
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *ring)
+{
+ if (ring->xsk_pool)
+ igc_clean_rx_ring_xsk_pool(ring);
+ else
+ igc_clean_rx_ring_page_shared(ring);
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ clear_ring_uses_large_buffer(ring);
+
+ ring->next_to_alloc = 0;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
}
/**
@@ -414,7 +455,7 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
- igc_xdp_unregister_rxq_info(rx_ring);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -453,11 +494,16 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
+ u8 index = rx_ring->queue_index;
int size, desc_len, res;
- res = igc_xdp_register_rxq_info(rx_ring);
- if (res < 0)
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
+ rx_ring->q_vector->napi.napi_id);
+ if (res < 0) {
+ netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
+ index);
return res;
+ }
size = sizeof(struct igc_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
@@ -483,7 +529,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
return 0;
err:
- igc_xdp_unregister_rxq_info(rx_ring);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -515,9 +561,14 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
return err;
}
-static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
+ struct igc_ring *ring)
{
- return !!adapter->xdp_prog;
+ if (!igc_xdp_is_enabled(adapter) ||
+ !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
+ return NULL;
+
+ return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}
/**
@@ -535,6 +586,20 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
u64 rdba = ring->dma;
+ u32 buf_size;
+
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
if (igc_xdp_is_enabled(adapter))
set_ring_uses_large_buffer(ring);
@@ -558,12 +623,15 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
ring->next_to_clean = 0;
ring->next_to_use = 0;
- /* set descriptor configuration */
- srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGC_RXBUFFER_3072;
else
- srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGC_RXBUFFER_2048;
+
+ srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
wr32(IGC_SRRCTL(reg_idx), srrctl);
@@ -618,6 +686,8 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
u64 tdba = ring->dma;
u32 txdctl = 0;
+ ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+
/* disable the queue */
wr32(IGC_TXDCTL(reg_idx), 0);
wrfl();
@@ -1055,13 +1125,17 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result))))
-static u32 igc_tx_cmd_type(u32 tx_flags)
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set HW vlan bit if vlan is present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
+ IGC_ADVTXD_DCMD_VLE);
+
/* set segmentation bits for TSO */
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
(IGC_ADVTXD_DCMD_TSE));
@@ -1070,6 +1144,9 @@ static u32 igc_tx_cmd_type(u32 tx_flags)
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
(IGC_ADVTXD_MAC_TSTAMP));
+ /* clear the frame-checksum-insertion bit when skb->no_fcs is set */
+ cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
+
return cmd_type;
}
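
igc_tx_cmd_type() relies on IGC_SET_FLAG() to translate a software flag bit into the corresponding descriptor bit by pure multiplication or division, which the compiler folds to a shift. A standalone sketch of the macro with made-up flag values:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver's IGC_SET_FLAG(): move bit _flag of _input
 * to bit _result, scaling up or down depending on which bit is higher.
 */
#define SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

#define TX_FLAGS_VLAN  0x01u   /* hypothetical software flag */
#define DCMD_VLE       0x40u   /* hypothetical descriptor bit */

int main(void)
{
	uint32_t tx_flags = TX_FLAGS_VLAN;
	uint32_t cmd_type = SET_FLAG(tx_flags, TX_FLAGS_VLAN, DCMD_VLE);

	printf("cmd_type=0x%x\n", cmd_type);  /* 0x40: VLE set */
	return 0;
}
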
@@ -1104,8 +1181,9 @@ static int igc_tx_map(struct igc_ring *tx_ring,
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
- u32 cmd_type = igc_tx_cmd_type(tx_flags);
+ u32 cmd_type;
+ cmd_type = igc_tx_cmd_type(skb, tx_flags);
tx_desc = IGC_TX_DESC(tx_ring, i);
igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
@@ -1211,11 +1289,7 @@ dma_error:
/* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) {
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
if (i-- == 0)
i += tx_ring->count;
@@ -1223,11 +1297,7 @@ dma_error:
}
if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL;
@@ -1359,6 +1429,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGC_TX_BUFFER_TYPE_SKB;
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
@@ -1383,6 +1454,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
}
}
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= IGC_TX_FLAGS_VLAN;
+ tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1482,6 +1558,25 @@ static inline void igc_rx_hash(struct igc_ring *ring,
PKT_HASH_TYPE_L3);
}
+static void igc_rx_vlan(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+ u16 vid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
+ if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
+ test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
/**
* igc_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
@@ -1500,11 +1595,37 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ igc_rx_vlan(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
+{
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+
+ if (enable) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IGC_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~IGC_CTRL_VME;
+ }
+ wr32(IGC_CTRL, ctrl);
+}
+
+static void igc_restore_vlan(struct igc_adapter *adapter)
+{
+ igc_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
const unsigned int size,
int *rx_buffer_pgcnt)
@@ -1930,6 +2051,63 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
}
}
+static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
+{
+ union igc_adv_rx_desc *desc;
+ u16 i = ring->next_to_use;
+ struct igc_rx_buffer *bi;
+ dma_addr_t dma;
+ bool ok = true;
+
+ if (!count)
+ return ok;
+
+ desc = IGC_RX_DESC(ring, i);
+ bi = &ring->rx_buffer_info[i];
+ i -= ring->count;
+
+ do {
+ bi->xdp = xsk_buff_alloc(ring->xsk_pool);
+ if (!bi->xdp) {
+ ok = false;
+ break;
+ }
+
+ dma = xsk_buff_xdp_get_dma(bi->xdp);
+ desc->read.pkt_addr = cpu_to_le64(dma);
+
+ desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ desc = IGC_RX_DESC(ring, 0);
+ bi = ring->rx_buffer_info;
+ i -= ring->count;
+ }
+
+ /* Clear the length for the next_to_use descriptor. */
+ desc->wb.upper.length = 0;
+
+ count--;
+ } while (count);
+
+ i += ring->count;
+
+ if (ring->next_to_use != i) {
+ ring->next_to_use = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, ring->tail);
+ }
+
+ return ok;
+}
+
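The refill loop just added uses a common Intel-driver indexing trick: the ring index is biased by -ring->count so the wrap test is a cheap '!i' instead of a compare against the ring size. A minimal sketch of the same pattern in isolation (illustrative only; the descriptor fill is elided and 'count' is the number of entries to process):

	u16 i = ring->next_to_use;

	i -= ring->count;		/* bias; wraps as u16, so '!i' means end-of-ring */
	do {
		/* ... fill descriptor (i + ring->count) ... */
		i++;
		if (unlikely(!i))	/* walked past the last entry */
			i -= ring->count;	/* wrap back to entry 0 */
	} while (--count);
	i += ring->count;		/* undo the bias to get a real index */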
static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
struct xdp_frame *xdpf,
struct igc_ring *ring)
@@ -1942,8 +2120,8 @@ static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_XDP;
buffer->xdpf = xdpf;
- buffer->tx_flags = IGC_TX_FLAGS_XDP;
buffer->protocol = 0;
buffer->bytecount = xdpf->len;
buffer->gso_segs = 1;
@@ -2025,51 +2203,52 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
return res;
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+/* This function assumes rcu_read_lock() is held by the caller. */
+static int __igc_xdp_run_prog(struct igc_adapter *adapter,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
{
- struct bpf_prog *prog;
- int res;
- u32 act;
+ u32 act = bpf_prog_run_xdp(prog, xdp);
- rcu_read_lock();
-
- prog = READ_ONCE(adapter->xdp_prog);
- if (!prog) {
- res = IGC_XDP_PASS;
- goto unlock;
- }
-
- act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- res = IGC_XDP_PASS;
- break;
+ return IGC_XDP_PASS;
case XDP_TX:
if (igc_xdp_xmit_back(adapter, xdp) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_TX;
- break;
+ goto out_failure;
+ return IGC_XDP_TX;
case XDP_REDIRECT:
if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_REDIRECT;
+ goto out_failure;
+ return IGC_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
- res = IGC_XDP_CONSUMED;
- break;
+ return IGC_XDP_CONSUMED;
}
+}
-unlock:
- rcu_read_unlock();
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ prog = READ_ONCE(adapter->xdp_prog);
+ if (!prog) {
+ res = IGC_XDP_PASS;
+ goto out;
+ }
+
+ res = __igc_xdp_run_prog(adapter, prog, xdp);
+
+out:
return ERR_PTR(-res);
}
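Splitting the verdict handling out into __igc_xdp_run_prog() lets the zero-copy receive path call it once per packet under a single rcu_read_lock() spanning the whole NAPI budget (see igc_clean_rx_irq_zc() below), while the copy path relies on the NAPI softirq context already acting as an RCU read-side section — the same reasoning behind the rcu_read_lock() removals in the ixgbe/ixgbevf/mvneta/mvpp2 hunks further down. Roughly (a sketch, not the driver's exact loop):

	rcu_read_lock();
	prog = READ_ONCE(adapter->xdp_prog);
	while (budget--) {
		/* ... set up the xdp_buff for the next descriptor ... */
		res = __igc_xdp_run_prog(adapter, prog, &xdp);
		/* act on IGC_XDP_PASS / _TX / _REDIRECT / _CONSUMED */
	}
	rcu_read_unlock();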
@@ -2103,6 +2282,20 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
xdp_do_flush();
}
+static void igc_update_rx_stats(struct igc_q_vector *q_vector,
+ unsigned int packets, unsigned int bytes)
+{
+ struct igc_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -2151,12 +2344,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
if (!skb) {
- xdp.data = pktbuf + pkt_offset;
- xdp.data_end = xdp.data + size;
- xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
- xdp_set_data_meta_invalid(&xdp);
- xdp.frame_sz = truesize;
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+ igc_rx_offset(rx_ring) + pkt_offset, size, false);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2226,12 +2416,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igc_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igc_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -2239,6 +2424,221 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
return total_packets;
}
+static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int totalsize = metasize + datasize;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+	if (metasize) {
+		skb_metadata_set(skb, metasize);
+		__skb_pull(skb, metasize);
+	}
+
+ return skb;
+}
+
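A detail worth keeping in mind here: skb metadata, by convention, occupies the bytes immediately preceding skb->data. After copying metadata plus packet data in one memcpy(), the head must therefore be pulled past the metadata so skb->data lands on the MAC header — hence the __skb_pull() above. Schematically:

	/*
	 *  headroom | metadata | packet data | tailroom
	 *                      ^ skb->data after __skb_pull(skb, metasize)
	 */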
+static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
+ union igc_adv_rx_desc *desc,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ struct igc_ring *ring = q_vector->rx.ring;
+ struct sk_buff *skb;
+
+ skb = igc_construct_skb_zc(ring, xdp);
+ if (!skb) {
+ ring->rx_stats.alloc_failed++;
+ return;
+ }
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ if (igc_cleanup_headers(ring, desc, skb))
+ return;
+
+ igc_process_skb_fields(ring, desc, skb);
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_ring *ring = q_vector->rx.ring;
+ u16 cleaned_count = igc_desc_unused(ring);
+ int total_bytes = 0, total_packets = 0;
+ u16 ntc = ring->next_to_clean;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(adapter->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union igc_adv_rx_desc *desc;
+ struct igc_rx_buffer *bi;
+ ktime_t timestamp = 0;
+ unsigned int size;
+ int res;
+
+ desc = IGC_RX_DESC(ring, ntc);
+ size = le16_to_cpu(desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ bi = &ring->rx_buffer_info[ntc];
+
+ if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
+ timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
+ bi->xdp->data);
+
+ bi->xdp->data += IGC_TS_HDR_LEN;
+
+			/* The HW timestamp has been copied into the local
+			 * variable, so the metadata length must be zero when
+			 * the XDP program runs.
+			 */
+ bi->xdp->data_meta += IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
+ bi->xdp->data_end = bi->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+
+ res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
+ switch (res) {
+ case IGC_XDP_PASS:
+ igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ fallthrough;
+ case IGC_XDP_CONSUMED:
+ xsk_buff_free(bi->xdp);
+ break;
+ case IGC_XDP_TX:
+ case IGC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ bi->xdp = NULL;
+ total_bytes += size;
+ total_packets++;
+ cleaned_count++;
+ ntc++;
+ if (ntc == ring->count)
+ ntc = 0;
+ }
+
+ ring->next_to_clean = ntc;
+ rcu_read_unlock();
+
+ if (cleaned_count >= IGC_RX_BUFFER_WRITE)
+ failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
+
+ if (xdp_status)
+ igc_finalize_xdp(adapter, xdp_status);
+
+ igc_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ if (xsk_uses_need_wakeup(ring->xsk_pool)) {
+ if (failure || ring->next_to_clean == ring->next_to_use)
+ xsk_set_rx_need_wakeup(ring->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(ring->xsk_pool);
+ return total_packets;
+ }
+
+ return failure ? budget : total_packets;
+}
+
+static void igc_update_tx_stats(struct igc_q_vector *q_vector,
+ unsigned int packets, unsigned int bytes)
+{
+ struct igc_ring *ring = q_vector->tx.ring;
+
+ u64_stats_update_begin(&ring->tx_syncp);
+ ring->tx_stats.bytes += bytes;
+ ring->tx_stats.packets += packets;
+ u64_stats_update_end(&ring->tx_syncp);
+
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += packets;
+}
+
+static void igc_xdp_xmit_zc(struct igc_ring *ring)
+{
+ struct xsk_buff_pool *pool = ring->xsk_pool;
+ struct netdev_queue *nq = txring_txq(ring);
+ union igc_adv_tx_desc *tx_desc = NULL;
+ int cpu = smp_processor_id();
+ u16 ntu = ring->next_to_use;
+ struct xdp_desc xdp_desc;
+ u16 budget;
+
+ if (!netif_carrier_ok(ring->netdev))
+ return;
+
+ __netif_tx_lock(nq, cpu);
+
+ budget = igc_desc_unused(ring);
+
+ while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ u32 cmd_type, olinfo_status;
+ struct igc_tx_buffer *bi;
+ dma_addr_t dma;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ xdp_desc.len;
+ olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+
+ tx_desc = IGC_TX_DESC(ring, ntu);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ bi = &ring->tx_buffer_info[ntu];
+ bi->type = IGC_TX_BUFFER_TYPE_XSK;
+ bi->protocol = 0;
+ bi->bytecount = xdp_desc.len;
+ bi->gso_segs = 1;
+ bi->time_stamp = jiffies;
+ bi->next_to_watch = tx_desc;
+
+ netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
+
+ ntu++;
+ if (ntu == ring->count)
+ ntu = 0;
+ }
+
+ ring->next_to_use = ntu;
+ if (tx_desc) {
+ igc_flush_tx_descriptors(ring);
+ xsk_tx_release(pool);
+ }
+
+ __netif_tx_unlock(nq);
+}
+
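In the advanced transmit descriptor the buffer length occupies the low bits of cmd_type_len alongside the command flags, while olinfo_status carries the payload length at IGC_ADVTXD_PAYLEN_SHIFT — which is why xdp_desc.len is OR-ed straight into cmd_type above. As a worked example (the 1500-byte length is illustrative):

	u32 len = 1500;
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | len;
	u32 olinfo_status = len << IGC_ADVTXD_PAYLEN_SHIFT;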
/**
* igc_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -2255,6 +2655,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
unsigned int i = tx_ring->next_to_clean;
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
+ u32 xsk_frames = 0;
if (test_bit(__IGC_DOWN, &adapter->state))
return true;
@@ -2284,19 +2685,22 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+ switch (tx_buffer->type) {
+ case IGC_TX_BUFFER_TYPE_XSK:
+ xsk_frames++;
+ break;
+ case IGC_TX_BUFFER_TYPE_XDP:
xdp_return_frame(tx_buffer->xdpf);
- else
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ case IGC_TX_BUFFER_TYPE_SKB:
napi_consume_skb(tx_buffer->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buffer data */
- dma_unmap_len_set(tx_buffer, len, 0);
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
+ break;
+ default:
+ netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
+ break;
+ }
/* clear last DMA location and unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -2310,13 +2714,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
- }
+ if (dma_unmap_len(tx_buffer, len))
+ igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
/* move us one more past the eop_desc for start of next pkt */
@@ -2341,12 +2740,16 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->tx_syncp);
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->tx_syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+
+ igc_update_tx_stats(q_vector, total_packets, total_bytes);
+
+ if (tx_ring->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+ igc_xdp_xmit_zc(tx_ring);
+ }
if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct igc_hw *hw = &adapter->hw;
@@ -2907,6 +3310,8 @@ static void igc_configure(struct igc_adapter *adapter)
igc_get_hw_control(adapter);
igc_set_rx_mode(netdev);
+ igc_restore_vlan(adapter);
+
igc_setup_tctl(adapter);
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
@@ -2926,7 +3331,10 @@ static void igc_configure(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igc_ring *ring = adapter->rx_ring[i];
- igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ if (ring->xsk_pool)
+ igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+ else
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
}
@@ -3541,14 +3949,17 @@ static int igc_poll(struct napi_struct *napi, int budget)
struct igc_q_vector *q_vector = container_of(napi,
struct igc_q_vector,
napi);
+ struct igc_ring *rx_ring = q_vector->rx.ring;
bool clean_complete = true;
int work_done = 0;
if (q_vector->tx.ring)
clean_complete = igc_clean_tx_irq(q_vector, budget);
- if (q_vector->rx.ring) {
- int cleaned = igc_clean_rx_irq(q_vector, budget);
+ if (rx_ring) {
+ int cleaned = rx_ring->xsk_pool ?
+ igc_clean_rx_irq_zc(q_vector, budget) :
+ igc_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -4200,6 +4611,9 @@ static int igc_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features;
struct igc_adapter *adapter = netdev_priv(netdev);
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ igc_vlan_mode(netdev, features);
+
/* Add VLAN support */
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
@@ -5186,6 +5600,9 @@ static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
+ bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
@@ -5231,6 +5648,43 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
return num_frames - drops;
}
+static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+ struct igc_q_vector *q_vector)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 eics = 0;
+
+ eics |= q_vector->eims_value;
+ wr32(IGC_EICS, eics);
+}
+
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+
+ if (test_bit(__IGC_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igc_xdp_is_enabled(adapter))
+ return -ENXIO;
+
+ if (queue_id >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ ring = adapter->rx_ring[queue_id];
+
+ if (!ring->xsk_pool)
+ return -ENXIO;
+
+ q_vector = adapter->q_vector[queue_id];
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ igc_trigger_rxtxq_interrupt(adapter, q_vector);
+
+ return 0;
+}
+
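igc_xsk_wakeup() is what an AF_XDP application reaches when it kicks the socket; with the need_wakeup feature enabled, user space typically checks the flag before issuing the syscall, along these lines (a user-space sketch assuming the libbpf/libxdp xsk helpers; 'xsk' is a hypothetical application context):

	/* Kick the kernel only when it asked to be woken. */
	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
		sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT,
		       NULL, 0);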
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -5246,6 +5700,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
+ .ndo_xsk_wakeup = igc_xsk_wakeup,
};
/* PCIe configuration access */
@@ -5485,11 +5940,15 @@ static int igc_probe(struct pci_dev *pdev,
/* copy netdev features into list of user selectable features */
netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -5998,6 +6457,61 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw)
return adapter->netdev;
}
+static void igc_disable_rx_ring_hw(struct igc_ring *ring)
+{
+ struct igc_hw *hw = &ring->q_vector->adapter->hw;
+ u8 idx = ring->reg_idx;
+ u32 rxdctl;
+
+ rxdctl = rd32(IGC_RXDCTL(idx));
+ rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
+ rxdctl |= IGC_RXDCTL_SWFLUSH;
+ wr32(IGC_RXDCTL(idx), rxdctl);
+}
+
+void igc_disable_rx_ring(struct igc_ring *ring)
+{
+ igc_disable_rx_ring_hw(ring);
+ igc_clean_rx_ring(ring);
+}
+
+void igc_enable_rx_ring(struct igc_ring *ring)
+{
+ struct igc_adapter *adapter = ring->q_vector->adapter;
+
+ igc_configure_rx_ring(adapter, ring);
+
+ if (ring->xsk_pool)
+ igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
+ else
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+}
+
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+ struct igc_hw *hw = &ring->q_vector->adapter->hw;
+ u8 idx = ring->reg_idx;
+ u32 txdctl;
+
+ txdctl = rd32(IGC_TXDCTL(idx));
+ txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+ txdctl |= IGC_TXDCTL_SWFLUSH;
+ wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+void igc_disable_tx_ring(struct igc_ring *ring)
+{
+ igc_disable_tx_ring_hw(ring);
+ igc_clean_tx_ring(ring);
+}
+
+void igc_enable_tx_ring(struct igc_ring *ring)
+{
+ struct igc_adapter *adapter = ring->q_vector->adapter;
+
+ igc_configure_tx_ring(adapter, ring);
+}
+
/**
* igc_init_module - Driver Registration Routine
*
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index cc174853554b..0f82990567d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -10,8 +10,8 @@
#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define IGC_MDIC 0x00020 /* MDI Control - RW */
-#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 11133c4619bb..a8cf5374be47 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <net/xdp_sock_drv.h>
+
#include "igc.h"
#include "igc_xdp.h"
@@ -32,29 +34,112 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return 0;
}
-int igc_xdp_register_rxq_info(struct igc_ring *ring)
+static int igc_xdp_enable_pool(struct igc_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
{
- struct net_device *dev = ring->netdev;
+ struct net_device *ndev = adapter->netdev;
+ struct device *dev = &adapter->pdev->dev;
+ struct igc_ring *rx_ring, *tx_ring;
+ struct napi_struct *napi;
+ bool needs_reset;
+ u32 frame_size;
int err;
- err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
- if (err) {
- netdev_err(dev, "Failed to register xdp rxq info\n");
- return err;
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
+		/* When XDP is enabled, the driver doesn't support frames that
+		 * span multiple buffers, so check that the xsk frame size is
+		 * large enough to hold a maximum-size ethernet frame plus two
+		 * VLAN tags.
+		 */
+ return -EOPNOTSUPP;
}
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
+ err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
if (err) {
- netdev_err(dev, "Failed to register xdp rxq mem model\n");
- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ netdev_err(ndev, "Failed to map xsk pool\n");
return err;
}
+ needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+ rx_ring = adapter->rx_ring[queue_id];
+ tx_ring = adapter->tx_ring[queue_id];
+ /* Rx and Tx rings share the same napi context. */
+ napi = &rx_ring->q_vector->napi;
+
+ if (needs_reset) {
+ igc_disable_rx_ring(rx_ring);
+ igc_disable_tx_ring(tx_ring);
+ napi_disable(napi);
+ }
+
+ set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+ set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+ if (needs_reset) {
+ napi_enable(napi);
+ igc_enable_rx_ring(rx_ring);
+ igc_enable_tx_ring(tx_ring);
+
+ err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
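The frame-size check above is simple arithmetic: ETH_FRAME_LEN (1514) plus two VLAN tags (2 * VLAN_HLEN = 8) gives 1522 bytes, so any XSK chunk smaller than that could force a standard-MTU frame to span buffers, which the driver does not support under XDP. Equivalently (a sketch; the macro name is illustrative, not part of the driver):

	#define IGC_MIN_XSK_FRAME (ETH_FRAME_LEN + VLAN_HLEN * 2)	/* 1522 */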
+static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
+{
+ struct igc_ring *rx_ring, *tx_ring;
+ struct xsk_buff_pool *pool;
+ struct napi_struct *napi;
+ bool needs_reset;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+ if (!pool)
+ return -EINVAL;
+
+ needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+ rx_ring = adapter->rx_ring[queue_id];
+ tx_ring = adapter->tx_ring[queue_id];
+ /* Rx and Tx rings share the same napi context. */
+ napi = &rx_ring->q_vector->napi;
+
+ if (needs_reset) {
+ igc_disable_rx_ring(rx_ring);
+ igc_disable_tx_ring(tx_ring);
+ napi_disable(napi);
+ }
+
+ xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+ clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+ clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+ if (needs_reset) {
+ napi_enable(napi);
+ igc_enable_rx_ring(rx_ring);
+ igc_enable_tx_ring(tx_ring);
+ }
+
return 0;
}
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+ u16 queue_id)
{
- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
+ igc_xdp_disable_pool(adapter, queue_id);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h
index cfecb515b718..a74e5487d199 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.h
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.h
@@ -6,8 +6,12 @@
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack);
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+ u16 queue_id);
-int igc_xdp_register_rxq_info(struct igc_ring *ring);
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
+static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+ return !!adapter->xdp_prog;
+}
#endif /* _IGC_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e324e42fab2d..58ea959a4482 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1514,8 +1514,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
-#define IXGBE_STORE_AS_BE16(_value) \
- ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
@@ -1651,13 +1650,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian) */
- fdirport = ntohs(input->formatted.dst_port);
+ fdirport = be16_to_cpu(input->formatted.dst_port);
fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= ntohs(input->formatted.src_port);
+ fdirport |= be16_to_cpu(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
/* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= ntohs(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
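The IXGBE_STORE_AS_BE16 rewrite is behavior-preserving: the open-coded byte swap and __swab16(ntohs(x)) store the same bit pattern on either byte order; the new form merely resolves sparse warnings about mixing __be16 and u16. For a flex-bytes value whose wire order is 0xaa 0xbb (illustrative):

	__be16 flex = htons(0xaabb);
	u16 stored = __swab16(ntohs(flex));	/* 0xbbaa on any host */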
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 03ccbe6b66d2..e90b5047e695 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3678,10 +3678,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- union {
- struct ixgbe_hic_hdr hdr;
- u32 u32arr[1];
- } *bp = buffer;
+ struct ixgbe_hic_hdr *hdr = buffer;
+ u32 *u32arr = buffer;
u16 buf_len, dword_len;
s32 status;
u32 bi;
@@ -3707,12 +3705,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&bp->u32arr[bi]);
+ u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&u32arr[bi]);
}
/* If there is anything in the data position, pull it in */
- buf_len = bp->hdr.buf_len;
+ buf_len = hdr->buf_len;
if (!buf_len)
goto rel_out;
@@ -3727,8 +3725,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&bp->u32arr[bi]);
+ u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&u32arr[bi]);
}
rel_out:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 54d47265a7ac..e596e1a9fc75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -511,14 +511,14 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
continue;
reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
- if (reg == xs->id.daddr.a4)
+ if (reg == (__force u32)xs->id.daddr.a4)
return 1;
}
}
if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
- if (reg == xs->id.daddr.a4)
+ if (reg == (__force u32)xs->id.daddr.a4)
return 1;
}
@@ -533,7 +533,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
for (j = 0; j < 4; j++) {
reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
- if (reg != xs->id.daddr.a6[j])
+ if (reg != (__force u32)xs->id.daddr.a6[j])
break;
}
if (j == 4) /* did we match all 4 words? */
@@ -543,7 +543,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
for (j = 0; j < 4; j++) {
reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
- if (reg != xs->id.daddr.a6[j])
+ if (reg != (__force u32)xs->id.daddr.a6[j])
break;
}
if (j == 4) /* did we match all 4 words? */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c5ec17d19c59..ffff69efd78a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2199,7 +2199,6 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
@@ -2213,23 +2212,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IXGBE_XDP_REDIR;
- else
- result = IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IXGBE_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
@@ -2237,7 +2236,6 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
}
xdp_out:
- rcu_read_unlock();
return ERR_PTR(-result);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 988db46bff0e..214a38de3f41 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 91ad5b902673..96dd1a4f956a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,15 +100,14 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
- rcu_read_unlock();
- return result;
+ if (err)
+ goto out_failure;
+ return IXGBE_XDP_REDIR;
}
switch (act) {
@@ -116,23 +115,23 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBE_XDP_CONSUMED;
break;
}
- rcu_read_unlock();
return result;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ba2ed8a43d2d..c714e1ecd308 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1054,7 +1054,6 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
struct bpf_prog *xdp_prog;
u32 act;
- rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
@@ -1067,11 +1066,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
@@ -1079,7 +1081,6 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
break;
}
xdp_out:
- rcu_read_unlock();
return ERR_PTR(-result);
}
@@ -3814,7 +3815,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6f987a7ffcb3..b30a45725374 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq_byname(pdev, "tx");
p = devm_platform_ioremap_resource_byname(pdev, "emac");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->eth_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->rx_dma_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->tx_dma_regs = p;
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41c2ad210bc9..fb78f17d734f 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,8 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +165,18 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
- ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev,
- ch->dma.desc_base[ch->dma.desc].addr))) {
+ mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+ XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
goto skip;
}
+ ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +200,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
+ net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
@@ -348,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
struct xrx200_chan *ch = ptr;
if (napi_schedule_prep(&ch->napi)) {
- __napi_schedule(&ch->napi);
ltq_dma_disable_irq(&ch->dma);
+ __napi_schedule(&ch->napi);
}
ltq_dma_ack_irq(&ch->dma);
@@ -432,7 +437,6 @@ static int xrx200_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
struct xrx200_priv *priv;
struct net_device *net_dev;
int err;
@@ -452,13 +456,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->max_mtu = XRX200_DMA_DATA_LEN;
/* load the memory ranges */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get resources\n");
- return -ENOENT;
- }
-
- priv->pmac_reg = devm_ioremap_resource(dev, res);
+ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->pmac_reg))
return PTR_ERR(priv->pmac_reg);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index d14762d93640..62a97c46fba0 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,6 +17,8 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -281,7 +283,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
struct orion_mdio_dev *dev;
int i, ret;
- type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+ type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -369,7 +371,13 @@ static int orion_mdio_probe(struct platform_device *pdev)
goto out_mdio;
}
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
+	/* Register via ACPI when the fwnode is an ACPI node; otherwise
+	 * use of_mdiobus_register(), which itself falls back to plain
+	 * mdiobus_register() on platforms without DT.
+	 */
+ if (is_acpi_node(pdev->dev.fwnode))
+ ret = acpi_mdiobus_register(bus, pdev->dev.fwnode);
+ else
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
goto out_mdio;
@@ -421,12 +429,20 @@ static const struct of_device_id orion_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
+static const struct acpi_device_id orion_mdio_acpi_match[] = {
+ { "MRVL0100", BUS_TYPE_SMI },
+ { "MRVL0101", BUS_TYPE_XSMI },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
.remove = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
+ .acpi_match_table = ACPI_PTR(orion_mdio_acpi_match),
},
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7d5cd9bc6c99..361bc4fbe20b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1805,18 +1805,14 @@ static void mvneta_rx_error(struct mvneta_port *pp,
}
/* Handle RX checksum offload based on the descriptor's status */
-static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
- struct sk_buff *skb)
+static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{
if ((pp->dev->features & NETIF_F_RXCSUM) &&
(status & MVNETA_RXD_L3_IP4) &&
- (status & MVNETA_RXD_L4_CSUM_OK)) {
- skb->csum = 0;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
+ (status & MVNETA_RXD_L4_CSUM_OK))
+ return CHECKSUM_UNNECESSARY;
- skb->ip_summed = CHECKSUM_NONE;
+ return CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to <cause> returned
@@ -2320,7 +2316,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
}
static struct sk_buff *
-mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,11 +2327,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
if (!skb)
return ERR_PTR(-ENOMEM);
- page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+ skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
- mvneta_rx_csum(pp, desc_status, skb);
+ skb->ip_summed = mvneta_rx_csum(pp, desc_status);
for (i = 0; i < num_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -2343,7 +2339,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(frag), skb_frag_off(frag),
skb_frag_size(frag), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+ /* We don't need to reset pp_recycle here. It's already set, so
+ * just mark fragments for recycling.
+ */
+ page_pool_store_mem_info(skb_frag_page(frag), pool);
}
return skb;
@@ -2370,7 +2369,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
- rcu_read_lock();
xdp_prog = READ_ONCE(pp->xdp_prog);
/* Fairness NAPI loop */
@@ -2425,7 +2423,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
goto next;
- skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+ skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
@@ -2448,7 +2446,6 @@ next:
xdp_buf.data_hard_start = NULL;
sinfo.nr_frags = 0;
}
- rcu_read_unlock();
if (xdp_buf.data_hard_start)
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
@@ -2532,7 +2529,7 @@ err_drop_frame:
rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
+ skb->ip_summed = mvneta_rx_csum(pp, rx_status);
napi_gro_receive(napi, skb);
rcvd_pkts++;
@@ -2581,8 +2578,7 @@ err_drop_frame:
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
-
- mvneta_rx_csum(pp, rx_status, skb);
+ skb->ip_summed = mvneta_rx_csum(pp, rx_status);
napi_gro_receive(napi, skb);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 8edba5ea90f0..b9fbc9f000f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
struct mvpp2_tai;
/* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
@@ -1175,9 +1197,6 @@ struct mvpp2_port {
/* Firmware node associated to the port */
struct fwnode_handle *fwnode;
- /* Is a PHY always connected to the port */
- bool has_phy;
-
/* Per-port registers' base address */
void __iomem *base;
void __iomem *stats_base;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ec706d614cac..3229bafa2a2c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3543,21 +3543,17 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
}
/* Handle RX checksum offload */
-static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
- struct sk_buff *skb)
+static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
{
if (((status & MVPP2_RXD_L3_IP4) &&
!(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
(status & MVPP2_RXD_L3_IP6))
if (((status & MVPP2_RXD_L4_UDP) ||
(status & MVPP2_RXD_L4_TCP)) &&
- (status & MVPP2_RXD_L4_CSUM_OK)) {
- skb->csum = 0;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
+ (status & MVPP2_RXD_L4_CSUM_OK))
+ return CHECKSUM_UNNECESSARY;
- skb->ip_summed = CHECKSUM_NONE;
+ return CHECKSUM_NONE;
}
/* Allocate a new skb and add it to BM pool */
@@ -3784,9 +3780,9 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
}
static int
-mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
- struct bpf_prog *prog, struct xdp_buff *xdp,
- struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
+mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct page_pool *pp,
+ struct mvpp2_pcpu_stats *stats)
{
unsigned int len, sync, err;
struct page *page;
@@ -3839,6 +3835,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
return ret;
}
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+	phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
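On PPv2.2 and newer the buffer addresses are up to 40 bits wide, so the buffer header splits each next-pointer into a 32-bit low word plus an 8-bit high byte; the walk above reassembles them before returning every chained buffer to the BM pool. The composition is simply (a minimal illustration of the lines above):

	u64 next = le32_to_cpu(buff_hdr->next_phys_addr) |
		   ((u64)buff_hdr->next_phys_addr_high << 32);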
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3852,8 +3877,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_done = 0;
u32 xdp_ret = 0;
- rcu_read_lock();
-
xdp_prog = READ_ONCE(port->xdp_prog);
/* Get number of received packets and clamp the to-do */
@@ -3871,28 +3894,24 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
phys_addr_t phys_addr;
u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
+ struct page *page;
void *data;
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+ page = virt_to_page(data);
+ prefetch(page);
+
rx_done++;
rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
rx_bytes -= MVPP2_MH_SIZE;
dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
- data = (void *)phys_to_virt(phys_addr);
pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * comprised by the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY)
- goto err_drop_frame;
-
if (port->priv->percpu_pools) {
pp = port->priv->page_pool[pool];
dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,8 +3923,20 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes + MVPP2_MH_SIZE,
dma_dir);
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+		/* In case of an error, release the requested buffer pointer
+		 * to the Buffer Manager. This release is controlled by the
+		 * hardware, and the information about the buffer is carried
+		 * in the RX descriptor.
+		 */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
/* Prefetch header */
- prefetch(data);
+ prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -3925,7 +3956,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
rx_bytes, false);
- ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
+ ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
if (ret) {
xdp_ret |= ret;
@@ -3964,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
if (pp)
- page_pool_release_page(pp, virt_to_page(data));
+ skb_mark_for_recycle(skb, page, pp);
else
dma_unmap_single_attrs(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE,
@@ -3975,8 +4006,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
- mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(napi, skb);
continue;
@@ -3985,11 +4016,12 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
- rcu_read_unlock();
-
if (xdp_ret & MVPP2_XDP_REDIR)
xdp_do_flush_map();
@@ -4753,9 +4785,8 @@ static int mvpp2_open(struct net_device *dev)
goto err_cleanup_txqs;
}
- /* Phylink isn't supported yet in ACPI mode */
- if (port->of_node) {
- err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+ if (port->phylink) {
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
if (err) {
netdev_err(port->dev, "could not attach PHY (%d)\n",
err);
@@ -6663,6 +6694,19 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}
+/* To preserve backward compatibility with existing ACPI tables, check
+ * whether the port firmware node carries the description required to
+ * use phylink.
+ */
+static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
+{
+ if (!is_acpi_node(port_fwnode))
+ return false;
+
+ return (!fwnode_property_present(port_fwnode, "phy-handle") &&
+ !fwnode_property_present(port_fwnode, "managed") &&
+ !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
+}
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -6738,7 +6782,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port = netdev_priv(dev);
port->dev = dev;
port->fwnode = port_fwnode;
- port->has_phy = !!of_find_property(port_node, "phy", NULL);
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -6881,8 +6924,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
- /* Phylink isn't used w/ ACPI as of now */
- if (port_node) {
+ if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
@@ -6894,6 +6936,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
port->phylink = phylink;
} else {
+ dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
}
@@ -7311,7 +7354,6 @@ static int mvpp2_get_sram(struct platform_device *pdev,
static int mvpp2_probe(struct platform_device *pdev)
{
- const struct acpi_device_id *acpi_id;
struct fwnode_handle *fwnode = pdev->dev.fwnode;
struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
@@ -7324,16 +7366,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (has_acpi_companion(&pdev->dev)) {
- acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
- &pdev->dev);
- if (!acpi_id)
- return -EINVAL;
- priv->hw_version = (unsigned long)acpi_id->driver_data;
- } else {
- priv->hw_version =
- (unsigned long)of_device_get_match_data(&pdev->dev);
- }
+ priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
/* multi queue mode isn't supported on PPV2.1, fall back to single
* mode
@@ -7351,6 +7384,10 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (has_acpi_companion(&pdev->dev)) {
/* In case the MDIO memory region is declared in
* the ACPI, it can already appear as 'in-use'
@@ -7445,34 +7482,35 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_gop_clk;
- priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
+ priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
if (IS_ERR(priv->mg_core_clk)) {
- priv->mg_core_clk = NULL;
- } else {
- err = clk_prepare_enable(priv->mg_core_clk);
- if (err < 0)
- goto err_mg_clk;
+ err = PTR_ERR(priv->mg_core_clk);
+ goto err_mg_clk;
}
+
+ err = clk_prepare_enable(priv->mg_core_clk);
+ if (err < 0)
+ goto err_mg_clk;
}
- priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
if (IS_ERR(priv->axi_clk)) {
err = PTR_ERR(priv->axi_clk);
- if (err == -EPROBE_DEFER)
- goto err_mg_core_clk;
- priv->axi_clk = NULL;
- } else {
- err = clk_prepare_enable(priv->axi_clk);
- if (err < 0)
- goto err_mg_core_clk;
+ goto err_mg_core_clk;
}
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err < 0)
+ goto err_mg_core_clk;
+
/* Get system's tclk rate */
priv->tclk = clk_get_rate(priv->pp_clk);
- } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
- &priv->tclk)) {
- dev_err(&pdev->dev, "missing clock-frequency value\n");
- return -EINVAL;
+ } else {
+ err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
+ if (err) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return err;
+ }
}
if (priv->hw_version >= MVPP22) {
@@ -7552,6 +7590,8 @@ static int mvpp2_probe(struct platform_device *pdev)
return 0;
err_port_probe:
+ fwnode_handle_put(port_fwnode);
+
i = 0;
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i])
@@ -7560,13 +7600,10 @@ err_port_probe:
}
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
-
err_mg_core_clk:
- if (priv->hw_version >= MVPP22)
- clk_disable_unprepare(priv->mg_core_clk);
+ clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version >= MVPP22)
- clk_disable_unprepare(priv->mg_clk);
+ clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
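
The two clock hunks above lean on a pair of kernel conveniences: devm_clk_get_optional() returns NULL rather than an error pointer when the clock is simply not described for the device, and clk_prepare_enable()/clk_disable_unprepare() treat a NULL clk as a no-op, which is also why the error labels drop their hw_version guards. A minimal sketch of the pattern, with the "foo_clk" name and foo_priv state as hypothetical stand-ins:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct foo_priv {			/* hypothetical driver state */
	struct clk *clk;
};

static int foo_enable_optional_clk(struct device *dev, struct foo_priv *priv)
{
	/* NULL (not -ENOENT) when the clock is absent from firmware. */
	priv->clk = devm_clk_get_optional(dev, "foo_clk");
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);	/* real error, e.g. -EPROBE_DEFER */

	/* clk_prepare_enable(NULL) is a no-op returning 0, so the
	 * absent-clock case needs no special branch. */
	return clk_prepare_enable(priv->clk);
}
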
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 7cc7d72d761e..93575800ca92 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -394,9 +394,6 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
if (start > end)
swap(start, end);
- if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
- end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
-
for (tid = start; tid <= end; tid++) {
if (!priv->prs_shadow[tid].valid)
return tid;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e66109367487..47f5ed006a93 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -197,6 +197,11 @@ enum nix_scheduler {
#define SDP_CHANNELS 256
+/* The mask extracts the lower 10 bits of the channel number
+ * that CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+
/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are statically assigned.
*/
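
NIX_CHAN_CPT_X2P_MASK is a plain bitmask; a small sketch of how such a mask would be applied (the helper name is illustrative, not part of the driver):

#include <linux/types.h>

/* Keep only the 10 channel bits that CPT forwards to X2P; any
 * higher-order routing bits are dropped. */
static inline u64 nix_cpt_x2p_chan(u64 chan)
{
	return chan & NIX_CHAN_CPT_X2P_MASK;	/* 0x3ff */
}

/* e.g. nix_cpt_x2p_chan(0x1c2a) == 0x2a */
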
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index cedb2616c509..770d86262838 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -259,7 +260,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
-M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -611,7 +616,12 @@ enum nix_af_status {
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
- NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
};
/* For NIX RX vtag action */
@@ -680,6 +690,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
union {
struct nix_cn10k_rq_ctx_s rq_mask;
@@ -687,6 +698,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -698,6 +710,7 @@ struct nix_cn10k_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -713,6 +726,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -720,6 +734,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -731,6 +746,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -913,6 +929,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
u16 mode;
};
@@ -971,6 +988,31 @@ struct nix_hw_info {
u16 min_mtu;
};
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+ /* There is no need to allocate more than one bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
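
A hedged sketch of filling the allocation request above: one leaf-layer profile per receive queue, clamped to the 64-per-PF_FUNC cap noted in the response struct. BAND_PROF_LEAF_LAYER is assumed to be the leaf index of the band-profile layer enum, which is defined outside this hunk:

#include <linux/minmax.h>
#include <linux/string.h>

static void nix_fill_bandprof_req(struct nix_bandprof_alloc_req *req,
				  u16 nrxqs)
{
	memset(req->prof_count, 0, sizeof(req->prof_count));
	/* One policer profile per RQ is enough, per the comment in
	 * nix_bandprof_alloc_rsp; never ask beyond the per-PF_FUNC cap. */
	req->prof_count[BAND_PROF_LEAF_LAYER] =
		min_t(u16, nrxqs, MAX_BANDPROF_PER_PFFUNC);
}
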
@@ -1228,6 +1270,14 @@ struct ptp_rsp {
u64 clk;
};
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
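
Composing the new SET_VF_PERM request might look like the sketch below. The otx2_mbox_alloc_msg_set_vf_perm() and otx2_sync_mbox_msg() helpers follow the naming convention the M() macro table conventionally generates and are assumptions here, as is struct otx2_nic:

static int foo_set_vf_trust(struct otx2_nic *pf, u16 vf_idx, bool trusted)
{
	struct set_vf_perm *req;

	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
	if (!req)
		return -ENOMEM;

	req->vf = vf_idx;
	/* VF_TRUSTED grants elevated privileges; RESET_VF_PERM restores
	 * the VF's default permissions. */
	req->flags = trusted ? VF_TRUSTED : RESET_VF_PERM;

	return otx2_sync_mbox_msg(&pf->mbox);
}
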
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 1e012e787260..19bad9a59c8f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -33,6 +33,10 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_IH_2_ETHER,
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CH_LEN_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -42,7 +46,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_ITAG,
+ NPC_LT_LB_PPPOE,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
@@ -50,6 +54,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -65,6 +70,7 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
@@ -146,7 +152,14 @@ enum npc_kpu_lh_ltype {
* Ethernet interfaces, LBK interfaces, etc.
*/
enum npc_pkind_type {
- NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
/* list of known and supported fields in packet header and
@@ -213,7 +226,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -233,13 +246,13 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- const struct npc_kpu_profile_cam *cam;
- const struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -425,7 +438,19 @@ struct nix_tx_action {
/* NPC MCAM reserved entry index per nixlf */
#define NIXLF_UCAST_ENTRY 0
#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* Number of NPC profiles. */
+ u16 prfl_sz[0];
+};
struct npc_mcam_kex {
/* MKEX Profile Header */
@@ -445,6 +470,15 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
+
struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
@@ -459,6 +493,29 @@ struct npc_lt_def_ipsec {
u8 spi_nz;
};
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
+
struct npc_lt_def_cfg {
struct npc_lt_def rx_ol2;
struct npc_lt_def rx_oip4;
@@ -476,7 +533,41 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_oip4;
struct npc_lt_def pck_oip6;
struct npc_lt_def pck_iip4;
-};
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 2
+ /* KPU Profile Header */
+ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ __le64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with the mkex_profile module parameter. Format is the same as for
+ * the MKEX profile to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
struct rvu_npc_mcam_rule {
struct flow_msg packet;
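
The loadable-profile layout above is a fixed header followed by per-KPU blocks, each holding 'entries' CAM entries and then 'entries' action entries, which is why those structs are __packed. A sketch of validating and walking it, with illustrative helper names:

#include <linux/kernel.h>
#include <linux/types.h>

/* Validate the header signature ("kpuprof\0" stored little-endian). */
static bool npc_fw_sig_ok(const struct npc_kpu_profile_fwdata *fw)
{
	return fw->signature == cpu_to_le64(KPU_SIGN);
}

/* Step from one per-KPU block to the next: a fixed header followed by
 * 'entries' CAM entries and then 'entries' action entries. */
static const struct npc_kpu_fwdata *
npc_next_kpu(const struct npc_kpu_fwdata *kpu)
{
	size_t sz = sizeof(*kpu) +
		    kpu->entries * (sizeof(struct npc_kpu_profile_cam) +
				    sizeof(struct npc_kpu_profile_action));

	return (const struct npc_kpu_fwdata *)((const u8 *)kpu + sz);
}
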
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 5c372d2c24a1..fee655cc7523 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -11,7 +11,10 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100060000
+#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
+#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
+#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
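
The new macros split the 64-bit profile version into three 16-bit fields, so NPC_KPU_PROFILE_VER above (0x0000000100060000) decodes as major 1, minor 6, patch 0. An illustrative helper:

#include <linux/kernel.h>

/* Illustrative only: render a KPU profile version as "maj.min.patch". */
static void npc_kpu_ver_str(u64 ver, char *buf, size_t len)
{
	snprintf(buf, len, "%u.%u.%u", NPC_KPU_VER_MAJ(ver),
		 NPC_KPU_VER_MIN(ver), NPC_KPU_VER_PATCH(ver));
}
/* npc_kpu_ver_str(NPC_KPU_PROFILE_VER, ...) yields "1.6.0". */
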
@@ -20,6 +23,7 @@
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
#define NPC_ETYPE_MPLSU 0x8847
#define NPC_ETYPE_MPLSM 0x8848
#define NPC_ETYPE_ETAG 0x893f
@@ -33,6 +37,10 @@
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -142,14 +150,15 @@
#define NPC_DSA_EDSA 0x8000
#define NPC_DSA_FDSA 0xc000
-#define NPC_KEXOF_DMAC 8
-#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define NPC_KEXOF_DMAC 9
+#define MKEX_SIGN 0x19bbfdbd15f
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_ERRCODE | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
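
For reference, a worked instance of the KEX_LD_CFG() packing; the DMAC_KEX_LD name is hypothetical. bytesm1 is bytes-minus-one, so 0x5 extracts the full 6-byte DMAC at header offset 0 into key offset NPC_KEXOF_DMAC, which the hunk above moves from 8 to 9:

#include <linux/build_bug.h>

#define DMAC_KEX_LD	KEX_LD_CFG(0x5, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC)

/* (5 << 16) | (1 << 7) | (9 & 0x3F) == 0x50089 */
static_assert(DMAC_KEX_LD == 0x50089);
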
@@ -170,25 +179,31 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_CUSTOM_L2_24B,
+ NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_ITAG,
NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_NGIO,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
- NPC_S_KPU3_ITAG,
NPC_S_KPU3_CTAG_C,
NPC_S_KPU3_STAG_C,
NPC_S_KPU3_QINQ_C,
NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -198,13 +213,19 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS,
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
NPC_S_KPU7_IP6_EXT,
NPC_S_KPU7_IP6_ROUT,
NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -265,7 +286,6 @@ enum npc_kpu_la_lflag {
NPC_F_LA_L_UNK_ETYPE = 1,
NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
- NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
NPC_F_LA_L_WITH_NSH,
};
@@ -442,7 +462,28 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static const struct npc_kpu_profile_action ikpu_action_entries[] = {
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -950,7 +991,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -958,8 +999,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -967,8 +1008,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 40, 54, 58, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -976,8 +1017,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 102, 106, 110, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -1021,7 +1062,9 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1080,6 +1123,15 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1123,7 +1175,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1132,7 +1184,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1141,7 +1193,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1150,7 +1202,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_NSH,
+ NPC_ETYPE_DSA,
0xffff,
0x0000,
0x0000,
@@ -1159,7 +1211,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_DSA,
+ NPC_ETYPE_PPPOE,
0xffff,
0x0000,
0x0000,
@@ -1294,15 +1346,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH_NIX, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1339,8 +1382,8 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W|NPC_IH_UTAG,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1349,7 +1392,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
NPC_IH_W,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1358,7 +1401,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
0x0000,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1501,15 +1544,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_HIGIG2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1645,7 +1679,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1654,7 +1688,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1663,7 +1697,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1672,6 +1706,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1680,7 +1840,88 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1689,6 +1930,150 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -1699,7 +2084,9 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -1783,6 +2170,24 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -2226,15 +2631,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_S_KPU2_ETAG, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ETAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
@@ -2313,159 +2709,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CTAG2, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -2817,6 +3060,15 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -2827,7 +3079,9 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3243,159 +3497,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU3_CTAG_C, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -3936,6 +4037,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3946,7 +4056,9 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4084,6 +4196,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
{
NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
0x0000,
NPC_DSA_FDSA,
0x0000,
@@ -4092,6 +4276,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4102,7 +4367,9 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4125,116 +4392,116 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_GRE,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP6,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_MPLS,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -4245,7 +4512,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4254,7 +4521,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4263,7 +4530,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4272,7 +4539,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4281,7 +4548,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4290,7 +4557,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4299,7 +4566,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4308,7 +4575,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4317,7 +4584,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4326,7 +4593,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4335,7 +4602,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4344,7 +4611,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4662,6 +4929,429 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4672,7 +5362,9 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -5007,6 +5699,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5017,7 +6033,9 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5226,6 +6244,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5236,7 +6353,9 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5259,8 +6378,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5268,8 +6387,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5277,8 +6396,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5286,8 +6405,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5565,7 +6684,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5574,7 +6693,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5583,7 +6702,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5592,7 +6711,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5637,7 +6756,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5646,7 +6765,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5655,7 +6774,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5664,7 +6783,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5709,7 +6828,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5718,7 +6837,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5727,7 +6846,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5736,7 +6855,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5781,7 +6900,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5790,7 +6909,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5799,7 +6918,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5808,7 +6927,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5853,7 +6972,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5862,7 +6981,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5871,7 +6990,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5880,7 +6999,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5916,7 +7035,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5925,7 +7044,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5934,7 +7053,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5943,7 +7062,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5977,7 +7096,9 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
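
Every per-stage table in this patch gets the same treatment: `const` is dropped and two reserved slots are prepended. A KPU's CAM table and action table pair up 1:1 by index, so the placeholders have to be added to both sides to keep them aligned, and dropping `const` lets a runtime-loaded profile patch the tables in place. NPC_KPU_NOP_CAM and NPC_KPU_NOP_ACTION are defined elsewhere in this patch; the sketch below reconstructs their likely shape from the seven-field CAM layout and the action layout used throughout these tables (the exact bodies are an assumption, not quoted from the source):

	/* Sketch only: a CAM slot that matches no state (mask 0x00 on the
	 * state byte) and an action that advances nothing, sets no layer
	 * info and reports no error.
	 */
	#define NPC_KPU_NOP_CAM		\
		{			\
			NPC_S_NA, 0x00,	\
			0x0000,		\
			0x0000,		\
			0x0000,		\
			0x0000,		\
			0x0000,		\
			0x0000,		\
		}

	#define NPC_KPU_NOP_ACTION			\
		{					\
			NPC_ERRLEV_RE, NPC_EC_NOERR,	\
			0, 0, 0, 0, 0,			\
			NPC_S_NA, 0, 0,			\
			NPC_LID_LA, NPC_LT_NA,		\
			0,				\
			0, 0, 0, 0,			\
		}
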
@@ -6387,8 +7508,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- 0x0000,
- 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
0x0000,
},
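
With this change the catch-all GTPU entry no longer fires for arbitrary payloads: it now requires the GTP flags byte to carry protocol type GTP and version 1. In a standard GTPv1-U header the version occupies bits 7:5 of that byte and the protocol-type bit is bit 4; a stand-alone illustration of the check (the macro names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>

	#define GTP_VER_SHIFT	5		/* bits 7:5 of byte 0: version */
	#define GTP_PT_BIT	(1u << 4)	/* bit 4: 1 = GTP, 0 = GTP'    */

	/* True for a GTPv1 flags byte such as 0x30..0x3f. */
	static bool is_gtpu_v1(uint8_t flags)
	{
		return ((flags >> GTP_VER_SHIFT) == 1) && (flags & GTP_PT_BIT);
	}
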
@@ -6448,7 +7569,9 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6613,7 +7736,9 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6922,13 +8047,15 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6936,8 +8063,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6945,8 +8072,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6954,8 +8081,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6963,8 +8090,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6972,8 +8099,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6981,8 +8108,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6990,8 +8117,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
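
The entries above always match NPC_IP_VER_4 | NPC_IP_HDR_LEN_5 against NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK as a pair, which corresponds to pinning the first IPv4 header byte to 0x45: version 4 with a 20-byte, option-less header (headers with options take the separate NPC_LT_LC_IP_OPT path). A stand-alone restatement of that test:

	#include <stdbool.h>
	#include <stdint.h>

	/* IPv4 byte 0: version in bits 7:4, IHL (header length in 32-bit
	 * words) in bits 3:0. 0x45 = version 4, IHL 5 = 20-byte header.
	 */
	static bool is_ipv4_no_options(uint8_t first_byte)
	{
		return (first_byte >> 4) == 4 && (first_byte & 0x0f) == 5;
	}
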
@@ -7177,7 +8304,9 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7189,7 +8318,9 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7201,7 +8332,9 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7224,8 +8357,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -7233,8 +8366,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7242,8 +8375,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7251,8 +8384,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
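
These four kpu15 entries single out contradictory TCP flag combinations (RST+FIN, URG+SYN, RST+SYN, SYN+FIN) so the tunnelled-TCP path can flag them the same way kpu8 does for the outer header. Assuming the standard TCP flag bit values, the test each entry encodes is equivalent to:

	#include <stdbool.h>
	#include <stdint.h>

	#define TCP_FIN	0x01
	#define TCP_SYN	0x02
	#define TCP_RST	0x04
	#define TCP_URG	0x20

	/* Each pair mirrors one CAM entry: data and mask hold the same
	 * value, so the entry fires when both flags are set at once.
	 */
	static bool tcp_flags_suspicious(uint8_t flags)
	{
		const uint8_t combos[] = {
			TCP_RST | TCP_FIN,
			TCP_URG | TCP_SYN,
			TCP_RST | TCP_SYN,
			TCP_SYN | TCP_FIN,
		};
		unsigned int i;

		for (i = 0; i < sizeof(combos) / sizeof(combos[0]); i++)
			if ((flags & combos[i]) == combos[i])
				return true;
		return false;
	}
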
@@ -7402,7 +8535,9 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7459,7 +8594,9 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu1_action_entries[] = {
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7511,6 +8648,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
+ NPC_S_KPU2_NGIO, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7518,7 +8663,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
+ 4, 8, 12, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7550,14 +8695,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -7590,6 +8727,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_8023,
@@ -7707,15 +8852,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 20, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
@@ -7788,7 +8924,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 16, 2, 0,
+ 4, 8, 12, 2, 0,
NPC_S_KPU4_FDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -7897,15 +9033,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 28, 1,
- NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
@@ -8031,15 +9158,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
@@ -8075,6 +9193,326 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -8084,7 +9522,9 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu2_action_entries[] = {
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8159,6 +9599,22 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -8170,7 +9626,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8178,7 +9634,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8186,7 +9642,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8194,7 +9650,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8202,7 +9658,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8210,7 +9666,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8218,7 +9674,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8226,7 +9682,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8234,7 +9690,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8242,7 +9698,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
0, 0, 0, 0,
},
{
@@ -8250,7 +9706,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8258,7 +9714,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG,
0, 0, 0, 0,
},
{
@@ -8266,7 +9722,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8274,7 +9730,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8282,7 +9738,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8290,7 +9746,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8298,7 +9754,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8306,7 +9762,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8314,7 +9770,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8322,7 +9778,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8330,7 +9786,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8338,7 +9794,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8346,7 +9802,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8354,7 +9810,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8546,15 +10002,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 16, 20, 24, 0, 0,
- NPC_S_KPU3_ITAG, 14, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8562,7 +10010,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG,
0, 0, 0, 0,
},
{
@@ -8570,7 +10018,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_QINQ, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ,
0, 0, 0, 0,
},
{
@@ -8578,7 +10026,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8586,7 +10034,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8594,7 +10042,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8602,7 +10050,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8610,7 +10058,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8618,7 +10066,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8632,142 +10080,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9078,6 +10390,14 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9087,11 +10407,13 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 0,
+ NPC_S_KPU5_IP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9099,7 +10421,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 0,
+ NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9107,7 +10429,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 0,
+ NPC_S_KPU5_ARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9115,7 +10437,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 0,
+ NPC_S_KPU5_RARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9123,7 +10445,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 0,
+ NPC_S_KPU5_PTP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9131,7 +10453,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9139,7 +10461,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9147,7 +10469,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9155,7 +10477,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 0,
+ NPC_S_KPU4_NSH, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9458,142 +10780,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
NPC_S_KPU5_IP, 4, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -10073,6 +11259,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10082,7 +11276,9 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu4_action_entries[] = {
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10205,6 +11401,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
@@ -10212,6 +11472,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10221,7 +11553,9 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu5_action_entries[] = {
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10719,6 +12053,382 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10728,7 +12438,9 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu6_action_entries[] = {
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11026,6 +12738,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11035,7 +13035,9 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu7_action_entries[] = {
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11221,6 +13223,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11230,7 +13320,9 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu8_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11889,7 +13981,9 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu9_action_entries[] = {
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12252,10 +14346,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_UNK,
+ 0,
0, 0, 0, 0,
},
{
@@ -12308,7 +14402,9 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu10_action_entries[] = {
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12455,7 +14551,9 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu11_action_entries[] = {
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12730,7 +14828,9 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu12_action_entries[] = {
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12957,7 +15057,9 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu13_action_entries[] = {
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12968,7 +15070,9 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu14_action_entries[] = {
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12979,7 +15083,9 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu15_action_entries[] = {
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13158,7 +15264,9 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu16_action_entries[] = {
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13209,7 +15317,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static const struct npc_kpu_profile npc_kpu_profiles[] = {
+static struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13308,12 +15416,22 @@ static const struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
-static const struct npc_lt_def_cfg npc_lt_defaults = {
+static struct npc_lt_def_cfg npc_lt_defaults = {
.rx_ol2 = {
.lid = NPC_LID_LA,
.ltype_match = NPC_LT_LA_ETHER,
.ltype_mask = 0x0F,
},
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
.rx_oip4 = {
.lid = NPC_LID_LC,
.ltype_match = NPC_LT_LC_IP,
@@ -13392,6 +15510,30 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LG_TU_IP,
.ltype_mask = 0x0F,
},
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
};
static struct npc_mcam_kex npc_mkex_default = {
@@ -13399,7 +15541,7 @@ static struct npc_mcam_kex npc_mkex_default = {
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
- /* nibble: LA..LE (ltype only) + channel */
+ /* nibble: LA..LE (ltype only) + Error code + Channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
@@ -13410,30 +15552,40 @@ static struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LA] = {
/* Layer A: Ethernet: */
[NPC_LT_LA_ETHER] = {
- /* DMAC: 6 bytes, KW1[47:0] */
+ /* DMAC: 6 bytes, KW1[55:8] */
KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* Outer VLAN: 2 bytes, KW0[63:48] */
- KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
},
[NPC_LT_LB_FDSA] = {
- /* SWITCH PORT: 1 byte, KW0[63:48] */
- KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
},
[NPC_LID_LC] = {
@@ -13477,6 +15629,13 @@ static struct npc_mcam_kex npc_mkex_default = {
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
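For readers cross-checking the KW comments in this table: each KEX_LD_CFG() cell packs one key extractor, and, assuming the encoding from this driver's npc.h (bytesm1 << 16 | hdr_ofs << 8 | ena << 7 | flags_ena << 6 | key_ofs), the new Ethertype extractor decodes as below. Key word 0 holds key bytes 0..7, so a key byte offset of 5 lands the two extracted bytes in KW0[55:40]. A minimal, compilable sketch:

#include <stdio.h>

/* Assumed layout of one NPC key-extract word, mirroring KEX_LD_CFG()
 * from this driver's npc.h: bytesm1<<16 | hdr_ofs<<8 | ena<<7 |
 * flags_ena<<6 | key_ofs.
 */
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
	(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
	 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

int main(void)
{
	/* The new Ethertype extractor for NPC_LT_LA_ETHER above */
	unsigned int cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5);
	unsigned int nbytes = ((cfg >> 16) & 0xF) + 1;
	unsigned int hdr_ofs = (cfg >> 8) & 0xFF;
	unsigned int key_ofs = cfg & 0x3F;

	/* 2 bytes from L2 offset 12 land in key bytes 5..6, i.e.
	 * KW0[55:40], since key word 0 spans key bytes 0..7.
	 */
	printf("extract %u bytes @hdr+%u -> KW%u[%u:%u]\n",
	       nbytes, hdr_ofs, key_ofs / 8,
	       (key_ofs % 8) * 8 + nbytes * 8 - 1, (key_ofs % 8) * 8);
	return 0;
}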
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ab24a5e8ee8a..0b092949d7ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -57,6 +57,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -180,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
@@ -1754,6 +1766,48 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
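The target computation above is the PF/VF addressing convention used throughout RVU: the FUNC field of a pcifunc is 0 for the PF itself and VF index + 1 for its VFs. A small standalone sketch, assuming the usual 10-bit FUNC field and PF shift (RVU_PFVF_FUNC_MASK = 0x3FF, shift 10):

#include <stdio.h>

/* Assumed RVU addressing: pcifunc[9:0] = FUNC (0 for the PF itself,
 * VF index + 1 for its VFs), pcifunc[15:10] = PF number.
 */
#define RVU_PFVF_FUNC_MASK 0x3FF
#define RVU_PFVF_PF_SHIFT  10

int main(void)
{
	unsigned short pf = 2 << RVU_PFVF_PF_SHIFT; /* PF2's own pcifunc */
	unsigned short vf = 3;                      /* PF2's VF3 */
	/* Same computation as rvu_mbox_handler_set_vf_perm() */
	unsigned short target = (pf & ~RVU_PFVF_FUNC_MASK) | (vf + 1);

	printf("PF2 -> VF3 target pcifunc = 0x%04x\n", target); /* 0x0804 */
	return 0;
}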
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -2842,6 +2896,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c2cc4806d13c..9e5d9ba6f01e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -223,13 +223,17 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
- u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
- /* Broadcast pkt replication info */
+ /* Broadcast/Multicast/Promisc pkt replication info */
u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
struct rvu_npc_mcam_rule *def_ucast_rule;
@@ -239,8 +243,18 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ unsigned long flags;
};
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
+};
+
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
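RVU_CLEAR_VF_PERM keeps NIXLF_INITIALIZED (bit 0) but clears the three PF-set permission bits in one mask: GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) = GENMASK(3, 1) = 0xE, so its complement drops exactly those bits. A self-contained check, with GENMASK expanded as in include/linux/bits.h:

#include <stdio.h>

/* GENMASK() expanded as in include/linux/bits.h for unsigned long */
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

enum { NIXLF_INITIALIZED, PF_SET_VF_MAC, PF_SET_VF_CFG, PF_SET_VF_TRUSTED };
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)

int main(void)
{
	unsigned long flags = 0xFUL;	/* all four flags set */

	flags &= RVU_CLEAR_VF_PERM;	/* GENMASK(3, 1) = 0xE, clears bits 3..1 */
	printf("flags after RESET_VF_PERM: 0x%lx\n", flags); /* 0x1 */
	return 0;
}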
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -282,6 +296,13 @@ struct nix_txvlan {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
int blkaddr;
struct rvu *rvu;
@@ -291,6 +312,7 @@ struct nix_hw {
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
};
/* RVU block's capabilities or functionality,
@@ -308,6 +330,7 @@ struct hw_cap {
bool nix_rx_multicast; /* Rx packet replication support */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -386,6 +409,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ bool custom;
size_t pkinds;
size_t kpus;
};
@@ -435,9 +459,13 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
/* NPC KPU data */
struct npc_kpu_profile_adapter kpu;
@@ -543,11 +571,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
/* Function Prototypes
* RVU
*/
-static inline int is_afvf(u16 pcifunc)
+static inline bool is_afvf(u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
/* check if PF_FUNC is AF */
static inline bool is_pffunc_af(u16 pcifunc)
{
@@ -563,6 +596,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
@@ -603,6 +637,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
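These predicates all key off the FUNC field of a pcifunc. A standalone sketch of their behavior, with the same assumed field split as above (FUNC in bits [9:0], PF in bits [15:10]); is_cgx_vf() is omitted since it also needs the PF-to-CGX map:

#include <stdbool.h>
#include <stdio.h>

#define RVU_PFVF_FUNC_MASK 0x3FF /* assumed FUNC field, bits [9:0] */
#define RVU_PFVF_PF_SHIFT  10

static bool is_afvf(unsigned short pcifunc) /* PF bits all zero -> AF's VF */
{
	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}

static bool is_vf(unsigned short pcifunc)   /* any nonzero FUNC -> a VF */
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

int main(void)
{
	unsigned short af_vf1 = 0x002;                  /* PF0 (AF), VF1 */
	unsigned short pf2    = 2 << RVU_PFVF_PF_SHIFT; /* PF2 itself */
	unsigned short pf2vf0 = pf2 | 1;                /* PF2, VF0 */

	printf("is_afvf: %d %d %d\n", is_afvf(af_vf1), is_afvf(pf2), is_afvf(pf2vf0));
	printf("is_vf:   %d %d %d\n", is_vf(af_vf1), is_vf(pf2), is_vf(pf2vf0));
	return 0;
}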
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
@@ -632,10 +672,22 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -646,13 +698,19 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti);
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9bf8eaabf9ab..3cc3c6fd1d84 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
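The prof_idx composed in the dump loop mirrors how bandwidth-profile AQ indices are addressed throughout this series: bits [13:0] select the profile within a layer and bits [15:14] select the layer. A small sketch, with the layer enum values assumed to match the driver (leaf 0, invalid 1, mid 2, top 3):

#include <stdio.h>

/* Layer enum values assumed to match the driver: leaf = 0,
 * invalid = 1 (hence the loops skip it), mid = 2, top = 3.
 */
enum { BAND_PROF_LEAF_LAYER, BAND_PROF_INVAL_LAYER,
       BAND_PROF_MID_LAYER, BAND_PROF_TOP_LAYER };

int main(void)
{
	int layer = BAND_PROF_MID_LAYER, idx = 37;
	int prof_idx = (idx & 0x3FFF) | (layer << 14); /* as in the dump loop */

	printf("prof_idx=0x%04x -> layer %d, profile %d\n",
	       prof_idx, (prof_idx >> 14) & 0x3, prof_idx & 0x3FFF);
	return 0;
}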
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
struct nix_hw *nix_hw;
@@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
@@ -2132,6 +2295,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
struct rvu *rvu = s->private;
struct npc_mcam *mcam;
int pf, vf = -1;
+ bool enabled;
int blkaddr;
u16 target;
u64 hits;
@@ -2173,7 +2337,9 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
}
rvu_dbg_npc_mcam_show_action(s, iter);
- seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
if (!iter->has_cntr)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0a8bd667cb11..d6f8210652c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -21,6 +21,16 @@
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -132,6 +142,22 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
return 0;
}
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -274,7 +300,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, false);
+ pfvf->rx_chan_cnt);
break;
}
@@ -285,16 +311,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
-
+ /* Install MCAM rule matching Ethernet broadcast MAC address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
@@ -310,7 +337,7 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->minlen = 0;
/* Remove this PF_FUNC from bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
@@ -680,8 +707,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
@@ -721,6 +751,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -777,6 +812,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -789,6 +827,8 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -866,6 +906,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -2203,8 +2246,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.op = op;
aq_req.qidx = mce;
- /* Forward bcast pkts to RQ0, RSS not needed */
- aq_req.mce.op = 0;
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
aq_req.mce.index = 0;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
@@ -2222,8 +2265,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
-static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, bool add)
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -2234,6 +2277,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
if (mce->pcifunc == pcifunc && !add) {
delete = true;
break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
}
tail = mce;
}
@@ -2261,36 +2307,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
}
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
{
- int err = 0, idx, next_idx, last_idx;
- struct nix_mce_list *mce_list;
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
struct mce *mce;
- int blkaddr;
-
- /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
- if (is_afvf(pcifunc))
- return 0;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return 0;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return 0;
-
- mcast = &nix_hw->mcast;
+ if (!mce_list)
+ return -EINVAL;
/* Get this PF/VF func's MCE index */
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
- idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
- mce_list = &pfvf->bcast_mce_list;
- if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ if (idx > (mce_idx + mce_list->max)) {
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
@@ -2298,20 +2331,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, add);
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count) {
- rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
goto end;
}
/* Dump the updated list to HW */
- idx = pfvf->bcast_mce_idx;
+ idx = mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
if (idx > last_idx)
@@ -2332,7 +2371,71 @@ end:
return err;
}
-static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
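The mce_idx bases returned here index into per-PF blocks of numvfs + 1 MCE entries, with the PF at slot 0 and VF k at slot k + 1, exactly as nix_update_mce_list() computes idx = mce_idx + FUNC. A toy example with a hypothetical base of 64:

#include <stdio.h>

#define RVU_PFVF_FUNC_MASK 0x3FF /* assumed FUNC field */

int main(void)
{
	int bcast_mce_idx = 64;			/* hypothetical base */
	unsigned short pf  = 2 << 10;		/* PF2, FUNC = 0 */
	unsigned short vf2 = pf | 3;		/* PF2's VF2, FUNC = 3 */

	/* Same arithmetic as nix_update_mce_list(): PF at slot 0,
	 * VF k at slot k + 1 within the PF's block.
	 */
	printf("PF  slot: %d\n", bcast_mce_idx + (pf & RVU_PFVF_FUNC_MASK));  /* 64 */
	printf("VF2 slot: %d\n", bcast_mce_idx + (vf2 & RVU_PFVF_FUNC_MASK)); /* 67 */
	return 0;
}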
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+
+ /* skip multicast pkt replication for AF's VFs */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
int err, pf, numvfs, idx;
@@ -2355,11 +2458,18 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
if (pfvf->nix_blkaddr != nix_hw->blkaddr)
continue;
- /* Save the start MCE */
+ /* save start idx of broadcast mce list */
pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
-
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
@@ -2375,6 +2485,22 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
pcifunc, 0, true);
if (err)
return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
}
}
return 0;
@@ -2421,7 +2547,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
- return nix_setup_bcast_tables(rvu, nix_hw);
+ return nix_setup_mce_tables(rvu, nix_hw);
}
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
@@ -3035,15 +3161,22 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
- /* VF can't overwrite admin(PF) changes */
- if (from_vf && pfvf->pf_set_vf_cfg)
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
return -EPERM;
+ }
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
return 0;
}
@@ -3067,30 +3200,75 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
- bool allmulti = false, disable_promisc = false;
+ bool allmulti, promisc, nix_rx_multicast;
u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
- if (req->mode & NIX_RX_MODE_PROMISC)
- allmulti = false;
- else if (req->mode & NIX_RX_MODE_ALLMULTI)
- allmulti = true;
- else
- disable_promisc = true;
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
- if (disable_promisc)
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- else
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
+
+ /* install/uninstall promisc entry */
+ if (promisc) {
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, allmulti);
+ pfvf->rx_chan_cnt);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ }
+
return 0;
}
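From the requester's side, req->mode is just a bitmask. A hypothetical caller-side sketch of how a PF driver might compose it; the bit positions (UCAST/PROMISC/ALLMULTI/USE_MCE in bits 0..3) are assumptions based on mbox.h, and NIX_RX_MODE_USE_MCE is what opts a CGX-mapped PF into the replication lists handled above:

/* Bit positions assumed from mbox.h; NIX_RX_MODE_USE_MCE is the new
 * flag that opts the requester into MCE replication lists.
 */
#define NIX_RX_MODE_UCAST    (1 << 0)
#define NIX_RX_MODE_PROMISC  (1 << 1)
#define NIX_RX_MODE_ALLMULTI (1 << 2)
#define NIX_RX_MODE_USE_MCE  (1 << 3)

unsigned int build_rx_mode(int promisc, int allmulti, int use_mce)
{
	unsigned int mode = NIX_RX_MODE_UCAST;

	if (promisc)
		mode |= NIX_RX_MODE_PROMISC;
	if (allmulti)
		mode |= NIX_RX_MODE_ALLMULTI;
	if (use_mce)	/* ask AF to use pkt replication lists */
		mode |= NIX_RX_MODE_USE_MCE;
	return mode;
}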
@@ -3470,6 +3648,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
@@ -3523,6 +3705,40 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
ltdefs->rx_isctp.ltype_mask);
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+ /* Receive ethertype definition register defines layer
+ * information in NPC_RESULT_S to identify the Ethertype
+ * location in the L2 header. Used for Ethertype overwriting
+ * in inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
return err;
@@ -3584,6 +3800,8 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
kfree(txsch->schq.bmap);
}
+ nix_ipolicer_freemem(nix_hw);
+
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
@@ -3614,6 +3832,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3624,6 +3843,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
npc_mcam_enable_flows(rvu, pcifunc);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3631,6 +3853,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3639,6 +3862,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
@@ -3657,6 +3883,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -3681,6 +3909,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
}
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3789,3 +4019,586 @@ void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
if (from_vf)
ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+ /* Extract PCP and DEI fields from the outer VLAN at byte
+ * offset 2 from the start of LB_PTR (i.e., the TAG).
+ * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
+ * fields are considered when 'Tunnel enable' is set in profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
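All the NIX_AF_RX_DEF_* writes in this function use the same field packing: byte offset in bits [15:12], LID in [10:8], LTYPE match in [7:4] and LTYPE mask in [3:0] (the APAD/ET variants add a valid bit at 11). A decoder sketch for the VLAN0 value written above; the lid/ltype constants here are placeholders, not the driver's real enum values:

#include <stdio.h>

/* Assumed NIX_AF_RX_DEF_* packing used above: byte offset [15:12],
 * LID [10:8], LTYPE match [7:4], LTYPE mask [3:0].
 */
static void decode_lt_def(unsigned int reg)
{
	printf("offset=%u lid=%u ltype_match=0x%x ltype_mask=0x%x\n",
	       (reg >> 12) & 0xF, (reg >> 8) & 0x7,
	       (reg >> 4) & 0xF, reg & 0xF);
}

int main(void)
{
	unsigned int lid = 1, ltype = 2;	/* placeholder LB/CTAG values */

	/* Same expression as the VLAN0_PCP_DEI write: PCP/DEI live
	 * 2 bytes into the tag pointed to by LB_PTR.
	 */
	decode_lt_def((2U << 12) | (lid << 8) | (ltype << 4) | 0x0F);
	return 0;
}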
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0xFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+ /* There is no enable bit in the profile context,
+ * so contexts can't be disabled. INIT them all here
+ * so that PF/VF later only need a WRITE to set up
+ * policer rates and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+ * profiles, this will be needed for leaf layer profiles'
+ * aggregation.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+ /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
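The magic 19 written to NIX_AF_PL_TS follows directly from the (TS + 1) * 100 ns rule in the comment above; a one-liner to make the arithmetic explicit:

#include <stdio.h>

int main(void)
{
	unsigned int ts = 19; /* value programmed into NIX_AF_PL_TS above */

	printf("policer timeunit = %u ns\n", (ts + 1) * 100); /* 2000 ns = 2us */
	return 0;
}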
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get the mid layer profile index and map leaf_prof to it
+ * as well, so that flows steered to different RQs but
+ * marked with the same match_id are rate limited in an
+ * aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
+
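rvu_nix_setup_ratelimit_aggr() boils down to: find another leaf profile carrying the same match_id, make sure a shared mid-layer profile exists, and bump its ref_count once per mapped leaf. A toy model of that bookkeeping, with hypothetical indices:

#include <stdio.h>

int main(void)
{
	int match_id[4] = { 0, 7, 0, 7 };  /* leaves 1 and 3 share id 7 */
	int mid_prof = -1, ref_count = 0, i;

	for (i = 0; i < 4; i++) {
		if (match_id[i] != 7)
			continue;
		if (mid_prof < 0)
			mid_prof = 42;	/* allocate the shared mid profile once */
		ref_count++;		/* one reference per mapped leaf */
	}
	printf("mid profile %d, ref_count %d\n", mid_prof, ref_count); /* 42, 2 */
	return 0;
}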
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bc4529691ec..3612e0a2cab3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -19,7 +19,7 @@
#include "cgx.h"
#include "npc_profile.h"
-#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
@@ -27,6 +27,8 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
+
static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
@@ -212,8 +214,10 @@ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
*/
if (type == NIXLF_BCAST_ENTRY)
return index;
- else if (type == NIXLF_PROMISC_ENTRY)
+ else if (type == NIXLF_ALLMULTI_ENTRY)
return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
}
return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
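With RSVD_MCAM_ENTRIES_PER_PF bumped to 3, each PF's reserved block now lays out as base + 0 = broadcast, base + 1 = allmulti, base + 2 = promisc, matching the returns above. A toy illustration; the base index and local enum are hypothetical, only the relative offsets matter:

#include <stdio.h>

/* Local illustration only; just the relative offsets matter */
enum { NIXLF_BCAST_ENTRY, NIXLF_ALLMULTI_ENTRY, NIXLF_PROMISC_ENTRY };

int main(void)
{
	int base = 96; /* hypothetical first reserved index for this PF */

	printf("bcast=%d allmulti=%d promisc=%d\n",
	       base + NIXLF_BCAST_ENTRY,	/* base + 0 */
	       base + NIXLF_ALLMULTI_ENTRY,	/* base + 1 */
	       base + NIXLF_PROMISC_ENTRY);	/* base + 2 */
	return 0;
}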
@@ -411,37 +415,49 @@ static void npc_fill_entryword(struct mcam_entry *entry, int idx,
}
}
-static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index,
- struct mcam_entry *entry)
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
{
u16 owner, target_func;
struct rvu_pfvf *pfvf;
- int bank, nixlf;
u64 rx_action;
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
- /* return incase target is PF or LBK or rule owner is not PF */
+ /* do nothing when target is LBK/PF or owner is not PF */
if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
+ /* save entry2target_pffunc */
pfvf = rvu_get_pfvf(rvu, target_func);
mcam->entry2target_pffunc[index] = target_func;
- /* return if nixlf is not attached or initialized */
- if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
- return;
- /* get VF ucast entry rule */
- nix_get_nixlf(rvu, target_func, &nixlf, NULL);
- index = npc_get_nixlf_mcam_index(mcam, target_func,
- nixlf, NIXLF_UCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
- rx_action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
if (rx_action)
entry->action = rx_action;
}
@@ -493,10 +509,9 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
- /* copy VF default entry action to the VF mcam entry */
+ /* PF installing VF rule */
if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
- entry);
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -647,30 +662,32 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_install_flow_req req = { 0 };
struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
- u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action;
u64 relaxed_mask;
- /* Only PF or AF VF can add a promiscuous entry */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- *(u64 *)&action = 0x00;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
@@ -678,19 +695,20 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
- blkaddr, ucast_idx);
+ blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
}
- if (allmulti) {
- mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
- ether_addr_copy(req.packet.dmac, mac_addr);
- ether_addr_copy(req.mask.dmac, mac_addr);
- req.features = BIT_ULL(NPC_DMAC);
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
}
req.chan_mask = 0xFFFU;
@@ -718,8 +736,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, bool enable)
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -728,25 +746,14 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- /* Only PF's have a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
- return;
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
-}
-
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
-}
-
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
@@ -756,8 +763,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, index;
- u32 req_index = 0;
- u8 op;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -770,7 +775,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
- if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
return;
/* Get 'pcifunc' of PF device */
@@ -784,10 +789,10 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
- op = NIX_RX_ACTIONOP_UCAST;
+ req.op = NIX_RX_ACTIONOP_UCAST;
} else {
- op = NIX_RX_ACTIONOP_MCAST;
- req_index = pfvf->bcast_mce_idx;
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
}
eth_broadcast_addr((u8 *)&req.packet.dmac);
@@ -796,15 +801,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
req.channel = chan;
req.intf = pfvf->nix_rx_intf;
req.entry = index;
- req.op = op;
req.hdr.pcifunc = 0; /* AF is requester */
req.vf = pcifunc;
- req.index = req_index;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -816,7 +820,104 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ struct rvu_pfvf *pfvf;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
+ }
+
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+ /* For cn10k the upper two bits of the channel number are the
+ * CPT channel number. By masking out these bits in the MCAM
+ * entry, the same entry used for NIX will also allow packets
+ * received from CPT to be parsed.
+ */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
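+ /* Illustrative example (exact bit positions are an assumption, not
+ * taken from the header): with a 12-bit channel whose CPT bits are
+ * <11:10>, masking them out lets e.g. channel 0x123 and its CPT
+ * alias 0x923 match the same MCAM entry.
+ */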
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -858,6 +959,7 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@@ -913,7 +1015,8 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+ is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -923,12 +1026,47 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
}
}
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+
+ /* ena/dis the MCAM entry directly when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+ /* return in case the MCE list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
- int index, bank, blkaddr;
+ int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -939,48 +1077,33 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, ena/dis promisc and bcast MCAM match entries.
- * For VFs add/delete from bcast list when RX multicast
- * feature is present.
+ /* Nothing to do for VFs, on platforms where pkt replication
+ * is not supported
*/
- if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
return;
- /* For bcast, enable/disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF/VF's nixlf is removed from bcast replication
- * list.
- */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
- nixlf, NIXLF_BCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-
- /* VFs will not have BCAST entry */
- if (action.op != NIX_RX_ACTIONOP_MCAST &&
- !(pcifunc & RVU_PFVF_FUNC_MASK)) {
- npc_enable_mcam_entry(rvu, mcam,
- blkaddr, index, enable);
- } else {
- nix_update_bcast_mce_list(rvu, pcifunc, enable);
- /* Enable PF's BCAST entry for packet replication */
- rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
- }
-
- if (enable)
- rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
}
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ /* Enables only broadcast match entry. Promisc/Allmulti are enabled
+ * in set_rx_mode mbox handler.
+ */
npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
@@ -1000,7 +1123,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
/* Disable MCAM entries directing traffic to this 'pcifunc' */
list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
if (is_npc_intf_rx(rule->intf) &&
- rule->rx_action.pf_func == pcifunc) {
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
npc_enable_mcam_entry(rvu, mcam, blkaddr,
rule->entry, false);
rule->enable = false;
@@ -1134,6 +1258,30 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
+/* strtoull of "mkexprof" with base:36 */
#define MKEX_END_SIGN 0xdeadbeef
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
@@ -1141,26 +1289,21 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
{
struct device *dev = &rvu->pdev->dev;
struct npc_mcam_kex *mcam_kex;
- void *mkex_prfl_addr = NULL;
- u64 prfl_addr, prfl_sz;
+ void __iomem *mkex_prfl_addr = NULL;
+ u64 prfl_sz;
+ int ret;
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
- goto program_mkex;
-
- if (!rvu->fwdata)
- goto program_mkex;
- prfl_addr = rvu->fwdata->mcam_addr;
- prfl_sz = rvu->fwdata->mcam_sz;
-
- if (!prfl_addr || !prfl_sz)
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
goto program_mkex;
- mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
- if (!mkex_prfl_addr)
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
goto program_mkex;
- mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
/* Compare with mkex mod_param name string */
@@ -1186,7 +1329,7 @@ program_mkex:
/* Program selected mkex profile */
npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- memunmap(mkex_prfl_addr);
+ iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -1263,6 +1406,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -1286,8 +1430,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -1300,6 +1448,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
+ profile->custom = 0;
profile->name = def_pfl_name;
profile->version = NPC_KPU_PROFILE_VER;
profile->ikpu = ikpu_action_entries;
@@ -1312,10 +1461,245 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
return 0;
}
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ /* Verify that the profile uses a known structure */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ for (entry = 0; entry < entries; entry++) {
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
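+
+/* Sketch of the custom KPU blob layout implied by the parsing above
+ * (a description of npc_apply_custom_kpu()'s accesses, not an
+ * authoritative format definition):
+ *
+ *	struct npc_kpu_profile_fwdata hdr;	// signature, name, version,
+ *						// kpus, mkex, lt_def, data[]
+ *	// then, packed into hdr.data, for each of hdr.kpus KPUs:
+ *	struct npc_kpu_fwdata kpu_hdr;		// per-KPU entry count
+ *	struct npc_kpu_profile_cam cam[kpu_hdr.entries];
+ *	struct npc_kpu_profile_action action[kpu_hdr.entries];
+ */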
+
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+ /* Otherwise the image may be coalesced; compute the offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ /* Walk the coalesced image looking for a matching profile. */
+ while (i < img_data->num_prfl) {
+ /* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Advance to the next profile image based on this profile's size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
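+
+/* Sketch of the coalesced image layout inferred from the walk above
+ * (field widths beyond those dereferenced here are assumptions):
+ *
+ *	signature		- must equal KPU_SIGN
+ *	name[KPU_NAME_LEN]	- matched against the requested profile
+ *	num_prfl		- number of KPU profiles in the image
+ *	prfl_sz[num_prfl]	- u16 size of each profile
+ *	profile images		- num_prfl blobs, each aligned up to 8 bytes
+ */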
+
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
static void npc_load_kpu_profile(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+ /* If the user has not specified a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+ /* Order of precedence for loading the NPC profile (high to low):
+ * Firmware binary in filesystem.
+ * Firmware database method.
+ * Default KPU profile.
+ */
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ /* If the image from the firmware filesystem fails to load or is
+ * invalid, retry with the firmware database method.
+ */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Release whichever image was loaded (fwdb mapping or fs copy). */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
npc_prepare_default_kpu(profile);
}
@@ -1654,6 +2038,10 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -2149,8 +2537,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->free_count = 0;
/* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
+ }
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
@@ -2163,8 +2554,12 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
/* Since list of allocated indices needs to be sent to requester,
* max number of non-contiguous entries per mbox msg is limited.
*/
- if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
return NPC_MCAM_INVALID_REQ;
+ }
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
if (!is_nixlf_attached(rvu, pcifunc))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 7f35b62eea13..68633145a8b8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -123,11 +123,8 @@ static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
static bool npc_is_same(struct npc_key_field *input,
struct npc_key_field *field)
{
- int ret;
-
- ret = memcmp(&input->layer_mdata, &field->layer_mdata,
- sizeof(struct npc_layer_mdata));
- return ret == 0;
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
}
static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
@@ -1103,11 +1100,18 @@ find_rule:
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
}
- if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
return 0;
}
@@ -1167,7 +1171,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* PF installing for its VF */
if (req->hdr.pcifunc && !from_vf && req->vf)
- pfvf->pf_set_vf_cfg = 1;
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
@@ -1177,9 +1181,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
}
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return -EINVAL;
- /* If interface is uninitialized then do not enable entry */
- if (err || (!req->default_rule && !pfvf->def_ucast_rule))
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
enable = false;
/* Packets reaching NPC in Tx path implies that a
@@ -1193,6 +1200,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (from_vf && !enable)
return -EINVAL;
+ /* PF sets VF mac & VF NIXLF is not attached: only update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
/* If message is from VF then its flow should not overlap with
* reserved unicast flow.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index ac71c0f2f960..76837d5e19c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -171,6 +171,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -181,6 +182,7 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
@@ -208,19 +210,27 @@
#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
#define NIX_AF_TCP_TIMER (0x01E0)
-#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
#define NIX_AF_RX_DEF_OUDP (0x0270)
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5e5f45c7eab0..14aa8e37ea41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -286,7 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
- NIX_AQ_CTYPE_BAND_PROF = 0x6,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -665,6 +665,89 @@ struct nix_rx_mce_s {
uint64_t next : 16;
};
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
+
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 9ec0313f13fc..1b08896b46d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -179,3 +179,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
sq->head++;
sq->head &= (sq->sqe_cnt - 1);
}
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
+ * This is the upper limit on the number of tokens (bytes)
+ * that can be accumulated in the bucket (a worked example
+ * follows this function).
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+ /* Calculate mantissa
+ * Find remaining bytes 'burst - 2^burst_exp'
+ * mantissa = (remaining bytes) / 2^(burst_exp - 8)
+ */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
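+
+/* Worked example (illustrative only, not driver code): for burst = 5120
+ * bytes, burst_exp = ilog2(5120) = 12 and the remainder is
+ * 5120 - 2^12 = 1024, so burst_mantissa = 1024 / 2^(12 - 8) = 64.
+ * The HW limit then evaluates to (1 + 64/256) * 2^12 = 5120 bytes,
+ * matching the requested burst exactly.
+ */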
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out mantissa, exponent and divider from given max pkt rate
+ *
+ * To achieve the desired rate, HW adds
+ * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
+ * policer timeunit * 2^rdiv, i.e. 2 * 2^rdiv usecs, to the token bucket.
+ * Here the policer timeunit is 2 usecs and the rate is in bits per sec.
+ * Since floating point cannot be used, the algorithm below uses a
+ * scale factor of 1000000 to support rates up to 100Gbps (a worked
+ * example follows this function).
+ */
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
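+
+/* Worked example (illustrative only): for rate = 100 Mbps,
+ * tmp = 100000000 * 64 = 6400000000. Halving while tmp >= 512000000
+ * gives exp = 4 and tmp = 400000000, so rdiv = 0 and
+ * rate_mantissa = (400000000 - 256000000) / 1000000 = 144. The HW
+ * then adds (1 + 144/256) * 2^4 = 25 bytes every 2 usecs, i.e.
+ * 12.5 MB/s = 100 Mbps.
+ */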
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable/disable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+ /* The number of tokens decremented is calculated according to
+ * the following equation:
+ * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+ * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+ * 1/256)
+ * if LMODE is 1 then rate limiting will be based on
+ * PPS otherwise bps.
+ * The ADJUST value specifies a token cost per packet, as
+ * opposed to the packet length which specifies a cost per
+ * byte. To rate limit based on PPS the adjust mantissa is
+ * set to 384 and the exponent to 1 so that the number of
+ * tokens decremented becomes 1, i.e. one token per packet.
+ */
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
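+
+ /* Worked check (illustrative only): with LMODE = 1 the
+ * packet-length term above is zero, so each packet decrements
+ * max((384/256 - 1) * 2^1, 1/256) = max(1, 1/256) = 1 token,
+ * i.e. exactly one token per packet as intended.
+ */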
+
+ /* Two-rate three-color marker
+ * With PEIR/EIR set to zero, the color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+ /* Setting the exponent to 24 and the mantissa to 0 configures
+ * the bucket with zero values, making the bucket unused. The peak
+ * information rate and excess information rate buckets are
+ * unused here.
+ */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
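+
+/* Usage sketch (hypothetical caller, mirroring the matchall helpers in
+ * this file): allocate a leaf profile, program its rate, then map it to
+ * a receive queue. All three helpers post mailbox messages, so callers
+ * are expected to hold pfvf->mbox.lock as the matchall wrappers do:
+ *
+ *	u16 leaf;
+ *
+ *	if (!cn10k_alloc_leaf_profile(pfvf, &leaf) &&
+ *	    !cn10k_set_ipolicer_rate(pfvf, leaf, burst, rate, false))
+ *		cn10k_map_unmap_rq_policer(pfvf, rq_idx, leaf, true);
+ */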
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e0bc595cbb78..71292a4cf1f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -14,4 +14,15 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 45730d0d92f2..234b330f3183 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -180,6 +180,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
/* HW settings, coalescing etc */
u16 rx_chan_base;
@@ -223,6 +224,11 @@ struct otx2_hw {
u64 *nix_lmt_base;
};
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
+};
+
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
@@ -230,6 +236,7 @@ struct otx2_vf_config {
u8 mac[ETH_ALEN];
u16 vlan;
int tx_vtag_idx;
+ bool trusted;
};
struct flr_work {
@@ -261,24 +268,26 @@ struct otx2_mac_table {
struct otx2_flow_config {
u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
- u32 nr_flows;
-#define OTX2_MAX_NTUPLE_FLOWS 32
-#define OTX2_MAX_UNICAST_FLOWS 8
-#define OTX2_MAX_VLAN_FLOWS 1
-#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS
-#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
- u32 ntuple_offset;
- u32 unicast_offset;
- u32 rx_vlan_offset;
- u32 vf_vlan_offset;
-#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+ u16 ntuple_offset;
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u32 tc_flower_offset;
- u32 ntuple_max_flows;
- u32 tc_max_flows;
+ u16 tc_flower_offset;
+ u16 ntuple_max_flows;
+ u16 tc_max_flows;
struct list_head flow_list;
};
@@ -319,6 +328,7 @@ struct otx2_nic {
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
u64 flags;
struct otx2_qset qset;
@@ -362,6 +372,7 @@ struct otx2_nic {
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
+ unsigned long rq_bmap;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..8df748e0677b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -286,6 +286,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
@@ -786,6 +792,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 0b4fa92ba821..8c97106bdd1c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -20,13 +20,125 @@ struct otx2_flow {
int vf;
};
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->ntuple_max_flows = 0;
+ flow_cfg->tc_max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->ntuple_max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
+static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
+ * allocated in a single request.
+ */
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ /* If this request is not fulfilled, no need to send
+ * further requests.
+ */
+ if (rsp->count != req->count)
+ break;
+ }
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->ntuple_offset = 0;
+ flow_cfg->ntuple_max_flows = allocated;
+ flow_cfg->tc_max_flows = allocated;
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries for ntuple, got %d\n",
+ count, allocated);
+
+ return allocated;
+}
+
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
int vf_vlan_max_flows;
- int i;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
mutex_lock(&pfvf->mbox.lock);
@@ -36,9 +148,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
return -ENOMEM;
}
- vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
req->contig = false;
- req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+ req->count = count;
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox)) {
@@ -51,37 +162,36 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
- "Unable to allocate %d MCAM entries, got %d\n",
- req->count, rsp->count);
- /* support only ntuples here */
- flow_cfg->ntuple_max_flows = rsp->count;
- flow_cfg->ntuple_offset = 0;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- } else {
- flow_cfg->vf_vlan_offset = 0;
- flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
- vf_vlan_max_flows;
- flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
- flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
- OTX2_MAX_NTUPLE_FLOWS;
- flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
- pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- }
-
- for (i = 0; i < rsp->count; i++)
- flow_cfg->entry[i] = rsp->entry_list[i];
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
- pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
mutex_unlock(&pfvf->mbox.lock);
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
return 0;
}
@@ -96,13 +206,14 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
- pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
- pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;
-
err = otx2_alloc_mcam_entries(pf);
if (err)
return err;
+ /* Check whether MCAM entries were allocated */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
if (!pf->mac_table)
@@ -146,7 +257,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
ether_addr_copy(pf->mac_table[i].addr, mac);
pf->mac_table[i].inuse = true;
pf->mac_table[i].mcam_entry =
- flow_cfg->entry[i + flow_cfg->unicast_offset];
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
req->entry = pf->mac_table[i].mcam_entry;
break;
}
@@ -551,6 +662,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
req->features |= BIT_ULL(NPC_IPPROTO_AH);
else
req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
@@ -731,8 +843,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow->location];
+ flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -836,9 +947,8 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
- req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow_cfg->ntuple_max_flows - 1];
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -905,7 +1015,7 @@ int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
req->intf = NIX_INTF_RX;
ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
eth_broadcast_addr((u8 *)&req->mask.dmac);
@@ -934,7 +1044,7 @@ static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 03004fdac0c6..59912f73417b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -39,6 +39,8 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+static void otx2_vf_link_event_task(struct work_struct *work);
+
enum {
TYPE_PFAF,
TYPE_PFVF,
@@ -1459,6 +1461,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
@@ -1820,9 +1825,11 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
- else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
}
@@ -2044,7 +2051,7 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!netif_running(netdev))
return -EAGAIN;
- if (vf >= pci_num_vf(pdev))
+ if (vf >= pf->total_vfs)
return -EINVAL;
if (!is_valid_ether_addr(mac))
@@ -2055,7 +2062,8 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
ret = otx2_do_set_vf_mac(pf, vf, mac);
if (ret == 0)
- dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+ dev_info(&pdev->dev,
+ "Load/Reload VF driver\n");
return ret;
}
@@ -2104,7 +2112,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
if (err)
goto out;
@@ -2117,7 +2125,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
goto out;
@@ -2131,7 +2139,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->packet.vlan_tci = htons(vlan);
req->mask.vlan_tci = htons(VLAN_VID_MASK);
/* af fills the destination mac addr */
@@ -2182,7 +2190,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
eth_zero_addr((u8 *)&req->mask.dmac);
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->features = BIT_ULL(NPC_DMAC);
req->channel = pf->hw.tx_chan_base;
req->intf = NIX_INTF_TX;
@@ -2241,10 +2249,63 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
ivi->vf = vf;
ether_addr_copy(ivi->mac, config->mac);
ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
return 0;
}
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Let AF reset VF permissions as SR-IOV is disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc)
+ pf->vf_configs[vf].trusted = !enable;
+ else
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ return rc;
+}
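
otx2_ndo_set_vf_trust() applies an optimistic update: the cached trusted flag is flipped first and rolled back only if the mailbox request fails. A generic sketch of that pattern, with hypothetical names and a stub standing in for the mailbox call:

struct cached_flag {
	bool value;
};

static int push_to_hw(struct cached_flag *f)
{
	/* stands in for the mailbox request; can fail in the real driver */
	return 0;
}

static int set_flag_optimistic(struct cached_flag *f, bool enable)
{
	int rc;

	if (f->value == enable)
		return 0;		/* no change, no mailbox traffic */

	f->value = enable;		/* publish optimistically */
	rc = push_to_hw(f);
	if (rc)
		f->value = !enable;	/* roll back on failure */
	return rc;
}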
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -2261,6 +2322,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2315,6 +2377,40 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
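
The per-VF config array now lives for the whole life of the PF: it is sized by pf->total_vfs and allocated in probe with devm, instead of being created and freed around each SR-IOV enable/disable cycle. That is also why otx2_set_vf_mac() above can bound-check against pf->total_vfs rather than pci_num_vf(). A hedged sketch of the devm idiom:

#include <linux/device.h>

struct vf_state {
	bool intf_down;
	bool trusted;
};

/* The memory is released automatically when the PF device is unbound,
 * so no explicit kfree() is needed on any error or teardown path.
 */
static struct vf_state *vfcfg_alloc(struct device *dev, int total_vfs)
{
	return devm_kcalloc(dev, total_vfs, sizeof(struct vf_state),
			    GFP_KERNEL);
}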
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2509,6 +2605,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mcam_flow_del;
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
@@ -2518,6 +2619,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
@@ -2576,7 +2679,7 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
- int ret, i;
+ int ret;
/* Init PF <=> VF mailbox stuff */
ret = otx2_pfvf_mbox_init(pf, numvfs);
@@ -2587,23 +2690,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
- pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
- GFP_KERNEL);
- if (!pf->vf_configs) {
- ret = -ENOMEM;
- goto free_intr;
- }
-
- for (i = 0; i < numvfs; i++) {
- pf->vf_configs[i].pf = pf;
- pf->vf_configs[i].intf_down = true;
- INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
- otx2_vf_link_event_task);
- }
-
ret = otx2_pf_flr_init(pf, numvfs);
if (ret)
- goto free_configs;
+ goto free_intr;
ret = otx2_register_flr_me_intr(pf, numvfs);
if (ret)
@@ -2618,8 +2707,6 @@ free_flr_intr:
otx2_disable_flr_me_intr(pf);
free_flr:
otx2_flr_wq_destroy(pf);
-free_configs:
- kfree(pf->vf_configs);
free_intr:
otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
@@ -2632,17 +2719,12 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
int numvfs = pci_num_vf(pdev);
- int i;
if (!numvfs)
return 0;
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
- cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
- kfree(pf->vf_configs);
-
otx2_disable_flr_me_intr(pf);
otx2_flr_wq_destroy(pf);
otx2_disable_pfvf_mbox_intr(pf, numvfs);
@@ -2682,6 +2764,7 @@ static void otx2_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 51157b283f6f..905fc02a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -15,6 +15,7 @@
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>
+#include "cn10k.h"
#include "otx2_common.h"
/* Egress rate limiting definitions */
@@ -41,11 +42,14 @@ struct otx2_tc_flow_stats {
struct otx2_tc_flow {
struct rhash_head node;
unsigned long cookie;
- u16 entry;
unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
};
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
@@ -220,17 +224,76 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
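
Each police rule consumes one otherwise unused receive queue, tracked in nic->rq_bmap. A standalone sketch of that bitmap allocator (helper names are hypothetical):

#include <linux/bitmap.h>
#include <linux/errno.h>

static int rq_claim(unsigned long *bmap, unsigned int nr_rqs)
{
	unsigned int rq = find_first_zero_bit(bmap, nr_rqs);

	if (rq >= nr_rqs)
		return -EINVAL;	/* every RQ already carries a police rule */

	set_bit(rq, bmap);
	return rq;
}

static void rq_release(unsigned long *bmap, unsigned int rq)
{
	__clear_bit(rq, bmap);
}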
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
- struct npc_install_flow_req *req)
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+ bool pps = false;
+ u64 rate;
int i;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(nic->netdev, "no tc actions specified");
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
return -EINVAL;
}
@@ -247,8 +310,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
priv = netdev_priv(target);
/* npc_install_flow_req doesn't support passing a target pcifunc */
if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- netdev_info(nic->netdev,
- "can't redirect to other pf/vf\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
return -EOPNOTSUPP;
}
req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -259,18 +322,55 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress rate limiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The mantissa/exponent computation used to encode a
+ * given token rate (tokens are bytes or packets)
+ * requires the rate to be multiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
default:
return -EOPNOTSUPP;
}
}
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
return 0;
}
-static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_msg *flow_spec = &req->packet;
struct flow_msg *flow_mask = &req->mask;
struct flow_dissector *dissector;
@@ -335,7 +435,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
- netdev_err(nic->netdev, "src mac match not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
return -EOPNOTSUPP;
}
@@ -353,11 +453,11 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_ip(rule, &match);
if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
match.mask->tos) {
- netdev_err(nic->netdev, "tos not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
return -EOPNOTSUPP;
}
if (match.mask->ttl) {
- netdev_err(nic->netdev, "ttl not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
return -EOPNOTSUPP;
}
flow_spec->tos = match.key->tos;
@@ -413,8 +513,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
if (ipv6_addr_loopback(&match.key->dst) ||
ipv6_addr_loopback(&match.key->src)) {
- netdev_err(nic->netdev,
- "Flow matching on IPv6 loopback addr is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
return -EOPNOTSUPP;
}
@@ -463,7 +563,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
- return otx2_tc_parse_actions(nic, &rule->action, req);
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
@@ -498,6 +598,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
+ int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -508,6 +609,27 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
otx2_del_mcam_flow_entry(nic, flow_node->entry);
WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
@@ -524,14 +646,21 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
- struct npc_install_flow_req *req;
- int rc;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Not enough MCAM space to add the flow");
+ return -ENOMEM;
+ }
+
/* allocate memory for the new flow and its node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -539,17 +668,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
- mutex_lock(&nic->mbox.lock);
- req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
- if (!req) {
- mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
- }
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
- rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
if (rc) {
- otx2_mbox_reset(&nic->mbox.mbox, 0);
- mutex_unlock(&nic->mbox.lock);
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -560,18 +683,22 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
- if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
- netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
- otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto free_leaf;
}
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
nic->flow_cfg->tc_max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
- nic->flow_cfg->tc_max_flows - new_node->bitpos];
+ req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
+ nic->flow_cfg->tc_max_flows - new_node->bitpos];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -579,9 +706,10 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
/* Send message to AF */
rc = otx2_sync_mbox_msg(&nic->mbox);
if (rc) {
- netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- goto out;
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
}
mutex_unlock(&nic->mbox.lock);
@@ -591,12 +719,35 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
otx2_del_mcam_flow_entry(nic, req->entry);
kfree_rcu(new_node, rcu);
- goto out;
+ goto free_leaf;
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries++;
-out:
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
return rc;
}
@@ -675,6 +826,87 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
}
}
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one ingress MATCHALL ratelimitter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress rate limiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@@ -686,6 +918,8 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
default:
break;
}
@@ -775,6 +1009,9 @@ int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+ /* Exclude receive queue 0 from being used for police actions */
+ set_bit(0, &nic->rq_bmap);
+
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
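
Setting bit 0 here guarantees the find_first_zero_bit() search in otx2_tc_act_set_police() never hands out RQ 0, which carries the default traffic. Hypothetical usage of the claim helper sketched earlier:

static void rq_bmap_demo(void)
{
	DECLARE_BITMAP(bmap, 8);
	int rq;

	bitmap_zero(bmap, 8);
	set_bit(0, bmap);	/* RQ 0 reserved for default traffic */
	rq = rq_claim(bmap, 8);	/* first police rule lands on RQ 1 */
	(void)rq;
}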
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 085be90a03eb..13a908f75ba0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -395,6 +395,42 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -432,12 +468,24 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
};
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
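
.ndo_set_rx_mode runs with the netdev address lock held and must not sleep, while the PF/AF mailbox is mutex-protected, so the VF (like the PF earlier in this patch) only queues work from the callback and performs the sleeping mailbox exchange in the worker. A minimal sketch of that split, assuming a private struct embedding the work item:

#include <linux/workqueue.h>

struct vf_priv {
	struct work_struct rx_mode_work;
	/* mailbox state, netdev pointer, ... */
};

static void rx_mode_worker(struct work_struct *work)
{
	struct vf_priv *priv = container_of(work, struct vf_priv,
					    rx_mode_work);

	/* sleeping calls (mutex_lock, mailbox sync) are safe here */
	(void)priv;
}

static void demo_set_rx_mode(struct vf_priv *priv,
			     struct workqueue_struct *wq)
{
	queue_work(wq, &priv->rx_mode_work);	/* atomic-safe, returns fast */
}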
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -588,8 +636,6 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
-
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
int n;
@@ -606,6 +652,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_detach_rsrc;
}
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2vf_set_ethtool_ops(netdev);
/* Enable pause frames by default */
@@ -614,6 +664,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
@@ -644,6 +696,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index 93129e32ebc5..0609df8b913d 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PRESTERA) += prestera.o
prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
- prestera_switchdev.o
+ prestera_switchdev.o prestera_acl.o prestera_flow.o \
+ prestera_flower.o prestera_span.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 55aa4bf8a27c..f18fe664b373 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -60,10 +60,22 @@ struct prestera_port_caps {
u8 transceiver;
};
+struct prestera_lag {
+ struct net_device *dev;
+ struct list_head members;
+ u16 member_count;
+ u16 lag_id;
+};
+
+struct prestera_flow_block;
+
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
+ struct prestera_flow_block *flow_block;
struct devlink_port dl_port;
+ struct list_head lag_member;
+ struct prestera_lag *lag;
u32 id;
u32 hw_id;
u32 dev_id;
@@ -127,6 +139,12 @@ struct prestera_port_event {
} data;
};
+enum prestera_fdb_entry_type {
+ PRESTERA_FDB_ENTRY_TYPE_REG_PORT,
+ PRESTERA_FDB_ENTRY_TYPE_LAG,
+ PRESTERA_FDB_ENTRY_TYPE_MAX
+};
+
enum prestera_fdb_event_id {
PRESTERA_FDB_EVENT_UNSPEC,
PRESTERA_FDB_EVENT_LEARNED,
@@ -134,7 +152,11 @@ enum prestera_fdb_event_id {
};
struct prestera_fdb_event {
- u32 port_id;
+ enum prestera_fdb_entry_type type;
+ union {
+ u32 port_id;
+ u16 lag_id;
+ } dest;
u32 vid;
union {
u8 mac[ETH_ALEN];
@@ -150,14 +172,20 @@ struct prestera_event {
};
struct prestera_switchdev;
+struct prestera_span;
struct prestera_rxtx;
+struct prestera_trap_data;
+struct prestera_acl;
struct prestera_switch {
struct prestera_device *dev;
struct prestera_switchdev *swdev;
struct prestera_rxtx *rxtx;
+ struct prestera_acl *acl;
+ struct prestera_span *span;
struct list_head event_handlers;
struct notifier_block netdev_nb;
+ struct prestera_trap_data *trap_data;
char base_mac[ETH_ALEN];
struct list_head port_list;
rwlock_t port_list_lock;
@@ -165,6 +193,9 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct prestera_lag *lags;
+ u8 lag_member_max;
+ u8 lag_max;
};
struct prestera_rxtx_params {
@@ -203,4 +234,10 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
+bool prestera_port_is_lag_member(const struct prestera_port *port);
+
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
+
+u16 prestera_port_lag_id(const struct prestera_port *port);
+
#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
new file mode 100644
index 000000000000..83c75ffb1a1c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_span.h"
+
+struct prestera_acl {
+ struct prestera_switch *sw;
+ struct list_head rules;
+};
+
+struct prestera_acl_ruleset {
+ struct rhashtable rule_ht;
+ struct prestera_switch *sw;
+ u16 id;
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct list_head match_list;
+ struct list_head action_list;
+ struct prestera_flow_block *block;
+ unsigned long cookie;
+ u32 priority;
+ u8 n_actions;
+ u8 n_matches;
+ u32 id;
+};
+
+static const struct rhashtable_params prestera_acl_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct prestera_acl_rule, cookie),
+ .head_offset = offsetof(struct prestera_acl_rule, ht_node),
+ .automatic_shrinking = true,
+};
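
These params key the hash table on the rule's TC cookie (an unsigned long), with the rhash_head embedded in the rule itself. Minimal insert/lookup usage, mirroring the helpers defined later in this file:

static int demo_rule_insert(struct rhashtable *ht,
			    struct prestera_acl_rule *rule)
{
	/* fails with -EEXIST if the cookie is already present */
	return rhashtable_insert_fast(ht, &rule->ht_node,
				      prestera_acl_rule_ht_params);
}

static struct prestera_acl_rule *demo_rule_lookup(struct rhashtable *ht,
						  unsigned long cookie)
{
	/* the key is passed by address, per key_offset/key_len above */
	return rhashtable_lookup_fast(ht, &cookie,
				      prestera_acl_rule_ht_params);
}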
+
+static struct prestera_acl_ruleset *
+prestera_acl_ruleset_create(struct prestera_switch *sw)
+{
+ struct prestera_acl_ruleset *ruleset;
+ int err;
+
+ ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = prestera_hw_acl_ruleset_create(sw, &ruleset->id);
+ if (err)
+ goto err_ruleset_create;
+
+ ruleset->sw = sw;
+
+ return ruleset;
+
+err_ruleset_create:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
+}
+
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
+{
+ prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
+}
+
+struct prestera_flow_block *
+prestera_acl_block_create(struct prestera_switch *sw, struct net *net)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->net = net;
+ block->sw = sw;
+
+ block->ruleset = prestera_acl_ruleset_create(sw);
+ if (IS_ERR(block->ruleset)) {
+ kfree(block);
+ return NULL;
+ }
+
+ return block;
+}
+
+void prestera_acl_block_destroy(struct prestera_flow_block *block)
+{
+ prestera_acl_ruleset_destroy(block->ruleset);
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct prestera_flow_block_binding *
+prestera_acl_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+int prestera_acl_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(prestera_acl_block_lookup(block, port)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ err = prestera_hw_acl_port_bind(port, block->ruleset->id);
+ if (err)
+ goto err_rules_bind;
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_rules_bind:
+ kfree(binding);
+ return err;
+}
+
+int prestera_acl_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_acl_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ prestera_hw_acl_port_unbind(port, block->ruleset->id);
+
+ kfree(binding);
+ return 0;
+}
+
+struct prestera_acl_ruleset *
+prestera_acl_block_ruleset_get(struct prestera_flow_block *block)
+{
+ return block->ruleset;
+}
+
+u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule)
+{
+ return rule->block->ruleset->id;
+}
+
+struct net *prestera_acl_block_net(struct prestera_flow_block *block)
+{
+ return block->net;
+}
+
+struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block)
+{
+ return block->sw;
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ prestera_acl_rule_ht_params);
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_flow_block *block,
+ unsigned long cookie)
+{
+ struct prestera_acl_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&rule->match_list);
+ INIT_LIST_HEAD(&rule->action_list);
+ rule->cookie = cookie;
+ rule->block = block;
+
+ return rule;
+}
+
+struct list_head *
+prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule)
+{
+ return &rule->match_list;
+}
+
+struct list_head *
+prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule)
+{
+ return &rule->action_list;
+}
+
+int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_action_entry *entry)
+{
+ struct prestera_acl_rule_action_entry *a_entry;
+
+ a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL);
+ if (!a_entry)
+ return -ENOMEM;
+
+ memcpy(a_entry, entry, sizeof(*entry));
+ list_add(&a_entry->list, &rule->action_list);
+
+ rule->n_actions++;
+ return 0;
+}
+
+u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule)
+{
+ return rule->n_actions;
+}
+
+u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule)
+{
+ return rule->priority;
+}
+
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
+{
+ rule->priority = priority;
+}
+
+int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_match_entry *entry)
+{
+ struct prestera_acl_rule_match_entry *m_entry;
+
+ m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL);
+ if (!m_entry)
+ return -ENOMEM;
+
+ memcpy(m_entry, entry, sizeof(*entry));
+ list_add(&m_entry->list, &rule->match_list);
+
+ rule->n_matches++;
+ return 0;
+}
+
+u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule)
+{
+ return rule->n_matches;
+}
+
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
+{
+ struct prestera_acl_rule_action_entry *a_entry;
+ struct prestera_acl_rule_match_entry *m_entry;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &rule->match_list) {
+ m_entry = list_entry(pos, typeof(*m_entry), list);
+ list_del(pos);
+ kfree(m_entry);
+ }
+
+ list_for_each_safe(pos, n, &rule->action_list) {
+ a_entry = list_entry(pos, typeof(*a_entry), list);
+ list_del(pos);
+ kfree(a_entry);
+ }
+
+ kfree(rule);
+}
+
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ u32 rule_id;
+ int err;
+
+ /* try to add rule to hash table first */
+ err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht,
+ &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ if (err)
+ return err;
+
+ /* add rule to hw */
+ err = prestera_hw_acl_rule_add(sw, rule, &rule_id);
+ if (err)
+ goto err_rule_add;
+
+ rule->id = rule_id;
+
+ list_add_tail(&rule->list, &sw->acl->rules);
+
+ return 0;
+
+err_rule_add:
+ rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ return err;
+}
+
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ list_del(&rule->list);
+ prestera_hw_acl_rule_del(sw, rule->id);
+}
+
+int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+{
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets;
+ *bytes = current_bytes;
+ *last_use = jiffies;
+
+ return 0;
+}
+
+int prestera_acl_init(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&acl->rules);
+ sw->acl = acl;
+ acl->sw = sw;
+
+ return 0;
+}
+
+void prestera_acl_fini(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl = sw->acl;
+
+ WARN_ON(!list_empty(&acl->rules));
+ kfree(acl);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
new file mode 100644
index 000000000000..39b7869be659
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ACL_H_
+#define _PRESTERA_ACL_H_
+
+enum prestera_acl_rule_match_entry_type {
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE
+};
+
+enum prestera_acl_rule_action {
+ PRESTERA_ACL_RULE_ACTION_ACCEPT,
+ PRESTERA_ACL_RULE_ACTION_DROP,
+ PRESTERA_ACL_RULE_ACTION_TRAP
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_acl_rule;
+struct prestera_acl_ruleset;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset;
+ struct flow_block_cb *block_cb;
+};
+
+struct prestera_acl_rule_action_entry {
+ struct list_head list;
+ enum prestera_acl_rule_action id;
+};
+
+struct prestera_acl_rule_match_entry {
+ struct list_head list;
+ enum prestera_acl_rule_match_entry_type type;
+ union {
+ struct {
+ u8 key;
+ u8 mask;
+ } u8;
+ struct {
+ u16 key;
+ u16 mask;
+ } u16;
+ struct {
+ u32 key;
+ u32 mask;
+ } u32;
+ struct {
+ u64 key;
+ u64 mask;
+ } u64;
+ struct {
+ u8 key[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+ } mac;
+ } keymask;
+};
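
A hedged example of filling one match entry: an exact EtherType match on IPv4, using the u16 arm of the keymask union (byte order follows the driver's convention; host order is assumed here):

#include <linux/if_ether.h>
#include <linux/string.h>

static void demo_match_ipv4(struct prestera_acl_rule_match_entry *e)
{
	memset(e, 0, sizeof(*e));
	e->type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
	e->keymask.u16.key = ETH_P_IP;	/* 0x0800 */
	e->keymask.u16.mask = 0xffff;	/* exact match */
}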
+
+int prestera_acl_init(struct prestera_switch *sw);
+void prestera_acl_fini(struct prestera_switch *sw);
+struct prestera_flow_block *
+prestera_acl_block_create(struct prestera_switch *sw, struct net *net);
+void prestera_acl_block_destroy(struct prestera_flow_block *block);
+struct net *prestera_acl_block_net(struct prestera_flow_block *block);
+struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block);
+int prestera_acl_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port);
+int prestera_acl_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port);
+struct prestera_acl_ruleset *
+prestera_acl_block_ruleset_get(struct prestera_flow_block *block);
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_flow_block *block,
+ unsigned long cookie);
+u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule);
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority);
+u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule);
+struct list_head *
+prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule);
+u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule);
+u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule);
+int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_action_entry *entry);
+struct list_head *
+prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule);
+int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_match_entry *entry);
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie);
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
+
+#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 94c185a0e2b8..d12e21db9fd6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -4,6 +4,352 @@
#include <net/devlink.h>
#include "prestera_devlink.h"
+#include "prestera_hw.h"
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/prestera.rst
+ */
+enum {
+ DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_PRESTERA_TRAP_ID_ARP_BC,
+ DEVLINK_PRESTERA_TRAP_ID_IS_IS,
+ DEVLINK_PRESTERA_TRAP_ID_OSPF,
+ DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC,
+ DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC,
+ DEVLINK_PRESTERA_TRAP_ID_VRRP,
+ DEVLINK_PRESTERA_TRAP_ID_DHCP,
+ DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS,
+ DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE,
+ DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7,
+ DEVLINK_PRESTERA_TRAP_ID_BGP,
+ DEVLINK_PRESTERA_TRAP_ID_SSH,
+ DEVLINK_PRESTERA_TRAP_ID_TELNET,
+ DEVLINK_PRESTERA_TRAP_ID_ICMP,
+ DEVLINK_PRESTERA_TRAP_ID_MET_RED,
+ DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO,
+ DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR,
+ DEVLINK_PRESTERA_TRAP_ID_INVALID_SA,
+ DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT,
+ DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN,
+ DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP,
+};
+
+#define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \
+ "arp_bc"
+#define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \
+ "is_is"
+#define DEVLINK_PRESTERA_TRAP_NAME_OSPF \
+ "ospf"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \
+ "ip_bc_mac"
+#define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \
+ "router_mc"
+#define DEVLINK_PRESTERA_TRAP_NAME_VRRP \
+ "vrrp"
+#define DEVLINK_PRESTERA_TRAP_NAME_DHCP \
+ "dhcp"
+#define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \
+ "mac_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \
+ "ipv4_options"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \
+ "ip_default_route"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \
+ "ip_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \
+ "ipv4_icmp_redirect"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \
+ "acl_code_0"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \
+ "acl_code_1"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \
+ "acl_code_2"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \
+ "acl_code_3"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \
+ "acl_code_4"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \
+ "acl_code_5"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \
+ "acl_code_6"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \
+ "acl_code_7"
+#define DEVLINK_PRESTERA_TRAP_NAME_BGP \
+ "bgp"
+#define DEVLINK_PRESTERA_TRAP_NAME_SSH \
+ "ssh"
+#define DEVLINK_PRESTERA_TRAP_NAME_TELNET \
+ "telnet"
+#define DEVLINK_PRESTERA_TRAP_NAME_ICMP \
+ "icmp"
+#define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \
+ "rxdma_drop"
+#define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \
+ "port_no_vlan"
+#define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \
+ "local_port"
+#define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \
+ "invalid_sa"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \
+ "illegal_ip_addr"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \
+ "illegal_ipv4_hdr"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \
+ "ip_uc_dip_da_mismatch"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \
+ "ip_sip_is_zero"
+#define DEVLINK_PRESTERA_TRAP_NAME_MET_RED \
+ "met_red"
+
+struct prestera_trap {
+ struct devlink_trap trap;
+ u8 cpu_code;
+};
+
+struct prestera_trap_item {
+ enum devlink_trap_action action;
+ void *trap_ctx;
+};
+
+struct prestera_trap_data {
+ struct prestera_switch *sw;
+ struct prestera_trap_item *trap_items_arr;
+ u32 traps_count;
+};
+
+#define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
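
For reference, the token pasting in these macros means PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY) expands (roughly) to:

DEVLINK_TRAP_DRIVER(CONTROL, TRAP,
		    DEVLINK_PRESTERA_TRAP_ID_ARP_BC,
		    DEVLINK_PRESTERA_TRAP_NAME_ARP_BC,	/* "arp_bc" */
		    DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY,
		    PRESTERA_TRAP_METADATA)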
+
+static const struct devlink_trap_group prestera_trap_groups_arr[] = {
+ /* No policer is associated with the following groups (policer id == 0) */
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(STP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LACP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BGP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0),
+};
+
+/* Initialize the trap list and associate each trap with its CPU code. */
+static struct prestera_trap prestera_trap_items_arr[] = {
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY),
+ .cpu_code = 5,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY),
+ .cpu_code = 13,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF),
+ .cpu_code = 16,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY),
+ .cpu_code = 19,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP),
+ .cpu_code = 26,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP),
+ .cpu_code = 27,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .cpu_code = 28,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY),
+ .cpu_code = 29,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP),
+ .cpu_code = 30,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP),
+ .cpu_code = 33,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 63,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY),
+ .cpu_code = 65,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 133,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS,
+ L3_EXCEPTIONS),
+ .cpu_code = 141,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE,
+ LOCAL_DELIVERY),
+ .cpu_code = 160,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .cpu_code = 161,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT,
+ L3_EXCEPTIONS),
+ .cpu_code = 180,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ TRAP),
+ .cpu_code = 188,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP),
+ .cpu_code = 192,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP),
+ .cpu_code = 193,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP),
+ .cpu_code = 194,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP),
+ .cpu_code = 195,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP),
+ .cpu_code = 196,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP),
+ .cpu_code = 197,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP),
+ .cpu_code = 198,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP),
+ .cpu_code = 199,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP),
+ .cpu_code = 206,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY),
+ .cpu_code = 207,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY),
+ .cpu_code = 208,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY),
+ .cpu_code = 209,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS),
+ .cpu_code = 37,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS),
+ .cpu_code = 39,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS),
+ .cpu_code = 56,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS),
+ .cpu_code = 60,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS),
+ .cpu_code = 136,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS),
+ .cpu_code = 137,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH,
+ L3_DROPS),
+ .cpu_code = 138,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS),
+ .cpu_code = 145,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS),
+ .cpu_code = 185,
+ },
+};
+
+static void prestera_devlink_traps_fini(struct prestera_switch *sw);
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
static int prestera_dl_info_get(struct devlink *dl,
struct devlink_info_req *req,
@@ -27,8 +373,21 @@ static int prestera_dl_info_get(struct devlink *dl,
buf);
}
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx);
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+
+static int prestera_devlink_traps_register(struct prestera_switch *sw);
+
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
+ .trap_init = prestera_trap_init,
+ .trap_action_set = prestera_trap_action_set,
+ .trap_drop_counter_get = prestera_drop_counter_get,
};
struct prestera_switch *prestera_devlink_alloc(void)
@@ -53,17 +412,32 @@ int prestera_devlink_register(struct prestera_switch *sw)
int err;
err = devlink_register(dl, sw->dev->dev);
- if (err)
+ if (err) {
dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+ return err;
+ }
- return err;
+ err = prestera_devlink_traps_register(sw);
+ if (err) {
+ devlink_unregister(dl);
+ dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
+ prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
+
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -110,3 +484,155 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
+
+static int prestera_devlink_traps_register(struct prestera_switch *sw)
+{
+ const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
+ const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
+ struct devlink *devlink = priv_to_devlink(sw);
+ struct prestera_trap_data *trap_data;
+ struct prestera_trap *prestera_trap;
+ int err, i;
+
+ trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL);
+ if (!trap_data)
+ return -ENOMEM;
+
+ trap_data->trap_items_arr = kcalloc(traps_count,
+ sizeof(struct prestera_trap_item),
+ GFP_KERNEL);
+ if (!trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_alloc;
+ }
+
+ trap_data->sw = sw;
+ trap_data->traps_count = traps_count;
+ sw->trap_data = trap_data;
+
+ err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr,
+ groups_count);
+ if (err)
+ goto err_groups_register;
+
+ for (i = 0; i < traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ err = devlink_traps_register(devlink, &prestera_trap->trap, 1,
+ sw);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
+ }
+err_groups_register:
+ kfree(trap_data->trap_items_arr);
+err_trap_items_alloc:
+ kfree(trap_data);
+ return err;
+}
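
The err_trap_register label above unwinds only the traps registered so far, in reverse order; a generic sketch of this partial-unwind idiom:

static int register_many(struct devlink *dl,
			 const struct devlink_trap *traps,
			 int count, void *priv)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = devlink_traps_register(dl, &traps[i], 1, priv);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	for (i--; i >= 0; i--)	/* undo completed registrations only */
		devlink_traps_unregister(dl, &traps[i], 1);
	return err;
}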
+
+static struct prestera_trap_item *
+prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ struct prestera_trap *prestera_trap;
+ int i;
+
+ for (i = 0; i < trap_data->traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ if (cpu_code == prestera_trap->cpu_code)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code)
+{
+ struct prestera_trap_item *trap_item;
+ struct devlink *devlink;
+
+ devlink = port->dl_port.devlink;
+
+ trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code);
+ if (unlikely(!trap_item))
+ return;
+
+ devlink_trap_report(devlink, skb, trap_item->trap_ctx,
+ &port->dl_port, NULL);
+}
+
+static struct prestera_trap_item *
+prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) {
+ if (prestera_trap_items_arr[i].trap.id == trap_id)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ struct prestera_trap_item *trap_item;
+
+ trap_item = prestera_devlink_trap_item_lookup(sw, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ trap_item->trap_ctx = trap_ctx;
+ trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* Currently, the driver does not support altering trap actions */
+ return -EOPNOTSUPP;
+}
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ enum prestera_hw_cpu_code_cnt_t cpu_code_type =
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP;
+ struct prestera_trap *prestera_trap =
+ container_of(trap, struct prestera_trap, trap);
+
+ return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code,
+ cpu_code_type, p_drops);
+}
+
+static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ const struct devlink_trap *trap;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) {
+ trap = &prestera_trap_items_arr[i].trap;
+ devlink_traps_unregister(dl, trap, 1);
+ }
+
+ devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
+ ARRAY_SIZE(prestera_trap_groups_arr));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index 51bee9f75415..5d73aa9db897 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -20,4 +20,7 @@ void prestera_devlink_port_clear(struct prestera_port *port);
struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code);
+
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
index a5e01c7a307b..b7e89c0ca5c0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -19,6 +19,7 @@
#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+#define PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0)
#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
@@ -74,6 +75,8 @@ int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
(FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
(FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+ dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]);
+
return 0;
}
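
PRESTERA_DSA_W1_MASK_CPU_CODE is GENMASK(7, 0), so the FIELD_GET() above simply extracts the low byte of DSA word 1; the shift is derived from the mask at compile time. Equivalent standalone form, with an illustrative mask name:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_CPU_CODE_MASK GENMASK(7, 0)	/* mirrors the W1 mask */

static u8 demo_cpu_code(u32 word1)
{
	return FIELD_GET(DEMO_CPU_CODE_MASK, word1);
}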
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
index 67018629bdd2..c99342f475cf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -27,6 +27,7 @@ struct prestera_dsa {
struct prestera_dsa_vlan vlan;
u32 hw_dev_num;
u32 port_num;
+ u8 cpu_code;
};
int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
new file mode 100644
index 000000000000..c9891e968259
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
+#include "prestera_flower.h"
+
+static LIST_HEAD(prestera_block_cb_list);
+
+static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return prestera_span_replace(block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ prestera_span_destroy(block);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ if (f->common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return prestera_flower_replace(block, f);
+ case FLOW_CLS_DESTROY:
+ prestera_flower_destroy(block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return prestera_flower_stats(block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return prestera_flow_block_flower_cb(block, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return prestera_flow_block_mall_cb(block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_flow_block_release(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_acl_block_destroy(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_get(struct prestera_switch *sw,
+ struct flow_block_offload *f,
+ bool *register_block)
+{
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ prestera_flow_block_cb, sw);
+ if (!block_cb) {
+ block = prestera_acl_block_create(sw, f->net);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ block_cb = flow_block_cb_alloc(prestera_flow_block_cb,
+ sw, block,
+ prestera_flow_block_release);
+ if (IS_ERR(block_cb)) {
+ prestera_acl_block_destroy(block);
+ return ERR_CAST(block_cb);
+ }
+
+ block->block_cb = block_cb;
+ *register_block = true;
+ } else {
+ block = flow_block_cb_priv(block_cb);
+ *register_block = false;
+ }
+
+ flow_block_cb_incref(block_cb);
+
+ return block;
+}
+
+static void prestera_flow_block_put(struct prestera_flow_block *block)
+{
+ struct flow_block_cb *block_cb = block->block_cb;
+
+ if (flow_block_cb_decref(block_cb))
+ return;
+
+ flow_block_cb_free(block_cb);
+ prestera_acl_block_destroy(block);
+}
+
+static int prestera_setup_flow_block_bind(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ bool register_block;
+ int err;
+
+ block = prestera_flow_block_get(sw, f, &register_block);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ block_cb = block->block_cb;
+
+ err = prestera_acl_block_bind(block, port);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
+ }
+
+ port->flow_block = block;
+ return 0;
+
+err_block_bind:
+ prestera_flow_block_put(block);
+
+ return err;
+}
+
+static void prestera_setup_flow_block_unbind(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw);
+ if (!block_cb)
+ return;
+
+ block = flow_block_cb_priv(block_cb);
+
+ prestera_span_destroy(block);
+
+ err = prestera_acl_block_unbind(block, port);
+ if (err)
+ goto error;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+error:
+ port->flow_block = NULL;
+}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &prestera_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return prestera_setup_flow_block_bind(port, f);
+ case FLOW_BLOCK_UNBIND:
+ prestera_setup_flow_block_unbind(port, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
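
prestera_flow_block_get()/prestera_flow_block_put() above let several ports share one offload block: the first bind allocates the block, later binds only bump the flow_block_cb refcount, and the block is destroyed when the last user unbinds. A standalone sketch of that get/put discipline (plain C with a hypothetical block type, not the driver's structures):

#include <stdio.h>
#include <stdlib.h>

struct block { int refcnt; };		/* hypothetical, not the driver struct */

static struct block *block_get(struct block **slot)
{
	if (!*slot) {			/* first bind: create the shared block */
		*slot = calloc(1, sizeof(**slot));
		if (!*slot)
			return NULL;
	}
	(*slot)->refcnt++;		/* later binds just take a reference */
	return *slot;
}

static void block_put(struct block **slot)
{
	if (--(*slot)->refcnt)
		return;			/* other ports still bound */
	free(*slot);			/* last unbind frees the block */
	*slot = NULL;
}

int main(void)
{
	struct block *shared = NULL;

	block_get(&shared);	/* port 0 binds */
	block_get(&shared);	/* port 1 shares the same block */
	block_put(&shared);	/* port 0 unbinds: block survives */
	block_put(&shared);	/* port 1 unbinds: block is freed */
	printf("shared = %p\n", (void *)shared);
	return 0;
}
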
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
new file mode 100644
index 000000000000..467c7038cace
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOW_H_
+#define _PRESTERA_FLOW_H_
+
+#include <net/flow_offload.h>
+
+struct prestera_port;
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f);
+
+#endif /* _PRESTERA_FLOW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
new file mode 100644
index 000000000000..e571ba09ec08
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flower.h"
+
+static int prestera_flower_parse_actions(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_acl_rule_action_entry a_entry;
+ const struct flow_action_entry *act;
+ int err, i;
+
+ if (!flow_action_has_entries(flow_action))
+ return 0;
+
+ flow_action_for_each(i, act, flow_action) {
+ memset(&a_entry, 0, sizeof(a_entry));
+
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ break;
+ case FLOW_ACTION_DROP:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
+ break;
+ case FLOW_ACTION_TRAP:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ pr_err("Unsupported action\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = prestera_acl_rule_action_add(rule, &a_entry);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f,
+ struct prestera_flow_block *block)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct prestera_acl_rule_match_entry m_entry = {0};
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+ struct prestera_port *port;
+
+ flow_rule_match_meta(f_rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't find specified ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (!prestera_netdev_check(ingress_dev)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't match on switchdev ingress port");
+ return -EINVAL;
+ }
+ port = netdev_priv(ingress_dev);
+
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
+ m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
+ m_entry.keymask.u64.mask = ~(u64)0;
+
+ return prestera_acl_rule_match_add(rule, &m_entry);
+}
+
+static int prestera_flower_parse(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = f_rule->match.dissector;
+ struct prestera_acl_rule_match_entry m_entry;
+ u16 n_proto_mask = 0;
+ u16 n_proto_key = 0;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
+ return -EOPNOTSUPP;
+ }
+
+ prestera_acl_rule_priority_set(rule, f->common.prio);
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
+ err = prestera_flower_parse_meta(rule, f, block);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(f_rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(f_rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+
+ /* add eth type key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
+ m_entry.keymask.u16.key = n_proto_key;
+ m_entry.keymask.u16.mask = n_proto_mask;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ /* add ip proto key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
+ m_entry.keymask.u8.key = match.key->ip_proto;
+ m_entry.keymask.u8.mask = match.mask->ip_proto;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(f_rule, &match);
+
+ /* add ethernet dst key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
+ memcpy(&m_entry.keymask.mac.key,
+ &match.key->dst, sizeof(match.key->dst));
+ memcpy(&m_entry.keymask.mac.mask,
+ &match.mask->dst, sizeof(match.mask->dst));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ /* add ethernet src key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
+ memcpy(&m_entry.keymask.mac.key,
+ &match.key->src, sizeof(match.key->src));
+ memcpy(&m_entry.keymask.mac.mask,
+ &match.mask->src, sizeof(match.mask->src));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
+ memcpy(&m_entry.keymask.u32.key,
+ &match.key->src, sizeof(match.key->src));
+ memcpy(&m_entry.keymask.u32.mask,
+ &match.mask->src, sizeof(match.mask->src));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
+ memcpy(&m_entry.keymask.u32.key,
+ &match.key->dst, sizeof(match.key->dst));
+ memcpy(&m_entry.keymask.u32.mask,
+ &match.mask->dst, sizeof(match.mask->dst));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
+ m_entry.keymask.u16.key = ntohs(match.key->src);
+ m_entry.keymask.u16.mask = ntohs(match.mask->src);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
+ m_entry.keymask.u16.key = ntohs(match.key->dst);
+ m_entry.keymask.u16.mask = ntohs(match.mask->dst);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(f_rule, &match);
+
+ if (match.mask->vlan_id != 0) {
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
+ m_entry.keymask.u16.key = match.key->vlan_id;
+ m_entry.keymask.u16.mask = match.mask->vlan_id;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
+ m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
+ m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
+ m_entry.keymask.u8.key = match.key->type;
+ m_entry.keymask.u8.mask = match.mask->type;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
+ m_entry.keymask.u8.key = match.key->code;
+ m_entry.keymask.u8.mask = match.mask->code;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ return prestera_flower_parse_actions(block, rule,
+ &f->rule->action,
+ f->common.extack);
+}
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_rule *rule;
+ int err;
+
+ rule = prestera_acl_rule_create(block, f->cookie);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ err = prestera_flower_parse(block, rule, f);
+ if (err)
+ goto err_flower_parse;
+
+ err = prestera_acl_rule_add(sw, rule);
+ if (err)
+ goto err_rule_add;
+
+ return 0;
+
+err_rule_add:
+err_flower_parse:
+ prestera_acl_rule_destroy(rule);
+ return err;
+}
+
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_rule *rule;
+ struct prestera_switch *sw;
+
+ rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
+ f->cookie);
+ if (rule) {
+ sw = prestera_acl_block_sw(block);
+ prestera_acl_rule_del(sw, rule);
+ prestera_acl_rule_destroy(rule);
+ }
+}
+
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_rule *rule;
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
+ f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return 0;
+}
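
Every flower match assembled above is handed to the firmware as a key/mask pair; a packet field matches when it agrees with the key on exactly the bits set in the mask. A small standalone illustration of that semantics (values are arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool keymask_match(uint16_t field, uint16_t key, uint16_t mask)
{
	/* only bits set in the mask take part in the comparison */
	return (field & mask) == (key & mask);
}

int main(void)
{
	/* e.g. match VLAN ID 100 exactly: 12 significant bits */
	printf("%d\n", keymask_match(100, 100, 0x0fff));	/* 1: hit */
	printf("%d\n", keymask_match(101, 100, 0x0fff));	/* 0: miss */
	printf("%d\n", keymask_match(101, 100, 0));		/* 1: wildcard */
	return 0;
}
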
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
new file mode 100644
index 000000000000..91e045eec58b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOWER_H_
+#define _PRESTERA_FLOWER_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+
+#endif /* _PRESTERA_FLOWER_H_ */
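
prestera_flower_parse() rejects unsupported rules up front by clearing the supported FLOW_DISSECTOR_KEY_* bits from dissector->used_keys and failing if anything remains. The same test in isolation (the KEY_* bits and the -95/-EOPNOTSUPP value are local stand-ins):

#include <stdio.h>

#define KEY_META	(1u << 0)	/* stand-ins for FLOW_DISSECTOR_KEY_* */
#define KEY_BASIC	(1u << 1)
#define KEY_VLAN	(1u << 2)
#define KEY_MPLS	(1u << 3)	/* not offloadable in this sketch */

static int check_used_keys(unsigned int used_keys)
{
	const unsigned int supported = KEY_META | KEY_BASIC | KEY_VLAN;

	/* any bit left after clearing the supported ones belongs to a key
	 * the hardware cannot match on, so the rule is refused */
	return (used_keys & ~supported) ? -95 /* -EOPNOTSUPP */ : 0;
}

int main(void)
{
	printf("%d\n", check_used_keys(KEY_BASIC | KEY_VLAN));	/* 0 */
	printf("%d\n", check_used_keys(KEY_BASIC | KEY_MPLS));	/* -95 */
	return 0;
}
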
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 0424718d5998..c1297859e471 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -2,11 +2,13 @@
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include "prestera.h"
#include "prestera_hw.h"
+#include "prestera_acl.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
@@ -36,11 +38,31 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+ PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500,
+ PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501,
+ PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510,
+ PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520,
+ PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521,
+ PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530,
+ PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
+
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903,
+
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+ PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
+ PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+
+ PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
+
PRESTERA_CMD_TYPE_ACK = 0x10000,
PRESTERA_CMD_TYPE_MAX
};
@@ -86,6 +108,11 @@ enum {
};
enum {
+ PRESTERA_PORT_FLOOD_TYPE_UC = 0,
+ PRESTERA_PORT_FLOOD_TYPE_MC = 1,
+};
+
+enum {
PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
@@ -127,6 +154,12 @@ enum {
PRESTERA_FC_SYMM_ASYMM,
};
+enum {
+ PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
+ PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
+};
+
struct prestera_fw_event_handler {
struct list_head list;
struct rcu_head rcu;
@@ -168,6 +201,8 @@ struct prestera_msg_switch_init_resp {
u32 port_count;
u32 mtu_max;
u8 switch_id;
+ u8 lag_max;
+ u8 lag_member_max;
};
struct prestera_msg_port_autoneg_param {
@@ -188,6 +223,11 @@ struct prestera_msg_port_mdix_param {
u8 admin_mode;
};
+struct prestera_msg_port_flood_param {
+ u8 type;
+ u8 enable;
+};
+
union prestera_msg_port_param {
u8 admin_state;
u8 oper_state;
@@ -205,6 +245,7 @@ union prestera_msg_port_param {
struct prestera_msg_port_mdix_param mdix;
struct prestera_msg_port_autoneg_param autoneg;
struct prestera_msg_port_cap_param cap;
+ struct prestera_msg_port_flood_param flood_ext;
};
struct prestera_msg_port_attr_req {
@@ -249,8 +290,13 @@ struct prestera_msg_vlan_req {
struct prestera_msg_fdb_req {
struct prestera_msg_cmd cmd;
u8 dest_type;
- u32 port;
- u32 dev;
+ union {
+ struct {
+ u32 port;
+ u32 dev;
+ };
+ u16 lag_id;
+ } dest;
u8 mac[ETH_ALEN];
u16 vid;
u8 dynamic;
@@ -269,6 +315,85 @@ struct prestera_msg_bridge_resp {
u16 bridge;
};
+struct prestera_msg_acl_action {
+ u32 id;
+};
+
+struct prestera_msg_acl_match {
+ u32 type;
+ union {
+ struct {
+ u8 key;
+ u8 mask;
+ } u8;
+ struct {
+ u16 key;
+ u16 mask;
+ } u16;
+ struct {
+ u32 key;
+ u32 mask;
+ } u32;
+ struct {
+ u64 key;
+ u64 mask;
+ } u64;
+ struct {
+ u8 key[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+ } mac;
+ } __packed keymask;
+};
+
+struct prestera_msg_acl_rule_req {
+ struct prestera_msg_cmd cmd;
+ u32 id;
+ u32 priority;
+ u16 ruleset_id;
+ u8 n_actions;
+ u8 n_matches;
+};
+
+struct prestera_msg_acl_rule_resp {
+ struct prestera_msg_ret ret;
+ u32 id;
+};
+
+struct prestera_msg_acl_rule_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_msg_acl_ruleset_bind_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 ruleset_id;
+};
+
+struct prestera_msg_acl_ruleset_req {
+ struct prestera_msg_cmd cmd;
+ u16 id;
+};
+
+struct prestera_msg_acl_ruleset_resp {
+ struct prestera_msg_ret ret;
+ u16 id;
+};
+
+struct prestera_msg_span_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u8 id;
+} __packed __aligned(4);
+
+struct prestera_msg_span_resp {
+ struct prestera_msg_ret ret;
+ u8 id;
+} __packed __aligned(4);
+
struct prestera_msg_stp_req {
struct prestera_msg_cmd cmd;
u32 port;
@@ -293,6 +418,24 @@ struct prestera_msg_rxtx_port_req {
u32 dev;
};
+struct prestera_msg_lag_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 lag_id;
+};
+
+struct prestera_msg_cpu_code_counter_req {
+ struct prestera_msg_cmd cmd;
+ u8 counter_type;
+ u8 code;
+};
+
+struct mvsw_msg_cpu_code_counter_ret {
+ struct prestera_msg_ret ret;
+ u64 packet_count;
+};
+
struct prestera_msg_event {
u16 type;
u16 id;
@@ -315,7 +458,10 @@ union prestera_msg_event_fdb_param {
struct prestera_msg_event_fdb {
struct prestera_msg_event id;
u8 dest_type;
- u32 port_id;
+ union {
+ u32 port_id;
+ u16 lag_id;
+ } dest;
u32 vid;
union prestera_msg_event_fdb_param param;
};
@@ -386,7 +532,19 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
{
struct prestera_msg_event_fdb *hw_evt = msg;
- evt->fdb_evt.port_id = hw_evt->port_id;
+ switch (hw_evt->dest_type) {
+ case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
+ evt->fdb_evt.dest.port_id = hw_evt->dest.port_id;
+ break;
+ case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
+ evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
evt->fdb_evt.vid = hw_evt->vid;
ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
@@ -531,6 +689,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->mtu_min = PRESTERA_MIN_MTU;
sw->mtu_max = resp.mtu_max;
sw->id = resp.switch_id;
+ sw->lag_member_max = resp.lag_member_max;
+ sw->lag_max = resp.lag_max;
return 0;
}
@@ -696,6 +856,274 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
return 0;
}
+int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_resp resp;
+ struct prestera_msg_acl_ruleset_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *ruleset_id = resp.id;
+
+ return 0;
+}
+
+int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_req req = {
+ .id = ruleset_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
+ struct prestera_acl_rule *rule)
+{
+ struct list_head *a_list = prestera_acl_rule_action_list_get(rule);
+ struct prestera_acl_rule_action_entry *a_entry;
+ int i = 0;
+
+ list_for_each_entry(a_entry, a_list, list) {
+ action[i].id = a_entry->id;
+
+ switch (a_entry->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
+ struct prestera_acl_rule *rule)
+{
+ struct list_head *m_list = prestera_acl_rule_match_list_get(rule);
+ struct prestera_acl_rule_match_entry *m_entry;
+ int i = 0;
+
+ list_for_each_entry(m_entry, m_list, list) {
+ match[i].type = m_entry->type;
+
+ switch (m_entry->type) {
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
+ match[i].keymask.u16.key = m_entry->keymask.u16.key;
+ match[i].keymask.u16.mask = m_entry->keymask.u16.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO:
+ match[i].keymask.u8.key = m_entry->keymask.u8.key;
+ match[i].keymask.u8.mask = m_entry->keymask.u8.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC:
+ memcpy(match[i].keymask.mac.key,
+ m_entry->keymask.mac.key,
+ sizeof(match[i].keymask.mac.key));
+ memcpy(match[i].keymask.mac.mask,
+ m_entry->keymask.mac.mask,
+ sizeof(match[i].keymask.mac.mask));
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
+ match[i].keymask.u32.key = m_entry->keymask.u32.key;
+ match[i].keymask.u32.mask = m_entry->keymask.u32.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
+ match[i].keymask.u64.key = m_entry->keymask.u64.key;
+ match[i].keymask.u64.mask = m_entry->keymask.u64.mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+int prestera_hw_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u32 *rule_id)
+{
+ struct prestera_msg_acl_action *actions;
+ struct prestera_msg_acl_match *matches;
+ struct prestera_msg_acl_rule_resp resp;
+ struct prestera_msg_acl_rule_req *req;
+ u8 n_actions;
+ u8 n_matches;
+ void *buff;
+ u32 size;
+ int err;
+
+ n_actions = prestera_acl_rule_action_len(rule);
+ n_matches = prestera_acl_rule_match_len(rule);
+
+ size = sizeof(*req) + sizeof(*actions) * n_actions +
+ sizeof(*matches) * n_matches;
+
+ buff = kzalloc(size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ actions = buff + sizeof(*req);
+ matches = buff + sizeof(*req) + sizeof(*actions) * n_actions;
+
+ /* put acl actions into the message */
+ err = prestera_hw_acl_actions_put(actions, rule);
+ if (err)
+ goto free_buff;
+
+ /* put acl matches into the message */
+ err = prestera_hw_acl_matches_put(matches, rule);
+ if (err)
+ goto free_buff;
+
+ req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule);
+ req->priority = prestera_acl_rule_priority_get(rule);
+ req->n_actions = prestera_acl_rule_action_len(rule);
+ req->n_matches = prestera_acl_rule_match_len(rule);
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD,
+ &req->cmd, size, &resp.ret, sizeof(resp));
+ if (err)
+ goto free_buff;
+
+ *rule_id = resp.id;
+free_buff:
+ kfree(buff);
+ return err;
+}
+
+int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
+{
+ struct prestera_msg_acl_rule_req req = {
+ .id = rule_id
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
+ u64 *packets, u64 *bytes)
+{
+ struct prestera_msg_acl_rule_stats_resp resp;
+ struct prestera_msg_acl_rule_req req = {
+ .id = rule_id
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *packets = resp.packets;
+ *bytes = resp.bytes;
+
+ return 0;
+}
+
+int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_bind_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .ruleset_id = ruleset_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_acl_port_unbind(const struct prestera_port *port,
+ u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_bind_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .ruleset_id = ruleset_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
+{
+ struct prestera_msg_span_resp resp;
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *span_id = resp.id;
+
+ return 0;
+}
+
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+{
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .id = span_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_unbind(const struct prestera_port *port)
+{
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_msg_span_req req = {
+ .id = span_id
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
{
struct prestera_msg_port_attr_req req = {
@@ -988,7 +1416,43 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_UC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_MC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
@@ -1003,6 +1467,41 @@ int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
+int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
+ unsigned long val)
+{
+ int err;
+
+ if (port->sw->dev->fw_rev.maj <= 2) {
+ if (!(mask & BR_FLOOD))
+ return 0;
+
+ return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
+ }
+
+ if (mask & BR_FLOOD) {
+ err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
+ if (err)
+ return err;
+ }
+
+ if (mask & BR_MCAST_FLOOD) {
+ err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
+ if (err)
+ goto err_mc_flood;
+ }
+
+ return 0;
+
+err_mc_flood:
+ /* roll back the unicast flood change that already succeeded */
+ if (mask & BR_FLOOD)
+ prestera_hw_port_uc_flood_set(port, 0);
+
+ return err;
+}
+
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -1067,8 +1566,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
u16 vid, bool dynamic)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
.dynamic = dynamic,
};
@@ -1083,8 +1584,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
u16 vid)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
};
@@ -1094,11 +1597,48 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
&req.cmd, sizeof(req));
}
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.flush_mode = mode,
};
@@ -1121,8 +1661,10 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
u32 mode)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
.flush_mode = mode,
};
@@ -1131,6 +1673,37 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
&req.cmd, sizeof(req));
}
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
{
struct prestera_msg_bridge_resp resp;
@@ -1212,6 +1785,68 @@ int prestera_hw_rxtx_port_init(struct prestera_port *port)
&req.cmd, sizeof(req));
}
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+ u32 cmd;
+
+ cmd = enable ? PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE :
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE;
+
+ return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req));
+}
+
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count)
+{
+ struct prestera_msg_cpu_code_counter_req req = {
+ .counter_type = counter_type,
+ .code = code,
+ };
+ struct mvsw_msg_cpu_code_counter_ret resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *packet_count = resp.packet_count;
+
+ return 0;
+}
+
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
prestera_event_cb_t fn,
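
prestera_hw_acl_rule_add() above builds one variable-length message: the fixed request header followed by n_actions action entries and n_matches match entries in a single allocation, with the two arrays located by pointer arithmetic off the header. A compilable sketch of that layout arithmetic (simplified structs, not the real wire format):

#include <stdio.h>
#include <stdlib.h>

struct req_hdr { unsigned int rule_id; unsigned char n_actions, n_matches; };
struct action  { unsigned int id; };
struct match   { unsigned int type; };

int main(void)
{
	unsigned char n_actions = 1, n_matches = 2;
	size_t size = sizeof(struct req_hdr) +
		      sizeof(struct action) * n_actions +
		      sizeof(struct match) * n_matches;
	struct req_hdr *req = calloc(1, size);
	struct action *actions;
	struct match *matches;

	if (!req)
		return 1;
	actions = (struct action *)(req + 1);		/* right after the header */
	matches = (struct match *)(actions + n_actions);

	req->n_actions = n_actions;
	req->n_matches = n_matches;
	actions[0].id = 1;		/* e.g. ACCEPT */
	matches[0].type = 2;		/* e.g. ETH_TYPE */
	matches[1].type = 3;		/* e.g. VLAN_ID */

	printf("one %zu-byte message, three variable parts\n", size);
	free(req);
	return 0;
}
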
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index b2b5ac95b4e3..546d5fd8240d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -89,12 +89,18 @@ enum {
PRESTERA_STP_FORWARD,
};
+enum prestera_hw_cpu_code_cnt_t {
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
+};
+
struct prestera_switch;
struct prestera_port;
struct prestera_port_stats;
struct prestera_port_caps;
enum prestera_event_type;
struct prestera_event;
+struct prestera_acl_rule;
typedef void (*prestera_event_cb_t)
(struct prestera_switch *sw, struct prestera_event *evt, void *arg);
@@ -138,7 +144,8 @@ int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
+ unsigned long val);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -165,6 +172,28 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+/* ACL API */
+int prestera_hw_acl_ruleset_create(struct prestera_switch *sw,
+ u16 *ruleset_id);
+int prestera_hw_acl_ruleset_del(struct prestera_switch *sw,
+ u16 ruleset_id);
+int prestera_hw_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u32 *rule_id);
+int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id);
+int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw,
+ u32 rule_id, u64 *packets, u64 *bytes);
+int prestera_hw_acl_port_bind(const struct prestera_port *port,
+ u16 ruleset_id);
+int prestera_hw_acl_port_unbind(const struct prestera_port *port,
+ u16 ruleset_id);
+
+/* SPAN API */
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
+int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
@@ -179,4 +208,24 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params);
int prestera_hw_rxtx_port_init(struct prestera_port *port);
+/* LAG API */
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable);
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic);
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid);
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode);
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode);
+
+/* HW trap/drop counters API */
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count);
+
#endif /* _PRESTERA_HW_H_ */
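
The reworked prestera_hw_port_flood_set() takes a mask of which flood flags to touch plus their values in val, so a caller can flip BR_MCAST_FLOOD without disturbing the BR_FLOOD setting. The mask/val contract in isolation (the bit values are local stand-ins for the uapi bridge flags, and the driver issues per-flag commands rather than a read-modify-write):

#include <stdio.h>

#define BR_FLOOD	(1u << 0)	/* local stand-ins for the bridge flags */
#define BR_MCAST_FLOOD	(1u << 1)

static unsigned long flood_apply(unsigned long cur, unsigned long mask,
				 unsigned long val)
{
	/* only bits selected by mask change; the rest keep their value */
	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	unsigned long state = BR_FLOOD;		/* uc flood on, mc flood off */

	/* enable mc flood without touching the uc setting */
	state = flood_apply(state, BR_MCAST_FLOOD, BR_MCAST_FLOOD);
	printf("state = 0x%lx\n", state);	/* 0x3 */
	return 0;
}
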
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 2768c78528a5..226f4ff29f6e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -8,9 +8,13 @@
#include <linux/netdev_features.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/if_vlan.h>
#include "prestera.h"
#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
@@ -199,10 +203,25 @@ static void prestera_port_stats_update(struct work_struct *work)
msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
}
+static int prestera_port_setup_tc(struct net_device *dev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return prestera_flow_block_setup(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops prestera_netdev_ops = {
.ndo_open = prestera_port_open,
.ndo_stop = prestera_port_close,
.ndo_start_xmit = prestera_port_xmit,
+ .ndo_setup_tc = prestera_port_setup_tc,
.ndo_change_mtu = prestera_port_change_mtu,
.ndo_get_stats64 = prestera_port_get_stats64,
.ndo_set_mac_address = prestera_port_set_mac_address,
@@ -281,6 +300,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
INIT_LIST_HEAD(&port->vlans_list);
port->pvid = PRESTERA_DEFAULT_VID;
+ port->lag = NULL;
port->dev = dev;
port->id = id;
port->sw = sw;
@@ -296,7 +316,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_dl_port_register;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
@@ -472,6 +492,149 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id)
+{
+ return id < sw->lag_max ? &sw->lags[id] : NULL;
+}
+
+static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
+ struct net_device *dev)
+{
+ struct prestera_lag *lag;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = &sw->lags[id];
+ if (lag->dev == dev)
+ return lag;
+ }
+
+ return NULL;
+}
+
+static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
+ struct net_device *lag_dev)
+{
+ struct prestera_lag *lag = NULL;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = &sw->lags[id];
+ if (!lag->dev)
+ break;
+ }
+ if (lag) {
+ INIT_LIST_HEAD(&lag->members);
+ lag->dev = lag_dev;
+ }
+
+ return lag;
+}
+
+static void prestera_lag_destroy(struct prestera_switch *sw,
+ struct prestera_lag *lag)
+{
+ WARN_ON(!list_empty(&lag->members));
+ lag->member_count = 0;
+ lag->dev = NULL;
+}
+
+static int prestera_lag_port_add(struct prestera_port *port,
+ struct net_device *lag_dev)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag;
+ int err;
+
+ lag = prestera_lag_by_dev(sw, lag_dev);
+ if (!lag) {
+ lag = prestera_lag_create(sw, lag_dev);
+ if (!lag)
+ return -ENOSPC;
+ }
+
+ if (lag->member_count >= sw->lag_member_max)
+ return -ENOSPC;
+
+ err = prestera_hw_lag_member_add(port, lag->lag_id);
+ if (err) {
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+ return err;
+ }
+
+ list_add(&port->lag_member, &lag->members);
+ lag->member_count++;
+ port->lag = lag;
+
+ return 0;
+}
+
+static int prestera_lag_port_del(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag = port->lag;
+ int err;
+
+ if (!lag || !lag->member_count)
+ return -EINVAL;
+
+ err = prestera_hw_lag_member_del(port, lag->lag_id);
+ if (err)
+ return err;
+
+ list_del(&port->lag_member);
+ lag->member_count--;
+ port->lag = NULL;
+
+ if (netif_is_bridge_port(lag->dev)) {
+ struct net_device *br_dev;
+
+ br_dev = netdev_master_upper_dev_get(lag->dev);
+
+ prestera_bridge_port_leave(br_dev, port);
+ }
+
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+
+ return 0;
+}
+
+bool prestera_port_is_lag_member(const struct prestera_port *port)
+{
+ return !!port->lag;
+}
+
+u16 prestera_port_lag_id(const struct prestera_port *port)
+{
+ return port->lag->lag_id;
+}
+
+static int prestera_lag_init(struct prestera_switch *sw)
+{
+ u16 id;
+
+ sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL);
+ if (!sw->lags)
+ return -ENOMEM;
+
+ for (id = 0; id < sw->lag_max; id++)
+ sw->lags[id].lag_id = id;
+
+ return 0;
+}
+
+static void prestera_lag_fini(struct prestera_switch *sw)
+{
+ u8 idx;
+
+ for (idx = 0; idx < sw->lag_max; idx++)
+ WARN_ON(sw->lags[idx].member_count);
+
+ kfree(sw->lags);
+}
+
bool prestera_netdev_check(const struct net_device *dev)
{
return dev->netdev_ops == &prestera_netdev_ops;
@@ -505,16 +668,119 @@ struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
return port;
}
-static int prestera_netdev_port_event(struct net_device *dev,
+static int prestera_netdev_port_lower_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info = ptr;
+ struct netdev_lag_lower_state_info *lower_state_info;
+ struct prestera_port *port = netdev_priv(dev);
+ bool enabled;
+
+ if (!netif_is_lag_port(dev))
+ return 0;
+ if (!prestera_port_is_lag_member(port))
+ return 0;
+
+ lower_state_info = info->lower_state_info;
+ enabled = lower_state_info->link_up && lower_state_info->tx_enabled;
+
+ return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled);
+}
+
+static bool prestera_lag_master_check(struct net_device *lag_dev,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *ext_ack)
+{
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type");
+ return false;
+ }
+
+ return true;
+}
+
+static int prestera_netdev_port_event(struct net_device *lower,
+ struct net_device *dev,
unsigned long event, void *ptr)
{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct prestera_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ upper = info->upper_dev;
+
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+
+ if (netif_is_lag_master(upper) &&
+ !prestera_lag_master_check(upper, info->upper_info, extack))
+ return -EOPNOTSUPP;
+ if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Master device is a LAG master and port has a VLAN");
+ return -EINVAL;
+ }
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can not put a VLAN on a LAG port");
+ return -EINVAL;
+ }
+ break;
+
case NETDEV_CHANGEUPPER:
- return prestera_bridge_port_event(dev, event, ptr);
- default:
- return 0;
+ if (netif_is_bridge_master(upper)) {
+ if (info->linking)
+ return prestera_bridge_port_join(upper, port);
+ else
+ prestera_bridge_port_leave(upper, port);
+ } else if (netif_is_lag_master(upper)) {
+ if (info->linking)
+ return prestera_lag_port_add(port, upper);
+ else
+ prestera_lag_port_del(port);
+ }
+ break;
+
+ case NETDEV_CHANGELOWERSTATE:
+ return prestera_netdev_port_lower_event(dev, event, ptr);
}
+
+ return 0;
+}
+
+static int prestera_netdevice_lag_event(struct net_device *lag_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (prestera_netdev_check(dev)) {
+ err = prestera_netdev_port_event(lag_dev, dev, event,
+ ptr);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static int prestera_netdev_event_handler(struct notifier_block *nb,
@@ -524,7 +790,9 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
int err = 0;
if (prestera_netdev_check(dev))
- err = prestera_netdev_port_event(dev, event, ptr);
+ err = prestera_netdev_port_event(dev, dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = prestera_netdevice_lag_event(dev, event, ptr);
return notifier_from_errno(err);
}
@@ -574,10 +842,22 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_handlers_register;
+ err = prestera_acl_init(sw);
+ if (err)
+ goto err_acl_init;
+
+ err = prestera_span_init(sw);
+ if (err)
+ goto err_span_init;
+
err = prestera_devlink_register(sw);
if (err)
goto err_dl_register;
+ err = prestera_lag_init(sw);
+ if (err)
+ goto err_lag_init;
+
err = prestera_create_ports(sw);
if (err)
goto err_ports_create;
@@ -585,8 +865,14 @@ static int prestera_switch_init(struct prestera_switch *sw)
return 0;
err_ports_create:
+ prestera_lag_fini(sw);
+err_lag_init:
prestera_devlink_unregister(sw);
err_dl_register:
+ prestera_span_fini(sw);
+err_span_init:
+ prestera_acl_fini(sw);
+err_acl_init:
prestera_event_handlers_unregister(sw);
err_handlers_register:
prestera_rxtx_switch_fini(sw);
@@ -602,7 +888,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
prestera_destroy_ports(sw);
+ prestera_lag_fini(sw);
prestera_devlink_unregister(sw);
+ prestera_span_fini(sw);
+ prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
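
prestera_lag_create() hands out the first LAG slot whose dev pointer is still NULL, up to the lag_max reported by firmware at init. A tiny standalone model of that first-free-slot allocator (hypothetical types; LAG_MAX is chosen arbitrarily):

#include <stdio.h>

#define LAG_MAX 4		/* arbitrary; firmware reports the real limit */

struct lag { const char *dev; };

static struct lag *lag_create(struct lag *lags, const char *dev)
{
	int id;

	for (id = 0; id < LAG_MAX; id++) {
		if (!lags[id].dev) {	/* first free slot wins */
			lags[id].dev = dev;
			return &lags[id];
		}
	}
	return NULL;			/* every LAG slot is in use */
}

int main(void)
{
	struct lag lags[LAG_MAX] = { 0 };
	struct lag *a = lag_create(lags, "bond0");
	struct lag *b = lag_create(lags, "bond1");

	printf("bond0 -> slot %td, bond1 -> slot %td\n", a - lags, b - lags);
	return 0;
}
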
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 298110119272..a250d394da38 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <linux/firmware.h>
@@ -13,9 +14,12 @@
#define PRESTERA_MSG_MAX_SIZE 1500
-#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MAJ_VER 3
#define PRESTERA_SUPP_FW_MIN_VER 0
+#define PRESTERA_PREV_FW_MAJ_VER 2
+#define PRESTERA_PREV_FW_MIN_VER 0
+
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
@@ -144,6 +148,11 @@ struct prestera_fw_regs {
/* PRESTERA_CMD_RCV_CTL_REG flags */
#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+#define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0)
+
+#define PRESTERA_FW_EVT_CTL_STATUS_ON 0
+#define PRESTERA_FW_EVT_CTL_STATUS_OFF 1
+
#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
(PRESTERA_FW_REG_OFFSET(evtq_list) + \
(q) * sizeof(struct prestera_fw_evtq_regs) + \
@@ -166,6 +175,8 @@ struct prestera_fw_evtq {
};
struct prestera_fw {
+ struct prestera_fw_rev rev_supp;
+ const struct firmware *bin;
struct workqueue_struct *wq;
struct prestera_device dev;
u8 __iomem *ldr_regs;
@@ -260,6 +271,15 @@ static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
return PRESTERA_EVT_QNUM_MAX;
}
+static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val)
+{
+ u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG);
+
+ u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK);
+
+ prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status);
+}
+
static void prestera_fw_evt_work_fn(struct work_struct *work)
{
struct prestera_fw *fw;
@@ -269,6 +289,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work)
fw = container_of(work, struct prestera_fw, evt_work);
msg = fw->evt_msg;
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF);
+
while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
u32 idx;
u32 len;
@@ -288,6 +310,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work)
if (fw->dev.recv_msg)
fw->dev.recv_msg(&fw->dev, msg, len);
}
+
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON);
}
static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
@@ -576,25 +600,24 @@ static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
static int prestera_fw_rev_check(struct prestera_fw *fw)
{
struct prestera_fw_rev *rev = &fw->dev.fw_rev;
- u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
- u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
- if (rev->maj == maj_supp && rev->min >= min_supp)
+ if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min)
return 0;
dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
- PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+ fw->rev_supp.maj, fw->rev_supp.min);
return -EINVAL;
}
-static int prestera_fw_hdr_parse(struct prestera_fw *fw,
- const struct firmware *img)
+static int prestera_fw_hdr_parse(struct prestera_fw *fw)
{
- struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ struct prestera_fw_header *hdr;
u32 magic;
+ hdr = (struct prestera_fw_header *)fw->bin->data;
+
magic = be32_to_cpu(hdr->magic_number);
if (magic != PRESTERA_FW_HDR_MAGIC) {
dev_err(fw->dev.dev, "FW img hdr magic is invalid");
@@ -609,11 +632,52 @@ static int prestera_fw_hdr_parse(struct prestera_fw *fw,
return prestera_fw_rev_check(fw);
}
+static int prestera_fw_get(struct prestera_fw *fw)
+{
+ int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
+ int ver_min = PRESTERA_SUPP_FW_MIN_VER;
+ char fw_path[128];
+ int err;
+
+pick_fw_ver:
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ ver_maj, ver_min);
+
+ err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+ if (err) {
+ if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
+ ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+ ver_min = PRESTERA_PREV_FW_MIN_VER;
+
+ dev_warn(fw->dev.dev,
+ "missing latest %s firmware, fall-back to previous %u.%u version\n",
+ fw_path, ver_maj, ver_min);
+
+ goto pick_fw_ver;
+ } else {
+ dev_err(fw->dev.dev, "failed to request previous firmware: %s\n",
+ fw_path);
+ return err;
+ }
+ }
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ fw->rev_supp.maj = ver_maj;
+ fw->rev_supp.min = ver_min;
+ fw->rev_supp.sub = 0;
+
+ return 0;
+}
+
+static void prestera_fw_put(struct prestera_fw *fw)
+{
+ release_firmware(fw->bin);
+}
+
static int prestera_fw_load(struct prestera_fw *fw)
{
size_t hlen = sizeof(struct prestera_fw_header);
- const struct firmware *f;
- char fw_path[128];
int err;
err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
@@ -632,30 +696,24 @@ static int prestera_fw_load(struct prestera_fw *fw)
fw->ldr_wr_idx = 0;
- snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
- PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
-
- err = request_firmware_direct(&f, fw_path, fw->dev.dev);
- if (err) {
- dev_err(fw->dev.dev, "failed to request firmware file\n");
+ err = prestera_fw_get(fw);
+ if (err)
return err;
- }
- err = prestera_fw_hdr_parse(fw, f);
+ err = prestera_fw_hdr_parse(fw);
if (err) {
dev_err(fw->dev.dev, "FW image header is invalid\n");
goto out_release;
}
- prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen);
prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
- dev_info(fw->dev.dev, "Loading %s ...", fw_path);
-
- err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+ err = prestera_ldr_fw_send(fw, fw->bin->data + hlen,
+ fw->bin->size - hlen);
out_release:
- release_firmware(f);
+ prestera_fw_put(fw);
return err;
}
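
prestera_fw_get() first asks for the v3.0 image and, if request_firmware_direct() fails, retries once with the previous v2.0 image before giving up. The same try-latest-then-fall-back flow as a runnable sketch (try_load() stands in for the firmware loader; only v2.0 "exists" here):

#include <stdio.h>
#include <string.h>

#define FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"

/* stand-in for request_firmware_direct(): only the v2.0 image "exists" */
static int try_load(const char *path)
{
	return strstr(path, "v2.0") ? 0 : -1;
}

int main(void)
{
	unsigned int maj = 3, min = 0;	/* latest supported version */
	char path[128];

	snprintf(path, sizeof(path), FW_PATH_FMT, maj, min);
	if (try_load(path)) {
		maj = 2;		/* fall back to the previous version */
		min = 0;
		snprintf(path, sizeof(path), FW_PATH_FMT, maj, min);
		if (try_load(path))
			return 1;	/* neither image available */
	}
	printf("loaded %s\n", path);
	return 0;
}
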
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 2a13c318048c..73d2eba5262f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -14,6 +14,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
#define PRESTERA_SDMA_WAIT_MUL 10
@@ -214,9 +215,10 @@ static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
struct sk_buff *skb)
{
- const struct prestera_port *port;
+ struct prestera_port *port;
struct prestera_dsa dsa;
u32 hw_port, dev_id;
+ u8 cpu_code;
int err;
skb_pull(skb, ETH_HLEN);
@@ -259,6 +261,9 @@ static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
}
+ cpu_code = dsa.cpu_code;
+ prestera_devlink_trap_report(port, skb, cpu_code);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
new file mode 100644
index 000000000000..3cafca827bb7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_span.h"
+
+struct prestera_span_entry {
+ struct list_head list;
+ struct prestera_port *port;
+ refcount_t ref_count;
+ u8 id;
+};
+
+struct prestera_span {
+ struct prestera_switch *sw;
+ struct list_head entries;
+};
+
+static struct prestera_span_entry *
+prestera_span_entry_create(struct prestera_port *port, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&entry->ref_count, 1);
+ entry->port = port;
+ entry->id = span_id;
+ list_add_tail(&entry->list, &port->sw->span->entries);
+
+ return entry;
+}
+
+static void prestera_span_entry_del(struct prestera_span_entry *entry)
+{
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->id == span_id)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_port(struct prestera_span *span,
+ struct prestera_port *port)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->port == port)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int prestera_span_get(struct prestera_port *port, u8 *span_id)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_span_entry *entry;
+ u8 new_span_id;
+ int err;
+
+ entry = prestera_span_entry_find_by_port(sw->span, port);
+ if (entry) {
+ refcount_inc(&entry->ref_count);
+ *span_id = entry->id;
+ return 0;
+ }
+
+ err = prestera_hw_span_get(port, &new_span_id);
+ if (err)
+ return err;
+
+ entry = prestera_span_entry_create(port, new_span_id);
+ if (IS_ERR(entry)) {
+ prestera_hw_span_release(sw, new_span_id);
+ return PTR_ERR(entry);
+ }
+
+ *span_id = new_span_id;
+ return 0;
+}
+
+static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+ int err;
+
+ entry = prestera_span_entry_find_by_id(sw->span, span_id);
+ if (!entry)
+ return 0;
+
+ if (!refcount_dec_and_test(&entry->ref_count))
+ return 0;
+
+ err = prestera_hw_span_release(sw, span_id);
+ if (err)
+ return err;
+
+ prestera_span_entry_del(entry);
+ return 0;
+}
+
+static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port)
+{
+ struct prestera_switch *sw = binding->port->sw;
+ u8 span_id;
+ int err;
+
+ if (binding->span_id != PRESTERA_SPAN_INVALID_ID)
+ /* port is already being mirrored */
+ return -EEXIST;
+
+ err = prestera_span_get(to_port, &span_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_span_bind(binding->port, span_id);
+ if (err) {
+ prestera_span_put(sw, span_id);
+ return err;
+ }
+
+ binding->span_id = span_id;
+ return 0;
+}
+
+static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+{
+ int err;
+
+ err = prestera_hw_span_unbind(binding->port);
+ if (err)
+ return err;
+
+ err = prestera_span_put(binding->port->sw, binding->span_id);
+ if (err)
+ return err;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ return 0;
+}
+
+int prestera_span_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding);
+ return err;
+}
+
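/* Editor's note -- a hedged, generic sketch of the rollback idiom that
 * prestera_span_replace() above relies on.  After a mid-list failure,
 * list_for_each_entry_continue_reverse() starts at the predecessor of
 * the failed entry and walks back toward the head, undoing exactly the
 * entries that were already applied.  struct item, apply() and revert()
 * are hypothetical stand-ins.
 */
#include <linux/list.h>

struct item { struct list_head node; };

static int apply(struct item *i) { return 0; }	/* hypothetical */
static void revert(struct item *i) { }		/* hypothetical */

static int apply_all(struct list_head *head)
{
	struct item *i;
	int err;

	list_for_each_entry(i, head, node) {
		err = apply(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(i, head, node)
		revert(i);	/* undo only what apply() completed */
	return err;
}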
+void prestera_span_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding);
+}
+
+int prestera_span_init(struct prestera_switch *sw)
+{
+ struct prestera_span *span;
+
+ span = kzalloc(sizeof(*span), GFP_KERNEL);
+ if (!span)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&span->entries);
+
+ sw->span = span;
+ span->sw = sw;
+
+ return 0;
+}
+
+void prestera_span_fini(struct prestera_switch *sw)
+{
+ struct prestera_span *span = sw->span;
+
+ WARN_ON(!list_empty(&span->entries));
+ kfree(span);
+}
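/* Editor's note -- minimal sketch of the lookup-or-create refcounting
 * pattern used by prestera_span_get()/prestera_span_put() above: the
 * first user allocates with refcount_set(.., 1), later users take
 * refcount_inc(), and the object is freed only when
 * refcount_dec_and_test() observes the last reference drop.
 * Everything except the refcount_t API is a hypothetical name.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct shared {
	refcount_t ref;
};

static struct shared *shared_get(struct shared *existing)
{
	if (existing) {
		refcount_inc(&existing->ref);	/* additional user */
		return existing;
	}
	existing = kzalloc(sizeof(*existing), GFP_KERNEL);
	if (existing)
		refcount_set(&existing->ref, 1);	/* first user */
	return existing;
}

static void shared_put(struct shared *s)
{
	if (refcount_dec_and_test(&s->ref))	/* last user frees */
		kfree(s);
}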
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
new file mode 100644
index 000000000000..f0644521f78a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SPAN_H_
+#define _PRESTERA_SPAN_H_
+
+#include <net/pkt_cls.h>
+
+#define PRESTERA_SPAN_INVALID_ID -1
+
+struct prestera_switch;
+struct prestera_flow_block;
+
+int prestera_span_init(struct prestera_switch *sw);
+void prestera_span_fini(struct prestera_switch *sw);
+int prestera_span_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_span_destroy(struct prestera_flow_block *block);
+
+#endif /* _PRESTERA_SPAN_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index cb564890a3dc..0b3e8f2db294 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -180,6 +180,45 @@ err_port_vlan_alloc:
return ERR_PTR(err);
}
+static int prestera_fdb_add(struct prestera_port *port,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port),
+ mac, vid, dynamic);
+
+ return prestera_hw_fdb_add(port, mac, vid, dynamic);
+}
+
+static int prestera_fdb_del(struct prestera_port *port,
+ const unsigned char *mac, u16 vid)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port),
+ mac, vid);
+ else
+ return prestera_hw_fdb_del(port, mac, vid);
+}
+
+static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port),
+ vid, mode);
+ else
+ return prestera_hw_fdb_flush_port_vlan(port, vid, mode);
+}
+
+static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port),
+ mode);
+ else
+ return prestera_hw_fdb_flush_port(port, mode);
+}
+
static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
@@ -199,11 +238,11 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
last_port = port_count == 1;
if (last_vlan)
- prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ prestera_fdb_flush_port(port, fdb_flush_mode);
else if (last_port)
prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
else
- prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
@@ -312,11 +351,29 @@ __prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
return NULL;
}
+static int prestera_match_upper_bridge_dev(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ if (netif_is_bridge_master(dev))
+ priv->data = dev;
+
+ return 0;
+}
+
+static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev)
+{
+ struct netdev_nested_priv priv = { };
+
+ netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev,
+ &priv);
+ return priv.data;
+}
+
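/* Editor's note -- why the walk above replaces
 * netdev_master_upper_dev_get(): the master lookup returns only the
 * direct upper device, so a port enslaved to a LAG that is itself a
 * bridge member (port -> bond0 -> br0) would never see the bridge.
 * netdev_walk_all_upper_dev_rcu() visits every upper in the stack and
 * must be called under RTNL or rcu_read_lock(); returning non-zero
 * from the callback stops the walk early, while
 * prestera_match_upper_bridge_dev() returns 0 to keep walking and
 * simply remembers the bridge in priv->data.
 */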
static struct prestera_bridge_port *
prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
struct net_device *dev)
{
- struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct net_device *br_dev = prestera_get_upper_bridge_dev(dev);
struct prestera_bridge *bridge;
if (!br_dev)
@@ -404,7 +461,8 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
+ br_port->flags);
if (err)
goto err_port_flood_set;
@@ -415,24 +473,23 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
return 0;
err_port_learning_set:
- prestera_hw_port_flood_set(port, false);
err_port_flood_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
}
-static int prestera_port_bridge_join(struct prestera_port *port,
- struct net_device *upper)
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
struct prestera_bridge *bridge;
int err;
- bridge = prestera_bridge_by_dev(swdev, upper);
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
if (!bridge) {
- bridge = prestera_bridge_create(swdev, upper);
+ bridge = prestera_bridge_create(swdev, br_dev);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -505,14 +562,14 @@ static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
}
-static void prestera_port_bridge_leave(struct prestera_port *port,
- struct net_device *upper)
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
struct prestera_bridge *bridge;
- bridge = prestera_bridge_by_dev(swdev, upper);
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
if (!bridge)
return;
@@ -528,57 +585,11 @@ static void prestera_port_bridge_leave(struct prestera_port *port,
prestera_bridge_1d_port_leave(br_port);
prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, false);
+ prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
-int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
- void *ptr)
-{
- struct netdev_notifier_changeupper_info *info = ptr;
- struct netlink_ext_ack *extack;
- struct prestera_port *port;
- struct net_device *upper;
- int err;
-
- extack = netdev_notifier_info_to_extack(&info->info);
- port = netdev_priv(dev);
- upper = info->upper_dev;
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- if (!netif_is_bridge_master(upper)) {
- NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
- return -EINVAL;
- }
-
- if (!info->linking)
- break;
-
- if (netdev_has_any_upper_dev(upper)) {
- NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
- return -EINVAL;
- }
- break;
-
- case NETDEV_CHANGEUPPER:
- if (!netif_is_bridge_master(upper))
- break;
-
- if (info->linking) {
- err = prestera_port_bridge_join(port, upper);
- if (err)
- return err;
- } else {
- prestera_port_bridge_leave(port, upper);
- }
- break;
- }
-
- return 0;
-}
-
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct net_device *dev,
struct switchdev_brport_flags flags)
@@ -590,11 +601,9 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
if (!br_port)
return 0;
- if (flags.mask & BR_FLOOD) {
- err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
- if (err)
- return err;
- }
+ err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
+ if (err)
+ return err;
if (flags.mask & BR_LEARNING) {
err = prestera_hw_port_learning_set(port,
@@ -699,7 +708,7 @@ err_port_stp_set:
return err;
}
-static int prestera_port_obj_attr_set(struct net_device *dev,
+static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
@@ -771,9 +780,9 @@ static int prestera_port_fdb_set(struct prestera_port *port,
vid = bridge->bridge_id;
if (adding)
- err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ err = prestera_fdb_add(port, fdb_info->addr, vid, false);
else
- err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+ err = prestera_fdb_del(port, fdb_info->addr, vid);
return err;
}
@@ -901,7 +910,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
+ br_port->flags);
if (err)
return err;
@@ -1009,15 +1019,15 @@ static int prestera_port_vlans_add(struct prestera_port *port,
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct net_device *dev = vlan->obj.orig_dev;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (netif_is_bridge_master(dev))
+ if (netif_is_bridge_master(orig_dev))
return 0;
- br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1030,7 +1040,7 @@ static int prestera_port_vlans_add(struct prestera_port *port,
flag_pvid, extack);
}
-static int prestera_port_obj_add(struct net_device *dev,
+static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
@@ -1049,14 +1059,14 @@ static int prestera_port_obj_add(struct net_device *dev,
static int prestera_port_vlans_del(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct net_device *dev = vlan->obj.orig_dev;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- if (netif_is_bridge_master(dev))
+ if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
- br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1068,7 +1078,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
return 0;
}
-static int prestera_port_obj_del(struct net_device *dev,
+static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct prestera_port *port = netdev_priv(dev);
@@ -1114,10 +1124,26 @@ static void prestera_fdb_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
struct switchdev_notifier_fdb_info info;
+ struct net_device *dev = NULL;
struct prestera_port *port;
+ struct prestera_lag *lag;
- port = prestera_find_port(sw, evt->fdb_evt.port_id);
- if (!port)
+ switch (evt->fdb_evt.type) {
+ case PRESTERA_FDB_ENTRY_TYPE_REG_PORT:
+ port = prestera_find_port(sw, evt->fdb_evt.dest.port_id);
+ if (port)
+ dev = port->dev;
+ break;
+ case PRESTERA_FDB_ENTRY_TYPE_LAG:
+ lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id);
+ if (lag)
+ dev = lag->dev;
+ break;
+ default:
+ return;
+ }
+
+ if (!dev)
return;
info.addr = evt->fdb_evt.data.mac;
@@ -1129,11 +1155,11 @@ static void prestera_fdb_event(struct prestera_switch *sw,
switch (evt->id) {
case PRESTERA_FDB_EVENT_LEARNED:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- port->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
case PRESTERA_FDB_EVENT_AGED:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- port->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
index 606e21d2355b..a91bc35d235f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -7,7 +7,10 @@
int prestera_switchdev_init(struct prestera_switch *sw);
void prestera_switchdev_fini(struct prestera_switch *sw);
-int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
- void *ptr);
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port);
+
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port);
#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index e967867828d8..9b48ae4bac39 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1528,6 +1528,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct pxa168_eth_private *pep = netdev_priv(dev);
+ cancel_work_sync(&pep->tx_timeout_task);
if (pep->htpr) {
dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
pep->htpr, pep->htpr_dma);
@@ -1539,7 +1540,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
- cancel_work_sync(&pep->tx_timeout_task);
unregister_netdev(dev);
free_netdev(dev);
return 0;
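/* Editor's note -- the reordering above follows the general teardown
 * rule: cancel deferred work before freeing anything that work might
 * touch, since a late-running worker could otherwise dereference freed
 * memory.  A hedged sketch with hypothetical names:
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_priv {
	struct work_struct reset_work;
	void *dma_buf;
};

static void my_remove(struct my_priv *p)
{
	/* 1: guarantee the worker is not running and cannot rearm */
	cancel_work_sync(&p->reset_work);
	/* 2: only then release the resources the worker dereferences */
	kfree(p->dma_buf);
}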
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index 6928abcec0a3..f72217348eb4 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -263,7 +263,7 @@ enum {
CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
- CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
+ CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 222c32367b2c..8b8bff59c8fe 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -471,7 +471,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
adv |= fiber_fc_adv[sky2->flow_mode];
} else {
reg |= GM_GPCR_AU_FCT_DIS;
- reg |= gm_fc_disable[sky2->flow_mode];
+ reg |= gm_fc_disable[sky2->flow_mode];
/* Forward pause packets to GMAC? */
if (sky2->flow_mode & FC_RX)
@@ -1656,16 +1656,16 @@ static void sky2_hw_up(struct sky2_port *sky2)
tx_init(sky2);
/*
- * On dual port PCI-X card, there is an problem where status
+ * On dual port PCI-X card, there is a problem where status
* can be received out of order due to split transactions
*/
if (otherdev && netif_running(otherdev) &&
- (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
- u16 cmd;
+ (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+ u16 cmd;
cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
- cmd &= ~PCI_X_CMD_MAX_SPLIT;
- sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+ cmd &= ~PCI_X_CMD_MAX_SPLIT;
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
}
sky2_mac_init(hw, port);
@@ -1836,8 +1836,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
u16 mss;
u8 ctrl;
- if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
- return NETDEV_TX_BUSY;
+ if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+ return NETDEV_TX_BUSY;
len = skb_headlen(skb);
mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
@@ -1866,9 +1866,9 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (!(hw->flags & SKY2_HW_NEW_LE))
mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
- if (mss != sky2->tx_last_mss) {
+ if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
- le->addr = cpu_to_le32(mss);
+ le->addr = cpu_to_le32(mss);
if (hw->flags & SKY2_HW_NEW_LE)
le->opcode = OP_MSS | HW_OWNER;
@@ -1895,8 +1895,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
/* Handle TCP checksum offload */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* On Yukon EX (some versions) encoding change. */
- if (hw->flags & SKY2_HW_AUTO_TX_SUM)
- ctrl |= CALSUM; /* auto checksum */
+ if (hw->flags & SKY2_HW_AUTO_TX_SUM)
+ ctrl |= CALSUM; /* auto checksum */
else {
const unsigned offset = skb_transport_offset(skb);
u32 tcpsum;
@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
if (length == 0) {
/* don't need this page */
- __skb_frag_unref(frag);
+ __skb_frag_unref(frag, false);
--skb_shinfo(skb)->nr_frags;
} else {
size = min(length, (unsigned) PAGE_SIZE);
@@ -2557,7 +2557,7 @@ nobuf:
static struct sk_buff *sky2_receive(struct net_device *dev,
u16 length, u32 status)
{
- struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_port *sky2 = netdev_priv(dev);
struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16;
@@ -5063,11 +5063,11 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!disable_msi && pci_enable_msi(pdev) == 0) {
err = sky2_test_msi(hw);
if (err) {
- pci_disable_msi(pdev);
+ pci_disable_msi(pdev);
if (err != -EOPNOTSUPP)
goto err_out_free_netdev;
}
- }
+ }
netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index b2dddd8a246c..ddec1627f1a7 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -538,8 +538,8 @@ enum {
CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */
CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */
- CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */
- CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+ CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */
+ CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
@@ -2262,8 +2262,8 @@ struct sky2_port {
#define SKY2_FLAG_AUTO_SPEED 0x0002
#define SKY2_FLAG_AUTO_PAUSE 0x0004
- enum flow_control flow_mode;
- enum flow_control flow_status;
+ enum flow_control flow_mode;
+ enum flow_control flow_status;
#ifdef CONFIG_SKY2_DEBUG
struct dentry *debugfs;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed4eacef17ce..64adfd24e134 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = MTK_GDM1_TX_GBCNT;
- u64 stats;
-
- base += hw_stats->reg_offset;
+ struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw,
+ MTK_GDM1_RX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ }
+
u64_stats_update_end(&hw_stats->syncp);
}
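/* Editor's note -- hedged sketch of the u64_stats_sync pattern the
 * rework above keeps using.  On 32-bit SMP the seqcount lets readers
 * observe consistent 64-bit counters; on 64-bit it compiles away.
 * struct my_stats is hypothetical, the u64_stats_* API is real (the
 * syncp must be initialized once with u64_stats_init()).
 */
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_add(struct my_stats *s, u64 n)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	s->rx_bytes += n;
	u64_stats_update_end(&s->syncp);
}

static u64 my_stats_read(struct my_stats *s)
{
	unsigned int start;
	u64 val;

	do {	/* reader retries if it raced with a writer */
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->rx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}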
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 11331b44ba07..5ef70dd8b49c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -278,8 +278,21 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN 0x1B2C
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
+/* GMA1 counter / statistics register */
+#define MTK_GDM1_RX_GBCNT_L 0x2400
+#define MTK_GDM1_RX_GBCNT_H 0x2404
+#define MTK_GDM1_RX_GPCNT 0x2408
+#define MTK_GDM1_RX_OERCNT 0x2410
+#define MTK_GDM1_RX_FERCNT 0x2414
+#define MTK_GDM1_RX_SERCNT 0x2418
+#define MTK_GDM1_RX_LENCNT 0x241c
+#define MTK_GDM1_RX_CERCNT 0x2420
+#define MTK_GDM1_RX_FCCNT 0x2424
+#define MTK_GDM1_TX_SKIPCNT 0x2428
+#define MTK_GDM1_TX_COLCNT 0x242c
+#define MTK_GDM1_TX_GBCNT_L 0x2430
+#define MTK_GDM1_TX_GBCNT_H 0x2434
+#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
/* QDMA descriptor txd4 */
@@ -502,6 +515,13 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index ff6613a5cdd3..b4f66eb9ddb9 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -22,5 +22,6 @@ source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 79773ac331ee..d4b5f547a727 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
obj-$(CONFIG_MLXSW_CORE) += mlxsw/
obj-$(CONFIG_MLXFW) += mlxfw/
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 1434df66fcf2..3616b77caa0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e35e4d7ef4d1..442991d91c15 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
fail:
while (nr > 0) {
nr--;
- __skb_frag_unref(skb_shinfo(skb)->frags + nr);
+ __skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
}
return 0;
}
@@ -679,9 +679,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring = priv->rx_ring[cq_ring];
- /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
- rcu_read_lock();
- xdp_prog = rcu_dereference(ring->xdp_prog);
+ xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
@@ -744,7 +742,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
- hlist_for_each_entry_rcu(entry, bucket, hlist) {
+ hlist_for_each_entry_rcu_bh(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source))
goto next;
@@ -899,8 +897,6 @@ next:
break;
}
- rcu_read_unlock();
-
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
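/* Editor's note -- the hunk above can drop rcu_read_lock() because
 * NAPI poll runs in softirq context with BH disabled, which since the
 * RCU flavor consolidation is itself a valid RCU read-side critical
 * section; rcu_dereference_bh() documents that the BH-disabled region
 * is what protects the pointer.  Hedged sketch, hypothetical names:
 */
#include <linux/rcupdate.h>
#include <linux/filter.h>

struct my_ring {
	struct bpf_prog __rcu *xdp_prog;
};

static void my_napi_poll_body(struct my_ring *r)
{
	struct bpf_prog *prog;

	/* softirq/NAPI context: BH already disabled, no rcu_read_lock() */
	prog = rcu_dereference_bh(r->xdp_prog);
	if (prog) {
		/* ... run the XDP program ... */
	}
}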
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6cfec81ccc3..dc4ac1a2b6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (mlx4_is_mfunc(dev))
disable_unsupported_roce_caps(outbox);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+ dev_cap->map_clock_to_user = field & 0x80;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 8f020f26ebf5..cf64e54eecb0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
+ bool map_clock_to_user;
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c326b434734e..00c84656b2e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
+ dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
if (mlx4_is_slave(dev))
return -EOPNOTSUPP;
+ if (!dev->caps.map_clock_to_user) {
+ mlx4_dbg(dev, "Map clock to user is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
if (!params)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* MAD returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
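/* Editor's note -- a worked check of the QSFP page arithmetic above,
 * phrased as hypothetical helpers.  Flat offset 400 maps to upper page
 * 1 + (400 - 256) / 128 = 2, and the in-page offset becomes
 * 400 - 2 * 128 = 144, matching the comment in the patch.
 */
#include <linux/types.h>

static u8 qsfp_page_for(u16 offset)
{
	return offset < 256 ? 0 : 1 + (offset - 256) / 128;
}

static u16 qsfp_in_page_offset(u16 offset)
{
	/* page 0 keeps the flat offset; upper pages are 128 bytes each */
	return offset - qsfp_page_for(offset) * 128;
}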
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 461a43f338e6..e1a5a79e27c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -12,7 +12,6 @@ config MLX5_CORE
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
- default n
help
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
@@ -36,7 +35,6 @@ config MLX5_CORE_EN
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
select DIMLIB
- default n
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -79,6 +77,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_BRIDGE
+ bool
+ depends on MLX5_ESWITCH && BRIDGE
+ default y
+ help
+ mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
+ Enable adding representors of mlx5 uplink and VF ports to Bridge and
+ offloading rules for traffic between such ports. Supports VLANs (trunk and
+ access modes).
+
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
depends on MLX5_ESWITCH && NET_CLS_ACT
@@ -131,7 +139,6 @@ config MLX5_CORE_EN_DCB
config MLX5_CORE_IPOIB
bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
depends on MLX5_CORE_EN
- default n
help
MLX5 IPoIB offloads & acceleration support.
@@ -139,7 +146,6 @@ config MLX5_FPGA_IPSEC
bool "Mellanox Technologies IPsec Innova support"
depends on MLX5_CORE
depends on MLX5_FPGA
- default n
help
Build IPsec support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -153,7 +159,6 @@ config MLX5_IPSEC
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
select MLX5_ACCEL
- default n
help
Build IPsec support for the Connect-X family of network cards by Mellanox
Technologies.
@@ -166,7 +171,6 @@ config MLX5_EN_IPSEC
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
- default n
help
Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
@@ -179,7 +183,6 @@ config MLX5_FPGA_TLS
depends on MLX5_CORE_EN
depends on MLX5_FPGA
select MLX5_EN_TLS
- default n
help
Build TLS support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -194,7 +197,6 @@ config MLX5_TLS
depends on MLX5_CORE_EN
select MLX5_ACCEL
select MLX5_EN_TLS
- default n
help
Build TLS support for the Connect-X family of network cards by Mellanox
Technologies.
@@ -217,7 +219,6 @@ config MLX5_SW_STEERING
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
- default n
help
Build support for subfunction device in the NIC. A Mellanox subfunction
device can support RDMA, netdevice and vdpa device.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a1223e904190..b5072a3a2585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o
@@ -56,6 +56,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
esw/devlink_port.o esw/vporttbl.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o
+mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9166cd85013..ceebfc20f65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
mutex_lock(&mlx5_intf_mutex);
+ priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
}
} else {
adev = &priv->adev[i]->adev;
+
+ /* Note that this is not the PCI driver to which
+ * mlx5_core_dev is bound, but an auxiliary driver.
+ *
+ * Module unload can race with devlink reload here,
+ * but no extra lock is needed because we hold the
+ * global mlx5_intf_mutex.
+ */
+ if (!adev->dev.driver)
+ continue;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
adev = &priv->adev[i]->adev;
+ /* Auxiliary driver was unbound manually through sysfs */
+ if (!adev->dev.driver)
+ goto skip_suspend;
+
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
}
+skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
struct mlx5_priv *priv = &dev->priv;
lockdep_assert_held(&mlx5_intf_mutex);
+ if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+ return 0;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 44c458443428..d791d351b489 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -63,6 +63,11 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
err = devlink_info_version_running_put(req, "fw.version", version_str);
if (err)
return err;
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+ if (err)
+ return err;
/* no pending version, return running (stored) version */
if (stored_fw == 0)
@@ -74,8 +79,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
err = devlink_info_version_stored_put(req, "fw.version", version_str);
if (err)
return err;
-
- return 0;
+ return devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
}
static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b636d63358d2..b1b51bbba054 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -974,7 +974,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
-void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
@@ -1163,6 +1162,13 @@ mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
}
+static inline bool
+mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
+}
+
int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0dd7615e5931..bc33eaada3b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct devlink_port *port;
+ if (!netif_device_present(dev))
+ return NULL;
port = mlx5e_devlink_get_dl_port(priv);
if (port->registered)
return port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index f410c1268422..150c8e82c738 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -201,7 +201,7 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
@@ -214,7 +214,7 @@ static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
@@ -614,7 +614,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -643,7 +643,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- param->is_tls = mlx5_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d907c1acd4d5..778e229310a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
-#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index ab935cce952b..c96668bd701c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -6,6 +6,7 @@
#include "en.h"
#include "en_stats.h"
+#include <linux/ptp_classify.h>
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
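/* Editor's note: the helper above classifies PTP traffic two ways --
 * ETH_P_1588 catches Layer-2 PTP frames directly, while the UDP check
 * catches PTP-over-IPv4/IPv6 event messages on PTP_EV_PORT (319,
 * defined in <linux/ptp_classify.h>, which the header now includes).
 * General messages on port 320 do not match, so they are not steered
 * to the PTP send queue.
 */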
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 95f2b26a3ee3..9c076aa20306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
new file mode 100644
index 000000000000..3c0032c9647c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+#include "bridge.h"
+#include "esw/bridge.h"
+#include "en_rep.h"
+
+#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000
+
+struct mlx5_bridge_switchdev_fdb_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ bool add;
+};
+
+static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ netdev_nb);
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct net_device *upper;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ if (!mlx5e_eswitch_rep(dev))
+ return 0;
+
+ upper = info->upper_dev;
+ if (!netif_is_bridge_master(upper))
+ return 0;
+
+ esw = br_offloads->esw;
+ priv = netdev_priv(dev);
+ if (esw != priv->mdev->priv.eswitch)
+ return 0;
+
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ return info->linking ?
+ mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
+ mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
+}
+
+static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ err = mlx5_esw_bridge_port_changeupper(nb, ptr);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
+ const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ const struct switchdev_obj_port_vlan *vlan;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+ int err = 0;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
+ const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *vlan;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
+ const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+ int err = 0;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
+ err = -EINVAL;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
+{
+ dev_put(fdb_work->dev);
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
+{
+ struct mlx5_bridge_switchdev_fdb_work *fdb_work =
+ container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info =
+ &fdb_work->fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ rtnl_lock();
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ goto out;
+
+ if (fdb_work->add)
+ mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
+ else
+ mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);
+
+out:
+ rtnl_unlock();
+ mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
+}
+
+static struct mlx5_bridge_switchdev_fdb_work *
+mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_bridge_switchdev_fdb_work *work;
+ u8 *addr;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
+ memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
+
+ addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!addr) {
+ kfree(work);
+ return ERR_PTR(-ENOMEM);
+ }
+ ether_addr_copy(addr, fdb_info->addr);
+ work->fdb_info.addr = addr;
+
+ dev_hold(dev);
+ work->dev = dev;
+ work->add = add;
+ return work;
+}
+
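/* Editor's note on the helper above: switchdev FDB notifiers fire in
 * atomic context, so the handler cannot sleep.  The idiom is to
 * GFP_ATOMIC-allocate a work item, deep-copy the MAC address (the
 * notifier's buffer is not ours to keep), take a dev_hold() reference
 * so the netdev cannot disappear, and do the sleeping work later on
 * the driver's ordered workqueue; the cleanup path mirrors this with
 * dev_put() and kfree() of the copied address, as in
 * mlx5_esw_bridge_cleanup_switchdev_fdb_work() above.
 */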
+static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ nb);
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct mlx5_bridge_switchdev_fdb_work *work;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *upper;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(dev))
+ return NOTIFY_DONE;
+ priv = netdev_priv(dev);
+ if (priv->mdev->priv.eswitch != br_offloads->esw)
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ int err = switchdev_handle_port_attr_set(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
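+	/* FDB events arrive in atomic context; defer the offload work to the ordered workqueue */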
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
+ event == SWITCHDEV_FDB_ADD_TO_DEVICE,
+ fdb_info);
+ if (IS_ERR(work)) {
+ WARN_ONCE(1, "Failed to init switchdev work, err=%ld\n",
+ PTR_ERR(work));
+ return notifier_from_errno(PTR_ERR(work));
+ }
+
+ queue_work(br_offloads->wq, &work->work);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void mlx5_esw_bridge_update_work(struct work_struct *work)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
+ struct mlx5_esw_bridge_offloads,
+ update_work.work);
+
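+	/* Periodically poll the hardware FDB state under rtnl, then re-arm the delayed work */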
+ rtnl_lock();
+ mlx5_esw_bridge_update(br_offloads);
+ rtnl_unlock();
+
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
+}
+
+void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int err;
+
+ rtnl_lock();
+ br_offloads = mlx5_esw_bridge_init(esw);
+ rtnl_unlock();
+ if (IS_ERR(br_offloads)) {
+ esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ return;
+ }
+
+ br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
+ if (!br_offloads->wq) {
+ esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
+ goto err_alloc_wq;
+ }
+ INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
+
+ br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
+ err = register_switchdev_notifier(&br_offloads->nb);
+ if (err) {
+ esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
+ goto err_register_swdev;
+ }
+
+ br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
+ if (err) {
+ esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
+ goto err_register_swdev_blk;
+ }
+
+ br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
+ err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ if (err) {
+ esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
+ err);
+ goto err_register_netdev;
+ }
+ return;
+
+err_register_netdev:
+ unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
+err_register_swdev_blk:
+ unregister_switchdev_notifier(&br_offloads->nb);
+err_register_swdev:
+ destroy_workqueue(br_offloads->wq);
+err_alloc_wq:
+ mlx5_esw_bridge_cleanup(esw);
+}
+
+void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ br_offloads = esw->br_offloads;
+ if (!br_offloads)
+ return;
+
+ unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
+ unregister_switchdev_notifier(&br_offloads->nb);
+ cancel_delayed_work(&br_offloads->update_work);
+ destroy_workqueue(br_offloads->wq);
+ rtnl_lock();
+ mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h
new file mode 100644
index 000000000000..fbeb64242831
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_BRIDGE__
+#define __MLX5_EN_REP_BRIDGE__
+
+#include "en.h"
+
+#if IS_ENABLED(CONFIG_MLX5_BRIDGE)
+
+void mlx5e_rep_bridge_init(struct mlx5e_priv *priv);
+void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv);
+
+#else /* CONFIG_MLX5_BRIDGE */
+
+static inline void mlx5e_rep_bridge_init(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) {}
+
+#endif /* CONFIG_MLX5_BRIDGE */
+
+#endif /* __MLX5_EN_REP_BRIDGE__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index be0ee03de721..2e9bee4e5209 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ struct mlx5e_encap_entry *e = NULL;
bool neigh_connected, same_dev;
- struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
u8 nud_state, dead;
rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
if (!same_dev)
goto out;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
+ /* mlx5e_get_next_init_encap() releases previous encap before returning
+ * the next one.
+ */
+ while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+ mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 6cdc52d50a48..059799e4f483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
@@ -617,7 +613,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
+ u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
if (chain) {
struct mlx5_rep_uplink_priv *uplink_priv;
@@ -626,7 +622,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5_eswitch *esw;
u32 zone_restore_id;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 5da5e5323a44..91e7a01e32be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -23,7 +23,7 @@
#include "en_tc.h"
#include "en_rep.h"
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
@@ -32,11 +32,11 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
+#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
#define ct_dbg(fmt, args...)\
@@ -150,6 +150,11 @@ struct mlx5_ct_entry {
unsigned long flags;
};
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh);
+
static const struct rhashtable_params cts_ht_params = {
.head_offset = offsetof(struct mlx5_ct_entry, node),
.key_offset = offsetof(struct mlx5_ct_entry, cookie),
@@ -458,8 +463,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
}
@@ -686,15 +690,27 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
- ct_priv->mod_hdr_tbl,
- ct_priv->ns_type,
- &mod_acts);
- if (IS_ERR(*mh)) {
- err = PTR_ERR(*mh);
- goto err_mapping;
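+	/* NATted entries get a dedicated modify header; others share one via the mod_hdr table */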
+ if (nat) {
+ attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(attr->modify_hdr)) {
+ err = PTR_ERR(attr->modify_hdr);
+ goto err_mapping;
+ }
+
+ *mh = NULL;
+ } else {
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
+ &mod_acts);
+ if (IS_ERR(*mh)) {
+ err = PTR_ERR(*mh);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
dealloc_mod_hdr_actions(&mod_acts);
return 0;
@@ -705,6 +721,17 @@ err_mapping:
return err;
}
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh)
+{
+ if (mh)
+ mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
+ else
+ mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
+}
+
static int
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
@@ -767,8 +794,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;
err_rule:
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
@@ -918,7 +944,7 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
}
if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p", entry, rev_entry);
shared_counter = rev_entry->counter;
spin_unlock_bh(&ct_priv->ht_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 69e618d17071..644cf1641cde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -33,15 +33,15 @@ struct mlx5_ct_attr {
#define zone_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
.moffset = 0,\
- .mlen = 2,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_2) + 2,\
+ misc_parameters_2.metadata_reg_c_2),\
}
#define ctstate_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
- .moffset = 2,\
- .mlen = 2,\
+ .moffset = 16,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_2),\
}
@@ -49,7 +49,7 @@ struct mlx5_ct_attr {
#define mark_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_3),\
}
@@ -57,7 +57,7 @@ struct mlx5_ct_attr {
#define labels_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_4),\
}
@@ -65,7 +65,7 @@ struct mlx5_ct_attr {
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -73,20 +73,19 @@ struct mlx5_ct_attr {
#define zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
.moffset = 0,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .mlen = ESW_ZONE_ID_BITS,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_1) + 3,\
+ misc_parameters_2.metadata_reg_c_1),\
}
#define nic_zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
- .moffset = 2,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .moffset = 16,\
+ .mlen = ESW_ZONE_ID_BITS,\
}
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
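+/* moffset and mlen in the mappings above are now expressed in bits, not bytes */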
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 172e0474f2e6..8f79f04eccd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -212,6 +212,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
@@ -295,9 +296,12 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv4_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -324,6 +328,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
char *encap_header;
@@ -396,9 +401,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv4_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -471,6 +479,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
@@ -553,9 +562,11 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv6_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -582,6 +593,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
int ipv6_encap_size;
@@ -654,9 +666,11 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv6_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 593503bc4d07..2e846b741280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -120,6 +120,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
@@ -130,9 +131,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
return;
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
+ &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
@@ -251,9 +255,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
mlx5e_take_tmp_flow(flow, flow_list, 0);
}
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e,
+ match_cb match)
{
struct mlx5e_encap_entry *next = NULL;
@@ -288,7 +295,7 @@ retry:
/* wait for encap to be fully initialized */
wait_for_completion(&next->res_ready);
/* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ if (!match(next)) {
e = next;
goto retry;
}
@@ -296,6 +303,30 @@ retry:
return next;
}
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+ return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+ return e->compl_result >= 0;
+}
+
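+/* Walk encap entries whose initialization completed, whether or not they turned out valid */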
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
@@ -812,6 +843,7 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
@@ -853,10 +885,12 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
mutex_unlock(&esw->offloads.decap_tbl_lock);
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ reformat_params.size = sizeof(parse_attr->eth);
+ reformat_params.data = &parse_attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
- sizeof(parse_attr->eth),
- &parse_attr->eth,
+ &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(d->pkt_reformat)) {
err = PTR_ERR(d->pkt_reformat);
@@ -1505,7 +1539,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 00af0b831a28..d964665eaa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -162,7 +162,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
-static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
@@ -175,8 +175,6 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
-
- return true;
}
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3d45341e2216..7cab08a2f715 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -428,7 +428,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
spin_lock_init(&ipsec->sadb_rx_lock);
ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
- ipsec->en_priv->ipsec = ipsec;
ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
@@ -438,6 +437,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
return -ENOMEM;
}
+ priv->ipsec = ipsec;
mlx5e_accel_ipsec_fs_init(priv);
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!priv->ipsec)
- return;
-
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index a97e8d205094..33de8f0092a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u8 mode,
struct xfrm_offload *xo)
{
- struct mlx5e_swp_spec swp_spec = {};
-
/* Tunnel Mode:
* SWP: OutL3 InL3 InL4
* Pkt: MAC IP ESP IP L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* SWP: OutL3 InL4
* InL3
* Pkt: MAC IP ESP L4
+ *
+ * Tunnel(VXLAN TCP/UDP) over Transport Mode
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP UDP VXLAN IP L4
*/
- swp_spec.l3_proto = skb->protocol;
- swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
- if (swp_spec.is_tun) {
- if (xo->proto == IPPROTO_IPV6) {
- swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
- swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
- } else {
- swp_spec.tun_l3_proto = htons(ETH_P_IP);
- swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
- }
- } else {
- swp_spec.tun_l3_proto = skb->protocol;
- swp_spec.tun_l4_proto = xo->proto;
+
+ /* Shared settings. SWP offsets are expressed in 2-byte words. */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ /* Tunnel mode */
+ if (mode == XFRM_MODE_TUNNEL) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ if (xo->proto == IPPROTO_IPV6)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ return;
+ }
+
+ /* Transport mode */
+ if (mode != XFRM_MODE_TRANSPORT)
+ return;
+
+ if (!xo->inner_ipproto) {
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ if (xo->proto == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ return;
+ }
+
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
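+		/* csum_start is relative to skb->head; convert it to an offset from the data pointer */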
+ eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
}
- mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 3e80742a3caf..5120a59361e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -93,18 +93,38 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg);
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features)
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
{
+ struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp = skb_sec_path(skb);
- if (sp && sp->len) {
+ if (sp && sp->len && xo) {
struct xfrm_state *x = sp->xvec[0];
- if (x && x->xso.offload_handle)
- return true;
+ if (!x || !x->xso.offload_handle)
+ goto out_disable;
+
+ if (xo->inner_ipproto) {
+ /* Cannot support a tunneled packet over IPsec tunnel mode
+ * because we cannot offload checksums for three IP headers
+ */
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ goto out_disable;
+
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
+ }
+
+ return features;
}
- return false;
+
+ /* Disable CSUM and GSO for software IPsec */
+out_disable:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
#else
@@ -120,8 +140,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
}
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features) { return false; }
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 95293ee0d38d..d93aadbf10da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -59,12 +59,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (mlx5_accel_is_ktls_tx(mdev)) {
+ if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ return;
+
+ if (mlx5e_accel_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -89,7 +92,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -109,7 +112,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index aaa579bf9a39..5833deb2354c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -15,6 +15,25 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
+
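+/* kTLS device offload stays disabled in kdump kernels, which run with minimal memory */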
+static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_tx(mdev);
+}
+
+static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_rx(mdev);
+}
+
+static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(mdev);
+}
+
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -44,6 +63,11 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
+
+static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 51bdf71073f3..9ad3459fb63a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -23,10 +23,13 @@ mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
+u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
u16 num_dumps, stop_room = 0;
+ if (!mlx5e_accel_is_ktls_tx(mdev))
+ return 0;
+
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
@@ -135,6 +138,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
priv = netdev_priv(netdev);
mdev = priv->mdev;
+ atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
mlx5e_destroy_tis(mdev, priv_tx->tisn);
mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
kfree(priv_tx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 8f79335057dc..08c9d5134479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state {
u32 tls_tisn;
};
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
+u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d6b21b899dbc..b8fc863aa68d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -192,13 +192,13 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
u32 caps;
- if (mlx5_accel_is_ktls_device(priv->mdev)) {
+ if (mlx5e_accel_is_ktls_device(priv->mdev)) {
mlx5e_ktls_build_netdev(priv);
return;
}
/* FPGA */
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return;
caps = mlx5_accel_tls_device_caps(priv->mdev);
@@ -224,7 +224,7 @@ int mlx5e_tls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls;
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index 4c9274d390da..62ecf14bf86a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -42,6 +42,7 @@
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
+ atomic64_t tx_tls_del;
atomic64_t tx_tls_drop_metadata;
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
@@ -103,11 +104,18 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_tls_device(mdev);
+}
+
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (!is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(priv->mdev))
mlx5e_ktls_build_netdev(priv);
}
@@ -117,6 +125,7 @@ static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 82dc09aaa7fc..7a700f913582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -273,7 +273,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
- if (mlx5_accel_is_ktls_tx(sq->mdev))
+ if (mlx5e_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
@@ -378,11 +378,11 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5_accel_is_tls_device(mdev))
+ if (!mlx5e_accel_is_tls_device(mdev))
return 0;
- if (mlx5_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(params);
+ if (mlx5e_accel_is_ktls_device(mdev))
+ return mlx5e_ktls_get_stop_room(mdev, params);
/* FPGA */
/* Resync SKB. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index 29463bdb7715..56e7b2aee85f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -47,6 +47,7 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
@@ -58,7 +59,7 @@ static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
if (!priv->tls)
return NULL;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return mlx5e_ktls_sw_stats_desc;
return mlx5e_tls_sw_stats_desc;
}
@@ -67,7 +68,7 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5cd466ec6492..25403af32859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -356,7 +356,7 @@ err:
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err = -ENOMEM;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8360289813f0..bd72572e03d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ unsigned long fec_bitmap;
u16 fec_policy = 0;
int mode;
int err;
- if (bitmap_weight((unsigned long *)&fecparam->fec,
- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
return -EOPNOTSUPP;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
+ if (new_val && !priv->profile->rx_ptp_support &&
+ priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ netdev_err(priv->netdev,
+ "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
@@ -1984,7 +1992,7 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
- if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ if (enable && !mlx5e_tx_mpwqe_supported(mdev))
return -EOPNOTSUPP;
new_params = priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0d571a0c76d9..0b75fab41ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bca832cdc4cb..414a73d16619 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -91,12 +91,16 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
+ bool up;
port_state = mlx5_query_vport_state(mdev,
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
- if (port_state == VPORT_STATE_UP) {
+ up = port_state == VPORT_STATE_UP;
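+	/* Emit a carrier event even when the state is unchanged, so listeners are notified */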
+ if (up == netif_carrier_ok(priv->netdev))
+ netif_carrier_event(priv->netdev);
+ if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
} else {
@@ -853,7 +857,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev))
+ if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
@@ -889,10 +893,13 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
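+		/* Disable bottom halves so the softirq raised by napi_schedule runs on enable */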
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,13 +2704,11 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
- if (priv->channels.params.ptp_rx)
- num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -3855,6 +3860,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
+ if (mlx5e_is_uplink_rep(priv)) {
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+ }
+
mutex_unlock(&priv->state_lock);
return features;
@@ -3971,11 +3986,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
return mlx5e_ptp_rx_manage_fs(priv, set);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+ bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+ int err;
+
+ if (!rx_filter)
+ /* Reset CQE compression to Admin default */
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+ if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ return 0;
+
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err)
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+ return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
{
struct mlx5e_params new_params;
+
+ if (ptp_rx == priv->channels.params.ptp_rx)
+ return 0;
+
+ new_params = priv->channels.params;
+ new_params.ptp_rx = ptp_rx;
+ return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
struct hwtstamp_config config;
bool rx_cqe_compress_def;
+ bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3995,13 +4044,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
- new_params = priv->channels.params;
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- new_params.ptp_rx = false;
+ ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4018,24 +4066,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
+ /* ptp_rx is enabled only when HW timestamping is requested
+ * and CQE compression is enabled
+ */
+ ptp_rx = rx_cqe_compress_def;
break;
default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
+ err = -ERANGE;
+ goto err_unlock;
}
- if (new_params.ptp_rx == priv->channels.params.ptp_rx)
- goto out;
+ if (!priv->profile->rx_ptp_support)
+ err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+ else
+ err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+ if (err)
+ goto err_unlock;
- err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
- &new_params.ptp_rx, true);
- if (err) {
- mutex_unlock(&priv->state_lock);
- return err;
- }
-out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
@@ -4044,6 +4093,9 @@ out:
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4279,6 +4331,11 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
return features;
#endif
+ break;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ case IPPROTO_ESP:
+ return mlx5e_ipsec_feature_check(skb, features);
+#endif
}
out:
@@ -4295,9 +4352,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
- if (mlx5e_ipsec_feature_check(skb, netdev, features))
- return features;
-
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
(features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -4613,12 +4667,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* XDP SQ */
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* set CQE compression */
params->rx_cqe_compress_def = false;
@@ -4774,22 +4826,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5062,7 +5107,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
- mlx5_lag_add(mdev, netdev);
+ mlx5_lag_add_netdev(mdev, netdev);
mlx5e_enable_async_events(priv);
mlx5e_enable_blocking_events(priv);
@@ -5110,7 +5155,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
priv->en_trap = NULL;
}
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
}
@@ -5229,6 +5274,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+ netdev_reset_tc(netdev);
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5333,7 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
@@ -5300,6 +5351,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 34eb1118670f..bf94bcb6fa5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -45,11 +45,13 @@
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
+#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
+#include "en_accel/ipsec.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -536,13 +538,13 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_change_carrier = mlx5e_rep_change_carrier,
};
-bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
+bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops &&
mlx5e_is_uplink_rep(netdev_priv(netdev));
}
-bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
@@ -629,6 +631,11 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = mlx5e_ipsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
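+	/* An IPsec init failure is logged but does not fail uplink rep init */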
mlx5e_vxlan_set_netdev_info(priv);
return mlx5e_init_rep(mdev, netdev);
@@ -636,6 +643,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_ipsec_cleanup(priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -975,12 +983,13 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
if (MLX5_CAP_GEN(mdev, uplink_follow))
mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
- mlx5_lag_add(mdev, netdev);
+ mlx5_lag_add_netdev(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
+ mlx5e_rep_bridge_init(priv);
netdev->wanted_features |= NETIF_F_HW_TC;
@@ -1002,11 +1011,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
netif_device_detach(priv->netdev);
rtnl_unlock();
+ mlx5e_rep_bridge_cleanup(priv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove_netdev(mdev, priv->netdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 22585015c7a7..47a2dfb7792a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -231,9 +231,9 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
-bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
-bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
-static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(const struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
{
return mlx5e_eswitch_vf_rep(netdev) ||
mlx5e_eswitch_uplink_rep(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f90894eea9e0..3c65fd0bcf31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -579,6 +579,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
return false;
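+	/* Update the page pool's preferred NUMA node before refilling buffers */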
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
do {
u16 head = mlx5_wq_cyc_get_head(wq);
@@ -734,6 +737,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (likely(missing < UMR_WQE_BULK))
return false;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
@@ -1310,7 +1316,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1367,7 +1374,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
@@ -1553,12 +1561,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (rq->page_pool)
- page_pool_nid_changed(rq->page_pool, numa_mem_id());
-
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- if (rq->cqd.left || work_done >= budget)
+ if (work_done >= budget)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 47a9c49b25fd..629a61e8022f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -83,17 +83,17 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[VPORT_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
- .moffset = 2,
- .mlen = 2,
+ .moffset = 16,
+ .mlen = 16,
},
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
- .moffset = 1,
- .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
+ .moffset = 8,
+ .mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
@@ -110,7 +110,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[NIC_CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
@@ -128,23 +128,46 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 data,
+ u32 val,
u32 mask)
{
+ void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
- void *headers_c = spec->match_criteria;
- void *headers_v = spec->match_value;
- void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
- mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
- data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);
+
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
+
+	/* Shift the new value and mask to the field's bit offset. */
+ WARN_ON(mask > max_mask);
+ mask <<= moffset;
+ val <<= moffset;
+ max_mask <<= moffset;
+
+	/* Clear the field's current value and mask bits. */
+ curr_mask &= ~max_mask;
+ curr_val &= ~max_mask;
+
+	/* OR the new value and mask into the current words. */
+ curr_mask |= mask;
+ curr_val |= val;
+
+	/* Convert back to be32 and write out. */
+ curr_mask_be = cpu_to_be32(curr_mask);
+ curr_val_be = cpu_to_be32(curr_val);
- memcpy(fmask, &mask, match_len);
- memcpy(fval, &data, match_len);
+ memcpy(fmask, &curr_mask_be, 4);
+ memcpy(fval, &curr_val_be, 4);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
@@ -152,23 +175,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 *data,
+ u32 *val,
u32 *mask)
{
+ void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
- void *headers_c = spec->match_criteria;
- void *headers_v = spec->match_value;
- void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
- memcpy(mask, fmask, match_len);
- memcpy(data, fval, match_len);
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);
+
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
- *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
- *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
+ *mask = (curr_mask >> moffset) & max_mask;
+ *val = (curr_val >> moffset) & max_mask;
}
int
@@ -192,13 +220,13 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
(mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
/* Firmware has a 5-bit length field, and 0 means 32 bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
@@ -296,13 +324,13 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
/* Firmware has a 5-bit length field, and 0 means 32 bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
}
@@ -818,7 +846,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
- params.log_data_size = 15;
+ params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.log_data_size = max_t(u8, params.log_data_size,
@@ -1322,10 +1350,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1399,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
@@ -1393,6 +1427,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
@@ -2003,11 +2043,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ enum fs_flow_table_type fs_type;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
+ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
match_level = outer_match_level;
if (dissector->used_keys &
@@ -2133,6 +2175,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+ fs_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on CVLAN is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
@@ -3526,8 +3575,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
@@ -4740,7 +4793,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
- hpe->hp->pair->peer_gone = true;
+ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe);
}
@@ -5074,13 +5127,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
chain = mapped_obj.chain;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (WARN_ON(!tc_skb_ext))
return false;
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
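The en_tc.c rework above switches mlx5e_tc_attr_to_reg_mappings from byte to bit granularity (moffset/mlen) and replaces the old truncating memcpy() with a read-modify-write of the whole 32-bit big-endian spec word, so two mappings can share one register (CHAIN_TO_REG in bits 0-15 and VPORT_TO_REG in bits 16-31 of reg_c_0). A condensed userspace model of that update logic, assuming a little-endian host where __builtin_bswap32() stands in for the kernel's be32 helpers:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    /* Merge (val, mask) into big-endian match words at a bit offset,
     * preserving whatever another mapping already wrote there.
     */
    static void reg_match_update(uint8_t *fval, uint8_t *fmask,
    			     int moffset, int mlen,
    			     uint32_t val, uint32_t mask)
    {
    	uint32_t field = GENMASK(mlen - 1, 0) << moffset;
    	uint32_t curr_val, curr_mask;

    	assert(mask <= GENMASK(mlen - 1, 0));

    	memcpy(&curr_val, fval, 4);	/* words are stored as be32 */
    	memcpy(&curr_mask, fmask, 4);
    	curr_val = __builtin_bswap32(curr_val);
    	curr_mask = __builtin_bswap32(curr_mask);

    	curr_val = (curr_val & ~field) | (val << moffset);
    	curr_mask = (curr_mask & ~field) | (mask << moffset);

    	curr_val = __builtin_bswap32(curr_val);
    	curr_mask = __builtin_bswap32(curr_mask);
    	memcpy(fval, &curr_val, 4);
    	memcpy(fmask, &curr_mask, 4);
    }

With the VPORT_TO_REG entry above (moffset = 16, mlen = 16), this touches only the upper half of reg_c_0 and leaves a chain match in the lower half intact, something the old length-truncating copy could not guarantee.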
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 25c091795bcd..f7cbeb0b66d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -129,7 +129,7 @@ struct tunnel_match_enc_opts {
*/
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 12
+#define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -198,10 +201,10 @@ enum mlx5e_tc_attr_to_reg {
struct mlx5e_tc_attr_to_reg_mapping {
int mfield; /* rewrite field */
- int moffset; /* offset of mfield */
- int mlen; /* bytes to rewrite/match */
+ int moffset; /* bit offset of mfield */
+ int mlen; /* bits to rewrite/match */
- int soffset; /* offset of spec for match */
+ int soffset; /* byte offset of spec for match */
};
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
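One arithmetic consequence of the ENC_OPTS_BITS change above: TUNNEL_ID_BITS shrinks from 12 + 12 = 24 to 12 + 11 = 23 bits, handing one bit of the mapped reg_c_1 range back to the hardware. The mlx5_ipsec_is_rx_flow() checks added in the en_rx.c hunks suggest the freed bit distinguishes IPsec RX traffic, though this excerpt does not state that explicitly. A trivial userspace check of the new budget:

    #include <assert.h>

    #define TUNNEL_INFO_BITS 12
    #define ENC_OPTS_BITS    11	/* was 12 before this change */
    #define TUNNEL_ID_BITS   (TUNNEL_INFO_BITS + ENC_OPTS_BITS)

    int main(void)
    {
    	assert(TUNNEL_ID_BITS == 23);
    	return 0;
    }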
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8ba62671f5f1..c63d78eda606 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,7 +32,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
- struct flow_keys fk;
-
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return false;
-
- if (fk.basic.n_proto == htons(ETH_P_1588))
- return true;
-
- if (fk.basic.n_proto != htons(ETH_P_IP) &&
- fk.basic.n_proto != htons(ETH_P_IPV6))
- return false;
-
- return (fk.basic.ip_proto == IPPROTO_UDP &&
- fk.ports.dst == htons(PTP_EV_PORT));
-}
-
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel) &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb))
+ if (unlikely(ptp_channel &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb)))
return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
@@ -706,16 +687,12 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
-static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
- return false;
-
+ mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
-
- return true;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -744,10 +721,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
- attr.ihs)))
- return NETDEV_TX_OK;
-
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
return NETDEV_TX_OK;
}
@@ -762,9 +736,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
- return NETDEV_TX_OK;
-
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 77c0ca655975..6e074cc457de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/interrupt.h>
@@ -45,6 +18,7 @@
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
+#include "mlx5_irq.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -84,6 +58,9 @@ struct mlx5_eq_table {
struct mutex lock; /* sync async eqs creations */
int num_comp_eqs;
struct mlx5_irq_table *irq_table;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -136,7 +113,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq);
if (!eqe)
- return 0;
+ goto out;
do {
struct mlx5_core_cq *cq;
@@ -161,6 +138,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
eq_update_ci(eq, 1);
if (cqn != -1)
@@ -248,9 +227,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
- eq_update_ci(eq, 1);
out:
+ eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
@@ -286,7 +265,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
- u8 vecidx = param->irq_index;
+ u16 vecidx = param->irq_index;
__be64 *pas;
void *eqc;
int inlen;
@@ -309,13 +288,20 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
+ eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
+ if (IS_ERR(eq->irq)) {
+ err = PTR_ERR(eq->irq);
+ goto err_buf;
+ }
+
+ vecidx = mlx5_irq_get_index(eq->irq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
- goto err_buf;
+ goto err_irq;
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
@@ -359,6 +345,8 @@ err_eq:
err_in:
kvfree(in);
+err_irq:
+ mlx5_irq_release(eq->irq);
err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
@@ -377,10 +365,9 @@ err_buf:
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
- struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
+ err = mlx5_irq_attach_nb(eq->irq, nb);
if (!err)
eq_update_ci(eq, 1);
@@ -399,9 +386,7 @@ EXPORT_SYMBOL(mlx5_eq_enable);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
- struct mlx5_eq_table *eq_table = dev->priv.eq_table;
-
- mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
+ mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
@@ -415,10 +400,9 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(eq->irqn);
+ mlx5_irq_release(eq->irq);
mlx5_frag_buf_free(dev, &eq->frag_buf);
-
return err;
}
@@ -490,14 +474,7 @@ static int create_async_eq(struct mlx5_core_dev *dev,
int err;
mutex_lock(&eq_table->lock);
- /* Async EQs must share irq index 0 */
- if (param->irq_index != 0) {
- err = -EINVAL;
- goto unlock;
- }
-
err = create_map_eq(dev, eq, param);
-unlock:
mutex_unlock(&eq_table->lock);
return err;
}
@@ -616,8 +593,11 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
eq->irq_nb.notifier_call = mlx5_eq_async_int;
spin_lock_init(&eq->lock);
+ if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
+ return -ENOMEM;
err = create_async_eq(dev, &eq->core, param);
+ free_cpumask_var(param->affinity);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
@@ -652,7 +632,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -665,7 +644,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -675,7 +653,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -735,6 +712,9 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
int err;
+ if (!cpumask_available(param->affinity))
+ return ERR_PTR(-EINVAL);
+
if (!eq)
return ERR_PTR(-ENOMEM);
@@ -845,16 +825,21 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
.irq_index = vecidx,
.nent = nent,
};
- err = create_map_eq(dev, &eq->core, &param);
- if (err) {
- kfree(eq);
- goto clean;
+
+ if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto clean_eq;
}
+ cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
+ param.affinity);
+ err = create_map_eq(dev, &eq->core, &param);
+ free_cpumask_var(param.affinity);
+ if (err)
+ goto clean_eq;
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
destroy_unmap_eq(dev, &eq->core);
- kfree(eq);
- goto clean;
+ goto clean_eq;
}
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
@@ -863,7 +848,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
}
return 0;
-
+clean_eq:
+ kfree(eq);
clean:
destroy_comp_eqs(dev);
return err;
@@ -899,17 +885,23 @@ EXPORT_SYMBOL(mlx5_comp_vectors_count);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
- int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+ int i = 0;
+
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ if (i++ == vector)
+ break;
+ }
- return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
- vecidx);
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
- return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
+ return dev->priv.eq_table->rmap;
}
#endif
@@ -926,12 +918,57 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
return ERR_PTR(-ENOENT);
}
+static void clear_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+
+ free_irq_cpu_rmap(eq_table->rmap);
+#endif
+}
+
+static int set_rmap(struct mlx5_core_dev *mdev)
+{
+ int err = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
+ int vecidx;
+
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ if (!eq_table->rmap) {
+ err = -ENOMEM;
+ mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
+ goto err_out;
+ }
+
+ vecidx = MLX5_IRQ_VEC_COMP_BASE;
+ for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
+ vecidx++) {
+ err = irq_cpu_rmap_add(eq_table->rmap,
+ pci_irq_vector(mdev->pdev, vecidx));
+ if (err) {
+ mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
+ err);
+ goto err_irq_cpu_rmap_add;
+ }
+ }
+ return 0;
+
+err_irq_cpu_rmap_add:
+ clear_rmap(mdev);
+err_out:
+#endif
+ return err;
+}
+
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
mlx5_irq_table_destroy(dev);
mutex_unlock(&table->lock);
}
@@ -948,12 +985,19 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int max_eqs_sf;
int err;
eq_table->num_comp_eqs =
min_t(int,
- mlx5_irq_get_num_comp(eq_table->irq_table),
+ mlx5_irq_table_get_num_comp(eq_table->irq_table),
num_eqs - MLX5_MAX_ASYNC_EQS);
+ if (mlx5_core_is_sf(dev)) {
+ max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
+ eq_table->num_comp_eqs = min_t(int, eq_table->num_comp_eqs,
+ max_eqs_sf);
+ }
err = create_async_eqs(dev);
if (err) {
@@ -961,6 +1005,18 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
goto err_async_eqs;
}
+ if (!mlx5_core_is_sf(dev)) {
+		/* rmap is a mapping between an IRQ number and a queue number.
+		 * Each IRQ can be assigned to only a single rmap.
+		 * Since SFs share IRQs, rmap mapping cannot function correctly
+		 * for IRQs that are shared between different core/netdev RX rings.
+		 * Hence we don't allow netdev rmap for SFs.
+		 */
+ err = set_rmap(dev);
+ if (err)
+ goto err_rmap;
+ }
+
err = create_comp_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create completion EQs\n");
@@ -969,6 +1025,9 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
return 0;
err_comp_eqs:
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -976,6 +1035,8 @@ err_async_eqs:
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
destroy_comp_eqs(dev);
destroy_async_eqs(dev);
}
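create_comp_eqs() above now hands each EQ a one-CPU affinity mask built with cpumask_local_spread(), which enumerates CPUs on the given NUMA node before remote ones. A hedged sketch of the resulting loop (setup_comp_eq() is a hypothetical consumer; the mask can be freed right after the call because the IRQ layer consumes it during the request, matching the free_cpumask_var() placement in the diff):

    /* Spread completion EQs over CPUs near the device's NUMA node. */
    for (i = 0; i < num_comp_eqs; i++) {
    	cpumask_var_t affinity;

    	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
    		return -ENOMEM;
    	/* e.g. with CPUs 4-7 on the local node, i = 0..3 -> CPU 4,5,6,7 */
    	cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
    			affinity);
    	err = setup_comp_eq(dev, i, affinity);
    	free_cpumask_var(affinity);
    	if (err)
    		return err;
    }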
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
new file mode 100644
index 000000000000..a6e1d4f78268
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+#include "bridge.h"
+#include "eswitch.h"
+#include "bridge_priv.h"
+#define CREATE_TRACE_POINTS
+#include "diag/bridge_tracepoint.h"
+
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+
+#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
+
+enum {
+ MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
+};
+
+static const struct rhashtable_params fdb_ht_params = {
+ .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
+ .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
+ .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+enum {
+ MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
+};
+
+struct mlx5_esw_bridge {
+ int ifindex;
+ int refcnt;
+ struct list_head list;
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
+ struct list_head fdb_list;
+ struct rhashtable fdb_ht;
+ struct xarray vports;
+
+ struct mlx5_flow_table *egress_ft;
+ struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_mac_fg;
+ unsigned long ageing_time;
+ u32 flags;
+};
+
+static void
+mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
+ unsigned long val)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = addr;
+ send_info.vid = vid;
+ send_info.offloaded = true;
+ call_switchdev_notifiers(val, dev, &send_info.info, NULL);
+}
+
+static struct mlx5_flow_table *
+mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.max_fte = max_fte;
+ ft_attr.level = level;
+ ft_attr.prio = FDB_BR_OFFLOAD;
+ fdb = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb))
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+
+ return fdb;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
+ PTR_ERR(fg));
+
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
+ PTR_ERR(fg));
+
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
+ PTR_ERR(fg));
+
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table MAC flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
+static int
+mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_table *ingress_ft, *skip_ft;
+ int err;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ return -EOPNOTSUPP;
+
+ ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(ingress_ft))
+ return PTR_ERR(ingress_ft);
+
+ skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(skip_ft)) {
+ err = PTR_ERR(skip_ft);
+ goto err_skip_tbl;
+ }
+
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(vlan_fg)) {
+ err = PTR_ERR(vlan_fg);
+ goto err_vlan_fg;
+ }
+
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(filter_fg)) {
+ err = PTR_ERR(filter_fg);
+ goto err_filter_fg;
+ }
+
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(mac_fg)) {
+ err = PTR_ERR(mac_fg);
+ goto err_mac_fg;
+ }
+
+ br_offloads->ingress_ft = ingress_ft;
+ br_offloads->skip_ft = skip_ft;
+ br_offloads->ingress_vlan_fg = vlan_fg;
+ br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_mac_fg = mac_fg;
+ return 0;
+
+err_mac_fg:
+ mlx5_destroy_flow_group(filter_fg);
+err_filter_fg:
+ mlx5_destroy_flow_group(vlan_fg);
+err_vlan_fg:
+ mlx5_destroy_flow_table(skip_ft);
+err_skip_tbl:
+ mlx5_destroy_flow_table(ingress_ft);
+ return err;
+}
+
+static void
+mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
+ br_offloads->ingress_mac_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
+ br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
+ br_offloads->ingress_vlan_fg = NULL;
+ mlx5_destroy_flow_table(br_offloads->skip_ft);
+ br_offloads->skip_ft = NULL;
+ mlx5_destroy_flow_table(br_offloads->ingress_ft);
+ br_offloads->ingress_ft = NULL;
+}
+
+static int
+mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_table *egress_ft;
+ int err;
+
+ egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(egress_ft))
+ return PTR_ERR(egress_ft);
+
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ if (IS_ERR(vlan_fg)) {
+ err = PTR_ERR(vlan_fg);
+ goto err_vlan_fg;
+ }
+
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ if (IS_ERR(mac_fg)) {
+ err = PTR_ERR(mac_fg);
+ goto err_mac_fg;
+ }
+
+ bridge->egress_ft = egress_ft;
+ bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_mac_fg = mac_fg;
+ return 0;
+
+err_mac_fg:
+ mlx5_destroy_flow_group(vlan_fg);
+err_vlan_fg:
+ mlx5_destroy_flow_table(egress_ft);
+ return err;
+}
+
+static void
+mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
+{
+ mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_vlan_fg);
+ mlx5_destroy_flow_table(bridge->egress_ft);
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_destination dests[2] = {};
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *smac_v, *smac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
+
+ smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, addr);
+ smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.smac_47_16);
+ eth_broadcast_addr(smac_c);
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
+
+ if (vlan && vlan->pkt_reformat_push) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ } else if (vlan) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
+ vlan->vid);
+ }
+
+ dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dests[0].ft = bridge->egress_ft;
+ dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dests[1].counter_id = counter_id;
+
+ handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
+ ARRAY_SIZE(dests));
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = br_offloads->skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *smac_v, *smac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
+
+ smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, addr);
+ smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.smac_47_16);
+ eth_broadcast_addr(smac_c);
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+
+ handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ .vport.num = vport_num,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *dmac_v, *dmac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, addr);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dmac_47_16);
+ eth_broadcast_addr(dmac_c);
+
+ if (vlan) {
+ if (vlan->pkt_reformat_pop) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.pkt_reformat = vlan->pkt_reformat_pop;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
+ vlan->vid);
+ }
+
+ handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge *bridge;
+ int err;
+
+ bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ bridge->br_offloads = br_offloads;
+ err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
+ if (err)
+ goto err_egress_tbl;
+
+ err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
+ if (err)
+ goto err_fdb_ht;
+
+ INIT_LIST_HEAD(&bridge->fdb_list);
+ xa_init(&bridge->vports);
+ bridge->ifindex = ifindex;
+ bridge->refcnt = 1;
+ bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+ list_add(&bridge->list, &br_offloads->bridges);
+
+ return bridge;
+
+err_fdb_ht:
+ mlx5_esw_bridge_egress_table_cleanup(bridge);
+err_egress_tbl:
+ kvfree(bridge);
+ return ERR_PTR(err);
+}
+
+static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
+{
+ bridge->refcnt++;
+}
+
+static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge)
+{
+ if (--bridge->refcnt)
+ return;
+
+ mlx5_esw_bridge_egress_table_cleanup(bridge);
+ WARN_ON(!xa_empty(&bridge->vports));
+ list_del(&bridge->list);
+ rhashtable_destroy(&bridge->fdb_ht);
+ kvfree(bridge);
+
+ if (list_empty(&br_offloads->bridges))
+ mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
+}
+
+static struct mlx5_esw_bridge *
+mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge *bridge;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(bridge, &br_offloads->bridges, list) {
+ if (bridge->ifindex == ifindex) {
+ mlx5_esw_bridge_get(bridge);
+ return bridge;
+ }
+ }
+
+ if (!br_offloads->ingress_ft) {
+ int err = mlx5_esw_bridge_ingress_table_init(br_offloads);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
+ if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
+ mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
+ return bridge;
+}
+
+static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
+}
+
+static struct mlx5_esw_bridge_port *
+mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
+{
+ return xa_load(&bridge->vports, vport_num);
+}
+
+static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ xa_erase(&bridge->vports, port->vport_num);
+}
+
+static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse,
+ struct mlx5_esw_bridge_fdb_entry *entry)
+{
+ trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
+
+ entry->lastuse = lastuse;
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE);
+}
+
+static void
+mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
+ struct mlx5_esw_bridge *bridge)
+{
+ trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);
+
+ rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
+ mlx5_del_flow_rules(entry->egress_handle);
+ if (entry->filter_handle)
+ mlx5_del_flow_rules(entry->filter_handle);
+ mlx5_del_flow_rules(entry->ingress_handle);
+ mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
+ list_del(&entry->vlan_list);
+ list_del(&entry->list);
+ kvfree(entry);
+}
+
+static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
+{
+ return xa_load(&port->vlans, vid);
+}
+
+static int
+mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+
+	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
+ offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(vlan_hdr);
+ reformat_params.data = &vlan_hdr;
+ pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(pkt_reformat)) {
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
+ PTR_ERR(pkt_reformat));
+ return PTR_ERR(pkt_reformat);
+ }
+
+ vlan->pkt_reformat_push = pkt_reformat;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
+ vlan->pkt_reformat_push = NULL;
+}
+
+static int
+mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+
+	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
+ offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(pkt_reformat)) {
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(pkt_reformat));
+ return PTR_ERR(pkt_reformat);
+ }
+
+ vlan->pkt_reformat_pop = pkt_reformat;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
+ vlan->pkt_reformat_pop = NULL;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ if (err)
+ goto err_vlan_push;
+ }
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
+ if (err)
+ goto err_vlan_pop;
+ }
+
+ err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
+ if (err)
+ goto err_xa_insert;
+
+ trace_mlx5_esw_bridge_vlan_create(vlan);
+ return vlan;
+
+err_xa_insert:
+ if (vlan->pkt_reformat_pop)
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+err_vlan_pop:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+err_vlan_push:
+ kvfree(vlan);
+ return ERR_PTR(err);
+}
+
+static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ xa_erase(&port->vlans, vlan->vid);
+}
+
+static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+
+ if (vlan->pkt_reformat_pop)
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+}
+
+static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ trace_mlx5_esw_bridge_vlan_cleanup(vlan);
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ mlx5_esw_bridge_vlan_erase(port, vlan);
+ kvfree(vlan);
+}
+
+static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
+ if (!port) {
+		/* The FDB entry is added asynchronously on the wq, so the port may
+		 * have been deleted concurrently. Report at 'info' level and skip the FDB offload.
+ */
+ esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (!vlan) {
+		/* The FDB entry is added asynchronously on the wq, so the vlan may
+		 * have been deleted concurrently. Report at 'info' level and skip the FDB offload.
+ */
+ esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
+ vport_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
+static struct mlx5_esw_bridge_fdb_entry *
+mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
+ u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_vlan *vlan = NULL;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_flow_handle *handle;
+ struct mlx5_fc *counter;
+ struct mlx5e_priv *priv;
+ int err;
+
+ if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
+ vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
+ if (IS_ERR(vlan))
+ return ERR_CAST(vlan);
+ }
+
+ priv = netdev_priv(dev);
+ entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(entry->key.addr, addr);
+ entry->key.vid = vid;
+ entry->dev = dev;
+ entry->vport_num = vport_num;
+ entry->lastuse = jiffies;
+ if (added_by_user)
+ entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
+
+ counter = mlx5_fc_create(priv->mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_ingress_fc_create;
+ }
+ entry->ingress_counter = counter;
+
+ handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
+ bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_ingress_flow_create;
+ }
+ entry->ingress_handle = handle;
+
+ if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
+ handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_ingress_filter_flow_create;
+ }
+ entry->filter_handle = handle;
+ }
+
+ handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_egress_flow_create;
+ }
+ entry->egress_handle = handle;
+
+ err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
+ if (err) {
+ esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
+ goto err_ht_init;
+ }
+
+ if (vlan)
+ list_add(&entry->vlan_list, &vlan->fdb_list);
+ else
+ INIT_LIST_HEAD(&entry->vlan_list);
+ list_add(&entry->list, &bridge->fdb_list);
+
+ trace_mlx5_esw_bridge_fdb_entry_init(entry);
+ return entry;
+
+err_ht_init:
+ mlx5_del_flow_rules(entry->egress_handle);
+err_egress_flow_create:
+ if (entry->filter_handle)
+ mlx5_del_flow_rules(entry->filter_handle);
+err_ingress_filter_flow_create:
+ mlx5_del_flow_rules(entry->ingress_handle);
+err_ingress_flow_create:
+ mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
+err_ingress_fc_create:
+ kvfree(entry);
+ return ERR_PTR(err);
+}
+
+int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->bridge)
+ return -EINVAL;
+
+ vport->bridge->ageing_time = ageing_time;
+ return 0;
+}
+
+int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge *bridge;
+ bool filtering;
+
+ if (!vport->bridge)
+ return -EINVAL;
+
+ bridge = vport->bridge;
+ filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+ if (filtering == enable)
+ return 0;
+
+ mlx5_esw_bridge_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+ else
+ bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+
+ return 0;
+}
+
+static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_esw_bridge_port *port;
+ int err;
+
+ port = kvzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ port->vport_num = vport->vport;
+ xa_init(&port->vlans);
+ err = mlx5_esw_bridge_port_insert(port, bridge);
+ if (err) {
+ esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
+ vport->vport, err);
+ goto err_port_insert;
+ }
+ trace_mlx5_esw_bridge_vport_init(port);
+
+ vport->bridge = bridge;
+ return 0;
+
+err_port_insert:
+ kvfree(port);
+err_port_alloc:
+ mlx5_esw_bridge_put(br_offloads, bridge);
+ return err;
+}
+
+static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+ struct mlx5_esw_bridge_port *port;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ if (entry->vport_num == vport->vport)
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
+ if (!port) {
+ WARN(1, "Vport %u metadata not found on bridge", vport->vport);
+ return -EINVAL;
+ }
+
+ trace_mlx5_esw_bridge_vport_cleanup(port);
+ mlx5_esw_bridge_port_vlans_flush(port, bridge);
+ mlx5_esw_bridge_port_erase(port, bridge);
+ kvfree(port);
+ mlx5_esw_bridge_put(br_offloads, bridge);
+ vport->bridge = NULL;
+ return 0;
+}
+
+int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge *bridge;
+ int err;
+
+ WARN_ON(vport->bridge);
+
+ bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
+ if (IS_ERR(bridge)) {
+ NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
+ return PTR_ERR(bridge);
+ }
+
+ err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
+ return err;
+}
+
+int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ int err;
+
+ if (!bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
+ return -EINVAL;
+ }
+ if (bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
+ return -EINVAL;
+ }
+
+ err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
+ return err;
+}
+
+int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ if (!port)
+ return -EINVAL;
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+ }
+
+ vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
+ return PTR_ERR(vlan);
+ }
+ return 0;
+}
+
+void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ if (!port)
+ return;
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (!vlan)
+ return;
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+}
+
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ u16 vport_num = vport->vport;
+
+ if (!bridge) {
+ esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ return;
+ }
+
+ entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
+ fdb_info->added_by_user, esw, bridge);
+ if (IS_ERR(entry))
+ return;
+
+ if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_OFFLOADED);
+ else
+ /* Take over dynamic entries to prevent kernel bridge from aging them out. */
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE);
+}
+
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_esw_bridge_fdb_key key;
+ u16 vport_num = vport->vport;
+
+ if (!bridge) {
+ esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ return;
+ }
+
+ ether_addr_copy(key.addr, fdb_info->addr);
+ key.vid = fdb_info->vid;
+ entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ if (!entry) {
+ esw_warn(esw->dev,
+ "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ key.addr, key.vid, vport_num);
+ return;
+ }
+
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+}
+
+void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+ struct mlx5_esw_bridge *bridge;
+
+ list_for_each_entry(bridge, &br_offloads->bridges, list) {
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ unsigned long lastuse =
+ (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
+
+ if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ continue;
+
+ if (time_after(lastuse, entry->lastuse)) {
+ mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry);
+ } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+ }
+ }
+}
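
The refresh-or-expire decision above is plain jiffies arithmetic. A minimal sketch restating it with the standard helpers from <linux/jiffies.h>; the function name is illustrative, not part of the driver:

/* Illustrative only: mirrors the per-entry decision in
 * mlx5_esw_bridge_update(). Returns 1 to refresh, -1 to age out,
 * 0 to keep the entry as-is.
 */
static int fdb_entry_age_decision(unsigned long hw_lastuse,
				  unsigned long sw_lastuse,
				  unsigned long ageing_time)
{
	if (time_after(hw_lastuse, sw_lastuse))
		return 1;	/* hardware saw traffic since the last poll */
	if (time_is_before_jiffies(sw_lastuse + ageing_time))
		return -1;	/* idle past ageing_time: notify bridge, delete */
	return 0;
}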
+
+static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vport(esw, i, vport)
+ if (vport->bridge)
+ mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+
+ WARN_ONCE(!list_empty(&br_offloads->bridges),
+ "Cleaning up bridge offloads while still having bridges attached\n");
+}
+
+struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
+ br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&br_offloads->bridges);
+ br_offloads->esw = esw;
+ esw->br_offloads = br_offloads;
+
+ return br_offloads;
+}
+
+void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+
+ if (!br_offloads)
+ return;
+
+ mlx5_esw_bridge_flush(br_offloads);
+
+ esw->br_offloads = NULL;
+ kvfree(br_offloads);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
new file mode 100644
index 000000000000..d826942b27fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_BRIDGE_H__
+#define __MLX5_ESW_BRIDGE_H__
+
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include "eswitch.h"
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+
+struct mlx5_esw_bridge_offloads {
+ struct mlx5_eswitch *esw;
+ struct list_head bridges;
+ struct notifier_block netdev_nb;
+ struct notifier_block nb_blk;
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+ struct delayed_work update_work;
+
+ struct mlx5_flow_table *ingress_ft;
+ struct mlx5_flow_group *ingress_vlan_fg;
+ struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_mac_fg;
+
+ struct mlx5_flow_table *skip_ft;
+};
+
+struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
+void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESW_BRIDGE_H__ */
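
A hedged sketch of how this API is meant to be called, inferred from the declarations above only; real callers run the link/unlink steps from switchdev notifier context and drive mlx5_esw_bridge_update() from a workqueue, both elided here:

/* Sketch only: locking, notifier plumbing and most error handling elided. */
static int bridge_offloads_example(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport,
				   int br_ifindex,
				   struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	int err;

	br_offloads = mlx5_esw_bridge_init(esw);	/* once per eswitch */
	if (IS_ERR(br_offloads))
		return PTR_ERR(br_offloads);

	/* Attach the vport when its representor is enslaved to a bridge. */
	err = mlx5_esw_bridge_vport_link(br_ifindex, br_offloads, vport, extack);
	if (!err) {
		mlx5_esw_bridge_update(br_offloads);	/* periodic refresh/aging */
		mlx5_esw_bridge_vport_unlink(br_ifindex, br_offloads, vport, extack);
	}

	mlx5_esw_bridge_cleanup(esw);
	return err;
}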
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
new file mode 100644
index 000000000000..d9ab2e8bc2cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef _MLX5_ESW_BRIDGE_PRIVATE_
+#define _MLX5_ESW_BRIDGE_PRIVATE_
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/xarray.h>
+#include "fs_core.h"
+
+struct mlx5_esw_bridge_fdb_key {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+enum {
+ MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
+};
+
+struct mlx5_esw_bridge_fdb_entry {
+ struct mlx5_esw_bridge_fdb_key key;
+ struct rhash_head ht_node;
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head vlan_list;
+ u16 vport_num;
+ u16 flags;
+
+ struct mlx5_flow_handle *ingress_handle;
+ struct mlx5_fc *ingress_counter;
+ unsigned long lastuse;
+ struct mlx5_flow_handle *egress_handle;
+ struct mlx5_flow_handle *filter_handle;
+};
+
+struct mlx5_esw_bridge_vlan {
+ u16 vid;
+ u16 flags;
+ struct list_head fdb_list;
+ struct mlx5_pkt_reformat *pkt_reformat_push;
+ struct mlx5_pkt_reformat *pkt_reformat_pop;
+};
+
+struct mlx5_esw_bridge_port {
+ u16 vport_num;
+ struct xarray vlans;
+};
+
+#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */
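
bridge.c above passes fdb_ht_params to the rhashtable calls; its definition sits earlier in that file and is not part of this hunk. A plausible shape, labeled as an assumption, keyed on the {MAC, VID} pair defined here:

/* Assumed definition: the real one in bridge.c may differ in details. */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};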
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
new file mode 100644
index 000000000000..227964b7d3b9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_ESW_BRIDGE_TRACEPOINT_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_ESW_BRIDGE_TRACEPOINT_
+
+#include <linux/tracepoint.h>
+#include "../bridge_priv.h"
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb),
+ TP_STRUCT__entry(
+ __array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(u16, flags)
+ __field(unsigned int, used)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
+ __entry->vid = fdb->key.vid;
+ __entry->flags = fdb->flags;
+ __entry->used = jiffies_to_msecs(jiffies - fdb->lastuse)
+ ),
+ TP_printk("net_device=%s addr=%pM vid=%hu flags=%hx used=%u",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->flags,
+ __entry->used / 1000)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_init,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_refresh,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_vlan_template,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan),
+ TP_STRUCT__entry(
+ __field(u16, vid)
+ __field(u16, flags)
+ ),
+ TP_fast_assign(
+ __entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;
+ ),
+ TP_printk("vid=%hu flags=%hx",
+ __entry->vid,
+ __entry->flags)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
+ mlx5_esw_bridge_vlan_create,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
+ mlx5_esw_bridge_vlan_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan)
+ );
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port),
+ TP_STRUCT__entry(
+ __field(u16, vport_num)
+ ),
+ TP_fast_assign(
+ __entry->vport_num = port->vport_num;
+ ),
+ TP_printk("vport_num=%hu", __entry->vport_num)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_port_template,
+ mlx5_esw_bridge_vport_init,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_port_template,
+ mlx5_esw_bridge_vport_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port)
+ );
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH esw/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE bridge_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 570f2280823c..97e6cb6f13c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
@@ -1053,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
goto err_vhca_mapping;
}
+ /* External controller host PF has factory programmed MAC.
+ * Read it from the device.
+ */
+ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 64ccb2bc0b58..48cac5bf606d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -150,6 +150,8 @@ enum mlx5_eswitch_vport_event {
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
+struct mlx5_esw_bridge;
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -178,6 +180,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct mlx5_esw_bridge *bridge;
};
struct mlx5_esw_indir_table;
@@ -196,6 +199,7 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *tc_miss_table;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *send_to_vport_meta_grp;
@@ -270,6 +274,8 @@ enum {
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
+struct mlx5_esw_bridge_offloads;
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -299,6 +305,7 @@ struct mlx5_eswitch {
u32 root_tsar_id;
} qos;
+ struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
int mode;
u16 manager_vport;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db1e74280e57..7579f3402776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
struct mlx5_fs_chains *chains,
int i)
{
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
@@ -1633,7 +1634,21 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- err = esw_chains_create(esw, fdb);
+ /* Create an empty TC-miss managed table. This allows plugging in the
+ * following priorities without directly exposing their level-0 tables
+ * to eswitch_offloads, and lets this table be passed as miss_fdb to the
+ * esw_chains_create() call below.
+ */
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ ft_attr.prio = FDB_TC_MISS;
+ esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
+ err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
+ esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
+ goto tc_miss_table_err;
+ }
+
+ err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
if (err) {
esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
@@ -1778,6 +1793,8 @@ send_vport_meta_err:
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
+tc_miss_table_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@@ -1805,6 +1822,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index a81ece94f599..b45954905845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
 /* As this is the terminating action, the termination table is created
  * unmanaged at the TC offload priority rather than the slow path.
  */
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table (error %d)\n",
- IS_ERR(tt->termtbl));
- return -EOPNOTSUPP;
+ err = PTR_ERR(tt->termtbl);
+ esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+ return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule (error %d)\n",
- IS_ERR(tt->rule));
+ err = PTR_ERR(tt->rule);
+ esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
-}
-
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
-
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
+
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
- IS_ERR(tt));
+ esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 8e06731d3cb3..896a6c3dbdb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -36,6 +36,7 @@
#include "fs_core.h"
#include "fs_cmd.h"
+#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -49,9 +50,11 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
+ ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+
return 0;
}
@@ -108,9 +111,7 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
}
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -181,7 +182,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -192,12 +193,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_core_dev *dev = ns->dev;
int err;
+ if (size != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(size);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (!size)
+ return -ENOSPC;
+
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
- MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
@@ -234,9 +241,14 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
}
err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
- if (!err)
+ if (!err) {
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
+ ft->max_fte = size;
+ } else {
+ mlx5_ft_pool_put_sz(ns->dev, size);
+ }
+
return err;
}
@@ -245,6 +257,7 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ int err;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -254,7 +267,11 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
- return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ if (!err)
+ mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
+
+ return err;
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
@@ -682,9 +699,7 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
}
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -702,14 +717,14 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
- if (size > max_encap_size) {
+ if (params->size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
- size, max_encap_size);
+ params->size, max_encap_size);
return -EINVAL;
}
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
- GFP_KERNEL);
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
+ params->size, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -718,15 +733,20 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
- inlen = reformat - (void *)in + size;
+ inlen = reformat - (void *)in + params->size;
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_data_size, size);
+ reformat_data_size, params->size);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_type, params->type);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_param_0, params->param_0);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_type, reformat_type);
- memcpy(reformat, reformat_data, size);
+ reformat_param_1, params->param_1);
+ if (params->data && params->size)
+ memcpy(reformat, params->data, params->size);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index d62de642eca9..5ecd33cdc087 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
@@ -77,9 +77,7 @@ struct mlx5_flow_cmds {
bool disconnect);
int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f74d2c834037..d7bf0a3e4a52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -38,6 +38,7 @@
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
+#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
@@ -752,7 +753,7 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
enum fs_flow_table_type table_type,
enum fs_flow_table_op_mod op_mod,
u32 flags)
@@ -775,7 +776,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->max_fte = max_fte;
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1070,7 +1070,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
- int log_table_sz;
int err;
if (!root) {
@@ -1101,7 +1100,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
*/
ft = alloc_flow_table(ft_attr->level,
vport,
- ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
root->table_type,
op_mod, ft_attr->flags);
if (IS_ERR(ft)) {
@@ -1110,12 +1108,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
}
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
- log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
if (err)
goto free_ft;
@@ -1170,28 +1167,36 @@ mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
ft_attr.level = level;
ft_attr.prio = prio;
+ ft_attr.max_fte = 1;
+
return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
+#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr)
{
int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
- int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;
-
- if (max_num_groups > autogroups_max_fte)
- return ERR_PTR(-EINVAL);
- if (num_reserved_entries > ft_attr->max_fte)
- return ERR_PTR(-EINVAL);
+ int autogroups_max_fte;
ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
+ autogroups_max_fte = ft->max_fte - num_reserved_entries;
+ if (max_num_groups > autogroups_max_fte)
+ goto err_validate;
+ if (num_reserved_entries > ft->max_fte)
+ goto err_validate;
+
+ /* Align the number of groups according to the largest group size */
+ if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
+ max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
+
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
ft->autogroup.max_fte = autogroups_max_fte;
@@ -1199,6 +1204,10 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
return ft;
+
+err_validate:
+ mlx5_destroy_flow_table(ft);
+ return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
@@ -1495,7 +1504,9 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
d1->tir_num == d2->tir_num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
- d1->ft_num == d2->ft_num))
+ d1->ft_num == d2->ft_num) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
+ d1->sampler_id == d2->sampler_id))
return true;
}
@@ -2592,6 +2603,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
+ mlx5_ft_pool_destroy(dev);
kfree(steering);
}
@@ -2770,6 +2782,18 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (err)
goto out_err;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
@@ -2942,9 +2966,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
return err;
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ return err;
+
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
- if (!steering)
- return -ENOMEM;
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
+
steering->dev = dev;
dev->priv.steering = steering;
@@ -3151,9 +3182,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_pkt_reformat *pkt_reformat;
@@ -3169,9 +3198,8 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
return ERR_PTR(-ENOMEM);
pkt_reformat->ns_type = ns_type;
- pkt_reformat->reformat_type = reformat_type;
- err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
- reformat_data, ns_type,
+ pkt_reformat->reformat_type = params->type;
+ err = root->cmds->packet_reformat_alloc(root, params, ns_type,
pkt_reformat);
if (err) {
kfree(pkt_reformat);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e577a2c424af..7317cdeab661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -331,6 +331,7 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
new file mode 100644
index 000000000000..c14590acc772
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include "fs_ft_pool.h"
+
+/* Firmware currently supports 4 pools of 4 sizes (FT_POOLS), plus a 1-entry
+ * size used for termination tables, and a virtual memory region of 16M
+ * (FT_SIZE) that is duplicated for each flow table pool. We can allocate up
+ * to 16M from each pool and track how much is used via
+ * mlx5_ft_pool_get_avail_sz(). Firmware doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128,
+ 1 /* size for termination tables */ };
+struct mlx5_ft_pool {
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_ft_pool *ft_pool;
+ int i;
+
+ ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
+ if (!ft_pool)
+ return -ENOMEM;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
+
+ dev->priv.ft_pool = ft_pool;
+ return 0;
+}
+
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
+{
+ kfree(dev->priv.ft_pool);
+}
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size)
+{
+ u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ FT_POOLS[i] <= max_ft_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --dev->priv.ft_pool->ft_left[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
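
Reading the loop above (FT_POOLS is ordered large to small, so the scan starts at the tail, i.e. the smallest size), a request is served by the smallest pool entry that can hold it:

/* Worked example, assuming all pools still have headroom and a large
 * log_max_ft_size:
 *   desired_size = 300        -> scan sees 1, 128, then 64K; first fit is
 *                                64K, so 65536 is returned and that pool's
 *                                counter is decremented.
 *   desired_size = POOL_NEXT_SIZE -> every entry fits and nothing breaks the
 *                                loop, so the scan runs to index 0 and the
 *                                largest available size (4M here) is returned.
 *   nothing fits under max_ft_size -> 0 is returned; the caller in fs_cmd.c
 *                                turns that into -ENOSPC.
 */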
+
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
+{
+ int i;
+
+ if (!sz)
+ return;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++dev->priv.ft_pool->ft_left[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
new file mode 100644
index 000000000000..25f4274b372b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_FT_POOL_H__
+#define __MLX5_FS_FT_POOL_H__
+
+#include <linux/mlx5/driver.h>
+#include "fs_core.h"
+
+#define POOL_NEXT_SIZE 0
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size);
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz);
+
+#endif /* __MLX5_FS_FT_POOL_H__ */
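
The two calls are meant to bracket a firmware table's lifetime, as fs_cmd.c above does; a condensed sketch of that contract, where create_table_in_fw() is a hypothetical stand-in for the real firmware command:

/* Sketch: every size handed out by get_avail_sz() must go back via put_sz(),
 * either on creation failure or when the table is destroyed.
 */
static int example_create_table(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft, int requested)
{
	int sz = mlx5_ft_pool_get_avail_sz(dev, ft->type, requested);

	if (!sz)
		return -ENOSPC;
	if (create_table_in_fw(dev, ft, sz)) {	/* hypothetical helper */
		mlx5_ft_pool_put_sz(dev, sz);	/* creation failed: return budget */
		return -EIO;
	}
	ft->max_fte = sz;	/* handed back via put_sz() at destroy time */
	return 0;
}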
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 02558ac2ace6..016d26f809a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -148,6 +148,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, hca_cap_2)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index d5d57630015f..106b50e42b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 97d96fc38a65..0e487ec57d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -150,6 +150,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_FDR = 1 << 4,
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
+ MLX5_PTYS_RATE_NDR = 1 << 7,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
@@ -162,6 +163,7 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_FDR: return 14000;
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
+ case MLX5_PTYS_RATE_NDR: return 100000;
default: return -1;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b8748390335f..5c043c5cc403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -93,6 +93,64 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static void mlx5_do_bond_work(struct work_struct *work);
+
+static void mlx5_ldev_free(struct kref *ref)
+{
+ struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+
+ if (ldev->nb.notifier_call)
+ unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+ mlx5_lag_mp_cleanup(ldev);
+ cancel_delayed_work_sync(&ldev->bond_work);
+ destroy_workqueue(ldev->wq);
+ kfree(ldev);
+}
+
+static void mlx5_ldev_put(struct mlx5_lag *ldev)
+{
+ kref_put(&ldev->ref, mlx5_ldev_free);
+}
+
+static void mlx5_ldev_get(struct mlx5_lag *ldev)
+{
+ kref_get(&ldev->ref);
+}
+
+static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ int err;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return NULL;
+
+ ldev->wq = create_singlethread_workqueue("mlx5_lag");
+ if (!ldev->wq) {
+ kfree(ldev);
+ return NULL;
+ }
+
+ kref_init(&ldev->ref);
+ INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+ ldev->nb.notifier_call = mlx5_lag_netdev_event;
+ if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ ldev->nb.notifier_call = NULL;
+ mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+ }
+
+ err = mlx5_lag_mp_init(ldev);
+ if (err)
+ mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
+ err);
+
+ return ldev;
+}
+
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev)
{
@@ -118,17 +176,24 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
+ bool p1en;
+ bool p2en;
+
+ p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P1].link_up;
+
+ p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P2].link_up;
+
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P1].link_up) {
- *port1 = 2;
+ if ((!p1en && !p2en) || (p1en && p2en))
return;
- }
- if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P2].link_up)
+ if (p1en)
*port2 = 1;
+ else
+ *port1 = 2;
}
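
The rewritten mapping reduces to a small truth table (ports default to themselves; only the single-usable-port cases remap), added here as a comment for quick review:

/* p1en p2en -> (*port1, *port2)
 *  0    0   -> (1, 2)   neither usable: keep identity mapping
 *  1    1   -> (1, 2)   both usable: keep identity mapping
 *  1    0   -> (1, 1)   steer port 2's traffic through port 1
 *  0    1   -> (2, 2)   steer port 1's traffic through port 2
 */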
void mlx5_modify_lag(struct mlx5_lag *ldev,
@@ -251,6 +316,10 @@ static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
if (!ldev->pf[i].dev)
continue;
+ if (ldev->pf[i].dev->priv.flags &
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+ continue;
+
ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(ldev->pf[i].dev);
}
@@ -269,6 +338,31 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}
+static void mlx5_disable_lag(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ bool roce_lag;
+ int err;
+
+ roce_lag = __mlx5_lag_is_roce(ldev);
+
+ if (roce_lag) {
+ if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ }
+ mlx5_nic_vport_disable_roce(dev1);
+ }
+
+ err = mlx5_deactivate_lag(ldev);
+ if (err)
+ return;
+
+ if (roce_lag)
+ mlx5_lag_add_devices(ldev);
+}
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
@@ -280,9 +374,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev))
return;
- spin_lock(&lag_lock);
tracker = ldev->tracker;
- spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -291,8 +383,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);
#ifdef CONFIG_MLX5_ESWITCH
- roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag &&
+ dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
if (roce_lag)
@@ -316,20 +409,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (do_bond && __mlx5_lag_is_active(ldev)) {
mlx5_modify_lag(ldev, &tracker);
} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
- roce_lag = __mlx5_lag_is_roce(ldev);
-
- if (roce_lag) {
- dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
- mlx5_nic_vport_disable_roce(dev1);
- }
-
- err = mlx5_deactivate_lag(ldev);
- if (err)
- return;
-
- if (roce_lag)
- mlx5_lag_add_devices(ldev);
+ mlx5_disable_lag(ldev);
}
}
@@ -481,9 +561,7 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- spin_lock(&lag_lock);
ldev->tracker = tracker;
- spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -491,55 +569,52 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
- struct mlx5_lag *ldev;
-
- ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
- if (!ldev)
- return NULL;
-
- ldev->wq = create_singlethread_workqueue("mlx5_lag");
- if (!ldev->wq) {
- kfree(ldev);
- return NULL;
- }
+ unsigned int fn = PCI_FUNC(dev->pdev->devfn);
- INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ if (fn >= MLX5_MAX_PORTS)
+ return;
- return ldev;
+ spin_lock(&lag_lock);
+ ldev->pf[fn].netdev = netdev;
+ ldev->tracker.netdev_state[fn].link_up = 0;
+ ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ spin_unlock(&lag_lock);
}
-static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
+ struct net_device *netdev)
{
- destroy_workqueue(ldev->wq);
- kfree(ldev);
+ int i;
+
+ spin_lock(&lag_lock);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (ldev->pf[i].netdev == netdev) {
+ ldev->pf[i].netdev = NULL;
+ break;
+ }
+ }
+ spin_unlock(&lag_lock);
}
-static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return -EPERM;
-
- spin_lock(&lag_lock);
- ldev->pf[fn].dev = dev;
- ldev->pf[fn].netdev = netdev;
- ldev->tracker.netdev_state[fn].link_up = 0;
- ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ return;
+ ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
-
- spin_unlock(&lag_lock);
-
- return fn;
}
-static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+/* Must be called with intf_mutex held */
+static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
{
int i;
@@ -550,19 +625,15 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- spin_lock(&lag_lock);
- memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
-
+ ldev->pf[i].dev = NULL;
dev->priv.lag = NULL;
- spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int i, err;
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
@@ -574,67 +645,77 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
ldev = tmp_dev->priv.lag;
if (!ldev) {
- ldev = mlx5_lag_dev_alloc();
+ ldev = mlx5_lag_dev_alloc(dev);
if (!ldev) {
mlx5_core_err(dev, "Failed to alloc lag dev\n");
return;
}
+ } else {
+ mlx5_ldev_get(ldev);
}
- if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
- return;
+ mlx5_ldev_add_mdev(ldev, dev);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (!ldev->pf[i].dev)
- break;
+ return;
+}
- if (i >= MLX5_MAX_PORTS)
- ldev->flags |= MLX5_LAG_FLAG_READY;
+void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
- if (!ldev->nb.notifier_call) {
- ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
- ldev->nb.notifier_call = NULL;
- mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
- }
- }
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
- err = mlx5_lag_mp_init(ldev);
- if (err)
- mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
- err);
+ mlx5_dev_list_lock();
+ mlx5_ldev_remove_mdev(ldev, dev);
+ mlx5_dev_list_unlock();
+ mlx5_ldev_put(ldev);
+}
+
+void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
+{
+ mlx5_dev_list_lock();
+ __mlx5_lag_dev_add_mdev(dev);
+ mlx5_dev_list_unlock();
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
+void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!ldev)
return;
if (__mlx5_lag_is_active(ldev))
- mlx5_deactivate_lag(ldev);
-
- mlx5_lag_dev_remove_pf(ldev, dev);
+ mlx5_disable_lag(ldev);
+ mlx5_ldev_remove_netdev(ldev, netdev);
ldev->flags &= ~MLX5_LAG_FLAG_READY;
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
+ struct net_device *netdev)
+{
+ struct mlx5_lag *ldev;
+ int i;
+
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
+ mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
+ if (!ldev->pf[i].dev)
break;
- if (i == MLX5_MAX_PORTS) {
- if (ldev->nb.notifier_call) {
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
- ldev->nb.notifier_call = NULL;
- }
- mlx5_lag_mp_cleanup(ldev);
- cancel_delayed_work_sync(&ldev->bond_work);
- mlx5_lag_dev_free(ldev);
- }
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
@@ -643,7 +724,7 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
spin_unlock(&lag_lock);
@@ -657,7 +738,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
spin_unlock(&lag_lock);
@@ -671,7 +752,7 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
spin_unlock(&lag_lock);
@@ -684,7 +765,7 @@ void mlx5_lag_update(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
mlx5_dev_list_lock();
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -700,7 +781,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -729,7 +810,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
u8 port = 0;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -765,7 +846,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 8d8cf2d0bc6d..191392c37558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -40,6 +40,7 @@ struct lag_tracker {
struct mlx5_lag {
u8 flags;
u8 v2p_map[MLX5_MAX_PORTS];
+ struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
struct lag_tracker tracker;
struct workqueue_struct *wq;
@@ -49,7 +50,7 @@ struct mlx5_lag {
};
static inline struct mlx5_lag *
-mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+mlx5_lag_dev(struct mlx5_core_dev *dev)
{
return dev->priv.lag;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 2c41a6920264..c4bf8b679541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -28,7 +28,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_multipath(ldev);
return res;
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
+
if (mp->fib_nb.notifier_call)
return 0;
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index f607a3858ef5..624cedebb510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2018 Mellanox Technologies */
+/* Copyright (c) 2018-2021, Mellanox Technologies inc. All rights reserved. */
#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
@@ -32,6 +32,7 @@ struct mlx5_eq {
unsigned int irqn;
u8 eqn;
struct mlx5_rsc_debug *dbg;
+ struct mlx5_irq *irq;
};
struct mlx5_eq_async {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 00ef10a1a9f8..97e5845b4cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -6,6 +6,7 @@
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"
+#include "fs_ft_pool.h"
#include "en/mapping.h"
#include "fs_core.h"
#include "en_tc.h"
@@ -13,25 +14,10 @@
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
-#define ft_pool_left(chains) ((chains)->ft_left)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
- * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via get_next_avail_sz_from_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small and match firmware
- * pools.
- */
-#define FT_SIZE (16 * 1024 * 1024)
-static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
#define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains {
@@ -49,8 +35,6 @@ struct mlx5_fs_chains {
enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
-
- int ft_left[ARRAY_SIZE(FT_POOLS)];
};
struct fs_chain {
@@ -107,7 +91,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}
@@ -160,54 +144,6 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
tc_end_ft(chains) = ft;
}
-#define POOL_NEXT_SIZE 0
-static int
-mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
- int desired_size)
-{
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --ft_pool_left(chains)[found_i];
- return FT_POOLS[found_i];
- }
-
- return 0;
-}
-
-static void
-mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
-{
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (sz == FT_POOLS[i]) {
- ++ft_pool_left(chains)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
-}
-
-static void
-mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
-{
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
- ft_pool_left(chains)[i] =
- FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
-}
-
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
@@ -221,11 +157,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
- mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
- mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz;
/* We use tc_default_ft(chains) as the table's next_ft till
@@ -266,21 +198,12 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
if (IS_ERR(ft)) {
mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
(int)PTR_ERR(ft), chain, prio, level, sz);
- mlx5_chains_put_sz_to_pool(chains, sz);
return ft;
}
return ft;
}
-static void
-mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
- struct mlx5_flow_table *ft)
-{
- mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
- mlx5_destroy_flow_table(ft);
-}
-
static int
create_chain_restore(struct fs_chain *chain)
{
@@ -336,9 +259,10 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, field,
mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
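
The offset/length change above stops scaling by 8 and special-cases a full-register mapping: in the set_action_in layout the length field is assumed to be only 5 bits wide, so a 32-bit write cannot be expressed literally and is encoded as 0. A hypothetical helper capturing that encoding:

    /* hypothetical helper; encodes a 32-bit modify-header write as 0,
     * per the assumed 5-bit width of the set_action_in length field
     */
    static inline u8 modact_encode_len(u8 mlen)
    {
            return mlen == 32 ? 0 : mlen;
    }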
@@ -636,7 +560,7 @@ err_insert:
err_miss_rule:
mlx5_destroy_flow_group(miss_group);
err_group:
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
kvfree(prio_s);
@@ -659,7 +583,7 @@ mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
prio_params);
mlx5_del_flow_rules(prio->miss_rule);
mlx5_destroy_flow_group(prio->miss_group);
- mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_destroy_flow_table(prio->ft);
mlx5_chains_put_chain(chain);
kvfree(prio);
}
@@ -784,7 +708,7 @@ void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
}
static struct mlx5_fs_chains *
@@ -816,8 +740,6 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
mlx5_chains_get_chain_range(chains_priv),
mlx5_chains_get_prio_range(chains_priv));
- mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
-
err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
if (err)
goto init_chains_ht_err;
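
With the private size pool deleted, table sizing moves to the shared flow-table pool behind the new fs_ft_pool.h include; the chains code now only states its size preference. A hedged sketch of the assumed contract (POOL_NEXT_SIZE taken to be 0 there, as in the removed local define):

    ft_attr.max_fte = POOL_NEXT_SIZE;       /* 0: fs core picks the next
                                             * available pool size */
    ft = mlx5_create_flow_table(ns, &ft_attr);
    if (IS_ERR(ft))                         /* pool exhaustion surfaces here, */
            return PTR_ERR(ft);             /* not via a local -ENOSPC check  */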
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index e96f345e7dae..d50bdb226cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
static inline struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index fd8449ff9e17..839a01da110f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
@@ -206,3 +208,4 @@ unlock:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a7b2c3203a7..4a293542a7aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
+
#endif
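
Exporting mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac() and moving their declarations to <linux/mlx5/mpfs.h> (note the include added above) lets modules outside mlx5_core manage L2 table entries. A hedged usage sketch, with mdev and mac assumed to come from the caller's context:

    #include <linux/mlx5/mpfs.h>

    err = mlx5_mpfs_add_mac(mdev, mac);     /* claim the unicast address */
    if (err)
            return err;
    /* ... traffic flows to this function ... */
    mlx5_mpfs_del_mac(mdev, mac);           /* release it on teardown */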
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h
new file mode 100644
index 000000000000..84e5683861be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#ifndef __LIB_MLX5_SF_H__
+#define __LIB_MLX5_SF_H__
+
+#include <linux/mlx5/driver.h>
+
+static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
+#ifdef CONFIG_MLX5_SF
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf);
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ if (!mlx5_sf_supported(dev))
+ return 0;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ return MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ return 1 << MLX5_CAP_GEN(dev, log_max_sf);
+}
+
+#else
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif
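
Hoisting these helpers into lib/sf.h lets low-level code size resources by SF capability without pulling in the full SF headers; the pci_irq.c hunk later in this patch uses exactly that to budget MSI-X vectors:

    total_vec = pf_vec;
    if (mlx5_sf_max_functions(dev))
            total_vec += MLX5_IRQ_CTRL_SF_MAX +
                         MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);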
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c114365eb126..eb1b316560a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -76,6 +76,7 @@
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "mlx5_irq.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -503,7 +504,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
@@ -524,11 +525,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1161,7 +1162,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_core_set_hca_defaults(dev);
if (err) {
mlx5_core_err(dev, "Failed to set hca defaults\n");
- goto err_sriov;
+ goto err_set_hca;
}
mlx5_vhca_event_start(dev);
@@ -1185,6 +1186,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
}
mlx5_sf_dev_table_create(dev);
+ mlx5_lag_add_mdev(dev);
return 0;
@@ -1194,6 +1196,7 @@ err_ec:
mlx5_sf_hw_table_destroy(dev);
err_vhca:
mlx5_vhca_event_stop(dev);
+err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1219,6 +1222,7 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_lag_remove_mdev(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
@@ -1381,8 +1385,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
-
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
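
Embedding the profile by value means the log_max_qp clamp in handle_hca_cap() above now touches only this device's copy; previously dev->profile pointed into the global profile[] array, so one device's clamp silently rewrote the profile for every device selecting it:

    /* per-device copy: safe to adjust in place */
    prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);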
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a22b706eebd3..343807ac2036 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -164,27 +164,10 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
-
-int mlx5_irq_table_init(struct mlx5_core_dev *dev);
-void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_irq_table_create(struct mlx5_core_dev *dev);
-void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
-int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb);
-int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb);
-
-int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
- int msix_vec_count);
-int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
-
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
-struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
-int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
-struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
+void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
new file mode 100644
index 000000000000..abd024173c42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_IRQ_H__
+#define __MLX5_IRQ_H__
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5_COMP_EQS_PER_SF 8
+
+#define MLX5_IRQ_EQ_CTRL (0)
+
+struct mlx5_irq;
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
+int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
+ int msix_vec_count);
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+
+struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
+ struct cpumask *affinity);
+void mlx5_irq_release(struct mlx5_irq *irq);
+int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
+int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
+int mlx5_irq_get_index(struct mlx5_irq *irq);
+
+#endif /* __MLX5_IRQ_H__ */
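
A hedged sketch of the consumer flow this header implies, matching the new struct mlx5_eq::irq member added in lib/eq.h above (dev, vecidx, affinity and the notifier block are assumed to exist in the EQ setup path):

    struct mlx5_irq *irq;
    int err;

    irq = mlx5_irq_request(dev, vecidx, affinity);
    if (IS_ERR(irq))
            return PTR_ERR(irq);
    eq->irq = irq;
    err = mlx5_irq_attach_nb(irq, &eq_nb);  /* deliver interrupts to the EQ */
    if (err) {
            mlx5_irq_release(irq);
            return err;
    }

    /* teardown mirrors setup */
    mlx5_irq_detach_nb(eq->irq, &eq_nb);
    mlx5_irq_release(eq->irq);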
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 50af84e76fb6..174f71ed5280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
init_waitqueue_head(&mkey->wait);
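
The mr.c change rebuilds the key rather than OR-ing into its previous value: per the assumed helper semantics below, the index occupies the upper bits and the variant the low byte, so a stale index can no longer leak through mkey reuse:

    /* assumed semantics of the existing helpers (illustrative only) */
    u32 mlx5_idx_to_mkey(u32 mkey_idx) { return mkey_idx << 8; }  /* index -> upper bits */
    u8  mlx5_mkey_variant(u32 mkey)    { return mkey & 0xff; }    /* keep low byte */

    /* hence: key = fresh index | caller's variant; nothing stale survives */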
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 1f907df5b3a2..b25f764daa08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -6,60 +6,52 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+#include "mlx5_irq.h"
+#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#define MLX5_MAX_IRQ_NAME (32)
+/* max irq_index is 255; its decimal form needs at most three chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (3)
+
+#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_IRQ_CTRL_SF_MAX 8
+/* min num of vectors for SFs to be enabled */
+#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+
+#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
+#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
+#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
+#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
+#define MLX5_EQ_REFS_PER_IRQ (2)
struct mlx5_irq {
+ u32 index;
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
+ struct kref kref;
+ int irqn;
+ struct mlx5_irq_pool *pool;
};
-struct mlx5_irq_table {
- struct mlx5_irq *irq;
- int nvec;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
+struct mlx5_irq_pool {
+ char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
+ struct xa_limit xa_num_irqs;
+ struct mutex lock; /* sync IRQs creations */
+ struct xarray irqs;
+ u32 max_threshold;
+ u32 min_threshold;
+ struct mlx5_core_dev *dev;
};
-int mlx5_irq_table_init(struct mlx5_core_dev *dev)
-{
- struct mlx5_irq_table *irq_table;
-
- if (mlx5_core_is_sf(dev))
- return 0;
-
- irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
- if (!irq_table)
- return -ENOMEM;
-
- dev->priv.irq_table = irq_table;
- return 0;
-}
-
-void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
-{
- if (mlx5_core_is_sf(dev))
- return;
-
- kvfree(dev->priv.irq_table);
-}
-
-int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
-{
- return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
-}
-
-static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
-{
- struct mlx5_irq_table *irq_table = dev->priv.irq_table;
-
- return &irq_table->irq[vecidx];
-}
+struct mlx5_irq_table {
+ struct mlx5_irq_pool *pf_pool;
+ struct mlx5_irq_pool *sf_ctrl_pool;
+ struct mlx5_irq_pool *sf_comp_pool;
+};
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
@@ -95,9 +87,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +109,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,38 +132,52 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
-int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb)
+static void irq_release(struct kref *kref)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
+ struct mlx5_irq_pool *pool = irq->pool;
- irq = &irq_table->irq[vecidx];
- return atomic_notifier_chain_register(&irq->nh, nb);
+ xa_erase(&pool->irqs, irq->index);
+ /* free_irq requires that affinity and rmap be cleared before
+ * calling it. This is why there is asymmetry with set_rmap,
+ * which should be called after alloc_irq but before request_irq.
+ */
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_cpumask_var(irq->mask);
+ free_irq(irq->irqn, &irq->nh);
+ kfree(irq);
}
-int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb)
+static void irq_put(struct mlx5_irq *irq)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq_pool *pool = irq->pool;
- irq = &irq_table->irq[vecidx];
- return atomic_notifier_chain_unregister(&irq->nh, nb);
+ mutex_lock(&pool->lock);
+ kref_put(&irq->kref, irq_release);
+ mutex_unlock(&pool->lock);
}
-static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
+static irqreturn_t irq_int_handler(int irq, void *nh)
{
atomic_notifier_call_chain(nh, 0, NULL);
return IRQ_HANDLED;
}
+static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
+{
+ snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
+}
+
static void irq_set_name(char *name, int vecidx)
{
if (vecidx == 0) {
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
@@ -169,251 +185,431 @@ static void irq_set_name(char *name, int vecidx)
vecidx - MLX5_IRQ_VEC_COMP_BASE);
}
-static int request_irqs(struct mlx5_core_dev *dev, int nvec)
+static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
{
+ struct mlx5_core_dev *dev = pool->dev;
char name[MLX5_MAX_IRQ_NAME];
+ struct mlx5_irq *irq;
int err;
- int i;
-
- for (i = 0; i < nvec; i++) {
- struct mlx5_irq *irq = mlx5_irq_get(dev, i);
- int irqn = pci_irq_vector(dev->pdev, i);
+ irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+ irq->irqn = pci_irq_vector(dev->pdev, i);
+ if (!pool->name[0])
irq_set_name(name, i);
- ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
- "%s@pci:%s", name, pci_name(dev->pdev));
- err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
- &irq->nh);
- if (err) {
- mlx5_core_err(dev, "Failed to request irq\n");
- goto err_request_irq;
- }
+ else
+ irq_sf_set_name(pool, name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+ snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+ "%s@pci:%s", name, pci_name(dev->pdev));
+ err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+ mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
+ goto err_req_irq;
}
- return 0;
+ if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
+ mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
+ err = -ENOMEM;
+ goto err_cpumask;
+ }
+ kref_init(&irq->kref);
+ irq->index = i;
+ err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
+ irq->index, err);
+ goto err_xa;
+ }
+ irq->pool = pool;
+ return irq;
+err_xa:
+ free_cpumask_var(irq->mask);
+err_cpumask:
+ free_irq(irq->irqn, &irq->nh);
+err_req_irq:
+ kfree(irq);
+ return ERR_PTR(err);
+}
-err_request_irq:
- while (i--) {
- struct mlx5_irq *irq = mlx5_irq_get(dev, i);
- int irqn = pci_irq_vector(dev->pdev, i);
+int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
+{
+ int err;
- free_irq(irqn, &irq->nh);
- }
- return err;
+ err = kref_get_unless_zero(&irq->kref);
+ if (WARN_ON_ONCE(!err))
+ /* Something very bad happened here: we are enabling an EQ
+ * on a non-existing IRQ.
+ */
+ return -ENOENT;
+ err = atomic_notifier_chain_register(&irq->nh, nb);
+ if (err)
+ irq_put(irq);
+ return err;
}
-static void irq_clear_rmap(struct mlx5_core_dev *dev)
+int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
-#ifdef CONFIG_RFS_ACCEL
- struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+ irq_put(irq);
+ return atomic_notifier_chain_unregister(&irq->nh, nb);
+}
- free_irq_cpu_rmap(irq_table->rmap);
-#endif
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
+{
+ return irq->mask;
}
-static int irq_set_rmap(struct mlx5_core_dev *mdev)
+int mlx5_irq_get_index(struct mlx5_irq *irq)
{
- int err = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
- int num_affinity_vec;
- int vecidx;
+ return irq->index;
+}
- num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
- irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
- if (!irq_table->rmap) {
- err = -ENOMEM;
- mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
- goto err_out;
+/* irq_pool API */
+
+/* creating an irq from irq_pool */
+static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+ err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
+ GFP_KERNEL);
+ if (err)
+ return ERR_PTR(err);
+ irq = irq_request(pool, irq_index);
+ if (IS_ERR(irq))
+ return irq;
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_hint(irq->irqn, irq->mask);
+ return irq;
+}
+
+/* looking for the irq with the smallest refcount and the same affinity */
+static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ int start = pool->xa_num_irqs.min;
+ int end = pool->xa_num_irqs.max;
+ struct mlx5_irq *irq = NULL;
+ struct mlx5_irq *iter;
+ unsigned long index;
+
+ lockdep_assert_held(&pool->lock);
+ xa_for_each_range(&pool->irqs, index, iter, start, end) {
+ if (!cpumask_equal(iter->mask, affinity))
+ continue;
+ if (kref_read(&iter->kref) < pool->min_threshold)
+ return iter;
+ if (!irq || kref_read(&iter->kref) <
+ kref_read(&irq->kref))
+ irq = iter;
}
+ return irq;
+}
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < irq_table->nvec; vecidx++) {
- err = irq_cpu_rmap_add(irq_table->rmap,
- pci_irq_vector(mdev->pdev, vecidx));
- if (err) {
- mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
- err);
- goto err_irq_cpu_rmap_add;
+/* requesting an irq from a given pool according to given affinity */
+static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *least_loaded_irq, *new_irq;
+
+ mutex_lock(&pool->lock);
+ least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
+ if (least_loaded_irq &&
+ kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+ goto out;
+ new_irq = irq_pool_create_irq(pool, affinity);
+ if (IS_ERR(new_irq)) {
+ if (!least_loaded_irq) {
+ mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
+ cpumask_first(affinity));
+ mutex_unlock(&pool->lock);
+ return new_irq;
}
+ /* We failed to create a new IRQ for the requested affinity;
+ * fall back to sharing an existing IRQ.
+ */
+ goto out;
}
- return 0;
+ least_loaded_irq = new_irq;
+ goto unlock;
+out:
+ kref_get(&least_loaded_irq->kref);
+ if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+ mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
+ least_loaded_irq->irqn, pool->name,
+ kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+unlock:
+ mutex_unlock(&pool->lock);
+ return least_loaded_irq;
+}
-err_irq_cpu_rmap_add:
- irq_clear_rmap(mdev);
-err_out:
-#endif
- return err;
+/* requesting an irq from a given pool according to given index */
+static struct mlx5_irq *
+irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *irq;
+
+ mutex_lock(&pool->lock);
+ irq = xa_load(&pool->irqs, vecidx);
+ if (irq) {
+ kref_get(&irq->kref);
+ goto unlock;
+ }
+ irq = irq_request(pool, vecidx);
+ if (IS_ERR(irq) || !affinity)
+ goto unlock;
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_hint(irq->irqn, irq->mask);
+unlock:
+ mutex_unlock(&pool->lock);
+ return irq;
}
-/* Completion IRQ vectors */
+static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
+ int i, struct cpumask *affinity)
+{
+ if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
+ return irq_table->sf_ctrl_pool;
+ return irq_table->sf_comp_pool;
+}
-static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+/**
+ * mlx5_irq_release - release an IRQ back to the system.
+ * @irq: irq to be released.
+ */
+void mlx5_irq_release(struct mlx5_irq *irq)
{
- int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
+ synchronize_irq(irq->irqn);
+ irq_put(irq);
+}
+
+/**
+ * mlx5_irq_request - request an IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
+ * provided.
+ * @affinity: cpumask requested for this IRQ.
+ *
+ * This function returns a pointer to IRQ, or ERR_PTR in case of error.
+ */
+struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool;
struct mlx5_irq *irq;
- int irqn;
- irq = mlx5_irq_get(mdev, vecidx);
- irqn = pci_irq_vector(mdev->pdev, vecidx);
- if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
- mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
- return -ENOMEM;
+ if (mlx5_core_is_sf(dev)) {
+ pool = find_sf_irq_pool(irq_table, vecidx, affinity);
+ if (!pool)
+ /* we don't have IRQs for SFs, use the PF IRQs */
+ goto pf_irq;
+ if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
+ /* In case an SF user requests an IRQ with vecidx */
+ irq = irq_pool_request_vector(pool, vecidx, NULL);
+ else
+ irq = irq_pool_request_affinity(pool, affinity);
+ goto out;
}
+pf_irq:
+ pool = irq_table->pf_pool;
+ irq = irq_pool_request_vector(pool, vecidx, affinity);
+out:
+ if (IS_ERR(irq))
+ return irq;
+ mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ irq->irqn, cpumask_pr_args(affinity),
+ kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+ return irq;
+}
- cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
- irq->mask);
- if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irqn, irq->mask))
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
- irqn);
-
- return 0;
+static struct mlx5_irq_pool *
+irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
+ u32 min_threshold, u32 max_threshold)
+{
+ struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ pool->dev = dev;
+ xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
+ pool->xa_num_irqs.min = start;
+ pool->xa_num_irqs.max = start + size - 1;
+ if (name)
+ snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
+ name);
+ pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
+ pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
+ mutex_init(&pool->lock);
+ mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
+ name, size, start);
+ return pool;
}
-static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+static void irq_pool_free(struct mlx5_irq_pool *pool)
{
- int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
struct mlx5_irq *irq;
- int irqn;
+ unsigned long index;
- irq = mlx5_irq_get(mdev, vecidx);
- irqn = pci_irq_vector(mdev->pdev, vecidx);
- irq_set_affinity_hint(irqn, NULL);
- free_cpumask_var(irq->mask);
+ xa_for_each(&pool->irqs, index, irq)
+ irq_release(&irq->kref);
+ xa_destroy(&pool->irqs);
+ kvfree(pool);
}
-static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
- int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+ int num_sf_ctrl_by_msix;
+ int num_sf_ctrl_by_sfs;
+ int num_sf_ctrl;
int err;
- int i;
- for (i = 0; i < nvec; i++) {
- err = set_comp_irq_affinity_hint(mdev, i);
- if (err)
- goto err_out;
+ /* init pf_pool */
+ table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
+ MLX5_EQ_SHARE_IRQ_MIN_COMP,
+ MLX5_EQ_SHARE_IRQ_MAX_COMP);
+ if (IS_ERR(table->pf_pool))
+ return PTR_ERR(table->pf_pool);
+ if (!mlx5_sf_max_functions(dev))
+ return 0;
+ if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
+ mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
+ return 0;
}
+ /* init sf_ctrl_pool */
+ num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
+ num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
+ num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
+ table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
+ "mlx5_sf_ctrl",
+ MLX5_EQ_SHARE_IRQ_MIN_CTRL,
+ MLX5_EQ_SHARE_IRQ_MAX_CTRL);
+ if (IS_ERR(table->sf_ctrl_pool)) {
+ err = PTR_ERR(table->sf_ctrl_pool);
+ goto err_pf;
+ }
+ /* init sf_comp_pool */
+ table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
+ sf_vec - num_sf_ctrl, "mlx5_sf_comp",
+ MLX5_EQ_SHARE_IRQ_MIN_COMP,
+ MLX5_EQ_SHARE_IRQ_MAX_COMP);
+ if (IS_ERR(table->sf_comp_pool)) {
+ err = PTR_ERR(table->sf_comp_pool);
+ goto err_sf_ctrl;
+ }
return 0;
-
-err_out:
- for (i--; i >= 0; i--)
- clear_comp_irq_affinity_hint(mdev, i);
-
+err_sf_ctrl:
+ irq_pool_free(table->sf_ctrl_pool);
+err_pf:
+ irq_pool_free(table->pf_pool);
return err;
}
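
A worked sizing example for irq_pools_init() above, under assumed figures of sf_vec = 248 spare vectors and mlx5_sf_max_functions() = 256:

    num_sf_ctrl_by_msix = DIV_ROUND_UP(248, MLX5_COMP_EQS_PER_SF);  /* = 31 */
    num_sf_ctrl_by_sfs  = DIV_ROUND_UP(256, MLX5_SFS_PER_CTRL_IRQ); /* = 4  */
    num_sf_ctrl = min_t(int, 31, 4);                     /* = 4 */
    num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, 4);   /* = 4 */
    /* sf_ctrl_pool: 4 IRQs at [pf_vec, pf_vec + 3]
     * sf_comp_pool: 244 IRQs at [pf_vec + 4, pf_vec + 247]
     */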
-static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+static void irq_pools_destroy(struct mlx5_irq_table *table)
{
- int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
- int i;
-
- for (i = 0; i < nvec; i++)
- clear_comp_irq_affinity_hint(mdev, i);
+ if (table->sf_ctrl_pool) {
+ irq_pool_free(table->sf_comp_pool);
+ irq_pool_free(table->sf_ctrl_pool);
+ }
+ irq_pool_free(table->pf_pool);
}
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
+/* irq_table API */
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
- return irq_table->irq[vecidx].mask;
+ struct mlx5_irq_table *irq_table;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ if (!irq_table)
+ return -ENOMEM;
+
+ dev->priv.irq_table = irq_table;
+ return 0;
}
-#ifdef CONFIG_RFS_ACCEL
-struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
- return irq_table->rmap;
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ kvfree(dev->priv.irq_table);
}
-#endif
-static void unrequest_irqs(struct mlx5_core_dev *dev)
+int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
- struct mlx5_irq_table *table = dev->priv.irq_table;
- int i;
-
- for (i = 0; i < table->nvec; i++)
- free_irq(pci_irq_vector(dev->pdev, i),
- &mlx5_irq_get(dev, i)->nh);
+ return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_irq_table *table = priv->irq_table;
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq);
- int nvec;
+ int total_vec;
+ int pf_vec;
int err;
if (mlx5_core_is_sf(dev))
return 0;
- nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
- nvec = min_t(int, nvec, num_eqs);
- if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
-
- table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
- if (!table->irq)
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = min_t(int, pf_vec, num_eqs);
+ if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
return -ENOMEM;
- nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- nvec, PCI_IRQ_MSIX);
- if (nvec < 0) {
- err = nvec;
- goto err_free_irq;
- }
-
- table->nvec = nvec;
+ total_vec = pf_vec;
+ if (mlx5_sf_max_functions(dev))
+ total_vec += MLX5_IRQ_CTRL_SF_MAX +
+ MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- err = irq_set_rmap(dev);
- if (err)
- goto err_set_rmap;
+ total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
+ total_vec, PCI_IRQ_MSIX);
+ if (total_vec < 0)
+ return total_vec;
+ pf_vec = min(pf_vec, total_vec);
- err = request_irqs(dev, nvec);
+ err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
if (err)
- goto err_request_irqs;
-
- err = set_comp_irq_affinity_hints(dev);
- if (err) {
- mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
- goto err_set_affinity;
- }
-
- return 0;
+ pci_free_irq_vectors(dev->pdev);
-err_set_affinity:
- unrequest_irqs(dev);
-err_request_irqs:
- irq_clear_rmap(dev);
-err_set_rmap:
- pci_free_irq_vectors(dev->pdev);
-err_free_irq:
- kfree(table->irq);
return err;
}
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int i;
if (mlx5_core_is_sf(dev))
return;
- /* free_irq requires that affinity and rmap will be cleared
- * before calling it. This is why there is asymmetry with set_rmap
- * which should be called after alloc_irq but before request_irq.
+ /* There are cases where IRQs are still in use when we reach
+ * this point. Hence, make sure all the irqs are released.
*/
- irq_clear_rmap(dev);
- clear_comp_irqs_affinity_hints(dev);
- for (i = 0; i < table->nvec; i++)
- free_irq(pci_irq_vector(dev->pdev, i),
- &mlx5_irq_get(dev, i)->nh);
+ irq_pools_destroy(table);
pci_free_irq_vectors(dev->pdev);
- kfree(table->irq);
+}
+
+int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
+{
+ if (table->sf_comp_pool)
+ return table->sf_comp_pool->xa_num_irqs.max -
+ table->sf_comp_pool->xa_num_irqs.min + 1;
+ else
+ return mlx5_irq_table_get_num_comp(table);
}
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 441b5453acae..540cf05f6373 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, roce))
+ return;
+
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 6a0c6f965ad1..fa0288afc0dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a8e73c9ed1ea..1be048769309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
@@ -192,14 +192,17 @@ sf_err:
return err;
}
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index ef5f892aafad..d9c69123c1ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -6,7 +6,6 @@
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
#include "ecpf.h"
-#include "vhca_event.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -74,26 +73,29 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
+ int free_idx = -1;
int i;
hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
if (!hwc->sfs)
return -ENOSPC;
- /* Check if sf with same sfnum already exists or not. */
for (i = 0; i < hwc->max_fn; i++) {
+ if (!hwc->sfs[i].allocated && free_idx == -1) {
+ free_idx = i;
+ continue;
+ }
+
if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
return -EEXIST;
}
- /* Find the free entry and allocate the entry from the array */
- for (i = 0; i < hwc->max_fn; i++) {
- if (!hwc->sfs[i].allocated) {
- hwc->sfs[i].usr_sfnum = usr_sfnum;
- hwc->sfs[i].allocated = true;
- return i;
- }
- }
- return -ENOSPC;
+
+ if (free_idx == -1)
+ return -ENOSPC;
+
+ hwc->sfs[free_idx].usr_sfnum = usr_sfnum;
+ hwc->sfs[free_idx].allocated = true;
+ return free_idx;
}
static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
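
The rewrite folds the old pair of scans (duplicate check, then free-slot search) into one pass: record the first free index while scanning, fail on a duplicate sfnum anywhere, and claim the slot only after the whole array is proven unique. The same pattern in miniature, on a toy type:

    struct sf_slot { bool allocated; u32 usr_sfnum; };

    static int alloc_slot(struct sf_slot *slots, int n, u32 new_sfnum)
    {
            int free_idx = -1, i;

            for (i = 0; i < n; i++) {
                    if (!slots[i].allocated && free_idx == -1) {
                            free_idx = i;
                            continue;
                    }
                    if (slots[i].allocated && slots[i].usr_sfnum == new_sfnum)
                            return -EEXIST;
            }
            if (free_idx == -1)
                    return -ENOSPC;
            slots[free_idx].usr_sfnum = new_sfnum;
            slots[free_idx].allocated = true;
            return free_idx;
    }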
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 0b6aea1e6a94..81ce13b19ee8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -5,42 +5,7 @@
#define __MLX5_SF_H__
#include <linux/mlx5/driver.h>
-
-static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
-{
- return MLX5_CAP_GEN(dev, sf_base_id);
-}
-
-#ifdef CONFIG_MLX5_SF
-
-static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
-{
- return MLX5_CAP_GEN(dev, sf);
-}
-
-static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
-{
- if (!mlx5_sf_supported(dev))
- return 0;
- if (MLX5_CAP_GEN(dev, max_num_sf))
- return MLX5_CAP_GEN(dev, max_num_sf);
- else
- return 1 << MLX5_CAP_GEN(dev, log_max_sf);
-}
-
-#else
-
-static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
-{
- return false;
-}
-
-static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
-{
- return 0;
-}
-
-#endif
+#include "lib/sf.h"
#ifdef CONFIG_MLX5_SF_MANAGER
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2338989d4403..e8185b69ac6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+#include "mlx5_irq.h"
#include "eswitch.h"
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 949879cf2092..6475ba35cf6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "dr_ste.h"
enum dr_action_domain {
DR_ACTION_DOMAIN_NIC_INGRESS,
@@ -14,7 +15,8 @@ enum dr_action_domain {
enum dr_action_valid_state {
DR_ACTION_STATE_ERR,
DR_ACTION_STATE_NO_ACTION,
- DR_ACTION_STATE_REFORMAT,
+ DR_ACTION_STATE_ENCAP,
+ DR_ACTION_STATE_DECAP,
DR_ACTION_STATE_MODIFY_HDR,
DR_ACTION_STATE_MODIFY_VLAN,
DR_ACTION_STATE_NON_TERM,
@@ -29,46 +31,74 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
+ [DR_ACTION_STATE_ENCAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ },
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@@ -80,39 +110,48 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@@ -124,41 +163,69 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ },
+ [DR_ACTION_STATE_ENCAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -171,44 +238,53 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
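The table above is a per-domain state machine: the first index is the current build state, the second the next action type, and any transition not listed defaults to the zero-valued enumerator, which rejects the ordering. A minimal sketch of how such a table would be consulted while walking a rule's action list — the helper name and signature are assumptions for illustration, and it assumes DR_ACTION_STATE_ERR is the enum's zero value:

    static int dr_action_get_next_state(enum dr_action_domain domain,
                                        enum mlx5dr_action_type type,
                                        u32 *state)
    {
            /* Unlisted transitions are implicitly DR_ACTION_STATE_ERR (0) */
            *state = next_action_state[domain][*state][type];

            return *state == DR_ACTION_STATE_ERR ? -EOPNOTSUPP : 0;
    }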
@@ -235,6 +311,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
*action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
break;
+ case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
+ *action_type = DR_ACTION_TYP_INSERT_HDR;
+ break;
default:
return -EINVAL;
}
@@ -454,8 +533,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- attr.reformat_size = action->reformat->reformat_size;
- attr.reformat_id = action->reformat->reformat_id;
+ if (rx_rule &&
+ !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
+ mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
+ goto out_invalid_arg;
+ }
+ attr.reformat.size = action->reformat->size;
+ attr.reformat.id = action->reformat->id;
+ break;
+ case DR_ACTION_TYP_SAMPLER:
+ attr.final_icm_addr = rx_rule ? action->sampler->rx_icm_addr :
+ action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
@@ -481,6 +569,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
+ case DR_ACTION_TYP_INSERT_HDR:
+ attr.reformat.size = action->reformat->size;
+ attr.reformat.id = action->reformat->id;
+ attr.reformat.param_0 = action->reformat->param_0;
+ attr.reformat.param_1 = action->reformat->param_1;
+ break;
default:
goto out_invalid_arg;
}
@@ -543,6 +637,8 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite),
[DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
[DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
+ [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
};
static struct mlx5dr_action *
@@ -651,7 +747,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
- reformat_action->reformat->reformat_id;
+ reformat_action->reformat->id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -755,14 +851,43 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id)
+{
+ struct mlx5dr_action *action;
+ u64 icm_rx, icm_tx;
+ int ret;
+
+ ret = mlx5dr_cmd_query_flow_sampler(dmn->mdev, sampler_id,
+ &icm_rx, &icm_tx);
+ if (ret)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_SAMPLER);
+ if (!action)
+ return NULL;
+
+ action->sampler->dmn = dmn;
+ action->sampler->sampler_id = sampler_id;
+ action->sampler->rx_icm_addr = icm_rx;
+ action->sampler->tx_icm_addr = icm_tx;
+
+ refcount_inc(&dmn->refcount);
+ return action;
+}
+
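A hedged usage sketch for the new sampler action: the RX/TX ICM addresses are resolved once at creation, so rule build only has to pick the direction-appropriate one. In the state tables above the sampler is a terminating action, so it must end the action list. The fragment assumes dmn and sampler_id exist in the caller's context:

    struct mlx5dr_action *sampler;

    sampler = mlx5dr_action_create_flow_sampler(dmn, sampler_id);
    if (!sampler)
            return -EINVAL;

    /* ... pass as the last entry of a rule's action array ... */

    mlx5dr_action_destroy(sampler);  /* drops the domain refcount taken above */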
static int
dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
struct mlx5dr_domain *dmn,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data)
{
- if ((!data && data_sz) || (data && !data_sz) || reformat_type >
- DR_ACTION_TYP_L2_TO_TNL_L3) {
+ if ((!data && data_sz) || (data && !data_sz) ||
+ ((reformat_param_0 || reformat_param_1) &&
+ reformat_type != DR_ACTION_TYP_INSERT_HDR) ||
+ reformat_type > DR_ACTION_TYP_INSERT_HDR) {
mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
goto out_err;
}
@@ -794,6 +919,7 @@ out_err:
static int
dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ u8 reformat_param_0, u8 reformat_param_1,
size_t data_sz, void *data,
struct mlx5dr_action *action)
{
@@ -811,13 +937,14 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
else
rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
- ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, 0, 0,
+ data_sz, data,
&reformat_id);
if (ret)
return ret;
- action->reformat->reformat_id = reformat_id;
- action->reformat->reformat_size = data_sz;
+ action->reformat->id = reformat_id;
+ action->reformat->size = data_sz;
return 0;
}
case DR_ACTION_TYP_TNL_L2_TO_L2:
@@ -859,6 +986,23 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
return 0;
}
+ case DR_ACTION_TYP_INSERT_HDR:
+ {
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
+ MLX5_REFORMAT_TYPE_INSERT_HDR,
+ reformat_param_0,
+ reformat_param_1,
+ data_sz, data,
+ &reformat_id);
+ if (ret)
+ return ret;
+
+ action->reformat->id = reformat_id;
+ action->reformat->size = data_sz;
+ action->reformat->param_0 = reformat_param_0;
+ action->reformat->param_1 = reformat_param_1;
+ return 0;
+ }
default:
mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
return -EINVAL;
@@ -896,6 +1040,8 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data)
{
@@ -912,7 +1058,9 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
goto dec_ref;
}
- ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data);
+ ret = dr_action_verify_reformat_params(action_type, dmn,
+ reformat_param_0, reformat_param_1,
+ data_sz, data);
if (ret)
goto dec_ref;
@@ -923,6 +1071,8 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
action->reformat->dmn = dmn;
ret = dr_action_create_reformat_action(dmn,
+ reformat_param_0,
+ reformat_param_1,
data_sz,
data,
action);
@@ -1516,8 +1666,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
+ case DR_ACTION_TYP_INSERT_HDR:
mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
- action->reformat->reformat_id);
+ action->reformat->id);
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:
@@ -1525,6 +1676,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
kfree(action->rewrite->data);
refcount_dec(&action->rewrite->dmn->refcount);
break;
+ case DR_ACTION_TYP_SAMPLER:
+ refcount_dec(&action->sampler->dmn->refcount);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 5970cb8fc0c0..54e1f5438bbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -228,6 +228,36 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
return 0;
}
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+ u32 sampler_id,
+ u64 *rx_icm_addr,
+ u64 *tx_icm_addr)
+{
+ u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ void *attr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
+
+ *rx_icm_addr = MLX5_GET64(sampler_obj, attr,
+ sw_steering_icm_address_rx);
+ *tx_icm_addr = MLX5_GET64(sampler_obj, attr,
+ sw_steering_icm_address_tx);
+
+ return 0;
+}
+
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
@@ -460,6 +490,8 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id)
@@ -486,8 +518,11 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
- memcpy(pdata, reformat_data, reformat_size);
+ if (reformat_data && reformat_size)
+ memcpy(pdata, reformat_data, reformat_size);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
@@ -706,6 +741,9 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
fte->dest_arr[i].vport.reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ id = fte->dest_arr[i].sampler_id;
+ break;
default:
id = fte->dest_arr[i].tir_num;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 1fbcd012bb85..7ccfd40586ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int ret;
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
- ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+ MLX5_FT_MAX_MULTIPATH_LEVEL);
ft_attr.reformat_en = reformat_req;
ft_attr.decap_en = reformat_req;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 992b591bf0c5..12a8bbbf944b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -156,6 +156,7 @@ struct mlx5dr_ste_ctx {
u16 (*get_byte_mask)(u8 *hw_ste_p);
/* Actions */
+ u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 0757a4e8540e..f1950e4968da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -437,8 +437,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
+ attr->reformat.id,
+ attr->reformat.size,
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
/* Whenever prio_tag_required is enabled, we can be sure that the
* previous table (ACL) already pushed a vlan to our packet,
@@ -1893,6 +1893,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.get_byte_mask = &dr_ste_v0_get_byte_mask,
/* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_NONE,
.set_actions_rx = &dr_ste_v0_set_actions_rx,
.set_actions_tx = &dr_ste_v0_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 054c2e2b6554..4aaca8eb7597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -116,6 +116,8 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
@@ -246,6 +248,12 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
},
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
};
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
@@ -361,8 +369,8 @@ static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -374,6 +382,26 @@ static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
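Because the insert-with-pointer action encodes size and start_offset in 2-byte words, odd byte values cannot be represented. A hypothetical guard a caller might apply before programming the action (not present in the driver):

    static bool dr_insert_hdr_is_word_aligned(u8 offset_bytes, int size_bytes)
    {
            /* STE fields are in 2-byte word units, so byte values must be even */
            return !(offset_bytes & 1) && !(size_bytes & 1);
    }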
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
{
@@ -401,11 +429,11 @@ static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -519,9 +547,9 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_tx_encap(last_ste, action,
- attr->reformat_id,
- attr->reformat_size);
+ dr_ste_v1_set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -532,12 +560,25 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_tx_encap_l3(last_ste,
- action, d_action,
- attr->reformat_id,
- attr->reformat_size);
+ dr_ste_v1_set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
+ } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+ if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
@@ -616,7 +657,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_CTR]) {
- /* Counter action set after decap to exclude decaped header */
+ /* Counter action is set after decap and before insert_hdr
+ * to exclude the decapped / encapped header, respectively.
+ */
if (!allow_ctr) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
@@ -627,6 +670,52 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
}
+ if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+ u8 *d_action;
+
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+
+ d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+ dr_ste_v1_set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = false;
+ } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+ /* Modify-header, decap and encap actions must use different STEs */
+ if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ }
+
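The encap/insert_hdr branches above all repeat one budget pattern: if the current STE lacks room for the next action, chain a fresh match STE and reset the remaining-size counter. Distilled into a hypothetical helper (the driver open-codes this at each site):

    static void dr_ste_v1_make_room(u8 **last_ste, u8 **action, u32 *action_sz,
                                    u32 needed_sz, u32 *added_stes, u16 gvmi)
    {
            if (*action_sz < needed_sz) {
                    dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
                    *action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
                    *action_sz = DR_STE_ACTION_TRIPLE_SZ;
            }
    }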
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -694,7 +783,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
return -EINVAL;
- memcpy(padded_data, data, data_sz);
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Pad the start so that the copied header ends on a chunk boundary */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
/* Remove L2L3 outer headers */
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +799,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
hw_action += DR_STE_ACTION_DOUBLE_SZ;
used_actions++; /* Remove and NOP are a single double action */
- inline_data_sz =
- MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
- /* Add the new header inline + 2 extra bytes */
+ /* Add the new header using the inline action, 4 bytes at a time.
+ * The header is added in reverse order toward the beginning of the
+ * packet to avoid incorrect parsing by the HW. Since the header is
+ * 14B or 18B, an extra two bytes are padded and later removed.
+ */
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
void *addr_inline;
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects the offset here in words (2 bytes) */
- MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
- i * 2);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
/* Copy bytes one by one to avoid endianness problem */
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
hw_action, inline_data);
- memcpy(addr_inline, data_ptr, inline_data_sz);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
- data_ptr += inline_data_sz;
used_actions++;
}
- /* Remove 2 extra bytes */
+ /* Remove the first 2 extra (pad) bytes */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
- MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
/* The hardware expects the size here in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
used_actions++;
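To make the rewritten loop concrete, a standalone userspace sketch (not driver code) of the chunk arithmetic for a 14-byte L2 header and 4-byte inline chunks: the header is shifted right by 14 % 4 = 2 pad bytes inside padded_data, copied in four 4-byte chunks from last to first, each inserted at packet offset 0 so the chunks land in the correct final order, and the leading 2-byte pad is then removed:

    #include <stdio.h>

    int main(void)
    {
            const int data_sz = 14, inline_sz = 4;        /* L2 header, chunk size */
            int pad = data_sz % inline_sz;                /* 2 leading pad bytes   */
            int last = (data_sz / inline_sz) * inline_sz; /* offset of last chunk  */
            int i;

            for (i = 0; i < data_sz / inline_sz + 1; i++)
                    printf("insert padded_data[%2d..%2d] at packet offset 0\n",
                           last - i * inline_sz,
                           last - i * inline_sz + inline_sz - 1);
            printf("remove the first %d pad byte(s)\n", pad);
            return 0;
    }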
@@ -1865,6 +1960,7 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 67460c42a99b..f5e93fa87aff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -89,6 +89,11 @@ enum {
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
+enum mlx5dr_ste_ctx_action_cap {
+ DR_STE_CTX_ACTION_CAP_NONE = 0,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0,
+};
+
enum {
DR_MODIFY_ACTION_SIZE = 8,
};
@@ -118,6 +123,8 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_VPORT,
DR_ACTION_TYP_POP_VLAN,
DR_ACTION_TYP_PUSH_VLAN,
+ DR_ACTION_TYP_INSERT_HDR,
+ DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_MAX,
};
@@ -261,8 +268,12 @@ struct mlx5dr_ste_actions_attr {
u32 ctr_id;
u16 gvmi;
u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
+ struct {
+ u32 id;
+ u32 size;
+ u8 param_0;
+ u8 param_1;
+ } reformat;
struct {
int count;
u32 headers[MLX5DR_MAX_VLANS];
@@ -903,8 +914,17 @@ struct mlx5dr_action_rewrite {
struct mlx5dr_action_reformat {
struct mlx5dr_domain *dmn;
- u32 reformat_id;
- u32 reformat_size;
+ u32 id;
+ u32 size;
+ u8 param_0;
+ u8 param_1;
+};
+
+struct mlx5dr_action_sampler {
+ struct mlx5dr_domain *dmn;
+ u64 rx_icm_addr;
+ u64 tx_icm_addr;
+ u32 sampler_id;
};
struct mlx5dr_action_dest_tbl {
@@ -950,6 +970,7 @@ struct mlx5dr_action {
void *data;
struct mlx5dr_action_rewrite *rewrite;
struct mlx5dr_action_reformat *reformat;
+ struct mlx5dr_action_sampler *sampler;
struct mlx5dr_action_dest_tbl *dest_tbl;
struct mlx5dr_action_ctr *ctr;
struct mlx5dr_action_vport *vport;
@@ -1104,6 +1125,10 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
bool other_vport, u16 vport_number, u16 *gvmi);
int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
struct mlx5dr_esw_caps *caps);
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+ u32 sampler_id,
+ u64 *rx_icm_addr,
+ u64 *tx_icm_addr);
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_type,
@@ -1142,6 +1167,8 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
struct mlx5dr_cmd_query_flow_table_details *output);
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id);
@@ -1252,7 +1279,6 @@ struct mlx5dr_send_ring {
u32 tx_head;
void *buf;
u32 buf_size;
- struct ib_wc wc[MAX_SEND_CQE];
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
@@ -1290,6 +1316,7 @@ struct mlx5dr_cmd_flow_destination_hw_info {
u32 ft_num;
u32 ft_id;
u32 counter_id;
+ u32 sampler_id;
struct {
u16 num;
u16 vhca_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 96c39a17d026..d5926dd7e972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- log_size,
+ size,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -97,6 +97,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
}
}
+ ft->max_fte = INT_MAX;
+
return 0;
}
@@ -287,7 +289,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
tmp_action = mlx5dr_action_create_packet_reformat(domain,
- decap_type, 0,
+ decap_type,
+ 0, 0, 0,
NULL);
if (!tmp_action) {
err = -ENOMEM;
@@ -384,7 +387,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
- u32 ft_id;
+ u32 id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -422,9 +425,20 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
num_term_actions++;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
- ft_id = dst->dest_attr.ft_num;
+ id = dst->dest_attr.ft_num;
tmp_action = mlx5dr_action_create_dest_table_num(domain,
- ft_id);
+ id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ id = dst->dest_attr.sampler_id;
+ tmp_action = mlx5dr_action_create_flow_sampler(domain,
+ id);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -520,9 +534,7 @@ out_err:
}
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -530,7 +542,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
struct mlx5dr_action *action;
int dr_reformat;
- switch (reformat_type) {
+ switch (params->type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
@@ -542,16 +554,21 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
+ break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
- reformat_type);
+ params->type);
return -EOPNOTSUPP;
}
action = mlx5dr_action_create_packet_reformat(dr_domain,
dr_reformat,
- size,
- reformat_data);
+ params->param_0,
+ params->param_1,
+ params->size,
+ params->data);
if (!action) {
mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 612b0ac31db2..bbfe101d4e57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -26,6 +26,7 @@ enum mlx5dr_action_reformat_type {
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
+ DR_ACTION_REFORMAT_TYP_INSERT_HDR,
};
struct mlx5dr_match_parameters {
@@ -100,11 +101,16 @@ struct mlx5dr_action *mlx5dr_action_create_drop(void);
struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
+
+struct mlx5dr_action *
mlx5dr_action_create_flow_counter(u32 counter_id);
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data);
@@ -124,10 +130,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
- (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
- (MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX));
+ return MLX5_CAP_GEN(dev, roce) &&
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 01cc00ad8acf..b6931bbe52d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -424,6 +424,15 @@ err_modify_sq:
return err;
}
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
-
/* unset peer SQs */
- if (hp->peer_gone)
- return;
- for (i = 0; i < hp->num_channels; i++)
- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RST, 0, 0);
+ if (!hp->peer_gone)
+ mlx5_hairpin_unpair_peer_sq(hp);
}
struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ mlx5_hairpin_unpair_peer_sq(hp);
+
+ /* destroy peer SQ */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+ hp->peer_gone = true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 457ad42eaa2a..4c1440a95ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
void *in;
int err;
- if (!vport)
- return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig b/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig
new file mode 100644
index 000000000000..4cdebafaf222
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+#
+# Mellanox GigE driver configuration
+#
+
+config MLXBF_GIGE
+ tristate "Mellanox Technologies BlueField Gigabit Ethernet support"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ select PHYLIB
+ help
+ The second generation BlueField SoC from Mellanox Technologies
+ supports an out-of-band Gigabit Ethernet management port to the
+ Arm subsystem.
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
new file mode 100644
index 000000000000..e57c1375f236
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige.o
+
+mlxbf_gige-y := mlxbf_gige_ethtool.o \
+ mlxbf_gige_gpio.o \
+ mlxbf_gige_intr.o \
+ mlxbf_gige_main.o \
+ mlxbf_gige_mdio.o \
+ mlxbf_gige_rx.o \
+ mlxbf_gige_tx.o
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
new file mode 100644
index 000000000000..e3509e69ed1c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
+ * - this file contains software data structures and any chip-specific
+ * data structures (e.g. TX WQE format) that are memory resident.
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_H__
+#define __MLXBF_GIGE_H__
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+
+/* The silicon design supports a maximum RX ring size of
+ * 32K entries. Based on current testing, supporting this
+ * maximum is not required. Instead the RX ring is capped
+ * at a realistic value of 1024 entries.
+ */
+#define MLXBF_GIGE_MIN_RXQ_SZ 32
+#define MLXBF_GIGE_MAX_RXQ_SZ 1024
+#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128
+
+#define MLXBF_GIGE_MIN_TXQ_SZ 4
+#define MLXBF_GIGE_MAX_TXQ_SZ 256
+#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128
+
+#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048
+
+#define MLXBF_GIGE_DMA_PAGE_SZ 4096
+#define MLXBF_GIGE_DMA_PAGE_SHIFT 12
+
+/* There are four individual MAC RX filters. Currently
+ * two of them are being used: one for the broadcast MAC
+ * (index 0) and one for the local MAC (index 1).
+ */
+#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
+#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+
+/* Define for broadcast MAC literal */
+#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
+
+/* There are three individual interrupts:
+ * 1) Errors, "OOB" interrupt line
+ * 2) Receive Packet, "OOB_LLU" interrupt line
+ * 3) LLU and PLU Events, "OOB_PLU" interrupt line
+ */
+#define MLXBF_GIGE_ERROR_INTR_IDX 0
+#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
+#define MLXBF_GIGE_LLU_PLU_INTR_IDX 2
+#define MLXBF_GIGE_PHY_INT_N 3
+
+#define MLXBF_GIGE_MDIO_DEFAULT_PHY_ADDR 0x3
+
+#define MLXBF_GIGE_DEFAULT_PHY_INT_GPIO 12
+
+struct mlxbf_gige_stats {
+ u64 hw_access_errors;
+ u64 tx_invalid_checksums;
+ u64 tx_small_frames;
+ u64 tx_index_errors;
+ u64 sw_config_errors;
+ u64 sw_access_errors;
+ u64 rx_truncate_errors;
+ u64 rx_mac_errors;
+ u64 rx_din_dropped_pkts;
+ u64 tx_fifo_full;
+ u64 rx_filter_passed_pkts;
+ u64 rx_filter_discard_pkts;
+};
+
+struct mlxbf_gige {
+ void __iomem *base;
+ void __iomem *llu_base;
+ void __iomem *plu_base;
+ struct device *dev;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ void __iomem *mdio_io;
+ struct mii_bus *mdiobus;
+ void __iomem *gpio_io;
+ struct irq_domain *irqdomain;
+ u32 phy_int_gpio_mask;
+ spinlock_t lock; /* for packet processing indices */
+ spinlock_t gpio_lock; /* for GPIO bus access */
+ u16 rx_q_entries;
+ u16 tx_q_entries;
+ u64 *tx_wqe_base;
+ dma_addr_t tx_wqe_base_dma;
+ u64 *tx_wqe_next;
+ u64 *tx_cc;
+ dma_addr_t tx_cc_dma;
+ dma_addr_t *rx_wqe_base;
+ dma_addr_t rx_wqe_base_dma;
+ u64 *rx_cqe_base;
+ dma_addr_t rx_cqe_base_dma;
+ u16 tx_pi;
+ u16 prev_tx_ci;
+ u64 error_intr_count;
+ u64 rx_intr_count;
+ u64 llu_plu_intr_count;
+ struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
+ struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
+ int error_irq;
+ int rx_irq;
+ int llu_plu_irq;
+ int phy_irq;
+ int hw_phy_irq;
+ bool promisc_enabled;
+ u8 valid_polarity;
+ struct napi_struct napi;
+ struct mlxbf_gige_stats stats;
+};
+
+/* Rx Work Queue Element definitions */
+#define MLXBF_GIGE_RX_WQE_SZ 8
+
+/* Rx Completion Queue Element definitions */
+#define MLXBF_GIGE_RX_CQE_SZ 8
+#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK GENMASK(10, 0)
+#define MLXBF_GIGE_RX_CQE_VALID_MASK GENMASK(11, 11)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK GENMASK(15, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR GENMASK(12, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
+#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK GENMASK(31, 16)
+
+/* Tx Work Queue Element definitions */
+#define MLXBF_GIGE_TX_WQE_SZ_QWORDS 2
+#define MLXBF_GIGE_TX_WQE_SZ 16
+#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK GENMASK(10, 0)
+#define MLXBF_GIGE_TX_WQE_UPDATE_MASK GENMASK(31, 31)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK GENMASK(42, 32)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK GENMASK(55, 48)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK GENMASK(63, 56)
+
+/* Macro to return packet length of specified TX WQE */
+#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
+ (*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)
+
+/* Tx Completion Count */
+#define MLXBF_GIGE_TX_CC_SZ 8
+
+/* List of resources in ACPI table */
+enum mlxbf_gige_res {
+ MLXBF_GIGE_RES_MAC,
+ MLXBF_GIGE_RES_MDIO9,
+ MLXBF_GIGE_RES_GPIO0,
+ MLXBF_GIGE_RES_LLU,
+ MLXBF_GIGE_RES_PLU
+};
+
+/* Version of register data returned by mlxbf_gige_get_regs() */
+#define MLXBF_GIGE_REGS_VERSION 1
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev,
+ struct mlxbf_gige *priv);
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
+void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac);
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 *dmac);
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev);
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+ unsigned int map_len,
+ dma_addr_t *buf_dma,
+ enum dma_data_direction dir);
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
+int mlxbf_gige_poll(struct napi_struct *napi, int budget);
+extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev, struct mlxbf_gige *priv);
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv);
+
+#endif /* !defined(__MLXBF_GIGE_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
new file mode 100644
index 000000000000..92b798f8e73a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Ethtool support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/phy.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Start of struct ethtool_ops functions */
+static int mlxbf_gige_get_regs_len(struct net_device *netdev)
+{
+ return MLXBF_GIGE_MMIO_REG_SZ;
+}
+
+static void mlxbf_gige_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ regs->version = MLXBF_GIGE_REGS_VERSION;
+
+ /* Read the entire MMIO register space and store the results
+ * into the provided buffer as-is; this raw copy performs no
+ * per-word endianness conversion.
+ *
+ * NOTE: by design, a read to an offset without an existing
+ * register will be acknowledged and return zero.
+ */
+ memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
+}
+
+static void mlxbf_gige_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ ering->rx_max_pending = MLXBF_GIGE_MAX_RXQ_SZ;
+ ering->tx_max_pending = MLXBF_GIGE_MAX_TXQ_SZ;
+ ering->rx_pending = priv->rx_q_entries;
+ ering->tx_pending = priv->tx_q_entries;
+}
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} mlxbf_gige_ethtool_stats_keys[] = {
+ { "hw_access_errors" },
+ { "tx_invalid_checksums" },
+ { "tx_small_frames" },
+ { "tx_index_errors" },
+ { "sw_config_errors" },
+ { "sw_access_errors" },
+ { "rx_truncate_errors" },
+ { "rx_mac_errors" },
+ { "rx_din_dropped_pkts" },
+ { "tx_fifo_full" },
+ { "rx_filter_passed_pkts" },
+ { "rx_filter_discard_pkts" },
+};
+
+static int mlxbf_gige_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+ return ARRAY_SIZE(mlxbf_gige_ethtool_stats_keys);
+}
+
+static void mlxbf_gige_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *buf)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+ memcpy(buf, &mlxbf_gige_ethtool_stats_keys,
+ sizeof(mlxbf_gige_ethtool_stats_keys));
+}
+
+static void mlxbf_gige_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats,
+ u64 *data)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ /* Fill data array with interface statistics
+ *
+ * NOTE: the data writes must be in
+ * sync with the strings shown in
+ * the mlxbf_gige_ethtool_stats_keys[] array
+ *
+ * NOTE2: certain statistics below are zeroed upon
+ * port disable, so the calculation below
+ * must include the "cached" value of the stat
+ * plus the value read directly from hardware.
+ * Cached statistics are currently:
+ * rx_din_dropped_pkts
+ * rx_filter_passed_pkts
+ * rx_filter_discard_pkts
+ */
+ *data++ = priv->stats.hw_access_errors;
+ *data++ = priv->stats.tx_invalid_checksums;
+ *data++ = priv->stats.tx_small_frames;
+ *data++ = priv->stats.tx_index_errors;
+ *data++ = priv->stats.sw_config_errors;
+ *data++ = priv->stats.sw_access_errors;
+ *data++ = priv->stats.rx_truncate_errors;
+ *data++ = priv->stats.rx_mac_errors;
+ *data++ = (priv->stats.rx_din_dropped_pkts +
+ readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER));
+ *data++ = priv->stats.tx_fifo_full;
+ *data++ = (priv->stats.rx_filter_passed_pkts +
+ readq(priv->base + MLXBF_GIGE_RX_PASS_COUNTER_ALL));
+ *data++ = (priv->stats.rx_filter_discard_pkts +
+ readq(priv->base + MLXBF_GIGE_RX_DISC_COUNTER_ALL));
+}
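The NOTE2 pattern above, distilled into a hypothetical helper (the driver open-codes the sums): counters the hardware zeroes on port disable are accumulated into priv->stats before each reset, so the reported value is the cached total plus the live register:

    static u64 mlxbf_gige_rx_din_dropped_total(struct mlxbf_gige *priv)
    {
            /* cached value (accumulated across port disables) + live counter */
            return priv->stats.rx_din_dropped_pkts +
                   readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
    }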
+
+static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+}
+
+const struct ethtool_ops mlxbf_gige_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = mlxbf_gige_get_ringparam,
+ .get_regs_len = mlxbf_gige_get_regs_len,
+ .get_regs = mlxbf_gige_get_regs,
+ .get_strings = mlxbf_gige_get_strings,
+ .get_sset_count = mlxbf_gige_get_sset_count,
+ .get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_pauseparam = mlxbf_gige_get_pauseparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
new file mode 100644
index 000000000000..a8d966db5715
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Initialize and handle the GPIO interrupt triggered by the INT_N PHY
+ * signal. This GPIO interrupt triggers the PHY state machine to bring
+ * the link up/down.
+ *
+ * Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define MLXBF_GIGE_GPIO_CAUSE_FALL_EN 0x48
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0 0x80
+#define MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0 0x94
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE 0x98
+
+static void mlxbf_gige_gpio_enable(struct mlxbf_gige *priv)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->gpio_lock, flags);
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+ val |= priv->phy_int_gpio_mask;
+ writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
+ /* The INT_N interrupt level is active low,
+ * so enable the cause-fall bit to detect when
+ * the GPIO state goes low.
+ */
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+ val |= priv->phy_int_gpio_mask;
+ writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+
+ /* Enable PHY interrupt by setting the priority level */
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+ val |= priv->phy_int_gpio_mask;
+ writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+ spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static void mlxbf_gige_gpio_disable(struct mlxbf_gige *priv)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->gpio_lock, flags);
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~priv->phy_int_gpio_mask;
+ writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+ spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static irqreturn_t mlxbf_gige_gpio_handler(int irq, void *ptr)
+{
+ struct mlxbf_gige *priv;
+ u32 val;
+
+ priv = ptr;
+
+ /* Check whether this interrupt is from the PHY
+ * device; return if it is not.
+ */
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0);
+ if (!(val & priv->phy_int_gpio_mask))
+ return IRQ_NONE;
+
+ /* Clear the interrupt when done; otherwise, no further
+ * interrupts will be triggered.
+ */
+ val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+ val |= priv->phy_int_gpio_mask;
+ writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
+ generic_handle_irq(priv->phy_irq);
+
+ return IRQ_HANDLED;
+}
+
+static void mlxbf_gige_gpio_mask(struct irq_data *irqd)
+{
+ struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+ mlxbf_gige_gpio_disable(priv);
+}
+
+static void mlxbf_gige_gpio_unmask(struct irq_data *irqd)
+{
+ struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+ mlxbf_gige_gpio_enable(priv);
+}
+
+static struct irq_chip mlxbf_gige_gpio_chip = {
+ .name = "mlxbf_gige_phy",
+ .irq_mask = mlxbf_gige_gpio_mask,
+ .irq_unmask = mlxbf_gige_gpio_unmask,
+};
+
+static int mlxbf_gige_gpio_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &mlxbf_gige_gpio_chip, handle_simple_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mlxbf_gige_gpio_domain_ops = {
+ .map = mlxbf_gige_gpio_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+#ifdef CONFIG_ACPI
+static int mlxbf_gige_gpio_resources(struct acpi_resource *ares,
+ void *data)
+{
+ struct acpi_resource_gpio *gpio;
+ u32 *phy_int_gpio = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_GPIO) {
+ gpio = &ares->data.gpio;
+ *phy_int_gpio = gpio->pin_table[0];
+ }
+
+ return 1;
+}
+#endif
+
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv)
+{
+ irq_dispose_mapping(priv->phy_irq);
+ irq_domain_remove(priv->irqdomain);
+}
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev,
+ struct mlxbf_gige *priv)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ u32 phy_int_gpio = 0;
+ int ret;
+
+ LIST_HEAD(resources);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_GPIO0);
+ if (!res)
+ return -ENODEV;
+
+ priv->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!priv->gpio_io)
+ return -ENOMEM;
+
+#ifdef CONFIG_ACPI
+ ret = acpi_dev_get_resources(ACPI_COMPANION(dev),
+ &resources, mlxbf_gige_gpio_resources,
+ &phy_int_gpio);
+ acpi_dev_free_resource_list(&resources);
+ if (ret < 0 || !phy_int_gpio) {
+ dev_err(dev, "Error retrieving the gpio phy pin");
+ return -EINVAL;
+ }
+#endif
+
+ priv->phy_int_gpio_mask = BIT(phy_int_gpio);
+
+ mlxbf_gige_gpio_disable(priv);
+
+ priv->hw_phy_irq = platform_get_irq(pdev, MLXBF_GIGE_PHY_INT_N);
+
+ priv->irqdomain = irq_domain_add_simple(NULL, 1, 0,
+ &mlxbf_gige_gpio_domain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(dev, "Failed to add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ priv->phy_irq = irq_create_mapping(priv->irqdomain, 0);
+ if (!priv->phy_irq) {
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
+ dev_err(dev, "Error mapping PHY IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, priv->hw_phy_irq, mlxbf_gige_gpio_handler,
+ IRQF_ONESHOT | IRQF_SHARED, "mlxbf_gige_phy", priv);
+ if (ret) {
+ dev_err(dev, "Failed to request PHY IRQ");
+ mlxbf_gige_gpio_free(priv);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
new file mode 100644
index 000000000000..c38795be04a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Interrupt related logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/interrupt.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
+{
+ struct mlxbf_gige *priv;
+ u64 int_status;
+
+ priv = dev_id;
+
+ priv->error_intr_count++;
+
+ int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
+ priv->stats.hw_access_errors++;
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS) {
+ priv->stats.tx_invalid_checksums++;
+ /* This error condition is latched into MLXBF_GIGE_INT_STATUS
+ * when the GigE silicon operates on the offending
+ * TX WQE. The write to MLXBF_GIGE_INT_STATUS at the bottom
+ * of this routine clears this error condition.
+ */
+ }
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE) {
+ priv->stats.tx_small_frames++;
+ /* This condition happens when the networking stack invokes
+ * this driver's "start_xmit()" method with a packet whose
+ * size < 60 bytes. The GigE silicon will automatically pad
+ * this small frame up to a minimum-sized frame before it is
+ * sent. The "tx_small_frame" condition is latched into the
+ * MLXBF_GIGE_INT_STATUS register when the GigE silicon
+ * operates on the offending TX WQE. The write to
+ * MLXBF_GIGE_INT_STATUS at the bottom of this routine
+ * clears this condition.
+ */
+ }
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE)
+ priv->stats.tx_index_errors++;
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR)
+ priv->stats.sw_config_errors++;
+
+ if (int_status & MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR)
+ priv->stats.sw_access_errors++;
+
+ /* Clear all error interrupts by writing '1' back to
+ * all the asserted bits in INT_STATUS. Do not write
+ * '1' back to the 'receive packet' bit, since that is
+ * managed separately.
+ */
+
+ int_status &= ~MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET;
+
+ writeq(int_status, priv->base + MLXBF_GIGE_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
+{
+ struct mlxbf_gige *priv;
+
+ priv = dev_id;
+
+ priv->rx_intr_count++;
+
+ /* NOTE: GigE silicon automatically disables "packet rx" interrupt by
+ * setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
+ * to the ARM cores. Software needs to re-enable "packet rx"
+ * interrupts by clearing MLXBF_GIGE_INT_MASK bit0.
+ */
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
+{
+ struct mlxbf_gige *priv;
+
+ priv = dev_id;
+ priv->llu_plu_intr_count++;
+
+ return IRQ_HANDLED;
+}
+
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv)
+{
+ int err;
+
+ err = request_irq(priv->error_irq, mlxbf_gige_error_intr, 0,
+ "mlxbf_gige_error", priv);
+ if (err) {
+ dev_err(priv->dev, "Request error_irq failure\n");
+ return err;
+ }
+
+ err = request_irq(priv->rx_irq, mlxbf_gige_rx_intr, 0,
+ "mlxbf_gige_rx", priv);
+ if (err) {
+ dev_err(priv->dev, "Request rx_irq failure\n");
+ goto free_error_irq;
+ }
+
+ err = request_irq(priv->llu_plu_irq, mlxbf_gige_llu_plu_intr, 0,
+ "mlxbf_gige_llu_plu", priv);
+ if (err) {
+ dev_err(priv->dev, "Request llu_plu_irq failure\n");
+ goto free_rx_irq;
+ }
+
+ return 0;
+
+free_rx_irq:
+ free_irq(priv->rx_irq, priv);
+
+free_error_irq:
+ free_irq(priv->error_irq, priv);
+
+ return err;
+}
+
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv)
+{
+ free_irq(priv->error_irq, priv);
+ free_irq(priv->rx_irq, priv);
+ free_irq(priv->llu_plu_irq, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
new file mode 100644
index 000000000000..a0a059e0154f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Gigabit Ethernet driver for Mellanox BlueField SoC
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define DRV_NAME "mlxbf_gige"
+
+/* Allocate an SKB whose payload pointer aligns with the BlueField
+ * hardware DMA limitation, i.e. a DMA operation can't cross
+ * a 4KB boundary. A maximum packet size of 2KB is assumed in the
+ * alignment formula. The alignment logic overallocates an SKB,
+ * and then adjusts the headroom so that the SKB data pointer is
+ * naturally aligned to a 2KB boundary.
+ */
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+ unsigned int map_len,
+ dma_addr_t *buf_dma,
+ enum dma_data_direction dir)
+{
+ struct sk_buff *skb;
+ u64 addr, offset;
+
+ /* Overallocate the SKB so that any headroom adjustment (to
+ * provide 2KB natural alignment) does not exceed the payload area
+ */
+ skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
+ if (!skb)
+ return NULL;
+
+ /* Adjust the headroom so that skb->data is naturally aligned to
+ * a 2KB boundary, which is the maximum packet size supported.
+ */
+ addr = (long)skb->data;
+ offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
+ ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
+ offset -= addr;
+ if (offset)
+ skb_reserve(skb, offset);
+
+ /* Return streaming DMA mapping to caller */
+ *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
+ if (dma_mapping_error(priv->dev, *buf_dma)) {
+ dev_kfree_skb(skb);
+ *buf_dma = (dma_addr_t)0;
+ return NULL;
+ }
+
+ return skb;
+}
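The headroom adjustment above is the standard round-up-to-power-of-two idiom; a standalone sketch (not driver code) of the same computation, assuming the 2KB default buffer size:

    #include <stdint.h>

    /* Round addr up to the next 2KB boundary; with a 2x overallocated
     * buffer, the aligned payload always fits and never crosses a 4KB
     * DMA page.
     */
    static uintptr_t mlxbf_align_up_2k(uintptr_t addr)
    {
            const uintptr_t buf_sz = 2048;

            return (addr + buf_sz - 1) & ~(buf_sz - 1);
    }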
+
+static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
+{
+ u8 mac[ETH_ALEN];
+ u64 local_mac;
+
+ memset(mac, 0, ETH_ALEN);
+ mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+ &local_mac);
+ u64_to_ether_addr(local_mac, mac);
+
+ if (is_valid_ether_addr(mac)) {
+ ether_addr_copy(priv->netdev->dev_addr, mac);
+ } else {
+ /* Provide a random MAC if for some reason the device has
+ * not been configured with a valid MAC address already.
+ */
+ eth_hw_addr_random(priv->netdev);
+ }
+
+ local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
+ mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+ local_mac);
+}
+
+static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
+{
+ struct mlxbf_gige_stats *p;
+
+ /* Cache stats that will be cleared by clean port operation */
+ p = &priv->stats;
+ p->rx_din_dropped_pkts += readq(priv->base +
+ MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+ p->rx_filter_passed_pkts += readq(priv->base +
+ MLXBF_GIGE_RX_PASS_COUNTER_ALL);
+ p->rx_filter_discard_pkts += readq(priv->base +
+ MLXBF_GIGE_RX_DISC_COUNTER_ALL);
+}
+
+static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
+{
+ u64 control;
+ u64 temp;
+ int err;
+
+ /* Set the CLEAN_PORT_EN bit to trigger SW reset */
+ control = readq(priv->base + MLXBF_GIGE_CONTROL);
+ control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+ /* Ensure completion of "clean port" write before polling status */
+ mb();
+
+ err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
+ (temp & MLXBF_GIGE_STATUS_READY),
+ 100, 100000);
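+ /* The poll above checks STATUS_READY every 100 us for up to
+ * 100000 us (100 ms) and yields -ETIMEDOUT if the port never
+ * becomes ready.
+ */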
+
+ /* Clear the CLEAN_PORT_EN bit once polling completes */
+ control = readq(priv->base + MLXBF_GIGE_CONTROL);
+ control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+ return err;
+}
+
+static int mlxbf_gige_open(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u64 int_en;
+ int err;
+
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ return err;
+ mlxbf_gige_cache_stats(priv);
+ err = mlxbf_gige_clean_port(priv);
+ if (err)
+ goto free_irqs;
+ err = mlxbf_gige_rx_init(priv);
+ if (err)
+ goto free_irqs;
+ err = mlxbf_gige_tx_init(priv);
+ if (err)
+ goto rx_deinit;
+
+ phy_start(phydev);
+
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
+ /* Set bits in INT_EN that we care about */
+ int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+ MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+ MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
+ MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
+ MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
+ MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
+ MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
+
+ /* Ensure completion of all initialization before enabling interrupts */
+ mb();
+
+ writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
+
+ return 0;
+
+rx_deinit:
+ mlxbf_gige_rx_deinit(priv);
+
+free_irqs:
+ mlxbf_gige_free_irqs(priv);
+ return err;
+}
+
+static int mlxbf_gige_stop(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ mlxbf_gige_free_irqs(priv);
+
+ phy_stop(netdev->phydev);
+
+ mlxbf_gige_rx_deinit(priv);
+ mlxbf_gige_tx_deinit(priv);
+ mlxbf_gige_cache_stats(priv);
+ mlxbf_gige_clean_port(priv);
+
+ return 0;
+}
+
+static int mlxbf_gige_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ bool new_promisc_enabled;
+
+ new_promisc_enabled = netdev->flags & IFF_PROMISC;
+
+ /* Only write to the hardware registers if the new setting
+ * of promiscuous mode is different from the current one.
+ */
+ if (new_promisc_enabled != priv->promisc_enabled) {
+ priv->promisc_enabled = new_promisc_enabled;
+
+ if (new_promisc_enabled)
+ mlxbf_gige_enable_promisc(priv);
+ else
+ mlxbf_gige_disable_promisc(priv);
+ }
+}
+
+static void mlxbf_gige_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+
+ netdev_stats_to_stats64(stats, &netdev->stats);
+
+ stats->rx_length_errors = priv->stats.rx_truncate_errors;
+ stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
+ readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+ stats->rx_crc_errors = priv->stats.rx_mac_errors;
+ stats->rx_errors = stats->rx_length_errors +
+ stats->rx_fifo_errors +
+ stats->rx_crc_errors;
+
+ stats->tx_fifo_errors = priv->stats.tx_fifo_full;
+ stats->tx_errors = stats->tx_fifo_errors;
+}
+
+static const struct net_device_ops mlxbf_gige_netdev_ops = {
+ .ndo_open = mlxbf_gige_open,
+ .ndo_stop = mlxbf_gige_stop,
+ .ndo_start_xmit = mlxbf_gige_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = mlxbf_gige_do_ioctl,
+ .ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
+ .ndo_get_stats64 = mlxbf_gige_get_stats64,
+};
+
+static void mlxbf_gige_adjust_link(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ phy_print_status(phydev);
+}
+
+static int mlxbf_gige_probe(struct platform_device *pdev)
+{
+ struct phy_device *phydev;
+ struct net_device *netdev;
+ struct resource *mac_res;
+ struct resource *llu_res;
+ struct resource *plu_res;
+ struct mlxbf_gige *priv;
+ void __iomem *llu_base;
+ void __iomem *plu_base;
+ void __iomem *base;
+ u64 control;
+ int addr;
+ int err;
+
+ mac_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MAC);
+ if (!mac_res)
+ return -ENXIO;
+
+ base = devm_ioremap_resource(&pdev->dev, mac_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ llu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_LLU);
+ if (!llu_res)
+ return -ENXIO;
+
+ llu_base = devm_ioremap_resource(&pdev->dev, llu_res);
+ if (IS_ERR(llu_base))
+ return PTR_ERR(llu_base);
+
+ plu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_PLU);
+ if (!plu_res)
+ return -ENXIO;
+
+ plu_base = devm_ioremap_resource(&pdev->dev, plu_res);
+ if (IS_ERR(plu_base))
+ return PTR_ERR(plu_base);
+
+ /* Perform general init of GigE block */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control |= MLXBF_GIGE_CONTROL_PORT_EN;
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+
+ netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->netdev_ops = &mlxbf_gige_netdev_ops;
+ netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = &pdev->dev;
+ priv->pdev = pdev;
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->gpio_lock);
+
+ /* Attach MDIO device */
+ err = mlxbf_gige_mdio_probe(pdev, priv);
+ if (err)
+ return err;
+
+ err = mlxbf_gige_gpio_init(pdev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
+ mlxbf_gige_mdio_remove(priv);
+ return -ENODEV;
+ }
+
+ priv->base = base;
+ priv->llu_base = llu_base;
+ priv->plu_base = plu_base;
+
+ priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
+ priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+
+ /* Write initial MAC address to hardware */
+ mlxbf_gige_initial_mac(priv);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto out;
+ }
+
+ priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
+ priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
+ priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
+
+ phydev = phy_find_first(priv->mdiobus);
+ if (!phydev) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ addr = phydev->mdio.addr;
+ priv->mdiobus->irq[addr] = priv->phy_irq;
+ phydev->irq = priv->phy_irq;
+
+ err = phy_connect_direct(netdev, phydev,
+ mlxbf_gige_adjust_link,
+ PHY_INTERFACE_MODE_GMII);
+ if (err) {
+ dev_err(&pdev->dev, "Could not attach to PHY\n");
+ goto out;
+ }
+
+ /* MAC only supports 1000T full duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+ /* Only symmetric pause with flow control enabled is supported so no
+ * need to negotiate pause.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+
+ /* Display information about attached PHY device */
+ phy_attached_info(phydev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ phy_disconnect(phydev);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mlxbf_gige_gpio_free(priv);
+ mlxbf_gige_mdio_remove(priv);
+ return err;
+}
+
+static int mlxbf_gige_remove(struct platform_device *pdev)
+{
+ struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->netdev);
+ phy_disconnect(priv->netdev->phydev);
+ mlxbf_gige_gpio_free(priv);
+ mlxbf_gige_mdio_remove(priv);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void mlxbf_gige_shutdown(struct platform_device *pdev)
+{
+ struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+ writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+ mlxbf_gige_clean_port(priv);
+}
+
+static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
+ { "MLNXBF17", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
+
+static struct platform_driver mlxbf_gige_driver = {
+ .probe = mlxbf_gige_probe,
+ .remove = mlxbf_gige_remove,
+ .shutdown = mlxbf_gige_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
+ },
+};
+
+module_platform_driver(mlxbf_gige_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
+MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
new file mode 100644
index 000000000000..e32dd34fdcc0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+
+#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
+#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+
+/* Support clause 22 */
+#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
+#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
+#define MLXBF_GIGE_MDIO_CL22_READ 0x2
+
+/* Busy bit is set by software and cleared by hardware */
+#define MLXBF_GIGE_MDIO_SET_BUSY 0x1
+
+/* MDIO GW register bits */
+#define MLXBF_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
+#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
+#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
+#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
+#define MLXBF_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
+#define MLXBF_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
+
+/* MDIO config register bits */
+#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
+#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
+#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+
+/* Formula for encoding the MDIO period. The encoded value is
+ * passed to the MDIO config register.
+ *
+ * mdc_clk = 2*(val + 1)*i1clk
+ *
+ * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ *
+ * val = (((400 * 430 / 1000) / 2) - 1)
+ */
+#define MLXBF_GIGE_I1CLK_MHZ 430
+#define MLXBF_GIGE_MDC_CLK_NS 400
+
+#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
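+
+/* With the constants above, the integer arithmetic works out to
+ * ((400 * 430 / 1000) / 2) - 1 = (172 / 2) - 1 = 85.
+ */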
+
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
+ MLXBF_GIGE_MDIO_PERIOD) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
+ int phy_reg, u32 opcode)
+{
+ u32 gw_reg = 0;
+
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
+ MLXBF_GIGE_MDIO_CL22_ST1);
+ gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
+ MLXBF_GIGE_MDIO_SET_BUSY);
+
+ return gw_reg;
+}
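+
+/* Illustrative encoding: a Clause-22 read (opcode 2) of register 2 on
+ * PHY address 1 with AD = 0 assembles to 0x58220000, i.e. BUSY (bit 30)
+ * | ST1 (bit 28) | opcode in bits 27:26 | PHY address in bits 25:21 |
+ * register number in bits 20:16.
+ */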
+
+static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
+{
+ struct mlxbf_gige *priv = bus->priv;
+ u32 cmd;
+ int ret;
+ u32 val;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ /* Send mdio read request */
+ cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
+
+ writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+ ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+ val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+ if (ret) {
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ return ret;
+ }
+
+ ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ /* Only return the AD (address/data) bits of the GW register */
+ ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+
+ return ret;
+}
+
+static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
+ int phy_reg, u16 val)
+{
+ struct mlxbf_gige *priv = bus->priv;
+ u32 cmd;
+ int ret;
+ u32 temp;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ /* Send mdio write request */
+ cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
+ MLXBF_GIGE_MDIO_CL22_WRITE);
+ writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+ /* If the poll timed out, drop the request */
+ ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+ temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+ return ret;
+}
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MDIO9);
+ if (!res)
+ return -ENODEV;
+
+ priv->mdio_io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->mdio_io))
+ return PTR_ERR(priv->mdio_io);
+
+ /* Configure mdio parameters */
+ writel(MLXBF_GIGE_MDIO_CFG_VAL,
+ priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+
+ priv->mdiobus = devm_mdiobus_alloc(dev);
+ if (!priv->mdiobus) {
+ dev_err(dev, "Failed to alloc MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ priv->mdiobus->name = "mlxbf-mdio";
+ priv->mdiobus->read = mlxbf_gige_mdio_read;
+ priv->mdiobus->write = mlxbf_gige_mdio_write;
+ priv->mdiobus->parent = dev;
+ priv->mdiobus->priv = priv;
+ snprintf(priv->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ dev_name(dev));
+
+ ret = mdiobus_register(priv->mdiobus);
+ if (ret)
+ dev_err(dev, "Failed to register MDIO bus\n");
+
+ return ret;
+}
+
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv)
+{
+ mdiobus_unregister(priv->mdiobus);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
new file mode 100644
index 000000000000..5fb33c9294bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Mellanox BlueField GigE register defines
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_REGS_H__
+#define __MLXBF_GIGE_REGS_H__
+
+#define MLXBF_GIGE_STATUS 0x0010
+#define MLXBF_GIGE_STATUS_READY BIT(0)
+#define MLXBF_GIGE_INT_STATUS 0x0028
+#define MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET BIT(0)
+#define MLXBF_GIGE_INT_STATUS_RX_MAC_ERROR BIT(1)
+#define MLXBF_GIGE_INT_STATUS_RX_TRN_ERROR BIT(2)
+#define MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR BIT(3)
+#define MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR BIT(4)
+#define MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE BIT(5)
+#define MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE BIT(6)
+#define MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS BIT(7)
+#define MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR BIT(8)
+#define MLXBF_GIGE_INT_EN 0x0030
+#define MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET BIT(0)
+#define MLXBF_GIGE_INT_EN_RX_MAC_ERROR BIT(1)
+#define MLXBF_GIGE_INT_EN_RX_TRN_ERROR BIT(2)
+#define MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR BIT(3)
+#define MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR BIT(4)
+#define MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE BIT(5)
+#define MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE BIT(6)
+#define MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS BIT(7)
+#define MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR BIT(8)
+#define MLXBF_GIGE_INT_MASK 0x0038
+#define MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET BIT(0)
+#define MLXBF_GIGE_CONTROL 0x0040
+#define MLXBF_GIGE_CONTROL_PORT_EN BIT(0)
+#define MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN BIT(1)
+#define MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC BIT(4)
+#define MLXBF_GIGE_CONTROL_CLEAN_PORT_EN BIT(31)
+#define MLXBF_GIGE_RX_WQ_BASE 0x0200
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2 0x0208
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2_RESET_VAL 7
+#define MLXBF_GIGE_RX_CQ_BASE 0x0210
+#define MLXBF_GIGE_TX_WQ_BASE 0x0218
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2 0x0220
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2_RESET_VAL 7
+#define MLXBF_GIGE_TX_CI_UPDATE_ADDRESS 0x0228
+#define MLXBF_GIGE_RX_WQE_PI 0x0230
+#define MLXBF_GIGE_TX_PRODUCER_INDEX 0x0238
+#define MLXBF_GIGE_RX_MAC_FILTER 0x0240
+#define MLXBF_GIGE_RX_MAC_FILTER_STRIDE 0x0008
+#define MLXBF_GIGE_RX_DIN_DROP_COUNTER 0x0260
+#define MLXBF_GIGE_TX_CONSUMER_INDEX 0x0310
+#define MLXBF_GIGE_TX_CONTROL 0x0318
+#define MLXBF_GIGE_TX_CONTROL_GRACEFUL_STOP BIT(0)
+#define MLXBF_GIGE_TX_STATUS 0x0388
+#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN BIT(0)
+#define MLXBF_GIGE_RX_PASS_COUNTER_ALL 0x0550
+#define MLXBF_GIGE_RX_DISC_COUNTER_ALL 0x0560
+#define MLXBF_GIGE_RX 0x0578
+#define MLXBF_GIGE_RX_STRIP_CRC_EN BIT(1)
+#define MLXBF_GIGE_RX_DMA 0x0580
+#define MLXBF_GIGE_RX_DMA_EN BIT(0)
+#define MLXBF_GIGE_RX_CQE_PACKET_CI 0x05b0
+#define MLXBF_GIGE_MAC_CFG 0x05e8
+
+/* NOTE: MLXBF_GIGE_MAC_CFG is the last defined register offset,
+ * so use that plus the size of a single register to derive the total size
+ */
+#define MLXBF_GIGE_MMIO_REG_SZ (MLXBF_GIGE_MAC_CFG + 8)
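+/* i.e. MLXBF_GIGE_MMIO_REG_SZ = 0x05e8 + 8 = 0x05f0 bytes */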
+
+#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
new file mode 100644
index 000000000000..afa3b92a6905
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet receive logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Write destination MAC to specified MAC RX filter */
+ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+
+ /* Enable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 *dmac)
+{
+ void __iomem *base = priv->base;
+
+ /* Read destination MAC from specified MAC RX filter */
+ *dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+ u64 end_mac;
+
+ /* Enable MAC_ID_RANGE match functionality */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+
+ /* Set start of destination MAC range check to 0 */
+ writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);
+
+ /* Set end of destination MAC range check to all FFs */
+ end_mac = BCAST_MAC_ADDR;
+ writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
+}
+
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Disable MAC_ID_RANGE match functionality */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+
+ /* NOTE: no need to change DMAC_RANGE_START or END;
+ * those values are ignored since MAC_ID_RANGE_EN=0
+ */
+}
+
+/* Receive Initialization
+ * 1) Configures RX MAC filters via MMIO registers
+ * 2) Allocates RX WQE array using coherent DMA mapping
+ * 3) Initializes each element of RX WQE array with a receive
+ * buffer pointer (also using coherent DMA mapping)
+ * 4) Allocates RX CQE array using coherent DMA mapping
+ * 5) Completes other misc receive initialization
+ */
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+{
+ size_t wq_size, cq_size;
+ dma_addr_t *rx_wqe_ptr;
+ dma_addr_t rx_buf_dma;
+ u64 data;
+ int i, j;
+
+ /* Configure MAC RX filter #0 to allow RX of broadcast pkts */
+ mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
+ BCAST_MAC_ADDR);
+
+ wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+ priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
+ &priv->rx_wqe_base_dma,
+ GFP_KERNEL);
+ if (!priv->rx_wqe_base)
+ return -ENOMEM;
+
+ /* Initialize 'rx_wqe_ptr' to point to the first RX WQE in the array.
+ * Each RX WQE is simply a receive buffer pointer, so walk the entire
+ * array, allocating a 2KB buffer for each element.
+ */
+ rx_wqe_ptr = priv->rx_wqe_base;
+
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+ &rx_buf_dma, DMA_FROM_DEVICE);
+ if (!priv->rx_skb[i])
+ goto free_wqe_and_skb;
+ *rx_wqe_ptr++ = rx_buf_dma;
+ }
+
+ /* Write RX WQE base address into MMIO reg */
+ writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+
+ cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+ priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
+ &priv->rx_cqe_base_dma,
+ GFP_KERNEL);
+ if (!priv->rx_cqe_base)
+ goto free_wqe_and_skb;
+
+ for (i = 0; i < priv->rx_q_entries; i++)
+ priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;
+
+ /* Write RX CQE base address into MMIO reg */
+ writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+
+ /* Write RX_WQE_PI with current number of replenished buffers */
+ writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+ /* Enable removal of CRC during RX */
+ data = readq(priv->base + MLXBF_GIGE_RX);
+ data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
+ writeq(data, priv->base + MLXBF_GIGE_RX);
+
+ /* Enable RX MAC filter pass and discard counters */
+ writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
+ priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
+ writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+ priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+
+ /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+ * indicate readiness to receive interrupts
+ */
+ data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+ data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+ writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+
+ /* Enable RX DMA to write new packets to memory */
+ data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+ data |= MLXBF_GIGE_RX_DMA_EN;
+ writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+ writeq(ilog2(priv->rx_q_entries),
+ priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+
+ return 0;
+
+free_wqe_and_skb:
+ rx_wqe_ptr = priv->rx_wqe_base;
+ for (j = 0; j < i; j++) {
+ dma_unmap_single(priv->dev, *rx_wqe_ptr,
+ MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb(priv->rx_skb[j]);
+ rx_wqe_ptr++;
+ }
+ dma_free_coherent(priv->dev, wq_size,
+ priv->rx_wqe_base, priv->rx_wqe_base_dma);
+ return -ENOMEM;
+}
+
+/* Receive Deinitialization
+ * This routine will free allocations done by mlxbf_gige_rx_init(),
+ * namely the RX WQE and RX CQE arrays, as well as all RX buffers
+ */
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
+{
+ dma_addr_t *rx_wqe_ptr;
+ size_t size;
+ u64 data;
+ int i;
+
+ /* Disable RX DMA to prevent packet transfers to memory */
+ data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+ data &= ~MLXBF_GIGE_RX_DMA_EN;
+ writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+ rx_wqe_ptr = priv->rx_wqe_base;
+
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(priv->rx_skb[i]);
+ rx_wqe_ptr++;
+ }
+
+ size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+ dma_free_coherent(priv->dev, size,
+ priv->rx_wqe_base, priv->rx_wqe_base_dma);
+
+ size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+ dma_free_coherent(priv->dev, size,
+ priv->rx_cqe_base, priv->rx_cqe_base_dma);
+
+ priv->rx_wqe_base = NULL;
+ priv->rx_wqe_base_dma = 0;
+ priv->rx_cqe_base = NULL;
+ priv->rx_cqe_base_dma = 0;
+ writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+ writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+}
+
+static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+{
+ struct net_device *netdev = priv->netdev;
+ struct sk_buff *skb = NULL, *rx_skb;
+ u16 rx_pi_rem, rx_ci_rem;
+ dma_addr_t *rx_wqe_addr;
+ dma_addr_t rx_buf_dma;
+ u64 *rx_cqe_addr;
+ u64 datalen;
+ u64 rx_cqe;
+ u16 rx_ci;
+ u16 rx_pi;
+
+ /* Index into the RX buffer array is rx_pi with wrap based on rx_q_entries */
+ rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
+ rx_pi_rem = rx_pi % priv->rx_q_entries;
+
+ rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
+ rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
+ rx_cqe = *rx_cqe_addr;
+
+ if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
+ return false;
+
+ if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
+ /* Packet is OK, increment stats */
+ datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += datalen;
+
+ skb = priv->rx_skb[rx_pi_rem];
+
+ skb_put(skb, datalen);
+
+ skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* Alloc another RX SKB for this same index */
+ rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+ &rx_buf_dma, DMA_FROM_DEVICE);
+ if (!rx_skb)
+ return false;
+ priv->rx_skb[rx_pi_rem] = rx_skb;
+ dma_unmap_single(priv->dev, *rx_wqe_addr,
+ MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+ *rx_wqe_addr = rx_buf_dma;
+ } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
+ priv->stats.rx_mac_errors++;
+ } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
+ priv->stats.rx_truncate_errors++;
+ }
+
+ /* Let hardware know we've replenished one buffer */
+ rx_pi++;
+
+ /* Ensure completion of all writes before notifying HW of replenish */
+ wmb();
+ writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+ (*rx_pkts)++;
+
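+ /* The hardware toggles the CQE valid bit each time it wraps the ring;
+ * software mirrors that by flipping its expected polarity whenever the
+ * producer index wraps (behavior inferred from this code, not from a
+ * hardware specification).
+ */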
+ rx_pi_rem = rx_pi % priv->rx_q_entries;
+ if (rx_pi_rem == 0)
+ priv->valid_polarity ^= 1;
+ rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+ rx_ci_rem = rx_ci % priv->rx_q_entries;
+
+ if (skb)
+ netif_receive_skb(skb);
+
+ return rx_pi_rem != rx_ci_rem;
+}
+
+/* Driver poll() function called by NAPI infrastructure */
+int mlxbf_gige_poll(struct napi_struct *napi, int budget)
+{
+ struct mlxbf_gige *priv;
+ bool remaining_pkts;
+ int work_done = 0;
+ u64 data;
+
+ priv = container_of(napi, struct mlxbf_gige, napi);
+
+ mlxbf_gige_handle_tx_complete(priv);
+
+ do {
+ remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
+ } while (remaining_pkts && work_done < budget);
+
+ /* If the work done is less than the budget, turn off NAPI
+ * polling via napi_complete_done(napi, work_done) and then
+ * re-enable interrupts.
+ */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+ * indicate receive readiness
+ */
+ data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+ data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+ writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+ }
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
new file mode 100644
index 000000000000..04982e888c63
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet transmit logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Transmit Initialization
+ * 1) Allocates TX WQE array using coherent DMA mapping
+ * 2) Allocates TX completion counter using coherent DMA mapping
+ */
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
+{
+ size_t size;
+
+ size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+ priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
+ &priv->tx_wqe_base_dma,
+ GFP_KERNEL);
+ if (!priv->tx_wqe_base)
+ return -ENOMEM;
+
+ priv->tx_wqe_next = priv->tx_wqe_base;
+
+ /* Write TX WQE base address into MMIO reg */
+ writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+
+ /* Allocate address for TX completion count */
+ priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+ &priv->tx_cc_dma, GFP_KERNEL);
+ if (!priv->tx_cc) {
+ dma_free_coherent(priv->dev, size,
+ priv->tx_wqe_base, priv->tx_wqe_base_dma);
+ return -ENOMEM;
+ }
+
+ /* Write TX CC base address into MMIO reg */
+ writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+
+ writeq(ilog2(priv->tx_q_entries),
+ priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);
+
+ priv->prev_tx_ci = 0;
+ priv->tx_pi = 0;
+
+ return 0;
+}
+
+/* Transmit Deinitialization
+ * This routine will free allocations done by mlxbf_gige_tx_init(),
+ * namely the TX WQE array and the TX completion counter
+ */
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
+{
+ u64 *tx_wqe_addr;
+ size_t size;
+ int i;
+
+ tx_wqe_addr = priv->tx_wqe_base;
+
+ for (i = 0; i < priv->tx_q_entries; i++) {
+ if (priv->tx_skb[i]) {
+ dma_unmap_single(priv->dev, *tx_wqe_addr,
+ priv->tx_skb[i]->len, DMA_TO_DEVICE);
+ dev_kfree_skb(priv->tx_skb[i]);
+ priv->tx_skb[i] = NULL;
+ }
+ tx_wqe_addr += 2;
+ }
+
+ size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+ dma_free_coherent(priv->dev, size,
+ priv->tx_wqe_base, priv->tx_wqe_base_dma);
+
+ dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+ priv->tx_cc, priv->tx_cc_dma);
+
+ priv->tx_wqe_base = NULL;
+ priv->tx_wqe_base_dma = 0;
+ priv->tx_cc = NULL;
+ priv->tx_cc_dma = 0;
+ priv->tx_wqe_next = NULL;
+ writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+ writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+}
+
+/* Function that returns the status of the TX ring:
+ * 0: TX ring is full, i.e. there are no
+ * available unused entries in the TX ring.
+ * non-zero: TX ring is not full, i.e. there are
+ * some available entries in the TX ring.
+ * The non-zero value is a measure of
+ * how many TX entries are available, but
+ * it is not the exact number of available
+ * entries (see below).
+ *
+ * The algorithm assumes that if
+ * (prev_tx_ci == tx_pi) then the TX ring is empty.
+ * An empty ring actually holds (tx_q_entries-1)
+ * entries, which allows the algorithm to differentiate
+ * the case of an empty ring vs. a full ring.
+ */
+static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
+{
+ unsigned long flags;
+ u16 avail;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->prev_tx_ci == priv->tx_pi)
+ avail = priv->tx_q_entries - 1;
+ else
+ avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
+ % priv->tx_q_entries) - 1;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return avail;
+}
+
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
+{
+ struct net_device_stats *stats;
+ u16 tx_wqe_index;
+ u64 *tx_wqe_addr;
+ u64 tx_status;
+ u16 tx_ci;
+
+ tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
+ if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
+ priv->stats.tx_fifo_full++;
+ tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
+ stats = &priv->netdev->stats;
+
+ /* Transmit completion logic needs to loop until the completion
+ * index (in SW) equals TX consumer index (from HW). These
+ * parameters are unsigned 16-bit values and the wrap case needs
+ * to be supported, that is TX consumer index wrapped from 0xFFFF
+ * to 0 while TX completion index is still < 0xFFFF.
+ */
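+ /* For example (illustrative values): prev_tx_ci = 0xfffe and
+ * tx_ci = 0x0001 completes entries 0xfffe, 0xffff and 0x0000
+ * before the loop terminates with prev_tx_ci == 0x0001.
+ */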
+ for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
+ tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;
+ /* Each TX WQE is 16 bytes: the first 8-byte word holds the
+ * 2KB TX buffer address and the second 8-byte word contains
+ * information about the TX WQE.
+ */
+ tx_wqe_addr = priv->tx_wqe_base +
+ (tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);
+
+ stats->tx_packets++;
+ stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);
+
+ dma_unmap_single(priv->dev, *tx_wqe_addr,
+ priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
+ dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
+ priv->tx_skb[tx_wqe_index] = NULL;
+
+ /* Ensure completion of updates across all cores */
+ mb();
+ }
+
+ /* The TX ring was likely just drained, so if the TX queue had
+ * previously been stopped and TX buffers are now available,
+ * wake the queue.
+ */
+ if (netif_queue_stopped(priv->netdev) &&
+ mlxbf_gige_tx_buffs_avail(priv))
+ netif_wake_queue(priv->netdev);
+
+ return true;
+}
+
+/* Function to advance the tx_wqe_next pointer to next TX WQE */
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
+{
+ /* Advance tx_wqe_next pointer */
+ priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
+
+ /* If the 'next' pointer is beyond the end of the TX ring,
+ * wrap it back to the 'base' pointer of the ring.
+ */
+ if (priv->tx_wqe_next == (priv->tx_wqe_base +
+ (priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
+ priv->tx_wqe_next = priv->tx_wqe_base;
+}
+
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ long buff_addr, start_dma_page, end_dma_page;
+ struct sk_buff *tx_skb;
+ dma_addr_t tx_buf_dma;
+ unsigned long flags;
+ u64 *tx_wqe_addr;
+ u64 word2;
+
+ /* If needed, linearize TX SKB as hardware DMA expects this */
+ if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ buff_addr = (long)skb->data;
+ start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+ end_dma_page = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+
+ /* Verify that the payload pointer and data length of the SKB to
+ * be transmitted do not violate the hardware DMA limitation.
+ */
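+ /* Example (illustrative): with 4KB DMA pages, buff_addr ending in
+ * 0x0f80 and skb->len = 0x100 give start_dma_page != end_dma_page,
+ * since bytes 0x0f80-0x107f straddle a 4KB boundary.
+ */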
+ if (start_dma_page != end_dma_page) {
+ /* DMA operation would fail as-is, alloc new aligned SKB */
+ tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
+ &tx_buf_dma, DMA_TO_DEVICE);
+ if (!tx_skb) {
+ /* Free original skb, could not alloc new aligned SKB */
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ skb_put_data(tx_skb, skb->data, skb->len);
+
+ /* Free the original SKB */
+ dev_kfree_skb(skb);
+ } else {
+ tx_skb = skb;
+ tx_buf_dma = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_buf_dma)) {
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* Get address of TX WQE */
+ tx_wqe_addr = priv->tx_wqe_next;
+
+ mlxbf_gige_update_tx_wqe_next(priv);
+
+ /* Put PA of buffer address into first 64-bit word of TX WQE */
+ *tx_wqe_addr = tx_buf_dma;
+
+ /* Set the TX WQE pkt_len appropriately.
+ * NOTE: the GigE silicon will automatically pad the frame up
+ * to the minimum packet length if needed.
+ */
+ word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;
+
+ /* Write entire 2nd word of TX WQE */
+ *(tx_wqe_addr + 1) = word2;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
+ priv->tx_pi++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!netdev_xmit_more()) {
+ /* Create memory barrier before write to TX PI */
+ wmb();
+ writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
+ }
+
+ /* Check if the last TX entry was just used */
+ if (!mlxbf_gige_tx_buffs_avail(priv)) {
+ /* TX ring is full, inform stack */
+ netif_stop_queue(netdev);
+
+ /* Since there is no separate "TX complete" interrupt, we need
+ * to explicitly schedule a NAPI poll. This triggers the logic
+ * that processes TX completions and should drain the TX ring,
+ * allowing the TX queue to be awakened.
+ */
+ napi_schedule(&priv->napi);
+ }
+
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index a619d90559f7..12871c8dc7c1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -49,28 +49,6 @@ config MLXSW_I2C
To compile this driver as a module, choose M here: the
module will be called mlxsw_i2c.
-config MLXSW_SWITCHIB
- tristate "Mellanox Technologies SwitchIB and SwitchIB-2 support"
- depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV
- default m
- help
- This driver supports Mellanox Technologies SwitchIB and SwitchIB-2
- Infiniband Switch ASICs.
-
- To compile this driver as a module, choose M here: the
- module will be called mlxsw_switchib.
-
-config MLXSW_SWITCHX2
- tristate "Mellanox Technologies SwitchX-2 support"
- depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV
- default m
- help
- This driver supports Mellanox Technologies SwitchX-2 Ethernet
- Switch ASICs.
-
- To compile this driver as a module, choose M here: the
- module will be called mlxsw_switchx2.
-
config MLXSW_SPECTRUM
tristate "Mellanox Technologies Spectrum family support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index f545fd2c5896..196adeb33495 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -8,10 +8,6 @@ obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
mlxsw_pci-objs := pci.o
obj-$(CONFIG_MLXSW_I2C) += mlxsw_i2c.o
mlxsw_i2c-objs := i2c.o
-obj-$(CONFIG_MLXSW_SWITCHIB) += mlxsw_switchib.o
-mlxsw_switchib-objs := switchib.o
-obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
-mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 7e9a7cb31720..e775f08fb464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -630,7 +630,7 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb;
int err;
- skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+ skb = skb_clone(trans->tx_skb, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1444,7 +1444,9 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (err)
return err;
- err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ fw_info_psid);
if (err)
return err;
@@ -1453,7 +1455,9 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (err)
return err;
- return 0;
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index dd26865bd587..3713c45cfa1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/ethtool.h>
#include <linux/sfp.h>
#include "core.h"
@@ -25,8 +26,8 @@ struct mlxsw_env {
static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
bool *qsfp, bool *cmis)
{
- char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
u8 ident;
int err;
@@ -35,7 +36,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
return err;
- mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
ident = eeprom_tmp[0];
*cmis = false;
switch (ident) {
@@ -63,8 +64,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
- char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
u16 i2c_addr;
u8 page = 0;
int status;
@@ -115,7 +116,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
if (status)
return -EIO;
- mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
memcpy(data, eeprom_tmp, size);
*p_read_size = size;
@@ -125,14 +126,14 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp)
{
- char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
+ unsigned int module_temp, module_crit, module_emerg;
union {
u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
u16 temp;
} temp_thresh;
char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- unsigned int module_temp;
+ char *eeprom_tmp;
bool qsfp, cmis;
int page;
int err;
@@ -142,12 +143,21 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, &module_crit,
+ &module_emerg, NULL);
if (!module_temp) {
*temp = 0;
return 0;
}
+ /* Check whether threshold readings are available through the MTMP
+ * register; otherwise fall back to reading them through MCIA.
+ */
+ if (module_emerg) {
+ *temp = off == SFP_TEMP_HIGH_WARN ? module_crit : module_emerg;
+ return 0;
+ }
+
/* Read Free Side Device Temperature Thresholds from page 03h
* (MSB at lower byte address).
* Bytes:
@@ -185,7 +195,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
if (err)
return err;
- mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE);
*temp = temp_thresh.temp * 1000;
@@ -306,6 +316,79 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
+static int mlxsw_env_mcia_status_process(const char *mcia_pl,
+ struct netlink_ext_ack *extack)
+{
+ u8 status = mlxsw_reg_mcia_status_get(mcia_pl);
+
+ switch (status) {
+ case MLXSW_REG_MCIA_STATUS_GOOD:
+ return 0;
+ case MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE:
+ NL_SET_ERR_MSG_MOD(extack, "No response from module's EEPROM");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Module type not supported by the device");
+ return -EOPNOTSUPP;
+ case MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED:
+ NL_SET_ERR_MSG_MOD(extack, "No module present indication");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_I2C_ERROR:
+ NL_SET_ERR_MSG_MOD(extack, "Error occurred while trying to access module's EEPROM using I2C");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_MODULE_DISABLED:
+ NL_SET_ERR_MSG_MOD(extack, "Module is disabled");
+ return -EIO;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+ return -EIO;
+ }
+}
+
+int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ u32 bytes_read = 0;
+ u16 device_addr;
+
+ /* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
+ device_addr = page->offset;
+
+ while (bytes_read < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
+ u8 size;
+ int err;
+
+ size = min_t(u8, page->length - bytes_read,
+ MLXSW_REG_MCIA_EEPROM_SIZE);
+
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+ device_addr + bytes_read, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+ memcpy(page->data + bytes_read, eeprom_tmp, size);
+ bytes_read += size;
+ }
+
+ return bytes_read;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
u8 module,
bool *p_has_temp_sensor)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 2b23f8a87862..0bf5bd0f8a7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -4,6 +4,8 @@
#ifndef _MLXSW_CORE_ENV_H
#define _MLXSW_CORE_ENV_H
+#include <linux/ethtool.h>
+
struct ethtool_modinfo;
struct ethtool_eeprom;
@@ -18,6 +20,11 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data);
int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
+int
mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
u64 *p_counter);
int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 2196c946698a..d41afdfbd085 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -72,7 +72,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
return sprintf(buf, "%d\n", temp);
}
@@ -95,7 +95,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL, NULL, NULL);
return sprintf(buf, "%d\n", temp_max);
}
@@ -239,7 +239,7 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
dev_err(dev, "Failed to query module temperature\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL, NULL, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index dfea14399607..0998dcc9cac0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -149,22 +149,27 @@ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
static int
mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal_module *tz)
+ struct mlxsw_thermal_module *tz,
+ int crit_temp, int emerg_temp)
{
- int crit_temp, emerg_temp;
int err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
- SFP_TEMP_HIGH_WARN,
- &crit_temp);
- if (err)
- return err;
+ /* Do not try to query temperature thresholds directly from the module's
+ * EEPROM if we got valid thresholds from MTMP.
+ */
+ if (!emerg_temp || !crit_temp) {
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_WARN,
+ &crit_temp);
+ if (err)
+ return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
- SFP_TEMP_HIGH_ALARM,
- &emerg_temp);
- if (err)
- return err;
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_ALARM,
+ &emerg_temp);
+ if (err)
+ return err;
+ }
if (crit_temp > emerg_temp) {
dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
@@ -281,7 +286,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
dev_err(dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
if (temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
temp);
@@ -420,36 +425,57 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
return err;
}
-static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
- int *p_temp)
+static void
+mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
+ u16 sensor_index, int *p_temp,
+ int *p_crit_temp,
+ int *p_emerg_temp)
{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
- struct device *dev = thermal->bus_info->dev;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- int temp;
int err;
- /* Read module temperature. */
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN +
- tz->module, false, false);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ /* Read module temperature and thresholds. */
+ mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
- /* Do not return error - in case of broken module's sensor
- * it will cause error message flooding.
+ /* Set temperature and thresholds to zero to avoid passing
+ * uninitialized data back to the caller.
*/
- temp = 0;
- *p_temp = (int) temp;
- return 0;
+ *p_temp = 0;
+ *p_crit_temp = 0;
+ *p_emerg_temp = 0;
+
+ return;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, p_crit_temp, p_emerg_temp,
+ NULL);
+}
+
+static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int temp, crit_temp, emerg_temp;
+ struct device *dev;
+ u16 sensor_index;
+ int err;
+
+ dev = thermal->bus_info->dev;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
+
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ sensor_index, &temp,
+ &crit_temp, &emerg_temp);
*p_temp = temp;
if (!temp)
return 0;
/* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz);
+ err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
if (!err && temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@ -560,7 +586,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
if (temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@ -693,7 +719,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0, 0);
+ NULL, 0,
+ module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
return err;
@@ -716,7 +743,10 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int dummy_temp, crit_temp, emerg_temp;
+ u16 sensor_index;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
module_tz = &thermal->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
@@ -727,8 +757,12 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ &crit_temp, &emerg_temp);
/* Update trip point according to the module data. */
- return mlxsw_thermal_module_trips_update(dev, core, module_tz);
+ return mlxsw_thermal_module_trips_update(dev, core, module_tz,
+ crit_temp, emerg_temp);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
@@ -815,7 +849,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0, 0);
+ NULL, 0,
+ gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h
deleted file mode 100644
index 2d0cb0f5eb85..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/ib.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
-
-#ifndef _MLXSW_IB_H
-#define _MLXSW_IB_H
-
-#define MLXSW_IB_DEFAULT_MTU 4096
-
-#endif /* _MLXSW_IB_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index b34c44723f8b..d9d56c44e994 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -112,10 +112,23 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
ee, data);
}
+static int
+mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+ page, extack);
+}
+
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
+ .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
};
static int
@@ -234,6 +247,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
u8 *last_module)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
u8 module, width;
int err;
@@ -249,6 +263,9 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
if (module == *last_module)
return 0;
*last_module = module;
+
+ if (WARN_ON_ONCE(module >= max_ports))
+ return -EINVAL;
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 8e8456811384..13b0259f7ea6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1426,11 +1426,6 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
unsigned long end;
u32 val;
- if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
- msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
- return 0;
- }
-
/* We must wait for the HW to become responsive. */
msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 5b1323645a5d..9899c1a2ea8f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -6,12 +6,9 @@
#include <linux/pci.h>
-#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
-#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
-#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
#if IS_ENABLED(CONFIG_MLXSW_PCI)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 900b4bf5bb5b..6fbda6ebd590 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
@@ -8305,6 +8305,8 @@ enum {
MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7,
/* Enable TCP/UDP header fields if packet is IPv6 */
MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8,
+
+ __MLXSW_REG_RECR2_HEADER_CNT,
};
/* reg_recr2_outer_header_enables
@@ -8339,6 +8341,8 @@ enum {
MLXSW_REG_RECR2_TCP_UDP_SPORT = 74,
/* TCP/UDP Destination Port */
MLXSW_REG_RECR2_TCP_UDP_DPORT = 75,
+
+ __MLXSW_REG_RECR2_FIELD_CNT,
};
/* reg_recr2_outer_header_fields_enable
@@ -8347,47 +8351,47 @@ enum {
*/
MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);
-static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload)
-{
- int i;
-
- for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_SIP3; i++)
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
- true);
-}
-
-static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload)
-{
- int i;
-
- for (i = MLXSW_REG_RECR2_IPV4_DIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++)
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
- true);
-}
-
-static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload)
-{
- int i = MLXSW_REG_RECR2_IPV6_SIP0_7;
-
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
-
- i = MLXSW_REG_RECR2_IPV6_SIP8;
- for (; i <= MLXSW_REG_RECR2_IPV6_SIP15; i++)
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
- true);
-}
-
-static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload)
-{
- int i = MLXSW_REG_RECR2_IPV6_DIP0_7;
-
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+/* reg_recr2_inner_header_enables
+ * Bit mask where each bit enables a specific inner layer to be included in the
+ * hash calculation. Same values as reg_recr2_outer_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_enables, 0x2C, 0x04, 1);
- i = MLXSW_REG_RECR2_IPV6_DIP8;
- for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++)
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
- true);
-}
+enum {
+ /* Inner IPv4 Source IP */
+ MLXSW_REG_RECR2_INNER_IPV4_SIP0 = 3,
+ MLXSW_REG_RECR2_INNER_IPV4_SIP3 = 6,
+ /* Inner IPv4 Destination IP */
+ MLXSW_REG_RECR2_INNER_IPV4_DIP0 = 7,
+ MLXSW_REG_RECR2_INNER_IPV4_DIP3 = 10,
+ /* Inner IP Protocol */
+ MLXSW_REG_RECR2_INNER_IPV4_PROTOCOL = 11,
+ /* Inner IPv6 Source IP */
+ MLXSW_REG_RECR2_INNER_IPV6_SIP0_7 = 12,
+ MLXSW_REG_RECR2_INNER_IPV6_SIP8 = 20,
+ MLXSW_REG_RECR2_INNER_IPV6_SIP15 = 27,
+ /* Inner IPv6 Destination IP */
+ MLXSW_REG_RECR2_INNER_IPV6_DIP0_7 = 28,
+ MLXSW_REG_RECR2_INNER_IPV6_DIP8 = 36,
+ MLXSW_REG_RECR2_INNER_IPV6_DIP15 = 43,
+ /* Inner IPv6 Next Header */
+ MLXSW_REG_RECR2_INNER_IPV6_NEXT_HEADER = 44,
+ /* Inner IPv6 Flow Label */
+ MLXSW_REG_RECR2_INNER_IPV6_FLOW_LABEL = 45,
+ /* Inner TCP/UDP Source Port */
+ MLXSW_REG_RECR2_INNER_TCP_UDP_SPORT = 46,
+ /* Inner TCP/UDP Destination Port */
+ MLXSW_REG_RECR2_INNER_TCP_UDP_DPORT = 47,
+
+ __MLXSW_REG_RECR2_INNER_FIELD_CNT,
+};
+
+/* reg_recr2_inner_header_fields_enable
+ * Inner packet fields to enable for ECMP hash subject to inner_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_fields_enable, 0x30, 0x08, 1);
static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed)
{
@@ -9459,6 +9463,14 @@ MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 12);
((s16)((GENMASK(15, 0) + (v_) + 1) \
* 125)); })
+/* reg_mtmp_max_operational_temperature
+ * The highest temperature in the nominal operational range. Reading is in
+ * 0.125 Celsius degrees units.
+ * In the case of a module, this is the SFF critical temperature threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtmp, max_operational_temperature, 0x04, 16, 16);
+
/* reg_mtmp_temperature
* Temperature reading from the sensor. Reading is in 0.125 Celsius
* degrees units.
@@ -9537,7 +9549,9 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
}
static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
- int *p_max_temp, char *sensor_name)
+ int *p_max_temp, int *p_temp_hi,
+ int *p_max_oper_temp,
+ char *sensor_name)
{
s16 temp;
@@ -9549,6 +9563,14 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
temp = mlxsw_reg_mtmp_max_temperature_get(payload);
*p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
}
+ if (p_temp_hi) {
+ temp = mlxsw_reg_mtmp_temperature_threshold_hi_get(payload);
+ *p_temp_hi = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (p_max_oper_temp) {
+ temp = mlxsw_reg_mtmp_max_operational_temperature_get(payload);
+ *p_max_oper_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
if (sensor_name)
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
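mlxsw_reg_mtmp_unpack() now optionally returns the high threshold and the new max_operational_temperature as well; all MTMP readings are signed values in 0.125 degree Celsius units that MLXSW_REG_MTMP_TEMP_TO_MC scales to the millidegrees the thermal core expects. A standalone arithmetic check of that scaling (plain userspace C, not kernel code):

/* 0.125 degC per LSB means multiplying the signed raw value by 125
 * yields millidegrees Celsius.
 */
#include <stdint.h>
#include <stdio.h>

static int mtmp_raw_to_mc(int16_t raw)
{
	return (int)raw * 125;
}

int main(void)
{
	printf("%d\n", mtmp_raw_to_mc(200));	/* 25000, i.e. 25.000 C */
	printf("%d\n", mtmp_raw_to_mc(-40));	/* -5000, i.e. -5.000 C */
	return 0;
}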
@@ -9668,6 +9690,20 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
*/
MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+enum {
+ MLXSW_REG_MCIA_STATUS_GOOD = 0,
+ /* No response from module's EEPROM. */
+ MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE = 1,
+ /* Module type not supported by the device. */
+ MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED = 2,
+ /* No module present indication. */
+ MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED = 3,
+ /* Error occurred while trying to access module's EEPROM using I2C. */
+ MLXSW_REG_MCIA_STATUS_I2C_ERROR = 9,
+ /* Module is disabled. */
+ MLXSW_REG_MCIA_STATUS_MODULE_DISABLED = 16,
+};
+
/* reg_mcia_status
* Module status.
* Access: RO
@@ -9692,6 +9728,12 @@ MLXSW_ITEM32(reg, mcia, page_number, 0x04, 16, 8);
*/
MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
+/* reg_mcia_bank_number
+ * Bank number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, bank_number, 0x08, 16, 8);
+
/* reg_mcia_size
* Number of bytes to read/write (up to 48 bytes).
* Access: RW
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bca0354482cb..88699e678544 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2125,9 +2125,14 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
+ unsigned int max_ports;
u8 local_port;
+ max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+
+ if (WARN_ON_ONCE(local_port >= max_ports))
+ return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 37ff29a1686e..9de160e740b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -364,7 +364,7 @@ static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
- u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
+ u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed);
return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}
@@ -388,8 +388,8 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
int i;
/* Internal buffer. */
- reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
- mlxsw_sp_port->max_mtu);
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ mlxsw_sp_port->max_speed);
reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
hdroom->int_buf.reserve_cells = reserve_cells;
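Both spectrum_buffers.c hunks fix the same bug class: mtu and speed are bare integers, so a swapped call compiles cleanly and only misbehaves at run time. A defensive pattern, sketched here with hypothetical single-member wrapper types, turns such a swap into a compile error:

/* Hypothetical wrapper types, not part of mlxsw: a call that swaps
 * the arguments no longer type-checks.
 */
struct example_mtu { int v; };
struct example_speed { unsigned int v; };

static unsigned int example_int_buf_size_get(struct example_mtu mtu,
					     struct example_speed speed)
{
	/* stand-in for the per-ASIC internal-buffer formula */
	return speed.v / 1000 + (unsigned int)mtu.v;
}

/* example_int_buf_size_get((struct example_speed){ 25000 }, ...) now
 * fails to compile instead of silently computing a wrong size.
 */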
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index c8061beed6db..267590a0eee7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1051,6 +1051,19 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
}
static int
+mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
+ extack);
+}
+
+static int
mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
@@ -1199,6 +1212,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
+ .get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
.get_ts_info = mlxsw_sp_get_ts_info,
.get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index d6e9ecb14681..bfef65d1587c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -568,10 +568,13 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp1_ptp_key key;
u8 types;
+ if (WARN_ON_ONCE(local_port >= max_ports))
+ return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 04672eb5c7f3..9958d503bf0e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
+ u32 parent;
if (band < mlxsw_sp_qdisc->num_classes &&
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+ parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+ parent);
if (!WARN_ON(!mlxsw_sp_qdisc))
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
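The graft fix above stops passing a raw band index to find_class() and instead passes a full tc classid built with TC_H_MAKE(handle, band + 1). A worked userspace example of the 16:16 major:minor arithmetic, using the real TC_H_MAKE/TC_H_MAJ/TC_H_MIN uapi macros:

/* For an ETS qdisc with handle 1: (0x00010000), band 0 corresponds to
 * class 1:1, band 7 to class 1:8.
 */
#include <linux/pkt_sched.h>
#include <stdio.h>

int main(void)
{
	unsigned int handle = 0x00010000;	/* qdisc "1:" */
	int band;

	for (band = 0; band < 8; band++) {
		unsigned int classid = TC_H_MAKE(handle, band + 1);

		printf("band %d -> %x:%x\n", band,
		       TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
	}
	return 0;
}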
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 41259c0004d1..7e221ef01437 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2282,6 +2282,7 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
char *rauhtd_pl,
int ent_index)
{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
struct net_device *dev;
struct neighbour *n;
__be32 dipn;
@@ -2290,6 +2291,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
+ if (WARN_ON_ONCE(rif >= max_rifs))
+ return;
if (!mlxsw_sp->router->rifs[rif]) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
return;
@@ -3841,8 +3844,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
bool offload_change = false;
u32 adj_index;
bool old_adj_index_valid;
- int i, err2, err = 0;
u32 old_adj_index;
+ int i, err2, err;
if (!nhgi->gateway)
return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
@@ -3872,11 +3875,13 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
return 0;
}
mlxsw_sp_nexthop_group_normalize(nhgi);
- if (!nhgi->sum_norm_weight)
+ if (!nhgi->sum_norm_weight) {
/* No neigh of this group is connected so we just set
* the trap and let everything flow through the kernel.
*/
+ err = 0;
goto set_trap;
+ }
ecmp_size = nhgi->sum_norm_weight;
err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
@@ -4307,9 +4312,6 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_key key;
struct mlxsw_sp_nexthop *nh;
- if (mlxsw_sp->router->aborted)
- return;
-
key.fib_nh = fib_nh;
nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
if (!nh)
@@ -5405,7 +5407,6 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
&rt->fib6_nh->fib_nh_gw6))
return nh;
- continue;
}
return NULL;
@@ -6417,9 +6418,6 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (fen_info->fi->nh &&
!mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
return 0;
@@ -6480,9 +6478,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
return 0;
@@ -7065,9 +7060,6 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (rt->fib6_src.plen)
return -EINVAL;
@@ -7131,9 +7123,6 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (rt->fib6_src.plen)
return -EINVAL;
@@ -7175,9 +7164,6 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return 0;
@@ -7206,55 +7192,6 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- u8 tree_id)
-{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
- enum mlxsw_reg_ralxx_protocol ralxx_proto =
- (enum mlxsw_reg_ralxx_protocol) proto;
- struct mlxsw_sp_fib_entry_priv *priv;
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
- char xralst_pl[MLXSW_REG_XRALST_LEN];
- int i, err;
-
- mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
- err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
- if (err)
- return err;
-
- mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
- err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
- if (err)
- return err;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
- err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
- if (err)
- return err;
-
- priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
- if (IS_ERR(priv))
- return PTR_ERR(priv);
-
- ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
- vr->id, 0, NULL, priv);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
- mlxsw_sp_fib_entry_priv_put(priv);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
@@ -7271,9 +7208,6 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return 0;
-
vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
if (IS_ERR(vr))
return PTR_ERR(vr);
@@ -7288,9 +7222,6 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return;
-
vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
if (WARN_ON(!vr))
return;
@@ -7308,9 +7239,6 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return 0;
-
vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
if (IS_ERR(vr))
return PTR_ERR(vr);
@@ -7329,9 +7257,6 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return;
-
vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
if (WARN_ON(!vr))
return;
@@ -7341,25 +7266,6 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
-{
- enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
- int err;
-
- err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
- MLXSW_SP_LPM_TREE_MIN);
- if (err)
- return err;
-
- /* The multicast router code does not need an abort trap as by default,
- * packets that don't match any routes are trapped to the CPU.
- */
-
- proto = MLXSW_SP_L3_PROTO_IPV6;
- return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
- MLXSW_SP_LPM_TREE_MIN + 1);
-}
-
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
@@ -7446,20 +7352,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->router->adj_discard_index_valid = false;
}
-static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- if (mlxsw_sp->router->aborted)
- return;
- dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
- mlxsw_sp_router_fib_flush(mlxsw_sp);
- mlxsw_sp->router->aborted = true;
- err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
- if (err)
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
-}
-
struct mlxsw_sp_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
@@ -7541,7 +7433,7 @@ static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
&fib_event->fen_info);
}
@@ -7576,7 +7468,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
fib_event->fib6_event.nrt6);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
fib6_event->rt_arr,
fib6_event->nrt6);
@@ -7588,7 +7480,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
fib_event->fib6_event.nrt6);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
fib6_event->rt_arr,
fib6_event->nrt6);
@@ -7620,7 +7512,7 @@ static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
if (err)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
@@ -7631,7 +7523,7 @@ static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
&fib_event->ven_info);
if (err)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
dev_put(fib_event->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
@@ -7795,9 +7687,6 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
if (event == FIB_EVENT_RULE_DEL)
return 0;
- if (mlxsw_sp->router->aborted)
- return 0;
-
fr_info = container_of(info, struct fib_rule_notifier_info, info);
rule = fr_info->rule;
@@ -7855,10 +7744,6 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
- if (router->aborted) {
- NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
- return notifier_from_errno(-EINVAL);
- }
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
@@ -9594,66 +9479,229 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
+struct mlxsw_sp_mp_hash_config {
+ DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
+ DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
+ DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
+ DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
+};
+
+#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
+ bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
+
+#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
+ bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
+
+#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
+ bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
+
+static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
- mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
+ unsigned long *inner_headers = config->inner_headers;
+ unsigned long *inner_fields = config->inner_fields;
+
+ /* IPv4 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
+ /* IPv6 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}
-static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
+static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
- mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}
-static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
+static void
+mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
+ u32 hash_fields)
+{
+ unsigned long *inner_headers = config->inner_headers;
+ unsigned long *inner_fields = config->inner_fields;
+
+ /* IPv4 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
+ /* IPv6 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
+ /* L4 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
+}
+
+static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mp_hash_config *config)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
-
- mlxsw_sp_mp_hash_header_set(recr2_pl,
- MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
- mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
- mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
- mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
- if (only_l3)
- return;
- mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+ u32 hash_fields;
+
+ switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+ case 0:
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ break;
+ case 1:
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ break;
+ case 2:
+ /* Outer */
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_l3(config);
+ break;
+ case 3:
+ hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ /* Outer */
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
+ break;
+ }
}
-static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
+static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
- bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
- mlxsw_sp_mp_hash_header_set(recr2_pl,
- MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
- mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
- mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
- mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
- if (only_l3) {
- mlxsw_sp_mp_hash_field_set(recr2_pl,
- MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
- } else {
- mlxsw_sp_mp_hash_header_set(recr2_pl,
- MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
- mlxsw_sp_mp_hash_field_set(recr2_pl,
- MLXSW_REG_RECR2_TCP_UDP_SPORT);
- mlxsw_sp_mp_hash_field_set(recr2_pl,
- MLXSW_REG_RECR2_TCP_UDP_DPORT);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
+}
+
+static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mp_hash_config *config)
+{
+ u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+
+ switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
+ case 0:
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ break;
+ case 1:
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ break;
+ case 2:
+ /* Outer */
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_l3(config);
+ break;
+ case 3:
+ /* Outer */
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
+ break;
}
}
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_mp_hash_config config = {};
char recr2_pl[MLXSW_REG_RECR2_LEN];
+ unsigned long bit;
u32 seed;
seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
- mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
- mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
+ mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
+ mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
+
+ for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
+ mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
+ mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
+ mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
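The rewritten multipath-hash setup accumulates header and field enables in the struct mlxsw_sp_mp_hash_config bitmaps first and only then walks the set bits into the RECR2 payload, so the outer, inner, IPv4 and IPv6 cases compose without overwriting one another. A self-contained sketch of that collect-then-apply idiom; apply_field() is a hypothetical stand-in for the mlxsw_reg_recr2_*_set() helpers:

/* DECLARE_BITMAP, bitmap_set and for_each_set_bit are the real kernel
 * APIs; the field numbers below reuse the inner-field enum from the
 * reg.h hunk above.
 */
#include <linux/bitmap.h>
#include <linux/printk.h>

#define EXAMPLE_FIELD_CNT 48	/* matches __MLXSW_REG_RECR2_INNER_FIELD_CNT */

static void apply_field(unsigned long bit)
{
	pr_info("enable hash field %lu\n", bit);
}

static void example_apply(void)
{
	DECLARE_BITMAP(fields, EXAMPLE_FIELD_CNT);
	unsigned long bit;

	bitmap_zero(fields, EXAMPLE_FIELD_CNT);
	bitmap_set(fields, 3, 4);	/* INNER_IPV4_SIP0..SIP3 as one range */
	bitmap_set(fields, 46, 1);	/* INNER_TCP_UDP_SPORT */
	for_each_set_bit(bit, fields, EXAMPLE_FIELD_CNT)
		apply_field(bit);	/* visits 3, 4, 5, 6, 46 */
}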
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index be7708a375e1..c5d7007f9173 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -58,7 +58,6 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_neighs_list;
struct list_head ipip_list;
- bool aborted;
struct notifier_block nexthop_nb;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index eeccd586e781..c5ef9aa64efe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -898,7 +898,7 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_port_attr_set(struct net_device *dev,
+static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
@@ -1766,7 +1766,7 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_port_obj_add(struct net_device *dev,
+static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
@@ -1916,7 +1916,7 @@ mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_port_obj_del(struct net_device *dev,
+static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -2520,6 +2520,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -2532,6 +2533,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
+
+ if (WARN_ON_ONCE(local_port >= max_ports))
+ return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
deleted file mode 100644
index 1e561132eb1e..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ /dev/null
@@ -1,595 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/skbuff.h>
-#include <linux/if_vlan.h>
-#include <net/switchdev.h>
-
-#include "pci.h"
-#include "core.h"
-#include "reg.h"
-#include "port.h"
-#include "trap.h"
-#include "txheader.h"
-#include "ib.h"
-
-static const char mlxsw_sib_driver_name[] = "mlxsw_switchib";
-static const char mlxsw_sib2_driver_name[] = "mlxsw_switchib2";
-
-struct mlxsw_sib_port;
-
-struct mlxsw_sib {
- struct mlxsw_sib_port **ports;
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
- u8 hw_id[ETH_ALEN];
-};
-
-struct mlxsw_sib_port {
- struct mlxsw_sib *mlxsw_sib;
- u8 local_port;
- struct {
- u8 module;
- } mapping;
-};
-
-/* tx_v1_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx_v1, hdr, version, 0x00, 28, 4);
-
-/* tx_v1_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx_v1, hdr, ctl, 0x00, 26, 2);
-
-/* tx_v1_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx_v1, hdr, proto, 0x00, 21, 3);
-
-/* tx_v1_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx_v1, hdr, swid, 0x00, 12, 3);
-
-/* tx_v1_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx_v1, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_v1_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx_v1, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_v1_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx_v1, hdr, type, 0x0C, 0, 4);
-
-static void
-mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_v1_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_v1_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_v1_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_v1_hdr_swid_set(txhdr, 0);
- mlxsw_tx_v1_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_v1_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib)
-{
- char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
- int err;
-
- err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl);
- if (err)
- return err;
- mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id);
- return 0;
-}
-
-static int
-mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
- bool is_up)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
- char paos_pl[MLXSW_REG_PAOS_LEN];
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sib_port->local_port,
- is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
- MLXSW_PORT_ADMIN_STATUS_DOWN);
- return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(paos), paos_pl);
-}
-
-static int mlxsw_sib_port_mtu_set(struct mlxsw_sib_port *mlxsw_sib_port,
- u16 mtu)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int max_mtu;
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
- max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
-
- if (mtu > max_mtu)
- return -EINVAL;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, mtu);
- return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl);
-}
-
-static int mlxsw_sib_port_set(struct mlxsw_sib_port *mlxsw_sib_port, u8 port)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
- char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
- int err;
-
- mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sib_port->local_port);
- mlxsw_reg_plib_ib_port_set(plib_pl, port);
- err = mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(plib), plib_pl);
- return err;
-}
-
-static int mlxsw_sib_port_swid_set(struct mlxsw_sib_port *mlxsw_sib_port,
- u8 swid)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
- char pspa_pl[MLXSW_REG_PSPA_LEN];
-
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sib_port->local_port);
- return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pspa), pspa_pl);
-}
-
-static int mlxsw_sib_port_module_info_get(struct mlxsw_sib *mlxsw_sib,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
- int err;
-
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
- *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
- *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- return 0;
-}
-
-static int mlxsw_sib_port_speed_set(struct mlxsw_sib_port *mlxsw_sib_port,
- u16 speed, u16 width)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
-
- mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sib_port->local_port, speed,
- width);
- return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(ptys), ptys_pl);
-}
-
-static bool mlxsw_sib_port_created(struct mlxsw_sib *mlxsw_sib, u8 local_port)
-{
- return mlxsw_sib->ports[local_port] != NULL;
-}
-
-static int __mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
- u8 module, u8 width)
-{
- struct mlxsw_sib_port *mlxsw_sib_port;
- int err;
-
- mlxsw_sib_port = kzalloc(sizeof(*mlxsw_sib_port), GFP_KERNEL);
- if (!mlxsw_sib_port)
- return -ENOMEM;
- mlxsw_sib_port->mlxsw_sib = mlxsw_sib;
- mlxsw_sib_port->local_port = local_port;
- mlxsw_sib_port->mapping.module = module;
-
- err = mlxsw_sib_port_swid_set(mlxsw_sib_port, 0);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sib_port->local_port);
- goto err_port_swid_set;
- }
-
- /* Expose the IB port number as its front panel name */
- err = mlxsw_sib_port_set(mlxsw_sib_port, module + 1);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set IB port\n",
- mlxsw_sib_port->local_port);
- goto err_port_ib_set;
- }
-
- /* Supports all speeds from SDR to FDR (bitmask) and supports bus widths
- * of 1x, 2x and 4x (3-bit bitmask)
- */
- err = mlxsw_sib_port_speed_set(mlxsw_sib_port,
- MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
- BIT(3) - 1);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set speed\n",
- mlxsw_sib_port->local_port);
- goto err_port_speed_set;
- }
-
- /* Change to the maximum MTU the device supports; the SMA will take
- * care of the active MTU
- */
- err = mlxsw_sib_port_mtu_set(mlxsw_sib_port, MLXSW_IB_DEFAULT_MTU);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set MTU\n",
- mlxsw_sib_port->local_port);
- goto err_port_mtu_set;
- }
-
- err = mlxsw_sib_port_admin_status_set(mlxsw_sib_port, true);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
- mlxsw_sib_port->local_port);
- goto err_port_admin_set;
- }
-
- mlxsw_core_port_ib_set(mlxsw_sib->core, mlxsw_sib_port->local_port,
- mlxsw_sib_port);
- mlxsw_sib->ports[local_port] = mlxsw_sib_port;
- return 0;
-
-err_port_admin_set:
-err_port_mtu_set:
-err_port_speed_set:
-err_port_ib_set:
- mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
- kfree(mlxsw_sib_port);
- return err;
-}
-
-static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
- u8 module, u8 width)
-{
- int err;
-
- err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
- module + 1, false, 0, false, 0,
- mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
- local_port);
- return err;
- }
- err = __mlxsw_sib_port_create(mlxsw_sib, local_port, module, width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_core_port_fini(mlxsw_sib->core, local_port);
- return err;
-}
-
-static void __mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port)
-{
- struct mlxsw_sib_port *mlxsw_sib_port = mlxsw_sib->ports[local_port];
-
- mlxsw_core_port_clear(mlxsw_sib->core, local_port, mlxsw_sib);
- mlxsw_sib->ports[local_port] = NULL;
- mlxsw_sib_port_admin_status_set(mlxsw_sib_port, false);
- mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT);
- kfree(mlxsw_sib_port);
-}
-
-static void mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port)
-{
- __mlxsw_sib_port_remove(mlxsw_sib, local_port);
- mlxsw_core_port_fini(mlxsw_sib->core, local_port);
-}
-
-static void mlxsw_sib_ports_remove(struct mlxsw_sib *mlxsw_sib)
-{
- int i;
-
- for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++)
- if (mlxsw_sib_port_created(mlxsw_sib, i))
- mlxsw_sib_port_remove(mlxsw_sib, i);
- kfree(mlxsw_sib->ports);
-}
-
-static int mlxsw_sib_ports_create(struct mlxsw_sib *mlxsw_sib)
-{
- size_t alloc_size;
- u8 module, width;
- int i;
- int err;
-
- alloc_size = sizeof(struct mlxsw_sib_port *) * MLXSW_PORT_MAX_IB_PORTS;
- mlxsw_sib->ports = kzalloc(alloc_size, GFP_KERNEL);
- if (!mlxsw_sib->ports)
- return -ENOMEM;
-
- for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++) {
- err = mlxsw_sib_port_module_info_get(mlxsw_sib, i, &module,
- &width);
- if (err)
- goto err_port_module_info_get;
- if (!width)
- continue;
- err = mlxsw_sib_port_create(mlxsw_sib, i, module, width);
- if (err)
- goto err_port_create;
- }
- return 0;
-
-err_port_create:
-err_port_module_info_get:
- for (i--; i >= 1; i--)
- if (mlxsw_sib_port_created(mlxsw_sib, i))
- mlxsw_sib_port_remove(mlxsw_sib, i);
- kfree(mlxsw_sib->ports);
- return err;
-}
-
-static void
-mlxsw_sib_pude_ib_event_func(struct mlxsw_sib_port *mlxsw_sib_port,
- enum mlxsw_reg_pude_oper_status status)
-{
- if (status == MLXSW_PORT_OPER_STATUS_UP)
- pr_info("ib link for port %d - up\n",
- mlxsw_sib_port->mapping.module + 1);
- else
- pr_info("ib link for port %d - down\n",
- mlxsw_sib_port->mapping.module + 1);
-}
-
-static void mlxsw_sib_pude_event_func(const struct mlxsw_reg_info *reg,
- char *pude_pl, void *priv)
-{
- struct mlxsw_sib *mlxsw_sib = priv;
- struct mlxsw_sib_port *mlxsw_sib_port;
- enum mlxsw_reg_pude_oper_status status;
- u8 local_port;
-
- local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- mlxsw_sib_port = mlxsw_sib->ports[local_port];
- if (!mlxsw_sib_port) {
- dev_warn(mlxsw_sib->bus_info->dev, "Port %d: Link event received for non-existent port\n",
- local_port);
- return;
- }
-
- status = mlxsw_reg_pude_oper_status_get(pude_pl);
- mlxsw_sib_pude_ib_event_func(mlxsw_sib_port, status);
-}
-
-static const struct mlxsw_listener mlxsw_sib_listener[] = {
- MLXSW_EVENTL(mlxsw_sib_pude_event_func, PUDE, EMAD),
-};
-
-static int mlxsw_sib_taps_init(struct mlxsw_sib *mlxsw_sib)
-{
- int i;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) {
- err = mlxsw_core_trap_register(mlxsw_sib->core,
- &mlxsw_sib_listener[i],
- mlxsw_sib);
- if (err)
- goto err_rx_listener_register;
- }
-
- return 0;
-
-err_rx_listener_register:
- for (i--; i >= 0; i--) {
- mlxsw_core_trap_unregister(mlxsw_sib->core,
- &mlxsw_sib_listener[i],
- mlxsw_sib);
- }
-
- return err;
-}
-
-static void mlxsw_sib_traps_fini(struct mlxsw_sib *mlxsw_sib)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) {
- mlxsw_core_trap_unregister(mlxsw_sib->core,
- &mlxsw_sib_listener[i], mlxsw_sib);
- }
-}
-
-static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
- mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
- MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
-}
-
-static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
- int err;
-
- mlxsw_sib->core = mlxsw_core;
- mlxsw_sib->bus_info = mlxsw_bus_info;
-
- err = mlxsw_sib_hw_id_get(mlxsw_sib);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n");
- return err;
- }
-
- err = mlxsw_sib_ports_create(mlxsw_sib);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
- return err;
- }
-
- err = mlxsw_sib_taps_init(mlxsw_sib);
- if (err) {
- dev_err(mlxsw_sib->bus_info->dev, "Failed to set traps\n");
- goto err_traps_init_err;
- }
-
- return 0;
-
-err_traps_init_err:
- mlxsw_sib_ports_remove(mlxsw_sib);
- return err;
-}
-
-static void mlxsw_sib_fini(struct mlxsw_core *mlxsw_core)
-{
- struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
-
- mlxsw_sib_traps_fini(mlxsw_sib);
- mlxsw_sib_ports_remove(mlxsw_sib);
-}
-
-static const struct mlxsw_config_profile mlxsw_sib_config_profile = {
- .used_max_system_port = 1,
- .max_system_port = 48000,
- .used_max_ib_mc = 1,
- .max_ib_mc = 27,
- .used_max_pkey = 1,
- .max_pkey = 32,
- .swid_config = {
- {
- .used_type = 1,
- .type = MLXSW_PORT_SWID_TYPE_IB,
- }
- },
-};
-
-static struct mlxsw_driver mlxsw_sib_driver = {
- .kind = mlxsw_sib_driver_name,
- .priv_size = sizeof(struct mlxsw_sib),
- .init = mlxsw_sib_init,
- .fini = mlxsw_sib_fini,
- .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set,
- .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sib_config_profile,
-};
-
-static struct mlxsw_driver mlxsw_sib2_driver = {
- .kind = mlxsw_sib2_driver_name,
- .priv_size = sizeof(struct mlxsw_sib),
- .init = mlxsw_sib_init,
- .fini = mlxsw_sib_fini,
- .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set,
- .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sib_config_profile,
-};
-
-static const struct pci_device_id mlxsw_sib_pci_id_table[] = {
- {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB), 0},
- {0, },
-};
-
-static struct pci_driver mlxsw_sib_pci_driver = {
- .name = mlxsw_sib_driver_name,
- .id_table = mlxsw_sib_pci_id_table,
-};
-
-static const struct pci_device_id mlxsw_sib2_pci_id_table[] = {
- {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB2), 0},
- {0, },
-};
-
-static struct pci_driver mlxsw_sib2_pci_driver = {
- .name = mlxsw_sib2_driver_name,
- .id_table = mlxsw_sib2_pci_id_table,
-};
-
-static int __init mlxsw_sib_module_init(void)
-{
- int err;
-
- err = mlxsw_core_driver_register(&mlxsw_sib_driver);
- if (err)
- return err;
-
- err = mlxsw_core_driver_register(&mlxsw_sib2_driver);
- if (err)
- goto err_sib2_driver_register;
-
- err = mlxsw_pci_driver_register(&mlxsw_sib_pci_driver);
- if (err)
- goto err_sib_pci_driver_register;
-
- err = mlxsw_pci_driver_register(&mlxsw_sib2_pci_driver);
- if (err)
- goto err_sib2_pci_driver_register;
-
- return 0;
-
-err_sib2_pci_driver_register:
- mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver);
-err_sib_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sib2_driver);
-err_sib2_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sib_driver);
- return err;
-}
-
-static void __exit mlxsw_sib_module_exit(void)
-{
- mlxsw_pci_driver_unregister(&mlxsw_sib2_pci_driver);
- mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sib2_driver);
- mlxsw_core_driver_unregister(&mlxsw_sib_driver);
-}
-
-module_init(mlxsw_sib_module_init);
-module_exit(mlxsw_sib_module_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Elad Raz <eladr@@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox SwitchIB and SwitchIB-2 driver");
-MODULE_ALIAS("mlxsw_switchib2");
-MODULE_DEVICE_TABLE(pci, mlxsw_sib_pci_id_table);
-MODULE_DEVICE_TABLE(pci, mlxsw_sib2_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
deleted file mode 100644
index 131b2a53d261..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ /dev/null
@@ -1,1691 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/skbuff.h>
-#include <linux/if_vlan.h>
-
-#include "pci.h"
-#include "core.h"
-#include "reg.h"
-#include "port.h"
-#include "trap.h"
-#include "txheader.h"
-#include "ib.h"
-
-static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
-static const char mlxsw_sx_driver_version[] = "1.0";
-
-struct mlxsw_sx_port;
-
-struct mlxsw_sx {
- struct mlxsw_sx_port **ports;
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
- u8 hw_id[ETH_ALEN];
-};
-
-struct mlxsw_sx_port_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
- u32 tx_dropped;
-};
-
-struct mlxsw_sx_port {
- struct net_device *dev;
- struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
- struct mlxsw_sx *mlxsw_sx;
- u8 local_port;
- struct {
- u8 module;
- } mapping;
-};
-
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- * The MSB is specified in the 'ctclass3' field.
- * Range is 0-15, where 15 is the highest priority.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
-
-/* tx_hdr_swid
- * Switch partition ID.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_ctclass3
- * See field 'etclass'.
- */
-MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
-
-/* tx_hdr_rdq
- * RDQ for control packets sent to remote CPU.
- * Must be set to 0x1F for EMADs, otherwise 0.
- */
-MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
-
-/* tx_hdr_cpu_sig
- * Signature control for packets going to CPU. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
-
-/* tx_hdr_sig
- * Stacking protocol signature. Must be set to 0xE0E0.
- */
-MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
-
-/* tx_hdr_stclass
- * Stacking TClass.
- */
-MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
-
-/* tx_hdr_emad
- * EMAD bit. Must be set for EMADs.
- */
-MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
-static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- bool is_emad = tx_info->is_emad;
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- /* We currently set default values for the egress tclass (QoS). */
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
- MLXSW_TXHDR_ETCLASS_5);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
- mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
- MLXSW_TXHDR_RDQ_OTHER);
- mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
- mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
- mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
- mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
- MLXSW_TXHDR_NOT_EMAD);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
- bool is_up)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char paos_pl[MLXSW_REG_PAOS_LEN];
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
- is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
- MLXSW_PORT_ADMIN_STATUS_DOWN);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
-}
-
-static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
- bool *p_is_up)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
- return 0;
-}
-
-static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u16 mtu)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int max_mtu;
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
- max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
-
- if (mtu > max_mtu)
- return -EINVAL;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
-}
-
-static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u16 mtu)
-{
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
- return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
-}
-
-static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u16 mtu)
-{
- return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
-}
-
-static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u8 ib_port)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
- int err;
-
- mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
- mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
- return err;
-}
-
-static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char pspa_pl[MLXSW_REG_PSPA_LEN];
-
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
-}
-
-static int
-mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char sspr_pl[MLXSW_REG_SSPR_LEN];
-
- mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
-}
-
-static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
- int err;
-
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
- *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
- *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- return 0;
-}
-
-static int mlxsw_sx_port_open(struct net_device *dev)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- int err;
-
- err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
- if (err)
- return err;
- netif_start_queue(dev);
- return 0;
-}
-
-static int mlxsw_sx_port_stop(struct net_device *dev)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
-
- netif_stop_queue(dev);
- return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
-}
-
-static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sx_port->local_port,
- .is_emad = false,
- };
- u64 len;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
-
- if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
- return NETDEV_TX_BUSY;
-
- mlxsw_sx_txhdr_construct(skb, &tx_info);
- /* TX header is consumed by HW on the way so we shouldn't count its
- * bytes as being sent.
- */
- len = skb->len - MLXSW_TXHDR_LEN;
- /* Due to a race we might fail here because of a full queue. In that
- * unlikely case we simply drop the packet.
- */
- err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
-
- if (!err) {
- pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->tx_packets++;
- pcpu_stats->tx_bytes += len;
- u64_stats_update_end(&pcpu_stats->syncp);
- } else {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- }
- return NETDEV_TX_OK;
-}
-
-static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- int err;
-
- err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
- if (err)
- return err;
- dev->mtu = mtu;
- return 0;
-}
-
-static void
-mlxsw_sx_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx_port_pcpu_stats *p;
- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
- u32 tx_dropped = 0;
- unsigned int start;
- int i;
-
- for_each_possible_cpu(i) {
- p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
- do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- /* tx_dropped is u32, updated without syncp protection. */
- tx_dropped += p->tx_dropped;
- }
- stats->tx_dropped = tx_dropped;
-}
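
The do/while above is the standard fetch/retry idiom for reading 64-bit per-CPU counters that a writer may be updating concurrently. A minimal userspace sketch, assuming u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() behave like a seqcount read section (the writer holds the counter odd while updating); the plain 64-bit loads are a deliberate simplification, since the retry loop discards any torn snapshot:

#include <stdint.h>
#include <stdatomic.h>

struct stats {
	_Atomic unsigned int seq;	/* odd while an update is in flight */
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

/* Writer: bracket the update so 'seq' is odd for its duration. */
static void stats_update(struct stats *s, uint64_t pkts, uint64_t bytes)
{
	atomic_fetch_add(&s->seq, 1);
	s->tx_packets += pkts;
	s->tx_bytes += bytes;
	atomic_fetch_add(&s->seq, 1);
}

/* Reader: retry until an even, unchanged 'seq' brackets the snapshot. */
static void stats_read(struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*pkts = s->tx_packets;
		*bytes = s->tx_bytes;
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t pkts, bytes;

	stats_update(&s, 1, 60);
	stats_read(&s, &pkts, &bytes);
	return !(pkts == 1 && bytes == 60);
}
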
-
-static struct devlink_port *
-mlxsw_sx_port_get_devlink_port(struct net_device *dev)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
-
- return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
- mlxsw_sx_port->local_port);
-}
-
-static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
- .ndo_open = mlxsw_sx_port_open,
- .ndo_stop = mlxsw_sx_port_stop,
- .ndo_start_xmit = mlxsw_sx_port_xmit,
- .ndo_change_mtu = mlxsw_sx_port_change_mtu,
- .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
- .ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
-};
-
-static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
-
- strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mlxsw_sx_driver_version,
- sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d",
- mlxsw_sx->bus_info->fw_rev.major,
- mlxsw_sx->bus_info->fw_rev.minor,
- mlxsw_sx->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
- sizeof(drvinfo->bus_info));
-}
-
-struct mlxsw_sx_port_hw_stats {
- char str[ETH_GSTRING_LEN];
- u64 (*getter)(const char *payload);
-};
-
-static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
- {
- .str = "a_frames_transmitted_ok",
- .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
- },
- {
- .str = "a_frames_received_ok",
- .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
- },
- {
- .str = "a_frame_check_sequence_errors",
- .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
- },
- {
- .str = "a_alignment_errors",
- .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
- },
- {
- .str = "a_octets_transmitted_ok",
- .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
- },
- {
- .str = "a_octets_received_ok",
- .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
- },
- {
- .str = "a_multicast_frames_xmitted_ok",
- .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
- },
- {
- .str = "a_broadcast_frames_xmitted_ok",
- .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
- },
- {
- .str = "a_multicast_frames_received_ok",
- .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
- },
- {
- .str = "a_broadcast_frames_received_ok",
- .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
- },
- {
- .str = "a_in_range_length_errors",
- .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
- },
- {
- .str = "a_out_of_range_length_field",
- .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
- },
- {
- .str = "a_frame_too_long_errors",
- .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
- },
- {
- .str = "a_symbol_error_during_carrier",
- .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
- },
- {
- .str = "a_mac_control_frames_transmitted",
- .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
- },
- {
- .str = "a_mac_control_frames_received",
- .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
- },
- {
- .str = "a_unsupported_opcodes_received",
- .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
- },
- {
- .str = "a_pause_mac_ctrl_frames_received",
- .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
- },
- {
- .str = "a_pause_mac_ctrl_frames_xmitted",
- .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
- },
-};
-
-#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
-
-static void mlxsw_sx_port_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
-{
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
- memcpy(p, mlxsw_sx_port_hw_stats[i].str,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static void mlxsw_sx_port_get_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
- int i;
- int err;
-
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
- MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
- for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
- data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
-}
-
-static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return MLXSW_SX_PORT_HW_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-struct mlxsw_sx_port_link_mode {
- u32 mask;
- u32 supported;
- u32 advertised;
- u32 speed;
-};
-
-static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
- MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .speed = 25000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
- .speed = 50000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .speed = 100000,
- },
-};
-
-#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
-#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
-
-static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
-{
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return SUPPORTED_FIBRE;
-
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
- return SUPPORTED_Backplane;
- return 0;
-}
-
-static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
- modes |= mlxsw_sx_port_link_mode[i].supported;
- }
- return modes;
-}
-
-static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
- modes |= mlxsw_sx_port_link_mode[i].advertised;
- }
- return modes;
-}
-
-static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
-{
- u32 speed = SPEED_UNKNOWN;
- u8 duplex = DUPLEX_UNKNOWN;
- int i;
-
- if (!carrier_ok)
- goto out;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
- speed = mlxsw_sx_port_link_mode[i].speed;
- duplex = DUPLEX_FULL;
- break;
- }
- }
-out:
- cmd->base.speed = speed;
- cmd->base.duplex = duplex;
-}
-
-static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
-{
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return PORT_FIBRE;
-
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
- return PORT_DA;
-
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
- return PORT_NONE;
-
- return PORT_OTHER;
-}
-
-static int
-mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 eth_proto_oper;
- u32 supported, advertising, lp_advertising;
- int err;
-
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
-
- supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
- mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
- mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
- eth_proto_oper, cmd);
-
- eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
- lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
-
- return 0;
-}
-
-static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (advertising & mlxsw_sx_port_link_mode[i].advertised)
- ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static u32 mlxsw_sx_to_ptys_speed(u32 speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sx_port_link_mode[i].speed)
- ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
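
mlxsw_sx_to_ptys_upper_speed() ORs in every link mode whose speed does not exceed the cap, which mlxsw_sx_port_speed_by_width_set() later in the file derives as MLXSW_SX_PORT_BASE_SPEED * width. A worked stand-alone illustration with an abridged speed table; the values mirror the .speed column of mlxsw_sx_port_link_mode[] above:

#include <stdint.h>
#include <stdio.h>

/* Abridged copy of the .speed column of mlxsw_sx_port_link_mode[] */
static const uint32_t mode_speed[] = {
	1000, 10000, 10000, 40000, 40000, 40000, 40000, 25000, 50000, 100000
};

int main(void)
{
	uint32_t upper_speed = 10000 * 4;	/* base speed * 4-lane width */
	unsigned int i;

	for (i = 0; i < sizeof(mode_speed) / sizeof(mode_speed[0]); i++)
		if (mode_speed[i] <= upper_speed)
			printf("mode %u (%u Mb/s) admitted\n", i, mode_speed[i]);
	/* Admits the 1G/10G/25G/40G entries; 50G and 100G stay cleared. */
	return 0;
}
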
-
-static int
-mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 speed;
- u32 eth_proto_new;
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 advertising;
- bool is_up;
- int err;
-
- speed = cmd->base.speed;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
-
- eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
- mlxsw_sx_to_ptys_advert_link(advertising) :
- mlxsw_sx_to_ptys_speed(speed);
-
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
- NULL);
-
- eth_proto_new = eth_proto_new & eth_proto_cap;
- if (!eth_proto_new) {
- netdev_err(dev, "Not supported proto admin requested");
- return -EINVAL;
- }
- if (eth_proto_new == eth_proto_admin)
- return 0;
-
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_new, true);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to set proto admin");
- return err;
- }
-
- err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
- return 0;
-
- err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
-
- err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
-
- return 0;
-}
-
-static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
- .get_drvinfo = mlxsw_sx_port_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = mlxsw_sx_port_get_strings,
- .get_ethtool_stats = mlxsw_sx_port_get_stats,
- .get_sset_count = mlxsw_sx_port_get_sset_count,
- .get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
- .set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
-};
-
-static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
-{
- char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
- int err;
-
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
- if (err)
- return err;
- mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
- return 0;
-}
-
-static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- struct net_device *dev = mlxsw_sx_port->dev;
- char ppad_pl[MLXSW_REG_PPAD_LEN];
- int err;
-
- mlxsw_reg_ppad_pack(ppad_pl, false, 0);
- err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
- if (err)
- return err;
- mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
- /* The last byte value in the base MAC address is guaranteed
- * to be such that it does not overflow when adding local_port
- * value.
- */
- dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
- return 0;
-}
-
-static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u16 vid, enum mlxsw_reg_spms_state state)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char *spms_pl;
- int err;
-
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
-}
-
-static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u16 speed, u16 width)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
-
- mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
- width);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
-}
-
-static int
-mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_admin;
-
- eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_admin, true);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
-}
-
-static int
-mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
- enum mlxsw_reg_spmlr_learn_mode mode)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- char spmlr_pl[MLXSW_REG_SPMLR_LEN];
-
- mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
-}
-
-static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
- u8 module, u8 width)
-{
- struct mlxsw_sx_port *mlxsw_sx_port;
- struct net_device *dev;
- int err;
-
- dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
- if (!dev)
- return -ENOMEM;
- SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
- dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
- mlxsw_sx_port = netdev_priv(dev);
- mlxsw_sx_port->dev = dev;
- mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
- mlxsw_sx_port->local_port = local_port;
- mlxsw_sx_port->mapping.module = module;
-
- mlxsw_sx_port->pcpu_stats =
- netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
- if (!mlxsw_sx_port->pcpu_stats) {
- err = -ENOMEM;
- goto err_alloc_stats;
- }
-
- dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
- dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
-
- err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
- mlxsw_sx_port->local_port);
- goto err_dev_addr_get;
- }
-
- netif_carrier_off(dev);
-
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_VLAN_CHALLENGED;
-
- dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
-
- /* Each packet needs to have a Tx header (metadata) on top of all other
- * headers.
- */
- dev->needed_headroom = MLXSW_TXHDR_LEN;
-
- err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
- mlxsw_sx_port->local_port);
- goto err_port_system_port_mapping_set;
- }
-
- err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sx_port->local_port);
- goto err_port_swid_set;
- }
-
- err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
- mlxsw_sx_port->local_port);
- goto err_port_speed_set;
- }
-
- err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
- mlxsw_sx_port->local_port);
- goto err_port_mtu_set;
- }
-
- err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
- if (err)
- goto err_port_admin_status_set;
-
- err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
- MLXSW_PORT_DEFAULT_VID,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
- mlxsw_sx_port->local_port);
- goto err_port_stp_state_set;
- }
-
- err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
- MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
- mlxsw_sx_port->local_port);
- goto err_port_mac_learning_mode_set;
- }
-
- err = register_netdev(dev);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
- mlxsw_sx_port->local_port);
- goto err_register_netdev;
- }
-
- mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev);
- mlxsw_sx->ports[local_port] = mlxsw_sx_port;
- return 0;
-
-err_register_netdev:
-err_port_mac_learning_mode_set:
-err_port_stp_state_set:
-err_port_admin_status_set:
-err_port_mtu_set:
-err_port_speed_set:
- mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
-err_port_system_port_mapping_set:
-err_dev_addr_get:
- free_percpu(mlxsw_sx_port->pcpu_stats);
-err_alloc_stats:
- free_netdev(dev);
- return err;
-}
-
-static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
- u8 module, u8 width)
-{
- int err;
-
- err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
- module + 1, false, 0, false, 0,
- mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
- local_port);
- return err;
- }
- err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_core_port_fini(mlxsw_sx->core, local_port);
- return err;
-}
-
-static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
-
- mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
- unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
- mlxsw_sx->ports[local_port] = NULL;
- mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
- free_percpu(mlxsw_sx_port->pcpu_stats);
- free_netdev(mlxsw_sx_port->dev);
-}
-
-static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
-{
- return mlxsw_sx->ports[local_port] != NULL;
-}
-
-static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
- u8 module, u8 width)
-{
- struct mlxsw_sx_port *mlxsw_sx_port;
- int err;
-
- mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
- if (!mlxsw_sx_port)
- return -ENOMEM;
- mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
- mlxsw_sx_port->local_port = local_port;
- mlxsw_sx_port->mapping.module = module;
-
- err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
- mlxsw_sx_port->local_port);
- goto err_port_system_port_mapping_set;
- }
-
- /* Adding port to Infiniband swid (1) */
- err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sx_port->local_port);
- goto err_port_swid_set;
- }
-
- /* Expose the IB port number as its front panel name */
- err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
- mlxsw_sx_port->local_port);
- goto err_port_ib_set;
- }
-
- /* Supports all speeds from SDR to FDR (bitmask) and bus widths of
- * 1x, 2x and 4x (3-bit bitmask)
- */
- err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
- MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
- BIT(3) - 1);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
- mlxsw_sx_port->local_port);
- goto err_port_speed_set;
- }
-
- /* Change to the maximum MTU the device supports; the SMA will take
- * care of the active MTU
- */
- err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
- mlxsw_sx_port->local_port);
- goto err_port_mtu_set;
- }
-
- err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
- mlxsw_sx_port->local_port);
- goto err_port_admin_set;
- }
-
- mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port);
- mlxsw_sx->ports[local_port] = mlxsw_sx_port;
- return 0;
-
-err_port_admin_set:
-err_port_mtu_set:
-err_port_speed_set:
-err_port_ib_set:
- mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
-err_port_system_port_mapping_set:
- kfree(mlxsw_sx_port);
- return err;
-}
-
-static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
-
- mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
- mlxsw_sx->ports[local_port] = NULL;
- mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
- mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
- kfree(mlxsw_sx_port);
-}
-
-static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
-{
- enum devlink_port_type port_type =
- mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
-
- if (port_type == DEVLINK_PORT_TYPE_ETH)
- __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
- else if (port_type == DEVLINK_PORT_TYPE_IB)
- __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
-}
-
-static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
-{
- __mlxsw_sx_port_remove(mlxsw_sx, local_port);
- mlxsw_core_port_fini(mlxsw_sx->core, local_port);
-}
-
-static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
-{
- int i;
-
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
- if (mlxsw_sx_port_created(mlxsw_sx, i))
- mlxsw_sx_port_remove(mlxsw_sx, i);
- kfree(mlxsw_sx->ports);
- mlxsw_sx->ports = NULL;
-}
-
-static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
-{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
- size_t alloc_size;
- u8 module, width;
- int i;
- int err;
-
- alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
- mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
- if (!mlxsw_sx->ports)
- return -ENOMEM;
-
- for (i = 1; i < max_ports; i++) {
- err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
- &width);
- if (err)
- goto err_port_module_info_get;
- if (!width)
- continue;
- err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
- if (err)
- goto err_port_create;
- }
- return 0;
-
-err_port_create:
-err_port_module_info_get:
- for (i--; i >= 1; i--)
- if (mlxsw_sx_port_created(mlxsw_sx, i))
- mlxsw_sx_port_remove(mlxsw_sx, i);
- kfree(mlxsw_sx->ports);
- mlxsw_sx->ports = NULL;
- return err;
-}
-
-static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
- enum mlxsw_reg_pude_oper_status status)
-{
- if (status == MLXSW_PORT_OPER_STATUS_UP) {
- netdev_info(mlxsw_sx_port->dev, "link up\n");
- netif_carrier_on(mlxsw_sx_port->dev);
- } else {
- netdev_info(mlxsw_sx_port->dev, "link down\n");
- netif_carrier_off(mlxsw_sx_port->dev);
- }
-}
-
-static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
- enum mlxsw_reg_pude_oper_status status)
-{
- if (status == MLXSW_PORT_OPER_STATUS_UP)
- pr_info("ib link for port %d - up\n",
- mlxsw_sx_port->mapping.module + 1);
- else
- pr_info("ib link for port %d - down\n",
- mlxsw_sx_port->mapping.module + 1);
-}
-
-static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
- char *pude_pl, void *priv)
-{
- struct mlxsw_sx *mlxsw_sx = priv;
- struct mlxsw_sx_port *mlxsw_sx_port;
- enum mlxsw_reg_pude_oper_status status;
- enum devlink_port_type port_type;
- u8 local_port;
-
- local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- mlxsw_sx_port = mlxsw_sx->ports[local_port];
- if (!mlxsw_sx_port) {
- dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
- local_port);
- return;
- }
-
- status = mlxsw_reg_pude_oper_status_get(pude_pl);
- port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
- if (port_type == DEVLINK_PORT_TYPE_ETH)
- mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
- else if (port_type == DEVLINK_PORT_TYPE_IB)
- mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
-}
-
-static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
- void *priv)
-{
- struct mlxsw_sx *mlxsw_sx = priv;
- struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
- struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
-
- if (unlikely(!mlxsw_sx_port)) {
- dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
- local_port);
- return;
- }
-
- skb->dev = mlxsw_sx_port->dev;
-
- pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += skb->len;
- u64_stats_update_end(&pcpu_stats->syncp);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
-}
-
-static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- enum devlink_port_type new_type)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
- u8 module, width;
- int err;
-
- if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
- dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
- local_port);
- return -EINVAL;
- }
-
- if (new_type == DEVLINK_PORT_TYPE_AUTO)
- return -EOPNOTSUPP;
-
- __mlxsw_sx_port_remove(mlxsw_sx, local_port);
- err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
- &width);
- if (err)
- goto err_port_module_info_get;
-
- if (new_type == DEVLINK_PORT_TYPE_ETH)
- err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
- width);
- else if (new_type == DEVLINK_PORT_TYPE_IB)
- err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
- width);
-
-err_port_module_info_get:
- return err;
-}
-
-enum {
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1,
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2,
-};
-
-#define MLXSW_SX_RXL(_trap_id) \
- MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
- false, SX2_RX, FORWARD)
-
-static const struct mlxsw_listener mlxsw_sx_listener[] = {
- MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
- MLXSW_SX_RXL(FDB_MC),
- MLXSW_SX_RXL(STP),
- MLXSW_SX_RXL(LACP),
- MLXSW_SX_RXL(EAPOL),
- MLXSW_SX_RXL(LLDP),
- MLXSW_SX_RXL(MMRP),
- MLXSW_SX_RXL(MVRP),
- MLXSW_SX_RXL(RPVST),
- MLXSW_SX_RXL(DHCP),
- MLXSW_SX_RXL(IGMP_QUERY),
- MLXSW_SX_RXL(IGMP_V1_REPORT),
- MLXSW_SX_RXL(IGMP_V2_REPORT),
- MLXSW_SX_RXL(IGMP_V2_LEAVE),
- MLXSW_SX_RXL(IGMP_V3_REPORT),
-};
-
-static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- int i;
- int err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
- MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
-
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
- MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
-
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
- err = mlxsw_core_trap_register(mlxsw_sx->core,
- &mlxsw_sx_listener[i],
- mlxsw_sx);
- if (err)
- goto err_listener_register;
-
- }
- return 0;
-
-err_listener_register:
- for (i--; i >= 0; i--) {
- mlxsw_core_trap_unregister(mlxsw_sx->core,
- &mlxsw_sx_listener[i],
- mlxsw_sx);
- }
- return err;
-}
-
-static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
- mlxsw_core_trap_unregister(mlxsw_sx->core,
- &mlxsw_sx_listener[i],
- mlxsw_sx);
- }
-}
-
-static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
-{
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
- char sgcr_pl[MLXSW_REG_SGCR_LEN];
- char *sftr_pl;
- int err;
-
- /* Configure a flooding table, which includes only the CPU port. */
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
- MLXSW_PORT_CPU_PORT, true);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
- kfree(sftr_pl);
- if (err)
- return err;
-
- /* Flood different packet types using the flooding table. */
- mlxsw_reg_sfgc_pack(sfgc_pl,
- MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
- 0);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
-
- mlxsw_reg_sfgc_pack(sfgc_pl,
- MLXSW_REG_SFGC_TYPE_BROADCAST,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
- 0);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
-
- mlxsw_reg_sfgc_pack(sfgc_pl,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
- 0);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
-
- mlxsw_reg_sfgc_pack(sfgc_pl,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
- 0);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
-
- mlxsw_reg_sfgc_pack(sfgc_pl,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
- 0);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
-
- mlxsw_reg_sgcr_pack(sgcr_pl, true);
- return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
-}
-
-static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
- mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
- MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
-}
-
-static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
- int err;
-
- mlxsw_sx->core = mlxsw_core;
- mlxsw_sx->bus_info = mlxsw_bus_info;
-
- err = mlxsw_sx_hw_id_get(mlxsw_sx);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
- return err;
- }
-
- err = mlxsw_sx_ports_create(mlxsw_sx);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
- return err;
- }
-
- err = mlxsw_sx_traps_init(mlxsw_sx);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
- goto err_listener_register;
- }
-
- err = mlxsw_sx_flood_init(mlxsw_sx);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
- goto err_flood_init;
- }
-
- return 0;
-
-err_flood_init:
- mlxsw_sx_traps_fini(mlxsw_sx);
-err_listener_register:
- mlxsw_sx_ports_remove(mlxsw_sx);
- return err;
-}
-
-static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
-{
- struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
-
- mlxsw_sx_traps_fini(mlxsw_sx);
- mlxsw_sx_ports_remove(mlxsw_sx);
-}
-
-static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
- .used_max_vepa_channels = 1,
- .max_vepa_channels = 0,
- .used_max_mid = 1,
- .max_mid = 7000,
- .used_max_pgt = 1,
- .max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 48000,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
- .used_flood_tables = 1,
- .max_flood_tables = 2,
- .max_vid_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .used_max_ib_mc = 1,
- .max_ib_mc = 6,
- .used_max_pkey = 1,
- .max_pkey = 0,
- .swid_config = {
- {
- .used_type = 1,
- .type = MLXSW_PORT_SWID_TYPE_ETH,
- },
- {
- .used_type = 1,
- .type = MLXSW_PORT_SWID_TYPE_IB,
- }
- },
-};
-
-static struct mlxsw_driver mlxsw_sx_driver = {
- .kind = mlxsw_sx_driver_name,
- .priv_size = sizeof(struct mlxsw_sx),
- .init = mlxsw_sx_init,
- .fini = mlxsw_sx_fini,
- .basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set,
- .txhdr_construct = mlxsw_sx_txhdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sx_config_profile,
- .port_type_set = mlxsw_sx_port_type_set,
-};
-
-static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
- {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
- {0, },
-};
-
-static struct pci_driver mlxsw_sx_pci_driver = {
- .name = mlxsw_sx_driver_name,
- .id_table = mlxsw_sx_pci_id_table,
-};
-
-static int __init mlxsw_sx_module_init(void)
-{
- int err;
-
- err = mlxsw_core_driver_register(&mlxsw_sx_driver);
- if (err)
- return err;
-
- err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
- if (err)
- goto err_pci_driver_register;
-
- return 0;
-
-err_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sx_driver);
- return err;
-}
-
-static void __exit mlxsw_sx_module_exit(void)
-{
- mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sx_driver);
-}
-
-module_init(mlxsw_sx_module_init);
-module_exit(mlxsw_sx_module_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index caa251d0e381..b27713906d3a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1135,6 +1135,10 @@ static int ks8842_probe(struct platform_device *pdev)
unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
goto err_mem_region;
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 13eef6e9bd2d..831518466de2 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -1022,30 +1022,23 @@ static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
*
* Read and check the TX/RX memory selftest information.
*/
-static int ks8851_read_selftest(struct ks8851_net *ks)
+static void ks8851_read_selftest(struct ks8851_net *ks)
{
unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
- int ret = 0;
unsigned rd;
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
netdev_warn(ks->netdev, "Memory selftest not finished\n");
- return 0;
+ return;
}
- if (rd & MBIR_TXMBFA) {
+ if (rd & MBIR_TXMBFA)
netdev_err(ks->netdev, "TX memory selftest fail\n");
- ret |= 1;
- }
- if (rd & MBIR_RXMBFA) {
+ if (rd & MBIR_RXMBFA)
netdev_err(ks->netdev, "RX memory selftest fail\n");
- ret |= 2;
- }
-
- return 0;
}
/* driver bus management functions */
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 9ed264ed7070..7945eb5e2fe8 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -25,6 +25,7 @@
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/micrel_phy.h>
/* DMA Registers */
@@ -271,84 +272,15 @@
#define KS884X_PHY_CTRL_OFFSET 0x00
-/* Mode Control Register */
-#define PHY_REG_CTRL 0
-
-#define PHY_RESET 0x8000
-#define PHY_LOOPBACK 0x4000
-#define PHY_SPEED_100MBIT 0x2000
-#define PHY_AUTO_NEG_ENABLE 0x1000
-#define PHY_POWER_DOWN 0x0800
-#define PHY_MII_DISABLE 0x0400
-#define PHY_AUTO_NEG_RESTART 0x0200
-#define PHY_FULL_DUPLEX 0x0100
-#define PHY_COLLISION_TEST 0x0080
-#define PHY_HP_MDIX 0x0020
-#define PHY_FORCE_MDIX 0x0010
-#define PHY_AUTO_MDIX_DISABLE 0x0008
-#define PHY_REMOTE_FAULT_DISABLE 0x0004
-#define PHY_TRANSMIT_DISABLE 0x0002
-#define PHY_LED_DISABLE 0x0001
-
#define KS884X_PHY_STATUS_OFFSET 0x02
-/* Mode Status Register */
-#define PHY_REG_STATUS 1
-
-#define PHY_100BT4_CAPABLE 0x8000
-#define PHY_100BTX_FD_CAPABLE 0x4000
-#define PHY_100BTX_CAPABLE 0x2000
-#define PHY_10BT_FD_CAPABLE 0x1000
-#define PHY_10BT_CAPABLE 0x0800
-#define PHY_MII_SUPPRESS_CAPABLE 0x0040
-#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
-#define PHY_REMOTE_FAULT 0x0010
-#define PHY_AUTO_NEG_CAPABLE 0x0008
-#define PHY_LINK_STATUS 0x0004
-#define PHY_JABBER_DETECT 0x0002
-#define PHY_EXTENDED_CAPABILITY 0x0001
-
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
-/* PHY Identifier Registers */
-#define PHY_REG_ID_1 2
-#define PHY_REG_ID_2 3
-
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
-/* Auto-Negotiation Advertisement Register */
-#define PHY_REG_AUTO_NEGOTIATION 4
-
-#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
-#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
-/* Not supported. */
-#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
-#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
-#define PHY_AUTO_NEG_100BT4 0x0200
-#define PHY_AUTO_NEG_100BTX_FD 0x0100
-#define PHY_AUTO_NEG_100BTX 0x0080
-#define PHY_AUTO_NEG_10BT_FD 0x0040
-#define PHY_AUTO_NEG_10BT 0x0020
-#define PHY_AUTO_NEG_SELECTOR 0x001F
-#define PHY_AUTO_NEG_802_3 0x0001
-
-#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
-
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
-/* Auto-Negotiation Link Partner Ability Register */
-#define PHY_REG_REMOTE_CAPABILITY 5
-
-#define PHY_REMOTE_NEXT_PAGE 0x8000
-#define PHY_REMOTE_ACKNOWLEDGE 0x4000
-#define PHY_REMOTE_REMOTE_FAULT 0x2000
-#define PHY_REMOTE_SYM_PAUSE 0x0400
-#define PHY_REMOTE_100BTX_FD 0x0100
-#define PHY_REMOTE_100BTX 0x0080
-#define PHY_REMOTE_10BT_FD 0x0040
-#define PHY_REMOTE_10BT 0x0020
-
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
@@ -2153,7 +2085,7 @@ static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
}
/**
- * sw_get_board_storm - get broadcast storm threshold
+ * sw_get_broad_storm - get broadcast storm threshold
* @hw: The hardware instance.
* @percent: Buffer to store the broadcast storm threshold percentage.
*
@@ -2886,15 +2818,6 @@ static void sw_block_addr(struct ksz_hw *hw)
}
}
-#define PHY_LINK_SUPPORT \
- (PHY_AUTO_NEG_ASYM_PAUSE | \
- PHY_AUTO_NEG_SYM_PAUSE | \
- PHY_AUTO_NEG_100BT4 | \
- PHY_AUTO_NEG_100BTX_FD | \
- PHY_AUTO_NEG_100BTX | \
- PHY_AUTO_NEG_10BT_FD | \
- PHY_AUTO_NEG_10BT)
-
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
@@ -2973,7 +2896,7 @@ static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
}
/**
- * port_w_phy - write data to PHY register
+ * hw_w_phy - write data to PHY register
* @hw: The hardware instance.
* @port: Port to write.
* @reg: PHY register to write.
@@ -3238,16 +3161,18 @@ static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
rx = tx = 0;
if (port->force_link)
rx = tx = 1;
- if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
- if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (remote & LPA_PAUSE_CAP) {
+ if (local & ADVERTISE_PAUSE_CAP) {
rx = tx = 1;
- } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
- (local & PHY_AUTO_NEG_PAUSE) ==
- PHY_AUTO_NEG_ASYM_PAUSE) {
+ } else if ((remote & LPA_PAUSE_ASYM) &&
+ (local &
+ (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
+ ADVERTISE_PAUSE_ASYM) {
tx = 1;
}
- } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
- if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ } else if (remote & LPA_PAUSE_ASYM) {
+ if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
+ == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
rx = 1;
}
if (!hw->ksz_switch)
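
The rewritten branches above express the standard IEEE 802.3 pause resolution with the generic mii.h advertisement bits. A compact stand-alone restatement of the truth table they implement; resolve_pause() is an illustrative helper, not part of the driver, and the constants are copied from linux/mii.h:

#include <stdbool.h>
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP	0x0400	/* values as in linux/mii.h */
#define ADVERTISE_PAUSE_ASYM	0x0800
#define LPA_PAUSE_CAP		0x0400
#define LPA_PAUSE_ASYM		0x0800

/* Resolve rx/tx pause from the local advertisement word and the link
 * partner ability word, mirroring the branches in the hunk above. */
static void resolve_pause(unsigned int local, unsigned int remote,
			  bool *rx, bool *tx)
{
	*rx = *tx = false;

	if (remote & LPA_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_CAP)
			*rx = *tx = true;	/* symmetric pause both ways */
		else if ((remote & LPA_PAUSE_ASYM) &&
			 (local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
				== ADVERTISE_PAUSE_ASYM)
			*tx = true;		/* we may send pause frames */
	} else if ((remote & LPA_PAUSE_ASYM) &&
		   (local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
			== (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) {
		*rx = true;			/* partner may send pause frames */
	}
}

int main(void)
{
	bool rx, tx;

	resolve_pause(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP, &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx);	/* rx=1 tx=1 */
	return 0;
}
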
@@ -3428,16 +3353,16 @@ static void port_force_link_speed(struct ksz_port *port)
phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
hw_r_phy_ctrl(hw, phy, &data);
- data &= ~PHY_AUTO_NEG_ENABLE;
+ data &= ~BMCR_ANENABLE;
if (10 == port->speed)
- data &= ~PHY_SPEED_100MBIT;
+ data &= ~BMCR_SPEED100;
else if (100 == port->speed)
- data |= PHY_SPEED_100MBIT;
+ data |= BMCR_SPEED100;
if (1 == port->duplex)
- data &= ~PHY_FULL_DUPLEX;
+ data &= ~BMCR_FULLDPLX;
else if (2 == port->duplex)
- data |= PHY_FULL_DUPLEX;
+ data |= BMCR_FULLDPLX;
hw_w_phy_ctrl(hw, phy, data);
}
}
@@ -4782,7 +4707,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
}
/**
- * transmit_done - transmit done processing
+ * tx_done - transmit done processing
* @hw_priv: Network device.
*
* This routine is called when the transmit interrupt is triggered, indicating
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index d0f6dfe0dcf3..d54aa164c4e9 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -54,4 +54,6 @@ config LAN743X
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/sparx5/Kconfig"
+
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index da603540ca57..c77dc0379bfd 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 3658c4ae3c37..ee921a99e439 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* Microchip ENCX24J600 ethernet driver
*
* Copyright (C) 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index f604a260ede7..fac61a8fbd02 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* encx24j600_hw.h: Register definitions
*
*/
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
new file mode 100644
index 000000000000..a80419d8d4b5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -0,0 +1,9 @@
+config SPARX5_SWITCH
+ tristate "Sparx5 switch driver"
+ depends on NET_SWITCHDEV
+ depends on HAS_IOMEM
+ select PHYLINK
+ select PHY_SPARX5_SERDES
+ select RESET_CONTROLLER
+ help
+ This driver supports the Sparx5 network switch device.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
new file mode 100644
index 000000000000..faa8f07a6b75
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Sparx5 network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
+
+sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
+ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
new file mode 100644
index 000000000000..76a8bb596aec
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* QSYS calendar information */
+#define SPX5_PORTS_PER_CALREG 10 /* Ports mapped in a calendar register */
+#define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */
+
+/* DSM calendar information */
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_EMPTY 0xFFFF
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_TAXIS 8
+#define SPX5_DSM_CAL_BW_LOSS 553
+
+#define SPX5_TAXI_PORT_MAX 70
+
+#define SPEED_12500 12500
+
+/* Maps from taxis to port numbers */
+static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
+ {57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
+ {58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
+ {59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
+ {60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
+ {61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+ {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+};
+
+struct sparx5_calendar_data {
+ u32 schedule[SPX5_DSM_CAL_LEN];
+ u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 new_slots[SPX5_DSM_CAL_LEN];
+ u32 temp_sched[SPX5_DSM_CAL_LEN];
+ u32 indices[SPX5_DSM_CAL_LEN];
+ u32 short_list[SPX5_DSM_CAL_LEN];
+ u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
+static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7546TSN:
+ return 65000;
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7549TSN:
+ return 91000;
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7552TSN:
+ return 129000;
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7556TSN:
+ return 161000;
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7558TSN:
+ return 201000;
+ default:
+ return 0;
+ }
+}
+
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+ SPX5_CAL_SPEED_NONE = 0,
+ SPX5_CAL_SPEED_1G = 1,
+ SPX5_CAL_SPEED_2G5 = 2,
+ SPX5_CAL_SPEED_5G = 3,
+ SPX5_CAL_SPEED_10G = 4,
+ SPX5_CAL_SPEED_25G = 5,
+ SPX5_CAL_SPEED_0G5 = 6,
+ SPX5_CAL_SPEED_12G5 = 7
+};
+
+static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
+{
+ switch (cclock) {
+ case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+ case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
+ case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
+ default: return 0;
+ }
+ return 0;
+}
+
+static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+{
+ switch (speed) {
+ case SPX5_CAL_SPEED_1G: return 1000;
+ case SPX5_CAL_SPEED_2G5: return 2500;
+ case SPX5_CAL_SPEED_5G: return 5000;
+ case SPX5_CAL_SPEED_10G: return 10000;
+ case SPX5_CAL_SPEED_25G: return 25000;
+ case SPX5_CAL_SPEED_0G5: return 500;
+ case SPX5_CAL_SPEED_12G5: return 12500;
+ default: return 0;
+ }
+}
+
+static u32 sparx5_bandwidth_to_calendar(u32 bw)
+{
+ switch (bw) {
+ case SPEED_10: return SPX5_CAL_SPEED_0G5;
+ case SPEED_100: return SPX5_CAL_SPEED_0G5;
+ case SPEED_1000: return SPX5_CAL_SPEED_1G;
+ case SPEED_2500: return SPX5_CAL_SPEED_2G5;
+ case SPEED_5000: return SPX5_CAL_SPEED_5G;
+ case SPEED_10000: return SPX5_CAL_SPEED_10G;
+ case SPEED_12500: return SPX5_CAL_SPEED_12G5;
+ case SPEED_25000: return SPX5_CAL_SPEED_25G;
+ case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
+ default: return SPX5_CAL_SPEED_NONE;
+ }
+}
+
+static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
+ u32 portno)
+{
+ struct sparx5_port *port;
+
+ if (portno >= SPX5_PORTS) {
+ /* Internal ports */
+ if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+ /* 2.5G calendar slot; internal ports are granted half
+ * of it below, i.e. 1.25G effective
+ */
+ return SPX5_CAL_SPEED_2G5;
+ } else if (portno == SPX5_PORT_VD0) {
+ /* IPMC gets only idle bandwidth */
+ return SPX5_CAL_SPEED_NONE;
+ } else if (portno == SPX5_PORT_VD1) {
+ /* OAM gets only idle bandwidth */
+ return SPX5_CAL_SPEED_NONE;
+ } else if (portno == SPX5_PORT_VD2) {
+ /* IPinIP gets only idle bandwidth */
+ return SPX5_CAL_SPEED_NONE;
+ }
+ /* not in port map */
+ return SPX5_CAL_SPEED_NONE;
+ }
+ /* Front ports - may be used */
+ port = sparx5->ports[portno];
+ if (!port)
+ return SPX5_CAL_SPEED_NONE;
+ return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
+}
+
+/* Auto configure the QSYS calendar based on port configuration */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5)
+{
+ u32 cal[7], value, idx, portno;
+ u32 max_core_bw;
+ u32 total_bw = 0, used_port_bw = 0;
+ int err = 0;
+ enum sparx5_cal_bw spd;
+
+ memset(cal, 0, sizeof(cal));
+
+ max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
+ if (max_core_bw == 0) {
+ dev_err(sparx5->dev, "Core clock not supported\n");
+ return -EINVAL;
+ }
+
+ /* Setup the calendar with the bandwidth to each port */
+ for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+ u64 reg, offset, this_bw;
+
+ spd = sparx5_get_port_cal_speed(sparx5, portno);
+ if (spd == SPX5_CAL_SPEED_NONE)
+ continue;
+
+ this_bw = sparx5_cal_speed_to_value(spd);
+ if (portno < SPX5_PORTS) {
+ used_port_bw += this_bw;
+ } else {
+ /* Internal ports are granted half the value */
+ this_bw = this_bw / 2;
+ }
+ total_bw += this_bw;
+ reg = portno;
+ offset = do_div(reg, SPX5_PORTS_PER_CALREG);
+ cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
+ }
+
+ if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
+ dev_err(sparx5->dev,
+ "Port BW %u above target BW %u\n",
+ used_port_bw, sparx5_target_bandwidth(sparx5));
+ return -EINVAL;
+ }
+
+ if (total_bw > max_core_bw) {
+ dev_err(sparx5->dev,
+ "Total BW %u above switch core BW %u\n",
+ total_bw, max_core_bw);
+ return -EINVAL;
+ }
+
+ /* Halt the calendar while changing it */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
+
+ /* Assign port bandwidth to auto calendar */
+ for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+ spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
+
+ /* Increase grant rate of all ports to account for
+ * core clock ppm deviations: program 671 rather than
+ * the nominal 672
+ */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671),
+ QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
+ sparx5,
+ QSYS_CAL_CTRL);
+
+ /* Grant idle usage to VD 0-2 */
+ for (idx = 2; idx < 5; idx++)
+ spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
+ sparx5,
+ HSCH_OUTB_SHARE_ENA(idx));
+
+ /* Enable Auto mode */
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
+
+ /* Verify successful calendar config */
+ value = spx5_rd(sparx5, QSYS_CAL_CTRL);
+ if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
+ dev_err(sparx5->dev, "QSYS calendar error\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
+{
+ if (b == 0)
+ return a;
+ return sparx5_dsm_exb_gcd(b, a % b);
+}
+
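+/* Count the number of occupied (non-empty) slots in a calendar */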
+static u32 sparx5_dsm_cal_len(u32 *cal)
+{
+ u32 idx = 0, len = 0;
+
+ while (idx < SPX5_DSM_CAL_LEN) {
+ if (cal[idx] != SPX5_DSM_CAL_EMPTY)
+ len++;
+ idx++;
+ }
+ return len;
+}
+
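+/* Pop the first occupied slot from a calendar, marking it empty */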
+static u32 sparx5_dsm_cp_cal(u32 *sched)
+{
+ u32 idx = 0, tmp;
+
+ while (idx < SPX5_DSM_CAL_LEN) {
+ if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
+ tmp = sched[idx];
+ sched[idx] = SPX5_DSM_CAL_EMPTY;
+ return tmp;
+ }
+ idx++;
+ }
+ return SPX5_DSM_CAL_EMPTY;
+}
+
+static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ bool slow_mode;
+ u32 gcd, idx, sum, min, factor;
+ u32 num_of_slots, slot_spd, empty_slots;
+ u32 taxi_bw, clk_period_ps;
+
+ clk_period_ps = sparx5_clk_period(sparx5->coreclock);
+ taxi_bw = 128 * 1000000 / clk_period_ps;
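+ /* The taxi bus moves 128 bits per core clock cycle, e.g. at 625 MHz
+ * (1600 ps period) this gives 128 * 1000000 / 1600 = 80000 Mbps
+ */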
+ slow_mode = clk_period_ps > 2000;
+ memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
+ sizeof(data->taxi_ports));
+
+ for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+ data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
+ data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
+ data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
+ }
+ /* Default empty calendar */
+ data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+ /* Map ports to taxi positions */
+ for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
+ u32 portno = data->taxi_ports[idx];
+
+ if (portno < SPX5_TAXI_PORT_MAX) {
+ data->taxi_speeds[idx] = sparx5_cal_speed_to_value
+ (sparx5_get_port_cal_speed(sparx5, portno));
+ } else {
+ data->taxi_speeds[idx] = 0;
+ }
+ }
+
+ sum = 0;
+ min = 25000;
+ for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+ u32 jdx;
+
+ sum += data->taxi_speeds[idx];
+ if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
+ min = data->taxi_speeds[idx];
+ gcd = min;
+ for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
+ gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
+ }
+ if (sum == 0) /* Empty calendar */
+ return 0;
+ /* Make room for overhead traffic: factor is 1000 / (1 - loss),
+ * carried in units of 1/1000
+ */
+ factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);
+
+ if (sum * factor > (taxi_bw * 1000)) {
+ dev_err(sparx5->dev,
+ "Taxi %u, Requested BW %u above available BW %u\n",
+ taxi, sum, taxi_bw);
+ return -EINVAL;
+ }
+ for (idx = 0; idx < 4; idx++) {
+ u32 raw_spd;
+
+ if (idx == 0)
+ raw_spd = gcd / 5;
+ else if (idx == 1)
+ raw_spd = gcd / 2;
+ else if (idx == 2)
+ raw_spd = gcd;
+ else
+ raw_spd = min;
+ slot_spd = raw_spd * factor / 1000;
+ num_of_slots = taxi_bw / slot_spd;
+ if (num_of_slots <= 64)
+ break;
+ }
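+ /* The loop above picks the finest slot granularity (gcd / 5, gcd / 2,
+ * gcd, then the slowest active speed) that still fits in 64 slots
+ */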
+
+ num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
+ slot_spd = taxi_bw / num_of_slots;
+
+ sum = 0;
+ for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+ u32 spd = data->taxi_speeds[idx];
+ u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;
+
+ if (adjusted_speed > 0) {
+ data->avg_dist[idx] = (128 * 1000000 * 10) /
+ (adjusted_speed * clk_period_ps);
+ } else {
+ /* Unused device: mark with a sentinel distance */
+ data->avg_dist[idx] = -1;
+ }
+ data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
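+ /* i.e. the overhead-adjusted speed divided by the slot speed,
+ * rounded up to a whole number of slots
+ */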
+ if (spd != 25000 && (spd != 10000 || !slow_mode)) {
+ if (num_of_slots < (5 * data->dev_slots[idx])) {
+ dev_err(sparx5->dev,
+ "Taxi %u, speed %u, Low slot sep.\n",
+ taxi, spd);
+ return -EINVAL;
+ }
+ }
+ sum += data->dev_slots[idx];
+ if (sum > num_of_slots) {
+ dev_err(sparx5->dev,
+ "Taxi %u with overhead factor %u\n",
+ taxi, factor);
+ return -EINVAL;
+ }
+ }
+
+ empty_slots = num_of_slots - sum;
+
+ for (idx = 0; idx < empty_slots; idx++)
+ data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+ for (idx = 1; idx < num_of_slots; idx++) {
+ u32 indices_len = 0;
+ u32 slot, jdx, kdx, ts;
+ s32 cnt;
+ u32 num_of_old_slots, num_of_new_slots, tgt_score;
+
+ for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
+ if (data->dev_slots[slot] == idx) {
+ data->indices[indices_len] = slot;
+ indices_len++;
+ }
+ }
+ if (indices_len == 0)
+ continue;
+ kdx = 0;
+ for (slot = 0; slot < idx; slot++) {
+ for (jdx = 0; jdx < indices_len; jdx++, kdx++)
+ data->new_slots[kdx] = data->indices[jdx];
+ }
+
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
+ break;
+ }
+
+ num_of_old_slots = slot;
+ num_of_new_slots = kdx;
+ cnt = 0;
+ ts = 0;
+
+ if (num_of_new_slots > num_of_old_slots) {
+ memcpy(data->short_list, data->schedule,
+ sizeof(data->short_list));
+ memcpy(data->long_list, data->new_slots,
+ sizeof(data->long_list));
+ tgt_score = 100000 * num_of_old_slots /
+ num_of_new_slots;
+ } else {
+ memcpy(data->short_list, data->new_slots,
+ sizeof(data->short_list));
+ memcpy(data->long_list, data->schedule,
+ sizeof(data->long_list));
+ tgt_score = 100000 * num_of_new_slots /
+ num_of_old_slots;
+ }
+
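+ /* Interleave the two lists proportionally: each short-list
+ * entry earns 100000 credits, and long-list entries are
+ * emitted while credits remain, each spending tgt_score
+ */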
+ while (sparx5_dsm_cal_len(data->short_list) > 0 ||
+ sparx5_dsm_cal_len(data->long_list) > 0) {
+ u32 act = 0;
+
+ if (sparx5_dsm_cal_len(data->short_list) > 0) {
+ data->temp_sched[ts] =
+ sparx5_dsm_cp_cal(data->short_list);
+ ts++;
+ cnt += 100000;
+ act = 1;
+ }
+ while (sparx5_dsm_cal_len(data->long_list) > 0 &&
+ cnt > 0) {
+ data->temp_sched[ts] =
+ sparx5_dsm_cp_cal(data->long_list);
+ ts++;
+ cnt -= tgt_score;
+ act = 1;
+ }
+ if (act == 0) {
+ dev_err(sparx5->dev,
+ "Error in DSM calendar calculation\n");
+ return -EINVAL;
+ }
+ }
+
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
+ break;
+ }
+ for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+ data->schedule[slot] = data->temp_sched[slot];
+ data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
+ data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
+ }
+ }
+ return 0;
+}
+
+static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
+ struct sparx5_calendar_data *data)
+{
+ u32 num_of_slots, idx, port;
+ int cnt, max_dist;
+ u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
+ u32 cal_length = sparx5_dsm_cal_len(data->schedule);
+
+ for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
+ num_of_slots = 0;
+ max_dist = data->avg_dist[port];
+ for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+ slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
+ distances[idx] = SPX5_DSM_CAL_EMPTY;
+ }
+
+ for (idx = 0; idx < cal_length; idx++) {
+ if (data->schedule[idx] == port) {
+ slot_indices[num_of_slots] = idx;
+ num_of_slots++;
+ }
+ }
+
+ /* Wrap around: the slot after the last one is the first slot
+ * of the next calendar round
+ */
+ slot_indices[num_of_slots] = slot_indices[0] + cal_length;
+
+ for (idx = 0; idx < num_of_slots; idx++) {
+ distances[idx] = (slot_indices[idx + 1] -
+ slot_indices[idx]) * 10;
+ }
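+ /* Distances are scaled by 10 to match the extra decimal digit
+ * carried by avg_dist
+ */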
+
+ for (idx = 0; idx < num_of_slots; idx++) {
+ u32 jdx, kdx;
+
+ cnt = distances[idx] - max_dist;
+ if (cnt < 0)
+ cnt = -cnt;
+ kdx = 0;
+ for (jdx = (idx + 1) % num_of_slots;
+ jdx != idx;
+ jdx = (jdx + 1) % num_of_slots, kdx++) {
+ cnt = cnt + distances[jdx] - max_dist;
+ if (cnt < 0)
+ cnt = -cnt;
+ if (cnt > max_dist)
+ goto check_err;
+ }
+ }
+ }
+ return 0;
+check_err:
+ dev_err(sparx5->dev,
+ "Port %u: distance %d above limit %d\n",
+ port, cnt, max_dist);
+ return -EINVAL;
+}
+
+static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ u32 idx;
+ u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+
+ spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ for (idx = 0; idx < cal_len; idx++) {
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
+ DSM_TAXI_CAL_CFG_CAL_IDX,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
+ DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ }
+ spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
+ len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
+ DSM_TAXI_CAL_CFG(taxi)));
+ if (len != cal_len - 1)
+ goto update_err;
+ return 0;
+update_err:
+ dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
+ return -EINVAL;
+}
+
+/* Configure the DSM calendar based on port configuration */
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
+{
+ int taxi;
+ struct sparx5_calendar_data *data;
+ int err = 0;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
+ err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar calculation failed\n");
+ goto cal_out;
+ }
+ err = sparx5_dsm_calendar_check(sparx5, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar check failed\n");
+ goto cal_out;
+ }
+ err = sparx5_dsm_calendar_update(sparx5, taxi, data);
+ if (err) {
+ dev_err(sparx5->dev, "DSM calendar update failed\n");
+ goto cal_out;
+ }
+ }
+cal_out:
+ kfree(data);
+ return err;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
new file mode 100644
index 000000000000..59783fc46a7b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/ethtool.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* Index of ANA_AC port counters */
+#define SPX5_PORT_POLICER_DROPS 0
+
+/* Fold a new 32 bit hardware counter value into a 64 bit software counter,
+ * detecting and compensating for 32 bit wrap-around
+ */
+static void sparx5_update_counter(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+enum sparx5_stats_entry {
+ spx5_stats_rx_symbol_err_cnt = 0,
+ spx5_stats_pmac_rx_symbol_err_cnt = 1,
+ spx5_stats_tx_uc_cnt = 2,
+ spx5_stats_pmac_tx_uc_cnt = 3,
+ spx5_stats_tx_mc_cnt = 4,
+ spx5_stats_tx_bc_cnt = 5,
+ spx5_stats_tx_backoff1_cnt = 6,
+ spx5_stats_tx_multi_coll_cnt = 7,
+ spx5_stats_rx_uc_cnt = 8,
+ spx5_stats_pmac_rx_uc_cnt = 9,
+ spx5_stats_rx_mc_cnt = 10,
+ spx5_stats_rx_bc_cnt = 11,
+ spx5_stats_rx_crc_err_cnt = 12,
+ spx5_stats_pmac_rx_crc_err_cnt = 13,
+ spx5_stats_rx_alignment_lost_cnt = 14,
+ spx5_stats_pmac_rx_alignment_lost_cnt = 15,
+ spx5_stats_tx_ok_bytes_cnt = 16,
+ spx5_stats_pmac_tx_ok_bytes_cnt = 17,
+ spx5_stats_tx_defer_cnt = 18,
+ spx5_stats_tx_late_coll_cnt = 19,
+ spx5_stats_tx_xcoll_cnt = 20,
+ spx5_stats_tx_csense_cnt = 21,
+ spx5_stats_rx_ok_bytes_cnt = 22,
+ spx5_stats_pmac_rx_ok_bytes_cnt = 23,
+ spx5_stats_pmac_tx_mc_cnt = 24,
+ spx5_stats_pmac_tx_bc_cnt = 25,
+ spx5_stats_tx_xdefer_cnt = 26,
+ spx5_stats_pmac_rx_mc_cnt = 27,
+ spx5_stats_pmac_rx_bc_cnt = 28,
+ spx5_stats_rx_in_range_len_err_cnt = 29,
+ spx5_stats_pmac_rx_in_range_len_err_cnt = 30,
+ spx5_stats_rx_out_of_range_len_err_cnt = 31,
+ spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32,
+ spx5_stats_rx_oversize_cnt = 33,
+ spx5_stats_pmac_rx_oversize_cnt = 34,
+ spx5_stats_tx_pause_cnt = 35,
+ spx5_stats_pmac_tx_pause_cnt = 36,
+ spx5_stats_rx_pause_cnt = 37,
+ spx5_stats_pmac_rx_pause_cnt = 38,
+ spx5_stats_rx_unsup_opcode_cnt = 39,
+ spx5_stats_pmac_rx_unsup_opcode_cnt = 40,
+ spx5_stats_rx_undersize_cnt = 41,
+ spx5_stats_pmac_rx_undersize_cnt = 42,
+ spx5_stats_rx_fragments_cnt = 43,
+ spx5_stats_pmac_rx_fragments_cnt = 44,
+ spx5_stats_rx_jabbers_cnt = 45,
+ spx5_stats_pmac_rx_jabbers_cnt = 46,
+ spx5_stats_rx_size64_cnt = 47,
+ spx5_stats_pmac_rx_size64_cnt = 48,
+ spx5_stats_rx_size65to127_cnt = 49,
+ spx5_stats_pmac_rx_size65to127_cnt = 50,
+ spx5_stats_rx_size128to255_cnt = 51,
+ spx5_stats_pmac_rx_size128to255_cnt = 52,
+ spx5_stats_rx_size256to511_cnt = 53,
+ spx5_stats_pmac_rx_size256to511_cnt = 54,
+ spx5_stats_rx_size512to1023_cnt = 55,
+ spx5_stats_pmac_rx_size512to1023_cnt = 56,
+ spx5_stats_rx_size1024to1518_cnt = 57,
+ spx5_stats_pmac_rx_size1024to1518_cnt = 58,
+ spx5_stats_rx_size1519tomax_cnt = 59,
+ spx5_stats_pmac_rx_size1519tomax_cnt = 60,
+ spx5_stats_tx_size64_cnt = 61,
+ spx5_stats_pmac_tx_size64_cnt = 62,
+ spx5_stats_tx_size65to127_cnt = 63,
+ spx5_stats_pmac_tx_size65to127_cnt = 64,
+ spx5_stats_tx_size128to255_cnt = 65,
+ spx5_stats_pmac_tx_size128to255_cnt = 66,
+ spx5_stats_tx_size256to511_cnt = 67,
+ spx5_stats_pmac_tx_size256to511_cnt = 68,
+ spx5_stats_tx_size512to1023_cnt = 69,
+ spx5_stats_pmac_tx_size512to1023_cnt = 70,
+ spx5_stats_tx_size1024to1518_cnt = 71,
+ spx5_stats_pmac_tx_size1024to1518_cnt = 72,
+ spx5_stats_tx_size1519tomax_cnt = 73,
+ spx5_stats_pmac_tx_size1519tomax_cnt = 74,
+ spx5_stats_mm_rx_assembly_err_cnt = 75,
+ spx5_stats_mm_rx_assembly_ok_cnt = 76,
+ spx5_stats_mm_rx_merge_frag_cnt = 77,
+ spx5_stats_mm_rx_smd_err_cnt = 78,
+ spx5_stats_mm_tx_pfragment_cnt = 79,
+ spx5_stats_rx_bad_bytes_cnt = 80,
+ spx5_stats_pmac_rx_bad_bytes_cnt = 81,
+ spx5_stats_rx_in_bytes_cnt = 82,
+ spx5_stats_rx_ipg_shrink_cnt = 83,
+ spx5_stats_rx_sync_lost_err_cnt = 84,
+ spx5_stats_rx_tagged_frms_cnt = 85,
+ spx5_stats_rx_untagged_frms_cnt = 86,
+ spx5_stats_tx_out_bytes_cnt = 87,
+ spx5_stats_tx_tagged_frms_cnt = 88,
+ spx5_stats_tx_untagged_frms_cnt = 89,
+ spx5_stats_rx_hih_cksm_err_cnt = 90,
+ spx5_stats_pmac_rx_hih_cksm_err_cnt = 91,
+ spx5_stats_rx_xgmii_prot_err_cnt = 92,
+ spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93,
+ spx5_stats_ana_ac_port_stat_lsb_cnt = 94,
+ spx5_stats_green_p0_rx_fwd = 95,
+ spx5_stats_green_p0_rx_port_drop = 111,
+ spx5_stats_green_p0_tx_port = 127,
+ spx5_stats_rx_local_drop = 143,
+ spx5_stats_tx_local_drop = 144,
+ spx5_stats_count = 145,
+};
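+/* The gaps in the numbering above reserve 16 entries (2 * SPX5_PRIOS, green
+ * and yellow per priority) for each of the per-queue counter groups
+ */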
+
+static const char *const sparx5_stats_layout[] = {
+ "mm_rx_assembly_err_cnt",
+ "mm_rx_assembly_ok_cnt",
+ "mm_rx_merge_frag_cnt",
+ "mm_rx_smd_err_cnt",
+ "mm_tx_pfragment_cnt",
+ "rx_bad_bytes_cnt",
+ "pmac_rx_bad_bytes_cnt",
+ "rx_in_bytes_cnt",
+ "rx_ipg_shrink_cnt",
+ "rx_sync_lost_err_cnt",
+ "rx_tagged_frms_cnt",
+ "rx_untagged_frms_cnt",
+ "tx_out_bytes_cnt",
+ "tx_tagged_frms_cnt",
+ "tx_untagged_frms_cnt",
+ "rx_hih_cksm_err_cnt",
+ "pmac_rx_hih_cksm_err_cnt",
+ "rx_xgmii_prot_err_cnt",
+ "pmac_rx_xgmii_prot_err_cnt",
+ "rx_port_policer_drop",
+ "rx_fwd_green_p0",
+ "rx_fwd_green_p1",
+ "rx_fwd_green_p2",
+ "rx_fwd_green_p3",
+ "rx_fwd_green_p4",
+ "rx_fwd_green_p5",
+ "rx_fwd_green_p6",
+ "rx_fwd_green_p7",
+ "rx_fwd_yellow_p0",
+ "rx_fwd_yellow_p1",
+ "rx_fwd_yellow_p2",
+ "rx_fwd_yellow_p3",
+ "rx_fwd_yellow_p4",
+ "rx_fwd_yellow_p5",
+ "rx_fwd_yellow_p6",
+ "rx_fwd_yellow_p7",
+ "rx_port_drop_green_p0",
+ "rx_port_drop_green_p1",
+ "rx_port_drop_green_p2",
+ "rx_port_drop_green_p3",
+ "rx_port_drop_green_p4",
+ "rx_port_drop_green_p5",
+ "rx_port_drop_green_p6",
+ "rx_port_drop_green_p7",
+ "rx_port_drop_yellow_p0",
+ "rx_port_drop_yellow_p1",
+ "rx_port_drop_yellow_p2",
+ "rx_port_drop_yellow_p3",
+ "rx_port_drop_yellow_p4",
+ "rx_port_drop_yellow_p5",
+ "rx_port_drop_yellow_p6",
+ "rx_port_drop_yellow_p7",
+ "tx_port_green_p0",
+ "tx_port_green_p1",
+ "tx_port_green_p2",
+ "tx_port_green_p3",
+ "tx_port_green_p4",
+ "tx_port_green_p5",
+ "tx_port_green_p6",
+ "tx_port_green_p7",
+ "tx_port_yellow_p0",
+ "tx_port_yellow_p1",
+ "tx_port_yellow_p2",
+ "tx_port_yellow_p3",
+ "tx_port_yellow_p4",
+ "tx_port_yellow_p5",
+ "tx_port_yellow_p6",
+ "tx_port_yellow_p7",
+ "rx_local_drop",
+ "tx_local_drop",
+};
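+/* This layout must match the enum order starting at
+ * spx5_stats_mm_rx_assembly_err_cnt, since sparx5_get_sset_data() copies
+ * counters from that offset onwards
+ */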
+
+static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats;
+ u64 *stats;
+ u32 addr;
+ int idx;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG);
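+ /* With the view selecting a port, the XQS counters are laid out as:
+ * 0-15 rx forwarded (green/yellow per prio), 16-31 rx port drops,
+ * 256-271 tx, 32 rx local drop and 272 tx local drop
+ */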
+ addr = 0;
+ stats = &portstats[spx5_stats_green_p0_rx_fwd];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ addr = 16;
+ stats = &portstats[spx5_stats_green_p0_rx_port_drop];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ addr = 256;
+ stats = &portstats[spx5_stats_green_p0_tx_port];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+ sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_local_drop],
+ spx5_rd(sparx5, XQS_CNT(32)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_local_drop],
+ spx5_rd(sparx5, XQS_CNT(272)));
+ mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+
+ sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt],
+ spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno,
+ SPX5_PORT_POLICER_DROPS)));
+}
+
+static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SYMBOL_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_CRC_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_ALIGNMENT_LOST_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OK_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+ spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_PAUSE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNSUP_OPCODE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNDERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_FRAGMENTS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+ spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
+}
+
+static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst,
+ u32 tinst)
+{
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_MERGE_FRAG_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_RX_SMD_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_MM_TX_PFRAGMENT_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_BAD_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+ spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_IPG_SHRINK_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_TAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_UNTAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_OUT_BYTES_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_TAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_TX_UNTAGGED_FRMS_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_HIH_CKSM_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_RX_XGMII_PROT_ERR_CNT(tinst)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt],
+ spx5_inst_rd(inst,
+ DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+ void __iomem *inst;
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_phy_stats(portstats, inst, tinst);
+ sparx5_get_dev_mac_stats(portstats, inst, tinst);
+ sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+ sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+ sparx5_get_dev_misc_stats(portstats, inst, tinst);
+}
+
+static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SYMBOL_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SYMBOL_ERR_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+ spx5_inst_rd(inst, ASM_TX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+ spx5_inst_rd(inst, ASM_TX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+ spx5_inst_rd(inst, ASM_TX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt],
+ spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_MULTI_COLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+ spx5_inst_rd(inst, ASM_RX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+ spx5_inst_rd(inst, ASM_RX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+ spx5_inst_rd(inst, ASM_RX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+ spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_CRC_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_ALIGNMENT_LOST_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt],
+ spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt],
+ spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt],
+ spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt],
+ spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OK_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt],
+ spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+ spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+ spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+ spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_PAUSE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_UNSUP_OPCODE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno)));
+}
+
+static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+ spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_UNDERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+ spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+ spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_FRAGMENTS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+ spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_JABBERS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+ spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+ spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE64_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE65TO127_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE128TO255_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE256TO511_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE512TO1023_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE1024TO1518_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_SIZE1519TOMAX_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno)));
+}
+
+static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst,
+ int portno)
+{
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_ASSEMBLY_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_ASSEMBLY_OK_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_MERGE_FRAG_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_RX_SMD_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+ spx5_inst_rd(inst,
+ ASM_MM_TX_PFRAGMENT_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+ spx5_inst_rd(inst,
+ ASM_PMAC_RX_BAD_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+ spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_IPG_SHRINK_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_SYNC_LOST_ERR_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_TAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_RX_UNTAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+ spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_TAGGED_FRMS_CNT(portno)));
+ sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+ spx5_inst_rd(inst,
+ ASM_TX_UNTAGGED_FRMS_CNT(portno)));
+}
+
+static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno)
+{
+ u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+ void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+
+ sparx5_get_asm_phy_stats(portstats, inst, portno);
+ sparx5_get_asm_mac_stats(portstats, inst, portno);
+ sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+ sparx5_get_asm_rmon_stats(portstats, inst, portno);
+ sparx5_get_asm_misc_stats(portstats, inst, portno);
+}
+
+static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
+
+static void sparx5_get_eth_phy_stats(struct net_device *ndev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_phy_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_phy_stats(portstats, inst, portno);
+ }
+ phy_stats->SymbolErrorDuringCarrier =
+ portstats[spx5_stats_rx_symbol_err_cnt] +
+ portstats[spx5_stats_pmac_rx_symbol_err_cnt];
+}
+
+static void sparx5_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_mac_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_mac_stats(portstats, inst, portno);
+ }
+ mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] +
+ portstats[spx5_stats_pmac_tx_uc_cnt] +
+ portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_tx_bc_cnt];
+ mac_stats->SingleCollisionFrames =
+ portstats[spx5_stats_tx_backoff1_cnt];
+ mac_stats->MultipleCollisionFrames =
+ portstats[spx5_stats_tx_multi_coll_cnt];
+ mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] +
+ portstats[spx5_stats_pmac_rx_uc_cnt] +
+ portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_rx_bc_cnt];
+ mac_stats->FrameCheckSequenceErrors =
+ portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt];
+ mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt]
+ + portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+ mac_stats->FramesWithDeferredXmissions =
+ portstats[spx5_stats_tx_defer_cnt];
+ mac_stats->LateCollisions =
+ portstats[spx5_stats_tx_late_coll_cnt];
+ mac_stats->FramesAbortedDueToXSColls =
+ portstats[spx5_stats_tx_xcoll_cnt];
+ mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt];
+ mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+ mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_pmac_tx_mc_cnt];
+ mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] +
+ portstats[spx5_stats_pmac_tx_bc_cnt];
+ mac_stats->FramesWithExcessiveDeferral =
+ portstats[spx5_stats_tx_xdefer_cnt];
+ mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_pmac_rx_mc_cnt];
+ mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] +
+ portstats[spx5_stats_pmac_rx_bc_cnt];
+ mac_stats->InRangeLengthErrors =
+ portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt];
+ mac_stats->OutOfRangeLengthField =
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt];
+ mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+}
+
+static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *mac_ctrl_stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+ }
+ mac_ctrl_stats->MACControlFramesTransmitted =
+ portstats[spx5_stats_tx_pause_cnt] +
+ portstats[spx5_stats_pmac_tx_pause_cnt];
+ mac_ctrl_stats->MACControlFramesReceived =
+ portstats[spx5_stats_rx_pause_cnt] +
+ portstats[spx5_stats_pmac_rx_pause_cnt];
+ mac_ctrl_stats->UnsupportedOpcodesReceived =
+ portstats[spx5_stats_rx_unsup_opcode_cnt] +
+ portstats[spx5_stats_pmac_rx_unsup_opcode_cnt];
+}
+
+static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_rmon_stats(portstats, inst, portno);
+ }
+ rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] +
+ portstats[spx5_stats_pmac_rx_undersize_cnt];
+ rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+ rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] +
+ portstats[spx5_stats_pmac_rx_fragments_cnt];
+ rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] +
+ portstats[spx5_stats_pmac_rx_jabbers_cnt];
+ rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] +
+ portstats[spx5_stats_pmac_rx_size64_cnt];
+ rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] +
+ portstats[spx5_stats_pmac_rx_size65to127_cnt];
+ rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] +
+ portstats[spx5_stats_pmac_rx_size128to255_cnt];
+ rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] +
+ portstats[spx5_stats_pmac_rx_size256to511_cnt];
+ rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] +
+ portstats[spx5_stats_pmac_rx_size512to1023_cnt];
+ rmon_stats->hist[5] = portstats[spx5_stats_rx_size1024to1518_cnt] +
+ portstats[spx5_stats_pmac_rx_size1024to1518_cnt];
+ rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] +
+ portstats[spx5_stats_pmac_rx_size1519tomax_cnt];
+ rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] +
+ portstats[spx5_stats_pmac_tx_size64_cnt];
+ rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] +
+ portstats[spx5_stats_pmac_tx_size65to127_cnt];
+ rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] +
+ portstats[spx5_stats_pmac_tx_size128to255_cnt];
+ rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] +
+ portstats[spx5_stats_pmac_tx_size256to511_cnt];
+ rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] +
+ portstats[spx5_stats_pmac_tx_size512to1023_cnt];
+ rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] +
+ portstats[spx5_stats_pmac_tx_size1024to1518_cnt];
+ rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] +
+ portstats[spx5_stats_pmac_tx_size1519tomax_cnt];
+ *ranges = sparx5_rmon_ranges;
+}
+
+static int sparx5_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+ return sparx5->num_ethtool_stats;
+}
+
+static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int idx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
+ strncpy(data + idx * ETH_GSTRING_LEN,
+ sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+}
+
+static void sparx5_get_sset_data(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ void __iomem *inst;
+ u64 *portstats;
+ int idx;
+
+ portstats = &sparx5->stats[portno * sparx5->num_stats];
+ if (sparx5_is_baser(port->conf.portmode)) {
+ u32 tinst = sparx5_port_dev_index(portno);
+ u32 dev = sparx5_to_high_dev(portno);
+
+ inst = spx5_inst_get(sparx5, dev, tinst);
+ sparx5_get_dev_misc_stats(portstats, inst, tinst);
+ } else {
+ inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+ sparx5_get_asm_misc_stats(portstats, inst, portno);
+ }
+ sparx5_get_ana_ac_stats_stats(sparx5, portno);
+ sparx5_get_queue_sys_stats(sparx5, portno);
+ /* Copy port counters to the ethtool buffer; the exported set starts
+ * at spx5_stats_mm_rx_assembly_err_cnt, matching sparx5_stats_layout
+ */
+ for (idx = spx5_stats_mm_rx_assembly_err_cnt;
+ idx < spx5_stats_mm_rx_assembly_err_cnt +
+ sparx5->num_ethtool_stats; idx++)
+ *data++ = portstats[idx];
+}
+
+void sparx5_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u64 *portstats;
+ int idx;
+
+ if (!sparx5->stats)
+ return; /* Not initialized yet */
+
+ portstats = &sparx5->stats[port->portno * sparx5->num_stats];
+
+ stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] +
+ portstats[spx5_stats_pmac_rx_uc_cnt] +
+ portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_rx_bc_cnt];
+ stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] +
+ portstats[spx5_stats_pmac_tx_uc_cnt] +
+ portstats[spx5_stats_tx_mc_cnt] +
+ portstats[spx5_stats_tx_bc_cnt];
+ stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+ stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] +
+ portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+ stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt] +
+ portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt] +
+ portstats[spx5_stats_rx_alignment_lost_cnt] +
+ portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] +
+ portstats[spx5_stats_tx_csense_cnt] +
+ portstats[spx5_stats_tx_late_coll_cnt];
+ stats->multicast = portstats[spx5_stats_rx_mc_cnt] +
+ portstats[spx5_stats_pmac_rx_mc_cnt];
+ stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] +
+ portstats[spx5_stats_tx_xcoll_cnt] +
+ portstats[spx5_stats_tx_backoff1_cnt];
+ stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+ portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+ portstats[spx5_stats_rx_oversize_cnt] +
+ portstats[spx5_stats_pmac_rx_oversize_cnt];
+ stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] +
+ portstats[spx5_stats_pmac_rx_crc_err_cnt];
+ stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+ portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+ stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt];
+ stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
+ stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
+ stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
+ stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ + idx];
+ stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
+}
+
+static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno)
+{
+ if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode))
+ sparx5_get_device_stats(sparx5, portno);
+ else
+ sparx5_get_asm_stats(sparx5, portno);
+ sparx5_get_ana_ac_stats_stats(sparx5, portno);
+ sparx5_get_queue_sys_stats(sparx5, portno);
+}
+
+static void sparx5_update_stats(struct sparx5 *sparx5)
+{
+ int idx;
+
+ for (idx = 0; idx < SPX5_PORTS; idx++)
+ if (sparx5->ports[idx])
+ sparx5_update_port_stats(sparx5, idx);
+}
+
+static void sparx5_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct sparx5 *sparx5 = container_of(dwork,
+ struct sparx5,
+ stats_work);
+
+ sparx5_update_stats(sparx5);
+
+ queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+ SPX5_STATS_CHECK_DELAY);
+}
+
+static int sparx5_get_link_settings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int sparx5_set_link_settings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void sparx5_config_stats(struct sparx5 *sparx5)
+{
+ /* Enable global events for port policer drops */
+ spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0),
+ ANA_AC_PORT_SGE_CFG_MASK,
+ sparx5,
+ ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS));
+}
+
+static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
+{
+ /* Clear Queue System counters */
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) |
+ XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5,
+ XQS_STAT_CFG);
+
+ /* Use counter for port policer drop count */
+ spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) |
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) |
+ ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff),
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE |
+ ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE |
+ ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK,
+ sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
+}
+
+const struct ethtool_ops sparx5_ethtool_ops = {
+ .get_sset_count = sparx5_get_sset_count,
+ .get_strings = sparx5_get_sset_strings,
+ .get_ethtool_stats = sparx5_get_sset_data,
+ .get_link_ksettings = sparx5_get_link_settings,
+ .set_link_ksettings = sparx5_set_link_settings,
+ .get_link = ethtool_op_get_link,
+ .get_eth_phy_stats = sparx5_get_eth_phy_stats,
+ .get_eth_mac_stats = sparx5_get_eth_mac_stats,
+ .get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
+ .get_rmon_stats = sparx5_get_eth_rmon_stats,
+};
+
+int sparx_stats_init(struct sparx5 *sparx5)
+{
+ char queue_name[32];
+ int portno;
+
+ sparx5->stats_layout = sparx5_stats_layout;
+ sparx5->num_stats = spx5_stats_count;
+ sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
+ sparx5->stats = devm_kcalloc(sparx5->dev,
+ SPX5_PORTS_ALL * sparx5->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!sparx5->stats)
+ return -ENOMEM;
+
+ mutex_init(&sparx5->queue_stats_lock);
+ sparx5_config_stats(sparx5);
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno])
+ sparx5_config_port_stats(sparx5, portno);
+
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(sparx5->dev));
+	sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+	if (!sparx5->stats_queue)
+		return -ENOMEM;
+ INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
+ queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+ SPX5_STATS_CHECK_DELAY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
new file mode 100644
index 000000000000..0443f66b5550
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* Commands for Mac Table Command register */
+#define MAC_CMD_LEARN 0 /* Insert (Learn) 1 entry */
+#define MAC_CMD_UNLEARN 1 /* Unlearn (Forget) 1 entry */
+#define MAC_CMD_LOOKUP 2 /* Look up 1 entry */
+#define MAC_CMD_READ 3 /* Read entry at Mac Table Index */
+#define MAC_CMD_WRITE 4 /* Write entry at Mac Table Index */
+#define MAC_CMD_SCAN 5 /* Scan (Age or find next) */
+#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
+#define MAC_CMD_CLEAR_ALL 7 /* Delete all entries in table */
+
+/* Commands for MAC_ENTRY_ADDR_TYPE */
+#define MAC_ENTRY_ADDR_TYPE_UPSID_PN 0
+#define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
+#define MAC_ENTRY_ADDR_TYPE_GLAG 2
+#define MAC_ENTRY_ADDR_TYPE_MC_IDX 3
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+struct sparx5_mact_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u32 flags;
+#define MAC_ENT_ALIVE BIT(0)
+#define MAC_ENT_MOVED BIT(1)
+#define MAC_ENT_LOCK BIT(2)
+ u16 vid;
+ u16 port;
+};
+
+static int sparx5_mact_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
+}
+
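+
+/* The MAC table is driven by one-shot commands: the SHOT bit is set
+ * together with a command and cleared by the hardware when the command
+ * has completed.  The helper below polls that bit every
+ * TABLE_UPDATE_SLEEP_US (10 us) for up to TABLE_UPDATE_TIMEOUT_US
+ * (100 ms) and returns -ETIMEDOUT if it never clears.
+ */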
+static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_mact_get_status,
+ sparx5, val,
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
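+/* A worked example of the encoding done by sparx5_mact_select() below:
+ * for MAC 00:11:22:33:44:55 on VID 1 the registers are written as
+ * mach = 0x00010011 (VID in bits 31:16, mac[0]/mac[1] in bits 15:0) and
+ * macl = 0x22334455 (mac[2]..mac[5]).
+ */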
+static void sparx5_mact_select(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN],
+ u16 vid)
+{
+ u32 macl = 0, mach = 0;
+
+	/* Set the MAC address to handle and its associated VLAN, in the
+	 * format understood by the hardware.
+	 */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
+ spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
+}
+
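+/* For front ports the MAC table entry address encodes both the unit
+ * port set ID (upsid) and the port within that set: e.g. pgid 40 gives
+ * addr = 8 + (1 << 5), i.e. port 8 in upsid 1.  Multicast entries use
+ * an index relative to SPX5_PORTS instead.
+ */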
+int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
+ const unsigned char mac[ETH_ALEN], u16 vid)
+{
+ int addr, type, ret;
+
+ if (pgid < SPX5_PORTS) {
+ type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
+ addr = pgid % 32;
+ addr += (pgid / 32) << 5; /* Add upsid */
+ } else {
+ type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
+ addr = pgid - SPX5_PORTS;
+ }
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* MAC entry properties */
+ spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
+ LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
+ sparx5, LRN_MAC_ACCESS_CFG_2);
+ spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
+
+ /* Insert/learn new entry */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ return sparx5_mact_forget(sparx5, addr, port->pvid);
+}
+
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+}
+
+static int sparx5_mact_get(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN],
+ u16 *vid, u32 *pcfg2)
+{
+ u32 mach, macl, cfg2;
+ int ret = -ENOENT;
+
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
+ mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
+ macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
+ mac[0] = ((mach >> 8) & 0xff);
+ mac[1] = ((mach >> 0) & 0xff);
+ mac[2] = ((macl >> 24) & 0xff);
+ mac[3] = ((macl >> 16) & 0xff);
+ mac[4] = ((macl >> 8) & 0xff);
+ mac[5] = ((macl >> 0) & 0xff);
+ *vid = mach >> 16;
+ *pcfg2 = cfg2;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
+{
+ u32 cfg2;
+ int ret;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, *vid);
+
+ spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
+ LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+ sparx5, LRN_SCAN_NEXT_CFG);
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+ (MAC_CMD_FIND_SMALLEST) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret == 0) {
+ ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
+ if (ret == 0)
+ *pcfg2 = cfg2;
+ }
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN],
+ u16 vid)
+{
+ int ret;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* Issue a lookup command */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret)
+ goto out;
+
+ ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
+ (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+
+out:
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+int sparx5_mact_forget(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid)
+{
+ int ret;
+
+ mutex_lock(&sparx5->lock);
+
+ sparx5_mact_select(sparx5, mac, vid);
+
+ /* Issue an unlearn command */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ ret = sparx5_mact_wait_for_completion(sparx5);
+
+ mutex_unlock(&sparx5->lock);
+
+ return ret;
+}
+
+static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct sparx5_mact_entry *mact_entry;
+
+ mact_entry = devm_kzalloc(sparx5->dev,
+ sizeof(*mact_entry), GFP_ATOMIC);
+ if (!mact_entry)
+ return NULL;
+
+ memcpy(mact_entry->mac, mac, ETH_ALEN);
+ mact_entry->vid = vid;
+ mact_entry->port = port_index;
+ return mact_entry;
+}
+
+static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct sparx5_mact_entry *mact_entry;
+ struct sparx5_mact_entry *res = NULL;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+ if (mact_entry->vid == vid &&
+ ether_addr_equal(mac, mact_entry->mac) &&
+ mact_entry->port == port_index) {
+ res = mact_entry;
+ break;
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ return res;
+}
+
+static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev, bool offloaded)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = offloaded;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sparx5_mact_entry *mact_entry;
+ int ret;
+
+ ret = sparx5_mact_lookup(sparx5, addr, vid);
+ if (ret)
+ return 0;
+
+	/* In case the entry already exists, don't add it again to SW,
+	 * just update HW, but we need to look in the actual HW because
+	 * it is possible for an entry to be learned by HW and, before the
+	 * mact thread runs, the frame reaches the CPU, which then adds the
+	 * entry but without the extern_learn flag.
+	 */
+ mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+ if (mact_entry)
+ goto update_hw;
+
+	/* Add the entry to the SW MAC table so that we don't get a
+	 * notification when SW is pulling again
+	 */
+ mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+ if (!mact_entry)
+ return -ENOMEM;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+ mutex_unlock(&sparx5->mact_lock);
+
+update_hw:
+ ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+
+ /* New entry? */
+ if (mact_entry->flags == 0) {
+ mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
+ port->ndev, true);
+ }
+
+ return ret;
+}
+
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mact_entry *mact_entry, *tmp;
+
+	/* Delete the entry from the SW MAC table so that we don't get a
+	 * notification when SW is pulling again
+	 */
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+ list) {
+ if ((vid == 0 || mact_entry->vid == vid) &&
+ ether_addr_equal(addr, mact_entry->mac)) {
+ list_del(&mact_entry->list);
+ devm_kfree(sparx5->dev, mact_entry);
+
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ return 0;
+}
+
+static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN],
+ u16 vid, u32 cfg2)
+{
+ struct sparx5_mact_entry *mact_entry;
+ bool found = false;
+ u16 port;
+
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
+ MAC_ENTRY_ADDR_TYPE_UPSID_PN)
+ return;
+
+ port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
+ if (port >= SPX5_PORTS)
+ return;
+
+ if (!test_bit(port, sparx5->bridge_mask))
+ return;
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+ if (mact_entry->vid == vid &&
+ ether_addr_equal(mac, mact_entry->mac)) {
+ found = true;
+ mact_entry->flags |= MAC_ENT_ALIVE;
+ if (mact_entry->port != port) {
+ dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
+ mact_entry->port, port);
+ mact_entry->port = port;
+ mact_entry->flags |= MAC_ENT_MOVED;
+ }
+ /* Entry handled */
+ break;
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ if (found && !(mact_entry->flags & MAC_ENT_MOVED))
+ /* Present, not moved */
+ return;
+
+ if (!found) {
+ /* Entry not found - now add */
+ mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
+ if (!mact_entry)
+ return;
+
+ mact_entry->flags |= MAC_ENT_ALIVE;
+ mutex_lock(&sparx5->mact_lock);
+ list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+ mutex_unlock(&sparx5->mact_lock);
+ }
+
+ /* New or moved entry - notify bridge */
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, sparx5->ports[port]->ndev,
+ true);
+}
+
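+/* The SW MAC table is kept in sync with the HW table by a mark and
+ * sweep scheme: first all non-locked flags are cleared, then all HW
+ * entries are walked with FIND_SMALLEST and matching SW entries are
+ * marked alive (or moved), and finally SW entries that are neither
+ * alive nor locked are aged out and the bridge is notified.
+ */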
+void sparx5_mact_pull_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
+ mact_work);
+ struct sparx5_mact_entry *mact_entry, *tmp;
+ unsigned char mac[ETH_ALEN];
+ u32 cfg2;
+ u16 vid;
+ int ret;
+
+ /* Reset MAC entry flags */
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
+ mact_entry->flags &= MAC_ENT_LOCK;
+ mutex_unlock(&sparx5->mact_lock);
+
+	/* Main MAC address processing loop */
+ vid = 0;
+ memset(mac, 0, sizeof(mac));
+ do {
+ mutex_lock(&sparx5->lock);
+ sparx5_mact_select(sparx5, mac, vid);
+ spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+ sparx5, LRN_SCAN_NEXT_CFG);
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+ (MAC_CMD_FIND_SMALLEST) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+ ret = sparx5_mact_wait_for_completion(sparx5);
+ if (ret == 0)
+ ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
+ mutex_unlock(&sparx5->lock);
+ if (ret == 0)
+ sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
+ } while (ret == 0);
+
+ mutex_lock(&sparx5->mact_lock);
+ list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+ list) {
+ /* If the entry is in HW or permanent, then skip */
+ if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
+ continue;
+
+ sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mact_entry->mac, mact_entry->vid,
+ sparx5->ports[mact_entry->port]->ndev,
+ true);
+
+ list_del(&mact_entry->list);
+ devm_kfree(sparx5->dev, mact_entry);
+ }
+ mutex_unlock(&sparx5->mact_lock);
+
+ queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+ SPX5_MACT_PULL_DELAY);
+}
+
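+/* A worked example for sparx5_set_ageing() below: with the default
+ * bridge ageing time of 300 seconds, msecs = 300000 gives value = 30000
+ * and PERIOD_VAL = 15000 units of 10 ms, i.e. a 150 second scan period.
+ * With one-bit ageing an entry is removed after two scan periods,
+ * restoring the requested 300 seconds.
+ */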
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
+{
+ int value = max(1, msecs / 10); /* unit 10 ms */
+
+ spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
+ LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
+ LRN_AUTOAGE_CFG_UNIT_SIZE |
+ LRN_AUTOAGE_CFG_PERIOD_VAL,
+ sparx5,
+ LRN_AUTOAGE_CFG(0));
+}
+
+void sparx5_mact_init(struct sparx5 *sparx5)
+{
+ mutex_init(&sparx5->lock);
+
+ /* Flush MAC table */
+ spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
+ LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ sparx5, LRN_COMMON_ACCESS_CTRL);
+
+ if (sparx5_mact_wait_for_completion(sparx5) != 0)
+ dev_warn(sparx5->dev, "MAC flush error\n");
+
+ sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
new file mode 100644
index 000000000000..f666133a15de
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <net/switchdev.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define QLIM_WM(fraction) \
+ ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
+#define IO_RANGES 3
+
+struct initial_port_config {
+ u32 portno;
+ struct device_node *node;
+ struct sparx5_port_config conf;
+ struct phy *serdes;
+};
+
+struct sparx5_ram_config {
+ void __iomem *init_reg;
+ u32 init_val;
+};
+
+struct sparx5_main_io_resource {
+ enum sparx5_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
+ { TARGET_CPU, 0, 0 }, /* 0x600000000 */
+ { TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */
+ { TARGET_PCEP, 0x400000, 0 }, /* 0x600400000 */
+ { TARGET_DEV2G5, 0x10004000, 1 }, /* 0x610004000 */
+ { TARGET_DEV5G, 0x10008000, 1 }, /* 0x610008000 */
+ { TARGET_PCS5G_BR, 0x1000c000, 1 }, /* 0x61000c000 */
+ { TARGET_DEV2G5 + 1, 0x10010000, 1 }, /* 0x610010000 */
+ { TARGET_DEV5G + 1, 0x10014000, 1 }, /* 0x610014000 */
+ { TARGET_PCS5G_BR + 1, 0x10018000, 1 }, /* 0x610018000 */
+ { TARGET_DEV2G5 + 2, 0x1001c000, 1 }, /* 0x61001c000 */
+ { TARGET_DEV5G + 2, 0x10020000, 1 }, /* 0x610020000 */
+ { TARGET_PCS5G_BR + 2, 0x10024000, 1 }, /* 0x610024000 */
+ { TARGET_DEV2G5 + 6, 0x10028000, 1 }, /* 0x610028000 */
+ { TARGET_DEV5G + 6, 0x1002c000, 1 }, /* 0x61002c000 */
+ { TARGET_PCS5G_BR + 6, 0x10030000, 1 }, /* 0x610030000 */
+ { TARGET_DEV2G5 + 7, 0x10034000, 1 }, /* 0x610034000 */
+ { TARGET_DEV5G + 7, 0x10038000, 1 }, /* 0x610038000 */
+ { TARGET_PCS5G_BR + 7, 0x1003c000, 1 }, /* 0x61003c000 */
+ { TARGET_DEV2G5 + 8, 0x10040000, 1 }, /* 0x610040000 */
+ { TARGET_DEV5G + 8, 0x10044000, 1 }, /* 0x610044000 */
+ { TARGET_PCS5G_BR + 8, 0x10048000, 1 }, /* 0x610048000 */
+ { TARGET_DEV2G5 + 9, 0x1004c000, 1 }, /* 0x61004c000 */
+ { TARGET_DEV5G + 9, 0x10050000, 1 }, /* 0x610050000 */
+ { TARGET_PCS5G_BR + 9, 0x10054000, 1 }, /* 0x610054000 */
+ { TARGET_DEV2G5 + 10, 0x10058000, 1 }, /* 0x610058000 */
+ { TARGET_DEV5G + 10, 0x1005c000, 1 }, /* 0x61005c000 */
+ { TARGET_PCS5G_BR + 10, 0x10060000, 1 }, /* 0x610060000 */
+ { TARGET_DEV2G5 + 11, 0x10064000, 1 }, /* 0x610064000 */
+ { TARGET_DEV5G + 11, 0x10068000, 1 }, /* 0x610068000 */
+ { TARGET_PCS5G_BR + 11, 0x1006c000, 1 }, /* 0x61006c000 */
+ { TARGET_DEV2G5 + 12, 0x10070000, 1 }, /* 0x610070000 */
+ { TARGET_DEV10G, 0x10074000, 1 }, /* 0x610074000 */
+ { TARGET_PCS10G_BR, 0x10078000, 1 }, /* 0x610078000 */
+ { TARGET_DEV2G5 + 14, 0x1007c000, 1 }, /* 0x61007c000 */
+ { TARGET_DEV10G + 2, 0x10080000, 1 }, /* 0x610080000 */
+ { TARGET_PCS10G_BR + 2, 0x10084000, 1 }, /* 0x610084000 */
+ { TARGET_DEV2G5 + 15, 0x10088000, 1 }, /* 0x610088000 */
+ { TARGET_DEV10G + 3, 0x1008c000, 1 }, /* 0x61008c000 */
+ { TARGET_PCS10G_BR + 3, 0x10090000, 1 }, /* 0x610090000 */
+ { TARGET_DEV2G5 + 16, 0x10094000, 1 }, /* 0x610094000 */
+ { TARGET_DEV2G5 + 17, 0x10098000, 1 }, /* 0x610098000 */
+ { TARGET_DEV2G5 + 18, 0x1009c000, 1 }, /* 0x61009c000 */
+ { TARGET_DEV2G5 + 19, 0x100a0000, 1 }, /* 0x6100a0000 */
+ { TARGET_DEV2G5 + 20, 0x100a4000, 1 }, /* 0x6100a4000 */
+ { TARGET_DEV2G5 + 21, 0x100a8000, 1 }, /* 0x6100a8000 */
+ { TARGET_DEV2G5 + 22, 0x100ac000, 1 }, /* 0x6100ac000 */
+ { TARGET_DEV2G5 + 23, 0x100b0000, 1 }, /* 0x6100b0000 */
+ { TARGET_DEV2G5 + 32, 0x100b4000, 1 }, /* 0x6100b4000 */
+ { TARGET_DEV2G5 + 33, 0x100b8000, 1 }, /* 0x6100b8000 */
+ { TARGET_DEV2G5 + 34, 0x100bc000, 1 }, /* 0x6100bc000 */
+ { TARGET_DEV2G5 + 35, 0x100c0000, 1 }, /* 0x6100c0000 */
+ { TARGET_DEV2G5 + 36, 0x100c4000, 1 }, /* 0x6100c4000 */
+ { TARGET_DEV2G5 + 37, 0x100c8000, 1 }, /* 0x6100c8000 */
+ { TARGET_DEV2G5 + 38, 0x100cc000, 1 }, /* 0x6100cc000 */
+ { TARGET_DEV2G5 + 39, 0x100d0000, 1 }, /* 0x6100d0000 */
+ { TARGET_DEV2G5 + 40, 0x100d4000, 1 }, /* 0x6100d4000 */
+ { TARGET_DEV2G5 + 41, 0x100d8000, 1 }, /* 0x6100d8000 */
+ { TARGET_DEV2G5 + 42, 0x100dc000, 1 }, /* 0x6100dc000 */
+ { TARGET_DEV2G5 + 43, 0x100e0000, 1 }, /* 0x6100e0000 */
+ { TARGET_DEV2G5 + 44, 0x100e4000, 1 }, /* 0x6100e4000 */
+ { TARGET_DEV2G5 + 45, 0x100e8000, 1 }, /* 0x6100e8000 */
+ { TARGET_DEV2G5 + 46, 0x100ec000, 1 }, /* 0x6100ec000 */
+ { TARGET_DEV2G5 + 47, 0x100f0000, 1 }, /* 0x6100f0000 */
+ { TARGET_DEV2G5 + 57, 0x100f4000, 1 }, /* 0x6100f4000 */
+ { TARGET_DEV25G + 1, 0x100f8000, 1 }, /* 0x6100f8000 */
+ { TARGET_PCS25G_BR + 1, 0x100fc000, 1 }, /* 0x6100fc000 */
+ { TARGET_DEV2G5 + 59, 0x10104000, 1 }, /* 0x610104000 */
+ { TARGET_DEV25G + 3, 0x10108000, 1 }, /* 0x610108000 */
+ { TARGET_PCS25G_BR + 3, 0x1010c000, 1 }, /* 0x61010c000 */
+ { TARGET_DEV2G5 + 60, 0x10114000, 1 }, /* 0x610114000 */
+ { TARGET_DEV25G + 4, 0x10118000, 1 }, /* 0x610118000 */
+ { TARGET_PCS25G_BR + 4, 0x1011c000, 1 }, /* 0x61011c000 */
+ { TARGET_DEV2G5 + 64, 0x10124000, 1 }, /* 0x610124000 */
+ { TARGET_DEV5G + 12, 0x10128000, 1 }, /* 0x610128000 */
+ { TARGET_PCS5G_BR + 12, 0x1012c000, 1 }, /* 0x61012c000 */
+ { TARGET_PORT_CONF, 0x10130000, 1 }, /* 0x610130000 */
+ { TARGET_DEV2G5 + 3, 0x10404000, 1 }, /* 0x610404000 */
+ { TARGET_DEV5G + 3, 0x10408000, 1 }, /* 0x610408000 */
+ { TARGET_PCS5G_BR + 3, 0x1040c000, 1 }, /* 0x61040c000 */
+ { TARGET_DEV2G5 + 4, 0x10410000, 1 }, /* 0x610410000 */
+ { TARGET_DEV5G + 4, 0x10414000, 1 }, /* 0x610414000 */
+ { TARGET_PCS5G_BR + 4, 0x10418000, 1 }, /* 0x610418000 */
+ { TARGET_DEV2G5 + 5, 0x1041c000, 1 }, /* 0x61041c000 */
+ { TARGET_DEV5G + 5, 0x10420000, 1 }, /* 0x610420000 */
+ { TARGET_PCS5G_BR + 5, 0x10424000, 1 }, /* 0x610424000 */
+ { TARGET_DEV2G5 + 13, 0x10428000, 1 }, /* 0x610428000 */
+ { TARGET_DEV10G + 1, 0x1042c000, 1 }, /* 0x61042c000 */
+ { TARGET_PCS10G_BR + 1, 0x10430000, 1 }, /* 0x610430000 */
+ { TARGET_DEV2G5 + 24, 0x10434000, 1 }, /* 0x610434000 */
+ { TARGET_DEV2G5 + 25, 0x10438000, 1 }, /* 0x610438000 */
+ { TARGET_DEV2G5 + 26, 0x1043c000, 1 }, /* 0x61043c000 */
+ { TARGET_DEV2G5 + 27, 0x10440000, 1 }, /* 0x610440000 */
+ { TARGET_DEV2G5 + 28, 0x10444000, 1 }, /* 0x610444000 */
+ { TARGET_DEV2G5 + 29, 0x10448000, 1 }, /* 0x610448000 */
+ { TARGET_DEV2G5 + 30, 0x1044c000, 1 }, /* 0x61044c000 */
+ { TARGET_DEV2G5 + 31, 0x10450000, 1 }, /* 0x610450000 */
+ { TARGET_DEV2G5 + 48, 0x10454000, 1 }, /* 0x610454000 */
+ { TARGET_DEV10G + 4, 0x10458000, 1 }, /* 0x610458000 */
+ { TARGET_PCS10G_BR + 4, 0x1045c000, 1 }, /* 0x61045c000 */
+ { TARGET_DEV2G5 + 49, 0x10460000, 1 }, /* 0x610460000 */
+ { TARGET_DEV10G + 5, 0x10464000, 1 }, /* 0x610464000 */
+ { TARGET_PCS10G_BR + 5, 0x10468000, 1 }, /* 0x610468000 */
+ { TARGET_DEV2G5 + 50, 0x1046c000, 1 }, /* 0x61046c000 */
+ { TARGET_DEV10G + 6, 0x10470000, 1 }, /* 0x610470000 */
+ { TARGET_PCS10G_BR + 6, 0x10474000, 1 }, /* 0x610474000 */
+ { TARGET_DEV2G5 + 51, 0x10478000, 1 }, /* 0x610478000 */
+ { TARGET_DEV10G + 7, 0x1047c000, 1 }, /* 0x61047c000 */
+ { TARGET_PCS10G_BR + 7, 0x10480000, 1 }, /* 0x610480000 */
+ { TARGET_DEV2G5 + 52, 0x10484000, 1 }, /* 0x610484000 */
+ { TARGET_DEV10G + 8, 0x10488000, 1 }, /* 0x610488000 */
+ { TARGET_PCS10G_BR + 8, 0x1048c000, 1 }, /* 0x61048c000 */
+ { TARGET_DEV2G5 + 53, 0x10490000, 1 }, /* 0x610490000 */
+ { TARGET_DEV10G + 9, 0x10494000, 1 }, /* 0x610494000 */
+ { TARGET_PCS10G_BR + 9, 0x10498000, 1 }, /* 0x610498000 */
+ { TARGET_DEV2G5 + 54, 0x1049c000, 1 }, /* 0x61049c000 */
+ { TARGET_DEV10G + 10, 0x104a0000, 1 }, /* 0x6104a0000 */
+ { TARGET_PCS10G_BR + 10, 0x104a4000, 1 }, /* 0x6104a4000 */
+ { TARGET_DEV2G5 + 55, 0x104a8000, 1 }, /* 0x6104a8000 */
+ { TARGET_DEV10G + 11, 0x104ac000, 1 }, /* 0x6104ac000 */
+ { TARGET_PCS10G_BR + 11, 0x104b0000, 1 }, /* 0x6104b0000 */
+ { TARGET_DEV2G5 + 56, 0x104b4000, 1 }, /* 0x6104b4000 */
+ { TARGET_DEV25G, 0x104b8000, 1 }, /* 0x6104b8000 */
+ { TARGET_PCS25G_BR, 0x104bc000, 1 }, /* 0x6104bc000 */
+ { TARGET_DEV2G5 + 58, 0x104c4000, 1 }, /* 0x6104c4000 */
+ { TARGET_DEV25G + 2, 0x104c8000, 1 }, /* 0x6104c8000 */
+ { TARGET_PCS25G_BR + 2, 0x104cc000, 1 }, /* 0x6104cc000 */
+ { TARGET_DEV2G5 + 61, 0x104d4000, 1 }, /* 0x6104d4000 */
+ { TARGET_DEV25G + 5, 0x104d8000, 1 }, /* 0x6104d8000 */
+ { TARGET_PCS25G_BR + 5, 0x104dc000, 1 }, /* 0x6104dc000 */
+ { TARGET_DEV2G5 + 62, 0x104e4000, 1 }, /* 0x6104e4000 */
+ { TARGET_DEV25G + 6, 0x104e8000, 1 }, /* 0x6104e8000 */
+ { TARGET_PCS25G_BR + 6, 0x104ec000, 1 }, /* 0x6104ec000 */
+ { TARGET_DEV2G5 + 63, 0x104f4000, 1 }, /* 0x6104f4000 */
+ { TARGET_DEV25G + 7, 0x104f8000, 1 }, /* 0x6104f8000 */
+ { TARGET_PCS25G_BR + 7, 0x104fc000, 1 }, /* 0x6104fc000 */
+ { TARGET_DSM, 0x10504000, 1 }, /* 0x610504000 */
+ { TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */
+ { TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */
+ { TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */
+ { TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */
+ { TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */
+ { TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */
+ { TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */
+ { TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */
+ { TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */
+ { TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */
+ { TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */
+ { TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */
+ { TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */
+ { TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */
+ { TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */
+ { TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */
+ { TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */
+ { TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */
+ { TARGET_ANA_AC, 0x11900000, 2 }, /* 0x611900000 */
+ { TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */
+};
+
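+/* The switch registers are exposed through three memory resources (see
+ * IO_RANGES).  sparx5_create_targets() below ioremaps each resource and
+ * computes a virtual base per target so that, for example, the entry
+ * { TARGET_DEV2G5 + 1, 0x10010000, 1 } above resolves to
+ * regs[TARGET_DEV2G5 + 1] = begin[1] + 0x10010000, where begin[1] is
+ * the mapping of range 1 adjusted by the offset of its first target.
+ */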
+static int sparx5_create_targets(struct sparx5 *sparx5)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *iomem[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int range_id[IO_RANGES];
+ int idx, jdx;
+
+ for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+ const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+ if (idx == iomap->range) {
+ range_id[idx] = jdx;
+ idx++;
+ }
+ }
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(sparx5->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+		iomem[idx] = devm_ioremap(sparx5->dev,
+					  iores[idx]->start,
+					  resource_size(iores[idx]));
+ if (!iomem[idx]) {
+ dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+ }
+ for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+ const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+ sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+ return 0;
+}
+
+static int sparx5_create_port(struct sparx5 *sparx5,
+ struct initial_port_config *config)
+{
+ struct sparx5_port *spx5_port;
+ struct net_device *ndev;
+ struct phylink *phylink;
+ int err;
+
+ ndev = sparx5_create_netdev(sparx5, config->portno);
+ if (IS_ERR(ndev)) {
+ dev_err(sparx5->dev, "Could not create net device: %02u\n",
+ config->portno);
+ return PTR_ERR(ndev);
+ }
+ spx5_port = netdev_priv(ndev);
+ spx5_port->of_node = config->node;
+ spx5_port->serdes = config->serdes;
+ spx5_port->pvid = NULL_VID;
+ spx5_port->signd_internal = true;
+ spx5_port->signd_active_high = true;
+ spx5_port->signd_enable = true;
+ spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE;
+ spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE;
+ spx5_port->custom_etype = 0x8880; /* Vitesse */
+ spx5_port->phylink_pcs.poll = true;
+ spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+ sparx5->ports[config->portno] = spx5_port;
+
+ err = sparx5_port_init(sparx5, spx5_port, &config->conf);
+ if (err) {
+ dev_err(sparx5->dev, "port init failed\n");
+ return err;
+ }
+ spx5_port->conf = config->conf;
+
+ /* Setup VLAN */
+ sparx5_vlan_port_setup(sparx5, spx5_port->portno);
+
+ /* Create a phylink for PHY management. Also handles SFPs */
+ spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
+ spx5_port->phylink_config.type = PHYLINK_NETDEV;
+ spx5_port->phylink_config.pcs_poll = true;
+
+ phylink = phylink_create(&spx5_port->phylink_config,
+ of_fwnode_handle(config->node),
+ config->conf.phy_mode,
+ &sparx5_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ spx5_port->phylink = phylink;
+ phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
+
+ return 0;
+}
+
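+/* RAM initialization is started by writing the init bit of each memory
+ * controller below and then polling until the hardware clears the bit
+ * again: the first pass of the loop writes init_val, and the following
+ * passes (at 1-2 ms intervals, at most ten in total) count how many
+ * controllers are still busy.
+ */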
+static int sparx5_init_ram(struct sparx5 *s5)
+{
+ const struct sparx5_ram_config spx5_ram_cfg[] = {
+ {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET},
+ {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT},
+ {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+ {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}
+ };
+ const struct sparx5_ram_config *cfg;
+ u32 value, pending, jdx, idx;
+
+ for (jdx = 0; jdx < 10; jdx++) {
+ pending = ARRAY_SIZE(spx5_ram_cfg);
+ for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) {
+ cfg = &spx5_ram_cfg[idx];
+ if (jdx == 0) {
+ writel(cfg->init_val, cfg->init_reg);
+ } else {
+ value = readl(cfg->init_reg);
+ if ((value & cfg->init_val) != cfg->init_val)
+ pending--;
+ }
+ }
+ if (!pending)
+ break;
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ }
+
+ if (pending > 0) {
+		/* Still initializing after the polling above; normally
+		 * initialization completes in less than 1 ms
+		 */
+ dev_err(s5->dev, "Memory initialization error\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sparx5_init_switchcore(struct sparx5 *sparx5)
+{
+ u32 value;
+ int err = 0;
+
+ spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1),
+ EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+ sparx5,
+ EACL_POL_EACL_CFG);
+
+ spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0),
+ EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+ sparx5,
+ EACL_POL_EACL_CFG);
+
+ /* Initialize memories, if not done already */
+ value = spx5_rd(sparx5, HSCH_RESET_CFG);
+ if (!(value & HSCH_RESET_CFG_CORE_ENA)) {
+ err = sparx5_init_ram(sparx5);
+ if (err)
+ return err;
+ }
+
+ /* Reset counters */
+ spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET);
+ spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG);
+
+ /* Enable switch-core and queue system */
+ spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG);
+
+ return 0;
+}
+
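+/* A worked example of the clock setup done below: on a 625 MHz capable
+ * target the LCPLL divider becomes clk_div = 4, the clock period is
+ * sparx5_clk_period() = 1600 ps, so HSCH_SYS_CLK_PER_100PS is written
+ * as 16, and the policer update interval becomes pol_upd_int = 780.
+ */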
+static int sparx5_init_coreclock(struct sparx5 *sparx5)
+{
+ enum sparx5_core_clockfreq freq = sparx5->coreclock;
+ u32 clk_div, clk_period, pol_upd_int, idx;
+
+	/* Verify that the core clock frequency is supported on the target.
+	 * If 'SPX5_CORE_CLOCK_DEFAULT' is used, the highest supported
+	 * frequency is chosen.
+	 */
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_250MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_500MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7558TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ)
+ freq = 0; /* Not supported */
+ break;
+ case SPX5_TARGET_CT_7546TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ break;
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+ freq = SPX5_CORE_CLOCK_625MHZ;
+ else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
+ freq = 0; /* Not supported */
+ break;
+ default:
+ dev_err(sparx5->dev, "Target (%#04x) not supported\n",
+ sparx5->target_ct);
+ return -ENODEV;
+ }
+
+ switch (freq) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ clk_div = 10;
+ pol_upd_int = 312;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ clk_div = 5;
+ pol_upd_int = 624;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ clk_div = 4;
+ pol_upd_int = 780;
+ break;
+ default:
+ dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
+ sparx5->coreclock, sparx5->target_ct);
+ return -EINVAL;
+ }
+
+ /* Update state with chosen frequency */
+ sparx5->coreclock = freq;
+
+ /* Configure the LCPLL */
+ spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+ sparx5,
+ CLKGEN_LCPLL1_CORE_CLK_CFG);
+
+ clk_period = sparx5_clk_period(freq);
+
+ spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+ sparx5,
+ HSCH_SYS_CLK_PER);
+
+ spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+ ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
+ sparx5,
+ ANA_AC_POL_BDLB_DLB_CTRL);
+
+ spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+ ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS,
+ sparx5,
+ ANA_AC_POL_SLB_DLB_CTRL);
+
+ spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100),
+ LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS,
+ sparx5,
+ LRN_AUTOAGE_CFG_1);
+
+ for (idx = 0; idx < 3; idx++)
+ spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
+ GCB_SIO_CLOCK_SYS_CLK_PERIOD,
+ sparx5,
+ GCB_SIO_CLOCK(idx));
+
+ spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET
+ ((256 * 1000) / clk_period),
+ HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY,
+ sparx5,
+ HSCH_TAS_STATEMACHINE_CFG);
+
+ spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int),
+ ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT,
+ sparx5,
+ ANA_AC_POL_POL_UPD_INT_CFG);
+
+ return 0;
+}
+
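+/* A worked example of the watermarks set below: with SPX5_BUFFER_MEMORY
+ * of 4194280 bytes and SPX5_BUFFER_CELL_SZ of 184 bytes the buffer
+ * holds 22795 cells, so QLIM_WM(80) = (22795 - 100) * 80 / 100 = 18156
+ * cells for the 80% shared queue limit watermark.
+ */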
+static int sparx5_qlim_set(struct sparx5 *sparx5)
+{
+ u32 res, dp, prio;
+
+ for (res = 0; res < 2; res++) {
+ for (prio = 0; prio < 8; prio++)
+ spx5_wr(0xFFF, sparx5,
+ QRES_RES_CFG(prio + 630 + res * 1024));
+
+ for (dp = 0; dp < 4; dp++)
+ spx5_wr(0xFFF, sparx5,
+ QRES_RES_CFG(dp + 638 + res * 1024));
+ }
+
+ /* Set 80,90,95,100% of memory size for top watermarks */
+ spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+ spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+ spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+ spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+
+ return 0;
+}
+
+/* Some boards need to map the SGPIO used for signal detect explicitly
+ * to the port module
+ */
+static void sparx5_board_init(struct sparx5 *sparx5)
+{
+ int idx;
+
+ if (!sparx5->sd_sgpio_remapping)
+ return;
+
+ /* Enable SGPIO Signal Detect remapping */
+ spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+ GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+ sparx5,
+ GCB_HW_SGPIO_SD_CFG);
+
+ /* Refer to LOS SGPIO */
+ for (idx = 0; idx < SPX5_PORTS; idx++)
+ if (sparx5->ports[idx])
+ if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
+ spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
+ sparx5,
+ GCB_HW_SGPIO_TO_SD_MAP_CFG(idx));
+}
+
+static int sparx5_start(struct sparx5 *sparx5)
+{
+ u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ char queue_name[32];
+ u32 idx;
+ int err;
+
+ /* Setup own UPSIDs */
+ for (idx = 0; idx < 3; idx++) {
+ spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
+ spx5_wr(idx, sparx5, REW_OWN_UPSID(idx));
+ }
+
+ /* Enable CPU ports */
+ for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(idx));
+
+ /* Init masks */
+ sparx5_update_fwd(sparx5);
+
+ /* CPU copy CPU pgids */
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+
+ /* Recalc injected frame FCS */
+ for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+ spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
+ ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
+ sparx5, ANA_CL_FILTER_CTRL(idx));
+
+ /* Init MAC table, ageing */
+ sparx5_mact_init(sparx5);
+
+ /* Setup VLANs */
+ sparx5_vlan_init(sparx5);
+
+ /* Add host mode BC address (points only to CPU) */
+ sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+
+ /* Enable queue limitation watermarks */
+ sparx5_qlim_set(sparx5);
+
+ err = sparx5_config_auto_calendar(sparx5);
+ if (err)
+ return err;
+
+ err = sparx5_config_dsm_calendar(sparx5);
+ if (err)
+ return err;
+
+ /* Init stats */
+ err = sparx_stats_init(sparx5);
+ if (err)
+ return err;
+
+ /* Init mact_sw struct */
+ mutex_init(&sparx5->mact_lock);
+ INIT_LIST_HEAD(&sparx5->mact_entries);
+ snprintf(queue_name, sizeof(queue_name), "%s-mact",
+ dev_name(sparx5->dev));
+	sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+	if (!sparx5->mact_queue)
+		return -ENOMEM;
+ INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
+ queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+ SPX5_MACT_PULL_DELAY);
+
+ err = sparx5_register_netdevs(sparx5);
+ if (err)
+ return err;
+
+ sparx5_board_init(sparx5);
+	err = sparx5_register_notifier_blocks(sparx5);
+	if (err)
+		return err;
+
+	/* Start register based INJ/XTR */
+	if (sparx5->xtr_irq >= 0) {
+ err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
+ sparx5_xtr_handler, IRQF_SHARED,
+ "sparx5-xtr", sparx5);
+ if (!err)
+ err = sparx5_manual_injection_mode(sparx5);
+ if (err)
+ sparx5->xtr_irq = -ENXIO;
+ } else {
+ sparx5->xtr_irq = -ENXIO;
+ }
+ return err;
+}
+
+static void sparx5_cleanup_ports(struct sparx5 *sparx5)
+{
+ sparx5_unregister_netdevs(sparx5);
+ sparx5_destroy_netdevs(sparx5);
+}
+
+static int mchp_sparx5_probe(struct platform_device *pdev)
+{
+ struct initial_port_config *configs, *config;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ports, *portnp;
+ struct reset_control *reset;
+ struct sparx5 *sparx5;
+ int idx = 0, err = 0;
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL);
+ if (!sparx5)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sparx5);
+ sparx5->pdev = pdev;
+ sparx5->dev = &pdev->dev;
+
+ /* Do switch core reset if available */
+ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Failed to get switch reset controller.\n");
+ reset_control_reset(reset);
+
+ /* Default values, some from DT */
+ sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT;
+
+ ports = of_get_child_by_name(np, "ethernet-ports");
+ if (!ports) {
+ dev_err(sparx5->dev, "no ethernet-ports child node found\n");
+ return -ENODEV;
+ }
+ sparx5->port_count = of_get_child_count(ports);
+
+ configs = kcalloc(sparx5->port_count,
+ sizeof(struct initial_port_config), GFP_KERNEL);
+ if (!configs) {
+ err = -ENOMEM;
+ goto cleanup_pnode;
+ }
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct sparx5_port_config *conf;
+ struct phy *serdes;
+ u32 portno;
+
+ err = of_property_read_u32(portnp, "reg", &portno);
+ if (err) {
+ dev_err(sparx5->dev, "port reg property error\n");
+ continue;
+ }
+ config = &configs[idx];
+ conf = &config->conf;
+ conf->speed = SPEED_UNKNOWN;
+ conf->bandwidth = SPEED_UNKNOWN;
+ err = of_get_phy_mode(portnp, &conf->phy_mode);
+ if (err) {
+ dev_err(sparx5->dev, "port %u: missing phy-mode\n",
+ portno);
+ continue;
+ }
+ err = of_property_read_u32(portnp, "microchip,bandwidth",
+ &conf->bandwidth);
+ if (err) {
+ dev_err(sparx5->dev, "port %u: missing bandwidth\n",
+ portno);
+ continue;
+ }
+ err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio);
+ if (err)
+ conf->sd_sgpio = ~0;
+ else
+ sparx5->sd_sgpio_remapping = true;
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ goto cleanup_config;
+ }
+ config->portno = portno;
+ config->node = portnp;
+ config->serdes = serdes;
+
+ conf->media = PHY_MEDIA_DAC;
+ conf->serdes_reset = true;
+ conf->portmode = conf->phy_mode;
+ conf->power_down = true;
+ idx++;
+ }
+
+ err = sparx5_create_targets(sparx5);
+ if (err)
+ goto cleanup_config;
+
+	if (of_get_mac_address(np, sparx5->base_mac)) {
+ dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
+ eth_random_addr(sparx5->base_mac);
+ sparx5->base_mac[5] = 0;
+ }
+
+ sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+
+ /* Read chip ID to check CPU interface */
+ sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+
+ sparx5->target_ct = (enum spx5_target_chiptype)
+ GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+
+ /* Initialize Switchcore and internal RAMs */
+ err = sparx5_init_switchcore(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Switchcore initialization error\n");
+ goto cleanup_config;
+ }
+
+ /* Initialize the LC-PLL (core clock) and set affected registers */
+ err = sparx5_init_coreclock(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "LC-PLL initialization error\n");
+ goto cleanup_config;
+ }
+
+ for (idx = 0; idx < sparx5->port_count; ++idx) {
+ config = &configs[idx];
+ if (!config->node)
+ continue;
+
+ err = sparx5_create_port(sparx5, config);
+ if (err) {
+ dev_err(sparx5->dev, "port create error\n");
+ goto cleanup_ports;
+ }
+ }
+
+ err = sparx5_start(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Start failed\n");
+ goto cleanup_ports;
+ }
+ goto cleanup_config;
+
+cleanup_ports:
+ sparx5_cleanup_ports(sparx5);
+cleanup_config:
+ kfree(configs);
+cleanup_pnode:
+ of_node_put(ports);
+ return err;
+}
+
+static int mchp_sparx5_remove(struct platform_device *pdev)
+{
+ struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+
+	if (sparx5->xtr_irq >= 0) {
+ disable_irq(sparx5->xtr_irq);
+ sparx5->xtr_irq = -ENXIO;
+ }
+	sparx5_cleanup_ports(sparx5);
+	sparx5_unregister_notifier_blocks(sparx5);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_sparx5_match[] = {
+ { .compatible = "microchip,sparx5-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
+
+static struct platform_driver mchp_sparx5_driver = {
+ .probe = mchp_sparx5_probe,
+ .remove = mchp_sparx5_remove,
+ .driver = {
+ .name = "sparx5-switch",
+ .of_match_table = mchp_sparx5_match,
+ },
+};
+
+module_platform_driver(mchp_sparx5_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
new file mode 100644
index 000000000000..4d5f44c3a421
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_MAIN_H__
+#define __SPARX5_MAIN_H__
+
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitmap.h>
+#include <linux/phylink.h>
+#include <linux/hrtimer.h>
+
+/* Target chip type */
+enum spx5_target_chiptype {
+ SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
+ SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
+ SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
+ SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
+ SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
+ SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+ SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+ SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+ SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+ SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+};
+
+enum sparx5_port_max_tags {
+ SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */
+ SPX5_PORT_MAX_TAGS_ONE, /* Single tag allowed */
+ SPX5_PORT_MAX_TAGS_TWO /* Single and double tag allowed */
+};
+
+enum sparx5_vlan_port_type {
+ SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
+ SPX5_VLAN_PORT_TYPE_C, /* C-port */
+ SPX5_VLAN_PORT_TYPE_S, /* S-port */
+ SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
+};
+
+#define SPX5_PORTS 65
+#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */
+#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */
+#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */
+#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
+#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
+#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/
+#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */
+
+#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */
+#define PGID_UC_FLOOD (PGID_BASE + 0)
+#define PGID_MC_FLOOD (PGID_BASE + 1)
+#define PGID_IPV4_MC_DATA (PGID_BASE + 2)
+#define PGID_IPV4_MC_CTRL (PGID_BASE + 3)
+#define PGID_IPV6_MC_DATA (PGID_BASE + 4)
+#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
+#define PGID_BCAST (PGID_BASE + 6)
+#define PGID_CPU (PGID_BASE + 7)
+
+#define IFH_LEN 9 /* 36 bytes */
+#define NULL_VID 0
+#define SPX5_MACT_PULL_DELAY (2 * HZ)
+#define SPX5_STATS_CHECK_DELAY (1 * HZ)
+#define SPX5_PRIOS 8 /* Number of priority queues */
+#define SPX5_BUFFER_CELL_SZ 184 /* Cell size */
+#define SPX5_BUFFER_MEMORY 4194280 /* 22795 words * 184 bytes */
+
+#define XTR_QUEUE 0
+#define INJ_QUEUE 0
+
+struct sparx5;
+
+struct sparx5_port_config {
+ phy_interface_t portmode;
+ u32 bandwidth;
+ int speed;
+ int duplex;
+ enum phy_media media;
+ bool inband;
+ bool power_down;
+ bool autoneg;
+ bool serdes_reset;
+ u32 pause;
+ u32 pause_adv;
+ phy_interface_t phy_mode;
+ u32 sd_sgpio;
+};
+
+struct sparx5_port {
+ struct net_device *ndev;
+ struct sparx5 *sparx5;
+ struct device_node *of_node;
+ struct phy *serdes;
+ struct sparx5_port_config conf;
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ struct phylink_pcs phylink_pcs;
+ u16 portno;
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+ /* Egress default VLAN (vid) */
+ u16 vid;
+ bool signd_internal;
+ bool signd_active_high;
+ bool signd_enable;
+ bool flow_control;
+ enum sparx5_port_max_tags max_vlan_tags;
+ enum sparx5_vlan_port_type vlan_type;
+ u32 custom_etype;
+ u32 ifh[IFH_LEN];
+ bool vlan_aware;
+ struct hrtimer inj_timer;
+};
+
+enum sparx5_core_clockfreq {
+ SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
+ SPX5_CORE_CLOCK_250MHZ, /* 250MHZ core clock frequency */
+ SPX5_CORE_CLOCK_500MHZ, /* 500MHZ core clock frequency */
+ SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
+};
+
+struct sparx5 {
+ struct platform_device *pdev;
+ struct device *dev;
+ u32 chip_id;
+ enum spx5_target_chiptype target_ct;
+ void __iomem *regs[NUM_TARGETS];
+ int port_count;
+ struct mutex lock; /* MAC reg lock */
+ /* port structures are in net device */
+ struct sparx5_port *ports[SPX5_PORTS];
+ enum sparx5_core_clockfreq coreclock;
+ /* Statistics */
+ u32 num_stats;
+ u32 num_ethtool_stats;
+ const char * const *stats_layout;
+ u64 *stats;
+ /* Workqueue for reading stats */
+ struct mutex queue_stats_lock;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+ /* Notifiers */
+ struct notifier_block netdevice_nb;
+ struct notifier_block switchdev_nb;
+ struct notifier_block switchdev_blocking_nb;
+ /* Switch state */
+ u8 base_mac[ETH_ALEN];
+ /* Associated bridge device (when bridged) */
+ struct net_device *hw_bridge_dev;
+ /* Bridged interfaces */
+ DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
+ DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
+ DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
+ DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
+ /* SW MAC table */
+ struct list_head mact_entries;
+ /* mac table list (mact_entries) mutex */
+ struct mutex mact_lock;
+ struct delayed_work mact_work;
+ struct workqueue_struct *mact_queue;
+ /* Board specifics */
+ bool sd_sgpio_remapping;
+ /* Register based inj/xtr */
+ int xtr_irq;
+};
+
+/* sparx5_switchdev.c */
+int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
+void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
+
+/* sparx5_packet.c */
+irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+int sparx5_manual_injection_mode(struct sparx5 *sparx5);
+void sparx5_port_inj_timer_setup(struct sparx5_port *port);
+
+/* sparx5_mactable.c */
+void sparx5_mact_pull_work(struct work_struct *work);
+int sparx5_mact_learn(struct sparx5 *sparx5, int port,
+ const unsigned char mac[ETH_ALEN], u16 vid);
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+ unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+int sparx5_mact_forget(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid);
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ const unsigned char *addr, u16 vid);
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid);
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
+void sparx5_mact_init(struct sparx5 *sparx5);
+
+/* sparx5_vlan.c */
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_update_fwd(struct sparx5 *sparx5);
+void sparx5_vlan_init(struct sparx5 *sparx5);
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+ bool untagged);
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
+void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
+
+/* sparx5_calendar.c */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5);
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+
+/* sparx5_ethtool.c */
+void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
+int sparx_stats_init(struct sparx5 *sparx5);
+
+/* sparx5_netdev.c */
+bool sparx5_netdevice_check(const struct net_device *dev);
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
+int sparx5_register_netdevs(struct sparx5 *sparx5);
+void sparx5_destroy_netdevs(struct sparx5 *sparx5);
+void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+
+/* Clock period in picoseconds */
+static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
+{
+ switch (cclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ return 4000;
+ case SPX5_CORE_CLOCK_500MHZ:
+ return 2000;
+ case SPX5_CORE_CLOCK_625MHZ:
+ default:
+ return 1600;
+ }
+}
+
+static inline bool sparx5_is_baser(phy_interface_t interface)
+{
+ return interface == PHY_INTERFACE_MODE_5GBASER ||
+ interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_25GBASER;
+}
+
+extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
+extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
+extern const struct ethtool_ops sparx5_ethtool_ops;
+
+/* Calculate raw offset */
+static inline __pure int spx5_offset(int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+/* Read, Write and modify registers content.
+ * The register definition macros start at the id
+ */
+static inline void __iomem *spx5_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline void __iomem *spx5_inst_addr(void __iomem *base,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(spx5_inst_addr(iomem, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, spx5_inst_addr(iomem,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+ rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+ rinst, rcnt, rwidth));
+}
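+
+/* Minimal usage sketch (illustrative only): the autogenerated register
+ * macros from sparx5_main_regs.h expand to the trailing argument list of
+ * these helpers, so reading and doing a read-modify-write of e.g. the
+ * statistics reset bit looks like:
+ *
+ *   u32 val = spx5_rd(sparx5, ANA_AC_STAT_RESET);
+ *
+ *   spx5_rmw(ANA_AC_STAT_RESET_RESET_SET(1),
+ *            ANA_AC_STAT_RESET_RESET,
+ *            sparx5, ANA_AC_STAT_RESET);
+ */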
+
+static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
+{
+ return sparx5->regs[id + tinst];
+}
+
+static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return spx5_addr(sparx5->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth);
+}
+
+#endif /* __SPARX5_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
new file mode 100644
index 000000000000..5ab2373a7178
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -0,0 +1,4642 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
+ * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+ */
+
+#ifndef _SPARX5_MAIN_REGS_H_
+#define _SPARX5_MAIN_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum sparx5_target {
+ TARGET_ANA_AC = 1,
+ TARGET_ANA_ACL = 2,
+ TARGET_ANA_AC_POL = 4,
+ TARGET_ANA_CL = 6,
+ TARGET_ANA_L2 = 7,
+ TARGET_ANA_L3 = 8,
+ TARGET_ASM = 9,
+ TARGET_CLKGEN = 11,
+ TARGET_CPU = 12,
+ TARGET_DEV10G = 17,
+ TARGET_DEV25G = 29,
+ TARGET_DEV2G5 = 37,
+ TARGET_DEV5G = 102,
+ TARGET_DSM = 115,
+ TARGET_EACL = 116,
+ TARGET_FDMA = 117,
+ TARGET_GCB = 118,
+ TARGET_HSCH = 119,
+ TARGET_LRN = 122,
+ TARGET_PCEP = 129,
+ TARGET_PCS10G_BR = 132,
+ TARGET_PCS25G_BR = 144,
+ TARGET_PCS5G_BR = 160,
+ TARGET_PORT_CONF = 173,
+ TARGET_QFWD = 175,
+ TARGET_QRES = 176,
+ TARGET_QS = 177,
+ TARGET_QSYS = 178,
+ TARGET_REW = 179,
+ TARGET_VCAP_SUPER = 326,
+ TARGET_VOP = 327,
+ TARGET_XQS = 331,
+ NUM_TARGETS = 332
+};
+
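+/* Every register macro below expands, via __REG(), to the argument list
+ * (id, tinst, tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth)
+ * consumed by the spx5_addr()/spx5_rd()/spx5_wr()/spx5_rmw() helpers in
+ * sparx5_main.h.
+ */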
+#define __REG(...) __VA_ARGS__
+
+/* ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
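+
+/* Decoded against the helper parameter order, the arguments above are
+ * id = TARGET_ANA_AC, tinst = 0, tcnt = 1, gbase = 839108, ginst = 0,
+ * gcnt = 1, gwidth = 4, raddr = 0, rinst = 0, rcnt = 1, rwidth = 4,
+ * so the register resolves to base[TARGET_ANA_AC] + 839108.
+ */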
+
+#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
+#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x)
+#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x)
+
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+
+#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+
+/* ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+
+/* ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+
+/* ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+
+#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+
+/* ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA BIT(1)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA BIT(0)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+
+#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
+ FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+
+#define ANA_AC_STAT_RESET_RESET BIT(0)
+#define ANA_AC_STAT_RESET_RESET_SET(x)\
+ FIELD_PREP(ANA_AC_STAT_RESET_RESET, x)
+#define ANA_AC_STAT_RESET_RESET_GET(x)\
+ FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
+
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE GENMASK(3, 1)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE BIT(0)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\
+ FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
+ FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/* ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+
+#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
+ FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+
+/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA BIT(1)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA BIT(0)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT GENMASK(18, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA BIT(1)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA BIT(0)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+ FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/* ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS BIT(1)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA BIT(0)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+
+/* ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS BIT(9)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS BIT(8)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS BIT(7)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS BIT(3)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS BIT(2)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS BIT(1)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS BIT(0)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+
+/* ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS BIT(0)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
+ FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+
+/* ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP GENMASK(25, 23)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI BIT(22)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA BIT(21)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL BIT(20)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA BIT(19)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT GENMASK(18, 17)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE BIT(16)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_PCP GENMASK(15, 13)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_DEI BIT(12)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VID GENMASK(11, 0)
+#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x)
+#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
+
+/* ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
+ FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
+ FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+
+/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/* ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+
+#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+
+/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+
+/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
+ FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
+ FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+
+/* ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+
+#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+
+/* ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+
+#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+
+/* ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FID GENMASK(20, 8)
+#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x)
+#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA BIT(6)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA BIT(5)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS BIT(4)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS BIT(3)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA BIT(2)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA BIT(1)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA BIT(0)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+
+/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+
+/* ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
+ FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
+ FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+
+/* ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r) __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+
+#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
+#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x)
+#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x)
+
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA BIT(11)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA BIT(10)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA BIT(9)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA BIT(8)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_FRM_AGING_DIS BIT(7)
+#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x)
+#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x)
+
+#define ASM_PORT_CFG_PAD_ENA BIT(6)
+#define ASM_PORT_CFG_PAD_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x)
+#define ASM_PORT_CFG_PAD_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_PAD_ENA, x)
+
+#define ASM_PORT_CFG_INJ_DISCARD_CFG GENMASK(5, 4)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+
+#define ASM_PORT_CFG_INJ_FORMAT_CFG GENMASK(3, 2)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA BIT(1)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+
+#define ASM_PORT_CFG_PFRM_FLUSH BIT(0)
+#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\
+ FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x)
+#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
+ FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
+
+/* ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+
+#define ASM_RAM_INIT_RAM_INIT BIT(1)
+#define ASM_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x)
+#define ASM_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(ASM_RAM_INIT_RAM_INIT, x)
+
+#define ASM_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV GENMASK(10, 8)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR BIT(11)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL GENMASK(13, 12)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA BIT(14)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA BIT(15)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\
+ FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
+ FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+
+/* CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+
+#define CPU_PROC_CTRL_VINITHI BIT(8)
+#define CPU_PROC_CTRL_VINITHI_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+#define CPU_PROC_CTRL_VINITHI_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+
+#define CPU_PROC_CTRL_CFGTE BIT(7)
+#define CPU_PROC_CTRL_CFGTE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+#define CPU_PROC_CTRL_CFGTE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+
+#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+
+#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3)
+#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
+#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+
+#define CPU_PROC_CTRL_ACP_ARCACHE BIT(2)
+#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
+#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
+
+#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+
+#define CPU_PROC_CTRL_ACP_DISABLE BIT(0)
+#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
+ FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
+#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
+ FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV10G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
+ FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
+ FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA BIT(4)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\
+ FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
+ FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+ FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+ FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV25G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
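The _GET helpers invert the _SET direction: FIELD_GET() extracts a field from a value already read back, so checking the configured frame-length limit is two macro calls. A hedged example under the same readl() assumption as above:

    u32 val = readl(addr);
    u16 max_len = DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(val);
    bool tag_chk = DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(val);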
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+
+#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_POL BIT(4)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_ENA BIT(0)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST BIT(17)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST BIT(16)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV2G5_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA BIT(0)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2 GENMASK(15, 0)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
+ FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+
+#define DEV2G5_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS GENMASK(6, 0)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\
+ FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
+ FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+
+#define DEV2G5_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_POL BIT(4)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+
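A clause 37 style autoneg (re)start touches several of these fields at once; because every _SET() macro yields a pre-shifted value, they simply OR together into one write. A sketch, where adv stands in for the 16-bit advertised-ability word (an assumption, not defined in this patch):

    u32 val;

    val = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
          DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
          DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1) |
          DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1);
    writel(val, addr);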
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PR BIT(4)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+
+/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL GENMASK(15, 12)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG GENMASK(10, 9)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER GENMASK(7, 4)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA BIT(0)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+
+/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV5G_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+ FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+ FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+ FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+ FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
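The DEV_STATISTICS_40BIT group splits each counter across two words: the *_CNT register carries bits 31:0 and the matching *_MSB_CNT register carries bits 39:32, which is why the MSB field above is GENMASK(7, 0). Assembling the 64-bit value might look like this (lsb_addr/msb_addr are hypothetical; a production reader would also guard against rollover between the two reads):

    u64 bytes;

    bytes = readl(lsb_addr);
    bytes |= (u64)DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(readl(msb_addr)) << 32;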
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+ FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+ FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL GENMASK(24, 23)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST BIT(12)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST BIT(8)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST BIT(4)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST BIT(0)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/* DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+
+#define DSM_RAM_INIT_RAM_INIT BIT(1)
+#define DSM_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x)
+#define DSM_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(DSM_RAM_INIT_RAM_INIT, x)
+
+#define DSM_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+
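RAM_INIT reads like the usual self-clearing kick bit: software sets it and hardware clears it once the buffer RAM has been initialized. That behaviour is an assumption here, but if it holds, the poll is the familiar pattern (bounded by a timeout in real code):

    writel(DSM_RAM_INIT_RAM_INIT_SET(1), addr);
    while (DSM_RAM_INIT_RAM_INIT_GET(readl(addr)))
        cpu_relax();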
+/* DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+
+#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
+#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x)
+#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x)
+
+#define DSM_BUF_CFG_AGING_ENA BIT(12)
+#define DSM_BUF_CFG_AGING_ENA_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x)
+#define DSM_BUF_CFG_AGING_ENA_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_AGING_ENA, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS BIT(11)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT GENMASK(10, 0)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\
+ FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
+ FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+
+/* DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM GENMASK(7, 1)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR BIT(0)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\
+ FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
+ FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+
+/* DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
+ FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\
+ FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL BIT(0)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\
+ FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
+ FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+
+/* DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+
+#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+
+#define DSM_MAC_CFG_HDX_BACKPREASSURE BIT(2)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE BIT(1)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF BIT(0)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\
+ FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
+ FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+
+/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
+ FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
+ FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+
+/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
+ FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
+ FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+
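HIGH and LOW each hold 24 bits (GENMASK(23, 0)), so a 48-bit station address is split in half before writing. An illustrative split, with addr[] as the usual six-byte array and high_addr/low_addr hypothetical; which half takes the most significant octets is an assumption here, not something this hunk states:

    u32 high = (addr[0] << 16) | (addr[1] << 8) | addr[2];
    u32 low = (addr[3] << 16) | (addr[4] << 8) | addr[5];

    writel(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(high), high_addr);
    writel(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(low), low_addr);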
+/* DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+
+#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN GENMASK(14, 9)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL GENMASK(8, 5)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL GENMASK(4, 1)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA BIT(0)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+
+/* EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY BIT(4)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY BIT(3)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE BIT(2)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN BIT(1)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT BIT(0)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\
+ FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
+ FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+
+/* EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+
+#define EACL_RAM_INIT_RAM_INIT BIT(1)
+#define EACL_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x)
+#define EACL_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(EACL_RAM_INIT_RAM_INIT, x)
+
+#define EACL_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+
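+/*
+ * Sketch only: RAM initialization is typically kicked off by setting
+ * RAM_INIT and polling until the bit self-clears (spx5_wr()/spx5_rd()
+ * helpers assumed):
+ *
+ *   spx5_wr(EACL_RAM_INIT_RAM_INIT_SET(1), sparx5, EACL_RAM_INIT);
+ *   while (EACL_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, EACL_RAM_INIT)))
+ *           cpu_relax();
+ */
+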
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
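+/*
+ * Illustrative sketch ('ch' channel index and 'n' data blocks per DCB
+ * are placeholders; spx5_wr() helper assumed): configuring and then
+ * activating an FDMA channel:
+ *
+ *   spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(n) |
+ *           FDMA_CH_CFG_CH_MEM_SET(1),
+ *           sparx5, FDMA_CH_CFG(ch));
+ *   spx5_wr(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(ch)),
+ *           sparx5, FDMA_CH_ACTIVATE);
+ */
+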
+/* FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+
+#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
+#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
+ FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x)
+#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
+ FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
+
+/* FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+
+#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
+ FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\
+ FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+
+#define FDMA_XTR_CFG_XTR_ARB_SAT GENMASK(10, 0)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\
+ FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
+ FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE BIT(3)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY BIT(1)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_RST BIT(0)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+
+/* FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+
+#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
+#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
+ FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x)
+#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
+ FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
+
+/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
+#define FDMA_INTR_DB_INTR_DB_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_INTR_DB, x)
+#define FDMA_INTR_DB_INTR_DB_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
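+/*
+ * Sketch (spx5_rd()/spx5_wr() helpers assumed): pending doorbell
+ * interrupts are typically acknowledged by writing the set bits back:
+ *
+ *   u32 db = spx5_rd(sparx5, FDMA_INTR_DB);
+ *
+ *   spx5_wr(db, sparx5, FDMA_INTR_DB);
+ */
+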
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
+ FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\
+ FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+
+#define FDMA_INTR_ERR_INTR_CH_ERR GENMASK(7, 0)
+#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\
+ FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x)
+#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
+ FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
+#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x)
+#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x)
+
+#define FDMA_ERRORS_ERR_XTR_OVF GENMASK(29, 28)
+#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x)
+
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF GENMASK(27, 26)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL GENMASK(25, 24)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+
+#define FDMA_ERRORS_ERR_DCB_RD GENMASK(23, 16)
+#define FDMA_ERRORS_ERR_DCB_RD_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x)
+#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_RD GENMASK(15, 10)
+#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x)
+#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC GENMASK(9, 8)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+
+#define FDMA_ERRORS_ERR_CH_WR GENMASK(7, 0)
+#define FDMA_ERRORS_ERR_CH_WR_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x)
+#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
+ FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
+
+/* FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+
+#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
+ FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
+ FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+
+/* FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+
+#define FDMA_CTRL_NRESET BIT(0)
+#define FDMA_CTRL_NRESET_SET(x)\
+ FIELD_PREP(FDMA_CTRL_NRESET, x)
+#define FDMA_CTRL_NRESET_GET(x)\
+ FIELD_GET(FDMA_CTRL_NRESET, x)
+
+/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+
+#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
+#define GCB_CHIP_ID_REV_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_REV_ID, x)
+#define GCB_CHIP_ID_REV_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_REV_ID, x)
+
+#define GCB_CHIP_ID_PART_ID GENMASK(27, 12)
+#define GCB_CHIP_ID_PART_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_PART_ID, x)
+#define GCB_CHIP_ID_PART_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_PART_ID, x)
+
+#define GCB_CHIP_ID_MFG_ID GENMASK(11, 1)
+#define GCB_CHIP_ID_MFG_ID_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_MFG_ID, x)
+#define GCB_CHIP_ID_MFG_ID_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_MFG_ID, x)
+
+#define GCB_CHIP_ID_ONE BIT(0)
+#define GCB_CHIP_ID_ONE_SET(x)\
+ FIELD_PREP(GCB_CHIP_ID_ONE, x)
+#define GCB_CHIP_ID_ONE_GET(x)\
+ FIELD_GET(GCB_CHIP_ID_ONE, x)
+
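+/*
+ * Sketch (spx5_rd() helper assumed): decoding the chip identification
+ * register with the _GET accessors:
+ *
+ *   u32 id = spx5_rd(sparx5, GCB_CHIP_ID);
+ *   u32 part = GCB_CHIP_ID_PART_ID_GET(id);
+ *   u32 rev = GCB_CHIP_ID_REV_ID_GET(id);
+ */
+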
+/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+
+#define GCB_SOFT_RST_SOFT_SWC_RST BIT(1)
+#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x)
+#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x)
+
+#define GCB_SOFT_RST_SOFT_CHIP_RST BIT(0)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\
+ FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
+ FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+
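+/*
+ * Sketch (spx5_wr() helper assumed): a switch-core reset, as opposed
+ * to a full chip reset via SOFT_CHIP_RST, can be requested by setting
+ * SOFT_SWC_RST:
+ *
+ *   spx5_wr(GCB_SOFT_RST_SOFT_SWC_RST_SET(1), sparx5, GCB_SOFT_RST);
+ */
+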
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL BIT(0)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
+ FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
+ FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+
+/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
+ FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\
+ FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD GENMASK(7, 0)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\
+ FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
+ FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+
+/* HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
+ FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
+ FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+
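+/*
+ * Sketch: SYS_CLK_PER_100PS holds the core clock period in units of
+ * 100 ps, so a 625 MHz core clock (1.6 ns period) would be written as
+ * (spx5_wr() helper assumed):
+ *
+ *   spx5_wr(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(16),
+ *           sparx5, HSCH_SYS_CLK_PER);
+ */
+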
+/* HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+
+#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SRC BIT(26)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_DST BIT(25)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SE BIT(16)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
+ FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
+ FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+
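+/*
+ * Illustrative sketch (spx5_rmw() helper assumed): flushing port 'p'
+ * as both source and destination could be requested as:
+ *
+ *   spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1) |
+ *            HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+ *            HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+ *            HSCH_FLUSH_CTRL_FLUSH_PORT_SET(p),
+ *            HSCH_FLUSH_CTRL_FLUSH_ENA | HSCH_FLUSH_CTRL_FLUSH_SRC |
+ *            HSCH_FLUSH_CTRL_FLUSH_DST | HSCH_FLUSH_CTRL_FLUSH_PORT,
+ *            sparx5, HSCH_FLUSH_CTRL);
+ */
+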
+/* HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+
+#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+
+#define HSCH_PORT_MODE_AGE_DIS BIT(3)
+#define HSCH_PORT_MODE_AGE_DIS_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x)
+#define HSCH_PORT_MODE_AGE_DIS_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x)
+
+#define HSCH_PORT_MODE_TRUNC_ENA BIT(2)
+#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x)
+#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x)
+
+#define HSCH_PORT_MODE_EIR_REMARK_ENA BIT(1)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+
+#define HSCH_PORT_MODE_CPU_PRIO_MODE BIT(0)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\
+ FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
+ FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+
+/* HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
+ FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
+ FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+
+/* HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+
+#define HSCH_RESET_CFG_CORE_ENA BIT(0)
+#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x)
+#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
+
+/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+
+/* LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\
+ FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
+ FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+
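+/*
+ * Sketch only ('cmd' is a placeholder command code; spx5_wr()/
+ * spx5_rd() helpers assumed): MAC table operations are issued by
+ * writing a command together with the one-shot bit and polling until
+ * the shot bit clears:
+ *
+ *   spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(cmd) |
+ *           LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+ *           sparx5, LRN_COMMON_ACCESS_CTRL);
+ *   while (LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET
+ *                  (spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL)))
+ *           cpu_relax();
+ */
+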
+/* LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB GENMASK(15, 0)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU GENMASK(26, 24)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY BIT(23)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR BIT(21)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG GENMASK(20, 19)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED BIT(16)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD BIT(15)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR GENMASK(11, 0)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+
+/* LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
+ FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
+ FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+
+/* LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL GENMASK(16, 15)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA BIT(1)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA BIT(0)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+
+/* LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\
+ FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
+ FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+
+/* LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+
+#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+
+#define LRN_AUTOAGE_CFG_PERIOD_VAL GENMASK(27, 0)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+
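+/*
+ * Sketch: the auto-age period is PERIOD_VAL counts of the time unit
+ * selected by UNIT_SIZE; programming aging interval 'i' could look
+ * like ('units'/'period' derived from the wanted age time, spx5_wr()
+ * helper assumed):
+ *
+ *   spx5_wr(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(units) |
+ *           LRN_AUTOAGE_CFG_PERIOD_VAL_SET(period),
+ *           sparx5, LRN_AUTOAGE_CFG(i));
+ */
+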
+/* LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS GENMASK(14, 7)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA BIT(6)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT GENMASK(5, 2)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA BIT(0)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+
+/* LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
+ FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
+ FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG GENMASK(15, 8)
+#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN BIT(16)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS BIT(19)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+
+#define PCEP_RCTRL_2_OUT_0_SNP BIT(20)
+#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x)
+#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x)
+
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD BIT(22)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN BIT(23)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE BIT(28)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE BIT(29)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_REGION_EN BIT(31)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\
+ FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
+ FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+
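+/*
+ * Sketch (spx5_wr() helper assumed): in the DesignWare iATU, an
+ * outbound region is programmed by writing its base/limit/target
+ * registers below and then enabling it via REGION_EN here:
+ *
+ *   spx5_wr(PCEP_RCTRL_2_OUT_0_REGION_EN_SET(1),
+ *           sparx5, PCEP_RCTRL_2_OUT_0);
+ */
+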
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW GENMASK(31, 16)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW GENMASK(31, 16)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\
+ FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\
+ FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
+ FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+
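+/*
+ * Sketch (spx5_rmw() helper assumed): enabling the 10GBASE-R PCS on
+ * device instance 't':
+ *
+ *   spx5_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
+ *            PCS10G_BR_PCS_CFG_PCS_ENA,
+ *            sparx5, PCS10G_BR_PCS_CFG(t));
+ */
+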
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA BIT(30)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX GENMASK(29, 24)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP BIT(18)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA BIT(15)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS BIT(14)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE BIT(13)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE BIT(12)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP BIT(7)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA BIT(6)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE BIT(4)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE BIT(3)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_POL BIT(4)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA BIT(0)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/* PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE BIT(9)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+
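+/*
+ * Sketch (spx5_rmw() helper assumed): selecting the 5G device mode
+ * for port device D0:
+ *
+ *   spx5_rmw(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(1),
+ *            PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE,
+ *            sparx5, PORT_CONF_DEV5G_MODES);
+ */
+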
+/* PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+
+/* PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE BIT(1)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE BIT(2)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE BIT(3)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE BIT(4)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE BIT(5)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE BIT(6)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE BIT(7)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+
+/* PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1 BIT(1)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2 BIT(2)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3 BIT(3)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4 BIT(4)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5 BIT(5)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
+ FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
+ FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+
+/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM BIT(8)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_FLIP_LANES BIT(7)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+
+#define PORT_CONF_USGMII_CFG_SHYST_DIS BIT(6)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+
+#define PORT_CONF_USGMII_CFG_E_DET_ENA BIT(5)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA BIT(4)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_QUAD_MODE BIT(1)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\
+ FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
+ FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+
+/* QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY GENMASK(18, 10)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD GENMASK(9, 6)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(5)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING BIT(4)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING BIT(3)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE BIT(2)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS BIT(1)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE BIT(0)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\
+ FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
+ FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+
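+/* Usage sketch (illustrative only): the *_SET/*_GET helpers wrap
+ * FIELD_PREP()/FIELD_GET() so a read-modify-write of a single field
+ * stays readable, e.g. enabling a switch port:
+ *
+ * spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+ * QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ * sparx5, QFWD_SWITCH_PORT_MODE(portno));
+ */
+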
+/* QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+
+#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
+#define QRES_RES_CFG_WM_HIGH_SET(x)\
+ FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+#define QRES_RES_CFG_WM_HIGH_GET(x)\
+ FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+
+/* QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+
+#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE_SET(x)\
+ FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+#define QRES_RES_STAT_MAXUSE_GET(x)\
+ FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+
+/* QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+
+#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE_SET(x)\
+ FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+#define QRES_RES_STAT_CUR_INUSE_GET(x)\
+ FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+
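+/* Illustrative: the RES_STAT groups are indexed per resource type,
+ * port and priority, e.g. sparx5_port_flush_poll() reads the
+ * destination memory consumption of a port via
+ * QRES_RES_STAT(2048 + SPX5_PRIOS * portno + prio).
+ */
+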
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
+#define QS_XTR_FLUSH_FLUSH_SET(x)\
+ FIELD_PREP(QS_XTR_FLUSH_FLUSH, x)
+#define QS_XTR_FLUSH_FLUSH_GET(x)\
+ FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
+ FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
+ FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+
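+/* Illustrative: extraction code polls this register to see which
+ * queues have pending frame data before reading words from
+ * QS_XTR_RD(), e.g.:
+ *
+ * while (spx5_rd(sparx5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE))
+ * ... read one frame via QS_XTR_RD(XTR_QUEUE) ...
+ */
+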
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_ABORT BIT(20)
+#define QS_INJ_CTRL_ABORT_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_ABORT, x)
+#define QS_INJ_CTRL_ABORT_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_ABORT, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+#define QS_INJ_STATUS_INJ_IN_PROGRESS GENMASK(1, 0)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+
+/* QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+
+#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA BIT(0)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\
+ FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
+ FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+
+/* QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+
+#define QSYS_ATOP_ATOP GENMASK(11, 0)
+#define QSYS_ATOP_ATOP_SET(x)\
+ FIELD_PREP(QSYS_ATOP_ATOP, x)
+#define QSYS_ATOP_ATOP_GET(x)\
+ FIELD_GET(QSYS_ATOP_ATOP, x)
+
+/* QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
+ FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\
+ FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS BIT(0)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\
+ FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
+ FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+
+/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
+ FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
+ FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+
+/* QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+
+#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
+#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
+ FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x)
+#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
+ FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
+
+/* QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+
+#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
+#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x)
+#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE GENMASK(10, 1)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR BIT(0)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\
+ FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
+ FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+
+/* QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_RAM_INIT_RAM_INIT BIT(1)
+#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x)
+#define QSYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x)
+
+#define QSYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+
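+/* Usage sketch (assumed init sequence, for illustration only): RAM
+ * initialization is started by setting RAM_INIT and then polling
+ * until the hardware clears the bit again, e.g.:
+ *
+ * spx5_wr(QSYS_RAM_INIT_RAM_INIT_SET(1), sparx5, QSYS_RAM_INIT);
+ * while (QSYS_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, QSYS_RAM_INIT)))
+ * cpu_relax();
+ */
+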
+/* REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r) __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+
+#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
+#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
+ FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x)
+#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
+ FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
+#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x)
+#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x)
+
+#define REW_PORT_VLAN_CFG_PORT_DEI BIT(12)
+#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x)
+#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+
+#define REW_TAG_CTRL_TAG_CFG GENMASK(12, 11)
+#define REW_TAG_CTRL_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x)
+#define REW_TAG_CTRL_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_CFG, x)
+
+#define REW_TAG_CTRL_TAG_TPID_CFG GENMASK(10, 8)
+#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x)
+#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_VID_CFG GENMASK(7, 6)
+#define REW_TAG_CTRL_TAG_VID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x)
+#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_PCP_CFG GENMASK(5, 3)
+#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x)
+#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x)
+
+#define REW_TAG_CTRL_TAG_DEI_CFG GENMASK(2, 0)
+#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x)
+#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+
+/* REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+
+#define REW_RAM_INIT_RAM_INIT BIT(1)
+#define REW_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(REW_RAM_INIT_RAM_INIT, x)
+#define REW_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(REW_RAM_INIT_RAM_INIT, x)
+
+#define REW_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x)
+#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+
+#define VOP_RAM_INIT_RAM_INIT BIT(1)
+#define VOP_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x)
+#define VOP_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(VOP_RAM_INIT_RAM_INIT, x)
+
+#define VOP_RAM_INIT_RAM_CFG_HOOK BIT(0)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+ FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+ FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+
+/* XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+
+#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+
+#define XQS_STAT_CFG_STAT_WRAP_DIS GENMASK(3, 0)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\
+ FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
+ FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
+ FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
+ FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+
+/* XQS:STAT:CNT */
+#define XQS_CNT(g) __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+
+#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
new file mode 100644
index 000000000000..9d485a9d1f1f
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* The IFH bit position of the first VSTAX bit. This offset is needed
+ * because the VSTAX bit positions in the data sheet start from zero.
+ */
+#define VSTAX 73
+
+static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+{
+ u8 *ifh_hdr = ifh;
+ /* Calculate the IFH byte that holds this IFH bit position */
+ u32 byte = (35 - (pos / 8));
+ /* Calculate the starting bit position within that byte */
+ u32 bit = (pos % 8);
+ u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
+
+ /* The max width is 5 bytes (40 bits). In the worst case this
+ * spreads over 6 bytes (48 bits).
+ */
+ compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+
+ /* Bits b0-b7 go into the start IFH byte */
+ if (encode & 0xFF)
+ ifh_hdr[byte] |= (u8)((encode & 0xFF));
+ /* Bits b8-b15 go into the next IFH byte */
+ if (encode & 0xFF00)
+ ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8);
+ /* Bits b16-b23 go into the next IFH byte */
+ if (encode & 0xFF0000)
+ ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16);
+ /* Bits b24-b31 go into the next IFH byte */
+ if (encode & 0xFF000000)
+ ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24);
+ /* Bits b32-b39 go into the next IFH byte */
+ if (encode & 0xFF00000000)
+ ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32);
+ /* Bits b40-b47 go into the next IFH byte */
+ if (encode & 0xFF0000000000)
+ ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
+}
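+
+/* Worked example (illustrative): FWD.SRC_PORT sits at IFH bit 46 with
+ * width 7, so byte = 35 - 46 / 8 = 30 and bit = 46 % 8 = 6. Encoding
+ * the value 0x41 gives 0x41 << 6 = 0x1040, which ORs 0x40 into
+ * ifh_hdr[30] and 0x10 into ifh_hdr[29].
+ */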
+
+static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+{
+ /* VSTAX.RSV = 1. MSBit must be 1 */
+ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
+ /* VSTAX.INGR_DROP_MODE = Enable. Avoid head-of-line blocking */
+ ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55, 1);
+ /* MISC.CPU_MASK/DPORT = Destination port */
+ ifh_encode_bitfield(ifh_hdr, portno, 29, 8);
+ /* MISC.PIPELINE_PT */
+ ifh_encode_bitfield(ifh_hdr, 16, 37, 5);
+ /* MISC.PIPELINE_ACT */
+ ifh_encode_bitfield(ifh_hdr, 1, 42, 3);
+ /* FWD.SRC_PORT = CPU */
+ ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+ /* FWD.SFLOW_ID (disable SFlow sampling) */
+ ifh_encode_bitfield(ifh_hdr, 124, 57, 7);
+ /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
+ ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
+}
+
+static int sparx5_port_open(struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int err = 0;
+
+ sparx5_port_enable(port, true);
+ err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+ if (err) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ if (!ndev->phydev) {
+ /* power up serdes */
+ port->conf.power_down = false;
+ if (port->conf.serdes_reset)
+ err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+ else
+ err = phy_power_on(port->serdes);
+ if (err)
+ netdev_err(ndev, "%s failed\n", __func__);
+ }
+
+ return err;
+}
+
+static int sparx5_port_stop(struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int err = 0;
+
+ sparx5_port_enable(port, false);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ if (!ndev->phydev) {
+ /* power down serdes */
+ port->conf.power_down = true;
+ if (port->conf.serdes_reset)
+ err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+ else
+ err = phy_power_off(port->serdes);
+ if (err)
+ netdev_err(ndev, "%s failed\n", __func__);
+ }
+ return 0;
+}
+
+static void sparx5_set_rx_mode(struct net_device *dev)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!test_bit(port->portno, sparx5->bridge_mask))
+ __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->portno);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sparx5_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ const struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* Remove current */
+ sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid);
+
+ /* Add new */
+ sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+
+ /* Record the address */
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static int sparx5_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ ppid->id_len = sizeof(sparx5->base_mac);
+ memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops sparx5_port_netdev_ops = {
+ .ndo_open = sparx5_port_open,
+ .ndo_stop = sparx5_port_stop,
+ .ndo_start_xmit = sparx5_port_xmit_impl,
+ .ndo_set_rx_mode = sparx5_set_rx_mode,
+ .ndo_get_phys_port_name = sparx5_port_get_phys_port_name,
+ .ndo_set_mac_address = sparx5_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = sparx5_get_stats64,
+ .ndo_get_port_parent_id = sparx5_get_port_parent_id,
+};
+
+bool sparx5_netdevice_check(const struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &sparx5_port_netdev_ops);
+}
+
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
+{
+ struct sparx5_port *spx5_port;
+ struct net_device *ndev;
+ u64 val;
+
+ ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_NETDEV_DEV(ndev, sparx5->dev);
+ spx5_port = netdev_priv(ndev);
+ spx5_port->ndev = ndev;
+ spx5_port->sparx5 = sparx5;
+ spx5_port->portno = portno;
+ sparx5_set_port_ifh(spx5_port->ifh, portno);
+
+ ndev->netdev_ops = &sparx5_port_netdev_ops;
+ ndev->ethtool_ops = &sparx5_ethtool_ops;
+
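+ /* Each port gets its own address derived from the switch base
+ * MAC: the portno + 1 offset keeps the base address itself unused
+ * by the ports, e.g. base 00:11:22:33:44:55 yields
+ * 00:11:22:33:44:56 for port 0.
+ */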
+ val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
+ u64_to_ether_addr(val, ndev->dev_addr);
+
+ return ndev;
+}
+
+int sparx5_register_netdevs(struct sparx5 *sparx5)
+{
+ int portno;
+ int err;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno]) {
+ err = register_netdev(sparx5->ports[portno]->ndev);
+ if (err) {
+ dev_err(sparx5->dev,
+ "port: %02u: netdev registration failed\n",
+ portno);
+ return err;
+ }
+ sparx5_port_inj_timer_setup(sparx5->ports[portno]);
+ }
+ return 0;
+}
+
+void sparx5_destroy_netdevs(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int portno;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++) {
+ port = sparx5->ports[portno];
+ if (port && port->phylink) {
+ /* Disconnect the phy */
+ rtnl_lock();
+ sparx5_port_stop(port->ndev);
+ phylink_disconnect_phy(port->phylink);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+ }
+}
+
+void sparx5_unregister_netdevs(struct sparx5 *sparx5)
+{
+ int portno;
+
+ for (portno = 0; portno < SPX5_PORTS; portno++)
+ if (sparx5->ports[portno])
+ unregister_netdev(sparx5->ports[portno]->ndev);
+}
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
new file mode 100644
index 000000000000..09ca7a3bafdc
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define XTR_EOF_0 ntohl((__force __be32)0x80000000u)
+#define XTR_EOF_1 ntohl((__force __be32)0x80000001u)
+#define XTR_EOF_2 ntohl((__force __be32)0x80000002u)
+#define XTR_EOF_3 ntohl((__force __be32)0x80000003u)
+#define XTR_PRUNED ntohl((__force __be32)0x80000004u)
+#define XTR_ABORT ntohl((__force __be32)0x80000005u)
+#define XTR_ESCAPE ntohl((__force __be32)0x80000006u)
+#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)
+
+#define XTR_VALID_BYTES(x) (4 - ((x) & 3))
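+
+/* The two LSBs of the (unswapped) EOF status word encode how many
+ * bytes of the final data word are unused; XTR_VALID_BYTES() turns
+ * that into a valid byte count, e.g. 4 for an EOF_0 word and 1 for
+ * an EOF_3 word (see the byte count adjustment in sparx5_xtr_grp()).
+ */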
+
+#define INJ_TIMEOUT_NS 50000
+
+struct frame_info {
+ int src_port;
+};
+
+static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
+{
+ /* Start flush */
+ spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
+
+ /* Allow the queue to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ spx5_wr(0, sparx5, QS_XTR_FLUSH);
+}
+
+static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+{
+ u8 *xtr_hdr = (u8 *)ifh;
+
+ /* FWD is bits 45-72 (28 bits), but we only read the 27 LSBs for now */
+ u32 fwd =
+ ((u32)xtr_hdr[27] << 24) |
+ ((u32)xtr_hdr[28] << 16) |
+ ((u32)xtr_hdr[29] << 8) |
+ ((u32)xtr_hdr[30] << 0);
+ fwd = (fwd >> 5);
+ info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+}
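+
+/* Worked example (illustrative): with xtr_hdr[27..30] = 00 00 0c 80,
+ * fwd = 0xc80 >> 5 = 0x64 and src_port = FIELD_GET(GENMASK(7, 1),
+ * 0x64) = 50.
+ */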
+
+static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
+{
+ bool eof_flag = false, pruned_flag = false, abort_flag = false;
+ struct net_device *netdev;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ int i, byte_cnt = 0;
+ struct sk_buff *skb;
+ u32 ifh[IFH_LEN];
+ u32 *rxbuf;
+
+ /* Get IFH */
+ for (i = 0; i < IFH_LEN; i++)
+ ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
+
+ /* Decode the IFH (only what is needed) */
+ sparx5_ifh_parse(ifh, &fi);
+
+ /* Map to port netdev */
+ port = fi.src_port < SPX5_PORTS ?
+ sparx5->ports[fi.src_port] : NULL;
+ if (!port || !port->ndev) {
+ dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
+ sparx5_xtr_flush(sparx5, grp);
+ return;
+ }
+
+ /* Have netdev, get skb */
+ netdev = port->ndev;
+ skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
+ if (!skb) {
+ sparx5_xtr_flush(sparx5, grp);
+ dev_err(sparx5->dev, "No skb allocated\n");
+ netdev->stats.rx_dropped++;
+ return;
+ }
+ rxbuf = (u32 *)skb->data;
+
+ /* Now, pull frame data */
+ while (!eof_flag) {
+ u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
+ u32 cmp = val;
+
+ if (byte_swap)
+ cmp = ntohl((__force __be32)val);
+
+ switch (cmp) {
+ case XTR_NOT_READY:
+ break;
+ case XTR_ABORT:
+ /* No accompanying data */
+ abort_flag = true;
+ eof_flag = true;
+ break;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ /* This assumes STATUS_WORD_POS == 1, i.e. the status
+ * word follows immediately after the last data word
+ */
+ byte_cnt -= (4 - XTR_VALID_BYTES(val));
+ eof_flag = true;
+ break;
+ case XTR_PRUNED:
+ /* The frame was pruned, but read the last 4 bytes as well */
+ eof_flag = true;
+ pruned_flag = true;
+ fallthrough;
+ case XTR_ESCAPE:
+ *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
+ byte_cnt += 4;
+ rxbuf++;
+ break;
+ default:
+ *rxbuf = val;
+ byte_cnt += 4;
+ rxbuf++;
+ }
+ }
+
+ if (abort_flag || pruned_flag || !eof_flag) {
+ netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
+ abort_flag, pruned_flag, eof_flag);
+ kfree_skb(skb);
+ netdev->stats.rx_dropped++;
+ return;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded
+ */
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ /* Finish up skb */
+ skb_put(skb, byte_cnt - ETH_FCS_LEN);
+ eth_skb_pad(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ netdev->stats.rx_bytes += skb->len;
+ netdev->stats.rx_packets++;
+}
+
+static int sparx5_inject(struct sparx5 *sparx5,
+ u32 *ifh,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int grp = INJ_QUEUE;
+ u32 val, w, count;
+ u8 *buf;
+
+ val = spx5_rd(sparx5, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
+ pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
+ QS_INJ_STATUS_FIFO_RDY_GET(val));
+ return -EBUSY;
+ }
+
+ /* Indicate SOF */
+ spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
+ QS_INJ_CTRL_GAP_SIZE_SET(1),
+ sparx5, QS_INJ_CTRL(grp));
+
+ /* Write the IFH to the chip. */
+ for (w = 0; w < IFH_LEN; w++)
+ spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));
+
+ /* Write words, round up */
+ count = DIV_ROUND_UP(skb->len, 4);
+ buf = skb->data;
+ for (w = 0; w < count; w++, buf += 4) {
+ val = get_unaligned((const u32 *)buf);
+ spx5_wr(val, sparx5, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (w < (60 / 4)) {
+ spx5_wr(0, sparx5, QS_INJ_WR(grp));
+ w++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
+ QS_INJ_CTRL_EOF_SET(1),
+ sparx5, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ spx5_wr(0, sparx5, QS_INJ_WR(grp));
+ w++;
+
+ val = spx5_rd(sparx5, QS_INJ_STATUS);
+ if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
+ QS_INJ_STATUS_WMARK_REACHED_GET(val));
+ netif_stop_queue(ndev);
+ hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
+ HRTIMER_MODE_REL);
+ }
+
+ return NETDEV_TX_OK;
+}
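+
+/* Summary of the manual injection protocol implemented above: signal
+ * SOF via QS_INJ_CTRL, push the IFH words and the frame data through
+ * QS_INJ_WR, pad to the 60 byte minimum, signal EOF with the number
+ * of valid bytes in the last word, and finally write one dummy word
+ * in place of the FCS, which the hardware recomputes since the IFH
+ * sets FWD.UPDATE_FCS.
+ */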
+
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int ret;
+
+ ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+
+ if (ret == NETDEV_TX_OK) {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ skb_tx_timestamp(skb);
+ dev_kfree_skb_any(skb);
+ } else {
+ stats->tx_dropped++;
+ }
+ return ret;
+}
+
+static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
+{
+ struct sparx5_port *port = container_of(tmr, struct sparx5_port,
+ inj_timer);
+ int grp = INJ_QUEUE;
+ u32 val;
+
+ val = spx5_rd(port->sparx5, QS_INJ_STATUS);
+ if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+ pr_err_ratelimited("Injection: Reset watermark count\n");
+ /* Reset Watermark count to restart */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+ port->sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+ }
+ netif_wake_queue(port->ndev);
+ return HRTIMER_NORESTART;
+}
+
+int sparx5_manual_injection_mode(struct sparx5 *sparx5)
+{
+ const int byte_swap = 1;
+ int portno;
+
+ /* Change mode to manual extraction and injection */
+ spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
+ spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+ sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
+
+ /* CPU ports capture setup */
+ for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ /* ASM CPU port: No preamble, IFH, enable padding */
+ spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
+ ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
+ ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
+ sparx5, ASM_PORT_CFG(portno));
+
+ /* Reset WM cnt to unclog queued frames */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Set Disassembler Stop Watermark level */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(portno));
+
+ /* Enable the Disassembler buffer underrun watchdog */
+ spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
+ DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
+ sparx5,
+ DSM_BUF_CFG(portno));
+ }
+ return 0;
+}
+
+irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
+{
+ struct sparx5 *s5 = _sparx5;
+ int poll = 64;
+
+ /* Check data in queue */
+ while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
+ sparx5_xtr_grp(s5, XTR_QUEUE, false);
+
+ return IRQ_HANDLED;
+}
+
+void sparx5_port_inj_timer_setup(struct sparx5_port *port)
+{
+ hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->inj_timer.function = sparx5_injection_timeout;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
new file mode 100644
index 000000000000..af70e2795125
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/sfp.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b)
+{
+ if (a->speed != b->speed ||
+ a->portmode != b->portmode ||
+ a->autoneg != b->autoneg ||
+ a->pause_adv != b->pause_adv ||
+ a->power_down != b->power_down ||
+ a->media != b->media)
+ return true;
+ return false;
+}
+
+static void sparx5_phylink_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ case PHY_INTERFACE_MODE_NA:
+ if (port->conf.bandwidth == SPEED_5000)
+ phylink_set(mask, 5000baseT_Full);
+ if (port->conf.bandwidth == SPEED_10000) {
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ }
+ if (port->conf.bandwidth == SPEED_25000) {
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 25000baseCR_Full);
+ phylink_set(mask, 25000baseSR_Full);
+ }
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
+ fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_NA) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void sparx5_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Currently not used */
+}
+
+static void sparx5_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+ struct sparx5_port_config conf;
+ int err;
+
+ conf = port->conf;
+ conf.duplex = duplex;
+ conf.pause = 0;
+ conf.pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ conf.pause |= rx_pause ? MLO_PAUSE_RX : 0;
+ conf.speed = speed;
+ /* Configure the port to speed/duplex/pause */
+ err = sparx5_port_config(port->sparx5, port, &conf);
+ if (err)
+ netdev_err(port->ndev, "port config failed: %d\n", err);
+}
+
+static void sparx5_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* Currently not used */
+}
+
+static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct sparx5_port, phylink_pcs);
+}
+
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+ struct sparx5_port_status status;
+
+ sparx5_get_port_status(port->sparx5, port, &status);
+ state->link = status.link && !status.link_down;
+ state->an_complete = status.an_complete;
+ state->speed = status.speed;
+ state->duplex = status.duplex;
+ state->pause = status.pause;
+}
+
+static int sparx5_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+ struct sparx5_port_config conf;
+ int ret = 0;
+
+ conf = port->conf;
+ conf.power_down = false;
+ conf.portmode = interface;
+ conf.inband = phylink_autoneg_inband(mode);
+ conf.autoneg = phylink_test(advertising, Autoneg);
+ conf.pause_adv = 0;
+ if (phylink_test(advertising, Pause))
+ conf.pause_adv |= ADVERTISE_1000XPAUSE;
+ if (phylink_test(advertising, Asym_Pause))
+ conf.pause_adv |= ADVERTISE_1000XPSE_ASYM;
+ if (sparx5_is_baser(interface)) {
+ if (phylink_test(advertising, FIBRE))
+ conf.media = PHY_MEDIA_SR;
+ else
+ conf.media = PHY_MEDIA_DAC;
+ }
+ if (!port_conf_has_changed(&port->conf, &conf))
+ return ret;
+ /* Enable the PCS matching this interface type */
+ ret = sparx5_port_pcs_set(port->sparx5, port, &conf);
+ if (ret)
+ netdev_err(port->ndev, "port PCS config failed: %d\n", ret);
+ return ret;
+}
+
+static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
+ .pcs_get_state = sparx5_pcs_get_state,
+ .pcs_config = sparx5_pcs_config,
+ .pcs_an_restart = sparx5_pcs_aneg_restart,
+};
+
+const struct phylink_mac_ops sparx5_phylink_mac_ops = {
+ .validate = sparx5_phylink_validate,
+ .mac_config = sparx5_phylink_mac_config,
+ .mac_link_down = sparx5_phylink_mac_link_down,
+ .mac_link_up = sparx5_phylink_mac_link_up,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
new file mode 100644
index 000000000000..d2e3250928bf
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define SPX5_ETYPE_TAG_C 0x8100
+#define SPX5_ETYPE_TAG_S 0x88a8
+
+#define SPX5_WAIT_US 1000
+#define SPX5_WAIT_MAX_US 2000
+
+enum port_error {
+ SPX5_PERR_SPEED,
+ SPX5_PERR_IFTYPE,
+};
+
+#define PAUSE_DISCARD 0xC
+#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
+
+static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
+{
+ status->an_complete = true;
+ if (!(lp_abil & LPA_SGMII_LINK)) {
+ status->link = false;
+ return;
+ }
+
+ switch (lp_abil & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_10:
+ status->speed = SPEED_10;
+ break;
+ case LPA_SGMII_100:
+ status->speed = SPEED_100;
+ break;
+ case LPA_SGMII_1000:
+ status->speed = SPEED_1000;
+ break;
+ default:
+ status->link = false;
+ return;
+ }
+ if (lp_abil & LPA_SGMII_FULL_DUPLEX)
+ status->duplex = DUPLEX_FULL;
+ else
+ status->duplex = DUPLEX_HALF;
+}
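+
+/* Example (illustrative): an lp_abil word of 0x9800 has LPA_SGMII_LINK,
+ * LPA_SGMII_FULL_DUPLEX and LPA_SGMII_1000 set, and decodes to link up
+ * at SPEED_1000/DUPLEX_FULL.
+ */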
+
+static void decode_cl37_word(u16 lp_abil, u16 ld_abil, struct sparx5_port_status *status)
+{
+ status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
+ status->an_complete = true;
+ status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
+ DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G half duplex not supported */
+
+ if ((ld_abil & ADVERTISE_1000XPAUSE) &&
+ (lp_abil & ADVERTISE_1000XPAUSE)) {
+ status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
+ } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
+ (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
+ status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
+ MLO_PAUSE_TX : 0;
+ status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
+ MLO_PAUSE_RX : 0;
+ } else {
+ status->pause = MLO_PAUSE_NONE;
+ }
+}
+
+static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ u32 portno = port->portno;
+ u16 lp_adv, ld_adv;
+ u32 value;
+
+ /* Get PCS Link down sticky */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
+ status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
+ if (status->link_down) /* Clear the sticky */
+ spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
+
+ /* Get both current Link and Sync status */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
+ status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
+ DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
+
+ if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
+ status->speed = SPEED_1000;
+ else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
+ status->speed = SPEED_2500;
+
+ status->duplex = DUPLEX_FULL;
+
+ /* Get PCS ANEG status register */
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
+
+ /* Aneg complete provides more information */
+ if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
+ lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
+ if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
+ decode_sgmii_word(lp_adv, status);
+ } else {
+ value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
+ ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
+ decode_cl37_word(lp_adv, ld_adv, status);
+ }
+ }
+ return 0;
+}
+
+static int sparx5_get_sfi_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
+ u32 portno = port->portno;
+ u32 value, dev, tinst;
+ void __iomem *inst;
+
+ if (!high_speed_dev) {
+ netdev_err(port->ndev, "error: low speed and SFI mode\n");
+ return -EINVAL;
+ }
+
+ dev = sparx5_to_high_dev(portno);
+ tinst = sparx5_port_dev_index(portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
+ /* The link is or has been down. Clear the sticky bit */
+ status->link_down = 1;
+ spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+ }
+ status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
+ status->duplex = DUPLEX_FULL;
+ if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
+ status->speed = SPEED_5000;
+ else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
+ status->speed = SPEED_10000;
+ else
+ status->speed = SPEED_25000;
+
+ return 0;
+}
+
+/* Get the link status of 1000Base-X/in-band and SFI ports */
+int sparx5_get_port_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status)
+{
+ memset(status, 0, sizeof(*status));
+ status->speed = port->conf.speed;
+ if (port->conf.power_down) {
+ status->link = false;
+ return 0;
+ }
+ switch (port->conf.portmode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return sparx5_get_dev2g5_status(sparx5, port, status);
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return sparx5_get_sfi_status(sparx5, port, status);
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ default:
+ netdev_err(port->ndev, "Status not supported");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int sparx5_port_error(struct sparx5_port *port,
+ struct sparx5_port_config *conf,
+ enum port_error errtype)
+{
+ switch (errtype) {
+ case SPX5_PERR_SPEED:
+ netdev_err(port->ndev,
+ "Interface does not support speed: %u: for %s\n",
+ conf->speed, phy_modes(conf->portmode));
+ break;
+ case SPX5_PERR_IFTYPE:
+ netdev_err(port->ndev,
+ "Switch port does not support interface type: %s\n",
+ phy_modes(conf->portmode));
+ break;
+ default:
+ netdev_err(port->ndev,
+ "Interface configuration error\n");
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_port_verify_speed(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ if ((sparx5_port_is_2g5(port->portno) &&
+ conf->speed > SPEED_2500) ||
+ (sparx5_port_is_5g(port->portno) &&
+ conf->speed > SPEED_5000) ||
+ (sparx5_port_is_10g(port->portno) &&
+ conf->speed > SPEED_10000))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_NA:
+ return -EINVAL;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (conf->speed != SPEED_1000)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ if (sparx5_port_is_2g5(port->portno))
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (conf->speed != SPEED_2500 ||
+ sparx5_port_is_2g5(port->portno))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (port->portno > 47)
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10 &&
+ conf->speed != SPEED_2500)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ if ((conf->speed != SPEED_5000 &&
+ conf->speed != SPEED_10000 &&
+ conf->speed != SPEED_25000))
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
+ default:
+ return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+ }
+ return 0;
+}
+
+static bool sparx5_dev_change(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ return sparx5_is_baser(port->conf.portmode) ^
+ sparx5_is_baser(conf->portmode);
+}
+
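+/* Poll the QRES resource counters for the port until both the source and
+ * destination memory tracked for it have drained to zero, giving up after
+ * 2000 polls, i.e. roughly 2000 * SPX5_WAIT_US of waiting.
+ */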
+static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
+{
+ u32 value, resource, prio, delay_cnt = 0;
+ bool poll_src = true;
+ char *mem = "";
+
+ /* Resource == 0: Memory tracked per source (SRC-MEM)
+ * Resource == 1: Frame references tracked per source (SRC-REF)
+ * Resource == 2: Memory tracked per destination (DST-MEM)
+ * Resource == 3: Frame references tracked per destination. (DST-REF)
+ */
+ while (1) {
+ bool empty = true;
+
+ for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
+ u32 base;
+
+ base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
+ for (prio = 0; prio < SPX5_PRIOS; prio++) {
+ value = spx5_rd(sparx5,
+ QRES_RES_STAT(base + prio));
+ if (value) {
+ mem = resource == 0 ?
+ "DST-MEM" : "SRC-MEM";
+ empty = false;
+ }
+ }
+ }
+
+ if (empty)
+ break;
+
+ if (delay_cnt++ == 2000) {
+ dev_err(sparx5->dev,
+ "Flush timeout port %u. %s queue not empty\n",
+ portno, mem);
+ return -EINVAL;
+ }
+
+ usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
+ }
+ return 0;
+}
+
+static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
+{
+ u32 tinst = high_spd_dev ?
+ sparx5_port_dev_index(port->portno) : port->portno;
+ u32 dev = high_spd_dev ?
+ sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+ void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+ u32 spd = port->conf.speed;
+ u32 spd_prm;
+ int err;
+
+ if (high_spd_dev) {
+ /* 1: Reset the PCS Rx clock domain */
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+ DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ /* 2: Disable MAC frame reception */
+ spx5_inst_rmw(0,
+ DEV10G_MAC_ENA_CFG_RX_ENA,
+ devinst,
+ DEV10G_MAC_ENA_CFG(0));
+ } else {
+ /* 1: Reset the PCS Rx clock domain */
+ spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ devinst,
+ DEV2G5_DEV_RST_CTRL(0));
+ /* 2: Disable MAC frame reception */
+ spx5_inst_rmw(0,
+ DEV2G5_MAC_ENA_CFG_RX_ENA,
+ devinst,
+ DEV2G5_MAC_ENA_CFG(0));
+ }
+ /* 3: Disable traffic being sent to or from switch port->portno */
+ spx5_rmw(0,
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+
+ /* 4: Disable dequeuing from the egress queues */
+ spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
+ HSCH_PORT_MODE_DEQUEUE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* 5: Disable flow control */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
+ QSYS_PAUSE_CFG_PAUSE_STOP,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
+ /* 6: Wait while the last frame is exiting the queues */
+ usleep_range(8 * spd_prm, 10 * spd_prm);
+
+ /* 7: Flush the queues associated with port->portno */
+ spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+ HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+ HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+ HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
+ HSCH_FLUSH_CTRL_FLUSH_PORT |
+ HSCH_FLUSH_CTRL_FLUSH_DST |
+ HSCH_FLUSH_CTRL_FLUSH_SRC |
+ HSCH_FLUSH_CTRL_FLUSH_ENA,
+ sparx5,
+ HSCH_FLUSH_CTRL);
+
+ /* 8: Enable dequeuing from the egress queues */
+ spx5_rmw(0,
+ HSCH_PORT_MODE_DEQUEUE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* 9: Wait until flushing is complete */
+ err = sparx5_port_flush_poll(sparx5, port->portno);
+ if (err)
+ return err;
+
+ /* 10: Reset the MAC clock domain */
+ if (high_spd_dev) {
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ } else {
+ spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+ devinst,
+ DEV2G5_DEV_RST_CTRL(0));
+ }
+ /* 11: Clear flushing */
+ spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+ HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
+ HSCH_FLUSH_CTRL_FLUSH_PORT |
+ HSCH_FLUSH_CTRL_FLUSH_ENA,
+ sparx5,
+ HSCH_FLUSH_CTRL);
+
+ if (high_spd_dev) {
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
+
+ /* 12: Disable 5G/10G/25G BaseR PCS */
+ spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
+ PCS10G_BR_PCS_CFG_PCS_ENA,
+ pcsinst,
+ PCS10G_BR_PCS_CFG(0));
+
+ if (sparx5_port_is_25g(port->portno))
+ /* Disable 25G PCS */
+ spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
+ DEV25G_PCS25G_CFG_PCS25G_ENA,
+ sparx5,
+ DEV25G_PCS25G_CFG(tinst));
+ } else {
+ /* 12: Disable 1G PCS */
+ spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
+ DEV2G5_PCS1G_CFG_PCS_ENA,
+ sparx5,
+ DEV2G5_PCS1G_CFG(port->portno));
+ }
+
+ /* The port is now flushed and disabled */
+ return 0;
+}
+
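+/* Compute the DSM TX stop watermark for a port. Worked example (assuming
+ * sparx5_clk_period() returns the core clock period in ps, 1600 ps at
+ * 625 MHz): a 1G port with taxi_dist 6 gives tmp1 = 1000 * 1 / 16 = 62,
+ * tmp2 = 3000 + (12000 + 12000) * 1600 / 8000 = 7800, tmp3 = 62 * 7800 /
+ * 1000 = 483, so the returned watermark is (483 + 2000 + 999) / 1000 = 3.
+ */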
+static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
+ u32 portno, u32 speed)
+{
+ u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
+ const u32 taxi_dist[SPX5_PORTS_ALL] = {
+ 6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
+ 4, 4, 4, 4,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 4, 6, 8, 4, 6, 8, 6, 8,
+ 2, 2, 2, 2, 2, 2, 2, 4, 2
+ };
+ u32 mac_per = 6400, tmp1, tmp2, tmp3;
+ u32 fifo_width = 16;
+ u32 mac_width = 8;
+ u32 addition = 0;
+
+ switch (speed) {
+ case SPEED_25000:
+ return 0;
+ case SPEED_10000:
+ mac_per = 6400;
+ mac_width = 8;
+ addition = 1;
+ break;
+ case SPEED_5000:
+ mac_per = 12800;
+ mac_width = 8;
+ addition = 0;
+ break;
+ case SPEED_2500:
+ mac_per = 3200;
+ mac_width = 1;
+ addition = 0;
+ break;
+ case SPEED_1000:
+ mac_per = 8000;
+ mac_width = 1;
+ addition = 0;
+ break;
+ case SPEED_100:
+ case SPEED_10:
+ return 1;
+ default:
+ break;
+ }
+
+ tmp1 = 1000 * mac_width / fifo_width;
+ tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
+ * sys_clk / mac_per);
+ tmp3 = tmp1 * tmp2 / 1000;
+ return (tmp3 + 2000 + 999) / 1000 + addition;
+}
+
+/* Configure port muxing.
+ * In QSGMII mode four 2G5 devices share one serdes, e.g. port 10 belongs
+ * to QSGMII instance (10 - 10 % 4) / 4 = 2, i.e. bit 2 of
+ * PORT_CONF_QSGMII_ENA.
+ */
+static int sparx5_port_mux_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 portno = port->portno;
+ u32 inst;
+
+ if (port->conf.portmode == conf->portmode)
+ return 0; /* Nothing to do */
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
+ inst = (portno - portno % 4) / 4;
+ spx5_rmw(BIT(inst),
+ BIT(inst),
+ sparx5,
+ PORT_CONF_QSGMII_ENA);
+
+ if ((portno / 4 % 2) == 0) {
+ /* Affects d0-d3,d8-d11..d40-d43 */
+ spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
+ PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
+ PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
+ PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
+ PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
+ PORT_CONF_USGMII_CFG_QUAD_MODE,
+ sparx5,
+ PORT_CONF_USGMII_CFG((portno / 8)));
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
+ struct sparx5_port *port)
+{
+ enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
+ int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
+ max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
+ bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
+ enum sparx5_vlan_port_type vlan_type = port->vlan_type;
+ bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
+ u32 dev = sparx5_to_high_dev(port->portno);
+ u32 tinst = sparx5_port_dev_index(port->portno);
+ void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
+ u32 etype;
+
+ etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
+
+ spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ sparx5,
+ DEV2G5_MAC_TAGS_CFG(port->portno));
+
+ if (sparx5_port_is_2g5(port->portno))
+ return 0;
+
+ spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
+ DEV10G_MAC_TAGS_CFG_TAG_ID |
+ DEV10G_MAC_TAGS_CFG_TAG_ENA,
+ inst,
+ DEV10G_MAC_TAGS_CFG(0, 0));
+
+ spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
+ DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
+ inst,
+ DEV10G_MAC_NUM_TAGS_CFG(0));
+
+ spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
+ DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
+ inst,
+ DEV10G_MAC_MAXLEN_CFG(0));
+ return 0;
+}
+
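+/* Convert the port speed to a forwarding urgency in core clock cycles,
+ * e.g. at 10G: 67200 / 1600 - 1 = 41 cycles (using the fixed 1600 ps
+ * clock period assumed below).
+ */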
+static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
+{
+ u32 clk_period_ps = 1600; /* 625 MHz for now */
+ u32 urg = 672000;
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ urg = 672000;
+ break;
+ case SPEED_2500:
+ urg = 270000;
+ break;
+ case SPEED_5000:
+ urg = 135000;
+ break;
+ case SPEED_10000:
+ urg = 67200;
+ break;
+ case SPEED_25000:
+ urg = 27000;
+ break;
+ }
+ return urg / clk_period_ps - 1;
+}
+
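+/* Encode a watermark given in buffer cells: values below 2048 are stored
+ * as-is, larger values in units of 16 cells, e.g. 3008 encodes to
+ * 2048 + 3008 / 16 = 2236.
+ */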
+static u16 sparx5_wm_enc(u16 value)
+{
+ if (value >= 2048)
+ return 2048 + value / 16;
+
+ return value;
+}
+
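+/* Set up flow control: obey received pause frames when RX pause is
+ * enabled, and when TX pause is enabled set the generation stop watermark
+ * to about four max-sized frames worth of buffer cells (otherwise
+ * 0xFFF - 1, which disables generation).
+ */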
+static int sparx5_port_fc_setup(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
+ u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
+
+ if (conf->pause & MLO_PAUSE_TX)
+ pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
+ SPX5_BUFFER_CELL_SZ));
+
+ /* Set half duplex (HDX) flow control */
+ spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
+ DSM_MAC_CFG_HDX_BACKPREASSURE,
+ sparx5,
+ DSM_MAC_CFG(port->portno));
+
+ /* Obey flow control */
+ spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
+ DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
+ sparx5,
+ DSM_RX_PAUSE_CFG(port->portno));
+
+ /* Disable forward pressure */
+ spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
+ QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
+ sparx5,
+ QSYS_FWD_PRESSURE(port->portno));
+
+ /* Generate pause frames */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
+ QSYS_PAUSE_CFG_PAUSE_STOP,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ return 0;
+}
+
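+/* Build the in-band aneg advertisement word: for 1000BASE-X this is the
+ * clause-37 base page (the local pause bits plus the acknowledge and full
+ * duplex ability bits); for (Q)SGMII only the aneg enable bit is set.
+ */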
+static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
+{
+ if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
+ return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
+ else
+ return 1; /* Enable SGMII Aneg */
+}
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int portmode, err, speed = conf->speed;
+
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
+ ((port->portno % 4) != 0)) {
+ return 0;
+ }
+ if (sparx5_is_baser(conf->portmode)) {
+ if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
+ speed = SPEED_25000;
+ else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
+ speed = SPEED_10000;
+ else
+ speed = SPEED_5000;
+ }
+
+ err = phy_set_media(port->serdes, conf->media);
+ if (err)
+ return err;
+ if (speed > 0) {
+ err = phy_set_speed(port->serdes, speed);
+ if (err)
+ return err;
+ }
+ if (conf->serdes_reset) {
+ err = phy_reset(port->serdes);
+ if (err)
+ return err;
+ }
+
+ /* Configure SerDes with port parameters.
+ * For BaseR modes the serdes driver supports 10GBASE-R at speeds of
+ * 5G/10G/25G.
+ */
+ portmode = conf->portmode;
+ if (sparx5_is_baser(conf->portmode))
+ portmode = PHY_INTERFACE_MODE_10GBASER;
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
+ if (err)
+ return err;
+ conf->serdes_reset = false;
+ return err;
+}
+
+static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool sgmii = false, inband_aneg = false;
+ int err;
+
+ if (port->conf.inband) {
+ if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ conf->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return -EINVAL;
+ } else {
+ sgmii = true; /* PHY is connected to the MAC */
+ }
+
+ /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
+ spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
+ DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ sparx5,
+ DEV2G5_PCS1G_MODE_CFG(port->portno));
+
+ /* Enable PCS */
+ spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
+ sparx5,
+ DEV2G5_PCS1G_CFG(port->portno));
+
+ if (inband_aneg) {
+ u16 abil = sparx5_get_aneg_word(conf);
+
+ /* Enable in-band aneg */
+ spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
+ DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+ DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
+ sparx5,
+ DEV2G5_PCS1G_ANEG_CFG(port->portno));
+ } else {
+ spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
+ }
+
+ /* Take PCS out of reset */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+ DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+
+ return 0;
+}
+
+static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
+ u32 pix = sparx5_port_dev_index(port->portno);
+ u32 dev = sparx5_to_high_dev(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ void __iomem *devinst;
+ void __iomem *pcsinst;
+ int err;
+
+ devinst = spx5_inst_get(sparx5, dev, pix);
+ pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+ /* SFI: no in-band aneg. Speeds 5G/10G/25G */
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return -EINVAL;
+ if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
+ /* Enable PCS for 25G device, speed 25G */
+ spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
+ DEV25G_PCS25G_CFG_PCS25G_ENA,
+ sparx5,
+ DEV25G_PCS25G_CFG(pix));
+ } else {
+ /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
+ spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
+ PCS10G_BR_PCS_CFG_PCS_ENA,
+ pcsinst,
+ PCS10G_BR_PCS_CFG(0));
+ }
+
+ /* Enable 5G/10G/25G MAC module */
+ spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
+ devinst,
+ DEV10G_MAC_ENA_CFG(0));
+
+ /* Take the device out of reset */
+ spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+ DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
+ DEV10G_DEV_RST_CTRL_PCS_RX_RST |
+ DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+ DEV10G_DEV_RST_CTRL_MAC_TX_RST |
+ DEV10G_DEV_RST_CTRL_SPEED_SEL,
+ devinst,
+ DEV10G_DEV_RST_CTRL(0));
+
+ return 0;
+}
+
+/* Switch between the 2G5 device and the 5G/10G/25G device on the same
+ * index: a cleared bit in PORT_CONF_DEVxG_MODES selects the high speed
+ * device, a set bit the shadow 2G5 device.
+ */
+static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
+{
+ int bt_indx = BIT(sparx5_port_dev_index(port));
+
+ if (sparx5_port_is_5g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV5G_MODES);
+ } else if (sparx5_port_is_10g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV10G_MODES);
+ } else if (sparx5_port_is_25g(port)) {
+ spx5_rmw(hsd ? 0 : bt_indx,
+ bt_indx,
+ sparx5,
+ PORT_CONF_DEV25G_MODES);
+ }
+}
+
+/* Configure speed/duplex dependent registers */
+static int sparx5_port_config_low_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
+ bool fdx = conf->duplex == DUPLEX_FULL;
+ int spd = conf->speed;
+
+ clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
+ gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
+ tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
+ hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
+ hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
+
+ /* GIG/FDX mode */
+ spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
+ DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
+ DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
+ DEV2G5_MAC_MODE_CFG_FDX_ENA,
+ sparx5,
+ DEV2G5_MAC_MODE_CFG(port->portno));
+
+ /* Set MAC IFG Gaps */
+ spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
+ sparx5,
+ DEV2G5_MAC_IFG_CFG(port->portno));
+
+ /* Disable frame aging when in HDX (due to a HDX issue) */
+ spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
+ HSCH_PORT_MODE_AGE_DIS,
+ sparx5,
+ HSCH_PORT_MODE(port->portno));
+
+ /* Enable MAC module */
+ spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
+ DEV2G5_MAC_ENA_CFG_TX_ENA,
+ sparx5,
+ DEV2G5_MAC_ENA_CFG(port->portno));
+
+ /* Select speed and take MAC out of reset */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+ DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+ DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+
+ return 0;
+}
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ int err;
+
+ if (sparx5_dev_change(sparx5, port, conf)) {
+ /* switch device */
+ sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
+
+ /* Disable the not-in-use device */
+ err = sparx5_port_disable(sparx5, port, !high_speed_dev);
+ if (err)
+ return err;
+ }
+ /* Disable the port before re-configuring */
+ err = sparx5_port_disable(sparx5, port, high_speed_dev);
+ if (err)
+ return -EINVAL;
+
+ if (high_speed_dev)
+ err = sparx5_port_pcs_high_set(sparx5, port, conf);
+ else
+ err = sparx5_port_pcs_low_set(sparx5, port, conf);
+
+ if (err)
+ return -EINVAL;
+
+ if (port->conf.inband) {
+ /* Enable/disable 1G counters in ASM */
+ spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ ASM_PORT_CFG_CSC_STAT_DIS,
+ sparx5,
+ ASM_PORT_CFG(port->portno));
+
+ /* Enable/disable 1G counters in DSM */
+ spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ DSM_BUF_CFG_CSC_STAT_DIS,
+ sparx5,
+ DSM_BUF_CFG(port->portno));
+ }
+
+ port->conf = *conf;
+
+ return 0;
+}
+
+int sparx5_port_config(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ int err, urgency, stop_wm;
+
+ err = sparx5_port_verify_speed(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* high speed device is already configured */
+ if (!high_speed_dev)
+ sparx5_port_config_low_set(sparx5, port, conf);
+
+ /* Configure flow control */
+ err = sparx5_port_fc_setup(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* Set the DSM stop watermark */
+ stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
+ DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+ /* Enable port in queue system */
+ urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA |
+ QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+
+ /* Save the new values */
+ port->conf = *conf;
+
+ return 0;
+}
+
+/* Initialize port config to default */
+int sparx5_port_init(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+ u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+ u32 devhigh = sparx5_to_high_dev(port->portno);
+ u32 pix = sparx5_port_dev_index(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(port->portno);
+ bool sd_pol = port->signd_active_high;
+ bool sd_sel = !port->signd_internal;
+ bool sd_ena = port->signd_enable;
+ u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
+ void __iomem *devinst;
+ void __iomem *pcsinst;
+ int err;
+
+ devinst = spx5_inst_get(sparx5, devhigh, pix);
+ pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+ /* Set the mux port mode */
+ err = sparx5_port_mux_set(sparx5, port, conf);
+ if (err)
+ return err;
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
+ /* Set Pause WM hysteresis */
+ spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
+ QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
+ QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
+ QSYS_PAUSE_CFG_PAUSE_START |
+ QSYS_PAUSE_CFG_PAUSE_STOP |
+ QSYS_PAUSE_CFG_PAUSE_ENA,
+ sparx5,
+ QSYS_PAUSE_CFG(port->portno));
+
+ /* Port ATOP. Frames are tail dropped when this WM is hit */
+ spx5_wr(QSYS_ATOP_ATOP_SET(atop),
+ sparx5,
+ QSYS_ATOP(port->portno));
+
+ /* Discard pause frame 01-80-C2-00-00-01 */
+ spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_SGMII) {
+ err = sparx5_serdes_set(sparx5, port, conf);
+ if (err)
+ return err;
+
+ if (!sparx5_port_is_2g5(port->portno))
+ /* Enable shadow device */
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+ sparx5_dev_switch(sparx5, port->portno, false);
+ }
+ if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
+ /* All ports must be PCS enabled in QSGMII mode */
+ spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
+ DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
+ sparx5,
+ DEV2G5_DEV_RST_CTRL(port->portno));
+ }
+ /* Default IFGs for 1G */
+ spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
+ DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
+ sparx5,
+ DEV2G5_MAC_IFG_CFG(port->portno));
+
+ if (sparx5_port_is_2g5(port->portno))
+ return 0; /* Low speed device only - return */
+
+ /* Now setup the high speed device */
+ if (conf->portmode == PHY_INTERFACE_MODE_NA)
+ conf->portmode = PHY_INTERFACE_MODE_10GBASER;
+
+ if (sparx5_is_baser(conf->portmode))
+ sparx5_dev_switch(sparx5, port->portno, true);
+
+ /* Set Max Length */
+ spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+ devinst,
+ DEV10G_MAC_MAXLEN_CFG(0));
+
+ /* Handle Signal Detect in 10G PCS */
+ spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+ PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
+ PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
+ pcsinst,
+ PCS10G_BR_PCS_SD_CFG(0));
+
+ if (sparx5_port_is_25g(port->portno)) {
+ /* Handle Signal Detect in 25G PCS */
+ spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV25G_PCS25G_SD_CFG(pix));
+ }
+
+ return 0;
+}
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ /* Enable or disable the port for frame transfer */
+ spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
+ QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ sparx5,
+ QFWD_SWITCH_PORT_MODE(port->portno));
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
new file mode 100644
index 000000000000..fd05ab6436d1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_PORT_H__
+#define __SPARX5_PORT_H__
+
+#include "sparx5_main.h"
+
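+/* Sparx5 port layout: ports 0-11 and 64 sit on 5G devices, ports 12-15
+ * and 48-55 on 10G devices, ports 56-63 on 25G devices, while ports 16-47
+ * only have a 2G5 device. Ports with a high speed device also have a
+ * shadow DEV2G5.
+ */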
+static inline bool sparx5_port_is_2g5(int portno)
+{
+ return portno >= 16 && portno <= 47;
+}
+
+static inline bool sparx5_port_is_5g(int portno)
+{
+ return portno <= 11 || portno == 64;
+}
+
+static inline bool sparx5_port_is_10g(int portno)
+{
+ return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55);
+}
+
+static inline bool sparx5_port_is_25g(int portno)
+{
+ return portno >= 56 && portno <= 63;
+}
+
+static inline u32 sparx5_to_high_dev(int port)
+{
+ if (sparx5_port_is_5g(port))
+ return TARGET_DEV5G;
+ if (sparx5_port_is_10g(port))
+ return TARGET_DEV10G;
+ return TARGET_DEV25G;
+}
+
+static inline u32 sparx5_to_pcs_dev(int port)
+{
+ if (sparx5_port_is_5g(port))
+ return TARGET_PCS5G_BR;
+ if (sparx5_port_is_10g(port))
+ return TARGET_PCS10G_BR;
+ return TARGET_PCS25G_BR;
+}
+
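+/* Map a port number to its index within its device group, e.g. port 13
+ * maps to DEV10G instance 1, port 50 to DEV10G instance 6, port 60 to
+ * DEV25G instance 4 and port 64 to DEV5G instance 12.
+ */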
+static inline int sparx5_port_dev_index(int port)
+{
+ if (sparx5_port_is_2g5(port))
+ return port;
+ if (sparx5_port_is_5g(port))
+ return (port <= 11 ? port : 12);
+ if (sparx5_port_is_10g(port))
+ return (port >= 12 && port <= 15) ?
+ port - 12 : port - 44;
+ return (port - 56);
+}
+
+int sparx5_port_init(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+int sparx5_port_config(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+ struct sparx5_port *spx5_port,
+ struct sparx5_port_config *conf);
+
+struct sparx5_port_status {
+ bool link;
+ bool link_down;
+ int speed;
+ bool an_complete;
+ int duplex;
+ int pause;
+};
+
+int sparx5_get_port_status(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ struct sparx5_port_status *status);
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable);
+
+#endif /* __SPARX5_PORT_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
new file mode 100644
index 000000000000..a72e3b3b596e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static struct workqueue_struct *sparx5_owq;
+
+struct sparx5_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+}
+
+static void sparx5_attr_stp_state_set(struct sparx5_port *port,
+ u8 state)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!test_bit(port->portno, sparx5->bridge_mask)) {
+ netdev_err(port->ndev,
+ "Controlling non-bridged port %d?\n", port->portno);
+ return;
+ }
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ set_bit(port->portno, sparx5->bridge_fwd_mask);
+ fallthrough;
+ case BR_STATE_LEARNING:
+ set_bit(port->portno, sparx5->bridge_lrn_mask);
+ break;
+
+ default:
+ /* All other states treated as blocking */
+ clear_bit(port->portno, sparx5->bridge_fwd_mask);
+ clear_bit(port->portno, sparx5->bridge_lrn_mask);
+ break;
+ }
+
+ /* apply the bridge_fwd_mask to all the ports */
+ sparx5_update_fwd(sparx5);
+}
+
+static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+ sparx5_set_ageing(port->sparx5, ageing_time);
+}
+
+static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ sparx5_attr_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ port->vlan_aware = attr->u.vlan_filtering;
+ sparx5_vlan_port_apply(port->sparx5, port);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sparx5_port_bridge_join(struct sparx5_port *port,
+ struct net_device *bridge)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ /* First bridged port */
+ sparx5->hw_bridge_dev = bridge;
+ else if (sparx5->hw_bridge_dev != bridge)
+ /* This is adding the port to a second bridge, this is
+ * unsupported
+ */
+ return -ENODEV;
+
+ set_bit(port->portno, sparx5->bridge_mask);
+
+ /* The port joins the bridge, so we no longer need to copy multicast
+ * frames to the CPU unless the bridge requests them.
+ */
+ __dev_mc_unsync(port->ndev, sparx5_mc_unsync);
+
+ return 0;
+}
+
+static void sparx5_port_bridge_leave(struct sparx5_port *port,
+ struct net_device *bridge)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+
+ clear_bit(port->portno, sparx5->bridge_mask);
+ if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+ sparx5->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before updating the port settings */
+ port->vlan_aware = 0;
+ port->pvid = NULL_VID;
+ port->vid = NULL_VID;
+
+ /* The port returns to host mode, so restore the mc list */
+ __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = sparx5_port_bridge_join(port, info->upper_dev);
+ else
+ sparx5_port_bridge_leave(port, info->upper_dev);
+
+ sparx5_vlan_port_apply(port->sparx5, port);
+ }
+
+ return err;
+}
+
+static int sparx5_port_add_addr(struct net_device *dev, bool up)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ u16 vid = port->pvid;
+
+ if (up)
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+ else
+ sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
+
+ return 0;
+}
+
+static int sparx5_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!sparx5_netdevice_check(dev))
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = sparx5_port_changeupper(dev, ptr);
+ break;
+ case NETDEV_PRE_UP:
+ err = sparx5_port_add_addr(dev, true);
+ break;
+ case NETDEV_DOWN:
+ err = sparx5_port_add_addr(dev, false);
+ break;
+ }
+
+ return err;
+}
+
+static int sparx5_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
+ ret = sparx5_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
+static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
+{
+ struct sparx5_switchdev_event_work *switchdev_work =
+ container_of(work, struct sparx5_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct sparx5_port *port;
+ struct sparx5 *sparx5;
+
+ rtnl_lock();
+ if (!sparx5_netdevice_check(dev))
+ goto out;
+
+ port = netdev_priv(dev);
+ sparx5 = port->sparx5;
+
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static void sparx5_schedule_work(struct work_struct *work)
+{
+ queue_work(sparx5_owq, work);
+}
+
+static int sparx5_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct sparx5_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ sparx5_netdevice_check,
+ sparx5_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ sparx5_switchdev_bridge_fdb_event_work);
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+
+ sparx5_schedule_work(&switchdev_work->work);
+ break;
+ }
+
+ return NOTIFY_DONE;
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
+ struct sparx5_port *port,
+ u16 vid, bool add)
+{
+ if (!port ||
+ !test_bit(port->portno, sparx5->bridge_mask))
+ return; /* Skip null/host interfaces */
+
+ /* Bridge connects to vid? */
+ if (add) {
+ /* Add the port MAC address to the VLAN */
+ sparx5_mact_learn(sparx5, PGID_CPU,
+ port->ndev->dev_addr, vid);
+ } else {
+ /* Control port addr visibility depending on
+ * port VLAN connectivity.
+ */
+ if (test_bit(port->portno, sparx5->vlan_mask[vid]))
+ sparx5_mact_learn(sparx5, PGID_CPU,
+ port->ndev->dev_addr, vid);
+ else
+ sparx5_mact_forget(sparx5,
+ port->ndev->dev_addr, vid);
+ }
+}
+
+static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
+ struct sparx5 *sparx5,
+ u16 vid, bool add)
+{
+ int i;
+
+ /* First, handle the bridge's own addresses */
+ if (add) {
+ sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
+ vid);
+ sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+ vid);
+ } else {
+ sparx5_mact_forget(sparx5, dev->dev_addr, vid);
+ sparx5_mact_forget(sparx5, dev->broadcast, vid);
+ }
+
+ /* Now look at bridged ports */
+ for (i = 0; i < SPX5_PORTS; i++)
+ sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+}
+
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_vlan *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(dev)) {
+ if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+ }
+ return 0;
+ }
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ return sparx5_vlan_vid_add(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+}
+
+static int sparx5_handle_port_obj_add(struct net_device *dev,
+ struct notifier_block *nb,
+ struct switchdev_notifier_port_obj_info *info)
+{
+ const struct switchdev_obj *obj = info->obj;
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = sparx5_handle_port_vlan_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ info->handled = true;
+ return err;
+}
+
+static int sparx5_handle_port_vlan_del(struct net_device *dev,
+ struct notifier_block *nb,
+ u16 vid)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ int ret;
+
+ /* Master bridge? */
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+ return 0;
+ }
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ ret = sparx5_vlan_vid_del(port, vid);
+ if (ret)
+ return ret;
+
+ /* Delete the port MAC address with the matching VLAN information */
+ sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
+
+ return 0;
+}
+
+static int sparx5_handle_port_obj_del(struct net_device *dev,
+ struct notifier_block *nb,
+ struct switchdev_notifier_port_obj_info *info)
+{
+ const struct switchdev_obj *obj = info->obj;
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = sparx5_handle_port_vlan_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ info->handled = true;
+ return err;
+}
+
+static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = sparx5_handle_port_obj_add(dev, nb, ptr);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = sparx5_handle_port_obj_del(dev, nb, ptr);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ sparx5_netdevice_check,
+ sparx5_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+int sparx5_register_notifier_blocks(struct sparx5 *s5)
+{
+ int err;
+
+ s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
+ err = register_netdevice_notifier(&s5->netdevice_nb);
+ if (err)
+ return err;
+
+ s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
+ err = register_switchdev_notifier(&s5->switchdev_nb);
+ if (err)
+ goto err_switchdev_nb;
+
+ s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+ if (err)
+ goto err_switchdev_blocking_nb;
+
+ sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
+ if (!sparx5_owq) {
+ err = -ENOMEM;
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&s5->switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&s5->netdevice_nb);
+
+ return err;
+}
+
+void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
+{
+ destroy_workqueue(sparx5_owq);
+
+ unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+ unregister_switchdev_notifier(&s5->switchdev_nb);
+ unregister_netdevice_notifier(&s5->netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
new file mode 100644
index 000000000000..4ce490a25f33
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
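+/* With 65 front ports the per-VLAN member mask does not fit in a single
+ * register: bitmap_to_arr32() splits it so that ports 0-31, 32-63 and 64
+ * land in VLAN_MASK_CFG, CFG1 and CFG2 respectively.
+ */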
+static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
+{
+ u32 mask[3];
+
+ /* Divide up mask in 32 bit words */
+ bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);
+
+ /* Output mask to respective registers */
+ spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
+ spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+ spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+
+ return 0;
+}
+
+void sparx5_vlan_init(struct sparx5 *sparx5)
+{
+ u16 vid;
+
+ spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
+ ANA_L3_VLAN_CTRL_VLAN_ENA,
+ sparx5,
+ ANA_L3_VLAN_CTRL);
+
+ /* Map VLAN = FID */
+ for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
+ spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
+ ANA_L3_VLAN_CFG_VLAN_FID,
+ sparx5,
+ ANA_L3_VLAN_CFG(vid));
+}
+
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
+{
+ struct sparx5_port *port = sparx5->ports[portno];
+
+ /* Configure PVID */
+ spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
+ ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
+ ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
+ ANA_CL_VLAN_CTRL_PORT_VID,
+ sparx5,
+ ANA_CL_VLAN_CTRL(port->portno));
+}
+
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ int ret;
+
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ /* Untagged egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ netdev_err(port->ndev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ sparx5_vlan_port_apply(sparx5, port);
+
+ return 0;
+}
+
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ int ret;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ /* Stop the port from being a member of the vlan */
+ clear_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ sparx5_vlan_port_apply(sparx5, port);
+
+ return 0;
+}
+
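+/* Add or remove a port in a PGID entry (a port group used for flooding
+ * and multicast), e.g. port 40 is bit 8 of ANA_AC_PGID_CFG1 and port 64
+ * is bit 0 of ANA_AC_PGID_CFG2.
+ */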
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 val, mask;
+
+ /* mask is spread across 3 registers x 32 bit */
+ if (port->portno < 32) {
+ mask = BIT(port->portno);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
+ } else if (port->portno < 64) {
+ mask = BIT(port->portno - 32);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
+ } else if (port->portno < SPX5_PORTS) {
+ mask = BIT(port->portno - 64);
+ val = enable ? mask : 0;
+ spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
+ } else {
+ netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
+ }
+}
+
+void sparx5_update_fwd(struct sparx5 *sparx5)
+{
+ DECLARE_BITMAP(workmask, SPX5_PORTS);
+ u32 mask[3];
+ int port;
+
+ /* Divide up fwd mask in 32 bit words */
+ bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+
+ /* Update flood masks */
+ for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
+ spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
+ spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+ }
+
+ /* Update SRC masks */
+ for (port = 0; port < SPX5_PORTS; port++) {
+ if (test_bit(port, sparx5->bridge_fwd_mask)) {
+ /* Allow to send to all bridged but self */
+ bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+ clear_bit(port, workmask);
+ bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+ spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
+ spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ } else {
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ }
+ }
+
+ /* Learning enabled only for bridged ports */
+ bitmap_and(workmask, sparx5->bridge_fwd_mask,
+ sparx5->bridge_lrn_mask, SPX5_PORTS);
+ bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+
+ /* Apply learning mask */
+ spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
+ spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+ spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+}
+
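+/* Apply the port's VLAN settings: ingress awareness, PVID and filtering,
+ * then egress tagging, where TAG_CFG 1 tags everything except the native
+ * VID and TAG_CFG 3 tags all frames.
+ */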
+void sparx5_vlan_port_apply(struct sparx5 *sparx5,
+ struct sparx5_port *port)
+{
+ u32 val;
+
+ /* Configure PVID, vlan aware */
+ val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
+ ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
+ ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
+ spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+ val = 0;
+ if (port->vlan_aware && !port->pvid)
+ /* If port is vlan-aware and tagged, drop untagged and
+ * priority tagged frames.
+ */
+ val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
+ ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
+ ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
+ spx5_wr(val, sparx5,
+ ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CTRL_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CTRL_TAG_CFG_SET(3);
+ }
+ spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));
+
+ /* Egress VID */
+ spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_VID,
+ sparx5,
+ REW_PORT_VLAN_CFG(port->portno));
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 04d067243457..fff78900fc8a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1230,8 +1230,10 @@ static int mana_create_txq(struct mana_port_context *apc,
cq->gdma_id = cq->gdma_cq->id;
- if (WARN_ON(cq->gdma_id >= gc->max_num_cqs))
- return -EINVAL;
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
+ goto out;
+ }
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
@@ -1387,8 +1389,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc = gd->gdma_context;
- rxq = kzalloc(sizeof(*rxq) +
- RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
+ rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
GFP_KERNEL);
if (!rxq)
return NULL;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index b85733942053..5249b64f4fc5 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -481,13 +481,12 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ndev->base_addr = res->start;
- priv->base = devm_ioremap_resource(p_dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto init_fail;
}
+ ndev->base_addr = res->start;
spin_lock_init(&priv->txlock);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0c4283319d7f..adfb9781799e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
int ocelot_port_flush(struct ocelot *ocelot, int port)
{
+ unsigned int pause_ena;
int err, val;
/* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
QSYS_PORT_MODE, port);
/* Disable flow control */
+ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ /* Re-enable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index aad33d22c33f..3e89e34f86d5 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -939,7 +939,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
ANA_PORT_CPU_FWD_CFG, port);
}
-static int ocelot_port_attr_set(struct net_device *dev,
+static int ocelot_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
@@ -948,6 +948,9 @@ static int ocelot_port_attr_set(struct net_device *dev,
int port = priv->chip_port;
int err = 0;
+ if (ctx && ctx != priv)
+ return 0;
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
@@ -1058,12 +1061,16 @@ ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
-static int ocelot_port_obj_add(struct net_device *dev,
+static int ocelot_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
+ struct ocelot_port_private *priv = netdev_priv(dev);
int ret = 0;
+ if (ctx && ctx != priv)
+ return 0;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
@@ -1086,11 +1093,15 @@ static int ocelot_port_obj_add(struct net_device *dev,
return ret;
}
-static int ocelot_port_obj_del(struct net_device *dev,
+static int ocelot_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
+ struct ocelot_port_private *priv = netdev_priv(dev);
int ret = 0;
+ if (ctx && ctx != priv)
+ return 0;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_vlan_vid_del(dev,
@@ -1143,10 +1154,14 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
struct net_device *bridge_dev,
struct netlink_ext_ack *extack)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
clock_t ageing_time;
u8 stp_state;
int err;
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
ocelot_inherit_brport_flags(ocelot, port, brport_dev);
stp_state = br_port_get_stp_state(brport_dev);
@@ -1160,16 +1175,12 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
ageing_time = br_get_ageing_time(bridge_dev);
ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
- err = br_mdb_replay(bridge_dev, brport_dev,
+ err = br_mdb_replay(bridge_dev, brport_dev, priv, true,
&ocelot_switchdev_blocking_nb, extack);
if (err && err != -EOPNOTSUPP)
return err;
- err = br_fdb_replay(bridge_dev, brport_dev, &ocelot_switchdev_nb);
- if (err)
- return err;
-
- err = br_vlan_replay(bridge_dev, brport_dev,
+ err = br_vlan_replay(bridge_dev, brport_dev, priv, true,
&ocelot_switchdev_blocking_nb, extack);
if (err && err != -EOPNOTSUPP)
return err;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c84c8bf2bc20..fc99ad8e4a38 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
+ status = -EINVAL;
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index b81e1487945c..51b4b25d15ad 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -969,7 +969,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_create_file:
- unregister_netdev(dev);
+ unregister_netdev(dev);
err_register_netdev:
iounmap(ioaddr);
@@ -3103,14 +3103,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG: /* Write MII PHY register. */
if (dev->if_port == PORT_TP) {
if ((data->phy_id & 0x1f) == np->phy_addr_external) {
- if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
np->advertising = data->val_in;
mdio_write(dev, data->reg_num & 0x1f,
data->val_in);
}
} else {
if ((data->phy_id & 0x1f) == np->phy_addr_external) {
- if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
np->advertising = data->val_in;
}
move_int_phy(dev, data->phy_id & 0x1f);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9cfcd5500462..0b017d4f5c08 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1101,6 +1101,8 @@ static int s2io_print_pci_mode(struct s2io_nic *nic)
* @nic: device private variable
* @link: link status (UP/DOWN) used to enable/disable continuous
* transmit interrupts
+ * @may_sleep: parameter indicates if sleeping when waiting for
+ * command complete
* Description: The function configures transmit traffic interrupts
* Return Value: SUCCESS on success and
* '-1' on failure
@@ -2743,7 +2745,7 @@ static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
}
/**
- * s2io_poll - Rx interrupt handler for NAPI support
+ * s2io_poll_msix - Rx interrupt handler for NAPI support
* @napi : pointer to the napi structure.
* @budget : The number of packets that were budgeted to be processed
* during one pass through the 'Poll" function.
@@ -3323,6 +3325,8 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
* @addr: address
* @busy_bit: bit to check for busy
* @bit_state: state to check
+ * @may_sleep: parameter indicates if sleeping when waiting for
+ * command complete
* Description: Function that waits for a command to Write into RMAC
* ADDR DATA registers to be completed and returns either success or
* error depending on whether the command was complete or not.
@@ -4868,6 +4872,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
/**
* s2io_set_multicast - entry point for multicast address enable/disable.
* @dev : pointer to the device structure
+ * @may_sleep: parameter indicates if sleeping when waiting for command
+ * complete
* Description:
* This function is a driver entry point which gets called by the kernel
* whenever multicast addresses must be enabled/disabled. This also gets
@@ -5288,7 +5294,7 @@ s2io_ethtool_set_link_ksettings(struct net_device *dev,
}
/**
- * s2io_ethtol_get_link_ksettings - Return link specific information.
+ * s2io_ethtool_get_link_ksettings - Return link specific information.
* @dev: pointer to netdev
* @cmd : pointer to the structure with parameters given by ethtool
* to return link information.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 5162b938a1ac..a3204a7ef750 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -3784,6 +3784,7 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
itable[j]);
+ return;
default:
return;
}
@@ -4884,7 +4885,7 @@ vpath_open_exit1:
}
/**
- * vxge_hw_vpath_rx_doorbell_post - Close the handle got from previous vpath
+ * vxge_hw_vpath_rx_doorbell_init - Close the handle got from previous vpath
* (vpath) open
* @vp: Handle got from previous vpath open
*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 87892bd992b1..82eef4c72f01 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,7 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+static void vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -1606,7 +1606,6 @@ static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
- enum vxge_hw_status status;
int ret = 0, vp_id, i;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
@@ -1709,14 +1708,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_stop_all_queues(vdev->ndev);
if (event == VXGE_LL_FULL_RESET) {
- status = vxge_reset_all_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: can not reset vpaths",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- }
+ vxge_reset_all_vpaths(vdev);
}
if (event == VXGE_LL_COMPL_RESET) {
@@ -1799,7 +1791,7 @@ static void vxge_reset(struct work_struct *work)
}
/**
- * vxge_poll - Receive handler when Receive Polling is used.
+ * vxge_poll_msix - Receive handler when Receive Polling is used.
* @napi: pointer to the napi structure.
* @budget: Number of packets budgeted to be processed in this iteration.
*
@@ -1969,9 +1961,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
}
/* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static void vxge_reset_all_vpaths(struct vxgedev *vdev)
{
- enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
int i;
@@ -1986,18 +1977,16 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
"vxge_hw_vpath_recover_"
"from_reset failed for vpath: "
"%d", i);
- return status;
+ return;
}
} else {
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return;
}
}
}
-
- return status;
}
/* close vpaths */
@@ -2676,11 +2665,7 @@ static int vxge_set_features(struct net_device *dev, netdev_features_t features)
/* !netif_running() ensured by vxge_fix_features() */
vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
- dev->features = features ^ NETIF_F_RXHASH;
- vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
- return -EIO;
- }
+ vxge_reset_all_vpaths(vdev);
return 0;
}
@@ -3693,10 +3678,9 @@ static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
driver_config->vpath_per_dev = 1;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- else
+ if (vxge_bVALn(vpath_mask, i, 1))
default_no_vpath++;
+
if (default_no_vpath < driver_config->vpath_per_dev)
driver_config->vpath_per_dev = default_no_vpath;
@@ -4752,7 +4736,7 @@ _exit0:
}
/**
- * vxge_rem_nic - Free the PCI device
+ * vxge_remove - Free the PCI device
* @pdev: structure containing the PCI related information of the device.
* Description: This function is called by the Pci subsystem to release a
* PCI device and free up all resource held up by the device.
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index d31772ae511d..9cff3d48acbc 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -51,7 +51,8 @@ nfp-objs += \
flower/metadata.o \
flower/offload.o \
flower/tunnel_conf.o \
- flower/qos_conf.o
+ flower/qos_conf.o \
+ flower/conntrack.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
index f0783aa9e66e..4247bca09807 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -36,7 +36,7 @@ enum nfp_net_mbox_cmsg_state {
};
/**
- * struct nfp_ccm_mbox_skb_cb - CCM mailbox specific info
+ * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
* @state: processing state (/stage) of the message
* @err: error encountered during processing if any
* @max_len: max(request_len, reply_len)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
new file mode 100644
index 000000000000..273d529d43c2
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -0,0 +1,1180 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#include "conntrack.h"
+
+const struct rhashtable_params nfp_tc_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 2,
+ .key_offset = offsetof(struct nfp_fl_ct_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+const struct rhashtable_params nfp_nft_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_nft_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 3,
+ .key_offset = offsetof(struct nfp_fl_nft_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id);
+
+/**
+ * get_hashentry() - Wrapper around hashtable lookup.
+ * @ht: hashtable where entry could be found
+ * @key: key to lookup
+ * @params: hashtable params
+ * @size: size of entry to allocate if not in table
+ *
+ * Returns an entry from a hashtable. If the entry does not exist
+ * yet, allocate the memory for it and return the new entry.
+ */
+static void *get_hashentry(struct rhashtable *ht, void *key,
+ const struct rhashtable_params params, size_t size)
+{
+ void *result;
+
+ result = rhashtable_lookup_fast(ht, key, params);
+
+ if (result)
+ return result;
+
+ result = kzalloc(size, GFP_KERNEL);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ return result;
+}
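Since kzalloc() zeroes new entries, callers can tell a freshly allocated entry from an existing one by whether its key field already holds the lookup key. A minimal sketch of the calling pattern, mirroring nfp_ct_do_tc_merge() further down (pre_cookie and post_cookie are placeholder values):

    struct nfp_fl_ct_tc_merge *m_entry;
    unsigned long new_cookie[2] = { pre_cookie, post_cookie };

    m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
                            nfp_tc_ct_merge_params, sizeof(*m_entry));
    if (IS_ERR(m_entry))
        return PTR_ERR(m_entry);

    /* A zeroed cookie pair cannot collide with real flow cookies, so a
     * match here means the entry was inserted on an earlier call.
     */
    if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
        return 0;    /* existing entry, nothing to merge */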
+
+bool is_pre_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT && !act->ct.action)
+ return true;
+ }
+ return false;
+}
+
+bool is_post_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
+ return true;
+ }
+ return false;
+}
+
+static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
+ entry2->rule->match.dissector->used_keys;
+ bool out;
+
+ /* Check the overlapping fields one by one; the parts covered by
+ * both masks must not conflict with each other.
+ */
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_control(entry1->rule, &match1);
+ flow_rule_match_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match1, match2;
+
+ flow_rule_match_basic(entry1->rule, &match1);
+ flow_rule_match_basic(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match1, match2;
+
+ flow_rule_match_ports(entry1->rule, &match1);
+ flow_rule_match_ports(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match1, match2;
+
+ flow_rule_match_eth_addrs(entry1->rule, &match1);
+ flow_rule_match_eth_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match1, match2;
+
+ flow_rule_match_vlan(entry1->rule, &match1);
+ flow_rule_match_vlan(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match1, match2;
+
+ flow_rule_match_mpls(entry1->rule, &match1);
+ flow_rule_match_mpls(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match1, match2;
+
+ flow_rule_match_tcp(entry1->rule, &match1);
+ flow_rule_match_tcp(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_ip(entry1->rule, &match1);
+ flow_rule_match_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match1, match2;
+
+ flow_rule_match_enc_keyid(entry1->rule, &match1);
+ flow_rule_match_enc_keyid(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_enc_control(entry1->rule, &match1);
+ flow_rule_match_enc_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_enc_ip(entry1->rule, &match1);
+ flow_rule_match_enc_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match1, match2;
+
+ flow_rule_match_enc_opts(entry1->rule, &match1);
+ flow_rule_match_enc_opts(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ return 0;
+
+check_failed:
+ return -EINVAL;
+}
+
+static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ enum flow_action_mangle_base htype = a_in->mangle.htype;
+ u32 offset = a_in->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
+ return -EOPNOTSUPP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == offsetof(struct iphdr, ttl) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (offset == offsetof(struct iphdr, saddr) &&
+ match.mask->src)
+ return -EOPNOTSUPP;
+ if (offset == offsetof(struct iphdr, daddr) &&
+ match.mask->dst)
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ /* for ipv6, tos and flow_lbl are in the same word */
+ if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offsetof(struct ipv6hdr, daddr) &&
+ memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
+ return -EOPNOTSUPP;
+ if (offset >= offsetof(struct ipv6hdr, daddr) &&
+ offset < sizeof(struct ipv6hdr) &&
+ memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* currently only can modify ports */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
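The round_down() calls above reflect that tc pedit mangles headers in 32-bit words, so the offset it reports for a sub-word field is the start of the containing word. A compile-time illustration (a sketch under that assumption, not part of the patch):

    /* tos is byte 1 of struct iphdr, so a pedit of tos is reported at
     * round_down(1, 4) == 0 rather than at offset 1; ttl sits at byte
     * 8, already word aligned, hence the direct comparison above.
     */
    BUILD_BUG_ON(round_down(offsetof(struct iphdr, tos), 4) != 0);
    BUILD_BUG_ON(offsetof(struct iphdr, ttl) != 8);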
+
+static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
+ struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_action_entry *act;
+ int err, i;
+
+ /* Check for pre_ct->action conflicts */
+ flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
+ if (err)
+ return err;
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+
+ /* Check for nft->action conflicts */
+ flow_action_for_each(i, act, &nft_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
+ struct flow_action_entry *ct_met;
+ struct flow_match_ct ct;
+ int i;
+
+ ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
+ if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
+ u32 *act_lbl;
+
+ act_lbl = ct_met->ct_metadata.labels;
+ flow_rule_match_ct(post_ct_entry->rule, &ct);
+ for (i = 0; i < 4; i++) {
+ if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
+ (act_lbl[i] & ct.mask->ct_labels[i]))
+ return -EINVAL;
+ }
+
+ if ((ct.key->ct_mark & ct.mask->ct_mark) ^
+ (ct_met->ct_metadata.mark & ct.mask->ct_mark))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ return 0;
+}
+
+static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
+ struct net_device *netdev)
+{
+ return 0;
+}
+
+static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_tc_merge *tc_m_entry)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_m_entry;
+ unsigned long new_cookie[3];
+ int err;
+
+ pre_ct_entry = tc_m_entry->pre_ct_parent;
+ post_ct_entry = tc_m_entry->post_ct_parent;
+
+ err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Check that the two tc flows are also compatible with
+ * the nft entry. No need to check the pre_ct and post_ct
+ * entries as that was already done during pre_merge.
+ * The nft entry does not have a netdev or chain populated, so
+ * skip this check.
+ */
+ err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_check_meta(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Combine tc_merge and nft cookies for this cookie. */
+ new_cookie[0] = tc_m_entry->cookie[0];
+ new_cookie[1] = tc_m_entry->cookie[1];
+ new_cookie[2] = nft_entry->cookie;
+ nft_m_entry = get_hashentry(&zt->nft_merge_tb,
+ &new_cookie,
+ nfp_nft_ct_merge_params,
+ sizeof(*nft_m_entry));
+
+ if (IS_ERR(nft_m_entry))
+ return PTR_ERR(nft_m_entry);
+
+ /* nft_m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ nft_m_entry->zt = zt;
+ nft_m_entry->tc_m_parent = tc_m_entry;
+ nft_m_entry->nft_parent = nft_entry;
+ nft_m_entry->tc_flower_cookie = 0;
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was
+ * created it only combined flows whose netdevs matched, so either
+ * parent's netdev can be used.
+ */
+ nft_m_entry->netdev = pre_ct_entry->netdev;
+
+ /* Add this entry to the tc_m_list and nft_flow lists */
+ list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
+ list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
+
+ /* Generate offload structure and send to nfp */
+ err = nfp_fl_ct_add_offload(nft_m_entry);
+ if (err)
+ goto err_nft_ct_offload;
+
+ err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
+ nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_ct_merge_insert;
+
+ zt->nft_merge_count++;
+
+ return err;
+
+err_nft_ct_merge_insert:
+ nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
+ nft_m_entry->netdev);
+err_nft_ct_offload:
+ list_del(&nft_m_entry->tc_merge_list);
+ list_del(&nft_m_entry->nft_flow_list);
+ kfree(nft_m_entry);
+ return err;
+}
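The three-word cookie assembled above is exactly what nfp_nft_ct_merge_params hashes: key_len is sizeof(unsigned long) * 3 and key_offset points at the cookie array, so a merged flow is keyed by the identity of all three parents. A compile-time restatement of that invariant (illustrative only):

    /* cookie[0] = pre-CT tc flow, cookie[1] = post-CT tc flow,
     * cookie[2] = nft flow; the rhashtable key spans all three words.
     */
    BUILD_BUG_ON(sizeof_field(struct nfp_fl_nft_tc_merge, cookie) !=
                 sizeof(unsigned long) * 3);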
+
+static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_flow_entry *ct_entry2)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
+ struct nfp_fl_ct_tc_merge *m_entry;
+ unsigned long new_cookie[2];
+ int err;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT) {
+ pre_ct_entry = ct_entry1;
+ post_ct_entry = ct_entry2;
+ } else {
+ post_ct_entry = ct_entry1;
+ pre_ct_entry = ct_entry2;
+ }
+
+ if (post_ct_entry->netdev != pre_ct_entry->netdev)
+ return -EINVAL;
+ /* Checks that the chain_index of the filter matches the
+ * chain_index of the GOTO action.
+ */
+ if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
+ return -EINVAL;
+
+ err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
+ if (err)
+ return err;
+
+ new_cookie[0] = pre_ct_entry->cookie;
+ new_cookie[1] = post_ct_entry->cookie;
+ m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
+ nfp_tc_ct_merge_params, sizeof(*m_entry));
+ if (IS_ERR(m_entry))
+ return PTR_ERR(m_entry);
+
+ /* m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ m_entry->zt = zt;
+ m_entry->post_ct_parent = post_ct_entry;
+ m_entry->pre_ct_parent = pre_ct_entry;
+
+ /* Add this entry to the pre_ct and post_ct lists */
+ list_add(&m_entry->post_ct_list, &post_ct_entry->children);
+ list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
+ INIT_LIST_HEAD(&m_entry->children);
+
+ err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ goto err_ct_tc_merge_insert;
+ zt->tc_merge_count++;
+
+ /* Merge with existing nft flows */
+ list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
+ list_node) {
+ nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
+ }
+
+ return 0;
+
+err_ct_tc_merge_insert:
+ list_del(&m_entry->post_ct_list);
+ list_del(&m_entry->pre_ct_list);
+ kfree(m_entry);
+ return err;
+}
+
+static struct
+nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
+ u16 zone, bool wildcarded)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ if (wildcarded && priv->ct_zone_wc)
+ return priv->ct_zone_wc;
+
+ if (!wildcarded) {
+ zt = get_hashentry(&priv->ct_zone_table, &zone,
+ nfp_zone_table_params, sizeof(*zt));
+
+ /* If priv is set this is an existing entry, just return it */
+ if (IS_ERR(zt) || zt->priv)
+ return zt;
+ } else {
+ zt = kzalloc(sizeof(*zt), GFP_KERNEL);
+ if (!zt)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ zt->zone = zone;
+ zt->priv = priv;
+ zt->nft = NULL;
+
+ /* Init the various hash tables and lists */
+ INIT_LIST_HEAD(&zt->pre_ct_list);
+ INIT_LIST_HEAD(&zt->post_ct_list);
+ INIT_LIST_HEAD(&zt->nft_flows_list);
+
+ err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
+ if (err)
+ goto err_tc_merge_tb_init;
+
+ err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_merge_tb_init;
+
+ if (wildcarded) {
+ priv->ct_zone_wc = zt;
+ } else {
+ err = rhashtable_insert_fast(&priv->ct_zone_table,
+ &zt->hash_node,
+ nfp_zone_table_params);
+ if (err)
+ goto err_zone_insert;
+ }
+
+ return zt;
+
+err_zone_insert:
+ rhashtable_destroy(&zt->nft_merge_tb);
+err_nft_merge_tb_init:
+ rhashtable_destroy(&zt->tc_merge_tb);
+err_tc_merge_tb_init:
+ kfree(zt);
+ return ERR_PTR(err);
+}
+
+static struct
+nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ bool is_nft, struct netlink_ext_ack *extack)
+{
+ struct nf_flow_match *nft_match = NULL;
+ struct nfp_fl_ct_flow_entry *entry;
+ struct nfp_fl_ct_map_entry *map;
+ struct flow_action_entry *act;
+ int err, i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
+ if (!entry->rule) {
+ err = -ENOMEM;
+ goto err_pre_ct_rule;
+ }
+
+ /* nft flows get destroyed after the callback returns, so we need
+ * to do a full copy instead of just taking a reference.
+ */
+ if (is_nft) {
+ nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
+ if (!nft_match) {
+ err = -ENOMEM;
+ goto err_pre_ct_act;
+ }
+ memcpy(&nft_match->dissector, flow->rule->match.dissector,
+ sizeof(nft_match->dissector));
+ memcpy(&nft_match->mask, flow->rule->match.mask,
+ sizeof(nft_match->mask));
+ memcpy(&nft_match->key, flow->rule->match.key,
+ sizeof(nft_match->key));
+ entry->rule->match.dissector = &nft_match->dissector;
+ entry->rule->match.mask = &nft_match->mask;
+ entry->rule->match.key = &nft_match->key;
+ } else {
+ entry->rule->match.dissector = flow->rule->match.dissector;
+ entry->rule->match.mask = flow->rule->match.mask;
+ entry->rule->match.key = flow->rule->match.key;
+ }
+
+ entry->zt = zt;
+ entry->netdev = netdev;
+ entry->cookie = flow->cookie;
+ entry->chain_index = flow->common.chain_index;
+ entry->tun_offset = NFP_FL_CT_NO_TUN;
+
+ /* Copy over the action data. We do not get a handle to the
+ * original tcf_action data, and the flow objects get destroyed, so
+ * we cannot simply save a pointer to it either; the data has to be
+ * copied over.
+ */
+ entry->rule->action.num_entries = flow->rule->action.num_entries;
+ flow_action_for_each(i, act, &flow->rule->action) {
+ struct flow_action_entry *new_act;
+
+ new_act = &entry->rule->action.entries[i];
+ memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* Tunnel encap is a special case: the tunnel info needs to be
+ * allocated and copied.
+ */
+ if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
+ struct ip_tunnel_info *tun = act->tunnel;
+ size_t tun_size = sizeof(*tun) + tun->options_len;
+
+ new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
+ if (!new_act->tunnel) {
+ err = -ENOMEM;
+ goto err_pre_ct_tun_cp;
+ }
+ entry->tun_offset = i;
+ }
+ }
+
+ INIT_LIST_HEAD(&entry->children);
+
+ /* Now add a ct map entry to flower-priv */
+ map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params, sizeof(*map));
+ if (IS_ERR(map)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry creation failed");
+ err = -ENOMEM;
+ goto err_ct_flow_insert;
+ }
+ map->cookie = flow->cookie;
+ map->ct_entry = entry;
+ err = rhashtable_insert_fast(&zt->priv->ct_map_table,
+ &map->hash_node,
+ nfp_ct_map_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry table add failed");
+ goto err_map_insert;
+ }
+
+ return entry;
+
+err_map_insert:
+ kfree(map);
+err_ct_flow_insert:
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+err_pre_ct_tun_cp:
+ kfree(nft_match);
+err_pre_ct_act:
+ kfree(entry->rule);
+err_pre_ct_rule:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_entry->zt;
+
+ /* Flow is in HW, need to delete */
+ if (m_entry->tc_flower_cookie) {
+ err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
+ m_entry->netdev);
+ if (err)
+ return;
+ }
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
+ &m_entry->hash_node,
+ nfp_nft_ct_merge_params));
+ zt->nft_merge_count--;
+ list_del(&m_entry->tc_merge_list);
+ list_del(&m_entry->nft_flow_list);
+
+ kfree(m_entry);
+}
+
+static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
+{
+ struct nfp_fl_nft_tc_merge *m_entry, *tmp;
+
+ /* These merge entries are part of two lists: one belongs to an
+ * nft_entry and the other to a tc_merge structure. Iterate
+ * through the relevant list and clean up the entries.
+ */
+
+ if (is_nft_flow) {
+ /* Need to iterate through list of nft_flow entries */
+ struct nfp_fl_ct_flow_entry *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ nft_flow_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ } else {
+ /* Need to iterate through list of tc_merged_flow entries */
+ struct nfp_fl_ct_tc_merge *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ tc_merge_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ }
+}
+
+static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_ent->zt;
+ err = rhashtable_remove_fast(&zt->tc_merge_tb,
+ &m_ent->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ pr_warn("WARNING: could not remove merge_entry from hashtable\n");
+ zt->tc_merge_count--;
+ list_del(&m_ent->post_ct_list);
+ list_del(&m_ent->pre_ct_list);
+
+ if (!list_empty(&m_ent->children))
+ nfp_free_nft_merge_children(m_ent, false);
+ kfree(m_ent);
+}
+
+static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
+{
+ struct nfp_fl_ct_tc_merge *m_ent, *tmp;
+
+ switch (entry->type) {
+ case CT_TYPE_PRE_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
+{
+ list_del(&entry->list_node);
+
+ if (!list_empty(&entry->children)) {
+ if (entry->type == CT_TYPE_NFT)
+ nfp_free_nft_merge_children(entry, true);
+ else
+ nfp_free_tc_merge_children(entry);
+ }
+
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+
+ if (entry->type == CT_TYPE_NFT) {
+ struct nf_flow_match *nft_match;
+
+ nft_match = container_of(entry->rule->match.dissector,
+ struct nf_flow_match, dissector);
+ kfree(nft_match);
+ }
+
+ kfree(entry->rule);
+ kfree(entry);
+}
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id)
+{
+ struct flow_action_entry *act = NULL;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == act_id)
+ return act;
+ }
+ return NULL;
+}
+
+static void
+nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_zone_entry *zt_src,
+ struct nfp_fl_ct_zone_entry *zt_dst)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
+ struct list_head *ct_list;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT)
+ ct_list = &zt_src->post_ct_list;
+ else if (ct_entry1->type == CT_TYPE_POST_CT)
+ ct_list = &zt_src->pre_ct_list;
+ else
+ return;
+
+ list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
+ list_node) {
+ nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
+ }
+}
+
+static void
+nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_tc_merge *tc_merge_entry;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
+ rhashtable_walk_start(&iter);
+ while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(tc_merge_entry))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
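The stop/start bracketing around the merge call is the standard rhashtable walk idiom: rhashtable_walk_next() runs inside an RCU read-side section, while nfp_ct_do_nft_merge() allocates with GFP_KERNEL and inserts into another table, both of which may sleep. A condensed sketch of the idiom (do_sleeping_work() is a hypothetical stand-in):

    rhashtable_walk_start(&iter);          /* enters RCU read section */
    while ((entry = rhashtable_walk_next(&iter)) != NULL) {
        if (IS_ERR(entry))                 /* -EAGAIN: concurrent resize */
            continue;
        rhashtable_walk_stop(&iter);       /* leave RCU; sleeping is OK */
        do_sleeping_work(entry);
        rhashtable_walk_start(&iter);
    }
    rhashtable_walk_stop(&iter);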
+
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *ct_act, *ct_goto;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
+ if (!ct_act) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack action empty in conntrack offload");
+ return -EOPNOTSUPP;
+ }
+
+ ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
+ if (!ct_goto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack requires ACTION_GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ if (!zt->nft) {
+ zt->nft = ct_act->ct.flow_table;
+ err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not register nft_callback");
+ return err;
+ }
+ }
+
+ /* Add entry to pre_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_PRE_CT;
+ ct_entry->chain_index = ct_goto->chain_index;
+ list_add(&ct_entry->list_node, &zt->pre_ct_list);
+ zt->pre_ct_count++;
+
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+
+ /* Need to check and merge with tables in the wc_zone as well */
+ if (priv->ct_zone_wc)
+ nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
+
+ return 0;
+}
+
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ bool wildcarded = false;
+ struct flow_match_ct ct;
+
+ flow_rule_match_ct(rule, &ct);
+ if (!ct.mask->ct_zone) {
+ wildcarded = true;
+ } else if (ct.mask->ct_zone != U16_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: partially wildcarded ct_zone is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ /* Add entry to post_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+
+ ct_entry->type = CT_TYPE_POST_CT;
+ ct_entry->chain_index = flow->common.chain_index;
+ list_add(&ct_entry->list_node, &zt->post_ct_list);
+ zt->post_ct_count++;
+
+ if (wildcarded) {
+ /* Iterate through all zone tables, look for possible merges
+ * with pre_ct entries and perform them.
+ */
+ struct rhashtable_iter iter;
+ struct nfp_fl_ct_zone_entry *zone_table;
+
+ rhashtable_walk_enter(&priv->ct_zone_table, &iter);
+ rhashtable_walk_start(&iter);
+ while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(zone_table))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ } else {
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+ }
+
+ return 0;
+}
+
+static int
+nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
+{
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct netlink_ext_ack *extack = NULL;
+
+ ASSERT_RTNL();
+
+ extack = flow->common.extack;
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ /* Netfilter can request offload multiple times for the same
+ * flow - protect against adding duplicates.
+ */
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (!ct_map_ent) {
+ ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_NFT;
+ list_add(&ct_entry->list_node, &zt->nft_flows_list);
+ zt->nft_flows_count++;
+ nfp_ct_merge_nft_with_tc(ct_entry, zt);
+ }
+ return 0;
+ case FLOW_CLS_DESTROY:
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ return nfp_fl_ct_del_flow(ct_map_ent);
+ case FLOW_CLS_STATS:
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *flow = type_data;
+ struct nfp_fl_ct_zone_entry *zt = cb_priv;
+ int err = -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ rtnl_lock();
+ err = nfp_fl_ct_offload_nft_flow(zt, flow);
+ rtnl_unlock();
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static void
+nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+
+ list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
+ list_node) {
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
+ &nft_entry->cookie,
+ nfp_ct_map_params);
+ nfp_fl_ct_del_flow(ct_map_ent);
+ }
+}
+
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct rhashtable *m_table;
+
+ if (!ct_map_ent)
+ return -ENOENT;
+
+ zt = ct_map_ent->ct_entry->zt;
+ ct_entry = ct_map_ent->ct_entry;
+ m_table = &zt->priv->ct_map_table;
+
+ switch (ct_entry->type) {
+ case CT_TYPE_PRE_CT:
+ zt->pre_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+
+ /* If this is the last pre_ct_rule it means that it is
+ * very likely that the nft table will be cleaned up next,
+ * as this happens on the removal of the last act_ct flow.
+ * However we cannot deregister the callback on the removal
+ * of the last nft flow as this runs into a deadlock situation.
+ * So deregister the callback on removal of the last pre_ct flow
+ * and remove any remaining nft flow entries. We also cannot
+ * save this state and delete the callback later since the
+ * nft table would already have been freed at that time.
+ */
+ if (!zt->pre_ct_count) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ nfp_fl_ct_clean_nft_entries(zt);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ zt->post_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+ break;
+ case CT_TYPE_NFT:
+ zt->nft_flows_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
+ kfree(ct_map_ent);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
new file mode 100644
index 000000000000..170b6cdb8cd0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#ifndef __NFP_FLOWER_CONNTRACK_H__
+#define __NFP_FLOWER_CONNTRACK_H__ 1
+
+#include <net/netfilter/nf_flow_table.h>
+#include "main.h"
+
+#define NFP_FL_CT_NO_TUN 0xff
+
+#define COMPARE_UNMASKED_FIELDS(__match1, __match2, __out) \
+ do { \
+ typeof(__match1) _match1 = (__match1); \
+ typeof(__match2) _match2 = (__match2); \
+ bool *_out = (__out); \
+ int i, size = sizeof(*(_match1).key); \
+ char *k1, *m1, *k2, *m2; \
+ *_out = false; \
+ k1 = (char *)_match1.key; \
+ m1 = (char *)_match1.mask; \
+ k2 = (char *)_match2.key; \
+ m2 = (char *)_match2.mask; \
+ for (i = 0; i < size; i++) \
+ if ((k1[i] & m1[i] & m2[i]) ^ \
+ (k2[i] & m1[i] & m2[i])) { \
+ *_out = true; \
+ break; \
+ } \
+ } while (0)
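A worked example of what the byte loop flags, with hypothetical IPv4 destination matches:

    /* rule 1: key 10.0.0.0, mask 255.255.255.0   (10.0.0.0/24)
     * rule 2: key 10.1.0.0, mask 255.255.0.0     (10.1.0.0/16)
     *
     * Both masks cover byte 1, where the keys differ (0 vs 1), so
     * (k1[1] & m1[1] & m2[1]) ^ (k2[1] & m1[1] & m2[1]) is non-zero and
     * *_out becomes true: no packet can match both rules, so the flows
     * must not be merged.
     */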
+
+extern const struct rhashtable_params nfp_zone_table_params;
+extern const struct rhashtable_params nfp_ct_map_params;
+extern const struct rhashtable_params nfp_tc_ct_merge_params;
+extern const struct rhashtable_params nfp_nft_ct_merge_params;
+
+/**
+ * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
+ * @zone: The zone number, used as lookup key in hashtable
+ * @hash_node: Used by the hashtable
+ * @priv: Pointer to nfp_flower_priv data
+ * @nft: Pointer to nf_flowtable for this zone
+ *
+ * @pre_ct_list: The pre_ct_list of nfp_fl_ct_flow_entry entries
+ * @pre_ct_count: Keep count of the number of pre_ct entries
+ *
+ * @post_ct_list: The post_ct_list of nfp_fl_ct_flow_entry entries
+ * @post_ct_count: Keep count of the number of post_ct entries
+ *
+ * @tc_merge_tb: The table of merged tc flows
+ * @tc_merge_count: Keep count of the number of merged tc entries
+ *
+ * @nft_flows_list: The list of nft related nfp_fl_ct_flow_entry entries
+ * @nft_flows_count: Keep count of the number of nft_flow entries
+ *
+ * @nft_merge_tb: The table of merged tc+nft flows
+ * @nft_merge_count: Keep count of the number of merged tc+nft entries
+ */
+struct nfp_fl_ct_zone_entry {
+ u16 zone;
+ struct rhash_head hash_node;
+
+ struct nfp_flower_priv *priv;
+ struct nf_flowtable *nft;
+
+ struct list_head pre_ct_list;
+ unsigned int pre_ct_count;
+
+ struct list_head post_ct_list;
+ unsigned int post_ct_count;
+
+ struct rhashtable tc_merge_tb;
+ unsigned int tc_merge_count;
+
+ struct list_head nft_flows_list;
+ unsigned int nft_flows_count;
+
+ struct rhashtable nft_merge_tb;
+ unsigned int nft_merge_count;
+};
+
+enum ct_entry_type {
+ CT_TYPE_PRE_CT,
+ CT_TYPE_NFT,
+ CT_TYPE_POST_CT,
+};
+
+/**
+ * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @list_node: Used by the list
+ * @chain_index: Chain index of the original flow
+ * @netdev: netdev structure.
+ * @type: Type of pre-entry from enum ct_entry_type
+ * @zt: Reference to the zone table this belongs to
+ * @children: List of tc_merge flows this flow forms part of
+ * @rule: Reference to the original TC flow rule
+ * @stats: Used to cache stats for updating
+ * @tun_offset: Used to indicate tunnel action offset in action list
+ */
+struct nfp_fl_ct_flow_entry {
+ unsigned long cookie;
+ struct list_head list_node;
+ u32 chain_index;
+ enum ct_entry_type type;
+ struct net_device *netdev;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head children;
+ struct flow_rule *rule;
+ struct flow_stats stats;
+ u8 tun_offset; /* Set to NFP_FL_CT_NO_TUN if no tun */
+};
+
+/**
+ * struct nfp_fl_ct_tc_merge - Merge of two flows from tc
+ * @cookie: Flow cookie, combination of pre and post ct cookies
+ * @hash_node: Used by the hashtable
+ * @pre_ct_list: This entry is part of a pre_ct_list
+ * @post_ct_list: This entry is part of a post_ct_list
+ * @zt: Reference to the zone table this belongs to
+ * @pre_ct_parent: The pre_ct_parent
+ * @post_ct_parent: The post_ct_parent
+ * @children: List of nft merged entries
+ */
+struct nfp_fl_ct_tc_merge {
+ unsigned long cookie[2];
+ struct rhash_head hash_node;
+ struct list_head pre_ct_list;
+ struct list_head post_ct_list;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct nfp_fl_ct_flow_entry *pre_ct_parent;
+ struct nfp_fl_ct_flow_entry *post_ct_parent;
+ struct list_head children;
+};
+
+/**
+ * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
+ * @netdev: Ingress netdev
+ * @cookie: Flow cookie, combination of tc_merge and nft cookies
+ * @hash_node: Used by the hashtable
+ * @zt: Reference to the zone table this belongs to
+ * @nft_flow_list: This entry is part of a nft_flows_list
+ * @tc_merge_list: This entry is part of a ct_merge_list
+ * @tc_m_parent: The tc_merge parent
+ * @nft_parent: The nft_entry parent
+ * @tc_flower_cookie: The cookie of the flow offloaded to the nfp
+ * @flow_pay: Reference to the offloaded flow struct
+ */
+struct nfp_fl_nft_tc_merge {
+ struct net_device *netdev;
+ unsigned long cookie[3];
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head nft_flow_list;
+ struct list_head tc_merge_list;
+ struct nfp_fl_ct_tc_merge *tc_m_parent;
+ struct nfp_fl_ct_flow_entry *nft_parent;
+ unsigned long tc_flower_cookie;
+ struct nfp_fl_payload *flow_pay;
+};
+
+/**
+ * struct nfp_fl_ct_map_entry - Map between flow cookie and specific ct_flow
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @hash_node: Used by the hashtable
+ * @ct_entry: Pointer to corresponding ct_entry
+ */
+struct nfp_fl_ct_map_entry {
+ unsigned long cookie;
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+};
+
+bool is_pre_ct_flow(struct flow_cls_offload *flow);
+bool is_post_ct_flow(struct flow_cls_offload *flow);
+
+/**
+ * nfp_fl_ct_handle_pre_ct() - Handles -trk conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other +trk+est entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+/**
+ * nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other -trk entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+
+/**
+ * nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
+ * @entry: Flow entry to cleanup
+ */
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry);
+
+/**
+ * nfp_fl_ct_del_flow() - Handle flow_del callbacks for conntrack
+ * @ct_map_ent: ct map entry for the flow that needs deleting
+ */
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
+
+/**
+ * nfp_fl_ct_handle_nft_flow() - Handle flower flow callbacks for nft table
+ * @type: Type provided by callback
+ * @type_data: Callback data
+ * @cb_priv: Pointer to data provided when registering the callback, in this
+ * case it's the zone table.
+ */
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 31377923ea3d..0fbd682ccf72 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -193,6 +193,9 @@ struct nfp_fl_internal_ports {
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
+ * @ct_zone_table: Hash table used to store the different zones
+ * @ct_zone_wc: Special zone entry for wildcarded zone matches
+ * @ct_map_table: Hash table used to reference ct flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -227,6 +230,9 @@ struct nfp_flower_priv {
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
struct rhashtable merge_table;
+ struct rhashtable ct_zone_table;
+ struct nfp_fl_ct_zone_entry *ct_zone_wc;
+ struct rhashtable ct_map_table;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 327bb56b3ef5..621113650a9b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -9,6 +9,7 @@
#include <net/pkt_cls.h>
#include "cmsg.h"
+#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"
@@ -496,6 +497,20 @@ const struct rhashtable_params merge_table_params = {
.key_len = sizeof(u64),
};
+const struct rhashtable_params nfp_zone_table_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
+ .key_len = sizeof(u16),
+ .key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
+ .automatic_shrinking = false,
+};
+
+const struct rhashtable_params nfp_ct_map_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
+ .automatic_shrinking = true,
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -516,6 +531,14 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
+ err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
+ if (err)
+ goto err_free_merge_table;
+
+ err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
+ if (err)
+ goto err_free_ct_zone_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -523,7 +546,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_merge_table;
+ goto err_free_ct_map_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -560,6 +583,10 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_ct_map_table:
+ rhashtable_destroy(&priv->ct_map_table);
+err_free_ct_zone_table:
+ rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
@@ -569,6 +596,100 @@ err_free_flow_table:
return -ENOMEM;
}
+static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
+{
+ if (!zt)
+ return;
+
+ if (!list_empty(&zt->pre_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (!list_empty(&zt->post_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (zt->nft) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ }
+
+ if (!list_empty(&zt->nft_flows_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ rhashtable_free_and_destroy(&zt->tc_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&zt->nft_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+
+ kfree(zt);
+}
+
+static void nfp_free_zone_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_zone_entry *zt = ptr;
+
+ nfp_zone_table_entry_destroy(zt);
+}
+
+static void nfp_free_map_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_map_entry *map = ptr;
+
+ if (!map)
+ return;
+
+ kfree(map);
+}
+
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -582,6 +703,12 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->merge_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->ct_zone_table,
+ nfp_free_zone_table_entry, NULL);
+ nfp_zone_table_entry_destroy(priv->ct_zone_wc);
+
+ rhashtable_free_and_destroy(&priv->ct_map_table,
+ nfp_free_map_table_entry, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e95969c462e4..2406d33356ad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -7,6 +7,7 @@
#include "cmsg.h"
#include "main.h"
+#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
@@ -1276,6 +1277,20 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return 0;
}
+static bool offload_pre_check(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
+ return false;
+
+ if (flow->common.chain_index)
+ return false;
+
+ return true;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -1302,6 +1317,15 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ if (is_pre_ct_flow(flow))
+ return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
+
+ if (is_post_ct_flow(flow))
+ return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
+
+ if (!offload_pre_check(flow))
+ return -EOPNOTSUPP;
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -1481,6 +1505,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
@@ -1490,6 +1515,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ /* Check ct_map_table */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent) {
+ err = nfp_fl_ct_del_flow(ct_map_ent);
+ return err;
+ }
+
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
@@ -1646,9 +1679,10 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
+ struct flow_cls_common_offload *common = type_data;
struct nfp_repr *repr = cb_priv;
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
+ if (!tc_can_offload_extack(repr->netdev, common->extack))
return -EOPNOTSUPP;
switch (type) {
@@ -1746,10 +1780,6 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct flow_cls_offload *flower = type_data;
-
- if (flower->common.chain_index)
- return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index d19c02e99114..ab70179728f6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -21,7 +21,7 @@
#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
/**
- * struct nfp_tun_pre_run_rule - rule matched before decap
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
* @flags: options for the rule offset
* @port_idx: index of destination MAC address for the rule
* @vlan_tci: VLAN info associated with MAC
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index eeb30680b4dc..5dfa4799c34f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1819,7 +1819,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct xdp_buff xdp;
int idx;
- rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
@@ -2036,7 +2035,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (!nfp_net_xdp_complete(tx_ring))
pkts_polled = budget;
}
- rcu_read_unlock();
return pkts_polled;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index b3cabc274121..3b8e675087de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -103,6 +103,7 @@ nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
case NFP_PORT_PF_PORT:
case NFP_PORT_VF_PORT:
nfp_repr_vnic_get_stats64(repr->port, stats);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 94994a939277..d7ac0307797f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -905,8 +905,7 @@ area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
return;
/* Move to front of LRU */
- list_del(&cache->entry);
- list_add(&cache->entry, &cpp->area_cache_list);
+ list_move(&cache->entry, &cpp->area_cache_list);
mutex_unlock(&cpp->area_cache_mutex);
}
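
list_move() is the single-call form of the list_del()/list_add() pair it replaces above; the semantics are unchanged. A minimal userspace model of the move-to-front LRU idiom, in the style of <linux/list.h>:

#include <stdio.h>

/* Minimal circular doubly-linked list, enough to show why list_move()
 * replaces the open-coded list_del()/list_add() pair. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

/* list_move(): unlink and re-insert at the front in one step,
 * the LRU "move to front" used by area_cache_put(). */
static void list_move(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add(e, head);
}

int main(void)
{
	struct list_head lru, a, b;

	list_init(&lru);
	list_add(&a, &lru);
	list_add(&b, &lru);	/* front: b, then a */
	list_move(&a, &lru);	/* front: a, then b */
	printf("front is %s\n", lru.next == &a ? "a" : "b");
	return 0;
}
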
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index d4e02542e2e9..e2e5fd003ad6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -24,7 +24,7 @@
#define NFFW_FWID_ALL 255
-/**
+/*
* NFFW_INFO_VERSION history:
* 0: This was never actually used (before versioning), but it refers to
* the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index a6861df9904f..2d097dcb7bda 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1224,7 +1224,6 @@ static int nixge_of_get_resources(struct platform_device *pdev)
const struct of_device_id *of_id;
enum nixge_version version;
struct resource *ctrlres;
- struct resource *dmares;
struct net_device *ndev;
struct nixge_priv *priv;
@@ -1236,12 +1235,9 @@ static int nixge_of_get_resources(struct platform_device *pdev)
version = (enum nixge_version)of_id->data;
if (version <= NIXGE_V2)
- dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
else
- dmares = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dma");
-
- priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma");
if (IS_ERR(priv->dma_regs)) {
netdev_err(ndev, "failed to map dma regs\n");
return PTR_ERR(priv->dma_regs);
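
Both devm helpers introduced above return ERR_PTR-encoded pointers, which is why the existing IS_ERR()/PTR_ERR() handling keeps working unchanged. A userspace re-implementation of that encoding, to show how one pointer can carry either a valid mapping or a negative errno:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* The kernel reserves the top 4095 values of the address space for
 * error codes, so a pointer is "an error" when it falls in that band. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stand-in for devm_platform_ioremap_resource_byname() */
static void *map_regs(int fail)
{
	static int regs;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&regs;
}

int main(void)
{
	void *dma_regs = map_regs(1);

	if (IS_ERR(dma_regs))	/* mirrors the dma_regs check above */
		printf("failed to map dma regs: %ld\n", PTR_ERR(dma_regs));
	return 0;
}
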
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index a6823c4d355d..108f312bc542 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -596,8 +596,6 @@ struct pch_gbe_adapter {
#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
-extern const char pch_driver_version[];
-
/* pch_gbe_main.c */
int pch_gbe_up(struct pch_gbe_adapter *adapter);
void pch_gbe_down(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index a58f14aca10c..660b07cb5b92 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -8,6 +8,8 @@
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
+static const char pch_driver_version[] = "1.01";
+
/*
* pch_gbe_stats - Stats item information
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 334af49e5add..e351f3d1608f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -8,15 +8,16 @@
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
+
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_pch.h>
#include <linux/gpio.h>
-#define DRV_VERSION "1.01"
-const char pch_driver_version[] = DRV_VERSION;
-
#define PCH_GBE_MAR_ENTRIES 16
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
@@ -97,8 +98,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
-#define MINNOW_PHY_RESET_GPIO 13
-
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
@@ -108,7 +107,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
unsigned int offset;
- u16 *hi, *id;
+ u16 hi, id;
u32 lo;
if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
@@ -119,14 +118,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
return 0;
- hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
- id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
-
- memcpy(&lo, &hi[1], sizeof(lo));
+ hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
+ lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
+ id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);
- return (uid_hi == *hi &&
- uid_lo == lo &&
- seqid == *id);
+ return (uid_hi == hi && uid_lo == lo && seqid == id);
}
static void
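
The rewritten pch_ptp_match() above replaces u16* casts into packet data, which were both endian-dependent and potentially unaligned, with get_unaligned_be16()/get_unaligned_be32(). Byte-wise loads with the same semantics, shown here purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same contract as the kernel's get_unaligned_be16()/be32(): safe at
 * any alignment and independent of host endianness, unlike casting
 * packet bytes to u16* and dereferencing. */
static uint16_t be16_load(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static uint32_t be32_load(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | p[3];
}

int main(void)
{
	/* PTP source UUID bytes as they appear on the wire */
	const uint8_t pkt[] = { 0x12, 0x34, 0xde, 0xad, 0xbe, 0xef };

	printf("hi=%#x lo=%#x\n", (unsigned)be16_load(pkt),
	       (unsigned)be32_load(pkt + 2));
	return 0;
}
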
@@ -136,7 +132,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
struct pci_dev *pdev;
u64 ns;
u32 hi, lo, val;
- u16 uid, seq;
if (!adapter->hwts_rx_en)
return;
@@ -152,10 +147,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
lo = pch_src_uuid_lo_read(pdev);
hi = pch_src_uuid_hi_read(pdev);
- uid = hi & 0xffff;
- seq = (hi >> 16) & 0xffff;
-
- if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+ if (!pch_ptp_match(skb, hi, lo, hi >> 16))
goto out;
ns = pch_rx_snap_read(pdev);
@@ -298,15 +290,12 @@ static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
* @reg: Pointer of register
* @bit: Busy bit
*/
-static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
+static void pch_gbe_wait_clr_bit(void __iomem *reg, u32 bit)
{
u32 tmp;
/* wait busy */
- tmp = 1000;
- while ((ioread32(reg) & bit) && --tmp)
- cpu_relax();
- if (!tmp)
+ if (readx_poll_timeout_atomic(ioread32, reg, tmp, !(tmp & bit), 0, 10))
pr_err("Error: busy bit is not cleared\n");
}
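
readx_poll_timeout_atomic(), used above and again in pch_gbe_mac_ctrl_miim() below, packages the open-coded "re-read until the bit changes or give up" loop. A simplified userspace approximation of its contract, assuming a monotonic clock (the real macro also delays between reads and samples the register one last time after the timeout):

#include <stdio.h>
#include <errno.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Simplified model of readx_poll_timeout_atomic(): re-read a register
 * until the busy bit clears or timeout_us elapses. Returns 0 on
 * success and -ETIMEDOUT on failure, like the kernel helper. */
static int poll_bit_clear(volatile unsigned int *reg, unsigned int bit,
			  long long timeout_us)
{
	long long deadline = now_us() + timeout_us;

	while (*reg & bit) {
		if (now_us() > deadline)
			return (*reg & bit) ? -ETIMEDOUT : 0;
	}
	return 0;
}

int main(void)
{
	volatile unsigned int fake_reg = 0;	/* busy bit already clear */

	printf("rc=%d\n", poll_bit_clear(&fake_reg, 0x1, 10));
	return 0;
}
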
@@ -490,18 +479,13 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
u16 data)
{
struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
- u32 data_out = 0;
- unsigned int i;
unsigned long flags;
+ u32 data_out;
spin_lock_irqsave(&hw->miim_lock, flags);
- for (i = 100; i; --i) {
- if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
- break;
- udelay(20);
- }
- if (i == 0) {
+ if (readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
+ data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000)) {
netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
spin_unlock_irqrestore(&hw->miim_lock, flags);
return 0; /* No way to indicate timeout error */
@@ -509,12 +493,8 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
(addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
dir | data), &hw->reg->MIIM);
- for (i = 0; i < 100; i++) {
- udelay(20);
- data_out = ioread32(&hw->reg->MIIM);
- if ((data_out & PCH_GBE_MIIM_OPER_READY))
- break;
- }
+ readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
+ data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000);
spin_unlock_irqrestore(&hw->miim_lock, flags);
netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
@@ -2532,9 +2512,13 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->hw.back = adapter;
adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+
adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
- if (adapter->pdata && adapter->pdata->platform_init)
- adapter->pdata->platform_init(pdev);
+ if (adapter->pdata && adapter->pdata->platform_init) {
+ ret = adapter->pdata->platform_init(pdev);
+ if (ret)
+ goto err_free_netdev;
+ }
adapter->ptp_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
@@ -2624,26 +2608,45 @@ err_free_netdev:
return ret;
}
+static void pch_gbe_gpio_remove_table(void *table)
+{
+ gpiod_remove_lookup_table(table);
+}
+
+static int pch_gbe_gpio_add_table(struct device *dev, void *table)
+{
+ gpiod_add_lookup_table(table);
+ return devm_add_action_or_reset(dev, pch_gbe_gpio_remove_table, table);
+}
+
+static struct gpiod_lookup_table pch_gbe_minnow_gpio_table = {
+ .dev_id = "0000:02:00.1",
+ .table = {
+ GPIO_LOOKUP("sch_gpio.33158", 13, NULL, GPIO_ACTIVE_LOW),
+ {}
+ },
+};
+
/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
* ensure it is awake for probe and init. Request the line and reset the PHY.
*/
static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
{
- unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
- unsigned gpio = MINNOW_PHY_RESET_GPIO;
+ struct gpio_desc *gpiod;
int ret;
- ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
- "minnow_phy_reset");
- if (ret) {
- dev_err(&pdev->dev,
- "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+ ret = pch_gbe_gpio_add_table(&pdev->dev, &pch_gbe_minnow_gpio_table);
+ if (ret)
return ret;
- }
- gpio_set_value(gpio, 0);
+ gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
+ "Can't request PHY reset GPIO line\n");
+
+ gpiod_set_value(gpiod, 1);
usleep_range(1250, 1500);
- gpio_set_value(gpio, 1);
+ gpiod_set_value(gpiod, 0);
usleep_range(1250, 1500);
return ret;
@@ -2722,7 +2725,6 @@ module_pci_driver(pch_gbe_driver);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
/* pch_gbe_main.c */
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 5f8b0bb3af6e..202973a82712 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6b5ddb07ee83..98f430905ffa 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -110,6 +110,9 @@ config QED_RDMA
config QED_ISCSI
bool
+config QED_NVMETCP
+ bool
+
config QED_FCOE
bool
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 08f9477d2ee8..35ec9aab3dc7 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1685,6 +1685,7 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
break;
case NETXEN_NIC_RESPONSE_DESC:
netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+ goto skip;
default:
goto skip;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7e6bac85495d..344ea1143454 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 8251755ec18c..0d9c2fe0245d 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -28,6 +28,11 @@ qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_OOO) += qed_ooo.o
+qed-$(CONFIG_QED_NVMETCP) += \
+ qed_nvmetcp.o \
+ qed_nvmetcp_fw_funcs.o \
+ qed_nvmetcp_ip_services.o
+
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
qed_rdma.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a20cb8a0c377..b590c70539b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -49,6 +49,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
+#define QED_LLH_DONT_CARE 0
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -200,6 +202,7 @@ enum qed_pci_personality {
QED_PCI_ETH,
QED_PCI_FCOE,
QED_PCI_ISCSI,
+ QED_PCI_NVMETCP,
QED_PCI_ETH_ROCE,
QED_PCI_ETH_IWARP,
QED_PCI_ETH_RDMA,
@@ -239,6 +242,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
+ QED_NVMETCP_CQ,
QED_ISCSI_CQ,
QED_FCOE_CQ,
QED_VF_L2_QUE,
@@ -284,6 +288,8 @@ struct qed_hw_info {
((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ISCSI)
+#define QED_IS_NVMETCP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_NVMETCP)
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
@@ -592,6 +598,7 @@ struct qed_hwfn {
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
+ struct qed_nvmetcp_info *p_nvmetcp_info;
struct qed_fcoe_info *p_fcoe_info;
struct qed_pf_params pf_params;
@@ -828,6 +835,7 @@ struct qed_dev {
struct qed_eth_cb_ops *eth;
struct qed_fcoe_cb_ops *fcoe;
struct qed_iscsi_cb_ops *iscsi;
+ struct qed_nvmetcp_cb_ops *nvmetcp;
} protocol_ops;
void *ops_cookie;
@@ -999,4 +1007,10 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_clear_all_filters(struct qed_dev *cdev);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0a22f8ce9a2c..5a0a3cbcc1c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -94,14 +94,14 @@ struct src_ent {
static bool src_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_IWARP;
}
static bool tm_cid_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE ||
type == PROTOCOLID_IWARP;
@@ -2072,7 +2072,6 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
PROTOCOLID_FCOE,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
QED_CXT_FCOE_TID_SEG, 0,
p_params->num_tasks, true);
@@ -2090,13 +2089,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
if (p_params->num_cons && p_params->num_tasks) {
qed_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn,
- PROTOCOLID_ISCSI,
- QED_CXT_ISCSI_TID_SEG,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
0,
p_params->num_tasks,
true);
@@ -2106,6 +2104,29 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
}
break;
}
+ case QED_PCI_NVMETCP:
+ {
+ struct qed_nvmetcp_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "NvmeTCP personality used without setting params!\n");
+ }
+ break;
+ }
default:
return -EINVAL;
}
@@ -2129,8 +2150,9 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
@@ -2455,8 +2477,9 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
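
The recurring pattern in this file: NVMe/TCP rides the existing iSCSI TCP infrastructure, so both personalities now resolve to the shared PROTOCOLID_TCP_ULP and its TID segment. A condensed model of that mapping (enum values hypothetical):

#include <stdio.h>

/* Both TCP ULPs (iSCSI and NVMe/TCP) resolve to one shared firmware
 * protocol ID, which is what the QED_PCI_NVMETCP case additions above
 * implement for the CID/TID accounting paths. */
enum personality { PCI_FCOE, PCI_ISCSI, PCI_NVMETCP };
enum protocolid  { PROTO_FCOE, PROTO_TCP_ULP };

static int personality_to_proto(enum personality p, enum protocolid *proto)
{
	switch (p) {
	case PCI_FCOE:
		*proto = PROTO_FCOE;
		return 0;
	case PCI_ISCSI:
	case PCI_NVMETCP:	/* shares the iSCSI TCP infrastructure */
		*proto = PROTO_TCP_ULP;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	enum protocolid proto;

	if (!personality_to_proto(PCI_NVMETCP, &proto))
		printf("nvmetcp -> proto %d\n", proto);
	return 0;
}
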
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 056e79620a0e..8adb7ed0c12d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -50,7 +50,7 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
-#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP
#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
enum qed_cxt_elem_type {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 17d5b649eb36..e81dd34a3cac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct qed_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
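
The BUILD_BUG_ON() above pins the memcpy length to the destination field and turns any future size drift between the two params structs into a compile error instead of a silent overread. The standard-C equivalent, with hypothetical stand-in structs:

#include <string.h>
#include <assert.h>	/* static_assert (C11) */

struct dcbx_params_model { int a, b; };	/* hypothetical stand-in */

struct operational { struct dcbx_params_model params; };
struct config      { struct dcbx_params_model params; };

/* C11 equivalent of the kernel's BUILD_BUG_ON(): refuse to compile if
 * the source and destination of the memcpy ever drift apart in size. */
static_assert(sizeof(((struct operational *)0)->params) ==
	      sizeof(((struct config *)0)->params),
	      "dcbx params size mismatch");

static void copy_params(struct config *dst, const struct operational *src)
{
	/* sizeof(dst->params), not a third type, so the length can
	 * never silently disagree with the destination buffer */
	memcpy(&dst->params, &src->params, sizeof(dst->params));
}

int main(void)
{
	struct operational src = { { 1, 2 } };
	struct config dst;

	copy_params(&dst, &src);
	return dst.params.a - 1;	/* 0 */
}
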
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d2f5855b2ea7..0410c3604abd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -37,6 +37,7 @@
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"
+#include "qed_nvmetcp.h"
static DEFINE_SPINLOCK(qm_lock);
@@ -667,7 +668,8 @@ qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
/* Storage PF is bound to a single engine while L2 PF uses both */
- if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
+ QED_IS_NVMETCP_PERSONALITY(p_hwfn))
eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
else /* L2_PERSONALITY */
eng = QED_BOTH_ENG;
@@ -1164,6 +1166,9 @@ void qed_llh_remove_mac_filter(struct qed_dev *cdev,
if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
goto out;
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ return;
+
ether_addr_copy(filter.mac.addr, mac_addr);
rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
&ref_cnt);
@@ -1381,6 +1386,11 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
@@ -1423,6 +1433,7 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
flags |= PQ_FLAGS_OFLD;
break;
case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case QED_PCI_ETH_ROCE:
@@ -2263,10 +2274,11 @@ int qed_resc_alloc(struct qed_dev *cdev)
* at the same time
*/
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
- } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
NULL);
n_eqes += 2 * num_cons;
}
@@ -2313,6 +2325,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ rc = qed_nvmetcp_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
rc = qed_rdma_info_alloc(p_hwfn);
if (rc)
@@ -2393,6 +2414,11 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_iscsi_setup(p_hwfn);
qed_ooo_setup(p_hwfn);
}
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
+ }
}
}
@@ -2854,7 +2880,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
- (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
+ ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
+ (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -3535,14 +3562,21 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
+
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ),
(int)sb_cnt.cnt);
}
@@ -3734,7 +3768,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
- p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ p_hwfn->hw_info.personality != QED_PCI_FCOE &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP)
*p_resc_num = 0;
else
*p_resc_num = 1;
@@ -3755,7 +3790,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_start = 0;
else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
- else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
*p_resc_start = p_hwfn->port_id + 2;
@@ -5326,3 +5362,93 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(*p_filters));
+
+ return 0;
+}
+
+static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ rc = qed_llh_shadow_remove_all_filters(cdev, ppfid);
+ if (rc)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc)
+ goto out;
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_clear_all_filters(struct qed_dev *cdev)
+{
+ u8 ppfid;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
+ qed_llh_clear_ppfid_filters(cdev, ppfid);
+}
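
In the helpers above, QED_LLH_DONT_CARE fills whichever half of the (source, destination) port pair a filter does not constrain. A hypothetical model of how such a wildcard filter would classify packets (4420 being the NVMe/TCP well-known port; the real matching happens in the NIG HW block):

#include <stdio.h>
#include <stdint.h>

#define LLH_DONT_CARE 0	/* mirrors QED_LLH_DONT_CARE */

struct port_filter { uint16_t src_port, dest_port; };

/* A zero half of the pair matches anything; the other half must
 * match exactly. */
static int filter_matches(const struct port_filter *f,
			  uint16_t src, uint16_t dst)
{
	return (f->src_port  == LLH_DONT_CARE || f->src_port  == src) &&
	       (f->dest_port == LLH_DONT_CARE || f->dest_port == dst);
}

int main(void)
{
	/* qed_llh_add_dst_tcp_port_filter(cdev, 4420) would install: */
	struct port_filter nvme = { LLH_DONT_CARE, 4420 };

	printf("%d\n", filter_matches(&nvme, 51234, 4420));	/* 1 */
	printf("%d\n", filter_matches(&nvme, 51234, 80));	/* 0 */
	return 0;
}
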
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 559df9f4d656..fb1baa2da2d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -20,6 +20,7 @@
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
+#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
@@ -1118,7 +1119,7 @@ struct outer_tag_config_struct {
/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_ISCSI,
+ PERSONALITY_TCP_ULP,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RDMA,
@@ -12147,7 +12148,8 @@ struct public_func {
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT 8
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 4eae4ee3538f..db926d8b3033 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -158,7 +158,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_INIT_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -250,7 +250,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
- qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
qed_iscsi_async_event);
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -286,7 +286,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -453,7 +453,7 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
u32 dval;
/* Get SPQ entry */
@@ -465,7 +465,7 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -506,7 +506,7 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -548,7 +548,7 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -582,7 +582,7 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -606,13 +606,13 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
return rc;
}
@@ -786,7 +786,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
u32 icid;
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 49783f365079..02a4610d9330 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -960,7 +960,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
- conn_type != QED_LL2_TYPE_IWARP) {
+ conn_type != QED_LL2_TYPE_IWARP &&
+ (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1037,8 +1038,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
case QED_LL2_TYPE_FCOE:
p_ramrod->conn_type = PROTOCOLID_FCOE;
break;
- case QED_LL2_TYPE_ISCSI:
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ case QED_LL2_TYPE_TCP_ULP:
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
break;
case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE;
@@ -1047,8 +1048,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
case QED_LL2_TYPE_OOO:
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
else
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
@@ -1634,7 +1636,8 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (rc)
goto out;
- if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+ !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
@@ -2376,7 +2379,8 @@ out:
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
- QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
@@ -2402,11 +2406,13 @@ static int qed_ll2_stop(struct qed_dev *cdev)
if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
return 0;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
eth_zero_addr(cdev->ll2_mac_address);
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
@@ -2442,7 +2448,8 @@ static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
conn_type = QED_LL2_TYPE_FCOE;
break;
case QED_PCI_ISCSI:
- conn_type = QED_LL2_TYPE_ISCSI;
+ case QED_PCI_NVMETCP:
+ conn_type = QED_LL2_TYPE_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -2567,7 +2574,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(p_hwfn, params);
if (rc) {
@@ -2576,10 +2583,13 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
- if (rc) {
- DP_NOTICE(cdev, "Failed to add an LLH filter\n");
- goto err3;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
+
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2587,7 +2597,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
return 0;
err3:
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
err2:
if (b_is_storage_eng1)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cd882c453394..4387292c37e2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2446,6 +2446,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
+ case FUNC_MF_CFG_PROTOCOL_NVMETCP:
+ *p_proto = QED_PCI_NVMETCP;
+ break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index 3e3192a3ad9b..6190adf965bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -1306,7 +1306,8 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
if ((tlv_group & QED_MFW_TLV_ISCSI) &&
- p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Skipping iSCSI TLVs for non-iSCSI function\n");
tlv_group &= ~QED_MFW_TLV_ISCSI;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
new file mode 100644
index 000000000000..f19128c8d9cc
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_nvmetcp.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_reg_addr.h"
+#include "qed_nvmetcp_fw_funcs.h"
+
+static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ u16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ if (p_hwfn->p_nvmetcp_info->event_cb) {
+ struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;
+
+ return p_nvmetcp->event_cb(p_nvmetcp->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");
+
+ return -EINVAL;
+ }
+}
+
+static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct nvmetcp_spe_func_init *p_init = NULL;
+ struct qed_sp_init_data init_data = {};
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_init;
+ p_init = &p_ramrod->nvmetcp_init_spe;
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_queue = &p_init->q_params;
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_params->ll2_ooo_queue_id;
+ SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
+ p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
+ p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
+ p_init->debug_flags = p_params->debug_mode;
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+ p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
+ p_queue->num_queues = p_params->num_queues;
+ val = RESC_START(p_hwfn, QED_CMDQS_CQS);
+ p_queue->queue_relative_offset = cpu_to_le16((u16)val);
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = qed_get_igu_sb_id(p_hwfn, i);
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
+ p_queue->cmdq_num_entries = 0;
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
+ p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
+ p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
+ SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
+ NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
+ p_hwfn->p_nvmetcp_info->event_context = event_context;
+ p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
+ qed_nvmetcp_async_event);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
+
+ return rc;
+}
+
+static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+ info->port_id = MFW_PORT(hwfn);
+ info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);
+
+ return rc;
+}
+
+static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.nvmetcp = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_nvmetcp_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "nvmetcp already stopped\n");
+
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop nvmetcp - not all connections were returned\n");
+
+ return -EINVAL;
+ }
+
+ /* Stop the nvmetcp */
+ rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_nvmetcp_start(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct qed_tid_mem *tid_info;
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "nvmetcp already started;\n");
+
+ return 0;
+ }
+
+ rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL,
+ event_context, async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start nvmetcp\n");
+
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+ if (!tid_info) {
+ qed_nvmetcp_stop(cdev);
+
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_nvmetcp_stop(cdev);
+ kfree(tid_info);
+
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
+ kfree(tid_info);
+
+ return 0;
+}
+
+static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || hash_con->con->icid != handle)
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp = NULL;
+ struct qed_sp_init_data init_data = { 0 };
+ struct qed_spq_entry *p_ent = NULL;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
+ int rc = 0;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);
+
+ /* nvmetcp Pure-ACK PQ */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
+ p_ramrod->nvmetcp.flags = p_conn->offl_flags;
+ p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
+ p_ramrod->nvmetcp.initial_ack = 0;
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
+ p_conn->nvmetcp_cccid_itid_table_addr);
+ p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
+ cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
+ p_tcp = &p_ramrod->tcp;
+ qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
+ &p_tcp->remote_mac_addr_mid,
+ &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
+ qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
+ &p_tcp->local_mac_addr_mid,
+ &p_tcp->local_mac_addr_lo, p_conn->local_mac);
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
+ p_tcp->ip_version = p_conn->ip_version;
+ if (p_tcp->ip_version == TCP_IPV6) {
+ for (i = 0; i < 4; i++) {
+ p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
+ p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
+ }
+ } else {
+ p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
+ p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
+ }
+
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ *p_out_conn = p_conn;
+
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
+ params.elem_size = sizeof(struct nvmetcp_wqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
+ if (rc)
+ goto nomem_r2tq;
+
+ params.num_elems = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
+ if (rc)
+ goto nomem_uhq;
+
+ params.elem_size = sizeof(struct iscsi_xhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
+
+static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ if (rc)
+ return rc;
+
+ rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ kfree(p_conn);
+}
+
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_info *p_nvmetcp_info;
+
+ p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
+ if (!p_nvmetcp_info)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
+ p_hwfn->p_nvmetcp_info = p_nvmetcp_info;
+
+ return 0;
+}
+
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
+{
+ spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_nvmetcp_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_nvmetcp_free_connection(p_hwfn, p_conn);
+ }
+ }
+
+ kfree(p_hwfn->p_nvmetcp_info);
+ p_hwfn->p_nvmetcp_info = NULL;
+}
+
+static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+ if (p_doorbell)
+ *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ /* FW initializations */
+ con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
+ con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
+ con->default_cq = conn_info->default_cq;
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);
+
+ /* Networking and TCP stack initializations */
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+ con->vlan_id = conn_info->vlan_id;
+
+ if (conn_info->timestamp_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);
+
+ if (conn_info->delayed_ack_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);
+
+ if (conn_info->tcp_keep_alive_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);
+
+ if (conn_info->ecn_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);
+
+ con->ip_version = conn_info->ip_version;
+ con->flow_label = QED_TCP_FLOW_LABEL;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->mss = conn_info->mss;
+ con->cwnd = conn_info->cwnd;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->connect_mode = 0;
+
+ return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
+ if (conn_info->hdr_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);
+
+ if (conn_info->data_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);
+
+ /* Placeholder - initialize pfv, cpda, hpda */
+
+ con->max_seq_size = conn_info->max_io_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->max_io_size;
+
+ return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_nvmetcp_dev_info,
+ .register_ops = &qed_register_nvmetcp_ops,
+ .start = &qed_nvmetcp_start,
+ .stop = &qed_nvmetcp_stop,
+ .acquire_conn = &qed_nvmetcp_acquire_conn,
+ .release_conn = &qed_nvmetcp_release_conn,
+ .offload_conn = &qed_nvmetcp_offload_conn,
+ .update_conn = &qed_nvmetcp_update_conn,
+ .destroy_conn = &qed_nvmetcp_destroy_conn,
+ .clear_sq = &qed_nvmetcp_clear_conn_sq,
+ .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
+ .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
+ .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
+ .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
+ .clear_all_filters = &qed_llh_clear_all_filters,
+ .init_read_io = &init_nvmetcp_host_read_task,
+ .init_write_io = &init_nvmetcp_host_write_task,
+ .init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
+ .init_task_cleanup = &init_cleanup_task_nvmetcp
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
+{
+ return &qed_nvmetcp_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_nvmetcp_ops);
+
+void qed_put_nvmetcp_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_nvmetcp_ops);
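
qed_get_nvmetcp_ops()/qed_put_nvmetcp_ops() follow the usual qed pattern for exporting an ops table to an optional upper-layer module, so the NVMe/TCP offload driver never links against qed internals directly. A hedged consumer-side sketch (the consumer function names are illustrative):

static const struct qed_nvmetcp_ops *qed_ops;

static int __init demo_nvmetcp_offload_init(void)
{
	qed_ops = qed_get_nvmetcp_ops();
	if (!qed_ops)
		return -EINVAL;

	/* From here on, everything goes through the table, e.g.
	 * qed_ops->fill_dev_info(cdev, &dev_info);
	 */
	return 0;
}

static void __exit demo_nvmetcp_offload_exit(void)
{
	qed_put_nvmetcp_ops();	/* currently a no-op, kept for symmetry */
}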
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
new file mode 100644
index 000000000000..e5e9d075bf4f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_H
+#define _QED_NVMETCP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024)
+
+/* TCP parameters */
+#define QED_TCP_FLOW_LABEL 0
+#define QED_TCP_TWO_MSL_TIMER 4000
+#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10
+#define QED_TCP_MAX_FIN_RT 2
+#define QED_TCP_SWS_TIMER 5000
+
+struct qed_nvmetcp_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ nvmetcp_event_cb_t event_cb;
+};
+
+struct qed_hash_nvmetcp_con {
+ struct hlist_node node;
+ struct qed_nvmetcp_conn *con;
+};
+
+struct qed_nvmetcp_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u16 vlan_id;
+ u16 tcp_flags;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 rcv_wnd_scale;
+ u32 rcv_wnd;
+ u32 cwnd;
+ u8 update_flag;
+ u8 default_cq;
+ u8 abortive_dsconnect;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u16 physical_q0;
+ u16 physical_q1;
+ u16 nvmetcp_cccid_max_range;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+static inline int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) {}
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif
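
The #else branch above supplies inline no-op stubs so that qed core code can call qed_nvmetcp_alloc()/setup()/free() unconditionally when CONFIG_QED_NVMETCP is off. The generic shape of the pattern, with illustrative names:

#if IS_ENABLED(CONFIG_FOO)
int foo_alloc(struct foo_dev *fdev);
void foo_free(struct foo_dev *fdev);
#else
static inline int foo_alloc(struct foo_dev *fdev)
{
	return -EINVAL;		/* feature compiled out */
}

static inline void foo_free(struct foo_dev *fdev) {}
#endif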
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
new file mode 100644
index 000000000000..c1dd71d19f3f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed_nvmetcp_fw_funcs.h"
+
+#define NVMETCP_NUM_SGES_IN_CACHE 0x4
+
+bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct storage_sgl_task_params *sgl_params)
+{
+ u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
+ NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
+ u8 sge_index;
+
+ /* sgl params */
+ ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ ctx_data_desc->sge[sge_index].sge_addr.lo =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.hi =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_len =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
+ }
+}
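
The firmware describes 64-bit DMA addresses as {lo, hi} pairs of little-endian 32-bit words, which is why the SGL setup above stores each half through cpu_to_le32() separately. A minimal sketch of the split, assuming an illustrative pair type:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative equivalent of the FW's {lo, hi} address layout. */
struct le_addr_pair {
	__le32 lo;
	__le32 hi;
};

static void demo_set_fw_addr(struct le_addr_pair *dst, dma_addr_t addr)
{
	/* lower/upper_32_bits() avoid shift-count warnings on 32-bit builds */
	dst->lo = cpu_to_le32(lower_32_bits(addr));
	dst->hi = cpu_to_le32(upper_32_bits(addr));
}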
+
+static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ u32 io_size;
+
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (unlikely(!io_size))
+ return 0;
+
+ return io_size;
+}
+
+static inline void init_sqe(struct nvmetcp_task_params *task_params,
+ struct storage_sgl_task_params *sgl_task_params,
+ enum nvmetcp_task_type task_type)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+
+ switch (task_type) {
+ case NVMETCP_TASK_TYPE_HOST_WRITE: {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ if (task_params->tx_io_size) {
+ if (task_params->send_write_incapsule)
+ buf_size = calc_rw_task_size(task_params, task_type);
+
+ if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
+ } break;
+
+ case NVMETCP_TASK_TYPE_HOST_READ: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ } break;
+
+ case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
+ task_params->tx_io_size);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
+ min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ } break;
+
+ case NVMETCP_TASK_TYPE_CLEANUP:
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP);
+
+ default:
+ break;
+ }
+}
+
+/* The following function initializes the NVMeTCP task params */
+static inline void
+init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
+ struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ context->ystorm_st_context.state.cccid = task_params->host_cccid;
+ SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
+ context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
+ context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
+}
+
+/* The following function initializes default values for all tasks */
+static inline void
+init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ void *pdu_header, void *nvme_cmd,
+ enum nvmetcp_task_type task_type)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ const u8 val_byte = context->mstorm_ag_context.cdu_validation;
+ u8 dw_index;
+
+ memset(context, 0, sizeof(*context));
+ init_nvmetcp_task_params(context, task_params,
+ (enum nvmetcp_task_type)task_type);
+
+ /* Swapping requirement of the current FW, used below; it will be removed in future FW versions */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
+ task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+
+ for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
+ } else {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+ }
+
+ /* M-Storm Context: */
+ context->mstorm_ag_context.cdu_validation = val_byte;
+ context->mstorm_st_context.task_type = (u8)(task_type);
+ context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);
+
+ /* Ustorm Context: */
+ SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
+ context->ustorm_st_context.task_type = (u8)(task_type);
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
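
The header copy above applies a per-dword byte swap before the little-endian store, so each 32-bit word of the PDU header lands big-endian in the task context, as the current firmware expects. Reduced to a helper (names illustrative):

/* Copy an n-dword PDU header with the FW's per-dword swap; per the
 * comment above, this requirement is expected to go away in future
 * FW versions.
 */
static void demo_copy_hdr_swabbed(__le32 *dst, const u32 *src,
				  unsigned int n_dwords)
{
	unsigned int i;

	for (i = 0; i < n_dwords; i++)
		dst[i] = cpu_to_le32(__swab32(src[i]));
}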
+
+/* The following function initializes the U-Storm Task Contexts */
+static inline void
+init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
+ struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len, u8 num_sges,
+ bool tx_dif_conn_err_en)
+{
+ /* Remaining data to be received in bytes. Used in validations */
+ ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
+ SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+/* The following function initializes Local Completion Contexts: */
+static inline void
+set_local_completion_context(struct e5_nvmetcp_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+/* Common Fastpath task init function: */
+static inline void
+init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type,
+ void *pdu_header, void *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ u32 task_size = calc_rw_task_size(task_params, task_type);
+ bool slow_io = false;
+ u8 num_sges = 0;
+
+ init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);
+
+ /* Tx/Rx: */
+ if (task_params->tx_io_size) {
+ /* if data to transmit: */
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+ slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ num_sges =
+ (u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ if (slow_io) {
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ /* if data to receive: */
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges =
+ (u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ /* Ustorm context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+ /* Remaining Receive length is the Task Size */
+ task_size,
+ /* The size of the transmitted task */
+ task_size,
+ /* num_sges */
+ num_sges,
+ false);
+
+ /* Set exp_data_acked */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
+ if (task_params->send_write_incapsule)
+ context->ustorm_ag_context.exp_data_acked = task_size;
+ else
+ context->ustorm_ag_context.exp_data_acked = 0;
+ } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ context->ustorm_ag_context.exp_data_acked = 0;
+ }
+
+ context->ustorm_ag_context.exp_cont_len = 0;
+ init_sqe(task_params, sgl_task_params, task_type);
+}
+
+static void
+init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_login_request_task(struct nvmetcp_task_params *task_params,
+ void *login_req_pdu_header,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+
+ init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+
+ /* Ustorm Context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+
+ /* Remaining Receive length is the Task Size */
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+
+ /* The size of the transmitted task */
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, /* num_sges */
+ 0); /* tx_dif_conn_err_en */
+
+ /* SGL context: */
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ context->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0);
+ init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+}
+
+/* The following function initializes Login task in Host mode: */
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
+ tx_sgl_task_params, rx_sgl_task_params);
+}
+
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
+}
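
Taken together, these exported helpers give the upper NVMe/TCP driver a per-I/O init path: populate a struct nvmetcp_task_params plus an SGL descriptor, then call the matching init routine before posting the SQE. A hedged outline of a host-read submission (the wrapper and field sources are illustrative; real callers also fill host_cccid, opq, etc.):

static void demo_submit_read(struct e5_nvmetcp_task_context *task_ctx,
			     struct nvmetcp_wqe *sqe, u16 itid, u16 icid,
			     u32 io_len, u8 cq_id,
			     struct nvme_tcp_cmd_pdu *pdu,
			     struct nvme_command *cmd,
			     struct storage_sgl_task_params *sgl)
{
	struct nvmetcp_task_params params = {};

	params.context = task_ctx;	/* FW task context for this itid */
	params.sqe = sqe;		/* slot in the connection's SQ */
	params.itid = itid;
	params.conn_icid = icid;
	params.rx_io_size = io_len;	/* host read: data flows on Rx */
	params.cq_rss_number = cq_id;

	init_nvmetcp_host_read_task(&params, pdu, cmd, sgl);
	/* ...then ring the connection doorbell via the qed ops. */
}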
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
new file mode 100644
index 000000000000..1d5ddc217bdb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_FW_FUNCS_H
+#define _QED_NVMETCP_FW_FUNCS_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* _QED_NVMETCP_FW_FUNCS_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
new file mode 100644
index 000000000000..96a2077fd315
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright 2021 Marvell. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+
+#include <net/tcp.h>
+
+#include <linux/qed/qed_nvmetcp_ip_services_if.h>
+
+#define QED_IP_RESOL_TIMEOUT 4
+
+int qed_route_ipv4(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ __be32 *loc_ip, *rem_ip;
+ struct rtable *rt;
+ int rc = -ENXIO;
+ int retry;
+
+ loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
+ rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
+ *ndev = NULL;
+ rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
+ if (IS_ERR(rt)) {
+ pr_err("lookup route failed\n");
+ rc = PTR_ERR(rt);
+ goto return_err;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, rem_ip);
+ if (!neigh) {
+ rc = -ENOMEM;
+ ip_rt_put(rt);
+ goto return_err;
+ }
+
+ *ndev = rt->dst.dev;
+ ip_rt_put(rt);
+
+ /* If not resolved, kick off the state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ /* copy resolved MAC address */
+ neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+ if (!(*loc_ip)) {
+ *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
+ local_addr->ss_family = AF_INET;
+ }
+
+return_err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv4);
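
A caller hands this routine the peer's address and gets back the egress net_device plus the resolved next-hop MAC, with the helper polling synchronously for up to QED_IP_RESOL_TIMEOUT seconds while ARP completes. A hedged usage sketch (daddr stands in for the caller's target address):

struct sockaddr_storage local = {}, remote = {};
struct sockaddr_in *rin = (struct sockaddr_in *)&remote;
struct net_device *ndev = NULL;
struct sockaddr hw_addr = {};
int rc;

rin->sin_family = AF_INET;
rin->sin_addr.s_addr = daddr;	/* target IPv4 address, network order */

rc = qed_route_ipv4(&local, &remote, &hw_addr, &ndev);
if (rc)
	return rc;	/* no route, or neighbour never reached NUD_VALID */

/* hw_addr.sa_data now holds the next-hop MAC; local was back-filled
 * with a source address if the caller left it as INADDR_ANY.
 */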
+
+int qed_route_ipv6(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = -ENXIO;
+ int retry;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
+ fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst || dst->error) {
+ if (dst) {
+ pr_err("lookup route failed %d\n", dst->error);
+ dst_release(dst);
+ }
+
+ goto out;
+ }
+
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
+ if (neigh) {
+ *ndev = ip6_dst_idev(dst)->dev;
+
+ /* If not resolved, kick off the state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ neigh_ha_snapshot((u8 *)hardware_address->sa_data,
+ neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+
+ if (ipv6_addr_any(&fl6.saddr)) {
+ if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
+ &fl6.daddr, 0, &fl6.saddr)) {
+ pr_err("Unable to find source IP address\n");
+ goto out;
+ }
+
+ local_addr->ss_family = AF_INET6;
+ ((struct sockaddr_in6 *)local_addr)->sin6_addr =
+ fl6.saddr;
+ }
+ }
+
+ dst_release(dst);
+
+out:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv6);
+
+void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
+{
+ if (is_vlan_dev(*ndev)) {
+ *vlan_id = vlan_dev_vlan_id(*ndev);
+ *ndev = vlan_dev_real_dev(*ndev);
+ }
+}
+EXPORT_SYMBOL(qed_vlan_get_ndev);
+
+struct pci_dev *qed_validate_ndev(struct net_device *ndev)
+{
+ struct pci_dev *pdev = NULL;
+ struct net_device *upper;
+
+ for_each_pci_dev(pdev) {
+ if (pdev && pdev->driver &&
+ !strcmp(pdev->driver->name, "qede")) {
+ upper = pci_get_drvdata(pdev);
+ if (upper->ifindex == ndev->ifindex)
+ return pdev;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(qed_validate_ndev);
+
+__be16 qed_get_in_port(struct sockaddr_storage *sa)
+{
+ return sa->ss_family == AF_INET
+ ? ((struct sockaddr_in *)sa)->sin_port
+ : ((struct sockaddr_in6 *)sa)->sin6_port;
+}
+EXPORT_SYMBOL(qed_get_in_port);
+
+int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
+ struct socket **sock, u16 *port)
+{
+ struct sockaddr_storage sa;
+ int rc = 0;
+
+ rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
+ sock);
+ if (rc) {
+ pr_warn("failed to create socket: %d\n", rc);
+ goto err;
+ }
+
+ (*sock)->sk->sk_allocation = GFP_KERNEL;
+ sk_set_memalloc((*sock)->sk);
+
+ rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
+ sizeof(local_ip_addr));
+
+ if (rc) {
+ pr_warn("failed to bind socket: %d\n", rc);
+ goto err_sock;
+ }
+
+ rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
+ if (rc < 0) {
+ pr_warn("getsockname() failed: %d\n", rc);
+ goto err_sock;
+ }
+
+ *port = ntohs(qed_get_in_port(&sa));
+
+ return 0;
+
+err_sock:
+ sock_release(*sock);
+ *sock = NULL;
+err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_fetch_tcp_port);
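
qed_fetch_tcp_port() reserves a local TCP source port by binding a kernel socket (a zero port lets the stack pick an ephemeral one) and reading the result back with kernel_getsockname(); the socket is intentionally kept open so the port stays reserved until qed_return_tcp_port() releases it. Usage sketch:

struct socket *sock = NULL;
u16 port;
int rc;

rc = qed_fetch_tcp_port(local_ip_addr, &sock, &port);
if (rc)
	return rc;

/* ...hand 'port' to the offload engine as the connection's source port... */

qed_return_tcp_port(sock);	/* sets TCP_CLOSE and releases the socket */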
+
+void qed_return_tcp_port(struct socket *sock)
+{
+ if (sock && sock->sk) {
+ tcp_set_state(sock->sk, TCP_CLOSE);
+ sock_release(sock);
+ }
+}
+EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 88353aa404dc..b8c5641b29a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -16,7 +16,7 @@
#include "qed_ll2.h"
#include "qed_ooo.h"
#include "qed_cxt.h"
-
+#include "qed_nvmetcp.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
struct qed_ooo_info
@@ -83,7 +83,8 @@ int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
break;
case QED_PCI_ETH_RDMA:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 993f1357b6fc..60ff3222bf55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -100,6 +100,11 @@ union ramrod_data {
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
+ struct nvmetcp_init_ramrod_params nvmetcp_init;
+ struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
+ struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
+ struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
+
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index aa71adcf31ee..b4ed54ffef9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -385,7 +385,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_FCOE;
break;
case QED_PCI_ISCSI:
- p_ramrod->personality = PERSONALITY_ISCSI;
+ case QED_PCI_NVMETCP:
+ p_ramrod->personality = PERSONALITY_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 8e150dd4f899..065e9004598e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1089,13 +1089,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
*len, false);
- /* Queues always have a full reset currently, so for the time
- * being until there's atomic program replace just mark read
- * side for map helpers.
- */
- rcu_read_lock();
act = bpf_prog_run_xdp(prog, &xdp);
- rcu_read_unlock();
/* Recalculate, as XDP might have changed the headers */
*data_offset = xdp.data - xdp.data_hard_start;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2f6598086d9b..6304514a6f2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -247,12 +247,10 @@ static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
struct qede_rdma_event_work *event_node = NULL;
- struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
- event_node = list_entry(list_node, struct qede_rdma_event_work,
- list);
+ list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
+ list) {
if (!work_pending(&event_node->work)) {
found = true;
break;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 214e347097a7..2376b2729633 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
value = readl(&port_regs->CommonRegs.semaphoreReg);
if ((value & (sem_mask >> 16)) == sem_bits)
return 0;
- ssleep(1);
+ mdelay(1000);
} while (--seconds);
return -1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index d2c190732d3e..0a2f34fc8b24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -746,7 +746,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
}
/**
- * qlcnic_83xx_idc_cold_state
+ * qlcnic_83xx_idc_cold_state_handler
*
* @adapter: adapter structure
*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index c4297aea7d15..711609503ba6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -180,7 +180,7 @@ static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
}
/**
- * qlcnic_83xx_vnic_opmode
+ * qlcnic_83xx_config_vnic_opmode
*
* @adapter: adapter structure
* Identify virtual NIC operational modes.
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index e1b8490bed0a..4b8bc46f55c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -460,12 +460,10 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
int err = -EINVAL;
/* Delete MAC from the existing list */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
@@ -483,11 +481,9 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
enum qlcnic_mac_type mac_type)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
/* look up if already exists */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr) &&
cur->vlan_id == vlan)
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 601d22495a88..95ecc84dddcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -203,7 +203,6 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
-int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index bdf15d2a6431..af4c516a9e7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1390,6 +1390,7 @@ static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int ma
break;
case QLCNIC_RESPONSE_DESC:
qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ goto skip;
default:
goto skip;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 96b947fde646..a4fa507903ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -319,10 +319,8 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
@@ -2690,6 +2688,7 @@ err_out_free_hw_res:
kfree(ahw);
err_out_free_res:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
@@ -3343,9 +3342,6 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-
- if (prev_state == QLCNIC_DEV_QUISCENT)
- continue;
} while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
if (!dev_init_timeo) {
@@ -3455,6 +3451,7 @@ wait_npar:
adapter->fw_wait_cnt = 0;
return;
}
+ break;
case QLCNIC_DEV_FAILED:
break;
default:
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 702aa217a27a..d59fff2fbcc6 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -62,6 +62,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"SPI errors",
"Write verify errors",
"Buffer available errors",
+ "Bad signature",
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ab9b02574a15..b64c254e00ba 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -504,8 +504,12 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
+ if (qca->sync == QCASPI_SYNC_READY)
+ qca->stats.bad_signature++;
+
qca->sync = QCASPI_SYNC_UNKNOWN;
netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
+ return;
} else {
/* ensure that the WRBUF is empty */
qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA,
@@ -523,10 +527,14 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
switch (qca->sync) {
case QCASPI_SYNC_READY:
- /* Read signature, if not valid go to unknown state. */
+ /* Check signature twice, if not valid go to unknown state. */
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+ if (signature != QCASPI_GOOD_SIGNATURE)
+ qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+
if (signature != QCASPI_GOOD_SIGNATURE) {
qca->sync = QCASPI_SYNC_UNKNOWN;
+ qca->stats.bad_signature++;
netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
/* don't reset right away */
return;
@@ -653,8 +661,7 @@ qcaspi_intr_handler(int irq, void *data)
struct qcaspi *qca = data;
qca->intr_req++;
- if (qca->spi_thread &&
- qca->spi_thread->state != TASK_RUNNING)
+ if (qca->spi_thread)
wake_up_process(qca->spi_thread);
return IRQ_HANDLED;
@@ -777,8 +784,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
- if (qca->spi_thread &&
- qca->spi_thread->state != TASK_RUNNING)
+ if (qca->spi_thread)
wake_up_process(qca->spi_thread);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d13a67e20d65..3067356106f0 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -75,6 +75,7 @@ struct qcaspi_stats {
u64 spi_err;
u64 write_verify_failed;
u64 buf_avail_err;
+ u64 bad_signature;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 8d51b0cb545c..27b1663c476e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -163,7 +163,8 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct ifla_rmnet_flags *flags;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
- data_format = flags->flags & flags->mask;
+ data_format &= ~flags->mask;
+ data_format |= flags->flags & flags->mask;
}
netdev_dbg(dev, "data format [0x%08X]\n", data_format);
@@ -336,7 +337,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
old_data_format = port->data_format;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
- port->data_format = flags->flags & flags->mask;
+ port->data_format &= ~flags->mask;
+ port->data_format |= flags->flags & flags->mask;
if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
port->data_format = old_data_format;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 8d8d4690a074..3d3cba56c516 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation.
+ * All rights reserved.
*
* RMNET Data configuration engine
*/
@@ -48,6 +49,7 @@ struct rmnet_pcpu_stats {
struct rmnet_priv_stats {
u64 csum_ok;
+ u64 csum_ip4_header_bad;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
@@ -56,6 +58,7 @@ struct rmnet_priv_stats {
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
+ u64 csum_hw;
};
struct rmnet_priv {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 0be5ac7ab261..bfbd7847f946 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data ingress/egress handler
*/
@@ -82,12 +82,18 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
skb->dev = ep->egress_dev;
- /* Subtract MAP header */
- skb_pull(skb, sizeof(struct rmnet_map_header));
- rmnet_set_skb_proto(skb);
-
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
- if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ if ((port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) &&
+ (map_header->flags & MAP_NEXT_HEADER_FLAG)) {
+ if (rmnet_map_process_next_hdr_packet(skb, len))
+ goto free_skb;
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ } else {
+ /* Subtract MAP header */
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4 &&
+ !rmnet_map_checksum_downlink_packet(skb, len + pad))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -128,7 +134,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
- int required_headroom, additional_header_len;
+ int required_headroom, additional_header_len, csum_type = 0;
struct rmnet_map_header *map_header;
additional_header_len = 0;
@@ -136,18 +142,23 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
- required_headroom += additional_header_len;
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+ } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+ additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
- if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
- return -ENOMEM;
- }
+ required_headroom += additional_header_len;
+
+ if (skb_cow_head(skb, required_headroom) < 0)
+ return -ENOMEM;
- if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
- rmnet_map_checksum_uplink_packet(skb, orig_dev);
+ if (csum_type)
+ rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+ csum_type);
- map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
+ map_header = rmnet_map_add_map_header(skb, additional_header_len,
+ port, 0);
if (!map_header)
return -ENOMEM;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 2aea153f4247..e5a0b38f7dbe 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*/
#ifndef _RMNET_MAP_H_
@@ -43,10 +43,15 @@ enum rmnet_map_commands {
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad);
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev);
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 0ac2ff828320..3676976c875b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data MAP protocol
*/
@@ -8,6 +8,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -18,23 +19,13 @@
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
- __sum16 *check = NULL;
+ if (protocol == IPPROTO_TCP)
+ return &((struct tcphdr *)txporthdr)->check;
- switch (protocol) {
- case IPPROTO_TCP:
- check = &(((struct tcphdr *)txporthdr)->check);
- break;
-
- case IPPROTO_UDP:
- check = &(((struct udphdr *)txporthdr)->check);
- break;
+ if (protocol == IPPROTO_UDP)
+ return &((struct udphdr *)txporthdr)->check;
- default:
- check = NULL;
- break;
- }
-
- return check;
+ return NULL;
}
static int
@@ -42,71 +33,74 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
- u16 csum_value, csum_value_final;
- struct iphdr *ip4h;
- void *txporthdr;
- __be16 addend;
-
- ip4h = (struct iphdr *)(skb->data);
- if ((ntohs(ip4h->frag_off) & IP_MF) ||
- ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+ struct iphdr *ip4h = (struct iphdr *)skb->data;
+ void *txporthdr = skb->data + ip4h->ihl * 4;
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip_payload_csum;
+
+ /* Computing the checksum over just the IPv4 header--including its
+ * checksum field--should yield 0. If it doesn't, the IP header
+ * is bad, so return an error and let the IP layer drop it.
+ */
+ if (ip_fast_csum(ip4h, ip4h->ihl)) {
+ priv->stats.csum_ip4_header_bad++;
+ return -EINVAL;
+ }
+
+ /* We don't support checksum offload on IPv4 fragments */
+ if (ip_is_fragment(ip4h)) {
priv->stats.csum_fragmented_pkt++;
return -EOPNOTSUPP;
}
- txporthdr = skb->data + ip4h->ihl * 4;
-
+ /* Checksum offload is only supported for UDP and TCP protocols */
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
- if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+ /* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
+ if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
priv->stats.csum_skipped++;
return 0;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
- ip_payload_csum = csum16_sub((__force __sum16)csum_value,
- (__force __be16)hdr_csum);
-
- pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
- ntohs(ip4h->tot_len) - ip4h->ihl * 4,
- ip4h->protocol, 0);
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip4h->protocol) {
- case IPPROTO_UDP:
- /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL4 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ *
+ * We verified above that the IP header contributes zero to the
+ * trailer checksum. Therefore the checksum in the trailer is
+ * just the checksum computed over the IP payload.
+ *
+ * If the IP payload arrives intact, adding the pseudo header
+ * checksum to the IP payload checksum will yield 0xffff (negative
+ * zero). This means the trailer checksum and the pseudo checksum
+ * are additive inverses of each other. Put another way, the
+ * message passes the checksum test if the trailer checksum value
+ * is the negated pseudo header checksum.
+ *
+ * Knowing this, we don't even need to examine the transport
+ * header checksum value; it is already accounted for in the
+ * checksum value found in the trailer.
+ */
+ ip_payload_csum = csum_trailer->csum_value;
+
+ pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+
+ /* The cast is required to ensure only the low 16 bits are examined */
+ if (ip_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
}
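
The property the long comment relies on can be restated compactly: for an intact packet the one's-complement sum of payload and pseudo header is 0xffff, so the trailer checksum must equal the bitwise inverse of the folded pseudo-header sum. An illustrative helper mirroring the test above (csum_tcpudp_nofold() plus csum_fold() is what csum_tcpudp_magic() computes):

static bool demo_trailer_csum_ok(const struct iphdr *ip4h,
				 __sum16 trailer_csum)
{
	__wsum sum = csum_tcpudp_nofold(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);
	__sum16 pseudo_csum = csum_fold(sum);

	/* Good packet <=> trailer is the inverse of the pseudo sum */
	return trailer_csum == (__sum16)~pseudo_csum;
}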
#if IS_ENABLED(CONFIG_IPV6)
@@ -115,76 +109,66 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
- u16 csum_value, csum_value_final;
- __be16 ip6_hdr_csum, addend;
- struct ipv6hdr *ip6h;
- void *txporthdr;
- u32 length;
-
- ip6h = (struct ipv6hdr *)(skb->data);
-
- txporthdr = skb->data + sizeof(struct ipv6hdr);
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+ void *txporthdr = skb->data + sizeof(*ip6h);
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip6_payload_csum;
+ __be16 ip_header_csum;
+
+ /* Checksum offload is only supported for UDP and TCP protocols;
+ * the packet cannot include any IPv6 extension headers
+ */
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- ip6_hdr_csum = (__force __be16)
- ~ntohs((__force __be16)ip_compute_csum(ip6h,
- (int)(txporthdr - (void *)(skb->data))));
- ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
- ip6_hdr_csum);
-
- length = (ip6h->nexthdr == IPPROTO_UDP) ?
- ntohs(((struct udphdr *)txporthdr)->len) :
- ntohs(ip6h->payload_len);
- pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- length, ip6h->nexthdr, 0));
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip6_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip6h->nexthdr) {
- case IPPROTO_UDP:
- /* RFC 2460 section 8.1
- * DL6 One's complement rule for UDP checksum 0
- */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL6 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ */
+ ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
+ ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);
+
+ pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ntohs(ip6h->payload_len),
+ ip6h->nexthdr, 0);
+
+ /* It's sufficient to compare the IP payload checksum with the
+ * negated pseudo checksum to determine whether the packet
+ * checksum was good. (See further explanation in comments
+ * in rmnet_map_ipv4_dl_csum_trailer()).
+ *
+ * The cast is required to ensure only the low 16 bits are
+ * examined.
+ */
+ if (ip6_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
+}
+#else
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ return 0;
}
#endif
-static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
- struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
- txphdr = iphdr + ip4h->ihl * 4;
+ txphdr = (void *)ip4h + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
@@ -193,15 +177,14 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
}
static void
-rmnet_map_ipv4_ul_csum_header(void *iphdr,
+rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct iphdr *ip4h = iphdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip4h->protocol == IPPROTO_UDP)
+ if (iphdr->protocol == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -214,13 +197,13 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
}
#if IS_ENABLED(CONFIG_IPV6)
-static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+static void
+rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
- struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
- txphdr = ip6hdr + sizeof(struct ipv6hdr);
+ txphdr = ip6h + 1;
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
@@ -229,15 +212,14 @@ static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
}
static void
-rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct ipv6hdr *ip6h = ip6hdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip6h->nexthdr == IPPROTO_UDP)
+ if (ipv6hdr->nexthdr == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -246,16 +228,73 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
skb->ip_summed = CHECKSUM_NONE;
- rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
+}
+#else
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
}
#endif
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_v5_csum_header *ul_header;
+
+ ul_header = skb_push(skb, sizeof(*ul_header));
+ memset(ul_header, 0, sizeof(*ul_header));
+ ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ void *iph = ip_hdr(skb);
+ __sum16 *check;
+ void *trans;
+ u8 proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+ proto = ((struct iphdr *)iph)->protocol;
+ trans = iph + ip_len;
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+ u16 ip_len = sizeof(struct ipv6hdr);
+
+ proto = ((struct ipv6hdr *)iph)->nexthdr;
+ trans = iph + ip_len;
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ goto sw_csum;
+ }
+
+ check = rmnet_map_get_csum_field(proto, trans);
+ if (check) {
+ skb->ip_summed = CHECKSUM_NONE;
+ /* Ask for checksum offloading */
+ ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+ priv->stats.csum_hw++;
+ return;
+ }
+ }
+
+sw_csum:
+ priv->stats.csum_sw++;
+}
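
u8_encode_bits() from <linux/bitfield.h> shifts a value into the field described by a mask, which is how header_info above gets its header-type bits while csum_info gets its valid flag ORed in. A tiny sketch with hypothetical masks:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical one-byte layout: type in bits 0-6, valid flag in bit 7. */
#define DEMO_HDR_TYPE_FMASK	GENMASK(6, 0)
#define DEMO_HDR_VALID_FLAG	BIT(7)

u8 header = u8_encode_bits(0x3, DEMO_HDR_TYPE_FMASK);	/* type = 3 */
header |= DEMO_HDR_VALID_FLAG;				/* mark csum valid */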
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad)
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -266,6 +305,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
+ /* Set next_hdr bit for csum offload packets */
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+ map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
@@ -300,8 +343,11 @@ done:
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
+ struct rmnet_map_v5_csum_header *next_hdr = NULL;
struct rmnet_map_header *maph;
+ void *data = skb->data;
struct sk_buff *skbn;
+ u8 nexthdr_type;
u32 packet_len;
if (skb->len == 0)
@@ -310,8 +356,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+ } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+ if (!(maph->flags & MAP_CMD_FLAG)) {
+ packet_len += sizeof(*next_hdr);
+ if (maph->flags & MAP_NEXT_HEADER_FLAG)
+ next_hdr = data + sizeof(*maph);
+ else
+ /* MAPv5 data packet without a csum header is invalid */
+ return NULL;
+ }
+ }
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -320,6 +376,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
if (!maph->pkt_len)
return NULL;
+ if (next_hdr) {
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return NULL;
+ }
+
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
if (!skbn)
return NULL;
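A hedged sketch of the per-packet length accounting the deaggregation loop performs for a MAPv5 data packet. The struct layouts below are abbreviated stand-ins (the real headers use bitfields) that merely match the driver's sizes of four bytes each:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Abbreviated, illustrative layouts; see the driver headers for the
	 * real bitfield definitions.
	 */
	struct map_hdr    { uint8_t flags; uint8_t mux_id; uint16_t pkt_len; };
	struct mapv5_csum { uint8_t header_info; uint8_t csum_info; uint16_t rsvd; };

	int main(void)
	{
		uint16_t pkt_len = 1400;	/* ntohs(maph->pkt_len) in the driver */
		size_t packet_len = pkt_len + sizeof(struct map_hdr);

		/* A CKSUMV5 data packet must also carry the next header;
		 * without it the packet is rejected, as above.
		 */
		packet_len += sizeof(struct mapv5_csum);
		printf("bytes consumed from the aggregate: %zu\n", packet_len);
		return 0;
	}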
@@ -355,28 +418,19 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return -EINVAL;
}
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
-#else
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
- }
- return 0;
+ priv->stats.csum_err_invalid_ip_version++;
+
+ return -EPROTONOSUPPORT;
}
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev)
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -389,28 +443,80 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- iphdr = (char *)ul_header +
- sizeof(struct rmnet_map_ul_csum_header);
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto sw_csum;
- if (skb->protocol == htons(ETH_P_IP)) {
- rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
- return;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
- rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
- return;
-#else
- priv->stats.csum_err_invalid_ip_version++;
- goto sw_csum;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- }
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
}
+ priv->stats.csum_err_invalid_ip_version++;
+
sw_csum:
memset(ul_header, 0, sizeof(*ul_header));
priv->stats.csum_sw++;
}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type)
+{
+ switch (csum_type) {
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+ rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+ break;
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+ rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+ u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_v5_csum_header *next_hdr;
+ u8 nexthdr_type;
+
+ next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
+ sizeof(struct rmnet_map_header));
+
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return -EINVAL;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
+ priv->stats.csum_ok++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ priv->stats.csum_valid_unset++;
+ }
+
+ /* Pull csum v5 header */
+ skb_pull(skb, sizeof(*next_hdr));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 41fbd2ceeede..13d8eb43a485 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct rmnet_vnd_stats total_stats;
+ struct rmnet_vnd_stats total_stats = { };
struct rmnet_pcpu_stats *pcpu_ptr;
+ struct rmnet_vnd_stats snapshot;
unsigned int cpu, start;
- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+ snapshot = pcpu_ptr->stats; /* struct assignment */
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+ total_stats.rx_pkts += snapshot.rx_pkts;
+ total_stats.rx_bytes += snapshot.rx_bytes;
+ total_stats.tx_pkts += snapshot.tx_pkts;
+ total_stats.tx_bytes += snapshot.tx_bytes;
+ total_stats.tx_drops += snapshot.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
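The reworked loop copies each per-CPU block into a local snapshot inside the fetch/retry window, so all five counters come from one consistent generation. A minimal user-space sketch of the seqcount-style reader that u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() implement, hand-rolled here with C11 atomics as an illustration only:

	#include <stdint.h>
	#include <stdatomic.h>

	struct vnd_stats { uint64_t rx_pkts, rx_bytes, tx_pkts, tx_bytes, tx_drops; };

	struct pcpu_stats {
		struct vnd_stats stats;
		_Atomic unsigned int seq;  /* writer bumps before and after updates */
	};

	static struct vnd_stats snapshot_stats(const struct pcpu_stats *p)
	{
		struct vnd_stats snap;
		unsigned int start;

		do {
			start = atomic_load_explicit(&p->seq, memory_order_acquire);
			snap = p->stats;	/* struct assignment, as in the driver */
		} while ((start & 1) ||		/* odd: a write was in flight */
			 atomic_load_explicit(&p->seq, memory_order_acquire) != start);

		return snap;
	}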
@@ -166,6 +166,7 @@ static const struct net_device_ops rmnet_vnd_ops = {
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum ok",
+ "Bad IPv4 header checksum",
"Checksum valid bit not set",
"Checksum validation failed",
"Checksum error bad buffer",
@@ -174,6 +175,7 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
+ "Checksum computed in hardware",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -354,4 +356,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
}
return 0;
-} \ No newline at end of file
+}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 7c74318620b1..47e9998b62f0 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -200,7 +200,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
int limit = MAC_DEF_TIMEOUT;
u16 cmd;
- iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
+ iowrite16(MDIO_READ | reg | (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the read bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
@@ -224,7 +224,7 @@ static int r6040_phy_write(void __iomem *ioaddr,
iowrite16(val, ioaddr + MMWD);
/* Write the command to the MDIO bus */
- iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
+ iowrite16(MDIO_WRITE | reg | (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the write bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
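The two hunks above replace arithmetic '+' with bitwise '|' when composing the MDIO command word: for disjoint bitfields both produce the same value, but OR states the intent and cannot carry into a neighbouring field if an operand is ever out of range. A small sketch (the opcode value is illustrative, not the driver's MDIO_READ):

	#include <stdint.h>

	#define OP_READ 0x2000u		/* illustrative opcode bit, assumed */

	static uint16_t mdio_cmd(uint16_t op, unsigned int reg, unsigned int phy)
	{
		/* OR disjoint fields: opcode, 5-bit register, PHY address at bit 8. */
		return (uint16_t)(op | (reg & 0x1fu) | ((phy & 0x1fu) << 8));
	}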
@@ -544,7 +544,7 @@ static int r6040_rx(struct net_device *dev, int limit)
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
- skb_put(skb_ptr, descptr->len - 4);
+ skb_put(skb_ptr, descptr->len - ETH_FCS_LEN);
dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, DMA_FROM_DEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
@@ -552,7 +552,7 @@ static int r6040_rx(struct net_device *dev, int limit)
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += descptr->len - 4;
+ dev->stats.rx_bytes += descptr->len - ETH_FCS_LEN;
/* put new skb into descriptor */
descptr->skb_ptr = new_skb;
@@ -943,6 +943,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
};
static const struct net_device_ops r6040_netdev_ops = {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4e44313b7651..9677e257e9a1 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -6,7 +6,7 @@
Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
Copyright 2001 Manfred Spraul [natsemi.c]
Copyright 1999-2001 by Donald Becker. [natsemi.c]
- Written 1997-2001 by Donald Becker. [8139too.c]
+ Written 1997-2001 by Donald Becker. [8139too.c]
Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
This software may be used and distributed according to the terms of
@@ -947,8 +947,8 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
/* The chip only need report frame silently dropped. */
spin_lock_irqsave(&cp->lock, flags);
- if (netif_running(dev) && netif_device_present(dev))
- __cp_get_stats(cp);
+ if (netif_running(dev) && netif_device_present(dev))
+ __cp_get_stats(cp);
spin_unlock_irqrestore(&cp->lock, flags);
return &dev->stats;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1e5a453dea14..f0608f050050 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -11,7 +11,7 @@
-----<snip>-----
- Written 1997-2001 by Donald Becker.
+ Written 1997-2001 by Donald Becker.
This software may be used and distributed according to the
terms of the GNU General Public License (GPL), incorporated
herein by reference. Drivers based on or derived from this
@@ -548,8 +548,8 @@ static const struct {
{ "RTL-8100",
HW_REVID(1, 1, 1, 1, 0, 1, 0),
- HasLWake,
- },
+ HasLWake,
+ },
{ "RTL-8100B/8139D",
HW_REVID(1, 1, 1, 0, 1, 0, 1),
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9e3b35c97e63..b6c849b258a0 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -497,8 +497,8 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad
{
if (length & 1)
{
- length++;
- pad_len++;
+ length++;
+ pad_len++;
}
outb(EOC+MAR, ioaddr + PAR_DATA);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 2c89cde7da1e..f744557c33a3 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -34,8 +34,6 @@
#include "r8169.h"
#include "r8169_firmware.h"
-#define MODULENAME "r8169"
-
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
@@ -1454,7 +1452,7 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
@@ -1671,7 +1669,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}
@@ -3510,7 +3508,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_pcie_state_l2l3_disable(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
}
DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -4117,6 +4114,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
padto = max_t(unsigned int, padto, ETH_ZLEN);
+ break;
default:
break;
}
@@ -5305,7 +5303,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
+ rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
if (rc < 0) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
return rc;
@@ -5440,7 +5438,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver rtl8169_pci_driver = {
- .name = MODULENAME,
+ .name = KBUILD_MODNAME,
.id_table = rtl8169_pci_tbl,
.probe = rtl_init_one,
.remove = rtl_remove_one,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4afff320dfd0..69c50f81e1cb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2047,13 +2047,6 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* Get base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
NUM_TX_QUEUE, NUM_RX_QUEUE);
if (!ndev)
@@ -2065,9 +2058,6 @@ static int ravb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- /* The Ether-specific entries in the device structure. */
- ndev->base_addr = res->start;
-
chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
if (chip_id == RCAR_GEN3)
@@ -2089,12 +2079,15 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
- priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
goto out_release;
}
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
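Both this hunk and the sh_eth one below collapse a platform_get_resource()/devm_ioremap_resource() pair into devm_platform_get_and_ioremap_resource(), which also hands back the resource so base-address bookkeeping can stay. A minimal probe-time sketch of the pattern:

	/* Sketch only; error handling abbreviated. */
	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* res->start stays available, e.g. for ndev->base_addr */
		return 0;
	}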
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c5b154868c1f..840478692a37 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
@@ -3225,9 +3225,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
int ret;
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev)
return -ENOMEM;
@@ -3245,7 +3242,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
- mdp->addr = devm_ioremap_resource(&pdev->dev, res);
+ mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mdp->addr)) {
ret = PTR_ERR(mdp->addr);
goto out_release;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 971f1e54b652..090bcd2fb758 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -789,7 +789,7 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
}
/**
- * sxgbe_tx_clean:
+ * sxgbe_tx_all_clean:
* @priv: driver private structure
* Description: it reclaims resources after transmission completes.
*/
@@ -1015,7 +1015,7 @@ static void sxgbe_tx_timer(struct timer_list *t)
}
/**
- * sxgbe_init_tx_coalesce: init tx mitigation options.
+ * sxgbe_tx_init_coalesce: init tx mitigation options.
* @priv: driver private structure
* Description:
* This inits the transmit coalesce parameters: i.e. timer rate,
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 65c98837ec45..16a4cbae9326 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -617,7 +617,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
break;
}
/*
- * ignore our own packets...
+ * ignore our own packets...
*/
if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) &&
!(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) {
@@ -672,7 +672,7 @@ done:
*/
if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
dev->stats.rx_dropped++;
- ether3_outw(next_ptr, REG_RECVPTR);
+ ether3_outw(next_ptr, REG_RECVPTR);
ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
@@ -690,11 +690,11 @@ static void ether3_tx(struct net_device *dev)
do {
unsigned long status;
- /*
+ /*
* Read the packet header
- */
+ */
ether3_setbuffer(dev, buffer_read, tx_tail * 0x600);
- status = ether3_readlong(dev);
+ status = ether3_readlong(dev);
/*
* Check to see if this packet has been transmitted
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c3f35da1b82a..e7e2223aebbf 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -370,9 +370,9 @@ static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
return 0;
}
-static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t link_control_flag_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
@@ -382,9 +382,9 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
? 1 : 0);
}
-static ssize_t efx_ef10_show_primary_flag(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t primary_flag_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
@@ -519,9 +519,8 @@ static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
mutex_unlock(&nic_data->vlan_lock);
}
-static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
- NULL);
-static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+static DEVICE_ATTR_RO(link_control_flag);
+static DEVICE_ATTR_RO(primary_flag);
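The renames above are what make DEVICE_ATTR_RO() usable: the macro defines dev_attr_<name> with 0444 permissions and wires it to a function that must be called <name>_show. A minimal sketch of the convention:

	/* DEVICE_ATTR_RO(foo) expects foo_show() and defines dev_attr_foo. */
	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);	/* illustrative value */
	}
	static DEVICE_ATTR_RO(foo);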
static int efx_ef10_probe(struct efx_nic *efx)
{
@@ -1070,7 +1069,8 @@ static int efx_ef10_probe_vf(struct efx_nic *efx)
/* If the parent PF has no VF data structure, it doesn't know about this
* VF so fail probe. The VF needs to be re-created. This can happen
- * if the PF driver is unloaded while the VF is assigned to a guest.
+ * if the PF driver was unloaded while any VF was assigned to a guest
+ * (Xen only).
*/
pci_dev_pf = efx->pci_dev->physfn;
if (pci_dev_pf) {
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 21fa6c0e8873..752d6406f07e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -122,8 +122,7 @@ static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
struct ef10_vf *vf = nic_data->vf + i;
/* If VF is assigned, do not free the vport */
- if (vf->pci_dev &&
- vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ if (vf->pci_dev && pci_is_dev_assigned(vf->pci_dev))
continue;
if (vf->vport_assigned) {
@@ -207,9 +206,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
return 0;
fail:
- efx_ef10_sriov_free_vf_vports(efx);
- kfree(nic_data->vf);
- nic_data->vf = NULL;
+ efx_ef10_sriov_free_vf_vswitching(efx);
return rc;
}
@@ -402,12 +399,17 @@ fail1:
return rc;
}
+/* Disable SR-IOV and remove VFs.
+ * If some VFs are attached to a guest (Xen only): with force=false
+ * nothing is done, while with force=true the vports of the VFs that
+ * are not attached are freed. In either case SR-IOV is not disabled
+ * and the VFs are not removed.
+ */
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
- unsigned int vfs_assigned = 0;
-
- vfs_assigned = pci_vfs_assigned(dev);
+ unsigned int vfs_assigned = pci_vfs_assigned(dev);
+ int rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +419,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
if (!vfs_assigned)
pci_disable_sriov(dev);
+ else
+ rc = -EBUSY;
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
- return 0;
+ return rc;
}
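Summarising the resulting contract for callers; the error used by the assigned-and-not-forced early return is elided above, so it is not asserted here:

	/*
	 *   rc = efx_ef10_pci_sriov_disable(efx, force);
	 *
	 *   no VFs assigned:              SR-IOV disabled, vswitching freed, rc == 0
	 *   VFs assigned, force == true:  vswitching freed for unassigned VFs only,
	 *                                 SR-IOV left enabled, rc == -EBUSY
	 *   VFs assigned, force == false: early return with an error (path elided)
	 */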
int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
@@ -439,24 +443,18 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- unsigned int i;
int rc;
if (!nic_data->vf) {
- /* Remove any un-assigned orphaned VFs */
+ /* Remove any un-assigned orphaned VFs. This can happen if the PF driver
+ * was unloaded while any VF was assigned to a guest (Xen only).
+ */
if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
pci_disable_sriov(efx->pci_dev);
return;
}
- /* Remove any VFs in the host */
- for (i = 0; i < efx->vf_count; ++i) {
- struct efx_nic *vf_efx = nic_data->vf[i].efx;
-
- if (vf_efx)
- vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
- }
-
+ /* Disable SR-IOV and remove any VFs in the host */
rc = efx_ef10_pci_sriov_disable(efx, true);
if (rc)
netif_dbg(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c746ca7235f1..37fcf2eb0741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -689,13 +689,13 @@ static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
-static ssize_t
-show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t phy_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
-static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+static DEVICE_ATTR_RO(phy_type);
static int efx_register_netdev(struct efx_nic *efx)
{
@@ -722,8 +722,7 @@ static int efx_register_netdev(struct efx_nic *efx)
efx->state = STATE_READY;
smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
- netif_err(efx, probe, efx->net_dev,
- "aborting probe due to scheduled reset\n");
+ pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
goto fail_locked;
}
@@ -990,8 +989,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx->type->init(efx);
up_write(&efx->filter_sem);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to initialise NIC\n");
+ pci_err(efx->pci_dev, "failed to initialise NIC\n");
goto fail3;
}
@@ -1038,8 +1036,8 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
if (efx->type->sriov_init) {
rc = efx->type->sriov_init(efx);
if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
+ rc);
}
/* Determine netdevice features */
@@ -1106,8 +1104,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail1;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare NIC detected\n");
+ pci_info(pci_dev, "Solarflare NIC detected\n");
if (!efx->type->is_vf)
efx_probe_vpd_strings(efx);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index de797e1ac5a9..896b59253197 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1160,8 +1160,9 @@ void efx_fini_io(struct efx_nic *efx)
}
#ifdef CONFIG_SFC_MCDI_LOGGING
-static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t mcdi_logging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -1169,8 +1170,9 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
-static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t mcdi_logging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -1180,7 +1182,7 @@ static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+static DEVICE_ATTR_RW(mcdi_logging);
void efx_init_mcdi_logging(struct efx_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 5e7a57b680ca..9ec752a43c75 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2254,12 +2254,12 @@ static struct notifier_block ef4_netdev_notifier = {
};
static ssize_t
-show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+phy_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
-static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+static DEVICE_ATTR_RO(phy_type);
static int ef4_register_netdev(struct ef4_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 729a05c1b0cf..2d2d8099011e 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -354,16 +354,16 @@ fail_on:
return rc;
}
-static ssize_t show_phy_flash_cfg(struct device *dev,
+static ssize_t phy_flash_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
-static ssize_t set_phy_flash_cfg(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t phy_flash_cfg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ef4_nic *efx = dev_get_drvdata(dev);
enum ef4_phy_mode old_mode, new_mode;
@@ -396,7 +396,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
return err ? err : count;
}
-static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
+static DEVICE_ATTR_RW(phy_flash_cfg);
static void sfe4001_fini(struct ef4_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 49df02ecee91..148dcd48b58d 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1668,13 +1668,17 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
*/
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min, total_tx_channels;
-
+ unsigned vi_count, total_tx_channels;
#ifdef CONFIG_SFC_SRIOV
- struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_nic_data *nic_data;
+ unsigned buftbl_min;
#endif
total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
+
+#ifdef CONFIG_SFC_SRIOV
+ nic_data = efx->nic_data;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
@@ -1682,9 +1686,6 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
-
-#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
if (efx->type->sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d1e908846f5d..22fbb0ae77fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx->pci_dev->irq);
goto fail1;
}
+ efx->irqs_hooked = true;
return 0;
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 17b8119c48e5..606750938b89 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -260,18 +260,14 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
s16 offset;
int err;
- rcu_read_lock();
- xdp_prog = rcu_dereference(efx->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
+ xdp_prog = rcu_dereference_bh(efx->xdp_prog);
+ if (!xdp_prog)
return true;
- }
rx_queue = efx_channel_get_rx_queue(channel);
if (unlikely(channel->rx_pkt_n_frags > 1)) {
/* We can't do XDP on fragmented packets - drop. */
- rcu_read_unlock();
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
if (net_ratelimit())
@@ -296,7 +292,6 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
- rcu_read_unlock();
offset = (u8 *)xdp.data - *ehp;
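The explicit lock pair can go because NAPI poll already runs in softirq context, which is an RCU-bh read-side critical section, so the program pointer is picked up with rcu_dereference_bh() instead. A hedged sketch of the pattern (run_xdp() is a hypothetical helper, not a driver function):

	static bool poll_one(struct efx_nic *efx)
	{
		/* Valid because the NAPI handler runs with BH disabled. */
		struct bpf_prog *prog = rcu_dereference_bh(efx->xdp_prog);

		if (!prog)
			return true;	/* no program: pass packet up the stack */

		return run_xdp(prog);	/* run_xdp(): hypothetical helper */
	}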
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 6eef0f45b133..2b29fd4cbdf4 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -835,6 +835,10 @@ static int ioc3eth_probe(struct platform_device *pdev)
int err;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
/* get mac addr from one wire prom */
if (ioc3eth_get_mac_addr(regs, mac_addr))
return -EPROBE_DEFER; /* not available yet */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 620c26f71be8..ca9c00b7f588 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -678,12 +678,12 @@ static int sis900_mii_probe(struct net_device *net_dev)
/* Reset phy if default phy is internal sis900 */
if ((sis_priv->mii->phy_id0 == 0x001D) &&
((sis_priv->mii->phy_id1&0xFFF0) == 0x8000))
- status = sis900_reset_phy(net_dev, sis_priv->cur_phy);
+ status = sis900_reset_phy(net_dev, sis_priv->cur_phy);
/* workaround for ICS1893 PHY */
if ((sis_priv->mii->phy_id0 == 0x0015) &&
((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
- mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
+ mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
if(status & MII_STAT_LINK){
while (poll_bit) {
@@ -727,7 +727,7 @@ static int sis900_mii_probe(struct net_device *net_dev)
static u16 sis900_default_phy(struct net_device * net_dev)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- struct mii_phy *phy = NULL, *phy_home = NULL,
+ struct mii_phy *phy = NULL, *phy_home = NULL,
*default_phy = NULL, *phy_lan = NULL;
u16 status;
@@ -1339,18 +1339,18 @@ static void sis900_timer(struct timer_list *t)
} else {
/* Link ON -> OFF */
if (!(status & MII_STAT_LINK)){
- netif_carrier_off(net_dev);
+ netif_carrier_off(net_dev);
if(netif_msg_link(sis_priv))
- printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
- /* Change mode issue */
- if ((mii_phy->phy_id0 == 0x001D) &&
- ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
- sis900_reset_phy(net_dev, sis_priv->cur_phy);
+ /* Change mode issue */
+ if ((mii_phy->phy_id0 == 0x001D) &&
+ ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
+ sis900_reset_phy(net_dev, sis_priv->cur_phy);
sis630_set_eq(net_dev, sis_priv->chipset_rev);
- goto LookForLink;
+ goto LookForLink;
}
}
@@ -2331,7 +2331,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
case IF_PORT_10BASE2: /* 10Base2 */
case IF_PORT_AUI: /* AUI */
case IF_PORT_100BASEFX: /* 100BaseFx */
- /* These Modes are not supported (are they?)*/
+ /* These Modes are not supported (are they?)*/
return -EOPNOTSUPP;
default:
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 4b2330deed47..bf7c8c8b1350 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -182,8 +182,8 @@ struct smc_local {
struct sk_buff * saved_skb;
/*
- . This keeps track of how many packets that I have
- . sent out. When an TX_EMPTY interrupt comes, I know
+ . This keeps track of how many packets that I have
+ . sent out. When an TX_EMPTY interrupt comes, I know
. that all of these have been sent.
*/
int packets_waiting;
@@ -343,7 +343,7 @@ static void smc_reset( int ioaddr )
/* Note: It doesn't seem that waiting for the MMU busy is needed here,
but this is a place where future chipsets _COULD_ break. Be wary
- of issuing another MMU command right after this */
+ of issuing another MMU command right after this */
outb( 0, ioaddr + INT_MASK );
}
@@ -521,9 +521,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
SMC_SELECT_BANK( 2 );
outw( MC_ALLOC | numPages, ioaddr + MMU_CMD );
/*
- . Performance Hack
+ . Performance Hack
.
- . wait a short amount of time.. if I can send a packet now, I send
+ . wait a short amount of time.. if I can send a packet now, I send
. it now. Otherwise, I enable an interrupt and wait for one to be
. available.
.
@@ -540,17 +540,17 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
if ( status & IM_ALLOC_INT ) {
/* acknowledge the interrupt */
outb( IM_ALLOC_INT, ioaddr + INTERRUPT );
- break;
+ break;
}
- } while ( -- time_out );
+ } while ( -- time_out );
- if ( !time_out ) {
+ if ( !time_out ) {
/* oh well, wait until the chip finds memory later */
SMC_ENABLE_INT( IM_ALLOC_INT );
PRINTK2((CARDNAME": memory allocation deferred.\n"));
/* it's deferred, but I'll handle it later */
return NETDEV_TX_OK;
- }
+ }
/* or YES! I can send the packet now.. */
smc_hardware_send_packet(dev);
netif_wake_queue(dev);
@@ -616,7 +616,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
#endif
/* send the packet length ( +6 for status, length and ctl byte )
- and the status word ( set to zeros ) */
+ and the status word ( set to zeros ) */
#ifdef USE_32_BIT
outl( (length +6 ) << 16 , ioaddr + DATA_1 );
#else
@@ -629,8 +629,8 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* send the actual data
. I _think_ it's faster to send the longs first, and then
. mop up by sending the last word. It depends heavily
- . on alignment, at least on the 486. Maybe it would be
- . a good idea to check which is optimal? But that could take
+ . on alignment, at least on the 486. Maybe it would be
+ . a good idea to check which is optimal? But that could take
. almost as much time as is saved?
*/
#ifdef USE_32_BIT
@@ -757,7 +757,7 @@ static int __init smc_findirq(int ioaddr)
outb( IM_ALLOC_INT, ioaddr + INT_MASK );
/*
- . Allocate 512 bytes of memory. Note that the chip was just
+ . Allocate 512 bytes of memory. Note that the chip was just
. reset so all the memory is available
*/
outw( MC_ALLOC | 1, ioaddr + MMU_CMD );
@@ -871,7 +871,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
goto err_out;
}
/* The above MIGHT indicate a device, but I need to write to further
- test this. */
+ test this. */
outw( 0x0, ioaddr + BANK_SELECT );
bank = inw( ioaddr + BANK_SELECT );
if ( (bank & 0xFF00 ) != 0x3300 ) {
@@ -879,7 +879,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
goto err_out;
}
/* well, we've already written once, so hopefully another time won't
- hurt. This time, I need to switch the bank register to bank 1,
+ hurt. This time, I need to switch the bank register to bank 1,
so I can access the base address register */
SMC_SELECT_BANK(1);
base_address_register = inw( ioaddr + BASE );
@@ -917,7 +917,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr;
/*
- . Get the MAC address ( bank 1, regs 4 - 9 )
+ . Get the MAC address ( bank 1, regs 4 - 9 )
*/
SMC_SELECT_BANK( 1 );
for ( i = 0; i < 6; i += 2 ) {
@@ -938,8 +938,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
/*
Now, I want to find out more about the chip. This is sort of
- redundant, but it's cleaner to have it in both, rather than having
- one VERY long probe procedure.
+ redundant, but it's cleaner to have it in both, rather than having
+ one VERY long probe procedure.
*/
SMC_SELECT_BANK(3);
revision_register = inw( ioaddr + REVISION );
@@ -967,7 +967,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
/*
. If dev->irq is 0, then the device has to be banged on to see
. what the IRQ is.
- .
+ .
. This banging doesn't always detect the IRQ, for unknown reasons.
. a workaround is to reset the chip and try again.
.
@@ -978,7 +978,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
.
. Specifying an IRQ is done with the assumption that the user knows
. what (s)he is doing. No checking is done!!!!
- .
+ .
*/
if ( dev->irq < 2 ) {
int trials;
@@ -1070,7 +1070,7 @@ static int smc_open(struct net_device *dev)
}
/*
- According to Becker, I have to set the hardware address
+ According to Becker, I have to set the hardware address
at this point, because the (l)user can set it with an
ioctl. Easily done...
*/
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cbde83f620a0..813ea941b91a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -671,19 +671,19 @@ smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
status = SMC_GET_INT(lp);
if (status & IM_ALLOC_INT) {
SMC_ACK_INT(lp, IM_ALLOC_INT);
- break;
+ break;
}
- } while (--poll_count);
+ } while (--poll_count);
smc_special_unlock(&lp->lock, flags);
lp->pending_tx_skb = skb;
- if (!poll_count) {
+ if (!poll_count) {
/* oh well, wait until the chip finds memory later */
netif_stop_queue(dev);
DBG(2, dev, "TX memory allocation deferred.\n");
SMC_ENABLE_INT(lp, IM_ALLOC_INT);
- } else {
+ } else {
/*
* Allocation succeeded: push packet to the chip's own memory
* immediately.
@@ -1790,7 +1790,7 @@ static int smc_findirq(struct smc_local *lp)
SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
/*
- * Allocate 512 bytes of memory. Note that the chip was just
+ * Allocate 512 bytes of memory. Note that the chip was just
* reset so all the memory is available
*/
SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);
@@ -1998,8 +1998,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/* Grab the IRQ */
retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
- if (retval)
- goto err_out;
+ if (retval)
+ goto err_out;
#ifdef CONFIG_ARCH_PXA
# ifdef SMC_USE_PXA_DMA
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dfc85cc68173..20d148c019d8 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -958,7 +958,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
- rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -1069,8 +1068,6 @@ next:
}
netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
- rcu_read_unlock();
-
return done;
}
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index fcbb4bb31408..5eb6bb4f7b6c 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* sni_ave.c - Socionext UniPhier AVE ethernet driver
* Copyright 2014 Panasonic Corporation
* Copyright 2015-2017 Socionext Inc.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7737e4d0bb9e..ac3c248d4f9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -66,6 +66,18 @@ config DWMAC_ANARION
This selects the Anarion SoC glue layer support for the stmmac driver.
+config DWMAC_INGENIC
+ tristate "Ingenic MAC support"
+ default MACH_INGENIC
+ depends on OF && HAS_IOMEM && (MACH_INGENIC || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on Ingenic SoCs.
+
+ This selects the Ingenic SoCs glue layer support for the stmmac
+ device driver. This driver is used for the MAC ethernet controller
+ on Ingenic SoCs.
+
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
@@ -238,6 +250,15 @@ config DWMAC_INTEL
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
+config DWMAC_LOONGSON
+ tristate "Loongson PCI DWMAC support"
+ default MACH_LOONGSON64
+ depends on STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ help
+ This selects the Loongson PCI bus support for the stmmac driver,
+ used for the ethernet controller on the Loongson-2K1000 SoC and
+ the LS7A1000 bridge.
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index f2e478b884b0..d4e12e9ace4f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -14,6 +14,7 @@ stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
@@ -36,4 +37,5 @@ dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
+obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 619e3c0760d6..5fecc83f175b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -503,8 +503,7 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
- const struct mdio_xpcs_ops *xpcs;
- struct mdio_xpcs_args xpcs_args;
+ struct dw_xpcs *xpcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
new file mode 100644
index 000000000000..9a6d819b84ae
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-ingenic.c - Ingenic SoCs DWMAC specific glue layer
+ *
+ * Copyright (c) 2021 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define MACPHYC_TXCLK_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_TXCLK_SEL_OUTPUT 0x1
+#define MACPHYC_TXCLK_SEL_INPUT 0x0
+#define MACPHYC_MODE_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_MODE_SEL_RMII 0x0
+#define MACPHYC_TX_SEL_MASK GENMASK(19, 19)
+#define MACPHYC_TX_SEL_ORIGIN 0x0
+#define MACPHYC_TX_SEL_DELAY 0x1
+#define MACPHYC_TX_DELAY_MASK GENMASK(18, 12)
+#define MACPHYC_RX_SEL_MASK GENMASK(11, 11)
+#define MACPHYC_RX_SEL_ORIGIN 0x0
+#define MACPHYC_RX_SEL_DELAY 0x1
+#define MACPHYC_RX_DELAY_MASK GENMASK(10, 4)
+#define MACPHYC_SOFT_RST_MASK GENMASK(3, 3)
+#define MACPHYC_PHY_INFT_MASK GENMASK(2, 0)
+#define MACPHYC_PHY_INFT_RMII 0x4
+#define MACPHYC_PHY_INFT_RGMII 0x1
+#define MACPHYC_PHY_INFT_GMII 0x0
+#define MACPHYC_PHY_INFT_MII 0x0
+
+#define MACPHYC_TX_DELAY_PS_MAX 2496
+#define MACPHYC_TX_DELAY_PS_MIN 20
+
+#define MACPHYC_RX_DELAY_PS_MAX 2496
+#define MACPHYC_RX_DELAY_PS_MIN 20
+
+enum ingenic_mac_version {
+ ID_JZ4775,
+ ID_X1000,
+ ID_X1600,
+ ID_X1830,
+ ID_X2000,
+};
+
+struct ingenic_mac {
+ const struct ingenic_soc_info *soc_info;
+ struct device *dev;
+ struct regmap *regmap;
+
+ int rx_delay;
+ int tx_delay;
+};
+
+struct ingenic_soc_info {
+ enum ingenic_mac_version version;
+ u32 mask;
+
+ int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ int ret;
+
+ if (mac->soc_info->set_mode) {
+ ret = mac->soc_info->set_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_GMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
+}
+
+static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+
+ if (mac->tx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+
+ if (mac->rx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
+
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
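A worked sketch of the delay encoding above: probe stores the device-tree picosecond value multiplied by 1000 (i.e. femtoseconds), the hardware steps in 19500 fs (19.5 ps) units, and the +9750 rounds to the nearest step before the -1 bias:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t x2000_delay_field(uint32_t delay_fs)
	{
		return (delay_fs + 9750u) / 19500u - 1u;
	}

	int main(void)
	{
		uint32_t ps = 1000;	/* tx-clk-delay-ps from the device tree */

		/* 1000 ps -> field 50; (50 + 1) * 19.5 ps = 994.5 ps effective */
		printf("field=%u\n", x2000_delay_field(ps * 1000u));
		return 0;
	}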
+static int ingenic_mac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct ingenic_mac *mac;
+ const struct ingenic_soc_info *data;
+ u32 tx_delay_ps, rx_delay_ps;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL);
+ if (!mac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No of match data provided\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ /* Get MAC PHY control register */
+ mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg");
+ if (IS_ERR(mac->regmap)) {
+ dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__);
+ ret = PTR_ERR(mac->regmap);
+ goto err_remove_config_dt;
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps >= MACPHYC_TX_DELAY_PS_MIN &&
+ tx_delay_ps <= MACPHYC_TX_DELAY_PS_MAX) {
+ mac->tx_delay = tx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-clk-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps >= MACPHYC_RX_DELAY_PS_MIN &&
+ rx_delay_ps <= MACPHYC_RX_DELAY_PS_MAX) {
+ mac->rx_delay = rx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ mac->soc_info = data;
+ mac->dev = &pdev->dev;
+
+ plat_dat->bsp_priv = mac;
+
+ ret = ingenic_mac_init(plat_dat);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ingenic_mac_suspend(struct device *dev)
+{
+ int ret;
+
+ ret = stmmac_suspend(dev);
+
+ return ret;
+}
+
+static int ingenic_mac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = ingenic_mac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_resume(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
+
+static struct ingenic_soc_info jz4775_soc_info = {
+ .version = ID_JZ4775,
+ .mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = jz4775_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1000_soc_info = {
+ .version = ID_X1000,
+ .mask = MACPHYC_SOFT_RST_MASK,
+
+ .set_mode = x1000_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1600_soc_info = {
+ .version = ID_X1600,
+ .mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1600_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1830_soc_info = {
+ .version = ID_X1830,
+ .mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1830_mac_set_mode,
+};
+
+static struct ingenic_soc_info x2000_soc_info = {
+ .version = ID_X2000,
+ .mask = MACPHYC_TX_SEL_MASK | MACPHYC_TX_DELAY_MASK | MACPHYC_RX_SEL_MASK |
+ MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x2000_mac_set_mode,
+};
+
+static const struct of_device_id ingenic_mac_of_matches[] = {
+ { .compatible = "ingenic,jz4775-mac", .data = &jz4775_soc_info },
+ { .compatible = "ingenic,x1000-mac", .data = &x1000_soc_info },
+ { .compatible = "ingenic,x1600-mac", .data = &x1600_soc_info },
+ { .compatible = "ingenic,x1830-mac", .data = &x1830_soc_info },
+ { .compatible = "ingenic,x2000-mac", .data = &x2000_soc_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
+
+static struct platform_driver ingenic_mac_driver = {
+ .probe = ingenic_mac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ingenic-mac",
+ .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .of_match_table = ingenic_mac_of_matches,
+ },
+};
+module_platform_driver(ingenic_mac_driver);
+
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic SoCs DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 80728a4c0e3f..8e8778cfbbad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -10,22 +10,6 @@
#include "stmmac.h"
#include "stmmac_ptp.h"
-#define INTEL_MGBE_ADHOC_ADDR 0x15
-#define INTEL_MGBE_XPCS_ADDR 0x16
-
-/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */
-#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
-#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
-#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
-#define PSE_PTP_CLK_FREQ_256MHZ (0)
-#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
-#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
-#define PCH_PTP_CLK_FREQ_200MHZ (0)
-
-/* Cross-timestamping defines */
-#define ART_CPUID_LEAF 0x15
-#define EHL_PSE_ART_MHZ 19200000
-
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
@@ -102,6 +86,22 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ /* Set the serdes rate and the PCLK rate */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_RATE_MASK;
+ data &= ~SERDES_PCLK_MASK;
+
+ if (priv->plat->max_speed == 2500)
+ data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
+ else
+ data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
/* assert clk_req */
data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_PLL_CLK;
@@ -230,6 +230,32 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
+static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* Determine the link speed mode: 2.5Gbps/1Gbps */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR);
+
+ if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
+ SERDES_LINK_MODE_2G5) {
+ dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
+ priv->plat->max_speed = 2500;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+ priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ } else {
+ priv->plat->max_speed = 1000;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ priv->plat->mdio_bus_data->xpcs_an_inband = true;
+ }
+}
+
/* Program PTP Clock Frequency for different variant of
* Intel mGBE that has slightly different GPO mapping
*/
@@ -429,6 +455,17 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
+ /* Multiplying factor applied to the clk_eee_i clock time
+ * period to bring it closer to 100 ns. The value should be
+ * programmed so that clk_eee_time_period *
+ * (MULT_FACT_100NS + 1) falls within 80 ns to 120 ns:
+ * the clk_eee frequency is 19.2 MHz,
+ * so clk_eee_time_period is 52 ns,
+ * and 52 ns * (1 + 1) = 104 ns,
+ * hence MULT_FACT_100NS = 1.
+ */
+ plat->mult_fact_100ns = 1;
+
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
for (i = 0; i < plat->rx_queues_to_use; i++) {
@@ -556,6 +593,17 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->clk_ptp_rate = 200000000;
+ plat->use_phy_wol = 1;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
return intel_mgbe_common_data(pdev, plat);
}
@@ -565,7 +613,7 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
@@ -618,6 +666,7 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse0_common_data(pdev, plat);
@@ -656,6 +705,7 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse1_common_data(pdev, plat);
@@ -672,6 +722,16 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
return intel_mgbe_common_data(pdev, plat);
}
@@ -680,6 +740,7 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -694,6 +755,7 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
{
plat->bus_id = 2;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -948,6 +1010,12 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!plat->dma_cfg)
return -ENOMEM;
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
/* Enable pci device */
ret = pcim_enable_device(pdev);
if (ret) {
@@ -1020,7 +1088,7 @@ err_alloc_irq:
/**
* intel_eth_pci_remove
*
- * @pdev: platform device pointer
+ * @pdev: pci device pointer
* Description: this function calls the main to free the net resources
* and releases the PCI resources.
*/
@@ -1050,6 +1118,7 @@ static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
return ret;
pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 542acb8ce467..0a37987478c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -9,6 +9,7 @@
#define POLL_DELAY_US 8
/* SERDES Register */
+#define SERDES_GCR 0x0 /* Global Configuration */
#define SERDES_GSR0 0x5 /* Global Status Reg0 */
#define SERDES_GCR0 0xb /* Global Configuration Reg0 */
@@ -17,8 +18,36 @@
#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */
#define SERDES_RST BIT(2) /* Serdes Reset */
#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/
+#define SERDES_RATE_MASK GENMASK(9, 8)
+#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
+#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
+#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
+#define SERDES_LINK_MODE_2G5 0x3
+#define SERDES_LINK_MODE_1G 0x2
+#define SERDES_PCLK_37p5MHZ 0x0
+#define SERDES_PCLK_70MHZ 0x1
+#define SERDES_RATE_PCIE_GEN1 0x0
+#define SERDES_RATE_PCIE_GEN2 0x1
+#define SERDES_RATE_PCIE_SHIFT 8
+#define SERDES_PCLK_SHIFT 12
+
+#define INTEL_MGBE_ADHOC_ADDR 0x15
+#define INTEL_MGBE_XPCS_ADDR 0x16
+
+/* Cross-timestamping defines */
+#define ART_CPUID_LEAF 0x15
+#define EHL_PSE_ART_MHZ 19200000
+
+/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */
+#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_256MHZ (0)
+#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_200MHZ (0)
#endif /* __DWMAC_INTEL_H__ */
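The SERDES_RATE/SERDES_PCLK additions are plain mask-and-shift pairs over GCR0. The values that intel_serdes_powerup() assembles with explicit shifts can equivalently be built with FIELD_PREP() from <linux/bitfield.h>; a sketch (hypothetical helper, not part of the patch):

#include <linux/bitfield.h>

/* Sketch: compose the GCR0 rate/PCLK bits for a 2.5G or 1G link,
 * equivalent to the shift-and-or code added to intel_serdes_powerup().
 */
static u32 intel_serdes_rate_bits(bool is_2500)
{
	if (is_2500)
		return FIELD_PREP(SERDES_RATE_MASK, SERDES_RATE_PCIE_GEN2) |
		       FIELD_PREP(SERDES_PCLK_MASK, SERDES_PCLK_37p5MHZ);

	return FIELD_PREP(SERDES_RATE_MASK, SERDES_RATE_PCIE_GEN1) |
	       FIELD_PREP(SERDES_PCLK_MASK, SERDES_PCLK_70MHZ);
}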
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
new file mode 100644
index 000000000000..e108b0d2bd28
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Loongson Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/of_irq.h>
+#include "stmmac.h"
+
+static int loongson_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+
+ plat->multicast_filter_bins = 256;
+ return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ bool mdio = false;
+ int ret, i;
+ struct device_node *np;
+
+ np = dev_of_node(&pdev->dev);
+
+ if (!np) {
+ pr_info("dwmac_loongson_pci: No OF node\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
+ pr_info("dwmac_loongson_pci: Incompatible OF node\n");
+ return -ENODEV;
+ }
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ mdio = true;
+ }
+
+ if (mdio) {
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
+ return ret;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+ break;
+ }
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+ plat->bus_id = pci_dev_id(pdev);
+
+ ret = device_get_phy_mode(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ return -ENODEV;
+ }
+ plat->phy_interface = ret;
+
+ plat->interface = PHY_INTERFACE_MODE_GMII;
+
+ pci_set_master(pdev);
+
+ loongson_default_data(plat);
+ pci_enable_msi(pdev);
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[0];
+
+ res.irq = of_irq_get_byname(np, "macirq");
+ if (res.irq < 0) {
+ dev_err(&pdev->dev, "IRQ macirq not found\n");
+ return -ENODEV;
+ }
+
+ res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res.wol_irq < 0) {
+ dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
+ res.wol_irq = res.irq;
+ }
+
+ res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res.lpi_irq < 0) {
+ dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+ return -ENODEV;
+ }
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+static void loongson_dwmac_remove(struct pci_dev *pdev)
+{
+ int i;
+
+ stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused loongson_dwmac_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
+ loongson_dwmac_resume);
+
+static const struct pci_device_id loongson_dwmac_id_table[] = {
+ { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
+
+static struct pci_driver loongson_dwmac_driver = {
+ .name = "dwmac-loongson-pci",
+ .id_table = loongson_dwmac_id_table,
+ .probe = loongson_dwmac_probe,
+ .remove = loongson_dwmac_remove,
+ .driver = {
+ .pm = &loongson_dwmac_pm_ops,
+ },
+};
+
+module_pci_driver(loongson_dwmac_driver);
+
+MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
+MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_LICENSE("GPL v2");
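loongson_dwmac_probe() maps the first BAR with a non-zero length and then reads slot 0 of the iomap table. Factored out, that scan looks roughly like the following (hypothetical map_first_bar() helper, indexing the table by the BAR that actually matched):

#include <linux/pci.h>

/* Sketch: map the first non-empty BAR with the managed PCI helpers and
 * return its mapping, or NULL if nothing usable was found.
 */
static void __iomem *map_first_bar(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)))
			return NULL;
		return pcim_iomap_table(pdev)[i];
	}
	return NULL;
}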
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 84382fc5cc4d..5c74b6279d69 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -454,7 +454,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -474,8 +473,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
}
ethqos->pdev = pdev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
- ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
+ ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
if (IS_ERR(ethqos->rgmii_base)) {
ret = PTR_ERR(ethqos->rgmii_base);
goto err_mem;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8d28a536e1bb..280ac0129572 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,11 +33,13 @@ struct rk_gmac_ops {
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ u32 regs[];
};
struct rk_priv_data {
struct platform_device *pdev;
phy_interface_t phy_iface;
+ int id;
struct regulator *regulator;
bool suspended;
const struct rk_gmac_ops *ops;
@@ -482,6 +484,54 @@ static const struct rk_gmac_ops rk3288_ops = {
.set_rmii_speed = rk3288_set_rmii_speed,
};
+#define RK3308_GRF_MAC_CON0 0x04a0
+
+/* RK3308_GRF_MAC_CON0 */
+#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
+ GRF_BIT(4))
+#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
+#define RK3308_GMAC_SPEED_100M GRF_BIT(0)
+
+static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3308_ops = {
+ .set_to_rmii = rk3308_set_to_rmii,
+ .set_rmii_speed = rk3308_set_rmii_speed,
+};
+
#define RK3328_GRF_MAC_CON0 0x0900
#define RK3328_GRF_MAC_CON1 0x0904
#define RK3328_GRF_MAC_CON2 0x0908
@@ -948,6 +998,107 @@ static const struct rk_gmac_ops rk3399_ops = {
.set_rmii_speed = rk3399_set_rmii_speed,
};
+#define RK3568_GRF_GMAC0_CON0 0x0380
+#define RK3568_GRF_GMAC0_CON1 0x0384
+#define RK3568_GRF_GMAC1_CON0 0x0388
+#define RK3568_GRF_GMAC1_CON1 0x038c
+
+/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
+#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3568_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RK3568_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+
+/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 con0, con1;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
+ RK3568_GRF_GMAC0_CON0;
+ con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
+ RK3568_GRF_GMAC0_CON1;
+
+ regmap_write(bsp_priv->grf, con0,
+ RK3568_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_RXCLK_DLY_ENABLE |
+ RK3568_GMAC_TXCLK_DLY_ENABLE);
+}
+
+static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 con1;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
+ RK3568_GRF_GMAC0_CON1;
+ regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rk3568_ops = {
+ .set_to_rgmii = rk3568_set_to_rgmii,
+ .set_to_rmii = rk3568_set_to_rmii,
+ .set_rgmii_speed = rk3568_set_gmac_speed,
+ .set_rmii_speed = rk3568_set_gmac_speed,
+ .regs = {
+ 0xfe2a0000, /* gmac0 */
+ 0xfe010000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
@@ -1216,6 +1367,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
{
struct rk_priv_data *bsp_priv;
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
const char *strings = NULL;
int value;
@@ -1227,6 +1379,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
bsp_priv->ops = ops;
+ /* Some SoCs have multiple MAC controllers, which need
+ * to be distinguished.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ int i = 0;
+
+ while (ops->regs[i]) {
+ if (ops->regs[i] == res->start) {
+ bsp_priv->id = i;
+ break;
+ }
+ i++;
+ }
+ }
+
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(bsp_priv->regulator)) {
if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
@@ -1294,11 +1462,36 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
+static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!bsp_priv->ops->set_to_rgmii)
+ return -EINVAL;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!bsp_priv->ops->set_to_rmii)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(&bsp_priv->pdev->dev,
+ "unsupported interface %d", bsp_priv->phy_iface);
+ }
+ return 0;
+}
+
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
int ret;
struct device *dev = &bsp_priv->pdev->dev;
+ ret = rk_gmac_check_ops(bsp_priv);
+ if (ret)
+ return ret;
+
ret = gmac_clk_enable(bsp_priv, true);
if (ret)
return ret;
@@ -1369,10 +1562,12 @@ static void rk_fix_speed(void *priv, unsigned int speed)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
+ if (bsp_priv->ops->set_rgmii_speed)
+ bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
break;
case PHY_INTERFACE_MODE_RMII:
- bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
+ if (bsp_priv->ops->set_rmii_speed)
+ bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
break;
default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
@@ -1400,7 +1595,11 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->has_gmac = true;
+ /* If the stmmac is not already selected as gmac4,
+ * then make sure we fall back to gmac.
+ */
+ if (!plat_dat->has_gmac4)
+ plat_dat->has_gmac = true;
plat_dat->fix_mac_speed = rk_fix_speed;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
@@ -1477,10 +1676,12 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3308-gmac", .data = &rk3308_ops },
{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
{ }
};
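The RK3308/RK3568 GRF writes rely on Rockchip's hi-word-mask register layout: the upper 16 bits of each 32-bit write enable the corresponding lower bits, so a field can be changed without a read-modify-write cycle. GRF_BIT(), GRF_CLR_BIT() and RK3568_GMAC_CLK_*_DL_CFG() are all built on the HIWORD_UPDATE() helper this file already defines, roughly:

/* Hi-word-mask update: value in the low half-word, write-enable mask
 * in the high half-word of the same 32-bit write.
 */
#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

For example, RK3568_GMAC_CLK_RX_DL_CFG(0x10) puts 0x10 into bits [14:8] (mask 0x7F at shift 8) and sets mask bits [30:24]; bits whose mask is clear keep their previous value, so no readback of the GRF register is needed.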
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b70d44ac0990..3c73453725f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -76,10 +76,10 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
/* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
- (reg * 8))
-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
- (reg * 8))
+#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+ 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+ 0x00000044 + (reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
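The old macros kept adding reg * 8 after switching to the 0x800 bank, so entry 16 landed at 0x880 rather than 0x800. The corrected arithmetic can be spot-checked with plain C asserts (host-side sketch, not kernel code):

#include <assert.h>

#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
			     0x00000040 + (reg * 8))

int main(void)
{
	assert(GMAC_ADDR_HIGH(0)  == 0x040);	/* first bank start */
	assert(GMAC_ADDR_HIGH(15) == 0x0b8);	/* first bank end */
	assert(GMAC_ADDR_HIGH(16) == 0x800);	/* old macro gave 0x880 */
	assert(GMAC_ADDR_HIGH(31) == 0x878);
	return 0;
}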
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f35c03c9f91e..67ba083eb90c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1358,6 +1358,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->link.speed1000 = 0;
+ mac->link.speed2500 = GMAC_CONFIG_FES;
mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index d8c6ff725237..9c2d40f853ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -183,7 +183,8 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
STAT_OFF(dma_errors), stats);
}
-int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_feat_cfg)
{
u32 value;
@@ -193,11 +194,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
/* 1. Enable Safety Features */
value = readl(ioaddr + MTL_ECC_CONTROL);
value |= MEEAO; /* MTL ECC Error Addr Status Override */
- value |= TSOEE; /* TSO ECC */
- value |= MRXPEE; /* MTL RX Parser ECC */
- value |= MESTEE; /* MTL EST ECC */
- value |= MRXEE; /* MTL RX FIFO ECC */
- value |= MTXEE; /* MTL TX FIFO ECC */
+ if (safety_feat_cfg->tsoee)
+ value |= TSOEE; /* TSO ECC */
+ if (safety_feat_cfg->mrxpee)
+ value |= MRXPEE; /* MTL RX Parser ECC */
+ if (safety_feat_cfg->mestee)
+ value |= MESTEE; /* MTL EST ECC */
+ if (safety_feat_cfg->mrxee)
+ value |= MRXEE; /* MTL RX FIFO ECC */
+ if (safety_feat_cfg->mtxee)
+ value |= MTXEE; /* MTL TX FIFO ECC */
writel(value, ioaddr + MTL_ECC_CONTROL);
/* 2. Enable MTL Safety Interrupts */
@@ -219,13 +225,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
/* 5. Enable Parity and Timeout for FSM */
value = readl(ioaddr + MAC_FSM_CONTROL);
- value |= PRTYEN; /* FSM Parity Feature */
- value |= TMOUTEN; /* FSM Timeout Feature */
+ if (safety_feat_cfg->prtyen)
+ value |= PRTYEN; /* FSM Parity Feature */
+ if (safety_feat_cfg->tmouten)
+ value |= TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + MAC_FSM_CONTROL);
/* 4. Enable Data Parity Protection */
value = readl(ioaddr + MTL_DPP_CONTROL);
- value |= EDPP;
+ if (safety_feat_cfg->edpp)
+ value |= EDPP;
writel(value, ioaddr + MTL_DPP_CONTROL);
/*
@@ -235,7 +244,8 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
if (asp <= 0x2)
return 0;
- value |= EPSI;
+ if (safety_feat_cfg->epsi)
+ value |= EPSI;
writel(value, ioaddr + MTL_DPP_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 6b2fd37b29ad..53c138d0ff48 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -137,7 +137,8 @@
#define GMAC_INT_FPE_EN BIT(17)
-int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index ad4df9bddcf3..c4d78fa93663 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -801,7 +801,9 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}
-static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+static int
+dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg)
{
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6d5e0f2b03ce..6dc1c98ebec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -348,7 +348,8 @@ struct stmmac_ops {
void (*pcs_rane)(void __iomem *ioaddr, bool restart);
void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
- int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp);
+ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
int (*safety_feat_irq_status)(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats);
@@ -612,18 +613,6 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
-/* XPCS callbacks */
-#define stmmac_xpcs_validate(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, validate, __args)
-#define stmmac_xpcs_config(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, config, __args)
-#define stmmac_xpcs_get_state(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, get_state, __args)
-#define stmmac_xpcs_link_up(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, link_up, __args)
-#define stmmac_xpcs_probe(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, probe, __args)
-
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
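The removed stmmac_xpcs_* wrappers were instances of the callback-dispatch macros hwif.h uses throughout, which NULL-check the ops struct and the named member before every indirect call; the shape is roughly as follows (a sketch of the surrounding file's pattern, member names assumed):

/* Sketch of the dispatch shape behind stmmac_do_callback(): fail with
 * -EINVAL unless both the ops struct and the callback are present.
 */
#define stmmac_do_callback_sketch(__priv, __module, __cname, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname(__args); \
	__result; \
})

Dropping the wrappers in favour of direct xpcs_validate()/xpcs_config_eee() calls works because priv->hw->xpcs is now a concrete struct dw_xpcs pointer that each call site checks explicitly.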
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b6cd43eda7ac..e735134e8487 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -75,7 +75,7 @@ struct stmmac_tx_queue {
unsigned int cur_tx;
unsigned int dirty_tx;
dma_addr_t dma_tx_phy;
- u32 tx_tail_addr;
+ dma_addr_t tx_tail_addr;
u32 mss;
};
@@ -311,6 +311,7 @@ enum stmmac_state {
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
+int stmmac_xpcs_setup(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
void stmmac_ptp_register(struct stmmac_priv *priv);
@@ -338,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
+ return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
- return 0;
+ return NET_SKB_PAD + NET_IP_ALIGN;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 61b11639ee0c..d0ce608b81c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -720,6 +720,14 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
netdev_warn(priv->dev,
"Setting EEE tx-lpi is not supported\n");
+ if (priv->hw->xpcs) {
+ ret = xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ edata->eee_enabled);
+ if (ret)
+ return ret;
+ }
+
if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 345b4c6d1fd4..8d9d6ecf8c63 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -931,6 +931,11 @@ static void stmmac_validate(struct phylink_config *config,
if ((max_speed > 0) && (max_speed < 1000)) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
} else if (priv->plat->has_xgmac) {
if (!max_speed || (max_speed >= 2500)) {
phylink_set(mac_supported, 2500baseT_Full);
@@ -996,29 +1001,14 @@ static void stmmac_validate(struct phylink_config *config,
linkmode_andnot(state->advertising, state->advertising, mask);
/* If PCS is supported, check which modes it supports. */
- stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
-}
-
-static void stmmac_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- state->link = 0;
- stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
+ if (priv->hw->xpcs)
+ xpcs_validate(priv->hw->xpcs, supported, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
-}
-
-static void stmmac_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
+ /* Nothing to do, xpcs_config() handles everything */
}
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
@@ -1031,8 +1021,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
if (is_up && *hs_enable) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
} else {
- *lo_state = FPE_EVENT_UNKNOWN;
- *lp_state = FPE_EVENT_UNKNOWN;
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
}
}
@@ -1060,8 +1050,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 ctrl;
- stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
-
ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl &= ~priv->hw->link.speed_mask;
@@ -1154,9 +1142,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.validate = stmmac_validate,
- .mac_pcs_get_state = stmmac_mac_pcs_get_state,
.mac_config = stmmac_mac_config,
- .mac_an_restart = stmmac_mac_an_restart,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
};
@@ -1196,7 +1182,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1222,14 +1207,19 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
return ret;
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
int mode = priv->plat->phy_interface;
struct phylink *phylink;
@@ -1237,8 +1227,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.pcs_poll = true;
- priv->phylink_config.ovr_an_inband =
- priv->plat->mdio_bus_data->xpcs_an_inband;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ mdio_bus_data->xpcs_an_inband;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1248,6 +1239,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (IS_ERR(phylink))
return PTR_ERR(phylink);
+ if (priv->hw->xpcs)
+ phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
+
priv->phylink = phylink;
return 0;
}
@@ -3169,7 +3163,8 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
if (priv->dma_cap.asp) {
netdev_info(priv->dev, "Enabling Safety Features\n");
- stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
+ priv->plat->safety_feat_cfg);
} else {
netdev_info(priv->dev, "No Safety Features support found\n");
}
@@ -3411,8 +3406,8 @@ static void stmmac_free_irq(struct net_device *dev,
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
- enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
@@ -3559,8 +3554,8 @@ irq_error:
static int stmmac_request_irq_single(struct net_device *dev)
{
- enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
int ret;
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -3570,7 +3565,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
irq_err = REQ_IRQ_ERR_MAC;
- return ret;
+ goto irq_error;
}
/* Request the Wake IRQ in case of another line
@@ -3584,7 +3579,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
"%s: ERROR: allocating the WoL IRQ %d (%d)\n",
__func__, priv->wol_irq, ret);
irq_err = REQ_IRQ_ERR_WOL;
- return ret;
+ goto irq_error;
}
}
@@ -3634,6 +3629,7 @@ static int stmmac_request_irq(struct net_device *dev)
int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
int bfsize = 0;
u32 chan;
int ret;
@@ -3646,7 +3642,8 @@ int stmmac_open(struct net_device *dev)
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
- priv->hw->xpcs_args.an_mode != DW_AN_C73) {
+ (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -4654,7 +4651,6 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
return res;
}
-/* This function assumes rcu_read_lock() is held by the caller. */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
struct bpf_prog *prog,
struct xdp_buff *xdp)
@@ -4696,17 +4692,14 @@ static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
struct bpf_prog *prog;
int res;
- rcu_read_lock();
-
prog = READ_ONCE(priv->xdp_prog);
if (!prog) {
res = STMMAC_XDP_PASS;
- goto unlock;
+ goto out;
}
res = __stmmac_xdp_run_prog(priv, prog, xdp);
-unlock:
- rcu_read_unlock();
+out:
return ERR_PTR(-res);
}
@@ -4976,10 +4969,8 @@ read_again:
buf->xdp->data_end = buf->xdp->data + buf1_len;
xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
- rcu_read_lock();
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
- rcu_read_unlock();
switch (res) {
case STMMAC_XDP_PASS:
@@ -5134,7 +5125,7 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page));
+ prefetch(page_address(buf->page) + buf->page_offset);
if (buf->sec_page)
prefetch(page_address(buf->sec_page));
@@ -5167,12 +5158,9 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
- xdp.data = page_address(buf->page) + buf->page_offset;
- xdp.data_end = xdp.data + buf1_len;
- xdp.data_hard_start = page_address(buf->page);
- xdp_set_data_meta_invalid(&xdp);
- xdp.frame_sz = buf_sz;
- xdp.rxq = &rx_q->xdp_rxq;
+ xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, false);
pre_len = xdp.data_end - xdp.data_hard_start -
buf->page_offset;
@@ -5888,12 +5876,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
ret = eth_mac_addr(ndev, addr);
if (ret)
- return ret;
+ goto set_mac_error;
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+set_mac_error:
+ pm_runtime_put(priv->device);
+
return ret;
}
@@ -6188,12 +6185,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6219,6 +6210,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6532,7 +6529,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* register (if supported).
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
- priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
+ !priv->plat->use_phy_wol;
priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
@@ -6841,6 +6839,11 @@ int stmmac_dvr_probe(struct device *device,
reset_control_reset(priv->plat->stmmac_rst);
}
+ ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
+ if (ret == -ENOTSUPP)
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
@@ -6992,6 +6995,15 @@ int stmmac_dvr_probe(struct device *device,
}
}
+ if (priv->plat->speed_mode_2500)
+ priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+ if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ ret = stmmac_xpcs_setup(priv->mii);
+ if (ret)
+ goto error_xpcs_setup;
+ }
+
ret = stmmac_phy_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
@@ -7028,6 +7040,7 @@ error_serdes_powerup:
unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
+error_xpcs_setup:
error_phy_setup:
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -7036,7 +7049,6 @@ error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
- stmmac_bus_clks_config(priv, false);
bitmap_free(priv->af_xdp_zc_qps);
return ret;
@@ -7073,6 +7085,7 @@ int stmmac_dvr_remove(struct device *dev)
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
pm_runtime_put(dev);
pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
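stmmac_set_mac_address() and stmmac_vlan_rx_kill_vid() now bracket the hardware access with the usual runtime-PM pair. Note that a failed pm_runtime_get_sync() has still incremented the usage count, hence pm_runtime_put_noidle() on the error path. Minimal sketch of the idiom (do_work() is a hypothetical stand-in):

#include <linux/pm_runtime.h>

/* Hypothetical stand-in for the guarded hardware access. */
static int do_work(struct device *dev)
{
	return 0;
}

static int runtime_resumed_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the failed get still took a usage-count reference */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = do_work(dev);

	pm_runtime_put(dev);
	return ret;
}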
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b750074f8f9c..a5d150c5f3d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -397,6 +397,41 @@ int stmmac_mdio_reset(struct mii_bus *bus)
return 0;
}
+int stmmac_xpcs_setup(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct mdio_device *mdiodev;
+ struct stmmac_priv *priv;
+ struct dw_xpcs *xpcs;
+ int mode, addr;
+
+ priv = netdev_priv(ndev);
+ mode = priv->plat->phy_interface;
+
+ /* Try to probe the XPCS by scanning all addresses. */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ mdiodev = mdio_device_create(bus, addr);
+ if (IS_ERR(mdiodev))
+ continue;
+
+ xpcs = xpcs_create(mdiodev, mode);
+ if (IS_ERR_OR_NULL(xpcs)) {
+ mdio_device_free(mdiodev);
+ continue;
+ }
+
+ priv->hw->xpcs = xpcs;
+ break;
+ }
+
+ if (!priv->hw->xpcs) {
+ dev_warn(priv->device, "No xPCS found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
/**
* stmmac_mdio_register
* @ndev: net device structure
@@ -444,14 +479,6 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- if (mdio_bus_data->has_xpcs) {
- priv->hw->xpcs = mdio_xpcs_get_ops();
- if (!priv->hw->xpcs) {
- err = -ENODEV;
- goto bus_register_fail;
- }
- }
-
if (mdio_bus_data->needs_reset)
new_bus->reset = &stmmac_mdio_reset;
@@ -503,30 +530,10 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}
- /* Try to probe the XPCS by scanning all addresses. */
- if (priv->hw->xpcs) {
- struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
- int ret, mode = priv->plat->phy_interface;
- max_addr = PHY_MAX_ADDR;
-
- xpcs->bus = new_bus;
-
- for (addr = 0; addr < max_addr; addr++) {
- xpcs->addr = addr;
-
- ret = stmmac_xpcs_probe(priv, xpcs, mode);
- if (!ret) {
- found = 1;
- break;
- }
- }
- }
-
if (!found && !mdio_node) {
dev_warn(dev, "No PHY found\n");
- mdiobus_unregister(new_bus);
- mdiobus_free(new_bus);
- return -ENODEV;
+ err = -ENODEV;
+ goto no_phy_found;
}
bus_register_done:
@@ -534,6 +541,8 @@ bus_register_done:
return 0;
+no_phy_found:
+ mdiobus_unregister(new_bus);
bus_register_fail:
mdiobus_free(new_bus);
return err;
@@ -551,6 +560,11 @@ int stmmac_mdio_unregister(struct net_device *ndev)
if (!priv->mii)
return 0;
+ if (priv->hw->xpcs) {
+ mdio_device_free(priv->hw->xpcs->mdiodev);
+ xpcs_destroy(priv->hw->xpcs);
+ }
+
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 95e0e4d6f74d..fcf17d8a0494 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -174,6 +174,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (!plat->dma_cfg)
return -ENOMEM;
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
@@ -203,6 +209,16 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 1;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1e17a23d9118..072eff8079d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -230,8 +230,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
- else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
- plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
else
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
@@ -602,6 +600,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
goto error_hw_init;
}
+ plat->stmmac_ahb_rst = devm_reset_control_get_optional_shared(
+ &pdev->dev, "ahb");
+ if (IS_ERR(plat->stmmac_ahb_rst)) {
+ ret = plat->stmmac_ahb_rst;
+ goto error_hw_init;
+ }
+
return plat;
error_hw_init:
@@ -622,6 +627,8 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
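The new "ahb" line uses the optional-shared reset API: when the devicetree simply omits the reset, the getter returns NULL, and the reset_control_*() operations accept NULL as a no-op, so only genuine lookup errors need handling. A sketch of the pattern (hypothetical helper):

#include <linux/err.h>
#include <linux/reset.h>

/* Sketch: fetch and deassert an optional, shared "ahb" reset line.
 * A missing line yields rst == NULL and the deassert is a no-op.
 */
static int deassert_ahb_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_optional_shared(dev, "ahb");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_deassert(rst);
}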
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 4e70efc45458..92dab609d4f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -573,10 +573,8 @@ static int tc_add_flow(struct stmmac_priv *priv,
for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
ret = tc_flow_parsers[i].fn(priv, cls, entry);
- if (!ret) {
+ if (!ret)
entry->in_use = true;
- continue;
- }
}
if (!entry->in_use)
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 54f45d8c79a7..981685c88308 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -486,7 +486,7 @@ page_err:
/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
- spin_lock(&cp->rx_inuse_lock);
+ spin_lock(&cp->rx_inuse_lock);
INIT_LIST_HEAD(&cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9790656cf970..cfb9e21b18b7 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1258,8 +1258,8 @@ static void gem_begin_auto_negotiation(struct gem *gp,
&advertising, ep->link_modes.advertising);
if (gp->phy_type != phy_mii_mdio0 &&
- gp->phy_type != phy_mii_mdio1)
- goto non_mii;
+ gp->phy_type != phy_mii_mdio1)
+ goto non_mii;
/* Setup advertise */
if (found_mii_phy(gp))
@@ -1410,7 +1410,7 @@ static int gem_set_link_modes(struct gem *gp)
if (gp->phy_type == phy_serialink ||
gp->phy_type == phy_serdes) {
- u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+ u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
pause = 1;
@@ -1892,7 +1892,7 @@ static void gem_init_mac(struct gem *gp)
static void gem_init_pause_thresholds(struct gem *gp)
{
- u32 cfg;
+ u32 cfg;
/* Calculate pause thresholds. Setting the OFF threshold to the
* full RX fifo size effectively disables PAUSE generation which
@@ -1914,15 +1914,15 @@ static void gem_init_pause_thresholds(struct gem *gp)
/* Configure the chip "burst" DMA mode & enable some
* HW bug fixes on Apple version
*/
- cfg = 0;
- if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+ cfg = 0;
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
- cfg |= GREG_CFG_IBURST;
+ cfg |= GREG_CFG_IBURST;
#endif
- cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
- cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
- writel(cfg, gp->regs + GREG_CFG);
+ cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
/* If Infinite Burst didn't stick, then use different
* thresholds (and Apple bug fixes don't exist)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 54b53dbdb33c..a2c1a404c52d 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2286,8 +2286,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
- int entry;
- u32 tx_flags;
+ int entry;
+ u32 tx_flags;
tx_flags = TXFLAG_OWN;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2301,7 +2301,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
spin_lock_irq(&hp->happy_lock);
- if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&hp->happy_lock);
printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 6a67b026df0b..718539cdd2f2 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1506,12 +1506,12 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
memset(tx_chn, 0, sizeof(*tx_chn));
}
}
@@ -1531,12 +1531,12 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
netif_napi_del(&tx_chn->napi_tx);
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
memset(tx_chn, 0, sizeof(*tx_chn));
}
}
@@ -1624,11 +1624,11 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
rx_chn = &common->rx_chns;
- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
-
if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 23cfb91e9c4d..9c29b363e9ae 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -84,7 +84,7 @@ static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
return 0;
}
-static int am65_cpsw_port_attr_set(struct net_device *ndev,
+static int am65_cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
@@ -302,7 +302,7 @@ static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
return 0;
}
-static int am65_cpsw_port_obj_add(struct net_device *ndev,
+static int am65_cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
@@ -329,7 +329,7 @@ static int am65_cpsw_port_obj_add(struct net_device *ndev,
return err;
}
-static int am65_cpsw_port_obj_del(struct net_device *ndev,
+static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
const struct switchdev_obj *obj)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 9caaae79fc95..c30a6e510aa3 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -1037,11 +1037,9 @@ static int am65_cpts_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am65_cpts *cpts;
- struct resource *res;
void __iomem *base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, "cpts");
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 6e72ecbe5cf7..e8f38e3f7706 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -206,7 +206,6 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
static int cpsw_phy_sel_probe(struct platform_device *pdev)
{
- struct resource *res;
const struct of_device_id *of_id;
struct cpsw_phy_sel_priv *priv;
@@ -223,8 +222,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->cpsw_phy_sel = of_id->data;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
- priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+ priv->gmii_sel = devm_platform_ioremap_resource_byname(pdev, "gmii-sel");
if (IS_ERR(priv->gmii_sel))
return PTR_ERR(priv->gmii_sel);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c0cd7de88316..cbbd0f665796 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -430,8 +430,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
- /* unmap page as no netstack skb page recycling */
- page_pool_release_page(pool, page);
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb, page, pool);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1532,8 +1532,7 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(dev, ss_res);
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
if (IS_ERR(ss_regs))
return PTR_ERR(ss_regs);
cpsw->regs = ss_regs;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index d828f856237a..0c75e0576ee1 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -70,7 +70,7 @@ enum {
};
/**
- * struct ale_dev_id - The ALE version/SoC specific configuration
+ * struct cpsw_ale_dev_id - The ALE version/SoC specific configuration
* @dev_id: ALE version/SoC id
* @features: features supported by ALE
* @tbl_entries: number of ALE entries
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 69b7a4e0220a..57d279fdcc9f 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -373,8 +373,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
- /* unmap page as no netstack skb page recycling */
- page_pool_release_page(pool, page);
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb, page, pool);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1883,8 +1883,7 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(dev, ss_res);
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs);
return ret;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 5862f0a4a975..ecc2a6b7e28f 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1328,13 +1328,9 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
struct bpf_prog *prog;
u32 act;
- rcu_read_lock();
-
prog = READ_ONCE(priv->xdp_prog);
- if (!prog) {
- ret = CPSW_XDP_PASS;
- goto out;
- }
+ if (!prog)
+ return CPSW_XDP_PASS;
act = bpf_prog_run_xdp(prog, xdp);
/* XDP prog might have changed packet data and boundaries */
@@ -1378,10 +1374,8 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
ndev->stats.rx_bytes += *len;
ndev->stats.rx_packets++;
out:
- rcu_read_unlock();
return ret;
drop:
- rcu_read_unlock();
page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
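
[Editor's note: the removed rcu_read_lock()/rcu_read_unlock() pair relied on this path running from NAPI poll with bottom halves disabled, which is itself an RCU read-side critical section on current kernels; READ_ONCE() still guards the program pointer against concurrent updates. A condensed sketch of the resulting shape, with a hypothetical priv struct and generic XDP return codes.]

#include <linux/bpf.h>
#include <linux/filter.h>

struct my_priv {
	struct bpf_prog *xdp_prog;
};

static u32 run_xdp_sketch(struct my_priv *priv, struct xdp_buff *xdp)
{
	struct bpf_prog *prog;

	/* Called from NAPI poll: BH is disabled, so no explicit
	 * rcu_read_lock() is needed around the program run.
	 */
	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return XDP_PASS;

	return bpf_prog_run_xdp(prog, xdp);
}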
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 05a64fb7a04f..f7fb6e17dadd 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -86,7 +86,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
return 0;
}
-static int cpsw_port_attr_set(struct net_device *ndev,
+static int cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
@@ -310,7 +310,7 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
return err;
}
-static int cpsw_port_obj_add(struct net_device *ndev,
+static int cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
@@ -338,7 +338,7 @@ static int cpsw_port_obj_add(struct net_device *ndev,
return err;
}
-static int cpsw_port_obj_del(struct net_device *ndev,
+static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
const struct switchdev_obj *obj)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f9417b44cae8..c674e34b6839 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1814,13 +1814,12 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
/* Get EMAC platform data */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
- priv->remap_addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->remap_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->remap_addr)) {
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res_ctrl) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9030e619e543..97942b0e3897 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
- dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
- name, ret);
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
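
[Editor's note: besides no longer using `ret` before it is assigned, the `%pe` format specifier prints an ERR_PTR symbolically (e.g. "-ENOMEM") rather than as a raw number. A tiny sketch; the failing call is a hypothetical stand-in.]

#include <linux/err.h>
#include <linux/printk.h>

static void report_sketch(void)
{
	void *q = ERR_PTR(-ENOMEM);	/* stand-in for a failed *_open() */

	/* %pe decodes error pointers by name; this prints "... -ENOMEM" */
	pr_err("could not open queue: %pe\n", q);
}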
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index fecc4d7b00b0..88426b5e410b 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1897,7 +1897,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
}
/**
- * tx_srv - transmit interrupt service
+ * velocity_tx_srv - transmit interrupt service
* @vptr: Velocity
*
* Scan the queues looking for transmitted packets that
@@ -2453,7 +2453,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
/**
- * velocity_get_status - statistics callback
+ * velocity_get_stats - statistics callback
* @dev: network device
*
* Callback from the network layer to allow driver statistics
@@ -3723,7 +3723,7 @@ static int __init velocity_init_module(void)
}
/**
- * velocity_cleanup - module unload
+ * velocity_cleanup_module - module unload
*
* When the velocity hardware is unloaded this function is called.
* It will clean up the notifiers and the unregister the PCI
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index ec5db481c9cd..811815f8cd3b 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -263,19 +263,14 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
static int w5100_mmio_init(struct net_device *ndev)
{
struct platform_device *pdev = to_platform_device(ndev->dev.parent);
- struct w5100_priv *priv = netdev_priv(ndev);
struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
- struct resource *mem;
spin_lock_init(&mmio_priv->reg_lock);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ mmio_priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mmio_priv->base))
return PTR_ERR(mmio_priv->base);
- netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
-
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a1f5f07f4ca9..60a4f79b8fa1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
+ /* Make sure that the other fields are read after bd is
+ * released by dma
+ */
+ rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
if (skb)
dev_consume_skb_irq(skb);
- cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+ /* app0 must be visible last, as it is used to flag
+ * availability of the bd
+ */
+ smp_mb();
+ cur_p->app0 = 0;
+
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
if (cur_p->app0)
return NETDEV_TX_BUSY;
+ /* Make sure to read next bd app0 after this one */
+ rmb();
+
tail++;
if (tail >= lp->tx_bd_num)
tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
smp_mb();
/* Space might have just been freed - check again */
- if (temac_check_tx_bd_space(lp, num_frag))
+ if (temac_check_tx_bd_space(lp, num_frag + 1))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
- ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ /* Mark last fragment with skb address, so it can be consumed
+ * in temac_start_xmit_done()
+ */
+ ptr_to_txbd((void *)skb, cur_p);
+
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,9 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
}
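
[Editor's note: the barriers added above enforce a producer/consumer protocol on the buffer-descriptor ring, where app0 acts as the ownership flag shared between the DMA engine and the transmit path; the patch also reserves one extra descriptor (num_frag + 1) and stops the queue when fewer than MAX_SKB_FRAGS + 1 BDs remain, so a worst-case frame always fits. A schematic of the ordering, with field names following the driver and the surrounding code elided.]

/* Completion side: app0 is read first, then the descriptor payload. */
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
	rmb();			/* read payload only after seeing CMPLT */
	/* ... unmap buffer, consume skb, accumulate stats ... */
	smp_mb();		/* all of the above before releasing the BD */
	cur_p->app0 = 0;	/* app0 == 0 marks the BD free for TX */
	/* ... advance to the next BD and re-read stat ... */
}

/* Submission side: descriptors are published before kicking DMA. */
wmb();				/* BD writes visible before the tail pointer */
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p);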
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b508c9453f40..13cd799541aa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1543,6 +1543,7 @@ static void axienet_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_MII:
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 10baseT_Full);
+ fallthrough;
default:
break;
}
@@ -1893,8 +1894,7 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
/* Map device registers */
- ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
if (IS_ERR(lp->regs)) {
ret = PTR_ERR(lp->regs);
goto cleanup_clk;
@@ -2009,9 +2009,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
lp->eth_irq = platform_get_irq_optional(pdev, 2);
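
[Editor's note: `fallthrough;` is the kernel's pseudo-keyword (mapping to __attribute__((fallthrough)) where the compiler supports it) that documents an intentional drop into the next case and silences -Wimplicit-fallthrough. A minimal illustration with hypothetical cases.]

static void validate_sketch(int interface)
{
	switch (interface) {
	case 1:			/* e.g. an MII-class mode */
		/* ... set mode-specific link capabilities ... */
		fallthrough;	/* deliberate: shared handling below */
	default:
		break;
	}
}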
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index d9d58a7dabee..b06377fe7293 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1189,9 +1189,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08lX mapped to 0x%08lX, irq=%d\n",
- (unsigned long __force)ndev->mem_start,
- (unsigned long __force)lp->base_addr, ndev->irq);
+ "Xilinx EmacLite at 0x%08lX mapped to 0x%p, irq=%d\n",
+ (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
return 0;
error:
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 2049d76a0e68..4f6db6f5c272 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1232,7 +1232,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pktlen < ETH_ZLEN)
{
if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
pktlen = ETH_ZLEN;
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index cb89323855d8..85c66af9e56d 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1425,7 +1425,6 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct eth_plat_info *plat;
struct net_device *ndev;
- struct resource *res;
struct port *port;
int err;
@@ -1482,10 +1481,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->id = plat->npe;
/* Get the port resource and remap */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- port->regs = devm_ioremap_resource(dev, res);
+ port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(port->regs))
return PTR_ERR(port->regs);
@@ -1531,8 +1527,8 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
} else {
phydev = mdiobus_get_phy(mdio_bus, plat->phy);
- if (IS_ERR(phydev)) {
- err = PTR_ERR(phydev);
+ if (!phydev) {
+ err = -ENODEV;
dev_err(dev, "could not connect phydev (%d)\n", err);
goto err_free_mem;
}
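
[Editor's note: mdiobus_get_phy() returns NULL when no PHY sits at the given address and never returns an ERR_PTR(), so the old IS_ERR() test let the failure go undetected. A sketch of the correct check; the function name is hypothetical.]

#include <linux/err.h>
#include <linux/phy.h>

static int attach_phy_sketch(struct mii_bus *bus, int addr)
{
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(bus, addr);
	if (!phydev)		/* NULL on absence; IS_ERR() would miss this */
		return -ENODEV;

	/* ... connect and configure phydev ... */
	return 0;
}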
diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index 35110c0c00a0..41107338f0c0 100644
--- a/drivers/net/fddi/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
@@ -379,17 +379,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
* if the payload is greater than zero.
* For the SBAPayload and the SBAOverhead we have the following
* unite quations
- * _ _
+ * _ _
* | bytes |
* SBAPayload = | 8000 ------ |
* | s |
* - -
- * _ _
+ * _ _
* | bytes |
* SBAOverhead = | ------ |
* | T-NEG |
* - -
- *
+ *
* T-NEG is described by the equation:
*
* (-) fddiMACT-NEG
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 78ae8ea4007c..0bbbd411d000 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -1025,7 +1025,7 @@ struct tx_queue {
#define PLC_QELM_A_BIST 0x5b6b /* BIST signature of QELM Rev. A */
/*
- FDDI board recources
+ FDDI board recources
+	FDDI board resources
*/
/*
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 466622664424..185c8a398681 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -90,16 +90,8 @@ static struct platform_driver fjes_driver = {
};
static struct resource fjes_resource[] = {
- {
- .flags = IORESOURCE_MEM,
- .start = 0,
- .end = 0,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = 0,
- .end = 0,
- },
+ DEFINE_RES_MEM(0, 1),
+ DEFINE_RES_IRQ(0)
};
static bool is_extended_socket_device(struct acpi_device *device)
@@ -1262,6 +1254,10 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->interrupt_watch_enable = false;
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
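
[Editor's note: DEFINE_RES_MEM()/DEFINE_RES_IRQ() expand to fully initialized struct resource entries, flags included, replacing the open-coded initializers; and since platform_get_resource() returns NULL rather than an ERR_PTR() when a resource is missing, the probe path now checks for that before dereferencing. A sketch with placeholder values.]

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource res_sketch[] = {
	DEFINE_RES_MEM(0x0, 0x1),	/* placeholder base/size, as above */
	DEFINE_RES_IRQ(0),
};

static int probe_sketch(struct platform_device *pdev)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)				/* NULL, not ERR_PTR, when absent */
		return -EINVAL;

	/* ... use r->start and resource_size(r) ... */
	return 0;
}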
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 39c00f050fbd..30e0a10595a1 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -201,6 +201,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
* calculate the transport header.
*/
skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
skb->dev = pctx->dev;
@@ -436,7 +437,7 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
- /* TODO: Suppport for extension header, sequence number and N-PDU.
+ /* TODO: Support for extension header, sequence number and N-PDU.
* Update the length field if any of them is available.
*/
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 80f41945709f..a15cc5e50290 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -716,11 +716,11 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
err = 0;
break;
- case SIOCSIFHWADDR: {
- char addr[AX25_ADDR_LEN];
+ case SIOCSIFHWADDR: {
+ char addr[AX25_ADDR_LEN];
- if (copy_from_user(&addr,
- (void __user *) arg, AX25_ADDR_LEN)) {
+ if (copy_from_user(&addr,
+ (void __user *)arg, AX25_ADDR_LEN)) {
err = -EFAULT;
break;
}
@@ -728,11 +728,9 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
netif_tx_lock_bh(dev);
memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
-
err = 0;
break;
}
-
default:
err = tty_mode_ioctl(tty, file, cmd, arg);
}
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index e4e4981ac1d2..4435a1195194 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -231,7 +231,7 @@ struct baycom_state {
#if 0
static inline void append_crc_ccitt(unsigned char *buffer, int len)
{
- unsigned int crc = 0xffff;
+ unsigned int crc = 0xffff;
for (;len>0;len--)
crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buffer++) & 0xff];
@@ -390,7 +390,7 @@ static void encode_hdlc(struct baycom_state *bc)
for (j = 0; j < 8; j++)
if (unlikely(!(notbitstream & (0x1f0 << j)))) {
bitstream &= ~(0x100 << j);
- bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) |
+ bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) |
((bitbuf & ~(((2 << j) << numbit) - 1)) << 1);
numbit++;
notbitstream = ~bitstream;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 1ad6085994b1..0e623c2e8b2d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -368,7 +368,7 @@ static int bpq_close(struct net_device *dev)
/* ------------------------------------------------------------------------ */
-
+#ifdef CONFIG_PROC_FS
/*
* Proc filesystem
*/
@@ -440,7 +440,7 @@ static const struct seq_operations bpq_seqops = {
.stop = bpq_seq_stop,
.show = bpq_seq_show,
};
-
+#endif
/* ------------------------------------------------------------------------ */
static const struct net_device_ops bpq_netdev_ops = {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 9e0058154ac3..cbaf1cdde7cb 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -74,7 +74,7 @@
static inline void append_crc_ccitt(unsigned char *buffer, int len)
{
- unsigned int crc = crc_ccitt(0xffff, buffer, len) ^ 0xffff;
+ unsigned int crc = crc_ccitt(0xffff, buffer, len) ^ 0xffff;
buffer += len;
*buffer++ = crc;
*buffer++ = crc >> 8;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 65154224d5b8..b99128669bc8 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -276,7 +276,7 @@ static void ax_bump(struct mkiss *ax)
*/
*ax->rbuff &= ~0x20;
}
- }
+ }
count = ax->rcount;
@@ -501,7 +501,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
default:
count = kiss_esc(p, ax->xbuff, len);
}
- }
+ }
spin_unlock_bh(&ax->buflock);
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
ax->tty = NULL;
unregister_netdev(ax->dev);
+ free_netdev(ax->dev);
}
/* Perform I/O control on an active ax25 channel. */
@@ -815,7 +816,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
dev = ax->dev;
switch (cmd) {
- case SIOCGIFNAME:
+ case SIOCGIFNAME:
err = copy_to_user((void __user *) arg, ax->dev->name,
strlen(ax->dev->name) + 1) ? -EFAULT : 0;
break;
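
[Editor's note: unregister_netdev() only detaches the device from the stack; the memory from alloc_netdev() must still be released, which the added free_netdev() does (the alternative is setting dev->needs_free_netdev so the core frees it). A sketch of the pairing.]

#include <linux/netdevice.h>

static void close_sketch(struct net_device *dev)
{
	unregister_netdev(dev);	/* detach from the networking core */
	free_netdev(dev);	/* release alloc_netdev() memory; leaked before */
}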
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 4690c6a59054..3f1edd0526a4 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1192,18 +1192,18 @@ static void t_tail(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
- del_timer(&scc->tx_wdog);
- scc_key_trx(scc, TX_OFF);
+ del_timer(&scc->tx_wdog);
+ scc_key_trx(scc, TX_OFF);
spin_unlock_irqrestore(&scc->lock, flags);
- if (scc->stat.tx_state == TXS_TIMEOUT) /* we had a timeout? */
- {
- scc->stat.tx_state = TXS_WAIT;
+ if (scc->stat.tx_state == TXS_TIMEOUT) /* we had a timeout? */
+ {
+ scc->stat.tx_state = TXS_WAIT;
scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
- return;
- }
-
- scc->stat.tx_state = TXS_IDLE;
+ return;
+ }
+
+ scc->stat.tx_state = TXS_IDLE;
netif_wake_queue(scc->dev);
}
@@ -1580,7 +1580,7 @@ static int scc_net_open(struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
- if (!scc->init)
+ if (!scc->init)
return -EINVAL;
scc->tx_buff = NULL;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5ab53e9942f3..d4911041596c 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -668,7 +668,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
}
yp->tx_len = skb->len - 1; /* strip KISS byte */
if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) {
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
break;
}
skb_copy_from_linear_data_offset(skb, 1,
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 442c520ab8f3..bc48855dff10 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -895,9 +895,16 @@ static inline u32 netvsc_rqstor_size(unsigned long ringbytes)
ringbytes / NETVSC_MIN_IN_MSG_SIZE;
}
+/* XFER PAGE packets can specify a maximum of 375 ranges for NDIS >= 6.0
+ * and a maximum of 64 ranges for NDIS < 6.0 with no RSC; with RSC, this
+ * limit is raised to 562 (= NVSP_RSC_MAX).
+ */
+#define NETVSC_MAX_XFER_PAGE_RANGES NVSP_RSC_MAX
#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
(offsetof(struct vmtransfer_page_packet_header, ranges) + \
(rng_cnt) * sizeof(struct vmtransfer_page_range))
+#define NETVSC_MAX_PKT_SIZE (NETVSC_XFER_HEADER_SIZE(NETVSC_MAX_XFER_PAGE_RANGES) + \
+ sizeof(struct nvsp_message) + (sizeof(u32) * VRSS_SEND_TAB_SIZE))
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
@@ -1163,6 +1170,7 @@ struct rndis_set_request {
u32 info_buflen;
u32 info_buf_offset;
u32 dev_vc_handle;
+ u8 info_buf[];
};
/* Response to NdisSetRequest */
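
[Editor's note: two things happen above — NETVSC_MAX_PKT_SIZE bounds the largest inbound VMBus packet from the worst-case transfer-page count, and info_buf[] turns the trailing payload of struct rndis_set_request into a C99 flexible array member so it can be addressed without pointer arithmetic. A generic sketch of the flexible-array idiom; the struct and helper are hypothetical.]

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct msg_sketch {
	u32 len;
	u8 payload[];	/* flexible array member: data follows the header */
};

static struct msg_sketch *msg_alloc_sketch(const void *data, u32 len)
{
	/* struct_size() computes header + payload with overflow checking */
	struct msg_sketch *m = kmalloc(struct_size(m, payload, len),
				       GFP_KERNEL);

	if (!m)
		return NULL;
	m->len = len;
	memcpy(m->payload, data, len);	/* no (void *)hdr + sizeof(*hdr) */
	return m;
}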
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9d07c9ce4be2..7bd935412853 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -757,7 +757,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
int queue_sends;
u64 cmd_rqst;
- cmd_rqst = vmbus_request_addr(&channel->requestor, (u64)desc->trans_id);
+ cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Incorrect transaction id\n");
return;
@@ -817,8 +817,8 @@ static void netvsc_send_completion(struct net_device *ndev,
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
- cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
- (u64)desc->trans_id);
+ cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
+ (u64)desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Invalid transaction id\n");
return;
@@ -1649,7 +1649,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
+ device->channel->next_request_id_callback = vmbus_next_request_id;
+ device->channel->request_addr_callback = vmbus_request_addr;
device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f682a5572d84..382bebc2420d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2384,6 +2384,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
dev_hold(vf_netdev);
rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+ if (ndev->needed_headroom < vf_netdev->needed_headroom)
+ ndev->needed_headroom = vf_netdev->needed_headroom;
+
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
@@ -2462,6 +2465,8 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
dev_put(vf_netdev);
+ ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
+
return NOTIFY_OK;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c0e89e107d57..f6c9c2a670f9 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1051,10 +1051,8 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
set = &request->request_msg.msg.set_req;
set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
set->info_buflen = sizeof(u32);
- set->info_buf_offset = sizeof(struct rndis_set_request);
-
- memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
- &new_filter, sizeof(u32));
+ set->info_buf_offset = offsetof(typeof(*set), info_buf);
+ memcpy(set->info_buf, &new_filter, sizeof(u32));
ret = rndis_filter_send_request(dev, request);
if (ret == 0) {
@@ -1259,7 +1257,11 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
+ new_sc->next_request_id_callback = vmbus_next_request_id;
+ new_sc->request_addr_callback = vmbus_request_addr;
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index da9135231c07..ebc976b7fcc2 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -480,7 +480,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
struct hwsim_edge *e;
u32 v0, v1;
- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
@@ -715,6 +715,8 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
return 0;
+sub_fail:
+ hwsim_edge_unsubscribe_me(phy);
me_fail:
rcu_read_lock();
list_for_each_entry_rcu(e, &phy->edges, list) {
@@ -722,8 +724,6 @@ me_fail:
hwsim_free_edge(e);
}
rcu_read_unlock();
-sub_fail:
- hwsim_edge_unsubscribe_me(phy);
return -ENOMEM;
}
@@ -824,12 +824,17 @@ err_pib:
static void hwsim_del(struct hwsim_phy *phy)
{
struct hwsim_pib *pib;
+ struct hwsim_edge *e;
hwsim_edge_unsubscribe_me(phy);
list_del(&phy->list);
rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy->edges, list) {
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ }
pib = rcu_dereference(phy->pib);
rcu_read_unlock();
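
[Editor's note: the label reordering matters because goto-based unwinding must run in reverse order of setup — the step taken last is the first thing undone — and the second hunk plugs a similar leak in hwsim_del() by freeing the edge list. A generic shape of the reverse-unwind pattern; all helpers are hypothetical.]

static int setup_sketch(void)
{
	int err;

	err = grab_a();			/* hypothetical setup steps */
	if (err)
		return err;

	err = grab_b();
	if (err)
		goto undo_a;

	err = grab_c();
	if (err)
		goto undo_b;		/* failed last: undo b, then a */

	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
	return err;
}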
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index b9be530b285f..ff83e00b77af 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -8,8 +8,8 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
static struct spi_driver mrf24j40_driver = {
.driver = {
- .of_match_table = of_match_ptr(mrf24j40_of_match),
+ .of_match_table = mrf24j40_of_match,
.name = "mrf24j40",
},
.id_table = mrf24j40_ids,
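
[Editor's note: dropping of_match_ptr() keeps the OF match table referenced even when CONFIG_OF is disabled (avoiding an unused-variable warning now that <linux/of.h> is gone) and lets ACPI systems match the table through PRP0001. A sketch of the resulting driver shape with hypothetical names.]

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-radio" },	/* hypothetical compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, my_of_match);

static struct spi_driver my_driver = {
	.driver = {
		.name = "my-radio",
		.of_match_table = my_of_match,	/* no of_match_ptr() wrapper */
	},
};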
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab7022582154..e9258a9f3702 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -16,10 +16,10 @@
by Patrick McHardy and then maintained by Andre Correa.
You need the tc action mirror or redirect to feed this device
- packets.
+ packets.
- Authors: Jamal Hadi Salim (2005)
+ Authors: Jamal Hadi Salim (2005)
*/
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 1efe1a88104b..506f8d5cd4ee 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -7,8 +7,9 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
- ipa_resource.o ipa_qmi.o ipa_qmi_msg.o
+ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
+ ipa_sysfs.o
-ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \
- ipa_data-v4.5.o ipa_data-v4.9.o \
- ipa_data-v4.11.o
+ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
+ ipa_data-v4.2.o ipa_data-v4.5.o \
+ ipa_data-v4.9.o ipa_data-v4.11.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index e374079603cf..427c68b2ad8f 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -210,13 +210,65 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- /* The inter-EE registers are in the non-adjusted address range */
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
+ /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
+ if (gsi->version > IPA_VERSION_3_1) {
+ u32 offset;
+
+ /* These registers are in the non-adjusted address range */
+ offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ }
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
+/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
+static int gsi_ring_setup(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 count;
+ u32 val;
+
+ if (gsi->version < IPA_VERSION_3_5_1) {
+ /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+
+ return 0;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
+ GSI_CHANNEL_COUNT_MAX, count);
+ count = GSI_CHANNEL_COUNT_MAX;
+ }
+ gsi->channel_count = count;
+
+ count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
+ GSI_EVT_RING_COUNT_MAX, count);
+ count = GSI_EVT_RING_COUNT_MAX;
+ }
+ gsi->evt_ring_count = count;
+
+ return 0;
+}
+
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
@@ -1827,43 +1879,21 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
- struct device *dev = gsi->dev;
u32 val;
+ int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
if (!(val & ENABLED_FMASK)) {
- dev_err(dev, "GSI has not been enabled\n");
+ dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
}
gsi_irq_setup(gsi); /* No matching teardown required */
- val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
-
- gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
- if (!gsi->channel_count) {
- dev_err(dev, "GSI reports zero channels supported\n");
- return -EINVAL;
- }
- if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
- dev_warn(dev,
- "limiting to %u channels; hardware supports %u\n",
- GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
- gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
- }
-
- gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
- if (!gsi->evt_ring_count) {
- dev_err(dev, "GSI reports zero event rings supported\n");
- return -EINVAL;
- }
- if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
- dev_warn(dev,
- "limiting to %u event rings; hardware supports %u\n",
- GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
- gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
- }
+ ret = gsi_ring_setup(gsi); /* No matching teardown required */
+ if (ret)
+ return ret;
/* Initialize the error log */
iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
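
[Editor's note: gsi_ring_setup() isolates the read-and-clamp logic — on hardware too old to have HW_PARAM_2 it assumes the driver maxima, otherwise it extracts each count with u32_get_bits(), rejects zero, and clamps to what the driver supports. The clamp pattern in isolation; the register, field mask, and limit below are hypothetical.]

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

static int read_count_sketch(void __iomem *reg, u32 *out)
{
	u32 val = ioread32(reg);		/* a HW_PARAM-style register */
	u32 count = u32_get_bits(val, GENMASK(4, 0));	/* hypothetical field */

	if (!count)
		return -EINVAL;		/* zero means broken HW/firmware */
	if (count > 23)			/* driver-supported maximum */
		count = 23;		/* real code warns before clamping */

	*out = count;
	return 0;
}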
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index d5996bdb20ef..81cd7b07f6e1 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -17,7 +17,7 @@
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 23
-#define GSI_EVT_RING_COUNT_MAX 20
+#define GSI_EVT_RING_COUNT_MAX 24
/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX 64
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index cb42c5ae86fa..bf9593d9eaea 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -52,7 +52,8 @@
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
-/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
+
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index e7ff376cb5b7..744406832a77 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -58,6 +58,7 @@ enum ipa_flag {
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem_count: Number of entries in the mem array
* @mem: Array of IPA-local memory region descriptors
* @imem_iova: I/O virtual address of IPA region in IMEM
* @imem_size: Size of IMEM region
@@ -103,6 +104,7 @@ struct ipa {
void *mem_virt;
u32 mem_offset;
u32 mem_size;
+ u32 mem_count;
const struct ipa_mem *mem;
unsigned long imem_iova;
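
[Editor's note: mem_count pairs with the conversion, in the ipa_data-v* files below, from designated array indices to explicit .id members: regions a given SoC lacks are simply omitted, so lookups walk the array by id instead of indexing it. A minimal lookup consistent with that layout — the real ipa_mem_find() lives in ipa_mem.c and may differ.]

static const struct ipa_mem *ipa_mem_find_sketch(struct ipa *ipa,
						 enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;	/* region not defined in this SoC's memory map */
}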
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 525cdf28d9ea..af44ca41189e 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -200,41 +200,55 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
- const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
+ u32 offset;
u32 size;
- /* In ipa_cmd_hdr_init_local_add() we record the offset and size
- * of the header table memory area. Make sure the offset and size
- * fit in the fields that need to hold them, and that the entire
- * range is within the overall IPA memory range.
+ /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
+ * the header table memory area in an immediate command. Make sure
+ * the offset and size fit in the fields that need to hold them, and
+ * that the entire range is within the overall IPA memory range.
*/
offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
- if (mem->offset > offset_max ||
- ipa->mem_offset > offset_max - mem->offset) {
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+
+ /* The header memory area contains both the modem and AP header
+ * regions. The modem portion defines the address of the region.
+ */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+
+ /* Make sure the offset fits in the IPA command */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "header table region offset too large\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- ipa->mem_offset, mem->offset, offset_max);
+ ipa->mem_offset, offset, offset_max);
return false;
}
- size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ /* Add the size of the AP portion (if defined) to the combined size */
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
+ /* Make sure the combined size fits in the IPA command */
if (size > size_max) {
dev_err(dev, "header table region size too large\n");
dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
return false;
}
- if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+
+ /* Make sure the entire combined area fits in IPA memory */
+ if (size > ipa->mem_size || offset > ipa->mem_size - size) {
dev_err(dev, "header table region out of range\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- mem->offset, size, ipa->mem_size);
+ offset, size, ipa->mem_size);
return false;
}
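
[Editor's note: the bounds checks above are written as `offset > offset_max || ipa->mem_offset > offset_max - offset`, rejecting `mem_offset + offset > offset_max` without ever computing the sum, so a u32 wraparound cannot produce a false pass. The idiom in isolation.]

/* Overflow-safe test for a + b > max on unsigned values */
static bool exceeds_sketch(u32 a, u32 b, u32 max)
{
	return a > max || b > max - a;	/* never evaluates a + b directly */
}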
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
new file mode 100644
index 000000000000..4c28189462a7
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
+ IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.1 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL = 0,
+ IPA_RSRC_GROUP_SRC_DL,
+ IPA_RSRC_GROUP_SRC_DIAG,
+ IPA_RSRC_GROUP_SRC_DMA,
+ IPA_RSRC_GROUP_SRC_UNUSED,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL = 0,
+ IPA_RSRC_GROUP_DST_DL,
+ IPA_RSRC_GROUP_DST_DIAG_DPL,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
+ IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 8,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 2,
+ .max_reads = 8,
+ },
+};
+
+/* Endpoint data for an SoC having IPA v3.1 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 22,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 18,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 15,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 8,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_DL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 9,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 5,
+ .endpoint_id = 18,
+ .toward_ipa = false,
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 2, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5, /* 3 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5, /* 7 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 3, .max = 3, /* 2 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1, /* 0 downstream */
+ },
+ /* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 1, .max = 1,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v3.1 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x0bd8,
+ .size = 0x1424,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v3.1 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
+/* Interconnect bandwidths are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ {
+ .name = "imem",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 80000, /* 80 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
+static const struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 16 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v3.1 */
+const struct ipa_data ipa_data_v3_1 = {
+ .version = IPA_VERSION_3_1,
+ .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
+};
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index ead1a82f32f5..af536ef8c120 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -271,77 +271,92 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index 05806ceae8b5..9353efbd504f 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -220,112 +220,134 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 8744f19c6401..3b09b7baa95f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -219,92 +219,110 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 5f67a3a909ee..a99b6478fa3a 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -265,117 +265,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.5 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index e41be790f45e..798d43e1eb13 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -263,116 +263,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288,
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 5c4c8d72d7d8..5bc244c8f94e 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -300,6 +300,7 @@ struct ipa_data {
const struct ipa_clock_data *clock_data;
};
+extern const struct ipa_data ipa_data_v3_1;
extern const struct ipa_data ipa_data_v3_5_1;
extern const struct ipa_data ipa_data_v4_2;
extern const struct ipa_data ipa_data_v4_5;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ccc99ad983eb..ab02669bae4e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -75,8 +75,6 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
-#ifdef IPA_VALIDATE
-
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -88,11 +86,6 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (ipa_gsi_endpoint_data_empty(data))
return true;
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- if (ipa->version >= IPA_VERSION_4_5)
- if (data->endpoint.config.checksum)
- return false;
-
if (!data->toward_ipa) {
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
@@ -230,27 +223,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return true;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
- const struct ipa_gsi_endpoint_data *data)
-{
- const struct ipa_gsi_endpoint_data *dp = data;
- enum ipa_endpoint_name name;
-
- if (ipa->version < IPA_VERSION_4_5)
- return true;
-
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- for (name = 0; name < count; name++, dp++)
- if (data->endpoint.config.checksum)
- return false;
-
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
u32 tre_count)
@@ -457,28 +429,34 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ enum ipa_cs_offload_en enabled;
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->data->checksum) {
+ enum ipa_version version = endpoint->ipa->version;
+
if (endpoint->toward_ipa) {
u32 checksum_offset;
- val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
- CS_OFFLOAD_EN_FMASK);
/* Checksum header offset is in 4-byte units */
checksum_offset = sizeof(struct rmnet_map_header);
checksum_offset /= sizeof(u32);
val |= u32_encode_bits(checksum_offset,
CS_METADATA_HDR_OFFSET_FMASK);
+
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_UL
+ : IPA_CS_OFFLOAD_INLINE;
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
- CS_OFFLOAD_EN_FMASK);
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_DL
+ : IPA_CS_OFFLOAD_INLINE;
}
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
- CS_OFFLOAD_EN_FMASK);
+ enabled = IPA_CS_OFFLOAD_NONE;
}
+ val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
/* CS_GEN_QMB_MASTER_SEL is 0 */
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -498,6 +476,27 @@ static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static u32
+ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
+{
+ u32 header_size = sizeof(struct rmnet_map_header);
+
+ /* Without checksum offload, we just have the MAP header */
+ if (!endpoint->data->checksum)
+ return header_size;
+
+ if (version < IPA_VERSION_4_5) {
+ /* Checksum header inserted for AP TX endpoints only */
+ if (endpoint->toward_ipa)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+ } else {
+ /* Checksum header is used in both directions */
+ header_size += sizeof(struct rmnet_map_v5_csum_header);
+ }
+
+ return header_size;
+}
+
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -526,13 +525,11 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
u32 val = 0;
if (endpoint->data->qmap) {
- size_t header_size = sizeof(struct rmnet_map_header);
enum ipa_version version = ipa->version;
+ size_t header_size;
- /* We might supply a checksum header after the QMAP header */
- if (endpoint->toward_ipa && endpoint->data->checksum)
- header_size += sizeof(struct rmnet_map_ul_csum_header);
- val |= ipa_header_size_encoded(version, header_size);
+ header_size = ipa_qmap_header_size(version, endpoint);
+ val = ipa_header_size_encoded(version, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
@@ -1734,6 +1731,21 @@ int ipa_endpoint_config(struct ipa *ipa)
u32 max;
u32 val;
+ /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
+ * Furthermore, the endpoints were not grouped such that TX
+ * endpoint numbers started with 0 and RX endpoints had numbers
+ * higher than all TX endpoints, so we can't do the simple
+ * direction check used for newer hardware below.
+ *
+ * For hardware that doesn't support the FLAVOR_0 register,
+ * just set the available mask to support any endpoint, and
+ * assume the configuration is valid.
+ */
+ if (ipa->version < IPA_VERSION_3_5) {
+ ipa->available = ~0;
+ return 0;
+ }
+
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
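
The hunks above centralize the RMNet (QMAP) header sizing that ipa_endpoint_init_hdr() used to compute inline: before IPA v4.5 a separate uplink checksum header follows the MAP header on TX endpoints only, while v4.5 and later insert a MAPv5 checksum header in both directions. A standalone sketch of that decision (plain C; the byte sizes stand in for the real rmnet_map_* structures and are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAP_HDR_SIZE		4	/* stands in for rmnet_map_header */
#define UL_CSUM_HDR_SIZE	4	/* pre-v4.5 TX checksum header */
#define V5_CSUM_HDR_SIZE	4	/* v4.5+ inline checksum header */

static unsigned int qmap_header_size(bool checksum, bool toward_ipa,
				     bool ipa_v4_5_plus)
{
	unsigned int size = MAP_HDR_SIZE;

	if (!checksum)
		return size;

	if (!ipa_v4_5_plus) {
		if (toward_ipa)		/* TX only before v4.5 */
			size += UL_CSUM_HDR_SIZE;
	} else {
		size += V5_CSUM_HDR_SIZE;	/* both directions */
	}

	return size;
}

int main(void)
{
	printf("pre-v4.5 RX: %u\n", qmap_header_size(true, false, false));
	printf("v4.5+   RX: %u\n", qmap_header_size(true, false, true));
	return 0;
}
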
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 9915603ed10b..9810c61a0320 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -31,6 +31,7 @@
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
+#include "ipa_sysfs.h"
/**
* DOC: The IP Accelerator
@@ -399,16 +400,20 @@ static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
/* Implement some hardware workarounds */
if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Enable open global clocks (not needed for IPA v4.5) */
- val = GLOBAL_FMASK;
- val |= GLOBAL_2X_CLK_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
/* Disable PA mask to allow HOLB drop */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+
+ /* Enable open global clocks in the CLKON configuration */
+ val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
+ } else if (version == IPA_VERSION_3_1) {
+ val = MISC_FMASK; /* Disable MISC clock gating */
+ } else {
+ val = 0; /* No CLKON configuration needed */
}
+ if (val)
+ iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
ipa_hardware_config_comp(ipa);
@@ -529,6 +534,7 @@ static int ipa_firmware_load(struct device *dev)
}
ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
@@ -573,6 +579,10 @@ out_release_firmware:
static const struct of_device_id ipa_match[] = {
{
+ .compatible = "qcom,msm8998-ipa",
+ .data = &ipa_data_v3_1,
+ },
+ {
.compatible = "qcom,sdm845-ipa",
.data = &ipa_data_v3_5_1,
},
@@ -639,6 +649,27 @@ static void ipa_validate_build(void)
#endif /* IPA_VALIDATE */
}
+static bool ipa_version_valid(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_0:
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
@@ -676,11 +707,15 @@ static int ipa_probe(struct platform_device *pdev)
/* Get configuration data early; needed for clock initialization */
data = of_device_get_match_data(dev);
if (!data) {
- /* This is really IPA_VALIDATE (should never happen) */
dev_err(dev, "matched hardware not supported\n");
return -ENODEV;
}
+ if (!ipa_version_valid(data->version)) {
+ dev_err(dev, "invalid IPA version\n");
+ return -EINVAL;
+ }
+
/* If we need Trust Zone, make sure it's available */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
if (!modem_init)
@@ -881,6 +916,13 @@ static const struct dev_pm_ops ipa_pm_ops = {
.resume = ipa_resume,
};
+static const struct attribute_group *ipa_attribute_groups[] = {
+ &ipa_attribute_group,
+ &ipa_feature_attribute_group,
+ &ipa_modem_attribute_group,
+ NULL,
+};
+
static struct platform_driver ipa_driver = {
.probe = ipa_probe,
.remove = ipa_remove,
@@ -889,6 +931,7 @@ static struct platform_driver ipa_driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
.of_match_table = ipa_match,
+ .dev_groups = ipa_attribute_groups,
},
};
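
The reworked CLKON_CFG hunk above turns the workaround handling into a "collect bits, then write once if anything is set" pattern, which lets the new IPA v3.1 MISC clock-gating workaround share the register write with the existing v4.0..v4.2 one. A standalone sketch of the pattern (plain C; bit positions and the version encoding are illustrative):

#include <stdio.h>

#define GLOBAL_FMASK		(1u << 27)	/* illustrative positions */
#define GLOBAL_2X_CLK_FMASK	(1u << 28)
#define MISC_FMASK		(1u << 26)

enum version { V3_1 = 31, V4_0 = 40, V4_5 = 45 };

static unsigned int clkon_workaround_bits(enum version version)
{
	if (version >= V4_0 && version < V4_5)
		return GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
	if (version == V3_1)
		return MISC_FMASK;	/* disable MISC clock gating */
	return 0;			/* no CLKON workaround needed */
}

int main(void)
{
	unsigned int val = clkon_workaround_bits(V3_1);

	if (val)	/* only touch the register when needed */
		printf("write CLKON_CFG = 0x%08x\n", val);
	return 0;
}
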
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index c5c3b1b7e67d..4337b0920d3d 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,11 +26,26 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->id == mem_id)
+ return mem;
+ }
+
+ return NULL;
+}
+
/* Add an immediate command to a transaction that zeroes a memory region */
static void
-ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
@@ -60,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
@@ -74,39 +90,136 @@ int ipa_mem_setup(struct ipa *ipa)
return -EBUSY;
}
- /* Initialize IPA-local header memory. The modem and AP header
- * regions are contiguous, and initialized together.
+ /* Initialize IPA-local header memory. The AP header region, if
+ * present, is contiguous with and follows the modem header region,
+ * and they are initialized together.
*/
- offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+ offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
return 0;
}
-#ifdef IPA_VALIDATE
+/* Is the given memory region ID valid for the current IPA version? */
+static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ enum ipa_version version = ipa->version;
+
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_AP_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ case IPA_MEM_UC_EVENT_RING:
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_QUOTA_AP:
+ case IPA_MEM_END_MARKER: /* pseudo region */
+ break;
+
+ case IPA_MEM_STATS_TETHERING:
+ case IPA_MEM_STATS_DROP:
+ if (version < IPA_VERSION_4_0)
+ return false;
+ break;
+
+ case IPA_MEM_STATS_V4_FILTER:
+ case IPA_MEM_STATS_V6_FILTER:
+ case IPA_MEM_STATS_V4_ROUTE:
+ case IPA_MEM_STATS_V6_ROUTE:
+ if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
+ return false;
+ break;
+
+ case IPA_MEM_NAT_TABLE:
+ case IPA_MEM_STATS_FILTER_ROUTE:
+ if (version < IPA_VERSION_4_5)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/* Must the given memory region be present in the configuration? */
+static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ return true;
+
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0;
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+ default:
+ return false; /* Anything else is optional */
+ }
+}
+
+static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- const struct ipa_mem *mem = &ipa->mem[mem_id];
struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id = mem->id;
u16 size_multiple;
+ /* Make sure the memory region is valid for this version of IPA */
+ if (!ipa_mem_id_valid(ipa, mem_id)) {
+ dev_err(dev, "region id %u not valid\n", mem_id);
+ return false;
+ }
+
+ if (!mem->size && !mem->canary_count) {
+ dev_err(dev, "empty memory region %u\n", mem_id);
+ return false;
+ }
+
/* Other than modem memory, sizes must be a multiple of 8 */
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
if (mem->size % size_multiple)
@@ -117,23 +230,74 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
else if (mem->offset < mem->canary_count * sizeof(__le32))
dev_err(dev, "region %u offset too small for %hu canaries\n",
mem_id, mem->canary_count);
- else if (mem->offset + mem->size > ipa->mem_size)
- dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
- mem_id, ipa->mem_size);
+ else if (mem_id == IPA_MEM_END_MARKER && mem->size)
+ dev_err(dev, "non-zero end marker region size\n");
else
return true;
return false;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+/* Verify each defined memory region is valid. */
+static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
+ DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ u32 i;
+
+ if (mem_data->local_count > IPA_MEM_COUNT) {
+ dev_err(dev, "too many memory regions (%u > %u)\n",
+ mem_data->local_count, IPA_MEM_COUNT);
+ return false;
+ }
+
+ for (i = 0; i < mem_data->local_count; i++) {
+ const struct ipa_mem *mem = &mem_data->local[i];
+
+ if (__test_and_set_bit(mem->id, regions)) {
+ dev_err(dev, "duplicate memory region %u\n", mem->id);
+ return false;
+ }
+
+ /* Defined regions have non-zero size and/or canary count */
+ if (!ipa_mem_valid_one(ipa, mem))
+ return false;
+ }
+
+ /* Now see if any required regions are not defined */
+ for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
+ mem_id < IPA_MEM_COUNT;
+ mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ if (ipa_mem_id_required(ipa, mem_id))
+ dev_err(dev, "required memory region %u missing\n",
+ mem_id);
+ }
+
return true;
}
-#endif /*! IPA_VALIDATE */
+/* Do all memory regions fit within the IPA local memory? */
+static bool ipa_mem_size_valid(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 limit = ipa->mem_size;
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->offset + mem->size <= limit)
+ continue;
+
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem->id, limit);
+
+ return false;
+ }
+
+ return true;
+}
/**
* ipa_mem_config() - Configure IPA shared memory
@@ -144,11 +308,12 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- enum ipa_mem_id mem_id;
+ const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
void *virt;
u32 val;
+ u32 i;
/* Check the advertised location and size of the shared memory area */
val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
@@ -168,6 +333,10 @@ int ipa_mem_config(struct ipa *ipa)
mem_size);
}
+ /* We know our memory size; make sure regions are all in range */
+ if (!ipa_mem_size_valid(ipa))
+ return -EINVAL;
+
/* Prealloc DMA memory for zeroing regions */
virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
if (!virt)
@@ -176,29 +345,18 @@ int ipa_mem_config(struct ipa *ipa)
ipa->zero_virt = virt;
ipa->zero_size = IPA_MEM_MAX;
- /* Verify each defined memory region is valid, and if indicated
- * for the region, write "canary" values in the space prior to
- * the region's base address.
+ /* For each defined region, write "canary" values in the
+ * space prior to the region's base address if indicated.
*/
- for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
- const struct ipa_mem *mem = &ipa->mem[mem_id];
- u16 canary_count;
+ for (i = 0; i < ipa->mem_count; i++) {
+ u16 canary_count = ipa->mem[i].canary_count;
__le32 *canary;
- /* Validate all regions (even undefined ones) */
- if (!ipa_mem_valid(ipa, mem_id))
- goto err_dma_free;
-
- /* Skip over undefined regions */
- if (!mem->offset && !mem->size)
- continue;
-
- canary_count = mem->canary_count;
if (!canary_count)
continue;
/* Write canary values in the space before the region */
- canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
do
*--canary = IPA_MEM_CANARY_VAL;
while (--canary_count);
@@ -212,8 +370,9 @@ int ipa_mem_config(struct ipa *ipa)
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
- /* Verify the microcontroller ring alignment (0 is OK too) */
- if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ /* Verify the microcontroller ring alignment (if defined) */
+ mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
+ if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
@@ -261,11 +420,9 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return -EBUSY;
}
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
@@ -380,7 +537,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
- * to "get" a pointer it (this implies no reference counting).
+ * to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
@@ -457,11 +614,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
struct resource *res;
int ret;
- if (mem_data->local_count > IPA_MEM_COUNT) {
- dev_err(dev, "to many memory regions (%u > %u)\n",
- mem_data->local_count, IPA_MEM_COUNT);
+ /* Make sure the set of defined memory regions is valid */
+ if (!ipa_mem_valid(ipa, mem_data))
return -EINVAL;
- }
+
+ ipa->mem_count = mem_data->local_count;
+ ipa->mem = mem_data->local;
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -486,9 +644,6 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_addr = res->start;
ipa->mem_size = resource_size(res);
- /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
- ipa->mem = mem_data->local;
-
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
if (ret)
goto err_unmap;
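
With regions now stored as a packed array rather than indexed by region ID, ipa_mem_find() above performs a linear search, and callers must treat a NULL result as "optional region not configured". A standalone sketch of the lookup and the guard (plain C; the types, IDs, and values are illustrative):

#include <stdint.h>
#include <stdio.h>

enum mem_id { MEM_MODEM_HEADER, MEM_AP_HEADER, MEM_ID_COUNT };

struct mem_region {
	enum mem_id id;
	uint32_t offset;
	uint16_t size;
};

static const struct mem_region mem[] = {
	{ .id = MEM_MODEM_HEADER, .offset = 0x04a8, .size = 0x0140 },
	/* MEM_AP_HEADER intentionally absent (optional region) */
};

static const struct mem_region *mem_find(enum mem_id id)
{
	size_t i;

	for (i = 0; i < sizeof(mem) / sizeof(mem[0]); i++)
		if (mem[i].id == id)
			return &mem[i];

	return NULL;
}

int main(void)
{
	const struct mem_region *region = mem_find(MEM_AP_HEADER);

	/* Optional regions may be missing; skip rather than dereference */
	printf("AP header %s\n", region ? "present" : "absent");
	return 0;
}
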
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index a422aec69e5d..570bfdd99bff 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -54,37 +54,43 @@ enum ipa_mem_id {
IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
IPA_MEM_V6_ROUTE, /* 2 canaries */
IPA_MEM_MODEM_HEADER, /* 2 canaries */
- IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries, optional */
IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
IPA_MEM_AP_PROC_CTX, /* 0 canaries */
- IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */
- IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0/2 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary, optional */
+ IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
+ /* The next 5 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
- IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */
- IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_MODEM, /* 0/2 canaries */
- IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
+ IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
+ IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
IPA_MEM_COUNT, /* Number of regions (not an index) */
};
/**
* struct ipa_mem - IPA local memory region description
+ * @id: memory region identifier
* @offset: offset in IPA memory space to base of the region
* @size: size in bytes of the region
* @canary_count: Number of 32-bit "canary" values that precede region
*/
struct ipa_mem {
+ enum ipa_mem_id id;
u32 offset;
u16 size;
u16 canary_count;
};
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
+
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
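
A consequence of the packed-array layout declared above is that nothing prevents a configuration from listing the same region ID twice, which is why ipa_mem_valid() in ipa_mem.c tracks seen IDs in a bitmap. A standalone sketch of that duplicate check (plain C; a bool array stands in for the kernel's DECLARE_BITMAP/__test_and_set_bit):

#include <stdbool.h>
#include <stdio.h>

enum mem_id { MEM_A, MEM_B, MEM_C, MEM_ID_COUNT };

static bool ids_unique(const enum mem_id *ids, int count)
{
	bool seen[MEM_ID_COUNT] = { false };
	int i;

	for (i = 0; i < count; i++) {
		if (seen[ids[i]])
			return false;	/* duplicate region ID */
		seen[ids[i]] = true;
	}

	return true;
}

int main(void)
{
	enum mem_id ids[] = { MEM_A, MEM_B, MEM_A };

	printf("%s\n", ids_unique(ids, 3) ? "unique" : "duplicate");
	return 0;
}
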
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 593665efbcf9..4661105ce7ab 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -298,32 +298,32 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
- mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
- mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
@@ -336,7 +336,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* skip_uc_load_valid and skip_uc_load are set above */
- mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
@@ -347,7 +347,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* Nothing to report for the compression table (zip_tbl_info) */
- mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
@@ -355,7 +355,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
@@ -363,22 +363,21 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- /* None of the stats fields are valid (IPA v4.0 and above) */
-
+ /* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
- mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
@@ -387,8 +386,9 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
- mem = &ipa->mem[IPA_MEM_STATS_DROP];
- if (mem->size) {
+ /* If the DROP stats region is defined, include it */
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
+ if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;
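
Each region reported to the modem above is packed either as an inclusive [start, end] byte range or as a (start, count) pair of 64-bit table slots, with start always rebased by ipa->mem_offset. A standalone sketch of the arithmetic (plain C; the base address and region values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mem_offset = 0x40000000;	/* illustrative base */
	uint32_t offset = 0x04a8;
	uint32_t size = 0x0140;

	uint32_t start = mem_offset + offset;
	uint32_t end = start + size - 1;		/* inclusive range */
	uint32_t count = size / sizeof(uint64_t);	/* 64-bit slots */

	printf("start 0x%08x end 0x%08x count %u\n",
	       (unsigned int)start, (unsigned int)end, (unsigned int)count);
	return 0;
}
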
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 286ea9634c49..b89dec5865a5 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -368,6 +368,7 @@ enum ipa_cs_offload_en {
IPA_CS_OFFLOAD_NONE = 0x0,
IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
+ IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
};
/* Valid only for TX (IPA consumer) endpoints */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index a5f7a79a1923..cf709df70d28 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -176,11 +176,8 @@ static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
int ret;
ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
- ret, name);
+ if (ret <= 0)
return ret ? : -EINVAL;
- }
irq = ret;
ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
new file mode 100644
index 000000000000..ff61dbdd70d8
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_sysfs.h"
+
+static const char *ipa_version_string(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_0:
+ return "3.0";
+ case IPA_VERSION_3_1:
+ return "3.1";
+ case IPA_VERSION_3_5:
+ return "3.5";
+ case IPA_VERSION_3_5_1:
+ return "3.5.1";
+ case IPA_VERSION_4_0:
+ return "4.0";
+ case IPA_VERSION_4_1:
+ return "4.1";
+ case IPA_VERSION_4_2:
+ return "4.2";
+ case IPA_VERSION_4_5:
+ return "4.5";
+ case IPA_VERSION_4_7:
+ return "4.7";
+ case IPA_VERSION_4_9:
+ return "4.9";
+ case IPA_VERSION_4_11:
+ return "4.11";
+ default:
+ return "0.0"; /* Won't happen (checked at probe time) */
+ }
+}
+
+static ssize_t
+version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_version_string(ipa));
+}
+
+static DEVICE_ATTR_RO(version);
+
+static struct attribute *ipa_attrs[] = {
+ &dev_attr_version.attr,
+ NULL
+};
+
+const struct attribute_group ipa_attribute_group = {
+ .attrs = ipa_attrs,
+};
+
+static const char *ipa_offload_string(struct ipa *ipa)
+{
+ return ipa->version < IPA_VERSION_4_5 ? "MAPv4" : "MAPv5";
+}
+
+static ssize_t rx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(rx_offload);
+
+static ssize_t tx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(tx_offload);
+
+static struct attribute *ipa_feature_attrs[] = {
+ &dev_attr_rx_offload.attr,
+ &dev_attr_tx_offload.attr,
+ NULL
+};
+
+const struct attribute_group ipa_feature_attribute_group = {
+ .name = "feature",
+ .attrs = ipa_feature_attrs,
+};
+
+static ssize_t
+ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+{
+ u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+}
+
+static ssize_t rx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+}
+
+static DEVICE_ATTR_RO(rx_endpoint_id);
+
+static ssize_t tx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
+}
+
+static DEVICE_ATTR_RO(tx_endpoint_id);
+
+static struct attribute *ipa_modem_attrs[] = {
+ &dev_attr_rx_endpoint_id.attr,
+ &dev_attr_tx_endpoint_id.attr,
+ NULL
+};
+
+const struct attribute_group ipa_modem_attribute_group = {
+ .name = "modem",
+ .attrs = ipa_modem_attrs,
+};
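
Attached through the driver's dev_groups (see the ipa_main.c hunk above), these groups give every IPA platform device a small read-only sysfs interface. Assuming an SoC whose IPA device is named 1e40000.ipa and runs IPA v4.5, reads would look roughly like this (the path and endpoint numbers are illustrative, not taken from real hardware):

    /sys/devices/platform/soc@0/1e40000.ipa/version              -> 4.5
    /sys/devices/platform/soc@0/1e40000.ipa/feature/rx_offload   -> MAPv5
    /sys/devices/platform/soc@0/1e40000.ipa/modem/rx_endpoint_id -> 11

The feature files exist so userspace (for instance an rmnet setup tool) can choose MAPv4 or MAPv5 checksum offload without hard-coding IPA version knowledge.
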
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
new file mode 100644
index 000000000000..b34e5650bf8c
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+#ifndef _IPA_SYSFS_H_
+#define _IPA_SYSFS_H_
+
+struct attribute_group;
+
+extern const struct attribute_group ipa_attribute_group;
+extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_modem_attribute_group;
+
+#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 3168d72f4245..c617a9156f26 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -150,29 +150,16 @@ static void ipa_table_validate_build(void)
}
static bool
-ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
{
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
- const struct ipa_mem *mem;
u32 size;
- if (route) {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V6_ROUTE];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V4_ROUTE];
+ if (route)
size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
- } else {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V6_FILTER];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V4_FILTER];
+ else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
- }
- if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ if (!ipa_cmd_table_valid(ipa, mem, route))
return false;
@@ -185,9 +172,8 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
- if (hashed && !mem->size)
+ if (!mem->size)
return true;
- dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter", mem->size, size);
+ dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
+ route ? "route" : "filter", mem_id, mem->size, size);
return false;
}
@@ -195,16 +181,16 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
- bool valid = true;
+ bool valid;
- valid = valid && ipa_table_valid_one(ipa, false, false, false);
- valid = valid && ipa_table_valid_one(ipa, false, false, true);
- valid = valid && ipa_table_valid_one(ipa, false, true, false);
- valid = valid && ipa_table_valid_one(ipa, false, true, true);
- valid = valid && ipa_table_valid_one(ipa, true, false, false);
- valid = valid && ipa_table_valid_one(ipa, true, false, true);
- valid = valid && ipa_table_valid_one(ipa, true, true, false);
- valid = valid && ipa_table_valid_one(ipa, true, true, true);
+ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, true);
return valid;
}
@@ -256,14 +242,15 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
- u16 first, u16 count, const struct ipa_mem *mem)
+ u16 first, u16 count, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr;
u32 offset;
u16 size;
- /* Nothing to do if the table memory regions is empty */
+ /* Nothing to do if the table memory region is empty */
if (!mem->size)
return;
@@ -282,16 +269,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
-ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
u32 ep_mask = ipa->filter_map;
u32 count = hweight32(ep_mask);
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
- if (!mem->size)
- return 0;
-
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -311,7 +295,7 @@ ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
if (endpoint->ee_id != ee_id)
continue;
- ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
}
gsi_trans_commit_wait(trans);
@@ -327,20 +311,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
return ret;
}
@@ -371,15 +353,13 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
count = IPA_ROUTE_AP_COUNT;
}
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE_HASHED);
gsi_trans_commit_wait(trans);
@@ -433,10 +413,12 @@ int ipa_table_hash_flush(struct ipa *ipa)
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
enum ipa_cmd_opcode opcode,
- const struct ipa_mem *mem,
- const struct ipa_mem *hash_mem)
+ enum ipa_mem_id mem_id,
+ enum ipa_mem_id hash_mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
u16 hash_count;
@@ -477,20 +459,16 @@ int ipa_table_setup(struct ipa *ipa)
}
ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V4_ROUTE],
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V6_ROUTE],
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
- &ipa->mem[IPA_MEM_V4_FILTER],
- &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+ IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
- &ipa->mem[IPA_MEM_V6_FILTER],
- &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+ IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
gsi_trans_commit_wait(trans);
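
The sizing in ipa_table_valid_one() above reflects the table layout: a route table has one 64-bit slot per route, while a filter table carries one extra leading slot (it holds the bitmap of endpoints that have filters). A standalone sketch of the computation (plain C; the maximum counts are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ROUTE_COUNT_MAX		15	/* illustrative maximums */
#define FILTER_COUNT_MAX	14

static unsigned int table_size(int route)
{
	if (route)
		return ROUTE_COUNT_MAX * sizeof(uint64_t);

	/* the extra slot leads the table (endpoint filter bitmap) */
	return (1 + FILTER_COUNT_MAX) * sizeof(uint64_t);
}

int main(void)
{
	printf("route table:  %u bytes\n", table_size(1));
	printf("filter table: %u bytes\n", table_size(0));
	return 0;
}
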
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 2756363e6938..fd9219863234 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -116,7 +116,8 @@ enum ipa_uc_event {
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
- u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+ const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
+ u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index ee2b3d02f3cd..6c16c895d842 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -21,6 +21,8 @@
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
*
* Defines the version of IPA (and GSI) hardware present on the platform.
+ * Please update ipa_version_valid() and ipa_version_string() whenever a
+ * new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 92425e1fd70c..93dc48b9b4f2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1819,7 +1819,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
- MACSEC_KEYID_LEN);
+ secy->key_len);
err = macsec_offload(ops->mdo_add_rxsa, &ctx);
if (err)
@@ -2061,7 +2061,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
- MACSEC_KEYID_LEN);
+ secy->key_len);
err = macsec_offload(ops->mdo_add_txsa, &ctx);
if (err)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1b998aa481f8..80de9768ecd4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1781,7 +1781,7 @@ static int macvlan_device_event(struct notifier_block *unused,
unregister_netdevice_many(&list_kill);
break;
case NETDEV_PRE_TYPE_CHANGE:
- /* Forbid underlaying device to change its type. */
+ /* Forbid underlying device to change its type. */
return NOTIFY_BAD;
case NETDEV_NOTIFY_PEERS:
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index d06e06f5e31a..99a6c13a11af 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -19,6 +19,13 @@ config MDIO_BUS
reflects whether the mdio_bus/mdio_device code is built as a
loadable module or built-in.
+config FWNODE_MDIO
+ def_tristate PHYLIB
+ depends on (ACPI || OF) || COMPILE_TEST
+ select FIXED_PHY
+ help
+ FWNODE MDIO bus (Ethernet PHY) accessors
+
config OF_MDIO
def_tristate PHYLIB
depends on OF
@@ -27,6 +34,13 @@ config OF_MDIO
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
+config ACPI_MDIO
+ def_tristate PHYLIB
+ depends on ACPI
+ depends on PHYLIB
+ help
+ ACPI MDIO bus (Ethernet PHY) accessors
+
if MDIO_BUS
config MDIO_DEVRES
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index c3ec0ef989df..15f8dc4042ce 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux MDIO bus drivers
-obj-$(CONFIG_OF_MDIO) += of_mdio.o
+obj-$(CONFIG_ACPI_MDIO) += acpi_mdio.o
+obj-$(CONFIG_FWNODE_MDIO) += fwnode_mdio.o
+obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
new file mode 100644
index 000000000000..d77c987fda9c
--- /dev/null
+++ b/drivers/net/mdio/acpi_mdio.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ACPI helpers for the MDIO (Ethernet PHY) API
+ *
+ * This file provides helper functions for extracting PHY device information
+ * out of the ACPI ASL and using it to populate an mii_bus.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/fwnode_mdio.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+MODULE_LICENSE("GPL");
+
+/**
+ * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * @mdio: pointer to mii_bus structure
+ * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * an ACPI device object corresponding to the MDIO bus and its children are
+ * expected to correspond to the PHY devices on that bus.
+ *
+ * This function registers the mii_bus structure and registers a phy_device
+ * for each child node of @fwnode.
+ */
+int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+ u32 addr;
+ int ret;
+
+ /* Mask out all PHYs from auto probing. */
+ mdio->phy_mask = GENMASK(31, 0);
+ ret = mdiobus_register(mdio);
+ if (ret)
+ return ret;
+
+ ACPI_COMPANION_SET(&mdio->dev, to_acpi_device_node(fwnode));
+
+ /* Loop over the child nodes and register a phy_device for each PHY */
+ fwnode_for_each_child_node(fwnode, child) {
+ ret = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &addr);
+ if (ret || addr >= PHY_MAX_ADDR)
+ continue;
+
+ ret = fwnode_mdiobus_register_phy(mdio, child, addr);
+ if (ret == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(acpi_mdiobus_register);
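
A minimal sketch of how a MAC or bus driver might use the new helper, assuming a platform device whose ACPI companion node describes the MDIO bus. acpi_mdiobus_register() and the mii_bus fields are real; the example_* names and the stub accessors are placeholders, and the remove path and error handling are trimmed:

#include <linux/acpi_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;		/* stub: no PHY responds */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val)
{
	return 0;		/* stub */
}

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "example-acpi-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	bus->parent = &pdev->dev;

	/* Registers the bus, then a phy_device per ACPI child node */
	return acpi_mdiobus_register(bus, dev_fwnode(&pdev->dev));
}
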
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
new file mode 100644
index 000000000000..1becb1a731f6
--- /dev/null
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * fwnode helpers for the MDIO (Ethernet PHY) API
+ *
+ * This file provides helper functions for extracting PHY device information
+ * out of the fwnode and using it to populate an mii_bus.
+ */
+
+#include <linux/acpi.h>
+#include <linux/fwnode_mdio.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+MODULE_LICENSE("GPL");
+
+static struct mii_timestamper *
+fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+{
+ struct of_phandle_args arg;
+ int err;
+
+ if (is_acpi_node(fwnode))
+ return NULL;
+
+ err = of_parse_phandle_with_fixed_args(to_of_node(fwnode),
+ "timestamper", 1, 0, &arg);
+ if (err == -ENOENT)
+ return NULL;
+ else if (err)
+ return ERR_PTR(err);
+
+ if (arg.args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ return register_mii_timestamper(arg.np, arg.args[0]);
+}
+
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr)
+{
+ int rc;
+
+ rc = fwnode_irq_get(child, 0);
+ if (rc == -EPROBE_DEFER)
+ return rc;
+
+ if (rc > 0) {
+ phy->irq = rc;
+ mdio->irq[addr] = rc;
+ } else {
+ phy->irq = mdio->irq[addr];
+ }
+
+ if (fwnode_property_read_bool(child, "broken-turn-around"))
+ mdio->phy_ignore_ta_mask |= 1 << addr;
+
+ fwnode_property_read_u32(child, "reset-assert-us",
+ &phy->mdio.reset_assert_delay);
+ fwnode_property_read_u32(child, "reset-deassert-us",
+ &phy->mdio.reset_deassert_delay);
+
+ /* Associate the fwnode with the device structure so it
+ * can be looked up later
+ */
+ fwnode_handle_get(child);
+ device_set_node(&phy->mdio.dev, child);
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ fwnode_handle_put(child);
+ return rc;
+ }
+
+ dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
+ child, addr);
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_mdiobus_phy_device_register);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr)
+{
+ struct mii_timestamper *mii_ts = NULL;
+ struct phy_device *phy;
+ bool is_c45 = false;
+ u32 phy_id;
+ int rc;
+
+ mii_ts = fwnode_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
+ rc = fwnode_property_match_string(child, "compatible",
+ "ethernet-phy-ieee802.3-c45");
+ if (rc >= 0)
+ is_c45 = true;
+
+ if (is_c45 || fwnode_get_phy_id(child, &phy_id))
+ phy = get_phy_device(bus, addr, is_c45);
+ else
+ phy = phy_device_create(bus, addr, phy_id, 0, NULL);
+ if (IS_ERR(phy)) {
+ unregister_mii_timestamper(mii_ts);
+ return PTR_ERR(phy);
+ }
+
+ if (is_acpi_node(child)) {
+ phy->irq = bus->irq[addr];
+
+ /* Associate the fwnode with the device structure so it
+ * can be looked up later.
+ */
+ phy->mdio.dev.fwnode = child;
+
+ /* All data is now stored in the phy struct, so register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ fwnode_handle_put(phy->mdio.dev.fwnode);
+ return rc;
+ }
+ } else if (is_of_node(child)) {
+ rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
+ if (rc) {
+ unregister_mii_timestamper(mii_ts);
+ phy_device_free(phy);
+ return rc;
+ }
+ }
+
+ /* phy->mii_ts may already be defined by the PHY driver. A
+ * mii_timestamper probed via the device tree will still have
+ * precedence.
+ */
+ if (mii_ts)
+ phy->mii_ts = mii_ts;
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 5d171e7f118d..bfc9be23c973 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -203,7 +203,7 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
return;
}
- /* The MDIO clock is the reference clock (typicaly 250Mhz) divided by
+ /* The MDIO clock is the reference clock (typically 250MHz) divided by
* 2 x (MDIO_CLK_DIV + 1)
*/
reg = unimac_mdio_readl(priv, MDIO_CFG);
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 8fe8f0119fc1..bd1aea2d5a26 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -7,33 +7,33 @@
#include <linux/delay.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
-#include <linux/phy.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* MII address register definitions */
-#define MII_ADDR_REG_ADDR 0x10
-#define MII_BUSY BIT(0)
-#define MII_WRITE BIT(1)
-#define MII_CLKRANGE_60_100M (0 << 2)
-#define MII_CLKRANGE_100_150M (1 << 2)
-#define MII_CLKRANGE_20_35M (2 << 2)
-#define MII_CLKRANGE_35_60M (3 << 2)
-#define MII_CLKRANGE_150_250M (4 << 2)
-#define MII_CLKRANGE_250_300M (5 << 2)
+#define MII_ADDR_REG_ADDR 0x10
+#define MII_BUSY BIT(0)
+#define MII_WRITE BIT(1)
+#define MII_CLKRANGE(x) ((x) << 2)
+#define MII_CLKRANGE_60_100M MII_CLKRANGE(0)
+#define MII_CLKRANGE_100_150M MII_CLKRANGE(1)
+#define MII_CLKRANGE_20_35M MII_CLKRANGE(2)
+#define MII_CLKRANGE_35_60M MII_CLKRANGE(3)
+#define MII_CLKRANGE_150_250M MII_CLKRANGE(4)
+#define MII_CLKRANGE_250_300M MII_CLKRANGE(5)
#define MII_CLKRANGE_MASK GENMASK(4, 2)
#define MII_REG_SHIFT 6
#define MII_REG_MASK GENMASK(10, 6)
#define MII_ADDR_SHIFT 11
#define MII_ADDR_MASK GENMASK(15, 11)
-#define MII_DATA_REG_ADDR 0x14
+#define MII_DATA_REG_ADDR 0x14
-#define MII_MDIO_DELAY_USEC (1000)
-#define MII_MDIO_RETRY_MSEC (10)
+#define MII_MDIO_DELAY_USEC (1000)
+#define MII_MDIO_RETRY_MSEC (10)
struct ipq8064_mdio {
struct regmap *base; /* NSS_GMAC0_BASE */
@@ -65,7 +65,7 @@ ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
- usleep_range(8, 10);
+ usleep_range(10, 13);
err = ipq8064_mdio_wait_busy(priv);
if (err)
@@ -91,19 +91,46 @@ ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
- usleep_range(8, 10);
+
+ /* Writes to the specific register 31 need extra settling time,
+ * or the next read will produce garbage data.
+ */
+ if (reg_offset == 31)
+ usleep_range(30, 43);
+ else
+ usleep_range(10, 13);
return ipq8064_mdio_wait_busy(priv);
}
+static const struct regmap_config ipq8064_mdio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .can_multi_write = false,
+ /* no internal locking needed: the MDIO bus lock serializes all users */
+ .disable_locking = true,
+
+ .cache_type = REGCACHE_NONE,
+};
+
static int
ipq8064_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ipq8064_mdio *priv;
+ struct resource res;
struct mii_bus *bus;
+ void __iomem *base;
int ret;
+ if (of_address_to_resource(np, 0, &res))
+ return -ENOMEM;
+
+ base = ioremap(res.start, resource_size(&res));
+ if (!base)
+ return -ENOMEM;
+
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
if (!bus)
return -ENOMEM;
@@ -115,15 +142,10 @@ ipq8064_mdio_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;
priv = bus->priv;
- priv->base = device_node_to_regmap(np);
- if (IS_ERR(priv->base)) {
- if (priv->base == ERR_PTR(-EPROBE_DEFER))
- return -EPROBE_DEFER;
-
- dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
- priv->base);
+ priv->base = devm_regmap_init_mmio(&pdev->dev, base,
+ &ipq8064_mdio_regmap_config);
+ if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- }
ret = of_mdiobus_register(bus, np);
if (ret)
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index b36e5ea04ddf..2d67e12c8262 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -139,10 +139,6 @@ static int mscc_miim_probe(struct platform_device *pdev)
struct mscc_miim_dev *dev;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
if (!bus)
return -ENOMEM;
@@ -155,7 +151,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;
dev = bus->priv;
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(dev->regs)) {
dev_err(&pdev->dev, "Unable to map MIIM registers\n");
return PTR_ERR(dev->regs);
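
For reference, a minimal sketch of the consolidated pattern these conversions apply (a hypothetical driver, not code from this patch):

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	/* One call replaces the platform_get_resource() +
	 * devm_ioremap_resource() pair; pass NULL as the last argument
	 * when the struct resource itself is not needed afterwards.
	 */
	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}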
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 03261e6b9ceb..014c0baedbd2 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -65,7 +65,7 @@ static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
if (md->core_clk) {
- /* use rate adjust regs to derrive the mdio's operating
+ /* use rate adjust regs to derive the mdio's operating
* frequency from the specified core clock
*/
divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
@@ -187,7 +187,9 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
return -ENOMEM;
md->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ md->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(md->base))
+ return PTR_ERR(md->base);
if (res->start & 0xfff) {
/* For backward compatibility in case the
* base address is specified with an offset.
@@ -196,9 +198,6 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
res->start &= ~0xfff;
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
- md->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(md->base))
- return PTR_ERR(md->base);
md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
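
As a rough illustration of the rate-adjust computation in mdio_mux_iproc_config() (MDIO_OPERATING_FREQUENCY is defined elsewhere in this driver; the value used here is an assumption):

/*
 *	divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
 *
 * e.g. a 250 MHz core clock with an assumed 2.5 MHz target frequency
 * gives divisor = 250000000 / 2500000 = 100, which the driver then
 * programs into the rate adjust registers to derive MDC from the
 * core clock.
 */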
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index bf86c9c7a288..b8866bc3f2e8 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -95,7 +95,7 @@ static int g12a_ephy_pll_enable(struct clk_hw *hw)
/* Poll on the digital lock instead of the usual analog lock
* This is done because bit 31 is unreliable on some SoC. Bit
- * 31 may indicate that the PLL is not lock eventhough the clock
+ * 31 may indicate that the PLL is not locked even though the clock
* is actually running
*/
return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 8ce99c4888e1..e096e68ac667 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return 0;
fail_register:
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
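
These removals read as double-free fixes: the bus appears to be allocated with a devm_ variant, so a manual mdiobus_free() would free it a second time when the device detaches. That is an inference from the hunks, not stated in them; the same reasoning would apply to the mdio-thunder hunk below. The assumed pattern:

/* Devm-managed allocation means no manual free.
 *
 *	bus->mii_bus = devm_mdiobus_alloc_size(&pdev->dev, priv_size);
 *	...
 *	mdiobus_unregister(bus->mii_bus);   -- still required
 *	(no mdiobus_free(): devres releases the bus automatically)
 */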
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index cb1761693b69..822d2cdd2f35 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
continue;
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
pci_release_regions(pdev);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 094494a68ddf..9e3c815a070f 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/fwnode_mdio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -29,128 +30,28 @@ MODULE_LICENSE("GPL");
* ethernet-phy-idAAAA.BBBB */
static int of_get_phy_id(struct device_node *device, u32 *phy_id)
{
- struct property *prop;
- const char *cp;
- unsigned int upper, lower;
-
- of_property_for_each_string(device, "compatible", prop, cp) {
- if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2) {
- *phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
-{
- struct of_phandle_args arg;
- int err;
-
- err = of_parse_phandle_with_fixed_args(node, "timestamper", 1, 0, &arg);
-
- if (err == -ENOENT)
- return NULL;
- else if (err)
- return ERR_PTR(err);
-
- if (arg.args_count != 1)
- return ERR_PTR(-EINVAL);
-
- return register_mii_timestamper(arg.np, arg.args[0]);
+ return fwnode_get_phy_id(of_fwnode_handle(device), phy_id);
}
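
For illustration, the parse the removed code performed (and which fwnode_get_phy_id() is assumed to reproduce) on a hypothetical compatible string:

/* A PHY node carrying
 *	compatible = "ethernet-phy-id0141.0e90";	(IDs hypothetical)
 * yields upper = 0x0141, lower = 0x0e90, hence
 *	*phy_id = (0x0141 << 16) | 0x0e90 = 0x01410e90,
 * letting the PHY be created without reading its ID registers.
 */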
int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
- struct device_node *child, u32 addr)
+ struct device_node *child, u32 addr)
{
- int rc;
-
- rc = of_irq_get(child, 0);
- if (rc == -EPROBE_DEFER)
- return rc;
-
- if (rc > 0) {
- phy->irq = rc;
- mdio->irq[addr] = rc;
- } else {
- phy->irq = mdio->irq[addr];
- }
-
- if (of_property_read_bool(child, "broken-turn-around"))
- mdio->phy_ignore_ta_mask |= 1 << addr;
-
- of_property_read_u32(child, "reset-assert-us",
- &phy->mdio.reset_assert_delay);
- of_property_read_u32(child, "reset-deassert-us",
- &phy->mdio.reset_deassert_delay);
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->mdio.dev.of_node = child;
- phy->mdio.dev.fwnode = of_fwnode_handle(child);
-
- /* All data is now stored in the phy struct;
- * register it */
- rc = phy_device_register(phy);
- if (rc) {
- of_node_put(child);
- return rc;
- }
-
- dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
- child, addr);
- return 0;
+ return fwnode_mdiobus_phy_device_register(mdio, phy,
+ of_fwnode_handle(child),
+ addr);
}
EXPORT_SYMBOL(of_mdiobus_phy_device_register);
static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
- struct mii_timestamper *mii_ts;
- struct phy_device *phy;
- bool is_c45;
- int rc;
- u32 phy_id;
-
- mii_ts = of_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
- if (!is_c45 && !of_get_phy_id(child, &phy_id))
- phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
- else
- phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy)) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
- }
-
- rc = of_mdiobus_phy_device_register(mdio, phy, child, addr);
- if (rc) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
- return rc;
- }
-
- /* phy->mii_ts may already be defined by the PHY driver. A
- * mii_timestamper probed via the device tree will still have
- * precedence.
- */
- if (mii_ts)
- phy->mii_ts = mii_ts;
-
- return 0;
+ return fwnode_mdiobus_register_phy(mdio, of_fwnode_handle(child), addr);
}
static int of_mdiobus_register_device(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
+ struct fwnode_handle *fwnode = of_fwnode_handle(child);
struct mdio_device *mdiodev;
int rc;
@@ -161,9 +62,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
/* Associate the OF node with the device structure so it
* can be looked up later.
*/
- of_node_get(child);
- mdiodev->dev.of_node = child;
- mdiodev->dev.fwnode = of_fwnode_handle(child);
+ fwnode_handle_get(fwnode);
+ device_set_node(&mdiodev->dev, fwnode);
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
@@ -262,8 +162,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
* the device tree are populated after the bus has been registered */
mdio->phy_mask = ~0;
- mdio->dev.of_node = np;
- mdio->dev.fwnode = of_fwnode_handle(np);
+ device_set_node(&mdio->dev, of_fwnode_handle(np));
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
@@ -347,16 +246,7 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct mdio_device *of_mdio_find_device(struct device_node *np)
{
- struct device *d;
-
- if (!np)
- return NULL;
-
- d = bus_find_device_by_of_node(&mdio_bus_type, np);
- if (!d)
- return NULL;
-
- return to_mdio_device(d);
+ return fwnode_mdio_find_device(of_fwnode_handle(np));
}
EXPORT_SYMBOL(of_mdio_find_device);
@@ -369,18 +259,7 @@ EXPORT_SYMBOL(of_mdio_find_device);
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
- struct mdio_device *mdiodev;
-
- mdiodev = of_mdio_find_device(phy_np);
- if (!mdiodev)
- return NULL;
-
- if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
- return to_phy_device(&mdiodev->dev);
-
- put_device(&mdiodev->dev);
-
- return NULL;
+ return fwnode_phy_find_device(of_fwnode_handle(phy_np));
}
EXPORT_SYMBOL(of_phy_find_device);
@@ -466,7 +345,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect);
* of_phy_is_fixed_link() and of_phy_register_fixed_link() must
* support two DT bindings:
* - the old DT binding, where 'fixed-link' was a property with 5
- * cells encoding various informations about the fixed PHY
+ * cells encoding various information about the fixed PHY
* - the new DT binding, where 'fixed-link' is a sub-node of the
* Ethernet device.
*/
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index 0d8293a47a56..e60e38c1f09d 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
+#include <linux/wwan.h>
#include "mhi.h"
@@ -18,6 +19,12 @@
#define MHI_NET_MAX_MTU 0xffff
#define MHI_NET_DEFAULT_MTU 0x4000
+/* When set to false, the default netdev (link 0) is not created, and it's up
+ * to the user to create the link (via wwan rtnetlink).
+ */
+static bool create_default_iface = true;
+module_param(create_default_iface, bool, 0);
+
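
Usage sketch for the new parameter (the module name is assumed):

/*
 *	# modprobe mhi_net create_default_iface=0
 *
 * skips creating the default link-0 netdev; links are then created
 * from userspace through the WWAN rtnetlink ops registered below in
 * mhi_net_probe().
 */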
struct mhi_device_info {
const char *netname;
const struct mhi_net_proto *proto;
@@ -25,7 +32,7 @@ struct mhi_device_info {
static int mhi_ndo_open(struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
/* Feed the rx buffer pool */
schedule_delayed_work(&mhi_netdev->rx_refill, 0);
@@ -40,7 +47,7 @@ static int mhi_ndo_open(struct net_device *ndev)
static int mhi_ndo_stop(struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
@@ -49,9 +56,9 @@ static int mhi_ndo_stop(struct net_device *ndev)
return 0;
}
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
const struct mhi_net_proto *proto = mhi_netdev->proto;
struct mhi_device *mdev = mhi_netdev->mdev;
int err;
@@ -86,7 +93,7 @@ exit_drop:
static void mhi_ndo_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
- struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
unsigned int start;
do {
@@ -295,32 +302,33 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
-static struct device_type wwan_type = {
- .name = "wwan",
-};
-
-static int mhi_net_probe(struct mhi_device *mhi_dev,
- const struct mhi_device_id *id)
+static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
+ struct netlink_ext_ack *extack)
{
- const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
- struct device *dev = &mhi_dev->dev;
+ const struct mhi_device_info *info;
+ struct mhi_device *mhi_dev = ctxt;
struct mhi_net_dev *mhi_netdev;
- struct net_device *ndev;
int err;
- ndev = alloc_netdev(sizeof(*mhi_netdev), info->netname,
- NET_NAME_PREDICTABLE, mhi_net_setup);
- if (!ndev)
- return -ENOMEM;
+ info = (struct mhi_device_info *)mhi_dev->id->driver_data;
+
+ /* For now we only support one link (link context 0); the driver must be
+ * reworked to break the 1:1 relationship for net MBIM and to forward the
+ * setup call to rmnet (QMAP) otherwise.
+ */
+ if (if_id != 0)
+ return -EINVAL;
+
+ if (dev_get_drvdata(&mhi_dev->dev))
+ return -EBUSY;
+
+ mhi_netdev = wwan_netdev_drvpriv(ndev);
- mhi_netdev = netdev_priv(ndev);
- dev_set_drvdata(dev, mhi_netdev);
+ dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
mhi_netdev->ndev = ndev;
mhi_netdev->mdev = mhi_dev;
mhi_netdev->skbagg_head = NULL;
mhi_netdev->proto = info->proto;
- SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- SET_NETDEV_DEVTYPE(ndev, &wwan_type);
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
@@ -334,7 +342,10 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- err = register_netdev(ndev);
+ if (extack)
+ err = register_netdevice(ndev);
+ else
+ err = register_netdev(ndev);
if (err)
goto out_err;
@@ -347,23 +358,89 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
return 0;
out_err_proto:
- unregister_netdev(ndev);
+ unregister_netdevice(ndev);
out_err:
free_netdev(ndev);
return err;
}
-static void mhi_net_remove(struct mhi_device *mhi_dev)
+static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
+ struct list_head *head)
{
- struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
+ struct mhi_device *mhi_dev = ctxt;
- unregister_netdev(mhi_netdev->ndev);
+ if (head)
+ unregister_netdevice_queue(ndev, head);
+ else
+ unregister_netdev(ndev);
- mhi_unprepare_from_transfer(mhi_netdev->mdev);
+ mhi_unprepare_from_transfer(mhi_dev);
kfree_skb(mhi_netdev->skbagg_head);
- free_netdev(mhi_netdev->ndev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct wwan_ops mhi_wwan_ops = {
+ .priv_size = sizeof(struct mhi_net_dev),
+ .setup = mhi_net_setup,
+ .newlink = mhi_net_newlink,
+ .dellink = mhi_net_dellink,
+};
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct net_device *ndev;
+ int err;
+
+ err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
+ WWAN_NO_DEFAULT_LINK);
+ if (err)
+ return err;
+
+ if (!create_default_iface)
+ return 0;
+
+ /* Create a default interface, which is used as either the RMNET
+ * real-dev, MBIM link 0, or IP link 0.
+ */
+ ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
+ NET_NAME_PREDICTABLE, mhi_net_setup);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_unregister;
+ }
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
+ if (err)
+ goto err_release;
+
+ return 0;
+
+err_release:
+ free_netdev(ndev);
+err_unregister:
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+
+ /* WWAN core takes care of removing remaining links */
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+
+ if (create_default_iface)
+ mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
}
static const struct mhi_device_info mhi_hwip0 = {
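
A hedged sketch of why every netdev_priv() call above became wwan_netdev_drvpriv(): the WWAN core is assumed to reserve its own per-link header at the start of the netdev private area, roughly like this (reconstruction under stated assumptions; struct and field names are illustrative):

#include <linux/netdevice.h>

struct wwan_netdev_priv_example {
	u32 link_id;				/* owned by the WWAN core */
	u8 drv_priv[] __aligned(sizeof(void *));	/* driver's area */
};

static inline void *example_wwan_netdev_drvpriv(struct net_device *ndev)
{
	struct wwan_netdev_priv_example *priv = netdev_priv(ndev);

	return &priv->drv_priv;	/* plain netdev_priv() would point at link_id */
}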
diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c
index fc72b3f6ec9e..bf1ad863237d 100644
--- a/drivers/net/mhi/proto_mbim.c
+++ b/drivers/net/mhi/proto_mbim.c
@@ -16,6 +16,7 @@
#include <linux/ip.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
+#include <linux/wwan.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
@@ -56,7 +57,7 @@ static void __mbim_errors_inc(struct mhi_net_dev *dev)
static int mbim_rx_verify_nth16(struct sk_buff *skb)
{
- struct mhi_net_dev *dev = netdev_priv(skb->dev);
+ struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
struct mbim_context *ctx = dev->proto_data;
struct usb_cdc_ncm_nth16 *nth16;
int len;
@@ -102,7 +103,7 @@ static int mbim_rx_verify_nth16(struct sk_buff *skb)
static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
- struct mhi_net_dev *dev = netdev_priv(skb->dev);
+ struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
int ret;
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index e71ebb933266..779c3a96dba7 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -81,7 +81,7 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR);
if (mii->supports_gmii) {
- ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+ ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 0e9511661601..ccec29970d5b 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -27,21 +27,34 @@ static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
- nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
- sizeof(struct nsim_vf_config),
- GFP_KERNEL | __GFP_NOWARN);
+ struct nsim_dev *nsim_dev;
+ int err = 0;
+
+ if (nsim_bus_dev->max_vfs < num_vfs)
+ return -ENOMEM;
+
if (!nsim_bus_dev->vfconfigs)
return -ENOMEM;
nsim_bus_dev->num_vfs = num_vfs;
- return 0;
+ nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ if (nsim_esw_mode_is_switchdev(nsim_dev)) {
+ err = nsim_esw_switchdev_enable(nsim_dev, NULL);
+ if (err)
+ nsim_bus_dev->num_vfs = 0;
+ }
+
+ return err;
}
-static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
+void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
{
- kfree(nsim_bus_dev->vfconfigs);
- nsim_bus_dev->vfconfigs = NULL;
+ struct nsim_dev *nsim_dev;
+
nsim_bus_dev->num_vfs = 0;
+ nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ if (nsim_esw_mode_is_switchdev(nsim_dev))
+ nsim_esw_legacy_enable(nsim_dev, NULL);
}
static ssize_t
@@ -56,7 +69,7 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- rtnl_lock();
+ mutex_lock(&nsim_bus_dev->vfs_lock);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_good;
if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -74,7 +87,7 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
exit_good:
ret = count;
exit_unlock:
- rtnl_unlock();
+ mutex_unlock(&nsim_bus_dev->vfs_lock);
return ret;
}
@@ -92,6 +105,79 @@ static struct device_attribute nsim_bus_dev_numvfs_attr =
__ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
nsim_bus_dev_numvfs_store);
+ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_bus_dev *nsim_bus_dev = file->private_data;
+ char buf[11];
+ ssize_t len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", nsim_bus_dev->max_vfs);
+ if (len < 0)
+ return len;
+
+ return simple_read_from_buffer(data, count, ppos, buf, len);
+}
+
+ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_bus_dev *nsim_bus_dev = file->private_data;
+ struct nsim_vf_config *vfconfigs;
+ ssize_t ret;
+ char buf[10];
+ u32 val;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ mutex_lock(&nsim_bus_dev->vfs_lock);
+ /* Reject if VFs are configured */
+ if (nsim_bus_dev->num_vfs) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = copy_from_user(buf, data, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ buf[count] = '\0';
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* max_vfs limited by the maximum number of provided port indexes */
+ if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE) {
+ ret = -ERANGE;
+ goto unlock;
+ }
+
+ vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config), GFP_KERNEL | __GFP_NOWARN);
+ if (!vfconfigs) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ kfree(nsim_bus_dev->vfconfigs);
+ nsim_bus_dev->vfconfigs = vfconfigs;
+ nsim_bus_dev->max_vfs = val;
+ *ppos += count;
+ ret = count;
+unlock:
+ mutex_unlock(&nsim_bus_dev->vfs_lock);
+ return ret;
+}
+
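
A usage sketch for the new knob (the debugfs path is assumed from nsim_dev_debugfs_init() in dev.c, which registers these handlers as the per-device "max_vfs" file):

/*
 *	# cat /sys/kernel/debug/netdevsim/netdevsim0/max_vfs
 *	4
 *	# echo 16 > /sys/kernel/debug/netdevsim/netdevsim0/max_vfs
 *
 * The write handler holds vfs_lock, returns -EBUSY while VFs are
 * enabled, and reallocates vfconfigs to the new size on success.
 */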
static ssize_t
new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -113,7 +199,7 @@ new_port_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
- ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
@@ -142,7 +228,7 @@ del_port_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
- ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
@@ -168,9 +254,6 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
static void nsim_bus_dev_release(struct device *dev)
{
- struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
-
- nsim_bus_dev_vfs_disable(nsim_bus_dev);
}
static struct device_type nsim_bus_dev_type = {
@@ -311,6 +394,8 @@ static struct bus_type nsim_bus = {
.num_vf = nsim_num_vf,
};
+#define NSIM_BUS_DEV_MAX_VFS 4
+
static struct nsim_bus_dev *
nsim_bus_dev_new(unsigned int id, unsigned int port_count)
{
@@ -329,15 +414,28 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+ nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+ mutex_init(&nsim_bus_dev->vfs_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
+ nsim_bus_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!nsim_bus_dev->vfconfigs) {
+ err = -ENOMEM;
+ goto err_nsim_bus_dev_id_free;
+ }
+
err = device_register(&nsim_bus_dev->dev);
if (err)
- goto err_nsim_bus_dev_id_free;
+ goto err_nsim_vfs_free;
+
return nsim_bus_dev;
+err_nsim_vfs_free:
+ kfree(nsim_bus_dev->vfconfigs);
err_nsim_bus_dev_id_free:
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
err_nsim_bus_dev_free:
@@ -351,6 +449,7 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
smp_store_release(&nsim_bus_dev->init, false);
device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev->vfconfigs);
kfree(nsim_bus_dev);
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 6189a4c0d39e..6348307bfa84 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -35,6 +35,25 @@
#include "netdevsim.h"
+static unsigned int
+nsim_dev_port_index(enum nsim_dev_port_type type, unsigned int port_index)
+{
+ switch (type) {
+ case NSIM_DEV_PORT_TYPE_VF:
+ port_index = NSIM_DEV_VF_PORT_INDEX_BASE + port_index;
+ break;
+ case NSIM_DEV_PORT_TYPE_PF:
+ break;
+ }
+
+ return port_index;
+}
+
+static inline unsigned int nsim_dev_port_index_to_vf_index(unsigned int port_index)
+{
+ return port_index - NSIM_DEV_VF_PORT_INDEX_BASE;
+}
+
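
Worked example of the index mapping, using NSIM_DEV_VF_PORT_INDEX_BASE = 128 from netdevsim.h:

/*
 *	nsim_dev_port_index(NSIM_DEV_PORT_TYPE_PF, 3)  == 3
 *	nsim_dev_port_index(NSIM_DEV_PORT_TYPE_VF, 3)  == 128 + 3 == 131
 *	nsim_dev_port_index_to_vf_index(131)           == 3
 *
 * PF and VF ports thus share one devlink port index space without
 * colliding, as long as fewer than 128 PF ports exist.
 */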
static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
@@ -192,9 +211,18 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations nsim_dev_max_vfs_fops = {
+ .open = simple_open,
+ .read = nsim_bus_dev_max_vfs_read,
+ .write = nsim_bus_dev_max_vfs_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[sizeof(DRV_NAME) + 10];
+ int err;
sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
@@ -231,30 +259,84 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
+ nsim_dev->max_vfs = debugfs_create_file("max_vfs",
+ 0600,
+ nsim_dev->ddir,
+ nsim_dev->nsim_bus_dev,
+ &nsim_dev_max_vfs_fops);
+ nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
+ if (IS_ERR(nsim_dev->nodes_ddir)) {
+ err = PTR_ERR(nsim_dev->nodes_ddir);
+ goto err_out;
+ }
+ debugfs_create_bool("fail_trap_drop_counter_get", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_drop_counter_get);
nsim_udp_tunnels_debugfs_create(nsim_dev);
return 0;
+
+err_out:
+ debugfs_remove_recursive(nsim_dev->ports_ddir);
+ debugfs_remove_recursive(nsim_dev->ddir);
+ return err;
}
static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
{
+ debugfs_remove_recursive(nsim_dev->nodes_ddir);
debugfs_remove_recursive(nsim_dev->ports_ddir);
debugfs_remove_recursive(nsim_dev->ddir);
}
+static ssize_t nsim_dev_rate_parent_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ char **name_ptr = file->private_data;
+ size_t len;
+
+ if (!*name_ptr)
+ return 0;
+
+ len = strlen(*name_ptr);
+ return simple_read_from_buffer(data, count, ppos, *name_ptr, len);
+}
+
+static const struct file_operations nsim_dev_rate_parent_fops = {
+ .open = simple_open,
+ .read = nsim_dev_rate_parent_read,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
struct nsim_dev_port *nsim_dev_port)
{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ unsigned int port_index = nsim_dev_port->port_index;
char port_ddir_name[16];
char dev_link_name[32];
- sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
+ sprintf(port_ddir_name, "%u", port_index);
nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
nsim_dev->ports_ddir);
if (IS_ERR(nsim_dev_port->ddir))
return PTR_ERR(nsim_dev_port->ddir);
- sprintf(dev_link_name, "../../../" DRV_NAME "%u",
- nsim_dev->nsim_bus_dev->dev.id);
+ sprintf(dev_link_name, "../../../" DRV_NAME "%u", nsim_bus_dev->dev.id);
+ if (nsim_dev_port_is_vf(nsim_dev_port)) {
+ unsigned int vf_id = nsim_dev_port_index_to_vf_index(port_index);
+
+ debugfs_create_u16("tx_share", 0400, nsim_dev_port->ddir,
+ &nsim_bus_dev->vfconfigs[vf_id].min_tx_rate);
+ debugfs_create_u16("tx_max", 0400, nsim_dev_port->ddir,
+ &nsim_bus_dev->vfconfigs[vf_id].max_tx_rate);
+ nsim_dev_port->rate_parent = debugfs_create_file("rate_parent",
+ 0400,
+ nsim_dev_port->ddir,
+ &nsim_dev_port->parent_name,
+ &nsim_dev_rate_parent_fops);
+ }
debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
return 0;
@@ -407,6 +489,74 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
devlink_region_destroy(nsim_dev->dummy_region);
}
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
+int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ devlink_rate_nodes_destroy(devlink);
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ return 0;
+}
+
+int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ int i, err;
+
+ for (i = 0; i < nsim_bus_dev->num_vfs; i++) {
+ err = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
+ pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
+ goto err_port_add_vfs;
+ }
+ }
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ return 0;
+
+err_port_add_vfs:
+ for (i--; i >= 0; i--)
+ nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ return err;
+}
+
+static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ int err = 0;
+
+ mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ if (mode == nsim_dev->esw_mode)
+ goto unlock;
+
+ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ err = nsim_esw_legacy_enable(nsim_dev, extack);
+ else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ err = nsim_esw_switchdev_enable(nsim_dev, extack);
+ else
+ err = -EINVAL;
+
+unlock:
+ mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ return err;
+}
+
+static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ *mode = nsim_dev->esw_mode;
+ return 0;
+}
+
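
Usage sketch for the new eswitch ops with the devlink CLI (the netdevsim devlink handle is assumed):

/*
 *	# devlink dev eswitch set netdevsim/netdevsim0 mode switchdev
 *	# devlink dev eswitch show netdevsim/netdevsim0
 *
 * Entering switchdev mode creates one VF port per enabled VF via
 * nsim_esw_switchdev_enable(); returning to legacy deletes the VF
 * ports and destroys any devlink rate nodes.
 */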
struct nsim_trap_item {
void *trap_ctx;
enum devlink_trap_action action;
@@ -416,6 +566,7 @@ struct nsim_trap_data {
struct delayed_work trap_report_dw;
struct nsim_trap_item *trap_items_arr;
u64 *trap_policers_cnt_arr;
+ u64 trap_pkt_cnt;
struct nsim_dev *nsim_dev;
spinlock_t trap_lock; /* Protects trap_items_arr */
};
@@ -892,7 +1043,190 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
return 0;
}
+#define NSIM_LINK_SPEED_MAX 5000 /* Mbps */
+#define NSIM_LINK_SPEED_UNIT 125000 /* 1 Mbps given in bytes/sec to avoid
+ * u64 overflow during conversion from
+ * bytes to bits.
+ */
+
+static int nsim_rate_bytes_to_units(char *name, u64 *rate, struct netlink_ext_ack *extack)
+{
+ u64 val;
+ u32 rem;
+
+ val = div_u64_rem(*rate, NSIM_LINK_SPEED_UNIT, &rem);
+ if (rem) {
+ pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
+ name, *rate);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps.");
+ return -EINVAL;
+ }
+
+ if (val > NSIM_LINK_SPEED_MAX) {
+ pr_err("%s rate value %lluMbps exceed link maximum speed 5000Mbps.\n",
+ name, val);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed 5000Mbps.");
+ return -EINVAL;
+ }
+ *rate = val;
+ return 0;
+}
+
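
Worked example of the conversion: devlink hands rates to the driver in bytes/sec, and NSIM_LINK_SPEED_UNIT (125000 B/s) is exactly 1 Mbps:

/*
 *	12500000 Bps  -> 12500000 / 125000 = 100 Mbps    (accepted)
 *	12500001 Bps  -> remainder != 0                  (-EINVAL)
 *	750000000 Bps -> 6000 Mbps > NSIM_LINK_SPEED_MAX (-EINVAL)
 */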
+static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv;
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
+ if (err)
+ return err;
+
+ nsim_bus_dev->vfconfigs[vf_id].min_tx_rate = tx_share;
+ return 0;
+}
+
+static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv;
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
+ if (err)
+ return err;
+
+ nsim_bus_dev->vfconfigs[vf_id].max_tx_rate = tx_max;
+ return 0;
+}
+
+struct nsim_rate_node {
+ struct dentry *ddir;
+ struct dentry *rate_parent;
+ char *parent_name;
+ u16 tx_share;
+ u16 tx_max;
+};
+
+static int nsim_node_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
+ if (err)
+ return err;
+
+ nsim_node->tx_share = tx_share;
+ return 0;
+}
+
+static int nsim_node_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
+ if (err)
+ return err;
+
+ nsim_node->tx_max = tx_max;
+ return 0;
+}
+
+static int nsim_rate_node_new(struct devlink_rate *node, void **priv,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(node->devlink);
+ struct nsim_rate_node *nsim_node;
+
+ if (!nsim_esw_mode_is_switchdev(nsim_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Node creation allowed only in switchdev mode.");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_node = kzalloc(sizeof(*nsim_node), GFP_KERNEL);
+ if (!nsim_node)
+ return -ENOMEM;
+
+ nsim_node->ddir = debugfs_create_dir(node->name, nsim_dev->nodes_ddir);
+
+ debugfs_create_u16("tx_share", 0400, nsim_node->ddir, &nsim_node->tx_share);
+ debugfs_create_u16("tx_max", 0400, nsim_node->ddir, &nsim_node->tx_max);
+ nsim_node->rate_parent = debugfs_create_file("rate_parent", 0400,
+ nsim_node->ddir,
+ &nsim_node->parent_name,
+ &nsim_dev_rate_parent_fops);
+
+ *priv = nsim_node;
+ return 0;
+}
+
+static int nsim_rate_node_del(struct devlink_rate *node, void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+
+ debugfs_remove(nsim_node->rate_parent);
+ debugfs_remove_recursive(nsim_node->ddir);
+ kfree(nsim_node);
+ return 0;
+}
+
+static int nsim_rate_leaf_parent_set(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv_child;
+
+ if (parent)
+ nsim_dev_port->parent_name = parent->name;
+ else
+ nsim_dev_port->parent_name = NULL;
+ return 0;
+}
+
+static int nsim_rate_node_parent_set(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv_child;
+
+ if (parent)
+ nsim_node->parent_name = parent->name;
+ else
+ nsim_node->parent_name = NULL;
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ u64 *cnt;
+
+ if (nsim_dev->fail_trap_drop_counter_get)
+ return -EINVAL;
+
+ cnt = &nsim_dev->trap_data->trap_pkt_cnt;
+ *p_drops = (*cnt)++;
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
+ .eswitch_mode_set = nsim_devlink_eswitch_mode_set,
+ .eswitch_mode_get = nsim_devlink_eswitch_mode_get,
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
@@ -905,32 +1239,52 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
.trap_group_set = nsim_dev_devlink_trap_group_set,
.trap_policer_set = nsim_dev_devlink_trap_policer_set,
.trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
+ .rate_leaf_tx_share_set = nsim_leaf_tx_share_set,
+ .rate_leaf_tx_max_set = nsim_leaf_tx_max_set,
+ .rate_node_tx_share_set = nsim_node_tx_share_set,
+ .rate_node_tx_max_set = nsim_node_tx_max_set,
+ .rate_node_new = nsim_rate_node_new,
+ .rate_node_del = nsim_rate_node_del,
+ .rate_leaf_parent_set = nsim_rate_leaf_parent_set,
+ .rate_node_parent_set = nsim_rate_node_parent_set,
+ .trap_drop_counter_get = nsim_dev_devlink_trap_drop_counter_get,
};
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
-static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
struct devlink_port_attrs attrs = {};
struct nsim_dev_port *nsim_dev_port;
struct devlink_port *devlink_port;
int err;
+ if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_bus_dev->num_vfs)
+ return -EINVAL;
+
nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
if (!nsim_dev_port)
return -ENOMEM;
- nsim_dev_port->port_index = port_index;
+ nsim_dev_port->port_index = nsim_dev_port_index(type, port_index);
+ nsim_dev_port->port_type = type;
devlink_port = &nsim_dev_port->devlink_port;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = port_index + 1;
+ if (nsim_dev_port_is_pf(nsim_dev_port)) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port_index + 1;
+ } else {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = 0;
+ attrs.pci_vf.vf = port_index;
+ }
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- port_index);
+ nsim_dev_port->port_index);
if (err)
goto err_port_free;
@@ -944,11 +1298,20 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
goto err_port_debugfs_exit;
}
+ if (nsim_dev_port_is_vf(nsim_dev_port)) {
+ err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
+ nsim_dev_port);
+ if (err)
+ goto err_nsim_destroy;
+ }
+
devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
list_add(&nsim_dev_port->list, &nsim_dev->port_list);
return 0;
+err_nsim_destroy:
+ nsim_destroy(nsim_dev_port->ns);
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
@@ -963,6 +1326,8 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
list_del(&nsim_dev_port->list);
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
@@ -987,7 +1352,7 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
- err = __nsim_dev_port_add(nsim_dev, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
if (err)
goto err_port_del_all;
}
@@ -1134,6 +1499,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
devlink_params_publish(devlink);
devlink_reload_enable(devlink);
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
err_psample_exit:
@@ -1169,6 +1535,12 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
if (devlink_is_reload_failed(devlink))
return;
debugfs_remove(nsim_dev->take_snapshot);
+
+ mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ if (nsim_dev->nsim_bus_dev->num_vfs)
+ nsim_bus_dev_vfs_disable(nsim_dev->nsim_bus_dev);
+ mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+
nsim_dev_port_del_all(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
@@ -1197,32 +1569,34 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
}
static struct nsim_dev_port *
-__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
+__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+ unsigned int port_index)
{
struct nsim_dev_port *nsim_dev_port;
+ port_index = nsim_dev_port_index(type, port_index);
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
if (nsim_dev_port->port_index == port_index)
return nsim_dev_port;
return NULL;
}
-int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
mutex_lock(&nsim_dev->port_list_lock);
- if (__nsim_dev_port_lookup(nsim_dev, port_index))
+ if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
- err = __nsim_dev_port_add(nsim_dev, port_index);
+ err = __nsim_dev_port_add(nsim_dev, type, port_index);
mutex_unlock(&nsim_dev->port_list_lock);
return err;
}
-int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
@@ -1230,7 +1604,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
int err = 0;
mutex_lock(&nsim_dev->port_list_lock);
- nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index);
+ nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 659d3dceb687..c3aeb15843e2 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -113,6 +113,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
struct netdevsim *ns = netdev_priv(dev);
struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) {
+ pr_err("Not supported in switchdev mode. Please use devlink API.\n");
+ return -EOPNOTSUPP;
+ }
+
if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
@@ -261,6 +266,18 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_get_devlink_port = nsim_get_devlink_port,
};
+static const struct net_device_ops nsim_vf_netdev_ops = {
+ .ndo_start_xmit = nsim_start_xmit,
+ .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = nsim_change_mtu,
+ .ndo_get_stats64 = nsim_get_stats64,
+ .ndo_setup_tc = nsim_setup_tc,
+ .ndo_set_features = nsim_set_features,
+ .ndo_get_devlink_port = nsim_get_devlink_port,
+};
+
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -280,6 +297,49 @@ static void nsim_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
}
+static int nsim_init_netdevsim(struct netdevsim *ns)
+{
+ int err;
+
+ ns->netdev->netdev_ops = &nsim_netdev_ops;
+
+ err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
+ if (err)
+ return err;
+
+ rtnl_lock();
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_utn_destroy;
+
+ nsim_ipsec_init(ns);
+
+ err = register_netdevice(ns->netdev);
+ if (err)
+ goto err_ipsec_teardown;
+ rtnl_unlock();
+ return 0;
+
+err_ipsec_teardown:
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+err_utn_destroy:
+ rtnl_unlock();
+ nsim_udp_tunnels_info_destroy(ns->netdev);
+ return err;
+}
+
+static int nsim_init_netdevsim_vf(struct netdevsim *ns)
+{
+ int err;
+
+ ns->netdev->netdev_ops = &nsim_vf_netdev_ops;
+ rtnl_lock();
+ err = register_netdevice(ns->netdev);
+ rtnl_unlock();
+ return err;
+}
+
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
@@ -299,33 +359,15 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
- dev->netdev_ops = &nsim_netdev_ops;
nsim_ethtool_init(ns);
-
- err = nsim_udp_tunnels_info_create(nsim_dev, dev);
+ if (nsim_dev_port_is_pf(nsim_dev_port))
+ err = nsim_init_netdevsim(ns);
+ else
+ err = nsim_init_netdevsim_vf(ns);
if (err)
goto err_free_netdev;
-
- rtnl_lock();
- err = nsim_bpf_init(ns);
- if (err)
- goto err_utn_destroy;
-
- nsim_ipsec_init(ns);
-
- err = register_netdevice(dev);
- if (err)
- goto err_ipsec_teardown;
- rtnl_unlock();
-
return ns;
-err_ipsec_teardown:
- nsim_ipsec_teardown(ns);
- nsim_bpf_uninit(ns);
-err_utn_destroy:
- rtnl_unlock();
- nsim_udp_tunnels_info_destroy(dev);
err_free_netdev:
free_netdev(dev);
return ERR_PTR(err);
@@ -337,10 +379,13 @@ void nsim_destroy(struct netdevsim *ns)
rtnl_lock();
unregister_netdevice(dev);
- nsim_ipsec_teardown(ns);
- nsim_bpf_uninit(ns);
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ }
rtnl_unlock();
- nsim_udp_tunnels_info_destroy(dev);
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port))
+ nsim_udp_tunnels_info_destroy(dev);
free_netdev(dev);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 7ff24e03577b..ae462957dcee 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -197,11 +197,22 @@ static inline void nsim_dev_psample_exit(struct nsim_dev *nsim_dev)
}
#endif
+enum nsim_dev_port_type {
+ NSIM_DEV_PORT_TYPE_PF,
+ NSIM_DEV_PORT_TYPE_VF,
+};
+
+#define NSIM_DEV_VF_PORT_INDEX_BASE 128
+#define NSIM_DEV_VF_PORT_INDEX_MAX UINT_MAX
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
unsigned int port_index;
+ enum nsim_dev_port_type port_type;
struct dentry *ddir;
+ struct dentry *rate_parent;
+ char *parent_name;
struct netdevsim *ns;
};
@@ -212,6 +223,8 @@ struct nsim_dev {
struct dentry *ddir;
struct dentry *ports_ddir;
struct dentry *take_snapshot;
+ struct dentry *max_vfs;
+ struct dentry *nodes_ddir;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
bool bpf_bind_verifier_accept;
@@ -236,6 +249,7 @@ struct nsim_dev {
bool fail_trap_group_set;
bool fail_trap_policer_set;
bool fail_trap_policer_counter_get;
+ bool fail_trap_drop_counter_get;
struct {
struct udp_tunnel_nic_shared utn_shared;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
@@ -247,8 +261,22 @@ struct nsim_dev {
u32 sleep;
} udp_ports;
struct nsim_dev_psample *psample;
+ u16 esw_mode;
};
+int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
+int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
+
+static inline bool nsim_esw_mode_is_legacy(struct nsim_dev *nsim_dev)
+{
+ return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_LEGACY;
+}
+
+static inline bool nsim_esw_mode_is_switchdev(struct nsim_dev *nsim_dev)
+{
+ return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
{
return devlink_net(priv_to_devlink(nsim_dev));
@@ -259,8 +287,10 @@ void nsim_dev_exit(void);
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ enum nsim_dev_port_type type,
unsigned int port_index);
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ enum nsim_dev_port_type type,
unsigned int port_index);
struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
@@ -269,6 +299,23 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max);
+ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos);
+ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos);
+void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev);
+
+static inline bool nsim_dev_port_is_pf(struct nsim_dev_port *nsim_dev_port)
+{
+ return nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_PF;
+}
+
+static inline bool nsim_dev_port_is_vf(struct nsim_dev_port *nsim_dev_port)
+{
+ return nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_VF;
+}
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
void nsim_ipsec_teardown(struct netdevsim *ns);
@@ -308,7 +355,9 @@ struct nsim_bus_dev {
struct net *initial_net; /* Purpose of this is to carry net pointer
* during the probe time only.
*/
+ unsigned int max_vfs;
unsigned int num_vfs;
+ struct mutex vfs_lock; /* Protects vfconfigs */
struct nsim_vf_config *vfconfigs;
/* Lock for devlink->reload_enabled in netdevsim module */
struct mutex nsim_bus_reload_lock;
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index c23146755972..0603d469bd57 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PCS drivers
-obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o
+pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
+
+obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c
new file mode 100644
index 000000000000..984c9f7f16a8
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs-nxp.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 NXP Semiconductors
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include "pcs-xpcs.h"
+
+/* LANE_DRIVER1_0 register */
+#define SJA1110_LANE_DRIVER1_0 0x8038
+#define SJA1110_TXDRV(x) (((x) << 12) & GENMASK(14, 12))
+
+/* LANE_DRIVER2_0 register */
+#define SJA1110_LANE_DRIVER2_0 0x803a
+#define SJA1110_TXDRVTRIM_LSB(x) ((x) & GENMASK_ULL(15, 0))
+
+/* LANE_DRIVER2_1 register */
+#define SJA1110_LANE_DRIVER2_1 0x803b
+#define SJA1110_LANE_DRIVER2_1_RSV BIT(9)
+#define SJA1110_TXDRVTRIM_MSB(x) (((x) & GENMASK_ULL(23, 16)) >> 16)
+
+/* LANE_TRIM register */
+#define SJA1110_LANE_TRIM 0x8040
+#define SJA1110_TXTEN BIT(11)
+#define SJA1110_TXRTRIM(x) (((x) << 8) & GENMASK(10, 8))
+#define SJA1110_TXPLL_BWSEL BIT(7)
+#define SJA1110_RXTEN BIT(6)
+#define SJA1110_RXRTRIM(x) (((x) << 3) & GENMASK(5, 3))
+#define SJA1110_CDR_GAIN BIT(2)
+#define SJA1110_ACCOUPLE_RXVCM_EN BIT(0)
+
+/* LANE_DATAPATH_1 register */
+#define SJA1110_LANE_DATAPATH_1 0x8037
+
+/* POWERDOWN_ENABLE register */
+#define SJA1110_POWERDOWN_ENABLE 0x8041
+#define SJA1110_TXPLL_PD BIT(12)
+#define SJA1110_TXPD BIT(11)
+#define SJA1110_RXPKDETEN BIT(10)
+#define SJA1110_RXCH_PD BIT(9)
+#define SJA1110_RXBIAS_PD BIT(8)
+#define SJA1110_RESET_SER_EN BIT(7)
+#define SJA1110_RESET_SER BIT(6)
+#define SJA1110_RESET_DES BIT(5)
+#define SJA1110_RCVEN BIT(4)
+
+/* RXPLL_CTRL0 register */
+#define SJA1110_RXPLL_CTRL0 0x8065
+#define SJA1110_RXPLL_FBDIV(x) (((x) << 2) & GENMASK(9, 2))
+
+/* RXPLL_CTRL1 register */
+#define SJA1110_RXPLL_CTRL1 0x8066
+#define SJA1110_RXPLL_REFDIV(x) ((x) & GENMASK(4, 0))
+
+/* TXPLL_CTRL0 register */
+#define SJA1110_TXPLL_CTRL0 0x806d
+#define SJA1110_TXPLL_FBDIV(x) ((x) & GENMASK(11, 0))
+
+/* TXPLL_CTRL1 register */
+#define SJA1110_TXPLL_CTRL1 0x806e
+#define SJA1110_TXPLL_REFDIV(x) ((x) & GENMASK(5, 0))
+
+/* RX_DATA_DETECT register */
+#define SJA1110_RX_DATA_DETECT 0x8045
+
+/* RX_CDR_CTLE register */
+#define SJA1110_RX_CDR_CTLE 0x8042
+
+/* In NXP SJA1105, the PCS is integrated with a PMA that has the TX lane
+ * polarity inverted by default (PLUS is MINUS, MINUS is PLUS). To obtain
+ * normal non-inverted behavior, the TX lane polarity must be inverted in the
+ * PCS, via the DIGITAL_CONTROL_2 register.
+ */
+int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs)
+{
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL2,
+ DW_VR_MII_DIG_CTRL2_TX_POL_INV);
+}
+
+static int nxp_sja1110_pma_config(struct dw_xpcs *xpcs,
+ u16 txpll_fbdiv, u16 txpll_refdiv,
+ u16 rxpll_fbdiv, u16 rxpll_refdiv,
+ u16 rx_cdr_ctle)
+{
+ u16 val;
+ int ret;
+
+ /* Program TX PLL feedback divider and reference divider settings for
+ * correct oscillation frequency.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL0,
+ SJA1110_TXPLL_FBDIV(txpll_fbdiv));
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL1,
+ SJA1110_TXPLL_REFDIV(txpll_refdiv));
+ if (ret < 0)
+ return ret;
+
+ /* Program transmitter amplitude and disable amplitude trimming */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER1_0,
+ SJA1110_TXDRV(0x5));
+ if (ret < 0)
+ return ret;
+
+ val = SJA1110_TXDRVTRIM_LSB(0xffffffull);
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_0, val);
+ if (ret < 0)
+ return ret;
+
+ val = SJA1110_TXDRVTRIM_MSB(0xffffffull) | SJA1110_LANE_DRIVER2_1_RSV;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_1, val);
+ if (ret < 0)
+ return ret;
+
+ /* Enable input and output resistor terminations for low BER. */
+ val = SJA1110_ACCOUPLE_RXVCM_EN | SJA1110_CDR_GAIN |
+ SJA1110_RXRTRIM(4) | SJA1110_RXTEN | SJA1110_TXPLL_BWSEL |
+ SJA1110_TXRTRIM(3) | SJA1110_TXTEN;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_TRIM, val);
+ if (ret < 0)
+ return ret;
+
+ /* Select PCS as transmitter data source. */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DATAPATH_1, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Program RX PLL feedback divider and reference divider for correct
+ * oscillation frequency.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL0,
+ SJA1110_RXPLL_FBDIV(rxpll_fbdiv));
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL1,
+ SJA1110_RXPLL_REFDIV(rxpll_refdiv));
+ if (ret < 0)
+ return ret;
+
+ /* Program threshold for receiver signal detector.
+ * Enable control of RXPLL by receiver signal detector to disable RXPLL
+ * when an input signal is not present.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_DATA_DETECT, 0x0005);
+ if (ret < 0)
+ return ret;
+
+ /* Enable TX and RX PLLs and circuits.
+ * Release reset of PMA to enable data flow to/from PCS.
+ */
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ val = ret & ~(SJA1110_TXPLL_PD | SJA1110_TXPD | SJA1110_RXCH_PD |
+ SJA1110_RXBIAS_PD | SJA1110_RESET_SER_EN |
+ SJA1110_RESET_SER | SJA1110_RESET_DES);
+ val |= SJA1110_RXPKDETEN | SJA1110_RCVEN;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE, val);
+ if (ret < 0)
+ return ret;
+
+ /* Program continuous-time linear equalizer (CTLE) settings. */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_CDR_CTLE,
+ rx_cdr_ctle);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs)
+{
+ return nxp_sja1110_pma_config(xpcs, 0x19, 0x1, 0x19, 0x1, 0x212a);
+}
+
+int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs)
+{
+ return nxp_sja1110_pma_config(xpcs, 0x7d, 0x2, 0x7d, 0x2, 0x732a);
+}
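
Worked example of the 24-bit TXDRVTRIM split performed above across the two 16-bit lane driver registers:

/*
 *	SJA1110_TXDRVTRIM_LSB(0xffffff) == 0xffff  -> LANE_DRIVER2_0
 *	SJA1110_TXDRVTRIM_MSB(0xffffff) == 0x00ff  -> LANE_DRIVER2_1[7:0]
 *
 * LANE_DRIVER2_1 additionally carries the reserved bit that this code
 * keeps set (SJA1110_LANE_DRIVER2_1_RSV, bit 9).
 */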
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 944ba105cac1..63fda3fc40aa 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -11,80 +11,10 @@
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/workqueue.h>
+#include "pcs-xpcs.h"
-#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
-#define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_SGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_MASK 0xffffffff
-
-/* Vendor regs access */
-#define DW_VENDOR BIT(15)
-
-/* VR_XS_PCS */
-#define DW_USXGMII_RST BIT(10)
-#define DW_USXGMII_EN BIT(9)
-#define DW_VR_XS_PCS_DIG_STS 0x0010
-#define DW_RXFIFO_ERR GENMASK(6, 5)
-
-/* SR_MII */
-#define DW_USXGMII_FULL BIT(8)
-#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
-#define DW_USXGMII_10000 (BIT(13) | BIT(6))
-#define DW_USXGMII_5000 (BIT(13) | BIT(5))
-#define DW_USXGMII_2500 (BIT(5))
-#define DW_USXGMII_1000 (BIT(6))
-#define DW_USXGMII_100 (BIT(13))
-#define DW_USXGMII_10 (0)
-
-/* SR_AN */
-#define DW_SR_AN_ADV1 0x10
-#define DW_SR_AN_ADV2 0x11
-#define DW_SR_AN_ADV3 0x12
-#define DW_SR_AN_LP_ABL1 0x13
-#define DW_SR_AN_LP_ABL2 0x14
-#define DW_SR_AN_LP_ABL3 0x15
-
-/* Clause 73 Defines */
-/* AN_LP_ABL1 */
-#define DW_C73_PAUSE BIT(10)
-#define DW_C73_ASYM_PAUSE BIT(11)
-#define DW_C73_AN_ADV_SF 0x1
-/* AN_LP_ABL2 */
-#define DW_C73_1000KX BIT(5)
-#define DW_C73_10000KX4 BIT(6)
-#define DW_C73_10000KR BIT(7)
-/* AN_LP_ABL3 */
-#define DW_C73_2500KX BIT(0)
-#define DW_C73_5000KR BIT(1)
-
-/* Clause 37 Defines */
-/* VR MII MMD registers offsets */
-#define DW_VR_MII_DIG_CTRL1 0x8000
-#define DW_VR_MII_AN_CTRL 0x8001
-#define DW_VR_MII_AN_INTR_STS 0x8002
-
-/* VR_MII_DIG_CTRL1 */
-#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
-
-/* VR_MII_AN_CTRL */
-#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3
-#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
-#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
-#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0
-#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1
-#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1)
-#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0
-#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
-
-/* VR_MII_AN_INTR_STS */
-#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
-#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2
-#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
-#define DW_VR_MII_C37_ANSGM_SP_10 0x0
-#define DW_VR_MII_C37_ANSGM_SP_100 0x1
-#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
-#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
+#define phylink_pcs_to_xpcs(pl_pcs) \
+ container_of((pl_pcs), struct dw_xpcs, pcs)
static const int xpcs_usxgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
@@ -144,96 +74,141 @@ static const int xpcs_sgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_2500basex_features[] = {
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const phy_interface_t xpcs_usxgmii_interfaces[] = {
PHY_INTERFACE_MODE_USXGMII,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_10gkr_interfaces[] = {
PHY_INTERFACE_MODE_10GKR,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_xlgmii_interfaces[] = {
PHY_INTERFACE_MODE_XLGMII,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_sgmii_interfaces[] = {
PHY_INTERFACE_MODE_SGMII,
+};
+
+static const phy_interface_t xpcs_2500basex_interfaces[] = {
+ PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_MAX,
};
-static struct xpcs_id {
- u32 id;
- u32 mask;
+enum {
+ DW_XPCS_USXGMII,
+ DW_XPCS_10GKR,
+ DW_XPCS_XLGMII,
+ DW_XPCS_SGMII,
+ DW_XPCS_2500BASEX,
+ DW_XPCS_INTERFACE_MAX,
+};
+
+struct xpcs_compat {
const int *supported;
const phy_interface_t *interface;
+ int num_interfaces;
int an_mode;
-} xpcs_id_list[] = {
- {
- .id = SYNOPSYS_XPCS_USXGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_usxgmii_features,
- .interface = xpcs_usxgmii_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_10GKR_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_10gkr_features,
- .interface = xpcs_10gkr_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_XLGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_xlgmii_features,
- .interface = xpcs_xlgmii_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_SGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_sgmii_features,
- .interface = xpcs_sgmii_interfaces,
- .an_mode = DW_AN_C37_SGMII,
- },
+ int (*pma_config)(struct dw_xpcs *xpcs);
};
-static int xpcs_read(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+struct xpcs_id {
+ u32 id;
+ u32 mask;
+ const struct xpcs_compat *compat;
+};
+
+static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
+ phy_interface_t interface)
+{
+ int i, j;
+
+ for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
+ const struct xpcs_compat *compat = &id->compat[i];
+
+ for (j = 0; j < compat->num_interfaces; j++)
+ if (compat->interface[j] == interface)
+ return compat;
+ }
+
+ return NULL;
+}
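
Every exported entry point resolves the active phy_interface_t through this table walk first. A minimal illustrative caller (hypothetical helper, not part of this patch):

/* Hypothetical: check whether this XPCS can drive a given interface. */
static bool xpcs_supports_interface(struct dw_xpcs *xpcs,
				    phy_interface_t interface)
{
	return xpcs_find_compat(xpcs->id, interface) != NULL;
}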
+
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
+{
+ const struct xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs->id, interface);
+ if (!compat)
+ return -ENODEV;
+
+ return compat->an_mode;
+}
+EXPORT_SYMBOL_GPL(xpcs_get_an_mode);
+
+static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
+ enum ethtool_link_mode_bit_indices linkmode)
{
- u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+ int i;
- return mdiobus_read(xpcs->bus, xpcs->addr, reg_addr);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ if (compat->supported[i] == linkmode)
+ return true;
+
+ return false;
}
-static int xpcs_write(struct mdio_xpcs_args *xpcs, int dev, u32 reg, u16 val)
+#define xpcs_linkmode_supported(compat, mode) \
+ __xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_ ## mode ## _BIT)
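
The macro only pastes the ethtool bit name, so for example:

/* xpcs_linkmode_supported(compat, 10000baseKR_Full) expands to */
__xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);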
+
+int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg)
{
- u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+ struct mii_bus *bus = xpcs->mdiodev->bus;
+ int addr = xpcs->mdiodev->addr;
- return mdiobus_write(xpcs->bus, xpcs->addr, reg_addr, val);
+ return mdiobus_read(bus, addr, reg_addr);
}
-static int xpcs_read_vendor(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+ struct mii_bus *bus = xpcs->mdiodev->bus;
+ int addr = xpcs->mdiodev->addr;
+
+ return mdiobus_write(bus, addr, reg_addr, val);
+}
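
The accessors now reach the bus through the mdio_device instead of a raw bus/address pair. mdiobus_c45_addr() packs the MMD and register into the flagged Clause 45 address; it is essentially the following (paraphrased from linux/mdio.h):

static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
{
	return MII_ADDR_C45 | devad << 16 | regnum;
}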
+
+static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
{
return xpcs_read(xpcs, dev, DW_VENDOR | reg);
}
-static int xpcs_write_vendor(struct mdio_xpcs_args *xpcs, int dev, int reg,
+static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg,
u16 val)
{
return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
}
-static int xpcs_read_vpcs(struct mdio_xpcs_args *xpcs, int reg)
+static int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg)
{
return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg);
}
-static int xpcs_write_vpcs(struct mdio_xpcs_args *xpcs, int reg, u16 val)
+static int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val)
{
return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
}
-static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
+static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
unsigned int retries = 12;
@@ -249,15 +224,17 @@ static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
}
-static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs)
+static int xpcs_soft_reset(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret, dev;
- switch (xpcs->an_mode) {
+ switch (compat->an_mode) {
case DW_AN_C73:
dev = MDIO_MMD_PCS;
break;
case DW_AN_C37_SGMII:
+ case DW_2500BASEX:
dev = MDIO_MMD_VEND2;
break;
default:
@@ -274,10 +251,10 @@ static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs)
#define xpcs_warn(__xpcs, __state, __args...) \
({ \
if ((__state)->link) \
- dev_warn(&(__xpcs)->bus->dev, ##__args); \
+ dev_warn(&(__xpcs)->mdiodev->dev, ##__args); \
})
-static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs,
+static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -328,7 +305,7 @@ static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_link_c73(struct mdio_xpcs_args *xpcs, bool an)
+static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
{
bool link = true;
int ret;
@@ -368,7 +345,7 @@ static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
return max;
}
-static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
+static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed)
{
int ret, speed_sel;
@@ -393,36 +370,44 @@ static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
break;
default:
/* Nothing to do here */
- return -EINVAL;
+ return;
}
ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
ret &= ~DW_USXGMII_SS_MASK;
ret |= speed_sel | DW_USXGMII_FULL;
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
- return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+ ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+ if (ret < 0)
+ goto out;
+
+ return;
+
+out:
+ pr_err("%s: XPCS access returned %pe\n", __func__, ERR_PTR(ret));
}
-static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret, adv;
@@ -434,7 +419,7 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV3 */
adv = 0;
- if (phylink_test(xpcs->supported, 2500baseX_Full))
+ if (xpcs_linkmode_supported(compat, 2500baseX_Full))
adv |= DW_C73_2500KX;
/* TODO: 5000baseKR */
@@ -445,11 +430,11 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV2 */
adv = 0;
- if (phylink_test(xpcs->supported, 1000baseKX_Full))
+ if (xpcs_linkmode_supported(compat, 1000baseKX_Full))
adv |= DW_C73_1000KX;
- if (phylink_test(xpcs->supported, 10000baseKX4_Full))
+ if (xpcs_linkmode_supported(compat, 10000baseKX4_Full))
adv |= DW_C73_10000KX4;
- if (phylink_test(xpcs->supported, 10000baseKR_Full))
+ if (xpcs_linkmode_supported(compat, 10000baseKR_Full))
adv |= DW_C73_10000KR;
ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv);
@@ -458,19 +443,20 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV1 */
adv = DW_C73_AN_ADV_SF;
- if (phylink_test(xpcs->supported, Pause))
+ if (xpcs_linkmode_supported(compat, Pause))
adv |= DW_C73_PAUSE;
- if (phylink_test(xpcs->supported, Asym_Pause))
+ if (xpcs_linkmode_supported(compat, Asym_Pause))
adv |= DW_C73_ASYM_PAUSE;
return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
}
-static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret;
- ret = _xpcs_config_aneg_c73(xpcs);
+ ret = _xpcs_config_aneg_c73(xpcs, compat);
if (ret < 0)
return ret;
@@ -483,8 +469,9 @@ static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
}
-static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state,
+ const struct xpcs_compat *compat)
{
int ret;
@@ -499,7 +486,7 @@ static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
/* Check if Aneg outcome is valid */
if (!(ret & DW_C73_AN_ADV_SF)) {
- xpcs_config_aneg_c73(xpcs);
+ xpcs_config_aneg_c73(xpcs, compat);
return 0;
}
@@ -509,7 +496,7 @@ static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs,
+static int xpcs_read_lpa_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -558,7 +545,7 @@ static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs,
+static void xpcs_resolve_lpa_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
@@ -568,7 +555,7 @@ static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs,
state->duplex = DUPLEX_FULL;
}
-static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
+static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
unsigned long *adv = state->advertising;
@@ -622,7 +609,7 @@ static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
return speed;
}
-static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
+static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
@@ -641,16 +628,70 @@ static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
}
}
-static int xpcs_validate(struct mdio_xpcs_args *xpcs,
- unsigned long *supported,
- struct phylink_link_state *state)
+void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
+ struct phylink_link_state *state)
{
- linkmode_and(supported, supported, xpcs->supported);
- linkmode_and(state->advertising, state->advertising, xpcs->supported);
- return 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported);
+ const struct xpcs_compat *compat;
+ int i;
+
+	/* With PHY_INTERFACE_MODE_NA, phylink expects us to report all
+	 * supported modes, so leave the supported and advertising masks
+	 * unrestricted and exit.
+	 */
+ if (state->interface == PHY_INTERFACE_MODE_NA)
+ return;
+
+ bitmap_zero(xpcs_supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ compat = xpcs_find_compat(xpcs->id, state->interface);
+
+ /* Populate the supported link modes for this
+ * PHY interface type
+ */
+ if (compat)
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
+
+ linkmode_and(supported, supported, xpcs_supported);
+ linkmode_and(state->advertising, state->advertising, xpcs_supported);
}
+EXPORT_SYMBOL_GPL(xpcs_validate);
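
A sketch of how a MAC driver would delegate from its phylink .validate callback (mac_priv and its xpcs member are assumptions, not part of this patch):

static void mac_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* MAC-level restrictions first, then let the XPCS trim further */
	if (priv->xpcs)
		xpcs_validate(priv->xpcs, supported, state);
}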
-static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs)
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
+{
+ int ret;
+
+ if (enable) {
+ /* Enable EEE */
+ ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT;
+ } else {
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
+ if (ret < 0)
+ return ret;
+ ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ DW_VR_MII_EEE_MULT_FACT_100NS);
+ }
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= DW_VR_MII_EEE_TRN_LPI;
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
+}
+EXPORT_SYMBOL_GPL(xpcs_config_eee);
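
An illustrative call site in a MAC driver's EEE path (the priv/ndev names and the multiplier source are assumptions):

	ret = xpcs_config_eee(priv->xpcs, priv->plat->mult_fact_100ns, true);
	if (ret)
		netdev_err(ndev, "failed to configure XPCS EEE: %d\n", ret);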
+
+static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
{
int ret;
@@ -686,26 +727,61 @@ static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs)
if (ret < 0)
return ret;
- ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ if (phylink_autoneg_inband(mode))
+ ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ else
+ ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
}
-static int xpcs_config(struct mdio_xpcs_args *xpcs,
- const struct phylink_link_state *state)
+static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+ if (ret < 0)
+ return ret;
+ ret |= DW_VR_MII_DIG_CTRL1_2G5_EN;
+ ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+ if (ret < 0)
+ return ret;
+ ret &= ~AN_CL37_EN;
+ ret |= SGMII_SPEED_SS6;
+ ret &= ~SGMII_SPEED_SS13;
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, ret);
+}
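
SS13/SS6 follow the BMCR speed-select encoding, so the function above leaves the PCS at the 1000 Mbps setting and relies on DW_VR_MII_DIG_CTRL1_2G5_EN for the 2.5x rate. A hypothetical decode helper, for reference only:

static int xpcs_sgmii_speed_sel(u16 mmd_ctrl)
{
	if ((mmd_ctrl & SGMII_SPEED_SS6) && !(mmd_ctrl & SGMII_SPEED_SS13))
		return SPEED_1000;
	if ((mmd_ctrl & SGMII_SPEED_SS13) && !(mmd_ctrl & SGMII_SPEED_SS6))
		return SPEED_100;
	if (!(mmd_ctrl & (SGMII_SPEED_SS13 | SGMII_SPEED_SS6)))
		return SPEED_10;
	return SPEED_UNKNOWN;	/* both bits set is not a valid speed here */
}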
+
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ unsigned int mode)
{
+ const struct xpcs_compat *compat;
int ret;
- switch (xpcs->an_mode) {
+ compat = xpcs_find_compat(xpcs->id, interface);
+ if (!compat)
+ return -ENODEV;
+
+ switch (compat->an_mode) {
case DW_AN_C73:
- if (state->an_enabled) {
- ret = xpcs_config_aneg_c73(xpcs);
+ if (phylink_autoneg_inband(mode)) {
+ ret = xpcs_config_aneg_c73(xpcs, compat);
if (ret)
return ret;
}
break;
case DW_AN_C37_SGMII:
- ret = xpcs_config_aneg_c37_sgmii(xpcs);
+ ret = xpcs_config_aneg_c37_sgmii(xpcs, mode);
+ if (ret)
+ return ret;
+ break;
+ case DW_2500BASEX:
+ ret = xpcs_config_2500basex(xpcs);
if (ret)
return ret;
break;
@@ -713,11 +789,29 @@ static int xpcs_config(struct mdio_xpcs_args *xpcs,
return -1;
}
+ if (compat->pma_config) {
+ ret = compat->pma_config(xpcs);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
+EXPORT_SYMBOL_GPL(xpcs_do_config);
+
+static int xpcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ return xpcs_do_config(xpcs, interface, mode);
+}
-static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state,
+ const struct xpcs_compat *compat)
{
int ret;
@@ -727,16 +821,16 @@ static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
/* ... and then we check the faults. */
ret = xpcs_read_fault_c73(xpcs, state);
if (ret) {
- ret = xpcs_soft_reset(xpcs);
+ ret = xpcs_soft_reset(xpcs, compat);
if (ret)
return ret;
state->link = 0;
- return xpcs_config(xpcs, state);
+ return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND);
}
- if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state)) {
+ if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) {
state->an_complete = true;
xpcs_read_lpa_c73(xpcs, state);
xpcs_resolve_lpa_c73(xpcs, state);
@@ -749,7 +843,7 @@ static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs,
+static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -790,39 +884,81 @@ static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static void xpcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ const struct xpcs_compat *compat;
int ret;
- switch (xpcs->an_mode) {
+ compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return;
+
+ switch (compat->an_mode) {
case DW_AN_C73:
- ret = xpcs_get_state_c73(xpcs, state);
- if (ret)
- return ret;
+ ret = xpcs_get_state_c73(xpcs, state, compat);
+ if (ret) {
+ pr_err("xpcs_get_state_c73 returned %pe\n",
+ ERR_PTR(ret));
+ return;
+ }
break;
case DW_AN_C37_SGMII:
ret = xpcs_get_state_c37_sgmii(xpcs, state);
- if (ret)
- return ret;
+ if (ret) {
+ pr_err("xpcs_get_state_c37_sgmii returned %pe\n",
+ ERR_PTR(ret));
+ }
break;
default:
- return -1;
+ return;
}
+}
- return 0;
+static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int mode,
+ int speed, int duplex)
+{
+ int val, ret;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ val = BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ val = BMCR_SPEED10;
+ break;
+ default:
+ return;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ val |= BMCR_FULLDPLX;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
+ if (ret)
+ pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
}
-static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
- phy_interface_t interface)
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
if (interface == PHY_INTERFACE_MODE_USXGMII)
return xpcs_config_usxgmii(xpcs, speed);
-
- return 0;
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return xpcs_link_up_sgmii(xpcs, mode, speed, duplex);
}
+EXPORT_SYMBOL_GPL(xpcs_link_up);
-static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
+static u32 xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
u32 id;
@@ -838,8 +974,10 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
if (ret < 0)
return 0xffffffff;
- /* If Device IDs are not all zeros, we found C73 AN-type device */
- if (id | ret)
+	/* If the Device IDs are neither all zeros nor all ones,
+	 * we found a C73 AN-type device.
+	 */
+ if ((id | ret) && (id | ret) != 0xffffffff)
return id | ret;
/* Next, search C37 PCS using Vendor-Specific MII MMD */
@@ -860,60 +998,141 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
return 0xffffffff;
}
-static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
- struct xpcs_id *match,
- phy_interface_t interface)
-{
- int i;
-
- for (i = 0; match->interface[i] != PHY_INTERFACE_MODE_MAX; i++) {
- if (match->interface[i] == interface)
- break;
- }
+static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_USXGMII] = {
+ .supported = xpcs_usxgmii_features,
+ .interface = xpcs_usxgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_usxgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_10GKR] = {
+ .supported = xpcs_10gkr_features,
+ .interface = xpcs_10gkr_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gkr_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_XLGMII] = {
+ .supported = xpcs_xlgmii_features,
+ .interface = xpcs_xlgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_xlgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ },
+ [DW_XPCS_2500BASEX] = {
+ .supported = xpcs_2500basex_features,
+ .interface = xpcs_2500basex_interfaces,
+		.num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
+ .an_mode = DW_2500BASEX,
+ },
+};
- if (match->interface[i] == PHY_INTERFACE_MODE_MAX)
- return false;
+static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ .pma_config = nxp_sja1105_sgmii_pma_config,
+ },
+};
- for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(match->supported[i], xpcs->supported);
+static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ .pma_config = nxp_sja1110_sgmii_pma_config,
+ },
+ [DW_XPCS_2500BASEX] = {
+ .supported = xpcs_2500basex_features,
+ .interface = xpcs_2500basex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
+ .an_mode = DW_2500BASEX,
+ .pma_config = nxp_sja1110_2500basex_pma_config,
+ },
+};
- xpcs->an_mode = match->an_mode;
+static const struct xpcs_id xpcs_id_list[] = {
+ {
+ .id = SYNOPSYS_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = synopsys_xpcs_compat,
+ }, {
+ .id = NXP_SJA1105_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = nxp_sja1105_xpcs_compat,
+ }, {
+ .id = NXP_SJA1110_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = nxp_sja1110_xpcs_compat,
+ },
+};
- return true;
-}
+static const struct phylink_pcs_ops xpcs_phylink_ops = {
+ .pcs_config = xpcs_config,
+ .pcs_get_state = xpcs_get_state,
+ .pcs_link_up = xpcs_link_up,
+};
-static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
+struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface)
{
- u32 xpcs_id = xpcs_get_id(xpcs);
- struct xpcs_id *match = NULL;
- int i;
+ struct dw_xpcs *xpcs;
+ u32 xpcs_id;
+ int i, ret;
+
+ xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
+ if (!xpcs)
+		return ERR_PTR(-ENOMEM);
+
+ xpcs->mdiodev = mdiodev;
+
+ xpcs_id = xpcs_get_id(xpcs);
for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
- struct xpcs_id *entry = &xpcs_id_list[i];
+ const struct xpcs_id *entry = &xpcs_id_list[i];
+ const struct xpcs_compat *compat;
+
+ if ((xpcs_id & entry->mask) != entry->id)
+ continue;
- if ((xpcs_id & entry->mask) == entry->id) {
- match = entry;
+ xpcs->id = entry;
- if (xpcs_check_features(xpcs, match, interface))
- return xpcs_soft_reset(xpcs);
+ compat = xpcs_find_compat(entry, interface);
+ if (!compat) {
+ ret = -ENODEV;
+ goto out;
}
+
+ xpcs->pcs.ops = &xpcs_phylink_ops;
+ xpcs->pcs.poll = true;
+
+ ret = xpcs_soft_reset(xpcs, compat);
+ if (ret)
+ goto out;
+
+ return xpcs;
}
- return -ENODEV;
-}
+ ret = -ENODEV;
-static struct mdio_xpcs_ops xpcs_ops = {
- .validate = xpcs_validate,
- .config = xpcs_config,
- .get_state = xpcs_get_state,
- .link_up = xpcs_link_up,
- .probe = xpcs_probe,
-};
+out:
+ kfree(xpcs);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xpcs_create);
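
Putting the new API together, a consumer looks roughly like this (mac_priv, the interface choice and the phylink wiring are assumptions):

static int mac_attach_xpcs(struct mac_priv *priv,
			   struct mdio_device *mdiodev)
{
	struct dw_xpcs *xpcs;

	xpcs = xpcs_create(mdiodev, PHY_INTERFACE_MODE_SGMII);
	if (IS_ERR(xpcs))
		return PTR_ERR(xpcs);

	priv->xpcs = xpcs;
	phylink_set_pcs(priv->phylink, &xpcs->pcs);

	return 0;
}

On teardown the driver simply calls xpcs_destroy(priv->xpcs).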
-struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+void xpcs_destroy(struct dw_xpcs *xpcs)
{
- return &xpcs_ops;
+ kfree(xpcs);
}
-EXPORT_SYMBOL_GPL(mdio_xpcs_get_ops);
+EXPORT_SYMBOL_GPL(xpcs_destroy);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
new file mode 100644
index 000000000000..35651d32a224
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ *
+ * Author: Jose Abreu <Jose.Abreu@synopsys.com>
+ */
+
+#define SYNOPSYS_XPCS_ID 0x7996ced0
+#define SYNOPSYS_XPCS_MASK 0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST BIT(10)
+#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_RXFIFO_ERR GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL BIT(8)
+#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000 (BIT(13) | BIT(6))
+#define DW_USXGMII_5000 (BIT(13) | BIT(5))
+#define DW_USXGMII_2500 (BIT(5))
+#define DW_USXGMII_1000 (BIT(6))
+#define DW_USXGMII_100 (BIT(13))
+#define DW_USXGMII_10 (0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1 0x10
+#define DW_SR_AN_ADV2 0x11
+#define DW_SR_AN_ADV3 0x12
+#define DW_SR_AN_LP_ABL1 0x13
+#define DW_SR_AN_LP_ABL2 0x14
+#define DW_SR_AN_LP_ABL3 0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE BIT(10)
+#define DW_C73_ASYM_PAUSE BIT(11)
+#define DW_C73_AN_ADV_SF 0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX BIT(5)
+#define DW_C73_10000KX4 BIT(6)
+#define DW_C73_10000KR BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX BIT(0)
+#define DW_C73_5000KR BIT(1)
+
+/* Clause 37 Defines */
+/* VR MII MMD registers offsets */
+#define DW_VR_MII_MMD_CTRL 0x0000
+#define DW_VR_MII_DIG_CTRL1 0x8000
+#define DW_VR_MII_AN_CTRL 0x8001
+#define DW_VR_MII_AN_INTR_STS 0x8002
+/* Enable 2.5G Mode */
+#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2)
+/* EEE Mode Control Register */
+#define DW_VR_MII_EEE_MCTRL0 0x8006
+#define DW_VR_MII_EEE_MCTRL1 0x800b
+#define DW_VR_MII_DIG_CTRL2 0x80e1
+
+/* VR_MII_DIG_CTRL1 */
+#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
+
+/* VR_MII_DIG_CTRL2 */
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
+
+/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3
+#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
+#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
+#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0
+#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1
+#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1)
+#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0
+#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
+
+/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
+#define DW_VR_MII_C37_ANSGM_SP_10 0x0
+#define DW_VR_MII_C37_ANSGM_SP_100 0x1
+#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
+#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
+
+/* SR MII MMD Control defines */
+#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */
+#define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */
+#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */
+
+/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
+#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
+#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
+#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
+#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
+#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
+
+#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8
+#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
+
+/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+
+int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
+int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
+
+int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs);
+int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs);
+int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs);
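
For reference, decoding the resolved C37 SGMII autoneg result out of DW_VR_MII_AN_INTR_STS looks like this (a hypothetical helper, presumably close to what xpcs_get_state_c37_sgmii() does internally):

static int xpcs_ansgm_speed(u16 intr_sts)
{
	switch ((intr_sts & DW_VR_MII_AN_STS_C37_ANSGM_SP) >>
		DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT) {
	case DW_VR_MII_C37_ANSGM_SP_10:
		return SPEED_10;
	case DW_VR_MII_C37_ANSGM_SP_100:
		return SPEED_100;
	case DW_VR_MII_C37_ANSGM_SP_1000:
		return SPEED_1000;
	default:
		return SPEED_UNKNOWN;
	}
}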
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 288bf405ebdb..c56f703ae998 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -207,6 +207,11 @@ config MARVELL_88X2222_PHY
Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet
Transceiver.
+config MEDIATEK_GE_PHY
+ tristate "MediaTek Gigabit Ethernet PHYs"
+ help
+ Supports the MediaTek Gigabit Ethernet PHYs.
+
config MICREL_PHY
tristate "Micrel PHYs"
help
@@ -229,6 +234,12 @@ config MICROSEMI_PHY
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
+config MOTORCOMM_PHY
+ tristate "Motorcomm PHYs"
+ help
+ Enables support for Motorcomm network PHYs.
+ Currently supports the YT8511 gigabit PHY.
+
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
help
@@ -247,10 +258,11 @@ config NXP_TJA11XX_PHY
Currently supports the NXP TJA1100 and TJA1101 PHY.
config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs"
+ tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
depends on REGULATOR
help
- Currently supports the AR8030, AR8031, AR8033 and AR8035 model
+	  Currently supports the AR8030, AR8031, AR8033 and AR8035 models,
+	  as well as the internal QCA8337 PHY of the qca8k switch.
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index bcda7ed2455d..172bb193ae6a 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -64,12 +64,14 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
+obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
+obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 55a0b91816e2..5ce6da62cc8e 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
-/**
+/*
* Driver for Analog Devices Industrial Ethernet PHYs
*
* Copyright 2019 Analog Devices Inc.
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 32af52dd5aed..5d62b85a4024 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -83,8 +83,8 @@
#define AT803X_MODE_CFG_MASK 0x0F
#define AT803X_MODE_CFG_SGMII 0x01
-#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
-#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
@@ -92,10 +92,16 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_3D 0x3D
+
#define AT803X_DEBUG_REG_1F 0x1F
#define AT803X_DEBUG_PLL_ON BIT(2)
#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+#define MDIO_AZ_DEBUG 0x800D
+
/* AT803x supports either the XTAL input pad, an internal PLL or the
* DSP as clock reference for the clock output pad. The XTAL reference
* is only used for 25 MHz output, all other frequencies need the PLL.
@@ -128,33 +134,59 @@
#define AT803X_CLK_OUT_STRENGTH_HALF 1
#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
-#define AT803X_DEFAULT_DOWNSHIFT 5
-#define AT803X_MIN_DOWNSHIFT 2
-#define AT803X_MAX_DOWNSHIFT 9
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
-#define ATH9331_PHY_ID 0x004dd041
-#define ATH8030_PHY_ID 0x004dd076
-#define ATH8031_PHY_ID 0x004dd074
-#define ATH8032_PHY_ID 0x004dd023
-#define ATH8035_PHY_ID 0x004dd072
+#define ATH9331_PHY_ID 0x004dd041
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
+#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
-#define AT803X_PAGE_FIBER 0
-#define AT803X_PAGE_COPPER 1
+#define QCA8327_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
+/* don't turn off internal PLL */
+#define AT803X_KEEP_PLL_ENABLED BIT(0)
+#define AT803X_DISABLE_SMARTEEE BIT(1)
MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+static struct at803x_hw_stat at803x_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
struct at803x_priv {
int flags;
-#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
-#define AT803X_DISABLE_SMARTEEE BIT(1)
u16 clk_25m_reg;
u16 clk_25m_mask;
u8 smarteee_lpi_tw_1g;
@@ -162,6 +194,7 @@ struct at803x_priv {
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
+ u64 stats[ARRAY_SIZE(at803x_hw_stats)];
};
struct at803x_context {
@@ -173,6 +206,17 @@ struct at803x_context {
u16 led_control;
};
+static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+
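The debug registers are indirect: at803x_debug_reg_write() latches the register number through AT803X_DEBUG_ADDR and the data through AT803X_DEBUG_DATA. A read-modify-write companion would look like this (illustrative sketch; the in-tree driver has a similar at803x_debug_reg_mask() helper):

static int at803x_debug_reg_modify(struct phy_device *phydev, u16 reg,
				   u16 clear, u16 set)
{
	int val;

	/* at803x_debug_reg_read() leaves AT803X_DEBUG_ADDR pointing at reg */
	val = at803x_debug_reg_read(phydev, reg);
	if (val < 0)
		return val;

	return phy_write(phydev, AT803X_DEBUG_DATA, (val & ~clear) | set);
}
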
static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
{
int ret;
@@ -335,6 +379,53 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts |= WAKE_MAGIC;
}
+static int at803x_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(at803x_hw_stats);
+}
+
+static void at803x_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ at803x_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 at803x_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = at803x_hw_stats[i];
+ struct at803x_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void at803x_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++)
+ data[i] = at803x_get_stat(phydev, i);
+}
+
static int at803x_suspend(struct phy_device *phydev)
{
int value;
@@ -610,6 +701,34 @@ static void at803x_remove(struct phy_device *phydev)
regulator_disable(priv->vddio);
}
+static int at803x_get_features(struct phy_device *phydev)
+{
+ int err;
+
+ err = genphy_read_abilities(phydev);
+ if (err)
+ return err;
+
+ if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID))
+ return 0;
+
+ /* AR8031/AR8033 have different status registers
+ * for copper and fiber operation. However, the
+ * extended status register is the same for both
+ * operation modes.
+ *
+ * As a result of that, ESTATUS_1000_XFULL is set
+ * to 1 even when operating in copper TP mode.
+ *
+ * Remove this mode from the supported link modes,
+ * as this driver currently only supports copper
+ * operation.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+ return 0;
+}
+
static int at803x_smarteee_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -1170,6 +1289,34 @@ static int at803x_cable_test_start(struct phy_device *phydev)
return 0;
}
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+ return 0;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
@@ -1225,7 +1372,7 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.read_page = at803x_read_page,
.write_page = at803x_write_page,
- /* PHY_GBIT_FEATURES */
+ .get_features = at803x_get_features,
.read_status = at803x_read_status,
.config_intr = &at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
@@ -1266,7 +1413,20 @@ static struct phy_driver at803x_driver[] = {
.read_status = at803x_read_status,
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
-} };
+}, {
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "QCA PHY 8337",
+ /* PHY_GBIT_FEATURES */
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+}, };
module_phy_driver(at803x_driver);
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 79bf7ef1fcfd..457896337505 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -10,6 +10,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
+#define PHY_ID_ASIX_AX88772A 0x003b1861
+#define PHY_ID_ASIX_AX88772C 0x003b1881
#define PHY_ID_ASIX_AX88796B 0x003b1841
MODULE_DESCRIPTION("Asix PHY driver");
@@ -39,7 +41,75 @@ static int asix_soft_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static struct phy_driver asix_driver[] = { {
+/* The AX88772A does not work properly with some old switches (e.g. the
+ * NETGEAR EN 108TP): after autoneg completes and the link is reported as
+ * active, the MII_LPA register reads 0. The issue is not reproducible on
+ * the AX88772C.
+ */
+static int asix_ax88772a_read_status(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link)
+ return 0;
+
+	/* If MII_LPA is 0, phy_resolve_aneg_linkmode() cannot resolve the
+	 * link mode, so fall back to the speed and duplex bits in MII_BMCR.
+	 */
+ val = phy_read(phydev, MII_BMCR);
+ if (val < 0)
+ return val;
+
+ if (val & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ if (val & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ ret = genphy_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+}
+
+static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
+{
+	/* Reset the PHY, otherwise MII_LPA provides stale information.
+	 * The issue is reproducible only with some link partner PHYs.
+	 */
+ if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
+ phydev->drv->soft_reset(phydev);
+}
+
+static struct phy_driver asix_driver[] = {
+{
+ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A),
+ .name = "Asix Electronics AX88772A",
+ .flags = PHY_IS_INTERNAL,
+ .read_status = asix_ax88772a_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = asix_soft_reset,
+ .link_change_notify = asix_ax88772a_link_change_notify,
+}, {
+ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C),
+ .name = "Asix Electronics AX88772C",
+ .flags = PHY_IS_INTERNAL,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = asix_soft_reset,
+}, {
.phy_id = PHY_ID_ASIX_AX88796B,
.name = "Asix Electronics AX88796B",
.phy_id_mask = 0xfffffff0,
@@ -50,6 +120,8 @@ static struct phy_driver asix_driver[] = { {
module_phy_driver(asix_driver);
static struct mdio_device_id __maybe_unused asix_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
{ PHY_ID_ASIX_AX88796B, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 4ac8fd190e9d..313563482690 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -54,9 +54,9 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
u16 reg = be32_to_cpup(paddr++);
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
- int val;
u32 regnum = mdiobus_c45_addr(devid, reg);
- val = 0;
+ int val = 0;
+
if (mask) {
val = phy_read(phydev, regnum);
if (val < 0) {
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index a3b3842c67e5..4ac4bce1bf32 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -43,10 +43,10 @@
#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
#define MII_DM9161_INTR_SPD_CHANGE 0x0008
#define MII_DM9161_INTR_LINK_CHANGE 0x0004
-#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_INIT 0x0000
#define MII_DM9161_INTR_STOP \
-(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
- | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+ (MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK | \
+ MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
#define MII_DM9161_INTR_CHANGE \
(MII_DM9161_INTR_DPLX_CHANGE | \
MII_DM9161_INTR_SPD_CHANGE | \
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 0d79f68f301c..705c16675b80 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -170,9 +170,9 @@ static ushort gpio_tab[GPIO_TABLE_SIZE] = {
module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);
-MODULE_PARM_DESC(chosen_phy, \
+MODULE_PARM_DESC(chosen_phy,
"The address of the PHY to use for the ancillary clock features");
-MODULE_PARM_DESC(gpio_tab, \
+MODULE_PARM_DESC(gpio_tab,
"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
@@ -615,6 +615,7 @@ static void prune_rx_ts(struct dp83640_private *dp83640)
static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
int val;
+
phy_write(phydev, PAGESEL, 0);
val = phy_read(phydev, PHYCR2);
if (on)
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9bd9a5c0b1db..6bbc81ad295f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
if (err < 0)
return err;
usleep_range(10, 20);
- /* After reset FORCE_LINK_GOOD bit is set. Although the
- * default value should be unset. Disable FORCE_LINK_GOOD
- * for the phy to work properly.
- */
return phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 09e07b902d3a..be1b71d7cab7 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -46,8 +46,8 @@ MODULE_LICENSE("GPL");
static int et1011c_config_aneg(struct phy_device *phydev)
{
- int ctl = 0;
- ctl = phy_read(phydev, MII_BMCR);
+ int ctl = phy_read(phydev, MII_BMCR);
+
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 |
@@ -60,9 +60,10 @@ static int et1011c_config_aneg(struct phy_device *phydev)
static int et1011c_read_status(struct phy_device *phydev)
{
+ static int speed;
int ret;
u32 val;
- static int speed;
+
ret = genphy_read_status(phydev);
if (speed != phydev->speed) {
@@ -72,10 +73,10 @@ static int et1011c_read_status(struct phy_device *phydev)
ET1011C_GIGABIT_SPEED) {
val = phy_read(phydev, ET1011C_CONFIG_REG);
val &= ~ET1011C_TX_FIFO_MASK;
- phy_write(phydev, ET1011C_CONFIG_REG, val\
- | ET1011C_GMII_INTERFACE\
- | ET1011C_SYS_CLK_EN\
- | ET1011C_TX_FIFO_DEPTH_16);
+ phy_write(phydev, ET1011C_CONFIG_REG, val |
+ ET1011C_GMII_INTERFACE |
+ ET1011C_SYS_CLK_EN |
+ ET1011C_TX_FIFO_DEPTH_16);
}
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 18d81f43f2a8..c65fb5f5d2dc 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -161,8 +161,8 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
}
int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status) {
-
+ struct fixed_phy_status *status)
+{
return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index bde3356a2f86..e3bf827b7959 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -242,8 +242,8 @@ static int lxt973a2_read_status(struct phy_device *phydev)
return lpa;
/* If both registers are equal, it is suspect but not
- * impossible, hence a new try
- */
+ * impossible, hence a new try
+ */
} while (lpa == adv && retry--);
mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6721c1c26c2..3de93c9f2744 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -367,39 +367,24 @@ static irqreturn_t marvell_handle_interrupt(struct phy_device *phydev)
static int marvell_set_polarity(struct phy_device *phydev, int polarity)
{
- int reg;
- int err;
- int val;
-
- /* get the current settings */
- reg = phy_read(phydev, MII_M1011_PHY_SCR);
- if (reg < 0)
- return reg;
+ u16 val;
- val = reg;
- val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
switch (polarity) {
case ETH_TP_MDI:
- val |= MII_M1011_PHY_SCR_MDI;
+ val = MII_M1011_PHY_SCR_MDI;
break;
case ETH_TP_MDI_X:
- val |= MII_M1011_PHY_SCR_MDI_X;
+ val = MII_M1011_PHY_SCR_MDI_X;
break;
case ETH_TP_MDI_AUTO:
case ETH_TP_MDI_INVALID:
default:
- val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+ val = MII_M1011_PHY_SCR_AUTO_CROSS;
break;
}
- if (val != reg) {
- /* Set the new polarity value in the register */
- err = phy_write(phydev, MII_M1011_PHY_SCR, val);
- if (err)
- return err;
- }
-
- return val != reg;
+ return phy_modify_changed(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_AUTO_CROSS, val);
}
static int marvell_config_aneg(struct phy_device *phydev)
@@ -824,14 +809,19 @@ static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
{
int delay;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
delay = MII_M1111_RGMII_RX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
delay = MII_M1111_RGMII_TX_DELAY;
- } else {
+ break;
+ default:
delay = 0;
+ break;
}
return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dadf75ff3ab9..53f034fc2ef7 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -175,6 +175,7 @@ EXPORT_SYMBOL(mdiobus_alloc_size);
static void mdiobus_release(struct device *d)
{
struct mii_bus *bus = to_mii_bus(d);
+
BUG_ON(bus->state != MDIOBUS_RELEASED &&
/* for compatibility with error handling in drivers */
bus->state != MDIOBUS_ALLOCATED);
@@ -458,8 +459,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
continue;
if (addr == mdiodev->addr) {
- dev->of_node = child;
- dev->fwnode = of_fwnode_handle(child);
+ device_set_node(dev, of_fwnode_handle(child));
return;
}
}
@@ -607,7 +607,8 @@ void mdiobus_unregister(struct mii_bus *bus)
struct mdio_device *mdiodev;
int i;
- BUG_ON(bus->state != MDIOBUS_REGISTERED);
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
bus->state = MDIOBUS_UNREGISTERED;
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 0837319a52d7..c94cb5382dc9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -77,7 +77,7 @@ int mdio_device_register(struct mdio_device *mdiodev)
{
int err;
- dev_dbg(&mdiodev->dev, "mdio_device_register\n");
+ dev_dbg(&mdiodev->dev, "%s\n", __func__);
err = mdiobus_register_device(mdiodev);
if (err)
@@ -188,7 +188,7 @@ int mdio_driver_register(struct mdio_driver *drv)
struct mdio_driver_common *mdiodrv = &drv->mdiodrv;
int retval;
- pr_debug("mdio_driver_register: %s\n", mdiodrv->driver.name);
+ pr_debug("%s: %s\n", __func__, mdiodrv->driver.name);
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
new file mode 100644
index 000000000000..11ff335d6228
--- /dev/null
+++ b/drivers/net/phy/mediatek-ge.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define MTK_EXT_PAGE_ACCESS 0x1f
+#define MTK_PHY_PAGE_STANDARD 0x0000
+#define MTK_PHY_PAGE_EXTENDED 0x0001
+#define MTK_PHY_PAGE_EXTENDED_2 0x0002
+#define MTK_PHY_PAGE_EXTENDED_3 0x0003
+#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
+#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+
+static int mtk_gephy_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
+}
+
+static int mtk_gephy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page);
+}
+
+static void mtk_gephy_config_init(struct phy_device *phydev)
+{
+ /* Disable EEE */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
+
+ /* Enable HW auto downshift */
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
+
+ /* Increase SlvDPSready time */
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ __phy_write(phydev, 0x10, 0xafae);
+ __phy_write(phydev, 0x12, 0x2f);
+ __phy_write(phydev, 0x10, 0x8fae);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+ /* Adjust 100_mse_threshold */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff);
+
+ /* Disable mcc */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300);
+}
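
The 52B5 sequence above discards phy_select_page()'s return value and hard-restores the standard page. The canonical paged-access pattern, which phy_write_paged() implements internally, looks like this (a sketch, not part of this patch):

static int mtk_gephy_paged_write(struct phy_device *phydev, int page,
				 u32 reg, u16 val)
{
	int oldpage, ret = 0;

	oldpage = phy_select_page(phydev, page);
	if (oldpage >= 0)
		ret = __phy_write(phydev, reg, val);

	return phy_restore_page(phydev, oldpage, ret);
}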
+
+static int mt7530_phy_config_init(struct phy_device *phydev)
+{
+ mtk_gephy_config_init(phydev);
+
+ /* Increase post_update_timer */
+ phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b);
+
+ return 0;
+}
+
+static int mt7531_phy_config_init(struct phy_device *phydev)
+{
+ if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
+ return -EINVAL;
+
+ mtk_gephy_config_init(phydev);
+
+ /* PHY link down power saving enable */
+ phy_set_bits(phydev, 0x17, BIT(4));
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300);
+
+ /* Set TX Pair delay selection */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404);
+
+ return 0;
+}
+
+static struct phy_driver mtk_gephy_driver[] = {
+ {
+ PHY_ID_MATCH_EXACT(0x03a29412),
+ .name = "MediaTek MT7530 PHY",
+ .config_init = mt7530_phy_config_init,
+ /* Interrupts are handled by the switch, not the PHY
+ * itself.
+ */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .read_page = mtk_gephy_read_page,
+ .write_page = mtk_gephy_write_page,
+ },
+ {
+ PHY_ID_MATCH_EXACT(0x03a29441),
+ .name = "MediaTek MT7531 PHY",
+ .config_init = mt7531_phy_config_init,
+ /* Interrupts are handled by the switch, not the PHY
+ * itself.
+ */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .read_page = mtk_gephy_read_page,
+ .write_page = mtk_gephy_write_page,
+ },
+};
+
+module_phy_driver(mtk_gephy_driver);
+
+static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x03a29400) },
+ { }
+};
+
+MODULE_DESCRIPTION("MediaTek Gigabit Ethernet PHY driver");
+MODULE_AUTHOR("DENG, Qingfang <dqfext@gmail.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(mdio, mtk_gephy_tbl);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index a14a00328fa3..4d53886f7d51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -20,6 +20,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -38,42 +39,60 @@
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
-#define KSZPHY_INTCS_JABBER BIT(15)
-#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
-#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
-#define KSZPHY_INTCS_PARELLEL BIT(12)
-#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
-#define KSZPHY_INTCS_LINK_DOWN BIT(10)
-#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
-#define KSZPHY_INTCS_LINK_UP BIT(8)
-#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
+#define KSZPHY_INTCS_JABBER BIT(15)
+#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
+#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
+#define KSZPHY_INTCS_PARELLEL BIT(12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
+#define KSZPHY_INTCS_LINK_DOWN BIT(10)
+#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
+#define KSZPHY_INTCS_LINK_UP BIT(8)
+#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
KSZPHY_INTCS_LINK_DOWN)
-#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2)
-#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0)
-#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\
+#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2)
+#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0)
+#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\
KSZPHY_INTCS_LINK_UP_STATUS)
+/* LinkMD Control/Status */
+#define KSZ8081_LMD 0x1d
+#define KSZ8081_LMD_ENABLE_TEST BIT(15)
+#define KSZ8081_LMD_STAT_NORMAL 0
+#define KSZ8081_LMD_STAT_OPEN 1
+#define KSZ8081_LMD_STAT_SHORT 2
+#define KSZ8081_LMD_STAT_FAIL 3
+#define KSZ8081_LMD_STAT_MASK GENMASK(14, 13)
+/* Short cable (<10 meter) has been detected by LinkMD */
+#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
+#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+
/* PHY Control 1 */
-#define MII_KSZPHY_CTRL_1 0x1e
+#define MII_KSZPHY_CTRL_1 0x1e
+#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
/* PHY Control 2 / PHY Control (if no PHY Control 1) */
-#define MII_KSZPHY_CTRL_2 0x1f
-#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
+#define MII_KSZPHY_CTRL_2 0x1f
+#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
/* bitmap of PHY register to set interrupt mode */
+#define KSZ8081_CTRL2_HP_MDIX BIT(15)
+#define KSZ8081_CTRL2_MDI_MDI_X_SELECT BIT(14)
+#define KSZ8081_CTRL2_DISABLE_AUTO_MDIX BIT(13)
+#define KSZ8081_CTRL2_FORCE_LINK BIT(11)
+#define KSZ8081_CTRL2_POWER_SAVING BIT(10)
#define KSZPHY_CTRL_INT_ACTIVE_HIGH BIT(9)
#define KSZPHY_RMII_REF_CLK_SEL BIT(7)
/* Write/read to/from extended registers */
-#define MII_KSZPHY_EXTREG 0x0b
-#define KSZPHY_EXTREG_WRITE 0x8000
+#define MII_KSZPHY_EXTREG 0x0b
+#define KSZPHY_EXTREG_WRITE 0x8000
-#define MII_KSZPHY_EXTREG_WRITE 0x0c
-#define MII_KSZPHY_EXTREG_READ 0x0d
+#define MII_KSZPHY_EXTREG_WRITE 0x0c
+#define MII_KSZPHY_EXTREG_READ 0x0d
/* Extended registers */
-#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
-#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
-#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
#define PS_TO_REG 200
@@ -422,6 +441,87 @@ static int ksz8081_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
+static int ksz8081_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = KSZ8081_CTRL2_DISABLE_AUTO_MDIX;
+ break;
+ case ETH_TP_MDI_X:
+ val = KSZ8081_CTRL2_DISABLE_AUTO_MDIX |
+ KSZ8081_CTRL2_MDI_MDI_X_SELECT;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, MII_KSZPHY_CTRL_2,
+ KSZ8081_CTRL2_HP_MDIX |
+ KSZ8081_CTRL2_MDI_MDI_X_SELECT |
+ KSZ8081_CTRL2_DISABLE_AUTO_MDIX,
+ KSZ8081_CTRL2_HP_MDIX | val);
+}
+
+static int ksz8081_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+	/* The PHY automatically changes the MDI-X configuration after
+	 * switching autoneg from off to on, so keep the MDI-X configuration
+	 * under our own control and apply it once autoneg has been set up.
+	 */
+ return ksz8081_config_mdix(phydev, phydev->mdix_ctrl);
+}
+
+static int ksz8081_mdix_update(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL_2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ8081_CTRL2_DISABLE_AUTO_MDIX) {
+ if (ret & KSZ8081_CTRL2_MDI_MDI_X_SELECT)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL_1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ8081_CTRL1_MDIX_STAT)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int ksz8081_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz8081_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
@@ -488,8 +588,7 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
static int ksz9021_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- const struct device_node *of_node = dev->of_node;
+ const struct device_node *of_node;
const struct device *dev_walker;
/* The Micrel driver has a deprecated option to place phy OF
@@ -711,8 +810,7 @@ static int ksz9031_config_rgmii_delay(struct phy_device *phydev)
static int ksz9031_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- const struct device_node *of_node = dev->of_node;
+ const struct device_node *of_node;
static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
static const char *rx_data_skews[4] = {
"rxd0-skew-ps", "rxd1-skew-ps",
@@ -907,8 +1005,7 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
static int ksz9131_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- struct device_node *of_node = dev->of_node;
+ struct device_node *of_node;
char *clk_skews[2] = {"rxc-skew-psec", "txc-skew-psec"};
char *rx_data_skews[4] = {
"rxd0-skew-psec", "rxd1-skew-psec",
@@ -1048,6 +1145,92 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
return 0;
}
+static int ksz886x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+ break;
+ case ETH_TP_MDI_X:
+ /* Note: The naming of the bit KSZ886X_BMCR_FORCE_MDI is a bit
+ * counterintuitive; the "-X" in "1 = Force MDI" in the data
+ * sheet seems to be missing:
+ * 1 = Force MDI (sic!) (transmit on RX+/RX- pins)
+ * 0 = Normal operation (transmit on TX+/TX- pins)
+ */
+ val = KSZ886X_BMCR_DISABLE_AUTO_MDIX | KSZ886X_BMCR_FORCE_MDI;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, MII_BMCR,
+ KSZ886X_BMCR_HP_MDIX | KSZ886X_BMCR_FORCE_MDI |
+ KSZ886X_BMCR_DISABLE_AUTO_MDIX,
+ KSZ886X_BMCR_HP_MDIX | val);
+}
+
+static int ksz886x_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The MDI-X configuration is automatically changed by the PHY after
+ * switching from autoneg off to on. So, take the MDI-X configuration
+ * under our own control and set it after the autoneg configuration is
+ * done.
+ */
+ return ksz886x_config_mdix(phydev, phydev->mdix_ctrl);
+}
+
+static int ksz886x_mdix_update(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ886X_BMCR_DISABLE_AUTO_MDIX) {
+ if (ret & KSZ886X_BMCR_FORCE_MDI)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL);
+ if (ret < 0)
+ return ret;
+
+ /* Same reverse logic as KSZ886X_BMCR_FORCE_MDI */
+ if (ret & KSZ886X_CTRL_MDIX_STAT)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int ksz886x_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz886x_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
static int kszphy_get_sset_count(struct phy_device *phydev)
{
return ARRAY_SIZE(kszphy_hw_stats);
@@ -1193,6 +1376,167 @@ static int kszphy_probe(struct phy_device *phydev)
return 0;
}
+static int ksz886x_cable_test_start(struct phy_device *phydev)
+{
+ if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
+ return -EOPNOTSUPP;
+
+ /* If autoneg is enabled, we won't be able to test cross-pair
+ * shorts. In this case, the PHY will "detect" a link and
+ * confuse the internal state machine - disable autoneg here.
+ * If autoneg is disabled, we should set the speed to 10 Mbit/s.
+ */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
+}
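As a worked detail of the phy_clear_bits() call above: in clause 22, the BMCR speed selection is encoded by BMCR_SPEED100 and BMCR_SPEED1000 together, and 0/0 selects 10 Mbit/s. Since these 10/100 PHYs never set BMCR_SPEED1000, the single call both disables autoneg and forces the low speed, as the comment requires.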
+
+static int ksz886x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ case KSZ8081_LMD_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ8081_LMD_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ8081_LMD_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ8081_LMD_STAT_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz886x_cable_test_failed(u16 status)
+{
+ return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) ==
+ KSZ8081_LMD_STAT_FAIL;
+}
+
+static bool ksz886x_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ case KSZ8081_LMD_STAT_OPEN:
+ fallthrough;
+ case KSZ8081_LMD_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz886x_cable_test_fault_length(u16 status)
+{
+ int dt;
+
+ /* According to the data sheet the distance to the fault is
+ * DELTA_TIME * 0.4 meters.
+ */
+ dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status);
+
+ return (dt * 400) / 10;
+}
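A quick worked check of the conversion above: ethnl_cable_test_fault_length() takes centimetres, so DELTA_TIME * 0.4 m is returned as (dt * 400) / 10 cm. For example, dt = 25 gives (25 * 400) / 10 = 1000 cm, i.e. a fault at roughly 10 m.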
+
+static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val,
+ !(val & KSZ8081_LMD_ENABLE_TEST),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ };
+ int ret, val, mdix;
+
+ /* There is no way to choose the pair, like we do on the ksz9031.
+ * We can work around this limitation by using the MDI-X functionality.
+ */
+ if (pair == 0)
+ mdix = ETH_TP_MDI;
+ else
+ mdix = ETH_TP_MDI_X;
+
+ switch (phydev->phy_id & MICREL_PHY_ID_MASK) {
+ case PHY_ID_KSZ8081:
+ ret = ksz8081_config_mdix(phydev, mdix);
+ break;
+ case PHY_ID_KSZ886X:
+ ret = ksz886x_config_mdix(phydev, mdix);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Now we are ready to fire. This command will send a 100ns pulse
+ * to the pair.
+ */
+ ret = phy_write(phydev, KSZ8081_LMD, KSZ8081_LMD_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ ret = ksz886x_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ8081_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz886x_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz886x_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_fault_length(val));
+}
+
+static int ksz886x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned long pair_mask = 0x3;
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz886x_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+ /* If the link partner is in autonegotiation mode, it will send 2 ms
+ * of FLPs followed by at least 6 ms of silence.
+ * Sleep for 2 ms to have a better chance of hitting this silence.
+ */
+ if (pair_mask)
+ msleep(2);
+ }
+
+ *finished = true;
+
+ return ret;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1299,11 +1643,14 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = ksz8081_config_init,
.soft_reset = genphy_soft_reset,
+ .config_aneg = ksz8081_config_aneg,
+ .read_status = ksz8081_read_status,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
@@ -1311,6 +1658,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz886x_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
@@ -1399,9 +1748,14 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
/* PHY_BASIC_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = kszphy_config_init,
+ .config_aneg = ksz886x_config_aneg,
+ .read_status = ksz886x_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .cable_test_start = ksz886x_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
index b71b7456462d..51ae0593a04f 100644
--- a/drivers/net/phy/mii_timestamper.c
+++ b/drivers/net/phy/mii_timestamper.c
@@ -111,6 +111,9 @@ void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
struct mii_timestamping_desc *desc;
struct list_head *this;
+ if (!mii_ts)
+ return;
+
/* A mii_timestamper statically registered by the PHY driver won't use
* register_mii_timestamper() and thus doesn't have ->device set. Don't
* try to unregister these.
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
new file mode 100644
index 000000000000..7e6ac2c5e27e
--- /dev/null
+++ b/drivers/net/phy/motorcomm.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Motorcomm PHYs
+ *
+ * Author: Peter Geis <pgwipeout@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define PHY_ID_YT8511 0x0000010a
+
+#define YT8511_PAGE_SELECT 0x1e
+#define YT8511_PAGE 0x1f
+#define YT8511_EXT_CLK_GATE 0x0c
+#define YT8511_EXT_DELAY_DRIVE 0x0d
+#define YT8511_EXT_SLEEP_CTRL 0x27
+
+/* 2b00 25m from pll
+ * 2b01 25m from xtl *default*
+ * 2b10 62.5m from pll
+ * 2b11 125m from pll
+ */
+#define YT8511_CLK_125M (BIT(2) | BIT(1))
+#define YT8511_PLLON_SLP BIT(14)
+
+/* RX Delay enabled = 1.8ns 1000T, 8ns 10/100T */
+#define YT8511_DELAY_RX BIT(0)
+
+/* TX Gig-E Delay is bits 7:4, default 0x5
+ * TX Fast-E Delay is bits 15:12, default 0xf
+ * Delay = 150ps * N - 250ps
+ * On = 2000ps, off = 50ps
+ */
+#define YT8511_DELAY_GE_TX_EN (0xf << 4)
+#define YT8511_DELAY_GE_TX_DIS (0x2 << 4)
+#define YT8511_DELAY_FE_TX_EN (0xf << 12)
+#define YT8511_DELAY_FE_TX_DIS (0x2 << 12)
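Plugging the define values into the data sheet formula quoted in the comment above, as a sanity check: N = 0xf gives 150 ps * 15 - 250 ps = 2000 ps (delay on), and N = 0x2 gives 150 ps * 2 - 250 ps = 50 ps (delay off), matching the "On = 2000ps, off = 50ps" note.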
+
+static int yt8511_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, YT8511_PAGE_SELECT);
+};
+
+static int yt8511_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, YT8511_PAGE_SELECT, page);
+};
+
+static int yt8511_config_init(struct phy_device *phydev)
+{
+ int oldpage, ret = 0;
+ unsigned int ge, fe;
+
+ oldpage = phy_select_page(phydev, YT8511_EXT_CLK_GATE);
+ if (oldpage < 0)
+ goto err_restore_page;
+
+ /* set rgmii delay mode */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ ge = YT8511_DELAY_GE_TX_DIS;
+ fe = YT8511_DELAY_FE_TX_DIS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ ge = YT8511_DELAY_RX | YT8511_DELAY_GE_TX_DIS;
+ fe = YT8511_DELAY_FE_TX_DIS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ ge = YT8511_DELAY_GE_TX_EN;
+ fe = YT8511_DELAY_FE_TX_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ ge = YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN;
+ fe = YT8511_DELAY_FE_TX_EN;
+ break;
+ default: /* do not support other modes */
+ ret = -EOPNOTSUPP;
+ goto err_restore_page;
+ }
+
+ ret = __phy_modify(phydev, YT8511_PAGE, (YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN), ge);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* set clock mode to 125mhz */
+ ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_CLK_125M);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* fast ethernet delay is in a separate page */
+ ret = __phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_DELAY_DRIVE);
+ if (ret < 0)
+ goto err_restore_page;
+
+ ret = __phy_modify(phydev, YT8511_PAGE, YT8511_DELAY_FE_TX_EN, fe);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* leave pll enabled in sleep */
+ ret = __phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_SLEEP_CTRL);
+ if (ret < 0)
+ goto err_restore_page;
+
+ ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_PLLON_SLP);
+ if (ret < 0)
+ goto err_restore_page;
+
+err_restore_page:
+ return phy_restore_page(phydev, oldpage, ret);
+}
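The paging scheme assumed by the helpers above: YT8511_PAGE_SELECT (0x1e) latches which extended register is addressed, and YT8511_PAGE (0x1f) acts as the data window. A sketch of one open-coded extended-register read under that assumption:

	__phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_SLEEP_CTRL);
	val = __phy_read(phydev, YT8511_PAGE);

phy_select_page()/phy_restore_page() wrap this with the MDIO bus lock, which is why the function body uses the unlocked __phy_* accessors throughout.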
+
+static struct phy_driver motorcomm_phy_drvs[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8511),
+ .name = "YT8511 Gigabit Ethernet",
+ .config_init = yt8511_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = yt8511_read_page,
+ .write_page = yt8511_write_page,
+ },
+};
+
+module_phy_driver(motorcomm_phy_drvs);
+
+MODULE_DESCRIPTION("Motorcomm PHY driver");
+MODULE_AUTHOR("Peter Geis");
+MODULE_LICENSE("GPL");
+
+static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(mdio, motorcomm_tbl);
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 10be266e48e8..b7b2521c73fb 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -501,7 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
}
/* Derive the AES key to get a key for the hash authentication */
-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
u16 key_len, u8 hkey[16])
{
const u8 input[AES_BLOCK_SIZE] = {0};
diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
index 9c6d25e36de2..453304bae778 100644
--- a/drivers/net/phy/mscc/mscc_macsec.h
+++ b/drivers/net/phy/mscc/mscc_macsec.h
@@ -81,7 +81,7 @@ struct macsec_flow {
/* Highest takes precedence [0..15] */
u8 priority;
- u8 key[MACSEC_KEYID_LEN];
+ u8 key[MACSEC_MAX_KEY_LEN];
union {
struct macsec_rx_sa *rx_sa;
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 46160baaafe3..9ae9cc6b23c2 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -68,7 +68,8 @@ static int ns_ack_interrupt(struct phy_device *phydev)
return ret;
/* Clear the interrupt status bit by writing a “1”
- * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
+ * to the corresponding bit in INT_CLEAR (2:0 are reserved)
+ */
ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
return ret;
@@ -150,7 +151,8 @@ static int ns_config_init(struct phy_device *phydev)
{
ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
/* In the latest MAC or switches design, the 10 Mbps loopback
- is desired to be turned off. */
+ * is desired to be turned off.
+ */
ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
return ns_ack_interrupt(phydev);
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 26b9c0d7cb9d..91a327f67a42 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -13,6 +13,9 @@
#include <linux/phy.h>
#include <linux/processor.h>
#include <linux/property.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
#define PHY_ID_TJA_1103 0x001BB010
@@ -57,6 +60,9 @@
#define VEND1_PORT_CONTROL 0x8040
#define PORT_CONTROL_EN BIT(14)
+#define VEND1_PORT_ABILITIES 0x8046
+#define PTP_ABILITY BIT(3)
+
#define VEND1_PORT_INFRA_CONTROL 0xAC00
#define PORT_INFRA_CONTROL_EN BIT(14)
@@ -91,13 +97,106 @@
#define VEND1_TX_IPG_LENGTH 0xAFD1
#define COUNTER_EN BIT(15)
+#define VEND1_LTC_LOAD_CTRL 0x1105
+#define READ_LTC BIT(2)
+#define LOAD_LTC BIT(0)
+
+#define VEND1_LTC_WR_NSEC_0 0x1106
+#define VEND1_LTC_WR_NSEC_1 0x1107
+#define VEND1_LTC_WR_SEC_0 0x1108
+#define VEND1_LTC_WR_SEC_1 0x1109
+
+#define VEND1_LTC_RD_NSEC_0 0x110A
+#define VEND1_LTC_RD_NSEC_1 0x110B
+#define VEND1_LTC_RD_SEC_0 0x110C
+#define VEND1_LTC_RD_SEC_1 0x110D
+
+#define VEND1_RATE_ADJ_SUBNS_0 0x110F
+#define VEND1_RATE_ADJ_SUBNS_1 0x1110
+#define CLK_RATE_ADJ_LD BIT(15)
+#define CLK_RATE_ADJ_DIR BIT(14)
+
+#define VEND1_HW_LTC_LOCK_CTRL 0x1115
+#define HW_LTC_LOCK_EN BIT(0)
+
+#define VEND1_PTP_IRQ_EN 0x1131
+#define VEND1_PTP_IRQ_STATUS 0x1132
+#define PTP_IRQ_EGR_TS BIT(0)
+
+#define VEND1_RX_TS_INSRT_CTRL 0x114D
+#define RX_TS_INSRT_MODE2 0x02
+
+#define VEND1_EGR_RING_DATA_0 0x114E
+#define VEND1_EGR_RING_DATA_1_SEQ_ID 0x114F
+#define VEND1_EGR_RING_DATA_2_NSEC_15_0 0x1150
+#define VEND1_EGR_RING_DATA_3 0x1151
+#define VEND1_EGR_RING_CTRL 0x1154
+
+#define RING_DATA_0_DOMAIN_NUMBER GENMASK(7, 0)
+#define RING_DATA_0_MSG_TYPE GENMASK(11, 8)
+#define RING_DATA_0_SEC_4_2 GENMASK(14, 2)
+#define RING_DATA_0_TS_VALID BIT(15)
+
+#define RING_DATA_3_NSEC_29_16 GENMASK(13, 0)
+#define RING_DATA_3_SEC_1_0 GENMASK(15, 14)
+#define RING_DATA_5_SEC_16_5 GENMASK(15, 4)
+#define RING_DONE BIT(0)
+
+#define TS_SEC_MASK GENMASK(1, 0)
+
+#define VEND1_PORT_FUNC_ENABLES 0x8048
+#define PTP_ENABLE BIT(3)
+
+#define VEND1_PORT_PTP_CONTROL 0x9000
+#define PORT_PTP_CONTROL_BYPASS BIT(11)
+
+#define VEND1_PTP_CLK_PERIOD 0x1104
+#define PTP_CLK_PERIOD_100BT1 15ULL
+
+#define VEND1_EVENT_MSG_FILT 0x1148
+#define EVENT_MSG_FILT_ALL 0x0F
+#define EVENT_MSG_FILT_NONE 0x00
+
+#define VEND1_TX_PIPE_DLY_NS 0x1149
+#define VEND1_TX_PIPEDLY_SUBNS 0x114A
+#define VEND1_RX_PIPE_DLY_NS 0x114B
+#define VEND1_RX_PIPEDLY_SUBNS 0x114C
+
#define RGMII_PERIOD_PS 8000U
#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS 1644U
#define MAX_ID_PS 2260U
#define DEFAULT_ID_PS 2000U
+#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
+ PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
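A worked example of the macro, assuming the rate-adjust registers hold a fractional-nanosecond increment in units of 2^-32 ns applied every 15 ns clock period: for ppb = 1000 (1 ppm), PPM_TO_SUBNS_INC(1000) = 2^32 * 1000 * 15 / 10^9 ~= 64424, i.e. about 15e-6 ns of correction per clock period, which accumulates to the expected 1 us of correction per second.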
+
+#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
+
+struct nxp_c45_skb_cb {
+ struct ptp_header *header;
+ unsigned int type;
+};
+
+struct nxp_c45_hwts {
+ u32 nsec;
+ u32 sec;
+ u8 domain_number;
+ u16 sequence_id;
+ u8 msg_type;
+};
+
struct nxp_c45_phy {
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ /* used to access the PTP registers atomically */
+ struct mutex ptp_lock;
+ int hwts_tx;
+ int hwts_rx;
u32 tx_delay;
u32 rx_delay;
};
@@ -110,6 +209,382 @@ struct nxp_c45_phy_stats {
u16 mask;
};
+static bool nxp_c45_poll_txts(struct phy_device *phydev)
+{
+ return phydev->irq <= 0;
+}
+
+static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
+ READ_LTC);
+ ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_LTC_RD_NSEC_0);
+ ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_LTC_RD_NSEC_1) << 16;
+ ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_LTC_RD_SEC_0);
+ ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_LTC_RD_SEC_1) << 16;
+
+ return 0;
+}
+
+static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+
+ mutex_lock(&priv->ptp_lock);
+ _nxp_c45_ptp_gettimex64(ptp, ts, sts);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
+ ts->tv_nsec);
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
+ ts->tv_nsec >> 16);
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
+ ts->tv_sec);
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
+ ts->tv_sec >> 16);
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
+ LOAD_LTC);
+
+ return 0;
+}
+
+static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+
+ mutex_lock(&priv->ptp_lock);
+ _nxp_c45_ptp_settime64(ptp, ts);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ u64 subns_inc_val;
+ bool inc;
+
+ mutex_lock(&priv->ptp_lock);
+ inc = ppb >= 0;
+ ppb = abs(ppb);
+
+ subns_inc_val = PPM_TO_SUBNS_INC(ppb);
+
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
+ subns_inc_val);
+ subns_inc_val >>= 16;
+ subns_inc_val |= CLK_RATE_ADJ_LD;
+ if (inc)
+ subns_inc_val |= CLK_RATE_ADJ_DIR;
+
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
+ subns_inc_val);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+ struct timespec64 now, then;
+
+ mutex_lock(&priv->ptp_lock);
+ then = ns_to_timespec64(delta);
+ _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
+ now = timespec64_add(now, then);
+ _nxp_c45_ptp_settime64(ptp, &now);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
+ struct nxp_c45_hwts *hwts)
+{
+ ts->tv_nsec = hwts->nsec;
+ if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
+ ts->tv_sec -= TS_SEC_MASK + 1;
+ ts->tv_sec &= ~TS_SEC_MASK;
+ ts->tv_sec |= hwts->sec & TS_SEC_MASK;
+}
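TS_SEC_MASK is GENMASK(1, 0): the hardware timestamp carries only the low two bits of the seconds counter, and the current LTC time supplies the rest. A worked example with illustrative numbers: if the LTC reads tv_sec = 1000 (low bits 0b00) and the captured hwts->sec low bits are 0b11, then 0 < 3 indicates a wrap since the capture, so tv_sec becomes 1000 - 4 = 996, the low bits are cleared (still 996) and merged with 0b11, yielding 999, i.e. one second in the past, as expected.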
+
+static bool nxp_c45_match_ts(struct ptp_header *header,
+ struct nxp_c45_hwts *hwts,
+ unsigned int type)
+{
+ return ntohs(header->sequence_id) == hwts->sequence_id &&
+ ptp_get_msgtype(header, type) == hwts->msg_type &&
+ header->domain_number == hwts->domain_number;
+}
+
+static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
+ struct nxp_c45_hwts *hwts)
+{
+ bool valid;
+ u16 reg;
+
+ mutex_lock(&priv->ptp_lock);
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
+ RING_DONE);
+ reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
+ valid = !!(reg & RING_DATA_0_TS_VALID);
+ if (!valid)
+ goto nxp_c45_get_hwtxts_out;
+
+ hwts->domain_number = reg;
+ hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
+ hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
+ hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EGR_RING_DATA_1_SEQ_ID);
+ hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EGR_RING_DATA_2_NSEC_15_0);
+ reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
+ hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
+ hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;
+
+nxp_c45_get_hwtxts_out:
+ mutex_unlock(&priv->ptp_lock);
+ return valid;
+}
+
+static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
+ struct nxp_c45_hwts *txts)
+{
+ struct sk_buff *skb, *tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+ unsigned long flags;
+ bool ts_match;
+ s64 ts_ns;
+
+ spin_lock_irqsave(&priv->tx_queue.lock, flags);
+ skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
+ ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
+ NXP_C45_SKB_CB(skb)->type);
+ if (!ts_match)
+ continue;
+ skb_match = skb;
+ __skb_unlink(skb, &priv->tx_queue);
+ break;
+ }
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+
+ if (skb_match) {
+ nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
+ nxp_c45_reconstruct_ts(&ts, txts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts_ns = timespec64_to_ns(&ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+ } else {
+ phydev_warn(priv->phydev,
+ "the tx timestamp doesn't match with any skb\n");
+ }
+}
+
+static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+ bool poll_txts = nxp_c45_poll_txts(priv->phydev);
+ struct skb_shared_hwtstamps *shhwtstamps_rx;
+ struct nxp_c45_hwts hwts;
+ bool reschedule = false;
+ struct timespec64 ts;
+ struct sk_buff *skb;
+ bool txts_valid;
+ u32 ts_raw;
+
+ while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
+ txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
+ if (unlikely(!txts_valid)) {
+ /* Still more skbs in the queue */
+ reschedule = true;
+ break;
+ }
+
+ nxp_c45_process_txts(priv, &hwts);
+ }
+
+ while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
+ nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
+ ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
+ hwts.sec = ts_raw >> 30;
+ hwts.nsec = ts_raw & GENMASK(29, 0);
+ nxp_c45_reconstruct_ts(&ts, &hwts);
+ shhwtstamps_rx = skb_hwtstamps(skb);
+ shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
+ NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
+ netif_rx_ni(skb);
+ }
+
+ return reschedule ? 1 : -1;
+}
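The RX branch above assumes RX_TS_INSRT_MODE2 (programmed in nxp_c45_config_init()) makes the PHY insert a raw 32-bit timestamp into the PTP header's reserved2 field. As the shifts show, bits 31..30 carry the low two bits of the seconds counter and bits 29..0 the nanoseconds; nxp_c45_reconstruct_ts() then widens the two-bit seconds against the current LTC time, just as on the TX side.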
+
+static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
+{
+ priv->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "NXP C45 PHC",
+ .max_adj = 16666666,
+ .adjfine = nxp_c45_ptp_adjfine,
+ .adjtime = nxp_c45_ptp_adjtime,
+ .gettimex64 = nxp_c45_ptp_gettimex64,
+ .settime64 = nxp_c45_ptp_settime64,
+ .do_aux_work = nxp_c45_do_aux_work,
+ };
+
+ priv->ptp_clock = ptp_clock_register(&priv->caps,
+ &priv->phydev->mdio.dev);
+
+ if (IS_ERR(priv->ptp_clock))
+ return PTR_ERR(priv->ptp_clock);
+
+ if (!priv->ptp_clock)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+
+ switch (priv->hwts_tx) {
+ case HWTSTAMP_TX_ON:
+ NXP_C45_SKB_CB(skb)->type = type;
+ NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&priv->tx_queue, skb);
+ if (nxp_c45_poll_txts(priv->phydev))
+ ptp_schedule_worker(priv->ptp_clock, 0);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+ struct ptp_header *header = ptp_parse_header(skb, type);
+
+ if (!header)
+ return false;
+
+ if (!priv->hwts_rx)
+ return false;
+
+ NXP_C45_SKB_CB(skb)->header = header;
+ skb_queue_tail(&priv->rx_queue, skb);
+ ptp_schedule_worker(priv->ptp_clock, 0);
+
+ return true;
+}
+
+static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
+ struct ifreq *ifreq)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+ struct phy_device *phydev = priv->phydev;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ priv->hwts_tx = cfg.tx_type;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ priv->hwts_rx = 1;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (priv->hwts_rx || priv->hwts_tx) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
+ EVENT_MSG_FILT_ALL);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_PTP_CONTROL,
+ PORT_PTP_CONTROL_BYPASS);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
+ EVENT_MSG_FILT_NONE);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
+ PORT_PTP_CONTROL_BYPASS);
+ }
+
+ if (nxp_c45_poll_txts(priv->phydev))
+ goto nxp_c45_no_ptp_irq;
+
+ if (priv->hwts_tx)
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
+ else
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
+
+nxp_c45_no_ptp_irq:
+ return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
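For context, a minimal userspace sketch of the SIOCSHWTSTAMP request this handler services; the interface name and helper are illustrative only, and the request reaches mii_ts->hwtstamp() via the MAC driver's ioctl path:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int enable_ptp_timestamping(const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
		};
		struct ifreq ifr;
		int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* ends up in the PHY's mii_ts->hwtstamp() handler */
		ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
		close(fd);
		return ret;
	}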
+
+static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+
+ return 0;
+}
+
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
@@ -205,7 +680,9 @@ static int nxp_c45_config_intr(struct phy_device *phydev)
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
+ struct nxp_c45_phy *priv = phydev->priv;
irqreturn_t ret = IRQ_NONE;
+ struct nxp_c45_hwts hwts;
int irq;
irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
@@ -216,6 +693,18 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
+ /* There is no need for an ACK.
+ * The irq signal will stay asserted until the EGR TS FIFO is
+ * emptied.
+ */
+ irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
+ if (irq & PTP_IRQ_EGR_TS) {
+ while (nxp_c45_get_hwtxts(priv, &hwts))
+ nxp_c45_process_txts(priv, &hwts);
+
+ ret = IRQ_HANDLED;
+ }
+
return ret;
}
@@ -546,6 +1035,12 @@ static int nxp_c45_config_init(struct phy_device *phydev)
return ret;
}
+ /* Bug workaround for SJA1110 rev B: enable write access
+ * to MDIO_MMD_PMAPMD
+ */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -566,20 +1061,60 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phydev->autoneg = AUTONEG_DISABLE;
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
+ PTP_CLK_PERIOD_100BT1);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
+ HW_LTC_LOCK_EN);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
+ RX_TS_INSRT_MODE2);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
+ PTP_ENABLE);
+
return nxp_c45_start_op(phydev);
}
static int nxp_c45_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv;
+ int ptp_ability;
+ int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ skb_queue_head_init(&priv->tx_queue);
+ skb_queue_head_init(&priv->rx_queue);
+
+ priv->phydev = phydev;
+
phydev->priv = priv;
- return 0;
+ mutex_init(&priv->ptp_lock);
+
+ ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ ptp_ability = !!(ptp_ability & PTP_ABILITY);
+ if (!ptp_ability) {
+ phydev_dbg(phydev, "the phy does not support PTP");
+ goto no_ptp_support;
+ }
+
+ if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
+ IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
+ priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
+ priv->mii_ts.txtstamp = nxp_c45_txtstamp;
+ priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
+ priv->mii_ts.ts_info = nxp_c45_ts_info;
+ phydev->mii_ts = &priv->mii_ts;
+ ret = nxp_c45_init_ptp_clock(priv);
+ } else {
+ phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
+ }
+
+no_ptp_support:
+
+ return ret;
}
static struct phy_driver nxp_c45_driver[] = {
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index f4816b7d31b3..c617dbcad6ea 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg);
* @phydev: target phy_device struct
*
* Disable auto-negotiation in the Clause 45 PHY. The link parameters
- * parameters are controlled through the PMA/PMD MMD registers.
+ * are controlled through the PMA/PMD MMD registers.
*
* Returns zero on success, negative errno code on failure.
*/
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 8d333d3084ed..2870c33b8975 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -76,7 +76,8 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
- * - iow, descending speed. */
+ * - iow, descending speed.
+ */
#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1f0512e39c65..8eeb26d8aeb7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -380,8 +380,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
else if (val & BMCR_SPEED100)
phydev->speed = SPEED_100;
else phydev->speed = SPEED_10;
- }
- else {
+ } else {
if (phydev->autoneg == AUTONEG_DISABLE)
change_autoneg = true;
phydev->autoneg = AUTONEG_ENABLE;
@@ -1136,6 +1135,9 @@ void phy_state_machine(struct work_struct *work)
else if (do_suspend)
phy_suspend(phydev);
+ if (err == -ENODEV)
+ return;
+
if (err < 0)
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0a2d8bedf73d..5d5f9a9ee768 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -833,6 +834,27 @@ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id)
return 0;
}
+/* Extract the phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB.
+ */
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ unsigned int upper, lower;
+ const char *cp;
+ int ret;
+
+ ret = fwnode_property_read_string(fwnode, "compatible", &cp);
+ if (ret)
+ return ret;
+
+ if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2)
+ return -EINVAL;
+
+ *phy_id = ((upper & GENMASK(15, 0)) << 16) | (lower & GENMASK(15, 0));
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_get_phy_id);
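For example, a firmware node carrying the property

	compatible = "ethernet-phy-id0022.1620";

parses to *phy_id = 0x00221620 with the sscanf() format above.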
+
/**
* get_phy_device - reads the specified PHY device and returns its @phy_device
* struct
@@ -870,6 +892,18 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
if (r)
return ERR_PTR(r);
+ /* PHY devices such as the Marvell Alaska 88E2110 will return a PHY ID
+ * of 0 when probed using get_phy_c22_id() with no error. Proceed to
+ * probe with C45 to see if we're able to get a valid PHY ID in the C45
+ * space, if successful, create the C45 PHY device.
+ */
+ if (!is_c45 && phy_id == 0 && bus->probe_capabilities >= MDIOBUS_C45) {
+ r = get_phy_c45_ids(bus, addr, &c45_ids);
+ if (!r)
+ return phy_device_create(bus, addr, phy_id,
+ true, &c45_ids);
+ }
+
return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
EXPORT_SYMBOL(get_phy_device);
@@ -923,8 +957,7 @@ EXPORT_SYMBOL(phy_device_register);
*/
void phy_device_remove(struct phy_device *phydev)
{
- if (phydev->mii_ts)
- unregister_mii_timestamper(phydev->mii_ts);
+ unregister_mii_timestamper(phydev->mii_ts);
device_del(&phydev->mdio.dev);
@@ -2864,6 +2897,90 @@ static bool phy_drv_supports_irq(struct phy_driver *phydrv)
}
/**
+ * fwnode_mdio_find_device - Given a fwnode, find the mdio_device
+ * @fwnode: pointer to the mdio_device's fwnode
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device after its use.
+ */
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+ struct device *d;
+
+ if (!fwnode)
+ return NULL;
+
+ d = bus_find_device_by_fwnode(&mdio_bus_type, fwnode);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(fwnode_mdio_find_device);
+
+/**
+ * fwnode_phy_find_device - For provided phy_fwnode, find phy_device.
+ *
+ * @phy_fwnode: Pointer to the phy's fwnode.
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ */
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ struct mdio_device *mdiodev;
+
+ mdiodev = fwnode_mdio_find_device(phy_fwnode);
+ if (!mdiodev)
+ return NULL;
+
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fwnode_phy_find_device);
+
+/**
+ * device_phy_find_device - For the given device, get the phy_device
+ * @dev: Pointer to the given device
+ *
+ * Refer return conditions of fwnode_phy_find_device().
+ */
+struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return fwnode_phy_find_device(dev_fwnode(dev));
+}
+EXPORT_SYMBOL_GPL(device_phy_find_device);
+
+/**
+ * fwnode_get_phy_node - Get the phy_node using the named reference.
+ * @fwnode: Pointer to fwnode from which phy_node has to be obtained.
+ *
+ * Refer return conditions of fwnode_find_reference().
+ * For ACPI, only "phy-handle" is supported. Legacy DT properties "phy"
+ * and "phy-device" are not supported in ACPI. DT supports all the three
+ * named references to the phy node.
+ */
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *phy_node;
+
+ /* Only phy-handle is used for ACPI */
+ phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
+ if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ return phy_node;
+ phy_node = fwnode_find_reference(fwnode, "phy", 0);
+ if (IS_ERR(phy_node))
+ phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
+ return phy_node;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
+
+/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
*
@@ -2883,7 +3000,7 @@ static int phy_probe(struct device *dev)
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
- if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
+ if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -2904,15 +3021,14 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- if (phydrv->features) {
+ if (phydrv->features)
linkmode_copy(phydev->supported, phydrv->features);
- } else if (phydrv->get_features) {
+ else if (phydrv->get_features)
err = phydrv->get_features(phydev);
- } else if (phydev->is_c45) {
+ else if (phydev->is_c45)
err = genphy_c45_pma_read_abilities(phydev);
- } else {
+ else
err = genphy_read_abilities(phydev);
- }
if (err)
goto out;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 96d8e88b4e46..eb29ef53d971 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2015 Russell King
*/
+#include <linux/acpi.h>
#include <linux/ethtool.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
@@ -181,7 +182,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->link_config.duplex = DUPLEX_FULL;
/* We treat the "pause" and "asym-pause" terminology as
- * defining the link partner's ability. */
+ * defining the link partner's ability.
+ */
if (fwnode_property_read_bool(fixed_node, "pause"))
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
pl->link_config.lp_advertising);
@@ -311,6 +313,11 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 5000baseT_Full);
break;
+ case PHY_INTERFACE_MODE_25GBASER:
+ phylink_set(pl->supported, 25000baseCR_Full);
+ phylink_set(pl->supported, 25000baseKR_Full);
+ phylink_set(pl->supported, 25000baseSR_Full);
+ fallthrough;
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
@@ -679,7 +686,8 @@ static void phylink_resolve(struct work_struct *w)
phylink_mac_pcs_get_state(pl, &link_state);
/* If we have a phy, the "up" state is the union of
- * both the PHY and the MAC */
+ * both the PHY and the MAC
+ */
if (pl->phydev)
link_state.link &= pl->phy_state.link;
@@ -688,7 +696,8 @@ static void phylink_resolve(struct work_struct *w)
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
- * the PHY flow control bits. */
+ * the PHY flow control bits.
+ */
link_state.pause = pl->phy_state.pause;
mac_config = true;
}
@@ -1084,7 +1093,26 @@ EXPORT_SYMBOL_GPL(phylink_connect_phy);
int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
u32 flags)
{
- struct device_node *phy_node;
+ return phylink_fwnode_phy_connect(pl, of_fwnode_handle(dn), flags);
+}
+EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+
+/**
+ * phylink_fwnode_phy_connect() - connect the PHY specified in the fwnode.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @fwnode: a pointer to a &struct fwnode_handle.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified by @fwnode to the phylink instance specified
+ * by @pl.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags)
+{
+ struct fwnode_handle *phy_fwnode;
struct phy_device *phy_dev;
int ret;
@@ -1094,28 +1122,25 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
- phy_node = of_parse_phandle(dn, "phy-handle", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy-device", 0);
-
- if (!phy_node) {
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ if (IS_ERR(phy_fwnode)) {
if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_find_device(phy_node);
+ phy_dev = fwnode_phy_find_device(phy_fwnode);
/* We're done with the phy_node handle */
- of_node_put(phy_node);
+ fwnode_handle_put(phy_fwnode);
if (!phy_dev)
return -ENODEV;
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret)
+ if (ret) {
+ phy_device_free(phy_dev);
return ret;
+ }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
@@ -1123,7 +1148,7 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
return ret;
}
-EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+EXPORT_SYMBOL_GPL(phylink_fwnode_phy_connect);
/**
* phylink_disconnect_phy() - disconnect any PHY attached to the phylink
@@ -1358,11 +1383,10 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
ASSERT_RTNL();
- if (pl->phydev) {
+ if (pl->phydev)
phy_ethtool_ksettings_get(pl->phydev, kset);
- } else {
+ else
kset->base.port = pl->link_port;
- }
linkmode_copy(kset->link_modes.supported, pl->supported);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index d5c1aaa8236a..30d15f7c9b03 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -100,6 +100,7 @@ static int qs6612_ack_interrupt(struct phy_device *phydev)
static int qs6612_config_intr(struct phy_device *phydev)
{
int err;
+
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* clear any interrupts before enabling them */
err = qs6612_ack_interrupt(phydev);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 821e85a97367..11be60333fa8 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -8,6 +8,7 @@
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*/
#include <linux/bitops.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -27,6 +28,7 @@
#define RTL821x_PAGE_SELECT 0x1f
#define RTL8211F_PHYCR1 0x18
+#define RTL8211F_PHYCR2 0x19
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
@@ -40,6 +42,8 @@
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211F_CLKOUT_EN BIT(0)
+
#define RTL8201F_ISR 0x1e
#define RTL8201F_ISR_ANERR BIT(15)
#define RTL8201F_ISR_DUPLEX BIT(13)
@@ -71,6 +75,11 @@ MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
+struct rtl821x_priv {
+ u16 phycr1;
+ u16 phycr2;
+};
+
static int rtl821x_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, RTL821x_PAGE_SELECT);
@@ -81,6 +90,37 @@ static int rtl821x_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
}
+static int rtl821x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct rtl821x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR1);
+ if (ret < 0)
+ return ret;
+
+ priv->phycr1 = ret & (RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF);
+ if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
+ priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
+
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ if (ret < 0)
+ return ret;
+
+ priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
+ if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
+ priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -291,13 +331,19 @@ static int rtl8211c_config_init(struct phy_device *phydev)
static int rtl8211f_config_init(struct phy_device *phydev)
{
+ struct rtl821x_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
- u16 val;
int ret;
- val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF;
- phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val);
+ ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
+ RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
+ priv->phycr1);
+ if (ret < 0) {
+ dev_err(dev, "aldps mode configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -354,6 +400,27 @@ static int rtl8211f_config_init(struct phy_device *phydev)
val_rxdly ? "enabled" : "disabled");
}
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_CLKOUT_EN, priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return genphy_soft_reset(phydev);
+}
+
+static int rtl821x_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ msleep(20);
+
return 0;
}
@@ -847,12 +914,13 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
+ .probe = rtl821x_probe,
.config_init = &rtl8211f_config_init,
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtl821x_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index 52f1f65320fe..bb13e75183ee 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
-/**
+/*
* drivers/net/phy/rockchip.c
*
* Driver for ROCKCHIP Ethernet PHYs
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index e61de66e973b..7362f8c3271c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -392,6 +392,11 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
unsigned long *link_modes)
{
+ if (phylink_test(link_modes, 25000baseCR_Full) ||
+ phylink_test(link_modes, 25000baseKR_Full) ||
+ phylink_test(link_modes, 25000baseSR_Full))
+ return PHY_INTERFACE_MODE_25GBASER;
+
if (phylink_test(link_modes, 10000baseCR_Full) ||
phylink_test(link_modes, 10000baseSR_Full) ||
phylink_test(link_modes, 10000baseLR_Full) ||
@@ -624,14 +629,14 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
* be put via sfp_bus_put() when done.
*
* Returns:
- * - on success, a pointer to the sfp_bus structure,
- * - %NULL if no SFP is specified,
- * - on failure, an error pointer value:
+ * - on success, a pointer to the sfp_bus structure,
+ * - %NULL if no SFP is specified,
+ * - on failure, an error pointer value:
*
- * - corresponding to the errors detailed for
- * fwnode_property_get_reference_args().
- * - %-ENOMEM if we failed to allocate the bus.
- * - an error from the upstream's connect_phy() method.
+ * - corresponding to the errors detailed for
+ * fwnode_property_get_reference_args().
+ * - %-ENOMEM if we failed to allocate the bus.
+ * - an error from the upstream's connect_phy() method.
*/
struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
{
@@ -666,14 +671,14 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
* bus, so it is safe to put the bus after this call.
*
* Returns:
- * - on success, a pointer to the sfp_bus structure,
- * - %NULL if no SFP is specified,
- * - on failure, an error pointer value:
+ * - on success, a pointer to the sfp_bus structure,
+ * - %NULL if no SFP is specified,
+ * - on failure, an error pointer value:
*
- * - corresponding to the errors detailed for
- * fwnode_property_get_reference_args().
- * - %-ENOMEM if we failed to allocate the bus.
- * - an error from the upstream's connect_phy() method.
+ * - corresponding to the errors detailed for
+ * fwnode_property_get_reference_args().
+ * - %-ENOMEM if we failed to allocate the bus.
+ * - an error from the upstream's connect_phy() method.
*/
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 37f722c763d7..34e90216bd2c 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2153,7 +2153,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_INIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- /* TX_FAULT is still asserted after t_init or
+ /* TX_FAULT is still asserted after t_init
* or t_start_up, so assume there is a fault.
*/
sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index ca49c1ad3efc..8b5445a724ce 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -160,11 +160,11 @@ static const struct spi_device_id ks8995_id[] = {
MODULE_DEVICE_TABLE(spi, ks8995_id);
static const struct of_device_id ks8895_spi_of_match[] = {
- { .compatible = "micrel,ks8995" },
- { .compatible = "micrel,ksz8864" },
- { .compatible = "micrel,ksz8795" },
- { },
- };
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
static inline u8 get_chip_id(u8 val)
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 431fe5e0ce31..309e4c3496c4 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -20,12 +20,12 @@
#include <linux/mii.h>
#include <linux/phy.h>
-#define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */
-#define MII_XIE 0x12 /* Interrupt Enable Register */
+#define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */
+#define MII_XIE 0x12 /* Interrupt Enable Register */
#define MII_XIE_DEFAULT_MASK 0x0070 /* ANE complete, Remote Fault, Link Down */
#define STE101P_PHY_ID 0x00061c50
-#define STE100P_PHY_ID 0x1c040011
+#define STE100P_PHY_ID 0x1c040011
static int ste10Xp_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 16704e243162..897b979ec03c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -249,7 +249,8 @@ static int vsc73xx_config_aneg(struct phy_device *phydev)
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
- * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+ */
static int vsc8601_add_skew(struct phy_device *phydev)
{
int ret;
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index 61fedb23d3cf..db0dc36d12e3 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -436,7 +436,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Initialize the data information for the compression code
*/
db->totlen = sizeof (struct bsd_db) +
- (sizeof (struct bsd_dict) * hsize);
+ (sizeof (struct bsd_dict) * hsize);
db->hsize = hsize;
db->hshift = hshift;
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f78ceba42e57..ba93bab948e0 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -325,7 +325,7 @@ found:
* Found it -- move to the front on the connection list.
*/
if(lcs == ocs) {
- /* found at most recently used */
+ /* found at most recently used */
} else if (cs == ocs) {
/* found at least recently used */
comp->xmit_oldest = lcs->cs_this;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 84f832806313..2ced021f4faf 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2559,15 +2559,15 @@ static int tun_flags(struct tun_struct *tun)
return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}
-static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
+static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return sprintf(buf, "0x%x\n", tun_flags(tun));
}
-static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return uid_valid(tun->owner)?
@@ -2576,8 +2576,8 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
sprintf(buf, "-1\n");
}
-static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t group_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return gid_valid(tun->group) ?
@@ -2586,9 +2586,9 @@ static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
sprintf(buf, "-1\n");
}
-static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
-static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
-static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
+static DEVICE_ATTR_RO(tun_flags);
+static DEVICE_ATTR_RO(owner);
+static DEVICE_ATTR_RO(group);
static struct attribute *tun_dev_attrs[] = {
&dev_attr_tun_flags.attr,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index fbbe78643631..4c5d69732a7e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -164,12 +164,14 @@ config USB_NET_AX8817X
depends on USB_USBNET
select CRC32
select PHYLIB
+ select AX88796B_PHY
+ imply NET_SELFTESTS
default y
help
This option adds support for ASIX AX88xxx based USB 2.0
10/100 Ethernet adapters.
- This driver should work with at least the following devices:
+ This driver should work with at least the following devices:
* Aten UC210T
* ASIX AX88172
* Billionton Systems, USB2AR
@@ -220,13 +222,13 @@ config USB_NET_CDCETHER
CDC Ethernet is an implementation option for DOCSIS cable modems
that support USB connectivity, used for non-Microsoft USB hosts.
The Linux-USB CDC Ethernet Gadget driver is an open implementation.
- This driver should work with at least the following devices:
+ This driver should work with at least the following devices:
* Dell Wireless 5530 HSPA
- * Ericsson PipeRider (all variants)
+ * Ericsson PipeRider (all variants)
* Ericsson Mobile Broadband Module (all variants)
- * Motorola (DM100 and SB4100)
- * Broadcom Cable Modem (reference design)
+ * Motorola (DM100 and SB4100)
+ * Broadcom Cable Modem (reference design)
* Toshiba (PCX1100U and F3507g/F3607gw)
* ...
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 3b53685301de..e1994a246122 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -25,6 +25,8 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <net/selftests.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -178,6 +180,10 @@ struct asix_common_private {
u16 presvd_phy_advertise;
u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ u16 phy_addr;
+ char phy_name[20];
};
extern const struct driver_info ax88172a_info;
@@ -205,8 +211,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int asix_set_sw_mii(struct usbnet *dev, int in_pm);
int asix_set_hw_mii(struct usbnet *dev, int in_pm);
-int asix_read_phy_addr(struct usbnet *dev, int internal);
-int asix_get_phy_addr(struct usbnet *dev);
+int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
@@ -215,6 +220,7 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
+void asix_adjust_link(struct net_device *netdev);
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
@@ -223,6 +229,9 @@ void asix_set_multicast(struct net_device *net);
int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
+int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum);
+int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc);
void asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc,
int val);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7bc6e8f856fe..ac92bc52a85e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -288,32 +288,33 @@ int asix_set_hw_mii(struct usbnet *dev, int in_pm)
return ret;
}
-int asix_read_phy_addr(struct usbnet *dev, int internal)
+int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
- int offset = (internal ? 1 : 0);
+ int ret, offset;
u8 buf[2];
- int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
- netdev_dbg(dev->net, "asix_get_phy_addr()\n");
+ ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
+ if (ret < 0)
+ goto error;
if (ret < 2) {
- netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
- goto out;
+ ret = -EIO;
+ goto error;
}
- netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
- *((__le16 *)buf));
+
+ offset = (internal ? 1 : 0);
ret = buf[offset];
-out:
+ netdev_dbg(dev->net, "%s PHY address 0x%x\n",
+ internal ? "internal" : "external", ret);
+
return ret;
-}
-int asix_get_phy_addr(struct usbnet *dev)
-{
- /* return the address of the internal phy */
- return asix_read_phy_addr(dev, 1);
-}
+error:
+ netdev_err(dev->net, "Error reading PHY_ID register: %02x\n", ret);
+ return ret;
+}
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
@@ -383,6 +384,27 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
return ret;
}
+/* set MAC link settings according to information from phylib */
+void asix_adjust_link(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+ struct usbnet *dev = netdev_priv(netdev);
+ u16 mode = 0;
+
+ if (phydev->link) {
+ mode = AX88772_MEDIUM_DEFAULT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mode &= ~AX_MEDIUM_FD;
+
+ if (phydev->speed != SPEED_100)
+ mode &= ~AX_MEDIUM_PS;
+ }
+
+ asix_write_medium_mode(dev, mode, 0);
+ phy_print_status(phydev);
+}
+
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
@@ -463,18 +485,23 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
return ret;
}
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res, 0);
- asix_set_hw_mii(dev, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2,
+ &res, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_set_hw_mii(dev, 0);
+out:
mutex_unlock(&dev->phy_mutex);
netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
phy_id, loc, le16_to_cpu(res));
- return le16_to_cpu(res);
+ return ret < 0 ? ret : le16_to_cpu(res);
}
-void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
+ int val)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
@@ -494,15 +521,40 @@ void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
- mutex_unlock(&dev->phy_mutex);
- return;
- }
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
- (__u16)loc, 2, &res, 0);
- asix_set_hw_mii(dev, 0);
+ if (ret == -ENODEV)
+ goto out;
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2,
+ &res, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_set_hw_mii(dev, 0);
+out:
mutex_unlock(&dev->phy_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+{
+ __asix_mdio_write(netdev, phy_id, loc, val);
+}
+
+/* MDIO read and write wrappers for phylib */
+int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct usbnet *priv = bus->priv;
+
+ return asix_mdio_read(priv->net, phy_id, regnum);
+}
+
+int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+{
+ struct usbnet *priv = bus->priv;
+
+ return __asix_mdio_write(priv->net, phy_id, regnum, val);
}
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
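The two bus wrappers above exist because phylib talks to PHYs through a struct mii_bus whose callbacks must propagate errors: .read returns the 16-bit register value or a negative errno, and .write returns 0 or a negative errno, which is why __asix_mdio_write() was split out with an int return. A hedged sketch of that contract (hypothetical callback names; only the return conventions matter):

	static int sketch_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
	{
		int val = 0;		/* device-specific transfer goes here */

		if (val < 0)
			return val;	/* propagate -EIO, -ENODEV, ... */
		return val & 0xffff;	/* MII registers are 16 bits wide */
	}

	static int sketch_mdio_write(struct mii_bus *bus, int phy_id,
				     int regnum, u16 val)
	{
		return 0;		/* or a negative errno on failure */
	}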
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 19a8fafb8f04..aec97b021a73 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -262,7 +262,10 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = asix_get_phy_addr(dev);
+
+ dev->mii.phy_id = asix_read_phy_addr(dev, true);
+ if (dev->mii.phy_id < 0)
+ return dev->mii.phy_id;
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
@@ -280,9 +283,29 @@ out:
return ret;
}
+static void ax88772_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
- .get_link = asix_get_link,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
@@ -290,37 +313,18 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings_mii,
- .set_link_ksettings = usbnet_set_link_ksettings_mii,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .self_test = net_selftest,
+ .get_strings = ax88772_ethtool_get_strings,
+ .get_sset_count = ax88772_ethtool_get_sset_count,
};
-static int ax88772_link_reset(struct usbnet *dev)
-{
- u16 mode;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
-
- mii_check_media(&dev->mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
- mode = AX88772_MEDIUM_DEFAULT;
-
- if (ethtool_cmd_speed(&ecmd) != SPEED_100)
- mode &= ~AX_MEDIUM_PS;
-
- if (ecmd.duplex != DUPLEX_FULL)
- mode &= ~AX_MEDIUM_FD;
-
- netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
-
- asix_write_medium_mode(dev, mode, 0);
-
- return 0;
-}
-
static int ax88772_reset(struct usbnet *dev)
{
struct asix_data *data = (struct asix_data *)&dev->data;
+ struct asix_common_private *priv = dev->driver_priv;
int ret;
/* Rewrite MAC address */
@@ -339,6 +343,8 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
+ phy_start(priv->phydev);
+
return 0;
out:
@@ -583,7 +589,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
@@ -592,6 +598,9 @@ static void ax88772_suspend(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
u16 medium;
+ if (netif_running(dev->net))
+ phy_stop(priv->phydev);
+
/* Stop MAC operation */
medium = asix_read_medium_status(dev, 1);
medium &= ~AX_MEDIUM_RE;
@@ -599,14 +608,6 @@ static void ax88772_suspend(struct usbnet *dev)
netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
asix_read_medium_status(dev, 1));
-
- /* Preserve BMCR for restoring */
- priv->presvd_phy_bmcr =
- asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_BMCR);
-
- /* Preserve ANAR for restoring */
- priv->presvd_phy_advertise =
- asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE);
}
static int asix_suspend(struct usb_interface *intf, pm_message_t message)
@@ -620,39 +621,22 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
return usbnet_suspend(intf, message);
}
-static void ax88772_restore_phy(struct usbnet *dev)
-{
- struct asix_common_private *priv = dev->driver_priv;
-
- if (priv->presvd_phy_advertise) {
- /* Restore Advertisement control reg */
- asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- priv->presvd_phy_advertise);
-
- /* Restore BMCR */
- if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
- priv->presvd_phy_bmcr |= BMCR_ANRESTART;
-
- asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
- priv->presvd_phy_bmcr);
-
- priv->presvd_phy_advertise = 0;
- priv->presvd_phy_bmcr = 0;
- }
-}
-
static void ax88772_resume(struct usbnet *dev)
{
+ struct asix_common_private *priv = dev->driver_priv;
int i;
for (i = 0; i < 3; i++)
if (!ax88772_hw_reset(dev, 1))
break;
- ax88772_restore_phy(dev);
+
+ if (netif_running(dev->net))
+ phy_start(priv->phydev);
}
static void ax88772a_resume(struct usbnet *dev)
{
+ struct asix_common_private *priv = dev->driver_priv;
int i;
for (i = 0; i < 3; i++) {
@@ -660,7 +644,8 @@ static void ax88772a_resume(struct usbnet *dev)
break;
}
- ax88772_restore_phy(dev);
+ if (netif_running(dev->net))
+ phy_start(priv->phydev);
}
static int asix_resume(struct usb_interface *intf)
@@ -674,12 +659,61 @@ static int asix_resume(struct usb_interface *intf)
return usbnet_resume(intf);
}
+static int ax88772_init_mdio(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
+ if (!priv->mdio)
+ return -ENOMEM;
+
+ priv->mdio->priv = dev;
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+ /* mii bus name is usb-<usb bus number>:<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+
+ return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
+}
+
+static int ax88772_init_phy(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ int ret;
+
+ ret = asix_read_phy_addr(dev, true);
+ if (ret < 0)
+ return ret;
+
+ priv->phy_addr = ret;
+
+ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(priv->phydev)) {
+ netdev_err(dev->net, "Could not connect to PHY device %s\n",
+ priv->phy_name);
+ ret = PTR_ERR(priv->phydev);
+ return ret;
+ }
+
+ priv->phydev->mac_managed_pm = 1;
+
+ phy_attached_info(priv->phydev);
+
+ return 0;
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret, i;
u8 buf[ETH_ALEN] = {0}, chipcode = 0;
- u32 phyid;
struct asix_common_private *priv;
+ int ret, i;
+ u32 phyid;
usbnet_get_endpoints(dev, intf);
@@ -711,14 +745,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
asix_set_netdev_dev_addr(dev, buf);
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = asix_mdio_read;
- dev->mii.mdio_write = asix_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = asix_get_phy_addr(dev);
-
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
@@ -746,11 +772,11 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
- dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
- if (!dev->driver_priv)
+ priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- priv = dev->driver_priv;
+ dev->driver_priv = priv;
priv->presvd_phy_bmcr = 0;
priv->presvd_phy_advertise = 0;
@@ -762,13 +788,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
priv->suspend = ax88772_suspend;
}
+ ret = ax88772_init_mdio(dev);
+ if (ret)
+ return ret;
+
+ return ax88772_init_phy(dev);
+}
+
+static int ax88772_stop(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ /* On unplugged USB, we will get MDIO communication errors and the
+ * PHY will be set to the PHY_HALTED state.
+ */
+ if (priv->phydev->state != PHY_HALTED)
+ phy_stop(priv->phydev);
+
return 0;
}
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ phy_disconnect(priv->phydev);
asix_rx_fixup_common_free(dev->driver_priv);
- kfree(dev->driver_priv);
}
static const struct ethtool_ops ax88178_ethtool_ops = {
@@ -1081,7 +1126,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0xff;
dev->mii.supports_gmii = 1;
- dev->mii.phy_id = asix_get_phy_addr(dev);
+
+ dev->mii.phy_id = asix_read_phy_addr(dev, true);
+ if (dev->mii.phy_id < 0)
+ return dev->mii.phy_id;
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;
@@ -1153,8 +1201,8 @@ static const struct driver_info ax88772_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
+ .stop = ax88772_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
@@ -1165,7 +1213,6 @@ static const struct driver_info ax88772b_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
@@ -1201,7 +1248,6 @@ static const struct driver_info hg20f9_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
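Taken together, these asix_devices.c hunks replace the old mii/link_reset machinery with the phylib lifecycle: phy_connect() registers asix_adjust_link() as the callback phylib invokes on link changes, and phy_start()/phy_stop() bracket the running state across reset/stop and suspend/resume. A condensed sketch of the flow (error handling trimmed; the names are the driver's own):

	/* bind: attach MAC to PHY, register the link-change callback */
	phydev = phy_connect(netdev, phy_name, &asix_adjust_link,
			     PHY_INTERFACE_MODE_INTERNAL);

	phy_start(phydev);	/* reset/resume: run the PHY state machine */
	/* ... phylib calls asix_adjust_link() on every link change ... */
	phy_stop(phydev);	/* stop/suspend: halt the state machine */
	phy_disconnect(phydev);	/* unbind: detach from the PHY */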
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index b404c9462dce..530947d7477b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -25,20 +25,6 @@ struct ax88172a_private {
struct asix_rx_fixup_info rx_fixup_info;
};
-/* MDIO read and write wrappers for phylib */
-static int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- return asix_mdio_read(((struct usbnet *)bus->priv)->net, phy_id,
- regnum);
-}
-
-static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 val)
-{
- asix_mdio_write(((struct usbnet *)bus->priv)->net, phy_id, regnum, val);
- return 0;
-}
-
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
@@ -219,7 +205,12 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
goto free;
}
- priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
+ ret = asix_read_phy_addr(dev, priv->use_embdphy);
+ if (ret < 0)
+ goto free;
+
+ priv->phy_addr = ret;
+
ax88172a_reset_phy(dev, priv->use_embdphy);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 0eeec80bec31..359ea0d10e59 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -26,7 +26,7 @@
* for transport over USB using a simpler USB device model than the
* previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
*
- * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
*
* This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
* 2.6.27 and 2.6.30rc2 kernel.
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+ dev_kfree_skb_any(skb);
if (!skb2)
return NULL;
- dev_kfree_skb_any(skb);
skb = skb2;
done:
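The reordering above closes a leak: skb_copy_expand() does not consume its input, so the original skb must be freed whether or not the copy succeeds; previously it leaked on allocation failure. The pattern in isolation (a sketch, not the full fixup):

	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
	dev_kfree_skb_any(skb);		/* free the original unconditionally */
	if (!skb2)
		return NULL;		/* copy failed, nothing leaked */
	skb = skb2;			/* continue with the expanded copy */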
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 7eb0109e9baa..eb3817d70f2b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -217,7 +217,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
- /* Communcation class functions with bmCapabilities are not
+ /* Communication class functions with bmCapabilities are not
* RNDIS. But some Wireless class RNDIS functions use
* bmCapabilities for their own purpose. The failsafe is
* therefore applied only to Communication class RNDIS
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 5db66272fc82..4c4ab7b38d78 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -168,6 +168,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
subdriver = usb_cdc_wdm_register(ctx->control,
&dev->status->desc,
le16_to_cpu(ctx->mbim_desc->wMaxControlMessage),
+ WWAN_PORT_MBIM,
cdc_mbim_wdm_manage_power);
if (IS_ERR(subdriver)) {
ret = PTR_ERR(subdriver);
@@ -300,8 +301,8 @@ error:
return NULL;
}
-/* Some devices are known to send Neigbor Solicitation messages and
- * require Neigbor Advertisement replies. The IPv6 core will not
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies. The IPv6 core will not
* respond since IFF_NOARP is set, so we must handle them ourselves.
*/
static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
@@ -588,7 +589,7 @@ static const struct driver_info cdc_mbim_info_zlp = {
*
* Note: The current implementation of this feature restricts each NTB
* to a single NDP, implying that multiplexed sessions cannot share an
- * NTB. This might affect performace for multiplexed sessions.
+ * NTB. This might affect performance for multiplexed sessions.
*/
static const struct driver_info cdc_mbim_info_ndp_to_end = {
.description = "CDC MBIM",
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b04055fd1b79..24753a4da7e6 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -192,7 +192,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
return val;
}
-static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t min_tx_pkt_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -200,7 +201,8 @@ static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute
return sprintf(buf, "%u\n", ctx->min_tx_pkt);
}
-static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t rx_max_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -208,7 +210,8 @@ static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *at
return sprintf(buf, "%u\n", ctx->rx_max);
}
-static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t tx_max_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -216,7 +219,8 @@ static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *at
return sprintf(buf, "%u\n", ctx->tx_max);
}
-static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t tx_timer_usecs_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -224,7 +228,9 @@ static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attri
return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
}
-static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t min_tx_pkt_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -238,7 +244,9 @@ static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribu
return len;
}
-static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t rx_max_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -251,7 +259,9 @@ static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *
return len;
}
-static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t tx_max_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -264,7 +274,9 @@ static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *
return len;
}
-static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t tx_timer_usecs_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -285,10 +297,10 @@ static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_att
return len;
}
-static DEVICE_ATTR(min_tx_pkt, 0644, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
-static DEVICE_ATTR(rx_max, 0644, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
-static DEVICE_ATTR(tx_max, 0644, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
-static DEVICE_ATTR(tx_timer_usecs, 0644, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
+static DEVICE_ATTR_RW(min_tx_pkt);
+static DEVICE_ATTR_RW(rx_max);
+static DEVICE_ATTR_RW(tx_max);
+static DEVICE_ATTR_RW(tx_timer_usecs);
static ssize_t ndp_to_end_show(struct device *d, struct device_attribute *attr, char *buf)
{
@@ -628,7 +640,7 @@ out:
/* set MTU to max supported by the device if necessary */
dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
- /* do not exceed operater preferred MTU */
+ /* do not exceed operator preferred MTU */
if (ctx->mbim_extended_desc) {
mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
@@ -685,7 +697,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 def_rx, def_tx;
- /* be conservative when selecting intial buffer size to
+ /* be conservative when selecting initial buffer size to
* increase the number of hosts this will work for
*/
def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
@@ -1880,7 +1892,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_LINK_INTR,
+ | FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 3ef4b2841402..54ef8492ca01 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -457,9 +457,8 @@ static const struct usb_device_id hso_ids[] = {
MODULE_DEVICE_TABLE(usb, hso_ids);
/* Sysfs attribute */
-static ssize_t hso_sysfs_show_porttype(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t hsotype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hso_device *hso_dev = dev_get_drvdata(dev);
char *port_name;
@@ -505,7 +504,7 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
return sprintf(buf, "%s\n", port_name);
}
-static DEVICE_ATTR(hsotype, 0444, hso_sysfs_show_porttype, NULL);
+static DEVICE_ATTR_RO(hsotype);
static struct attribute *hso_serial_dev_attrs[] = {
&dev_attr_hsotype.attr,
@@ -1689,7 +1688,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
spin_unlock_irqrestore(&serial->serial_lock, flags);
return usb_control_msg(serial->parent->usb,
- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+ usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
0x21, val, if_num, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -2436,7 +2435,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
if (hso_dev->usb_gone)
rv = 0;
else
- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
mutex_unlock(&hso_dev->mutex);
@@ -2618,32 +2617,31 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ if (!serial->tiocmget)
+ goto exit;
serial->tiocmget->serial_state_notification
= kzalloc(sizeof(struct hso_serial_state_notification),
GFP_KERNEL);
- /* it isn't going to break our heart if serial->tiocmget
- * allocation fails don't bother checking this.
- */
- if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
- tiocmget = serial->tiocmget;
- tiocmget->endp = hso_get_ep(interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
- if (!tiocmget->endp) {
- dev_err(&interface->dev, "Failed to find INT IN ep\n");
- goto exit;
- }
-
- tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (tiocmget->urb) {
- mutex_init(&tiocmget->mutex);
- init_waitqueue_head(&tiocmget->waitq);
- } else
- hso_free_tiomget(serial);
+ if (!serial->tiocmget->serial_state_notification)
+ goto exit;
+ tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
}
- }
- else
+
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tiocmget->urb)
+ goto exit;
+
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+ } else {
num_urbs = 1;
+ }
if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
BULK_URB_TX_SIZE))
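Two of the hso hunks swap usb_rcvctrlpipe() for usb_sndctrlpipe(): both requests use host-to-device bmRequestType values (0x21, 0x40), and usb_control_msg() expects the pipe direction to match the request direction. A minimal sketch of the pairing (request values illustrative):

	/* host-to-device (OUT) request: use the send control pipe */
	usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22,
			0x21 /* dir = OUT, class, interface */,
			val, if_num, NULL, 0, USB_CTRL_SET_TIMEOUT);

	/* device-to-host (IN) request: use the receive control pipe */
	usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
			0xa1 /* dir = IN, class, interface */,
			val, if_num, buf, len, USB_CTRL_GET_TIMEOUT);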
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index a87f0dabcdb7..849b77330bf2 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -96,6 +96,7 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
subdriver = usb_cdc_wdm_register(ctx->control,
&usbnet_dev->status->desc,
1024, /* wMaxCommand */
+ WWAN_PORT_AT,
huawei_cdc_ncm_wdm_manage_power);
if (IS_ERR(subdriver)) {
ret = PTR_ERR(subdriver);
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ed05f992c612..6fde41550de1 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -61,7 +61,7 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
int need_tail = 0;
__le16 *len;
- /* if packet and our header is smaler than 64 pad to 64 (+ ZLP) */
+ /* if packet and our header is smaller than 64 pad to 64 (+ ZLP) */
if ((pack_with_header_len) < dev->maxpacket)
need_tail = dev->maxpacket - pack_with_header_len + 1;
/*
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6acc5e904518..25489389ea49 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -298,7 +298,7 @@ struct lan78xx_net;
struct lan78xx_priv {
struct lan78xx_net *dev;
u32 rfe_ctl;
- u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
+ u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
struct mutex dataport_mutex; /* for dataport access */
@@ -1645,6 +1645,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
.get_pauseparam = lan78xx_get_pause,
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 217a2d8fa47b..b2495fa80171 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -31,7 +31,7 @@
* Windows/Mac drivers do send a couple of such frames to the device
* during initialisation, with protocol set to 0x0906 or 0x0b06 and (what
* seems to be) a flag in the .dummy_flags. This doesn't seem necessary
- * for modem operation but can possibly be used for GPS or other funcitons.
+ * for modem operation but can possibly be used for GPS or other functions.
*/
struct vl600_frame_hdr {
@@ -72,7 +72,7 @@ static int vl600_bind(struct usbnet *dev, struct usb_interface *intf)
/* ARP packets don't go through, but they're also of no use. The
* subnet has only two hosts anyway: us and the gateway / DHCP
* server (probably simulated by modem firmware or network operator)
- * whose address changes everytime we connect to the intarwebz and
+ * whose address changes every time we connect to the intarwebz and
* who doesn't bother answering ARP requests either. So hardware
* addresses have no meaning, the destination and the source of every
* packet depend only on whether it is on the IN or OUT endpoint. */
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f9352a4522f..2469bdcb1a04 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -601,7 +601,7 @@ MODULE_DEVICE_TABLE(usb, products);
static int mcs7830_reset_resume (struct usb_interface *intf)
{
- /* YES, this function is successful enough that ethtool -d
+ /* YES, this function is successful enough that ethtool -d
does show same output pre-/post-suspend */
struct usbnet *dev = usb_get_intfdata(intf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6700f1970b24..6a2e4f884b12 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
skb->protocol = htons(ETH_P_MAP);
- return (netif_rx(skb) == NET_RX_SUCCESS);
+ return 1;
}
switch (skb->data[0] & 0xf0) {
@@ -710,7 +710,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
/* register subdriver */
subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
- 4096, &qmi_wwan_cdc_wdm_manage_power);
+ 4096, WWAN_PORT_QMI,
+ &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
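This is the same signature change seen in cdc_mbim.c and huawei_cdc_ncm.c above: usb_cdc_wdm_register() now takes a wwan_port_type (WWAN_PORT_MBIM, WWAN_PORT_AT, WWAN_PORT_QMI) so the control endpoint can be exposed through the WWAN framework. The updated prototype looks roughly like this (paraphrased from the cdc-wdm side of the series):

	struct usb_driver *
	usb_cdc_wdm_register(struct usb_interface *intf,
			     struct usb_endpoint_descriptor *ep,
			     int bufsize, enum wwan_port_type type,
			     int (*manage_power)(struct usb_interface *, int));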
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 136ea06540ff..1692d3b1b6e1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -931,6 +931,8 @@ struct r8152 {
u32 rx_pending;
u32 fc_pause_on, fc_pause_off;
+ unsigned int pipe_in, pipe_out, pipe_intr, pipe_ctrl_in, pipe_ctrl_out;
+
u32 support_2500full:1;
u32 lenovo_macpassthru:1;
u32 dell_tb_rx_agg_bug:1;
@@ -1198,7 +1200,7 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+ ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
if (ret < 0)
@@ -1221,7 +1223,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
+ ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, 500);
@@ -2041,7 +2043,7 @@ static int alloc_all_mem(struct r8152 *tp)
goto err1;
tp->intr_interval = (int)ep_intr->desc.bInterval;
- usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+ usb_fill_int_urb(tp->intr_urb, tp->udev, tp->pipe_intr,
tp->intr_buff, INTBUFSIZE, intr_callback,
tp, tp->intr_interval);
@@ -2305,7 +2307,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
if (ret < 0)
goto out_tx_fill;
- usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_out,
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
@@ -2445,7 +2447,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
unsigned int pkt_len, rx_frag_head_sz;
struct sk_buff *skb;
- /* limite the skb numbers for rx_queue */
+ /* limit the skb numbers for rx_queue */
if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
break;
@@ -2620,7 +2622,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
!test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
return 0;
- usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_in,
agg->buffer, tp->rx_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
@@ -8107,6 +8109,37 @@ static void r8156b_init(struct r8152 *tp)
tp->coalesce = 15000; /* 15 us */
}
+static bool rtl_check_vendor_ok(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *in, *out, *intr;
+
+ if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
+ dev_err(&intf->dev, "Expected endpoints are not found\n");
+ return false;
+ }
+
+ /* Check Rx endpoint address */
+ if (usb_endpoint_num(in) != 1) {
+ dev_err(&intf->dev, "Invalid Rx endpoint address\n");
+ return false;
+ }
+
+ /* Check Tx endpoint address */
+ if (usb_endpoint_num(out) != 2) {
+ dev_err(&intf->dev, "Invalid Tx endpoint address\n");
+ return false;
+ }
+
+ /* Check interrupt endpoint address */
+ if (usb_endpoint_num(intr) != 3) {
+ dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
+ return false;
+ }
+
+ return true;
+}
+
static bool rtl_vendor_mode(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
@@ -8115,12 +8148,15 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
int i, num_configs;
if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return true;
+ return rtl_check_vendor_ok(intf);
/* The vendor mode is not always config #1, so we need to look for it. */
udev = interface_to_usbdev(intf);
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
+ if (num_configs < 2)
+ return false;
+
for (i = 0; i < num_configs; (i++, c++)) {
struct usb_interface_descriptor *desc = NULL;
@@ -8135,7 +8171,8 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
}
}
- WARN_ON_ONCE(i == num_configs);
+ if (i == num_configs)
+ dev_err(&intf->dev, "Unexpected Device\n");
return false;
}
@@ -8176,7 +8213,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (!tp)
return 0;
- /* reset the MAC adddress in case of policy change */
+ /* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
dev_set_mac_address (tp->netdev, &sa, NULL);
@@ -8643,7 +8680,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
break;
}
}
@@ -8932,6 +8969,79 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
return 0;
}
+static void rtl8152_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u16 bmcr, lcladv, rmtadv;
+ u8 cap;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ mutex_lock(&tp->control);
+
+ bmcr = r8152_mdio_read(tp, MII_BMCR);
+ lcladv = r8152_mdio_read(tp, MII_ADVERTISE);
+ rmtadv = r8152_mdio_read(tp, MII_LPA);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+ if (!(bmcr & BMCR_ANENABLE)) {
+ pause->autoneg = 0;
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ return;
+ }
+
+ pause->autoneg = 1;
+
+ cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+
+ if (cap & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+}
+
+static int rtl8152_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u16 old, new1;
+ u8 cap = 0;
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->control);
+
+ if (pause->autoneg && !(r8152_mdio_read(tp, MII_BMCR) & BMCR_ANENABLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pause->rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ cap |= FLOW_CTRL_TX;
+
+ old = r8152_mdio_read(tp, MII_ADVERTISE);
+ new1 = (old & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) | mii_advertise_flowctrl(cap);
+ if (old != new1)
+ r8152_mdio_write(tp, MII_ADVERTISE, new1);
+
+out:
+ mutex_unlock(&tp->control);
+ usb_autopm_put_interface(tp->intf);
+
+ return ret;
+}
+
static const struct ethtool_ops ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = rtl8152_get_drvinfo,
@@ -8954,6 +9064,8 @@ static const struct ethtool_ops ops = {
.set_tunable = rtl8152_set_tunable,
.get_ringparam = rtl8152_get_ringparam,
.set_ringparam = rtl8152_set_ringparam,
+ .get_pauseparam = rtl8152_get_pauseparam,
+ .set_pauseparam = rtl8152_set_pauseparam,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -9381,9 +9493,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (!rtl_vendor_mode(intf))
return -ENODEV;
- if (intf->cur_altsetting->desc.bNumEndpoints < 3)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -9400,6 +9509,12 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->intf = intf;
tp->version = version;
+ tp->pipe_ctrl_in = usb_rcvctrlpipe(udev, 0);
+ tp->pipe_ctrl_out = usb_sndctrlpipe(udev, 0);
+ tp->pipe_in = usb_rcvbulkpipe(udev, 1);
+ tp->pipe_out = usb_sndbulkpipe(udev, 2);
+ tp->pipe_intr = usb_rcvintpipe(udev, 3);
+
switch (version) {
case RTL_VER_01:
case RTL_VER_02:
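The new pauseparam hooks lean on the generic MII flow-control helpers: the get path resolves the negotiated state from the local and link-partner advertisements, while the set path rewrites only the pause bits of MII_ADVERTISE. The resolution step, sketched with illustrative values (helpers are from include/linux/mii.h):

	u16 lcladv = ADVERTISE_PAUSE_CAP;	/* we advertise symmetric pause */
	u16 rmtadv = LPA_PAUSE_CAP;		/* the partner advertises it too */
	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	/* cap == FLOW_CTRL_TX | FLOW_CTRL_RX for this combination */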
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index f813ca9dec53..85a8b96e39a6 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -324,7 +324,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
* For RX we handle drivers that zero-pad to end-of-packet.
* Don't let userspace change these settings.
*
- * NOTE: there still seems to be wierdness here, as if we need
+ * NOTE: there still seems to be weirdness here, as if we need
* to do some more things to make sure WinCE targets accept this.
* They default to jumbograms of 8KB or 16KB, which is absurd
* for such low data rates and which is also more than Linux
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f8cdabb9ef5a..13141dbfa3a8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- return ret;
+ goto free_pdata;
}
smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- return ret;
+ goto cancel_work;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1502,6 +1502,13 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
+
+cancel_work:
+ cancel_work_sync(&pdata->set_multicast);
+free_pdata:
+ kfree(pdata);
+ dev->data[0] = 0;
+ return ret;
}
static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1511,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
- pdata = NULL;
dev->data[0] = 0;
}
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ecf62849f4c1..470e1c1e6353 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -74,6 +74,23 @@ MODULE_PARM_DESC (msg_level, "Override default message level");
/*-------------------------------------------------------------------------*/
+static const char * const usbnet_event_names[] = {
+ [EVENT_TX_HALT] = "EVENT_TX_HALT",
+ [EVENT_RX_HALT] = "EVENT_RX_HALT",
+ [EVENT_RX_MEMORY] = "EVENT_RX_MEMORY",
+ [EVENT_STS_SPLIT] = "EVENT_STS_SPLIT",
+ [EVENT_LINK_RESET] = "EVENT_LINK_RESET",
+ [EVENT_RX_PAUSED] = "EVENT_RX_PAUSED",
+ [EVENT_DEV_ASLEEP] = "EVENT_DEV_ASLEEP",
+ [EVENT_DEV_OPEN] = "EVENT_DEV_OPEN",
+ [EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
+ [EVENT_NO_RUNTIME_PM] = "EVENT_NO_RUNTIME_PM",
+ [EVENT_RX_KILL] = "EVENT_RX_KILL",
+ [EVENT_LINK_CHANGE] = "EVENT_LINK_CHANGE",
+ [EVENT_SET_RX_MODE] = "EVENT_SET_RX_MODE",
+ [EVENT_NO_IP_ALIGN] = "EVENT_NO_IP_ALIGN",
+};
+
/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
@@ -452,9 +469,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
if (!schedule_work (&dev->kevent))
- netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+ netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
else
- netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+ netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -1597,6 +1614,9 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind(dev, intf);
+
net = dev->net;
unregister_netdev (net);
@@ -1604,9 +1624,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_scuttle_anchored_urbs(&dev->deferred);
- if (dev->driver_info->unbind)
- dev->driver_info->unbind (dev, intf);
-
usb_kill_urb(dev->interrupt);
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
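The disconnect reordering means per-driver teardown now runs while the netdev is still registered, which the asix conversion relies on (ax88772_unbind() calls phy_disconnect()). Simplified ordering after this patch:

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);	/* e.g. phy_disconnect() */
	unregister_netdev(net);				/* then tear down the netdev */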
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9b6a4a875c55..b0b81458ca94 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -380,7 +380,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
bool hdr_valid, unsigned int metasize,
- unsigned int headroom)
+ bool whole_page)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -398,17 +398,24 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- /* If headroom is not 0, there is an offset between the beginning of the
+ /* If whole_page, there is an offset between the beginning of the
* data and the allocated space, otherwise the data and the allocated
* space are aligned.
+ *
+ * Buffers with headroom use PAGE_SIZE as alloc size, see
+ * add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
- if (headroom) {
- /* Buffers with headroom use PAGE_SIZE as alloc size,
+ if (whole_page) {
+ /* Buffers with whole_page use PAGE_SIZE as alloc size,
* see add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
truesize = PAGE_SIZE;
- tailroom = truesize - len - offset;
- buf = page_address(page);
+
+ /* page may be a head page, so derive buf from p rather than
+ * from page
+ */
+ tailroom = truesize - len - offset_in_page(p);
+ buf = (char *)((unsigned long)p & PAGE_MASK);
} else {
tailroom = truesize - len;
buf = p;
@@ -726,6 +733,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
len -= vi->hdr_len;
stats->bytes += len;
+ if (unlikely(len > GOOD_PACKET_LEN)) {
+ pr_debug("%s: rx error: len %u exceeds max size %d\n",
+ dev->name, len, GOOD_PACKET_LEN);
+ dev->stats.rx_length_errors++;
+ goto err_len;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -829,6 +842,7 @@ err:
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
+err_len:
stats->drops++;
put_page(page);
xdp_xmit:
@@ -882,6 +896,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
+ if (unlikely(len > truesize)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)ctx);
+ dev->stats.rx_length_errors++;
+ goto err_skb;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -958,7 +978,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
- metasize, headroom);
+ metasize, true);
return head_skb;
}
break;
@@ -1008,15 +1028,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- if (unlikely(len > truesize)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
- dev->stats.rx_length_errors++;
- goto err_skb;
- }
-
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
- metasize, headroom);
+ metasize, !!headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1623,7 +1636,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
virtio_is_little_endian(vi->vdev), false,
0))
- BUG();
+ return -EPROTO;
if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
@@ -2834,8 +2847,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
ctx[rxq2vq(i)] = true;
}
- ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
- names, ctx, NULL);
+ ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+ names, ctx, NULL);
if (ret)
goto err_find;
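The whole_page branch above is plain page arithmetic: any pointer inside a page maps back to the page-aligned buffer start via PAGE_MASK, and its offset is recovered with offset_in_page(). In isolation (a minimal illustration, not driver code):

	static char *buf_start(void *p)
	{
		/* page-aligned start of the buffer containing p */
		return (char *)((unsigned long)p & PAGE_MASK);
	}

	/* tailroom left after len bytes of data at p:
	 *	PAGE_SIZE - len - offset_in_page(p)
	 */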
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 503e2fd7ce51..2b1b944d4b28 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -274,7 +274,7 @@ vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
int res;
/* we pre-allocate elements used in the spin-locked section (so that we
- * keep the spinlock as short as possibile).
+ * keep the spinlock as short as possible).
*/
new_me = vrf_map_elem_alloc(GFP_KERNEL);
if (!new_me)
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
dev->flags = IFF_MASTER | IFF_NOARP;
- /* MTU is irrelevant for VRF device; set to 64k similar to lo */
- dev->mtu = 64 * 1024;
-
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
netdev_lockdep_set_classes(dev);
@@ -1369,22 +1366,22 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
int orig_iif = skb->skb_iif;
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
- bool is_ll_src;
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
- * for packets with lladdr src, however, skip so that the dst can be
- * determine at input using original ifindex in the case that daddr
- * needs strict
+ * For strict packets with a source LLA, determine the dst using the
+ * original ifindex.
*/
- is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
- if (skb->pkt_type == PACKET_LOOPBACK ||
- (need_strict && !is_ndisc && !is_ll_src)) {
+ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+
if (skb->pkt_type == PACKET_LOOPBACK)
skb->pkt_type = PACKET_HOST;
+ else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
+ vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+
goto out;
}
@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
* which breaks networking.
*/
dev->min_mtu = IPV6_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->max_mtu = IP6_MAX_MTU;
+ dev->mtu = dev->max_mtu;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 02a14f1b938a..5a8df5a195cb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2164,6 +2164,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
struct neighbour *n;
struct nd_msg *msg;
+ rcu_read_lock();
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
goto out;
@@ -2215,6 +2216,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
}
out:
+ rcu_read_unlock();
consume_skb(skb);
return NETDEV_TX_OK;
}
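The rcu_read_lock()/rcu_read_unlock() pair added here is required because __in6_dev_get(), unlike in6_dev_get(), takes no reference: the returned inet6_dev is only guaranteed to stay valid inside an RCU read-side critical section. The pattern:

	rcu_read_lock();
	in6_dev = __in6_dev_get(dev);	/* no refcount taken */
	if (in6_dev) {
		/* use in6_dev only while the read lock is held */
	}
	rcu_read_unlock();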
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 83c9481995dd..473df2505c8e 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -49,7 +49,7 @@ config COSA
network device.
You will need user-space utilities COSA or SRP boards for downloading
- the firmware to the cards and to set them up. Look at the
+ the firmware to the cards and to set them up. Look at the
<http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
read the comment at the top of the <file:drivers/net/wan/cosa.c> for
details about the cards and the driver itself.
@@ -108,7 +108,7 @@ config HDLC
Generic HDLC driver currently supports raw HDLC, Cisco HDLC, Frame
Relay, synchronous Point-to-Point Protocol (PPP) and X.25.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called hdlc.
If unsure, say N.
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c354a5143e99..059c2f7133be 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -28,9 +28,8 @@
#include "hd64570.h"
-
-static const char* version = "Moxa C101 driver version: 1.15";
-static const char* devname = "C101";
+static const char *version = "Moxa C101 driver version: 1.15";
+static const char *devname = "C101";
#undef DEBUG_PKT
#define DEBUG_RINGS
@@ -51,7 +50,6 @@ static const char* devname = "C101";
static char *hw; /* pointer to hw=xxx command line string */
-
typedef struct card_s {
struct net_device *dev;
spinlock_t lock; /* TX lock */
@@ -72,14 +70,13 @@ typedef struct card_s {
u8 page;
struct card_s *next_card;
-}card_t;
+} card_t;
typedef card_t port_t;
static card_t *first_card;
static card_t **new_card = &first_card;
-
#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg))
#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg))
@@ -87,19 +84,18 @@ static card_t **new_card = &first_card;
/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
#define sca_outw(value, reg, card) do { \
writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
- writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
-} while(0)
+ writeb((value >> 8) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
+} while (0)
#define port_to_card(port) (port)
#define log_node(port) (0)
#define phy_node(port) (0)
#define winsize(card) (C101_WINDOW_SIZE)
#define win0base(card) ((card)->win0base)
-#define winbase(card) ((card)->win0base + 0x2000)
+#define winbase(card) ((card)->win0base + 0x2000)
#define get_port(card, port) (card)
static void sca_msci_intr(port_t *port);
-
static inline u8 sca_get_page(card_t *card)
{
return card->page;
@@ -111,10 +107,8 @@ static inline void openwin(card_t *card, u8 page)
writeb(page, card->win0base + C101_PAGE);
}
-
#include "hd64570.c"
-
static inline void set_carrier(port_t *port)
{
if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
@@ -123,7 +117,6 @@ static inline void set_carrier(port_t *port)
netif_carrier_off(port_to_dev(port));
}
-
static void sca_msci_intr(port_t *port)
{
u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
@@ -145,13 +138,12 @@ static void sca_msci_intr(port_t *port)
set_carrier(port);
}
-
static void c101_set_iface(port_t *port)
{
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG_RX; /* TX clock */
txs |= CLK_RXCLK_TX; /* BRG output */
@@ -179,7 +171,6 @@ static void c101_set_iface(port_t *port)
sca_set_port(port);
}
-
static int c101_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -206,7 +197,6 @@ static int c101_open(struct net_device *dev)
return 0;
}
-
static int c101_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -218,7 +208,6 @@ static int c101_close(struct net_device *dev)
return 0;
}
-
static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -240,7 +229,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
@@ -252,7 +241,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
case IF_IFACE_SYNC_SERIAL:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
@@ -276,8 +265,6 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void c101_destroy_card(card_t *card)
{
readb(card->win0base + C101_PAGE); /* Resets SCA? */
@@ -309,18 +296,18 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
card_t *card;
int result;
- if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
+ if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
pr_err("invalid IRQ value\n");
return -ENODEV;
}
- if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
+ if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) != 0) {
pr_err("invalid RAM value\n");
return -ENODEV;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL)
+ if (!card)
return -ENOBUFS;
card->dev = alloc_hdlcdev(card);
@@ -392,11 +379,9 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
return 0;
}
-
-
static int __init c101_init(void)
{
- if (hw == NULL) {
+ if (!hw) {
#ifdef MODULE
pr_info("no card initialized\n");
#endif
@@ -419,26 +404,25 @@ static int __init c101_init(void)
if (*hw == '\x0')
return first_card ? 0 : -EINVAL;
- }while(*hw++ == ':');
+ } while (*hw++ == ':');
pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
-
static void __exit c101_cleanup(void)
{
card_t *card = first_card;
while (card) {
card_t *ptr = card;
+
card = card->next_card;
unregister_hdlc_device(port_to_dev(ptr));
c101_destroy_card(ptr);
}
}
-
module_init(c101_init);
module_exit(c101_cleanup);
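
For reference, the do/while loop reshaped in c101_init() above consumes a colon-separated "irq,ram" list from the hw module parameter. A rough userspace model of that parse, assuming the "irq,ram:irq,ram:..." format (error handling simplified; the driver validates ranges in c101_run() per pair):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *hw = "11,0xC8000:12,0xD0000";	/* illustrative input */

	do {
		unsigned long irq, ram;
		char *end;

		irq = strtoul(hw, &end, 0);
		if (*end != ',')
			break;
		ram = strtoul(end + 1, &end, 0);
		printf("card: irq=%lu ram=0x%lX\n", irq, ram);
		hw = end;
		if (*hw == '\0')
			return 0;
	} while (*hw++ == ':');

	fprintf(stderr, "invalid hardware parameters\n");
	return 1;
}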
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 2369ca250cd6..43caab0b7dee 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1,13 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*/
-/*
- * The driver for the SRP and COSA synchronous serial cards.
+/* The driver for the SRP and COSA synchronous serial cards.
*
* HARDWARE INFO
*
@@ -90,7 +88,7 @@
#define COSA_MAX_ID_STRING 128
/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX")+1)
+#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
/* Per-channel data structure */
@@ -124,9 +122,9 @@ struct channel_data {
};
/* cosa->firmware_status bits */
-#define COSA_FW_RESET (1<<0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD (1<<1) /* Is the microcode downloaded? */
-#define COSA_FW_START (1<<2) /* Is the microcode running? */
+#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
+#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
+#define COSA_FW_START BIT(2) /* Is the microcode running? */
struct cosa_data {
int num; /* Card number */
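
The conversion above swaps open-coded shifts for the kernel's BIT() helper; the two forms are equivalent, BIT(n) being a shifted 1UL. A standalone sketch of the firmware_status lifecycle these flags track, mirroring the gate that cosa_start() applies later in this patch (the state values are from the defines above; the flow is a model, not driver code):

#include <stdio.h>

#define BIT(nr)			(1UL << (nr))	/* as in include/linux/bits.h */

#define COSA_FW_RESET		BIT(0)
#define COSA_FW_DOWNLOAD	BIT(1)
#define COSA_FW_START		BIT(2)

int main(void)
{
	unsigned long st = 0;

	st |= COSA_FW_RESET | COSA_FW_DOWNLOAD;	/* after reset + download */
	if ((st & (COSA_FW_RESET | COSA_FW_DOWNLOAD)) ==
	    (COSA_FW_RESET | COSA_FW_DOWNLOAD))
		printf("ready to start microcode (status %lu)\n", st);
	st &= ~COSA_FW_RESET;			/* as cosa_start() does */
	st |= COSA_FW_START;
	printf("running, status %lu\n", st);
	return 0;
}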
@@ -152,28 +150,25 @@ struct cosa_data {
char *type; /* card type */
};
-/*
- * Define this if you want all the possible ports to be autoprobed.
+/* Define this if you want all the possible ports to be autoprobed.
* It is here but it probably is not a good idea to use this.
*/
-/* #define COSA_ISA_AUTOPROBE 1 */
+/* #define COSA_ISA_AUTOPROBE 1*/
-/*
- * Character device major number. 117 was allocated for us.
+/* Character device major number. 117 was allocated for us.
* The value of 0 means to allocate a first free one.
*/
static DEFINE_MUTEX(cosa_chardev_mutex);
static int cosa_major = 117;
-/*
- * Encoding of the minor numbers:
+/* Encoding of the minor numbers:
* The lowest CARD_MINOR_BITS bits means the channel on the single card,
* the highest bits means the card number.
*/
#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card */
-/*
- * The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
+ * for the single card
+ */
+/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
* macro doesn't like anything other than the raw number as an argument :-(
*/
#define MAX_CARDS 16
@@ -184,8 +179,7 @@ static int cosa_major = 117;
#define DRIVER_TXMAP_SHIFT 2
#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-/*
- * for cosa->rxtx - indicates whether either transmit or receive is
+/* for cosa->rxtx - indicates whether either transmit or receive is
* in progress. These values are mean number of the bit.
*/
#define TXBIT 0
@@ -198,22 +192,22 @@ static int cosa_major = 117;
#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
#undef DEBUG_IO //1 /* Dump the I/O traffic */
-#define TX_TIMEOUT (5*HZ)
+#define TX_TIMEOUT (5 * HZ)
/* Maybe the following should be allocated dynamically */
static struct cosa_data cosa_cards[MAX_CARDS];
static int nr_cards;
#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS+1] = { 0x220, 0x228, 0x210, 0x218, 0, };
+static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS+1] = { 1, 7, 1, 7, 1, 7, 1, 7, 0, };
+static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
#else
-static int io[MAX_CARDS+1];
-static int dma[MAX_CARDS+1];
+static int io[MAX_CARDS + 1];
+static int dma[MAX_CARDS + 1];
#endif
/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, };
+static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
/* for class stuff*/
static struct class *cosa_class;
@@ -244,14 +238,14 @@ MODULE_LICENSE("GPL");
#define cosa_inw inw
#endif
-#define is_8bit(cosa) (!(cosa->datareg & 0x08))
+#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-#define cosa_getstatus(cosa) (cosa_inb(cosa->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, cosa->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw(cosa->datareg))
-#define cosa_getdata8(cosa) (cosa_inb(cosa->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, cosa->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, cosa->datareg))
+#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
+#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
+#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
+#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
+#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
+#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
/* Initialization stuff */
static int cosa_probe(int ioaddr, int irq, int dma);
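
The hunk above wraps every use of the macro parameter in parentheses. Without them, an argument that is itself an expression associates with the macro body incorrectly; with them, the expansion stays grouped. A small sketch of the hazard (struct fields trimmed to the one the macro reads):

#include <stdio.h>

struct cosa_data { int datareg; };

/* Unparenthesized: "cosa" is pasted verbatim, so an argument like
 * "cards + n" would expand to !(cards + n->datareg & 0x08), which
 * does not even parse. The parenthesized form stays grouped.
 */
#define is_8bit_bad(cosa)	(!(cosa->datareg & 0x08))
#define is_8bit(cosa)		(!((cosa)->datareg & 0x08))

int main(void)
{
	struct cosa_data cards[2] = { { 0x08 }, { 0x00 } };
	int n = 1;

	/* is_8bit_bad(cards + n) would fail to compile; this works: */
	printf("card %d is %d-bit\n", n, is_8bit(cards + n) ? 8 : 16);
	return 0;
}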
@@ -280,14 +274,14 @@ static char *chrdev_setup_rx(struct channel_data *channel, int size);
static int chrdev_rx_done(struct channel_data *channel);
static int chrdev_tx_done(struct channel_data *channel, int size);
static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
+ char __user *buf, size_t count, loff_t *ppos);
static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
+ const char __user *buf, size_t count, loff_t *ppos);
static unsigned int cosa_poll(struct file *file, poll_table *poll);
static int cosa_open(struct inode *inode, struct file *file);
static int cosa_release(struct inode *inode, struct file *file);
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
#ifdef COSA_FASYNC_WORKING
static int cosa_fasync(struct inode *inode, struct file *file, int on);
#endif
@@ -337,7 +331,7 @@ static void debug_status_in(struct cosa_data *cosa, int status);
static void debug_status_out(struct cosa_data *cosa, int status);
#endif
-static inline struct channel_data* dev_to_chan(struct net_device *dev)
+static inline struct channel_data *dev_to_chan(struct net_device *dev)
{
return (struct channel_data *)dev_to_hdlc(dev)->priv;
}
@@ -355,15 +349,16 @@ static int __init cosa_init(void)
goto out;
}
} else {
- if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
+ cosa_major = register_chrdev(0, "cosa", &cosa_fops);
+ if (!cosa_major) {
pr_warn("unable to register chardev\n");
err = -EIO;
goto out;
}
}
- for (i=0; i<MAX_CARDS; i++)
+ for (i = 0; i < MAX_CARDS; i++)
cosa_cards[i].num = -1;
- for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
+ for (i = 0; io[i] != 0 && i < MAX_CARDS; i++)
cosa_probe(io[i], irq[i], dma[i]);
if (!nr_cards) {
pr_warn("no devices found\n");
@@ -426,7 +421,7 @@ static const struct net_device_ops cosa_ops = {
static int cosa_probe(int base, int irq, int dma)
{
- struct cosa_data *cosa = cosa_cards+nr_cards;
+ struct cosa_data *cosa = cosa_cards + nr_cards;
int i, err = 0;
memset(cosa, 0, sizeof(struct cosa_data));
@@ -438,7 +433,8 @@ static int cosa_probe(int base, int irq, int dma)
return -1;
}
/* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8. */
+ * multiple of 8.
+ */
if (base < 0x100 || base > 0x3ff || base & 0x7) {
pr_info("invalid I/O address 0x%x\n", base);
return -1;
@@ -448,8 +444,9 @@ static int cosa_probe(int base, int irq, int dma)
pr_info("invalid DMA %d\n", dma);
return -1;
}
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10 */
+ /* and finally, on 16-bit COSA DMA should be 4-7 and
+ * I/O base should not be multiple of 0x10
+ */
if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
base, dma);
@@ -458,12 +455,12 @@ static int cosa_probe(int base, int irq, int dma)
cosa->dma = dma;
cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa)?base+1:base+2;
+ cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
spin_lock_init(&cosa->lock);
- if (!request_region(base, is_8bit(cosa)?2:4,"cosa"))
+ if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
return -1;
-
+
if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
err = -1;
@@ -471,11 +468,11 @@ static int cosa_probe(int base, int irq, int dma)
}
/* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3))
+ if (!strncmp(cosa->id_string, "SRP", 3)) {
cosa->type = "srp";
- else if (!strncmp(cosa->id_string, "COSA", 4))
- cosa->type = is_8bit(cosa)? "cosa8": "cosa16";
- else {
+ } else if (!strncmp(cosa->id_string, "COSA", 4)) {
+ cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
+ } else {
/* Print a warning only if we are not autoprobing */
#ifndef COSA_ISA_AUTOPROBE
pr_info("valid signature not found at 0x%x\n", base);
@@ -483,9 +480,9 @@ static int cosa_probe(int base, int irq, int dma)
err = -1;
goto err_out;
}
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa)?2:4);
- if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
+ /* Update the name of the region now we know the type of card */
+ release_region(base, is_8bit(cosa) ? 2 : 4);
+ if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
return -1;
}
@@ -495,8 +492,7 @@ static int cosa_probe(int base, int irq, int dma)
unsigned long irqs;
/* pr_info("IRQ autoprobe\n"); */
irqs = probe_irq_on();
- /*
- * Enable interrupt on tx buffer empty (it sure is)
+ /* Enable interrupt on tx buffer empty (it sure is)
* really sure ?
* FIXME: When this code is not used as module, we should
* probably call udelay() instead of the interruptible sleep.
@@ -536,8 +532,8 @@ static int cosa_probe(int base, int irq, int dma)
err = -1;
goto err_out1;
}
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL|GFP_DMA);
+
+ cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
if (!cosa->bouncebuf) {
err = -ENOMEM;
goto err_out2;
@@ -563,7 +559,8 @@ static int cosa_probe(int base, int irq, int dma)
sema_init(&chan->wsem, 1);
/* Register the network interface */
- if (!(chan->netdev = alloc_hdlcdev(chan))) {
+ chan->netdev = alloc_hdlcdev(chan);
+ if (!chan->netdev) {
pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
err = -ENOMEM;
goto err_hdlcdev;
@@ -603,12 +600,11 @@ err_out2:
err_out1:
free_irq(cosa->irq, cosa);
err_out:
- release_region(cosa->datareg,is_8bit(cosa)?2:4);
+ release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
pr_notice("cosa%d: allocating resources failed\n", cosa->num);
return err;
}
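
The probe comments above encode two ISA-era hardware rules: the base address must be 0x100-0x3ff and a multiple of 8, and bit 3 of the base distinguishes the 16-bit cards, which need a 16-bit DMA channel (4-7), from the 8-bit ones (DMA 1-3). A userspace model of just those checks, lifted from the conditions shown in the hunks:

#include <stdio.h>

static int check(int base, int dma)
{
	if (base < 0x100 || base > 0x3ff || base & 0x7)
		return -1;	/* invalid I/O address */
	if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3))
		return -1;	/* 8/16 bit base and DMA mismatch */
	return 0;
}

int main(void)
{
	printf("0x220/dma1: %d\n", check(0x220, 1));	/* 8-bit, ok */
	printf("0x228/dma7: %d\n", check(0x228, 7));	/* 16-bit, ok */
	printf("0x228/dma1: %d\n", check(0x228, 1));	/* mismatch */
	return 0;
}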
-
/*---------- network device ---------- */
static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
@@ -659,7 +655,7 @@ static int cosa_net_open(struct net_device *dev)
}
static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct channel_data *chan = dev_to_chan(dev);
@@ -714,13 +710,12 @@ static int cosa_net_close(struct net_device *dev)
static char *cosa_net_setup_rx(struct channel_data *chan, int size)
{
- /*
- * We can safely fall back to non-dma-able memory, because we have
+ /* We can safely fall back to non-dma-able memory, because we have
* the cosa->bouncebuf pre-allocated.
*/
kfree_skb(chan->rx_skb);
chan->rx_skb = dev_alloc_skb(size);
- if (chan->rx_skb == NULL) {
+ if (!chan->rx_skb) {
pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
chan->netdev->stats.rx_dropped++;
return NULL;
@@ -767,7 +762,7 @@ static int cosa_net_tx_done(struct channel_data *chan, int size)
/*---------- Character device ---------- */
static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
+ char __user *buf, size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
@@ -782,9 +777,9 @@ static ssize_t cosa_read(struct file *file,
}
if (mutex_lock_interruptible(&chan->rlock))
return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL);
- if (chan->rxdata == NULL) {
+
+ chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
+ if (!chan->rxdata) {
mutex_unlock(&chan->rlock);
return -ENOMEM;
}
@@ -840,9 +835,8 @@ static int chrdev_rx_done(struct channel_data *chan)
return 1;
}
-
static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+ const char __user *buf, size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(wait, current);
struct channel_data *chan = file->private_data;
@@ -860,10 +854,10 @@ static ssize_t cosa_write(struct file *file,
if (count > COSA_MTU)
count = COSA_MTU;
-
+
/* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA);
- if (kbuf == NULL) {
+ kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
+ if (!kbuf) {
up(&chan->wsem);
return -ENOMEM;
}
@@ -872,7 +866,7 @@ static ssize_t cosa_write(struct file *file,
kfree(kbuf);
return -EFAULT;
}
- chan->tx_status=0;
+ chan->tx_status = 0;
cosa_start_tx(chan, kbuf, count);
spin_lock_irqsave(&cosa->lock, flags);
@@ -927,20 +921,20 @@ static int cosa_open(struct inode *inode, struct file *file)
int ret = 0;
mutex_lock(&cosa_chardev_mutex);
- if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS)
- >= nr_cards) {
+ n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
+ if (n >= nr_cards) {
ret = -ENODEV;
goto out;
}
- cosa = cosa_cards+n;
+ cosa = cosa_cards + n;
- if ((n=iminor(file_inode(file))
- & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
+ n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
+ if (n >= cosa->nchannels) {
ret = -ENODEV;
goto out;
}
chan = cosa->chan + n;
-
+
file->private_data = chan;
spin_lock_irqsave(&cosa->lock, flags);
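
The cosa_open() rewrite above recovers the card and channel from the character-device minor, following the CARD_MINOR_BITS layout documented earlier in this patch: the low bits select the channel, the rest the card. A worked example of that split:

#include <stdio.h>

#define CARD_MINOR_BITS	4

int main(void)
{
	unsigned int minor = 0x23;	/* illustrative: card 2, channel 3 */
	unsigned int card = minor >> CARD_MINOR_BITS;
	unsigned int chan = minor & ((1 << CARD_MINOR_BITS) - 1);

	printf("minor %u -> card %u, channel %u\n", minor, card, chan);
	return 0;
}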
@@ -982,26 +976,25 @@ static struct fasync_struct *fasync[256] = { NULL, };
/* To be done ... */
static int cosa_fasync(struct inode *inode, struct file *file, int on)
{
- int port = iminor(inode);
+ int port = iminor(inode);
return fasync_helper(inode, file, on, &fasync[port]);
}
#endif
-
/* ---------- Ioctls ---------- */
-/*
- * Ioctl subroutines can safely be made inline, because they are called
+/* Ioctl subroutines can safely be made inline, because they are called
* only from cosa_ioctl().
*/
static inline int cosa_reset(struct cosa_data *cosa)
{
char idstring[COSA_MAX_ID_STRING];
+
if (cosa->usage > 1)
pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
+ cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
if (cosa_reset_and_read_id(cosa, idstring) < 0) {
pr_notice("cosa%d: reset failed\n", cosa->num);
return -EIO;
@@ -1025,7 +1018,7 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
cosa->name, cosa->firmware_status);
return -EPERM;
}
-
+
if (copy_from_user(&d, arg, sizeof(d)))
return -EFAULT;
@@ -1034,9 +1027,8 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
return -EINVAL;
-
/* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_DOWNLOAD);
+ cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
i = download(cosa, d.code, d.len, d.addr);
if (i < 0) {
@@ -1046,7 +1038,7 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
}
pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
+ cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
return 0;
}
@@ -1091,14 +1083,15 @@ static inline int cosa_start(struct cosa_data *cosa, int address)
pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
- if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
+ if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
+ != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
cosa->name, cosa->firmware_status);
return -EPERM;
}
cosa->firmware_status &= ~COSA_FW_RESET;
- if ((i=startmicrocode(cosa, address)) < 0) {
+ i = startmicrocode(cosa, address);
+ if (i < 0) {
pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
cosa->num, address, i);
return -EIO;
@@ -1108,11 +1101,12 @@ static inline int cosa_start(struct cosa_data *cosa, int address)
cosa->firmware_status |= COSA_FW_START;
return 0;
}
-
+
/* Buffer of size at least COSA_MAX_ID_STRING is expected */
static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
{
- int l = strlen(cosa->id_string)+1;
+ int l = strlen(cosa->id_string) + 1;
+
if (copy_to_user(string, cosa->id_string, l))
return -EFAULT;
return l;
@@ -1121,16 +1115,19 @@ static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
/* Buffer of size at least COSA_MAX_ID_STRING is expected */
static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
{
- int l = strlen(cosa->type)+1;
+ int l = strlen(cosa->type) + 1;
+
if (copy_to_user(string, cosa->type, l))
return -EFAULT;
return l;
}
static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd, unsigned long arg)
+ struct channel_data *channel, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
+
switch (cmd) {
case COSAIORSET: /* Reset the device */
if (!capable(CAP_NET_ADMIN))
@@ -1143,7 +1140,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
case COSAIODOWNLD: /* Download the firmware */
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
-
+
return cosa_download(cosa, argp);
case COSAIORMEM:
if (!capable(CAP_SYS_RAWIO))
@@ -1176,6 +1173,7 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int rv;
struct channel_data *chan = dev_to_chan(dev);
+
rv = cosa_ioctl_common(chan->cosa, chan, cmd,
(unsigned long)ifr->ifr_data);
if (rv != -ENOIOCTLCMD)
@@ -1184,7 +1182,7 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct channel_data *channel = file->private_data;
struct cosa_data *cosa;
@@ -1197,11 +1195,9 @@ static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-
/*---------- HW layer interface ---------- */
-/*
- * The higher layer can bind itself to the HW layer by setting the callbacks
+/* The higher layer can bind itself to the HW layer by setting the callbacks
* in the channel_data structure and by using these routines.
*/
static void cosa_enable_rx(struct channel_data *chan)
@@ -1220,8 +1216,7 @@ static void cosa_disable_rx(struct channel_data *chan)
put_driver_status(cosa);
}
-/*
- * FIXME: This routine probably should check for cosa_start_tx() called when
+/* FIXME: This routine probably should check for cosa_start_tx() called when
* the previous transmit is still unfinished. In this case the non-zero
* return value should indicate to the caller that the queuing(sp?) up
* the transmit has failed.
@@ -1235,7 +1230,7 @@ static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
pr_info("cosa%dc%d: starting tx(0x%x)",
chan->cosa->num, chan->num, len);
- for (i=0; i<len; i++)
+ for (i = 0; i < len; i++)
pr_cont(" %02x", buf[i]&0xff);
pr_cont("\n");
#endif
@@ -1262,10 +1257,10 @@ static void put_driver_status(struct cosa_data *cosa)
status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
| (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
- &DRIVER_TXMAP_MASK : 0);
+ | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
+ & DRIVER_TXMAP_MASK : 0);
if (!cosa->rxtx) {
- if (cosa->rxbitmap|cosa->txbitmap) {
+ if (cosa->rxbitmap | cosa->txbitmap) {
if (!cosa->enabled) {
cosa_putstatus(cosa, SR_RX_INT_ENA);
#ifdef DEBUG_IO
@@ -1294,10 +1289,10 @@ static void put_driver_status_nolock(struct cosa_data *cosa)
status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
| (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
- &DRIVER_TXMAP_MASK : 0);
+ | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
+ & DRIVER_TXMAP_MASK : 0);
- if (cosa->rxbitmap|cosa->txbitmap) {
+ if (cosa->rxbitmap | cosa->txbitmap) {
cosa_putstatus(cosa, SR_RX_INT_ENA);
#ifdef DEBUG_IO
debug_status_out(cosa, SR_RX_INT_ENA);
@@ -1316,8 +1311,7 @@ static void put_driver_status_nolock(struct cosa_data *cosa)
#endif
}
-/*
- * The "kickme" function: When the DMA times out, this is called to
+/* The "kickme" function: When the DMA times out, this is called to
* clean up the driver status.
* FIXME: Preliminary support, the interface is probably wrong.
*/
@@ -1344,7 +1338,7 @@ static void cosa_kick(struct cosa_data *cosa)
udelay(100);
cosa_putstatus(cosa, 0);
udelay(100);
- (void) cosa_getdata8(cosa);
+ (void)cosa_getdata8(cosa);
udelay(100);
cosa_putdata8(cosa, 0);
udelay(100);
@@ -1352,8 +1346,7 @@ static void cosa_kick(struct cosa_data *cosa)
spin_unlock_irqrestore(&cosa->lock, flags);
}
-/*
- * Check if the whole buffer is DMA-able. It means it is below the 16M of
+/* Check if the whole buffer is DMA-able. It means it is below the 16M of
* physical memory and doesn't span the 64k boundary. For now it seems
* SKB's never do this, but we'll check this anyway.
*/
@@ -1361,9 +1354,10 @@ static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
{
static int count;
unsigned long b = (unsigned long)buf;
- if (b+len >= MAX_DMA_ADDRESS)
+
+ if (b + len >= MAX_DMA_ADDRESS)
return 0;
- if ((b^ (b+len)) & 0x10000) {
+ if ((b ^ (b + len)) & 0x10000) {
if (count++ < 5)
pr_info("%s: packet spanning a 64k boundary\n",
chan->name);
@@ -1372,11 +1366,9 @@ static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
return 1;
}
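
cosa_dma_able(), reshaped above, enforces the two classic ISA DMA constraints its comment describes: the buffer must lie below the 16M DMA limit and must not cross a 64k page of the DMA controller. XORing the start and end addresses exposes a boundary crossing as a flipped bit 16. A standalone model (the 16MB constant is illustrative; the kernel's MAX_DMA_ADDRESS is architecture-specific):

#include <stdio.h>

#define MAX_DMA_ADDRESS	0x1000000UL	/* illustrative 16MB limit */

static int dma_able(unsigned long b, int len)
{
	if (b + len >= MAX_DMA_ADDRESS)
		return 0;
	if ((b ^ (b + len)) & 0x10000)
		return 0;	/* spans a 64k boundary */
	return 1;
}

int main(void)
{
	printf("%d\n", dma_able(0x20000, 100));	/* 1: inside one 64k page */
	printf("%d\n", dma_able(0x2FFF0, 100));	/* 0: crosses 0x30000 */
	return 0;
}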
-
/* ---------- The SRP/COSA ROM monitor functions ---------- */
-/*
- * Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=",
+/* Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=",
* drivers need to say 4-digit hex number meaning start address of the microcode
* separated by a single space. Monitor replies by saying " =". Now driver
* has to write 4-digit hex number meaning the last byte address ended
@@ -1387,18 +1379,27 @@ static int download(struct cosa_data *cosa, const char __user *microcode, int le
{
int i;
- if (put_wait_data(cosa, 'w') == -1) return -1;
+ if (put_wait_data(cosa, 'w') == -1)
+ return -1;
if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
- if (get_wait_data(cosa) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, ' ') == -1) return -10;
- if (get_wait_data(cosa) != ' ') return -11;
- if (get_wait_data(cosa) != '=') return -12;
-
- if (puthexnumber(cosa, address+length-1) < 0) return -13;
- if (put_wait_data(cosa, ' ') == -1) return -18;
- if (get_wait_data(cosa) != ' ') return -19;
+ if (get_wait_data(cosa) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -10;
+ if (get_wait_data(cosa) != ' ')
+ return -11;
+ if (get_wait_data(cosa) != '=')
+ return -12;
+
+ if (puthexnumber(cosa, address + length - 1) < 0)
+ return -13;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -18;
+ if (get_wait_data(cosa) != ' ')
+ return -19;
while (length--) {
char c;
@@ -1413,43 +1414,53 @@ static int download(struct cosa_data *cosa, const char __user *microcode, int le
microcode++;
}
- if (get_wait_data(cosa) != '\r') return -21;
- if (get_wait_data(cosa) != '\n') return -22;
- if (get_wait_data(cosa) != '.') return -23;
+ if (get_wait_data(cosa) != '\r')
+ return -21;
+ if (get_wait_data(cosa) != '\n')
+ return -22;
+ if (get_wait_data(cosa) != '.')
+ return -23;
#if 0
printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
#endif
return 0;
}
-
-/*
- * Starting microcode is done via the "g" command of the SRP monitor.
+/* Starting microcode is done via the "g" command of the SRP monitor.
* The chat should be the following: "g" "g=" "<addr><CR>"
* "<CR><CR><LF><CR><LF>".
*/
static int startmicrocode(struct cosa_data *cosa, int address)
{
- if (put_wait_data(cosa, 'g') == -1) return -1;
- if (get_wait_data(cosa) != 'g') return -2;
- if (get_wait_data(cosa) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, '\r') == -1) return -5;
-
- if (get_wait_data(cosa) != '\r') return -6;
- if (get_wait_data(cosa) != '\r') return -7;
- if (get_wait_data(cosa) != '\n') return -8;
- if (get_wait_data(cosa) != '\r') return -9;
- if (get_wait_data(cosa) != '\n') return -10;
+ if (put_wait_data(cosa, 'g') == -1)
+ return -1;
+ if (get_wait_data(cosa) != 'g')
+ return -2;
+ if (get_wait_data(cosa) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, '\r') == -1)
+ return -5;
+
+ if (get_wait_data(cosa) != '\r')
+ return -6;
+ if (get_wait_data(cosa) != '\r')
+ return -7;
+ if (get_wait_data(cosa) != '\n')
+ return -8;
+ if (get_wait_data(cosa) != '\r')
+ return -9;
+ if (get_wait_data(cosa) != '\n')
+ return -10;
#if 0
printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
#endif
return 0;
}
-/*
- * Reading memory is done via the "r" command of the SRP monitor.
+/* Reading memory is done via the "r" command of the SRP monitor.
* The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " "
* Then driver can read the data and the conversation is finished
* by SRP monitor sending "<CR><LF>." (dot at the end).
@@ -1459,27 +1470,39 @@ static int startmicrocode(struct cosa_data *cosa, int address)
*/
static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
{
- if (put_wait_data(cosa, 'r') == -1) return -1;
- if ((get_wait_data(cosa)) != 'r') return -2;
- if ((get_wait_data(cosa)) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, ' ') == -1) return -5;
- if (get_wait_data(cosa) != ' ') return -6;
- if (get_wait_data(cosa) != '=') return -7;
-
- if (puthexnumber(cosa, address+length-1) < 0) return -8;
- if (put_wait_data(cosa, ' ') == -1) return -9;
- if (get_wait_data(cosa) != ' ') return -10;
+ if (put_wait_data(cosa, 'r') == -1)
+ return -1;
+ if ((get_wait_data(cosa)) != 'r')
+ return -2;
+ if ((get_wait_data(cosa)) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -5;
+ if (get_wait_data(cosa) != ' ')
+ return -6;
+ if (get_wait_data(cosa) != '=')
+ return -7;
+
+ if (puthexnumber(cosa, address + length - 1) < 0)
+ return -8;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -9;
+ if (get_wait_data(cosa) != ' ')
+ return -10;
while (length--) {
char c;
int i;
- if ((i=get_wait_data(cosa)) == -1) {
+
+ i = get_wait_data(cosa);
+ if (i == -1) {
pr_info("0x%04x bytes remaining\n", length);
return -11;
}
- c=i;
+ c = i;
#if 1
if (put_user(c, microcode))
return -23; /* ??? */
@@ -1489,22 +1512,24 @@ static int readmem(struct cosa_data *cosa, char __user *microcode, int length, i
microcode++;
}
- if (get_wait_data(cosa) != '\r') return -21;
- if (get_wait_data(cosa) != '\n') return -22;
- if (get_wait_data(cosa) != '.') return -23;
+ if (get_wait_data(cosa) != '\r')
+ return -21;
+ if (get_wait_data(cosa) != '\n')
+ return -22;
+ if (get_wait_data(cosa) != '.')
+ return -23;
#if 0
printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
#endif
return 0;
}
-/*
- * This function resets the device and reads the initial prompt
+/* This function resets the device and reads the initial prompt
* of the device's ROM monitor.
*/
static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
{
- int i=0, id=0, prev=0, curr=0;
+ int i = 0, id = 0, prev = 0, curr = 0;
/* Reset the card ... */
cosa_putstatus(cosa, 0);
@@ -1514,18 +1539,18 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
- /*
- * Try to read the ID string. The card then prints out the
+ /* Try to read the ID string. The card then prints out the
* identification string ended by the "\n\x2e".
*
* The following loop is indexed through i (instead of id)
* to avoid looping forever when for any reason
* the port returns '\r', '\n' or '\x2e' permanently.
*/
- for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
- if ((curr = get_wait_data(cosa)) == -1) {
+ for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
+ curr = get_wait_data(cosa);
+ if (curr == -1)
return -1;
- }
+
curr &= 0xff;
if (curr != '\r' && curr != '\n' && curr != 0x2e)
idstring[id++] = curr;
@@ -1537,11 +1562,9 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
return id;
}
-
/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-/*
- * This routine gets the data byte from the card waiting for the SR_RX_RDY
+/* This routine gets the data byte from the card waiting for the SR_RX_RDY
* bit to be set in a loop. It should be used in the exceptional cases
* only (for example when resetting the card or downloading the firmware.
*/
@@ -1553,10 +1576,11 @@ static int get_wait_data(struct cosa_data *cosa)
/* read data and return them */
if (cosa_getstatus(cosa) & SR_RX_RDY) {
short r;
+
r = cosa_getdata8(cosa);
#if 0
pr_info("get_wait_data returning after %d retries\n",
- 999-retries);
+ 999 - retries);
#endif
return r;
}
@@ -1568,20 +1592,20 @@ static int get_wait_data(struct cosa_data *cosa)
return -1;
}
-/*
- * This routine puts the data byte to the card waiting for the SR_TX_RDY
+/* This routine puts the data byte to the card waiting for the SR_TX_RDY
* bit to be set in a loop. It should be used in the exceptional cases
* only (for example when resetting the card or downloading the firmware).
*/
static int put_wait_data(struct cosa_data *cosa, int data)
{
int retries = 1000;
+
while (--retries) {
/* read data and return them */
if (cosa_getstatus(cosa) & SR_TX_RDY) {
cosa_putdata8(cosa, data);
#if 0
- pr_info("Putdata: %d retries\n", 999-retries);
+ pr_info("Putdata: %d retries\n", 999 - retries);
#endif
return 0;
}
@@ -1594,9 +1618,8 @@ static int put_wait_data(struct cosa_data *cosa, int data)
cosa->num, cosa_getstatus(cosa));
return -1;
}
-
-/*
- * The following routine puts the hexadecimal number into the SRP monitor
+
+/* The following routine puts the hexadecimal number into the SRP monitor
* and verifies the proper echo of the sent bytes. Returns 0 on success,
* negative number on failure (-1,-3,-5,-7) means that put_wait_data() failed,
* (-2,-4,-6,-8) means that reading echo failed.
@@ -1608,26 +1631,24 @@ static int puthexnumber(struct cosa_data *cosa, int number)
/* Well, I should probably replace this by something faster. */
sprintf(temp, "%04X", number);
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (put_wait_data(cosa, temp[i]) == -1) {
pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
cosa->num, i);
- return -1-2*i;
+ return -1 - 2 * i;
}
if (get_wait_data(cosa) != temp[i]) {
pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
cosa->num, i);
- return -2-2*i;
+ return -2 - 2 * i;
}
}
return 0;
}
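
The comment preceding puthexnumber() fixes its return-code convention, preserved by the rewrite above: for hex digit i (0..3), a write failure yields -1 - 2*i (odd codes -1, -3, -5, -7) and a bad echo yields -2 - 2*i (even codes -2, -4, -6, -8). Tabulated:

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("digit %d: write fail %d, echo fail %d\n",
		       i, -1 - 2 * i, -2 - 2 * i);
	return 0;
}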
-
/* ---------- Interrupt routines ---------- */
-/*
- * There are three types of interrupt:
+/* There are three types of interrupt:
* At the beginning of transmit - this handled is in tx_interrupt(),
* at the beginning of receive - it is in rx_interrupt() and
* at the end of transmit/receive - it is the eot_interrupt() function.
@@ -1635,14 +1656,13 @@ static int puthexnumber(struct cosa_data *cosa, int number)
* COSA status byte. I have moved the rx/tx/eot interrupt handling into
* separate functions to make it more readable. These functions are inline,
* so there should be no overhead of function call.
- *
+ *
* In the COSA bus-master mode, we need to tell the card the address of a
* buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
* It's time to use the bottom half :-(
*/
-/*
- * Transmit interrupt routine - called when COSA is willing to obtain
+/* Transmit interrupt routine - called when COSA is willing to obtain
* data from the OS. The most tricky part of the routine is selection
* of channel we (OS) want to send packet for. For SRP we should probably
* use the round-robin approach. The newer COSA firmwares have a simple
@@ -1667,7 +1687,8 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
set_bit(TXBIT, &cosa->rxtx);
if (!test_bit(IRQBIT, &cosa->rxtx)) {
/* flow control, see the comment above */
- int i=0;
+ int i = 0;
+
if (!cosa->txbitmap) {
pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
cosa->name);
@@ -1681,9 +1702,10 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
i++;
if (cosa->txchan >= cosa->nchannels)
cosa->txchan = 0;
- if (!(cosa->txbitmap & (1<<cosa->txchan)))
+ if (!(cosa->txbitmap & (1 << cosa->txchan)))
continue;
- if (~status & (1 << (cosa->txchan+DRIVER_TXMAP_SHIFT)))
+ if (~status &
+ (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
break;
/* in second pass, accept first ready-to-TX channel */
if (i > cosa->nchannels) {
@@ -1698,12 +1720,13 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
}
cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan+cosa->txchan,
- cosa->chan[cosa->txchan].txbuf, cosa->txsize)) {
+ if (cosa_dma_able(cosa->chan + cosa->txchan,
+ cosa->chan[cosa->txchan].txbuf,
+ cosa->txsize)) {
cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
} else {
memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
+ cosa->txsize);
cosa->txbuf = cosa->bouncebuf;
}
}
@@ -1711,12 +1734,12 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (is_8bit(cosa)) {
if (!test_bit(IRQBIT, &cosa->rxtx)) {
cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0)|
+ cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
((cosa->txsize >> 8) & 0x1f));
#ifdef DEBUG_IO
debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0)|
- ((cosa->txsize >> 8) & 0x1f));
+ debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
+ ((cosa->txsize >> 8) & 0x1f));
debug_data_in(cosa, cosa_getdata8(cosa));
#else
cosa_getdata8(cosa);
@@ -1727,20 +1750,20 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
} else {
clear_bit(IRQBIT, &cosa->rxtx);
cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize&0xff);
+ cosa_putdata8(cosa, cosa->txsize & 0xff);
#ifdef DEBUG_IO
debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize&0xff);
+ debug_data_out(cosa, cosa->txsize & 0xff);
#endif
}
} else {
cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan<<13) & 0xe000)
+ cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
| (cosa->txsize & 0x1fff));
#ifdef DEBUG_IO
debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan<<13) & 0xe000)
- | (cosa->txsize & 0x1fff));
+ debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
+ (cosa->txsize & 0x1fff));
debug_data_in(cosa, cosa_getdata8(cosa));
debug_status_out(cosa, 0);
#else
@@ -1751,25 +1774,28 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (cosa->busmaster) {
unsigned long addr = virt_to_bus(cosa->txbuf);
- int count=0;
+ int count = 0;
+
pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
count++;
udelay(10);
- if (count > 1000) break;
+ if (count > 1000)
+ break;
}
pr_info("status %x\n", cosa_getstatus(cosa));
pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16)&0xffff);
+ cosa_putdata16(cosa, (addr >> 16) & 0xffff);
count = 0;
- while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
count++;
- if (count > 1000) break;
+ if (count > 1000)
+ break;
udelay(10);
}
pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr &0xffff);
+ cosa_putdata16(cosa, addr & 0xffff);
flags1 = claim_dma_lock();
set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
enable_dma(cosa->dma);
@@ -1785,9 +1811,9 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
enable_dma(cosa->dma);
release_dma_lock(flags1);
}
- cosa_putstatus(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+ cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+ debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
#endif
spin_unlock_irqrestore(&cosa->lock, flags);
}
@@ -1806,7 +1832,7 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
if (!test_bit(IRQBIT, &cosa->rxtx)) {
set_bit(IRQBIT, &cosa->rxtx);
put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) <<8;
+ cosa->rxsize = cosa_getdata8(cosa) << 8;
#ifdef DEBUG_IO
debug_data_in(cosa, cosa->rxsize >> 8);
#endif
@@ -1859,20 +1885,20 @@ reject: /* Reject the packet */
disable_dma(cosa->dma);
clear_dma_ff(cosa->dma);
set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) {
+ if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- } else {
+ else
set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
- }
- set_dma_count(cosa->dma, (cosa->rxsize&0x1fff));
+
+ set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
enable_dma(cosa->dma);
release_dma_lock(flags);
spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
if (!is_8bit(cosa) && (status & SR_TX_RDY))
cosa_putdata8(cosa, DRIVER_RX_READY);
#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
if (!is_8bit(cosa) && (status & SR_TX_RDY))
debug_data_cmd(cosa, DRIVER_RX_READY);
#endif
@@ -1882,13 +1908,15 @@ reject: /* Reject the packet */
static inline void eot_interrupt(struct cosa_data *cosa, int status)
{
unsigned long flags, flags1;
+
spin_lock_irqsave(&cosa->lock, flags);
flags1 = claim_dma_lock();
disable_dma(cosa->dma);
clear_dma_ff(cosa->dma);
release_dma_lock(flags1);
if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan+cosa->txchan;
+ struct channel_data *chan = cosa->chan + cosa->txchan;
+
if (chan->tx_done)
if (chan->tx_done(chan, cosa->txsize))
clear_bit(chan->num, &cosa->txbitmap);
@@ -1896,9 +1924,10 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
#ifdef DEBUG_DATA
{
int i;
+
pr_info("cosa%dc%d: done rx(0x%x)",
cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i=0; i<cosa->rxsize; i++)
+ for (i = 0; i < cosa->rxsize; i++)
pr_cont(" %02x", cosa->rxbuf[i]&0xff);
pr_cont("\n");
}
@@ -1914,8 +1943,7 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
} else {
pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
}
- /*
- * Clear the RXBIT, TXBIT and IRQBIT (the latest should be
+ /* Clear the RXBIT, TXBIT and IRQBIT (the latest should be
* cleared anyway). We should do it as soon as possible
* so that we can tell the COSA we are done and to give it a time
* for recovery.
@@ -1968,10 +1996,8 @@ again:
return IRQ_HANDLED;
}
-
/* ---------- I/O debugging routines ---------- */
-/*
- * These routines can be used to monitor COSA/SRP I/O and to printk()
+/* These routines can be used to monitor COSA/SRP I/O and to printk()
* the data being transferred on the data and status I/O port in a
* readable way.
*/
@@ -1980,6 +2006,7 @@ again:
static void debug_status_in(struct cosa_data *cosa, int status)
{
char *s;
+
switch (status & SR_CMD_FROM_SRP_MASK) {
case SR_UP_REQUEST:
s = "RX_REQ";
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 5de71e44fc5a..b3466e084e84 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * FarSync WAN driver for Linux (2.6.x kernel version)
+/* FarSync WAN driver for Linux (2.6.x kernel version)
*
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
*
@@ -30,8 +29,7 @@
#include "farsync.h"
-/*
- * Module info
+/* Module info
*/
MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
@@ -49,20 +47,23 @@ MODULE_LICENSE("GPL");
/* Default parameters for the link
*/
#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
- * useful */
+ * useful
+ */
#define FST_TXQ_DEPTH 16 /* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
* and maximise throughput
*/
#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
- * network layer */
+ * network layer
+ */
#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
- * control from network layer */
+ * control from network layer
+ */
#define FST_MAX_MTU 8000 /* Huge but possible */
#define FST_DEF_MTU 1500 /* Common sane value */
-#define FST_TX_TIMEOUT (2*HZ)
+#define FST_TX_TIMEOUT (2 * HZ)
#ifdef ARPHRD_RAWHDLC
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
@@ -70,13 +71,12 @@ MODULE_LICENSE("GPL");
#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
#endif
-/*
- * Modules parameters and associated variables
+/* Modules parameters and associated variables
*/
static int fst_txq_low = FST_LOW_WATER_MARK;
static int fst_txq_high = FST_HIGH_WATER_MARK;
static int fst_max_reads = 7;
-static int fst_excluded_cards = 0;
+static int fst_excluded_cards;
static int fst_excluded_list[FST_MAX_CARDS];
module_param(fst_txq_low, int, 0);
@@ -105,9 +105,11 @@ module_param_array(fst_excluded_list, int, NULL, 0);
#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
- * configuration structure */
+ * configuration structure
+ */
#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
- * buffers */
+ * buffers
+ */
#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
#define LEN_RX_BUFFER 8192
@@ -377,8 +379,7 @@ struct fst_shared {
#define INTCSR_9054 0x68 /* Interrupt control/status register */
/* 9054 DMA Registers */
-/*
- * Note that we will be using DMA Channel 0 for copying rx data
+/* Note that we will be using DMA Channel 0 for copying rx data
* and Channel 1 for copying tx data
*/
#define DMAMODE0 0x80
@@ -421,7 +422,7 @@ struct buf_window {
/* Per port (line or channel) information
*/
struct fst_port_info {
- struct net_device *dev; /* Device struct - must be first */
+ struct net_device *dev; /* Device struct - must be first */
struct fst_card_info *card; /* Card we're associated with */
int index; /* Port index on the card */
int hwif; /* Line hardware (lineInterface copy) */
@@ -431,8 +432,7 @@ struct fst_port_info {
int txpos; /* Next Tx buffer to use */
int txipos; /* Next Tx buffer to check for free */
int start; /* Indication of start/stop to network */
- /*
- * A sixteen entry transmit queue
+ /* A sixteen entry transmit queue
*/
int txqs; /* index to get next buffer to tx */
int txqe; /* index to queue next packet */
@@ -479,9 +479,7 @@ struct fst_card_info {
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
#define port_to_dev(P) ((P)->dev)
-
-/*
- * Shared memory window access macros
+/* Shared memory window access macros
*
* We have a nice memory based structure above, which could be directly
* mapped on i386 but might not work on other architectures unless we use
@@ -491,16 +489,15 @@ struct fst_card_info {
*/
#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
-#define FST_RDB(C,E) readb ((C)->mem + WIN_OFFSET(E))
-#define FST_RDW(C,E) readw ((C)->mem + WIN_OFFSET(E))
-#define FST_RDL(C,E) readl ((C)->mem + WIN_OFFSET(E))
+#define FST_RDB(C, E) (readb((C)->mem + WIN_OFFSET(E)))
+#define FST_RDW(C, E) (readw((C)->mem + WIN_OFFSET(E)))
+#define FST_RDL(C, E) (readl((C)->mem + WIN_OFFSET(E)))
-#define FST_WRB(C,E,B) writeb ((B), (C)->mem + WIN_OFFSET(E))
-#define FST_WRW(C,E,W) writew ((W), (C)->mem + WIN_OFFSET(E))
-#define FST_WRL(C,E,L) writel ((L), (C)->mem + WIN_OFFSET(E))
+#define FST_WRB(C, E, B) (writeb((B), (C)->mem + WIN_OFFSET(E)))
+#define FST_WRW(C, E, W) (writew((W), (C)->mem + WIN_OFFSET(E)))
+#define FST_WRL(C, E, L) (writel((L), (C)->mem + WIN_OFFSET(E)))
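
The FST_RD/WR macros above lean on WIN_OFFSET(), which computes a shared-memory offset by pretending the fst_shared layout starts at SMC_BASE and taking a member's address, the same arithmetic offsetof() performs plus the base. A sketch with a stand-in layout (the member names and sizes here are illustrative, not the real fst_shared; the address-of-a-cast-constant trick is the classic pre-offsetof idiom the driver uses):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fst_shared_model {	/* stand-in layout, not the real one */
	uint8_t irqPort;
	uint16_t smcVersion;
	uint32_t txa;
};

#define SMC_BASE	0x00002000L
#define WIN_OFFSET(X)	((long)&(((struct fst_shared_model *)SMC_BASE)->X))

int main(void)
{
	printf("WIN_OFFSET(txa) = 0x%lx\n", WIN_OFFSET(txa));
	printf("base + offsetof = 0x%lx\n",
	       SMC_BASE + (long)offsetof(struct fst_shared_model, txa));
	return 0;
}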
-/*
- * Debug support
+/* Debug support
*/
#if FST_DEBUG
@@ -524,43 +521,41 @@ do { \
} while (0)
#endif
-/*
- * PCI ID lookup table
+/* PCI ID lookup table
*/
static const struct pci_device_id fst_pci_dev_id[] = {
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{0,} /* End */
};
MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
-/*
- * Device Driver Work Queues
+/* Device Driver Work Queues
*
- * So that we don't spend too much time processing events in the
- * Interrupt Service routine, we will declare a work queue per Card
+ * So that we don't spend too much time processing events in the
+ * Interrupt Service routine, we will declare a work queue per Card
* and make the ISR schedule a task in the queue for later execution.
* In the 2.4 Kernel we used to use the immediate queue for BH's
- * Now that they are gone, tasklets seem to be much better than work
+ * Now that they are gone, tasklets seem to be much better than work
* queues.
*/
@@ -578,18 +573,16 @@ static u64 fst_work_txq;
static u64 fst_work_intq;
static void
-fst_q_work_item(u64 * queue, int card_index)
+fst_q_work_item(u64 *queue, int card_index)
{
unsigned long flags;
u64 mask;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
spin_lock_irqsave(&fst_work_q_lock, flags);
- /*
- * Making an entry in the queue is simply a matter of setting
+ /* Making an entry in the queue is simply a matter of setting
* a bit for the card indicating that there is work to do in the
* bottom half for the card. Note the limitation of 64 cards.
* That ought to be enough
@@ -606,8 +599,7 @@ fst_process_tx_work_q(struct tasklet_struct *unused)
u64 work_txq;
int i;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
dbg(DBG_TX, "fst_process_tx_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
@@ -615,12 +607,11 @@ fst_process_tx_work_q(struct tasklet_struct *unused)
fst_work_txq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
- /*
- * Call the bottom half for each card with work waiting
+ /* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_txq & 0x01) {
- if (fst_card_array[i] != NULL) {
+ if (fst_card_array[i]) {
dbg(DBG_TX, "Calling tx bh for card %d\n", i);
do_bottom_half_tx(fst_card_array[i]);
}
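
fst_q_work_item() above records pending work as one bit per card in a u64, hence the 64-card limitation its comment concedes, and the tasklets drain those bits card by card. A userspace model of that producer/consumer shape, assuming the bit-per-index encoding the comment describes (locking omitted; the driver holds fst_work_q_lock around both sides):

#include <stdio.h>
#include <stdint.h>

static uint64_t work_q;

static void q_work_item(uint64_t *queue, int card_index)
{
	*queue |= 1ULL << card_index;	/* mark card as having work */
}

int main(void)
{
	int i;

	q_work_item(&work_q, 0);
	q_work_item(&work_q, 5);
	for (i = 0; i < 64; i++)		/* the tasklet's drain pass */
		if (work_q & (1ULL << i))
			printf("bottom half for card %d\n", i);
	return 0;
}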
@@ -636,8 +627,7 @@ fst_process_int_work_q(struct tasklet_struct *unused)
u64 work_intq;
int i;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
dbg(DBG_INTR, "fst_process_int_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
@@ -645,12 +635,11 @@ fst_process_int_work_q(struct tasklet_struct *unused)
fst_work_intq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
- /*
- * Call the bottom half for each card with work waiting
+ /* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_intq & 0x01) {
- if (fst_card_array[i] != NULL) {
+ if (fst_card_array[i]) {
dbg(DBG_INTR,
"Calling rx & tx bh for card %d\n", i);
do_bottom_half_rx(fst_card_array[i]);
@@ -683,19 +672,16 @@ fst_cpureset(struct fst_card_info *card)
dbg(DBG_ASS,
"Error in reading interrupt line register\n");
}
- /*
- * Assert PLX software reset and Am186 hardware reset
+ /* Assert PLX software reset and Am186 hardware reset
* and then deassert the PLX software reset but 186 still in reset
*/
outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
- /*
- * We are delaying here to allow the 9054 to reset itself
+ /* We are delaying here to allow the 9054 to reset itself
*/
usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
- /*
- * We are delaying here to allow the 9054 to reload its eeprom
+ /* We are delaying here to allow the 9054 to reload its eeprom
*/
usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
@@ -720,19 +706,17 @@ static inline void
fst_cpurelease(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
- /*
- * Force posted writes to complete
+ /* Force posted writes to complete
*/
- (void) readb(card->mem);
+ (void)readb(card->mem);
- /*
- * Release LRESET DO = 1
+ /* Release LRESET DO = 1
* Then release Local Hold, DO = 1
*/
outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
} else {
- (void) readb(card->ctlmem);
+ (void)readb(card->ctlmem);
}
}
@@ -742,7 +726,7 @@ static inline void
fst_clear_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
- (void) readb(card->ctlmem);
+ (void)readb(card->ctlmem);
} else {
/* Poke the appropriate PLX chip register (same as enabling interrupts)
*/
@@ -755,11 +739,10 @@ fst_clear_intr(struct fst_card_info *card)
static inline void
fst_enable_intr(struct fst_card_info *card)
{
- if (card->family == FST_FAMILY_TXU) {
+ if (card->family == FST_FAMILY_TXU)
outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
- } else {
+ else
outw(0x0543, card->pci_conf + INTCSR_9052);
- }
}
/* Disable card interrupts
@@ -767,11 +750,10 @@ fst_enable_intr(struct fst_card_info *card)
static inline void
fst_disable_intr(struct fst_card_info *card)
{
- if (card->family == FST_FAMILY_TXU) {
+ if (card->family == FST_FAMILY_TXU)
outl(0x00000000, card->pci_conf + INTCSR_9054);
- } else {
+ else
outw(0x0000, card->pci_conf + INTCSR_9052);
- }
}
/* Process the result of trying to pass a received frame up the stack
@@ -782,8 +764,7 @@ fst_process_rx_status(int rx_status, char *name)
switch (rx_status) {
case NET_RX_SUCCESS:
{
- /*
- * Nothing to do here
+ /* Nothing to do here
*/
break;
}
@@ -800,11 +781,10 @@ fst_process_rx_status(int rx_status, char *name)
static inline void
fst_init_dma(struct fst_card_info *card)
{
- /*
- * This is only required for the PLX 9054
+ /* This is only required for the PLX 9054
*/
if (card->family == FST_FAMILY_TXU) {
- pci_set_master(card->device);
+ pci_set_master(card->device);
outl(0x00020441, card->pci_conf + DMAMODE0);
outl(0x00020441, card->pci_conf + DMAMODE1);
outl(0x0, card->pci_conf + DMATHR);
@@ -819,8 +799,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
{
struct net_device *dev = port_to_dev(port);
- /*
- * Everything is now set, just tell the card to go
+ /* Everything is now set, just tell the card to go
*/
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
@@ -830,8 +809,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
netif_trans_update(dev);
}
-/*
- * Mark it for our own raw sockets interface
+/* Mark it for our own raw sockets interface
*/
static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -874,55 +852,47 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
dev->stats.rx_dropped++;
}
-/*
- * Receive a frame through the DMA
+/* Receive a frame through the DMA
*/
static inline void
fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
- /*
- * This routine will setup the DMA and start it
+ /* This routine will setup the DMA and start it
*/
dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
- if (card->dmarx_in_progress) {
+ if (card->dmarx_in_progress)
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
- }
outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
- /*
- * We use the dmarx_in_progress flag to flag the channel as busy
+ /* We use the dmarx_in_progress flag to flag the channel as busy
*/
card->dmarx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
}
-/*
- * Send a frame through the DMA
+/* Send a frame through the DMA
*/
static inline void
fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
- /*
- * This routine will setup the DMA and start it.
+ /* This routine will setup the DMA and start it.
*/
dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
- if (card->dmatx_in_progress) {
+ if (card->dmatx_in_progress)
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
- }
outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
outl(mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
- /*
- * We use the dmatx_in_progress to flag the channel as busy
+ /* We use the dmatx_in_progress to flag the channel as busy
*/
card->dmatx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
@@ -958,12 +928,11 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
mbval = FST_RDW(card, portMailbox[port->index][0]);
}
- if (safety > 0) {
+ if (safety > 0)
dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
- }
- if (mbval == NAK) {
+
+ if (mbval == NAK)
dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
- }
FST_WRW(card, portMailbox[port->index][0], cmd);
@@ -998,8 +967,7 @@ fst_op_lower(struct fst_port_info *port, unsigned int outputs)
fst_issue_cmd(port, SETV24O);
}
-/*
- * Setup port Rx buffers
+/* Setup port Rx buffers
*/
static void
fst_rx_config(struct fst_port_info *port)
@@ -1016,8 +984,8 @@ fst_rx_config(struct fst_port_info *port)
for (i = 0; i < NUM_RX_BUFFER; i++) {
offset = BUF_OFFSET(rxBuffer[pi][i][0]);
- FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
- FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, rxDescrRing[pi][i].ladr, (u16)offset);
+ FST_WRB(card, rxDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
@@ -1026,8 +994,7 @@ fst_rx_config(struct fst_port_info *port)
spin_unlock_irqrestore(&card->card_lock, flags);
}
-/*
- * Setup port Tx buffers
+/* Setup port Tx buffers
*/
static void
fst_tx_config(struct fst_port_info *port)
@@ -1044,8 +1011,8 @@ fst_tx_config(struct fst_port_info *port)
for (i = 0; i < NUM_TX_BUFFER; i++) {
offset = BUF_OFFSET(txBuffer[pi][i][0]);
- FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
- FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, txDescrRing[pi][i].ladr, (u16)offset);
+ FST_WRB(card, txDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
FST_WRB(card, txDescrRing[pi][i].bits, 0);
}
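
Both ring-initialisation loops above split a card-memory buffer offset across two descriptor fields: the low 16 bits go in ladr and bits 16-23 in hadr, giving the card a 24-bit address. The split and its inverse, as a standalone sketch (helper names invented for illustration):

    /* Split a 24-bit card-memory offset the way the ring setup does. */
    static void split_offset(u32 offset, u16 *ladr, u8 *hadr)
    {
            *ladr = (u16)offset;            /* low 16 bits */
            *hadr = (u8)(offset >> 16);     /* bits 16..23 */
    }

    static u32 join_offset(u16 ladr, u8 hadr)
    {
            /* e.g. join_offset(0x3456, 0x12) == 0x123456 */
            return ((u32)hadr << 16) | ladr;
    }
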
@@ -1069,16 +1036,14 @@ fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
ais = FST_RDB(card, suStatus.alarmIndicationSignal);
if (los) {
- /*
- * Lost the link
+ /* Lost the link
*/
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier off\n");
netif_carrier_off(port_to_dev(port));
}
} else {
- /*
- * Link available
+ /* Link available
*/
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier on\n");
@@ -1110,7 +1075,7 @@ fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
signals = FST_RDL(card, v24DebouncedSts[port->index]);
- if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD)) {
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD active\n");
@@ -1132,8 +1097,7 @@ fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
{
struct net_device *dev = port_to_dev(port);
- /*
- * Increment the appropriate error counter
+ /* Increment the appropriate error counter
*/
dev->stats.rx_errors++;
if (dmabits & RX_OFLO) {
@@ -1168,15 +1132,14 @@ fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
int pi;
pi = port->index;
- /*
- * Discard buffer descriptors until we see the start of the
+ /* Discard buffer descriptors until we see the start of the
* next frame. Note that for long frames this could be in
- * a subsequent interrupt.
+ * a subsequent interrupt.
*/
i = 0;
while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
if (++i > NUM_RX_BUFFER) {
dbg(DBG_ASS, "intr_rx: Discarding more bufs"
" than we have\n");
@@ -1190,11 +1153,9 @@ fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
/* Discard the terminal buffer */
if (!(dmabits & DMA_OWN)) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
}
port->rxpos = rxp;
- return;
-
}
/* Rx complete interrupt
@@ -1219,17 +1180,15 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
pi, rxp);
return;
}
- if (card->dmarx_in_progress) {
+ if (card->dmarx_in_progress)
return;
- }
/* Get buffer length */
len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
/* Discard the CRC */
len -= 2;
if (len == 0) {
- /*
- * This seems to happen on the TE1 interface sometimes
+ /* This seems to happen on the TE1 interface sometimes
* so throw the frame away and log the event.
*/
pr_err("Frame received with 0 length. Card %d Port %d\n",
@@ -1237,7 +1196,7 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
@@ -1254,7 +1213,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
}
/* Allocate SKB */
- if ((skb = dev_alloc_skb(len)) == NULL) {
+ skb = dev_alloc_skb(len);
+ if (!skb) {
dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
dev->stats.rx_dropped++;
@@ -1262,18 +1222,17 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
- /*
- * We know the length we need to receive, len.
+ /* We know the length we need to receive, len.
* It's not worth using the DMA for reads of less than
* FST_MIN_DMA_LEN
*/
- if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
+ if (len < FST_MIN_DMA_LEN || card->family == FST_FAMILY_TXP) {
memcpy_fromio(skb_put(skb, len),
card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
len);
@@ -1307,12 +1266,11 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
}
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
}
-/*
- * The bottom halfs to the ISR
+/* The bottom half of the ISR
*
*/
@@ -1326,8 +1284,7 @@ do_bottom_half_tx(struct fst_card_info *card)
unsigned long flags;
struct net_device *dev;
- /*
- * Find a free buffer for the transmit
+ /* Find a free buffer for the transmit
* Step through each port on this card
*/
@@ -1340,39 +1297,36 @@ do_bottom_half_tx(struct fst_card_info *card)
while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
DMA_OWN) &&
!(card->dmatx_in_progress)) {
- /*
- * There doesn't seem to be a txdone event per-se
+		/* There doesn't seem to be a txdone event per se
* We seem to have to deduce it, by checking the DMA_OWN
* bit on the next buffer we think we can use
*/
spin_lock_irqsave(&card->card_lock, flags);
- if ((txq_length = port->txqe - port->txqs) < 0) {
- /*
- * This is the case where one has wrapped and the
+ txq_length = port->txqe - port->txqs;
+ if (txq_length < 0) {
+ /* This is the case where one has wrapped and the
* maths gives us a negative number
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > 0) {
- /*
- * There is something to send
+ /* There is something to send
*/
spin_lock_irqsave(&card->card_lock, flags);
skb = port->txq[port->txqs];
port->txqs++;
- if (port->txqs == FST_TXQ_DEPTH) {
+ if (port->txqs == FST_TXQ_DEPTH)
port->txqs = 0;
- }
+
spin_unlock_irqrestore(&card->card_lock, flags);
- /*
- * copy the data and set the required indicators on the
+ /* copy the data and set the required indicators on the
* card.
*/
FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
cnv_bcnt(skb->len));
- if ((skb->len < FST_MIN_DMA_LEN) ||
- (card->family == FST_FAMILY_TXP)) {
+ if (skb->len < FST_MIN_DMA_LEN ||
+ card->family == FST_FAMILY_TXP) {
/* Enqueue the packet with normal io */
memcpy_toio(card->mem +
BUF_OFFSET(txBuffer[pi]
@@ -1401,8 +1355,7 @@ do_bottom_half_tx(struct fst_card_info *card)
}
if (++port->txpos >= NUM_TX_BUFFER)
port->txpos = 0;
- /*
- * If we have flow control on, can we now release it?
+ /* If we have flow control on, can we now release it?
*/
if (port->start) {
if (txq_length < fst_txq_low) {
@@ -1413,8 +1366,7 @@ do_bottom_half_tx(struct fst_card_info *card)
}
dev_kfree_skb(skb);
} else {
- /*
- * Nothing to send so break out of the while loop
+ /* Nothing to send so break out of the while loop
*/
break;
}
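
The txq_length computation above is the standard circular-buffer occupancy rule: producer index minus consumer index, plus the queue depth back when the subtraction has wrapped negative. As a standalone sketch (fst_txq_len is a hypothetical helper, not in the driver):

    /* Frames queued between consumer txqs and producer txqe. */
    static int fst_txq_len(int txqe, int txqs)
    {
            int len = txqe - txqs;

            if (len < 0)            /* producer has wrapped past the end */
                    len += FST_TXQ_DEPTH;
            /* e.g. txqe = 2, txqs = 14, FST_TXQ_DEPTH = 16 -> 4 queued */
            return len;
    }
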
@@ -1438,8 +1390,7 @@ do_bottom_half_rx(struct fst_card_info *card)
while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
& DMA_OWN) && !(card->dmarx_in_progress)) {
if (rx_count > fst_max_reads) {
- /*
- * Don't spend forever in receive processing
+ /* Don't spend forever in receive processing
* Schedule another event
*/
fst_q_work_item(&fst_work_intq, card->card_no);
@@ -1452,8 +1403,7 @@ do_bottom_half_rx(struct fst_card_info *card)
}
}
-/*
- * The interrupt service routine
+/* The interrupt service routine
* Dev_id is our fst_card_info pointer
*/
static irqreturn_t
@@ -1468,8 +1418,7 @@ fst_intr(int dummy, void *dev_id)
unsigned int do_card_interrupt;
unsigned int int_retry_count;
- /*
- * Check to see if the interrupt was for this card
+ /* Check to see if the interrupt was for this card
* return if not
* Note that the call to clear the interrupt is important
*/
@@ -1478,10 +1427,9 @@ fst_intr(int dummy, void *dev_id)
pr_err("Interrupt received for card %d in a non running state (%d)\n",
card->card_no, card->state);
- /*
- * It is possible to really be running, i.e. we have re-loaded
+ /* It is possible to really be running, i.e. we have re-loaded
* a running card
- * Clear and reprime the interrupt source
+ * Clear and reprime the interrupt source
*/
fst_clear_intr(card);
return IRQ_HANDLED;
@@ -1490,8 +1438,7 @@ fst_intr(int dummy, void *dev_id)
/* Clear and reprime the interrupt source */
fst_clear_intr(card);
- /*
- * Is the interrupt for this card (handshake == 1)
+ /* Is the interrupt for this card (handshake == 1)
*/
do_card_interrupt = 0;
if (FST_RDB(card, interruptHandshake) == 1) {
@@ -1500,13 +1447,11 @@ fst_intr(int dummy, void *dev_id)
FST_WRB(card, interruptHandshake, 0xEE);
}
if (card->family == FST_FAMILY_TXU) {
- /*
- * Is it a DMA Interrupt
+ /* Is it a DMA Interrupt
*/
dma_intcsr = inl(card->pci_conf + INTCSR_9054);
if (dma_intcsr & 0x00200000) {
- /*
- * DMA Channel 0 (Rx transfer complete)
+ /* DMA Channel 0 (Rx transfer complete)
*/
dbg(DBG_RX, "DMA Rx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR0);
@@ -1517,8 +1462,7 @@ fst_intr(int dummy, void *dev_id)
do_card_interrupt += FST_RX_DMA_INT;
}
if (dma_intcsr & 0x00400000) {
- /*
- * DMA Channel 1 (Tx transfer complete)
+ /* DMA Channel 1 (Tx transfer complete)
*/
dbg(DBG_TX, "DMA Tx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR1);
@@ -1529,8 +1473,7 @@ fst_intr(int dummy, void *dev_id)
}
}
- /*
- * Have we been missing Interrupts
+	/* Have we been missing interrupts?
*/
int_retry_count = FST_RDL(card, interruptRetryCount);
if (int_retry_count) {
@@ -1539,9 +1482,8 @@ fst_intr(int dummy, void *dev_id)
FST_WRL(card, interruptRetryCount, 0);
}
- if (!do_card_interrupt) {
+ if (!do_card_interrupt)
return IRQ_HANDLED;
- }
/* Schedule the bottom half of the ISR */
fst_q_work_item(&fst_work_intq, card->card_no);
@@ -1611,7 +1553,7 @@ fst_intr(int dummy, void *dev_id)
rdidx = 0;
}
FST_WRB(card, interruptEvent.rdindex, rdidx);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle
@@ -1635,7 +1577,8 @@ check_started_ok(struct fst_card_info *card)
return;
}
/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
- if ((i = FST_RDB(card, taskStatus)) == 0x01) {
+ i = FST_RDB(card, taskStatus);
+ if (i == 0x01) {
card->state = FST_RUNNING;
} else if (i == 0xFF) {
pr_err("Firmware initialisation failed. Card halted\n");
@@ -1665,8 +1608,8 @@ set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
int err;
unsigned char my_framing;
- /* Set things according to the user set valid flags
- * Several of the old options have been invalidated/replaced by the
+ /* Set things according to the user set valid flags
+ * Several of the old options have been invalidated/replaced by the
* generic hdlc package.
*/
err = 0;
@@ -1740,9 +1683,8 @@ set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
#endif
}
#if FST_DEBUG
- if (info->valid & FSTVAL_DEBUG) {
+ if (info->valid & FSTVAL_DEBUG)
fst_debug_mask = info->debug;
- }
#endif
return err;
@@ -1754,7 +1696,7 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
{
int i;
- memset(info, 0, sizeof (struct fstioc_info));
+ memset(info, 0, sizeof(struct fstioc_info));
i = port->index;
info->kernelVersion = LINUX_VERSION_CODE;
@@ -1787,27 +1729,23 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
info->cardMode = FST_RDW(card, cardMode);
info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
- /*
- * The T2U can report cable presence for both A or B
- * in bits 0 and 1 of cableStatus. See which port we are and
+	/* The T2U can report cable presence for both ports A and B
+ * in bits 0 and 1 of cableStatus. See which port we are and
* do the mapping.
*/
if (card->family == FST_FAMILY_TXU) {
if (port->index == 0) {
- /*
- * Port A
+ /* Port A
*/
info->cableStatus = info->cableStatus & 1;
} else {
- /*
- * Port B
+ /* Port B
*/
info->cableStatus = info->cableStatus >> 1;
info->cableStatus = info->cableStatus & 1;
}
}
- /*
- * Some additional bits if we are TE1
+ /* Some additional bits if we are TE1
*/
if (card->type == FST_TYPE_TE1) {
info->lineSpeed = FST_RDL(card, suConfig.dataRate);
@@ -1851,14 +1789,12 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
sync_serial_settings sync;
int i;
- if (ifr->ifr_settings.size != sizeof (sync)) {
+ if (ifr->ifr_settings.size != sizeof(sync))
return -ENOMEM;
- }
if (copy_from_user
- (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
+ (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof(sync)))
return -EFAULT;
- }
if (sync.loopback)
return -EINVAL;
@@ -1951,12 +1887,11 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
ifr->ifr_settings.type = IF_IFACE_X21;
break;
}
- if (ifr->ifr_settings.size == 0) {
+ if (ifr->ifr_settings.size == 0)
return 0; /* only type requested */
- }
- if (ifr->ifr_settings.size < sizeof (sync)) {
+
+ if (ifr->ifr_settings.size < sizeof(sync))
return -ENOMEM;
- }
i = port->index;
memset(&sync, 0, sizeof(sync));
@@ -1966,11 +1901,10 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync)))
return -EFAULT;
- }
- ifr->ifr_settings.size = sizeof (sync);
+ ifr->ifr_settings.size = sizeof(sync);
return 0;
}
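
fst_get_iface() above follows the generic-HDLC IF_GET_IFACE calling convention: size 0 means the caller only wants the interface type, a buffer smaller than sync_serial_settings earns -ENOMEM, and on success the settings are copied out and the actual size written back. Condensed to its skeleton (illustrative, with the error paths exactly as above):

    ifr->ifr_settings.type = IF_IFACE_X21;          /* or V.35, V.24, ... */
    if (ifr->ifr_settings.size == 0)
            return 0;                               /* type-only query */
    if (ifr->ifr_settings.size < sizeof(sync))
            return -ENOMEM;                         /* buffer too small */
    if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync)))
            return -EFAULT;
    ifr->ifr_settings.size = sizeof(sync);          /* report bytes written */
    return 0;
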
@@ -2008,21 +1942,19 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* First copy in the header with the length and offset of data
* to write
*/
- if (ifr->ifr_data == NULL) {
+ if (!ifr->ifr_data)
return -EINVAL;
- }
+
if (copy_from_user(&wrthdr, ifr->ifr_data,
- sizeof (struct fstioc_write))) {
+ sizeof(struct fstioc_write)))
return -EFAULT;
- }
/* Sanity check the parameters. We don't support partial writes
* when going over the top
*/
if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
- wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
+ wrthdr.size + wrthdr.offset > FST_MEMSIZE)
return -ENXIO;
- }
/* Now copy the data to the card. */
@@ -2037,9 +1969,9 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Writes to the memory of a card in the reset state constitute
* a download
*/
- if (card->state == FST_RESET) {
+ if (card->state == FST_RESET)
card->state = FST_DOWNLOAD;
- }
+
return 0;
case FSTGETCONF:
@@ -2059,21 +1991,18 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
- if (ifr->ifr_data == NULL) {
+ if (!ifr->ifr_data)
return -EINVAL;
- }
gather_conf_info(card, port, &info);
- if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
+ if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
return -EFAULT;
- }
+
return 0;
case FSTSETCONF:
-
- /*
- * Most of the settings have been moved to the generic ioctls
+ /* Most of the settings have been moved to the generic ioctls
* this just covers debug and board ident now
*/
@@ -2082,9 +2011,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
card->card_no, card->state);
return -EIO;
}
- if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
+ if (copy_from_user(&info, ifr->ifr_data, sizeof(info)))
return -EFAULT;
- }
return set_conf_from_info(card, port, &info);
@@ -2150,7 +2078,7 @@ fst_openport(struct fst_port_info *port)
port->run = 1;
signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
- if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD))
netif_carrier_on(port_to_dev(port));
else
@@ -2159,7 +2087,6 @@ fst_openport(struct fst_port_info *port)
port->txqe = 0;
port->txqs = 0;
}
-
}
static void
@@ -2185,7 +2112,7 @@ fst_open(struct net_device *dev)
port = dev_to_port(dev);
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ return -EBUSY;
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
@@ -2220,9 +2147,9 @@ fst_close(struct net_device *dev)
netif_stop_queue(dev);
fst_closeport(dev_to_port(dev));
- if (port->mode != FST_RAW) {
+ if (port->mode != FST_RAW)
hdlc_close(dev);
- }
+
module_put(THIS_MODULE);
return 0;
}
@@ -2230,8 +2157,7 @@ fst_close(struct net_device *dev)
static int
fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
{
- /*
- * Setting currently fixed in FarSync card so we check and forget
+ /* Setting currently fixed in FarSync card so we check and forget
*/
if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
@@ -2289,23 +2215,21 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /*
- * We are always going to queue the packet
+ /* We are always going to queue the packet
* so that the bottom half is the only place we tx from
* Check there is room in the port txq
*/
spin_lock_irqsave(&card->card_lock, flags);
- if ((txq_length = port->txqe - port->txqs) < 0) {
- /*
- * This is the case where the next free has wrapped but the
+ txq_length = port->txqe - port->txqs;
+ if (txq_length < 0) {
+ /* This is the case where the next free has wrapped but the
* last used hasn't
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > fst_txq_high) {
- /*
- * We have got enough buffers in the pipeline. Ask the network
+ /* We have got enough buffers in the pipeline. Ask the network
* layer to stop sending frames down
*/
netif_stop_queue(dev);
@@ -2313,8 +2237,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (txq_length == FST_TXQ_DEPTH - 1) {
- /*
- * This shouldn't have happened but such is life
+ /* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
dev->stats.tx_errors++;
@@ -2323,8 +2246,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /*
- * queue the buffer
+ /* queue the buffer
*/
spin_lock_irqsave(&card->card_lock, flags);
port->txq[port->txqe] = skb;
@@ -2340,8 +2262,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/*
- * Card setup having checked hardware resources.
+/* Card setup having checked hardware resources.
* Should be pretty bizarre if we get an error here (kernel memory
* exhaustion is one possibility). If we do see a problem we report it
* via a printk and leave the corresponding interface and all that follow
@@ -2371,7 +2292,7 @@ fst_init_card(struct fst_card_info *card)
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
- i, -err);
+ i, -err);
while (i--)
unregister_hdlc_device(card->ports[i].dev);
return err;
@@ -2393,14 +2314,13 @@ static const struct net_device_ops fst_ops = {
.ndo_tx_timeout = fst_tx_timeout,
};
-/*
- * Initialise card when detected.
+/* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
*/
static int
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int no_of_cards_added = 0;
+ static int no_of_cards_added;
struct fst_card_info *card;
int err = 0;
int i;
@@ -2411,17 +2331,15 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#if FST_DEBUG
dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
#endif
- /*
- * We are going to be clever and allow certain cards not to be
+ /* We are going to be clever and allow certain cards not to be
* configured. An exclude list can be provided in /etc/modules.conf
*/
if (fst_excluded_cards != 0) {
- /*
- * There are cards to exclude
+ /* There are cards to exclude
*
*/
for (i = 0; i < fst_excluded_cards; i++) {
- if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
+ if (pdev->devfn >> 3 == fst_excluded_list[i]) {
pr_info("FarSync PCI device %d not assigned\n",
(pdev->devfn) >> 3);
return -EBUSY;
@@ -2431,16 +2349,18 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Allocate driver private data */
card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
- if (card == NULL)
+ if (!card)
return -ENOMEM;
/* Try to enable the device */
- if ((err = pci_enable_device(pdev)) != 0) {
+ err = pci_enable_device(pdev);
+ if (err) {
pr_err("Failed to enable card. Err %d\n", -err);
goto enable_fail;
}
- if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
+ err = pci_request_regions(pdev, "FarSync");
+ if (err) {
pr_err("Failed to allocate regions. Err %d\n", -err);
goto regions_fail;
}
@@ -2449,12 +2369,14 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->pci_conf = pci_resource_start(pdev, 1);
card->phys_mem = pci_resource_start(pdev, 2);
card->phys_ctlmem = pci_resource_start(pdev, 3);
- if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
+ card->mem = ioremap(card->phys_mem, FST_MEMSIZE);
+ if (!card->mem) {
pr_err("Physical memory remap failed\n");
err = -ENODEV;
goto ioremap_physmem_fail;
}
- if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
+ card->ctlmem = ioremap(card->phys_ctlmem, 0x10);
+ if (!card->ctlmem) {
pr_err("Control memory remap failed\n");
err = -ENODEV;
goto ioremap_ctlmem_fail;
@@ -2474,19 +2396,20 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->family = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T4P))
? FST_FAMILY_TXP : FST_FAMILY_TXU;
- if ((ent->driver_data == FST_TYPE_T1U) ||
- (ent->driver_data == FST_TYPE_TE1))
+ if (ent->driver_data == FST_TYPE_T1U ||
+ ent->driver_data == FST_TYPE_TE1)
card->nports = 1;
else
card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
card->state = FST_UNINIT;
- spin_lock_init ( &card->card_lock );
+ spin_lock_init(&card->card_lock);
- for ( i = 0 ; i < card->nports ; i++ ) {
+ for (i = 0; i < card->nports; i++) {
struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
hdlc_device *hdlc;
+
if (!dev) {
while (i--)
free_netdev(card->ports[i].dev);
@@ -2495,29 +2418,29 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto hdlcdev_fail;
}
card->ports[i].dev = dev;
- card->ports[i].card = card;
- card->ports[i].index = i;
- card->ports[i].run = 0;
+ card->ports[i].card = card;
+ card->ports[i].index = i;
+ card->ports[i].run = 0;
hdlc = dev_to_hdlc(dev);
- /* Fill in the net device info */
+ /* Fill in the net device info */
/* Since this is a PCI setup this is purely
* informational. Give them the buffer addresses
* and basic card I/O.
*/
- dev->mem_start = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][0][0]);
- dev->mem_end = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
- dev->base_addr = card->pci_conf;
- dev->irq = card->irq;
+ dev->mem_start = card->phys_mem
+ + BUF_OFFSET(txBuffer[i][0][0]);
+ dev->mem_end = card->phys_mem
+ + BUF_OFFSET(txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
+ dev->base_addr = card->pci_conf;
+ dev->irq = card->irq;
dev->netdev_ops = &fst_ops;
dev->tx_queue_len = FST_TX_QUEUE_LEN;
dev->watchdog_timeo = FST_TX_TIMEOUT;
- hdlc->attach = fst_attach;
- hdlc->xmit = fst_start_xmit;
+ hdlc->attach = fst_attach;
+ hdlc->xmit = fst_start_xmit;
}
card->device = pdev;
@@ -2549,13 +2472,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
- /*
- * Allocate a dma buffer for transmit and receives
+ /* Allocate a dma buffer for transmit and receives
*/
card->rx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->rx_dma_handle_card, GFP_KERNEL);
- if (card->rx_dma_handle_host == NULL) {
+ if (!card->rx_dma_handle_host) {
pr_err("Could not allocate rx dma buffer\n");
err = -ENOMEM;
goto rx_dma_fail;
@@ -2563,7 +2485,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->tx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->tx_dma_handle_card, GFP_KERNEL);
- if (card->tx_dma_handle_host == NULL) {
+ if (!card->tx_dma_handle_host) {
pr_err("Could not allocate tx dma buffer\n");
err = -ENOMEM;
goto tx_dma_fail;
@@ -2598,8 +2520,7 @@ enable_fail:
return err;
}
-/*
- * Cleanup and close down a card
+/* Cleanup and close down a card
*/
static void
fst_remove_one(struct pci_dev *pdev)
@@ -2611,6 +2532,7 @@ fst_remove_one(struct pci_dev *pdev)
for (i = 0; i < card->nports; i++) {
struct net_device *dev = port_to_dev(&card->ports[i]);
+
unregister_hdlc_device(dev);
}
@@ -2621,8 +2543,7 @@ fst_remove_one(struct pci_dev *pdev)
iounmap(card->mem);
pci_release_regions(pdev);
if (card->family == FST_FAMILY_TXU) {
- /*
- * Free dma buffers
+ /* Free dma buffers
*/
dma_free_coherent(&card->device->dev, FST_MAX_MTU,
card->rx_dma_handle_host,
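
fst_add_one() above is a textbook example of the kernel's goto-based unwind: each successfully acquired resource gets a label, and any later failure jumps to the label that releases everything acquired so far, in reverse order. The pattern in miniature (generic names, not the driver's):

    static int probe_example(struct pci_dev *pdev)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    goto enable_fail;

            err = pci_request_regions(pdev, "example");
            if (err)
                    goto regions_fail;

            return 0;                       /* success: keep everything */

    regions_fail:
            pci_disable_device(pdev);       /* undo in reverse order */
    enable_fail:
            return err;
    }
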
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 7eac6a3e1cde..39f05fabbfa4 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1171,9 +1171,8 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
- if (!uhdlc_priv) {
+ if (!uhdlc_priv)
return -ENOMEM;
- }
dev_set_drvdata(&pdev->dev, uhdlc_priv);
uhdlc_priv->dev = &pdev->dev;
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 058e48182838..0d19e39fec86 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -47,7 +47,6 @@
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
-
static inline struct net_device *port_to_dev(port_t *port)
{
return port->dev;
@@ -59,12 +58,18 @@ static inline int sca_intr_status(card_t *card)
u8 isr0 = sca_in(ISR0, card);
u8 isr1 = sca_in(ISR1, card);
- if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
- if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
- if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
- if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
- if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
- if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
+ if (isr1 & 0x03)
+ result |= SCA_INTR_DMAC_RX(0);
+ if (isr1 & 0x0C)
+ result |= SCA_INTR_DMAC_TX(0);
+ if (isr1 & 0x30)
+ result |= SCA_INTR_DMAC_RX(1);
+ if (isr1 & 0xC0)
+ result |= SCA_INTR_DMAC_TX(1);
+ if (isr0 & 0x0F)
+ result |= SCA_INTR_MSCI(0);
+ if (isr0 & 0xF0)
+ result |= SCA_INTR_MSCI(1);
if (!(result & SCA_INTR_DMAC_TX(0)))
if (sca_in(DSR_TX(0), card) & DSR_EOM)
@@ -76,7 +81,7 @@ static inline int sca_intr_status(card_t *card)
return result;
}
-static inline port_t* dev_to_port(struct net_device *dev)
+static inline port_t *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -87,7 +92,6 @@ static inline u16 next_desc(port_t *port, u16 desc, int transmit)
: port_to_card(port)->rx_ring_buffers);
}
-
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
@@ -98,14 +102,12 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
transmit * rx_buffs + desc;
}
-
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
-
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
@@ -118,14 +120,12 @@ static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
#endif
}
-
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port_to_card(port)->buff_offset +
desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
-
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
@@ -143,7 +143,6 @@ static inline void sca_set_carrier(port_t *port)
}
}
-
static void sca_init_port(port_t *port)
{
card_t *card = port_to_card(port);
@@ -213,13 +212,12 @@ static void sca_init_port(port_t *port)
sca_set_carrier(port);
}
-
#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
u16 msci = get_msci(port);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
/* Reset MSCI TX underrun and CDCD status bit */
@@ -236,7 +234,6 @@ static inline void sca_msci_intr(port_t *port)
}
#endif
-
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
u16 rxin)
{
@@ -265,8 +262,9 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
openwin(card, page + 1);
memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
- } else
+ } else {
memcpy_fromio(skb->data, winbase(card) + buff, len);
+ }
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
@@ -282,7 +280,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
netif_rx(skb);
}
-
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
@@ -304,7 +301,7 @@ static inline void sca_rx_intr(port_t *port)
pkt_desc __iomem *desc;
u32 cda = sca_inw(dmac + CDAL, card);
- if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+		if (cda >= desc_off && cda < desc_off + sizeof(pkt_desc))
break; /* No frame received */
desc = desc_address(port, port->rxin, 0);
@@ -322,8 +319,9 @@ static inline void sca_rx_intr(port_t *port)
dev->stats.rx_crc_errors++;
if (stat & ST_RX_EOM)
port->rxpart = 0; /* received last fragment */
- } else
+ } else {
sca_rx(card, port, desc, port->rxin);
+ }
/* Set new error descriptor address */
sca_outw(desc_off, dmac + EDAL, card);
@@ -334,13 +332,12 @@ static inline void sca_rx_intr(port_t *port)
sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
-
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
u16 dmac = get_dmac_tx(port);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u8 stat;
spin_lock(&port->lock);
@@ -356,7 +353,8 @@ static inline void sca_tx_intr(port_t *port)
u32 desc_off = desc_offset(port, port->txlast, 1);
u32 cda = sca_inw(dmac + CDAL, card);
- if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+
+		if (cda >= desc_off && cda < desc_off + sizeof(pkt_desc))
break; /* Transmitter is/will_be sending this frame */
desc = desc_address(port, port->txlast, 1);
@@ -370,8 +368,7 @@ static inline void sca_tx_intr(port_t *port)
spin_unlock(&port->lock);
}
-
-static irqreturn_t sca_intr(int irq, void* dev_id)
+static irqreturn_t sca_intr(int irq, void *dev_id)
{
card_t *card = dev_id;
int i;
@@ -379,10 +376,11 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
int handled = 0;
u8 page = sca_get_page(card);
- while((stat = sca_intr_status(card)) != 0) {
+ while ((stat = sca_intr_status(card)) != 0) {
handled = 1;
for (i = 0; i < 2; i++) {
port_t *port = get_port(card, i);
+
if (port) {
if (stat & SCA_INTR_MSCI(i))
sca_msci_intr(port);
@@ -400,15 +398,13 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
return IRQ_RETVAL(handled);
}
-
static void sca_set_port(port_t *port)
{
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md2 = sca_in(msci + MD2, card);
unsigned int tmc, br = 10, brv = 1024;
-
if (port->settings.clock_rate > 0) {
/* Try lower br for better accuracy*/
do {
@@ -417,14 +413,15 @@ static void sca_set_port(port_t *port)
/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
tmc = CLOCK_BASE / brv / port->settings.clock_rate;
- }while (br > 1 && tmc <= 128);
+ } while (br > 1 && tmc <= 128);
if (tmc < 1) {
tmc = 1;
br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
brv = 1;
- } else if (tmc > 255)
+ } else if (tmc > 255) {
tmc = 256; /* tmc=0 means 256 - low baud rates */
+ }
port->settings.clock_rate = CLOCK_BASE / brv / tmc;
} else {
@@ -450,34 +447,50 @@ static void sca_set_port(port_t *port)
md2 &= ~MD2_LOOPBACK;
sca_out(md2, msci + MD2, card);
-
}
-
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md0, md2;
- switch(port->encoding) {
- case ENCODING_NRZ: md2 = MD2_NRZ; break;
- case ENCODING_NRZI: md2 = MD2_NRZI; break;
- case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
- case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
- default: md2 = MD2_MANCHESTER;
+ switch (port->encoding) {
+ case ENCODING_NRZ:
+ md2 = MD2_NRZ;
+ break;
+ case ENCODING_NRZI:
+ md2 = MD2_NRZI;
+ break;
+ case ENCODING_FM_MARK:
+ md2 = MD2_FM_MARK;
+ break;
+ case ENCODING_FM_SPACE:
+ md2 = MD2_FM_SPACE;
+ break;
+ default:
+ md2 = MD2_MANCHESTER;
}
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
- switch(port->parity) {
- case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
- case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
- case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
- case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
- default: md0 = MD0_HDLC | MD0_CRC_NONE;
+ switch (port->parity) {
+ case PARITY_CRC16_PR0:
+ md0 = MD0_HDLC | MD0_CRC_16_0;
+ break;
+ case PARITY_CRC16_PR1:
+ md0 = MD0_HDLC | MD0_CRC_16;
+ break;
+ case PARITY_CRC16_PR0_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU_0;
+ break;
+ case PARITY_CRC16_PR1_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU;
+ break;
+ default:
+ md0 = MD0_HDLC | MD0_CRC_NONE;
}
sca_out(CMD_RESET, msci + CMD, card);
@@ -494,9 +507,9 @@ static void sca_open(struct net_device *dev)
sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
/* We're using the following interrupts:
- - TXINT (DMAC completed all transmisions, underrun or DCD change)
- - all DMA interrupts
-*/
+	 * - TXINT (DMAC completed all transmissions, underrun or DCD change)
+ * - all DMA interrupts
+ */
sca_set_carrier(port);
/* MSCI TX INT and RX INT A IRQ enable */
@@ -517,11 +530,10 @@ static void sca_open(struct net_device *dev)
netif_start_queue(dev);
}
-
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
/* reset channel */
sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
@@ -535,7 +547,6 @@ static void sca_close(struct net_device *dev)
netif_stop_queue(dev);
}
-
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -558,7 +569,6 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
@@ -613,7 +623,6 @@ static void sca_dump_rings(struct net_device *dev)
}
#endif /* DEBUG_RINGS */
-
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -645,8 +654,9 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy_toio(winbase(card) + buff, skb->data, maxlen);
openwin(card, page + 1);
memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
- } else
+ } else {
memcpy_toio(winbase(card) + buff, skb->data, len);
+ }
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
@@ -670,7 +680,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
#ifdef NEED_DETECT_RAM
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
@@ -699,7 +708,6 @@ static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
}
#endif /* NEED_DETECT_RAM */
-
static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
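
The sca_set_port() hunk above tunes the baud-rate generator, which obeys rate = CLOCK_BASE / TMC / 2^BR: the loop walks BR downward until TMC (1..256) is large enough to give fine-grained control. A worked sketch with assumed numbers (pick_divisors and the 9830400 Hz clock are illustrative, not the driver's):

    static unsigned int pick_divisors(unsigned int clock_base, unsigned int rate,
                                      unsigned int *tmc_p, unsigned int *br_p)
    {
            unsigned int tmc, br = 10, brv = 1024;  /* brv tracks 2^br */

            do {
                    br--;
                    brv >>= 1;
                    tmc = clock_base / brv / rate;
            } while (br > 1 && tmc <= 128);

            if (tmc < 1) {
                    tmc = 1;
                    br = 0;
                    brv = 1;
            } else if (tmc > 255) {
                    tmc = 256;              /* register value 0 encodes 256 */
            }
            *tmc_p = tmc;
            *br_p = br;
            /* clock_base = 9830400, rate = 9600 ends with br = 2, brv = 4,
             * tmc = 256: 9830400 / 4 / 256 == 9600 exactly.
             */
            return clock_base / brv / tmc;  /* the achieved rate */
    }
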
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 9f60e3969bf8..b89b03a6aba7 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -41,20 +41,20 @@
#define NAPI_WEIGHT 16
-#define get_msci(port) (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
-#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
-#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+#define get_msci(port) ((port)->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
+#define get_dmac_rx(port) ((port)->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) ((port)->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
-#define sca_in(reg, card) readb(card->scabase + (reg))
-#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
-#define sca_inw(reg, card) readw(card->scabase + (reg))
-#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
-#define sca_inl(reg, card) readl(card->scabase + (reg))
-#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
+#define sca_in(reg, card) readb((card)->scabase + (reg))
+#define sca_out(value, reg, card) writeb(value, (card)->scabase + (reg))
+#define sca_inw(reg, card) readw((card)->scabase + (reg))
+#define sca_outw(value, reg, card) writew(value, (card)->scabase + (reg))
+#define sca_inl(reg, card) readl((card)->scabase + (reg))
+#define sca_outl(value, reg, card) writel(value, (card)->scabase + (reg))
static int sca_poll(struct napi_struct *napi, int budget);
-static inline port_t* dev_to_port(struct net_device *dev)
+static inline port_t *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -81,14 +81,12 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
}
-
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
-
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
@@ -96,14 +94,12 @@ static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
desc_offset(port, desc, transmit));
}
-
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port->card->buff_offset +
desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
-
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
@@ -121,7 +117,6 @@ static inline void sca_set_carrier(port_t *port)
}
}
-
static void sca_init_port(port_t *port)
{
card_t *card = port->card;
@@ -181,12 +176,11 @@ static void sca_init_port(port_t *port)
netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
}
-
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
u16 msci = get_msci(port);
- card_t* card = port->card;
+ card_t *card = port->card;
if (sca_in(msci + ST1, card) & ST1_CDCD) {
/* Reset MSCI CDCD status bit */
@@ -195,7 +189,6 @@ static inline void sca_msci_intr(port_t *port)
}
}
-
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
u16 rxin)
{
@@ -225,7 +218,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
netif_receive_skb(skb);
}
-
/* Receive DMA service */
static inline int sca_rx_done(port_t *port, int budget)
{
@@ -281,12 +273,11 @@ static inline int sca_rx_done(port_t *port, int budget)
return received;
}
-
/* Transmit DMA service */
static inline void sca_tx_done(port_t *port)
{
struct net_device *dev = port->netdev;
- card_t* card = port->card;
+ card_t *card = port->card;
u8 stat;
unsigned count = 0;
@@ -321,7 +312,6 @@ static inline void sca_tx_done(port_t *port)
spin_unlock(&port->lock);
}
-
static int sca_poll(struct napi_struct *napi, int budget)
{
port_t *port = container_of(napi, port_t, napi);
@@ -363,15 +353,13 @@ static irqreturn_t sca_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-
static void sca_set_port(port_t *port)
{
- card_t* card = port->card;
+ card_t *card = port->card;
u16 msci = get_msci(port);
u8 md2 = sca_in(msci + MD2, card);
unsigned int tmc, br = 10, brv = 1024;
-
if (port->settings.clock_rate > 0) {
/* Try lower br for better accuracy*/
do {
@@ -380,14 +368,15 @@ static void sca_set_port(port_t *port)
/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
tmc = CLOCK_BASE / brv / port->settings.clock_rate;
- }while (br > 1 && tmc <= 128);
+ } while (br > 1 && tmc <= 128);
if (tmc < 1) {
tmc = 1;
br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
brv = 1;
- } else if (tmc > 255)
+ } else if (tmc > 255) {
tmc = 256; /* tmc=0 means 256 - low baud rates */
+ }
port->settings.clock_rate = CLOCK_BASE / brv / tmc;
} else {
@@ -414,34 +403,50 @@ static void sca_set_port(port_t *port)
md2 &= ~MD2_LOOPBACK;
sca_out(md2, msci + MD2, card);
-
}
-
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
- card_t* card = port->card;
+ card_t *card = port->card;
u16 msci = get_msci(port);
u8 md0, md2;
- switch(port->encoding) {
- case ENCODING_NRZ: md2 = MD2_NRZ; break;
- case ENCODING_NRZI: md2 = MD2_NRZI; break;
- case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
- case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
- default: md2 = MD2_MANCHESTER;
+ switch (port->encoding) {
+ case ENCODING_NRZ:
+ md2 = MD2_NRZ;
+ break;
+ case ENCODING_NRZI:
+ md2 = MD2_NRZI;
+ break;
+ case ENCODING_FM_MARK:
+ md2 = MD2_FM_MARK;
+ break;
+ case ENCODING_FM_SPACE:
+ md2 = MD2_FM_SPACE;
+ break;
+ default:
+ md2 = MD2_MANCHESTER;
}
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
- switch(port->parity) {
- case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
- case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
- case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
- case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
- default: md0 = MD0_HDLC | MD0_CRC_NONE;
+ switch (port->parity) {
+ case PARITY_CRC16_PR0:
+ md0 = MD0_HDLC | MD0_CRC_16_0;
+ break;
+ case PARITY_CRC16_PR1:
+ md0 = MD0_HDLC | MD0_CRC_16;
+ break;
+ case PARITY_CRC32_PR1_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU32;
+ break;
+ case PARITY_CRC16_PR1_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU;
+ break;
+ default:
+ md0 = MD0_HDLC | MD0_CRC_NONE;
}
sca_out(CMD_RESET, msci + CMD, card);
@@ -476,7 +481,6 @@ static void sca_open(struct net_device *dev)
netif_start_queue(dev);
}
-
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -488,7 +492,6 @@ static void sca_close(struct net_device *dev)
netif_stop_queue(dev);
}
-
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -511,7 +514,6 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
@@ -558,7 +560,6 @@ static void sca_dump_rings(struct net_device *dev)
}
#endif /* DEBUG_RINGS */
-
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -600,7 +601,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
@@ -619,7 +619,6 @@ static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
return i;
}
-
static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
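
The macro changes at the head of this file are about argument safety: an unparenthesized macro parameter mis-binds the moment the argument is an expression rather than a plain identifier. A two-line demonstration with toy macros (not the driver's):

    #define BAD(port)       (port->chan)
    #define GOOD(port)      ((port)->chan)

    /* With an expression argument:
     *   BAD(a + b)  expands to (a + b->chan)   -- -> binds to b alone: wrong
     *   GOOD(a + b) expands to ((a + b)->chan) -- grouped as intended
     */
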
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 1bdd3df0867a..dd6312b69861 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -36,8 +36,7 @@
#include <linux/slab.h>
#include <net/net_namespace.h>
-
-static const char* version = "HDLC support module revision 1.22";
+static const char *version = "HDLC support module revision 1.22";
#undef DEBUG_LINK
@@ -74,25 +73,24 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
return hdlc->xmit(skb, dev); /* call hardware driver directly */
}
+EXPORT_SYMBOL(hdlc_start_xmit);
static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
if (hdlc->proto->start)
hdlc->proto->start(dev);
}
-
-
static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
if (hdlc->proto->stop)
hdlc->proto->stop(dev);
}
-
-
static int hdlc_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -141,8 +139,6 @@ carrier_exit:
return NOTIFY_DONE;
}
-
-
/* Must be called by hardware driver when HDLC device is being opened */
int hdlc_open(struct net_device *dev)
{
@@ -152,11 +148,12 @@ int hdlc_open(struct net_device *dev)
hdlc->carrier, hdlc->open);
#endif
- if (hdlc->proto == NULL)
+ if (!hdlc->proto)
return -ENOSYS; /* no protocol attached */
if (hdlc->proto->open) {
int result = hdlc->proto->open(dev);
+
if (result)
return result;
}
@@ -166,16 +163,16 @@ int hdlc_open(struct net_device *dev)
if (hdlc->carrier) {
netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
- } else
+ } else {
netdev_info(dev, "No carrier\n");
+ }
hdlc->open = 1;
spin_unlock_irq(&hdlc->state_lock);
return 0;
}
-
-
+EXPORT_SYMBOL(hdlc_open);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev)
@@ -197,8 +194,7 @@ void hdlc_close(struct net_device *dev)
if (hdlc->proto->close)
hdlc->proto->close(dev);
}
-
-
+EXPORT_SYMBOL(hdlc_close);
int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -217,12 +213,14 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Not handled by currently attached protocol (if any) */
while (proto) {
- if ((result = proto->ioctl(dev, ifr)) != -EINVAL)
+ result = proto->ioctl(dev, ifr);
+ if (result != -EINVAL)
return result;
proto = proto->next;
}
return -EINVAL;
}
+EXPORT_SYMBOL(hdlc_ioctl);
static const struct header_ops hdlc_null_ops;
@@ -256,12 +254,14 @@ static void hdlc_setup(struct net_device *dev)
struct net_device *alloc_hdlcdev(void *priv)
{
struct net_device *dev;
+
dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d",
NET_NAME_UNKNOWN, hdlc_setup);
if (dev)
dev_to_hdlc(dev)->priv = priv;
return dev;
}
+EXPORT_SYMBOL(alloc_hdlcdev);
void unregister_hdlc_device(struct net_device *dev)
{
@@ -270,8 +270,7 @@ void unregister_hdlc_device(struct net_device *dev)
unregister_netdevice(dev);
rtnl_unlock();
}
-
-
+EXPORT_SYMBOL(unregister_hdlc_device);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size)
@@ -287,7 +286,7 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
if (size) {
dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL);
- if (dev_to_hdlc(dev)->state == NULL) {
+ if (!dev_to_hdlc(dev)->state) {
module_put(proto->module);
return -ENOBUFS;
}
@@ -296,7 +295,7 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
return 0;
}
-
+EXPORT_SYMBOL(attach_hdlc_protocol);
int detach_hdlc_protocol(struct net_device *dev)
{
@@ -322,7 +321,7 @@ int detach_hdlc_protocol(struct net_device *dev)
return 0;
}
-
+EXPORT_SYMBOL(detach_hdlc_protocol);
void register_hdlc_protocol(struct hdlc_proto *proto)
{
@@ -331,7 +330,7 @@ void register_hdlc_protocol(struct hdlc_proto *proto)
first_proto = proto;
rtnl_unlock();
}
-
+EXPORT_SYMBOL(register_hdlc_protocol);
void unregister_hdlc_protocol(struct hdlc_proto *proto)
{
@@ -346,54 +345,38 @@ void unregister_hdlc_protocol(struct hdlc_proto *proto)
*p = proto->next;
rtnl_unlock();
}
-
-
+EXPORT_SYMBOL(unregister_hdlc_protocol);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("HDLC support module");
MODULE_LICENSE("GPL v2");
-EXPORT_SYMBOL(hdlc_start_xmit);
-EXPORT_SYMBOL(hdlc_open);
-EXPORT_SYMBOL(hdlc_close);
-EXPORT_SYMBOL(hdlc_ioctl);
-EXPORT_SYMBOL(alloc_hdlcdev);
-EXPORT_SYMBOL(unregister_hdlc_device);
-EXPORT_SYMBOL(register_hdlc_protocol);
-EXPORT_SYMBOL(unregister_hdlc_protocol);
-EXPORT_SYMBOL(attach_hdlc_protocol);
-EXPORT_SYMBOL(detach_hdlc_protocol);
-
static struct packet_type hdlc_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_HDLC),
.func = hdlc_rcv,
};
-
static struct notifier_block hdlc_notifier = {
.notifier_call = hdlc_device_event,
};
-
static int __init hdlc_module_init(void)
{
int result;
pr_info("%s\n", version);
- if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+ result = register_netdevice_notifier(&hdlc_notifier);
+ if (result)
return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
-
-
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
unregister_netdevice_notifier(&hdlc_notifier);
}
-
module_init(hdlc_module_init);
module_exit(hdlc_module_exit);
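
The hdlc.c hunks above relocate every EXPORT_SYMBOL() from a block at the end of the file to directly beneath the function it exports, which is the placement kernel coding style asks for. In miniature:

    int hdlc_open(struct net_device *dev)
    {
            /* ... */
            return 0;
    }
    EXPORT_SYMBOL(hdlc_open);       /* export lives next to the definition */
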
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cb5898f7d68c..349ca18088e8 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -28,13 +28,11 @@
#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
-
struct hdlc_header {
u8 address;
u8 control;
__be16 protocol;
-}__packed;
-
+} __packed;
struct cisco_packet {
__be32 type; /* code */
@@ -42,11 +40,10 @@ struct cisco_packet {
__be32 par2;
__be16 rel; /* reliability */
__be32 time;
-}__packed;
+} __packed;
#define CISCO_PACKET_LEN 18
#define CISCO_BIG_PACKET_LEN 20
-
struct cisco_state {
cisco_proto settings;
@@ -59,16 +56,13 @@ struct cisco_state {
u32 rxseq; /* RX sequence number */
};
-
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
-
-static inline struct cisco_state* state(hdlc_device *hdlc)
+static inline struct cisco_state *state(hdlc_device *hdlc)
{
return (struct cisco_state *)hdlc->state;
}
-
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 type, const void *daddr, const void *saddr,
unsigned int len)
@@ -79,7 +73,7 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
#endif
skb_push(skb, sizeof(struct hdlc_header));
- data = (struct hdlc_header*)skb->data;
+ data = (struct hdlc_header *)skb->data;
if (type == CISCO_KEEPALIVE)
data->address = CISCO_MULTICAST;
else
@@ -90,8 +84,6 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
return sizeof(struct hdlc_header);
}
-
-
static void cisco_keepalive_send(struct net_device *dev, u32 type,
__be32 par1, __be32 par2)
{
@@ -100,13 +92,12 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
- if (!skb) {
- netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
+ if (!skb)
return;
- }
+
skb_reserve(skb, 4);
cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
- data = (struct cisco_packet*)(skb->data + 4);
+ data = (struct cisco_packet *)(skb->data + 4);
data->type = htonl(type);
data->par1 = par1;
@@ -124,11 +115,9 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
dev_queue_xmit(skb);
}
-
-
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
- struct hdlc_header *data = (struct hdlc_header*)skb->data;
+ struct hdlc_header *data = (struct hdlc_header *)skb->data;
if (skb->len < sizeof(struct hdlc_header))
return cpu_to_be16(ETH_P_HDLC);
@@ -148,13 +137,12 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
}
}
-
static int cisco_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
- struct hdlc_header *data = (struct hdlc_header*)skb->data;
+ struct hdlc_header *data = (struct hdlc_header *)skb->data;
struct cisco_packet *cisco_data;
struct in_device *in_dev;
__be32 addr, mask;
@@ -183,10 +171,10 @@ static int cisco_rx(struct sk_buff *skb)
goto rx_error;
}
- cisco_data = (struct cisco_packet*)(skb->data + sizeof
+ cisco_data = (struct cisco_packet *)(skb->data + sizeof
(struct hdlc_header));
- switch (ntohl (cisco_data->type)) {
+ switch (ntohl(cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -226,6 +214,7 @@ static int cisco_rx(struct sk_buff *skb)
st->last_poll = jiffies;
if (!st->up) {
u32 sec, min, hrs, days;
+
sec = ntohl(cisco_data->time) / 1000;
min = sec / 60; sec -= min * 60;
hrs = min / 60; min -= hrs * 60;
@@ -253,8 +242,6 @@ rx_error:
return NET_RX_DROP;
}
-
-
static void cisco_timer(struct timer_list *t)
{
struct cisco_state *st = from_timer(st, t, timer);
@@ -276,8 +263,6 @@ static void cisco_timer(struct timer_list *t)
add_timer(&st->timer);
}
-
-
static void cisco_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -294,8 +279,6 @@ static void cisco_start(struct net_device *dev)
add_timer(&st->timer);
}
-
-
static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -310,7 +293,6 @@ static void cisco_stop(struct net_device *dev)
spin_unlock_irqrestore(&st->lock, flags);
}
-
static struct hdlc_proto proto = {
.start = cisco_start,
.stop = cisco_stop,
@@ -359,7 +341,8 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.timeout < 2)
return -EINVAL;
- result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
@@ -381,21 +364,17 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
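
The "} __packed;" fixes above touch on-the-wire structures, where __packed suppresses compiler padding so sizeof() matches the framed length. struct cisco_packet is the instructive case; a check one could add (the static_assert placement here is illustrative):

    struct cisco_packet {
            __be32 type;
            __be32 par1;
            __be32 par2;
            __be16 rel;
            __be32 time;
    } __packed;     /* 4 + 4 + 4 + 2 + 4 = 18 bytes on the wire */

    /* Without __packed, 'time' would be aligned up to offset 16 and
     * sizeof() would be 20, no longer matching CISCO_PACKET_LEN (18).
     */
    static_assert(sizeof(struct cisco_packet) == CISCO_PACKET_LEN);
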
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0720f5f92caa..72250fe0a1df 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -6,16 +6,16 @@
* Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
*
- Theory of PVC state
+ Theory of PVC state
DCE mode:
(exist,new) -> 0,0 when "PVC create" or if "link unreliable"
- 0,x -> 1,1 if "link reliable" when sending FULL STATUS
- 1,1 -> 1,0 if received FULL STATUS ACK
+ 0,x -> 1,1 if "link reliable" when sending FULL STATUS
+ 1,1 -> 1,0 if received FULL STATUS ACK
(active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
- -> 1 when "PVC up" and (exist,new) = 1,0
+ -> 1 when "PVC up" and (exist,new) = 1,0
DTE mode:
(exist,new,active) = FULL STATUS if "link reliable"
@@ -60,7 +60,6 @@
#define NLPID_CCITT_ANSI_LMI 0x08
#define NLPID_CISCO_LMI 0x09
-
#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
#define LMI_CISCO_DLCI 1023
@@ -86,7 +85,6 @@
#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH 14
-
struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned ea1: 1;
@@ -111,7 +109,6 @@ struct fr_hdr {
#endif
} __packed;
-
struct pvc_device {
struct net_device *frad;
struct net_device *main;
@@ -128,7 +125,7 @@ struct pvc_device {
unsigned int fecn: 1;
unsigned int becn: 1;
unsigned int bandwidth; /* Cisco LMI reporting only */
- }state;
+ } state;
};
struct frad_state {
@@ -149,29 +146,24 @@ struct frad_state {
u8 rxseq; /* RX sequence number */
};
-
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
-
static inline u16 q922_to_dlci(u8 *hdr)
{
return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}
-
static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
hdr[0] = (dlci >> 2) & 0xFC;
hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
-
-static inline struct frad_state* state(hdlc_device *hdlc)
+static inline struct frad_state *state(hdlc_device *hdlc)
{
- return(struct frad_state *)(hdlc->state);
+ return (struct frad_state *)(hdlc->state);
}
-
static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
struct pvc_device *pvc = state(hdlc)->first_pvc;
@@ -187,7 +179,6 @@ static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
return NULL;
}
-
static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -215,13 +206,11 @@ static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
return pvc;
}
-
static inline int pvc_is_used(struct pvc_device *pvc)
{
return pvc->main || pvc->ether;
}
-
static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
if (on) {
@@ -241,7 +230,6 @@ static inline void pvc_carrier(int on, struct pvc_device *pvc)
}
}
-
static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
@@ -260,7 +248,6 @@ static inline void delete_unused_pvcs(hdlc_device *hdlc)
}
}
-
static inline struct net_device **get_dev_p(struct pvc_device *pvc,
int type)
{
@@ -270,7 +257,6 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
return &pvc->main;
}
-
static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
if (!skb->dev) { /* Control packets */
@@ -334,8 +320,6 @@ static int fr_hard_header(struct sk_buff *skb, u16 dlci)
return 0;
}
-
-
static int pvc_open(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
@@ -345,6 +329,7 @@ static int pvc_open(struct net_device *dev)
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = netif_carrier_ok(pvc->frad);
@@ -354,14 +339,13 @@ static int pvc_open(struct net_device *dev)
return 0;
}
-
-
static int pvc_close(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = 0;
@@ -373,8 +357,6 @@ static int pvc_close(struct net_device *dev)
return 0;
}
-
-
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct pvc_device *pvc = dev->ml_priv;
@@ -465,15 +447,12 @@ static inline void fr_log_dlci_active(struct pvc_device *pvc)
pvc->state.active ? "active" : "inactive");
}
-
-
static inline u8 fr_lmi_nextseq(u8 x)
{
x++;
return x ? x : 1;
}
-
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -495,17 +474,16 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
}
skb = dev_alloc_skb(len);
- if (!skb) {
- netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
+ if (!skb)
return;
- }
+
memset(skb->data, 0, len);
skb_reserve(skb, 4);
- if (lmi == LMI_CISCO) {
+ if (lmi == LMI_CISCO)
fr_hard_header(skb, LMI_CISCO_DLCI);
- } else {
+ else
fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
- }
+
data = skb_tail_pointer(skb);
data[i++] = LMI_CALLREF;
data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
@@ -569,8 +547,6 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
dev_queue_xmit(skb);
}
-
-
static void fr_set_link_state(int reliable, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -603,7 +579,6 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
}
-
static void fr_timer(struct timer_list *t)
{
struct frad_state *st = from_timer(st, t, timer);
@@ -637,10 +612,10 @@ static void fr_timer(struct timer_list *t)
fr_set_link_state(reliable, dev);
}
- if (state(hdlc)->settings.dce)
+ if (state(hdlc)->settings.dce) {
state(hdlc)->timer.expires = jiffies +
state(hdlc)->settings.t392 * HZ;
- else {
+ } else {
if (state(hdlc)->n391cnt)
state(hdlc)->n391cnt--;
@@ -655,7 +630,6 @@ static void fr_timer(struct timer_list *t)
add_timer(&state(hdlc)->timer);
}
-
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -696,8 +670,9 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
return 1;
}
i = 7;
- } else
+ } else {
i = 6;
+ }
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
LMI_ANSI_CISCO_REPTYPE)) {
@@ -814,8 +789,8 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
}
i++;
- new = !! (skb->data[i + 2] & 0x08);
- active = !! (skb->data[i + 2] & 0x02);
+ new = !!(skb->data[i + 2] & 0x08);
+ active = !!(skb->data[i + 2] & 0x02);
if (lmi == LMI_CISCO) {
dlci = (skb->data[i] << 8) | skb->data[i + 1];
bw = (skb->data[i + 3] << 16) |
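
The PVC status IE being parsed here keeps its flags in the third octet: 0x08 marks a new PVC, 0x02 an active one. With Cisco LMI the DLCI is a full 16-bit field and three further octets report bandwidth. A sketch of that layout, with a hypothetical helper name and 'p' standing in for skb->data + i:

static void cisco_pvc_ie_sketch(const u8 *p, u16 *dlci, int *is_new,
				int *is_active, u32 *bw)
{
	*dlci = (p[0] << 8) | p[1];		/* 16-bit DLCI */
	*is_new = !!(p[2] & 0x08);		/* just created */
	*is_active = !!(p[2] & 0x02);		/* usable end to end */
	*bw = (p[3] << 16) | (p[4] << 8) | p[5]; /* reported bandwidth */
}
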
@@ -962,8 +937,8 @@ static int fr_rx(struct sk_buff *skb)
pvc->state.becn ^= 1;
}
-
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
frad->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -1018,8 +993,6 @@ rx_drop:
return NET_RX_DROP;
}
-
-
static void fr_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1040,11 +1013,11 @@ static void fr_start(struct net_device *dev)
/* First poll after 1 s */
state(hdlc)->timer.expires = jiffies + HZ;
add_timer(&state(hdlc)->timer);
- } else
+ } else {
fr_set_link_state(1, dev);
+ }
}
-
static void fr_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1056,7 +1029,6 @@ static void fr_stop(struct net_device *dev)
fr_set_link_state(0, dev);
}
-
static void fr_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1071,7 +1043,6 @@ static void fr_close(struct net_device *dev)
}
}
-
static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
@@ -1095,7 +1066,8 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
struct net_device *dev;
int used;
- if ((pvc = add_pvc(frad, dlci)) == NULL) {
+ pvc = add_pvc(frad, dlci);
+ if (!pvc) {
netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
return -ENOBUFS;
}
@@ -1121,7 +1093,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
} else {
- *(__be16*)dev->dev_addr = htons(dlci);
+ *(__be16 *)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
dev->netdev_ops = &pvc_ops;
@@ -1147,17 +1119,17 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
return 0;
}
-
-
static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
struct pvc_device *pvc;
struct net_device *dev;
- if ((pvc = find_pvc(hdlc, dlci)) == NULL)
+ pvc = find_pvc(hdlc, dlci);
+ if (!pvc)
return -ENOENT;
- if ((dev = *get_dev_p(pvc, type)) == NULL)
+ dev = *get_dev_p(pvc, type);
+ if (!dev)
return -ENOENT;
if (dev->flags & IFF_UP)
@@ -1174,12 +1146,11 @@ static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
return 0;
}
-
-
static void fr_destroy(struct net_device *frad)
{
hdlc_device *hdlc = dev_to_hdlc(frad);
struct pvc_device *pvc = state(hdlc)->first_pvc;
+
state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
state(hdlc)->dce_pvc_count = 0;
state(hdlc)->dce_changed = 1;
@@ -1198,7 +1169,6 @@ static void fr_destroy(struct net_device *frad)
}
}
-
static struct hdlc_proto proto = {
.close = fr_close,
.start = fr_start,
@@ -1209,7 +1179,6 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
@@ -1259,7 +1228,8 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.dce != 1))
return -EINVAL;
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
@@ -1309,20 +1279,17 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 261b53fc8e04..834be2ae3e9e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -41,6 +41,7 @@ static const char *const code_names[CP_CODES] = {
"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
+
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif
@@ -58,7 +59,6 @@ struct cp_header {
__be16 len;
};
-
struct proto {
struct net_device *dev;
struct timer_list timer;
@@ -91,6 +91,7 @@ static const char *const state_names[STATES] = {
"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
"Opened"
};
+
static const char *const event_names[EVENTS] = {
"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
@@ -101,12 +102,12 @@ static struct sk_buff_head tx_queue; /* used when holding the spin lock */
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
-static inline struct ppp* get_ppp(struct net_device *dev)
+static inline struct ppp *get_ppp(struct net_device *dev)
{
return (struct ppp *)dev_to_hdlc(dev)->state;
}
-static inline struct proto* get_proto(struct net_device *dev, u16 pid)
+static inline struct proto *get_proto(struct net_device *dev, u16 pid)
{
struct ppp *ppp = get_ppp(dev);
@@ -122,7 +123,7 @@ static inline struct proto* get_proto(struct net_device *dev, u16 pid)
}
}
-static inline const char* proto_name(u16 pid)
+static inline const char *proto_name(u16 pid)
{
switch (pid) {
case PID_LCP:
@@ -138,7 +139,7 @@ static inline const char* proto_name(u16 pid)
static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
- struct hdlc_header *data = (struct hdlc_header*)skb->data;
+ struct hdlc_header *data = (struct hdlc_header *)skb->data;
if (skb->len < sizeof(struct hdlc_header))
return htons(ETH_P_HDLC);
@@ -160,7 +161,6 @@ static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
}
}
-
static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 type, const void *daddr, const void *saddr,
unsigned int len)
@@ -171,7 +171,7 @@ static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
#endif
skb_push(skb, sizeof(struct hdlc_header));
- data = (struct hdlc_header*)skb->data;
+ data = (struct hdlc_header *)skb->data;
data->address = HDLC_ADDR_ALLSTATIONS;
data->control = HDLC_CTRL_UI;
@@ -193,10 +193,10 @@ static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
return sizeof(struct hdlc_header);
}
-
static void ppp_tx_flush(void)
{
struct sk_buff *skb;
+
while ((skb = skb_dequeue(&tx_queue)) != NULL)
dev_queue_xmit(skb);
}
@@ -219,10 +219,9 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cp_header) + magic_len + len);
- if (!skb) {
- netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
+ if (!skb)
return;
- }
+
skb_reserve(skb, sizeof(struct hdlc_header));
cp = skb_put(skb, sizeof(struct cp_header));
@@ -256,7 +255,6 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb_queue_tail(&tx_queue, skb);
}
-
/* State transition table (compare STD-51)
Events Actions
TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count
@@ -294,7 +292,6 @@ static int cp_table[EVENTS][STATES] = {
{ 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */
};
-
/* SCA: RCR+ must supply id, len and data
SCN: RCR- must supply code, id, len and data
STA: RTR must supply id
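
Each cp_table entry packs the next state into its low bits and the actions listed above into flag bits, so a single lookup drives both. A condensed sketch of the dispatch (the real ppp_cp_event() also handles the restart timer, the state-dependent counter and the remaining action flags):

static void cp_dispatch_sketch(struct net_device *dev, struct ppp *ppp,
			       struct proto *proto, u16 pid, u16 event)
{
	int action = cp_table[event][proto->state];

	proto->state = action & STATE_MASK;	/* low bits: next state */
	if (action & SCR)			/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, ++ppp->seq, 0, NULL);
}
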
@@ -369,7 +366,6 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
#endif
}
-
static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
unsigned int req_len, const u8 *data)
{
@@ -378,7 +374,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
u8 *out;
unsigned int len = req_len, nak_len = 0, rej_len = 0;
- if (!(out = kmalloc(len, GFP_ATOMIC))) {
+ out = kmalloc(len, GFP_ATOMIC);
+ if (!out) {
dev->stats.rx_dropped++;
return; /* out of memory, ignore CR packet */
}
@@ -435,7 +432,7 @@ err_out:
static int ppp_rx(struct sk_buff *skb)
{
- struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
+ struct hdlc_header *hdr = (struct hdlc_header *)skb->data;
struct net_device *dev = skb->dev;
struct ppp *ppp = get_ppp(dev);
struct proto *proto;
@@ -493,7 +490,7 @@ static int ppp_rx(struct sk_buff *skb)
if (pid == PID_LCP)
switch (cp->code) {
case LCP_PROTO_REJ:
- pid = ntohs(*(__be16*)skb->data);
+ pid = ntohs(*(__be16 *)skb->data);
if (pid == PID_LCP || pid == PID_IPCP ||
pid == PID_IPV6CP)
ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
@@ -615,7 +612,6 @@ static void ppp_timer(struct timer_list *t)
ppp_tx_flush();
}
-
static void ppp_start(struct net_device *dev)
{
struct ppp *ppp = get_ppp(dev);
@@ -623,6 +619,7 @@ static void ppp_start(struct net_device *dev)
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
+
proto->dev = dev;
timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
@@ -680,7 +677,8 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* no settable parameters */
- result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
@@ -707,7 +705,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
skb_queue_head_init(&tx_queue);
@@ -720,7 +717,6 @@ static void __exit mod_exit(void)
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index ba8c36c7ea91..d2bf72bf3bd7 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -56,10 +56,8 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
unsigned char *ptr;
skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- netdev_err(dev, "out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = code;
@@ -70,22 +68,16 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
tasklet_schedule(&x25st->rx_tasklet);
}
-
-
static void x25_connected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
-
-
static void x25_disconnected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
-
-
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
struct x25_state *x25st = state(dev_to_hdlc(dev));
@@ -108,8 +100,6 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-
-
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -123,8 +113,6 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
-
-
static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -149,13 +137,15 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
- if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+ result = lapb_data_request(dev, skb);
+ if (result != LAPB_OK)
dev_kfree_skb(skb);
spin_unlock_bh(&x25st->up_lock);
return NETDEV_TX_OK;
case X25_IFACE_CONNECT:
- if ((result = lapb_connect_request(dev))!= LAPB_OK) {
+ result = lapb_connect_request(dev);
+ if (result != LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
x25_connected(dev, 0);
@@ -166,7 +156,8 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
break;
case X25_IFACE_DISCONNECT:
- if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
+ result = lapb_disconnect_request(dev);
+ if (result != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
x25_disconnected(dev, 0);
@@ -185,8 +176,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
-
static int x25_open(struct net_device *dev)
{
static const struct lapb_register_struct cb = {
@@ -232,8 +221,6 @@ static int x25_open(struct net_device *dev)
return 0;
}
-
-
static void x25_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -247,15 +234,14 @@ static void x25_close(struct net_device *dev)
tasklet_kill(&x25st->rx_tasklet);
}
-
-
static int x25_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
dev->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -279,7 +265,6 @@ static int x25_rx(struct sk_buff *skb)
return NET_RX_DROP;
}
-
static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
@@ -289,7 +274,6 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
@@ -326,35 +310,36 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.t1 = 3;
new_settings.t2 = 1;
new_settings.n2 = 10;
- }
- else {
+ } else {
if (copy_from_user(&new_settings, x25_s, size))
return -EFAULT;
if ((new_settings.dce != 0 &&
- new_settings.dce != 1) ||
- (new_settings.modulo != 8 &&
- new_settings.modulo != 128) ||
- new_settings.window < 1 ||
- (new_settings.modulo == 8 &&
- new_settings.window > 7) ||
- (new_settings.modulo == 128 &&
- new_settings.window > 127) ||
- new_settings.t1 < 1 ||
- new_settings.t1 > 255 ||
- new_settings.t2 < 1 ||
- new_settings.t2 > 255 ||
- new_settings.n2 < 1 ||
- new_settings.n2 > 255)
+ new_settings.dce != 1) ||
+ (new_settings.modulo != 8 &&
+ new_settings.modulo != 128) ||
+ new_settings.window < 1 ||
+ (new_settings.modulo == 8 &&
+ new_settings.window > 7) ||
+ (new_settings.modulo == 128 &&
+ new_settings.window > 127) ||
+ new_settings.t1 < 1 ||
+ new_settings.t1 > 255 ||
+ new_settings.t2 < 1 ||
+ new_settings.t2 > 255 ||
+ new_settings.n2 < 1 ||
+ new_settings.n2 > 255)
return -EINVAL;
}
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto,
- sizeof(struct x25_state))))
+ result = attach_hdlc_protocol(dev, &proto,
+ sizeof(struct x25_state));
+ if (result)
return result;
memcpy(&state(hdlc)->settings, &new_settings, size);
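
The validation above enforces the LAPB envelope: DCE is boolean, modulo is 8 or 128, the window must fit the modulo (1-7 or 1-127), and T1/T2/N2 each span 1-255. Gathered into one predicate (illustrative; the driver open-codes it in x25_ioctl()):

static int x25_settings_valid_sketch(const x25_hdlc_proto *s)
{
	unsigned int wmax = s->modulo == 8 ? 7 : 127;

	return (s->dce == 0 || s->dce == 1) &&
	       (s->modulo == 8 || s->modulo == 128) &&
	       s->window >= 1 && s->window <= wmax &&
	       s->t1 >= 1 && s->t1 <= 255 &&
	       s->t2 >= 1 && s->t2 <= 255 &&
	       s->n2 >= 1 && s->n2 <= 255;
}
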
@@ -380,21 +365,17 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 6c05c4c8914a..fd61a7cc4fdf 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Comtrol SV11 card driver
+/* Comtrol SV11 card driver
*
* This is a slightly odd Z85230 synchronous driver. All you need to
* know basically is
@@ -9,7 +8,7 @@
*
* It supports DMA using two DMA channels in SYNC mode. The driver doesn't
* use these facilities
- *
+ *
* The control port is at io+1, the data at io+3 and turning off the DMA
* is done by writing 0 to io+4
*
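
In code, the port map described above comes down to a few lines at probe time, roughly as sv11_init() does it (a sketch of that path, not a verbatim quote):

	outb(0, iobase + 4);		/* DMA off */
	sv->chanA.ctrlio = iobase + 1;	/* Z85230 control port */
	sv->chanA.dataio = iobase + 3;	/* Z85230 data port */
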
@@ -44,17 +43,15 @@
static int dma;
-/*
- * Network driver support routines
+/* Network driver support routines
*/
-static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
+static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
{
return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
}
-/*
- * Frame receive. Simple for our card as we do HDLC and there
+/* Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
@@ -65,15 +62,13 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb_reset_mac_header(skb);
skb->dev = c->netdevice;
- /*
- * Send it to the PPP layer. We don't have time to process
+ /* Send it to the PPP layer. We don't have time to process
* it right now.
*/
netif_rx(skb);
}
-/*
- * We've been placed in the UP state
+/* We've been placed in the UP state
*/
static int hostess_open(struct net_device *d)
@@ -81,19 +76,18 @@ static int hostess_open(struct net_device *d)
struct z8530_dev *sv11 = dev_to_sv(d);
int err = -1;
- /*
- * Link layer up
+ /* Link layer up
*/
switch (dma) {
- case 0:
- err = z8530_sync_open(d, &sv11->chanA);
- break;
- case 1:
- err = z8530_sync_dma_open(d, &sv11->chanA);
- break;
- case 2:
- err = z8530_sync_txdma_open(d, &sv11->chanA);
- break;
+ case 0:
+ err = z8530_sync_open(d, &sv11->chanA);
+ break;
+ case 1:
+ err = z8530_sync_dma_open(d, &sv11->chanA);
+ break;
+ case 2:
+ err = z8530_sync_txdma_open(d, &sv11->chanA);
+ break;
}
if (err)
@@ -102,15 +96,15 @@ static int hostess_open(struct net_device *d)
err = hdlc_open(d);
if (err) {
switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
+ case 0:
+ z8530_sync_close(d, &sv11->chanA);
+ break;
+ case 1:
+ z8530_sync_dma_close(d, &sv11->chanA);
+ break;
+ case 2:
+ z8530_sync_txdma_close(d, &sv11->chanA);
+ break;
}
return err;
}
@@ -127,8 +121,7 @@ static int hostess_open(struct net_device *d)
static int hostess_close(struct net_device *d)
{
struct z8530_dev *sv11 = dev_to_sv(d);
- /*
- * Discard new frames
+ /* Discard new frames
*/
sv11->chanA.rx_function = z8530_null_rx;
@@ -136,32 +129,29 @@ static int hostess_close(struct net_device *d)
netif_stop_queue(d);
switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
+ case 0:
+ z8530_sync_close(d, &sv11->chanA);
+ break;
+ case 1:
+ z8530_sync_dma_close(d, &sv11->chanA);
+ break;
+ case 2:
+ z8530_sync_txdma_close(d, &sv11->chanA);
+ break;
}
return 0;
}
static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
- /* struct z8530_dev *sv11=dev_to_sv(d);
- z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
return hdlc_ioctl(d, ifr, cmd);
}
-/*
- * Passed network frames, fire them downwind.
+/* Passed network frames, fire them downwind.
*/
static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
+ struct net_device *d)
{
return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
}
@@ -174,8 +164,7 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
return -EINVAL;
}
-/*
- * Description block for a Comtrol Hostess SV11 card
+/* Description block for a Comtrol Hostess SV11 card
*/
static const struct net_device_ops hostess_ops = {
@@ -189,8 +178,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
{
struct z8530_dev *sv;
struct net_device *netdev;
- /*
- * Get the needed I/O space
+ /* Get the needed I/O space
*/
if (!request_region(iobase, 8, "Comtrol SV11")) {
@@ -202,8 +190,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
if (!sv)
goto err_kzalloc;
- /*
- * Stuff in the I/O addressing
+ /* Stuff in the I/O addressing
*/
sv->active = 0;
@@ -218,7 +205,8 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
outb(0, iobase + 4); /* DMA off */
/* We want a fast IRQ for this device. Actually we'd like an even faster
- IRQ ;) - This is one driver RtLinux is made for */
+ * IRQ ;) - This is one driver RtLinux is made for
+ */
if (request_irq(irq, z8530_interrupt, 0,
"Hostess SV11", sv) < 0) {
@@ -232,8 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
sv->chanB.dev = sv;
if (dma) {
- /*
- * You can have DMA off or 1 and 3 thats the lot
+ /* You can have DMA off or 1 and 3, that's the lot
* on the Comtrol.
*/
sv->chanA.txdma = 3;
@@ -248,11 +235,11 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
}
/* Kill our private IRQ line the hostess can end up chattering
- until the configuration is set */
+ * until the configuration is set
+ */
disable_irq(irq);
- /*
- * Begin normal initialise
+ /* Begin normal initialise
*/
if (z8530_init(sv)) {
@@ -268,8 +255,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
enable_irq(irq);
- /*
- * Now we can take the IRQ
+ /* Now we can take the IRQ
*/
sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
@@ -340,7 +326,8 @@ static struct z8530_dev *sv11_unit;
int init_module(void)
{
- if ((sv11_unit = sv11_init(io, irq)) == NULL)
+ sv11_unit = sv11_init(io, irq);
+ if (!sv11_unit)
return -ENODEV;
return 0;
}
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index ecea09fd21cb..e97521138f7e 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -83,7 +83,6 @@
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
-
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
@@ -150,26 +149,24 @@
/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
-
/* hss_config, clkCR: main:10, num:10, denom:12 */
-#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
-
-#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
-#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
-#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
-#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
-#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
-#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
-
-#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
-#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
-#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
-#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
-#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
-#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
-
-/*
- * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
+#define CLK42X_SPEED_EXP ((0x3FF << 22) | (2 << 12) | 15) /*65 KHz*/
+
+#define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15)
+#define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47)
+#define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192)
+#define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63)
+#define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127)
+#define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255)
+
+#define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127)
+#define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383)
+#define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385)
+#define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511)
+#define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047)
+
+/* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
* A (10 bits), B (10 bits) and C (12 bits).
* IXP42x HSS clock generator operation (verified with an oscilloscope):
* Each clock bit takes 7.5 ns (1 / 133.xx MHz).
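
From that A/B/C split the average output rate works out to base / (A + (B + 1) / (C + 1)). An illustrative helper for decoding the CLK4xX_SPEED_* values (not part of the driver; plain 64-bit division is used instead of do_div() to keep the sketch short):

static u32 hss_clock_rate_sketch(u64 base, u32 reg)
{
	u32 a = reg >> 22;		/* main, 10 bits */
	u32 b = (reg >> 12) & 0x3FF;	/* num, 10 bits */
	u32 c = reg & 0xFFF;		/* denom, 12 bits */

	/* multiplied through by (c + 1) to stay in integer math */
	return base * (c + 1) / (a * (c + 1) + b + 1);
}

For CLK42X_SPEED_2048KHZ (A = 32, B = 34, C = 63) and a 66.666 MHz base this yields roughly 2048 kHz.
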
@@ -208,7 +205,6 @@
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
-
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40
@@ -220,7 +216,8 @@
#define PORT_ERROR_READ 0x42
/* triggers the NPE to reset internal status and enable the HssPacketized
- operation for the flow specified by pPipe */
+ * operation for the flow specified by pPipe
+ */
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
@@ -235,12 +232,12 @@
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
- this packet (if buf_len < pkt_len) */
+ * this packet (if buf_len < pkt_len)
+ */
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
-
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
@@ -308,7 +305,6 @@ struct desc {
u32 __reserved1[4];
};
-
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
@@ -327,7 +323,7 @@ static DEFINE_SPINLOCK(npe_lock);
static const struct {
int tx, txdone, rx, rxfree;
-}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
+} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
HSS0_PKT_RXFREE0_QUEUE},
{HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
HSS1_PKT_RXFREE0_QUEUE},
@@ -337,7 +333,7 @@ static const struct {
* utility functions
****************************************************************************/
-static inline struct port* dev_to_port(struct net_device *dev)
+static inline struct port *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -346,6 +342,7 @@ static inline struct port* dev_to_port(struct net_device *dev)
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
int i;
+
for (i = 0; i < cnt; i++)
dest[i] = swab32(src[i]);
}
@@ -355,9 +352,10 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
* HSS access
****************************************************************************/
-static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
+static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
- u32 *val = (u32*)msg;
+ u32 *val = (u32 *)msg;
+
if (npe_send_message(port->npe, msg, what)) {
pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
port->id, val[0], val[1], npe_name(port->npe));
@@ -513,10 +511,12 @@ static int hss_load_firmware(struct port *port)
if (port->initialized)
return 0;
- if (!npe_running(port->npe) &&
- (err = npe_load_firmware(port->npe, npe_name(port->npe),
- port->dev)))
- return err;
+ if (!npe_running(port->npe)) {
+ err = npe_load_firmware(port->npe, npe_name(port->npe),
+ port->dev);
+ if (err)
+ return err;
+ }
/* HDLC mode configuration */
memset(&msg, 0, sizeof(msg));
@@ -567,7 +567,6 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#endif
}
-
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
@@ -583,7 +582,8 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
u32 phys, tab_phys, n_desc;
struct desc *tab;
- if (!(phys = qmgr_get_entry(queue)))
+ phys = qmgr_get_entry(queue);
+ if (!phys)
return -1;
BUG_ON(phys & 0x1F);
@@ -603,10 +603,10 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
/* Don't check for queue overflow here, we've allocated sufficient
- length and queues >= 32 don't support this check anyway. */
+ * length and queues >= 32 don't support this check anyway.
+ */
}
-
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
@@ -619,7 +619,6 @@ static inline void dma_unmap_tx(struct port *port, struct desc *desc)
#endif
}
-
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
struct net_device *netdev = pdev;
@@ -670,7 +669,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
u32 phys;
#endif
- if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+ n = queue_get_desc(rxq, port, 0);
+ if (n < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" napi_complete\n", dev->name);
@@ -705,7 +705,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
switch (desc->status) {
case 0:
#ifdef __ARMEB__
- if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
+ skb = netdev_alloc_skb(dev, RX_SIZE);
+ if (skb) {
phys = dma_map_single(&dev->dev, skb->data,
RX_SIZE,
DMA_FROM_DEVICE);
@@ -784,7 +785,6 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
return received; /* not all work done */
}
-
static void hss_hdlc_txdone_irq(void *pdev)
{
struct net_device *dev = pdev;
@@ -854,7 +854,8 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
#else
offset = (int)skb->data & 3; /* keep 32-bit alignment */
bytes = ALIGN(offset + len, 4);
- if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+ mem = kmalloc(bytes, GFP_ATOMIC);
+ if (!mem) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -910,7 +911,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
static int request_hdlc_queues(struct port *port)
{
int err;
@@ -974,8 +974,9 @@ static int init_hdlc_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
@@ -987,11 +988,13 @@ static int init_hdlc_queues(struct port *port)
buffer_t *buff;
void *data;
#ifdef __ARMEB__
- if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+ buff = netdev_alloc_skb(port->netdev, RX_SIZE);
+ if (!buff)
return -ENOMEM;
data = buff->data;
#else
- if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+ buff = kmalloc(RX_SIZE, GFP_KERNEL);
+ if (!buff)
return -ENOMEM;
data = buff;
#endif
@@ -1016,6 +1019,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff = port->rx_buff_tab[i];
+
if (buff) {
dma_unmap_single(&port->netdev->dev,
desc->data, RX_SIZE,
@@ -1026,6 +1030,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < TX_DESCS; i++) {
struct desc *desc = tx_desc_ptr(port, i);
buffer_t *buff = port->tx_buff_tab[i];
+
if (buff) {
dma_unmap_tx(port, desc);
free_buffer(buff);
@@ -1047,23 +1052,29 @@ static int hss_hdlc_open(struct net_device *dev)
unsigned long flags;
int i, err = 0;
- if ((err = hdlc_open(dev)))
+ err = hdlc_open(dev);
+ if (err)
return err;
- if ((err = hss_load_firmware(port)))
+ err = hss_load_firmware(port);
+ if (err)
goto err_hdlc_close;
- if ((err = request_hdlc_queues(port)))
+ err = request_hdlc_queues(port);
+ if (err)
goto err_hdlc_close;
- if ((err = init_hdlc_queues(port)))
+ err = init_hdlc_queues(port);
+ if (err)
goto err_destroy_queues;
spin_lock_irqsave(&npe_lock, flags);
- if (port->plat->open)
- if ((err = port->plat->open(port->id, dev,
- hss_hdlc_set_carrier)))
+ if (port->plat->open) {
+ err = port->plat->open(port->id, dev, hss_hdlc_set_carrier);
+ if (err)
goto err_unlock;
+ }
+
spin_unlock_irqrestore(&npe_lock, flags);
/* Populate queues with buffers, no failure after this point */
@@ -1160,7 +1171,6 @@ static int hss_hdlc_close(struct net_device *dev)
return 0;
}
-
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -1169,7 +1179,7 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
if (encoding != ENCODING_NRZ)
return -EINVAL;
- switch(parity) {
+ switch (parity) {
case PARITY_CRC16_PR1_CCITT:
port->hdlc_cfg = 0;
return 0;
@@ -1224,6 +1234,7 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
+
do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
@@ -1255,7 +1266,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
@@ -1272,7 +1283,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
@@ -1288,11 +1299,11 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
port->clock_type = clk; /* Update settings */
- if (clk == CLOCK_INT)
+ if (clk == CLOCK_INT) {
find_best_clock(port->plat->timer_freq,
new_line.clock_rate,
&port->clock_rate, &port->clock_reg);
- else {
+ } else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
}
@@ -1334,15 +1345,19 @@ static int hss_init_one(struct platform_device *pdev)
hdlc_device *hdlc;
int err;
- if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
return -ENOMEM;
- if ((port->npe = npe_request(0)) == NULL) {
+ port->npe = npe_request(0);
+ if (!port->npe) {
err = -ENODEV;
goto err_free;
}
- if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
+ dev = alloc_hdlcdev(port);
+ port->netdev = dev;
+ if (!port->netdev) {
err = -ENOMEM;
goto err_plat;
}
@@ -1361,7 +1376,8 @@ static int hss_init_one(struct platform_device *pdev)
port->plat = pdev->dev.platform_data;
netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
- if ((err = register_hdlc_device(dev)))
+ err = register_hdlc_device(dev);
+ if (err)
goto err_free_netdev;
platform_set_drvdata(pdev, port);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 59646865a3a4..89d31adc3809 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -6,7 +6,7 @@
*
* This is a "pseudo" network driver to allow LAPB over Ethernet.
*
- * This driver can use any ethernet destination address, and can be
+ * This driver can use any ethernet destination address, and can be
* limited to accept frames from one dedicated ethernet card only.
*
* History
@@ -44,7 +44,8 @@
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
- * in lapbeth_new_device is large enough to store the probe device name.*/
+ * in lapbeth_new_device is large enough to store the probe device name.
+ */
#define MAXLAPBDEV 100
struct lapbethdev {
@@ -64,15 +65,14 @@ static void lapbeth_disconnected(struct net_device *dev, int reason);
/* ------------------------------------------------------------------------ */
-/*
- * Get the LAPB device for the ethernet device
+/* Get the LAPB device for the ethernet device
*/
static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
- if (lapbeth->ethdev == dev)
+ if (lapbeth->ethdev == dev)
return lapbeth;
}
return NULL;
@@ -105,10 +105,10 @@ static int lapbeth_napi_poll(struct napi_struct *napi, int budget)
return processed;
}
-/*
- * Receive a LAPB frame via an ethernet interface.
+/* Receive a LAPB frame via an ethernet interface.
*/
-static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
+static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
{
int len, err;
struct lapbethdev *lapbeth;
@@ -116,7 +116,8 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
if (dev_net(dev) != &init_net)
goto drop;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
if (!pskb_may_pull(skb, 2))
@@ -137,7 +138,8 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
- if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
+ err = lapb_data_received(lapbeth->axdev, skb);
+ if (err != LAPB_OK) {
printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
goto drop_unlock;
}
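
The two octets pulled off above are a little-endian frame length that the transmit side prepends before handing the frame to Ethernet. A sketch of that counterpart (see lapbeth_data_transmit() in the driver for the real thing):

	int size = skb->len;
	u8 *ptr = skb_push(skb, 2);

	ptr[0] = size % 256;	/* low byte first */
	ptr[1] = size / 256;
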
@@ -177,11 +179,10 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/*
- * Send a LAPB frame via an ethernet interface
+/* Send a LAPB frame via an ethernet interface
*/
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
@@ -219,7 +220,8 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
skb_pull(skb, 1);
- if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
+ err = lapb_data_request(dev, skb);
+ if (err != LAPB_OK) {
pr_err("lapb_data_request error - %d\n", err);
goto drop;
}
@@ -263,10 +265,8 @@ static void lapbeth_connected(struct net_device *dev, int reason)
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- pr_err("out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
@@ -283,10 +283,8 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- pr_err("out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
@@ -297,17 +295,16 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
napi_schedule(&lapbeth->napi);
}
-/*
- * Set AX.25 callsign
+/* Set AX.25 callsign
*/
static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
+
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
return 0;
}
-
static const struct lapb_register_struct lapbeth_callbacks = {
.connect_confirmation = lapbeth_connected,
.connect_indication = lapbeth_connected,
@@ -317,8 +314,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
.data_transmit = lapbeth_data_transmit,
};
-/*
- * open/close a device
+/* open/close a device
*/
static int lapbeth_open(struct net_device *dev)
{
@@ -327,7 +323,8 @@ static int lapbeth_open(struct net_device *dev)
napi_enable(&lapbeth->napi);
- if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+ err = lapb_register(dev, &lapbeth_callbacks);
+ if (err != LAPB_OK) {
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
@@ -348,7 +345,8 @@ static int lapbeth_close(struct net_device *dev)
lapbeth->up = false;
spin_unlock_bh(&lapbeth->up_lock);
- if ((err = lapb_unregister(dev)) != LAPB_OK)
+ err = lapb_unregister(dev);
+ if (err != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
napi_disable(&lapbeth->napi);
@@ -375,8 +373,7 @@ static void lapbeth_setup(struct net_device *dev)
dev->addr_len = 0;
}
-/*
- * Setup a new device.
+/* Setup a new device.
*/
static int lapbeth_new_device(struct net_device *dev)
{
@@ -427,8 +424,7 @@ fail:
goto out;
}
-/*
- * Free a lapb network device.
+/* Free a lapb network device.
*/
static void lapbeth_free_device(struct lapbethdev *lapbeth)
{
@@ -437,8 +433,7 @@ static void lapbeth_free_device(struct lapbethdev *lapbeth)
unregister_netdevice(lapbeth->axdev);
}
-/*
- * Handle device status changes.
+/* Handle device status changes.
*
* Called from notifier with RTNL held.
*/
@@ -457,13 +452,13 @@ static int lapbeth_device_event(struct notifier_block *this,
switch (event) {
case NETDEV_UP:
/* New ethernet device -> new LAPB interface */
- if (lapbeth_get_x25_dev(dev) == NULL)
+ if (!lapbeth_get_x25_dev(dev))
lapbeth_new_device(dev);
break;
case NETDEV_GOING_DOWN:
/* ethernet device closes -> close LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
- if (lapbeth)
+ if (lapbeth)
dev_close(lapbeth->axdev);
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 38961793adad..3bd541c868d5 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -9,7 +9,7 @@
*/
int lmc_probe(struct net_device * dev);
unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
- devaddr, unsigned regno);
+ devaddr, unsigned regno);
void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
unsigned regno, unsigned data);
void lmc_led_on(lmc_softc_t * const, u32);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5bf4463873b1..bdb6dc2409bc 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -32,9 +32,8 @@
#include <asm/io.h>
#include "hd64570.h"
-
-static const char* version = "SDL RISCom/N2 driver version: 1.15";
-static const char* devname = "RISCom/N2";
+static const char *version = "SDL RISCom/N2 driver version: 1.15";
+static const char *devname = "RISCom/N2";
#undef DEBUG_PKT
#define DEBUG_RINGS
@@ -64,11 +63,9 @@ static char *hw; /* pointer to hw=xxx command line string */
#define PCR_ENWIN 4 /* Open window */
#define PCR_BUS16 8 /* 16-bit bus */
-
/* Memory Base Address Register */
#define N2_BAR 2
-
/* Page Scan Register */
#define N2_PSR 4
#define WIN16K 0x00
@@ -78,7 +75,6 @@ static char *hw; /* pointer to hw=xxx command line string */
#define PSR_DMAEN 0x80
#define PSR_PAGEBITS 0x0F
-
/* Modem Control Reg */
#define N2_MCR 6
#define CLOCK_OUT_PORT1 0x80
@@ -90,7 +86,6 @@ static char *hw; /* pointer to hw=xxx command line string */
#define DTR_PORT1 0x02
#define DTR_PORT0 0x01
-
typedef struct port_s {
struct net_device *dev;
struct card_s *card;
@@ -106,9 +101,7 @@ typedef struct port_s {
u8 rxs, txs, tmc; /* SCA registers */
u8 phy_node; /* physical port # - 0 or 1 */
u8 log_node; /* logical port # */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
u8 __iomem *winbase; /* ISA window base address */
@@ -122,13 +115,11 @@ typedef struct card_s {
port_t ports[2];
struct card_s *next_card;
-}card_t;
-
+} card_t;
static card_t *first_card;
static card_t **new_card = &first_card;
-
#define sca_reg(reg, card) (0x8000 | (card)->io | \
((reg) & 0x0F) | (((reg) & 0xF0) << 6))
#define sca_in(reg, card) inb(sca_reg(reg, card))
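
sca_reg() folds an HD64570 register number into an ISA port: the low nibble passes through, the high nibble lands in the page-select bits, and 0x8000 picks the SCA window. A worked example (the register and io values are illustrative):

	/* io = 0x300, SCA register 0x2E:
	 * 0x8000 | 0x300 | (0x2E & 0x0F) | ((0x2E & 0xF0) << 6)
	 * = 0x8000 | 0x300 | 0x0E | 0x0800 = 0x8B0E
	 */
	u8 val = sca_in(0x2E, card);	/* inb(0x8B0E) */
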
@@ -144,23 +135,20 @@ static card_t **new_card = &first_card;
#define get_port(card, port) ((card)->ports[port].valid ? \
&(card)->ports[port] : NULL)
-
static __inline__ u8 sca_get_page(card_t *card)
{
return inb(card->io + N2_PSR) & PSR_PAGEBITS;
}
-
static __inline__ void openwin(card_t *card, u8 page)
{
u8 psr = inb(card->io + N2_PSR);
+
outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
}
-
#include "hd64570.c"
-
static void n2_set_iface(port_t *port)
{
card_t *card = port->card;
@@ -170,7 +158,7 @@ static void n2_set_iface(port_t *port)
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
rxs |= CLK_BRG_RX; /* BRG output */
@@ -203,13 +191,12 @@ static void n2_set_iface(port_t *port)
sca_set_port(port);
}
-
-
static int n2_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
- u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
+ u8 mcr = inb(io + N2_MCR) |
+ (port->phy_node ? TX422_PORT1 : TX422_PORT0);
int result;
result = hdlc_open(dev);
@@ -226,13 +213,12 @@ static int n2_open(struct net_device *dev)
return 0;
}
-
-
static int n2_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
- u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+ u8 mcr = inb(io + N2_MCR) |
+ (port->phy_node ? TX422_PORT1 : TX422_PORT0);
sca_close(dev);
mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
@@ -241,8 +227,6 @@ static int n2_close(struct net_device *dev)
return 0;
}
-
-
static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -259,7 +243,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
@@ -271,7 +255,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
case IF_IFACE_SYNC_SERIAL:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
@@ -295,8 +279,6 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void n2_destroy_card(card_t *card)
{
int cnt;
@@ -304,6 +286,7 @@ static void n2_destroy_card(card_t *card)
for (cnt = 0; cnt < 2; cnt++)
if (card->ports[cnt].card) {
struct net_device *dev = port_to_dev(&card->ports[cnt]);
+
unregister_hdlc_device(dev);
}
@@ -354,7 +337,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL)
+ if (!card)
return -ENOBUFS;
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
@@ -486,11 +469,9 @@ static int __init n2_run(unsigned long io, unsigned long irq,
return 0;
}
-
-
static int __init n2_init(void)
{
- if (hw==NULL) {
+ if (!hw) {
#ifdef MODULE
pr_info("no card initialized\n");
#endif
@@ -515,7 +496,7 @@ static int __init n2_init(void)
if (*hw++ != ',')
break;
- while(1) {
+ while (1) {
if (*hw == '0' && !valid[0])
valid[0] = 1; /* Port 0 enabled */
else if (*hw == '1' && !valid[1])
@@ -533,25 +514,24 @@ static int __init n2_init(void)
if (*hw == '\x0')
return first_card ? 0 : -EINVAL;
- }while(*hw++ == ':');
+ } while (*hw++ == ':');
pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
-
static void __exit n2_cleanup(void)
{
card_t *card = first_card;
while (card) {
card_t *ptr = card;
+
card = card->next_card;
n2_destroy_card(ptr);
}
}
-
module_init(n2_init);
module_exit(n2_cleanup);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 001fd378d417..7b123a771aa6 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -44,7 +44,7 @@
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
-static int use_crystal_clock = 0;
+static int use_crystal_clock;
static unsigned int CLOCK_BASE;
/* Masks to access the init_ctrl PLX register */
@@ -52,11 +52,9 @@ static unsigned int CLOCK_BASE;
#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
#define PC300_CTYPE_MASK (0x00000800UL)
-
enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
-/*
- * PLX PCI9050-1 local configuration and shared runtime registers.
+/* PLX PCI9050-1 local configuration and shared runtime registers.
* This structure can be used to access 9050 registers (memory mapped).
*/
typedef struct {
@@ -69,9 +67,7 @@ typedef struct {
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
-}plx9050;
-
-
+} plx9050;
typedef struct port_s {
struct napi_struct napi;
@@ -88,9 +84,7 @@ typedef struct port_s {
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
int type; /* RSV, X21, etc. */
@@ -105,26 +99,24 @@ typedef struct card_s {
u8 irq; /* interrupt request level */
port_t ports[2];
-}card_t;
-
+} card_t;
#define get_port(card, port) ((port) < (card)->n_ports ? \
(&(card)->ports[port]) : (NULL))
#include "hd64572.c"
-
static void pc300_set_iface(port_t *port)
{
card_t *card = port->card;
- u32 __iomem * init_ctrl = &card->plxbase->init_ctrl;
+ u32 __iomem *init_ctrl = &card->plxbase->init_ctrl;
u16 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
@@ -162,13 +154,11 @@ static void pc300_set_iface(port_t *port)
}
}
-
-
static int pc300_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
-
int result = hdlc_open(dev);
+
if (result)
return result;
@@ -177,8 +167,6 @@ static int pc300_open(struct net_device *dev)
return 0;
}
-
-
static int pc300_close(struct net_device *dev)
{
sca_close(dev);
@@ -186,8 +174,6 @@ static int pc300_close(struct net_device *dev)
return 0;
}
-
-
static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -214,7 +200,6 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
-
}
if (port->card->type == PC300_X21 &&
@@ -255,8 +240,6 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
-
-
static void pc300_pci_remove_one(struct pci_dev *pdev)
{
int i;
@@ -314,7 +297,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -338,9 +321,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
- if (card->plxbase == NULL ||
- card->scabase == NULL ||
- card->rambase == NULL) {
+ if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
@@ -365,12 +346,14 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
else
card->n_ports = 2;
- for (i = 0; i < card->n_ports; i++)
- if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
+ for (i = 0; i < card->n_ports; i++) {
+ card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]);
+ if (!card->ports[i].netdev) {
pr_err("unable to allocate memory\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
}
+ }
/* Reset PLX */
p = &card->plxbase->init_ctrl;
@@ -442,6 +425,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
+
port->chan = i;
spin_lock_init(&port->lock);
@@ -472,8 +456,6 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
return 0;
}
-
-
static const struct pci_device_id pc300_pci_tbl[] = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
@@ -486,7 +468,6 @@ static const struct pci_device_id pc300_pci_tbl[] = {
{ 0, }
};
-
static struct pci_driver pc300_pci_driver = {
.name = "PC300",
.id_table = pc300_pci_tbl,
@@ -494,7 +475,6 @@ static struct pci_driver pc300_pci_driver = {
.remove = pc300_pci_remove_one,
};
-
static int __init pc300_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
@@ -511,8 +491,6 @@ static int __init pc300_init_module(void)
return pci_register_driver(&pc300_pci_driver);
}
-
-
static void __exit pc300_cleanup_module(void)
{
pci_unregister_driver(&pc300_pci_driver);
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ba5cc0c53833..dee9c4e15eca 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -42,8 +42,7 @@
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
-/*
- * PLX PCI9052 local configuration and shared runtime registers.
+/* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
typedef struct {
@@ -56,9 +55,7 @@ typedef struct {
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
-}plx9052;
-
-
+} plx9052;
typedef struct port_s {
struct napi_struct napi;
@@ -74,9 +71,7 @@ typedef struct port_s {
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
u8 __iomem *rambase; /* buffer memory base (virtual) */
@@ -88,15 +83,15 @@ typedef struct card_s {
u8 irq; /* interrupt request level */
port_t ports[2];
-}card_t;
-
+} card_t;
-#define get_port(card, port) (&card->ports[port])
+#define get_port(card, port) (&(card)->ports[port])
#define sca_flush(card) (sca_in(IER0, card))
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
int len;
+
do {
len = length > 256 ? 256 : length;
memcpy_toio(dest, src, len);
@@ -112,7 +107,6 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
#include "hd64572.c"
-
static void pci200_set_iface(port_t *port)
{
card_t *card = port->card;
@@ -122,7 +116,7 @@ static void pci200_set_iface(port_t *port)
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
@@ -151,13 +145,11 @@ static void pci200_set_iface(port_t *port)
sca_set_port(port);
}
-
-
static int pci200_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
-
int result = hdlc_open(dev);
+
if (result)
return result;
@@ -167,8 +159,6 @@ static int pci200_open(struct net_device *dev)
return 0;
}
-
-
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
@@ -177,8 +167,6 @@ static int pci200_close(struct net_device *dev)
return 0;
}
-
-
static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -195,7 +183,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
@@ -233,8 +221,6 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
int i;
@@ -292,7 +278,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -314,18 +300,16 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return -EFAULT;
}
- plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+ plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
- scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+ scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
- ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
+ ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
- if (card->plxbase == NULL ||
- card->scabase == NULL ||
- card->rambase == NULL) {
+ if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
@@ -380,6 +364,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
+
port->chan = i;
spin_lock_init(&port->lock);
@@ -407,15 +392,12 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return 0;
}
-
-
static const struct pci_device_id pci200_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
-
static struct pci_driver pci200_pci_driver = {
.name = "PCI200SYN",
.id_table = pci200_pci_tbl,
@@ -423,7 +405,6 @@ static struct pci_driver pci200_pci_driver = {
.remove = pci200_pci_remove_one,
};
-
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
@@ -433,8 +414,6 @@ static int __init pci200_init_module(void)
return pci_register_driver(&pci200_pci_driver);
}
-
-
static void __exit pci200_cleanup_module(void)
{
pci_unregister_driver(&pci200_pci_driver);
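pci200_init_module() above refuses to load when the clock parameter falls outside 1-80 MHz. A userspace sketch of the same validate-before-register pattern; the function name and the -1 return standing in for -EINVAL are illustrative only:

#include <stdio.h>

static int pci_clock_freq = 33000000;	/* default, as in the driver */

static int init_sketch(void)
{
	if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
		fprintf(stderr, "PCI200SYN: invalid PCI clock frequency\n");
		return -1;		/* the driver returns -EINVAL */
	}
	return 0;			/* pci_register_driver() would go here */
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}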
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 7dddc9dcbe23..4403e219ca03 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Sealevel Systems 4021 driver.
+/* Sealevel Systems 4021 driver.
*
* (c) Copyright 1999, 2001 Alan Cox
* (c) Copyright 2001 Red Hat Inc.
@@ -29,32 +28,25 @@
#include <asm/byteorder.h>
#include "z85230.h"
-
-struct slvl_device
-{
+struct slvl_device {
struct z8530_channel *chan;
int channel;
};
-
-struct slvl_board
-{
+struct slvl_board {
struct slvl_device dev[2];
struct z8530_dev board;
int iobase;
};
-/*
- * Network driver support routines
- */
+/* Network driver support routines */
-static inline struct slvl_device* dev_to_chan(struct net_device *dev)
+static inline struct slvl_device *dev_to_chan(struct net_device *dev)
{
return (struct slvl_device *)dev_to_hdlc(dev)->priv;
}
-/*
- * Frame receive. Simple for our card as we do HDLC and there
+/* Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
@@ -68,9 +60,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
netif_rx(skb);
}
-/*
- * We've been placed in the UP state
- */
+/* We've been placed in the UP state */
static int sealevel_open(struct net_device *d)
{
@@ -78,17 +68,15 @@ static int sealevel_open(struct net_device *d)
int err = -1;
int unit = slvl->channel;
- /*
- * Link layer up.
- */
+ /* Link layer up. */
switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
+ case 0:
+ err = z8530_sync_dma_open(d, slvl->chan);
+ break;
+ case 1:
+ err = z8530_sync_open(d, slvl->chan);
+ break;
}
if (err)
@@ -97,21 +85,18 @@ static int sealevel_open(struct net_device *d)
err = hdlc_open(d);
if (err) {
switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
}
return err;
}
slvl->chan->rx_function = sealevel_input;
- /*
- * Go go go
- */
netif_start_queue(d);
return 0;
}
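sealevel_open() above opens the Z8530 channel first (DMA for unit 0, PIO for unit 1) and unwinds that step if hdlc_open() then fails. A compact sketch of that acquire-then-rollback shape, with hypothetical stub functions in place of the z8530/hdlc calls:

#include <stdio.h>

static int chan_open(int unit)   { printf("chan %d open\n", unit); return 0; }
static void chan_close(int unit) { printf("chan %d closed\n", unit); }
static int upper_layer_open(void) { return -1; /* force the error path */ }

static int open_with_rollback(int unit)
{
	int err = chan_open(unit);

	if (err)
		return err;

	err = upper_layer_open();
	if (err) {
		chan_close(unit);	/* undo step 1 when step 2 fails */
		return err;
	}
	return 0;
}

int main(void)
{
	return open_with_rollback(0) ? 1 : 0;
}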
@@ -121,9 +106,7 @@ static int sealevel_close(struct net_device *d)
struct slvl_device *slvl = dev_to_chan(d);
int unit = slvl->channel;
- /*
- * Discard new frames
- */
+ /* Discard new frames */
slvl->chan->rx_function = z8530_null_rx;
@@ -131,12 +114,12 @@ static int sealevel_close(struct net_device *d)
netif_stop_queue(d);
switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
}
return 0;
}
@@ -144,16 +127,15 @@ static int sealevel_close(struct net_device *d)
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
/* struct slvl_device *slvl=dev_to_chan(d);
- z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
+ * z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd)
+ */
return hdlc_ioctl(d, ifr, cmd);
}
-/*
- * Passed network frames, fire them downwind.
- */
+/* Passed network frames, fire them downwind. */
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
+ struct net_device *d)
{
return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}
@@ -176,6 +158,7 @@ static const struct net_device_ops sealevel_ops = {
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
struct net_device *dev = alloc_hdlcdev(sv);
+
if (!dev)
return -1;
@@ -195,10 +178,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
return 0;
}
-
-/*
- * Allocate and setup Sealevel board.
- */
+/* Allocate and setup Sealevel board. */
static __init struct slvl_board *slvl_init(int iobase, int irq,
int txdma, int rxdma, int slow)
@@ -206,9 +186,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
struct z8530_dev *dev;
struct slvl_board *b;
- /*
- * Get the needed I/O space
- */
+ /* Get the needed I/O space */
if (!request_region(iobase, 8, "Sealevel 4021")) {
pr_warn("I/O 0x%X already in use\n", iobase);
@@ -227,17 +205,13 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
dev = &b->board;
- /*
- * Stuff in the I/O addressing
- */
+ /* Stuff in the I/O addressing */
dev->active = 0;
b->iobase = iobase;
- /*
- * Select 8530 delays for the old board
- */
+ /* Select 8530 delays for the old board */
if (slow)
iobase |= Z8530_PORT_SLEEP;
@@ -250,15 +224,13 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
dev->chanA.irqs = &z8530_nop;
dev->chanB.irqs = &z8530_nop;
- /*
- * Assert DTR enable DMA
- */
+ /* Assert DTR enable DMA */
outb(3 | (1 << 7), b->iobase + 4);
-
/* We want a fast IRQ for this device. Actually we'd like an even faster
- IRQ ;) - This is one driver RtLinux is made for */
+ * IRQ ;) - This is one driver RtLinux is made for
+ */
if (request_irq(irq, z8530_interrupt, 0,
"SeaLevel", dev) < 0) {
@@ -282,9 +254,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
disable_irq(irq);
- /*
- * Begin normal initialise
- */
+ /* Begin normal initialise */
if (z8530_init(dev) != 0) {
pr_err("Z8530 series device not found\n");
@@ -299,9 +269,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
}
- /*
- * Now we can take the IRQ
- */
+ /* Now we can take the IRQ */
enable_irq(irq);
@@ -338,6 +306,7 @@ static void __exit slvl_shutdown(struct slvl_board *b)
for (u = 0; u < 2; u++) {
struct net_device *d = b->dev[u].chan->netdevice;
+
unregister_hdlc_device(d);
free_netdev(d);
}
@@ -351,12 +320,11 @@ static void __exit slvl_shutdown(struct slvl_board *b)
kfree(b);
}
-
-static int io=0x238;
-static int txdma=1;
-static int rxdma=3;
-static int irq=5;
-static bool slow=false;
+static int io = 0x238;
+static int txdma = 1;
+static int rxdma = 3;
+static int irq = 5;
+static bool slow;
module_param_hw(io, int, ioport, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
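The last hunk drops the "= false" initializer because objects with static storage duration are zero-initialized by the C standard, so the explicit value only adds noise. A two-line demonstration:

#include <stdio.h>
#include <stdbool.h>

static bool slow;	/* static storage: starts false without "= false" */
static int rxdma;	/* likewise starts 0 */

int main(void)
{
	printf("slow=%d rxdma=%d\n", slow, rxdma);	/* prints slow=0 rxdma=0 */
	return 0;
}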
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index a83133388de9..f22e48415e6f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -32,7 +32,7 @@
#include "wanxl.h"
-static const char* version = "wanXL serial card driver version: 0.48";
+static const char *version = "wanXL serial card driver version: 0.48";
#define PLX_CTL_RESET 0x40000000 /* adapter reset */
@@ -50,24 +50,21 @@ static const char* version = "wanXL serial card driver version: 0.48";
/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
-
struct port {
struct net_device *dev;
struct card *card;
spinlock_t lock; /* for wanxl_xmit */
- int node; /* physical port #0 - 3 */
+ int node; /* physical port #0 - 3 */
unsigned int clock_type;
int tx_in, tx_out;
struct sk_buff *tx_skbs[TX_BUFFERS];
};
-
struct card_status {
desc_t rx_descs[RX_QUEUE_LENGTH];
port_status_t port_status[4];
};
-
struct card {
int n_ports; /* 1, 2 or 4 ports */
u8 irq;
@@ -81,25 +78,22 @@ struct card {
struct port ports[]; /* 1 - 4 port structures follow */
};
-
-
static inline struct port *dev_to_port(struct net_device *dev)
{
return (struct port *)dev_to_hdlc(dev)->priv;
}
-
static inline port_status_t *get_status(struct port *port)
{
return &port->card->status->port_status[port->node];
}
-
#ifdef DEBUG_PCI
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
size_t size, int direction)
{
dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);
+
if (addr + size > 0x100000000LL)
pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
pci_name(pdev), (unsigned long long)addr);
@@ -110,7 +104,6 @@ static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
#define pci_map_single pci_map_single_debug
#endif
-
/* Cable and/or personality module change interrupt service */
static inline void wanxl_cable_intr(struct port *port)
{
@@ -118,22 +111,46 @@ static inline void wanxl_cable_intr(struct port *port)
int valid = 1;
const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
- switch(value & 0x7) {
- case STATUS_CABLE_V35: cable = "V.35"; break;
- case STATUS_CABLE_X21: cable = "X.21"; break;
- case STATUS_CABLE_V24: cable = "V.24"; break;
- case STATUS_CABLE_EIA530: cable = "EIA530"; break;
- case STATUS_CABLE_NONE: cable = "no"; break;
- default: cable = "invalid";
+ switch (value & 0x7) {
+ case STATUS_CABLE_V35:
+ cable = "V.35";
+ break;
+ case STATUS_CABLE_X21:
+ cable = "X.21";
+ break;
+ case STATUS_CABLE_V24:
+ cable = "V.24";
+ break;
+ case STATUS_CABLE_EIA530:
+ cable = "EIA530";
+ break;
+ case STATUS_CABLE_NONE:
+ cable = "no";
+ break;
+ default:
+ cable = "invalid";
}
- switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
- case STATUS_CABLE_V35: pm = "V.35"; break;
- case STATUS_CABLE_X21: pm = "X.21"; break;
- case STATUS_CABLE_V24: pm = "V.24"; break;
- case STATUS_CABLE_EIA530: pm = "EIA530"; break;
- case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
- default: pm = "invalid personality"; valid = 0;
+ switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
+ case STATUS_CABLE_V35:
+ pm = "V.35";
+ break;
+ case STATUS_CABLE_X21:
+ pm = "X.21";
+ break;
+ case STATUS_CABLE_V24:
+ pm = "V.24";
+ break;
+ case STATUS_CABLE_EIA530:
+ pm = "EIA530";
+ break;
+ case STATUS_CABLE_NONE:
+ pm = "no personality";
+ valid = 0;
+ break;
+ default:
+ pm = "invalid personality";
+ valid = 0;
}
if (valid) {
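Both switches above decode 3-bit fields packed into one status word: the cable code in the low bits and the personality-module code above STATUS_CABLE_PM_SHIFT. A standalone sketch with made-up constant values (the real encodings live in wanxl.h):

#include <stdio.h>

#define CABLE_V35	2	/* hypothetical codes, not the real ones */
#define CABLE_X21	3
#define CABLE_NONE	0
#define PM_SHIFT	4

static const char *cable_name(unsigned int code)
{
	switch (code & 0x7) {
	case CABLE_V35:  return "V.35";
	case CABLE_X21:  return "X.21";
	case CABLE_NONE: return "no";
	default:         return "invalid";
	}
}

int main(void)
{
	unsigned int status = (CABLE_X21 << PM_SHIFT) | CABLE_V35;

	printf("cable: %s, pm: %s\n",
	       cable_name(status),		/* low 3 bits */
	       cable_name(status >> PM_SHIFT));	/* next 3-bit field */
	return 0;
}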
@@ -154,14 +171,13 @@ static inline void wanxl_cable_intr(struct port *port)
netif_carrier_off(port->dev);
}
-
-
/* Transmit complete interrupt service */
static inline void wanxl_tx_intr(struct port *port)
{
struct net_device *dev = port->dev;
+
while (1) {
- desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
+ desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
struct sk_buff *skb = port->tx_skbs[port->tx_in];
switch (desc->stat) {
@@ -179,34 +195,33 @@ static inline void wanxl_tx_intr(struct port *port)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
- desc->stat = PACKET_EMPTY; /* Free descriptor */
+ desc->stat = PACKET_EMPTY; /* Free descriptor */
dma_unmap_single(&port->card->pdev->dev, desc->address,
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
- port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
- }
+ port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
+ }
}
-
-
/* Receive complete interrupt service */
static inline void wanxl_rx_intr(struct card *card)
{
desc_t *desc;
+
while (desc = &card->status->rx_descs[card->rx_in],
desc->stat != PACKET_EMPTY) {
- if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
+ if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) {
pr_crit("%s: received packet for nonexistent port\n",
pci_name(card->pdev));
- else {
+ } else {
struct sk_buff *skb = card->rx_skbs[card->rx_in];
struct port *port = &card->ports[desc->stat &
PACKET_PORT_MASK];
struct net_device *dev = port->dev;
- if (!skb)
+ if (!skb) {
dev->stats.rx_dropped++;
- else {
+ } else {
dma_unmap_single(&card->pdev->dev,
desc->address, BUFFER_LENGTH,
DMA_FROM_DEVICE);
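wanxl_rx_intr() above walks the shared descriptor ring until it meets an empty slot, using the comma operator so the pointer is refreshed on every pass. A runnable model of that loop with a hypothetical 4-entry ring and a PKT_EMPTY sentinel standing in for PACKET_EMPTY:

#include <stdio.h>

#define RING_LEN	4
#define PKT_EMPTY	0	/* hypothetical sentinel value */

struct desc { int stat; };

static struct desc ring[RING_LEN] = { {1}, {1}, {PKT_EMPTY}, {PKT_EMPTY} };
static int rx_in;

int main(void)
{
	struct desc *d;

	/* Same shape as the wanxl RX loop: stop at the first empty slot,
	 * hand each filled slot back to the producer, wrap with modulo.
	 */
	while (d = &ring[rx_in], d->stat != PKT_EMPTY) {
		printf("consumed slot %d (stat %d)\n", rx_in, d->stat);
		d->stat = PKT_EMPTY;
		rx_in = (rx_in + 1) % RING_LEN;
	}
	return 0;
}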
@@ -239,21 +254,18 @@ static inline void wanxl_rx_intr(struct card *card)
}
}
-
-
-static irqreturn_t wanxl_intr(int irq, void* dev_id)
+static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
struct card *card = dev_id;
- int i;
- u32 stat;
- int handled = 0;
-
+ int i;
+ u32 stat;
+ int handled = 0;
- while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
- handled = 1;
+ while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
+ handled = 1;
writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
- for (i = 0; i < card->n_ports; i++) {
+ for (i = 0; i < card->n_ports; i++) {
if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
wanxl_tx_intr(&card->ports[i]);
if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
@@ -261,23 +273,21 @@ static irqreturn_t wanxl_intr(int irq, void* dev_id)
}
if (stat & (1 << DOORBELL_FROM_CARD_RX))
wanxl_rx_intr(card);
- }
+ }
- return IRQ_RETVAL(handled);
+ return IRQ_RETVAL(handled);
}
-
-
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
desc_t *desc;
- spin_lock(&port->lock);
+ spin_lock(&port->lock);
desc = &get_status(port)->tx_descs[port->tx_out];
- if (desc->stat != PACKET_EMPTY) {
- /* should never happen - previous xmit should stop queue */
+ if (desc->stat != PACKET_EMPTY) {
+ /* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
@@ -312,8 +322,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
-
static int wanxl_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -335,8 +343,6 @@ static int wanxl_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-
-
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -384,11 +390,9 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
default:
return hdlc_ioctl(dev, ifr, cmd);
- }
+ }
}
-
-
static int wanxl_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -400,7 +404,9 @@ static int wanxl_open(struct net_device *dev)
netdev_err(dev, "port already open\n");
return -EIO;
}
- if ((i = hdlc_open(dev)) != 0)
+
+ i = hdlc_open(dev);
+ if (i)
return i;
port->tx_in = port->tx_out = 0;
@@ -423,8 +429,6 @@ static int wanxl_open(struct net_device *dev)
return -EFAULT;
}
-
-
static int wanxl_close(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -461,8 +465,6 @@ static int wanxl_close(struct net_device *dev)
return 0;
}
-
-
static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -474,8 +476,6 @@ static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
return &dev->stats;
}
-
-
static int wanxl_puts_command(struct card *card, u32 cmd)
{
unsigned long timeout = jiffies + 5 * HZ;
@@ -486,13 +486,11 @@ static int wanxl_puts_command(struct card *card, u32 cmd)
return 0;
schedule();
- }while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
return -1;
}
-
-
static void wanxl_reset(struct card *card)
{
u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
@@ -505,8 +503,6 @@ static void wanxl_reset(struct card *card)
readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
-
-
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
struct card *card = pci_get_drvdata(pdev);
@@ -543,7 +539,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
kfree(card);
}
-
#include "wanxlfw.inc"
static const struct net_device_ops wanxl_ops = {
@@ -574,12 +569,14 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
return i;
/* QUICC can only access first 256 MB of host RAM directly,
- but PLX9060 DMA does 32-bits for actual packet data transfers */
+ * but PLX9060 DMA does 32-bits for actual packet data transfers
+ */
/* FIXME when PCI/DMA subsystems are fixed.
- We set both dma_mask and consistent_dma_mask to 28 bits
- and pray pci_alloc_consistent() will use this info. It should
- work on most platforms */
+ * We set both dma_mask and consistent_dma_mask to 28 bits
+ * and pray pci_alloc_consistent() will use this info. It should
+ * work on most platforms
+ */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
pr_err("No usable DMA configuration\n");
@@ -594,13 +591,18 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
}
switch (pdev->device) {
- case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
- case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
- default: ports = 4;
+ case PCI_DEVICE_ID_SBE_WANXL100:
+ ports = 1;
+ break;
+ case PCI_DEVICE_ID_SBE_WANXL200:
+ ports = 2;
+ break;
+ default:
+ ports = 4;
}
card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -612,7 +614,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
card->status = dma_alloc_coherent(&pdev->dev,
sizeof(struct card_status),
&card->status_address, GFP_KERNEL);
- if (card->status == NULL) {
+ if (!card->status) {
wanxl_pci_remove_one(pdev);
return -ENOBUFS;
}
@@ -624,8 +626,9 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
#endif
/* FIXME when PCI/DMA subsystems are fixed.
- We set both dma_mask and consistent_dma_mask back to 32 bits
- to indicate the card can do 32-bit DMA addressing */
+ * We set both dma_mask and consistent_dma_mask back to 32 bits
+ * to indicate the card can do 32-bit DMA addressing
+ */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_err("No usable DMA configuration\n");
@@ -639,7 +642,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
card->plx = ioremap(plx_phy, 0x70);
if (!card->plx) {
pr_err("ioremap() failed\n");
- wanxl_pci_remove_one(pdev);
+ wanxl_pci_remove_one(pdev);
return -EFAULT;
}
@@ -656,7 +659,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- switch(stat & 0xC0) {
+ switch (stat & 0xC0) {
case 0x00: /* hmm - PUTS completed with non-zero code? */
case 0x80: /* PUTS still testing the hardware */
break;
@@ -677,7 +680,6 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
/* set up on-board RAM mapping */
mem_phy = pci_resource_start(pdev, 2);
-
/* sanity check the board's reported memory size */
if (ramsize < BUFFERS_ADDR +
(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
@@ -697,6 +699,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
for (i = 0; i < RX_QUEUE_LENGTH; i++) {
struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
+
card->rx_skbs[i] = skb;
if (skb)
card->status->rx_descs[i].address =
@@ -707,12 +710,12 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
pr_err("ioremap() failed\n");
- wanxl_pci_remove_one(pdev);
+ wanxl_pci_remove_one(pdev);
return -EFAULT;
}
for (i = 0; i < sizeof(firmware); i += 4)
- writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);
+ writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);
for (i = 0; i < ports; i++)
writel(card->status_address +
@@ -732,10 +735,11 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
timeout = jiffies + 5 * HZ;
do {
- if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
+ stat = readl(card->plx + PLX_MAILBOX_5);
+ if (stat)
break;
schedule();
- }while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
if (!stat) {
pr_warn("%s: timeout while initializing card firmware\n",
@@ -764,6 +768,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
hdlc_device *hdlc;
struct port *port = &card->ports[i];
struct net_device *dev = alloc_hdlcdev(port);
+
if (!dev) {
pr_err("%s: unable to allocate memory\n",
pci_name(pdev));
@@ -813,7 +818,6 @@ static const struct pci_device_id wanxl_pci_tbl[] = {
{ 0, }
};
-
static struct pci_driver wanxl_pci_driver = {
.name = "wanXL",
.id_table = wanxl_pci_tbl,
@@ -821,7 +825,6 @@ static struct pci_driver wanxl_pci_driver = {
.remove = wanxl_pci_remove_one,
};
-
static int __init wanxl_init_module(void)
{
#ifdef MODULE
@@ -835,7 +838,6 @@ static void __exit wanxl_cleanup_module(void)
pci_unregister_driver(&wanxl_pci_driver);
}
-
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 138930c66ad2..982a03488a00 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *
- * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
* (c) Copyright 2000, 2001 Red Hat Inc
*
* Development of this driver was funded by Equiinet Ltd
@@ -12,7 +10,7 @@
* Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
* unification of all the Z85x30 asynchronous drivers for real.
*
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
+ * DMA now uses get_free_page as kmalloc buffers may span a 64K
* boundary.
*
* Modified for SMP safety and SMP locking by Alan Cox
@@ -55,14 +53,13 @@
#include "z85230.h"
-
/**
* z8530_read_port - Architecture specific interface function
* @p: port to read
*
* Provided port access methods. The Comtrol SV11 requires no delays
* between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
+ *
* In the longer term this should become an architecture specific
* section so that this can become a generic driver interface for all
* platforms. For now we only handle PC I/O ports with or without the
@@ -74,8 +71,9 @@
static inline int z8530_read_port(unsigned long p)
{
- u8 r=inb(Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
+ u8 r = inb(Z8530_PORT_OF(p));
+
+ if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
udelay(5);
return r;
}
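z8530_read_port() above hides the board quirk inside the port value itself: a high flag bit (Z8530_PORT_SLEEP) requests the 5 us settle delay, and Z8530_PORT_OF() strips it back off before the real I/O access. A sketch of that encoding with hypothetical bit assignments, not the header's real ones:

#include <stdio.h>

#define PORT_SLEEP	0x80000000UL	/* assumed flag bit */
#define PORT_OF(p)	((p) & 0xFFFFUL)

static unsigned int read_port(unsigned long p)
{
	unsigned int v = 0x42;		/* stands in for inb(PORT_OF(p)) */

	if (p & PORT_SLEEP)
		printf("(5 us settle delay here)\n");	/* udelay(5) in the driver */
	return v;
}

int main(void)
{
	unsigned long port = 0x238 | PORT_SLEEP;

	printf("io=0x%lx val=0x%x\n", PORT_OF(port), read_port(port));
	return 0;
}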
@@ -95,34 +93,30 @@ static inline int z8530_read_port(unsigned long p)
* dread 5uS sanity delay.
*/
-
static inline void z8530_write_port(unsigned long p, u8 d)
{
- outb(d,Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP)
+ outb(d, Z8530_PORT_OF(p));
+ if (p & Z8530_PORT_SLEEP)
udelay(5);
}
-
-
static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);
-
/**
- * read_zsreg - Read a register from a Z85230
+ * read_zsreg - Read a register from a Z85230
* @c: Z8530 channel to read from (2 per chip)
* @reg: Register to read
* FIXME: Use a spinlock.
- *
+ *
* Most of the Z8530 registers are indexed off the control registers.
* A read is done by writing to the control register and reading the
* register back. The caller must hold the lock
*/
-
+
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
return z8530_read_port(c->ctrlio);
}
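read_zsreg() above is the classic indexed-register dance: write the register number to the control port, then the next access to that same port hits the selected register, and register 0 needs no select cycle at all. A toy model of the protocol; the register contents here are arbitrary:

#include <stdio.h>

static unsigned char regs[16] = { [0] = 0xAA, [12] = 0x55 };
static unsigned char selected;

static void ctrl_write(unsigned char v) { selected = v & 0x0F; }

static unsigned char ctrl_read(void)
{
	unsigned char v = regs[selected];

	selected = 0;			/* the access consumes the selection */
	return v;
}

static unsigned char read_zsreg_sketch(unsigned char reg)
{
	if (reg)			/* register 0 skips the select cycle */
		ctrl_write(reg);
	return ctrl_read();
}

int main(void)
{
	printf("R0=0x%02X R12=0x%02X\n",
	       read_zsreg_sketch(0), read_zsreg_sketch(12));
	return 0;
}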
@@ -138,7 +132,8 @@ static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
static inline u8 read_zsdata(struct z8530_channel *c)
{
u8 r;
- r=z8530_read_port(c->dataio);
+
+ r = z8530_read_port(c->dataio);
return r;
}
@@ -156,10 +151,9 @@ static inline u8 read_zsdata(struct z8530_channel *c)
*/
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
z8530_write_port(c->ctrlio, val);
-
}
/**
@@ -182,108 +176,94 @@ static inline void write_zsctrl(struct z8530_channel *c, u8 val)
*
* Write directly to the data register on the Z8530
*/
-
-
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
z8530_write_port(c->dataio, val);
}
-/*
- * Register loading parameters for a dead port
+/* Register loading parameters for a dead port
*/
-
-u8 z8530_dead_port[]=
-{
+
+u8 z8530_dead_port[] = {
255
};
-
EXPORT_SYMBOL(z8530_dead_port);
-/*
- * Register loading parameters for currently supported circuit types
+/* Register loading parameters for currently supported circuit types
*/
-
-/*
- * Data clocked by telco end. This is the correct data for the UK
+/* Data clocked by telco end. This is the correct data for the UK
* "kilostream" service, and most other similar services.
*/
-
-u8 z8530_hdlc_kilostream[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
+ 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream);
-/*
- * As above but for enhanced chips.
+/* As above but for enhanced chips.
*/
-
-u8 z8530_hdlc_kilostream_85230[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream_85230[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
+ 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
23, 3, /* Extended mode AUTO TX and EOM*/
-
+
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
/**
* z8530_flush_fifo - Flush on chip RX FIFO
* @c: Channel to flush
*
- * Flush the receive FIFO. There is no specific option for this, we
+ * Flush the receive FIFO. There is no specific option for this, we
* blindly read bytes and discard them. Reading when there is no data
* is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
+ *
* All locking is handled for the caller. On return data may still be
* present if it arrived during the flush.
*/
-
+
static void z8530_flush_fifo(struct z8530_channel *c)
{
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
- if(c->dev->type==Z85230)
- {
+ if (c->dev->type == Z85230) {
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
}
-}
+}
/**
* z8530_rtsdtr - Control the outgoing DTS/RTS line
@@ -309,7 +289,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* z8530_rx - Handle a PIO receive event
* @c: Z8530 channel to process
*
- * Receive handler for receiving in PIO mode. This is much like the
+ * Receive handler for receiving in PIO mode. This is much like the
* async one but not quite the same or as complex
*
* Note: Its intended that this handler can easily be separated from
@@ -322,77 +302,63 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* other code - this is true in the RT case too.
*
* We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
+ * do it yourself but consider medical assistance first. This non DMA
+ * synchronous mode is portable code. The DMA mode assumes PCI like
* ISA DMA
*
* Called with the device lock held
*/
-
+
static void z8530_rx(struct z8530_channel *c)
{
- u8 ch,stat;
+ u8 ch, stat;
- while(1)
- {
+ while (1) {
/* FIFO empty ? */
- if(!(read_zsreg(c, R0)&1))
+ if (!(read_zsreg(c, R0) & 1))
break;
- ch=read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- /*
- * Overrun ?
+ ch = read_zsdata(c);
+ stat = read_zsreg(c, R1);
+
+ /* Overrun ?
*/
- if(c->count < c->max)
- {
- *c->dptr++=ch;
+ if (c->count < c->max) {
+ *c->dptr++ = ch;
c->count++;
}
- if(stat&END_FR)
- {
-
- /*
- * Error ?
+ if (stat & END_FR) {
+ /* Error ?
*/
- if(stat&(Rx_OVR|CRC_ERR))
- {
+ if (stat & (Rx_OVR | CRC_ERR)) {
/* Rewind the buffer and return */
- if(c->skb)
- c->dptr=c->skb->data;
- c->count=0;
- if(stat&Rx_OVR)
- {
+ if (c->skb)
+ c->dptr = c->skb->data;
+ c->count = 0;
+ if (stat & Rx_OVR) {
pr_warn("%s: overrun\n", c->dev->name);
c->rx_overrun++;
}
- if(stat&CRC_ERR)
- {
+ if (stat & CRC_ERR) {
c->rx_crc_err++;
/* printk("crc error\n"); */
}
/* Shove the frame upstream */
- }
- else
- {
- /*
- * Drop the lock for RX processing, or
- * there are deadlocks
- */
+ } else {
+ /* Drop the lock for RX processing, or
+ * there are deadlocks
+ */
z8530_rx_done(c);
write_zsctrl(c, RES_Rx_CRC);
}
}
}
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
}
-
/**
* z8530_tx - Handle a PIO transmit event
* @c: Z8530 channel to process
@@ -402,35 +368,31 @@ static void z8530_rx(struct z8530_channel *c)
* in as possible, its quite possible that we won't keep up with the
* data rate otherwise.
*/
-
+
static void z8530_tx(struct z8530_channel *c)
{
- while(c->txcount) {
+ while (c->txcount) {
/* FIFO full ? */
- if(!(read_zsreg(c, R0)&4))
+ if (!(read_zsreg(c, R0) & 4))
return;
c->txcount--;
- /*
- * Shovel out the byte
+ /* Shovel out the byte
*/
write_zsreg(c, R8, *c->tx_ptr++);
write_zsctrl(c, RES_H_IUS);
/* We are about to underflow */
- if(c->txcount==0)
- {
+ if (c->txcount == 0) {
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
}
}
-
- /*
- * End of frame TX - fire another one
+ /* End of frame TX - fire another one
*/
-
+
write_zsctrl(c, RES_Tx_P);
- z8530_tx_done(c);
+ z8530_tx_done(c);
write_zsctrl(c, RES_H_IUS);
}
@@ -460,8 +422,7 @@ static void z8530_status(struct z8530_channel *chan)
z8530_tx_done(chan);
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -474,7 +435,6 @@ static void z8530_status(struct z8530_channel *chan)
if (chan->netdevice)
netif_carrier_off(chan->netdevice);
}
-
}
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -485,7 +445,6 @@ struct z8530_irqhandler z8530_sync = {
.tx = z8530_tx,
.status = z8530_status,
};
-
EXPORT_SYMBOL(z8530_sync);
/**
@@ -497,31 +456,27 @@ EXPORT_SYMBOL(z8530_sync);
* events are handled by the DMA hardware. We get a kick here only if
* a frame ended.
*/
-
+
static void z8530_dma_rx(struct z8530_channel *chan)
{
- if(chan->rxdma_on)
- {
+ if (chan->rxdma_on) {
/* Special condition check only */
u8 status;
-
+
read_zsreg(chan, R7);
read_zsreg(chan, R6);
-
- status=read_zsreg(chan, R1);
-
- if(status&END_FR)
- {
+
+ status = read_zsreg(chan, R1);
+
+ if (status & END_FR)
z8530_rx_done(chan); /* Fire up the next one */
- }
+
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_H_IUS);
- }
- else
- {
+ } else {
/* DMA is off right now, drain the slow way */
z8530_rx(chan);
- }
+ }
}
/**
@@ -531,11 +486,9 @@ static void z8530_dma_rx(struct z8530_channel *chan)
* We have received an interrupt while doing DMA transmissions. It
* shouldn't happen. Scream loudly if it does.
*/
-
static void z8530_dma_tx(struct z8530_channel *chan)
{
- if(!chan->dma_tx)
- {
+ if (!chan->dma_tx) {
pr_warn("Hey who turned the DMA off?\n");
z8530_tx(chan);
return;
@@ -548,40 +501,35 @@ static void z8530_dma_tx(struct z8530_channel *chan)
/**
* z8530_dma_status - Handle a DMA status exception
* @chan: Z8530 channel to process
- *
+ *
* A status event occurred on the Z8530. We receive these for two reasons
* when in DMA mode. Firstly if we finished a packet transfer we get one
* and kick the next packet out. Secondly we may see a DCD change.
*
*/
-
static void z8530_dma_status(struct z8530_channel *chan)
{
u8 status, altered;
- status=read_zsreg(chan, R0);
- altered=chan->status^status;
-
- chan->status=status;
+ status = read_zsreg(chan, R0);
+ altered = chan->status ^ status;
+ chan->status = status;
- if(chan->dma_tx)
- {
- if(status&TxEOM)
- {
+ if (chan->dma_tx) {
+ if (status & TxEOM) {
unsigned long flags;
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on=0;
+ clear_dma_ff(chan->txdma);
+ chan->txdma_on = 0;
release_dma_lock(flags);
z8530_tx_done(chan);
}
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -621,21 +569,18 @@ static struct z8530_irqhandler z8530_txdma_sync = {
* (eg the MacII) we must clear the interrupt cause or die.
*/
-
static void z8530_rx_clear(struct z8530_channel *c)
{
- /*
- * Data and status bytes
+ /* Data and status bytes
*/
u8 stat;
read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- if(stat&END_FR)
+ stat = read_zsreg(c, R1);
+
+ if (stat & END_FR)
write_zsctrl(c, RES_Rx_CRC);
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
@@ -667,8 +612,9 @@ static void z8530_tx_clear(struct z8530_channel *c)
static void z8530_status_clear(struct z8530_channel *chan)
{
- u8 status=read_zsreg(chan, R0);
- if(status&TxEOM)
+ u8 status = read_zsreg(chan, R0);
+
+ if (status & TxEOM)
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -679,13 +625,11 @@ struct z8530_irqhandler z8530_nop = {
.tx = z8530_tx_clear,
.status = z8530_status_clear,
};
-
-
EXPORT_SYMBOL(z8530_nop);
/**
* z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
+ * @irq: Interrupt number
* @dev_id: The Z8530 device that is interrupting.
*
* A Z85[2]30 device has stuck its hand in the air for attention.
@@ -701,78 +645,73 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
- struct z8530_dev *dev=dev_id;
+ struct z8530_dev *dev = dev_id;
u8 intr;
static volatile int locker=0;
- int work=0;
+ int work = 0;
struct z8530_irqhandler *irqs;
-
- if(locker)
- {
+
+ if (locker) {
pr_err("IRQ re-enter\n");
return IRQ_NONE;
}
- locker=1;
+ locker = 1;
spin_lock(&dev->lock);
- while(++work<5000)
- {
-
+ while (++work < 5000) {
intr = read_zsreg(&dev->chanA, R3);
- if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
+ if (!(intr &
+ (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
break;
-
- /* This holds the IRQ status. On the 8530 you must read it from chan
- A even though it applies to the whole chip */
-
+
+ /* This holds the IRQ status. On the 8530 you must read it
+ * from chan A even though it applies to the whole chip
+ */
+
/* Now walk the chip and see what it is wanting - it may be
- an IRQ for someone else remember */
-
- irqs=dev->chanA.irqs;
+ * an IRQ for someone else remember
+ */
+
+ irqs = dev->chanA.irqs;
- if(intr & (CHARxIP|CHATxIP|CHAEXT))
- {
- if(intr&CHARxIP)
+ if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
+ if (intr & CHARxIP)
irqs->rx(&dev->chanA);
- if(intr&CHATxIP)
+ if (intr & CHATxIP)
irqs->tx(&dev->chanA);
- if(intr&CHAEXT)
+ if (intr & CHAEXT)
irqs->status(&dev->chanA);
}
- irqs=dev->chanB.irqs;
+ irqs = dev->chanB.irqs;
- if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
- {
- if(intr&CHBRxIP)
+ if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
+ if (intr & CHBRxIP)
irqs->rx(&dev->chanB);
- if(intr&CHBTxIP)
+ if (intr & CHBTxIP)
irqs->tx(&dev->chanB);
- if(intr&CHBEXT)
+ if (intr & CHBEXT)
irqs->status(&dev->chanB);
}
}
spin_unlock(&dev->lock);
- if(work==5000)
+ if (work == 5000)
pr_err("%s: interrupt jammed - abort(0x%X)!\n",
dev->name, intr);
/* Ok all done */
- locker=0;
+ locker = 0;
return IRQ_HANDLED;
}
-
EXPORT_SYMBOL(z8530_interrupt);
-static const u8 reg_init[16]=
-{
- 0,0,0,0,
- 0,0,0,0,
- 0,0,0,0,
- 0x55,0,0,0
+static const u8 reg_init[16] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0x55, 0, 0, 0
};
-
/**
* z8530_sync_open - Open a Z8530 channel for PIO
* @dev: The network interface we are using
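z8530_interrupt() above caps its service loop at 5000 passes so a wedged chip produces the "interrupt jammed" message instead of spinning the CPU forever. The guard in isolation, with a hypothetical always-pending event source in place of the chip:

#include <stdio.h>

static int pending(void) { return 1; }	/* emulates a stuck IRQ line */

int main(void)
{
	int work = 0;

	/* A hard iteration cap turns a wedged device into a loud error
	 * instead of a silent livelock, exactly as in z8530_interrupt().
	 */
	while (++work < 5000) {
		if (!pending())
			break;
		/* service the event here */
	}
	if (work == 5000)
		fprintf(stderr, "interrupt jammed - abort!\n");
	return 0;
}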
@@ -781,7 +720,6 @@ static const u8 reg_init[16]=
* Switch a Z8530 into synchronous mode without DMA assist. We
* raise the RTS/DTR and commence network operation.
*/
-
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long flags;
@@ -789,7 +727,7 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
spin_lock_irqsave(c->lock, flags);
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
@@ -798,17 +736,15 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
/* This loads the double buffer up */
z8530_rx_done(c); /* Load the frame ring */
z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c,1);
+ z8530_rtsdtr(c, 1);
c->dma_tx = 0;
- c->regs[R1]|=TxINT_ENAB;
+ c->regs[R1] |= TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_open);
/**
@@ -819,25 +755,23 @@ EXPORT_SYMBOL(z8530_sync_open);
* Close down a Z8530 interface and switch its interrupt handlers
* to discard future events.
*/
-
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
spin_lock_irqsave(c->lock, flags);
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- chk=read_zsreg(c,R0);
+
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_close);
/**
@@ -849,91 +783,83 @@ EXPORT_SYMBOL(z8530_sync_close);
* ISA DMA channels must be available for this to work. We assume ISA
* DMA driven I/O and PC limits on access.
*/
-
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long cflags, dflags;
-
+
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
- /*
- * Load the DMA interfaces up
+
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->rx_buf[0]==NULL)
+
+ c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->rx_buf[0])
return -ENOBUFS;
- c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- {
+ c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
+
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
return -ENOBUFS;
}
- c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
- c->tx_dma_used=0;
+ c->tx_dma_used = 0;
c->dma_tx = 1;
- c->dma_num=0;
- c->dma_ready=1;
-
- /*
- * Enable DMA control mode
+ c->dma_num = 0;
+ c->dma_ready = 1;
+
+ /* Enable DMA control mode
*/
spin_lock_irqsave(c->lock, cflags);
-
- /*
- * TX DMA via DIR/REQ
+
+ /* TX DMA via DIR/REQ
+ */
+
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /* RX DMA via W/Req
*/
-
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_FN_RDYFN;
+ c->regs[R1] |= WT_RDY_RT;
+ c->regs[R1] |= INT_ERR_Rx;
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * RX DMA via W/Req
- */
-
- c->regs[R1]|= WT_FN_RDYFN;
- c->regs[R1]|= WT_RDY_RT;
- c->regs[R1]|= INT_ERR_Rx;
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_RDY_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]|= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * DMA interrupts
+
+ /* DMA interrupts
+ */
+
+ /* Set up the DMA configuration
*/
-
- /*
- * Set up the DMA configuration
- */
-
- dflags=claim_dma_lock();
-
+
+ dflags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
set_dma_count(c->rxdma, c->mtu);
enable_dma(c->rxdma);
@@ -942,26 +868,24 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
clear_dma_ff(c->txdma);
set_dma_mode(c->txdma, DMA_MODE_WRITE);
disable_dma(c->txdma);
-
+
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 1;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_open);
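z8530_sync_dma_open() above carves both RX flip buffers out of a single zeroed page, which is why any MTU above PAGE_SIZE/2 is rejected with -EMSGSIZE before anything is allocated. A userspace sketch of the same split, with calloc() standing in for get_zeroed_page():

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	long mtu = 1500 + 64;		/* as in the driver: dev->mtu + 64 */
	unsigned char *buf[2];

	if (mtu > page / 2)
		return 1;		/* driver returns -EMSGSIZE */

	buf[0] = calloc(1, page);	/* get_zeroed_page() stand-in */
	if (!buf[0])
		return 1;		/* driver returns -ENOBUFS */
	buf[1] = buf[0] + page / 2;	/* second flip buffer, same page */

	printf("page=%ld buf0=%p buf1=%p\n",
	       page, (void *)buf[0], (void *)buf[1]);
	free(buf[0]);
	return 0;
}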
/**
@@ -972,66 +896,60 @@ EXPORT_SYMBOL(z8530_sync_dma_open);
* Shut down a DMA mode synchronous interface. Halt the DMA, and
* free the buffers.
*/
-
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
-
+
c->rxdma_on = 0;
-
+
disable_dma(c->txdma);
clear_dma_ff(c->txdma);
release_dma_lock(flags);
-
+
c->txdma_on = 0;
c->tx_dma_used = 0;
spin_lock_irqsave(c->lock, flags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->rx_buf[0])
- {
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->rx_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
}
- if(c->tx_dma_buf[0])
- {
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_close);
/**
@@ -1050,65 +968,58 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
printk("Opening sync interface for TX-DMA\n");
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- return -ENOBUFS;
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0])
+ return -ENOBUFS;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
spin_lock_irqsave(c->lock, cflags);
- /*
- * Load the PIO receive ring
+ /* Load the PIO receive ring
*/
z8530_rx_done(c);
z8530_rx_done(c);
- /*
- * Load the DMA interfaces up
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- c->tx_dma_used=0;
- c->dma_num=0;
- c->dma_ready=1;
+
+ c->tx_dma_used = 0;
+ c->dma_num = 0;
+ c->dma_ready = 1;
c->dma_tx = 1;
- /*
- * Enable DMA control mode
+ /* Enable DMA control mode
*/
- /*
- * TX DMA via DIR/REQ
- */
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1]&= ~TxINT_ENAB;
+ /* TX DMA via DIR/REQ
+ */
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * Set up the DMA configuration
- */
-
+
+ /* Set up the DMA configuration
+ */
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1117,23 +1028,21 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
disable_dma(c->txdma);
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 0;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_txdma_open);
/**
@@ -1141,7 +1050,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_open);
* @dev: Network device to detach
* @c: Z8530 channel to move into discard mode
*
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
+ * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
* and free the buffers.
*/
@@ -1150,17 +1059,15 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
unsigned long dflags, cflags;
u8 chk;
-
spin_lock_irqsave(c->lock, cflags);
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1170,41 +1077,34 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
release_dma_lock(dflags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->tx_dma_buf[0])
- {
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, cflags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/*
- * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
+/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
* it exists...
*/
-
-static const char *z8530_type_name[]={
+static const char * const z8530_type_name[] = {
"Z8530",
"Z85C30",
"Z85230"
@@ -1224,78 +1124,71 @@ static const char *z8530_type_name[]={
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
+ dev->name,
z8530_type_name[dev->type],
mapping,
Z8530_PORT_OF(io),
dev->irq);
}
-
EXPORT_SYMBOL(z8530_describe);
-/*
- * Locked operation part of the z8530 init code
+/* Locked operation part of the z8530 init code
*/
-
static inline int do_z8530_init(struct z8530_dev *dev)
{
/* NOP the interrupt handlers first - we might get a
- floating IRQ transition when we reset the chip */
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
- dev->chanA.dcdcheck=DCD;
- dev->chanB.dcdcheck=DCD;
+ * floating IRQ transition when we reset the chip
+ */
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
+ dev->chanA.dcdcheck = DCD;
+ dev->chanB.dcdcheck = DCD;
/* Reset the chip */
write_zsreg(&dev->chanA, R9, 0xC0);
udelay(200);
/* Now check its valid */
write_zsreg(&dev->chanA, R12, 0xAA);
- if(read_zsreg(&dev->chanA, R12)!=0xAA)
+ if (read_zsreg(&dev->chanA, R12) != 0xAA)
return -ENODEV;
write_zsreg(&dev->chanA, R12, 0x55);
- if(read_zsreg(&dev->chanA, R12)!=0x55)
+ if (read_zsreg(&dev->chanA, R12) != 0x55)
return -ENODEV;
-
- dev->type=Z8530;
-
- /*
- * See the application note.
+
+ dev->type = Z8530;
+
+ /* See the application note.
*/
-
+
write_zsreg(&dev->chanA, R15, 0x01);
-
- /*
- * If we can set the low bit of R15 then
+
+ /* If we can set the low bit of R15 then
* the chip is enhanced.
*/
-
- if(read_zsreg(&dev->chanA, R15)==0x01)
- {
+
+ if (read_zsreg(&dev->chanA, R15) == 0x01) {
/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
/* Put a char in the fifo */
write_zsreg(&dev->chanA, R8, 0);
- if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
+ if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
dev->type = Z85230; /* Has a FIFO */
else
dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
}
-
- /*
- * The code assumes R7' and friends are
+
+ /* The code assumes R7' and friends are
* off. Use write_zsext() for these and keep
* this bit clear.
*/
-
+
write_zsreg(&dev->chanA, R15, 0);
-
- /*
- * At this point it looks like the chip is behaving
+
+ /* At this point it looks like the chip is behaving
*/
-
+
memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init ,16);
-
+ memcpy(dev->chanB.regs, reg_init, 16);
+
return 0;
}
@@ -1332,36 +1225,32 @@ int z8530_init(struct z8530_dev *dev)
return ret;
}
-
-
EXPORT_SYMBOL(z8530_init);
/**
* z8530_shutdown - Shutdown a Z8530 device
* @dev: The Z8530 chip to shutdown
*
- * We set the interrupt handlers to silence any interrupts. We then
+ * We set the interrupt handlers to silence any interrupts. We then
* reset the chip and wait 100uS to be sure the reset completed. Just
* in case the caller then tries to do stuff.
*
* This is called without the lock held
*/
-
int z8530_shutdown(struct z8530_dev *dev)
{
unsigned long flags;
/* Reset the chip */
spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
write_zsreg(&dev->chanA, R9, 0xC0);
/* We must lock the udelay, the chip is offlimits here */
udelay(100);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_shutdown);
/**
@@ -1370,7 +1259,7 @@ EXPORT_SYMBOL(z8530_shutdown);
* @rtable: table of register, value pairs
* FIXME: ioctl to allow user uploaded tables
*
- * Load a Z8530 channel up from the system data. We use +16 to
+ * Load a Z8530 channel up from the system data. We use +16 to
* indicate the "prime" registers. The value 255 terminates the
* table.
*/
@@ -1381,41 +1270,39 @@ int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
spin_lock_irqsave(c->lock, flags);
- while(*rtable!=255)
- {
- int reg=*rtable++;
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]|1);
- write_zsreg(c, reg&0x0F, *rtable);
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]&~1);
- c->regs[reg]=*rtable++;
+ while (*rtable != 255) {
+ int reg = *rtable++;
+
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] | 1);
+ write_zsreg(c, reg & 0x0F, *rtable);
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] & ~1);
+ c->regs[reg] = *rtable++;
}
- c->rx_function=z8530_null_rx;
- c->skb=NULL;
- c->tx_skb=NULL;
- c->tx_next_skb=NULL;
- c->mtu=1500;
- c->max=0;
- c->count=0;
- c->status=read_zsreg(c, R0);
- c->sync=1;
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ c->rx_function = z8530_null_rx;
+ c->skb = NULL;
+ c->tx_skb = NULL;
+ c->tx_next_skb = NULL;
+ c->mtu = 1500;
+ c->max = 0;
+ c->count = 0;
+ c->status = read_zsreg(c, R0);
+ c->sync = 1;
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_channel_load);
-
/**
* z8530_tx_begin - Begin packet transmission
* @c: The Z8530 channel to kick
*
* This is the speed sensitive side of transmission. If we are called
* and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
+ * nothing is queued we idle the sync.
*
* Note: We are handling this code path in the interrupt path, keep it
* fast or bad things will happen.
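z8530_channel_load() above consumes tables in the format the kilostream arrays earlier in this patch use: (register, value) pairs ending in a 255 terminator, with indexes above 0x0F selecting the "prime" registers behind the R15 low bit. A walker over a small table of made-up values:

#include <stdio.h>

static const unsigned char table[] = {
	4, 0x20,		/* arbitrary demo values */
	9, 0x00,
	16 + 7, 0x7E,		/* R7', a prime register */
	255			/* terminator */
};

int main(void)
{
	const unsigned char *p = table;

	while (*p != 255) {
		unsigned char reg = *p++;
		unsigned char val = *p++;
		int prime = reg > 0x0F;	/* needs the R15 low bit set */

		printf("write R%d%s = 0x%02X\n",
		       reg & 0x0F, prime ? "'" : "", val);
	}
	return 0;
}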
@@ -1426,85 +1313,68 @@ EXPORT_SYMBOL(z8530_channel_load);
static void z8530_tx_begin(struct z8530_channel *c)
{
unsigned long flags;
- if(c->tx_skb)
+
+ if (c->tx_skb)
return;
-
- c->tx_skb=c->tx_next_skb;
- c->tx_next_skb=NULL;
- c->tx_ptr=c->tx_next_ptr;
-
- if(c->tx_skb==NULL)
- {
+
+ c->tx_skb = c->tx_next_skb;
+ c->tx_next_skb = NULL;
+ c->tx_ptr = c->tx_next_ptr;
+
+ if (!c->tx_skb) {
/* Idle on */
- if(c->dma_tx)
- {
- flags=claim_dma_lock();
+ if (c->dma_tx) {
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * Check if we crapped out.
+ /* Check if we crapped out.
*/
- if (get_dma_residue(c->txdma))
- {
+ if (get_dma_residue(c->txdma)) {
c->netdevice->stats.tx_dropped++;
c->netdevice->stats.tx_fifo_errors++;
}
release_dma_lock(flags);
}
- c->txcount=0;
- }
- else
- {
- c->txcount=c->tx_skb->len;
-
-
- if(c->dma_tx)
- {
- /*
- * FIXME. DMA is broken for the original 8530,
+ c->txcount = 0;
+ } else {
+ c->txcount = c->tx_skb->len;
+
+ if (c->dma_tx) {
+ /* FIXME. DMA is broken for the original 8530,
* on the older parts we need to set a flag and
* wait for a further TX interrupt to fire this
- * stage off
+ * stage off
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * These two are needed by the 8530/85C30
+ /* These two are needed by the 8530/85C30
* and must be issued when idling.
*/
-
- if(c->dev->type!=Z85230)
- {
+ if (c->dev->type != Z85230) {
write_zsctrl(c, RES_Tx_CRC);
write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ }
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
clear_dma_ff(c->txdma);
set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
set_dma_count(c->txdma, c->txcount);
enable_dma(c->txdma);
release_dma_lock(flags);
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5]|TxENAB);
- }
- else
- {
-
+ write_zsreg(c, R5, c->regs[R5] | TxENAB);
+ } else {
/* ABUNDER off */
write_zsreg(c, R10, c->regs[10]);
write_zsctrl(c, RES_Tx_CRC);
-
- while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
- {
+
+ while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
write_zsreg(c, R8, *c->tx_ptr++);
c->txcount--;
}
-
}
}
- /*
- * Since we emptied tx_skb we can ask for more
+ /* Since we emptied tx_skb we can ask for more
*/
netif_wake_queue(c->netdevice);
}
@@ -1525,7 +1395,7 @@ static void z8530_tx_done(struct z8530_channel *c)
struct sk_buff *skb;
/* Actually this can happen.*/
- if (c->tx_skb == NULL)
+ if (!c->tx_skb)
return;
skb = c->tx_skb;
@@ -1544,12 +1414,10 @@ static void z8530_tx_done(struct z8530_channel *c)
* We point the receive handler at this function when idle. Instead
* of processing the frames we get to throw them away.
*/
-
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
-
EXPORT_SYMBOL(z8530_null_rx);
/**
@@ -1564,67 +1432,58 @@ EXPORT_SYMBOL(z8530_null_rx);
*
* Called with the lock held
*/
-
static void z8530_rx_done(struct z8530_channel *c)
{
struct sk_buff *skb;
int ct;
-
- /*
- * Is our receive engine in DMA mode
+
+ /* Is our receive engine in DMA mode
*/
-
- if(c->rxdma_on)
- {
- /*
- * Save the ready state and the buffer currently
+ if (c->rxdma_on) {
+ /* Save the ready state and the buffer currently
* being used as the DMA target
*/
-
- int ready=c->dma_ready;
- unsigned char *rxb=c->rx_buf[c->dma_num];
+ int ready = c->dma_ready;
+ unsigned char *rxb = c->rx_buf[c->dma_num];
unsigned long flags;
-
- /*
- * Complete this DMA. Necessary to find the length
- */
-
- flags=claim_dma_lock();
-
+
+ /* Complete this DMA. Necessary to find the length
+ */
+ flags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- c->rxdma_on=0;
- ct=c->mtu-get_dma_residue(c->rxdma);
- if(ct<0)
- ct=2; /* Shit happens.. */
- c->dma_ready=0;
-
- /*
- * Normal case: the other slot is free, start the next DMA
+ c->rxdma_on = 0;
+ ct = c->mtu - get_dma_residue(c->rxdma);
+ if (ct < 0)
+ ct = 2; /* Shit happens.. */
+ c->dma_ready = 0;
+
+ /* Normal case: the other slot is free, start the next DMA
* into it immediately.
*/
-
- if(ready)
- {
- c->dma_num^=1;
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+
+ if (ready) {
+ c->dma_num ^= 1;
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
set_dma_count(c->rxdma, c->mtu);
c->rxdma_on = 1;
enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- from passing */
+ /* Stop any frames that we missed the head of
+ * from passing
+ */
write_zsreg(c, R0, RES_Rx_CRC);
- }
- else
+ } else {
/* Can't occur as we dont reenable the DMA irq until
- after the flip is done */
+ * after the flip is done
+ */
netdev_warn(c->netdevice, "DMA flip overrun!\n");
+ }
release_dma_lock(flags);
- /*
- * Shove the old buffer into an sk_buff. We can't DMA
+ /* Shove the old buffer into an sk_buff. We can't DMA
* directly into one on a PC - it might be above the 16Mb
* boundary. Optimisation - we could check to see if we
* can avoid the copy. Optimisation 2 - make the memcpy
@@ -1632,7 +1491,7 @@ static void z8530_rx_done(struct z8530_channel *c)
*/
skb = dev_alloc_skb(ct);
- if (skb == NULL) {
+ if (!skb) {
c->netdevice->stats.rx_dropped++;
netdev_warn(c->netdevice, "Memory squeeze\n");
} else {
@@ -1646,8 +1505,7 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_LOCK;
skb = c->skb;
- /*
- * The game we play for non DMA is similar. We want to
+ /* The game we play for non DMA is similar. We want to
* get the controller set up for the next packet as fast
* as possible. We potentially only have one byte + the
* fifo length for this. Thus we want to flip to the new
@@ -1658,7 +1516,7 @@ static void z8530_rx_done(struct z8530_channel *c)
* sync IRQ for the RT_LOCK area.
*
*/
- ct=c->count;
+ ct = c->count;
c->skb = c->skb2;
c->count = 0;
@@ -1673,15 +1531,13 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_UNLOCK;
c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2 == NULL)
- netdev_warn(c->netdevice, "memory squeeze\n");
- else
+ if (c->skb2)
skb_put(c->skb2, c->mtu);
+
c->netdevice->stats.rx_packets++;
c->netdevice->stats.rx_bytes += ct;
}
- /*
- * If we received a frame we must now process it.
+ /* If we received a frame we must now process it.
*/
if (skb) {
skb_trim(skb, ct);
@@ -1702,9 +1558,10 @@ static void z8530_rx_done(struct z8530_channel *c)
static inline int spans_boundary(struct sk_buff *skb)
{
- unsigned long a=(unsigned long)skb->data;
- a^=(a+skb->len);
- if(a&0x00010000) /* If the 64K bit is different.. */
+ unsigned long a = (unsigned long)skb->data;
+
+ a ^= (a + skb->len);
+ if (a & 0x00010000) /* If the 64K bit is different.. */
return 1;
return 0;
}
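
The XOR test in spans_boundary() is easy to misread, so here is a minimal userspace sketch of the same 64K-boundary check (an ISA DMA controller cannot cross a 64K physical page in a single transfer). The addresses are made up for illustration:

#include <stdio.h>

/* If bit 16 of the start and end addresses differ, the buffer
 * straddles a 64K page, exactly as spans_boundary() tests above.
 */
static int spans_64k(unsigned long start, unsigned long len)
{
        return ((start ^ (start + len)) & 0x00010000UL) != 0;
}

int main(void)
{
        printf("%d\n", spans_64k(0x0000f000UL, 0x2000UL)); /* 1: crosses a 64K page */
        printf("%d\n", spans_64k(0x00010000UL, 0x2000UL)); /* 0: stays inside one */
        return 0;
}
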
@@ -1715,60 +1572,54 @@ static inline int spans_boundary(struct sk_buff *skb)
* @skb: The packet to kick down the channel
*
* Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
+ * hard to hit interrupt latencies for the Z85230 per packet
* even in DMA mode we do the flip to DMA buffer if needed here
* not in the IRQ.
*
- * Called from the network code. The lock is not held at this
+ * Called from the network code. The lock is not held at this
* point.
*/
-
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
unsigned long flags;
-
+
netif_stop_queue(c->netdevice);
- if(c->tx_next_skb)
+ if (c->tx_next_skb)
return NETDEV_TX_BUSY;
-
/* PC SPECIFIC - DMA limits */
-
- /*
- * If we will DMA the transmit and its gone over the ISA bus
+ /* If we will DMA the transmit and it's gone over the ISA bus
* limit, then copy to the flip buffer
*/
-
- if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
- {
- /*
- * Send the flip buffer, and flip the flippy bit.
+
+ if (c->dma_tx &&
+ ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
+ 16 * 1024 * 1024 || spans_boundary(skb))) {
+ /* Send the flip buffer, and flip the flippy bit.
* We don't care which is used when just so long as
* we never use the same buffer twice in a row. Since
* only one buffer can be going out at a time the other
* has to be safe.
*/
- c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used^=1; /* Flip temp buffer */
+ c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
+ c->tx_dma_used ^= 1; /* Flip temp buffer */
skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
+ } else {
+ c->tx_next_ptr = skb->data;
}
- else
- c->tx_next_ptr=skb->data;
RT_LOCK;
- c->tx_next_skb=skb;
+ c->tx_next_skb = skb;
RT_UNLOCK;
-
+
spin_lock_irqsave(c->lock, flags);
z8530_tx_begin(c);
spin_unlock_irqrestore(c->lock, flags);
-
+
return NETDEV_TX_OK;
}
-
EXPORT_SYMBOL(z8530_queue_xmit);
-/*
- * Module support
+/* Module support
*/
static const char banner[] __initconst =
KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
index fc52b2cb500b..dbe1f8514efc 100644
--- a/drivers/net/wireguard/Makefile
+++ b/drivers/net/wireguard/Makefile
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
wireguard-y := main.o
wireguard-y += noise.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 3725e9cd85f4..b7197e80f226 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+static struct kmem_cache *node_cache;
+
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
{
if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
node->bitlen = bits;
memcpy(node->bits, src, bits / 8U);
}
-#define CHOOSE_NODE(parent, key) \
- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+ return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
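
The new choose() helper makes the former CHOOSE_NODE() macro's bit selection explicit: bit_at_a indexes a byte of the key and bit_at_b a bit within it. A standalone sketch under assumed field values (not taken from a live trie):

#include <stdint.h>
#include <stdio.h>

struct demo_node { uint8_t bit_at_a, bit_at_b; };

/* Same selection as choose() above: pick one bit of the key. */
static uint8_t demo_choose(const struct demo_node *node, const uint8_t *key)
{
        return (key[node->bit_at_a] >> node->bit_at_b) & 1;
}

int main(void)
{
        const uint8_t key[4] = { 0x0a, 0x80, 0x00, 0x01 };     /* 10.128.0.1 */
        struct demo_node n = { .bit_at_a = 1, .bit_at_b = 7 }; /* MSB of byte 1 */

        printf("branch %u\n", demo_choose(&n, key)); /* 1: 0x80 has that bit set */
        return 0;
}
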
static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
}
}
+static void node_free_rcu(struct rcu_head *rcu)
+{
+ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
static void root_free_rcu(struct rcu_head *rcu)
{
struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
while (len > 0 && (node = stack[--len])) {
push_rcu(stack, node->bit[0], &len);
push_rcu(stack, node->bit[1], &len);
- kfree(node);
+ kmem_cache_free(node_cache, node);
}
}
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
}
}
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
- struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({ \
- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
- stack[len++] = p; \
- })
-
- struct allowedips_node __rcu **stack[128], **nptr;
- struct allowedips_node *node, *prev;
- unsigned int len;
-
- if (unlikely(!peer || !REF(*top)))
- return;
-
- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
- nptr = stack[len - 1];
- node = DEREF(nptr);
- if (!node) {
- --len;
- continue;
- }
- if (!prev || REF(prev->bit[0]) == node ||
- REF(prev->bit[1]) == node) {
- if (REF(node->bit[0]))
- PUSH(&node->bit[0]);
- else if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else if (REF(node->bit[0]) == prev) {
- if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else {
- if (rcu_dereference_protected(node->peer,
- lockdep_is_held(lock)) == peer) {
- RCU_INIT_POINTER(node->peer, NULL);
- list_del_init(&node->peer_list);
- if (!node->bit[0] || !node->bit[1]) {
- rcu_assign_pointer(*nptr, DEREF(
- &node->bit[!REF(node->bit[0])]));
- kfree_rcu(node, rcu);
- node = DEREF(nptr);
- }
- }
- --len;
- }
- }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
static unsigned int fls128(u64 a, u64 b)
{
return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
found = node;
if (node->cidr == bits)
break;
- node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ node = rcu_dereference_bh(node->bit[choose(node, key)]);
}
return found;
}
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
u8 cidr, u8 bits, struct allowedips_node **rnode,
struct mutex *lock)
{
- struct allowedips_node *node = rcu_dereference_protected(trie,
- lockdep_is_held(lock));
+ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
struct allowedips_node *parent = NULL;
bool exact = false;
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
exact = true;
break;
}
- node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
- lockdep_is_held(lock));
+ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
}
*rnode = parent;
return exact;
}
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+ node->parent_bit_packed = (unsigned long)parent | bit;
+ rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+ u8 bit = choose(parent, node->bits);
+ connect_node(&parent->bit[bit], bit, node);
+}
+
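
connect_node() records, in each node, the address of the parent slot that points at it, with the branch index packed into the low two bits (the value 2 marks the root slot). That is what later lets wg_allowedips_remove_by_peer() unlink a node without walking the trie. A userspace sketch of the encoding and the O(1) unlink it enables; the two-node tree and names are illustrative only:

#include <stdio.h>

struct node { struct node *bit[2]; unsigned long parent_bit_packed; };

static void connect(struct node **parent, unsigned int bit, struct node *child)
{
        /* Slot addresses are at least 4-byte aligned, so the low two
         * bits are free to carry the branch index (2 = root slot).
         */
        child->parent_bit_packed = (unsigned long)parent | bit;
        *parent = child;
}

static void unlink_leaf(struct node *n)
{
        /* O(1) removal: recover the parent slot and clear it. */
        struct node **slot = (struct node **)(n->parent_bit_packed & ~3UL);
        *slot = NULL;
}

int main(void)
{
        static struct node a, b;
        struct node *root = NULL;

        connect(&root, 2, &a);     /* a hangs off the root pointer */
        connect(&a.bit[1], 1, &b); /* b is a's right child */

        printf("b branch bit = %lu\n", b.parent_bit_packed & 3);
        unlink_leaf(&b);
        printf("a.bit[1] is now %s\n", a.bit[1] ? "set" : "NULL");
        return 0;
}
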
static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return -EINVAL;
if (!rcu_access_pointer(*trie)) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
RCU_INIT_POINTER(node->peer, peer);
list_add_tail(&node->peer_list, &peer->allowedips_list);
copy_and_assign_cidr(node, key, cidr, bits);
- rcu_assign_pointer(*trie, node);
+ connect_node(trie, 2, node);
return 0;
}
if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!newnode))
return -ENOMEM;
RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
if (!node) {
down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
} else {
- down = rcu_dereference_protected(CHOOSE_NODE(node, key),
- lockdep_is_held(lock));
+ const u8 bit = choose(node, key);
+ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
if (!down) {
- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ connect_node(&node->bit[bit], bit, newnode);
return 0;
}
}
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
parent = node;
if (newnode->cidr == cidr) {
- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ choose_and_connect_node(newnode, down);
if (!parent)
- rcu_assign_pointer(*trie, newnode);
+ connect_node(trie, 2, newnode);
else
- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
- newnode);
- } else {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- list_del(&newnode->peer_list);
- kfree(newnode);
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->peer_list);
- copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+ choose_and_connect_node(parent, newnode);
+ return 0;
+ }
- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
- if (!parent)
- rcu_assign_pointer(*trie, node);
- else
- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
- node);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ if (unlikely(!node)) {
+ list_del(&newnode->peer_list);
+ kmem_cache_free(node_cache, newnode);
+ return -ENOMEM;
}
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ choose_and_connect_node(node, down);
+ choose_and_connect_node(node, newnode);
+ if (!parent)
+ connect_node(trie, 2, node);
+ else
+ choose_and_connect_node(parent, node);
return 0;
}
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
+ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+ bool free_parent;
+
+ if (list_empty(&peer->allowedips_list))
+ return;
++table->seq;
- walk_remove_by_peer(&table->root4, peer, lock);
- walk_remove_by_peer(&table->root6, peer, lock);
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ continue;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) &&
+ !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 &&
+ !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(
+ parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ continue;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+ }
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
return NULL;
}
+int __init wg_allowedips_slab_init(void)
+{
+ node_cache = KMEM_CACHE(allowedips_node, 0);
+ return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(node_cache);
+}
+
#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index e5c83cafcef4..2346c797eb4d 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -15,14 +15,11 @@ struct wg_peer;
struct allowedips_node {
struct wg_peer __rcu *peer;
struct allowedips_node __rcu *bit[2];
- /* While it may seem scandalous that we waste space for v4,
- * we're alloc'ing to the nearest power of 2 anyway, so this
- * doesn't actually make a difference.
- */
- u8 bits[16] __aligned(__alignof(u64));
u8 cidr, bit_at_a, bit_at_b, bitlen;
+ u8 bits[16] __aligned(__alignof(u64));
- /* Keep rarely used list at bottom to be beyond cache line. */
+ /* Keep rarely used members at bottom to be beyond cache line. */
+ unsigned long parent_bit_packed;
union {
struct list_head peer_list;
struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
struct allowedips_node __rcu *root4;
struct allowedips_node __rcu *root6;
u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
void wg_allowedips_init(struct allowedips *table);
void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
bool wg_allowedips_selftest(void);
#endif
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
#endif /* _WG_ALLOWEDIPS_H */
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 7a7d5f1a80fc..75dbe77b0b4b 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -21,13 +21,22 @@ static int __init mod_init(void)
{
int ret;
+ ret = wg_allowedips_slab_init();
+ if (ret < 0)
+ goto err_allowedips;
+
#ifdef DEBUG
+ ret = -ENOTRECOVERABLE;
if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
!wg_ratelimiter_selftest())
- return -ENOTRECOVERABLE;
+ goto err_peer;
#endif
wg_noise_init();
+ ret = wg_peer_init();
+ if (ret < 0)
+ goto err_peer;
+
ret = wg_device_init();
if (ret < 0)
goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
err_netlink:
wg_device_uninit();
err_device:
+ wg_peer_uninit();
+err_peer:
+ wg_allowedips_slab_uninit();
+err_allowedips:
return ret;
}
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
+ wg_peer_uninit();
+ wg_allowedips_slab_uninit();
}
module_init(mod_init);
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index cd5cb0292cb6..1acd00ab2fbc 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
+static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);
struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
return ERR_PTR(ret);
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
goto err;
peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
return peer;
err:
- kfree(peer);
+ kmem_cache_free(peer_cache, peer);
return ERR_PTR(ret);
}
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
/* Mark as dead, so that we don't allow jumping contexts after. */
WRITE_ONCE(peer->is_dead, true);
- /* The caller must now synchronize_rcu() for this to take effect. */
+ /* The caller must now synchronize_net() for this to take effect. */
}
static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
lockdep_assert_held(&peer->device->device_update_lock);
peer_make_dead(peer);
- synchronize_rcu();
+ synchronize_net();
peer_remove_after_dead(peer);
}
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
peer_make_dead(peer);
list_add_tail(&peer->peer_list, &dead_peers);
}
- synchronize_rcu();
+ synchronize_net();
list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
peer_remove_after_dead(peer);
}
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
*/
- kfree_sensitive(peer);
+ memzero_explicit(peer, sizeof(*peer));
+ kmem_cache_free(peer_cache, peer);
}
static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
return;
kref_put(&peer->refcount, kref_release);
}
+
+int __init wg_peer_init(void)
+{
+ peer_cache = KMEM_CACHE(wg_peer, 0);
+ return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+ kmem_cache_destroy(peer_cache);
+}
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 8d53b687a1d1..76e4d3128ad4 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
void wg_peer_remove(struct wg_peer *peer);
void wg_peer_remove_all(struct wg_device *wg);
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 846db14cb046..e173204ae7d7 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -19,32 +19,22 @@
#include <linux/siphash.h>
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
- u8 cidr)
-{
- swap_endian(dst, src, bits);
- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
- if (cidr)
- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
static __init void print_node(struct allowedips_node *node, u8 bits)
{
char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
- char *fmt_declaration = KERN_DEBUG
- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ u8 ip1[16], ip2[16], cidr1, cidr2;
char *style = "dotted";
- u8 ip1[16], ip2[16];
u32 color = 0;
+ if (!node)
+ return;
if (bits == 32) {
fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
} else if (bits == 128) {
fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
}
if (node->peer) {
hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
hsiphash_1u32(0xabad1dea, &key) % 200;
style = "bold";
}
- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
- printk(fmt_declaration, ip1, node->cidr, style, color);
+ wg_allowedips_read_node(node, ip1, &cidr1);
+ printk(fmt_declaration, ip1, cidr1, style, color);
if (node->bit[0]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[0])->bits, bits,
- node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[0])->cidr);
- print_node(rcu_dereference_raw(node->bit[0]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
if (node->bit[1]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[1])->bits,
- bits, node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[1])->cidr);
- print_node(rcu_dereference_raw(node->bit[1]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
+ if (node->bit[0])
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ if (node->bit[1])
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
}
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
union nf_inet_addr mask;
- memset(&mask, 0x00, 128 / 8);
- memset(&mask, 0xff, cidr / 8);
+ memset(&mask, 0, sizeof(mask));
+ memset(&mask.all, 0xff, cidr / 8);
if (cidr % 32)
mask.all[cidr / 32] = (__force u32)htonl(
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
}
static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
- struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
return (ip->s_addr & node->mask.ip) == node->ip.ip;
}
static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
- struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
- node->ip.ip6[0] &&
- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
- node->ip.ip6[1] &&
- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
- node->ip.ip6[2] &&
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
(ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
- struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
struct horrible_allowedips_node *other = NULL, *where = NULL;
u8 my_cidr = horrible_mask_to_cidr(node->mask);
hlist_for_each_entry(other, &table->head, table) {
- if (!memcmp(&other->mask, &node->mask,
- sizeof(union nf_inet_addr)) &&
- !memcmp(&other->ip, &node->ip,
- sizeof(union nf_inet_addr)) &&
- other->ip_version == node->ip_version) {
+ if (other->ip_version == node->ip_version &&
+ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
other->value = node->value;
kfree(node);
return;
}
+ }
+ hlist_for_each_entry(other, &table->head, table) {
where = other;
if (horrible_mask_to_cidr(other->mask) <= my_cidr)
break;
@@ -201,8 +181,7 @@ static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
struct in_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
struct in6_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
}
static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
- struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 4)
- continue;
- if (horrible_match_v4(node, ip)) {
- ret = node->value;
- break;
- }
+ if (node->ip_version == 4 && horrible_match_v4(node, ip))
+ return node->value;
}
- return ret;
+ return NULL;
}
static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
- struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 6)
+ if (node->ip_version == 6 && horrible_match_v6(node, ip))
+ return node->value;
+ }
+ return NULL;
+}
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ if (node->value != value)
continue;
- if (horrible_match_v6(node, ip)) {
- ret = node->value;
- break;
- }
+ hlist_del(&node->table);
+ kfree(node);
}
- return ret;
}
static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
goto free;
}
kref_init(&peers[i]->refcount);
+ INIT_LIST_HEAD(&peers[i]->allowedips_list);
}
mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
if (wg_allowedips_insert_v4(&t,
(struct in_addr *)mutated,
cidr, peer, &mutex) < 0) {
- pr_err("allowedips random malloc: FAIL\n");
+ pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
print_tree(t.root6, 128);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
- if (lookup(t.root4, 32, ip) !=
- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
+ for (j = 0;; ++j) {
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
+ pr_err("allowedips random v4 self-test: FAIL\n");
+ goto free;
+ }
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random v6 self-test: FAIL\n");
+ goto free;
+ }
}
+ if (j >= NUM_PEERS)
+ break;
+ mutex_lock(&mutex);
+ wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_remove_by_value(&h, peers[j]);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 16);
- if (lookup(t.root6, 128, ip) !=
- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
- }
+ if (t.root4 || t.root6) {
+ pr_err("allowedips random self-test removal: FAIL\n");
+ goto free;
}
+
ret = true;
free:
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index d9ad850daa79..8c496b747108 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
if (new4)
wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
mutex_unlock(&wg->socket_update_lock);
- synchronize_rcu();
+ synchronize_net();
sock_free(old4);
sock_free(old6);
}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 869524852fba..ab8f77ae5e66 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -442,14 +442,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
pdev = ar_ahb->pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ath10k_err(ar, "failed to get memory resource\n");
- ret = -ENXIO;
- goto out;
- }
-
- ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
+ ar_ahb->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ar_ahb->mem)) {
ath10k_err(ar, "mem ioremap error\n");
ret = PTR_ERR(ar_ahb->mem);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 648ed36f845f..5aeff2d9f6cf 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -301,7 +301,7 @@ struct ath10k_fw_stats_pdev {
s32 underrun;
u32 hw_paused;
s32 tx_abort;
- s32 mpdus_requed;
+ s32 mpdus_requeued;
u32 tx_ko;
u32 data_rc;
u32 self_triggers;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index fd052f6ed019..39378e3f9b2b 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1105,7 +1105,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_tx_ppdu_reaped",
"d_tx_fifo_underrun",
"d_tx_ppdu_abort",
- "d_tx_mpdu_requed",
+ "d_tx_mpdu_requeued",
"d_tx_excessive_retries",
"d_tx_hw_rate",
"d_tx_dropped_sw_retries",
@@ -1205,7 +1205,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->hw_reaped;
data[i++] = pdev_stats->underrun;
data[i++] = pdev_stats->tx_abort;
- data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->mpdus_requeued;
data[i++] = pdev_stats->tx_ko;
data[i++] = pdev_stats->data_rc;
data[i++] = pdev_stats->sw_retry_failure;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 956157946106..ec689e3ce48a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -845,6 +845,7 @@ enum htt_security_types {
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
@@ -1282,8 +1283,8 @@ struct htt_dbg_stats_wal_tx_stats {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1a08156d5011..adbaeb67eedf 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1746,16 +1746,95 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+ }
+ return pn;
+}
+
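
ath10k_htt_rx_h_get_pn() above reassembles the 48-bit CCMP packet number from bytes 0, 1 and 4-7 of the security header; bytes 2 and 3 hold the reserved and ext-IV/key-ID fields and are skipped. A userspace sketch with a made-up header:

#include <stdint.h>
#include <stdio.h>

/* CCMP header layout: PN0 PN1 rsvd keyid/ext-IV PN2 PN3 PN4 PN5 */
static uint64_t ccmp_pn(const uint8_t *ehdr)
{
        return (uint64_t)ehdr[0]       |
               (uint64_t)ehdr[1] << 8  |
               (uint64_t)ehdr[4] << 16 |
               (uint64_t)ehdr[5] << 24 |
               (uint64_t)ehdr[6] << 32 |
               (uint64_t)ehdr[7] << 40;
}

int main(void)
{
        const uint8_t hdr[8] = { 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 };

        printf("pn = %llu\n", (unsigned long long)ccmp_pn(hdr)); /* 1 */
        return 0;
}
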
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 offset)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 peer_id,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_peer *peer;
+ union htt_rx_pn_t *last_pn, new_pn = {0};
+ struct ieee80211_hdr *hdr;
+ u8 tid, frag_number;
+ u32 seq;
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+ return false;
+ }
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = ATH10K_TXRX_NON_QOS_TID;
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ if (frag_number == 0) {
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else {
+ if (seq != peer->frag_tids_seq[tid])
+ return false;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ return false;
+
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ return true;
+}
+
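
The PN check above latches (seq, PN) when fragment 0 arrives and then requires every later fragment of the burst to carry the same sequence number and exactly PN+1, which defeats fragment mixing and replay. A minimal sketch of that rule in isolation; the state struct and values are illustrative, not driver types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag_state { uint64_t pn; uint32_t seq; };

static bool frag_ok(struct frag_state *st, unsigned int frag_no,
                    uint32_t seq, uint64_t pn)
{
        if (frag_no == 0) {             /* first fragment latches state */
                st->pn = pn;
                st->seq = seq;
                return true;
        }
        if (seq != st->seq || pn != st->pn + 1)
                return false;           /* wrong burst or PN gap */
        st->pn = pn;
        return true;
}

int main(void)
{
        struct frag_state st = { 0 };

        printf("%d\n", frag_ok(&st, 0, 100, 5)); /* 1 */
        printf("%d\n", frag_ok(&st, 1, 100, 6)); /* 1 */
        printf("%d\n", frag_ok(&st, 2, 100, 9)); /* 0: PN gap, dropped */
        return 0;
}
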
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
- enum ath10k_pkt_rx_err *err)
+ enum ath10k_pkt_rx_err *err,
+ u16 peer_id,
+ bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
- struct sk_buff *msdu;
+ struct sk_buff *msdu, *temp;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1847,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool is_decrypted;
bool is_mgmt;
u32 attention;
+ bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
@@ -1866,7 +1946,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
skb_queue_walk(amsdu, msdu) {
+ if (frag && !fill_crypt_header && is_decrypted &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+ msdu,
+ peer_id,
+ 0,
+ enctype);
+
+ if (frag)
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+ msdu,
+ 0);
+
+ if (!frag_pn_check || !multicast_check) {
+ /* Discard the fragment with invalid PN or multicast DA
+ */
+ temp = msdu->prev;
+ __skb_unlink(msdu, amsdu);
+ dev_kfree_skb_any(msdu);
+ msdu = temp;
+ frag_pn_check = true;
+ multicast_check = true;
+ continue;
+ }
+
ath10k_htt_rx_h_csum_offload(msdu);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
@@ -1884,6 +1994,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
}
}
@@ -1991,14 +2106,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+ /* Validate that the A-MSDU has a proper first subframe.
+ * A single MSDU can be received as an A-MSDU when the
+ * unauthenticated A-MSDU flag of a QoS header gets flipped
+ * in non-SPP A-MSDUs; in that case the first subframe carries
+ * an LLC/SNAP header in place of a valid DA.
+ * Return false if the DA matches the rfc1042 pattern.
+ */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
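
The check above rejects A-MSDUs whose first subframe "destination address" is actually an RFC 1042 LLC/SNAP header, the telltale of a plain MSDU being replayed with the A-MSDU flag flipped. A standalone sketch of just the comparison; the buffers are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t rfc1042_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* A valid first subframe starts with a real DA, never with LLC/SNAP. */
static bool first_subframe_valid(const uint8_t *subframe_hdr)
{
        return memcmp(subframe_hdr, rfc1042_hdr, sizeof(rfc1042_hdr)) != 0;
}

int main(void)
{
        const uint8_t bogus[6]   = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
        const uint8_t real_da[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        printf("%d\n", first_subframe_valid(bogus));   /* 0: rejected */
        printf("%d\n", first_subframe_valid(real_da)); /* 1: accepted */
        return 0;
}
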
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- /* FIXME: It might be a good idea to do some fuzzy-testing to drop
- * invalid/dangerous frames.
- */
-
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -2009,6 +2172,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
return true;
}
@@ -2071,7 +2239,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+ false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
@@ -2204,6 +2373,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
+
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem handle that case either, so we introduce the
* same limitiation here as well.
@@ -2509,6 +2683,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ /* Discard the fragment with multicast DA */
+ goto err;
+ }
+
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2697,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
HTT_RX_NON_TKIP_MIC);
}
- hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
-
if (ieee80211_has_retry(hdr->frame_control))
goto err;
@@ -3027,7 +3206,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
- NULL);
+ NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 5ce4f8d038b9..c272b290fa73 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5592,6 +5592,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->nohwcrypt &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
goto err;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e7fde635e0ee..71878ab35b93 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3685,8 +3685,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id != 0xffffffff) {
if (!ath10k_pci_chip_is_supported(pdev->device,
- bus_params.chip_id))
+ bus_params.chip_id)) {
+ ret = -ENODEV;
goto err_unsupported;
+ }
}
}
@@ -3697,11 +3699,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (bus_params.chip_id == 0xffffffff)
+ if (bus_params.chip_id == 0xffffffff) {
+ ret = -ENODEV;
goto err_unsupported;
+ }
- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
- goto err_free_irq;
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
+ ret = -ENODEV;
+ goto err_unsupported;
+ }
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 862d0901c5b8..cf64898b9447 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -235,7 +235,6 @@ u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
void ath10k_pci_hif_power_down(struct ath10k *ar);
int ath10k_pci_alloc_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
-void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(struct timer_list *t);
void ath10k_pci_ce_deinit(struct ath10k *ar);
void ath10k_pci_init_napi(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index f2b6bf8f0d60..705b6295e466 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
- u8 info0;
+ union {
+ struct {
+ u8 discard:1,
+ forward:1,
+ any_err:1,
+ dup_err:1,
+ reserved:1,
+ inspect:1,
+ extension:2;
+ } bits;
+ u8 info0;
+ } u;
+
u8 version;
u8 len;
u8 flags;
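
The union added to fw_rx_desc_hl overlays named flag bits on the raw info0 byte so callers can test fw_desc->u.bits.discard directly. A userspace sketch of the overlay; C bitfield layout is compiler- and endian-dependent, so the positions shown assume a little-endian build and are illustrative, not a guarantee:

#include <stdint.h>
#include <stdio.h>

union desc_info {
        struct {
                uint8_t discard:1, forward:1, any_err:1, dup_err:1,
                        reserved:1, inspect:1, extension:2;
        } bits;
        uint8_t info0;
};

int main(void)
{
        union desc_info d = { .info0 = 0x05 }; /* bits 0 and 2 set */

        printf("discard=%u any_err=%u\n", d.bits.discard, d.bits.any_err);
        return 0;
}
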
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d48b922215eb..b8a4bbfe10b8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2795,7 +2795,7 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_STARTING:
- ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
goto exit;
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:
@@ -2867,7 +2867,7 @@ void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
dst->underrun = __le32_to_cpu(src->underrun);
dst->tx_abort = __le32_to_cpu(src->tx_abort);
- dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
dst->tx_ko = __le32_to_cpu(src->tx_ko);
dst->data_rc = __le32_to_cpu(src->data_rc);
dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -2895,7 +2895,7 @@ ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
dst->underrun = __le32_to_cpu(src->underrun);
dst->tx_abort = __le32_to_cpu(src->tx_abort);
- dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
dst->tx_ko = __le32_to_cpu(src->tx_ko);
dst->data_rc = __le32_to_cpu(src->data_rc);
dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -8270,7 +8270,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", pdev->mpdus_requed);
+ "MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index d870f7067cb7..41c1a3d339c2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4371,8 +4371,8 @@ struct wmi_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
@@ -4444,8 +4444,8 @@ struct wmi_10_4_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
@@ -7418,7 +7418,6 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_connect(struct ath10k *ar);
-struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
u32 cmd_id);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 77ce3347ab86..969bf1a590d9 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -70,6 +70,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -110,6 +111,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.name = "qca6390 hw2.0",
@@ -149,6 +151,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = false,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.name = "qcn9074 hw1.0",
@@ -186,6 +189,47 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = false,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .fix_l1ss = true,
+ },
+ {
+ .name = "wcn6855 hw2.0",
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ .fw = {
+ .dir = "WCN6855/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+ .tcl_0_only = true,
+ .spectral_fft_sz = 0,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .cold_boot_calib = false,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .fix_l1ss = false,
},
};
@@ -488,7 +532,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
if (len < ALIGN(ie_len, 4)) {
ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
switch (ie_id) {
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 55af982deca7..018fb2385f2a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -107,6 +107,7 @@ enum ath11k_hw_rev {
ATH11K_HW_QCA6390_HW20,
ATH11K_HW_IPQ6018_HW10,
ATH11K_HW_QCN9074_HW10,
+ ATH11K_HW_WCN6855_HW20,
};
enum ath11k_firmware_mode {
@@ -795,8 +796,8 @@ struct ath11k_fw_stats_pdev {
s32 underrun;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
- /* Num MPDUs requed by SW */
- s32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
/* data hw rate code */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index ec93f14e6d2a..9e0c90da99d3 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -89,7 +89,7 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
htt_stats_buf->tx_abort);
len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
- htt_stats_buf->mpdu_requed);
+ htt_stats_buf->mpdu_requeued);
len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
htt_stats_buf->tx_xretry);
len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 567a26d485a9..d428f52003a4 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -147,7 +147,7 @@ struct htt_tx_pdev_stats_cmn_tlv {
u32 hw_flush;
u32 hw_filt;
u32 tx_abort;
- u32 mpdu_requed;
+ u32 mpdu_requeued;
u32 tx_xretry;
u32 data_rc;
u32 mpdu_dropped_xretry;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 04f6c4e0658b..b0c8f6290099 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -342,7 +342,6 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
- u32 ring_hash_map;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -439,20 +438,9 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
/* When hash based routing of rx packet is enabled, 32 entries to map
- * the hash values to the ring will be configured. Each hash entry uses
- * three bits to map to a particular ring. The ring mapping will be
- * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:Not used.
+ * the hash values to the ring will be configured.
*/
- ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
- HAL_HASH_ROUTING_RING_SW2 << 3 |
- HAL_HASH_ROUTING_RING_SW3 << 6 |
- HAL_HASH_ROUTING_RING_SW4 << 9 |
- HAL_HASH_ROUTING_RING_SW1 << 12 |
- HAL_HASH_ROUTING_RING_SW2 << 15 |
- HAL_HASH_ROUTING_RING_SW3 << 18 |
- HAL_HASH_ROUTING_RING_SW4 << 21;
-
- ath11k_hal_reo_hw_setup(ab, ring_hash_map);
+ ab->hw_params.hw_ops->reo_setup(ab);
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 1d9aa1bb6b6e..603d2f93ac18 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
__skb_queue_purge(&rx_tid->rx_frags);
}
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
u8 tid;
int ret = 0;
bool more_frags;
+ bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index bf399312b5ff..623da3bf9dc8 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 08e3c72d9237..eaa0edca5576 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -382,6 +382,16 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+ }
+
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
* unit of 8 usecs instead of 1 usec (as required by v1).
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 91d1428b8b94..35ed3a14e200 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -120,6 +120,7 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_MISC_CTL 0x00000630
#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
@@ -280,6 +281,7 @@ struct ath11k_base;
#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING GENMASK(20, 17)
/* CE ring bit field mask and shift */
#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
@@ -906,7 +908,6 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
u32 start_seq, enum hal_pn_type type);
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng);
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map);
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index fac2396edf32..325055ca41ab 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -801,43 +801,6 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
-{
- u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
- u32 val;
-
- val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
-
- val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
- val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
- HAL_SRNG_RING_ID_REO2SW1) |
- FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
- FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
-
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
-
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
-}
-
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
@@ -1128,12 +1091,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
- struct hal_rx_mpdu_info *mpdu_info =
- (struct hal_rx_mpdu_info *)tlv_data;
u16 peer_id;
- peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
- __le32_to_cpu(mpdu_info->info0));
+ peer_id = ab->hw_params.hw_ops->mpdu_info_get_peerid(tlv_data);
if (peer_id)
ppdu_info->peer_id = peer_id;
break;
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index d464a270c049..0f1f04b812b9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -254,12 +254,20 @@ struct hal_rx_phyrx_rssi_legacy_info {
} __packed;
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
__le32 rsvd1[21];
} __packed;
+struct hal_rx_mpdu_info_wcn6855 {
+ __le32 rsvd0[8];
+ __le32 info0;
+ __le32 rsvd1[14];
+} __packed;
+
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 377ae8d5b58f..d9596903b0a5 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -10,6 +10,7 @@
#include "hw.h"
#include "core.h"
#include "ce.h"
+#include "hif.h"
/* Map from pdev index to hw mac index */
static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
@@ -45,6 +46,13 @@ static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
true);
}
+static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
@@ -91,6 +99,52 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
config->num_keep_alive_pattern = 0;
}
+static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+}
+
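The ring_hash_map built above packs eight hash-bucket entries, three bits each, so the eight-way spread over SW1..SW4 occupies the low 24 bits. Assuming HAL_HASH_ROUTING_RING_SW1..SW4 evaluate to 1..4 (their ordering in hal.h), the constants work out as follows:

	/* Worked values for the two map layouts in this file. */
	u32 map3 = 1 << 0 | 2 << 3 | 3 << 6 | 4 << 9 |
		   1 << 12 | 2 << 15 | 3 << 18 | 4 << 21; /* 0x008d18d1 */
	/* WCN6855 (further below) widens each entry to four bits: */
	u32 map4 = 1 << 0 | 2 << 4 | 3 << 8 | 4 << 12 |
		   1 << 16 | 2 << 20 | 3 << 24 | 4 << 28; /* 0x43214321 */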
static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
struct target_resource_config *config)
{
@@ -489,6 +543,228 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.qcn9074.msdu_payload[0];
}
+static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.hdr_status;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info1));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.wcn6855.msdu_end, (u8 *)&ldesc->u.wcn6855.msdu_end,
+ sizeof(struct rx_msdu_end_wcn6855));
+ memcpy((u8 *)&fdesc->u.wcn6855.attention, (u8 *)&ldesc->u.wcn6855.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.wcn6855.mpdu_end, (u8 *)&ldesc->u.wcn6855.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn6855.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.wcn6855.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.attention;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.msdu_payload[0];
+}
+
+static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL);
+ val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
+static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data)
+{
+ u16 peer_id = 0;
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->info0));
+
+ return peer_id;
+}
+
+static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
+{
+ u16 peer_id = 0;
+ struct hal_rx_mpdu_info_wcn6855 *mpdu_info =
+ (struct hal_rx_mpdu_info_wcn6855 *)tlv_data;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
+ __le32_to_cpu(mpdu_info->info0));
+ return peer_id;
+}
+
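These two helpers back the new mpdu_info_get_peerid op: the monitor-status parser in hal_rx.c (changed above) can no longer decode the peer id with a single mask because WCN6855 moved the field to a different dword and to the low half-word. Each per-chip decode is just FIELD_GET with the matching mask; sketched with info0 standing for the relevant descriptor dword of each layout:

	/* Same logical field, two layouts; masks from hal_rx.h above. */
	u16 pid_ipq = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
				__le32_to_cpu(info0));	/* bits 31:16 */
	u16 pid_wcn = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
				__le32_to_cpu(info0));	/* bits 15:0  */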
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -521,6 +797,8 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -555,6 +833,8 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -589,6 +869,8 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -623,6 +905,44 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+};
+
+const struct ath11k_hw_ops wcn6855_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
};
#define ATH11K_TX_RING_MASK_0 0x1
@@ -1688,3 +2008,74 @@ const struct ath11k_hw_regs qcn9074_regs = {
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
};
+
+const struct ath11k_hw_regs wcn6855_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000690,
+ .hal_tcl1_ring_base_msb = 0x00000694,
+ .hal_tcl1_ring_id = 0x00000698,
+ .hal_tcl1_ring_misc = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b0,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006dc,
+ .hal_tcl1_ring_msi1_data = 0x000006e0,
+ .hal_tcl2_ring_base_lsb = 0x000006e8,
+ .hal_tcl_ring_base_lsb = 0x00000798,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a0,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x000005bc,
+ .hal_reo1_aging_thresh_ix_1 = 0x000005c0,
+ .hal_reo1_aging_thresh_ix_2 = 0x000005c4,
+ .hal_reo1_aging_thresh_ix_3 = 0x000005c8,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x00000454,
+ .hal_reo_tcl_ring_hp = 0x00003060,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x0000055c,
+ .hal_reo_status_hp = 0x00003078,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000870,
+ .hal_wbm_idle_link_ring_misc = 0x00000880,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001e8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000920,
+ .hal_wbm1_release_ring_base_lsb = 0x00000978,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+};
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index c81a6328361d..62f5978b3005 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -162,6 +162,7 @@ struct ath11k_hw_params {
bool cold_boot_calib;
bool supports_suspend;
u32 hal_desc_sz;
+ bool fix_l1ss;
};
struct ath11k_hw_ops {
@@ -199,12 +200,15 @@ struct ath11k_hw_ops {
void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ void (*reo_setup)(struct ath11k_base *ab);
+ u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
};
extern const struct ath11k_hw_ops ipq8074_ops;
extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
+extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
@@ -318,5 +322,6 @@ struct ath11k_hw_regs {
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
+extern const struct ath11k_hw_regs wcn6855_regs;
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4df425dd31a2..e9b3689331ec 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1314,10 +1314,16 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->he_flag = true;
- memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
- sizeof(arg->peer_he_cap_macinfo));
- memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
- sizeof(arg->peer_he_cap_phyinfo));
+ memcpy_and_pad(&arg->peer_he_cap_macinfo,
+ sizeof(arg->peer_he_cap_macinfo),
+ he_cap->he_cap_elem.mac_cap_info,
+ sizeof(he_cap->he_cap_elem.mac_cap_info),
+ 0);
+ memcpy_and_pad(&arg->peer_he_cap_phyinfo,
+ sizeof(arg->peer_he_cap_phyinfo),
+ he_cap->he_cap_elem.phy_cap_info,
+ sizeof(he_cap->he_cap_elem.phy_cap_info),
+ 0);
arg->peer_he_ops = vif->bss_conf.he_oper.params;
/* the top most byte is used to indicate BSS color info */
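The switch to memcpy_and_pad() matters because the WMI destination fields are wider than the ieee80211 capability arrays; the old code sized the copy by the destination and so read past the end of the source. memcpy_and_pad(dst, dst_len, src, src_len, pad) copies min(dst_len, src_len) bytes and fills the remainder of dst with pad; for dst_len > src_len it is equivalent to:

	/* Open-coded equivalent of memcpy_and_pad() with pad == 0. */
	memcpy(dst, src, src_len);
	memset((u8 *)dst + src_len, 0, dst_len - src_len);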
@@ -2779,6 +2785,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+ /* flush the fragments cache during key (re)install to
+ * ensure all frags in the new frag list belong to the same key.
+ */
+ if (peer && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
@@ -5373,11 +5385,6 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_up))
continue;
- ret = ath11k_mac_setup_bcn_tmpl(arvif);
- if (ret)
- ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
- ret);
-
ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
if (ret) {
ath11k_warn(ab, "failed to restart vdev %d: %d\n",
@@ -5385,6 +5392,11 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
continue;
}
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 27b394d115e2..75cc2d80fde8 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -354,6 +354,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
break;
case ATH11K_HW_QCA6390_HW20:
+ case ATH11K_HW_WCN6855_HW20:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 0f31eb566fb6..646ad79f309c 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -36,10 +36,12 @@
#define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104
+#define WCN6855_DEVICE_ID 0x1103
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
- /* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */
+ { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
{0}
};
@@ -432,7 +434,8 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
ath11k_pci_set_wlaon_pwr_ctrl(ab);
- ath11k_pci_fix_l1ss(ab);
+ if (ab->hw_params.fix_l1ss)
+ ath11k_pci_fix_l1ss(ab);
}
ath11k_mhi_clear_vector(ab);
@@ -1176,12 +1179,26 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.get_ce_msi_idx = ath11k_pci_get_ce_msi_idx,
};
+static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
+ u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
@@ -1209,15 +1226,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
- soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
- soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
- soc_hw_version);
- soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
- soc_hw_version);
-
- ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
- soc_hw_version_major, soc_hw_version_minor);
-
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
ab->hw_rev = ATH11K_HW_QCA6390_HW20;
@@ -1235,6 +1245,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ab->bus_params.static_window_map = true;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
+ case WCN6855_DEVICE_ID:
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+ ab_pci->msi_config = &ath11k_msi_config[0];
+ break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
pci_dev->device);
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 0cdb4a1f816e..79c50804d7dc 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -368,6 +368,7 @@ struct rx_attention {
#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+#define RX_MPDU_START_INFO2_TID_WCN6855 GENMASK(18, 15)
#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
@@ -546,6 +547,31 @@ struct rx_mpdu_start_qcn9074 {
__le32 ht_ctrl;
} __packed;
+struct rx_mpdu_start_wcn6855 {
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* rxpcu_mpdu_filter_in_category
@@ -804,6 +830,20 @@ struct rx_msdu_start_qcn9074 {
__le16 vlan_stag_c1;
} __packed;
+struct rx_msdu_start_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+} __packed;
+
/* rx_msdu_start
*
* rxpcu_mpdu_filter_in_category
@@ -988,7 +1028,9 @@ struct rx_msdu_start_qcn9074 {
#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855 BIT(28)
#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_LAST_MSDU_WCN6855 BIT(29)
#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
@@ -1037,6 +1079,31 @@ struct rx_msdu_end_ipq8074 {
__le16 sa_sw_peer_id;
} __packed;
+struct rx_msdu_end_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 reported_mpdu_len;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info4;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info2;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 rule_indication[2];
+ __le32 info6;
+ __le32 info7;
+} __packed;
+
#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0)
#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0)
@@ -1400,10 +1467,30 @@ struct hal_rx_desc_qcn9074 {
u8 msdu_payload[0];
} __packed;
+struct hal_rx_desc_wcn6855 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_wcn6855 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_wcn6855 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_wcn6855 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+} __packed;
+
struct hal_rx_desc {
union {
struct hal_rx_desc_ipq8074 ipq8074;
struct hal_rx_desc_qcn9074 qcn9074;
+ struct hal_rx_desc_wcn6855 wcn6855;
} u;
} __packed;
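hal_rx_desc gaining a third union member is the payoff of the accessor layer: callers only ever touch the descriptor through the hw_ops added in hw.c, so per-chip layout differences stay out of the rx path. The shape of the pattern in miniature, with illustrative names:

	struct rx_desc {
		union {
			struct { u32 info; } chip_a;
			struct { u32 pad; u32 info; } chip_b; /* field moved */
		} u;
	};

	struct rx_ops {
		u32 (*get_info)(struct rx_desc *d);
	};

	static u32 chip_a_get_info(struct rx_desc *d) { return d->u.chip_a.info; }
	static u32 chip_b_get_info(struct rx_desc *d) { return d->u.chip_b.info; }

	static const struct rx_ops chip_a_ops = { .get_info = chip_a_get_info };
	static const struct rx_ops chip_b_ops = { .get_info = chip_b_get_info };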
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 5ca2d80679b6..6c253eae9d06 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -5235,7 +5235,7 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
dst->tx_abort = src->tx_abort;
- dst->mpdus_requed = src->mpdus_requed;
+ dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
@@ -5505,7 +5505,7 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", pdev->mpdus_requed);
+ "MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 3ade1ddd35c9..d35c47e0b19d 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -4171,8 +4171,8 @@ struct wmi_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
- /* Num MPDUs requed by SW */
- s32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index f2db7cf16566..3f4ce4e9c532 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -855,7 +855,7 @@ ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
}
/**
- * at5k_hw_stop_rx_pcu() - Stop RX engine
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 29527e8dcced..fefdc6753acd 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3303,8 +3303,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (ret < 0)
return ret;
} else {
- ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
- MATCHED_SSID_FILTER, 0);
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ MATCHED_SSID_FILTER, 0);
if (ret < 0)
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 76b538942a79..5184a0aacfe2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -522,6 +522,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
+ rxs->enc_flags |=
+ (rxsp->status4 & AR_STBC) ? (1 << RX_ENC_FLAG_STBC_SHIFT) : 0;
rxs->bw = (rxsp->status4 & AR_2040) ? RATE_INFO_BW_40 : RATE_INFO_BW_20;
rxs->evm0 = rxsp->status6;
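Unlike the single-bit GI flag on the line above it, STBC in mac80211's rx status is a two-bit count of space-time streams starting at RX_ENC_FLAG_STBC_SHIFT; AR_STBC here is taken to mean one STBC stream, hence the shifted 1 rather than a plain flag. Consumers recover the count as:

	/* Decode side: the 2-bit STBC field inside enc_flags. */
	u8 nstbc = (rxs->enc_flags & RX_ENC_FLAG_STBC_MASK) >>
		   RX_ENC_FLAG_STBC_SHIFT;	/* 1 for this frame */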
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 45f6402478b5..139831539da3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -307,6 +307,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
hchan = ah->curchan;
}
+ if (!hchan) {
+ fastcc = false;
+ hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef);
+ }
+
if (!ath_prepare_reset(sc))
fastcc = false;
@@ -2649,7 +2654,7 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 duration)
+ struct ieee80211_prep_tx_info *info)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index b2d760873992..ba9bea79381c 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -16,13 +16,11 @@ config CARL9170
config CARL9170_LEDS
bool "SoftLED Support"
- depends on CARL9170
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
default y
+ depends on CARL9170
+ depends on MAC80211_LEDS
help
- This option is necessary, if you want your device' LEDs to blink
+ This option is necessary if you want your device's LEDs to blink.
Say Y, unless you need the LEDs for firmware debugging.
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index eae9abf540a7..b53ebb3ac9a2 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -24,7 +24,7 @@
#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
/**
- * ath_hw_set_bssid_mask - filter out bssids we listen
+ * ath_hw_setbssidmask - filter out bssids we listen to
*
* @common: the ath_common struct for the device.
*
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 63079231e48e..8e1dbfda6538 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -800,7 +800,7 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
(char *)ctl_skb->skb->data, ctl_skb->skb->len);
/* Move the head of the ring to the next empty descriptor */
- ch->head_blk_ctl = ctl_skb->next;
+ ch->head_blk_ctl = ctl_skb->next;
/* Commit all previous writes and set descriptors to VALID */
wmb();
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 65ef893f2736..455143c4164e 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -3464,8 +3464,12 @@ struct wcn36xx_hal_rem_bcn_filter_req {
#define WCN36XX_HAL_OFFLOAD_DISABLE 0
#define WCN36XX_HAL_OFFLOAD_ENABLE 1
#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_MCAST_FILTER_ENABLE 0x4
+#define WCN36XX_HAL_OFFLOAD_NS_AND_MCAST_FILTER_ENABLE \
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_MCAST_FILTER_ENABLE)
#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
- (HAL_OFFLOAD_ENABLE|HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+#define WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX 0x02
struct wcn36xx_hal_ns_offload_params {
u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
@@ -3487,10 +3491,10 @@ struct wcn36xx_hal_ns_offload_params {
/* slot index for this offload */
u32 slot_index;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_host_offload_req {
- u8 offload_Type;
+ u8 offload_type;
/* enable or disable */
u8 enable;
@@ -3499,13 +3503,13 @@ struct wcn36xx_hal_host_offload_req {
u8 host_ipv4_addr[4];
u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
} u;
-};
+} __packed;
struct wcn36xx_hal_host_offload_req_msg {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_host_offload_req host_offload_params;
struct wcn36xx_hal_ns_offload_params ns_offload_params;
-};
+} __packed;
/* Packet Types. */
#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
@@ -4901,7 +4905,7 @@ struct wcn36xx_hal_gtk_offload_req_msg {
u64 key_replay_counter;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_gtk_offload_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4915,7 +4919,7 @@ struct wcn36xx_hal_gtk_offload_rsp_msg {
struct wcn36xx_hal_gtk_offload_get_info_req_msg {
struct wcn36xx_hal_msg_header header;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4939,7 +4943,7 @@ struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
u32 igtk_rekey_count;
u8 bss_index;
-};
+} __packed;
struct dhcp_info {
/* Indicates the device mode which indicates about the DHCP activity */
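The __packed annotations added through this file are not cosmetic: these structs are copied verbatim into the SMD message buffer and must match the firmware's byte layout, and without __packed a trailing u8 bss_index would be followed by compiler padding that shifts every later member of the enclosing message. A hedged sketch of guarding such a wire struct (the size literal is illustrative):

	struct fw_req {
		u8  type;
		u8  enable;
		u32 param;
	} __packed;			/* sizeof == 6, no hidden padding */

	static inline void fw_req_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct fw_req) != 6);
	}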
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index afb4877eaad8..d202f2128df2 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -25,6 +25,7 @@
#include <linux/rpmsg.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/soc/qcom/wcnss_ctrl.h>
+#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
@@ -172,7 +173,9 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wowlan_support = {
- .flags = WIPHY_WOWLAN_ANY
+ .flags = WIPHY_WOWLAN_ANY |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY
};
#endif
@@ -293,23 +296,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_dxe_pool;
}
- wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
- if (!wcn->hal_buf) {
- wcn36xx_err("Failed to allocate smd buf\n");
- ret = -ENOMEM;
- goto out_free_dxe_ctl;
- }
-
ret = wcn36xx_smd_load_nv(wcn);
if (ret) {
wcn36xx_err("Failed to push NV to chip\n");
- goto out_free_smd_buf;
+ goto out_free_dxe_ctl;
}
ret = wcn36xx_smd_start(wcn);
if (ret) {
wcn36xx_err("Failed to start chip\n");
- goto out_free_smd_buf;
+ goto out_free_dxe_ctl;
}
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
@@ -336,8 +332,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
out_smd_stop:
wcn36xx_smd_stop(wcn);
-out_free_smd_buf:
- kfree(wcn->hal_buf);
out_free_dxe_ctl:
wcn36xx_dxe_free_ctl_blks(wcn);
out_free_dxe_pool:
@@ -372,8 +366,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
wcn36xx_dxe_free_mem_pools(wcn);
wcn36xx_dxe_free_ctl_blks(wcn);
-
- kfree(wcn->hal_buf);
}
static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
@@ -1088,28 +1080,91 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
#ifdef CONFIG_PM
+static struct ieee80211_vif *wcn36xx_get_first_assoc_vif(struct wcn36xx *wcn)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ if (vif_priv->sta_assoc) {
+ vif = wcn36xx_priv_to_vif(vif_priv);
+ break;
+ }
+ }
+ return vif;
+}
+
static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
{
struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
- flush_workqueue(wcn->hal_ind_wq);
- wcn36xx_smd_set_power_params(wcn, true);
- return 0;
+ mutex_lock(&wcn->conf_mutex);
+
+ vif = wcn36xx_get_first_assoc_vif(wcn);
+ if (vif) {
+ ret = wcn36xx_smd_arp_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_ipv6_ns_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_gtk_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_set_power_params(wcn, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_wlan_host_suspend_ind(wcn);
+ }
+out:
+ mutex_unlock(&wcn->conf_mutex);
+ return ret;
}
static int wcn36xx_resume(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
- flush_workqueue(wcn->hal_ind_wq);
- wcn36xx_smd_set_power_params(wcn, false);
+ mutex_lock(&wcn->conf_mutex);
+ vif = wcn36xx_get_first_assoc_vif(wcn);
+ if (vif) {
+ wcn36xx_smd_host_resume(wcn);
+ wcn36xx_smd_set_power_params(wcn, false);
+ wcn36xx_smd_gtk_offload_get_info(wcn, vif);
+ wcn36xx_smd_gtk_offload(wcn, vif, false);
+ wcn36xx_smd_ipv6_ns_offload(wcn, vif, false);
+ wcn36xx_smd_arp_offload(wcn, vif, false);
+ }
+ mutex_unlock(&wcn->conf_mutex);
+
return 0;
}
+static void wcn36xx_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ mutex_lock(&wcn->conf_mutex);
+
+ memcpy(vif_priv->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(vif_priv->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ vif_priv->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ vif_priv->rekey_data.valid = true;
+
+ mutex_unlock(&wcn->conf_mutex);
+}
+
#endif
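The double conversion on replay_ctr above is an endianness round trip: cfg80211 supplies the counter as an 8-byte big-endian blob, while the HAL message carries a little-endian 64-bit value. Spelled out:

	/* nl80211 gives big-endian bytes; the firmware wants LE64. */
	u64 ctr = be64_to_cpup((__be64 *)data->replay_ctr); /* host order */
	__le64 fw_ctr = cpu_to_le64(ctr);                   /* wire order */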
static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
@@ -1176,6 +1231,34 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(vif_priv->tentative_addrs, 0, sizeof(vif_priv->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ vif_priv->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, vif_priv->tentative_addrs);
+ idx++;
+ if (idx >= WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX)
+ break;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "%pI6 %s\n", &ifa->addr,
+ (ifa->flags & IFA_F_TENTATIVE) ? "tentative" : NULL);
+ }
+ read_unlock_bh(&idev->lock);
+
+ vif_priv->num_target_ipv6_addrs = idx;
+}
+#endif
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1184,6 +1267,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
#ifdef CONFIG_PM
.suspend = wcn36xx_suspend,
.resume = wcn36xx_resume,
+ .set_rekey_data = wcn36xx_set_rekey_data,
#endif
.config = wcn36xx_config,
.prepare_multicast = wcn36xx_prepare_multicast,
@@ -1199,6 +1283,9 @@ static const struct ieee80211_ops wcn36xx_ops = {
.sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove,
.ampdu_action = wcn36xx_ampdu_action,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = wcn36xx_ipv6_addr_change,
+#endif
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
@@ -1401,6 +1488,12 @@ static int wcn36xx_probe(struct platform_device *pdev)
mutex_init(&wcn->hal_mutex);
mutex_init(&wcn->scan_lock);
+ wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index d0c3a1557e8d..0e3be17d8cea 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -445,22 +445,12 @@ out:
return ret;
}
-static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
- enum wcn36xx_hal_host_msg_type msg_type,
- size_t msg_size)
-{
- memset(hdr, 0, msg_size + sizeof(*hdr));
- hdr->msg_type = msg_type;
- hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
- hdr->len = msg_size + sizeof(*hdr);
-}
-
#define __INIT_HAL_MSG(msg_body, type, version) \
do { \
- memset(&msg_body, 0, sizeof(msg_body)); \
- msg_body.header.msg_type = type; \
- msg_body.header.msg_version = version; \
- msg_body.header.len = sizeof(msg_body); \
+ memset(&(msg_body), 0, sizeof(msg_body)); \
+ (msg_body).header.msg_type = type; \
+ (msg_body).header.msg_version = version; \
+ (msg_body).header.len = sizeof(msg_body); \
} while (0) \
#define INIT_HAL_MSG(msg_body, type) \
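The added parentheses are classic macro hygiene, and they are load-bearing here: the multicast-list path below now invokes INIT_HAL_MSG(*msg_body, ...) with a dereferenced pointer, and '.' binds tighter than unary '*'. A minimal demonstration:

	/* With p a struct pointer:
	 *   BAD(*p)  -> (*p.header.len) == (*(p.header.len))  (won't compile)
	 *   GOOD(*p) -> ((*p).header.len)                     (intended)
	 */
	#define BAD(m)  (m.header.len)
	#define GOOD(m) ((m).header.len)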
@@ -2729,8 +2719,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
wcn->hal_buf;
- init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
- sizeof(msg_body->mc_addr_list));
+ INIT_HAL_MSG(*msg_body, WCN36XX_HAL_8023_MULTICAST_LIST_REQ);
/* An empty list means all mc traffic will be received */
if (fp)
@@ -2756,6 +2745,269 @@ out:
return ret;
}
+int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_host_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
+ msg_body.host_offload_params.offload_type =
+ WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+ if (enable) {
+ msg_body.host_offload_params.enable =
+ WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
+ memcpy(&msg_body.host_offload_params.u,
+ &vif->bss_conf.arp_addr_list[0], sizeof(__be32));
+ }
+ msg_body.ns_offload_params.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_host_offload_req_msg msg_body;
+ struct wcn36xx_hal_ns_offload_params *ns_params;
+ struct wcn36xx_hal_host_offload_req *ho_params;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
+ ho_params = &msg_body.host_offload_params;
+ ns_params = &msg_body.ns_offload_params;
+
+ ho_params->offload_type = WCN36XX_HAL_IPV6_NS_OFFLOAD;
+ if (enable) {
+ ho_params->enable =
+ WCN36XX_HAL_OFFLOAD_NS_AND_MCAST_FILTER_ENABLE;
+ if (vif_priv->num_target_ipv6_addrs) {
+ memcpy(&ho_params->u,
+ &vif_priv->target_ipv6_addrs[0].in6_u,
+ sizeof(struct in6_addr));
+ memcpy(&ns_params->target_ipv6_addr1,
+ &vif_priv->target_ipv6_addrs[0].in6_u,
+ sizeof(struct in6_addr));
+ ns_params->target_ipv6_addr1_valid = 1;
+ }
+ if (vif_priv->num_target_ipv6_addrs > 1) {
+ memcpy(&ns_params->target_ipv6_addr2,
+ &vif_priv->target_ipv6_addrs[1].in6_u,
+ sizeof(struct in6_addr));
+ ns_params->target_ipv6_addr2_valid = 1;
+ }
+ }
+ memcpy(&ns_params->self_addr, vif->addr, ETH_ALEN);
+ ns_params->bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+#else
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ return 0;
+}
+#endif
+
+int wcn36xx_smd_gtk_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_REQ);
+
+ if (enable) {
+ memcpy(&msg_body.kek, vif_priv->rekey_data.kek, NL80211_KEK_LEN);
+ memcpy(&msg_body.kck, vif_priv->rekey_data.kck, NL80211_KCK_LEN);
+ msg_body.key_replay_counter =
+ le64_to_cpu(vif_priv->rekey_data.replay_ctr);
+ msg_body.bss_index = vif_priv->bss_index;
+ } else {
+ msg_body.flags = WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *rsp;
+ __be64 replay_ctr;
+
+ if (wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *)wcn->hal_buf;
+
+ if (rsp->bss_index != vif_priv->bss_index) {
+ wcn36xx_err("gtk_offload_info invalid response bss index %d\n",
+ rsp->bss_index);
+ return -ENOENT;
+ }
+
+ if (vif_priv->rekey_data.replay_ctr != cpu_to_le64(rsp->key_replay_counter)) {
+ replay_ctr = cpu_to_be64(rsp->key_replay_counter);
+ vif_priv->rekey_data.replay_ctr =
+ cpu_to_le64(rsp->key_replay_counter);
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "GTK replay counter increment %llu\n",
+ rsp->key_replay_counter);
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "gtk offload info status %d last_rekey_status %d "
+ "replay_counter %llu total_rekey_count %d gtk_rekey_count %d "
+ "igtk_rekey_count %d bss_index %d\n",
+ rsp->status, rsp->last_rekey_status,
+ rsp->key_replay_counter, rsp->total_rekey_count,
+ rsp->gtk_rekey_count, rsp->igtk_rekey_count,
+ rsp->bss_index);
+
+ return 0;
+}
+
+int wcn36xx_smd_gtk_offload_get_info(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_get_info_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending gtk_offload_get_info failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("gtk_offload_get_info failed err=%d\n", ret);
+ goto out;
+ }
+ ret = wcn36xx_smd_gtk_offload_get_info_rsp(wcn, vif);
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_wlan_host_suspend_ind_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_SUSPEND_IND);
+ msg_body.configured_mcst_bcst_filter_setting = 0;
+ msg_body.active_session_count = 1;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, msg_body.header.len);
+
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
+int wcn36xx_smd_host_resume(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_wlan_host_resume_req_msg msg_body;
+ struct wcn36xx_hal_host_resume_rsp_msg *rsp;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_RESUME_REQ);
+ msg_body.configured_mcst_bcst_filter_setting = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending wlan_host_resume failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("wlan_host_resume err=%d\n", ret);
+ goto out;
+ }
+
+ rsp = (struct wcn36xx_hal_host_resume_rsp_msg *)wcn->hal_buf;
+ if (rsp->status)
+ wcn36xx_warn("wlan_host_resume status=%d\n", rsp->status);
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 addr)
{
@@ -2804,6 +3056,10 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
+ case WCN36XX_HAL_HOST_OFFLOAD_RSP:
+ case WCN36XX_HAL_GTK_OFFLOAD_RSP:
+ case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
+ case WCN36XX_HAL_HOST_RESUME_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 462860572e1f..d8bded03945d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -146,4 +146,21 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
+
+int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_gtk_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_gtk_offload_get_info(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+
+int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn);
+
+int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 71fa9992b118..6121d8a5641a 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -18,6 +18,7 @@
#define _WCN36XX_H_
#include <linux/completion.h>
+#include <linux/in6.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <net/mac80211.h>
@@ -136,6 +137,19 @@ struct wcn36xx_vif {
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
struct list_head sta_list;
};
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6746fd206d2a..1ff2679963f0 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2842,9 +2842,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
if (p2p_wdev) {
- wiphy_lock(wil->wiphy);
cfg80211_unregister_wdev(p2p_wdev);
- wiphy_unlock(wil->wiphy);
kfree(p2p_wdev);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index d13d081fdcc6..67172385a5d6 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -9,7 +9,7 @@
#include "wil6210.h"
#include "trace.h"
-/**
+/*
* Theory of operation:
*
* There is ISR pseudo-cause register,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 02ad44997e87..2dc8406736f4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -224,7 +224,7 @@ struct auth_no_hdr {
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
- * return AHB address for given firmware internal (linker) address
+ * wmi_addr_remap - return AHB address for given firmware internal (linker) address
* @x: internal address
 * If the address has no valid AHB mapping, return 0
*/
@@ -242,7 +242,7 @@ static u32 wmi_addr_remap(u32 x)
}
/**
- * find fw_mapping entry by section name
+ * wil_find_fw_mapping - find fw_mapping entry by section name
* @section: section name
*
* Return pointer to section or NULL if not found
@@ -260,7 +260,7 @@ struct fw_map *wil_find_fw_mapping(const char *section)
}
/**
- * Check address validity for WMI buffer; remap if needed
+ * wmi_buffer_block - Check address validity for WMI buffer; remap if needed
* @wil: driver data
* @ptr_: internal (linker) fw/ucode address
* @size: if non zero, validate the block does not
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 665b737fbb0d..cf3ccf4ddfe7 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -4592,58 +4592,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- u8 channel = dev->phy.channel;
- int tone[2] = { 57, 58 };
- u32 noise[2] = { 0x3FF, 0x3FF };
-
B43_WARN_ON(dev->phy.rev < 3);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- if (nphy->gband_spurwar_en) {
- /* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && b43_is_40mhz(dev)) {
- ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- } else {
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
- /* TODO: N PHY Adjust CRS Min Power (0x1E) */
- }
-
- if (nphy->aband_spurwar_en) {
- if (channel == 54) {
- tone[0] = 0x20;
- noise[0] = 0x25F;
- } else if (channel == 38 || channel == 102 || channel == 118) {
- if (0 /* FIXME */) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
- } else if (channel == 134) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else if (channel == 151) {
- tone[0] = 0x10;
- noise[0] = 0x23F;
- } else if (channel == 153 || channel == 161) {
- tone[0] = 0x30;
- noise[0] = 0x23F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
-
- if (!tone[0] && !noise[0]) {
- ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- } else {
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
- }
-
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 0);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 7e2f70c4207c..6869f2bf1bae 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -213,19 +213,6 @@ return dev->dma.tx_ring1;
return ring;
}
-/* Bcm4301-ring to mac80211-queue mapping */
-static inline int txring_to_priority(struct b43legacy_dmaring *ring)
-{
- static const u8 idx_to_prio[] =
- { 3, 2, 1, 0, 4, 5, };
-
-/*FIXME: have only one queue, for now */
-return 0;
-
- return idx_to_prio[ring->index];
-}
-
-
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
int controller_idx)
{
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index f64ebff68308..eec3af9c3745 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -391,7 +391,7 @@ void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf)
* registers, we should take care of register overflows.
* In theory, the whole tsf read process should be atomic.
* We try to be atomic here, by restaring the read process,
- * if any of the high registers changed (overflew).
+ * if any of the high registers changed (overflowed).
*/
if (dev->dev->id.revision >= 3) {
u32 low;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index f4405d7861b6..cedba56fc448 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2767,8 +2767,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
- s32 total_rssi;
- s32 count_rssi;
+ s32 total_rssi_avg = 0;
+ s32 total_rssi = 0;
+ s32 count_rssi = 0;
int rssi;
u32 i;
@@ -2834,25 +2835,27 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
- total_rssi = 0;
- count_rssi = 0;
for (i = 0; i < BRCMF_ANT_MAX; i++) {
- if (sta_info_le.rssi[i]) {
- sinfo->chain_signal_avg[count_rssi] =
- sta_info_le.rssi[i];
- sinfo->chain_signal[count_rssi] =
- sta_info_le.rssi[i];
- total_rssi += sta_info_le.rssi[i];
- count_rssi++;
- }
+ if (sta_info_le.rssi[i] == 0 ||
+ sta_info_le.rx_lastpkt_rssi[i] == 0)
+ continue;
+ sinfo->chains |= BIT(count_rssi);
+ sinfo->chain_signal[count_rssi] =
+ sta_info_le.rx_lastpkt_rssi[i];
+ sinfo->chain_signal_avg[count_rssi] =
+ sta_info_le.rssi[i];
+ total_rssi += sta_info_le.rx_lastpkt_rssi[i];
+ total_rssi_avg += sta_info_le.rssi[i];
+ count_rssi++;
}
if (count_rssi) {
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
- sinfo->chains = count_rssi;
-
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
- total_rssi /= count_rssi;
- sinfo->signal = total_rssi;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+ sinfo->signal = total_rssi / count_rssi;
+ sinfo->signal_avg = total_rssi_avg / count_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
&ifp->vif->sme_state)) {
memset(&scb_val, 0, sizeof(scb_val));
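To make the new per-chain averaging concrete, take two antennas reporting last-packet RSSIs of -45 and -51 dBm and running averages of -44 and -52 dBm (hypothetical values): the loop above sets chains = 0x3, fills chain_signal[] from rx_lastpkt_rssi and chain_signal_avg[] from rssi, and reports signal = (-45 + -51) / 2 = -48 dBm and signal_avg = (-44 + -52) / 2 = -48 dBm.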
@@ -2892,8 +2895,13 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
&cfg->assoclist,
sizeof(cfg->assoclist));
if (err) {
- bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
- err);
+ /* GET_ASSOCLIST unsupported by firmware of older chips */
+ if (err == -EBADE)
+ bphy_info_once(drvr, "BRCMF_C_GET_ASSOCLIST unsupported\n");
+ else
+ bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST failed, err=%d\n",
+ err);
+
cfg->assoclist.count = 0;
return -EOPNOTSUPP;
}
@@ -6848,7 +6856,12 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
- bphy_err(drvr, "rxchain error (%d)\n", err);
+ /* rxchain unsupported by firmware of older chips */
+ if (err == -EBADE)
+ bphy_info_once(drvr, "rxchain unsupported\n");
+ else
+ bphy_err(drvr, "rxchain error (%d)\n", err);
+
nchain = 1;
} else {
for (nchain = 0; rxchain; nchain++)
@@ -7442,18 +7455,23 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
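As an illustration of the new fallback path above: a regulatory hint of "DE" on a device with no country-code table now yields ccreq->country_abbrev = ccreq->ccode = "DE" with rev 0 (from the memset), where the old code bailed out with -EINVAL before even checking whether the code was already set.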
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index cee1682d2333..db5f8535fdb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -188,9 +188,14 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
/*Finally, pick up the PROMISC flag */
cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
- if (err < 0)
- bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
- err);
+ if (err < 0) {
+ /* PROMISC unsupported by firmware of older chips */
+ if (err == -EBADE)
+ bphy_info_once(drvr, "BRCMF_C_SET_PROMISC unsupported\n");
+ else
+ bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, err=%d\n",
+ err);
+ }
brcmf_configure_arp_nd_offload(ifp, !cmd_value);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 44ba6f389fa9..9bb5f709d41a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -60,6 +60,10 @@ void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...);
##__VA_ARGS__); \
} while (0)
+#define bphy_info_once(drvr, fmt, ...) \
+ wiphy_info_once((drvr)->wiphy, "%s: " fmt, __func__, \
+ ##__VA_ARGS__)
+
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
/* For debug/tracing purposes treat info messages as errors */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 46c66415b4a6..e290dec9c53d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -32,6 +32,13 @@ static const char BRCM_ ## fw_name ## _FIRMWARE_BASENAME[] = \
BRCMF_FW_DEFAULT_PATH fw_base; \
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".bin")
+/* Firmware and Country Local Matrix files */
+#define BRCMF_FW_CLM_DEF(fw_name, fw_base) \
+static const char BRCM_ ## fw_name ## _FIRMWARE_BASENAME[] = \
+ BRCMF_FW_DEFAULT_PATH fw_base; \
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".bin"); \
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".clm_blob")
+
#define BRCMF_FW_ENTRY(chipid, mask, name) \
{ chipid, mask, BRCM_ ## name ## _FIRMWARE_BASENAME }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index a7554265f95f..2f7bc3a70c65 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -12,12 +12,59 @@
#include "common.h"
#include "of.h"
+static int brcmf_of_get_country_codes(struct device *dev,
+ struct brcmf_mp_device *settings)
+{
+ struct device_node *np = dev->of_node;
+ struct brcmfmac_pd_cc_entry *cce;
+ struct brcmfmac_pd_cc *cc;
+ int count;
+ int i;
+
+ count = of_property_count_strings(np, "brcm,ccode-map");
+ if (count < 0) {
+ /* The property is optional, so return success if it doesn't
+ * exist. Otherwise propagate the error code.
+ */
+ return (count == -EINVAL) ? 0 : count;
+ }
+
+ cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ cc->table_size = count;
+
+ for (i = 0; i < count; i++) {
+ const char *map;
+
+ cce = &cc->table[i];
+
+ if (of_property_read_string_index(np, "brcm,ccode-map",
+ i, &map))
+ continue;
+
+ /* String format e.g. US-Q2-86 */
+ if (sscanf(map, "%2c-%2c-%d", cce->iso3166, cce->cc,
+ &cce->rev) != 3)
+ brcmf_err("failed to read country map %s\n", map);
+ else
+ brcmf_dbg(INFO, "%s-%s-%d\n", cce->iso3166, cce->cc,
+ cce->rev);
+ }
+
+ settings->country_codes = cc;
+
+ return 0;
+}
+
void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
struct brcmf_mp_device *settings)
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
int irq;
+ int err;
u32 irqf;
u32 val;
@@ -43,8 +90,14 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
of_node_put(root);
}
- if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
- !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+ if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+ return;
+
+ err = brcmf_of_get_country_codes(dev, settings);
+ if (err)
+ brcmf_err("failed to get OF country code map (err=%d)\n", err);
+
+ if (bus_type != BRCMF_BUSTYPE_SDIO)
return;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
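For reference, the "brcm,ccode-map" entries parsed in the new brcmf_of_get_country_codes() above follow a "<ISO3166>-<firmware ccode>-<rev>" pattern. A tiny standalone C sketch of the same sscanf-based split (the example string is hypothetical):

#include <stdio.h>

int main(void)
{
	const char *map = "US-Q2-86";	/* hypothetical DT entry */
	char iso3166[3] = { 0 }, cc[3] = { 0 };
	int rev;

	/* %2c reads exactly two bytes and does not NUL-terminate,
	 * hence the zero-initialized three-byte buffers */
	if (sscanf(map, "%2c-%2c-%d", iso3166, cc, &rev) == 3)
		printf("%s -> %s, rev %d\n", iso3166, cc, rev);
	return 0;
}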
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 34cd8a7401fe..9ac0d8c73d5a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2037,7 +2037,7 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
}
/**
- * Change a P2P Role.
+ * brcmf_p2p_ifchange - Change a P2P Role.
* @cfg: driver private data for cfg80211 interface.
* @if_type: interface type.
* Returns 0 if success.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 143a705b5cb3..c49dd0c36ae4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -48,8 +48,8 @@ enum brcmf_pcie_state {
BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
-BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
-BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
+BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
+BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 16ed325795a8..97ee9e2e2e35 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -616,18 +616,18 @@ BRCMF_FW_DEF(43362, "brcmfmac43362-sdio");
BRCMF_FW_DEF(4339, "brcmfmac4339-sdio");
BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
-BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
-BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
+BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
+BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
-BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
-BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
+BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
+BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
-BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
-BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
+BRCMF_FW_CLM_DEF(4373, "brcmfmac4373-sdio");
+BRCMF_FW_CLM_DEF(43012, "brcmfmac43012-sdio");
/* firmware config files */
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@@ -1291,7 +1291,7 @@ static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
}
}
-/**
+/*
* brcmfmac sdio bus specific header
* This is the lowest layer header wrapped on the packets transmitted between
* host and WiFi dongle which contains information needed for SDIO core and
@@ -4162,7 +4162,6 @@ static int brcmf_sdio_bus_reset(struct device *dev)
if (ret) {
brcmf_err("Failed to probe after sdio device reset: ret %d\n",
ret);
- brcmf_sdiod_remove(sdiodev);
}
return ret;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 53365977bfd6..2084b506a450 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -531,9 +531,6 @@ void ai_detach(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
- if (sii == NULL)
- return;
-
kfree(sii);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 39f3af2d0439..eadac0f5590f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1220,6 +1220,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
+ int ret;
dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
@@ -1244,11 +1245,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: brcms_attach failed!\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_free_ieee80211;
}
brcms_led_register(wl);
return 0;
+
+err_free_ieee80211:
+ ieee80211_free_hw(hw);
+ return ret;
}
static int brcms_suspend(struct bcma_device *pdev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 763e0ec583d7..26de1bd7fee9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -6607,7 +6607,8 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
- memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
+ memcpy(&rts->ra, &h->addr1, ETH_ALEN);
+ memcpy(&rts->ta, &h->addr2, ETH_ALEN);
}
/* mainrate
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
index aa4ab53bf634..af86c7fc5112 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
@@ -29,7 +29,6 @@ void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 23fbddd0c1f8..47eb89b773cf 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5356,7 +5356,7 @@ struct ipw2100_wep_key {
#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
/**
- * Set a the wep key
+ * ipw2100_set_key() - Set the WEP key
*
* @priv: struct to work on
* @idx: index of the key we want to set
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 14b0db28143b..d86918d162aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -16,9 +16,10 @@ iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
-iwlwifi-objs += fw/dbg.o fw/pnvm.o
+iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index c2315dea9a23..7f1faa9d97b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 63
+#define IWL_22000_UCODE_API_MAX 64
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -47,6 +47,7 @@
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
+#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
@@ -93,6 +94,8 @@
IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
@@ -389,6 +392,7 @@ const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
+const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
const char iwl_ax200_killer_1650w_name[] =
@@ -724,6 +728,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
+ .fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
.uhb_supported = true,
@@ -797,6 +808,7 @@ MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index df1297358379..871533beff30 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -171,8 +171,12 @@ const char iwl9260_killer_1550_name[] =
"Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
const char iwl9560_killer_1550i_name[] =
"Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550i_160_name[] =
+ "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
const char iwl9560_killer_1550s_name[] =
"Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550s_160_name[] =
+ "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index e31bba836c6f..34933f133a0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -163,6 +163,27 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+/*
+ * Evaluate a DSM with no arguments and a u32 return value.
+ */
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+ guid, &val, sizeof(u32));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) down to u32 */
+ *value = (u32)val;
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev)
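A hedged usage sketch of the new iwl_acpi_get_dsm_u32() helper; the caller fragment is illustrative only (DSM_FUNC_ENABLE_UNII4_CHAN and struct iwl_lari_config_change_cmd_v4 are both added elsewhere in this series):

	struct iwl_lari_config_change_cmd_v4 cmd = {};
	u32 value;
	int ret;

	ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0, DSM_FUNC_ENABLE_UNII4_CHAN,
				   &iwl_guid, &value);
	if (!ret)
		/* value holds the DSM integer, truncated from u64 to u32 */
		cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);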
@@ -696,68 +717,37 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func)
-{
- union acpi_object *obj;
- u32 ret;
-
- obj = iwl_acpi_get_dsm_object(dev, 0,
- eval_func, NULL,
- &iwl_guid);
-
- if (IS_ERR(obj)) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM func '%d': Got Error in obj = %ld\n",
- eval_func,
- PTR_ERR(obj));
- return 0;
- }
-
- if (obj->type != ACPI_TYPE_INTEGER) {
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM func '%d' did not return a valid object, type=%d\n",
- eval_func,
- obj->type);
- ret = 0;
- goto out;
- }
-
- ret = obj->integer.value;
- IWL_DEBUG_DEV_RADIO(dev,
- "ACPI: DSM method evaluated: func='%d', ret=%d\n",
- eval_func,
- ret);
-out:
- ACPI_FREE(obj);
- return ret;
-}
-
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
- u32 ret;
+ int ret;
+ u8 value;
__le32 config_bitmap = 0;
/*
** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
*/
- ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
+ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+ DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &iwl_guid, &value);
- if (ret == DSM_VALUE_INDONESIA_ENABLE)
+ if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
/*
** Evaluate func 'DSM_FUNC_DISABLE_SRD'
*/
- ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);
-
- if (ret == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
-
- else if (ret == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+ DSM_FUNC_DISABLE_SRD,
+ &iwl_guid, &value);
+ if (!ret) {
+ if (value == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (value == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
return config_bitmap;
}
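For example, if the platform DSM reports DSM_VALUE_INDONESIA_ENABLE and DSM_VALUE_SRD_PASSIVE, the function above returns cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK | LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); with the reworked error handling, a failed iwl_acpi_get_dsm_u8() lookup simply leaves the corresponding bit clear instead of comparing a bogus value.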
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index d16e6ec08c9f..b858e998999c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -78,6 +78,7 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_DISABLE_SRD = 1,
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7
};
enum iwl_dsm_values_srd {
@@ -116,6 +117,9 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value);
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -182,6 +186,12 @@ static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
return -ENOENT;
}
+static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index c625d319142e..ce060c3dfd7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -535,11 +535,6 @@ enum iwl_legacy_cmds {
OFFLOADS_QUERY_CMD = 0xd5,
/**
- * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
- */
- REMOTE_WAKE_CONFIG_CMD = 0xd6,
-
- /**
* @D0I3_END_CMD: End D0i3/D3 state, no command data
*/
D0I3_END_CMD = 0xed,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 758639084e0c..b2e7ef3ddc88 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -159,6 +159,22 @@ struct iwl_proto_offload_cmd_v3_large {
struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+/**
+ * struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
+ * @sta_id: station id
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v4 {
+ __le32 sta_id;
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */
+
/*
* WOWLAN_PATTERNS
*/
@@ -302,13 +318,23 @@ struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
*/
- __le32 n_patterns;
+ u8 n_patterns;
+
+ /**
+ * @sta_id: station ID
+ */
+ u8 sta_id;
+
+ /**
+ * @reserved: reserved for alignment
+ */
+ __le16 reserved;
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern_v2 patterns[];
-} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */
enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
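A note on the layout change in the hunk above: the old __le32 n_patterns occupied four bytes, and the new u8 n_patterns + u8 sta_id + __le16 reserved triple occupies the same four (1 + 1 + 2), so the offset of the trailing patterns[] flexible array is unchanged between API versions 2 and 3.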
@@ -339,9 +365,10 @@ enum iwl_wowlan_flags {
};
/**
- * struct iwl_wowlan_config_cmd - WoWLAN configuration
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
- * @non_qos_seq: non-QoS sequence counter to use next
+ * @non_qos_seq: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 6.
* @qos_seq: QoS sequence counters to use next
* @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
* @is_11n_connection: indicates HT connection
@@ -456,6 +483,23 @@ struct iwl_wowlan_kek_kck_material_cmd_v3 {
__le32 bigtk_cipher;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
+struct iwl_wowlan_kek_kck_material_cmd_v4 {
+ __le32 sta_id;
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+ __le32 akm;
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */
+
+struct iwl_wowlan_get_status_cmd {
+ __le32 sta_id;
+} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */
+
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
enum iwl_wowlan_rekey_status {
@@ -604,12 +648,13 @@ struct iwl_wowlan_status_v7 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
- * struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
+ * struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
- * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 10.
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
@@ -638,7 +683,7 @@ struct iwl_wowlan_status_v9 {
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
/**
* struct iwl_wowlan_status - WoWLAN status
@@ -683,55 +728,6 @@ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
}
-#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
-
-struct iwl_tcp_packet_info {
- __le16 tcp_pseudo_header_checksum;
- __le16 tcp_payload_length;
-} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
-
-struct iwl_tcp_packet {
- struct iwl_tcp_packet_info info;
- u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
- u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_remote_wake_packet {
- struct iwl_tcp_packet_info info;
- u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
- u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_wowlan_remote_wake_config {
- __le32 connection_max_time; /* unused */
- /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
- u8 max_syn_retries;
- u8 max_data_retries;
- u8 tcp_syn_ack_timeout;
- u8 tcp_ack_timeout;
-
- struct iwl_tcp_packet syn_tx;
- struct iwl_tcp_packet synack_rx;
- struct iwl_tcp_packet keepalive_ack_rx;
- struct iwl_tcp_packet fin_tx;
-
- struct iwl_remote_wake_packet keepalive_tx;
- struct iwl_remote_wake_packet wake_rx;
-
- /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
- u8 sequence_number_offset;
- u8 sequence_number_length;
- u8 token_offset;
- u8 token_length;
- /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
- __le32 initial_sequence_number;
- __le16 keepalive_interval;
- __le16 num_tokens;
- u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
-} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
-
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index d299bba3aa54..985b0dc5b52a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -64,6 +64,12 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
+ * &struct iwl_thermal_dual_chain_request
+ */
+ THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
+
+ /**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
TLC_MNG_UPDATE_NOTIF = 0xF7,
@@ -169,4 +175,24 @@ struct iwl_datapath_monitor_notif {
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */
+/**
+ * enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
+ * @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
+ * (subject to other constraints)
+ * @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
+ * (static SMPS)
+ */
+enum iwl_thermal_dual_chain_req_events {
+ THERMAL_DUAL_CHAIN_REQ_ENABLE,
+ THERMAL_DUAL_CHAIN_REQ_DISABLE,
+}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */
+
+/**
+ * struct iwl_thermal_dual_chain_request - SMPS request
+ * @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
+ */
+struct iwl_thermal_dual_chain_request {
+ __le32 event;
+} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
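A minimal sketch of how a driver-side handler might consume the new notification; the handler name and the update_smps() helper are assumptions, only the struct and enum come from this patch:

static void handle_thermal_dual_chain_req(struct iwl_rx_packet *pkt)
{
	struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
	bool enable = le32_to_cpu(req->event) == THERMAL_DUAL_CHAIN_REQ_ENABLE;

	/* enable == false requests static SMPS (single chain); true
	 * re-allows dual-chain operation, subject to other constraints */
	update_smps(enable);	/* hypothetical helper */
}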
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 996d5cc5bd9a..5a2d9a1f7e73 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -11,6 +11,7 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
+#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
/**
* struct iwl_fw_ini_hcmd
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index dc8f2777e944..cf48c6fa8f65 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -453,6 +453,25 @@ struct iwl_lari_config_change_cmd_v3 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
/**
+ * struct iwl_lari_config_change_cmd_v4 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ */
+struct iwl_lari_config_change_cmd_v4 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index cc4e18ca9566..df7c55e06f54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1933,6 +1933,13 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
u32 num_of_ranges, i, size;
void *range;
+ /*
+ * The higher part of the ID in version 2 is irrelevant for
+ * us, so mask it out.
+ */
+ if (le32_to_cpu(reg->hdr.version) == 2)
+ id &= IWL_FW_INI_REGION_V2_MASK;
+
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
@@ -1957,7 +1964,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
- header->region_id = reg->id;
+ header->region_id = cpu_to_le32(id);
header->num_of_ranges = cpu_to_le32(num_of_ranges);
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
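Concretely: a version-2 region with id 0x00010004 (hypothetical) is masked by IWL_FW_INI_REGION_V2_MASK down to 0x0004 before being written back, and the dump header now carries that value in little-endian form via cpu_to_le32() instead of the raw reg->id.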
@@ -2752,44 +2759,6 @@ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
-#define FSEQ_REG(x) { .addr = (x), .str = #x, }
-
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
-{
- struct iwl_trans *trans = fwrt->trans;
- int i;
- struct {
- u32 addr;
- const char *str;
- } fseq_regs[] = {
- FSEQ_REG(FSEQ_ERROR_CODE),
- FSEQ_REG(FSEQ_TOP_INIT_VERSION),
- FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
- FSEQ_REG(FSEQ_OTP_VERSION),
- FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
- FSEQ_REG(FSEQ_ALIVE_TOKEN),
- FSEQ_REG(FSEQ_CNVI_ID),
- FSEQ_REG(FSEQ_CNVR_ID),
- FSEQ_REG(CNVI_AUX_MISC_CHIP),
- FSEQ_REG(CNVR_AUX_MISC_CHIP),
- FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
- FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
- };
-
- if (!iwl_trans_grab_nic_access(trans))
- return;
-
- IWL_ERR(fwrt, "Fseq Registers:\n");
-
- for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
- IWL_ERR(fwrt, "0x%08X | %s\n",
- iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
- fseq_regs[i].str);
-
- iwl_trans_release_nic_access(trans);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
-
static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
{
struct iwl_dbg_suspend_resume_cmd cmd = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 49fa2f5f8c7e..c0e84ef84f5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -321,4 +321,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
}
}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
new file mode 100644
index 000000000000..a1842205e86a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table_v1 {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 gp3; /* GP3 timer register */
+ u32 ucode_ver; /* uCode version */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* date and time of the
+ * compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 trm_hw_status0; /* TRM HW status */
+ u32 trm_hw_status1; /* TRM HW status */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 fw_rev_type; /* firmware revision type */
+ u32 major; /* uCode version major */
+ u32 minor; /* uCode version minor */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 last_cmd_id; /* last HCMD id handled by the firmware */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* date and time of the
+ * compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 umac_major;
+ u32 umac_minor;
+ u32 frame_pointer; /* core register 27 */
+ u32 stack_pointer; /* core register 28 */
+ u32 cmd_header; /* latest host cmd sent to UMAC */
+ u32 nic_isr_pref; /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_umac_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.umac_error_event_table;
+
+ if (!base &&
+ !(fwrt->trans->dbg.error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_UMAC))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (table.valid)
+ fwrt->dump.umac_err_id = table.error_id;
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+ fwrt->trans->status, table.valid);
+ }
+
+ IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
+ iwl_fw_lookup_assert_desc(table.error_id));
+ IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
+ IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
+ IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
+ IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_error_event_table table = {};
+ u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
+
+ if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
+ if (!base)
+ base = fwrt->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = fwrt->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x400000) {
+ IWL_ERR(fwrt,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (fwrt->cur_fw_img == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ /* check if there is a HW error */
+ val = iwl_trans_read_mem32(trans, base);
+ if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+ int err;
+
+ IWL_ERR(trans, "HW error, resetting before reading\n");
+
+ /* reset the device */
+ iwl_trans_sw_reset(trans);
+
+ err = iwl_finish_nic_init(trans, trans->trans_cfg);
+ if (err)
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (table.valid)
+ fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+ fwrt->trans->status, table.valid);
+ }
+
+ /* Do not change this output - scripts rely on it */
+
+ IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
+
+ IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
+ iwl_fw_lookup_assert_desc(table.error_id));
+ IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+ IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+ IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
+ IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
+ IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
+ IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
+ IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/*
+ * TCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_tcm_error_event_table {
+ u32 valid;
+ u32 error_id;
+ u32 blink2;
+ u32 ilink1;
+ u32 ilink2;
+ u32 data1, data2, data3;
+ u32 logpc;
+ u32 frame_pointer;
+ u32 stack_pointer;
+ u32 msgid;
+ u32 isr;
+ u32 hw_status[5];
+ u32 sw_status[1];
+ u32 reserved[4];
+} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_tcm_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.tcm_error_event_table;
+ int i;
+
+ if (!base ||
+ !(fwrt->trans->dbg.error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_TCM))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ IWL_ERR(fwrt, "TCM status:\n");
+ IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+ IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
+ IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
+ IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
+ for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
+ IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
+ table.hw_status[i], i);
+ for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
+ IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
+ table.sw_status[i], i);
+}
+
+static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ u32 error, data1;
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ error = UMAG_SB_CPU_2_STATUS;
+ data1 = UMAG_SB_CPU_1_STATUS;
+ } else if (fwrt->trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ error = SB_CPU_2_STATUS;
+ data1 = SB_CPU_1_STATUS;
+ } else {
+ return;
+ }
+
+ /* read whichever status register was selected above */
+ error = iwl_read_umac_prph(trans, error);
+
+ IWL_ERR(trans, "IML/ROM dump:\n");
+
+ if (error & 0xFFFF0000)
+ IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
+
+ IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
+ IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
+ iwl_read_umac_prph(trans, data1));
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+ iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
+}
+
+#define FSEQ_REG(x) { .addr = (x), .str = #x, }
+
+static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ int i;
+ struct {
+ u32 addr;
+ const char *str;
+ } fseq_regs[] = {
+ FSEQ_REG(FSEQ_ERROR_CODE),
+ FSEQ_REG(FSEQ_TOP_INIT_VERSION),
+ FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
+ FSEQ_REG(FSEQ_OTP_VERSION),
+ FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
+ FSEQ_REG(FSEQ_ALIVE_TOKEN),
+ FSEQ_REG(FSEQ_CNVI_ID),
+ FSEQ_REG(FSEQ_CNVR_ID),
+ FSEQ_REG(CNVI_AUX_MISC_CHIP),
+ FSEQ_REG(CNVR_AUX_MISC_CHIP),
+ FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
+ FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
+ };
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return;
+
+ IWL_ERR(fwrt, "Fseq Registers:\n");
+
+ for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
+ IWL_ERR(fwrt, "0x%08X | %s\n",
+ iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
+ fseq_regs[i].str);
+
+ iwl_trans_release_nic_access(trans);
+}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
+{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+ IWL_ERR(fwrt,
+ "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+ return;
+ }
+
+ iwl_fwrt_dump_lmac_error_log(fwrt, 0);
+ if (fwrt->trans->dbg.lmac_error_event_table[1])
+ iwl_fwrt_dump_lmac_error_log(fwrt, 1);
+ iwl_fwrt_dump_umac_error_log(fwrt);
+ iwl_fwrt_dump_tcm_error_log(fwrt);
+ iwl_fwrt_dump_iml_error_log(fwrt);
+ iwl_fwrt_dump_fseq_regs(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index f9c5cf538ad1..9a8c7b7a0816 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -52,7 +52,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
- IWL_UCODE_TLV_PAN = 7,
+ IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */
+ IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@@ -97,6 +98,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
+ IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
@@ -277,10 +279,11 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
- NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
- /* sparse says it cannot increment the previous enum member */
- = 128
+ /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_API 128
+#else
+ NUM_IWL_UCODE_TLV_API
#endif
};
@@ -411,6 +414,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
+ IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -446,10 +450,11 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,
- NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
- /* sparse says it cannot increment the previous enum member */
- = 128
+ /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_CAPA 128
+#else
+ NUM_IWL_UCODE_TLV_CAPA
#endif
};
@@ -946,6 +951,10 @@ struct iwl_fw_cmd_version {
u8 notif_ver;
} __packed;
+struct iwl_fw_tcm_error_addr {
+ __le32 addr;
+}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
+
static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 40f2109a097f..2403490cbc26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -10,7 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/alive.h"
-#include <linux/efi.h>
+#include "fw/uefi.h"
struct iwl_pnvm_section {
__le32 offset;
@@ -220,83 +220,6 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
-#if defined(CONFIG_EFI)
-
-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
- 0xb2, 0xec, 0xf5, 0xa3, \
- 0x59, 0x4f, 0x4a, 0xea)
-
-#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
-
-#define IWL_HARDCODED_PNVM_SIZE 4096
-
-struct pnvm_sku_package {
- u8 rev;
- u8 reserved1[3];
- u32 total_size;
- u8 n_skus;
- u8 reserved2[11];
- u8 data[];
-};
-
-static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
- u8 **data, size_t *len)
-{
- struct efivar_entry *pnvm_efivar;
- struct pnvm_sku_package *package;
- unsigned long package_size;
- int err;
-
- pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
- if (!pnvm_efivar)
- return -ENOMEM;
-
- memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
- sizeof(IWL_UEFI_OEM_PNVM_NAME));
- pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
-
- /*
- * TODO: we hardcode a maximum length here, because reading
- * from the UEFI is not working. To implement this properly,
- * we have to call efivar_entry_size().
- */
- package_size = IWL_HARDCODED_PNVM_SIZE;
-
- package = kmalloc(package_size, GFP_KERNEL);
- if (!package) {
- err = -ENOMEM;
- goto out;
- }
-
- err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
- if (err) {
- IWL_DEBUG_FW(trans,
- "PNVM UEFI variable not found %d (len %lu)\n",
- err, package_size);
- goto out;
- }
-
- IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size);
-
- *data = kmemdup(package->data, *len, GFP_KERNEL);
- if (!*data)
- err = -ENOMEM;
- *len = package_size - sizeof(*package);
-
-out:
- kfree(package);
- kfree(pnvm_efivar);
-
- return err;
-}
-#else /* CONFIG_EFI */
-static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
- u8 **data, size_t *len)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_EFI */
-
static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
@@ -335,6 +258,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
{
u8 *data;
size_t len;
+ struct pnvm_sku_package *package;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
@@ -356,9 +280,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
}
/* First attempt to get the PNVM from BIOS */
- ret = iwl_pnvm_get_from_efi(trans, &data, &len);
- if (!ret)
- goto parse;
+ package = iwl_uefi_get_pnvm(trans, &len);
+ if (!IS_ERR_OR_NULL(package)) {
+ data = kmemdup(package->data, len, GFP_KERNEL);
+
+ /* free package regardless of whether kmemdup succeeded */
+ kfree(package);
+
+ if (data) {
+ /* we need only the data size */
+ len -= sizeof(*package);
+ goto parse;
+ }
+ }
/* If it's not available, try from the filesystem */
ret = iwl_pnvm_get_from_fs(trans, &data, &len);
@@ -379,6 +313,30 @@ parse:
kfree(data);
skip_parse:
+ data = NULL;
+ /* now try to get the reduce power table, if not loaded yet */
+ if (!trans->reduce_power_loaded) {
+ data = iwl_uefi_get_reduced_power(trans, &len);
+ if (IS_ERR_OR_NULL(data)) {
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->reduce_power_loaded = true;
+
+ goto skip_reduce_power;
+ }
+ }
+
+ ret = iwl_trans_set_reduce_power(trans, data, len);
+ if (ret)
+ IWL_DEBUG_FW(trans,
+ "Failed to set reduce power table %d\n",
+ ret);
+ kfree(data);
+
+skip_reduce_power:
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
iwl_pnvm_complete_fn, trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
index e4f91bce222d..61d3d4e0b7d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/******************************************************************************
*
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
*
*****************************************************************************/
@@ -10,7 +10,7 @@
#include "fw/notif-wait.h"
-#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
new file mode 100644
index 000000000000..a7c79d814aa4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+
+#include "fw/uefi.h"
+#include "fw/api/alive.h"
+#include <linux/efi.h>
+
+#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
+ 0xb2, 0xec, 0xf5, 0xa3, \
+ 0x59, 0x4f, 0x4a, 0xea)
+
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+ struct efivar_entry *pnvm_efivar;
+ void *data;
+ unsigned long package_size;
+ int err;
+
+ *len = 0;
+
+ pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
+ if (!pnvm_efivar)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
+ sizeof(IWL_UEFI_OEM_PNVM_NAME));
+ pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+ /*
+ * TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_PNVM_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data) {
+ data = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
+ if (err) {
+ IWL_DEBUG_FW(trans,
+ "PNVM UEFI variable not found %d (len %zd)\n",
+ err, package_size);
+ kfree(data);
+ data = ERR_PTR(err);
+ goto out;
+ }
+
+ IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %zd\n", package_size);
+ *len = package_size;
+
+out:
+ kfree(pnvm_efivar);
+
+ return data;
+}
+
+static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
+ const u8 *data, size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+ u8 *reduce_power_data = NULL, *tmp;
+ u32 size = 0;
+
+ IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");
+
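+ /* walk the TLV stream: a fixed header followed by a payload padded to 4 bytes */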
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ reduce_power_data = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_MEM_DESC: {
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_MEM_DESC len %d\n",
+ tlv_len);
+
+ IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);
+
+ tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
+ if (!tmp) {
+ IWL_DEBUG_FW(trans,
+ "Couldn't allocate (more) reduce_power_data\n");
+
+ reduce_power_data = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ reduce_power_data = tmp;
+
+ memcpy(reduce_power_data + size, data, tlv_len);
+
+ size += tlv_len;
+
+ break;
+ }
+ case IWL_UCODE_TLV_PNVM_SKU:
+ IWL_DEBUG_FW(trans,
+ "New REDUCE_POWER section started, stop parsing.\n");
+ goto done;
+ default:
+ IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+ tlv_type, tlv_len);
+ break;
+ }
+
+ len -= ALIGN(tlv_len, 4);
+ data += ALIGN(tlv_len, 4);
+ }
+
+done:
+ if (!size) {
+ IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+ reduce_power_data = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ IWL_INFO(trans, "loaded REDUCE_POWER\n");
+
+out:
+ return reduce_power_data;
+}
+
+static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
+ const u8 *data, size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+ void *sec_data;
+
+ IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
+
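+ /* find the PNVM_SKU TLV matching our SKU ID and parse the section that follows it */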
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+ struct iwl_sku_id *sku_id =
+ (void *)(data + sizeof(*tlv));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+ tlv_len);
+ IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+ le32_to_cpu(sku_id->data[0]),
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ sec_data = iwl_uefi_reduce_power_section(trans,
+ data,
+ len);
+ if (!IS_ERR(sec_data))
+ return sec_data;
+ } else {
+ IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+ }
+ } else {
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+ struct efivar_entry *reduce_power_efivar;
+ struct pnvm_sku_package *package;
+ void *data = NULL;
+ unsigned long package_size;
+ int err;
+
+ *len = 0;
+
+ reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
+ if (!reduce_power_efivar)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
+ sizeof(IWL_UEFI_REDUCED_POWER_NAME));
+ reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+ /*
+ * TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
+
+ package = kmalloc(package_size, GFP_KERNEL);
+ if (!package) {
+ package = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
+ if (err) {
+ IWL_DEBUG_FW(trans,
+ "Reduced Power UEFI variable not found %d (len %lu)\n",
+ err, package_size);
+ kfree(package);
+ data = ERR_PTR(err);
+ goto out;
+ }
+
+ IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
+ package_size);
+ *len = package_size;
+
+ IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
+ package->rev, package->total_size, package->n_skus);
+
+ data = iwl_uefi_reduce_power_parse(trans, package->data,
+ *len - sizeof(*package));
+
+ kfree(package);
+
+out:
+ kfree(reduce_power_efivar);
+
+ return data;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
new file mode 100644
index 000000000000..45d0b36d79b5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+
+#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
+#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
+
+/*
+ * TODO: we have these hardcoded values that the caller must pass,
+ * because reading from the UEFI is not working. To implement this
+ * properly, we have to change iwl_pnvm_get_from_uefi() to call
+ * efivar_entry_size() and return the value to the caller instead.
+ */
+#define IWL_HARDCODED_PNVM_SIZE 4096
+#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
+
+struct pnvm_sku_package {
+ u8 rev;
+ u32 total_size;
+ u8 n_skus;
+ u32 reserved[2];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_EFI
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+#else /* CONFIG_EFI */
+static inline
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_EFI */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index b35ffdfdf14b..bf6ee56d4d96 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -426,6 +426,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
+#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -505,8 +506,11 @@ extern const char iwl_ax201_killer_1650s_name[];
extern const char iwl_ax201_killer_1650i_name[];
extern const char iwl_ax210_killer_1675w_name[];
extern const char iwl_ax210_killer_1675x_name[];
+extern const char iwl9560_killer_1550i_160_name[];
+extern const char iwl9560_killer_1550s_160_name[];
extern const char iwl_ax211_name[];
extern const char iwl_ax221_name[];
+extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -586,7 +590,6 @@ extern const struct iwl_cfg iwl_qu_b0_hr_b0;
extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
@@ -613,6 +616,7 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 2be605cc6fbf..e1fec23ac07f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018, 2020-2021 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -128,6 +128,17 @@ struct iwl_prph_scratch_rbd_cfg {
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
/*
+ * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: table size in dwords
+ */
+struct iwl_prph_scratch_uefi_cfg {
+ __le64 base_addr;
+ __le32 size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
+/*
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
@@ -141,6 +152,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+ struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
/*
@@ -151,7 +163,7 @@ struct iwl_prph_scratch_ctrl_cfg {
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
- __le32 reserved[16];
+ __le32 reserved[12];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
@@ -245,9 +257,11 @@ struct iwl_context_info_gen3 {
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len);
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const void *data, u32 len);
#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index db312abd2e09..47e5a17c0f48 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -325,9 +325,6 @@ enum {
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
-/* HW_RF CHIP ID */
-#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
-
/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 4cd8c39cc3e9..0ddd255a8cc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -57,7 +57,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
- [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 1,},
+ [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
@@ -178,9 +178,20 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 type = le32_to_cpu(reg->type);
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
+ /*
+ * The higher part of the ID in version 2 is irrelevant for
+ * us, so mask it out.
+ */
+ if (le32_to_cpu(reg->hdr.version) == 2)
+ id &= IWL_FW_INI_REGION_V2_MASK;
+
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
+ /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
+ IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
+ IWL_FW_INI_MAX_NAME, reg->name);
+
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 884750bf7840..977dce686bdb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1117,6 +1117,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
+ case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
+ struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
+
+ if (tlv_len != sizeof(*ptr))
+ goto invalid_tlv_len;
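+ /* strip the FW_ADDR_CACHE_CONTROL bits to recover the plain table address */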
+ drv->trans->dbg.tcm_error_event_table =
+ le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_TCM;
+ break;
+ }
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index fc75d049046d..850648ebd61c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -549,8 +549,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
.mac_cap_info[4] =
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
@@ -579,25 +578,20 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
- .phy_cap_info[5] =
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
- IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
@@ -632,19 +626,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
- .mac_cap_info[2] =
- IEEE80211_HE_MAC_CAP2_BSR,
.mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
- .mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
- .mac_cap_info[5] =
- IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
@@ -654,27 +640,14 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
- .phy_cap_info[4] =
- IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
- .phy_cap_info[5] =
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
},
/*
@@ -745,12 +718,72 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
+static void
+iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sband_iftype_data *iftype_data,
+ u8 tx_chains, u8 rx_chains,
+ const struct iwl_fw *fw)
+{
+ bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+
+ /* Advertise an A-MPDU exponent extension based on
+ * operating band
+ */
+ if (sband->band != NL80211_BAND_2GHZ)
+ iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1;
+ else
+ iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+
+ if (is_ap && iwlwifi_mod_params.nvm_file)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
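+ /* 2x2 capable: advertise two beamformee sounding dimensions (and NC=2 for clients) */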
+ if ((tx_chains & rx_chains) == ANT_AB) {
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+ if (!is_ap)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_MAX_NC_2;
+ } else if (!is_ap) {
+ /* If not 2x2, we need to indicate 1x1 in the
+ * Midamble RX Max NSTS - but not for AP mode
+ */
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ }
+
+ switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_GF:
+ case IWL_CFG_RF_TYPE_MR:
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ if (!is_ap)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ }
+
+ if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
+ iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT;
+}
+
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
- u8 tx_chains, u8 rx_chains)
+ u8 tx_chains, u8 rx_chains,
+ const struct iwl_fw *fw)
{
struct ieee80211_sband_iftype_data *iftype_data;
+ int i;
/* should only initialize once */
if (WARN_ON(sband->iftype_data))
@@ -777,26 +810,18 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
sband->iftype_data = iftype_data;
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
- /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
- if ((tx_chains & rx_chains) != ANT_AB) {
- int i;
-
- for (i = 0; i < sband->n_iftype_data; i++) {
- iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &=
- ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
- iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
- iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &=
- ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
- }
- }
+ for (i = 0; i < sband->n_iftype_data; i++)
+ iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
+ tx_chains, rx_chains, fw);
+
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
}
static void iwl_init_sbands(struct iwl_trans *trans,
struct iwl_nvm_data *data,
const void *nvm_ch_flags, u8 tx_chains,
- u8 rx_chains, u32 sbands_flags, bool v4)
+ u8 rx_chains, u32 sbands_flags, bool v4,
+ const struct iwl_fw *fw)
{
struct device *dev = trans->dev;
const struct iwl_cfg *cfg = trans->cfg;
@@ -816,7 +841,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
@@ -831,7 +857,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
/* 6GHz band. */
sband = &data->bands[NL80211_BAND_6GHZ];
@@ -843,7 +870,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
NL80211_BAND_6GHZ);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
else
sband->n_channels = 0;
if (n_channels != n_used)
@@ -1154,7 +1182,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
- sbands_flags, false);
+ sbands_flags, false, fw);
data->calib_version = 255;
return data;
@@ -1661,7 +1689,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
- sbands_flags, v4);
+ sbands_flags, v4, fw);
iwl_free_resp(&hcmd);
return nvm;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 3ce77e4eb7e3..9a9e714bf9af 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -412,6 +412,8 @@ enum {
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
+#define CNVI_MBOX_C 0xA3400C
+
#define FSEQ_ERROR_CODE 0xA340C8
#define FSEQ_TOP_INIT_VERSION 0xA34038
#define FSEQ_CNVIO_INIT_VERSION 0xA3403C
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bf569f856ad8..0199d7a5a648 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -193,6 +193,7 @@ enum iwl_error_event_table_status {
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
+ IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
};
/**
@@ -589,6 +590,8 @@ struct iwl_trans_ops {
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
+ int (*set_reduce_power)(struct iwl_trans *trans,
+ const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
};
@@ -706,6 +709,7 @@ struct iwl_self_init_dram {
* @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
+ * @tcm_error_event_table: address of TCM error table
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers were received via TLV. Uses &enum iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@@ -732,6 +736,7 @@ struct iwl_trans_debug {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
+ u32 tcm_error_event_table;
unsigned int error_event_table_tlv_status;
enum iwl_ini_cfg_state internal_ini_cfg;
@@ -957,6 +962,7 @@ struct iwl_trans {
bool pm_support;
bool ltr_enabled;
u8 pnvm_loaded:1;
+ u8 reduce_power_loaded:1;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -1420,6 +1426,20 @@ static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
return 0;
}
+static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ if (trans->ops->set_reduce_power) {
+ int ret = trans->ops->set_reduce_power(trans, data, len);
+
+ if (ret)
+ return ret;
+ }
+
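+ /* remember the table was handled so we don't retry on every restart */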
+ trans->reduce_power_loaded = true;
+ return 0;
+}
+
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 2e28cf299ef4..6a259d867d90 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -104,7 +104,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
- struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
+ struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
bool error, use_rsc_tsc, use_tkip, configure_keys;
int wep_key_idx;
};
@@ -393,14 +393,19 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct cfg80211_wowlan *wowlan)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_PATTERNS,
+ IWL_FW_CMD_VER_UNKNOWN);
if (!wowlan->n_patterns)
return 0;
@@ -408,11 +413,13 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
cmd.len[0] = sizeof(*pattern_cmd) +
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
- pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
return -ENOMEM;
- pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+ pattern_cmd->n_patterns = wowlan->n_patterns;
+ if (ver >= 3)
+ pattern_cmd->sta_id = mvmvif->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -636,7 +643,6 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct ieee80211_sta *ap_sta)
{
- int ret;
struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
@@ -646,12 +652,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
- /* Query the last used seqno and set it */
- ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
- if (ret < 0)
- return ret;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_CONFIGURATION, 0) < 6) {
+ /* Query the last used seqno and set it */
+ int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
- wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+ if (ret < 0)
+ return ret;
+
+ wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+ }
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
@@ -706,7 +716,8 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 cmd_flags)
{
- struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -715,7 +726,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
- .kek_kck_cmd = &kek_kck_cmd,
+ .kek_kck_cmd = _kek_kck_cmd,
};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -809,13 +820,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN);
- if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
+ if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
return -EINVAL;
- if (cmd_ver == 3)
- cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
- else
- cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
mvmvif->rekey_data.kck_len);
@@ -825,6 +832,21 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
+
+ if (cmd_ver == 4) {
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
+ } else {
+ if (cmd_ver == 3)
+ cmd_size =
+ sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+ else
+ cmd_size =
+ sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+ /* skip the sta_id at the beginning */
+ _kek_kck_cmd = (void *)
+ ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+ }
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
mvmvif->rekey_data.akm);
@@ -832,7 +854,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
cmd_size,
- &kek_kck_cmd);
+ _kek_kck_cmd);
if (ret)
goto out;
}
@@ -884,7 +906,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@@ -1534,9 +1556,12 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
}
out:
- mvmvif->seqno_valid = true;
- /* +0x10 because the set API expects next-to-use, not last-used */
- mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_GET_STATUSES, 0) < 10) {
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+ }
return true;
}
@@ -1587,15 +1612,27 @@ iwl_mvm_parse_wowlan_status_common(v6)
iwl_mvm_parse_wowlan_status_common(v7)
iwl_mvm_parse_wowlan_status_common(v9)
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
+static struct iwl_wowlan_status *
+iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
struct iwl_wowlan_status *status;
+ struct iwl_wowlan_get_status_cmd get_status_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ };
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
+ .data = { &get_status_cmd, },
+ .len = { sizeof(get_status_cmd), },
};
int ret, len;
u8 notif_ver;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN);
+
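+ /* FW without a versioned command takes no sta_id payload */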
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ cmd.len[0] = 0;
lockdep_assert_held(&mvm->mutex);
@@ -1608,8 +1645,11 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
/* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
- notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- WOWLAN_GET_STATUSES, 7);
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_GET_STATUSES, 0);
+ if (!notif_ver)
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 7);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
@@ -1654,7 +1694,7 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
status->gtk[0] = v7->gtk[0];
status->igtk[0] = v7->igtk[0];
- } else if (notif_ver == 9) {
+ } else if (notif_ver == 9 || notif_ver == 10) {
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
@@ -1680,29 +1720,37 @@ out_free_resp:
}
static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
{
- int ret;
-
- /* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ OFFLOADS_QUERY_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+ __le32 station_id = cpu_to_le32(sta_id);
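+ /* newer FW takes the station ID as payload, older FW expects an empty command */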
+ u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
+
+ if (!mvm->net_detect) {
+ /* only for tracing for now */
+ int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
+ cmd_size, &station_id);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+ }
- return iwl_mvm_send_wowlan_get_status(mvm);
+ return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_status_data status;
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- fw_status = iwl_mvm_get_wakeup_status(mvm);
+ fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@@ -1880,7 +1928,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, n_matches, ret;
- fw_status = iwl_mvm_get_wakeup_status(mvm);
+ fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 38d0bfb649cc..7d9faeffd154 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -460,7 +460,7 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
int pos = 0;
mutex_lock(&mvm->mutex);
- iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
mutex_unlock(&mvm->mutex);
do_div(curr_os, NSEC_PER_USEC);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 63d65018d098..95f883aba148 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1023,7 +1023,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mvm->fw_restart++;
/* take the return value to make compiler happy - it will fail anyway */
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(LONG_GROUP, REPLY_ERROR),
+ 0, 0, NULL);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index a456b8a0ae58..59cef0d89a6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -430,6 +430,10 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
FTM_PUT_FLAG(TB);
else if (peer->ftm.non_trigger_based)
FTM_PUT_FLAG(NON_TB);
+
+ if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+ peer->ftm.lmr_feedback)
+ FTM_PUT_FLAG(LMR_FEEDBACK);
}
static int
@@ -879,7 +883,8 @@ static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
u32 curr_gp2, diff;
u64 now_from_boot_ns;
- iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
+ iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
+ &now_from_boot_ns, NULL);
if (curr_gp2 >= gp2_ts)
diff = curr_gp2 - gp2_ts;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 8aa5f1a2c58c..38fd5886af2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1139,19 +1139,34 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
- int cmd_ret;
- struct iwl_lari_config_change_cmd_v3 cmd = {};
+ int ret;
+ u32 value;
+ struct iwl_lari_config_change_cmd_v4 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
+ ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
/* apply more config masks here */
- if (cmd.config_bitmap) {
+ ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
+ DSM_FUNC_ENABLE_UNII4_CHAN,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
+
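+ /* only send the command if at least one bitmap is non-zero */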
+ if (cmd.config_bitmap ||
+ cmd.oem_11ax_allow_bitmap ||
+ cmd.oem_unii4_allow_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE, 1);
- if (cmd_ver == 3)
+ if (cmd_ver == 4)
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+ else if (cmd_ver == 3)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
else if (cmd_ver == 2)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
@@ -1159,16 +1174,21 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap));
- cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE),
- 0, cmd_size, &cmd);
- if (cmd_ret < 0)
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.config_bitmap),
+ le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(mvm,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
+ le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+ cmd_ver);
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 0, cmd_size, &cmd);
+ if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
- cmd_ret);
+ ret);
}
}
#else /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 607d5d564928..70ebecb73c24 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3306,14 +3306,14 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 req_duration)
+ struct ieee80211_prep_tx_info *info)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
- if (req_duration > duration)
- duration = req_duration;
+ if (info->duration > duration)
+ duration = info->duration;
mutex_lock(&mvm->mutex);
/* Try really hard to protect the session and hear a beacon
@@ -3800,6 +3800,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct cfg80211_chan_def chandef;
struct iwl_mvm_phy_ctxt *phy_ctxt;
+ bool band_change_removal;
int ret, i;
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3880,19 +3881,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
/*
- * Change the PHY context configuration as it is currently referenced
- * only by the P2P Device MAC
+ * Check if the remain-on-channel is on a different band and that
+ * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+ * so, we'll need to release and then re-configure here, since we
+ * must not remove a PHY context that's part of a binding.
*/
- if (mvmvif->phy_ctxt->ref == 1) {
+ band_change_removal =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+ if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+ /*
+ * Change the PHY context configuration as it is currently
+ * referenced only by the P2P Device MAC (and we can modify it)
+ */
ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
&chandef, 1, 1);
if (ret)
goto out_unlock;
} else {
/*
- * The PHY context is shared with other MACs. Need to remove the
- * P2P Device from the binding, allocate an new PHY context and
- * create a new binding
+ * The PHY context is shared with other MACs (or we're trying to
+ * switch bands), so remove the P2P Device from the binding,
+ * allocate a new PHY context and create a new binding.
*/
phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!phy_ctxt) {
@@ -4211,7 +4223,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
struct ieee80211_vif *disabled_vif = NULL;
lockdep_assert_held(&mvm->mutex);
-
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
switch (vif->type) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4d9d4d6892fc..b50942f28bb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -16,6 +16,8 @@
#include <linux/thermal.h>
#endif
+#include <linux/ktime.h>
+
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "fw/notif-wait.h"
@@ -195,6 +197,7 @@ enum iwl_mvm_smps_type_request {
IWL_MVM_SMPS_REQ_BT_COEX,
IWL_MVM_SMPS_REQ_TT,
IWL_MVM_SMPS_REQ_PROT,
+ IWL_MVM_SMPS_REQ_FW,
NUM_IWL_MVM_SMPS_REQ,
};
@@ -991,6 +994,8 @@ struct iwl_mvm {
*/
bool temperature_test; /* Debug test temperature is enabled */
+ bool fw_static_smps_request;
+
unsigned long bt_coex_last_tcm_ts;
struct iwl_mvm_tcm tcm;
@@ -1447,10 +1452,16 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+
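+/* thin wrapper: error-log dumping now lives in the shared fw runtime code */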
+static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ iwl_fwrt_dump_error_logs(&mvm->fwrt);
+}
+
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
+ u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
/* Tx / Host Commands */
@@ -1769,7 +1780,6 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@@ -1827,7 +1837,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index 1cc90e61367b..41880517e8bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2012-2014, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -36,7 +36,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
- struct iwl_proto_offload_cmd_v3_large v3l;
+ struct iwl_proto_offload_cmd_v4 v4;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
@@ -47,6 +47,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ PROT_OFFLOAD_CONFIG_CMD, 0);
+
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
@@ -72,9 +75,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
- nsc = cmd.v3l.ns_config;
+ nsc = cmd.v4.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
- addrs = cmd.v3l.targ_addrs;
+ addrs = cmd.v4.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
@@ -116,7 +119,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
cmd.v3s.num_valid_ipv6_addrs =
cpu_to_le32(i - num_skipped);
else
- cmd.v3l.num_valid_ipv6_addrs =
+ cmd.v4.num_valid_ipv6_addrs =
cpu_to_le32(i - num_skipped);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
bool found = false;
@@ -171,8 +174,17 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
- common = &cmd.v3l.common;
- size = sizeof(cmd.v3l);
+ common = &cmd.v4.common;
+ size = sizeof(cmd.v4);
+ if (ver < 4) {
+ /*
+ * This basically uses iwl_proto_offload_cmd_v3_large
+ * which doesn't have the sta_id parameter before the
+ * common part.
+ */
+ size -= sizeof(cmd.v4.sta_id);
+ hcmd.data[0] = common;
+ }
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
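The `ver < 4` branch above exploits struct layout: iwl_proto_offload_cmd_v4 evidently differs from the old v3_large only by a leading sta_id field, so older firmware is served by pointing the host command at the common part and trimming the length. A compilable userspace sketch of that trick, with toy struct names standing in for the firmware API:

#include <stddef.h>
#include <stdint.h>

struct common_hdr { uint32_t enabled; };	/* toy stand-in */
struct cmd_v4 {
	uint32_t sta_id;	/* only meaningful from command version 4 on */
	struct common_hdr common;
	uint8_t payload[16];
};

/* For firmware older than v4, send from &cmd->common with a reduced size;
 * the bytes on the wire are then exactly the v3_large layout. */
static void pick_cmd_data(struct cmd_v4 *cmd, int ver,
			  const void **data, size_t *size)
{
	*data = cmd;
	*size = sizeof(*cmd);
	if (ver < 4) {
		*data = &cmd->common;
		*size -= sizeof(cmd->sta_id);
	}
}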
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ebed82c590e5..20e8d343a950 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -210,6 +210,39 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
ieee80211_disconnect(vif, true);
}
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
+ mvm->fw_static_smps_request ?
+ IEEE80211_SMPS_STATIC :
+ IEEE80211_SMPS_AUTOMATIC);
+}
+
+static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ iwl_mvm_apply_fw_smps_request(vif);
+}
+
+static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+
+ /*
+ * We could pass it to the iterator data, but also need to remember
+ * it for new interfaces that are added while in this state.
+ */
+ mvm->fw_static_smps_request =
+ req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
+ ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_intf_dual_chain_req, NULL);
+}
+
/**
* enum iwl_rx_handler_context context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@@ -358,6 +391,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
+ iwl_mvm_rx_thermal_dual_chain_req,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_thermal_dual_chain_request),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -445,7 +483,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(D3_CONFIG_CMD),
HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
HCMD_NAME(OFFLOADS_QUERY_CMD),
- HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
HCMD_NAME(MATCH_FOUND_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
HCMD_NAME(WOWLAN_PATTERNS),
@@ -503,6 +540,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
+ HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
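The THERMAL_DUAL_CHAIN_REQUEST handler above latches the firmware's request in driver state instead of only passing it through the iterator, so interfaces created later see the same decision. A minimal sketch of that latch-and-replay pattern (all names illustrative, not the driver's API):

#include <stdbool.h>
#include <stddef.h>

#define MAX_VIFS 8

struct vif { int id; };
struct dev_state {
	bool fw_static_smps_request;	/* latched thermal request */
	struct vif *vifs[MAX_VIFS];
	size_t n_vifs;
};

static void apply_smps(struct dev_state *dev, struct vif *vif)
{
	/* would translate the latch into an SMPS request for this vif */
	(void)dev; (void)vif;
}

static void on_thermal_event(struct dev_state *dev, bool disable_second_chain)
{
	size_t i;

	dev->fw_static_smps_request = disable_second_chain;	/* remember */
	for (i = 0; i < dev->n_vifs; i++)			/* existing vifs */
		apply_smps(dev, dev->vifs[i]);
}

static void on_interface_added(struct dev_state *dev, struct vif *vif)
{
	if (dev->n_vifs < MAX_VIFS) {
		dev->vifs[dev->n_vifs++] = vif;
		apply_smps(dev, vif);	/* late vifs see the latched state */
	}
}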
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 0fd51f6aa206..035336a9e755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
}
static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
__le32 *rxchain_info,
u8 chains_static,
u8 chains_dynamic)
@@ -93,11 +94,22 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
* between the two antennas is sufficiently different to impact
* performance.
*/
- if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
idle_cnt = 2;
active_cnt = 2;
}
+ /*
+ * If the firmware requested it, then we know that it supports
+ * getting zero for the values to indicate "use one, but pick
+ * which one yourself", which means it can dynamically pick one
+ * that e.g. has better RSSI.
+ */
+ if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+ idle_cnt = 0;
+ active_cnt = 0;
+ }
+
*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
@@ -113,6 +125,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
* Add the phy configuration to the PHY context command
*/
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
@@ -123,7 +136,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
- iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
chains_static, chains_dynamic);
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -133,6 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
* Add the phy configuration to the PHY context command
*/
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
@@ -143,7 +157,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
- iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@@ -170,7 +184,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
/* Set the command data */
- iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
chains_static,
chains_dynamic);
@@ -186,7 +200,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
action);
/* Set the command data */
- iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
+ iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
chains_static,
chains_dynamic);
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
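The rxchain logic above makes two independent decisions: listen on both chains when diversity is allowed, and report zero counts when the firmware has requested single-chain operation, which tells it "use one chain, but pick which one yourself". A small sketch of just that decision, assuming boolean inputs derived as in the diff:

#include <stdbool.h>

static void pick_rx_chains(bool diversity_ok, bool fw_static_smps,
			   unsigned int *idle_cnt, unsigned int *active_cnt)
{
	*idle_cnt = 1;
	*active_cnt = 1;

	/* diversity: use both chains even for a single spatial stream */
	if (diversity_ok) {
		*idle_cnt = 2;
		*active_cnt = 2;
	}

	/* thermal request: let firmware choose the better single chain */
	if (fw_static_smps && *active_cnt == 1 && *idle_cnt == 1) {
		*idle_cnt = 0;
		*active_cnt = 0;
	}
}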
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 8e26422ca326..c0babb8d5b5c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -2001,8 +2001,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct sk_buff *skb;
u8 channel, energy_a, energy_b;
struct iwl_mvm_rx_phy_data phy_data = {
+ .info_type = le32_get_bits(desc->phy_info[1],
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
- .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
+ .d1 = desc->phy_info[1],
};
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
@@ -2015,10 +2017,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
- phy_data.info_type =
- le32_get_bits(desc->phy_info[1],
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr is pulled.
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 5a0696c44f6d..0368b7101222 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -2327,9 +2327,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
&scan_p->general_params,
gen_flags);
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
if (ret)
return ret;
@@ -2362,9 +2362,9 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
&scan_p->general_params,
gen_flags);
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f618368eda83..9c45a64c5009 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3794,8 +3794,12 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
mvm_sta->disable_tx = disable;
- /* Tell mac80211 to start/stop queuing tx for this station */
- ieee80211_sta_block_awake(mvm->hw, sta, disable);
+ /*
+ * If sta PS state is handled by mac80211, tell it to start/stop
+ * queuing tx for this station.
+ */
+ if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
+ ieee80211_sta_block_awake(mvm->hw, sta, disable);
iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
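The AP_LINK_PS check above keeps two power-save state machines from fighting: ieee80211_sta_block_awake() is only invoked when mac80211 still owns station PS. A hedged sketch of the gating, with illustrative names:

#include <stdbool.h>

struct hw_caps { bool ap_link_ps; };	/* device/firmware handles AP PS */

static void sta_modify_disable_tx(const struct hw_caps *hw, bool disable,
				  void (*sta_block_awake)(bool block),
				  void (*fw_disable_tx)(bool disable))
{
	if (!hw->ap_link_ps)
		sta_block_awake(disable);	/* mac80211-managed PS only */
	fw_disable_tx(disable);			/* firmware is always told */
}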
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 83342a6a6d5b..d3307a11fcac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -31,6 +31,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
return;
list_del(&te_data->list);
+
+ /*
+ * the list is only used for AUX ROC events so make sure it is always
+ * initialized
+ */
+ INIT_LIST_HEAD(&te_data->list);
+
te_data->running = false;
te_data->uid = 0;
te_data->id = TE_MAX;
@@ -310,6 +317,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ !te_data->vif->bss_conf.assoc ?
+ "Not associated and the time event is over already..." :
"No beacon heard and the time event is over already...");
break;
default:
@@ -607,14 +616,15 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
+ struct iwl_mvm_vif *mvmvif,
+ u32 id)
{
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
- .conf_id = cpu_to_le32(mvmvif->time_event_data.id),
+ .conf_id = cpu_to_le32(id),
};
int ret;
@@ -632,6 +642,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
{
u32 id;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ enum nl80211_iftype iftype;
+
+ if (!te_data->vif)
+ return false;
+
+ iftype = te_data->vif->type;
/*
* It is possible that by the time we got to this point the time
@@ -656,8 +672,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
- iwl_mvm_cancel_session_protection(mvm, mvmvif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
}
@@ -738,11 +754,6 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Couldn't remove the time event\n");
}
-/*
- * When the firmware supports the session protection API,
- * this is not needed since it'll automatically remove the
- * session protection after association + beacon reception.
- */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -756,7 +767,15 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
id = te_data->id;
spin_unlock_bh(&mvm->time_event_lock);
- if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ if (id != SESSION_PROTECT_CONF_ASSOC) {
+ IWL_DEBUG_TE(mvm,
+ "don't remove session protection id=%u\n",
+ id);
+ return;
+ }
+ } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
IWL_DEBUG_TE(mvm,
"don't remove TE with id=%u (not session protection)\n",
id);
@@ -808,6 +827,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
+ !vif->bss_conf.assoc ?
+ "Not associated and the session protection is over already..." :
"No beacon heard and the session protection is over already...");
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, te_data);
@@ -981,7 +1002,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_cancel_session_protection(mvm, mvmvif);
+ iwl_mvm_cancel_session_protection(mvm, mvmvif,
+ mvmvif->time_event_data.id);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
@@ -1141,6 +1163,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
te_data->duration = le32_to_cpu(cmd.duration_tu);
+ te_data->vif = vif;
spin_unlock_bh(&mvm->time_event_lock);
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
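Several fixes in this file snapshot fields before they can be torn down: __iwl_mvm_remove_time_event() now bails out when te_data->vif is already NULL and captures id and iftype up front, since the notification path may clear the time-event data while the cancel command is in flight. A compilable sketch of that snapshot-under-lock pattern (a pthread mutex standing in for the driver's spinlock):

#include <pthread.h>
#include <stdbool.h>

struct vif { int type; };
struct te_data {
	struct vif *vif;	/* may be cleared concurrently */
	unsigned int id;
};

/* Returns false if the event was already torn down; on success the caller
 * works only with the snapshotted id and iftype, never te->vif again. */
static bool snapshot_time_event(pthread_mutex_t *lock, struct te_data *te,
				unsigned int *id, int *iftype)
{
	bool ok = false;

	pthread_mutex_lock(lock);
	if (te->vif) {
		*id = te->id;
		*iftype = te->vif->type;
		ok = true;
	}
	pthread_mutex_unlock(lock);
	return ok;
}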
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 1ad621d13ad3..0a13c2bda2ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1032,6 +1032,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
+ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
+ return -1;
+
if (unlikely(ieee80211_is_probe_resp(fc)))
iwl_mvm_probe_resp_set_noa(mvm, skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index c566be99a4c7..4a3d2971a98b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -238,316 +238,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-/*
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_error_event_table_v1 {
- u32 valid; /* (nonzero) valid, (0) log is empty */
- u32 error_id; /* type of error */
- u32 pc; /* program counter */
- u32 blink1; /* branch link */
- u32 blink2; /* branch link */
- u32 ilink1; /* interrupt link */
- u32 ilink2; /* interrupt link */
- u32 data1; /* error-specific data */
- u32 data2; /* error-specific data */
- u32 data3; /* error-specific data */
- u32 bcon_time; /* beacon timer */
- u32 tsf_low; /* network timestamp function timer */
- u32 tsf_hi; /* network timestamp function timer */
- u32 gp1; /* GP1 timer register */
- u32 gp2; /* GP2 timer register */
- u32 gp3; /* GP3 timer register */
- u32 ucode_ver; /* uCode version */
- u32 hw_ver; /* HW Silicon version */
- u32 brd_ver; /* HW board version */
- u32 log_pc; /* log program counter */
- u32 frame_ptr; /* frame pointer */
- u32 stack_ptr; /* stack pointer */
- u32 hcmd; /* last host command header */
- u32 isr0; /* isr status register LMPM_NIC_ISR0:
- * rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1:
- * host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2:
- * enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3:
- * time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4:
- * wico interrupt */
- u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
- u32 wait_event; /* wait event() caller address */
- u32 l2p_control; /* L2pControlField */
- u32 l2p_duration; /* L2pDurationField */
- u32 l2p_mhvalid; /* L2pMhValidBits */
- u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on
- * (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the
- * compilation */
- u32 flow_handler; /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
-
-struct iwl_error_event_table {
- u32 valid; /* (nonzero) valid, (0) log is empty */
- u32 error_id; /* type of error */
- u32 trm_hw_status0; /* TRM HW status */
- u32 trm_hw_status1; /* TRM HW status */
- u32 blink2; /* branch link */
- u32 ilink1; /* interrupt link */
- u32 ilink2; /* interrupt link */
- u32 data1; /* error-specific data */
- u32 data2; /* error-specific data */
- u32 data3; /* error-specific data */
- u32 bcon_time; /* beacon timer */
- u32 tsf_low; /* network timestamp function timer */
- u32 tsf_hi; /* network timestamp function timer */
- u32 gp1; /* GP1 timer register */
- u32 gp2; /* GP2 timer register */
- u32 fw_rev_type; /* firmware revision type */
- u32 major; /* uCode version major */
- u32 minor; /* uCode version minor */
- u32 hw_ver; /* HW Silicon version */
- u32 brd_ver; /* HW board version */
- u32 log_pc; /* log program counter */
- u32 frame_ptr; /* frame pointer */
- u32 stack_ptr; /* stack pointer */
- u32 hcmd; /* last host command header */
- u32 isr0; /* isr status register LMPM_NIC_ISR0:
- * rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1:
- * host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2:
- * enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3:
- * time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4:
- * wico interrupt */
- u32 last_cmd_id; /* last HCMD id handled by the firmware */
- u32 wait_event; /* wait event() caller address */
- u32 l2p_control; /* L2pControlField */
- u32 l2p_duration; /* L2pDurationField */
- u32 l2p_mhvalid; /* L2pMhValidBits */
- u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on
- * (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the
- * compilation */
- u32 flow_handler; /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
-
-/*
- * UMAC error struct - relevant starting from family 8000 chip.
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_umac_error_event_table {
- u32 valid; /* (nonzero) valid, (0) log is empty */
- u32 error_id; /* type of error */
- u32 blink1; /* branch link */
- u32 blink2; /* branch link */
- u32 ilink1; /* interrupt link */
- u32 ilink2; /* interrupt link */
- u32 data1; /* error-specific data */
- u32 data2; /* error-specific data */
- u32 data3; /* error-specific data */
- u32 umac_major;
- u32 umac_minor;
- u32 frame_pointer; /* core register 27*/
- u32 stack_pointer; /* core register 28 */
- u32 cmd_header; /* latest host cmd sent to UMAC */
- u32 nic_isr_pref; /* ISR status register */
-} __packed;
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
-{
- struct iwl_trans *trans = mvm->trans;
- struct iwl_umac_error_event_table table = {};
- u32 base = mvm->trans->dbg.umac_error_event_table;
-
- if (!base &&
- !(mvm->trans->dbg.error_event_table_tlv_status &
- IWL_ERROR_EVENT_TABLE_UMAC))
- return;
-
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
- if (table.valid)
- mvm->fwrt.dump.umac_err_id = table.error_id;
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(trans, "Start IWL Error Log Dump:\n");
- IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
- mvm->status, table.valid);
- }
-
- IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
- iwl_fw_lookup_assert_desc(table.error_id));
- IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
- IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
- IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
- IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
- IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
- IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
- IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
- IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
- IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
- IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
- IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
- IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
- IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
-}
-
-static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
-{
- struct iwl_trans *trans = mvm->trans;
- struct iwl_error_event_table table = {};
- u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
-
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
- if (!base)
- base = mvm->fw->init_errlog_ptr;
- } else {
- if (!base)
- base = mvm->fw->inst_errlog_ptr;
- }
-
- if (base < 0x400000) {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return;
- }
-
- /* check if there is a HW error */
- val = iwl_trans_read_mem32(trans, base);
- if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
- int err;
-
- IWL_ERR(trans, "HW error, resetting before reading\n");
-
- /* reset the device */
- iwl_trans_sw_reset(trans);
-
- err = iwl_finish_nic_init(trans, trans->trans_cfg);
- if (err)
- return;
- }
-
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
- if (table.valid)
- mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(trans, "Start IWL Error Log Dump:\n");
- IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
- mvm->status, table.valid);
- }
-
- /* Do not change this output - scripts rely on it */
-
- IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
- IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
- iwl_fw_lookup_assert_desc(table.error_id));
- IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
- IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
- IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
- IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
- IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
- IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
- IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
- IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
- IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
- IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
- IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
- IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
- IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
- IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
- IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
- IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
- IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
- IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
- IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
- IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
- IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
- IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
- IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
- IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
- IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
- IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
- IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
- IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
- IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
- IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
- IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
- IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-}
-
-static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
-{
- struct iwl_trans *trans = mvm->trans;
- u32 error, data1;
-
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
- error = UMAG_SB_CPU_2_STATUS;
- data1 = UMAG_SB_CPU_1_STATUS;
- } else if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000) {
- error = SB_CPU_2_STATUS;
- data1 = SB_CPU_1_STATUS;
- } else {
- return;
- }
-
- error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
-
- IWL_ERR(trans, "IML/ROM dump:\n");
-
- if (error & 0xFFFF0000)
- IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
-
- IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
- IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
- iwl_read_umac_prph(trans, data1));
-
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
- IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
- iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
-}
-
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
-{
- if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
- IWL_ERR(mvm,
- "DEVICE_ENABLED bit is not set. Aborting dump.\n");
- return;
- }
-
- iwl_mvm_dump_lmac_error_log(mvm, 0);
-
- if (mvm->trans->dbg.lmac_error_event_table[1])
- iwl_mvm_dump_lmac_error_log(mvm, 1);
-
- iwl_mvm_dump_umac_error_log(mvm);
-
- iwl_mvm_dump_iml_error_log(mvm);
-
- iwl_fw_error_print_fseq_regs(&mvm->fwrt);
-}
-
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@@ -621,7 +311,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
- enum ieee80211_smps_mode smps_mode;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
int i;
lockdep_assert_held(&mvm->mutex);
@@ -630,10 +320,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return;
- if (vif->type == NL80211_IFTYPE_AP)
- smps_mode = IEEE80211_SMPS_OFF;
- else
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
@@ -683,23 +371,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}
+struct iwl_mvm_diversity_iter_data {
+ struct iwl_mvm_phy_ctxt *ctxt;
+ bool result;
+};
+
static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool *result = _data;
+ struct iwl_mvm_diversity_iter_data *data = _data;
int i;
+ if (mvmvif->phy_ctxt != data->ctxt)
+ return;
+
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
- mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
- *result = false;
+ mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
+ data->result = false;
+ break;
+ }
}
}
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt)
{
- bool result = true;
+ struct iwl_mvm_diversity_iter_data data = {
+ .ctxt = ctxt,
+ .result = true,
+ };
lockdep_assert_held(&mvm->mutex);
@@ -711,9 +413,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_diversity_iter, &result);
+ iwl_mvm_diversity_iter, &data);
- return result;
+ return data.result;
}
void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
@@ -1398,7 +1100,8 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
return iwl_read_prph(mvm->trans, reg_addr);
}
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
+ u32 *gp2, u64 *boottime, ktime_t *realtime)
{
bool ps_disabled;
@@ -1412,7 +1115,11 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
}
*gp2 = iwl_mvm_get_systime(mvm);
- *boottime = ktime_get_boottime_ns();
+
+ if (clock_type == CLOCK_BOOTTIME && boottime)
+ *boottime = ktime_get_boottime_ns();
+ else if (clock_type == CLOCK_REALTIME && realtime)
+ *realtime = ktime_get_real();
if (!ps_disabled) {
mvm->ps_disabled = ps_disabled;
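The reworked iwl_mvm_get_sync_time() above takes the clock the caller cares about and writes only the matching out-pointer. A usage sketch, assuming only the prototype visible in this diff (ktime_t and the integer types stubbed for userspace):

#include <stdint.h>
#include <time.h>	/* CLOCK_BOOTTIME, CLOCK_REALTIME */

typedef long long ktime_t;	/* kernel type, stubbed for the sketch */
struct iwl_mvm;			/* opaque driver state */

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
			   uint32_t *gp2, uint64_t *boottime,
			   ktime_t *realtime);

/* The unused out-pointer may be NULL, which is why the implementation
 * checks both the clock type and the pointer before writing. */
static void stamp_with_boottime(struct iwl_mvm *mvm)
{
	uint32_t gp2;
	uint64_t boottime;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime, NULL);
}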
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index cecc32e7dbe8..239a722cd79d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
- void *iml_img;
u32 control_flags = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -138,8 +137,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
- * assigned later */
- prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+ * assigned later
+ *
+ * We also use the second half of this page to give the device some
+ * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
+ * use this, but the hardware still reads/writes there and we can't let
+ * it go do that with a NULL pointer.
+ */
+ BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
+ prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
if (!prph_info) {
@@ -166,13 +172,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_head_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
ctxt_info_gen3->tr_tail_idx_arr_base_addr =
- cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+ cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
- cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
- ctxt_info_gen3->cr_idx_arr_size =
- cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
- ctxt_info_gen3->tr_idx_arr_size =
- cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+ cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
@@ -187,14 +189,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
trans_pcie->prph_scratch = prph_scratch;
/* Allocate IML */
- iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
- &trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img) {
+ trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr,
+ GFP_KERNEL);
+ if (!trans_pcie->iml) {
ret = -ENOMEM;
goto err_free_ctxt_info;
}
- memcpy(iml_img, trans->iml, trans->iml_len);
+ memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
iwl_enable_fw_load_int_ctx_info(trans);
@@ -216,10 +219,8 @@ err_free_ctxt_info:
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
- dma_free_coherent(trans->dev,
- sizeof(*prph_info),
- prph_info,
- trans_pcie->prph_info_dma_addr);
+ dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
+ trans_pcie->prph_info_dma_addr);
err_free_prph_scratch:
dma_free_coherent(trans->dev,
@@ -230,29 +231,40 @@ err_free_prph_scratch:
}
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (trans_pcie->iml) {
+ dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+ trans_pcie->iml_dma_addr);
+ trans_pcie->iml_dma_addr = 0;
+ trans_pcie->iml = NULL;
+ }
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ if (alive)
+ return;
+
if (!trans_pcie->ctxt_info_gen3)
return;
+ /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
trans_pcie->ctxt_info_gen3,
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_dma_addr = 0;
trans_pcie->ctxt_info_gen3 = NULL;
- iwl_pcie_ctxt_info_free_fw_img(trans);
-
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
trans_pcie->prph_scratch,
trans_pcie->prph_scratch_dma_addr);
trans_pcie->prph_scratch_dma_addr = 0;
trans_pcie->prph_scratch = NULL;
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
- trans_pcie->prph_info,
+ /* this is needed for the entire lifetime */
+ dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
trans_pcie->prph_info_dma_addr);
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
@@ -290,3 +302,37 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
return 0;
}
+
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ int ret;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ /* only allocate the DRAM if not allocated yet */
+ if (!trans->reduce_power_loaded) {
+ if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
+ return -EBUSY;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+ &trans_pcie->reduce_power_dram);
+ if (ret < 0) {
+ IWL_DEBUG_FW(trans,
+ "Failed to allocate reduce power DMA %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ prph_sc_ctrl->reduce_power_cfg.base_addr =
+ cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+ prph_sc_ctrl->reduce_power_cfg.size =
+ cpu_to_le32(trans_pcie->reduce_power_dram.size);
+
+ return 0;
+}
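The prph_info change earlier in this file allocates a whole page and parks the device's otherwise-unused TR/CR tail pointers in its second half, so the hardware never writes through a NULL bus address (the BUILD_BUG_ON guarantees prph_info itself fits in the first half). A sketch of the carve-up, using the offsets from the diff:

#include <stdint.h>

#define PAGE_SIZE 4096u		/* typical; the kernel macro is per-arch */

struct carve {
	uint64_t prph_info;	/* base of the single DMA allocation */
	uint64_t tr_tail;	/* dummy transfer-ring tail array */
	uint64_t cr_tail;	/* dummy completion-ring tail array */
};

static struct carve carve_page(uint64_t dma_addr)
{
	struct carve c = {
		.prph_info = dma_addr,
		.tr_tail   = dma_addr + PAGE_SIZE / 2,
		.cr_tail   = dma_addr + 3 * PAGE_SIZE / 4,
	};
	return c;
}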
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index d94bd8d732e9..16baee3d52ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -532,6 +532,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+ IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
@@ -1030,6 +1032,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
@@ -1209,14 +1216,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+ } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+ } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+ } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 76a512cd2e5c..cc550f6ef957 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,12 +109,8 @@ struct iwl_rx_completion_desc {
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
* In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
- * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
- * @tr_tail: driver's pointer to the transmission ring tail buffer
- * @tr_tail_dma: physical address of the buffer for the transmission ring tail
- * @cr_tail: driver's pointer to the completion ring tail buffer
- * @cr_tail_dma: physical address of the buffer for the completion ring tail
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -142,10 +138,6 @@ struct iwl_rxq {
struct iwl_rx_completion_desc *cd;
};
dma_addr_t used_bd_dma;
- __le16 *tr_tail;
- dma_addr_t tr_tail_dma;
- __le16 *cr_tail;
- dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@@ -279,6 +271,8 @@ struct cont_rec {
* Context information addresses will be taken from here.
* This is driver's local copy for keeping track of size and
* count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @kw: keep warm address
@@ -317,6 +311,7 @@ struct cont_rec {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -329,6 +324,7 @@ struct iwl_trans_pcie {
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
+ void *iml;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
@@ -353,6 +349,7 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr kw;
struct iwl_dram_data pnvm_dram;
+ struct iwl_dram_data reduce_power_dram;
struct iwl_txq *txq_memory;
@@ -409,6 +406,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
bool fw_reset_done;
wait_queue_head_t fw_reset_waitq;
+
+ char rf_name[32];
};
static inline struct iwl_trans_pcie *
@@ -530,9 +529,6 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
-#define IWL_NUM_OF_COMPLETION_RINGS 31
-#define IWL_NUM_OF_TRANSFER_RINGS 527
-
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
int start)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index fb8491412be4..4f6f4b2720f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -663,7 +663,6 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- struct device *dev = trans->dev;
bool use_rx_td = (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
@@ -685,21 +684,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return;
-
- if (rxq->tr_tail)
- dma_free_coherent(dev, sizeof(__le16),
- rxq->tr_tail, rxq->tr_tail_dma);
- rxq->tr_tail_dma = 0;
- rxq->tr_tail = NULL;
-
- if (rxq->cr_tail)
- dma_free_coherent(dev, sizeof(__le16),
- rxq->cr_tail, rxq->cr_tail_dma);
- rxq->cr_tail_dma = 0;
- rxq->cr_tail = NULL;
}
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
@@ -744,21 +728,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
- if (!use_rx_td)
- return 0;
-
- /* Allocate the driver's pointer to TR tail */
- rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
- &rxq->tr_tail_dma, GFP_KERNEL);
- if (!rxq->tr_tail)
- goto err;
-
- /* Allocate the driver's pointer to CR tail */
- rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
- &rxq->cr_tail_dma, GFP_KERNEL);
- if (!rxq->cr_tail)
- goto err;
-
return 0;
err:
@@ -1590,9 +1559,6 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
- /* update cr tail with the rxq read pointer */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- *rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 1bcd36e9e008..a34009357227 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -149,7 +149,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
iwl_pcie_ctxt_info_free_paging(trans);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans);
+ iwl_pcie_ctxt_info_gen3_free(trans, false);
else
iwl_pcie_ctxt_info_free(trans);
@@ -240,6 +240,75 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return 0;
}
+static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ char *buf = trans_pcie->rf_name;
+ size_t buflen = sizeof(trans_pcie->rf_name);
+ size_t pos;
+ u32 version;
+
+ if (buf[0])
+ return;
+
+ switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
+ pos = scnprintf(buf, buflen, "JF");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
+ pos = scnprintf(buf, buflen, "GF");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
+ pos = scnprintf(buf, buflen, "GF4");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+ pos = scnprintf(buf, buflen, "HR");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+ pos = scnprintf(buf, buflen, "HR1");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+ pos = scnprintf(buf, buflen, "HRCDB");
+ break;
+ default:
+ return;
+ }
+
+ switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+ version = iwl_read_prph(trans, CNVI_MBOX_C);
+ switch (version) {
+ case 0x20000:
+ pos += scnprintf(buf + pos, buflen - pos, " B3");
+ break;
+ case 0x120000:
+ pos += scnprintf(buf + pos, buflen - pos, " B5");
+ break;
+ default:
+ pos += scnprintf(buf + pos, buflen - pos,
+ " (0x%x)", version);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
+ trans->hw_rf_id);
+
+ IWL_INFO(trans, "Detected RF %s\n", buf);
+
+ /*
+ * also add a \n for debugfs - need to do it after printing
+ * since our IWL_INFO machinery wants to see a static \n at
+ * the end of the string
+ */
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+}
+
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -254,7 +323,10 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
*/
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_gen3_free(trans, true);
+ else
+ iwl_pcie_ctxt_info_free(trans);
/*
* Re-enable all the interrupts, including the RF-Kill one, now that
@@ -263,6 +335,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_enable_interrupts(trans);
mutex_lock(&trans_pcie->mutex);
iwl_pcie_check_hw_rf_kill(trans);
+
+ iwl_pcie_get_rf_name(trans);
mutex_unlock(&trans_pcie->mutex);
}
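iwl_pcie_get_rf_name() above chains scnprintf() calls, which is safe because the kernel's scnprintf() returns the number of characters actually written, unlike snprintf(), whose would-be-length return value could push `pos` past the buffer on truncation. A userspace stand-in reproducing those semantics for the sketch:

#include <stdarg.h>
#include <stdio.h>

static size_t scnprintf_stub(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (!size)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

/* pos never exceeds buflen - 1, so `buflen - pos` cannot underflow */
static void build_rf_name(char *buf, size_t buflen, unsigned int rfid)
{
	size_t pos;

	pos = scnprintf_stub(buf, buflen, "GF");
	pos += scnprintf_stub(buf + pos, buflen - pos, ", rfid=0x%x", rfid);
	scnprintf_stub(buf + pos, buflen - pos, "\n");
}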
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 239bc177a3e5..bee6b4574226 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1648,7 +1648,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
if (ret)
IWL_ERR(trans_pcie->trans,
"Failed to set affinity mask for IRQ %d\n",
- i);
+ trans_pcie->msix_entries[i].vector);
}
}
@@ -1943,6 +1943,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans_pcie->pnvm_dram.block,
trans_pcie->pnvm_dram.physical);
+ if (trans_pcie->reduce_power_dram.size)
+ dma_free_coherent(trans->dev,
+ trans_pcie->reduce_power_dram.size,
+ trans_pcie->reduce_power_dram.block,
+ trans_pcie->reduce_power_dram.physical);
+
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@@ -2848,11 +2854,28 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
return bytes_copied;
}
+static ssize_t iwl_dbgfs_rf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->rf_name[0])
+ return -ENODEV;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ trans_pcie->rf_name,
+ strlen(trans_pcie->rf_name));
+}
+
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+DEBUGFS_READ_FILE_OPS(rf);
+
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_tx_queue_open,
@@ -2879,6 +2902,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
+ DEBUGFS_ADD_FILE(rf, dir, 0400);
}
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@@ -3400,6 +3424,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
+ .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
@@ -3413,6 +3438,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans *trans;
int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+ void __iomem * const *table;
if (!cfg_trans->gen2)
ops = &trans_ops_pcie;
@@ -3485,9 +3511,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
- trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
- if (!trans_pcie->hw_base) {
+ table = pcim_iomap_table(pdev);
+ if (!table) {
dev_err(&pdev->dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto out_no_pci;
+ }
+
+ trans_pcie->hw_base = table[0];
+ if (!trans_pcie->hw_base) {
+ dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
ret = -ENODEV;
goto out_no_pci;
}
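The new debugfs "rf" file above is a one-call read handler built on simple_read_from_buffer(). For reference, a userspace sketch of the semantics that helper provides (copy at most `count` bytes starting at `*ppos`, advance the position, return the byte count):

#include <stddef.h>
#include <string.h>

static size_t read_from_buffer(char *dst, size_t count, long *ppos,
			       const char *src, size_t srclen)
{
	size_t avail, n;

	if (*ppos < 0 || (size_t)*ppos >= srclen)
		return 0;
	avail = srclen - (size_t)*ppos;
	n = count < avail ? count : avail;
	memcpy(dst, src + *ppos, n);	/* kernel uses copy_to_user() here */
	*ppos += (long)n;
	return n;
}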
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index 2c7adb4be100..0aea35c9c11c 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -988,15 +988,18 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
* tsc must be NULL or up to 8 bytes
*/
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, const u8 *key, const u8 *rsc,
- size_t rsc_len, const u8 *tsc, size_t tsc_len)
+ int set_tx, const u8 *key, size_t key_len,
+ const u8 *rsc, size_t rsc_len,
+ const u8 *tsc, size_t tsc_len)
{
struct {
__le16 idx;
u8 rsc[ORINOCO_SEQ_LEN];
- u8 key[TKIP_KEYLEN];
- u8 tx_mic[MIC_KEYLEN];
- u8 rx_mic[MIC_KEYLEN];
+ struct {
+ u8 key[TKIP_KEYLEN];
+ u8 tx_mic[MIC_KEYLEN];
+ u8 rx_mic[MIC_KEYLEN];
+ } tkip;
u8 tsc[ORINOCO_SEQ_LEN];
} __packed buf;
struct hermes *hw = &priv->hw;
@@ -1011,8 +1014,9 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
key_idx |= 0x8000;
buf.idx = cpu_to_le16(key_idx);
- memcpy(buf.key, key,
- sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
+ if (key_len != sizeof(buf.tkip))
+ return -EINVAL;
+ memcpy(&buf.tkip, key, sizeof(buf.tkip));
if (rsc_len > sizeof(buf.rsc))
rsc_len = sizeof(buf.rsc);
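The orinoco change above replaces a memcpy that silently spanned three adjacent struct fields with an explicit sub-struct plus a length check on the caller-supplied key. A compilable sketch of the hardened shape:

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define TKIP_KEYLEN 16
#define MIC_KEYLEN   8

struct tkip_blob {		/* key material as it sits in the command */
	uint8_t key[TKIP_KEYLEN];
	uint8_t tx_mic[MIC_KEYLEN];
	uint8_t rx_mic[MIC_KEYLEN];
};

static int set_tkip_key(struct tkip_blob *dst, const uint8_t *key,
			size_t key_len)
{
	if (key_len != sizeof(*dst))	/* reject short/long caller input */
		return -EINVAL;
	memcpy(dst, key, sizeof(*dst));
	return 0;
}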
diff --git a/drivers/net/wireless/intersil/orinoco/hw.h b/drivers/net/wireless/intersil/orinoco/hw.h
index 466d1ede76f1..da5804dbdf34 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.h
+++ b/drivers/net/wireless/intersil/orinoco/hw.h
@@ -38,8 +38,9 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
int __orinoco_hw_setup_enc(struct orinoco_private *priv);
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, const u8 *key, const u8 *rsc,
- size_t rsc_len, const u8 *tsc, size_t tsc_len);
+ int set_tx, const u8 *key, size_t key_len,
+ const u8 *rsc, size_t rsc_len,
+ const u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,
diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c
index 7b6c4ae8ddb3..4a01260027bc 100644
--- a/drivers/net/wireless/intersil/orinoco/wext.c
+++ b/drivers/net/wireless/intersil/orinoco/wext.c
@@ -791,7 +791,7 @@ static int orinoco_ioctl_set_encodeext(struct net_device *dev,
err = __orinoco_hw_set_tkip_key(priv, idx,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
- priv->keys[idx].key,
+ priv->keys[idx].key, priv->keys[idx].key_len,
tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
if (err)
printk(KERN_ERR "%s: Error %d setting TKIP key"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 51ce767eaf88..ffa894f7312a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -626,6 +626,7 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
+ struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -1257,7 +1258,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
- int dst_portid)
+ int dst_portid,
+ struct ieee80211_channel *channel)
{
struct sk_buff *skb;
struct mac80211_hwsim_data *data = hw->priv;
@@ -1312,7 +1314,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
goto nla_put_failure;
- if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
+ if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
goto nla_put_failure;
/* We get the tx control (rate and retries) info*/
@@ -1659,7 +1661,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
_portid = READ_ONCE(data->wmediumd);
if (_portid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);
/* NO wmediumd detected, perfect medium simulation */
data->tx_pkts++;
@@ -1693,8 +1695,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+
data->started = false;
hrtimer_cancel(&data->beacon_timer);
+
+ while (!skb_queue_empty(&data->pending))
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
@@ -1770,8 +1777,10 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
if (_pid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
+ data->tx_pkts++;
+ data->tx_bytes += skb->len;
mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
dev_kfree_skb(skb);
}
@@ -2509,6 +2518,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = ctx;
+ mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2520,6 +2534,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = NULL;
+ mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@@ -2532,6 +2551,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = ctx;
+ mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -3124,6 +3148,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
+ data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@@ -3633,6 +3658,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
int frame_data_len;
void *frame_data;
struct sk_buff *skb = NULL;
+ struct ieee80211_channel *channel = NULL;
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
@@ -3659,6 +3685,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
+ if (data2->use_chanctx) {
+ if (data2->tmp_chan)
+ channel = data2->tmp_chan;
+ else if (data2->chanctx)
+ channel = data2->chanctx->def.chan;
+ } else {
+ channel = data2->channel;
+ }
+ if (!channel)
+ goto out;
+
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
data2->netgroup)
@@ -3670,7 +3707,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* check if radio is configured properly */
- if (data2->idle || !data2->started)
+ if ((data2->idle && !data2->tmp_chan) || !data2->started)
goto out;
/* A frame is received from user space */
@@ -3683,18 +3720,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
mutex_lock(&data2->mutex);
rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
- if (rx_status.freq != data2->channel->center_freq &&
- (!data2->tmp_chan ||
- rx_status.freq != data2->tmp_chan->center_freq)) {
+ if (rx_status.freq != channel->center_freq) {
mutex_unlock(&data2->mutex);
goto out;
}
mutex_unlock(&data2->mutex);
} else {
- rx_status.freq = data2->channel->center_freq;
+ rx_status.freq = channel->center_freq;
}
- rx_status.band = data2->channel->band;
+ rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
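
The lookup above resolves the receive channel in priority order: a temporary remain-on-channel first, then the active channel context, then the fixed channel of non-chanctx radios, and the frame is dropped when none is set or the frequency claimed by user space disagrees. A minimal userspace sketch of that selection, with hypothetical stand-in types:

#include <stddef.h>
#include <stdio.h>

struct channel { int center_freq; };
struct chanctx { struct channel chan; };

/* Hypothetical mirror of the hwsim fields used above. */
struct radio {
	int use_chanctx;
	struct channel *tmp_chan;	/* remain-on-channel */
	struct chanctx *ctx;		/* current channel context */
	struct channel *fixed_chan;	/* non-chanctx radios */
};

static struct channel *resolve_channel(struct radio *r)
{
	if (!r->use_chanctx)
		return r->fixed_chan;
	if (r->tmp_chan)
		return r->tmp_chan;	/* off-channel wins */
	if (r->ctx)
		return &r->ctx->chan;
	return NULL;			/* caller drops the frame */
}

int main(void)
{
	struct channel fixed = { 2412 };
	struct radio r = { .use_chanctx = 0, .fixed_chan = &fixed };
	struct channel *c = resolve_channel(&r);

	/* A frame claiming a different center_freq would be dropped. */
	printf("deliver on %d MHz\n", c ? c->center_freq : -1);
	return 0;
}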
@@ -3791,11 +3826,6 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
return -EINVAL;
}
- if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
- GENL_SET_ERR_MSG(info, "too many channels specified");
- return -EINVAL;
- }
-
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index ee4cf3437e28..64fc5e410864 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -941,7 +941,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
wdev->netdev = dev;
priv->dev = dev;
- dev->netdev_ops = &lbs_netdev_ops;
+ dev->netdev_ops = &lbs_netdev_ops;
dev->watchdog_timeo = 5 * HZ;
dev->ethtool_ops = &lbs_ethtool_ops;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index c68814841583..6cbba84989b8 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -151,13 +151,13 @@ static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
*/
/**
- * lbs_anycast_get - Get function for sysfs attribute anycast_mask
+ * anycast_mask_show - Get function for sysfs attribute anycast_mask
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_anycast_get(struct device *dev,
- struct device_attribute *attr, char * buf)
+static ssize_t anycast_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -173,14 +173,15 @@ static ssize_t lbs_anycast_get(struct device *dev,
}
/**
- * lbs_anycast_set - Set function for sysfs attribute anycast_mask
+ * anycast_mask_store - Set function for sysfs attribute anycast_mask
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_anycast_set(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+static ssize_t anycast_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -199,13 +200,13 @@ static ssize_t lbs_anycast_set(struct device *dev,
}
/**
- * lbs_prb_rsp_limit_get - Get function for sysfs attribute prb_rsp_limit
+ * prb_rsp_limit_show - Get function for sysfs attribute prb_rsp_limit
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t prb_rsp_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -225,14 +226,15 @@ static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
}
/**
- * lbs_prb_rsp_limit_set - Set function for sysfs attribute prb_rsp_limit
+ * prb_rsp_limit_store - Set function for sysfs attribute prb_rsp_limit
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t prb_rsp_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -259,27 +261,28 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
}
/**
- * lbs_mesh_get - Get function for sysfs attribute mesh
+ * lbs_mesh_show - Get function for sysfs attribute mesh
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_mesh_get(struct device *dev,
- struct device_attribute *attr, char * buf)
+static ssize_t lbs_mesh_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
}
/**
- * lbs_mesh_set - Set function for sysfs attribute mesh
+ * lbs_mesh_store - Set function for sysfs attribute mesh
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_mesh_set(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+static ssize_t lbs_mesh_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
int enable;
@@ -301,20 +304,19 @@ static ssize_t lbs_mesh_set(struct device *dev,
* lbs_mesh attribute to be exported per ethX interface
* through sysfs (/sys/class/net/ethX/lbs_mesh)
*/
-static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
+static DEVICE_ATTR_RW(lbs_mesh);
/*
* anycast_mask attribute to be exported per mshX interface
* through sysfs (/sys/class/net/mshX/anycast_mask)
*/
-static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
+static DEVICE_ATTR_RW(anycast_mask);
/*
* prb_rsp_limit attribute to be exported per mshX interface
* through sysfs (/sys/class/net/mshX/prb_rsp_limit)
*/
-static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get,
- lbs_prb_rsp_limit_set);
+static DEVICE_ATTR_RW(prb_rsp_limit);
static struct attribute *lbs_mesh_sysfs_entries[] = {
&dev_attr_anycast_mask.attr,
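
The renames above are what make the DEVICE_ATTR_RW() conversion possible: the macro derives both callbacks and the 0644 mode from the attribute name, so the functions must be called <name>_show and <name>_store. A standalone model of the convention (simplified; the real macros live in include/linux/device.h):

#include <stdio.h>

struct device_attribute {
	const char *name;
	unsigned int mode;
	int (*show)(char *buf);
	int (*store)(const char *buf);
};

/* Simplified stand-in for DEVICE_ATTR_RW(): token-pastes the
 * _show/_store callbacks and fixes the mode at 0644. */
#define MY_DEVICE_ATTR_RW(_name)				\
	struct device_attribute dev_attr_##_name = {		\
		.name = #_name, .mode = 0644,			\
		.show = _name##_show, .store = _name##_store,	\
	}

static int mesh_id_show(char *buf)        { return sprintf(buf, "demo\n"); }
static int mesh_id_store(const char *buf) { (void)buf; return 0; }

static MY_DEVICE_ATTR_RW(mesh_id);

int main(void)
{
	char buf[16];

	dev_attr_mesh_id.show(buf);
	printf("%s: mode %o, value %s", dev_attr_mesh_id.name,
	       dev_attr_mesh_id.mode, buf);
	return 0;
}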
@@ -351,13 +353,13 @@ static int mesh_get_default_parameters(struct device *dev,
}
/**
- * bootflag_get - Get function for sysfs attribute bootflag
+ * bootflag_show - Get function for sysfs attribute bootflag
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t bootflag_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t bootflag_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -371,14 +373,14 @@ static ssize_t bootflag_get(struct device *dev,
}
/**
- * bootflag_set - Set function for sysfs attribute bootflag
+ * bootflag_store - Set function for sysfs attribute bootflag
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bootflag_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -401,13 +403,13 @@ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
}
/**
- * boottime_get - Get function for sysfs attribute boottime
+ * boottime_show - Get function for sysfs attribute boottime
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t boottime_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t boottime_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -421,14 +423,15 @@ static ssize_t boottime_get(struct device *dev,
}
/**
- * boottime_set - Set function for sysfs attribute boottime
+ * boottime_store - Set function for sysfs attribute boottime
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t boottime_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t boottime_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -460,13 +463,13 @@ static ssize_t boottime_set(struct device *dev,
}
/**
- * channel_get - Get function for sysfs attribute channel
+ * channel_show - Get function for sysfs attribute channel
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t channel_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t channel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -480,14 +483,14 @@ static ssize_t channel_get(struct device *dev,
}
/**
- * channel_set - Set function for sysfs attribute channel
+ * channel_store - Set function for sysfs attribute channel
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t channel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -510,13 +513,13 @@ static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
}
/**
- * mesh_id_get - Get function for sysfs attribute mesh_id
+ * mesh_id_show - Get function for sysfs attribute mesh_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t mesh_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -539,14 +542,14 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
}
/**
- * mesh_id_set - Set function for sysfs attribute mesh_id
+ * mesh_id_store - Set function for sysfs attribute mesh_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t mesh_id_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -585,13 +588,14 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
}
/**
- * protocol_id_get - Get function for sysfs attribute protocol_id
+ * protocol_id_show - Get function for sysfs attribute protocol_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t protocol_id_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -605,14 +609,15 @@ static ssize_t protocol_id_get(struct device *dev,
}
/**
- * protocol_id_set - Set function for sysfs attribute protocol_id
+ * protocol_id_store - Set function for sysfs attribute protocol_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t protocol_id_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t protocol_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -646,13 +651,13 @@ static ssize_t protocol_id_set(struct device *dev,
}
/**
- * metric_id_get - Get function for sysfs attribute metric_id
+ * metric_id_show - Get function for sysfs attribute metric_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t metric_id_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t metric_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -666,14 +671,15 @@ static ssize_t metric_id_get(struct device *dev,
}
/**
- * metric_id_set - Set function for sysfs attribute metric_id
+ * metric_id_store - Set function for sysfs attribute metric_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t metric_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -707,13 +713,13 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
}
/**
- * capability_get - Get function for sysfs attribute capability
+ * capability_show - Get function for sysfs attribute capability
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t capability_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -727,14 +733,15 @@ static ssize_t capability_get(struct device *dev,
}
/**
- * capability_set - Set function for sysfs attribute capability
+ * capability_store - Set function for sysfs attribute capability
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t capability_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -768,13 +775,13 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
}
-static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
-static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
-static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
-static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
-static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
-static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
-static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
+static DEVICE_ATTR_RW(bootflag);
+static DEVICE_ATTR_RW(boottime);
+static DEVICE_ATTR_RW(channel);
+static DEVICE_ATTR_RW(mesh_id);
+static DEVICE_ATTR_RW(protocol_id);
+static DEVICE_ATTR_RW(metric_id);
+static DEVICE_ATTR_RW(capability);
static struct attribute *boot_opts_attrs[] = {
&dev_attr_bootflag.attr,
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index a92916dc81a9..fe0a69e804d8 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -48,7 +48,7 @@ static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
static int if_usb_reset_device(struct lbtf_private *priv);
/**
- * if_usb_wrike_bulk_callback - call back to handle URB status
+ * if_usb_write_bulk_callback - call back to handle URB status
*
* @urb: pointer to urb structure
*/
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 470d669c7f14..2ff23ab259ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -995,6 +995,11 @@ struct host_cmd_ds_802_11_key_material {
struct mwifiex_ie_type_key_param_set key_param_set;
} __packed;
+struct host_cmd_ds_802_11_key_material_wep {
+ __le16 action;
+ struct mwifiex_ie_type_key_param_set key_param_set[NUM_WEP_KEYS];
+} __packed;
+
struct host_cmd_ds_gen {
__le16 command;
__le16 size;
@@ -2347,6 +2352,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
+ struct host_cmd_ds_802_11_key_material_wep key_material_wep;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 529dfd8b7ae8..17399d4aa129 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1445,11 +1445,18 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (!priv)
continue;
rtnl_lock();
- wiphy_lock(adapter->wiphy);
if (priv->netdev &&
- priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
+ /*
+ * Close the netdev now, because if we do it later the
+ * netdev notifiers will need to acquire the wiphy lock
+ * again, leading to a deadlock.
+ */
+ dev_close(priv->wdev.netdev);
+ wiphy_lock(adapter->wiphy);
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
- wiphy_unlock(adapter->wiphy);
+ wiphy_unlock(adapter->wiphy);
+ }
rtnl_unlock();
}
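
The hunk above reorders the teardown so dev_close() runs before the wiphy lock is taken; the netdev notifiers triggered by closing may take that lock themselves, so closing under the lock would deadlock. A standalone model of the ordering constraint (hypothetical names, compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wiphy = PTHREAD_MUTEX_INITIALIZER;

static void netdev_notifier(void)
{
	/* The real notifier chain may take the wiphy lock again;
	 * with this default non-recursive mutex, calling it while
	 * wiphy is already held would hang. */
	pthread_mutex_lock(&wiphy);
	puts("notifier ran under the wiphy lock");
	pthread_mutex_unlock(&wiphy);
}

static void dev_close_model(void)
{
	netdev_notifier();	/* must run while wiphy is NOT held */
}

int main(void)
{
	dev_close_model();		/* 1: close the netdev first */
	pthread_mutex_lock(&wiphy);	/* 2: only now take the lock */
	puts("interface deleted under the wiphy lock");
	pthread_mutex_unlock(&wiphy);
	return 0;
}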
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index d3a968ef21ef..48ea00da1fc9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -840,14 +840,15 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
}
if (!enc_key) {
- memset(&key_material->key_param_set, 0,
- (NUM_WEP_KEYS *
- sizeof(struct mwifiex_ie_type_key_param_set)));
+ struct host_cmd_ds_802_11_key_material_wep *key_material_wep =
+ (struct host_cmd_ds_802_11_key_material_wep *)key_material;
+ memset(key_material_wep->key_param_set, 0,
+ sizeof(key_material_wep->key_param_set));
ret = mwifiex_set_keyparamset_wep(priv,
- &key_material->key_param_set,
+ &key_material_wep->key_param_set[0],
&key_param_len);
cmd->size = cpu_to_le16(key_param_len +
- sizeof(key_material->action) + S_DS_GEN);
+ sizeof(key_material_wep->action) + S_DS_GEN);
return ret;
} else
memset(&key_material->key_param_set, 0,
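
The old code cleared NUM_WEP_KEYS entries through a struct that declares only one key_param_set, so the memset() length was computed by hand and ran past the member's type. The new WEP-specific struct makes the four-key array part of the type, letting sizeof() bound the clear exactly. A standalone sketch of the pattern (field names mirror the driver's, sizes are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_WEP_KEYS 4

struct key_param_set { uint8_t data[32]; };

/* Single-key command layout, as used for WPA keys. */
struct key_material {
	uint16_t action;
	struct key_param_set key_param_set;
};

/* WEP variant: the full array is part of the type, so sizeof()
 * the member bounds the memset() exactly. */
struct key_material_wep {
	uint16_t action;
	struct key_param_set key_param_set[NUM_WEP_KEYS];
};

int main(void)
{
	struct key_material_wep wep;

	memset(wep.key_param_set, 0, sizeof(wep.key_param_set));
	printf("cleared %zu bytes for %d WEP keys\n",
	       sizeof(wep.key_param_set), NUM_WEP_KEYS);
	return 0;
}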
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 84b32a5f01ee..3bf6571f4149 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -4552,7 +4552,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
else
rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
- memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
p->interop = 1;
p->amsdu_enabled = 0;
@@ -5034,7 +5034,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ap_legacy_rates =
ap->supp_rates[NL80211_BAND_5GHZ] << 5;
}
- memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+ memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
rcu_read_unlock();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 72b1cc0ecfda..5e1c1506a4c6 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -191,6 +191,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[idx].txwi = txwi;
q->entry[idx].skb = skb;
+ q->entry[idx].wcid = 0xffff;
return idx;
}
@@ -349,6 +350,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ };
struct mt76_tx_info tx_info = {
.skb = skb,
};
@@ -360,11 +364,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
u8 *txwi;
t = mt76_get_txwi(dev);
- if (!t) {
- hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_free_txskb(hw, skb);
- return -ENOMEM;
- }
+ if (!t)
+ goto free_skb;
+
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
@@ -427,8 +429,13 @@ free:
}
#endif
- dev_kfree_skb(tx_info.skb);
mt76_put_txwi(dev, t);
+
+free_skb:
+ status.skb = tx_info.skb;
+ hw = mt76_tx_status_get_hw(dev, tx_info.skb);
+ ieee80211_tx_status_ext(hw, &status);
+
return ret;
}
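
The rewrite above funnels every failure through a single free_skb label and reports the frame via ieee80211_tx_status_ext() instead of freeing it silently, so mac80211 still sees a completion for dropped frames. A minimal model of the single-exit pattern (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct frame { int id; };

/* Stand-in for the status callback: the stack always learns the
 * frame's fate, even on the error path. */
static void report_dropped(struct frame *f)
{
	printf("frame %d: dropped\n", f->id);
}

static int tx_queue_frame(struct frame *f, bool have_descriptor)
{
	if (!have_descriptor)
		goto free_skb;	/* every failure takes this exit */

	/* ...map buffers, write the descriptor, kick the queue;
	 * the successful completion is reported later. */
	printf("frame %d: queued\n", f->id);
	return 0;

free_skb:
	report_dropped(f);
	return -1;
}

int main(void)
{
	struct frame f = { 1 };

	tx_queue_frame(&f, false);
	tx_queue_frame(&f, true);
	return 0;
}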
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 977acab0360a..d03aedc3286b 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -83,6 +83,22 @@ static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
{ .throughput = 300 * 1024, .blink_time = 50 },
};
+struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+EXPORT_SYMBOL_GPL(mt76_rates);
+
static int mt76_led_init(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
@@ -315,17 +331,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
-
- wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_ADHOC);
}
struct mt76_phy *
@@ -346,6 +351,17 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
phy->hw = hw;
phy->priv = hw->priv + phy_size;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
@@ -428,6 +444,17 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
mutex_init(&dev->mcu.mutex);
dev->tx_worker.fn = mt76_tx_worker;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@@ -514,10 +541,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
struct sk_buff *skb = phy->rx_amsdu[q].head;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_dev *dev = phy->dev;
phy->rx_amsdu[q].head = NULL;
phy->rx_amsdu[q].tail = NULL;
+
+ /*
+	 * Validate that the A-MSDU has a proper first subframe.
+	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
+	 * A-MSDU flag of the QoS header gets flipped. In such cases, the
+	 * first subframe has an LLC/SNAP header in the location of the
+	 * destination address.
+ */
+ if (skb_shinfo(skb)->frag_list) {
+ int offset = 0;
+
+ if (!(status->flag & RX_FLAG_8023)) {
+ offset = ieee80211_get_hdrlen_from_skb(skb);
+
+ if ((status->flag &
+ (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+ RX_FLAG_DECRYPTED)
+ offset += 8;
+ }
+
+ if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
__skb_queue_tail(&dev->rx_skb[q], skb);
}
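
This check is the defense against spoofed A-MSDUs: when a single MSDU is reparsed as an A-MSDU, the bytes where the first subframe's destination address should sit are actually an RFC 1042 LLC/SNAP header, so matching them against that constant identifies the forgery. A standalone sketch of the comparison (the driver additionally skips the 802.11 header and, for decrypted frames whose IV was not stripped, 8 more bytes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t rfc1042_header[6] = {
	0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};

/* Returns 1 when LLC/SNAP sits where a destination MAC should be,
 * i.e. the A-MSDU was forged from a single MSDU. */
static int amsdu_first_subframe_spoofed(const uint8_t *buf, size_t offset)
{
	return memcmp(buf + offset, rfc1042_header,
		      sizeof(rfc1042_header)) == 0;
}

int main(void)
{
	uint8_t good[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t bad[16]  = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	printf("good: %d, bad: %d\n",
	       amsdu_first_subframe_spoofed(good, 0),
	       amsdu_first_subframe_spoofed(bad, 0));
	return 0;
}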
@@ -606,20 +659,19 @@ void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
-void mt76_update_survey(struct mt76_dev *dev)
+void mt76_update_survey(struct mt76_phy *phy)
{
+ struct mt76_dev *dev = phy->dev;
ktime_t cur_time;
if (dev->drv->update_survey)
- dev->drv->update_survey(dev);
+ dev->drv->update_survey(phy);
cur_time = ktime_get_boottime();
- mt76_update_survey_active_time(&dev->phy, cur_time);
- if (dev->phy2)
- mt76_update_survey_active_time(dev->phy2, cur_time);
+ mt76_update_survey_active_time(phy, cur_time);
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
- struct mt76_channel_state *state = dev->phy.chan_state;
+ struct mt76_channel_state *state = phy->chan_state;
spin_lock_bh(&dev->cc_lock);
state->cc_bss_rx += dev->cur_cc_bss_rx;
@@ -638,7 +690,7 @@ void mt76_set_channel(struct mt76_phy *phy)
int timeout = HZ / 5;
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
- mt76_update_survey(dev);
+ mt76_update_survey(phy);
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -663,7 +715,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&dev->mutex);
if (idx == 0 && dev->drv->update_survey)
- mt76_update_survey(dev);
+ mt76_update_survey(phy);
sband = &phy->sband_2g;
if (idx >= sband->sband.n_channels) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 36ede65919f8..25c5ceef5257 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -87,6 +87,22 @@ enum mt76_rxq_id {
__MT_RXQ_MAX
};
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_CCMP_CCX,
+ MT_CIPHER_CCMP_256,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@@ -320,6 +336,7 @@ enum {
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
+ bool has_6ghz;
};
#define MT_DRV_TXWI_NO_FREE BIT(0)
@@ -336,7 +353,7 @@ struct mt76_driver_ops {
u16 token_size;
u8 mcs_rates;
- void (*update_survey)(struct mt76_dev *dev);
+ void (*update_survey)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -738,6 +755,21 @@ enum mt76_phy_type {
MT_PHY_TYPE_HE_MU,
};
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+extern struct ieee80211_rate mt76_rates[12];
+
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
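
In the shared rate table these macros build, each rate's hw_value packs the PHY type into the high byte and the hardware rate index into the low byte; the CCK short-preamble variant adds 4 to the index. A small sketch of the encoding (the enum values are assumed to follow the declaration order of enum mt76_phy_type above):

#include <stdint.h>
#include <stdio.h>

enum { PHY_TYPE_CCK = 0, PHY_TYPE_OFDM = 1 };

static uint16_t encode_hw_value(int phy_type, int idx)
{
	return (uint16_t)((phy_type << 8) | idx);
}

static void decode_hw_value(uint16_t hw, int *phy_type, int *idx)
{
	*phy_type = hw >> 8;
	*idx = hw & 0xff;
}

int main(void)
{
	int phy, idx;
	uint16_t hw = encode_hw_value(PHY_TYPE_OFDM, 11); /* 6 Mbit/s */

	decode_hw_value(hw, &phy, &idx);
	printf("hw_value 0x%04x -> phy %d, index %d\n", hw, phy, idx);
	return 0;
}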
@@ -1031,7 +1063,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
-void mt76_update_survey(struct mt76_dev *dev);
+void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -1056,7 +1088,14 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
+ struct list_head *free_list);
+static inline void
+mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
+{
+ __mt76_tx_complete_skb(dev, wcid, skb, NULL);
+}
+
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1253,4 +1292,15 @@ mt76_token_put(struct mt76_dev *dev, int token)
return txwi;
}
+
+static inline int
+mt76_get_next_pkt_id(struct mt76_wcid *wcid)
+{
+ wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+ if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+ wcid->packet_id == MT_PACKET_ID_NO_SKB)
+ wcid->packet_id = MT_PACKET_ID_FIRST;
+
+ return wcid->packet_id;
+}
#endif
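
mt76_get_next_pkt_id() cycles the per-wcid packet id through the mask while stepping over the two reserved values used for no-ack and no-skb completions. A standalone version with illustrative constants (the real MT_PACKET_ID_* definitions live in this header):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MT_PACKET_ID_* definitions. */
#define PACKET_ID_MASK   0x7f
#define PACKET_ID_NO_ACK 0
#define PACKET_ID_NO_SKB 1
#define PACKET_ID_FIRST  2

static uint8_t next_pkt_id(uint8_t *pid)
{
	*pid = (*pid + 1) & PACKET_ID_MASK;
	if (*pid == PACKET_ID_NO_ACK || *pid == PACKET_ID_NO_SKB)
		*pid = PACKET_ID_FIRST;	/* skip the reserved ids */
	return *pid;
}

int main(void)
{
	uint8_t pid = PACKET_ID_MASK;	/* force a wrap */

	/* the wrap lands on NO_ACK, which is bumped to FIRST */
	printf("next id after 0x%02x: 0x%02x\n", PACKET_ID_MASK,
	       next_pkt_id(&pid));
	return 0;
}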
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index e1b2cfa56074..031d39a48a55 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -304,34 +304,6 @@ mt7603_init_hardware(struct mt7603_dev *dev)
return 0;
}
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
-}
-
-static struct ieee80211_rate mt7603_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(11, 60),
- OFDM_RATE(15, 90),
- OFDM_RATE(10, 120),
- OFDM_RATE(14, 180),
- OFDM_RATE(9, 240),
- OFDM_RATE(13, 360),
- OFDM_RATE(8, 480),
- OFDM_RATE(12, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -569,8 +541,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->reg_notifier = mt7603_regd_notifier;
- ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
- ARRAY_SIZE(mt7603_rates));
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index fbceb07c5f37..3972c56136a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -550,14 +550,27 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
- status->iv[0] = data[5];
- status->iv[1] = data[4];
- status->iv[2] = data[3];
- status->iv[3] = data[2];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
}
rxd += 4;
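
For the ciphers listed, the RX descriptor carries the 6 IV bytes in reverse order, which is why the switch copies data[5]..data[0] into iv[0]..iv[5]; only the CCMP family additionally requests CCMP header reinsertion on fragmented frames. A tiny sketch of the reversal:

#include <stdint.h>
#include <stdio.h>

static void copy_iv_reversed(uint8_t iv[6], const uint8_t data[6])
{
	int i;

	for (i = 0; i < 6; i++)
		iv[i] = data[5 - i];
}

int main(void)
{
	const uint8_t rxd[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t iv[6];
	int i;

	copy_iv_reversed(iv, rxd);
	for (i = 0; i < 6; i++)
		printf("%02x ", iv[i]);
	printf("\n");
	return 0;
}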
@@ -831,7 +844,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}
-static enum mt7603_cipher_type
+static enum mt76_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
@@ -863,7 +876,7 @@ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
struct ieee80211_key_conf *key)
{
- enum mt7603_cipher_type cipher;
+ enum mt76_cipher_type cipher;
u32 addr = mt7603_wtbl3_addr(wcid);
u8 key_data[32];
int key_len = sizeof(key_data);
@@ -1213,7 +1226,7 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
- ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
}
@@ -1584,12 +1597,12 @@ trigger:
return true;
}
-void mt7603_update_channel(struct mt76_dev *mdev)
+void mt7603_update_channel(struct mt76_phy *mphy)
{
- struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->phy.chan_state;
+ state = mphy->chan_state;
state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
@@ -1806,7 +1819,7 @@ void mt7603_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
dev->mphy.mac_work_count++;
- mt76_update_survey(&dev->mt76);
+ mt76_update_survey(&dev->mphy);
mt7603_edcca_check(dev);
for (i = 0, idx = 0; i < 2; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 1df5b9fed2bb..0fd46d907638 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -256,7 +256,7 @@ void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t);
-void mt7603_update_channel(struct mt76_dev *mdev);
+void mt7603_update_channel(struct mt76_phy *mphy);
void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index 6741e6907194..3b901090b29c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -765,16 +765,4 @@ enum {
#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300)
#define MT_WTBL1_OR_PSM_WRITE BIT(31)
-enum mt7603_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_TKIP,
- MT_CIPHER_TKIP_NO_MIC,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_WEP104,
- MT_CIPHER_BIP_CMAC_128,
- MT_CIPHER_WEP128,
- MT_CIPHER_WAPI,
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index e8fc4a7ae9bc..83f9861ff522 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 676bb22726d6..cb4659771fd9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -75,7 +75,7 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_wait_for_mcu_init(dev))
return 0;
- if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
+ if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
if (val == pm->enable)
@@ -319,24 +319,6 @@ mt7615_radio_read(struct seq_file *s, void *data)
return 0;
}
-static int mt7615_read_temperature(struct seq_file *s, void *data)
-{
- struct mt7615_dev *dev = dev_get_drvdata(s->private);
- int temp;
-
- if (!mt7615_wait_for_mcu_init(dev))
- return 0;
-
- /* cpu */
- mt7615_mutex_acquire(dev);
- temp = mt7615_mcu_get_temperature(dev, 0);
- mt7615_mutex_release(dev);
-
- seq_printf(s, "Temperature: %d\n", temp);
-
- return 0;
-}
-
static int
mt7615_queues_acq(struct seq_file *s, void *data)
{
@@ -566,8 +548,6 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_file("reset_test", 0200, dir, dev,
&fops_reset_test);
- debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
- mt7615_read_temperature);
debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr);
debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 8004ae5c16a9..00aefea1bf61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -81,7 +81,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
if (napi_complete(napi))
mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return 0;
}
@@ -99,7 +99,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
return 0;
}
done = mt76_dma_rx_poll(napi, budget);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return done;
}
@@ -222,14 +222,9 @@ void mt7615_dma_start(struct mt7615_dev *dev)
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
- int rx_buf_size = MT_RX_BUF_SIZE;
u32 mask;
int ret;
- /* Increase buffer size to receive large VHT MPDUs */
- if (dev->mphy.cap.has_5ghz)
- rx_buf_size *= 2;
-
mt76_dma_attach(&dev->mt76);
mt76_wr(dev, MT_WPDMA_GLO_CFG,
@@ -270,7 +265,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
/* init rx queues */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT7615_RX_MCU_RING_SIZE, rx_buf_size,
+ MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
MT_RX_RING_BASE);
if (ret)
return ret;
@@ -279,7 +274,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
rx_ring_size /= 2;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
- rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
+ rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 86341d1f82f3..2f1ac644e018 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -8,11 +8,61 @@
*/
#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "eeprom.h"
+static ssize_t mt7615_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mt7615_dev *mdev = dev_get_drvdata(dev);
+ int temperature;
+
+ if (!mt7615_wait_for_mcu_init(mdev))
+ return 0;
+
+ mt7615_mutex_acquire(mdev);
+ temperature = mt7615_mcu_get_temperature(mdev);
+ mt7615_mutex_release(mdev);
+
+ if (temperature < 0)
+ return temperature;
+
+ /* display in millidegrees Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7615_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *mt7615_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7615_hwmon);
+
+int mt7615_thermal_init(struct mt7615_dev *dev)
+{
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct device *hwmon;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+ wiphy_name(wiphy), dev,
+ mt7615_hwmon_groups);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_thermal_init);
+
static void
mt7615_phy_init(struct mt7615_dev *dev)
{
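
The multiply by 1000 in the show routine follows the hwmon sysfs ABI, which expresses temp*_input in millidegrees Celsius. A one-function sketch of the conversion:

#include <stdio.h>

/* hwmon temp*_input is expressed in millidegrees Celsius. */
static long to_millicelsius(int degrees_c)
{
	return (long)degrees_c * 1000;
}

int main(void)
{
	printf("%ld\n", to_millicelsius(45));	/* prints 45000 */
	return 0;
}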
@@ -174,35 +224,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
-}
-
-struct ieee80211_rate mt7615_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(11, 60),
- OFDM_RATE(15, 90),
- OFDM_RATE(10, 120),
- OFDM_RATE(14, 180),
- OFDM_RATE(9, 240),
- OFDM_RATE(13, 360),
- OFDM_RATE(8, 480),
- OFDM_RATE(12, 540),
-};
-EXPORT_SYMBOL_GPL(mt7615_rates);
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -362,7 +383,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->max_sched_scan_plan_interval =
- MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+ MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
@@ -472,8 +493,8 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
for (i = 0; i <= MT_TXQ_PSD ; i++)
mphy->q_tx[i] = dev->mphy.q_tx[i];
- ret = mt76_register_phy(mphy, true, mt7615_rates,
- ARRAY_SIZE(mt7615_rates));
+ ret = mt76_register_phy(mphy, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
if (ret)
ieee80211_free_hw(mphy->hw);
@@ -510,7 +531,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
- set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index f81a17d56008..ff3f85e4087c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -20,7 +20,7 @@
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
- .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
[6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
@@ -34,7 +34,7 @@ static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
};
static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
- .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
@@ -45,7 +45,7 @@ static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
};
static const struct mt7615_dfs_radar_spec jp_radar_specs = {
- .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
[0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
@@ -57,6 +57,33 @@ static const struct mt7615_dfs_radar_spec jp_radar_specs = {
},
};
+static enum mt76_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
{
@@ -313,14 +340,27 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
- status->iv[0] = data[5];
- status->iv[1] = data[4];
- status->iv[2] = data[3];
- status->iv[3] = data[2];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
}
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
@@ -1062,7 +1102,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
- mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+ mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
sta->rate_set_tsf |= rd.rateset;
@@ -1078,7 +1118,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
- enum mt7615_cipher_type cipher, u16 cipher_mask,
+ enum mt76_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
@@ -1118,7 +1158,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher, u16 cipher_mask,
+ enum mt76_cipher_type cipher, u16 cipher_mask,
int keyidx, enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1157,7 +1197,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher, u16 cipher_mask,
+ enum mt76_cipher_type cipher, u16 cipher_mask,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@@ -1183,7 +1223,7 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
- enum mt7615_cipher_type cipher;
+ enum mt76_cipher_type cipher;
u16 cipher_mask = wcid->cipher;
int err;
@@ -1235,22 +1275,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
- bool probe, ampdu, cck = false;
+ bool ampdu, cck = false;
bool rs_idx;
u32 rate_set_tsf;
u32 final_rate, final_rate_flags, final_nss, txs;
- fixed_rate = info->status.rates[0].count;
- probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
txs = le32_to_cpu(txs_data[1]);
- ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+ ampdu = txs & MT_TXS1_AMPDU;
txs = le32_to_cpu(txs_data[3]);
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
txs = le32_to_cpu(txs_data[0]);
+ fixed_rate = txs & MT_TXS0_FIXED_RATE;
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
@@ -1272,7 +1310,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
- if (fixed_rate && !probe) {
+ if (fixed_rate) {
info->status.rates[0].count = count;
i = 0;
goto out;
@@ -1391,7 +1429,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
- ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
}
@@ -1821,43 +1859,41 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
state->noise = -(phy->noise >> 4);
}
-static void __mt7615_update_channel(struct mt7615_dev *dev)
+static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
+ ktime_t cur_time;
+
+ /* MT7615 can only update both phys simultaneously
+ * since some registers are shared across bands.
+ */
mt7615_phy_update_channel(&mdev->phy, 0);
if (mdev->phy2)
mt7615_phy_update_channel(mdev->phy2, 1);
+ cur_time = ktime_get_boottime();
+
+ mt76_update_survey_active_time(&mdev->phy, cur_time);
+ if (mdev->phy2)
+ mt76_update_survey_active_time(mdev->phy2, cur_time);
+
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
-void mt7615_update_channel(struct mt76_dev *mdev)
+void mt7615_update_channel(struct mt76_phy *mphy)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
return;
- __mt7615_update_channel(dev);
+ mt7615_update_survey(dev);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);
-static void mt7615_update_survey(struct mt7615_dev *dev)
-{
- struct mt76_dev *mdev = &dev->mt76;
- ktime_t cur_time;
-
- __mt7615_update_channel(dev);
- cur_time = ktime_get_boottime();
-
- mt76_update_survey_active_time(&mdev->phy, cur_time);
- if (mdev->phy2)
- mt76_update_survey_active_time(mdev->phy2, cur_time);
-}
-
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
@@ -1906,14 +1942,26 @@ void mt7615_pm_wake_work(struct work_struct *work)
mphy = dev->phy.mt76;
if (!mt7615_mcu_set_drv_ctrl(dev)) {
+ struct mt76_dev *mdev = &dev->mt76;
int i;
- mt76_for_each_q_rx(&dev->mt76, i)
- napi_schedule(&dev->mt76.napi[i]);
- mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7615_WATCHDOG_TIME);
+ if (mt76_is_sdio(mdev)) {
+ mt76_worker_schedule(&mdev->sdio.txrx_worker);
+ } else {
+ mt76_for_each_q_rx(mdev, i)
+ napi_schedule(&mdev->napi[i]);
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
+ false);
+ }
+
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
+ unsigned long timeout;
+
+ timeout = mt7615_get_macwork_timeout(dev);
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ timeout);
+ }
}
ieee80211_wake_queues(mphy->hw);
@@ -1948,6 +1996,7 @@ void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_phy *phy;
struct mt76_phy *mphy;
+ unsigned long timeout;
mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
mac_work.work);
@@ -1966,8 +2015,9 @@ void mt7615_mac_work(struct work_struct *work)
mt7615_mutex_release(phy->dev);
mt76_tx_status_check(mphy->dev, NULL, false);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7615_WATCHDOG_TIME);
+
+ timeout = mt7615_get_macwork_timeout(phy->dev);
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}
void mt7615_tx_token_put(struct mt7615_dev *dev)
@@ -2048,14 +2098,12 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
const struct mt7615_dfs_radar_spec *radar_specs;
struct mt7615_dev *dev = phy->dev;
- int err, i;
+ int err, i, lpn = 500;
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
radar_specs = &fcc_radar_specs;
- err = mt7615_mcu_set_fcc5_lpn(dev, 8);
- if (err < 0)
- return err;
+ lpn = 8;
break;
case NL80211_DFS_ETSI:
radar_specs = &etsi_radar_specs;
@@ -2067,6 +2115,11 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
return -EINVAL;
}
+ /* avoid FCC radar detection in non-FCC region */
+ err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
+ if (err < 0)
+ return err;
+
for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
err = mt7615_mcu_set_radar_th(dev, i,
&radar_specs->radar_pattern[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 6bf9da040196..46f283eb8d0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -383,48 +383,6 @@ struct mt7615_dfs_radar_spec {
struct mt7615_dfs_pattern radar_pattern[16];
};
-enum mt7615_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_TKIP,
- MT_CIPHER_TKIP_NO_MIC,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_WEP104,
- MT_CIPHER_BIP_CMAC_128,
- MT_CIPHER_WEP128,
- MT_CIPHER_WAPI,
- MT_CIPHER_CCMP_256 = 10,
- MT_CIPHER_GCMP,
- MT_CIPHER_GCMP_256,
-};
-
-static inline enum mt7615_cipher_type
-mt7615_mac_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MT_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MT_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MT_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MT_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MT_CIPHER_WAPI;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 39733b351ac4..dada43d6d879 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -28,6 +28,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ unsigned long timeout;
bool running;
int ret;
@@ -78,8 +79,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
- MT7615_WATCHDOG_TIME);
+ timeout = mt7615_get_macwork_timeout(dev);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
if (!running)
mt7615_mac_reset_counters(dev);
@@ -240,8 +241,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
ret = mt7615_mcu_add_dev_info(phy, vif, true);
- if (ret)
- goto out;
out:
mt7615_mutex_release(dev);
@@ -352,10 +351,12 @@ out:
mt7615_mutex_release(dev);
mt76_worker_schedule(&dev->mt76.tx_worker);
- if (!mt76_testmode_enabled(phy->mt76))
+ if (!mt76_testmode_enabled(phy->mt76)) {
+ unsigned long timeout = mt7615_get_macwork_timeout(dev);
+
ieee80211_queue_delayed_work(phy->mt76->hw,
- &phy->mt76->mac_work,
- MT7615_WATCHDOG_TIME);
+ &phy->mt76->mac_work, timeout);
+ }
return ret;
}
@@ -695,7 +696,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
msta->n_rates = i;
if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(phy->mt76, &dev->pm);
}
spin_unlock_bh(&dev->mt76.lock);
}
@@ -711,7 +712,7 @@ void mt7615_tx_worker(struct mt76_worker *w)
}
mt76_tx_worker_run(&dev->mt76);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@@ -741,7 +742,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(mphy, &dev->pm);
return;
}
@@ -881,7 +882,8 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7615_mutex_acquire(dev);
- mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
+ /* TSF read */
+ mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
@@ -911,7 +913,33 @@ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
/* TSF software overwrite */
- mt76_set(dev, reg, MT_LPON_TCR_WRITE);
+ mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE);
+
+ mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 timestamp)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 idx = mvif->mt76.omac_idx;
+ u32 reg;
+
+ idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+ reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+
+ mt7615_mutex_acquire(dev);
+
+ mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+ /* TSF software adjust */
+ mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST);
mt7615_mutex_release(dev);
}
@@ -1162,7 +1190,7 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- mt7615_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
}
#ifdef CONFIG_PM
@@ -1200,6 +1228,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ unsigned long timeout;
bool running;
mt7615_mutex_acquire(dev);
@@ -1223,8 +1252,8 @@ static int mt7615_resume(struct ieee80211_hw *hw)
mt76_connac_mcu_set_suspend_iter,
phy->mt76);
- ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
- MT7615_WATCHDOG_TIME);
+ timeout = mt7615_get_macwork_timeout(dev);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
mt7615_mutex_release(dev);
@@ -1278,6 +1307,7 @@ const struct ieee80211_ops mt7615_ops = {
.get_stats = mt7615_get_stats,
.get_tsf = mt7615_get_tsf,
.set_tsf = mt7615_set_tsf,
+ .offset_tsf = mt7615_offset_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
.set_antenna = mt7615_set_antenna,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index aa42af9ebfd6..f8a09692d3e4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -411,6 +411,9 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
c = (struct mt7615_mcu_csa_notify *)skb->data;
+ if (c->omac_idx > EXT_BSSID_MAX)
+ return;
+
if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
mphy = dev->mt76.phy2;
@@ -427,6 +430,10 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
r = (struct mt7615_mcu_rdd_report *)skb->data;
+ if (!dev->radar_pattern.n_pulses && !r->long_detected &&
+ !r->constant_prf_detected && !r->staggered_prf_detected)
+ return;
+
if (r->band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
@@ -1021,9 +1028,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (IS_ERR(sskb))
return PTR_ERR(sskb);
- mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+ mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
if (enable && sta)
- mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0);
+ mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
+ MT76_STA_INFO_STATE_ASSOC);
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
WTBL_RESET_AND_SET, NULL,
@@ -1037,8 +1045,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
NULL, wtbl_hdr);
- mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL,
- wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
+ NULL, wtbl_hdr);
}
cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@@ -1058,6 +1066,26 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
}
+static int
+mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
+ wtbl_hdr);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ true);
+}
+
static const struct mt7615_mcu_ops wtbl_update_ops = {
.add_beacon_offload = mt7615_mcu_add_beacon_offload,
.set_pm_state = mt7615_mcu_ctrl_pm_state,
@@ -1068,6 +1096,7 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
.sta_add = mt7615_mcu_wtbl_sta_add,
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ .set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans,
};
static int
@@ -1120,18 +1149,21 @@ mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
static int
__mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable, int cmd)
+ struct ieee80211_sta *sta, bool enable, int cmd,
+ bool offload_fw)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
+ .offload_fw = offload_fw,
.enable = enable,
+ .newly = true,
.cmd = cmd,
};
info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
- return mt76_connac_mcu_add_sta_cmd(phy, &info);
+ return mt76_connac_mcu_sta_cmd(phy, &info);
}
static int
@@ -1139,7 +1171,19 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
- MCU_EXT_CMD_STA_REC_UPDATE);
+ MCU_EXT_CMD_STA_REC_UPDATE, false);
+}
+
+static int
+mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+ vif, &msta->wcid,
+ MCU_EXT_CMD_STA_REC_UPDATE);
}
static const struct mt7615_mcu_ops sta_update_ops = {
@@ -1152,27 +1196,9 @@ static const struct mt7615_mcu_ops sta_update_ops = {
.sta_add = mt7615_mcu_add_sta,
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ .set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans,
};
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb = NULL;
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
- WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
- true);
-}
-
static int
mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
@@ -1280,7 +1306,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
- MCU_UNI_CMD_STA_REC_UPDATE);
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
}
static int
@@ -1338,6 +1364,18 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
MCU_UNI_CMD_STA_REC_UPDATE, true);
}
+static int
+mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+ vif, &msta->wcid,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
static const struct mt7615_mcu_ops uni_update_ops = {
.add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
.set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
@@ -1348,6 +1386,7 @@ static const struct mt7615_mcu_ops uni_update_ops = {
.sta_add = mt7615_mcu_uni_add_sta,
.set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ .set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans,
};
int mt7615_mcu_restart(struct mt76_dev *dev)
@@ -2322,14 +2361,12 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
{
struct {
u8 action;
u8 rsv[3];
- } req = {
- .action = index,
- };
+ } req = {};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
sizeof(req), true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 202ea235415e..71719c787511 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -229,7 +229,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
GFP_KERNEL);
if (!bus_ops) {
ret = -ENOMEM;
- goto error;
+ goto err_free_dev;
}
bus_ops->rr = mt7615_rr;
@@ -242,17 +242,20 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto error;
+ goto err_free_dev;
if (is_mt7663(mdev))
mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
ret = mt7615_register_device(dev);
if (ret)
- goto error;
+ goto err_free_irq;
return 0;
-error:
+
+err_free_irq:
+ devm_free_irq(pdev, irq, dev);
+err_free_dev:
mt76_free_device(&dev->mt76);
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 989f05ed4377..d0c64a9b09cf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -20,7 +20,6 @@
MT7615_MAX_INTERFACES)
#define MT7615_PM_TIMEOUT (HZ / 12)
-#define MT7615_WATCHDOG_TIME (HZ / 10)
#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
@@ -202,6 +201,7 @@ struct mt7615_phy {
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
+#define mt7615_mcu_set_sta_decap_offload(dev, ...) (dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__)
struct mt7615_mcu_ops {
int (*add_tx_ba)(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
@@ -221,6 +221,9 @@ struct mt7615_mcu_ops {
int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
int (*set_drv_ctrl)(struct mt7615_dev *dev);
int (*set_fw_ctrl)(struct mt7615_dev *dev);
+ int (*set_sta_decap_offload)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
};
struct mt7615_dev {
@@ -356,6 +359,7 @@ static inline int mt7622_wmac_init(struct mt7615_dev *dev)
}
#endif
+int mt7615_thermal_init(struct mt7615_dev *dev);
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
@@ -456,6 +460,12 @@ static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
}
+static inline unsigned long
+mt7615_get_macwork_timeout(struct mt7615_dev *dev)
+{
+ return dev->pm.enable ? HZ / 3 : HZ / 10;
+}
+
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_roc_work(struct work_struct *work);
@@ -466,7 +476,7 @@ int mt7615_set_channel(struct mt7615_phy *phy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
-void mt7615_update_channel(struct mt76_dev *mdev);
+void mt7615_update_channel(struct mt76_phy *mphy);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
@@ -494,7 +504,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev);
int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
@@ -518,9 +528,6 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index ec8ec1a2033f..a2465b49ecd0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -98,7 +98,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static int
@@ -147,8 +147,12 @@ int mt7615_register_device(struct mt7615_dev *dev)
if (ret)
return ret;
- ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
- ARRAY_SIZE(mt7615_rates));
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ return ret;
+
+ ret = mt7615_thermal_init(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index d7cbef752f9f..da87c02a73eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -131,20 +131,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
int pid, id;
u8 *txwi = (u8 *)txwi_ptr;
struct mt76_txwi_cache *t;
+ struct mt7615_sta *msta;
void *txp;
+ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
if (!wcid)
wcid = &dev->mt76.global_wcid;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
@@ -267,6 +268,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
struct mt7615_phy *phy2;
struct mt76_phy *ext_phy;
struct mt7615_dev *dev;
+ unsigned long timeout;
dev = container_of(work, struct mt7615_dev, reset_work);
ext_phy = dev->mt76.phy2;
@@ -344,11 +346,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt7615_mutex_release(dev);
+ timeout = mt7615_get_macwork_timeout(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7615_WATCHDOG_TIME);
+ timeout);
if (phy2)
ieee80211_queue_delayed_work(ext_phy->hw,
- &phy2->mt76->mac_work,
- MT7615_WATCHDOG_TIME);
+ &phy2->mt76->mac_work, timeout);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 63c081bb04d0..6712ad9faeaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -463,7 +463,9 @@ enum mt7615_reg_base {
#define MT_LPON_TCR0(_n) MT_LPON(0x010 + ((_n) * 4))
#define MT_LPON_TCR2(_n) MT_LPON(0x0f8 + ((_n) - 2) * 4)
#define MT_LPON_TCR_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_READ GENMASK(1, 0)
#define MT_LPON_TCR_WRITE BIT(0)
+#define MT_LPON_TCR_ADJUST BIT(1)
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
index 05180971de84..03877d89e152 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2020 MediaTek Inc.
*
* Author: Sean Wang <sean.wang@mediatek.com>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 17fe4187d1de..45c1cd3b9f49 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -51,16 +51,14 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
u32 status;
int ret;
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
sdio_claim_host(func);
sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -69,29 +67,45 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot get ownership from device");
- set_bit(MT76_STATE_PM, &mphy->state);
- sdio_release_host(func);
+ } else {
+ clear_bit(MT76_STATE_PM, &mphy->state);
- return ret;
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
}
-
sdio_release_host(func);
-out:
- dev->pm.last_activity = jiffies;
+ return ret;
+}
- return 0;
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int ret = 0;
+
+ mutex_lock(&dev->pm.mutex);
+
+ if (test_bit(MT76_STATE_PM, &mphy->state))
+ ret = __mt7663s_mcu_drv_pmctrl(dev);
+
+ mutex_unlock(&dev->pm.mutex);
+
+ return ret;
}
static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int ret = 0;
u32 status;
- int ret;
- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
- return 0;
+ mutex_lock(&pm->mutex);
+
+ if (mt76_connac_skip_fw_pmctrl(mphy, pm))
+ goto out;
sdio_claim_host(func);
@@ -102,9 +116,15 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot set ownership to device");
clear_bit(MT76_STATE_PM, &mphy->state);
+ } else {
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
}
sdio_release_host(func);
+out:
+ mutex_unlock(&pm->mutex);
return ret;
}
@@ -123,7 +143,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
struct mt7615_mcu_ops *mcu_ops;
int ret;
- ret = mt7663s_mcu_drv_pmctrl(dev);
+ ret = __mt7663s_mcu_drv_pmctrl(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 4393dd21ebbb..04f4c89b7499 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -283,9 +283,15 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
{
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
txrx_worker);
- struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int i, nframes, ret;
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ queue_work(mdev->wq, &dev->pm.wake_work);
+ return;
+ }
+
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
@@ -295,16 +301,16 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
/* tx */
for (i = 0; i <= MT_TXQ_PSD; i++) {
- ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
+ ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]);
if (ret > 0)
nframes += ret;
}
- ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
+ ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]);
if (ret > 0)
nframes += ret;
/* rx */
- ret = mt7663s_rx_handler(dev);
+ ret = mt7663s_rx_handler(mdev);
if (ret > 0)
nframes += ret;
} while (nframes > 0);
@@ -312,6 +318,8 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
/* enable interrupt */
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
+
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
void mt7663s_sdio_irq(struct sdio_func *func)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
index be9a69fe1b38..f13d1b418742 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -31,7 +31,6 @@ int mt7622_wmac_init(struct mt7615_dev *dev)
static int mt7622_wmac_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *mem_base;
int irq;
@@ -39,7 +38,7 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- mem_base = devm_ioremap_resource(&pdev->dev, res);
+ mem_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mem_base))
return PTR_ERR(mem_base);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index c55698f9c49a..028ff432d811 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7663u_mcu_ops,
- /* usb does not support runtime-pm */
- clear_bit(MT76_STATE_PM, &dev->mphy.state);
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
mt7615_mcu_restart(&dev->mt76);
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index f8d3673c2cae..996d48cca18a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -123,7 +123,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
- mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+ mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
val = mt76_rr(dev, MT_LPON_UTTR0);
sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
@@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt7615_sta *msta;
int pad;
+ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
- !msta->rate_probe) {
+ msta && !msta->rate_probe) {
/* request to configure sampling rate */
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
@@ -323,8 +324,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
hw->max_tx_fragments = 1;
}
- err = mt76_register_device(&dev->mt76, true, mt7615_rates,
- ARRAY_SIZE(mt7615_rates));
+ err = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 6c889b90fd12..f49d97d0a1c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -7,12 +7,13 @@
#include "mt76.h"
#define MT76_CONNAC_SCAN_IE_LEN 600
-#define MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL 10
+#define MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL 10
+#define MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL U16_MAX
#define MT76_CONNAC_MAX_SCHED_SCAN_SSID 10
#define MT76_CONNAC_MAX_SCAN_MATCH 16
#define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
-#define MT76_CONNAC_COREDUMP_SZ (128 * 1024)
+#define MT76_CONNAC_COREDUMP_SZ (1300 * 1024)
enum {
CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
@@ -45,6 +46,8 @@ enum {
struct mt76_connac_pm {
bool enable;
+ bool ds_enable;
+ bool suspended;
spinlock_t txq_lock;
struct {
@@ -116,19 +119,27 @@ out:
}
static inline void
-mt76_connac_pm_unref(struct mt76_connac_pm *pm)
+mt76_connac_pm_unref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
spin_lock_bh(&pm->wake.lock);
- pm->wake.count--;
+
pm->last_activity = jiffies;
+ if (--pm->wake.count == 0 &&
+ test_bit(MT76_STATE_MCU_RUNNING, &phy->state))
+ mt76_connac_power_save_sched(phy, pm);
+
spin_unlock_bh(&pm->wake.lock);
}
static inline bool
mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
+ struct mt76_dev *dev = phy->dev;
bool ret;
+ if (dev->token_count)
+ return true;
+
spin_lock_bh(&pm->wake.lock);
ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
spin_unlock_bh(&pm->wake.lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 6f180c92d413..af43bcb54578 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -10,13 +10,16 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
if (!pm->enable)
return 0;
- if (!mt76_is_mmio(dev))
+ if (mt76_is_usb(dev))
return 0;
cancel_delayed_work_sync(&pm->ps_work);
if (!test_bit(MT76_STATE_PM, &phy->state))
return 0;
+ if (pm->suspended)
+ return 0;
+
queue_work(dev->wq, &pm->wake_work);
if (!wait_event_timeout(pm->wait,
!test_bit(MT76_STATE_PM, &phy->state),
@@ -34,12 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
{
struct mt76_dev *dev = phy->dev;
- if (!mt76_is_mmio(dev))
+ if (mt76_is_usb(dev))
return;
if (!pm->enable)
return;
+ if (pm->suspended)
+ return;
+
pm->last_activity = jiffies;
if (!test_bit(MT76_STATE_PM, &phy->state)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index fe0ab5e5ff81..5c3a81e5f559 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- bool enable)
+ bool enable, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@@ -316,7 +316,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ if (newly)
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
basic->conn_state = CONN_STATE_PORT_SECURE;
} else {
basic->conn_state = CONN_STATE_DISCONNECT;
@@ -393,6 +394,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
void *sta_wtbl, void *wtbl_tlv)
{
@@ -404,9 +406,46 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ htr->to_ds = true;
+ else
+ htr->from_ds = true;
+
+ if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
+ htr->to_ds = true;
+ htr->from_ds = true;
+ }
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid, int cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
+ sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -671,7 +710,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
- u8 rcpi)
+ u8 rcpi, u8 sta_state)
{
struct cfg80211_chan_def *chandef = &mphy->chandef;
enum nl80211_band band = chandef->chan->band;
@@ -721,6 +760,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
+ phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+ sta->ht_cap.ampdu_factor) |
+ FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+ sta->ht_cap.ampdu_density);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
@@ -732,7 +775,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
state = (struct sta_rec_state *)tlv;
- state->state = 2;
+ state->state = sta_state;
if (sta->vht_cap.vht_supported) {
state->vht_opmode = sta->bandwidth;
@@ -824,8 +867,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info)
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info)
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
@@ -837,10 +880,13 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
- if (info->enable && info->sta)
- mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
- info->rcpi);
+ if (info->sta || !info->offload_fw)
+ mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta,
+ info->enable, info->newly);
+ if (info->sta && info->enable)
+ mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
+ info->vif, info->rcpi,
+ info->state);
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
sizeof(struct tlv));
@@ -855,6 +901,8 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
info->sta, sta_wtbl,
wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
+ sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
sta_wtbl, wtbl_hdr);
@@ -862,7 +910,7 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_cmd);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
@@ -891,8 +939,10 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
- if (is_mt7921(dev))
+ if (is_mt7921(dev)) {
+ ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
+ }
if (enable && tx) {
u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
@@ -1267,6 +1317,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
u8 pad[3];
} __packed hdr;
struct bss_info_uni_he he;
+ struct bss_info_uni_bss_color bss_color;
} he_req = {
.hdr = {
.bss_idx = mvif->idx,
@@ -1275,8 +1326,21 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
.len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
},
+ .bss_color = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BSS_COLOR),
+ .len = cpu_to_le16(sizeof(struct bss_info_uni_bss_color)),
+ .enable = 0,
+ .bss_color = 0,
+ },
};
+ if (enable) {
+ he_req.bss_color.enable =
+ vif->bss_conf.he_bss_color.enabled;
+ he_req.bss_color.bss_color =
+ vif->bss_conf.he_bss_color.color;
+ }
+
mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
(struct tlv *)&he_req.he);
err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
@@ -1459,14 +1523,16 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req->version = 1;
req->seq_num = mvif->scan_seq_num | ext_phy << 7;
- if (is_mt7663(phy->dev) &&
- (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
- get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
- sreq->mac_addr_mask);
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
+ : req->mt7921.random_mac;
+
req->scan_func = 1;
- } else if (is_mt7921(phy->dev)) {
- req->mt7921.bss_idx = mvif->idx;
+ get_random_mask_addr(addr, sreq->mac_addr,
+ sreq->mac_addr_mask);
}
+ if (is_mt7921(phy->dev))
+ req->mt7921.bss_idx = mvif->idx;
req->ssids_num = sreq->n_ssids;
for (i = 0; i < req->ssids_num; i++) {
@@ -1552,6 +1618,26 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ if ((old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) ||
+ (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ mt76_connac_mcu_set_deep_sleep(dev, true);
+
+ if ((old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) ||
+ (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC))
+ mt76_connac_mcu_set_deep_sleep(dev, false);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);
+
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump)
{
@@ -1566,6 +1652,60 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
+{
+ struct mt76_connac_cap_hdr {
+ __le16 n_element;
+ u8 rsv[2];
+ } __packed *hdr;
+ struct sk_buff *skb;
+ int ret, i;
+
+ ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
+ 0, true, &skb);
+ if (ret)
+ return ret;
+
+ hdr = (struct mt76_connac_cap_hdr *)skb->data;
+ if (skb->len < sizeof(*hdr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skb_pull(skb, sizeof(*hdr));
+
+ for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
+ struct tlv_hdr {
+ __le32 type;
+ __le32 len;
+ } __packed *tlv = (struct tlv_hdr *)skb->data;
+ int len;
+
+ if (skb->len < sizeof(*tlv))
+ break;
+
+ skb_pull(skb, sizeof(*tlv));
+
+ len = le32_to_cpu(tlv->len);
+ if (skb->len < len)
+ break;
+
+ switch (le32_to_cpu(tlv->type)) {
+ case MT_NIC_CAP_6G:
+ phy->cap.has_6ghz = skb->data[0];
+ break;
+ default:
+ break;
+ }
+ skb_pull(skb, len);
+ }
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability);
+
static void
mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
struct mt76_power_limits *limits,
@@ -1628,12 +1768,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
142, 144, 149, 151, 153, 155, 157,
159, 161, 165
};
+ int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
struct mt76_connac_sku_tlv sku_tlbv;
- int i, n_chan, batch_size, idx = 0;
struct mt76_power_limits limits;
const u8 *ch_list;
sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
+ tx_power = 2 * phy->hw->conf.power_level;
+ if (!tx_power)
+ tx_power = 127;
if (band == NL80211_BAND_2GHZ) {
n_chan = ARRAY_SIZE(chan_list_2ghz);
@@ -1644,39 +1787,48 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
}
batch_size = DIV_ROUND_UP(n_chan, batch_len);
+ if (!phy->cap.has_5ghz)
+ last_ch = chan_list_2ghz[n_chan - 1];
+ else
+ last_ch = chan_list_5ghz[n_chan - 1];
+
for (i = 0; i < batch_size; i++) {
- bool last_msg = i == batch_size - 1;
- int num_ch = last_msg ? n_chan % batch_len : batch_len;
struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
.band = band == NL80211_BAND_2GHZ ? 1 : 2,
- .n_chan = num_ch,
- .last_msg = last_msg,
};
+ int j, err, msg_len, num_ch;
struct sk_buff *skb;
- int j, err, msg_len;
+ num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb)
return -ENOMEM;
+ skb_reserve(skb, sizeof(tx_power_tlv));
+
BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
+ tx_power_tlv.n_chan = num_ch;
- skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
for (j = 0; j < num_ch; j++, idx++) {
struct ieee80211_channel chan = {
.hw_value = ch_list[idx],
.band = band,
};
- mt76_get_rate_power_limits(phy, &chan, &limits, 127);
+ mt76_get_rate_power_limits(phy, &chan, &limits,
+ tx_power);
+ tx_power_tlv.last_msg = ch_list[idx] == last_ch;
sku_tlbv.channel = ch_list[idx];
+
mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
&limits, band);
skb_put_data(skb, &sku_tlbv, sku_len);
}
+ __skb_push(skb, sizeof(tx_power_tlv));
+ memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
err = mt76_mcu_skb_send_msg(dev, skb,
MCU_CMD_SET_RATE_TX_POWER, false);
@@ -1691,11 +1843,20 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
{
int err;
- err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
- if (err < 0)
- return err;
+ if (phy->cap.has_2ghz) {
+ err = mt76_connac_mcu_rate_txpower_band(phy,
+ NL80211_BAND_2GHZ);
+ if (err < 0)
+ return err;
+ }
+ if (phy->cap.has_5ghz) {
+ err = mt76_connac_mcu_rate_txpower_band(phy,
+ NL80211_BAND_5GHZ);
+ if (err < 0)
+ return err;
+ }
- return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
@@ -1935,7 +2096,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
ptlv->index = index;
memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
- memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
+ memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
}
@@ -1970,14 +2131,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
};
if (wowlan->magic_pkt)
- req.wow_ctrl_tlv.trigger |= BIT(0);
+ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
if (wowlan->disconnect)
- req.wow_ctrl_tlv.trigger |= BIT(2);
+ req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
+ UNI_WOW_DETECT_TYPE_BCN_LOST);
if (wowlan->nd_config) {
mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
- req.wow_ctrl_tlv.trigger |= BIT(5);
+ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
}
+ if (wowlan->n_patterns)
+ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
if (mt76_is_mmio(dev))
req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index a1096861d04a..1c73beb22677 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -559,6 +559,7 @@ enum {
MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+ MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
@@ -575,6 +576,7 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RLM = 2,
+ UNI_BSS_INFO_BSS_COLOR = 4,
UNI_BSS_INFO_HE_BASIC = 5,
UNI_BSS_INFO_BCN_CONTENT = 7,
UNI_BSS_INFO_QBSS = 15,
@@ -591,6 +593,36 @@ enum {
};
enum {
+ MT_NIC_CAP_TX_RESOURCE,
+ MT_NIC_CAP_TX_EFUSE_ADDR,
+ MT_NIC_CAP_COEX,
+ MT_NIC_CAP_SINGLE_SKU,
+ MT_NIC_CAP_CSUM_OFFLOAD,
+ MT_NIC_CAP_HW_VER,
+ MT_NIC_CAP_SW_VER,
+ MT_NIC_CAP_MAC_ADDR,
+ MT_NIC_CAP_PHY,
+ MT_NIC_CAP_MAC,
+ MT_NIC_CAP_FRAME_BUF,
+ MT_NIC_CAP_BEAM_FORM,
+ MT_NIC_CAP_LOCATION,
+ MT_NIC_CAP_MUMIMO,
+ MT_NIC_CAP_BUFFER_MODE_INFO,
+ MT_NIC_CAP_HW_ADIE_VERSION = 0x14,
+ MT_NIC_CAP_ANTSWP = 0x16,
+ MT_NIC_CAP_WFDMA_REALLOC,
+ MT_NIC_CAP_6G,
+};
+
+#define UNI_WOW_DETECT_TYPE_MAGIC BIT(0)
+#define UNI_WOW_DETECT_TYPE_ANY BIT(1)
+#define UNI_WOW_DETECT_TYPE_DISCONNECT BIT(2)
+#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL BIT(3)
+#define UNI_WOW_DETECT_TYPE_BCN_LOST BIT(4)
+#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT BIT(5)
+#define UNI_WOW_DETECT_TYPE_BITMAP BIT(6)
+
+enum {
UNI_SUSPEND_MODE_SETTING,
UNI_SUSPEND_WOW_CTRL,
UNI_SUSPEND_WOW_GPIO_PARAM,
@@ -762,7 +794,7 @@ struct mt76_connac_sched_scan_req {
u8 intervals_num;
u8 scan_func; /* MT7663: BIT(0) enable random mac address */
struct mt76_connac_mcu_scan_channel channels[64];
- __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
+ __le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL];
union {
struct {
u8 random_mac[ETH_ALEN];
@@ -770,7 +802,9 @@ struct mt76_connac_sched_scan_req {
} mt7663;
struct {
u8 bss_idx;
- u8 pad2[63];
+ u8 pad2[19];
+ u8 random_mac[ETH_ALEN];
+ u8 pad3[38];
} mt7921;
};
} __packed;
@@ -781,6 +815,14 @@ struct mt76_connac_sched_scan_done {
__le16 pad;
} __packed;
+struct bss_info_uni_bss_color {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 bss_color;
+ u8 rsv[2];
+} __packed;
+
struct bss_info_uni_he {
__le16 tag;
__le16 len;
@@ -885,15 +927,24 @@ struct mt76_connac_suspend_tlv {
u8 pad[5];
} __packed;
+enum mt76_sta_info_state {
+ MT76_STA_INFO_STATE_NONE,
+ MT76_STA_INFO_STATE_AUTH,
+ MT76_STA_INFO_STATE_ASSOC
+};
+
struct mt76_sta_cmd_info {
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
+ bool offload_fw;
bool enable;
+ bool newly;
int cmd;
u8 rcpi;
+ u8 state;
};
#define MT_SKU_POWER_LIMIT 161
@@ -963,18 +1014,23 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, bool enable,
+ bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid, int cmd);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
- u8 rcpi);
+ u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
void *wtbl_tlv);
@@ -996,8 +1052,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
bool enable);
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info);
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info);
void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
@@ -1008,6 +1064,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@@ -1028,6 +1085,9 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index dd66fd12a2e6..cea24213186c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -68,7 +68,7 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
nic_conf1 &= 0xff00;
if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
- dev_err(dev->mt76.dev,
+ dev_dbg(dev->mt76.dev,
"driver does not support HW RF ctrl\n");
if (!mt76x02_field_valid(nic_conf0 >> 8))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 5847f943e8da..b795e7245c07 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.reconfig_complete = mt76x02_reconfig_complete,
};
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
{
int err;
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- err = mt76x02_dma_init(dev);
- if (err < 0)
- return err;
+ if (!resume) {
+ err = mt76x02_dma_init(dev);
+ if (err < 0)
+ return err;
+ }
err = mt76x0_init_hardware(dev);
if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ err = mt76x0e_init_hardware(dev, false);
+ if (err < 0)
+ return err;
+
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ mt76_pci_disable_aspm(pdev);
+
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
&drv_ops);
if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
mt76_free_device(mdev);
}
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int i;
+
+ mt76_worker_disable(&mdev->tx_worker);
+ for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+ for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+ napi_disable(&mdev->tx_napi);
+
+ mt76_for_each_q_rx(mdev, i)
+ napi_disable(&mdev->napi[i]);
+
+ mt76x02_dma_disable(dev);
+ mt76x02_mcu_cleanup(dev);
+ mt76x0_chip_onoff(dev, false, false);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+
+ return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int err, i;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ mt76_worker_enable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i) {
+ mt76_queue_rx_reset(dev, i);
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id mt76x0e_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
.id_table = mt76x0e_device_table,
.probe = mt76x0e_probe,
.remove = mt76x0e_remove,
+#ifdef CONFIG_PM
+ .suspend = mt76x0e_suspend,
+ .resume = mt76x0e_resume,
+#endif /* CONFIG_PM */
};
module_pci_driver(mt76x0e_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 0da37867cb64..c32e6dc68773 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -34,24 +34,24 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
if (!key)
- return MT_CIPHER_NONE;
+ return MT76X02_CIPHER_NONE;
if (key->keylen > 32)
- return MT_CIPHER_NONE;
+ return MT76X02_CIPHER_NONE;
memcpy(key_data, key->key, key->keylen);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
+ return MT76X02_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
+ return MT76X02_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
+ return MT76X02_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
+ return MT76X02_CIPHER_AES_CCMP;
default:
- return MT_CIPHER_NONE;
+ return MT76X02_CIPHER_NONE;
}
}
@@ -63,7 +63,7 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u32 val;
cipher = mt76x02_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
+ if (cipher == MT76X02_CIPHER_NONE && key)
return -EOPNOTSUPP;
val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
@@ -91,10 +91,10 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
pn = (u64)eiv << 16;
- if (cipher == MT_CIPHER_TKIP) {
+ if (cipher == MT76X02_CIPHER_TKIP) {
pn |= (iv >> 16) & 0xff;
pn |= (iv & 0xff) << 8;
- } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
pn |= iv & 0xffff;
} else {
return;
@@ -112,7 +112,7 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
+ if (cipher == MT76X02_CIPHER_NONE && key)
return -EOPNOTSUPP;
mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
@@ -126,16 +126,16 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
pn = atomic64_read(&key->tx_pn);
iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP) {
+ if (cipher >= MT76X02_CIPHER_TKIP) {
iv_data[3] |= 0x20;
put_unaligned_le32(pn >> 16, &iv_data[4]);
}
- if (cipher == MT_CIPHER_TKIP) {
+ if (cipher == MT76X02_CIPHER_TKIP) {
iv_data[0] = (pn >> 8) & 0xff;
iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
iv_data[2] = pn & 0xff;
- } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
put_unaligned_le16((pn & 0xffff), &iv_data[0]);
}
}
@@ -1022,12 +1022,12 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
-void mt76x02_update_channel(struct mt76_dev *mdev)
+void mt76x02_update_channel(struct mt76_phy *mphy)
{
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->phy.chan_state;
+ state = mphy->chan_state;
state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
@@ -1169,7 +1169,7 @@ void mt76x02_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
- mt76_update_survey(&dev->mt76);
+ mt76_update_survey(&dev->mphy);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 0cfbaca50210..5dc6c834111e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -195,7 +195,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_update_channel(struct mt76_phy *mphy);
void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 3e722276b5c2..fa7872ac22bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -692,15 +692,15 @@ struct mt76_wcid_key {
} __packed __aligned(4);
enum mt76x02_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_WEP104,
- MT_CIPHER_TKIP,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_CKIP40,
- MT_CIPHER_CKIP104,
- MT_CIPHER_CKIP128,
- MT_CIPHER_WAPI,
+ MT76X02_CIPHER_NONE,
+ MT76X02_CIPHER_WEP40,
+ MT76X02_CIPHER_WEP104,
+ MT76X02_CIPHER_TKIP,
+ MT76X02_CIPHER_AES_CCMP,
+ MT76X02_CIPHER_CKIP40,
+ MT76X02_CIPHER_CKIP104,
+ MT76X02_CIPHER_CKIP128,
+ MT76X02_CIPHER_WAPI,
};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 02db5d66735d..ccdbab341271 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -7,24 +7,18 @@
#include <linux/module.h>
#include "mt76x02.h"
-#define CCK_RATE(_idx, _rate) { \
+#define MT76x02_CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)), \
}
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
-}
-
struct ieee80211_rate mt76x02_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
+ MT76x02_CCK_RATE(0, 10),
+ MT76x02_CCK_RATE(1, 20),
+ MT76x02_CCK_RATE(2, 55),
+ MT76x02_CCK_RATE(3, 110),
OFDM_RATE(0, 60),
OFDM_RATE(1, 90),
OFDM_RATE(2, 120),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index 40c8061787e9..80e49244348e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7915E) += mt7915e.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 6a8ddeeecbe9..64048243e34b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -3,6 +3,7 @@
#include "mt7915.h"
#include "eeprom.h"
+#include "mcu.h"
/** global debugfs **/
@@ -16,7 +17,7 @@ mt7915_implicit_txbf_set(void *data, u64 val)
dev->ibf = !!val;
- return mt7915_mcu_set_txbf_type(dev);
+ return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
static int
@@ -147,6 +148,9 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
{
struct mt7915_dev *dev = s->private;
bool ext_phy = phy != &dev->phy;
+ static const char * const bw[] = {
+ "BW20", "BW40", "BW80", "BW160"
+ };
int cnt;
if (!phy)
@@ -164,11 +168,16 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
- seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
+ seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld, ",
FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
+ seq_printf(s, "%s, NC: %ld, NR: %ld\n",
+ bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)],
+ FIELD_GET(MT_ETBF_RX_FB_NC, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_NR, cnt));
/* Tx Beamformee Rx NDPA & Tx feedback report */
cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
@@ -204,7 +213,7 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
/* Tx amsdu info */
- seq_puts(file, "Tx MSDU stat:\n");
+ seq_puts(file, "Tx MSDU statistics:\n");
for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
n += stat[i];
@@ -224,18 +233,6 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
-static int mt7915_read_temperature(struct seq_file *s, void *data)
-{
- struct mt7915_dev *dev = dev_get_drvdata(s->private);
- int temp;
-
- /* cpu */
- temp = mt7915_mcu_get_temperature(dev, 0);
- seq_printf(s, "Temperature: %d\n", temp);
-
- return 0;
-}
-
static int
mt7915_queues_acq(struct seq_file *s, void *data)
{
@@ -307,54 +304,23 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
- struct mt7915_dev *dev = dev_get_drvdata(s->private);
- bool ext_phy = phy != &dev->phy;
- u32 reg_base;
- int i, idx = 0;
+ s8 txpower[MT7915_SKU_RATE_NUM], *buf;
+ int i;
if (!phy)
return;
- reg_base = MT_TMAC_FP0R0(ext_phy);
- seq_printf(s, "\nBand %d\n", ext_phy);
+ seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy);
- for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
- u8 cnt, mcs_num = mt7915_sku_group_len[i];
- s8 txpower[12];
- int j;
+ mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
+ for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+ u8 mcs_num = mt7915_sku_group_len[i];
- if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
- mcs_num = 8;
- } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
+ if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
mcs_num = 10;
- } else if (i == SKU_HE_RU26) {
- reg_base = MT_TMAC_FP0R18(ext_phy);
- idx = 0;
- }
-
- for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
- u32 val;
-
- if (i == SKU_VHT_BW160 && idx == 60) {
- reg_base = MT_TMAC_FP0R15(ext_phy);
- idx = 0;
- }
-
- val = mt76_rr(dev, reg_base + (idx / 4) * 4);
-
- if (idx && idx % 4)
- val >>= (idx % 4) * 8;
-
- while (val > 0 && cnt < mcs_num) {
- s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
-
- txpower[cnt++] = pwr;
- val >>= 8;
- idx++;
- }
- }
- mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
+ mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num);
+ buf += mt7915_sku_group_len[i];
}
}
@@ -390,8 +356,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
- debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
- mt7915_read_temperature);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
mt7915_read_rate_txpower);
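The rewritten dump above replaces per-register parsing with a single MCU query: mt7915_mcu_get_txpower_sku() fills one flat s8 array, and the loop walks it with a moving pointer, one variable-length SKU group at a time, printing mcs_num entries but always advancing by the full group length. A minimal standalone sketch of that walking-pointer pattern, with made-up group lengths and names:

#include <stdio.h>

static const unsigned char group_len[] = { 4, 8, 8, 10 };
static const char * const group_name[] = { "CCK", "OFDM", "HT_BW20", "VHT_BW20" };

int main(void)
{
	signed char txpower[30] = { 0 };	/* filled via mt7915_mcu_get_txpower_sku() in the driver */
	const signed char *buf = txpower;
	unsigned int i, j;

	for (i = 0; i < sizeof(group_len); i++) {
		printf("%s:", group_name[i]);
		for (j = 0; j < group_len[i]; j++)
			printf(" %d", buf[j]);
		printf("\n");
		buf += group_len[i];	/* always advance by the full group length */
	}
	return 0;
}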
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 11d0b760abd7..9182568f95c7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -19,39 +19,6 @@ int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
return 0;
}
-void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- enum rx_pkt_type type;
-
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7915_mac_tx_free(dev, skb);
- break;
- case PKT_TYPE_RX_EVENT:
- mt7915_mcu_rx_event(dev, skb);
- break;
-#ifdef CONFIG_NL80211_TESTMODE
- case PKT_TYPE_TXRXV:
- mt7915_mac_fill_rx_vector(dev, skb);
- break;
-#endif
- case PKT_TYPE_NORMAL:
- if (!mt7915_mac_fill_rx(dev, skb)) {
- mt76_rx(&dev->mt76, q, skb);
- return;
- }
- fallthrough;
- default:
- dev_kfree_skb(skb);
- break;
- }
-}
-
static void
mt7915_tx_cleanup(struct mt7915_dev *dev)
{
@@ -112,8 +79,6 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
int mt7915_dma_init(struct mt7915_dev *dev)
{
- /* Increase buffer size to receive large VHT/HE MPDUs */
- int rx_buf_size = MT_RX_BUF_SIZE * 2;
u32 hif1_ofs = 0;
int ret;
@@ -177,28 +142,28 @@ int mt7915_dma_init(struct mt7915_dev *dev)
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
- rx_buf_size, MT_RX_EVENT_RING_BASE);
+ MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
- rx_buf_size, MT_RX_EVENT_RING_BASE);
+ MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
if (ret)
return ret;
/* rx data queue */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
- rx_buf_size, MT_RX_DATA_RING_BASE);
+ MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
if (ret)
return ret;
if (dev->dbdc_support) {
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
- rx_buf_size,
+ MT_RX_BUF_SIZE,
MT_RX_DATA_RING_BASE + hif1_ofs);
if (ret)
return ret;
@@ -207,7 +172,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
MT7915_RXQ_MCU_WA_EXT,
MT7915_RX_MCU_RING_SIZE,
- rx_buf_size,
+ MT_RX_BUF_SIZE,
MT_RX_EVENT_RING_BASE + hif1_ofs);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 8ededf2e5279..ee3d64434821 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -4,22 +4,12 @@
#include "mt7915.h"
#include "eeprom.h"
-static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
-{
- u8 *data = dev->mt76.eeprom.data;
-
- if (data[offset] == 0xff && !dev->flash_mode)
- mt7915_mcu_get_eeprom(dev, offset);
-
- return data[offset];
-}
-
static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
- u32 val;
+ u8 *eeprom = mdev->eeprom.data;
+ u32 val = eeprom[MT_EE_DO_PRE_CAL];
- val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
return 0;
@@ -43,7 +33,13 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
dev->flash_mode = true;
ret = mt7915_eeprom_load_precal(dev);
} else {
- memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+ u32 block_num, i;
+
+ block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+ MT7915_EEPROM_BLOCK_SIZE);
+ for (i = 0; i < block_num; i++)
+ mt7915_mcu_get_eeprom(dev,
+ i * MT7915_EEPROM_BLOCK_SIZE);
}
return ret;
@@ -52,10 +48,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
static int mt7915_check_eeprom(struct mt7915_dev *dev)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u16 val;
-
- mt7915_eeprom_read(dev, MT_EE_CHIP_ID);
- val = get_unaligned_le16(eeprom);
+ u16 val = get_unaligned_le16(eeprom);
switch (val) {
case 0x7915:
@@ -69,9 +62,10 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
- val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
+ val = eeprom[MT_EE_WIFI_CONF + ext_phy];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
@@ -143,6 +137,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
+ u8 *eeprom = dev->mt76.eeprom.data;
int index, target_power;
bool tssi_on;
@@ -153,18 +148,18 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
if (chan->band == NL80211_BAND_2GHZ) {
index = MT_EE_TX0_POWER_2G + chain_idx * 3;
- target_power = mt7915_eeprom_read(dev, index);
+ target_power = eeprom[index];
if (!tssi_on)
- target_power += mt7915_eeprom_read(dev, index + 1);
+ target_power += eeprom[index + 1];
} else {
int group = mt7915_get_channel_group(chan->hw_value);
index = MT_EE_TX0_POWER_5G + chain_idx * 12;
- target_power = mt7915_eeprom_read(dev, index + group);
+ target_power = eeprom[index + group];
if (!tssi_on)
- target_power += mt7915_eeprom_read(dev, index + 8);
+ target_power += eeprom[index + 8];
}
return target_power;
@@ -172,13 +167,14 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
+ u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
s8 delta;
if (band == NL80211_BAND_2GHZ)
- val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
+ val = eeprom[MT_EE_RATE_DELTA_2G];
else
- val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
+ val = eeprom[MT_EE_RATE_DELTA_5G];
if (!(val & MT_EE_RATE_DELTA_EN))
return 0;
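With mt7915_eeprom_read() gone, nothing fetches missing bytes lazily anymore; mt7915_eeprom_load() now prefetches the whole EEPROM block by block over the MCU when no flash image is present, so the later helpers can index the cached array directly. A standalone sketch of that eager loop; both sizes are assumptions standing in for MT7915_EEPROM_SIZE and MT7915_EEPROM_BLOCK_SIZE:

#include <stdio.h>

#define EEPROM_SIZE		3584
#define EEPROM_BLOCK_SIZE	16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void mcu_get_eeprom(unsigned int offset)
{
	(void)offset;	/* an MCU command in the driver; a stub here */
}

int main(void)
{
	unsigned int i, blocks = DIV_ROUND_UP(EEPROM_SIZE, EEPROM_BLOCK_SIZE);

	for (i = 0; i < blocks; i++)
		mcu_get_eeprom(i * EEPROM_BLOCK_SIZE);

	printf("fetched %u blocks\n", blocks);
	return 0;
}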
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 033fb592bdf0..a43389a41800 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -33,7 +33,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CAL_GROUP BIT(0)
#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
#define MT_EE_CAL_UNIT 1024
-#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
@@ -99,12 +99,15 @@ static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
u8 *eep = dev->mt76.eeprom.data;
+ u8 val = eep[MT_EE_WIFI_CONF + 7];
- /* TODO: DBDC */
- if (band == NL80211_BAND_5GHZ)
- return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
+ if (band == NL80211_BAND_2GHZ)
+ return val & MT_EE_WIFI_CONF7_TSSI0_2G;
+
+ if (dev->dbdc_support)
+ return val & MT_EE_WIFI_CONF7_TSSI1_5G;
else
- return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
+ return val & MT_EE_WIFI_CONF7_TSSI0_5G;
}
extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
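The reworked helper above finally covers DBDC: 2 GHz always checks TSSI0_2G, while 5 GHz checks TSSI1_5G on DBDC parts and TSSI0_5G otherwise. Restated as a tiny standalone function; the bit values are placeholders for the MT_EE_WIFI_CONF7_* masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values standing in for the MT_EE_WIFI_CONF7_* masks. */
#define TSSI0_2G 0x01
#define TSSI0_5G 0x02
#define TSSI1_5G 0x04

static bool tssi_enabled(uint8_t conf7, bool band_2ghz, bool dbdc_support)
{
	if (band_2ghz)
		return conf7 & TSSI0_2G;

	/* on DBDC parts the 5 GHz band uses its own TSSI enable bit */
	return conf7 & (dbdc_support ? TSSI1_5G : TSSI0_5G);
}

int main(void)
{
	printf("%d\n", tssi_enabled(TSSI1_5G, false, true));	/* prints 1 */
	return 0;
}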
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 822f3aa6bb8b..4798d6344305 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -2,39 +2,14 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
#include "mt7915.h"
#include "mac.h"
#include "mcu.h"
#include "eeprom.h"
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
-}
-
-static struct ieee80211_rate mt7915_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(11, 60),
- OFDM_RATE(15, 90),
- OFDM_RATE(10, 120),
- OFDM_RATE(14, 180),
- OFDM_RATE(9, 240),
- OFDM_RATE(13, 360),
- OFDM_RATE(8, 480),
- OFDM_RATE(12, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -67,6 +42,117 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
+static ssize_t mt7915_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mt7915_phy *phy = dev_get_drvdata(dev);
+ int temperature;
+
+ temperature = mt7915_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+
+ /* display in millidegrees Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *mt7915_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7915_hwmon);
+
+static int
+mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MT7915_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct mt7915_phy *phy = cdev->devdata;
+
+ *state = phy->throttle_state;
+
+ return 0;
+}
+
+static int
+mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct mt7915_phy *phy = cdev->devdata;
+ int ret;
+
+ if (state > MT7915_THERMAL_THROTTLE_MAX)
+ return -EINVAL;
+
+ if (state == phy->throttle_state)
+ return 0;
+
+ ret = mt7915_mcu_set_thermal_throttling(phy, state);
+ if (ret)
+ return ret;
+
+ phy->throttle_state = state;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mt7915_thermal_ops = {
+ .get_max_state = mt7915_thermal_get_max_throttle_state,
+ .get_cur_state = mt7915_thermal_get_cur_throttle_state,
+ .set_cur_state = mt7915_thermal_set_cur_throttle_state,
+};
+
+static void mt7915_unregister_thermal(struct mt7915_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+
+ if (!phy->cdev)
+ return;
+
+ sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(phy->cdev);
+}
+
+static int mt7915_thermal_init(struct mt7915_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon;
+
+ cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy,
+ &mt7915_thermal_ops);
+ if (!IS_ERR(cdev)) {
+ if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device") < 0)
+ thermal_cooling_device_unregister(cdev);
+ else
+ phy->cdev = cdev;
+ }
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+ wiphy_name(wiphy), phy,
+ mt7915_hwmon_groups);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ return 0;
+}
+
static void
mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
@@ -201,7 +287,6 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
- mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
@@ -228,20 +313,19 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
{
int ret;
-
if (dev->dbdc_support) {
- ret = mt7915_mcu_set_txbf_module(dev);
+ ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
if (ret)
return ret;
}
/* trigger sounding packets */
- ret = mt7915_mcu_set_txbf_sounding(dev);
+ ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
if (ret)
return ret;
/* enable eBF */
- return mt7915_mcu_set_txbf_type(dev);
+ return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
static int mt7915_register_ext_phy(struct mt7915_dev *dev)
@@ -281,8 +365,12 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
if (ret)
goto error;
- ret = mt76_register_phy(mphy, true, mt7915_rates,
- ARRAY_SIZE(mt7915_rates));
+ ret = mt76_register_phy(mphy, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ goto error;
+
+ ret = mt7915_thermal_init(phy);
if (ret)
goto error;
@@ -480,6 +568,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
if (nss < 2)
return;
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+
if (vif != NL80211_IFTYPE_AP)
return;
@@ -493,9 +584,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
-
- /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
- elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
}
static void
@@ -579,8 +667,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_AP:
- he_cap_elem->mac_cap_info[0] |=
- IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[4] |=
@@ -594,8 +680,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
break;
case NL80211_IFTYPE_STATION:
- he_cap_elem->mac_cap_info[0] |=
- IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@@ -690,6 +774,7 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
if (!phy)
return;
+ mt7915_unregister_thermal(phy);
mt76_unregister_phy(mphy);
ieee80211_free_hw(mphy->hw);
}
@@ -731,8 +816,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
- ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
- ARRAY_SIZE(mt7915_rates));
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ return ret;
+
+ ret = mt7915_thermal_init(&dev->phy);
if (ret)
return ret;
@@ -748,10 +837,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
void mt7915_unregister_device(struct mt7915_dev *dev)
{
mt7915_unregister_ext_phy(dev);
+ mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
mt7915_tx_token_put(dev);
mt7915_dma_cleanup(dev);
+ tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 7a9759fb79d8..2462704094b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -307,7 +307,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
}
}
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+static int
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -412,14 +413,27 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
- status->iv[0] = data[5];
- status->iv[1] = data[4];
- status->iv[2] = data[3];
- status->iv[3] = data[2];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
}
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
@@ -610,9 +624,10 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
return 0;
}
-#ifdef CONFIG_NL80211_TESTMODE
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
+#ifdef CONFIG_NL80211_TESTMODE
struct mt7915_phy *phy = &dev->phy;
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv_hdr = rxd + 2;
@@ -650,10 +665,10 @@ void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
phy->test.last_freq_offset = foe;
phy->test.last_snr = snr;
+#endif
dev_kfree_skb(skb);
}
-#endif
static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
@@ -885,7 +900,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
}
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, bool beacon)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -944,7 +959,12 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
- txwi[5] = 0;
+
+ val = FIELD_PREP(MT_TXD5_PID, pid);
+ if (pid >= MT_PACKET_ID_FIRST)
+ val |= MT_TXD5_TX_STATUS_HOST;
+ txwi[5] = cpu_to_le32(val);
+
txwi[6] = 0;
txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
@@ -984,11 +1004,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
struct mt76_txwi_cache *t;
struct mt7915_txp *txp;
int id, i, nbuf = tx_info->nbuf - 1;
u8 *txwi = (u8 *)txwi_ptr;
+ int pid;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
return -EINVAL;
@@ -996,10 +1016,10 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
- mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- false);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- cb->wcid = wcid->idx;
+ mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
+ false);
txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
@@ -1071,54 +1091,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
static void
-mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
- struct ieee80211_sta *sta, u8 stat,
- struct list_head *free_list)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_status status = {
- .sta = sta,
- .info = info,
- .skb = skb,
- .free_list = free_list,
- };
- struct ieee80211_hw *hw;
-
- if (sta) {
- struct mt7915_sta *msta;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- status.rate = &msta->stats.tx_rate;
- }
-
-#ifdef CONFIG_NL80211_TESTMODE
- if (mt76_is_testmode_skb(mdev, skb, &hw)) {
- struct mt7915_phy *phy = mt7915_hw_phy(hw);
- struct ieee80211_vif *vif = phy->monitor_vif;
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-
- mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb);
- return;
- }
-#endif
-
- hw = mt76_tx_status_get_hw(mdev, skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- info->flags |= IEEE80211_TX_STAT_AMPDU;
-
- if (stat)
- ieee80211_tx_info_clear_status(info);
-
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.tx_time = 0;
- ieee80211_tx_status_ext(hw, &status);
-}
-
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
+mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
struct mt7915_txp *txp;
int i;
@@ -1129,7 +1102,39 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_wcid *wcid;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt7915_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wcid_idx = wcid->idx;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi);
+ } else {
+ wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+
+static void
+mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
struct mt76_dev *mdev = &dev->mt76;
@@ -1194,28 +1199,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
if (!txwi)
continue;
- mt7915_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
- void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
-
- if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7915_tx_check_aggr(sta, txwi_ptr);
-
- if (sta && !info->tx_time_est) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
- int pending;
-
- pending = atomic_dec_return(&wcid->non_aql_packets);
- if (pending < 0)
- atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
- }
-
- mt7915_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
- txwi->skb = NULL;
- }
-
- mt76_put_txwi(mdev, txwi);
+ mt7915_txwi_free(dev, txwi, sta, &free_list);
}
mt7915_mac_sta_poll(dev);
@@ -1233,6 +1217,120 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
}
}
+static bool
+mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_tx_info *info;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (!skb)
+ goto out;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK)))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ info->status.rates[0].idx = -1;
+ mt76_tx_status_skb_done(mdev, skb, &list);
+
+out:
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+{
+ struct mt7915_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u16 wcidx;
+ u32 txs;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[0]);
+ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ return;
+
+ txs = le32_to_cpu(txs_data[2]);
+ wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+ txs = le32_to_cpu(txs_data[3]);
+ pid = FIELD_GET(MT_TXS3_PID, txs);
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return;
+
+ if (wcidx >= MT7915_WTBL_SIZE)
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data);
+
+ if (!wcid->sta)
+ goto out;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+out:
+ rcu_read_unlock();
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7915_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_TXRXV:
+ mt7915_mac_fill_rx_vector(dev, skb);
+ break;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7915_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7915_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7915_dev *dev;
@@ -1254,15 +1352,8 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
e->skb = t ? t->skb : NULL;
}
- if (e->skb) {
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
- struct mt76_wcid *wcid;
-
- wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
-
- mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
- NULL);
- }
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
@@ -1296,14 +1387,10 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy)
memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
/* reset airtime counters */
- mt76_rr(dev, MT_MIB_SDR9(ext_phy));
- mt76_rr(dev, MT_MIB_SDR36(ext_phy));
- mt76_rr(dev, MT_MIB_SDR37(ext_phy));
-
- mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
- MT_WF_RMAC_MIB_RXTIME_CLR);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
MT_WF_RMAC_MIB_RXTIME_CLR);
+
+ mt7915_mcu_get_chan_mib_info(phy, true);
}
void mt7915_mac_set_timing(struct mt7915_phy *phy)
@@ -1397,53 +1484,24 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
return sum / n;
}
-static void
-mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
+void mt7915_update_channel(struct mt76_phy *mphy)
{
- struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
- struct mt76_channel_state *state;
- u64 busy_time, tx_time, rx_time, obss_time;
+ struct mt76_channel_state *state = mphy->chan_state;
+ bool ext_phy = phy != &phy->dev->phy;
int nf;
- busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
- MT_MIB_SDR9_BUSY_MASK);
- tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
- MT_MIB_SDR36_TXTIME_MASK);
- rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
- MT_MIB_SDR37_RXTIME_MASK);
- obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
- MT_MIB_OBSSTIME_MASK);
+ mt7915_mcu_get_chan_mib_info(phy, false);
- nf = mt7915_phy_get_nf(phy, idx);
+ nf = mt7915_phy_get_nf(phy, ext_phy);
if (!phy->noise)
phy->noise = nf << 4;
else if (nf)
phy->noise += nf - (phy->noise >> 4);
- state = mphy->chan_state;
- state->cc_busy += busy_time;
- state->cc_tx += tx_time;
- state->cc_rx += rx_time + obss_time;
- state->cc_bss_rx += rx_time;
state->noise = -(phy->noise >> 4);
}
-void mt7915_update_channel(struct mt76_dev *mdev)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
-
- mt7915_phy_update_channel(&mdev->phy, 0);
- if (mdev->phy2)
- mt7915_phy_update_channel(mdev->phy2, 1);
-
- /* reset obss airtime */
- mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
- if (mdev->phy2)
- mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
- MT_WF_RMAC_MIB_RXTIME_CLR);
-}
-
static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
@@ -1530,14 +1588,18 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
if (dev->hif2) {
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
(MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN));
mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
(MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
}
}
@@ -1548,14 +1610,7 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7915_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb) {
- struct ieee80211_hw *hw;
-
- hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
- ieee80211_free_txskb(hw, txwi->skb);
- }
- mt76_put_txwi(&dev->mt76, txwi);
+ mt7915_txwi_free(dev, txwi, NULL, NULL);
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->mt76.token_lock);
@@ -1588,11 +1643,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
- /* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (ext_phy)
- mt76_txq_schedule_all(ext_phy);
-
mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
@@ -1618,10 +1668,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
- mt76_worker_enable(&dev->mt76.tx_worker);
- napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
napi_enable(&dev->mt76.napi[0]);
napi_schedule(&dev->mt76.napi[0]);
@@ -1630,14 +1676,20 @@ void mt7915_mac_reset_work(struct work_struct *work)
napi_enable(&dev->mt76.napi[2]);
napi_schedule(&dev->mt76.napi[2]);
+ tasklet_schedule(&dev->irq_tasklet);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
ieee80211_wake_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_wake_queues(ext_phy->hw);
- mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
- mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
-
mutex_unlock(&dev->mt76.mutex);
mt7915_update_beacons(dev);
@@ -1651,7 +1703,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
}
static void
-mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
@@ -1733,8 +1785,10 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
- IEEE80211_RC_BW_CHANGED))
+ IEEE80211_RC_BW_CHANGED)) {
+ mt7915_mcu_add_he(dev, vif, sta);
mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+ }
if (changed & IEEE80211_RC_SMPS_CHANGED)
mt7915_mcu_add_smps(dev, vif, sta);
@@ -1756,11 +1810,11 @@ void mt7915_mac_work(struct work_struct *work)
mutex_lock(&mphy->dev->mutex);
- mt76_update_survey(mphy->dev);
+ mt76_update_survey(mphy);
if (++mphy->mac_work_count == 5) {
mphy->mac_work_count = 0;
- mt7915_mac_update_mib_stats(phy);
+ mt7915_mac_update_stats(phy);
}
if (++phy->sta_work_count == 10) {
@@ -1770,6 +1824,8 @@ void mt7915_mac_work(struct work_struct *work)
mutex_unlock(&mphy->dev->mutex);
+ mt76_tx_status_check(mphy->dev, NULL, false);
+
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7915_WATCHDOG_TIME);
}
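The tx-status plumbing in this file hangs together as a pid round trip: mt7915_tx_prepare_skb() stashes the skb via mt76_tx_status_skb_add() and writes the returned pid into TXD5, and when a PKT_TYPE_TXS event arrives, mt7915_mac_add_txs() extracts wcid and pid so mt76_tx_status_skb_get() can match the skb back. A toy standalone model of that flow; the table and constants are illustrative, not the mt76 implementation:

#include <stdint.h>

#define PID_FIRST 1	/* stands in for MT_PACKET_ID_FIRST */
#define PID_MAX   128

struct txs_table {
	void *skb[PID_MAX];
	uint8_t next;
};

/* on tx: stash the skb, return the pid that gets written into TXD5 */
static uint8_t txs_add(struct txs_table *t, void *skb)
{
	uint8_t pid = PID_FIRST + t->next++ % (PID_MAX - PID_FIRST);

	t->skb[pid] = skb;
	return pid;
}

/* on a TXS event: match the skb back by the pid hardware reported */
static void *txs_get(struct txs_table *t, uint8_t pid)
{
	void *skb = t->skb[pid];

	t->skb[pid] = NULL;
	return skb;
}

int main(void)
{
	struct txs_table t = { { 0 }, 0 };
	int dummy_skb;
	uint8_t pid = txs_add(&t, &dummy_skb);

	return txs_get(&t, pid) == &dummy_skb ? 0 : 1;
}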
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 0f929fb53027..eb1885f4bd8e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -304,6 +304,62 @@ struct mt7915_tx_free {
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
+#define MT_TXS0_FIXED_RATE BIT(31)
+#define MT_TXS0_BW GENMASK(30, 29)
+#define MT_TXS0_TID GENMASK(28, 26)
+#define MT_TXS0_AMPDU BIT(25)
+#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TX_RATE GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS GENMASK(31, 30)
+#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
+#define MT_TXS2_SHARED_ANTENNA BIT(26)
+#define MT_TXS2_WCID GENMASK(25, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_PID GENMASK(31, 24)
+#define MT_TXS3_ANT_ID GENMASK(23, 0)
+
+#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+
+#define MT_TXS5_F0_FINAL_MPDU BIT(31)
+#define MT_TXS5_F0_QOS BIT(30)
+#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25)
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_MPDU_TX_COUNT GENMASK(31, 24)
+#define MT_TXS5_F1_MPDU_TX_BYTES GENMASK(23, 0)
+
+#define MT_TXS6_F0_NOISE_3 GENMASK(31, 24)
+#define MT_TXS6_F0_NOISE_2 GENMASK(23, 16)
+#define MT_TXS6_F0_NOISE_1 GENMASK(15, 8)
+#define MT_TXS6_F0_NOISE_0 GENMASK(7, 0)
+#define MT_TXS6_F1_MPDU_FAIL_COUNT GENMASK(31, 24)
+#define MT_TXS6_F1_MPDU_FAIL_BYTES GENMASK(23, 0)
+
+#define MT_TXS7_F0_RCPI_3 GENMASK(31, 24)
+#define MT_TXS7_F0_RCPI_2 GENMASK(23, 16)
+#define MT_TXS7_F0_RCPI_1 GENMASK(15, 8)
+#define MT_TXS7_F0_RCPI_0 GENMASK(7, 0)
+#define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24)
+#define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0)
+
struct mt7915_dfs_pulse {
u32 max_width; /* us */
int max_pwr; /* dbm */
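Each MT_TXS* word above is a packed little-endian u32 decoded with FIELD_GET(): for instance, the station index sits in bits 25:16 of word 2 and the packet ID in bits 31:24 of word 3. A standalone sketch of that extraction with a hand-rolled FIELD_GET, the masks copied from the definitions above:

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled stand-in for the kernel's FIELD_GET(): shift the masked
 * value down by the mask's lowest set bit.
 */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t txs2 = 0x002a0000;	/* example word 2: MT_TXS2_WCID = 42 */
	uint32_t txs3 = 0x45000000;	/* example word 3: MT_TXS3_PID = 0x45 */

	printf("wcid=%u pid=%u\n",
	       field_get(0x03ff0000, txs2),	/* GENMASK(25, 16) */
	       field_get(0xff000000, txs3));	/* GENMASK(31, 24) */
	return 0;
}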
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index e5bd687546b6..c25f8da590dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -139,12 +139,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
if (type != NL80211_IFTYPE_STATION)
break;
- /* next, try to find a free repeater entry for the sta */
- i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
- REPEATER_BSSID_MAX - REPEATER_BSSID_START);
- if (i)
- return i + 32 - 1;
-
i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
if (i)
return i - 1;
@@ -172,6 +166,22 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
+static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+ mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+ memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
+ sizeof(mvif->bitrate_mask.control[i].ht_mcs));
+ memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
+ sizeof(mvif->bitrate_mask.control[i].vht_mcs));
+ memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
+ sizeof(mvif->bitrate_mask.control[i].he_mcs));
+ }
+}
+
static int mt7915_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -241,6 +251,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
vif->offload_flags = 0;
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+ mt7915_init_bitrate_mask(vif);
+
out:
mutex_unlock(&dev->mt76.mutex);
@@ -798,7 +810,8 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
/* TSF software read */
- mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@@ -827,7 +840,34 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
- mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 timestamp)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ /* TSF software adjust */
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
@@ -911,17 +951,15 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
-static void
-mt7915_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed)
+static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ struct ieee80211_hw *hw = msta->vif->phy->mt76->hw;
+ u32 *changed = data;
spin_lock_bh(&dev->sta_poll_lock);
- msta->stats.changed |= changed;
+ msta->stats.changed |= *changed;
if (list_empty(&msta->rc_list))
list_add_tail(&msta->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->sta_poll_lock);
@@ -929,6 +967,39 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &dev->rc_work);
}
+static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ mt7915_sta_rc_work(&changed, sta);
+}
+
+static int
+mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
+ u32 changed;
+
+ if (mask->control[band].gi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+ mvif->bitrate_mask = *mask;
+
+ /* Update firmware rate control to add a boundary on top of the table
+ * to limit the rate selection for each peer, so that setting bitrates
+ * vht-mcs-5 1:9 actually means nss = 1, mcs = 0~9. This only applies
+ * to data frames; mgmt, mcast and bcast frames still use legacy rates
+ * as-is.
+ */
+ ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed);
+
+ return 0;
+}
+
static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -987,9 +1058,11 @@ const struct ieee80211_ops mt7915_ops = {
.get_stats = mt7915_get_stats,
.get_tsf = mt7915_get_tsf,
.set_tsf = mt7915_set_tsf,
+ .offset_tsf = mt7915_offset_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
.set_antenna = mt7915_set_antenna,
+ .set_bitrate_mask = mt7915_set_bitrate_mask,
.set_coverage_class = mt7915_set_coverage_class,
.sta_statistics = mt7915_sta_statistics,
.sta_set_4addr = mt7915_sta_set_4addr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index b3f14ff67c5a..863aa18b3024 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -88,28 +88,28 @@ struct mt7915_fw_region {
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
-static enum mt7915_cipher_type
+static enum mcu_cipher_type
mt7915_mcu_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
+ return MCU_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
+ return MCU_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
+ return MCU_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
- return MT_CIPHER_BIP_CMAC_128;
+ return MCU_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
+ return MCU_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
- return MT_CIPHER_CCMP_256;
+ return MCU_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
- return MT_CIPHER_GCMP;
+ return MCU_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
- return MT_CIPHER_GCMP_256;
+ return MCU_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
- return MT_CIPHER_WAPI;
+ return MCU_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
}
@@ -147,10 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
}
static u8
-mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
- enum nl80211_band band = mphy->chandef.chan->band;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
const struct ieee80211_sta_he_cap *he_cap;
@@ -163,7 +163,7 @@ mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
} else {
struct ieee80211_supported_band *sband;
- sband = mphy->hw->wiphy->bands[band];
+ sband = mvif->phy->mt76->hw->wiphy->bands[band];
ht_cap = &sband->ht_cap;
vht_cap = &sband->vht_cap;
@@ -209,6 +209,112 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
return nss - 1;
}
+static void
+mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
+ const u16 *mask)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ u16 mcs_map;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80P80:
+ mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ break;
+ default:
+ mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ break;
+ }
+
+ for (nss = 0; nss < max_nss; nss++) {
+ int mcs;
+
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ mcs = GENMASK(11, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;
+
+ switch (mcs) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8 ... 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10 ... 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+ mcs_map &= ~(0x3 << (nss * 2));
+ mcs_map |= mcs << (nss * 2);
+
+ /* only support 2ss on 160MHz */
+ if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ break;
+ }
+
+ *he_mcs = cpu_to_le16(mcs_map);
+}
+
+static void
+mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
+ const u16 *mask)
+{
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ u16 mcs;
+
+ for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
+ switch (mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs = GENMASK(8, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
+
+ /* only support 2ss on 160MHz */
+ if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ break;
+ }
+}
+
+static void
+mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
+ const u8 *mask)
+{
+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+
+ for (nss = 0; nss < max_nss; nss++)
+ ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
+}
+
static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
@@ -350,6 +456,24 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
}
static void
+mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_thermal_notify *t;
+ struct mt7915_phy *phy;
+
+ t = (struct mt7915_mcu_thermal_notify *)skb->data;
+ if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
+ return;
+
+ if (t->ctrl.band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ phy = (struct mt7915_phy *)mphy->priv;
+ phy->throttle_state = t->ctrl.duty.duty_cycle;
+}
+
+static void
mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -469,6 +593,7 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
u16 attempts = le16_to_cpu(ra->attempts);
u16 curr = le16_to_cpu(ra->curr_rate);
u16 wcidx = le16_to_cpu(ra->wlan_idx);
+ struct ieee80211_tx_status status = {};
struct mt76_phy *mphy = &dev->mphy;
struct mt7915_sta_stats *stats;
struct mt7915_sta *msta;
@@ -500,6 +625,13 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
stats->per = 1000 * (attempts - success) / attempts;
}
+
+ status.sta = wcid_to_sta(wcid);
+ if (!status.sta)
+ return;
+
+ status.rate = &stats->tx_rate;
+ ieee80211_tx_status_ext(mphy->hw, &status);
}
static void
@@ -531,6 +663,9 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_THERMAL_PROTECT:
+ mt7915_mcu_rx_thermal_notify(dev, skb);
+ break;
case MCU_EXT_EVENT_RDD_REPORT:
mt7915_mcu_rx_radar_detected(dev, skb);
break;
@@ -733,7 +868,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL);
+ bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
} else {
memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
}
@@ -1072,14 +1207,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
- if (cipher == MT_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
sec_key->key_id = bip->keyidx;
sec_key->key_len = 16;
memcpy(sec_key->key, bip->key, 16);
sec_key = &sec->key[1];
- sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
memcpy(sec_key->key, key->key, 16);
@@ -1091,14 +1226,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
- if (cipher == MT_CIPHER_TKIP) {
+ if (cipher == MCU_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(sec_key->key + 16, key->key + 24, 8);
memcpy(sec_key->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MT_CIPHER_AES_CCMP) {
+ if (cipher == MCU_CIPHER_AES_CCMP) {
memcpy(bip->key, key->key, key->keylen);
bip->keyidx = key->keyidx;
}
@@ -1336,8 +1471,11 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
@@ -1428,15 +1566,18 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- he->max_nss_mcs[CMD_HE_MCS_BW8080] =
- he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW8080],
+ mcs_mask);
- he->max_nss_mcs[CMD_HE_MCS_BW160] =
- he_cap->he_mcs_nss_supp.rx_mcs_160;
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW160],
+ mcs_mask);
fallthrough;
default:
- he->max_nss_mcs[CMD_HE_MCS_BW80] =
- he_cap->he_mcs_nss_supp.rx_mcs_80;
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW80],
+ mcs_mask);
break;
}
@@ -1544,27 +1685,18 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
}
-static int
-mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void
+mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
-
- if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
- return 0;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ struct sta_rec_vht *vht;
+ struct tlv *tlv;
- /* starec muru */
- mt7915_mcu_sta_muru_tlv(skb, sta);
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
}
static void
@@ -1616,17 +1748,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
mt7915_mcu_sta_amsdu_tlv(skb, sta);
}
- /* starec vht */
- if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *vht;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
- vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
- }
-
/* starec he */
if (sta->he_cap.has_he)
mt7915_mcu_sta_he_tlv(skb, sta);
@@ -2016,26 +2137,21 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
vc = mt7915_get_he_phy_cap(phy, vif);
ve = &vc->he_cap_elem;
- ebfee = !!((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
- HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+ ebfee = !!(HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) &&
HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]));
- ebf = !!((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
- HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+ ebf = !!(HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) &&
HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]));
} else if (sta->vht_cap.vht_supported) {
struct ieee80211_sta_vht_cap *pc;
struct ieee80211_sta_vht_cap *vc;
- u32 cr, ce;
pc = &sta->vht_cap;
vc = &phy->mt76->sband_5g.sband.vht_cap;
- cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
- ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- ebfee = !!((pc->cap & cr) && (vc->cap & ce));
- ebf = !!((vc->cap & cr) && (pc->cap & ce));
+ ebfee = !!((pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+ (vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+ ebf = !!((vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+ (pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
}
/* must keep each tag independent */
@@ -2079,57 +2195,47 @@ static void
mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt76_phy *mphy = &dev->mphy;
- enum nl80211_band band;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
struct tlv *tlv;
- u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0;
- u8 i, nss = sta->rx_nss, mcs = 0;
+ u32 supp_rate = sta->supp_rates[band];
+ u32 cap = sta->wme ? STA_CAP_WMM : 0;
tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra *)tlv;
- if (msta->wcid.ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- band = mphy->chandef.chan->band;
- supp_rate = sta->supp_rates[band];
- n_rates = hweight32(supp_rate);
-
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta);
- ra->channel = mphy->chandef.chan->hw_value;
+ ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+ ra->channel = chandef->chan->hw_value;
ra->bw = sta->bandwidth;
- ra->rate_len = n_rates;
ra->phy.bw = sta->bandwidth;
- if (n_rates) {
+ if (supp_rate) {
+ supp_rate &= mask->control[band].legacy;
+ ra->rate_len = hweight32(supp_rate);
+
if (band == NL80211_BAND_2GHZ) {
ra->supp_mode = MODE_CCK;
ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
- ra->phy.type = MT_PHY_TYPE_CCK;
- if (n_rates > 4) {
+ if (ra->rate_len > 4) {
ra->supp_mode |= MODE_OFDM;
ra->supp_ofdm_rate = supp_rate >> 4;
- ra->phy.type = MT_PHY_TYPE_OFDM;
}
} else {
ra->supp_mode = MODE_OFDM;
ra->supp_ofdm_rate = supp_rate;
- ra->phy.type = MT_PHY_TYPE_OFDM;
}
}
if (sta->ht_cap.ht_supported) {
- for (i = 0; i < nss; i++)
- ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i];
+ const u8 *mcs_mask = mask->control[band].ht_mcs;
- ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
ra->supp_mode |= MODE_HT;
- mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1;
ra->af = sta->ht_cap.ampdu_factor;
ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
@@ -2144,13 +2250,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_RX_STBC;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
cap |= STA_CAP_LDPC;
+
+ mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, mcs_mask);
+ ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
if (sta->vht_cap.vht_supported) {
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
- u16 vht_mcs;
- u8 af, mcs_prev;
+ const u16 *mcs_mask = mask->control[band].vht_mcs;
+ u8 af;
+ ra->supp_mode |= MODE_VHT;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
sta->vht_cap.cap);
ra->af = max_t(u8, ra->af, af);
@@ -2167,33 +2276,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
cap |= STA_CAP_VHT_LDPC;
- ra->supp_mode |= MODE_VHT;
- for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) {
- switch (mcs_map & 0x3) {
- case IEEE80211_VHT_MCS_SUPPORT_0_9:
- vht_mcs = GENMASK(9, 0);
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_8:
- vht_mcs = GENMASK(8, 0);
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_7:
- vht_mcs = GENMASK(7, 0);
- break;
- default:
- vht_mcs = 0;
- }
-
- ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs);
-
- mcs_prev = hweight16(vht_mcs) - 1;
- if (mcs_prev > mcs)
- mcs = mcs_prev;
-
- /* only support 2ss on 160MHz */
- if (i > 1 && (ra->bw == CMD_CBW_160MHZ ||
- ra->bw == CMD_CBW_8080MHZ))
- break;
- }
+ mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mcs_mask);
}
if (sta->he_cap.has_he) {
@@ -2201,28 +2284,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_HE;
}
- ra->sta_status = cpu_to_le32(cap);
-
- switch (BIT(fls(ra->supp_mode) - 1)) {
- case MODE_VHT:
- ra->phy.type = MT_PHY_TYPE_VHT;
- ra->phy.mcs = mcs;
- ra->phy.nss = nss;
- ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC);
- ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- ra->phy.sgi =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
- break;
- case MODE_HT:
- ra->phy.type = MT_PHY_TYPE_HT;
- ra->phy.mcs = mcs;
- ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
- ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC);
- ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
- break;
- default:
- break;
- }
+ ra->sta_cap = cpu_to_le32(cap);
}
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2243,6 +2305,87 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int len;
+
+ if (!sta->he_cap.has_he)
+ return 0;
+
+ len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_he);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_he_tlv(skb, sta);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP 1
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct {
+ __le32 action;
+ u8 wlan_idx_lo;
+ u8 status;
+ u8 wlan_idx_hi;
+ u8 rsv0[5];
+ __le32 val;
+ u8 rsv1[8];
+ } __packed req = {
+ .action = cpu_to_le32(MT_STA_BSS_GROUP),
+ .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+ .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+ .val = cpu_to_le32(mvif->idx % 16),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
+ return 0;
+
+ ret = mt7915_mcu_add_group(dev, vif, sta);
+ if (ret)
+ return ret;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* wait until TxBF and MU ready to update starec vht */
+
+ /* starec muru */
+ mt7915_mcu_sta_muru_tlv(skb, sta);
+ /* starec vht */
+ mt7915_mcu_sta_vht_tlv(skb, sta);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
@@ -2253,17 +2396,14 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
/* must keep the order */
ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
- if (ret)
+ if (ret || !enable)
return ret;
ret = mt7915_mcu_add_mu(dev, vif, sta);
if (ret)
return ret;
- if (enable)
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
-
- return 0;
+ return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
}
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2432,7 +2572,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
buf = (u8 *)tlv + sizeof(*cont);
- mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+ mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
true);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
@@ -3307,7 +3447,8 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
{
struct mt7915_mcu_eeprom_info req = {
- .addr = cpu_to_le32(round_down(offset, 16)),
+ .addr = cpu_to_le32(round_down(offset,
+ MT7915_EEPROM_BLOCK_SIZE)),
};
struct mt7915_mcu_eeprom_info *res;
struct sk_buff *skb;
@@ -3321,7 +3462,7 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
res = (struct mt7915_mcu_eeprom_info *)skb->data;
buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
+ memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
dev_kfree_skb(skb);
return 0;
@@ -3440,8 +3581,9 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- u16 total = 2, idx, center_freq = chandef->center_freq1;
+ u16 total = 2, center_freq = chandef->center_freq1;
u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ int idx;
if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
return 0;
@@ -3469,22 +3611,128 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
return 0;
}
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
+{
+ /* strict order */
+ static const enum mt7915_chan_mib_offs offs[] = {
+ MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+ };
+ struct mt76_channel_state *state = phy->mt76->chan_state;
+ struct mt76_channel_state *state_ts = &phy->state_ts;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_mib *res, req[4];
+ struct sk_buff *skb;
+ int i, ret;
+
+ for (i = 0; i < 4; i++) {
+ req[i].band = cpu_to_le32(phy != &dev->phy);
+ req[i].offs = cpu_to_le32(offs[i]);
+ }
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
+ req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7915_mcu_mib *)(skb->data + 20);
+
+ if (chan_switch)
+ goto out;
+
+#define __res_u64(s) le64_to_cpu(res[s].data)
+ state->cc_busy += __res_u64(0) - state_ts->cc_busy;
+ state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+ state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+ state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+
+out:
+ state_ts->cc_busy = __res_u64(0);
+ state_ts->cc_tx = __res_u64(1);
+ state_ts->cc_bss_rx = __res_u64(2);
+ state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+#undef __res_u64
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
{
+ struct mt7915_dev *dev = phy->dev;
struct {
u8 ctrl_id;
u8 action;
- u8 band;
+ u8 dbdc_idx;
u8 rsv[5];
} req = {
.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
- .action = index,
+ .dbdc_idx = phy != &dev->phy,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
sizeof(req), true);
}
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_thermal_ctrl ctrl;
+
+ __le32 trigger_temp;
+ __le32 restore_temp;
+ __le16 sustain_time;
+ u8 rsv[2];
+ } __packed req = {
+ .ctrl = {
+ .band_idx = phy != &dev->phy,
+ },
+ };
+ int level;
+
+#define TRIGGER_TEMPERATURE 122
+#define RESTORE_TEMPERATURE 116
+#define SUSTAIN_PERIOD 10
+
+ if (!state) {
+ req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+ goto out;
+ }
+
+ /* set duty cycle and level */
+ for (level = 0; level < 4; level++) {
+ int ret;
+
+ req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
+ req.ctrl.duty.duty_level = level;
+ req.ctrl.duty.duty_cycle = state;
+ state = state * 4 / 5;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req.ctrl), false);
+ if (ret)
+ return ret;
+ }
+
+ /* currently use fixed values for throttling; implementing a thermal
+ * zone for dynamic trip points would be better in the long run.
+ */
+
+ /* set high-temperature trigger threshold */
+ req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
+ req.trigger_temp = cpu_to_le32(TRIGGER_TEMPERATURE);
+ req.restore_temp = cpu_to_le32(RESTORE_TEMPERATURE);
+ req.sustain_time = cpu_to_le16(SUSTAIN_PERIOD);
+
+out:
+ req.ctrl.type.protect_type = 1;
+ req.ctrl.type.trigger_type = 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req), false);
+}
+
int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
{
struct {
@@ -3505,7 +3753,6 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
-#define MT7915_SKU_RATE_NUM 161
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
@@ -3555,6 +3802,39 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
sizeof(req), true);
}
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
+{
+#define RATE_POWER_INFO 2
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 category;
+ u8 band;
+ u8 _rsv;
+ } __packed req = {
+ .format_id = 7,
+ .category = RATE_POWER_INFO,
+ .band = phy != &dev->phy,
+ };
+ s8 res[MT7915_SKU_RATE_NUM][2];
+ struct sk_buff *skb;
+ int ret, i;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ memcpy(res, skb->data + 4, sizeof(res));
+ for (i = 0; i < len; i++)
+ txpower[i] = res[i][req.band];
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en)
{
@@ -3613,57 +3893,50 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
&req, sizeof(req), false);
}
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev)
-{
-#define MT_BF_MODULE_UPDATE 25
- struct {
- u8 action;
- u8 bf_num;
- u8 bf_bitmap;
- u8 bf_sel[8];
- u8 rsv[8];
- } __packed req = {
- .action = MT_BF_MODULE_UPDATE,
- .bf_num = 2,
- .bf_bitmap = GENMASK(1, 0),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
{
-#define MT_BF_TYPE_UPDATE 20
struct {
u8 action;
- bool ebf;
- bool ibf;
- u8 rsv;
+ union {
+ struct {
+ u8 snd_mode;
+ u8 sta_num;
+ u8 rsv;
+ u8 wlan_idx[4];
+ __le32 snd_period; /* ms */
+ } __packed snd;
+ struct {
+ bool ebf;
+ bool ibf;
+ u8 rsv;
+ } __packed type;
+ struct {
+ u8 bf_num;
+ u8 bf_bitmap;
+ u8 bf_sel[8];
+ u8 rsv[5];
+ } __packed mod;
+ };
} __packed req = {
- .action = MT_BF_TYPE_UPDATE,
- .ebf = true,
- .ibf = dev->ibf,
+ .action = action,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
-{
-#define MT_BF_PROCESSING 4
- struct {
- u8 action;
- u8 snd_mode;
- u8 sta_num;
- u8 rsv;
- u8 wlan_idx[4];
- __le32 snd_period; /* ms */
- } __packed req = {
- .action = true,
- .snd_mode = MT_BF_PROCESSING,
- };
+#define MT_BF_PROCESSING 4
+ switch (action) {
+ case MT_BF_SOUNDING_ON:
+ req.snd.snd_mode = MT_BF_PROCESSING;
+ break;
+ case MT_BF_TYPE_UPDATE:
+ req.type.ebf = true;
+ req.type.ibf = dev->ibf;
+ break;
+ case MT_BF_MODULE_UPDATE:
+ req.mod.bf_num = 2;
+ req.mod.bf_bitmap = GENMASK(1, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
sizeof(req), true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 42582a66e42d..edd3ba3a0c2d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -68,6 +68,29 @@ struct mt7915_mcu_rxd {
u8 s2d_index;
};
+struct mt7915_mcu_thermal_ctrl {
+ u8 ctrl_id;
+ u8 band_idx;
+ union {
+ struct {
+ u8 protect_type; /* 1: duty admit, 2: radio off */
+ u8 trigger_type; /* 0: low, 1: high */
+ } __packed type;
+ struct {
+ u8 duty_level; /* level 0~3 */
+ u8 duty_cycle;
+ } __packed duty;
+ };
+} __packed;
+
+struct mt7915_mcu_thermal_notify {
+ struct mt7915_mcu_rxd rxd;
+
+ struct mt7915_mcu_thermal_ctrl ctrl;
+ __le32 temperature;
+ u8 rsv[8];
+} __packed;
+
struct mt7915_mcu_csa_notify {
struct mt7915_mcu_rxd rxd;
@@ -193,6 +216,19 @@ struct mt7915_mcu_phy_rx_info {
#define MT_RA_RATE_DCM_EN BIT(4)
#define MT_RA_RATE_BW GENMASK(14, 13)
+struct mt7915_mcu_mib {
+ __le32 band;
+ __le32 offs;
+ __le64 data;
+} __packed;
+
+enum mt7915_chan_mib_offs {
+ MIB_BUSY_TIME = 14,
+ MIB_TX_TIME = 81,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME = 86
+};
+
struct edca {
u8 queue;
u8 set;
@@ -262,6 +298,7 @@ enum {
MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
MCU_EXT_CMD_TXBF_ACTION = 0x1e,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_THERMAL_PROT = 0x23,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
MCU_EXT_CMD_EDCA_UPDATE = 0x27,
@@ -277,6 +314,7 @@ enum {
MCU_EXT_CMD_MUAR_UPDATE = 0x48,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
MCU_EXT_CMD_SCS_CTRL = 0x82,
@@ -919,7 +957,7 @@ struct sta_rec_ra {
u8 op_vht_rx_nss;
u8 op_vht_rx_nss_type;
- __le32 sta_status;
+ __le32 sta_cap;
struct ra_phy phy;
} __packed;
@@ -1034,18 +1072,17 @@ enum {
STA_REC_MAX_NUM
};
-enum mt7915_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_WEP104,
- MT_CIPHER_WEP128,
- MT_CIPHER_TKIP,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_CCMP_256,
- MT_CIPHER_GCMP,
- MT_CIPHER_GCMP_256,
- MT_CIPHER_WAPI,
- MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+ MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_WEP104,
+ MCU_CIPHER_WEP128,
+ MCU_CIPHER_TKIP,
+ MCU_CIPHER_AES_CCMP,
+ MCU_CIPHER_CCMP_256,
+ MCU_CIPHER_GCMP,
+ MCU_CIPHER_GCMP_256,
+ MCU_CIPHER_WAPI,
+ MCU_CIPHER_BIP_CMAC_128,
};
enum {
@@ -1067,10 +1104,27 @@ enum {
};
enum {
+ THERMAL_PROTECT_PARAMETER_CTRL,
+ THERMAL_PROTECT_BASIC_INFO,
+ THERMAL_PROTECT_ENABLE,
+ THERMAL_PROTECT_DISABLE,
+ THERMAL_PROTECT_DUTY_CONFIG,
+ THERMAL_PROTECT_MECH_INFO,
+ THERMAL_PROTECT_DUTY_INFO,
+ THERMAL_PROTECT_STATE_ACT,
+};
+
+enum {
MT_EBF = BIT(0), /* explicit beamforming */
MT_IBF = BIT(1) /* implicit beamforming */
};
+enum {
+ MT_BF_SOUNDING_ON = 1,
+ MT_BF_TYPE_UPDATE = 20,
+ MT_BF_MODULE_UPDATE = 25
+};
+
#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
sizeof(struct wtbl_generic) + \
sizeof(struct wtbl_rx) + \
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 4ea8972d4e2f..3f613fae6218 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -9,7 +9,7 @@
#include "../mt76.h"
#include "regs.h"
-#define MT7915_MAX_INTERFACES 32
+#define MT7915_MAX_INTERFACES 19
#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
@@ -31,6 +31,7 @@
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
#define MT7915_EEPROM_SIZE 3584
+#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
@@ -38,6 +39,10 @@
#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
+#define MT7915_THERMAL_THROTTLE_MAX 100
+
+#define MT7915_SKU_RATE_NUM 161
+
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -100,6 +105,7 @@ struct mt7915_vif {
struct mt7915_phy *phy;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct cfg80211_bitrate_mask bitrate_mask;
};
struct mib_stats {
@@ -126,6 +132,9 @@ struct mt7915_phy {
struct ieee80211_vif *monitor_vif;
+ struct thermal_cooling_device *cdev;
+ u8 throttle_state;
+
u32 rxfilter;
u64 omac_mask;
@@ -141,6 +150,7 @@ struct mt7915_phy {
u32 ampdu_ref;
struct mib_stats mib;
+ struct mt76_channel_state state_ts;
struct list_head stats_list;
u8 sta_work_count;
@@ -169,6 +179,7 @@ struct mt7915_dev {
struct mt7915_hif *hif2;
const struct mt76_bus_ops *bus_ops;
+ struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
u16 chainmask;
@@ -322,6 +333,8 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7915_set_channel(struct mt7915_phy *phy);
@@ -342,9 +355,8 @@ int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
const struct mt7915_dfs_pulse *pulse);
@@ -352,7 +364,9 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
const struct mt7915_dfs_pattern *pattern);
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
@@ -374,9 +388,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
{
if (dev->hif2)
- mt7915_dual_hif_set_irq_mask(dev, true, 0, mask);
+ mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
else
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->irq_tasklet);
}
static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
@@ -392,12 +408,9 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, bool beacon);
void mt7915_mac_set_timing(struct mt7915_phy *phy);
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -417,13 +430,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7915_stats_work(struct work_struct *work);
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *txwi);
int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
-void mt7915_update_channel(struct mt76_dev *mdev);
+void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_init_debugfs(struct mt7915_dev *dev);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 643f171884cf..340b364da5f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -94,11 +94,15 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
}
/* TODO: support 2/4/6/8 MSI-X vectors */
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
{
- struct mt7915_dev *dev = dev_instance;
+ struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, intr1, mask;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
@@ -111,9 +115,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
intr |= intr1;
}
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
-
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
mask = intr & MT_INT_RX_DONE_ALL;
@@ -150,6 +151,20 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
wake_up(&dev->reset_wait);
}
}
+}
+
+static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
@@ -240,6 +255,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
+ mt76_pci_disable_aspm(pdev);
+
if (id->device == 0x7916)
return mt7915_pci_hif2_probe(pdev);
@@ -250,10 +267,18 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free;
+
ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
if (ret)
goto error;
+ tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
@@ -266,10 +291,14 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
ret = mt7915_register_device(dev);
if (ret)
- goto error;
+ goto free_irq;
return 0;
+free_irq:
+ devm_free_irq(mdev->dev, pdev->irq, dev);
error:
+ pci_free_irq_vectors(pdev);
+free:
mt76_free_device(&dev->mt76);
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index efe0f2904c66..a213b5cb82f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -82,11 +82,6 @@
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_FP0R0(_band) MT_WF_TMAC(_band, 0x020)
-#define MT_TMAC_FP0R15(_band) MT_WF_TMAC(_band, 0x080)
-#define MT_TMAC_FP0R18(_band) MT_WF_TMAC(_band, 0x270)
-#define MT_TMAC_FP_MASK GENMASK(7, 0)
-
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
@@ -104,6 +99,11 @@
#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
+#define MT_ETBF_RX_FB_CONT(_band) MT_WF_ETBF(_band, 0x068)
+#define MT_ETBF_RX_FB_BW GENMASK(7, 6)
+#define MT_ETBF_RX_FB_NC GENMASK(5, 3)
+#define MT_ETBF_RX_FB_NR GENMASK(2, 0)
+
#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0)
@@ -124,6 +124,8 @@
#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
+#define MT_LPON_TCR_SW_ADJUST BIT(1)
+#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
/* MIB: band 0(0x24800), band 1(0xa4800) */
#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
@@ -132,20 +134,9 @@
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
-#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
-
-#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
-#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
-
#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
-#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
-#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
-#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
-
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
@@ -158,9 +149,6 @@
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4))
-#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0)
-
#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
@@ -258,14 +246,10 @@
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
-#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
-#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8)
-#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
-#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
-
/* WFDMA0 */
#define MT_WFDMA0_BASE 0xd4000
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index f9d81e36ef09..b220b334906b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -464,10 +464,17 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
static void
mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
{
- if (en)
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+
+ if (en) {
+ struct mt7915_dev *dev = phy->dev;
+
mt7915_tm_update_channel(phy);
- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+ /* read-clear */
+ mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+ }
}
static int
@@ -690,7 +697,11 @@ static int
mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
struct mt7915_phy *phy = mphy->priv;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ enum mt76_rxq_id q;
void *rx, *rssi;
+ u16 fcs_err;
int i;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
@@ -735,6 +746,12 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
+ fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+ q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+ mphy->test.rx_stats.packets[q] += fcs_err;
+ mphy->test.rx_stats.fcs_error[q] += fcs_err;
+
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
index 8f8533ef9859..397a6b5532bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2020 MediaTek Inc. */
#ifndef __MT7915_TESTMODE_H
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index e531666f9fb4..0ebb59966a08 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_MT7921E) += mt7921e.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 6ee423dd4027..77468bdae460 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -184,7 +184,10 @@ mt7921_txpwr(struct seq_file *s, void *data)
struct mt7921_txpwr txpwr;
int ret;
+ mt7921_mutex_acquire(dev);
ret = mt7921_get_txpwr_info(dev, &txpwr);
+ mt7921_mutex_release(dev);
+
if (ret)
return ret;
@@ -247,6 +250,9 @@ mt7921_pm_set(void *data, u64 val)
ieee80211_iterate_active_interfaces(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_pm_interface_iter, mphy->priv);
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
mt7921_mutex_release(dev);
return 0;
@@ -265,6 +271,36 @@ mt7921_pm_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
static int
+mt7921_deep_sleep_set(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool enable = !!val;
+
+ mt7921_mutex_acquire(dev);
+ if (pm->ds_enable != enable) {
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
+ pm->ds_enable = enable;
+ }
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7921_deep_sleep_get(void *data, u64 *val)
+{
+ struct mt7921_dev *dev = data;
+
+ *val = dev->pm.ds_enable;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7921_deep_sleep_get,
+ mt7921_deep_sleep_set, "%lld\n");
+
+static int
mt7921_pm_stats(struct seq_file *s, void *data)
{
struct mt7921_dev *dev = dev_get_drvdata(s->private);
@@ -355,6 +391,7 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset);
debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
mt7921_pm_stats);
+ debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index 71e664ee7652..7d7d43a5422f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -74,7 +74,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
mt7921_tx_cleanup(dev);
if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return 0;
}
@@ -92,7 +92,7 @@ static int mt7921_poll_rx(struct napi_struct *napi, int budget)
return 0;
}
done = mt76_dma_rx_poll(napi, budget);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
return done;
}
@@ -313,9 +313,9 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
- mt76_set(dev, 0x70002600, BIT(0));
- msleep(200);
- mt76_clear(dev, 0x70002600, BIT(0));
+ mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
+ msleep(50);
+ mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
@@ -380,9 +380,7 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
- /* Increase buffer size to receive large VHT/HE MPDUs */
struct mt76_bus_ops *bus_ops;
- int rx_buf_size = MT_RX_BUF_SIZE * 2;
int ret;
dev->bus_ops = dev->mt76.bus;
@@ -402,6 +400,10 @@ int mt7921_dma_init(struct mt7921_dev *dev)
if (ret)
return ret;
+ ret = mt7921_wfsys_reset(dev);
+ if (ret)
+ return ret;
+
/* init tx queue */
ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
MT7921_TX_RING_SIZE);
@@ -426,7 +428,7 @@ int mt7921_dma_init(struct mt7921_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT7921_RXQ_MCU_WM,
MT7921_RX_MCU_RING_SIZE,
- rx_buf_size, MT_RX_EVENT_RING_BASE);
+ MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
if (ret)
return ret;
@@ -434,14 +436,14 @@ int mt7921_dma_init(struct mt7921_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7921_RXQ_MCU_WM,
MT7921_RX_MCU_RING_SIZE,
- rx_buf_size, MT_WFDMA0(0x540));
+ MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
if (ret)
return ret;
/* rx data */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
- rx_buf_size, MT_RX_DATA_RING_BASE);
+ MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index fe28bf4050c4..a9ce10b98827 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -7,34 +7,6 @@
#include "mcu.h"
#include "eeprom.h"
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
-}
-
-static struct ieee80211_rate mt7921_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(11, 60),
- OFDM_RATE(15, 90),
- OFDM_RATE(10, 120),
- OFDM_RATE(14, 180),
- OFDM_RATE(9, 240),
- OFDM_RATE(13, 360),
- OFDM_RATE(8, 480),
- OFDM_RATE(12, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7921_MAX_INTERFACES,
@@ -73,11 +45,13 @@ static void
mt7921_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = phy->dev;
struct wiphy *wiphy = hw->wiphy;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = 64;
+ hw->max_tx_aggregation_subframes = 128;
+ hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
@@ -88,11 +62,13 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
hw->vif_data_size = sizeof(struct mt7921_vif);
wiphy->iface_combinations = if_comb;
+ wiphy->flags &= ~WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
wiphy->max_sched_scan_plan_interval =
- MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+ MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
@@ -100,46 +76,33 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7921_regd_notifier;
- wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ if (dev->pm.enable)
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+
hw->max_tx_fragments = 4;
}
static void
mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
{
- u32 mask, set;
-
mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
mt76_set(dev, MT_TMAC_CTCR0(band),
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
MT_TMAC_CTCR0_INS_DDLMT_EN);
- mask = MT_MDP_RCFR0_MCU_RX_MGMT |
- MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
- MT_MDP_RCFR0_MCU_RX_CTL_BAR;
- set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
- FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
- FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
- mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
-
- mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
- MT_MDP_RCFR1_RX_DROPPED_UCAST |
- MT_MDP_RCFR1_RX_DROPPED_MCAST;
- set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
- FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
- FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
- mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
-
mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
@@ -148,14 +111,15 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
-void mt7921_mac_init(struct mt7921_dev *dev)
+int mt7921_mac_init(struct mt7921_dev *dev)
{
int i;
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
- /* disable hardware de-agg */
- mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
- mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
+ /* enable hardware de-agg */
+ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+ /* enable hardware rx header translation */
+ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
for (i = 0; i < MT7921_WTBL_SIZE; i++)
mt7921_mac_wtbl_update(dev, i,
@@ -163,7 +127,7 @@ void mt7921_mac_init(struct mt7921_dev *dev)
for (i = 0; i < 2; i++)
mt7921_mac_init_band(dev, i);
- mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
+ return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
static int mt7921_init_hardware(struct mt7921_dev *dev)
@@ -203,9 +167,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
- mt7921_mac_init(dev);
-
- return 0;
+ return mt7921_mac_init(dev);
}
int mt7921_register_device(struct mt7921_dev *dev)
@@ -224,7 +186,6 @@ int mt7921_register_device(struct mt7921_dev *dev)
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
- set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_LIST_HEAD(&dev->phy.stats_list);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
@@ -239,6 +200,8 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
+ dev->pm.enable = true;
+ dev->pm.ds_enable = true;
ret = mt7921_init_hardware(dev);
if (ret)
@@ -253,19 +216,33 @@ int mt7921_register_device(struct mt7921_dev *dev)
IEEE80211_HT_CAP_MAX_AMSDU;
dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
mt76_set_stream_caps(&dev->mphy, true);
mt7921_set_stream_he_caps(&dev->phy);
- ret = mt76_register_device(&dev->mt76, true, mt7921_rates,
- ARRAY_SIZE(mt7921_rates));
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ return ret;
+
+ ret = mt7921_init_debugfs(dev);
if (ret)
return ret;
- return mt7921_init_debugfs(dev);
+ ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+ if (ret)
+ return ret;
+
+ dev->hw_init_done = true;
+
+ return 0;
}
void mt7921_unregister_device(struct mt7921_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 214bd1859792..7fe2e3a50428 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -308,21 +308,24 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
+ u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ bool hdr_trans, unicast, insert_ccmp_hdr = false;
+ u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
+ __le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7921_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 *rxv = NULL;
- u32 mode = 0;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
- bool unicast, insert_ccmp_hdr = false;
- u8 remove_pad;
+ u32 rxd4 = le32_to_cpu(rxd[4]);
+ u16 seq_ctrl = 0;
+ __le16 fc = 0;
+ u32 mode = 0;
int i, idx;
- u8 chfreq;
memset(status, 0, sizeof(*status));
@@ -332,9 +335,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
return -EINVAL;
+ if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+ return -EINVAL;
+
chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -357,6 +364,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
+ if ((rxd0 & csum_mask) == csum_mask)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -377,6 +387,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
rxd += 6;
if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ u32 v0 = le32_to_cpu(rxd[0]);
+ u32 v2 = le32_to_cpu(rxd[2]);
+
+ fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
+ seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
+ qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
+
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -386,14 +403,27 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
u8 *data = (u8 *)rxd;
if (status->flag & RX_FLAG_DECRYPTED) {
- status->iv[0] = data[5];
- status->iv[1] = data[4];
- status->iv[2] = data[3];
- status->iv[3] = data[2];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
}
rxd += 4;
if ((u8 *)rxd - skb->data >= skb->len)
@@ -444,16 +474,19 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
+ status->signal = -128;
+ for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
+ if (!(status->chains & BIT(i)) ||
+ status->chain_signal[i] >= 0)
continue;
status->signal = max(status->signal,
status->chain_signal[i]);
}
+ if (status->signal == -128)
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
stbc = FIELD_GET(MT_PRXV_STBC, v0);
gi = FIELD_GET(MT_PRXV_SGI, v0);
cck = false;
@@ -540,10 +573,35 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
- if (insert_ccmp_hdr) {
- u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+ amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+ status->amsdu = !!amsdu_info;
+ if (status->amsdu) {
+ status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+ status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+ if (!hdr_trans) {
+ memmove(skb->data + 2, skb->data,
+ ieee80211_get_hdrlen_from_skb(skb));
+ skb_pull(skb, 2);
+ }
+ }
+
+ if (!hdr_trans) {
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
- mt76_insert_ccmp_hdr(skb, key_id);
+ hdr = mt76_skb_get_hdr(skb);
+ fc = hdr->frame_control;
+ if (ieee80211_is_data_qos(fc)) {
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ qos_ctl = *ieee80211_get_qos_ctl(hdr);
+ }
+ } else {
+ status->flag &= ~(RX_FLAG_RADIOTAP_HE |
+ RX_FLAG_RADIOTAP_HE_MU);
+ status->flag |= RX_FLAG_8023;
}
mt7921_mac_assoc_rssi(dev, skb);
@@ -551,14 +609,12 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
- hdr = mt76_skb_get_hdr(skb);
- if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
- status->aggr = unicast &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control);
- status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
- status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
+ status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
+ status->qos_ctl = qos_ctl;
return 0;
}
@@ -676,6 +732,23 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
txwi[7] |= cpu_to_le32(val);
}
+static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
+{
+ struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
+ u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, txwi[2]);
+
+ if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2)))
+ return;
+
+ if (time_is_after_eq_jiffies(msta->next_txs_ts))
+ return;
+
+ msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
+ pid = mt76_get_next_pkt_id(wcid);
+ txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
+ FIELD_PREP(MT_TXD5_PID, pid));
+}
+
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, bool beacon)
@@ -752,6 +825,8 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
+
+ mt7921_update_txs(wcid, txwi);
}
static void
@@ -1154,18 +1229,18 @@ mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
state->noise = -(phy->noise >> 4);
}
-void mt7921_update_channel(struct mt76_dev *mdev)
+void mt7921_update_channel(struct mt76_phy *mphy)
{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
- if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+ if (mt76_connac_pm_wake(mphy, &dev->pm))
return;
- mt7921_phy_update_channel(&mdev->phy, 0);
+ mt7921_phy_update_channel(mphy, 0);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
- mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
}
void mt7921_tx_token_put(struct mt7921_dev *dev)
@@ -1196,7 +1271,8 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mvif->phy->dev;
- ieee80211_disconnect(vif, true);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
mt7921_mcu_set_tx(dev, vif);
@@ -1212,6 +1288,7 @@ mt7921_mac_reset(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -1227,56 +1304,64 @@ mt7921_mac_reset(struct mt7921_dev *dev)
mt7921_tx_token_put(dev);
idr_init(&dev->mt76.token);
- err = mt7921_wpdma_reset(dev, true);
- if (err)
- return err;
+ mt7921_wpdma_reset(dev, true);
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
}
- napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
- mt76_worker_enable(&dev->mt76.tx_worker);
-
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_STATE_PM, &dev->mphy.state);
- mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
+ MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
err = mt7921_run_firmware(dev);
if (err)
- return err;
+ goto out;
err = mt7921_mcu_set_eeprom(dev);
if (err)
- return err;
+ goto out;
- mt7921_mac_init(dev);
- return __mt7921_start(&dev->phy);
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
}
/* system error recovery */
void mt7921_mac_reset_work(struct work_struct *work)
{
- struct ieee80211_hw *hw;
- struct mt7921_dev *dev;
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ reset_work);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
int i;
- dev = container_of(work, struct mt7921_dev, reset_work);
- hw = mt76_hw(dev);
-
dev_err(dev->mt76.dev, "chip reset\n");
+ dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
cancel_delayed_work_sync(&dev->mphy.mac_work);
- cancel_delayed_work_sync(&dev->pm.ps_work);
- cancel_work_sync(&dev->pm.wake_work);
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
+ __mt7921_mcu_drv_pmctrl(dev);
+
if (!mt7921_mac_reset(dev))
break;
}
@@ -1293,16 +1378,24 @@ void mt7921_mac_reset_work(struct work_struct *work)
ieee80211_scan_completed(dev->mphy.hw, &info);
}
+ dev->hw_full_reset = false;
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_vif_connect_iter, NULL);
+ mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}
void mt7921_reset(struct mt76_dev *mdev)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ if (!dev->hw_init_done)
+ return;
+
+ if (dev->hw_full_reset)
+ return;
+
queue_work(dev->mt76.wq, &dev->reset_work);
}
@@ -1337,30 +1430,6 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
}
}
-static void
-mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
-{
- struct mt7921_dev *dev = phy->dev;
- struct mt7921_sta *msta;
- LIST_HEAD(list);
-
- spin_lock_bh(&dev->sta_poll_lock);
- list_splice_init(&phy->stats_list, &list);
-
- while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7921_sta, stats_list);
- list_del_init(&msta->stats_list);
- spin_unlock_bh(&dev->sta_poll_lock);
-
- /* query wtbl info to report tx rate for further devices */
- mt7921_get_wtbl_info(dev, msta->wcid.idx);
-
- spin_lock_bh(&dev->sta_poll_lock);
- }
-
- spin_unlock_bh(&dev->sta_poll_lock);
-}
-
void mt7921_mac_work(struct work_struct *work)
{
struct mt7921_phy *phy;
@@ -1372,16 +1441,12 @@ void mt7921_mac_work(struct work_struct *work)
mt7921_mutex_acquire(phy->dev);
- mt76_update_survey(mphy->dev);
+ mt76_update_survey(mphy);
if (++mphy->mac_work_count == 2) {
mphy->mac_work_count = 0;
mt7921_mac_update_mib_stats(phy);
}
- if (++phy->sta_work_count == 4) {
- phy->sta_work_count = 0;
- mt7921_mac_sta_stats_work(phy);
- }
mt7921_mutex_release(phy->dev);
ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
@@ -1404,8 +1469,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt7921_tx_cleanup(dev);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7921_WATCHDOG_TIME);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7921_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
@@ -1416,13 +1482,15 @@ void mt7921_pm_power_save_work(struct work_struct *work)
{
struct mt7921_dev *dev;
unsigned long delta;
+ struct mt76_phy *mphy;
dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
pm.ps_work.work);
+ mphy = dev->phy.mt76;
delta = dev->pm.idle_timeout;
- if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
+ if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1430,8 +1498,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
goto out;
}
- if (!mt7921_mcu_fw_pmctrl(dev))
+ if (!mt7921_mcu_fw_pmctrl(dev)) {
+ cancel_delayed_work_sync(&mphy->mac_work);
return;
+ }
out:
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
@@ -1493,7 +1563,7 @@ void mt7921_coredump_work(struct work_struct *work)
break;
skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
dev_kfree_skb(skb);
continue;
}
@@ -1503,7 +1573,10 @@ void mt7921_coredump_work(struct work_struct *work)
dev_kfree_skb(skb);
}
- dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
- GFP_KERNEL);
+
+ if (dump)
+ dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+ GFP_KERNEL);
+
mt7921_reset(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 109c8849d106..3af67fac213d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -88,6 +88,9 @@ enum rx_pkt_type {
/* RXD DW4 */
#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
#define MT_RXD4_NORMAL_CLS BIT(10)
#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
@@ -97,6 +100,17 @@ enum rx_pkt_type {
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
+#define MT_RXD6_TA_LO GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
+
/* P-RXV DW0 */
#define MT_PRXV_TX_RATE GENMASK(6, 0)
#define MT_PRXV_TX_DCM BIT(4)
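The new RXD GROUP4 definitions above expose the received frame's original header words (frame control, TA, sequence control, HT control) as masked fields of 32-bit descriptor words; decoding is a plain mask-and-shift, as with the kernel's GENMASK()/FIELD_GET(). A small standalone sketch with portable stand-ins (the sample word is fabricated):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET32(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))

#define RXD6_FRAME_CONTROL	GENMASK32(15, 0)
#define RXD6_TA_LO		GENMASK32(31, 16)

int main(void)
{
	uint32_t rxd6 = 0xabcd0842;	/* fabricated example word */

	printf("fc=0x%04x ta_lo=0x%04x\n",
	       FIELD_GET32(RXD6_FRAME_CONTROL, rxd6),
	       FIELD_GET32(RXD6_TA_LO, rxd6));
	return 0;
}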
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index f4c27aa41048..7fd21049ff5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -74,19 +74,19 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
else if (band == NL80211_BAND_5GHZ)
he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
he_cap_elem->phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
switch (i) {
case NL80211_IFTYPE_STATION:
- he_cap_elem->mac_cap_info[0] |=
- IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@@ -103,7 +103,15 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[3] |=
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[4] |=
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ he_cap_elem->phy_cap_info[5] |=
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
@@ -224,54 +232,6 @@ static void mt7921_stop(struct ieee80211_hw *hw)
mt7921_mutex_release(dev);
}
-static inline int get_free_idx(u32 mask, u8 start, u8 end)
-{
- return ffs(~mask & GENMASK(end, start));
-}
-
-static int get_omac_idx(enum nl80211_iftype type, u64 mask)
-{
- int i;
-
- switch (type) {
- case NL80211_IFTYPE_STATION:
- /* prefer hw bssid slot 1-3 */
- i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
- if (i)
- return i - 1;
-
- /* next, try to find a free repeater entry for the sta */
- i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
- REPEATER_BSSID_MAX - REPEATER_BSSID_START);
- if (i)
- return i + 32 - 1;
-
- i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
- if (i)
- return i - 1;
-
- if (~mask & BIT(HW_BSSID_0))
- return HW_BSSID_0;
-
- break;
- case NL80211_IFTYPE_MONITOR:
- /* ap uses hw bssid 0 and ext bssid */
- if (~mask & BIT(HW_BSSID_0))
- return HW_BSSID_0;
-
- i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
- if (i)
- return i - 1;
-
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return -1;
-}
-
static int mt7921_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -293,12 +253,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
goto out;
}
- idx = get_omac_idx(vif->type, phy->omac_mask);
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
- mvif->mt76.omac_idx = idx;
+ mvif->mt76.omac_idx = mvif->mt76.idx;
mvif->phy = phy;
mvif->mt76.band_idx = 0;
mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
@@ -370,7 +325,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
-int mt7921_set_channel(struct mt7921_phy *phy)
+static int mt7921_set_channel(struct mt7921_phy *phy)
{
struct mt7921_dev *dev = phy->dev;
int ret;
@@ -430,6 +385,10 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
wcid_keyidx = &wcid->hw_key_idx2;
break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (!mvif->wep_sta)
+ return -EOPNOTSUPP;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -437,8 +396,6 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
default:
return -EOPNOTSUPP;
}
@@ -456,6 +413,12 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
cmd == SET_KEY ? key : NULL);
err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ if (err)
+ goto out;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
out:
mt7921_mutex_release(dev);
@@ -478,6 +441,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt7921_mutex_acquire(dev);
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
@@ -623,7 +589,8 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
mt7921_mcu_uni_bss_ps(dev, vif);
if (changed & BSS_CHANGED_ASSOC) {
- mt7921_mcu_sta_add(dev, NULL, vif, true);
+ mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_ASSOC);
mt7921_bss_bcnft_apply(dev, vif, info->assoc);
}
@@ -662,14 +629,14 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (ret)
return ret;
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ mvif->wep_sta = msta;
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ret = mt7921_mcu_sta_add(dev, sta, vif, true);
+ ret = mt7921_mcu_sta_update(dev, sta, vif, true,
+ MT76_STA_INFO_STATE_NONE);
if (ret)
return ret;
@@ -678,6 +645,27 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
}
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+ mt7921_mutex_acquire(dev);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+ true);
+
+ mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
+
+ mt7921_mutex_release(dev);
+}
+
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
@@ -687,13 +675,14 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- mt7921_mcu_sta_add(dev, sta, vif, false);
+ mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (vif->type == NL80211_IFTYPE_STATION) {
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ mvif->wep_sta = NULL;
ewma_rssi_init(&mvif->rssi);
if (!sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
@@ -721,7 +710,7 @@ void mt7921_tx_worker(struct mt76_worker *w)
}
mt76_txq_schedule_all(&dev->mphy);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static void mt7921_tx(struct ieee80211_hw *hw,
@@ -751,7 +740,7 @@ static void mt7921_tx(struct ieee80211_hw *hw,
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
- mt76_connac_pm_unref(&dev->pm);
+ mt76_connac_pm_unref(mphy, &dev->pm);
return;
}
@@ -832,20 +821,21 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
-static int
-mt7921_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int mt7921_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
-static int
-mt7921_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
+ if (dev->pm.ds_enable) {
+ mt7921_mutex_acquire(dev);
+ mt76_connac_sta_state_dp(&dev->mt76, old_state, new_state);
+ mt7921_mutex_release(dev);
+ }
+
+ return mt76_sta_state(hw, vif, sta, old_state, new_state);
}
static int
@@ -1164,6 +1154,23 @@ static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
HZ / 2);
}
+static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+ mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1174,10 +1181,10 @@ const struct ieee80211_ops mt7921_ops = {
.conf_tx = mt7921_conf_tx,
.configure_filter = mt7921_configure_filter,
.bss_info_changed = mt7921_bss_info_changed,
- .sta_add = mt7921_sta_add,
- .sta_remove = mt7921_sta_remove,
+ .sta_state = mt7921_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7921_set_key,
+ .sta_set_decap_offload = mt7921_sta_set_decap_offload,
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
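The main.c changes replace the paired .sta_add/.sta_remove callbacks with a single .sta_state handler: the old add/remove behavior corresponds to the NOTEXIST<->NONE transitions, and the newly added mt7921_mac_sta_assoc() work hangs off the association step. A compressed sketch of that dispatch shape (enum values and helper names here are hypothetical, not mac80211's):

#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC };

static int  demo_sta_add(void)    { puts("add");    return 0; }
static int  demo_sta_remove(void) { puts("remove"); return 0; }
static void demo_sta_assoc(void)  { puts("assoc"); }

static int demo_sta_state(enum sta_state old, enum sta_state new)
{
	if (old == STA_NOTEXIST && new == STA_NONE)
		return demo_sta_add();		/* what .sta_add used to do */
	if (old == STA_NONE && new == STA_NOTEXIST)
		return demo_sta_remove();	/* what .sta_remove used to do */
	if (old == STA_AUTH && new == STA_ASSOC)
		demo_sta_assoc();		/* per-association hook */
	return 0;
}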
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 5f3d56d570a5..c2c4dc196802 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -88,28 +88,28 @@ struct mt7921_fw_region {
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
-static enum mt7921_cipher_type
+static enum mcu_cipher_type
mt7921_mcu_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
+ return MCU_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
+ return MCU_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
+ return MCU_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
- return MT_CIPHER_BIP_CMAC_128;
+ return MCU_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
+ return MCU_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
- return MT_CIPHER_CCMP_256;
+ return MCU_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
- return MT_CIPHER_GCMP;
+ return MCU_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
- return MT_CIPHER_GCMP_256;
+ return MCU_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
- return MT_CIPHER_WAPI;
+ return MCU_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
}
@@ -399,40 +399,6 @@ mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy,
}
static void
-mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
- u16 wlan_idx)
-{
- struct mt7921_mcu_wlan_info_event *wtbl_info =
- (struct mt7921_mcu_wlan_info_event *)(skb->data);
- struct rate_info rate = {};
- u8 curr_idx = wtbl_info->rate_info.rate_idx;
- u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
- struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
- struct mt76_phy *mphy = &dev->mphy;
- struct mt7921_sta_stats *stats;
- struct mt7921_sta *msta;
- struct mt76_wcid *wcid;
-
- if (wlan_idx >= MT76_N_WCIDS)
- return;
-
- rcu_read_lock();
-
- wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
- if (!wcid)
- goto out;
-
- msta = container_of(wcid, struct mt7921_sta, wcid);
- stats = &msta->stats;
-
- /* current rate */
- mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
- stats->tx_rate = rate;
-out:
- rcu_read_unlock();
-}
-
-static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -447,22 +413,33 @@ mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
static void
-mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_connac_beacon_loss_event *event = priv;
+
+ if (mvif->idx != event->bss_idx)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+static void
+mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt76_connac_beacon_loss_event *event;
- struct mt76_phy *mphy;
- u8 band_idx = 0; /* DBDC support */
+ struct mt76_phy *mphy = &dev->mt76.phy;
skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
event = (struct mt76_connac_beacon_loss_event *)skb->data;
- if (band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
- else
- mphy = &dev->mt76.phy;
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76_connac_mcu_beacon_loss_iter, event);
+ mt7921_mcu_connection_loss_iter, event);
}
static void
@@ -521,13 +498,56 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
static void
+mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_tx_done_event *event;
+ struct mt7921_sta *msta;
+ struct mt7921_phy *mphy = &dev->phy;
+ struct mt7921_mcu_peer_cap peer;
+ struct ieee80211_sta *sta;
+ LIST_HEAD(list);
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ event = (struct mt7921_mcu_tx_done_event *)skb->data;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&mphy->stats_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7921_sta, stats_list);
+ list_del_init(&msta->stats_list);
+
+ if (msta->wcid.idx != event->wlan_idx)
+ continue;
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ sta = wcid_to_sta(&msta->wcid);
+
+ /* peer config based on the IEEE spec */
+ memset(&peer, 0x0, sizeof(peer));
+ peer.bw = event->bw;
+ peer.g2 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ peer.g4 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ peer.g8 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ peer.g16 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ mt7921_mcu_tx_rate_parse(mphy->mt76, &peer,
+ &msta->stats.tx_rate, event->tx_rate);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_EVENT_BSS_BEACON_LOSS:
- mt7921_mcu_beacon_loss_event(dev, skb);
+ mt7921_mcu_connection_loss_event(dev, skb);
break;
case MCU_EVENT_SCHED_SCAN_DONE:
case MCU_EVENT_SCAN_DONE:
@@ -546,6 +566,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
case MCU_EVENT_LP_INFO:
mt7921_mcu_low_power_event(dev, skb);
break;
+ case MCU_EVENT_TX_DONE:
+ mt7921_mcu_tx_done_event(dev, skb);
+ break;
default:
break;
}
@@ -566,6 +589,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
rxd->eid == MCU_EVENT_BSS_ABSENCE ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_TX_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
rxd->eid == MCU_EVENT_COREDUMP ||
rxd->eid == MCU_EVENT_LP_INFO ||
@@ -601,14 +625,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
- if (cipher == MT_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
sec_key->key_id = bip->keyidx;
sec_key->key_len = 16;
memcpy(sec_key->key, bip->key, 16);
sec_key = &sec->key[1];
- sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
memcpy(sec_key->key, key->key, 16);
@@ -620,14 +644,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
- if (cipher == MT_CIPHER_TKIP) {
+ if (cipher == MCU_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(sec_key->key + 16, key->key + 24, 8);
memcpy(sec_key->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MT_CIPHER_AES_CCMP) {
+ if (cipher == MCU_CIPHER_AES_CCMP) {
memcpy(bip->key, key->key, key->keylen);
bip->keyidx = key->keyidx;
}
@@ -931,8 +955,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */
- clear_bit(MT76_STATE_PM, &dev->mphy.state);
-
dev_err(dev->mt76.dev, "Firmware init done\n");
return 0;
@@ -966,7 +988,7 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
mt7921_mcu_fw_log_2_host(dev, 1);
- return 0;
+ return mt76_connac_mcu_get_nic_capability(&dev->mphy);
}
int mt7921_mcu_init(struct mt7921_dev *dev)
@@ -1133,26 +1155,6 @@ int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
return 0;
}
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx)
-{
- struct mt7921_mcu_wlan_info wtbl_info = {
- .wlan_idx = cpu_to_le32(wlan_idx),
- };
- struct sk_buff *skb;
- int ret;
-
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_WTBL,
- &wtbl_info, sizeof(wtbl_info), true,
- &skb);
- if (ret)
- return ret;
-
- mt7921_mcu_tx_rate_report(dev, skb, wlan_idx);
- dev_kfree_skb(skb);
-
- return 0;
-}
-
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1265,8 +1267,9 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
sizeof(req), false);
}
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif, bool enable)
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, bool enable,
+ enum mt76_sta_info_state state)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
int rssi = -ewma_rssi_read(&mvif->rssi);
@@ -1275,27 +1278,25 @@ int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
.vif = vif,
.enable = enable,
.cmd = MCU_UNI_CMD_STA_REC_UPDATE,
+ .state = state,
+ .offload_fw = true,
.rcpi = to_rcpi(rssi),
};
struct mt7921_sta *msta;
msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+ info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
- return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
+ return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}
-int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
- mutex_lock(&pm->mutex);
-
- if (!test_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
@@ -1316,6 +1317,22 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
out:
+ return err;
+}
+
+int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err = 0;
+
+ mutex_lock(&pm->mutex);
+
+ if (!test_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ err = __mt7921_mcu_drv_pmctrl(dev);
+out:
mutex_unlock(&pm->mutex);
if (err)
@@ -1365,6 +1382,7 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7921_phy *phy = priv;
struct mt7921_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = mt76_hw(dev);
int ret;
if (dev->pm.enable)
@@ -1377,9 +1395,11 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (dev->pm.enable) {
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
} else {
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
}
}
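The pmctrl rework above follows the usual kernel locking split: __mt7921_mcu_drv_pmctrl() carries the register handshake and assumes the caller already holds the PM mutex, while mt7921_mcu_drv_pmctrl() keeps the lock-and-state-check wrapper, so paths that already own the mutex can reuse the body. A pthread-based sketch of the shape (all names illustrative):

#include <pthread.h>

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
static int pm_asleep = 1;	/* toy stand-in for MT76_STATE_PM */

/* Double-underscore variant: caller must hold pm_lock. */
static int __drv_pmctrl(void)
{
	pm_asleep = 0;		/* ...hardware wake handshake goes here... */
	return 0;
}

static int drv_pmctrl(void)
{
	int err = 0;

	pthread_mutex_lock(&pm_lock);
	if (pm_asleep)		/* nothing to do if already awake */
		err = __drv_pmctrl();
	pthread_mutex_unlock(&pm_lock);
	return err;
}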
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 49823d0a3d0a..d76cf8f8dfdf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -81,6 +81,7 @@ enum {
MCU_EVENT_REG_ACCESS = 0x05,
MCU_EVENT_LP_INFO = 0x07,
MCU_EVENT_SCAN_DONE = 0x0d,
+ MCU_EVENT_TX_DONE = 0x0f,
MCU_EVENT_BSS_ABSENCE = 0x11,
MCU_EVENT_BSS_BEACON_LOSS = 0x13,
MCU_EVENT_CH_PRIVILEGE = 0x18,
@@ -197,18 +198,17 @@ struct sta_rec_sec {
struct sec_key key[2];
} __packed;
-enum mt7921_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_WEP104,
- MT_CIPHER_WEP128,
- MT_CIPHER_TKIP,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_CCMP_256,
- MT_CIPHER_GCMP,
- MT_CIPHER_GCMP_256,
- MT_CIPHER_WAPI,
- MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+ MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_WEP104,
+ MCU_CIPHER_WEP128,
+ MCU_CIPHER_TKIP,
+ MCU_CIPHER_AES_CCMP,
+ MCU_CIPHER_CCMP_256,
+ MCU_CIPHER_GCMP,
+ MCU_CIPHER_GCMP_256,
+ MCU_CIPHER_WAPI,
+ MCU_CIPHER_BIP_CMAC_128,
};
enum {
@@ -254,86 +254,6 @@ struct mt7921_mcu_reg_event {
__le32 val;
} __packed;
-struct mt7921_mcu_tx_config {
- u8 peer_addr[ETH_ALEN];
- u8 sw;
- u8 dis_rx_hdr_tran;
-
- u8 aad_om;
- u8 pfmu_idx;
- __le16 partial_aid;
-
- u8 ibf;
- u8 ebf;
- u8 is_ht;
- u8 is_vht;
-
- u8 mesh;
- u8 baf_en;
- u8 cf_ack;
- u8 rdg_ba;
-
- u8 rdg;
- u8 pm;
- u8 rts;
- u8 smps;
-
- u8 txop_ps;
- u8 not_update_ipsm;
- u8 skip_tx;
- u8 ldpc;
-
- u8 qos;
- u8 from_ds;
- u8 to_ds;
- u8 dyn_bw;
-
- u8 amdsu_cross_lg;
- u8 check_per;
- u8 gid_63;
- u8 he;
-
- u8 vht_ibf;
- u8 vht_ebf;
- u8 vht_ldpc;
- u8 he_ldpc;
-} __packed;
-
-struct mt7921_mcu_sec_config {
- u8 wpi_flag;
- u8 rv;
- u8 ikv;
- u8 rkv;
-
- u8 rcid;
- u8 rca1;
- u8 rca2;
- u8 even_pn;
-
- u8 key_id;
- u8 muar_idx;
- u8 cipher_suit;
- u8 rsv[1];
-} __packed;
-
-struct mt7921_mcu_key_config {
- u8 key[32];
-} __packed;
-
-struct mt7921_mcu_rate_info {
- u8 mpdu_fail;
- u8 mpdu_tx;
- u8 rate_idx;
- u8 rsv[1];
- __le16 rate[8];
-} __packed;
-
-struct mt7921_mcu_ba_config {
- u8 ba_en;
- u8 rsv[3];
- __le32 ba_winsize;
-} __packed;
-
struct mt7921_mcu_ant_id_config {
u8 ant_id[4];
} __packed;
@@ -357,41 +277,6 @@ struct mt7921_mcu_peer_cap {
u8 rsv[1];
} __packed;
-struct mt7921_mcu_rx_cnt {
- u8 rx_rcpi[4];
- u8 rx_cc[4];
- u8 rx_cc_sel;
- u8 ce_rmsd;
- u8 rsv[2];
-} __packed;
-
-struct mt7921_mcu_tx_cnt {
- __le16 rate1_cnt;
- __le16 rate1_fail_cnt;
- __le16 rate2_cnt;
- __le16 rate3_cnt;
- __le16 cur_bw_tx_cnt;
- __le16 cur_bw_tx_fail_cnt;
- __le16 other_bw_tx_cnt;
- __le16 other_bw_tx_fail_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info_event {
- struct mt7921_mcu_tx_config tx_config;
- struct mt7921_mcu_sec_config sec_config;
- struct mt7921_mcu_key_config key_config;
- struct mt7921_mcu_rate_info rate_info;
- struct mt7921_mcu_ba_config ba_config;
- struct mt7921_mcu_peer_cap peer_cap;
- struct mt7921_mcu_rx_cnt rx_cnt;
- struct mt7921_mcu_tx_cnt tx_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info {
- __le32 wlan_idx;
- struct mt7921_mcu_wlan_info_event event;
-} __packed;
-
struct mt7921_txpwr_req {
u8 ver;
u8 action;
@@ -407,4 +292,31 @@ struct mt7921_txpwr_event {
struct mt7921_txpwr txpwr;
} __packed;
+struct mt7921_mcu_tx_done_event {
+ u8 pid;
+ u8 status;
+ u16 seq;
+
+ u8 wlan_idx;
+ u8 tx_cnt;
+ u16 tx_rate;
+
+ u8 flag;
+ u8 tid;
+ u8 rsp_rate;
+ u8 mcs;
+
+ u8 bw;
+ u8 tx_pwr;
+ u8 reason;
+ u8 rsv0[1];
+
+ u32 delay;
+ u32 timestamp;
+ u32 applied_flag;
+
+ u8 txs[28];
+
+ u8 rsv1[32];
+} __packed;
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 59862ea4951c..2d8bd6bfc820 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -92,6 +92,8 @@ struct mt7921_sta {
unsigned long ampdu_state;
struct mt7921_sta_key_conf bip;
+
+ unsigned long next_txs_ts;
};
DECLARE_EWMA(rssi, 10, 8);
@@ -100,6 +102,8 @@ struct mt7921_vif {
struct mt76_vif mt76; /* must be first */
struct mt7921_sta sta;
+ struct mt7921_sta *wep_sta;
+
struct mt7921_phy *phy;
struct ewma_rssi rssi;
@@ -156,6 +160,8 @@ struct mt7921_dev {
u16 chainmask;
struct work_struct reset_work;
+ bool hw_full_reset:1;
+ bool hw_init_done:1;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -256,9 +262,9 @@ int mt7921_mcu_init(struct mt7921_dev *dev);
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
-int mt7921_set_channel(struct mt7921_phy *phy);
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif, bool enable);
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, bool enable,
+ enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
@@ -318,7 +324,7 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
-void mt7921_mac_init(struct mt7921_dev *dev);
+int mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -330,6 +336,8 @@ void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb);
void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_work(struct work_struct *work);
@@ -352,7 +360,7 @@ void mt7921_stats_work(struct work_struct *work);
void mt7921_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
-void mt7921_update_channel(struct mt76_dev *mdev);
+void mt7921_update_channel(struct mt76_phy *mphy);
int mt7921_init_debugfs(struct mt7921_dev *dev);
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
@@ -362,12 +370,12 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable);
void mt7921_scan_work(struct work_struct *work);
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx);
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index fa02d934f0bf..c3905bcab360 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -106,6 +106,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
+ .sta_assoc = mt7921_mac_sta_assoc,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
@@ -188,22 +189,29 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
bool hif_suspend;
int i, err;
- err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ pm->suspended = true;
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+
+ err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto restore_suspend;
hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
if (hif_suspend) {
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
- return err;
+ goto restore_suspend;
}
- if (!dev->pm.enable)
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
+ /* always enable deep sleep during suspend to reduce
+ * power consumption
+ */
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
napi_disable(&mdev->tx_napi);
mt76_worker_disable(&mdev->tx_worker);
@@ -231,27 +239,30 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
err = mt7921_mcu_fw_pmctrl(dev);
if (err)
- goto restore;
+ goto restore_napi;
pci_save_state(pdev);
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (err)
- goto restore;
+ goto restore_napi;
return 0;
-restore:
+restore_napi:
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
- if (!dev->pm.enable)
+ if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
if (hif_suspend)
mt76_connac_mcu_set_hif_suspend(mdev, false);
+restore_suspend:
+ pm->suspended = false;
+
return err;
}
@@ -259,8 +270,10 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
int i, err;
+ pm->suspended = false;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
return err;
@@ -291,7 +304,8 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
- if (!dev->pm.enable)
+ /* restore previous ds setting */
+ if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
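The reworked suspend path above is a textbook goto unwind: the suspended flag is set up front, and each later failure jumps to a label that undoes exactly the steps that completed (restore_napi re-enables the queues, restore_suspend clears the flag). A compact sketch of the idiom with hypothetical step functions:

static int suspended;

static int  step_wake_fw(void)	{ return 0; }	/* stub steps for the sketch */
static void napi_off(void)	{ }
static void napi_on(void)	{ }
static int  step_fw_own(void)	{ return -1; }	/* pretend this one fails */

static int demo_suspend(void)
{
	int err;

	suspended = 1;
	err = step_wake_fw();
	if (err)
		goto restore_suspend;

	napi_off();
	err = step_fw_own();
	if (err)
		goto restore_napi;	/* undo only what already succeeded */

	return 0;

restore_napi:
	napi_on();
restore_suspend:
	suspended = 0;
	return err;
}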
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index a18d2896ee1f..783a15635ec5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -184,9 +184,6 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
if (!q->queued)
wake_up(&dev->tx_wait);
- if (!mcu)
- mt76_txq_schedule(&dev->phy, q->qid);
-
return nframes;
}
@@ -195,19 +192,28 @@ static void mt76s_status_worker(struct mt76_worker *w)
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
status_worker);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ bool resched = false;
int i, nframes;
do {
+ int ndata_frames = 0;
+
nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
for (i = 0; i <= MT_TXQ_PSD; i++)
- nframes += mt76s_process_tx_queue(dev,
- dev->phy.q_tx[i]);
+ ndata_frames += mt76s_process_tx_queue(dev,
+ dev->phy.q_tx[i]);
+ nframes += ndata_frames;
+ if (ndata_frames > 0)
+ resched = true;
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->sdio.stat_work);
} while (nframes > 0);
+
+ if (resched)
+ mt76_worker_schedule(&dev->sdio.txrx_worker);
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -256,6 +262,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[q->head].skb = tx_info.skb;
q->entry[q->head].buf_sz = len;
+ q->entry[q->head].wcid = 0xffff;
smp_wmb();
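With the per-queue mt76_txq_schedule() call dropped from mt76s_process_tx_queue(), the status worker above now counts completed data frames itself and kicks the txrx worker once after the drain loop rather than rescheduling from inside it. The control flow, reduced to a sketch (queue and scheduler helpers are stubs):

#include <stdbool.h>

static int  drain_mcu_queue(void)   { return 0; }	/* stub: frames reaped */
static int  drain_data_queues(void) { return 0; }
static void kick_txrx_worker(void)  { }

static void status_work(void)
{
	bool resched = false;
	int nframes;

	do {
		int ndata = drain_data_queues();

		nframes = drain_mcu_queue() + ndata;
		if (ndata > 0)
			resched = true;	/* data completions freed tx room */
	} while (nframes > 0);

	if (resched)
		kick_txrx_worker();	/* single kick after draining */
}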
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 001d0ba5f73e..f73ffbd6e622 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -88,17 +88,8 @@ static void
mt76_testmode_free_skb(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
- struct sk_buff *skb = td->tx_skb;
-
- if (!skb)
- return;
- if (skb_has_frag_list(skb)) {
- kfree_skb_list(skb_shinfo(skb)->frag_list);
- skb_shinfo(skb)->frag_list = NULL;
- }
-
- dev_kfree_skb(skb);
+ dev_kfree_skb(td->tx_skb);
td->tx_skb = NULL;
}
@@ -158,19 +149,18 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
frag_len = MT_TXP_MAX_LEN;
frag = alloc_skb(frag_len, GFP_KERNEL);
- if (!frag)
+ if (!frag) {
+ mt76_testmode_free_skb(phy);
+ dev_kfree_skb(head);
return -ENOMEM;
+ }
__skb_put_zero(frag, frag_len);
head->len += frag->len;
head->data_len += frag->len;
- if (*frag_tail) {
- (*frag_tail)->next = frag;
- frag_tail = &frag;
- } else {
- *frag_tail = frag;
- }
+ *frag_tail = frag;
+ frag_tail = &(*frag_tail)->next;
}
mt76_testmode_free_skb(phy);
@@ -531,6 +521,14 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
u64 rx_fcs_error = 0;
int i;
+ if (dev->test_ops->dump_stats) {
+ int ret;
+
+ ret = dev->test_ops->dump_stats(phy, msg);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
rx_packets += td->rx_stats.packets[i];
rx_fcs_error += td->rx_stats.fcs_error[i];
@@ -545,9 +543,6 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
MT76_TM_STATS_ATTR_PAD))
return -EMSGSIZE;
- if (dev->test_ops->dump_stats)
- return dev->test_ops->dump_stats(phy, msg);
-
return 0;
}
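The frag-list fix above also swaps the branchy append for the classic tail-pointer idiom: write the new node through the indirect pointer, then advance the pointer to the new node's next field, so the empty-list case needs no special handling. The same idiom on a plain singly linked list:

#include <stdlib.h>

struct node { struct node *next; int val; };

static struct node *build_list(int n)
{
	struct node *head = NULL, **tail = &head;

	for (int i = 0; i < n; i++) {
		struct node *nd = calloc(1, sizeof(*nd));

		if (!nd)
			break;			/* a real caller would unwind */
		nd->val = i;
		*tail = nd;			/* append at the tail */
		tail = &(*tail)->next;		/* advance to the new tail slot */
	}
	return head;
}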
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 53ea8de82df0..f0f7a913eaab 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -54,11 +54,23 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
spin_unlock_bh(&dev->status_list.lock);
+ rcu_read_lock();
while ((skb = __skb_dequeue(list)) != NULL) {
+ struct ieee80211_tx_status status = {
+ .skb = skb,
+ .info = IEEE80211_SKB_CB(skb),
+ };
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ struct mt76_wcid *wcid;
+
+ wcid = rcu_dereference(dev->wcid[cb->wcid]);
+ if (wcid)
+ status.sta = wcid_to_sta(wcid);
+
hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_tx_status(hw, skb);
+ ieee80211_tx_status_ext(hw, &status);
}
-
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
@@ -80,7 +92,7 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
if (flags & MT_TX_CB_TXS_FAILED) {
- ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
}
@@ -117,12 +129,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
spin_lock_bh(&dev->status_list.lock);
memset(cb, 0, sizeof(*cb));
- wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
- if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
- wcid->packet_id == MT_PACKET_ID_NO_SKB)
- wcid->packet_id = MT_PACKET_ID_FIRST;
-
- pid = wcid->packet_id;
+ pid = mt76_get_next_pkt_id(wcid);
cb->wcid = wcid->idx;
cb->pktid = pid;
cb->jiffies = jiffies;
@@ -173,36 +180,37 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
static void
-mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76_wcid *wcid;
int pending;
- if (info->tx_time_est)
- return;
-
- if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+ if (!wcid || info->tx_time_est)
return;
- rcu_read_lock();
-
- wcid = rcu_dereference(dev->wcid[wcid_idx]);
- if (wcid) {
- pending = atomic_dec_return(&wcid->non_aql_packets);
- if (pending < 0)
- atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
- }
-
- rcu_read_unlock();
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
+ struct list_head *free_list)
{
+ struct ieee80211_tx_status status = {
+ .skb = skb,
+ .free_list = free_list,
+ };
+ struct mt76_wcid *wcid = NULL;
struct ieee80211_hw *hw;
struct sk_buff_head list;
- mt76_tx_check_non_aql(dev, wcid_idx, skb);
+ rcu_read_lock();
+
+ if (wcid_idx < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+
+ mt76_tx_check_non_aql(dev, wcid, skb);
#ifdef CONFIG_NL80211_TESTMODE
if (mt76_is_testmode_skb(dev, skb, &hw)) {
@@ -214,21 +222,25 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
wake_up(&dev->tx_wait);
dev_kfree_skb_any(skb);
- return;
+ goto out;
}
#endif
if (!skb->prev) {
hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_free_txskb(hw, skb);
- return;
+ status.sta = wcid_to_sta(wcid);
+ ieee80211_tx_status_ext(hw, &status);
+ goto out;
}
mt76_tx_status_lock(dev, &list);
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
mt76_tx_status_unlock(dev, &list);
+
+out:
+ rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
@@ -244,11 +256,15 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
non_aql = !info->tx_time_est;
idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
- if (idx < 0 || !sta || !non_aql)
+ if (idx < 0 || !sta)
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
q->entry[idx].wcid = wcid->idx;
+
+ if (!non_aql)
+ return idx;
+
pending = atomic_inc_return(&wcid->non_aql_packets);
if (stop && pending >= MT_MAX_NON_AQL_PKT)
*stop = true;
@@ -285,7 +301,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
skb_set_queue_mapping(skb, qid);
}
- if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
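The open-coded packet-id bump removed from mt76_tx_status_skb_add() is now hidden behind mt76_get_next_pkt_id(); the dropped lines show the logic it must keep: advance modulo the id mask and skip the reserved NO_ACK/NO_SKB encodings. A standalone sketch (constants mirror the removed code but are illustrative):

#include <stdint.h>

#define PKT_ID_MASK	0x7f
#define PKT_ID_NO_ACK	0x00	/* reserved: frame wants no status */
#define PKT_ID_NO_SKB	0x01	/* reserved: status without an skb */
#define PKT_ID_FIRST	0x02	/* first id actually handed out */

/* Per-station rolling packet id that never yields a reserved value. */
static uint8_t next_pkt_id(uint8_t *pid)
{
	*pid = (*pid + 1) & PKT_ID_MASK;
	if (*pid == PKT_ID_NO_ACK || *pid == PKT_ID_NO_SKB)
		*pid = PKT_ID_FIRST;
	return *pid;
}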
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 30bc54e98c58..1e9f60bb811a 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -925,6 +925,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
+ q->entry[idx].wcid = 0xffff;
q->queued++;
return idx;
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 6bcc4a13ae6c..cc772045d526 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
{ USB_DEVICE(0x2717, 0x4106) },
{ USB_DEVICE(0x2955, 0x0001) },
{ USB_DEVICE(0x2955, 0x1001) },
+ { USB_DEVICE(0x2955, 0x1003) },
{ USB_DEVICE(0x2a5f, 0x1000) },
{ USB_DEVICE(0x7392, 0x7710) },
{ 0, }
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 1472e9843896..8e9aaf03a6fa 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -164,7 +164,7 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->dev_irq_num = spi->irq;
- wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc");
if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
kfree(spi_priv);
return -EPROBE_DEFER;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 5264b0a1f098..deddb0afd312 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1037,7 +1037,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
* FIXME: if we do not find matching entry, we tell that frame was
* posted without any retries. We need to find a way to fix that
* and provide retry count.
- */
+ */
if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) {
rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band);
mcs = real_mcs;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index d4d389e8f1b4..fb1d31b2d52a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -446,8 +446,9 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
* Beacons and probe responses require the tsf timestamp
* to be inserted into the frame.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index d1a566cc0c9e..01735776345a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
u8 usb_optional_function;
u8 res9[2];
u8 mac_addr[ETH_ALEN]; /* 0xd7 */
- u8 res10[2];
- u8 vendor_name[7];
- u8 res11[2];
- u8 device_name[0x0b]; /* 0xe8 */
- u8 res12[2];
- u8 serial[0x0b]; /* 0xf5 */
- u8 res13[0x30];
+ u8 device_info[80];
+ u8 res11[3];
u8 unknown[0x0d]; /* 0x130 */
- u8 res14[0xc3];
+ u8 res12[0xc3];
};
struct rtl8xxxu_reg8val {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index cfe2dfdae928..b06508d0cdf8 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
}
+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+ char *record_name,
+ char *device_info,
+ unsigned int *record_offset)
+{
+ char *record = device_info + *record_offset;
+
+ /* A record is [ total length | 0x03 | value ] */
+ unsigned char l = record[0];
+
+ /*
+ * The whole device info section seems to be 80 characters; make sure
+ * we don't read past its end.
+ */
+ if (*record_offset + l > 80) {
+ dev_warn(&priv->udev->dev,
+ "invalid record length %d while parsing \"%s\" at offset %u.\n",
+ l, record_name, *record_offset);
+ return;
+ }
+
+ if (l >= 2) {
+ char value[80];
+
+ memcpy(value, &record[2], l - 2);
+ value[l - 2] = '\0';
+ dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+ *record_offset = *record_offset + l;
+ } else {
+ dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+ }
+}
+
static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+ unsigned int record_offset;
int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
priv->has_xtalk = 1;
priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
- if (memchr_inv(efuse->serial, 0xff, 11))
- dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
- else
- dev_info(&priv->udev->dev, "Serial not available.\n");
+ /*
+ * device_info section seems to be laid out as records
+ * [ total length | 0x03 | value ] so:
+ * - vendor length + 2
+ * - 0x03
+ * - vendor string (not null terminated)
+ * - product length + 2
+ * - 0x03
+ * - product string (not null terminated)
+ * Then there are one or two 0x00 bytes on all four devices I own or
+ * found dumped online.
+ * As previous versions of the code handled an optional serial
+ * string, I now assume there may be a third record if the
+ * length is not 0.
+ */
+ record_offset = 0;
+ rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+ rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+ rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
unsigned char *raw = priv->efuse_wifi.raw;
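Putting the comment above into code form: device_info is a run of [ total length | 0x03 | value ] records, walked front to back with the 80-byte bound enforced before each read. A self-contained userspace version of the same walk (layout per the comment; names are not the driver's):

#include <stdio.h>
#include <string.h>

#define INFO_SZ 80	/* documented size of the device_info section */

/* Print one [ total length | 0x03 | value ] record and advance *off. */
static void print_record(const unsigned char *info, unsigned int *off,
			 const char *name)
{
	unsigned char len = info[*off];
	char value[INFO_SZ];

	if (*off + len > INFO_SZ) {
		fprintf(stderr, "bad length %d for %s\n", len, name);
		return;
	}
	if (len < 2) {		/* empty record: header only, no value */
		printf("%s not available\n", name);
		return;
	}
	memcpy(value, &info[*off + 2], len - 2);
	value[len - 2] = '\0';
	printf("%s: %s\n", name, value);
	*off += len;
}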
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 9ff09cf7eb62..ac1061caacd6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5554,6 +5554,11 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
urb_len = skb->len;
pkt_cnt = 0;
+ if (urb_len < sizeof(struct rtl8xxxu_rxdesc16)) {
+ kfree_skb(skb);
+ return RX_TYPE_ERROR;
+ }
+
do {
rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data;
_rx_desc_le = (__le32 *)skb->data;
@@ -5581,7 +5586,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
* at least cover the rx descriptor
*/
if (pkt_cnt > 1 &&
- urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
+ urb_len >= (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5627,7 +5632,9 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_cnt--;
urb_len -= pkt_offset;
- } while (skb && urb_len > 0 && pkt_cnt > 0);
+ next_skb = NULL;
+ } while (skb && pkt_cnt > 0 &&
+ urb_len >= sizeof(struct rtl8xxxu_rxdesc16));
return RX_TYPE_DATA_PKT;
}
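The rxdesc16 fixes above tighten a length-prefixed parsing loop: bail out early when the URB cannot even hold one descriptor, require >= (not >) when testing whether another packet plus descriptor still fits, and re-check the remaining length in the loop condition. The same discipline on a generic [ u16 length | payload ] stream:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* Count [ u16 length | payload ] packets without reading past the end. */
static int walk_packets(const uint8_t *buf, size_t len)
{
	int n = 0;

	while (len >= sizeof(uint16_t)) {	/* header must fit first */
		uint16_t plen;
		size_t pkt;

		memcpy(&plen, buf, sizeof(plen));	/* unaligned-safe read */
		pkt = sizeof(plen) + plen;
		if (pkt > len)
			break;			/* truncated tail: stop here */
		n++;
		buf += pkt;
		len -= pkt;
	}
	return n;
}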
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 447caa4aad32..c5b8df58d4a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -1721,10 +1721,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 14);
coex_dm->ps_tdma_du_adj_type = 14;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 15);
- coex_dm->ps_tdma_du_adj_type = 15;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 15);
@@ -1739,10 +1735,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 10);
coex_dm->ps_tdma_du_adj_type = 10;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 11);
- coex_dm->ps_tdma_du_adj_type = 11;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 11);
@@ -1759,10 +1751,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 6);
coex_dm->ps_tdma_du_adj_type = 6;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 7);
- coex_dm->ps_tdma_du_adj_type = 7;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 7);
@@ -1777,10 +1765,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 2);
coex_dm->ps_tdma_du_adj_type = 2;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 3);
- coex_dm->ps_tdma_du_adj_type = 3;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 3);
@@ -2810,6 +2794,7 @@ static void btc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
@@ -2944,6 +2929,7 @@ static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
@@ -3132,6 +3118,7 @@ static void btc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ /* preserve identical branches for further fine-tuning */
if (wifi_bw == BTC_WIFI_BW_LEGACY) {
/* for HID at 11b/g mode */
btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
@@ -3321,6 +3308,7 @@ static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 7aa28da39409..7a0355dc6bab 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -167,7 +167,7 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_enc_algo;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case WEP40_ENCRYPTION:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 8d2c6d8d32d9..4ff0d4118193 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -112,7 +112,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
/**
- * writeLLT - LLT table write access
+ * rtl92c_llt_write - LLT table write access
* @hw: Pointer to the ieee80211_hw structure.
* @address: LLT logical address.
* @data: LLT data content
@@ -144,7 +144,7 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
}
/**
- * rtl92c_init_LLT_table - Init LLT table
+ * rtl92c_init_llt_table - Init LLT table
* @hw: Pointer to the ieee80211_hw structure.
* @boundary: Page boundary.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 68ec009ea157..76dd881ef9bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2574,7 +2574,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"path-B / 2.4G LCK\n");
}
- memset(&curvecount_val[0], 0, CV_CURVE_CNT * 2);
+ memset(curvecount_val, 0, sizeof(curvecount_val));
/* Set LC calibration off */
rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
0x08000, 0x0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 38034102aacb..e474b4ec17f3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -513,7 +513,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
/* This bit indicate this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
- /* For firmware downlaod we only need to set LINIP */
+ /* For firmware download we only need to set LINIP */
set_tx_desc_linip(pdesc, tcb_desc->last_inipkt);
/* 92SE must set as 1 for firmware download HW DMA error */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f8a1de6e9849..c98f2216734f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -915,7 +915,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- bool rtstatus = true;
+ bool rtstatus;
int err;
u8 tmp_u1b;
unsigned long flags;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index cedbf3825848..2551e228b581 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -591,8 +591,10 @@ void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
struct rtw_coex *coex = &rtwdev->coex;
u8 *payload = get_payload_from_coex_resp(skb);
- if (payload[0] != COEX_RESP_ACK_BY_WL_FW)
+ if (payload[0] != COEX_RESP_ACK_BY_WL_FW) {
+ dev_kfree_skb_any(skb);
return;
+ }
skb_queue_tail(&coex->queue, skb);
wake_up(&coex->wait);
@@ -630,20 +632,16 @@ static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type)
struct rtw_coex_info_req req = {0};
struct sk_buff *skb;
u8 *payload;
- bool ret = false;
req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
skb = rtw_coex_info_request(rtwdev, &req);
if (!skb)
- goto out;
+ return false;
payload = get_payload_from_coex_resp(skb);
*scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload);
dev_kfree_skb_any(skb);
- ret = true;
-
-out:
- return ret;
+ return true;
}
static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
@@ -651,19 +649,15 @@ static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
{
struct rtw_coex_info_req req = {0};
struct sk_buff *skb;
- bool ret = false;
req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
req.para1 = lna_constrain_level;
skb = rtw_coex_info_request(rtwdev, &req);
if (!skb)
- goto out;
+ return false;
dev_kfree_skb_any(skb);
- ret = true;
-
-out:
- return ret;
+ return true;
}
#define case_BTSTATUS(src) \
@@ -3523,6 +3517,7 @@ static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev,
payload = get_payload_from_coex_resp(skb);
*val = GET_COEX_RESP_BT_REG_VAL(payload);
+ dev_kfree_skb_any(skb);
return true;
}
@@ -3533,19 +3528,17 @@ static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev,
struct rtw_coex_info_req req = {0};
struct sk_buff *skb;
u8 *payload;
- bool ret = false;
req.op_code = BT_MP_INFO_OP_PATCH_VER;
skb = rtw_coex_info_request(rtwdev, &req);
if (!skb)
- goto out;
+ return false;
payload = get_payload_from_coex_resp(skb);
*patch_version = GET_COEX_RESP_BT_PATCH_VER(payload);
- ret = true;
+ dev_kfree_skb_any(skb);
-out:
- return ret;
+ return true;
}
static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
@@ -3554,19 +3547,17 @@ static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
struct rtw_coex_info_req req = {0};
struct sk_buff *skb;
u8 *payload;
- bool ret = false;
req.op_code = BT_MP_INFO_OP_SUPP_VER;
skb = rtw_coex_info_request(rtwdev, &req);
if (!skb)
- goto out;
+ return false;
payload = get_payload_from_coex_resp(skb);
*supported_version = GET_COEX_RESP_BT_SUPP_VER(payload);
- ret = true;
+ dev_kfree_skb_any(skb);
-out:
- return ret;
+ return true;
}
static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
@@ -3575,19 +3566,17 @@ static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
struct rtw_coex_info_req req = {0};
struct sk_buff *skb;
u8 *payload;
- bool ret = false;
req.op_code = BT_MP_INFO_OP_SUPP_FEAT;
skb = rtw_coex_info_request(rtwdev, &req);
if (!skb)
- goto out;
+ return false;
payload = get_payload_from_coex_resp(skb);
*supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload);
- ret = true;
+ dev_kfree_skb_any(skb);
-out:
- return ret;
+ return true;
}
struct rtw_coex_sta_stat_iter_data {
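All the coex hunks above enforce one ownership rule: rtw_coex_info_request() hands back an allocated skb, and every path that takes it, including early error returns and the ACK check, must free it exactly once. The rule in miniature, with plain malloc/free standing in for skbs:

#include <stdlib.h>

struct resp { unsigned char data[8]; };

static struct resp *info_request(void)
{
	return calloc(1, sizeof(struct resp));
}

/* The caller of info_request() owns the response on every path. */
static int get_field(unsigned char *out)
{
	struct resp *r = info_request();

	if (!r)
		return -1;	/* nothing allocated, nothing to free */
	*out = r->data[0];
	free(r);		/* freed on the success path too */
	return 0;
}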
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 18ab472ea46c..dfd52cff5d02 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -11,6 +11,7 @@
#include "debug.h"
#include "phy.h"
#include "reg.h"
+#include "ps.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -847,7 +848,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
if (!input)
return -EINVAL;
+ if (test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+ return -EINPROGRESS;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+ mutex_unlock(&rtwdev->mutex);
return count;
}
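The debugfs fw-crash trigger above gains two guards: it refuses to re-trigger while RTW_FLAG_RESTARTING is set, and it wakes the chip out of deep LPS under rtwdev->mutex before writing REG_HRCV_MSG, since register I/O is unreliable in deep power save. A standalone model of that ordering, with pthreads and C11 atomics standing in for the kernel primitives:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_bool restarting;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void leave_deep_ps(void) { /* stand-in for rtw_leave_lps_deep() */ }
static void write_trigger(void) { /* stand-in for rtw_write8(...) */ }

static int trigger_fw_crash(void)
{
        if (atomic_load(&restarting))
                return -EINPROGRESS;    /* a crash is already being handled */

        pthread_mutex_lock(&dev_lock);
        leave_deep_ps();        /* chip must be awake before register I/O */
        write_trigger();
        pthread_mutex_unlock(&dev_lock);
        return 0;
}

int main(void)
{
        return trigger_fw_crash();
}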
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index c8efd1900a34..0dd3f9a88c8d 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -20,6 +20,7 @@ enum rtw_debug_mask {
RTW_DBG_BF = 0x00000800,
RTW_DBG_WOW = 0x00001000,
RTW_DBG_CFO = 0x00002000,
+ RTW_DBG_PATH_DIV = 0x00004000,
RTW_DBG_ALL = 0xffffffff
};
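RTW_DBG_PATH_DIV takes the next free bit in the mask, and rtw_dbg() calls tagged with it print only when that bit is set in the runtime debug mask. A minimal sketch of the gating, with rtw_debug_mask as a hypothetical stand-in for the driver's module parameter:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define DBG_PATH_DIV 0x00004000u

static uint32_t rtw_debug_mask = DBG_PATH_DIV;

static void dbg(uint32_t mask, const char *fmt, ...)
{
        va_list ap;

        if (!(rtw_debug_mask & mask))
                return;         /* category disabled: no output */
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        dbg(DBG_PATH_DIV, "Switch TX path=%s\n", "A");
        return 0;
}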
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index ea2cd4db1d3c..3bfa5ecc0053 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -127,6 +127,62 @@ static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}
+struct rtw_beacon_filter_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 *payload;
+};
+
+static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_beacon_filter_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ u8 *payload = iter_data->payload;
+ u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
+ u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
+ s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);
+
+ switch (type) {
+ case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
+ event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
+ break;
+ case BCN_FILTER_CONNECTION_LOSS:
+ ieee80211_connection_loss(vif);
+ break;
+ case BCN_FILTER_CONNECTED:
+ rtwdev->beacon_loss = false;
+ break;
+ case BCN_FILTER_NOTIFY_BEACON_LOSS:
+ rtwdev->beacon_loss = true;
+ rtw_leave_lps(rtwdev);
+ break;
+ }
+}
+
+static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_beacon_filter_iter_data dev_iter_data;
+
+ dev_iter_data.rtwdev = rtwdev;
+ dev_iter_data.payload = payload;
+ rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
+ &dev_iter_data);
+}
+
+static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->scan_density = payload[0];
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
+ dm_info->scan_density);
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -152,6 +208,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_WLAN_INFO:
rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
break;
+ case C2H_BCN_FILTER_NOTIFY:
+ rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
@@ -186,6 +245,12 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
break;
case C2H_WLAN_RFON:
complete(&rtwdev->lps_leave_check);
+ dev_kfree_skb_any(skb);
+ break;
+ case C2H_SCAN_RESULT:
+ complete(&rtwdev->fw_scan_density);
+ rtw_fw_scan_result(rtwdev, c2h->payload, len);
+ dev_kfree_skb_any(skb);
break;
default:
/* pass offset for further operation */
@@ -527,6 +592,45 @@ void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
+ struct rtw_sta_info *si =
+ sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
+ s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
+ return;
+
+ if (!connect) {
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+ SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+ return;
+ }
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
+ ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+ memset(h2c_pkt, 0, sizeof(h2c_pkt));
+ threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+ SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+ SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
+ BCN_FILTER_OFFLOAD_MODE_DEFAULT);
+ SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
+ SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
+ SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
+ SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
+ SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -1613,3 +1717,13 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
+ SET_SCAN_START(h2c_pkt, start);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
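In rtw_fw_c2h_cmd_rx_irqsafe(), events that are fully consumed in interrupt context (C2H_WLAN_RFON, C2H_SCAN_RESULT) now free their skb before breaking out; everything else falls through to the deferred handler, which takes over ownership. A standalone model of that dispatch rule, assuming invented names:

#include <stdint.h>
#include <stdlib.h>

enum { EV_RFON = 0x32, EV_SCAN_RESULT = 0x38 };

struct buf { uint8_t id; };

static void queue_for_later(struct buf *b) { (void)b; /* ownership moves */ }
static void signal_completion(void) { }

static void c2h_rx_irqsafe(struct buf *b)
{
        switch (b->id) {
        case EV_RFON:
                signal_completion();
                free(b);        /* consumed here, so freed here */
                break;
        case EV_SCAN_RESULT:
                signal_completion();
                free(b);
                break;
        default:
                queue_for_later(b);     /* deferred handler will free it */
                break;
        }
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        b->id = EV_RFON;
        c2h_rx_irqsafe(b);      /* frees b itself */
        return 0;
}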
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 7c5b1d75e26f..a8a7162fbe64 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -24,6 +24,14 @@
#define DLFW_BLK_SIZE_LEGACY 4
#define FW_START_ADDR_LEGACY 0x1000
+#define BCN_LOSS_CNT 10
+#define BCN_FILTER_NOTIFY_SIGNAL_CHANGE 0
+#define BCN_FILTER_CONNECTION_LOSS 1
+#define BCN_FILTER_CONNECTED 2
+#define BCN_FILTER_NOTIFY_BEACON_LOSS 3
+
+#define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10)
+
enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
@@ -32,6 +40,8 @@ enum rtw_c2h_cmd_id {
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
C2H_WLAN_RFON = 0x32,
+ C2H_BCN_FILTER_NOTIFY = 0x36,
+ C2H_SCAN_RESULT = 0x38,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
};
@@ -78,9 +88,20 @@ enum rtw_fw_feature {
FW_FEATURE_LPS_C2H = BIT(1),
FW_FEATURE_LCLK = BIT(2),
FW_FEATURE_PG = BIT(3),
+ FW_FEATURE_BCN_FILTER = BIT(5),
+ FW_FEATURE_NOTIFY_SCAN = BIT(6),
FW_FEATURE_MAX = BIT(31),
};
+enum rtw_beacon_filter_offload_mode {
+ BCN_FILTER_OFFLOAD_MODE_0 = 0,
+ BCN_FILTER_OFFLOAD_MODE_1,
+ BCN_FILTER_OFFLOAD_MODE_2,
+ BCN_FILTER_OFFLOAD_MODE_3,
+
+ BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_1,
+};
+
struct rtw_coex_info_req {
u8 seq;
u8 op_code;
@@ -237,6 +258,10 @@ struct rtw_fw_hdr_legacy {
#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6])
#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1])
+#define GET_BCN_FILTER_NOTIFY_TYPE(c2h_payload) (c2h_payload[1] & 0xf)
+#define GET_BCN_FILTER_NOTIFY_EVENT(c2h_payload) (c2h_payload[1] & 0x10)
+#define GET_BCN_FILTER_NOTIFY_RSSI(c2h_payload) (c2h_payload[2] - 100)
+
/* PKT H2C */
#define H2C_PKT_CMD_ID 0xFF
#define H2C_PKT_CATEGORY 0x01
@@ -345,7 +370,10 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_LPS_PG_INFO 0x2b
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
+#define H2C_CMD_SCAN 0x59
#define H2C_CMD_COEX_TDMA_TYPE 0x60
#define H2C_CMD_QUERY_BT_INFO 0x61
@@ -381,6 +409,23 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
#define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(16))
+#define SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 17))
+#define SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 21))
+#define SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(3, 0))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(13, 4))
+
+#define SET_SCAN_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
@@ -554,6 +599,12 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
}
+static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
+ enum rtw_fw_feature feature)
+{
+ return !!(fw->feature & feature);
+}
+
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -577,6 +628,8 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+ struct ieee80211_vif *vif);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
@@ -607,5 +660,5 @@ void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
-
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
#endif
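The GET_BCN_FILTER_NOTIFY_* accessors above slice a single C2H payload: byte 1 carries the notify type in bits 3:0 and the event flag in bit 4, and byte 2 is an unsigned RSSI that the driver rebases by -100 dBm. A compilable model with one worked payload:

#include <assert.h>
#include <stdint.h>

static uint8_t notify_type(const uint8_t *p)  { return p[1] & 0x0f; }
static uint8_t notify_event(const uint8_t *p) { return p[1] & 0x10; }
static int8_t  notify_rssi(const uint8_t *p)  { return (int8_t)(p[2] - 100); }

int main(void)
{
        /* type 0 (signal change), event bit set, raw RSSI 30 -> -70 dBm */
        const uint8_t payload[] = { 0x00, 0x10, 30 };

        assert(notify_type(payload) == 0);
        assert(notify_event(payload) != 0);
        assert(notify_rssi(payload) == -70);
        return 0;
}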
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 333df6b38113..6f5629852416 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -153,6 +153,9 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
u8 port = 0;
u8 bcn_ctrl = 0;
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
rtwvif->port = port;
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
@@ -399,6 +402,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL,
BIT_EN_BCNQ_DL);
}
+ if (changed & BSS_CHANGED_CQM)
+ rtw_fw_beacon_filter_config(rtwdev, true, vif);
if (changed & BSS_CHANGED_MU_GROUPS)
rtw_chip_set_gid_table(rtwdev, vif, conf);
@@ -450,6 +455,7 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ rtw_fw_beacon_filter_config(rtwdev, false, vif);
mutex_lock(&rtwdev->mutex);
rtw_sta_remove(rtwdev, sta, true);
mutex_unlock(&rtwdev->mutex);
@@ -599,6 +605,7 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+ rtw_core_fw_scan_notify(rtwdev, true);
set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
@@ -618,6 +625,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+ rtw_core_fw_scan_notify(rtwdev, false);
+
ether_addr_copy(rtwvif->mac_addr, vif->addr);
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
@@ -629,7 +638,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 duration)
+ struct ieee80211_prep_tx_info *info)
{
struct rtw_dev *rtwdev = hw->priv;
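rtw_ops_add_interface() only advertises IEEE80211_VIF_BEACON_FILTER and IEEE80211_VIF_SUPPORTS_CQM_RSSI when the loaded firmware has FW_FEATURE_BCN_FILTER, so mac80211 keeps doing CQM tracking in software on older firmware. A sketch of that capability gate, with invented flag values:

#include <stdint.h>

#define VIF_BEACON_FILTER (1u << 0)
#define VIF_CQM_RSSI      (1u << 1)
#define FW_BCN_FILTER     (1u << 5)

struct fw  { uint32_t feature; };
struct vif { uint32_t driver_flags; };

static void add_interface(const struct fw *fw, struct vif *vif)
{
        if (fw->feature & FW_BCN_FILTER)
                vif->driver_flags |= VIF_BEACON_FILTER | VIF_CQM_RSSI;
        /* otherwise mac80211 falls back to software CQM tracking */
}

int main(void)
{
        struct fw fw = { .feature = FW_BCN_FILTER };
        struct vif vif = { 0 };

        add_interface(&fw, &vif);
        return vif.driver_flags == (VIF_BEACON_FILTER | VIF_CQM_RSSI) ? 0 : 1;
}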
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index f3a3a86fa9b5..c6364837e83b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/devcoredump.h>
+
#include "main.h"
#include "regd.h"
#include "fw.h"
@@ -239,7 +241,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
* get that vif and check if device is having traffic more than the
* threshold.
*/
- if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+ if (rtwdev->ps_enabled && data.rtwvif && !ps_active &&
+ !rtwdev->beacon_loss)
rtw_enter_lps(rtwdev, data.rtwvif->port);
rtwdev->watch_dog_cnt++;
@@ -292,6 +295,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
rtwdev->sta_cnt++;
+ rtwdev->beacon_loss = false;
rtw_info(rtwdev, "sta %pM joined with macid %d\n",
sta->addr, si->mac_id);
@@ -318,59 +322,131 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
sta->addr, si->mac_id);
}
-static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
+struct rtw_fwcd_hdr {
+ u32 item;
+ u32 size;
+ u32 padding1;
+ u32 padding2;
+} __packed;
+
+static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+ const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
+ u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
+ u8 i;
+
+ if (segs) {
+ prep_size += segs->num * sizeof(struct rtw_fwcd_hdr);
+
+ for (i = 0; i < segs->num; i++)
+ prep_size += segs->segs[i];
+ }
+
+ desc->data = vmalloc(prep_size);
+ if (!desc->data)
+ return -ENOMEM;
+
+ desc->size = prep_size;
+ desc->next = desc->data;
+
+ return 0;
+}
+
+static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size)
+{
+ struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+ struct rtw_fwcd_hdr *hdr;
+ u8 *next;
+
+ if (!desc->data) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n");
+ return NULL;
+ }
+
+ next = desc->next + sizeof(struct rtw_fwcd_hdr);
+ if (next - desc->data + size > desc->size) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n");
+ return NULL;
+ }
+
+ hdr = (struct rtw_fwcd_hdr *)(desc->next);
+ hdr->item = item;
+ hdr->size = size;
+ hdr->padding1 = 0x01234567;
+ hdr->padding2 = 0x89abcdef;
+ desc->next = next + size;
+
+ return next;
+}
+
+static void rtw_fwcd_dump(struct rtw_dev *rtwdev)
+{
+ struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n");
+
+ /* The data is freed when the device coredump reaches the end of its
+ * lifetime. Once dev_coredumpv() has been called, the data is owned by
+ * the device coredump framework. Note that a new dump will be discarded
+ * if a previous one has not been released yet.
+ */
+ dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL);
+}
+
+static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self)
+{
+ struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+ if (free_self) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n");
+ vfree(desc->data);
+ }
+
+ desc->data = NULL;
+ desc->next = NULL;
+}
+
+static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
{
u32 size = rtwdev->chip->fw_rxff_size;
u32 *buf;
u8 seq;
- bool ret = true;
- buf = vmalloc(size);
+ buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size);
if (!buf)
- goto exit;
+ return -ENOMEM;
if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) {
rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n");
- goto free_buf;
+ return -EINVAL;
}
if (GET_FW_DUMP_LEN(buf) == 0) {
rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n");
- goto free_buf;
+ return -EINVAL;
}
seq = GET_FW_DUMP_SEQ(buf);
- if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) {
+ if (seq > 0) {
rtw_dbg(rtwdev, RTW_DBG_FW,
"fw crash dump's seq is wrong: %d\n", seq);
- goto free_buf;
- }
-
- print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1,
- buf, size, true);
-
- if (GET_FW_DUMP_MORE(buf) == 1) {
- rtwdev->fw.prev_dump_seq = seq;
- ret = false;
+ return -EINVAL;
}
-free_buf:
- vfree(buf);
-exit:
- rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
-
- return ret;
+ return 0;
}
int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
- const char *prefix_str)
+ u32 fwcd_item)
{
u32 rxff = rtwdev->chip->fw_rxff_size;
u32 dump_size, done_size = 0;
u8 *buf;
int ret;
- buf = vzalloc(size);
+ buf = rtw_fwcd_next(rtwdev, fwcd_item, size);
if (!buf)
return -ENOMEM;
@@ -383,7 +459,7 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
rtw_err(rtwdev,
"ddma fw 0x%x [+0x%x] to fw fifo fail\n",
ocp_src, done_size);
- goto exit;
+ return ret;
}
ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0,
@@ -392,24 +468,18 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
rtw_err(rtwdev,
"dump fw 0x%x [+0x%x] from fw fifo fail\n",
ocp_src, done_size);
- goto exit;
+ return ret;
}
size -= dump_size;
done_size += dump_size;
}
- print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1,
- buf, done_size, true);
-
-exit:
- vfree(buf);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rtw_dump_fw);
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
- const char *prefix_str)
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size)
{
u8 *buf;
u32 i;
@@ -419,17 +489,13 @@ int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
return -EINVAL;
}
- buf = vzalloc(size);
+ buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size);
if (!buf)
return -ENOMEM;
for (i = 0; i < size; i += 4)
*(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i);
- print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf,
- size, true);
-
- vfree(buf);
return 0;
}
EXPORT_SYMBOL(rtw_dump_reg);
@@ -487,20 +553,24 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev)
static void __fw_recovery_work(struct rtw_dev *rtwdev)
{
-
- /* rtw_fw_dump_crash_log() returns false indicates that there are
- * still more log to dump. Driver set 0x1cf[7:0] = 0x1 to tell firmware
- * to dump the remaining part of the log, and firmware will trigger an
- * IMR_C2HCMD interrupt to inform driver the log is ready.
- */
- if (!rtw_fw_dump_crash_log(rtwdev)) {
- rtw_write8(rtwdev, REG_HRCV_MSG, 1);
- return;
- }
- rtwdev->fw.prev_dump_seq = 0;
+ int ret = 0;
set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
- rtw_chip_dump_fw_crash(rtwdev);
+
+ ret = rtw_fwcd_prep(rtwdev);
+ if (ret)
+ goto free;
+ ret = rtw_fw_dump_crash_log(rtwdev);
+ if (ret)
+ goto free;
+ ret = rtw_chip_dump_fw_crash(rtwdev);
+ if (ret)
+ goto free;
+
+ rtw_fwcd_dump(rtwdev);
+free:
+ rtw_fwcd_free(rtwdev, !!ret);
+ rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
WARN(1, "firmware crash, start reset and recover\n");
@@ -1109,11 +1179,11 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
return LPS_DEEP_MODE_NONE;
if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) &&
- (fw->feature & FW_FEATURE_PG))
+ rtw_fw_feature_check(fw, FW_FEATURE_PG))
return LPS_DEEP_MODE_PG;
if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) &&
- (fw->feature & FW_FEATURE_LCLK))
+ rtw_fw_feature_check(fw, FW_FEATURE_LCLK))
return LPS_DEEP_MODE_LCLK;
return LPS_DEEP_MODE_NONE;
@@ -1183,6 +1253,22 @@ err:
return ret;
}
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN))
+ return;
+
+ if (start) {
+ rtw_fw_scan_notify(rtwdev, true);
+ } else {
+ reinit_completion(&rtwdev->fw_scan_density);
+ rtw_fw_scan_notify(rtwdev, false);
+ if (!wait_for_completion_timeout(&rtwdev->fw_scan_density,
+ SCAN_NOTIFY_TIMEOUT))
+ rtw_warn(rtwdev, "firmware failed to report density after scan\n");
+ }
+}
+
int rtw_core_start(struct rtw_dev *rtwdev)
{
int ret;
@@ -1761,6 +1847,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
init_waitqueue_head(&rtwdev->coex.wait);
init_completion(&rtwdev->lps_leave_check);
+ init_completion(&rtwdev->fw_scan_density);
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
@@ -1812,6 +1899,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
destroy_workqueue(rtwdev->tx_wq);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
+ skb_queue_purge(&rtwdev->coex.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
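rtw_fwcd_prep() sizes a single vmalloc area to hold alternating (rtw_fwcd_hdr, payload) records, and rtw_fwcd_next() carves the next record out of it after checking that header plus payload still fit. A compilable userspace model of the same arithmetic, with invented names:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct hdr  { uint32_t item, size, pad1, pad2; };
struct desc { uint32_t size; uint8_t *next, *data; };

static int prep(struct desc *d, const uint32_t *segs, unsigned n)
{
        uint32_t total = 0;
        unsigned i;

        for (i = 0; i < n; i++)
                total += sizeof(struct hdr) + segs[i];

        d->data = malloc(total);
        if (!d->data)
                return -1;
        d->size = total;
        d->next = d->data;
        return 0;
}

static uint8_t *carve(struct desc *d, uint32_t item, uint32_t size)
{
        uint8_t *payload = d->next + sizeof(struct hdr);
        struct hdr *h;

        if ((uint32_t)(payload - d->data) + size > d->size)
                return NULL;    /* would overrun the prepared area */

        h = (struct hdr *)d->next;
        h->item = item;
        h->size = size;
        h->pad1 = 0x01234567;   /* same marker words the driver writes */
        h->pad2 = 0x89abcdef;
        d->next = payload + size;
        return payload;
}

int main(void)
{
        static const uint32_t segs[] = { 0x100, 0x200 };
        struct desc d;
        uint8_t *p;

        if (prep(&d, segs, 2))
                return 1;
        p = carve(&d, 0, 0x100);        /* first record */
        if (p)
                memset(p, 0, 0x100);
        free(d.data);
        return 0;
}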
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index dc3744847ba9..e5af375b3dd0 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -806,7 +806,7 @@ struct rtw_regulatory {
struct rtw_chip_ops {
int (*mac_init)(struct rtw_dev *rtwdev);
- void (*dump_fw_crash)(struct rtw_dev *rtwdev);
+ int (*dump_fw_crash)(struct rtw_dev *rtwdev);
void (*shutdown)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
void (*phy_set_param)(struct rtw_dev *rtwdev);
@@ -841,6 +841,10 @@ struct rtw_chip_ops {
u8 fixrate_en, u8 *new_rate);
void (*cfo_init)(struct rtw_dev *rtwdev);
void (*cfo_track)(struct rtw_dev *rtwdev);
+ void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
+ enum rtw_bb_path tx_path_1ss,
+ enum rtw_bb_path tx_path_cck,
+ bool is_tx2_path);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1108,6 +1112,15 @@ enum rtw_fw_fifo_sel {
RTW_FW_FIFO_MAX,
};
+enum rtw_fwcd_item {
+ RTW_FWCD_TLV,
+ RTW_FWCD_REG,
+ RTW_FWCD_ROM,
+ RTW_FWCD_IMEM,
+ RTW_FWCD_DMEM,
+ RTW_FWCD_EMEM,
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -1136,7 +1149,11 @@ struct rtw_chip_info {
u8 max_power_index;
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
+ const struct rtw_fwcd_segs *fwcd_segs;
+
+ u8 default_1ss_tx_path;
+ bool path_div_supported;
bool ht_supported;
bool vht_supported;
u8 lps_deep_mode_supported;
@@ -1614,6 +1631,8 @@ struct rtw_dm_info {
struct rtw_iqk_info iqk;
struct rtw_gapk_info gapk;
bool is_bt_iqk_timeout;
+
+ u8 scan_density;
};
struct rtw_efuse {
@@ -1717,6 +1736,17 @@ struct rtw_fifo_conf {
const struct rtw_rqpn *rqpn;
};
+struct rtw_fwcd_desc {
+ u32 size;
+ u8 *next;
+ u8 *data;
+};
+
+struct rtw_fwcd_segs {
+ const u32 *segs;
+ u8 num;
+};
+
#define FW_CD_TYPE 0xffff
#define FW_CD_LEN 4
#define FW_CD_VAL 0xaabbccdd
@@ -1724,11 +1754,11 @@ struct rtw_fw_state {
const struct firmware *firmware;
struct rtw_dev *rtwdev;
struct completion completion;
+ struct rtw_fwcd_desc fwcd_desc;
u16 version;
u8 sub_version;
u8 sub_index;
u16 h2c_version;
- u8 prev_dump_seq;
u32 feature;
};
@@ -1781,6 +1811,14 @@ struct rtw_hal {
[DESC_RATE_MAX];
};
+struct rtw_path_div {
+ enum rtw_bb_path current_tx_path;
+ u32 path_a_sum;
+ u32 path_b_sum;
+ u16 path_a_cnt;
+ u16 path_b_cnt;
+};
+
struct rtw_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -1837,6 +1875,7 @@ struct rtw_dev {
/* lps power state & handler work */
struct rtw_lps_conf lps_conf;
bool ps_enabled;
+ bool beacon_loss;
struct completion lps_leave_check;
struct dentry *debugfs;
@@ -1848,11 +1887,13 @@ struct rtw_dev {
DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
u8 mp_mode;
+ struct rtw_path_div dm_path_div;
struct rtw_fw_state wow_fw;
struct rtw_wow_param wow;
bool need_rfk;
+ struct completion fw_scan_density;
/* hci related data, must be last */
u8 priv[] __aligned(sizeof(void *));
@@ -1923,10 +1964,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
clear_bit(mac_id, rtwdev->mac_id_map);
}
-static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
+static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
{
if (rtwdev->chip->ops->dump_fw_crash)
- rtwdev->chip->ops->dump_fw_crash(rtwdev);
+ return rtwdev->chip->ops->dump_fw_crash(rtwdev);
+
+ return 0;
}
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1958,9 +2001,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist);
void rtw_fw_recovery(struct rtw_dev *rtwdev);
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
- const char *prefix_str);
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
- const char *prefix_str);
+ u32 fwcd_item);
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
#endif
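rtw_chip_dump_fw_crash() now returns an int: a chip that leaves the dump_fw_crash op NULL reports success trivially, and one that implements it propagates the op's error code into __fw_recovery_work(). A sketch of the optional-op pattern, with hypothetical types:

#include <stddef.h>

struct dev;

struct chip_ops {
        int (*dump_fw_crash)(struct dev *d);    /* may be NULL */
};

static int chip_dump_fw_crash(struct dev *d, const struct chip_ops *ops)
{
        if (ops->dump_fw_crash)
                return ops->dump_fw_crash(d);
        return 0;               /* op not implemented: nothing to dump */
}

static int noop_dump(struct dev *d) { (void)d; return 0; }

int main(void)
{
        struct chip_ops with = { .dump_fw_crash = noop_dump };
        struct chip_ops without = { 0 };

        return chip_dump_fw_crash(NULL, &with) |
               chip_dump_fw_crash(NULL, &without);
}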
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index f59a4c462e3b..e7d17ab8f113 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
@@ -1673,6 +1674,36 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
netif_napi_del(&rtwpci->napi);
}
+enum rtw88_quirk_dis_pci_caps {
+ QUIRK_DIS_PCI_CAP_MSI,
+ QUIRK_DIS_PCI_CAP_ASPM,
+};
+
+static int disable_pci_caps(const struct dmi_system_id *dmi)
+{
+ uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
+
+ if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
+ rtw_disable_msi = true;
+ if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
+ rtw_pci_disable_aspm = true;
+
+ return 1;
+}
+
+static const struct dmi_system_id rtw88_pci_quirks[] = {
+ {
+ .callback = disable_pci_caps,
+ .ident = "Protempo Ltd L116HTN6SPW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
+ },
+ .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
+ },
+ {}
+};
+
int rtw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1723,6 +1754,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
+ dmi_check_system(rtw88_pci_quirks);
rtw_pci_phy_cfg(rtwdev);
ret = rtw_register_hw(rtwdev, hw);
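The DMI quirk table matches the platform's vendor and product strings, and its callback flips the rtw_disable_msi / rtw_pci_disable_aspm module knobs before rtw_pci_phy_cfg() runs. A standalone model of the matching loop (all names invented except the quirk entry itself):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool disable_aspm;

struct quirk {
        const char *vendor, *product;
        uint32_t dis_caps;      /* bitmask of capabilities to disable */
};

#define CAP_ASPM (1u << 1)

static const struct quirk quirks[] = {
        { "Protempo Ltd", "L116HTN6SPW", CAP_ASPM },
};

static void apply_quirks(const char *vendor, const char *product)
{
        unsigned i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                if (strcmp(quirks[i].vendor, vendor) ||
                    strcmp(quirks[i].product, product))
                        continue;       /* not this platform */
                if (quirks[i].dis_caps & CAP_ASPM)
                        disable_aspm = true;
        }
}

int main(void)
{
        apply_quirks("Protempo Ltd", "L116HTN6SPW");
        return disable_aspm ? 0 : 1;
}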
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8146acaf1893..569dd3cfde35 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -127,6 +127,17 @@ static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
chip->ops->cfo_init(rtwdev);
}
+static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+
+ path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path;
+ path_div->path_a_cnt = 0;
+ path_div->path_a_sum = 0;
+ path_div->path_b_cnt = 0;
+ path_div->path_b_sum = 0;
+}
+
void rtw_phy_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -149,6 +160,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
dm_info->iqk.done = false;
rtw_phy_cfo_init(rtwdev);
+ rtw_phy_tx_path_div_init(rtwdev);
}
EXPORT_SYMBOL(rtw_phy_init);
@@ -695,6 +707,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_dig(rtwdev);
rtw_phy_cck_pd(rtwdev);
rtw_phy_ra_track(rtwdev);
+ rtw_phy_tx_path_diversity(rtwdev);
rtw_phy_cfo_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
@@ -2315,3 +2328,71 @@ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
return false;
}
EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
+
+static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
+ enum rtw_bb_path tx_path_sel_1ss)
+{
+ struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+ enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (tx_path_sel_1ss == path_div->current_tx_path)
+ return;
+
+ path_div->current_tx_path = tx_path_sel_1ss;
+ rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "Switch TX path=%s\n",
+ tx_path_sel_1ss == BB_PATH_A ? "A" : "B");
+ chip->ops->config_tx_path(rtwdev, rtwdev->hal.antenna_tx,
+ tx_path_sel_1ss, tx_path_sel_cck, false);
+}
+
+static void rtw_phy_tx_path_div_select(struct rtw_dev *rtwdev)
+{
+ struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+ enum rtw_bb_path path = path_div->current_tx_path;
+ s32 rssi_a = 0, rssi_b = 0;
+
+ if (path_div->path_a_cnt)
+ rssi_a = path_div->path_a_sum / path_div->path_a_cnt;
+ else
+ rssi_a = 0;
+ if (path_div->path_b_cnt)
+ rssi_b = path_div->path_b_sum / path_div->path_b_cnt;
+ else
+ rssi_b = 0;
+
+ if (rssi_a != rssi_b)
+ path = (rssi_a > rssi_b) ? BB_PATH_A : BB_PATH_B;
+
+ path_div->path_a_cnt = 0;
+ path_div->path_a_sum = 0;
+ path_div->path_b_cnt = 0;
+ path_div->path_b_sum = 0;
+ rtw_phy_set_tx_path_by_reg(rtwdev, path);
+}
+
+static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->hal.antenna_rx != BB_PATH_AB) {
+ rtw_dbg(rtwdev, RTW_DBG_PATH_DIV,
+ "skip diversity: tx_path_en=%d, rx_path_en=%d\n",
+ rtwdev->hal.antenna_tx, rtwdev->hal.antenna_rx);
+ return;
+ }
+ if (rtwdev->sta_cnt == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "No Link\n");
+ return;
+ }
+
+ rtw_phy_tx_path_div_select(rtwdev);
+}
+
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->path_div_supported)
+ return;
+
+ rtw_phy_tx_path_diversity_2ss(rtwdev);
+}
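rtw_phy_tx_path_div_select() averages the per-path RSSI sums that query_phy_status_page1() accumulates between watchdog ticks, switches the 1SS TX path to the stronger chain (keeping the current path on a tie), and zeroes the accumulators for the next window. A compilable model of that decision:

#include <stdint.h>

enum path { PATH_A, PATH_B };

struct path_div {
        enum path current_tx_path;
        uint32_t a_sum, b_sum;
        uint16_t a_cnt, b_cnt;
};

static enum path select_tx_path(struct path_div *pd)
{
        int32_t rssi_a = pd->a_cnt ? (int32_t)(pd->a_sum / pd->a_cnt) : 0;
        int32_t rssi_b = pd->b_cnt ? (int32_t)(pd->b_sum / pd->b_cnt) : 0;

        if (rssi_a != rssi_b)   /* on a tie, keep the current path */
                pd->current_tx_path = rssi_a > rssi_b ? PATH_A : PATH_B;

        pd->a_sum = pd->a_cnt = 0;      /* fresh window for next period */
        pd->b_sum = pd->b_cnt = 0;
        return pd->current_tx_path;
}

int main(void)
{
        struct path_div pd = { .current_tx_path = PATH_A };

        pd.a_sum = 40; pd.a_cnt = 1;
        pd.b_sum = 55; pd.b_cnt = 1;
        return select_tx_path(&pd) == PATH_B ? 0 : 1;
}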
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 0b6f2fc8193c..112ed125970a 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -61,6 +61,7 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table);
void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat);
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
struct rtw_txpwr_lmt_cfg_pair {
u8 regd;
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 3bead34c3d10..3f0ac33156d6 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -152,7 +152,7 @@ static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev)
else
fw = &rtwdev->fw;
- if (fw->feature & FW_FEATURE_LPS_C2H)
+ if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
ret = __rtw_fw_leave_lps_check_c2h(rtwdev);
else
ret = __rtw_fw_leave_lps_check_reg(rtwdev);
@@ -172,7 +172,7 @@ static void rtw_fw_leave_lps_check_prepare(struct rtw_dev *rtwdev)
else
fw = &rtwdev->fw;
- if (fw->feature & FW_FEATURE_LPS_C2H)
+ if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
reinit_completion(&rtwdev->lps_leave_check);
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 6cb593cc33c2..8bf3cd3a3678 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -17,7 +17,6 @@
#include "util.h"
#include "bf.h"
#include "efuse.h"
-#include "coex.h"
#define IQK_DONE_8822C 0xaa
@@ -80,6 +79,13 @@ static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
}
+static void rtw8822c_bb_reset(struct rtw_dev *rtwdev)
+{
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+ rtw_write16_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *backup,
struct rtw_backup_info *backup_rf)
@@ -2103,13 +2109,51 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
return 0;
}
-static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
+#define FWCD_SIZE_REG_8822C 0x2000
+#define FWCD_SIZE_DMEM_8822C 0x10000
+#define FWCD_SIZE_IMEM_8822C 0x10000
+#define FWCD_SIZE_EMEM_8822C 0x20000
+#define FWCD_SIZE_ROM_8822C 0x10000
+
+static const u32 __fwcd_segs_8822c[] = {
+ FWCD_SIZE_REG_8822C,
+ FWCD_SIZE_DMEM_8822C,
+ FWCD_SIZE_IMEM_8822C,
+ FWCD_SIZE_EMEM_8822C,
+ FWCD_SIZE_ROM_8822C,
+};
+
+static const struct rtw_fwcd_segs rtw8822c_fwcd_segs = {
+ .segs = __fwcd_segs_8822c,
+ .num = ARRAY_SIZE(__fwcd_segs_8822c),
+};
+
+static int rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
{
- rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_");
- rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_");
- rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_");
- rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_");
- rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_");
+#define __dump_fw_8822c(_dev, _mem) \
+ rtw_dump_fw(_dev, OCPBASE_ ## _mem ## _88XX, \
+ FWCD_SIZE_ ## _mem ## _8822C, RTW_FWCD_ ## _mem)
+ int ret;
+
+ ret = rtw_dump_reg(rtwdev, 0x0, FWCD_SIZE_REG_8822C);
+ if (ret)
+ return ret;
+ ret = __dump_fw_8822c(rtwdev, DMEM);
+ if (ret)
+ return ret;
+ ret = __dump_fw_8822c(rtwdev, IMEM);
+ if (ret)
+ return ret;
+ ret = __dump_fw_8822c(rtwdev, EMEM);
+ if (ret)
+ return ret;
+ ret = __dump_fw_8822c(rtwdev, ROM);
+ if (ret)
+ return ret;
+
+ return 0;
+
+#undef __dump_fw_8822c
}
static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
@@ -2424,10 +2468,11 @@ static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
else
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
}
+ rtw8822c_bb_reset(rtwdev);
}
static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
- bool is_tx2_path)
+ enum rtw_bb_path tx_path_sel_1ss)
{
if (tx_path == BB_PATH_A) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
@@ -2436,21 +2481,28 @@ static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
} else {
- if (is_tx2_path) {
+ if (tx_path_sel_1ss == BB_PATH_AB) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
- } else {
+ } else if (tx_path_sel_1ss == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x32);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
+ } else if (tx_path_sel_1ss == BB_PATH_A) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
}
}
+ rtw8822c_bb_reset(rtwdev);
}
static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ enum rtw_bb_path tx_path_sel_1ss,
+ enum rtw_bb_path tx_path_cck,
bool is_tx2_path)
{
- rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
- rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
+ rtw8822c_config_cck_tx_path(rtwdev, tx_path_cck, is_tx2_path);
+ rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, tx_path_sel_1ss);
+ rtw8822c_bb_reset(rtwdev);
}
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
@@ -2466,7 +2518,8 @@ static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
rtw8822c_config_rx_path(rtwdev, rx_path);
- rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
+ rtw8822c_config_tx_path(rtwdev, tx_path, BB_PATH_A, BB_PATH_A,
+ is_tx2_path);
rtw8822c_toggle_igi(rtwdev);
}
@@ -2517,6 +2570,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_path_div *p_div = &rtwdev->dm_path_div;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
@@ -2559,6 +2613,13 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
for (path = 0; path <= rtwdev->hal.rf_path_num; path++) {
rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
dm_info->rssi[path] = rssi;
+ if (path == RF_PATH_A) {
+ p_div->path_a_sum += rssi;
+ p_div->path_a_cnt++;
+ } else if (path == RF_PATH_B) {
+ p_div->path_b_sum += rssi;
+ p_div->path_b_cnt++;
+ }
dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
@@ -4371,26 +4432,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
}
}
-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
- struct rtw_swing_table *swing_table,
- u8 path)
+static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- u8 thermal_value, delta;
+ u8 thermal_value;
if (rtwdev->efuse.thermal_meter[path] == 0xff)
return;
thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
-
rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+}
- delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 delta;
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
dm_info->delta_power_index[path] =
rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
delta);
-
rtw8822c_pwrtrack_set(rtwdev, path);
}
@@ -4401,12 +4464,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
rtw_phy_config_swing_table(rtwdev, &swing_table);
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ rtw8822c_pwr_track_stats(rtwdev, i);
if (rtw_phy_pwrtrack_need_lck(rtwdev))
rtw8822c_do_lck(rtwdev);
-
for (i = 0; i < rtwdev->hal.rf_path_num; i++)
rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
-
}
static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
@@ -4851,6 +4914,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
+ .config_tx_path = rtw8822c_config_tx_path,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -5192,6 +5256,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = 128,
.dig_min = 0x20,
+ .default_1ss_tx_path = BB_PATH_A,
+ .path_div_supported = true,
.ht_supported = true,
.vht_supported = true,
.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
@@ -5259,6 +5325,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.coex_info_hw_regs = coex_info_hw_regs_8822c,
.fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
+ .fwcd_segs = &rtw8822c_fwcd_segs,
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
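__dump_fw_8822c() uses ## token pasting so one region name expands into the matching OCPBASE_*, FWCD_SIZE_* and RTW_FWCD_* identifiers at once, keeping the region list in lockstep. A compilable illustration of the same trick with invented constants:

#include <stdio.h>

#define BASE_DMEM 0x00200000u
#define BASE_IMEM 0x00100000u
#define SIZE_DMEM 0x10000u
#define SIZE_IMEM 0x10000u

static int dump(unsigned base, unsigned size, const char *tag)
{
        printf("dump %s: base=0x%x size=0x%x\n", tag, base, size);
        return 0;
}

/* one argument, three pasted identifiers plus a stringized tag */
#define DUMP_REGION(_mem) dump(BASE_ ## _mem, SIZE_ ## _mem, #_mem)

int main(void)
{
        int ret;

        ret = DUMP_REGION(DMEM);        /* dump(BASE_DMEM, SIZE_DMEM, "DMEM") */
        if (ret)
                return ret;
        return DUMP_REGION(IMEM);
}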
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 822f3da91f1b..f9e3d0779c59 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -16812,53 +16812,53 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00010E46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x0003D646,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -18762,53 +18762,53 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -18957,53 +18957,53 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -19152,53 +19152,53 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -19347,53 +19347,53 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00031E46,
+ 0x03F, 0x0003D646,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -19610,21 +19610,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19633,21 +19633,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19656,21 +19656,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19679,21 +19679,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19702,21 +19702,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19725,21 +19725,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19748,21 +19748,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19771,21 +19771,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19794,21 +19794,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19817,21 +19817,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19840,21 +19840,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19863,21 +19863,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19886,21 +19886,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19909,21 +19909,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19932,21 +19932,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19955,21 +19955,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -19978,21 +19978,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20001,21 +20001,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20024,21 +20024,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20047,21 +20047,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20070,21 +20070,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20093,21 +20093,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20116,21 +20116,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -20139,21 +20139,21 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x000008C8,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x000008CB,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x000008CE,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x000008D1,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x000008D4,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000DD1,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -38484,21 +38484,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38507,21 +38507,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38530,21 +38530,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38553,21 +38553,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38576,21 +38576,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38599,21 +38599,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38622,21 +38622,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38645,21 +38645,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38668,21 +38668,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38691,21 +38691,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38714,21 +38714,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38737,21 +38737,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38760,21 +38760,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38783,21 +38783,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38806,21 +38806,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x94000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38829,21 +38829,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000001, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38852,21 +38852,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000002, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38875,21 +38875,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000003, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38898,21 +38898,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000004, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38921,21 +38921,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000005, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38944,21 +38944,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000006, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38967,21 +38967,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000015, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -38990,21 +38990,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0x95000016, 0x00000000, 0x40000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000467,
@@ -39013,21 +39013,21 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000062,
0x03F, 0x00000908,
0x033, 0x00000063,
- 0x03F, 0x00000D09,
+ 0x03F, 0x00000CC6,
0x033, 0x00000064,
- 0x03F, 0x00000D49,
+ 0x03F, 0x00000CC9,
0x033, 0x00000065,
- 0x03F, 0x00000D8A,
+ 0x03F, 0x00000CCC,
0x033, 0x00000066,
- 0x03F, 0x00000DEB,
+ 0x03F, 0x00000CCF,
0x033, 0x00000067,
- 0x03F, 0x00000DEE,
+ 0x03F, 0x00000CD2,
0x033, 0x00000068,
- 0x03F, 0x00000DF1,
+ 0x03F, 0x00000CD5,
0x033, 0x00000069,
- 0x03F, 0x00000DF4,
+ 0x03F, 0x00000DD4,
0x033, 0x0000006A,
- 0x03F, 0x00000DF7,
+ 0x03F, 0x00000DD7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9fe77556858e..63ce2443f136 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1036,14 +1036,11 @@ static bool is_associated(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
u8 bssid[ETH_ALEN];
- int ret;
if (!priv->radio_on)
return false;
- ret = get_bssid(usbdev, bssid);
-
- return (ret == 0 && !is_zero_ether_addr(bssid));
+ return (get_bssid(usbdev, bssid) == 0 && !is_zero_ether_addr(bssid));
}
static int disassociate(struct usbnet *usbdev, bool reset_ssid)
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index ce9892152f4d..99b21a2c8386 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- (common->secinfo.security_enable)) {
+ info->control.hw_key) {
if (rsi_is_cipher_wep(common))
ieee80211_size += 4;
else
@@ -470,9 +470,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
}
if (common->band == NL80211_BAND_2GHZ)
- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1);
+ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_1);
else
- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6);
+ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_6);
if (mac_bcn->data[tim_offset + 2] == 0)
bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 16025300cddb..b66975f54567 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -837,6 +837,23 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
common->cqm_info.rssi_hyst);
}
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ rsi_dbg(INFO_ZONE, "%s: Changed Beacon interval: %d\n",
+ __func__, bss_conf->beacon_int);
+ if (common->beacon_interval != bss->beacon_int) {
+ common->beacon_interval = bss->beacon_int;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct vif_priv *vif_info = (struct vif_priv *)vif->drv_priv;
+
+ rsi_set_vap_capabilities(common, RSI_OPMODE_AP,
+ vif->addr, vif_info->vap_id,
+ VAP_UPDATE);
+ }
+ }
+ adapter->ps_info.listen_interval =
+ bss->beacon_int * adapter->ps_info.num_bcns_per_lis_int;
+ }
+
if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO))) {
@@ -1028,7 +1045,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
switch (cmd) {
case SET_KEY:
- secinfo->security_enable = true;
status = rsi_hal_key_config(hw, vif, key, sta);
if (status) {
mutex_unlock(&common->mutex);
@@ -1047,8 +1063,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
break;
case DISABLE_KEY:
- if (vif->type == NL80211_IFTYPE_STATION)
- secinfo->security_enable = false;
rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
memset(key, 0, sizeof(struct ieee80211_key_conf));
status = rsi_hal_key_config(hw, vif, key, sta);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 33c76d39a8e9..891fd5f0fa76 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1547,8 +1547,8 @@ static int rsi_eeprom_read(struct rsi_common *common)
}
/**
- * This function sends a frame to block/unblock
- * data queues in the firmware
+ * rsi_send_block_unblock_frame() - This function sends a frame to block/unblock
+ * data queues in the firmware
*
* @common: Pointer to the driver private structure.
* @block_event: Event block if true, unblock if false
@@ -1803,8 +1803,7 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
RSI_WIFI_MGMT_Q);
cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
cmd_frame->host_sleep_status = sleep_status;
- if (common->secinfo.security_enable &&
- common->secinfo.gtk_cipher)
+ if (common->secinfo.gtk_cipher)
flags |= RSI_WOW_GTK_REKEY;
if (sleep_status)
cmd_frame->wow_flags = flags;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index a1065e5a92b4..0f535850a383 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -151,7 +151,6 @@ enum edca_queue {
};
struct security_info {
- bool security_enable;
u32 ptk_cipher;
u32 gtk_cipher;
};
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index b65ec14136c7..4c30b5772ce0 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -53,6 +53,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
{ /* end: all zeroes */ },
};
+MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);
/* hwbus_ops implementation */
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 988581cc134b..1f856fbbc0ea 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -75,30 +75,27 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
- /* will be unlocked in cw1200_scan_work() */
- down(&priv->scan.lock);
- mutex_lock(&priv->conf_mutex);
-
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
- if (!frame.skb) {
- mutex_unlock(&priv->conf_mutex);
- up(&priv->scan.lock);
+ if (!frame.skb)
return -ENOMEM;
- }
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
ret = wsm_set_template_frame(priv, &frame);
if (!ret) {
/* Host want to be the probe responder. */
ret = wsm_set_probe_responder(priv, true);
}
if (ret) {
- dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
up(&priv->scan.lock);
+ dev_kfree_skb(frame.skb);
return ret;
}
@@ -120,8 +117,8 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
+ dev_kfree_skb(frame.skb);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
}
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 498c8db2eb48..c3be81dc7970 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -12,7 +12,7 @@
#include "acx.h"
/**
- * send command to firmware
+ * wl1251_cmd_send - Send command to firmware
*
* @wl: wl struct
* @id: command id
@@ -59,7 +59,7 @@ out:
}
/**
- * send test command to firmware
+ * wl1251_cmd_test - Send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
@@ -100,7 +100,7 @@ int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
}
/**
- * read acx from firmware
+ * wl1251_cmd_interrogate - Read acx from firmware
*
* @wl: wl struct
* @id: acx id
@@ -138,7 +138,7 @@ out:
}
/**
- * write acx value to firmware
+ * wl1251_cmd_configure - Write acx value to firmware
*
* @wl: wl struct
* @id: acx id
@@ -454,9 +454,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
cmd->channels[i].channel = channels[i]->hw_value;
}
- cmd->params.ssid_len = ssid_len;
- if (ssid)
- memcpy(cmd->params.ssid, ssid, ssid_len);
+ if (ssid) {
+ int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+ cmd->params.ssid_len = len;
+ memcpy(cmd->params.ssid, ssid, len);
+ }
ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 9d7dbfe7fe0c..c6da0cfb4afb 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
u32 mac1, mac2;
int ret;
+ /* Device may be in ELP from the bootloader or kexec */
+ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
+ usleep_range(500000, 700000);
+
ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
if (ret < 0)
goto out;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 32a2e27cc561..8b798b5fcaf5 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -821,7 +821,7 @@ out:
/**
- * send test command to firmware
+ * wl1271_cmd_test - send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
@@ -850,7 +850,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
EXPORT_SYMBOL_GPL(wl1271_cmd_test);
/**
- * read acx from firmware
+ * wl1271_cmd_interrogate - read acx from firmware
*
* @wl: wl struct
* @id: acx id
@@ -879,7 +879,7 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
}
/**
- * write acx value to firmware
+ * wlcore_cmd_configure_failsafe - write acx value to firmware
*
* @wl: wl struct
* @id: acx id
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index a68bbadae043..46ab69eab26a 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -29,18 +29,20 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
u8 *buffer;
u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
- u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
- WL18XX_LOGGER_BUFF_OFFSET;
+ u32 addr_ptr;
+ u32 buff_start_ptr;
+ u32 buff_read_ptr;
+ u32 buff_end_ptr;
u32 available_len;
u32 actual_len;
- u32 clear_addr;
+ u32 clear_ptr;
size_t len;
u32 start_loc;
buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
if (!buffer) {
wl1271_error("Fail to allocate fw logger memory");
- fw_log.actual_buff_size = cpu_to_le32(0);
+ actual_len = 0;
goto out;
}
@@ -49,51 +51,58 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
if (ret < 0) {
wl1271_error("Fail to read logger buffer, error_id = %d",
ret);
- fw_log.actual_buff_size = cpu_to_le32(0);
+ actual_len = 0;
goto free_out;
}
memcpy(&fw_log, buffer, sizeof(fw_log));
- if (le32_to_cpu(fw_log.actual_buff_size) == 0)
+ actual_len = le32_to_cpu(fw_log.actual_buff_size);
+ if (actual_len == 0)
goto free_out;
- actual_len = le32_to_cpu(fw_log.actual_buff_size);
- start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
- internal_fw_addrbase) - addr;
- end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
- available_len = end_buff_addr -
- (le32_to_cpu(fw_log.buff_read_ptr) -
- internal_fw_addrbase);
- actual_len = min(actual_len, available_len);
- len = actual_len;
+ /* Calculate the internal pointer to the fwlog structure */
+ addr_ptr = internal_fw_addrbase + addr;
+
+ /* Calculate the internal pointers to the start and end of log buffer */
+ buff_start_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET;
+ buff_end_ptr = buff_start_ptr + le32_to_cpu(fw_log.max_buff_size);
+ /* Read the read pointer and validate it */
+ buff_read_ptr = le32_to_cpu(fw_log.buff_read_ptr);
+ if (buff_read_ptr < buff_start_ptr ||
+ buff_read_ptr >= buff_end_ptr) {
+ wl1271_error("buffer read pointer out of bounds: %x not in (%x-%x)\n",
+ buff_read_ptr, buff_start_ptr, buff_end_ptr);
+ goto free_out;
+ }
+
+ start_loc = buff_read_ptr - addr_ptr;
+ available_len = buff_end_ptr - buff_read_ptr;
+
+ /* Copy initial part up to the end of ring buffer */
+ len = min(actual_len, available_len);
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
- clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
- internal_fw_addrbase;
+ clear_ptr = addr_ptr + start_loc + actual_len;
+ if (clear_ptr == buff_end_ptr)
+ clear_ptr = buff_start_ptr;
- len = le32_to_cpu(fw_log.actual_buff_size) - len;
+ /* Copy any remaining part from beginning of ring buffer */
+ len = actual_len - len;
if (len) {
wl12xx_copy_fwlog(wl,
&buffer[WL18XX_LOGGER_BUFF_OFFSET],
len);
- clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
- internal_fw_addrbase;
- }
-
- /* double check that clear address and write pointer are the same */
- if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
- wl1271_error("Calculate of clear addr Clear = %x, write = %x",
- clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
+ clear_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET + len;
}
- /* indicate FW about Clear buffer */
+ /* Update the read pointer */
ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
- fw_log.buff_write_ptr);
+ clear_ptr);
free_out:
kfree(buffer);
out:
- return le32_to_cpu(fw_log.actual_buff_size);
+ return actual_len;
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 8509b989940c..e500b8405f8f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3242,8 +3242,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
* the firmware filters so that all multicast packets are passed
* This is mandatory for MDNS based discovery protocols
*/
- if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI) {
ret = wl1271_acx_group_address_tbl(wl, wlvif,
false,
NULL, 0);
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 5cf0379b88b6..35b535c125b6 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -12,9 +12,9 @@
#include "debug.h"
#include "sysfs.h"
-static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t bt_coex_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
@@ -30,9 +30,9 @@ static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
}
-static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bt_coex_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct wl1271 *wl = dev_get_drvdata(dev);
unsigned long res;
@@ -71,13 +71,11 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
return count;
}
-static DEVICE_ATTR(bt_coex_state, 0644,
- wl1271_sysfs_show_bt_coex_state,
- wl1271_sysfs_store_bt_coex_state);
+static DEVICE_ATTR_RW(bt_coex_state);
-static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t hw_pg_ver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
@@ -94,7 +92,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
return len;
}
-static DEVICE_ATTR(hw_pg_ver, 0444, wl1271_sysfs_show_hw_pg_ver, NULL);
+static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 5c4cd0e1adeb..a7ceef10bf6a 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1544,14 +1544,14 @@ static int __init usb_init(void)
zd_workqueue = create_singlethread_workqueue(driver.name);
if (zd_workqueue == NULL) {
- printk(KERN_ERR "%s couldn't create workqueue\n", driver.name);
+ pr_err("%s couldn't create workqueue\n", driver.name);
return -ENOMEM;
}
r = usb_register(&driver);
if (r) {
destroy_workqueue(zd_workqueue);
- printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
+ pr_err("%s usb_register() failed. Error number %d\n",
driver.name, r);
return r;
}
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 7ad1920120bc..de9384326bc8 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -3,15 +3,9 @@
# Wireless WAN device configuration
#
-menuconfig WWAN
- bool "Wireless WAN"
- help
- This section contains Wireless WAN configuration for WWAN framework
- and drivers.
-
-if WWAN
+menu "Wireless WAN"
-config WWAN_CORE
+config WWAN
tristate "WWAN Driver Core"
help
Say Y here if you want to use the WWAN driver core. This driver
@@ -20,9 +14,19 @@ config WWAN_CORE
To compile this driver as a module, choose M here: the module will be
called wwan.
+if WWAN
+
+config WWAN_HWSIM
+ tristate "Simulated WWAN device"
+ help
+	  This driver is a developer testing tool that can be used to test the
+	  WWAN framework.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan_hwsim. If unsure, say N.
+
config MHI_WWAN_CTRL
tristate "MHI WWAN control driver for QCOM-based PCIe modems"
- select WWAN_CORE
depends on MHI_BUS
help
MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
@@ -34,4 +38,35 @@ config MHI_WWAN_CTRL
To compile this driver as a module, choose M here: the module will be
called mhi_wwan_ctrl.
+config RPMSG_WWAN_CTRL
+ tristate "RPMSG WWAN control driver"
+ depends on RPMSG
+ help
+ RPMSG WWAN CTRL allows modems available via RPMSG channels to expose
+ different modem protocols/ports to userspace, including AT and QMI.
+ These protocols can be accessed directly from userspace
+ (e.g. AT commands) or via libraries/tools (e.g. libqmi, libqcdm...).
+
+ This is mainly used for modems integrated into many Qualcomm SoCs,
+ e.g. for AT and QMI on Qualcomm MSM8916 or MSM8974. Note that many
+ newer Qualcomm SoCs (e.g. SDM845) still provide an AT port through
+ this driver but the QMI messages can only be sent through
+ QRTR network sockets (CONFIG_QRTR).
+
+ To compile this driver as a module, choose M here: the module will be
+ called rpmsg_wwan_ctrl.
+
+config IOSM
+ tristate "IOSM Driver for Intel M.2 WWAN Device"
+ depends on INTEL_IOMMU
+ help
+ This driver enables Intel M.2 WWAN Device communication.
+
+	  If you have one of these Intel M.2 WWAN modules and wish to use it
+	  in Linux, say Y/M here.
+
+ If unsure, say N.
+
endif # WWAN
+
+endmenu
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
index 556cd90958ca..d90ac33abaef 100644
--- a/drivers/net/wwan/Makefile
+++ b/drivers/net/wwan/Makefile
@@ -3,7 +3,11 @@
# Makefile for the Linux WWAN device drivers.
#
-obj-$(CONFIG_WWAN_CORE) += wwan.o
+obj-$(CONFIG_WWAN) += wwan.o
wwan-objs += wwan_core.o
+obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
+
obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
+obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
+obj-$(CONFIG_IOSM) += iosm/
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
new file mode 100644
index 000000000000..4f9f0ae398e1
--- /dev/null
+++ b/drivers/net/wwan/iosm/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: (GPL-2.0-only)
+#
+# Copyright (C) 2020-21 Intel Corporation.
+#
+
+iosm-y = \
+ iosm_ipc_task_queue.o \
+ iosm_ipc_imem.o \
+ iosm_ipc_imem_ops.o \
+ iosm_ipc_mmio.o \
+ iosm_ipc_port.o \
+ iosm_ipc_wwan.o \
+ iosm_ipc_uevent.o \
+ iosm_ipc_pm.o \
+ iosm_ipc_pcie.o \
+ iosm_ipc_irq.o \
+ iosm_ipc_chnl_cfg.o \
+ iosm_ipc_protocol.o \
+ iosm_ipc_protocol_ops.o \
+ iosm_ipc_mux.o \
+ iosm_ipc_mux_codec.o
+
+obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
new file mode 100644
index 000000000000..804e6c4f2c78
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+
+/* Max. sizes of downlink buffers */
+#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
+#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
+#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
+#define IPC_MEM_MAX_DL_MBIM_BUF_SIZE IPC_MEM_MAX_DL_RPC_BUF_SIZE
+
+/* Max. transfer descriptors for a pipe. */
+#define IPC_MEM_MAX_TDS_FLASH_DL 3
+#define IPC_MEM_MAX_TDS_FLASH_UL 6
+#define IPC_MEM_MAX_TDS_AT 4
+#define IPC_MEM_MAX_TDS_RPC 4
+#define IPC_MEM_MAX_TDS_MBIM IPC_MEM_MAX_TDS_RPC
+#define IPC_MEM_MAX_TDS_LOOPBACK 11
+
+/* Accumulation backoff usec */
+#define IRQ_ACC_BACKOFF_OFF 0
+
+/* MUX acc backoff 1ms */
+#define IRQ_ACC_BACKOFF_MUX 1000
+
+/* Modem channel configuration table
+ * Always reserve element zero for flash channel.
+ */
+static struct ipc_chnl_cfg modem_cfg[] = {
+ /* IP Mux */
+ { IPC_MEM_IP_CHL_ID_0, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_MUX_LITE_UL, IPC_MEM_MAX_TDS_MUX_LITE_DL,
+ IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ /* RPC - 0 */
+ { IPC_MEM_CTRL_CHL_ID_1, IPC_MEM_PIPE_2, IPC_MEM_PIPE_3,
+ IPC_MEM_MAX_TDS_RPC, IPC_MEM_MAX_TDS_RPC,
+ IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ /* IAT0 */
+ { IPC_MEM_CTRL_CHL_ID_2, IPC_MEM_PIPE_4, IPC_MEM_PIPE_5,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Trace */
+ { IPC_MEM_CTRL_CHL_ID_3, IPC_MEM_PIPE_6, IPC_MEM_PIPE_7,
+ IPC_MEM_TDS_TRC, IPC_MEM_TDS_TRC, IPC_MEM_MAX_DL_TRC_BUF_SIZE,
+ WWAN_PORT_UNKNOWN },
+ /* IAT1 */
+ { IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_PIPE_8, IPC_MEM_PIPE_9,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Loopback */
+ { IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_PIPE_10, IPC_MEM_PIPE_11,
+ IPC_MEM_MAX_TDS_LOOPBACK, IPC_MEM_MAX_TDS_LOOPBACK,
+ IPC_MEM_MAX_DL_LOOPBACK_SIZE, WWAN_PORT_UNKNOWN },
+ /* MBIM Channel */
+ { IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
+ IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
+ IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
+};
+
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
+{
+ int array_size = ARRAY_SIZE(modem_cfg);
+
+ if (index >= array_size) {
+ pr_err("index: %d and array_size %d", index, array_size);
+ return -ECHRNG;
+ }
+
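+	/* Only the IP MUX channel accumulates interrupts (1 ms backoff);
+	 * every other channel is signalled immediately.
+	 */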
+ if (index == IPC_MEM_MUX_IP_CH_IF_ID)
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_MUX;
+ else
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_OFF;
+
+ chnl_cfg->ul_nr_of_entries = modem_cfg[index].ul_nr_of_entries;
+ chnl_cfg->dl_nr_of_entries = modem_cfg[index].dl_nr_of_entries;
+ chnl_cfg->dl_buf_size = modem_cfg[index].dl_buf_size;
+ chnl_cfg->id = modem_cfg[index].id;
+ chnl_cfg->ul_pipe = modem_cfg[index].ul_pipe;
+ chnl_cfg->dl_pipe = modem_cfg[index].dl_pipe;
+ chnl_cfg->wwan_port_type = modem_cfg[index].wwan_port_type;
+
+ return 0;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
new file mode 100644
index 000000000000..422471367f78
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation
+ */
+
+#ifndef IOSM_IPC_CHNL_CFG_H
+#define IOSM_IPC_CHNL_CFG_H
+
+#include "iosm_ipc_mux.h"
+
+/* Number of TDs on the trace channel */
+#define IPC_MEM_TDS_TRC 32
+
+/* Trace channel TD buffer size. */
+#define IPC_MEM_MAX_DL_TRC_BUF_SIZE 8192
+
+/* Channel ID */
+enum ipc_channel_id {
+ IPC_MEM_IP_CHL_ID_0 = 0,
+ IPC_MEM_CTRL_CHL_ID_1,
+ IPC_MEM_CTRL_CHL_ID_2,
+ IPC_MEM_CTRL_CHL_ID_3,
+ IPC_MEM_CTRL_CHL_ID_4,
+ IPC_MEM_CTRL_CHL_ID_5,
+ IPC_MEM_CTRL_CHL_ID_6,
+};
+
+/**
+ * struct ipc_chnl_cfg - IPC channel configuration structure
+ * @id: Interface ID
+ * @ul_pipe: Uplink datastream
+ * @dl_pipe: Downlink datastream
+ * @ul_nr_of_entries: Number of Transfer descriptor uplink pipe
+ * @dl_nr_of_entries: Number of Transfer descriptor downlink pipe
+ * @dl_buf_size: Downlink buffer size
+ * @wwan_port_type: Wwan subsystem port type
+ * @accumulation_backoff:	Time in usec for data accumulation
+ */
+struct ipc_chnl_cfg {
+ u32 id;
+ u32 ul_pipe;
+ u32 dl_pipe;
+ u32 ul_nr_of_entries;
+ u32 dl_nr_of_entries;
+ u32 dl_buf_size;
+ u32 wwan_port_type;
+ u32 accumulation_backoff;
+};
+
+/**
+ * ipc_chnl_cfg_get - Get pipe configuration.
+ * @chnl_cfg: Array of ipc_chnl_cfg struct
+ * @index: Channel index (up to MAX_CHANNELS)
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index);
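+
+/* Illustrative use only (hypothetical caller, not part of this driver):
+ * walk the whole channel table until the index goes out of range.
+ *
+ *	struct ipc_chnl_cfg cfg;
+ *	int i;
+ *
+ *	for (i = 0; ipc_chnl_cfg_get(&cfg, i) == 0; i++)
+ *		pr_debug("chnl %u: ul_pipe %u dl_pipe %u\n",
+ *			 cfg.id, cfg.ul_pipe, cfg.dl_pipe);
+ */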
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
new file mode 100644
index 000000000000..9f00e36b7f79
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_port.h"
+
+/* Check the wwan ips if it is valid with Channel as input. */
+static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
+{
+ if (chnl)
+ return chnl->ctype == IPC_CTYPE_WWAN &&
+ chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
+ return false;
+}
+
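+/* Cache the requested device sleep state and forward it to the modem as an
+ * IPC sleep message.
+ */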
+static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 1,
+ .sleep.state = state,
+ };
+
+ ipc_imem->device_sleep = state;
+
+ return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_SLEEP, &prep_args, NULL);
+}
+
+static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ /* limit max. nr of entries */
+ if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
+ return false;
+
+ return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
+}
+
+/* This timer handler will retry DL buff allocation if a pipe has no free buf
+ * and gives doorbell if TD is available
+ */
+static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ bool new_buffers_available = false;
+ bool retry_allocation = false;
+ int i;
+
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
+ continue;
+
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ new_buffers_available = true;
+
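+		/* The pipe is open but still empty after the allocation
+		 * attempts: remember to retry via the timer below.
+		 */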
+ if (pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (new_buffers_available)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+ return 0;
+}
+
+static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, td_alloc_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
+ 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Fast update timer tasklet handler to trigger HP update */
+static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_FAST_TD_UPD_TMR);
+
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, fast_update_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
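+/* Read the modem's capabilities from MMIO and derive the MUX configuration
+ * (protocol, UL flow control mode, instance ID and session count).
+ */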
+static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
+ struct ipc_mux_config *cfg)
+{
+ ipc_mmio_update_cp_capability(ipc_imem->mmio);
+
+ if (!ipc_imem->mmio->has_mux_lite) {
+ dev_err(ipc_imem->dev, "Failed to get Mux capability.");
+ return -EINVAL;
+ }
+
+ cfg->protocol = MUX_LITE;
+
+ cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
+ MUX_UL_ON_CREDITS :
+ MUX_UL;
+
+	/* The instance ID is the same as the channel ID because it is
+	 * reused by the channel alloc function.
+	 */
+ cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
+ cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+
+ return 0;
+}
+
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx)
+{
+ union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
+ reset_enable };
+
+ if (atomic_ctx)
+ ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args,
+ NULL);
+ else
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args);
+}
+
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
+{
+ /* Use the TD update timer only in the runtime phase */
+ if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
+ /* trigger the doorbell irq on CP directly. */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR_START);
+ return;
+ }
+
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
+{
+ if (hrtimer_active(hr_timer))
+ hrtimer_cancel(hr_timer);
+}
+
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ struct sk_buff_head *ul_list;
+ bool hpda_pending = false;
+ bool forced_hpdu = false;
+ struct ipc_pipe *pipe;
+ int i;
+
+ /* Analyze the uplink pipe of all active channels. */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+
+ if (channel->state != IMEM_CHANNEL_ACTIVE)
+ continue;
+
+ pipe = &channel->ul_pipe;
+
+ /* Get the reference to the skbuf accumulator list. */
+ ul_list = &channel->ul_list;
+
+ /* Fill the transfer descriptor with the uplink buffer info. */
+ hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+
+ /* forced HP update needed for non data channels */
+ if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
+ forced_hpdu = true;
+ }
+
+ if (forced_hpdu) {
+ hpda_pending = false;
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_UL_WRITE_TD);
+ }
+
+ return hpda_pending;
+}
+
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_BOOT_TIMEOUT;
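+	/* IPC_MODEM_BOOT_TIMEOUT counts iterations of the 20 ms poll loop
+	 * below, i.e. 500 * 20 ms = 10 s of total boot wait time.
+	 */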
+
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ /* Trigger the CP interrupt to enter the init state. */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+ /* Wait for the CP update. */
+ do {
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ ipc_imem->ipc_requested_state) {
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+
+ /* Trigger the CP irq to enter the running state. */
+ ipc_imem->ipc_requested_state =
+ IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+
+ return;
+ }
+ msleep(20);
+ } while (--timeout);
+
+ /* timeout */
+	dev_err(ipc_imem->dev, "%s: ipc_status(%d) != IPC_MEM_DEVICE_IPC_INIT",
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
+}
+
+/* Analyze the packet type and distribute it. */
+static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe, struct sk_buff *skb)
+{
+ u16 port_id;
+
+ if (!skb)
+ return;
+
+ /* An AT/control or IP packet is expected. */
+ switch (pipe->channel->ctype) {
+ case IPC_CTYPE_CTRL:
+ port_id = pipe->channel->channel_id;
+
+ /* Pass the packet to the wwan layer. */
+ wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
+ break;
+
+ case IPC_CTYPE_WWAN:
+ if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_dl_decode(ipc_imem->mux, skb);
+ break;
+ default:
+ dev_err(ipc_imem->dev, "Invalid channel type");
+ break;
+ }
+}
+
+/* Process the downlink data and pass them to the char or net layer. */
+static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ s32 cnt = 0, processed_td_cnt = 0;
+ struct ipc_mem_channel *channel;
+ u32 head = 0, tail = 0;
+ bool processed = false;
+ struct sk_buff *skb;
+
+ channel = pipe->channel;
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
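+	/* Illustrative example: with nr_of_entries = 16, old_tail = 14 and
+	 * tail = 2 the ring has wrapped, so cnt = 16 - 14 + 2 = 4 TDs were
+	 * consumed by CP.
+	 */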
+
+ processed_td_cnt = cnt;
+
+ /* Seek for pipes with pending DL data. */
+ while (cnt--) {
+ skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
+
+ /* Analyze the packet type and distribute it. */
+ ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
+ }
+
+	/* Try to allocate new empty DL SKBs from head..tail - 1. */
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ processed = true;
+
+ if (processed && !ipc_imem_check_wwan_ips(channel)) {
+ /* Force HP update for non IP channels */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+ processed = false;
+
+ /* If Fast Update timer is already running then stop */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+
+ /* Any control channel process will get immediate HP update.
+ * Start Fast update timer only for IP channel if all the TDs were
+ * used in last process.
+ */
+ if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ hrtimer_start(&ipc_imem->fast_update_timer,
+ ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
+ }
+
+ if (ipc_imem->app_notify_dl_pend)
+ complete(&ipc_imem->dl_pend_sem);
+}
+
+/* process open uplink pipe */
+static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_mem_channel *channel;
+ u32 tail = 0, head = 0;
+ struct sk_buff *skb;
+ s32 cnt = 0;
+
+ channel = pipe->channel;
+
+ /* Get the internal phase. */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
+
+ /* Free UL buffers. */
+ while (cnt--) {
+ skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
+
+ if (!skb)
+ continue;
+
+ /* If the user app was suspended in uplink direction - blocking
+ * write, resume it.
+ */
+ if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
+ complete(&channel->ul_sem);
+
+ /* Free the skbuf element. */
+ if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
+ if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
+ else
+ dev_err(ipc_imem->dev,
+ "OP Type is UL_MUX, unknown if_id %d",
+ channel->if_id);
+ } else {
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+ }
+ }
+
+ /* Trace channel stats for IP UL pipe. */
+ if (ipc_imem_check_wwan_ips(pipe->channel))
+ ipc_mux_check_n_restart_tx(ipc_imem->mux);
+
+ if (ipc_imem->app_notify_ul_pend)
+ complete(&ipc_imem->ul_pend_sem);
+}
+
+/* Executes the irq. */
+static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+
+ if (ipc_imem->flash_channel_id < 0) {
+ ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
+ dev_err(ipc_imem->dev, "Missing flash app:%d",
+ ipc_imem->flash_channel_id);
+ return;
+ }
+
+ ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
+
+ /* Wake up the flash app to continue or to terminate depending
+ * on the CP ROM exit code.
+ */
+ channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
+ complete(&channel->ul_sem);
+}
+
+/* Execute the UL bundle timer actions, generating the doorbell irq. */
+static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR);
+ return 0;
+}
+
+/* Consider link power management in the runtime phase. */
+static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
+{
+	/* The link is about to go down; check for pending UL packets. */
+ if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
+ hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ /* Generate the doorbell irq. */
+ ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
+ /* Stop the TD update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
+ /* Stop the fast update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+}
+
+/* Execute startup timer and wait for delayed start (e.g. NAND) */
+static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ /* Update & check the current operation phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
+ return -EIO;
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_UNINIT) {
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+
+ ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
+ /* reduce period to 100 ms to check for mmio init state */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_INIT) {
+ /* Startup complete - disable timer */
+ ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
+
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+ }
+
+ return 0;
+}
+
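+/* Startup timer expiry: re-arm the timer via hrtimer_forward() while
+ * hrtimer_period is non-zero and defer the actual work to the tasklet.
+ */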
+static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
+{
+ enum hrtimer_restart result = HRTIMER_NORESTART;
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, startup_timer);
+
+ if (ktime_to_ns(ipc_imem->hrtimer_period)) {
+ hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
+ ipc_imem->hrtimer_period);
+ result = HRTIMER_RESTART;
+ }
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
+ NULL, 0, false);
+ return result;
+}
+
+/* Get the CP execution stage: read it through the IPC protocol once the
+ * device is running, otherwise fall back to the MMIO scratchpad.
+ */
+static enum ipc_mem_exec_stage
+ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
+{
+ return (ipc_imem->phase == IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
+ ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
+ ipc_mmio_get_exec_stage(ipc_imem->mmio);
+}
+
+/* Callback to send the modem ready uevent */
+static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+
+ return 0;
+}
+
+/* This function is executed in a task context via an ipc_worker object,
+ * as the creation or removal of a device can't be done from tasklet
+ * context.
+ */
+static void ipc_imem_run_state_worker(struct work_struct *instance)
+{
+ struct ipc_chnl_cfg chnl_cfg_port = { 0 };
+ struct ipc_mux_config mux_cfg;
+ struct iosm_imem *ipc_imem;
+ u8 ctrl_chl_idx = 0;
+
+ ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
+
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_err(ipc_imem->dev,
+ "Modem link down. Exit run state worker.");
+ return;
+ }
+
+ if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
+ ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+
+ ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+ if (ipc_imem->mux)
+ ipc_imem->mux->wwan = ipc_imem->wwan;
+
+ while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
+ if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
+ ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_port,
+ IRQ_MOD_OFF);
+ ipc_imem->ipc_port[ctrl_chl_idx] =
+ ipc_port_init(ipc_imem, chnl_cfg_port);
+ }
+ }
+ ctrl_chl_idx++;
+ }
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
+ false);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+}
+
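+/* Central irq handler, run in tasklet context: update the AP phase, process
+ * the message ring and all open pipes, and re-arm the TD update and TD
+ * allocation timers as needed.
+ */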
+static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
+{
+ enum ipc_mem_device_ipc_state curr_ipc_status;
+ enum ipc_phase old_phase, phase;
+ bool retry_allocation = false;
+ bool ul_pending = false;
+ int ch_id, i;
+
+ if (irq != IMEM_IRQ_DONT_CARE)
+ ipc_imem->ev_irq_pending[irq] = false;
+
+ /* Get the internal phase. */
+ old_phase = ipc_imem->phase;
+
+ if (old_phase == IPC_P_OFF_REQ) {
+ dev_dbg(ipc_imem->dev,
+ "[%s]: Ignoring MSI. Deinit sequence in progress!",
+ ipc_imem_phase_get_string(old_phase));
+ return;
+ }
+
+ /* Update the phase controlled by CP. */
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ switch (phase) {
+ case IPC_P_RUN:
+ if (!ipc_imem->enter_runtime) {
+			/* Execute the transition from flash/boot to runtime. */
+ ipc_imem->enter_runtime = 1;
+
+ /* allow device to sleep, default value is
+ * IPC_HOST_SLEEP_ENTER_SLEEP
+ */
+ ipc_imem_msg_send_device_sleep(ipc_imem,
+ ipc_imem->device_sleep);
+
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ true);
+ }
+
+ curr_ipc_status =
+ ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
+
+ /* check ipc_status change */
+ if (ipc_imem->ipc_status != curr_ipc_status) {
+ ipc_imem->ipc_status = curr_ipc_status;
+
+ if (ipc_imem->ipc_status ==
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ schedule_work(&ipc_imem->run_state_worker);
+ }
+ }
+
+ /* Consider power management in the runtime phase. */
+ ipc_imem_slp_control_exec(ipc_imem);
+ break; /* Continue with skbuf processing. */
+
+ /* Unexpected phases. */
+ case IPC_P_OFF:
+ case IPC_P_OFF_REQ:
+ dev_err(ipc_imem->dev, "confused phase %s",
+ ipc_imem_phase_get_string(phase));
+ return;
+
+ case IPC_P_PSI:
+ if (old_phase != IPC_P_ROM)
+ break;
+
+ fallthrough;
+ /* On CP the PSI phase is already active. */
+
+ case IPC_P_ROM:
+ /* Before CP ROM driver starts the PSI image, it sets
+ * the exit_code field on the doorbell scratchpad and
+ * triggers the irq.
+ */
+ ipc_imem_rom_irq_exec(ipc_imem);
+ return;
+
+ default:
+ break;
+ }
+
+ /* process message ring */
+ ipc_protocol_msg_process(ipc_imem, irq);
+
+ /* process all open pipes */
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
+ struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (dl_pipe->is_open &&
+ (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
+ ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
+
+ if (dl_pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (ul_pipe->is_open)
+ ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
+ }
+
+ /* Try to generate new ADB or ADGH. */
+ if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ ipc_imem_td_update_timer_start(ipc_imem);
+
+ /* Continue the send procedure with accumulated SIO or NETIF packets.
+ * Reset the debounce flags.
+ */
+ ul_pending |= ipc_imem_ul_write_td(ipc_imem);
+
+ /* if UL data is pending restart TD update timer */
+ if (ul_pending) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+
+ /* If CP has executed the transition
+ * from IPC_INIT to IPC_RUNNING in the PSI
+ * phase, wake up the flash app to open the pipes.
+ */
+ if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
+ ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
+ ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_RUNNING &&
+ ipc_imem->flash_channel_id >= 0) {
+ /* Wake up the flash app to open the pipes. */
+ ch_id = ipc_imem->flash_channel_id;
+ complete(&ipc_imem->channels[ch_id].ul_sem);
+ }
+
+ /* Reset the expected CP state. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+/* Callback by tasklet for handling interrupt events. */
+static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ ipc_imem_handle_irq(ipc_imem, arg);
+
+ return 0;
+}
+
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
+{
+ /* start doorbell irq delay timer if UL is pending */
+ if (ipc_imem_ul_write_td(ipc_imem))
+ ipc_imem_td_update_timer_start(ipc_imem);
+}
+
+/* Check the execution stage and update the AP phase */
+static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
+ enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ if (ipc_imem->phase != IPC_P_ROM) {
+ /* Send this event only once */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
+ }
+
+ ipc_imem->phase = IPC_P_ROM;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_PSI:
+ ipc_imem->phase = IPC_P_PSI;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_EBL:
+ ipc_imem->phase = IPC_P_EBL;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_RUN:
+ if (ipc_imem->phase != IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+ }
+ ipc_imem->phase = IPC_P_RUN;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ if (ipc_imem->phase != IPC_P_CRASH)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
+
+ ipc_imem->phase = IPC_P_CRASH;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ if (ipc_imem->phase != IPC_P_CD_READY)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
+ ipc_imem->phase = IPC_P_CD_READY;
+ break;
+
+ default:
+ /* unknown exec stage:
+ * assume that link is down and send info to listeners
+ */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
+ break;
+ }
+
+ return ipc_imem->phase;
+}
+
+/* Send msg to device to open pipe */
+static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = {
+ .pipe_open.pipe = pipe,
+ };
+
+ if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
+ pipe->is_open = true;
+
+ return pipe->is_open;
+}
+
+/* Allocates the TDs for the given pipe along with firing HP update DB. */
+static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_pipe *dl_pipe = msg;
+ bool processed = false;
+ int i;
+
+ for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
+ processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
+
+ /* Trigger the doorbell irq to inform CP that new downlink buffers are
+ * available.
+ */
+ if (processed)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
+
+ return 0;
+}
+
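+/* TD update timer expiry: defer the doorbell trigger to the tasklet. */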
+static enum hrtimer_restart
+ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, tdupdate_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Get the CP execution state and map it to the AP phase. */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+	/* During the deinit sequence (IPC_P_OFF_REQ) return the internal
+	 * phase; otherwise derive the phase from the CP execution stage.
+	 */
+ return ipc_imem->phase == IPC_P_OFF_REQ ?
+ ipc_imem->phase :
+ ipc_imem_phase_update_check(ipc_imem, exec_stage);
+}
+
+const char *ipc_imem_phase_get_string(enum ipc_phase phase)
+{
+ switch (phase) {
+ case IPC_P_RUN:
+ return "A-RUN";
+
+ case IPC_P_OFF:
+ return "A-OFF";
+
+ case IPC_P_ROM:
+ return "A-ROM";
+
+ case IPC_P_PSI:
+ return "A-PSI";
+
+ case IPC_P_EBL:
+ return "A-EBL";
+
+ case IPC_P_CRASH:
+ return "A-CRASH";
+
+ case IPC_P_CD_READY:
+ return "A-CD_READY";
+
+ case IPC_P_OFF_REQ:
+ return "A-OFF_REQ";
+
+ default:
+ return "A-???";
+ }
+}
+
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
+
+ pipe->is_open = false;
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
+ &prep_args);
+
+ ipc_imem_pipe_cleanup(ipc_imem, pipe);
+}
+
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel_id, channel->state);
+ return;
+ }
+
+ /* Free only the channel id in the CP power off mode. */
+ if (channel->state == IMEM_CHANNEL_RESERVED)
+ /* Release only the channel id. */
+ goto channel_free;
+
+ if (ipc_imem->phase == IPC_P_RUN) {
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+ }
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+
+channel_free:
+ ipc_imem_channel_free(channel);
+}
+
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
+ return NULL;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ channel->state = IMEM_CHANNEL_ACTIVE;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
+ goto ul_pipe_err;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
+ goto dl_pipe_err;
+
+ /* Allocate the downlink buffers in tasklet context. */
+ if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
+ &channel->dl_pipe, 0, false)) {
+ dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
+ goto task_failed;
+ }
+
+ /* Active channel. */
+ return channel;
+task_failed:
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+dl_pipe_err:
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ul_pipe_err:
+ ipc_imem_channel_free(channel);
+ return NULL;
+}
+
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
+{
+ ipc_protocol_suspend(ipc_imem->ipc_protocol);
+}
+
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
+{
+ ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
+}
+
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage stage;
+
+ if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ ipc_imem_phase_update_check(ipc_imem, stage);
+ }
+}
+
+void ipc_imem_channel_free(struct ipc_mem_channel *channel)
+{
+ /* Reset dynamic channel elements. */
+ channel->state = IMEM_CHANNEL_FREE;
+}
+
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype)
+{
+ struct ipc_mem_channel *channel;
+ int i;
+
+ /* Find channel of given type/index */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+ if (channel->ctype == ctype && channel->index == index)
+ break;
+ }
+
+ if (i >= ipc_imem->nr_of_channels) {
+ dev_dbg(ipc_imem->dev,
+ "no channel definition for index=%d ctype=%d", index,
+ ctype);
+ return -ECHRNG;
+ }
+
+ if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
+ dev_dbg(ipc_imem->dev, "channel is in use");
+ return -EBUSY;
+ }
+
+ if (channel->ctype == IPC_CTYPE_WWAN &&
+ index == IPC_MEM_MUX_IP_CH_IF_ID)
+ channel->if_id = index;
+
+ channel->channel_id = index;
+ channel->state = IMEM_CHANNEL_RESERVED;
+
+ return i;
+}
+
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
+ chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
+ dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
+ chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
+ return;
+ }
+
+ if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "too many channels");
+ return;
+ }
+
+ channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
+ channel->channel_id = ipc_imem->nr_of_channels;
+ channel->ctype = ctype;
+ channel->index = chnl_cfg.id;
+ channel->net_err_count = 0;
+ channel->state = IMEM_CHANNEL_FREE;
+ ipc_imem->nr_of_channels++;
+
+ ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ skb_queue_head_init(&channel->ul_list);
+
+ init_completion(&channel->ul_sem);
+}
+
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (id < 0 || id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[id];
+
+ if (channel->state != IMEM_CHANNEL_FREE &&
+ channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev, "invalid channel state %d",
+ channel->state);
+ return;
+ }
+
+ channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
+ channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
+ channel->ul_pipe.is_open = false;
+ channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
+ channel->ul_pipe.channel = channel;
+ channel->ul_pipe.dir = IPC_MEM_DIR_UL;
+ channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->ul_pipe.irq_moderation = irq_moderation;
+ channel->ul_pipe.buf_size = 0;
+
+ channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
+ channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
+ channel->dl_pipe.is_open = false;
+ channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
+ channel->dl_pipe.channel = channel;
+ channel->dl_pipe.dir = IPC_MEM_DIR_DL;
+ channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->dl_pipe.irq_moderation = irq_moderation;
+ channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
+}
+
+static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
+{
+ int i;
+
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ struct ipc_mem_channel *channel;
+
+ channel = &ipc_imem->channels[i];
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+
+ ipc_imem_channel_free(channel);
+ }
+}
+
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+
+ /* Force pipe to closed state also when not explicitly closed through
+ * ipc_imem_pipe_close()
+ */
+ pipe->is_open = false;
+
+ /* Empty the uplink skb accumulator. */
+ while ((skb = skb_dequeue(&pipe->channel->ul_list)))
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+
+ ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
+}
+
+/* Send IPC protocol uninit to the modem when Link is active. */
+static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
+ enum ipc_mem_device_ipc_state ipc_state;
+
+	/* When the PCIe link is up, set the modem to IPC_UNINIT; otherwise
+	 * ignore it, since the PCIe link is down.
+	 */
+ if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
+ /* set modem to UNINIT
+ * (in case we want to reload the AP driver without resetting
+ * the modem)
+ */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_UNINIT);
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+
+ /* Wait for maximum 30ms to allow the Modem to uninitialize the
+ * protocol.
+ */
+ while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
+ (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
+ (timeout > 0)) {
+ usleep_range(1000, 1250);
+ timeout--;
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+ }
+ }
+}
+
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
+{
+ ipc_imem->phase = IPC_P_OFF_REQ;
+
+ /* forward MDM_NOT_READY to listeners */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
+
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+
+ /* cancel the workqueue */
+ cancel_work_sync(&ipc_imem->run_state_worker);
+
+ if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
+ ipc_mux_deinit(ipc_imem->mux);
+ ipc_wwan_deinit(ipc_imem->wwan);
+ ipc_port_deinit(ipc_imem->ipc_port);
+ }
+
+ ipc_imem_device_ipc_uninit(ipc_imem);
+ ipc_imem_channel_reset(ipc_imem);
+
+ ipc_protocol_deinit(ipc_imem->ipc_protocol);
+ ipc_task_deinit(ipc_imem->ipc_task);
+
+ kfree(ipc_imem->ipc_task);
+ kfree(ipc_imem->mmio);
+
+ ipc_imem->phase = IPC_P_OFF;
+}
+
+/* After CP has unblocked the PCIe link, save the start address of the doorbell
+ * scratchpad and prepare the shared memory region. If the flashing to RAM
+ * procedure shall be executed, copy the chip information from the doorbell
+ * scratchtpad to the application buffer and wake up the flash app.
+ */
+static int ipc_imem_config(struct iosm_imem *ipc_imem)
+{
+ enum ipc_phase phase;
+
+ /* Initialize the semaphore for the blocking read UL/DL transfer. */
+ init_completion(&ipc_imem->ul_pend_sem);
+
+ init_completion(&ipc_imem->dl_pend_sem);
+
+ /* clear internal flags */
+ ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
+ ipc_imem->enter_runtime = 0;
+
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ /* Either CP shall be in the power off or power on phase. */
+ switch (phase) {
+ case IPC_P_ROM:
+ ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
+ /* poll execution stage (for delayed start, e.g. NAND) */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ return 0;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ case IPC_P_RUN:
+ /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
+
+		/* Verify the expected initial state. */
+ if (ipc_imem->ipc_requested_state ==
+ ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ return 0;
+ }
+ dev_err(ipc_imem->dev,
+ "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+ break;
+ case IPC_P_CRASH:
+ case IPC_P_CD_READY:
+ dev_dbg(ipc_imem->dev,
+ "Modem is in phase %d, reset Modem to collect CD",
+ phase);
+ return 0;
+ default:
+ dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
+ break;
+ }
+
+ complete(&ipc_imem->dl_pend_sem);
+ complete(&ipc_imem->ul_pend_sem);
+ ipc_imem->phase = IPC_P_OFF;
+ return -EIO;
+}
+
+/* Pass the dev ptr to the shared memory driver and request the entry points */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev)
+{
+ struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
+
+ if (!ipc_imem)
+ return NULL;
+
+ /* Save the device address. */
+ ipc_imem->pcie = pcie;
+ ipc_imem->dev = dev;
+
+ ipc_imem->pci_device_id = device_id;
+
+ ipc_imem->ev_cdev_write_pending = false;
+ ipc_imem->cp_version = 0;
+ ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
+
+ /* Reset the flash channel id. */
+ ipc_imem->flash_channel_id = -1;
+
+ /* Reset the max number of configured channels */
+ ipc_imem->nr_of_channels = 0;
+
+ /* allocate IPC MMIO */
+ ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
+ if (!ipc_imem->mmio) {
+ dev_err(ipc_imem->dev, "failed to initialize mmio region");
+ goto mmio_init_fail;
+ }
+
+ ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
+ GFP_KERNEL);
+
+	/* Create tasklet for event handling */
+ if (!ipc_imem->ipc_task)
+ goto ipc_task_fail;
+
+ if (ipc_task_init(ipc_imem->ipc_task))
+ goto ipc_task_init_fail;
+
+ ipc_imem->ipc_task->dev = ipc_imem->dev;
+
+ INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
+
+ ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
+
+ if (!ipc_imem->ipc_protocol)
+ goto protocol_init_fail;
+
+ /* The phase is set to power off. */
+ ipc_imem->phase = IPC_P_OFF;
+
+ hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+
+ hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+
+ if (ipc_imem_config(ipc_imem)) {
+ dev_err(ipc_imem->dev, "failed to initialize the imem");
+ goto imem_config_fail;
+ }
+
+ return ipc_imem;
+
+imem_config_fail:
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+protocol_init_fail:
+ cancel_work_sync(&ipc_imem->run_state_worker);
+ ipc_task_deinit(ipc_imem->ipc_task);
+ipc_task_init_fail:
+ kfree(ipc_imem->ipc_task);
+ipc_task_fail:
+ kfree(ipc_imem->mmio);
+mmio_init_fail:
+ kfree(ipc_imem);
+ return NULL;
+}
+
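+/* MSI entry point: debounce the per-vector event and defer it to the
+ * tasklet.
+ */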
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
+{
+ /* Debounce IPC_EV_IRQ. */
+ if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
+ ipc_imem->ev_irq_pending[irq] = true;
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
+ NULL, 0, false);
+ }
+}
+
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
+{
+ ipc_imem->td_update_timer_suspended = suspend;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
new file mode 100644
index 000000000000..0d2f10e4cbc8
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_H
+#define IOSM_IPC_IMEM_H
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_uevent.h"
+#include "iosm_ipc_wwan.h"
+#include "iosm_ipc_task_queue.h"
+
+struct ipc_chnl_cfg;
+
+/* IRQ moderation in usec */
+#define IRQ_MOD_OFF 0
+#define IRQ_MOD_NET 1000
+#define IRQ_MOD_TRC 4000
+
+/* Either the PSI image is accepted by CP or the suspended flash tool is woken
+ * up and informed that the CP ROM driver is not ready to process the PSI
+ * image.
+ * unit : milliseconds
+ */
+#define IPC_PSI_TRANSFER_TIMEOUT 3000
+
+/* Timeout to wait for the modem to boot up to the
+ * IPC_MEM_DEVICE_IPC_INIT state.
+ * unit : 20 ms poll iterations (500 * msleep(20) = 10 s total)
+ */
+#define IPC_MODEM_BOOT_TIMEOUT 500
+
+/* Wait timeout for ipc status reflects IPC_MEM_DEVICE_IPC_UNINIT
+ * unit : milliseconds
+ */
+#define IPC_MODEM_UNINIT_TIMEOUT_MS 30
+
+/* Pending time for processing data.
+ * unit : milliseconds
+ */
+#define IPC_PEND_DATA_TIMEOUT 500
+
+/* The timeout in milliseconds for the application to wait for remote time. */
+#define IPC_REMOTE_TS_TIMEOUT_MS 10
+
+/* Timeout for TD allocation retry.
+ * unit : milliseconds
+ */
+#define IPC_TD_ALLOC_TIMER_PERIOD_MS 100
+
+/* Host sleep target is host */
+#define IPC_HOST_SLEEP_HOST 0
+
+/* Host sleep target is device */
+#define IPC_HOST_SLEEP_DEVICE 1
+
+/* Sleep message, target host: AP enters sleep / target device: CP is
+ * allowed to enter sleep and shall use the host sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP 0
+
+/* Sleep_message, target host: AP exits sleep / target device: CP is
+ * NOT allowed to enter sleep
+ */
+#define IPC_HOST_SLEEP_EXIT_SLEEP 1
+
+#define IMEM_IRQ_DONT_CARE (-1)
+
+#define IPC_MEM_MAX_CHANNELS 7
+
+#define IPC_MEM_MUX_IP_SESSION_ENTRIES 8
+
+#define IPC_MEM_MUX_IP_CH_IF_ID 0
+
+#define TD_UPDATE_DEFAULT_TIMEOUT_USEC 1900
+
+#define FORCE_UPDATE_DEFAULT_TIMEOUT_USEC 500
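+
+/* Both timeout values above are programmed into an hrtimer in nanoseconds,
+ * e.g. ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL).
+ */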
+
+/* Sleep_message, target host: not applicable / target device: CP is
+ * allowed to enter sleep and shall NOT use the device sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP_NO_PROTOCOL 2
+
+/* in_band_crash_signal IPC_MEM_INBAND_CRASH_SIG
+ * Modem crash notification configuration. If this value is non-zero then
+ * FEATURE_SET message will be sent to the Modem; as a result the Modem will
+ * signal a crash via the Execution Stage register. If this value is zero then
+ * the Modem will use an out-of-band method to notify about its crash.
+ */
+#define IPC_MEM_INBAND_CRASH_SIG 1
+
+/* Extra headroom to be allocated for DL SKBs to allow addition of Ethernet
+ * header
+ */
+#define IPC_MEM_DL_ETH_OFFSET 16
+
+#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
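+/* Illustrative use: IPC_CB(skb)->op_type = UL_DEFAULT; the macro
+ * reinterprets the generic skb->cb area as the driver private
+ * struct ipc_skb_cb.
+ */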
+
+#define FULLY_FUNCTIONAL 0
+
+/* List of the supported UL/DL pipes. */
+enum ipc_mem_pipes {
+ IPC_MEM_PIPE_0 = 0,
+ IPC_MEM_PIPE_1,
+ IPC_MEM_PIPE_2,
+ IPC_MEM_PIPE_3,
+ IPC_MEM_PIPE_4,
+ IPC_MEM_PIPE_5,
+ IPC_MEM_PIPE_6,
+ IPC_MEM_PIPE_7,
+ IPC_MEM_PIPE_8,
+ IPC_MEM_PIPE_9,
+ IPC_MEM_PIPE_10,
+ IPC_MEM_PIPE_11,
+ IPC_MEM_PIPE_12,
+ IPC_MEM_PIPE_13,
+ IPC_MEM_PIPE_14,
+ IPC_MEM_PIPE_15,
+ IPC_MEM_PIPE_16,
+ IPC_MEM_PIPE_17,
+ IPC_MEM_PIPE_18,
+ IPC_MEM_PIPE_19,
+ IPC_MEM_PIPE_20,
+ IPC_MEM_PIPE_21,
+ IPC_MEM_PIPE_22,
+ IPC_MEM_PIPE_23,
+ IPC_MEM_MAX_PIPES
+};
+
+/* Enum defining channel states. */
+enum ipc_channel_state {
+ IMEM_CHANNEL_FREE,
+ IMEM_CHANNEL_RESERVED,
+ IMEM_CHANNEL_ACTIVE,
+ IMEM_CHANNEL_CLOSING,
+};
+
+/* Time Unit */
+enum ipc_time_unit {
+ IPC_SEC = 0,
+ IPC_MILLI_SEC = 1,
+ IPC_MICRO_SEC = 2,
+ IPC_NANO_SEC = 3,
+ IPC_PICO_SEC = 4,
+ IPC_FEMTO_SEC = 5,
+ IPC_ATTO_SEC = 6,
+};
+
+/**
+ * enum ipc_ctype - Enum defining supported channel type needed for control
+ * /IP traffic.
+ * @IPC_CTYPE_WWAN: Used for IP traffic
+ * @IPC_CTYPE_CTRL: Used for Control Communication
+ */
+enum ipc_ctype {
+ IPC_CTYPE_WWAN,
+ IPC_CTYPE_CTRL,
+};
+
+/* Pipe direction. */
+enum ipc_mem_pipe_dir {
+ IPC_MEM_DIR_UL,
+ IPC_MEM_DIR_DL,
+};
+
+/* HP update identifier. To be used as data for ipc_cp_irq_hpda_update() */
+enum ipc_hp_identifier {
+ IPC_HP_MR = 0,
+ IPC_HP_PM_TRIGGER,
+ IPC_HP_WAKEUP_SPEC_TMR,
+ IPC_HP_TD_UPD_TMR_START,
+ IPC_HP_TD_UPD_TMR,
+ IPC_HP_FAST_TD_UPD_TMR,
+ IPC_HP_UL_WRITE_TD,
+ IPC_HP_DL_PROCESS,
+ IPC_HP_NET_CHANNEL_INIT,
+ IPC_HP_CDEV_OPEN,
+};
+
+/**
+ * struct ipc_pipe - Structure for Pipe.
+ * @tdr_start: Ipc private protocol Transfer Descriptor Ring
+ * @channel:			Back pointer to the channel that owns this
+ *				pipe; needed to pass DL data to the user port
+ * @skbr_start: Circular buffer for skbuf and the buffer
+ * reference in a tdr_start entry.
+ * @phy_tdr_start: Transfer descriptor start address
+ * @old_head: last head pointer reported to CP.
+ * @old_tail: AP read position before CP moves the read
+ * position to write/head. If CP has consumed the
+ *				buffers, the AP has to free the skbufs starting at
+ * tdr_start[old_tail].
+ * @nr_of_entries: Number of elements of skb_start and tdr_start.
+ * @max_nr_of_queued_entries: Maximum number of queued entries in TDR
+ * @accumulation_backoff: Accumulation in usec for accumulation
+ * backoff (0 = no acc backoff)
+ * @irq_moderation: timer in usec for irq_moderation
+ * (0=no irq moderation)
+ * @pipe_nr: Pipe identification number
+ * @irq: Interrupt vector
+ * @dir: Direction of data stream in pipe
+ * @td_tag: Unique tag of the buffer queued
+ * @buf_size: Buffer size (in bytes) for preallocated
+ * buffers (for DL pipes)
+ * @nr_of_queued_entries:	Queued number of entries
+ * @is_open: Check for open pipe status
+ */
+struct ipc_pipe {
+ struct ipc_protocol_td *tdr_start;
+ struct ipc_mem_channel *channel;
+ struct sk_buff **skbr_start;
+ dma_addr_t phy_tdr_start;
+ u32 old_head;
+ u32 old_tail;
+ u32 nr_of_entries;
+ u32 max_nr_of_queued_entries;
+ u32 accumulation_backoff;
+ u32 irq_moderation;
+ u32 pipe_nr;
+ u32 irq;
+ enum ipc_mem_pipe_dir dir;
+ u32 td_tag;
+ u32 buf_size;
+ u16 nr_of_queued_entries;
+ u8 is_open:1;
+};
+
+/**
+ * struct ipc_mem_channel - Structure for Channel.
+ * @channel_id: Instance of the channel list and is return to the user
+ * at the end of the open operation.
+ * @ctype: Control or netif channel.
+ * @index: unique index per ctype
+ * @ul_pipe: pipe objects
+ * @dl_pipe: pipe objects
+ * @if_id: Interface ID
+ * @net_err_count: Number of downlink errors returned by ipc_wwan_receive
+ * interface at the entry point of the IP stack.
+ * @state: Free, reserved or busy (in use).
+ * @ul_sem: Needed for the blocking write or uplink transfer.
+ * @ul_list: Uplink accumulator which is filled by the uplink
+ *			char app or IP stack. The socket buffer pointers are
+ * added to the descriptor list in the kthread context.
+ */
+struct ipc_mem_channel {
+ int channel_id;
+ enum ipc_ctype ctype;
+ int index;
+ struct ipc_pipe ul_pipe;
+ struct ipc_pipe dl_pipe;
+ int if_id;
+ u32 net_err_count;
+ enum ipc_channel_state state;
+ struct completion ul_sem;
+ struct sk_buff_head ul_list;
+};
+
+/**
+ * enum ipc_phase - Different AP and CP phases.
+ * The enums defined after "IPC_P_ROM" and before
+ * "IPC_P_RUN" indicates the operating state where CP can
+ *		   respond to any requests. Take this into account
+ *		   when introducing a new phase.
+ * @IPC_P_OFF:		On the host PC, the PCIe device link settings are
+ *			known from the combined power on. The PC is running,
+ *			the driver is loaded and CP is in power-off mode. The
+ *			PCIe bus driver calls the device power mode D3hot. In
+ *			this phase the driver polls the device until it is in
+ *			the power-on state and signals power mode D0.
+ * @IPC_P_OFF_REQ: The intermediate phase between cleanup activity starts
+ * and ends.
+ * @IPC_P_CRASH: The phase indicating CP crash
+ * @IPC_P_CD_READY: The phase indicating CP core dump is ready
+ * @IPC_P_ROM: After power on, CP starts in ROM mode and the IPC ROM
+ * driver is waiting 150 ms for the AP active notification
+ * saved in the PCI link status register.
+ * @IPC_P_PSI: Primary signed image download phase
+ * @IPC_P_EBL:		Extended bootloader phase
+ * @IPC_P_RUN: The phase after flashing to RAM is the RUNTIME phase.
+ */
+enum ipc_phase {
+ IPC_P_OFF,
+ IPC_P_OFF_REQ,
+ IPC_P_CRASH,
+ IPC_P_CD_READY,
+ IPC_P_ROM,
+ IPC_P_PSI,
+ IPC_P_EBL,
+ IPC_P_RUN,
+};
+
+/**
+ * struct iosm_imem - Current state of the IPC shared memory.
+ * @mmio: mmio instance to access CP MMIO area /
+ * doorbell scratchpad.
+ * @ipc_protocol: IPC Protocol instance
+ * @ipc_task: Task for entry into ipc task queue
+ * @wwan: WWAN device pointer
+ * @mux: IP Data multiplexing state.
+ * @sio: IPC SIO data structure pointer
+ * @ipc_port: IPC PORT data structure pointer
+ * @pcie: IPC PCIe
+ * @dev: Pointer to device structure
+ * @flash_channel_id: Reserved channel id for flashing to RAM.
+ * @ipc_requested_state: Expected IPC state on CP.
+ * @channels: Channel list with UL/DL pipe pairs.
+ * @ipc_status: local ipc_status
+ * @nr_of_channels: number of configured channels
+ * @startup_timer: startup timer for NAND support.
+ * @hrtimer_period: Hr timer period
+ * @tdupdate_timer: Delay the TD update doorbell.
+ * @fast_update_timer: forced head pointer update delay timer.
+ * @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @rom_exit_code: Mapped boot rom exit code.
+ * @enter_runtime: 1 means the transition to runtime phase was
+ * executed.
+ * @ul_pend_sem: Semaphore to wait/complete of UL TDs
+ * before closing pipe.
+ * @app_notify_ul_pend: Signal app if UL TD is pending
+ * @dl_pend_sem: Semaphore to wait/complete of DL TDs
+ * before closing pipe.
+ * @app_notify_dl_pend: Signal app if DL TD is pending
+ * @phase: Operating phase like runtime.
+ * @pci_device_id: Device ID
+ * @cp_version: CP version
+ * @device_sleep: Device sleep state
+ * @run_state_worker: Pointer to worker component for device
+ * setup operations to be called when modem
+ * reaches RUN state
+ * @ev_irq_pending: 0 means inform the IPC tasklet to
+ * process the irq actions.
+ * @flag: Flag to monitor the state of driver
+ * @td_update_timer_suspended: if true then td update timer suspend
+ * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass
+ * the accumulated uplink buffers to CP.
+ * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to
+ *				process a pending network transmit
+ * @reset_det_n: Reset detect flag
+ * @pcie_wake_n: Pcie wake flag
+ */
+struct iosm_imem {
+ struct iosm_mmio *mmio;
+ struct iosm_protocol *ipc_protocol;
+ struct ipc_task *ipc_task;
+ struct iosm_wwan *wwan;
+ struct iosm_mux *mux;
+ struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ int flash_channel_id;
+ enum ipc_mem_device_ipc_state ipc_requested_state;
+ struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
+ u32 ipc_status;
+ u32 nr_of_channels;
+ struct hrtimer startup_timer;
+ ktime_t hrtimer_period;
+ struct hrtimer tdupdate_timer;
+ struct hrtimer fast_update_timer;
+ struct hrtimer td_alloc_timer;
+ enum rom_exit_code rom_exit_code;
+ u32 enter_runtime;
+ struct completion ul_pend_sem;
+ u32 app_notify_ul_pend;
+ struct completion dl_pend_sem;
+ u32 app_notify_dl_pend;
+ enum ipc_phase phase;
+ u16 pci_device_id;
+ int cp_version;
+ int device_sleep;
+ struct work_struct run_state_worker;
+ u8 ev_irq_pending[IPC_IRQ_VECTORS];
+ unsigned long flag;
+ u8 td_update_timer_suspended:1,
+ ev_cdev_write_pending:1,
+ ev_mux_net_transmit_pending:1,
+ reset_det_n:1,
+ pcie_wake_n:1;
+};
+
+/**
+ * ipc_imem_init - Initialize the shared memory region
+ * @pcie: Pointer to core driver data-struct
+ * @device_id: PCI device ID
+ * @mmio: Pointer to the mmio area
+ * @dev: Pointer to device structure
+ *
+ * Returns: Initialized imem pointer on success else NULL
+ */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev);
+
+/**
+ * ipc_imem_pm_s2idle_sleep - Set PM variables to sleep/active for
+ * s2idle sleep/active
+ * @ipc_imem: Pointer to imem data-struct
+ * @sleep: Set PM Variable to sleep/active
+ */
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep);
+
+/**
+ * ipc_imem_pm_suspend - The HAL shall ask the shared memory layer
+ * whether D3 is allowed.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_pm_resume - The HAL shall inform the shared memory layer
+ * that the device is active.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_cleanup - Inform CP and free the shared memory resources.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_irq_process - Shift the IRQ actions to the IPC thread.
+ * @ipc_imem: Pointer to imem data-struct
+ * @irq: Irq number
+ */
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * imem_get_device_sleep_state - Get the device sleep state value.
+ * @ipc_imem: Pointer to imem instance
+ *
+ * Returns: device sleep state
+ */
+int imem_get_device_sleep_state(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_td_update_timer_suspend - Updates the TD Update Timer suspend flag.
+ * @ipc_imem: Pointer to imem data-struct
+ * @suspend: Flag to update. If TRUE then HP update doorbell is triggered to
+ * device without any wait. If FALSE then HP update doorbell is
+ * delayed until timeout.
+ */
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend);
+
+/**
+ * ipc_imem_channel_close - Release the channel resources.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID to be cleaned up.
+ */
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id);
+
+/**
+ * ipc_imem_channel_alloc - Reserves a channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @index: ID to lookup from the preallocated list.
+ * @ctype: Channel type.
+ *
+ * Returns: Index on success and failure value on error
+ */
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype);
+
+/**
+ * ipc_imem_channel_open - Establish the pipes.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID returned during alloc.
+ * @db_id: Doorbell ID for trigger identifier.
+ *
+ * Returns: Pointer of ipc_mem_channel on success and NULL on failure.
+ */
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id);
+
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_write_td - Pass the channel UL list to protocol layer for TD
+ * preparation and sending them to the device.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: TRUE if an HP Doorbell trigger is pending, FALSE otherwise.
+ */
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_send - Dequeue SKB from channel list and start with
+ *		 the uplink transfer. If HP Doorbell is pending to be
+ * triggered then starts the TD Update Timer.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_update - Set or modify pipe config of an existing channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @id: Channel config index
+ * @chnl_cfg: Channel config struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_channel_free - Free an IPC channel.
+ * @channel: Channel to be freed
+ */
+void ipc_imem_channel_free(struct ipc_mem_channel *channel);
+
+/**
+ * ipc_imem_hrtimer_stop - Stop the hrtimer
+ * @hr_timer: Pointer to hrtimer instance
+ */
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer);
+
+/**
+ * ipc_imem_pipe_cleanup - Reset the volatile content of the given pipe
+ * @ipc_imem:	Pointer to imem data-struct
+ * @pipe:	Pipe to be cleaned up
+ */
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_pipe_close - Send msg to device to close pipe
+ * @ipc_imem: Pointer to imem data-struct
+ * @pipe: Pipe to be closed
+ */
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_phase_update - Get the CP execution state
+ * and map it to the AP phase.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Current ap updated phase
+ */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_phase_get_string - Return the current operation
+ * phase as string.
+ * @phase: AP phase
+ *
+ * Returns: AP phase string
+ */
+const char *ipc_imem_phase_get_string(enum ipc_phase phase);
+
+/**
+ * ipc_imem_msg_send_feature_set - Send feature set message to modem
+ * @ipc_imem: Pointer to imem data-struct
+ * @reset_enable: 0 = out-of-band, 1 = in-band-crash notification
+ * @atomic_ctx:		TRUE when called from atomic (tasklet) context;
+ *			selects the tasklet-safe message send path
+ *
+ */
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx);
+
+/**
+ * ipc_imem_ipc_init_check - Send the init event to CP, wait a certain time and
+ * set CP to runtime with the context information
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_init - Initialize the channel list with UL/DL pipe pairs.
+ * @ipc_imem: Pointer to imem data-struct
+ * @ctype: Channel type
+ * @chnl_cfg: Channel configuration struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
new file mode 100644
index 000000000000..46f76e8aae92
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Open a packet data online channel between the network layer and CP. */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
+{
+ dev_dbg(ipc_imem->dev, "%s if id: %d",
+ ipc_imem_phase_get_string(ipc_imem->phase), if_id);
+
+ /* The network interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return -EIO;
+ }
+
+	/* Check the interface id: if if_id is in the range 1 to 8, create an
+	 * IP MUX channel session. MUX sessions start at 0 while network
+	 * interface ids start at 1, so map if_id to the session id via
+	 * if_id - 1 (e.g. if_id 1 maps to session 0).
+	 */
+ if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
+ return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
+
+ return -EINVAL;
+}
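+
+/* Usage sketch (illustrative): for network interface 1 the net layer is
+ * expected to call ipc_imem_sys_wwan_open(ipc_imem, 1), which opens IP MUX
+ * session 0.
+ */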
+
+/* Release a net link to CP. */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id)
+{
+ if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
+ if_id <= IP_MUX_SESSION_END)
+ ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+}
+
+/* Tasklet call to do uplink transfer. */
+static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_imem->ev_cdev_write_pending = false;
+ ipc_imem_ul_send(ipc_imem);
+
+ return 0;
+}
+
+/* Schedule the uplink (sio) write through the tasklet. */
+static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
+{
+ if (ipc_imem->ev_cdev_write_pending)
+ return -1;
+
+ ipc_imem->ev_cdev_write_pending = true;
+
+ return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
+ NULL, 0, false);
+}
+
+/* Function to transfer UL data */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
+ int if_id, int channel_id, struct sk_buff *skb)
+{
+ int ret = -EINVAL;
+
+ if (!ipc_imem || channel_id < 0)
+ goto out;
+
+ /* Is CP Running? */
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_dbg(ipc_imem->dev, "phase %s transmit",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
+ if_id - 1, skb);
+ else
+ dev_err(ipc_imem->dev,
+ "invalid if_id %d: ", if_id);
+out:
+ return ret;
+}
+
+/* Initialize wwan channel */
+void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+
+ /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ return;
+ }
+
+ ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ /* WWAN registration. */
+ ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
+ if (!ipc_imem->wwan)
+ dev_err(ipc_imem->dev,
+ "failed to register the ipc_wwan interfaces");
+}
+
+/* Map SKB to DMA for transfer */
+static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
+ struct sk_buff *skb)
+{
+ struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
+ char *buf = skb->data;
+ int len = skb->len;
+ dma_addr_t mapping;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
+
+ if (ret)
+ goto err;
+
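+	/* Compile-time guard: the driver private control block must fit into
+	 * the generic skb->cb area.
+	 */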
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ IPC_CB(skb)->mapping = mapping;
+ IPC_CB(skb)->direction = DMA_TO_DEVICE;
+ IPC_CB(skb)->len = len;
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+
+err:
+ return ret;
+}
+
+/* return true if channel is ready for use */
+static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
+{
+ enum ipc_phase phase;
+
+ /* Update the current operation phase. */
+ phase = ipc_imem->phase;
+
+ /* Select the operation depending on the execution stage. */
+ switch (phase) {
+ case IPC_P_RUN:
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ break;
+
+ case IPC_P_ROM:
+ /* Prepare the PSI image for the CP ROM driver and
+ * suspend the flash app.
+ */
+ if (channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev,
+ "ch[%d]:invalid channel state %d,expected %d",
+ channel->channel_id, channel->state,
+ IMEM_CHANNEL_RESERVED);
+ goto channel_unavailable;
+ }
+ goto channel_available;
+
+ default:
+ /* Ignore uplink actions in all other phases. */
+ dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
+ channel->channel_id, phase);
+ goto channel_unavailable;
+ }
+ /* Check the full availability of the channel. */
+ if (channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
+ channel->channel_id, channel->state);
+ goto channel_unavailable;
+ }
+
+channel_available:
+ return true;
+
+channel_unavailable:
+ return false;
+}
+
+/* Release a sio link to CP. */
+void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
+{
+ struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
+ struct ipc_mem_channel *channel = ipc_cdev->channel;
+ enum ipc_phase curr_phase;
+ int status = 0;
+ u32 tail = 0;
+
+ curr_phase = ipc_imem->phase;
+
+	/* If the current phase is IPC_P_OFF or the SIO ID is negative then
+	 * the channel is already freed. Nothing to do.
+	 */
+ if (curr_phase == IPC_P_OFF) {
+ dev_err(ipc_imem->dev,
+ "nothing to do. Current Phase: %s",
+ ipc_imem_phase_get_string(curr_phase));
+ return;
+ }
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel->channel_id, channel->state);
+ return;
+ }
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ ipc_imem->app_notify_ul_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * UL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_ul_pend = 0;
+ }
+
+	/* Likewise, if there are any pending DL TDs, wait for
+	 * timeout/completion before closing the pipe.
+	 */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ ipc_imem->app_notify_dl_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * DL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_dl_pend = 0;
+ }
+
+	/* Due to the wait for completion in messages, there is a small window
+	 * between closing the pipe and marking the channel as closed. In this
+	 * small window there could be an HP update from the host driver. Hence
+	 * update the channel state to CLOSING to avoid an unnecessary interrupt
+	 * towards CP.
+	 */
+ channel->state = IMEM_CHANNEL_CLOSING;
+
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+
+ ipc_imem_channel_free(channel);
+}
+
+/* Open a PORT link to CP and return the channel */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id)
+{
+ struct ipc_mem_channel *channel;
+ int ch_id;
+
+ /* The PORT interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "PORT open refused, phase %s",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return NULL;
+ }
+
+ ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
+
+ if (ch_id < 0) {
+ dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
+ return NULL;
+ }
+
+ channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
+
+ if (!channel) {
+ dev_err(ipc_imem->dev, "PORT channel id open failed");
+ return NULL;
+ }
+
+ return channel;
+}
+
+/* transfer skb to modem */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
+{
+ struct ipc_mem_channel *channel = ipc_cdev->channel;
+ struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
+ int ret = -EIO;
+
+ if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
+ ipc_imem->phase == IPC_P_OFF_REQ)
+ goto out;
+
+ ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
+
+ if (ret)
+ goto out;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ ret = ipc_imem_call_cdev_write(ipc_imem);
+
+ if (ret) {
+ skb_dequeue_tail(&channel->ul_list);
+ dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
+ ipc_cdev->channel->channel_id);
+ }
+out:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
new file mode 100644
index 000000000000..fd356dafbdd6
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_OPS_H
+#define IOSM_IPC_IMEM_OPS_H
+
+#include "iosm_ipc_mux_codec.h"
+
+/* Maximum wait time for blocking read */
+#define IPC_READ_TIMEOUT 500
+
+/* The delay in ms for deferring the unregister */
+#define SIO_UNREGISTER_DEFER_DELAY_MS 1
+
+/* Default delay until the CP PSI image is running and the modem updates
+ * the execution stage.
+ * Unit: milliseconds
+ */
+#define PSI_START_DEFAULT_TIMEOUT 3000
+
+/* Default timeout when closing SIO, until the modem is in the
+ * running state.
+ * Unit: milliseconds
+ */
+#define BOOT_CHECK_DEFAULT_TIMEOUT 400
+
+/* IP MUX channel range */
+#define IP_MUX_SESSION_START 1
+#define IP_MUX_SESSION_END 8
+
+/* Default IP MUX channel */
+#define IP_MUX_SESSION_DEFAULT 1
+
+/**
+ * ipc_imem_sys_port_open - Open a port link to CP.
+ * @ipc_imem: Imem instance.
+ * @chl_id: Channel identifier.
+ * @hp_id: HP identifier.
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id);
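+
+/* Illustrative call sequence (a sketch, not lifted from the driver): a
+ * control port is typically opened once, written to, and closed again:
+ *
+ *	channel = ipc_imem_sys_port_open(ipc_imem, chl_id, hp_id);
+ *	if (channel)
+ *		ret = ipc_imem_sys_cdev_write(ipc_cdev, skb);
+ *	...
+ *	ipc_imem_sys_cdev_close(ipc_cdev);
+ */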
+
+/**
+ * ipc_imem_sys_cdev_close - Release a sio link to CP.
+ * @ipc_cdev: iosm sio instance.
+ */
+void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev);
+
+/**
+ * ipc_imem_sys_cdev_write - Route the uplink buffer to CP.
+ * @ipc_cdev: iosm_cdev instance.
+ * @skb: Pointer to skb.
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_wwan_open - Open packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: ip link tag of the net device.
+ *
+ * Return: Channel ID on success and failure value on error
+ */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id);
+
+/**
+ * ipc_imem_sys_wwan_close - Close packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: IP link id of the net device.
+ * @channel_id: Channel ID to be closed.
+ */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id);
+
+/**
+ * ipc_imem_sys_wwan_transmit - Function to transfer UL data
+ * @ipc_imem: Imem instance.
+ * @if_id: link ID of the device.
+ * @channel_id: Channel ID used
+ * @skb: Pointer to sk buffer
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
+			       int channel_id, struct sk_buff *skb);
+
+/**
+ * ipc_imem_wwan_channel_init - Initializes WWAN channels and the channel for
+ * MUX.
+ * @ipc_imem: Pointer to iosm_imem struct.
+ * @mux_type: Type of mux protocol.
+ */
+void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.c b/drivers/net/wwan/iosm/iosm_ipc_irq.c
new file mode 100644
index 000000000000..702f50a48151
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+static void ipc_write_dbell_reg(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ void __iomem *write_reg;
+
+	/* Select the doorbell register; currently only the first one is
+	 * needed by CP.
+	 */
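+	/* The register address works out to
+	 *	ipc_regs + doorbell_write + irq_n * doorbell_reg_offset,
+	 * so doorbell 0 sits at the doorbell_write offset and each further
+	 * doorbell follows at a fixed stride.
+	 */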
+ write_reg = (void __iomem *)((u8 __iomem *)ipc_pcie->ipc_regs +
+ ipc_pcie->doorbell_write +
+ (irq_n * ipc_pcie->doorbell_reg_offset));
+
+ /* Fire the doorbell irq by writing data on the doorbell write pointer
+ * register.
+ */
+ iowrite32(data, write_reg);
+}
+
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ ipc_write_dbell_reg(ipc_pcie, irq_n, data);
+}
+
+/* Threaded interrupt handler for MSI interrupts */
+static irqreturn_t ipc_msi_interrupt(int irq, void *dev_id)
+{
+ struct iosm_pcie *ipc_pcie = dev_id;
+ int instance = irq - ipc_pcie->pci->irq;
+
+ /* Shift the MSI irq actions to the IPC tasklet. IRQ_NONE means the
+ * irq was not from the IPC device or could not be served.
+ */
+ if (instance >= ipc_pcie->nvec)
+ return IRQ_NONE;
+
+ if (!test_bit(0, &ipc_pcie->suspend))
+ ipc_imem_irq_process(ipc_pcie->imem, instance);
+
+ return IRQ_HANDLED;
+}
+
+void ipc_release_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+
+ if (pdev->msi_enabled) {
+ while (--ipc_pcie->nvec >= 0)
+ free_irq(pdev->irq + ipc_pcie->nvec, ipc_pcie);
+ }
+ pci_free_irq_vectors(pdev);
+}
+
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+ int i, rc = -EINVAL;
+
+ ipc_pcie->nvec = pci_alloc_irq_vectors(pdev, IPC_MSI_VECTORS,
+ IPC_MSI_VECTORS, PCI_IRQ_MSI);
+
+ if (ipc_pcie->nvec < 0) {
+ rc = ipc_pcie->nvec;
+ goto error;
+ }
+
+ if (!pdev->msi_enabled)
+ goto error;
+
+ for (i = 0; i < ipc_pcie->nvec; ++i) {
+ rc = request_threaded_irq(pdev->irq + i, NULL,
+ ipc_msi_interrupt, IRQF_ONESHOT,
+ KBUILD_MODNAME, ipc_pcie);
+ if (rc) {
+ dev_err(ipc_pcie->dev, "unable to grab IRQ, rc=%d", rc);
+ ipc_pcie->nvec = i;
+ ipc_release_irq(ipc_pcie);
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.h b/drivers/net/wwan/iosm/iosm_ipc_irq.h
new file mode 100644
index 000000000000..a8ed596cb6a5
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IRQ_H
+#define IOSM_IPC_IRQ_H
+
+struct iosm_pcie;
+
+/**
+ * ipc_doorbell_fire - fire doorbell to CP
+ * @ipc_pcie: Pointer to iosm_pcie
+ * @irq_n: Doorbell type
+ * @data: ipc state
+ */
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data);
+
+/**
+ * ipc_release_irq - Release the IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ */
+void ipc_release_irq(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_acquire_irq - acquire IRQ & register IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
new file mode 100644
index 000000000000..06c94b1720b6
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_mmio.h"
+
+/* Definition of MMIO offsets
+ * note that MMIO_CI offsets are relative to end of chip info structure
+ */
+
+/* MMIO chip info size in bytes */
+#define MMIO_CHIP_INFO_SIZE 60
+
+/* CP execution stage */
+#define MMIO_OFFSET_EXECUTION_STAGE 0x00
+
+/* Boot ROM Chip Info struct */
+#define MMIO_OFFSET_CHIP_INFO 0x04
+
+#define MMIO_OFFSET_ROM_EXIT_CODE 0x40
+
+#define MMIO_OFFSET_PSI_ADDRESS 0x54
+
+#define MMIO_OFFSET_PSI_SIZE 0x5C
+
+#define MMIO_OFFSET_IPC_STATUS 0x60
+
+#define MMIO_OFFSET_CONTEXT_INFO 0x64
+
+#define MMIO_OFFSET_BASE_ADDR 0x6C
+
+#define MMIO_OFFSET_END_ADDR 0x74
+
+#define MMIO_OFFSET_CP_VERSION 0xF0
+
+#define MMIO_OFFSET_CP_CAPABILITIES 0xF4
+
+/* Number of retries (with a 20 ms delay between attempts, i.e. up to about
+ * one second in total) to wait for the modem boot code to write a valid
+ * execution stage into the mmio area
+ */
+#define IPC_MMIO_EXEC_STAGE_TIMEOUT 50
+
+/* check if exec stage has one of the valid values */
+static bool ipc_mmio_is_valid_exec_stage(enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ case IPC_MEM_EXEC_STAGE_PSI:
+ case IPC_MEM_EXEC_STAGE_EBL:
+ case IPC_MEM_EXEC_STAGE_RUN:
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
+{
+ u32 cp_cap;
+ unsigned int ver;
+
+ ver = ipc_mmio_get_cp_version(ipc_mmio);
+ cp_cap = readl(ipc_mmio->base + ipc_mmio->offset.cp_capability);
+
+ ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
+ !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
+
+ ipc_mmio->has_ul_flow_credit =
+ (ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
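+
+	/* Example: a CP reporting at least IOSM_CP_VERSION with neither
+	 * DL_AGGR nor UL_AGGR set is driven in MUX Lite mode; UL_FLOW_CREDIT
+	 * additionally enables credit-based uplink flow control.
+	 */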
+}
+
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+{
+ struct iosm_mmio *ipc_mmio = kzalloc(sizeof(*ipc_mmio), GFP_KERNEL);
+ int retries = IPC_MMIO_EXEC_STAGE_TIMEOUT;
+ enum ipc_mem_exec_stage stage;
+
+ if (!ipc_mmio)
+ return NULL;
+
+ ipc_mmio->dev = dev;
+
+ ipc_mmio->base = mmio;
+
+ ipc_mmio->offset.exec_stage = MMIO_OFFSET_EXECUTION_STAGE;
+
+ /* Check for a valid execution stage to make sure that the boot code
+ * has correctly initialized the MMIO area.
+ */
+ do {
+ stage = ipc_mmio_get_exec_stage(ipc_mmio);
+ if (ipc_mmio_is_valid_exec_stage(stage))
+ break;
+
+ msleep(20);
+	} while (retries-- > 0);
+
+	/* retries is negative only if the loop above never saw a valid stage. */
+	if (retries < 0) {
+		dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
+		goto init_fail;
+	}
+
+ ipc_mmio->offset.chip_info = MMIO_OFFSET_CHIP_INFO;
+
+ /* read chip info size and version from chip info structure */
+ ipc_mmio->chip_info_version =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info);
+
+ /* Increment of 2 is needed as the size value in the chip info
+	 * excludes the version and size fields, which are always present
+ */
+ ipc_mmio->chip_info_size =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info + 1) + 2;
+
+ if (ipc_mmio->chip_info_size != MMIO_CHIP_INFO_SIZE) {
+ dev_err(ipc_mmio->dev, "Unexpected Chip Info");
+ goto init_fail;
+ }
+
+ ipc_mmio->offset.rom_exit_code = MMIO_OFFSET_ROM_EXIT_CODE;
+
+ ipc_mmio->offset.psi_address = MMIO_OFFSET_PSI_ADDRESS;
+ ipc_mmio->offset.psi_size = MMIO_OFFSET_PSI_SIZE;
+ ipc_mmio->offset.ipc_status = MMIO_OFFSET_IPC_STATUS;
+ ipc_mmio->offset.context_info = MMIO_OFFSET_CONTEXT_INFO;
+ ipc_mmio->offset.ap_win_base = MMIO_OFFSET_BASE_ADDR;
+ ipc_mmio->offset.ap_win_end = MMIO_OFFSET_END_ADDR;
+
+ ipc_mmio->offset.cp_version = MMIO_OFFSET_CP_VERSION;
+ ipc_mmio->offset.cp_capability = MMIO_OFFSET_CP_CAPABILITIES;
+
+ return ipc_mmio;
+
+init_fail:
+ kfree(ipc_mmio);
+ return NULL;
+}
+
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_EXEC_STAGE_INVALID;
+
+ return (enum ipc_mem_exec_stage)readl(ipc_mmio->base +
+ ipc_mmio->offset.exec_stage);
+}
+
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size)
+{
+ if (ipc_mmio && dest)
+ memcpy_fromio(dest, ipc_mmio->base + ipc_mmio->offset.chip_info,
+ size);
+}
+
+enum ipc_mem_device_ipc_state ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_DEVICE_IPC_INVALID;
+
+ return (enum ipc_mem_device_ipc_state)
+ readl(ipc_mmio->base + ipc_mmio->offset.ipc_status);
+}
+
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IMEM_ROM_EXIT_FAIL;
+
+ return (enum rom_exit_code)readl(ipc_mmio->base +
+ ipc_mmio->offset.rom_exit_code);
+}
+
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return;
+
+	/* AP memory window (the full window is open and active so that the
+	 * modem checks each AP address); 0 means no checking on the modem side.
+	 */
+ iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
+ iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
+
+ iowrite64_lo_hi(ipc_mmio->context_info_addr,
+ ipc_mmio->base + ipc_mmio->offset.context_info);
+}
+
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size)
+{
+ if (!ipc_mmio)
+ return;
+
+ iowrite64_lo_hi(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
+ writel(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
+}
+
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
+{
+ if (!ipc_mmio)
+ return;
+
+	/* Store the context_info address; it will be written to the mmio area
+	 * in the IPC_MEM_DEVICE_IPC_INIT state via ipc_mmio_config()
+	 */
+ ipc_mmio->context_info_addr = addr;
+}
+
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio)
+{
+ return ipc_mmio ? readl(ipc_mmio->base + ipc_mmio->offset.cp_version) :
+ -EFAULT;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
new file mode 100644
index 000000000000..45e6923da78f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MMIO_H
+#define IOSM_IPC_MMIO_H
+
+/* Minimal IOSM CP version that has a valid CP_CAPABILITIES field */
+#define IOSM_CP_VERSION 0x0100UL
+
+/* DL dir Aggregation support mask */
+#define DL_AGGR BIT(23)
+
+/* UL dir Aggregation support mask */
+#define UL_AGGR BIT(22)
+
+/* UL flow credit support mask */
+#define UL_FLOW_CREDIT BIT(21)
+
+/* Possible states of the IPC finite state machine. */
+enum ipc_mem_device_ipc_state {
+ IPC_MEM_DEVICE_IPC_UNINIT,
+ IPC_MEM_DEVICE_IPC_INIT,
+ IPC_MEM_DEVICE_IPC_RUNNING,
+ IPC_MEM_DEVICE_IPC_RECOVERY,
+ IPC_MEM_DEVICE_IPC_ERROR,
+ IPC_MEM_DEVICE_IPC_DONT_CARE,
+ IPC_MEM_DEVICE_IPC_INVALID = -1
+};
+
+/* Boot ROM exit status. */
+enum rom_exit_code {
+ IMEM_ROM_EXIT_OPEN_EXT = 0x01,
+ IMEM_ROM_EXIT_OPEN_MEM = 0x02,
+ IMEM_ROM_EXIT_CERT_EXT = 0x10,
+ IMEM_ROM_EXIT_CERT_MEM = 0x20,
+ IMEM_ROM_EXIT_FAIL = 0xFF
+};
+
+/* Boot stages */
+enum ipc_mem_exec_stage {
+ IPC_MEM_EXEC_STAGE_RUN = 0x600DF00D,
+ IPC_MEM_EXEC_STAGE_CRASH = 0x8BADF00D,
+ IPC_MEM_EXEC_STAGE_CD_READY = 0xBADC0DED,
+ IPC_MEM_EXEC_STAGE_BOOT = 0xFEEDB007,
+ IPC_MEM_EXEC_STAGE_PSI = 0xFEEDBEEF,
+ IPC_MEM_EXEC_STAGE_EBL = 0xFEEDCAFE,
+ IPC_MEM_EXEC_STAGE_INVALID = 0xFFFFFFFF
+};
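+
+/* The stage markers are distinctive hexspeak constants, so a partially
+ * initialized or corrupted scratchpad is unlikely to read back as a valid
+ * stage (see ipc_mmio_is_valid_exec_stage()).
+ */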
+
+/* mmio scratchpad info */
+struct mmio_offset {
+ int exec_stage;
+ int chip_info;
+ int rom_exit_code;
+ int psi_address;
+ int psi_size;
+ int ipc_status;
+ int context_info;
+ int ap_win_base;
+ int ap_win_end;
+ int cp_version;
+ int cp_capability;
+};
+
+/**
+ * struct iosm_mmio - MMIO region mapped to the doorbell scratchpad.
+ * @base: Base address of MMIO region
+ * @dev: Pointer to device structure
+ * @offset: Start offset
+ * @context_info_addr: Physical base address of context info structure
+ * @chip_info_version: Version of chip info structure
+ * @chip_info_size: Size of chip info structure
+ * @has_mux_lite: CP does not support mux aggregation (MUX Lite is used)
+ * @has_ul_flow_credit: UL flow credit support
+ * @has_slp_no_prot: Device sleep no protocol support
+ * @has_mcr_support: Usage of mcr support
+ */
+struct iosm_mmio {
+ unsigned char __iomem *base;
+ struct device *dev;
+ struct mmio_offset offset;
+ phys_addr_t context_info_addr;
+ unsigned int chip_info_version;
+ unsigned int chip_info_size;
+ u8 has_mux_lite:1,
+ has_ul_flow_credit:1,
+ has_slp_no_prot:1,
+ has_mcr_support:1;
+};
+
+/**
+ * ipc_mmio_init - Allocate mmio instance data
+ * @mmio_addr: Mapped AP base address of the MMIO area.
+ * @dev: Pointer to device structure
+ *
+ * Returns: address of mmio instance data or NULL on failure.
+ */
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio_addr, struct device *dev);
+
+/**
+ * ipc_mmio_set_psi_addr_and_size - Set start address and size of the
+ * primary system image (PSI) for the
+ *				   FW download.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: PSI address
+ * @size: PSI image size
+ */
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size);
+
+/**
+ * ipc_mmio_set_contex_info_addr - Stores the Context Info Address in
+ * MMIO instance to share it with CP during
+ * mmio_init.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: 64-bit address of AP context information.
+ */
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio,
+ phys_addr_t addr);
+
+/**
+ * ipc_mmio_get_cp_version - Get the CP IPC version
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: version number on success and failure value on error.
+ */
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_rom_exit_code - Get exit code from CP boot rom download app
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: exit code from CP boot rom download APP
+ */
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_exec_stage - Query CP execution stage
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP execution stage
+ */
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_ipc_state - Query CP IPC state
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP IPC state
+ */
+enum ipc_mem_device_ipc_state
+ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_copy_chip_info - Copy size bytes of CP chip info structure
+ * into caller provided buffer
+ * @ipc_mmio: Pointer to mmio instance
+ * @dest: Pointer to caller provided buff
+ * @size: Number of bytes to copy
+ */
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size);
+
+/**
+ * ipc_mmio_config - Write context info and AP memory range addresses.
+ * This needs to be called when CP is in
+ * IPC_MEM_DEVICE_IPC_INIT state
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_update_cp_capability - Read and update the modem capability from
+ *				   the mmio capability offset
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
new file mode 100644
index 000000000000..c1c77ce699da
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_mux_codec.h"
+
+/* At the beginning of the runtime phase, the IP MUX channel shall be created. */
+static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
+{
+ int channel_id;
+
+ channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
+ IPC_CTYPE_WWAN);
+
+ if (channel_id < 0) {
+ dev_err(ipc_mux->dev,
+ "allocation of the MUX channel id failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ goto no_channel;
+ }
+
+ /* Establish the MUX channel in blocking mode. */
+ ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
+ IPC_HP_NET_CHANNEL_INIT);
+
+ if (!ipc_mux->channel) {
+ dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ return -ENODEV; /* MUX channel is not available. */
+ }
+
+ /* Define the MUX active state properties. */
+ ipc_mux->state = MUX_S_ACTIVE;
+ ipc_mux->event = MUX_E_NO_ORDERS;
+
+no_channel:
+ return channel_id;
+}
+
+/* Reset the session/if id state. */
+static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_session *if_entry;
+
+ if_entry = &ipc_mux->session[if_id];
+ /* Reset the session state. */
+ if_entry->wwan = NULL;
+}
+
+/* Create and send the session open command. */
+static struct mux_cmd_open_session_resp *
+ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ struct mux_acb *acb = &ipc_mux->acb;
+ union mux_cmd_param param;
+
+ /* open_session commands to one ACB and start transmission. */
+ param.open_session.flow_ctrl = 0;
+ param.open_session.ipv4v6_hints = 0;
+ param.open_session.reserved2 = 0;
+ param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);
+
+	/* Finish and transfer the ACB. The user thread is suspended.
+	 * It is a blocking function call until CP responds or a timeout
+	 * occurs.
+	 */
+ acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
+ &param, sizeof(param.open_session), true,
+ false) ||
+ acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
+ dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
+ if_id);
+ return NULL;
+ }
+
+ open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
+ if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
+ dev_err(ipc_mux->dev,
+ "if_id %d,session open failed,response=%d", if_id,
+ open_session_resp->response);
+ return NULL;
+ }
+
+ return open_session_resp;
+}
+
+/* Open the first IP session. */
+static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
+ struct mux_session_open *session_open)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ int if_id;
+
+ /* Search for a free session interface id. */
+ if_id = le32_to_cpu(session_open->if_id);
+ if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
+ return false;
+ }
+
+ /* Create and send the session open command.
+	 * It is a blocking function call until CP responds or a timeout occurs.
+ */
+ open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
+ if (!open_session_resp) {
+ ipc_mux_session_free(ipc_mux, if_id);
+ session_open->if_id = cpu_to_le32(-1);
+ return false;
+ }
+
+ /* Initialize the uplink skb accumulator. */
+ skb_queue_head_init(&ipc_mux->session[if_id].ul_list);
+
+ ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
+ ipc_mux->session[if_id].ul_head_pad_len =
+ le32_to_cpu(open_session_resp->ul_head_pad_len);
+ ipc_mux->session[if_id].wwan = ipc_mux->wwan;
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].ul_flow_credits = 0;
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ /* Save and return the assigned if id. */
+ session_open->if_id = cpu_to_le32(if_id);
+
+ return true;
+}
+
+/* Free pending session UL packet. */
+static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
+{
+ /* Reset the session/if id state. */
+ ipc_mux_session_free(ipc_mux, if_id);
+
+ /* Empty the uplink skb accumulator. */
+ skb_queue_purge(&ipc_mux->session[if_id].ul_list);
+}
+
+static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
+ struct mux_session_close *msg)
+{
+ int if_id;
+
+ /* Copy the session interface id. */
+ if_id = le32_to_cpu(msg->if_id);
+
+ if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid session id %d", if_id);
+ return;
+ }
+
+ /* Create and send the session close command.
+	 * It is a blocking function call until CP responds or a timeout occurs.
+ */
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
+ NULL, 0, true, false))
+ dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
+ if_id);
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ ipc_mux_session_reset(ipc_mux, if_id);
+}
+
+static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
+ struct mux_channel_close *channel_close_p)
+{
+ int i;
+
+ /* Free pending session UL packet. */
+ for (i = 0; i < ipc_mux->nr_sessions; i++)
+ if (ipc_mux->session[i].wwan)
+ ipc_mux_session_reset(ipc_mux, i);
+
+ ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);
+
+ /* Reset the MUX object. */
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->event = MUX_E_INACTIVE;
+}
+
+/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops. */
+static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
+{
+ enum mux_event order;
+ bool success;
+ int ret = -EIO;
+
+ if (!ipc_mux->initialized) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ order = msg->common.event;
+
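+	/* Two-level dispatch: the outer switch keys on the MUX state, the
+	 * inner one on the requested event; any combination not handled
+	 * below falls through to the initial -EIO.
+	 */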
+ switch (ipc_mux->state) {
+ case MUX_S_INACTIVE:
+ if (order != MUX_E_MUX_SESSION_OPEN)
+ goto out; /* Wait for the request to open a session */
+
+ if (ipc_mux->event == MUX_E_INACTIVE)
+ /* Establish the MUX channel and the new state. */
+ ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);
+
+ if (ipc_mux->state != MUX_S_ACTIVE) {
+ ret = ipc_mux->channel_id; /* Missing the MUX channel */
+ goto out;
+ }
+
+ /* Disable the TD update timer and open the first IP session. */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux, &msg->session_open);
+
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_S_ACTIVE:
+ switch (order) {
+ case MUX_E_MUX_SESSION_OPEN:
+ /* Disable the TD update timer and open a session */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux,
+ &msg->session_open);
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_SESSION_CLOSE:
+ /* Release an IP session. */
+ ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
+ ipc_mux_session_close(ipc_mux, &msg->session_close);
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_CHANNEL_CLOSE:
+ /* Close the MUX channel pipes. */
+ ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_channel_close(ipc_mux, &msg->channel_close);
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ default:
+ /* Invalid order. */
+ goto out;
+ }
+
+ default:
+ dev_err(ipc_mux->dev,
+ "unexpected MUX transition: state=%d, event=%d",
+ ipc_mux->state, ipc_mux->event);
+ }
+out:
+ return ret;
+}
+
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *imem)
+{
+ struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
+ int i, ul_tds, ul_td_size;
+ struct sk_buff_head *free_list;
+ struct sk_buff *skb;
+
+ if (!ipc_mux)
+ return NULL;
+
+ ipc_mux->protocol = mux_cfg->protocol;
+ ipc_mux->ul_flow = mux_cfg->ul_flow;
+ ipc_mux->nr_sessions = mux_cfg->nr_sessions;
+ ipc_mux->instance_id = mux_cfg->instance_id;
+ ipc_mux->wwan_q_offset = 0;
+
+ ipc_mux->pcie = imem->pcie;
+ ipc_mux->imem = imem;
+ ipc_mux->ipc_protocol = imem->ipc_protocol;
+ ipc_mux->dev = imem->dev;
+ ipc_mux->wwan = imem->wwan;
+
+ /* Get the reference to the UL ADB list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+ /* Initialize the list with free ADB. */
+ skb_queue_head_init(free_list);
+
+ ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+
+ ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;
+
+ ipc_mux->ul_adb.dest_skb = NULL;
+
+ ipc_mux->initialized = true;
+ ipc_mux->adb_prep_ongoing = false;
+ ipc_mux->size_needed = 0;
+ ipc_mux->ul_data_pend_bytes = 0;
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->ev_mux_net_transmit_pending = false;
+ ipc_mux->tx_transaction_id = 0;
+ ipc_mux->rr_next_session = 0;
+ ipc_mux->event = MUX_E_INACTIVE;
+ ipc_mux->channel_id = -1;
+ ipc_mux->channel = NULL;
+
+ /* Allocate the list of UL ADB. */
+ for (i = 0; i < ul_tds; i++) {
+ dma_addr_t mapping;
+
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
+ &mapping, DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ipc_mux_deinit(ipc_mux);
+ return NULL;
+ }
+ /* Extend the UL ADB list. */
+ skb_queue_tail(free_list, skb);
+ }
+
+ return ipc_mux;
+}
+
+/* Informs the network stack to restart transmission for all opened sessions
+ * if flow control is not ON for that session.
+ */
+static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ /* If flow control of the session is OFF and if there was tx
+ * stop then restart. Inform the network interface to restart
+ * sending data.
+ */
+ if (session->flow_ctl_mask == 0) {
+ session->net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(session, idx, false);
+ }
+ }
+}
+
+/* Informs the network stack to stop sending further pkt for all opened
+ * sessions
+ */
+static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ }
+}
+
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
+{
+ if (ipc_mux->ul_flow == MUX_UL) {
+ int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;
+
+ if (ipc_mux->ul_data_pend_bytes < low_thresh)
+ ipc_mux_restart_tx_for_all_sessions(ipc_mux);
+ }
+}
+
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? ipc_mux->nr_sessions : -EFAULT;
+}
+
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
+}
+
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_open *session_open;
+ union mux_msg mux_msg;
+
+ session_open = &mux_msg.session_open;
+ session_open->event = MUX_E_MUX_SESSION_OPEN;
+
+ session_open->if_id = cpu_to_le32(session_nr);
+ ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
+ return ipc_mux_schedule(ipc_mux, &mux_msg);
+}
+
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_close *session_close;
+ union mux_msg mux_msg;
+ int ret_val;
+
+ session_close = &mux_msg.session_close;
+ session_close->event = MUX_E_MUX_SESSION_CLOSE;
+
+ session_close->if_id = cpu_to_le32(session_nr);
+ ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
+ ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;
+
+ return ret_val;
+}
+
+void ipc_mux_deinit(struct iosm_mux *ipc_mux)
+{
+ struct mux_channel_close *channel_close;
+ struct sk_buff_head *free_list;
+ union mux_msg mux_msg;
+ struct sk_buff *skb;
+
+ if (!ipc_mux->initialized)
+ return;
+ ipc_mux_stop_netif_for_all_sessions(ipc_mux);
+
+ channel_close = &mux_msg.channel_close;
+ channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_schedule(ipc_mux, &mux_msg);
+
+ /* Empty the ADB free list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+ /* Remove from the head of the downlink queue. */
+ while ((skb = skb_dequeue(free_list)))
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+
+ if (ipc_mux->channel) {
+ ipc_mux->channel->ul_pipe.is_open = false;
+ ipc_mux->channel->dl_pipe.is_open = false;
+ }
+
+ kfree(ipc_mux);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
new file mode 100644
index 000000000000..ddd2cd0bd911
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_H
+#define IOSM_IPC_MUX_H
+
+#include "iosm_ipc_protocol.h"
+
+/* Size of the IP MUX data buffer. */
+#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+
+/* Size of the IP MUX Lite data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* TD counts for IP MUX Lite */
+#define IPC_MEM_MAX_TDS_MUX_LITE_UL 800
+#define IPC_MEM_MAX_TDS_MUX_LITE_DL 1200
+
+/* open session request (AP->CP) */
+#define MUX_CMD_OPEN_SESSION 1
+
+/* response to open session request (CP->AP) */
+#define MUX_CMD_OPEN_SESSION_RESP 2
+
+/* close session request (AP->CP) */
+#define MUX_CMD_CLOSE_SESSION 3
+
+/* response to close session request (CP->AP) */
+#define MUX_CMD_CLOSE_SESSION_RESP 4
+
+/* Flow control command with mask of the flow per queue/flow. */
+#define MUX_LITE_CMD_FLOW_CTL 5
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command.
+ */
+#define MUX_LITE_CMD_FLOW_CTL_ACK 6
+
+/* Command for report packet indicating link quality metrics. */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT 7
+
+/* Response to a report packet */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT_RESP 8
+
+/* Used to reset a command/response state. */
+#define MUX_CMD_INVALID 255
+
+/* command response : command processed successfully */
+#define MUX_CMD_RESP_SUCCESS 0
+
+/* MUX for route link devices */
+#define IPC_MEM_WWAN_MUX BIT(0)
+
+/* Initiated actions to change the state of the MUX object. */
+enum mux_event {
+ MUX_E_INACTIVE, /* No initiated actions. */
+ MUX_E_MUX_SESSION_OPEN, /* Create the MUX channel and a session. */
+ MUX_E_MUX_SESSION_CLOSE, /* Release a session. */
+ MUX_E_MUX_CHANNEL_CLOSE, /* Release the MUX channel. */
+ MUX_E_NO_ORDERS, /* No MUX order. */
+	MUX_E_NOT_APPLICABLE, /* Defective IP MUX. */
+};
+
+/* MUX session open command. */
+struct mux_session_open {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX session close command. */
+struct mux_session_close {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX channel close command. */
+struct mux_channel_close {
+ enum mux_event event;
+};
+
+/* Common message part, used to find out the right message type. */
+struct mux_common {
+ enum mux_event event;
+};
+
+/* List of ops in MUX mode. */
+union mux_msg {
+ struct mux_session_open session_open;
+ struct mux_session_close session_close;
+ struct mux_channel_close channel_close;
+ struct mux_common common;
+};
+
+/* Parameter definition of the open session command. */
+struct mux_cmd_open_session {
+ u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+ /* 1: Flow control enabled (flow not allowed)*/
+ u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported.*/
+ /* 1: IPv4/IPv6 hints supported*/
+ __le16 reserved2; /* Reserved. Set to zero. */
+ __le32 dl_head_pad_len; /* Maximum length supported */
+ /* for DL head padding on a datagram. */
+};
+
+/* Parameter definition of the open session response. */
+struct mux_cmd_open_session_resp {
+ __le32 response; /* Response code */
+ u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+ /* 1: Flow control enabled (flow not allowed) */
+ u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported */
+ /* 1: IPv4/IPv6 hints supported */
+ __le16 reserved2; /* Reserved. Set to zero. */
+ __le32 ul_head_pad_len; /* Actual length supported for */
+				/* UL head padding on a datagram. */
+};
+
+/* Parameter definition of the close session response code */
+struct mux_cmd_close_session_resp {
+ __le32 response;
+};
+
+/* Parameter definition of the flow control command. */
+struct mux_cmd_flow_ctl {
+ __le32 mask; /* indicating the desired flow control */
+ /* state for various flows/queues */
+};
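+
+/* As handled in ipc_mux_dl_dlcmds_decode_process(): a mask of 0xFFFFFFFF
+ * enables flow control (uplink transmission stops), a mask of 0 disables
+ * it again; any other value is currently ignored.
+ */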
+
+/* Parameter definition of the link status report code*/
+struct mux_cmd_link_status_report {
+ u8 payload;
+};
+
+/* Parameter definition of the link status report response code. */
+struct mux_cmd_link_status_report_resp {
+ __le32 response;
+};
+
+/**
+ * union mux_cmd_param - Union-definition of the command parameters.
+ * @open_session: Inband command for open session
+ * @open_session_resp: Inband command for open session response
+ * @close_session_resp: Inband command for close session response
+ * @flow_ctl: In-band flow control on the opened interfaces
+ * @link_status: In-band Link Status Report
+ * @link_status_resp: In-band command for link status report response
+ */
+union mux_cmd_param {
+ struct mux_cmd_open_session open_session;
+ struct mux_cmd_open_session_resp open_session_resp;
+ struct mux_cmd_close_session_resp close_session_resp;
+ struct mux_cmd_flow_ctl flow_ctl;
+ struct mux_cmd_link_status_report link_status;
+ struct mux_cmd_link_status_report_resp link_status_resp;
+};
+
+/* States of the MUX object. */
+enum mux_state {
+ MUX_S_INACTIVE, /* IP MUX is unused. */
+ MUX_S_ACTIVE, /* IP MUX channel is available. */
+	MUX_S_ERROR, /* Defective IP MUX. */
+};
+
+/* Supported MUX protocols. */
+enum ipc_mux_protocol {
+ MUX_UNKNOWN,
+ MUX_LITE,
+};
+
+/* Supported UL data transfer methods. */
+enum ipc_mux_ul_flow {
+ MUX_UL_UNKNOWN,
+ MUX_UL, /* Normal UL data transfer */
+ MUX_UL_ON_CREDITS, /* UL data transfer will be based on credits */
+};
+
+/* List of the MUX session. */
+struct mux_session {
+	struct iosm_wwan *wwan; /* Network i/f used for communication */
+ int if_id; /* i/f id for session open message.*/
+ u32 flags;
+ u32 ul_head_pad_len; /* Nr of bytes for UL head padding. */
+ u32 dl_head_pad_len; /* Nr of bytes for DL head padding. */
+ struct sk_buff_head ul_list; /* skb entries for an ADT. */
+ u32 flow_ctl_mask; /* UL flow control */
+ u32 flow_ctl_en_cnt; /* Flow control Enable cmd count */
+ u32 flow_ctl_dis_cnt; /* Flow Control Disable cmd count */
+ int ul_flow_credits; /* UL flow credits */
+ u8 net_tx_stop:1,
+ flush:1; /* flush net interface ? */
+};
+
+/* State of a single UL data block. */
+struct mux_adb {
+ struct sk_buff *dest_skb; /* Current UL skb for the data block. */
+ u8 *buf; /* ADB memory. */
+ struct mux_adgh *adgh; /* ADGH pointer */
+ struct sk_buff *qlth_skb; /* QLTH pointer */
+ u32 *next_table_index; /* Pointer to next table index. */
+ struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
+ int size; /* Size of the ADB memory. */
+ u32 if_cnt; /* Statistic counter */
+ u32 dg_cnt_total;
+ u32 payload_size;
+};
+
+/* Temporary ACB state. */
+struct mux_acb {
+ struct sk_buff *skb; /* Used UL skb. */
+ int if_id; /* Session id. */
+ u32 wanted_response;
+ u32 got_response;
+ u32 cmd;
+ union mux_cmd_param got_param; /* Received command/response parameter */
+};
+
+/**
+ * struct iosm_mux - Structure of the data multiplexing over an IP channel.
+ * @dev: Pointer to device structure
+ * @session: Array of the MUX sessions.
+ * @channel: Reference to the IP MUX channel
+ * @pcie: Pointer to iosm_pcie struct
+ * @imem: Pointer to iosm_imem
+ * @wwan:		Pointer to iosm_wwan
+ * @ipc_protocol: Pointer to iosm_protocol
+ * @channel_id: Channel ID for MUX
+ * @protocol: Type of the MUX protocol
+ * @ul_flow: UL Flow type
+ * @nr_sessions: Number of sessions
+ * @instance_id: Instance ID
+ * @state: States of the MUX object
+ * @event: Initiated actions to change the state of the MUX object
+ * @tx_transaction_id: Transaction id for the ACB command.
+ * @rr_next_session: Next session number for round robin.
+ * @ul_adb: State of the UL ADB/ADGH.
+ * @size_needed: Variable to store the size needed during ADB preparation
+ * @ul_data_pend_bytes: Pending UL data to be processed in bytes
+ * @acb: Temporary ACB state
+ * @wwan_q_offset: This will hold the offset of the given instance
+ * Useful while passing or receiving packets from
+ * wwan/imem layer.
+ * @initialized: MUX object is initialized
+ * @ev_mux_net_transmit_pending:
+ * 0 means inform the IPC tasklet to pass the
+ * accumulated uplink ADB to CP.
+ * @adb_prep_ongoing: Flag for ADB preparation status
+ */
+struct iosm_mux {
+ struct device *dev;
+ struct mux_session session[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct ipc_mem_channel *channel;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct iosm_wwan *wwan;
+ struct iosm_protocol *ipc_protocol;
+ int channel_id;
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int nr_sessions;
+ int instance_id;
+ enum mux_state state;
+ enum mux_event event;
+ u32 tx_transaction_id;
+ int rr_next_session;
+ struct mux_adb ul_adb;
+ int size_needed;
+ long long ul_data_pend_bytes;
+ struct mux_acb acb;
+ int wwan_q_offset;
+ u8 initialized:1,
+ ev_mux_net_transmit_pending:1,
+ adb_prep_ongoing:1;
+};
+
+/* MUX configuration structure */
+struct ipc_mux_config {
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int nr_sessions;
+ int instance_id;
+};
+
+/**
+ * ipc_mux_init - Allocates and initializes a MUX instance
+ * @mux_cfg: Pointer to MUX configuration structure
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Initialized mux pointer on success else NULL
+ */
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_mux_deinit - Deallocates MUX instance
+ * @ipc_mux: Pointer to the MUX instance.
+ */
+void ipc_mux_deinit(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_check_n_restart_tx - Checks for pending UL data bytes and then
+ *				restarts the net interface tx queue if the
+ *				device has flow control set to off.
+ * @ipc_mux: Pointer to MUX data-struct
+ */
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_get_active_protocol - Returns the active MUX protocol type.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: enum of type ipc_mux_protocol
+ */
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_open_session - Opens a MUX session for IP traffic.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr);
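+
+/* Usage sketch (illustrative only): the net layer opens one MUX session per
+ * network interface and closes it again on teardown, e.g.
+ *
+ *	ret = ipc_mux_open_session(ipc_mux, if_id);
+ *	...
+ *	ret = ipc_mux_close_session(ipc_mux, if_id);
+ */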
+
+/**
+ * ipc_mux_close_session - Closes a MUX session.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr);
+
+/**
+ * ipc_mux_get_max_sessions - Returns the maximum number of sessions supported
+ *			      on the provided MUX instance.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: Number of sessions supported on Success and failure value on error
+ */
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
new file mode 100644
index 000000000000..e634ffc6ec08
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/nospec.h>
+
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_mux_codec.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Test the link power state and send a MUX command in blocking mode. */
+static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ const struct mux_acb *acb = msg;
+
+ skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
+ ipc_imem_ul_send(ipc_mux->imem);
+
+ return 0;
+}
+
+static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
+{
+ struct completion *completion = &ipc_mux->channel->ul_sem;
+ int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
+ 0, &ipc_mux->acb,
+ sizeof(ipc_mux->acb), false);
+ if (ret) {
+ dev_err(ipc_mux->dev, "unable to send mux command");
+ return ret;
+ }
+
+	/* If blocking, suspend the app and wait for an irq in the flash or
+	 * crash phase; return -ETIMEDOUT on timeout to indicate failure.
+	 */
+ if (blocking) {
+ u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
+
+ reinit_completion(completion);
+
+ if (wait_for_completion_interruptible_timeout
+ (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
+ 0) {
+ dev_err(ipc_mux->dev, "ch[%d] timeout",
+ ipc_mux->channel_id);
+ ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/* Prepare mux Command */
+static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
+ u32 cmd, struct mux_acb *acb,
+ void *param, u32 param_size)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
+static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
+ GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Save the skb address. */
+ acb->skb = skb;
+
+ memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
+
+ return 0;
+}
+
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_lite_cmdh *ack_lite;
+ int ret = 0;
+
+ acb->if_id = if_id;
+ ret = ipc_mux_acb_alloc(ipc_mux);
+ if (ret)
+ return ret;
+
+ ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
+ res_size);
+ if (respond)
+ ack_lite->transaction_id = cpu_to_le32(transaction_id);
+
+ ret = ipc_mux_acb_send(ipc_mux, blocking);
+
+ return ret;
+}
+
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
+{
+ /* Inform the network interface to start/stop flow ctrl */
+ ipc_wwan_tx_flowctrl(session->wwan, idx, on);
+}
+
+static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
+ struct mux_lite_cmdh *cmdh)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+
+ switch (le32_to_cpu(cmdh->command_type)) {
+ case MUX_CMD_OPEN_SESSION_RESP:
+ case MUX_CMD_CLOSE_SESSION_RESP:
+ /* Resume the control application. */
+ acb->got_param = cmdh->param;
+ break;
+
+ case MUX_LITE_CMD_FLOW_CTL_ACK:
+		/* This command type is not expected as a response for the
+		 * aggregation version of the protocol, so return non-zero.
+		 */
+ if (ipc_mux->protocol != MUX_LITE)
+ return -EINVAL;
+
+ dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
+ cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ acb->wanted_response = MUX_CMD_INVALID;
+ acb->got_response = le32_to_cpu(cmdh->command_type);
+ complete(&ipc_mux->channel->ul_sem);
+
+ return 0;
+}
+
+static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
+ struct mux_lite_cmdh *cmdh)
+{
+ union mux_cmd_param *param = &cmdh->param;
+ struct mux_session *session;
+ int new_size;
+
+ dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
+ cmdh->if_id, le32_to_cpu(cmdh->command_type));
+
+ switch (le32_to_cpu(cmdh->command_type)) {
+ case MUX_LITE_CMD_FLOW_CTL:
+
+ if (cmdh->if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "if_id [%d] not valid",
+ cmdh->if_id);
+ return -EINVAL; /* No session interface id. */
+ }
+
+ session = &ipc_mux->session[cmdh->if_id];
+
+ new_size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(param->flow_ctl);
+ if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
+ /* Backward Compatibility */
+ if (cmdh->cmd_len == cpu_to_le16(new_size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = ~0;
+ /* if CP asks for FLOW CTRL Enable
+ * then set our internal flow control Tx flag
+ * to limit uplink session queueing
+ */
+ session->net_tx_stop = true;
+ /* Update the stats */
+ session->flow_ctl_en_cnt++;
+ } else if (param->flow_ctl.mask == 0) {
+ /* Just reset the Flow control mask and let
+ * mux_flow_ctrl_low_thre_b take control on
+ * our internal Tx flag and enabling kernel
+ * flow control
+ */
+ /* Backward Compatibility */
+ if (cmdh->cmd_len == cpu_to_le16(new_size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = 0;
+ /* Update the stats */
+ session->flow_ctl_dis_cnt++;
+ } else {
+ break;
+ }
+
+ dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ le32_to_cpu(param->flow_ctl.mask));
+ break;
+
+ case MUX_LITE_CMD_LINK_STATUS_REPORT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Decode and Send appropriate response to a command block. */
+static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
+ __le32 trans_id = cmdh->transaction_id;
+
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+		/* Failure to decode a command response indicates that cmd_type
+		 * may be a command rather than a response, so try decoding it
+		 * as a command.
+		 */
+ if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ /* Decoded command may need a response. Give the
+ * response according to the command type.
+ */
+ union mux_cmd_param *mux_cmd = NULL;
+ size_t size = 0;
+ u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
+
+ if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
+ mux_cmd = &cmdh->param;
+ mux_cmd->link_status_resp.response =
+ cpu_to_le32(MUX_CMD_RESP_SUCCESS);
+ /* response field is u32 */
+ size = sizeof(u32);
+ } else if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
+ cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
+ } else {
+ return;
+ }
+
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ le32_to_cpu(trans_id),
+ mux_cmd, size, false,
+ true))
+ dev_err(ipc_mux->dev,
+ "if_id %d: cmd send failed",
+ cmdh->if_id);
+ }
+ }
+}
+
+/* Pass the DL packet to the netif layer. */
+static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
+ struct iosm_wwan *wwan, u32 offset,
+ u8 service_class, struct sk_buff *skb)
+{
+ struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
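+
+	/* The original skb remains owned by the caller (ipc_mux_dl_decode()
+	 * frees it afterwards), so a clone is handed to the netif layer.
+	 */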
+
+ if (!dest_skb)
+ return -ENOMEM;
+
+ skb_pull(dest_skb, offset);
+ skb_set_tail_pointer(dest_skb, dest_skb->len);
+ /* Pass the packet to the netif layer. */
+ dest_skb->priority = service_class;
+
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+}
+
+/* Decode Flow Credit Table in the block */
+static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
+ unsigned char *block)
+{
+ struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
+ struct iosm_wwan *wwan;
+ int ul_credits;
+ int if_id;
+
+ if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
+ dev_err(ipc_mux->dev, "unexpected FCT length: %d",
+ fct->vfl_length);
+ return;
+ }
+
+ if_id = fct->if_id;
+ if (if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+ ul_credits = fct->vfl.nr_of_bytes;
+
+ dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
+ if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
+
+ /* Update the Flow Credit information from ADB */
+ ipc_mux->session[if_id].ul_flow_credits += ul_credits;
+
+ /* Check whether the TX can be started */
+ if (ipc_mux->session[if_id].ul_flow_credits > 0) {
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
+ ipc_mux->session[if_id].if_id, false);
+ }
+}
+
+/* Decode non-aggregated datagram */
+static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ u32 pad_len, packet_offset;
+ struct iosm_wwan *wwan;
+ struct mux_adgh *adgh;
+ u8 *block = skb->data;
+ int rc = 0;
+ u8 if_id;
+
+ adgh = (struct mux_adgh *)block;
+
+ if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "invalid ADGH signature received");
+ return;
+ }
+
+ if_id = adgh->if_id;
+ if (if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+	/* Store the pad len for the corresponding session.
+	 * Pad bytes as negotiated in the open session less the header size
+	 * (see session management chapter for details).
+	 * If the resulting padding is zero or less, the additional head padding
+	 * is omitted: e.g. if HEAD_PAD_LEN = 16 or less, this field is omitted;
+	 * if HEAD_PAD_LEN = 20, then this field will have 4 bytes set to zero.
+	 */
+ pad_len =
+ ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+ packet_offset = sizeof(*adgh) + pad_len;
+
+ if_id += ipc_mux->wwan_q_offset;
+
+ /* Pass the packet to the netif layer */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
+ adgh->service_class, skb);
+ if (rc) {
+ dev_err(ipc_mux->dev, "mux adgh decoding error");
+ return;
+ }
+ ipc_mux->session[if_id].flush = 1;
+}
+
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ u32 signature;
+
+ if (!skb->data)
+ return;
+
+ /* Decode the MUX header type. */
+ signature = le32_to_cpup((__le32 *)skb->data);
+
+ switch (signature) {
+ case MUX_SIG_ADGH:
+ ipc_mux_dl_adgh_decode(ipc_mux, skb);
+ break;
+
+ case MUX_SIG_FCTH:
+ ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
+ break;
+
+ case MUX_SIG_CMDH:
+ ipc_mux_dl_cmd_decode(ipc_mux, skb);
+ break;
+
+ default:
+ dev_err(ipc_mux->dev, "invalid ABH signature");
+ }
+
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+}
+
+static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, u32 type)
+{
+ /* Take the first element of the free list. */
+ struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ int qlt_size;
+
+ if (!skb)
+ return -EBUSY; /* Wait for a free ADB skb. */
+
+ /* Mark it as UL ADB to select the right free operation. */
+ IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
+
+ switch (type) {
+ case MUX_SIG_ADGH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ ul_adb->adgh = (struct mux_adgh *)skb->data;
+ memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
+ break;
+
+ case MUX_SIG_QLTH:
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
+
+ if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
+ dev_err(ipc_mux->dev,
+ "can't support. QLT size:%d SKB size: %d",
+ qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
+ return -ERANGE;
+ }
+
+ ul_adb->qlth_skb = skb;
+ memset((ul_adb->qlth_skb)->data, 0, qlt_size);
+ skb_put(skb, qlt_size);
+ break;
+ }
+
+ return 0;
+}
+
+static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
+{
+ struct mux_adb *ul_adb = &ipc_mux->ul_adb;
+ u16 adgh_len;
+ long long bytes;
+ char *str;
+
+ if (!ul_adb->dest_skb) {
+ dev_err(ipc_mux->dev, "no dest skb");
+ return;
+ }
+
+ adgh_len = le16_to_cpu(ul_adb->adgh->length);
+ skb_put(ul_adb->dest_skb, adgh_len);
+ skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ ul_adb->dest_skb = NULL;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ struct mux_session *session;
+
+ session = &ipc_mux->session[ul_adb->adgh->if_id];
+ str = "available_credits";
+ bytes = (long long)session->ul_flow_credits;
+
+ } else {
+ str = "pend_bytes";
+ bytes = ipc_mux->ul_data_pend_bytes;
+ ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
+ adgh_len;
+ }
+
+ dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
+ adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
+ str, bytes);
+}
+
+/* Allocates an ADB from the free list and initializes its header */
+static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
+ struct mux_adb *adb, int *size_needed,
+ u32 type)
+{
+ bool ret_val = false;
+ int status;
+
+ if (!adb->dest_skb) {
+ /* Allocate memory for the ADB, including the
+ * datagram table header.
+ */
+ status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
+ if (status)
+ /* No free ADB skb is available yet. */
+ ret_val = true;
+
+ /* Reset the needed size only for new ADB memory. */
+ *size_needed = 0;
+ }
+
+ return ret_val;
+}
+
+/* Informs the network stack to stop sending further packets for all opened
+ * sessions
+ */
+static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ session->net_tx_stop = true;
+ }
+}
+
+/* Sends Queue Level Table of all opened sessions */
+static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
+{
+ struct ipc_mem_lite_gen_tbl *qlt;
+ struct mux_session *session;
+ bool qlt_updated = false;
+ int i;
+ int qlt_size;
+
+ if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
+ return qlt_updated;
+
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ session = &ipc_mux->session[i];
+
+ if (!session->wwan || session->flow_ctl_mask)
+ continue;
+
+ if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
+ MUX_SIG_QLTH)) {
+ dev_err(ipc_mux->dev,
+ "no reserved mem to send QLT of if_id: %d", i);
+ break;
+ }
+
+ /* Prepare QLT */
+ qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
+ ->data;
+ qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ qlt->length = cpu_to_le16(qlt_size);
+ qlt->if_id = i;
+ qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+ qlt->reserved[0] = 0;
+ qlt->reserved[1] = 0;
+
+ qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+
+ /* Add QLT to the transfer list. */
+ skb_queue_tail(&ipc_mux->channel->ul_list,
+ ipc_mux->ul_adb.qlth_skb);
+
+ qlt_updated = true;
+ ipc_mux->ul_adb.qlth_skb = NULL;
+ }
+
+ if (qlt_updated)
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+
+ return qlt_updated;
+}
+
+/* Checks the available credits for the specified session and returns
+ * number of packets for which credits are available.
+ */
+static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ int max_nr_of_pkts)
+{
+ int pkts_to_send = 0;
+ struct sk_buff *skb;
+ int credits = 0;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ credits = session->ul_flow_credits;
+ if (credits <= 0) {
+ dev_dbg(ipc_mux->dev,
+ "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
+ session->if_id, session->ul_flow_credits,
+ session->ul_list.qlen); /* nr_of_bytes */
+ return 0;
+ }
+ } else {
+ credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
+ ipc_mux->ul_data_pend_bytes;
+ if (credits <= 0) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+
+ dev_dbg(ipc_mux->dev,
+ "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
+ session->if_id, ipc_mux->ul_data_pend_bytes,
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
+ return 0;
+ }
+ }
+
+ /* Check if there are enough credits/bytes available to send the
+ * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
+ * depending on available credits.
+ */
+ skb_queue_walk(ul_list, skb) {
+ if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
+ break;
+ credits -= skb->len;
+ pkts_to_send++;
+ }
+
+ return pkts_to_send;
+}
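+
+/* Worked example for the walk above: with credits = 3000 and three queued
+ * skbs of 1500 bytes each, the first two fit (3000 -> 1500 -> 0) and the
+ * function returns 2; the third skb stays queued until a later grant.
+ */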
+
+/* Encode the UL IP packet according to Lite spec. */
+static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ struct mux_adb *adb, int nr_of_pkts)
+{
+ int offset = sizeof(struct mux_adgh);
+ int adb_updated = -EINVAL;
+ struct sk_buff *src_skb;
+ int aligned_size = 0;
+ int nr_of_skb = 0;
+ u32 pad_len = 0;
+
+ /* Re-calculate the number of packets depending on number of bytes to be
+ * processed/available credits.
+ */
+ nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
+ nr_of_pkts);
+
+ /* If calculated nr_of_pkts from available credits is <= 0
+ * then nothing to do.
+ */
+ if (nr_of_pkts <= 0)
+ return 0;
+
+ /* Read the configured UL head_pad_len for this session. */
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ /* Process all pending UL packets for this session
+ * depending on the allocated datagram table size.
+ */
+ while (nr_of_pkts > 0) {
+ /* get destination skb allocated */
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "no reserved memory for ADGH");
+ return -ENOMEM;
+ }
+
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+ "skb peek return NULL with count : %d",
+ nr_of_pkts);
+ break;
+ }
+
+ /* Calculate the memory value. */
+ aligned_size = ALIGN((pad_len + src_skb->len), 4);
+
+ ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size) {
+ dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
+ ipc_mux->size_needed, adb->size);
+ /* Return 1 if any IP packet is added to the transfer
+ * list.
+ */
+ return nr_of_skb ? 1 : 0;
+ }
+
+ /* Add the buffer (without head padding) to the next pending transfer. */
+ memcpy(adb->buf + offset + pad_len, src_skb->data,
+ src_skb->len);
+
+ adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->if_id = session_id;
+ adb->adgh->length =
+ cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
+ src_skb->len);
+ adb->adgh->service_class = src_skb->priority;
+ adb->adgh->next_count = --nr_of_pkts;
+ adb->dg_cnt_total++;
+ adb->payload_size += src_skb->len;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
+ /* Decrement the credit value as we are processing the
+ * datagram from the UL list.
+ */
+ session->ul_flow_credits -= src_skb->len;
+
+ /* Remove the processed elements and free it. */
+ src_skb = skb_dequeue(ul_list);
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+
+ ipc_mux_ul_adgh_finish(ipc_mux);
+ }
+
+ if (nr_of_skb) {
+ /* Send QLT info to modem if pending bytes > high watermark
+ * in case of mux lite
+ */
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
+ ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
+ adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
+ else
+ adb_updated = 1;
+
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+ }
+
+ return adb_updated;
+}
+
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
+{
+ struct sk_buff_head *ul_list;
+ struct mux_session *session;
+ int updated = 0;
+ int session_id;
+ int dg_n;
+ int i;
+
+ if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
+ ipc_mux->adb_prep_ongoing)
+ return false;
+
+ ipc_mux->adb_prep_ongoing = true;
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ session_id = ipc_mux->rr_next_session;
+ session = &ipc_mux->session[session_id];
+
+ /* Advance to the next session and handle rr_next_session overflow. */
+ ipc_mux->rr_next_session++;
+ if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions)
+ ipc_mux->rr_next_session = 0;
+
+ if (!session->wwan || session->flow_ctl_mask ||
+ session->net_tx_stop)
+ continue;
+
+ ul_list = &session->ul_list;
+
+ /* Is something pending in UL and flow ctrl off */
+ dg_n = skb_queue_len(ul_list);
+ if (dg_n > MUX_MAX_UL_DG_ENTRIES)
+ dg_n = MUX_MAX_UL_DG_ENTRIES;
+
+ if (dg_n == 0)
+ /* Nothing to do for ipc_mux session
+ * -> try next session id.
+ */
+ continue;
+
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
+ ul_list, &ipc_mux->ul_adb,
+ dg_n);
+ }
+
+ ipc_mux->adb_prep_ongoing = false;
+ return updated == 1;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_adgh *adgh;
+ u16 adgh_len;
+
+ adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(adgh->length);
+
+ if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
+ adgh_len;
+
+ if (ipc_mux->ul_flow == MUX_UL)
+ dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
+ ipc_mux->ul_data_pend_bytes);
+
+ /* Reset the skb settings. */
+ skb->tail = 0;
+ skb->len = 0;
+
+ /* Add the consumed ADB to the free list. */
+ skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
+}
+
+/* Start the NETIF uplink send transfer in MUX mode. */
+static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ bool ul_data_pend = false;
+
+ /* Add session UL data to an ADB and ADGH. */
+ ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
+ if (ul_data_pend)
+ /* Delay the doorbell irq */
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ /* reset the debounce flag */
+ ipc_mux->ev_mux_net_transmit_pending = false;
+
+ return 0;
+}
+
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb)
+{
+ struct mux_session *session = &ipc_mux->session[if_id];
+ int ret = -EINVAL;
+
+ if (ipc_mux->channel &&
+ ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_mux->dev,
+ "channel state is not IMEM_CHANNEL_ACTIVE");
+ goto out;
+ }
+
+ if (!session->wwan) {
+ dev_err(ipc_mux->dev, "session net ID is NULL");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* The session may be under flow control.
+ * Check whether the packet can be queued in the session list;
+ * if not, suspend net TX.
+ */
+ if (skb_queue_len(&session->ul_list) >=
+ (session->net_tx_stop ?
+ IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
+ (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
+ IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Add skb to the uplink skb accumulator. */
+ skb_queue_tail(&session->ul_list, skb);
+
+ /* Inform the IPC kthread to pass uplink IP packets to CP. */
+ if (!ipc_mux->ev_mux_net_transmit_pending) {
+ ipc_mux->ev_mux_net_transmit_pending = true;
+ ret = ipc_task_queue_send_task(ipc_mux->imem,
+ ipc_mux_tq_ul_trigger_encode, 0,
+ NULL, 0, false);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
+ if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
+ skb->len, skb->truesize, skb->priority);
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
new file mode 100644
index 000000000000..4a74e3c9457f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_CODEC_H
+#define IOSM_IPC_MUX_CODEC_H
+
+#include "iosm_ipc_mux.h"
+
+/* Queue level size and reporting:
+ * 1 or greater enables reporting, 0 disables it.
+ */
+#define MUX_QUEUE_LEVEL 1
+
+/* Size of the buffer for the IP MUX commands. */
+#define MUX_MAX_UL_ACB_BUF_SIZE 256
+
+/* Maximum number of packets in a go per session */
+#define MUX_MAX_UL_DG_ENTRIES 100
+
+/* ADGH: Signature of the Datagram Header. */
+#define MUX_SIG_ADGH 0x48474441
+
+/* CMDH: Signature of the Command Header. */
+#define MUX_SIG_CMDH 0x48444D43
+
+/* QLTH: Signature of the Queue Level Table */
+#define MUX_SIG_QLTH 0x48544C51
+
+/* FCTH: Signature of the Flow Credit Table */
+#define MUX_SIG_FCTH 0x48544346
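+
+/* Note: each signature above is the header's ASCII tag read as a
+ * little-endian 32-bit word, e.g. 0x48474441 is 'A' 'D' 'G' 'H' in memory.
+ */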
+
+/* MUX UL session threshold factor */
+#define IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR (4)
+
+/* Size of the buffer for the IP MUX Lite data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* MUX UL session threshold in number of packets */
+#define IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD (64)
+
+/* Default timeout for sending IPC session commands such as
+ * open session and close session.
+ * Unit: milliseconds.
+ */
+#define IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT 1000 /* 1 second */
+
+/* MUX UL flow control lower threshold in bytes */
+#define IPC_MEM_MUX_UL_FLOWCTRL_LOW_B 10240 /* 10KB */
+
+/* MUX UL flow control higher threshold in bytes (5 ms worth of data) */
+#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
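+
+/* The two watermarks above form a simple hysteresis in the byte-based flow
+ * control mode: encoding stops for all sessions once ul_data_pend_bytes
+ * reaches the high watermark, and queue level tables are reported while it
+ * is at or above the low watermark (see ipc_mux_ul_adgh_encode()).
+ */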
+
+/**
+ * struct mux_adgh - Aggregated Datagram Header.
+ * @signature: Signature of the Aggregated Datagram Header (0x48474441)
+ * @length: Length (in bytes) of the datagram, including this
+ * header. Min value: 0x10
+ * @if_id: ID of the interface the datagrams belong to
+ * @opt_ipv4v6: Indicates IPv4 (= 0) or IPv6 (= 1). Optional; set to
+ * zero if not used.
+ * @service_class: Service class identifier for the datagram.
+ * @next_count: Count of the datagrams that shall follow this
+ * datagram for this interface. A count of zero means
+ * the next datagram may not belong to this interface.
+ * @reserved1: Reserved bytes. Set to zero.
+ */
+struct mux_adgh {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ u8 service_class;
+ u8 next_count;
+ u8 reserved1[6];
+};
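+
+/* The layout above is 16 bytes (4 + 2 + 1 + 1 + 1 + 1 + 6), matching the
+ * minimum @length value of 0x10 documented for the header.
+ */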
+
+/**
+ * struct mux_lite_cmdh - MUX Lite Command Header
+ * @signature: Signature of the Command Header(0x48444D43)
+ * @cmd_len: Length (in bytes) of the command. This length shall
+ * include the header size. Minimum value: 0x10
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @command_type: Command enum.
+ * @transaction_id: 4-byte value generated and sent along with a
+ * command. Responses and ACKs shall have the same
+ * transaction ID as their commands. It shall be unique to
+ * the command transaction on the given interface.
+ * @param: Optional parameters used with the command.
+ */
+struct mux_lite_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_lite_vfl - value field in generic table
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_lite_vfl {
+ u32 nr_of_bytes;
+};
+
+/**
+ * struct ipc_mem_lite_gen_tbl - Generic table format for Queue Level
+ * and Flow Credit
+ * @signature: Signature of the table
+ * @length: Length of the table
+ * @if_id: ID of the interface the table belongs to
+ * @vfl_length: Value field length
+ * @reserved: Reserved
+ * @vfl: Value field of variable length
+ */
+struct ipc_mem_lite_gen_tbl {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 vfl_length;
+ u32 reserved[2];
+ struct mux_lite_vfl vfl;
+};
+
+/**
+ * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+/**
+ * ipc_mux_dl_acb_send_cmds - Respond to the Command blocks.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @cmd_type: Command
+ * @if_id: Session interface id.
+ * @transaction_id: Command transaction id.
+ * @param: Pointer to command params.
+ * @res_size: Response size
+ * @blocking: True for blocking send
+ * @respond: If true return transaction ID
+ *
+ * Returns: 0 in success and failure value on error
+ */
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond);
+
+/**
+ * ipc_mux_netif_tx_flowctrl - Enable/Disable TX flow control on MUX sessions.
+ * @session: Pointer to mux_session struct
+ * @idx: Session ID
+ * @on: true for Enable and false for disable flow control
+ */
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on);
+
+/**
+ * ipc_mux_ul_trigger_encode - Route the UL packet through the IP MUX layer
+ * for encoding.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @if_id: Session ID.
+ * @skb: Pointer to ipc_skb.
+ *
+ * Returns: 0 if successfully encoded
+ * failure value on error
+ * -EBUSY if packet has to be retransmitted.
+ */
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb);
+
+/**
+ * ipc_mux_ul_data_encode - UL encode function for calling from Tasklet context.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: TRUE if any packet of any session is encoded, FALSE otherwise.
+ */
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_ul_encoded_process - Handles the Modem processed UL data by adding
+ * the SKB to the UL free list.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
new file mode 100644
index 000000000000..7f7d364d3a51
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+MODULE_DESCRIPTION("IOSM Driver");
+MODULE_LICENSE("GPL v2");
+
+/* WWAN GUID */
+static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
+ 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+
+static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the MSI resources. */
+ ipc_release_irq(ipc_pcie);
+
+ /* Free mapped doorbell scratchpad bus memory into CPU space. */
+ iounmap(ipc_pcie->scratchpad);
+
+ /* Free mapped IPC_REGS bus memory into CPU space. */
+ iounmap(ipc_pcie->ipc_regs);
+
+ /* Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+ pci_release_regions(ipc_pcie->pci);
+}
+
+static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the shared memory resources. */
+ ipc_imem_cleanup(ipc_pcie->imem);
+
+ ipc_pcie_resources_release(ipc_pcie);
+
+ /* Signal to the system that the PCI device is not in use. */
+ pci_disable_device(ipc_pcie->pci);
+}
+
+static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
+{
+ kfree(ipc_pcie->imem);
+ kfree(ipc_pcie);
+}
+
+static void ipc_pcie_remove(struct pci_dev *pci)
+{
+ struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);
+
+ ipc_pcie_cleanup(ipc_pcie);
+
+ ipc_pcie_deinit(ipc_pcie);
+}
+
+static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pci = ipc_pcie->pci;
+ u32 cap = 0;
+ int ret;
+
+ /* Reserved PCI I/O and memory resources.
+ * Mark all PCI regions associated with PCI device pci as
+ * being reserved by owner IOSM_IPC.
+ */
+ ret = pci_request_regions(pci, "IOSM_IPC");
+ if (ret) {
+ dev_err(ipc_pcie->dev, "failed pci request regions");
+ goto pci_request_region_fail;
+ }
+
+ /* Reserve the doorbell IPC REGS memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+ * pci_ioremap_bar() ensures that the memory is marked uncachable.
+ */
+ ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
+
+ if (!ipc_pcie->ipc_regs) {
+ dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
+ ret = -EBUSY;
+ goto ipc_regs_remap_fail;
+ }
+
+ /* Reserve the MMIO scratchpad memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+ * pci_ioremap_bar() ensures that the memory is marked uncachable.
+ */
+ ipc_pcie->scratchpad =
+ pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
+
+ if (!ipc_pcie->scratchpad) {
+ dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
+ ret = -EBUSY;
+ goto scratch_remap_fail;
+ }
+
+ /* Install the irq handler triggered by CP. */
+ ret = ipc_acquire_irq(ipc_pcie);
+ if (ret) {
+ dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
+ goto irq_acquire_fail;
+ }
+
+ /* Enable bus-mastering for the IOSM IPC device. */
+ pci_set_master(pci);
+
+ /* Enable LTR if possible
+ * This is needed for L1.2!
+ */
+ pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
+ if (cap & PCI_EXP_DEVCAP2_LTR)
+ pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+
+ dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
+
+ return ret;
+
+irq_acquire_fail:
+ iounmap(ipc_pcie->scratchpad);
+scratch_remap_fail:
+ iounmap(ipc_pcie->ipc_regs);
+ipc_regs_remap_fail:
+ pci_release_regions(pci);
+pci_request_region_fail:
+ return ret;
+}
+
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u16 value = 0;
+ u32 enabled;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
+ enabled = value & PCI_EXP_LNKCTL_ASPMC;
+ dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
+
+ return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
+ enabled == PCI_EXP_LNKCTL_ASPMC);
+}
+
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *parent;
+ u16 link_status = 0;
+
+ if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
+ dev_err(ipc_pcie->dev, "root port not found");
+ return false;
+ }
+
+ parent = ipc_pcie->pci->bus->self;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
+ dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
+
+ return link_status & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u32 support;
+ u32 cap = 0;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
+ support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
+ if (support < PCI_EXP_LNKCTL_ASPM_L1) {
+ dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
+ pdev->device);
+ return false;
+ }
+ return true;
+}
+
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
+{
+ bool parent_aspm_enabled, dev_aspm_enabled;
+
+ /* check if both root port and child supports ASPM L1 */
+ if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
+ !ipc_pcie_check_aspm_supported(ipc_pcie, false))
+ return;
+
+ parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
+ dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
+
+ dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
+ parent_aspm_enabled ? "Enabled" : "Disabled",
+ dev_aspm_enabled ? "Enabled" : "Disabled");
+}
+
+/* Initializes PCIe endpoint configuration */
+static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
+{
+ /* BAR0 is used for doorbell */
+ ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;
+
+ /* update HW configuration */
+ ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
+ ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
+ ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
+ ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
+}
+
+/* This will read the BIOS WWAN RTD3 settings:
+ * D0L1.2/D3L2/Disabled
+ */
+static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
+{
+ union acpi_object *object;
+ acpi_handle handle_acpi;
+
+ handle_acpi = ACPI_HANDLE(dev);
+ if (!handle_acpi) {
+ pr_debug("pci device is NOT ACPI supporting device\n");
+ goto default_ret;
+ }
+
+ object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+
+ if (object && object->integer.value == 3) {
+ /* Free the _DSM result to avoid leaking it. */
+ ACPI_FREE(object);
+ return IPC_PCIE_D3L2;
+ }
+ ACPI_FREE(object); /* ACPI_FREE(NULL) is a safe no-op. */
+
+default_ret:
+ return IPC_PCIE_D0L12;
+}
+
+static int ipc_pcie_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
+
+ pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
+ pci_id->vendor);
+
+ if (!ipc_pcie)
+ goto ret_fail;
+
+ /* Initialize ipc dbg component for the PCIe device */
+ ipc_pcie->dev = &pci->dev;
+
+ /* Set the driver specific data. */
+ pci_set_drvdata(pci, ipc_pcie);
+
+ /* Save the address of the PCI device configuration. */
+ ipc_pcie->pci = pci;
+
+ /* Update platform configuration */
+ ipc_pcie_config_init(ipc_pcie);
+
+ /* Initialize the device before it is used. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ */
+ if (pci_enable_device(pci)) {
+ dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
+ /* If enabling the PCIe device has failed then calling
+ * ipc_pcie_cleanup() would panic the system. Moreover,
+ * ipc_pcie_cleanup() is required to be called after
+ * ipc_imem_mount()
+ */
+ goto pci_enable_fail;
+ }
+
+ ipc_pcie_config_aspm(ipc_pcie);
+ dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
+
+ /* Read the WWAN RTD3 BIOS setting. */
+ ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);
+
+ ipc_pcie->suspend = 0;
+
+ if (ipc_pcie_resources_request(ipc_pcie))
+ goto resources_req_fail;
+
+ /* Establish the link to the imem layer. */
+ ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
+ ipc_pcie->scratchpad, ipc_pcie->dev);
+ if (!ipc_pcie->imem) {
+ dev_err(ipc_pcie->dev, "failed to init imem");
+ goto imem_init_fail;
+ }
+
+ return 0;
+
+imem_init_fail:
+ ipc_pcie_resources_release(ipc_pcie);
+resources_req_fail:
+ pci_disable_device(pci);
+pci_enable_fail:
+ kfree(ipc_pcie);
+ret_fail:
+ return -EIO;
+}
+
+static const struct pci_device_id iosm_ipc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
+
+/* Enter sleep in s2idle case
+ */
+static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);
+
+ return 0;
+}
+
+/* Resume from sleep in s2idle case
+ */
+static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);
+
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after clearing bit. */
+ smp_mb__after_atomic();
+ return 0;
+}
+
+int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = ipc_pcie->pci;
+
+ /* Execute D3 one time. */
+ if (pdev->current_state != PCI_D0) {
+ dev_dbg(ipc_pcie->dev, "done for PM=%d", pdev->current_state);
+ return 0;
+ }
+
+ /* The HAL shall ask the shared memory layer whether D3 is allowed. */
+ ipc_imem_pm_suspend(ipc_pcie->imem);
+
+ /* Save the PCI configuration space of a device before suspending. */
+ ret = pci_save_state(pdev);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_save_state error=%d", ret);
+ return ret;
+ }
+
+ /* Set the power state of a PCI device.
+ * Transition a device to a new power state, using the device's PCI PM
+ * registers.
+ */
+ ret = pci_set_power_state(pdev, PCI_D3cold);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
+ return ret;
+ }
+
+ dev_dbg(ipc_pcie->dev, "SUSPEND done");
+ return ret;
+}
+
+int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
+{
+ int ret;
+
+ /* Set the power state of a PCI device.
+ * Transition a device to a new power state, using the device's PCI PM
+ * registers.
+ */
+ ret = pci_set_power_state(ipc_pcie->pci, PCI_D0);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
+ return ret;
+ }
+
+ pci_restore_state(ipc_pcie->pci);
+
+ /* The HAL shall inform the shared memory layer that the device is
+ * active.
+ */
+ ipc_imem_pm_resume(ipc_pcie->imem);
+
+ dev_dbg(ipc_pcie->dev, "RESUME done");
+ return ret;
+}
+
+static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_suspend_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_suspend(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_resume_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_resume(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
+
+static struct pci_driver iosm_ipc_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = ipc_pcie_probe,
+ .remove = ipc_pcie_remove,
+ .driver = {
+ .pm = &iosm_ipc_pm,
+ },
+ .id_table = iosm_ipc_ids,
+};
+
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction)
+{
+ if (ipc_pcie->pci) {
+ *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
+ direction);
+ if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
+ dev_err(ipc_pcie->dev, "dma mapping failed");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction)
+{
+ if (!mapping)
+ return;
+ if (ipc_pcie->pci)
+ dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
+}
+
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size)
+{
+ struct sk_buff *skb;
+
+ if (!ipc_pcie || !size) {
+ pr_err("invalid pcie object or size");
+ return NULL;
+ }
+
+ skb = __netdev_alloc_skb(NULL, size, flags);
+ if (!skb)
+ return NULL;
+
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+ IPC_CB(skb)->mapping = 0;
+
+ return skb;
+}
+
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom)
+{
+ struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
+ size + headroom);
+ if (!skb)
+ return NULL;
+
+ if (headroom)
+ skb_reserve(skb, headroom);
+
+ if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ /* Store the mapping address in skb scratch pad for later usage */
+ IPC_CB(skb)->mapping = *mapping;
+ IPC_CB(skb)->direction = direction;
+ IPC_CB(skb)->len = size;
+
+ return skb;
+}
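+
+/* Usage sketch (illustrative only): allocation and release are paired, e.g.
+ *
+ * dma_addr_t mapping;
+ * struct sk_buff *skb;
+ *
+ * skb = ipc_pcie_alloc_skb(ipc_pcie, size, GFP_ATOMIC, &mapping,
+ * DMA_FROM_DEVICE, 0);
+ * if (skb)
+ * ipc_pcie_kfree_skb(ipc_pcie, skb);
+ */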
+
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
+ IPC_CB(skb)->mapping = 0;
+ dev_kfree_skb(skb);
+}
+
+static int __init iosm_ipc_driver_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret) {
+ pr_err("registering of IOSM PCIe driver failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit iosm_ipc_driver_exit(void)
+{
+ pci_unregister_driver(&iosm_ipc_driver);
+}
+
+module_init(iosm_ipc_driver_init);
+module_exit(iosm_ipc_driver_exit);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
new file mode 100644
index 000000000000..7d1f0cd7364c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PCIE_H
+#define IOSM_IPC_PCIE_H
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+
+#include "iosm_ipc_irq.h"
+
+/* Device ID */
+#define INTEL_CP_DEVICE_7560_ID 0x7560
+
+/* Define for BAR area usage */
+#define IPC_DOORBELL_BAR0 0
+#define IPC_SCRATCHPAD_BAR2 2
+
+/* Defines for DOORBELL registers information */
+#define IPC_DOORBELL_CH_OFFSET BIT(5)
+#define IPC_WRITE_PTR_REG_0 BIT(4)
+#define IPC_CAPTURE_PTR_REG_0 BIT(3)
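+
+/* Note: the BIT() values above evaluate to plain byte values (32, 16 and 8
+ * respectively); they appear to be used as register offsets rather than as
+ * bit masks.
+ */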
+
+/* Number of MSI used for IPC */
+#define IPC_MSI_VECTORS 1
+
+/* Total number of Maximum IPC IRQ vectors used for IPC */
+#define IPC_IRQ_VECTORS IPC_MSI_VECTORS
+
+/**
+ * enum ipc_pcie_sleep_state - Enum type to different sleep state transitions
+ * @IPC_PCIE_D0L12: Put the sleep state in D0L12
+ * @IPC_PCIE_D3L2: Put the sleep state in D3L2
+ */
+enum ipc_pcie_sleep_state {
+ IPC_PCIE_D0L12,
+ IPC_PCIE_D3L2,
+};
+
+/**
+ * struct iosm_pcie - IPC_PCIE struct.
+ * @pci: Address of the device description
+ * @dev: Pointer to generic device structure
+ * @ipc_regs: Remapped CP doorbell address of the irq register
+ * set, to fire the doorbell irq.
+ * @scratchpad: Remapped CP scratchpad address, used to send the
+ * configuration tuple and the IPC descriptors
+ * to CP in the ROM phase. The config tuple
+ * information is saved on the MSI scratchpad.
+ * @imem: Pointer to imem data struct
+ * @ipc_regs_bar_nr: BAR number to be used for IPC doorbell
+ * @scratchpad_bar_nr: BAR number to be used for Scratchpad
+ * @nvec: number of requested irq vectors
+ * @doorbell_reg_offset: Doorbell register offset
+ * @doorbell_write: Doorbell write register
+ * @doorbell_capture: Doorbell capture register
+ * @suspend: S2IDLE sleep/active
+ * @d3l2_support: Read WWAN RTD3 BIOS setting for D3L2 support
+ */
+struct iosm_pcie {
+ struct pci_dev *pci;
+ struct device *dev;
+ void __iomem *ipc_regs;
+ void __iomem *scratchpad;
+ struct iosm_imem *imem;
+ int ipc_regs_bar_nr;
+ int scratchpad_bar_nr;
+ int nvec;
+ u32 doorbell_reg_offset;
+ u32 doorbell_write;
+ u32 doorbell_capture;
+ unsigned long suspend;
+ enum ipc_pcie_sleep_state d3l2_support;
+};
+
+/**
+ * struct ipc_skb_cb - Struct definition of the socket buffer which is mapped to
+ * the cb field of the skb
+ * @mapping: Stores the physical or IOVA mapped address of the skb
+ * virtual address.
+ * @direction: DMA direction
+ * @len: Length of the DMA mapped region
+ * @op_type: Expected values are defined in enum ipc_ul_usr_op.
+ */
+struct ipc_skb_cb {
+ dma_addr_t mapping;
+ int direction;
+ int len;
+ u8 op_type;
+};
+
+/**
+ * enum ipc_ul_usr_op - Control operation to execute the right action on
+ * the user interface.
+ * @UL_USR_OP_BLOCKED: The uplink app was blocked until CP confirms that the
+ * uplink buffer was consumed triggered by the IRQ.
+ * @UL_MUX_OP_ADB: In MUX mode the UL ADB shall be added to the free list.
+ * @UL_DEFAULT: SKB in non muxing mode
+ */
+enum ipc_ul_usr_op {
+ UL_USR_OP_BLOCKED,
+ UL_MUX_OP_ADB,
+ UL_DEFAULT,
+};
+
+/**
+ * ipc_pcie_addr_map - Maps the kernel's virtual address to either IOVA
+ * address space or Physical address space, the mapping is
+ * stored in the skb's cb.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @data: Skb mem containing data
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction);
+
+/**
+ * ipc_pcie_addr_unmap - Unmaps the skb memory region from IOVA address space
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ */
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction);
+
+/**
+ * ipc_pcie_alloc_skb - Allocate an uplink SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Size of the SKB required.
+ * @flags: Allocation flags
+ * @mapping: Receives either the mapped IOVA address or the
+ * converted physical address
+ * @direction: DMA data direction
+ * @headroom: Header data offset
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom);
+
+/**
+ * ipc_pcie_alloc_local_skb - Allocate a local SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @flags: Allocation flags
+ * @size: Size of the SKB required.
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size);
+
+/**
+ * ipc_pcie_kfree_skb - Free skb allocated by ipc_pcie_alloc_*_skb().
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @skb: Pointer to the skb
+ */
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb);
+
+/**
+ * ipc_pcie_check_data_link_active - Check Data Link Layer Active
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: true if active, otherwise false
+ */
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_suspend - Callback invoked by pm_runtime_suspend. It decrements
+ * the device's usage count and then carries out a suspend,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_suspend(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_resume - Callback invoked by pm_runtime_resume. It increments
+ * the device's usage count and then carries out a resume,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_resume(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_check_aspm_enabled - Check if ASPM L1 is already enabled
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @parent: True if checking ASPM L1 for parent else false
+ *
+ * Returns: true if ASPM is already enabled else false
+ */
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent);
+
+/**
+ * ipc_pcie_config_aspm - Configure ASPM L1
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ */
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.c b/drivers/net/wwan/iosm/iosm_ipc_pm.c
new file mode 100644
index 000000000000..413601c72dcd
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+
+/* Timeout value in MS for the PM to wait for device to reach active state */
+#define IPC_PM_ACTIVE_TIMEOUT_MS (500)
+
+/* Note that here "active" has the value 1, as compared to the enums
+ * ipc_mem_host_pm_state or ipc_mem_dev_pm_state, where "active" is 0
+ */
+#define IPC_PM_SLEEP (0)
+#define CONSUME_STATE (0)
+#define IPC_PM_ACTIVE (1)
+
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check)
+{
+ if (host_slp_check && ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE &&
+ ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE_WAIT) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev,
+ "Pend HPDA update set. Host PM_State: %d identifier:%d",
+ ipc_pm->host_pm_state, identifier);
+ return;
+ }
+
+ if (!ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true)) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev, "Pending HPDA update set. identifier:%d",
+ identifier);
+ return;
+ }
+ ipc_pm->pending_hpda_update = false;
+
+ /* Trigger the irq towards CP */
+ ipc_cp_irq_hpda_update(ipc_pm->pcie, identifier);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, false);
+}
+
+/* Wake up the device if it is in low power mode. */
+static bool ipc_pm_link_activate(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_ACTIVE)
+ return true;
+
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_SLEEP) {
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_SLEEP) {
+ /* Wake up the device. */
+ ipc_cp_irq_sleep_control(ipc_pm->pcie,
+ IPC_MEM_DEV_PM_WAKEUP);
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE_WAIT;
+
+ goto not_active;
+ }
+
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_ACTIVE_WAIT)
+ goto not_active;
+
+ return true;
+ }
+
+not_active:
+ /* link is not ready */
+ return false;
+}
+
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm)
+{
+ bool ret_val = false;
+
+ if (ipc_pm->ap_state != IPC_MEM_DEV_PM_ACTIVE) {
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ /* Wait for IPC_PM_ACTIVE_TIMEOUT_MS for Device sleep state
+ * machine to enter ACTIVE state.
+ */
+ set_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ if (!wait_for_completion_interruptible_timeout
+ (&ipc_pm->host_sleep_complete,
+ msecs_to_jiffies(IPC_PM_ACTIVE_TIMEOUT_MS))) {
+ dev_err(ipc_pm->dev,
+ "PM timeout. Expected State:%d. Actual: %d",
+ IPC_MEM_DEV_PM_ACTIVE, ipc_pm->ap_state);
+ goto active_timeout;
+ }
+ }
+
+ ret_val = true;
+active_timeout:
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ /* Reset the atomic variable in any case as device sleep
+ * state machine change is no longer of interest.
+ */
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+
+ return ret_val;
+}
+
+static void ipc_pm_on_link_sleep(struct iosm_pm *ipc_pm)
+{
+ /* pending sleep ack and all conditions are cleared
+ * -> signal SLEEP__ACK to CP
+ */
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_SLEEP);
+}
+
+static void ipc_pm_on_link_wake(struct iosm_pm *ipc_pm, bool ack)
+{
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ if (ack) {
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_ACTIVE);
+
+ /* Check the consume state and wake up a pending host sleep waiter. */
+ if (test_bit(CONSUME_STATE, &ipc_pm->host_sleep_pend))
+ complete(&ipc_pm->host_sleep_complete);
+ }
+
+ /* Check for pending HPDA update.
+ * Pending HP update could be because of sending message was
+ * put on hold due to Device sleep state or due to TD update
+ * which could be because of Device Sleep and Host Sleep
+ * states.
+ */
+ if (ipc_pm->pending_hpda_update &&
+ ipc_pm->host_pm_state == IPC_MEM_HOST_PM_ACTIVE)
+ ipc_pm_signal_hpda_doorbell(ipc_pm, IPC_HP_PM_TRIGGER, true);
+}
+
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active)
+{
+ union ipc_pm_cond old_cond;
+ union ipc_pm_cond new_cond;
+ bool link_active;
+
+ /* Save the current D3 state. */
+ new_cond = ipc_pm->pm_cond;
+ old_cond = ipc_pm->pm_cond;
+
+ /* Calculate the power state only in the runtime phase. */
+ switch (unit) {
+ case IPC_PM_UNIT_IRQ: /* CP irq */
+ new_cond.irq = active;
+ break;
+
+ case IPC_PM_UNIT_LINK: /* Device link state. */
+ new_cond.link = active;
+ break;
+
+ case IPC_PM_UNIT_HS: /* Host sleep trigger requires Link. */
+ new_cond.hs = active;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Has anything changed? */
+ if (old_cond.raw == new_cond.raw) {
+ /* Stay in the current PM state. */
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+ goto ret;
+ }
+
+ ipc_pm->pm_cond = new_cond;
+
+ if (new_cond.link)
+ ipc_pm_on_link_wake(ipc_pm, unit == IPC_PM_UNIT_LINK);
+ else if (unit == IPC_PM_UNIT_LINK)
+ ipc_pm_on_link_sleep(ipc_pm);
+
+ if (old_cond.link == IPC_PM_SLEEP && new_cond.raw) {
+ link_active = ipc_pm_link_activate(ipc_pm);
+ goto ret;
+ }
+
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+
+ret:
+ return link_active;
+}
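+
+/* Note on usage: callers bracket an operation with a matching active/idle
+ * pair, as ipc_pm_signal_hpda_doorbell() above does with
+ * ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true/false) around the doorbell
+ * write.
+ */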
+
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm)
+{
+ /* suspend not allowed if host_pm_state is not IPC_MEM_HOST_PM_ACTIVE */
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_ACTIVE);
+ return false;
+ }
+
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_SLEEP_WAIT_D3;
+
+ return true;
+}
+
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_SLEEP) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_SLEEP);
+ return false;
+ }
+
+ /* Sending Sleep Exit message to CP. Update the state */
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE_WAIT;
+
+ return true;
+}
+
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep)
+{
+ if (sleep) {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_SLEEP;
+ } else {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+ }
+}
+
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm, u32 cp_pm_req)
+{
+ if (cp_pm_req == ipc_pm->device_sleep_notification)
+ return false;
+
+ ipc_pm->device_sleep_notification = cp_pm_req;
+
+ /* Evaluate the PM request. */
+ switch (ipc_pm->cp_state) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ /* Inform the PM that the device link can go down. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, false);
+ return true;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d active: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ /* Inform the PM that the device link is active. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, true);
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ break;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d sleep: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(ipc_pm->dev, "confused loc-pm=%d, req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+
+ return false;
+}
+
+void ipc_pm_init(struct iosm_protocol *ipc_protocol)
+{
+ struct iosm_imem *ipc_imem = ipc_protocol->imem;
+ struct iosm_pm *ipc_pm = &ipc_protocol->pm;
+
+ ipc_pm->pcie = ipc_imem->pcie;
+ ipc_pm->dev = ipc_imem->dev;
+
+ ipc_pm->pm_cond.irq = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.hs = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ /* Create generic wait-for-completion handler for Host Sleep
+ * and device sleep coordination.
+ */
+ init_completion(&ipc_pm->host_sleep_complete);
+
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+}
+
+void ipc_pm_deinit(struct iosm_protocol *proto)
+{
+ struct iosm_pm *ipc_pm = &proto->pm;
+
+ complete(&ipc_pm->host_sleep_complete);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.h b/drivers/net/wwan/iosm/iosm_ipc_pm.h
new file mode 100644
index 000000000000..e7c00f388cb0
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PM_H
+#define IOSM_IPC_PM_H
+
+/* Trigger the doorbell interrupt on cp to change the PM sleep/active status */
+#define ipc_cp_irq_sleep_control(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_SLEEP, data)
+
+/* Trigger the doorbell interrupt on CP to do hpda update */
+#define ipc_cp_irq_hpda_update(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_HPDA, 0xFF & (data))
+
+/**
+ * union ipc_pm_cond - Conditions for D3 and the sleep message to CP.
+ * @raw: raw/combined value for faster check
+ * @irq: IRQ towards CP
+ * @hs: Host Sleep
+ * @link: Device link state.
+ */
+union ipc_pm_cond {
+ unsigned int raw;
+
+ struct {
+ unsigned int irq:1,
+ hs:1,
+ link:1;
+ };
+};
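+
+/* Example reading of the union above: raw == 0 means no unit needs the link
+ * (sleep is allowed), while any set bit (for example irq while signalling
+ * CP) keeps or wakes the link, as evaluated in ipc_pm_trigger().
+ */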
+
+/**
+ * enum ipc_mem_host_pm_state - Possible states of the HOST SLEEP finite state
+ * machine.
+ * @IPC_MEM_HOST_PM_ACTIVE: Host is active
+ * @IPC_MEM_HOST_PM_ACTIVE_WAIT: Intermediate state before going to
+ * active
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE: Intermediate state to wait for idle
+ * before going into sleep
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_D3: Intermediate state to wait for D3
+ * before going to sleep
+ * @IPC_MEM_HOST_PM_SLEEP: After this state the interface is not
+ * accessible; the host is in suspend-to-RAM
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP: Intermediate state before exiting
+ * sleep
+ */
+enum ipc_mem_host_pm_state {
+ IPC_MEM_HOST_PM_ACTIVE,
+ IPC_MEM_HOST_PM_ACTIVE_WAIT,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_D3,
+ IPC_MEM_HOST_PM_SLEEP,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP,
+};
+
+/**
+ * enum ipc_mem_dev_pm_state - Possible states of the DEVICE SLEEP finite state
+ * machine.
+ * @IPC_MEM_DEV_PM_ACTIVE: IPC_MEM_DEV_PM_ACTIVE is the initial
+ * power management state.
+ * IRQ(struct ipc_mem_device_info:
+ * device_sleep_notification)
+ * and DOORBELL-IRQ-HPDA(data) values.
+ * @IPC_MEM_DEV_PM_SLEEP: IPC_MEM_DEV_PM_SLEEP is PM state for
+ * sleep.
+ * @IPC_MEM_DEV_PM_WAKEUP: DOORBELL-IRQ-DEVICE_WAKE(data).
+ * @IPC_MEM_DEV_PM_HOST_SLEEP: DOORBELL-IRQ-HOST_SLEEP(data).
+ * @IPC_MEM_DEV_PM_ACTIVE_WAIT: Local intermediate states.
+ * @IPC_MEM_DEV_PM_FORCE_SLEEP: DOORBELL-IRQ-FORCE_SLEEP.
+ * @IPC_MEM_DEV_PM_FORCE_ACTIVE: DOORBELL-IRQ-FORCE_ACTIVE.
+ */
+enum ipc_mem_dev_pm_state {
+ IPC_MEM_DEV_PM_ACTIVE,
+ IPC_MEM_DEV_PM_SLEEP,
+ IPC_MEM_DEV_PM_WAKEUP,
+ IPC_MEM_DEV_PM_HOST_SLEEP,
+ IPC_MEM_DEV_PM_ACTIVE_WAIT,
+ IPC_MEM_DEV_PM_FORCE_SLEEP = 7,
+ IPC_MEM_DEV_PM_FORCE_ACTIVE,
+};
+
+/**
+ * struct iosm_pm - Power management instance
+ * @pcie: Pointer to iosm_pcie structure
+ * @dev: Pointer to device structure
+ * @host_pm_state: PM states for host
+ * @host_sleep_pend: Variable to indicate Host Sleep Pending
+ * @host_sleep_complete: Generic wait-for-completion used in
+ * case of Host Sleep
+ * @pm_cond: Conditions for power management
+ * @ap_state: Current power management state, the
+ * initial state is IPC_MEM_DEV_PM_ACTIVE eq. 0.
+ * @cp_state: PM State of CP
+ * @device_sleep_notification: Last handled device sleep notification
+ * @pending_hpda_update: is a HPDA update pending?
+ */
+struct iosm_pm {
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ enum ipc_mem_host_pm_state host_pm_state;
+ unsigned long host_sleep_pend;
+ struct completion host_sleep_complete;
+ union ipc_pm_cond pm_cond;
+ enum ipc_mem_dev_pm_state ap_state;
+ enum ipc_mem_dev_pm_state cp_state;
+ u32 device_sleep_notification;
+ u8 pending_hpda_update:1;
+};
+
+/**
+ * enum ipc_pm_unit - Power management units.
+ * @IPC_PM_UNIT_IRQ: IRQ towards CP
+ * @IPC_PM_UNIT_HS: Host Sleep for converged protocol
+ * @IPC_PM_UNIT_LINK: Link state controlled by CP.
+ */
+enum ipc_pm_unit {
+ IPC_PM_UNIT_IRQ,
+ IPC_PM_UNIT_HS,
+ IPC_PM_UNIT_LINK,
+};
+
+/**
+ * ipc_pm_init - Allocate power management component
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_init(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_deinit - Free power management component, invalidating its pointer.
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_deinit(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_dev_slp_notification - Handle a sleep notification message from the
+ * device. This can be called from interrupt state.
+ * This function also handles Host Sleep requests
+ * if the Host Sleep protocol is register based.
+ * @ipc_pm: Pointer to power management component
+ * @sleep_notification: Actual notification from device
+ *
+ * Returns: true if dev sleep state has to be checked, false otherwise.
+ */
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm,
+ u32 sleep_notification);
+
+/**
+ * ipc_pm_set_s2idle_sleep - Set PM variables to sleep/active
+ * @ipc_pm: Pointer to power management component
+ * @sleep: true to enter sleep/false to exit sleep
+ */
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep);
+
+/**
+ * ipc_pm_prepare_host_sleep - Prepare the PM for sleep by entering
+ * IPC_MEM_HOST_PM_SLEEP_WAIT_D3 state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not active.
+ */
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_prepare_host_active - Prepare the PM for wakeup by entering
+ * IPC_MEM_HOST_PM_ACTIVE_WAIT state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not sleeping.
+ */
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_wait_for_device_active - Wait up to IPC_PM_ACTIVE_TIMEOUT_MS ms
+ * for the device to reach active state
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true if device is active, false on timeout
+ */
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_signal_hpda_doorbell - Wake up the device if it is in low power mode
+ * and trigger a head pointer update interrupt.
+ * @ipc_pm: Pointer to power management component
+ * @identifier: specifies what component triggered hpda update irq
+ * @host_slp_check: If set to true, the Host Sleep state machine check is
+ * performed; the doorbell is triggered only if the Host
+ * Sleep state machine allows the HP update, otherwise the
+ * pending flag is set. If set to false, the Host Sleep
+ * check is not performed, which is helpful for Host Sleep
+ * negotiation through the message ring.
+ */
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check);
+
+/**
+ * ipc_pm_trigger - Update power manager and wake up the link if needed
+ * @ipc_pm: Pointer to power management component
+ * @unit: Power management units
+ * @active: Device link state
+ *
+ * Returns: true if link is unchanged or active, false otherwise
+ */
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
new file mode 100644
index 000000000000..beb944847398
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+
+/* open logical channel for control communication */
+static int ipc_port_ctrl_start(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+ int ret = 0;
+
+ ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
+ ipc_port->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_port->channel)
+ ret = -EIO;
+
+ return ret;
+}
+
+/* close logical channel */
+static void ipc_port_ctrl_stop(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ ipc_imem_sys_cdev_close(ipc_port);
+}
+
+/* transfer control data to modem */
+static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ return ipc_imem_sys_cdev_write(ipc_port, skb);
+}
+
+static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
+ .start = ipc_port_ctrl_start,
+ .stop = ipc_port_ctrl_stop,
+ .tx = ipc_port_ctrl_tx,
+};
+
+/* Port init func */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg)
+{
+ struct iosm_cdev *ipc_port = kzalloc(sizeof(*ipc_port), GFP_KERNEL);
+ enum wwan_port_type port_type = ipc_port_cfg.wwan_port_type;
+ enum ipc_channel_id chl_id = ipc_port_cfg.id;
+
+ if (!ipc_port)
+ return NULL;
+
+ ipc_port->dev = ipc_imem->dev;
+ ipc_port->pcie = ipc_imem->pcie;
+
+ ipc_port->port_type = port_type;
+ ipc_port->chl_id = chl_id;
+ ipc_port->ipc_imem = ipc_imem;
+
+ ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
+ &ipc_wwan_ctrl_ops, ipc_port);
+
+ return ipc_port;
+}
+
+/* Port deinit func */
+void ipc_port_deinit(struct iosm_cdev *port[])
+{
+ struct iosm_cdev *ipc_port;
+ u8 ctrl_chl_nr;
+
+ for (ctrl_chl_nr = 0; ctrl_chl_nr < IPC_MEM_MAX_CHANNELS;
+ ctrl_chl_nr++) {
+ if (port[ctrl_chl_nr]) {
+ ipc_port = port[ctrl_chl_nr];
+ wwan_remove_port(ipc_port->iosm_port);
+ kfree(ipc_port);
+ }
+ }
+}
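
A minimal lifecycle sketch for the two entry points above (hypothetical caller, not part of the patch): every successful ipc_port_init() must eventually be matched by one ipc_port_deinit() on the owning array, which skips NULL slots.

	struct iosm_cdev *ports[IPC_MEM_MAX_CHANNELS] = { NULL };
	struct ipc_chnl_cfg cfg;

	/* cfg is assumed to be filled from the channel config table */
	ports[cfg.id] = ipc_port_init(ipc_imem, cfg);	/* may return NULL */

	/* ... on driver teardown: frees every non-NULL entry */
	ipc_port_deinit(ports);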
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.h b/drivers/net/wwan/iosm/iosm_ipc_port.h
new file mode 100644
index 000000000000..11bc8ed21616
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PORT_H
+#define IOSM_IPC_PORT_H
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * struct iosm_cdev - State of the char driver layer.
+ * @iosm_port: Pointer of type wwan_port
+ * @ipc_imem: imem instance
+ * @dev: Pointer to device struct
+ * @pcie: PCIe component
+ * @port_type: WWAN port type
+ * @channel: Channel instance
+ * @chl_id: Channel Identifier
+ */
+struct iosm_cdev {
+ struct wwan_port *iosm_port;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct iosm_pcie *pcie;
+ enum wwan_port_type port_type;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+};
+
+/**
+ * ipc_port_init - Allocate IPC port & register to wwan subsystem for AT/MBIM
+ * communication.
+ * @ipc_imem: Pointer to iosm_imem structure
+ * @ipc_port_cfg: IPC Port Config
+ *
+ * Returns: allocated IPC port instance on success & NULL on failure
+ */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg);
+
+/**
+ * ipc_port_deinit - Free IPC port & unregister port with wwan subsystem.
+ * @ipc_port: Array of pointer to the ipc port data-struct
+ */
+void ipc_port_deinit(struct iosm_cdev *ipc_port[]);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.c b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
new file mode 100644
index 000000000000..834d8b146a94
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_task_queue.h"
+
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response)
+{
+ int index = ipc_protocol_msg_prep(ipc_protocol->imem, msg_type,
+ prep_args);
+
+ /* Store a reference to the caller-specified response in the response
+ * ring and signal CP
+ */
+ if (index >= 0 && index < IPC_MEM_MSG_ENTRIES) {
+ ipc_protocol->rsp_ring[index] = response;
+ ipc_protocol_msg_hp_update(ipc_protocol->imem);
+ }
+
+ return index;
+}
+
+/* Callback for message send */
+static int ipc_protocol_tq_msg_send_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_call_msg_send_args *send_args = msg;
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ return ipc_protocol_tq_msg_send(ipc_protocol, send_args->msg_type,
+ send_args->prep_args,
+ send_args->response);
+}
+
+/* Remove reference to a response. This is typically used when a requestor timed
+ * out and is no longer interested in the response.
+ */
+static int ipc_protocol_tq_msg_remove(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ ipc_protocol->rsp_ring[arg] = NULL;
+ return 0;
+}
+
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args)
+{
+ struct ipc_call_msg_send_args send_args;
+ unsigned int exec_timeout;
+ struct ipc_rsp response;
+ int index;
+
+ exec_timeout = (ipc_protocol_get_ap_exec_stage(ipc_protocol) ==
+ IPC_MEM_EXEC_STAGE_RUN ?
+ IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT :
+ IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT);
+
+ /* Trap if called from non-preemptible context */
+ might_sleep();
+
+ response.status = IPC_MEM_MSG_CS_INVALID;
+ init_completion(&response.completion);
+
+ send_args.msg_type = prep;
+ send_args.prep_args = prep_args;
+ send_args.response = &response;
+
+ /* Allocate and prepare message to be sent in tasklet context.
+ * A positive index returned from the task queue call references the message
+ * in case it needs to be cancelled when there is a timeout.
+ */
+ index = ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_send_cb, 0,
+ &send_args, 0, true);
+
+ if (index < 0) {
+ dev_err(ipc_protocol->dev, "msg %d failed", prep);
+ return index;
+ }
+
+ /* Wait for the device to respond to the message */
+ switch (wait_for_completion_timeout(&response.completion,
+ msecs_to_jiffies(exec_timeout))) {
+ case 0:
+ /* Timeout, there was no response from the device.
+ * Remove the reference to the local response completion
+ * object as we are no longer interested in the response.
+ */
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_remove, index,
+ NULL, 0, true);
+ dev_err(ipc_protocol->dev, "msg timeout");
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ break;
+ default:
+ /* We got a response in time; check completion status: */
+ if (response.status != IPC_MEM_MSG_CS_SUCCESS) {
+ dev_err(ipc_protocol->dev,
+ "msg completion status error %d",
+ response.status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int ipc_protocol_msg_send_host_sleep(struct iosm_protocol *ipc_protocol,
+ u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 0,
+ .sleep.state = state,
+ };
+
+ return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_SLEEP,
+ &prep_args);
+}
+
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier)
+{
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, identifier, true);
+}
+
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol)
+{
+ u32 ipc_status = ipc_protocol_get_ipc_status(ipc_protocol);
+ u32 requested;
+
+ if (ipc_status != IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_protocol->dev,
+ "irq ignored, CP IPC state is %d, should be RUNNING",
+ ipc_status);
+
+ /* Stop further processing. */
+ return false;
+ }
+
+ /* Get a copy of the PM state requested by the device and update the
+ * local device PM state accordingly.
+ */
+ requested = ipc_protocol_pm_dev_get_sleep_notification(ipc_protocol);
+
+ return ipc_pm_dev_slp_notification(&ipc_protocol->pm, requested);
+}
+
+static int ipc_protocol_tq_wakeup_dev_slp(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_pm *ipc_pm = &ipc_imem->ipc_protocol->pm;
+
+ /* Wakeup from device sleep if it is not ACTIVE */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, true);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, false);
+
+ return 0;
+}
+
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep)
+{
+ ipc_pm_set_s2idle_sleep(&ipc_protocol->pm, sleep);
+}
+
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_sleep(&ipc_protocol->pm))
+ goto err;
+
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_wakeup_dev_slp, 0, NULL, 0,
+ true);
+
+ if (!ipc_pm_wait_for_device_active(&ipc_protocol->pm)) {
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ goto err;
+ }
+
+ /* Send the sleep message for sync sys calls. */
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, ENTER_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_ENTER_SLEEP)) {
+ /* Sending ENTER_SLEEP message failed, we are still active */
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+ goto err;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return true;
+err:
+ return false;
+}
+
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_active(&ipc_protocol->pm))
+ return false;
+
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, EXIT_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_EXIT_SLEEP)) {
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return false;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ return true;
+}
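
The two functions above are the protocol halves of the PCIe suspend/resume path; a condensed sketch of how a hypothetical power-management callback would pair them (function names and error codes are illustrative, not part of the patch):

	static int example_suspend_cb(struct iosm_protocol *ipc_protocol)
	{
		/* Abort the host suspend if CP rejects or times out on the
		 * ENTER_SLEEP handshake.
		 */
		return ipc_protocol_suspend(ipc_protocol) ? 0 : -EBUSY;
	}

	static int example_resume_cb(struct iosm_protocol *ipc_protocol)
	{
		/* Mirror of suspend: EXIT_SLEEP restores the ACTIVE PM state. */
		return ipc_protocol_resume(ipc_protocol) ? 0 : -EIO;
	}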
+
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol =
+ kzalloc(sizeof(*ipc_protocol), GFP_KERNEL);
+ struct ipc_protocol_context_info *p_ci;
+ u64 addr;
+
+ if (!ipc_protocol)
+ return NULL;
+
+ ipc_protocol->dev = ipc_imem->dev;
+ ipc_protocol->pcie = ipc_imem->pcie;
+ ipc_protocol->imem = ipc_imem;
+ ipc_protocol->p_ap_shm = NULL;
+ ipc_protocol->phy_ap_shm = 0;
+
+ ipc_protocol->old_msg_tail = 0;
+
+ ipc_protocol->p_ap_shm =
+ pci_alloc_consistent(ipc_protocol->pcie->pci,
+ sizeof(*ipc_protocol->p_ap_shm),
+ &ipc_protocol->phy_ap_shm);
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "pci shm alloc error");
+ kfree(ipc_protocol);
+ return NULL;
+ }
+
+ /* Prepare the context info for CP. */
+ addr = ipc_protocol->phy_ap_shm;
+ p_ci = &ipc_protocol->p_ap_shm->ci;
+ p_ci->device_info_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, device_info);
+ p_ci->head_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, head_array);
+ p_ci->tail_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, tail_array);
+ p_ci->msg_head = addr + offsetof(struct ipc_protocol_ap_shm, msg_head);
+ p_ci->msg_tail = addr + offsetof(struct ipc_protocol_ap_shm, msg_tail);
+ p_ci->msg_ring_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, msg_ring);
+ p_ci->msg_ring_entries = cpu_to_le16(IPC_MEM_MSG_ENTRIES);
+ p_ci->msg_irq_vector = IPC_MSG_IRQ_VECTOR;
+ p_ci->device_info_irq_vector = IPC_DEVICE_IRQ_VECTOR;
+
+ ipc_mmio_set_contex_info_addr(ipc_imem->mmio, addr);
+
+ ipc_pm_init(ipc_protocol);
+
+ return ipc_protocol;
+}
+
+void ipc_protocol_deinit(struct iosm_protocol *proto)
+{
+ pci_free_consistent(proto->pcie->pci, sizeof(*proto->p_ap_shm),
+ proto->p_ap_shm, proto->phy_ap_shm);
+
+ ipc_pm_deinit(proto);
+ kfree(proto);
+}
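
ipc_protocol_init() publishes only physical addresses to CP: each context-info field is the DMA base plus offsetof() into the single coherent allocation, so AP and CP agree on the layout without sharing CPU pointers. A standalone sketch of the same pattern (simplified layout, userspace-compilable, illustrative values):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct shm_layout {
		uint32_t msg_head;
		uint32_t head_array[4];
		uint32_t msg_tail;
	};

	int main(void)
	{
		/* In the driver this base comes from the coherent DMA alloc. */
		uint64_t phys_base = 0x10000000ULL;

		/* Publish physical addresses of members, never CPU pointers. */
		printf("msg_head @ 0x%llx\n", (unsigned long long)
		       (phys_base + offsetof(struct shm_layout, msg_head)));
		printf("msg_tail @ 0x%llx\n", (unsigned long long)
		       (phys_base + offsetof(struct shm_layout, msg_tail)));
		return 0;
	}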
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.h b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
new file mode 100644
index 000000000000..9b3a6d86ece7
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_H
+#define IOSM_IPC_PROTOCOL_H
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Trigger the doorbell interrupt on CP. */
+#define IPC_DOORBELL_IRQ_HPDA 0
+#define IPC_DOORBELL_IRQ_IPC 1
+#define IPC_DOORBELL_IRQ_SLEEP 2
+
+/* IRQ vector number. */
+#define IPC_DEVICE_IRQ_VECTOR 0
+#define IPC_MSG_IRQ_VECTOR 0
+#define IPC_UL_PIPE_IRQ_VECTOR 0
+#define IPC_DL_PIPE_IRQ_VECTOR 0
+
+#define IPC_MEM_MSG_ENTRIES 128
+
+/* Default timeout for sending IPC messages like open pipe, close pipe etc.
+ * during run mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : run mode (IPC_MEM_EXEC_STAGE_RUN)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/* Default timeout for sending IPC messages like open pipe, close pipe etc.
+ * during boot mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : boot mode
+ * (IPC_MEM_EXEC_STAGE_BOOT | IPC_MEM_EXEC_STAGE_PSI | IPC_MEM_EXEC_STAGE_EBL)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/**
+ * struct ipc_protocol_context_info - Structure of the context info
+ * @device_info_addr: 64 bit address to device info
+ * @head_array: 64 bit address to head pointer array for the pipes
+ * @tail_array: 64 bit address to tail pointer array for the pipes
+ * @msg_head: 64 bit address to message head pointer
+ * @msg_tail: 64 bit address to message tail pointer
+ * @msg_ring_addr: 64 bit pointer to the message ring buffer
+ * @msg_ring_entries: This field provides the number of entries which
+ * the MR can hold
+ * @msg_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP device when generating
+ * completion for Messages.
+ * @device_info_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP dev after updating Dev. Info
+ */
+struct ipc_protocol_context_info {
+ phys_addr_t device_info_addr;
+ phys_addr_t head_array;
+ phys_addr_t tail_array;
+ phys_addr_t msg_head;
+ phys_addr_t msg_tail;
+ phys_addr_t msg_ring_addr;
+ __le16 msg_ring_entries;
+ u8 msg_irq_vector;
+ u8 device_info_irq_vector;
+};
+
+/**
+ * struct ipc_protocol_device_info - Structure for the device information
+ * @execution_stage: CP execution stage
+ * @ipc_status: IPC states
+ * @device_sleep_notification: Requested device pm states
+ */
+struct ipc_protocol_device_info {
+ __le32 execution_stage;
+ __le32 ipc_status;
+ __le32 device_sleep_notification;
+};
+
+/**
+ * struct ipc_protocol_ap_shm - Protocol Shared Memory Structure
+ * @ci: Context information struct
+ * @device_info: Device information struct
+ * @msg_head: Points to msg head
+ * @head_array: Array of head pointers
+ * @msg_tail: Points to msg tail
+ * @tail_array: Array of tail pointers
+ * @msg_ring: Circular buffer for the read/tail and write/head
+ * indices.
+ */
+struct ipc_protocol_ap_shm {
+ struct ipc_protocol_context_info ci;
+ struct ipc_protocol_device_info device_info;
+ __le32 msg_head;
+ __le32 head_array[IPC_MEM_MAX_PIPES];
+ __le32 msg_tail;
+ __le32 tail_array[IPC_MEM_MAX_PIPES];
+ union ipc_mem_msg_entry msg_ring[IPC_MEM_MSG_ENTRIES];
+};
+
+/**
+ * struct iosm_protocol - Structure for IPC protocol.
+ * @p_ap_shm: Pointer to Protocol Shared Memory Structure
+ * @pm: Instance to struct iosm_pm
+ * @pcie: Pointer to struct iosm_pcie
+ * @imem: Pointer to struct iosm_imem
+ * @rsp_ring: Array of OS completion objects to be triggered once CP
+ * acknowledges a request in the message ring
+ * @dev: Pointer to device structure
+ * @phy_ap_shm: Physical/Mapped representation of the shared memory info
+ * @old_msg_tail: Old msg tail ptr, until AP has handled ACKs from CP
+ */
+struct iosm_protocol {
+ struct ipc_protocol_ap_shm *p_ap_shm;
+ struct iosm_pm pm;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct ipc_rsp *rsp_ring[IPC_MEM_MSG_ENTRIES];
+ struct device *dev;
+ phys_addr_t phy_ap_shm;
+ u32 old_msg_tail;
+};
+
+/**
+ * struct ipc_call_msg_send_args - Structure of message arguments for the
+ * tasklet function.
+ * @prep_args: Arguments for message preparation function
+ * @response: Can be NULL if result can be ignored
+ * @msg_type: Message Type
+ */
+struct ipc_call_msg_send_args {
+ union ipc_msg_prep_args *prep_args;
+ struct ipc_rsp *response;
+ enum ipc_msg_prep_type msg_type;
+};
+
+/**
+ * ipc_protocol_tq_msg_send - prepare the msg and send to CP
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @msg_type: Message type
+ * @prep_args: Message arguments
+ * @response: Pointer to a response object which has a
+ * completion object and return code.
+ *
+ * Returns: Message index on success and failure value on error
+ */
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response);
+
+/**
+ * ipc_protocol_msg_send - Send ipc control message to CP and wait for response
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @prep: Message type
+ * @prep_args: Message arguments
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args);
+
+/**
+ * ipc_protocol_suspend - Signal to CP that host wants to go to sleep (suspend).
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can suspend, false if suspend must be aborted.
+ */
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_s2idle_sleep - Call PM function to set PM variables in s2idle
+ * sleep/active case
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @sleep: True for sleep/False for active
+ */
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep);
+
+/**
+ * ipc_protocol_resume - Signal to CP that host wants to resume operation.
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can resume, false if there is a problem.
+ */
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_sleep_handle - Handles the Device Sleep state change
+ * notification.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ *
+ * Returns: true if sleep notification handled, false otherwise.
+ */
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_doorbell_trigger - Wrapper for the PM function which wakes up
+ * the device if it is in low power mode
+ * and triggers a head pointer update interrupt.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ * @identifier: Specifies what component triggered hpda
+ * update irq
+ */
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier);
+
+/**
+ * ipc_protocol_sleep_notification_string - Returns last Sleep Notification as
+ * string.
+ * @ipc_protocol: Instance pointer of Protocol module.
+ *
+ * Returns: Pointer to string.
+ */
+const char *
+ipc_protocol_sleep_notification_string(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_init - Allocates IPC protocol instance
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Address of IPC protocol instance on success & NULL on failure.
+ */
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_deinit - Deallocates IPC protocol instance
+ * @ipc_protocol: pointer to the IPC protocol instance
+ */
+void ipc_protocol_deinit(struct iosm_protocol *ipc_protocol);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
new file mode 100644
index 000000000000..91109e27efd3
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Get the next free message element. */
+static union ipc_mem_msg_entry *
+ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
+{
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+ union ipc_mem_msg_entry *msg;
+
+ if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
+ dev_err(ipc_protocol->dev, "message ring is full");
+ return NULL;
+ }
+
+ /* Get the pointer to the next free message element,
+ * reset the fields and mark it as invalid.
+ */
+ msg = &ipc_protocol->p_ap_shm->msg_ring[head];
+ memset(msg, 0, sizeof(*msg));
+
+ /* return index in message ring */
+ *index = head;
+
+ return msg;
+}
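
The full check above is the classic one-slot-sacrifice ring: head is the next write slot, tail the next slot CP still owns, and the ring reports full when advancing head would collide with tail, so at most IPC_MEM_MSG_ENTRIES - 1 messages are ever outstanding. A compilable illustration of that invariant (standalone, not driver code):

	#include <assert.h>

	#define ENTRIES 128

	static int ring_is_full(unsigned int head, unsigned int tail)
	{
		return (head + 1) % ENTRIES == tail;
	}

	static unsigned int ring_used(unsigned int head, unsigned int tail)
	{
		return (head + ENTRIES - tail) % ENTRIES;
	}

	int main(void)
	{
		/* head one behind tail -> full, 127 of 128 slots in use */
		assert(ring_is_full(41, 42));
		assert(ring_used(41, 42) == ENTRIES - 1);

		/* empty ring: head == tail */
		assert(!ring_is_full(7, 7) && ring_used(7, 7) == 0);
		return 0;
	}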
+
+/* Updates the message ring Head pointer */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+
+ /* Update head pointer and fire doorbell. */
+ ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
+ ipc_protocol->old_msg_tail =
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
+}
+
+/* Allocate and prepare an OPEN_PIPE message.
+ * This also allocates the memory for the new TDR structure and
+ * updates the pipe structure referenced in the preparation arguments.
+ */
+static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_open.pipe;
+ struct ipc_protocol_td *tdr;
+ struct sk_buff **skbr;
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Allocate the skb ring elements for the buffers which are in flight.
+ * The SKB ring is internal memory allocation for the driver, so there
+ * is no need to re-calculate the start and end addresses.
+ */
+ skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
+ if (!skbr)
+ return -ENOMEM;
+
+ /* Allocate the transfer descriptors for the pipe. */
+ tdr = pci_alloc_consistent(ipc_protocol->pcie->pci,
+ pipe->nr_of_entries * sizeof(*tdr),
+ &pipe->phy_tdr_start);
+ if (!tdr) {
+ kfree(skbr);
+ dev_err(ipc_protocol->dev, "tdr alloc error");
+ return -ENOMEM;
+ }
+
+ pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
+ pipe->nr_of_queued_entries = 0;
+ pipe->tdr_start = tdr;
+ pipe->skbr_start = skbr;
+ pipe->old_tail = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
+ msg->open_pipe.pipe_nr = pipe->pipe_nr;
+ msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
+ msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
+ msg->open_pipe.accumulation_backoff =
+ cpu_to_le32(pipe->accumulation_backoff);
+ msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_close.pipe;
+
+ if (!msg)
+ return -EIO;
+
+ msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
+ msg->close_pipe.pipe_nr = pipe->pipe_nr;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
+ msg->close_pipe.pipe_nr);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Prepare and send the host sleep message to CP to enter or exit D3. */
+ msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
+ msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */
+
+ /* state; 0=enter, 1=exit, 2=enter w/o protocol */
+ msg->host_sleep.state = args->sleep.state;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
+ msg->host_sleep.target, msg->host_sleep.state);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
+ msg->feature_set.reset_enable = args->feature_set.reset_enable <<
+ RESET_BIT;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
+ msg->feature_set.reset_enable >> RESET_BIT);
+
+ return index;
+}
+
+/* Processes the message consumed by CP. */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
+ bool msg_processed = false;
+ u32 i;
+
+ if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
+ IPC_MEM_MSG_ENTRIES) {
+ dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
+ return msg_processed;
+ }
+
+ if (irq != IMEM_IRQ_DONT_CARE &&
+ irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
+ return msg_processed;
+
+ for (i = ipc_protocol->old_msg_tail;
+ i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+ i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
+ union ipc_mem_msg_entry *msg =
+ &ipc_protocol->p_ap_shm->msg_ring[i];
+
+ dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
+ msg->common.type_of_message,
+ msg->common.completion_status);
+
+ /* Update response with status and wake up waiting requestor */
+ if (rsp_ring[i]) {
+ rsp_ring[i]->status =
+ le32_to_cpu(msg->common.completion_status);
+ complete(&rsp_ring[i]->completion);
+ rsp_ring[i] = NULL;
+ }
+ msg_processed = true;
+ }
+
+ ipc_protocol->old_msg_tail = i;
+ return msg_processed;
+}
+
+/* Sends data from UL list to CP for the provided pipe by updating the Head
+ * pointer of given pipe.
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list)
+{
+ struct ipc_protocol_td *td;
+ bool hpda_pending = false;
+ struct sk_buff *skb;
+ s32 free_elements;
+ u32 head;
+ u32 tail;
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "driver is not initialized");
+ return false;
+ }
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ while (!skb_queue_empty(p_ul_list)) {
+ if (head < tail)
+ free_elements = tail - head - 1;
+ else
+ free_elements =
+ pipe->nr_of_entries - head + ((s32)tail - 1);
+
+ if (free_elements <= 0) {
+ dev_dbg(ipc_protocol->dev,
+ "no free td elements for UL pipe %d",
+ pipe->pipe_nr);
+ break;
+ }
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Take the first element of the uplink list and add it
+ * to the td list.
+ */
+ skb = skb_dequeue(p_ul_list);
+ if (WARN_ON(!skb))
+ break;
+
+ /* Save the reference to the uplink skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ td->buffer.address = IPC_CB(skb)->mapping;
+ td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ pipe->nr_of_queued_entries++;
+
+ /* Calculate the new head and save it. */
+ head++;
+ if (head >= pipe->nr_of_entries)
+ head = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(head);
+ }
+
+ if (pipe->old_head != head) {
+ dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);
+
+ pipe->old_head = head;
+ /* Trigger doorbell because of pending UL packets. */
+ hpda_pending = true;
+ }
+
+ return hpda_pending;
+}
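
The free-element computation in the loop above has two branches because head may or may not have wrapped past tail; both reduce to capacity minus occupancy minus the one reserved slot. A standalone check with illustrative values:

	#include <assert.h>

	static long free_elements(long head, long tail, long nr_of_entries)
	{
		if (head < tail)
			return tail - head - 1;
		return nr_of_entries - head + (tail - 1);
	}

	int main(void)
	{
		/* 16-entry pipe, head behind tail -> 8 - 4 - 1 = 3 free */
		assert(free_elements(4, 8, 16) == 3);
		/* head ahead of tail -> 16 - 10 + (2 - 1) = 7 free */
		assert(free_elements(10, 2, 16) == 7);
		/* empty pipe (head == tail) still reserves one slot */
		assert(free_elements(0, 0, 16) == 15);
		return 0;
	}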
+
+/* Checks for Tail pointer update from CP and returns the data as SKB. */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
+ struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];
+
+ pipe->nr_of_queued_entries--;
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "Td buffer address is NULL");
+ return NULL;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev,
+ "pipe %d: invalid buf_addr or skb_data",
+ pipe->pipe_nr);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/* Allocates an SKB for CP to send data and updates the Head Pointer
+ * of the given Pipe#.
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *td;
+ dma_addr_t mapping = 0;
+ u32 head, new_head;
+ struct sk_buff *skb;
+ u32 tail;
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
+
+ new_head = head + 1;
+ if (new_head >= pipe->nr_of_entries)
+ new_head = 0;
+
+ if (new_head == tail)
+ return false;
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Allocate the skbuf for the descriptor. */
+ skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
+ &mapping, DMA_FROM_DEVICE,
+ IPC_MEM_DL_ETH_OFFSET);
+ if (!skb)
+ return false;
+
+ td->buffer.address = mapping;
+ td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ /* store the new head value. */
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(new_head);
+
+ /* Save the reference to the skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ pipe->nr_of_queued_entries++;
+
+ return true;
+}
+
+/* Processes DL TDs */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ u32 tail =
+ le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
+ struct ipc_protocol_td *p_td;
+ struct sk_buff *skb;
+
+ if (!pipe->tdr_start)
+ return NULL;
+
+ /* Copy the reference to the downlink buffer. */
+ p_td = &pipe->tdr_start[pipe->old_tail];
+ skb = pipe->skbr_start[pipe->old_tail];
+
+ /* Reset the ring elements. */
+ pipe->skbr_start[pipe->old_tail] = NULL;
+
+ pipe->nr_of_queued_entries--;
+
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!skb) {
+ dev_err(ipc_protocol->dev, "skb is null");
+ goto ret;
+ } else if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "td/buffer address is null");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ if (!IPC_CB(skb)) {
+ dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL",
+ pipe->pipe_nr, tail);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
+ (void *)p_td->buffer.address, skb->data);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
+ dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
+ le32_to_cpu(p_td->scs) & SIZE_MASK,
+ pipe->buf_size);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
+ IPC_MEM_TD_CS_ABORT) {
+ /* Discard aborted buffers. */
+ dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ /* Set the length field in skbuf. */
+ skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);
+
+ret:
+ return skb;
+}
+
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ if (head)
+ *head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);
+
+ if (tail)
+ *tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
+}
+
+/* Frees the TDs given to CP. */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+ u32 head;
+ u32 tail;
+
+ /* Get the start and the end of the buffer list. */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ /* Reset tail and head to 0. */
+ ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ /* Free pending uplink and downlink buffers. */
+ if (pipe->skbr_start) {
+ while (head != tail) {
+ /* Get the reference to the skbuf,
+ * which is in flight, and free it.
+ */
+ skb = pipe->skbr_start[tail];
+ if (skb)
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+
+ tail++;
+ if (tail >= pipe->nr_of_entries)
+ tail = 0;
+ }
+
+ kfree(pipe->skbr_start);
+ pipe->skbr_start = NULL;
+ }
+
+ pipe->old_tail = 0;
+
+ /* Free and reset the td and skbuf circular buffers. kfree is safe! */
+ if (pipe->tdr_start) {
+ pci_free_consistent(ipc_protocol->pcie->pci,
+ sizeof(*pipe->tdr_start) *
+ pipe->nr_of_entries,
+ pipe->tdr_start, pipe->phy_tdr_start);
+
+ pipe->tdr_start = NULL;
+ }
+}
+
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol)
+{
+ return (enum ipc_mem_device_ipc_state)
+ le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
+}
+
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
+{
+ return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
+}
+
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ switch (msg_type) {
+ case IPC_MSG_PREP_SLEEP:
+ return ipc_protocol_msg_prep_sleep(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_OPEN:
+ return ipc_protocol_msg_prepipe_open(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_CLOSE:
+ return ipc_protocol_msg_prepipe_close(ipc_protocol, args);
+
+ case IPC_MSG_PREP_FEATURE_SET:
+ return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);
+
+ /* Unsupported messages in protocol */
+ case IPC_MSG_PREP_MAP:
+ case IPC_MSG_PREP_UNMAP:
+ default:
+ dev_err(ipc_protocol->dev,
+ "unsupported message type: %d in protocol", msg_type);
+ return -EINVAL;
+ }
+}
+
+u32
+ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
new file mode 100644
index 000000000000..35aa1387306e
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_OPS_H
+#define IOSM_IPC_PROTOCOL_OPS_H
+
+#define SIZE_MASK 0x00FFFFFF
+#define COMPLETION_STATUS 24
+#define RESET_BIT 7
+
+/**
+ * enum ipc_mem_td_cs - Completion status of a TD
+ * @IPC_MEM_TD_CS_INVALID: Initial status - td not yet used.
+ * @IPC_MEM_TD_CS_PARTIAL_TRANSFER: More data pending -> next TD used for this
+ * @IPC_MEM_TD_CS_END_TRANSFER: IO transfer is complete.
+ * @IPC_MEM_TD_CS_OVERFLOW: IO transfer too small for the buffer to write
+ * @IPC_MEM_TD_CS_ABORT: TD marked as abort and shall be discarded
+ * by AP.
+ * @IPC_MEM_TD_CS_ERROR: General error.
+ */
+enum ipc_mem_td_cs {
+ IPC_MEM_TD_CS_INVALID,
+ IPC_MEM_TD_CS_PARTIAL_TRANSFER,
+ IPC_MEM_TD_CS_END_TRANSFER,
+ IPC_MEM_TD_CS_OVERFLOW,
+ IPC_MEM_TD_CS_ABORT,
+ IPC_MEM_TD_CS_ERROR,
+};
+
+/**
+ * enum ipc_mem_msg_cs - Completion status of IPC Message
+ * @IPC_MEM_MSG_CS_INVALID: Initial status.
+ * @IPC_MEM_MSG_CS_SUCCESS: IPC Message completion success.
+ * @IPC_MEM_MSG_CS_ERROR: Message send error.
+ */
+enum ipc_mem_msg_cs {
+ IPC_MEM_MSG_CS_INVALID,
+ IPC_MEM_MSG_CS_SUCCESS,
+ IPC_MEM_MSG_CS_ERROR,
+};
+
+/**
+ * struct ipc_msg_prep_args_pipe - struct for pipe args for message preparation
+ * @pipe: Pipe to open/close
+ */
+struct ipc_msg_prep_args_pipe {
+ struct ipc_pipe *pipe;
+};
+
+/**
+ * struct ipc_msg_prep_args_sleep - struct for sleep args for message
+ * preparation
+ * @target: 0=host, 1=device
+ * @state: 0=enter sleep, 1=exit sleep
+ */
+struct ipc_msg_prep_args_sleep {
+ unsigned int target;
+ unsigned int state;
+};
+
+/**
+ * struct ipc_msg_prep_feature_set - struct for feature set argument for
+ * message preparation
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ */
+struct ipc_msg_prep_feature_set {
+ u8 reset_enable;
+};
+
+/**
+ * struct ipc_msg_prep_map - struct for map argument for message preparation
+ * @region_id: Region to map
+ * @addr: Pcie addr of region to map
+ * @size: Size of the region to map
+ */
+struct ipc_msg_prep_map {
+ unsigned int region_id;
+ unsigned long addr;
+ size_t size;
+};
+
+/**
+ * struct ipc_msg_prep_unmap - struct for unmap argument for message preparation
+ * @region_id: Region to unmap
+ */
+struct ipc_msg_prep_unmap {
+ unsigned int region_id;
+};
+
+/**
+ * struct ipc_msg_prep_args - Union to handle different message types
+ * @pipe_open: Pipe open message preparation struct
+ * @pipe_close: Pipe close message preparation struct
+ * @sleep: Sleep message preparation struct
+ * @feature_set: Feature set message preparation struct
+ * @map: Memory map message preparation struct
+ * @unmap: Memory unmap message preparation struct
+ */
+union ipc_msg_prep_args {
+ struct ipc_msg_prep_args_pipe pipe_open;
+ struct ipc_msg_prep_args_pipe pipe_close;
+ struct ipc_msg_prep_args_sleep sleep;
+ struct ipc_msg_prep_feature_set feature_set;
+ struct ipc_msg_prep_map map;
+ struct ipc_msg_prep_unmap unmap;
+};
+
+/**
+ * enum ipc_msg_prep_type - Enum for message prepare actions
+ * @IPC_MSG_PREP_SLEEP: Sleep message preparation type
+ * @IPC_MSG_PREP_PIPE_OPEN: Pipe open message preparation type
+ * @IPC_MSG_PREP_PIPE_CLOSE: Pipe close message preparation type
+ * @IPC_MSG_PREP_FEATURE_SET: Feature set message preparation type
+ * @IPC_MSG_PREP_MAP: Memory map message preparation type
+ * @IPC_MSG_PREP_UNMAP: Memory unmap message preparation type
+ */
+enum ipc_msg_prep_type {
+ IPC_MSG_PREP_SLEEP,
+ IPC_MSG_PREP_PIPE_OPEN,
+ IPC_MSG_PREP_PIPE_CLOSE,
+ IPC_MSG_PREP_FEATURE_SET,
+ IPC_MSG_PREP_MAP,
+ IPC_MSG_PREP_UNMAP,
+};
+
+/**
+ * struct ipc_rsp - Response to sent message
+ * @completion: For waking up requestor
+ * @status: Completion status
+ */
+struct ipc_rsp {
+ struct completion completion;
+ enum ipc_mem_msg_cs status;
+};
+
+/**
+ * enum ipc_mem_msg - Type-definition of the messages.
+ * @IPC_MEM_MSG_OPEN_PIPE: AP ->CP: Open a pipe
+ * @IPC_MEM_MSG_CLOSE_PIPE: AP ->CP: Close a pipe
+ * @IPC_MEM_MSG_ABORT_PIPE: AP ->CP: wait for completion of the
+ * running transfer and abort all pending
+ * IO-transfers for the pipe
+ * @IPC_MEM_MSG_SLEEP: AP ->CP: host enter or exit sleep
+ * @IPC_MEM_MSG_FEATURE_SET: AP ->CP: Intel feature configuration
+ */
+enum ipc_mem_msg {
+ IPC_MEM_MSG_OPEN_PIPE = 0x01,
+ IPC_MEM_MSG_CLOSE_PIPE = 0x02,
+ IPC_MEM_MSG_ABORT_PIPE = 0x03,
+ IPC_MEM_MSG_SLEEP = 0x04,
+ IPC_MEM_MSG_FEATURE_SET = 0xF0,
+};
+
+/**
+ * struct ipc_mem_msg_open_pipe - Message structure for open pipe
+ * @tdr_addr: Tdr address
+ * @tdr_entries: Tdr entries
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @irq_vector: MSI vector number
+ * @accumulation_backoff: Time in usec for data accumulation
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_open_pipe {
+ __le64 tdr_addr;
+ __le16 tdr_entries;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 irq_vector;
+ __le32 accumulation_backoff;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_close_pipe - Message structure for close pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_close_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_abort_pipe - Message structure for abort pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_abort_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_host_sleep - Message structure for sleep message.
+ * @reserved1: Reserved
+ * @target: 0=host, 1=device; host or EP device
+ * is the message target
+ * @state: 0=enter sleep, 1=exit sleep,
+ * 2=enter sleep no protocol
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_host_sleep {
+ __le32 reserved1[2];
+ u8 target;
+ u8 state;
+ u8 reserved2;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_feature_set - Message structure for feature_set message
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_feature_set {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 reset_enable;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_common - Message structure for completion status update.
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_common {
+ __le32 reserved1[2];
+ u8 reserved2[3];
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * union ipc_mem_msg_entry - Union with all possible messages.
+ * @open_pipe: Open pipe message struct
+ * @close_pipe: Close pipe message struct
+ * @abort_pipe: Abort pipe message struct
+ * @host_sleep: Host sleep message struct
+ * @feature_set: Feature set message struct
+ * @common: Used to access msg_type and to set the completion status
+ */
+union ipc_mem_msg_entry {
+ struct ipc_mem_msg_open_pipe open_pipe;
+ struct ipc_mem_msg_close_pipe close_pipe;
+ struct ipc_mem_msg_abort_pipe abort_pipe;
+ struct ipc_mem_msg_host_sleep host_sleep;
+ struct ipc_mem_msg_feature_set feature_set;
+ struct ipc_mem_msg_common common;
+};
+
+/* Transfer descriptor definition. */
+struct ipc_protocol_td {
+ union {
+ /* 0 : 63 - 64-bit address of a buffer in host memory. */
+ dma_addr_t address;
+ struct {
+ /* 0 : 31 - 32 bit address */
+ __le32 address;
+ /* 32 : 63 - corresponding descriptor */
+ __le32 desc;
+ } __packed shm;
+ } buffer;
+
+ /* 0 - 2nd byte - Size of the buffer.
+ * The host provides the size of the buffer queued.
+ * The EP device reads this value and shall update
+ * it for downlink transfers to indicate the
+ * amount of data written in buffer.
+ * 3rd byte - This field provides the completion status
+ * of the TD. When queuing the TD, the host sets
+ * the status to 0. The EP device updates this
+ * field when completing the TD.
+ */
+ __le32 scs;
+
+ /* 0th byte - number of following descriptors,
+ * 1st - 3rd byte - reserved
+ */
+ __le32 next;
+} __packed;
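
The scs word of the TD multiplexes a 24-bit size and an 8-bit completion status, which is what SIZE_MASK and the COMPLETION_STATUS shift at the top of this header encode. A standalone round-trip of that packing (host byte order only; the driver additionally applies le32 conversions):

	#include <assert.h>
	#include <stdint.h>

	#define SIZE_MASK 0x00FFFFFF
	#define COMPLETION_STATUS 24

	int main(void)
	{
		/* CP writes 1500 bytes, completes with END_TRANSFER (2). */
		uint32_t scs = (2u << COMPLETION_STATUS) | (1500u & SIZE_MASK);

		assert((scs & SIZE_MASK) == 1500);	 /* bytes written */
		assert((scs >> COMPLETION_STATUS) == 2); /* completion status */
		return 0;
	}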
+
+/**
+ * ipc_protocol_msg_prep - Prepare message based upon message type
+ * @ipc_imem: iosm_protocol instance
+ * @msg_type: message prepare type
+ * @args: message arguments
+ *
+ * Return: Message index on success and failure value on error
+ */
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args);
+
+/**
+ * ipc_protocol_msg_hp_update - Function for head pointer update
+ * of message ring
+ * @ipc_imem: iosm_protocol instance
+ */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_msg_process - Function for processing responses
+ * to IPC messages
+ * @ipc_imem: iosm_protocol instance
+ * @irq: IRQ vector
+ *
+ * Return: True on success, false if error
+ */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * ipc_protocol_ul_td_send - Function for sending the data to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ * @p_ul_list: uplink sk_buff list
+ *
+ * Return: true on success, false in case of error
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list);
+
+/**
+ * ipc_protocol_ul_td_process - Function for processing the sent data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_prepare - Function for providing DL TDs to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: true on success, false in case of error
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_process - Function for processing the DL data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_head_tail_index - Function for getting Head and Tail
+ * pointer index of given pipe
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe Instance
+ * @head: head pointer index of the given pipe
+ * @tail: tail pointer index of the given pipe
+ */
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail);
+
+/**
+ * ipc_protocol_get_ipc_status - Function for getting the IPC Status
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: IPC state
+ */
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol);
+
+/**
+ * ipc_protocol_pipe_cleanup - Function to cleanup pipe resources
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_ap_exec_stage - Function for getting AP Exec Stage
+ * @ipc_protocol: pointer to struct iosm protocol
+ *
+ * Return: AP execution stage
+ */
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_get_sleep_notification - Function for getting Dev Sleep
+ * notification
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: Device sleep notification state
+ */
+u32 ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol
+ *ipc_protocol);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.c b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
new file mode 100644
index 000000000000..852a99166144
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Actual tasklet function, called whenever the tasklet is scheduled.
+ * Invokes the event handler callback for each element in the message queue.
+ */
+static void ipc_task_queue_handler(unsigned long data)
+{
+ struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ /* Loop over the input queue contents. */
+ while (q_rpos != ipc_task->q_wpos) {
+ /* Get the current first queue element. */
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ /* Process the input message. */
+ if (args->func)
+ args->response = args->func(args->ipc_imem, args->arg,
+ args->msg, args->size);
+
+ /* Signal completion for synchronous calls */
+ if (args->completion)
+ complete(args->completion);
+
+ /* Free message if copy was allocated. */
+ if (args->is_copy)
+ kfree(args->msg);
+
+ /* Invalidate the queue element. Technically
+ * spin_lock_irqsave is not required here, as
+ * the array element has already been processed,
+ * so we can assume that the queue will not
+ * rotate back to this same element within such
+ * a short time.
+ */
+ args->completion = NULL;
+ args->func = NULL;
+ args->msg = NULL;
+ args->size = 0;
+ args->is_copy = false;
+
+ /* calculate the new read pointer and update the shared
+ * read pointer
+ */
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Free allocated message copies and trigger any pending completions left in
+ * the queue during deallocation.
+ */
+static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
+{
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ while (q_rpos != ipc_task->q_wpos) {
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ if (args->completion)
+ complete(args->completion);
+
+ if (args->is_copy)
+ kfree(args->msg);
+
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Add a message to the queue and trigger the ipc_task. */
+static int
+ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ size_t size, bool is_copy, bool wait)
+{
+ struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
+ struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
+ struct completion completion;
+ unsigned int pos, nextpos;
+ unsigned long flags;
+ int result = -EIO;
+
+ init_completion(&completion);
+
+ /* tasklet send may be called from either interrupt or thread
+ * context, therefore protect the queue operation with a spinlock
+ */
+ spin_lock_irqsave(&ipc_task->q_lock, flags);
+
+ pos = ipc_task->q_wpos;
+ nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
+
+ /* Get next queue position. */
+ if (nextpos != ipc_task->q_rpos) {
+ /* Get the reference to the queue element and save the passed
+ * values.
+ */
+ ipc_task->args[pos].arg = arg;
+ ipc_task->args[pos].msg = msg;
+ ipc_task->args[pos].func = func;
+ ipc_task->args[pos].ipc_imem = ipc_imem;
+ ipc_task->args[pos].size = size;
+ ipc_task->args[pos].is_copy = is_copy;
+ ipc_task->args[pos].completion = wait ? &completion : NULL;
+ ipc_task->args[pos].response = -1;
+
+ /* apply write barrier so that the queue element (args)
+ * fields are updated before ipc_task->q_wpos is updated.
+ */
+ smp_wmb();
+
+ /* Update the status of the free queue space. */
+ ipc_task->q_wpos = nextpos;
+ result = 0;
+ }
+
+ spin_unlock_irqrestore(&ipc_task->q_lock, flags);
+
+ if (result == 0) {
+ tasklet_schedule(ipc_tasklet);
+
+ if (wait) {
+ wait_for_completion(&completion);
+ result = ipc_task->args[pos].response;
+ }
+ } else {
+ dev_err(ipc_imem->ipc_task->dev, "queue is full");
+ }
+
+ return result;
+}
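
The smp_wmb() above is the producer half of a publish pattern: every field of args[pos] must be globally visible before the new q_wpos is. Outside the kernel the same ordering is usually written as a release store; a C11 sketch of the idea (illustrative, not the kernel primitive):

	#include <stdatomic.h>

	struct slot { int arg; void *msg; };

	static struct slot queue[256];
	static _Atomic unsigned int wpos;

	static void publish(int arg, void *msg)
	{
		unsigned int pos =
			atomic_load_explicit(&wpos, memory_order_relaxed);

		queue[pos % 256].arg = arg;	/* fill the element first ... */
		queue[pos % 256].msg = msg;

		/* ... then publish: the release store orders the writes above
		 * before the new write position becomes visible to a consumer.
		 */
		atomic_store_explicit(&wpos, pos + 1, memory_order_release);
	}

	int main(void)
	{
		int v = 42;

		publish(1, &v);
		return 0;
	}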
+
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait)
+{
+ bool is_copy = false;
+ void *copy = msg;
+ int ret = -ENOMEM;
+
+ if (size > 0) {
+ copy = kmemdup(msg, size, GFP_ATOMIC);
+ if (!copy)
+ goto out;
+
+ is_copy = true;
+ }
+
+ ret = ipc_task_queue_add_task(imem, arg, copy, func,
+ size, is_copy, wait);
+ if (ret < 0) {
+ dev_err(imem->ipc_task->dev,
+ "add task failed for %ps %d, %p, %zu, %d", func, arg,
+ copy, size, is_copy);
+ if (is_copy)
+ kfree(copy);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int ipc_task_init(struct ipc_task *ipc_task)
+{
+ struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
+
+ ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
+ GFP_KERNEL);
+
+ if (!ipc_task->ipc_tasklet)
+ return -ENOMEM;
+
+ /* Initialize the spinlock needed to protect the message queue of the
+ * ipc_task
+ */
+ spin_lock_init(&ipc_queue->q_lock);
+
+ tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
+ (unsigned long)ipc_queue);
+ return 0;
+}
+
+void ipc_task_deinit(struct ipc_task *ipc_task)
+{
+ tasklet_kill(ipc_task->ipc_tasklet);
+
+ kfree(ipc_task->ipc_tasklet);
+ /* This will free/complete any outstanding messages,
+ * without calling the actual handler
+ */
+ ipc_task_queue_cleanup(&ipc_task->ipc_queue);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.h b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
new file mode 100644
index 000000000000..df6e9cd925a9
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TASK_QUEUE_H
+#define IOSM_IPC_TASK_QUEUE_H
+
+/* Number of available elements for the input message queue of the IPC
+ * ipc_task
+ */
+#define IPC_THREAD_QUEUE_SIZE 256
+
+/**
+ * struct ipc_task_queue_args - Struct for Task queue elements
+ * @ipc_imem: Pointer to struct iosm_imem
+ * @msg: Message argument for tasklet function. (optional, can be NULL)
+ * @completion: OS object used to wait for the tasklet function to finish for
+ * synchronous calls
+ * @func: Function to be called in tasklet (tl) context
+ * @arg: Generic integer argument for tasklet function (optional)
+ * @size: Message size argument for tasklet function (optional)
+ * @response: Return code of tasklet function for synchronous calls
+ * @is_copy: True if msg contains a pointer to a copy of the original msg
+ * for async calls, which needs to be freed once the tasklet returns
+ */
+struct ipc_task_queue_args {
+ struct iosm_imem *ipc_imem;
+ void *msg;
+ struct completion *completion;
+ int (*func)(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size);
+ int arg;
+ size_t size;
+ int response;
+ u8 is_copy:1;
+};
+
+/**
+ * struct ipc_task_queue - Struct for Task queue
+ * @q_lock: Protects the message queue of the ipc_task
+ * @args: Message queue of the ipc_task
+ * @q_rpos: First queue element to process.
+ * @q_wpos: First free element of the input queue.
+ */
+struct ipc_task_queue {
+ spinlock_t q_lock; /* for atomic operation on queue */
+ struct ipc_task_queue_args args[IPC_THREAD_QUEUE_SIZE];
+ unsigned int q_rpos;
+ unsigned int q_wpos;
+};
+
+/**
+ * struct ipc_task - Struct for Task
+ * @dev: Pointer to device structure
+ * @ipc_tasklet: Tasklet for serialized work offload
+ * from interrupts and OS callbacks
+ * @ipc_queue: Input message queue of the ipc task
+ */
+struct ipc_task {
+ struct device *dev;
+ struct tasklet_struct *ipc_tasklet;
+ struct ipc_task_queue ipc_queue;
+};
+
+/**
+ * ipc_task_init - Allocate a tasklet
+ * @ipc_task: Pointer to ipc_task structure
+ *
+ * Returns: 0 on success and failure value on error.
+ */
+int ipc_task_init(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_deinit - Free a tasklet, invalidating its pointer.
+ * @ipc_task: Pointer to ipc_task structure
+ */
+void ipc_task_deinit(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_queue_send_task - Synchronously/Asynchronously call a function in
+ * tasklet context.
+ * @imem: Pointer to iosm_imem struct
+ * @func: Function to be called in tasklet context
+ * @arg: Integer argument for func
+ * @msg: Message pointer argument for func
+ * @size: Size argument for func
+ * @wait: if true wait for result
+ *
+ * Returns: Result value returned by func or failure value if func could not
+ * be called.
+ */
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait);
+
+#endif
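
A usage sketch for ipc_task_queue_send_task() (the callback and payload here are hypothetical): with size > 0 the payload is kmemdup()'ed, so a stack buffer is safe, and wait=true turns the call into a synchronous hop into tasklet context whose return value is the callback's response.

	static int example_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
	{
		/* Runs in tasklet context; msg points at the kmemdup'ed copy,
		 * which the queue frees after this callback returns.
		 */
		return 0;
	}

	static int example_send(struct iosm_imem *ipc_imem)
	{
		u32 payload = 0xC0FFEE;	/* copied by the queue, stack is fine */

		return ipc_task_queue_send_task(ipc_imem, example_cb, 0,
						&payload, sizeof(payload),
						true);
	}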
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
new file mode 100644
index 000000000000..2229d752926c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_uevent.h"
+
+/* Send the uevent from work queue context */
+static void ipc_uevent_work(struct work_struct *data)
+{
+ struct ipc_uevent_info *info;
+ char *envp[2] = { NULL, NULL };
+
+ info = container_of(data, struct ipc_uevent_info, work);
+
+ envp[0] = info->uevent;
+
+ if (kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp))
+ pr_err("uevent %s failed to sent", info->uevent);
+
+ kfree(info);
+}
+
+void ipc_uevent_send(struct device *dev, char *uevent)
+{
+ struct ipc_uevent_info *info = kzalloc(sizeof(*info), GFP_ATOMIC);
+
+ if (!info)
+ return;
+
+ /* Initialize the work item */
+ INIT_WORK(&info->work, ipc_uevent_work);
+
+ /* Store the device and event information */
+ info->dev = dev;
+ snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+
+ /* Schedule uevent in process context using work queue */
+ schedule_work(&info->work);
+}
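+
+/* Usage sketch (illustrative): callers pass one of the event strings from
+ * iosm_ipc_uevent.h, e.g.:
+ *
+ *   ipc_uevent_send(dev, UEVENT_MDM_READY);
+ *
+ * The KOBJ_CHANGE event is then emitted from process context by the work
+ * item above.
+ */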
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.h b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
new file mode 100644
index 000000000000..2e45c051b5f4
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_UEVENT_H
+#define IOSM_IPC_UEVENT_H
+
+/* Baseband event strings */
+#define UEVENT_MDM_NOT_READY "MDM_NOT_READY"
+#define UEVENT_ROM_READY "ROM_READY"
+#define UEVENT_MDM_READY "MDM_READY"
+#define UEVENT_CRASH "CRASH"
+#define UEVENT_CD_READY "CD_READY"
+#define UEVENT_CD_READY_LINK_DOWN "CD_READY_LINK_DOWN"
+#define UEVENT_MDM_TIMEOUT "MDM_TIMEOUT"
+
+/* Maximum length of user events */
+#define MAX_UEVENT_LEN 64
+
+/**
+ * struct ipc_uevent_info - Uevent information structure.
+ * @dev: Pointer to device structure
+ * @uevent: Uevent information
+ * @work: Uevent work struct
+ */
+struct ipc_uevent_info {
+ struct device *dev;
+ char uevent[MAX_UEVENT_LEN];
+ struct work_struct work;
+};
+
+/**
+ * ipc_uevent_send - Send modem event to user space.
+ * @dev: Generic device pointer
+ * @uevent: Uevent information
+ *
+ */
+void ipc_uevent_send(struct device *dev, char *uevent);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
new file mode 100644
index 000000000000..c999c64001f4
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/rtnetlink.h>
+#include <linux/wwan.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_wwan.h"
+
+#define IOSM_IP_TYPE_MASK 0xF0
+#define IOSM_IP_TYPE_IPV4 0x40
+#define IOSM_IP_TYPE_IPV6 0x60
+
+#define IOSM_IF_ID_PAYLOAD 2
+
+/**
+ * struct iosm_netdev_priv - netdev WWAN driver specific private data
+ * @ipc_wwan: Pointer to iosm_wwan struct
+ * @netdev: Pointer to network interface device structure
+ * @if_id: Interface id for device.
+ * @ch_id: IPC channel number for which interface device is created.
+ */
+struct iosm_netdev_priv {
+ struct iosm_wwan *ipc_wwan;
+ struct net_device *netdev;
+ int if_id;
+ int ch_id;
+};
+
+/**
+ * struct iosm_wwan - Information about the WWAN root device and its
+ * interface to the IPC layer.
+ * @ipc_imem: Pointer to imem data-struct
+ * @sub_netlist: List of active netdevs
+ * @dev: Pointer to device structure
+ * @if_mutex: Mutex used for add and remove interface id
+ */
+struct iosm_wwan {
+ struct iosm_imem *ipc_imem;
+ struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
+ struct device *dev;
+ struct mutex if_mutex; /* Mutex used for add and remove interface id */
+};
+
+/* Bring up the wwan net link */
+static int ipc_wwan_link_open(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ int if_id = priv->if_id;
+ int ret;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+
+ /* get channel id */
+ priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
+
+ if (priv->ch_id < 0) {
+ dev_err(ipc_wwan->dev,
+ "cannot connect wwan0 & id %d to the IPC mem layer",
+ if_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* enable tx path, DL data may follow */
+ netif_start_queue(netdev);
+
+ dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
+ priv->ch_id, priv->if_id);
+
+ ret = 0;
+out:
+ mutex_unlock(&ipc_wwan->if_mutex);
+ return ret;
+}
+
+/* Bring down the wwan net link */
+static int ipc_wwan_link_stop(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+
+ netif_stop_queue(netdev);
+
+ mutex_lock(&priv->ipc_wwan->if_mutex);
+ ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
+ priv->ch_id);
+ priv->ch_id = -1;
+ mutex_unlock(&priv->ipc_wwan->if_mutex);
+
+ return 0;
+}
+
+/* Transmit a packet */
+static int ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ int if_id = priv->if_id;
+ int ret;
+
+ /* Interface IDs from 1 to 8 are for IP data
+ * & from 257 to 261 are for non-IP data
+ */
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ /* Send the SKB to device for transmission */
+ ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
+ if_id, priv->ch_id, skb);
+
+ /* Return code of zero is success */
+ if (ret == 0) {
+ ret = NETDEV_TX_OK;
+ } else if (ret == -EBUSY) {
+ ret = NETDEV_TX_BUSY;
+ dev_err(ipc_wwan->dev, "unable to push packets");
+ } else {
+ goto exit;
+ }
+
+ return ret;
+
+exit:
+ /* Log any skb drop */
+ if (if_id)
+ dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
+ ret);
+
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+/* Ops structure for wwan net link */
+static const struct net_device_ops ipc_inm_ops = {
+ .ndo_open = ipc_wwan_link_open,
+ .ndo_stop = ipc_wwan_link_stop,
+ .ndo_start_xmit = ipc_wwan_link_transmit,
+};
+
+/* Setup function for creating new net link */
+static void ipc_wwan_setup(struct net_device *iosm_dev)
+{
+ iosm_dev->header_ops = NULL;
+ iosm_dev->hard_header_len = 0;
+ iosm_dev->priv_flags |= IFF_NO_QUEUE;
+
+ iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->min_mtu = ETH_MIN_MTU;
+ iosm_dev->max_mtu = ETH_MAX_MTU;
+
+ iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ iosm_dev->netdev_ops = &ipc_inm_ops;
+}
+
+/* Create new wwan net link */
+static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack)
+{
+ struct iosm_wwan *ipc_wwan = ctxt;
+ struct iosm_netdev_priv *priv;
+ int err;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ priv = wwan_netdev_drvpriv(dev);
+ priv->if_id = if_id;
+ priv->netdev = dev;
+ priv->ipc_wwan = ipc_wwan;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+ if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ err = register_netdevice(dev);
+ if (err)
+ goto out_unlock;
+
+ rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
+ mutex_unlock(&ipc_wwan->if_mutex);
+
+ netif_device_attach(dev);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ipc_wwan->if_mutex);
+ return err;
+}
+
+static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
+ struct list_head *head)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
+ struct iosm_wwan *ipc_wwan = ctxt;
+ int if_id = priv->if_id;
+
+ if (WARN_ON(if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
+ return;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+
+ if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
+ goto unlock;
+
+ RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
+ /* unregistering includes synchronize_net() */
+ unregister_netdevice(dev);
+
+unlock:
+ mutex_unlock(&ipc_wwan->if_mutex);
+}
+
+static const struct wwan_ops iosm_wwan_ops = {
+ .priv_size = sizeof(struct iosm_netdev_priv),
+ .setup = ipc_wwan_setup,
+ .newlink = ipc_wwan_newlink,
+ .dellink = ipc_wwan_dellink,
+};
+
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id)
+{
+ struct sk_buff *skb = skb_arg;
+ struct net_device_stats *stats;
+ struct iosm_netdev_priv *priv;
+ int ret;
+
+ if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
+ IOSM_IP_TYPE_IPV6)
+ skb->protocol = htons(ETH_P_IPV6);
+
+ skb->pkt_type = PACKET_HOST;
+
+ if (if_id < (IP_MUX_SESSION_START - 1) ||
+ if_id > (IP_MUX_SESSION_END - 1)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ skb->dev = priv->netdev;
+ stats = &priv->netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ ret = netif_rx(skb);
+ skb = NULL;
+unlock:
+ rcu_read_unlock();
+free:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
+{
+ struct net_device *netdev;
+ struct iosm_netdev_priv *priv;
+ bool is_tx_blk;
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ rcu_read_unlock();
+ return;
+ }
+
+ netdev = priv->netdev;
+
+ is_tx_blk = netif_queue_stopped(netdev);
+
+ if (on)
+ dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
+ if_id);
+
+ if (on && !is_tx_blk)
+ netif_stop_queue(netdev);
+ else if (!on && is_tx_blk)
+ netif_wake_queue(netdev);
+ rcu_read_unlock();
+}
+
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
+{
+ struct iosm_wwan *ipc_wwan;
+
+ ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
+ if (!ipc_wwan)
+ return NULL;
+
+ ipc_wwan->dev = dev;
+ ipc_wwan->ipc_imem = ipc_imem;
+
+ /* WWAN core will create a netdev for the default IP MUX channel */
+ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
+ IP_MUX_SESSION_DEFAULT)) {
+ kfree(ipc_wwan);
+ return NULL;
+ }
+
+ mutex_init(&ipc_wwan->if_mutex);
+
+ return ipc_wwan;
+}
+
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
+{
+ /* This call will remove all child netdev(s) */
+ wwan_unregister_ops(ipc_wwan->dev);
+
+ mutex_destroy(&ipc_wwan->if_mutex);
+
+ kfree(ipc_wwan);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
new file mode 100644
index 000000000000..4925f22dff0a
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_WWAN_H
+#define IOSM_IPC_WWAN_H
+
+/**
+ * ipc_wwan_init - Allocate, initialize and register WWAN device
+ * @ipc_imem: Pointer to imem data-struct
+ * @dev: Pointer to device structure
+ *
+ * Returns: Pointer to instance on success else NULL
+ */
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev);
+
+/**
+ * ipc_wwan_deinit - Unregister and free WWAN device, clear pointer
+ * @ipc_wwan: Pointer to wwan instance data
+ */
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan);
+
+/**
+ * ipc_wwan_receive - Receive a downlink packet from CP.
+ * @ipc_wwan: Pointer to wwan instance
+ * @skb_arg: Pointer to struct sk_buff
+ * @dss: Set to true if the interface id is from 257 to 261,
+ * else false
+ * @if_id: Interface ID
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id);
+
+/**
+ * ipc_wwan_tx_flowctrl - Enable/Disable TX flow control
+ * @ipc_wwan: Pointer to wwan instance
+ * @id: Ipc mux channel session id
+ * @on: if true, enable flow control, else disable it
+ *
+ */
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on);
+
+/**
+ * ipc_wwan_is_tx_stopped - Checks if TX is stopped for an interface id.
+ * @ipc_wwan: Pointer to wwan instance
+ * @id: Ipc mux channel session id
+ *
+ * Return: true if stopped, false otherwise
+ */
+bool ipc_wwan_is_tx_stopped(struct iosm_wwan *ipc_wwan, int id);
+
+#endif
diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c
new file mode 100644
index 000000000000..31c24420ab2e
--- /dev/null
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Stephan Gerhold <stephan@gerhold.net> */
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/wwan.h>
+
+struct rpmsg_wwan_dev {
+ /* Lower level is a rpmsg dev, upper level is a wwan port */
+ struct rpmsg_device *rpdev;
+ struct wwan_port *wwan_port;
+ struct rpmsg_endpoint *ept;
+};
+
+static int rpmsg_wwan_ctrl_callback(struct rpmsg_device *rpdev,
+ void *buf, int len, void *priv, u32 src)
+{
+ struct rpmsg_wwan_dev *rpwwan = priv;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, buf, len);
+ wwan_port_rx(rpwwan->wwan_port, skb);
+ return 0;
+}
+
+static int rpmsg_wwan_ctrl_start(struct wwan_port *port)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ struct rpmsg_channel_info chinfo = {
+ .src = rpwwan->rpdev->src,
+ .dst = RPMSG_ADDR_ANY,
+ };
+
+ strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE);
+ rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback,
+ rpwwan, chinfo);
+ if (!rpwwan->ept)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static void rpmsg_wwan_ctrl_stop(struct wwan_port *port)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+ rpmsg_destroy_ept(rpwwan->ept);
+ rpwwan->ept = NULL;
+}
+
+static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len);
+ if (ret)
+ return ret;
+
+ consume_skb(skb);
+ return 0;
+}
+
+static int rpmsg_wwan_ctrl_tx_blocking(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ ret = rpmsg_send(rpwwan->ept, skb->data, skb->len);
+ if (ret)
+ return ret;
+
+ consume_skb(skb);
+ return 0;
+}
+
+static __poll_t rpmsg_wwan_ctrl_tx_poll(struct wwan_port *port,
+ struct file *filp, poll_table *wait)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+ return rpmsg_poll(rpwwan->ept, filp, wait);
+}
+
+static const struct wwan_port_ops rpmsg_wwan_pops = {
+ .start = rpmsg_wwan_ctrl_start,
+ .stop = rpmsg_wwan_ctrl_stop,
+ .tx = rpmsg_wwan_ctrl_tx,
+ .tx_blocking = rpmsg_wwan_ctrl_tx_blocking,
+ .tx_poll = rpmsg_wwan_ctrl_tx_poll,
+};
+
+static struct device *rpmsg_wwan_find_parent(struct device *dev)
+{
+ /* Select the first platform device ancestor as parent for the WWAN ports.
+ * On Qualcomm platforms this is usually the platform device that
+ * represents the modem remote processor. This might need to be
+ * adjusted when adding device IDs for other platforms.
+ */
+ for (dev = dev->parent; dev; dev = dev->parent) {
+ if (dev_is_platform(dev))
+ return dev;
+ }
+ return NULL;
+}
+
+static int rpmsg_wwan_ctrl_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_wwan_dev *rpwwan;
+ struct wwan_port *port;
+ struct device *parent;
+
+ parent = rpmsg_wwan_find_parent(&rpdev->dev);
+ if (!parent)
+ return -ENODEV;
+
+ rpwwan = devm_kzalloc(&rpdev->dev, sizeof(*rpwwan), GFP_KERNEL);
+ if (!rpwwan)
+ return -ENOMEM;
+
+ rpwwan->rpdev = rpdev;
+ dev_set_drvdata(&rpdev->dev, rpwwan);
+
+ /* Register as a wwan port, id.driver_data contains wwan port type */
+ port = wwan_create_port(parent, rpdev->id.driver_data,
+ &rpmsg_wwan_pops, rpwwan);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ rpwwan->wwan_port = port;
+
+ return 0;
+};
+
+static void rpmsg_wwan_ctrl_remove(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_wwan_dev *rpwwan = dev_get_drvdata(&rpdev->dev);
+
+ wwan_remove_port(rpwwan->wwan_port);
+}
+
+static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
+ /* RPMSG channels for Qualcomm SoCs with integrated modem */
+ { .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
+ { .name = "DATA4", .driver_data = WWAN_PORT_AT },
+ {},
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table);
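+
+/* Illustrative mapping (device node names assumed): probing the
+ * "DATA5_CNTL" channel creates a WWAN_PORT_QMI port and "DATA4" a
+ * WWAN_PORT_AT port, which the WWAN core would expose as character
+ * devices such as /dev/wwan0qmi0 and /dev/wwan0at0.
+ */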
+
+static struct rpmsg_driver rpmsg_wwan_ctrl_driver = {
+ .drv.name = "rpmsg_wwan_ctrl",
+ .id_table = rpmsg_wwan_ctrl_id_table,
+ .probe = rpmsg_wwan_ctrl_probe,
+ .remove = rpmsg_wwan_ctrl_remove,
+};
+module_rpmsg_driver(rpmsg_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RPMSG WWAN CTRL Driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index cff04e532c1e..3e16c318e705 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -12,9 +12,13 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/termios.h>
#include <linux/wwan.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/wwan.h>
-#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */
+/* Maximum number of minors in use */
+#define WWAN_MAX_MINORS (1 << MINORBITS)
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
@@ -34,11 +38,15 @@ static int wwan_major;
* @id: WWAN device unique ID.
* @dev: Underlying device.
* @port_id: Current available port ID to pick.
+ * @ops: wwan device ops
+ * @ops_ctxt: context to pass to ops
*/
struct wwan_device {
unsigned int id;
struct device dev;
atomic_t port_id;
+ const struct wwan_ops *ops;
+ void *ops_ctxt;
};
/**
@@ -51,6 +59,8 @@ struct wwan_device {
* @dev: Underlying device
* @rxq: Buffer inbound queue
* @waitqueue: The waitqueue for port fops (read/write/poll)
+ * @data_lock: Port specific data access serialization
+ * @at_data: AT port specific data
*/
struct wwan_port {
enum wwan_port_type type;
@@ -61,8 +71,29 @@ struct wwan_port {
struct device dev;
struct sk_buff_head rxq;
wait_queue_head_t waitqueue;
+ struct mutex data_lock; /* Port specific data access serialization */
+ union {
+ struct {
+ struct ktermios termios;
+ int mdmbits;
+ } at_data;
+ };
};
+static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_device *wwan = to_wwan_dev(dev);
+
+ return sprintf(buf, "%d\n", wwan->id);
+}
+static DEVICE_ATTR_RO(index);
+
+static struct attribute *wwan_dev_attrs[] = {
+ &dev_attr_index.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wwan_dev);
+
static void wwan_dev_destroy(struct device *dev)
{
struct wwan_device *wwandev = to_wwan_dev(dev);
@@ -74,11 +105,13 @@ static void wwan_dev_destroy(struct device *dev)
static const struct device_type wwan_dev_type = {
.name = "wwan_dev",
.release = wwan_dev_destroy,
+ .groups = wwan_dev_groups,
};
static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
- return (dev->type == &wwan_dev_type && dev->parent == parent);
+ return (dev->type == &wwan_dev_type &&
+ (dev->parent == parent || dev == parent));
}
static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
@@ -92,6 +125,23 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
return to_wwan_dev(dev);
}
+static int wwan_dev_name_match(struct device *dev, const void *name)
+{
+ return dev->type == &wwan_dev_type &&
+ strcmp(dev_name(dev), name) == 0;
+}
+
+static struct wwan_device *wwan_dev_get_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
/* This function allocates and registers a new WWAN device OR if a WWAN device
* already exist for the given parent, it gets a reference and return it.
* This function is not exported (for now), it is called indirectly via
@@ -156,9 +206,14 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
/* WWAN device is created and registered (get+add) along with its first
* child port, and subsequent port registrations only grab a reference
* (get). The WWAN device must then be unregistered (del+put) along with
- * its latest port, and reference simply dropped (put) otherwise.
+ * its last port, and reference simply dropped (put) otherwise. In the
+ * same fashion, we must not unregister it when the ops are still there.
*/
- ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+ if (wwandev->ops)
+ ret = 1;
+ else
+ ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+
if (!ret)
device_unregister(&wwandev->dev);
else
@@ -169,12 +224,53 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
/* ------- WWAN port management ------- */
+static const struct {
+ const char * const name; /* Port type name */
+ const char * const devsuf; /* Port device name suffix */
+} wwan_port_types[WWAN_PORT_MAX + 1] = {
+ [WWAN_PORT_AT] = {
+ .name = "AT",
+ .devsuf = "at",
+ },
+ [WWAN_PORT_MBIM] = {
+ .name = "MBIM",
+ .devsuf = "mbim",
+ },
+ [WWAN_PORT_QMI] = {
+ .name = "QMI",
+ .devsuf = "qmi",
+ },
+ [WWAN_PORT_QCDM] = {
+ .name = "QCDM",
+ .devsuf = "qcdm",
+ },
+ [WWAN_PORT_FIREHOSE] = {
+ .name = "FIREHOSE",
+ .devsuf = "firehose",
+ },
+};
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wwan_port *port = to_wwan_port(dev);
+
+ return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *wwan_port_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wwan_port);
+
static void wwan_port_destroy(struct device *dev)
{
struct wwan_port *port = to_wwan_port(dev);
ida_free(&minors, MINOR(port->dev.devt));
- skb_queue_purge(&port->rxq);
+ mutex_destroy(&port->data_lock);
mutex_destroy(&port->ops_lock);
kfree(port);
}
@@ -182,6 +278,7 @@ static void wwan_port_destroy(struct device *dev)
static const struct device_type wwan_port_dev_type = {
.name = "wwan_port",
.release = wwan_port_destroy,
+ .groups = wwan_port_groups,
};
static int wwan_port_minor_match(struct device *dev, const void *minor)
@@ -201,14 +298,55 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
return to_wwan_port(dev);
}
-/* Keep aligned with wwan_port_type enum */
-static const char * const wwan_port_type_str[] = {
- "AT",
- "MBIM",
- "QMI",
- "QCDM",
- "FIREHOSE"
-};
+/* Allocate and set a unique name based on the passed format
+ *
+ * Name allocation approach is highly inspired by the __dev_alloc_name()
+ * function.
+ *
+ * To avoid name collisions, the caller must prevent new port device
+ * registration as well as concurrent invocation of this function.
+ */
+static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ const unsigned int max_ports = PAGE_SIZE * 8;
+ struct class_dev_iter iter;
+ unsigned long *idmap;
+ struct device *dev;
+ char buf[0x20];
+ int id;
+
+ idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!idmap)
+ return -ENOMEM;
+
+ /* Collect ids of same name format ports */
+ class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (dev->parent != &wwandev->dev)
+ continue;
+ if (sscanf(dev_name(dev), fmt, &id) != 1)
+ continue;
+ if (id < 0 || id >= max_ports)
+ continue;
+ set_bit(id, idmap);
+ }
+ class_dev_iter_exit(&iter);
+
+ /* Allocate unique id */
+ id = find_first_zero_bit(idmap, max_ports);
+ free_page((unsigned long)idmap);
+
+ snprintf(buf, sizeof(buf), fmt, id); /* Name generation */
+
+ dev = device_find_child_by_name(&wwandev->dev, buf);
+ if (dev) {
+ put_device(dev);
+ return -ENFILE;
+ }
+
+ return dev_set_name(&port->dev, buf);
+}
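+
+/* For example (illustrative): with namefmt "wwan0at%d" and existing ports
+ * wwan0at0 and wwan0at2, the idmap scan leaves bit 1 clear, so the new
+ * port is named wwan0at1, reusing holes left by removed ports.
+ */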
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
@@ -218,8 +356,9 @@ struct wwan_port *wwan_create_port(struct device *parent,
struct wwan_device *wwandev;
struct wwan_port *port;
int minor, err = -ENOMEM;
+ char namefmt[0x20];
- if (type >= WWAN_PORT_MAX || !ops)
+ if (type > WWAN_PORT_MAX || !ops)
return ERR_PTR(-EINVAL);
/* A port is always a child of a WWAN device, retrieve (allocate or
@@ -245,6 +384,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->ops_lock);
skb_queue_head_init(&port->rxq);
init_waitqueue_head(&port->waitqueue);
+ mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
port->dev.class = wwan_class;
@@ -252,12 +392,18 @@ struct wwan_port *wwan_create_port(struct device *parent,
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
- /* create unique name based on wwan device id, port index and type */
- dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
- atomic_inc_return(&wwandev->port_id),
- wwan_port_type_str[port->type]);
+ /* allocate unique name based on wwan device id, port type and number */
+ snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
+ wwan_port_types[port->type].devsuf);
+ /* Serialize ports registration */
+ mutex_lock(&wwan_register_lock);
+
+ __wwan_port_dev_assign_name(port, namefmt);
err = device_register(&port->dev);
+
+ mutex_unlock(&wwan_register_lock);
+
if (err)
goto error_put_device;
@@ -346,12 +492,16 @@ static void wwan_port_op_stop(struct wwan_port *port)
{
mutex_lock(&port->ops_lock);
port->start_count--;
- if (port->ops && !port->start_count)
- port->ops->stop(port);
+ if (!port->start_count) {
+ if (port->ops)
+ port->ops->stop(port);
+ skb_queue_purge(&port->rxq);
+ }
mutex_unlock(&port->ops_lock);
}
-static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
+ bool nonblock)
{
int ret;
@@ -361,7 +511,10 @@ static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
goto out_unlock;
}
- ret = port->ops->tx(port, skb);
+ if (nonblock || !port->ops->tx_blocking)
+ ret = port->ops->tx(port, skb);
+ else
+ ret = port->ops->tx_blocking(port, skb);
out_unlock:
mutex_unlock(&port->ops_lock);
@@ -488,7 +641,7 @@ static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
return -EFAULT;
}
- ret = wwan_port_op_tx(port, skb);
+ ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
if (ret) {
kfree_skb(skb);
return ret;
@@ -504,16 +657,124 @@ static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &port->waitqueue, wait);
- if (!is_write_blocked(port))
+ mutex_lock(&port->ops_lock);
+ if (port->ops && port->ops->tx_poll)
+ mask |= port->ops->tx_poll(port, filp, wait);
+ else if (!is_write_blocked(port))
mask |= EPOLLOUT | EPOLLWRNORM;
if (!is_read_blocked(port))
mask |= EPOLLIN | EPOLLRDNORM;
if (!port->ops)
mask |= EPOLLHUP | EPOLLERR;
+ mutex_unlock(&port->ops_lock);
return mask;
}
+/* Implements minimalistic stub terminal IOCTLs support */
+static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ mutex_lock(&port->data_lock);
+
+ switch (cmd) {
+ case TCFLSH:
+ break;
+
+ case TCGETS:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS:
+ case TCSETSW:
+ case TCSETSF:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+#ifdef TCGETS2
+ case TCGETS2:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS2:
+ case TCSETSW2:
+ case TCSETSF2:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+#endif
+
+ case TIOCMGET:
+ ret = put_user(port->at_data.mdmbits, (int __user *)arg);
+ break;
+
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS: {
+ int mdmbits;
+
+ if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (cmd == TIOCMBIC)
+ port->at_data.mdmbits &= ~mdmbits;
+ else if (cmd == TIOCMBIS)
+ port->at_data.mdmbits |= mdmbits;
+ else
+ port->at_data.mdmbits = mdmbits;
+ break;
+ }
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ mutex_unlock(&port->data_lock);
+
+ return ret;
+}
+
+static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct wwan_port *port = filp->private_data;
+ int res;
+
+ if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
+ res = wwan_port_fops_at_ioctl(port, cmd, arg);
+ if (res != -ENOIOCTLCMD)
+ return res;
+ }
+
+ switch (cmd) {
+ case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */
+ unsigned long flags;
+ struct sk_buff *skb;
+ int amount = 0;
+
+ spin_lock_irqsave(&port->rxq.lock, flags);
+ skb_queue_walk(&port->rxq, skb)
+ amount += skb->len;
+ spin_unlock_irqrestore(&port->rxq.lock, flags);
+
+ return put_user(amount, (int __user *)arg);
+ }
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
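+
+/* Userspace sketch (illustrative, fd is an assumed open port descriptor):
+ * query how many bytes are buffered in the port RX queue:
+ *
+ *   int avail;
+ *   if (ioctl(fd, TIOCINQ, &avail) == 0)
+ *           printf("%d bytes buffered\n", avail);
+ */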
+
static const struct file_operations wwan_port_fops = {
.owner = THIS_MODULE,
.open = wwan_port_fops_open,
@@ -521,28 +782,345 @@ static const struct file_operations wwan_port_fops = {
.read = wwan_port_fops_read,
.write = wwan_port_fops_write,
.poll = wwan_port_fops_poll,
+ .unlocked_ioctl = wwan_port_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ptr_ioctl,
+#endif
.llseek = noop_llseek,
};
+static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!tb[IFLA_PARENT_DEV_NAME])
+ return -EINVAL;
+
+ if (!data[IFLA_WWAN_LINK_ID])
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct device_type wwan_type = { .name = "wwan" };
+
+static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues)
+{
+ const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
+ struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
+ struct net_device *dev;
+ unsigned int priv_size;
+
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ /* only supported if ops were registered (not just ports) */
+ if (!wwandev->ops) {
+ dev = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
+
+ priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
+ dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
+ wwandev->ops->setup, num_tx_queues, num_rx_queues);
+
+ if (dev) {
+ SET_NETDEV_DEV(dev, &wwandev->dev);
+ SET_NETDEV_DEVTYPE(dev, &wwan_type);
+ }
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return dev;
+}
+
+static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+ u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+ struct wwan_netdev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ if (IS_ERR(wwandev))
+ return PTR_ERR(wwandev);
+
+ /* shouldn't have a netdev (left) with us as parent so WARN */
+ if (WARN_ON(!wwandev->ops)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ priv->link_id = link_id;
+ if (wwandev->ops->newlink)
+ ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
+ link_id, extack);
+ else
+ ret = register_netdevice(dev);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return ret;
+}
+
+static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+
+ if (IS_ERR(wwandev))
+ return;
+
+ /* shouldn't have a netdev (left) with us as parent so WARN */
+ if (WARN_ON(!wwandev->ops))
+ goto out;
+
+ if (wwandev->ops->dellink)
+ wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
+ else
+ unregister_netdevice_queue(dev, head);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+}
+
+static size_t wwan_rtnl_get_size(const struct net_device *dev)
+{
+ return
+ nla_total_size(4) + /* IFLA_WWAN_LINK_ID */
+ 0;
+}
+
+static int wwan_rtnl_fill_info(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct wwan_netdev_priv *priv = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
+ [IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
+ .kind = "wwan",
+ .maxtype = __IFLA_WWAN_MAX,
+ .alloc = wwan_rtnl_alloc,
+ .validate = wwan_rtnl_validate,
+ .newlink = wwan_rtnl_newlink,
+ .dellink = wwan_rtnl_dellink,
+ .get_size = wwan_rtnl_get_size,
+ .fill_info = wwan_rtnl_fill_info,
+ .policy = wwan_rtnl_policy,
+};
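+
+/* Userspace sketch (assuming an iproute2 that understands the "wwan" rtnl
+ * kind): create a data link bound to link id 1 on the wwan0 device:
+ *
+ *   ip link add dev wwan0-1 parentdev wwan0 type wwan linkid 1
+ *
+ * This supplies the IFLA_PARENT_DEV_NAME and IFLA_WWAN_LINK_ID attributes
+ * required by wwan_rtnl_validate() above.
+ */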
+
+static void wwan_create_default_link(struct wwan_device *wwandev,
+ u32 def_link_id)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
+ struct nlattr *data[IFLA_WWAN_MAX + 1];
+ struct net_device *dev;
+ struct nlmsghdr *nlh;
+ struct sk_buff *msg;
+
+ /* Forge attributes required to create a WWAN netdev. We first
+ * build a netlink message and then parse it. This looks
+ * odd, but such approach is less error prone.
+ */
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (WARN_ON(!msg))
+ return;
+ nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
+ if (WARN_ON(!nlh))
+ goto free_attrs;
+
+ if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
+ goto free_attrs;
+ tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
+ if (!tb[IFLA_LINKINFO])
+ goto free_attrs;
+ linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
+ if (!linkinfo[IFLA_INFO_DATA])
+ goto free_attrs;
+ if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
+ goto free_attrs;
+ nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
+ nla_nest_end(msg, tb[IFLA_LINKINFO]);
+
+ nlmsg_end(msg, nlh);
+
+ /* The next three parsing calls cannot fail */
+ nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
+ nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
+ NULL, NULL);
+ nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
+ linkinfo[IFLA_INFO_DATA], NULL, NULL);
+
+ rtnl_lock();
+
+ dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
+ &wwan_rtnl_link_ops, tb, NULL);
+ if (WARN_ON(IS_ERR(dev)))
+ goto unlock;
+
+ if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
+ free_netdev(dev);
+ goto unlock;
+ }
+
+unlock:
+ rtnl_unlock();
+
+free_attrs:
+ nlmsg_free(msg);
+}
+
+/**
+ * wwan_register_ops - register WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ * @ops: operations to register
+ * @ctxt: context to pass to operations
+ * @def_link_id: id of the default link that will be automatically created by
+ * the WWAN core for the WWAN device. The default link will not be created
+ * if the passed value is WWAN_NO_DEFAULT_LINK.
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt, u32 def_link_id)
+{
+ struct wwan_device *wwandev;
+
+ if (WARN_ON(!parent || !ops || !ops->setup))
+ return -EINVAL;
+
+ wwandev = wwan_create_dev(parent);
+ if (!wwandev)
+ return -ENOMEM;
+
+ if (WARN_ON(wwandev->ops)) {
+ wwan_remove_dev(wwandev);
+ return -EBUSY;
+ }
+
+ wwandev->ops = ops;
+ wwandev->ops_ctxt = ctxt;
+
+ /* NB: we do not abort ops registration in case of default link
+ * creation failure. Link ops is the management interface, while the
+ * default link creation is a service option. And we should not prevent
+ * a user from manually creating a link later if the service option failed
+ * now.
+ */
+ if (def_link_id != WWAN_NO_DEFAULT_LINK)
+ wwan_create_default_link(wwandev, def_link_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wwan_register_ops);
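+
+/* Usage sketch (illustrative, my_wwan_ops/my_ctxt are assumed names): a
+ * driver registers its ops once per device, optionally requesting an
+ * automatically created default link, as the hwsim driver below does:
+ *
+ *   err = wwan_register_ops(parent_dev, &my_wwan_ops, my_ctxt, 1);
+ */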
+
+/* Enqueue child netdev deletion */
+static int wwan_child_dellink(struct device *dev, void *data)
+{
+ struct list_head *kill_list = data;
+
+ if (dev->type == &wwan_type)
+ wwan_rtnl_dellink(to_net_dev(dev), kill_list);
+
+ return 0;
+}
+
+/**
+ * wwan_unregister_ops - remove WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ */
+void wwan_unregister_ops(struct device *parent)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
+ LIST_HEAD(kill_list);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+ if (WARN_ON(!wwandev->ops)) {
+ put_device(&wwandev->dev);
+ return;
+ }
+
+ /* put the reference obtained by wwan_dev_get_by_parent(),
+ * we should still have one (that the owner is giving back
+ * now) due to the ops being assigned.
+ */
+ put_device(&wwandev->dev);
+
+ rtnl_lock(); /* Prevent concurrent netdev(s) creation/destroying */
+
+ /* Remove all child netdev(s), using batch removing */
+ device_for_each_child(&wwandev->dev, &kill_list,
+ wwan_child_dellink);
+ unregister_netdevice_many(&kill_list);
+
+ wwandev->ops = NULL; /* Finally remove ops */
+
+ rtnl_unlock();
+
+ wwandev->ops_ctxt = NULL;
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_unregister_ops);
+
static int __init wwan_init(void)
{
+ int err;
+
+ err = rtnl_link_register(&wwan_rtnl_link_ops);
+ if (err)
+ return err;
+
wwan_class = class_create(THIS_MODULE, "wwan");
- if (IS_ERR(wwan_class))
- return PTR_ERR(wwan_class);
+ if (IS_ERR(wwan_class)) {
+ err = PTR_ERR(wwan_class);
+ goto unregister;
+ }
/* chrdev used for wwan ports */
- wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
+ wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
+ &wwan_port_fops);
if (wwan_major < 0) {
- class_destroy(wwan_class);
- return wwan_major;
+ err = wwan_major;
+ goto destroy;
}
return 0;
+
+destroy:
+ class_destroy(wwan_class);
+unregister:
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
+ return err;
}
static void __exit wwan_exit(void)
{
- unregister_chrdev(wwan_major, "wwan_port");
+ __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
class_destroy(wwan_class);
}
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
new file mode 100644
index 000000000000..5b62cf3b3c42
--- /dev/null
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * WWAN device simulator for WWAN framework testing.
+ *
+ * Copyright (c) 2021, Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wwan.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+
+#include <net/arp.h>
+
+static int wwan_hwsim_devsnum = 2;
+module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
+MODULE_PARM_DESC(devices, "Number of simulated devices");
+
+static struct class *wwan_hwsim_class;
+
+static struct dentry *wwan_hwsim_debugfs_topdir;
+static struct dentry *wwan_hwsim_debugfs_devcreate;
+
+static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
+static LIST_HEAD(wwan_hwsim_devs);
+static unsigned int wwan_hwsim_dev_idx;
+
+struct wwan_hwsim_dev {
+ struct list_head list;
+ unsigned int id;
+ struct device dev;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ struct dentry *debugfs_portcreate;
+ spinlock_t ports_lock; /* Serialize ports creation/deletion */
+ unsigned int port_idx;
+ struct list_head ports;
+};
+
+struct wwan_hwsim_port {
+ struct list_head list;
+ unsigned int id;
+ struct wwan_hwsim_dev *dev;
+ struct wwan_port *wwan;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ enum { /* AT command parser state */
+ AT_PARSER_WAIT_A,
+ AT_PARSER_WAIT_T,
+ AT_PARSER_WAIT_TERM,
+ AT_PARSER_SKIP_LINE,
+ } pstate;
+};
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops;
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops;
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops;
+static void wwan_hwsim_port_del_work(struct work_struct *work);
+static void wwan_hwsim_dev_del_work(struct work_struct *work);
+
+static netdev_tx_t wwan_hwsim_netdev_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ consume_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops wwan_hwsim_netdev_ops = {
+ .ndo_start_xmit = wwan_hwsim_netdev_xmit,
+};
+
+static void wwan_hwsim_netdev_setup(struct net_device *ndev)
+{
+ ndev->netdev_ops = &wwan_hwsim_netdev_ops;
+ ndev->needs_free_netdev = true;
+
+ ndev->mtu = ETH_DATA_LEN;
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = ETH_MAX_MTU;
+
+ ndev->type = ARPHRD_NONE;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+static const struct wwan_ops wwan_hwsim_wwan_rtnl_ops = {
+ .priv_size = 0, /* No private data */
+ .setup = wwan_hwsim_netdev_setup,
+};
+
+static int wwan_hwsim_port_start(struct wwan_port *wport)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+
+ port->pstate = AT_PARSER_WAIT_A;
+
+ return 0;
+}
+
+static void wwan_hwsim_port_stop(struct wwan_port *wport)
+{
+}
+
+/* Implements a minimalistic AT command parser that echoes input back and
+ * replies with 'OK' to each input command. See AT command protocol details
+ * in the ITU-T V.250 recommendations document.
+ *
+ * Be aware that this processor is not fully V.250 compliant.
+ */
+static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+ struct sk_buff *out;
+ int i, n, s;
+
+ /* Estimate the maximum possible number of commands by counting the
+ * termination chars (S3 param, CR by default). Then allocate an output
+ * buffer large enough to fit the echo and result codes of all commands.
+ */
+ for (i = 0, n = 0; i < in->len; ++i)
+ if (in->data[i] == '\r')
+ n++;
+ n = in->len + n * (2 + 2 + 2); /* Output buffer size */
+ out = alloc_skb(n, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ for (i = 0, s = 0; i < in->len; ++i) {
+ char c = in->data[i];
+
+ if (port->pstate == AT_PARSER_WAIT_A) {
+ if (c == 'A' || c == 'a')
+ port->pstate = AT_PARSER_WAIT_T;
+ else if (c != '\n') /* Ignore formatting char */
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_T) {
+ if (c == 'T' || c == 't')
+ port->pstate = AT_PARSER_WAIT_TERM;
+ else
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_TERM) {
+ if (c != '\r')
+ continue;
+ /* Consume the trailing formatting char as well */
+ if ((i + 1) < in->len && in->data[i + 1] == '\n')
+ i++;
+ n = i - s + 1;
+ memcpy(skb_put(out, n), &in->data[s], n);/* Echo */
+ memcpy(skb_put(out, 6), "\r\nOK\r\n", 6);
+ s = i + 1;
+ port->pstate = AT_PARSER_WAIT_A;
+ } else if (port->pstate == AT_PARSER_SKIP_LINE) {
+ if (c != '\r')
+ continue;
+ port->pstate = AT_PARSER_WAIT_A;
+ }
+ }
+
+ if (i > s) {
+ /* Echo the processed portion of a not yet completed command */
+ n = i - s;
+ memcpy(skb_put(out, n), &in->data[s], n);
+ }
+
+ consume_skb(in);
+
+ wwan_port_rx(wport, out);
+
+ return 0;
+}
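+
+/* For example, writing "AT\r" to the port yields the echo followed by the
+ * final result code, i.e. the single reply buffer "AT\r\r\nOK\r\n".
+ */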
+
+static const struct wwan_port_ops wwan_hwsim_port_ops = {
+ .start = wwan_hwsim_port_start,
+ .stop = wwan_hwsim_port_stop,
+ .tx = wwan_hwsim_port_tx,
+};
+
+static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
+{
+ struct wwan_hwsim_port *port;
+ char name[0x10];
+ int err;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->dev = dev;
+
+ spin_lock(&dev->ports_lock);
+ port->id = dev->port_idx++;
+ spin_unlock(&dev->ports_lock);
+
+ port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
+ &wwan_hwsim_port_ops,
+ port);
+ if (IS_ERR(port->wwan)) {
+ err = PTR_ERR(port->wwan);
+ goto err_free_port;
+ }
+
+ INIT_WORK(&port->del_work, wwan_hwsim_port_del_work);
+
+ snprintf(name, sizeof(name), "port%u", port->id);
+ port->debugfs_topdir = debugfs_create_dir(name, dev->debugfs_topdir);
+ debugfs_create_file("destroy", 0200, port->debugfs_topdir, port,
+ &wwan_hwsim_debugfs_portdestroy_fops);
+
+ return port;
+
+err_free_port:
+ kfree(port);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_port_del(struct wwan_hwsim_port *port)
+{
+ debugfs_remove(port->debugfs_topdir);
+
+ /* Make sure that there is no pending deletion work */
+ if (current_work() != &port->del_work)
+ cancel_work_sync(&port->del_work);
+
+ wwan_remove_port(port->wwan);
+ kfree(port);
+}
+
+static void wwan_hwsim_port_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_port *port =
+ container_of(work, typeof(*port), del_work);
+ struct wwan_hwsim_dev *dev = port->dev;
+
+ spin_lock(&dev->ports_lock);
+ if (list_empty(&port->list)) {
+ /* Someone else is deleting the port at the moment */
+ spin_unlock(&dev->ports_lock);
+ return;
+ }
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+
+ wwan_hwsim_port_del(port);
+}
+
+static void wwan_hwsim_dev_release(struct device *sysdev)
+{
+ struct wwan_hwsim_dev *dev = container_of(sysdev, typeof(*dev), dev);
+
+ kfree(dev);
+}
+
+static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ dev->id = wwan_hwsim_dev_idx++;
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ dev->dev.release = wwan_hwsim_dev_release;
+ dev->dev.class = wwan_hwsim_class;
+ dev_set_name(&dev->dev, "hwsim%u", dev->id);
+
+ spin_lock_init(&dev->ports_lock);
+ INIT_LIST_HEAD(&dev->ports);
+
+ err = device_register(&dev->dev);
+ if (err)
+ goto err_free_dev;
+
+ INIT_WORK(&dev->del_work, wwan_hwsim_dev_del_work);
+
+ err = wwan_register_ops(&dev->dev, &wwan_hwsim_wwan_rtnl_ops, dev, 1);
+ if (err)
+ goto err_unreg_dev;
+
+ dev->debugfs_topdir = debugfs_create_dir(dev_name(&dev->dev),
+ wwan_hwsim_debugfs_topdir);
+ debugfs_create_file("destroy", 0200, dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_devdestroy_fops);
+ dev->debugfs_portcreate =
+ debugfs_create_file("portcreate", 0200,
+ dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_portcreate_fops);
+
+ return dev;
+
+err_unreg_dev:
+ device_unregister(&dev->dev);
+ /* Memory will be freed in the device release callback */
+
+ return ERR_PTR(err);
+
+err_free_dev:
+ kfree(dev);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_dev_del(struct wwan_hwsim_dev *dev)
+{
+ debugfs_remove(dev->debugfs_portcreate); /* Avoid new ports */
+
+ spin_lock(&dev->ports_lock);
+ while (!list_empty(&dev->ports)) {
+ struct wwan_hwsim_port *port;
+
+ port = list_first_entry(&dev->ports, struct wwan_hwsim_port,
+ list);
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+ wwan_hwsim_port_del(port);
+ spin_lock(&dev->ports_lock);
+ }
+ spin_unlock(&dev->ports_lock);
+
+ debugfs_remove(dev->debugfs_topdir);
+
+ /* This will remove all child netdev(s) */
+ wwan_unregister_ops(&dev->dev);
+
+ /* Make sure that there is no pending deletion work */
+ if (current_work() != &dev->del_work)
+ cancel_work_sync(&dev->del_work);
+
+ device_unregister(&dev->dev);
+ /* Memory will be freed in the device release callback */
+}
+
+static void wwan_hwsim_dev_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_dev *dev = container_of(work, typeof(*dev), del_work);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ if (list_empty(&dev->list)) {
+ /* Someone else is deleting the device at the moment */
+ spin_unlock(&wwan_hwsim_devs_lock);
+ return;
+ }
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ wwan_hwsim_dev_del(dev);
+}
+
+static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_port *port = file->private_data;
+
+ /* We cannot delete the port here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So defer the
+ * deletion to a workqueue.
+ */
+ schedule_work(&port->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops = {
+ .write = wwan_hwsim_debugfs_portdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_portcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops = {
+ .write = wwan_hwsim_debugfs_portcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+
+ /* We cannot delete the device here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So defer the
+ * deletion to a workqueue.
+ */
+ schedule_work(&dev->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops = {
+ .write = wwan_hwsim_debugfs_devdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev;
+
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devcreate_fops = {
+ .write = wwan_hwsim_debugfs_devcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
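+
+/* Runtime usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
+ *
+ *   echo 1 > /sys/kernel/debug/wwan_hwsim/devcreate          # new device
+ *   echo 1 > /sys/kernel/debug/wwan_hwsim/hwsim2/portcreate  # new AT port
+ *   echo 1 > /sys/kernel/debug/wwan_hwsim/hwsim2/destroy     # delete it
+ */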
+
+static int __init wwan_hwsim_init_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int i, j;
+
+ for (i = 0; i < wwan_hwsim_devsnum; ++i) {
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ /* Create a couple of ports per device so that the simulator
+ * is ready to use sooner.
+ */
+ for (j = 0; j < 2; ++j) {
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void wwan_hwsim_free_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ while (!list_empty(&wwan_hwsim_devs)) {
+ dev = list_first_entry(&wwan_hwsim_devs, struct wwan_hwsim_dev,
+ list);
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+ wwan_hwsim_dev_del(dev);
+ spin_lock(&wwan_hwsim_devs_lock);
+ }
+ spin_unlock(&wwan_hwsim_devs_lock);
+}
+
+static int __init wwan_hwsim_init(void)
+{
+ int err;
+
+ if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
+ return -EINVAL;
+
+ wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
+ if (IS_ERR(wwan_hwsim_class))
+ return PTR_ERR(wwan_hwsim_class);
+
+ wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
+ wwan_hwsim_debugfs_devcreate =
+ debugfs_create_file("devcreate", 0200,
+ wwan_hwsim_debugfs_topdir, NULL,
+ &wwan_hwsim_debugfs_devcreate_fops);
+
+ err = wwan_hwsim_init_devs();
+ if (err)
+ goto err_clean_devs;
+
+ return 0;
+
+err_clean_devs:
+ wwan_hwsim_free_devs();
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+
+ return err;
+}
+
+static void __exit wwan_hwsim_exit(void)
+{
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
+ wwan_hwsim_free_devs();
+ flush_scheduled_work(); /* Wait for deletion works to complete */
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+}
+
+module_init(wwan_hwsim_init);
+module_exit(wwan_hwsim_exit);
+
+MODULE_AUTHOR("Sergey Ryazanov");
+MODULE_DESCRIPTION("Device simulator for WWAN framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 193b723fe3bd..c58996c1e230 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
if (queue->task) {
kthread_stop(queue->task);
+ put_task_struct(queue->task);
queue->task = NULL;
}
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (IS_ERR(task))
goto kthread_err;
queue->task = task;
+ /*
+ * Take a reference to the task in order to prevent it from being freed
+ * if the thread function returns before kthread_stop is called.
+ */
+ get_task_struct(task);
task = kthread_run(xenvif_dealloc_kthread, queue,
"%s-dealloc", queue->name);
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index fe0719ed81a0..528745862738 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -149,7 +149,7 @@ static void fdp_nci_send_patch_cb(struct nci_dev *ndev)
wake_up(&info->setup_wq);
}
-/**
+/*
* Register a packet sent counter and a callback
*
* We have no other way of knowing when all firmware packets were sent out
@@ -167,7 +167,7 @@ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev,
info->data_pkt_counter_cb = cb;
}
-/**
+/*
* The device is expecting a stream of packets. All packets need to
* have the PBF flag set to 0x0 (last packet) even if the firmware
* file is segmented and there are multiple packets. If we give the
@@ -237,28 +237,18 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
static int fdp_nci_open(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
return info->phy_ops->enable(info->phy);
}
static int fdp_nci_close(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
return 0;
}
static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
if (atomic_dec_and_test(&info->data_pkt_counter))
info->data_pkt_counter_cb(ndev);
@@ -266,16 +256,6 @@ static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return info->phy_ops->write(info->phy, skb);
}
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
-{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
- return nci_recv_frame(ndev, skb);
-}
-EXPORT_SYMBOL(fdp_nci_recv_frame);
-
static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
@@ -286,7 +266,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev);
if (r < 0) {
nfc_err(dev, "RAM patch request error\n");
- goto error;
+ return r;
}
data = (u8 *) info->ram_patch->data;
@@ -303,7 +283,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev);
if (r < 0) {
nfc_err(dev, "OTP patch request error\n");
- goto out;
+ return 0;
}
data = (u8 *) info->otp_patch->data;
@@ -315,10 +295,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
dev_dbg(dev, "OTP patch version: %d, size: %d\n",
info->otp_patch_version, (int) info->otp_patch->size);
-out:
return 0;
-error:
- return r;
}
static void fdp_nci_release_firmware(struct nci_dev *ndev)
@@ -476,8 +453,6 @@ static int fdp_nci_setup(struct nci_dev *ndev)
int r;
u8 patched = 0;
- dev_dbg(dev, "%s\n", __func__);
-
r = nci_core_init(ndev);
if (r)
goto error;
@@ -585,9 +560,7 @@ static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_reset_ntf = 1;
wake_up(&info->setup_wq);
@@ -598,9 +571,7 @@ static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_patch_ntf = 1;
info->setup_patch_status = skb->data[0];
wake_up(&info->setup_wq);
@@ -773,11 +744,6 @@ EXPORT_SYMBOL(fdp_nci_probe);
void fdp_nci_remove(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
-
nci_unregister_device(ndev);
nci_free_device(ndev);
}
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
index 9bd1f3f23e2d..ead3b21ccae6 100644
--- a/drivers/nfc/fdp/fdp.h
+++ b/drivers/nfc/fdp/fdp.h
@@ -25,6 +25,5 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
void fdp_nci_remove(struct nci_dev *ndev);
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
#endif /* __LOCAL_FDP_H_ */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index adaa1a7147f9..c5596e514648 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -49,7 +49,6 @@ static int fdp_nci_i2c_enable(void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
return 0;
@@ -59,7 +58,6 @@ static void fdp_nci_i2c_disable(void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
}
@@ -197,7 +195,6 @@ flush:
static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb;
int r;
@@ -206,9 +203,6 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
-
r = fdp_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO)
@@ -217,7 +211,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
if (skb != NULL)
- fdp_nci_recv_frame(phy->ndev, skb);
+ nci_recv_frame(phy->ndev, skb);
return IRQ_HANDLED;
}
@@ -288,8 +282,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
u32 clock_freq;
int r = 0;
- dev_dbg(dev, "%s\n", __func__);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(dev, "No I2C_FUNC_I2C support\n");
return -ENODEV;
@@ -351,8 +343,6 @@ static int fdp_nci_i2c_remove(struct i2c_client *client)
{
struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
fdp_nci_remove(phy->ndev);
fdp_nci_i2c_disable(phy);
@@ -368,7 +358,7 @@ MODULE_DEVICE_TABLE(acpi, fdp_nci_i2c_acpi_match);
static struct i2c_driver fdp_nci_i2c_driver = {
.driver = {
.name = FDP_I2C_DRIVER_NAME,
- .acpi_match_table = ACPI_PTR(fdp_nci_i2c_acpi_match),
+ .acpi_match_table = fdp_nci_i2c_acpi_match,
},
.probe_new = fdp_nci_i2c_probe,
.remove = fdp_nci_i2c_remove,
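
Context for the match-table changes running through this series: ACPI_PTR() and of_match_ptr() compile to NULL when CONFIG_ACPI or CONFIG_OF is disabled, which leaves the id table defined but unreferenced and trips -Wunused-const-variable; referencing the table directly (as above) or tagging it __maybe_unused (as in later files) silences the warning at the cost of a few bytes of image size. Roughly, the wrappers in <linux/acpi.h> and <linux/of.h> look like:

	#ifdef CONFIG_ACPI
	#define ACPI_PTR(_ptr)	(_ptr)
	#else
	#define ACPI_PTR(_ptr)	(NULL)
	#endif

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif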
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 0f43bb389566..e56cea716cd2 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -98,8 +98,6 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
size_t if_version_length;
int bytes_recv, r;
- pr_info("%s\n", __func__);
-
memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
cmd.hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
cmd.hdr.data_size = 1;
@@ -146,8 +144,6 @@ static int mei_nfc_connect(struct nfc_mei_phy *phy)
size_t connect_length, connect_resp_length;
int bytes_recv, r;
- pr_info("%s\n", __func__);
-
connect_length = sizeof(struct mei_nfc_cmd) +
sizeof(struct mei_nfc_connect);
@@ -320,8 +316,6 @@ static int nfc_mei_phy_enable(void *phy_id)
int r;
struct nfc_mei_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
if (phy->powered == 1)
return 0;
@@ -363,8 +357,6 @@ static void nfc_mei_phy_disable(void *phy_id)
{
struct nfc_mei_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
mei_cldev_disable(phy->cldev);
phy->powered = 0;
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index 8d3988457c58..b1d3975e8a81 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -364,7 +364,6 @@ static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
case MICROREAD_CB_TYPE_READER_ALL:
if (err == 0) {
if (skb->len == 0) {
- err = -EPROTO;
kfree_skb(skb);
info->async_cb(info->async_cb_context, NULL,
-EPROTO);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 52c8ae504e32..aaccb8b76b3e 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell NFC driver: Firmware downloader
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
@@ -50,8 +39,8 @@ enum {
};
/*
-** Patterns for responses
-*/
+ * Patterns for responses
+ */
static const uint8_t nci_pattern_core_reset_ntf[] = {
0x60, 0x00, 0x02, 0xA0, 0x01
@@ -451,7 +440,7 @@ static void fw_dnld_rx_work(struct work_struct *work)
}
}
-int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
+int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
{
char name[32];
@@ -465,13 +454,13 @@ int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
return 0;
}
-void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv)
+void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv)
{
destroy_workqueue(priv->fw_dnld.rx_wq);
}
-void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
+ struct sk_buff *skb)
{
/* Discard command timer */
if (timer_pending(&priv->ndev->cmd_timer))
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.h b/drivers/nfc/nfcmrvl/fw_dnld.h
index ee4a339c05fd..7c4d91b01910 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.h
+++ b/drivers/nfc/nfcmrvl/fw_dnld.h
@@ -1,20 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Marvell NFC driver: Firmware downloader
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#ifndef __NFCMRVL_FW_DNLD_H__
#define __NFCMRVL_FW_DNLD_H__
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 18cd96284b77..59a529e72d96 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-I2C driver: I2C interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -49,11 +38,6 @@ static int nfcmrvl_i2c_read(struct nfcmrvl_i2c_drv_data *drv_data,
return -EBADMSG;
}
- if (nci_hdr.plen > NCI_MAX_PAYLOAD_SIZE) {
- nfc_err(&drv_data->i2c->dev, "invalid packet payload size\n");
- return -EBADMSG;
- }
-
*skb = nci_skb_alloc(drv_data->priv->ndev,
nci_hdr.plen + NCI_CTRL_HDR_SIZE, GFP_KERNEL);
if (!*skb)
@@ -260,7 +244,7 @@ static int nfcmrvl_i2c_remove(struct i2c_client *client)
}
-static const struct of_device_id of_nfcmrvl_i2c_match[] = {
+static const struct of_device_id of_nfcmrvl_i2c_match[] __maybe_unused = {
{ .compatible = "marvell,nfc-i2c", },
{},
};
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 529be35ac178..a4620b480c4f 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell NFC driver: major functions
*
* Copyright (C) 2014-2015 Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index de68ff45e49a..a715543bc9bf 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -1,20 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Marvell NFC driver
*
* Copyright (C) 2014-2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#ifndef _NFCMRVL_H_
#define _NFCMRVL_H_
@@ -36,16 +25,16 @@
#define NFCMRVL_NCI_MAX_EVENT_SIZE 260
/*
-** NCI FW Parmaters
-*/
+ * NCI FW Parameters
+ */
#define NFCMRVL_PB_BAIL_OUT 0x11
#define NFCMRVL_PROP_REF_CLOCK 0xF0
#define NFCMRVL_PROP_SET_HI_CONFIG 0xF1
/*
-** HCI defines
-*/
+ * HCI defines
+ */
#define NFCMRVL_HCI_EVENT_HEADER_SIZE 0x04
#define NFCMRVL_HCI_EVENT_CODE 0x04
@@ -78,8 +67,8 @@ struct nfcmrvl_private {
bool support_fw_dnld;
/*
- ** PHY related information
- */
+ * PHY related information
+ */
/* PHY driver context */
void *drv_data;
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 8e0ddb434770..66696321c645 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-SPI driver: SPI interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -196,7 +185,7 @@ static int nfcmrvl_spi_remove(struct spi_device *spi)
return 0;
}
-static const struct of_device_id of_nfcmrvl_spi_match[] = {
+static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = {
{ .compatible = "marvell,nfc-spi", },
{},
};
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index e5a622ce4b95..50d86c90b9dd 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -1,19 +1,8 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-UART driver
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
@@ -29,8 +18,8 @@ static unsigned int break_control;
static int reset_n_io = -EINVAL;
/*
-** NFCMRVL NCI OPS
-*/
+ * NFCMRVL NCI OPS
+ */
static int nfcmrvl_uart_nci_open(struct nfcmrvl_private *priv)
{
@@ -103,8 +92,8 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
}
/*
-** NCI UART OPS
-*/
+ * NCI UART OPS
+ */
static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
{
@@ -178,10 +167,10 @@ static void nfcmrvl_nci_uart_tx_done(struct nci_uart *nu)
return;
/*
- ** To ensure that if the NFCC goes in DEEP SLEEP sate we can wake him
- ** up. we set BREAK. Once we will be ready to send again we will remove
- ** it.
- */
+ * To ensure that we can wake the NFCC up if it goes into DEEP SLEEP
+ * state, we set BREAK. Once we are ready to send again, we will
+ * remove it.
+ */
if (priv->config.break_control && nu->tty->ops->break_ctl) {
nu->tty->ops->break_ctl(nu->tty, -1);
usleep_range(1000, 3000);
@@ -200,23 +189,7 @@ static struct nci_uart nfcmrvl_nci_uart = {
.tx_done = nfcmrvl_nci_uart_tx_done,
}
};
-
-/*
-** Module init
-*/
-
-static int nfcmrvl_uart_init_module(void)
-{
- return nci_uart_register(&nfcmrvl_nci_uart);
-}
-
-static void nfcmrvl_uart_exit_module(void)
-{
- nci_uart_unregister(&nfcmrvl_nci_uart);
-}
-
-module_init(nfcmrvl_uart_init_module);
-module_exit(nfcmrvl_uart_exit_module);
+module_driver(nfcmrvl_nci_uart, nci_uart_register, nci_uart_unregister);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell NFC-over-UART");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 888e298f610b..9d649b45300b 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-USB driver: USB interface related functions
*
* Copyright (C) 2014, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/usb.h>
@@ -68,7 +57,6 @@ static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data)
static void nfcmrvl_bulk_complete(struct urb *urb)
{
struct nfcmrvl_usb_drv_data *drv_data = urb->context;
- struct sk_buff *skb;
int err;
dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n",
@@ -78,6 +66,8 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
return;
if (!urb->status) {
+ struct sk_buff *skb;
+
skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length,
GFP_ATOMIC);
if (!skb) {
@@ -296,7 +286,6 @@ static void nfcmrvl_waker(struct work_struct *work)
static int nfcmrvl_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_endpoint_descriptor *ep_desc;
struct nfcmrvl_usb_drv_data *drv_data;
struct nfcmrvl_private *priv;
int i;
@@ -314,18 +303,16 @@ static int nfcmrvl_probe(struct usb_interface *intf,
return -ENOMEM;
for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep_desc;
+
ep_desc = &intf->cur_altsetting->endpoint[i].desc;
if (!drv_data->bulk_tx_ep &&
usb_endpoint_is_bulk_out(ep_desc)) {
drv_data->bulk_tx_ep = ep_desc;
- continue;
- }
-
- if (!drv_data->bulk_rx_ep &&
- usb_endpoint_is_bulk_in(ep_desc)) {
+ } else if (!drv_data->bulk_rx_ep &&
+ usb_endpoint_is_bulk_in(ep_desc)) {
drv_data->bulk_rx_ep = ep_desc;
- continue;
}
}
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index a0ce95a287c5..2b0c7232e91f 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -70,21 +70,16 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
struct nxp_nci_info *info = nci_get_drvdata(ndev);
int r;
- if (!info->phy_ops->write) {
- r = -ENOTSUPP;
- goto send_exit;
- }
+ if (!info->phy_ops->write)
+ return -EOPNOTSUPP;
- if (info->mode != NXP_NCI_MODE_NCI) {
- r = -EINVAL;
- goto send_exit;
- }
+ if (info->mode != NXP_NCI_MODE_NCI)
+ return -EINVAL;
r = info->phy_ops->write(info->phy_id, skb);
if (r < 0)
kfree_skb(skb);
-send_exit:
return r;
}
@@ -104,10 +99,8 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
int r;
info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
- if (!info) {
- r = -ENOMEM;
- goto probe_exit;
- }
+ if (!info)
+ return -ENOMEM;
info->phy_id = phy_id;
info->pdev = pdev;
@@ -120,31 +113,25 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
if (info->phy_ops->set_mode) {
r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
if (r < 0)
- goto probe_exit;
+ return r;
}
info->mode = NXP_NCI_MODE_COLD;
info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
NXP_NCI_HDR_LEN, 0);
- if (!info->ndev) {
- r = -ENOMEM;
- goto probe_exit;
- }
+ if (!info->ndev)
+ return -ENOMEM;
nci_set_parent_dev(info->ndev, pdev);
nci_set_drvdata(info->ndev, info);
r = nci_register_device(info->ndev);
- if (r < 0)
- goto probe_exit_free_nci;
+ if (r < 0) {
+ nci_free_device(info->ndev);
+ return r;
+ }
*ndev = info->ndev;
-
- goto probe_exit;
-
-probe_exit_free_nci:
- nci_free_device(info->ndev);
-probe_exit:
return r;
}
EXPORT_SYMBOL(nxp_nci_probe);
diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c
index dae0c8030e95..119bf305c642 100644
--- a/drivers/nfc/nxp-nci/firmware.c
+++ b/drivers/nfc/nxp-nci/firmware.c
@@ -95,10 +95,8 @@ static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
int r;
skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL);
- if (!skb) {
- r = -ENOMEM;
- goto chunk_exit;
- }
+ if (!skb)
+ return -ENOMEM;
chunk_len = info->max_payload - NXP_NCI_FW_HDR_LEN - NXP_NCI_FW_CRC_LEN;
remaining_len = fw_info->frame_size - fw_info->written;
@@ -124,7 +122,6 @@ static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
kfree_skb(skb);
-chunk_exit:
return r;
}
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 795da9b85d56..e6bf8cfe3aa7 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -174,9 +174,6 @@ static int pn533_i2c_probe(struct i2c_client *client,
struct pn533 *priv;
int r = 0;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -195,9 +192,8 @@ static int pn533_i2c_probe(struct i2c_client *client,
phy, &i2c_phy_ops, NULL,
&phy->i2c_dev->dev);
- if (IS_ERR(priv)) {
+ if (IS_ERR(priv))
return PTR_ERR(priv);
- }
phy->priv = priv;
r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
@@ -239,8 +235,6 @@ static int pn533_i2c_remove(struct i2c_client *client)
{
struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
free_irq(client->irq, phy);
pn53x_unregister_nfc(phy->priv);
@@ -249,7 +243,7 @@ static int pn533_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct of_device_id of_pn533_i2c_match[] = {
+static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = {
{ .compatible = "nxp,pn532", },
/*
* NOTE: The use of the compatibles with the trailing "...-i2c" is
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 2c7f9916f206..cd64bfe20402 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1075,8 +1075,6 @@ static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
u8 status, ret, mi;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
skb_queue_purge(&dev->resp_q);
return PTR_ERR(resp);
@@ -1124,8 +1122,6 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 0);
if (!skb)
return;
@@ -1148,8 +1144,6 @@ static void pn533_wq_tm_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
if (skb == NULL) { /* No more data */
@@ -1186,8 +1180,6 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 0);
if (!skb)
return;
@@ -1206,8 +1198,6 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
size_t gb_len;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
@@ -1260,8 +1250,6 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1283,8 +1271,6 @@ static void pn533_wq_rf(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 2);
if (!skb)
return;
@@ -1360,8 +1346,6 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(dev->dev, "%s", __func__);
-
if (!dev->gb) {
dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
@@ -1511,8 +1495,6 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct pn533_poll_modulations *cur_mod;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1783,8 +1765,6 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
if (!skb)
return -ENOMEM;
@@ -1866,8 +1846,6 @@ static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1892,8 +1870,6 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (!dev->tgt_active_prot) {
nfc_err(dev->dev, "There is no active target\n");
return;
@@ -1988,8 +1964,6 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (dev->poll_mod_count) {
nfc_err(dev->dev,
"Cannot bring the DEP link up while polling\n");
@@ -2067,8 +2041,6 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- dev_dbg(dev->dev, "%s\n", __func__);
-
pn533_poll_reset_mod_list(dev);
if (dev->tgt_mode || dev->tgt_active_prot)
@@ -2092,8 +2064,6 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -2133,8 +2103,6 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
int rc = 0;
u8 status, ret, mi;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
goto _error;
@@ -2288,8 +2256,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
struct pn533_data_exchange_arg *arg = NULL;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (!dev->tgt_active_prot) {
nfc_err(dev->dev,
"Can't exchange data if there is no active target\n");
@@ -2356,8 +2322,6 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
u8 status;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp))
return PTR_ERR(resp);
@@ -2388,8 +2352,6 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* let's split in multiple chunks if size's too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
rc = pn533_fill_fragment_skbs(dev, skb);
@@ -2426,8 +2388,6 @@ static void pn533_wq_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
if (!skb)
goto error;
@@ -2476,8 +2436,6 @@ static void pn533_wq_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -2533,8 +2491,6 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct sk_buff *resp;
int skb_len;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
skb = pn533_alloc_skb(dev, skb_len);
@@ -2580,8 +2536,6 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, sizeof(u8));
if (!skb)
return -ENOMEM;
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index a0665d8ea85b..7bdaf8263070 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -319,7 +319,7 @@ static struct serdev_device_driver pn532_uart_driver = {
.remove = pn532_uart_remove,
.driver = {
.name = "pn532_uart",
- .of_match_table = of_match_ptr(pn532_uart_of_match),
+ .of_match_table = pn532_uart_of_match,
},
};
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 84d2bfabf42b..bd7f7478d189 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -354,8 +354,6 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
{
struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
- dev_dbg(&urb->dev->dev, "%s\n", __func__);
-
print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
urb->transfer_buffer, urb->transfer_buffer_length,
false);
@@ -375,8 +373,6 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
void *cntx;
struct pn533_acr122_poweron_rdr_arg arg;
- dev_dbg(&phy->udev->dev, "%s\n", __func__);
-
buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 4ac8cb262559..de59e439c369 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -50,7 +50,7 @@ static const struct i2c_device_id pn544_hci_i2c_id_table[] = {
MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
-static const struct acpi_device_id pn544_hci_i2c_acpi_match[] = {
+static const struct acpi_device_id pn544_hci_i2c_acpi_match[] __maybe_unused = {
{"NXP5440", 0},
{}
};
@@ -241,8 +241,6 @@ static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
phy->powered = 1;
@@ -875,9 +873,6 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
struct pn544_i2c_phy *phy;
int r = 0;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -937,8 +932,6 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
cancel_work_sync(&phy->fw_work);
if (phy->fw_work_state != FW_WORK_STATE_IDLE)
pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
@@ -951,7 +944,7 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct of_device_id of_pn544_i2c_match[] = {
+static const struct of_device_id of_pn544_i2c_match[] __maybe_unused = {
{ .compatible = "nxp,pn544-i2c", },
{},
};
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 8e4d355dc3ae..4df926cc37d0 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -94,7 +94,7 @@ struct port100;
typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
struct sk_buff *resp);
-/**
+/*
* Setting sets structure for in_set_rf command
*
* @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
@@ -145,7 +145,7 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
};
/**
- * Setting sets structure for tg_set_rf command
+ * struct port100_tg_rf_setting - Setting sets structure for tg_set_rf command
*
* @tg_set_number: Represents the entry index in the port-100 RF Base Table.
* This table contains multiple RF setting sets required for RF
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 897394167522..4d1cf1bb55b0 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -6,6 +6,7 @@
* Robert Baldyga <r.baldyga@samsung.com>
*/
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/delay.h>
@@ -22,6 +23,7 @@
struct s3fwrn5_i2c_phy {
struct phy_common common;
struct i2c_client *i2c_dev;
+ struct clk *clk;
unsigned int irq_skip:1;
};
@@ -207,17 +209,40 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ phy->clk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(phy->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(phy->clk),
+ "failed to get clock\n");
+
+ /*
+ * S3FWRN5 depends on a clock input ("XI" pin) to function properly.
+ * Depending on the hardware configuration this could be an always-on
+ * oscillator or some external clock that must be explicitly enabled.
+ * Make sure the clock is running before starting S3FWRN5.
+ */
+ ret = clk_prepare_enable(phy->clk);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->i2c_dev->dev,
&i2c_phy_ops);
if (ret < 0)
- return ret;
+ goto disable_clk;
ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL,
s3fwrn5_i2c_irq_thread_fn, IRQF_ONESHOT,
S3FWRN5_I2C_DRIVER_NAME, phy);
if (ret)
- s3fwrn5_remove(phy->common.ndev);
+ goto s3fwrn5_remove;
+ return 0;
+
+s3fwrn5_remove:
+ s3fwrn5_remove(phy->common.ndev);
+disable_clk:
+ clk_disable_unprepare(phy->clk);
return ret;
}
@@ -226,6 +251,7 @@ static int s3fwrn5_i2c_remove(struct i2c_client *client)
struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
s3fwrn5_remove(phy->common.ndev);
+ clk_disable_unprepare(phy->clk);
return 0;
}
@@ -236,7 +262,7 @@ static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
-static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+static const struct of_device_id of_s3fwrn5_i2c_match[] __maybe_unused = {
{ .compatible = "samsung,s3fwrn5-i2c", },
{}
};
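
The clock handling added above leans on two properties of the clk API: devm_clk_get_optional() returns NULL rather than an error when the device node specifies no clock, and clk_prepare_enable(NULL) is a no-op. A condensed sketch of the idiom:

	clk = devm_clk_get_optional(dev, NULL);	/* NULL if no clock is wired up */
	if (IS_ERR(clk))			/* a real failure, e.g. -EPROBE_DEFER */
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* harmless no-op when clk == NULL */
	if (ret)
		return ret;

This is what lets the same probe path serve both an always-on oscillator (no "clocks" property) and an external clock that must be enabled explicitly.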
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 55d600cd3861..46981405e8b1 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -206,9 +206,6 @@ static int st_nci_i2c_probe(struct i2c_client *client,
struct st_nci_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -261,8 +258,6 @@ static int st_nci_i2c_remove(struct i2c_client *client)
{
struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
ndlc_remove(phy->ndlc);
return 0;
@@ -274,14 +269,14 @@ static const struct i2c_device_id st_nci_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table);
-static const struct acpi_device_id st_nci_i2c_acpi_match[] = {
+static const struct acpi_device_id st_nci_i2c_acpi_match[] __maybe_unused = {
{"SMO2101"},
{"SMO2102"},
{}
};
MODULE_DEVICE_TABLE(acpi, st_nci_i2c_acpi_match);
-static const struct of_device_id of_st_nci_i2c_match[] = {
+static const struct of_device_id of_st_nci_i2c_match[] __maybe_unused = {
{ .compatible = "st,st21nfcb-i2c", },
{ .compatible = "st,st21nfcb_i2c", },
{ .compatible = "st,st21nfcc-i2c", },
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 1cba8f69d3ae..5fd89f72969d 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -470,8 +470,6 @@ int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st_nci_disable_se\n");
-
/*
* According to upper layer, se_idx == NFC_SE_UICC when
* info->se_info.se_status->is_uicc_enable is true should never happen
@@ -496,8 +494,6 @@ int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st_nci_enable_se\n");
-
/*
* According to upper layer, se_idx == NFC_SE_UICC when
* info->se_info.se_status->is_uicc_enable is true should never happen.
@@ -534,10 +530,8 @@ static int st_nci_hci_network_init(struct nci_dev *ndev)
dest_params =
kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
sizeof(struct dest_spec_params), GFP_KERNEL);
- if (dest_params == NULL) {
- r = -ENOMEM;
- goto exit;
- }
+ if (dest_params == NULL)
+ return -ENOMEM;
dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
dest_params->length = sizeof(struct dest_spec_params);
@@ -594,8 +588,6 @@ static int st_nci_hci_network_init(struct nci_dev *ndev)
free_dest_params:
kfree(dest_params);
-
-exit:
return r;
}
@@ -606,8 +598,6 @@ int st_nci_discover_se(struct nci_dev *ndev)
int se_count = 0;
struct st_nci_info *info = nci_get_drvdata(ndev);
- pr_debug("st_nci_discover_se\n");
-
r = st_nci_hci_network_init(ndev);
if (r != 0)
return r;
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 09df6ea65840..250d56f204c3 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -216,9 +216,6 @@ static int st_nci_spi_probe(struct spi_device *dev)
struct st_nci_spi_phy *phy;
int r;
- dev_dbg(&dev->dev, "%s\n", __func__);
- dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
-
/* Check SPI platform functionnalities */
if (!dev) {
pr_debug("%s: dev is NULL. Device is not accessible.\n",
@@ -274,8 +271,6 @@ static int st_nci_spi_remove(struct spi_device *dev)
{
struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
- dev_dbg(&dev->dev, "%s\n", __func__);
-
ndlc_remove(phy->ndlc);
return 0;
@@ -287,13 +282,13 @@ static struct spi_device_id st_nci_spi_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
-static const struct acpi_device_id st_nci_spi_acpi_match[] = {
+static const struct acpi_device_id st_nci_spi_acpi_match[] __maybe_unused = {
{"SMO2101", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, st_nci_spi_acpi_match);
-static const struct of_device_id of_st_nci_spi_match[] = {
+static const struct of_device_id of_st_nci_spi_match[] __maybe_unused = {
{ .compatible = "st,st21nfcb-spi", },
{}
};
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index c6a9d30a4dba..94b600029a2a 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -98,7 +98,7 @@ static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data,
r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETINFO,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_GET_INFO, skb->len);
@@ -117,7 +117,6 @@ static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -131,7 +130,7 @@ static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data,
r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETDATA,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_GET_DATA, skb->len);
@@ -150,7 +149,6 @@ static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -216,7 +214,7 @@ static int st_nci_hci_get_param(struct nfc_dev *dev, void *data,
r = nci_hci_get_param(ndev, param->gate, param->data, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_GET_PARAM, skb->len);
@@ -235,7 +233,6 @@ static int st_nci_hci_get_param(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -262,7 +259,7 @@ static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data,
ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_VDC_MEASUREMENT_VALUE, skb->len);
@@ -281,7 +278,6 @@ static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -299,7 +295,7 @@ static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data,
ST_NCI_HCI_DM_VDC_VALUE_COMPARISON,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_VDC_VALUE_COMPARISON, skb->len);
@@ -318,7 +314,6 @@ static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 8874d605b14f..1ec651e31064 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -196,38 +196,29 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev,
skb_trim(skb, skb->len - 1);
- if (!skb->len) {
- r = -EIO;
- goto exit;
- }
+ if (!skb->len)
+ return -EIO;
- if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE) {
- r = -EPROTO;
- goto exit;
- }
+ if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE)
+ return -EPROTO;
atr_req = (struct st21nfca_atr_req *)skb->data;
- if (atr_req->length < sizeof(struct st21nfca_atr_req)) {
- r = -EPROTO;
- goto exit;
- }
+ if (atr_req->length < sizeof(struct st21nfca_atr_req))
+ return -EPROTO;
r = st21nfca_tm_send_atr_res(hdev, atr_req);
if (r)
- goto exit;
+ return r;
gb_len = skb->len - sizeof(struct st21nfca_atr_req);
r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
NFC_COMM_PASSIVE, atr_req->gbi, gb_len);
if (r)
- goto exit;
-
- r = 0;
+ return r;
-exit:
- return r;
+ return 0;
}
static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev,
@@ -280,25 +271,18 @@ static int st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev,
struct sk_buff *skb)
{
struct st21nfca_psl_req *psl_req;
- int r;
skb_trim(skb, skb->len - 1);
- if (!skb->len) {
- r = -EIO;
- goto exit;
- }
+ if (!skb->len)
+ return -EIO;
psl_req = (struct st21nfca_psl_req *)skb->data;
- if (skb->len < sizeof(struct st21nfca_psl_req)) {
- r = -EIO;
- goto exit;
- }
+ if (skb->len < sizeof(struct st21nfca_psl_req))
+ return -EIO;
- r = st21nfca_tm_send_psl_res(hdev, psl_req);
-exit:
- return r;
+ return st21nfca_tm_send_psl_res(hdev, psl_req);
}
int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb)
@@ -324,7 +308,6 @@ static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev,
{
struct st21nfca_dep_req_res *dep_req;
u8 size;
- int r;
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
skb_trim(skb, skb->len - 1);
@@ -332,20 +315,16 @@ static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev,
size = 4;
dep_req = (struct st21nfca_dep_req_res *)skb->data;
- if (skb->len < size) {
- r = -EIO;
- goto exit;
- }
+ if (skb->len < size)
+ return -EIO;
if (ST21NFCA_NFC_DEP_DID_BIT_SET(dep_req->pfb))
size++;
if (ST21NFCA_NFC_DEP_NAD_BIT_SET(dep_req->pfb))
size++;
- if (skb->len < size) {
- r = -EIO;
- goto exit;
- }
+ if (skb->len < size)
+ return -EIO;
/* Receiving DEP_REQ - Decoding */
switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
@@ -364,8 +343,6 @@ static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev,
skb_pull(skb, size);
return nfc_tm_data_received(hdev->ndev, skb);
-exit:
- return r;
}
static int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 23ed11f91213..7a9f4d71707e 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -502,9 +502,6 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
struct st21nfca_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -568,8 +565,6 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
{
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
st21nfca_hci_remove(phy->hdev);
if (phy->powered)
@@ -584,13 +579,13 @@ static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
-static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] = {
+static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] __maybe_unused = {
{"SMO2100", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, st21nfca_hci_i2c_acpi_match);
-static const struct of_device_id of_st21nfca_i2c_match[] = {
+static const struct of_device_id of_st21nfca_i2c_match[] __maybe_unused = {
{ .compatible = "st,st21nfca-i2c", },
{ .compatible = "st,st21nfca_i2c", },
{}
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 457854765983..2dc788c363fd 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -926,10 +926,8 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
int len_data_to_tag = 0;
skb_resp = nfc_alloc_recv_skb(MAX_RESPONSE_BUFFER_SIZE, GFP_KERNEL);
- if (!skb_resp) {
- rc = -ENOMEM;
- goto error;
- }
+ if (!skb_resp)
+ return -ENOMEM;
switch (stcontext->current_rf_tech) {
case NFC_DIGITAL_RF_TECH_106A:
@@ -986,7 +984,6 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
free_skb_resp:
kfree_skb(skb_resp);
-error:
return rc;
}
@@ -1059,9 +1056,9 @@ static const struct spi_device_id st95hf_id[] = {
};
MODULE_DEVICE_TABLE(spi, st95hf_id);
-static const struct of_device_id st95hf_spi_of_match[] = {
- { .compatible = "st,st95hf" },
- { },
+static const struct of_device_id st95hf_spi_of_match[] __maybe_unused = {
+ { .compatible = "st,st95hf" },
+ {},
};
MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 7b9556291eb1..088d3dd6f6fa 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -228,49 +228,34 @@ static const struct block_device_operations nd_blk_fops = {
.submit_bio = nd_blk_submit_bio,
};
-static void nd_blk_release_queue(void *q)
-{
- blk_cleanup_queue(q);
-}
-
static void nd_blk_release_disk(void *disk)
{
del_gendisk(disk);
- put_disk(disk);
+ blk_cleanup_disk(disk);
}
static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
struct device *dev = &nsblk->common.dev;
resource_size_t available_disk_size;
- struct request_queue *q;
struct gendisk *disk;
u64 internal_nlba;
internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- q = blk_alloc_queue(NUMA_NO_NODE);
- if (!q)
- return -ENOMEM;
- if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
- return -ENOMEM;
-
- blk_queue_max_hw_sectors(q, UINT_MAX);
- blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-
- disk = alloc_disk(0);
+ disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
return -ENOMEM;
- disk->first_minor = 0;
disk->fops = &nd_blk_fops;
- disk->queue = q;
- disk->flags = GENHD_FL_EXT_DEVT;
disk->private_data = nsblk;
nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
+ blk_queue_max_hw_sectors(disk->queue, UINT_MAX);
+ blk_queue_logical_block_size(disk->queue, nsblk_sector_size(nsblk));
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+
if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
return -ENOMEM;
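
The blk.c, btt.c and pmem.c conversions below all follow the same shape: blk_alloc_disk() hands back a gendisk with an embedded bio-based request queue, so the separate blk_alloc_queue()/alloc_disk() pairing and the GENHD_FL_EXT_DEVT flag go away, and teardown collapses to a single call. A rough sketch of the resulting pattern (5.14-era block API; my_fops and ctx stand in for the driver's own objects):

	disk = blk_alloc_disk(NUMA_NO_NODE);	/* disk plus embedded disk->queue */
	if (!disk)
		return -ENOMEM;

	disk->fops = &my_fops;
	disk->private_data = ctx;
	blk_queue_logical_block_size(disk->queue, 512);

	/* ... device_add_disk() ...; on error or removal: */
	del_gendisk(disk);
	blk_cleanup_disk(disk);		/* replaces put_disk() + blk_cleanup_queue() */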
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 18a267d5073f..92dec4952297 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1521,35 +1521,25 @@ static int btt_blk_init(struct btt *btt)
struct nd_btt *nd_btt = btt->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
- /* create a new disk and request queue for btt */
- btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE);
- if (!btt->btt_queue)
+ btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (!btt->btt_disk)
return -ENOMEM;
- btt->btt_disk = alloc_disk(0);
- if (!btt->btt_disk) {
- blk_cleanup_queue(btt->btt_queue);
- return -ENOMEM;
- }
-
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
- btt->btt_disk->queue = btt->btt_queue;
- btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
- blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
- blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
+ blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
+ blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
if (btt_meta_size(btt)) {
int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
if (rc) {
del_gendisk(btt->btt_disk);
- put_disk(btt->btt_disk);
- blk_cleanup_queue(btt->btt_queue);
+ blk_cleanup_disk(btt->btt_disk);
return rc;
}
}
@@ -1564,8 +1554,7 @@ static int btt_blk_init(struct btt *btt)
static void btt_blk_cleanup(struct btt *btt)
{
del_gendisk(btt->btt_disk);
- put_disk(btt->btt_disk);
- blk_cleanup_queue(btt->btt_queue);
+ blk_cleanup_disk(btt->btt_disk);
}
/**
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index aa53e0b769bd..0c76c0333f6e 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -201,7 +201,6 @@ struct badblocks;
/**
* struct btt - handle for a BTT instance
* @btt_disk: Pointer to the gendisk for BTT device
- * @btt_queue: Pointer to the request queue for the BTT device
* @arena_list: Head of the list of arenas
* @debugfs_dir: Debugfs dentry
* @nd_btt: Parent nd_btt struct
@@ -219,7 +218,6 @@ struct badblocks;
*/
struct btt {
struct gendisk *btt_disk;
- struct request_queue *btt_queue;
struct list_head arena_list;
struct dentry *debugfs_dir;
struct nd_btt *nd_btt;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ed10a8b66068..1e0615b8565e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -335,10 +335,9 @@ static const struct attribute_group *pmem_attribute_groups[] = {
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
- struct request_queue *q =
- container_of(pgmap->ref, struct request_queue, q_usage_counter);
+ struct pmem_device *pmem = pgmap->owner;
- blk_cleanup_queue(q);
+ blk_cleanup_disk(pmem->disk);
}
static void pmem_release_queue(void *pgmap)
@@ -361,7 +360,6 @@ static void pmem_release_disk(void *__pmem)
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
- put_disk(pmem->disk);
}
static const struct dev_pagemap_ops fsdax_pagemap_ops = {
@@ -422,10 +420,13 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- q = blk_alloc_queue(dev_to_node(dev));
- if (!q)
+ disk = blk_alloc_disk(nid);
+ if (!disk)
return -ENOMEM;
+ q = disk->queue;
+ pmem->disk = disk;
+ pmem->pgmap.owner = pmem;
pmem->pfn_flags = PFN_DEV;
pmem->pgmap.ref = &q->q_usage_counter;
if (is_nd_pfn(dev)) {
@@ -470,14 +471,7 @@ static int pmem_attach_disk(struct device *dev,
if (pmem->pfn_flags & PFN_MAP)
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- disk = alloc_disk_node(0, nid);
- if (!disk)
- return -ENOMEM;
- pmem->disk = disk;
-
disk->fops = &pmem_fops;
- disk->queue = q;
- disk->flags = GENHD_FL_EXT_DEVT;
disk->private_data = pmem;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
@@ -491,7 +485,6 @@ static int pmem_attach_disk(struct device *dev,
flags = DAXDEV_F_SYNC;
dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
if (IS_ERR(dax_dev)) {
- put_disk(disk);
return PTR_ERR(dax_dev);
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index a44d49d63968..c3f3d77f1aac 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -21,7 +21,7 @@ config NVME_MULTIPATH
help
This option enables support for multipath access to NVMe
subsystems. If this option is enabled only a single
- /dev/nvmeXnY device will show up for each NVMe namespaces,
+ /dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
config NVME_HWMON
@@ -71,7 +71,8 @@ config NVME_FC
config NVME_TCP
tristate "NVM Express over Fabrics TCP host driver"
depends on INET
- depends on BLK_DEV_NVME
+ depends on BLOCK
+ select NVME_CORE
select NVME_FABRICS
select CRYPTO
select CRYPTO_CRC32C
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 762125f2905f..80c656dcbbac 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -57,6 +57,26 @@ static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
+static unsigned long apst_primary_timeout_ms = 100;
+module_param(apst_primary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_timeout_ms,
+ "primary APST timeout in ms");
+
+static unsigned long apst_secondary_timeout_ms = 2000;
+module_param(apst_secondary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_timeout_ms,
+ "secondary APST timeout in ms");
+
+static unsigned long apst_primary_latency_tol_us = 15000;
+module_param(apst_primary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_latency_tol_us,
+ "primary APST latency tolerance in us");
+
+static unsigned long apst_secondary_latency_tol_us = 100000;
+module_param(apst_secondary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_latency_tol_us,
+ "secondary APST latency tolerance in us");
+
static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
@@ -701,9 +721,7 @@ EXPORT_SYMBOL_GPL(__nvme_check_ready);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
+ struct nvme_command c = { };
c.directive.opcode = nvme_admin_directive_send;
c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
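
The recurring `struct nvme_command c = { };` conversions in this file rely on C's initialization rules: an initializer list, even an empty one, zero-initializes every member not explicitly set, so the declaration replaces the memset() that followed it. For illustration:

	struct nvme_command a;
	memset(&a, 0, sizeof(a));	/* old style: clear the storage */

	struct nvme_command b = { };	/* new style: all members zero-initialized */

Strictly, the standard only guarantees the members (not padding), but nvme_command is a union of fully specified 64-byte command layouts without padding holes, so the two forms produce identical bytes here.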
@@ -728,9 +746,8 @@ static int nvme_enable_streams(struct nvme_ctrl *ctrl)
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
struct streams_directive_params *s, u32 nsid)
{
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
memset(s, 0, sizeof(*s));
c.directive.opcode = nvme_admin_directive_recv;
@@ -1440,10 +1457,9 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
union nvme_result res = { 0 };
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = op;
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
@@ -1522,36 +1538,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
-/*
- * Issue ioctl requests on the first available path. Note that unlike normal
- * block layer requests we will not retry failed request on another controller.
- */
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
- struct nvme_ns_head **head, int *srcu_idx)
-{
-#ifdef CONFIG_NVME_MULTIPATH
- if (disk->fops == &nvme_ns_head_ops) {
- struct nvme_ns *ns;
-
- *head = disk->private_data;
- *srcu_idx = srcu_read_lock(&(*head)->srcu);
- ns = nvme_find_path(*head);
- if (!ns)
- srcu_read_unlock(&(*head)->srcu, *srcu_idx);
- return ns;
- }
-#endif
- *head = NULL;
- *srcu_idx = -1;
- return disk->private_data;
-}
-
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
-{
- if (head)
- srcu_read_unlock(&head->srcu, idx);
-}
-
static int nvme_ns_open(struct nvme_ns *ns)
{
@@ -1601,9 +1587,8 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
u32 max_integrity_segments)
{
- struct blk_integrity integrity;
+ struct blk_integrity integrity = { };
- memset(&integrity, 0, sizeof(integrity));
switch (pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
@@ -1948,30 +1933,45 @@ static char nvme_pr_type(enum pr_type type)
}
};
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+ struct nvme_command *c, u8 data[16])
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+ u8 data[16])
+{
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+}
+
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
u64 key, u64 sa_key, u8 op)
{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- struct nvme_command c;
- int srcu_idx, ret;
+ struct nvme_command c = { };
u8 data[16] = { 0, };
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
put_unaligned_le64(key, &data[0]);
put_unaligned_le64(sa_key, &data[8]);
- memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw10 = cpu_to_le32(cdw10);
- ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
- nvme_put_ns_from_disk(head, srcu_idx);
- return ret;
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ return nvme_send_ns_head_pr_command(bdev, &c, data);
+ return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -2036,9 +2036,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send)
{
struct nvme_ctrl *ctrl = data;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
if (send)
cmd.common.opcode = nvme_admin_security_send;
else
@@ -2053,6 +2052,17 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
+ data);
+}
+#else
+#define nvme_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
@@ -2218,13 +2228,53 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl)
}
/*
+ * The function checks whether the given total (exlat + enlat) latency of
+ * a power state allows the latter to be used as an APST transition target.
+ * It does so by comparing the latency to the primary and secondary latency
+ * tolerances defined by module params. If there's a match, the corresponding
+ * timeout value is returned and the matching tolerance index (1 or 2) is
+ * reported.
+ */
+static bool nvme_apst_get_transition_time(u64 total_latency,
+ u64 *transition_time, unsigned *last_index)
+{
+ if (total_latency <= apst_primary_latency_tol_us) {
+ if (*last_index == 1)
+ return false;
+ *last_index = 1;
+ *transition_time = apst_primary_timeout_ms;
+ return true;
+ }
+ if (apst_secondary_timeout_ms &&
+ total_latency <= apst_secondary_latency_tol_us) {
+ if (*last_index <= 2)
+ return false;
+ *last_index = 2;
+ *transition_time = apst_secondary_timeout_ms;
+ return true;
+ }
+ return false;
+}
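
The matching logic is easiest to see with concrete numbers. Below is a
stand-alone sketch with the four apst_* module parameters replaced by
illustrative constants (not necessarily the driver's defaults); it walks
three made-up power states from deepest to shallowest, the order in which
nvme_configure_apst() visits them:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the apst_* module parameters. */
static const uint64_t primary_timeout_ms = 100;
static const uint64_t secondary_timeout_ms = 2000;
static const uint64_t primary_latency_tol_us = 15000;
static const uint64_t secondary_latency_tol_us = 100000;

static bool get_transition_time(uint64_t total_latency_us,
                                uint64_t *transition_ms, unsigned *last_index)
{
    if (total_latency_us <= primary_latency_tol_us) {
        if (*last_index == 1)
            return false;                 /* primary slot already taken */
        *last_index = 1;
        *transition_ms = primary_timeout_ms;
        return true;
    }
    if (secondary_timeout_ms &&
        total_latency_us <= secondary_latency_tol_us) {
        if (*last_index <= 2)
            return false;                 /* both slots already taken */
        *last_index = 2;
        *transition_ms = secondary_timeout_ms;
        return true;
    }
    return false;                         /* exceeds both tolerances */
}

int main(void)
{
    /* Made-up total (enlat + exlat) latencies, deepest state first. */
    uint64_t lat_us[] = { 90000, 12000, 5000 };
    unsigned last_index = UINT_MAX;       /* as in nvme_configure_apst() */
    uint64_t ms;

    for (unsigned i = 0; i < 3; i++) {
        if (get_transition_time(lat_us[i], &ms, &last_index))
            printf("state %u: idle timeout %llu ms\n", i,
                   (unsigned long long)ms);
        else
            printf("state %u: not used as a target\n", i);
    }
    return 0;   /* prints 2000 ms, 100 ms, then "not used" */
}

The *last_index checks are what cap the table at one target per tolerance
tier: once a shallower state has claimed a tier, deeper-or-equal matches
are skipped.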
+
+/*
* APST (Autonomous Power State Transition) lets us program a table of power
* state transitions that the controller will perform automatically.
- * We configure it with a simple heuristic: we are willing to spend at most 2%
- * of the time transitioning between power states. Therefore, when running in
- * any given state, we will enter the next lower-power non-operational state
- * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit
- * latency is under the requested maximum latency.
+ *
+ * Depending on module params, one of the two supported techniques will be used:
+ *
+ * - If the parameters provide explicit timeouts and tolerances, they will be
+ * used to build a table with up to 2 non-operational states to transition to.
+ * The default parameter values were selected based on the values used by
+ * Microsoft's and Intel's NVMe drivers. However, since we don't implement dynamic
+ * regeneration of the APST table in the event of switching between external
+ * and battery power, the timeouts and tolerances reflect a compromise
+ * between values used by Microsoft for AC and battery scenarios.
+ * - If not, we'll configure the table with a simple heuristic: we are willing
+ * to spend at most 2% of the time transitioning between power states.
+ * Therefore, when running in any given state, we will enter the next
+ * lower-power non-operational state after waiting 50 * (enlat + exlat)
+ * microseconds, as long as that state's exit latency is under the requested
+ * maximum latency.
*
* We will not autonomously enter any non-operational state for which the total
* latency exceeds ps_max_latency_us.
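
The heuristic branch is worth one worked number: a state with
enlat + exlat = 10000 us gets an idle timeout of 50 * 10000 us = 500 ms,
computed as a rounded-up division by 20 and clamped to the 24-bit idle-time
field of the table entry. A minimal sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* 50 * latency (in us) equals latency / 20 expressed in ms; the +19
 * rounds the division up. The result must fit a 24-bit field.
 */
static uint64_t heuristic_transition_ms(uint64_t total_latency_us)
{
    uint64_t ms = (total_latency_us + 19) / 20;

    if (ms > (1 << 24) - 1)
        ms = (1 << 24) - 1;
    return ms;
}

int main(void)
{
    /* enlat + exlat = 10000 us -> 500 ms before idling to this state */
    printf("%llu ms\n",
           (unsigned long long)heuristic_transition_ms(10000));
    return 0;
}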
@@ -2240,6 +2290,7 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
int max_ps = -1;
int state;
int ret;
+ unsigned last_lt_index = UINT_MAX;
/*
* If APST isn't supported or if we haven't been initialized yet,
@@ -2298,13 +2349,19 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
le32_to_cpu(ctrl->psd[state].entry_lat);
/*
- * This state is good. Use it as the APST idle target for
- * higher power states.
+ * This state is good. It can be used as the APST idle target
+ * for higher power states.
*/
- transition_ms = total_latency_us + 19;
- do_div(transition_ms, 20);
- if (transition_ms > (1 << 24) - 1)
- transition_ms = (1 << 24) - 1;
+ if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
+ if (!nvme_apst_get_transition_time(total_latency_us,
+ &transition_ms, &last_lt_index))
+ continue;
+ } else {
+ transition_ms = total_latency_us + 19;
+ do_div(transition_ms, 20);
+ if (transition_ms > (1 << 24) - 1)
+ transition_ms = (1 << 24) - 1;
+ }
target = cpu_to_le64((state << 3) | (transition_ms << 8));
if (max_ps == -1)
@@ -3485,8 +3542,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
cdev_init(cdev, fops);
cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device);
- if (ret)
+ if (ret) {
+ put_device(cdev_device);
ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+ }
return ret;
}
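
The added put_device() follows the standard rule for cdev_device_add():
the device was set up with device_initialize(), which takes the initial
reference, so a failed add must drop that reference instead of freeing
anything by hand. A generic kernel-style sketch of the pattern, with
hypothetical my_* names:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

/* Hypothetical object embedding both halves of a char device. */
struct my_obj {
    struct device dev;
    struct cdev cdev;
};

static int my_obj_register(struct my_obj *obj,
                           const struct file_operations *fops)
{
    int ret;

    device_initialize(&obj->dev);       /* refcount starts at one */
    cdev_init(&obj->cdev, fops);
    obj->cdev.owner = THIS_MODULE;

    ret = cdev_device_add(&obj->cdev, &obj->dev);
    if (ret)
        put_device(&obj->dev);  /* drop the ref; ->release() frees obj */
    return ret;
}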
@@ -3699,7 +3758,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
disk->fops = &nvme_bdev_ops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
/*
* Without the multipath code enabled, multiple controller per
* subsystems are visible as devices and thus we cannot use the
@@ -4067,6 +4125,11 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
opts->host_traddr ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
+ opts->host_iface ?: "none");
}
return ret;
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a2bb7fc63a73..1e6a7cc056ca 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -112,6 +112,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
+ len += scnprintf(buf + len, size - len, "%shost_iface=%s",
+ (len) ? "," : "", ctrl->opts->host_iface);
len += scnprintf(buf + len, size - len, "\n");
return len;
@@ -187,11 +190,10 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
*/
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.attrib = 1;
@@ -233,10 +235,9 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64);
*/
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_set.opcode = nvme_fabrics_command;
cmd.prop_set.fctype = nvme_fabrics_type_property_set;
cmd.prop_set.attrib = 0;
@@ -254,28 +255,23 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
/**
- * nvmf_log_connect_error() - Error-parsing-diagnostic print
- * out function for connect() errors.
- *
- * @ctrl: the specific /dev/nvmeX device that had the error.
- *
- * @errval: Error code to be decoded in a more human-friendly
- * printout.
- *
- * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
- *
- * @cmd: This is the SQE portion of a submission capsule.
- *
- * @data: This is the "Data" portion of a submission capsule.
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
+ * connect() errors.
+ * @ctrl: The specific /dev/nvmeX device that had the error.
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ * @offset: For use with the NVMe error code
+ * NVME_SC_CONNECT_INVALID_PARAM.
+ * @cmd: This is the SQE portion of a submission capsule.
+ * @data: This is the "Data" portion of a submission capsule.
*/
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int errval, int offset, struct nvme_command *cmd,
struct nvmf_connect_data *data)
{
- int err_sctype = errval & (~NVME_SC_DNR);
+ int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) {
-
case (NVME_SC_CONNECT_INVALID_PARAM):
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
@@ -318,30 +314,30 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
break;
-
case NVME_SC_CONNECT_INVALID_HOST:
dev_err(ctrl->device,
"Connect for subsystem %s is not allowed, hostnqn: %s\n",
data->subsysnqn, data->hostnqn);
break;
-
case NVME_SC_CONNECT_CTRL_BUSY:
dev_err(ctrl->device,
"Connect command failed: controller is busy or not available\n");
break;
-
case NVME_SC_CONNECT_FORMAT:
dev_err(ctrl->device,
"Connect incompatible format: %d",
cmd->connect.recfmt);
break;
-
+ case NVME_SC_HOST_PATH_ERROR:
+ dev_err(ctrl->device,
+ "Connect command failed: host path error\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
err_sctype);
break;
- } /* switch (err_sctype) */
+ }
}
/**
@@ -366,12 +362,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
*/
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = 0;
@@ -434,12 +429,11 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
*/
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = cpu_to_le16(qid);
@@ -545,6 +539,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
+ { NVMF_OPT_HOST_IFACE, "host_iface=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
@@ -754,6 +749,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
kfree(opts->host_traddr);
opts->host_traddr = p;
break;
+ case NVMF_OPT_HOST_IFACE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->host_iface);
+ opts->host_iface = p;
+ break;
case NVMF_OPT_HOST_ID:
p = match_strdup(args);
if (!p) {
@@ -938,6 +942,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
+ kfree(opts->host_iface);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index d7f7974dc208..c31dad69a773 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -66,6 +66,7 @@ enum {
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
+ NVMF_OPT_HOST_IFACE = 1 << 21,
};
/**
@@ -83,7 +84,9 @@ enum {
* @trsvcid: The transport-specific TRSVCID field for a port on the
* subsystem which is adding a controller.
* @host_traddr: A transport-specific field identifying the NVME host port
- * to use for the connection to the controller.
+ * to use for the connection to the controller.
+ * @host_iface: A transport-specific field identifying the NVME host
+ * interface to use for the connection to the controller.
* @queue_size: Number of IO queue elements.
* @nr_io_queues: Number of controller IO queues that will be established.
* @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -108,6 +111,7 @@ struct nvmf_ctrl_options {
char *traddr;
char *trsvcid;
char *host_traddr;
+ char *host_iface;
size_t queue_size;
unsigned int nr_io_queues;
unsigned int reconnect_delay;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 256e87721a01..7600863f7752 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3107,13 +3107,15 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue;
}
/* FC-NVME supports normal SGL Data Block Descriptors */
- if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
+ if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue;
}
@@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return;
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
- else if (time_after_eq(jiffies, rport->dev_loss_end))
+ if (status > 0 && (status & NVME_SC_DNR))
+ recon = false;
+ } else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false;
if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Max reconnect attempts (%d) "
- "reached.\n",
- ctrl->cnum, ctrl->ctrl.nr_reconnects);
- else
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+ if (status > 0 && (status & NVME_SC_DNR))
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: reconnect failure\n",
+ ctrl->cnum);
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Max reconnect attempts "
+ "(%d) reached.\n",
+ ctrl->cnum, ctrl->ctrl.nr_reconnects);
+ } else
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n",
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9557ead02de1..d93928d1e5bd 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -177,6 +177,20 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
+static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns *ns, __u32 nsid)
+{
+ if (ns && nsid != ns->head->ns_id) {
+ dev_err(ctrl->device,
+ "%s: nsid (%u) in cmd does not match nsid (%u)"
+ "of namespace\n",
+ current->comm, nsid, ns->head->ns_id);
+ return false;
+ }
+
+ return true;
+}
+
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
@@ -192,12 +206,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EFAULT;
if (cmd.flags)
return -EINVAL;
- if (ns && cmd.nsid != ns->head->ns_id) {
- dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
- current->comm, cmd.nsid, ns->head->ns_id);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;
- }
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -242,12 +252,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EFAULT;
if (cmd.flags)
return -EINVAL;
- if (ns && cmd.nsid != ns->head->ns_id) {
- dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
- current->comm, cmd.nsid, ns->head->ns_id);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;
- }
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -372,12 +378,13 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
+ __releases(&head->srcu)
{
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
nvme_get_ctrl(ns->ctrl);
- nvme_put_ns_from_disk(head, srcu_idx);
+ srcu_read_unlock(&head->srcu, srcu_idx);
ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
nvme_put_ctrl(ctrl);
@@ -387,14 +394,15 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct nvme_ns_head *head = NULL;
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
- int srcu_idx, ret;
+ int srcu_idx, ret = -EWOULDBLOCK;
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
/*
* Handle ioctls that apply to the controller instead of the namespace
@@ -402,12 +410,11 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
- ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
- else {
- ret = nvme_ns_ioctl(ns, cmd, argp);
- nvme_put_ns_from_disk(head, srcu_idx);
- }
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+ ret = nvme_ns_ioctl(ns, cmd, argp);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
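
After this refactor the head-based entry points all share one shape: take
the SRCU read lock, resolve a live path, dispatch, unlock. A condensed
sketch of that shape (my_head_op is a hypothetical stand-in for the ioctl
and report_zones variants):

static int my_head_op(struct nvme_ns_head *head, unsigned int cmd,
                      void __user *argp)
{
    int srcu_idx = srcu_read_lock(&head->srcu);
    struct nvme_ns *ns = nvme_find_path(head);
    int ret = -EWOULDBLOCK;             /* no live path right now */

    if (!ns)
        goto out_unlock;
    if (is_ctrl_ioctl(cmd))
        /* drops head->srcu itself before issuing the command, so a
         * passthrough namespace delete cannot deadlock on the lock */
        return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
    ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
    srcu_read_unlock(&head->srcu, srcu_idx);
    return ret;
}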
@@ -419,21 +426,19 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
container_of(cdev, struct nvme_ns_head, cdev);
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
- int srcu_idx, ret;
+ int srcu_idx, ret = -EWOULDBLOCK;
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
- if (!ns) {
- srcu_read_unlock(&head->srcu, srcu_idx);
- return -EWOULDBLOCK;
- }
+ if (!ns)
+ goto out_unlock;
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
ret = nvme_ns_ioctl(ns, cmd, argp);
- nvme_put_ns_from_disk(head, srcu_idx);
-
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f81871c7128a..0ea5298469c3 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -349,6 +349,25 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
nvme_put_ns_head(disk->private_data);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#else
+#define nvme_ns_head_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
.submit_bio = nvme_ns_head_submit_bio,
@@ -356,7 +375,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
.getgeo = nvme_getgeo,
- .report_zones = nvme_report_zones,
+ .report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -416,18 +435,12 @@ static void nvme_requeue_work(struct work_struct *work)
next = bio->bi_next;
bio->bi_next = NULL;
- /*
- * Reset disk to the mpath node and resubmit to select a new
- * path.
- */
- bio_set_dev(bio, head->disk->part0);
submit_bio_noacct(bio);
}
}
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
- struct request_queue *q;
bool vwc = false;
mutex_init(&head->lock);
@@ -443,34 +456,24 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
return 0;
- q = blk_alloc_queue(ctrl->numa_node);
- if (!q)
- goto out;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- /* set to a default value for 512 until disk is validated */
- blk_queue_logical_block_size(q, 512);
- blk_set_stacking_limits(&q->limits);
-
- /* we need to propagate up the VMC settings */
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(q, vwc, vwc);
-
- head->disk = alloc_disk(0);
+ head->disk = blk_alloc_disk(ctrl->numa_node);
if (!head->disk)
- goto out_cleanup_queue;
+ return -ENOMEM;
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
- head->disk->queue = q;
- head->disk->flags = GENHD_FL_EXT_DEVT;
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
- return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
-out:
- return -ENOMEM;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ /* set to a default value of 512 until the disk is validated */
+ blk_queue_logical_block_size(head->disk->queue, 512);
+ blk_set_stacking_limits(&head->disk->queue->limits);
+
+ /* we need to propagate up the VWC settings */
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(head->disk->queue, vwc, vwc);
+ return 0;
}
static void nvme_mpath_set_live(struct nvme_ns *ns)
@@ -769,16 +772,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
- blk_cleanup_queue(head->disk->queue);
- if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
- /*
- * if device_add_disk wasn't called, prevent
- * disk release to put a bogus reference on the
- * request queue
- */
- head->disk->queue = NULL;
- }
- put_disk(head->disk);
+ blk_cleanup_disk(head->disk);
}
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
@@ -799,6 +793,13 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
!(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
+ if (!ctrl->max_namespaces ||
+ ctrl->max_namespaces > le32_to_cpu(id->nn)) {
+ dev_err(ctrl->device,
+ "Invalid MNAN value %u\n", ctrl->max_namespaces);
+ return -EINVAL;
+ }
+
ctrl->anacap = id->anacap;
ctrl->anatt = id->anatt;
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0015860ec12b..75420ceacc10 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
- struct nvme_ns_head **head, int *srcu_idx);
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
@@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
@@ -810,17 +807,14 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action);
#else
-#define nvme_report_zones NULL
-
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action)
@@ -875,6 +869,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
}
#endif
+static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & ((1 << 0) | (1 << 1));
+}
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a29b170701fc..d3c5086673bc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
- struct nvme_command c;
+ struct nvme_command c = { };
unsigned int i;
if (!dev->dbbuf_dbs)
return;
- memset(&c, 0, sizeof(c));
c.dbbuf.opcode = nvme_admin_dbbuf;
c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
@@ -536,7 +535,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
if (!iod->nvmeq->qid)
return false;
@@ -559,7 +558,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
dma_addr = next_dma_addr;
}
-
}
static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,7 +574,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
dma_addr = next_dma_addr;
}
-
}
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
@@ -855,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
&cmnd->rw, &bv);
if (iod->nvmeq->qid && sgl_threshold &&
- dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ nvme_ctrl_sgl_supported(&dev->ctrl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
}
@@ -1032,7 +1029,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- u16 tmp = nvmeq->cq_head + 1;
+ u32 tmp = nvmeq->cq_head + 1;
if (tmp == nvmeq->q_depth) {
nvmeq->cq_head = 0;
@@ -1114,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = &dev->queues[0];
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
nvme_submit_cmd(nvmeq, &c, true);
@@ -1124,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id);
@@ -1136,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq, s16 vector)
{
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
@@ -1146,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(qid);
@@ -1161,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
struct nvme_ctrl *ctrl = &dev->ctrl;
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
@@ -1176,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(qid);
@@ -1257,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_queue *nvmeq = iod->nvmeq;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/* If PCI error recovery process is happening, we cannot reset or
@@ -1337,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
iod->aborted = 1;
- memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = req->tag;
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
@@ -1888,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
u64 dma_addr = dev->host_mem_descs_dma;
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
@@ -2265,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
struct request_queue *q = nvmeq->dev->ctrl.admin_q;
struct request *req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
@@ -2828,54 +2818,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- struct acpi_device *adev;
- struct pci_dev *root;
- acpi_handle handle;
- acpi_status status;
- u8 val;
-
- /*
- * Look for _DSD property specifying that the storage device on the port
- * must use D3 to support deep platform power savings during
- * suspend-to-idle.
- */
- root = pcie_find_root_port(dev);
- if (!root)
- return false;
-
- adev = ACPI_COMPANION(&root->dev);
- if (!adev)
- return false;
-
- /*
- * The property is defined in the PXSX device for South complex ports
- * and in the PEGP device for North complex ports.
- */
- status = acpi_get_handle(adev->handle, "PXSX", &handle);
- if (ACPI_FAILURE(status)) {
- status = acpi_get_handle(adev->handle, "PEGP", &handle);
- if (ACPI_FAILURE(status))
- return false;
- }
-
- if (acpi_bus_get_device(handle, &adev))
- return false;
-
- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
- &val))
- return false;
- return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- return false;
-}
-#endif /* CONFIG_ACPI */
-
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2925,7 +2867,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a bios work around to ask for D3 on
* platforms that support kernel managed suspend.
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37943dc4c2c1..a9e70cefd7ed 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret = -EINVAL;
+ int ret;
bool changed;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
- struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
struct ib_sge *sge = &req->sge[1];
+ struct scatterlist *sgl;
u32 len = 0;
int i;
- for (i = 0; i < count; i++, sgl++, sge++) {
+ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
sge->addr = sg_dma_address(sgl);
sge->length = sg_dma_len(sgl);
sge->lkey = queue->device->pd->local_dma_lkey;
len += sge->length;
+ sge++;
}
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 34f4b3402f7c..c7bd37103cf4 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -123,6 +123,7 @@ struct nvme_tcp_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct sockaddr_storage addr;
struct sockaddr_storage src_addr;
+ struct net_device *ndev;
struct nvme_ctrl ctrl;
struct work_struct err_work;
@@ -1455,6 +1456,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}
}
+ if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
+ char *iface = nctrl->opts->host_iface;
+ sockptr_t optval = KERNEL_SOCKPTR(iface);
+
+ ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
+ optval, strlen(iface));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to bind to interface %s queue %d err %d\n",
+ iface, qid, ret);
+ goto err_sock;
+ }
+ }
+
queue->hdr_digest = nctrl->opts->hdr_digest;
queue->data_digest = nctrl->opts->data_digest;
if (queue->hdr_digest || queue->data_digest) {
@@ -1973,11 +1988,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return ret;
if (ctrl->icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
goto destroy_admin;
}
- if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
+ if (!nvme_ctrl_sgl_supported(ctrl)) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
goto destroy_admin;
}
@@ -2515,6 +2532,16 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
}
}
+ if (opts->mask & NVMF_OPT_HOST_IFACE) {
+ ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
+ if (!ctrl->ndev) {
+ pr_err("invalid interface passed: %s\n",
+ opts->host_iface);
+ ret = -ENODEV;
+ goto out_free_ctrl;
+ }
+ }
+
if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
ret = -EALREADY;
goto out_free_ctrl;
@@ -2571,7 +2598,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS,
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
.create_ctrl = nvme_tcp_create_ctrl,
};
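
End to end, the new option travels from the connect string through
nvmf_parse_options() into nvme_tcp_alloc_queue(), where every queue's
socket is bound with SO_BINDTODEVICE (recent nvme-cli exposes this as a
connect-time option). The same binding can be illustrated from user space;
the interface name is illustrative and the call normally requires
CAP_NET_RAW:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    const char *iface = "eth0";         /* illustrative interface */
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                   iface, strlen(iface)))
        perror("SO_BINDTODEVICE");      /* EPERM without CAP_NET_RAW */
    close(fd);
    return 0;
}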
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 475dd45c3db4..d95010481fce 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -171,8 +171,8 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
return cb(&zone, idx, data);
}
-static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nvme_zone_report *report;
struct nvme_command c = { };
@@ -180,6 +180,9 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nz, i;
size_t buflen;
+ if (ns->head->ids.csi != NVME_CSI_ZNS)
+ return -EINVAL;
+
report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
if (!report)
return -ENOMEM;
@@ -227,26 +230,6 @@ out_free:
return ret;
}
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
-{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- int srcu_idx, ret;
-
- ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
- if (ns->head->ids.csi == NVME_CSI_ZNS)
- ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
- else
- ret = -EINVAL;
- nvme_put_ns_from_disk(head, srcu_idx);
-
- return ret;
-}
-
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index ebf91fc4c72e..9837e580fa7e 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
+nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dcd49a72f2f3..0cb98f2bbc8c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -162,15 +162,8 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
- u16 status = NVME_SC_INTERNAL;
- struct nvme_effects_log *log;
-
- log = kzalloc(sizeof(*log), GFP_KERNEL);
- if (!log)
- goto out;
-
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
@@ -184,9 +177,45 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+}
- status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
+{
+ log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+}
+
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ struct nvme_effects_log *log;
+ u16 status = NVME_SC_SUCCESS;
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.csi) {
+ case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_nvm(log);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ status = NVME_SC_INVALID_IO_CMD_SET;
+ goto free;
+ }
+ nvmet_get_cmd_effects_nvm(log);
+ nvmet_get_cmd_effects_zns(log);
+ break;
+ default:
+ status = NVME_SC_INVALID_LOG_PAGE;
+ goto free;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+free:
kfree(log);
out:
nvmet_req_complete(req, status);
@@ -313,22 +342,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
-static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
-{
- u16 status = 0;
-
- mutex_lock(&subsys->lock);
- if (!subsys->model_number) {
- subsys->model_number =
- kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
- if (!subsys->model_number)
- status = NVME_SC_INTERNAL;
- }
- mutex_unlock(&subsys->lock);
-
- return status;
-}
-
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -337,14 +350,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
u32 cmd_capsule_size;
u16 status = 0;
- /*
- * If there is no model number yet, set it now. It will then remain
- * stable for the life time of the subsystem.
- */
- if (!subsys->model_number) {
- status = nvmet_set_model_number(subsys);
- if (status)
- goto out;
+ if (!subsys->subsys_discovered) {
+ mutex_lock(&subsys->lock);
+ subsys->subsys_discovered = true;
+ mutex_unlock(&subsys->lock);
}
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -357,9 +366,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
strlen(subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
@@ -415,7 +422,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
- id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
NVME_CTRL_ONCS_WRITE_ZEROES);
@@ -635,6 +642,12 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
goto out;
}
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
+ NVME_NIDT_CSI_LEN,
+ &req->ns->csi, &off);
+ if (status)
+ goto out;
+
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -643,6 +656,23 @@ out:
nvmet_req_complete(req, status);
}
+static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+{
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ nvmet_execute_identify_desclist(req);
+ return true;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_desclist(req);
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -650,19 +680,54 @@ static void nvmet_execute_identify(struct nvmet_req *req)
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_NS:
- return nvmet_execute_identify_ns(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ns(req);
+ default:
+ break;
+ }
+ break;
+ case NVME_ID_CNS_CS_NS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ns(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_CTRL:
- return nvmet_execute_identify_ctrl(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ctrl(req);
+ }
+ break;
+ case NVME_ID_CNS_CS_CTRL:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ctrl(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_NS_ACTIVE_LIST:
- return nvmet_execute_identify_nslist(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_nslist(req);
+ default:
+ break;
+ }
+ break;
case NVME_ID_CNS_NS_DESC_LIST:
- return nvmet_execute_identify_desclist(req);
+ if (nvmet_handle_identify_desclist(req))
+ return;
+ break;
}
- pr_debug("unhandled identify cns %d on qid %d\n",
- req->cmd->identify.cns, req->sq->qid);
- req->error_loc = offsetof(struct nvme_identify, cns);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_cns_error_complete(req);
}
/*
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 65a0cf99f557..273555127188 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1007,13 +1007,26 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
NVME_MINOR(subsys->ver));
}
-static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t
+nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
{
- struct nvmet_subsys *subsys = to_subsys(item);
int major, minor, tertiary = 0;
int ret;
+ if (subsys->subsys_discovered) {
+ if (NVME_TERTIARY(subsys->ver))
+ pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ else
+ pr_err("Can't set version number. %llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
+ return -EINVAL;
+ }
+
/* passthru subsystems use the underlying controller's version */
if (nvmet_passthru_ctrl(subsys))
return -EINVAL;
@@ -1022,35 +1035,84 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
if (ret != 2 && ret != 3)
return -EINVAL;
- down_write(&nvmet_config_sem);
subsys->ver = NVME_VS(major, minor, tertiary);
- up_write(&nvmet_config_sem);
return count;
}
+
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
}
-static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t
+nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
{
- u64 serial;
+ int pos, len = strcspn(page, "\n");
- if (sscanf(page, "%llx\n", &serial) != 1)
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set serial number. %s is already assigned\n",
+ subsys->serial);
return -EINVAL;
+ }
+
+ if (!len || len > NVMET_SN_MAX_SIZE) {
+ pr_err("Serial Number can not be empty or exceed %d Bytes\n",
+ NVMET_SN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos])) {
+ pr_err("Serial Number must contain only ASCII strings\n");
+ return -EINVAL;
+ }
+ }
+
+ memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
down_write(&nvmet_config_sem);
- to_subsys(item)->serial = serial;
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
up_write(&nvmet_config_sem);
- return count;
+ return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
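
The store path above reduces to: take everything before the first newline,
require between 1 and NVMET_SN_MAX_SIZE printable-ASCII bytes, and
space-pad the result into the fixed-width field. A stand-alone sketch,
assuming NVMET_SN_MAX_SIZE is 20 (the size of id->sn):

#include <stdio.h>
#include <string.h>

#define SN_MAX 20   /* assumed value of NVMET_SN_MAX_SIZE */

static int set_serial(char serial[SN_MAX], const char *page)
{
    size_t len = strcspn(page, "\n");   /* ignore a trailing newline */

    if (!len || len > SN_MAX)
        return -1;                      /* empty or too long */
    for (size_t pos = 0; pos < len; pos++)
        if (page[pos] < 0x20 || page[pos] > 0x7e)
            return -1;                  /* printable ASCII only */
    memcpy(serial, page, len);
    memset(serial + len, ' ', SN_MAX - len);    /* space padding */
    return 0;
}

int main(void)
{
    char sn[SN_MAX];

    if (!set_serial(sn, "SN1234\n"))    /* illustrative value */
        printf("\"%.*s\"\n", SN_MAX, sn);
    return 0;
}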
@@ -1118,20 +1180,8 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
- int ret;
-
- mutex_lock(&subsys->lock);
- ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
- subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
- mutex_unlock(&subsys->lock);
-
- return ret;
-}
-/* See Section 1.5 of NVMe 1.4 */
-static bool nvmet_is_ascii(const char c)
-{
- return c >= 0x20 && c <= 0x7e;
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}
static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
@@ -1139,7 +1189,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
{
int pos = 0, len;
- if (subsys->model_number) {
+ if (subsys->subsys_discovered) {
pr_err("Can't set model number. %s is already assigned\n",
subsys->model_number);
return -EINVAL;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1853db38b682..ac7210a3ea1c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -43,43 +44,34 @@ DECLARE_RWSEM(nvmet_ana_sem);
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
- u16 status;
-
switch (errno) {
case 0:
- status = NVME_SC_SUCCESS;
- break;
+ return NVME_SC_SUCCESS;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
- break;
+ return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
case -EREMOTEIO:
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
- break;
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
- break;
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
break;
case -ENODATA:
req->error_loc = offsetof(struct nvme_rw_command, nsid);
- status = NVME_SC_ACCESS_DENIED;
- break;
+ return NVME_SC_ACCESS_DENIED;
case -EIO:
fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
}
-
- return status;
}
u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
@@ -122,11 +114,11 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
return 0;
}
-static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- unsigned long nsid = 0;
struct nvmet_ns *cur;
unsigned long idx;
+ u32 nsid = 0;
xa_for_each(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
@@ -141,14 +133,13 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_req *req;
mutex_lock(&ctrl->lock);
while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -388,10 +379,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
- bool cmd_seen = ctrl->cmd_seen;
+ bool reset_tbkas = ctrl->reset_tbkas;
- ctrl->cmd_seen = false;
- if (cmd_seen) {
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -412,7 +403,6 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
@@ -693,6 +683,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
uuid_gen(&ns->uuid);
ns->buffered_io = false;
+ ns->csi = NVME_CSI_NVM;
return ns;
}
@@ -804,6 +795,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us a keep-alive during this period, hence reset the
+ * traffic-based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
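
With the rename, the flag's two producers are explicit: normal command
traffic sets it in nvmet_req_init(), and a slow queue teardown now sets it
here, so the next keep-alive expiry re-arms the timer instead of tearing
the controller down. A sketch of that decision, assuming the timer
escalates through the usual nvmet_ctrl_fatal_error() path:

static void my_keep_alive_timer_fired(struct nvmet_ctrl *ctrl)
{
    bool reset_tbkas = ctrl->reset_tbkas;

    ctrl->reset_tbkas = false;
    if (reset_tbkas) {
        /* traffic (or a teardown) was seen: re-arm, don't expire */
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
        return;
    }
    /* host genuinely idle: treat as a keep-alive timeout */
    nvmet_ctrl_fatal_error(ctrl);
}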
@@ -888,10 +886,18 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
- if (req->ns->file)
- return nvmet_file_parse_io_cmd(req);
-
- return nvmet_bdev_parse_io_cmd(req);
+ switch (req->ns->csi) {
+ case NVME_CSI_NVM:
+ if (req->ns->file)
+ return nvmet_file_parse_io_cmd(req);
+ return nvmet_bdev_parse_io_cmd(req);
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return nvmet_bdev_zns_parse_io_cmd(req);
+ return NVME_SC_INVALID_IO_CMD_SET;
+ default:
+ return NVME_SC_INVALID_IO_CMD_SET;
+ }
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -952,7 +958,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
}
if (sq->ctrl)
- sq->ctrl->cmd_seen = true;
+ sq->ctrl->reset_tbkas = true;
return true;
@@ -998,19 +1004,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
return req->transfer_len - req->metadata_len;
}
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
{
- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
nvmet_data_transfer_len(req));
if (!req->sg)
goto out_err;
if (req->metadata_len) {
- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
&req->metadata_sg_cnt, req->metadata_len);
if (!req->metadata_sg)
goto out_free_sg;
}
+
+ req->p2p_dev = p2p_dev;
+
return 0;
out_free_sg:
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1018,25 +1028,19 @@ out_err:
return -ENOMEM;
}
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
- if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
- return false;
-
- if (req->sq->ctrl && req->sq->qid && req->ns) {
- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
- req->ns->nsid);
- if (req->p2p_dev)
- return true;
- }
-
- req->p2p_dev = NULL;
- return false;
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
return 0;
req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1065,6 +1069,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
if (req->metadata_sg)
pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
} else {
sgl_free(req->sg);
if (req->metadata_sg)
@@ -1113,6 +1118,17 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
+static inline bool nvmet_css_supported(u8 cc_css)
+{
+ switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+ case NVME_CC_CSS_NVM:
+ case NVME_CC_CSS_CSI:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
lockdep_assert_held(&ctrl->lock);
@@ -1132,7 +1148,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
if (nvmet_cc_mps(ctrl->cc) != 0 ||
nvmet_cc_ams(ctrl->cc) != 0 ||
- nvmet_cc_css(ctrl->cc) != 0) {
+ !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
ctrl->csts = NVME_CSTS_CFS;
return;
}
@@ -1183,6 +1199,8 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
/* command sets supported: NVMe command set: */
ctrl->cap = (1ULL << 37);
+ /* Controller supports one or more I/O Command Sets */
+ ctrl->cap |= (1ULL << 43);
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
@@ -1352,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_LIST_HEAD(&ctrl->async_events);
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1493,6 +1512,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type)
{
struct nvmet_subsys *subsys;
+ char serial[NVMET_SN_MAX_SIZE / 2];
+ int ret;
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
@@ -1500,7 +1521,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&subsys->serial, sizeof(subsys->serial));
+ get_random_bytes(&serial, sizeof(serial));
+ bin2hex(subsys->serial, &serial, sizeof(serial));
+
+ subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+ if (!subsys->model_number) {
+ ret = -ENOMEM;
+ goto free_subsys;
+ }
switch (type) {
case NVME_NQN_NVME:
@@ -1511,15 +1539,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
break;
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
- kfree(subsys);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto free_mn;
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
- kfree(subsys);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto free_mn;
}
subsys->cntlid_min = NVME_CNTLID_MIN;
subsys->cntlid_max = NVME_CNTLID_MAX;
@@ -1531,6 +1559,12 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
INIT_LIST_HEAD(&subsys->hosts);
return subsys;
+
+free_mn:
+ kfree(subsys->model_number);
+free_subsys:
+ kfree(subsys);
+ return ERR_PTR(ret);
}
static void nvmet_subsys_free(struct kref *ref)
@@ -1569,11 +1603,15 @@ static int __init nvmet_init(void)
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq)
+ return -ENOMEM;
+
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
if (!buffered_io_wq) {
error = -ENOMEM;
- goto out;
+ goto out_free_zbd_work_queue;
}
error = nvmet_init_discovery();
@@ -1589,7 +1627,8 @@ out_exit_discovery:
nvmet_exit_discovery();
out_free_work_queue:
destroy_workqueue(buffered_io_wq);
-out:
+out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
return error;
}
@@ -1599,6 +1638,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
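For reference, a minimal standalone sketch of the CC.CSS check added to core.c above. The field encodings are assumed from include/linux/nvme.h (CSS occupies bits 6:4 of CC; NVM maps to 0, CSI to 6) and should be verified against the tree:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NVME_CC_CSS_SHIFT 4                  /* CC.CSS is bits 6:4 (assumed) */
    #define NVME_CC_CSS_NVM   (0 << NVME_CC_CSS_SHIFT)
    #define NVME_CC_CSS_CSI   (6 << NVME_CC_CSS_SHIFT)

    /* mirrors nvmet_cc_css(): extract the 3-bit CSS field from CC */
    static uint8_t cc_css(uint32_t cc)
    {
        return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
    }

    /* mirrors nvmet_css_supported(): shift the field back for comparison */
    static bool css_supported(uint8_t css)
    {
        switch (css << NVME_CC_CSS_SHIFT) {
        case NVME_CC_CSS_NVM:   /* 0x00: NVM command set only */
        case NVME_CC_CSS_CSI:   /* 0x60: all supported I/O command sets */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        assert(css_supported(cc_css(0x00460061)));               /* CSS = 6 */
        assert(!css_supported(cc_css(1 << NVME_CC_CSS_SHIFT)));  /* CSS = 1 */
        return 0;
    }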
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index fc3645fc2c24..7aa62bc6ae84 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -244,7 +244,6 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
- const char model[] = "Linux";
u16 status = 0;
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -262,11 +261,10 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
goto out;
}
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memset(id->fr, ' ', sizeof(id->fr));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
+ strlen(ctrl->subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
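The identify path now returns the subsystem's preformatted 20-character hex serial verbatim and space-pads the configurable model string. A userspace sketch of the same formatting, with bin2hex() and memcpy_and_pad() reimplemented here purely for illustration (names and sizes follow the patch):

    #include <stdio.h>
    #include <string.h>

    #define SN_MAX 20   /* NVMET_SN_MAX_SIZE: id->sn is 20 bytes, not NUL-terminated */
    #define MN_MAX 40   /* id->mn is 40 bytes, space-padded */

    /* like the kernel's bin2hex(): high nibble first, 10 bytes -> 20 chars */
    static void bin2hex(char *dst, const unsigned char *src, size_t n)
    {
        static const char hex[] = "0123456789abcdef";

        while (n--) {
            *dst++ = hex[*src >> 4];
            *dst++ = hex[*src & 0xf];
            src++;
        }
    }

    /* like the kernel's memcpy_and_pad(): copy, then fill the remainder */
    static void memcpy_and_pad(void *dst, size_t dlen,
                               const void *src, size_t slen, int pad)
    {
        if (slen > dlen)
            slen = dlen;
        memcpy(dst, src, slen);
        memset((char *)dst + slen, pad, dlen - slen);
    }

    int main(void)
    {
        unsigned char raw[SN_MAX / 2] = { 0xde, 0xad, 0xbe, 0xef, 0x00,
                                          0x11, 0x22, 0x33, 0x44, 0x55 };
        char sn[SN_MAX], mn[MN_MAX];

        bin2hex(sn, raw, sizeof(raw));      /* "deadbeef001122334455" */
        memcpy_and_pad(mn, sizeof(mn), "Linux", 5, ' ');
        printf("sn=%.20s mn=%.40s\n", sn, mn);
        return 0;
    }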
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 19e113240fff..22b5108168a6 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2511,13 +2511,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
int ret;
/*
- * if there is no nvmet mapping to the targetport there
- * shouldn't be requests. just terminate them.
- */
- if (!tgtport->pe)
- goto transport_error;
-
- /*
* Fused commands are currently not supported in the linux
* implementation.
*
@@ -2544,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- fod->req.port = tgtport->pe->port;
+ if (tgtport->pe)
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 429263ca9b97..0fc2781ab970 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -47,6 +47,14 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nows = to0based(ql->io_opt / ql->logical_block_size);
}
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev) {
+ blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+ ns->bdev = NULL;
+ }
+}
+
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
@@ -86,15 +94,15 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
nvmet_bdev_ns_enable_integrity(ns);
- return 0;
-}
-
-void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
-{
- if (ns->bdev) {
- blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
- ns->bdev = NULL;
+ if (bdev_is_zoned(ns->bdev)) {
+ if (!nvmet_bdev_zns_enable(ns)) {
+ nvmet_bdev_ns_disable(ns);
+ return -EINVAL;
+ }
+ ns->csi = NVME_CSI_ZNS;
}
+
+ return 0;
}
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
@@ -102,7 +110,7 @@ void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
ns->size = i_size_read(ns->bdev->bd_inode);
}
-static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -164,8 +172,7 @@ static void nvmet_bio_done(struct bio *bio)
struct nvmet_req *req = bio->bi_private;
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
- if (bio != &req->b.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -174,11 +181,10 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
{
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
- struct block_device *bdev = req->ns->bdev;
int rc;
size_t resid, len;
- bi = bdev_get_integrity(bdev);
+ bi = bdev_get_integrity(req->ns->bdev);
if (unlikely(!bi)) {
pr_err("Unable to locate bio_integrity\n");
return -ENODEV;
@@ -430,9 +436,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 7fdbdc496597..1dd1a0fe2e81 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -385,9 +385,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index cb30cb942e1d..a5c4a1865026 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
- clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+ return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
+ ctrl->ctrl.queue_count = 1;
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
return 0;
out_cleanup_queue:
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure should never happen */
- WARN_ON_ONCE(1);
+ if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ /* state change failure for non-deleted ctrl? */
+ WARN_ON_ONCE(1);
return;
}
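The test_and_clear_bit() change in loop.c above makes queue teardown idempotent. A minimal C11 analogue of the pattern, with atomic_exchange() standing in for test_and_clear_bit() (names are illustrative):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool q_live = true;   /* models the NVME_LOOP_Q_LIVE flag */
    static int teardowns;

    /* only the caller that actually flips the flag from live to dead runs
     * the teardown, so repeated or racing destroy calls become no-ops */
    static void destroy_admin_queue(void)
    {
        if (!atomic_exchange(&q_live, false))
            return;     /* already destroyed */
        teardowns++;    /* nvmet_sq_destroy(), blk_cleanup_queue(), ... */
    }

    int main(void)
    {
        destroy_admin_queue();
        destroy_admin_queue();  /* second call is a harmless no-op */
        assert(teardowns == 1);
        return 0;
    }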
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d69a409515d6..06dd3d537f07 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -28,6 +28,7 @@
#define NVMET_NO_ERROR_LOC ((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL "Linux"
#define NVMET_MN_MAX_SIZE 40
+#define NVMET_SN_MAX_SIZE 20
/*
* Supported optional AENs:
@@ -82,6 +83,7 @@ struct nvmet_ns {
struct pci_dev *p2p_dev;
int pi_type;
int metadata_size;
+ u8 csi;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -167,7 +169,7 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
- bool cmd_seen;
+ bool reset_tbkas;
struct mutex lock;
u64 cap;
@@ -217,7 +219,7 @@ struct nvmet_subsys {
struct xarray namespaces;
unsigned int nr_namespaces;
- unsigned int max_nsid;
+ u32 max_nsid;
u16 cntlid_min;
u16 cntlid_max;
@@ -229,7 +231,8 @@ struct nvmet_subsys {
u16 max_qid;
u64 ver;
- u64 serial;
+ char serial[NVMET_SN_MAX_SIZE];
+ bool subsys_discovered;
char *subsysnqn;
bool pi_support;
@@ -247,6 +250,10 @@ struct nvmet_subsys {
unsigned int admin_timeout;
unsigned int io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ u8 zasl;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -332,6 +339,12 @@ struct nvmet_req {
struct work_struct work;
bool use_workqueue;
} p;
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct {
+ struct bio inline_bio;
+ struct work_struct zmgmt_work;
+ } z;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
int sg_cnt;
int metadata_sg_cnt;
@@ -351,6 +364,7 @@ struct nvmet_req {
};
extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *zbd_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
@@ -400,6 +414,7 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
@@ -527,6 +542,14 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
@@ -622,4 +645,18 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
+static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
+{
+ pr_debug("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
+{
+ if (bio != &req->b.inline_bio)
+ bio_put(bio);
+}
+
#endif /* _NVMET_H */
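nvmet_req_bio_put() above encodes a subtle ownership rule: the bio embedded in the request (set up with bio_init()) is not a counted allocation, so only bios that came from bio_alloc() may be dropped with bio_put(). A userspace analogue of the embedded-vs-heap pattern (names are illustrative):

    #include <assert.h>
    #include <stdlib.h>

    struct req {
        char inline_buf[128];   /* plays the role of req->b.inline_bio */
        char *buf;
    };

    static void req_get_buf(struct req *req, size_t len)
    {
        /* small requests use the embedded buffer, large ones the heap */
        req->buf = len <= sizeof(req->inline_buf) ?
                   req->inline_buf : malloc(len);
    }

    static void req_put_buf(struct req *req)
    {
        if (req->buf != req->inline_buf)    /* cf. nvmet_req_bio_put() */
            free(req->buf);
    }

    int main(void)
    {
        struct req r;

        req_get_buf(&r, 64);    /* embedded: no allocation */
        req_put_buf(&r);        /* no-op */
        req_get_buf(&r, 4096);  /* falls back to the heap */
        req_put_buf(&r);        /* freed exactly once */
        return 0;
    }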
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 39b1473f7204..fced52de33ce 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -206,8 +206,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
sg->offset) < sg->length) {
- if (bio != &req->p.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
return -EINVAL;
}
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 7d607f435e36..891174ccd44b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1257,7 +1257,7 @@ out_err:
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp_init_attr qp_attr;
+ struct ib_qp_init_attr qp_attr = { };
struct nvmet_rdma_device *ndev = queue->dev;
int nr_cqe, ret, i, factor;
@@ -1275,7 +1275,6 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
goto out;
}
- memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.qp_context = queue;
qp_attr.event_handler = nvmet_rdma_qp_event;
qp_attr.send_cq = queue->cq;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f9f34f6caf5e..d8aceef83284 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* nvmet_req_init is completed.
*/
if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
- len && len < cmd->req.port->inline_data_size &&
+ len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
}
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
new file mode 100644
index 000000000000..17f8b7a45f21
--- /dev/null
+++ b/drivers/nvme/target/zns.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe ZNS-ZBD command implementation.
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/nvme.h>
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+/*
+ * We set the Memory Page Size Minimum (MPSMIN) of the target controller
+ * to 0, to which nvme_enable_ctrl() adds 12, giving a page_shift value
+ * of 12 (2^12 = 4k). Use that same shift of 12 when calculating the ZASL.
+ */
+#define NVMET_MPSMIN_SHIFT 12
+
+static inline u8 nvmet_zasl(unsigned int zone_append_sects)
+{
+ /*
+ * Zone Append Size Limit (zasl) is expressed as a power-of-2 exponent
+ * in units of the minimum memory page size (i.e. 2^12 = 4k).
+ */
+ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
+}
+
+static int validate_conv_zones_cb(struct blk_zone *z,
+ unsigned int i, void *data)
+{
+ if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+{
+ struct request_queue *q = ns->bdev->bd_disk->queue;
+ u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
+ struct gendisk *bd_disk = ns->bdev->bd_disk;
+ int ret;
+
+ if (ns->subsys->zasl) {
+ if (ns->subsys->zasl > zasl)
+ return false;
+ }
+ ns->subsys->zasl = zasl;
+
+ /*
+ * Generic zoned block devices may have a smaller last zone, which ZNS
+ * does not allow. Exclude zoned drives whose last zone is smaller than
+ * the other zones.
+ */
+ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
+ return false;
+ /*
+ * ZNS does not define a conventional zone type. If the underlying
+ * device has a bitmap set indicating the existence of conventional
+ * zones, reject the device. Otherwise, use report zones to detect if
+ * the device has conventional zones.
+ */
+ if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+ return false;
+
+ ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
+ validate_conv_zones_cb, NULL);
+ if (ret < 0)
+ return false;
+
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ return true;
+}
+
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+{
+ u8 zasl = req->sq->ctrl->subsys->zasl;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl_zns *id;
+ u16 status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (ctrl->ops->get_mdts)
+ id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
+ else
+ id->zasl = zasl;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+{
+ struct nvme_id_ns_zns *id_zns;
+ u64 zsze;
+ u16 status;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
+ if (!id_zns) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto done;
+
+ if (!bdev_is_zoned(req->ns->bdev)) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto done;
+ }
+
+ nvmet_ns_revalidate(req->ns);
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ req->ns->blksize_shift;
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+ id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
+ id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+done:
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+ kfree(id_zns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ }
+
+ if (out_bufsize < sizeof(struct nvme_zone_report)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.pr) {
+ case 0:
+ case 1:
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.zrasf) {
+ case NVME_ZRASF_ZONE_REPORT_ALL:
+ case NVME_ZRASF_ZONE_STATE_EMPTY:
+ case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_CLOSED:
+ case NVME_ZRASF_ZONE_STATE_FULL:
+ case NVME_ZRASF_ZONE_STATE_READONLY:
+ case NVME_ZRASF_ZONE_STATE_OFFLINE:
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+struct nvmet_report_zone_data {
+ struct nvmet_req *req;
+ u64 out_buf_offset;
+ u64 out_nr_zones;
+ u64 nr_zones;
+ u8 zrasf;
+};
+
+static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ static const unsigned int nvme_zrasf_to_blk_zcond[] = {
+ [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY,
+ [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED,
+ [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
+ [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL,
+ [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE,
+ };
+ struct nvmet_report_zone_data *rz = d;
+
+ if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
+ z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
+ return 0;
+
+ if (rz->nr_zones < rz->out_nr_zones) {
+ struct nvme_zone_descriptor zdesc = { };
+ u16 status;
+
+ zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
+ zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
+ zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
+ zdesc.za = z->reset ? 1 << 2 : 0;
+ zdesc.zs = z->cond << 4;
+ zdesc.zt = z->type;
+
+ status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
+ sizeof(zdesc));
+ if (status)
+ return -EINVAL;
+
+ rz->out_buf_offset += sizeof(zdesc);
+ }
+
+ rz->nr_zones++;
+
+ return 0;
+}
+
+static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+
+ return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+ (sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
+}
+
+static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
+{
+ if (bufsize <= sizeof(struct nvme_zone_report))
+ return 0;
+
+ return (bufsize - sizeof(struct nvme_zone_report)) /
+ sizeof(struct nvme_zone_descriptor);
+}
+
+static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ __le64 nr_zones;
+ u16 status;
+ int ret;
+ struct nvmet_report_zone_data rz_data = {
+ .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
+ /* leave room for the report zone header */
+ .out_buf_offset = sizeof(struct nvme_zone_report),
+ .zrasf = req->cmd->zmr.zrasf,
+ .nr_zones = 0,
+ .req = req,
+ };
+
+ status = nvmet_bdev_validate_zone_mgmt_recv(req);
+ if (status)
+ goto out;
+
+ if (!req_slba_nr_zones) {
+ status = NVME_SC_SUCCESS;
+ goto out;
+ }
+
+ ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
+ nvmet_bdev_report_zone_cb, &rz_data);
+ if (ret < 0) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /*
+ * When the partial bit is set, nr_zones must indicate the number of
+ * zone descriptors actually transferred.
+ */
+ if (req->cmd->zmr.pr)
+ rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);
+
+ nr_zones = cpu_to_le64(rz_data.nr_zones);
+ status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static inline enum req_opf zsa_req_op(u8 zsa)
+{
+ switch (zsa) {
+ case NVME_ZONE_OPEN:
+ return REQ_OP_ZONE_OPEN;
+ case NVME_ZONE_CLOSE:
+ return REQ_OP_ZONE_CLOSE;
+ case NVME_ZONE_FINISH:
+ return REQ_OP_ZONE_FINISH;
+ case NVME_ZONE_RESET:
+ return REQ_OP_ZONE_RESET;
+ default:
+ return REQ_OP_LAST;
+ }
+}
+
+static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
+{
+ switch (ret) {
+ case 0:
+ return NVME_SC_SUCCESS;
+ case -EINVAL:
+ case -EIO:
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ default:
+ return NVME_SC_INTERNAL;
+ }
+}
+
+struct nvmet_zone_mgmt_send_all_data {
+ unsigned long *zbitmap;
+ struct nvmet_req *req;
+};
+
+static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ struct nvmet_zone_mgmt_send_all_data *data = d;
+
+ switch (zsa_req_op(data->req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_OPEN:
+ switch (z->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_FINISH:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_bit(i, data->zbitmap);
+
+ return 0;
+}
+
+static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
+{
+ struct block_device *bdev = req->ns->bdev;
+ unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio *bio = NULL;
+ sector_t sector = 0;
+ int ret;
+ struct nvmet_zone_mgmt_send_all_data d = {
+ .req = req,
+ };
+
+ d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
+ GFP_NOIO, q->node);
+ if (!d.zbitmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Scan and build bitmap of the eligible zones */
+ ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
+ if (ret != nr_zones) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ } else {
+ /* We scanned all the zones */
+ ret = 0;
+ }
+
+ while (sector < get_capacity(bdev->bd_disk)) {
+ if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+ bio = blk_next_bio(bio, 0, GFP_KERNEL);
+ bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, bdev);
+ /* This may take a while, so be nice to others */
+ cond_resched();
+ }
+ sector += blk_queue_zone_sectors(q);
+ }
+
+ if (bio) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+
+out:
+ kfree(d.zbitmap);
+
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+}
+
+static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
+{
+ int ret;
+
+ switch (zsa_req_op(req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_RESET:
+ ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
+ get_capacity(req->ns->bdev->bd_disk),
+ GFP_KERNEL);
+ if (ret < 0)
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return nvmet_bdev_zone_mgmt_emulate_all(req);
+ default:
+ /* this is needed to quiet a compiler warning */
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
+ enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+ struct block_device *bdev = req->ns->bdev;
+ sector_t zone_sectors = bdev_zone_sectors(bdev);
+ u16 status = NVME_SC_SUCCESS;
+ int ret;
+
+ if (op == REQ_OP_LAST) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ goto out;
+ }
+
+ /* when the select-all bit is set, the slba field is ignored */
+ if (req->cmd->zms.select_all) {
+ status = nvmet_bdev_execute_zmgmt_send_all(req);
+ goto out;
+ }
+
+ if (sect >= get_capacity(bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (zone_sectors - 1)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ if (ret < 0)
+ status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ if (bio->bi_status == BLK_STS_OK) {
+ req->cqe->result.u64 =
+ nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
+ }
+
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+ nvmet_req_bio_put(req, bio);
+}
+
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ u16 status = NVME_SC_SUCCESS;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ struct bio *bio;
+ int sg_cnt;
+
+ /* Request is completed on length mismatch in nvmet_check_transfer_len() */
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->z.inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ } else {
+ bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ }
+
+ bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
+ bio_set_dev(bio, req->ns->bdev);
+ bio->bi_iter.bi_sector = sect;
+ bio->bi_private = req;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ bio->bi_opf |= REQ_FUA;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
+ struct page *p = sg_page(sg);
+ unsigned int l = sg->length;
+ unsigned int o = sg->offset;
+ unsigned int ret;
+
+ ret = bio_add_zone_append_page(bio, p, l, o);
+ if (ret != sg->length) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_bio;
+ }
+ total_len += sg->length;
+ }
+
+ if (total_len != nvmet_rw_data_len(req)) {
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ goto out_put_bio;
+ }
+
+ submit_bio(bio);
+ return;
+
+out_put_bio:
+ nvmet_req_bio_put(req, bio);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_zone_append:
+ req->execute = nvmet_bdev_execute_zone_append;
+ return 0;
+ case nvme_cmd_zone_mgmt_recv:
+ req->execute = nvmet_bdev_execute_zone_mgmt_recv;
+ return 0;
+ case nvme_cmd_zone_mgmt_send:
+ req->execute = nvmet_bdev_execute_zone_mgmt_send;
+ return 0;
+ default:
+ return nvmet_bdev_parse_io_cmd(req);
+ }
+}
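Two size computations in the new zns.c deserve a worked example: ZASL is a power-of-two exponent in units of the 4k minimum page size, and the Zone Management Receive buffer size comes from NUMD, a 0-based dword count. A standalone sketch with hypothetical device values (the 64-byte report header and 64-byte descriptors follow the NVMe ZNS layout):

    #include <assert.h>
    #include <stdint.h>

    #define MPSMIN_SHIFT 12                 /* 4k minimum memory page size */

    static uint8_t ilog2_u32(uint32_t v)    /* stand-in for the kernel's ilog2() */
    {
        uint8_t r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    /* mirrors nvmet_zasl(): sectors are 512 bytes, pages are 4k */
    static uint8_t zasl(uint32_t zone_append_sects)
    {
        return ilog2_u32(zone_append_sects >> (MPSMIN_SHIFT - 9));
    }

    int main(void)
    {
        /* hypothetical: the device allows 1024-sector (512k) appends */
        assert(zasl(1024) == 7);            /* 2^7 * 4k = 512k */

        /* NUMD is 0-based dwords: numd = 63 -> (63 + 1) << 2 = 256 bytes */
        uint32_t out_bufsize = (63 + 1) << 2;

        assert(out_bufsize == 256);
        /* minus the 64-byte header, that leaves room for three descriptors */
        assert((out_bufsize - 64) / 64 == 3);
        return 0;
    }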
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e366218d6736..b335c077f215 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -893,6 +893,16 @@ static int _set_required_opps(struct device *dev,
if (!required_opp_tables)
return 0;
+ /*
+ * We only support genpd's OPPs in the "required-opps" for now, as we
+ * don't know much about other use cases. Error out if the required OPP
+ * doesn't belong to a genpd.
+ */
+ if (unlikely(!required_opp_tables[0]->is_genpd)) {
+ dev_err(dev, "required-opps don't belong to a genpd\n");
+ return -ENOENT;
+ }
+
/* required-opps not fully initialized yet */
if (lazy_linking_pending(opp_table))
return -EBUSY;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index c582a9ca397b..d298e38aaf7e 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -197,21 +197,8 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
required_opp_tables[i] = _find_table_of_opp_np(required_np);
of_node_put(required_np);
- if (IS_ERR(required_opp_tables[i])) {
+ if (IS_ERR(required_opp_tables[i]))
lazy = true;
- continue;
- }
-
- /*
- * We only support genpd's OPPs in the "required-opps" for now,
- * as we don't know how much about other cases. Error out if the
- * required OPP doesn't belong to a genpd.
- */
- if (!required_opp_tables[i]->is_genpd) {
- dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
- required_np);
- goto free_required_tables;
- }
}
/* Let's do the linking later on */
@@ -379,13 +366,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
struct dev_pm_opp *opp;
int i, ret;
- /*
- * We only support genpd's OPPs in the "required-opps" for now,
- * as we don't know much about other cases.
- */
- if (!new_table->is_genpd)
- return;
-
mutex_lock(&opp_table_lock);
list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
@@ -433,8 +413,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
/* All required opp-tables found, remove from lazy list */
if (!lazy) {
- list_del(&opp_table->lazy);
- INIT_LIST_HEAD(&opp_table->lazy);
+ list_del_init(&opp_table->lazy);
list_for_each_entry(opp, &opp_table->opp_list, node)
_required_opps_available(opp, opp_table->required_opp_count);
@@ -874,7 +853,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
return ERR_PTR(-ENOMEM);
ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
- if (ret < 0 && !opp_table->is_genpd) {
+ if (ret < 0) {
dev_err(dev, "%s: opp key field not found\n", __func__);
goto free_opp;
}
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index eca805c1a023..9e6ce0dc2f53 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
@@ -38,6 +39,6 @@ ifdef CONFIG_ACPI
ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
-obj-$(CONFIG_ARM64) += pcie-tegra194.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
endif
endif
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000000..c2de6ed4d86f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
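tegra194_map_bus() above programs a single outbound iATU window per config access, with the ECAM window split into config space at cfg->win, iATU registers at +256K and DBI at +512K. A sketch of the routing-ID packing it relies on, using the usual DesignWare ATU field positions (bus 31:24, device 23:19, function 18:16 — assumed here; check pcie-designware.h):

    #include <assert.h>
    #include <stdint.h>

    /* assumed encodings of PCIE_ATU_BUS/DEV/FUNC */
    #define ATU_BUS(b)  (((uint32_t)(b) & 0xff) << 24)
    #define ATU_DEV(d)  (((uint32_t)(d) & 0x1f) << 19)
    #define ATU_FUNC(f) (((uint32_t)(f) & 0x7) << 16)

    int main(void)
    {
        /* bus 5, device 0, function 0 -> CFG1 target address 0x05000000 */
        uint32_t busdev = ATU_BUS(5) | ATU_DEV(0) | ATU_FUNC(0);

        assert(busdev == 0x05000000);

        /* bus 1, device 2, function 3 */
        assert((ATU_BUS(1) | ATU_DEV(2) | ATU_FUNC(3)) == 0x01130000);
        return 0;
    }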
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index bafd2c6ab3c2..504669e3afe0 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -22,8 +22,6 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -247,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
@@ -313,104 +293,6 @@ struct tegra_pcie_dw_of_data {
enum dw_pcie_device_mode mode;
};
-#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-struct tegra194_pcie_ecam {
- void __iomem *config_base;
- void __iomem *iatu_base;
- void __iomem *dbi_base;
-};
-
-static int tegra194_acpi_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct tegra194_pcie_ecam *pcie_ecam;
-
- pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
- if (!pcie_ecam)
- return -ENOMEM;
-
- pcie_ecam->config_base = cfg->win;
- pcie_ecam->iatu_base = cfg->win + SZ_256K;
- pcie_ecam->dbi_base = cfg->win + SZ_512K;
- cfg->priv = pcie_ecam;
-
- return 0;
-}
-
-static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
- u32 val, u32 reg)
-{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- writel(val, pcie_ecam->iatu_base + offset + reg);
-}
-
-static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
- PCIE_ATU_LOWER_BASE);
- atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
- PCIE_ATU_UPPER_BASE);
- atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
- PCIE_ATU_LOWER_TARGET);
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
- PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
-
-static void __iomem *tegra194_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
- u32 busdev;
- int type;
-
- if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
- return NULL;
-
- if (bus->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- return pcie_ecam->dbi_base + where;
- else
- return NULL;
- }
-
- busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
- if (bus->parent->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- type = PCIE_ATU_TYPE_CFG0;
- else
- return NULL;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- }
-
- program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
- SZ_256K);
-
- return pcie_ecam->config_base + where;
-}
-
-const struct pci_ecam_ops tegra194_pcie_ops = {
- .init = tegra194_acpi_init,
- .pci_ops = {
- .map_bus = tegra194_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
-
-#ifdef CONFIG_PCIE_TEGRA194
-
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
return container_of(pci, struct tegra_pcie_dw, pci);
@@ -694,6 +576,24 @@ static struct pci_ops tegra_pci_ops = {
};
#if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+ 0x1d8,
+ 0x1a8,
+ 0x1a8,
+ 0x1a8,
+ 0x1c4,
+ 0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+ 0x1dc,
+ 0x1ac,
+ 0x1ac,
+ 0x1ac,
+ 0x1c8,
+ 0x1dc
+};
+
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -2411,5 +2311,3 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
-
-#endif /* CONFIG_PCIE_TEGRA194 */
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 051b48bd7985..e3f5e7ab7606 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
udelay(PIO_RETRY_DELAY);
}
- dev_err(dev, "config read/write timed out\n");
+ dev_err(dev, "PIO read/write transfer time out\n");
return -ETIMEDOUT;
}
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
return true;
}
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+ * Trying to start a new PIO transfer when the previous one has not
+ * completed causes an External Abort on the CPU, which results in a kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+ * concurrent calls. But because a PIO transfer may take about 1.5s
+ * when the link is down or the card is disconnected,
+ * advk_pcie_wait_pio() can time out and return before the transfer completes.
+ *
+ * Some versions of ARM Trusted Firmware handle this External Abort at
+ * the EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie)) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (where % size)
return PCIBIOS_SET_FAILED;
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
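The reordering above is the crux of the aardvark fix: never touch PIO_START while a transfer may still be in flight, and clear the DONE status (PIO_ISR) only immediately before starting a new transfer. A condensed model of the resulting access pattern (registers simulated; names as in the driver):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t pio_start;  /* models the PIO_START register */

    /* mirrors advk_pcie_pio_is_running(): a still-set PIO_START means the
     * previous transfer has not completed, and kicking off another would
     * raise an SError, so the caller must fail the config access instead */
    static bool pio_is_running(void)
    {
        return pio_start != 0;
    }

    static int pio_config_access(void)
    {
        if (pio_is_running())
            return -1;          /* PCIBIOS_SET_FAILED in the driver */

        /* program control/address/data registers, clear the stale DONE
         * status (PIO_ISR), then start the transfer */
        pio_start = 1;

        /* advk_pcie_wait_pio() polls here; on completion the hardware
         * clears PIO_START (simulated by the next line) */
        pio_start = 0;
        return 0;
    }

    int main(void)
    {
        assert(pio_config_access() == 0);

        pio_start = 1;          /* simulate a transfer stuck in flight */
        assert(pio_config_access() == -1);
        return 0;
    }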
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6511648271b2..bebe3eeebc4e 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3476,6 +3476,9 @@ static void __exit exit_hv_pci_drv(void)
static int __init init_hv_pci_drv(void)
{
+ if (!hv_is_hyperv_initialized())
+ return -ENODEV;
+
/* Set the invalid domain number's bit, so it will not be used */
set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index da5b414d585a..a143b02b2dcd 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
#endif
}
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+ if (dev && dev->of_node)
+ return of_get_property(dev->of_node, "msi-map", NULL);
+ return false;
+}
+
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
{
@@ -346,6 +353,8 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
dev_node);
*io_base = range.cpu_addr;
+ } else if (resource_type(res) == IORESOURCE_MEM) {
+ res->flags &= ~IORESOURCE_MEM_64;
}
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b717680377a9..8d4ebe095d0c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1900,11 +1900,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
int err;
int i, bars = 0;
- if (atomic_inc_return(&dev->enable_cnt) > 1) {
- pci_update_current_state(dev, dev->current_state);
- return 0; /* already enabled */
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
bridge = pci_upstream_bridge(dev);
if (bridge)
pci_enable_bridge(bridge);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3a62d09b8869..275204646c68 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
pci_set_bus_msi_domain(bus);
- if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+ !pci_host_of_has_msi_map(parent))
bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
if (!parent)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dcb229de1acb..22b2bb1109c9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3547,6 +3547,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
+ * Some NVIDIA GPU devices do not work with bus reset, so SBR must be
+ * prevented for those affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+ if ((dev->device & 0xffc0) == 0x2340)
+ quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ quirk_nvidia_no_bus_reset);
+
+/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
@@ -3566,6 +3578,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -3901,6 +3923,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
return 0;
}
+#define PCI_DEVICE_ID_HINIC_VF 0x375E
+#define HINIC_VF_FLR_TYPE 0x1000
+#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
+#define HINIC_VF_OP 0xE80
+#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+ unsigned long timeout;
+ void __iomem *bar;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(pdev, 0, 0);
+ if (!bar)
+ return -ENOTTY;
+
+ /* Get and check firmware capabilities */
+ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+ pci_iounmap(pdev, bar);
+ return -ENOTTY;
+ }
+
+ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+ val = ioread32be(bar + HINIC_VF_OP);
+ val = val | HINIC_VF_FLR_PROC_BIT;
+ iowrite32be(val, bar + HINIC_VF_OP);
+
+ pcie_flr(pdev);
+
+ /*
+ * The device must recapture its Bus and Device Numbers after FLR
+ * in order to generate Completions. Issue a config write to let the
+ * device capture this information.
+ */
+ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+ do {
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+ msleep(20);
+ } while (time_before(jiffies, timeout));
+
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+
+ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+ pci_iounmap(pdev, bar);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3913,6 +3998,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
+ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+ reset_hinic_vf_dev },
{ 0 }
};
@@ -4753,6 +4840,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ /* Broadcom multi-function device */
+ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5154,7 +5243,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5))
+ (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+ (pdev->device == 0x7341 && pdev->revision != 0x00))
return;
if (pdev->device == 0x15d8) {
@@ -5181,6 +5271,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
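reset_hinic_vf_dev() above follows the standard deadline-polling idiom, including one final read after the loop so a completion that lands between the last poll and the deadline check is not misreported as a timeout. A userspace sketch of the same shape (the busy flag and 20ms poll interval are stand-ins for HINIC_VF_FLR_PROC_BIT and msleep(20)):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static int polls;

    /* stands in for reading HINIC_VF_OP and testing the FLR_PROC bit;
     * simulated here: the "firmware" finishes after five reads */
    static bool flr_busy(void)
    {
        return ++polls < 5;
    }

    static int wait_for_flr(int timeout_ms)
    {
        const struct timespec ts = { 0, 20 * 1000 * 1000 }; /* 20ms */
        int waited_ms = 0;

        while (waited_ms < timeout_ms) {
            if (!flr_busy())
                return 0;
            nanosleep(&ts, NULL);
            waited_ms += 20;
        }

        /* one last read: completion may have raced the deadline check */
        if (!flr_busy())
            return 0;

        fprintf(stderr, "FLR timed out\n");
        return -1;
    }

    int main(void)
    {
        return wait_for_flr(15000);
    }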
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 666d8a9b557f..54aca3a62814 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -37,7 +37,7 @@
#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model))
-#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1)
+#define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
#define CCI_PMU_MAX_HW_CNTRS(model) \
@@ -806,7 +806,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
/* Generic code to find an unused idx from the mask */
- for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
+ for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
if (!test_and_set_bit(idx, hw->used_mask))
return idx;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 96d47cb302dd..a96c31604545 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1211,7 +1211,7 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&dt->pmu, cpu, target);
dt->cpu = target;
if (ccn->irq)
- WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
+ WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
return 0;
}
@@ -1291,7 +1291,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
- err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
+ err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
@@ -1325,8 +1325,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
- if (ccn->irq)
- irq_set_affinity_hint(ccn->irq, NULL);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 56a5c355701d..bc3cba5f8c5d 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -31,7 +31,7 @@
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
-#define CMN_CHILD_NODE_ADDR GENMASK(27,0)
+#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_ADDR_NODE_PTR GENMASK(27, 14)
@@ -1162,7 +1162,7 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&cmn->pmu, cpu, target);
for (i = 0; i < cmn->num_dtcs; i++)
- irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target));
+ irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target));
cmn->cpu = target;
return 0;
}
@@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
irq = cmn->dtc[i].irq;
for (j = i; j--; ) {
if (cmn->dtc[j].irq == irq) {
- cmn->dtc[j].irq_friend = j - i;
+ cmn->dtc[j].irq_friend = i - j;
goto next;
}
}
@@ -1222,7 +1222,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
if (err)
return err;
- err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu));
+ err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
if (err)
return err;
next:
@@ -1568,16 +1568,11 @@ static int arm_cmn_probe(struct platform_device *pdev)
static int arm_cmn_remove(struct platform_device *pdev)
{
struct arm_cmn *cmn = platform_get_drvdata(pdev);
- int i;
writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
perf_pmu_unregister(&cmn->pmu);
cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
-
- for (i = 0; i < cmn->num_dtcs; i++)
- irq_set_affinity_hint(cmn->dtc[i].irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index b6c2511d59af..280a6ae3e27c 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -421,7 +421,7 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
if (ret)
goto out_free_aff;
- ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+ ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu));
if (ret)
goto out_free_irq;
@@ -475,7 +475,6 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
list_del(&irq->irqs_node);
mutex_unlock(&dmc620_pmu_irqs_lock);
- WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
free_irq(irq->irq_num, irq);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
kfree(irq);
@@ -622,7 +621,7 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu,
perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
mutex_unlock(&dmc620_pmu_irqs_lock);
- WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+ WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
irq->cpu = target;
return 0;
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 196faea074d0..a36698a90d2f 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -687,7 +687,7 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
{
cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
- if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+ if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu))
pr_warn("Failed to set irq affinity to %d\n", cpu);
}
@@ -769,7 +769,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
if (rc) {
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
&dsu_pmu->cpuhp_node);
- irq_set_affinity_hint(dsu_pmu->irq, NULL);
}
return rc;
@@ -781,7 +780,6 @@ static int dsu_pmu_device_remove(struct platform_device *pdev)
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
- irq_set_affinity_hint(dsu_pmu->irq, NULL);
return 0;
}
@@ -840,10 +838,8 @@ static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
- if (dst >= nr_cpu_ids) {
- irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ if (dst >= nr_cpu_ids)
return 0;
- }
perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
dsu_pmu_set_active_cpu(dst, dsu_pmu);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d4f7f1f9cc77..3cbc3baf087f 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -563,14 +563,14 @@ static int armpmu_filter_match(struct perf_event *event)
return ret;
}
-static ssize_t armpmu_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}
-static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpus);
static struct attribute *armpmu_common_attrs[] = {
&dev_attr_cpus.attr,
@@ -644,11 +644,9 @@ int armpmu_request_irq(int irq, int cpu)
}
irq_flags = IRQF_PERCPU |
- IRQF_NOBALANCING |
+ IRQF_NOBALANCING | IRQF_NO_AUTOEN |
IRQF_NO_THREAD;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
-
err = request_nmi(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
@@ -670,7 +668,7 @@ int armpmu_request_irq(int irq, int cpu)
&cpu_armpmu);
irq_ops = &percpu_pmuirq_ops;
} else {
- has_nmi= true;
+ has_nmi = true;
irq_ops = &percpu_pmunmi_ops;
}
} else {
@@ -869,10 +867,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
int cpu;
pmu = kzalloc(sizeof(*pmu), flags);
- if (!pmu) {
- pr_info("failed to allocate PMU device!\n");
+ if (!pmu)
goto out;
- }
pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) {
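Two idioms in the arm_pmu hunk recur in later files. First, DEVICE_ATTR_RO(cpus) generates a 0444 attribute and wires it to a show routine that must be named cpus_show(), which is why the function is renamed rather than only re-declared. Second, the IRQF_NO_AUTOEN request flag leaves the line disabled after the request succeeds, replacing the separate irq_set_status_flags(irq, IRQ_NOAUTOEN) step. A sketch of both, with placeholder names and body:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/sysfs.h>

	/* DEVICE_ATTR_RO(foo) requires a show routine named foo_show(). */
	static ssize_t cpus_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "0-3\n");	/* placeholder output */
	}
	static DEVICE_ATTR_RO(cpus);

	/* IRQF_NO_AUTOEN: the IRQ stays masked until enable_irq(). */
	static int example_request(unsigned int irq, irq_handler_t handler,
				   void *data)
	{
		return request_irq(irq, handler,
				   IRQF_NOBALANCING | IRQF_NO_AUTOEN,
				   "example-pmu", data);
	}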
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index ff6fab4bae30..226348822ab3 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -277,7 +277,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
struct perf_event *event, int idx)
{
u32 span, sid;
- unsigned int num_ctrs = smmu_pmu->num_counters;
+ unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
bool filter_en = !!get_filter_enable(event);
span = filter_en ? get_filter_span(event) :
@@ -285,17 +285,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
sid = filter_en ? get_filter_stream_id(event) :
SMMU_PMCG_DEFAULT_FILTER_SID;
- /* Support individual filter settings */
- if (!smmu_pmu->global_filter) {
+ cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+ /*
+ * Per-counter filtering, or scheduling the first globally-filtered
+ * event into an empty PMU so idx == 0 and it works out equivalent.
+ */
+ if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
smmu_pmu_set_event_filter(event, idx, span, sid);
return 0;
}
- /* Requested settings same as current global settings*/
- idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
- if (idx == num_ctrs ||
- smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
- smmu_pmu_set_event_filter(event, 0, span, sid);
+ /* Otherwise, must match whatever's currently scheduled */
+ if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
+ smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
return 0;
}
@@ -509,11 +511,8 @@ static ssize_t smmu_pmu_event_show(struct device *dev,
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define SMMU_EVENT_ATTR(name, config) \
- (&((struct perf_pmu_events_attr) { \
- .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL), \
- .id = config, \
- }).attr.attr)
+#define SMMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)
static struct attribute *smmu_pmu_events[] = {
SMMU_EVENT_ATTR(cycles, 0),
@@ -628,7 +627,7 @@ static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
smmu_pmu->on_cpu = target;
- WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));
+ WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
return 0;
}
@@ -839,15 +838,14 @@ static int smmu_pmu_probe(struct platform_device *pdev)
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
- WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
- cpumask_of(smmu_pmu->on_cpu)));
+ WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
&smmu_pmu->node);
if (err) {
dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
err, &res_0->start);
- goto out_clear_affinity;
+ return err;
}
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -866,8 +864,6 @@ static int smmu_pmu_probe(struct platform_device *pdev)
out_unregister:
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-out_clear_affinity:
- irq_set_affinity_hint(smmu_pmu->irq, NULL);
return err;
}
@@ -877,7 +873,6 @@ static int smmu_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
- irq_set_affinity_hint(smmu_pmu->irq, NULL);
return 0;
}
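Several drivers in this series (arm_smmuv3, fsl_imx8_ddr, qcom_l2/l3, xgene) delete the same open-coded compound-literal attribute macro in favour of PMU_EVENT_ATTR_ID() from <linux/perf_event.h>. Judging by the code being deleted, the helper packages essentially the same construct; roughly (check the header for the authoritative definition):

	/* Approximate shape of PMU_EVENT_ATTR_ID(_name, _show, _id): */
	#define PMU_EVENT_ATTR_ID(_name, _show, _id)			\
		(&((struct perf_pmu_events_attr[]) {			\
			{ .attr = __ATTR(_name, 0444, _show, NULL),	\
			  .id = _id, }					\
		})[0].attr.attr)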
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8a1e86ab2d8e..d44bcc29d99c 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -231,15 +231,14 @@ static const struct attribute_group arm_spe_pmu_format_group = {
.attrs = arm_spe_pmu_formats_attr,
};
-static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
-static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
+static DEVICE_ATTR_RO(cpumask);
static struct attribute *arm_spe_pmu_attrs[] = {
&dev_attr_cpumask.attr,
@@ -1044,7 +1043,6 @@ static void __arm_spe_pmu_dev_probe(void *info)
spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
- return;
}
static void __arm_spe_pmu_reset_local(void)
@@ -1190,10 +1188,8 @@ static int arm_spe_pmu_device_probe(struct platform_device *pdev)
}
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
- if (!spe_pmu) {
- dev_err(dev, "failed to allocate spe_pmu\n");
+ if (!spe_pmu)
return -ENOMEM;
- }
spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
if (!spe_pmu->handle)
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 2bbb93188064..94ebc1ecace7 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -222,11 +222,8 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
- (&((struct perf_pmu_events_attr[]) { \
- { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
- .id = _id, } \
- })[0].attr.attr)
+#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)
static struct attribute *ddr_perf_events_attrs[] = {
IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
@@ -674,7 +671,7 @@ static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
- WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
+ WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
return 0;
}
@@ -705,8 +702,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
num);
- if (!name)
- return -ENOMEM;
+ if (!name) {
+ ret = -ENOMEM;
+ goto cpuhp_state_err;
+ }
pmu->devtype_data = of_device_get_match_data(&pdev->dev);
@@ -749,7 +748,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
}
pmu->irq = irq;
- ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
+ ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
if (ret) {
dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
goto ddr_perf_err;
@@ -777,7 +776,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_remove_multi_state(pmu->cpuhp_state);
- irq_set_affinity_hint(pmu->irq, NULL);
perf_pmu_unregister(&pmu->pmu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 7c8a4bc21db4..62299ab5a9be 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -2,7 +2,7 @@
/*
* HiSilicon SoC DDRC uncore Hardware event counters support
*
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
* Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
* Anurup M <anurup.m@huawei.com>
*
@@ -537,7 +537,6 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
- irq_set_affinity_hint(ddrc_pmu->irq, NULL);
}
return ret;
@@ -550,8 +549,6 @@ static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&ddrc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
- irq_set_affinity_hint(ddrc_pmu->irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 0316fabe32f1..393513150106 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -2,7 +2,7 @@
/*
* HiSilicon SoC HHA uncore Hardware event counters support
*
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
* Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
* Anurup M <anurup.m@huawei.com>
*
@@ -90,7 +90,7 @@ static void hisi_hha_pmu_config_ds(struct perf_event *event)
val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
val |= HHA_DATSRC_SKT_EN;
- writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
}
}
@@ -104,7 +104,7 @@ static void hisi_hha_pmu_clear_ds(struct perf_event *event)
val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
val &= ~HHA_DATSRC_SKT_EN;
- writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
}
}
@@ -540,7 +540,6 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
- irq_set_affinity_hint(hha_pmu->irq, NULL);
}
return ret;
@@ -553,8 +552,6 @@ static int hisi_hha_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&hha_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
- irq_set_affinity_hint(hha_pmu->irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index bf9f7772cac9..560ab964c8b5 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -2,7 +2,7 @@
/*
* HiSilicon SoC L3C uncore Hardware event counters support
*
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
* Author: Anurup M <anurup.m@huawei.com>
* Shaokun Zhang <zhangshaokun@hisilicon.com>
*
@@ -578,7 +578,6 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
- irq_set_affinity_hint(l3c_pmu->irq, NULL);
}
return ret;
@@ -591,8 +590,6 @@ static int hisi_l3c_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&l3c_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
- irq_set_affinity_hint(l3c_pmu->irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 14f23eb31248..83264ec0a957 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -333,7 +333,7 @@ static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group hisi_pa_pmu_identifier_group = {
+static const struct attribute_group hisi_pa_pmu_identifier_group = {
.attrs = hisi_pa_pmu_identifier_attrs,
};
@@ -436,7 +436,6 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
- irq_set_affinity_hint(pa_pmu->irq, NULL);
return ret;
}
@@ -451,8 +450,6 @@ static int hisi_pa_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&pa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
- irq_set_affinity_hint(pa_pmu->irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 13c68b5e39c4..a738aeab5c04 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -2,7 +2,7 @@
/*
* HiSilicon SoC Hardware event counters support
*
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
* Author: Anurup M <anurup.m@huawei.com>
* Shaokun Zhang <zhangshaokun@hisilicon.com>
*
@@ -488,7 +488,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
hisi_pmu->on_cpu = cpu;
/* Overflow interrupt also should use the same CPU */
- WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu)));
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
return 0;
}
@@ -521,7 +521,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
/* Use this CPU for event counting */
hisi_pmu->on_cpu = target;
- WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target)));
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index ea9d89bbc1ea..7f5841d6f592 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -2,7 +2,7 @@
/*
* HiSilicon SoC Hardware event counters support
*
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
* Author: Anurup M <anurup.m@huawei.com>
* Shaokun Zhang <zhangshaokun@hisilicon.com>
*
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index 46be312fa126..6aedc303ff56 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -2,7 +2,7 @@
/*
* HiSilicon SLLC uncore Hardware event counters support
*
- * Copyright (C) 2020 Hisilicon Limited
+ * Copyright (C) 2020 HiSilicon Limited
* Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
@@ -366,7 +366,7 @@ static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
NULL
};
-static struct attribute_group hisi_sllc_pmu_identifier_group = {
+static const struct attribute_group hisi_sllc_pmu_identifier_group = {
.attrs = hisi_sllc_pmu_identifier_attrs,
};
@@ -465,7 +465,6 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
- irq_set_affinity_hint(sllc_pmu->irq, NULL);
return ret;
}
@@ -481,8 +480,6 @@ static int hisi_sllc_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&sllc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
- irq_set_affinity_hint(sllc_pmu->irq, NULL);
-
return 0;
}
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index fc54a80f9c5c..5b093badd0f6 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -679,11 +679,8 @@ static ssize_t l2cache_pmu_event_show(struct device *dev,
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define L2CACHE_EVENT_ATTR(_name, _id) \
- (&((struct perf_pmu_events_attr[]) { \
- { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
- .id = _id, } \
- })[0].attr.attr)
+#define L2CACHE_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)
static struct attribute *l2_cache_pmu_events[] = {
L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
@@ -869,14 +866,14 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
irq = platform_get_irq(sdev, 0);
if (irq < 0)
return irq;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
cluster->irq = irq;
cluster->l2cache_pmu = l2cache_pmu;
cluster->on_cpu = -1;
err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
- IRQF_NOBALANCING | IRQF_NO_THREAD,
+ IRQF_NOBALANCING | IRQF_NO_THREAD |
+ IRQF_NO_AUTOEN,
"l2-cache-pmu", cluster);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index bba078077c93..1ff2ff6582bf 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -647,10 +647,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev,
}
#define L3CACHE_EVENT_ATTR(_name, _id) \
- (&((struct perf_pmu_events_attr[]) { \
- { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \
- .id = _id, } \
- })[0].attr.attr)
+ PMU_EVENT_ATTR_ID(_name, l3cache_pmu_event_show, _id)
static struct attribute *qcom_l3_cache_pmu_events[] = {
L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
@@ -670,15 +667,15 @@ static const struct attribute_group qcom_l3_cache_pmu_events_group = {
/* cpumask */
-static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
}
-static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpumask);
static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
@@ -767,10 +764,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
- if (IS_ERR(l3pmu->regs)) {
- dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start);
+ if (IS_ERR(l3pmu->regs))
return PTR_ERR(l3pmu->regs);
- }
qcom_l3_cache__init(l3pmu);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 06a6d569b0b5..fc1a376ee906 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -817,10 +817,8 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
}
base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(base)) {
- dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ if (IS_ERR(base))
return NULL;
- }
tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
if (!tx2_pmu)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index ffe3bdeec845..2b6d476bd213 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -278,17 +278,14 @@ static const struct attribute_group mc_pmu_v3_format_attr_group = {
static ssize_t xgene_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dev_ext_attribute *eattr;
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
- eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+ return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}
#define XGENE_PMU_EVENT_ATTR(_name, _config) \
- (&((struct dev_ext_attribute[]) { \
- { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
- .var = (void *) _config, } \
- })[0].attr.attr)
+ PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)
static struct attribute *l3c_pmu_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
@@ -604,15 +601,15 @@ static const struct attribute_group mc_pmu_v3_events_attr_group = {
/*
* sysfs cpumask attributes
*/
-static ssize_t xgene_pmu_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}
-static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpumask);
static struct attribute *xgene_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index 899b9eb43fad..a39f30fa2e99 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
static inline void brcm_usb_writel(u32 val, void __iomem *addr)
{
/* See brcmnand_readl() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
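The brcm-usb fix is subtle: IS_ENABLED() evaluates true only for macros defined to 1, which is how Kconfig defines enabled CONFIG_* symbols. __BIG_ENDIAN comes from the byte-order headers and, where it is defined at all, is the constant 4321, never 1, so IS_ENABLED(__BIG_ENDIAN) could never be true and the big-endian branch was dead code. CONFIG_CPU_BIG_ENDIAN is the symbol that actually tracks the build's endianness. A reduced illustration:

	#include <linux/kconfig.h>
	#include <linux/types.h>

	/*
	 * IS_ENABLED(__BIG_ENDIAN) is always false: __BIG_ENDIAN names a
	 * byte order (4321), it is not a Kconfig-style 0/1 switch.
	 */
	static bool example_is_big_endian(void)
	{
		return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	}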
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 5c68e31c5939..e93818e3991f 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->nsubnodes = node;
if (sp->num_lanes > SIERRA_MAX_LANES) {
+ ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
goto put_child2;
}
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cdbcc49f7115..731c483a04de 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
+ clk_disable_unprepare(instance->ref_clk);
+ clk_disable_unprepare(instance->da_ref_clk);
return -EINVAL;
}
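The mtk-tphy change plugs a leak on the unsupported-PHY-type path: mtk_phy_init() enables two reference clocks before reaching the switch, so the default case must disable and unprepare them before returning -EINVAL. The general shape of the fix, with hypothetical clock parameters standing in for the driver's instance fields:

	#include <linux/clk.h>

	static int example_init(struct clk *ref_clk, struct clk *da_ref_clk,
				int type)
	{
		int ret;

		ret = clk_prepare_enable(ref_clk);
		if (ret)
			return ret;

		ret = clk_prepare_enable(da_ref_clk);
		if (ret)
			goto err_ref;

		if (type < 0) {		/* stand-in for the default: case */
			ret = -EINVAL;
			goto err_da_ref;
		}
		return 0;

	err_da_ref:
		clk_disable_unprepare(da_ref_clk);
	err_ref:
		clk_disable_unprepare(ref_clk);
		return ret;
	}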
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index c8a7d0927ced..4076580fc2cd 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
priv->coreclock = clock;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ dev_err(priv->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
if (IS_ERR(iomem)) {
dev_err(priv->dev, "Unable to get serdes registers: %s\n",
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 753cb5bab930..2a9465f4bb3a 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
.probe = mt7621_pci_phy_probe,
.driver = {
.name = "mt7621-pci-phy",
- .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+ .of_match_table = mt7621_pci_phy_ids,
},
};
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 9eb6d37c907e..126f5b8735cc 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+ ret = -EINVAL;
dev_err(dev, "Invalid typec-dir-debounce property\n");
goto err_addr_to_resource;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 996ebcba4d38..4c0d26606b6c 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
}
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 5c1a109842a7..eeab093a7815 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
};
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9c65d560d48f..9bbfe5c14b36 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
}
/**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
*
* @ctx: The pinmux context
* @exprs: The list of signal expressions (from a priority level on a pin)
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 57305ca838a7..894e2efd3be7 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
}
/**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index c12fa57ebd12..165cb7a59715 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -845,8 +845,10 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
i = 0;
device_for_each_child_node(dev, fwnode) {
ret = microchip_sgpio_register_bank(dev, priv, fwnode, i++);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
return ret;
+ }
}
if (priv->in.gpio.ngpio != priv->out.gpio.ngpio) {
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 25d2f7f7f3b6..11e967dbb44b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 5aaf57b40407..0bb4931cec59 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
- "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
- "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
- "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
- "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
- "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
- "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
- "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
- "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
- "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107",
};
static const char * const qdss_stm_groups[] = {
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 1f4bca854add..a9b511c7e850 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
if (p->groups[group].enabled) {
dev_err(p->dev, "%s is already enabled\n",
p->groups[group].name);
- return -EBUSY;
+ return 0;
}
p->groups[group].enabled = 1;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index ad9eb5ed8e81..c14d12d54cc5 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1224,7 +1224,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
struct device *dev = pctl->dev;
struct resource res;
int npins = STM32_GPIO_PINS_PER_BANK;
- int bank_nr, err;
+ int bank_nr, err, i = 0;
if (!IS_ERR(bank->rstc))
reset_control_deassert(bank->rstc);
@@ -1246,9 +1246,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);
- if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
+ if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.base = args.args[1];
+
+ npins = args.args[2];
+ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ ++i, &args))
+ npins += args.args[2];
} else {
bank_nr = pctl->nbanks;
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index f00107017318..9d1e7e03628e 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -703,7 +703,7 @@ end_ishtp_cl_alloc_error:
*
* Return: 0
*/
-static int cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
+static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
@@ -712,8 +712,6 @@ static int cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
cancel_work_sync(&client_data->work_ec_evt);
cros_ish_deinit(cros_ish_cl);
ishtp_put_device(cl_device);
-
- return 0;
}
/**
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index a9db2f32658f..b013445147dd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
- | IRQF_SHARED | IRQF_NO_AUTOEN,
- "mlxreg-hotplug", priv);
+ | IRQF_SHARED, "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
+ disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
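The mlxreg-hotplug hunk moves in the opposite direction from the earlier IRQF_NO_AUTOEN conversions because this interrupt is shared: the IRQ core treats IRQF_SHARED together with IRQF_NO_AUTOEN as an invalid combination and rejects the request with -EINVAL (one user of a shared line cannot keep it masked for everyone), so the driver requests the line normally and masks it immediately afterwards with disable_irq(). A sketch with illustrative names:

	#include <linux/interrupt.h>

	static int example_request_shared(unsigned int irq,
					  irq_handler_t handler, void *data)
	{
		int err;

		err = request_irq(irq, handler,
				  IRQF_TRIGGER_FALLING | IRQF_SHARED,
				  "example-hotplug", data);
		if (err)
			return err;

		disable_irq(irq);	/* masked until the driver is ready */
		return 0;
	}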
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index 3aaeea9f0433..fd6dc452f3e8 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
index c112e2c7112b..c8498c41e758 100644
--- a/drivers/platform/surface/aggregator/Makefile
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
# For include/trace/define_trace.h to include trace.h
CFLAGS_core.o = -I$(src)
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index a9b660af0917..0169677c243e 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -2,7 +2,7 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/device.h>
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
index 7712baaed6a5..ed032c2cbdb2 100644
--- a/drivers/platform/surface/aggregator/bus.h
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -2,7 +2,7 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_BUS_H
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 8a70df60142c..b8c377b3f932 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
@@ -408,6 +408,31 @@ ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
}
/**
+ * ssam_nf_refcount_dec_free() - Decrement reference-/activation-count of the
+ * given event and free its entry if the reference count reaches zero.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event, freeing
+ * its entry if it reaches zero.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ */
+static void ssam_nf_refcount_dec_free(struct ssam_nf *nf,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+
+ lockdep_assert_held(&nf->lock);
+
+ entry = ssam_nf_refcount_dec(nf, reg, id);
+ if (entry && entry->refcount == 0)
+ kfree(entry);
+}
+
+/**
* ssam_nf_refcount_empty() - Test if the notification system has any
* enabled/active events.
* @nf: The notification system.
@@ -1907,7 +1932,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
{
int status;
- status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
if (status < 0 && status != -EINVAL) {
ssam_err(ctrl,
@@ -2123,13 +2148,122 @@ int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
/* -- Top-level event registry interface. ----------------------------------- */
/**
+ * ssam_nf_refcount_enable() - Enable event for reference count entry if it has
+ * not already been enabled.
+ * @ctrl: The controller to enable the event on.
+ * @entry: The reference count entry for the event to be enabled.
+ * @flags: The flags used for enabling the event on the EC.
+ *
+ * Enable the event associated with the given reference count entry if the
+ * reference count equals one, i.e. the event has not previously been enabled.
+ * If the event has already been enabled (i.e. reference count not equal to
+ * one), check that the flags used for enabling match and warn about this if
+ * they do not.
+ *
+ * This does not modify the reference count itself, which is done with
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns zero on success. If the event is enabled by this call,
+ * returns the status of the event-enable EC command.
+ */
+static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
+ struct ssam_nf_refcount_entry *entry, u8 flags)
+{
+ const struct ssam_event_registry reg = entry->key.reg;
+ const struct ssam_event_id id = entry->key.id;
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ int status;
+
+ lockdep_assert_held(&nf->lock);
+
+ ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ reg.target_category, id.target_category, id.instance, entry->refcount);
+
+ if (entry->refcount == 1) {
+ status = ssam_ssh_event_enable(ctrl, reg, id, flags);
+ if (status)
+ return status;
+
+ entry->flags = flags;
+
+ } else if (entry->flags != flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ flags, entry->flags, reg.target_category, id.target_category,
+ id.instance);
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if it is
+ * no longer in use and free the corresponding entry.
+ * @ctrl: The controller to disable the event on.
+ * @entry: The reference count entry for the event to be disabled.
+ * @flags: The flags used for enabling the event on the EC.
+ *
+ * If the reference count equals zero, i.e. the event is no longer requested by
+ * any client, the event will be disabled and the corresponding reference count
+ * entry freed. The reference count entry must not be used any more after a
+ * call to this function.
+ *
+ * Also checks if the flags used for disabling the event match the flags used
+ * for enabling the event and warns if they do not (regardless of reference
+ * count).
+ *
+ * This does not modify the reference count itself, which is done with
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns zero on success. If the event is disabled by this call,
+ * returns the status of the event-disable EC command.
+ */
+static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
+ struct ssam_nf_refcount_entry *entry, u8 flags)
+{
+ const struct ssam_event_registry reg = entry->key.reg;
+ const struct ssam_event_id id = entry->key.id;
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ int status = 0;
+
+ lockdep_assert_held(&nf->lock);
+
+ ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ reg.target_category, id.target_category, id.instance, entry->refcount);
+
+ if (entry->flags != flags) {
+ ssam_warn(ctrl,
+ "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+ flags, entry->flags, reg.target_category, id.target_category,
+ id.instance);
+ }
+
+ if (entry->refcount == 0) {
+ status = ssam_ssh_event_disable(ctrl, reg, id, flags);
+ kfree(entry);
+ }
+
+ return status;
+}
+
+/**
* ssam_notifier_register() - Register an event notifier.
* @ctrl: The controller to register the notifier on.
* @n: The event notifier to register.
*
- * Register an event notifier and increment the usage counter of the
- * associated SAM event. If the event was previously not enabled, it will be
- * enabled during this call.
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the event is not
+ * marked as an observer and is currently not enabled, it will be enabled
+ * during this call. If the notifier is marked as an observer, no attempt will
+ * be made at enabling any event and no reference count will be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
*
* Return: Returns zero on success, %-ENOSPC if there have already been
* %INT_MAX notifiers for the event ID/type associated with the notifier block
@@ -2138,11 +2272,10 @@ int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
* for the specific associated event, returns the status of the event-enable
* EC-command.
*/
-int ssam_notifier_register(struct ssam_controller *ctrl,
- struct ssam_event_notifier *n)
+int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
- struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_entry *entry = NULL;
struct ssam_nf_head *nf_head;
struct ssam_nf *nf;
int status;
@@ -2155,44 +2288,32 @@ int ssam_notifier_register(struct ssam_controller *ctrl,
mutex_lock(&nf->lock);
- entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
- if (IS_ERR(entry)) {
- mutex_unlock(&nf->lock);
- return PTR_ERR(entry);
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
+ entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
}
- ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
- n->event.reg.target_category, n->event.id.target_category,
- n->event.id.instance, entry->refcount);
-
status = ssam_nfblk_insert(nf_head, &n->base);
if (status) {
- entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
- if (entry->refcount == 0)
- kfree(entry);
+ if (entry)
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
mutex_unlock(&nf->lock);
return status;
}
- if (entry->refcount == 1) {
- status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
- n->event.flags);
+ if (entry) {
+ status = ssam_nf_refcount_enable(ctrl, entry, n->event.flags);
if (status) {
ssam_nfblk_remove(&n->base);
- kfree(ssam_nf_refcount_dec(nf, n->event.reg, n->event.id));
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
mutex_unlock(&nf->lock);
synchronize_srcu(&nf_head->srcu);
return status;
}
-
- entry->flags = n->event.flags;
-
- } else if (entry->flags != n->event.flags) {
- ssam_warn(ctrl,
- "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
- n->event.flags, entry->flags, n->event.reg.target_category,
- n->event.id.target_category, n->event.id.instance);
}
mutex_unlock(&nf->lock);
@@ -2205,17 +2326,16 @@ EXPORT_SYMBOL_GPL(ssam_notifier_register);
* @ctrl: The controller the notifier has been registered on.
* @n: The event notifier to unregister.
*
- * Unregister an event notifier and decrement the usage counter of the
- * associated SAM event. If the usage counter reaches zero, the event will be
- * disabled.
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
*
* Return: Returns zero on success, %-ENOENT if the given notifier block has
* not been registered on the controller. If the given notifier block was the
* last one associated with its specific event, returns the status of the
* event-disable EC-command.
*/
-int ssam_notifier_unregister(struct ssam_controller *ctrl,
- struct ssam_event_notifier *n)
+int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry;
@@ -2236,33 +2356,24 @@ int ssam_notifier_unregister(struct ssam_controller *ctrl,
return -ENOENT;
}
- entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
- if (WARN_ON(!entry)) {
- /*
- * If this does not return an entry, there's a logic error
- * somewhere: The notifier block is registered, but the event
- * refcount entry is not there. Remove the notifier block
- * anyways.
- */
- status = -ENOENT;
- goto remove;
- }
-
- ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
- n->event.reg.target_category, n->event.id.target_category,
- n->event.id.instance, entry->refcount);
-
- if (entry->flags != n->event.flags) {
- ssam_warn(ctrl,
- "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
- n->event.flags, entry->flags, n->event.reg.target_category,
- n->event.id.target_category, n->event.id.instance);
- }
+ /*
+ * If this is an observer notifier, do not attempt to disable the
+ * event, just remove it.
+ */
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (WARN_ON(!entry)) {
+ /*
+ * If this does not return an entry, there's a logic
+ * error somewhere: The notifier block is registered,
+ * but the event refcount entry is not there. Remove
+ * the notifier block anyways.
+ */
+ status = -ENOENT;
+ goto remove;
+ }
- if (entry->refcount == 0) {
- status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
- n->event.flags);
- kfree(entry);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags);
}
remove:
@@ -2275,6 +2386,105 @@ remove:
EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
/**
+ * ssam_controller_event_enable() - Enable the specified event.
+ * @ctrl: The controller to enable the event for.
+ * @reg: The event registry to use for enabling the event.
+ * @id: The event ID specifying the event to be enabled.
+ * @flags: The SAM event flags used for enabling the event.
+ *
+ * Increment the event reference count of the specified event. If the event has
+ * not been enabled previously, it will be enabled by this call.
+ *
+ * Note: In general, ssam_notifier_register() with a non-observer notifier
+ * should be preferred for enabling/disabling events, as this will guarantee
+ * proper ordering and event forwarding in case of errors during event
+ * enabling/disabling.
+ *
+ * Return: Returns zero on success, %-ENOSPC if the reference count for the
+ * specified event has reached its maximum, %-ENOMEM if the corresponding event
+ * entry could not be allocated. If this is the first time that this event has
+ * been enabled (i.e. the reference count was incremented from zero to one by
+ * this call), returns the status of the event-enable EC-command.
+ */
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *entry;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_inc(nf, reg, id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
+
+ status = ssam_nf_refcount_enable(ctrl, entry, flags);
+ if (status) {
+ ssam_nf_refcount_dec_free(nf, reg, id);
+ mutex_unlock(&nf->lock);
+ return status;
+ }
+
+ mutex_unlock(&nf->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_event_enable);
+
+/**
+ * ssam_controller_event_disable() - Disable the specified event.
+ * @ctrl: The controller to disable the event for.
+ * @reg: The event registry to use for disabling the event.
+ * @id: The event ID specifying the event to be disabled.
+ * @flags: The flags used when enabling the event.
+ *
+ * Decrement the reference count of the specified event. If the reference count
+ * reaches zero, the event will be disabled.
+ *
+ * Note: In general, ssam_notifier_register()/ssam_notifier_unregister() with a
+ * non-observer notifier should be preferred for enabling/disabling events, as
+ * this will guarantee proper ordering and event forwarding in case of errors
+ * during event enabling/disabling.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given event has not been
+ * enabled on the controller. If the reference count of the event reaches zero
+ * during this call, returns the status of the event-disable EC-command.
+ */
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *entry;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_dec(nf, reg, id);
+ if (!entry) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags);
+
+ mutex_unlock(&nf->lock);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_event_disable);
+
+/**
* ssam_notifier_disable_registered() - Disable events for all registered
* notifiers.
* @ctrl: The controller for which to disable the notifiers/events.
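Taken together, the controller.c changes factor the enable/disable logic into ssam_nf_refcount_enable()/ssam_nf_refcount_disable_free(), teach the notifier paths to skip reference counting for observer notifiers (SSAM_EVENT_NOTIFIER_OBSERVER), and export ssam_controller_event_enable()/ssam_controller_event_disable() so a client can keep an event powered on independently of any one notifier. A hedged sketch of how a client might pair the two new exports (the header path is an assumption, not from the patch):

	#include <linux/surface_aggregator/controller.h>

	static int example_use_event(struct ssam_controller *ctrl,
				     struct ssam_event_registry reg,
				     struct ssam_event_id id, u8 flags)
	{
		int status;

		status = ssam_controller_event_enable(ctrl, reg, id, flags);
		if (status)
			return status;

		/* ... receive events via an observer notifier ... */

		return ssam_controller_event_disable(ctrl, reg, id, flags);
	}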
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
index 8297d34e7489..a0963c3562ff 100644
--- a/drivers/platform/surface/aggregator/controller.h
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 8dc2c267bcd6..279d9df19c01 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -7,7 +7,7 @@
* Handles communication via requests as well as enabling, disabling, and
* relaying of events.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
@@ -621,8 +621,8 @@ static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
static int ssam_serial_hub_probe(struct serdev_device *serdev)
{
+ struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev);
struct ssam_controller *ctrl;
- acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
acpi_status astatus;
int status;
@@ -652,7 +652,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
if (status)
goto err_devopen;
- astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
+ astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
if (ACPI_FAILURE(astatus)) {
status = -ENXIO;
goto err_devinit;
@@ -706,7 +706,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
* For now let's thus default power/wakeup to false.
*/
device_set_wakeup_capable(&serdev->dev, true);
- acpi_walk_dep_device_list(ssh);
+ acpi_dev_clear_dependencies(ssh);
return 0;
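This probe-path change, mirrored in surface3_power.c and surface_acpi_notify.c below, swaps raw ACPI handles for the companion struct acpi_device, because acpi_dev_clear_dependencies(), which replaces acpi_walk_dep_device_list(), takes the device; handle-based ACPI calls still work through adev->handle. A minimal sketch:

	#include <linux/acpi.h>

	static int example_probe(struct device *dev)
	{
		struct acpi_device *adev = ACPI_COMPANION(dev);

		if (!adev)
			return -ENODEV;

		/* Handle-based ACPI APIs remain reachable via adev->handle. */

		/* Signal _DEP consumers that this supplier is now ready. */
		acpi_dev_clear_dependencies(adev);
		return 0;
	}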
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
index 1221f642dda1..e562958ffdf0 100644
--- a/drivers/platform/surface/aggregator/ssh_msgb.h
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -2,7 +2,7 @@
/*
* SSH message builder functions.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 15d96eac6811..8a4451c1ffe5 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
@@ -1567,9 +1567,7 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
atomic_dec(&ptl->pending.count);
- list_del(&p->pending_node);
-
- list_add_tail(&p->pending_node, &claimed);
+ list_move_tail(&p->pending_node, &claimed);
}
spin_unlock(&ptl->pending.lock);
@@ -1957,8 +1955,7 @@ void ssh_ptl_shutdown(struct ssh_ptl *ptl)
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
- list_del(&p->queue_node);
- list_add_tail(&p->queue_node, &complete_q);
+ list_move_tail(&p->queue_node, &complete_q);
}
spin_unlock(&ptl->queue.lock);
@@ -1970,8 +1967,7 @@ void ssh_ptl_shutdown(struct ssh_ptl *ptl)
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
- list_del(&p->pending_node);
- list_add_tail(&p->pending_node, &complete_q);
+ list_move_tail(&p->pending_node, &complete_q);
}
atomic_set(&ptl->pending.count, 0);
spin_unlock(&ptl->pending.lock);
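The remaining list churn here and in the request layer below is purely cosmetic: list_move_tail() re-links a node onto the tail of another list in one call, exactly what the deleted list_del()/list_add_tail() pairs did.

	#include <linux/list.h>

	/* list_move_tail(n, h) == list_del(n) followed by list_add_tail(n, h) */
	static void example_claim(struct list_head *node,
				  struct list_head *claimed)
	{
		list_move_tail(node, claimed);
	}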
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
index e8757d03f279..2eb329f0b91a 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
index e2dead8de94a..b77912f8f13b 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.c
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
index 63c38d350988..3bd6e180fd16 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.h
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 52a83a8fcf82..790f7f0eee98 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
@@ -863,9 +863,7 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
atomic_dec(&rtl->pending.count);
- list_del(&r->node);
-
- list_add_tail(&r->node, &claimed);
+ list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->pending.lock);
@@ -1204,8 +1202,7 @@ void ssh_rtl_shutdown(struct ssh_rtl *rtl)
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
- list_del(&r->node);
- list_add_tail(&r->node, &claimed);
+ list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->queue.lock);
@@ -1238,8 +1235,7 @@ void ssh_rtl_shutdown(struct ssh_rtl *rtl)
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
- list_del(&r->node);
- list_add_tail(&r->node, &claimed);
+ list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->pending.lock);
}
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
index cb35815858d1..9c3cbae2d4bd 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
index eb332bb53ae4..de64cf169060 100644
--- a/drivers/platform/surface/aggregator/trace.h
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -2,7 +2,7 @@
/*
* Trace points for SSAM/SSH.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#undef TRACE_SYSTEM
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index cc4f9cba6856..dea82aa1abd4 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -446,12 +446,12 @@ mshw0011_space_handler(u32 function, acpi_physical_address command,
static int mshw0011_install_space_handler(struct i2c_client *client)
{
- acpi_handle handle;
+ struct acpi_device *adev;
struct mshw0011_handler_data *data;
acpi_status status;
- handle = ACPI_HANDLE(&client->dev);
- if (!handle)
+ adev = ACPI_COMPANION(&client->dev);
+ if (!adev)
return -ENODEV;
data = kzalloc(sizeof(struct mshw0011_handler_data),
@@ -460,25 +460,25 @@ static int mshw0011_install_space_handler(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- status = acpi_bus_attach_private_data(handle, (void *)data);
+ status = acpi_bus_attach_private_data(adev->handle, (void *)data);
if (ACPI_FAILURE(status)) {
kfree(data);
return -ENOMEM;
}
- status = acpi_install_address_space_handler(handle,
- ACPI_ADR_SPACE_GSBUS,
- &mshw0011_space_handler,
- NULL,
- data);
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &mshw0011_space_handler,
+ NULL,
+ data);
if (ACPI_FAILURE(status)) {
dev_err(&client->dev, "Error installing i2c space handler\n");
- acpi_bus_detach_private_data(handle);
+ acpi_bus_detach_private_data(adev->handle);
kfree(data);
return -ENOMEM;
}
- acpi_walk_dep_device_list(handle);
+ acpi_dev_clear_dependencies(adev);
return 0;
}
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index ef9c1f8e8336..8339988d95c1 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -798,7 +798,7 @@ static int san_consumer_links_setup(struct platform_device *pdev)
static int san_probe(struct platform_device *pdev)
{
- acpi_handle san = ACPI_HANDLE(&pdev->dev);
+ struct acpi_device *san = ACPI_COMPANION(&pdev->dev);
struct ssam_controller *ctrl;
struct san_data *data;
acpi_status astatus;
@@ -821,7 +821,8 @@ static int san_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ astatus = acpi_install_address_space_handler(san->handle,
+ ACPI_ADR_SPACE_GSBUS,
&san_opreg_handler, NULL,
&data->info);
if (ACPI_FAILURE(astatus))
@@ -835,7 +836,7 @@ static int san_probe(struct platform_device *pdev)
if (status)
goto err_install_dev;
- acpi_walk_dep_device_list(san);
+ acpi_dev_clear_dependencies(san);
return 0;
err_install_dev:
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 79e28fab7e40..30fb50fde450 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -3,29 +3,69 @@
* Provides user-space access to the SSAM EC via the /dev/surface/aggregator
* misc device. Intended for debugging and development.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
+#include <linux/ioctl.h>
#include <linux/kernel.h>
+#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <linux/surface_aggregator/cdev.h>
#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum ssam_cdev_device_state {
+ SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
+};
+
struct ssam_cdev {
struct kref kref;
struct rw_semaphore lock;
+
+ struct device *dev;
struct ssam_controller *ctrl;
struct miscdevice mdev;
+ unsigned long flags;
+
+ struct rw_semaphore client_lock; /* Guards client list. */
+ struct list_head client_list;
+};
+
+struct ssam_cdev_client;
+
+struct ssam_cdev_notifier {
+ struct ssam_cdev_client *client;
+ struct ssam_event_notifier nf;
+};
+
+struct ssam_cdev_client {
+ struct ssam_cdev *cdev;
+ struct list_head node;
+
+ struct mutex notifier_lock; /* Guards notifier access for registration */
+ struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];
+
+ struct mutex read_lock; /* Guards FIFO buffer read access */
+ struct mutex write_lock; /* Guards FIFO buffer write access */
+ DECLARE_KFIFO(buffer, u8, 4096);
+
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
};
static void __ssam_cdev_release(struct kref *kref)
@@ -47,24 +87,173 @@ static void ssam_cdev_put(struct ssam_cdev *cdev)
kref_put(&cdev->kref, __ssam_cdev_release);
}
-static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
+
+/* -- Notifier handling. ---------------------------------------------------- */
+
+static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
- struct miscdevice *mdev = filp->private_data;
- struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
+ struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
+ struct ssam_cdev_client *client = cdev_nf->client;
+ struct ssam_cdev_event event;
+ size_t n = struct_size(&event, data, in->length);
+
+ /* Translate event. */
+ event.target_category = in->target_category;
+ event.target_id = in->target_id;
+ event.command_id = in->command_id;
+ event.instance_id = in->instance_id;
+ event.length = in->length;
+
+ mutex_lock(&client->write_lock);
+
+ /* Make sure we have enough space. */
+ if (kfifo_avail(&client->buffer) < n) {
+ dev_warn(client->cdev->dev,
+ "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+ in->target_category, in->target_id, in->command_id, in->instance_id);
+ mutex_unlock(&client->write_lock);
+ return 0;
+ }
+
+ /* Copy event header and payload. */
+ kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
+ kfifo_in(&client->buffer, &in->data[0], in->length);
+
+ mutex_unlock(&client->write_lock);
+
+ /* Notify waiting readers. */
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible(&client->waitq);
- filp->private_data = ssam_cdev_get(cdev);
- return stream_open(inode, filp);
+ /*
+	 * Don't mark events as handled; this is the job of a proper driver and
+ * not the debugging interface.
+ */
+ return 0;
}
-static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
+static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
{
- ssam_cdev_put(filp->private_data);
- return 0;
+ const u16 rqid = ssh_tc_to_rqid(tc);
+ const u16 event = ssh_rqid_to_event(rqid);
+ struct ssam_cdev_notifier *nf;
+ int status;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Validate notifier target category. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&client->notifier_lock);
+
+ /* Check if the notifier has already been registered. */
+ if (client->notifier[event]) {
+ mutex_unlock(&client->notifier_lock);
+ return -EEXIST;
+ }
+
+ /* Allocate new notifier. */
+ nf = kzalloc(sizeof(*nf), GFP_KERNEL);
+ if (!nf) {
+ mutex_unlock(&client->notifier_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * Create a dummy notifier with the minimal required fields for
+ * observer registration. Note that we can skip fully specifying event
+ * and registry here as we do not need any matching and use silent
+ * registration, which does not enable the corresponding event.
+ */
+ nf->client = client;
+ nf->nf.base.fn = ssam_cdev_notifier;
+ nf->nf.base.priority = priority;
+ nf->nf.event.id.target_category = tc;
+ nf->nf.event.mask = 0; /* Do not do any matching. */
+ nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;
+
+ /* Register notifier. */
+ status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
+ if (status)
+ kfree(nf);
+ else
+ client->notifier[event] = nf;
+
+ mutex_unlock(&client->notifier_lock);
+ return status;
+}
+
+static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
+{
+ const u16 rqid = ssh_tc_to_rqid(tc);
+ const u16 event = ssh_rqid_to_event(rqid);
+ int status;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Validate notifier target category. */
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ mutex_lock(&client->notifier_lock);
+
+ /* Check if the notifier is currently registered. */
+ if (!client->notifier[event]) {
+ mutex_unlock(&client->notifier_lock);
+ return -ENOENT;
+ }
+
+ /* Unregister and free notifier. */
+ status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
+ kfree(client->notifier[event]);
+ client->notifier[event] = NULL;
+
+ mutex_unlock(&client->notifier_lock);
+ return status;
+}
+
+static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
+{
+ int i;
+
+ down_read(&client->cdev->lock);
+
+ /*
+ * This function may be used during shutdown, thus we need to test for
+ * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
+ */
+ if (client->cdev->ctrl) {
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
+ ssam_cdev_notifier_unregister(client, i + 1);
+
+ } else {
+ int count = 0;
+
+ /*
+ * Device has been shut down. Any notifier remaining is a bug,
+ * so warn about that as this would otherwise hardly be
+ * noticeable. Nevertheless, free them as well.
+ */
+ mutex_lock(&client->notifier_lock);
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
+ count += !!(client->notifier[i]);
+ kfree(client->notifier[i]);
+ client->notifier[i] = NULL;
+ }
+ mutex_unlock(&client->notifier_lock);
+
+ WARN_ON(count > 0);
+ }
+
+ up_read(&client->cdev->lock);
}
-static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
+
+/* -- IOCTL functions. ------------------------------------------------------ */
+
+static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
{
- struct ssam_cdev_request __user *r;
struct ssam_cdev_request rqst;
struct ssam_request spec = {};
struct ssam_response rsp = {};
@@ -72,7 +261,8 @@ static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
void __user *rspdata;
int status = 0, ret = 0, tmp;
- r = (struct ssam_cdev_request __user *)arg;
+ lockdep_assert_held_read(&client->cdev->lock);
+
ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
if (ret)
goto out;
@@ -152,7 +342,7 @@ static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
}
/* Perform request. */
- status = ssam_request_sync(cdev->ctrl, &spec, &rsp);
+ status = ssam_request_sync(client->cdev->ctrl, &spec, &rsp);
if (status)
goto out;
@@ -177,48 +367,315 @@ out:
return ret;
}
-static long __ssam_cdev_device_ioctl(struct ssam_cdev *cdev, unsigned int cmd,
+static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
+ const struct ssam_cdev_notifier_desc __user *d)
+{
+ struct ssam_cdev_notifier_desc desc;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
+}
+
+static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
+ const struct ssam_cdev_notifier_desc __user *d)
+{
+ struct ssam_cdev_notifier_desc desc;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ return ssam_cdev_notifier_unregister(client, desc.target_category);
+}
+
+static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
+ const struct ssam_cdev_event_desc __user *d)
+{
+ struct ssam_cdev_event_desc desc;
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Read descriptor from user-space. */
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ /* Translate descriptor. */
+ reg.target_category = desc.reg.target_category;
+ reg.target_id = desc.reg.target_id;
+ reg.cid_enable = desc.reg.cid_enable;
+ reg.cid_disable = desc.reg.cid_disable;
+
+ id.target_category = desc.id.target_category;
+ id.instance = desc.id.instance;
+
+	/* Enable event. */
+ return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
+ const struct ssam_cdev_event_desc __user *d)
+{
+ struct ssam_cdev_event_desc desc;
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ long ret;
+
+ lockdep_assert_held_read(&client->cdev->lock);
+
+ /* Read descriptor from user-space. */
+ ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+ if (ret)
+ return ret;
+
+ /* Translate descriptor. */
+ reg.target_category = desc.reg.target_category;
+ reg.target_id = desc.reg.target_id;
+ reg.cid_enable = desc.reg.cid_enable;
+ reg.cid_disable = desc.reg.cid_disable;
+
+ id.target_category = desc.id.target_category;
+ id.instance = desc.id.instance;
+
+ /* Disable event. */
+ return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *mdev = filp->private_data;
+ struct ssam_cdev_client *client;
+ struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
+
+ /* Initialize client */
+ client = vzalloc(sizeof(*client));
+ if (!client)
+ return -ENOMEM;
+
+ client->cdev = ssam_cdev_get(cdev);
+
+ INIT_LIST_HEAD(&client->node);
+
+ mutex_init(&client->notifier_lock);
+
+ mutex_init(&client->read_lock);
+ mutex_init(&client->write_lock);
+ INIT_KFIFO(client->buffer);
+ init_waitqueue_head(&client->waitq);
+
+ filp->private_data = client;
+
+ /* Attach client. */
+ down_write(&cdev->client_lock);
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
+ up_write(&cdev->client_lock);
+ mutex_destroy(&client->write_lock);
+ mutex_destroy(&client->read_lock);
+ mutex_destroy(&client->notifier_lock);
+ ssam_cdev_put(client->cdev);
+ vfree(client);
+ return -ENODEV;
+ }
+ list_add_tail(&client->node, &cdev->client_list);
+
+ up_write(&cdev->client_lock);
+
+ stream_open(inode, filp);
+ return 0;
+}
+
+static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
+{
+ struct ssam_cdev_client *client = filp->private_data;
+
+ /* Force-unregister all remaining notifiers of this client. */
+ ssam_cdev_notifier_unregister_all(client);
+
+ /* Detach client. */
+ down_write(&client->cdev->client_lock);
+ list_del(&client->node);
+ up_write(&client->cdev->client_lock);
+
+ /* Free client. */
+ mutex_destroy(&client->write_lock);
+ mutex_destroy(&client->read_lock);
+
+ mutex_destroy(&client->notifier_lock);
+
+ ssam_cdev_put(client->cdev);
+ vfree(client);
+
+ return 0;
+}
+
+static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
unsigned long arg)
{
+ lockdep_assert_held_read(&client->cdev->lock);
+
switch (cmd) {
case SSAM_CDEV_REQUEST:
- return ssam_cdev_request(cdev, arg);
+ return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);
+
+ case SSAM_CDEV_NOTIF_REGISTER:
+ return ssam_cdev_notif_register(client,
+ (struct ssam_cdev_notifier_desc __user *)arg);
+
+ case SSAM_CDEV_NOTIF_UNREGISTER:
+ return ssam_cdev_notif_unregister(client,
+ (struct ssam_cdev_notifier_desc __user *)arg);
+
+ case SSAM_CDEV_EVENT_ENABLE:
+ return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);
+
+ case SSAM_CDEV_EVENT_DISABLE:
+ return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);
default:
return -ENOTTY;
}
}
-static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct ssam_cdev *cdev = file->private_data;
+ struct ssam_cdev_client *client = file->private_data;
long status;
/* Ensure that controller is valid for as long as we need it. */
+ if (down_read_killable(&client->cdev->lock))
+ return -ERESTARTSYS;
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
+ up_read(&client->cdev->lock);
+ return -ENODEV;
+ }
+
+ status = __ssam_cdev_device_ioctl(client, cmd, arg);
+
+ up_read(&client->cdev->lock);
+ return status;
+}
+
+static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+ struct ssam_cdev_client *client = file->private_data;
+ struct ssam_cdev *cdev = client->cdev;
+ unsigned int copied;
+ int status = 0;
+
if (down_read_killable(&cdev->lock))
return -ERESTARTSYS;
- if (!cdev->ctrl) {
+ /* Make sure we're not shut down. */
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
up_read(&cdev->lock);
return -ENODEV;
}
- status = __ssam_cdev_device_ioctl(cdev, cmd, arg);
+ do {
+ /* Check availability, wait if necessary. */
+ if (kfifo_is_empty(&client->buffer)) {
+ up_read(&cdev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ status = wait_event_interruptible(client->waitq,
+ !kfifo_is_empty(&client->buffer) ||
+ test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
+ &cdev->flags));
+ if (status < 0)
+ return status;
+
+ if (down_read_killable(&cdev->lock))
+ return -ERESTARTSYS;
+
+ /* Need to check that we're not shut down again. */
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
+ up_read(&cdev->lock);
+ return -ENODEV;
+ }
+ }
+
+ /* Try to read from FIFO. */
+ if (mutex_lock_interruptible(&client->read_lock)) {
+ up_read(&cdev->lock);
+ return -ERESTARTSYS;
+ }
+
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
+ mutex_unlock(&client->read_lock);
+
+ if (status < 0) {
+ up_read(&cdev->lock);
+ return status;
+ }
+
+ /* We might not have gotten anything, check this here. */
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+ up_read(&cdev->lock);
+ return -EAGAIN;
+ }
+ } while (copied == 0);
up_read(&cdev->lock);
- return status;
+ return copied;
+}
+
+static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct ssam_cdev_client *client = file->private_data;
+ __poll_t events = 0;
+
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
+ return EPOLLHUP | EPOLLERR;
+
+ poll_wait(file, &client->waitq, pt);
+
+ if (!kfifo_is_empty(&client->buffer))
+ events |= EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static int ssam_cdev_fasync(int fd, struct file *file, int on)
+{
+ struct ssam_cdev_client *client = file->private_data;
+
+ return fasync_helper(fd, file, on, &client->fasync);
}
static const struct file_operations ssam_controller_fops = {
.owner = THIS_MODULE,
.open = ssam_cdev_device_open,
.release = ssam_cdev_device_release,
+ .read = ssam_cdev_read,
+ .poll = ssam_cdev_poll,
+ .fasync = ssam_cdev_fasync,
.unlocked_ioctl = ssam_cdev_device_ioctl,
.compat_ioctl = ssam_cdev_device_ioctl,
- .llseek = noop_llseek,
+ .llseek = no_llseek,
};
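
For reference, consuming the new interface from user space might look roughly like the following. This is a minimal sketch, not part of the patch: it assumes the uapi additions from <linux/surface_aggregator/cdev.h> in this series, uses a placeholder target category, and elides the handling a real client would need for events split across reads.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/surface_aggregator/cdev.h>

int main(void)
{
	struct ssam_cdev_notifier_desc desc = {
		.priority = 1,
		.target_category = 0x03,	/* placeholder category */
	};
	unsigned char buf[4096];
	ssize_t n, off;
	int fd;

	fd = open("/dev/surface/aggregator", O_RDWR);
	if (fd < 0)
		return 1;

	/* Start receiving events for the given target category. */
	if (ioctl(fd, SSAM_CDEV_NOTIF_REGISTER, &desc) < 0)
		return 1;

	/*
	 * Each event is a struct ssam_cdev_event header followed by
	 * 'length' payload bytes, packed back to back in the stream.
	 */
	n = read(fd, buf, sizeof(buf));
	for (off = 0; n - off >= (ssize_t)sizeof(struct ssam_cdev_event);) {
		const struct ssam_cdev_event *ev = (const void *)(buf + off);

		printf("event: tc=%#04x cid=%#04x len=%u\n",
		       ev->target_category, ev->command_id, ev->length);
		off += sizeof(*ev) + ev->length;
	}

	ioctl(fd, SSAM_CDEV_NOTIF_UNREGISTER, &desc);
	close(fd);
	return 0;
}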
+
+/* -- Device and driver setup ----------------------------------------------- */
+
static int ssam_dbg_device_probe(struct platform_device *pdev)
{
struct ssam_controller *ctrl;
@@ -236,6 +693,7 @@ static int ssam_dbg_device_probe(struct platform_device *pdev)
kref_init(&cdev->kref);
init_rwsem(&cdev->lock);
cdev->ctrl = ctrl;
+ cdev->dev = &pdev->dev;
cdev->mdev.parent = &pdev->dev;
cdev->mdev.minor = MISC_DYNAMIC_MINOR;
@@ -243,6 +701,9 @@ static int ssam_dbg_device_probe(struct platform_device *pdev)
cdev->mdev.nodename = "surface/aggregator";
cdev->mdev.fops = &ssam_controller_fops;
+ init_rwsem(&cdev->client_lock);
+ INIT_LIST_HEAD(&cdev->client_list);
+
status = misc_register(&cdev->mdev);
if (status) {
kfree(cdev);
@@ -256,8 +717,32 @@ static int ssam_dbg_device_probe(struct platform_device *pdev)
static int ssam_dbg_device_remove(struct platform_device *pdev)
{
struct ssam_cdev *cdev = platform_get_drvdata(pdev);
+ struct ssam_cdev_client *client;
- misc_deregister(&cdev->mdev);
+ /*
+ * Mark device as shut-down. Prevent new clients from being added and
+ * new operations from being executed.
+ */
+ set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);
+
+ down_write(&cdev->client_lock);
+
+ /* Remove all notifiers registered by us. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ ssam_cdev_notifier_unregister_all(client);
+ }
+
+ /* Wake up async clients. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ }
+
+ /* Wake up blocking clients. */
+ list_for_each_entry(client, &cdev->client_list, node) {
+ wake_up_interruptible(&client->waitq);
+ }
+
+ up_write(&cdev->client_lock);
/*
* The controller is only guaranteed to be valid for as long as the
@@ -266,8 +751,11 @@ static int ssam_dbg_device_remove(struct platform_device *pdev)
*/
down_write(&cdev->lock);
cdev->ctrl = NULL;
+ cdev->dev = NULL;
up_write(&cdev->lock);
+ misc_deregister(&cdev->mdev);
+
ssam_cdev_put(cdev);
return 0;
}
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 685d37a7add1..4428c4330229 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -119,8 +119,13 @@ static const struct software_node ssam_node_hid_base_iid6 = {
.parent = &ssam_node_hub_base,
};
-/* Devices for Surface Book 2. */
-static const struct software_node *ssam_node_group_sb2[] = {
+/*
+ * Devices for 5th- and 6th-generations models:
+ * - Surface Book 2,
+ * - Surface Laptop 1 and 2,
+ * - Surface Pro 5 and 6.
+ */
+static const struct software_node *ssam_node_group_gen5[] = {
&ssam_node_root,
&ssam_node_tmp_pprof,
NULL,
@@ -142,21 +147,7 @@ static const struct software_node *ssam_node_group_sb3[] = {
NULL,
};
-/* Devices for Surface Laptop 1. */
-static const struct software_node *ssam_node_group_sl1[] = {
- &ssam_node_root,
- &ssam_node_tmp_pprof,
- NULL,
-};
-
-/* Devices for Surface Laptop 2. */
-static const struct software_node *ssam_node_group_sl2[] = {
- &ssam_node_root,
- &ssam_node_tmp_pprof,
- NULL,
-};
-
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
static const struct software_node *ssam_node_group_sl3[] = {
&ssam_node_root,
&ssam_node_bat_ac,
@@ -177,20 +168,6 @@ static const struct software_node *ssam_node_group_slg1[] = {
NULL,
};
-/* Devices for Surface Pro 5. */
-static const struct software_node *ssam_node_group_sp5[] = {
- &ssam_node_root,
- &ssam_node_tmp_pprof,
- NULL,
-};
-
-/* Devices for Surface Pro 6. */
-static const struct software_node *ssam_node_group_sp6[] = {
- &ssam_node_root,
- &ssam_node_tmp_pprof,
- NULL,
-};
-
/* Devices for Surface Pro 7 and Surface Pro 7+. */
static const struct software_node *ssam_node_group_sp7[] = {
&ssam_node_root,
@@ -495,10 +472,10 @@ static struct ssam_device_driver ssam_base_hub_driver = {
static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
- { "MSHW0081", (unsigned long)ssam_node_group_sp5 },
+ { "MSHW0081", (unsigned long)ssam_node_group_gen5 },
/* Surface Pro 6 (OMBR >= 0x10) */
- { "MSHW0111", (unsigned long)ssam_node_group_sp6 },
+ { "MSHW0111", (unsigned long)ssam_node_group_gen5 },
/* Surface Pro 7 */
{ "MSHW0116", (unsigned long)ssam_node_group_sp7 },
@@ -507,23 +484,26 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
{ "MSHW0119", (unsigned long)ssam_node_group_sp7 },
/* Surface Book 2 */
- { "MSHW0107", (unsigned long)ssam_node_group_sb2 },
+ { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
/* Surface Book 3 */
{ "MSHW0117", (unsigned long)ssam_node_group_sb3 },
/* Surface Laptop 1 */
- { "MSHW0086", (unsigned long)ssam_node_group_sl1 },
+ { "MSHW0086", (unsigned long)ssam_node_group_gen5 },
/* Surface Laptop 2 */
- { "MSHW0112", (unsigned long)ssam_node_group_sl2 },
+ { "MSHW0112", (unsigned long)ssam_node_group_gen5 },
/* Surface Laptop 3 (13", Intel) */
{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
- /* Surface Laptop 3 (15", AMD) */
+ /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+ /* Surface Laptop 4 (13", Intel) */
+ { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 5d9b758a99bb..1203b9a82993 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
*/
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_write(&ddev->client_lock);
+ mutex_destroy(&client->read_lock);
sdtx_device_put(client->ddev);
kfree(client);
return -ENODEV;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 60592fb88e7a..7d385c3b2239 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -415,16 +415,17 @@ config HP_ACCEL
To compile this driver as a module, choose M here: the module will
be called hp_accel.
-config HP_WIRELESS
- tristate "HP wireless button"
+config WIRELESS_HOTKEY
+ tristate "Wireless hotkey button"
depends on ACPI
depends on INPUT
help
- This driver provides supports for new HP wireless button for Windows 8.
+	  This driver provides support for the wireless buttons found on some
+	  AMD, HP, and Xiaomi laptops.
On such systems the driver should load automatically (via ACPI alias).
To compile this driver as a module, choose M here: the module will
- be called hp-wireless.
+ be called wireless-hotkey.
config HP_WMI
tristate "HP WMI extras"
@@ -639,6 +640,19 @@ config THINKPAD_ACPI_HOTKEY_POLL
If you are not sure, say Y here. The driver enables polling only if
it is strictly necessary to do so.
+config THINKPAD_LMI
+ tristate "Lenovo WMI-based systems management driver"
+ depends on ACPI_WMI
+ select FW_ATTR_CLASS
+ help
+ This driver allows changing BIOS settings on Lenovo machines whose
+	  BIOS supports the WMI interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called think-lmi.
+
+source "drivers/platform/x86/intel/Kconfig"
+
config INTEL_ATOMISP2_LED
tristate "Intel AtomISP2 camera LED driver"
depends on GPIOLIB && LEDS_GPIO
@@ -673,30 +687,6 @@ config INTEL_ATOMISP2_PM
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
-config INTEL_CHT_INT33FE
- tristate "Intel Cherry Trail ACPI INT33FE Driver"
- depends on X86 && ACPI && I2C && REGULATOR
- depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
- depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
- depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
- help
- This driver add support for the INT33FE ACPI device found on
- some Intel Cherry Trail devices.
-
- There are two kinds of INT33FE ACPI device possible: for hardware
- with USB Type-C and Micro-B connectors. This driver supports both.
-
- The INT33FE ACPI device has a CRS table with I2cSerialBusV2
- resources for Fuel Gauge Controller and (in the Type-C variant)
- FUSB302 USB Type-C Controller and PI3USB30532 USB switch.
- This driver instantiates i2c-clients for these, so that standard
- i2c drivers for these chips can bind to the them.
-
- If you enable this driver it is advised to also select
- CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for Micro-B
- device and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
- for Type-C device.
-
config INTEL_HID_EVENT
tristate "INTEL HID Event"
depends on ACPI
@@ -1076,6 +1066,9 @@ config TOUCHSCREEN_DMI
the OS-image for the device. This option supplies the missing info.
Enable this for x86 tablets with Silead or Chipone touchscreens.
+config FW_ATTR_CLASS
+ tristate
+
config INTEL_IMR
bool "Intel Isolated Memory Region support"
depends on X86_INTEL_QUARK && IOSF_MBI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index dcc8cdb95b4d..7ee369aab10d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_GPD_POCKET_FAN) += gpd-pocket-fan.o
# Hewlett Packard
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
-obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
@@ -64,14 +63,13 @@ obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
# Intel
+obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL) += intel/
+
obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
-obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
-intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
- intel_cht_int33fe_typec.o \
- intel_cht_int33fe_microb.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
@@ -112,9 +110,11 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
+obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
+obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index d41d7ad14be0..0cb927f0f301 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -110,11 +110,6 @@ static struct quirk_entry quirk_asus_forceals = {
.wmi_force_als_set = true,
};
-static struct quirk_entry quirk_asus_vendor_backlight = {
- .wmi_backlight_power = true,
- .wmi_backlight_set_devstate = true,
-};
-
static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
.use_kbd_dock_devid = true,
};
@@ -427,78 +422,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA401IH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA401II",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA401IU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA401IV",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA401IVC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA502II",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA502IU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. GA502IV",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
- },
- .driver_data = &quirk_asus_vendor_backlight,
- },
- {
- .callback = dmi_matched,
.ident = "Asus Transformer T100TA / T100HA / T100CHI",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index e0a55337f51a..9e7314d90bea 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -5,7 +5,6 @@
menuconfig X86_PLATFORM_DRIVERS_DELL
bool "Dell X86 Platform Specific Device Drivers"
- default n
depends on X86_PLATFORM_DEVICES
help
Say Y here to get to see options for device drivers for various
@@ -53,6 +52,7 @@ config DELL_LAPTOP
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
+ depends on DELL_WMI || DELL_WMI = n
depends on SERIO_I8042
depends on DELL_SMBIOS
select POWER_SUPPLY
@@ -164,6 +164,14 @@ config DELL_WMI
To compile this driver as a module, choose M here: the module will
be called dell-wmi.
+config DELL_WMI_PRIVACY
+ bool "Dell WMI Hardware Privacy Support"
+ depends on DELL_WMI
+ depends on LEDS_TRIGGER_AUDIO
+ help
+ This option adds integration with the "Dell Hardware Privacy"
+ feature of Dell laptops to the dell-wmi driver.
+
config DELL_WMI_AIO
tristate "WMI Hotkeys for Dell All-In-One series"
default m
@@ -197,6 +205,7 @@ config DELL_WMI_SYSMAN
depends on ACPI_WMI
depends on DMI
select NLS
+ select FW_ATTR_CLASS
help
This driver allows changing BIOS settings on many Dell machines from
2018 and newer without the use of any additional software.
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index d720a3e42ae3..ddba1df71e80 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -15,6 +15,8 @@ dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+dell-wmi-objs := dell-wmi-base.o
+dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index d513a59a5d47..28447c180be8 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -394,8 +394,7 @@ static int host_control_smi(void)
/* wait a few to see if it executed */
num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
- while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
- == ESM_STATUS_CMD_UNSUCCESSFUL) {
+ while ((s8)inb(PCAT_APM_STATUS_PORT) == ESM_STATUS_CMD_UNSUCCESSFUL) {
num_ticks--;
if (num_ticks == EXPIRED_TIMER)
return -ETIME;
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 70edc5bb3a14..8230e7a68a5e 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -31,6 +31,8 @@
#include "dell-rbtn.h"
#include "dell-smbios.h"
+#include "dell-wmi-privacy.h"
+
struct quirk_entry {
bool touchpad_led;
bool kbd_led_not_present;
@@ -90,6 +92,7 @@ static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
static bool force_rfkill;
+static bool micmute_led_registered;
module_param(force_rfkill, bool, 0444);
MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
@@ -2205,11 +2208,13 @@ static int __init dell_init(void)
dell_laptop_register_notifier(&dell_laptop_notifier);
if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
- dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) {
+ dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE) &&
+ !dell_privacy_has_mic_mute()) {
micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
if (ret < 0)
goto fail_led;
+ micmute_led_registered = true;
}
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
@@ -2257,7 +2262,8 @@ static int __init dell_init(void)
fail_get_brightness:
backlight_device_unregister(dell_backlight_device);
fail_backlight:
- led_classdev_unregister(&micmute_led_cdev);
+ if (micmute_led_registered)
+ led_classdev_unregister(&micmute_led_cdev);
fail_led:
dell_cleanup_rfkill();
fail_rfkill:
@@ -2278,7 +2284,8 @@ static void __exit dell_exit(void)
touchpad_led_exit();
kbd_led_exit();
backlight_device_unregister(dell_backlight_device);
- led_classdev_unregister(&micmute_led_cdev);
+ if (micmute_led_registered)
+ led_classdev_unregister(&micmute_led_cdev);
dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
diff --git a/drivers/platform/x86/dell/dell-wmi.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 5e1b7f897df5..089c125e18f7 100644
--- a/drivers/platform/x86/dell/dell-wmi.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -27,6 +27,7 @@
#include <acpi/video.h>
#include "dell-smbios.h"
#include "dell-wmi-descriptor.h"
+#include "dell-wmi-privacy.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
@@ -427,7 +428,6 @@ static void dell_wmi_notify(struct wmi_device *wdev,
switch (buffer_entry[1]) {
case 0x0000: /* One key pressed or event occurred */
- case 0x0012: /* Event with extended data occurred */
if (len > 2)
dell_wmi_process_key(wdev, buffer_entry[1],
buffer_entry[2]);
@@ -439,6 +439,13 @@ static void dell_wmi_notify(struct wmi_device *wdev,
dell_wmi_process_key(wdev, buffer_entry[1],
buffer_entry[i]);
break;
+ case 0x0012:
+ if ((len > 4) && dell_privacy_process_event(buffer_entry[1], buffer_entry[3],
+ buffer_entry[4]))
+ /* dell_privacy_process_event has handled the event */;
+ else if (len > 2)
+ dell_wmi_process_key(wdev, buffer_entry[1], buffer_entry[2]);
+ break;
default: /* Unknown event */
pr_info("Unknown WMI event type 0x%x\n",
(int)buffer_entry[1]);
@@ -747,6 +754,10 @@ static int __init dell_wmi_init(void)
}
}
+ err = dell_privacy_register_driver();
+ if (err)
+ return err;
+
return wmi_driver_register(&dell_wmi_driver);
}
late_initcall(dell_wmi_init);
@@ -757,6 +768,7 @@ static void __exit dell_wmi_exit(void)
dell_wmi_events_set_enabled(false);
wmi_driver_unregister(&dell_wmi_driver);
+ dell_privacy_unregister_driver();
}
module_exit(dell_wmi_exit);
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
new file mode 100644
index 000000000000..074b7e68c227
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Dell privacy notification driver
+ *
+ * Copyright (C) 2021 Dell Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/list.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#include "dell-wmi-privacy.h"
+
+#define DELL_PRIVACY_GUID "6932965F-1671-4CEB-B988-D3AB0A901919"
+#define MICROPHONE_STATUS BIT(0)
+#define CAMERA_STATUS BIT(1)
+#define DELL_PRIVACY_AUDIO_EVENT 0x1
+#define DELL_PRIVACY_CAMERA_EVENT 0x2
+#define led_to_priv(c) container_of(c, struct privacy_wmi_data, cdev)
+
+/*
+ * wmi_list stores the driver's privacy_wmi_data instances; list_mutex protects it.
+ */
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+struct privacy_wmi_data {
+ struct input_dev *input_dev;
+ struct wmi_device *wdev;
+ struct list_head list;
+ struct led_classdev cdev;
+ u32 features_present;
+ u32 last_status;
+};
+
+/* DELL Privacy Type */
+enum dell_hardware_privacy_type {
+ DELL_PRIVACY_TYPE_AUDIO = 0,
+ DELL_PRIVACY_TYPE_CAMERA,
+ DELL_PRIVACY_TYPE_SCREEN,
+ DELL_PRIVACY_TYPE_MAX,
+};
+
+static const char * const privacy_types[DELL_PRIVACY_TYPE_MAX] = {
+ [DELL_PRIVACY_TYPE_AUDIO] = "Microphone",
+ [DELL_PRIVACY_TYPE_CAMERA] = "Camera Shutter",
+ [DELL_PRIVACY_TYPE_SCREEN] = "ePrivacy Screen",
+};
+
+/*
+ * Keymap for WMI privacy events of type 0x0012
+ */
+static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* privacy mic mute */
+ { KE_KEY, 0x0001, { KEY_MICMUTE } },
+ /* privacy camera mute */
+ { KE_SW, 0x0002, { SW_CAMERA_LENS_COVER } },
+ { KE_END, 0},
+};
+
+bool dell_privacy_has_mic_mute(void)
+{
+ struct privacy_wmi_data *priv;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct privacy_wmi_data,
+ list);
+ mutex_unlock(&list_mutex);
+
+ return priv && (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO));
+}
+EXPORT_SYMBOL_GPL(dell_privacy_has_mic_mute);
+
+/*
+ * The flow of privacy event:
+ * 1) User presses key. HW does stuff with this key (timeout is started)
+ * 2) WMI event is emitted from BIOS
+ * 3) WMI event is received by dell-privacy
+ * 4) KEY_MICMUTE emitted from dell-privacy
+ * 5) Userland picks up key and modifies kcontrol for SW mute
+ * 6) Codec kernel driver catches and calls ledtrig_audio_set which will call
+ * led_set_brightness() on the LED registered by dell_privacy_leds_setup()
+ * 7) dell-privacy notifies the EC; the timeout is cancelled and the HW mute activates.
+ * If the EC is not notified then the HW mic mute will activate when the timeout
+ * triggers, just a bit later than with the active ack.
+ */
+bool dell_privacy_process_event(int type, int code, int status)
+{
+ struct privacy_wmi_data *priv;
+ const struct key_entry *key;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct privacy_wmi_data,
+ list);
+ if (!priv)
+ goto error;
+
+ key = sparse_keymap_entry_from_scancode(priv->input_dev, (type << 16) | code);
+ if (!key) {
+ dev_warn(&priv->wdev->dev, "Unknown key with type 0x%04x and code 0x%04x pressed\n",
+ type, code);
+ goto error;
+ }
+ dev_dbg(&priv->wdev->dev, "Key with type 0x%04x and code 0x%04x pressed\n", type, code);
+
+ switch (code) {
+ case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
+ case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+ priv->last_status = status;
+ sparse_keymap_report_entry(priv->input_dev, key, 1, true);
+ ret = true;
+ break;
+ default:
+ dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
+ }
+
+error:
+ mutex_unlock(&list_mutex);
+ return ret;
+}
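
For illustration, a user-space consumer of the events this driver emits might look roughly like the following sketch (not part of the patch; the event node path is a placeholder, real code would locate the node by its name, "Dell Privacy Driver").

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event5", O_RDONLY);	/* placeholder node */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		/* The driver emits exactly these two event types. */
		if (ev.type == EV_KEY && ev.code == KEY_MICMUTE && ev.value)
			printf("microphone mute key pressed\n");
		else if (ev.type == EV_SW && ev.code == SW_CAMERA_LENS_COVER)
			printf("camera lens cover: %d\n", ev.value);
	}

	close(fd);
	return 0;
}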
+
+static ssize_t dell_privacy_supported_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+ enum dell_hardware_privacy_type type;
+ u32 privacy_list;
+ int len = 0;
+
+ privacy_list = priv->features_present;
+ for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
+ if (privacy_list & BIT(type))
+ len += sysfs_emit_at(buf, len, "[%s] [supported]\n", privacy_types[type]);
+ else
+ len += sysfs_emit_at(buf, len, "[%s] [unsupported]\n", privacy_types[type]);
+ }
+
+ return len;
+}
+
+static ssize_t dell_privacy_current_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+ u32 privacy_supported = priv->features_present;
+ enum dell_hardware_privacy_type type;
+ u32 privacy_state = priv->last_status;
+ int len = 0;
+
+ for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
+ if (privacy_supported & BIT(type)) {
+ if (privacy_state & BIT(type))
+ len += sysfs_emit_at(buf, len, "[%s] [unmuted]\n", privacy_types[type]);
+ else
+ len += sysfs_emit_at(buf, len, "[%s] [muted]\n", privacy_types[type]);
+ }
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR_RO(dell_privacy_supported_type);
+static DEVICE_ATTR_RO(dell_privacy_current_state);
+
+static struct attribute *privacy_attributes[] = {
+ &dev_attr_dell_privacy_supported_type.attr,
+ &dev_attr_dell_privacy_current_state.attr,
+ NULL,
+};
+
+static const struct attribute_group privacy_attribute_group = {
+ .attrs = privacy_attributes
+};
+
+/*
+ * Describes the Device State class exposed by BIOS which can be consumed by
+ * various applications interested in knowing the Privacy feature capabilities.
+ * class DeviceState
+ * {
+ * [key, read] string InstanceName;
+ * [read] boolean ReadOnly;
+ *
+ * [WmiDataId(1), read] uint32 DevicesSupported;
+ * 0 - None; 0x1 - Microphone; 0x2 - Camera; 0x4 - ePrivacy Screen
+ *
+ * [WmiDataId(2), read] uint32 CurrentState;
+ * 0 - Off; 1 - On; Bit0 - Microphone; Bit1 - Camera; Bit2 - ePrivacyScreen
+ * };
+ */
+static int get_current_status(struct wmi_device *wdev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);
+ union acpi_object *obj_present;
+ u32 *buffer;
+ int ret = 0;
+
+ if (!priv) {
+ dev_err(&wdev->dev, "dell privacy priv is NULL\n");
+ return -EINVAL;
+ }
+ /* check privacy support features and device states */
+ obj_present = wmidev_block_query(wdev, 0);
+ if (!obj_present) {
+ dev_err(&wdev->dev, "failed to read Binary MOF\n");
+ return -EIO;
+ }
+
+ if (obj_present->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Binary MOF is not a buffer!\n");
+ ret = -EIO;
+ goto obj_free;
+ }
+	/*
+	 * Although it's not technically a failure, this would lead to
+	 * unexpected behavior.
+	 */
+ if (obj_present->buffer.length != 8) {
+ dev_err(&wdev->dev, "Dell privacy buffer has unexpected length (%d)!\n",
+ obj_present->buffer.length);
+ ret = -EINVAL;
+ goto obj_free;
+ }
+ buffer = (u32 *)obj_present->buffer.pointer;
+ priv->features_present = buffer[0];
+ priv->last_status = buffer[1];
+
+obj_free:
+ kfree(obj_present);
+ return ret;
+}
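
As a worked example of the layout described above (illustrative values): a query result of buffer[0] == 0x3 and buffer[1] == 0x1 sets features_present = 0x3, i.e. microphone (bit 0) and camera (bit 1) supported, and last_status = 0x1, i.e. microphone unmuted and camera muted.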
+
+static int dell_privacy_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct privacy_wmi_data *priv = led_to_priv(led_cdev);
+ static char *acpi_method = (char *)"ECAK";
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -EIO;
+
+ if (!acpi_has_method(handle, acpi_method))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, acpi_method, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&priv->wdev->dev, "Error setting privacy EC ack value: %s\n",
+ acpi_format_exception(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Pressing the mute key activates a time delayed circuit to physically cut
+ * off the mute. The LED is in the same circuit, so it reflects the true
+ * state of the HW mute. The reason for the EC "ack" is so that software
+ * can first invoke a SW mute before the HW circuit is cut off. Skipping the
+ * SW mute does not affect the time delayed muting or the status of the LED,
+ * but there is a possibility of a "popping" noise.
+ *
+ * If the EC receives the SW ack, the circuit will be activated before the
+ * delay completed.
+ *
+ * Exposing this as an LED device allows the codec driver's notification
+ * path to trigger the EC ack.
+ */
+static int dell_privacy_leds_setup(struct device *dev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(dev);
+
+ priv->cdev.name = "dell-privacy::micmute";
+ priv->cdev.max_brightness = 1;
+ priv->cdev.brightness_set_blocking = dell_privacy_micmute_led_set;
+ priv->cdev.default_trigger = "audio-micmute";
+ priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ return devm_led_classdev_register(dev, &priv->cdev);
+}
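
For context, step 6 of the mic-mute flow looks roughly like this on the codec-driver side (illustrative sketch, not part of the patch). With the "audio-micmute" trigger attached to the LED registered above, the call propagates to dell_privacy_micmute_led_set() and thus to the ECAK ack:

#include <linux/leds.h>

/*
 * Sketch of the codec-driver side: updating the audio-micmute trigger
 * propagates to all LEDs using it, including dell-privacy::micmute.
 */
static void example_codec_set_micmute(bool muted)
{
	ledtrig_audio_set(LED_AUDIO_MICMUTE, muted ? LED_ON : LED_OFF);
}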
+
+static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct privacy_wmi_data *priv;
+ struct key_entry *keymap;
+ int ret, i;
+
+ ret = wmi_has_guid(DELL_PRIVACY_GUID);
+ if (!ret)
+ pr_debug("Unable to detect available Dell privacy devices!\n");
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+ priv->wdev = wdev;
+ /* create evdev passing interface */
+ priv->input_dev = devm_input_allocate_device(&wdev->dev);
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ /* remap the wmi keymap event to new keymap */
+ keymap = kcalloc(ARRAY_SIZE(dell_wmi_keymap_type_0012),
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap)
+ return -ENOMEM;
+
+ /* remap the keymap code with Dell privacy key type 0x12 as prefix
+ * KEY_MICMUTE scancode will be reported as 0x120001
+ */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+ keymap[i] = dell_wmi_keymap_type_0012[i];
+ keymap[i].code |= (0x0012 << 16);
+ }
+ ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
+ kfree(keymap);
+ if (ret)
+ return ret;
+
+ priv->input_dev->dev.parent = &wdev->dev;
+ priv->input_dev->name = "Dell Privacy Driver";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ ret = input_register_device(priv->input_dev);
+ if (ret)
+ return ret;
+
+ ret = get_current_status(priv->wdev);
+ if (ret)
+ return ret;
+
+ ret = devm_device_add_group(&wdev->dev, &privacy_attribute_group);
+ if (ret)
+ return ret;
+
+ if (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO)) {
+ ret = dell_privacy_leds_setup(&priv->wdev->dev);
+ if (ret)
+ return ret;
+ }
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
+static void dell_privacy_wmi_remove(struct wmi_device *wdev)
+{
+ struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+}
+
+static const struct wmi_device_id dell_wmi_privacy_wmi_id_table[] = {
+ { .guid_string = DELL_PRIVACY_GUID },
+ { },
+};
+
+static struct wmi_driver dell_privacy_wmi_driver = {
+ .driver = {
+ .name = "dell-privacy",
+ },
+ .probe = dell_privacy_wmi_probe,
+ .remove = dell_privacy_wmi_remove,
+ .id_table = dell_wmi_privacy_wmi_id_table,
+};
+
+int dell_privacy_register_driver(void)
+{
+ return wmi_driver_register(&dell_privacy_wmi_driver);
+}
+
+void dell_privacy_unregister_driver(void)
+{
+ wmi_driver_unregister(&dell_privacy_wmi_driver);
+}
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.h b/drivers/platform/x86/dell/dell-wmi-privacy.h
new file mode 100644
index 000000000000..50c9b943dd47
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Dell privacy notification driver
+ *
+ * Copyright (C) 2021 Dell Inc. All Rights Reserved.
+ */
+
+#ifndef _DELL_PRIVACY_WMI_H_
+#define _DELL_PRIVACY_WMI_H_
+
+#if IS_ENABLED(CONFIG_DELL_WMI_PRIVACY)
+bool dell_privacy_has_mic_mute(void);
+bool dell_privacy_process_event(int type, int code, int status);
+int dell_privacy_register_driver(void);
+void dell_privacy_unregister_driver(void);
+#else /* CONFIG_DELL_WMI_PRIVACY */
+static inline bool dell_privacy_has_mic_mute(void)
+{
+ return false;
+}
+
+static inline bool dell_privacy_process_event(int type, int code, int status)
+{
+ return false;
+}
+
+static inline int dell_privacy_register_driver(void)
+{
+ return 0;
+}
+
+static inline void dell_privacy_unregister_driver(void)
+{
+}
+#endif /* CONFIG_DELL_WMI_PRIVACY */
+#endif /* _DELL_PRIVACY_WMI_H_ */
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
index b80f2a62ea3f..3ad33a094588 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
@@ -152,12 +152,15 @@ static ssize_t curr_val##_store(struct kobject *kobj, \
return ret ? ret : count; \
}
+#define check_property_type(attr, prop, valuetype) \
+ (attr##_obj[prop].type != valuetype)
+
union acpi_object *get_wmiobj_pointer(int instance_id, const char *guid_string);
int get_instance_count(const char *guid_string);
void strlcpy_attr(char *dest, char *src);
int populate_enum_data(union acpi_object *enumeration_obj, int instance_id,
- struct kobject *attr_name_kobj);
+ struct kobject *attr_name_kobj, u32 enum_property_count);
int alloc_enum_data(void);
void exit_enum_attributes(void);
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
index 091e48c217ed..8cc212c85266 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
@@ -132,39 +132,68 @@ int alloc_enum_data(void)
* @enumeration_obj: ACPI object with enumeration data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
+ * @enum_property_count: Total properties count under enumeration type
*/
int populate_enum_data(union acpi_object *enumeration_obj, int instance_id,
- struct kobject *attr_name_kobj)
+ struct kobject *attr_name_kobj, u32 enum_property_count)
{
int i, next_obj, value_modifier_count, possible_values_count;
wmi_priv.enumeration_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(enumeration, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.enumeration_data[instance_id].attribute_name,
enumeration_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(enumeration, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name_language_code,
enumeration_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(enumeration, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name,
enumeration_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(enumeration, DEFAULT_VAL, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.enumeration_data[instance_id].default_value,
enumeration_obj[DEFAULT_VAL].string.pointer);
+ if (check_property_type(enumeration, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.enumeration_data[instance_id].dell_modifier,
enumeration_obj[MODIFIER].string.pointer);
next_obj = MODIFIER + 1;
- value_modifier_count = (uintptr_t)enumeration_obj[next_obj].string.pointer;
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ value_modifier_count = (uintptr_t)enumeration_obj[next_obj++].string.pointer;
for (i = 0; i < value_modifier_count; i++) {
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
+ return -EINVAL;
strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier,
- enumeration_obj[++next_obj].string.pointer);
+ enumeration_obj[next_obj++].string.pointer);
strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier, ";");
}
- possible_values_count = (uintptr_t) enumeration_obj[++next_obj].string.pointer;
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
+ return -EINVAL;
+ possible_values_count = (uintptr_t) enumeration_obj[next_obj++].string.pointer;
for (i = 0; i < possible_values_count; i++) {
+ if (next_obj >= enum_property_count)
+ return -EINVAL;
+ if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
+ return -EINVAL;
strcat(wmi_priv.enumeration_data[instance_id].possible_values,
- enumeration_obj[++next_obj].string.pointer);
+ enumeration_obj[next_obj++].string.pointer);
strcat(wmi_priv.enumeration_data[instance_id].possible_values, ";");
}
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
index 8a49ba6e44f9..951e75b538fa 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
@@ -141,20 +141,36 @@ int populate_int_data(union acpi_object *integer_obj, int instance_id,
struct kobject *attr_name_kobj)
{
wmi_priv.integer_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(integer, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.integer_data[instance_id].attribute_name,
integer_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(integer, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.integer_data[instance_id].display_name_language_code,
integer_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(integer, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.integer_data[instance_id].display_name,
integer_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(integer, DEFAULT_VAL, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.integer_data[instance_id].default_value =
(uintptr_t)integer_obj[DEFAULT_VAL].string.pointer;
+ if (check_property_type(integer, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.integer_data[instance_id].dell_modifier,
integer_obj[MODIFIER].string.pointer);
+ if (check_property_type(integer, MIN_VALUE, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.integer_data[instance_id].min_value =
(uintptr_t)integer_obj[MIN_VALUE].string.pointer;
+ if (check_property_type(integer, MAX_VALUE, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.integer_data[instance_id].max_value =
(uintptr_t)integer_obj[MAX_VALUE].string.pointer;
+ if (check_property_type(integer, SCALAR_INCR, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.integer_data[instance_id].scalar_increment =
(uintptr_t)integer_obj[SCALAR_INCR].string.pointer;
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
index 834b3e82ad9f..230e6ee96636 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
@@ -159,10 +159,16 @@ int alloc_po_data(void)
int populate_po_data(union acpi_object *po_obj, int instance_id, struct kobject *attr_name_kobj)
{
wmi_priv.po_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(po, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.po_data[instance_id].attribute_name,
po_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(po, MIN_PASS_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.po_data[instance_id].min_password_length =
(uintptr_t)po_obj[MIN_PASS_LEN].string.pointer;
+ if (check_property_type(po, MAX_PASS_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.po_data[instance_id].max_password_length =
(uintptr_t) po_obj[MAX_PASS_LEN].string.pointer;
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
index 339a082d6c18..86ec962aace9 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
@@ -95,9 +95,9 @@ int set_new_password(const char *password_type, const char *new)
print_hex_dump_bytes("set new password data: ", DUMP_PREFIX_NONE, buffer, buffer_size);
ret = call_password_interface(wmi_priv.password_attr_wdev, buffer, buffer_size);
- /* clear current_password here and use user input from wmi_priv.current_password */
+ /* on success copy the new password to current password */
if (!ret)
- memset(current_password, 0, MAX_BUFF);
+ strscpy(current_password, new, MAX_BUFF);
/* explain to user the detailed failure reason */
else if (ret == -EOPNOTSUPP)
dev_err(&wmi_priv.password_attr_wdev->dev, "admin password must be configured\n");
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
index 552537852459..c392f0ecf8b5 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
@@ -118,24 +118,38 @@ int alloc_str_data(void)
/**
* populate_str_data() - Populate all properties of an instance under string attribute
- * @str_obj: ACPI object with integer data
+ * @str_obj: ACPI object with string data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int populate_str_data(union acpi_object *str_obj, int instance_id, struct kobject *attr_name_kobj)
{
wmi_priv.str_data[instance_id].attr_name_kobj = attr_name_kobj;
+ if (check_property_type(str, ATTR_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.str_data[instance_id].attribute_name,
str_obj[ATTR_NAME].string.pointer);
+ if (check_property_type(str, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.str_data[instance_id].display_name_language_code,
str_obj[DISPL_NAME_LANG_CODE].string.pointer);
+ if (check_property_type(str, DISPLAY_NAME, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.str_data[instance_id].display_name,
str_obj[DISPLAY_NAME].string.pointer);
+ if (check_property_type(str, DEFAULT_VAL, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.str_data[instance_id].default_value,
str_obj[DEFAULT_VAL].string.pointer);
+ if (check_property_type(str, MODIFIER, ACPI_TYPE_STRING))
+ return -EINVAL;
strlcpy_attr(wmi_priv.str_data[instance_id].dell_modifier,
str_obj[MODIFIER].string.pointer);
+ if (check_property_type(str, MIN_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.str_data[instance_id].min_length = (uintptr_t)str_obj[MIN_LEN].string.pointer;
+ if (check_property_type(str, MAX_LEN, ACPI_TYPE_INTEGER))
+ return -EINVAL;
wmi_priv.str_data[instance_id].max_length = (uintptr_t) str_obj[MAX_LEN].string.pointer;
return sysfs_create_group(attr_name_kobj, &str_attr_group);
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index c8d276d78e92..636bdfa83284 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -13,14 +13,11 @@
#include <linux/kernel.h>
#include <linux/wmi.h>
#include "dell-wmi-sysman.h"
+#include "../../firmware_attributes_class.h"
#define MAX_TYPES 4
#include <linux/nls.h>
-static struct class firmware_attributes_class = {
- .name = "firmware-attributes",
-};
-
struct wmi_sysman_priv wmi_priv = {
.mutex = __MUTEX_INITIALIZER(wmi_priv.mutex),
};
@@ -28,6 +25,7 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
+static struct class *fw_attr_class;
/**
@@ -481,7 +479,8 @@ static int init_bios_attributes(int attr_type, const char *guid)
/* enumerate all of this attribute */
switch (attr_type) {
case ENUM:
- retval = populate_enum_data(elements, instance_id, attr_name_kobj);
+ retval = populate_enum_data(elements, instance_id, attr_name_kobj,
+ obj->package.count);
break;
case INT:
retval = populate_int_data(elements, instance_id, attr_name_kobj);
@@ -541,11 +540,11 @@ static int __init sysman_init(void)
goto err_exit_bios_attr_pass_interface;
}
- ret = class_register(&firmware_attributes_class);
+ ret = fw_attributes_class_get(&fw_attr_class);
if (ret)
goto err_exit_bios_attr_pass_interface;
- wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(wmi_priv.class_dev)) {
ret = PTR_ERR(wmi_priv.class_dev);
@@ -602,10 +601,10 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+ device_destroy(fw_attr_class, MKDEV(0, 0));
err_unregister_class:
- class_unregister(&firmware_attributes_class);
+ fw_attributes_class_put();
err_exit_bios_attr_pass_interface:
exit_bios_attr_pass_interface();
@@ -619,8 +618,8 @@ err_exit_bios_attr_set_interface:
static void __exit sysman_exit(void)
{
release_attributes_data();
- device_destroy(&firmware_attributes_class, MKDEV(0, 0));
- class_unregister(&firmware_attributes_class);
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+ fw_attributes_class_put();
exit_bios_attr_set_interface();
exit_bios_attr_pass_interface();
}
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
new file mode 100644
index 000000000000..fafe8eaf6e3e
--- /dev/null
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Firmware attributes class helper module */
+
+#include <linux/mutex.h>
+#include <linux/device/class.h>
+#include <linux/module.h>
+#include "firmware_attributes_class.h"
+
+static DEFINE_MUTEX(fw_attr_lock);
+static int fw_attr_inuse;
+
+static struct class firmware_attributes_class = {
+ .name = "firmware-attributes",
+};
+
+int fw_attributes_class_get(struct class **fw_attr_class)
+{
+ int err;
+
+ mutex_lock(&fw_attr_lock);
+ if (!fw_attr_inuse) { /* first time class is being used */
+ err = class_register(&firmware_attributes_class);
+ if (err) {
+ mutex_unlock(&fw_attr_lock);
+ return err;
+ }
+ }
+ fw_attr_inuse++;
+ *fw_attr_class = &firmware_attributes_class;
+ mutex_unlock(&fw_attr_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+
+int fw_attributes_class_put(void)
+{
+ mutex_lock(&fw_attr_lock);
+ if (!fw_attr_inuse) {
+ mutex_unlock(&fw_attr_lock);
+ return -EINVAL;
+ }
+ fw_attr_inuse--;
+ if (!fw_attr_inuse) /* No more consumers */
+ class_unregister(&firmware_attributes_class);
+ mutex_unlock(&fw_attr_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fw_attributes_class_put);
+
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
new file mode 100644
index 000000000000..486485cb1f54
--- /dev/null
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Firmware attributes class helper module */
+
+#ifndef FW_ATTR_CLASS_H
+#define FW_ATTR_CLASS_H
+
+int fw_attributes_class_get(struct class **fw_attr_class);
+int fw_attributes_class_put(void);
+
+#endif /* FW_ATTR_CLASS_H */
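
The two helpers above replace each driver's private "firmware-attributes" class with a single shared, refcounted instance: the first fw_attributes_class_get() registers the class, the last fw_attributes_class_put() unregisters it. A minimal consumer sketch, mirroring the dell-wmi-sysman conversion above (the "example" names are illustrative, not part of this series):

#include <linux/device.h>
#include "firmware_attributes_class.h"

static struct class *fw_attr_class;
static struct device *class_dev;

static int __init example_init(void)
{
	int ret = fw_attributes_class_get(&fw_attr_class);

	if (ret)
		return ret;

	class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
				  NULL, "%s", "example-driver");
	if (IS_ERR(class_dev)) {
		fw_attributes_class_put();
		return PTR_ERR(class_dev);
	}
	return 0;
}

static void __exit example_exit(void)
{
	device_destroy(fw_attr_class, MKDEV(0, 0));
	fw_attributes_class_put();
}
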
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index a72270932ec3..9996485f5295 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -462,7 +462,7 @@ static struct attribute *hdaps_attributes[] = {
NULL,
};
-static struct attribute_group hdaps_attribute_group = {
+static const struct attribute_group hdaps_attribute_group = {
.attrs = hdaps_attributes,
};
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
deleted file mode 100644
index 0753ef18e721..000000000000
--- a/drivers/platform/x86/hp-wireless.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Airplane mode button for HP & Xiaomi laptops
- *
- * Copyright (C) 2014-2017 Alex Hung <alex.hung@canonical.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alex Hung");
-MODULE_ALIAS("acpi*:HPQ6001:*");
-MODULE_ALIAS("acpi*:WSTADEF:*");
-MODULE_ALIAS("acpi*:AMDI0051:*");
-
-static struct input_dev *hpwl_input_dev;
-
-static const struct acpi_device_id hpwl_ids[] = {
- {"HPQ6001", 0},
- {"WSTADEF", 0},
- {"AMDI0051", 0},
- {"", 0},
-};
-
-static int hp_wireless_input_setup(void)
-{
- int err;
-
- hpwl_input_dev = input_allocate_device();
- if (!hpwl_input_dev)
- return -ENOMEM;
-
- hpwl_input_dev->name = "HP Wireless hotkeys";
- hpwl_input_dev->phys = "hpq6001/input0";
- hpwl_input_dev->id.bustype = BUS_HOST;
- hpwl_input_dev->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_RFKILL, hpwl_input_dev->keybit);
-
- err = input_register_device(hpwl_input_dev);
- if (err)
- goto err_free_dev;
-
- return 0;
-
-err_free_dev:
- input_free_device(hpwl_input_dev);
- return err;
-}
-
-static void hp_wireless_input_destroy(void)
-{
- input_unregister_device(hpwl_input_dev);
-}
-
-static void hpwl_notify(struct acpi_device *acpi_dev, u32 event)
-{
- if (event != 0x80) {
- pr_info("Received unknown event (0x%x)\n", event);
- return;
- }
-
- input_report_key(hpwl_input_dev, KEY_RFKILL, 1);
- input_sync(hpwl_input_dev);
- input_report_key(hpwl_input_dev, KEY_RFKILL, 0);
- input_sync(hpwl_input_dev);
-}
-
-static int hpwl_add(struct acpi_device *device)
-{
- int err;
-
- err = hp_wireless_input_setup();
- if (err)
- pr_err("Failed to setup hp wireless hotkeys\n");
-
- return err;
-}
-
-static int hpwl_remove(struct acpi_device *device)
-{
- hp_wireless_input_destroy();
- return 0;
-}
-
-static struct acpi_driver hpwl_driver = {
- .name = "hp-wireless",
- .owner = THIS_MODULE,
- .ids = hpwl_ids,
- .ops = {
- .add = hpwl_add,
- .remove = hpwl_remove,
- .notify = hpwl_notify,
- },
-};
-
-module_acpi_driver(hpwl_driver);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 387817290921..784326bd72f0 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1408,6 +1408,18 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 6:
ideapad_input_report(priv, bit);
break;
+ case 10:
+ /*
+ * This event gets sent on a Yoga 300-11IBR when the EC
+ * believes that the device has changed between laptop/
+ * tent/stand/tablet mode. The EC relies on getting
+ * angle info from 2 accelerometers through a special
+ * Windows service calling a DSM on the DUAL250E ACPI-
+ * device. Linux does not do this, making the laptop/
+ * tent/stand/tablet mode info unreliable, so we simply
+ * ignore these events.
+ */
+ break;
case 9:
ideapad_sync_rfk_state(priv);
break;
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
new file mode 100644
index 000000000000..f2eef337eb98
--- /dev/null
+++ b/drivers/platform/x86/intel/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DRIVERS_INTEL
+ bool "Intel x86 Platform Specific Device Drivers"
+ default y
+ help
+ Say Y here to get to see options for device drivers for
+ various Intel x86 platforms, including vendor-specific
+ drivers. This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped
+ and disabled.
+
+if X86_PLATFORM_DRIVERS_INTEL
+
+source "drivers/platform/x86/intel/int33fe/Kconfig"
+source "drivers/platform/x86/intel/int3472/Kconfig"
+
+endif # X86_PLATFORM_DRIVERS_INTEL
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
new file mode 100644
index 000000000000..0653055942d5
--- /dev/null
+++ b/drivers/platform/x86/intel/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/intel
+# Intel x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_INTEL_CHT_INT33FE) += int33fe/
+obj-$(CONFIG_INTEL_SKL_INT3472) += int3472/
diff --git a/drivers/platform/x86/intel/int33fe/Kconfig b/drivers/platform/x86/intel/int33fe/Kconfig
new file mode 100644
index 000000000000..2f7329a2e399
--- /dev/null
+++ b/drivers/platform/x86/intel/int33fe/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTEL_CHT_INT33FE
+ tristate "Intel Cherry Trail ACPI INT33FE Driver"
+ depends on X86 && ACPI && I2C && REGULATOR
+ depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
+ help
+ This driver adds support for the INT33FE ACPI device found on
+ some Intel Cherry Trail devices.
+
+ There are two kinds of INT33FE ACPI device possible: one for hardware
+ with a USB Type-C connector and one for hardware with a Micro-B
+ connector. This driver supports both.
+
+ The INT33FE ACPI device has a CRS table with I2cSerialBusV2
+ resources for a Fuel Gauge Controller and (in the Type-C variant)
+ an FUSB302 USB Type-C Controller and a PI3USB30532 USB switch.
+ This driver instantiates i2c-clients for these, so that standard
+ i2c drivers for these chips can bind to them.
+
+ If you enable this driver it is advised to also select
+ CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for the
+ Micro-B device, and CONFIG_TYPEC_FUSB302=m and
+ CONFIG_BATTERY_MAX17042=m for the Type-C device.
diff --git a/drivers/platform/x86/intel/int33fe/Makefile b/drivers/platform/x86/intel/int33fe/Makefile
new file mode 100644
index 000000000000..cc11183ce179
--- /dev/null
+++ b/drivers/platform/x86/intel/int33fe/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
+intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
+ intel_cht_int33fe_typec.o \
+ intel_cht_int33fe_microb.o
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.c b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c
index 251ed9bac789..251ed9bac789 100644
--- a/drivers/platform/x86/intel_cht_int33fe_common.c
+++ b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.c
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.h b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.h
index 03cd45f4e8cb..03cd45f4e8cb 100644
--- a/drivers/platform/x86/intel_cht_int33fe_common.h
+++ b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_common.h
diff --git a/drivers/platform/x86/intel_cht_int33fe_microb.c b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_microb.c
index 673f41cd14b5..673f41cd14b5 100644
--- a/drivers/platform/x86/intel_cht_int33fe_microb.c
+++ b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_microb.c
diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_typec.c
index b61bad9cc8d2..d59544167430 100644
--- a/drivers/platform/x86/intel_cht_int33fe_typec.c
+++ b/drivers/platform/x86/intel/int33fe/intel_cht_int33fe_typec.c
@@ -168,8 +168,8 @@ static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
return -ENODEV;
}
- /* Then the DP child device node */
- data->dp = device_get_named_child_node(&pdev->dev, "DD02");
+ /* Then the DP-2 child device node */
+ data->dp = device_get_named_child_node(&pdev->dev, "DD04");
pci_dev_put(pdev);
if (!data->dp)
return -ENODEV;
diff --git a/drivers/platform/x86/intel/int3472/Kconfig b/drivers/platform/x86/intel/int3472/Kconfig
new file mode 100644
index 000000000000..62e5d4cf9ee5
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Kconfig
@@ -0,0 +1,30 @@
+config INTEL_SKL_INT3472
+ tristate "Intel SkyLake ACPI INT3472 Driver"
+ depends on ACPI
+ depends on COMMON_CLK
+ depends on I2C
+ depends on GPIOLIB
+ depends on REGULATOR
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ This driver adds power controller support for the Intel SkyCam
+ devices found on the Intel SkyLake platforms.
+
+ The INT3472 is a camera power controller, a logical device found on
+ Intel Skylake-based systems that can map to different hardware
+ devices depending on the platform. On machines designed for Chrome OS
+ it maps to a TPS68470 camera PMIC. On machines designed for Windows,
+ it maps to either a TPS68470 camera PMIC, a uP6641Q sensor PMIC, or a
+ set of discrete GPIOs and power gates.
+
+ If your device was designed for Chrome OS, this driver will provide
+ an ACPI OpRegion, which must be available before any of the devices
+ using it are probed. For this reason, you should select Y if your
+ device was designed for ChromeOS. For the same reason the
+ I2C_DESIGNWARE_PLATFORM option must be set to Y too.
+
+ Say Y or M here if you have a SkyLake device designed for use
+ with Windows or ChromeOS. Say N here if you are not sure.
+
+ The module will be named "intel-skl-int3472".
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
new file mode 100644
index 000000000000..48bd97f0a04e
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472.o
+intel_skl_int3472-objs := intel_skl_int3472_common.o \
+ intel_skl_int3472_discrete.o \
+ intel_skl_int3472_tps68470.o \
+ intel_skl_int3472_clk_and_regulator.o
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c
new file mode 100644
index 000000000000..1700e7557a82
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/slab.h>
+
+#include "intel_skl_int3472_common.h"
+
+/*
+ * The regulators have to have .ops to be valid, but the only ops we actually
+ * support are .enable and .disable which are handled via .ena_gpiod. Pass an
+ * empty struct to clear the check without lying about capabilities.
+ */
+static const struct regulator_ops int3472_gpio_regulator_ops;
+
+static int skl_int3472_clk_prepare(struct clk_hw *hw)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ gpiod_set_value_cansleep(clk->ena_gpio, 1);
+ gpiod_set_value_cansleep(clk->led_gpio, 1);
+
+ return 0;
+}
+
+static void skl_int3472_clk_unprepare(struct clk_hw *hw)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ gpiod_set_value_cansleep(clk->ena_gpio, 0);
+ gpiod_set_value_cansleep(clk->led_gpio, 0);
+}
+
+static int skl_int3472_clk_enable(struct clk_hw *hw)
+{
+ /*
+ * We're just turning a GPIO on to enable the clock, which operation
+ * has the potential to sleep. Given .enable() cannot sleep, but
+ * .prepare() can, we toggle the GPIO in .prepare() instead. Thus,
+ * nothing to do here.
+ */
+ return 0;
+}
+
+static void skl_int3472_clk_disable(struct clk_hw *hw)
+{
+ /* Likewise, nothing to do here... */
+}
+
+static unsigned int skl_int3472_get_clk_frequency(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+ unsigned int freq;
+
+ obj = skl_int3472_get_acpi_buffer(int3472->sensor, "SSDB");
+ if (IS_ERR(obj))
+ return 0; /* report rate as 0 on error */
+
+ if (obj->buffer.length < CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET + sizeof(u32)) {
+ dev_err(int3472->dev, "The buffer is too small\n");
+ kfree(obj);
+ return 0;
+ }
+
+ freq = *(u32 *)(obj->buffer.pointer + CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET);
+
+ kfree(obj);
+ return freq;
+}
+
+static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct int3472_gpio_clock *clk = to_int3472_clk(hw);
+
+ return clk->frequency;
+}
+
+static const struct clk_ops skl_int3472_clock_ops = {
+ .prepare = skl_int3472_clk_prepare,
+ .unprepare = skl_int3472_clk_unprepare,
+ .enable = skl_int3472_clk_enable,
+ .disable = skl_int3472_clk_disable,
+ .recalc_rate = skl_int3472_clk_recalc_rate,
+};
+
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472)
+{
+ struct clk_init_data init = {
+ .ops = &skl_int3472_clock_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ int ret;
+
+ init.name = kasprintf(GFP_KERNEL, "%s-clk",
+ acpi_dev_name(int3472->adev));
+ if (!init.name)
+ return -ENOMEM;
+
+ int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
+
+ int3472->clock.clk_hw.init = &init;
+ int3472->clock.clk = clk_register(&int3472->adev->dev,
+ &int3472->clock.clk_hw);
+ if (IS_ERR(int3472->clock.clk)) {
+ ret = PTR_ERR(int3472->clock.clk);
+ goto out_free_init_name;
+ }
+
+ int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL,
+ int3472->sensor_name);
+ if (!int3472->clock.cl) {
+ ret = -ENOMEM;
+ goto err_unregister_clk;
+ }
+
+ kfree(init.name);
+ return 0;
+
+err_unregister_clk:
+ clk_unregister(int3472->clock.clk);
+out_free_init_name:
+ kfree(init.name);
+
+ return ret;
+}
+
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
+{
+ clkdev_drop(int3472->clock.cl);
+ clk_unregister(int3472->clock.clk);
+}
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio)
+{
+ const struct int3472_sensor_config *sensor_config;
+ char *path = agpio->resource_source.string_ptr;
+ struct regulator_consumer_supply supply_map;
+ struct regulator_init_data init_data = { };
+ struct regulator_config cfg = { };
+ int ret;
+
+ sensor_config = int3472->sensor_config;
+ if (IS_ERR(sensor_config)) {
+ dev_err(int3472->dev, "No sensor module config\n");
+ return PTR_ERR(sensor_config);
+ }
+
+ if (!sensor_config->supply_map.supply) {
+ dev_err(int3472->dev, "No supply name defined\n");
+ return -ENODEV;
+ }
+
+ init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ init_data.num_consumer_supplies = 1;
+ supply_map = sensor_config->supply_map;
+ supply_map.dev_name = int3472->sensor_name;
+ init_data.consumer_supplies = &supply_map;
+
+ snprintf(int3472->regulator.regulator_name,
+ sizeof(int3472->regulator.regulator_name), "%s-regulator",
+ acpi_dev_name(int3472->adev));
+ snprintf(int3472->regulator.supply_name,
+ GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0");
+
+ int3472->regulator.rdesc = INT3472_REGULATOR(
+ int3472->regulator.regulator_name,
+ int3472->regulator.supply_name,
+ &int3472_gpio_regulator_ops);
+
+ int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,regulator");
+ if (IS_ERR(int3472->regulator.gpio)) {
+ dev_err(int3472->dev, "Failed to get regulator GPIO line\n");
+ return PTR_ERR(int3472->regulator.gpio);
+ }
+
+ cfg.dev = &int3472->adev->dev;
+ cfg.init_data = &init_data;
+ cfg.ena_gpiod = int3472->regulator.gpio;
+
+ int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc,
+ &cfg);
+ if (IS_ERR(int3472->regulator.rdev)) {
+ ret = PTR_ERR(int3472->regulator.rdev);
+ goto err_free_gpio;
+ }
+
+ return 0;
+
+err_free_gpio:
+ gpiod_put(int3472->regulator.gpio);
+
+ return ret;
+}
+
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472)
+{
+ regulator_unregister(int3472->regulator.rdev);
+ gpiod_put(int3472->regulator.gpio);
+}
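
Everything registered here is consumed through the ordinary clk and regulator frameworks on the sensor side; nothing INT3472-specific leaks into the sensor driver. A sketch of that consumer path, assuming the "avdd" supply name from the REGULATOR_SUPPLY() entries in the sensor config table later in the series (sensor_power_on() itself is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int sensor_power_on(struct device *dev)
{
	struct regulator *avdd;
	struct clk *mclk;
	int ret;

	avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(avdd))
		return PTR_ERR(avdd);

	ret = regulator_enable(avdd);	/* toggles the ena_gpiod above */
	if (ret)
		return ret;

	mclk = devm_clk_get(dev, NULL);	/* resolved via the clkdev lookup */
	if (IS_ERR(mclk)) {
		regulator_disable(avdd);
		return PTR_ERR(mclk);
	}

	/* .prepare raises both the clk-enable and privacy-LED GPIOs */
	return clk_prepare_enable(mclk);
}
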
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c
new file mode 100644
index 000000000000..497e74fba75f
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "intel_skl_int3472_common.h"
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle = adev->handle;
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ obj = buffer.pointer;
+ if (!obj)
+ return ERR_PTR(-ENODEV);
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_err(handle, "%s object is not an ACPI buffer\n", id);
+ kfree(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return obj;
+}
+
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
+{
+ union acpi_object *obj;
+ int ret;
+
+ obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->buffer.length > sizeof(*cldb)) {
+ acpi_handle_err(adev->handle, "The CLDB buffer is too large\n");
+ ret = -EINVAL;
+ goto out_free_obj;
+ }
+
+ memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
+ ret = 0;
+
+out_free_obj:
+ kfree(obj);
+ return ret;
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct platform_driver int3472_discrete = {
+ .driver = {
+ .name = "int3472-discrete",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe = skl_int3472_discrete_probe,
+ .remove = skl_int3472_discrete_remove,
+};
+
+static struct i2c_driver int3472_tps68470 = {
+ .driver = {
+ .name = "int3472-tps68470",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe_new = skl_int3472_tps68470_probe,
+};
+
+static int skl_int3472_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3472_discrete);
+ if (ret)
+ return ret;
+
+ ret = i2c_register_driver(THIS_MODULE, &int3472_tps68470);
+ if (ret)
+ platform_driver_unregister(&int3472_discrete);
+
+ return ret;
+}
+module_init(skl_int3472_init);
+
+static void skl_int3472_exit(void)
+{
+ platform_driver_unregister(&int3472_discrete);
+ i2c_del_driver(&int3472_tps68470);
+}
+module_exit(skl_int3472_exit);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h
new file mode 100644
index 000000000000..714fde73b524
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#ifndef _INTEL_SKL_INT3472_H
+#define _INTEL_SKL_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+
+#define GPIO_REGULATOR_NAME_LENGTH 21
+#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _supply, _ops) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .supply_name = _supply, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_gpio_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[28];
+};
+
+struct int3472_gpio_function_remap {
+ const char *documented;
+ const char *actual;
+};
+
+struct int3472_sensor_config {
+ const char *sensor_module_name;
+ struct regulator_consumer_supply supply_map;
+ const struct int3472_gpio_function_remap *function_maps;
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ const struct int3472_sensor_config *sensor_config;
+
+ struct int3472_gpio_regulator {
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH];
+ struct gpio_desc *gpio;
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+ } regulator;
+
+ struct int3472_gpio_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ struct gpio_desc *led_gpio;
+ u32 frequency;
+ } clock;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ struct gpiod_lookup_table gpios;
+};
+
+int skl_int3472_discrete_probe(struct platform_device *pdev);
+int skl_int3472_discrete_remove(struct platform_device *pdev);
+int skl_int3472_tps68470_probe(struct i2c_client *client);
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+#endif
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
new file mode 100644
index 000000000000..9fe0a2527e1c
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/uuid.h>
+
+#include "intel_skl_int3472_common.h"
+
+/*
+ * 79234640-9e10-4fea-a5c1-b5aa8b19756f
+ * This _DSM GUID returns information about the GPIO lines mapped to a
+ * discrete INT3472 device. Function number 1 returns a count of the GPIO
+ * lines that are mapped. Subsequent functions return 32 bit ints encoding
+ * information about the GPIO line, including its purpose.
+ */
+static const guid_t int3472_gpio_guid =
+ GUID_INIT(0x79234640, 0x9e10, 0x4fea,
+ 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
+
+/*
+ * 822ace8f-2814-4174-a56b-5f029fe079ee
+ * This _DSM GUID returns a string from the sensor device, which acts as a
+ * module identifier.
+ */
+static const guid_t cio2_sensor_module_guid =
+ GUID_INIT(0x822ace8f, 0x2814, 0x4174,
+ 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
+
+/*
+ * Here follows platform specific mapping information that we can pass to
+ * the functions mapping resources to the sensors. Where the sensors have
+ * a power enable pin defined in DSDT we need to provide a supply name so
+ * the sensor drivers can find the regulator. The device name will be derived
+ * from the sensor's ACPI device within the code. Optionally, we can provide a
+ * NULL terminated array of function name mappings to deal with any platform
+ * specific deviations from the documented behaviour of GPIOs.
+ *
+ * Map a GPIO function name to NULL to prevent the driver from mapping that
+ * GPIO at all.
+ */
+
+static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = {
+ { "reset", NULL },
+ { "powerdown", "reset" },
+ { }
+};
+
+static const struct int3472_sensor_config int3472_sensor_configs[] = {
+ /* Lenovo Miix 510-12ISK - OV2680, Front */
+ { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps },
+ /* Lenovo Miix 510-12ISK - OV5648, Rear */
+ { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL },
+ /* Surface Go 1&2 - OV5693, Front */
+ { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL },
+};
+
+static const struct int3472_sensor_config *
+skl_int3472_get_sensor_module_config(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+ unsigned int i;
+
+ obj = acpi_evaluate_dsm_typed(int3472->sensor->handle,
+ &cio2_sensor_module_guid, 0x00,
+ 0x01, NULL, ACPI_TYPE_STRING);
+
+ if (!obj) {
+ dev_err(int3472->dev,
+ "Failed to get sensor module string from _DSM\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (obj->string.type != ACPI_TYPE_STRING) {
+ dev_err(int3472->dev,
+ "Sensor _DSM returned a non-string value\n");
+
+ ACPI_FREE(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(int3472_sensor_configs); i++) {
+ if (!strcmp(int3472_sensor_configs[i].sensor_module_name,
+ obj->string.pointer))
+ break;
+ }
+
+ ACPI_FREE(obj);
+
+ if (i >= ARRAY_SIZE(int3472_sensor_configs))
+ return ERR_PTR(-EINVAL);
+
+ return &int3472_sensor_configs[i];
+}
+
+static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio,
+ const char *func, u32 polarity)
+{
+ const struct int3472_sensor_config *sensor_config;
+ char *path = agpio->resource_source.string_ptr;
+ struct gpiod_lookup *table_entry;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
+ int ret;
+
+ if (int3472->n_sensor_gpios >= INT3472_MAX_SENSOR_GPIOS) {
+ dev_warn(int3472->dev, "Too many GPIOs mapped\n");
+ return -EINVAL;
+ }
+
+ sensor_config = int3472->sensor_config;
+ if (!IS_ERR(sensor_config) && sensor_config->function_maps) {
+ const struct int3472_gpio_function_remap *remap;
+
+ for (remap = sensor_config->function_maps; remap->documented; remap++) {
+ if (!strcmp(func, remap->documented)) {
+ func = remap->actual;
+ break;
+ }
+ }
+ }
+
+ /* Functions mapped to NULL should not be mapped to the sensor */
+ if (!func)
+ return 0;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ ret = acpi_bus_get_device(handle, &adev);
+ if (ret)
+ return -ENODEV;
+
+ table_entry = &int3472->gpios.table[int3472->n_sensor_gpios];
+ table_entry->key = acpi_dev_name(adev);
+ table_entry->chip_hwnum = agpio->pin_table[0];
+ table_entry->con_id = func;
+ table_entry->idx = 0;
+ table_entry->flags = polarity;
+
+ int3472->n_sensor_gpios++;
+
+ return 0;
+}
+
+static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u8 type)
+{
+ char *path = agpio->resource_source.string_ptr;
+ u16 pin = agpio->pin_table[0];
+ struct gpio_desc *gpio;
+
+ switch (type) {
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ gpio = acpi_get_and_request_gpiod(path, pin, "int3472,clk-enable");
+ if (IS_ERR(gpio))
+ return (PTR_ERR(gpio));
+
+ int3472->clock.ena_gpio = gpio;
+ break;
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
+ if (IS_ERR(gpio))
+ return (PTR_ERR(gpio));
+
+ int3472->clock.led_gpio = gpio;
+ break;
+ default:
+ dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * skl_int3472_handle_gpio_resources - Map PMIC resources to consuming sensor
+ * @ares: A pointer to a &struct acpi_resource
+ * @data: A pointer to a &struct int3472_discrete_device
+ *
+ * This function handles GPIO resources that are against an INT3472
+ * ACPI device, by checking the value of the corresponding _DSM entry.
+ * This will return a 32bit int, where the lowest byte represents the
+ * function of the GPIO pin:
+ *
+ * 0x00 Reset
+ * 0x01 Power down
+ * 0x0b Power enable
+ * 0x0c Clock enable
+ * 0x0d Privacy LED
+ *
+ * There are some known platform specific quirks where that does not quite
+ * hold up; for example where a pin with type 0x01 (Power down) is mapped to
+ * a sensor pin that performs a reset function or entries in _CRS and _DSM that
+ * do not actually correspond to a physical connection. These will be handled
+ * by the mapping sub-functions.
+ *
+ * GPIOs will either be mapped directly to the sensor device or else used
+ * to create clocks and regulators via the usual frameworks.
+ *
+ * Return:
+ * * 1 - To continue the loop
+ * * 0 - When all resources found are handled properly.
+ * * -EINVAL - If the resource is not a GPIO IO resource
+ * * -ENODEV - If the resource has no corresponding _DSM entry
+ * * -Other - Errors propagated from one of the sub-functions.
+ */
+static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ void *data)
+{
+ struct int3472_discrete_device *int3472 = data;
+ struct acpi_resource_gpio *agpio;
+ union acpi_object *obj;
+ const char *err_msg;
+ int ret;
+ u8 type;
+
+ if (!acpi_gpio_get_io_resource(ares, &agpio))
+ return 1;
+
+ /*
+ * ngpios + 2 because the index of this _DSM function is 1-based and
+ * the first function is just a count.
+ */
+ obj = acpi_evaluate_dsm_typed(int3472->adev->handle,
+ &int3472_gpio_guid, 0x00,
+ int3472->ngpios + 2,
+ NULL, ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ dev_warn(int3472->dev, "No _DSM entry for GPIO pin %u\n",
+ agpio->pin_table[0]);
+ return 1;
+ }
+
+ type = obj->integer.value & 0xff;
+
+ switch (type) {
+ case INT3472_GPIO_TYPE_RESET:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset",
+ GPIO_ACTIVE_LOW);
+ if (ret)
+ err_msg = "Failed to map reset pin to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown",
+ GPIO_ACTIVE_LOW);
+ if (ret)
+ err_msg = "Failed to map powerdown pin to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type);
+ if (ret)
+ err_msg = "Failed to map GPIO to clock\n";
+
+ break;
+ case INT3472_GPIO_TYPE_POWER_ENABLE:
+ ret = skl_int3472_register_regulator(int3472, agpio);
+ if (ret)
+ err_msg = "Failed to map regulator to sensor\n";
+
+ break;
+ default:
+ dev_warn(int3472->dev,
+ "GPIO type 0x%02x unknown; the sensor may not work\n",
+ type);
+ ret = 1;
+ break;
+ }
+
+ int3472->ngpios++;
+ ACPI_FREE(obj);
+
+ if (ret < 0)
+ return dev_err_probe(int3472->dev, ret, err_msg);
+
+ return ret;
+}
+
+static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
+{
+ LIST_HEAD(resource_list);
+ int ret;
+
+ /*
+ * No error check, because not having a sensor config is not necessarily
+ * a failure mode.
+ */
+ int3472->sensor_config = skl_int3472_get_sensor_module_config(int3472);
+
+ ret = acpi_dev_get_resources(int3472->adev, &resource_list,
+ skl_int3472_handle_gpio_resources,
+ int3472);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /*
+ * If we find no clock enable GPIO pin then the privacy LED won't work.
+ * We've never seen that situation, but it's possible. Warn the user so
+ * it's clear what's happened.
+ */
+ if (int3472->clock.ena_gpio) {
+ ret = skl_int3472_register_clock(int3472);
+ if (ret)
+ return ret;
+ } else {
+ if (int3472->clock.led_gpio)
+ dev_warn(int3472->dev,
+ "No clk GPIO. The privacy LED won't work\n");
+ }
+
+ int3472->gpios.dev_id = int3472->sensor_name;
+ gpiod_add_lookup_table(&int3472->gpios);
+
+ return 0;
+}
+
+int skl_int3472_discrete_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3472_discrete_device *int3472;
+ struct int3472_cldb cldb;
+ int ret;
+
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
+ return ret;
+ }
+
+ if (cldb.control_logic_type != 1) {
+ dev_err(&pdev->dev, "Unsupported control logic type %u\n",
+ cldb.control_logic_type);
+ return -EINVAL;
+ }
+
+ /* Max num GPIOs we've seen plus a terminator */
+ int3472 = devm_kzalloc(&pdev->dev, struct_size(int3472, gpios.table,
+ INT3472_MAX_SENSOR_GPIOS + 1), GFP_KERNEL);
+ if (!int3472)
+ return -ENOMEM;
+
+ int3472->adev = adev;
+ int3472->dev = &pdev->dev;
+ platform_set_drvdata(pdev, int3472);
+
+ int3472->sensor = acpi_dev_get_first_consumer_dev(adev);
+ if (!int3472->sensor) {
+ dev_err(&pdev->dev, "INT3472 seems to have no dependents.\n");
+ return -ENODEV;
+ }
+
+ int3472->sensor_name = devm_kasprintf(int3472->dev, GFP_KERNEL,
+ I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(int3472->sensor));
+ if (!int3472->sensor_name) {
+ ret = -ENOMEM;
+ goto err_put_sensor;
+ }
+
+ /*
+ * Initialising this list means we can call gpiod_remove_lookup_table()
+ * in failure paths without issue.
+ */
+ INIT_LIST_HEAD(&int3472->gpios.list);
+
+ ret = skl_int3472_parse_crs(int3472);
+ if (ret) {
+ skl_int3472_discrete_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
+err_put_sensor:
+ acpi_dev_put(int3472->sensor);
+
+ return ret;
+}
+
+int skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
+
+ gpiod_remove_lookup_table(&int3472->gpios);
+
+ if (int3472->clock.ena_gpio)
+ skl_int3472_unregister_clock(int3472);
+
+ gpiod_put(int3472->clock.ena_gpio);
+ gpiod_put(int3472->clock.led_gpio);
+
+ skl_int3472_unregister_regulator(int3472);
+
+ return 0;
+}
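
From the sensor driver's point of view, the gpiod_lookup_table built in skl_int3472_handle_gpio_resources() makes the mapped pins available through the standard consumer API, keyed by the con_id strings ("reset", "powerdown") set above. A sketch under that assumption (sensor_reset_pulse() is illustrative):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int sensor_reset_pulse(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (!reset)
		return 0;	/* pin may have been remapped away */

	/* logical 1 = asserted; GPIO_ACTIVE_LOW in the table inverts it */
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset, 0);
	return 0;
}
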
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c
new file mode 100644
index 000000000000..c05b4cf502fe
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "intel_skl_int3472_common.h"
+
+#define DESIGNED_FOR_CHROMEOS 1
+#define DESIGNED_FOR_WINDOWS 2
+
+static const struct mfd_cell tps68470_cros[] = {
+ { .name = "tps68470-gpio" },
+ { .name = "tps68470_pmic_opregion" },
+};
+
+static const struct mfd_cell tps68470_win[] = {
+ { .name = "tps68470-gpio" },
+ { .name = "tps68470-clk" },
+ { .name = "tps68470-regulator" },
+};
+
+static const struct regmap_config tps68470_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS68470_REG_MAX,
+};
+
+static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
+{
+ unsigned int version;
+ int ret;
+
+ /* Force software reset */
+ ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
+ if (ret) {
+ dev_err(dev, "Failed to read revision register: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "TPS68470 REVID: 0x%02x\n", version);
+
+ return 0;
+}
+
+/**
+ * skl_int3472_tps68470_calc_type - Check what platform a device is designed for
+ * @adev: A pointer to a &struct acpi_device
+ *
+ * Check CLDB buffer against the PMIC's adev. If present, then we check
+ * the value of control_logic_type field and follow one of the
+ * following scenarios:
+ *
+ * 1. No CLDB - likely ACPI tables designed for ChromeOS. We
+ * create platform devices for the GPIOs and OpRegion drivers.
+ *
+ * 2. CLDB, with control_logic_type = 2 - probably ACPI tables
+ * made for Windows 2-in-1 platforms. Register pdevs for GPIO,
+ * Clock and Regulator drivers to bind to.
+ *
+ * 3. Any other value in control_logic_type, we should never have
+ * gotten to this point; fail probe and return.
+ *
+ * Return:
+ * * 1 Device intended for ChromeOS
+ * * 2 Device intended for Windows
+ * * -EINVAL Where @adev has an object named CLDB but it does not conform to
+ * our expectations
+ */
+static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
+{
+ struct int3472_cldb cldb = { 0 };
+ int ret;
+
+ /*
+ * A CLDB buffer that exists, but which does not match our expectations
+ * should trigger an error so we don't blindly continue.
+ */
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (ret)
+ return DESIGNED_FOR_CHROMEOS;
+
+ if (cldb.control_logic_type != 2)
+ return -EINVAL;
+
+ return DESIGNED_FOR_WINDOWS;
+}
+
+int skl_int3472_tps68470_probe(struct i2c_client *client)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ struct regmap *regmap;
+ int device_type;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to create regmap: %ld\n", PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ ret = tps68470_chip_init(&client->dev, regmap);
+ if (ret < 0) {
+ dev_err(&client->dev, "TPS68470 init error %d\n", ret);
+ return ret;
+ }
+
+ device_type = skl_int3472_tps68470_calc_type(adev);
+ switch (device_type) {
+ case DESIGNED_FOR_WINDOWS:
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ tps68470_win, ARRAY_SIZE(tps68470_win),
+ NULL, 0, NULL);
+ break;
+ case DESIGNED_FOR_CHROMEOS:
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ tps68470_cros, ARRAY_SIZE(tps68470_cros),
+ NULL, 0, NULL);
+ break;
+ default:
+ dev_err(&client->dev, "Failed to add MFD devices\n");
+ return device_type;
+ }
+
+ return ret;
+}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index bffe548187ee..4dfdbfca6841 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -829,7 +829,7 @@ static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
static u16 read_mgtv(struct ips_driver *ips)
{
- u16 ret;
+ u16 __maybe_unused ret;
u64 slope, offset;
u64 val;
diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c
index 92d315a16cfd..56963ceb6345 100644
--- a/drivers/platform/x86/intel_pmt_crashlog.c
+++ b/drivers/platform/x86/intel_pmt_crashlog.c
@@ -218,7 +218,7 @@ static struct attribute *pmt_crashlog_attrs[] = {
NULL
};
-static struct attribute_group pmt_crashlog_group = {
+static const struct attribute_group pmt_crashlog_group = {
.attrs = pmt_crashlog_attrs,
};
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
index 0c2aa22c7a12..6f0cc679c8e5 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -281,10 +281,69 @@ static int isst_if_get_platform_info(void __user *argp)
struct isst_if_cpu_info {
/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
int bus_info[2];
+ struct pci_dev *pci_dev[2];
int punit_cpu_id;
+ int numa_node;
};
static struct isst_if_cpu_info *isst_cpu_info;
+#define ISST_MAX_PCI_DOMAINS 8
+
+static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ struct pci_dev *matched_pci_dev = NULL;
+ struct pci_dev *pci_dev = NULL;
+ int no_matches = 0;
+ int i, bus_number;
+
+ if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
+ cpu >= num_possible_cpus())
+ return NULL;
+
+ bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+ if (bus_number < 0)
+ return NULL;
+
+ for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
+ struct pci_dev *_pci_dev;
+ int node;
+
+ _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
+ if (!_pci_dev)
+ continue;
+
+ ++no_matches;
+ if (!matched_pci_dev)
+ matched_pci_dev = _pci_dev;
+
+ node = dev_to_node(&_pci_dev->dev);
+ if (node == NUMA_NO_NODE) {
+ pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
+ cpu, bus_no, dev, fn);
+ continue;
+ }
+
+ if (node == isst_cpu_info[cpu].numa_node) {
+ pci_dev = _pci_dev;
+ break;
+ }
+ }
+
+ /*
+ * If there is no NUMA-matched pci_dev, there can be the following cases:
+ * 1. CONFIG_NUMA is not defined: In this case, if there is only a single
+ *    device match, then we don't need NUMA information. Simply return the
+ *    last match. Otherwise return NULL.
+ * 2. NUMA information is not exposed via the _SEG method. This case is
+ *    similar to case 1.
+ * 3. The NUMA information doesn't match the CPU's NUMA node and there is
+ *    more than one match: return NULL.
+ */
+ if (!pci_dev && no_matches == 1)
+ pci_dev = matched_pci_dev;
+
+ return pci_dev;
+}
/**
* isst_if_get_pci_dev() - Get the PCI device instance for a CPU
@@ -300,17 +359,18 @@ static struct isst_if_cpu_info *isst_cpu_info;
*/
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
- int bus_number;
+ struct pci_dev *pci_dev;
if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
cpu >= num_possible_cpus())
return NULL;
- bus_number = isst_cpu_info[cpu].bus_info[bus_no];
- if (bus_number < 0)
- return NULL;
+ pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
+
+ if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
+ return pci_dev;
- return pci_get_domain_bus_and_slot(0, bus_number, PCI_DEVFN(dev, fn));
+ return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
@@ -327,6 +387,8 @@ static int isst_if_cpu_online(unsigned int cpu)
} else {
isst_cpu_info[cpu].bus_info[0] = data & 0xff;
isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
+ isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
+ isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
}
ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
@@ -335,6 +397,7 @@ static int isst_if_cpu_online(unsigned int cpu)
return ret;
}
isst_cpu_info[cpu].punit_cpu_id = data;
+ isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
isst_restore_msr_local(cpu);
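
Callers see none of this caching: they keep using isst_if_get_pci_dev(), which now returns the NUMA-correct device resolved at CPU-online time and only falls back to a fresh scan on a devfn mismatch. A caller-side sketch (punit_read() is illustrative; bus/device/function 1/30/1 is the combination cached above):

static int punit_read(int cpu, u32 reg, u32 *val)
{
	struct pci_dev *pdev;

	pdev = isst_if_get_pci_dev(cpu, 1, 30, 1);
	if (!pdev)
		return -EINVAL;

	return pci_read_config_dword(pdev, reg, val);
}
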
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d5cec6e35bb8..7ee010aa740a 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -388,7 +388,7 @@ MODULE_PARM_DESC(force,
"Disable the DMI check and forces the driver to be loaded");
static bool debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
+module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug enabled or not");
static int sabi_command(struct samsung_laptop *samsung, u16 command,
@@ -705,7 +705,7 @@ static ssize_t set_performance_level(struct device *dev,
return count;
}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(performance_level, 0644,
get_performance_level, set_performance_level);
static int read_battery_life_extender(struct samsung_laptop *samsung)
@@ -774,7 +774,7 @@ static ssize_t set_battery_life_extender(struct device *dev,
return count;
}
-static DEVICE_ATTR(battery_life_extender, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(battery_life_extender, 0644,
get_battery_life_extender, set_battery_life_extender);
static int read_usb_charge(struct samsung_laptop *samsung)
@@ -843,7 +843,7 @@ static ssize_t set_usb_charge(struct device *dev,
return count;
}
-static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(usb_charge, 0644,
get_usb_charge, set_usb_charge);
static int read_lid_handling(struct samsung_laptop *samsung)
@@ -908,7 +908,7 @@ static ssize_t set_lid_handling(struct device *dev,
return count;
}
-static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(lid_handling, 0644,
get_lid_handling, set_lid_handling);
static struct attribute *platform_attributes[] = {
@@ -1291,24 +1291,17 @@ static void samsung_debugfs_init(struct samsung_laptop *samsung)
samsung->debug.sdiag_wrapper.data = samsung->sdiag;
samsung->debug.sdiag_wrapper.size = strlen(samsung->sdiag);
- debugfs_create_u16("command", S_IRUGO | S_IWUSR, root,
- &samsung->debug.command);
- debugfs_create_u32("d0", S_IRUGO | S_IWUSR, root,
- &samsung->debug.data.d0);
- debugfs_create_u32("d1", S_IRUGO | S_IWUSR, root,
- &samsung->debug.data.d1);
- debugfs_create_u16("d2", S_IRUGO | S_IWUSR, root,
- &samsung->debug.data.d2);
- debugfs_create_u8("d3", S_IRUGO | S_IWUSR, root,
- &samsung->debug.data.d3);
- debugfs_create_blob("data", S_IRUGO | S_IWUSR, root,
- &samsung->debug.data_wrapper);
- debugfs_create_blob("f0000_segment", S_IRUSR | S_IWUSR, root,
+ debugfs_create_u16("command", 0644, root, &samsung->debug.command);
+ debugfs_create_u32("d0", 0644, root, &samsung->debug.data.d0);
+ debugfs_create_u32("d1", 0644, root, &samsung->debug.data.d1);
+ debugfs_create_u16("d2", 0644, root, &samsung->debug.data.d2);
+ debugfs_create_u8("d3", 0644, root, &samsung->debug.data.d3);
+ debugfs_create_blob("data", 0444, root, &samsung->debug.data_wrapper);
+ debugfs_create_blob("f0000_segment", 0400, root,
&samsung->debug.f0000_wrapper);
- debugfs_create_file("call", S_IFREG | S_IRUGO, root, samsung,
+ debugfs_create_file("call", 0444, root, samsung,
&samsung_laptop_call_fops);
- debugfs_create_blob("sdiag", S_IRUGO | S_IWUSR, root,
- &samsung->debug.sdiag_wrapper);
+ debugfs_create_blob("sdiag", 0444, root, &samsung->debug.sdiag_wrapper);
}
static void samsung_sabi_exit(struct samsung_laptop *samsung)
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 803920b6f01d..9072eb302618 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -156,7 +156,7 @@ static struct attribute *tc1100_attributes[] = {
NULL
};
-static struct attribute_group tc1100_attribute_group = {
+static const struct attribute_group tc1100_attribute_group = {
.attrs = tc1100_attributes,
};
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
new file mode 100644
index 000000000000..3671b5d20613
--- /dev/null
+++ b/drivers/platform/x86/think-lmi.c
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Think LMI BIOS configuration driver
+ *
+ * Copyright(C) 2019-2021 Lenovo
+ *
+ * Original code from Thinkpad-wmi project https://github.com/iksaif/thinkpad-wmi
+ * Copyright(C) 2017 Corentin Chary <corentin.chary@gmail.com>
+ * Distributed under the GPL-2.0 license
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+#include "firmware_attributes_class.h"
+#include "think-lmi.h"
+
+/*
+ * Name:
+ * Lenovo_BiosSetting
+ * Description:
+ * Get item name and settings for current LMI instance.
+ * Type:
+ * Query
+ * Returns:
+ * "Item,Value"
+ * Example:
+ * "WakeOnLAN,Enable"
+ */
+#define LENOVO_BIOS_SETTING_GUID "51F5230E-9677-46CD-A1CF-C0B23EE34DB7"
+
+/*
+ * Name:
+ * Lenovo_SetBiosSetting
+ * Description:
+ * Change the BIOS setting to the desired value using the Lenovo_SetBiosSetting
+ * class. To save the settings, use the Lenovo_SaveBiosSetting class.
+ * BIOS settings and values are case-sensitive.
+ * After making changes to the BIOS settings, you must reboot the computer
+ * before the changes take effect.
+ * Type:
+ * Method
+ * Arguments:
+ * "Item,Value,Password,Encoding,KbdLang;"
+ * Example:
+ * "WakeOnLAN,Disable,pa55w0rd,ascii,us;"
+ */
+#define LENOVO_SET_BIOS_SETTINGS_GUID "98479A64-33F5-4E33-A707-8E251EBBC3A1"
+
+/*
+ * Name:
+ * Lenovo_SaveBiosSettings
+ * Description:
+ * Save any pending changes in settings.
+ * Type:
+ * Method
+ * Arguments:
+ * "Password,Encoding,KbdLang;"
+ * Example:
+ * "pa55w0rd,ascii,us;"
+ */
+#define LENOVO_SAVE_BIOS_SETTINGS_GUID "6A4B54EF-A5ED-4D33-9455-B0D9B48DF4B3"
+
+/*
+ * Name:
+ * Lenovo_BiosPasswordSettings
+ * Description:
+ * Return BIOS Password settings
+ * Type:
+ * Query
+ * Returns:
+ * PasswordMode, PasswordState, MinLength, MaxLength,
+ * SupportedEncoding, SupportedKeyboard
+ */
+#define LENOVO_BIOS_PASSWORD_SETTINGS_GUID "8ADB159E-1E32-455C-BC93-308A7ED98246"
+
+/*
+ * Name:
+ * Lenovo_SetBiosPassword
+ * Description:
+ * Change a specific password.
+ * - BIOS settings cannot be changed at the same boot as power-on
+ * passwords (POP) and hard disk passwords (HDP). If you want to change
+ * BIOS settings and POP or HDP, you must reboot the system after changing
+ * one of them.
+ * - A password cannot be set using this method when one does not already
+ * exist. Passwords can only be updated or cleared.
+ * Type:
+ * Method
+ * Arguments:
+ * "PasswordType,CurrentPassword,NewPassword,Encoding,KbdLang;"
+ * Example:
+ * "pop,pa55w0rd,newpa55w0rd,ascii,us;”
+ */
+#define LENOVO_SET_BIOS_PASSWORD_GUID "2651D9FD-911C-4B69-B94E-D0DED5963BD7"
+
+/*
+ * Name:
+ * Lenovo_GetBiosSelections
+ * Description:
+ * Return a list of valid settings for a given item.
+ * Type:
+ * Method
+ * Arguments:
+ * "Item"
+ * Returns:
+ * "Value1,Value2,Value3,..."
+ * Example:
+ * -> "FlashOverLAN"
+ * <- "Enabled,Disabled"
+ */
+#define LENOVO_GET_BIOS_SELECTIONS_GUID "7364651A-132F-4FE7-ADAA-40C6C7EE2E3B"
+
+#define TLMI_POP_PWD (1 << 0)
+#define TLMI_PAP_PWD (1 << 1)
+#define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
+#define to_tlmi_attr_setting(kobj) container_of(kobj, struct tlmi_attr_setting, kobj)
+
+static const struct tlmi_err_codes tlmi_errs[] = {
+ {"Success", 0},
+ {"Not Supported", -EOPNOTSUPP},
+ {"Invalid Parameter", -EINVAL},
+ {"Access Denied", -EACCES},
+ {"System Busy", -EBUSY},
+};
+
+static const char * const encoding_options[] = {
+ [TLMI_ENCODING_ASCII] = "ascii",
+ [TLMI_ENCODING_SCANCODE] = "scancode",
+};
+static struct think_lmi tlmi_priv;
+static struct class *fw_attr_class;
+
+/* ------ Utility functions ------------*/
+/* Convert BIOS WMI error string to suitable error code */
+static int tlmi_errstr_to_err(const char *errstr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tlmi_errs); i++) {
+ if (!strcmp(tlmi_errs[i].err_str, errstr))
+ return tlmi_errs[i].err_code;
+ }
+ return -EPERM;
+}
+
+/* Extract error string from WMI return buffer */
+static int tlmi_extract_error(const struct acpi_buffer *output)
+{
+ const union acpi_object *obj;
+
+ obj = output->pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
+ return -EIO;
+
+ return tlmi_errstr_to_err(obj->string.pointer);
+}
+
+/* Utility function to execute WMI call to BIOS */
+static int tlmi_simple_call(const char *guid, const char *arg)
+{
+ const struct acpi_buffer input = { strlen(arg), (char *)arg };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int i, err;
+
+ /*
+ * The call is issued twice to match a BIOS workaround for behavior
+ * seen when WMI is accessed via scripting on other operating systems.
+ */
+ for (i = 0; i < 2; i++) {
+ /* (re)initialize output buffer to default state */
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+
+ status = wmi_evaluate_method(guid, 0, 0, &input, &output);
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+ err = tlmi_extract_error(&output);
+ kfree(output.pointer);
+ if (err)
+ return err;
+ }
+ return 0;
+}
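A usage sketch for the helper above, reusing the argument strings from the GUID comments at the top of the file (the password, encoding and keyboard-language fields are illustrative):

	/* Illustrative only: change a setting, then commit it. */
	int err;

	err = tlmi_simple_call(LENOVO_SET_BIOS_SETTINGS_GUID,
			       "WakeOnLAN,Disable,pa55w0rd,ascii,us;");
	if (!err)
		err = tlmi_simple_call(LENOVO_SAVE_BIOS_SETTINGS_GUID,
				       "pa55w0rd,ascii,us;");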
+
+/* Extract output string from WMI return buffer */
+static int tlmi_extract_output_string(const struct acpi_buffer *output,
+ char **string)
+{
+ const union acpi_object *obj;
+ char *s;
+
+ obj = output->pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
+ return -EIO;
+
+ s = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+ *string = s;
+ return 0;
+}
+
+/* ------ Core interface functions ------------*/
+
+/* Get password settings from BIOS */
+static int tlmi_get_pwd_settings(struct tlmi_pwdcfg *pwdcfg)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ const union acpi_object *obj;
+ acpi_status status;
+
+ if (!tlmi_priv.can_get_password_settings)
+ return -EOPNOTSUPP;
+
+ status = wmi_query_block(LENOVO_BIOS_PASSWORD_SETTINGS_GUID, 0,
+ &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+ if (!obj)
+ return -ENOMEM;
+ if (obj->type != ACPI_TYPE_BUFFER || !obj->buffer.pointer) {
+ kfree(obj);
+ return -EIO;
+ }
+ /*
+ * The password-config buffer on ThinkStation is larger than on
+ * ThinkPad. To keep the driver compatible across models, accept any
+ * buffer that is at least as large as struct tlmi_pwdcfg.
+ */
+ if (obj->buffer.length < sizeof(struct tlmi_pwdcfg)) {
+ pr_warn("Unknown pwdcfg buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return -EIO;
+ }
+ memcpy(pwdcfg, obj->buffer.pointer, sizeof(struct tlmi_pwdcfg));
+ kfree(obj);
+ return 0;
+}
+
+static int tlmi_save_bios_settings(const char *password)
+{
+ return tlmi_simple_call(LENOVO_SAVE_BIOS_SETTINGS_GUID,
+ password);
+}
+
+static int tlmi_setting(int item, char **value, const char *guid_string)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int ret;
+
+ status = wmi_query_block(guid_string, item, &output);
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+
+ ret = tlmi_extract_output_string(&output, value);
+ kfree(output.pointer);
+ return ret;
+}
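As a usage sketch, each WMI instance of Lenovo_BiosSetting corresponds to one BIOS item, so instance 0 might yield the "WakeOnLAN,Enable" string from the comment at the top of the file; the caller owns the duplicated string:

	char *item = NULL;
	int err;

	err = tlmi_setting(0, &item, LENOVO_BIOS_SETTING_GUID);
	if (!err) {
		pr_debug("instance 0: %s\n", item); /* e.g. "WakeOnLAN,Enable" */
		kfree(item);
	}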
+
+static int tlmi_get_bios_selections(const char *item, char **value)
+{
+ const struct acpi_buffer input = { strlen(item), (char *)item };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ int ret;
+
+ status = wmi_evaluate_method(LENOVO_GET_BIOS_SELECTIONS_GUID,
+ 0, 0, &input, &output);
+
+ if (ACPI_FAILURE(status)) {
+ kfree(output.pointer);
+ return -EIO;
+ }
+
+ ret = tlmi_extract_output_string(&output, value);
+ kfree(output.pointer);
+ return ret;
+}
+
+/* ---- Authentication sysfs --------------------------------------------------------- */
+static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->valid);
+}
+
+static struct kobj_attribute auth_is_pass_set = __ATTR_RO(is_enabled);
+
+static ssize_t current_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ size_t pwdlen;
+ char *p;
+
+ pwdlen = strlen(buf);
+ /* pwdlen == 0 is allowed to clear the password */
+ if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen)))
+ return -EINVAL;
+
+ strscpy(setting->password, buf, setting->maxlen);
+ /* Strip out a trailing newline if present; setting the password fails if one is included */
+ p = strchrnul(setting->password, '\n');
+ *p = '\0';
+ return count;
+}
+
+static struct kobj_attribute auth_current_password = __ATTR_WO(current_password);
+
+static ssize_t new_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ char *auth_str, *new_pwd, *p;
+ size_t pwdlen;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!tlmi_priv.can_set_bios_password)
+ return -EOPNOTSUPP;
+
+ new_pwd = kstrdup(buf, GFP_KERNEL);
+ if (!new_pwd)
+ return -ENOMEM;
+
+ /* Strip out a trailing newline if present; setting the password fails if one is included */
+ p = strchrnul(new_pwd, '\n');
+ *p = '\0';
+
+ pwdlen = strlen(new_pwd);
+ /* pwdlen == 0 is allowed to clear the password */
+ if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Format: 'PasswordType,CurrentPw,NewPw,Encoding,KbdLang;' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s,%s,%s;",
+ setting->pwd_type, setting->password, new_pwd,
+ encoding_options[setting->encoding], setting->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_PASSWORD_GUID, auth_str);
+ kfree(auth_str);
+out:
+ kfree(new_pwd);
+ return ret ?: count;
+}
+
+static struct kobj_attribute auth_new_password = __ATTR_WO(new_password);
+
+static ssize_t min_password_length_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->minlen);
+}
+
+static struct kobj_attribute auth_min_pass_length = __ATTR_RO(min_password_length);
+
+static ssize_t max_password_length_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->maxlen);
+}
+static struct kobj_attribute auth_max_pass_length = __ATTR_RO(max_password_length);
+
+static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "password\n");
+}
+static struct kobj_attribute auth_mechanism = __ATTR_RO(mechanism);
+
+static ssize_t encoding_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", encoding_options[setting->encoding]);
+}
+
+static ssize_t encoding_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int i;
+
+ /* Scan for a matching encoding option */
+ i = sysfs_match_string(encoding_options, buf);
+ if (i < 0)
+ return -EINVAL;
+
+ setting->encoding = i;
+ return count;
+}
+
+static struct kobj_attribute auth_encoding = __ATTR_RW(encoding);
+
+static ssize_t kbdlang_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->kbdlang);
+}
+
+static ssize_t kbdlang_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int length;
+
+ /* Calculate the length up to the '\n' or the terminating NUL */
+ length = strchrnul(buf, '\n') - buf;
+ if (!length || length >= TLMI_LANG_MAXLEN)
+ return -EINVAL;
+
+ memcpy(setting->kbdlang, buf, length);
+ setting->kbdlang[length] = '\0';
+ return count;
+}
+
+static struct kobj_attribute auth_kbdlang = __ATTR_RW(kbdlang);
+
+static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->role);
+}
+static struct kobj_attribute auth_role = __ATTR_RO(role);
+
+static struct attribute *auth_attrs[] = {
+ &auth_is_pass_set.attr,
+ &auth_min_pass_length.attr,
+ &auth_max_pass_length.attr,
+ &auth_current_password.attr,
+ &auth_new_password.attr,
+ &auth_role.attr,
+ &auth_mechanism.attr,
+ &auth_encoding.attr,
+ &auth_kbdlang.attr,
+ NULL
+};
+
+static const struct attribute_group auth_attr_group = {
+ .attrs = auth_attrs,
+};
+
+/* ---- Attributes sysfs --------------------------------------------------------- */
+static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", setting->display_name);
+}
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ char *item, *value;
+ int ret;
+
+ ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+ if (ret)
+ return ret;
+
+ /* validate and split from `item,value` -> `value` */
+ value = strchr(item, ',');
+ if (!value || value == item || !strlen(value + 1)) {
+ kfree(item);
+ return -EINVAL;
+ }
+
+ ret = sysfs_emit(buf, "%s\n", value + 1);
+ kfree(item);
+ return ret;
+}
+
+static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ if (!tlmi_priv.can_get_bios_selections)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%s\n", setting->possible_values);
+}
+
+static ssize_t current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ char *set_str = NULL, *new_setting = NULL;
+ char *auth_str = NULL;
+ char *p;
+ int ret;
+
+ if (!tlmi_priv.can_set_bios_settings)
+ return -EOPNOTSUPP;
+
+ new_setting = kstrdup(buf, GFP_KERNEL);
+ if (!new_setting)
+ return -ENOMEM;
+
+ /* Strip out a trailing newline if present */
+ p = strchrnul(new_setting, '\n');
+ *p = '\0';
+
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
+ tlmi_priv.pwd_admin->password,
+ encoding_options[tlmi_priv.pwd_admin->encoding],
+ tlmi_priv.pwd_admin->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (auth_str)
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
+ new_setting, auth_str);
+ else
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+ new_setting);
+ if (!set_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_SETTINGS_GUID, set_str);
+ if (ret)
+ goto out;
+
+ if (auth_str)
+ ret = tlmi_save_bios_settings(auth_str);
+ else
+ ret = tlmi_save_bios_settings("");
+
+out:
+ kfree(auth_str);
+ kfree(set_str);
+ kfree(new_setting);
+ return ret ?: count;
+}
+
+static struct kobj_attribute attr_displ_name = __ATTR_RO(display_name);
+
+static struct kobj_attribute attr_possible_values = __ATTR_RO(possible_values);
+
+static struct kobj_attribute attr_current_val = __ATTR_RW_MODE(current_value, 0600);
+
+static struct attribute *tlmi_attrs[] = {
+ &attr_displ_name.attr,
+ &attr_current_val.attr,
+ &attr_possible_values.attr,
+ NULL
+};
+
+static const struct attribute_group tlmi_attr_group = {
+ .attrs = tlmi_attrs,
+};
+
+static ssize_t tlmi_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct kobj_attribute *kattr;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show)
+ return kattr->show(kobj, kattr, buf);
+ return -EIO;
+}
+
+static ssize_t tlmi_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kobj_attribute *kattr;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ return kattr->store(kobj, kattr, buf, count);
+ return -EIO;
+}
+
+static const struct sysfs_ops tlmi_kobj_sysfs_ops = {
+ .show = tlmi_attr_show,
+ .store = tlmi_attr_store,
+};
+
+static void tlmi_attr_setting_release(struct kobject *kobj)
+{
+ struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+ kfree(setting->possible_values);
+ kfree(setting);
+}
+
+static void tlmi_pwd_setting_release(struct kobject *kobj)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ kfree(setting);
+}
+
+static struct kobj_type tlmi_attr_setting_ktype = {
+ .release = &tlmi_attr_setting_release,
+ .sysfs_ops = &tlmi_kobj_sysfs_ops,
+};
+
+static struct kobj_type tlmi_pwd_setting_ktype = {
+ .release = &tlmi_pwd_setting_release,
+ .sysfs_ops = &tlmi_kobj_sysfs_ops,
+};
+
+/* ---- Initialisation --------------------------------------------------------- */
+static void tlmi_release_attr(void)
+{
+ int i;
+
+ /* Attribute structures */
+ for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+ if (tlmi_priv.setting[i]) {
+ sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
+ kobject_put(&tlmi_priv.setting[i]->kobj);
+ }
+ }
+ kset_unregister(tlmi_priv.attribute_kset);
+
+ /* Authentication structures */
+ sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_admin->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_power->kobj);
+ kset_unregister(tlmi_priv.authentication_kset);
+}
+
+static int tlmi_sysfs_init(void)
+{
+ int i, ret;
+
+ ret = fw_attributes_class_get(&fw_attr_class);
+ if (ret)
+ return ret;
+
+ tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ NULL, "%s", "thinklmi");
+ if (IS_ERR(tlmi_priv.class_dev)) {
+ ret = PTR_ERR(tlmi_priv.class_dev);
+ goto fail_class_created;
+ }
+
+ tlmi_priv.attribute_kset = kset_create_and_add("attributes", NULL,
+ &tlmi_priv.class_dev->kobj);
+ if (!tlmi_priv.attribute_kset) {
+ ret = -ENOMEM;
+ goto fail_device_created;
+ }
+
+ for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+ /* Check if index is a valid setting - skip if it isn't */
+ if (!tlmi_priv.setting[i])
+ continue;
+
+ /* check for duplicate or reserved values */
+ if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) ||
+ !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) {
+ pr_debug("duplicate or reserved attribute name found - %s\n",
+ tlmi_priv.setting[i]->display_name);
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ tlmi_priv.setting[i] = NULL;
+ continue;
+ }
+
+ /* Build attribute */
+ tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
+ ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
+ NULL, "%s", tlmi_priv.setting[i]->display_name);
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
+ if (ret)
+ goto fail_create_attr;
+ }
+
+ /* Create authentication entries */
+ tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
+ &tlmi_priv.class_dev->kobj);
+ if (!tlmi_priv.authentication_kset) {
+ ret = -ENOMEM;
+ goto fail_create_attr;
+ }
+ tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
+ NULL, "%s", "Admin");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
+ NULL, "%s", "System");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ return ret;
+
+fail_create_attr:
+ tlmi_release_attr();
+fail_device_created:
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+fail_class_created:
+ fw_attributes_class_put();
+ return ret;
+}
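Assuming every registration above succeeds, the resulting layout under the firmware-attributes class device looks roughly like this (a sketch, with <name> standing in for each setting's display_name):

	/sys/class/firmware-attributes/thinklmi/
	    attributes/<name>/current_value      (0600 via __ATTR_RW_MODE)
	    attributes/<name>/display_name
	    attributes/<name>/possible_values
	    authentication/Admin/                (is_enabled, current_password,
	                                          new_password, min/max_password_length,
	                                          role, mechanism, encoding, kbdlang)
	    authentication/System/               (same attributes, role "power-on")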
+
+/* ---- Base Driver -------------------------------------------------------- */
+static int tlmi_analyze(void)
+{
+ struct tlmi_pwdcfg pwdcfg;
+ int i, ret;
+
+ if (wmi_has_guid(LENOVO_SET_BIOS_SETTINGS_GUID) &&
+ wmi_has_guid(LENOVO_SAVE_BIOS_SETTINGS_GUID))
+ tlmi_priv.can_set_bios_settings = true;
+
+ if (wmi_has_guid(LENOVO_GET_BIOS_SELECTIONS_GUID))
+ tlmi_priv.can_get_bios_selections = true;
+
+ if (wmi_has_guid(LENOVO_SET_BIOS_PASSWORD_GUID))
+ tlmi_priv.can_set_bios_password = true;
+
+ if (wmi_has_guid(LENOVO_BIOS_PASSWORD_SETTINGS_GUID))
+ tlmi_priv.can_get_password_settings = true;
+
+ /*
+ * Enumerate the valid settings on this machine and create a
+ * sysfs attribute entry for each one found.
+ */
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ struct tlmi_attr_setting *setting;
+ char *item = NULL;
+ char *p;
+
+ tlmi_priv.setting[i] = NULL;
+ ret = tlmi_setting(i, &item, LENOVO_BIOS_SETTING_GUID);
+ if (ret)
+ break;
+ if (!item)
+ break;
+ if (!*item)
+ continue;
+
+ /* '/' is not allowed in a file name; convert it to '\\'. */
+ strreplace(item, '/', '\\');
+
+ /* Remove the value part */
+ p = strchrnul(item, ',');
+ *p = '\0';
+
+ /* Create a setting entry */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting) {
+ ret = -ENOMEM;
+ goto fail_clear_attr;
+ }
+ setting->index = i;
+ strscpy(setting->display_name, item, TLMI_SETTINGS_MAXLEN);
+ /* If BIOS selections supported, load those */
+ if (tlmi_priv.can_get_bios_selections) {
+ ret = tlmi_get_bios_selections(setting->display_name,
+ &setting->possible_values);
+ if (ret || !setting->possible_values)
+ pr_info("Error retrieving possible values for %d : %s\n",
+ i, setting->display_name);
+ }
+ tlmi_priv.setting[i] = setting;
+ tlmi_priv.settings_count++;
+ kfree(item);
+ }
+
+ /* Create password setting structure */
+ ret = tlmi_get_pwd_settings(&pwdcfg);
+ if (ret)
+ goto fail_clear_attr;
+
+ tlmi_priv.pwd_admin = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
+ if (!tlmi_priv.pwd_admin) {
+ ret = -ENOMEM;
+ goto fail_clear_attr;
+ }
+ strscpy(tlmi_priv.pwd_admin->kbdlang, "us", TLMI_LANG_MAXLEN);
+ tlmi_priv.pwd_admin->encoding = TLMI_ENCODING_ASCII;
+ tlmi_priv.pwd_admin->pwd_type = "pap";
+ tlmi_priv.pwd_admin->role = "bios-admin";
+ tlmi_priv.pwd_admin->minlen = pwdcfg.min_length;
+ if (WARN_ON(pwdcfg.max_length >= TLMI_PWD_BUFSIZE))
+ pwdcfg.max_length = TLMI_PWD_BUFSIZE - 1;
+ tlmi_priv.pwd_admin->maxlen = pwdcfg.max_length;
+ if (pwdcfg.password_state & TLMI_PAP_PWD)
+ tlmi_priv.pwd_admin->valid = true;
+
+ tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
+ if (!tlmi_priv.pwd_power) {
+ ret = -ENOMEM;
+ goto fail_clear_attr;
+ }
+ strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
+ tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
+ tlmi_priv.pwd_power->pwd_type = "pop";
+ tlmi_priv.pwd_power->role = "power-on";
+ tlmi_priv.pwd_power->minlen = pwdcfg.min_length;
+ tlmi_priv.pwd_power->maxlen = pwdcfg.max_length;
+
+ if (pwdcfg.password_state & TLMI_POP_PWD)
+ tlmi_priv.pwd_power->valid = true;
+
+ return 0;
+
+fail_clear_attr:
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ if (tlmi_priv.setting[i]) {
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ }
+ }
+ return ret;
+}
+
+static void tlmi_remove(struct wmi_device *wdev)
+{
+ tlmi_release_attr();
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+ fw_attributes_class_put();
+}
+
+static int tlmi_probe(struct wmi_device *wdev, const void *context)
+{
+ int ret;
+
+ ret = tlmi_analyze();
+ if (ret)
+ return ret;
+
+ return tlmi_sysfs_init();
+}
+
+static const struct wmi_device_id tlmi_id_table[] = {
+ { .guid_string = LENOVO_BIOS_SETTING_GUID },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, tlmi_id_table);
+
+static struct wmi_driver tlmi_driver = {
+ .driver = {
+ .name = "think-lmi",
+ },
+ .id_table = tlmi_id_table,
+ .probe = tlmi_probe,
+ .remove = tlmi_remove,
+};
+
+MODULE_AUTHOR("Sugumaran L <slacshiminar@lenovo.com>");
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_DESCRIPTION("ThinkLMI Driver");
+MODULE_LICENSE("GPL");
+
+module_wmi_driver(tlmi_driver);
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
new file mode 100644
index 000000000000..6fa8da7af6c7
--- /dev/null
+++ b/drivers/platform/x86/think-lmi.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _THINK_LMI_H_
+#define _THINK_LMI_H_
+
+#include <linux/types.h>
+
+#define TLMI_SETTINGS_COUNT 256
+#define TLMI_SETTINGS_MAXLEN 512
+#define TLMI_PWD_BUFSIZE 129
+#define TLMI_LANG_MAXLEN 4
+
+/* Possible error values */
+struct tlmi_err_codes {
+ const char *err_str;
+ int err_code;
+};
+
+enum encoding_option {
+ TLMI_ENCODING_ASCII,
+ TLMI_ENCODING_SCANCODE,
+};
+
+/* password configuration details */
+struct tlmi_pwdcfg {
+ uint32_t password_mode;
+ uint32_t password_state;
+ uint32_t min_length;
+ uint32_t max_length;
+ uint32_t supported_encodings;
+ uint32_t supported_keyboard;
+};
+
+/* password setting details */
+struct tlmi_pwd_setting {
+ struct kobject kobj;
+ bool valid;
+ char password[TLMI_PWD_BUFSIZE];
+ const char *pwd_type;
+ const char *role;
+ int minlen;
+ int maxlen;
+ enum encoding_option encoding;
+ char kbdlang[TLMI_LANG_MAXLEN];
+};
+
+/* Attribute setting details */
+struct tlmi_attr_setting {
+ struct kobject kobj;
+ int index;
+ char display_name[TLMI_SETTINGS_MAXLEN];
+ char *possible_values;
+};
+
+struct think_lmi {
+ struct wmi_device *wmi_device;
+
+ int settings_count;
+ bool can_set_bios_settings;
+ bool can_get_bios_selections;
+ bool can_set_bios_password;
+ bool can_get_password_settings;
+
+ struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
+ struct device *class_dev;
+ struct kset *attribute_kset;
+ struct kset *authentication_kset;
+ struct tlmi_pwd_setting *pwd_admin;
+ struct tlmi_pwd_setting *pwd_power;
+};
+
+#endif /* !_THINK_LMI_H_ */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd60c9397d35..603156a6e3ed 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7938,7 +7938,7 @@ static int volume_write(char *buf)
continue;
} else if (sscanf(cmd, "level %u", &l) == 1 &&
l >= 0 && l <= TP_EC_VOLUME_MAX) {
- new_level = l;
+ new_level = l;
continue;
}
}
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3rd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
+ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index fa7232ad8c39..352508d30467 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2831,6 +2831,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (!dev->info_supported && !dev->system_event_supported) {
pr_warn("No hotkey query interface found\n");
+ error = -EINVAL;
goto err_remove_filter;
}
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index b237bd6b1ee5..49e84095bb01 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -131,7 +131,7 @@ static const struct attribute_group haps_attr_group = {
*/
static void toshiba_haps_notify(struct acpi_device *device, u32 event)
{
- pr_debug("Received event: 0x%x", event);
+ pr_debug("Received event: 0x%x\n", event);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev),
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index bde740d6120e..0e1451b1d9c6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -299,6 +299,35 @@ static const struct ts_dmi_data estar_beauty_hd_data = {
.properties = estar_beauty_hd_props,
};
+/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
+static const struct property_entry gdix1001_upside_down_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data gdix1001_00_upside_down_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = gdix1001_upside_down_props,
+};
+
+static const struct ts_dmi_data gdix1001_01_upside_down_data = {
+ .acpi_name = "GDIX1001:01",
+ .properties = gdix1001_upside_down_props,
+};
+
+static const struct property_entry glavey_tm800a550l_props[] = {
+ PROPERTY_ENTRY_STRING("firmware-name", "gt912-glavey-tm800a550l.fw"),
+ PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-glavey-tm800a550l.cfg"),
+ PROPERTY_ENTRY_U32("goodix,main-clk", 54),
+ { }
+};
+
+static const struct ts_dmi_data glavey_tm800a550l_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = glavey_tm800a550l_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -942,7 +971,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
- /* Chuwi Hi10 Prus (CWI597) */
+ /* Chuwi Hi10 Pro (CWI529) */
.driver_data = (void *)&chuwi_hi10_pro_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -1038,6 +1067,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
},
},
+ { /* Glavey TM800A550L */
+ .driver_data = (void *)&glavey_tm800a550l_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ },
{
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
@@ -1331,6 +1369,24 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast X89 (Android version / BIOS) */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
+ DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
+ },
+ },
+ {
+ /* Teclast X89 (Windows version / BIOS) */
+ .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .matches = {
+ /* tPAD is too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
+ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
+ },
+ },
+ {
/* Teclast X98 Plus II */
.driver_data = (void *)&teclast_x98plus2_data,
.matches = {
@@ -1339,6 +1395,19 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast X98 Pro */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ /*
+ * Only match on the BIOS date, because the manufacturer's
+ * BIOS sometimes does not report the board name at all.
+ */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
+ },
+ },
+ {
/* Trekstor Primebook C11 */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
@@ -1414,6 +1483,22 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* "WinBook TW100" */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+ }
+ },
+ {
+ /* WinBook TW700 */
+ .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+ },
+ },
+ {
/* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 7badcfa3f384..956a354b57c1 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -778,7 +778,7 @@ static struct attribute *base_attrs[] = {
NULL,
};
-static struct attribute_group base_attr_group = {
+static const struct attribute_group base_attr_group = {
.attrs = base_attrs
};
@@ -823,7 +823,7 @@ static struct attribute *hubless_base_attrs[] = {
NULL,
};
-static struct attribute_group hubless_base_attr_group = {
+static const struct attribute_group hubless_base_attr_group = {
.attrs = hubless_base_attrs
};
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
new file mode 100644
index 000000000000..b010e4ca3383
--- /dev/null
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Airplane mode button for AMD, HP & Xiaomi laptops
+ *
+ * Copyright (C) 2014-2017 Alex Hung <alex.hung@canonical.com>
+ * Copyright (C) 2021 Advanced Micro Devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
+
+static struct input_dev *wl_input_dev;
+
+static const struct acpi_device_id wl_ids[] = {
+ {"HPQ6001", 0},
+ {"WSTADEF", 0},
+ {"AMDI0051", 0},
+ {"", 0},
+};
+
+static int wireless_input_setup(void)
+{
+ int err;
+
+ wl_input_dev = input_allocate_device();
+ if (!wl_input_dev)
+ return -ENOMEM;
+
+ wl_input_dev->name = "Wireless hotkeys";
+ wl_input_dev->phys = "hpq6001/input0";
+ wl_input_dev->id.bustype = BUS_HOST;
+ wl_input_dev->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_RFKILL, wl_input_dev->keybit);
+
+ err = input_register_device(wl_input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(wl_input_dev);
+ return err;
+}
+
+static void wireless_input_destroy(void)
+{
+ input_unregister_device(wl_input_dev);
+}
+
+static void wl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ if (event != 0x80) {
+ pr_info("Received unknown event (0x%x)\n", event);
+ return;
+ }
+
+ input_report_key(wl_input_dev, KEY_RFKILL, 1);
+ input_sync(wl_input_dev);
+ input_report_key(wl_input_dev, KEY_RFKILL, 0);
+ input_sync(wl_input_dev);
+}
+
+static int wl_add(struct acpi_device *device)
+{
+ int err;
+
+ err = wireless_input_setup();
+ if (err)
+ pr_err("Failed to setup hp wireless hotkeys\n");
+
+ return err;
+}
+
+static int wl_remove(struct acpi_device *device)
+{
+ wireless_input_destroy();
+ return 0;
+}
+
+static struct acpi_driver wl_driver = {
+ .name = "wireless-hotkey",
+ .owner = THIS_MODULE,
+ .ids = wl_ids,
+ .ops = {
+ .add = wl_add,
+ .remove = wl_remove,
+ .notify = wl_notify,
+ },
+};
+
+module_acpi_driver(wl_driver);
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index cdcfa39cf167..e74a0f6a3157 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -6,7 +6,6 @@
extern struct mutex pnp_lock;
extern const struct attribute_group *pnp_dev_groups[];
-void *pnp_alloc(long size);
int pnp_register_protocol(struct pnp_protocol *protocol);
void pnp_unregister_protocol(struct pnp_protocol *protocol);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index c2464ee08e4a..d40ed8621571 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -80,7 +80,7 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
if (!id)
return 0;
- clink = pnp_alloc(sizeof(*clink));
+ clink = kzalloc(sizeof(*clink), GFP_KERNEL);
if (!clink)
return 0;
clink->card = card;
@@ -181,8 +181,8 @@ struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnp
return card;
}
-static ssize_t pnp_show_card_name(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
@@ -191,10 +191,10 @@ static ssize_t pnp_show_card_name(struct device *dmdev,
return (str - buf);
}
-static DEVICE_ATTR(name, S_IRUGO, pnp_show_card_name, NULL);
+static DEVICE_ATTR_RO(name);
-static ssize_t pnp_show_card_ids(struct device *dmdev,
- struct device_attribute *attr, char *buf)
+static ssize_t card_id_show(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
@@ -207,7 +207,7 @@ static ssize_t pnp_show_card_ids(struct device *dmdev,
return (str - buf);
}
-static DEVICE_ATTR(card_id, S_IRUGO, pnp_show_card_ids, NULL);
+static DEVICE_ATTR_RO(card_id);
static int pnp_interface_attach_card(struct pnp_card *card)
{
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index a50ab002e9e4..4df5aa6a309c 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -31,18 +31,6 @@ DEFINE_MUTEX(pnp_lock);
int pnp_platform_devices;
EXPORT_SYMBOL(pnp_platform_devices);
-void *pnp_alloc(long size)
-{
- void *result;
-
- result = kzalloc(size, GFP_KERNEL);
- if (!result) {
- printk(KERN_ERR "pnp: Out of Memory\n");
- return NULL;
- }
- return result;
-}
-
static void pnp_remove_protocol(struct pnp_protocol *protocol)
{
mutex_lock(&pnp_lock);
@@ -227,9 +215,8 @@ int pnp_add_device(struct pnp_dev *dev)
for (id = dev->id; id; id = id->next)
len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
- dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
- dev->protocol->name, buf,
- dev->active ? "active" : "disabled");
+ dev_dbg(&dev->dev, "%s device, IDs%s (%s)\n", dev->protocol->name, buf,
+ dev->active ? "active" : "disabled");
return 0;
}
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 602c46893e83..44efcdb87e6f 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -214,7 +214,7 @@ static ssize_t options_show(struct device *dmdev, struct device_attribute *attr,
int ret, dep = 0, set = 0;
char *indent;
- buffer = pnp_alloc(sizeof(pnp_info_buffer_t));
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -257,7 +257,7 @@ static ssize_t resources_show(struct device *dmdev,
if (!dev)
return -EINVAL;
- buffer = pnp_alloc(sizeof(pnp_info_buffer_t));
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 785a796430fa..1ae458c02656 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -57,21 +57,20 @@ static const struct proc_ops isapnp_proc_bus_proc_ops = {
static int isapnp_proc_attach_device(struct pnp_dev *dev)
{
struct pnp_card *bus = dev->card;
- struct proc_dir_entry *de, *e;
char name[16];
- if (!(de = bus->procdir)) {
+ if (!bus->procdir) {
sprintf(name, "%02x", bus->number);
- de = bus->procdir = proc_mkdir(name, isapnp_proc_bus_dir);
- if (!de)
+ bus->procdir = proc_mkdir(name, isapnp_proc_bus_dir);
+ if (!bus->procdir)
return -ENOMEM;
}
sprintf(name, "%02x", dev->number);
- e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
+ dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, bus->procdir,
&isapnp_proc_bus_proc_ops, dev);
- if (!e)
+ if (!dev->procent)
return -ENOMEM;
- proc_set_size(e, 256);
+ proc_set_size(dev->procent, 256);
return 0;
}
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 9b760e73ee8f..669ef4700c1a 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -298,14 +298,12 @@ struct pnp_protocol pnpbios_protocol = {
static int __init insert_device(struct pnp_bios_node *node)
{
- struct list_head *pos;
struct pnp_dev *dev;
char id[8];
int error;
/* check if the device is already added */
- list_for_each(pos, &pnpbios_protocol.devices) {
- dev = list_entry(pos, struct pnp_dev, protocol_list);
+ list_for_each_entry(dev, &pnpbios_protocol.devices, protocol_list) {
if (dev->number == node->handle)
return -EEXIST;
}
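The list_for_each() to list_for_each_entry() conversion above is the usual idiom: the entry iterator folds the manual list_entry() lookup into the loop macro. A minimal sketch with hypothetical names:

	#include <linux/list.h>
	#include <linux/printk.h>

	struct demo_item {
		int number;
		struct list_head node;
	};

	static LIST_HEAD(demo_list);

	static void demo_iterate(void)
	{
		struct list_head *pos;
		struct demo_item *it;

		/* before: walk raw positions, convert each by hand */
		list_for_each(pos, &demo_list) {
			it = list_entry(pos, struct demo_item, node);
			pr_info("item %d\n", it->number);
		}

		/* after: the macro hands back the containing struct */
		list_for_each_entry(it, &demo_list, node)
			pr_info("item %d\n", it->number);
	}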
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 70d4ba95735a..2fa0f7d55259 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -540,7 +540,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
res->start = irq;
res->end = irq;
- dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ dev_dbg(&dev->dev, "%pR\n", res);
return pnp_res;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 03a246e60fd9..a23a37a4d5dc 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -63,27 +63,6 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
spin_unlock_irqrestore(&queue->lock, flags);
}
-s32 scaled_ppm_to_ppb(long ppm)
-{
- /*
- * The 'freq' field in the 'struct timex' is in parts per
- * million, but with a 16 bit binary fractional field.
- *
- * We want to calculate
- *
- * ppb = scaled_ppm * 1000 / 2^16
- *
- * which simplifies to
- *
- * ppb = scaled_ppm * 125 / 2^13
- */
- s64 ppb = 1 + ppm;
- ppb *= 125;
- ppb >>= 13;
- return (s32) ppb;
-}
-EXPORT_SYMBOL(scaled_ppm_to_ppb);
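The helper is only dropped from this file, not from the kernel; presumably it moved to a shared header as an inline, which squares with the adjtime caller below now taking the result as a long. The arithmetic from the removed comment is easy to sanity-check:

	/*
	 * scaled_ppm carries ppm with a 16-bit binary fraction, so
	 * 1 ppm == 65536 scaled_ppm and
	 *
	 *   ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13
	 *
	 * Worked check for scaled_ppm = 65536 (exactly 1 ppm):
	 *   65536 * 125 = 8192000, and 8192000 >> 13 = 1000 ppb.
	 * (The "1 + ppm" in the removed body was just a rounding nudge.)
	 */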
-
/* posix clock implementation */
static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
@@ -138,7 +117,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
- s32 ppb = scaled_ppm_to_ppb(tx->freq);
+ long ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
if (ops->adjfine)
@@ -239,6 +218,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
+ ptp->pps_source->lookup_cookie = ptp;
}
err = ptp_populate_pin_groups(ptp);
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 530e5f90095e..0d1034e3ed0f 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -324,7 +324,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!bp->base) {
dev_err(&pdev->dev, "io_remap bar0\n");
err = -ENOMEM;
- goto out;
+ goto out_release_regions;
}
bp->reg = bp->base + OCP_REGISTER_OFFSET;
bp->tod = bp->base + TOD_REGISTER_OFFSET;
@@ -347,6 +347,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out:
+ pci_iounmap(pdev, bp->base);
+out_release_regions:
pci_release_regions(pdev);
out_disable:
pci_disable_device(pdev);
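The fix above is the standard ordered-goto unwind: every acquired resource gets a label, placed so that a failure releases exactly what is already held, newest first. A stripped-down sketch with hypothetical names (demo_hw_init stands in for any later setup step):

	#include <linux/pci.h>

	static int demo_hw_init(void __iomem *base); /* hypothetical */

	static int demo_probe(struct pci_dev *pdev)
	{
		void __iomem *base;
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;

		err = pci_request_regions(pdev, "demo");
		if (err)
			goto out_disable;

		base = pci_ioremap_bar(pdev, 0);
		if (!base) {
			err = -ENOMEM;
			goto out_release_regions; /* nothing mapped yet */
		}

		err = demo_hw_init(base);
		if (err)
			goto out_unmap;

		return 0;

	out_unmap:
		pci_iounmap(pdev, base);
	out_release_regions:
		pci_release_regions(pdev);
	out_disable:
		pci_disable_device(pdev);
		return err;
	}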
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9d84d9245490..24ce9a17ab4f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -193,20 +193,10 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
-config REGULATOR_BD70528
- tristate "ROHM BD70528 Power Regulator"
- depends on MFD_ROHM_BD70528
- help
- This driver supports voltage regulators on ROHM BD70528 PMIC.
- This will enable support for the software controllable buck
- and LDO regulators.
-
- This driver can also be built as a module. If so, the module
- will be called bd70528-regulator.
-
config REGULATOR_BD71815
tristate "ROHM BD71815 Power Regulator"
depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
help
This driver supports voltage regulators on ROHM BD71815 PMIC.
This will enable support for the software controllable buck
@@ -588,6 +578,14 @@ config REGULATOR_MAX8660
This driver controls a Maxim 8660/8661 voltage output
regulator via I2C bus.
+config REGULATOR_MAX8893
+ tristate "Maxim 8893 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls a Maxim 8893 voltage output
+ regulator via I2C bus.
+
config REGULATOR_MAX8907
tristate "Maxim 8907 voltage regulator"
depends on MFD_MAX8907 || COMPILE_TEST
@@ -779,6 +777,15 @@ config REGULATOR_MT6358
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6359
+ tristate "MediaTek MT6359 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to enable support for the power regulators of the
+ MediaTek MT6359 PMIC.
+ This driver supports the control of different power rails of the
+ device through the regulator interface.
+
config REGULATOR_MT6360
tristate "MT6360 SubPMIC Regulator"
depends on MFD_MT6360
@@ -1030,8 +1037,28 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RT6160
+ tristate "Richtek RT6160 BuckBoost voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the voltage regulator in the Richtek RT6160.
+ This device automatically switches its output mode between buck and
+ boost depending on the input source voltage. The wide output range
+ from 2025mV to 5200mV suits most common application scenarios.
+
+config REGULATOR_RT6245
+ tristate "Richtek RT6245 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the Richtek RT6245 voltage regulator.
+ It can deliver up to 14A of output current, with an output voltage
+ adjustable from 0.4375V to 1.3875V in 12.5mV steps.
+
config REGULATOR_RTMV20
- tristate "RTMV20 Laser Diode Regulator"
+ tristate "Richtek RTMV20 Laser Diode Regulator"
depends on I2C
select REGMAP_I2C
help
@@ -1150,6 +1177,12 @@ config REGULATOR_STW481X_VMMC
This driver supports the internal VMMC regulator in the STw481x
PMIC chips.
+config REGULATOR_SY7636A
+ tristate "Silergy SY7636A voltage regulator"
+ depends on MFD_SY7636A
+ help
+ This driver supports the Silergy SY7636A voltage regulator.
+
config REGULATOR_SY8106A
tristate "Silergy SY8106A regulator"
depends on I2C && (OF || COMPILE_TEST)
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 580b015296ea..8c2f82206b94 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -4,7 +4,7 @@
#
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o irq_helpers.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_ATC260X) += atc260x-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
-obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD71815) += bd71815-regulator.o
obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
@@ -72,6 +71,7 @@ obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
+obj-$(CONFIG_REGULATOR_MAX8893) += max8893.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
@@ -94,6 +94,7 @@ obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6315) += mt6315-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
+obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
@@ -124,6 +125,8 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
+obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
@@ -136,6 +139,7 @@ obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
+obj-$(CONFIG_REGULATOR_SY7636A) += sy7636a-regulator.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
obj-$(CONFIG_REGULATOR_SY8824X) += sy8824x.o
obj-$(CONFIG_REGULATOR_SY8827N) += sy8827n.o
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index d8b429955d33..05147d2c3842 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
};
static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
};
static const unsigned int atc260x_ldo_voltage_range_sel[] = {
- 0x0, 0x1,
+ 0x0, 0x20,
};
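A hedged reading of the three changes above: with pickable linear ranges the voltage selector restarts at 0 inside each range, and the range-select value (now 0x0 or 0x20, i.e. a dedicated register bit rather than a 0/1 index) chooses between them. Example lookups under the corrected ranges0 table:

	/*
	 * REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV), so:
	 *   range 0, sel 3 -> 700000 + 3 * 100000  = 1000000 uV
	 *   range 1, sel 5 -> 2100000 + 5 * 100000 = 2600000 uV
	 */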
static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
.owner = THIS_MODULE, \
}
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
.name = "LDO"#num, \
.supply_name = "ldo"#num, \
.of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
.type = REGULATOR_VOLTAGE, \
.linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
.n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+ .n_voltages = n_volt, \
.vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
.vsel_mask = GENMASK(4, 1), \
.vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
atc2609a_reg_desc_ldo_bypass(0),
atc2609a_reg_desc_ldo_bypass(1),
atc2609a_reg_desc_ldo_bypass(2),
- atc2609a_reg_desc_ldo_range_pick(3, 0),
- atc2609a_reg_desc_ldo_range_pick(4, 0),
+ atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
atc2609a_reg_desc_ldo(5),
- atc2609a_reg_desc_ldo_range_pick(6, 1),
- atc2609a_reg_desc_ldo_range_pick(7, 0),
- atc2609a_reg_desc_ldo_range_pick(8, 0),
+ atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+ atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
atc2609a_reg_desc_ldo_fixed(9),
};
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
deleted file mode 100644
index 1f5f9482b209..000000000000
--- a/drivers/regulator/bd70528-regulator.c
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 ROHM Semiconductors
-// bd70528-regulator.c ROHM BD70528MWV regulator driver
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/slab.h>
-
-#define BUCK_RAMPRATE_250MV 0
-#define BUCK_RAMPRATE_125MV 1
-#define BUCK_RAMP_MAX 250
-
-static const struct linear_range bd70528_buck1_volts[] = {
- REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000),
- REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000),
-};
-static const struct linear_range bd70528_buck2_volts[] = {
- REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000),
- REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000),
- REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000),
-};
-static const struct linear_range bd70528_buck3_volts[] = {
- REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000),
- REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0),
-};
-
-/* All LDOs have same voltage ranges */
-static const struct linear_range bd70528_ldo_volts[] = {
- REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000),
- REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000),
- REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000),
- REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0),
-};
-
-/* Also both LEDs support same voltages */
-static const unsigned int led_volts[] = {
- 20000, 30000
-};
-
-static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) {
- unsigned int ramp_value = BUCK_RAMPRATE_250MV;
-
- if (ramp_delay <= 125)
- ramp_value = BUCK_RAMPRATE_125MV;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
- BD70528_MASK_BUCK_RAMP,
- ramp_value << BD70528_SIFT_BUCK_RAMP);
- }
- dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n",
- rdev->desc->name, ramp_delay);
- return -EINVAL;
-}
-
-static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int sel)
-{
- int ret;
-
- ret = regulator_is_enabled_regmap(rdev);
- if (ret < 0)
- return ret;
-
- if (ret == 0)
- return regulator_set_voltage_sel_regmap(rdev, sel);
-
- dev_err(&rdev->dev,
- "LED voltage change not allowed when led is enabled\n");
-
- return -EBUSY;
-}
-
-static const struct regulator_ops bd70528_buck_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd70528_set_ramp_delay,
-};
-
-static const struct regulator_ops bd70528_ldo_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static const struct regulator_ops bd70528_led_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd70528_led_set_voltage_sel,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static const struct regulator_desc bd70528_desc[] = {
- {
- .name = "buck1",
- .of_match = of_match_ptr("BUCK1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK1,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck1_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK1_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK1_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck2",
- .of_match = of_match_ptr("BUCK2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK2,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck2_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK2_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK2_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck3",
- .of_match = of_match_ptr("BUCK3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK3,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck3_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK3_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK3_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo1",
- .of_match = of_match_ptr("LDO1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO1,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO1_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO1_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo2",
- .of_match = of_match_ptr("LDO2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO2,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO2_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO2_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo3",
- .of_match = of_match_ptr("LDO3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO3,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO3_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO3_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo_led1",
- .of_match = of_match_ptr("LDO_LED1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LED1,
- .ops = &bd70528_led_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &led_volts[0],
- .n_voltages = ARRAY_SIZE(led_volts),
- .enable_reg = BD70528_REG_LED_EN,
- .enable_mask = BD70528_MASK_LED1_EN,
- .vsel_reg = BD70528_REG_LED_VOLT,
- .vsel_mask = BD70528_MASK_LED1_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo_led2",
- .of_match = of_match_ptr("LDO_LED2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LED2,
- .ops = &bd70528_led_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &led_volts[0],
- .n_voltages = ARRAY_SIZE(led_volts),
- .enable_reg = BD70528_REG_LED_EN,
- .enable_mask = BD70528_MASK_LED2_EN,
- .vsel_reg = BD70528_REG_LED_VOLT,
- .vsel_mask = BD70528_MASK_LED2_VOLT,
- .owner = THIS_MODULE,
- },
-
-};
-
-static int bd70528_probe(struct platform_device *pdev)
-{
- int i;
- struct regulator_config config = {
- .dev = pdev->dev.parent,
- };
-
- config.regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!config.regmap)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) {
- struct regulator_dev *rdev;
-
- rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i],
- &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev,
- "failed to register %s regulator\n",
- bd70528_desc[i].name);
- return PTR_ERR(rdev);
- }
- }
- return 0;
-}
-
-static struct platform_driver bd70528_regulator = {
- .driver = {
- .name = "bd70528-pmic"
- },
- .probe = bd70528_probe,
-};
-
-module_platform_driver(bd70528_regulator);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 voltage regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bd70528-pmic");
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index a4e8d5e36b40..16edd9062ca9 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -13,6 +13,8 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -26,14 +28,6 @@ struct bd71815_regulator {
const struct rohm_dvs_config *dvs;
};
-struct bd71815_pmic {
- struct bd71815_regulator descs[BD71815_REGULATOR_CNT];
- struct regmap *regmap;
- struct device *dev;
- struct gpio_descs *gps;
- struct regulator_dev *rdev[BD71815_REGULATOR_CNT];
-};
-
static const int bd7181x_wled_currents[] = {
10, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000, 2000, 3000, 4000,
5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 15000,
@@ -300,14 +294,13 @@ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
{
- struct bd71815_pmic *pmic = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
int ret, regh, regl, val;
regh = BD71815_REG_BUCK1_VOLT_H + rid * 0x2;
regl = BD71815_REG_BUCK1_VOLT_L + rid * 0x2;
- ret = regmap_read(pmic->regmap, regh, &val);
+ ret = regmap_read(rdev->regmap, regh, &val);
if (ret)
return ret;
@@ -319,7 +312,7 @@ static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
* by BD71815_BUCK_DVSSEL bit
*/
if ((!(val & BD71815_BUCK_STBY_DVS)) && (!(val & BD71815_BUCK_DVSSEL)))
- ret = regmap_read(pmic->regmap, regl, &val);
+ ret = regmap_read(rdev->regmap, regl, &val);
if (ret)
return ret;
@@ -333,14 +326,13 @@ static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
unsigned int sel)
{
- struct bd71815_pmic *pmic = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
int ret, val, reg, regh, regl;
regh = BD71815_REG_BUCK1_VOLT_H + rid*0x2;
regl = BD71815_REG_BUCK1_VOLT_L + rid*0x2;
- ret = regmap_read(pmic->regmap, regh, &val);
+ ret = regmap_read(rdev->regmap, regh, &val);
if (ret)
return ret;
@@ -350,7 +342,7 @@ static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
* voltages at runtime is not supported by this driver.
*/
if (((val & BD71815_BUCK_STBY_DVS))) {
- return regmap_update_bits(pmic->regmap, regh, BD71815_VOLT_MASK,
+ return regmap_update_bits(rdev->regmap, regh, BD71815_VOLT_MASK,
sel);
}
/* Update new voltage to the register which is not selected now */
@@ -359,12 +351,13 @@ static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
else
reg = regh;
- ret = regmap_update_bits(pmic->regmap, reg, BD71815_VOLT_MASK, sel);
+ ret = regmap_update_bits(rdev->regmap, reg, BD71815_VOLT_MASK, sel);
if (ret)
return ret;
/* Select the other DVS register to be used */
- return regmap_update_bits(pmic->regmap, regh, BD71815_BUCK_DVSSEL, ~val);
+ return regmap_update_bits(rdev->regmap, regh, BD71815_BUCK_DVSSEL,
+ ~val);
}
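
The hunks above rely on the regulator core copying config->regmap into
rdev->regmap at registration time, which is what lets these callbacks drop
their private drvdata lookups. A minimal sketch of the resulting pattern
(hypothetical example_* name; essentially what
regulator_get_voltage_sel_regmap() does):

static int example_get_voltage_sel(struct regulator_dev *rdev)
{
	unsigned int val;
	int ret;

	/* rdev->regmap was populated from config->regmap by the core */
	ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
	if (ret)
		return ret;

	val &= rdev->desc->vsel_mask;
	return val >> (ffs(rdev->desc->vsel_mask) - 1);
}
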
static const struct regulator_ops bd7181x_ldo_regulator_ops = {
@@ -522,7 +515,7 @@ static const struct regulator_ops bd7181x_led_regulator_ops = {
.dvs = (_dvs), \
}
-static struct bd71815_regulator bd71815_regulators[] = {
+static const struct bd71815_regulator bd71815_regulators[] = {
BD71815_BUCK12_REG(buck1, BD71815_BUCK1, BD71815_REG_BUCK1_VOLT_H,
BD71815_REG_BUCK1_MODE, 800000, 2000000, 25000,
&buck1_dvs),
@@ -568,24 +561,16 @@ static struct bd71815_regulator bd71815_regulators[] = {
static int bd7181x_probe(struct platform_device *pdev)
{
- struct bd71815_pmic *pmic;
struct regulator_config config = {};
int i, ret;
struct gpio_desc *ldo4_en;
+ struct regmap *regmap;
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- memcpy(pmic->descs, bd71815_regulators, sizeof(pmic->descs));
-
- pmic->dev = &pdev->dev;
- pmic->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!pmic->regmap) {
- dev_err(pmic->dev, "No parent regmap\n");
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "No parent regmap\n");
return -ENODEV;
}
- platform_set_drvdata(pdev, pmic);
ldo4_en = devm_gpiod_get_from_of_node(&pdev->dev,
pdev->dev.parent->of_node,
"rohm,vsel-gpios", 0,
@@ -599,23 +584,23 @@ static int bd7181x_probe(struct platform_device *pdev)
}
/* Disable to go to ship-mode */
- ret = regmap_update_bits(pmic->regmap, BD71815_REG_PWRCTRL,
- RESTARTEN, 0);
+ ret = regmap_update_bits(regmap, BD71815_REG_PWRCTRL, RESTARTEN, 0);
if (ret)
return ret;
config.dev = pdev->dev.parent;
- config.regmap = pmic->regmap;
+ config.regmap = regmap;
for (i = 0; i < BD71815_REGULATOR_CNT; i++) {
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct regulator_dev *rdev;
- desc = &pmic->descs[i].desc;
+ desc = &bd71815_regulators[i].desc;
+
if (i == BD71815_LDO4)
config.ena_gpiod = ldo4_en;
-
- config.driver_data = pmic;
+ else
+ config.ena_gpiod = NULL;
rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
@@ -624,8 +609,6 @@ static int bd7181x_probe(struct platform_device *pdev)
desc->name);
return PTR_ERR(rdev);
}
- config.ena_gpiod = NULL;
- pmic->rdev[i] = rdev;
}
return 0;
}
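
The if/else added above matters because a single regulator_config is reused
for every descriptor: per-regulator fields such as ena_gpiod must be
assigned on each iteration, or a stale value from the previous round leaks
into the next registration. A sketch of the pattern:

	for (i = 0; i < BD71815_REGULATOR_CNT; i++) {
		config.ena_gpiod = (i == BD71815_LDO4) ? ldo4_en : NULL;

		rdev = devm_regulator_register(&pdev->dev,
					       &bd71815_regulators[i].desc,
					       &config);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);
	}
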
@@ -639,7 +622,6 @@ MODULE_DEVICE_TABLE(platform, bd7181x_pmic_id);
static struct platform_driver bd7181x_regulator = {
.driver = {
.name = "bd7181x-pmic",
- .owner = THIS_MODULE,
},
.probe = bd7181x_probe,
.id_table = bd7181x_pmic_id,
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e61295b30503..b1eb46961993 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
NULL);
BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
- regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+ regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
NULL);
/*
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index 204a2da054f5..e16c3727db7a 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -2,10 +2,10 @@
// Copyright (C) 2020 ROHM Semiconductors
// ROHM BD9576MUF/BD9573MUF regulator driver
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd957x.h>
#include <linux/mfd/rohm-generic.h>
@@ -16,29 +16,118 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#define BD957X_VOUTS1_VOLT 3300000
#define BD957X_VOUTS4_BASE_VOLT 1030000
#define BD957X_VOUTS34_NUM_VOLT 32
-static int vout1_volt_table[] = {5000000, 4900000, 4800000, 4700000, 4600000,
- 4500000, 4500000, 4500000, 5000000, 5100000,
- 5200000, 5300000, 5400000, 5500000, 5500000,
- 5500000};
+#define BD9576_THERM_IRQ_MASK_TW BIT(5)
+#define BD9576_xVD_IRQ_MASK_VOUTL1 BIT(5)
+#define BD9576_UVD_IRQ_MASK_VOUTS1_OCW BIT(6)
+#define BD9576_xVD_IRQ_MASK_VOUT1TO4 0x0F
-static int vout2_volt_table[] = {1800000, 1780000, 1760000, 1740000, 1720000,
- 1700000, 1680000, 1660000, 1800000, 1820000,
- 1840000, 1860000, 1880000, 1900000, 1920000,
- 1940000};
+static const unsigned int vout1_volt_table[] = {
+ 5000000, 4900000, 4800000, 4700000, 4600000,
+ 4500000, 4500000, 4500000, 5000000, 5100000,
+ 5200000, 5300000, 5400000, 5500000, 5500000,
+ 5500000
+};
+
+static const unsigned int vout2_volt_table[] = {
+ 1800000, 1780000, 1760000, 1740000, 1720000,
+ 1700000, 1680000, 1660000, 1800000, 1820000,
+ 1840000, 1860000, 1880000, 1900000, 1920000,
+ 1940000
+};
+
+static const unsigned int voutl1_volt_table[] = {
+ 2500000, 2540000, 2580000, 2620000, 2660000,
+ 2700000, 2740000, 2780000, 2500000, 2460000,
+ 2420000, 2380000, 2340000, 2300000, 2260000,
+ 2220000
+};
+
+static const struct linear_range vout1_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(225000, 0x01, 0x2b, 0),
+ REGULATOR_LINEAR_RANGE(225000, 0x2c, 0x54, 5000),
+ REGULATOR_LINEAR_RANGE(425000, 0x55, 0x7f, 0),
+};
+
+static const struct linear_range vout234_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(17000, 0x01, 0x0f, 0),
+ REGULATOR_LINEAR_RANGE(17000, 0x10, 0x6d, 1000),
+ REGULATOR_LINEAR_RANGE(110000, 0x6e, 0x7f, 0),
+};
+
+static const struct linear_range voutL1_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(34000, 0x01, 0x0f, 0),
+ REGULATOR_LINEAR_RANGE(34000, 0x10, 0x6d, 2000),
+ REGULATOR_LINEAR_RANGE(220000, 0x6e, 0x7f, 0),
+};
+
+static struct linear_range voutS1_ocw_ranges_internal[] = {
+ REGULATOR_LINEAR_RANGE(200000, 0x01, 0x04, 0),
+ REGULATOR_LINEAR_RANGE(250000, 0x05, 0x18, 50000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x19, 0x3f, 0),
+};
+
+static struct linear_range voutS1_ocw_ranges[] = {
+ REGULATOR_LINEAR_RANGE(50000, 0x01, 0x04, 0),
+ REGULATOR_LINEAR_RANGE(60000, 0x05, 0x18, 10000),
+ REGULATOR_LINEAR_RANGE(250000, 0x19, 0x3f, 0),
+};
+
+static struct linear_range voutS1_ocp_ranges_internal[] = {
+ REGULATOR_LINEAR_RANGE(300000, 0x01, 0x06, 0),
+ REGULATOR_LINEAR_RANGE(350000, 0x7, 0x1b, 50000),
+ REGULATOR_LINEAR_RANGE(1350000, 0x1c, 0x3f, 0),
+};
-static int voutl1_volt_table[] = {2500000, 2540000, 2580000, 2620000, 2660000,
- 2700000, 2740000, 2780000, 2500000, 2460000,
- 2420000, 2380000, 2340000, 2300000, 2260000,
- 2220000};
+static struct linear_range voutS1_ocp_ranges[] = {
+ REGULATOR_LINEAR_RANGE(70000, 0x01, 0x06, 0),
+ REGULATOR_LINEAR_RANGE(80000, 0x7, 0x1b, 10000),
+ REGULATOR_LINEAR_RANGE(280000, 0x1c, 0x3f, 0),
+};
struct bd957x_regulator_data {
struct regulator_desc desc;
int base_voltage;
+ struct regulator_dev *rdev;
+ int ovd_notif;
+ int uvd_notif;
+ int temp_notif;
+ int ovd_err;
+ int uvd_err;
+ int temp_err;
+ const struct linear_range *xvd_ranges;
+ int num_xvd_ranges;
+ bool oc_supported;
+ unsigned int ovd_reg;
+ unsigned int uvd_reg;
+ unsigned int xvd_mask;
+ unsigned int ocp_reg;
+ unsigned int ocp_mask;
+ unsigned int ocw_reg;
+ unsigned int ocw_mask;
+ unsigned int ocw_rfet;
+};
+
+#define BD9576_NUM_REGULATORS 6
+#define BD9576_NUM_OVD_REGULATORS 5
+
+struct bd957x_data {
+ struct bd957x_regulator_data regulator_data[BD9576_NUM_REGULATORS];
+ struct regmap *regmap;
+ struct delayed_work therm_irq_suppress;
+ struct delayed_work ovd_irq_suppress;
+ struct delayed_work uvd_irq_suppress;
+ unsigned int therm_irq;
+ unsigned int ovd_irq;
+ unsigned int uvd_irq;
+ spinlock_t err_lock;
+ int regulator_global_err;
};
static int bd957x_vout34_list_voltage(struct regulator_dev *rdev,
@@ -72,151 +161,784 @@ static int bd957x_list_voltage(struct regulator_dev *rdev,
return desc->volt_table[index];
}
-static const struct regulator_ops bd957x_vout34_ops = {
+static void bd9576_fill_ovd_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->ovd_notif = REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ data->ovd_err = REGULATOR_ERROR_OVER_VOLTAGE_WARN;
+ } else {
+ data->ovd_notif = REGULATOR_EVENT_REGULATION_OUT;
+ data->ovd_err = REGULATOR_ERROR_REGULATION_OUT;
+ }
+}
+
+static void bd9576_fill_ocp_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->uvd_notif = REGULATOR_EVENT_OVER_CURRENT_WARN;
+ data->uvd_err = REGULATOR_ERROR_OVER_CURRENT_WARN;
+ } else {
+ data->uvd_notif = REGULATOR_EVENT_OVER_CURRENT;
+ data->uvd_err = REGULATOR_ERROR_OVER_CURRENT;
+ }
+}
+
+static void bd9576_fill_uvd_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->uvd_notif = REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ data->uvd_err = REGULATOR_ERROR_UNDER_VOLTAGE_WARN;
+ } else {
+ data->uvd_notif = REGULATOR_EVENT_UNDER_VOLTAGE;
+ data->uvd_err = REGULATOR_ERROR_UNDER_VOLTAGE;
+ }
+}
+
+static void bd9576_fill_temp_flags(struct bd957x_regulator_data *data,
+ bool enable, bool warn)
+{
+ if (!enable) {
+ data->temp_notif = 0;
+ data->temp_err = 0;
+ } else if (warn) {
+ data->temp_notif = REGULATOR_EVENT_OVER_TEMP_WARN;
+ data->temp_err = REGULATOR_ERROR_OVER_TEMP_WARN;
+ } else {
+ data->temp_notif = REGULATOR_EVENT_OVER_TEMP;
+ data->temp_err = REGULATOR_ERROR_OVER_TEMP;
+ }
+}
+
+static int bd9576_set_limit(const struct linear_range *r, int num_ranges,
+ struct regmap *regmap, int reg, int mask, int lim)
+{
+ int ret;
+ bool found;
+ int sel = 0;
+
+ if (lim) {
+ ret = linear_range_get_selector_low_array(r, num_ranges,
+ lim, &sel, &found);
+ if (ret)
+ return ret;
+
+ if (!found)
+ dev_warn(regmap_get_device(regmap),
+ "limit %d out of range. Setting lower\n",
+ lim);
+ }
+
+ return regmap_update_bits(regmap, reg, mask, sel);
+}
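
A worked example of the lookup above, assuming
linear_range_get_selector_low_array() rounds down to the closest
representable value: a 300 mV limit on Vout1 lands in the second
vout1_xvd_ranges entry (225000 uV from selector 0x2c, 5000 uV step), so

	sel = 0x2c + (300000 - 225000) / 5000 = 0x2c + 15 = 0x3b

A limit between steps selects the nearest value that does not exceed the
request; found is false only when the limit exceeds what the ranges can
represent, in which case a lower value is used and the warning fires.
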
+
+static bool check_ocp_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->uvd_notif != REGULATOR_EVENT_OVER_CURRENT) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->uvd_notif != REGULATOR_EVENT_OVER_CURRENT_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both OCP WARN and ERR\n");
+ /* Do not overwrite ERR config with WARN */
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_ocp_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_uvd_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->uvd_notif != REGULATOR_EVENT_UNDER_VOLTAGE) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->uvd_notif != REGULATOR_EVENT_UNDER_VOLTAGE_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both UVD WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_uvd_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_ovd_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->ovd_notif != REGULATOR_EVENT_REGULATION_OUT) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->ovd_notif != REGULATOR_EVENT_OVER_VOLTAGE_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both OVD WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_ovd_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+	     r->temp_notif != REGULATOR_EVENT_OVER_TEMP) ||
+	    (severity == REGULATOR_SEVERITY_WARN &&
+	     r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both thermal WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+ }
+
+ return false;
+}
+
+static int bd9576_set_ocp(struct regulator_dev *rdev, int lim_uA, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int reg, mask;
+ int Vfet, rfet;
+ const struct linear_range *range;
+ int num_ranges;
+
+ if ((lim_uA && !enable) || (!lim_uA && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ if (!r->oc_supported)
+ return -EINVAL;
+
+ d = rdev_get_drvdata(rdev);
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ reg = r->ocp_reg;
+ mask = r->ocp_mask;
+ if (r->ocw_rfet) {
+ range = voutS1_ocp_ranges;
+ num_ranges = ARRAY_SIZE(voutS1_ocp_ranges);
+ rfet = r->ocw_rfet / 1000;
+ } else {
+ range = voutS1_ocp_ranges_internal;
+ num_ranges = ARRAY_SIZE(voutS1_ocp_ranges_internal);
+ /* Internal values are already micro-amperes */
+ rfet = 1000;
+ }
+ } else {
+ reg = r->ocw_reg;
+ mask = r->ocw_mask;
+
+ if (r->ocw_rfet) {
+ range = voutS1_ocw_ranges;
+ num_ranges = ARRAY_SIZE(voutS1_ocw_ranges);
+ rfet = r->ocw_rfet / 1000;
+ } else {
+ range = voutS1_ocw_ranges_internal;
+ num_ranges = ARRAY_SIZE(voutS1_ocw_ranges_internal);
+ /* Internal values are already micro-amperes */
+ rfet = 1000;
+ }
+
+ /* We abuse uvd fields for OCW on VoutS1 */
+ if (r->uvd_notif) {
+ /*
+ * If both warning and error are requested, prioritize
+ * ERROR configuration
+ */
+ if (check_ocp_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bool warn = severity == REGULATOR_SEVERITY_WARN;
+
+ bd9576_fill_ocp_flags(r, warn);
+ }
+ }
+
+ /*
+	 * Limits are given in uA and rfet in mOhm. Dividing lim_uA by 1000
+	 * yields mA, and mA * mOhm gives Vfet in uV.
+	 * (We expect both the limit and Rfet to be in the magnitude of
+	 * hundreds of milliamperes / milliohms, so accuracy stays decent.)
+ */
+ Vfet = lim_uA/1000 * rfet;
+
+ return bd9576_set_limit(range, num_ranges, d->regmap,
+ reg, mask, Vfet);
+}
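
Unit check for the Vfet computation above, with illustrative numbers: a
1 A limit over a 30 mOhm external FET (rohm,ocw-fet-ron-micro-ohms =
<30000> in DT) gives

	rfet = 30000 / 1000          =    30	/* mOhm */
	Vfet = 1000000 / 1000 * 30   = 30000	/* uA -> mA; mA * mOhm = uV */

so the limit register is programmed with the 30 mV drop the FET develops
at the requested current.
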
+
+static int bd9576_set_uvp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int mask, reg;
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable || lim_uV)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+	 * The BD9576 uses a special value in the limit register as the enable
+	 * control, so we can't set a limit while keeping detection disabled,
+	 * nor enable detection without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ mask = r->xvd_mask;
+ reg = r->uvd_reg;
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->uvd_notif) {
+ if (check_uvd_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bd9576_fill_uvd_flags(r, severity == REGULATOR_SEVERITY_WARN);
+ }
+
+ return bd9576_set_limit(r->xvd_ranges, r->num_xvd_ranges, d->regmap,
+ reg, mask, lim_uV);
+}
+
+static int bd9576_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int mask, reg;
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable || lim_uV)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+	 * The BD9576 uses a special value in the limit register as the enable
+	 * control, so we can't set a limit while keeping detection disabled,
+	 * nor enable detection without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ mask = r->xvd_mask;
+ reg = r->ovd_reg;
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->ovd_notif) {
+ if (check_ovd_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bd9576_fill_ovd_flags(r, severity == REGULATOR_SEVERITY_WARN);
+ }
+
+ return bd9576_set_limit(r->xvd_ranges, r->num_xvd_ranges, d->regmap,
+ reg, mask, lim_uV);
+}
+
+static int bd9576_set_tw(struct regulator_dev *rdev, int lim, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int i;
+
+ /*
+	 * The BD9576MUF has fixed temperature limits; detection can only be
+	 * enabled or disabled.
+ */
+ if (lim)
+ return -EINVAL;
+
+ /* Protection can't be disabled */
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable)
+ return -EINVAL;
+ else
+ return 0;
+ }
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->temp_notif)
+ if (check_temp_flag_mismatch(rdev, severity, r))
+ return 0;
+
+ bd9576_fill_temp_flags(r, enable, severity == REGULATOR_SEVERITY_WARN);
+
+ if (enable)
+ return regmap_update_bits(d->regmap, BD957X_REG_INT_THERM_MASK,
+ BD9576_THERM_IRQ_MASK_TW, 0);
+
+ /*
+	 * If any of the regulators is still interested in the thermal warning,
+	 * keep the IRQ enabled.
+ */
+ for (i = 0; i < BD9576_NUM_REGULATORS; i++)
+ if (d->regulator_data[i].temp_notif)
+ return 0;
+
+ return regmap_update_bits(d->regmap, BD957X_REG_INT_THERM_MASK,
+ BD9576_THERM_IRQ_MASK_TW,
+ BD9576_THERM_IRQ_MASK_TW);
+}
+
+static const struct regulator_ops bd9573_vout34_ops = {
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = bd957x_vout34_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static const struct regulator_ops bd957X_vouts1_regulator_ops = {
+static const struct regulator_ops bd9576_vout34_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_vout34_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_over_voltage_protection = bd9576_set_ovp,
+ .set_under_voltage_protection = bd9576_set_uvp,
+ .set_thermal_protection = bd9576_set_tw,
+};
+
+static const struct regulator_ops bd9573_vouts1_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops bd9576_vouts1_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
+ .set_over_current_protection = bd9576_set_ocp,
};
-static const struct regulator_ops bd957x_ops = {
+static const struct regulator_ops bd9573_ops = {
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = bd957x_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct bd957x_regulator_data bd9576_regulators[] = {
- {
- .desc = {
- .name = "VD50",
- .of_match = of_match_ptr("regulator-vd50"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD50,
- .type = REGULATOR_VOLTAGE,
- .ops = &bd957x_ops,
- .volt_table = &vout1_volt_table[0],
- .n_voltages = ARRAY_SIZE(vout1_volt_table),
- .vsel_reg = BD957X_REG_VOUT1_TUNE,
- .vsel_mask = BD957X_MASK_VOUT1_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+static const struct regulator_ops bd9576_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_over_voltage_protection = bd9576_set_ovp,
+ .set_under_voltage_protection = bd9576_set_uvp,
+ .set_thermal_protection = bd9576_set_tw,
+};
+
+static const struct regulator_ops *bd9573_ops_arr[] = {
+ [BD957X_VD50] = &bd9573_ops,
+ [BD957X_VD18] = &bd9573_ops,
+ [BD957X_VDDDR] = &bd9573_vout34_ops,
+ [BD957X_VD10] = &bd9573_vout34_ops,
+ [BD957X_VOUTL1] = &bd9573_ops,
+ [BD957X_VOUTS1] = &bd9573_vouts1_regulator_ops,
+};
+
+static const struct regulator_ops *bd9576_ops_arr[] = {
+ [BD957X_VD50] = &bd9576_ops,
+ [BD957X_VD18] = &bd9576_ops,
+ [BD957X_VDDDR] = &bd9576_vout34_ops,
+ [BD957X_VD10] = &bd9576_vout34_ops,
+ [BD957X_VOUTL1] = &bd9576_ops,
+ [BD957X_VOUTS1] = &bd9576_vouts1_regulator_ops,
+};
+
+static int vouts1_get_fet_res(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd957x_regulator_data *data;
+ int ret;
+ u32 uohms;
+
+ data = container_of(desc, struct bd957x_regulator_data, desc);
+
+ ret = of_property_read_u32(np, "rohm,ocw-fet-ron-micro-ohms", &uohms);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+
+ return 0;
+ }
+ data->ocw_rfet = uohms;
+ return 0;
+}
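
The of_parse_cb above is invoked by the core while parsing each regulator's
DT node, so the property lives under the VoutS1 node:

/*
 * Hypothetical DT node consumed by the callback above:
 *
 *   regulator-vouts1 {
 *           regulator-name = "VOUTS1";
 *           rohm,ocw-fet-ron-micro-ohms = <30000>;
 *   };
 *
 * -EINVAL from of_property_read_u32() just means the property is absent,
 * which is tolerated - the internal limit tables are then used instead of
 * the external-FET ones.
 */
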
+
+static struct bd957x_data bd957x_regulators = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "VD50",
+ .of_match = of_match_ptr("regulator-vd50"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD50,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &vout1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout1_volt_table),
+ .vsel_reg = BD957X_REG_VOUT1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout1_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout1_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT1_OVD,
+ .uvd_reg = BD9576_REG_VOUT1_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VD18",
- .of_match = of_match_ptr("regulator-vd18"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD18,
- .type = REGULATOR_VOLTAGE,
- .ops = &bd957x_ops,
- .volt_table = &vout2_volt_table[0],
- .n_voltages = ARRAY_SIZE(vout2_volt_table),
- .vsel_reg = BD957X_REG_VOUT2_TUNE,
- .vsel_mask = BD957X_MASK_VOUT2_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER2,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VD18",
+ .of_match = of_match_ptr("regulator-vd18"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD18,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &vout2_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout2_volt_table),
+ .vsel_reg = BD957X_REG_VOUT2_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT2_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER2,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT2_OVD,
+ .uvd_reg = BD9576_REG_VOUT2_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VDDDR",
- .of_match = of_match_ptr("regulator-vdddr"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VDDDR,
- .ops = &bd957x_vout34_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD957X_VOUTS34_NUM_VOLT,
- .vsel_reg = BD957X_REG_VOUT3_TUNE,
- .vsel_mask = BD957X_MASK_VOUT3_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER3,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VDDDR",
+ .of_match = of_match_ptr("regulator-vdddr"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VDDDR,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT3_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT3_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER3,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .ovd_reg = BD9576_REG_VOUT3_OVD,
+ .uvd_reg = BD9576_REG_VOUT3_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
},
- },
- {
- .desc = {
- .name = "VD10",
- .of_match = of_match_ptr("regulator-vd10"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD10,
- .ops = &bd957x_vout34_ops,
- .type = REGULATOR_VOLTAGE,
- .fixed_uV = BD957X_VOUTS4_BASE_VOLT,
- .n_voltages = BD957X_VOUTS34_NUM_VOLT,
- .vsel_reg = BD957X_REG_VOUT4_TUNE,
- .vsel_mask = BD957X_MASK_VOUT4_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER4,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VD10",
+ .of_match = of_match_ptr("regulator-vd10"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD10,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD957X_VOUTS4_BASE_VOLT,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT4_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT4_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER4,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT4_OVD,
+ .uvd_reg = BD9576_REG_VOUT4_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VOUTL1",
- .of_match = of_match_ptr("regulator-voutl1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VOUTL1,
- .ops = &bd957x_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &voutl1_volt_table[0],
- .n_voltages = ARRAY_SIZE(voutl1_volt_table),
- .vsel_reg = BD957X_REG_VOUTL1_TUNE,
- .vsel_mask = BD957X_MASK_VOUTL1_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGERL1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VOUTL1",
+ .of_match = of_match_ptr("regulator-voutl1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTL1,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &voutl1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(voutl1_volt_table),
+ .vsel_reg = BD957X_REG_VOUTL1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUTL1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGERL1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = voutL1_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(voutL1_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUTL1_OVD,
+ .uvd_reg = BD9576_REG_VOUTL1_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VOUTS1",
- .of_match = of_match_ptr("regulator-vouts1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VOUTS1,
- .ops = &bd957X_vouts1_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 1,
- .fixed_uV = BD957X_VOUTS1_VOLT,
- .enable_reg = BD957X_REG_POW_TRIGGERS1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VOUTS1",
+ .of_match = of_match_ptr("regulator-vouts1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTS1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1,
+ .fixed_uV = BD957X_VOUTS1_VOLT,
+ .enable_reg = BD957X_REG_POW_TRIGGERS1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ .of_parse_cb = vouts1_get_fet_res,
+ },
+ .oc_supported = true,
+ .ocw_reg = BD9576_REG_VOUT1S_OCW,
+ .ocw_mask = BD9576_MASK_VOUT1S_OCW,
+ .ocp_reg = BD9576_REG_VOUT1S_OCP,
+ .ocp_mask = BD9576_MASK_VOUT1S_OCP,
},
},
};
+static int bd9576_renable(struct regulator_irq_data *rid, int reg, int mask)
+{
+ int val, ret;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, reg, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ if (rid->opaque && rid->opaque == (val & mask)) {
+ /*
+		 * It seems we still have the same status. Ack and tell the
+		 * core that we are still out of limits and it should not
+		 * re-enable the IRQ.
+ */
+ regmap_write(d->regmap, reg, mask & val);
+ return REGULATOR_ERROR_ON;
+ }
+ rid->opaque = 0;
+ /*
+	 * The status has changed. Either the problem was solved or we have
+	 * new issues. Re-enable the IRQ and be prepared to report problems
+	 * again.
+ */
+ return REGULATOR_ERROR_CLEARED;
+}
+
+static int bd9576_uvd_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_UVD_STAT, UVD_IRQ_VALID_MASK);
+}
+
+static int bd9576_ovd_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_OVD_STAT, OVD_IRQ_VALID_MASK);
+}
+
+static int bd9576_temp_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_THERM_STAT,
+ BD9576_THERM_IRQ_MASK_TW);
+}
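
As I read the IRQ-helper contract introduced by this series, the renable
callbacks run while the IRQ is masked and the helper polls:

/*
 *   REGULATOR_FAILED_RETRY  - status read failed, poll again later
 *   REGULATOR_ERROR_ON      - still out of limits, keep the IRQ masked
 *   REGULATOR_ERROR_CLEARED - recovered or changed, re-enable the IRQ
 *
 * rid->opaque carries the status bits recorded by the last map_event
 * call, which is how an unchanged status is recognized above.
 */
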
+
+static int bd9576_uvd_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_UVD_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ *dev_mask = 0;
+
+ rid->opaque = val & UVD_IRQ_VALID_MASK;
+
+ /*
+ * Go through the set status bits and report either error or warning
+ * to the notifier depending on what was flagged in DT
+ */
+ *dev_mask = val & BD9576_xVD_IRQ_MASK_VOUT1TO4;
+ /* There is 1 bit gap in register after Vout1 .. Vout4 statuses */
+ *dev_mask |= ((val & BD9576_xVD_IRQ_MASK_VOUTL1) >> 1);
+ /*
+	 * We (ab)use the UVD bits for the OCW notification. DT parsing should
+	 * have added the correct OCW flags to uvd_notif and uvd_err for S1.
+ */
+ *dev_mask |= ((val & BD9576_UVD_IRQ_MASK_VOUTS1_OCW) >> 1);
+
+ for_each_set_bit(i, dev_mask, 6) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->uvd_notif;
+ stat->errors = rdata->uvd_err;
+ }
+
+ ret = regmap_write(d->regmap, BD957X_REG_INT_UVD_STAT,
+ UVD_IRQ_VALID_MASK & val);
+
+ return 0;
+}
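
Example of the dev_mask packing above: the UVD status register has
Vout1..Vout4 in bits 0..3, a gap in bit 4, VoutL1 in bit 5 and the VoutS1
OCW flag in bit 6, while dev_mask wants contiguous regulator indices. With
val = 0x60 (VoutL1 + S1 OCW):

	dev_mask = (0x60 & 0x0f)
		 | ((0x60 & BIT(5)) >> 1)	/* 0x10: index 4, VOUTL1 */
		 | ((0x60 & BIT(6)) >> 1);	/* 0x20: index 5, VOUTS1 */
	/* dev_mask == 0x30 */
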
+
+static int bd9576_ovd_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_OVD_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ rid->opaque = val & OVD_IRQ_VALID_MASK;
+ *dev_mask = 0;
+
+ if (!(val & OVD_IRQ_VALID_MASK))
+ return 0;
+
+ *dev_mask = val & BD9576_xVD_IRQ_MASK_VOUT1TO4;
+ /* There is 1 bit gap in register after Vout1 .. Vout4 statuses */
+ *dev_mask |= ((val & BD9576_xVD_IRQ_MASK_VOUTL1) >> 1);
+
+ for_each_set_bit(i, dev_mask, 5) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->ovd_notif;
+ stat->errors = rdata->ovd_err;
+ }
+
+ /* Clear the sub-IRQ status */
+ regmap_write(d->regmap, BD957X_REG_INT_OVD_STAT,
+ OVD_IRQ_VALID_MASK & val);
+
+ return 0;
+}
+
+#define BD9576_DEV_MASK_ALL_REGULATORS 0x3F
+
+static int bd9576_thermal_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_THERM_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ if (!(val & BD9576_THERM_IRQ_MASK_TW)) {
+ *dev_mask = 0;
+ return 0;
+ }
+
+ *dev_mask = BD9576_DEV_MASK_ALL_REGULATORS;
+
+ for (i = 0; i < BD9576_NUM_REGULATORS; i++) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->temp_notif;
+ stat->errors = rdata->temp_err;
+ }
+
+ /* Clear the sub-IRQ status */
+ regmap_write(d->regmap, BD957X_REG_INT_THERM_STAT,
+ BD9576_THERM_IRQ_MASK_TW);
+
+ return 0;
+}
+
static int bd957x_probe(struct platform_device *pdev)
{
+ int i;
+ unsigned int num_reg_data;
+ bool vout_mode, ddr_sel, may_have_irqs = false;
struct regmap *regmap;
+ struct bd957x_data *ic_data;
struct regulator_config config = { 0 };
- int i;
- bool vout_mode, ddr_sel;
- const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
- unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
+ /* All regulators are related to UVD and thermal IRQs... */
+ struct regulator_dev *rdevs[BD9576_NUM_REGULATORS];
+ /* ...But VoutS1 is not flagged by OVD IRQ */
+ struct regulator_dev *ovd_devs[BD9576_NUM_OVD_REGULATORS];
+ static const struct regulator_irq_desc bd9576_notif_uvd = {
+ .name = "bd9576-uvd",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_uvd_handler,
+ .renable = bd9576_uvd_renable,
+ .data = &bd957x_regulators,
+ };
+ static const struct regulator_irq_desc bd9576_notif_ovd = {
+ .name = "bd9576-ovd",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_ovd_handler,
+ .renable = bd9576_ovd_renable,
+ .data = &bd957x_regulators,
+ };
+ static const struct regulator_irq_desc bd9576_notif_temp = {
+ .name = "bd9576-temp",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_thermal_handler,
+ .renable = bd9576_temp_renable,
+ .data = &bd957x_regulators,
+ };
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
+ num_reg_data = ARRAY_SIZE(bd957x_regulators.regulator_data);
+
+ ic_data = &bd957x_regulators;
+
regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!regmap) {
dev_err(&pdev->dev, "No regmap\n");
return -EINVAL;
}
+
+ ic_data->regmap = regmap;
vout_mode = of_property_read_bool(pdev->dev.parent->of_node,
"rohm,vout1-en-low");
if (vout_mode) {
@@ -263,15 +985,17 @@ static int bd957x_probe(struct platform_device *pdev)
* bytes and use bd9576_regulators directly for non-constant configs
* like DDR voltage selection.
*/
+ platform_set_drvdata(pdev, ic_data);
ddr_sel = of_property_read_bool(pdev->dev.parent->of_node,
"rohm,ddr-sel-low");
if (ddr_sel)
- bd9576_regulators[2].desc.fixed_uV = 1350000;
+ ic_data->regulator_data[2].desc.fixed_uV = 1350000;
else
- bd9576_regulators[2].desc.fixed_uV = 1500000;
+ ic_data->regulator_data[2].desc.fixed_uV = 1500000;
switch (chip) {
case ROHM_CHIP_TYPE_BD9576:
+ may_have_irqs = true;
dev_dbg(&pdev->dev, "Found BD9576MUF\n");
break;
case ROHM_CHIP_TYPE_BD9573:
@@ -282,38 +1006,122 @@ static int bd957x_probe(struct platform_device *pdev)
return -EINVAL;
}
+ for (i = 0; i < num_reg_data; i++) {
+ struct regulator_desc *d;
+
+ d = &ic_data->regulator_data[i].desc;
+
+ if (may_have_irqs) {
+ if (d->id >= ARRAY_SIZE(bd9576_ops_arr))
+ return -EINVAL;
+
+ d->ops = bd9576_ops_arr[d->id];
+ } else {
+ if (d->id >= ARRAY_SIZE(bd9573_ops_arr))
+ return -EINVAL;
+
+ d->ops = bd9573_ops_arr[d->id];
+ }
+ }
+
config.dev = pdev->dev.parent;
config.regmap = regmap;
+ config.driver_data = ic_data;
for (i = 0; i < num_reg_data; i++) {
- const struct regulator_desc *desc;
- struct regulator_dev *rdev;
- const struct bd957x_regulator_data *r;
-
- r = &reg_data[i];
- desc = &r->desc;
+ struct bd957x_regulator_data *r = &ic_data->regulator_data[i];
+ const struct regulator_desc *desc = &r->desc;
- rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev)) {
+ r->rdev = devm_regulator_register(&pdev->dev, desc,
+ &config);
+ if (IS_ERR(r->rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
desc->name);
- return PTR_ERR(rdev);
+ return PTR_ERR(r->rdev);
}
/*
* Clear the VOUT1 GPIO setting - rest of the regulators do not
* support GPIO control
*/
config.ena_gpiod = NULL;
+
+ if (!may_have_irqs)
+ continue;
+
+ rdevs[i] = r->rdev;
+ if (i < BD957X_VOUTS1)
+ ovd_devs[i] = r->rdev;
}
+ if (may_have_irqs) {
+ void *ret;
+ /*
+		 * We can pass both the possible error and warning flags here
+		 * because the core uses them only for status clearing, and
+		 * when warnings are in use the errors are always clear (and
+		 * vice versa). We can also pass the CURRENT flags for all
+		 * regulators because they are never set when unsupported. The
+		 * same applies to setting UVD for VoutS1 - it is never set,
+		 * so it cannot be accidentally cleared.
+ */
+ int uvd_errs = REGULATOR_ERROR_UNDER_VOLTAGE |
+ REGULATOR_ERROR_UNDER_VOLTAGE_WARN |
+ REGULATOR_ERROR_OVER_CURRENT |
+ REGULATOR_ERROR_OVER_CURRENT_WARN;
+ int ovd_errs = REGULATOR_ERROR_OVER_VOLTAGE_WARN |
+ REGULATOR_ERROR_REGULATION_OUT;
+ int temp_errs = REGULATOR_ERROR_OVER_TEMP |
+ REGULATOR_ERROR_OVER_TEMP_WARN;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "bd9576-uvd");
+ /* Register notifiers - can fail if IRQ is not given */
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_uvd,
+ irq, 0, uvd_errs, NULL,
+ &rdevs[0],
+ BD9576_NUM_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "UVD disabled %pe\n", ret);
+ }
+
+ irq = platform_get_irq_byname(pdev, "bd9576-ovd");
+
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_ovd,
+ irq, 0, ovd_errs, NULL,
+ &ovd_devs[0],
+ BD9576_NUM_OVD_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "OVD disabled %pe\n", ret);
+ }
+ irq = platform_get_irq_byname(pdev, "bd9576-temp");
+
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_temp,
+ irq, 0, temp_errs, NULL,
+ &rdevs[0],
+ BD9576_NUM_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "Thermal warning disabled %pe\n",
+ ret);
+ }
+ }
return 0;
}
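
For reference, the shape of the helper used above, as it appears in this
series (the return value is an ERR_PTR on failure):

	void *devm_regulator_irq_helper(struct device *dev,
					const struct regulator_irq_desc *d,
					int irq, int irq_flags,
					int common_errs, int *per_rdev_errs,
					struct regulator_dev **rdev,
					int rdev_amount);

Only -EPROBE_DEFER is treated as fatal in the probe; a missing IRQ merely
downgrades the corresponding feature with a warning.
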
static const struct platform_device_id bd957x_pmic_id[] = {
- { "bd9573-pmic", ROHM_CHIP_TYPE_BD9573 },
- { "bd9576-pmic", ROHM_CHIP_TYPE_BD9576 },
+ { "bd9573-regulator", ROHM_CHIP_TYPE_BD9573 },
+ { "bd9576-regulator", ROHM_CHIP_TYPE_BD9576 },
{ },
};
MODULE_DEVICE_TABLE(platform, bd957x_pmic_id);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f192bf19492e..ca6caba8a191 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -33,17 +33,6 @@
#include "dummy.h"
#include "internal.h"
-#define rdev_crit(rdev, fmt, ...) \
- pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_err(rdev, fmt, ...) \
- pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_warn(rdev, fmt, ...) \
- pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_info(rdev, fmt, ...) \
- pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_dbg(rdev, fmt, ...) \
- pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-
static DEFINE_WW_CLASS(regulator_ww_class);
static DEFINE_MUTEX(regulator_nesting_mutex);
static DEFINE_MUTEX(regulator_list_mutex);
@@ -117,6 +106,7 @@ const char *rdev_get_name(struct regulator_dev *rdev)
else
return "";
}
+EXPORT_SYMBOL_GPL(rdev_get_name);
static bool have_full_constraints(void)
{
@@ -591,8 +581,8 @@ regulator_get_suspend_state_check(struct regulator_dev *rdev, suspend_state_t st
return rstate;
}
-static ssize_t regulator_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
int uV;
@@ -605,16 +595,16 @@ static ssize_t regulator_uV_show(struct device *dev,
return uV;
return sprintf(buf, "%d\n", uV);
}
-static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
+static DEVICE_ATTR_RO(microvolts);
-static ssize_t regulator_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
}
-static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
+static DEVICE_ATTR_RO(microamps);
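
The renames in this file follow from the DEVICE_ATTR_RO() naming
convention - the macro expands roughly to

	static struct device_attribute dev_attr_microamps =
		__ATTR(microamps, 0444, microamps_show, NULL);

so the show callback must be named <attribute>_show, hence
regulator_uA_show() becoming microamps_show() and so on below.
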
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -645,14 +635,14 @@ static ssize_t regulator_print_opmode(char *buf, int mode)
return sprintf(buf, "%s\n", regulator_opmode_to_str(mode));
}
-static ssize_t regulator_opmode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t opmode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf, _regulator_get_mode(rdev));
}
-static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL);
+static DEVICE_ATTR_RO(opmode);
static ssize_t regulator_print_state(char *buf, int state)
{
@@ -664,8 +654,8 @@ static ssize_t regulator_print_state(char *buf, int state)
return sprintf(buf, "unknown\n");
}
-static ssize_t regulator_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
ssize_t ret;
@@ -676,10 +666,10 @@ static ssize_t regulator_state_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
+static DEVICE_ATTR_RO(state);
-static ssize_t regulator_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
int status;
@@ -723,10 +713,10 @@ static ssize_t regulator_status_show(struct device *dev,
return sprintf(buf, "%s\n", label);
}
-static DEVICE_ATTR(status, 0444, regulator_status_show, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t regulator_min_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -735,10 +725,10 @@ static ssize_t regulator_min_uA_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->min_uA);
}
-static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);
+static DEVICE_ATTR_RO(min_microamps);
-static ssize_t regulator_max_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -747,10 +737,10 @@ static ssize_t regulator_max_uA_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->max_uA);
}
-static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);
+static DEVICE_ATTR_RO(max_microamps);
-static ssize_t regulator_min_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -759,10 +749,10 @@ static ssize_t regulator_min_uV_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->min_uV);
}
-static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);
+static DEVICE_ATTR_RO(min_microvolts);
-static ssize_t regulator_max_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -771,10 +761,10 @@ static ssize_t regulator_max_uV_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->max_uV);
}
-static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);
+static DEVICE_ATTR_RO(max_microvolts);
-static ssize_t regulator_total_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t requested_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
struct regulator *regulator;
@@ -788,7 +778,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
regulator_unlock(rdev);
return sprintf(buf, "%d\n", uA);
}
-static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
+static DEVICE_ATTR_RO(requested_microamps);
static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -813,104 +803,95 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(type);
-static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
}
-static DEVICE_ATTR(suspend_mem_microvolts, 0444,
- regulator_suspend_mem_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_microvolts);
-static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
}
-static DEVICE_ATTR(suspend_disk_microvolts, 0444,
- regulator_suspend_disk_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_microvolts);
-static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
}
-static DEVICE_ATTR(suspend_standby_microvolts, 0444,
- regulator_suspend_standby_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_microvolts);
-static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_mem.mode);
}
-static DEVICE_ATTR(suspend_mem_mode, 0444,
- regulator_suspend_mem_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_mode);
-static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_disk.mode);
}
-static DEVICE_ATTR(suspend_disk_mode, 0444,
- regulator_suspend_disk_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_mode);
-static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_standby.mode);
}
-static DEVICE_ATTR(suspend_standby_mode, 0444,
- regulator_suspend_standby_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_mode);
-static ssize_t regulator_suspend_mem_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_mem.enabled);
}
-static DEVICE_ATTR(suspend_mem_state, 0444,
- regulator_suspend_mem_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_state);
-static ssize_t regulator_suspend_disk_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_disk.enabled);
}
-static DEVICE_ATTR(suspend_disk_state, 0444,
- regulator_suspend_disk_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_state);
-static ssize_t regulator_suspend_standby_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_standby.enabled);
}
-static DEVICE_ATTR(suspend_standby_state, 0444,
- regulator_suspend_standby_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_state);
-static ssize_t regulator_bypass_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t bypass_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
const char *report;
@@ -928,8 +909,7 @@ static ssize_t regulator_bypass_show(struct device *dev,
return sprintf(buf, "%s\n", report);
}
-static DEVICE_ATTR(bypass, 0444,
- regulator_bypass_show, NULL);
+static DEVICE_ATTR_RO(bypass);
/* Calculate the new optimum regulator operating mode based on the new total
* consumer load. All locks held by caller
@@ -1315,6 +1295,52 @@ static int machine_constraints_current(struct regulator_dev *rdev,
static int _regulator_do_enable(struct regulator_dev *rdev);
+static int notif_set_limit(struct regulator_dev *rdev,
+ int (*set)(struct regulator_dev *, int, int, bool),
+ int limit, int severity)
+{
+ bool enable;
+
+ if (limit == REGULATOR_NOTIF_LIMIT_DISABLE) {
+ enable = false;
+ limit = 0;
+ } else {
+ enable = true;
+ }
+
+ if (limit == REGULATOR_NOTIF_LIMIT_ENABLE)
+ limit = 0;
+
+ return set(rdev, limit, severity, enable);
+}
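
The two sentinel limit values give three cases here (constants introduced
by this series):

	REGULATOR_NOTIF_LIMIT_DISABLE -> set(rdev, 0, severity, false);
	REGULATOR_NOTIF_LIMIT_ENABLE  -> set(rdev, 0, severity, true);
					 /* enable at HW-default threshold */
	any other value               -> set(rdev, limit, severity, true);
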
+
+static int handle_notify_limits(struct regulator_dev *rdev,
+ int (*set)(struct regulator_dev *, int, int, bool),
+ struct notification_limit *limits)
+{
+ int ret = 0;
+
+ if (!set)
+ return -EOPNOTSUPP;
+
+ if (limits->prot)
+ ret = notif_set_limit(rdev, set, limits->prot,
+ REGULATOR_SEVERITY_PROT);
+ if (ret)
+ return ret;
+
+ if (limits->err)
+ ret = notif_set_limit(rdev, set, limits->err,
+ REGULATOR_SEVERITY_ERR);
+ if (ret)
+ return ret;
+
+ if (limits->warn)
+ ret = notif_set_limit(rdev, set, limits->warn,
+ REGULATOR_SEVERITY_WARN);
+
+ return ret;
+}
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1400,9 +1426,27 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
}
+ /*
+	 * The existing logic does not warn if over_current_protection is given
+	 * as a constraint but the driver does not support it. I think we
+	 * should warn about this type of issue, as it is possible someone
+	 * changes the PMIC on a board to another type - and the other PMIC's
+	 * driver does not support setting protection. The board composer may
+	 * happily believe the DT limits are respected - especially if the new
+	 * PMIC HW also supports protection but the driver does not. I won't
+	 * change the logic without hearing a more experienced opinion on this
+	 * though.
+	 *
+	 * If a warning is seen as a good idea then we can merge handling of
+	 * over-current protection and detection and get rid of this special
+	 * handling.
+ */
if (rdev->constraints->over_current_protection
&& ops->set_over_current_protection) {
- ret = ops->set_over_current_protection(rdev);
+ int lim = rdev->constraints->over_curr_limits.prot;
+
+ ret = ops->set_over_current_protection(rdev, lim,
+ REGULATOR_SEVERITY_PROT,
+ true);
if (ret < 0) {
rdev_err(rdev, "failed to set over current protection: %pe\n",
ERR_PTR(ret));
@@ -1410,6 +1454,62 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
}
+ if (rdev->constraints->over_current_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_over_current_protection,
+ &rdev->constraints->over_curr_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set over current limits: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested over-current limits\n");
+ }
+
+ if (rdev->constraints->over_voltage_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_over_voltage_protection,
+ &rdev->constraints->over_voltage_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set over voltage limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested over voltage limits\n");
+ }
+
+ if (rdev->constraints->under_voltage_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_under_voltage_protection,
+ &rdev->constraints->under_voltage_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set under voltage limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested under voltage limits\n");
+ }
+
+ if (rdev->constraints->over_temp_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_thermal_protection,
+ &rdev->constraints->temp_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set temperature limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested temperature limits\n");
+ }
+
if (rdev->constraints->active_discharge && ops->set_active_discharge) {
bool ad_state = (rdev->constraints->active_discharge ==
REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
@@ -1425,6 +1525,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
* and we have control then make sure it is enabled.
*/
if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ /* If we want to enable this regulator, make sure that we know
+ * the supplying regulator.
+ */
+ if (rdev->supply_name && !rdev->supply)
+ return -EPROBE_DEFER;
+
if (rdev->supply) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
@@ -4105,6 +4211,29 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev)
+{
+ int ret;
+
+ regulator_lock(rdev);
+
+ if (!rdev->desc->ops->set_voltage &&
+ !rdev->desc->ops->set_voltage_sel) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Balance only if the regulator is coupled */
+ if (rdev->coupling_desc.n_coupled > 1)
+ ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+ else
+ ret = -EOPNOTSUPP;
+
+out:
+ regulator_unlock(rdev);
+ return ret;
+}
+
/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
@@ -4380,22 +4509,36 @@ unsigned int regulator_get_mode(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_get_mode);
+static int rdev_get_cached_err_flags(struct regulator_dev *rdev)
+{
+ int ret = 0;
+
+ if (rdev->use_cached_err) {
+ spin_lock(&rdev->err_lock);
+ ret = rdev->cached_err;
+ spin_unlock(&rdev->err_lock);
+ }
+ return ret;
+}
+
static int _regulator_get_error_flags(struct regulator_dev *rdev,
unsigned int *flags)
{
- int ret;
+ int cached_flags, ret = 0;
regulator_lock(rdev);
- /* sanity check */
- if (!rdev->desc->ops->get_error_flags) {
+ cached_flags = rdev_get_cached_err_flags(rdev);
+
+ if (rdev->desc->ops->get_error_flags)
+ ret = rdev->desc->ops->get_error_flags(rdev, flags);
+ else if (!rdev->use_cached_err)
ret = -EINVAL;
- goto out;
- }
- ret = rdev->desc->ops->get_error_flags(rdev, flags);
-out:
+ *flags |= cached_flags;
+
regulator_unlock(rdev);
+
return ret;
}
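On the consumer side, the cached IRQ errors now surface through the same regulator_get_error_flags() call as the ops-provided ones. A minimal consumer-side sketch, assuming a valid struct regulator handle:

    static void foo_check_errors(struct device *dev, struct regulator *reg)
    {
            unsigned int flags;
            int ret;

            ret = regulator_get_error_flags(reg, &flags);
            if (!ret && (flags & REGULATOR_ERROR_OVER_CURRENT))
                    dev_warn(dev, "regulator reports over-current\n");
    }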
@@ -5228,6 +5371,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto rinse;
}
device_initialize(&rdev->dev);
+ spin_lock_init(&rdev->err_lock);
/*
* Duplicate the config so the driver could override it after
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index eb3fc1db4edc..c4754f3cf233 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
+ ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- return PTR_ERR(drvdata->dev);
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index e18d291c7f21..23fa429ebe76 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -250,7 +250,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
case DA9052_ID_BUCK3:
case DA9052_ID_LDO2:
case DA9052_ID_LDO3:
- ret = (new_sel - old_sel) * info->step_uV / 6250;
+ ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV,
+ 6250);
break;
}
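Worked example of why the DIV_ROUND_UP()/abs() form is needed, using a hypothetical 10000 uV step:

    /*
     * Hypothetical step_uV = 10000, selector change of -1:
     *
     *   old: (new_sel - old_sel) * step_uV / 6250
     *        = -10000 / 6250 = -1           (negative, truncated delay)
     *
     *   new: DIV_ROUND_UP(abs(new_sel - old_sel) * step_uV, 6250)
     *        = DIV_ROUND_UP(10000, 6250) = 2  (positive, rounded up)
     */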
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 08cbf688e14d..e66925090258 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
case DA9121_BUCK_MODE_FORCE_PFM:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
{
struct da9121 *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- unsigned int val;
+ unsigned int val, mode;
int ret = 0;
ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
return -EINVAL;
}
- return da9121_map_mode(val & da9121_mode_field[id].msk);
+ mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+ if (mode == REGULATOR_MODE_INVALID)
+ return -EINVAL;
+
+ return mode;
}
static const struct regulator_ops da9121_buck_ops = {
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 3091210889e3..a8de0aa88bad 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -481,3 +481,55 @@ void devm_regulator_unregister_notifier(struct regulator *regulator,
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_unregister_notifier);
+
+static void regulator_irq_helper_drop(void *res)
+{
+ regulator_irq_helper_cancel(&res);
+}
+
+/**
+ * devm_regulator_irq_helper - resource managed registration of IRQ based
+ * regulator event/error notifier
+ *
+ * @dev: device to whose lifetime the helper's lifetime is
+ * bound.
+ * @d: IRQ helper descriptor.
+ * @irq: IRQ used to signal the events/errors to be notified.
+ * @irq_flags: Extra IRQ flags to be OR'ed with the default
+ * IRQF_ONESHOT when requesting the (threaded) irq.
+ * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
+ * When the IRQ is re-enabled these errors will be cleared
+ * from all associated regulators.
+ * @per_rdev_errs: Optional array of error flags describing errors specific
+ * to only some of the regulators. These errors will be
+ * OR'ed with the common errors. If given, the array
+ * should contain rdev_amount flags. May be NULL if there
+ * are no regulator-specific error flags for this IRQ.
+ * @rdev: Array of pointers to the regulators associated with this
+ * IRQ.
+ * @rdev_amount: Number of regulators associated with this IRQ.
+ *
+ * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ */
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount)
+{
+ void *ptr;
+ int ret;
+
+ ptr = regulator_irq_helper(dev, d, irq, irq_flags, common_errs,
+ per_rdev_errs, rdev, rdev_amount);
+ if (IS_ERR(ptr))
+ return ptr;
+
+ ret = devm_add_action_or_reset(dev, regulator_irq_helper_drop, ptr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_irq_helper);
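A hypothetical probe-time usage sketch for the devm variant; the foo_* names and descriptor contents are illustrative only, and foo_map_ocp_event() is sketched after regulator_irq_helper_cancel() below:

    static int foo_map_ocp_event(int irq, struct regulator_irq_data *rid,
                                 unsigned long *dev_mask);

    static const struct regulator_irq_desc foo_ocp_desc = {
            .name = "foo-ocp",
            .map_event = foo_map_ocp_event,
            .irq_off_ms = 1000,     /* keep IRQ off 1 s while asserted */
    };

    static int foo_register_notifier(struct device *dev, int irq,
                                     struct regulator_dev *rdev)
    {
            void *helper;

            helper = devm_regulator_irq_helper(dev, &foo_ocp_desc, irq, 0,
                                               REGULATOR_ERROR_OVER_CURRENT,
                                               NULL, &rdev, 1);

            return PTR_ERR_OR_ZERO(helper);
    }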
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index f3918f03aaf3..dac1fb584fa3 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -55,9 +55,7 @@
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
#define FAN53526_NVOLTAGES 128
-#define TCS4525_NVOLTAGES 127 /* Numbers of voltages */
-#define TCS_VSEL_NSEL_MASK 0x7f
#define TCS_VSEL0_MODE (1 << 7)
#define TCS_VSEL1_MODE (1 << 6)
@@ -68,7 +66,7 @@ enum fan53555_vendor {
FAN53526_VENDOR_FAIRCHILD = 0,
FAN53555_VENDOR_FAIRCHILD,
FAN53555_VENDOR_SILERGY,
- FAN53555_VENDOR_TCS,
+ FAN53526_VENDOR_TCS,
};
enum {
@@ -90,6 +88,14 @@ enum {
FAN53555_CHIP_ID_08 = 8,
};
+enum {
+ TCS4525_CHIP_ID_12 = 12,
+};
+
+enum {
+ TCS4526_CHIP_ID_00 = 0,
+};
+
/* IC mask revision */
enum {
FAN53555_CHIP_REV_00 = 0x3,
@@ -124,7 +130,8 @@ struct fan53555_device_info {
/* Slew rate */
unsigned int slew_reg;
unsigned int slew_mask;
- unsigned int slew_shift;
+ const unsigned int *ramp_delay_table;
+ unsigned int n_ramp_values;
unsigned int slew_rate;
};
@@ -198,7 +205,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
-static const int slew_rates[] = {
+static const unsigned int slew_rates[] = {
64000,
32000,
16000,
@@ -209,51 +216,13 @@ static const int slew_rates[] = {
500,
};
-static const int tcs_slew_rates[] = {
+static const unsigned int tcs_slew_rates[] = {
18700,
9300,
4600,
2300,
};
-static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- int regval = -1, i;
- const int *slew_rate_t;
- int slew_rate_n;
-
- switch (di->vendor) {
- case FAN53526_VENDOR_FAIRCHILD:
- case FAN53555_VENDOR_FAIRCHILD:
- case FAN53555_VENDOR_SILERGY:
- slew_rate_t = slew_rates;
- slew_rate_n = ARRAY_SIZE(slew_rates);
- break;
- case FAN53555_VENDOR_TCS:
- slew_rate_t = tcs_slew_rates;
- slew_rate_n = ARRAY_SIZE(tcs_slew_rates);
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < slew_rate_n; i++) {
- if (ramp <= slew_rate_t[i])
- regval = i;
- else
- break;
- }
-
- if (regval < 0) {
- dev_err(di->dev, "unsupported ramp value %d\n", ramp);
- return -EINVAL;
- }
-
- return regmap_update_bits(rdev->regmap, di->slew_reg,
- di->slew_mask, regval << di->slew_shift);
-}
-
static const struct regulator_ops fan53555_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -266,7 +235,7 @@ static const struct regulator_ops fan53555_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = fan53555_set_mode,
.get_mode = fan53555_get_mode,
- .set_ramp_delay = fan53555_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_enable = fan53555_set_suspend_enable,
.set_suspend_disable = fan53555_set_suspend_disable,
};
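The regmap ramp helper replaces per-driver loops like the one removed above with a single table lookup driven by ramp_reg/ramp_mask/ramp_delay_table. Roughly the selection it performs, as a simplified sketch (the real helper additionally falls back to the largest entry and returns an error when nothing fits):

    /* Pick the smallest table entry that still satisfies the request. */
    static int pick_ramp_sel(const unsigned int *table, int n, int ramp_delay)
    {
            int i, best = -1;

            for (i = 0; i < n; i++)
                    if (table[i] >= ramp_delay &&
                        (best < 0 || table[i] < table[best]))
                            best = i;

            return best;    /* selector index, or -1 if none fits */
    }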
@@ -294,6 +263,10 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->slew_reg = FAN53555_CONTROL;
+ di->slew_mask = CTL_SLEW_MASK;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53526_NVOLTAGES;
return 0;
@@ -338,7 +311,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
}
di->slew_reg = FAN53555_CONTROL;
di->slew_mask = CTL_SLEW_MASK;
- di->slew_shift = CTL_SLEW_SHIFT;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -359,24 +333,33 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
return -EINVAL;
}
di->slew_reg = FAN53555_CONTROL;
- di->slew_reg = FAN53555_CONTROL;
di->slew_mask = CTL_SLEW_MASK;
- di->slew_shift = CTL_SLEW_SHIFT;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
}
-static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
+static int fan53526_voltages_setup_tcs(struct fan53555_device_info *di)
{
- di->slew_reg = TCS4525_TIME;
- di->slew_mask = TCS_SLEW_MASK;
- di->slew_shift = TCS_SLEW_MASK;
-
- /* Init voltage range and step */
- di->vsel_min = 600000;
- di->vsel_step = 6250;
- di->vsel_count = TCS4525_NVOLTAGES;
+ switch (di->chip_id) {
+ case TCS4525_CHIP_ID_12:
+ case TCS4526_CHIP_ID_00:
+ di->slew_reg = TCS4525_TIME;
+ di->slew_mask = TCS_SLEW_MASK;
+ di->ramp_delay_table = tcs_slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(tcs_slew_rates);
+
+ /* Init voltage range and step */
+ di->vsel_min = 600000;
+ di->vsel_step = 6250;
+ di->vsel_count = FAN53526_NVOLTAGES;
+ break;
+ default:
+ dev_err(di->dev, "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
return 0;
}
@@ -410,7 +393,7 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
return -EINVAL;
}
break;
- case FAN53555_VENDOR_TCS:
+ case FAN53526_VENDOR_TCS:
switch (pdata->sleep_vsel_id) {
case FAN53555_VSEL_ID_0:
di->sleep_reg = TCS4525_VSEL0;
@@ -449,7 +432,7 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
di->mode_reg = di->vol_reg;
di->mode_mask = VSEL_MODE;
break;
- case FAN53555_VENDOR_TCS:
+ case FAN53526_VENDOR_TCS:
di->mode_reg = TCS4525_COMMAND;
switch (pdata->sleep_vsel_id) {
@@ -477,8 +460,8 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
case FAN53555_VENDOR_SILERGY:
ret = fan53555_voltages_setup_silergy(di);
break;
- case FAN53555_VENDOR_TCS:
- ret = fan53555_voltages_setup_tcs(di);
+ case FAN53526_VENDOR_TCS:
+ ret = fan53526_voltages_setup_tcs(di);
break;
default:
dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
@@ -505,6 +488,10 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->uV_step = di->vsel_step;
rdesc->vsel_reg = di->vol_reg;
rdesc->vsel_mask = di->vsel_count - 1;
+ rdesc->ramp_reg = di->slew_reg;
+ rdesc->ramp_mask = di->slew_mask;
+ rdesc->ramp_delay_table = di->ramp_delay_table;
+ rdesc->n_ramp_values = di->n_ramp_values;
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
@@ -553,7 +540,10 @@ static const struct of_device_id __maybe_unused fan53555_dt_ids[] = {
.data = (void *)FAN53555_VENDOR_SILERGY,
}, {
.compatible = "tcs,tcs4525",
- .data = (void *)FAN53555_VENDOR_TCS
+ .data = (void *)FAN53526_VENDOR_TCS
+ }, {
+ .compatible = "tcs,tcs4526",
+ .data = (void *)FAN53526_VENDOR_TCS
},
{ }
};
@@ -661,7 +651,10 @@ static const struct i2c_device_id fan53555_id[] = {
.driver_data = FAN53555_VENDOR_SILERGY
}, {
.name = "tcs4525",
- .driver_data = FAN53555_VENDOR_TCS
+ .driver_data = FAN53526_VENDOR_TCS
+ }, {
+ .name = "tcs4526",
+ .driver_data = FAN53526_VENDOR_TCS
},
{ },
};
diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
index e83eb4fb1876..8f25930d2769 100644
--- a/drivers/regulator/fan53880.c
+++ b/drivers/regulator/fan53880.c
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
}, \
.n_linear_ranges = 2, \
+ .n_voltages = 0x74, \
.vsel_reg = FAN53880_LDO ## _num ## VOUT, \
.vsel_mask = 0x7f, \
.enable_reg = FAN53880_ENABLE, \
@@ -76,8 +77,9 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
},
.n_linear_ranges = 2,
+ .n_voltages = 0xf8,
.vsel_reg = FAN53880_BUCKVOUT,
- .vsel_mask = 0x7f,
+ .vsel_mask = 0xff,
.enable_reg = FAN53880_ENABLE,
.enable_mask = 0x10,
.enable_time = 480,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
},
.n_linear_ranges = 2,
+ .n_voltages = 0x71,
.vsel_reg = FAN53880_BOOSTVOUT,
.vsel_mask = 0x7f,
.enable_reg = FAN53880_ENABLE_BOOST,
@@ -111,8 +114,7 @@ static const struct regmap_config fan53880_regmap = {
.max_register = FAN53880_ENABLE_BOOST,
};
-static int fan53880_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int fan53880_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -174,7 +176,7 @@ static struct i2c_driver fan53880_regulator_driver = {
.name = "fan53880",
.of_match_table = of_match_ptr(fan53880_dt_ids),
},
- .probe = fan53880_i2c_probe,
+ .probe_new = fan53880_i2c_probe,
.id_table = fan53880_i2c_id,
};
module_i2c_driver(fan53880_regulator_driver);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 02ad83153e19..39284610a536 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
{
struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
struct device *dev = rdev->dev.parent;
+ int ret;
+
+ ret = dev_pm_genpd_set_performance_state(dev, 0);
+ if (ret)
+ return ret;
priv->enable_counter--;
- return dev_pm_genpd_set_performance_state(dev, 0);
+ return 0;
}
static int reg_is_enabled(struct regulator_dev *rdev)
@@ -271,7 +276,8 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
*/
cfg.ena_gpiod = gpiod_get_optional(&pdev->dev, NULL, gflags);
if (IS_ERR(cfg.ena_gpiod))
- return PTR_ERR(cfg.ena_gpiod);
+ return dev_err_probe(&pdev->dev, PTR_ERR(cfg.ena_gpiod),
+ "can't get GPIO\n");
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 0e16e31c968f..ad2237a95572 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
int ret;
unsigned int sel;
- if (!rdev->desc->n_ramp_values)
+ if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
return -EINVAL;
ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index dc631c1a46b4..bff8c515dcde 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -386,7 +386,7 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 reg_val;
+ unsigned int reg_val;
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
@@ -398,7 +398,7 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 reg_val;
+ unsigned int reg_val;
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
@@ -411,7 +411,7 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 new_mode;
+ unsigned int new_mode;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -435,7 +435,7 @@ static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 new_mode;
+ unsigned int new_mode;
switch (mode) {
case REGULATOR_MODE_NORMAL:
diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
index f6a14e9c3cbf..9b162c0555c3 100644
--- a/drivers/regulator/hi6421v600-regulator.c
+++ b/drivers/regulator/hi6421v600-regulator.c
@@ -3,7 +3,7 @@
// Device driver for regulators in Hisi IC
//
// Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
//
// Guodong Xu <guodong.xu@linaro.org>
@@ -16,14 +16,15 @@
#include <linux/regulator/driver.h>
#include <linux/spmi.h>
+struct hi6421_spmi_reg_priv {
+ /* Serialize regulator enable logic */
+ struct mutex enable_mutex;
+};
+
struct hi6421_spmi_reg_info {
struct regulator_desc desc;
- struct hi6421_spmi_pmic *pmic;
u8 eco_mode_mask;
u32 eco_uA;
-
- /* Serialize regulator enable logic */
- struct mutex enable_mutex;
};
static const unsigned int ldo3_voltages[] = {
@@ -83,7 +84,7 @@ static const unsigned int ldo34_voltages[] = {
.owner = THIS_MODULE, \
.volt_table = vtable, \
.n_voltages = ARRAY_SIZE(vtable), \
- .vsel_mask = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+ .vsel_mask = ARRAY_SIZE(vtable) - 1, \
.vsel_reg = vreg, \
.enable_reg = ereg, \
.enable_mask = emask, \
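Worked example of the vsel_mask fix above: for a 16-entry voltage table the old expression produced a mask far wider than the selector field:

    /*
     *   old: (1 << (16 - 1)) - 1 = 0x7fff   (15 bits)
     *   new:        16 - 1       = 0x000f   (the 4 selector bits)
     */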
@@ -97,41 +98,31 @@ static const unsigned int ldo34_voltages[] = {
static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ struct hi6421_spmi_reg_priv *priv;
int ret;
+ priv = dev_get_drvdata(rdev->dev.parent);
/* cannot enable more than one regulator at one time */
- mutex_lock(&sreg->enable_mutex);
+ mutex_lock(&priv->enable_mutex);
- ret = regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
rdev->desc->enable_mask);
/* Avoid powering up multiple devices at the same time */
usleep_range(rdev->desc->off_on_delay, rdev->desc->off_on_delay + 60);
- mutex_unlock(&sreg->enable_mutex);
+ mutex_unlock(&priv->enable_mutex);
return ret;
}
-static int hi6421_spmi_regulator_disable(struct regulator_dev *rdev)
-{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
-
- return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, 0);
-}
-
static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
{
struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 reg_val;
+ unsigned int reg_val;
- regmap_read(pmic->regmap, rdev->desc->enable_reg, &reg_val);
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & sreg->eco_mode_mask)
return REGULATOR_MODE_IDLE;
@@ -143,21 +134,23 @@ static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 val;
+ unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
break;
case REGULATOR_MODE_IDLE:
- val = sreg->eco_mode_mask << (ffs(sreg->eco_mode_mask) - 1);
+ if (!sreg->eco_mode_mask)
+ return -EINVAL;
+
+ val = sreg->eco_mode_mask;
break;
default:
return -EINVAL;
}
- return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
sreg->eco_mode_mask, val);
}
@@ -177,9 +170,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
static const struct regulator_ops hi6421_spmi_ldo_rops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = hi6421_spmi_regulator_enable,
- .disable = hi6421_spmi_regulator_disable,
+ .disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
+ .map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = hi6421_spmi_regulator_get_mode,
@@ -238,7 +231,7 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
{
struct device *pmic_dev = pdev->dev.parent;
struct regulator_config config = { };
- struct hi6421_spmi_reg_info *sreg;
+ struct hi6421_spmi_reg_priv *priv;
struct hi6421_spmi_reg_info *info;
struct device *dev = &pdev->dev;
struct hi6421_spmi_pmic *pmic;
@@ -254,18 +247,18 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
if (WARN_ON(!pmic))
return -ENODEV;
- sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
- if (!sreg)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- sreg->pmic = pmic;
- mutex_init(&sreg->enable_mutex);
+ mutex_init(&priv->enable_mutex);
+ platform_set_drvdata(pdev, priv);
for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
info = &regulator_info[i];
config.dev = pdev->dev.parent;
- config.driver_data = sreg;
+ config.driver_data = info;
config.regmap = pmic->regmap;
rdev = devm_regulator_register(dev, &info->desc, &config);
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index ac2ee2030211..556bb73f3329 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -2,7 +2,7 @@
//
// Device driver for regulators in Hi655x IC
//
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
//
// Authors:
// Chen Feng <puck.chen@hisilicon.com>
@@ -72,7 +72,7 @@ enum hi655x_regulator_id {
static int hi655x_is_enabled(struct regulator_dev *rdev)
{
unsigned int value = 0;
- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
regmap_read(rdev->regmap, regulator->status_reg, &value);
return (value & rdev->desc->enable_mask);
@@ -80,7 +80,7 @@ static int hi655x_is_enabled(struct regulator_dev *rdev)
static int hi655x_disable(struct regulator_dev *rdev)
{
- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
return regmap_write(rdev->regmap, regulator->disable_reg,
rdev->desc->enable_mask);
@@ -169,7 +169,6 @@ static const struct hi655x_regulator regulators[] = {
static int hi655x_regulator_probe(struct platform_device *pdev)
{
unsigned int i;
- struct hi655x_regulator *regulator;
struct hi655x_pmic *pmic;
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -180,22 +179,17 @@ static int hi655x_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
- if (!regulator)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, regulator);
-
config.dev = pdev->dev.parent;
config.regmap = pmic->regmap;
- config.driver_data = regulator;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ config.driver_data = (void *) &regulators[i];
+
rdev = devm_regulator_register(&pdev->dev,
&regulators[i].rdesc,
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
- regulator->rdesc.name);
+ regulators[i].rdesc.name);
return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 2391b565ef11..1e9c71642143 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -15,6 +15,17 @@
#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+#define rdev_crit(rdev, fmt, ...) \
+ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_err(rdev, fmt, ...) \
+ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...) \
+ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...) \
+ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...) \
+ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+
struct regulator_voltage {
int min_uV;
int max_uV;
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
new file mode 100644
index 000000000000..fabe2e53093e
--- /dev/null
+++ b/drivers/regulator/irq_helpers.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2021 ROHM Semiconductors
+// regulator IRQ based event notification helpers
+//
+// Logic has been partially adapted from qcom-labibb driver.
+//
+// Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regulator/driver.h>
+
+#include "internal.h"
+
+#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
+
+struct regulator_irq {
+ struct regulator_irq_data rdata;
+ struct regulator_irq_desc desc;
+ int irq;
+ int retry_cnt;
+ struct delayed_work isr_work;
+};
+
+/*
+ * Should only be called from a threaded handler to prevent a potential deadlock
+ */
+static void rdev_flag_err(struct regulator_dev *rdev, int err)
+{
+ spin_lock(&rdev->err_lock);
+ rdev->cached_err |= err;
+ spin_unlock(&rdev->err_lock);
+}
+
+static void rdev_clear_err(struct regulator_dev *rdev, int err)
+{
+ spin_lock(&rdev->err_lock);
+ rdev->cached_err &= ~err;
+ spin_unlock(&rdev->err_lock);
+}
+
+static void regulator_notifier_isr_work(struct work_struct *work)
+{
+ struct regulator_irq *h;
+ struct regulator_irq_desc *d;
+ struct regulator_irq_data *rid;
+ int ret = 0;
+ int tmo, i;
+ int num_rdevs;
+
+ h = container_of(work, struct regulator_irq,
+ isr_work.work);
+ d = &h->desc;
+ rid = &h->rdata;
+ num_rdevs = rid->num_states;
+
+reread:
+ if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
+ if (!d->die)
+ return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ ret = d->die(rid);
+ /*
+ * If the 'last resort' IC recovery failed we will have
+ * nothing else left to do...
+ */
+ if (ret)
+ return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+
+ /*
+ * If h->die() was implemented we assume recovery has been
+ * attempted (probably the regulator was shut down) and we
+ * just re-enable the IRQ and bail out.
+ */
+ goto enable_out;
+ }
+ if (d->renable) {
+ ret = d->renable(rid);
+
+ if (ret == REGULATOR_FAILED_RETRY) {
+ /* Driver could not get current status */
+ h->retry_cnt++;
+ if (!d->reread_ms)
+ goto reread;
+
+ tmo = d->reread_ms;
+ goto reschedule;
+ }
+
+ if (ret) {
+ /*
+ * IC status reading succeeded. Update the error info in
+ * case renable() changed it.
+ */
+ for (i = 0; i < num_rdevs; i++) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+ rdev_clear_err(rdev, (~stat->errors) &
+ stat->possible_errs);
+ }
+ h->retry_cnt++;
+ /*
+ * The problem flagged by the IC is still present - no point
+ * in re-enabling the IRQ. Retry later.
+ */
+ tmo = d->irq_off_ms;
+ goto reschedule;
+ }
+ }
+
+ /*
+ * Either the IC reported the problem as cleared or no status checker
+ * was provided. If the problems are gone - good. If not, the IRQ will
+ * fire again and we will take another pass through this loop. In any
+ * case we should clear the error flags here and re-enable IRQs.
+ */
+ for (i = 0; i < num_rdevs; i++) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+ rdev_clear_err(rdev, stat->possible_errs);
+ }
+
+ /*
+ * Everything seems to have been successful => zero the retry counter.
+ */
+ h->retry_cnt = 0;
+
+enable_out:
+ enable_irq(h->irq);
+
+ return;
+
+reschedule:
+ if (!d->high_prio)
+ mod_delayed_work(system_wq, &h->isr_work,
+ msecs_to_jiffies(tmo));
+ else
+ mod_delayed_work(system_highpri_wq, &h->isr_work,
+ msecs_to_jiffies(tmo));
+}
+
+static irqreturn_t regulator_notifier_isr(int irq, void *data)
+{
+ struct regulator_irq *h = data;
+ struct regulator_irq_desc *d;
+ struct regulator_irq_data *rid;
+ unsigned long rdev_map = 0;
+ int num_rdevs;
+ int ret, i;
+
+ d = &h->desc;
+ rid = &h->rdata;
+ num_rdevs = rid->num_states;
+
+ if (d->fatal_cnt)
+ h->retry_cnt++;
+
+ /*
+ * We spare a few cycles by not clearing statuses prior to this call.
+ * The IC driver must initialize the status buffers for those rdevs
+ * for which it indicates active events via rdev_map.
+ *
+ * Maybe we should clear them anyway, just to be on the safe side(?)
+ */
+ ret = d->map_event(irq, rid, &rdev_map);
+
+ /*
+ * If status reading fails (which is unlikely) we don't ack/disable
+ * IRQ but just increase fail count and retry when IRQ fires again.
+ * If retry_count exceeds the given safety limit we call the
+ * IC-specific die handler, which can try disabling the regulator(s).
+ *
+ * If no die handler is given we attempt a hardware-protection
+ * shutdown as a last resort.
+ *
+ * We could try disabling all associated rdevs - but we might shoot
+ * ourselves in the head and leave the problematic regulator enabled. So
+ * if IC has no die-handler populated we just assume the regulator
+ * can't be disabled.
+ */
+ if (unlikely(ret == REGULATOR_FAILED_RETRY))
+ goto fail_out;
+
+ h->retry_cnt = 0;
+ /*
+ * Let's not disable the IRQ if there were no status bits for us. We'd
+ * better leave spurious IRQ handling to genirq.
+ */
+ if (ret || !rdev_map)
+ return IRQ_NONE;
+
+ /*
+ * Some events are bogus if the regulator is disabled. Skip such events
+ * if all relevant regulators are disabled.
+ */
+ if (d->skip_off) {
+ for_each_set_bit(i, &rdev_map, num_rdevs) {
+ struct regulator_dev *rdev;
+ const struct regulator_ops *ops;
+
+ rdev = rid->states[i].rdev;
+ ops = rdev->desc->ops;
+
+ /*
+ * If any of the flagged regulators is enabled we do
+ * handle this.
+ */
+ if (ops->is_enabled(rdev))
+ break;
+ }
+ if (i == num_rdevs)
+ return IRQ_NONE;
+ }
+
+ /* Disable IRQ if HW keeps line asserted */
+ if (d->irq_off_ms)
+ disable_irq_nosync(irq);
+
+ /*
+ * IRQ seems to be for us. Let's fire the correct notifiers and
+ * store the error flags.
+ */
+ for_each_set_bit(i, &rdev_map, num_rdevs) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+
+ rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
+ stat->notifs);
+
+ regulator_notifier_call_chain(rdev, stat->notifs, NULL);
+ rdev_flag_err(rdev, stat->errors);
+ }
+
+ if (d->irq_off_ms) {
+ if (!d->high_prio)
+ schedule_delayed_work(&h->isr_work,
+ msecs_to_jiffies(d->irq_off_ms));
+ else
+ mod_delayed_work(system_highpri_wq,
+ &h->isr_work,
+ msecs_to_jiffies(d->irq_off_ms));
+ }
+
+ return IRQ_HANDLED;
+
+fail_out:
+ if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
+ /* If we have no recovery, just try shut down straight away */
+ if (!d->die) {
+ hw_protection_shutdown("Regulator failure. Retry count exceeded",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ } else {
+ ret = d->die(rid);
+ /* If die() failed shut down as a last attempt to save the HW */
+ if (ret)
+ hw_protection_shutdown("Regulator failure. Recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static int init_rdev_state(struct device *dev, struct regulator_irq *h,
+ struct regulator_dev **rdev, int common_err,
+ int *rdev_err, int rdev_amount)
+{
+ int i;
+
+ h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
+ rdev_amount, GFP_KERNEL);
+ if (!h->rdata.states)
+ return -ENOMEM;
+
+ h->rdata.num_states = rdev_amount;
+ h->rdata.data = h->desc.data;
+
+ for (i = 0; i < rdev_amount; i++) {
+ h->rdata.states[i].possible_errs = common_err;
+ if (rdev_err)
+ h->rdata.states[i].possible_errs |= *rdev_err++;
+ h->rdata.states[i].rdev = *rdev++;
+ }
+
+ return 0;
+}
+
+static void init_rdev_errors(struct regulator_irq *h)
+{
+ int i;
+
+ for (i = 0; i < h->rdata.num_states; i++)
+ if (h->rdata.states[i].possible_errs)
+ h->rdata.states[i].rdev->use_cached_err = true;
+}
+
+/**
+ * regulator_irq_helper - register IRQ based regulator event/error notifier
+ *
+ * @dev: device providing the IRQs
+ * @d: IRQ helper descriptor.
+ * @irq: IRQ used to signal the events/errors to be notified.
+ * @irq_flags: Extra IRQ flags to be OR'ed with the default
+ * IRQF_ONESHOT when requesting the (threaded) irq.
+ * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
+ * When the IRQ is re-enabled these errors will be cleared
+ * from all associated regulators.
+ * @per_rdev_errs: Optional array of error flags describing errors specific
+ * to only some of the regulators. These errors will be
+ * OR'ed with the common errors. If given, the array
+ * should contain rdev_amount flags. May be NULL if there
+ * are no regulator-specific error flags for this IRQ.
+ * @rdev: Array of pointers to the regulators associated with this
+ * IRQ.
+ * @rdev_amount: Number of regulators associated with this IRQ.
+ *
+ * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ */
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount)
+{
+ struct regulator_irq *h;
+ int ret;
+
+ if (!rdev_amount || !d || !d->map_event || !d->name)
+ return ERR_PTR(-EINVAL);
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->irq = irq;
+ h->desc = *d;
+
+ ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
+ rdev_amount);
+ if (ret)
+ return ERR_PTR(ret);
+
+ init_rdev_errors(h);
+
+ if (h->desc.irq_off_ms)
+ INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);
+
+ ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
+ IRQF_ONESHOT | irq_flags, h->desc.name, h);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+
+ return ERR_PTR(ret);
+ }
+
+ return h;
+}
+EXPORT_SYMBOL_GPL(regulator_irq_helper);
+
+/**
+ * regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
+ *
+ * @handle: Pointer to handle returned by a successful call to
+ * regulator_irq_helper(). Will be NULLed upon return.
+ *
+ * The associated IRQ is released and work is cancelled when the function
+ * returns.
+ */
+void regulator_irq_helper_cancel(void **handle)
+{
+ if (handle && *handle) {
+ struct regulator_irq *h = *handle;
+
+ free_irq(h->irq, h);
+ if (h->desc.irq_off_ms)
+ cancel_delayed_work_sync(&h->isr_work);
+
+ *handle = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
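A hypothetical map_event sketch showing the contract the ISR above relies on: set a bit in *dev_mask for each affected regulator and fill in its notifications and errors. FOO_REG_STATUS and FOO_OCP_BIT are invented, and the regmap is assumed to have been stashed in desc->data:

    #define FOO_REG_STATUS  0x20    /* hypothetical status register */
    #define FOO_OCP_BIT     BIT(0)  /* hypothetical OCP status bit */

    static int foo_map_ocp_event(int irq, struct regulator_irq_data *rid,
                                 unsigned long *dev_mask)
    {
            struct regmap *regmap = rid->data;
            unsigned int status;

            if (regmap_read(regmap, FOO_REG_STATUS, &status))
                    return REGULATOR_FAILED_RETRY;  /* retry the read later */

            if (!(status & FOO_OCP_BIT))
                    return 0;  /* mask left empty => ISR returns IRQ_NONE */

            *dev_mask = BIT(0);
            rid->states[0].notifs = REGULATOR_EVENT_OVER_CURRENT;
            rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT;

            return 0;
    }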
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 13c535711265..321bec6e3f8d 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -136,52 +136,9 @@ err_i2c:
return 0;
}
-static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- int ret;
- unsigned int regval = 0x00;
- enum lp8755_bucks id = rdev_get_id(rdev);
-
- /* uV/us */
- switch (ramp) {
- case 0 ... 230:
- regval = 0x07;
- break;
- case 231 ... 470:
- regval = 0x06;
- break;
- case 471 ... 940:
- regval = 0x05;
- break;
- case 941 ... 1900:
- regval = 0x04;
- break;
- case 1901 ... 3800:
- regval = 0x03;
- break;
- case 3801 ... 7500:
- regval = 0x02;
- break;
- case 7501 ... 15000:
- regval = 0x01;
- break;
- case 15001 ... 30000:
- regval = 0x00;
- break;
- default:
- dev_err(&rdev->dev,
- "Not supported ramp value %d %s\n", ramp, __func__);
- return -EINVAL;
- }
-
- ret = regmap_update_bits(rdev->regmap, 0x07 + id, 0x07, regval);
- if (ret < 0)
- goto err_i2c;
- return ret;
-err_i2c:
- dev_err(&rdev->dev, "i2c access error %s\n", __func__);
- return ret;
-}
+static const unsigned int lp8755_buck_ramp_table[] = {
+ 30000, 15000, 7500, 3800, 1900, 940, 470, 230
+};
static const struct regulator_ops lp8755_buck_ops = {
.map_voltage = regulator_map_voltage_linear,
@@ -194,7 +151,7 @@ static const struct regulator_ops lp8755_buck_ops = {
.enable_time = lp8755_buck_enable_time,
.set_mode = lp8755_buck_set_mode,
.get_mode = lp8755_buck_get_mode,
- .set_ramp_delay = lp8755_buck_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
#define lp8755_rail(_id) "lp8755_buck"#_id
@@ -269,6 +226,10 @@ out_i2c_error:
.enable_mask = LP8755_BUCK_EN_M,\
.vsel_reg = LP8755_REG_BUCK##_id,\
.vsel_mask = LP8755_BUCK_VOUT_M,\
+ .ramp_reg = (LP8755_BUCK##_id) + 0x7,\
+ .ramp_mask = 0x7,\
+ .ramp_delay_table = lp8755_buck_ramp_table,\
+ .n_ramp_values = ARRAY_SIZE(lp8755_buck_ramp_table),\
}
static const struct regulator_desc lp8755_regulators[] = {
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 38f7ccb63b52..5e0b669c3a01 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -54,6 +54,11 @@
#define LTC3589_VCCR_SW3_GO BIT(4)
#define LTC3589_VCCR_LDO2_GO BIT(6)
+#define LTC3589_VRRCR_SW1_RAMP_MASK GENMASK(1, 0)
+#define LTC3589_VRRCR_SW2_RAMP_MASK GENMASK(3, 2)
+#define LTC3589_VRRCR_SW3_RAMP_MASK GENMASK(5, 4)
+#define LTC3589_VRRCR_LDO2_RAMP_MASK GENMASK(7, 6)
+
enum ltc3589_variant {
LTC3589,
LTC3589_1,
@@ -88,27 +93,9 @@ static const int ltc3589_12_ldo4[] = {
1200000, 1800000, 2500000, 3200000,
};
-static int ltc3589_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- struct ltc3589 *ltc3589 = rdev_get_drvdata(rdev);
- int sel, shift;
-
- if (unlikely(ramp_delay <= 0))
- return -EINVAL;
-
- /* VRRCR slew rate offsets are the same as VCCR go bit offsets */
- shift = ffs(rdev->desc->apply_bit) - 1;
-
- /* The slew rate can be set to 0.88, 1.75, 3.5, or 7 mV/uS */
- for (sel = 0; sel < 4; sel++) {
- if ((880 << sel) >= ramp_delay) {
- return regmap_update_bits(ltc3589->regmap,
- LTC3589_VRRCR,
- 0x3 << shift, sel << shift);
- }
- }
- return -EINVAL;
-}
+static const unsigned int ltc3589_ramp_table[] = {
+ 880, 1750, 3500, 7000
+};
static int ltc3589_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
@@ -149,7 +136,7 @@ static const struct regulator_ops ltc3589_linear_regulator_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_ramp_delay = ltc3589_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_voltage = ltc3589_set_suspend_voltage,
.set_suspend_mode = ltc3589_set_suspend_mode,
@@ -218,16 +205,13 @@ static int ltc3589_of_parse_cb(struct device_node *np,
return 0;
}
-#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit)\
+#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask) \
[LTC3589_ ## _name] = { \
.name = #_name, \
.of_match = of_match_ptr(#_of_name), \
.regulators_node = of_match_ptr("regulators"), \
.of_parse_cb = ltc3589_of_parse_cb, \
.n_voltages = (dtv_mask) + 1, \
- .min_uV = (go_bit) ? 362500 : 0, \
- .uV_step = (go_bit) ? 12500 : 0, \
- .ramp_delay = (go_bit) ? 1750 : 0, \
.fixed_uV = (dtv_mask) ? 0 : 800000, \
.ops = &ltc3589_ ## _ops ## _regulator_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -235,30 +219,49 @@ static int ltc3589_of_parse_cb(struct device_node *np,
.owner = THIS_MODULE, \
.vsel_reg = (dtv1_reg), \
.vsel_mask = (dtv_mask), \
- .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
- .apply_bit = (go_bit), \
.enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
.enable_mask = (en_bit), \
}
#define LTC3589_LINEAR_REG(_name, _of_name, _dtv1) \
- LTC3589_REG(_name, _of_name, linear, LTC3589_OVEN_ ## _name, \
- LTC3589_ ## _dtv1, 0x1f, \
- LTC3589_VCCR_ ## _name ## _GO)
+ [LTC3589_ ## _name] = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(#_of_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = ltc3589_of_parse_cb, \
+ .n_voltages = 32, \
+ .min_uV = 362500, \
+ .uV_step = 12500, \
+ .ramp_delay = 1750, \
+ .ops = &ltc3589_linear_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = LTC3589_ ## _name, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = LTC3589_ ## _dtv1, \
+ .vsel_mask = 0x1f, \
+ .apply_reg = LTC3589_VCCR, \
+ .apply_bit = LTC3589_VCCR_ ## _name ## _GO, \
+ .enable_reg = LTC3589_OVEN, \
+ .enable_mask = (LTC3589_OVEN_ ## _name), \
+ .ramp_reg = LTC3589_VRRCR, \
+ .ramp_mask = LTC3589_VRRCR_ ## _name ## _RAMP_MASK, \
+ .ramp_delay_table = ltc3589_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(ltc3589_ramp_table), \
+ }
+
#define LTC3589_FIXED_REG(_name, _of_name) \
- LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
+ LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0)
static const struct regulator_desc ltc3589_regulators[] = {
LTC3589_LINEAR_REG(SW1, sw1, B1DTV1),
LTC3589_LINEAR_REG(SW2, sw2, B2DTV1),
LTC3589_LINEAR_REG(SW3, sw3, B3DTV1),
LTC3589_FIXED_REG(BB_OUT, bb-out),
- LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
+ LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0),
LTC3589_LINEAR_REG(LDO2, ldo2, L2DTV1),
LTC3589_FIXED_REG(LDO3, ldo3),
- LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2,
- 0x60, 0),
+ LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, 0x60),
};
static bool ltc3589_writeable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 8d9731e4052b..3cf8f085170a 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = pmic;
+ /*
+ * Set the of_node_reuse flag to prevent the driver core from
+ * attempting to claim any pinmux resources already claimed by the
+ * parent device. Otherwise the PMIC driver will fail to re-probe.
+ */
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
for (id = 0; id < MAX77620_NUM_REGS; id++) {
struct regulator_dev *rdev;
struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
return ret;
rdev = devm_regulator_register(dev, rdesc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(dev, "Regulator registration %s failed: %d\n",
- rdesc->name, ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Regulator registration %s failed\n",
+ rdesc->name);
}
return 0;
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 9089ec608fcc..55a07d3f3ee2 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -67,13 +67,6 @@
#define MAX77686_REGULATORS MAX77686_REG_MAX
#define MAX77686_LDOS 26
-enum max77686_ramp_rate {
- RAMP_RATE_13P75MV,
- RAMP_RATE_27P5MV,
- RAMP_RATE_55MV,
- RAMP_RATE_NO_CTRL, /* 100mV/us */
-};
-
struct max77686_data {
struct device *dev;
DECLARE_BITMAP(gpio_enabled, MAX77686_REGULATORS);
@@ -220,31 +213,6 @@ static int max77686_enable(struct regulator_dev *rdev)
max77686->opmode[id] << shift);
}
-static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RAMP_RATE_NO_CTRL;
-
- switch (ramp_delay) {
- case 1 ... 13750:
- ramp_value = RAMP_RATE_13P75MV;
- break;
- case 13751 ... 27500:
- ramp_value = RAMP_RATE_27P5MV;
- break;
- case 27501 ... 55000:
- ramp_value = RAMP_RATE_55MV;
- break;
- case 55001 ... 100000:
- break;
- default:
- pr_warn("%s: ramp_delay: %d not supported, setting 100000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77686_RAMP_RATE_MASK, ramp_value << 6);
-}
-
static int max77686_of_parse_cb(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
@@ -284,6 +252,10 @@ static int max77686_of_parse_cb(struct device_node *np,
return 0;
}
+static const unsigned int max77686_buck_dvs_ramp_table[] = {
+ 13750, 27500, 55000, 100000
+};
+
static const struct regulator_ops max77686_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -330,7 +302,7 @@ static const struct regulator_ops max77686_buck_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77686_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77686_set_suspend_disable,
};
@@ -462,6 +434,10 @@ static const struct regulator_ops max77686_buck_dvs_ops = {
.enable_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
.enable_mask = MAX77686_OPMODE_MASK \
<< MAX77686_OPMODE_BUCK234_SHIFT, \
+ .ramp_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
+ .ramp_mask = MAX77686_RAMP_RATE_MASK, \
+ .ramp_delay_table = max77686_buck_dvs_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77686_buck_dvs_ramp_table), \
}
static const struct regulator_desc regulators[] = {
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 7b8ec8c0bd15..21e0eb0f43f9 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -43,15 +43,14 @@
#define MAX77802_OFF_PWRREQ 0x1
#define MAX77802_LP_PWRREQ 0x2
-/* MAX77802 has two register formats: 2-bit and 4-bit */
-static const unsigned int ramp_table_77802_2bit[] = {
+static const unsigned int max77802_buck234_ramp_table[] = {
12500,
25000,
50000,
100000,
};
-static unsigned int ramp_table_77802_4bit[] = {
+static const unsigned int max77802_buck16_ramp_table[] = {
1000, 2000, 3030, 4000,
5000, 5880, 7140, 8330,
9090, 10000, 11110, 12500,
@@ -221,58 +220,6 @@ static int max77802_enable(struct regulator_dev *rdev)
max77802->opmode[id] << shift);
}
-static int max77802_find_ramp_value(struct regulator_dev *rdev,
- const unsigned int limits[], int size,
- unsigned int ramp_delay)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- if (ramp_delay <= limits[i])
- return i;
- }
-
- /* Use maximum value for no ramp control */
- dev_warn(&rdev->dev, "%s: ramp_delay: %d not supported, setting 100000\n",
- rdev->desc->name, ramp_delay);
- return size - 1;
-}
-
-/* Used for BUCKs 2-4 */
-static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev,
- int ramp_delay)
-{
- int id = rdev_get_id(rdev);
- unsigned int ramp_value;
-
- if (id > MAX77802_BUCK4) {
- dev_warn(&rdev->dev,
- "%s: regulator: ramp delay not supported\n",
- rdev->desc->name);
- return -EINVAL;
- }
- ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit,
- ARRAY_SIZE(ramp_table_77802_2bit), ramp_delay);
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77802_RAMP_RATE_MASK_2BIT,
- ramp_value << MAX77802_RAMP_RATE_SHIFT_2BIT);
-}
-
-/* For BUCK1, 6 */
-static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev,
- int ramp_delay)
-{
- unsigned int ramp_value;
-
- ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_4bit,
- ARRAY_SIZE(ramp_table_77802_4bit), ramp_delay);
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77802_RAMP_RATE_MASK_4BIT,
- ramp_value << MAX77802_RAMP_RATE_SHIFT_4BIT);
-}
-
/*
* LDOs 2, 4-19, 22-35
*/
@@ -316,7 +263,7 @@ static const struct regulator_ops max77802_buck_16_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_4bit,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
};
@@ -330,7 +277,7 @@ static const struct regulator_ops max77802_buck_234_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_2bit,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
@@ -345,7 +292,6 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_2bit,
.set_suspend_disable = max77802_set_suspend_disable,
};
@@ -409,6 +355,10 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
+ .ramp_reg = MAX77802_REG_BUCK ## num ## CTRL, \
+ .ramp_mask = MAX77802_RAMP_RATE_MASK_4BIT, \
+ .ramp_delay_table = max77802_buck16_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77802_buck16_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
@@ -431,6 +381,10 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.enable_mask = MAX77802_OPMODE_MASK << \
MAX77802_OPMODE_BUCK234_SHIFT, \
+ .ramp_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
+ .ramp_mask = MAX77802_RAMP_RATE_MASK_2BIT, \
+ .ramp_delay_table = max77802_buck234_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77802_buck234_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c
new file mode 100644
index 000000000000..1519bf760da7
--- /dev/null
+++ b/drivers/regulator/max8893.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+static const struct regulator_ops max8893_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
+static const struct regulator_desc max8893_regulators[] = {
+ {
+ .name = "BUCK",
+ .supply_name = "in-buck",
+ .of_match = of_match_ptr("buck"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x11,
+ .id = 6,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x4,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(7),
+ },
+ {
+ .name = "LDO1",
+ .supply_name = "in-ldo1",
+ .of_match = of_match_ptr("ldo1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x12,
+ .id = 1,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1600000,
+ .uV_step = 100000,
+ .vsel_reg = 0x5,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(5),
+ },
+ {
+ .name = "LDO2",
+ .supply_name = "in-ldo2",
+ .of_match = of_match_ptr("ldo2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x16,
+ .id = 2,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1200000,
+ .uV_step = 100000,
+ .vsel_reg = 0x6,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(4),
+ },
+ {
+ .name = "LDO3",
+ .supply_name = "in-ldo3",
+ .of_match = of_match_ptr("ldo3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x12,
+ .id = 3,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1600000,
+ .uV_step = 100000,
+ .vsel_reg = 0x7,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(3),
+ },
+ {
+ .name = "LDO4",
+ .supply_name = "in-ldo4",
+ .of_match = of_match_ptr("ldo4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x1a,
+ .id = 4,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x8,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(2),
+ },
+ {
+ .name = "LDO5",
+ .supply_name = "in-ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x1a,
+ .id = 5,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x9,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(1),
+ }
+};
+
+static const struct regmap_config max8893_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max8893_probe_new(struct i2c_client *i2c)
+{
+ int id, ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &max8893_regmap);
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(max8893_regulators); id++) {
+ struct regulator_dev *rdev;
+ rdev = devm_regulator_register(&i2c->dev,
+ &max8893_regulators[id],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ max8893_regulators[id].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max8893_dt_match[] = {
+ { .compatible = "maxim,max8893" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, max8893_dt_match);
+#endif
+
+static const struct i2c_device_id max8893_ids[] = {
+ { "max8893", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max8893_ids);
+
+static struct i2c_driver max8893_driver = {
+ .probe_new = max8893_probe_new,
+ .driver = {
+ .name = "max8893",
+ .of_match_table = of_match_ptr(max8893_dt_match),
+ },
+ .id_table = max8893_ids,
+};
+
+module_i2c_driver(max8893_driver);
+
+MODULE_DESCRIPTION("Maxim MAX8893 PMIC driver");
+MODULE_AUTHOR("Sergey Larin <cerg2010cerg2010@mail.ru>");
+MODULE_LICENSE("GPL");
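For completeness, a hypothetical non-DT instantiation of the new driver; the 0x3e address is illustrative only, DT binding via "maxim,max8893" being the expected path:

    static struct i2c_board_info max8893_board_info __initdata = {
            I2C_BOARD_INFO("max8893", 0x3e),
    };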
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 9aee1444181d..8da8f9b6c4fd 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -265,33 +265,6 @@ static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev)
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
-static int max8973_set_ramp_delay(struct regulator_dev *rdev,
- int ramp_delay)
-{
- struct max8973_chip *max = rdev_get_drvdata(rdev);
- unsigned int control;
- int ret;
-
- /* Set ramp delay */
- if (ramp_delay <= 12000)
- control = MAX8973_RAMP_12mV_PER_US;
- else if (ramp_delay <= 25000)
- control = MAX8973_RAMP_25mV_PER_US;
- else if (ramp_delay <= 50000)
- control = MAX8973_RAMP_50mV_PER_US;
- else if (ramp_delay <= 200000)
- control = MAX8973_RAMP_200mV_PER_US;
- else
- return -EINVAL;
-
- ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1,
- MAX8973_RAMP_MASK, control);
- if (ret < 0)
- dev_err(max->dev, "register %d update failed, %d",
- MAX8973_CONTROL1, ret);
- return ret;
-}
-
static int max8973_set_current_limit(struct regulator_dev *rdev,
int min_ua, int max_ua)
{
@@ -341,6 +314,10 @@ static int max8973_get_current_limit(struct regulator_dev *rdev)
return 9000000;
}
+static const unsigned int max8973_buck_ramp_table[] = {
+ 12000, 25000, 50000, 200000
+};
+
static const struct regulator_ops max8973_dcdc_ops = {
.get_voltage_sel = max8973_dcdc_get_voltage_sel,
.set_voltage_sel = max8973_dcdc_set_voltage_sel,
@@ -348,7 +325,7 @@ static const struct regulator_ops max8973_dcdc_ops = {
.set_mode = max8973_dcdc_set_mode,
.get_mode = max8973_dcdc_get_mode,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max8973_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static int max8973_init_dcdc(struct max8973_chip *max,
@@ -694,6 +671,10 @@ static int max8973_probe(struct i2c_client *client,
max->desc.min_uV = MAX8973_MIN_VOLATGE;
max->desc.uV_step = MAX8973_VOLATGE_STEP;
max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;
+ max->desc.ramp_reg = MAX8973_CONTROL1;
+ max->desc.ramp_mask = MAX8973_RAMP_MASK;
+ max->desc.ramp_delay_table = max8973_buck_ramp_table;
+ max->desc.n_ramp_values = ARRAY_SIZE(max8973_buck_ramp_table);
max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
max->enable_external_control = pdata->enable_ext_control;
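
This max8973 conversion sets the pattern repeated throughout the hunks that follow: the open-coded .set_ramp_delay callback goes away, and regulator_set_ramp_delay_regmap() instead selects an entry from ramp_delay_table and writes its index through ramp_reg/ramp_mask. A simplified sketch of the selection step, assuming (as the tables in this series require, since they come both ascending and descending) that the table may be in any order; the real logic lives in the regulator core:

	/* Return the index of the smallest table entry >= ramp_delay. */
	static int pick_ramp_sel(const unsigned int *table, unsigned int n,
				 unsigned int ramp_delay)
	{
		int best = -1;
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (table[i] < ramp_delay)
				continue;
			if (best < 0 || table[i] < table[best])
				best = i;
		}
		return best;	/* written into ramp_reg under ramp_mask */
	}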
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 88c6bd5b6c78..042668385678 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -90,10 +90,14 @@ enum mcp16502_reg {
};
/* Ramp delay (uV/us) for buck1, ldo1, ldo2. */
-static const int mcp16502_ramp_b1l12[] = { 6250, 3125, 2083, 1563 };
+static const unsigned int mcp16502_ramp_b1l12[] = {
+ 6250, 3125, 2083, 1563
+};
/* Ramp delay (uV/us) for buck2, buck3, buck4. */
-static const int mcp16502_ramp_b234[] = { 3125, 1563, 1042, 781 };
+static const unsigned int mcp16502_ramp_b234[] = {
+ 3125, 1563, 1042, 781
+};
static unsigned int mcp16502_of_map_mode(unsigned int mode)
{
@@ -103,7 +107,7 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
return REGULATOR_MODE_INVALID;
}
-#define MCP16502_REGULATOR(_name, _id, _ranges, _ops) \
+#define MCP16502_REGULATOR(_name, _id, _ranges, _ops, _ramp_table) \
[_id] = { \
.name = _name, \
.regulators_node = of_match_ptr("regulators"), \
@@ -121,6 +125,10 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
.vsel_mask = MCP16502_VSEL, \
.enable_reg = (((_id) + 1) << 4), \
.enable_mask = MCP16502_EN, \
+ .ramp_reg = MCP16502_REG_BASE(_id, CFG), \
+ .ramp_mask = MCP16502_DVSR, \
+ .ramp_delay_table = _ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(_ramp_table), \
}
enum {
@@ -314,42 +322,6 @@ static int mcp16502_set_voltage_time_sel(struct regulator_dev *rdev,
return ret;
}
-static int mcp16502_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- const int *ramp;
- int id = rdev_get_id(rdev);
- unsigned int i, size;
-
- switch (id) {
- case BUCK1:
- case LDO1:
- case LDO2:
- ramp = mcp16502_ramp_b1l12;
- size = ARRAY_SIZE(mcp16502_ramp_b1l12);
- break;
-
- case BUCK2:
- case BUCK3:
- case BUCK4:
- ramp = mcp16502_ramp_b234;
- size = ARRAY_SIZE(mcp16502_ramp_b234);
- break;
-
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < size; i++) {
- if (ramp[i] == ramp_delay)
- break;
- }
- if (i == size)
- return -EINVAL;
-
- return regmap_update_bits(rdev->regmap, MCP16502_REG_BASE(id, CFG),
- MCP16502_DVSR, (i << 2));
-}
-
#ifdef CONFIG_SUSPEND
/*
* mcp16502_suspend_get_target_reg() - get the reg of the target suspend PMIC
@@ -445,7 +417,7 @@ static const struct regulator_ops mcp16502_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.get_status = mcp16502_get_status,
.set_voltage_time_sel = mcp16502_set_voltage_time_sel,
- .set_ramp_delay = mcp16502_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_mode = mcp16502_set_mode,
.get_mode = mcp16502_get_mode,
@@ -471,7 +443,7 @@ static const struct regulator_ops mcp16502_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.get_status = mcp16502_get_status,
.set_voltage_time_sel = mcp16502_set_voltage_time_sel,
- .set_ramp_delay = mcp16502_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
#ifdef CONFIG_SUSPEND
.set_suspend_voltage = mcp16502_set_suspend_voltage,
@@ -495,13 +467,19 @@ static const struct linear_range b234_ranges[] = {
};
static const struct regulator_desc mcp16502_desc[] = {
- /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops) */
- MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops),
- MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops)
+ /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops, ramp_table) */
+ MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b1l12),
+ MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops,
+ mcp16502_ramp_b1l12),
+ MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops,
+ mcp16502_ramp_b1l12)
};
static const struct regmap_range mcp16502_ranges[] = {
@@ -522,8 +500,7 @@ static const struct regmap_config mcp16502_regmap_config = {
.wr_table = &mcp16502_yes_reg_table,
};
-static int mcp16502_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp16502_probe(struct i2c_client *client)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -606,7 +583,7 @@ static const struct i2c_device_id mcp16502_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id);
static struct i2c_driver mcp16502_drv = {
- .probe = mcp16502_probe,
+ .probe_new = mcp16502_probe,
.driver = {
.name = "mcp16502-regulator",
.of_match_table = of_match_ptr(mcp16502_ids),
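
The mcp16502 hunk also converts the driver to the I2C core's probe_new callback, which drops the i2c_device_id argument that this driver never used; a driver that still needs per-id data can recover it with i2c_match_id(). The two signatures, paraphrased from the I2C core:

	int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
	int (*probe_new)(struct i2c_client *client);	/* id argument dropped */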
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 67ce1b52a1a1..39cebec0edb6 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -67,6 +67,10 @@
.vsel_mask = MP5416_MASK_VSET, \
.enable_reg = MP5416_REG_BUCK ## _id, \
.enable_mask = MP5416_REGULATOR_EN, \
+ .ramp_reg = MP5416_REG_CTL2, \
+ .ramp_mask = MP5416_MASK_DVS_SLEWRATE, \
+ .ramp_delay_table = mp5416_buck_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(mp5416_buck_ramp_table), \
.active_discharge_on = _dval, \
.active_discharge_reg = _dreg, \
.active_discharge_mask = _dval, \
@@ -123,7 +127,16 @@ static const unsigned int mp5416_I_limits2[] = {
2200000, 3200000, 4200000, 5200000
};
-static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00: 32mV/us
+ * 01: 16mV/us
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static const unsigned int mp5416_buck_ramp_table[] = {
+ 32000, 16000, 8000, 4000
+};
static const struct regulator_ops mp5416_ldo_ops = {
.enable = regulator_enable_regmap,
@@ -147,7 +160,7 @@ static const struct regulator_ops mp5416_buck_ops = {
.set_active_discharge = regulator_set_active_discharge_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
.set_current_limit = regulator_set_current_limit_regmap,
- .set_ramp_delay = mp5416_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
@@ -161,33 +174,6 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416LDO("ldo4", 4, BIT(1)),
};
-/*
- * DVS ramp rate BUCK1 to BUCK4
- * 00: 32mV/us
- * 01: 16mV/us
- * 10: 8mV/us
- * 11: 4mV/us
- */
-static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_val;
-
- if (ramp_delay > 32000 || ramp_delay < 0)
- return -EINVAL;
-
- if (ramp_delay <= 4000)
- ramp_val = 3;
- else if (ramp_delay <= 8000)
- ramp_val = 2;
- else if (ramp_delay <= 16000)
- ramp_val = 1;
- else
- ramp_val = 0;
-
- return regmap_update_bits(rdev->regmap, MP5416_REG_CTL2,
- MP5416_MASK_DVS_SLEWRATE, ramp_val << 6);
-}
-
static int mp5416_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index a84fd74081de..8ad4722eca4b 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -26,7 +26,7 @@
struct mp886x_cfg_info {
const struct regulator_ops *rops;
- const int slew_rates[8];
+ const unsigned int slew_rates[8];
const int switch_freq[4];
const u8 fs_reg;
const u8 fs_shift;
@@ -42,28 +42,6 @@ struct mp886x_device_info {
unsigned int sel;
};
-static int mp886x_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- struct mp886x_device_info *di = rdev_get_drvdata(rdev);
- const struct mp886x_cfg_info *ci = di->ci;
- int reg = -1, i;
-
- for (i = 0; i < ARRAY_SIZE(ci->slew_rates); i++) {
- if (ramp <= ci->slew_rates[i])
- reg = i;
- else
- break;
- }
-
- if (reg < 0) {
- dev_err(di->dev, "unsupported ramp value %d\n", ramp);
- return -EINVAL;
- }
-
- return regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
- MP886X_SLEW_MASK, reg << MP886X_SLEW_SHIFT);
-}
-
static void mp886x_set_switch_freq(struct mp886x_device_info *di,
struct regmap *regmap,
u32 freq)
@@ -169,7 +147,7 @@ static const struct regulator_ops mp8869_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
- .set_ramp_delay = mp886x_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct mp886x_cfg_info mp8869_ci = {
@@ -248,7 +226,7 @@ static const struct regulator_ops mp8867_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
- .set_ramp_delay = mp886x_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct mp886x_cfg_info mp8867_ci = {
@@ -290,6 +268,10 @@ static int mp886x_regulator_register(struct mp886x_device_info *di,
rdesc->uV_step = 10000;
rdesc->vsel_reg = MP886X_VSEL;
rdesc->vsel_mask = 0x3f;
+ rdesc->ramp_reg = MP886X_SYSCNTLREG1;
+ rdesc->ramp_mask = MP886X_SLEW_MASK;
+ rdesc->ramp_delay_table = di->ci->slew_rates;
+ rdesc->n_ramp_values = ARRAY_SIZE(di->ci->slew_rates);
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
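
The slew_rates type change is what lets each chip's table be handed to the core without a cast: the ramp fields in struct regulator_desc are, paraphrased from include/linux/regulator/driver.h:

	const unsigned int *ramp_delay_table;	/* uV/us, indexed by field value */
	unsigned int n_ramp_values;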
diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
index 9edc34981ee0..284c229e1aa4 100644
--- a/drivers/regulator/mt6315-regulator.c
+++ b/drivers/regulator/mt6315-regulator.c
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
};
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
{
switch (mode) {
case MT6315_BUCK_MODE_AUTO:
@@ -84,7 +84,7 @@ static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_4PHASE_ANA_CON42, &regval);
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to get mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get mode: %d\n", ret);
return ret;
}
@@ -93,7 +93,7 @@ static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_CON1, &regval);
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to get lp mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get lp mode: %d\n", ret);
return ret;
}
@@ -147,12 +147,12 @@ static int mt6315_regulator_set_mode(struct regulator_dev *rdev,
break;
default:
ret = -EINVAL;
- dev_notice(&rdev->dev, "Unsupported mode: %d\n", mode);
+ dev_err(&rdev->dev, "Unsupported mode: %d\n", mode);
break;
}
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to set mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to set mode: %d\n", ret);
return ret;
}
@@ -168,7 +168,7 @@ static int mt6315_get_status(struct regulator_dev *rdev)
info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
ret = regmap_read(rdev->regmap, info->status_reg, &regval);
if (ret < 0) {
- dev_notice(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
return ret;
}
@@ -223,8 +223,8 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
int i;
regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
- if (!regmap)
- return -ENODEV;
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
if (!chip)
@@ -260,8 +260,9 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
config.driver_data = init_data;
rdev = devm_regulator_register(dev, &mt6315_regulators[i].desc, &config);
if (IS_ERR(rdev)) {
- dev_notice(dev, "Failed to register %s\n", mt6315_regulators[i].desc.name);
- continue;
+ dev_err(dev, "Failed to register %s\n",
+ mt6315_regulators[i].desc.name);
+ return PTR_ERR(rdev);
}
}
@@ -279,7 +280,7 @@ static void mt6315_regulator_shutdown(struct spmi_device *pdev)
ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, 0);
ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, 0);
if (ret < 0)
- dev_notice(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
+ dev_err(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
pdev->usid, ret);
}
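
The probe fix in this mt6315 hunk is worth spelling out: devm_regmap_init_spmi_ext(), like the other devm_regmap_init_*() variants, reports failure through an ERR_PTR-encoded pointer and never returns NULL, so the old !regmap test could never fire. The idiomatic check, as the hunk now has it:

	regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
	if (IS_ERR(regmap))		/* failure is encoded in the pointer */
		return PTR_ERR(regmap);	/* decode the errno and bail out */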
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
index 13cb6ac9a892..0d35be4e0e5a 100644
--- a/drivers/regulator/mt6358-regulator.c
+++ b/drivers/regulator/mt6358-regulator.c
@@ -153,50 +153,50 @@ static const struct linear_range buck_volt_range4[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
};
-static const u32 vdram2_voltages[] = {
+static const unsigned int vdram2_voltages[] = {
600000, 1800000,
};
-static const u32 vsim_voltages[] = {
+static const unsigned int vsim_voltages[] = {
1700000, 1800000, 2700000, 3000000, 3100000,
};
-static const u32 vibr_voltages[] = {
+static const unsigned int vibr_voltages[] = {
1200000, 1300000, 1500000, 1800000,
2000000, 2800000, 3000000, 3300000,
};
-static const u32 vusb_voltages[] = {
+static const unsigned int vusb_voltages[] = {
3000000, 3100000,
};
-static const u32 vcamd_voltages[] = {
+static const unsigned int vcamd_voltages[] = {
900000, 1000000, 1100000, 1200000,
1300000, 1500000, 1800000,
};
-static const u32 vefuse_voltages[] = {
+static const unsigned int vefuse_voltages[] = {
1700000, 1800000, 1900000,
};
-static const u32 vmch_vemc_voltages[] = {
+static const unsigned int vmch_vemc_voltages[] = {
2900000, 3000000, 3300000,
};
-static const u32 vcama_voltages[] = {
+static const unsigned int vcama_voltages[] = {
1800000, 2500000, 2700000,
2800000, 2900000, 3000000,
};
-static const u32 vcn33_bt_wifi_voltages[] = {
+static const unsigned int vcn33_bt_wifi_voltages[] = {
3300000, 3400000, 3500000,
};
-static const u32 vmc_voltages[] = {
+static const unsigned int vmc_voltages[] = {
1800000, 2900000, 3000000, 3300000,
};
-static const u32 vldo28_voltages[] = {
+static const unsigned int vldo28_voltages[] = {
2800000, 3000000,
};
@@ -457,7 +457,7 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
MT6358_REG_FIXED("ldo_vaud28", VAUD28,
MT6358_LDO_VAUD28_CON0, 0, 2800000),
MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
- MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
+ MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c
new file mode 100644
index 000000000000..7ce0bd377a08
--- /dev/null
+++ b/drivers/regulator/mt6359-regulator.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2021 MediaTek Inc.
+
+#include <linux/platform_device.h>
+#include <linux/mfd/mt6359/registers.h>
+#include <linux/mfd/mt6359p/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6359-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6359_BUCK_MODE_AUTO 0
+#define MT6359_BUCK_MODE_FORCE_PWM 1
+#define MT6359_BUCK_MODE_NORMAL 0
+#define MT6359_BUCK_MODE_LP 2
+
+/*
+ * MT6359 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @status_reg: register for querying the status of the regulator.
+ * @qi: mask for querying the enable-signal status of the regulator.
+ * @modeset_reg: register for switching between AUTO and PWM mode.
+ * @modeset_mask: mask for operating on the modeset register.
+ * @modeset_shift: shift for operating on the modeset register.
+ * @lp_mode_reg: register for switching between normal and low-power mode.
+ * @lp_mode_mask: mask for operating on the lp_mode register.
+ * @lp_mode_shift: shift for operating on the lp_mode register.
+ */
+struct mt6359_regulator_info {
+ struct regulator_desc desc;
+ u32 status_reg;
+ u32 qi;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 modeset_shift;
+ u32 lp_mode_reg;
+ u32 lp_mode_mask;
+ u32 lp_mode_shift;
+};
+
+#define MT6359_BUCK(match, _name, min, max, step, \
+ _enable_reg, _status_reg, \
+ _vsel_reg, _vsel_mask, \
+ _lp_mode_reg, _lp_mode_shift, \
+ _modeset_reg, _modeset_shift) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .uV_step = (step), \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .min_uV = (min), \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ .of_map_mode = mt6359_map_mode, \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+ .lp_mode_reg = _lp_mode_reg, \
+ .lp_mode_mask = BIT(_lp_mode_shift), \
+ .lp_mode_shift = _lp_mode_shift, \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = BIT(_modeset_shift), \
+ .modeset_shift = _modeset_shift \
+}
+
+#define MT6359_LDO_LINEAR(match, _name, min, max, step, \
+ _enable_reg, _status_reg, _vsel_reg, _vsel_mask) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .uV_step = (step), \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .min_uV = (min), \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359_LDO(match, _name, _volt_table, \
+ _enable_reg, _enable_mask, _status_reg, \
+ _vsel_reg, _vsel_mask, _en_delay) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(_enable_mask), \
+ .enable_time = _en_delay, \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359_REG_FIXED(match, _name, _enable_reg, \
+ _status_reg, _fixed_volt) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ .fixed_uV = (_fixed_volt), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359P_LDO1(match, _name, _ops, _volt_table, \
+ _enable_reg, _enable_mask, _status_reg, \
+ _vsel_reg, _vsel_mask) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(_enable_mask), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+static const unsigned int vsim1_voltages[] = {
+ 0, 0, 0, 1700000, 1800000, 0, 0, 0, 2700000, 0, 0, 3000000, 3100000,
+};
+
+static const unsigned int vibr_voltages[] = {
+ 1200000, 1300000, 1500000, 0, 1800000, 2000000, 0, 0, 2700000, 2800000,
+ 0, 3000000, 0, 3300000,
+};
+
+static const unsigned int vrf12_voltages[] = {
+ 0, 0, 1100000, 1200000, 1300000,
+};
+
+static const unsigned int volt18_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1700000, 1800000, 1900000,
+};
+
+static const unsigned int vcn13_voltages[] = {
+ 900000, 1000000, 0, 1200000, 1300000,
+};
+
+static const unsigned int vcn33_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2800000, 0, 0, 0, 3300000, 3400000, 3500000,
+};
+
+static const unsigned int vefuse_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1700000, 1800000, 1900000, 2000000,
+};
+
+static const unsigned int vxo22_voltages[] = {
+ 1800000, 0, 0, 0, 2200000,
+};
+
+static const unsigned int vrfck_voltages[] = {
+ 0, 0, 1500000, 0, 0, 0, 0, 1600000, 0, 0, 0, 0, 1700000,
+};
+
+static const unsigned int vrfck_voltages_1[] = {
+ 1240000, 1600000,
+};
+
+static const unsigned int vio28_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2800000, 2900000, 3000000, 3100000, 3300000,
+};
+
+static const unsigned int vemc_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2900000, 3000000, 0, 3300000,
+};
+
+static const unsigned int vemc_voltages_1[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 2500000, 2800000, 2900000, 3000000, 3100000,
+ 3300000,
+};
+
+static const unsigned int va12_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 1200000, 1300000,
+};
+
+static const unsigned int va09_voltages[] = {
+ 0, 0, 800000, 900000, 0, 0, 1200000,
+};
+
+static const unsigned int vrf18_voltages[] = {
+ 0, 0, 0, 0, 0, 1700000, 1800000, 1810000,
+};
+
+static const unsigned int vbbck_voltages[] = {
+ 0, 0, 0, 0, 1100000, 0, 0, 0, 1150000, 0, 0, 0, 1200000,
+};
+
+static const unsigned int vsim2_voltages[] = {
+ 0, 0, 0, 1700000, 1800000, 0, 0, 0, 2700000, 0, 0, 3000000, 3100000,
+};
+
+static inline unsigned int mt6359_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case MT6359_BUCK_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case MT6359_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ case MT6359_BUCK_MODE_LP:
+ return REGULATOR_MODE_IDLE;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int mt6359_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 regval;
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ if (regval & info->qi)
+ return REGULATOR_STATUS_ON;
+ else
+ return REGULATOR_STATUS_OFF;
+}
+
+static unsigned int mt6359_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret, regval;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6359 buck mode: %d\n", ret);
+ return ret;
+ }
+
+ if ((regval & info->modeset_mask) >> info->modeset_shift ==
+ MT6359_BUCK_MODE_FORCE_PWM)
+ return REGULATOR_MODE_FAST;
+
+ ret = regmap_read(rdev->regmap, info->lp_mode_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6359 buck lp mode: %d\n", ret);
+ return ret;
+ }
+
+ if (regval & info->lp_mode_mask)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret = 0, val;
+ int curr_mode;
+
+ curr_mode = mt6359_regulator_get_mode(rdev);
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MT6359_BUCK_MODE_FORCE_PWM;
+ val <<= info->modeset_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->modeset_reg,
+ info->modeset_mask,
+ val);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ if (curr_mode == REGULATOR_MODE_FAST) {
+ val = MT6359_BUCK_MODE_AUTO;
+ val <<= info->modeset_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->modeset_reg,
+ info->modeset_mask,
+ val);
+ } else if (curr_mode == REGULATOR_MODE_IDLE) {
+ val = MT6359_BUCK_MODE_NORMAL;
+ val <<= info->lp_mode_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->lp_mode_reg,
+ info->lp_mode_mask,
+ val);
+ udelay(100);
+ }
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = MT6359_BUCK_MODE_LP >> 1;
+ val <<= info->lp_mode_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->lp_mode_reg,
+ info->lp_mode_mask,
+ val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to set mt6359 buck mode: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int mt6359p_vemc_set_voltage_sel(struct regulator_dev *rdev,
+ u32 sel)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val = 0;
+
+ sel <<= ffs(info->desc.vsel_mask) - 1;
+ ret = regmap_write(rdev->regmap, MT6359P_TMA_KEY_ADDR, TMA_KEY);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rdev->regmap, MT6359P_VM_MODE_ADDR, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ /* If HW trapping is 0, use VEMC_VOSEL_0 */
+ ret = regmap_update_bits(rdev->regmap,
+ info->desc.vsel_reg,
+ info->desc.vsel_mask, sel);
+ break;
+ case 1:
+ /* If HW trapping is 1, use VEMC_VOSEL_1 */
+ ret = regmap_update_bits(rdev->regmap,
+ info->desc.vsel_reg + 0x2,
+ info->desc.vsel_mask, sel);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rdev->regmap, MT6359P_TMA_KEY_ADDR, 0);
+ return ret;
+}
+
+static int mt6359p_vemc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val = 0;
+
+ ret = regmap_read(rdev->regmap, MT6359P_VM_MODE_ADDR, &val);
+ if (ret)
+ return ret;
+ switch (val) {
+ case 0:
+ /* If HW trapping is 0, use VEMC_VOSEL_0 */
+ ret = regmap_read(rdev->regmap,
+ info->desc.vsel_reg, &val);
+ break;
+ case 1:
+ /* If HW trapping is 1, use VEMC_VOSEL_1 */
+ ret = regmap_read(rdev->regmap,
+ info->desc.vsel_reg + 0x2, &val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ val &= info->desc.vsel_mask;
+ val >>= ffs(info->desc.vsel_mask) - 1;
+
+ return val;
+}
+
+static const struct regulator_ops mt6359_volt_linear_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+ .set_mode = mt6359_regulator_set_mode,
+ .get_mode = mt6359_regulator_get_mode,
+};
+
+static const struct regulator_ops mt6359_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+static const struct regulator_ops mt6359_volt_fixed_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+static const struct regulator_ops mt6359p_vemc_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = mt6359p_vemc_set_voltage_sel,
+ .get_voltage_sel = mt6359p_vemc_get_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+/* The array is indexed by id (MT6359_ID_XXX) */
+static struct mt6359_regulator_info mt6359_regulators[] = {
+ MT6359_BUCK("buck_vs1", VS1, 800000, 2200000, 12500,
+ MT6359_RG_BUCK_VS1_EN_ADDR,
+ MT6359_DA_VS1_EN_ADDR, MT6359_RG_BUCK_VS1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS1_LP_ADDR, MT6359_RG_BUCK_VS1_LP_SHIFT,
+ MT6359_RG_VS1_FPWM_ADDR, MT6359_RG_VS1_FPWM_SHIFT),
+ MT6359_BUCK("buck_vgpu11", VGPU11, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VGPU11_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR, MT6359_RG_BUCK_VGPU11_VOSEL_ADDR,
+ MT6359_RG_BUCK_VGPU11_VOSEL_MASK <<
+ MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_BUCK("buck_vmodem", VMODEM, 400000, 1100000, 6250,
+ MT6359_RG_BUCK_VMODEM_EN_ADDR,
+ MT6359_DA_VMODEM_EN_ADDR, MT6359_RG_BUCK_VMODEM_VOSEL_ADDR,
+ MT6359_RG_BUCK_VMODEM_VOSEL_MASK <<
+ MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VMODEM_LP_ADDR,
+ MT6359_RG_BUCK_VMODEM_LP_SHIFT,
+ MT6359_RG_VMODEM_FCCM_ADDR, MT6359_RG_VMODEM_FCCM_SHIFT),
+ MT6359_BUCK("buck_vpu", VPU, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPU_EN_ADDR,
+ MT6359_DA_VPU_EN_ADDR, MT6359_RG_BUCK_VPU_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPU_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPU_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPU_LP_ADDR, MT6359_RG_BUCK_VPU_LP_SHIFT,
+ MT6359_RG_VPU_FCCM_ADDR, MT6359_RG_VPU_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore", VCORE, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VCORE_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR, MT6359_RG_BUCK_VCORE_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_BUCK("buck_vs2", VS2, 800000, 1600000, 12500,
+ MT6359_RG_BUCK_VS2_EN_ADDR,
+ MT6359_DA_VS2_EN_ADDR, MT6359_RG_BUCK_VS2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS2_LP_ADDR, MT6359_RG_BUCK_VS2_LP_SHIFT,
+ MT6359_RG_VS2_FPWM_ADDR, MT6359_RG_VS2_FPWM_SHIFT),
+ MT6359_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ MT6359_RG_BUCK_VPA_EN_ADDR,
+ MT6359_DA_VPA_EN_ADDR, MT6359_RG_BUCK_VPA_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPA_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPA_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPA_LP_ADDR, MT6359_RG_BUCK_VPA_LP_SHIFT,
+ MT6359_RG_VPA_MODESET_ADDR, MT6359_RG_VPA_MODESET_SHIFT),
+ MT6359_BUCK("buck_vproc2", VPROC2, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC2_EN_ADDR,
+ MT6359_DA_VPROC2_EN_ADDR, MT6359_RG_BUCK_VPROC2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC2_LP_ADDR,
+ MT6359_RG_BUCK_VPROC2_LP_SHIFT,
+ MT6359_RG_VPROC2_FCCM_ADDR, MT6359_RG_VPROC2_FCCM_SHIFT),
+ MT6359_BUCK("buck_vproc1", VPROC1, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC1_EN_ADDR,
+ MT6359_DA_VPROC1_EN_ADDR, MT6359_RG_BUCK_VPROC1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC1_LP_ADDR,
+ MT6359_RG_BUCK_VPROC1_LP_SHIFT,
+ MT6359_RG_VPROC1_FCCM_ADDR, MT6359_RG_VPROC1_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore_sshub", VCORE_SSHUB, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR,
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_REG_FIXED("ldo_vaud18", VAUD18, MT6359_RG_LDO_VAUD18_EN_ADDR,
+ MT6359_DA_VAUD18_B_EN_ADDR, 1800000),
+ MT6359_LDO("ldo_vsim1", VSIM1, vsim1_voltages,
+ MT6359_RG_LDO_VSIM1_EN_ADDR, MT6359_RG_LDO_VSIM1_EN_SHIFT,
+ MT6359_DA_VSIM1_B_EN_ADDR, MT6359_RG_VSIM1_VOSEL_ADDR,
+ MT6359_RG_VSIM1_VOSEL_MASK << MT6359_RG_VSIM1_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vibr", VIBR, vibr_voltages,
+ MT6359_RG_LDO_VIBR_EN_ADDR, MT6359_RG_LDO_VIBR_EN_SHIFT,
+ MT6359_DA_VIBR_B_EN_ADDR, MT6359_RG_VIBR_VOSEL_ADDR,
+ MT6359_RG_VIBR_VOSEL_MASK << MT6359_RG_VIBR_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf12", VRF12, vrf12_voltages,
+ MT6359_RG_LDO_VRF12_EN_ADDR, MT6359_RG_LDO_VRF12_EN_SHIFT,
+ MT6359_DA_VRF12_B_EN_ADDR, MT6359_RG_VRF12_VOSEL_ADDR,
+ MT6359_RG_VRF12_VOSEL_MASK << MT6359_RG_VRF12_VOSEL_SHIFT,
+ 120),
+ MT6359_REG_FIXED("ldo_vusb", VUSB, MT6359_RG_LDO_VUSB_EN_0_ADDR,
+ MT6359_DA_VUSB_B_EN_ADDR, 3000000),
+ MT6359_LDO_LINEAR("ldo_vsram_proc2", VSRAM_PROC2, 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR,
+ MT6359_DA_VSRAM_PROC2_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vio18", VIO18, volt18_voltages,
+ MT6359_RG_LDO_VIO18_EN_ADDR, MT6359_RG_LDO_VIO18_EN_SHIFT,
+ MT6359_DA_VIO18_B_EN_ADDR, MT6359_RG_VIO18_VOSEL_ADDR,
+ MT6359_RG_VIO18_VOSEL_MASK << MT6359_RG_VIO18_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vcamio", VCAMIO, volt18_voltages,
+ MT6359_RG_LDO_VCAMIO_EN_ADDR, MT6359_RG_LDO_VCAMIO_EN_SHIFT,
+ MT6359_DA_VCAMIO_B_EN_ADDR, MT6359_RG_VCAMIO_VOSEL_ADDR,
+ MT6359_RG_VCAMIO_VOSEL_MASK << MT6359_RG_VCAMIO_VOSEL_SHIFT,
+ 1290),
+ MT6359_REG_FIXED("ldo_vcn18", VCN18, MT6359_RG_LDO_VCN18_EN_ADDR,
+ MT6359_DA_VCN18_B_EN_ADDR, 1800000),
+ MT6359_REG_FIXED("ldo_vfe28", VFE28, MT6359_RG_LDO_VFE28_EN_ADDR,
+ MT6359_DA_VFE28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vcn13", VCN13, vcn13_voltages,
+ MT6359_RG_LDO_VCN13_EN_ADDR, MT6359_RG_LDO_VCN13_EN_SHIFT,
+ MT6359_DA_VCN13_B_EN_ADDR, MT6359_RG_VCN13_VOSEL_ADDR,
+ MT6359_RG_VCN13_VOSEL_MASK << MT6359_RG_VCN13_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_1_bt", VCN33_1_BT, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_1_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_0_SHIFT,
+ MT6359_DA_VCN33_1_B_EN_ADDR, MT6359_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_1_wifi", VCN33_1_WIFI, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_1_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_1_SHIFT,
+ MT6359_DA_VCN33_1_B_EN_ADDR, MT6359_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_REG_FIXED("ldo_vaux18", VAUX18, MT6359_RG_LDO_VAUX18_EN_ADDR,
+ MT6359_DA_VAUX18_B_EN_ADDR, 1800000),
+ MT6359_LDO_LINEAR("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750,
+ 6250,
+ MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR,
+ MT6359_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vefuse", VEFUSE, vefuse_voltages,
+ MT6359_RG_LDO_VEFUSE_EN_ADDR, MT6359_RG_LDO_VEFUSE_EN_SHIFT,
+ MT6359_DA_VEFUSE_B_EN_ADDR, MT6359_RG_VEFUSE_VOSEL_ADDR,
+ MT6359_RG_VEFUSE_VOSEL_MASK << MT6359_RG_VEFUSE_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vxo22", VXO22, vxo22_voltages,
+ MT6359_RG_LDO_VXO22_EN_ADDR, MT6359_RG_LDO_VXO22_EN_SHIFT,
+ MT6359_DA_VXO22_B_EN_ADDR, MT6359_RG_VXO22_VOSEL_ADDR,
+ MT6359_RG_VXO22_VOSEL_MASK << MT6359_RG_VXO22_VOSEL_SHIFT,
+ 120),
+ MT6359_LDO("ldo_vrfck", VRFCK, vrfck_voltages,
+ MT6359_RG_LDO_VRFCK_EN_ADDR, MT6359_RG_LDO_VRFCK_EN_SHIFT,
+ MT6359_DA_VRFCK_B_EN_ADDR, MT6359_RG_VRFCK_VOSEL_ADDR,
+ MT6359_RG_VRFCK_VOSEL_MASK << MT6359_RG_VRFCK_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vbif28", VBIF28, MT6359_RG_LDO_VBIF28_EN_ADDR,
+ MT6359_DA_VBIF28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vio28", VIO28, vio28_voltages,
+ MT6359_RG_LDO_VIO28_EN_ADDR, MT6359_RG_LDO_VIO28_EN_SHIFT,
+ MT6359_DA_VIO28_B_EN_ADDR, MT6359_RG_VIO28_VOSEL_ADDR,
+ MT6359_RG_VIO28_VOSEL_MASK << MT6359_RG_VIO28_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vemc", VEMC, vemc_voltages,
+ MT6359_RG_LDO_VEMC_EN_ADDR, MT6359_RG_LDO_VEMC_EN_SHIFT,
+ MT6359_DA_VEMC_B_EN_ADDR, MT6359_RG_VEMC_VOSEL_ADDR,
+ MT6359_RG_VEMC_VOSEL_MASK << MT6359_RG_VEMC_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_2_bt", VCN33_2_BT, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_2_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_0_SHIFT,
+ MT6359_DA_VCN33_2_B_EN_ADDR, MT6359_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_2_wifi", VCN33_2_WIFI, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_2_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_1_SHIFT,
+ MT6359_DA_VCN33_2_B_EN_ADDR, MT6359_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_va12", VA12, va12_voltages,
+ MT6359_RG_LDO_VA12_EN_ADDR, MT6359_RG_LDO_VA12_EN_SHIFT,
+ MT6359_DA_VA12_B_EN_ADDR, MT6359_RG_VA12_VOSEL_ADDR,
+ MT6359_RG_VA12_VOSEL_MASK << MT6359_RG_VA12_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_va09", VA09, va09_voltages,
+ MT6359_RG_LDO_VA09_EN_ADDR, MT6359_RG_LDO_VA09_EN_SHIFT,
+ MT6359_DA_VA09_B_EN_ADDR, MT6359_RG_VA09_VOSEL_ADDR,
+ MT6359_RG_VA09_VOSEL_MASK << MT6359_RG_VA09_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf18", VRF18, vrf18_voltages,
+ MT6359_RG_LDO_VRF18_EN_ADDR, MT6359_RG_LDO_VRF18_EN_SHIFT,
+ MT6359_DA_VRF18_B_EN_ADDR, MT6359_RG_VRF18_VOSEL_ADDR,
+ MT6359_RG_VRF18_VOSEL_MASK << MT6359_RG_VRF18_VOSEL_SHIFT,
+ 120),
+ MT6359_LDO_LINEAR("ldo_vsram_md", VSRAM_MD, 500000, 1100000, 6250,
+ MT6359_RG_LDO_VSRAM_MD_EN_ADDR,
+ MT6359_DA_VSRAM_MD_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vufs", VUFS, volt18_voltages,
+ MT6359_RG_LDO_VUFS_EN_ADDR, MT6359_RG_LDO_VUFS_EN_SHIFT,
+ MT6359_DA_VUFS_B_EN_ADDR, MT6359_RG_VUFS_VOSEL_ADDR,
+ MT6359_RG_VUFS_VOSEL_MASK << MT6359_RG_VUFS_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vm18", VM18, volt18_voltages,
+ MT6359_RG_LDO_VM18_EN_ADDR, MT6359_RG_LDO_VM18_EN_SHIFT,
+ MT6359_DA_VM18_B_EN_ADDR, MT6359_RG_VM18_VOSEL_ADDR,
+ MT6359_RG_VM18_VOSEL_MASK << MT6359_RG_VM18_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vbbck", VBBCK, vbbck_voltages,
+ MT6359_RG_LDO_VBBCK_EN_ADDR, MT6359_RG_LDO_VBBCK_EN_SHIFT,
+ MT6359_DA_VBBCK_B_EN_ADDR, MT6359_RG_VBBCK_VOSEL_ADDR,
+ MT6359_RG_VBBCK_VOSEL_MASK << MT6359_RG_VBBCK_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO_LINEAR("ldo_vsram_proc1", VSRAM_PROC1, 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR,
+ MT6359_DA_VSRAM_PROC1_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vsim2", VSIM2, vsim2_voltages,
+ MT6359_RG_LDO_VSIM2_EN_ADDR, MT6359_RG_LDO_VSIM2_EN_SHIFT,
+ MT6359_DA_VSIM2_B_EN_ADDR, MT6359_RG_VSIM2_VOSEL_ADDR,
+ MT6359_RG_VSIM2_VOSEL_MASK << MT6359_RG_VSIM2_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB,
+ 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR,
+ MT6359_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT),
+};
+
+static struct mt6359_regulator_info mt6359p_regulators[] = {
+ MT6359_BUCK("buck_vs1", VS1, 800000, 2200000, 12500,
+ MT6359_RG_BUCK_VS1_EN_ADDR,
+ MT6359_DA_VS1_EN_ADDR, MT6359_RG_BUCK_VS1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS1_LP_ADDR, MT6359_RG_BUCK_VS1_LP_SHIFT,
+ MT6359_RG_VS1_FPWM_ADDR, MT6359_RG_VS1_FPWM_SHIFT),
+ MT6359_BUCK("buck_vgpu11", VGPU11, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VGPU11_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR, MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR,
+ MT6359_RG_BUCK_VGPU11_VOSEL_MASK <<
+ MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_BUCK("buck_vmodem", VMODEM, 400000, 1100000, 6250,
+ MT6359_RG_BUCK_VMODEM_EN_ADDR,
+ MT6359_DA_VMODEM_EN_ADDR, MT6359_RG_BUCK_VMODEM_VOSEL_ADDR,
+ MT6359_RG_BUCK_VMODEM_VOSEL_MASK <<
+ MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VMODEM_LP_ADDR,
+ MT6359_RG_BUCK_VMODEM_LP_SHIFT,
+ MT6359_RG_VMODEM_FCCM_ADDR, MT6359_RG_VMODEM_FCCM_SHIFT),
+ MT6359_BUCK("buck_vpu", VPU, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPU_EN_ADDR,
+ MT6359_DA_VPU_EN_ADDR, MT6359_RG_BUCK_VPU_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPU_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPU_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPU_LP_ADDR, MT6359_RG_BUCK_VPU_LP_SHIFT,
+ MT6359_RG_VPU_FCCM_ADDR, MT6359_RG_VPU_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore", VCORE, 506250, 1300000, 6250,
+ MT6359_RG_BUCK_VCORE_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR, MT6359P_RG_BUCK_VCORE_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_BUCK("buck_vs2", VS2, 800000, 1600000, 12500,
+ MT6359_RG_BUCK_VS2_EN_ADDR,
+ MT6359_DA_VS2_EN_ADDR, MT6359_RG_BUCK_VS2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS2_LP_ADDR, MT6359_RG_BUCK_VS2_LP_SHIFT,
+ MT6359_RG_VS2_FPWM_ADDR, MT6359_RG_VS2_FPWM_SHIFT),
+ MT6359_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ MT6359_RG_BUCK_VPA_EN_ADDR,
+ MT6359_DA_VPA_EN_ADDR, MT6359_RG_BUCK_VPA_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPA_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPA_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPA_LP_ADDR, MT6359_RG_BUCK_VPA_LP_SHIFT,
+ MT6359_RG_VPA_MODESET_ADDR, MT6359_RG_VPA_MODESET_SHIFT),
+ MT6359_BUCK("buck_vproc2", VPROC2, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC2_EN_ADDR,
+ MT6359_DA_VPROC2_EN_ADDR, MT6359_RG_BUCK_VPROC2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC2_LP_ADDR,
+ MT6359_RG_BUCK_VPROC2_LP_SHIFT,
+ MT6359_RG_VPROC2_FCCM_ADDR, MT6359_RG_VPROC2_FCCM_SHIFT),
+ MT6359_BUCK("buck_vproc1", VPROC1, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC1_EN_ADDR,
+ MT6359_DA_VPROC1_EN_ADDR, MT6359_RG_BUCK_VPROC1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC1_LP_ADDR,
+ MT6359_RG_BUCK_VPROC1_LP_SHIFT,
+ MT6359_RG_VPROC1_FCCM_ADDR, MT6359_RG_VPROC1_FCCM_SHIFT),
+ MT6359_BUCK("buck_vgpu11_sshub", VGPU11_SSHUB, 400000, 1193750, 6250,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK <<
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_REG_FIXED("ldo_vaud18", VAUD18, MT6359P_RG_LDO_VAUD18_EN_ADDR,
+ MT6359P_DA_VAUD18_B_EN_ADDR, 1800000),
+ MT6359_LDO("ldo_vsim1", VSIM1, vsim1_voltages,
+ MT6359P_RG_LDO_VSIM1_EN_ADDR, MT6359P_RG_LDO_VSIM1_EN_SHIFT,
+ MT6359P_DA_VSIM1_B_EN_ADDR, MT6359P_RG_VSIM1_VOSEL_ADDR,
+ MT6359_RG_VSIM1_VOSEL_MASK << MT6359_RG_VSIM1_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vibr", VIBR, vibr_voltages,
+ MT6359P_RG_LDO_VIBR_EN_ADDR, MT6359P_RG_LDO_VIBR_EN_SHIFT,
+ MT6359P_DA_VIBR_B_EN_ADDR, MT6359P_RG_VIBR_VOSEL_ADDR,
+ MT6359_RG_VIBR_VOSEL_MASK << MT6359_RG_VIBR_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf12", VRF12, vrf12_voltages,
+ MT6359P_RG_LDO_VRF12_EN_ADDR, MT6359P_RG_LDO_VRF12_EN_SHIFT,
+ MT6359P_DA_VRF12_B_EN_ADDR, MT6359P_RG_VRF12_VOSEL_ADDR,
+ MT6359_RG_VRF12_VOSEL_MASK << MT6359_RG_VRF12_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vusb", VUSB, MT6359P_RG_LDO_VUSB_EN_0_ADDR,
+ MT6359P_DA_VUSB_B_EN_ADDR, 3000000),
+ MT6359_LDO_LINEAR("ldo_vsram_proc2", VSRAM_PROC2, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR,
+ MT6359P_DA_VSRAM_PROC2_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vio18", VIO18, volt18_voltages,
+ MT6359P_RG_LDO_VIO18_EN_ADDR, MT6359P_RG_LDO_VIO18_EN_SHIFT,
+ MT6359P_DA_VIO18_B_EN_ADDR, MT6359P_RG_VIO18_VOSEL_ADDR,
+ MT6359_RG_VIO18_VOSEL_MASK << MT6359_RG_VIO18_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vcamio", VCAMIO, volt18_voltages,
+ MT6359P_RG_LDO_VCAMIO_EN_ADDR,
+ MT6359P_RG_LDO_VCAMIO_EN_SHIFT,
+ MT6359P_DA_VCAMIO_B_EN_ADDR, MT6359P_RG_VCAMIO_VOSEL_ADDR,
+ MT6359_RG_VCAMIO_VOSEL_MASK << MT6359_RG_VCAMIO_VOSEL_SHIFT,
+ 1290),
+ MT6359_REG_FIXED("ldo_vcn18", VCN18, MT6359P_RG_LDO_VCN18_EN_ADDR,
+ MT6359P_DA_VCN18_B_EN_ADDR, 1800000),
+ MT6359_REG_FIXED("ldo_vfe28", VFE28, MT6359P_RG_LDO_VFE28_EN_ADDR,
+ MT6359P_DA_VFE28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vcn13", VCN13, vcn13_voltages,
+ MT6359P_RG_LDO_VCN13_EN_ADDR, MT6359P_RG_LDO_VCN13_EN_SHIFT,
+ MT6359P_DA_VCN13_B_EN_ADDR, MT6359P_RG_VCN13_VOSEL_ADDR,
+ MT6359_RG_VCN13_VOSEL_MASK << MT6359_RG_VCN13_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_1_bt", VCN33_1_BT, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_1_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_0_SHIFT,
+ MT6359P_DA_VCN33_1_B_EN_ADDR, MT6359P_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_1_wifi", VCN33_1_WIFI, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_1_EN_1_ADDR,
+ MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT,
+ MT6359P_DA_VCN33_1_B_EN_ADDR, MT6359P_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_REG_FIXED("ldo_vaux18", VAUX18, MT6359P_RG_LDO_VAUX18_EN_ADDR,
+ MT6359P_DA_VAUX18_B_EN_ADDR, 1800000),
+ MT6359_LDO_LINEAR("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750,
+ 6250,
+ MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR,
+ MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vefuse", VEFUSE, vefuse_voltages,
+ MT6359P_RG_LDO_VEFUSE_EN_ADDR,
+ MT6359P_RG_LDO_VEFUSE_EN_SHIFT,
+ MT6359P_DA_VEFUSE_B_EN_ADDR, MT6359P_RG_VEFUSE_VOSEL_ADDR,
+ MT6359_RG_VEFUSE_VOSEL_MASK << MT6359_RG_VEFUSE_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vxo22", VXO22, vxo22_voltages,
+ MT6359P_RG_LDO_VXO22_EN_ADDR, MT6359P_RG_LDO_VXO22_EN_SHIFT,
+ MT6359P_DA_VXO22_B_EN_ADDR, MT6359P_RG_VXO22_VOSEL_ADDR,
+ MT6359_RG_VXO22_VOSEL_MASK << MT6359_RG_VXO22_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vrfck_1", VRFCK, vrfck_voltages_1,
+ MT6359P_RG_LDO_VRFCK_EN_ADDR, MT6359P_RG_LDO_VRFCK_EN_SHIFT,
+ MT6359P_DA_VRFCK_B_EN_ADDR, MT6359P_RG_VRFCK_VOSEL_ADDR,
+ MT6359_RG_VRFCK_VOSEL_MASK << MT6359_RG_VRFCK_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vbif28", VBIF28, MT6359P_RG_LDO_VBIF28_EN_ADDR,
+ MT6359P_DA_VBIF28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vio28", VIO28, vio28_voltages,
+ MT6359P_RG_LDO_VIO28_EN_ADDR, MT6359P_RG_LDO_VIO28_EN_SHIFT,
+ MT6359P_DA_VIO28_B_EN_ADDR, MT6359P_RG_VIO28_VOSEL_ADDR,
+ MT6359_RG_VIO28_VOSEL_MASK << MT6359_RG_VIO28_VOSEL_SHIFT,
+ 1920),
+ MT6359P_LDO1("ldo_vemc_1", VEMC, mt6359p_vemc_ops, vemc_voltages_1,
+ MT6359P_RG_LDO_VEMC_EN_ADDR, MT6359P_RG_LDO_VEMC_EN_SHIFT,
+ MT6359P_DA_VEMC_B_EN_ADDR,
+ MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR,
+ MT6359P_RG_LDO_VEMC_VOSEL_0_MASK <<
+ MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT),
+ MT6359_LDO("ldo_vcn33_2_bt", VCN33_2_BT, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_2_EN_0_ADDR,
+ MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT,
+ MT6359P_DA_VCN33_2_B_EN_ADDR, MT6359P_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_2_wifi", VCN33_2_WIFI, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_2_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_1_SHIFT,
+ MT6359P_DA_VCN33_2_B_EN_ADDR, MT6359P_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_va12", VA12, va12_voltages,
+ MT6359P_RG_LDO_VA12_EN_ADDR, MT6359P_RG_LDO_VA12_EN_SHIFT,
+ MT6359P_DA_VA12_B_EN_ADDR, MT6359P_RG_VA12_VOSEL_ADDR,
+ MT6359_RG_VA12_VOSEL_MASK << MT6359_RG_VA12_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_va09", VA09, va09_voltages,
+ MT6359P_RG_LDO_VA09_EN_ADDR, MT6359P_RG_LDO_VA09_EN_SHIFT,
+ MT6359P_DA_VA09_B_EN_ADDR, MT6359P_RG_VA09_VOSEL_ADDR,
+ MT6359_RG_VA09_VOSEL_MASK << MT6359_RG_VA09_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vrf18", VRF18, vrf18_voltages,
+ MT6359P_RG_LDO_VRF18_EN_ADDR, MT6359P_RG_LDO_VRF18_EN_SHIFT,
+ MT6359P_DA_VRF18_B_EN_ADDR, MT6359P_RG_VRF18_VOSEL_ADDR,
+ MT6359_RG_VRF18_VOSEL_MASK << MT6359_RG_VRF18_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO_LINEAR("ldo_vsram_md", VSRAM_MD, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_MD_EN_ADDR,
+ MT6359P_DA_VSRAM_MD_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vufs", VUFS, volt18_voltages,
+ MT6359P_RG_LDO_VUFS_EN_ADDR, MT6359P_RG_LDO_VUFS_EN_SHIFT,
+ MT6359P_DA_VUFS_B_EN_ADDR, MT6359P_RG_VUFS_VOSEL_ADDR,
+ MT6359_RG_VUFS_VOSEL_MASK << MT6359_RG_VUFS_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vm18", VM18, volt18_voltages,
+ MT6359P_RG_LDO_VM18_EN_ADDR, MT6359P_RG_LDO_VM18_EN_SHIFT,
+ MT6359P_DA_VM18_B_EN_ADDR, MT6359P_RG_VM18_VOSEL_ADDR,
+ MT6359_RG_VM18_VOSEL_MASK << MT6359_RG_VM18_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vbbck", VBBCK, vbbck_voltages,
+ MT6359P_RG_LDO_VBBCK_EN_ADDR, MT6359P_RG_LDO_VBBCK_EN_SHIFT,
+ MT6359P_DA_VBBCK_B_EN_ADDR, MT6359P_RG_VBBCK_VOSEL_ADDR,
+ MT6359P_RG_VBBCK_VOSEL_MASK << MT6359P_RG_VBBCK_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_proc1", VSRAM_PROC1, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR,
+ MT6359P_DA_VSRAM_PROC1_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vsim2", VSIM2, vsim2_voltages,
+ MT6359P_RG_LDO_VSIM2_EN_ADDR, MT6359P_RG_LDO_VSIM2_EN_SHIFT,
+ MT6359P_DA_VSIM2_B_EN_ADDR, MT6359P_RG_VSIM2_VOSEL_ADDR,
+ MT6359_RG_VSIM2_VOSEL_MASK << MT6359_RG_VSIM2_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB,
+ 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR,
+ MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT),
+};
+
+static int mt6359_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct mt6359_regulator_info *mt6359_info;
+ int i, hw_ver;
+
+ regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+ if (hw_ver >= MT6359P_CHIP_VER)
+ mt6359_info = mt6359p_regulators;
+ else
+ mt6359_info = mt6359_regulators;
+
+ config.dev = mt6397->dev;
+ config.regmap = mt6397->regmap;
+ for (i = 0; i < MT6359_MAX_REGULATOR; i++, mt6359_info++) {
+ config.driver_data = mt6359_info;
+ rdev = devm_regulator_register(&pdev->dev, &mt6359_info->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n", mt6359_info->desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id mt6359_platform_ids[] = {
+ {"mt6359-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6359_platform_ids);
+
+static struct platform_driver mt6359_regulator_driver = {
+ .driver = {
+ .name = "mt6359-regulator",
+ },
+ .probe = mt6359_regulator_probe,
+ .id_table = mt6359_platform_ids,
+};
+
+module_platform_driver(mt6359_regulator_driver);
+
+MODULE_AUTHOR("Wen Su <wen.su@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6359 PMIC");
+MODULE_LICENSE("GPL");
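
As a worked check of the n_voltages arithmetic in MT6359_BUCK(): buck_vcore spans 400000..1193750 uV in 6250 uV steps, so n_voltages = (1193750 - 400000) / 6250 + 1 = 128, exactly a 7-bit selector. A compile-time guard along these lines would catch a mistyped bound (hypothetical, not in the driver):

	#define N_VOLT(min, max, step)	(((max) - (min)) / (step) + 1)
	static_assert(N_VOLT(400000, 1193750, 6250) == 128, "vcore vsel width");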
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 49f6c05fee34..f54d4f176882 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -21,6 +21,62 @@ static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
[PM_SUSPEND_MAX] = "regulator-state-disk",
};
+static void fill_limit(int *limit, int val)
+{
+ if (val) {
+ if (val == 1)
+ *limit = REGULATOR_NOTIF_LIMIT_ENABLE;
+ else
+ *limit = val;
+ } else {
+ *limit = REGULATOR_NOTIF_LIMIT_DISABLE;
+ }
+}
+
+static void of_get_regulator_prot_limits(struct device_node *np,
+ struct regulation_constraints *constraints)
+{
+ u32 pval;
+ int i;
+ static const char *const props[] = {
+ "regulator-oc-%s-microamp",
+ "regulator-ov-%s-microvolt",
+ "regulator-temp-%s-kelvin",
+ "regulator-uv-%s-microvolt",
+ };
+ struct notification_limit *limits[] = {
+ &constraints->over_curr_limits,
+ &constraints->over_voltage_limits,
+ &constraints->temp_limits,
+ &constraints->under_voltage_limits,
+ };
+ bool set[4] = {0};
+
+ /* Protection limits: */
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ char prop[255];
+ bool found;
+ int j;
+ static const char *const lvl[] = {
+ "protection", "error", "warn"
+ };
+ int *l[] = {
+ &limits[i]->prot, &limits[i]->err, &limits[i]->warn,
+ };
+
+ for (j = 0; j < ARRAY_SIZE(lvl); j++) {
+ snprintf(prop, 255, props[i], lvl[j]);
+ found = !of_property_read_u32(np, prop, &pval);
+ if (found)
+ fill_limit(l[j], pval);
+ set[i] |= found;
+ }
+ }
+ constraints->over_current_detection = set[0];
+ constraints->over_voltage_detection = set[1];
+ constraints->over_temp_detection = set[2];
+ constraints->under_voltage_detection = set[3];
+}
+
static int of_get_regulation_constraints(struct device *dev,
struct device_node *np,
struct regulator_init_data **init_data,
@@ -188,6 +244,8 @@ static int of_get_regulation_constraints(struct device *dev,
constraints->over_current_protection = of_property_read_bool(np,
"regulator-over-current-protection");
+ of_get_regulator_prot_limits(np, constraints);
+
for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
switch (i) {
case PM_SUSPEND_MEM:
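
The new of_get_regulator_prot_limits() above derives twelve device-tree property names by expanding each of the four templates in props[] with the three severity levels, and fill_limit() gives the values their meaning: 0 maps to REGULATOR_NOTIF_LIMIT_DISABLE, 1 to REGULATOR_NOTIF_LIMIT_ENABLE, and any other value is used as the limit itself. The expansion, sketched:

	/*
	 * snprintf(prop, 255, "regulator-ov-%s-microvolt", "protection")
	 *	-> "regulator-ov-protection-microvolt"
	 * and likewise:
	 *	regulator-oc-{protection,error,warn}-microamp
	 *	regulator-temp-{protection,error,warn}-kelvin
	 *	regulator-uv-{protection,error,warn}-microvolt
	 */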
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 2f7ee212cb8c..64e5f5f0cc84 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -65,32 +65,9 @@ static const struct regmap_config pca9450_regmap_config = {
* 10: 25mV/4usec
* 11: 25mV/8usec
*/
-static int pca9450_dvs_set_ramp_delay(struct regulator_dev *rdev,
- int ramp_delay)
-{
- int id = rdev_get_id(rdev);
- unsigned int ramp_value;
-
- switch (ramp_delay) {
- case 1 ... 3125:
- ramp_value = BUCK1_RAMP_3P125MV;
- break;
- case 3126 ... 6250:
- ramp_value = BUCK1_RAMP_6P25MV;
- break;
- case 6251 ... 12500:
- ramp_value = BUCK1_RAMP_12P5MV;
- break;
- case 12501 ... 25000:
- ramp_value = BUCK1_RAMP_25MV;
- break;
- default:
- ramp_value = BUCK1_RAMP_25MV;
- }
-
- return regmap_update_bits(rdev->regmap, PCA9450_REG_BUCK1CTRL + id * 3,
- BUCK1_RAMP_MASK, ramp_value << 6);
-}
+static const unsigned int pca9450_dvs_buck_ramp_table[] = {
+ 25000, 12500, 6250, 3125
+};
static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
@@ -100,7 +77,7 @@ static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = pca9450_dvs_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct regulator_ops pca9450_buck_regulator_ops = {
@@ -251,6 +228,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK1CTRL,
+ .ramp_mask = BUCK1_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -276,6 +257,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -301,6 +286,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK3OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK3CTRL,
.enable_mask = BUCK3_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK3CTRL,
+ .ramp_mask = BUCK3_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -477,6 +466,10 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK1CTRL,
+ .ramp_mask = BUCK1_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -502,6 +495,10 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
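The pca9450 conversion above drops the open-coded switch in favour of the generic regulator_set_ramp_delay_regmap() helper, driven by ramp_reg, ramp_mask and the descending table {25000, 12500, 6250, 3125}. A rough sketch of how such a table-driven op works (an illustration of the idea, not the in-tree helper):

	/*
	 * Sketch only: pick the slowest table entry that still meets the
	 * requested ramp delay (in uV/us) and program its index into the
	 * masked register field.
	 */
	static int sketch_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
	{
		const struct regulator_desc *desc = rdev->desc;
		unsigned int sel = 0;
		unsigned int i;

		for (i = 0; i < desc->n_ramp_values; i++) {
			if (desc->ramp_delay_table[i] >= ramp_delay)
				sel = i;	/* descending table: closest match wins */
		}

		return regmap_update_bits(rdev->regmap, desc->ramp_reg,
					  desc->ramp_mask,
					  sel << (ffs(desc->ramp_mask) - 1));
	}

For example, a request of 10000 uV/us selects index 1 (12500), which corresponds to the removed 6251...12500 switch arm.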
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
index de25e3279b4b..b3da0dc58782 100644
--- a/drivers/regulator/qcom-labibb-regulator.c
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -307,13 +307,21 @@ end:
return IRQ_HANDLED;
}
-static int qcom_labibb_set_ocp(struct regulator_dev *rdev)
+static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim,
+ int severity, bool enable)
{
struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
char *ocp_irq_name;
u32 irq_flags = IRQF_ONESHOT;
int irq_trig_low, ret;
+	/*
+	 * The labibb regulator supports only protection and does not support
+	 * setting a current limit. Disabling protection is not supported
+	 * either.
+	 */
+ if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
+ return -EINVAL;
+
/* If there is no OCP interrupt, there's nothing to set */
if (vreg->ocp_irq <= 0)
return -EINVAL;
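Given the checks above, the only argument combination this op accepts after the interface change is, illustratively:

	ret = qcom_labibb_set_ocp(rdev, 0, REGULATOR_SEVERITY_PROT, true);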
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 22fec370fa61..6cca910a76de 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -811,12 +811,12 @@ static const struct rpmh_vreg_init_data pm8998_vreg_data[] = {
RPMH_VREG("ldo28", "ldo%s28", &pmic4_pldo, "vdd-l16-l28"),
RPMH_VREG("lvs1", "vs%s1", &pmic4_lvs, "vin-lvs-1-2"),
RPMH_VREG("lvs2", "vs%s2", &pmic4_lvs, "vin-lvs-1-2"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pmi8998_vreg_data[] = {
RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
@@ -824,7 +824,7 @@ static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
RPMH_VREG("smps4", "smp%s4", &pmic4_ftsmps426, "vdd-s4"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
@@ -856,7 +856,7 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
@@ -880,7 +880,39 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
+};
+
+static const struct rpmh_vreg_init_data pmm8155au_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps510, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps510, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps510, "vdd-s10"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ {}
};
static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
@@ -906,7 +938,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l8"),
RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l6-l9-l10"),
RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l6-l9-l10"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
@@ -934,7 +966,7 @@ static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l1-l12"),
RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l3-l4-l5-l7-l13"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
@@ -947,7 +979,7 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
@@ -960,7 +992,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
@@ -988,7 +1020,7 @@ static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
@@ -1012,7 +1044,7 @@ static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
@@ -1039,7 +1071,7 @@ static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
RPMH_VREG("ldo14", "ldo%s14", &pmic5_nldo, "vdd-l14"),
RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo, "vdd-l15"),
RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l16"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
@@ -1070,6 +1102,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
+ {}
};
static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
@@ -1083,6 +1116,7 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-bob"),
+ {}
};
static int rpmh_regulator_probe(struct platform_device *pdev)
@@ -1176,6 +1210,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8150l_vreg_data,
},
{
+ .compatible = "qcom,pmm8155au-rpmh-regulators",
+ .data = pmm8155au_vreg_data,
+ },
+ {
.compatible = "qcom,pmx55-rpmh-regulators",
.data = pmx55_vreg_data,
},
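Besides adding the pmm8155au table, the series appends the previously missing {} sentinels to the pm7325 and pmr735a tables; the surrounding {}, to {} changes are purely cosmetic. The sentinel matters because the probe walks each table until it reaches an empty entry, roughly (a sketch, not the exact in-tree loop; register_one_vreg is hypothetical):

	for (i = 0; vreg_data[i].name; i++)
		register_one_vreg(&vreg_data[i]);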
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index bb944ee5fe3b..198fcc6551f6 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -251,6 +251,50 @@ static const struct regulator_desc pma8084_switch = {
.ops = &rpm_switch_ops,
};
+static const struct regulator_desc pm8226_hfsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 262,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_pldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
+ },
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_nldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_switch = {
+ .ops = &rpm_switch_ops,
+};
+
static const struct regulator_desc pm8x41_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
@@ -405,8 +449,8 @@ static const struct regulator_desc pm8950_pldo = {
static const struct regulator_desc pm8953_lnldo = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
REGULATOR_LINEAR_RANGE(690000, 0, 7, 60000),
+ REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
},
.n_linear_ranges = 2,
.n_voltages = 16,
@@ -746,6 +790,44 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -1092,6 +1174,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
+ { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 95677c51c1fa..41424a3366d0 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -595,11 +595,15 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
return regulator_enable_regmap(rdev);
}
-static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
+static int spmi_regulator_vs_ocp(struct regulator_dev *rdev, int lim_uA,
+ int severity, bool enable)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
u8 reg = SPMI_VS_OCP_OVERRIDE;
+ if (lim_uA || !enable || severity != REGULATOR_SEVERITY_PROT)
+ return -EINVAL;
+
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
index 457788b50572..2e627c2b6c51 100644
--- a/drivers/regulator/qcom_usb_vbus-regulator.c
+++ b/drivers/regulator/qcom_usb_vbus-regulator.c
@@ -16,13 +16,21 @@
#define CMD_OTG 0x40
#define OTG_EN BIT(0)
+#define OTG_CURRENT_LIMIT_CFG 0x52
+#define OTG_CURRENT_LIMIT_MASK GENMASK(2, 0)
#define OTG_CFG 0x53
#define OTG_EN_SRC_CFG BIT(1)
+static const unsigned int curr_table[] = {
+ 500000, 1000000, 1500000, 2000000, 2500000, 3000000,
+};
+
static const struct regulator_ops qcom_usb_vbus_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static struct regulator_desc qcom_usb_vbus_rdesc = {
@@ -30,6 +38,8 @@ static struct regulator_desc qcom_usb_vbus_rdesc = {
.ops = &qcom_usb_vbus_reg_ops,
.owner = THIS_MODULE,
.type = REGULATOR_VOLTAGE,
+ .curr_table = curr_table,
+ .n_current_limits = ARRAY_SIZE(curr_table),
};
static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
@@ -61,6 +71,8 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
qcom_usb_vbus_rdesc.enable_reg = base + CMD_OTG;
qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
+ qcom_usb_vbus_rdesc.csel_reg = base + OTG_CURRENT_LIMIT_CFG;
+ qcom_usb_vbus_rdesc.csel_mask = OTG_CURRENT_LIMIT_MASK;
config.dev = dev;
config.init_data = init_data;
config.of_node = dev->of_node;
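With csel_reg and csel_mask pointing at the 3-bit OTG_CURRENT_LIMIT_CFG field, the generic regmap helpers translate between field values and curr_table entries, i.e. field value i corresponds to curr_table[i] microamps:

	/*
	 * Field encoding (values taken from curr_table above):
	 *   0 ->  500000 uA    2 -> 1500000 uA    4 -> 2500000 uA
	 *   1 -> 1000000 uA    3 -> 2000000 uA    5 -> 3000000 uA
	 */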
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e926c1a85846..127dc2e2e690 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -158,13 +158,6 @@ struct rk808_regulator_data {
struct gpio_desc *dvs_gpio[2];
};
-static const int rk808_buck_config_regs[] = {
- RK808_BUCK1_CONFIG_REG,
- RK808_BUCK2_CONFIG_REG,
- RK808_BUCK3_CONFIG_REG,
- RK808_BUCK4_CONFIG_REG,
-};
-
static const struct linear_range rk808_ldo3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
@@ -215,6 +208,15 @@ static const struct linear_range rk817_buck3_voltage_ranges[] = {
RK817_BUCK3_SEL_CNT, RK817_BUCK1_STP1),
};
+static const unsigned int rk808_buck1_2_ramp_table[] = {
+ 2000, 4000, 6000, 10000
+};
+
+/* RK817 and RK809 */
+static const unsigned int rk817_buck1_4_ramp_table[] = {
+ 3000, 6300, 12500, 25000
+};
+
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
@@ -340,62 +342,6 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
return regulator_set_voltage_time_sel(rdev, old_selector, new_selector);
}
-static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
-
- switch (ramp_delay) {
- case 1 ... 2000:
- ramp_value = RK808_RAMP_RATE_2MV_PER_US;
- break;
- case 2001 ... 4000:
- ramp_value = RK808_RAMP_RATE_4MV_PER_US;
- break;
- case 4001 ... 6000:
- ramp_value = RK808_RAMP_RATE_6MV_PER_US;
- break;
- case 6001 ... 10000:
- break;
- default:
- pr_warn("%s ramp_delay: %d not supported, setting 10000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, reg,
- RK808_RAMP_RATE_MASK, ramp_value);
-}
-
-/*
- * RK817 RK809
- */
-static int rk817_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RK817_RAMP_RATE_25MV_PER_US;
- unsigned int reg = RK817_BUCK_CONFIG_REG(rdev_get_id(rdev));
-
- switch (ramp_delay) {
- case 0 ... 3000:
- ramp_value = RK817_RAMP_RATE_3MV_PER_US;
- break;
- case 3001 ... 6300:
- ramp_value = RK817_RAMP_RATE_6_3MV_PER_US;
- break;
- case 6301 ... 12500:
- ramp_value = RK817_RAMP_RATE_12_5MV_PER_US;
- break;
- case 12501 ... 25000:
- break;
- default:
- dev_warn(&rdev->dev,
- "%s ramp_delay: %d not supported, setting 25000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, reg,
- RK817_RAMP_RATE_MASK, ramp_value);
-}
-
static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
@@ -625,7 +571,7 @@ static const struct regulator_ops rk808_buck1_2_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
- .set_ramp_delay = rk808_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_voltage = rk808_set_suspend_voltage,
.set_suspend_enable = rk808_set_suspend_enable,
.set_suspend_disable = rk808_set_suspend_disable,
@@ -722,7 +668,7 @@ static const struct regulator_ops rk817_buck_ops_range = {
.set_mode = rk8xx_set_mode,
.get_mode = rk8xx_get_mode,
.set_suspend_mode = rk8xx_set_suspend_mode,
- .set_ramp_delay = rk817_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_voltage = rk808_set_suspend_voltage_range,
.set_suspend_enable = rk817_set_suspend_enable,
.set_suspend_disable = rk817_set_suspend_disable,
@@ -814,6 +760,10 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(0),
+ .ramp_reg = RK808_BUCK1_CONFIG_REG,
+ .ramp_mask = RK808_RAMP_RATE_MASK,
+ .ramp_delay_table = rk808_buck1_2_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk808_buck1_2_ramp_table),
.owner = THIS_MODULE,
}, {
.name = "DCDC_REG2",
@@ -830,6 +780,10 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(1),
+ .ramp_reg = RK808_BUCK2_CONFIG_REG,
+ .ramp_mask = RK808_RAMP_RATE_MASK,
+ .ramp_delay_table = rk808_buck1_2_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk808_buck1_2_ramp_table),
.owner = THIS_MODULE,
}, {
.name = "DCDC_REG3",
@@ -910,6 +864,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC1),
.enable_val = ENABLE_MASK(RK817_ID_DCDC1),
.disable_val = DISABLE_VAL(RK817_ID_DCDC1),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC1),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -929,6 +887,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC2),
.enable_val = ENABLE_MASK(RK817_ID_DCDC2),
.disable_val = DISABLE_VAL(RK817_ID_DCDC2),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC2),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -948,6 +910,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC3),
.enable_val = ENABLE_MASK(RK817_ID_DCDC3),
.disable_val = DISABLE_VAL(RK817_ID_DCDC3),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC3),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -967,6 +933,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC4),
.enable_val = ENABLE_MASK(RK817_ID_DCDC4),
.disable_val = DISABLE_VAL(RK817_ID_DCDC4),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC4),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
},
@@ -1052,6 +1022,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC1),
.enable_val = ENABLE_MASK(RK817_ID_DCDC1),
.disable_val = DISABLE_VAL(RK817_ID_DCDC1),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC1),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1071,6 +1045,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC2),
.enable_val = ENABLE_MASK(RK817_ID_DCDC2),
.disable_val = DISABLE_VAL(RK817_ID_DCDC2),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC2),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1090,6 +1068,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC3),
.enable_val = ENABLE_MASK(RK817_ID_DCDC3),
.disable_val = DISABLE_VAL(RK817_ID_DCDC3),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC3),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1109,6 +1091,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC4),
.enable_val = ENABLE_MASK(RK817_ID_DCDC4),
.disable_val = DISABLE_VAL(RK817_ID_DCDC4),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC4),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
},
diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
index 2055a9cb13ba..7a87788d3f09 100644
--- a/drivers/regulator/rt4801-regulator.c
+++ b/drivers/regulator/rt4801-regulator.c
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev), ret;
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev);
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c
index e3aaac90d238..676b0419e48f 100644
--- a/drivers/regulator/rt4831-regulator.c
+++ b/drivers/regulator/rt4831-regulator.c
@@ -108,6 +108,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.bypass_reg = RT4831_REG_DSVEN,
.bypass_val_on = DSV_MODE_BYPASS,
.bypass_val_off = DSV_MODE_NORMAL,
+ .owner = THIS_MODULE,
},
{
.name = "DSVP",
@@ -125,6 +126,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.enable_mask = RT4831_POSEN_MASK,
.active_discharge_reg = RT4831_REG_DSVEN,
.active_discharge_mask = RT4831_POSADEN_MASK,
+ .owner = THIS_MODULE,
},
{
.name = "DSVN",
@@ -142,6 +144,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.enable_mask = RT4831_NEGEN_MASK,
.active_discharge_reg = RT4831_REG_DSVEN,
.active_discharge_mask = RT4831_NEGADEN_MASK,
+ .owner = THIS_MODULE,
}
};
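The added .owner = THIS_MODULE fields let the regulator core take a reference on the rt4831 module while its regulators are in use, so the module cannot be unloaded underneath an active consumer.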
diff --git a/drivers/regulator/rt6160-regulator.c b/drivers/regulator/rt6160-regulator.c
new file mode 100644
index 000000000000..5d7b0e7ad69a
--- /dev/null
+++ b/drivers/regulator/rt6160-regulator.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT6160_MODE_AUTO 0
+#define RT6160_MODE_FPWM 1
+
+#define RT6160_REG_CNTL 0x01
+#define RT6160_REG_STATUS 0x02
+#define RT6160_REG_DEVID 0x03
+#define RT6160_REG_VSELL 0x04
+#define RT6160_REG_VSELH 0x05
+#define RT6160_NUM_REGS (RT6160_REG_VSELH + 1)
+
+#define RT6160_FPWM_MASK BIT(3)
+#define RT6160_RAMPRATE_MASK GENMASK(1, 0)
+#define RT6160_VID_MASK GENMASK(7, 4)
+#define RT6160_VSEL_MASK GENMASK(6, 0)
+#define RT6160_HDSTAT_MASK BIT(4)
+#define RT6160_UVSTAT_MASK BIT(3)
+#define RT6160_OCSTAT_MASK BIT(2)
+#define RT6160_TSDSTAT_MASK BIT(1)
+#define RT6160_PGSTAT_MASK BIT(0)
+
+#define RT6160_VENDOR_ID 0xA0
+#define RT6160_VOUT_MINUV 2025000
+#define RT6160_VOUT_MAXUV 5200000
+#define RT6160_VOUT_STPUV 25000
+#define RT6160_N_VOUTS ((RT6160_VOUT_MAXUV - RT6160_VOUT_MINUV) / RT6160_VOUT_STPUV + 1)
+
+#define RT6160_I2CRDY_TIMEUS 100
+
+struct rt6160_priv {
+ struct regulator_desc desc;
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+ bool enable_state;
+};
+
+static const unsigned int rt6160_ramp_tables[] = {
+ 1000, 2500, 5000, 10000
+};
+
+static int rt6160_enable(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ if (!priv->enable_gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ priv->enable_state = true;
+
+ usleep_range(RT6160_I2CRDY_TIMEUS, RT6160_I2CRDY_TIMEUS + 100);
+
+ regcache_cache_only(priv->regmap, false);
+ return regcache_sync(priv->regmap);
+}
+
+static int rt6160_disable(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ if (!priv->enable_gpio)
+ return -EINVAL;
+
+	/* Switch the regcache to cache-only and mark it dirty before disabling HW */
+ regcache_cache_only(priv->regmap, true);
+ regcache_mark_dirty(priv->regmap);
+
+ priv->enable_state = false;
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+
+ return 0;
+}
+
+static int rt6160_is_enabled(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ return priv->enable_state ? 1 : 0;
+}
+
+static int rt6160_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int mode_val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ mode_val = RT6160_FPWM_MASK;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ mode_val = 0;
+ break;
+ default:
+ dev_err(&rdev->dev, "mode not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, RT6160_REG_CNTL, RT6160_FPWM_MASK, mode_val);
+}
+
+static unsigned int rt6160_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, RT6160_REG_CNTL, &val);
+ if (ret)
+ return ret;
+
+ if (val & RT6160_FPWM_MASK)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int rt6160_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int suspend_vsel_reg;
+ int vsel;
+
+ vsel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (vsel < 0)
+ return vsel;
+
+ if (rdev->desc->vsel_reg == RT6160_REG_VSELL)
+ suspend_vsel_reg = RT6160_REG_VSELH;
+ else
+ suspend_vsel_reg = RT6160_REG_VSELL;
+
+ return regmap_update_bits(regmap, suspend_vsel_reg,
+ RT6160_VSEL_MASK, vsel);
+}
+
+static int rt6160_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int val, events = 0;
+ int ret;
+
+ ret = regmap_read(regmap, RT6160_REG_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & (RT6160_HDSTAT_MASK | RT6160_TSDSTAT_MASK))
+ events |= REGULATOR_ERROR_OVER_TEMP;
+
+ if (val & RT6160_UVSTAT_MASK)
+ events |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (val & RT6160_OCSTAT_MASK)
+ events |= REGULATOR_ERROR_OVER_CURRENT;
+
+ if (val & RT6160_PGSTAT_MASK)
+ events |= REGULATOR_ERROR_FAIL;
+
+ *flags = events;
+ return 0;
+}
+
+static const struct regulator_ops rt6160_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+
+ .enable = rt6160_enable,
+ .disable = rt6160_disable,
+ .is_enabled = rt6160_is_enabled,
+
+ .set_mode = rt6160_set_mode,
+ .get_mode = rt6160_get_mode,
+ .set_suspend_voltage = rt6160_set_suspend_voltage,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_error_flags = rt6160_get_error_flags,
+};
+
+static unsigned int rt6160_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RT6160_MODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ case RT6160_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ }
+
+ return REGULATOR_MODE_INVALID;
+}
+
+static bool rt6160_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= RT6160_REG_CNTL && reg <= RT6160_REG_VSELH)
+ return true;
+ return false;
+}
+
+static bool rt6160_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == RT6160_REG_STATUS)
+ return true;
+ return false;
+}
+
+static const struct regmap_config rt6160_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT6160_REG_VSELH,
+ .num_reg_defaults_raw = RT6160_NUM_REGS,
+ .cache_type = REGCACHE_FLAT,
+
+ .writeable_reg = rt6160_is_accessible_reg,
+ .readable_reg = rt6160_is_accessible_reg,
+ .volatile_reg = rt6160_is_volatile_reg,
+};
+
+static int rt6160_probe(struct i2c_client *i2c)
+{
+ struct rt6160_priv *priv;
+ struct regulator_config regulator_cfg = {};
+ struct regulator_dev *rdev;
+ bool vsel_active_low;
+ unsigned int devid;
+ int ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vsel_active_low =
+ device_property_present(&i2c->dev, "richtek,vsel-active-low");
+
+ priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+ return PTR_ERR(priv->enable_gpio);
+ }
+ priv->enable_state = true;
+
+ usleep_range(RT6160_I2CRDY_TIMEUS, RT6160_I2CRDY_TIMEUS + 100);
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &rt6160_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to init regmap (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(priv->regmap, RT6160_REG_DEVID, &devid);
+ if (ret)
+ return ret;
+
+ if ((devid & RT6160_VID_MASK) != RT6160_VENDOR_ID) {
+ dev_err(&i2c->dev, "VID not correct [0x%02x]\n", devid);
+ return -ENODEV;
+ }
+
+ priv->desc.name = "rt6160-buckboost";
+ priv->desc.type = REGULATOR_VOLTAGE;
+ priv->desc.owner = THIS_MODULE;
+ priv->desc.min_uV = RT6160_VOUT_MINUV;
+ priv->desc.uV_step = RT6160_VOUT_STPUV;
+ if (vsel_active_low)
+ priv->desc.vsel_reg = RT6160_REG_VSELL;
+ else
+ priv->desc.vsel_reg = RT6160_REG_VSELH;
+ priv->desc.vsel_mask = RT6160_VSEL_MASK;
+ priv->desc.n_voltages = RT6160_N_VOUTS;
+ priv->desc.ramp_reg = RT6160_REG_CNTL;
+ priv->desc.ramp_mask = RT6160_RAMPRATE_MASK;
+ priv->desc.ramp_delay_table = rt6160_ramp_tables;
+ priv->desc.n_ramp_values = ARRAY_SIZE(rt6160_ramp_tables);
+ priv->desc.of_map_mode = rt6160_of_map_mode;
+ priv->desc.ops = &rt6160_regulator_ops;
+
+ regulator_cfg.dev = &i2c->dev;
+ regulator_cfg.of_node = i2c->dev.of_node;
+ regulator_cfg.regmap = priv->regmap;
+ regulator_cfg.driver_data = priv;
+ regulator_cfg.init_data = of_get_regulator_init_data(&i2c->dev, i2c->dev.of_node,
+ &priv->desc);
+
+ rdev = devm_regulator_register(&i2c->dev, &priv->desc, &regulator_cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt6160_of_match_table[] = {
+ { .compatible = "richtek,rt6160", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt6160_of_match_table);
+
+static struct i2c_driver rt6160_driver = {
+ .driver = {
+ .name = "rt6160",
+ .of_match_table = rt6160_of_match_table,
+ },
+ .probe_new = rt6160_probe,
+};
+module_i2c_driver(rt6160_driver);
+
+MODULE_DESCRIPTION("Richtek RT6160 voltage regulator driver");
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
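A quick consistency check of the rt6160 voltage math (illustration only):

	/*
	 * RT6160_N_VOUTS = (5200000 - 2025000) / 25000 + 1 = 128, which
	 * exactly fills the 7-bit RT6160_VSEL_MASK (GENMASK(6, 0), i.e.
	 * selectors 0..127).
	 */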
diff --git a/drivers/regulator/rt6245-regulator.c b/drivers/regulator/rt6245-regulator.c
new file mode 100644
index 000000000000..d3299a72fd10
--- /dev/null
+++ b/drivers/regulator/rt6245-regulator.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT6245_VIRT_OCLIMIT 0x00
+#define RT6245_VIRT_OTLEVEL 0x01
+#define RT6245_VIRT_PGDLYTIME 0x02
+#define RT6245_VIRT_SLEWRATE 0x03
+#define RT6245_VIRT_SWFREQ 0x04
+#define RT6245_VIRT_VOUT 0x05
+
+#define RT6245_VOUT_MASK GENMASK(6, 0)
+#define RT6245_SLEW_MASK GENMASK(2, 0)
+#define RT6245_CHKSUM_MASK BIT(7)
+#define RT6245_CODE_MASK GENMASK(6, 0)
+
+/* HW Enable + Soft start time */
+#define RT6245_ENTIME_IN_US 5000
+
+#define RT6245_VOUT_MINUV 437500
+#define RT6245_VOUT_MAXUV 1387500
+#define RT6245_VOUT_STEPUV 12500
+#define RT6245_NUM_VOUT ((RT6245_VOUT_MAXUV - RT6245_VOUT_MINUV) / RT6245_VOUT_STEPUV + 1)
+
+struct rt6245_priv {
+ struct gpio_desc *enable_gpio;
+ bool enable_state;
+};
+
+static int rt6245_enable(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int ret;
+
+ if (!priv->enable_gpio)
+ return 0;
+
+ gpiod_direction_output(priv->enable_gpio, 1);
+ usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);
+
+ regcache_cache_only(regmap, false);
+ ret = regcache_sync(regmap);
+ if (ret)
+ return ret;
+
+ priv->enable_state = true;
+ return 0;
+}
+
+static int rt6245_disable(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
+
+ if (!priv->enable_gpio)
+ return -EINVAL;
+
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
+
+ gpiod_direction_output(priv->enable_gpio, 0);
+
+ priv->enable_state = false;
+ return 0;
+}
+
+static int rt6245_is_enabled(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+
+ return priv->enable_state ? 1 : 0;
+}
+
+static const struct regulator_ops rt6245_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .enable = rt6245_enable,
+ .disable = rt6245_disable,
+ .is_enabled = rt6245_is_enabled,
+};
+
+/* The base ramp rate is 12500 uV/us; the divisor ranges from 1 to 8 */
+static const unsigned int rt6245_ramp_delay_table[] = {
+ 12500, 6250, 4167, 3125, 2500, 2083, 1786, 1562
+};
+
+static const struct regulator_desc rt6245_regulator_desc = {
+ .name = "rt6245-regulator",
+ .ops = &rt6245_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = RT6245_VOUT_MINUV,
+ .uV_step = RT6245_VOUT_STEPUV,
+ .n_voltages = RT6245_NUM_VOUT,
+ .ramp_delay_table = rt6245_ramp_delay_table,
+ .n_ramp_values = ARRAY_SIZE(rt6245_ramp_delay_table),
+ .owner = THIS_MODULE,
+ .vsel_reg = RT6245_VIRT_VOUT,
+ .vsel_mask = RT6245_VOUT_MASK,
+ .ramp_reg = RT6245_VIRT_SLEWRATE,
+ .ramp_mask = RT6245_SLEW_MASK,
+};
+
+static int rt6245_init_device_properties(struct device *dev)
+{
+ const struct {
+ const char *name;
+ unsigned int reg;
+ } rt6245_props[] = {
+ { "richtek,oc-level-select", RT6245_VIRT_OCLIMIT },
+ { "richtek,ot-level-select", RT6245_VIRT_OTLEVEL },
+ { "richtek,pgdly-time-select", RT6245_VIRT_PGDLYTIME },
+ { "richtek,switch-freq-select", RT6245_VIRT_SWFREQ }
+ };
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ u8 propval;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(rt6245_props); i++) {
+ ret = device_property_read_u8(dev, rt6245_props[i].name, &propval);
+ if (ret)
+ continue;
+
+ ret = regmap_write(regmap, rt6245_props[i].reg, propval);
+ if (ret) {
+			dev_err(dev, "Failed to apply [%s:%d]\n", rt6245_props[i].name, propval);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *i2c = context;
+ const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
+ unsigned int code, bit_count;
+
+ code = func_base[reg];
+ code += val;
+
+	/* checksum: set bit 7 to give the byte even parity over bits 6 to 0 */
+ bit_count = hweight8(code & RT6245_CODE_MASK);
+ if (bit_count % 2)
+ code |= RT6245_CHKSUM_MASK;
+ else
+ code &= ~RT6245_CHKSUM_MASK;
+
+ return i2c_smbus_write_byte(i2c, code);
+}
+
+static const struct reg_default rt6245_reg_defaults[] = {
+ /* Default over current 14A */
+ { RT6245_VIRT_OCLIMIT, 2 },
+	/* Default over temperature 150 degrees Celsius */
+ { RT6245_VIRT_OTLEVEL, 0 },
+ /* Default power good delay time 10us */
+ { RT6245_VIRT_PGDLYTIME, 1 },
+	/* Default slew rate 12.5mV/us */
+ { RT6245_VIRT_SLEWRATE, 0 },
+	/* Default switching frequency 800kHz */
+ { RT6245_VIRT_SWFREQ, 1 },
+ /* Default voltage 750mV */
+ { RT6245_VIRT_VOUT, 0x19 }
+};
+
+static const struct regmap_config rt6245_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT6245_VIRT_VOUT,
+ .cache_type = REGCACHE_FLAT,
+ .reg_defaults = rt6245_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(rt6245_reg_defaults),
+ .reg_write = rt6245_reg_write,
+};
+
+static int rt6245_probe(struct i2c_client *i2c)
+{
+ struct rt6245_priv *priv;
+ struct regmap *regmap;
+ struct regulator_config regulator_cfg = {};
+ struct regulator_dev *rdev;
+ int ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->enable_state = true;
+
+ priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+ return PTR_ERR(priv->enable_gpio);
+ }
+
+ usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);
+
+ regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt6245_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to initialize the regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ ret = rt6245_init_device_properties(&i2c->dev);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to initialize device properties\n");
+ return ret;
+ }
+
+ regulator_cfg.dev = &i2c->dev;
+ regulator_cfg.of_node = i2c->dev.of_node;
+ regulator_cfg.regmap = regmap;
+ regulator_cfg.driver_data = priv;
+ regulator_cfg.init_data = of_get_regulator_init_data(&i2c->dev, i2c->dev.of_node,
+ &rt6245_regulator_desc);
+ rdev = devm_regulator_register(&i2c->dev, &rt6245_regulator_desc, &regulator_cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt6245_of_match_table[] = {
+ { .compatible = "richtek,rt6245", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt6245_of_match_table);
+
+static struct i2c_driver rt6245_driver = {
+ .driver = {
+ .name = "rt6245",
+ .of_match_table = rt6245_of_match_table,
+ },
+ .probe_new = rt6245_probe,
+};
+module_i2c_driver(rt6245_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT6245 Regulator Driver");
+MODULE_LICENSE("GPL v2");
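The rt6245_reg_write() hook above encodes every virtual register write as a single SMBus byte carrying a parity bit; a worked example using the VOUT default:

	/*
	 * Worked example (illustration only), writing the VOUT default 0x19:
	 *   code = func_base[RT6245_VIRT_VOUT] + 0x19 = 0x00 + 0x19 = 0x19
	 *   hweight8(0x19 & RT6245_CODE_MASK) = 3 (odd), so RT6245_CHKSUM_MASK
	 *   (bit 7) is set and 0x99 goes out on the wire.
	 * Similarly, RT6245_NUM_VOUT = (1387500 - 437500) / 12500 + 1 = 77.
	 */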
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
index 852fb2596ffd..4bca64de0f67 100644
--- a/drivers/regulator/rtmv20-regulator.c
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -27,6 +27,7 @@
#define RTMV20_REG_LDIRQ 0x30
#define RTMV20_REG_LDSTAT 0x40
#define RTMV20_REG_LDMASK 0x50
+#define RTMV20_MAX_REGS (RTMV20_REG_LDMASK + 1)
#define RTMV20_VID_MASK GENMASK(7, 4)
#define RICHTEK_VID 0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
return 0;
}
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ int sel;
+
+ if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+ return -EINVAL;
+
+ if (max_uA > RTMV20_LSW_MAXUA)
+ max_uA = RTMV20_LSW_MAXUA;
+
+ sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+ /* Ensure the selected setting is still in range */
+ if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+ rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+ if (ret)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
static const struct regulator_ops rtmv20_regulator_ops = {
- .set_current_limit = regulator_set_current_limit_regmap,
- .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = rtmv20_lsw_set_current_limit,
+ .get_current_limit = rtmv20_lsw_get_current_limit,
.enable = rtmv20_lsw_enable,
.disable = rtmv20_lsw_disable,
.is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = RTMV20_REG_LDMASK,
+ .num_reg_defaults_raw = RTMV20_MAX_REGS,
.writeable_reg = rtmv20_is_accessible_reg,
.readable_reg = rtmv20_is_accessible_reg,
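The replacement rtmv20 ops round the requested current window down to a supported step. With hypothetical constants (the real RTMV20_LSW_MINUA and RTMV20_LSW_STEPUA are defined elsewhere in this file and are not shown in the diff), say MINUA = 2000000 and STEPUA = 300000:

	/*
	 * Hypothetical example, constants assumed:
	 *   min_uA = 2500000, max_uA = 3000000
	 *   sel = (3000000 - 2000000) / 300000 = 3
	 *   range check: 3 * 300000 + 2000000 = 2900000 >= min_uA, so sel 3
	 *   is programmed through csel_reg/csel_mask.
	 */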
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index bbadf72b94e8..1f02f60ad136 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
sreg->desc.uV_step =
vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
sreg->desc.linear_min_sel = 0;
- sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+ sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
sreg->desc.ops = &scmi_reg_linear_ops;
}
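The added +1 fixes an off-by-one in the level count: for a hypothetical linear segment from 400000 to 900000 uV in 100000 uV steps, delta_uV / uV_step is 5 but there are six levels (400000, 500000, ..., 900000), so the old expression silently dropped the maximum voltage.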
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index cf10fdb72e32..2d7597c76e4a 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -32,7 +32,8 @@ struct stpmic1_regulator_cfg {
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
-static int stpmic1_set_icc(struct regulator_dev *rdev);
+static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
+ bool enable);
static unsigned int stpmic1_map_mode(unsigned int mode);
enum {
@@ -491,11 +492,26 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
STPMIC1_BUCK_MODE_LP, value);
}
-static int stpmic1_set_icc(struct regulator_dev *rdev)
+static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
+ bool enable)
{
struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev);
struct regmap *regmap = rdev_get_regmap(rdev);
+	/*
+	 * It looks like a single bit in a register controls whether OCP is
+	 * enabled, so it might be possible to turn it off here if that were
+	 * requested. This is not supported for lack of hardware to test on;
+	 * feel free to implement it if you have the HW and need the kernel
+	 * to disable this.
+	 *
+	 * It is also unknown whether the limit can be configured, or whether
+	 * error/warning severities are supported instead of protect, so the
+	 * existing logic is kept and the answer is assumed to be no.
+	 */
+ if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
+ return -EINVAL;
+
/* enable switch off in case of over current */
return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask,
cfg->icc_mask);
diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
new file mode 100644
index 000000000000..e021ae08cbaa
--- /dev/null
+++ b/drivers/regulator/sy7636a-regulator.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Functions to access SY7636A power management chip voltages
+//
+// Copyright (C) 2019 reMarkable AS - http://www.remarkable.com/
+//
+// Authors: Lars Ivar Miljeteig <lars.ivar.miljeteig@remarkable.com>
+// Alistair Francis <alistair@alistair23.me>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/sy7636a.h>
+
+#define SY7636A_POLL_ENABLED_TIME 500
+
+static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val, val_h;
+
+ ret = regmap_read(rdev->regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rdev->regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &val_h);
+ if (ret)
+ return ret;
+
+ val |= (val_h << VCOM_ADJUST_CTRL_SHIFT);
+
+ return (val & VCOM_ADJUST_CTRL_MASK) * VCOM_ADJUST_CTRL_SCAL;
+}
+
+static int sy7636a_get_status(struct regulator_dev *rdev)
+{
+ struct sy7636a *sy7636a = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ ret = gpiod_get_value_cansleep(sy7636a->pgood_gpio);
+ if (ret < 0)
+ dev_err(&rdev->dev, "Failed to read pgood gpio: %d\n", ret);
+
+ return ret;
+}
+
+static const struct regulator_ops sy7636a_vcom_volt_ops = {
+ .get_voltage = sy7636a_get_vcom_voltage_op,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = sy7636a_get_status,
+};
+
+static const struct regulator_desc desc = {
+ .name = "vcom",
+ .id = 0,
+ .ops = &sy7636a_vcom_volt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = SY7636A_REG_OPERATION_MODE_CRL,
+ .enable_mask = SY7636A_OPERATION_MODE_CRL_ONOFF,
+ .poll_enabled_time = SY7636A_POLL_ENABLED_TIME,
+ .regulators_node = of_match_ptr("regulators"),
+ .of_match = of_match_ptr("vcom"),
+};
+
+static int sy7636a_regulator_probe(struct platform_device *pdev)
+{
+ struct sy7636a *sy7636a = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct gpio_desc *gdp;
+ int ret;
+
+ if (!sy7636a)
+ return -EPROBE_DEFER;
+
+ platform_set_drvdata(pdev, sy7636a);
+
+ gdp = devm_gpiod_get(sy7636a->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+ dev_err(sy7636a->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+ sy7636a->pgood_gpio = gdp;
+
+ ret = regmap_write(sy7636a->regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
+ if (ret) {
+ dev_err(sy7636a->dev, "Failed to initialize regulator: %d\n", ret);
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.dev->of_node = sy7636a->dev->of_node;
+ config.driver_data = sy7636a;
+ config.regmap = sy7636a->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(sy7636a->dev, "Failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id sy7636a_regulator_id_table[] = {
+ { "sy7636a-regulator", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sy7636a_regulator_id_table);
+
+static struct platform_driver sy7636a_regulator_driver = {
+ .driver = {
+ .name = "sy7636a-regulator",
+ },
+ .probe = sy7636a_regulator_probe,
+ .id_table = sy7636a_regulator_id_table,
+};
+module_platform_driver(sy7636a_regulator_driver);
+
+MODULE_AUTHOR("Lars Ivar Miljeteig <lars.ivar.miljeteig@remarkable.com>");
+MODULE_DESCRIPTION("SY7636A voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 2e02e26b516c..e75b0973e325 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -201,6 +201,7 @@ static const struct of_device_id uniphier_regulator_match[] = {
},
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
static struct platform_driver uniphier_regulator_driver = {
.probe = uniphier_regulator_probe,
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 8e3b5a67cfd8..8ca28664776e 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -29,15 +29,15 @@ struct userspace_consumer_data {
struct regulator_bulk_data *supplies;
};
-static ssize_t reg_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static ssize_t reg_show_state(struct device *dev,
+static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
@@ -48,8 +48,8 @@ static ssize_t reg_show_state(struct device *dev,
return sprintf(buf, "disabled\n");
}
-static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
bool enabled;
@@ -87,8 +87,8 @@ static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(name, 0444, reg_show_name, NULL);
-static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state);
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RW(state);
static struct attribute *attributes[] = {
&dev_attr_name.attr,
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index e5daee4f9373..c1404d3dae2c 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -459,8 +459,10 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
if (ids)
for (i = 0; ids[i].name[0]; i++)
- if (rpmsg_id_match(rpdev, &ids[i]))
+ if (rpmsg_id_match(rpdev, &ids[i])) {
+ rpdev->id.driver_data = ids[i].driver_data;
return 1;
+ }
return of_driver_match_device(dev, drv);
}
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 6655035e5164..80dc479a6ff0 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -75,7 +75,7 @@ static int __mtk_rtc_read_time(struct mt6397_rtc *rtc,
tm->tm_min = data[RTC_OFFSET_MIN];
tm->tm_hour = data[RTC_OFFSET_HOUR];
tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_TC_MTH_MASK;
tm->tm_year = data[RTC_OFFSET_YEAR];
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC, sec);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 1b9e1442e6a5..fd42a5fffaed 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
+static int dasd_diag_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.check_device = dasd_diag_check_device,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
.setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4789410885e4..3ad319aee51e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
+static int dasd_fba_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_fba_pe_handler,
.setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 8d6587ec73e2..493e8469893c 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -109,9 +109,9 @@ int dasd_scan_partitions(struct dasd_block *block)
return -ENODEV;
}
- mutex_lock(&bdev->bd_mutex);
- rc = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_lock(&block->gdp->open_mutex);
+ rc = bdev_disk_changed(block->gdp, false);
+ mutex_unlock(&block->gdp->open_mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
@@ -145,9 +145,9 @@ void dasd_destroy_partitions(struct dasd_block *block)
bdev = block->bdev;
block->bdev = NULL;
- mutex_lock(&bdev->bd_mutex);
- bdev_disk_changed(bdev, true);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ bdev_disk_changed(bdev->bd_disk, true);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
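
Both dasd_genhd.c hunks move partition-rescan serialization from the per-bdev bd_mutex to the gendisk-wide open_mutex, so every block-device view of the same disk contends on one lock owned by the disk. A compact pthreads model of that ownership change (hypothetical structs, not the block layer's):

#include <pthread.h>
#include <stdio.h>

struct disk {
	pthread_mutex_t open_mutex;	/* one lock for the whole disk */
	int partitions_valid;
};

struct bdev {
	struct disk *disk;		/* every partition view points here */
};

static void rescan_partitions(struct bdev *bdev)
{
	/* The old code locked a per-bdev mutex; now all views share the disk's. */
	pthread_mutex_lock(&bdev->disk->open_mutex);
	bdev->disk->partitions_valid = 1;
	pthread_mutex_unlock(&bdev->disk->open_mutex);
}

int main(void)
{
	struct disk d = { .open_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct bdev part0 = { .disk = &d }, part1 = { .disk = &d };

	rescan_partitions(&part0);
	rescan_partitions(&part1);
	printf("valid=%d\n", d.partitions_valid);
	return 0;
}
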
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1c59b0e86a9f..155428bfed8a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -297,7 +297,6 @@ struct dasd_discipline {
* e.g. verify that new path is compatible with the current
* configuration.
*/
- int (*verify_path)(struct dasd_device *, __u8);
int (*pe_handler)(struct dasd_device *, __u8, __u8);
/*
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index da33cb4cba28..7faa56399999 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -90,7 +90,6 @@ struct dcssblk_dev_info {
int segment_type;
unsigned char save_pending;
unsigned char is_shared;
- struct request_queue *dcssblk_queue;
int num_of_segments;
struct list_head seg_list;
struct dax_device *dax_dev;
@@ -429,9 +428,7 @@ removeseg:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_queue(dev_info->dcssblk_queue);
- dev_info->gd->queue = NULL;
- put_disk(dev_info->gd);
+ blk_cleanup_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
if (device_remove_file_self(dev, attr)) {
@@ -644,18 +641,17 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
- dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
+ dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
if (dev_info->gd == NULL) {
rc = -ENOMEM;
goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
+ dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
dev_info->gd->fops = &dcssblk_devops;
- dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE);
- dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
- blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
- blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+ blk_queue_logical_block_size(dev_info->gd->queue, 4096);
+ blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -719,9 +715,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
put_dev:
list_del(&dev_info->lh);
- blk_cleanup_queue(dev_info->dcssblk_queue);
- dev_info->gd->queue = NULL;
- put_disk(dev_info->gd);
+ blk_cleanup_disk(dev_info->gd);
list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
segment_unload(seg_info->segment_name);
}
@@ -731,9 +725,7 @@ put_dev:
dev_list_del:
list_del(&dev_info->lh);
release_gd:
- blk_cleanup_queue(dev_info->dcssblk_queue);
- dev_info->gd->queue = NULL;
- put_disk(dev_info->gd);
+ blk_cleanup_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
seg_list_del:
if (dev_info == NULL)
@@ -801,9 +793,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
- blk_cleanup_queue(dev_info->dcssblk_queue);
- dev_info->gd->queue = NULL;
- put_disk(dev_info->gd);
+ blk_cleanup_disk(dev_info->gd);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
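
The dcssblk conversion swaps the alloc_disk()/blk_alloc_queue() pair, and its two-step teardown, for blk_alloc_disk()/blk_cleanup_disk(), which create and destroy the disk together with its queue; every error path shrinks to a single cleanup call. A userspace model of why a combined constructor simplifies unwinding (the names model the idea, not the block-layer API):

#include <stdio.h>
#include <stdlib.h>

struct queue { int flags; };
struct disk { struct queue *queue; };

/* Combined constructor: either both objects exist or neither does. */
static struct disk *disk_alloc(void)
{
	struct disk *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->queue = calloc(1, sizeof(*d->queue));
	if (!d->queue) {
		free(d);
		return NULL;
	}
	return d;
}

/* Combined destructor: callers never free queue and disk separately. */
static void disk_cleanup(struct disk *d)
{
	free(d->queue);
	free(d);
}

int main(void)
{
	struct disk *d = disk_alloc();

	if (!d)
		return 1;
	d->queue->flags |= 1;	/* configure through d->queue, as the patch does */
	disk_cleanup(d);
	return 0;
}
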
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index a4f6f2e62b1d..88cba6212ee2 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -462,12 +462,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
if (ret)
goto out;
- rq = blk_mq_init_queue(&bdev->tag_set);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
+ bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
+ if (IS_ERR(bdev->gendisk)) {
+ ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
- bdev->rq = rq;
+ rq = bdev->rq = bdev->gendisk->queue;
nr_max_blk = min(scmdev->nr_max_block,
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
@@ -477,17 +477,11 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
- bdev->gendisk = alloc_disk(SCM_NR_PARTS);
- if (!bdev->gendisk) {
- ret = -ENOMEM;
- goto out_queue;
- }
- rq->queuedata = scmdev;
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
- bdev->gendisk->queue = rq;
bdev->gendisk->major = scm_major;
bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
+ bdev->gendisk->minors = SCM_NR_PARTS;
len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
if (devindex > 25) {
@@ -504,8 +498,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
return 0;
-out_queue:
- blk_cleanup_queue(rq);
out_tag:
blk_mq_free_tag_set(&bdev->tag_set);
out:
@@ -516,9 +508,8 @@ out:
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
del_gendisk(bdev->gendisk);
- blk_cleanup_queue(bdev->gendisk->queue);
+ blk_cleanup_disk(bdev->gendisk);
blk_mq_free_tag_set(&bdev->tag_set);
- put_disk(bdev->gendisk);
}
void scm_blk_set_available(struct scm_blk_dev *bdev)
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index d1ed39162943..91ef710edfd2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -56,7 +56,6 @@ typedef struct {
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
-static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
static unsigned int xpram_pages;
static int xpram_devs;
@@ -341,17 +340,13 @@ static int __init xpram_setup_blkdev(void)
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
- xpram_disks[i] = alloc_disk(1);
+ xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE);
if (!xpram_disks[i])
goto out;
- xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE);
- if (!xpram_queues[i]) {
- put_disk(xpram_disks[i]);
- goto out;
- }
- blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
- blk_queue_logical_block_size(xpram_queues[i], 4096);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM,
+ xpram_disks[i]->queue);
+ blk_queue_logical_block_size(xpram_disks[i]->queue, 4096);
}
/*
@@ -373,9 +368,9 @@ static int __init xpram_setup_blkdev(void)
offset += xpram_devices[i].size;
disk->major = XPRAM_MAJOR;
disk->first_minor = i;
+ disk->minors = 1;
disk->fops = &xpram_devops;
disk->private_data = &xpram_devices[i];
- disk->queue = xpram_queues[i];
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
add_disk(disk);
@@ -383,10 +378,8 @@ static int __init xpram_setup_blkdev(void)
return 0;
out:
- while (i--) {
- blk_cleanup_queue(xpram_queues[i]);
- put_disk(xpram_disks[i]);
- }
+ while (i--)
+ blk_cleanup_disk(xpram_disks[i]);
return rc;
}
@@ -434,8 +427,7 @@ static void __exit xpram_exit(void)
int i;
for (i = 0; i < xpram_devs; i++) {
del_gendisk(xpram_disks[i]);
- blk_cleanup_queue(xpram_queues[i]);
- put_disk(xpram_disks[i]);
+ blk_cleanup_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
platform_device_unregister(xpram_pdev);
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index b9febc581b1f..8d1b2771c1aa 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
+	/* This is an error in the caller. */
+ if (cp->initialized)
+ return -EBUSY;
+
/*
* We only support prefetching the channel program. We assume all channel
* programs executed by supported guests likewise support prefetching.
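
cp_init() now refuses to re-initialize a channel program that is still live, returning -EBUSY instead of silently clobbering state the first caller owns; the guard turns a caller bug into a visible error. A trivial sketch of the entry-point guard:

#include <errno.h>
#include <stdio.h>

struct channel_program { int initialized; };

static int cp_init_model(struct channel_program *cp)
{
	if (cp->initialized)
		return -EBUSY;	/* surface the caller's bug instead of hiding it */
	cp->initialized = 1;
	return 0;
}

int main(void)
{
	struct channel_program cp = { 0 };

	printf("first: %d, second: %d\n", cp_init_model(&cp), cp_init_model(&cp));
	return 0;
}
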
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 8c625b530035..9b61e9b131ad 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
struct vfio_ccw_private *private;
struct irb *irb;
bool is_final;
+ bool cp_is_finished = false;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
cp_free(&private->cp);
+ cp_is_finished = true;
+ }
}
mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
- if (private->mdev && is_final)
+ /*
+ * Reset to IDLE only if processing of a channel program
+ * has finished. Do not overwrite a possible processing
+ * state if the final interrupt was for HSCH or CSCH.
+ */
+ if (private->mdev && cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 23e61aa638e4..e435a9cd92da 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
err_out:
+ private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
io_region->ret_code, errstr);
}
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 491a64c61fff..c57d2a7f0919 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
}
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
- if (region->ret_code != 0)
- private->state = VFIO_CCW_STATE_IDLE;
ret = (region->ret_code != 0) ? region->ret_code : count;
out_unlock:
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ecefc25eff0c..337353c9655e 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ bool found = false;
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->msg, aq->reply->len);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count--;
+ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->receive(aq, ap_msg, aq->reply);
+ found = true;
break;
}
+ if (!found) {
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+ __func__, aq->reply->psmid,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ }
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count++;
+ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
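
The ap_queue changes stop trusting queue_count to stay in range: a reply with no matching request (now logged as unassociated) would otherwise drive the counter negative and confuse the timeout logic, so both paths clamp with max_t. A sketch of why the clamp matters:

#include <stdio.h>

#define max_int(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int queue_count = 0;

	/* A spurious reply with nothing queued: unclamped this would go to -1. */
	queue_count = max_int(0, queue_count - 1);
	printf("after spurious reply: %d\n", queue_count);

	/* The enqueue side clamps to at least 1 for the same reason. */
	queue_count = max_int(1, queue_count + 1);
	printf("after enqueue: %d\n", queue_count);
	return 0;
}
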
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index b2c7e10dfdcd..122c85c22469 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -366,16 +366,6 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
-
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of control domain.
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
- mutex_unlock(&matrix_dev->lock);
- return -EBUSY;
- }
-
vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
kfree(matrix_mdev);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 260860cf3aa1..5a0c2f07a3a2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -118,24 +118,6 @@ static struct device_driver netiucv_driver = {
.bus = &iucv_bus,
};
-static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
-static void netiucv_callback_connack(struct iucv_path *, u8 *);
-static void netiucv_callback_connrej(struct iucv_path *, u8 *);
-static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
-static void netiucv_callback_connres(struct iucv_path *, u8 *);
-static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
-static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
-
-static struct iucv_handler netiucv_handler = {
- .path_pending = netiucv_callback_connreq,
- .path_complete = netiucv_callback_connack,
- .path_severed = netiucv_callback_connrej,
- .path_quiesced = netiucv_callback_connsusp,
- .path_resumed = netiucv_callback_connres,
- .message_pending = netiucv_callback_rx,
- .message_complete = netiucv_callback_txdone
-};
-
/**

* Per-connection profiling data
*/
@@ -774,6 +756,16 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
}
}
+static struct iucv_handler netiucv_handler = {
+ .path_pending = netiucv_callback_connreq,
+ .path_complete = netiucv_callback_connack,
+ .path_severed = netiucv_callback_connrej,
+ .path_quiesced = netiucv_callback_connsusp,
+ .path_resumed = netiucv_callback_connres,
+ .message_pending = netiucv_callback_rx,
+ .message_complete = netiucv_callback_txdone,
+};
+
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fd9b869d278e..f4d554ea0c93 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -417,13 +417,17 @@ enum qeth_qdio_out_buffer_state {
QETH_QDIO_BUF_EMPTY,
/* Filled by driver; owned by hardware in order to be sent. */
QETH_QDIO_BUF_PRIMED,
- /* Discovered by the TX completion code: */
- QETH_QDIO_BUF_PENDING,
- /* Finished by the TX completion code: */
- QETH_QDIO_BUF_NEED_QAOB,
- /* Received QAOB notification on CQ: */
- QETH_QDIO_BUF_QAOB_OK,
- QETH_QDIO_BUF_QAOB_ERROR,
+};
+
+enum qeth_qaob_state {
+ QETH_QAOB_ISSUED,
+ QETH_QAOB_PENDING,
+ QETH_QAOB_DONE,
+};
+
+struct qeth_qaob_priv1 {
+ unsigned int state;
+ u8 queue_no;
};
struct qeth_qdio_out_buffer {
@@ -433,9 +437,8 @@ struct qeth_qdio_out_buffer {
unsigned int frames;
unsigned int bytes;
struct sk_buff_head skb_list;
- int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ DECLARE_BITMAP(from_kmem_cache, QDIO_MAX_ELEMENTS_PER_BUFFER);
- struct qeth_qdio_out_q *q;
struct list_head list_entry;
struct qaob *aob;
};
@@ -483,6 +486,7 @@ struct qeth_out_q_stats {
u64 stopped;
u64 doorbell;
u64 coal_frames;
+ u64 completion_irq;
u64 completion_yield;
u64 completion_timer;
@@ -526,6 +530,7 @@ struct qeth_qdio_out_q {
unsigned int coalesce_usecs;
unsigned int max_coalesced_frames;
+ unsigned int rescan_usecs;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -612,7 +617,6 @@ struct qeth_channel {
struct ccw_device *ccwdev;
struct qeth_cmd_buffer *active_cmd;
enum qeth_channel_states state;
- atomic_t irq_pending;
};
struct qeth_reply {
@@ -662,11 +666,6 @@ static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
}
-static inline bool qeth_trylock_channel(struct qeth_channel *channel)
-{
- return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
-}
-
/**
* OSA card-related definitions
*/
@@ -886,13 +885,24 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
return card->state == CARD_STATE_SOFTSETUP;
}
+static inline bool qeth_use_tx_irqs(struct qeth_card *card)
+{
+ return !IS_IQD(card);
+}
+
static inline void qeth_unlock_channel(struct qeth_card *card,
struct qeth_channel *channel)
{
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
wake_up(&card->wait_q);
}
+static inline bool qeth_trylock_channel(struct qeth_channel *channel,
+ struct qeth_cmd_buffer *cmd)
+{
+ return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL;
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
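
The qeth_core.h hunks fold the old irq_pending flag into the active_cmd pointer itself: cmpxchg(NULL -> cmd) both takes the channel and records which command owns it, and unlocking is an exchange back to NULL. A C11 model of that pointer-as-lock pattern (userspace atomics standing in for the kernel's cmpxchg/xchg):

#include <stdatomic.h>
#include <stdio.h>

struct cmd { int id; };

static _Atomic(struct cmd *) active_cmd;

/* Lock acquisition doubles as ownership bookkeeping. */
static int trylock_channel(struct cmd *cmd)
{
	struct cmd *expected = NULL;

	return atomic_compare_exchange_strong(&active_cmd, &expected, cmd);
}

static void unlock_channel(void)
{
	atomic_exchange(&active_cmd, NULL);
}

int main(void)
{
	struct cmd a = { 1 }, b = { 2 };

	printf("a: %d\n", trylock_channel(&a));	/* 1: got it */
	printf("b: %d\n", trylock_channel(&b));	/* 0: busy, a owns it */
	unlock_channel();
	printf("b: %d\n", trylock_channel(&b));	/* 1: now free */
	return 0;
}
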
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a1f08e9aa064..62f88ccbd03f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -70,9 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum iucv_tx_notify notification);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -434,65 +431,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
-static void qeth_qdio_handle_aob(struct qeth_card *card,
- unsigned long phys_aob_addr)
-{
- enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
- struct qaob *aob;
- struct qeth_qdio_out_buffer *buffer;
- enum iucv_tx_notify notification;
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- aob = (struct qaob *) phys_to_virt(phys_aob_addr);
- QETH_CARD_TEXT(card, 5, "haob");
- QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
- buffer = (struct qeth_qdio_out_buffer *) aob->user1;
- QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
-
- if (aob->aorc) {
- QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
- new_state = QETH_QDIO_BUF_QAOB_ERROR;
- }
-
- switch (atomic_xchg(&buffer->state, new_state)) {
- case QETH_QDIO_BUF_PRIMED:
- /* Faster than TX completion code, let it handle the async
- * completion for us. It will also recycle the QAOB.
- */
- break;
- case QETH_QDIO_BUF_PENDING:
- /* TX completion code is active and will handle the async
- * completion for us. It will also recycle the QAOB.
- */
- break;
- case QETH_QDIO_BUF_NEED_QAOB:
- /* TX completion code is already finished. */
- notification = qeth_compute_cq_notification(aob->aorc, 1);
- qeth_notify_skbs(buffer->q, buffer, notification);
-
- /* Free dangling allocations. The attached skbs are handled by
- * qeth_tx_complete_pending_bufs(), and so is the QAOB.
- */
- for (i = 0;
- i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
- i++) {
- void *data = phys_to_virt(aob->sba[i]);
-
- if (data && buffer->is_header[i])
- kmem_cache_free(qeth_core_header_cache, data);
- buffer->is_header[i] = 0;
- }
-
- queue = buffer->q;
- atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
- napi_schedule(&queue->napi);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-}
-
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -1268,7 +1206,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- channel->active_cmd = NULL;
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
@@ -1353,10 +1290,10 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
}
}
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf, bool error,
int budget)
{
- struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* Empty buffer? */
@@ -1400,17 +1337,18 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
int i;
/* is PCI flag set on buffer? */
- if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
atomic_dec(&queue->set_pci_flags_count);
+ QETH_TXQ_STAT_INC(queue, completion_irq);
+ }
- qeth_tx_complete_buf(buf, error, budget);
+ qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
void *data = phys_to_virt(buf->buffer->element[i].addr);
- if (data && buf->is_header[i])
+ if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
- buf->is_header[i] = 0;
}
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
@@ -1434,14 +1372,30 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
struct qeth_qdio_out_buffer *buf, *tmp;
list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
- if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+ struct qeth_qaob_priv1 *priv;
+ struct qaob *aob = buf->aob;
+ enum iucv_tx_notify notify;
+ unsigned int i;
+
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
QETH_CARD_TEXT(card, 5, "fp");
QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
- if (drain)
- qeth_notify_skbs(queue, buf,
- TX_NOTIFY_GENERALERROR);
- qeth_tx_complete_buf(buf, drain, budget);
+ notify = drain ? TX_NOTIFY_GENERALERROR :
+ qeth_compute_cq_notification(aob->aorc, 1);
+ qeth_notify_skbs(queue, buf, notify);
+ qeth_tx_complete_buf(queue, buf, drain, budget);
+
+ for (i = 0;
+ i < aob->sb_count && i < queue->max_elements;
+ i++) {
+ void *data = phys_to_virt(aob->sba[i]);
+
+ if (test_bit(i, buf->from_kmem_cache) && data)
+ kmem_cache_free(qeth_core_header_cache,
+ data);
+ }
list_del(&buf->list_entry);
qeth_free_out_buf(buf);
@@ -1713,11 +1667,10 @@ static int qeth_stop_channel(struct qeth_channel *channel)
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
- if (channel->active_cmd) {
+ if (channel->active_cmd)
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
- channel->active_cmd = NULL;
- }
+
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
@@ -1730,7 +1683,7 @@ static int qeth_start_channel(struct qeth_channel *channel)
int rc;
channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
@@ -2037,7 +1990,7 @@ static int qeth_send_control_data(struct qeth_card *card,
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
- qeth_trylock_channel(channel),
+ qeth_trylock_channel(channel, iob),
timeout);
if (timeout <= 0) {
qeth_put_cmd(iob);
@@ -2057,8 +2010,6 @@ static int qeth_send_control_data(struct qeth_card *card,
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
- if (!rc)
- channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2578,7 +2529,6 @@ static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
- newbuf->q = q;
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
return 0;
@@ -2663,8 +2613,15 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
INIT_LIST_HEAD(&queue->pending_bufs);
spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
- queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
- queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ if (IS_IQD(card)) {
+ queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+ queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ queue->rescan_usecs = QETH_TX_TIMER_USECS;
+ } else {
+ queue->coalesce_usecs = USEC_PER_SEC;
+ queue->max_coalesced_frames = 0;
+ queue->rescan_usecs = 10 * USEC_PER_SEC;
+ }
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
@@ -3601,8 +3558,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
struct qeth_qdio_out_buffer *buf = queue->bufs[index];
- unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
struct qeth_card *card = queue->card;
+ unsigned int frames, usecs;
struct qaob *aob = NULL;
int rc;
int i;
@@ -3629,8 +3586,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!buf->aob)
buf->aob = qdio_allocate_aob();
if (buf->aob) {
+ struct qeth_qaob_priv1 *priv;
+
aob = buf->aob;
- aob->user1 = (u64) buf;
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ priv->state = QETH_QAOB_ISSUED;
+ priv->queue_no = queue->queue_no;
}
}
} else {
@@ -3658,14 +3619,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
-
- if (atomic_read(&queue->set_pci_flags_count))
- qdio_flags |= QDIO_FLAG_PCI_OUT;
}
QETH_TXQ_STAT_INC(queue, doorbell);
- rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
- aob);
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
+ index, count, aob);
switch (rc) {
case 0:
@@ -3673,17 +3631,20 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* ignore temporary SIGA errors without busy condition */
/* Fake the TX completion interrupt: */
- if (IS_IQD(card)) {
- unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
- unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+ frames = READ_ONCE(queue->max_coalesced_frames);
+ usecs = READ_ONCE(queue->coalesce_usecs);
- if (frames && queue->coalesced_frames >= frames) {
- napi_schedule(&queue->napi);
- queue->coalesced_frames = 0;
- QETH_TXQ_STAT_INC(queue, coal_frames);
- } else if (usecs) {
- qeth_tx_arm_timer(queue, usecs);
- }
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (qeth_use_tx_irqs(card) &&
+ atomic_read(&queue->used_buffers) >= 32) {
+ /* Old behaviour carried over from the qdio layer: */
+ napi_schedule(&queue->napi);
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
}
break;
@@ -3769,6 +3730,18 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
+static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
+{
+ struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ unsigned int queue_no = priv->queue_no;
+
+ BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
+
+ if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
+ queue_no < card->qdio.no_out_queues)
+ napi_schedule(&card->qdio.out_qs[queue_no]->napi);
+}
+
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
unsigned int queue, int first_element,
int count)
@@ -3795,7 +3768,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
buffer->element[e].addr) {
unsigned long phys_aob_addr = buffer->element[e].addr;
- qeth_qdio_handle_aob(card, phys_aob_addr);
+ qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -3831,36 +3804,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
- struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct net_device *dev = card->dev;
- struct netdev_queue *txq;
- int i;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_FATAL) {
QETH_CARD_TEXT(card, 2, "achkcond");
netif_tx_stop_all_queues(dev);
qeth_schedule_recovery(card);
- return;
}
-
- for (i = first_element; i < (first_element + count); ++i) {
- struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
-
- qeth_handle_send_error(card, buf, qdio_error);
- qeth_clear_output_buffer(queue, buf, qdio_error, 0);
- }
-
- atomic_sub(count, &queue->used_buffers);
- qeth_check_outbound_queue(queue);
-
- txq = netdev_get_tx_queue(dev, __queue);
- /* xmit may have observed the full-condition, but not yet stopped the
- * txq. In which case the code below won't trigger. So before returning,
- * xmit will re-check the txq's fill level and wake it up if needed.
- */
- if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
- netif_tx_wake_queue(txq);
}
/**
@@ -4101,7 +4052,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
/* HW header is allocated from cache: */
if ((void *)hdr != skb->data)
- buf->is_header[element] = 1;
+ __set_bit(element, buf->from_kmem_cache);
/* HW header was pushed and is contiguous with linear part: */
else if (length > 0 && !PAGE_ALIGNED(data) &&
(data == (char *)hdr + hd_len))
@@ -5256,7 +5207,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
- init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5956,9 +5906,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
/* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
- card->rx.b_count = qdio_get_next_buffers(
- card->data.ccwdev, 0, &card->rx.b_index,
- &card->rx.qdio_err);
+ card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
+ 0, true,
+ &card->rx.b_index,
+ &card->rx.qdio_err);
if (card->rx.b_count <= 0) {
card->rx.b_count = 0;
break;
@@ -6022,6 +5973,16 @@ int qeth_poll(struct napi_struct *napi, int budget)
work_done = qeth_rx_poll(card, budget);
+ if (qeth_use_tx_irqs(card)) {
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ qeth_for_each_output_queue(card, queue, i) {
+ if (!qeth_out_queue_is_empty(queue))
+ napi_schedule(&queue->napi);
+ }
+ }
+
if (card->options.cq == QETH_CQ_ENABLED)
qeth_cq_poll(card);
@@ -6055,6 +6016,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
struct qaob *aob = buffer->aob;
+ struct qeth_qaob_priv1 *priv;
+ enum iucv_tx_notify notify;
if (!aob) {
netdev_WARN_ONCE(card->dev,
@@ -6066,60 +6029,27 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
- switch (atomic_cmpxchg(&buffer->state,
- QETH_QDIO_BUF_PRIMED,
- QETH_QDIO_BUF_PENDING)) {
- case QETH_QDIO_BUF_PRIMED:
- /* We have initial ownership, no QAOB (yet): */
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ /* QAOB hasn't completed yet: */
+ if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
- /* Handle race with qeth_qdio_handle_aob(): */
- switch (atomic_xchg(&buffer->state,
- QETH_QDIO_BUF_NEED_QAOB)) {
- case QETH_QDIO_BUF_PENDING:
- /* No concurrent QAOB notification. */
-
- /* Prepare the queue slot for immediate re-use: */
- qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
- if (qeth_alloc_out_buf(queue, bidx,
- GFP_ATOMIC)) {
- QETH_CARD_TEXT(card, 2, "outofbuf");
- qeth_schedule_recovery(card);
- }
-
- list_add(&buffer->list_entry,
- &queue->pending_bufs);
- /* Skip clearing the buffer: */
- return;
- case QETH_QDIO_BUF_QAOB_OK:
- qeth_notify_skbs(queue, buffer,
- TX_NOTIFY_DELAYED_OK);
- error = false;
- break;
- case QETH_QDIO_BUF_QAOB_ERROR:
- qeth_notify_skbs(queue, buffer,
- TX_NOTIFY_DELAYED_GENERALERROR);
- error = true;
- break;
- default:
- WARN_ON_ONCE(1);
+ /* Prepare the queue slot for immediate re-use: */
+ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+ if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
+ QETH_CARD_TEXT(card, 2, "outofbuf");
+ qeth_schedule_recovery(card);
}
- break;
- case QETH_QDIO_BUF_QAOB_OK:
- /* qeth_qdio_handle_aob() already received a QAOB: */
- qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
- error = false;
- break;
- case QETH_QDIO_BUF_QAOB_ERROR:
- /* qeth_qdio_handle_aob() already received a QAOB: */
- qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
- error = true;
- break;
- default:
- WARN_ON_ONCE(1);
+ list_add(&buffer->list_entry, &queue->pending_bufs);
+ /* Skip clearing the buffer: */
+ return;
}
+ /* QAOB already completed: */
+ notify = qeth_compute_cq_notification(aob->aorc, 0);
+ qeth_notify_skbs(queue, buffer, notify);
+ error = !!aob->aorc;
memset(aob, 0, sizeof(*aob));
} else if (card->options.cq == QETH_CQ_ENABLED) {
qeth_notify_skbs(queue, buffer,
@@ -6138,7 +6068,10 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int work_done = 0;
struct netdev_queue *txq;
- txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+ if (IS_IQD(card))
+ txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+ else
+ txq = netdev_get_tx_queue(dev, queue_no);
while (1) {
unsigned int start, error, i;
@@ -6165,8 +6098,9 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
&start, &error);
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
- if (napi_complete_done(napi, 0))
- qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
+ if (napi_complete_done(napi, 0) &&
+ !atomic_read(&queue->set_pci_flags_count))
+ qeth_tx_arm_timer(queue, queue->rescan_usecs);
return 0;
}
@@ -6179,12 +6113,19 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
bytes += buffer->bytes;
qeth_handle_send_error(card, buffer, error);
- qeth_iqd_tx_complete(queue, bidx, error, budget);
+ if (IS_IQD(card))
+ qeth_iqd_tx_complete(queue, bidx, error, budget);
+ else
+ qeth_clear_output_buffer(queue, buffer, error,
+ budget);
}
- netdev_tx_completed_queue(txq, packets, bytes);
atomic_sub(completed, &queue->used_buffers);
work_done += completed;
+ if (IS_IQD(card))
+ netdev_tx_completed_queue(txq, packets, bytes);
+ else
+ qeth_check_outbound_queue(queue);
/* xmit may have observed the full-condition, but not yet
* stopped the txq. In which case the code below won't trigger.
@@ -7228,6 +7169,8 @@ EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
QETH_CARD_TEXT(card, 4, "qethopen");
@@ -7235,16 +7178,11 @@ int qeth_open(struct net_device *dev)
netif_tx_start_all_queues(dev);
local_bh_disable();
- if (IS_IQD(card)) {
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- qeth_for_each_output_queue(card, queue, i) {
- netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
- QETH_NAPI_WEIGHT);
- napi_enable(&queue->napi);
- napi_schedule(&queue->napi);
- }
+ qeth_for_each_output_queue(card, queue, i) {
+ netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+ QETH_NAPI_WEIGHT);
+ napi_enable(&queue->napi);
+ napi_schedule(&queue->napi);
}
napi_enable(&card->napi);
@@ -7259,6 +7197,8 @@ EXPORT_SYMBOL_GPL(qeth_open);
int qeth_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
QETH_CARD_TEXT(card, 4, "qethstop");
@@ -7266,24 +7206,17 @@ int qeth_stop(struct net_device *dev)
cancel_delayed_work_sync(&card->buffer_reclaim_work);
qdio_stop_irq(CARD_DDEV(card));
- if (IS_IQD(card)) {
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- /* Quiesce the NAPI instances: */
- qeth_for_each_output_queue(card, queue, i)
- napi_disable(&queue->napi);
+ /* Quiesce the NAPI instances: */
+ qeth_for_each_output_queue(card, queue, i)
+ napi_disable(&queue->napi);
- /* Stop .ndo_start_xmit, might still access queue->napi. */
- netif_tx_disable(dev);
+ /* Stop .ndo_start_xmit, might still access queue->napi. */
+ netif_tx_disable(dev);
- qeth_for_each_output_queue(card, queue, i) {
- del_timer_sync(&queue->timer);
- /* Queues may get re-allocated, so remove the NAPIs. */
- netif_napi_del(&queue->napi);
- }
- } else {
- netif_tx_disable(dev);
+ qeth_for_each_output_queue(card, queue, i) {
+ del_timer_sync(&queue->timer);
+ /* Queues may get re-allocated, so remove the NAPIs. */
+ netif_napi_del(&queue->napi);
}
return 0;
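
The heart of this qeth rework replaces the six-state output-buffer machine with a three-state handshake stored in the QAOB: submission marks it ISSUED, the TX-completion path swaps in PENDING, the CQ handler swaps in DONE, and whichever side's xchg observes the other's state knows it ran second and must finish the job. A C11 model of that two-party handoff (threads stand in for the IRQ and NAPI contexts; illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { ISSUED, PENDING, DONE };

static atomic_int state = ISSUED;

/* TX completion path: park the buffer unless the QAOB already completed. */
static void *tx_complete(void *unused)
{
	if (atomic_exchange(&state, PENDING) == DONE)
		printf("tx: QAOB already done, finishing the buffer now\n");
	else
		printf("tx: parked buffer, CQ will finish it\n");
	return NULL;
}

/* CQ handler: mark done; if TX already parked the buffer, kick its NAPI. */
static void *cq_notify(void *unused)
{
	if (atomic_exchange(&state, DONE) == PENDING)
		printf("cq: tx ran first, scheduling its napi\n");
	else
		printf("cq: done before tx completion ran\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, tx_complete, NULL);
	pthread_create(&b, NULL, cq_notify, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
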
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 3a51bbff0ffe..2c4cb300a8fc 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -41,6 +41,7 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("Queue stopped", stopped),
QETH_TXQ_STAT("Doorbell", doorbell),
QETH_TXQ_STAT("IRQ for frames", coal_frames),
+ QETH_TXQ_STAT("Completion IRQ", completion_irq),
QETH_TXQ_STAT("Completion yield", completion_yield),
QETH_TXQ_STAT("Completion timer", completion_timer),
};
@@ -79,10 +80,8 @@ static void qeth_add_stat_strings(u8 **data, const char *prefix,
{
unsigned int i;
- for (i = 0; i < size; i++) {
- snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name);
- *data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(data, "%s%s", prefix, stats[i].name);
}
static int qeth_get_sset_count(struct net_device *dev, int stringset)
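
ethtool_sprintf() replaces the open-coded snprintf-plus-advance loop: the helper writes one fixed-width string slot and moves the caller's cursor itself, so the advance cannot be forgotten. A userspace model of a cursor-advancing emitter (this models the idea, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN 32

/* Format into the current slot, then advance the caller's cursor. */
static void stat_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, GSTRING_LEN, fmt, args);
	va_end(args);
	*data += GSTRING_LEN;
}

int main(void)
{
	char table[3 * GSTRING_LEN] = { 0 };
	char *cursor = table;
	const char *names[] = { "doorbell", "coal_frames", "stopped" };
	int i;

	for (i = 0; i < 3; i++)
		stat_sprintf(&cursor, "txq0 %s", names[i]);
	for (i = 0; i < 3; i++)
		printf("%s\n", table + i * GSTRING_LEN);
	return 0;
}
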
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ca44421a6d6e..2abf86c104d5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -805,8 +805,6 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!netif_device_present(dev))
return -ENODEV;
- if (!(priv->brport_hw_features))
- return -EOPNOTSUPP;
nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
if (nla_type(attr) == IFLA_PROTINFO) {
@@ -832,6 +830,16 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return 0;
if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
return -EINVAL;
+ if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
+ NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
+ "Operation not supported by HW");
+ return -EOPNOTSUPP;
+ }
+ if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
+ NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
+ "Requires NET_SWITCHDEV");
+ return -EOPNOTSUPP;
+ }
enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 924d55a8acbf..65182ad9cdf8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -58,7 +58,6 @@
#include "aicasm_symbol.h"
#include "aicasm_insformat.h"
-int yylineno;
char *yyfilename;
char stock_prefix[] = "aic_";
char *prefix = stock_prefix;
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 7bf7fd5953ac..ed3bdd43c297 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -108,7 +108,7 @@ struct macro_arg {
regex_t arg_regex;
char *replacement_text;
};
-STAILQ_HEAD(macro_arg_list, macro_arg) args;
+STAILQ_HEAD(macro_arg_list, macro_arg);
struct macro_info {
struct macro_arg_list args;
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
index a7515c3039ed..53343a6d8ae1 100644
--- a/drivers/scsi/aic7xxx/scsi_message.h
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -3,6 +3,17 @@
* $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
*/
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+
/* Identify message */ /* M/M */
#define MSG_IDENTIFYFLAG 0x80
#define MSG_IDENTIFY_DISCFLAG 0x40
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1a0dc18d6915..ed300a279a38 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = FAILED;
goto done;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 499c770d405c..e95408314078 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -4811,14 +4811,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- free_irq(pci_irq_vector(pdev, 1), hisi_hba);
- free_irq(pci_irq_vector(pdev, 2), hisi_hba);
- free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- free_irq(pci_irq_vector(pdev, nr), cq);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
pci_free_irq_vectors(pdev);
}
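
The hisi_sas fix matters because these vectors were requested with devm_request_irq(): a plain free_irq() releases the handler but leaves the devres record behind, so managed teardown later tries to free the vector a second time. devm_free_irq() drops the record and the handler together. A toy model of why a managed resource must be released through its manager (hypothetical registry, not the devres API):

#include <stdio.h>

#define MAX_RES 8

/* A toy devres registry: whatever was registered is released on teardown. */
static int registry[MAX_RES];
static int nr_res;

static void managed_request(int irq)
{
	registry[nr_res++] = irq;
}

/* Correct: release through the manager so teardown won't touch it again. */
static void managed_free(int irq)
{
	int i;

	for (i = 0; i < nr_res; i++)
		if (registry[i] == irq) {
			registry[i] = registry[--nr_res];
			return;
		}
}

static void device_teardown(void)
{
	while (nr_res)
		printf("teardown releases irq %d\n", registry[--nr_res]);
}

int main(void)
{
	managed_request(11);
	managed_request(12);
	managed_free(11);	/* like devm_free_irq(): the record goes too */
	device_teardown();	/* releases only irq 12, no double free */
	return 0;
}
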
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 697c09ef259b..cd52664920e1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_enable_async_suspend(&shost->shost_dev);
+ get_device(&shost->shost_gendev);
error = device_add(&shost->shost_dev);
if (error)
goto out_del_gendev;
- get_device(&shost->shost_gendev);
-
if (shost->transportt->host_size) {
shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (!shost->work_q) {
error = -EINVAL;
- goto out_free_shost_data;
+ goto out_del_dev;
}
}
error = scsi_sysfs_add_host(shost);
if (error)
- goto out_destroy_host;
+ goto out_del_dev;
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
- out_destroy_host:
- if (shost->work_q)
- destroy_workqueue(shost->work_q);
- out_free_shost_data:
- kfree(shost->shost_data);
+ /*
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
+ */
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev:
+ /*
+ * Host state is SHOST_RUNNING so we have to explicitly release
+ * ->shost_dev.
+ */
+ put_device(&shost->shost_dev);
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
ida_simple_remove(&host_index_ida, shost->host_no);
- if (parent)
+ if (shost->shost_state != SHOST_CREATED)
put_device(parent);
kfree(shost);
}
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
mutex_init(&shost->scan_mutex);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto fail_kfree;
+ if (index < 0) {
+ kfree(shost);
+ return NULL;
+ }
shost->host_no = index;
shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_index_remove;
+ goto fail;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
- goto fail_kthread;
+ goto fail;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
+ fail:
+ /*
+ * Host state is still SHOST_CREATED and that is enough to release
+ * ->shost_gendev. scsi_host_dev_release() will free
+ * dev_name(&shost->shost_dev).
+ */
+ put_device(&shost->shost_gendev);
- fail_kthread:
- kthread_stop(shost->ehandler);
- fail_index_remove:
- ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
- kfree(shost);
return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
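
Both hosts.c hunks convert hand-rolled unwinding into reference drops: scsi_host_alloc() now fails by put_device() on the gendev so scsi_host_dev_release() frees whatever was set up, and scsi_add_host_with_dma() takes its gendev reference before device_add() so every exit path sees a balanced count. A minimal refcount model of "fail by dropping the reference" (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct host {
	int refcount;
	char *name;
};

static void host_release(struct host *h)
{
	printf("release frees %s\n", h->name ? h->name : "(unnamed)");
	free(h->name);
	free(h);
}

static void host_put(struct host *h)
{
	if (--h->refcount == 0)
		host_release(h);
}

static struct host *host_alloc(int fail_late)
{
	struct host *h = calloc(1, sizeof(*h));

	if (!h)
		return NULL;
	h->refcount = 1;
	h->name = strdup("host0");	/* partial setup... */
	if (fail_late) {
		/* ...and on failure, drop the ref: release cleans up all of it. */
		host_put(h);
		return NULL;
	}
	return h;
}

int main(void)
{
	struct host *h = host_alloc(0);

	host_alloc(1);	/* the error path frees everything via release */
	host_put(h);
	return 0;
}
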
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 6540d48eb0e8..715c34904e3e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -13,6 +13,7 @@
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 41ac9477df7a..10b6c6daaacd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 19cf418928fa..e3d03d744713 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
static void sas_resume_port(struct asd_sas_phy *phy)
{
- struct domain_device *dev;
+ struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
- list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 573c8599d71c..fc3682f15f50 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20589,10 +20589,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));
- if (lpfc_is_link_up(phba))
+ if (!lpfc_is_link_up(phba))
bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
abtswqe->abort_cmd.rsrvd5 = 0;
abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 756231151882..b92570a7c309 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1827,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
"WWPN (0x%s) already exists.\n", buf);
- goto err1;
+ return rc;
}
if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
"because link is not up.\n");
- rc = -EIO;
- goto err1;
+ return -EIO;
}
vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
if (!vn_port) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
"for vport.\n");
- rc = -ENOMEM;
- goto err1;
+ return -ENOMEM;
}
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1866,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
if (rc) {
QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
"for lport stats.\n");
- goto err2;
+ goto err;
}
fc_set_wwnn(vn_port, vport->node_name);
@@ -1884,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
if (rc) {
QEDF_WARN(&base_qedf->dbg_ctx,
"Error adding Scsi_Host rc=0x%x.\n", rc);
- goto err2;
+ goto err;
}
/* Set default dev_loss_tmo based on module parameter */
@@ -1925,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
-err2:
+ return 0;
+
+err:
scsi_host_put(vn_port->host);
-err1:
return rc;
}
@@ -1968,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
fc_lport_free_stats(vn_port);
/* Release Scsi_Host */
- if (vn_port->host)
- scsi_host_put(vn_port->host);
+ scsi_host_put(vn_port->host);
out:
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b2008fb1dd38..12a6848ade43 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1563,10 +1563,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
return;
}
+ mutex_lock(&tgt->ha->optrom_mutex);
mutex_lock(&vha->vha_tgt.tgt_mutex);
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&tgt->ha->optrom_mutex);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index d92cec12454c..d33355ab6e14 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -184,6 +184,7 @@ static struct {
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
{"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cb3c37d1e009..746a7def2825 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1387,6 +1387,22 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
}
}
+static bool sd_need_revalidate(struct block_device *bdev,
+ struct scsi_disk *sdkp)
+{
+ if (sdkp->device->removable || sdkp->write_prot) {
+ if (bdev_check_media_change(bdev))
+ return true;
+ }
+
+ /*
+ * Force a full rescan after ioctl(BLKRRPART). While the disk state has
+	 * nothing to do with partitions, BLKRRPART has historically been used
+	 * to force a full revalidate after things like a format.
+ */
+ return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+}
+
/**
* sd_open - open a scsi disk device
* @bdev: Block device of the scsi disk to open
@@ -1400,7 +1416,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->open_mutex held.
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
@@ -1423,10 +1439,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_block_when_processing_errors(sdev))
goto error_out;
- if (sdev->removable || sdkp->write_prot) {
- if (bdev_check_media_change(bdev))
- sd_revalidate_disk(bdev->bd_disk);
- }
+ if (sd_need_revalidate(bdev, sdkp))
+ sd_revalidate_disk(bdev->bd_disk);
/*
* If the drive is empty, just let the open fail.
@@ -1476,7 +1490,7 @@ error_out:
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->open_mutex held.
**/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e4633b84c556..7815ed642d43 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -220,6 +220,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
return DISK_EVENT_EJECT_REQUEST;
else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE;
+ else if (med->media_event_code == 3)
+ return DISK_EVENT_EJECT_REQUEST;
return 0;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index e6718a74e5da..403753929320 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -406,6 +406,14 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_IDE_MAX_TARGETS 1
#define STORVSC_IDE_MAX_CHANNELS 1
+/*
+ * Upper bound on the size of a storvsc packet. vmscsi_size_delta is not
+ * included in the calculation because it is set after STORVSC_MAX_PKT_SIZE
+ * is used in storvsc_connect_to_vsp().
+ */
+#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
+ sizeof(struct vstor_packet))
+
struct storvsc_cmd_request {
struct scsi_cmnd *cmd;
@@ -688,6 +696,23 @@ old_is_alloced:
spin_unlock_irqrestore(&stor_device->lock, flags);
}
+static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
+{
+ struct storvsc_cmd_request *request =
+ (struct storvsc_cmd_request *)(unsigned long)rqst_addr;
+
+ if (rqst_addr == VMBUS_RQST_INIT)
+ return VMBUS_RQST_INIT;
+ if (rqst_addr == VMBUS_RQST_RESET)
+ return VMBUS_RQST_RESET;
+
+ /*
+ * Cannot return an ID of 0, which is reserved for an unsolicited
+ * message from Hyper-V.
+ */
+ return (u64)blk_mq_unique_tag(request->cmd->request) + 1;
+}
+
static void handle_sc_creation(struct vmbus_channel *new_sc)
{
struct hv_device *device = new_sc->primary_channel->device_obj;
@@ -701,12 +726,9 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
return;
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+ new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE;
- /*
- * The size of vmbus_requestor is an upper bound on the number of requests
- * that can be in-progress at any one time across all channels.
- */
- new_sc->rqstor_size = scsi_driver.can_queue;
+ new_sc->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(new_sc,
storvsc_ringbuffer_size,
@@ -773,7 +795,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta),
- (unsigned long)request,
+ VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -842,7 +864,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta),
- (unsigned long)request,
+ VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
@@ -1244,6 +1266,7 @@ static void storvsc_on_channel_callback(void *context)
const struct vmpacket_descriptor *desc;
struct hv_device *device;
struct storvsc_device *stor_device;
+ struct Scsi_Host *shost;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1254,20 +1277,12 @@ static void storvsc_on_channel_callback(void *context)
if (!stor_device)
return;
- foreach_vmbus_pkt(desc, channel) {
- void *packet = hv_pkt_data(desc);
- struct storvsc_cmd_request *request;
- u64 cmd_rqst;
-
- cmd_rqst = vmbus_request_addr(&channel->requestor,
- desc->trans_id);
- if (cmd_rqst == VMBUS_RQST_ERROR) {
- dev_err(&device->device,
- "Incorrect transaction id\n");
- continue;
- }
+ shost = stor_device->host;
- request = (struct storvsc_cmd_request *)(unsigned long)cmd_rqst;
+ foreach_vmbus_pkt(desc, channel) {
+ struct vstor_packet *packet = hv_pkt_data(desc);
+ struct storvsc_cmd_request *request = NULL;
+ u64 rqst_id = desc->trans_id;
if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta) {
@@ -1275,14 +1290,44 @@ static void storvsc_on_channel_callback(void *context)
continue;
}
- if (request == &stor_device->init_request ||
- request == &stor_device->reset_request) {
- memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
- complete(&request->wait_event);
+ if (rqst_id == VMBUS_RQST_INIT) {
+ request = &stor_device->init_request;
+ } else if (rqst_id == VMBUS_RQST_RESET) {
+ request = &stor_device->reset_request;
} else {
+ /* Hyper-V can send an unsolicited message with ID of 0 */
+ if (rqst_id == 0) {
+ /*
+ * storvsc_on_receive() looks at the vstor_packet in the message
+ * from the ring buffer. If the operation in the vstor_packet is
+ * COMPLETE_IO, then we call storvsc_on_io_completion(), and
+ * dereference the guest memory address. Make sure we don't call
+ * storvsc_on_io_completion() with a guest memory address that is
+ * zero if Hyper-V were to construct and send such a bogus packet.
+ */
+ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+ dev_err(&device->device, "Invalid packet with ID of 0\n");
+ continue;
+ }
+ } else {
+ struct scsi_cmnd *scmnd;
+
+ /* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */
+ scmnd = scsi_host_find_tag(shost, rqst_id - 1);
+ if (scmnd == NULL) {
+ dev_err(&device->device, "Incorrect transaction ID\n");
+ continue;
+ }
+ request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
+ }
+
storvsc_on_receive(stor_device, packet, request);
+ continue;
}
+
+ memcpy(&request->vstor_packet, packet,
+ (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
+ complete(&request->wait_event);
}
}
@@ -1294,11 +1339,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
- /*
- * The size of vmbus_requestor is an upper bound on the number of requests
- * that can be in-progress at any one time across all channels.
- */
- device->channel->rqstor_size = scsi_driver.can_queue;
+ device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE;
+ device->channel->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(device->channel,
ring_size,
@@ -1624,7 +1666,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta),
- (unsigned long)&stor_device->reset_request,
+ VMBUS_RQST_RESET,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
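
A minimal userspace sketch of the request-ID scheme introduced in the storvsc hunks above: block-layer tag N becomes transaction ID N + 1, keeping ID 0 free for unsolicited Hyper-V messages, and the receive path inverts the mapping with rqst_id - 1. The sentinel values below are illustrative stand-ins, not the kernel's actual VMBUS_RQST_* constants.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RQST_UNSOLICITED 0ULL                   /* reserved for the host */
#define RQST_INIT  0xFFFFFFFFFFFFFFFEULL        /* stand-in sentinel */
#define RQST_RESET 0xFFFFFFFFFFFFFFFDULL        /* stand-in sentinel */

/* mirrors storvsc_next_request_id(): can never return 0 */
static uint64_t next_request_id(uint32_t blk_mq_tag)
{
	return (uint64_t)blk_mq_tag + 1;
}

/* mirrors the receive path: scsi_host_find_tag(shost, rqst_id - 1) */
static int64_t tag_from_request_id(uint64_t rqst_id)
{
	if (rqst_id == RQST_UNSOLICITED || rqst_id == RQST_INIT ||
	    rqst_id == RQST_RESET)
		return -1;              /* not a tagged I/O request */
	return (int64_t)(rqst_id - 1);
}

int main(void)
{
	for (uint32_t tag = 0; tag < 4; tag++)
		assert(tag_from_request_id(next_request_id(tag)) == (int64_t)tag);
	puts("round trip ok");
	return 0;
}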
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index aee3cfc7142a..0a84ec9e7cea 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -603,11 +603,23 @@ static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
if (!ret) {
- if (ver >= UFS_UNIPRO_VER_1_8)
+ if (ver >= UFS_UNIPRO_VER_1_8) {
host->hw_ver.major = 3;
+ /*
+ * Fix HCI version for some platforms with
+ * incorrect version
+ */
+ if (hba->ufs_version < ufshci_version(3, 0))
+ hba->ufs_version = ufshci_version(3, 0);
+ }
}
}
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+ return hba->ufs_version;
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -1048,6 +1060,7 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
+ .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
.setup_clocks = ufs_mtk_setup_clocks,
.hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
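
A tiny sketch of the version clamp in the ufs-mediatek hunk above, assuming ufshci_version() packs major/minor into a monotonically comparable value (the packing here is illustrative, not the kernel's exact encoding): hosts whose UniPro is 1.8 or newer must report at least HCI 3.0, so a lower advertised value is raised to that floor.

#include <assert.h>
#include <stdint.h>

/* illustrative stand-in for ufshci_version(major, minor) */
static uint32_t hci_version(uint32_t major, uint32_t minor)
{
	return (major << 8) | minor;
}

/* raise an under-reported HCI version to the 3.0 floor */
static uint32_t clamp_hci_version(uint32_t reported)
{
	uint32_t floor = hci_version(3, 0);

	return reported < floor ? floor : reported;
}

int main(void)
{
	assert(clamp_hci_version(hci_version(2, 1)) == hci_version(3, 0));
	assert(clamp_hci_version(hci_version(3, 0)) == hci_version(3, 0));
	return 0;
}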
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 8a79605d9652..b9969fce6b4d 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_SUCCESS:
case BTSTAT_LINKED_COMMAND_COMPLETED:
case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
- /* If everything went fine, let's move on.. */
+ /*
+ * Commands like INQUIRY may transfer less data than
+ * requested by the initiator via bufflen. Set residual
+ * count to make upper layer aware of the actual amount
+ * of data returned.
+ */
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;
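
A standalone sketch of the residual arithmetic in the vmw_pvscsi hunk above, assuming e->dataLen is the byte count the device actually returned: the residual is simply the requested length minus what arrived, so a full transfer reports zero.

#include <assert.h>
#include <stdio.h>

/* residual = bytes requested by the initiator minus bytes transferred */
static unsigned int residual(unsigned int bufflen, unsigned int data_len)
{
	return bufflen - data_len;
}

int main(void)
{
	/* e.g. a 255-byte INQUIRY buffer answered with 36 bytes of data */
	assert(residual(255, 36) == 219);
	assert(residual(512, 512) == 0);   /* full transfer: no residual */
	printf("residual = %u\n", residual(255, 36));
	return 0;
}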
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index e1957476a006..6dd190270123 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -626,10 +626,8 @@ static int meson_msr_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- dev_err(&pdev->dev, "io resource mapping failed\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index ec90b44fa0cd..3c158251a58b 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/npe.h>
@@ -679,6 +680,7 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
{
int i, found = 0;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct resource *res;
for (i = 0; i < NPE_COUNT; i++) {
@@ -711,6 +713,11 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
if (!found)
return -ENODEV;
+
+ /* Spawn crypto subdevice if using device tree */
+ if (IS_ENABLED(CONFIG_OF) && np)
+ devm_of_platform_populate(dev);
+
return 0;
}
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 2827085a323b..0ef79d60e88e 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1150,8 +1150,16 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
- if (ret)
- return ret;
+ if (ret) {
+ u32 version;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
+
+ if (version <= 0x01030000)
+ memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ else
+ return ret;
+ }
memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
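
A minimal model of the soundwire fallback above, assuming the version register encodes major.minor.step so that values compare monotonically: controllers at or below 1.3.0 (0x01030000) may legitimately lack the block-pack-mode property, so the array is filled with an invalid-parameter marker instead of failing probe. 0xFF and 16 stand in for SWR_INVALID_PARAM and QCOM_SDW_MAX_PORTS.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INVALID_PARAM 0xFF   /* stand-in for SWR_INVALID_PARAM */
#define MAX_PORTS     16     /* illustrative port count */

static int get_bp_mode(int read_err, uint32_t hw_version,
		       uint8_t *bp, size_t n)
{
	if (!read_err)
		return 0;                    /* property present: use it */
	if (hw_version <= 0x01030000) {      /* legacy HW: tolerate gap */
		memset(bp, INVALID_PARAM, n);
		return 0;
	}
	return read_err;                     /* newer HW must provide it */
}

int main(void)
{
	uint8_t bp[MAX_PORTS];

	printf("v1.3.0, missing: %d\n", get_bp_mode(-22, 0x01030000, bp, sizeof(bp)));
	printf("v1.5.1, missing: %d\n", get_bp_mode(-22, 0x01050001, bp, sizeof(bp)));
	return 0;
}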
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8b161ec4943b..e71a4c514f7b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -65,7 +65,7 @@ config SPI_ALTERA
This is the driver for the Altera SPI Controller.
config SPI_ALTERA_CORE
- tristate "Altera SPI Controller core code"
+ tristate "Altera SPI Controller core code" if COMPILE_TEST
select REGMAP
help
"The core code for the Altera SPI Controller"
@@ -806,6 +806,7 @@ config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
depends on OF
+ depends on SPI_MEM
help
This enables support for the Quad SPI controller in master mode.
This driver does not support generic SPI. The implementation only
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 3e32e4fe5895..39a3e1a032e0 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -148,10 +148,8 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
base = devm_ioremap_resource(dev, &dfl_dev->mmio_res);
- if (IS_ERR(base)) {
- dev_err(dev, "%s get mem resource fail!\n", __func__);
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
config_spi_master(base, master);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 98ace748cd98..d1e287d2d9cd 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -19,7 +19,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
@@ -138,7 +137,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
- struct ath79_spi_platform_data *pdata;
unsigned long rate;
int ret;
@@ -152,15 +150,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
- pdata = dev_get_platdata(&pdev->dev);
-
master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = SPI_MASTER_GPIO_SS;
- if (pdata) {
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->num_chipselect;
- }
+ master->num_chipselect = 3;
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 7cd5fe00dfc1..2ef74885ffa2 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -700,7 +700,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
u32 *plen)
- __must_hold(&as->lock)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct dma_chan *rxchan = master->dma_rx;
@@ -716,8 +715,6 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
if (!rxchan || !txchan)
return -ENODEV;
- /* release lock for DMA operations */
- atmel_spi_unlock(as);
*plen = xfer->len;
@@ -786,15 +783,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
- /* take back lock */
- atmel_spi_lock(as);
return 0;
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
atmel_spi_stop_dma(master);
err_exit:
- atmel_spi_lock(as);
return -ENOMEM;
}
@@ -863,7 +857,6 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
- struct spi_message *msg,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -879,12 +872,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RPR, rx_dma);
spi_writel(as, TPR, tx_dma);
- if (msg->spi->bits_per_word > 8)
+ if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
- dev_dbg(&msg->spi->dev,
+ dev_dbg(&master->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -898,12 +891,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
- if (msg->spi->bits_per_word > 8)
+ if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
- dev_dbg(&msg->spi->dev,
+ dev_dbg(&master->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -1054,8 +1047,6 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
/* Interrupt
*
- * No need for locking in this Interrupt handler: done_status is the
- * only information modified.
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1273,12 +1264,28 @@ static int atmel_spi_setup(struct spi_device *spi)
return 0;
}
+static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct atmel_spi *as = spi_master_get_devdata(spi->master);
+	/* The core doesn't really pass us enable/disable, but CS HIGH vs CS LOW.
+	 * Since we already have routines for activate/deactivate, translate
+	 * high/low to active/inactive.
+	 */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
+
+	if (enable)
+		cs_activate(as, spi);
+	else
+		cs_deactivate(as, spi);
+}
+
static int atmel_spi_one_transfer(struct spi_master *master,
- struct spi_message *msg,
+ struct spi_device *spi,
struct spi_transfer *xfer)
{
struct atmel_spi *as;
- struct spi_device *spi = msg->spi;
u8 bits;
u32 len;
struct atmel_spi_device *asd;
@@ -1288,11 +1295,6 @@ static int atmel_spi_one_transfer(struct spi_master *master,
as = spi_master_get_devdata(master);
- if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
- dev_dbg(&spi->dev, "missing rx or tx buf\n");
- return -EINVAL;
- }
-
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
@@ -1305,13 +1307,13 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!msg->is_dma_mapped)
+ if ((!master->cur_msg_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
- atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+ atmel_spi_set_xfer_speed(as, spi, xfer);
as->done_status = 0;
as->current_transfer = xfer;
@@ -1320,7 +1322,9 @@ static int atmel_spi_one_transfer(struct spi_master *master,
reinit_completion(&as->xfer_completion);
if (as->use_pdc) {
- atmel_spi_pdc_next_xfer(master, msg, xfer);
+ atmel_spi_lock(as);
+ atmel_spi_pdc_next_xfer(master, xfer);
+ atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
ret = atmel_spi_next_xfer_dma_submit(master,
@@ -1328,21 +1332,21 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (ret) {
dev_err(&spi->dev,
"unable to use DMA, fallback to PIO\n");
- atmel_spi_next_xfer_pio(master, xfer);
+ as->done_status = ret;
+ break;
} else {
as->current_remaining_bytes -= len;
if (as->current_remaining_bytes < 0)
as->current_remaining_bytes = 0;
}
} else {
+ atmel_spi_lock(as);
atmel_spi_next_xfer_pio(master, xfer);
+ atmel_spi_unlock(as);
}
- /* interrupts are disabled, so free the lock for schedule */
- atmel_spi_unlock(as);
dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
SPI_DMA_TIMEOUT);
- atmel_spi_lock(as);
if (WARN_ON(dma_timeout == 0)) {
dev_err(&spi->dev, "spi transfer timeout\n");
as->done_status = -EIO;
@@ -1381,90 +1385,16 @@ static int atmel_spi_one_transfer(struct spi_master *master,
} else if (atmel_spi_use_dma(as, xfer)) {
atmel_spi_stop_dma(master);
}
-
- if (!msg->is_dma_mapped
- && as->use_pdc)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- return 0;
-
- } else {
- /* only update length if no error */
- msg->actual_length += xfer->len;
}
- if (!msg->is_dma_mapped
+ if (!master->cur_msg_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
- spi_transfer_delay_exec(xfer);
-
- if (xfer->cs_change) {
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers)) {
- as->keep_cs = true;
- } else {
- cs_deactivate(as, msg->spi);
- udelay(10);
- cs_activate(as, msg->spi);
- }
- }
-
- return 0;
-}
-
-static int atmel_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct atmel_spi *as;
- struct spi_transfer *xfer;
- struct spi_device *spi = msg->spi;
- int ret = 0;
-
- as = spi_master_get_devdata(master);
-
- dev_dbg(&spi->dev, "new message %p submitted for %s\n",
- msg, dev_name(&spi->dev));
-
- atmel_spi_lock(as);
- cs_activate(as, spi);
-
- as->keep_cs = false;
-
- msg->status = 0;
- msg->actual_length = 0;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- trace_spi_transfer_start(msg, xfer);
-
- ret = atmel_spi_one_transfer(master, msg, xfer);
- if (ret)
- goto msg_done;
-
- trace_spi_transfer_stop(msg, xfer);
- }
-
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_dbg(&spi->dev,
- " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
- xfer, xfer->len,
- xfer->tx_buf, &xfer->tx_dma,
- xfer->rx_buf, &xfer->rx_dma);
- }
-
-msg_done:
- if (!as->keep_cs)
- cs_deactivate(as, msg->spi);
-
- atmel_spi_unlock(as);
-
- msg->status = as->done_status;
- spi_finalize_current_message(spi->master);
-
- return ret;
+ return as->done_status;
}
static void atmel_spi_cleanup(struct spi_device *spi)
@@ -1554,7 +1484,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
master->setup = atmel_spi_setup;
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
- master->transfer_one_message = atmel_spi_transfer_one_message;
+ master->transfer_one = atmel_spi_one_transfer;
+ master->set_cs = atmel_spi_set_cs;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
master->max_dma_len = SPI_MAX_DMA_XFER;
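
As a side note on the new set_cs() callback earlier in this file, a small truth-table sketch of the polarity translation, under the comment's reading that the core passes the desired CS line level rather than activate/deactivate; all names here are illustrative.

#include <assert.h>
#include <stdbool.h>

/* matches: enable = (!!(spi->mode & SPI_CS_HIGH) == enable) */
static bool cs_active(bool cs_high_mode, bool level)
{
	return cs_high_mode == level;
}

int main(void)
{
	assert(cs_active(true, true));     /* SPI_CS_HIGH: high asserts */
	assert(!cs_active(true, false));
	assert(cs_active(false, false));   /* active-low: low asserts   */
	assert(!cs_active(false, true));
	return 0;
}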
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8965fe61c8b4..5f8771fe1a31 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,6 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -96,8 +95,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @rx_prologue: bytes received without DMA if first RX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
- * @prepare_cs: precalculated CS register value for ->prepare_message()
- * (uses slave-specific clock polarity and phase settings)
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
* unloading the module
* @count_transfer_polling: count of how often polling mode is used
@@ -107,7 +104,7 @@ MODULE_PARM_DESC(polling_limit_us,
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
- * @chip_select: SPI slave currently selected
+ * @slv: SPI slave currently selected
* (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
* @tx_dma_active: whether a TX DMA descriptor is in progress
* @rx_dma_active: whether a RX DMA descriptor is in progress
@@ -115,11 +112,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
* (cyclically copies from zero page to TX FIFO)
* @fill_tx_addr: bus address of zero page
- * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
- * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
- * @clear_rx_addr: bus address of @clear_rx_cs
- * @clear_rx_cs: precalculated CS register value to clear RX FIFO
- * (uses slave-specific clock polarity and phase settings)
*/
struct bcm2835_spi {
void __iomem *regs;
@@ -134,7 +126,6 @@ struct bcm2835_spi {
int tx_prologue;
int rx_prologue;
unsigned int tx_spillover;
- u32 prepare_cs[BCM2835_SPI_NUM_CS];
struct dentry *debugfs_dir;
u64 count_transfer_polling;
@@ -142,14 +133,28 @@ struct bcm2835_spi {
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
- u8 chip_select;
+ struct bcm2835_spidev *slv;
unsigned int tx_dma_active;
unsigned int rx_dma_active;
struct dma_async_tx_descriptor *fill_tx_desc;
dma_addr_t fill_tx_addr;
- struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
+};
+
+/**
+ * struct bcm2835_spidev - BCM2835 SPI slave
+ * @prepare_cs: precalculated CS register value for ->prepare_message()
+ * (uses slave-specific clock polarity and phase settings)
+ * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
+ * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
+ * @clear_rx_addr: bus address of @clear_rx_cs
+ * @clear_rx_cs: precalculated CS register value to clear RX FIFO
+ * (uses slave-specific clock polarity and phase settings)
+ */
+struct bcm2835_spidev {
+ u32 prepare_cs;
+ struct dma_async_tx_descriptor *clear_rx_desc;
dma_addr_t clear_rx_addr;
- u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
+ u32 clear_rx_cs ____cacheline_aligned;
};
#if defined(CONFIG_DEBUG_FS)
@@ -624,8 +629,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
/* busy-wait for TX FIFO to empty */
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
- bcm2835_wr(bs, BCM2835_SPI_CS,
- bs->clear_rx_cs[bs->chip_select]);
+ bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);
bs->tx_dma_active = false;
smp_wmb();
@@ -646,18 +650,18 @@ static void bcm2835_spi_dma_tx_done(void *data)
/**
* bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
* @ctlr: SPI master controller
- * @spi: SPI slave
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
+ * @slv: BCM2835 SPI slave
* @is_tx: whether to submit DMA descriptor for TX or RX sglist
*
* Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
* Return 0 on success or a negative error number.
*/
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
- struct spi_device *spi,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv,
bool is_tx)
{
struct dma_chan *chan;
@@ -697,7 +701,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
} else if (!tfr->rx_buf) {
desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
- bs->chip_select = spi->chip_select;
+ bs->slv = slv;
}
/* submit it to DMA-engine */
@@ -709,8 +713,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
/**
* bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
* @ctlr: SPI master controller
- * @spi: SPI slave
* @tfr: SPI transfer
+ * @slv: BCM2835 SPI slave
* @cs: CS register
*
* For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
@@ -754,8 +758,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
* performed at the end of an RX-only transfer.
*/
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
- struct spi_device *spi,
struct spi_transfer *tfr,
+ struct bcm2835_spidev *slv,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
@@ -773,7 +777,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
/* setup tx-DMA */
if (bs->tx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
} else {
cookie = dmaengine_submit(bs->fill_tx_desc);
ret = dma_submit_error(cookie);
@@ -799,9 +803,9 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
* this saves 10us or more.
*/
if (bs->rx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
} else {
- cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
+ cookie = dmaengine_submit(slv->clear_rx_desc);
ret = dma_submit_error(cookie);
}
if (ret) {
@@ -850,8 +854,6 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
static void bcm2835_dma_release(struct spi_controller *ctlr,
struct bcm2835_spi *bs)
{
- int i;
-
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
@@ -870,17 +872,6 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
-
- for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
- if (bs->clear_rx_desc[i])
- dmaengine_desc_free(bs->clear_rx_desc[i]);
-
- if (bs->clear_rx_addr)
- dma_unmap_single(ctlr->dma_rx->device->dev,
- bs->clear_rx_addr,
- sizeof(bs->clear_rx_cs),
- DMA_TO_DEVICE);
-
dma_release_channel(ctlr->dma_rx);
ctlr->dma_rx = NULL;
}
@@ -892,7 +883,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
- int ret, i;
+ int ret;
/* base address in dma-space */
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@@ -972,35 +963,6 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
if (ret)
goto err_config;
- bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
- bs->clear_rx_cs,
- sizeof(bs->clear_rx_cs),
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
- dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
- bs->clear_rx_addr = 0;
- ret = -ENOMEM;
- goto err_release;
- }
-
- for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
- bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
- bs->clear_rx_addr + i * sizeof(u32),
- sizeof(u32), 0,
- DMA_MEM_TO_DEV, 0);
- if (!bs->clear_rx_desc[i]) {
- dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
- ret = -ENOMEM;
- goto err_release;
- }
-
- ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
- if (ret) {
- dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
- goto err_release;
- }
- }
-
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
@@ -1082,9 +1044,10 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
unsigned long spi_hz, clk_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
- u32 cs = bs->prepare_cs[spi->chip_select];
+ u32 cs = slv->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
@@ -1133,7 +1096,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
* this 1 idle clock cycle pattern but runs the spi clock without gaps
*/
if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
- return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
+ return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
@@ -1144,6 +1107,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
int ret;
if (ctlr->can_dma) {
@@ -1162,7 +1126,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
* Set up clock polarity before spi_transfer_one_message() asserts
* chip select to avoid a gratuitous clock signal edge.
*/
- bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
+ bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
return 0;
}
@@ -1188,13 +1152,83 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
return !strcmp(chip->label, data);
}
+static void bcm2835_spi_cleanup(struct spi_device *spi)
+{
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct spi_controller *ctlr = spi->controller;
+
+ if (slv->clear_rx_desc)
+ dmaengine_desc_free(slv->clear_rx_desc);
+
+ if (slv->clear_rx_addr)
+ dma_unmap_single(ctlr->dma_rx->device->dev,
+ slv->clear_rx_addr,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+
+ kfree(slv);
+}
+
+static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv)
+{
+ int ret;
+
+ if (!ctlr->dma_rx)
+ return 0;
+
+ slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+ &slv->clear_rx_cs,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
+ dev_err(&spi->dev, "cannot map clear_rx_cs\n");
+ slv->clear_rx_addr = 0;
+ return -ENOMEM;
+ }
+
+ slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+ slv->clear_rx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!slv->clear_rx_desc) {
+ dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
+ return -ENOMEM;
+ }
+
+ ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
+ if (ret) {
+ dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
struct gpio_chip *chip;
+ int ret;
u32 cs;
+ if (!slv) {
+ slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
+ GFP_KERNEL);
+ if (!slv)
+ return -ENOMEM;
+
+ spi_set_ctldata(spi, slv);
+
+ ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
+ if (ret)
+ goto err_cleanup;
+ }
+
/*
* Precalculate SPI slave's CS register value for ->prepare_message():
* The driver always uses software-controlled GPIO chip select, hence
@@ -1206,20 +1240,19 @@ static int bcm2835_spi_setup(struct spi_device *spi)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
- bs->prepare_cs[spi->chip_select] = cs;
+ slv->prepare_cs = cs;
/*
* Precalculate SPI slave's CS register value to clear RX FIFO
* in case of a TX-only DMA transfer.
*/
if (ctlr->dma_rx) {
- bs->clear_rx_cs[spi->chip_select] = cs |
- BCM2835_SPI_CS_TA |
- BCM2835_SPI_CS_DMAEN |
- BCM2835_SPI_CS_CLEAR_RX;
+ slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_CLEAR_RX;
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
- bs->clear_rx_addr,
- sizeof(bs->clear_rx_cs),
+ slv->clear_rx_addr,
+ sizeof(u32),
DMA_TO_DEVICE);
}
@@ -1241,7 +1274,8 @@ static int bcm2835_spi_setup(struct spi_device *spi)
*/
dev_err(&spi->dev,
"setup: only two native chip-selects are supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_cleanup;
}
/*
@@ -1262,14 +1296,20 @@ static int bcm2835_spi_setup(struct spi_device *spi)
DRV_NAME,
GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW);
- if (IS_ERR(spi->cs_gpiod))
- return PTR_ERR(spi->cs_gpiod);
+ if (IS_ERR(spi->cs_gpiod)) {
+ ret = PTR_ERR(spi->cs_gpiod);
+ goto err_cleanup;
+ }
/* and set up the "mode" and level */
dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
spi->chip_select);
return 0;
+
+err_cleanup:
+ bcm2835_spi_cleanup(spi);
+ return ret;
}
static int bcm2835_spi_probe(struct platform_device *pdev)
@@ -1278,8 +1318,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
- ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
- dma_get_cache_alignment()));
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!ctlr)
return -ENOMEM;
@@ -1288,8 +1327,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->use_gpio_descriptors = true;
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+ ctlr->num_chipselect = 3;
ctlr->setup = bcm2835_spi_setup;
+ ctlr->cleanup = bcm2835_spi_cleanup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
ctlr->prepare_message = bcm2835_spi_prepare_message;
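
A userspace sketch of the per-slave allocation pattern above: clear_rx_cs is handed to the DMA engine, so it is placed last in the struct, cache-line aligned, and the allocation is rounded up so the CPU-written fields never share its cache line. 64 is an assumed line size standing in for dma_get_cache_alignment().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE 64
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct spidev_model {
	uint32_t prepare_cs;                        /* CPU-only field */
	_Alignas(CACHE_LINE) uint32_t clear_rx_cs;  /* DMA-visible field */
};

int main(void)
{
	size_t alloc = ALIGN_UP(sizeof(struct spidev_model), CACHE_LINE);
	struct spidev_model *slv = aligned_alloc(CACHE_LINE, alloc);

	if (!slv)
		return 1;
	printf("struct %zu B, allocated %zu B, clear_rx_cs at offset %zu\n",
	       sizeof(*slv), alloc, offsetof(struct spidev_model, clear_rx_cs));
	free(slv);
	return 0;
}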
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 75589ac6e95f..37eab100a7d8 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -384,7 +384,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
bs->pending = 0;
/* Calculate the estimated time in us the transfer runs. Note that
- * there are are 2 idle clocks cycles after each chunk getting
+	 * there are 2 idle clock cycles after each chunk getting
* transferred - in our case the chunk size is 3 bytes, so we
* approximate this by 9 cycles/byte. This is used to find the number
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 6a6af85aebfd..27d0087f8688 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
+ bool initial_setup = false;
+ int retval;
bitbang = spi_master_get_devdata(spi->master);
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
+ initial_setup = true;
}
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
- if (!cs->txrx_word)
- return -EINVAL;
+ if (!cs->txrx_word) {
+ retval = -EINVAL;
+ goto err_free;
+ }
if (bitbang->setup_transfer) {
- int retval = bitbang->setup_transfer(spi, NULL);
+ retval = bitbang->setup_transfer(spi, NULL);
if (retval < 0)
- return retval;
+ goto err_free;
}
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
return 0;
+
+err_free:
+ if (initial_setup)
+ kfree(cs);
+ return retval;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
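
The leak fix here, and the matching hunks in spi-fsl-spi, spi-omap-uwire and spi-omap2-mcspi below, all follow one pattern; a standalone sketch with made-up names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { void *state; };

/* setup() may run many times; free state on error only if *this* call
 * allocated it, so a later failed reconfiguration cannot free state
 * that an earlier successful setup still relies on. */
static int setup(struct dev *d, int (*configure)(struct dev *))
{
	bool initial_setup = false;
	int ret;

	if (!d->state) {
		d->state = calloc(1, 64);   /* 64: arbitrary state size */
		if (!d->state)
			return -12;         /* -ENOMEM */
		initial_setup = true;
	}

	ret = configure(d);
	if (ret && initial_setup) {
		free(d->state);
		d->state = NULL;
	}
	return ret;
}

static int fail(struct dev *d) { (void)d; return -22; /* -EINVAL */ }

int main(void)
{
	struct dev d = { 0 };

	printf("setup: %d, state: %p\n", setup(&d, fail), d.state);
	return 0;
}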
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 17c06039a74d..3379720cfcb8 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -56,7 +56,7 @@ struct dw_spi_mscc {
/*
* The Designware SPI controller (referred to as master in the documentation)
* automatically deasserts chip select when the tx fifo is empty. The chip
- * selects then needs to be either driven as GPIOs or, for the first 4 using the
+ * selects then needs to be either driven as GPIOs or, for the first 4 using
 * the SPI boot controller registers. The final chip select is an OR gate
* between the Designware SPI controller and the SPI boot controller.
*/
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 028736687488..fb45e6af6638 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1375,11 +1375,13 @@ poll_mode:
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
- goto out_free_irq;
+ goto out_release_dma;
}
return ret;
+out_release_dma:
+ dspi_release_dma(dspi);
out_free_irq:
if (dspi->irq)
free_irq(dspi->irq, dspi);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d0e5aa18b7ba..bdf94cc7be1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
+ bool initial_setup = false;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi_set_ctldata(spi, cs);
+ initial_setup = true;
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
+ if (initial_setup)
+ kfree(cs);
return retval;
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 3d0d8ddd5772..b3861fb88711 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -639,8 +639,8 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
complete(&mas->abort_done);
/*
- * It's safe or a good idea to Ack all of our our interrupts at the
- * end of the function. Specifically:
+ * It's safe or a good idea to Ack all of our interrupts at the end
+ * of the function. Specifically:
* - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
* clearing Acks. Clearing at the end relies on nobody else having
* started a new transfer yet or else we could be clearing _their_
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 3f986ba1c328..58b823a16fc4 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -126,6 +127,7 @@ struct hisi_spi {
void __iomem *regs;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
+ u16 bus_num;
/* Current message transfer state info */
const void *tx;
@@ -133,8 +135,49 @@ struct hisi_spi {
void *rx;
unsigned int rx_len;
u8 n_bytes; /* current is a 1/2/4 bytes op */
+
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+};
+
+#define HISI_SPI_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off, \
+}
+
+static const struct debugfs_reg32 hisi_spi_regs[] = {
+ HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
+ HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
+ HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+ HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+ HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+ HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+ HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+ HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+ HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+ HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+ HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
+ HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
};
+static int hisi_spi_debugfs_init(struct hisi_spi *hs)
+{
+ char name[32];
+
+ snprintf(name, 32, "hisi_spi%d", hs->bus_num);
+ hs->debugfs = debugfs_create_dir(name, NULL);
+ if (!hs->debugfs)
+ return -ENOMEM;
+
+ hs->regset.regs = hisi_spi_regs;
+ hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
+ hs->regset.base = hs->regs;
+ debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);
+
+ return 0;
+}
+
static u32 hisi_spi_busy(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
@@ -424,6 +467,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
hs = spi_controller_get_devdata(master);
hs->dev = dev;
hs->irq = irq;
+ hs->bus_num = pdev->id;
hs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hs->regs))
@@ -446,7 +490,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->bus_num = pdev->id;
+ master->bus_num = hs->bus_num;
master->setup = hisi_spi_setup;
master->cleanup = hisi_spi_cleanup;
master->transfer_one = hisi_spi_transfer_one;
@@ -462,6 +506,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
return ret;
}
+ if (hisi_spi_debugfs_init(hs))
+ dev_info(dev, "failed to create debugfs dir\n");
+
ret = spi_register_controller(master);
if (ret) {
dev_err(dev, "failed to register spi master, ret=%d\n", ret);
@@ -478,7 +525,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
static int hisi_spi_remove(struct platform_device *pdev)
{
struct spi_controller *master = platform_get_drvdata(pdev);
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+ debugfs_remove_recursive(hs->debugfs);
spi_unregister_controller(master);
return 0;
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f914b8d2043e..ead0507c63be 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -202,7 +202,7 @@ static void spi_lm70llp_attach(struct parport *p)
* the lm70 driver could verify it, reading the manf ID.
*/
- master = spi_alloc_master(p->physport->dev, sizeof *pp);
+ master = spi_alloc_master(p->physport->dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto out_fail;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index f1cf2232f0b5..4d4f77a186a9 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -875,7 +875,7 @@ static int spi_test_run_iter(struct spi_device *spi,
test.transfers[i].len = len;
if (test.transfers[i].tx_buf)
test.transfers[i].tx_buf += tx_off;
- if (test.transfers[i].tx_buf)
+ if (test.transfers[i].rx_buf)
test.transfers[i].rx_buf += rx_off;
}
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 1513553e4080..37f4443ce9a0 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -6,6 +6,7 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#include <linux/dmaengine.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -743,6 +744,91 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
+static int spi_mem_read_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 *status)
+{
+ const u8 *bytes = (u8 *)op->data.buf.in;
+ int ret;
+
+ ret = spi_mem_exec_op(mem, op);
+ if (ret)
+ return ret;
+
+ if (op->data.nbytes > 1)
+ *status = ((u16)bytes[0] << 8) | bytes[1];
+ else
+ *status = bytes[0];
+
+ return 0;
+}
+
+/**
+ * spi_mem_poll_status() - Poll memory device status
+ * @mem: SPI memory device
+ * @op: the memory operation to execute
+ * @mask: status bitmask to check
+ * @match: (status & mask) expected value
+ * @initial_delay_us: delay in us before starting to poll
+ * @polling_delay_us: time to sleep between reads in us
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This function polls a status register and returns when
+ * (status & mask) == match or when the timeout has expired.
+ *
+ * Return: 0 in case of success, -ETIMEDOUT in case of error,
+ * -EOPNOTSUPP if not supported.
+ */
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ int ret = -EOPNOTSUPP;
+ int read_status_ret;
+ u16 status;
+
+ if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
+ op->data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
+ initial_delay_us, polling_delay_us,
+ timeout_ms);
+
+ spi_mem_access_end(mem);
+ }
+
+ if (ret == -EOPNOTSUPP) {
+ if (!spi_mem_supports_op(mem, op))
+ return ret;
+
+ if (initial_delay_us < 10)
+ udelay(initial_delay_us);
+ else
+ usleep_range((initial_delay_us >> 2) + 1,
+ initial_delay_us);
+
+ ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
+ (read_status_ret || ((status) & mask) == match),
+ polling_delay_us, timeout_ms * 1000, false, mem,
+ op, &status);
+ if (read_status_ret)
+ return read_status_ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_poll_status);
+
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
@@ -810,7 +896,7 @@ int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
/**
- * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
* @memdrv: the SPI memory driver to unregister
*
* Unregisters a SPI memory driver.
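
A standalone model of the software polling loop spi_mem_poll_status() falls back to when the controller has no native poll_status() implementation: re-read the status until the masked value matches or the budget runs out. The fake read_status() device and the iteration budget are illustrative; the real code sleeps between reads and enforces a millisecond timeout.

#include <stdint.h>
#include <stdio.h>

static int busy_reads = 3;      /* fake device: busy for three polls */

static int read_status(uint16_t *status)
{
	*status = busy_reads-- > 0 ? 0x01 : 0x00;   /* bit 0 = busy/WIP */
	return 0;
}

static int poll_status(uint16_t mask, uint16_t match, int max_polls)
{
	uint16_t status;
	int ret;

	while (max_polls--) {       /* stands in for the ms timeout */
		ret = read_status(&status);
		if (ret)
			return ret;
		if ((status & mask) == match)
			return 0;
	}
	return -110;                /* -ETIMEDOUT */
}

int main(void)
{
	printf("poll result: %d\n", poll_status(0x01, 0x00, 10));
	return 0;
}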
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index ecba6b4a5d85..b2c4621db34d 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(spicc->pclk);
if (ret) {
dev_err(&pdev->dev, "pclk clock enable failed\n");
- goto out_master;
+ goto out_core_clk;
}
device_reset_optional(&pdev->dev);
@@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = meson_spicc_clk_init(spicc);
if (ret) {
dev_err(&pdev->dev, "clock registration failed\n");
- goto out_master;
+ goto out_clk;
}
ret = devm_spi_register_master(&pdev->dev, master);
@@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
return 0;
out_clk:
- clk_disable_unprepare(spicc->core);
clk_disable_unprepare(spicc->pclk);
+out_core_clk:
+ clk_disable_unprepare(spicc->core);
+
out_master:
spi_master_put(master);
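
The relabelled error targets above follow the usual goto-unwind rule: resources are released in the reverse order of acquisition, and each label undoes exactly what had succeeded before the jump. A schematic sketch with placeholder steps:

#include <stdio.h>

static int enable(const char *what) { printf("enable %s\n", what); return 0; }
static void disable(const char *what) { printf("disable %s\n", what); }

/* acquire core, then pclk; unwind in reverse on any failure */
static int probe(int fail_at)
{
	int ret;

	ret = enable("core");
	if (ret)
		return ret;
	if (fail_at == 1) { ret = -5; goto out_core_clk; }

	ret = enable("pclk");
	if (ret)
		goto out_core_clk;
	if (fail_at == 2) { ret = -5; goto out_clk; }

	return 0;

out_clk:
	disable("pclk");
out_core_clk:                       /* fall-through: core came first */
	disable("core");
	return ret;
}

int main(void)
{
	return probe(2) ? 1 : 0;
}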
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index ea1b07953d38..78a9bca8cc68 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -369,7 +369,7 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
@@ -491,7 +491,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
void *tempp;
struct clk *clk;
- master = spi_alloc_master(dev, sizeof *mps);
+ master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 17935e71b02f..21ef5d481faf 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -265,7 +265,7 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -365,7 +365,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
struct spi_master *master;
int ret;
- master = spi_alloc_master(dev, sizeof *mps);
+ master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 124cba7213f1..51041526546d 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -415,7 +415,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
}
dev_dbg(&op->dev, "allocating spi_master struct\n");
- master = spi_alloc_master(&op->dev, sizeof *ms);
+ master = spi_alloc_master(&op->dev, sizeof(*ms));
if (!master) {
rc = -ENOMEM;
goto err_alloc;
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 56d10c4511db..1668a347e003 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -105,7 +105,7 @@ static void npcm_pspi_set_mode(struct spi_device *spi)
u16 regtemp;
u16 mode_val;
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
mode_val = 0;
break;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 6e6c2403944d..a66fa97046ee 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1124,12 +1124,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- /* Clear potential interrupts */
- reg = fspi_readl(f, f->iobase + FSPI_INTR);
- if (reg)
- fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
-
/* find the resources - controller memory mapped space */
if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1167,6 +1161,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
}
}
+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index f3843f0ff260..38c14c4e4e21 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -86,7 +86,7 @@ static int tiny_spi_setup(struct spi_device *spi)
hw->speed_hz = spi->max_speed_hz;
hw->baud = tiny_spi_baud(spi, hw->speed_hz);
}
- hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
+ hw->mode = spi->mode & SPI_MODE_X_MASK;
return 0;
}
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 7062f2902253..20b047172965 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -6,7 +6,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrj�l� <juha.yrjola@nokia.com>
+ * Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
else
word_len = spi->bits_per_word;
- if (spi->bits_per_word > 32)
+ if (word_len > 32)
return -EINVAL;
cs->word_len = word_len;
@@ -296,7 +296,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
break;
}
status = omap1_spi100k_setup_transfer(spi, t);
@@ -315,7 +314,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
m->actual_length += count;
if (count != t->len) {
- status = -EIO;
break;
}
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 71402f71ddd8..087172a193fa 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -330,7 +330,7 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (spi->mode & SPI_CPOL)
flags |= UWIRE_CLK_INVERTED;
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
case SPI_MODE_3:
flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
@@ -424,15 +424,22 @@ done:
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
+ bool initial_setup = false;
+ int status;
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
return -ENOMEM;
spi->controller_state = ust;
+ initial_setup = true;
}
- return uwire_setup_transfer(spi, NULL);
+ status = uwire_setup_transfer(spi, NULL);
+ if (status && initial_setup)
+ kfree(ust);
+
+ return status;
}
static void uwire_cleanup(struct spi_device *spi)
@@ -453,7 +460,7 @@ static int uwire_probe(struct platform_device *pdev)
struct uwire_spi *uwire;
int status;
- master = spi_alloc_master(&pdev->dev, sizeof *uwire);
+ master = spi_alloc_master(&pdev->dev, sizeof(*uwire));
if (!master)
return -ENODEV;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 999c22736416..60c9cdf1c94b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrj�l� <juha.yrjola@nokia.com>
+ * Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
@@ -1032,15 +1032,29 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
}
}
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs;
+
+ if (spi->controller_state) {
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
+ kfree(cs);
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
+ bool initial_setup = false;
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_cs *cs = spi->controller_state;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
+ initial_setup = true;
}
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
+ if (initial_setup)
+ omap2_mcspi_cleanup(spi);
return ret;
}
ret = omap2_mcspi_setup_transfer(spi, NULL);
+ if (ret && initial_setup)
+ omap2_mcspi_cleanup(spi);
+
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return ret;
}
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
- struct omap2_mcspi_cs *cs;
-
- if (spi->controller_state) {
- /* Unlink controller state from context save list */
- cs = spi->controller_state;
- list_del(&cs->node);
-
- kfree(cs);
- }
-}
-
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
{
struct omap2_mcspi *mcspi = data;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 0c9e3f270f05..feebda66f56e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -288,7 +288,7 @@
#define SPI_POLLING_TIMEOUT 1000
/*
- * The type of reading going on on this chip
+ * The type of reading going on this chip
*/
enum ssp_reading {
READING_NULL,
@@ -298,7 +298,7 @@ enum ssp_reading {
};
/*
- * The type of writing going on on this chip
+ * The type of writing going on this chip
*/
enum ssp_writing {
WRITING_NULL,
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index d8ee363fb714..d65f047b6c82 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -34,7 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
@@ -223,7 +223,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
}
if (cs == NULL) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -235,7 +235,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
*/
cs->mode = SPI_PPC4XX_MODE_SPE;
- switch (spi->mode & (SPI_CPHA | SPI_CPOL)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
cs->mode |= SPI_CLK_MODE0;
break;
@@ -326,7 +326,7 @@ static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
{
/*
* On all 4xx PPC's the SPI bus is shared/multiplexed with
- * the 2nd I2C bus. We need to enable the the SPI bus before
+ * the 2nd I2C bus. We need to enable the SPI bus before
* using it.
*/
@@ -349,7 +349,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
int ret;
const unsigned int *clk;
- master = spi_alloc_master(dev, sizeof *hw);
+ master = spi_alloc_master(dev, sizeof(*hw));
if (master == NULL)
return -ENOMEM;
master->dev.of_node = np;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 37567bc7a523..be563f0dd03a 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -2,18 +2,18 @@
/*
* PXA2xx SPI DMA engine support.
*
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
-#include <linux/spi/spi.h>
+
#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
#include "spi-pxa2xx.h"
@@ -26,7 +26,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* It is possible that one CPU is handling ROR interrupt and other
* just gets DMA completion. Calling pump_transfers() twice for the
* same transfer leads to problems thus we prevent concurrent calls
- * by using ->dma_running.
+ * by using dma_running.
*/
if (atomic_dec_and_test(&drv_data->dma_running)) {
/*
@@ -34,25 +34,18 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
- if (!error) {
- u32 status = pxa2xx_spi_read(drv_data, SSSR)
- & drv_data->mask_sr;
- error = status & SSSR_ROR;
- }
+ if (!error)
+ error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
/* Clear status & disable interrupts */
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~drv_data->dma_cr1);
+ clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (error) {
/* In case we got an error we disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0)
- & ~SSCR0_SSE);
+ pxa_ssp_disable(drv_data->ssp);
msg->status = -EIO;
}
@@ -94,14 +87,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = drv_data->ssdr_physical;
+ cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
chan = drv_data->controller->dma_tx;
} else {
- cfg.src_addr = drv_data->ssdr_physical;
+ cfg.src_addr = drv_data->ssp->phys_base + SSDR;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
@@ -111,7 +104,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
- dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+ dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
return NULL;
}
@@ -123,9 +116,9 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
- status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
+ status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (status & SSSR_ROR) {
- dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+ dev_err(drv_data->ssp->dev, "FIFO overrun\n");
dmaengine_terminate_async(drv_data->controller->dma_rx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
@@ -145,16 +138,14 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
if (!tx_desc) {
- dev_err(&drv_data->pdev->dev,
- "failed to get DMA TX descriptor\n");
+ dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
err = -EBUSY;
goto err_tx;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
if (!rx_desc) {
- dev_err(&drv_data->pdev->dev,
- "failed to get DMA RX descriptor\n");
+ dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
err = -EBUSY;
goto err_rx;
}
@@ -191,8 +182,8 @@ void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
- struct device *dev = &drv_data->pdev->dev;
struct spi_controller *controller = drv_data->controller;
+ struct device *dev = drv_data->ssp->dev;
dma_cap_mask_t mask;
dma_cap_zero(mask);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 1833f5876e9f..2e134eb4bd2c 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CE4100's SPI device is more or less the same one as found on PXA
+ * PCI glue driver for SPI PXA2xx compatible controllers.
+ * CE4100's SPI device is more or less the same one as found on PXA.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016, 2021 Intel Corporation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dmaengine.h>
@@ -178,7 +180,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &bsw2_rx_param,
},
[PORT_MRFLD] = {
- .type = PXA27x_SSP,
+ .type = MRFLD_SSP,
.max_clk_rate = 25000000,
.setup = mrfld_spi_setup,
},
@@ -239,6 +241,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
+ ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
@@ -254,7 +257,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
c->max_clk_rate);
- if (IS_ERR(ssp->clk))
+ if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
memset(&pi, 0, sizeof(pi));
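Since the PCI glue has no firmware-described clock, it manufactures a fixed-rate one and hands it to the platform device. A hedged sketch of that pattern; the clock name and the 25 MHz rate are illustrative:

    #include <linux/clk-provider.h>

    /* Sketch: register a constant-rate clock (no parent, no flags). */
    static struct clk *sketch_ssp_clk(struct device *dev)
    {
    	return clk_register_fixed_rate(dev, "pxa2xx-spi.0", NULL, 0,
    				       25000000);
    }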
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5e59ba075bc7..974e30744b83 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
*/
#include <linux/acpi.h>
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
@@ -25,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
+
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
@@ -38,11 +40,11 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
/*
- * for testing SSCR1 changes that require SSP restart, basically
- * everything except the service and interrupt enables, the pxa270 developer
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables, the PXA270 developer
* manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
- * list, but the PXA255 dev man says all bits without really meaning the
- * service and interrupt enables
+ * list, but the PXA255 developer manual says all bits, though it does not
+ * really mean the service and interrupt enables.
*/
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
@@ -198,6 +200,17 @@ static bool is_mmp2_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == MMP2_SSP;
}
+static bool is_mrfld_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MRFLD_SSP;
+}
+
+static void pxa2xx_spi_update(const struct driver_data *drv_data, u32 reg, u32 mask, u32 value)
+{
+ if ((pxa2xx_spi_read(drv_data, reg) & mask) != value)
+ pxa2xx_spi_write(drv_data, reg, value & mask);
+}
+
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
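pxa2xx_spi_update() folds the driver's repeated read/compare/write blocks into one helper; note that it writes value & mask, so callers pass masks covering every bit they intend to keep. A standalone, compilable illustration with a stubbed register:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg;	/* stand-in for one MMIO register */

    static uint32_t reg_read(void) { return reg; }
    static void reg_write(uint32_t v) { reg = v; puts("write issued"); }

    /* Mirror of the helper: skip the MMIO write when the masked
     * contents already hold the requested value. */
    static void update(uint32_t mask, uint32_t value)
    {
    	if ((reg_read() & mask) != value)
    		reg_write(value & mask);
    }

    int main(void)
    {
    	update(0xff, 0x20);	/* differs: write issued */
    	update(0xff, 0x20);	/* already set: skipped */
    	return 0;
    }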
@@ -239,7 +252,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
break;
}
- return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
+ return read_SSSR_bits(drv_data, mask) == mask;
}
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -284,13 +297,11 @@ static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
case QUARK_X1000_SSP:
return clk_div
| QUARK_X1000_SSCR0_Motorola
- | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
- | SSCR0_SSE;
+ | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits);
default:
return clk_div
| SSCR0_Motorola
| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
- | SSCR0_SSE
| (bits > 16 ? SSCR0_EDSS : 0);
}
}
@@ -325,7 +336,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
u32 value;
config = lpss_get_config(drv_data);
- drv_data->lpss_base = drv_data->ioaddr + config->offset;
+ drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
@@ -421,7 +432,7 @@ static void cs_assert(struct spi_device *spi)
spi_controller_get_devdata(spi->controller);
if (drv_data->ssp_type == CE4100_SSP) {
- pxa2xx_spi_write(drv_data, SSSR, chip->frm);
+ pxa2xx_spi_write(drv_data, SSSR, spi->chip_select);
return;
}
@@ -430,11 +441,6 @@ static void cs_assert(struct spi_device *spi)
return;
}
- if (chip->gpiod_cs) {
- gpiod_set_value(chip->gpiod_cs, chip->gpio_cs_inverted);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, true);
}
@@ -460,11 +466,6 @@ static void cs_deassert(struct spi_device *spi)
return;
}
- if (chip->gpiod_cs) {
- gpiod_set_value(chip->gpiod_cs, !chip->gpio_cs_inverted);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, false);
}
@@ -482,7 +483,7 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
do {
- while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ while (read_SSSR_bits(drv_data, SSSR_RNE))
pxa2xx_spi_read(drv_data, SSDR);
} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
write_SSSR_CS(drv_data, SSSR_ROR);
@@ -496,8 +497,7 @@ static void pxa2xx_spi_off(struct driver_data *drv_data)
if (is_mmp2_ssp(drv_data))
return;
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa_ssp_disable(drv_data->ssp);
}
static int null_writer(struct driver_data *drv_data)
@@ -518,8 +518,7 @@ static int null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += n_bytes;
}
@@ -541,8 +540,7 @@ static int u8_writer(struct driver_data *drv_data)
static int u8_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
++drv_data->rx;
}
@@ -564,8 +562,7 @@ static int u16_writer(struct driver_data *drv_data)
static int u16_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 2;
}
@@ -587,8 +584,7 @@ static int u32_writer(struct driver_data *drv_data)
static int u32_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 4;
}
@@ -618,47 +614,51 @@ static void reset_sccr1(struct driver_data *drv_data)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
-static void int_error_stop(struct driver_data *drv_data, const char *msg)
+static void int_stop_and_reset(struct driver_data *drv_data)
{
- /* Stop and reset SSP */
+ /* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
+ if (pxa25x_ssp_comp(drv_data))
+ return;
+
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+}
+
+static void int_error_stop(struct driver_data *drv_data, const char *msg, int err)
+{
+ int_stop_and_reset(drv_data);
pxa2xx_spi_flush(drv_data);
pxa2xx_spi_off(drv_data);
- dev_err(&drv_data->pdev->dev, "%s\n", msg);
+ dev_err(drv_data->ssp->dev, "%s\n", msg);
- drv_data->controller->cur_msg->status = -EIO;
+ drv_data->controller->cur_msg->status = err;
spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
- /* Clear and disable interrupts */
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
+ int_stop_and_reset(drv_data);
spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
- u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
- drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+ u32 irq_status;
- u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
+ irq_status = read_SSSR_bits(drv_data, drv_data->mask_sr);
+ if (!(pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE))
+ irq_status &= ~SSSR_TFS;
if (irq_status & SSSR_ROR) {
- int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+ int_error_stop(drv_data, "interrupt_transfer: FIFO overrun", -EIO);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TUR) {
- int_error_stop(drv_data, "interrupt_transfer: fifo underrun");
+ int_error_stop(drv_data, "interrupt_transfer: FIFO underrun", -EIO);
return IRQ_HANDLED;
}
@@ -670,7 +670,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
}
}
- /* Drain rx fifo, Fill tx fifo and prevent overruns */
+ /* Drain Rx FIFO, fill Tx FIFO and prevent overruns */
do {
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
@@ -691,8 +691,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
sccr1_reg &= ~SSCR1_TIE;
/*
- * PXA25x_SSP has no timeout, set up rx threshould for the
- * remaining RX bytes.
+ * PXA25x_SSP has no timeout, set up Rx threshold for
+ * the remaining Rx bytes.
*/
if (pxa25x_ssp_comp(drv_data)) {
u32 rx_thre;
@@ -725,14 +725,12 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
pxa2xx_spi_off(drv_data);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
+ clear_SSCR1_bits(drv_data, drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
write_SSSR_CS(drv_data, drv_data->clear_sr);
- dev_err(&drv_data->pdev->dev,
- "bad message state in interrupt handler\n");
+ dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
}
static irqreturn_t ssp_int(int irq, void *dev_id)
@@ -748,7 +746,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
* the IRQ was not for us (we shouldn't be RPM suspended when the
* interrupt is enabled).
*/
- if (pm_runtime_suspended(&drv_data->pdev->dev))
+ if (pm_runtime_suspended(drv_data->ssp->dev))
return IRQ_NONE;
/*
@@ -916,7 +914,7 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
/*
* Calculate the divisor for the SCR (Serial Clock Rate), avoiding
- * that the SSP transmission rate can be greater than the device rate
+ * that the SSP transmission rate can be greater than the device rate.
*/
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
@@ -974,7 +972,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* Check if we can DMA this transfer */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
- /* reject already-mapped transfers; PIO won't always work */
+ /* Reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&spi->dev,
@@ -983,10 +981,10 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
return -EINVAL;
}
- /* warn ... we force this to PIO mode */
+ /* Warn ... we force this to PIO mode */
dev_warn_ratelimited(&spi->dev,
- "DMA disabled for transfer length %ld greater than %d\n",
- (long)transfer->len, MAX_DMA_LEN);
+ "DMA disabled for transfer length %u greater than %d\n",
+ transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
@@ -1028,8 +1026,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
u32_writer : null_writer;
}
/*
- * if bits/word is changed in dma mode, then must check the
- * thresholds and burst also
+ * If bits per word is changed in DMA mode, then must check
+ * the thresholds and burst also.
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
@@ -1080,47 +1078,45 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
dma_mapped ? "DMA" : "PIO");
if (is_lpss_ssp(drv_data)) {
- if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
- != chip->lpss_rx_threshold)
- pxa2xx_spi_write(drv_data, SSIRF,
- chip->lpss_rx_threshold);
- if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
- != chip->lpss_tx_threshold)
- pxa2xx_spi_write(drv_data, SSITF,
- chip->lpss_tx_threshold);
+ pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
+ pxa2xx_spi_update(drv_data, SSITF, GENMASK(15, 0), chip->lpss_tx_threshold);
}
- if (is_quark_x1000_ssp(drv_data) &&
- (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
- pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
-
- /* see if we need to reload the config registers */
- if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
- || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
- != (cr1 & change_mask)) {
- /* stop the SSP, and update the other bits */
- if (!is_mmp2_ssp(drv_data))
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
- /* first set CR1 without interrupt and service enables */
- pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
- /* restart the SSP */
- pxa2xx_spi_write(drv_data, SSCR0, cr0);
+ if (is_mrfld_ssp(drv_data)) {
+ u32 mask = SFIFOTT_RFT | SFIFOTT_TFT;
+ u32 thresh = 0;
- } else {
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+ thresh |= SFIFOTT_RxThresh(chip->lpss_rx_threshold);
+ thresh |= SFIFOTT_TxThresh(chip->lpss_tx_threshold);
+
+ pxa2xx_spi_update(drv_data, SFIFOTT, mask, thresh);
}
+ if (is_quark_x1000_ssp(drv_data))
+ pxa2xx_spi_update(drv_data, DDS_RATE, GENMASK(23, 0), chip->dds_rate);
+
+ /* Stop the SSP */
+ if (!is_mmp2_ssp(drv_data))
+ pxa_ssp_disable(drv_data->ssp);
+
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+
+ /* First set CR1 without interrupt and service enables */
+ pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1);
+
+ /* See if we need to reload the configuration registers */
+ pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
+
+ /* Restart the SSP */
+ pxa_ssp_enable(drv_data->ssp);
+
if (is_mmp2_ssp(drv_data)) {
- u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
- & SSSR_TFL_MASK) >> 8;
+ u8 tx_level = read_SSSR_bits(drv_data, SSSR_TFL_MASK) >> 8;
if (tx_level) {
- /* On MMP2, flipping SSE doesn't to empty TXFIFO. */
- dev_warn(&spi->dev, "%d bytes of garbage in TXFIFO!\n",
- tx_level);
+ /* On MMP2, flipping SSE doesn't empty the Tx FIFO. */
+ dev_warn(&spi->dev, "%u bytes of garbage in Tx FIFO!\n", tx_level);
if (tx_level > transfer->len)
tx_level = transfer->len;
drv_data->tx += tx_level;
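The threshold updates above spell out field widths with GENMASK() rather than magic constants; GENMASK(h, l) sets bits h down to l. A user-space check of the three masks used here:

    #include <stdio.h>

    /* User-space rendition of the kernel's GENMASK(h, l), 32-bit. */
    #define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
    	printf("%#x\n", GENMASK32(7, 0));	/* 0xff, SSIRF field */
    	printf("%#x\n", GENMASK32(15, 0));	/* 0xffff, SSITF field */
    	printf("%#x\n", GENMASK32(23, 0));	/* 0xffffff, DDS_RATE */
    	return 0;
    }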
@@ -1139,7 +1135,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/*
* Release the data by enabling service requests and interrupts,
- * without changing any mode bits
+ * without changing any mode bits.
*/
pxa2xx_spi_write(drv_data, SSCR1, cr1);
@@ -1150,18 +1146,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- /* Stop and reset SSP */
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
- pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_off(drv_data);
-
- dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
-
- drv_data->controller->cur_msg->status = -EINTR;
- spi_finalize_current_transfer(drv_data->controller);
+ int_error_stop(drv_data, "transfer aborted", -EINTR);
return 0;
}
@@ -1175,9 +1160,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~(drv_data->int_cr1 | drv_data->dma_cr1));
+ clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
@@ -1202,61 +1185,61 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
return 0;
}
+static void cleanup_cs(struct spi_device *spi)
+{
+ if (!gpio_is_valid(spi->cs_gpio))
+ return;
+
+ gpio_free(spi->cs_gpio);
+ spi->cs_gpio = -ENOENT;
+}
+
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
struct pxa2xx_spi_chip *chip_info)
{
- struct driver_data *drv_data =
- spi_controller_get_devdata(spi->controller);
- struct gpio_desc *gpiod;
- int err = 0;
+ struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
if (chip == NULL)
return 0;
- if (drv_data->cs_gpiods) {
- gpiod = drv_data->cs_gpiods[spi->chip_select];
- if (gpiod) {
- chip->gpiod_cs = gpiod;
- chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
- gpiod_set_value(gpiod, chip->gpio_cs_inverted);
- }
-
+ if (chip_info == NULL)
return 0;
- }
- if (chip_info == NULL)
+ if (drv_data->ssp_type == CE4100_SSP)
return 0;
- /* NOTE: setup() can be called multiple times, possibly with
- * different chip_info, release previously requested GPIO
+ /*
+ * NOTE: setup() can be called multiple times, possibly with
+ * different chip_info, release previously requested GPIO.
*/
- if (chip->gpiod_cs) {
- gpiod_put(chip->gpiod_cs);
- chip->gpiod_cs = NULL;
- }
+ cleanup_cs(spi);
- /* If (*cs_control) is provided, ignore GPIO chip select */
+ /* If ->cs_control() is provided, ignore GPIO chip select */
if (chip_info->cs_control) {
chip->cs_control = chip_info->cs_control;
return 0;
}
if (gpio_is_valid(chip_info->gpio_cs)) {
- err = gpio_request(chip_info->gpio_cs, "SPI_CS");
+ int gpio = chip_info->gpio_cs;
+ int err;
+
+ err = gpio_request(gpio, "SPI_CS");
if (err) {
- dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
- chip_info->gpio_cs);
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio);
return err;
}
- gpiod = gpio_to_desc(chip_info->gpio_cs);
- chip->gpiod_cs = gpiod;
- chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
+ err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH));
+ if (err) {
+ gpio_free(gpio);
+ return err;
+ }
- err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+ spi->cs_gpio = gpio;
}
- return err;
+ return 0;
}
static int setup(struct spi_device *spi)
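setup_cs() now parks the legacy chip-select GPIO at its inactive level as soon as it is claimed: high for the usual active-low CS, low when SPI_CS_HIGH is set. A sketch of that claim/park/cleanup idiom (in the driver, the GPIO number comes from chip_info):

    #include <linux/gpio.h>

    /* Sketch: request a legacy GPIO for chip select and drive it to
     * the deasserted level; free it again if configuration fails. */
    static int sketch_claim_cs(unsigned int gpio, bool cs_high)
    {
    	int err;

    	err = gpio_request(gpio, "SPI_CS");
    	if (err)
    		return err;

    	err = gpio_direction_output(gpio, !cs_high);
    	if (err)
    		gpio_free(gpio);

    	return err;
    }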
@@ -1267,6 +1250,7 @@ static int setup(struct spi_device *spi)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
+ int err;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1274,6 +1258,11 @@ static int setup(struct spi_device *spi)
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
+ case MRFLD_SSP:
+ tx_thres = TX_THRESH_MRFLD_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_MRFLD_DFLT;
+ break;
case CE4100_SSP:
tx_thres = TX_THRESH_CE4100_DFLT;
tx_hi_thres = 0;
@@ -1302,7 +1291,7 @@ static int setup(struct spi_device *spi)
break;
}
- /* Only alloc on first setup */
+ /* Only allocate on the first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
@@ -1316,15 +1305,15 @@ static int setup(struct spi_device *spi)
kfree(chip);
return -EINVAL;
}
-
- chip->frm = spi->chip_select;
}
chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
- /* protocol drivers may change the chip settings, so...
- * if chip_info exists, use it */
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it.
+ */
chip_info = spi->controller_data;
/* chip_info isn't always needed */
@@ -1349,15 +1338,24 @@ static int setup(struct spi_device *spi)
chip->cr1 |= SSCR1_SPH;
}
- chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
- chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
- | SSITF_TxHiThresh(tx_hi_thres);
+ if (is_lpss_ssp(drv_data)) {
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) |
+ SSITF_TxHiThresh(tx_hi_thres);
+ }
+
+ if (is_mrfld_ssp(drv_data)) {
+ chip->lpss_rx_threshold = rx_thres;
+ chip->lpss_tx_threshold = tx_thres;
+ }
- /* set dma burst and threshold outside of chip_info path so that if
- * chip_info goes away after setting chip->enable_dma, the
- * burst and threshold can still respond to changes in bits_per_word */
+ /*
+ * Set DMA burst and threshold outside of chip_info path so that if
+ * chip_info goes away after setting chip->enable_dma, the burst and
+ * threshold can still respond to changes in bits_per_word.
+ */
if (chip->enable_dma) {
- /* set up legal burst and threshold for dma */
+ /* Set up legal burst and threshold for DMA */
if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
spi->bits_per_word,
&chip->dma_burst_size,
@@ -1388,8 +1386,8 @@ static int setup(struct spi_device *spi)
}
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
- chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
- | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
+ chip->cr1 |= ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) |
+ ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0);
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
@@ -1413,22 +1411,18 @@ static int setup(struct spi_device *spi)
if (drv_data->ssp_type == CE4100_SSP)
return 0;
- return setup_cs(spi, chip, chip_info);
+ err = setup_cs(spi, chip, chip_info);
+ if (err)
+ kfree(chip);
+
+ return err;
}
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
- struct driver_data *drv_data =
- spi_controller_get_devdata(spi->controller);
-
- if (!chip)
- return;
-
- if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
- chip->gpiod_cs)
- gpiod_put(chip->gpiod_cs);
+ cleanup_cs(spi);
kfree(chip);
}
@@ -1645,7 +1639,7 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- if (has_acpi_companion(&drv_data->pdev->dev)) {
+ if (has_acpi_companion(drv_data->ssp->dev)) {
switch (drv_data->ssp_type) {
/*
* For Atoms the ACPI DeviceSelection used by the Windows
@@ -1677,7 +1671,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
- int status, count;
+ int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
@@ -1694,7 +1688,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
ssp = &platform_info->ssp;
if (!ssp->mmio_base) {
- dev_err(&pdev->dev, "failed to get ssp\n");
+ dev_err(&pdev->dev, "failed to get SSP\n");
return -ENODEV;
}
@@ -1705,17 +1699,18 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
- pxa_ssp_free(ssp);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto out_error_controller_alloc;
}
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
- drv_data->pdev = pdev;
drv_data->ssp = ssp;
- controller->dev.of_node = pdev->dev.of_node;
- /* the spi->mode bits understood by this driver: */
+ controller->dev.of_node = dev->of_node;
+ controller->dev.fwnode = dev->fwnode;
+
+ /* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
controller->bus_num = ssp->port_id;
@@ -1733,8 +1728,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp_type = ssp->type;
- drv_data->ioaddr = ssp->mmio_base;
- drv_data->ssdr_physical = ssp->phys_base + SSDR;
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1796,15 +1789,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->min_speed_hz =
DIV_ROUND_UP(controller->max_speed_hz, 512);
+ pxa_ssp_disable(ssp);
+
/* Load default SSP configuration */
- pxa2xx_spi_write(drv_data, SSCR0, 0);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
- /* using the Motorola SPI protocol and use 8 bit frame */
+ /* Use the Motorola SPI protocol and 8-bit frames */
tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@@ -1856,38 +1850,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
}
controller->num_chipselect = platform_info->num_chipselect;
-
- count = gpiod_count(&pdev->dev, "cs");
- if (count > 0) {
- int i;
-
- controller->num_chipselect = max_t(int, count,
- controller->num_chipselect);
-
- drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
- controller->num_chipselect, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!drv_data->cs_gpiods) {
- status = -ENOMEM;
- goto out_error_clock_enabled;
- }
-
- for (i = 0; i < controller->num_chipselect; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (IS_ERR(gpiod)) {
- /* Means use native chip select */
- if (PTR_ERR(gpiod) == -ENOENT)
- continue;
-
- status = PTR_ERR(gpiod);
- goto out_error_clock_enabled;
- } else {
- drv_data->cs_gpiods[i] = gpiod;
- }
- }
- }
+ controller->use_gpio_descriptors = true;
if (platform_info->is_slave) {
drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
@@ -1906,8 +1869,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_controller(controller);
- if (status != 0) {
- dev_err(&pdev->dev, "problem registering spi controller\n");
+ if (status) {
+ dev_err(&pdev->dev, "problem registering SPI controller\n");
goto out_error_pm_runtime_enabled;
}
@@ -1938,7 +1901,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
spi_unregister_controller(drv_data->controller);
/* Disable the SSP at the peripheral and SOC level */
- pxa2xx_spi_write(drv_data, SSCR0, 0);
+ pxa_ssp_disable(ssp);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
@@ -1965,9 +1928,10 @@ static int pxa2xx_spi_suspend(struct device *dev)
int status;
status = spi_controller_suspend(drv_data->controller);
- if (status != 0)
+ if (status)
return status;
- pxa2xx_spi_write(drv_data, SSCR0, 0);
+
+ pxa_ssp_disable(ssp);
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(ssp->clk);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 1400472bc986..9a20fb88e50f 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -1,28 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
*/
#ifndef SPI_PXA2XX_H
#define SPI_PXA2XX_H
-#include <linux/atomic.h>
-#include <linux/dmaengine.h>
-#include <linux/errno.h>
-#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/pxa2xx_ssp.h>
-#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/sizes.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-struct driver_data {
- /* Driver model hookup */
- struct platform_device *pdev;
+#include <linux/pxa2xx_ssp.h>
+
+struct gpio_desc;
+struct pxa2xx_spi_controller;
+struct spi_controller;
+struct spi_device;
+struct spi_transfer;
+struct driver_data {
/* SSP Info */
struct ssp_device *ssp;
@@ -33,10 +31,6 @@ struct driver_data {
/* PXA hookup */
struct pxa2xx_spi_controller *controller_info;
- /* SSP register addresses */
- void __iomem *ioaddr;
- phys_addr_t ssdr_physical;
-
/* SSP masks */
u32 dma_cr1;
u32 int_cr1;
@@ -59,9 +53,6 @@ struct driver_data {
void __iomem *lpss_base;
- /* GPIOs for chip selects */
- struct gpio_desc **cs_gpiods;
-
/* Optional slave FIFO ready signal */
struct gpio_desc *gpiod_ready;
};
@@ -71,37 +62,32 @@ struct chip_data {
u32 dds_rate;
u32 timeout;
u8 n_bytes;
+ u8 enable_dma;
u32 dma_burst_size;
- u32 threshold;
u32 dma_threshold;
+ u32 threshold;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
- u8 enable_dma;
- union {
- struct gpio_desc *gpiod_cs;
- unsigned int frm;
- };
- int gpio_cs_inverted;
+
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
+
void (*cs_control)(u32 command);
};
-static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
- unsigned reg)
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg)
{
- return __raw_readl(drv_data->ioaddr + reg);
+ return pxa_ssp_read_reg(drv_data->ssp, reg);
}
-static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
- unsigned reg, u32 val)
+static inline void pxa2xx_spi_write(const struct driver_data *drv_data, u32 reg, u32 val)
{
- __raw_writel(val, drv_data->ioaddr + reg);
+ pxa_ssp_write_reg(drv_data->ssp, reg, val);
}
#define DMA_ALIGNMENT 8
-static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
+static inline int pxa25x_ssp_comp(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case PXA25x_SSP:
@@ -113,11 +99,21 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
}
}
-static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+static inline void clear_SSCR1_bits(const struct driver_data *drv_data, u32 bits)
+{
+ pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~bits);
+}
+
+static inline u32 read_SSSR_bits(const struct driver_data *drv_data, u32 bits)
+{
+ return pxa2xx_spi_read(drv_data, SSSR) & bits;
+}
+
+static inline void write_SSSR_CS(const struct driver_data *drv_data, u32 val)
{
if (drv_data->ssp_type == CE4100_SSP ||
drv_data->ssp_type == QUARK_X1000_SSP)
- val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
+ val |= read_SSSR_bits(drv_data, SSSR_ALT_FRM_MASK);
pxa2xx_spi_write(drv_data, SSSR, val);
}
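With ioaddr gone from driver_data, the accessors go through the ssp_device instead, and read_SSSR_bits()/clear_SSCR1_bits() give names to the two masked idioms the driver uses everywhere. The underlying accessors amount to the usual readl/writel-at-offset wrappers; a reduced sketch:

    #include <linux/io.h>
    #include <linux/types.h>

    struct sketch_ssp {
    	void __iomem *mmio_base;
    };

    /* Sketch of ssp_device-style register accessors. */
    static u32 sketch_read(struct sketch_ssp *ssp, u32 off)
    {
    	return readl(ssp->mmio_base + off);
    }

    static void sketch_clear_bits(struct sketch_ssp *ssp, u32 off, u32 bits)
    {
    	writel(sketch_read(ssp, off) & ~bits, ssp->mmio_base + off);
    }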
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 52d6259d96ed..540861ca2ba3 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -107,6 +107,8 @@
#define CR0_OPM_MASTER 0x0
#define CR0_OPM_SLAVE 0x1
+#define CR0_SOI_OFFSET 23
+
#define CR0_MTM_OFFSET 0x21
/* Bit fields in SER, 2bit */
@@ -116,13 +118,14 @@
#define BAUDR_SCKDV_MIN 2
#define BAUDR_SCKDV_MAX 65534
-/* Bit fields in SR, 5bit */
-#define SR_MASK 0x1f
+/* Bit fields in SR, 6bit */
+#define SR_MASK 0x3f
#define SR_BUSY (1 << 0)
#define SR_TF_FULL (1 << 1)
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)
+#define SR_SLAVE_TX_BUSY (1 << 5)
/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK 0x1f
@@ -156,7 +159,8 @@
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
-#define ROCKCHIP_SPI_MAX_CS_NUM 2
+/* 2 for native cs, 2 for cs-gpio */
+#define ROCKCHIP_SPI_MAX_CS_NUM 4
#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
@@ -197,13 +201,19 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
-static inline void wait_for_idle(struct rockchip_spi *rs)
+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
do {
- if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
- return;
+ if (slave_mode) {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
+ !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
+ return;
+ } else {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
+ return;
+ }
} while (!time_after(jiffies, timeout));
dev_warn(rs->dev, "spi controller is in busy state!\n");
@@ -228,7 +238,7 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
struct spi_controller *ctlr = spi->controller;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
- bool cs_asserted = !enable;
+ bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
/* Return immediately for no-op */
if (cs_asserted == rs->cs_asserted[spi->chip_select])
@@ -238,11 +248,15 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
/* Keep things powered as long as CS is asserted */
pm_runtime_get_sync(rs->dev);
- ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
- BIT(spi->chip_select));
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
} else {
- ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
- BIT(spi->chip_select));
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
/* Drop reference from when we first asserted CS */
pm_runtime_put(rs->dev);
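With SPI_CS_HIGH in the picture, "asserted" is no longer simply the inverse of enable; the driver derives it from the mode bit. A two-line truth check of the expression used above:

    #include <stdbool.h>
    #include <stdio.h>

    static bool asserted(bool cs_high, bool enable)
    {
    	return cs_high ? enable : !enable;
    }

    int main(void)
    {
    	printf("%d\n", asserted(false, false));	/* active-low: 1 */
    	printf("%d\n", asserted(true, true));	/* SPI_CS_HIGH: 1 */
    	return 0;
    }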
@@ -383,7 +397,7 @@ static void rockchip_spi_dma_txcb(void *data)
return;
/* Wait until the FIFO data has completely drained. */
- wait_for_idle(rs);
+ wait_for_tx_idle(rs, ctlr->slave);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
@@ -495,6 +509,8 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
+ if (spi->mode & SPI_CS_HIGH)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
@@ -540,12 +556,12 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
* interrupt exactly when the fifo is full doesn't seem to work,
* so we need the strict inequality here
*/
- if (xfer->len < rs->fifo_len)
- writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+ if ((xfer->len / rs->n_bytes) < rs->fifo_len)
+ writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
- writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
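The watermarks are now programmed in frames rather than bytes, hence the division by n_bytes: 32 bytes at 16 bits per word is 16 frames, giving an RXFTLR of 15 for a short transfer. A quick check under those assumptions:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int len = 32;		/* transfer length, bytes */
    	unsigned int n_bytes = 2;	/* 16 bits per word */
    	unsigned int fifo_len = 64;	/* FIFO depth in frames, assumed */
    	unsigned int frames = len / n_bytes;
    	unsigned int rxftlr = frames < fifo_len ? frames - 1
    						: fifo_len / 2 - 1;

    	printf("RXFTLR = %u\n", rxftlr);	/* 15 */
    	return 0;
    }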
@@ -783,6 +799,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->can_dma = rockchip_spi_can_dma;
}
+ switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
+ case ROCKCHIP_SPI_VER2_TYPE2:
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ break;
+ default:
+ break;
+ }
+
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register controller\n");
@@ -922,6 +946,7 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
{ .compatible = "rockchip,rv1108-spi", },
+ { .compatible = "rockchip,rv1126-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index e39fd38f5180..d16ed88802d3 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -618,9 +618,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = -ETIMEDOUT;
}
if (tx)
- dmaengine_terminate_all(rspi->ctlr->dma_tx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_tx);
if (rx)
- dmaengine_terminate_all(rspi->ctlr->dma_rx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@@ -634,7 +634,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
- dmaengine_terminate_all(rspi->ctlr->dma_rx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
dev_warn_once(&rspi->ctlr->dev,
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 297c512069a5..5d27ee482237 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -174,7 +174,7 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- if (t && t->len + tlen > SC18IS602_BUFSIZ)
+ if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
return -EINVAL;
return 0;
@@ -219,6 +219,11 @@ static int sc18is602_transfer_one(struct spi_master *master,
return status;
}
+static size_t sc18is602_max_transfer_size(struct spi_device *spi)
+{
+ return SC18IS602_BUFSIZ;
+}
+
static int sc18is602_setup(struct spi_device *spi)
{
struct sc18is602 *hw = spi_master_get_devdata(spi->master);
@@ -293,6 +298,8 @@ static int sc18is602_probe(struct i2c_client *client,
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
+ master->max_transfer_size = sc18is602_max_transfer_size;
+ master->max_message_size = sc18is602_max_transfer_size;
master->dev.of_node = np;
master->min_speed_hz = hw->freq / 128;
master->max_speed_hz = hw->freq / 4;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 41ed9ff8fad0..f88d9acd20d9 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -853,10 +853,10 @@ stop_reset:
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
- dmaengine_terminate_all(p->ctlr->dma_tx);
+ dmaengine_terminate_sync(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
- dmaengine_terminate_all(p->ctlr->dma_rx);
+ dmaengine_terminate_sync(p->ctlr->dma_rx);
sh_msiof_write(p, SIIER, 0);
return ret;
}
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index b41a75749b49..28e70db9bbba 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -1068,6 +1068,7 @@ static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
static struct platform_driver sprd_spi_driver = {
.driver = {
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 7e640ccc7e77..27f35aa2d746 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -36,6 +36,7 @@
#define CR_FTIE BIT(18)
#define CR_SMIE BIT(19)
#define CR_TOIE BIT(20)
+#define CR_APMS BIT(22)
#define CR_PRESC_MASK GENMASK(31, 24)
#define QSPI_DCR 0x04
@@ -53,6 +54,7 @@
#define QSPI_FCR 0x0c
#define FCR_CTEF BIT(0)
#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
#define QSPI_DLR 0x10
@@ -91,7 +93,6 @@
#define STM32_AUTOSUSPEND_DELAY -1
struct stm32_qspi_flash {
- struct stm32_qspi *qspi;
u32 cs;
u32 presc;
};
@@ -107,6 +108,7 @@ struct stm32_qspi {
u32 clk_rate;
struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
struct completion data_completion;
+ struct completion match_completion;
u32 fmode;
struct dma_chan *dma_chtx;
@@ -115,6 +117,7 @@ struct stm32_qspi {
u32 cr_reg;
u32 dcr_reg;
+ unsigned long status_timeout;
/*
* to protect device configuration, could be different between
@@ -128,11 +131,20 @@ static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
u32 cr, sr;
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
sr = readl_relaxed(qspi->io_base + QSPI_SR);
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ complete(&qspi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
if (sr & (SR_TEF | SR_TCF)) {
/* disable irq */
- cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_TCIE & ~CR_TEIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->data_completion);
@@ -294,7 +306,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
int err = 0;
if (!op->data.nbytes)
- return stm32_qspi_wait_nobusy(qspi);
+ goto wait_nobusy;
if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
goto out;
@@ -315,10 +327,31 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+ if (!err)
+ err = stm32_qspi_wait_nobusy(qspi);
return err;
}
+static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ u32 cr;
+
+ reinit_completion(&qspi->match_completion);
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);
+
+ if (!wait_for_completion_timeout(&qspi->match_completion,
+ msecs_to_jiffies(qspi->status_timeout)))
+ return -ETIMEDOUT;
+
+ writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);
+
+ return 0;
+}
+
static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
if (buswidth == 4)
@@ -332,7 +365,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
u32 ccr, cr;
- int timeout, err = 0;
+ int timeout, err = 0, err_poll_status = 0;
dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
@@ -378,6 +411,9 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
+ if (qspi->fmode == CCR_FMODE_APM)
+ err_poll_status = stm32_qspi_wait_poll_status(qspi, op);
+
err = stm32_qspi_tx(qspi, op);
/*
@@ -387,7 +423,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
* byte of the device (device size - fifo size). Since the device size is
* not known, prefetching is always stopped.
*/
- if (err || qspi->fmode == CCR_FMODE_MM)
+ if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
goto abort;
/* wait end of tx in indirect mode */
@@ -406,15 +442,49 @@ abort:
cr, !(cr & CR_ABORT), 1,
STM32_ABT_TIMEOUT_US);
- writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);
- if (err || timeout)
- dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
- __func__, err, timeout);
+ if (err || err_poll_status || timeout)
+ dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
return err;
}
+static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if (!spi_mem_supports_op(mem, op))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(qspi->dev);
+ return ret;
+ }
+
+ mutex_lock(&qspi->lock);
+
+ writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
+ writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
+ qspi->fmode = CCR_FMODE_APM;
+ qspi->status_timeout = timeout_ms;
+
+ ret = stm32_qspi_send(mem, op);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
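Automatic-polling mode moves the "read status until it matches" loop into hardware: the mask and match values go into PSMKR/PSMAR, the status-match interrupt is armed, and the thread just sleeps on a completion until the IRQ handler fires or the budget runs out. A reduced sketch of the waiting side, assuming an initialized completion and a 1000 ms budget:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Sketch: wait for the IRQ handler to call complete(), or give up. */
    static int sketch_wait_match(struct completion *done)
    {
    	if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
    		return -ETIMEDOUT;

    	return 0;
    }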
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
@@ -522,12 +592,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi->chip_select];
- flash->qspi = qspi;
flash->cs = spi->chip_select;
flash->presc = presc;
mutex_lock(&qspi->lock);
- qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+ qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
@@ -607,6 +676,7 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
.dirmap_create = stm32_qspi_dirmap_create,
.dirmap_read = stm32_qspi_dirmap_read,
+ .poll_status = stm32_qspi_poll_status,
};
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -661,6 +731,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
}
init_completion(&qspi->data_completion);
+ init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk)) {
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index cc8401980125..23ad052528db 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -379,6 +379,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
}
sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+ /* Finally enable the bus - enabling it earlier might raise SCK to HIGH */
+ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
+ reg |= SUN6I_GBL_CTL_BUS_ENABLE;
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
/* Setup the transfer now... */
if (sspi->tx_buf)
@@ -504,7 +508,7 @@ static int sun6i_spi_runtime_resume(struct device *dev)
}
sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
- SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
return 0;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a2e5907276e7..5131141bbf0d 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1071,8 +1071,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
- dev_err(tspi->dev,
- "spi transfer timeout, err %d\n", ret);
+ dev_err(tspi->dev, "spi transfer timeout\n");
if (tspi->is_curr_dma_xfer &&
(tspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tspi->tx_dma_chan);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index f7c832fd4003..6a726c95ac7a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1118,6 +1118,11 @@ static int tegra_slink_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+
tspi->def_command_reg = SLINK_M_S;
tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 2f806f4b2c34..2354ca1e3858 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1028,7 +1028,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "transfer timeout: %d\n", ret);
+ dev_err(tqspi->dev, "transfer timeout\n");
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tqspi->tx_dma_chan);
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index b8870784fc6e..8c4615b76339 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
- if (!data->pkt_rx_buff)
+ if (!data->pkt_rx_buff) {
kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
}
if (!data->pkt_rx_buff) {
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 6a9ef8ee3cc9..8900e51e1a1c 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -142,7 +142,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi)
* FSTRT start frame timing
* 0: rising edge of clock, 1: falling edge of clock
*/
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5d8a5ee62fa2..9262c6418463 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -367,7 +367,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
}
/**
- * zynq_qspi_setup - Configure the QSPI controller
+ * zynq_qspi_setup_op - Configure the QSPI controller
* @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer, baud
@@ -528,18 +528,17 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
- u8 opcode = op->cmd.opcode;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
- if (op->cmd.nbytes) {
+ if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = &opcode;
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->cmd.nbytes;
xqspi->rx_bytes = op->cmd.nbytes;
@@ -679,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
- goto remove_master;
+ goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
- goto remove_master;
+ goto clk_dis_all;
}
ret = of_property_read_u32(np, "num-cs",
@@ -694,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
if (ret < 0) {
ctlr->num_chipselect = 1;
} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
dev_err(&pdev->dev, "only 2 chip selects are available\n");
- goto remove_master;
+ goto clk_dis_all;
} else {
ctlr->num_chipselect = num_cs;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ba425b9c7700..c99181165321 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -47,10 +47,6 @@ static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- /* spi controllers may cleanup for released devices */
- if (spi->controller->cleanup)
- spi->controller->cleanup(spi);
-
spi_controller_put(spi->controller);
kfree(spi->driver_override);
kfree(spi);
@@ -367,6 +363,10 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
+ rc = of_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
@@ -558,49 +558,29 @@ static int spi_dev_check(struct device *dev, void *data)
return 0;
}
-/**
- * spi_add_device - Add spi_device allocated with spi_alloc_device
- * @spi: spi_device to register
- *
- * Companion function to spi_alloc_device. Devices allocated with
- * spi_alloc_device can be added onto the spi bus with this function.
- *
- * Return: 0 on success; negative errno on failure
- */
-int spi_add_device(struct spi_device *spi)
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
+}
+
+static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
- /* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
- ctlr->num_chipselect);
- return -EINVAL;
- }
-
- /* Set the bus ID string */
- spi_dev_set_name(spi);
-
- /* We need to make sure there's no other device with this
- * chipselect **BEFORE** we call setup(), else we'll trash
- * its configuration. Lock against concurrent add() calls.
- */
- mutex_lock(&spi_add_lock);
-
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
- goto done;
+ return status;
}
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) {
- status = -ENODEV;
- goto done;
+ return -ENODEV;
}
/* Descriptors take precedence */
@@ -617,23 +597,77 @@ int spi_add_device(struct spi_device *spi)
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
- goto done;
+ return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0)
+ if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
- else
+ spi_cleanup(spi);
+ } else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
+
+ return status;
+}
+
+/**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+ *
+ * Companion function to spi_alloc_device. Devices allocated with
+ * spi_alloc_device can be added onto the spi bus with this function.
+ *
+ * Return: 0 on success; negative errno on failure
+ */
+int spi_add_device(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi->chip_select >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
-done:
+ /* We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration. Lock against concurrent add() calls.
+ */
+ mutex_lock(&spi_add_lock);
+ status = __spi_add_device(spi);
mutex_unlock(&spi_add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
+static int spi_add_device_locked(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi->chip_select >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ WARN_ON(!mutex_is_locked(&spi_add_lock));
+ return __spi_add_device(spi);
+}
+
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -717,7 +751,9 @@ void spi_unregister_device(struct spi_device *spi)
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
- device_unregister(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -798,6 +834,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
+ trace_spi_set_cs(spi, activate);
+
spi->controller->last_cs_enable = enable;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
@@ -814,15 +852,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod)
- /* polarity handled by gpiolib */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
- else
+ if (spi->cs_gpiod) {
+ /*
+ * Historically ACPI has no means of the GPIO polarity and
+ * thus the SPISerialBus() resource defines it on the per-chip
+ * basis. In order to avoid a chain of negations, the GPIO
+ * polarity is considered being Active High. Even for the cases
+ * when _DSD() is involved (in the updated versions of ACPI)
+ * the GPIO CS polarity must be defined Active High to avoid
+ * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+ * into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+ } else {
/*
* invert the enable line, as active low is
* default for SPI.
*/
gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -941,11 +993,15 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
@@ -1112,10 +1168,20 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
if (!speed_hz)
speed_hz = 100000;
- ms = 8LL * 1000LL * xfer->len;
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since the speed is defined in Hz and we want milliseconds,
+ * apply the multiplier before the division, otherwise short
+ * transfers would truncate to 0.
+ */
+ ms = 8LL * MSEC_PER_SEC * xfer->len;
do_div(ms, speed_hz);
- ms += ms + 200; /* some tolerance */
+ /*
+ * Double it and add 200 ms of tolerance; clamp to the
+ * predefined maximum in case of overflow.
+ */
+ ms += ms + 200;
if (ms > UINT_MAX)
ms = UINT_MAX;
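Plugging in illustrative numbers makes the ordering point clear. For a 100-byte transfer at 100 kHz:

	ms = 8LL * MSEC_PER_SEC * 100;	/* 800000 */
	do_div(ms, 100000);		/* 8 ms on the wire */
	ms += ms + 200;			/* 216 ms total budget */

Dividing first (8 * 100 / 100000) would have truncated to 0 before the millisecond multiplier could be applied.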
@@ -1138,10 +1204,10 @@ static void _spi_transfer_delay_ns(u32 ns)
{
if (!ns)
return;
- if (ns <= 1000) {
+ if (ns <= NSEC_PER_USEC) {
ndelay(ns);
} else {
- u32 us = DIV_ROUND_UP(ns, 1000);
+ u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
if (us <= 10)
udelay(us);
@@ -1161,21 +1227,25 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
switch (unit) {
case SPI_DELAY_UNIT_USECS:
- delay *= 1000;
+ delay *= NSEC_PER_USEC;
break;
- case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
+ case SPI_DELAY_UNIT_NSECS:
+ /* Nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
/* clock cycles need to be obtained from spi_transfer */
if (!xfer)
return -EINVAL;
- /* if there is no effective speed know, then approximate
- * by underestimating with half the requested hz
+ /*
+ * If the effective speed is unknown, approximate it by
+ * underestimating with half of the requested Hz.
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
if (!hz)
return -EINVAL;
- delay *= DIV_ROUND_UP(1000000000, hz);
+
+ /* Convert delay to nanoseconds */
+ delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
break;
default:
return -EINVAL;
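A worked example of the SCK branch (hypothetical values): a 4-cycle delay at an effective 1 MHz clock resolves to

	struct spi_delay d = { .value = 4, .unit = SPI_DELAY_UNIT_SCK };
	/* delay = 4 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000)
	 *       = 4 * 1000 = 4000 ns
	 */

and when effective_speed_hz is zero the same math runs with speed_hz / 2, deliberately erring on the long side.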
@@ -1207,6 +1277,7 @@ EXPORT_SYMBOL_GPL(spi_delay_exec);
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
struct spi_transfer *xfer)
{
+ u32 default_delay_ns = 10 * NSEC_PER_USEC;
u32 delay = xfer->cs_change_delay.value;
u32 unit = xfer->cs_change_delay.unit;
int ret;
@@ -1214,16 +1285,16 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
/* return early on "fast" mode - for everything but USECS */
if (!delay) {
if (unit == SPI_DELAY_UNIT_USECS)
- _spi_transfer_delay_ns(10000);
+ _spi_transfer_delay_ns(default_delay_ns);
return;
}
ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
if (ret) {
dev_err_once(&msg->spi->dev,
- "Use of unsupported delay unit %i, using default of 10us\n",
- unit);
- _spi_transfer_delay_ns(10000);
+ "Use of unsupported delay unit %i, using default of %luus\n",
+ unit, default_delay_ns / NSEC_PER_USEC);
+ _spi_transfer_delay_ns(default_delay_ns);
}
}
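A hedged usage sketch for the delay above: a peripheral needing 50 us between chip-select toggles would fill the transfer like this (field values are illustrative):

	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 50;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_USECS;

Leaving cs_change_delay zeroed with the USECS unit still yields the 10 us default via the fast path above.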
@@ -2037,6 +2108,7 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
+ spi->dev.fwnode = of_fwnode_handle(nc);
/* Register the new device */
rc = spi_add_device(spi);
@@ -2084,6 +2156,55 @@ static void of_register_spi_devices(struct spi_controller *ctlr)
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
+/**
+ * spi_new_ancillary_device() - Register ancillary SPI device
+ * @spi: Pointer to the main SPI device registering the ancillary device
+ * @chip_select: Chip Select of the ancillary device
+ *
+ * Register an ancillary SPI device; for example some chips have a chip-select
+ * for normal device usage and another one for setup/firmware upload.
+ *
+ * This may only be called from the main SPI device's probe routine.
+ *
+ * Return: the new ancillary SPI device on success; ERR_PTR() on failure
+ */
+struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
+ u8 chip_select)
+{
+ struct spi_device *ancillary;
+ int rc = 0;
+
+ /* Alloc an spi_device */
+ ancillary = spi_alloc_device(spi->controller);
+ if (!ancillary) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+
+ /* Use provided chip-select for ancillary device */
+ ancillary->chip_select = chip_select;
+
+ /* Take over SPI mode/speed from SPI main device */
+ ancillary->max_speed_hz = spi->max_speed_hz;
+ ancillary->mode = spi->mode;
+
+ /* Register the new device */
+ rc = spi_add_device_locked(ancillary);
+ if (rc) {
+ dev_err(&spi->dev, "failed to register ancillary device\n");
+ goto err_out;
+ }
+
+ return ancillary;
+
+err_out:
+ spi_dev_put(ancillary);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
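A minimal sketch of how a driver might consume the helper from its probe routine (foo_probe and the chip-select number 1 are assumptions for illustration):

	static int foo_probe(struct spi_device *spi)
	{
		struct spi_device *aux;

		/* secondary CS used only for setup/firmware upload */
		aux = spi_new_ancillary_device(spi, 1);
		if (IS_ERR(aux))
			return PTR_ERR(aux);

		return 0;
	}

The probe-only restriction is what makes spi_add_device_locked() safe here: the main device's probe already runs with spi_add_lock held (device_add() is called from __spi_add_device() under the lock), which is exactly what the WARN_ON checks.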
+
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
struct spi_controller *ctlr;
@@ -2601,9 +2722,10 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
native_cs_mask |= BIT(i);
}
- ctlr->unused_native_cs = ffz(native_cs_mask);
- if (num_cs_gpios && ctlr->max_native_cs &&
- ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
}
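The replacement expression picks the same bit for a partially used mask but stays defined when no zero bit exists (ffz() is undefined in that case, while ffs(0) is specified to return 0). For example:

	/* native CS 0..2 in use: mask = 0b0111			*/
	/* ffs(~0b0111) = ffs(...11000) = 4 -> unused_native_cs = 3	*/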
@@ -3420,8 +3542,10 @@ int spi_setup(struct spi_device *spi)
spi_set_thread_rt(spi->controller);
}
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
- (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+ trace_spi_setup(spi, status);
+
+ dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
+ spi->mode & SPI_MODE_X_MASK,
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(spi->mode & SPI_3WIRE) ? "3wire, " : "",
@@ -3433,74 +3557,6 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
-/**
- * spi_set_cs_timing - configure CS setup, hold, and inactive delays
- * @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time specified via @spi_delay
- * @hold: CS hold time specified via @spi_delay
- * @inactive: CS inactive delay between transfers specified via @spi_delay
- *
- * Return: zero on success, else a negative error code.
- */
-int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive)
-{
- struct device *parent = spi->controller->dev.parent;
- size_t len;
- int status;
-
- if (spi->controller->set_cs_timing &&
- !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
- if (spi->controller->auto_runtime_pm) {
- status = pm_runtime_get_sync(parent);
- if (status < 0) {
- pm_runtime_put_noidle(parent);
- dev_err(&spi->controller->dev, "Failed to power device: %d\n",
- status);
- return status;
- }
-
- status = spi->controller->set_cs_timing(spi, setup,
- hold, inactive);
- pm_runtime_mark_last_busy(parent);
- pm_runtime_put_autosuspend(parent);
- return status;
- } else {
- return spi->controller->set_cs_timing(spi, setup, hold,
- inactive);
- }
- }
-
- if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
- (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
- (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
- dev_err(&spi->dev,
- "Clock-cycle delays for CS not supported in SW mode\n");
- return -ENOTSUPP;
- }
-
- len = sizeof(struct spi_delay);
-
- /* copy delays to controller */
- if (setup)
- memcpy(&spi->controller->cs_setup, setup, len);
- else
- memset(&spi->controller->cs_setup, 0, len);
-
- if (hold)
- memcpy(&spi->controller->cs_hold, hold, len);
- else
- memset(&spi->controller->cs_hold, 0, len);
-
- if (inactive)
- memcpy(&spi->controller->cs_inactive, inactive, len);
- else
- memset(&spi->controller->cs_inactive, 0, len);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(spi_set_cs_timing);
-
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
struct spi_device *spi)
{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f56e0e975a46..24e9469ea35b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -59,7 +59,7 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*
* REVISIT should changing those flags be privileged?
*/
-#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
+#define SPI_MODE_MASK (SPI_MODE_X_MASK | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
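For reference, the uapi header defines the new mask as exactly the two clock bits, so the replacement is behavior-neutral:

	/* include/uapi/linux/spi/spi.h */
	#define SPI_MODE_X_MASK	(SPI_CPOL | SPI_CPHA)

i.e. it names SPI modes 0-3 collectively.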
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 66a76fd83248..2de3896489c8 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -231,7 +231,8 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
- * a random base number. */
+ * a random base number.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
@@ -424,7 +425,8 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
- * a random base number. */
+ * a random base number.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index c1186415896b..d11b4242b6d2 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -55,7 +55,8 @@ void pcicore_write16(struct ssb_pcicore *pc, u16 offset, u16 value)
#include <asm/paccess.h>
/* Probe a 32bit value on the bus and catch bus exceptions.
* Returns nonzero on a bus exception.
- * This is MIPS specific */
+ * This is MIPS specific
+ */
#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr)))
/* Assume one-hot slot wiring */
@@ -255,7 +256,8 @@ static struct pci_controller ssb_pcicore_controller = {
};
/* This function is called when doing a pci_enable_device().
- * We must first check if the device is a device on the PCI-core bridge. */
+ * We must first check if the device is a device on the PCI-core bridge.
+ */
int ssb_pcicore_plat_dev_init(struct pci_dev *d)
{
if (d->bus->ops != &ssb_pcicore_pciops) {
@@ -381,11 +383,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
- * to non-MIPS platform. */
+ * to non-MIPS platform.
+ */
ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
set_io_port_base(ssb_pcicore_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
- * values. Not waiting at this point causes crashes of the machine. */
+ * values. Not waiting at this point causes crashes of the machine.
+ */
mdelay(10);
register_pci_controller(&ssb_pcicore_controller);
}
@@ -405,7 +409,8 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
return 0;
/* The 200-pin BCM4712 package does not bond out PCI. Even when
- * PCI is bonded out, some boards may leave the pins floating. */
+ * PCI is bonded out, some boards may leave the pins floating.
+ */
if (bus->chip_id == 0x4712) {
if (bus->chip_package == SSB_CHIPPACK_BCM4712S)
return 0;
@@ -685,7 +690,8 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
if (dev->bus->bustype != SSB_BUSTYPE_PCI) {
/* This SSB device is not on a PCI host-bus. So the IRQs are
* not routed through the PCI core.
- * So we must not enable routing through the PCI core. */
+ * So we must not enable routing through the PCI core.
+ */
goto out;
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 0a26984acb2c..3a29b5570f9f 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -37,7 +37,8 @@ static LIST_HEAD(buses);
/* Software ID counter */
static unsigned int next_busnumber;
/* buses_mutex locks the two buslists and the next_busnumber.
- * Don't lock this directly, but use ssb_buses_[un]lock() below. */
+ * Don't lock this directly, but use ssb_buses_[un]lock() below.
+ */
static DEFINE_MUTEX(buses_mutex);
/* There are differences in the codeflow, if the bus is
@@ -45,7 +46,8 @@ static DEFINE_MUTEX(buses_mutex);
* are not available early. This is a mechanism to delay
* these initializations to after early boot has finished.
 * It's also used to avoid mutex locking, as that's neither
- * available and needed early. */
+ * available nor needed early.
+ */
static bool ssb_is_early_boot = 1;
static void ssb_buses_lock(void);
@@ -161,7 +163,8 @@ int ssb_bus_resume(struct ssb_bus *bus)
int err;
/* Reset HW state information in memory, so that HW is
- * completely reinitialized. */
+ * completely reinitialized.
+ */
bus->mapped_device = NULL;
#ifdef CONFIG_SSB_DRIVER_PCICORE
bus->pcicore.setup_done = 0;
@@ -431,9 +434,7 @@ void ssb_bus_unregister(struct ssb_bus *bus)
int err;
err = ssb_gpio_unregister(bus);
- if (err == -EBUSY)
- pr_debug("Some GPIOs are still in use\n");
- else if (err)
+ if (err)
pr_debug("Can not unregister GPIO driver: %i\n", err);
ssb_buses_lock();
@@ -467,7 +468,8 @@ static int ssb_devices_register(struct ssb_bus *bus)
sdev = &(bus->devices[i]);
/* We don't register SSB-system devices to the kernel,
- * as the drivers for them are built into SSB. */
+ * as the drivers for them are built into SSB.
+ */
switch (sdev->id.coreid) {
case SSB_DEV_CHIPCOMMON:
case SSB_DEV_PCI:
@@ -521,7 +523,8 @@ static int ssb_devices_register(struct ssb_bus *bus)
if (err) {
pr_err("Could not register %s\n", dev_name(dev));
/* Set dev to NULL to not unregister
- * dev on error unwinding. */
+ * dev on error unwinding.
+ */
sdev->dev = NULL;
put_device(dev);
goto error;
@@ -667,7 +670,8 @@ ssb_bus_register(struct ssb_bus *bus,
ssb_bus_may_powerdown(bus);
/* Queue it for attach.
- * See the comment at the ssb_is_early_boot definition. */
+ * See the comment at the ssb_is_early_boot definition.
+ */
list_add_tail(&bus->list, &attach_queue);
if (!ssb_is_early_boot) {
/* This is not early boot, so we must attach the bus now */
@@ -1007,7 +1011,8 @@ static void ssb_flush_tmslow(struct ssb_device *dev)
* a machine check exception otherwise.
* Do this by reading the register back to commit the
* PCI write and delay an additional usec for the device
- * to react to the change. */
+ * to react to the change.
+ */
ssb_read32(dev, SSB_TMSLOW);
udelay(1);
}
@@ -1044,7 +1049,8 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
EXPORT_SYMBOL(ssb_device_enable);
/* Wait for bitmask in a register to get set or cleared.
- * timeout is in units of ten-microseconds */
+ * timeout is in units of ten-microseconds
+ */
static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
int timeout, int set)
{
@@ -1153,7 +1159,8 @@ int ssb_bus_may_powerdown(struct ssb_bus *bus)
/* On buses where more than one core may be working
* at a time, we must not powerdown stuff if there are
- * still cores that may want to run. */
+ * still cores that may want to run.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
goto out;
@@ -1303,13 +1310,11 @@ static int __init ssb_modinit(void)
if (err) {
pr_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
/* don't fail SSB init because of this */
- err = 0;
}
err = ssb_host_pcmcia_init();
if (err) {
pr_err("PCMCIA host initialization failed\n");
/* don't fail SSB init because of this */
- err = 0;
}
err = ssb_gige_init();
if (err) {
@@ -1322,7 +1327,8 @@ out:
}
/* ssb must be initialized after PCI but before the ssb drivers.
* That means we must use some initcall between subsys_initcall
- * and device_initcall. */
+ * and device_initcall.
+ */
fs_initcall(ssb_modinit);
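A quick, abridged map of the initcall levels the comment relies on (link order, per include/linux/init.h):

	/* core -> postcore -> arch -> subsys -> fs -> rootfs -> device -> late */

fs_initcall() is the first slot after subsys_initcall(), which is why ssb lands there.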
static void __exit ssb_modexit(void)
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index dac54041ad8d..148bcb99c212 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -1117,9 +1117,9 @@ const struct ssb_bus_ops ssb_pci_ops = {
#endif
};
-static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ssb_sprom_show(struct device *pcidev,
+ struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev);
struct ssb_bus *bus;
@@ -1131,9 +1131,9 @@ static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev,
return ssb_attr_sprom_show(bus, buf, sprom_do_read);
}
-static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ssb_sprom_store(struct device *pcidev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev);
struct ssb_bus *bus;
@@ -1146,9 +1146,7 @@ static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev,
sprom_check_crc, sprom_do_write);
}
-static DEVICE_ATTR(ssb_sprom, 0600,
- ssb_pci_attr_sprom_show,
- ssb_pci_attr_sprom_store);
+static DEVICE_ATTR_ADMIN_RW(ssb_sprom);
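The function renames above are forced by the macro: DEVICE_ATTR_ADMIN_RW(_name) expands to a 0600 attribute wired to _name##_show and _name##_store, roughly:

	/* Paraphrased from include/linux/device.h */
	#define DEVICE_ATTR_ADMIN_RW(_name) \
		struct device_attribute dev_attr_##_name = \
			__ATTR_RW_MODE(_name, 0600)

so the handlers must be named ssb_sprom_show/ssb_sprom_store to match the ssb_sprom attribute name.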
void ssb_pci_exit(struct ssb_bus *bus)
{
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index d7d730c245c5..45502098e0c7 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -723,9 +723,9 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
return -ENODEV;
}
-static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ssb_sprom_show(struct device *pcmciadev,
+ struct device_attribute *attr,
+ char *buf)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
@@ -739,9 +739,9 @@ static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev,
ssb_pcmcia_sprom_read_all);
}
-static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ssb_sprom_store(struct device *pcmciadev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
@@ -756,9 +756,7 @@ static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev,
ssb_pcmcia_sprom_write_all);
}
-static DEVICE_ATTR(ssb_sprom, 0600,
- ssb_pcmcia_attr_sprom_show,
- ssb_pcmcia_attr_sprom_store);
+static DEVICE_ATTR_ADMIN_RW(ssb_sprom);
static int ssb_pcmcia_cor_setup(struct ssb_bus *bus, u8 cor)
{
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index f49ab1aa2149..4161e5d1f276 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -325,6 +325,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
pr_err("More than %d ssb cores found (%d)\n",
SSB_MAX_NR_CORES, bus->nr_devices);
+ err = -EINVAL;
goto err_unmap;
}
if (bus->bustype == SSB_BUSTYPE_SSB) {
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 7fe0afb42234..66c5c2169704 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev))) {
error = -EIO;
- memset((void *)buffer, 0xff, count);
goto err_out;
}
offset |= bus->sdio_sbaddr & 0xffff;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 741147a4f0fe..ecc5c9da9027 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2064,7 +2064,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
struct nbu2ss_ep *ep,
int status)
{
- struct nbu2ss_req *req;
+ struct nbu2ss_req *req, *n;
/* Endpoint Disable */
_nbu2ss_epn_exit(udc, ep);
@@ -2076,7 +2076,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
return 0;
/* called with irqs blocked */
- list_for_each_entry(req, &ep->queue, queue) {
+ list_for_each_entry_safe(req, n, &ep->queue, queue) {
_nbu2ss_ep_done(ep, req, status);
}
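The switch to the _safe iterator matters because _nbu2ss_ep_done() unlinks req from ep->queue while the loop is walking it. The safe variant caches the next node before the body runs, shaped roughly like this (paraphrase, not the real macro):

	for (req = first_entry(&ep->queue), n = next_entry(req);
	     &req->queue != &ep->queue;
	     req = n, n = next_entry(n))
		_nbu2ss_ep_done(ep, req, status);

so deleting req inside the body no longer derails the traversal.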
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index dfd71e99e872..eab534dc4bcc 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
else
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
if (pdata) {
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index ca59986b20f8..e3aaae920847 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -42,4 +42,6 @@ source "drivers/staging/media/tegra-video/Kconfig"
source "drivers/staging/media/ipu3/Kconfig"
+source "drivers/staging/media/av7110/Kconfig"
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 716929a1a313..5b5afc5b03a0 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+obj-$(CONFIG_DVB_AV7110) += av7110/
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 51498b2e85b8..606b7754fdfd 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -16,7 +16,6 @@ atomisp-objs += \
pci/atomisp_acc.o \
pci/atomisp_cmd.o \
pci/atomisp_compat_css20.o \
- pci/atomisp_compat_ioctl32.o \
pci/atomisp_csi2.o \
pci/atomisp_drvfs.o \
pci/atomisp_file.o \
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 6987bb2d32cf..2d1ef9eb262a 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -120,6 +120,11 @@ TODO
for this driver until the other work is done, as there will be a lot
of code churn until this driver becomes functional again.
+16. Fix private ioctls to not need a compat_ioctl handler for running
+ 32-bit tasks. The compat code has been removed because of bugs,
+ and should not be needed for modern drivers. Fixing this properly
+ unfortunately means an incompatible ABI change.
+
Limitations
===========
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index d170d0adfea4..687888d643df 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -300,7 +300,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
/* pixel clock calculation */
dev->vt_pix_clk_freq_mhz = 14400000; // 16.8MHz
buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz;
- pr_info("vt_pix_clk_freq_mhz=%d\n", buf->vt_pix_clk_freq_mhz);
+ dev_dbg(&client->dev, "vt_pix_clk_freq_mhz=%d\n", buf->vt_pix_clk_freq_mhz);
/* get integration time */
buf->coarse_integration_time_min = GC0310_COARSE_INTG_TIME_MIN;
@@ -326,7 +326,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
if (ret)
return ret;
buf->crop_horizontal_start = val | (reg_val & 0xFF);
- pr_info("crop_horizontal_start=%d\n", buf->crop_horizontal_start);
+ dev_dbg(&client->dev, "crop_horizontal_start=%d\n", buf->crop_horizontal_start);
/* Getting crop_vertical_start */
ret = gc0310_read_reg(client, GC0310_8BIT,
@@ -339,7 +339,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
if (ret)
return ret;
buf->crop_vertical_start = val | (reg_val & 0xFF);
- pr_info("crop_vertical_start=%d\n", buf->crop_vertical_start);
+ dev_dbg(&client->dev, "crop_vertical_start=%d\n", buf->crop_vertical_start);
/* Getting output_width */
ret = gc0310_read_reg(client, GC0310_8BIT,
@@ -352,7 +352,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
if (ret)
return ret;
buf->output_width = val | (reg_val & 0xFF);
- pr_info("output_width=%d\n", buf->output_width);
+ dev_dbg(&client->dev, "output_width=%d\n", buf->output_width);
/* Getting output_height */
ret = gc0310_read_reg(client, GC0310_8BIT,
@@ -365,12 +365,12 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
if (ret)
return ret;
buf->output_height = val | (reg_val & 0xFF);
- pr_info("output_height=%d\n", buf->output_height);
+ dev_dbg(&client->dev, "output_height=%d\n", buf->output_height);
buf->crop_horizontal_end = buf->crop_horizontal_start + buf->output_width - 1;
buf->crop_vertical_end = buf->crop_vertical_start + buf->output_height - 1;
- pr_info("crop_horizontal_end=%d\n", buf->crop_horizontal_end);
- pr_info("crop_vertical_end=%d\n", buf->crop_vertical_end);
+ dev_dbg(&client->dev, "crop_horizontal_end=%d\n", buf->crop_horizontal_end);
+ dev_dbg(&client->dev, "crop_vertical_end=%d\n", buf->crop_vertical_end);
/* Getting line_length_pck */
ret = gc0310_read_reg(client, GC0310_8BIT,
@@ -389,7 +389,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
return ret;
sh_delay = reg_val;
buf->line_length_pck = buf->output_width + hori_blanking + sh_delay + 4;
- pr_info("hori_blanking=%d sh_delay=%d line_length_pck=%d\n", hori_blanking,
+ dev_dbg(&client->dev, "hori_blanking=%d sh_delay=%d line_length_pck=%d\n", hori_blanking,
sh_delay, buf->line_length_pck);
/* Getting frame_length_lines */
@@ -404,7 +404,7 @@ static int gc0310_get_intg_factor(struct i2c_client *client,
return ret;
vert_blanking = val | (reg_val & 0xFF);
buf->frame_length_lines = buf->output_height + vert_blanking;
- pr_info("vert_blanking=%d frame_length_lines=%d\n", vert_blanking,
+ dev_dbg(&client->dev, "vert_blanking=%d frame_length_lines=%d\n", vert_blanking,
buf->frame_length_lines);
buf->binning_factor_x = res->bin_factor_x ?
@@ -434,7 +434,7 @@ static int gc0310_set_gain(struct v4l2_subdev *sd, int gain)
dgain = gain / 2;
}
- pr_info("gain=0x%x again=0x%x dgain=0x%x\n", gain, again, dgain);
+ dev_dbg(&client->dev, "gain=0x%x again=0x%x dgain=0x%x\n", gain, again, dgain);
/* set analog gain */
ret = gc0310_write_reg(client, GC0310_8BIT,
@@ -458,7 +458,7 @@ static int __gc0310_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
- pr_info("coarse_itg=%d gain=%d digitgain=%d\n", coarse_itg, gain, digitgain);
+ dev_dbg(&client->dev, "coarse_itg=%d gain=%d digitgain=%d\n", coarse_itg, gain, digitgain);
/* set exposure */
ret = gc0310_write_reg(client, GC0310_8BIT,
@@ -718,7 +718,6 @@ static int gc0310_init(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct gc0310_device *dev = to_gc0310_sensor(sd);
- pr_info("%s S\n", __func__);
mutex_lock(&dev->input_lock);
/* set initial registers */
@@ -730,7 +729,6 @@ static int gc0310_init(struct v4l2_subdev *sd)
mutex_unlock(&dev->input_lock);
- pr_info("%s E\n", __func__);
return ret;
}
@@ -796,7 +794,6 @@ static int power_up(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
- pr_info("%s S\n", __func__);
if (!dev->platform_data) {
dev_err(&client->dev,
"no camera_sensor_platform_data");
@@ -823,7 +820,6 @@ static int power_up(struct v4l2_subdev *sd)
msleep(100);
- pr_info("%s E\n", __func__);
return 0;
fail_gpio:
@@ -959,20 +955,17 @@ static int startup(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- pr_info("%s S\n", __func__);
-
ret = gc0310_write_reg_array(client, gc0310_res[dev->fmt_idx].regs);
if (ret) {
dev_err(&client->dev, "gc0310 write register err.\n");
return ret;
}
- pr_info("%s E\n", __func__);
return ret;
}
static int gc0310_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -982,8 +975,6 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
int ret = 0;
int idx = 0;
- pr_info("%s S\n", __func__);
-
if (format->pad)
return -EINVAL;
@@ -1008,7 +999,7 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -1020,8 +1011,8 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
return -EINVAL;
}
- printk("%s: before gc0310_write_reg_array %s\n", __func__,
- gc0310_res[dev->fmt_idx].desc);
+ dev_dbg(&client->dev, "%s: before gc0310_write_reg_array %s\n",
+ __func__, gc0310_res[dev->fmt_idx].desc);
ret = startup(sd);
if (ret) {
dev_err(&client->dev, "gc0310 startup err\n");
@@ -1035,14 +1026,13 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
goto err;
}
- pr_info("%s E\n", __func__);
err:
mutex_unlock(&dev->input_lock);
return ret;
}
static int gc0310_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1068,7 +1058,6 @@ static int gc0310_detect(struct i2c_client *client)
int ret;
u16 id;
- pr_info("%s S\n", __func__);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -1085,7 +1074,7 @@ static int gc0310_detect(struct i2c_client *client)
return -ENODEV;
}
id = ((((u16)high) << 8) | (u16)low);
- pr_info("sensor ID = 0x%x\n", id);
+ dev_dbg(&client->dev, "sensor ID = 0x%x\n", id);
if (id != GC0310_ID) {
dev_err(&client->dev, "sensor ID error, read id = 0x%x, target id = 0x%x\n", id,
@@ -1095,8 +1084,6 @@ static int gc0310_detect(struct i2c_client *client)
dev_dbg(&client->dev, "detect gc0310 success\n");
- pr_info("%s E\n", __func__);
-
return 0;
}
@@ -1106,7 +1093,7 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
- pr_info("%s S enable=%d\n", __func__, enable);
+ dev_dbg(&client->dev, "%s S enable=%d\n", __func__, enable);
mutex_lock(&dev->input_lock);
if (enable) {
@@ -1142,7 +1129,6 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
}
mutex_unlock(&dev->input_lock);
- pr_info("%s E\n", __func__);
return ret;
}
@@ -1153,7 +1139,6 @@ static int gc0310_s_config(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- pr_info("%s S\n", __func__);
if (!platform_data)
return -ENODEV;
@@ -1196,7 +1181,6 @@ static int gc0310_s_config(struct v4l2_subdev *sd,
}
mutex_unlock(&dev->input_lock);
- pr_info("%s E\n", __func__);
return 0;
fail_csi_cfg:
@@ -1221,7 +1205,7 @@ static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
}
static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1232,7 +1216,7 @@ static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
}
static int gc0310_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
@@ -1365,7 +1349,6 @@ static int gc0310_probe(struct i2c_client *client)
if (ret)
gc0310_remove(client);
- pr_info("%s E\n", __func__);
return ret;
out_free:
v4l2_device_unregister_subdev(&dev->sd);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 78147ffb6099..9363c1a52ae9 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -171,8 +171,8 @@ static int __gc2235_buf_reg_array(struct i2c_client *client,
}
static int __gc2235_write_reg_is_consecutive(struct i2c_client *client,
- struct gc2235_write_ctrl *ctrl,
- const struct gc2235_reg *next)
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
{
if (ctrl->index == 0)
return 1;
@@ -228,7 +228,7 @@ static int gc2235_g_focal(struct v4l2_subdev *sd, s32 *val)
static int gc2235_g_fnumber(struct v4l2_subdev *sd, s32 *val)
{
- /*const f number for imx*/
+ /* const f number for gc2235 */
*val = (GC2235_F_NUMBER_DEFAULT_NUM << 16) | GC2235_F_NUMBER_DEM;
return 0;
}
@@ -427,7 +427,8 @@ static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
return 0;
}
-/* This returns the exposure time being used. This should only be used
+/*
+ * This returns the exposure time being used. This should only be used
* for filling in EXIF data, not for actual image processing.
*/
static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value)
@@ -658,9 +659,9 @@ static int gc2235_s_power(struct v4l2_subdev *sd, int on)
{
int ret;
- if (on == 0)
+ if (on == 0) {
ret = power_down(sd);
- else {
+ } else {
ret = power_up(sd);
if (!ret)
ret = __gc2235_init(sd);
@@ -746,11 +747,12 @@ static int startup(struct v4l2_subdev *sd)
int ret = 0;
if (is_init == 0) {
- /* force gc2235 to do a reset in res change, otherwise it
- * can not output normal after switching res. and it is not
- * necessary for first time run up after power on, for the sack
- * of performance
- */
+ /*
+ * force gc2235 to do a reset on a resolution change, otherwise
+ * it cannot output normally after switching. The reset is not
+ * necessary on the first run-up after power on and is skipped
+ * for the sake of performance
+ */
power_down(sd);
power_up(sd);
gc2235_write_reg_array(client, gc2235_init_settings);
@@ -767,7 +769,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int gc2235_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -796,7 +798,7 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -825,7 +827,7 @@ err:
}
static int gc2235_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -904,7 +906,8 @@ static int gc2235_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- /* power off the module, then power on it in future
+ /*
+ * power off the module, then power it on again later,
 * as the first power-on by the board may not fulfill the
 * power-on sequence needed by the module
*/
@@ -963,7 +966,7 @@ static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
}
static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -974,7 +977,7 @@ static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
}
static int gc2235_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
index b93c80471f22..7a20d918a9d5 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
@@ -50,14 +50,16 @@ struct tbd_data_record_header {
static int set_msr_configuration(struct i2c_client *client, uint8_t *bufptr,
unsigned int size)
{
- /* The configuration data contains any number of sequences where
+ /*
+ * The configuration data contains any number of sequences where
* the first byte (that is, uint8_t) that marks the number of bytes
* in the sequence to follow, is indeed followed by the indicated
* number of bytes of actual data to be written to sensor.
* By convention, the first two bytes of actual data should be
* understood as an address in the sensor address space (hibyte
* followed by lobyte) where the remaining data in the sequence
- * will be written. */
+ * will be written.
+ */
u8 *ptr = bufptr;
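A minimal sketch of the framing described above (hypothetical parsing loop, error checking omitted; not the driver's code): each record is [len][addr_hi][addr_lo][data...], where len counts the bytes after itself:

	while (size > 0) {
		u8 len = *ptr++;			/* bytes that follow */
		u16 addr = (ptr[0] << 8) | ptr[1];	/* hibyte, lobyte */

		/* write the remaining (len - 2) bytes starting at addr */
		ptr += len;
		size -= len + 1;
	}

The driver's actual loop additionally bounds-checks the buffer and performs the I2C writes per record.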
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index f5de81132177..11196180a206 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -475,10 +475,12 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Note: current modules wire only one GPIO signal (RESET#),
+ /*
+ * Note: current modules wire only one GPIO signal (RESET#),
* but the schematic wires up two to the connector. BIOS
* versions have been unfortunately inconsistent with which
- * ACPI index RESET# is on, so hit both */
+ * ACPI index RESET# is on, so hit both
+ */
if (flag) {
ret = dev->platform_data->gpio0_ctrl(sd, 0);
@@ -560,7 +562,7 @@ static int power_down(struct v4l2_subdev *sd)
if (ret)
dev_err(&client->dev, "vprog failed.\n");
- /*according to DS, 20ms is needed after power down*/
+ /* according to the datasheet, 20 ms is needed after power down */
msleep(20);
return ret;
@@ -568,9 +570,9 @@ static int power_down(struct v4l2_subdev *sd)
static int mt9m114_s_power(struct v4l2_subdev *sd, int power)
{
- if (power == 0)
+ if (power == 0) {
return power_down(sd);
- else {
+ } else {
if (power_up(sd))
return -EINVAL;
@@ -801,7 +803,7 @@ static int mt9m114_get_intg_factor(struct i2c_client *client,
}
static int mt9m114_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -822,7 +824,7 @@ static int mt9m114_get_fmt(struct v4l2_subdev *sd,
}
static int mt9m114_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -846,7 +848,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
mt9m114_try_res(&width, &height);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
res_index = mt9m114_to_res(width, height);
@@ -947,7 +949,7 @@ static int mt9m114_g_focal(struct v4l2_subdev *sd, s32 *val)
static int mt9m114_g_fnumber(struct v4l2_subdev *sd, s32 *val)
{
- /*const f number for mt9m114*/
+ /* const f number for mt9m114 */
*val = (MT9M114_F_NUMBER_DEFAULT_NUM << 16) | MT9M114_F_NUMBER_DEM;
return 0;
}
@@ -998,44 +1000,48 @@ static long mt9m114_s_exposure(struct v4l2_subdev *sd,
struct mt9m114_device *dev = to_mt9m114_sensor(sd);
int ret = 0;
unsigned int coarse_integration = 0;
- unsigned int FLines = 0;
- unsigned int FrameLengthLines = 0; /* ExposureTime.FrameLengthLines; */
- unsigned int AnalogGain, DigitalGain;
- u32 AnalogGainToWrite = 0;
+ unsigned int f_lines = 0;
+ unsigned int frame_len_lines = 0; /* ExposureTime.FrameLengthLines; */
+ unsigned int analog_gain, digital_gain;
+ u32 analog_gain_to_write = 0;
dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__,
exposure->integration_time[0], exposure->gain[0],
exposure->gain[1]);
coarse_integration = exposure->integration_time[0];
- /* fine_integration = ExposureTime.FineIntegrationTime; */
- /* FrameLengthLines = ExposureTime.FrameLengthLines; */
- FLines = mt9m114_res[dev->res].lines_per_frame;
- AnalogGain = exposure->gain[0];
- DigitalGain = exposure->gain[1];
+ /*
+ * fine_integration = ExposureTime.FineIntegrationTime;
+ * frame_len_lines = ExposureTime.FrameLengthLines;
+ */
+ f_lines = mt9m114_res[dev->res].lines_per_frame;
+ analog_gain = exposure->gain[0];
+ digital_gain = exposure->gain[1];
if (!dev->streamon) {
/* Save the first exposure values while the stream is off */
dev->first_exp = coarse_integration;
- dev->first_gain = AnalogGain;
- dev->first_diggain = DigitalGain;
+ dev->first_gain = analog_gain;
+ dev->first_diggain = digital_gain;
}
- /* DigitalGain = 0x400 * (((u16) DigitalGain) >> 8) +
- ((unsigned int)(0x400 * (((u16) DigitalGain) & 0xFF)) >>8); */
+ /* digital_gain = 0x400 * (((u16) digital_gain) >> 8) + */
+ /* ((unsigned int)(0x400 * (((u16) digital_gain) & 0xFF)) >>8); */
/* set frame length */
- if (FLines < coarse_integration + 6)
- FLines = coarse_integration + 6;
- if (FLines < FrameLengthLines)
- FLines = FrameLengthLines;
- ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, FLines);
+ if (f_lines < coarse_integration + 6)
+ f_lines = coarse_integration + 6;
+ if (f_lines < frame_len_lines)
+ f_lines = frame_len_lines;
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, f_lines);
if (ret) {
- v4l2_err(client, "%s: fail to set FLines\n", __func__);
+ v4l2_err(client, "%s: fail to set f_lines\n", __func__);
return -EINVAL;
}
/* set coarse integration */
- /* 3A provide real exposure time.
- should not translate to any value here. */
+ /*
+ * 3A provides the real exposure time; it should not be
+ * translated to any other value here.
+ */
ret = mt9m114_write_reg(client, MISENSOR_16BIT,
REG_EXPO_COARSE, (u16)(coarse_integration));
if (ret) {
@@ -1044,38 +1050,40 @@ static long mt9m114_s_exposure(struct v4l2_subdev *sd,
}
/*
- // set analog/digital gain
- switch(AnalogGain)
+ * set analog/digital gain
+ switch(analog_gain)
{
case 0:
- AnalogGainToWrite = 0x0;
+ analog_gain_to_write = 0x0;
break;
case 1:
- AnalogGainToWrite = 0x20;
+ analog_gain_to_write = 0x20;
break;
case 2:
- AnalogGainToWrite = 0x60;
+ analog_gain_to_write = 0x60;
break;
case 4:
- AnalogGainToWrite = 0xA0;
+ analog_gain_to_write = 0xA0;
break;
case 8:
- AnalogGainToWrite = 0xE0;
+ analog_gain_to_write = 0xE0;
break;
default:
- AnalogGainToWrite = 0x20;
+ analog_gain_to_write = 0x20;
break;
}
*/
- if (DigitalGain >= 16 || DigitalGain <= 1)
- DigitalGain = 1;
- /* AnalogGainToWrite =
- (u16)((DigitalGain << 12) | AnalogGainToWrite); */
- AnalogGainToWrite = (u16)((DigitalGain << 12) | (u16)AnalogGain);
+ if (digital_gain >= 16 || digital_gain <= 1)
+ digital_gain = 1;
+ /*
+ * analog_gain_to_write = (u16)((digital_gain << 12)
+ * | analog_gain_to_write);
+ */
+ analog_gain_to_write = (u16)((digital_gain << 12) | (u16)analog_gain);
ret = mt9m114_write_reg(client, MISENSOR_16BIT,
- REG_GAIN, AnalogGainToWrite);
+ REG_GAIN, analog_gain_to_write);
if (ret) {
- v4l2_err(client, "%s: fail to set AnalogGainToWrite\n",
+ v4l2_err(client, "%s: fail to set analog_gain_to_write\n",
__func__);
return -EINVAL;
}
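A worked example of the packing (illustrative register values): digital_gain = 4 and analog_gain = 0x20 yield

	analog_gain_to_write = (4 << 12) | 0x20;	/* = 0x4020 */

i.e. the digital gain lands in bits 15:12 of REG_GAIN and the analog gain in the low bits.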
@@ -1095,8 +1103,10 @@ static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
return 0;
}
-/* This returns the exposure time being used. This should only be used
- for filling in EXIF data, not for actual image processing. */
+/*
+ * This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1158,7 +1168,7 @@ static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val)
* This function is for touch exposure feature.
*/
static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1247,7 +1257,8 @@ static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val)
s32 luma = 0x37;
int err;
- /* EV value only support -2 to 2
+ /*
+ * The EV value only supports -2 to 2
* 0: 0x37, 1:0x47, 2:0x57, -1:0x27, -2:0x17
*/
if (val < -2 || val > 2)
@@ -1295,9 +1306,10 @@ static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val)
return 0;
}
-/* Fake interface
+/*
+ * Fake interface
 * mt9m114 cannot support 3a_lock yet
-*/
+ */
static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val)
{
aaalock = val;
@@ -1719,7 +1731,7 @@ static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable)
}
static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index)
@@ -1730,7 +1742,7 @@ static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mt9m114_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
unsigned int index = fse->index;
@@ -1843,7 +1855,7 @@ static int mt9m114_probe(struct i2c_client *client)
return ret;
}
- /*TODO add format code here*/
+ /* TODO add format code here */
dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index c90730513438..2111e4a478c1 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -127,7 +127,7 @@ static int ov2680_g_focal(struct v4l2_subdev *sd, s32 *val)
static int ov2680_g_fnumber(struct v4l2_subdev *sd, s32 *val)
{
- /*const f number for ov2680*/
+ /* const f number for ov2680 */
*val = (OV2680_F_NUMBER_DEFAULT_NUM << 16) | OV2680_F_NUMBER_DEM;
return 0;
@@ -399,7 +399,8 @@ static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
return 0;
}
-/* This returns the exposure time being used. This should only be used
+/*
+ * This returns the exposure time being used. This should only be used
* for filling in EXIF data, not for actual image processing.
*/
static int ov2680_q_exposure(struct v4l2_subdev *sd, s32 *value)
@@ -461,11 +462,11 @@ static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value)
ret = ov2680_read_reg(client, 1, OV2680_FLIP_REG, &val);
if (ret)
return ret;
- if (value) {
+ if (value)
val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
- } else {
+ else
val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
- }
+
ret = ov2680_write_reg(client, 1,
OV2680_FLIP_REG, val);
if (ret)
@@ -727,11 +728,13 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* The OV2680 documents only one GPIO input (#XSHUTDN), but
+ /*
+ * The OV2680 documents only one GPIO input (#XSHUTDN), but
* existing integrations often wire two (reset/power_down)
* because that is the way other sensors work. There is no
* way to tell how it is wired internally, so existing
- * firmwares expose both and we drive them symmetrically. */
+ * firmwares expose both and we drive them symmetrically.
+ */
if (flag) {
ret = dev->platform_data->gpio0_ctrl(sd, 1);
usleep_range(10000, 15000);
@@ -911,7 +914,7 @@ static int get_resolution_index(int w, int h)
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -948,7 +951,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -977,7 +980,8 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
goto err;
}
- /*recall flip functions to avoid flip registers
+ /*
+ * recall the flip functions so the flip registers are not
 * overridden by the default settings
*/
if (h_flag)
@@ -987,7 +991,8 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
v4l2_info(client, "\n%s idx %d\n", __func__, dev->fmt_idx);
- /*ret = startup(sd);
+ /*
+ * ret = startup(sd);
* if (ret)
* dev_err(&client->dev, "ov2680 startup err\n");
*/
@@ -997,7 +1002,7 @@ err:
}
static int ov2680_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1096,7 +1101,8 @@ static int ov2680_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- /* power off the module, then power on it in future
+ /*
+ * power off the module, then power it on again later,
 * as the first power-on by the board may not fulfill the
 * power-on sequence needed by the module
*/
@@ -1155,7 +1161,7 @@ static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1166,7 +1172,7 @@ static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 1209492c1826..90d0871a78a3 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -49,8 +49,8 @@ static int ov2722_read_reg(struct i2c_client *client,
return -ENODEV;
}
- if (data_length != OV2722_8BIT && data_length != OV2722_16BIT
- && data_length != OV2722_32BIT) {
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT &&
+ data_length != OV2722_32BIT) {
dev_err(&client->dev, "%s error, invalid data length\n",
__func__);
return -EINVAL;
@@ -212,8 +212,8 @@ static int __ov2722_buf_reg_array(struct i2c_client *client,
}
static int __ov2722_write_reg_is_consecutive(struct i2c_client *client,
- struct ov2722_write_ctrl *ctrl,
- const struct ov2722_reg *next)
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
{
if (ctrl->index == 0)
return 1;
@@ -774,11 +774,11 @@ static int ov2722_s_power(struct v4l2_subdev *sd, int on)
if (on == 0)
return power_down(sd);
- else {
- ret = power_up(sd);
- if (!ret)
- return ov2722_init(sd);
- }
+
+ ret = power_up(sd);
+ if (!ret)
+ return ov2722_init(sd);
+
return ret;
}
@@ -876,7 +876,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int ov2722_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -906,7 +906,7 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -961,7 +961,7 @@ err:
}
static int ov2722_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1104,7 +1104,7 @@ static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1115,7 +1115,7 @@ static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2722_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
index 787bbf59e895..bcce18b65fa6 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.h
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -764,8 +764,10 @@ static struct misensor_reg const mt9m114_common[] = {
{MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
{MISENSOR_16BIT, 0xC868, 0x0280}, /* cam_output_width = 952 */
{MISENSOR_16BIT, 0xC86A, 0x01E0}, /* cam_output_height = 538 */
- /* LOAD = Step3-Recommended
- * Patch,Errata and Sensor optimization Setting */
+ /*
+ * LOAD = Step3-Recommended
+ * Patch, Errata and Sensor optimization Setting
+ */
{MISENSOR_16BIT, 0x316A, 0x8270}, /* DAC_TXLO_ROW */
{MISENSOR_16BIT, 0x316C, 0x8270}, /* DAC_TXLO */
{MISENSOR_16BIT, 0x3ED0, 0x2305}, /* DAC_LD_4_5 */
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index 49920245e064..4d43b45915e5 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -459,8 +459,8 @@ static struct ov2680_reg const ov2680_656x496_30fps[] = {
};
/*
-* 800x600 30fps VBlanking 1lane 10Bit (binning)
-*/
+ * 800x600 30fps VBlanking 1lane 10Bit (binning)
+ */
static struct ov2680_reg const ov2680_720x592_30fps[] = {
{0x3086, 0x01},
{0x3501, 0x26},
@@ -504,8 +504,8 @@ static struct ov2680_reg const ov2680_720x592_30fps[] = {
};
/*
-* 800x600 30fps VBlanking 1lane 10Bit (binning)
-*/
+ * 800x600 30fps VBlanking 1lane 10Bit (binning)
+ */
static struct ov2680_reg const ov2680_800x600_30fps[] = {
{0x3086, 0x01},
{0x3501, 0x26},
@@ -634,7 +634,7 @@ static struct ov2680_reg const ov2680_1296x976_30fps[] = {
/*
* 1456*1096 30fps VBlanking 1lane 10bit(no-scaling)
-*/
+ */
static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
{0x3086, 0x00},
{0x3501, 0x48},
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index e698b63d6cb7..0828ca9ab6f2 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1577,7 +1577,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int ov5693_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1608,7 +1608,7 @@ static int ov5693_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -1676,7 +1676,7 @@ err:
}
static int ov5693_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1825,7 +1825,7 @@ static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1836,7 +1836,7 @@ static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
index f638d0bd09fe..9a1751895ab0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
@@ -77,8 +77,8 @@ acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle)
struct atomisp_acc_fw *acc_fw;
list_for_each_entry(acc_fw, &asd->acc.fw, list)
- if (acc_fw->handle == handle)
- return acc_fw;
+ if (acc_fw->handle == handle)
+ return acc_fw;
return NULL;
}
@@ -464,9 +464,11 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
continue;
for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
- /* QoS (ACC pipe) acceleration stages are currently
- * allowed only in continuous mode. Skip them for
- * all other modes. */
+ /*
+ * QoS (ACC pipe) acceleration stages are
+ * currently allowed only in continuous mode.
+ * Skip them for all other modes.
+ */
if (!continuous &&
acc_flag_to_pipe[i].flag ==
ATOMISP_ACC_FW_LOAD_FL_ACC)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 14abc1ca00e8..366161cff560 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -1138,9 +1138,10 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
asd->frame_status[vb->i] =
ATOMISP_FRAME_STATUS_OK;
}
- } else
+ } else {
asd->frame_status[vb->i] =
ATOMISP_FRAME_STATUS_OK;
+ }
} else {
asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK;
}
@@ -4841,6 +4842,9 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4876,7 +4880,7 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
snr_mbus_fmt->width, snr_mbus_fmt->height);
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_cfg, &format);
+ pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -4941,9 +4945,9 @@ atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f)
depth = get_pixel_depth(pixelformat);
- if (field == V4L2_FIELD_ANY)
+ if (field == V4L2_FIELD_ANY) {
field = V4L2_FIELD_NONE;
- else if (field != V4L2_FIELD_NONE) {
+ } else if (field != V4L2_FIELD_NONE) {
dev_err(isp->dev, "Wrong output field\n");
return -EINVAL;
}
@@ -5251,11 +5255,11 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
atomisp_output_fmts[] in atomisp_v4l2.c */
vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420;
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SOURCE_VF,
V4L2_SEL_TGT_COMPOSE, 0, &vf_size);
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt);
asd->video_out_vf.sh_fmt = IA_CSS_FRAME_FORMAT_NV12;
@@ -5492,6 +5496,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
const struct atomisp_format_bridge *format;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -5530,7 +5537,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
vformat.which = V4L2_SUBDEV_FORMAT_TRY;
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_cfg, &vformat);
+ pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
if (ffmt->width < req_ffmt->width ||
@@ -5568,7 +5575,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
asd->params.video_dis_en = false;
}
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, ffmt);
@@ -5647,7 +5654,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
}
atomisp_subdev_set_selection(
- &asd->subdev, fh.pad,
+ &asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE, source_pad,
V4L2_SEL_TGT_COMPOSE, 0, &r);
@@ -5777,7 +5784,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
ATOMISP_SUBDEV_PAD_SINK);
isp_source_fmt.code = format_bridge->mbus_code;
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad, &isp_source_fmt);
@@ -5896,13 +5903,13 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
isp_sink_crop.height = f->fmt.pix.height;
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK,
V4L2_SEL_TGT_CROP,
V4L2_SEL_FLAG_KEEP_CONFIG,
&isp_sink_crop);
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad, V4L2_SEL_TGT_COMPOSE,
0, &isp_sink_crop);
@@ -5921,7 +5928,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
f->fmt.pix.height);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad,
V4L2_SEL_TGT_COMPOSE, 0,
@@ -5955,14 +5962,14 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
f->fmt.pix.width,
ATOM_ISP_STEP_HEIGHT);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK,
V4L2_SEL_TGT_CROP,
V4L2_SEL_FLAG_KEEP_CONFIG,
&sink_crop);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad,
V4L2_SEL_TGT_COMPOSE, 0,
@@ -6053,7 +6060,8 @@ int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
ffmt.height = f->fmt.pix.height;
ffmt.code = format_bridge->mbus_code;
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, V4L2_SUBDEV_FORMAT_ACTIVE,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &ffmt);
return 0;
@@ -6564,17 +6572,17 @@ static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe)
{
struct atomisp_sub_device *asd = pipe->asd;
- if (ATOMISP_USE_YUVPP(asd))
+ if (ATOMISP_USE_YUVPP(asd)) {
return IA_CSS_PIPE_ID_YUVPP;
- else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
return IA_CSS_PIPE_ID_VIDEO;
- else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
return IA_CSS_PIPE_ID_CAPTURE;
- else if (pipe == &asd->video_out_video_capture)
+ } else if (pipe == &asd->video_out_video_capture) {
return IA_CSS_PIPE_ID_VIDEO;
- else if (pipe == &asd->video_out_vf)
+ } else if (pipe == &asd->video_out_vf) {
return IA_CSS_PIPE_ID_CAPTURE;
- else if (pipe == &asd->video_out_preview) {
+ } else if (pipe == &asd->video_out_preview) {
if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
return IA_CSS_PIPE_ID_VIDEO;
else
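Two recurring changes run through atomisp_cmd.c. First, code that negotiates a TRY format against a sensor now wraps its on-stack pad config in a one-pad v4l2_subdev_state, since the pad set_fmt op takes a state rather than a raw pad config. Second, the internal helpers atomisp_subdev_set_ffmt()/atomisp_subdev_set_selection() take fh.state instead of fh.pad, mirroring the same API shift. A condensed sketch of the caller side (sensor_sd is a placeholder and error handling is elided):

	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg		/* single-pad state on the stack */
	};
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_TRY, /* probe only, leave hardware alone */
	};
	int ret;

	ret = v4l2_subdev_call(sensor_sd, pad, set_fmt, &pad_state, &fmt);
	if (ret)
		return ret;
	/* fmt.format now holds what the sensor would actually deliver */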
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
index 412baeb91944..e8bdd264d31b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -49,9 +49,7 @@ struct ia_css_frame;
/* FIXME: check if can go */
extern int atomisp_punit_hpll_freq;
-/*
- * Helper function
- */
+/* Helper function */
void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
unsigned int size);
struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd);
@@ -65,9 +63,7 @@ bool atomisp_buffers_queued(struct atomisp_sub_device *asd);
/* ISP2401 */
bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
-/*
- * Interrupt functions
- */
+/* Interrupt functions */
void atomisp_msi_irq_init(struct atomisp_device *isp);
void atomisp_msi_irq_uninit(struct atomisp_device *isp);
void atomisp_wdt_work(struct work_struct *work);
@@ -82,15 +78,10 @@ int atomisp_get_frame_pgnr(struct atomisp_device *isp,
const struct ia_css_frame *frame, u32 *p_pgnr);
void atomisp_delayed_init_work(struct work_struct *work);
-/*
- * Get internal fmt according to V4L2 fmt
- */
-
+/* Get internal fmt according to V4L2 fmt */
bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
-/*
- * ISP features control function
- */
+/* ISP features control function */
/*
* Function to set sensor runmode by user when
@@ -105,9 +96,7 @@ int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function to enable/disable low light mode (including ANR)
- */
+/* Function to enable/disable low light mode (including ANR) */
int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
__s32 *value);
@@ -120,91 +109,63 @@ int atomisp_xnr(struct atomisp_sub_device *asd, int flag, int *arg);
int atomisp_formats(struct atomisp_sub_device *asd, int flag,
struct atomisp_formats_config *config);
-/*
- * Function to configure noise reduction
- */
+/* Function to configure noise reduction */
int atomisp_nr(struct atomisp_sub_device *asd, int flag,
struct atomisp_nr_config *config);
-/*
- * Function to configure temporal noise reduction (TNR)
- */
+/* Function to configure temporal noise reduction (TNR) */
int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
struct atomisp_tnr_config *config);
-/*
- * Function to configure black level compensation
- */
+/* Function to configure black level compensation */
int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
struct atomisp_ob_config *config);
-/*
- * Function to configure edge enhancement
- */
+/* Function to configure edge enhancement */
int atomisp_ee(struct atomisp_sub_device *asd, int flag,
struct atomisp_ee_config *config);
-/*
- * Function to update Gamma table for gamma, brightness and contrast config
- */
+/* Function to update Gamma table for gamma, brightness and contrast config */
int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
struct atomisp_gamma_table *config);
-/*
- * Function to update Ctc table for Chroma Enhancement
- */
+
+/* Function to update Ctc table for Chroma Enhancement */
int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
struct atomisp_ctc_table *config);
-/*
- * Function to update gamma correction parameters
- */
+/* Function to update gamma correction parameters */
int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
struct atomisp_gc_config *config);
-/*
- * Function to update Gdc table for gdc
- */
+/* Function to update Gdc table for gdc */
int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
struct atomisp_morph_table *config);
-/*
- * Function to update table for macc
- */
+/* Function to update table for macc */
int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
struct atomisp_macc_config *config);
-/*
- * Function to get DIS statistics.
- */
+
+/* Function to get DIS statistics. */
int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
struct atomisp_dis_statistics *stats);
-/*
- * Function to get DVS2 BQ resolution settings
- */
+/* Function to get DVS2 BQ resolution settings */
int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
struct atomisp_dvs2_bq_resolutions *bq_res);
-/*
- * Function to set the DIS coefficients.
- */
+/* Function to set the DIS coefficients. */
int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
struct atomisp_dis_coefficients *coefs);
-/*
- * Function to set the DIS motion vector.
- */
+/* Function to set the DIS motion vector. */
int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
struct atomisp_dis_vector *vector);
-/*
- * Function to set/get 3A stat from isp
- */
+/* Function to set/get 3A stat from isp */
int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
struct atomisp_3a_statistics *config);
-/*
- * Function to get metadata from isp
- */
+/* Function to get metadata from isp */
int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
struct atomisp_metadata *config);
@@ -213,84 +174,59 @@ int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
int atomisp_set_parameters(struct video_device *vdev,
struct atomisp_parameters *arg);
-/*
- * Function to set/get isp parameters to isp
- */
+
+/* Function to set/get isp parameters to isp */
int atomisp_param(struct atomisp_sub_device *asd, int flag,
struct atomisp_parm *config);
-/*
- * Function to configure color effect of the image
- */
+/* Function to configure color effect of the image */
int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
__s32 *effect);
-/*
- * Function to configure bad pixel correction
- */
+/* Function to configure bad pixel correction */
int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function to configure bad pixel correction params
- */
+/* Function to configure bad pixel correction params */
int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
struct atomisp_dp_config *config);
-/*
- * Function to enable/disable video image stablization
- */
+/* Function to enable/disable video image stabilization */
int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function to configure fixed pattern noise
- */
+/* Function to configure fixed pattern noise */
int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function to configure fixed pattern noise table
- */
+/* Function to configure fixed pattern noise table */
int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
struct v4l2_framebuffer *config);
-/*
- * Function to configure false color correction
- */
+/* Function to configure false color correction */
int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function to configure false color correction params
- */
+/* Function to configure false color correction params */
int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
struct atomisp_de_config *config);
-/*
- * Function to configure white balance params
- */
+/* Function to configure white balance params */
int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
struct atomisp_wb_config *config);
int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
struct atomisp_3a_config *config);
-/*
- * Function to setup digital zoom
- */
+/* Function to set up digital zoom */
int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
__s32 *value);
-/*
- * Function set camera_prefiles.xml current sensor pixel array size
- */
+/* Function to set camera_prefiles.xml current sensor pixel array size */
int atomisp_set_array_res(struct atomisp_sub_device *asd,
struct atomisp_resolution *config);
-/*
- * Function to calculate real zoom region for every pipe
- */
+/* Function to calculate real zoom region for every pipe */
int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
struct ia_css_dz_config *dz_config,
enum ia_css_pipe_id css_pipe_id);
@@ -371,9 +307,7 @@ void atomisp_css_flush(struct atomisp_device *isp);
int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
uint16_t source_pad);
-/*
- * Events. Only one event has to be exported for now.
- */
+/* Events. Only one event has to be exported for now. */
void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
@@ -389,34 +323,25 @@ void atomisp_free_css_parameters(struct atomisp_css_params *css_param);
void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe);
void atomisp_flush_params_queue(struct atomisp_video_pipe *asd);
-/*
- * Function to do Raw Buffer related operation, after enable Lock Unlock Raw Buffer
- */
+
+/* Functions for Raw Buffer related operations, used after enabling Raw Buffer Lock/Unlock */
int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id);
int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id);
-/*
- * Function to update Raw Buffer bitmap
- */
+/* Function to update Raw Buffer bitmap */
int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id);
void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd);
-/*
- * Function to enable/disable zoom for capture pipe
- */
+/* Function to enable/disable zoom for capture pipe */
int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
unsigned int *enable);
-/*
- * Function to get metadata type bu pipe id
- */
+/* Function to get metadata type by pipe id */
enum atomisp_metadata_type
atomisp_get_metadata_type(struct atomisp_sub_device *asd,
enum ia_css_pipe_id pipe_id);
-/*
- * Function for HAL to inject a fake event to wake up poll thread
- */
+/* Function for HAL to inject a fake event to wake up poll thread */
int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event);
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index ce3165291eec..f60198bb8a1a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -2782,9 +2782,9 @@ int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
int stream_index;
struct atomisp_device *isp = asd->isp;
- if (ATOMISP_SOC_CAMERA(asd))
+ if (ATOMISP_SOC_CAMERA(asd)) {
stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
- else {
+ } else {
stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
ATOMISP_INPUT_STREAM_VIDEO :
atomisp_source_pad_to_stream_id(asd, source_pad);
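The branch rewrites in atomisp_cmd.c and atomisp_compat_css20.c above all apply the same coding-style rule: if any arm of an if/else chain needs braces, every arm gets them. Illustrated on a neutral example:

	if (cond) {
		lone_statement();	/* braced only because the else arm is */
	} else {
		first_statement();
		second_statement();
	}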
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
deleted file mode 100644
index e5553df5bad4..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
+++ /dev/null
@@ -1,1202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-
-#include <linux/videodev2.h>
-
-#include "atomisp_internal.h"
-#include "atomisp_compat.h"
-#include "atomisp_ioctl.h"
-#include "atomisp_compat_ioctl32.h"
-
-/* Macros borrowed from v4l2-compat-ioctl32.c */
-
-#define get_user_cast(__x, __ptr) \
-({ \
- get_user(__x, (typeof(*__ptr) __user *)(__ptr)); \
-})
-
-#define put_user_force(__x, __ptr) \
-({ \
- put_user((typeof(*__x) __force *)(__x), __ptr); \
-})
-
-/* Use the same argument order as copy_in_user */
-#define assign_in_user(to, from) \
-({ \
- typeof(*from) __assign_tmp; \
- \
- get_user_cast(__assign_tmp, from) || put_user(__assign_tmp, to);\
-})
-
-static int get_atomisp_histogram32(struct atomisp_histogram __user *kp,
- struct atomisp_histogram32 __user *up)
-{
- compat_uptr_t tmp;
-
- if (!access_ok(up, sizeof(struct atomisp_histogram32)) ||
- assign_in_user(&kp->num_elements, &up->num_elements) ||
- get_user(tmp, &up->data) ||
- put_user(compat_ptr(tmp), &kp->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_histogram32(struct atomisp_histogram __user *kp,
- struct atomisp_histogram32 __user *up)
-{
- void __user *tmp;
-
- if (!access_ok(up, sizeof(struct atomisp_histogram32)) ||
- assign_in_user(&up->num_elements, &kp->num_elements) ||
- get_user(tmp, &kp->data) ||
- put_user(ptr_to_compat(tmp), &up->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
- struct v4l2_framebuffer32 __user *up)
-{
- compat_uptr_t tmp;
-
- if (!access_ok(up, sizeof(struct v4l2_framebuffer32)) ||
- get_user(tmp, &up->base) ||
- put_user_force(compat_ptr(tmp), &kp->base) ||
- assign_in_user(&kp->capability, &up->capability) ||
- assign_in_user(&kp->flags, &up->flags) ||
- copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_dis_statistics32(struct atomisp_dis_statistics __user *kp,
- struct atomisp_dis_statistics32 __user *up)
-{
- compat_uptr_t hor_prod_odd_real;
- compat_uptr_t hor_prod_odd_imag;
- compat_uptr_t hor_prod_even_real;
- compat_uptr_t hor_prod_even_imag;
- compat_uptr_t ver_prod_odd_real;
- compat_uptr_t ver_prod_odd_imag;
- compat_uptr_t ver_prod_even_real;
- compat_uptr_t ver_prod_even_imag;
-
- if (!access_ok(up, sizeof(struct atomisp_dis_statistics32)) ||
- copy_in_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
- get_user(hor_prod_odd_real,
- &up->dvs2_stat.hor_prod.odd_real) ||
- get_user(hor_prod_odd_imag,
- &up->dvs2_stat.hor_prod.odd_imag) ||
- get_user(hor_prod_even_real,
- &up->dvs2_stat.hor_prod.even_real) ||
- get_user(hor_prod_even_imag,
- &up->dvs2_stat.hor_prod.even_imag) ||
- get_user(ver_prod_odd_real,
- &up->dvs2_stat.ver_prod.odd_real) ||
- get_user(ver_prod_odd_imag,
- &up->dvs2_stat.ver_prod.odd_imag) ||
- get_user(ver_prod_even_real,
- &up->dvs2_stat.ver_prod.even_real) ||
- get_user(ver_prod_even_imag,
- &up->dvs2_stat.ver_prod.even_imag) ||
- assign_in_user(&kp->exp_id, &up->exp_id) ||
- put_user(compat_ptr(hor_prod_odd_real),
- &kp->dvs2_stat.hor_prod.odd_real) ||
- put_user(compat_ptr(hor_prod_odd_imag),
- &kp->dvs2_stat.hor_prod.odd_imag) ||
- put_user(compat_ptr(hor_prod_even_real),
- &kp->dvs2_stat.hor_prod.even_real) ||
- put_user(compat_ptr(hor_prod_even_imag),
- &kp->dvs2_stat.hor_prod.even_imag) ||
- put_user(compat_ptr(ver_prod_odd_real),
- &kp->dvs2_stat.ver_prod.odd_real) ||
- put_user(compat_ptr(ver_prod_odd_imag),
- &kp->dvs2_stat.ver_prod.odd_imag) ||
- put_user(compat_ptr(ver_prod_even_real),
- &kp->dvs2_stat.ver_prod.even_real) ||
- put_user(compat_ptr(ver_prod_even_imag),
- &kp->dvs2_stat.ver_prod.even_imag))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_dis_statistics32(struct atomisp_dis_statistics __user *kp,
- struct atomisp_dis_statistics32 __user *up)
-{
- void __user *hor_prod_odd_real;
- void __user *hor_prod_odd_imag;
- void __user *hor_prod_even_real;
- void __user *hor_prod_even_imag;
- void __user *ver_prod_odd_real;
- void __user *ver_prod_odd_imag;
- void __user *ver_prod_even_real;
- void __user *ver_prod_even_imag;
-
- if (!access_ok(up, sizeof(struct atomisp_dis_statistics32)) ||
- copy_in_user(up, kp, sizeof(struct atomisp_dvs_grid_info)) ||
- get_user(hor_prod_odd_real,
- &kp->dvs2_stat.hor_prod.odd_real) ||
- get_user(hor_prod_odd_imag,
- &kp->dvs2_stat.hor_prod.odd_imag) ||
- get_user(hor_prod_even_real,
- &kp->dvs2_stat.hor_prod.even_real) ||
- get_user(hor_prod_even_imag,
- &kp->dvs2_stat.hor_prod.even_imag) ||
- get_user(ver_prod_odd_real,
- &kp->dvs2_stat.ver_prod.odd_real) ||
- get_user(ver_prod_odd_imag,
- &kp->dvs2_stat.ver_prod.odd_imag) ||
- get_user(ver_prod_even_real,
- &kp->dvs2_stat.ver_prod.even_real) ||
- get_user(ver_prod_even_imag,
- &kp->dvs2_stat.ver_prod.even_imag) ||
- put_user(ptr_to_compat(hor_prod_odd_real),
- &up->dvs2_stat.hor_prod.odd_real) ||
- put_user(ptr_to_compat(hor_prod_odd_imag),
- &up->dvs2_stat.hor_prod.odd_imag) ||
- put_user(ptr_to_compat(hor_prod_even_real),
- &up->dvs2_stat.hor_prod.even_real) ||
- put_user(ptr_to_compat(hor_prod_even_imag),
- &up->dvs2_stat.hor_prod.even_imag) ||
- put_user(ptr_to_compat(ver_prod_odd_real),
- &up->dvs2_stat.ver_prod.odd_real) ||
- put_user(ptr_to_compat(ver_prod_odd_imag),
- &up->dvs2_stat.ver_prod.odd_imag) ||
- put_user(ptr_to_compat(ver_prod_even_real),
- &up->dvs2_stat.ver_prod.even_real) ||
- put_user(ptr_to_compat(ver_prod_even_imag),
- &up->dvs2_stat.ver_prod.even_imag) ||
- assign_in_user(&up->exp_id, &kp->exp_id))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_dis_coefficients32(struct atomisp_dis_coefficients __user *kp,
- struct atomisp_dis_coefficients32 __user *up)
-{
- compat_uptr_t hor_coefs_odd_real;
- compat_uptr_t hor_coefs_odd_imag;
- compat_uptr_t hor_coefs_even_real;
- compat_uptr_t hor_coefs_even_imag;
- compat_uptr_t ver_coefs_odd_real;
- compat_uptr_t ver_coefs_odd_imag;
- compat_uptr_t ver_coefs_even_real;
- compat_uptr_t ver_coefs_even_imag;
-
- if (!access_ok(up, sizeof(struct atomisp_dis_coefficients32)) ||
- copy_in_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
- get_user(hor_coefs_odd_real, &up->hor_coefs.odd_real) ||
- get_user(hor_coefs_odd_imag, &up->hor_coefs.odd_imag) ||
- get_user(hor_coefs_even_real, &up->hor_coefs.even_real) ||
- get_user(hor_coefs_even_imag, &up->hor_coefs.even_imag) ||
- get_user(ver_coefs_odd_real, &up->ver_coefs.odd_real) ||
- get_user(ver_coefs_odd_imag, &up->ver_coefs.odd_imag) ||
- get_user(ver_coefs_even_real, &up->ver_coefs.even_real) ||
- get_user(ver_coefs_even_imag, &up->ver_coefs.even_imag) ||
- put_user(compat_ptr(hor_coefs_odd_real),
- &kp->hor_coefs.odd_real) ||
- put_user(compat_ptr(hor_coefs_odd_imag),
- &kp->hor_coefs.odd_imag) ||
- put_user(compat_ptr(hor_coefs_even_real),
- &kp->hor_coefs.even_real) ||
- put_user(compat_ptr(hor_coefs_even_imag),
- &kp->hor_coefs.even_imag) ||
- put_user(compat_ptr(ver_coefs_odd_real),
- &kp->ver_coefs.odd_real) ||
- put_user(compat_ptr(ver_coefs_odd_imag),
- &kp->ver_coefs.odd_imag) ||
- put_user(compat_ptr(ver_coefs_even_real),
- &kp->ver_coefs.even_real) ||
- put_user(compat_ptr(ver_coefs_even_imag),
- &kp->ver_coefs.even_imag))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config __user *kp,
- struct atomisp_dvs_6axis_config32 __user *up)
-{
- compat_uptr_t xcoords_y;
- compat_uptr_t ycoords_y;
- compat_uptr_t xcoords_uv;
- compat_uptr_t ycoords_uv;
-
- if (!access_ok(up, sizeof(struct atomisp_dvs_6axis_config32)) ||
- assign_in_user(&kp->exp_id, &up->exp_id) ||
- assign_in_user(&kp->width_y, &up->width_y) ||
- assign_in_user(&kp->height_y, &up->height_y) ||
- assign_in_user(&kp->width_uv, &up->width_uv) ||
- assign_in_user(&kp->height_uv, &up->height_uv) ||
- get_user(xcoords_y, &up->xcoords_y) ||
- get_user(ycoords_y, &up->ycoords_y) ||
- get_user(xcoords_uv, &up->xcoords_uv) ||
- get_user(ycoords_uv, &up->ycoords_uv) ||
- put_user_force(compat_ptr(xcoords_y), &kp->xcoords_y) ||
- put_user_force(compat_ptr(ycoords_y), &kp->ycoords_y) ||
- put_user_force(compat_ptr(xcoords_uv), &kp->xcoords_uv) ||
- put_user_force(compat_ptr(ycoords_uv), &kp->ycoords_uv))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_3a_statistics32(struct atomisp_3a_statistics __user *kp,
- struct atomisp_3a_statistics32 __user *up)
-{
- compat_uptr_t data;
- compat_uptr_t rgby_data;
-
- if (!access_ok(up, sizeof(struct atomisp_3a_statistics32)) ||
- copy_in_user(kp, up, sizeof(struct atomisp_grid_info)) ||
- get_user(rgby_data, &up->rgby_data) ||
- put_user(compat_ptr(rgby_data), &kp->rgby_data) ||
- get_user(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data) ||
- assign_in_user(&kp->exp_id, &up->exp_id) ||
- assign_in_user(&kp->isp_config_id, &up->isp_config_id))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_3a_statistics32(struct atomisp_3a_statistics __user *kp,
- struct atomisp_3a_statistics32 __user *up)
-{
- void __user *data;
- void __user *rgby_data;
-
- if (!access_ok(up, sizeof(struct atomisp_3a_statistics32)) ||
- copy_in_user(up, kp, sizeof(struct atomisp_grid_info)) ||
- get_user(rgby_data, &kp->rgby_data) ||
- put_user(ptr_to_compat(rgby_data), &up->rgby_data) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data) ||
- assign_in_user(&up->exp_id, &kp->exp_id) ||
- assign_in_user(&up->isp_config_id, &kp->isp_config_id))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_metadata_stat32(struct atomisp_metadata __user *kp,
- struct atomisp_metadata32 __user *up)
-{
- compat_uptr_t data;
- compat_uptr_t effective_width;
-
- if (!access_ok(up, sizeof(struct atomisp_metadata32)) ||
- get_user(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data) ||
- assign_in_user(&kp->width, &up->width) ||
- assign_in_user(&kp->height, &up->height) ||
- assign_in_user(&kp->stride, &up->stride) ||
- assign_in_user(&kp->exp_id, &up->exp_id) ||
- get_user(effective_width, &up->effective_width) ||
- put_user_force(compat_ptr(effective_width), &kp->effective_width))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_metadata_stat32(struct atomisp_metadata __user *kp,
- struct atomisp_metadata32 __user *up)
-{
- void __user *data;
- void *effective_width;
-
- if (!access_ok(up, sizeof(struct atomisp_metadata32)) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data) ||
- assign_in_user(&up->width, &kp->width) ||
- assign_in_user(&up->height, &kp->height) ||
- assign_in_user(&up->stride, &kp->stride) ||
- assign_in_user(&up->exp_id, &kp->exp_id) ||
- get_user(effective_width, &kp->effective_width) ||
- put_user(ptr_to_compat((void __user *)effective_width),
- &up->effective_width))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-put_atomisp_metadata_by_type_stat32(struct atomisp_metadata_with_type __user *kp,
- struct atomisp_metadata_with_type32 __user *up)
-{
- void __user *data;
- u32 *effective_width;
-
- if (!access_ok(up, sizeof(struct atomisp_metadata_with_type32)) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data) ||
- assign_in_user(&up->width, &kp->width) ||
- assign_in_user(&up->height, &kp->height) ||
- assign_in_user(&up->stride, &kp->stride) ||
- assign_in_user(&up->exp_id, &kp->exp_id) ||
- get_user(effective_width, &kp->effective_width) ||
- put_user(ptr_to_compat((void __user *)effective_width),
- &up->effective_width) ||
- assign_in_user(&up->type, &kp->type))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_metadata_by_type_stat32(struct atomisp_metadata_with_type __user *kp,
- struct atomisp_metadata_with_type32 __user *up)
-{
- compat_uptr_t data;
- compat_uptr_t effective_width;
-
- if (!access_ok(up, sizeof(struct atomisp_metadata_with_type32)) ||
- get_user(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data) ||
- assign_in_user(&kp->width, &up->width) ||
- assign_in_user(&kp->height, &up->height) ||
- assign_in_user(&kp->stride, &up->stride) ||
- assign_in_user(&kp->exp_id, &up->exp_id) ||
- get_user(effective_width, &up->effective_width) ||
- put_user_force(compat_ptr(effective_width), &kp->effective_width) ||
- assign_in_user(&kp->type, &up->type))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_morph_table32(struct atomisp_morph_table __user *kp,
- struct atomisp_morph_table32 __user *up)
-{
- unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
-
- if (!access_ok(up, sizeof(struct atomisp_morph_table32)) ||
- assign_in_user(&kp->enabled, &up->enabled) ||
- assign_in_user(&kp->width, &up->width) ||
- assign_in_user(&kp->height, &up->height))
- return -EFAULT;
-
- while (n-- > 0) {
- compat_uptr_t coord_kp;
-
- if (get_user(coord_kp, &up->coordinates_x[n]) ||
- put_user(compat_ptr(coord_kp), &kp->coordinates_x[n]) ||
- get_user(coord_kp, &up->coordinates_y[n]) ||
- put_user(compat_ptr(coord_kp), &kp->coordinates_y[n]))
- return -EFAULT;
- }
- return 0;
-}
-
-static int put_atomisp_morph_table32(struct atomisp_morph_table __user *kp,
- struct atomisp_morph_table32 __user *up)
-{
- unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
-
- if (!access_ok(up, sizeof(struct atomisp_morph_table32)) ||
- assign_in_user(&up->enabled, &kp->enabled) ||
- assign_in_user(&up->width, &kp->width) ||
- assign_in_user(&up->height, &kp->height))
- return -EFAULT;
-
- while (n-- > 0) {
- void __user *coord_kp;
-
- if (get_user(coord_kp, &kp->coordinates_x[n]) ||
- put_user(ptr_to_compat(coord_kp), &up->coordinates_x[n]) ||
- get_user(coord_kp, &kp->coordinates_y[n]) ||
- put_user(ptr_to_compat(coord_kp), &up->coordinates_y[n]))
- return -EFAULT;
- }
- return 0;
-}
-
-static int get_atomisp_overlay32(struct atomisp_overlay __user *kp,
- struct atomisp_overlay32 __user *up)
-{
- compat_uptr_t frame;
-
- if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
- get_user(frame, &up->frame) ||
- put_user_force(compat_ptr(frame), &kp->frame) ||
- assign_in_user(&kp->bg_y, &up->bg_y) ||
- assign_in_user(&kp->bg_u, &up->bg_u) ||
- assign_in_user(&kp->bg_v, &up->bg_v) ||
- assign_in_user(&kp->blend_input_perc_y,
- &up->blend_input_perc_y) ||
- assign_in_user(&kp->blend_input_perc_u,
- &up->blend_input_perc_u) ||
- assign_in_user(&kp->blend_input_perc_v,
- &up->blend_input_perc_v) ||
- assign_in_user(&kp->blend_overlay_perc_y,
- &up->blend_overlay_perc_y) ||
- assign_in_user(&kp->blend_overlay_perc_u,
- &up->blend_overlay_perc_u) ||
- assign_in_user(&kp->blend_overlay_perc_v,
- &up->blend_overlay_perc_v) ||
- assign_in_user(&kp->overlay_start_x, &up->overlay_start_x) ||
- assign_in_user(&kp->overlay_start_y, &up->overlay_start_y))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_overlay32(struct atomisp_overlay __user *kp,
- struct atomisp_overlay32 __user *up)
-{
- void *frame;
-
- if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
- get_user(frame, &kp->frame) ||
- put_user(ptr_to_compat((void __user *)frame), &up->frame) ||
- assign_in_user(&up->bg_y, &kp->bg_y) ||
- assign_in_user(&up->bg_u, &kp->bg_u) ||
- assign_in_user(&up->bg_v, &kp->bg_v) ||
- assign_in_user(&up->blend_input_perc_y,
- &kp->blend_input_perc_y) ||
- assign_in_user(&up->blend_input_perc_u,
- &kp->blend_input_perc_u) ||
- assign_in_user(&up->blend_input_perc_v,
- &kp->blend_input_perc_v) ||
- assign_in_user(&up->blend_overlay_perc_y,
- &kp->blend_overlay_perc_y) ||
- assign_in_user(&up->blend_overlay_perc_u,
- &kp->blend_overlay_perc_u) ||
- assign_in_user(&up->blend_overlay_perc_v,
- &kp->blend_overlay_perc_v) ||
- assign_in_user(&up->overlay_start_x, &kp->overlay_start_x) ||
- assign_in_user(&up->overlay_start_y, &kp->overlay_start_y))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_calibration_group32(struct atomisp_calibration_group __user *kp,
- struct atomisp_calibration_group32 __user *up)
-{
- compat_uptr_t calb_grp_values;
-
- if (!access_ok(up, sizeof(struct atomisp_calibration_group32)) ||
- assign_in_user(&kp->size, &up->size) ||
- assign_in_user(&kp->type, &up->type) ||
- get_user(calb_grp_values, &up->calb_grp_values) ||
- put_user_force(compat_ptr(calb_grp_values), &kp->calb_grp_values))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-put_atomisp_calibration_group32(struct atomisp_calibration_group __user *kp,
- struct atomisp_calibration_group32 __user *up)
-{
- void *calb_grp_values;
-
- if (!access_ok(up, sizeof(struct atomisp_calibration_group32)) ||
- assign_in_user(&up->size, &kp->size) ||
- assign_in_user(&up->type, &kp->type) ||
- get_user(calb_grp_values, &kp->calb_grp_values) ||
- put_user(ptr_to_compat((void __user *)calb_grp_values),
- &up->calb_grp_values))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_acc_fw_load32(struct atomisp_acc_fw_load __user *kp,
- struct atomisp_acc_fw_load32 __user *up)
-{
- compat_uptr_t data;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_load32)) ||
- assign_in_user(&kp->size, &up->size) ||
- assign_in_user(&kp->fw_handle, &up->fw_handle) ||
- get_user_cast(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_acc_fw_load32(struct atomisp_acc_fw_load __user *kp,
- struct atomisp_acc_fw_load32 __user *up)
-{
- void __user *data;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_load32)) ||
- assign_in_user(&up->size, &kp->size) ||
- assign_in_user(&up->fw_handle, &kp->fw_handle) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg __user *kp,
- struct atomisp_acc_fw_arg32 __user *up)
-{
- compat_uptr_t value;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_arg32)) ||
- assign_in_user(&kp->fw_handle, &up->fw_handle) ||
- assign_in_user(&kp->index, &up->index) ||
- get_user(value, &up->value) ||
- put_user(compat_ptr(value), &kp->value) ||
- assign_in_user(&kp->size, &up->size))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg __user *kp,
- struct atomisp_acc_fw_arg32 __user *up)
-{
- void __user *value;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_arg32)) ||
- assign_in_user(&up->fw_handle, &kp->fw_handle) ||
- assign_in_user(&up->index, &kp->index) ||
- get_user(value, &kp->value) ||
- put_user(ptr_to_compat(value), &up->value) ||
- assign_in_user(&up->size, &kp->size))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_v4l2_private_int_data32(struct v4l2_private_int_data __user *kp,
- struct v4l2_private_int_data32 __user *up)
-{
- compat_uptr_t data;
-
- if (!access_ok(up, sizeof(struct v4l2_private_int_data32)) ||
- assign_in_user(&kp->size, &up->size) ||
- get_user(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data) ||
- assign_in_user(&kp->reserved[0], &up->reserved[0]) ||
- assign_in_user(&kp->reserved[1], &up->reserved[1]))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_v4l2_private_int_data32(struct v4l2_private_int_data __user *kp,
- struct v4l2_private_int_data32 __user *up)
-{
- void __user *data;
-
- if (!access_ok(up, sizeof(struct v4l2_private_int_data32)) ||
- assign_in_user(&up->size, &kp->size) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data) ||
- assign_in_user(&up->reserved[0], &kp->reserved[0]) ||
- assign_in_user(&up->reserved[1], &kp->reserved[1]))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_shading_table32(struct atomisp_shading_table __user *kp,
- struct atomisp_shading_table32 __user *up)
-{
- unsigned int n = ATOMISP_NUM_SC_COLORS;
-
- if (!access_ok(up, sizeof(struct atomisp_shading_table32)) ||
- assign_in_user(&kp->enable, &up->enable) ||
- assign_in_user(&kp->sensor_width, &up->sensor_width) ||
- assign_in_user(&kp->sensor_height, &up->sensor_height) ||
- assign_in_user(&kp->width, &up->width) ||
- assign_in_user(&kp->height, &up->height) ||
- assign_in_user(&kp->fraction_bits, &up->fraction_bits))
- return -EFAULT;
-
- while (n-- > 0) {
- compat_uptr_t tmp;
-
- if (get_user(tmp, &up->data[n]) ||
- put_user_force(compat_ptr(tmp), &kp->data[n]))
- return -EFAULT;
- }
- return 0;
-}
-
-static int get_atomisp_acc_map32(struct atomisp_acc_map __user *kp,
- struct atomisp_acc_map32 __user *up)
-{
- compat_uptr_t user_ptr;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_map32)) ||
- assign_in_user(&kp->flags, &up->flags) ||
- assign_in_user(&kp->length, &up->length) ||
- get_user(user_ptr, &up->user_ptr) ||
- put_user(compat_ptr(user_ptr), &kp->user_ptr) ||
- assign_in_user(&kp->css_ptr, &up->css_ptr) ||
- assign_in_user(&kp->reserved[0], &up->reserved[0]) ||
- assign_in_user(&kp->reserved[1], &up->reserved[1]) ||
- assign_in_user(&kp->reserved[2], &up->reserved[2]) ||
- assign_in_user(&kp->reserved[3], &up->reserved[3]))
- return -EFAULT;
-
- return 0;
-}
-
-static int put_atomisp_acc_map32(struct atomisp_acc_map __user *kp,
- struct atomisp_acc_map32 __user *up)
-{
- void __user *user_ptr;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_map32)) ||
- assign_in_user(&up->flags, &kp->flags) ||
- assign_in_user(&up->length, &kp->length) ||
- get_user(user_ptr, &kp->user_ptr) ||
- put_user(ptr_to_compat(user_ptr), &up->user_ptr) ||
- assign_in_user(&up->css_ptr, &kp->css_ptr) ||
- assign_in_user(&up->reserved[0], &kp->reserved[0]) ||
- assign_in_user(&up->reserved[1], &kp->reserved[1]) ||
- assign_in_user(&up->reserved[2], &kp->reserved[2]) ||
- assign_in_user(&up->reserved[3], &kp->reserved[3]))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg __user *kp,
- struct atomisp_acc_s_mapped_arg32 __user *up)
-{
- if (!access_ok(up, sizeof(struct atomisp_acc_s_mapped_arg32)) ||
- assign_in_user(&kp->fw_handle, &up->fw_handle) ||
- assign_in_user(&kp->memory, &up->memory) ||
- assign_in_user(&kp->length, &up->length) ||
- assign_in_user(&kp->css_ptr, &up->css_ptr))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-put_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg __user *kp,
- struct atomisp_acc_s_mapped_arg32 __user *up)
-{
- if (!access_ok(up, sizeof(struct atomisp_acc_s_mapped_arg32)) ||
- assign_in_user(&up->fw_handle, &kp->fw_handle) ||
- assign_in_user(&up->memory, &kp->memory) ||
- assign_in_user(&up->length, &kp->length) ||
- assign_in_user(&up->css_ptr, &kp->css_ptr))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_atomisp_parameters32(struct atomisp_parameters __user *kp,
- struct atomisp_parameters32 __user *up)
-{
- int n = offsetof(struct atomisp_parameters32, output_frame) /
- sizeof(compat_uptr_t);
- compat_uptr_t stp, mtp, dcp, dscp;
- struct {
- struct atomisp_shading_table shading_table;
- struct atomisp_morph_table morph_table;
- struct atomisp_dis_coefficients dvs2_coefs;
- struct atomisp_dvs_6axis_config dvs_6axis_config;
- } __user *karg = (void __user *)(kp + 1);
-
- if (!access_ok(up, sizeof(struct atomisp_parameters32)))
- return -EFAULT;
-
- while (n >= 0) {
- compat_uptr_t __user *src = (compat_uptr_t __user *)up + n;
- void * __user *dst = (void * __user *)kp + n;
- compat_uptr_t tmp;
-
- if (get_user_cast(tmp, src) || put_user_force(compat_ptr(tmp), dst))
- return -EFAULT;
- n--;
- }
-
- if (assign_in_user(&kp->isp_config_id, &up->isp_config_id) ||
- assign_in_user(&kp->per_frame_setting, &up->per_frame_setting) ||
- get_user(stp, &up->shading_table) ||
- get_user(mtp, &up->morph_table) ||
- get_user(dcp, &up->dvs2_coefs) ||
- get_user(dscp, &up->dvs_6axis_config))
- return -EFAULT;
-
- /* handle shading table */
- if (stp && (get_atomisp_shading_table32(&karg->shading_table,
- compat_ptr(stp)) ||
- put_user_force(&karg->shading_table, &kp->shading_table)))
- return -EFAULT;
-
- /* handle morph table */
- if (mtp && (get_atomisp_morph_table32(&karg->morph_table,
- compat_ptr(mtp)) ||
- put_user_force(&karg->morph_table, &kp->morph_table)))
- return -EFAULT;
-
- /* handle dvs2 coefficients */
- if (dcp && (get_atomisp_dis_coefficients32(&karg->dvs2_coefs,
- compat_ptr(dcp)) ||
- put_user_force(&karg->dvs2_coefs, &kp->dvs2_coefs)))
- return -EFAULT;
-
- /* handle dvs 6axis configuration */
- if (dscp &&
- (get_atomisp_dvs_6axis_config32(&karg->dvs_6axis_config,
- compat_ptr(dscp)) ||
- put_user_force(&karg->dvs_6axis_config, &kp->dvs_6axis_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_acc_fw_load_to_pipe32(struct atomisp_acc_fw_load_to_pipe __user *kp,
- struct atomisp_acc_fw_load_to_pipe32 __user *up)
-{
- compat_uptr_t data;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
- assign_in_user(&kp->flags, &up->flags) ||
- assign_in_user(&kp->fw_handle, &up->fw_handle) ||
- assign_in_user(&kp->size, &up->size) ||
- assign_in_user(&kp->type, &up->type) ||
- assign_in_user(&kp->reserved[0], &up->reserved[0]) ||
- assign_in_user(&kp->reserved[1], &up->reserved[1]) ||
- assign_in_user(&kp->reserved[2], &up->reserved[2]) ||
- get_user(data, &up->data) ||
- put_user(compat_ptr(data), &kp->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-put_atomisp_acc_fw_load_to_pipe32(struct atomisp_acc_fw_load_to_pipe __user *kp,
- struct atomisp_acc_fw_load_to_pipe32 __user *up)
-{
- void __user *data;
-
- if (!access_ok(up, sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
- assign_in_user(&up->flags, &kp->flags) ||
- assign_in_user(&up->fw_handle, &kp->fw_handle) ||
- assign_in_user(&up->size, &kp->size) ||
- assign_in_user(&up->type, &kp->type) ||
- assign_in_user(&up->reserved[0], &kp->reserved[0]) ||
- assign_in_user(&up->reserved[1], &kp->reserved[1]) ||
- assign_in_user(&up->reserved[2], &kp->reserved[2]) ||
- get_user(data, &kp->data) ||
- put_user(ptr_to_compat(data), &up->data))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-get_atomisp_sensor_ae_bracketing_lut(struct atomisp_sensor_ae_bracketing_lut __user *kp,
- struct atomisp_sensor_ae_bracketing_lut32 __user *up)
-{
- compat_uptr_t lut;
-
- if (!access_ok(up, sizeof(struct atomisp_sensor_ae_bracketing_lut32)) ||
- assign_in_user(&kp->lut_size, &up->lut_size) ||
- get_user(lut, &up->lut) ||
- put_user_force(compat_ptr(lut), &kp->lut))
- return -EFAULT;
-
- return 0;
-}
-
-static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret = -ENOIOCTLCMD;
-
- if (file->f_op->unlocked_ioctl)
- ret = file->f_op->unlocked_ioctl(file, cmd, arg);
-
- return ret;
-}
-
-static long atomisp_do_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- union {
- struct atomisp_histogram his;
- struct atomisp_dis_statistics dis_s;
- struct atomisp_dis_coefficients dis_c;
- struct atomisp_dvs_6axis_config dvs_c;
- struct atomisp_3a_statistics s3a_s;
- struct atomisp_morph_table mor_t;
- struct v4l2_framebuffer v4l2_buf;
- struct atomisp_overlay overlay;
- struct atomisp_calibration_group cal_grp;
- struct atomisp_acc_fw_load acc_fw_load;
- struct atomisp_acc_fw_arg acc_fw_arg;
- struct v4l2_private_int_data v4l2_pri_data;
- struct atomisp_shading_table shd_tbl;
- struct atomisp_acc_map acc_map;
- struct atomisp_acc_s_mapped_arg acc_map_arg;
- struct atomisp_parameters param;
- struct atomisp_acc_fw_load_to_pipe acc_fw_to_pipe;
- struct atomisp_metadata md;
- struct atomisp_metadata_with_type md_with_type;
- struct atomisp_sensor_ae_bracketing_lut lut;
- } __user *karg;
- void __user *up = compat_ptr(arg);
- long err = -ENOIOCTLCMD;
-
- karg = compat_alloc_user_space(
- sizeof(*karg) + (cmd == ATOMISP_IOC_S_PARAMETERS32 ?
- sizeof(struct atomisp_shading_table) +
- sizeof(struct atomisp_morph_table) +
- sizeof(struct atomisp_dis_coefficients) +
- sizeof(struct atomisp_dvs_6axis_config) : 0));
- if (!karg)
- return -ENOMEM;
-
- /* First, convert the command. */
- switch (cmd) {
- case ATOMISP_IOC_G_HISTOGRAM32:
- cmd = ATOMISP_IOC_G_HISTOGRAM;
- break;
- case ATOMISP_IOC_S_HISTOGRAM32:
- cmd = ATOMISP_IOC_S_HISTOGRAM;
- break;
- case ATOMISP_IOC_G_DIS_STAT32:
- cmd = ATOMISP_IOC_G_DIS_STAT;
- break;
- case ATOMISP_IOC_S_DIS_COEFS32:
- cmd = ATOMISP_IOC_S_DIS_COEFS;
- break;
- case ATOMISP_IOC_S_DIS_VECTOR32:
- cmd = ATOMISP_IOC_S_DIS_VECTOR;
- break;
- case ATOMISP_IOC_G_3A_STAT32:
- cmd = ATOMISP_IOC_G_3A_STAT;
- break;
- case ATOMISP_IOC_G_ISP_GDC_TAB32:
- cmd = ATOMISP_IOC_G_ISP_GDC_TAB;
- break;
- case ATOMISP_IOC_S_ISP_GDC_TAB32:
- cmd = ATOMISP_IOC_S_ISP_GDC_TAB;
- break;
- case ATOMISP_IOC_S_ISP_FPN_TABLE32:
- cmd = ATOMISP_IOC_S_ISP_FPN_TABLE;
- break;
- case ATOMISP_IOC_G_ISP_OVERLAY32:
- cmd = ATOMISP_IOC_G_ISP_OVERLAY;
- break;
- case ATOMISP_IOC_S_ISP_OVERLAY32:
- cmd = ATOMISP_IOC_S_ISP_OVERLAY;
- break;
- case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
- cmd = ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP;
- break;
- case ATOMISP_IOC_ACC_LOAD32:
- cmd = ATOMISP_IOC_ACC_LOAD;
- break;
- case ATOMISP_IOC_ACC_S_ARG32:
- cmd = ATOMISP_IOC_ACC_S_ARG;
- break;
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
- cmd = ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA;
- break;
- case ATOMISP_IOC_S_ISP_SHD_TAB32:
- cmd = ATOMISP_IOC_S_ISP_SHD_TAB;
- break;
- case ATOMISP_IOC_ACC_DESTAB32:
- cmd = ATOMISP_IOC_ACC_DESTAB;
- break;
- case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
- cmd = ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA;
- break;
- case ATOMISP_IOC_ACC_MAP32:
- cmd = ATOMISP_IOC_ACC_MAP;
- break;
- case ATOMISP_IOC_ACC_UNMAP32:
- cmd = ATOMISP_IOC_ACC_UNMAP;
- break;
- case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
- cmd = ATOMISP_IOC_ACC_S_MAPPED_ARG;
- break;
- case ATOMISP_IOC_S_PARAMETERS32:
- cmd = ATOMISP_IOC_S_PARAMETERS;
- break;
- case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
- cmd = ATOMISP_IOC_ACC_LOAD_TO_PIPE;
- break;
- case ATOMISP_IOC_G_METADATA32:
- cmd = ATOMISP_IOC_G_METADATA;
- break;
- case ATOMISP_IOC_G_METADATA_BY_TYPE32:
- cmd = ATOMISP_IOC_G_METADATA_BY_TYPE;
- break;
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
- cmd = ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT;
- break;
- }
-
- switch (cmd) {
- case ATOMISP_IOC_G_HISTOGRAM:
- case ATOMISP_IOC_S_HISTOGRAM:
- err = get_atomisp_histogram32(&karg->his, up);
- break;
- case ATOMISP_IOC_G_DIS_STAT:
- err = get_atomisp_dis_statistics32(&karg->dis_s, up);
- break;
- case ATOMISP_IOC_S_DIS_COEFS:
- err = get_atomisp_dis_coefficients32(&karg->dis_c, up);
- break;
- case ATOMISP_IOC_S_DIS_VECTOR:
- err = get_atomisp_dvs_6axis_config32(&karg->dvs_c, up);
- break;
- case ATOMISP_IOC_G_3A_STAT:
- err = get_atomisp_3a_statistics32(&karg->s3a_s, up);
- break;
- case ATOMISP_IOC_G_ISP_GDC_TAB:
- case ATOMISP_IOC_S_ISP_GDC_TAB:
- err = get_atomisp_morph_table32(&karg->mor_t, up);
- break;
- case ATOMISP_IOC_S_ISP_FPN_TABLE:
- err = get_v4l2_framebuffer32(&karg->v4l2_buf, up);
- break;
- case ATOMISP_IOC_G_ISP_OVERLAY:
- case ATOMISP_IOC_S_ISP_OVERLAY:
- err = get_atomisp_overlay32(&karg->overlay, up);
- break;
- case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
- err = get_atomisp_calibration_group32(&karg->cal_grp, up);
- break;
- case ATOMISP_IOC_ACC_LOAD:
- err = get_atomisp_acc_fw_load32(&karg->acc_fw_load, up);
- break;
- case ATOMISP_IOC_ACC_S_ARG:
- case ATOMISP_IOC_ACC_DESTAB:
- err = get_atomisp_acc_fw_arg32(&karg->acc_fw_arg, up);
- break;
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
- err = get_v4l2_private_int_data32(&karg->v4l2_pri_data, up);
- break;
- case ATOMISP_IOC_S_ISP_SHD_TAB:
- err = get_atomisp_shading_table32(&karg->shd_tbl, up);
- break;
- case ATOMISP_IOC_ACC_MAP:
- case ATOMISP_IOC_ACC_UNMAP:
- err = get_atomisp_acc_map32(&karg->acc_map, up);
- break;
- case ATOMISP_IOC_ACC_S_MAPPED_ARG:
- err = get_atomisp_acc_s_mapped_arg32(&karg->acc_map_arg, up);
- break;
- case ATOMISP_IOC_S_PARAMETERS:
- err = get_atomisp_parameters32(&karg->param, up);
- break;
- case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
- err = get_atomisp_acc_fw_load_to_pipe32(&karg->acc_fw_to_pipe,
- up);
- break;
- case ATOMISP_IOC_G_METADATA:
- err = get_atomisp_metadata_stat32(&karg->md, up);
- break;
- case ATOMISP_IOC_G_METADATA_BY_TYPE:
- err = get_atomisp_metadata_by_type_stat32(&karg->md_with_type,
- up);
- break;
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
- err = get_atomisp_sensor_ae_bracketing_lut(&karg->lut, up);
- break;
- }
- if (err)
- return err;
-
- err = native_ioctl(file, cmd, (unsigned long)karg);
- if (err)
- return err;
-
- switch (cmd) {
- case ATOMISP_IOC_G_HISTOGRAM:
- err = put_atomisp_histogram32(&karg->his, up);
- break;
- case ATOMISP_IOC_G_DIS_STAT:
- err = put_atomisp_dis_statistics32(&karg->dis_s, up);
- break;
- case ATOMISP_IOC_G_3A_STAT:
- err = put_atomisp_3a_statistics32(&karg->s3a_s, up);
- break;
- case ATOMISP_IOC_G_ISP_GDC_TAB:
- err = put_atomisp_morph_table32(&karg->mor_t, up);
- break;
- case ATOMISP_IOC_G_ISP_OVERLAY:
- err = put_atomisp_overlay32(&karg->overlay, up);
- break;
- case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
- err = put_atomisp_calibration_group32(&karg->cal_grp, up);
- break;
- case ATOMISP_IOC_ACC_LOAD:
- err = put_atomisp_acc_fw_load32(&karg->acc_fw_load, up);
- break;
- case ATOMISP_IOC_ACC_S_ARG:
- case ATOMISP_IOC_ACC_DESTAB:
- err = put_atomisp_acc_fw_arg32(&karg->acc_fw_arg, up);
- break;
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
- err = put_v4l2_private_int_data32(&karg->v4l2_pri_data, up);
- break;
- case ATOMISP_IOC_ACC_MAP:
- case ATOMISP_IOC_ACC_UNMAP:
- err = put_atomisp_acc_map32(&karg->acc_map, up);
- break;
- case ATOMISP_IOC_ACC_S_MAPPED_ARG:
- err = put_atomisp_acc_s_mapped_arg32(&karg->acc_map_arg, up);
- break;
- case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
- err = put_atomisp_acc_fw_load_to_pipe32(&karg->acc_fw_to_pipe,
- up);
- break;
- case ATOMISP_IOC_G_METADATA:
- err = put_atomisp_metadata_stat32(&karg->md, up);
- break;
- case ATOMISP_IOC_G_METADATA_BY_TYPE:
- err = put_atomisp_metadata_by_type_stat32(&karg->md_with_type,
- up);
- break;
- }
-
- return err;
-}
-
-long atomisp_compat_ioctl32(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct video_device *vdev = video_devdata(file);
- struct atomisp_device *isp = video_get_drvdata(vdev);
- long ret = -ENOIOCTLCMD;
-
- if (!file->f_op->unlocked_ioctl)
- return ret;
-
- switch (cmd) {
- case ATOMISP_IOC_G_XNR:
- case ATOMISP_IOC_S_XNR:
- case ATOMISP_IOC_G_NR:
- case ATOMISP_IOC_S_NR:
- case ATOMISP_IOC_G_TNR:
- case ATOMISP_IOC_S_TNR:
- case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
- case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
- case ATOMISP_IOC_G_EE:
- case ATOMISP_IOC_S_EE:
- case ATOMISP_IOC_S_DIS_VECTOR:
- case ATOMISP_IOC_G_ISP_PARM:
- case ATOMISP_IOC_S_ISP_PARM:
- case ATOMISP_IOC_G_ISP_GAMMA:
- case ATOMISP_IOC_S_ISP_GAMMA:
- case ATOMISP_IOC_ISP_MAKERNOTE:
- case ATOMISP_IOC_G_ISP_MACC:
- case ATOMISP_IOC_S_ISP_MACC:
- case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
- case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
- case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
- case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
- case ATOMISP_IOC_G_ISP_CTC:
- case ATOMISP_IOC_S_ISP_CTC:
- case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
- case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
- case ATOMISP_IOC_CAMERA_BRIDGE:
- case ATOMISP_IOC_G_SENSOR_MODE_DATA:
- case ATOMISP_IOC_S_EXPOSURE:
- case ATOMISP_IOC_G_3A_CONFIG:
- case ATOMISP_IOC_S_3A_CONFIG:
- case ATOMISP_IOC_ACC_UNLOAD:
- case ATOMISP_IOC_ACC_START:
- case ATOMISP_IOC_ACC_WAIT:
- case ATOMISP_IOC_ACC_ABORT:
- case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
- case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
- case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG:
- case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
- case ATOMISP_IOC_EXT_ISP_CTRL:
- case ATOMISP_IOC_EXP_ID_UNLOCK:
- case ATOMISP_IOC_EXP_ID_CAPTURE:
- case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
- case ATOMISP_IOC_G_FORMATS_CONFIG:
- case ATOMISP_IOC_S_FORMATS_CONFIG:
- case ATOMISP_IOC_S_EXPOSURE_WINDOW:
- case ATOMISP_IOC_S_ACC_STATE:
- case ATOMISP_IOC_G_ACC_STATE:
- case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
- case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
- case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
- case ATOMISP_IOC_G_INVALID_FRAME_NUM:
- case ATOMISP_IOC_S_ARRAY_RESOLUTION:
- case ATOMISP_IOC_S_SENSOR_RUNMODE:
- case ATOMISP_IOC_G_UPDATE_EXPOSURE:
- ret = native_ioctl(file, cmd, arg);
- break;
-
- case ATOMISP_IOC_G_HISTOGRAM32:
- case ATOMISP_IOC_S_HISTOGRAM32:
- case ATOMISP_IOC_G_DIS_STAT32:
- case ATOMISP_IOC_S_DIS_COEFS32:
- case ATOMISP_IOC_S_DIS_VECTOR32:
- case ATOMISP_IOC_G_3A_STAT32:
- case ATOMISP_IOC_G_ISP_GDC_TAB32:
- case ATOMISP_IOC_S_ISP_GDC_TAB32:
- case ATOMISP_IOC_S_ISP_FPN_TABLE32:
- case ATOMISP_IOC_G_ISP_OVERLAY32:
- case ATOMISP_IOC_S_ISP_OVERLAY32:
- case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
- case ATOMISP_IOC_ACC_LOAD32:
- case ATOMISP_IOC_ACC_S_ARG32:
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
- case ATOMISP_IOC_S_ISP_SHD_TAB32:
- case ATOMISP_IOC_ACC_DESTAB32:
- case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
- case ATOMISP_IOC_ACC_MAP32:
- case ATOMISP_IOC_ACC_UNMAP32:
- case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
- case ATOMISP_IOC_S_PARAMETERS32:
- case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
- case ATOMISP_IOC_G_METADATA32:
- case ATOMISP_IOC_G_METADATA_BY_TYPE32:
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
- ret = atomisp_do_compat_ioctl(file, cmd, arg);
- break;
-
- default:
- dev_warn(isp->dev,
- "%s: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
- __func__, _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
- cmd);
- break;
- }
- return ret;
-}
-#endif /* CONFIG_COMPAT */
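atomisp_compat_ioctl32.c goes away wholesale. The file was built around compat_alloc_user_space(), which the kernel was removing at the time, and converting roughly 1200 lines of __user-to-__user shuffling was judged not worth it for a staging driver. For reference, the replacement idiom copies the 32-bit layout into kernel memory and widens pointers there; a hypothetical minimal example (both struct layouts are invented for illustration):

	struct example_histogram32 {
		u32		num_elements;
		compat_uptr_t	data;		/* 32-bit user pointer */
	};

	struct example_histogram {
		u32		num_elements;
		void __user	*data;
	};

	static long example_compat_ioctl(struct file *file, unsigned int cmd,
					 unsigned long arg)
	{
		struct example_histogram32 h32;
		struct example_histogram h;

		if (copy_from_user(&h32, compat_ptr(arg), sizeof(h32)))
			return -EFAULT;

		h.num_elements = h32.num_elements;
		h.data = compat_ptr(h32.data);	/* widen in kernel space */

		/* ... pass &h to the native handler, then copy results back ... */
		return 0;
	}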
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index 060b8765ae96..56456e59bf89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -25,13 +25,13 @@
static struct v4l2_mbus_framefmt *__csi2_get_format(struct
atomisp_mipi_csi2_device
* csi2,
- struct
- v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum
v4l2_subdev_format_whence
which, unsigned int pad) {
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
else
return &csi2->formats[pad];
}
@@ -44,7 +44,7 @@ static struct v4l2_mbus_framefmt *__csi2_get_format(struct
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv;
@@ -70,13 +70,13 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->which, fmt->pad);
+ format = __csi2_get_format(csi2, sd_state, fmt->which, fmt->pad);
fmt->format = *format;
@@ -84,12 +84,14 @@ static int csi2_get_format(struct v4l2_subdev *sd,
}
int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int which, uint16_t pad,
struct v4l2_mbus_framefmt *ffmt)
{
struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2, cfg, which, pad);
+ struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2,
+ sd_state,
+ which, pad);
if (pad == CSI2_PAD_SINK) {
const struct atomisp_in_fmt_conv *ic;
@@ -110,12 +112,14 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
tmp_ffmt = *ffmt = *actual_ffmt;
- return atomisp_csi2_set_ffmt(sd, cfg, which, CSI2_PAD_SOURCE,
+ return atomisp_csi2_set_ffmt(sd, sd_state, which,
+ CSI2_PAD_SOURCE,
&tmp_ffmt);
}
/* FIXME: DPCM decompression */
- *actual_ffmt = *ffmt = *__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK);
+ *actual_ffmt = *ffmt = *__csi2_get_format(csi2, sd_state, which,
+ CSI2_PAD_SINK);
return 0;
}
@@ -129,10 +133,10 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return atomisp_csi2_set_ffmt(sd, cfg, fmt->which, fmt->pad,
+ return atomisp_csi2_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
&fmt->format);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
index 59261e8f1a1a..e35711be8a37 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -44,7 +44,7 @@ struct atomisp_mipi_csi2_device {
};
int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int which, uint16_t pad,
struct v4l2_mbus_framefmt *ffmt);
int atomisp_mipi_csi2_init(struct atomisp_device *isp);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c
index e568ca99c45a..4570a9ab100b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_file.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_file.c
@@ -80,7 +80,7 @@ static int file_input_s_stream(struct v4l2_subdev *sd, int enable)
}
static int file_input_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -104,16 +104,16 @@ static int file_input_get_fmt(struct v4l2_subdev *sd,
}
static int file_input_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
if (format->pad)
return -EINVAL;
- file_input_get_fmt(sd, cfg, format);
+ file_input_get_fmt(sd, sd_state, format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
@@ -130,7 +130,7 @@ static int file_input_s_power(struct v4l2_subdev *sd, int on)
}
static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*to fake*/
@@ -138,7 +138,7 @@ static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
}
static int file_input_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
/*to fake*/
@@ -146,7 +146,7 @@ static int file_input_enum_frame_size(struct v4l2_subdev *sd,
}
static int file_input_enum_frame_ival(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum
*fie)
{
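
Reviewer note: one detail in this file goes beyond renaming. The old cfg->try_fmt was a single pad config, while v4l2_subdev_state embeds a per-pad array, so the stored TRY format becomes pad 0 of the state (sd_state->pads->try_fmt and sd_state->pads[0].try_fmt refer to the same entry). Sketched for a single-pad, fixed-format subdev; demo_set_fmt is an illustrative name:

static int demo_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *format)
{
	if (format->pad)
		return -EINVAL;
	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		/* the state keeps one entry per pad; this subdev has one */
		sd_state->pads[0].try_fmt = format->format;
	return 0;
}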
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index f1e6b2597853..f82bf082aa79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -837,7 +837,7 @@ dev_init:
}
/* runtime power management, turn on ISP */
- ret = pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ ret = pm_runtime_resume_and_get(vdev->v4l2_dev->dev);
if (ret < 0) {
dev_err(isp->dev, "Failed to power on device\n");
goto error;
@@ -881,9 +881,9 @@ done:
css_error:
atomisp_css_uninit(isp);
+ pm_runtime_put(vdev->v4l2_dev->dev);
error:
hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
- pm_runtime_put(vdev->v4l2_dev->dev);
rt_mutex_unlock(&isp->mutex);
return ret;
}
@@ -963,7 +963,7 @@ static int atomisp_release(struct file *file)
if (!isp->sw_contex.file_input && asd->fmt_auto->val) {
struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
}
@@ -975,7 +975,7 @@ subdev_uninit:
if (isp->sw_contex.file_input && asd->fmt_auto->val) {
struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
}
@@ -1016,7 +1016,7 @@ subdev_uninit:
done:
if (!acc_node) {
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
atomisp_subdev_source_pad(vdev),
V4L2_SEL_TGT_COMPOSE, 0,
@@ -1283,7 +1283,8 @@ const struct v4l2_file_operations atomisp_fops = {
.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
/*
- * There are problems with this code. Disable this for now.
+ * This was removed because of bugs; the interface
+ * needs to be made safe for compat tasks instead.
.compat_ioctl32 = atomisp_compat_ioctl32,
*/
#endif
@@ -1297,10 +1298,7 @@ const struct v4l2_file_operations atomisp_file_fops = {
.mmap = atomisp_file_mmap,
.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
- /*
- * There are problems with this code. Disable this for now.
- .compat_ioctl32 = atomisp_compat_ioctl32,
- */
+ /* .compat_ioctl32 = atomisp_compat_ioctl32, */
#endif
.poll = atomisp_poll,
};
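
Reviewer note: the open-path change in this file is the classic pm_runtime_get_sync() fixup. get_sync() raises the usage count even when resume fails, so every error path needed a matching put, and getting that wrong leaked a reference; pm_runtime_resume_and_get() drops the count itself on failure, which is why the put above could move from the generic error: label to css_error:, where the get is known to have succeeded. The usual shape, as a sketch (demo_power_on_and_use is an illustrative name):

#include <linux/pm_runtime.h>

static int demo_power_on_and_use(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped for us */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}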
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 2ef5f44e4b6b..12f22ad007c7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -213,7 +213,7 @@ static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
@@ -246,7 +246,7 @@ static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
}
struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad,
uint32_t target)
{
@@ -255,9 +255,9 @@ struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
if (which == V4L2_SUBDEV_FORMAT_TRY) {
switch (target) {
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(sd, sd_state, pad);
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(sd, cfg, pad);
+ return v4l2_subdev_get_try_compose(sd, sd_state, pad);
}
}
@@ -273,19 +273,20 @@ struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt
*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
uint32_t pad)
{
struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
return &isp_sd->fmt[pad].fmt;
}
static void isp_get_fmt_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
struct v4l2_mbus_framefmt **ffmt,
struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
@@ -293,16 +294,16 @@ static void isp_get_fmt_rect(struct v4l2_subdev *sd,
unsigned int i;
for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
- ffmt[i] = atomisp_subdev_get_ffmt(sd, cfg, which, i);
- crop[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ ffmt[i] = atomisp_subdev_get_ffmt(sd, sd_state, which, i);
+ crop[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
V4L2_SEL_TGT_CROP);
- comp[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ comp[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
V4L2_SEL_TGT_COMPOSE);
}
}
static void isp_subdev_propagate(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
uint32_t flags)
{
@@ -313,7 +314,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
if (flags & V4L2_SEL_FLAG_KEEP_CONFIG)
return;
- isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+ isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
switch (pad) {
case ATOMISP_SUBDEV_PAD_SINK: {
@@ -323,7 +324,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
r.width = ffmt[pad]->width;
r.height = ffmt[pad]->height;
- atomisp_subdev_set_selection(sd, cfg, which, pad,
+ atomisp_subdev_set_selection(sd, sd_state, which, pad,
target, flags, &r);
break;
}
@@ -331,7 +332,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
}
static int isp_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *rec;
@@ -340,7 +341,7 @@ static int isp_subdev_get_selection(struct v4l2_subdev *sd,
if (rval)
return rval;
- rec = atomisp_subdev_get_rect(sd, cfg, sel->which, sel->pad,
+ rec = atomisp_subdev_get_rect(sd, sd_state, sel->which, sel->pad,
sel->target);
if (!rec)
return -EINVAL;
@@ -365,7 +366,7 @@ static const char *atomisp_pad_str(unsigned int pad)
}
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
u32 flags, struct v4l2_rect *r)
{
@@ -382,7 +383,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
- isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+ isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
dev_dbg(isp->dev,
"sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
@@ -450,7 +451,8 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_rect tmp = *crop[pad];
atomisp_subdev_set_selection(
- sd, cfg, which, i, V4L2_SEL_TGT_COMPOSE,
+ sd, sd_state, which, i,
+ V4L2_SEL_TGT_COMPOSE,
flags, &tmp);
}
}
@@ -472,9 +474,9 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
* when dvs is disabled.
*/
dvs_w = dvs_h = 12;
- } else
+ } else {
dvs_w = dvs_h = 0;
-
+ }
atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
atomisp_css_input_set_effective_resolution(isp_sd, stream_id,
crop[pad]->width, crop[pad]->height);
@@ -551,9 +553,9 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
ffmt[pad]->height = comp[pad]->height;
}
- if (!atomisp_subdev_get_rect(sd, cfg, which, pad, target))
+ if (!atomisp_subdev_get_rect(sd, sd_state, which, pad, target))
return -EINVAL;
- *r = *atomisp_subdev_get_rect(sd, cfg, which, pad, target);
+ *r = *atomisp_subdev_get_rect(sd, sd_state, which, pad, target);
dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
r->left, r->top, r->width, r->height);
@@ -562,7 +564,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
}
static int isp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
@@ -570,7 +572,8 @@ static int isp_subdev_set_selection(struct v4l2_subdev *sd,
if (rval)
return rval;
- return atomisp_subdev_set_selection(sd, cfg, sel->which, sel->pad,
+ return atomisp_subdev_set_selection(sd, sd_state, sel->which,
+ sel->pad,
sel->target, sel->flags, &sel->r);
}
@@ -609,13 +612,14 @@ static int atomisp_get_sensor_bin_factor(struct atomisp_sub_device *asd)
}
void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
u32 pad, struct v4l2_mbus_framefmt *ffmt)
{
struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
struct atomisp_device *isp = isp_sd->isp;
struct v4l2_mbus_framefmt *__ffmt =
- atomisp_subdev_get_ffmt(sd, cfg, which, pad);
+ atomisp_subdev_get_ffmt(sd, sd_state, which, pad);
u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
enum atomisp_input_stream_id stream_id;
@@ -640,7 +644,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
*__ffmt = *ffmt;
- isp_subdev_propagate(sd, cfg, which, pad,
+ isp_subdev_propagate(sd, sd_state, which, pad,
V4L2_SEL_TGT_CROP, 0);
if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
@@ -679,10 +683,11 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
* to the format type.
*/
static int isp_subdev_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- fmt->format = *atomisp_subdev_get_ffmt(sd, cfg, fmt->which, fmt->pad);
+ fmt->format = *atomisp_subdev_get_ffmt(sd, sd_state, fmt->which,
+ fmt->pad);
return 0;
}
@@ -698,10 +703,11 @@ static int isp_subdev_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int isp_subdev_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- atomisp_subdev_set_ffmt(sd, cfg, fmt->which, fmt->pad, &fmt->format);
+ atomisp_subdev_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
+ &fmt->format);
return 0;
}
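
Reviewer note: the selection paths get the same treatment as the formats; the try crop and compose rectangles are looked up through the state. A compact sketch of the TRY/ACTIVE dispatch this file now uses throughout; demo_get_crop is an illustrative name and active_crop stands in for the driver's own rectangle:

static struct v4l2_rect *demo_get_crop(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *sd_state,
				       u32 which, unsigned int pad,
				       struct v4l2_rect *active_crop)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(sd, sd_state, pad);
	return active_crop;
}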
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index 330a77eed8aa..d6fcfab6352d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -437,19 +437,20 @@ uint16_t atomisp_subdev_source_pad(struct video_device *vdev);
/* Get pointer to appropriate format */
struct v4l2_mbus_framefmt
*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
uint32_t pad);
struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad,
uint32_t target);
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
u32 flags, struct v4l2_rect *r);
/* Actually set the format */
void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
u32 pad, struct v4l2_mbus_framefmt *ffmt);
int atomisp_update_run_mode(struct atomisp_sub_device *asd);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
index 1def80bab180..e29a96da5f98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
@@ -29,7 +29,7 @@ static int tpg_s_stream(struct v4l2_subdev *sd, int enable)
}
static int tpg_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
/*to fake*/
@@ -37,7 +37,7 @@ static int tpg_get_fmt(struct v4l2_subdev *sd,
}
static int tpg_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -47,7 +47,7 @@ static int tpg_set_fmt(struct v4l2_subdev *sd,
/* only raw8 grbg is supported by TPG */
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
return 0;
@@ -65,7 +65,7 @@ static int tpg_s_power(struct v4l2_subdev *sd, int on)
}
static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*to fake*/
@@ -73,7 +73,7 @@ static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tpg_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
/*to fake*/
@@ -81,7 +81,7 @@ static int tpg_enum_frame_size(struct v4l2_subdev *sd,
}
static int tpg_enum_frame_ival(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
/*to fake*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 0295e2e32d79..948769ca6539 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1178,7 +1178,7 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
list_for_each_entry_safe(sd, next, &isp->v4l2_dev.subdevs, list)
- v4l2_device_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
@@ -1500,9 +1500,9 @@ static int init_atomisp_wdts(struct atomisp_device *isp)
for (i = 0; i < isp->num_of_streams; i++) {
struct atomisp_sub_device *asd = &isp->asd[i];
- if (!IS_ISP2401)
+ if (!IS_ISP2401) {
timer_setup(&asd->wdt, atomisp_wdt, 0);
- else {
+ } else {
timer_setup(&asd->video_out_capture.wdt,
atomisp_wdt, 0);
timer_setup(&asd->video_out_preview.wdt,
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 27dd8ce8ba0a..d26b1301eeb7 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -239,8 +239,8 @@ ia_css_reset_defaults(struct sh_css *css);
static void
sh_css_init_host_sp_control_vars(void);
-static int set_num_primary_stages(unsigned int *num,
- enum ia_css_pipe_version version);
+static int
+set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version);
static bool
need_capture_pp(const struct ia_css_pipe *pipe);
@@ -308,8 +308,7 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
const void *acc_fw);
static int
-alloc_continuous_frames(
- struct ia_css_pipe *pipe, bool init_time);
+alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time);
static void
pipe_global_init(void);
@@ -413,7 +412,6 @@ aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
static void
sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
{
- assert(pipe);
if (!pipe) {
IA_CSS_ERROR("NULL input parameter");
return;
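
Reviewer note: this is the first of many hunks in sh_css.c that drop an assert() sitting directly in front of a runtime NULL check. The pair is contradictory: on builds where assert() is compiled in, it crashes on exactly the condition the code below handles gracefully, so the cleanup keeps only the check. Sketched below with a hypothetical demo_obj type (IA_CSS_ERROR is the logging macro this file already uses):

static void demo_free(struct demo_obj *obj)
{
	/* no assert(obj): NULL is a handled error, not a bug trap */
	if (!obj) {
		IA_CSS_ERROR("NULL input parameter");
		return;
	}
	/* ... actual teardown ... */
}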
@@ -453,15 +451,15 @@ static enum ia_css_frame_format yuv422_copy_formats[] = {
* by the copy binary given the stream format.
* */
static int
-verify_copy_out_frame_format(struct ia_css_pipe *pipe) {
+verify_copy_out_frame_format(struct ia_css_pipe *pipe)
+{
enum ia_css_frame_format out_fmt = pipe->output_info[0].format;
unsigned int i, found = 0;
assert(pipe);
assert(pipe->stream);
- switch (pipe->stream->config.input_config.format)
- {
+ switch (pipe->stream->config.input_config.format) {
case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
case ATOMISP_INPUT_FORMAT_YUV420_8:
for (i = 0; i < ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
@@ -528,7 +526,8 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
#if !defined(ISP2401)
static int
-sh_css_config_input_network(struct ia_css_stream *stream) {
+sh_css_config_input_network(struct ia_css_stream *stream)
+{
unsigned int fmt_type;
struct ia_css_pipe *pipe = stream->last_pipe;
struct ia_css_binary *binary = NULL;
@@ -554,8 +553,7 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
stream->config.mode);
if ((binary && (binary->online || stream->config.continuous)) ||
- pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
- {
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
err = ia_css_ifmtr_configure(&stream->config,
binary);
if (err)
@@ -563,8 +561,7 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
}
if (stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
- stream->config.mode == IA_CSS_INPUT_MODE_PRBS)
- {
+ stream->config.mode == IA_CSS_INPUT_MODE_PRBS) {
unsigned int hblank_cycles = 100,
vblank_lines = 6,
width,
@@ -723,35 +720,32 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_id(
switch (stream_cfg->mode) {
case IA_CSS_INPUT_MODE_TPG:
- if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID0) {
+ if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID0)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
- } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID1) {
+ else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID1)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
- } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID2) {
+ else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID2)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
- }
break;
case IA_CSS_INPUT_MODE_PRBS:
- if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0) {
+ if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
- } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1) {
+ else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
- } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2) {
+ else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2)
isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
- }
break;
case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
- if (stream_cfg->source.port.port == MIPI_PORT0_ID) {
+ if (stream_cfg->source.port.port == MIPI_PORT0_ID)
isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
- } else if (stream_cfg->source.port.port == MIPI_PORT1_ID) {
+ else if (stream_cfg->source.port.port == MIPI_PORT1_ID)
isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID;
- } else if (stream_cfg->source.port.port == MIPI_PORT2_ID) {
+ else if (stream_cfg->source.port.port == MIPI_PORT2_ID)
isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID;
- }
break;
default:
@@ -804,15 +798,14 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
rc = true;
switch (stream_cfg->mode) {
case IA_CSS_INPUT_MODE_TPG:
- if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP) {
+ if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP)
isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_RAMP;
- } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD) {
+ else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD)
isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_CHBO;
- } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO) {
+ else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO)
isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_MONO;
- } else {
+ else
rc = false;
- }
/*
* TODO
@@ -951,12 +944,12 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) &&
stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
- UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ UNCOMPRESSED_BITS_PER_PIXEL_10)
fmt_type = ATOMISP_INPUT_FORMAT_RAW_10;
- } else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
- UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12)
fmt_type = ATOMISP_INPUT_FORMAT_RAW_12;
- } else
+ else
return false;
}
@@ -1045,7 +1038,8 @@ static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
}
static int
-sh_css_config_input_network(struct ia_css_stream *stream) {
+sh_css_config_input_network(struct ia_css_stream *stream)
+{
bool rc;
ia_css_isys_descr_t isys_stream_descr;
unsigned int sp_thread_id;
@@ -1060,19 +1054,16 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_config_input_network() enter 0x%p:\n", stream);
- if (stream->config.continuous)
- {
- if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ if (stream->config.continuous) {
+ if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE)
pipe = stream->last_pipe;
- } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP) {
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP)
pipe = stream->last_pipe;
- } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
pipe = stream->last_pipe->pipe_settings.preview.copy_pipe;
- } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO)
pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
- }
- } else
- {
+ } else {
pipe = stream->last_pipe;
if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
/*
@@ -1087,7 +1078,6 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
}
}
- assert(pipe);
if (!pipe)
return -EINVAL;
@@ -1095,8 +1085,7 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
if (pipe->pipeline.stages->binary)
binary = pipe->pipeline.stages->binary;
- if (binary)
- {
+ if (binary) {
/* this was being done in ifmtr in 2400.
* online and cont bypass the init_in_frameinfo_memory_defaults
* so need to do it here
@@ -1111,8 +1100,7 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
/* get the target input terminal */
sp_pipeline_input_terminal = &sh_css_sp_group.pipe_io[sp_thread_id].input;
- for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
- {
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
/* initialization */
memset((void *)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t));
sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0;
@@ -1210,11 +1198,10 @@ static inline struct ia_css_pipe *stream_get_target_pipe(
struct ia_css_pipe *target_pipe;
/* get the pipe that consumes the stream */
- if (stream->config.continuous) {
+ if (stream->config.continuous)
target_pipe = stream_get_copy_pipe(stream);
- } else {
+ else
target_pipe = stream_get_last_pipe(stream);
- }
return target_pipe;
}
@@ -1388,10 +1375,9 @@ start_binary(struct ia_css_pipe *pipe,
/* start the copy function on the SP */
static int
start_copy_on_sp(struct ia_css_pipe *pipe,
- struct ia_css_frame *out_frame) {
+ struct ia_css_frame *out_frame)
+{
(void)out_frame;
- assert(pipe);
- assert(pipe->stream);
if ((!pipe) || (!pipe->stream))
return -EINVAL;
@@ -1406,8 +1392,7 @@ start_copy_on_sp(struct ia_css_pipe *pipe,
sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
#if !defined(ISP2401)
- if (pipe->stream->reconfigure_css_rx)
- {
+ if (pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
pipe->stream->config.mode);
pipe->stream->reconfigure_css_rx = false;
@@ -1596,7 +1581,8 @@ ia_css_reset_defaults(struct sh_css *css)
int
ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
- const struct ia_css_fw *fw) {
+ const struct ia_css_fw *fw)
+{
int err;
if (!env)
@@ -1607,16 +1593,14 @@ ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n");
/* make sure we initialize my_css */
- if (my_css.flush != env->cpu_mem_env.flush)
- {
+ if (my_css.flush != env->cpu_mem_env.flush) {
ia_css_reset_defaults(&my_css);
my_css.flush = env->cpu_mem_env.flush;
}
ia_css_unload_firmware(); /* in case we are called twice */
err = sh_css_load_firmware(dev, fw->data, fw->bytes);
- if (!err)
- {
+ if (!err) {
err = ia_css_binary_init_infos();
if (!err)
fw_explicitly_loaded = true;
@@ -1630,7 +1614,8 @@ int
ia_css_init(struct device *dev, const struct ia_css_env *env,
const struct ia_css_fw *fw,
u32 mmu_l1_base,
- enum ia_css_irq_type irq_type) {
+ enum ia_css_irq_type irq_type)
+{
int err;
ia_css_spctrl_cfg spctrl_cfg;
@@ -1704,16 +1689,14 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
my_css.flush = flush_func;
err = ia_css_rmgr_init();
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
IA_CSS_LOG("init: %d", my_css_save_initialized);
- if (!my_css_save_initialized)
- {
+ if (!my_css_save_initialized) {
my_css_save_initialized = true;
my_css_save.mode = sh_css_mode_working;
memset(my_css_save.stream_seeds, 0,
@@ -1741,19 +1724,16 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0);
err = ia_css_refcount_init(REFCOUNT_SIZE);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
err = sh_css_params_init();
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
- if (fw)
- {
+ if (fw) {
ia_css_unload_firmware(); /* in case we already had firmware loaded */
err = sh_css_load_firmware(dev, fw->data, fw->bytes);
if (err) {
@@ -1774,23 +1754,20 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
return -EINVAL;
err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
#if WITH_PC_MONITORING
- if (!thread_alive)
- {
+ if (!thread_alive) {
thread_alive++;
sh_css_print("PC_MONITORING: %s() -- create thread DISABLED\n",
__func__);
spying_thread_create();
}
#endif
- if (!sh_css_hrt_system_is_idle())
- {
+ if (!sh_css_hrt_system_is_idle()) {
IA_CSS_LEAVE_ERR(-EBUSY);
return -EBUSY;
}
@@ -1823,7 +1800,8 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
}
int
-ia_css_enable_isys_event_queue(bool enable) {
+ia_css_enable_isys_event_queue(bool enable)
+{
if (sh_css_sp_is_running())
return -EBUSY;
sh_css_sp_enable_isys_event_queue(enable);
@@ -1844,7 +1822,8 @@ sh_css_flush(struct ia_css_acc_fw *fw)
* doing it from stream_create since we could run out of sp threads due to
* allocation on inactive pipelines. */
static int
-map_sp_threads(struct ia_css_stream *stream, bool map) {
+map_sp_threads(struct ia_css_stream *stream, bool map)
+{
struct ia_css_pipe *main_pipe = NULL;
struct ia_css_pipe *copy_pipe = NULL;
struct ia_css_pipe *capture_pipe = NULL;
@@ -1852,12 +1831,10 @@ map_sp_threads(struct ia_css_stream *stream, bool map) {
int err = 0;
enum ia_css_pipe_id pipe_id;
- assert(stream);
IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
stream, map ? "true" : "false");
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -1867,8 +1844,7 @@ map_sp_threads(struct ia_css_stream *stream, bool map) {
ia_css_pipeline_map(main_pipe->pipe_num, map);
- switch (pipe_id)
- {
+ switch (pipe_id) {
case IA_CSS_PIPE_ID_PREVIEW:
copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
@@ -1887,23 +1863,17 @@ map_sp_threads(struct ia_css_stream *stream, bool map) {
}
if (acc_pipe)
- {
ia_css_pipeline_map(acc_pipe->pipe_num, map);
- }
if (capture_pipe)
- {
ia_css_pipeline_map(capture_pipe->pipe_num, map);
- }
/* Firmware expects copy pipe to be the last pipe mapped. (if needed) */
if (copy_pipe)
- {
ia_css_pipeline_map(copy_pipe->pipe_num, map);
- }
+
/* DH regular multi pipe - not continuous mode: map the next pipes too */
- if (!stream->config.continuous)
- {
+ if (!stream->config.continuous) {
int i;
for (i = 1; i < stream->num_pipes; i++)
@@ -1917,7 +1887,8 @@ map_sp_threads(struct ia_css_stream *stream, bool map) {
/* creates a host pipeline skeleton for all pipes in a stream. Called during
* stream_create. */
static int
-create_host_pipeline_structure(struct ia_css_stream *stream) {
+create_host_pipeline_structure(struct ia_css_stream *stream)
+{
struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
struct ia_css_pipe *acc_pipe = NULL;
enum ia_css_pipe_id pipe_id;
@@ -1926,27 +1897,22 @@ create_host_pipeline_structure(struct ia_css_stream *stream) {
unsigned int copy_pipe_delay = 0,
capture_pipe_delay = 0;
- assert(stream);
IA_CSS_ENTER_PRIVATE("stream = %p", stream);
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
main_pipe = stream->last_pipe;
- assert(main_pipe);
- if (!main_pipe)
- {
+ if (!main_pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
pipe_id = main_pipe->mode;
- switch (pipe_id)
- {
+ switch (pipe_id) {
case IA_CSS_PIPE_ID_PREVIEW:
copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
copy_pipe_delay = main_pipe->dvs_frame_delay;
@@ -1986,30 +1952,23 @@ create_host_pipeline_structure(struct ia_css_stream *stream) {
}
if (!(err) && copy_pipe)
- {
err = ia_css_pipeline_create(&copy_pipe->pipeline,
copy_pipe->mode,
copy_pipe->pipe_num,
copy_pipe_delay);
- }
if (!(err) && capture_pipe)
- {
err = ia_css_pipeline_create(&capture_pipe->pipeline,
capture_pipe->mode,
capture_pipe->pipe_num,
capture_pipe_delay);
- }
if (!(err) && acc_pipe)
- {
err = ia_css_pipeline_create(&acc_pipe->pipeline, acc_pipe->mode,
acc_pipe->pipe_num, main_pipe->dvs_frame_delay);
- }
/* DH regular multi pipe - not continuous mode: create the next pipelines too */
- if (!stream->config.continuous)
- {
+ if (!stream->config.continuous) {
int i;
for (i = 1; i < stream->num_pipes && 0 == err; i++) {
@@ -2028,7 +1987,8 @@ create_host_pipeline_structure(struct ia_css_stream *stream) {
/* creates a host pipeline for all pipes in a stream. Called during
* stream_start. */
static int
-create_host_pipeline(struct ia_css_stream *stream) {
+create_host_pipeline(struct ia_css_stream *stream)
+{
struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
struct ia_css_pipe *acc_pipe = NULL;
enum ia_css_pipe_id pipe_id;
@@ -2037,8 +1997,7 @@ create_host_pipeline(struct ia_css_stream *stream) {
unsigned int max_input_width = 0;
IA_CSS_ENTER_PRIVATE("stream = %p", stream);
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -2049,8 +2008,7 @@ create_host_pipeline(struct ia_css_stream *stream) {
/* No continuous frame allocation for capture pipe. It uses the
* "main" pipe's frames. */
if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
- (pipe_id == IA_CSS_PIPE_ID_VIDEO))
- {
+ (pipe_id == IA_CSS_PIPE_ID_VIDEO)) {
/* About pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
* The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is too strong. E.g. in SkyCam (with memory
* based input frames) there is no continuous mode and thus no need for allocated continuous frames
@@ -2068,24 +2026,21 @@ create_host_pipeline(struct ia_css_stream *stream) {
#if !defined(ISP2401)
/* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */
- if (pipe_id != IA_CSS_PIPE_ID_ACC)
- {
+ if (pipe_id != IA_CSS_PIPE_ID_ACC) {
err = allocate_mipi_frames(main_pipe, &stream->info);
if (err)
goto ERR;
}
#elif defined(ISP2401)
if ((pipe_id != IA_CSS_PIPE_ID_ACC) &&
- (main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY))
- {
+ (main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
err = allocate_mipi_frames(main_pipe, &stream->info);
if (err)
goto ERR;
}
#endif
- switch (pipe_id)
- {
+ switch (pipe_id) {
case IA_CSS_PIPE_ID_PREVIEW:
copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
@@ -2135,31 +2090,27 @@ create_host_pipeline(struct ia_css_stream *stream) {
if (err)
goto ERR;
- if (copy_pipe)
- {
+ if (copy_pipe) {
err = create_host_copy_pipeline(copy_pipe, max_input_width,
main_pipe->continuous_frames[0]);
if (err)
goto ERR;
}
- if (capture_pipe)
- {
+ if (capture_pipe) {
err = create_host_capture_pipeline(capture_pipe);
if (err)
goto ERR;
}
- if (acc_pipe)
- {
+ if (acc_pipe) {
err = create_host_acc_pipeline(acc_pipe);
if (err)
goto ERR;
}
/* DH regular multi pipe - not continuous mode: create the next pipelines too */
- if (!stream->config.continuous)
- {
+ if (!stream->config.continuous) {
int i;
for (i = 1; i < stream->num_pipes && 0 == err; i++) {
@@ -2201,10 +2152,9 @@ static const struct ia_css_yuvpp_settings yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS;
static int
init_pipe_defaults(enum ia_css_pipe_mode mode,
struct ia_css_pipe *pipe,
- bool copy_pipe) {
-
- if (!pipe)
- {
+ bool copy_pipe)
+{
+ if (!pipe) {
IA_CSS_ERROR("NULL pipe parameter");
return -EINVAL;
}
@@ -2213,18 +2163,17 @@ init_pipe_defaults(enum ia_css_pipe_mode mode,
memcpy(pipe, &default_pipe, sizeof(default_pipe));
/* TODO: JB should not be needed, but temporary backward reference */
- switch (mode)
- {
+ switch (mode) {
case IA_CSS_PIPE_MODE_PREVIEW:
pipe->mode = IA_CSS_PIPE_ID_PREVIEW;
memcpy(&pipe->pipe_settings.preview, &preview, sizeof(preview));
break;
case IA_CSS_PIPE_MODE_CAPTURE:
- if (copy_pipe) {
+ if (copy_pipe)
pipe->mode = IA_CSS_PIPE_ID_COPY;
- } else {
+ else
pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
- }
+
memcpy(&pipe->pipe_settings.capture, &capture, sizeof(capture));
break;
case IA_CSS_PIPE_MODE_VIDEO:
@@ -2254,27 +2203,25 @@ pipe_global_init(void)
u8 i;
my_css.pipe_counter = 0;
- for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
my_css.all_pipes[i] = NULL;
- }
}
static int
pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
- unsigned int *pipe_number) {
+ unsigned int *pipe_number)
+{
const u8 INVALID_PIPE_NUM = (uint8_t)~(0);
u8 pipe_num = INVALID_PIPE_NUM;
u8 i;
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_ERROR("NULL pipe parameter");
return -EINVAL;
}
/* Assign a new pipe_num .... search for empty place */
- for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
- {
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
if (!my_css.all_pipes[i]) {
/*position is reserved */
my_css.all_pipes[i] = (struct ia_css_pipe *)pipe;
@@ -2282,8 +2229,7 @@ pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
break;
}
}
- if (pipe_num == INVALID_PIPE_NUM)
- {
+ if (pipe_num == INVALID_PIPE_NUM) {
/* Max number of pipes already allocated */
IA_CSS_ERROR("Max number of pipes already created");
return -ENOSPC;
@@ -2309,12 +2255,12 @@ pipe_release_pipe_num(unsigned int pipe_num)
static int
create_pipe(enum ia_css_pipe_mode mode,
struct ia_css_pipe **pipe,
- bool copy_pipe) {
+ bool copy_pipe)
+{
int err = 0;
struct ia_css_pipe *me;
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_ERROR("NULL pipe parameter");
return -EINVAL;
}
@@ -2324,15 +2270,13 @@ create_pipe(enum ia_css_pipe_mode mode,
return -ENOMEM;
err = init_pipe_defaults(mode, me, copy_pipe);
- if (err)
- {
+ if (err) {
kfree(me);
return err;
}
err = pipe_generate_pipe_num(me, &me->pipe_num);
- if (err)
- {
+ if (err) {
kfree(me);
return err;
}
@@ -2361,7 +2305,6 @@ static void sh_css_pipe_free_acc_binaries(
struct ia_css_pipeline *pipeline;
struct ia_css_pipeline_stage *stage;
- assert(pipe);
if (!pipe) {
IA_CSS_ERROR("NULL input pointer");
return;
@@ -2378,26 +2321,24 @@ static void sh_css_pipe_free_acc_binaries(
}
int
-ia_css_pipe_destroy(struct ia_css_pipe *pipe) {
+ia_css_pipe_destroy(struct ia_css_pipe *pipe)
+{
int err = 0;
IA_CSS_ENTER("pipe = %p", pipe);
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- if (pipe->stream)
- {
+ if (pipe->stream) {
IA_CSS_LOG("ia_css_stream_destroy not called!");
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- switch (pipe->config.mode)
- {
+ switch (pipe->config.mode) {
case IA_CSS_PIPE_MODE_PREVIEW:
/* need to take into account that this function is also called
on the internal copy pipe */
@@ -2461,9 +2402,8 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe) {
/* Temporarily, not every sh_css_pipe has an acc_extension. */
if (pipe->config.acc_extension)
- {
ia_css_pipe_unload_extension(pipe, pipe->config.acc_extension);
- }
+
kfree(pipe);
IA_CSS_LEAVE("err = %d", err);
return err;
@@ -2493,9 +2433,9 @@ ia_css_uninit(void)
ifmtr_set_if_blocking_mode_reset = true;
#endif
- if (!fw_explicitly_loaded) {
+ if (!fw_explicitly_loaded)
ia_css_unload_firmware();
- }
+
ia_css_spctrl_unload_fw(SP0_ID);
sh_css_sp_set_sp_running(false);
/* check and free any remaining mipi frames */
@@ -2681,8 +2621,8 @@ static int load_copy_binary(
}
static int
-alloc_continuous_frames(
- struct ia_css_pipe *pipe, bool init_time) {
+alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time)
+{
int err = 0;
struct ia_css_frame_info ref_info;
enum ia_css_pipe_id pipe_id;
@@ -2692,8 +2632,7 @@ alloc_continuous_frames(
IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);
- if ((!pipe) || (!pipe->stream))
- {
+ if ((!pipe) || (!pipe->stream)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -2701,26 +2640,22 @@ alloc_continuous_frames(
pipe_id = pipe->mode;
continuous = pipe->stream->config.continuous;
- if (continuous)
- {
+ if (continuous) {
if (init_time) {
num_frames = pipe->stream->config.init_num_cont_raw_buf;
pipe->stream->continuous_pipe = pipe;
- } else
+ } else {
num_frames = pipe->stream->config.target_num_cont_raw_buf;
- } else
- {
+ }
+ } else {
num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES;
}
- if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- {
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info;
- } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
- {
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
ref_info = pipe->pipe_settings.video.video_binary.in_frame_info;
- } else
- {
+ } else {
/* should not happen */
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
@@ -2736,8 +2671,7 @@ alloc_continuous_frames(
#endif
#if !defined(HAS_NO_PACKED_RAW_PIXELS)
- if (pipe->stream->config.pack_raw_pixels)
- {
+ if (pipe->stream->config.pack_raw_pixels) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
@@ -2766,8 +2700,7 @@ alloc_continuous_frames(
else
idx = pipe->stream->config.init_num_cont_raw_buf;
- for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++)
- {
+ for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++) {
/* free previous frame */
if (pipe->continuous_frames[i]) {
ia_css_frame_free(pipe->continuous_frames[i]);
@@ -2797,14 +2730,16 @@ alloc_continuous_frames(
}
int
-ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream) {
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream)
+{
if (!stream)
return -EINVAL;
return alloc_continuous_frames(stream->continuous_pipe, false);
}
static int
-load_preview_binaries(struct ia_css_pipe *pipe) {
+load_preview_binaries(struct ia_css_pipe *pipe)
+{
struct ia_css_frame_info prev_in_info,
prev_bds_out_info,
prev_out_info,
@@ -2912,8 +2847,7 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
* then the preview binary selection is done again.
*/
if (need_vf_pp &&
- (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE))
- {
+ (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
/* Preview step 2 */
if (pipe->vf_yuv_ds_input_info.res.width)
prev_vf_info = pipe->vf_yuv_ds_input_info;
@@ -2938,8 +2872,7 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
return err;
}
- if (need_vf_pp)
- {
+ if (need_vf_pp) {
struct ia_css_binary_descr vf_pp_descr;
/* Viewfinder post-processing */
@@ -2970,8 +2903,7 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
#endif
/* Copy */
- if (need_isp_copy_binary)
- {
+ if (need_isp_copy_binary) {
err = load_copy_binary(pipe,
&mycs->copy_binary,
&mycs->preview_binary);
@@ -2979,8 +2911,7 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
return err;
}
- if (pipe->shading_table)
- {
+ if (pipe->shading_table) {
ia_css_shading_table_free(pipe->shading_table);
pipe->shading_table = NULL;
}
@@ -2995,11 +2926,11 @@ ia_css_binary_unload(struct ia_css_binary *binary)
}
static int
-unload_preview_binaries(struct ia_css_pipe *pipe) {
+unload_preview_binaries(struct ia_css_pipe *pipe)
+{
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
- {
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -3052,20 +2983,19 @@ static int add_firmwares(
struct ia_css_frame *in = NULL;
struct ia_css_frame *vf = NULL;
- if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0)) {
+ if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0))
out[0] = out_frame;
- }
- if (fw->info.isp.sp.enable.in_frame != 0) {
+
+ if (fw->info.isp.sp.enable.in_frame != 0)
in = in_frame;
- }
- if (fw->info.isp.sp.enable.out_frame != 0) {
+
+ if (fw->info.isp.sp.enable.out_frame != 0)
vf = vf_frame;
- }
+
ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary,
out, in, vf, fw, binary_mode);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &extra_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &extra_stage);
if (err)
return err;
if (fw->info.isp.sp.enable.output != 0)
@@ -3173,9 +3103,8 @@ static int add_yuv_scaler_stage(
ia_css_pipe_get_generic_stage_desc(&stage_desc,
yuv_scaler_binary, out_frames, in_frame, vf_frame);
}
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- pre_vf_pp_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ pre_vf_pp_stage);
if (err)
return err;
in_frame = (*pre_vf_pp_stage)->args.out_frame[0];
@@ -3233,9 +3162,8 @@ static int add_capture_pp_stage(
ia_css_pipe_get_generic_stage_desc(&stage_desc,
capture_pp_binary, out_frames, NULL, vf_frame);
}
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- capture_pp_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ capture_pp_stage);
if (err)
return err;
err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw,
@@ -3274,7 +3202,8 @@ static void sh_css_setup_queues(void)
static int
init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
- struct ia_css_frame *vf_frame, unsigned int idx) {
+ struct ia_css_frame *vf_frame, unsigned int idx)
+{
int err = 0;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
@@ -3439,7 +3368,8 @@ ia_css_get_crop_offsets(
static int
init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
- struct ia_css_frame *frame, enum ia_css_frame_format format) {
+ struct ia_css_frame *frame, enum ia_css_frame_format format)
+{
struct ia_css_frame *in_frame;
int err = 0;
unsigned int thread_id;
@@ -3480,7 +3410,8 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
static int
init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
- struct ia_css_frame *out_frame, unsigned int idx) {
+ struct ia_css_frame *out_frame, unsigned int idx)
+{
int err = 0;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
@@ -3587,9 +3518,8 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
out_frames, NULL, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &copy_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &copy_stage);
if (err)
goto ERR;
in_frame = me->stages->args.out_frame[0];
@@ -3616,9 +3546,8 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
out_frames, in_frame, vf_frame);
}
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &video_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &video_stage);
if (err)
goto ERR;
@@ -3681,13 +3610,10 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
struct ia_css_frame *tmp_out_frame = NULL;
for (i = 0; i < num_yuv_scaler; i++) {
- if (is_output_stage[i]) {
- tmp_out_frame = out_frame;
- } else {
- tmp_out_frame = NULL;
- }
- err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
- NULL,
+ tmp_out_frame = is_output_stage[i] ? out_frame : NULL;
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame, NULL,
&yuv_scaler_binary[i],
&yuv_scaler_stage);
@@ -3711,14 +3637,14 @@ ERR:
}
static int
-create_host_acc_pipeline(struct ia_css_pipe *pipe) {
+create_host_acc_pipeline(struct ia_css_pipe *pipe)
+{
int err = 0;
const struct ia_css_fw_info *fw;
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (!pipe->stream))
- {
+ if ((!pipe) || (!pipe->stream)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -3729,15 +3655,13 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe) {
pipe->pipeline.pipe_qos_config = 0;
fw = pipe->vf_stage;
- for (i = 0; fw; fw = fw->next)
- {
+ for (i = 0; fw; fw = fw->next) {
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
if (err)
goto ERR;
}
- for (i = 0; i < pipe->config.num_acc_stages; i++)
- {
+ for (i = 0; i < pipe->config.num_acc_stages; i++) {
struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
@@ -3754,7 +3678,8 @@ ERR:
/* Create stages for preview */
static int
-create_host_preview_pipeline(struct ia_css_pipe *pipe) {
+create_host_preview_pipeline(struct ia_css_pipe *pipe)
+{
struct ia_css_pipeline_stage *copy_stage = NULL;
struct ia_css_pipeline_stage *preview_stage = NULL;
struct ia_css_pipeline_stage *vf_pp_stage = NULL;
@@ -3774,8 +3699,7 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
#endif
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
- {
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -3803,16 +3727,14 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
/* Construct in_frame info (only in case we have dynamic input */
need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
#endif
- if (need_in_frameinfo_memory)
- {
+ if (need_in_frameinfo_memory) {
err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
IA_CSS_FRAME_FORMAT_RAW);
if (err)
goto ERR;
in_frame = &me->in_frame;
- } else
- {
+ } else {
in_frame = NULL;
}
@@ -3826,14 +3748,12 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
if (pipe->pipe_settings.preview.vf_pp_binary.info)
vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
- if (pipe->pipe_settings.preview.copy_binary.info)
- {
+ if (pipe->pipe_settings.preview.copy_binary.info) {
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
out_frames, NULL, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &copy_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &copy_stage);
if (err)
goto ERR;
in_frame = me->stages->args.out_frame[0];
@@ -3842,42 +3762,37 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
/* When continuous is enabled, configure in_frame with the
* last pipe, which is the copy pipe.
*/
- if (continuous || !online) {
+ if (continuous || !online)
in_frame = pipe->stream->last_pipe->continuous_frames[0];
- }
+
#else
in_frame = pipe->continuous_frames[0];
#endif
}
- if (vf_pp_binary)
- {
+ if (vf_pp_binary) {
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
out_frames, in_frame, NULL);
- } else
- {
+ } else {
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
out_frames, in_frame, NULL);
}
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &preview_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &preview_stage);
if (err)
goto ERR;
/* If we use copy iso preview, the input must be yuv iso raw */
preview_stage->args.copy_vf =
preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
preview_stage->args.copy_output = !preview_stage->args.copy_vf;
- if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame)
- {
+ if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame) {
/* in case of copy, use the vf frame as output frame */
preview_stage->args.out_vf_frame =
preview_stage->args.out_frame[0];
}
- if (vf_pp_binary)
- {
+ if (vf_pp_binary) {
if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)
in_frame = preview_stage->args.out_vf_frame;
else
@@ -3917,7 +3832,8 @@ static void send_raw_frames(struct ia_css_pipe *pipe)
}
static int
-preview_start(struct ia_css_pipe *pipe) {
+preview_start(struct ia_css_pipe *pipe)
+{
int err = 0;
struct ia_css_pipe *copy_pipe, *capture_pipe;
struct ia_css_pipe *acc_pipe;
@@ -3927,8 +3843,7 @@ preview_start(struct ia_css_pipe *pipe) {
const struct ia_css_isp_parameters *params = NULL;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
- {
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -3955,8 +3870,7 @@ preview_start(struct ia_css_pipe *pipe) {
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
copy_ovrd = 1 << thread_id;
- if (pipe->stream->cont_capt)
- {
+ if (pipe->stream->cont_capt) {
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
&thread_id);
copy_ovrd |= 1 << thread_id;
@@ -3969,8 +3883,7 @@ preview_start(struct ia_css_pipe *pipe) {
}
/* Construct and load the copy pipe */
- if (pipe->stream->config.continuous)
- {
+ if (pipe->stream->config.continuous) {
sh_css_sp_init_pipeline(&copy_pipe->pipeline,
IA_CSS_PIPE_ID_COPY,
(uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
@@ -3991,8 +3904,7 @@ preview_start(struct ia_css_pipe *pipe) {
}
/* Construct and load the capture pipe */
- if (pipe->stream->cont_capt)
- {
+ if (pipe->stream->cont_capt) {
sh_css_sp_init_pipeline(&capture_pipe->pipeline,
IA_CSS_PIPE_ID_CAPTURE,
(uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
@@ -4010,8 +3922,7 @@ preview_start(struct ia_css_pipe *pipe) {
params);
}
- if (acc_pipe)
- {
+ if (acc_pipe) {
sh_css_sp_init_pipeline(&acc_pipe->pipeline,
IA_CSS_PIPE_ID_ACC,
(uint8_t)ia_css_pipe_get_pipe_num(acc_pipe),
@@ -4037,7 +3948,8 @@ preview_start(struct ia_css_pipe *pipe) {
int
ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
- const struct ia_css_buffer *buffer) {
+ const struct ia_css_buffer *buffer)
+{
int return_err = 0;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
@@ -4052,8 +3964,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
- if ((!pipe) || (!buffer))
- {
+ if ((!pipe) || (!buffer)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
@@ -4062,8 +3973,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
/* following code will be enabled when IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME
is removed */
#if 0
- if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- {
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
bool found_pipe = false;
for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
@@ -4077,8 +3987,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
if (!found_pipe)
return -EINVAL;
}
- if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- {
+ if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
bool found_pipe = false;
for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
@@ -4099,36 +4008,31 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
assert(pipe_id < IA_CSS_PIPE_ID_NUM);
assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
- if ((buf_type == IA_CSS_BUFFER_TYPE_INVALID) ||
- (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE) ||
- (pipe_id >= IA_CSS_PIPE_ID_NUM))
- {
+ if (buf_type == IA_CSS_BUFFER_TYPE_INVALID ||
+ buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE ||
+ pipe_id >= IA_CSS_PIPE_ID_NUM) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- if (!ret_err)
- {
+ if (!ret_err) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
- if (!ret_err)
- {
+ if (!ret_err) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES))
- {
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- if (!sh_css_sp_is_running())
- {
+ if (!sh_css_sp_is_running()) {
IA_CSS_LOG("SP is not running!");
IA_CSS_LEAVE_ERR(-EBUSY);
/* SP is not running. The queues are not valid */
@@ -4146,36 +4050,32 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
ddr_buffer.cookie_ptr = buffer->driver_cookie;
ddr_buffer.timing_data = buffer->timing_data;
- if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS)
- {
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) {
if (!buffer->data.stats_3a) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a);
ddr_buffer.payload.s3a = *buffer->data.stats_3a;
- } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS)
- {
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) {
if (!buffer->data.stats_dvs) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs);
ddr_buffer.payload.dis = *buffer->data.stats_dvs;
- } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA)
- {
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
if (!buffer->data.metadata) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata);
ddr_buffer.payload.metadata = *buffer->data.metadata;
- } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME))
- {
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME) {
if (!buffer->data.frame) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
@@ -4207,22 +4107,17 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
/* TODO: change next to correct pool for optimization */
ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf);
- assert(h_vbuf);
- assert(h_vbuf->vptr != 0x0);
-
- if ((!h_vbuf) || (h_vbuf->vptr == 0x0))
- {
+ if ((!h_vbuf) || (h_vbuf->vptr == 0x0)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
hmm_store(h_vbuf->vptr,
- (void *)(&ddr_buffer),
- sizeof(struct sh_css_hmm_buffer));
- if ((buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS)
- || (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS)
- || (buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS))
- {
+ (void *)(&ddr_buffer),
+ sizeof(struct sh_css_hmm_buffer));
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS ||
+ buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS ||
+ buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS) {
if (!pipeline) {
ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
IA_CSS_LOG("pipeline is empty!");
@@ -4240,19 +4135,18 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
(uint32_t)h_vbuf->vptr);
}
}
- } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_METADATA))
- {
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
return_err = ia_css_bufq_enqueue_buffer(thread_id,
queue_id,
(uint32_t)h_vbuf->vptr);
#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
- if (!(return_err) &&
- (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)) {
+ if (!return_err &&
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
ddr_buffer.payload.frame.frame_data,
queue_id, thread_id);
@@ -4260,8 +4154,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
#endif
}
- if (!return_err)
- {
+ if (!return_err) {
if (sh_css_hmm_buffer_record_acquire(
h_vbuf, buf_type,
HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
@@ -4276,8 +4169,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
* Tell the SP which queues are not empty,
* by sending the software event.
*/
- if (!return_err)
- {
+ if (!return_err) {
if (!sh_css_sp_is_running()) {
/* SP is not running. The queues are not valid */
IA_CSS_LOG("SP is not running!");
@@ -4289,8 +4181,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
(uint8_t)thread_id,
queue_id,
0);
- } else
- {
+ } else {
ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
IA_CSS_ERROR("buffer not enqueued");
}
@@ -4305,7 +4196,8 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
*/
int
ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
- struct ia_css_buffer *buffer) {
+ struct ia_css_buffer *buffer)
+{
int return_err;
enum sh_css_queue_id queue_id;
ia_css_ptr ddr_buffer_addr = (ia_css_ptr)0;
@@ -4318,8 +4210,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
- if ((!pipe) || (!buffer))
- {
+ if ((!pipe) || (!buffer)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
@@ -4333,27 +4224,23 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
ddr_buffer.kernel_ptr = 0;
ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- if (!ret_err)
- {
+ if (!ret_err) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
- if (!ret_err)
- {
+ if (!ret_err) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES))
- {
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
- if (!sh_css_sp_is_running())
- {
+ if (!sh_css_sp_is_running()) {
IA_CSS_LOG("SP is not running!");
IA_CSS_LEAVE_ERR(-EBUSY);
/* SP is not running. The queues are not valid */
@@ -4363,8 +4250,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
return_err = ia_css_bufq_dequeue_buffer(queue_id,
(uint32_t *)&ddr_buffer_addr);
- if (!return_err)
- {
+ if (!return_err) {
struct ia_css_frame *frame;
struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
@@ -4389,8 +4275,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
}
hmm_load(ddr_buffer_addr,
- &ddr_buffer,
- sizeof(struct sh_css_hmm_buffer));
+ &ddr_buffer,
+ sizeof(struct sh_css_hmm_buffer));
/* if the kernel_ptr is 0 or invalid, return an error.
* do not access the buffer via the kernel_ptr.
@@ -4412,8 +4298,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
buffer->driver_cookie = ddr_buffer.cookie_ptr;
buffer->timing_data = ddr_buffer.timing_data;
- if ((buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) ||
- (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)) {
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick;
}
@@ -4506,8 +4392,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
* Tell the SP which queues are not full,
* by sending the software event.
*/
- if (!return_err)
- {
+ if (!return_err) {
if (!sh_css_sp_is_running()) {
IA_CSS_LOG("SP is not running!");
IA_CSS_LEAVE_ERR(-EBUSY);
@@ -4556,12 +4441,14 @@ static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
};
int
-ia_css_dequeue_event(struct ia_css_event *event) {
+ia_css_dequeue_event(struct ia_css_event *event)
+{
return ia_css_dequeue_psys_event(event);
}
int
-ia_css_dequeue_psys_event(struct ia_css_event *event) {
+ia_css_dequeue_psys_event(struct ia_css_event *event)
+{
enum ia_css_pipe_id pipe_id = 0;
u8 payload[4] = {0, 0, 0, 0};
int ret_err;
@@ -4576,11 +4463,9 @@ ia_css_dequeue_psys_event(struct ia_css_event *event) {
if (!event)
return -EINVAL;
+ /* SP is not running. The queues are not valid */
if (!sh_css_sp_is_running())
- {
- /* SP is not running. The queues are not valid */
return -EBUSY;
- }
/* dequeue the event (if any) from the psys event queue */
ret_err = ia_css_bufq_dequeue_psys_event(payload);
@@ -4607,8 +4492,7 @@ ia_css_dequeue_psys_event(struct ia_css_event *event) {
event->timer_code = 0;
event->timer_subcode = 0;
- if (event->type == IA_CSS_EVENT_TYPE_TIMER)
- {
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
/* timer event: get the 2nd event and decode the data into the event struct */
u32 tmp_data;
/* 1st event: LSB 16-bit timer data and code */
@@ -4632,37 +4516,32 @@ ia_css_dequeue_psys_event(struct ia_css_event *event) {
tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
event->timer_data |= (tmp_data << 16);
event->timer_subcode = payload[2];
- }
+ } else {
/* It's a non-timer event. So clear the first half of the timer event data.
* If the second part of the TIMER event is not received, we discard
* the first half of the timer data and process the non-timer event without
* affecting the flow. So the non-timer event falls through
* the code. */
- else {
event->timer_data = 0;
event->timer_code = 0;
event->timer_subcode = 0;
IA_CSS_ERROR("Missing 2nd timer event. Timer event discarded");
}
}
- if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF)
- {
+ if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF) {
event->port = (enum mipi_port_id)payload[1];
event->exp_id = payload[3];
- } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING)
- {
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING) {
event->fw_warning = (enum ia_css_fw_warning)payload[1];
/* exp_id is only available in these warning types */
if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED ||
event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED)
event->exp_id = payload[3];
- } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT)
- {
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT) {
event->fw_assert_module_id = payload[1]; /* module */
event->fw_assert_line_no = (payload[2] << 8) + payload[3];
/* payload[2] is line_no>>8, payload[3] is line_no&0xff */
- } else if (event->type != IA_CSS_EVENT_TYPE_TIMER)
- {
+ } else if (event->type != IA_CSS_EVENT_TYPE_TIMER) {
/* pipe-related events.
* payload[1] contains the pipe_num,
* payload[2] contains the pipe_id. These are different. */
@@ -4712,7 +4591,8 @@ ia_css_dequeue_psys_event(struct ia_css_event *event) {
}
int
-ia_css_dequeue_isys_event(struct ia_css_event *event) {
+ia_css_dequeue_isys_event(struct ia_css_event *event)
+{
u8 payload[4] = {0, 0, 0, 0};
int err = 0;
@@ -4722,11 +4602,9 @@ ia_css_dequeue_isys_event(struct ia_css_event *event) {
if (!event)
return -EINVAL;
+ /* SP is not running. The queues are not valid */
if (!sh_css_sp_is_running())
- {
- /* SP is not running. The queues are not valid */
return -EBUSY;
- }
err = ia_css_bufq_dequeue_isys_event(payload);
if (err)
@@ -4759,7 +4637,8 @@ acc_start(struct ia_css_pipe *pipe)
}
static int
-sh_css_pipe_start(struct ia_css_stream *stream) {
+sh_css_pipe_start(struct ia_css_stream *stream)
+{
int err = 0;
struct ia_css_pipe *pipe;
@@ -4768,22 +4647,19 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
IA_CSS_ENTER_PRIVATE("stream = %p", stream);
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
pipe = stream->last_pipe;
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
pipe_id = pipe->mode;
- if (stream->started)
- {
+ if (stream->started) {
IA_CSS_WARNING("Cannot start stream that is already started");
IA_CSS_LEAVE_ERR(err);
return err;
@@ -4791,8 +4667,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
pipe->stop_requested = false;
- switch (pipe_id)
- {
+ switch (pipe_id) {
case IA_CSS_PIPE_ID_PREVIEW:
err = preview_start(pipe);
break;
@@ -4812,8 +4687,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
err = -EINVAL;
}
/* DH regular multi pipe - not continuous mode: start the next pipes too */
- if (!stream->config.continuous)
- {
+ if (!stream->config.continuous) {
int i;
for (i = 1; i < stream->num_pipes && 0 == err ; i++) {
@@ -4843,8 +4717,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
}
}
}
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
@@ -4854,8 +4727,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
* don't use ISP parameters anyway. So this should be okay.
* The SP binary (jpeg) copy does not use any parameters.
*/
- if (!copy_on_sp(pipe))
- {
+ if (!copy_on_sp(pipe)) {
sh_css_invalidate_params(stream);
err = sh_css_param_update_isp_params(pipe,
stream->isp_params_configs, true, NULL);
@@ -4869,8 +4741,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- if (!sh_css_sp_is_running())
- {
+ if (!sh_css_sp_is_running()) {
IA_CSS_LEAVE_ERR_PRIVATE(-EBUSY);
/* SP is not running. The queues are not valid */
return -EBUSY;
@@ -4879,8 +4750,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
(uint8_t)thread_id, 0, 0);
/* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */
- if (!stream->config.continuous)
- {
+ if (!stream->config.continuous) {
int i;
for (i = 1; i < stream->num_pipes; i++) {
@@ -4894,8 +4764,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
}
/* in case of continuous capture mode, we also start the capture thread and the copy thread */
- if (pipe->stream->config.continuous)
- {
+ if (pipe->stream->config.continuous) {
struct ia_css_pipe *copy_pipe = NULL;
if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
@@ -4914,8 +4783,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
IA_CSS_PSYS_SW_EVENT_START_STREAM,
(uint8_t)thread_id, 0, 0);
}
- if (pipe->stream->cont_capt)
- {
+ if (pipe->stream->cont_capt) {
struct ia_css_pipe *capture_pipe = NULL;
if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
@@ -4936,8 +4804,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
}
/* in case of PREVIEW mode, check whether the QOS acc_pipe is available, then start the QOS pipe */
- if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- {
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
struct ia_css_pipe *acc_pipe = NULL;
acc_pipe = pipe->pipe_settings.preview.acc_pipe;
@@ -4988,7 +4855,8 @@ sh_css_continuous_is_enabled(uint8_t pipe_num)
/* ISP2400 */
int
ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
- int *buffer_depth) {
+ int *buffer_depth)
+{
if (!buffer_depth)
return -EINVAL;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
@@ -4998,7 +4866,8 @@ ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
}
int
-ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth) {
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
+{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n", buffer_depth);
(void)stream;
if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
@@ -5012,7 +4881,8 @@ ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth) {
/* ISP2401 */
int
ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
- int *buffer_depth) {
+ int *buffer_depth)
+{
if (!buffer_depth)
return -EINVAL;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
@@ -5036,18 +4906,14 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
enum ia_css_pipe_id main_pipe_id;
int i;
- assert(stream);
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LOG("stream does NOT exist!");
err = -EINVAL;
goto ERR;
}
main_pipe = stream->last_pipe;
- assert(main_pipe);
- if (!main_pipe)
- {
+ if (!main_pipe) {
IA_CSS_LOG("main_pipe does NOT exist!");
err = -EINVAL;
goto ERR;
@@ -5060,11 +4926,10 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
* Stop all "ia_css_pipe" instances in this target
* "ia_css_stream" instance.
*/
- for (i = 0; i < stream->num_pipes; i++)
- {
+ for (i = 0; i < stream->num_pipes; i++) {
/* send the "stop" request to the "ia_css_pipe" instance */
IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
- stream->pipes[i]->pipeline.pipe_id);
+ stream->pipes[i]->pipeline.pipe_id);
err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline);
/*
@@ -5095,8 +4960,7 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
*
* We need to stop this "Copy Pipe", as well.
*/
- if (main_pipe->stream->config.continuous)
- {
+ if (main_pipe->stream->config.continuous) {
struct ia_css_pipe *copy_pipe = NULL;
/* get the reference to "Copy Pipe" */
@@ -5106,7 +4970,6 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
/* return the error code if "Copy Pipe" does NOT exist */
- assert(copy_pipe);
if (!copy_pipe) {
IA_CSS_LOG("Copy Pipe does NOT exist!");
err = -EINVAL;
@@ -5115,7 +4978,7 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
/* send the "stop" request to "Copy Pipe" */
IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
- copy_pipe->pipeline.pipe_id);
+ copy_pipe->pipeline.pipe_id);
err = ia_css_pipeline_request_stop(&copy_pipe->pipeline);
}
@@ -5141,7 +5004,6 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
int i;
- assert(stream);
if (!stream) {
IA_CSS_LOG("stream does NOT exist!");
rval = false;
@@ -5149,7 +5011,6 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
}
main_pipe = stream->last_pipe;
- assert(main_pipe);
if (!main_pipe) {
IA_CSS_LOG("main_pipe does NOT exist!");
@@ -5190,7 +5051,6 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
/* return if "Copy Pipe" does NOT exist */
- assert(copy_pipe);
if (!copy_pipe) {
IA_CSS_LOG("Copy Pipe does NOT exist!");
@@ -5272,8 +5132,7 @@ sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
binary = ia_css_pipe_get_shading_correction_binary(pipe);
- if (binary)
- {
+ if (binary) {
err = ia_css_binary_get_shading_info(binary,
IA_CSS_SHADING_CORRECTION_TYPE_1,
pipe->required_bds_factor,
@@ -5283,8 +5142,7 @@ sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
/* Other function calls can be added here when other shading correction types are added
* in the future.
*/
- } else
- {
+ } else {
/* When the pipe does not have a binary which has the shading
* correction, this function does not need to fill the shading
* information. It is not an error case, and then
@@ -5297,7 +5155,8 @@ sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
static int
sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
- struct ia_css_grid_info *info) {
+ struct ia_css_grid_info *info)
+{
int err = 0;
struct ia_css_binary *binary = NULL;
@@ -5308,30 +5167,27 @@ sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
binary = ia_css_pipe_get_s3a_binary(pipe);
- if (binary)
- {
+ if (binary) {
err = ia_css_binary_3a_grid_info(binary, info, pipe);
if (err)
goto ERR;
- } else
+ } else {
memset(&info->s3a_grid, 0, sizeof(info->s3a_grid));
+ }
binary = ia_css_pipe_get_sdis_binary(pipe);
- if (binary)
- {
+ if (binary) {
ia_css_binary_dvs_grid_info(binary, info, pipe);
ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
- } else
- {
+ } else {
memset(&info->dvs_grid.dvs_grid_info, 0,
sizeof(info->dvs_grid.dvs_grid_info));
memset(&info->dvs_grid.dvs_stat_grid_info, 0,
sizeof(info->dvs_grid.dvs_stat_grid_info));
}
- if (binary)
- {
+ if (binary) {
/* copy pipe does not have ISP binary */
info->isp_in_width = binary->internal_frame_info.res.width;
info->isp_in_height = binary->internal_frame_info.res.height;
@@ -5351,7 +5207,8 @@ ERR :
*/
static int
ia_css_pipe_check_format(struct ia_css_pipe *pipe,
- enum ia_css_frame_format format) {
+ enum ia_css_frame_format format)
+{
const enum ia_css_frame_format *supported_formats;
int number_of_formats;
int found = 0;
@@ -5359,8 +5216,7 @@ ia_css_pipe_check_format(struct ia_css_pipe *pipe,
IA_CSS_ENTER_PRIVATE("");
- if (NULL == pipe || NULL == pipe->pipe_settings.video.video_binary.info)
- {
+ if (NULL == pipe || NULL == pipe->pipe_settings.video.video_binary.info) {
IA_CSS_ERROR("Pipe or binary info is not set");
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
@@ -5369,23 +5225,19 @@ ia_css_pipe_check_format(struct ia_css_pipe *pipe,
supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats;
number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats) / sizeof(enum ia_css_frame_format);
- for (i = 0; i < number_of_formats && !found; i++)
- {
+ for (i = 0; i < number_of_formats && !found; i++) {
if (supported_formats[i] == format) {
found = 1;
break;
}
}
- if (!found)
- {
+ if (!found) {
IA_CSS_ERROR("Requested format is not supported by binary");
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
- } else
- {
- IA_CSS_LEAVE_ERR_PRIVATE(0);
- return 0;
}
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
}
static int load_video_binaries(struct ia_css_pipe *pipe)
@@ -5528,10 +5380,10 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
&mycs->video_binary);
if (err) {
- if (video_vf_info) {
- /* This will do another video binary lookup later for YUV_LINE format*/
+ /* This will do another video binary lookup later for YUV_LINE format */
+ if (video_vf_info)
need_vf_pp = true;
- } else
+ else
return err;
} else if (video_vf_info) {
/* The first video binary lookup is successful, but we may
@@ -5694,13 +5546,13 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
}
static int
-unload_video_binaries(struct ia_css_pipe *pipe) {
+unload_video_binaries(struct ia_css_pipe *pipe)
+{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO))
- {
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -5850,31 +5702,29 @@ static int
sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
unsigned int height, unsigned int min_width,
enum ia_css_frame_format format,
- unsigned int idx) {
+ unsigned int idx)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n",
pipe, width, height, min_width, format, idx);
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
err = ia_css_util_check_res(width, height);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->vf_output_info[idx].res.width != width ||
pipe->vf_output_info[idx].res.height != height ||
pipe->vf_output_info[idx].format != format)
- {
ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height,
format, min_width);
- }
+
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
@@ -5955,7 +5805,7 @@ static bool need_capt_ldc(
}
static int set_num_primary_stages(unsigned int *num,
- enum ia_css_pipe_version version)
+ enum ia_css_pipe_version version)
{
int err = 0;
@@ -6155,10 +6005,13 @@ static int load_primary_binaries(
capt_pp_in_info = &prim_out_info;
ia_css_pipe_get_capturepp_binarydesc(pipe,
- &capture_pp_descr, capt_pp_in_info,
- &capt_pp_out_info, &vf_info);
+ &capture_pp_descr,
+ capt_pp_in_info,
+ &capt_pp_out_info,
+ &vf_info);
+
err = ia_css_binary_find(&capture_pp_descr,
- &mycs->capture_pp_binary);
+ &mycs->capture_pp_binary);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6168,11 +6021,12 @@ static int load_primary_binaries(
struct ia_css_binary_descr capt_ldc_descr;
ia_css_pipe_get_ldc_binarydesc(pipe,
- &capt_ldc_descr, &prim_out_info,
- &capt_ldc_out_info);
+ &capt_ldc_descr,
+ &prim_out_info,
+ &capt_ldc_out_info);
err = ia_css_binary_find(&capt_ldc_descr,
- &mycs->capture_ldc_binary);
+ &mycs->capture_ldc_binary);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6189,8 +6043,9 @@ static int load_primary_binaries(
if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] &&
(i == mycs->num_primary_stage - 1))
local_vf_info = &vf_info;
- ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i], &prim_in_info,
- &prim_out_info, local_vf_info, i);
+ ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i],
+ &prim_in_info, &prim_out_info,
+ local_vf_info, i);
err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -6242,8 +6097,8 @@ static int load_primary_binaries(
/* ISP Copy */
if (need_isp_copy_binary) {
err = load_copy_binary(pipe,
- &mycs->copy_binary,
- &mycs->primary_binary[0]);
+ &mycs->copy_binary,
+ &mycs->primary_binary[0]);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6254,7 +6109,8 @@ static int load_primary_binaries(
}
static int
-allocate_delay_frames(struct ia_css_pipe *pipe) {
+allocate_delay_frames(struct ia_css_pipe *pipe)
+{
unsigned int num_delay_frames = 0, i = 0;
unsigned int dvs_frame_delay = 0;
struct ia_css_frame_info ref_info;
@@ -6264,8 +6120,7 @@ allocate_delay_frames(struct ia_css_pipe *pipe) {
IA_CSS_ENTER_PRIVATE("");
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_ERROR("Invalid args - pipe %p", pipe);
return -EINVAL;
}
@@ -6276,8 +6131,7 @@ allocate_delay_frames(struct ia_css_pipe *pipe) {
if (dvs_frame_delay > 0)
num_delay_frames = dvs_frame_delay + 1;
- switch (mode)
- {
+ switch (mode) {
case IA_CSS_PIPE_ID_CAPTURE: {
struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture;
(void)mycs_capture;
@@ -6329,8 +6183,7 @@ allocate_delay_frames(struct ia_css_pipe *pipe) {
ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH;
assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES);
- for (i = 0; i < num_delay_frames; i++)
- {
+ for (i = 0; i < num_delay_frames; i++) {
err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info);
if (err)
return err;
@@ -6339,8 +6192,8 @@ allocate_delay_frames(struct ia_css_pipe *pipe) {
return 0;
}
-static int load_advanced_binaries(
- struct ia_css_pipe *pipe) {
+static int load_advanced_binaries(struct ia_css_pipe *pipe)
+{
struct ia_css_frame_info pre_in_info, gdc_in_info,
post_in_info, post_out_info,
vf_info, *vf_pp_in_info, *pipe_out_info,
@@ -6353,7 +6206,7 @@ static int load_advanced_binaries(
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
- pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
if (pipe->pipe_settings.capture.pre_isp_binary.info)
return 0;
pipe_out_info = &pipe->output_info[0];
@@ -6366,17 +6219,18 @@ static int load_advanced_binaries(
need_pp = need_capture_pp(pipe);
ia_css_frame_info_set_format(&vf_info,
- IA_CSS_FRAME_FORMAT_YUV_LINE);
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
/* we build up the pipeline starting at the end */
/* Capture post-processing */
if (need_pp) {
struct ia_css_binary_descr capture_pp_descr;
- ia_css_pipe_get_capturepp_binarydesc(pipe,
- &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
+ &post_out_info,
+ pipe_out_info, &vf_info);
err = ia_css_binary_find(&capture_pp_descr,
- &pipe->pipe_settings.capture.capture_pp_binary);
+ &pipe->pipe_settings.capture.capture_pp_binary);
if (err)
return err;
} else {
@@ -6387,10 +6241,11 @@ static int load_advanced_binaries(
{
struct ia_css_binary_descr post_gdc_descr;
- ia_css_pipe_get_post_gdc_binarydesc(pipe,
- &post_gdc_descr, &post_in_info, &post_out_info, &vf_info);
+ ia_css_pipe_get_post_gdc_binarydesc(pipe, &post_gdc_descr,
+ &post_in_info,
+ &post_out_info, &vf_info);
err = ia_css_binary_find(&post_gdc_descr,
- &pipe->pipe_settings.capture.post_isp_binary);
+ &pipe->pipe_settings.capture.post_isp_binary);
if (err)
return err;
}
@@ -6400,9 +6255,9 @@ static int load_advanced_binaries(
struct ia_css_binary_descr gdc_descr;
ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info,
- &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
err = ia_css_binary_find(&gdc_descr,
- &pipe->pipe_settings.capture.anr_gdc_binary);
+ &pipe->pipe_settings.capture.anr_gdc_binary);
if (err)
return err;
}
@@ -6414,9 +6269,9 @@ static int load_advanced_binaries(
struct ia_css_binary_descr pre_gdc_descr;
ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info,
- &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
err = ia_css_binary_find(&pre_gdc_descr,
- &pipe->pipe_settings.capture.pre_isp_binary);
+ &pipe->pipe_settings.capture.pre_isp_binary);
if (err)
return err;
}
@@ -6438,7 +6293,7 @@ static int load_advanced_binaries(
ia_css_pipe_get_vfpp_binarydesc(pipe,
&vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
err = ia_css_binary_find(&vf_pp_descr,
- &pipe->pipe_settings.capture.vf_pp_binary);
+ &pipe->pipe_settings.capture.vf_pp_binary);
if (err)
return err;
}
@@ -6450,14 +6305,14 @@ static int load_advanced_binaries(
#endif
if (need_isp_copy)
load_copy_binary(pipe,
- &pipe->pipe_settings.capture.copy_binary,
- &pipe->pipe_settings.capture.pre_isp_binary);
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
return err;
}
-static int load_bayer_isp_binaries(
- struct ia_css_pipe *pipe) {
+static int load_bayer_isp_binaries(struct ia_css_pipe *pipe)
+{
struct ia_css_frame_info pre_isp_in_info, *pipe_out_info;
int err = 0;
struct ia_css_binary_descr pre_de_descr;
@@ -6465,7 +6320,7 @@ static int load_bayer_isp_binaries(
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
- pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
pipe_out_info = &pipe->output_info[0];
if (pipe->pipe_settings.capture.pre_isp_binary.info)
@@ -6476,17 +6331,17 @@ static int load_bayer_isp_binaries(
return err;
ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr,
- &pre_isp_in_info,
- pipe_out_info);
+ &pre_isp_in_info,
+ pipe_out_info);
err = ia_css_binary_find(&pre_de_descr,
- &pipe->pipe_settings.capture.pre_isp_binary);
+ &pipe->pipe_settings.capture.pre_isp_binary);
return err;
}
-static int load_low_light_binaries(
- struct ia_css_pipe *pipe) {
+static int load_low_light_binaries(struct ia_css_pipe *pipe)
+{
struct ia_css_frame_info pre_in_info, anr_in_info,
post_in_info, post_out_info,
vf_info, *pipe_vf_out_info, *pipe_out_info,
@@ -6498,7 +6353,7 @@ static int load_low_light_binaries(
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
- pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
if (pipe->pipe_settings.capture.pre_isp_binary.info)
return 0;
@@ -6513,17 +6368,18 @@ static int load_low_light_binaries(
need_pp = need_capture_pp(pipe);
ia_css_frame_info_set_format(&vf_info,
- IA_CSS_FRAME_FORMAT_YUV_LINE);
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
/* we build up the pipeline starting at the end */
/* Capture post-processing */
if (need_pp) {
struct ia_css_binary_descr capture_pp_descr;
- ia_css_pipe_get_capturepp_binarydesc(pipe,
- &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
+ &post_out_info,
+ pipe_out_info, &vf_info);
err = ia_css_binary_find(&capture_pp_descr,
- &pipe->pipe_settings.capture.capture_pp_binary);
+ &pipe->pipe_settings.capture.capture_pp_binary);
if (err)
return err;
} else {
@@ -6537,7 +6393,7 @@ static int load_low_light_binaries(
ia_css_pipe_get_post_anr_binarydesc(pipe,
&post_anr_descr, &post_in_info, &post_out_info, &vf_info);
err = ia_css_binary_find(&post_anr_descr,
- &pipe->pipe_settings.capture.post_isp_binary);
+ &pipe->pipe_settings.capture.post_isp_binary);
if (err)
return err;
}
@@ -6547,9 +6403,9 @@ static int load_low_light_binaries(
struct ia_css_binary_descr anr_descr;
ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info,
- &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
err = ia_css_binary_find(&anr_descr,
- &pipe->pipe_settings.capture.anr_gdc_binary);
+ &pipe->pipe_settings.capture.anr_gdc_binary);
if (err)
return err;
}
@@ -6561,9 +6417,9 @@ static int load_low_light_binaries(
struct ia_css_binary_descr pre_anr_descr;
ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, &pre_in_info,
- &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
err = ia_css_binary_find(&pre_anr_descr,
- &pipe->pipe_settings.capture.pre_isp_binary);
+ &pipe->pipe_settings.capture.pre_isp_binary);
if (err)
return err;
}
@@ -6582,10 +6438,10 @@ static int load_low_light_binaries(
{
struct ia_css_binary_descr vf_pp_descr;
- ia_css_pipe_get_vfpp_binarydesc(pipe,
- &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ vf_pp_in_info, pipe_vf_out_info);
err = ia_css_binary_find(&vf_pp_descr,
- &pipe->pipe_settings.capture.vf_pp_binary);
+ &pipe->pipe_settings.capture.vf_pp_binary);
if (err)
return err;
}
@@ -6597,8 +6453,8 @@ static int load_low_light_binaries(
#endif
if (need_isp_copy)
err = load_copy_binary(pipe,
- &pipe->pipe_settings.capture.copy_binary,
- &pipe->pipe_settings.capture.pre_isp_binary);
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
return err;
}
@@ -6623,15 +6479,15 @@ static bool copy_on_sp(struct ia_css_pipe *pipe)
return rval;
}
-static int load_capture_binaries(
- struct ia_css_pipe *pipe) {
+static int load_capture_binaries(struct ia_css_pipe *pipe)
+{
int err = 0;
bool must_be_raw;
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
- pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
if (pipe->pipe_settings.capture.primary_binary[0].info) {
IA_CSS_LEAVE_ERR_PRIVATE(0);
@@ -6692,13 +6548,14 @@ static int load_capture_binaries(
}
static int
-unload_capture_binaries(struct ia_css_pipe *pipe) {
+unload_capture_binaries(struct ia_css_pipe *pipe)
+{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || ((pipe->mode != IA_CSS_PIPE_ID_CAPTURE) && (pipe->mode != IA_CSS_PIPE_ID_COPY)))
- {
+ if (!pipe || (pipe->mode != IA_CSS_PIPE_ID_CAPTURE &&
+ pipe->mode != IA_CSS_PIPE_ID_COPY)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -6726,7 +6583,8 @@ unload_capture_binaries(struct ia_css_pipe *pipe) {
static bool
need_downscaling(const struct ia_css_resolution in_res,
- const struct ia_css_resolution out_res) {
+ const struct ia_css_resolution out_res)
+{
if (in_res.width > out_res.width || in_res.height > out_res.height)
return true;
@@ -6734,7 +6592,8 @@ need_downscaling(const struct ia_css_resolution in_res,
}
static bool
-need_yuv_scaler_stage(const struct ia_css_pipe *pipe) {
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
+{
unsigned int i;
struct ia_css_resolution in_res, out_res;
@@ -6773,10 +6632,11 @@ need_yuv_scaler_stage(const struct ia_css_pipe *pipe) {
/* which has some hard-coded knowledge that prevents reuse of the function. */
/* Later, merge this with ia_css_pipe_create_cas_scaler_desc */
static int ia_css_pipe_create_cas_scaler_desc_single_output(
- struct ia_css_frame_info *cas_scaler_in_info,
- struct ia_css_frame_info *cas_scaler_out_info,
- struct ia_css_frame_info *cas_scaler_vf_info,
- struct ia_css_cas_binary_descr *descr) {
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr)
+{
unsigned int i;
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
int err = 0;
@@ -6794,9 +6654,9 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
descr->num_output_stage = 1;
hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width,
- cas_scaler_out_info->res.width);
+ cas_scaler_out_info->res.width);
ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height,
- cas_scaler_out_info->res.height);
+ cas_scaler_out_info->res.height);
/* use the same horizontal and vertical downscaling factor for simplicity */
assert(hor_ds_factor == ver_ds_factor);
@@ -6806,31 +6666,36 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
i *= max_scale_factor_per_stage;
}
- descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
- GFP_KERNEL);
+ descr->in_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->in_info) {
err = -ENOMEM;
goto ERR;
}
- descr->internal_out_info = kmalloc(descr->num_stage * sizeof(
- struct ia_css_frame_info), GFP_KERNEL);
+ descr->internal_out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->internal_out_info) {
err = -ENOMEM;
goto ERR;
}
- descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
- GFP_KERNEL);
+ descr->out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->out_info) {
err = -ENOMEM;
goto ERR;
}
- descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
- GFP_KERNEL);
+ descr->vf_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->vf_info) {
err = -ENOMEM;
goto ERR;
}
- descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool),
+ GFP_KERNEL);
if (!descr->is_output_stage) {
err = -ENOMEM;
goto ERR;
@@ -6874,9 +6739,9 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
max_scale_factor_per_stage;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
ia_css_frame_info_init(&descr->internal_out_info[i],
- tmp_in_info.res.width / max_scale_factor_per_stage,
- tmp_in_info.res.height / max_scale_factor_per_stage,
- IA_CSS_FRAME_FORMAT_YUV420, 0);
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
descr->out_info[i].res.width = 0;
descr->out_info[i].res.height = 0;
descr->vf_info[i].res.width = 0;
@@ -6892,9 +6757,10 @@ ERR:
}
/* FIXME: merge most of this and single output version */
-static int ia_css_pipe_create_cas_scaler_desc(
- struct ia_css_pipe *pipe,
- struct ia_css_cas_binary_descr *descr) {
+static int
+ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pipe,
+ struct ia_css_cas_binary_descr *descr)
+{
struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
struct ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
@@ -6951,30 +6817,35 @@ static int ia_css_pipe_create_cas_scaler_desc(
descr->num_stage = num_stages;
descr->in_info = kmalloc_array(descr->num_stage,
- sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->in_info) {
err = -ENOMEM;
goto ERR;
}
- descr->internal_out_info = kmalloc(descr->num_stage * sizeof(
- struct ia_css_frame_info), GFP_KERNEL);
+ descr->internal_out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->internal_out_info) {
err = -ENOMEM;
goto ERR;
}
- descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
- GFP_KERNEL);
+ descr->out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->out_info) {
err = -ENOMEM;
goto ERR;
}
- descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
- GFP_KERNEL);
+ descr->vf_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
if (!descr->vf_info) {
err = -ENOMEM;
goto ERR;
}
- descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool),
+ GFP_KERNEL);
if (!descr->is_output_stage) {
err = -ENOMEM;
goto ERR;
@@ -6984,7 +6855,7 @@ static int ia_css_pipe_create_cas_scaler_desc(
if (out_info[i]) {
if (i > 0) {
assert((out_info[i - 1]->res.width >= out_info[i]->res.width) &&
- (out_info[i - 1]->res.height >= out_info[i]->res.height));
+ (out_info[i - 1]->res.height >= out_info[i]->res.height));
}
}
}
@@ -7032,9 +6903,9 @@ static int ia_css_pipe_create_cas_scaler_desc(
max_scale_factor_per_stage;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
ia_css_frame_info_init(&descr->internal_out_info[i],
- tmp_in_info.res.width / max_scale_factor_per_stage,
- tmp_in_info.res.height / max_scale_factor_per_stage,
- IA_CSS_FRAME_FORMAT_YUV420, 0);
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
descr->out_info[i].res.width = 0;
descr->out_info[i].res.height = 0;
descr->vf_info[i].res.width = 0;
@@ -7050,7 +6921,8 @@ ERR:
}
static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
- *descr) {
+ *descr)
+{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_pipe_destroy_cas_scaler_desc() enter:\n");
kfree(descr->in_info);
@@ -7068,7 +6940,8 @@ static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
}
static int
-load_yuvpp_binaries(struct ia_css_pipe *pipe) {
+load_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
int err = 0;
bool need_scaler = false;
struct ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
@@ -7093,8 +6966,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
mycs = &pipe->pipe_settings.yuvpp;
- for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
- {
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
if (pipe->vf_output_info[i].res.width != 0) {
err = ia_css_util_check_vf_out_info(&pipe->output_info[i],
&pipe->vf_output_info[i]);
@@ -7108,18 +6980,18 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
/* we build up the pipeline starting at the end */
/* Capture post-processing */
- if (need_scaler)
- {
+ if (need_scaler) {
struct ia_css_binary_descr yuv_scaler_descr;
err = ia_css_pipe_create_cas_scaler_desc(pipe,
- &cas_scaler_descr);
+ &cas_scaler_descr);
if (err)
goto ERR;
mycs->num_output = cas_scaler_descr.num_output_stage;
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
- sizeof(struct ia_css_binary), GFP_KERNEL);
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
if (!mycs->yuv_scaler_binary) {
err = -ENOMEM;
goto ERR;
@@ -7133,28 +7005,25 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
for (i = 0; i < cas_scaler_descr.num_stage; i++) {
mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
ia_css_pipe_get_yuvscaler_binarydesc(pipe,
- &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
- &cas_scaler_descr.out_info[i],
- &cas_scaler_descr.internal_out_info[i],
- &cas_scaler_descr.vf_info[i]);
+ &yuv_scaler_descr,
+ &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
err = ia_css_binary_find(&yuv_scaler_descr,
- &mycs->yuv_scaler_binary[i]);
+ &mycs->yuv_scaler_binary[i]);
if (err)
goto ERR;
}
ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
- } else
- {
+ } else {
mycs->num_output = 1;
}
if (need_scaler)
- {
next_binary = &mycs->yuv_scaler_binary[0];
- } else
- {
+ else
next_binary = NULL;
- }
#if defined(ISP2401)
/*
@@ -7180,11 +7049,10 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
need_isp_copy_binary = true;
#endif /* ISP2401 */
- if (need_isp_copy_binary)
- {
+ if (need_isp_copy_binary) {
err = load_copy_binary(pipe,
- &mycs->copy_binary,
- next_binary);
+ &mycs->copy_binary,
+ next_binary);
if (err)
goto ERR;
@@ -7211,8 +7079,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
}
/* Viewfinder post-processing */
- if (need_scaler)
- {
+ if (need_scaler) {
for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) {
if (mycs->is_output_stage[i]) {
assert(j < 2);
@@ -7222,19 +7089,18 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
}
}
mycs->num_vf_pp = j;
- } else
- {
+ } else {
vf_pp_in_info[0] =
&mycs->copy_binary.vf_frame_info;
- for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
vf_pp_in_info[i] = NULL;
- }
+
mycs->num_vf_pp = 1;
}
- mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
- GFP_KERNEL);
- if (!mycs->vf_pp_binary)
- {
+ mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp *
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->vf_pp_binary) {
err = -ENOMEM;
goto ERR;
}
@@ -7242,8 +7108,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
{
struct ia_css_binary_descr vf_pp_descr;
- for (i = 0; i < mycs->num_vf_pp; i++)
- {
+ for (i = 0; i < mycs->num_vf_pp; i++) {
if (pipe->vf_output_info[i].res.width != 0) {
ia_css_pipe_get_vfpp_binarydesc(pipe,
&vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]);
@@ -7259,34 +7124,31 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
ERR:
if (need_scaler)
- {
ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
- }
+
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n",
err);
return err;
}
static int
-unload_yuvpp_binaries(struct ia_css_pipe *pipe) {
+unload_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP))
- {
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary);
for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++)
- {
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]);
- }
+
for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++)
- {
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]);
- }
+
kfree(pipe->pipe_settings.yuvpp.is_output_stage);
pipe->pipe_settings.yuvpp.is_output_stage = NULL;
kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary);
@@ -7336,25 +7198,23 @@ static int yuvpp_start(struct ia_css_pipe *pipe)
}
static int
-sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe) {
+sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
/* PIPE_MODE_COPY has no binaries, but has output frames to outside */
- if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
- {
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
- switch (pipe->mode)
- {
+ switch (pipe->mode) {
case IA_CSS_PIPE_ID_PREVIEW:
err = unload_preview_binaries(pipe);
break;
@@ -7375,7 +7235,8 @@ sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe) {
}
static int
-sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) {
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe)
+{
int err = 0;
assert(pipe);
@@ -7385,8 +7246,7 @@ sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) {
if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
return err;
- switch (pipe->mode)
- {
+ switch (pipe->mode) {
case IA_CSS_PIPE_ID_PREVIEW:
err = load_preview_binaries(pipe);
break;
@@ -7405,8 +7265,7 @@ sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) {
err = -EINVAL;
break;
}
- if (err)
- {
+ if (err) {
if (sh_css_pipe_unload_binaries(pipe)) {
/* currently css does not support multiple error returns in a single function,
* using -EINVAL in this case */
@@ -7417,7 +7276,8 @@ sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) {
}
static int
-create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
+{
struct ia_css_pipeline *me;
int err = 0;
struct ia_css_pipeline_stage *vf_pp_stage = NULL,
@@ -7444,15 +7304,13 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
#endif
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
- if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP))
- {
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
me = &pipe->pipeline;
ia_css_pipeline_clean(me);
- for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
- {
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
out_frame[i] = NULL;
vf_frame[i] = NULL;
}
@@ -7480,8 +7338,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
/* the input frame can come from:
* a) memory: connect yuvscaler to me->in_frame
* b) sensor, via copy binary: connect yuvscaler to copy binary later on */
- if (need_in_frameinfo_memory)
- {
+ if (need_in_frameinfo_memory) {
/* TODO: improve for different input formats. */
/*
@@ -7530,13 +7387,11 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
}
in_frame = &me->in_frame;
- } else
- {
+ } else {
in_frame = NULL;
}
- for (i = 0; i < num_output_stage; i++)
- {
+ for (i = 0; i < num_output_stage; i++) {
assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE);
if (pipe->output_info[i].res.width != 0) {
err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i);
@@ -7563,8 +7418,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary;
need_scaler = need_yuv_scaler_stage(pipe);
- if (pipe->pipe_settings.yuvpp.copy_binary.info)
- {
+ if (pipe->pipe_settings.yuvpp.copy_binary.info) {
struct ia_css_frame *in_frame_local = NULL;
#ifdef ISP2401
@@ -7574,18 +7428,26 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
#endif
if (need_scaler) {
- ia_css_pipe_util_set_output_frames(bin_out_frame, 0, NULL);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- bin_out_frame, in_frame_local, NULL);
+ ia_css_pipe_util_set_output_frames(bin_out_frame,
+ 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ bin_out_frame,
+ in_frame_local,
+ NULL);
} else {
- ia_css_pipe_util_set_output_frames(bin_out_frame, 0, out_frame[0]);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- bin_out_frame, in_frame_local, NULL);
+ ia_css_pipe_util_set_output_frames(bin_out_frame,
+ 0, out_frame[0]);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ bin_out_frame,
+ in_frame_local,
+ NULL);
}
err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &copy_stage);
+ &stage_desc,
+ &copy_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7602,8 +7464,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
}
}
- if (need_scaler)
- {
+ if (need_scaler) {
struct ia_css_frame *tmp_out_frame = NULL;
struct ia_css_frame *tmp_vf_frame = NULL;
struct ia_css_frame *tmp_in_frame = in_frame;
@@ -7618,10 +7479,11 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
tmp_vf_frame = NULL;
}
- err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
- NULL,
- &yuv_scaler_binary[i],
- &yuv_scaler_stage);
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7632,8 +7494,10 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
if (tmp_vf_frame && (tmp_vf_frame->info.res.width != 0)) {
in_frame = yuv_scaler_stage->args.out_vf_frame;
- err = add_vf_pp_stage(pipe, in_frame, tmp_vf_frame, &vf_pp_binary[j],
- &vf_pp_stage);
+ err = add_vf_pp_stage(pipe, in_frame,
+ tmp_vf_frame,
+ &vf_pp_binary[j],
+ &vf_pp_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7643,12 +7507,11 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
j++;
}
}
- } else if (copy_stage)
- {
+ } else if (copy_stage) {
if (vf_frame[0] && vf_frame[0]->info.res.width != 0) {
in_frame = copy_stage->args.out_vf_frame;
- err = add_vf_pp_stage(pipe, in_frame, vf_frame[0], &vf_pp_binary[0],
- &vf_pp_stage);
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame[0],
+ &vf_pp_binary[0], &vf_pp_stage);
}
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7656,7 +7519,8 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
}
}
- ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
IA_CSS_LEAVE_ERR_PRIVATE(0);
@@ -7665,8 +7529,9 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
static int
create_host_copy_pipeline(struct ia_css_pipe *pipe,
- unsigned int max_input_width,
- struct ia_css_frame *out_frame) {
+ unsigned int max_input_width,
+ struct ia_css_frame *out_frame)
+{
struct ia_css_pipeline *me;
int err = 0;
struct ia_css_pipeline_stage_desc stage_desc;
@@ -7683,16 +7548,10 @@ create_host_copy_pipeline(struct ia_css_pipe *pipe,
out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
if (copy_on_sp(pipe) &&
- pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8)
- {
- ia_css_frame_info_init(
- &out_frame->info,
- JPEG_BYTES,
- 1,
- IA_CSS_FRAME_FORMAT_BINARY_8,
- 0);
- } else if (out_frame->info.format == IA_CSS_FRAME_FORMAT_RAW)
- {
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(&out_frame->info, JPEG_BYTES, 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8, 0);
+ } else if (out_frame->info.format == IA_CSS_FRAME_FORMAT_RAW) {
out_frame->info.raw_bit_depth =
ia_css_pipe_util_pipe_input_format_bpp(pipe);
}
@@ -7702,12 +7561,12 @@ create_host_copy_pipeline(struct ia_css_pipe *pipe,
pipe->mode = IA_CSS_PIPE_ID_COPY;
ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
- IA_CSS_PIPELINE_RAW_COPY, max_input_width);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- NULL);
+ IA_CSS_PIPELINE_RAW_COPY,
+ max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, NULL);
- ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"create_host_copy_pipeline() leave:\n");
@@ -7716,7 +7575,8 @@ create_host_copy_pipeline(struct ia_css_pipe *pipe,
}
static int
-create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) {
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe)
+{
struct ia_css_pipeline *me = &pipe->pipeline;
int err = 0;
struct ia_css_pipeline_stage_desc stage_desc;
@@ -7745,9 +7605,10 @@ create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) {
me->pipe_id = IA_CSS_PIPE_ID_CAPTURE;
pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
- IA_CSS_PIPELINE_ISYS_COPY, max_input_width);
+ IA_CSS_PIPELINE_ISYS_COPY,
+ max_input_width);
err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc, &out_stage);
+ &stage_desc, &out_stage);
if (err)
return err;
@@ -7760,7 +7621,8 @@ create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) {
}
static int
-create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
+create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
+{
struct ia_css_pipeline *me;
int err = 0;
enum ia_css_capture_mode mode;
@@ -7798,7 +7660,8 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->stream);
- assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
me = &pipe->pipeline;
mode = pipe->config.default_capture_config.mode;
@@ -7824,8 +7687,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
/* Construct in_frame info (only in case we have dynamic input) */
need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
#endif
- if (need_in_frameinfo_memory)
- {
+ if (need_in_frameinfo_memory) {
err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
IA_CSS_FRAME_FORMAT_RAW);
if (err) {
@@ -7834,22 +7696,19 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
}
in_frame = &me->in_frame;
- } else
- {
+ } else {
in_frame = NULL;
}
err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
out_frame = &me->out_frame[0];
/* Construct vf_frame info (only in case we have VF) */
- if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
- {
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) {
/* These modes don't support viewfinder output */
vf_frame = NULL;
@@ -7857,22 +7716,20 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0);
vf_frame = &me->vf_frame[0];
}
- } else
- {
+ } else {
vf_frame = NULL;
}
copy_binary = &pipe->pipe_settings.capture.copy_binary;
num_primary_stage = pipe->pipe_settings.capture.num_primary_stage;
- if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY))
- {
+ if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
+
for (i = 0; i < num_primary_stage; i++)
- {
primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i];
- }
+
vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary;
pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary;
anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary;
@@ -7889,43 +7746,51 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
need_ldc = (capture_ldc_binary && capture_ldc_binary->info);
- if (pipe->pipe_settings.capture.copy_binary.info)
- {
+ if (pipe->pipe_settings.capture.copy_binary.info) {
if (raw) {
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
#if defined(ISP2401)
if (!continuous) {
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- out_frames, in_frame, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
} else {
in_frame = pipe->stream->last_pipe->continuous_frames[0];
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- out_frames, in_frame, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
}
#else
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- out_frames, NULL, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ NULL, NULL);
#endif
} else {
- ia_css_pipe_util_set_output_frames(out_frames, 0, in_frame);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
- out_frames, NULL, NULL);
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ in_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ NULL, NULL);
}
err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &current_stage);
+ &stage_desc,
+ &current_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- } else if (pipe->stream->config.continuous)
- {
+ } else if (pipe->stream->config.continuous) {
in_frame = pipe->stream->last_pipe->continuous_frames[0];
}
- if (mode == IA_CSS_CAPTURE_MODE_PRIMARY)
- {
+ if (mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
struct ia_css_frame *local_in_frame = NULL;
struct ia_css_frame *local_out_frame = NULL;
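The re-wrapped calls above also follow checkpatch's alignment check: continuation lines of a long argument list line up with the opening parenthesis, one argument per line when needed, e.g. (taken directly from the hunk):

ia_css_pipe_get_generic_stage_desc(&stage_desc,
                                   copy_binary,
                                   out_frames,
                                   in_frame,
                                   NULL);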
@@ -7953,11 +7818,14 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
* Proper investigation should be done to come up with the clean
* solution.
* */
- ia_css_pipe_get_generic_stage_desc(&stage_desc, primary_binary[i],
- out_frames, local_in_frame, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ primary_binary[i],
+ out_frames,
+ local_in_frame,
+ NULL);
err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &current_stage);
+ &stage_desc,
+ &current_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -7970,22 +7838,21 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
IA_CSS_BINARY_MODE_COPY;
current_stage->args.copy_output = current_stage->args.copy_vf;
} else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
- mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT)
- {
+ mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
- out_frames, in_frame, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc, NULL);
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary,
- out_frames, NULL, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc, NULL);
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -7993,28 +7860,31 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
if (need_pp) {
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
- out_frames, NULL, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ post_isp_binary,
+ out_frames,
+ NULL, NULL);
} else {
- ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
- out_frames, NULL, NULL);
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ post_isp_binary,
+ out_frames,
+ NULL, NULL);
}
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc, &current_stage);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &current_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- } else if (mode == IA_CSS_CAPTURE_MODE_BAYER)
- {
+ } else if (mode == IA_CSS_CAPTURE_MODE_BAYER) {
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
- out_frames, in_frame, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- NULL);
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -8022,49 +7892,48 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
}
#ifndef ISP2401
- if (need_pp && current_stage)
- {
+ if (need_pp && current_stage) {
struct ia_css_frame *local_in_frame = NULL;
local_in_frame = current_stage->args.out_frame[0];
if (need_ldc) {
ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
- out_frames, local_in_frame, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_ldc_binary,
+ out_frames,
+ local_in_frame,
+ NULL);
err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- &current_stage);
+ &stage_desc,
+ &current_stage);
local_in_frame = current_stage->args.out_frame[0];
}
err = add_capture_pp_stage(pipe, me, local_in_frame,
- need_yuv_pp ? NULL : out_frame,
+ need_yuv_pp ? NULL : out_frame,
#else
/* ldc and capture_pp not supported in same pipeline */
- if (need_ldc && current_stage)
- {
+ if (need_ldc && current_stage) {
in_frame = current_stage->args.out_frame[0];
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
- out_frames, in_frame, NULL);
- err = ia_css_pipeline_create_and_add_stage(me,
- &stage_desc,
- NULL);
- } else if (need_pp && current_stage)
- {
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
+ } else if (need_pp && current_stage) {
in_frame = current_stage->args.out_frame[0];
- err = add_capture_pp_stage(pipe, me, in_frame, need_yuv_pp ? NULL : out_frame,
+ err = add_capture_pp_stage(pipe, me, in_frame,
+ need_yuv_pp ? NULL : out_frame,
#endif
- capture_pp_binary,
- &current_stage);
+ capture_pp_binary,
+ &current_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
}
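Note that the #ifndef ISP2401 block just above splices a single add_capture_pp_stage() call across preprocessor branches: the head of the argument list differs per target, while the tail after #endif is shared. What the compiler sees in each configuration (simplified from the hunk):

/* !ISP2401: capture_pp runs after the optional LDC stage */
err = add_capture_pp_stage(pipe, me, local_in_frame,
                           need_yuv_pp ? NULL : out_frame,
                           capture_pp_binary,
                           &current_stage);

/* ISP2401: LDC and capture_pp are mutually exclusive */
err = add_capture_pp_stage(pipe, me, in_frame,
                           need_yuv_pp ? NULL : out_frame,
                           capture_pp_binary,
                           &current_stage);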
- if (need_yuv_pp && current_stage)
- {
+ if (need_yuv_pp && current_stage) {
struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0];
struct ia_css_frame *tmp_out_frame = NULL;
@@ -8074,10 +7943,10 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
else
tmp_out_frame = NULL;
- err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
- NULL,
- &yuv_scaler_binary[i],
- &yuv_scaler_stage);
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame, NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -8096,11 +7965,12 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
* should not be considered as a clean solution. Proper
* investigation should be done to come up with the clean solution.
* */
- if (mode != IA_CSS_CAPTURE_MODE_RAW && mode != IA_CSS_CAPTURE_MODE_BAYER && current_stage && vf_frame)
- {
+ if (mode != IA_CSS_CAPTURE_MODE_RAW &&
+ mode != IA_CSS_CAPTURE_MODE_BAYER &&
+ current_stage && vf_frame) {
in_frame = current_stage->args.out_vf_frame;
err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
- &current_stage);
+ &current_stage);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -8115,7 +7985,8 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
}
static int
-create_host_capture_pipeline(struct ia_css_pipe *pipe) {
+create_host_capture_pipeline(struct ia_css_pipe *pipe)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
@@ -8124,8 +7995,7 @@ create_host_capture_pipeline(struct ia_css_pipe *pipe) {
err = create_host_isyscopy_capture_pipeline(pipe);
else
err = create_host_regular_capture_pipeline(pipe);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
@@ -8135,8 +8005,8 @@ create_host_capture_pipeline(struct ia_css_pipe *pipe) {
return err;
}
-static int capture_start(
- struct ia_css_pipe *pipe) {
+static int capture_start(struct ia_css_pipe *pipe)
+{
struct ia_css_pipeline *me;
int err = 0;
@@ -8151,7 +8021,7 @@ static int capture_start(
me = &pipe->pipeline;
if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
- pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
(pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
if (copy_on_sp(pipe)) {
err = start_copy_on_sp(pipe, &me->out_frame[0]);
@@ -8195,7 +8065,7 @@ static int capture_start(
if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
- pipe->stream->config.mode);
+ pipe->stream->config.mode);
pipe->stream->reconfigure_css_rx = false;
}
#endif
@@ -8206,8 +8076,9 @@ static int capture_start(
static int
sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
- struct ia_css_frame_info *info,
- unsigned int idx) {
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
assert(pipe);
assert(info);
@@ -8216,8 +8087,7 @@ sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
*info = pipe->output_info[idx];
if (copy_on_sp(pipe) &&
- pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8)
- {
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
ia_css_frame_info_init(
info,
JPEG_BYTES,
@@ -8225,8 +8095,7 @@ sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
IA_CSS_FRAME_FORMAT_BINARY_8,
0);
} else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
- info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)
- {
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) {
info->raw_bit_depth =
ia_css_pipe_util_pipe_input_format_bpp(pipe);
}
@@ -8238,9 +8107,10 @@ sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
void
ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
- const unsigned short *data,
- unsigned int width,
- unsigned int height) {
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height)
+{
assert(stream);
ia_css_inputfifo_send_input_frame(
@@ -8251,7 +8121,8 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
}
void
-ia_css_stream_start_input_frame(const struct ia_css_stream *stream) {
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream)
+{
assert(stream);
ia_css_inputfifo_start_frame(
@@ -8262,21 +8133,23 @@ ia_css_stream_start_input_frame(const struct ia_css_stream *stream) {
void
ia_css_stream_send_input_line(const struct ia_css_stream *stream,
- const unsigned short *data,
- unsigned int width,
- const unsigned short *data2,
- unsigned int width2) {
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
assert(stream);
ia_css_inputfifo_send_line(stream->config.channel_id,
- data, width, data2, width2);
+ data, width, data2, width2);
}
void
ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
- enum atomisp_input_format format,
- const unsigned short *data,
- unsigned int width) {
+ enum atomisp_input_format format,
+ const unsigned short *data,
+ unsigned int width)
+{
assert(stream);
if (!data || width == 0)
return;
@@ -8285,14 +8158,16 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
}
void
-ia_css_stream_end_input_frame(const struct ia_css_stream *stream) {
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream)
+{
assert(stream);
ia_css_inputfifo_end_frame(stream->config.channel_id);
}
static void
-append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
+append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
+{
IA_CSS_ENTER_PRIVATE("l = %p, firmware = %p", l, firmware);
if (!l) {
IA_CSS_ERROR("NULL fw_info");
@@ -8307,7 +8182,8 @@ append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
}
static void
-remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
+remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
+{
assert(*l);
assert(firmware);
(void)l;
@@ -8349,12 +8225,12 @@ static int upload_isp_code(struct ia_css_fw_info *firmware)
}
static int
-acc_load_extension(struct ia_css_fw_info *firmware) {
+acc_load_extension(struct ia_css_fw_info *firmware)
+{
int err;
struct ia_css_fw_info *hd = firmware;
- while (hd)
- {
+ while (hd) {
err = upload_isp_code(hd);
if (err)
return err;
@@ -8368,7 +8244,8 @@ acc_load_extension(struct ia_css_fw_info *firmware) {
}
static void
-acc_unload_extension(struct ia_css_fw_info *firmware) {
+acc_unload_extension(struct ia_css_fw_info *firmware)
+{
struct ia_css_fw_info *hd = firmware;
struct ia_css_fw_info *hdn = NULL;
@@ -8392,13 +8269,13 @@ acc_unload_extension(struct ia_css_fw_info *firmware) {
/* Load firmware for extension */
static int
ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
- struct ia_css_fw_info *firmware) {
+ struct ia_css_fw_info *firmware)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
- if ((!firmware) || (!pipe))
- {
+ if ((!firmware) || (!pipe)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -8416,7 +8293,8 @@ ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
/* Unload firmware for extension */
static void
ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
- struct ia_css_fw_info *firmware) {
+ struct ia_css_fw_info *firmware)
+{
IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
if ((!firmware) || (!pipe)) {
@@ -8435,7 +8313,8 @@ ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
}
bool
-ia_css_pipeline_uses_params(struct ia_css_pipeline *me) {
+ia_css_pipeline_uses_params(struct ia_css_pipeline *me)
+{
struct ia_css_pipeline_stage *stage;
assert(me);
@@ -8456,7 +8335,8 @@ ia_css_pipeline_uses_params(struct ia_css_pipeline *me) {
static int
sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
- const void *acc_fw) {
+ const void *acc_fw)
+{
struct ia_css_fw_info *fw = (struct ia_css_fw_info *)acc_fw;
/* In QoS case, load_extension already called, so skipping */
int err = 0;
@@ -8468,14 +8348,13 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
"sh_css_pipeline_add_acc_stage() enter: pipeline=%p, acc_fw=%p\n",
pipeline, acc_fw);
- if (!err)
- {
+ if (!err) {
struct ia_css_pipeline_stage_desc stage_desc;
ia_css_pipe_get_acc_stage_desc(&stage_desc, NULL, fw);
err = ia_css_pipeline_create_and_add_stage(pipeline,
- &stage_desc,
- NULL);
+ &stage_desc,
+ NULL);
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -8488,7 +8367,8 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
* Refer to "sh_css_internal.h" for details.
*/
int ia_css_stream_capture_frame(struct ia_css_stream *stream,
- unsigned int exp_id) {
+ unsigned int exp_id)
+{
struct sh_css_tag_descr tag_descr;
u32 encoded_tag_descr;
int err;
@@ -8526,11 +8406,9 @@ int ia_css_stream_capture_frame(struct ia_css_stream *stream,
* @brief Configure the continuous capture.
* Refer to "sh_css_internal.h" for details.
*/
-int ia_css_stream_capture(
- struct ia_css_stream *stream,
- int num_captures,
- unsigned int skip,
- int offset) {
+int ia_css_stream_capture(struct ia_css_stream *stream, int num_captures,
+ unsigned int skip, int offset)
+{
struct sh_css_tag_descr tag_descr;
unsigned int encoded_tag_descr;
int return_err;
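For context, a hedged usage sketch of the API above. The semantics are inferred from the parameter names and the tagger-based implementation (capture num_captures buffered raw frames, skip frames skipped between captures, offset selecting where in the raw ring to start); the values here are purely illustrative:

/* request 3 raw captures, every other frame, from the newest frame */
int err = ia_css_stream_capture(stream, 3, 1, 0);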
@@ -8593,8 +8471,9 @@ void ia_css_stream_request_flash(struct ia_css_stream *stream)
ia_css_debug_dump_sp_sw_debug_info();
ia_css_debug_dump_debug_info(NULL);
}
- } else
+ } else {
IA_CSS_LOG("SP is not running!");
+ }
#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -8602,7 +8481,8 @@ void ia_css_stream_request_flash(struct ia_css_stream *stream)
}
static void
-sh_css_init_host_sp_control_vars(void) {
+sh_css_init_host_sp_control_vars(void)
+{
const struct ia_css_fw_info *fw;
unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started;
@@ -8643,22 +8523,22 @@ sh_css_init_host_sp_control_vars(void) {
(void)HIVE_ADDR_host_sp_com;
sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
- (uint32_t)(0));
+ (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
+ (uint32_t)(0));
sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(host_sp_queues_initialized),
- (uint32_t)(0));
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(0));
sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(sp_sleep_mode),
- (uint32_t)(0));
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)(0));
sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
- (uint32_t)(false));
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ (uint32_t)(false));
#ifndef ISP2401
sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(sp_stop_copy_preview),
- my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
+ (unsigned int)sp_address_of(sp_stop_copy_preview),
+ my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
#endif
store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
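The re-indented stores above all follow one pattern: resolve a firmware symbol to an SP DMEM address with sp_address_of() and clear it with a 32-bit store, so the SP boots with known control state. The shape, with a hypothetical symbol (my_sp_flag is a placeholder for a symbol in the SP firmware map):

sp_dmem_store_uint32(SP0_ID,
                     (unsigned int)sp_address_of(my_sp_flag),
                     (uint32_t)(0));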
@@ -8685,8 +8565,8 @@ void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
}
void
-ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config
- *extra_config) {
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config)
+{
if (!extra_config) {
IA_CSS_ERROR("NULL input parameter");
return;
@@ -8716,11 +8596,11 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config)
}
static int
-ia_css_acc_pipe_create(struct ia_css_pipe *pipe) {
+ia_css_acc_pipe_create(struct ia_css_pipe *pipe)
+{
int err = 0;
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_ERROR("NULL input parameter");
return -EINVAL;
}
@@ -8730,9 +8610,7 @@ ia_css_acc_pipe_create(struct ia_css_pipe *pipe) {
pipe->config.acc_num_execs = 1;
if (pipe->config.acc_extension)
- {
err = ia_css_pipe_load_extension(pipe, pipe->config.acc_extension);
- }
return err;
}
@@ -8751,9 +8629,8 @@ int ia_css_pipe_create(const struct ia_css_pipe_config *config,
err = ia_css_pipe_create_extra(config, NULL, pipe);
- if (err == 0) {
+ if (err == 0)
IA_CSS_LOG("pipe created successfully = %p", *pipe);
- }
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -8762,8 +8639,9 @@ int ia_css_pipe_create(const struct ia_css_pipe_config *config,
int
ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
- const struct ia_css_pipe_extra_config *extra_config,
- struct ia_css_pipe **pipe) {
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe)
+{
int err = -EINVAL;
struct ia_css_pipe *internal_pipe = NULL;
unsigned int i;
@@ -8771,14 +8649,12 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe);
/* do not allow creating more than the maximum number of pipes */
- if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX)
- {
+ if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX) {
IA_CSS_LEAVE_ERR_PRIVATE(-ENOSPC);
return -EINVAL;
}
- if ((!pipe) || (!config))
- {
+ if ((!pipe) || (!config)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
@@ -8787,8 +8663,7 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
ia_css_debug_dump_pipe_extra_config(extra_config);
err = create_pipe(config->mode, &internal_pipe, false);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
@@ -8800,8 +8675,7 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
else
ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);
- if (config->mode == IA_CSS_PIPE_MODE_ACC)
- {
+ if (config->mode == IA_CSS_PIPE_MODE_ACC) {
/* Temporary hack to migrate acceleration to CSS 2.0.
* In the future the code for all pipe types should be
* unified. */
@@ -8828,15 +8702,13 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
set bayer_ds_out_res equal to IF output resolution(IF may do cropping on
sensor output) or use default decimation factor 1. */
if (internal_pipe->extra_config.enable_raw_binning &&
- internal_pipe->config.bayer_ds_out_res.width)
- {
+ internal_pipe->config.bayer_ds_out_res.width) {
/* fill some code here, if no code is needed, please remove it during integration */
}
/* YUV downscaling */
if ((internal_pipe->config.vf_pp_in_res.width ||
- internal_pipe->config.capt_pp_in_res.width))
- {
+ internal_pipe->config.capt_pp_in_res.width)) {
enum ia_css_frame_format format;
if (internal_pipe->config.vf_pp_in_res.width) {
@@ -8857,8 +8729,7 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
}
}
if (internal_pipe->config.vf_pp_in_res.width &&
- internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
- {
+ internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
ia_css_frame_info_init(
&internal_pipe->vf_yuv_ds_input_info,
internal_pipe->config.vf_pp_in_res.width,
@@ -8866,8 +8737,7 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
IA_CSS_FRAME_FORMAT_YUV_LINE, 0);
}
/* handle bayer downscaling output info */
- if (internal_pipe->config.bayer_ds_out_res.width)
- {
+ if (internal_pipe->config.bayer_ds_out_res.width) {
ia_css_frame_info_init(
&internal_pipe->bds_output_info,
internal_pipe->config.bayer_ds_out_res.width,
@@ -8876,8 +8746,7 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
}
/* handle output info, assume always needed */
- for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
- {
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
if (internal_pipe->config.output_info[i].res.width) {
err = sh_css_pipe_configure_output(
internal_pipe,
@@ -8913,10 +8782,9 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
}
}
}
- if (internal_pipe->config.acc_extension)
- {
+ if (internal_pipe->config.acc_extension) {
err = ia_css_pipe_load_extension(internal_pipe,
- internal_pipe->config.acc_extension);
+ internal_pipe->config.acc_extension);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
kvfree(internal_pipe);
@@ -8934,18 +8802,16 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
int
ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
- struct ia_css_pipe_info *pipe_info) {
+ struct ia_css_pipe_info *pipe_info)
+{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_pipe_get_info()\n");
- assert(pipe_info);
- if (!pipe_info)
- {
+ if (!pipe_info) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
"ia_css_pipe_get_info: pipe_info cannot be NULL\n");
return -EINVAL;
}
- if (!pipe || !pipe->stream)
- {
+ if (!pipe || !pipe->stream) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
"ia_css_pipe_get_info: ia_css_stream_create needs to be called before ia_css_[stream/pipe]_get_info\n");
return -EINVAL;
@@ -8972,41 +8838,37 @@ bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info)
int
ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
- int pin_index,
- enum ia_css_frame_format new_format) {
+ int pin_index,
+ enum ia_css_frame_format new_format)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_formats = %d", pipe, pin_index, new_format);
- if (!pipe)
- {
+ if (!pipe) {
IA_CSS_ERROR("pipe is not set");
err = -EINVAL;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- if (0 != pin_index && 1 != pin_index)
- {
+ if (0 != pin_index && 1 != pin_index) {
IA_CSS_ERROR("pin index is not valid");
err = -EINVAL;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- if (new_format != IA_CSS_FRAME_FORMAT_NV12_TILEY)
- {
+ if (new_format != IA_CSS_FRAME_FORMAT_NV12_TILEY) {
IA_CSS_ERROR("new format is not valid");
err = -EINVAL;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
- } else
- {
+ } else {
err = ia_css_pipe_check_format(pipe, new_format);
if (!err) {
- if (pin_index == 0) {
+ if (pin_index == 0)
pipe->output_info[0].format = new_format;
- } else {
+ else
pipe->vf_output_info[0].format = new_format;
- }
}
}
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -9016,7 +8878,8 @@ ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
#if !defined(ISP2401)
/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
static int
-ia_css_stream_configure_rx(struct ia_css_stream *stream) {
+ia_css_stream_configure_rx(struct ia_css_stream *stream)
+{
struct ia_css_input_port *config;
assert(stream);
@@ -9045,11 +8908,10 @@ ia_css_stream_configure_rx(struct ia_css_stream *stream) {
if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
else
- {
/* not implemented yet, requires extension of the rx_cfg_t
* struct */
return -EINVAL;
- }
+
stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
stream->reconfigure_css_rx = true;
return 0;
@@ -9057,10 +8919,9 @@ ia_css_stream_configure_rx(struct ia_css_stream *stream) {
#endif
static struct ia_css_pipe *
-find_pipe(struct ia_css_pipe *pipes[],
- unsigned int num_pipes,
- enum ia_css_pipe_mode mode,
- bool copy_pipe) {
+find_pipe(struct ia_css_pipe *pipes[], unsigned int num_pipes,
+ enum ia_css_pipe_mode mode, bool copy_pipe)
+{
unsigned int i;
assert(pipes);
@@ -9076,24 +8937,21 @@ find_pipe(struct ia_css_pipe *pipes[],
}
static int
-ia_css_acc_stream_create(struct ia_css_stream *stream) {
+ia_css_acc_stream_create(struct ia_css_stream *stream)
+{
int i;
int err = 0;
- assert(stream);
IA_CSS_ENTER_PRIVATE("stream = %p", stream);
- if (!stream)
- {
+ if (!stream) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
- for (i = 0; i < stream->num_pipes; i++)
- {
+ for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *pipe = stream->pipes[i];
- assert(pipe);
if (!pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
@@ -9104,14 +8962,12 @@ ia_css_acc_stream_create(struct ia_css_stream *stream) {
/* Map SP threads before doing anything. */
err = map_sp_threads(stream, true);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- for (i = 0; i < stream->num_pipes; i++)
- {
+ for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *pipe = stream->pipes[i];
assert(pipe);
@@ -9119,8 +8975,7 @@ ia_css_acc_stream_create(struct ia_css_stream *stream) {
}
err = create_host_pipeline_structure(stream);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
@@ -9134,7 +8989,8 @@ ia_css_acc_stream_create(struct ia_css_stream *stream) {
static int
metadata_info_init(const struct ia_css_metadata_config *mdc,
- struct ia_css_metadata_info *md) {
+ struct ia_css_metadata_info *md)
+{
/* Either both width and height should be set or neither */
if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0))
return -EINVAL;
@@ -9161,7 +9017,7 @@ static int check_pipe_resolutions(const struct ia_css_pipe *pipe)
}
if (ia_css_util_check_res(pipe->config.input_effective_res.width,
- pipe->config.input_effective_res.height) != 0) {
+ pipe->config.input_effective_res.height) != 0) {
IA_CSS_ERROR("effective resolution not supported");
err = -EINVAL;
goto EXIT;
@@ -9169,7 +9025,7 @@ static int check_pipe_resolutions(const struct ia_css_pipe *pipe)
if (!ia_css_util_resolution_is_zero(
pipe->stream->config.input_config.input_res)) {
if (!ia_css_util_res_leq(pipe->config.input_effective_res,
- pipe->stream->config.input_config.input_res)) {
+ pipe->stream->config.input_config.input_res)) {
IA_CSS_ERROR("effective resolution is larger than input resolution");
err = -EINVAL;
goto EXIT;
@@ -9192,9 +9048,10 @@ EXIT:
int
ia_css_stream_create(const struct ia_css_stream_config *stream_config,
- int num_pipes,
- struct ia_css_pipe *pipes[],
- struct ia_css_stream **stream) {
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream)
+{
struct ia_css_pipe *curr_pipe;
struct ia_css_stream *curr_stream = NULL;
bool spcopyonly;
@@ -9213,8 +9070,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* some checks */
if (num_pipes == 0 ||
!stream ||
- !pipes)
- {
+ !pipes) {
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9223,8 +9079,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
#if !defined(ISP2401)
/* We don't support metadata for JPEG stream, since they both use str2mem */
if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
- stream_config->metadata_config.resolution.height > 0)
- {
+ stream_config->metadata_config.resolution.height > 0) {
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9232,8 +9087,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
#endif
#ifdef ISP2401
- if (stream_config->online && stream_config->pack_raw_pixels)
- {
+ if (stream_config->online && stream_config->pack_raw_pixels) {
IA_CSS_LOG("online and pack raw is invalid on input system 2401");
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
@@ -9288,16 +9142,14 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* Currently we only supported metadata up to a certain size. */
err = metadata_info_init(&stream_config->metadata_config, &md_info);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
/* allocate the stream instance */
curr_stream = kzalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
- if (!curr_stream)
- {
+ if (!curr_stream) {
err = -ENOMEM;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9308,8 +9160,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate pipes */
curr_stream->num_pipes = num_pipes;
curr_stream->pipes = kcalloc(num_pipes, sizeof(struct ia_css_pipe *), GFP_KERNEL);
- if (!curr_stream->pipes)
- {
+ if (!curr_stream->pipes) {
curr_stream->num_pipes = 0;
kfree(curr_stream);
curr_stream = NULL;
@@ -9332,8 +9183,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
#endif
#ifdef ISP2401
- if (curr_stream->config.online)
- {
+ if (curr_stream->config.online) {
curr_stream->config.source.port.num_lanes =
stream_config->source.port.num_lanes;
curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
@@ -9351,8 +9201,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
curr_stream->config.lock_all);
/* copy mode specific stuff */
- switch (curr_stream->config.mode)
- {
+ switch (curr_stream->config.mode) {
case IA_CSS_INPUT_MODE_SENSOR:
case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
#if !defined(ISP2401)
@@ -9362,11 +9211,11 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
case IA_CSS_INPUT_MODE_TPG:
#if !defined(ISP2401)
IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
- curr_stream->config.source.tpg.x_mask,
- curr_stream->config.source.tpg.y_mask,
- curr_stream->config.source.tpg.x_delta,
- curr_stream->config.source.tpg.y_delta,
- curr_stream->config.source.tpg.xy_mask);
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
sh_css_sp_configure_tpg(
curr_stream->config.source.tpg.x_mask,
@@ -9391,17 +9240,14 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
}
#ifdef ISP2401
- err = aspect_ratio_crop_init(curr_stream,
- pipes,
- &aspect_ratio_crop_enabled);
- if (err)
- {
+ err = aspect_ratio_crop_init(curr_stream, pipes,
+ &aspect_ratio_crop_enabled);
+ if (err) {
IA_CSS_LEAVE_ERR(err);
goto ERR;
}
#endif
- for (i = 0; i < num_pipes; i++)
- {
+ for (i = 0; i < num_pipes; i++) {
struct ia_css_resolution effective_res;
curr_pipe = pipes[i];
@@ -9432,8 +9278,8 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
curr_pipe->config.input_effective_res = effective_res;
}
IA_CSS_LOG("effective_res=%dx%d",
- effective_res.width,
- effective_res.height);
+ effective_res.width,
+ effective_res.height);
}
if (IS_ISP2401) {
@@ -9441,9 +9287,8 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (pipes[i]->config.mode != IA_CSS_PIPE_MODE_ACC &&
pipes[i]->config.mode != IA_CSS_PIPE_MODE_COPY) {
err = check_pipe_resolutions(pipes[i]);
- if (err) {
+ if (err)
goto ERR;
- }
}
}
}
@@ -9453,32 +9298,28 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
goto ERR;
IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs);
- if (num_pipes == 1 && pipes[0]->config.mode == IA_CSS_PIPE_MODE_ACC)
- {
+ if (num_pipes == 1 && pipes[0]->config.mode == IA_CSS_PIPE_MODE_ACC) {
*stream = curr_stream;
err = ia_css_acc_stream_create(curr_stream);
goto ERR;
}
/* sensor binning */
- if (!spcopyonly)
- {
+ if (!spcopyonly) {
sensor_binning_changed =
sh_css_params_set_binning_factor(curr_stream,
- curr_stream->config.sensor_binning_factor);
- } else
- {
+ curr_stream->config.sensor_binning_factor);
+ } else {
sensor_binning_changed = false;
}
IA_CSS_LOG("sensor_binning=%d, changed=%d",
- curr_stream->config.sensor_binning_factor, sensor_binning_changed);
+ curr_stream->config.sensor_binning_factor, sensor_binning_changed);
/* loop over pipes */
IA_CSS_LOG("num_pipes=%d", num_pipes);
curr_stream->cont_capt = false;
/* Temporary hack: we give the preview pipe a reference to the capture
* pipe in continuous capture mode. */
- if (curr_stream->config.continuous)
- {
+ if (curr_stream->config.continuous) {
/* Search for the preview pipe and create the copy pipe */
struct ia_css_pipe *preview_pipe;
struct ia_css_pipe *video_pipe;
@@ -9496,17 +9337,18 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* Create copy pipe here, since it may not be exposed to the driver */
preview_pipe = find_pipe(pipes, num_pipes,
- IA_CSS_PIPE_MODE_PREVIEW, false);
+ IA_CSS_PIPE_MODE_PREVIEW, false);
video_pipe = find_pipe(pipes, num_pipes,
- IA_CSS_PIPE_MODE_VIDEO, false);
- acc_pipe = find_pipe(pipes, num_pipes,
- IA_CSS_PIPE_MODE_ACC, false);
+ IA_CSS_PIPE_MODE_VIDEO, false);
+ acc_pipe = find_pipe(pipes, num_pipes, IA_CSS_PIPE_MODE_ACC,
+ false);
if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt)
curr_stream->cont_capt =
false; /* preview + QoS case will not need cont_capt switch */
if (curr_stream->cont_capt) {
capture_pipe = find_pipe(pipes, num_pipes,
- IA_CSS_PIPE_MODE_CAPTURE, false);
+ IA_CSS_PIPE_MODE_CAPTURE,
+ false);
if (!capture_pipe) {
err = -EINVAL;
goto ERR;
@@ -9526,9 +9368,9 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
copy_pipe->stream = curr_stream;
}
- if (preview_pipe && curr_stream->cont_capt) {
+ if (preview_pipe && curr_stream->cont_capt)
preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;
- }
+
if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
if (err)
@@ -9537,15 +9379,13 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
copy_pipe->stream = curr_stream;
}
- if (video_pipe && curr_stream->cont_capt) {
+ if (video_pipe && curr_stream->cont_capt)
video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
- }
- if (preview_pipe && acc_pipe) {
+
+ if (preview_pipe && acc_pipe)
preview_pipe->pipe_settings.preview.acc_pipe = acc_pipe;
- }
}
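The wiring established above for a continuous stream, in short (assignments as in the code; the usual preview + capture pipe pair assumed): the internally created copy pipe feeds the raw ring, and the preview (or video) pipe is handed a back-reference to the capture pipe so offline capture can tag buffered raw frames:

copy_pipe->stream = curr_stream;
preview_pipe->pipe_settings.preview.copy_pipe    = copy_pipe;
preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;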
- for (i = 0; i < num_pipes; i++)
- {
+ for (i = 0; i < num_pipes; i++) {
curr_pipe = pipes[i];
/* set current stream */
curr_pipe->stream = curr_stream;
@@ -9566,8 +9406,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
}
/* now pipes have been configured, info should be available */
- for (i = 0; i < num_pipes; i++)
- {
+ for (i = 0; i < num_pipes; i++) {
struct ia_css_pipe_info *pipe_info = NULL;
curr_pipe = pipes[i];
@@ -9591,10 +9430,12 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (!spcopyonly) {
if (!IS_ISP2401)
err = sh_css_pipe_get_shading_info(curr_pipe,
- &pipe_info->shading_info, NULL);
+ &pipe_info->shading_info,
+ NULL);
else
err = sh_css_pipe_get_shading_info(curr_pipe,
- &pipe_info->shading_info, &curr_pipe->config);
+ &pipe_info->shading_info,
+ &curr_pipe->config);
if (err)
goto ERR;
@@ -9604,7 +9445,8 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
goto ERR;
for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
sh_css_pipe_get_viewfinder_frame_info(curr_pipe,
- &pipe_info->vf_output_info[j], j);
+ &pipe_info->vf_output_info[j],
+ j);
if (err)
goto ERR;
}
@@ -9617,22 +9459,19 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* Map SP threads before doing anything. */
err = map_sp_threads(curr_stream, true);
- if (err)
- {
+ if (err) {
IA_CSS_LOG("map_sp_threads: return_err=%d", err);
goto ERR;
}
- for (i = 0; i < num_pipes; i++)
- {
+ for (i = 0; i < num_pipes; i++) {
curr_pipe = pipes[i];
ia_css_pipe_map_queue(curr_pipe, true);
}
/* Create host side pipeline objects without stages */
err = create_host_pipeline_structure(curr_stream);
- if (err)
- {
+ if (err) {
IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err);
goto ERR;
}
@@ -9670,13 +9509,13 @@ ERR:
}
int
-ia_css_stream_destroy(struct ia_css_stream *stream) {
+ia_css_stream_destroy(struct ia_css_stream *stream)
+{
int i;
int err = 0;
IA_CSS_ENTER_PRIVATE("stream = %p", stream);
- if (!stream)
- {
+ if (!stream) {
err = -EINVAL;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -9685,8 +9524,7 @@ ia_css_stream_destroy(struct ia_css_stream *stream) {
ia_css_stream_isp_parameters_uninit(stream);
if ((stream->last_pipe) &&
- ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num))
- {
+ ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
#if defined(ISP2401)
bool free_mpi;
@@ -9748,8 +9586,7 @@ ia_css_stream_destroy(struct ia_css_stream *stream) {
}
/* remove references from pipes to stream */
- for (i = 0; i < stream->num_pipes; i++)
- {
+ for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *entry = stream->pipes[i];
assert(entry);
@@ -9778,8 +9615,7 @@ ia_css_stream_destroy(struct ia_css_stream *stream) {
/* working mode: take out of the seed list */
if (my_css_save.mode == sh_css_mode_working) {
for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
- if (my_css_save.stream_seeds[i].stream == stream)
- {
+ if (my_css_save.stream_seeds[i].stream == stream) {
IA_CSS_LOG("took out stream %d", i);
my_css_save.stream_seeds[i].stream = NULL;
break;
@@ -9795,7 +9631,8 @@ ia_css_stream_destroy(struct ia_css_stream *stream) {
int
ia_css_stream_get_info(const struct ia_css_stream *stream,
- struct ia_css_stream_info *stream_info) {
+ struct ia_css_stream_info *stream_info)
+{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_info: enter/exit\n");
assert(stream);
assert(stream_info);
@@ -9811,59 +9648,58 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
* The stream handle is used to identify the correct entry in the css_save struct
*/
int
-ia_css_stream_load(struct ia_css_stream *stream) {
- if (!IS_ISP2401) {
- int i;
- int err;
-
- assert(stream);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter,\n");
- for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
- {
- if (my_css_save.stream_seeds[i].stream == stream) {
- int j;
-
- for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
- if ((err = ia_css_pipe_create(&my_css_save.stream_seeds[i].pipe_config[j],
- &my_css_save.stream_seeds[i].pipes[j])) != 0) {
- if (j) {
- int k;
+ia_css_stream_load(struct ia_css_stream *stream)
+{
+ int i, j, err;
- for (k = 0; k < j; k++)
- ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
- }
- return err;
- }
- }
- err = ia_css_stream_create(&my_css_save.stream_seeds[i].stream_config,
- my_css_save.stream_seeds[i].num_pipes,
- my_css_save.stream_seeds[i].pipes,
- &my_css_save.stream_seeds[i].stream);
- if (err) {
- ia_css_stream_destroy(stream);
- for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
- ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
- return err;
- }
- break;
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit,\n");
- return 0;
- } else {
+ if (IS_ISP2401) {
/* TODO remove function - DEPRECATED */
(void)stream;
return -ENOTSUPP;
}
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter,\n");
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream != stream)
+ continue;
+
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
+ int k;
+
+ err = ia_css_pipe_create(&my_css_save.stream_seeds[i].pipe_config[j],
+ &my_css_save.stream_seeds[i].pipes[j]);
+ if (!err)
+ continue;
+
+ for (k = 0; k < j; k++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
+ return err;
+ }
+ err = ia_css_stream_create(&my_css_save.stream_seeds[i].stream_config,
+ my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes,
+ &my_css_save.stream_seeds[i].stream);
+ if (!err)
+ break;
+
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ return err;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit,\n");
+ return 0;
}
int
-ia_css_stream_start(struct ia_css_stream *stream) {
+ia_css_stream_start(struct ia_css_stream *stream)
+{
int err = 0;
IA_CSS_ENTER("stream = %p", stream);
- if ((!stream) || (!stream->last_pipe))
- {
+ if ((!stream) || (!stream->last_pipe)) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
@@ -9873,8 +9709,7 @@ ia_css_stream_start(struct ia_css_stream *stream) {
/* Create host side pipeline. */
err = create_host_pipeline(stream);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR(err);
return err;
}
@@ -9887,8 +9722,7 @@ ia_css_stream_start(struct ia_css_stream *stream) {
#if !defined(ISP2401)
/* Initialize mipi size checks */
- if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
- {
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
unsigned int idx;
unsigned int port = (unsigned int)(stream->config.source.port.port);
@@ -9899,8 +9733,7 @@ ia_css_stream_start(struct ia_css_stream *stream) {
}
#endif
- if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)
- {
+ if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
err = sh_css_config_input_network(stream);
if (err)
return err;
@@ -9912,7 +9745,8 @@ ia_css_stream_start(struct ia_css_stream *stream) {
}
int
-ia_css_stream_stop(struct ia_css_stream *stream) {
+ia_css_stream_stop(struct ia_css_stream *stream)
+{
int err = 0;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n");
@@ -9923,22 +9757,19 @@ ia_css_stream_stop(struct ia_css_stream *stream) {
#if !defined(ISP2401)
/* De-initialize mipi size checks */
- if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
- {
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
unsigned int idx;
unsigned int port = (unsigned int)(stream->config.source.port.port);
- for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++)
sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
- }
}
#endif
- if (!IS_ISP2401) {
+ if (!IS_ISP2401)
err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
- } else {
+ else
err = sh_css_pipes_stop(stream);
- }
if (err)
return err;
@@ -9951,16 +9782,16 @@ ia_css_stream_stop(struct ia_css_stream *stream) {
}
bool
-ia_css_stream_has_stopped(struct ia_css_stream *stream) {
+ia_css_stream_has_stopped(struct ia_css_stream *stream)
+{
bool stopped;
assert(stream);
- if (!IS_ISP2401) {
+ if (!IS_ISP2401)
stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
- } else {
+ else
stopped = sh_css_pipes_have_stopped(stream);
- }
return stopped;
}
@@ -9971,7 +9802,8 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream) {
* The stream handle is used to identify the correct entry in the css_save struct
*/
int
-ia_css_stream_unload(struct ia_css_stream *stream) {
+ia_css_stream_unload(struct ia_css_stream *stream)
+{
int i;
assert(stream);
@@ -9979,8 +9811,7 @@ ia_css_stream_unload(struct ia_css_stream *stream) {
/* some checks */
assert(stream);
for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
- if (my_css_save.stream_seeds[i].stream == stream)
- {
+ if (my_css_save.stream_seeds[i].stream == stream) {
int j;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -10000,7 +9831,8 @@ ia_css_stream_unload(struct ia_css_stream *stream) {
int
ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
- enum ia_css_pipe_id *pipe_id) {
+ enum ia_css_pipe_id *pipe_id)
+{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_temp_pipe_to_pipe_id() enter/exit\n");
if (pipe)
*pipe_id = pipe->mode;
@@ -10011,18 +9843,21 @@ ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
}
enum atomisp_input_format
-ia_css_stream_get_format(const struct ia_css_stream *stream) {
+ia_css_stream_get_format(const struct ia_css_stream *stream)
+{
return stream->config.input_config.format;
}
bool
-ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream) {
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream)
+{
return (stream->config.pixels_per_clock == 2);
}
struct ia_css_binary *
ia_css_stream_get_shading_correction_binary(const struct ia_css_stream
- *stream) {
+ *stream)
+{
struct ia_css_pipe *pipe;
assert(stream);
@@ -10040,7 +9875,8 @@ ia_css_stream_get_shading_correction_binary(const struct ia_css_stream
}
struct ia_css_binary *
-ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream) {
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream)
+{
int i;
struct ia_css_pipe *video_pipe = NULL;
@@ -10059,7 +9895,8 @@ ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream) {
}
struct ia_css_binary *
-ia_css_stream_get_3a_binary(const struct ia_css_stream *stream) {
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream)
+{
struct ia_css_pipe *pipe;
struct ia_css_binary *s3a_binary = NULL;
@@ -10081,7 +9918,8 @@ ia_css_stream_get_3a_binary(const struct ia_css_stream *stream) {
int
ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
- unsigned int output_padded_width) {
+ unsigned int output_padded_width)
+{
struct ia_css_pipe *pipe;
assert(stream);
@@ -10098,7 +9936,8 @@ ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
}
static struct ia_css_binary *
-ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe)
+{
struct ia_css_binary *binary = NULL;
assert(pipe);
@@ -10143,7 +9982,8 @@ ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe) {
}
static struct ia_css_binary *
-ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe)
+{
struct ia_css_binary *binary = NULL;
assert(pipe);
@@ -10166,9 +10006,9 @@ ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe) {
}
}
} else if (pipe->config.default_capture_config.mode ==
- IA_CSS_CAPTURE_MODE_BAYER)
+ IA_CSS_CAPTURE_MODE_BAYER) {
binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
- else if (pipe->config.default_capture_config.mode ==
+ } else if (pipe->config.default_capture_config.mode ==
IA_CSS_CAPTURE_MODE_ADVANCED ||
pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
@@ -10190,7 +10030,8 @@ ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe) {
}
static struct ia_css_binary *
-ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe)
+{
struct ia_css_binary *binary = NULL;
assert(pipe);
@@ -10210,14 +10051,16 @@ ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe) {
}
struct ia_css_pipeline *
-ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe)
+{
assert(pipe);
return (struct ia_css_pipeline *)&pipe->pipeline;
}
unsigned int
-ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe)
+{
assert(pipe);
/* KW was not sure this function was not returning a value
@@ -10234,7 +10077,8 @@ ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe) {
}
unsigned int
-ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe) {
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe)
+{
assert(pipe);
return (unsigned int)pipe->config.isp_pipe_version;
@@ -10243,7 +10087,8 @@ ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe) {
#define SP_START_TIMEOUT_US 30000000
int
-ia_css_start_sp(void) {
+ia_css_start_sp(void)
+{
unsigned long timeout;
int err = 0;
@@ -10252,13 +10097,11 @@ ia_css_start_sp(void) {
/* waiting for the SP is completely started */
timeout = SP_START_TIMEOUT_US;
- while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout)
- {
+ while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout) {
timeout--;
udelay(1);
}
- if (timeout == 0)
- {
+ if (timeout == 0) {
IA_CSS_ERROR("timeout during SP initialization");
return -EINVAL;
}
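The wait above is the counted busy-poll idiom: SP_START_TIMEOUT_US iterations of udelay(1) bound the wait at roughly 30 seconds of accumulated delay (a floor, since each poll adds overhead). Generic shape of the loop, with condition() standing in for the spctrl state check:

timeout = SP_START_TIMEOUT_US;
while (!condition() && timeout) {
        timeout--;
        udelay(1);
}
if (timeout == 0)
        return -EINVAL; /* this driver reports the timeout as -EINVAL */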
@@ -10286,14 +10129,14 @@ ia_css_start_sp(void) {
#define SP_SHUTDOWN_TIMEOUT_US 200000
int
-ia_css_stop_sp(void) {
+ia_css_stop_sp(void)
+{
unsigned long timeout;
int err = 0;
IA_CSS_ENTER("void");
- if (!sh_css_sp_is_running())
- {
+ if (!sh_css_sp_is_running()) {
err = -EINVAL;
IA_CSS_LEAVE("SP already stopped : return_err=%d", err);
@@ -10305,8 +10148,7 @@ ia_css_stop_sp(void) {
if (!IS_ISP2401) {
sh_css_write_host2sp_command(host2sp_cmd_terminate);
} else {
- if (!sh_css_write_host2sp_command(host2sp_cmd_terminate))
- {
+ if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
ia_css_debug_dump_sp_sw_debug_info();
ia_css_debug_dump_debug_info(NULL);
@@ -10316,27 +10158,23 @@ ia_css_stop_sp(void) {
sh_css_sp_set_sp_running(false);
timeout = SP_SHUTDOWN_TIMEOUT_US;
- while (!ia_css_spctrl_is_idle(SP0_ID) && timeout)
- {
+ while (!ia_css_spctrl_is_idle(SP0_ID) && timeout) {
timeout--;
udelay(1);
}
if ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED))
IA_CSS_WARNING("SP has not terminated (SW)");
- if (timeout == 0)
- {
+ if (timeout == 0) {
IA_CSS_WARNING("SP is not idle");
ia_css_debug_dump_sp_sw_debug_info();
}
timeout = SP_SHUTDOWN_TIMEOUT_US;
- while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout)
- {
+ while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout) {
timeout--;
udelay(1);
}
- if (timeout == 0)
- {
+ if (timeout == 0) {
IA_CSS_WARNING("ISP is not idle");
ia_css_debug_dump_sp_sw_debug_info();
}
@@ -10351,7 +10189,8 @@ ia_css_stop_sp(void) {
}
int
-ia_css_update_continuous_frames(struct ia_css_stream *stream) {
+ia_css_update_continuous_frames(struct ia_css_stream *stream)
+{
struct ia_css_pipe *pipe;
unsigned int i;
@@ -10359,8 +10198,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream) {
IA_CSS_DEBUG_TRACE,
"sh_css_update_continuous_frames() enter:\n");
- if (!stream)
- {
+ if (!stream) {
ia_css_debug_dtrace(
IA_CSS_DEBUG_TRACE,
"sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
@@ -10371,10 +10209,9 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream) {
for (i = stream->config.init_num_cont_raw_buf;
i < stream->config.target_num_cont_raw_buf; i++)
- {
sh_css_update_host2sp_offline_frame(i,
pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
- }
+
sh_css_update_host2sp_cont_num_raw_frames
(stream->config.target_num_cont_raw_buf, true);
ia_css_debug_dtrace(
@@ -10500,7 +10337,8 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
#if CONFIG_ON_FRAME_ENQUEUE()
static int set_config_on_frame_enqueue(struct ia_css_frame_info
- *info, struct frame_data_wrapper *frame) {
+ *info, struct frame_data_wrapper *frame)
+{
frame->config_on_frame_enqueue.padded_width = 0;
/* currently we support configuration on frame enqueue only on YUV formats */
@@ -10508,11 +10346,11 @@ static int set_config_on_frame_enqueue(struct ia_css_frame_info
switch (info->format) {
case IA_CSS_FRAME_FORMAT_YUV420:
case IA_CSS_FRAME_FORMAT_NV12:
- if (info->padded_width > info->res.width) {
+ if (info->padded_width > info->res.width)
frame->config_on_frame_enqueue.padded_width = info->padded_width;
- } else if ((info->padded_width < info->res.width) && (info->padded_width > 0)) {
+ else if ((info->padded_width < info->res.width) && (info->padded_width > 0))
return -EINVAL;
- }
+
/* nothing to do if width == padded width or padded width is zeroed (the same) */
break;
default:
@@ -10524,22 +10362,21 @@ static int set_config_on_frame_enqueue(struct ia_css_frame_info
#endif
int
-ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id) {
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
+{
int ret;
IA_CSS_ENTER("");
/* Only continuous streams have a tagger to which we can send the
* unlock message. */
- if (!stream || !stream->config.continuous)
- {
+ if (!stream || !stream->config.continuous) {
IA_CSS_ERROR("invalid stream pointer");
return -EINVAL;
}
if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID ||
- exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID)
- {
+ exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID) {
IA_CSS_ERROR("invalid exposure ID: %d\n", exp_id);
return -EINVAL;
}
@@ -10558,7 +10395,8 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id) {
*/
int
ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
- bool enable) {
+ bool enable)
+{
unsigned int thread_id;
struct ia_css_pipeline_stage *stage;
int err = 0;
@@ -10566,20 +10404,16 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
IA_CSS_ENTER("");
/* Parameter Check */
- if (!pipe || !pipe->stream)
- {
+ if (!pipe || !pipe->stream) {
IA_CSS_ERROR("Invalid Pipe.");
err = -EINVAL;
- } else if (!(pipe->config.acc_extension))
- {
+ } else if (!(pipe->config.acc_extension)) {
IA_CSS_ERROR("Invalid Pipe(No Extension Firmware)");
err = -EINVAL;
- } else if (!sh_css_sp_is_running())
- {
+ } else if (!sh_css_sp_is_running()) {
IA_CSS_ERROR("Leaving: queue unavailable.");
err = -EBUSY;
- } else
- {
+ } else {
/* Query the threadid and stage_num for the Extension firmware*/
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
@@ -10607,7 +10441,8 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
*/
int
ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
- bool *enable) {
+ bool *enable)
+{
struct ia_css_pipeline_stage *stage;
unsigned int thread_id;
int err = 0;
@@ -10615,20 +10450,16 @@ ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
IA_CSS_ENTER("");
/* Parameter Check */
- if (!pipe || !pipe->stream)
- {
+ if (!pipe || !pipe->stream) {
IA_CSS_ERROR("Invalid Pipe.");
err = -EINVAL;
- } else if (!(pipe->config.acc_extension))
- {
+ } else if (!(pipe->config.acc_extension)) {
IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
err = -EINVAL;
- } else if (!sh_css_sp_is_running())
- {
+ } else if (!sh_css_sp_is_running()) {
IA_CSS_ERROR("Leaving: queue unavailable.");
err = -EBUSY;
- } else
- {
+ } else {
/* Query the threadid and stage_num corresponding to the Extension firmware*/
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
@@ -10646,9 +10477,10 @@ ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
/* ISP2401 */
int
ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
- u32 fw_handle,
- struct ia_css_isp_param_css_segments *css_seg,
- struct ia_css_isp_param_isp_segments *isp_seg) {
+ u32 fw_handle,
+ struct ia_css_isp_param_css_segments *css_seg,
+ struct ia_css_isp_param_isp_segments *isp_seg)
+{
unsigned int HIVE_ADDR_sp_group;
static struct sh_css_sp_group sp_group;
static struct sh_css_sp_stage sp_stage;
@@ -10666,27 +10498,23 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
fw = &sh_css_sp_fw;
/* Parameter Check */
- if (!pipe || !pipe->stream)
- {
+ if (!pipe || !pipe->stream) {
IA_CSS_ERROR("Invalid Pipe.");
err = -EINVAL;
- } else if (!(pipe->config.acc_extension))
- {
+ } else if (!(pipe->config.acc_extension)) {
IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
err = -EINVAL;
- } else if (!sh_css_sp_is_running())
- {
+ } else if (!sh_css_sp_is_running()) {
IA_CSS_ERROR("Leaving: queue unavailable.");
err = -EBUSY;
- } else
- {
+ } else {
/* Query the thread_id and stage_num corresponding to the Extension firmware */
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
if (!err) {
/* Get the Extension State */
enabled = (SH_CSS_QOS_STAGE_IS_ENABLED(&sh_css_sp_group.pipe[thread_id],
- stage->stage_num)) ? true : false;
+ stage->stage_num)) ? true : false;
/* Update mapped arg only when extension stage is not enabled */
if (enabled) {
IA_CSS_ERROR("Leaving: cannot update when stage is enabled.");
@@ -10696,13 +10524,14 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
HIVE_ADDR_sp_group = fw->info.sp.group;
sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(sp_group),
- &sp_group, sizeof(struct sh_css_sp_group));
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group,
+ sizeof(struct sh_css_sp_group));
hmm_load(sp_group.pipe[thread_id].sp_stage_addr[stage_num],
- &sp_stage, sizeof(struct sh_css_sp_stage));
+ &sp_stage, sizeof(struct sh_css_sp_stage));
hmm_load(sp_stage.isp_stage_addr,
- &isp_stage, sizeof(struct sh_css_isp_stage));
+ &isp_stage, sizeof(struct sh_css_isp_stage));
for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
@@ -10718,7 +10547,8 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
}
hmm_store(sp_stage.isp_stage_addr,
- &isp_stage, sizeof(struct sh_css_isp_stage));
+ &isp_stage,
+ sizeof(struct sh_css_isp_stage));
}
}
}
@@ -10729,8 +10559,9 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
#ifdef ISP2401
static int
aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
- struct ia_css_pipe *pipes[],
- bool *do_crop_status) {
+ struct ia_css_pipe *pipes[],
+ bool *do_crop_status)
+{
int err = 0;
int i;
struct ia_css_pipe *curr_pipe;
@@ -10739,15 +10570,13 @@ aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
if ((!curr_stream) ||
(curr_stream->num_pipes == 0) ||
(!pipes) ||
- (!do_crop_status))
- {
+ (!do_crop_status)) {
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
return err;
}
- for (i = 0; i < curr_stream->num_pipes; i++)
- {
+ for (i = 0; i < curr_stream->num_pipes; i++) {
curr_pipe = pipes[i];
pipe_mask |= (1 << curr_pipe->config.mode);
}
@@ -10761,7 +10590,8 @@ aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
}
static bool
-aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe) {
+aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe)
+{
bool status = false;
if ((curr_pipe) && enabled) {
@@ -10776,7 +10606,8 @@ aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe) {
static int
aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
- struct ia_css_resolution *effective_res) {
+ struct ia_css_resolution *effective_res)
+{
int err = 0;
struct ia_css_resolution crop_res;
struct ia_css_resolution *in_res = NULL;
@@ -10786,8 +10617,7 @@ aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
bool use_capt_pp_in_res = false;
if ((!curr_pipe) ||
- (!effective_res))
- {
+ (!effective_res)) {
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -10795,8 +10625,7 @@ aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
if ((curr_pipe->config.mode != IA_CSS_PIPE_MODE_PREVIEW) &&
(curr_pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO) &&
- (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE))
- {
+ (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE)) {
err = -EINVAL;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -10817,8 +10646,7 @@ aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
in_res = &curr_pipe->stream->config.input_config.effective_res;
out_res = &curr_pipe->output_info[0].res;
- switch (curr_pipe->config.mode)
- {
+ switch (curr_pipe->config.mode) {
case IA_CSS_PIPE_MODE_PREVIEW:
if (use_bds_output_info)
out_res = &curr_pipe->bds_output_info.res;
@@ -10838,27 +10666,26 @@ aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
case IA_CSS_PIPE_MODE_YUVPP:
default:
IA_CSS_ERROR("aspect ratio cropping invalid args: mode[%d]\n",
- curr_pipe->config.mode);
+ curr_pipe->config.mode);
assert(0);
break;
}
err = ia_css_frame_find_crop_resolution(in_res, out_res, &crop_res);
if (!err)
- {
*effective_res = crop_res;
- } else
- {
+ else
/* in case of error fallback to default
* effective resolution from driver. */
IA_CSS_LOG("ia_css_frame_find_crop_resolution() failed with err(%d)", err);
- }
+
return err;
}
#endif
static void
-sh_css_hmm_buffer_record_init(void) {
+sh_css_hmm_buffer_record_init(void)
+{
int i;
for (i = 0; i < MAX_HMM_BUFFER_NUM; i++)
@@ -10866,7 +10693,8 @@ sh_css_hmm_buffer_record_init(void) {
}
static void
-sh_css_hmm_buffer_record_uninit(void) {
+sh_css_hmm_buffer_record_uninit(void)
+{
int i;
struct sh_css_hmm_buffer_record *buffer_record = NULL;
@@ -10882,7 +10710,8 @@ sh_css_hmm_buffer_record_uninit(void) {
}
static void
-sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record) {
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record)
+{
assert(buffer_record);
buffer_record->in_use = false;
buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID;
@@ -10893,14 +10722,15 @@ sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record) {
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
enum ia_css_buffer_type type,
- hrt_address kernel_ptr) {
+ hrt_address kernel_ptr)
+{
int i;
struct sh_css_hmm_buffer_record *buffer_record = NULL;
struct sh_css_hmm_buffer_record *out_buffer_record = NULL;
assert(h_vbuf);
assert((type > IA_CSS_BUFFER_TYPE_INVALID) &&
- (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
+ (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
assert(kernel_ptr != 0);
buffer_record = &hmm_buffer_record[0];
@@ -10921,7 +10751,8 @@ static struct sh_css_hmm_buffer_record
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
- enum ia_css_buffer_type type) {
+ enum ia_css_buffer_type type)
+{
int i;
struct sh_css_hmm_buffer_record *buffer_record = NULL;
bool found_record = false;
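
The sh_css.c hunks above are mechanical brace cleanups toward kernel coding style: control-flow statements keep the opening brace on the same line, while function definitions place it on a line of its own. Distilled into a hedged sketch (check_pipe is a hypothetical helper, not a function from the driver):

static int check_pipe(struct ia_css_pipe *pipe)
{
	int err = 0;

	if (!pipe || !pipe->stream) {		/* brace cuddles the if */
		IA_CSS_ERROR("Invalid Pipe.");
		err = -EINVAL;
	} else if (!sh_css_sp_is_running()) {	/* ...and the else if */
		IA_CSS_ERROR("Leaving: queue unavailable.");
		err = -EBUSY;
	}
	return err;
}
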
diff --git a/drivers/staging/media/av7110/Kconfig b/drivers/staging/media/av7110/Kconfig
new file mode 100644
index 000000000000..9faf9d2d4001
--- /dev/null
+++ b/drivers/staging/media/av7110/Kconfig
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DVB_AV7110_IR
+ bool
+ depends on RC_CORE=y || RC_CORE=DVB_AV7110
+ default DVB_AV7110
+
+config DVB_AV7110
+ tristate "AV7110 cards"
+ depends on DVB_CORE && PCI && I2C
+ select TTPCI_EEPROM
+ select VIDEO_SAA7146_VV
+ depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
+ select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA8083 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SP8870 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0297 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_L64781 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for SAA7146 and AV7110 based DVB cards as produced
+ by Fujitsu-Siemens, Technotrend, Hauppauge and others.
+
+ This driver only supports the full-featured cards with
+ an onboard MPEG2 decoder.
+
+ This driver needs external firmware. Please use the script
+ "<kerneldir>/scripts/get_dvb_firmware av7110" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on the configuration of firmware hotplug).
+
+ Alternatively, you can download the file and build it into
+ your kernel image by adding its filename to the kernel's
+ EXTRA_FIRMWARE configuration option.
+
+ Say Y if you own such a card and want to use it.
+
+config DVB_AV7110_OSD
+ bool "AV7110 OSD support"
+ depends on DVB_AV7110
+ default y if DVB_AV7110=y || DVB_AV7110=m
+ help
+ The AV7110 firmware provides some code to generate an On-Screen
+ Display (OSD) on the video output. This is nonstandard and not
+ guaranteed to be maintained.
+
+ Some popular DVB software, such as VDR, uses this OSD to render
+ its menus, so say Y if you want to use such software.
+
+ Everyone else should say N.
+
+config DVB_BUDGET_PATCH
+ tristate "AV7110 cards with Budget Patch"
+ depends on DVB_BUDGET_CORE && I2C
+ depends on DVB_AV7110
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA8083 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Budget Patch (full TS) modification on
+ SAA7146+AV7110 based DVB-S cards. This driver doesn't use
+ the onboard MPEG2 decoder; the card is driven in Budget-only
+ mode. The card must have its firmware loaded before it can
+ tune properly, which can be done by inserting and removing
+ the standard AV7110 driver prior to loading this one.
+
+ Say Y if you own such a card and want to use it.
+
+ To compile this driver as a module, choose M here: the
+ module will be called budget-patch.
+
+if DVB_AV7110
+
+# Frontend driver that is used only by the AV7110 driver.
+# While technically independent, it doesn't make sense to keep
+# it if we drop support for AV7110, as no other driver will use it.
+
+config DVB_SP8870
+ tristate "Spase sp8870 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/scripts/get_dvb_firmware sp8870" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on the configuration of firmware hotplug).
+
+endif
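
The help texts above point users at get_dvb_firmware for fetching the blobs; on the driver side, firmware placed in /lib/firmware is loaded through the kernel's firmware loader. A minimal hedged sketch of that pattern (demo_load_firmware and the exact blob name are illustrative, not the av7110 entry points):

#include <linux/firmware.h>
#include <linux/device.h>

static int demo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Searches /lib/firmware (and the paths around it) for the blob. */
	ret = request_firmware(&fw, "dvb-ttpci-01.fw", dev);
	if (ret)
		return ret;

	/* ... upload fw->data (fw->size bytes) to the device here ... */

	release_firmware(fw);
	return 0;
}
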
diff --git a/drivers/staging/media/av7110/Makefile b/drivers/staging/media/av7110/Makefile
new file mode 100644
index 000000000000..307b267598ea
--- /dev/null
+++ b/drivers/staging/media/av7110/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the AV7110 DVB device driver
+#
+
+dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o \
+ av7110_ipack.o dvb_filter.o
+
+ifdef CONFIG_DVB_AV7110_IR
+dvb-ttpci-objs += av7110_ir.o
+endif
+
+obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-patch.o
+
+obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
+
+obj-$(CONFIG_DVB_SP8870) += sp8870.o
+
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/pci/ttpci
+ccflags-y += -I $(srctree)/drivers/media/common
diff --git a/drivers/staging/media/av7110/TODO b/drivers/staging/media/av7110/TODO
new file mode 100644
index 000000000000..60062d8441b3
--- /dev/null
+++ b/drivers/staging/media/av7110/TODO
@@ -0,0 +1,3 @@
+- This driver is too old and relies on a different API.
+  Drop it from the kernel in a couple of versions.
+- Cleanup patches for the drivers here won't be accepted.
diff --git a/Documentation/userspace-api/media/dvb/audio-bilingual-channel-select.rst b/drivers/staging/media/av7110/audio-bilingual-channel-select.rst
index 33b5363317f1..33b5363317f1 100644
--- a/Documentation/userspace-api/media/dvb/audio-bilingual-channel-select.rst
+++ b/drivers/staging/media/av7110/audio-bilingual-channel-select.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-channel-select.rst b/drivers/staging/media/av7110/audio-channel-select.rst
index 74093df92a68..74093df92a68 100644
--- a/Documentation/userspace-api/media/dvb/audio-channel-select.rst
+++ b/drivers/staging/media/av7110/audio-channel-select.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-clear-buffer.rst b/drivers/staging/media/av7110/audio-clear-buffer.rst
index a0ebb0278260..a0ebb0278260 100644
--- a/Documentation/userspace-api/media/dvb/audio-clear-buffer.rst
+++ b/drivers/staging/media/av7110/audio-clear-buffer.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-continue.rst b/drivers/staging/media/av7110/audio-continue.rst
index a2e9850f37f2..a2e9850f37f2 100644
--- a/Documentation/userspace-api/media/dvb/audio-continue.rst
+++ b/drivers/staging/media/av7110/audio-continue.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-fclose.rst b/drivers/staging/media/av7110/audio-fclose.rst
index 77857d578e83..77857d578e83 100644
--- a/Documentation/userspace-api/media/dvb/audio-fclose.rst
+++ b/drivers/staging/media/av7110/audio-fclose.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-fopen.rst b/drivers/staging/media/av7110/audio-fopen.rst
index 774daaab3bad..774daaab3bad 100644
--- a/Documentation/userspace-api/media/dvb/audio-fopen.rst
+++ b/drivers/staging/media/av7110/audio-fopen.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-fwrite.rst b/drivers/staging/media/av7110/audio-fwrite.rst
index 7b096ac2b6c4..7b096ac2b6c4 100644
--- a/Documentation/userspace-api/media/dvb/audio-fwrite.rst
+++ b/drivers/staging/media/av7110/audio-fwrite.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-get-capabilities.rst b/drivers/staging/media/av7110/audio-get-capabilities.rst
index 6d9eb71dad17..6d9eb71dad17 100644
--- a/Documentation/userspace-api/media/dvb/audio-get-capabilities.rst
+++ b/drivers/staging/media/av7110/audio-get-capabilities.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-get-status.rst b/drivers/staging/media/av7110/audio-get-status.rst
index 7ae8db2e65e9..7ae8db2e65e9 100644
--- a/Documentation/userspace-api/media/dvb/audio-get-status.rst
+++ b/drivers/staging/media/av7110/audio-get-status.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-pause.rst b/drivers/staging/media/av7110/audio-pause.rst
index d37d1ddce4df..d37d1ddce4df 100644
--- a/Documentation/userspace-api/media/dvb/audio-pause.rst
+++ b/drivers/staging/media/av7110/audio-pause.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-play.rst b/drivers/staging/media/av7110/audio-play.rst
index e591930b6ca7..e591930b6ca7 100644
--- a/Documentation/userspace-api/media/dvb/audio-play.rst
+++ b/drivers/staging/media/av7110/audio-play.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-select-source.rst b/drivers/staging/media/av7110/audio-select-source.rst
index 6a0c0f365eb1..6a0c0f365eb1 100644
--- a/Documentation/userspace-api/media/dvb/audio-select-source.rst
+++ b/drivers/staging/media/av7110/audio-select-source.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-set-av-sync.rst b/drivers/staging/media/av7110/audio-set-av-sync.rst
index 85a8016bf025..85a8016bf025 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-av-sync.rst
+++ b/drivers/staging/media/av7110/audio-set-av-sync.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-set-bypass-mode.rst b/drivers/staging/media/av7110/audio-set-bypass-mode.rst
index ecac02f1b2fc..80d551a2053a 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-bypass-mode.rst
+++ b/drivers/staging/media/av7110/audio-set-bypass-mode.rst
@@ -50,7 +50,7 @@ Description
This ioctl call asks the Audio Device to bypass the Audio decoder and
forward the stream without decoding. This mode shall be used if streams
-that can’t be handled by the Digital TV system shall be decoded. Dolby
+that can't be handled by the Digital TV system shall be decoded. Dolby
DigitalTM streams are automatically forwarded by the Digital TV subsystem if
the hardware can handle it.
diff --git a/Documentation/userspace-api/media/dvb/audio-set-id.rst b/drivers/staging/media/av7110/audio-set-id.rst
index 39ad846d412d..39ad846d412d 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-id.rst
+++ b/drivers/staging/media/av7110/audio-set-id.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-set-mixer.rst b/drivers/staging/media/av7110/audio-set-mixer.rst
index 45dbdf4801e0..45dbdf4801e0 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-mixer.rst
+++ b/drivers/staging/media/av7110/audio-set-mixer.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-set-mute.rst b/drivers/staging/media/av7110/audio-set-mute.rst
index 987751f92967..987751f92967 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-mute.rst
+++ b/drivers/staging/media/av7110/audio-set-mute.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-set-streamtype.rst b/drivers/staging/media/av7110/audio-set-streamtype.rst
index 77d73c74882f..77d73c74882f 100644
--- a/Documentation/userspace-api/media/dvb/audio-set-streamtype.rst
+++ b/drivers/staging/media/av7110/audio-set-streamtype.rst
diff --git a/Documentation/userspace-api/media/dvb/audio-stop.rst b/drivers/staging/media/av7110/audio-stop.rst
index d77f786fd797..d77f786fd797 100644
--- a/Documentation/userspace-api/media/dvb/audio-stop.rst
+++ b/drivers/staging/media/av7110/audio-stop.rst
diff --git a/include/uapi/linux/dvb/audio.h b/drivers/staging/media/av7110/audio.h
index 2f869da69171..2f869da69171 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/drivers/staging/media/av7110/audio.h
diff --git a/Documentation/userspace-api/media/dvb/audio.rst b/drivers/staging/media/av7110/audio.rst
index eaae5675a47d..aa753336b31f 100644
--- a/Documentation/userspace-api/media/dvb/audio.rst
+++ b/drivers/staging/media/av7110/audio.rst
@@ -11,7 +11,7 @@ TV hardware. It can be accessed through ``/dev/dvb/adapter?/audio?``. Data
types and ioctl definitions can be accessed by including
``linux/dvb/audio.h`` in your application.
-Please note that some Digital TV cards don’t have their own MPEG decoder, which
+Please note that some Digital TV cards don't have their own MPEG decoder, which
results in the omission of the audio and video device.
These ioctls were also used by V4L2 to control MPEG decoders implemented
diff --git a/Documentation/userspace-api/media/dvb/audio_data_types.rst b/drivers/staging/media/av7110/audio_data_types.rst
index 4744529136a8..4744529136a8 100644
--- a/Documentation/userspace-api/media/dvb/audio_data_types.rst
+++ b/drivers/staging/media/av7110/audio_data_types.rst
diff --git a/Documentation/userspace-api/media/dvb/audio_function_calls.rst b/drivers/staging/media/av7110/audio_function_calls.rst
index fa5ba9539caf..fa5ba9539caf 100644
--- a/Documentation/userspace-api/media/dvb/audio_function_calls.rst
+++ b/drivers/staging/media/av7110/audio_function_calls.rst
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/staging/media/av7110/av7110.c
index d74ee0ecfb36..d74ee0ecfb36 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/staging/media/av7110/av7110.c
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/staging/media/av7110/av7110.h
index 809d938ae166..b8e8fc8ddbe9 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/staging/media/av7110/av7110.h
@@ -9,11 +9,12 @@
#include <linux/input.h>
#include <linux/time.h>
-#include <linux/dvb/video.h>
-#include <linux/dvb/audio.h>
+#include "video.h"
+#include "audio.h"
+#include "osd.h"
+
#include <linux/dvb/dmx.h>
#include <linux/dvb/ca.h>
-#include <linux/dvb/osd.h>
#include <linux/dvb/net.h>
#include <linux/mutex.h>
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/staging/media/av7110/av7110_av.c
index 91f4866c7e59..91f4866c7e59 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/staging/media/av7110/av7110_av.c
diff --git a/drivers/media/pci/ttpci/av7110_av.h b/drivers/staging/media/av7110/av7110_av.h
index 71bbd4391f57..71bbd4391f57 100644
--- a/drivers/media/pci/ttpci/av7110_av.h
+++ b/drivers/staging/media/av7110/av7110_av.h
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/staging/media/av7110/av7110_ca.c
index c1338e074a3d..c1338e074a3d 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/staging/media/av7110/av7110_ca.c
diff --git a/drivers/media/pci/ttpci/av7110_ca.h b/drivers/staging/media/av7110/av7110_ca.h
index a6e3f2955730..a6e3f2955730 100644
--- a/drivers/media/pci/ttpci/av7110_ca.h
+++ b/drivers/staging/media/av7110/av7110_ca.h
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/staging/media/av7110/av7110_hw.c
index 93ca31e38ddd..93ca31e38ddd 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/staging/media/av7110/av7110_hw.c
diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/staging/media/av7110/av7110_hw.h
index 6380d8950c69..6380d8950c69 100644
--- a/drivers/media/pci/ttpci/av7110_hw.h
+++ b/drivers/staging/media/av7110/av7110_hw.h
diff --git a/drivers/media/pci/ttpci/av7110_ipack.c b/drivers/staging/media/av7110/av7110_ipack.c
index 30330ed01ce8..30330ed01ce8 100644
--- a/drivers/media/pci/ttpci/av7110_ipack.c
+++ b/drivers/staging/media/av7110/av7110_ipack.c
diff --git a/drivers/media/pci/ttpci/av7110_ipack.h b/drivers/staging/media/av7110/av7110_ipack.h
index 943ec899bb93..943ec899bb93 100644
--- a/drivers/media/pci/ttpci/av7110_ipack.h
+++ b/drivers/staging/media/av7110/av7110_ipack.h
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/staging/media/av7110/av7110_ir.c
index a851ba328e4a..a851ba328e4a 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/staging/media/av7110/av7110_ir.c
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/staging/media/av7110/av7110_v4l.c
index c89f536f699c..c89f536f699c 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/staging/media/av7110/av7110_v4l.c
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/staging/media/av7110/budget-patch.c
index d173c8ade6a7..d173c8ade6a7 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/staging/media/av7110/budget-patch.c
diff --git a/drivers/media/pci/ttpci/dvb_filter.c b/drivers/staging/media/av7110/dvb_filter.c
index 8c2eca5dcdc9..8c2eca5dcdc9 100644
--- a/drivers/media/pci/ttpci/dvb_filter.c
+++ b/drivers/staging/media/av7110/dvb_filter.c
diff --git a/drivers/media/pci/ttpci/dvb_filter.h b/drivers/staging/media/av7110/dvb_filter.h
index 67a3c6333bca..67a3c6333bca 100644
--- a/drivers/media/pci/ttpci/dvb_filter.h
+++ b/drivers/staging/media/av7110/dvb_filter.h
diff --git a/include/uapi/linux/dvb/osd.h b/drivers/staging/media/av7110/osd.h
index 858997c74043..858997c74043 100644
--- a/include/uapi/linux/dvb/osd.h
+++ b/drivers/staging/media/av7110/osd.h
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/staging/media/av7110/sp8870.c
index 9767159aeb9b..9767159aeb9b 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/staging/media/av7110/sp8870.c
diff --git a/drivers/media/dvb-frontends/sp8870.h b/drivers/staging/media/av7110/sp8870.h
index 5eacf39f425e..5eacf39f425e 100644
--- a/drivers/media/dvb-frontends/sp8870.h
+++ b/drivers/staging/media/av7110/sp8870.h
diff --git a/Documentation/userspace-api/media/dvb/video-clear-buffer.rst b/drivers/staging/media/av7110/video-clear-buffer.rst
index a7730559bbb2..a7730559bbb2 100644
--- a/Documentation/userspace-api/media/dvb/video-clear-buffer.rst
+++ b/drivers/staging/media/av7110/video-clear-buffer.rst
diff --git a/Documentation/userspace-api/media/dvb/video-command.rst b/drivers/staging/media/av7110/video-command.rst
index cae9445eb3af..cae9445eb3af 100644
--- a/Documentation/userspace-api/media/dvb/video-command.rst
+++ b/drivers/staging/media/av7110/video-command.rst
diff --git a/Documentation/userspace-api/media/dvb/video-continue.rst b/drivers/staging/media/av7110/video-continue.rst
index bc34bf3989e4..bc34bf3989e4 100644
--- a/Documentation/userspace-api/media/dvb/video-continue.rst
+++ b/drivers/staging/media/av7110/video-continue.rst
diff --git a/Documentation/userspace-api/media/dvb/video-fast-forward.rst b/drivers/staging/media/av7110/video-fast-forward.rst
index e71fa8d6965b..e71fa8d6965b 100644
--- a/Documentation/userspace-api/media/dvb/video-fast-forward.rst
+++ b/drivers/staging/media/av7110/video-fast-forward.rst
diff --git a/Documentation/userspace-api/media/dvb/video-fclose.rst b/drivers/staging/media/av7110/video-fclose.rst
index 01d24d548439..01d24d548439 100644
--- a/Documentation/userspace-api/media/dvb/video-fclose.rst
+++ b/drivers/staging/media/av7110/video-fclose.rst
diff --git a/Documentation/userspace-api/media/dvb/video-fopen.rst b/drivers/staging/media/av7110/video-fopen.rst
index 1371b083e4e8..1371b083e4e8 100644
--- a/Documentation/userspace-api/media/dvb/video-fopen.rst
+++ b/drivers/staging/media/av7110/video-fopen.rst
diff --git a/Documentation/userspace-api/media/dvb/video-freeze.rst b/drivers/staging/media/av7110/video-freeze.rst
index 4321f257cb70..4321f257cb70 100644
--- a/Documentation/userspace-api/media/dvb/video-freeze.rst
+++ b/drivers/staging/media/av7110/video-freeze.rst
diff --git a/Documentation/userspace-api/media/dvb/video-fwrite.rst b/drivers/staging/media/av7110/video-fwrite.rst
index a07fd7d7a40e..a07fd7d7a40e 100644
--- a/Documentation/userspace-api/media/dvb/video-fwrite.rst
+++ b/drivers/staging/media/av7110/video-fwrite.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-capabilities.rst b/drivers/staging/media/av7110/video-get-capabilities.rst
index 01e09f56656c..01e09f56656c 100644
--- a/Documentation/userspace-api/media/dvb/video-get-capabilities.rst
+++ b/drivers/staging/media/av7110/video-get-capabilities.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-event.rst b/drivers/staging/media/av7110/video-get-event.rst
index 90382bc36cfe..90382bc36cfe 100644
--- a/Documentation/userspace-api/media/dvb/video-get-event.rst
+++ b/drivers/staging/media/av7110/video-get-event.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-frame-count.rst b/drivers/staging/media/av7110/video-get-frame-count.rst
index b48ac8c58a41..b48ac8c58a41 100644
--- a/Documentation/userspace-api/media/dvb/video-get-frame-count.rst
+++ b/drivers/staging/media/av7110/video-get-frame-count.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-pts.rst b/drivers/staging/media/av7110/video-get-pts.rst
index fedaff41be0b..fedaff41be0b 100644
--- a/Documentation/userspace-api/media/dvb/video-get-pts.rst
+++ b/drivers/staging/media/av7110/video-get-pts.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-size.rst b/drivers/staging/media/av7110/video-get-size.rst
index de34331c5bd1..de34331c5bd1 100644
--- a/Documentation/userspace-api/media/dvb/video-get-size.rst
+++ b/drivers/staging/media/av7110/video-get-size.rst
diff --git a/Documentation/userspace-api/media/dvb/video-get-status.rst b/drivers/staging/media/av7110/video-get-status.rst
index 9b86fbf411d4..9b86fbf411d4 100644
--- a/Documentation/userspace-api/media/dvb/video-get-status.rst
+++ b/drivers/staging/media/av7110/video-get-status.rst
diff --git a/Documentation/userspace-api/media/dvb/video-play.rst b/drivers/staging/media/av7110/video-play.rst
index 35ac8b98fdbf..35ac8b98fdbf 100644
--- a/Documentation/userspace-api/media/dvb/video-play.rst
+++ b/drivers/staging/media/av7110/video-play.rst
diff --git a/Documentation/userspace-api/media/dvb/video-select-source.rst b/drivers/staging/media/av7110/video-select-source.rst
index 929a20985d53..929a20985d53 100644
--- a/Documentation/userspace-api/media/dvb/video-select-source.rst
+++ b/drivers/staging/media/av7110/video-select-source.rst
diff --git a/Documentation/userspace-api/media/dvb/video-set-blank.rst b/drivers/staging/media/av7110/video-set-blank.rst
index 70249a6ba125..70249a6ba125 100644
--- a/Documentation/userspace-api/media/dvb/video-set-blank.rst
+++ b/drivers/staging/media/av7110/video-set-blank.rst
diff --git a/Documentation/userspace-api/media/dvb/video-set-display-format.rst b/drivers/staging/media/av7110/video-set-display-format.rst
index 1de4f40ae732..1de4f40ae732 100644
--- a/Documentation/userspace-api/media/dvb/video-set-display-format.rst
+++ b/drivers/staging/media/av7110/video-set-display-format.rst
diff --git a/Documentation/userspace-api/media/dvb/video-set-format.rst b/drivers/staging/media/av7110/video-set-format.rst
index bb64e37ae081..bb64e37ae081 100644
--- a/Documentation/userspace-api/media/dvb/video-set-format.rst
+++ b/drivers/staging/media/av7110/video-set-format.rst
diff --git a/Documentation/userspace-api/media/dvb/video-set-streamtype.rst b/drivers/staging/media/av7110/video-set-streamtype.rst
index 1f31c048bdbc..1f31c048bdbc 100644
--- a/Documentation/userspace-api/media/dvb/video-set-streamtype.rst
+++ b/drivers/staging/media/av7110/video-set-streamtype.rst
diff --git a/Documentation/userspace-api/media/dvb/video-slowmotion.rst b/drivers/staging/media/av7110/video-slowmotion.rst
index 1478fcc30cb8..1478fcc30cb8 100644
--- a/Documentation/userspace-api/media/dvb/video-slowmotion.rst
+++ b/drivers/staging/media/av7110/video-slowmotion.rst
diff --git a/Documentation/userspace-api/media/dvb/video-stillpicture.rst b/drivers/staging/media/av7110/video-stillpicture.rst
index d25384222a20..d25384222a20 100644
--- a/Documentation/userspace-api/media/dvb/video-stillpicture.rst
+++ b/drivers/staging/media/av7110/video-stillpicture.rst
diff --git a/Documentation/userspace-api/media/dvb/video-stop.rst b/drivers/staging/media/av7110/video-stop.rst
index 96f61c5b48a2..96f61c5b48a2 100644
--- a/Documentation/userspace-api/media/dvb/video-stop.rst
+++ b/drivers/staging/media/av7110/video-stop.rst
diff --git a/Documentation/userspace-api/media/dvb/video-try-command.rst b/drivers/staging/media/av7110/video-try-command.rst
index 79bf3dfb8a32..79bf3dfb8a32 100644
--- a/Documentation/userspace-api/media/dvb/video-try-command.rst
+++ b/drivers/staging/media/av7110/video-try-command.rst
diff --git a/include/uapi/linux/dvb/video.h b/drivers/staging/media/av7110/video.h
index 179f1ec60af6..179f1ec60af6 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/drivers/staging/media/av7110/video.h
diff --git a/Documentation/userspace-api/media/dvb/video.rst b/drivers/staging/media/av7110/video.rst
index 38a8d39a1d25..808705b769a1 100644
--- a/Documentation/userspace-api/media/dvb/video.rst
+++ b/drivers/staging/media/av7110/video.rst
@@ -16,7 +16,7 @@ stream, not its presentation on the TV or computer screen. On PCs this
is typically handled by an associated video4linux device, e.g.
**/dev/video**, which allows scaling and defining output windows.
-Some Digital TV cards don’t have their own MPEG decoder, which results in the
+Some Digital TV cards don't have their own MPEG decoder, which results in the
omission of the audio and video device as well as the video4linux
device.
diff --git a/Documentation/userspace-api/media/dvb/video_function_calls.rst b/drivers/staging/media/av7110/video_function_calls.rst
index 20a897be5dca..20a897be5dca 100644
--- a/Documentation/userspace-api/media/dvb/video_function_calls.rst
+++ b/drivers/staging/media/av7110/video_function_calls.rst
diff --git a/Documentation/userspace-api/media/dvb/video_types.rst b/drivers/staging/media/av7110/video_types.rst
index c4557d328b7a..c4557d328b7a 100644
--- a/Documentation/userspace-api/media/dvb/video_types.rst
+++ b/drivers/staging/media/av7110/video_types.rst
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index 5b6cf9f62b1a..20b1f6d7b69c 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_HANTRO
tristate "Hantro VPU driver"
- depends on ARCH_MXC || ARCH_ROCKCHIP || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_REQUEST_API
@@ -24,6 +24,14 @@ config VIDEO_HANTRO_IMX8M
help
Enable support for i.MX8M SoCs.
+config VIDEO_HANTRO_SAMA5D4
+ bool "Hantro VDEC SAMA5D4 support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_AT91 || COMPILE_TEST
+ default y
+ help
+ Enable support for Microchip SAMA5D4 SoCs.
+
config VIDEO_HANTRO_ROCKCHIP
bool "Hantro VPU Rockchip support"
depends on VIDEO_HANTRO
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 743ce08eb184..287370188d2a 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -7,20 +7,25 @@ hantro-vpu-y += \
hantro_v4l2.o \
hantro_postproc.o \
hantro_h1_jpeg_enc.o \
+ hantro_g1.o \
hantro_g1_h264_dec.o \
hantro_g1_mpeg2_dec.o \
+ hantro_g2_hevc_dec.o \
hantro_g1_vp8_dec.o \
- rk3399_vpu_hw_jpeg_enc.o \
- rk3399_vpu_hw_mpeg2_dec.o \
- rk3399_vpu_hw_vp8_dec.o \
+ rockchip_vpu2_hw_jpeg_enc.o \
+ rockchip_vpu2_hw_mpeg2_dec.o \
+ rockchip_vpu2_hw_vp8_dec.o \
hantro_jpeg.o \
hantro_h264.o \
+ hantro_hevc.o \
hantro_mpeg2.o \
hantro_vp8.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \
imx8m_vpu_hw.o
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
+ sama5d4_vdec_hw.o
+
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
- rk3288_vpu_hw.o \
- rk3399_vpu_hw.o
+ rockchip_vpu_hw.o
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index 6c1b888abe75..a70c386de6f1 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -34,6 +34,7 @@ struct hantro_codec_ops;
#define HANTRO_MPEG2_DECODER BIT(16)
#define HANTRO_VP8_DECODER BIT(17)
#define HANTRO_H264_DECODER BIT(18)
+#define HANTRO_HEVC_DECODER BIT(19)
#define HANTRO_DECODERS 0xffff0000
/**
@@ -99,6 +100,7 @@ struct hantro_variant {
* @HANTRO_MODE_H264_DEC: H264 decoder.
* @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
* @HANTRO_MODE_VP8_DEC: VP8 decoder.
+ * @HANTRO_MODE_HEVC_DEC: HEVC decoder.
*/
enum hantro_codec_mode {
HANTRO_MODE_NONE = -1,
@@ -106,6 +108,7 @@ enum hantro_codec_mode {
HANTRO_MODE_H264_DEC,
HANTRO_MODE_MPEG2_DEC,
HANTRO_MODE_VP8_DEC,
+ HANTRO_MODE_HEVC_DEC,
};
/*
@@ -218,6 +221,7 @@ struct hantro_dev {
* @jpeg_enc: JPEG-encoding context.
* @mpeg2_dec: MPEG-2-decoding context.
* @vp8_dec: VP8-decoding context.
+ * @hevc_dec: HEVC-decoding context.
*/
struct hantro_ctx {
struct hantro_dev *dev;
@@ -244,6 +248,7 @@ struct hantro_ctx {
struct hantro_jpeg_enc_hw_ctx jpeg_enc;
struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
struct hantro_vp8_dec_hw_ctx vp8_dec;
+ struct hantro_hevc_dec_hw_ctx hevc_dec;
};
};
@@ -410,12 +415,8 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}
-static inline bool
-hantro_needs_postproc(const struct hantro_ctx *ctx,
- const struct hantro_fmt *fmt)
-{
- return !ctx->is_encoder && fmt->fourcc != V4L2_PIX_FMT_NV12;
-}
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt);
static inline dma_addr_t
hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
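
The last hantro.h hunk demotes hantro_needs_postproc() from a static inline to an out-of-line function, presumably so the check can grow core-specific logic without dragging it into the header. A hedged reconstruction of the out-of-line definition, based only on the inline body removed above:

bool hantro_needs_postproc(const struct hantro_ctx *ctx,
			   const struct hantro_fmt *fmt)
{
	/* Encoders never use the post-processor; decoders need it
	 * whenever the requested capture format is not the core's
	 * native NV12 output. (Sketch; the real body may differ.)
	 */
	return !ctx->is_encoder && fmt->fourcc != V4L2_PIX_FMT_NV12;
}
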
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 595e82a82728..31d8449ca1d2 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -56,16 +56,12 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
return hantro_get_dec_buf_addr(ctx, buf);
}
-static void hantro_job_finish(struct hantro_dev *vpu,
- struct hantro_ctx *ctx,
- enum vb2_buffer_state result)
+static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
{
struct vb2_v4l2_buffer *src, *dst;
- pm_runtime_mark_last_busy(vpu->dev);
- pm_runtime_put_autosuspend(vpu->dev);
- clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
-
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -81,6 +77,18 @@ static void hantro_job_finish(struct hantro_dev *vpu,
result);
}
+static void hantro_job_finish(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ hantro_job_finish_no_pm(vpu, ctx, result);
+}
+
void hantro_irq_done(struct hantro_dev *vpu,
enum vb2_buffer_state result)
{
@@ -152,20 +160,23 @@ static void device_run(void *priv)
src = hantro_get_src_buf(ctx);
dst = hantro_get_dst_buf(ctx);
+ ret = pm_runtime_resume_and_get(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
if (ret)
goto err_cancel_job;
- ret = pm_runtime_get_sync(ctx->dev->dev);
- if (ret < 0)
- goto err_cancel_job;
v4l2_m2m_buf_copy_metadata(src, dst, true);
- ctx->codec_ops->run(ctx);
+ if (ctx->codec_ops->run(ctx))
+ goto err_cancel_job;
+
return;
err_cancel_job:
- hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
+ hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}
static struct v4l2_m2m_ops vpu_m2m_ops = {
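
Note the ordering in the reworked device_run(): the runtime-PM reference is taken first with pm_runtime_resume_and_get(), which drops its own reference when resume fails, and only then are the clocks enabled; early failures route to hantro_job_finish_no_pm() so the PM/clock teardown in hantro_job_finish() is not run for resources that were never acquired. One hedged way to keep such acquisitions paired, as a standalone sketch:

static int demo_power_up(struct device *dev,
			 struct clk_bulk_data *clocks, int num_clocks)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* no put needed on failure */
	if (ret < 0)
		return ret;

	ret = clk_bulk_enable(num_clocks, clocks);
	if (ret)
		pm_runtime_put(dev);		/* undo the successful get */

	return ret;
}
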
@@ -243,6 +254,18 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED)
+ /* No scaling support */
+ return -EINVAL;
}
return 0;
}
@@ -267,6 +290,26 @@ static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
+static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP:
+ ctx->hevc_dec.ctrls.hevc_hdr_skip_length = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
.try_ctrl = hantro_try_ctrl,
};
@@ -275,6 +318,10 @@ static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
.s_ctrl = hantro_jpeg_s_ctrl,
};
+static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
+ .s_ctrl = hantro_hevc_s_ctrl,
+};
+
static const struct hantro_ctrl controls[] = {
{
.codec = HANTRO_JPEG_ENCODER,
@@ -289,12 +336,17 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_MPEG2_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
},
}, {
.codec = HANTRO_MPEG2_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
},
}, {
.codec = HANTRO_VP8_DECODER,
@@ -349,6 +401,64 @@ static const struct hantro_ctrl controls[] = {
.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
}
}, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
+ .min = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
+ .min = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+ .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ .ops = &hantro_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP,
+ .name = "Hantro HEVC slice header skip bytes",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 0x100,
+ .step = 1,
+ .ops = &hantro_hevc_ctrl_ops,
+ },
},
};
@@ -472,12 +582,18 @@ static const struct v4l2_file_operations hantro_fops = {
static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
- { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
- { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
+ { .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
+ { .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+ { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
+ { .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
+ { .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
{ /* sentinel */ }
};
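
Each compatible string in the table above carries its hantro_variant through the .data member; at probe time that pointer is conventionally recovered with of_device_get_match_data(). A hedged sketch (demo_probe is illustrative, not the driver's probe function):

#include <linux/of_device.h>

static int demo_probe(struct platform_device *pdev)
{
	const struct hantro_variant *variant;

	/* Returns the .data of the of_device_id entry that matched. */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	/* ... allocate the hantro_dev and continue with @variant ... */
	return 0;
}
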
@@ -752,12 +868,23 @@ static int hantro_probe(struct platform_device *pdev)
if (!vpu->clocks)
return -ENOMEM;
- for (i = 0; i < vpu->variant->num_clocks; i++)
- vpu->clocks[i].id = vpu->variant->clk_names[i];
- ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
- vpu->clocks);
- if (ret)
- return ret;
+ if (vpu->variant->num_clocks > 1) {
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the driver has a single clk, chances are there will be no
+ * actual name in the DT bindings.
+ */
+ vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vpu->clocks[0].clk))
+ return PTR_ERR(vpu->clocks[0].clk);
+ }
num_bases = vpu->variant->num_regs ?: 1;
vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
@@ -785,13 +912,23 @@ static int hantro_probe(struct platform_device *pdev)
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
for (i = 0; i < vpu->variant->num_irqs; i++) {
- const char *irq_name = vpu->variant->irqs[i].name;
+ const char *irq_name;
int irq;
if (!vpu->variant->irqs[i].handler)
continue;
- irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ if (vpu->variant->num_clocks > 1) {
+ irq_name = vpu->variant->irqs[i].name;
+ irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ } else {
+ /*
+ * If the driver has a single IRQ, chances are there
+ * will be no actual name in the DT bindings.
+ */
+ irq_name = "default";
+ irq = platform_get_irq(vpu->pdev, 0);
+ }
if (irq <= 0)
return -ENXIO;
diff --git a/drivers/staging/media/hantro/hantro_g1.c b/drivers/staging/media/hantro/hantro_g1.c
new file mode 100644
index 000000000000..0ab1cee62218
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+irqreturn_t hantro_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+void hantro_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+ vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
+}
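
hantro_g1.c factors out the interrupt-acknowledge and soft-reset sequence shared by every G1-based core. Variants reach it through their IRQ table, whose .name and .handler members the probe loop above dereferences; assuming that table type is struct hantro_irq from hantro_hw.h, wiring it up looks roughly like this hedged sketch:

static const struct hantro_irq demo_g1_irqs[] = {
	{ "vdpu", hantro_g1_irq },	/* named entry for multi-IRQ SoCs */
};

Variants with a single clock (like the SAMA5D4 VDEC) skip the lookup by name, since probe now falls back to platform_get_irq(pdev, 0) for them.
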
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 845bef73d218..5c792b7bcb79 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -273,13 +273,15 @@ static void set_buffers(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
}
-void hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
+ int ret;
/* Prepare the H264 decoder context. */
- if (hantro_h264_dec_prepare_run(ctx))
- return;
+ ret = hantro_h264_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
/* Configure hardware registers. */
set_params(ctx);
@@ -301,4 +303,6 @@ void hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
G1_REG_CONFIG_DEC_CLK_GATE_E,
G1_REG_CONFIG);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
}
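
The change above is part of converting every backend's run() hook from void to int, so device_run() can cancel the job when hardware setup fails instead of leaving the buffers queued. In terms of the ops table, roughly:

struct hantro_codec_ops {			/* abridged, sketch only */
	int (*init)(struct hantro_ctx *ctx);
	void (*exit)(struct hantro_ctx *ctx);
	int (*run)(struct hantro_ctx *ctx);	/* was: void (*run)(...) */
	/* ... */
};
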
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 6386a3989bfe..9aea331e1a3c 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -10,6 +10,7 @@
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
#define G1_SWREG(nr) ((nr) * 4)
@@ -20,7 +21,6 @@
#define G1_REG_REFER2_BASE G1_SWREG(16)
#define G1_REG_REFER3_BASE G1_SWREG(17)
#define G1_REG_QTABLE_BASE G1_SWREG(40)
-#define G1_REG_DEC_E(v) ((v) ? BIT(0) : 0)
#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
@@ -77,43 +77,33 @@
#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
-#define PICT_TOP_FIELD 1
-#define PICT_BOTTOM_FIELD 2
-#define PICT_FRAME 3
-
static void
-hantro_g1_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
+hantro_g1_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
- struct v4l2_ctrl_mpeg2_quantization *quantization;
-
- quantization = hantro_get_ctrl(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
- hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu,
- quantization);
- vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
- G1_REG_QTABLE_BASE);
+ struct v4l2_ctrl_mpeg2_quantisation *q;
+
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, G1_REG_QTABLE_BASE);
}
static void
hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf,
- const struct v4l2_mpeg2_sequence *sequence,
- const struct v4l2_mpeg2_picture *picture,
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- switch (picture->picture_coding_type) {
- case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(ctx,
- slice_params->backward_ref_ts);
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
fallthrough;
- case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(ctx,
- slice_params->forward_ref_ts);
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
}
/* Source bitstream buffer */
@@ -124,7 +114,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
addr = hantro_get_dec_buf_addr(ctx, dst_buf);
current_addr = addr;
- if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
addr += ALIGN(ctx->dst_fmt.width, 16);
vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
@@ -134,18 +124,18 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
backward_addr = current_addr;
/* Set forward ref frame (top/bottom field) */
- if (picture->picture_structure == PICT_FRAME ||
- picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
- (picture->picture_structure == PICT_TOP_FIELD &&
- picture->top_field_first) ||
- (picture->picture_structure == PICT_BOTTOM_FIELD &&
- !picture->top_field_first)) {
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST))) {
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
- } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
- } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
}
@@ -155,13 +145,12 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
}
-void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
- const struct v4l2_mpeg2_sequence *sequence;
- const struct v4l2_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
@@ -170,10 +159,10 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
/* Apply request controls if any */
hantro_start_prepare_run(ctx);
- slice_params = hantro_get_ctrl(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
- sequence = &slice_params->sequence;
- picture = &slice_params->picture;
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
reg = G1_REG_DEC_AXI_RD_ID(0) |
G1_REG_DEC_TIMEOUT_E(1) |
@@ -193,11 +182,11 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
reg = G1_REG_DEC_MODE(5) |
G1_REG_RLC_MODE_E(0) |
- G1_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
- G1_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
- G1_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
- G1_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
- G1_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ G1_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ G1_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ G1_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ G1_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ G1_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
G1_REG_FWD_INTERLACE_E(0) |
G1_REG_FILTERING_DIS(1) |
G1_REG_WRITE_MVS_E(0) |
@@ -206,27 +195,27 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
- G1_REG_ALT_SCAN_E(picture->alternate_scan) |
- G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ G1_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
- reg = G1_REG_STRM_START_BIT(slice_params->data_bit_offset) |
- G1_REG_QSCALE_TYPE(picture->q_scale_type) |
- G1_REG_CON_MV_E(picture->concealment_motion_vectors) |
- G1_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
- G1_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
- G1_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ reg = G1_REG_STRM_START_BIT(0) |
+ G1_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ G1_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ G1_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ G1_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ G1_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
reg = G1_REG_INIT_QP(1) |
- G1_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ G1_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
- reg = G1_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
- G1_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
- G1_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
- G1_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
- G1_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ reg = G1_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ G1_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ G1_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ G1_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
G1_REG_MV_ACCURACY_FWD(1) |
G1_REG_MV_ACCURACY_BWD(1);
vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
@@ -238,14 +227,14 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
reg = G1_REG_APF_THRESHOLD(8);
vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
- hantro_g1_mpeg2_dec_set_quantization(vpu, ctx);
-
+ hantro_g1_mpeg2_dec_set_quantisation(vpu, ctx);
hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
&dst_buf->vb2_buf,
- sequence, picture, slice_params);
+ seq, pic);
hantro_end_prepare_run(ctx);
- reg = G1_REG_DEC_E(1);
- vdpu_write(vpu, reg, G1_SWREG(1));
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
}
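
The MPEG-2 hunks above track the stateless uAPI rework: the monolithic SLICE_PARAMS/QUANTIZATION pair becomes separate STATELESS SEQUENCE, PICTURE and QUANTISATION controls, discrete boolean fields collapse into a flags bitmask, and the stream length now comes from the buffer payload rather than a bit_size field. On the userspace side, filling the picture control then looks roughly like this hedged sketch (values are illustrative):

struct v4l2_ctrl_mpeg2_picture pic = {
	.picture_coding_type	= V4L2_MPEG2_PIC_CODING_TYPE_P,
	.picture_structure	= V4L2_MPEG2_PIC_FRAME,
	.flags			= V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST |
				  V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT,
	.forward_ref_ts		= 0,	/* timestamp of the forward reference */
};
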
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 57002ba70176..96622a7f8279 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -425,7 +425,7 @@ static void cfg_buffers(struct hantro_ctx *ctx,
vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
}
-void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_vp8_frame *hdr;
struct hantro_dev *vpu = ctx->dev;
@@ -438,7 +438,7 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (WARN_ON(!hdr))
- return;
+ return -EINVAL;
/* Reset segment_map buffer in keyframe */
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
@@ -498,4 +498,6 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
hantro_end_prepare_run(ctx);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
}
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
new file mode 100644
index 000000000000..340efb57fd18
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+#define HEVC_DEC_MODE 0xC
+
+#define BUS_WIDTH_32 0
+#define BUS_WIDTH_64 1
+#define BUS_WIDTH_128 2
+#define BUS_WIDTH_256 3
+
+static inline void hantro_write_addr(struct hantro_dev *vpu,
+ unsigned long offset,
+ dma_addr_t addr)
+{
+ vdpu_write(vpu, addr & 0xffffffff, offset);
+}
+
+static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u16 *p = (u16 *)((u8 *)ctx->hevc_dec.tile_sizes.cpu);
+ unsigned int num_tile_rows = pps->num_tile_rows_minus1 + 1;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
+ unsigned int max_log2_ctb_size, ctb_size;
+ bool tiles_enabled, uniform_spacing;
+ u32 no_chroma = 0;
+
+ tiles_enabled = !!(pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED);
+ uniform_spacing = !!(pps->flags & V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING);
+
+ hantro_reg_write(vpu, &g2_tile_e, tiles_enabled);
+
+ max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
+ (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
+ pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
+ >> max_log2_ctb_size;
+ ctb_size = 1 << max_log2_ctb_size;
+
+ vpu_debug(1, "Preparing tile sizes buffer for %dx%d CTBs (CTB size %d)\n",
+ pic_width_in_ctbs, pic_height_in_ctbs, ctb_size);
+
+ if (tiles_enabled) {
+ unsigned int i, j, h;
+
+ vpu_debug(1, "Tiles enabled! %dx%d\n", num_tile_cols, num_tile_rows);
+
+ hantro_reg_write(vpu, &g2_num_tile_rows, num_tile_rows);
+ hantro_reg_write(vpu, &g2_num_tile_cols, num_tile_cols);
+
+ /* write width + height for each tile in pic */
+ if (!uniform_spacing) {
+ u32 tmp_w = 0, tmp_h = 0;
+
+ for (i = 0; i < num_tile_rows; i++) {
+ if (i == num_tile_rows - 1)
+ h = pic_height_in_ctbs - tmp_h;
+ else
+ h = pps->row_height_minus1[i] + 1;
+ tmp_h += h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, tmp_w = 0; j < num_tile_cols - 1; j++) {
+ tmp_w += pps->column_width_minus1[j] + 1;
+ *p++ = pps->column_width_minus1[j + 1];
+ *p++ = h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ }
+ /* last column */
+ *p++ = pic_width_in_ctbs - tmp_w;
+ *p++ = h;
+ }
+ } else { /* uniform spacing */
+ u32 tmp, prev_h, prev_w;
+
+ for (i = 0, prev_h = 0; i < num_tile_rows; i++) {
+ tmp = (i + 1) * pic_height_in_ctbs / num_tile_rows;
+ h = tmp - prev_h;
+ prev_h = tmp;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, prev_w = 0; j < num_tile_cols; j++) {
+ tmp = (j + 1) * pic_width_in_ctbs / num_tile_cols;
+ *p++ = tmp - prev_w;
+ *p++ = h;
+ if (j == 0 &&
+ (pps->column_width_minus1[0] + 1) == 1 &&
+ ctb_size == 16)
+ no_chroma = 1;
+ prev_w = tmp;
+ }
+ }
+ }
+ } else {
+ hantro_reg_write(vpu, &g2_num_tile_rows, 1);
+ hantro_reg_write(vpu, &g2_num_tile_cols, 1);
+
+ /* There's one tile, with dimensions equal to pic size. */
+ p[0] = pic_width_in_ctbs;
+ p[1] = pic_height_in_ctbs;
+ }
+
+ if (no_chroma)
+ vpu_debug(1, "%s: no chroma!\n", __func__);
+}
+
+static void set_params(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 min_log2_cb_size, max_log2_ctb_size, min_cb_size, max_ctb_size;
+ u32 pic_width_in_min_cbs, pic_height_in_min_cbs;
+ u32 pic_width_aligned, pic_height_aligned;
+ u32 partial_ctb_x, partial_ctb_y;
+
+ hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8);
+ hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8);
+
+ hantro_reg_write(vpu, &g2_output_8_bits, 0);
+
+ hantro_reg_write(vpu, &g2_hdr_skip_length, ctrls->hevc_hdr_skip_length);
+
+ min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size;
+
+ hantro_reg_write(vpu, &g2_min_cb_size, min_log2_cb_size);
+ hantro_reg_write(vpu, &g2_max_cb_size, max_log2_ctb_size);
+
+ min_cb_size = 1 << min_log2_cb_size;
+ max_ctb_size = 1 << max_log2_ctb_size;
+
+ pic_width_in_min_cbs = sps->pic_width_in_luma_samples / min_cb_size;
+ pic_height_in_min_cbs = sps->pic_height_in_luma_samples / min_cb_size;
+ pic_width_aligned = ALIGN(sps->pic_width_in_luma_samples, max_ctb_size);
+ pic_height_aligned = ALIGN(sps->pic_height_in_luma_samples, max_ctb_size);
+
+ partial_ctb_x = !!(sps->pic_width_in_luma_samples != pic_width_aligned);
+ partial_ctb_y = !!(sps->pic_height_in_luma_samples != pic_height_aligned);
+
+ hantro_reg_write(vpu, &g2_partial_ctb_x, partial_ctb_x);
+ hantro_reg_write(vpu, &g2_partial_ctb_y, partial_ctb_y);
+
+ hantro_reg_write(vpu, &g2_pic_width_in_cbs, pic_width_in_min_cbs);
+ hantro_reg_write(vpu, &g2_pic_height_in_cbs, pic_height_in_min_cbs);
+
+ hantro_reg_write(vpu, &g2_pic_width_4x4,
+ (pic_width_in_min_cbs * min_cb_size) / 4);
+ hantro_reg_write(vpu, &g2_pic_height_4x4,
+ (pic_height_in_min_cbs * min_cb_size) / 4);
+
+ hantro_reg_write(vpu, &hevc_max_inter_hierdepth,
+ sps->max_transform_hierarchy_depth_inter);
+ hantro_reg_write(vpu, &hevc_max_intra_hierdepth,
+ sps->max_transform_hierarchy_depth_intra);
+ hantro_reg_write(vpu, &hevc_min_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2);
+ hantro_reg_write(vpu, &hevc_max_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2 +
+ sps->log2_diff_max_min_luma_transform_block_size);
+
+ hantro_reg_write(vpu, &g2_tempor_mvp_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED) &&
+ !(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC));
+ hantro_reg_write(vpu, &g2_strong_smooth_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED));
+ hantro_reg_write(vpu, &g2_asym_pred_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_AMP_ENABLED));
+ hantro_reg_write(vpu, &g2_sao_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET));
+ hantro_reg_write(vpu, &g2_sign_data_hide,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED));
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED) {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 1);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, pps->diff_cu_qp_delta_depth);
+ } else {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 0);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, 0);
+ }
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT) {
+ hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
+ hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);
+ } else {
+ hantro_reg_write(vpu, &g2_cb_qp_offset, 0);
+ hantro_reg_write(vpu, &g2_cr_qp_offset, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_filt_offset_beta, pps->pps_beta_offset_div2);
+ hantro_reg_write(vpu, &g2_filt_offset_tc, pps->pps_tc_offset_div2);
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT));
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_bits, pps->num_extra_slice_header_bits);
+ hantro_reg_write(vpu, &g2_slice_chqp_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT));
+ hantro_reg_write(vpu, &g2_weight_bipr_idc,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED));
+ hantro_reg_write(vpu, &g2_transq_bypass,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED));
+ hantro_reg_write(vpu, &g2_list_mod_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT));
+ hantro_reg_write(vpu, &g2_entropy_sync_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED));
+ hantro_reg_write(vpu, &g2_cabac_init_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
+ hantro_reg_write(vpu, &g2_idr_pic_e,
+ !!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC));
+ hantro_reg_write(vpu, &hevc_parallel_merge,
+ pps->log2_parallel_merge_level_minus2 + 2);
+ hantro_reg_write(vpu, &g2_pcm_filt_d,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED));
+ hantro_reg_write(vpu, &g2_pcm_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED));
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED) {
+ hantro_reg_write(vpu, &g2_max_pcm_size,
+ sps->log2_diff_max_min_pcm_luma_coding_block_size +
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_min_pcm_size,
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y,
+ sps->pcm_sample_bit_depth_luma_minus1 + 1);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c,
+ sps->pcm_sample_bit_depth_chroma_minus1 + 1);
+ } else {
+ hantro_reg_write(vpu, &g2_max_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_min_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_start_code_e, 1);
+ hantro_reg_write(vpu, &g2_init_qp, pps->init_qp_minus26 + 26);
+ hantro_reg_write(vpu, &g2_weight_pred_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED));
+ hantro_reg_write(vpu, &g2_cabac_init_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
+ hantro_reg_write(vpu, &g2_const_intra_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED));
+ hantro_reg_write(vpu, &g2_transform_skip,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED));
+ hantro_reg_write(vpu, &g2_out_filtering_dis,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER));
+ hantro_reg_write(vpu, &g2_filt_ctrl_pres,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT));
+ hantro_reg_write(vpu, &g2_dependent_slice,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_override,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED));
+ hantro_reg_write(vpu, &g2_refidx0_active,
+ pps->num_ref_idx_l0_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_refidx1_active,
+ pps->num_ref_idx_l1_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_apf_threshold, 8);
+}
+
+static int find_ref_pic_index(const struct v4l2_hevc_dpb_entry *dpb, int pic_order_cnt)
+{
+ int i;
+
+ for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ if (dpb[i].pic_order_cnt[0] == pic_order_cnt)
+ return i;
+ }
+
+ return 0x0;
+}
+
+static void set_ref_pic_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ struct hantro_dev *vpu = ctx->dev;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
+ u32 list0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ u32 list1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ static const struct hantro_reg ref_pic_regs0[] = {
+ hevc_rlist_f0,
+ hevc_rlist_f1,
+ hevc_rlist_f2,
+ hevc_rlist_f3,
+ hevc_rlist_f4,
+ hevc_rlist_f5,
+ hevc_rlist_f6,
+ hevc_rlist_f7,
+ hevc_rlist_f8,
+ hevc_rlist_f9,
+ hevc_rlist_f10,
+ hevc_rlist_f11,
+ hevc_rlist_f12,
+ hevc_rlist_f13,
+ hevc_rlist_f14,
+ hevc_rlist_f15,
+ };
+ static const struct hantro_reg ref_pic_regs1[] = {
+ hevc_rlist_b0,
+ hevc_rlist_b1,
+ hevc_rlist_b2,
+ hevc_rlist_b3,
+ hevc_rlist_b4,
+ hevc_rlist_b5,
+ hevc_rlist_b6,
+ hevc_rlist_b7,
+ hevc_rlist_b8,
+ hevc_rlist_b9,
+ hevc_rlist_b10,
+ hevc_rlist_b11,
+ hevc_rlist_b12,
+ hevc_rlist_b13,
+ hevc_rlist_b14,
+ hevc_rlist_b15,
+ };
+ unsigned int i, j;
+
+ /* List 0 contains: short term before, short term after and long term */
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_before[i]);
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_after[i]);
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = find_ref_pic_index(dpb, decode_params->poc_lt_curr[i]);
+
+ /* Fill the rest of the list by repeating it from the start */
+ i = 0;
+ while (j < ARRAY_SIZE(list0))
+ list0[j++] = list0[i++];
+
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_after[i]);
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = find_ref_pic_index(dpb, decode_params->poc_st_curr_before[i]);
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = find_ref_pic_index(dpb, decode_params->poc_lt_curr[i]);
+
+ i = 0;
+ while (j < ARRAY_SIZE(list1))
+ list1[j++] = list1[i++];
+
+ for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_reg_write(vpu, &ref_pic_regs0[i], list0[i]);
+ hantro_reg_write(vpu, &ref_pic_regs1[i], list1[i]);
+ }
+}
+
+static int set_ref(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ struct hantro_dev *vpu = ctx->dev;
+ size_t cr_offset = hantro_hevc_chroma_offset(sps);
+ size_t mv_offset = hantro_hevc_motion_vectors_offset(sps);
+ u32 max_ref_frames;
+ u16 dpb_longterm_e;
+ static const struct hantro_reg cur_poc[] = {
+ hevc_cur_poc_00,
+ hevc_cur_poc_01,
+ hevc_cur_poc_02,
+ hevc_cur_poc_03,
+ hevc_cur_poc_04,
+ hevc_cur_poc_05,
+ hevc_cur_poc_06,
+ hevc_cur_poc_07,
+ hevc_cur_poc_08,
+ hevc_cur_poc_09,
+ hevc_cur_poc_10,
+ hevc_cur_poc_11,
+ hevc_cur_poc_12,
+ hevc_cur_poc_13,
+ hevc_cur_poc_14,
+ hevc_cur_poc_15,
+ };
+ unsigned int i;
+
+ max_ref_frames = decode_params->num_poc_lt_curr +
+ decode_params->num_poc_st_curr_before +
+ decode_params->num_poc_st_curr_after;
+ /*
+ * Set max_ref_frames to non-zero to avoid HW hang when decoding
+ * badly marked I-frames.
+ */
+ max_ref_frames = max_ref_frames ? max_ref_frames : 1;
+ hantro_reg_write(vpu, &g2_num_ref_frames, max_ref_frames);
+ hantro_reg_write(vpu, &g2_filter_over_slices,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_over_tiles,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED));
+
+ /*
+ * Write the POC difference from the current picture. For frame decoding,
+ * only pic_order_cnt[0] is used; pic_order_cnt[1] is for field coding.
+ */
+ for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) {
+ char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt[0];
+
+ hantro_reg_write(vpu, &cur_poc[i], poc_diff);
+ }
+
+ if (i < ARRAY_SIZE(cur_poc)) {
+ /*
+ * After the references, fill one entry pointing to itself,
+ * i.e. difference is zero.
+ */
+ hantro_reg_write(vpu, &cur_poc[i], 0);
+ i++;
+ }
+
+ /* Fill the rest with the current picture */
+ for (; i < ARRAY_SIZE(cur_poc); i++)
+ hantro_reg_write(vpu, &cur_poc[i], decode_params->pic_order_cnt_val);
+
+ set_ref_pic_list(ctx);
+
+ /* We will only keep the reference pictures that are still used */
+ ctx->hevc_dec.ref_bufs_used = 0;
+
+ /* Set up addresses of DPB buffers */
+ dpb_longterm_e = 0;
+ for (i = 0; i < decode_params->num_active_dpb_entries &&
+ i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) {
+ luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt[0]);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ if (dpb[i].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
+
+ hantro_write_addr(vpu, G2_REG_ADDR_REF(i), luma_addr);
+ hantro_write_addr(vpu, G2_REG_CHR_REF(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REG_DMV_REF(i), mv_addr);
+ }
+
+ luma_addr = hantro_hevc_get_ref_buf(ctx, decode_params->pic_order_cnt_val);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, G2_REG_ADDR_REF(i), luma_addr);
+ hantro_write_addr(vpu, G2_REG_CHR_REF(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REG_DMV_REF(i++), mv_addr);
+
+ hantro_write_addr(vpu, G2_ADDR_DST, luma_addr);
+ hantro_write_addr(vpu, G2_ADDR_DST_CHR, chroma_addr);
+ hantro_write_addr(vpu, G2_ADDR_DST_MV, mv_addr);
+
+ hantro_hevc_ref_remove_unused(ctx);
+
+ for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_write_addr(vpu, G2_REG_ADDR_REF(i), 0);
+ hantro_write_addr(vpu, G2_REG_CHR_REF(i), 0);
+ hantro_write_addr(vpu, G2_REG_DMV_REF(i), 0);
+ }
+
+ hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
+
+ return 0;
+}
+
+static void set_buffers(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ size_t cr_offset = hantro_hevc_chroma_offset(sps);
+ dma_addr_t src_dma, dst_dma;
+ u32 src_len, src_buf_len;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ src_buf_len = vb2_plane_size(&src_buf->vb2_buf, 0);
+
+ hantro_write_addr(vpu, G2_ADDR_STR, src_dma);
+ hantro_reg_write(vpu, &g2_stream_len, src_len);
+ hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
+ hantro_reg_write(vpu, &g2_strm_start_offset, 0);
+ hantro_reg_write(vpu, &g2_write_mvs_e, 1);
+
+ /* Destination (decoded frame) buffer. */
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
+
+ hantro_write_addr(vpu, G2_RASTER_SCAN, dst_dma);
+ hantro_write_addr(vpu, G2_RASTER_SCAN_CHR, dst_dma + cr_offset);
+ hantro_write_addr(vpu, G2_ADDR_TILE_SIZE, ctx->hevc_dec.tile_sizes.dma);
+ hantro_write_addr(vpu, G2_TILE_FILTER, ctx->hevc_dec.tile_filter.dma);
+ hantro_write_addr(vpu, G2_TILE_SAO, ctx->hevc_dec.tile_sao.dma);
+ hantro_write_addr(vpu, G2_TILE_BSD, ctx->hevc_dec.tile_bsd.dma);
+}
+
+static void hantro_g2_check_idle(struct hantro_dev *vpu)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ u32 status;
+
+ /* Make sure the VPU is idle */
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ if (status & G2_REG_INTERRUPT_DEC_E) {
+ dev_warn(vpu->dev, "device still running, aborting");
+ status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
+ vdpu_write(vpu, status, G2_REG_INTERRUPT);
+ }
+ }
+}
+
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int ret;
+
+ hantro_g2_check_idle(vpu);
+
+ /* Prepare HEVC decoder context. */
+ ret = hantro_hevc_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ /* Configure hardware registers. */
+ set_params(ctx);
+
+ /* set reference pictures */
+ ret = set_ref(ctx);
+ if (ret)
+ return ret;
+
+ set_buffers(ctx);
+ prepare_tile_info_buffer(ctx);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
+ hantro_reg_write(vpu, &g2_clk_gate_e, 1);
+
+ /* Don't disable output */
+ hantro_reg_write(vpu, &g2_out_dis, 0);
+
+ /* Don't compress buffers */
+ hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
+
+ /* Use NV12 as the output format */
+ hantro_reg_write(vpu, &g2_out_rs_e, 1);
+
+ /* Bus width and max burst */
+ hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(vpu, &g2_max_burst, 16);
+
+ /* Swap */
+ hantro_reg_write(vpu, &g2_strm_swap, 0xf);
+ hantro_reg_write(vpu, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(vpu, &g2_compress_swap, 0xf);
+
+ /* Start decoding! */
+ vdpu_write(vpu, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/staging/media/hantro/hantro_g2_regs.h b/drivers/staging/media/hantro/hantro_g2_regs.h
new file mode 100644
index 000000000000..bb22fa921914
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g2_regs.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#ifndef HANTRO_G2_REGS_H_
+#define HANTRO_G2_REGS_H_
+
+#include "hantro.h"
+
+#define G2_SWREG(nr) ((nr) * 4)
+
+#define G2_DEC_REG(b, s, m) \
+ ((const struct hantro_reg) { \
+ .base = G2_SWREG(b), \
+ .shift = s, \
+ .mask = m, \
+ })
+
+#define G2_REG_VERSION G2_SWREG(0)
+
+#define G2_REG_INTERRUPT G2_SWREG(1)
+#define G2_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G2_REG_INTERRUPT_DEC_ABORT_E BIT(5)
+#define G2_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G2_REG_INTERRUPT_DEC_E BIT(0)
+
+#define g2_strm_swap G2_DEC_REG(2, 28, 0xf)
+#define g2_dirmv_swap G2_DEC_REG(2, 20, 0xf)
+
+#define g2_mode G2_DEC_REG(3, 27, 0x1f)
+#define g2_compress_swap G2_DEC_REG(3, 20, 0xf)
+#define g2_ref_compress_bypass G2_DEC_REG(3, 17, 0x1)
+#define g2_out_rs_e G2_DEC_REG(3, 16, 0x1)
+#define g2_out_dis G2_DEC_REG(3, 15, 0x1)
+#define g2_out_filtering_dis G2_DEC_REG(3, 14, 0x1)
+#define g2_write_mvs_e G2_DEC_REG(3, 12, 0x1)
+
+#define g2_pic_width_in_cbs G2_DEC_REG(4, 19, 0x1fff)
+#define g2_pic_height_in_cbs G2_DEC_REG(4, 6, 0x1fff)
+#define g2_num_ref_frames G2_DEC_REG(4, 0, 0x1f)
+
+#define g2_scaling_list_e G2_DEC_REG(5, 24, 0x1)
+#define g2_cb_qp_offset G2_DEC_REG(5, 19, 0x1f)
+#define g2_cr_qp_offset G2_DEC_REG(5, 14, 0x1f)
+#define g2_sign_data_hide G2_DEC_REG(5, 12, 0x1)
+#define g2_tempor_mvp_e G2_DEC_REG(5, 11, 0x1)
+#define g2_max_cu_qpd_depth G2_DEC_REG(5, 5, 0x3f)
+#define g2_cu_qpd_e G2_DEC_REG(5, 4, 0x1)
+
+#define g2_stream_len G2_DEC_REG(6, 0, 0xffffffff)
+
+#define g2_cabac_init_present G2_DEC_REG(7, 31, 0x1)
+#define g2_weight_pred_e G2_DEC_REG(7, 28, 0x1)
+#define g2_weight_bipr_idc G2_DEC_REG(7, 26, 0x3)
+#define g2_filter_over_slices G2_DEC_REG(7, 25, 0x1)
+#define g2_filter_over_tiles G2_DEC_REG(7, 24, 0x1)
+#define g2_asym_pred_e G2_DEC_REG(7, 23, 0x1)
+#define g2_sao_e G2_DEC_REG(7, 22, 0x1)
+#define g2_pcm_filt_d G2_DEC_REG(7, 21, 0x1)
+#define g2_slice_chqp_present G2_DEC_REG(7, 20, 0x1)
+#define g2_dependent_slice G2_DEC_REG(7, 19, 0x1)
+#define g2_filter_override G2_DEC_REG(7, 18, 0x1)
+#define g2_strong_smooth_e G2_DEC_REG(7, 17, 0x1)
+#define g2_filt_offset_beta G2_DEC_REG(7, 12, 0x1f)
+#define g2_filt_offset_tc G2_DEC_REG(7, 7, 0x1f)
+#define g2_slice_hdr_ext_e G2_DEC_REG(7, 6, 0x1)
+#define g2_slice_hdr_ext_bits G2_DEC_REG(7, 3, 0x7)
+
+#define g2_const_intra_e G2_DEC_REG(8, 31, 0x1)
+#define g2_filt_ctrl_pres G2_DEC_REG(8, 30, 0x1)
+#define g2_idr_pic_e G2_DEC_REG(8, 16, 0x1)
+#define g2_bit_depth_pcm_y G2_DEC_REG(8, 12, 0xf)
+#define g2_bit_depth_pcm_c G2_DEC_REG(8, 8, 0xf)
+#define g2_bit_depth_y_minus8 G2_DEC_REG(8, 6, 0x3)
+#define g2_bit_depth_c_minus8 G2_DEC_REG(8, 4, 0x3)
+#define g2_output_8_bits G2_DEC_REG(8, 3, 0x1)
+
+#define g2_refidx1_active G2_DEC_REG(9, 19, 0x1f)
+#define g2_refidx0_active G2_DEC_REG(9, 14, 0x1f)
+#define g2_hdr_skip_length G2_DEC_REG(9, 0, 0x3fff)
+
+#define g2_start_code_e G2_DEC_REG(10, 31, 0x1)
+#define g2_init_qp G2_DEC_REG(10, 24, 0x3f)
+#define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f)
+#define g2_num_tile_rows G2_DEC_REG(10, 14, 0x1f)
+#define g2_tile_e G2_DEC_REG(10, 1, 0x1)
+#define g2_entropy_sync_e G2_DEC_REG(10, 0, 0x1)
+
+#define g2_refer_lterm_e G2_DEC_REG(12, 16, 0xffff)
+#define g2_min_cb_size G2_DEC_REG(12, 13, 0x7)
+#define g2_max_cb_size G2_DEC_REG(12, 10, 0x7)
+#define g2_min_pcm_size G2_DEC_REG(12, 7, 0x7)
+#define g2_max_pcm_size G2_DEC_REG(12, 4, 0x7)
+#define g2_pcm_e G2_DEC_REG(12, 3, 0x1)
+#define g2_transform_skip G2_DEC_REG(12, 2, 0x1)
+#define g2_transq_bypass G2_DEC_REG(12, 1, 0x1)
+#define g2_list_mod_e G2_DEC_REG(12, 0, 0x1)
+
+#define hevc_min_trb_size G2_DEC_REG(13, 13, 0x7)
+#define hevc_max_trb_size G2_DEC_REG(13, 10, 0x7)
+#define hevc_max_intra_hierdepth G2_DEC_REG(13, 7, 0x7)
+#define hevc_max_inter_hierdepth G2_DEC_REG(13, 4, 0x7)
+#define hevc_parallel_merge G2_DEC_REG(13, 0, 0xf)
+
+#define hevc_rlist_f0 G2_DEC_REG(14, 0, 0x1f)
+#define hevc_rlist_f1 G2_DEC_REG(14, 10, 0x1f)
+#define hevc_rlist_f2 G2_DEC_REG(14, 20, 0x1f)
+#define hevc_rlist_b0 G2_DEC_REG(14, 5, 0x1f)
+#define hevc_rlist_b1 G2_DEC_REG(14, 15, 0x1f)
+#define hevc_rlist_b2 G2_DEC_REG(14, 25, 0x1f)
+
+#define hevc_rlist_f3 G2_DEC_REG(15, 0, 0x1f)
+#define hevc_rlist_f4 G2_DEC_REG(15, 10, 0x1f)
+#define hevc_rlist_f5 G2_DEC_REG(15, 20, 0x1f)
+#define hevc_rlist_b3 G2_DEC_REG(15, 5, 0x1f)
+#define hevc_rlist_b4 G2_DEC_REG(15, 15, 0x1f)
+#define hevc_rlist_b5 G2_DEC_REG(15, 25, 0x1f)
+
+#define hevc_rlist_f6 G2_DEC_REG(16, 0, 0x1f)
+#define hevc_rlist_f7 G2_DEC_REG(16, 10, 0x1f)
+#define hevc_rlist_f8 G2_DEC_REG(16, 20, 0x1f)
+#define hevc_rlist_b6 G2_DEC_REG(16, 5, 0x1f)
+#define hevc_rlist_b7 G2_DEC_REG(16, 15, 0x1f)
+#define hevc_rlist_b8 G2_DEC_REG(16, 25, 0x1f)
+
+#define hevc_rlist_f9 G2_DEC_REG(17, 0, 0x1f)
+#define hevc_rlist_f10 G2_DEC_REG(17, 10, 0x1f)
+#define hevc_rlist_f11 G2_DEC_REG(17, 20, 0x1f)
+#define hevc_rlist_b9 G2_DEC_REG(17, 5, 0x1f)
+#define hevc_rlist_b10 G2_DEC_REG(17, 15, 0x1f)
+#define hevc_rlist_b11 G2_DEC_REG(17, 25, 0x1f)
+
+#define hevc_rlist_f12 G2_DEC_REG(18, 0, 0x1f)
+#define hevc_rlist_f13 G2_DEC_REG(18, 10, 0x1f)
+#define hevc_rlist_f14 G2_DEC_REG(18, 20, 0x1f)
+#define hevc_rlist_b12 G2_DEC_REG(18, 5, 0x1f)
+#define hevc_rlist_b13 G2_DEC_REG(18, 15, 0x1f)
+#define hevc_rlist_b14 G2_DEC_REG(18, 25, 0x1f)
+
+#define hevc_rlist_f15 G2_DEC_REG(19, 0, 0x1f)
+#define hevc_rlist_b15 G2_DEC_REG(19, 5, 0x1f)
+
+#define g2_partial_ctb_x G2_DEC_REG(20, 31, 0x1)
+#define g2_partial_ctb_y G2_DEC_REG(20, 30, 0x1)
+#define g2_pic_width_4x4 G2_DEC_REG(20, 16, 0xfff)
+#define g2_pic_height_4x4 G2_DEC_REG(20, 0, 0xfff)
+#define hevc_cur_poc_00 G2_DEC_REG(46, 24, 0xff)
+#define hevc_cur_poc_01 G2_DEC_REG(46, 16, 0xff)
+#define hevc_cur_poc_02 G2_DEC_REG(46, 8, 0xff)
+#define hevc_cur_poc_03 G2_DEC_REG(46, 0, 0xff)
+
+#define hevc_cur_poc_04 G2_DEC_REG(47, 24, 0xff)
+#define hevc_cur_poc_05 G2_DEC_REG(47, 16, 0xff)
+#define hevc_cur_poc_06 G2_DEC_REG(47, 8, 0xff)
+#define hevc_cur_poc_07 G2_DEC_REG(47, 0, 0xff)
+
+#define hevc_cur_poc_08 G2_DEC_REG(48, 24, 0xff)
+#define hevc_cur_poc_09 G2_DEC_REG(48, 16, 0xff)
+#define hevc_cur_poc_10 G2_DEC_REG(48, 8, 0xff)
+#define hevc_cur_poc_11 G2_DEC_REG(48, 0, 0xff)
+
+#define hevc_cur_poc_12 G2_DEC_REG(49, 24, 0xff)
+#define hevc_cur_poc_13 G2_DEC_REG(49, 16, 0xff)
+#define hevc_cur_poc_14 G2_DEC_REG(49, 8, 0xff)
+#define hevc_cur_poc_15 G2_DEC_REG(49, 0, 0xff)
+
+#define g2_apf_threshold G2_DEC_REG(55, 0, 0xffff)
+
+#define g2_clk_gate_e G2_DEC_REG(58, 16, 0x1)
+#define g2_buswidth G2_DEC_REG(58, 8, 0x7)
+#define g2_max_burst G2_DEC_REG(58, 0, 0xff)
+
+#define G2_REG_CONFIG G2_SWREG(58)
+#define G2_REG_CONFIG_DEC_CLK_GATE_E BIT(16)
+#define G2_REG_CONFIG_DEC_CLK_GATE_IDLE_E BIT(17)
+
+#define G2_ADDR_DST (G2_SWREG(65))
+#define G2_REG_ADDR_REF(i) (G2_SWREG(67) + ((i) * 0x8))
+#define G2_ADDR_DST_CHR (G2_SWREG(99))
+#define G2_REG_CHR_REF(i) (G2_SWREG(101) + ((i) * 0x8))
+#define G2_ADDR_DST_MV (G2_SWREG(133))
+#define G2_REG_DMV_REF(i) (G2_SWREG(135) + ((i) * 0x8))
+#define G2_ADDR_TILE_SIZE (G2_SWREG(167))
+#define G2_ADDR_STR (G2_SWREG(169))
+#define HEVC_SCALING_LIST (G2_SWREG(171))
+#define G2_RASTER_SCAN (G2_SWREG(175))
+#define G2_RASTER_SCAN_CHR (G2_SWREG(177))
+#define G2_TILE_FILTER (G2_SWREG(179))
+#define G2_TILE_SAO (G2_SWREG(181))
+#define G2_TILE_BSD (G2_SWREG(183))
+
+#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
+#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
+
+#endif
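
Every g2_*/hevc_* definition above is a (register, shift, mask) triple produced by G2_DEC_REG, so a single table describes all sub-register fields. The driver's hantro_reg_write() helper is declared elsewhere (hantro.h); the following is only a plausible read-modify-write equivalent, written to illustrate the encoding, not copied from the driver:

#include <stdint.h>

struct g2_field {
	uint32_t base;	/* byte offset of the 32-bit register */
	uint32_t shift;	/* bit position of the field's LSB */
	uint32_t mask;	/* right-aligned field mask */
};

/*
 * Illustrative read-modify-write of one field in a memory-mapped
 * register file. This mirrors what a helper like hantro_reg_write()
 * conventionally does; it is an assumption, not the driver's code.
 */
static void g2_field_write(volatile uint32_t *regs,
			   const struct g2_field *f, uint32_t val)
{
	uint32_t v = regs[f->base / 4];

	v &= ~(f->mask << f->shift);
	v |= (val & f->mask) << f->shift;
	regs[f->base / 4] = v;
}

With g2_max_burst = G2_DEC_REG(58, 0, 0xff), for instance, writing 16 clears bits 7:0 of swreg 58 and stores 16 there.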
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index b88dc4ed06db..56cf261a8e95 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -88,7 +88,7 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
}
}
-void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
@@ -136,6 +136,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, H1_REG_ENC_CTRL);
+
+ return 0;
}
void hantro_jpeg_enc_done(struct hantro_ctx *ctx)
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c
new file mode 100644
index 000000000000..5347f5a41c2a
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_hevc.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VERT_FILTER_RAM_SIZE 8 /* bytes per pixel row */
+/*
+ * BSD control data of current picture at tile border
+ * 128 bits per 4x4 tile = 128/(8*4) bytes per row
+ */
+#define BSD_CTRL_RAM_SIZE 4 /* bytes per pixel row */
+/* tile border coefficients of filter */
+#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel */
+
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+#define UNUSED_REF -1
+
+#define G2_ALIGN 16
+
+size_t hantro_hevc_chroma_offset(const struct v4l2_ctrl_hevc_sps *sps)
+{
+ int bytes_per_pixel = sps->bit_depth_luma_minus8 == 0 ? 1 : 2;
+
+ return sps->pic_width_in_luma_samples *
+ sps->pic_height_in_luma_samples * bytes_per_pixel;
+}
+
+size_t hantro_hevc_motion_vectors_offset(const struct v4l2_ctrl_hevc_sps *sps)
+{
+ size_t cr_offset = hantro_hevc_chroma_offset(sps);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
+static size_t hantro_hevc_mv_size(const struct v4l2_ctrl_hevc_sps *sps)
+{
+ u32 min_cb_log2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ u32 ctb_log2_size_y = min_cb_log2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
+ u32 pic_width_in_ctbs_y = (sps->pic_width_in_luma_samples + (1 << ctb_log2_size_y) - 1)
+ >> ctb_log2_size_y;
+ u32 pic_height_in_ctbs_y = (sps->pic_height_in_luma_samples + (1 << ctb_log2_size_y) - 1)
+ >> ctb_log2_size_y;
+ size_t mv_size;
+
+ mv_size = pic_width_in_ctbs_y * pic_height_in_ctbs_y *
+ (1 << (2 * (ctb_log2_size_y - 4))) * 16;
+
+ vpu_debug(4, "%dx%d (CTBs) %zu MV bytes\n",
+ pic_width_in_ctbs_y, pic_height_in_ctbs_y, mv_size);
+
+ return mv_size;
+}
+
+static size_t hantro_hevc_ref_size(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+
+ return hantro_hevc_motion_vectors_offset(sps) + hantro_hevc_mv_size(sps);
+}
+
+static void hantro_hevc_ref_free(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ struct hantro_dev *vpu = ctx->dev;
+ int i;
+
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs[i].cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->ref_bufs[i].size,
+ hevc_dec->ref_bufs[i].cpu,
+ hevc_dec->ref_bufs[i].dma);
+ }
+}
+
+static void hantro_hevc_ref_init(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ for (i = 0; i < NUM_REF_PICTURES; i++)
+ hevc_dec->ref_bufs_poc[i] = UNUSED_REF;
+}
+
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
+ int poc)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Find the reference buffer among the already known ones */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs_poc[i] == poc) {
+ hevc_dec->ref_bufs_used |= 1 << i;
+ return hevc_dec->ref_bufs[i].dma;
+ }
+ }
+
+ /* Allocate a new reference buffer */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs_poc[i] == UNUSED_REF) {
+ if (!hevc_dec->ref_bufs[i].cpu) {
+ struct hantro_dev *vpu = ctx->dev;
+
+ /*
+ * Allocate the space needed for the raw data plus
+ * motion vector data. A possible optimization would be
+ * to allocate the raw data in non-coherent memory and
+ * only clear the motion vector data.
+ */
+ hevc_dec->ref_bufs[i].cpu =
+ dma_alloc_coherent(vpu->dev,
+ hantro_hevc_ref_size(ctx),
+ &hevc_dec->ref_bufs[i].dma,
+ GFP_KERNEL);
+ if (!hevc_dec->ref_bufs[i].cpu)
+ return 0;
+
+ hevc_dec->ref_bufs[i].size = hantro_hevc_ref_size(ctx);
+ }
+ hevc_dec->ref_bufs_used |= 1 << i;
+ memset(hevc_dec->ref_bufs[i].cpu, 0, hantro_hevc_ref_size(ctx));
+ hevc_dec->ref_bufs_poc[i] = poc;
+
+ return hevc_dec->ref_bufs[i].dma;
+ }
+ }
+
+ return 0;
+}
+
+void hantro_hevc_ref_remove_unused(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Just tag buffers as unused, do not free them */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs_poc[i] == UNUSED_REF)
+ continue;
+
+ if (hevc_dec->ref_bufs_used & (1 << i))
+ continue;
+
+ hevc_dec->ref_bufs_poc[i] = UNUSED_REF;
+ }
+}
+
+static int tile_buffer_reallocate(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int height64 = (sps->pic_height_in_luma_samples + 63) & ~63;
+ unsigned int size;
+
+ if (num_tile_cols <= 1 ||
+ num_tile_cols <= hevc_dec->num_tile_cols_allocated)
+ return 0;
+
+ /* Need to reallocate due to tiles passed via PPS */
+ if (hevc_dec->tile_filter.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_sao.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_bsd.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+ }
+
+ size = VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_filter.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_filter.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_filter.size = size;
+
+ size = VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sao.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sao.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_sao.size = size;
+
+ size = BSD_CTRL_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_bsd.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_bsd.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_bsd.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_bsd.size = size;
+
+ hevc_dec->num_tile_cols_allocated = num_tile_cols;
+
+ return 0;
+
+err_free_tile_buffers:
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+ if (hevc_dec->tile_bsd.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+
+ return -ENOMEM;
+}
+
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec;
+ struct hantro_hevc_dec_ctrls *ctrls = &hevc_ctx->ctrls;
+ int ret;
+
+ hantro_start_prepare_run(ctx);
+
+ ctrls->decode_params =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode_params))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ ret = tile_buffer_reallocate(ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+
+ if (hevc_dec->tile_sizes.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sizes.size,
+ hevc_dec->tile_sizes.cpu,
+ hevc_dec->tile_sizes.dma);
+ hevc_dec->tile_sizes.cpu = NULL;
+
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+ if (hevc_dec->tile_bsd.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+
+ hantro_hevc_ref_free(ctx);
+}
+
+int hantro_hevc_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ unsigned int size;
+
+ memset(hevc_dec, 0, sizeof(*hevc_dec));
+
+ /*
+ * Maximum number of tiles times width and height (2 bytes each),
+ * rounded up to the next 16-byte boundary, plus one extra
+ * 16-byte chunk (requested by the hardware designers).
+ */
+ size = round_up(MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 16, 16);
+ hevc_dec->tile_sizes.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sizes.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sizes.cpu)
+ return -ENOMEM;
+
+ hevc_dec->tile_sizes.size = size;
+
+ hantro_hevc_ref_init(ctx);
+
+ return 0;
+}
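
The helpers above fix the layout of each internal reference buffer: luma at offset 0, chroma at width * height * bytes_per_pixel, and motion vectors at ALIGN(luma_size * 3 / 2, 16). A standalone recomputation for an 8-bit 1920x1088 stream with 64-pixel CTBs (illustrative parameters; the driver takes them from the SPS):

#include <stdio.h>

#define G2_ALIGN 16
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/*
 * Recompute the reference buffer layout following the helpers
 * above. Example parameters only.
 */
int main(void)
{
	unsigned long w = 1920, h = 1088, bpp = 1;
	unsigned int ctb_log2 = 6;
	unsigned long chroma_off = w * h * bpp;			/* 2088960 */
	unsigned long mv_off = ALIGN(chroma_off * 3 / 2, G2_ALIGN); /* 3133440 */
	unsigned long w_ctbs = (w + (1UL << ctb_log2) - 1) >> ctb_log2; /* 30 */
	unsigned long h_ctbs = (h + (1UL << ctb_log2) - 1) >> ctb_log2; /* 17 */
	unsigned long mv_size = w_ctbs * h_ctbs *
				(1UL << (2 * (ctb_log2 - 4))) * 16; /* 130560 */

	printf("chroma at %lu, MVs at %lu, %lu bytes per reference\n",
	       chroma_off, mv_off, mv_off + mv_size);
	return 0;
}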
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 83b3e42b63a3..5dcf65805396 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -20,6 +20,8 @@
#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
+#define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1)
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -96,6 +98,46 @@ struct hantro_h264_dec_hw_ctx {
};
/**
+ * struct hantro_hevc_dec_ctrls
+ * @decode_params: Decode params
+ * @sps: SPS info
+ * @pps: PPS info
+ * @hevc_hdr_skip_length: the number of bits to skip in the slice
+ * segment header after the 'slice type' token
+ */
+struct hantro_hevc_dec_ctrls {
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ u32 hevc_hdr_skip_length;
+};
+
+/**
+ * struct hantro_hevc_dec_hw_ctx
+ * @tile_sizes: Tile sizes buffer
+ * @tile_filter: Tile vertical filter buffer
+ * @tile_sao: Tile SAO buffer
+ * @tile_bsd: Tile BSD control buffer
+ * @ref_bufs: Internal reference buffers
+ * @ref_bufs_poc: Internal reference buffers picture order count
+ * @ref_bufs_used: Bitfield of used reference buffers
+ * @ctrls: V4L2 controls attached to a run
+ * @num_tile_cols_allocated: number of tile columns the tile buffers were allocated for
+ */
+struct hantro_hevc_dec_hw_ctx {
+ struct hantro_aux_buf tile_sizes;
+ struct hantro_aux_buf tile_filter;
+ struct hantro_aux_buf tile_sao;
+ struct hantro_aux_buf tile_bsd;
+ struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
+ int ref_bufs_poc[NUM_REF_PICTURES];
+ u32 ref_bufs_used;
+ struct hantro_hevc_dec_ctrls ctrls;
+ unsigned int num_tile_cols_allocated;
+};
+
+/**
* struct hantro_mpeg2_dec_hw_ctx
*
* @qtable: Quantization table
@@ -133,14 +175,15 @@ struct hantro_postproc_ctx {
* Optional and called from process context.
* @run: Start single {en,de}coding job. Called from atomic context
* to indicate that a pair of buffers is ready and the hardware
- * should be programmed and started.
+ * should be programmed and started. Returns zero if OK, a
+ * negative value in error cases.
* @done: Read back processing results and additional data from hardware.
* @reset: Reset the hardware in case of a timeout.
*/
struct hantro_codec_ops {
int (*init)(struct hantro_ctx *ctx);
void (*exit)(struct hantro_ctx *ctx);
- void (*run)(struct hantro_ctx *ctx);
+ int (*run)(struct hantro_ctx *ctx);
void (*done)(struct hantro_ctx *ctx);
void (*reset)(struct hantro_ctx *ctx);
};
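
With ->run() returning int, a failed register setup can be reported before the hardware is ever kicked, instead of waiting for the watchdog timeout. A sketch of how the m2m device_run handler can consume the new return value (hantro_job_finish() and the exact call site live in hantro_drv.c, outside this hunk, so treat the names as assumptions):

/*
 * Sketch only: on a non-zero return from ->run(), finish the job
 * immediately with both buffers in the error state. Assumes a
 * helper like hantro_job_finish() exists; not the verbatim driver.
 */
static void device_run(void *priv)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	ret = ctx->codec_ops->run(ctx);
	if (ret)
		hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}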
@@ -148,22 +191,26 @@ struct hantro_codec_ops {
/**
* enum hantro_enc_fmt - source format ID for hardware registers.
*
- * @RK3288_VPU_ENC_FMT_YUV420P: Y/CbCr 4:2:0 planar format
- * @RK3288_VPU_ENC_FMT_YUV420SP: Y/CbCr 4:2:0 semi-planar format
- * @RK3288_VPU_ENC_FMT_YUYV422: YUV 4:2:2 packed format (YUYV)
- * @RK3288_VPU_ENC_FMT_UYVY422: YUV 4:2:2 packed format (UYVY)
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420P: Y/CbCr 4:2:0 planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420SP: Y/CbCr 4:2:0 semi-planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUYV422: YUV 4:2:2 packed format (YUYV)
+ * @ROCKCHIP_VPU_ENC_FMT_UYVY422: YUV 4:2:2 packed format (UYVY)
*/
enum hantro_enc_fmt {
- RK3288_VPU_ENC_FMT_YUV420P = 0,
- RK3288_VPU_ENC_FMT_YUV420SP = 1,
- RK3288_VPU_ENC_FMT_YUYV422 = 2,
- RK3288_VPU_ENC_FMT_UYVY422 = 3,
+ ROCKCHIP_VPU_ENC_FMT_YUV420P = 0,
+ ROCKCHIP_VPU_ENC_FMT_YUV420SP = 1,
+ ROCKCHIP_VPU_ENC_FMT_YUYV422 = 2,
+ ROCKCHIP_VPU_ENC_FMT_UYVY422 = 3,
};
-extern const struct hantro_variant rk3399_vpu_variant;
-extern const struct hantro_variant rk3328_vpu_variant;
-extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_variant imx8mq_vpu_g2_variant;
extern const struct hantro_variant imx8mq_vpu_variant;
+extern const struct hantro_variant rk3036_vpu_variant;
+extern const struct hantro_variant rk3066_vpu_variant;
+extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_variant rk3328_vpu_variant;
+extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant sama5d4_vdec_variant;
extern const struct hantro_postproc_regs hantro_g1_postproc_regs;
@@ -176,8 +223,11 @@ void hantro_irq_done(struct hantro_dev *vpu,
void hantro_start_prepare_run(struct hantro_ctx *ctx);
void hantro_end_prepare_run(struct hantro_ctx *ctx);
-void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
-void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
+irqreturn_t hantro_g1_irq(int irq, void *dev_id);
+void hantro_g1_reset(struct hantro_ctx *ctx);
+
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
void hantro_jpeg_enc_done(struct hantro_ctx *ctx);
@@ -185,10 +235,19 @@ void hantro_jpeg_enc_done(struct hantro_ctx *ctx);
dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
unsigned int dpb_idx);
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
-void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
void hantro_h264_dec_exit(struct hantro_ctx *ctx);
+int hantro_hevc_dec_init(struct hantro_ctx *ctx);
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx);
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx);
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx);
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, int poc);
+void hantro_hevc_ref_remove_unused(struct hantro_ctx *ctx);
+size_t hantro_hevc_chroma_offset(const struct v4l2_ctrl_hevc_sps *sps);
+size_t hantro_hevc_motion_vectors_offset(const struct v4l2_ctrl_hevc_sps *sps);
+
static inline size_t
hantro_h264_mv_size(unsigned int width, unsigned int height)
{
@@ -216,15 +275,15 @@ hantro_h264_mv_size(unsigned int width, unsigned int height)
return 64 * MB_WIDTH(width) * MB_WIDTH(height) + 32;
}
-void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
-void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx);
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
- const struct v4l2_ctrl_mpeg2_quantization *ctrl);
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl);
int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
-void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx);
-void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx);
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx);
int hantro_vp8_dec_init(struct hantro_ctx *ctx);
void hantro_vp8_dec_exit(struct hantro_ctx *ctx);
void hantro_vp8_prob_update(struct hantro_ctx *ctx,
diff --git a/drivers/staging/media/hantro/hantro_mpeg2.c b/drivers/staging/media/hantro/hantro_mpeg2.c
index 1d334e6fcd06..04e545eb0a83 100644
--- a/drivers/staging/media/hantro/hantro_mpeg2.c
+++ b/drivers/staging/media/hantro/hantro_mpeg2.c
@@ -19,7 +19,7 @@ static const u8 zigzag[64] = {
};
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
- const struct v4l2_ctrl_mpeg2_quantization *ctrl)
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl)
{
int i, n;
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
index 6d2a8f2a8f0b..ed8916c950a4 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -50,6 +50,20 @@ const struct hantro_postproc_regs hantro_g1_postproc_regs = {
.display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
};
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (ctx->is_encoder)
+ return false;
+
+ if (!vpu->variant->postproc_fmts)
+ return false;
+
+ return fmt->fourcc != V4L2_PIX_FMT_NV12;
+}
+
void hantro_postproc_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 1bc118e375a1..bcb0bdff4a9a 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -55,7 +55,9 @@ static const struct hantro_fmt *
hantro_get_postproc_formats(const struct hantro_ctx *ctx,
unsigned int *num_fmts)
{
- if (ctx->is_encoder) {
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (ctx->is_encoder || !vpu->variant->postproc_fmts) {
*num_fmts = 0;
return NULL;
}
@@ -390,6 +392,7 @@ hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
break;
default:
@@ -639,7 +642,14 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
ret = hantro_buf_plane_check(vb, pix_fmt);
if (ret)
return ret;
- vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ /*
+ * Buffer's bytesused must be written by driver for CAPTURE buffers.
+ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
+ * it to buffer length).
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type))
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+
return 0;
}
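
The payload is now pre-set only on the CAPTURE side; for OUTPUT (bitstream) buffers the application must report the real compressed size itself. A userspace illustration using the standard V4L2 uAPI (the helper name and trimmed error handling are this example's own):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/*
 * Queue one bitstream buffer with its actual payload. If bytesused
 * were left at 0, v4l2-core would fall back to the full buffer
 * length, as the comment above notes. Illustrative helper only.
 */
static int queue_bitstream(int fd, unsigned int index, unsigned int size)
{
	struct v4l2_plane plane;
	struct v4l2_buffer buf;

	memset(&plane, 0, sizeof(plane));
	memset(&buf, 0, sizeof(buf));
	plane.bytesused = size;
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.m.planes = &plane;
	buf.length = 1;

	return ioctl(fd, VIDIOC_QBUF, &buf);
}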
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c
index c222de075ef4..ea919bfb9891 100644
--- a/drivers/staging/media/hantro/imx8m_vpu_hw.c
+++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c
@@ -11,6 +11,7 @@
#include "hantro.h"
#include "hantro_jpeg.h"
#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
#define CTRL_SOFT_RESET 0x00
#define RESET_G1 BIT(1)
@@ -109,10 +110,10 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = 16,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = 16,
+ .step_height = MB_DIM,
},
},
{
@@ -130,6 +131,26 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
},
};
+static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC_SLICE,
+ .codec_mode = HANTRO_MODE_HEVC_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
@@ -148,9 +169,26 @@ static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t imx8m_vpu_g2_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G2_REG_INTERRUPT);
+ vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
{
- vpu->dec_base = vpu->reg_bases[0];
vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
return 0;
@@ -163,6 +201,13 @@ static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
imx8m_soft_reset(vpu, RESET_G1);
}
+static void imx8m_vpu_g2_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ imx8m_soft_reset(vpu, RESET_G2);
+}
+
/*
* Supported codec ops.
*/
@@ -188,13 +233,25 @@ static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
},
};
+static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
+ [HANTRO_MODE_HEVC_DEC] = {
+ .run = hantro_g2_hevc_dec_run,
+ .reset = imx8m_vpu_g2_reset,
+ .init = hantro_hevc_dec_init,
+ .exit = hantro_hevc_dec_exit,
+ },
+};
+
/*
* VPU variants.
*/
static const struct hantro_irq imx8mq_irqs[] = {
{ "g1", imx8m_vpu_g1_irq },
- { "g2", NULL /* TODO: imx8m_vpu_g2_irq */ },
+};
+
+static const struct hantro_irq imx8mq_g2_irqs[] = {
+ { "g2", imx8m_vpu_g2_irq },
};
static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
@@ -218,3 +275,17 @@ const struct hantro_variant imx8mq_vpu_variant = {
.reg_names = imx8mq_reg_names,
.num_regs = ARRAY_SIZE(imx8mq_reg_names)
};
+
+const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .dec_offset = 0x0,
+ .dec_fmts = imx8m_vpu_g2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_g2_dec_fmts),
+ .codec = HANTRO_HEVC_DECODER,
+ .codec_ops = imx8mq_vpu_g2_codec_ops,
+ .init = imx8mq_vpu_hw_init,
+ .runtime_resume = imx8mq_runtime_resume,
+ .irqs = imx8mq_g2_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_clk_names),
+};
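
The G2 core gets its own variant rather than extending imx8mq_vpu_variant, which implies a second device node bound through its own compatible string. A sketch of the expected match entries (the real table is in hantro_drv.c and the compatible strings here are assumptions, not taken from this hunk):

/*
 * Assumed device-tree binding for the two i.MX8MQ cores; the actual
 * of_device_id table lives in hantro_drv.c.
 */
static const struct of_device_id of_hantro_match[] = {
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
	{ /* sentinel */ }
};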
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
deleted file mode 100644
index 7b299ee3e93d..000000000000
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Hantro VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "hantro.h"
-#include "hantro_jpeg.h"
-#include "hantro_g1_regs.h"
-#include "hantro_h1_regs.h"
-
-#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = HANTRO_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = MB_DIM,
- },
- },
-};
-
-static const struct hantro_fmt rk3288_vpu_postproc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = HANTRO_MODE_NONE,
- },
-};
-
-static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV12,
- .codec_mode = HANTRO_MODE_NONE,
- },
- {
- .fourcc = V4L2_PIX_FMT_H264_SLICE,
- .codec_mode = HANTRO_MODE_H264_DEC,
- .max_depth = 2,
- .frmsize = {
- .min_width = 48,
- .max_width = 4096,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2304,
- .step_height = MB_DIM,
- },
- },
- {
- .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
- .codec_mode = HANTRO_MODE_MPEG2_DEC,
- .max_depth = 2,
- .frmsize = {
- .min_width = 48,
- .max_width = 1920,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
- .step_height = MB_DIM,
- },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8_FRAME,
- .codec_mode = HANTRO_MODE_VP8_DEC,
- .max_depth = 2,
- .frmsize = {
- .min_width = 48,
- .max_width = 3840,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
- .step_height = MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vepu_read(vpu, H1_REG_INTERRUPT);
- state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, H1_REG_INTERRUPT);
- vepu_write(vpu, 0, H1_REG_AXI_CTRL);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rk3288_vdpu_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vdpu_read(vpu, G1_REG_INTERRUPT);
- state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vdpu_write(vpu, 0, G1_REG_INTERRUPT);
- vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3288_vpu_hw_init(struct hantro_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3288_vpu_enc_reset(struct hantro_ctx *ctx)
-{
- struct hantro_dev *vpu = ctx->dev;
-
- vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
- vepu_write(vpu, 0, H1_REG_ENC_CTRL);
- vepu_write(vpu, 0, H1_REG_AXI_CTRL);
-}
-
-static void rk3288_vpu_dec_reset(struct hantro_ctx *ctx)
-{
- struct hantro_dev *vpu = ctx->dev;
-
- vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
- vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
- vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
- [HANTRO_MODE_JPEG_ENC] = {
- .run = hantro_h1_jpeg_enc_run,
- .reset = rk3288_vpu_enc_reset,
- .init = hantro_jpeg_enc_init,
- .done = hantro_jpeg_enc_done,
- .exit = hantro_jpeg_enc_exit,
- },
- [HANTRO_MODE_H264_DEC] = {
- .run = hantro_g1_h264_dec_run,
- .reset = rk3288_vpu_dec_reset,
- .init = hantro_h264_dec_init,
- .exit = hantro_h264_dec_exit,
- },
- [HANTRO_MODE_MPEG2_DEC] = {
- .run = hantro_g1_mpeg2_dec_run,
- .reset = rk3288_vpu_dec_reset,
- .init = hantro_mpeg2_dec_init,
- .exit = hantro_mpeg2_dec_exit,
- },
- [HANTRO_MODE_VP8_DEC] = {
- .run = hantro_g1_vp8_dec_run,
- .reset = rk3288_vpu_dec_reset,
- .init = hantro_vp8_dec_init,
- .exit = hantro_vp8_dec_exit,
- },
-};
-
-/*
- * VPU variant.
- */
-
-static const struct hantro_irq rk3288_irqs[] = {
- { "vepu", rk3288_vepu_irq },
- { "vdpu", rk3288_vdpu_irq },
-};
-
-static const char * const rk3288_clk_names[] = {
- "aclk", "hclk"
-};
-
-const struct hantro_variant rk3288_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3288_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
- .dec_offset = 0x400,
- .dec_fmts = rk3288_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
- .postproc_fmts = rk3288_vpu_postproc_fmts,
- .num_postproc_fmts = ARRAY_SIZE(rk3288_vpu_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
- .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
- HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
- .codec_ops = rk3288_vpu_codec_ops,
- .irqs = rk3288_irqs,
- .num_irqs = ARRAY_SIZE(rk3288_irqs),
- .init = rk3288_vpu_hw_init,
- .clk_names = rk3288_clk_names,
- .num_clocks = ARRAY_SIZE(rk3288_clk_names)
-};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
deleted file mode 100644
index 7a7962cf771e..000000000000
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ /dev/null
@@ -1,222 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Hantro VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "hantro.h"
-#include "hantro_jpeg.h"
-#include "rk3399_vpu_regs.h"
-
-#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = HANTRO_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = HANTRO_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = MB_DIM,
- },
- },
-};
-
-static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV12,
- .codec_mode = HANTRO_MODE_NONE,
- },
- {
- .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
- .codec_mode = HANTRO_MODE_MPEG2_DEC,
- .max_depth = 2,
- .frmsize = {
- .min_width = 48,
- .max_width = 1920,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 1088,
- .step_height = MB_DIM,
- },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8_FRAME,
- .codec_mode = HANTRO_MODE_VP8_DEC,
- .max_depth = 2,
- .frmsize = {
- .min_width = 48,
- .max_width = 3840,
- .step_width = MB_DIM,
- .min_height = 48,
- .max_height = 2160,
- .step_height = MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vepu_read(vpu, VEPU_REG_INTERRUPT);
- state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rk3399_vdpu_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
- state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
- vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3399_vpu_hw_init(struct hantro_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3399_vpu_enc_reset(struct hantro_ctx *ctx)
-{
- struct hantro_dev *vpu = ctx->dev;
-
- vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-}
-
-static void rk3399_vpu_dec_reset(struct hantro_ctx *ctx)
-{
- struct hantro_dev *vpu = ctx->dev;
-
- vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
- vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
- vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
- [HANTRO_MODE_JPEG_ENC] = {
- .run = rk3399_vpu_jpeg_enc_run,
- .reset = rk3399_vpu_enc_reset,
- .init = hantro_jpeg_enc_init,
- .exit = hantro_jpeg_enc_exit,
- },
- [HANTRO_MODE_MPEG2_DEC] = {
- .run = rk3399_vpu_mpeg2_dec_run,
- .reset = rk3399_vpu_dec_reset,
- .init = hantro_mpeg2_dec_init,
- .exit = hantro_mpeg2_dec_exit,
- },
- [HANTRO_MODE_VP8_DEC] = {
- .run = rk3399_vpu_vp8_dec_run,
- .reset = rk3399_vpu_dec_reset,
- .init = hantro_vp8_dec_init,
- .exit = hantro_vp8_dec_exit,
- },
-};
-
-/*
- * VPU variant.
- */
-
-static const struct hantro_irq rk3399_irqs[] = {
- { "vepu", rk3399_vepu_irq },
- { "vdpu", rk3399_vdpu_irq },
-};
-
-static const char * const rk3399_clk_names[] = {
- "aclk", "hclk"
-};
-
-const struct hantro_variant rk3399_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3399_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
- .dec_offset = 0x400,
- .dec_fmts = rk3399_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
- .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
- HANTRO_VP8_DECODER,
- .codec_ops = rk3399_vpu_codec_ops,
- .irqs = rk3399_irqs,
- .num_irqs = ARRAY_SIZE(rk3399_irqs),
- .init = rk3399_vpu_hw_init,
- .clk_names = rk3399_clk_names,
- .num_clocks = ARRAY_SIZE(rk3399_clk_names)
-};
-
-static const struct hantro_irq rk3328_irqs[] = {
- { "vdpu", rk3399_vdpu_irq },
-};
-
-const struct hantro_variant rk3328_vpu_variant = {
- .dec_offset = 0x400,
- .dec_fmts = rk3399_vpu_dec_fmts,
- .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
- .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER,
- .codec_ops = rk3399_vpu_codec_ops,
- .irqs = rk3328_irqs,
- .num_irqs = ARRAY_SIZE(rk3328_irqs),
- .init = rk3399_vpu_hw_init,
- .clk_names = rk3399_clk_names,
- .num_clocks = ARRAY_SIZE(rk3399_clk_names),
-};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
index 3498e6124acd..991213ce1610 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
@@ -28,12 +28,12 @@
#include "hantro.h"
#include "hantro_v4l2.h"
#include "hantro_hw.h"
-#include "rk3399_vpu_regs.h"
+#include "rockchip_vpu2_regs.h"
#define VEPU_JPEG_QUANT_TABLE_COUNT 16
-static void rk3399_vpu_set_src_img_ctrl(struct hantro_dev *vpu,
- struct hantro_ctx *ctx)
+static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
u32 reg;
@@ -59,9 +59,9 @@ static void rk3399_vpu_set_src_img_ctrl(struct hantro_dev *vpu,
vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
}
-static void rk3399_vpu_jpeg_enc_set_buffers(struct hantro_dev *vpu,
- struct hantro_ctx *ctx,
- struct vb2_buffer *src_buf)
+static void rockchip_vpu2_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
dma_addr_t src[3];
@@ -92,9 +92,9 @@ static void rk3399_vpu_jpeg_enc_set_buffers(struct hantro_dev *vpu,
}
static void
-rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
- unsigned char *luma_qtable,
- unsigned char *chroma_qtable)
+rockchip_vpu2_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
{
u32 reg, i;
__be32 *luma_qtable_p;
@@ -118,7 +118,7 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
}
}
-void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
@@ -141,11 +141,11 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
VEPU_REG_ENCODE_START);
- rk3399_vpu_set_src_img_ctrl(vpu, ctx);
- rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
- rk3399_vpu_jpeg_enc_set_qtable(vpu,
- hantro_jpeg_get_qtable(0),
- hantro_jpeg_get_qtable(1));
+ rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
+ rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
+ rockchip_vpu2_jpeg_enc_set_qtable(vpu,
+ hantro_jpeg_get_qtable(0),
+ hantro_jpeg_get_qtable(1));
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
@@ -168,4 +168,6 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
/* Kick the watchdog and start encoding */
hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+
+ return 0;
}
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c
index f610fa5b4335..b66737fab46b 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c
@@ -79,43 +79,34 @@
#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
-#define PICT_TOP_FIELD 1
-#define PICT_BOTTOM_FIELD 2
-#define PICT_FRAME 3
-
static void
-rk3399_vpu_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
- struct hantro_ctx *ctx)
+rockchip_vpu2_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
{
- struct v4l2_ctrl_mpeg2_quantization *quantization;
+ struct v4l2_ctrl_mpeg2_quantisation *q;
- quantization = hantro_get_ctrl(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
- hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, quantization);
- vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
- VDPU_REG_QTABLE_BASE);
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, VDPU_REG_QTABLE_BASE);
}
static void
-rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
- struct hantro_ctx *ctx,
- struct vb2_buffer *src_buf,
- struct vb2_buffer *dst_buf,
- const struct v4l2_mpeg2_sequence *sequence,
- const struct v4l2_mpeg2_picture *picture,
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+rockchip_vpu2_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- switch (picture->picture_coding_type) {
- case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(ctx,
- slice_params->backward_ref_ts);
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
fallthrough;
- case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(ctx,
- slice_params->forward_ref_ts);
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
}
/* Source bitstream buffer */
@@ -126,7 +117,7 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
current_addr = addr;
- if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
addr += ALIGN(ctx->dst_fmt.width, 16);
vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
@@ -136,18 +127,18 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
backward_addr = current_addr;
/* Set forward ref frame (top/bottom field) */
- if (picture->picture_structure == PICT_FRAME ||
- picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
- (picture->picture_structure == PICT_TOP_FIELD &&
- picture->top_field_first) ||
- (picture->picture_structure == PICT_BOTTOM_FIELD &&
- !picture->top_field_first)) {
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_TOP_FIELD) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_TOP_FIELD))) {
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
- } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
- } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
}
@@ -157,13 +148,12 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
}
-void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
- const struct v4l2_mpeg2_sequence *sequence;
- const struct v4l2_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
@@ -171,10 +161,10 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
hantro_start_prepare_run(ctx);
- slice_params = hantro_get_ctrl(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
- sequence = &slice_params->sequence;
- picture = &slice_params->picture;
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
VDPU_REG_DEC_SCMD_DIS(0) |
@@ -183,7 +173,7 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
reg = VDPU_REG_INIT_QP(1) |
- VDPU_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
reg = VDPU_REG_APF_THRESHOLD(8) |
@@ -209,11 +199,11 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
reg = VDPU_REG_RLC_MODE_E(0) |
- VDPU_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
- VDPU_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
- VDPU_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
- VDPU_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
- VDPU_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ VDPU_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ VDPU_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ VDPU_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ VDPU_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ VDPU_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
VDPU_REG_FWD_INTERLACE_E(0) |
VDPU_REG_WRITE_MVS_E(0) |
VDPU_REG_DEC_TIMEOUT_E(1) |
@@ -222,36 +212,37 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
- VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
- VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ VDPU_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
- reg = VDPU_REG_STRM_START_BIT(slice_params->data_bit_offset) |
- VDPU_REG_QSCALE_TYPE(picture->q_scale_type) |
- VDPU_REG_CON_MV_E(picture->concealment_motion_vectors) |
- VDPU_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
- VDPU_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
- VDPU_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ reg = VDPU_REG_STRM_START_BIT(0) |
+ VDPU_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ VDPU_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ VDPU_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ VDPU_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ VDPU_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
- reg = VDPU_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
- VDPU_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
- VDPU_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
- VDPU_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
- VDPU_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ reg = VDPU_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ VDPU_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ VDPU_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ VDPU_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
VDPU_REG_MV_ACCURACY_FWD(1) |
VDPU_REG_MV_ACCURACY_BWD(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
- rk3399_vpu_mpeg2_dec_set_quantization(vpu, ctx);
+ rockchip_vpu2_mpeg2_dec_set_quantisation(vpu, ctx);
- rk3399_vpu_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
- &dst_buf->vb2_buf,
- sequence, picture, slice_params);
+ rockchip_vpu2_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf, seq, pic);
/* Kick the watchdog and start decoding */
hantro_end_prepare_run(ctx);
reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
vdpu_write(vpu, reg, VDPU_SWREG(57));
+
+ return 0;
}
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
index 8661a3cc1e6b..951b55f58a61 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
@@ -503,7 +503,7 @@ static void cfg_buffers(struct hantro_ctx *ctx,
vdpu_write_relaxed(vpu, dst_dma, VDPU_REG_ADDR_DST);
}
-void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_vp8_frame *hdr;
struct hantro_dev *vpu = ctx->dev;
@@ -516,7 +516,7 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (WARN_ON(!hdr))
- return;
+ return -EINVAL;
/* Reset segment_map buffer in keyframe */
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
@@ -589,4 +589,6 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
+
+ return 0;
}
diff --git a/drivers/staging/media/hantro/rk3399_vpu_regs.h b/drivers/staging/media/hantro/rockchip_vpu2_regs.h
index 88d096920f30..49e40889545b 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_regs.h
+++ b/drivers/staging/media/hantro/rockchip_vpu2_regs.h
@@ -6,8 +6,8 @@
* Alpha Lin <alpha.lin@rock-chips.com>
*/
-#ifndef RK3399_VPU_REGS_H_
-#define RK3399_VPU_REGS_H_
+#ifndef ROCKCHIP_VPU2_REGS_H_
+#define ROCKCHIP_VPU2_REGS_H_
/* Encoder registers. */
#define VEPU_REG_VP8_QUT_1ST(i) (0x000 + ((i) * 0x24))
@@ -597,4 +597,4 @@
#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_3(x) (((x) & 0x3ff) << 12)
#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_0(x) (((x) & 0x3ff) << 2)
-#endif /* RK3399_VPU_REGS_H_ */
+#endif /* ROCKCHIP_VPU2_REGS_H_ */
diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/staging/media/hantro/rockchip_vpu_hw.c
new file mode 100644
index 000000000000..3ccc16413f42
--- /dev/null
+++ b/drivers/staging/media/hantro/rockchip_vpu_hw.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_h1_regs.h"
+#include "rockchip_vpu2_regs.h"
+
+#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rockchip_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+};
+
+static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 4096,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2304,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rockchip_vpu1_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
+ state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3036_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLKs to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ clk_set_rate(vpu->clocks[2].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3066_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+}
+
+static void rockchip_vpu1_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_ENC_CTRL);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+}
+
+static void rockchip_vpu2_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
+ vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
+}
+
+static void rockchip_vpu2_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+/*
+ * Supported codec ops.
+ */
+static const struct hantro_codec_ops rk3036_vpu_codec_ops[] = {
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .done = hantro_jpeg_enc_done,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .done = hantro_jpeg_enc_done,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = rockchip_vpu2_mpeg2_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = rockchip_vpu2_vp8_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rockchip_vdpu1_irqs[] = {
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vpu1_irqs[] = {
+ { "vepu", rockchip_vpu1_vepu_irq },
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vdpu2_irqs[] = {
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const struct hantro_irq rockchip_vpu2_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const char * const rk3066_vpu_clk_names[] = {
+ "aclk_vdpu", "hclk_vdpu",
+ "aclk_vepu", "hclk_vepu"
+};
+
+static const char * const rockchip_vpu_clk_names[] = {
+ "aclk", "hclk"
+};
+
+const struct hantro_variant rk3036_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = rk3036_vpu_codec_ops,
+ .irqs = rockchip_vdpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu1_irqs),
+ .init = rk3036_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+/*
+ * Although this variant has separate clocks for the decoder and encoder,
+ * all four of them still need to be enabled for either decoding or
+ * encoding, so the device can't be split into separate g1/h1 variants.
+ */
+const struct hantro_variant rk3066_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3066_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rk3066_vpu_hw_init,
+ .clk_names = rk3066_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3066_vpu_clk_names)
+};
+
+const struct hantro_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3288_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3288_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant rk3328_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vdpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names),
+};
+
+const struct hantro_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
diff --git a/drivers/staging/media/hantro/sama5d4_vdec_hw.c b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
new file mode 100644
index 000000000000..58ae72c2b723
--- /dev/null
+++ b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VDEC driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+};
+
+static const struct hantro_fmt sama5d4_vdec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1280,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 720,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1280,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 720,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1280,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 720,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static int sama5d4_hw_init(struct hantro_dev *vpu)
+{
+ return 0;
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops sama5d4_vdec_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_irq sama5d4_irqs[] = {
+ { "vdec", hantro_g1_irq },
+};
+
+static const char * const sama5d4_clk_names[] = { "vdec_clk" };
+
+const struct hantro_variant sama5d4_vdec_variant = {
+ .dec_fmts = sama5d4_vdec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sama5d4_vdec_fmts),
+ .postproc_fmts = sama5d4_vdec_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sama5d4_vdec_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = sama5d4_vdec_codec_ops,
+ .init = sama5d4_hw_init,
+ .irqs = sama5d4_irqs,
+ .num_irqs = ARRAY_SIZE(sama5d4_irqs),
+ .clk_names = sama5d4_clk_names,
+ .num_clocks = ARRAY_SIZE(sama5d4_clk_names),
+};
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index f21ed881295f..ac5fb332088e 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -79,13 +79,13 @@ static void prp_stop(struct prp_priv *priv)
}
static struct v4l2_mbus_framefmt *
-__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
struct imx_ic_priv *ic_priv = priv->ic_priv;
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
else
return &priv->format_mbus;
}
@@ -95,7 +95,7 @@ __prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
*/
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -115,7 +115,8 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
ret = -EINVAL;
goto out;
}
- infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, code->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD,
+ code->which);
code->code = infmt->code;
break;
default:
@@ -127,7 +128,7 @@ out:
}
static int prp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -139,7 +140,7 @@ static int prp_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -152,7 +153,7 @@ out:
}
static int prp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -171,7 +172,7 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, sdformat->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD, sdformat->which);
switch (sdformat->pad) {
case PRP_SINK_PAD:
@@ -201,7 +202,7 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
imx_media_try_colorimetry(&sdformat->format, true);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
out:
mutex_unlock(&priv->lock);
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index d990553de87b..9b81cfbcd777 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -787,13 +787,13 @@ static void prp_stop(struct prp_priv *priv)
}
static struct v4l2_mbus_framefmt *
-__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
struct imx_ic_priv *ic_priv = priv->ic_priv;
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
@@ -841,7 +841,7 @@ static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
*/
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad >= PRPENCVF_NUM_PADS)
@@ -852,7 +852,7 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
}
static int prp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -864,7 +864,7 @@ static int prp_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -877,7 +877,7 @@ out:
}
static void prp_try_fmt(struct prp_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -894,7 +894,8 @@ static void prp_try_fmt(struct prp_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- infmt = __prp_get_fmt(priv, cfg, PRPENCVF_SINK_PAD, sdformat->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SINK_PAD,
+ sdformat->which);
if (sdformat->pad == PRPENCVF_SRC_PAD) {
sdformat->format.field = infmt->field;
@@ -920,7 +921,7 @@ static void prp_try_fmt(struct prp_priv *priv,
}
static int prp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -938,9 +939,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- prp_try_fmt(priv, cfg, sdformat, &cc);
+ prp_try_fmt(priv, sd_state, sdformat, &cc);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
/* propagate a default format to source pad */
@@ -952,9 +953,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
format.pad = PRPENCVF_SRC_PAD;
format.which = sdformat->which;
format.format = sdformat->format;
- prp_try_fmt(priv, cfg, &format, &outcc);
+ prp_try_fmt(priv, sd_state, &format, &outcc);
- outfmt = __prp_get_fmt(priv, cfg, PRPENCVF_SRC_PAD,
+ outfmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SRC_PAD,
sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -970,7 +971,7 @@ out:
}
static int prp_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -988,7 +989,7 @@ static int prp_enum_frame_size(struct v4l2_subdev *sd,
format.format.code = fse->code;
format.format.width = 1;
format.format.height = 1;
- prp_try_fmt(priv, cfg, &format, &cc);
+ prp_try_fmt(priv, sd_state, &format, &cc);
fse->min_width = format.format.width;
fse->min_height = format.format.height;
@@ -1000,7 +1001,7 @@ static int prp_enum_frame_size(struct v4l2_subdev *sd,
format.format.code = fse->code;
format.format.width = -1;
format.format.height = -1;
- prp_try_fmt(priv, cfg, &format, &cc);
+ prp_try_fmt(priv, sd_state, &format, &cc);
fse->max_width = format.format.width;
fse->max_height = format.format.height;
out:
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index e3bfd635a89a..bb1305c9daaf 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -750,9 +750,10 @@ static int csi_setup(struct csi_priv *priv)
static int csi_start(struct csi_priv *priv)
{
- struct v4l2_fract *output_fi;
+ struct v4l2_fract *input_fi, *output_fi;
int ret;
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
output_fi = &priv->frame_interval[priv->active_output_pad];
/* start upstream */
@@ -761,6 +762,17 @@ static int csi_start(struct csi_priv *priv)
if (ret)
return ret;
+ /* Skip first few frames from a BT.656 source */
+ if (priv->upstream_ep.bus_type == V4L2_MBUS_BT656) {
+ u32 delay_usec, bad_frames = 20;
+
+ delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
+ input_fi->numerator * bad_frames,
+ input_fi->denominator);
+
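+ /*
+ * Worked example (assumed NTSC-like input): with input_fi =
+ * 1001/30000 and bad_frames = 20, the delay comes to
+ * 1000000 * 1001 * 20 / 30000 ~= 667334 us, i.e. roughly two
+ * thirds of a second of frames skipped before capture starts.
+ */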
+ usleep_range(delay_usec, delay_usec + 1000);
+ }
+
if (priv->dest == IPU_CSI_DEST_IDMAC) {
ret = csi_idmac_start(priv);
if (ret)
@@ -1139,31 +1151,32 @@ static int csi_link_validate(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *
-__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
static struct v4l2_rect *
-__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&priv->sd, cfg, CSI_SINK_PAD);
+ return v4l2_subdev_get_try_crop(&priv->sd, sd_state,
+ CSI_SINK_PAD);
else
return &priv->crop;
}
static struct v4l2_rect *
-__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&priv->sd, cfg,
+ return v4l2_subdev_get_try_compose(&priv->sd, sd_state,
CSI_SINK_PAD);
else
return &priv->compose;
@@ -1171,7 +1184,7 @@ __csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
static void csi_try_crop(struct csi_priv *priv,
struct v4l2_rect *crop,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *infmt,
struct v4l2_fwnode_endpoint *upstream_ep)
{
@@ -1210,7 +1223,7 @@ static void csi_try_crop(struct csi_priv *priv,
}
static int csi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1221,7 +1234,7 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, code->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, code->which);
incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
switch (code->pad) {
@@ -1263,7 +1276,7 @@ out:
}
static int csi_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1282,7 +1295,7 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
fse->min_height = MIN_H;
fse->max_height = MAX_H;
} else {
- crop = __csi_get_crop(priv, cfg, fse->which);
+ crop = __csi_get_crop(priv, sd_state, fse->which);
fse->min_width = fse->index & 1 ?
crop->width / 2 : crop->width;
@@ -1297,7 +1310,7 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
}
static int csi_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1313,7 +1326,7 @@ static int csi_enum_frame_interval(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
input_fi = &priv->frame_interval[CSI_SINK_PAD];
- crop = __csi_get_crop(priv, cfg, fie->which);
+ crop = __csi_get_crop(priv, sd_state, fie->which);
if ((fie->width != crop->width && fie->width != crop->width / 2) ||
(fie->height != crop->height && fie->height != crop->height / 2)) {
@@ -1333,7 +1346,7 @@ out:
}
static int csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1345,7 +1358,7 @@ static int csi_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -1358,11 +1371,11 @@ out:
}
static void csi_try_field(struct csi_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *infmt =
- __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+ __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
/*
* no restrictions on sink pad field type except must
@@ -1408,7 +1421,7 @@ static void csi_try_field(struct csi_priv *priv,
static void csi_try_fmt(struct csi_priv *priv,
struct v4l2_fwnode_endpoint *upstream_ep,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
struct v4l2_rect *crop,
struct v4l2_rect *compose,
@@ -1418,7 +1431,7 @@ static void csi_try_fmt(struct csi_priv *priv,
struct v4l2_mbus_framefmt *infmt;
u32 code;
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
switch (sdformat->pad) {
case CSI_SRC_PAD_DIRECT:
@@ -1445,7 +1458,7 @@ static void csi_try_fmt(struct csi_priv *priv,
}
}
- csi_try_field(priv, cfg, sdformat);
+ csi_try_field(priv, sd_state, sdformat);
/* propagate colorimetry from sink */
sdformat->format.colorspace = infmt->colorspace;
@@ -1469,7 +1482,7 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- csi_try_field(priv, cfg, sdformat);
+ csi_try_field(priv, sd_state, sdformat);
/* Reset crop and compose rectangles */
crop->left = 0;
@@ -1478,7 +1491,8 @@ static void csi_try_fmt(struct csi_priv *priv,
crop->height = sdformat->format.height;
if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
crop->height *= 2;
- csi_try_crop(priv, crop, cfg, &sdformat->format, upstream_ep);
+ csi_try_crop(priv, crop, sd_state, &sdformat->format,
+ upstream_ep);
compose->left = 0;
compose->top = 0;
compose->width = crop->width;
@@ -1492,7 +1506,7 @@ static void csi_try_fmt(struct csi_priv *priv,
}
static int csi_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1518,12 +1532,13 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- crop = __csi_get_crop(priv, cfg, sdformat->which);
- compose = __csi_get_compose(priv, cfg, sdformat->which);
+ crop = __csi_get_crop(priv, sd_state, sdformat->which);
+ compose = __csi_get_compose(priv, sd_state, sdformat->which);
- csi_try_fmt(priv, &upstream_ep, cfg, sdformat, crop, compose, &cc);
+ csi_try_fmt(priv, &upstream_ep, sd_state, sdformat, crop, compose,
+ &cc);
- fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
if (sdformat->pad == CSI_SINK_PAD) {
@@ -1538,10 +1553,11 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
format.pad = pad;
format.which = sdformat->which;
format.format = sdformat->format;
- csi_try_fmt(priv, &upstream_ep, cfg, &format,
+ csi_try_fmt(priv, &upstream_ep, sd_state, &format,
NULL, compose, &outcc);
- outfmt = __csi_get_fmt(priv, cfg, pad, sdformat->which);
+ outfmt = __csi_get_fmt(priv, sd_state, pad,
+ sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -1558,7 +1574,7 @@ out:
}
static int csi_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1571,9 +1587,9 @@ static int csi_get_selection(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
- crop = __csi_get_crop(priv, cfg, sel->which);
- compose = __csi_get_compose(priv, cfg, sel->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
@@ -1622,7 +1638,7 @@ static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
}
static int csi_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1647,9 +1663,9 @@ static int csi_set_selection(struct v4l2_subdev *sd,
goto out;
}
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
- crop = __csi_get_crop(priv, cfg, sel->which);
- compose = __csi_get_compose(priv, cfg, sel->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
@@ -1665,7 +1681,7 @@ static int csi_set_selection(struct v4l2_subdev *sd,
goto out;
}
- csi_try_crop(priv, &sel->r, cfg, infmt, &upstream_ep);
+ csi_try_crop(priv, &sel->r, sd_state, infmt, &upstream_ep);
*crop = sel->r;
@@ -1706,7 +1722,7 @@ static int csi_set_selection(struct v4l2_subdev *sd,
for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
struct v4l2_mbus_framefmt *outfmt;
- outfmt = __csi_get_fmt(priv, cfg, pad, sel->which);
+ outfmt = __csi_get_fmt(priv, sd_state, pad, sel->which);
outfmt->width = compose->width;
outfmt->height = compose->height;
}
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 5128915a5d6f..6f90acf9c725 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -429,7 +429,7 @@ EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
* of a subdev. Can be used as the .init_cfg pad operation.
*/
int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf_try;
struct v4l2_subdev_format format;
@@ -445,7 +445,7 @@ int imx_media_init_cfg(struct v4l2_subdev *sd,
if (ret)
continue;
- mf_try = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf_try = v4l2_subdev_get_try_format(sd, sd_state, pad);
*mf_try = format.format;
}
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index abf290bda98d..3c2093c520ba 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -532,17 +532,17 @@ out:
}
static struct v4l2_mbus_framefmt *
-__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad >= VDIC_NUM_PADS)
@@ -553,7 +553,7 @@ static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vdic_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -565,7 +565,7 @@ static int vdic_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -578,7 +578,7 @@ out:
}
static void vdic_try_fmt(struct vdic_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -594,7 +594,7 @@ static void vdic_try_fmt(struct vdic_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- infmt = __vdic_get_fmt(priv, cfg, priv->active_input_pad,
+ infmt = __vdic_get_fmt(priv, sd_state, priv->active_input_pad,
sdformat->which);
switch (sdformat->pad) {
@@ -620,7 +620,7 @@ static void vdic_try_fmt(struct vdic_priv *priv,
}
static int vdic_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -638,9 +638,9 @@ static int vdic_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- vdic_try_fmt(priv, cfg, sdformat, &cc);
+ vdic_try_fmt(priv, sd_state, sdformat, &cc);
- fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
/* propagate format to source pad */
@@ -653,9 +653,9 @@ static int vdic_set_fmt(struct v4l2_subdev *sd,
format.pad = VDIC_SRC_PAD_DIRECT;
format.which = sdformat->which;
format.format = sdformat->format;
- vdic_try_fmt(priv, cfg, &format, &outcc);
+ vdic_try_fmt(priv, sd_state, &format, &outcc);
- outfmt = __vdic_get_fmt(priv, cfg, VDIC_SRC_PAD_DIRECT,
+ outfmt = __vdic_get_fmt(priv, sd_state, VDIC_SRC_PAD_DIRECT,
sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 492d9a64e704..6740e7917458 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -193,7 +193,7 @@ int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
u32 width, u32 height, u32 code, u32 field,
const struct imx_media_pixfmt **cc);
int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *sd_state);
void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index fc2378ac04b7..9de0ebd439dc 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -508,17 +508,17 @@ out:
}
static struct v4l2_mbus_framefmt *
-__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_pad_config *cfg,
+__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->sd, sd_state, pad);
else
return &csi2->format_mbus;
}
static int csi2_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
@@ -526,7 +526,7 @@ static int csi2_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&csi2->lock);
- fmt = __csi2_get_fmt(csi2, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
sdformat->format = *fmt;
@@ -536,7 +536,7 @@ static int csi2_get_fmt(struct v4l2_subdev *sd,
}
static int csi2_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
@@ -557,7 +557,7 @@ static int csi2_set_fmt(struct v4l2_subdev *sd,
if (sdformat->pad != CSI2_SINK_PAD)
sdformat->format = csi2->format_mbus;
- fmt = __csi2_get_fmt(csi2, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
out:
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index f85a2f5f1413..894c4de31790 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -724,7 +724,7 @@ out_unlock:
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
@@ -732,7 +732,7 @@ static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
ret = imx_media_init_mbus_fmt(mf, 800, 600, 0, V4L2_FIELD_NONE,
&csi->cc[i]);
@@ -745,18 +745,18 @@ static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *
imx7_csi_get_format(struct imx7_csi *csi,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
return &csi->format_mbus[pad];
}
static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -765,7 +765,8 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
mutex_lock(&csi->lock);
- in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK, code->which);
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
+ code->which);
switch (code->pad) {
case IMX7_CSI_PAD_SINK:
@@ -791,7 +792,7 @@ out_unlock:
}
static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -800,7 +801,8 @@ static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&csi->lock);
- fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out_unlock;
@@ -815,7 +817,7 @@ out_unlock:
}
static int imx7_csi_try_fmt(struct imx7_csi *csi,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -823,7 +825,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
struct v4l2_mbus_framefmt *in_fmt;
u32 code;
- in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK,
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
sdformat->which);
if (!in_fmt)
return -EINVAL;
@@ -868,7 +870,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
}
static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -889,11 +891,12 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
goto out_unlock;
}
- ret = imx7_csi_try_fmt(csi, cfg, sdformat, &cc);
+ ret = imx7_csi_try_fmt(csi, sd_state, sdformat, &cc);
if (ret < 0)
goto out_unlock;
- fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out_unlock;
@@ -906,11 +909,11 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
format.pad = IMX7_CSI_PAD_SRC;
format.which = sdformat->which;
format.format = sdformat->format;
- if (imx7_csi_try_fmt(csi, cfg, &format, &outcc)) {
+ if (imx7_csi_try_fmt(csi, sd_state, &format, &outcc)) {
ret = -EINVAL;
goto out_unlock;
}
- outfmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SRC,
+ outfmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SRC,
sdformat->which);
*outfmt = format.format;
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 025fdc488bd6..ead696eb4610 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -166,15 +167,11 @@
#define MIPI_CSIS_ISP_CONFIG_CH(n) (0x40 + (n) * 0x10)
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x) ((x) << 24)
-#define MIPI_CSIS_ISPCFG_DOUBLE_CMPNT BIT(12)
+#define MIPI_CSIS_ISPCFG_PIXEL_MODE_SINGLE (0 << 12)
+#define MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL (1 << 12)
+#define MIPI_CSIS_ISPCFG_PIXEL_MODE_QUAD (2 << 12) /* i.MX8M[MNP] only */
#define MIPI_CSIS_ISPCFG_ALIGN_32BIT BIT(11)
-#define MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT (0x1e << 2)
-#define MIPI_CSIS_ISPCFG_FMT_RAW8 (0x2a << 2)
-#define MIPI_CSIS_ISPCFG_FMT_RAW10 (0x2b << 2)
-#define MIPI_CSIS_ISPCFG_FMT_RAW12 (0x2c << 2)
-#define MIPI_CSIS_ISPCFG_FMT_RAW14 (0x2d << 2)
-/* User defined formats, x = 1...4 */
-#define MIPI_CSIS_ISPCFG_FMT_USER(x) ((0x30 + (x) - 1) << 2)
+#define MIPI_CSIS_ISPCFG_FMT(fmt) ((fmt) << 2)
#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
/* ISP Image Resolution register */
@@ -195,6 +192,24 @@
/* Debug control register */
#define MIPI_CSIS_DBG_CTRL 0xc0
+#define MIPI_CSIS_DBG_INTR_MSK 0xc4
+#define MIPI_CSIS_DBG_INTR_MSK_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_MSK_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_MSK_ERR_FRAME_SIZE BIT(20)
+#define MIPI_CSIS_DBG_INTR_MSK_TRUNCATED_FRAME BIT(16)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FE BIT(12)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FS BIT(8)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_FALL BIT(4)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_DBG_INTR_SRC 0xc8
+#define MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE BIT(20)
+#define MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME BIT(16)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FE BIT(12)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FS BIT(8)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL BIT(4)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE BIT(0)
/* Non-image packet data buffers */
#define MIPI_CSIS_PKTDATA_ODD 0x2000
@@ -203,6 +218,25 @@
#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+/* MIPI CSI-2 Data Types */
+#define MIPI_CSI2_DATA_TYPE_YUV420_8 0x18
+#define MIPI_CSI2_DATA_TYPE_YUV420_10 0x19
+#define MIPI_CSI2_DATA_TYPE_LE_YUV420_8 0x1a
+#define MIPI_CSI2_DATA_TYPE_CS_YUV420_8 0x1c
+#define MIPI_CSI2_DATA_TYPE_CS_YUV420_10 0x1d
+#define MIPI_CSI2_DATA_TYPE_YUV422_8 0x1e
+#define MIPI_CSI2_DATA_TYPE_YUV422_10 0x1f
+#define MIPI_CSI2_DATA_TYPE_RGB565 0x22
+#define MIPI_CSI2_DATA_TYPE_RGB666 0x23
+#define MIPI_CSI2_DATA_TYPE_RGB888 0x24
+#define MIPI_CSI2_DATA_TYPE_RAW6 0x28
+#define MIPI_CSI2_DATA_TYPE_RAW7 0x29
+#define MIPI_CSI2_DATA_TYPE_RAW8 0x2a
+#define MIPI_CSI2_DATA_TYPE_RAW10 0x2b
+#define MIPI_CSI2_DATA_TYPE_RAW12 0x2c
+#define MIPI_CSI2_DATA_TYPE_RAW14 0x2d
+#define MIPI_CSI2_DATA_TYPE_USER(x) (0x30 + (x))
+
enum {
ST_POWERED = 1,
ST_STREAMING = 2,
@@ -210,6 +244,7 @@ enum {
};
struct mipi_csis_event {
+ bool debug;
u32 mask;
const char * const name;
unsigned int counter;
@@ -217,22 +252,30 @@ struct mipi_csis_event {
static const struct mipi_csis_event mipi_csis_events[] = {
/* Errors */
- { MIPI_CSIS_INT_SRC_ERR_SOT_HS, "SOT Error" },
- { MIPI_CSIS_INT_SRC_ERR_LOST_FS, "Lost Frame Start Error" },
- { MIPI_CSIS_INT_SRC_ERR_LOST_FE, "Lost Frame End Error" },
- { MIPI_CSIS_INT_SRC_ERR_OVER, "FIFO Overflow Error" },
- { MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
- { MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
- { MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
- { MIPI_CSIS_INT_SRC_ERR_UNKNOWN, "Unknown Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_SOT_HS, "SOT Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_LOST_FS, "Lost Frame Start Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_LOST_FE, "Lost Frame End Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_OVER, "FIFO Overflow Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
+ { false, MIPI_CSIS_INT_SRC_ERR_UNKNOWN, "Unknown Error" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT, "Data Type Not Supported" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE, "Data Type Ignored" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE, "Frame Size Error" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME, "Truncated Frame" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE, "Early Frame End" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS, "Early Frame Start" },
/* Non-image data receive events */
- { MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
- { MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
- { MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
- { MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
+ { false, MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { false, MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
+ { false, MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { false, MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
/* Frame start/end */
- { MIPI_CSIS_INT_SRC_FRAME_START, "Frame Start" },
- { MIPI_CSIS_INT_SRC_FRAME_END, "Frame End" },
+ { false, MIPI_CSIS_INT_SRC_FRAME_START, "Frame Start" },
+ { false, MIPI_CSIS_INT_SRC_FRAME_END, "Frame End" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL, "VSYNC Falling Edge" },
+ { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE, "VSYNC Rising Edge" },
};
#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
@@ -241,63 +284,63 @@ enum mipi_csis_clk {
MIPI_CSIS_CLK_PCLK,
MIPI_CSIS_CLK_WRAP,
MIPI_CSIS_CLK_PHY,
+ MIPI_CSIS_CLK_AXI,
};
static const char * const mipi_csis_clk_id[] = {
"pclk",
"wrap",
"phy",
+ "axi",
};
-struct csis_hw_reset {
- struct regmap *src;
- u8 req_src;
- u8 rst_bit;
+enum mipi_csis_version {
+ MIPI_CSIS_V3_3,
+ MIPI_CSIS_V3_6_3,
+};
+
+struct mipi_csis_info {
+ enum mipi_csis_version version;
+ unsigned int num_clocks;
};
struct csi_state {
- /* lock elements below */
- struct mutex lock;
- /* lock for event handler */
- spinlock_t slock;
struct device *dev;
+ void __iomem *regs;
+ struct clk_bulk_data *clks;
+ struct reset_control *mrst;
+ struct regulator *mipi_phy_regulator;
+ const struct mipi_csis_info *info;
+ u8 index;
+
+ struct v4l2_subdev sd;
struct media_pad pads[CSIS_PADS_NUM];
- struct v4l2_subdev mipi_sd;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *src_sd;
- u8 index;
- struct platform_device *pdev;
- struct phy *phy;
- void __iomem *regs;
- int irq;
- u32 flags;
-
- struct dentry *debugfs_root;
- bool debug;
-
- int num_clks;
- struct clk_bulk_data *clks;
-
+ struct v4l2_fwnode_bus_mipi_csi2 bus;
u32 clk_frequency;
u32 hs_settle;
+ u32 clk_settle;
- struct reset_control *mrst;
-
+ struct mutex lock; /* Protect csis_fmt, format_mbus and state */
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format_mbus;
+ u32 state;
- struct v4l2_fwnode_bus_mipi_csi2 bus;
-
+ spinlock_t slock; /* Protect events */
struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
-
- struct csis_hw_reset hw_reset;
- struct regulator *mipi_phy_regulator;
+ struct dentry *debugfs_root;
+ bool debug;
};
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
struct csis_pix_format {
u32 code;
- u32 fmt_reg;
+ u32 data_type;
u8 width;
};
@@ -305,156 +348,117 @@ static const struct csis_pix_format mipi_csis_formats[] = {
/* YUV formats. */
{
.code = MEDIA_BUS_FMT_UYVY8_1X16,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_type = MIPI_CSI2_DATA_TYPE_YUV422_8,
.width = 16,
},
/* RAW (Bayer and greyscale) formats. */
{
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_Y8_1X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW8,
.width = 8,
}, {
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_Y10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW10,
.width = 10,
}, {
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_Y12_1X12,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW12,
.width = 12,
}, {
.code = MEDIA_BUS_FMT_SBGGR14_1X14,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGBRG14_1X14,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SGRBG14_1X14,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}, {
.code = MEDIA_BUS_FMT_SRGGB14_1X14,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .data_type = MIPI_CSI2_DATA_TYPE_RAW14,
.width = 14,
}
};
-static inline void mipi_csis_write(struct csi_state *state, u32 reg, u32 val)
-{
- writel(val, state->regs + reg);
-}
-
-static inline u32 mipi_csis_read(struct csi_state *state, u32 reg)
-{
- return readl(state->regs + reg);
-}
-
-static int mipi_csis_dump_regs(struct csi_state *state)
+static const struct csis_pix_format *find_csis_format(u32 code)
{
- struct device *dev = &state->pdev->dev;
unsigned int i;
- u32 cfg;
- static const struct {
- u32 offset;
- const char * const name;
- } registers[] = {
- { MIPI_CSIS_CMN_CTRL, "CMN_CTRL" },
- { MIPI_CSIS_CLK_CTRL, "CLK_CTRL" },
- { MIPI_CSIS_INT_MSK, "INT_MSK" },
- { MIPI_CSIS_DPHY_STATUS, "DPHY_STATUS" },
- { MIPI_CSIS_DPHY_CMN_CTRL, "DPHY_CMN_CTRL" },
- { MIPI_CSIS_DPHY_SCTRL_L, "DPHY_SCTRL_L" },
- { MIPI_CSIS_DPHY_SCTRL_H, "DPHY_SCTRL_H" },
- { MIPI_CSIS_ISP_CONFIG_CH(0), "ISP_CONFIG_CH0" },
- { MIPI_CSIS_ISP_RESOL_CH(0), "ISP_RESOL_CH0" },
- { MIPI_CSIS_SDW_CONFIG_CH(0), "SDW_CONFIG_CH0" },
- { MIPI_CSIS_SDW_RESOL_CH(0), "SDW_RESOL_CH0" },
- { MIPI_CSIS_DBG_CTRL, "DBG_CTRL" },
- };
- dev_info(dev, "--- REGISTERS ---\n");
-
- for (i = 0; i < ARRAY_SIZE(registers); i++) {
- cfg = mipi_csis_read(state, registers[i].offset);
- dev_info(dev, "%14s: 0x%08x\n", registers[i].name, cfg);
- }
-
- return 0;
+ for (i = 0; i < ARRAY_SIZE(mipi_csis_formats); i++)
+ if (code == mipi_csis_formats[i].code)
+ return &mipi_csis_formats[i];
+ return NULL;
}
-static struct csi_state *
-mipi_notifier_to_csis_state(struct v4l2_async_notifier *n)
-{
- return container_of(n, struct csi_state, notifier);
-}
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
-static struct csi_state *mipi_sd_to_csis_state(struct v4l2_subdev *sdev)
+static inline u32 mipi_csis_read(struct csi_state *state, u32 reg)
{
- return container_of(sdev, struct csi_state, mipi_sd);
+ return readl(state->regs + reg);
}
-static const struct csis_pix_format *find_csis_format(u32 code)
+static inline void mipi_csis_write(struct csi_state *state, u32 reg, u32 val)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(mipi_csis_formats); i++)
- if (code == mipi_csis_formats[i].code)
- return &mipi_csis_formats[i];
- return NULL;
+ writel(val, state->regs + reg);
}
static void mipi_csis_enable_interrupts(struct csi_state *state, bool on)
{
mipi_csis_write(state, MIPI_CSIS_INT_MSK, on ? 0xffffffff : 0);
+ mipi_csis_write(state, MIPI_CSIS_DBG_INTR_MSK, on ? 0xffffffff : 0);
}
static void mipi_csis_sw_reset(struct csi_state *state)
@@ -466,25 +470,6 @@ static void mipi_csis_sw_reset(struct csi_state *state)
usleep_range(10, 20);
}
-static int mipi_csis_phy_init(struct csi_state *state)
-{
- state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
- if (IS_ERR(state->mipi_phy_regulator))
- return PTR_ERR(state->mipi_phy_regulator);
-
- return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
- 1000000);
-}
-
-static void mipi_csis_phy_reset(struct csi_state *state)
-{
- reset_control_assert(state->mrst);
-
- msleep(20);
-
- reset_control_deassert(state->mrst);
-}
-
static void mipi_csis_system_enable(struct csi_state *state, int on)
{
u32 val, mask;
@@ -514,7 +499,7 @@ static void __mipi_csis_set_format(struct csi_state *state)
/* Color format */
val = mipi_csis_read(state, MIPI_CSIS_ISP_CONFIG_CH(0));
val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK);
- val |= state->csis_fmt->fmt_reg;
+ val |= MIPI_CSIS_ISPCFG_FMT(state->csis_fmt->data_type);
mipi_csis_write(state, MIPI_CSIS_ISP_CONFIG_CH(0), val);
/* Pixel resolution */
@@ -546,11 +531,15 @@ static int mipi_csis_calculate_params(struct csi_state *state)
/*
* The HSSETTLE counter value is documented in a table, but can also
- * easily be calculated.
+ * easily be calculated. Hardcode the CLKSETTLE value to 0 for now
+ * (which is documented as corresponding to CSI-2 v0.87 to v1.00) until
+ * we figure out how to compute it correctly.
*/
state->hs_settle = (lane_rate - 5000000) / 45000000;
- dev_dbg(state->dev, "lane rate %u, Ths_settle %u\n",
- lane_rate, state->hs_settle);
+ state->clk_settle = 0;
+
+ dev_dbg(state->dev, "lane rate %u, Tclk_settle %u, Ths_settle %u\n",
+ lane_rate, state->clk_settle, state->hs_settle);
return 0;
}
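As a worked example of the Ths_settle computation above, with illustrative lane rates: 1 Gbit/s gives (1000000000 - 5000000) / 45000000 = 22 after integer division, and 1.5 Gbit/s gives (1500000000 - 5000000) / 45000000 = 33.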
@@ -563,13 +552,15 @@ static void mipi_csis_set_params(struct csi_state *state)
val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
- val |= MIPI_CSIS_CMN_CTRL_INTER_MODE;
+ if (state->info->version == MIPI_CSIS_V3_3)
+ val |= MIPI_CSIS_CMN_CTRL_INTER_MODE;
mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
__mipi_csis_set_format(state);
mipi_csis_write(state, MIPI_CSIS_DPHY_CMN_CTRL,
- MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE(state->hs_settle));
+ MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE(state->hs_settle) |
+ MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE(state->clk_settle));
val = (0 << MIPI_CSIS_ISP_SYNC_HSYNC_LINTV_OFFSET)
| (0 << MIPI_CSIS_ISP_SYNC_VSYNC_SINTV_OFFSET)
@@ -601,31 +592,30 @@ static void mipi_csis_set_params(struct csi_state *state)
static int mipi_csis_clk_enable(struct csi_state *state)
{
- return clk_bulk_prepare_enable(state->num_clks, state->clks);
+ return clk_bulk_prepare_enable(state->info->num_clocks, state->clks);
}
static void mipi_csis_clk_disable(struct csi_state *state)
{
- clk_bulk_disable_unprepare(state->num_clks, state->clks);
+ clk_bulk_disable_unprepare(state->info->num_clocks, state->clks);
}
static int mipi_csis_clk_get(struct csi_state *state)
{
- struct device *dev = &state->pdev->dev;
unsigned int i;
int ret;
- state->num_clks = ARRAY_SIZE(mipi_csis_clk_id);
- state->clks = devm_kcalloc(dev, state->num_clks, sizeof(*state->clks),
- GFP_KERNEL);
+ state->clks = devm_kcalloc(state->dev, state->info->num_clocks,
+ sizeof(*state->clks), GFP_KERNEL);
if (!state->clks)
return -ENOMEM;
- for (i = 0; i < state->num_clks; i++)
+ for (i = 0; i < state->info->num_clocks; i++)
state->clks[i].id = mipi_csis_clk_id[i];
- ret = devm_clk_bulk_get(dev, state->num_clks, state->clks);
+ ret = devm_clk_bulk_get(state->dev, state->info->num_clocks,
+ state->clks);
if (ret < 0)
return ret;
@@ -633,8 +623,8 @@ static int mipi_csis_clk_get(struct csi_state *state)
ret = clk_set_rate(state->clks[MIPI_CSIS_CLK_WRAP].clk,
state->clk_frequency);
if (ret < 0)
- dev_err(dev, "set rate=%d failed: %d\n", state->clk_frequency,
- ret);
+ dev_err(state->dev, "set rate=%d failed: %d\n",
+ state->clk_frequency, ret);
return ret;
}
@@ -653,6 +643,89 @@ static void mipi_csis_stop_stream(struct csi_state *state)
mipi_csis_system_enable(state, false);
}
+static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
+{
+ struct csi_state *state = dev_id;
+ unsigned long flags;
+ unsigned int i;
+ u32 status;
+ u32 dbg_status;
+
+ status = mipi_csis_read(state, MIPI_CSIS_INT_SRC);
+ dbg_status = mipi_csis_read(state, MIPI_CSIS_DBG_INTR_SRC);
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ /* Update the event/error counters */
+ if ((status & MIPI_CSIS_INT_SRC_ERRORS) || state->debug) {
+ for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
+ struct mipi_csis_event *event = &state->events[i];
+
+ if ((!event->debug && (status & event->mask)) ||
+ (event->debug && (dbg_status & event->mask)))
+ event->counter++;
+ }
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ mipi_csis_write(state, MIPI_CSIS_INT_SRC, status);
+ mipi_csis_write(state, MIPI_CSIS_DBG_INTR_SRC, dbg_status);
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * PHY regulator and reset
+ */
+
+static int mipi_csis_phy_enable(struct csi_state *state)
+{
+ if (state->info->version != MIPI_CSIS_V3_3)
+ return 0;
+
+ return regulator_enable(state->mipi_phy_regulator);
+}
+
+static int mipi_csis_phy_disable(struct csi_state *state)
+{
+ if (state->info->version != MIPI_CSIS_V3_3)
+ return 0;
+
+ return regulator_disable(state->mipi_phy_regulator);
+}
+
+static void mipi_csis_phy_reset(struct csi_state *state)
+{
+ if (state->info->version != MIPI_CSIS_V3_3)
+ return;
+
+ reset_control_assert(state->mrst);
+ msleep(20);
+ reset_control_deassert(state->mrst);
+}
+
+static int mipi_csis_phy_init(struct csi_state *state)
+{
+ if (state->info->version != MIPI_CSIS_V3_3)
+ return 0;
+
+ /* Get MIPI PHY reset and regulator. */
+ state->mrst = devm_reset_control_get_exclusive(state->dev, NULL);
+ if (IS_ERR(state->mrst))
+ return PTR_ERR(state->mrst);
+
+ state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+ if (IS_ERR(state->mipi_phy_regulator))
+ return PTR_ERR(state->mipi_phy_regulator);
+
+ return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
+ 1000000);
+}
+
+/* -----------------------------------------------------------------------------
+ * Debug
+ */
+
static void mipi_csis_clear_counters(struct csi_state *state)
{
unsigned long flags;
@@ -666,26 +739,90 @@ static void mipi_csis_clear_counters(struct csi_state *state)
static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
{
- int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
- struct device *dev = &state->pdev->dev;
+ unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
+ : MIPI_CSIS_NUM_EVENTS - 8;
unsigned long flags;
+ unsigned int i;
spin_lock_irqsave(&state->slock, flags);
- for (i--; i >= 0; i--) {
+ for (i = 0; i < num_events; ++i) {
if (state->events[i].counter > 0 || state->debug)
- dev_info(dev, "%s events: %d\n", state->events[i].name,
+ dev_info(state->dev, "%s events: %d\n",
+ state->events[i].name,
state->events[i].counter);
}
spin_unlock_irqrestore(&state->slock, flags);
}
-/*
+static int mipi_csis_dump_regs(struct csi_state *state)
+{
+ static const struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { MIPI_CSIS_CMN_CTRL, "CMN_CTRL" },
+ { MIPI_CSIS_CLK_CTRL, "CLK_CTRL" },
+ { MIPI_CSIS_INT_MSK, "INT_MSK" },
+ { MIPI_CSIS_DPHY_STATUS, "DPHY_STATUS" },
+ { MIPI_CSIS_DPHY_CMN_CTRL, "DPHY_CMN_CTRL" },
+ { MIPI_CSIS_DPHY_SCTRL_L, "DPHY_SCTRL_L" },
+ { MIPI_CSIS_DPHY_SCTRL_H, "DPHY_SCTRL_H" },
+ { MIPI_CSIS_ISP_CONFIG_CH(0), "ISP_CONFIG_CH0" },
+ { MIPI_CSIS_ISP_RESOL_CH(0), "ISP_RESOL_CH0" },
+ { MIPI_CSIS_SDW_CONFIG_CH(0), "SDW_CONFIG_CH0" },
+ { MIPI_CSIS_SDW_RESOL_CH(0), "SDW_RESOL_CH0" },
+ { MIPI_CSIS_DBG_CTRL, "DBG_CTRL" },
+ };
+
+ unsigned int i;
+ u32 cfg;
+
+ dev_info(state->dev, "--- REGISTERS ---\n");
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ cfg = mipi_csis_read(state, registers[i].offset);
+ dev_info(state->dev, "%14s: 0x%08x\n", registers[i].name, cfg);
+ }
+
+ return 0;
+}
+
+static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
+{
+ struct csi_state *state = m->private;
+
+ return mipi_csis_dump_regs(state);
+}
+DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
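+/*
+ * DEFINE_SHOW_ATTRIBUTE() is what produces the mipi_csis_dump_regs_fops
+ * referenced below. Roughly, the macro from <linux/seq_file.h> expands to
+ * the following, so the seq_file's private pointer ends up being the
+ * 'state' passed to debugfs_create_file():
+ *
+ *	static int mipi_csis_dump_regs_open(struct inode *inode,
+ *					    struct file *file)
+ *	{
+ *		return single_open(file, mipi_csis_dump_regs_show,
+ *				   inode->i_private);
+ *	}
+ *
+ *	static const struct file_operations mipi_csis_dump_regs_fops = {
+ *		.owner = THIS_MODULE,
+ *		.open = mipi_csis_dump_regs_open,
+ *		.read = seq_read,
+ *		.llseek = seq_lseek,
+ *		.release = single_release,
+ *	};
+ */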
+
+static void mipi_csis_debugfs_init(struct csi_state *state)
+{
+ state->debugfs_root = debugfs_create_dir(dev_name(state->dev), NULL);
+
+ debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
+ &state->debug);
+ debugfs_create_file("dump_regs", 0600, state->debugfs_root, state,
+ &mipi_csis_dump_regs_fops);
+}
+
+static void mipi_csis_debugfs_exit(struct csi_state *state)
+{
+ debugfs_remove_recursive(state->debugfs_root);
+}
+
+/* -----------------------------------------------------------------------------
* V4L2 subdev operations
*/
-static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
+
+static struct csi_state *mipi_sd_to_csis_state(struct v4l2_subdev *sdev)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ return container_of(sdev, struct csi_state, sd);
+}
+
+static int mipi_csis_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
int ret;
if (enable) {
@@ -695,11 +832,10 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
mipi_csis_clear_counters(state);
- ret = pm_runtime_get_sync(&state->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&state->pdev->dev);
+ ret = pm_runtime_resume_and_get(state->dev);
+ if (ret < 0)
return ret;
- }
+
ret = v4l2_subdev_call(state->src_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
@@ -708,7 +844,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
mutex_lock(&state->lock);
if (enable) {
- if (state->flags & ST_SUSPENDED) {
+ if (state->state & ST_SUSPENDED) {
ret = -EBUSY;
goto unlock;
}
@@ -720,14 +856,14 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
mipi_csis_log_counters(state, true);
- state->flags |= ST_STREAMING;
+ state->state |= ST_STREAMING;
} else {
v4l2_subdev_call(state->src_sd, video, s_stream, 0);
ret = v4l2_subdev_call(state->src_sd, core, s_power, 0);
if (ret == -ENOIOCTLCMD)
ret = 0;
mipi_csis_stop_stream(state);
- state->flags &= ~ST_STREAMING;
+ state->state &= ~ST_STREAMING;
if (state->debug)
mipi_csis_log_counters(state, true);
}
@@ -737,62 +873,33 @@ unlock:
done:
if (!enable || ret < 0)
- pm_runtime_put(&state->pdev->dev);
+ pm_runtime_put(state->dev);
return ret;
}
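pm_runtime_resume_and_get() folds the old pm_runtime_get_sync() / pm_runtime_put_noidle() error dance into a single call. Its implementation in <linux/pm_runtime.h> is essentially:

	static inline int pm_runtime_resume_and_get(struct device *dev)
	{
		int ret;

		/* Take a usage reference and resume the device. */
		ret = __pm_runtime_resume(dev, RPM_GET_PUT);
		if (ret < 0) {
			/* Drop the reference again on failure. */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;
	}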
-static int mipi_csis_link_setup(struct media_entity *entity,
- const struct media_pad *local_pad,
- const struct media_pad *remote_pad, u32 flags)
-{
- struct v4l2_subdev *mipi_sd = media_entity_to_v4l2_subdev(entity);
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- struct v4l2_subdev *remote_sd;
-
- dev_dbg(state->dev, "link setup %s -> %s", remote_pad->entity->name,
- local_pad->entity->name);
-
- /* We only care about the link to the source. */
- if (!(local_pad->flags & MEDIA_PAD_FL_SINK))
- return 0;
-
- remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
-
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (state->src_sd)
- return -EBUSY;
-
- state->src_sd = remote_sd;
- } else {
- state->src_sd = NULL;
- }
-
- return 0;
-}
-
static struct v4l2_mbus_framefmt *
mipi_csis_get_format(struct csi_state *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which,
unsigned int pad)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&state->mipi_sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&state->sd, sd_state, pad);
return &state->format_mbus;
}
-static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg)
+static int mipi_csis_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *fmt_sink;
struct v4l2_mbus_framefmt *fmt_source;
enum v4l2_subdev_format_whence which;
- which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_sink = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SINK);
+ which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink = mipi_csis_get_format(state, sd_state, which, CSIS_PAD_SINK);
fmt_sink->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt_sink->width = MIPI_CSIS_DEF_PIX_WIDTH;
@@ -811,35 +918,38 @@ static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
* configuration, cfg is NULL, which indicates there's no source pad
* configuration to set.
*/
- if (!cfg)
+ if (!sd_state)
return 0;
- fmt_source = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SOURCE);
+ fmt_source = mipi_csis_get_format(state, sd_state, which,
+ CSIS_PAD_SOURCE);
*fmt_source = *fmt_sink;
return 0;
}
-static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+static int mipi_csis_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *fmt;
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
+
mutex_lock(&state->lock);
- fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
sdformat->format = *fmt;
mutex_unlock(&state->lock);
return 0;
}
-static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
/*
* The CSIS can't transcode in any way, the source format is identical
@@ -851,7 +961,8 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
if (code->index > 0)
return -EINVAL;
- fmt = mipi_csis_get_format(state, cfg, code->which, code->pad);
+ fmt = mipi_csis_get_format(state, sd_state, code->which,
+ code->pad);
code->code = fmt->code;
return 0;
}
@@ -867,11 +978,11 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
return 0;
}
-static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *fmt;
unsigned int align;
@@ -881,29 +992,22 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
* modified.
*/
if (sdformat->pad == CSIS_PAD_SOURCE)
- return mipi_csis_get_fmt(mipi_sd, cfg, sdformat);
+ return mipi_csis_get_fmt(sd, sd_state, sdformat);
if (sdformat->pad != CSIS_PAD_SINK)
return -EINVAL;
- fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
-
- mutex_lock(&state->lock);
-
- /* Validate the media bus code and clamp the size. */
- csis_fmt = find_csis_format(sdformat->format.code);
- if (!csis_fmt)
- csis_fmt = &mipi_csis_formats[0];
-
- fmt->code = csis_fmt->code;
- fmt->width = sdformat->format.width;
- fmt->height = sdformat->format.height;
-
/*
+ * Validate the media bus code and clamp and align the size.
+ *
* The total number of bits per line must be a multiple of 8. We thus
* need to align the width for formats that are not multiples of 8
* bits.
*/
+ csis_fmt = find_csis_format(sdformat->format.code);
+ if (!csis_fmt)
+ csis_fmt = &mipi_csis_formats[0];
+
switch (csis_fmt->width % 8) {
case 0:
align = 0;
@@ -923,13 +1027,24 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
break;
}
- v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH, align,
- &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 0, 0);
+ v4l_bound_align_image(&sdformat->format.width, 1,
+ CSIS_MAX_PIX_WIDTH, align,
+ &sdformat->format.height, 1,
+ CSIS_MAX_PIX_HEIGHT, 0, 0);
+
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
+
+ mutex_lock(&state->lock);
+
+ fmt->code = csis_fmt->code;
+ fmt->width = sdformat->format.width;
+ fmt->height = sdformat->format.height;
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = mipi_csis_get_format(state, cfg, sdformat->which,
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
CSIS_PAD_SOURCE);
*fmt = sdformat->format;
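To see how the align values work out: v4l_bound_align_image() takes walign as a power-of-two exponent, so a 10-bit format (width % 8 == 2) uses align = 2 and rounds the width to a multiple of 4 pixels (4 * 10 = 40 bits, a whole number of bytes), a 12-bit format uses align = 1 (2 pixels, 24 bits), and a 14-bit format uses align = 2 again (4 pixels, 56 bits).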
@@ -942,55 +1057,23 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
return 0;
}
-static int mipi_csis_log_status(struct v4l2_subdev *mipi_sd)
+static int mipi_csis_log_status(struct v4l2_subdev *sd)
{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
mutex_lock(&state->lock);
mipi_csis_log_counters(state, true);
- if (state->debug && (state->flags & ST_POWERED))
+ if (state->debug && (state->state & ST_POWERED))
mipi_csis_dump_regs(state);
mutex_unlock(&state->lock);
return 0;
}
-static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
-{
- struct csi_state *state = dev_id;
- unsigned long flags;
- unsigned int i;
- u32 status;
-
- status = mipi_csis_read(state, MIPI_CSIS_INT_SRC);
-
- spin_lock_irqsave(&state->slock, flags);
-
- /* Update the event/error counters */
- if ((status & MIPI_CSIS_INT_SRC_ERRORS) || state->debug) {
- for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
- if (!(status & state->events[i].mask))
- continue;
- state->events[i].counter++;
- }
- }
- spin_unlock_irqrestore(&state->slock, flags);
-
- mipi_csis_write(state, MIPI_CSIS_INT_SRC, status);
-
- return IRQ_HANDLED;
-}
-
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
-static const struct media_entity_operations mipi_csis_entity_ops = {
- .link_setup = mipi_csis_link_setup,
- .link_validate = v4l2_subdev_link_validate,
- .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
-};
-
static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
.s_stream = mipi_csis_s_stream,
};
@@ -1008,31 +1091,61 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
-static int mipi_csis_parse_dt(struct platform_device *pdev,
- struct csi_state *state)
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+static int mipi_csis_link_setup(struct media_entity *entity,
+ const struct media_pad *local_pad,
+ const struct media_pad *remote_pad, u32 flags)
{
- struct device_node *node = pdev->dev.of_node;
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
+ struct v4l2_subdev *remote_sd;
- if (of_property_read_u32(node, "clock-frequency",
- &state->clk_frequency))
- state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+ dev_dbg(state->dev, "link setup %s -> %s", remote_pad->entity->name,
+ local_pad->entity->name);
- /* Get MIPI PHY resets */
- state->mrst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(state->mrst))
- return PTR_ERR(state->mrst);
+ /* We only care about the link to the source. */
+ if (!(local_pad->flags & MEDIA_PAD_FL_SINK))
+ return 0;
+
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (state->src_sd)
+ return -EBUSY;
+
+ state->src_sd = remote_sd;
+ } else {
+ state->src_sd = NULL;
+ }
return 0;
}
-static int mipi_csis_pm_resume(struct device *dev, bool runtime);
+static const struct media_entity_operations mipi_csis_entity_ops = {
+ .link_setup = mipi_csis_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async subdev notifier
+ */
+
+static struct csi_state *
+mipi_notifier_to_csis_state(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_state, notifier);
+}
static int mipi_csis_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
struct csi_state *state = mipi_notifier_to_csis_state(notifier);
- struct media_pad *sink = &state->mipi_sd.entity.pads[CSIS_PAD_SINK];
+ struct media_pad *sink = &state->sd.entity.pads[CSIS_PAD_SINK];
return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
}
@@ -1041,38 +1154,6 @@ static const struct v4l2_async_notifier_operations mipi_csis_notify_ops = {
.bound = mipi_csis_notify_bound,
};
-static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
- struct platform_device *pdev,
- const struct v4l2_subdev_ops *ops)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- v4l2_subdev_init(mipi_sd, ops);
- mipi_sd->owner = THIS_MODULE;
- snprintf(mipi_sd->name, sizeof(mipi_sd->name), "%s.%d",
- CSIS_SUBDEV_NAME, state->index);
-
- mipi_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- mipi_sd->ctrl_handler = NULL;
-
- mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
- mipi_sd->entity.ops = &mipi_csis_entity_ops;
-
- mipi_sd->dev = &pdev->dev;
-
- state->csis_fmt = &mipi_csis_formats[0];
- mipi_csis_init_cfg(mipi_sd, NULL);
-
- v4l2_set_subdevdata(mipi_sd, &pdev->dev);
-
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK
- | MEDIA_PAD_FL_MUST_CONNECT;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE
- | MEDIA_PAD_FL_MUST_CONNECT;
- return media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
- state->pads);
-}
-
static int mipi_csis_async_register(struct csi_state *state)
{
struct v4l2_fwnode_endpoint vep = {
@@ -1080,6 +1161,7 @@ static int mipi_csis_async_register(struct csi_state *state)
};
struct v4l2_async_subdev *asd;
struct fwnode_handle *ep;
+ unsigned int i;
int ret;
v4l2_async_notifier_init(&state->notifier);
@@ -1093,6 +1175,15 @@ static int mipi_csis_async_register(struct csi_state *state)
if (ret)
goto err_parse;
+ for (i = 0; i < vep.bus.mipi_csi2.num_data_lanes; ++i) {
+ if (vep.bus.mipi_csi2.data_lanes[i] != i + 1) {
+ dev_err(state->dev,
+ "data lane reordering is not supported\n");
+ ret = -EINVAL;
+ goto err_parse;
+ }
+ }
+
state->bus = vep.bus.mipi_csi2;
dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
@@ -1109,12 +1200,11 @@ static int mipi_csis_async_register(struct csi_state *state)
state->notifier.ops = &mipi_csis_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&state->mipi_sd,
- &state->notifier);
+ ret = v4l2_async_subdev_notifier_register(&state->sd, &state->notifier);
if (ret)
return ret;
- return v4l2_async_register_subdev(&state->mipi_sd);
+ return v4l2_async_register_subdev(&state->sd);
err_parse:
fwnode_handle_put(ep);
@@ -1122,98 +1212,209 @@ err_parse:
return ret;
}
-static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
+/* -----------------------------------------------------------------------------
+ * Suspend/resume
+ */
+
+static int mipi_csis_pm_suspend(struct device *dev, bool runtime)
{
- struct csi_state *state = m->private;
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
+ int ret = 0;
- return mipi_csis_dump_regs(state);
+ mutex_lock(&state->lock);
+ if (state->state & ST_POWERED) {
+ mipi_csis_stop_stream(state);
+ ret = mipi_csis_phy_disable(state);
+ if (ret)
+ goto unlock;
+ mipi_csis_clk_disable(state);
+ state->state &= ~ST_POWERED;
+ if (!runtime)
+ state->state |= ST_SUSPENDED;
+ }
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return ret ? -EAGAIN : 0;
}
-DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
-static void mipi_csis_debugfs_init(struct csi_state *state)
+static int mipi_csis_pm_resume(struct device *dev, bool runtime)
{
- state->debugfs_root = debugfs_create_dir(dev_name(state->dev), NULL);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
+ int ret = 0;
- debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
- &state->debug);
- debugfs_create_file("dump_regs", 0600, state->debugfs_root, state,
- &mipi_csis_dump_regs_fops);
+ mutex_lock(&state->lock);
+ if (!runtime && !(state->state & ST_SUSPENDED))
+ goto unlock;
+
+ if (!(state->state & ST_POWERED)) {
+ ret = mipi_csis_phy_enable(state);
+ if (ret)
+ goto unlock;
+
+ state->state |= ST_POWERED;
+ mipi_csis_clk_enable(state);
+ }
+ if (state->state & ST_STREAMING)
+ mipi_csis_start_stream(state);
+
+ state->state &= ~ST_SUSPENDED;
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return ret ? -EAGAIN : 0;
}
-static void mipi_csis_debugfs_exit(struct csi_state *state)
+static int __maybe_unused mipi_csis_suspend(struct device *dev)
{
- debugfs_remove_recursive(state->debugfs_root);
+ return mipi_csis_pm_suspend(dev, false);
+}
+
+static int __maybe_unused mipi_csis_resume(struct device *dev)
+{
+ return mipi_csis_pm_resume(dev, false);
+}
+
+static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
+{
+ return mipi_csis_pm_suspend(dev, true);
+}
+
+static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
+{
+ return mipi_csis_pm_resume(dev, true);
+}
+
+static const struct dev_pm_ops mipi_csis_pm_ops = {
+ SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mipi_csis_suspend, mipi_csis_resume)
+};
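SET_RUNTIME_PM_OPS() only populates the struct under CONFIG_PM and SET_SYSTEM_SLEEP_PM_OPS() only under CONFIG_PM_SLEEP, which is why the four callbacks above are marked __maybe_unused. With both options enabled, the result is roughly equivalent to:

	static const struct dev_pm_ops mipi_csis_pm_ops = {
		.runtime_suspend = mipi_csis_runtime_suspend,
		.runtime_resume = mipi_csis_runtime_resume,
		.suspend = mipi_csis_suspend,
		.resume = mipi_csis_resume,
		/*
		 * SET_SYSTEM_SLEEP_PM_OPS() also wires freeze/thaw/
		 * poweroff/restore to the same suspend/resume pair.
		 */
	};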
+
+/* -----------------------------------------------------------------------------
+ * Probe/remove & platform driver
+ */
+
+static int mipi_csis_subdev_init(struct csi_state *state)
+{
+ struct v4l2_subdev *sd = &state->sd;
+
+ v4l2_subdev_init(sd, &mipi_csis_subdev_ops);
+ sd->owner = THIS_MODULE;
+ snprintf(sd->name, sizeof(sd->name), "%s.%d",
+ CSIS_SUBDEV_NAME, state->index);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->ctrl_handler = NULL;
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &mipi_csis_entity_ops;
+
+ sd->dev = state->dev;
+
+ state->csis_fmt = &mipi_csis_formats[0];
+ mipi_csis_init_cfg(sd, NULL);
+
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ return media_entity_pads_init(&sd->entity, CSIS_PADS_NUM,
+ state->pads);
+}
+
+static int mipi_csis_parse_dt(struct csi_state *state)
+{
+ struct device_node *node = state->dev->of_node;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &state->clk_frequency))
+ state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+
+ return 0;
}
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct csi_state *state;
+ int irq;
int ret;
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
+ mutex_init(&state->lock);
spin_lock_init(&state->slock);
- state->pdev = pdev;
state->dev = dev;
+ state->info = of_device_get_match_data(dev);
+
+ memcpy(state->events, mipi_csis_events, sizeof(state->events));
- ret = mipi_csis_parse_dt(pdev, state);
+ /* Parse DT properties. */
+ ret = mipi_csis_parse_dt(state);
if (ret < 0) {
dev_err(dev, "Failed to parse device tree: %d\n", ret);
return ret;
}
- ret = mipi_csis_phy_init(state);
- if (ret < 0)
- return ret;
-
- mipi_csis_phy_reset(state);
-
+ /* Acquire resources. */
state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
- state->irq = platform_get_irq(pdev, 0);
- if (state->irq < 0)
- return state->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
ret = mipi_csis_clk_get(state);
if (ret < 0)
return ret;
+ /* Reset PHY and enable the clocks. */
+ mipi_csis_phy_reset(state);
+
ret = mipi_csis_clk_enable(state);
if (ret < 0) {
dev_err(state->dev, "failed to enable clocks: %d\n", ret);
return ret;
}
- ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
- 0, dev_name(dev), state);
+ /* Now that the hardware is initialized, request the interrupt. */
+ ret = devm_request_irq(dev, irq, mipi_csis_irq_handler, 0,
+ dev_name(dev), state);
if (ret) {
dev_err(dev, "Interrupt request failed\n");
goto disable_clock;
}
- platform_set_drvdata(pdev, &state->mipi_sd);
-
- mutex_init(&state->lock);
- ret = mipi_csis_subdev_init(&state->mipi_sd, pdev,
- &mipi_csis_subdev_ops);
+ /* Initialize and register the subdev. */
+ ret = mipi_csis_subdev_init(state);
if (ret < 0)
goto disable_clock;
+ platform_set_drvdata(pdev, &state->sd);
+
ret = mipi_csis_async_register(state);
if (ret < 0) {
- dev_err(&pdev->dev, "async register failed: %d\n", ret);
+ dev_err(dev, "async register failed: %d\n", ret);
goto cleanup;
}
- memcpy(state->events, mipi_csis_events, sizeof(state->events));
-
+ /* Initialize debugfs. */
mipi_csis_debugfs_init(state);
+
+ /* Enable runtime PM. */
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev)) {
ret = mipi_csis_pm_resume(dev, true);
@@ -1221,7 +1422,7 @@ static int mipi_csis_probe(struct platform_device *pdev)
goto unregister_all;
}
- dev_info(&pdev->dev, "lanes: %d, freq: %u\n",
+ dev_info(dev, "lanes: %d, freq: %u\n",
state->bus.num_data_lanes, state->clk_frequency);
return 0;
@@ -1229,10 +1430,10 @@ static int mipi_csis_probe(struct platform_device *pdev)
unregister_all:
mipi_csis_debugfs_exit(state);
cleanup:
- media_entity_cleanup(&state->mipi_sd.entity);
+ media_entity_cleanup(&state->sd.entity);
v4l2_async_notifier_unregister(&state->notifier);
v4l2_async_notifier_cleanup(&state->notifier);
- v4l2_async_unregister_subdev(&state->mipi_sd);
+ v4l2_async_unregister_subdev(&state->sd);
disable_clock:
mipi_csis_clk_disable(state);
mutex_destroy(&state->lock);
@@ -1240,107 +1441,40 @@ disable_clock:
return ret;
}
-static int mipi_csis_pm_suspend(struct device *dev, bool runtime)
-{
- struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- int ret = 0;
-
- mutex_lock(&state->lock);
- if (state->flags & ST_POWERED) {
- mipi_csis_stop_stream(state);
- ret = regulator_disable(state->mipi_phy_regulator);
- if (ret)
- goto unlock;
- mipi_csis_clk_disable(state);
- state->flags &= ~ST_POWERED;
- if (!runtime)
- state->flags |= ST_SUSPENDED;
- }
-
-unlock:
- mutex_unlock(&state->lock);
-
- return ret ? -EAGAIN : 0;
-}
-
-static int mipi_csis_pm_resume(struct device *dev, bool runtime)
-{
- struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- int ret = 0;
-
- mutex_lock(&state->lock);
- if (!runtime && !(state->flags & ST_SUSPENDED))
- goto unlock;
-
- if (!(state->flags & ST_POWERED)) {
- ret = regulator_enable(state->mipi_phy_regulator);
- if (ret)
- goto unlock;
-
- state->flags |= ST_POWERED;
- mipi_csis_clk_enable(state);
- }
- if (state->flags & ST_STREAMING)
- mipi_csis_start_stream(state);
-
- state->flags &= ~ST_SUSPENDED;
-
-unlock:
- mutex_unlock(&state->lock);
-
- return ret ? -EAGAIN : 0;
-}
-
-static int __maybe_unused mipi_csis_suspend(struct device *dev)
-{
- return mipi_csis_pm_suspend(dev, false);
-}
-
-static int __maybe_unused mipi_csis_resume(struct device *dev)
-{
- return mipi_csis_pm_resume(dev, false);
-}
-
-static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
-{
- return mipi_csis_pm_suspend(dev, true);
-}
-
-static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
-{
- return mipi_csis_pm_resume(dev, true);
-}
-
static int mipi_csis_remove(struct platform_device *pdev)
{
- struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi_state *state = mipi_sd_to_csis_state(sd);
mipi_csis_debugfs_exit(state);
v4l2_async_notifier_unregister(&state->notifier);
v4l2_async_notifier_cleanup(&state->notifier);
- v4l2_async_unregister_subdev(&state->mipi_sd);
+ v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
mipi_csis_pm_suspend(&pdev->dev, true);
mipi_csis_clk_disable(state);
- media_entity_cleanup(&state->mipi_sd.entity);
+ media_entity_cleanup(&state->sd.entity);
mutex_destroy(&state->lock);
pm_runtime_set_suspended(&pdev->dev);
return 0;
}
-static const struct dev_pm_ops mipi_csis_pm_ops = {
- SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(mipi_csis_suspend, mipi_csis_resume)
-};
-
static const struct of_device_id mipi_csis_of_match[] = {
- { .compatible = "fsl,imx7-mipi-csi2", },
+ {
+ .compatible = "fsl,imx7-mipi-csi2",
+ .data = &(const struct mipi_csis_info){
+ .version = MIPI_CSIS_V3_3,
+ .num_clocks = 3,
+ },
+ }, {
+ .compatible = "fsl,imx8mm-mipi-csi2",
+ .data = &(const struct mipi_csis_info){
+ .version = MIPI_CSIS_V3_6_3,
+ .num_clocks = 4,
+ },
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mipi_csis_of_match);
@@ -1357,6 +1491,6 @@ static struct platform_driver mipi_csis_driver = {
module_platform_driver(mipi_csis_driver);
-MODULE_DESCRIPTION("i.MX7 MIPI CSI-2 Receiver driver");
+MODULE_DESCRIPTION("i.MX7 & i.MX8 MIPI CSI-2 receiver driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:imx7-mipi-csi2");
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 9b644fb23dde..fa3d6ee5adf2 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -9,8 +9,10 @@
/* from /drivers/staging/media/ipu3/include/videodev2.h */
/* Vendor specific - used for IPU3 camera sub-system */
-#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p') /* IPU3 processing parameters */
-#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's') /* IPU3 3A statistics */
+/* IPU3 processing parameters */
+#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p')
+/* IPU3 3A statistics */
+#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's')
/* from include/uapi/linux/v4l2-controls.h */
#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
@@ -74,7 +76,6 @@ struct ipu3_uapi_grid_config {
(IPU3_UAPI_AWB_MAX_SETS * \
(IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
-
/**
* struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
*
@@ -244,8 +245,8 @@ struct ipu3_uapi_ae_ccm {
*/
struct ipu3_uapi_ae_config {
struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32)));
- struct ipu3_uapi_ae_weight_elem weights[
- IPU3_UAPI_AE_WEIGHTS] __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
+ __attribute__((aligned(32)));
struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32)));
} __packed;
@@ -630,7 +631,7 @@ struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
* @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
* @ci: Intensity coefficient for threshold calculation. range [0, 0x1f]
* default 6.
- * format: u3.2 (3 most significant bits represent whole number,
+ * format: u3.2 (3 most significant bits represent whole number,
* 2 least significant bits represent the fractional part
* with each count representing 0.25)
* e.g. 6 in binary format is 00110, that translates to 1.5
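(In u3.2 the stored integer is the value scaled by 4, so the stored 6 decodes as 6 / 4 = 1.5 and the maximum 0x1f decodes as 31 / 4 = 7.75.)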
diff --git a/drivers/staging/media/ipu3/ipu3-abi.h b/drivers/staging/media/ipu3/ipu3-abi.h
index e1185602c7fd..c76935b436d7 100644
--- a/drivers/staging/media/ipu3/ipu3-abi.h
+++ b/drivers/staging/media/ipu3/ipu3-abi.h
@@ -4,7 +4,7 @@
#ifndef __IPU3_ABI_H
#define __IPU3_ABI_H
-#include "include/intel-ipu3.h"
+#include "include/uapi/intel-ipu3.h"
/******************* IMGU Hardware information *******************/
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.h b/drivers/staging/media/ipu3/ipu3-css-pool.h
index 35519a08c08c..3f9e32e0e9a7 100644
--- a/drivers/staging/media/ipu3/ipu3-css-pool.h
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.h
@@ -15,6 +15,7 @@ struct imgu_device;
* @size: size of the buffer in bytes.
* @vaddr: kernel virtual address.
* @daddr: iova dma address to access IPU3.
+ * @pages: pages mapped to this buffer
*/
struct imgu_css_map {
size_t size;
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 6d9c49b39531..38a240764509 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -36,7 +36,7 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
/* Initialize try_fmt */
for (i = 0; i < IMGU_NODE_NUM; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, i);
+ v4l2_subdev_get_try_format(sd, fh->state, i);
try_fmt->width = try_crop.width;
try_fmt->height = try_crop.height;
@@ -44,8 +44,8 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
try_fmt->field = V4L2_FIELD_NONE;
}
- *v4l2_subdev_get_try_crop(sd, fh->pad, IMGU_NODE_IN) = try_crop;
- *v4l2_subdev_get_try_compose(sd, fh->pad, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_crop(sd, fh->state, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_compose(sd, fh->state, IMGU_NODE_IN) = try_crop;
return 0;
}
@@ -120,7 +120,7 @@ static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
}
static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
@@ -136,7 +136,7 @@ static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
fmt->format = imgu_pipe->nodes[pad].pad_fmt;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
fmt->format = *mf;
}
@@ -144,7 +144,7 @@ static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_media_pipe *imgu_pipe;
@@ -161,7 +161,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
else
mf = &imgu_pipe->nodes[pad].pad_fmt;
@@ -189,7 +189,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *try_sel, *r;
@@ -202,11 +202,11 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
r = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
r = &imgu_sd->rect.bds;
break;
default:
@@ -222,7 +222,7 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
}
static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
@@ -241,11 +241,11 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
rect = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
rect = &imgu_sd->rect.bds;
break;
default:
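A note on the conversion repeated throughout the media hunks in this series: struct v4l2_subdev_pad_config gives way to struct v4l2_subdev_state, and every pad operation plus every v4l2_subdev_get_try_*() helper now takes the state container, which owns the per-pad TRY formats and rectangles. Passing fh->state where fh->pad used to go (as in the imgu and omap4iss init paths) is the file-handle side of the same change. A minimal sketch of the resulting callback shape; example_get_fmt is a hypothetical name:

#include <media/v4l2-subdev.h>

static int example_get_fmt(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *sd_state,
                           struct v4l2_subdev_format *fmt)
{
        /* TRY formats now live inside the subdev state container. */
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
                fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
                                                          fmt->pad);
                return 0;
        }

        /* ACTIVE formats stay wherever the driver keeps them (omitted). */
        return -EINVAL;
}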
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index ee1bba6bdcac..8e1e9e46e604 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -392,10 +392,9 @@ int imgu_s_stream(struct imgu_device *imgu, int enable)
}
/* Set Power */
- r = pm_runtime_get_sync(dev);
+ r = pm_runtime_resume_and_get(dev);
if (r < 0) {
dev_err(dev, "failed to set imgu power\n");
- pm_runtime_put(dev);
return r;
}
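The pm_runtime_get_sync() to pm_runtime_resume_and_get() conversions in this series all target the same pitfall: get_sync() increments the usage counter even when the resume fails, so every error path had to remember a matching put. A minimal before/after sketch, assuming a generic struct device; old_power_up/new_power_up are hypothetical names:

#include <linux/pm_runtime.h>

/* Old pattern: the counter is up even on failure, so undo it by hand. */
static int old_power_up(struct device *dev)
{
        int r = pm_runtime_get_sync(dev);

        if (r < 0) {
                pm_runtime_put_noidle(dev);     /* easy to forget */
                return r;
        }
        return 0;
}

/* New pattern: the helper drops its reference itself on error. */
static int new_power_up(struct device *dev)
{
        return pm_runtime_resume_and_get(dev);
}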
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index 7f07a9175815..b9125c295d1d 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -183,7 +183,7 @@ int amvdec_set_canvases(struct amvdec_session *sess,
u32 pixfmt = sess->pixfmt_cap;
u32 width = ALIGN(sess->width, 32);
u32 height = ALIGN(sess->height, 32);
- u32 reg_cur = reg_base[0];
+ u32 reg_cur;
u32 reg_num_cur = 0;
u32 reg_base_cur = 0;
int i = 0;
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
index b88f9529683c..3f587e000729 100644
--- a/drivers/staging/media/omap4iss/iss.h
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -119,9 +119,6 @@ struct iss_device {
unsigned int isp_subclk_resources;
};
-#define v4l2_dev_to_iss_device(dev) \
- container_of(dev, struct iss_device, v4l2_dev)
-
int omap4iss_get_external_info(struct iss_pipeline *pipe,
struct media_link *link);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index a6dc2d2b1228..124ab2f44fbf 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -825,19 +825,20 @@ static const struct iss_video_operations csi2_issvideo_ops = {
static struct v4l2_mbus_framefmt *
__csi2_get_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
return &csi2->formats[pad];
}
static void
csi2_try_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -868,7 +869,8 @@ csi2_try_format(struct iss_csi2_device *csi2,
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -894,7 +896,7 @@ csi2_try_format(struct iss_csi2_device *csi2,
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -907,7 +909,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
code->which);
switch (code->index) {
case 0:
@@ -931,7 +933,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -943,7 +945,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -953,7 +955,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -968,13 +970,13 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -990,25 +992,26 @@ static int csi2_get_format(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -1050,7 +1053,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh ? fh->pad : NULL, &format);
+ csi2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index 26be078b69f3..23f707cb336f 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -21,7 +21,7 @@
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which);
@@ -175,12 +175,13 @@ static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ipipe->subdev, sd_state,
+ pad);
return &ipipe->formats[pad];
}
@@ -194,7 +195,7 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe,
*/
static void
ipipe_try_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -222,7 +223,8 @@ ipipe_try_format(struct iss_ipipe_device *ipipe,
break;
case IPIPE_PAD_SOURCE_VP:
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SINK, which);
+ format = __ipipe_get_format(ipipe, sd_state, IPIPE_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
@@ -243,7 +245,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe,
* return -EINVAL or zero on success
*/
static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -270,7 +272,7 @@ static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
@@ -282,7 +284,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
+ ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -292,7 +294,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
+ ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -309,13 +311,13 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -333,25 +335,26 @@ static int ipipe_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
+ ipipe_try_format(ipipe, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPE_PAD_SINK) {
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP,
+ format = __ipipe_get_format(ipipe, sd_state,
+ IPIPE_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipe_try_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP, format,
+ ipipe_try_format(ipipe, sd_state, IPIPE_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -392,7 +395,7 @@ static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipe_set_format(sd, fh ? fh->pad : NULL, &format);
+ ipipe_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index c2978d02e797..5e7f25cd53ac 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -357,11 +357,12 @@ static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ipipeif->subdev, sd_state,
+ pad);
return &ipipeif->formats[pad];
}
@@ -374,7 +375,7 @@ __ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
*/
static void
ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -403,7 +404,8 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_ISIF_SF:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -418,7 +420,8 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_VP:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -442,7 +445,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
* return -EINVAL or zero on success
*/
static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -462,7 +465,8 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
code->which);
code->code = format->code;
@@ -476,7 +480,7 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -488,7 +492,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
+ ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -498,7 +502,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
+ ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -515,13 +519,13 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipeif_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -539,33 +543,36 @@ static int ipipeif_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipeif_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
+ ipipeif_try_format(ipipeif, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPEIF_PAD_SINK) {
- format = __ipipeif_get_format(ipipeif, cfg,
+ format = __ipipeif_get_format(ipipeif, sd_state,
IPIPEIF_PAD_SOURCE_ISIF_SF,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_ISIF_SF,
+ ipipeif_try_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
format, fmt->which);
- format = __ipipeif_get_format(ipipeif, cfg,
+ format = __ipipeif_get_format(ipipeif, sd_state,
IPIPEIF_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_VP, format,
+ ipipeif_try_format(ipipeif, sd_state, IPIPEIF_PAD_SOURCE_VP,
+ format,
fmt->which);
}
@@ -608,7 +615,7 @@ static int ipipeif_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipeif_set_format(sd, fh ? fh->pad : NULL, &format);
+ ipipeif_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index 3b6875cbca9b..a5f8f9f1ab16 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -416,11 +416,12 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&resizer->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&resizer->subdev, sd_state,
+ pad);
return &resizer->formats[pad];
}
@@ -433,7 +434,7 @@ __resizer_get_format(struct iss_resizer_device *resizer,
*/
static void
resizer_try_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -461,7 +462,8 @@ resizer_try_format(struct iss_resizer_device *resizer,
case RESIZER_PAD_SOURCE_MEM:
pixelcode = fmt->code;
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
+ format = __resizer_get_format(resizer, sd_state,
+ RESIZER_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -492,7 +494,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -507,7 +509,8 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
break;
case RESIZER_PAD_SOURCE_MEM:
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
+ format = __resizer_get_format(resizer, sd_state,
+ RESIZER_PAD_SINK,
code->which);
if (code->index == 0) {
@@ -537,7 +540,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -549,7 +552,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -559,7 +562,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -576,13 +579,13 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int resizer_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -600,26 +603,28 @@ static int resizer_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int resizer_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- resizer_try_format(resizer, cfg, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(resizer, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == RESIZER_PAD_SINK) {
- format = __resizer_get_format(resizer, cfg,
+ format = __resizer_get_format(resizer, sd_state,
RESIZER_PAD_SOURCE_MEM,
fmt->which);
*format = fmt->format;
- resizer_try_format(resizer, cfg, RESIZER_PAD_SOURCE_MEM, format,
+ resizer_try_format(resizer, sd_state, RESIZER_PAD_SOURCE_MEM,
+ format,
fmt->which);
}
@@ -662,7 +667,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh ? fh->pad : NULL, &format);
+ resizer_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 930f638f51eb..d0da083deed5 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -399,7 +399,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
if (start)
omap4iss_pipeline_set_stream(pipe,
- ISS_PIPELINE_STREAM_SINGLESHOT);
+ ISS_PIPELINE_STREAM_SINGLESHOT);
}
}
@@ -960,7 +960,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
unsigned long flags;
ret = omap4iss_pipeline_set_stream(pipe,
- ISS_PIPELINE_STREAM_CONTINUOUS);
+ ISS_PIPELINE_STREAM_CONTINUOUS);
if (ret < 0)
goto err_omap4iss_set_stream;
spin_lock_irqsave(&video->qlock, flags);
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index d821661d30f3..7131156c1f2c 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -481,7 +481,15 @@ static int rkvdec_buf_prepare(struct vb2_buffer *vb)
if (vb2_plane_size(vb, i) < sizeimage)
return -EINVAL;
}
- vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
+
+ /*
+ * A buffer's bytesused must be written by the driver for CAPTURE
+ * buffers (for OUTPUT buffers, if userspace passes 0 bytesused,
+ * v4l2-core sets it to the buffer length).
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type))
+ vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
+
return 0;
}
@@ -658,7 +666,7 @@ static void rkvdec_device_run(void *priv)
if (WARN_ON(!desc))
return;
- ret = pm_runtime_get_sync(rkvdec->dev);
+ ret = pm_runtime_resume_and_get(rkvdec->dev);
if (ret < 0) {
rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
return;
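The buf_prepare change above (and the identical one in cedrus further down) narrows the payload write to CAPTURE buffers only; for OUTPUT buffers the userspace-supplied bytesused is the actual bitstream length and must survive. A minimal sketch of the rule; example_buf_prepare is hypothetical and my_sizeimage stands in for the driver's per-format size lookup:

#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>

static int example_buf_prepare(struct vb2_buffer *vb, size_t my_sizeimage)
{
        struct vb2_queue *vq = vb->vb2_queue;

        if (vb2_plane_size(vb, 0) < my_sizeimage)
                return -EINVAL;

        /*
         * Only CAPTURE payloads belong to the driver; OUTPUT bytesused
         * is the bitstream length that userspace queued.
         */
        if (V4L2_TYPE_IS_CAPTURE(vq->type))
                vb2_set_plane_payload(vb, 0, my_sizeimage);

        return 0;
}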
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 92812d1a39d4..c0d005dafc6c 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -31,13 +31,19 @@
static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
},
.codec = CEDRUS_CODEC_MPEG2,
},
{
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ },
+ .codec = CEDRUS_CODEC_MPEG2,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
},
.codec = CEDRUS_CODEC_MPEG2,
},
@@ -151,6 +157,12 @@ static const struct cedrus_control cedrus_controls[] = {
},
.codec = CEDRUS_CODEC_VP8,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
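With the MPEG-2 stateless API destaged, the old slice-params/quantization pair splits into three controls: sequence, picture, and quantisation. A hedged userspace-side sketch of attaching all three to a decode request (request_fd plumbing omitted; fill_mpeg2_request is a hypothetical helper and field values are placeholders):

#include <string.h>
#include <linux/videodev2.h>

static void fill_mpeg2_request(struct v4l2_ext_controls *ctrls,
                               struct v4l2_ext_control c[3],
                               struct v4l2_ctrl_mpeg2_sequence *seq,
                               struct v4l2_ctrl_mpeg2_picture *pic,
                               struct v4l2_ctrl_mpeg2_quantisation *quant)
{
        memset(c, 0, 3 * sizeof(*c));
        c[0].id = V4L2_CID_STATELESS_MPEG2_SEQUENCE;
        c[0].ptr = seq;
        c[0].size = sizeof(*seq);
        c[1].id = V4L2_CID_STATELESS_MPEG2_PICTURE;
        c[1].ptr = pic;
        c[1].size = sizeof(*pic);
        c[2].id = V4L2_CID_STATELESS_MPEG2_QUANTISATION;
        c[2].ptr = quant;
        c[2].size = sizeof(*quant);

        memset(ctrls, 0, sizeof(*ctrls));
        ctrls->which = V4L2_CTRL_WHICH_REQUEST_VAL; /* request_fd set by caller */
        ctrls->count = 3;
        ctrls->controls = c;
}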
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 15f147dad4cb..88afba17b78b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -68,14 +68,16 @@ struct cedrus_h264_run {
};
struct cedrus_mpeg2_run {
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
- const struct v4l2_ctrl_mpeg2_quantization *quantization;
+ const struct v4l2_ctrl_mpeg2_sequence *sequence;
+ const struct v4l2_ctrl_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
};
struct cedrus_h265_run {
const struct v4l2_ctrl_hevc_sps *sps;
const struct v4l2_ctrl_hevc_pps *pps;
const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
};
struct cedrus_vp8_run {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index d696b3ec70c0..40e8c4123f76 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -40,10 +40,12 @@ void cedrus_device_run(void *priv)
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
- run.mpeg2.slice_params = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
- run.mpeg2.quantization = cedrus_find_control_data(ctx,
- V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ run.mpeg2.sequence = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ run.mpeg2.picture = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+ run.mpeg2.quantisation = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_QUANTISATION);
break;
case V4L2_PIX_FMT_H264_SLICE:
@@ -68,6 +70,8 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_HEVC_PPS);
run.h265.slice_params = cedrus_find_control_data(ctx,
V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ run.h265.decode_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
break;
case V4L2_PIX_FMT_VP8_FRAME:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index ce497d0197df..6821e3d05d34 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -245,6 +245,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
const struct v4l2_ctrl_hevc_sps *sps;
const struct v4l2_ctrl_hevc_pps *pps;
const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
const struct v4l2_hevc_pred_weight_table *pred_weight_table;
dma_addr_t src_buf_addr;
dma_addr_t src_buf_end_addr;
@@ -256,6 +257,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
sps = run->h265.sps;
pps = run->h265.pps;
slice_params = run->h265.slice_params;
+ decode_params = run->h265.decode_params;
pred_weight_table = &slice_params->pred_weight_table;
/* MV column buffer size and allocation. */
@@ -477,8 +479,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
slice_params->flags);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
- V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
- pps->flags);
+ V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ slice_params->flags);
/* FIXME: For multi-slice support. */
reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
@@ -487,7 +489,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
- VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(slice_params->num_rps_poc_st_curr_after == 0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(decode_params->num_poc_st_curr_after == 0) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
@@ -527,8 +529,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
/* Write decoded picture buffer in pic list. */
- cedrus_h265_frame_info_write_dpb(ctx, slice_params->dpb,
- slice_params->num_active_dpb_entries);
+ cedrus_h265_frame_info_write_dpb(ctx, decode_params->dpb,
+ decode_params->num_active_dpb_entries);
/* Output frame. */
@@ -545,7 +547,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Reference picture list 0 (for P/B frames). */
if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
- cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
slice_params->ref_idx_l0,
slice_params->num_ref_idx_l0_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
@@ -564,7 +566,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Reference picture list 1 (for B frames). */
if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
- cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
slice_params->ref_idx_l1,
slice_params->num_ref_idx_l1_active_minus1 + 1,
VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 8bcd6b8f9e2d..5dad2f296c6d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -13,30 +13,6 @@
#include "cedrus_hw.h"
#include "cedrus_regs.h"
-/* Default MPEG-2 quantization coefficients, from the specification. */
-
-static const u8 intra_quantization_matrix_default[64] = {
- 8, 16, 16, 19, 16, 19, 22, 22,
- 22, 22, 22, 22, 26, 24, 26, 27,
- 27, 27, 26, 26, 26, 26, 27, 27,
- 27, 29, 29, 29, 34, 34, 34, 29,
- 29, 29, 27, 27, 29, 29, 32, 32,
- 34, 34, 37, 38, 37, 35, 35, 34,
- 35, 38, 38, 40, 40, 40, 48, 48,
- 46, 46, 56, 56, 58, 69, 69, 83
-};
-
-static const u8 non_intra_quantization_matrix_default[64] = {
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 16
-};
-
static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
@@ -74,10 +50,9 @@ static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
{
- const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
- const struct v4l2_mpeg2_sequence *sequence;
- const struct v4l2_mpeg2_picture *picture;
- const struct v4l2_ctrl_mpeg2_quantization *quantization;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
dma_addr_t fwd_luma_addr, fwd_chroma_addr;
dma_addr_t bwd_luma_addr, bwd_chroma_addr;
@@ -89,22 +64,16 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
unsigned int i;
u32 reg;
- slice_params = run->mpeg2.slice_params;
- sequence = &slice_params->sequence;
- picture = &slice_params->picture;
+ seq = run->mpeg2.sequence;
+ pic = run->mpeg2.picture;
- quantization = run->mpeg2.quantization;
+ quantisation = run->mpeg2.quantisation;
/* Activate MPEG engine. */
cedrus_engine_enable(ctx, CEDRUS_CODEC_MPEG2);
- /* Set intra quantization matrix. */
-
- if (quantization && quantization->load_intra_quantiser_matrix)
- matrix = quantization->intra_quantiser_matrix;
- else
- matrix = intra_quantization_matrix_default;
-
+ /* Set intra quantisation matrix. */
+ matrix = quantisation->intra_quantiser_matrix;
for (i = 0; i < 64; i++) {
reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA;
@@ -112,13 +81,8 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
}
- /* Set non-intra quantization matrix. */
-
- if (quantization && quantization->load_non_intra_quantiser_matrix)
- matrix = quantization->non_intra_quantiser_matrix;
- else
- matrix = non_intra_quantization_matrix_default;
-
+ /* Set non-intra quantisation matrix. */
+ matrix = quantisation->non_intra_quantiser_matrix;
for (i = 0; i < 64; i++) {
reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA;
@@ -128,19 +92,19 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
/* Set MPEG picture header. */
- reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(picture->picture_coding_type);
- reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, picture->f_code[0][0]);
- reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, picture->f_code[0][1]);
- reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, picture->f_code[1][0]);
- reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, picture->f_code[1][1]);
- reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(picture->intra_dc_precision);
- reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(picture->picture_structure);
- reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(picture->top_field_first);
- reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(picture->frame_pred_frame_dct);
- reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(picture->concealment_motion_vectors);
- reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(picture->q_scale_type);
- reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(picture->intra_vlc_format);
- reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(picture->alternate_scan);
+ reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(pic->picture_coding_type);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, pic->f_code[0][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, pic->f_code[0][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, pic->f_code[1][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, pic->f_code[1][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(pic->intra_dc_precision);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(pic->picture_structure);
+ reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV);
+ reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC);
+ reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN);
reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0);
reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0);
@@ -148,8 +112,8 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
/* Set frame dimensions. */
- reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(sequence->horizontal_size);
- reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(sequence->vertical_size);
+ reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(seq->horizontal_size);
+ reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(seq->vertical_size);
cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg);
@@ -162,14 +126,14 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- forward_idx = vb2_find_timestamp(vq, slice_params->forward_ref_ts, 0);
+ forward_idx = vb2_find_timestamp(vq, pic->forward_ref_ts, 0);
fwd_luma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 0);
fwd_chroma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 1);
cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr);
cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr);
- backward_idx = vb2_find_timestamp(vq, slice_params->backward_ref_ts, 0);
+ backward_idx = vb2_find_timestamp(vq, pic->backward_ref_ts, 0);
bwd_luma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 0);
bwd_chroma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 1);
@@ -186,10 +150,9 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
/* Source offset and length in bits. */
- cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET,
- slice_params->data_bit_offset);
+ cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET, 0);
- reg = slice_params->bit_size - slice_params->data_bit_offset;
+ reg = vb2_get_plane_payload(&run->src->vb2_buf, 0) * 8;
cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg);
/* Source beginning and end addresses. */
@@ -203,7 +166,7 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg);
- reg = src_buf_addr + DIV_ROUND_UP(slice_params->bit_size, 8);
+ reg = src_buf_addr + vb2_get_plane_payload(&run->src->vb2_buf, 0);
cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg);
/* Macroblock address: start at the beginning. */
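Two uAPI consequences surface in the cedrus_mpeg2 hunks above: the per-field booleans of the old picture struct become bits in a single flags word, and the bit_size/data_bit_offset pair is gone, so the VLD is simply fed the whole plane payload starting at bit 0. A one-line sketch of the flags idiom; mpeg2_pic_top_field_first is a hypothetical helper:

#include <linux/videodev2.h>

static bool mpeg2_pic_top_field_first(const struct v4l2_ctrl_mpeg2_picture *pic)
{
        return pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST;
}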
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index b62eb8e84057..32c13ecb22d8 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -457,7 +457,13 @@ static int cedrus_buf_prepare(struct vb2_buffer *vb)
if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
return -EINVAL;
- vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+ /*
+ * A buffer's bytesused must be written by the driver for CAPTURE
+ * buffers (for OUTPUT buffers, if userspace passes 0 bytesused,
+ * v4l2-core sets it to the buffer length).
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type))
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
return 0;
}
@@ -490,11 +496,9 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
}
if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
- ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev->dev);
+ ret = pm_runtime_resume_and_get(dev->dev);
+ if (ret < 0)
goto err_cleanup;
- }
if (dev->dec_ops[ctx->current_codec]->start) {
ret = dev->dec_ops[ctx->current_codec]->start(ctx);
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index 28845b5bafaf..ed4c1250b303 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -775,9 +775,9 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
if (ret)
goto release_dpb_frames;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto put_runtime_pm;
+ goto unlock;
/*
* We rely on the VDE registers reset value, otherwise VDE
@@ -843,6 +843,8 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
put_runtime_pm:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+
+unlock:
mutex_unlock(&vde->lock);
release_dpb_frames:
@@ -1069,11 +1071,20 @@ static int tegra_vde_probe(struct platform_device *pdev)
* power-cycle it in order to put hardware into a predictable lower
* power state.
*/
- pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ goto err_pm_runtime;
+
pm_runtime_put(dev);
return 0;
+err_pm_runtime:
+ misc_deregister(&vde->miscdev);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+
err_deinit_iommu:
tegra_vde_iommu_deinit(vde);
@@ -1089,7 +1100,12 @@ static int tegra_vde_remove(struct platform_device *pdev)
struct tegra_vde *vde = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ /*
+ * As pm_runtime_get_sync() increments the RPM usage_count even on
+ * errors, we don't need to check the return value here.
+ */
pm_runtime_get_sync(dev);
+
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
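The tegra-vde probe path now unwinds the misc device and runtime PM when the initial resume fails, while remove() deliberately keeps pm_runtime_get_sync(): its usage-count increment happens even on error, so the teardown stays balanced without checking the result. A minimal sketch of that remove-side reasoning; example_teardown is a hypothetical name:

#include <linux/pm_runtime.h>

static void example_teardown(struct device *dev)
{
        /* Usage count is incremented even if the resume itself fails. */
        pm_runtime_get_sync(dev);

        pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
        pm_runtime_put_noidle(dev);     /* drop the reference taken above */
}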
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index 033a6935c26d..b26e44adb2be 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -64,7 +64,7 @@ static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
* V4L2 Subdevice Pad Operations
*/
static int csi_enum_bus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
@@ -79,7 +79,7 @@ static int csi_enum_bus_code(struct v4l2_subdev *subdev,
}
static int csi_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
@@ -127,7 +127,7 @@ static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
}
static int csi_enum_framesizes(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
unsigned int i;
@@ -154,7 +154,7 @@ static int csi_enum_framesizes(struct v4l2_subdev *subdev,
}
static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
@@ -181,7 +181,7 @@ static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
}
static int csi_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
@@ -298,10 +298,9 @@ static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
struct tegra_csi *csi = csi_chan->csi;
int ret, err;
- ret = pm_runtime_get_sync(csi->dev);
+ ret = pm_runtime_resume_and_get(csi->dev);
if (ret < 0) {
dev_err(csi->dev, "failed to get runtime PM: %d\n", ret);
- pm_runtime_put_noidle(csi->dev);
return ret;
}
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index df5ca3596470..89709cd06d4d 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -297,10 +297,9 @@ static int tegra_channel_start_streaming(struct vb2_queue *vq, u32 count)
struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
int ret;
- ret = pm_runtime_get_sync(chan->vi->dev);
+ ret = pm_runtime_resume_and_get(chan->vi->dev);
if (ret < 0) {
dev_err(chan->vi->dev, "failed to get runtime PM: %d\n", ret);
- pm_runtime_put_noidle(chan->vi->dev);
return ret;
}
@@ -494,7 +493,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
const struct tegra_video_format *fmtinfo;
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt;
- struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_state *sd_state;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -508,8 +507,8 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
if (!subdev)
return -ENODEV;
- pad_cfg = v4l2_subdev_alloc_pad_config(subdev);
- if (!pad_cfg)
+ sd_state = v4l2_subdev_alloc_state(subdev);
+ if (!sd_state)
return -ENOMEM;
/*
* Retrieve the format information and if requested format isn't
@@ -533,33 +532,33 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
* If not available, try to get crop boundary from subdev.
*/
fse.code = fmtinfo->code;
- ret = v4l2_subdev_call(subdev, pad, enum_frame_size, pad_cfg, &fse);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
if (ret) {
if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
- pad_cfg->try_crop.width = 0;
- pad_cfg->try_crop.height = 0;
+ sd_state->pads->try_crop.width = 0;
+ sd_state->pads->try_crop.height = 0;
} else {
ret = v4l2_subdev_call(subdev, pad, get_selection,
NULL, &sdsel);
if (ret)
return -EINVAL;
- pad_cfg->try_crop.width = sdsel.r.width;
- pad_cfg->try_crop.height = sdsel.r.height;
+ sd_state->pads->try_crop.width = sdsel.r.width;
+ sd_state->pads->try_crop.height = sdsel.r.height;
}
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
- ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
- v4l2_subdev_free_pad_config(pad_cfg);
+ v4l2_subdev_free_state(sd_state);
return 0;
}
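For one-off TRY negotiations, drivers that used to allocate a throwaway pad-config array now allocate a whole subdev state, and the scratch try_crop rectangles move behind sd_state->pads. A minimal sketch, assuming a hypothetical caller that only needs set_fmt (error handling trimmed):

#include <media/v4l2-subdev.h>

static int example_try_negotiate(struct v4l2_subdev *sd,
                                 struct v4l2_subdev_format *fmt)
{
        struct v4l2_subdev_state *sd_state;
        int ret;

        sd_state = v4l2_subdev_alloc_state(sd);
        if (!sd_state)
                return -ENOMEM;

        fmt->which = V4L2_SUBDEV_FORMAT_TRY;
        ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, fmt);

        v4l2_subdev_free_state(sd_state);
        return ret;
}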
@@ -1812,8 +1811,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
continue;
}
- tvge = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier,
- remote, struct tegra_vi_graph_entity);
+ tvge = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier, remote,
+ struct tegra_vi_graph_entity);
if (IS_ERR(tvge)) {
ret = PTR_ERR(tvge);
dev_err(vi->dev,
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index e7fe8da7732c..b1ad2a2b914c 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -158,7 +158,6 @@ struct zoran_jpg_settings {
struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
};
-
struct zoran;
/* zoran_fh contains per-open() settings */
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
index dfc60e2e9dd7..f259585b0689 100644
--- a/drivers/staging/media/zoran/zoran_card.c
+++ b/drivers/staging/media/zoran/zoran_card.c
@@ -37,9 +37,10 @@ module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "Card type");
/*
- * The video mem address of the video card. The driver has a little database for some videocards
- * to determine it from there. If your video card is not in there you have either to give it to
- * the driver as a parameter or set in in a VIDIOCSFBUF ioctl
+ * The video mem address of the video card. The driver has a little database
+ * for some videocards to determine it from there. If your video card is not
+ * in there, you either have to give it to the driver as a parameter or set
+ * it via the VIDIOCSFBUF ioctl.
*/
static unsigned long vidmem; /* default = 0 - Video memory base address */
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
index cf788d9cd1df..5b12a730a229 100644
--- a/drivers/staging/media/zoran/zoran_device.c
+++ b/drivers/staging/media/zoran/zoran_device.c
@@ -148,71 +148,6 @@ int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg)
}
/*
- * detect guests
- */
-
-static void dump_guests(struct zoran *zr)
-{
- if (zr36067_debug > 2) {
- int i, guest[8];
-
- /* do not print random data */
- guest[0] = 0;
-
- for (i = 1; i < 8; i++) /* Don't read jpeg codec here */
- guest[i] = post_office_read(zr, i, 0);
-
- pci_info(zr->pci_dev, "Guests: %*ph\n", 8, guest);
- }
-}
-
-void detect_guest_activity(struct zoran *zr)
-{
- int timeout, i, j, res, guest[8], guest0[8], change[8][3];
- ktime_t t0, t1;
-
- /* do not print random data */
- guest[0] = 0;
- guest0[0] = 0;
-
- dump_guests(zr);
- pci_info(zr->pci_dev, "Detecting guests activity, please wait...\n");
- for (i = 1; i < 8; i++) /* Don't read jpeg codec here */
- guest0[i] = guest[i] = post_office_read(zr, i, 0);
-
- timeout = 0;
- j = 0;
- t0 = ktime_get();
- while (timeout < 10000) {
- udelay(10);
- timeout++;
- for (i = 1; (i < 8) && (j < 8); i++) {
- res = post_office_read(zr, i, 0);
- if (res != guest[i]) {
- t1 = ktime_get();
- change[j][0] = ktime_to_us(ktime_sub(t1, t0));
- t0 = t1;
- change[j][1] = i;
- change[j][2] = res;
- j++;
- guest[i] = res;
- }
- }
- if (j >= 8)
- break;
- }
-
- pci_info(zr->pci_dev, "Guests: %*ph\n", 8, guest0);
-
- if (j == 0) {
- pci_info(zr->pci_dev, "No activity detected.\n");
- return;
- }
- for (i = 0; i < j; i++)
- pci_info(zr->pci_dev, "%6d: %d => 0x%02x\n", change[i][0], change[i][1], change[i][2]);
-}
-
-/*
* JPEG Codec access
*/
diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h
index 24be19a61b6d..6c5d70238228 100644
--- a/drivers/staging/media/zoran/zoran_device.h
+++ b/drivers/staging/media/zoran/zoran_device.h
@@ -20,8 +20,6 @@ extern int post_office_wait(struct zoran *zr);
extern int post_office_write(struct zoran *zr, unsigned int guest, unsigned int reg, unsigned int value);
extern int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg);
-extern void detect_guest_activity(struct zoran *zr);
-
extern void jpeg_codec_sleep(struct zoran *zr, int sleep);
extern int jpeg_codec_reset(struct zoran *zr);
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
index e8902f824d6c..46382e43f1bf 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -678,12 +678,14 @@ static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selectio
sel->r.height = zr->jpg_settings.img_height;
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.left = 0;
sel->r.width = BUZ_MIN_WIDTH;
sel->r.height = BUZ_MIN_HEIGHT;
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.top = sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.left = 0;
sel->r.width = BUZ_MAX_WIDTH;
sel->r.height = BUZ_MAX_HEIGHT;
break;
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c
index 2d7dc7abde79..9b350a885879 100644
--- a/drivers/staging/media/zoran/zr36016.c
+++ b/drivers/staging/media/zoran/zr36016.c
@@ -361,7 +361,8 @@ static int zr36016_setup(struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL);
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ codec->data = ptr;
if (!ptr)
return -ENOMEM;
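The zr36016/zr36050/zr36060 allocations switch to the preferred kernel idiom: size the allocation from the pointer rather than the type name, and keep the assignment out of the allocation statement. A minimal sketch with a hypothetical zr_example struct:

#include <linux/slab.h>

struct zr_example {
        int mode;
};

static struct zr_example *zr_example_alloc(void)
{
        struct zr_example *ptr;

        /* sizeof(*ptr) keeps tracking the type if the declaration changes. */
        ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
        return ptr;
}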
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c
index 2826f4e5d37b..c62af27f2683 100644
--- a/drivers/staging/media/zoran/zr36050.c
+++ b/drivers/staging/media/zoran/zr36050.c
@@ -16,7 +16,7 @@
#include <linux/wait.h>
/* I/O commands, error codes */
-#include <asm/io.h>
+#include <linux/io.h>
/* headerfile of this module */
#include "zr36050.h"
@@ -754,7 +754,8 @@ static int zr36050_setup(struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL);
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ codec->data = ptr;
if (!ptr)
return -ENOMEM;
diff --git a/drivers/staging/media/zoran/zr36057.h b/drivers/staging/media/zoran/zr36057.h
index 71b651add35a..a2a75fd9f535 100644
--- a/drivers/staging/media/zoran/zr36057.h
+++ b/drivers/staging/media/zoran/zr36057.h
@@ -30,13 +30,13 @@
#define ZR36057_VFESPFR_HOR_DCM 14
#define ZR36057_VFESPFR_VER_DCM 8
#define ZR36057_VFESPFR_DISP_MODE 6
-#define ZR36057_VFESPFR_YUV422 (0<<3)
-#define ZR36057_VFESPFR_RGB888 (1<<3)
-#define ZR36057_VFESPFR_RGB565 (2<<3)
-#define ZR36057_VFESPFR_RGB555 (3<<3)
-#define ZR36057_VFESPFR_ERR_DIF (1<<2)
-#define ZR36057_VFESPFR_PACK24 (1<<1)
-#define ZR36057_VFESPFR_LITTLE_ENDIAN (1<<0)
+#define ZR36057_VFESPFR_YUV422 (0 << 3)
+#define ZR36057_VFESPFR_RGB888 (1 << 3)
+#define ZR36057_VFESPFR_RGB565 (2 << 3)
+#define ZR36057_VFESPFR_RGB555 (3 << 3)
+#define ZR36057_VFESPFR_ERR_DIF (1 << 2)
+#define ZR36057_VFESPFR_PACK24 (1 << 1)
+#define ZR36057_VFESPFR_LITTLE_ENDIAN (1 << 0)
#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c
index 4f9eb9ff2c42..1c3af11b5f24 100644
--- a/drivers/staging/media/zoran/zr36060.c
+++ b/drivers/staging/media/zoran/zr36060.c
@@ -790,7 +790,8 @@ static int zr36060_setup(struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ codec->data = ptr;
if (!ptr)
return -ENOMEM;
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index f0c9ae757bcd..093a7f8091b5 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -437,6 +437,10 @@
mediatek,mcm;
resets = <&rstctrl 2>;
reset-names = "mcm";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 23 IRQ_TYPE_LEVEL_HIGH>;
ports {
#address-cells = <1>;
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index f27f20a4aa2d..a1cd81d4a114 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -50,8 +50,10 @@
#include <linux/module.h>
#include <linux/usb/hcd.h>
#include <linux/prefetch.h>
+#include <linux/irqdomain.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 33e28ccf4d85..b5229bc6eae5 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
* GDMA4740 DMAC support
*/
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
};
module_platform_driver(gdma_dma_driver);
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index a6d731e959a2..437859228371 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2091,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
struct net_device *ndev = padapter->pnetdev;
{
- struct station_info sinfo;
+ struct station_info sinfo = {};
u8 ie_offset;
if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
@@ -2284,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
mon_ndev->ieee80211_ptr = mon_wdev;
- ret = register_netdevice(mon_ndev);
+ ret = cfg80211_register_netdevice(mon_ndev);
if (ret) {
goto out;
}
@@ -2360,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
adapter = rtw_netdev_priv(ndev);
pwdev_priv = adapter_wdev_data(adapter);
- unregister_netdevice(ndev);
+ cfg80211_unregister_netdevice(ndev);
if (ndev == pwdev_priv->pmon_ndev) {
pwdev_priv->pmon_ndev = NULL;
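The two rtl8723bs fixes are independent: the empty initializer matters because cfg80211_new_sta() reports the whole station_info to userspace, so stale stack bytes would masquerade as filled-in fields; the cfg80211_register_netdevice() switch keeps netdev registration under the wiphy locking that cfg80211 expects. A minimal sketch of the zero-init side; example_report_assoc is a hypothetical helper:

#include <net/cfg80211.h>

static void example_report_assoc(struct net_device *ndev, const u8 *mac,
                                 const u8 *ies, size_t ies_len)
{
        struct station_info sinfo = {}; /* stale bits would read as valid */

        sinfo.assoc_req_ies = ies;
        sinfo.assoc_req_ies_len = ies_len;
        cfg80211_new_sta(ndev, mac, &sinfo, GFP_ATOMIC);
}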
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index d6fdd1c61f90..a526f9678c34 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -204,11 +204,11 @@ static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
struct iblock_dev_plug *ib_dev_plug;
/*
- * Each se_device has a per cpu work this can be run from. Wwe
+ * Each se_device has a per cpu work this can be run from. We
* shouldn't have multiple threads on the same cpu calling this
* at the same time.
*/
- ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+ ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
return NULL;
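smp_processor_id() triggers a debug warning when called from preemptible context. Here the result only selects a per-CPU scratch slot, and migrating right after the read is tolerated (the test_and_set_bit catches collisions), so the raw_ variant is the honest choice. A minimal sketch of the pattern; pick_percpu_slot is a hypothetical name:

#include <linux/smp.h>

static unsigned int pick_percpu_slot(unsigned int nr_slots)
{
        /* Any CPU's slot will do; migration after the read is harmless. */
        return raw_smp_processor_id() % nr_slots;
}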
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 8fbfe75c5744..7e35eddd9eb7 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1416,7 +1416,7 @@ void __target_init_cmd(
cmd->orig_fe_lun = unpacked_lun;
if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
- cmd->cpuid = smp_processor_id();
+ cmd->cpuid = raw_smp_processor_id();
cmd->state_active = false;
}
@@ -3121,9 +3121,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
__releases(&cmd->t_state_lock)
__acquires(&cmd->t_state_lock)
{
-
- assert_spin_locked(&cmd->t_state_lock);
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_held(&cmd->t_state_lock);
if (fabric_stop)
cmd->transport_state |= CMD_T_FABRIC_STOP;
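lockdep_assert_held() is a strict upgrade on the removed pair: it compiles to nothing without CONFIG_LOCKDEP, and with lockdep enabled it checks that the current task holds the lock, whereas assert_spin_locked() is satisfied if anyone holds it. A hedged sketch of the contract (__set_fabric_stop is a made-up helper):

	#include <linux/lockdep.h>

	/* Caller must hold cmd->t_state_lock; it is taken with
	 * spin_lock_irqsave(), so IRQs are implicitly off here too.
	 */
	static void __set_fabric_stop(struct se_cmd *cmd)
	{
		lockdep_assert_held(&cmd->t_state_lock);
		cmd->transport_state |= CMD_T_FABRIC_STOP;
	}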
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 198d25ae482a..4bba10e7755a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -516,8 +516,10 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
dpi = dbi * udev->data_pages_per_blk;
/* Count the number of already allocated pages */
xas_set(&xas, dpi);
+ rcu_read_lock();
for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
cnt++;
+ rcu_read_unlock();
for (i = cnt; i < page_cnt; i++) {
/* try to get new page from the mm */
@@ -699,11 +701,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
struct scatterlist *sg, unsigned int sg_nents,
struct iovec **iov, size_t data_len)
{
- XA_STATE(xas, &udev->data_pages, 0);
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
size_t page_remaining, cp_len;
- int page_cnt, page_inx;
+ int page_cnt, page_inx, dpi;
struct sg_mapping_iter sg_iter;
unsigned int sg_flags;
struct page *page;
@@ -726,9 +727,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
if (page_cnt > udev->data_pages_per_blk)
page_cnt = udev->data_pages_per_blk;
- xas_set(&xas, dbi * udev->data_pages_per_blk);
- for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
- page = xas_next(&xas);
+ dpi = dbi * udev->data_pages_per_blk;
+ for (page_inx = 0; page_inx < page_cnt && data_len;
+ page_inx++, dpi++) {
+ page = xa_load(&udev->data_pages, dpi);
if (direction == TCMU_DATA_AREA_TO_SG)
flush_dcache_page(page);
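Both hunks enforce the same XArray rule: an xas_* cursor walks the tree locklessly, so the walk must sit inside an RCU read-side section, while xa_load() takes the RCU read lock internally for a single lookup, which is why the copy path can drop XA_STATE altogether. Sketch, reusing the patch's names:

	XA_STATE(xas, &udev->data_pages, 0);

	xas_set(&xas, dpi);
	rcu_read_lock();		/* required around xas_* walks */
	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
		cnt++;
	rcu_read_unlock();

	/* single lookup: xa_load() handles RCU by itself */
	page = xa_load(&udev->data_pages, dpi);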
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 6132cc8d014c..6e6eb836e9b6 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -220,6 +220,7 @@ int optee_open_session(struct tee_context *ctx,
struct optee_msg_arg *msg_arg;
phys_addr_t msg_parg;
struct optee_session *sess = NULL;
+ uuid_t client_uuid;
/* +2 for the meta parameters added below */
shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
@@ -240,10 +241,11 @@ int optee_open_session(struct tee_context *ctx,
memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
msg_arg->params[1].u.value.c = arg->clnt_login;
- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
- arg->clnt_login, arg->clnt_uuid);
+ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+ arg->clnt_uuid);
if (rc)
goto out;
+ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
if (rc)
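Computing the client UUID straight into params[1].u.value wrote it through host-endian u64s, so the byte layout secure world saw depended on the CPU. Building it in a local uuid_t and serializing with export_uuid() yields a fixed-order 16-byte octet string; the new u.octets[24] member simply spans the full value union. Minimal usage sketch:

	#include <linux/uuid.h>

	u8 octets[UUID_SIZE];	/* 16 bytes, endianness-independent */
	uuid_t uuid;

	uuid_gen(&uuid);
	export_uuid(octets, &uuid);	/* plain byte copy, no word order */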
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 81ff593ac4ec..e3d72d09c484 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
/*
- * This file defines the OP-TEE message protocol used to communicate
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
* with an instance of OP-TEE running in secure world.
*
* This file is divided into two sections.
@@ -144,9 +144,10 @@ struct optee_msg_param_value {
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
* @value: parameter by opaque value
+ * @octets: parameter by octet string
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
* OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
* OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
@@ -157,6 +158,7 @@ struct optee_msg_param {
struct optee_msg_param_tmem tmem;
struct optee_msg_param_rmem rmem;
struct optee_msg_param_value value;
+ u8 octets[24];
} u;
};
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 480d294a23ab..2b37bc408fc3 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -452,6 +452,7 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
if (put_user((u64)p->u.memref.size, &up->b))
return -EFAULT;
+ break;
default:
break;
}
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index eeb4e4b76c0b..43b1ae8a7789 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -478,7 +478,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
if (ret >= 0) {
cpufreq_cdev->cpufreq_state = state;
- cpus = cpufreq_cdev->policy->cpus;
+ cpus = cpufreq_cdev->policy->related_cpus;
max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
capacity = frequency * max_capacity;
capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index d1248ba943a4..62c0aa5d0783 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (ACPI_FAILURE(status))
trip_cnt = 0;
else {
+ int i;
+
int34x_thermal_zone->aux_trips =
kcalloc(trip_cnt,
sizeof(*int34x_thermal_zone->aux_trips),
@@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
}
trip_mask = BIT(trip_cnt) - 1;
int34x_thermal_zone->aux_trip_nr = trip_cnt;
+ for (i = 0; i < trip_cnt; ++i)
+ int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
}
trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index f8e882592ba5..99abdc03c44c 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
return atomic_read(&therm_throt_en);
}
+void __init therm_lvt_init(void)
+{
+ /*
+ * This function is only called on the boot CPU. Save the initial
+ * thermal LVT value on the BSP and use it later to restore the
+ * thermal LVT entries that the BIOS programmed on the APs.
+ */
+ if (intel_thermal_supported(&boot_cpu_data))
+ lvtthmr_init = apic_read(APIC_LVTTHMR);
+}
+
void intel_init_thermal(struct cpuinfo_x86 *c)
{
unsigned int cpu = smp_processor_id();
@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
if (!intel_thermal_supported(c))
return;
- /* On the BSP? */
- if (c == &boot_cpu_data)
- lvtthmr_init = apic_read(APIC_LVTTHMR);
-
/*
* First check if its enabled already, in which case there might
* be some SMM goo which handles it, so we can't even put a handler
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 295742e83960..4d8edc61a78b 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
if (thres_reg_value)
*temp = zonedev->tj_max - thres_reg_value * 1000;
else
- *temp = 0;
+ *temp = THERMAL_TEMP_INVALID;
pr_debug("sys_get_trip_temp %d\n", *temp);
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index b460b56e981c..232fd0b33325 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
- return ret;
+ return -EINVAL;
}
channel->adc_channel = args.args[0];
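A classic error-path slip: by the time the argument check fails, ret still holds the 0 from the successful phandle lookup just above, so malformed DT data was reported as success. The shape of the bug, with an illustrative call:

	ret = of_parse_phandle_with_args(node, "io-channels",
					 "#io-channel-cells", 0, &args);
	if (ret < 0)
		return ret;

	if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL)
		return -EINVAL;	/* not "return ret" - that is 0 here */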
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d20b25f40d19..10a2d8e1cacf 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -36,10 +36,8 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
-static DEFINE_MUTEX(poweroff_lock);
static atomic_t in_suspend;
-static bool power_off_triggered;
static struct thermal_governor *def_governor;
@@ -327,70 +325,18 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip)
def_governor->throttle(tz, trip);
}
-/**
- * thermal_emergency_poweroff_func - emergency poweroff work after a known delay
- * @work: work_struct associated with the emergency poweroff function
- *
- * This function is called in very critical situations to force
- * a kernel poweroff after a configurable timeout value.
- */
-static void thermal_emergency_poweroff_func(struct work_struct *work)
-{
- /*
- * We have reached here after the emergency thermal shutdown
- * Waiting period has expired. This means orderly_poweroff has
- * not been able to shut off the system for some reason.
- * Try to shut down the system immediately using kernel_power_off
- * if populated
- */
- WARN(1, "Attempting kernel_power_off: Temperature too high\n");
- kernel_power_off();
-
- /*
- * Worst of the worst case trigger emergency restart
- */
- WARN(1, "Attempting emergency_restart: Temperature too high\n");
- emergency_restart();
-}
-
-static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
- thermal_emergency_poweroff_func);
-
-/**
- * thermal_emergency_poweroff - Trigger an emergency system poweroff
- *
- * This may be called from any critical situation to trigger a system shutdown
- * after a known period of time. By default this is not scheduled.
- */
-static void thermal_emergency_poweroff(void)
+void thermal_zone_device_critical(struct thermal_zone_device *tz)
{
- int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
/*
* poweroff_delay_ms must be a carefully profiled positive value.
- * Its a must for thermal_emergency_poweroff_work to be scheduled
+ * It's a must for forced_emergency_poweroff_work to be scheduled.
*/
- if (poweroff_delay_ms <= 0)
- return;
- schedule_delayed_work(&thermal_emergency_poweroff_work,
- msecs_to_jiffies(poweroff_delay_ms));
-}
+ int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
-void thermal_zone_device_critical(struct thermal_zone_device *tz)
-{
dev_emerg(&tz->device, "%s: critical temperature reached, "
"shutting down\n", tz->type);
- mutex_lock(&poweroff_lock);
- if (!power_off_triggered) {
- /*
- * Queue a backup emergency shutdown in the event of
- * orderly_poweroff failure
- */
- thermal_emergency_poweroff();
- orderly_poweroff(true);
- power_off_triggered = true;
- }
- mutex_unlock(&poweroff_lock);
+ hw_protection_shutdown("Temperature too high", poweroff_delay_ms);
}
EXPORT_SYMBOL(thermal_zone_device_critical);
@@ -1538,7 +1484,6 @@ error:
ida_destroy(&thermal_cdev_ida);
mutex_destroy(&thermal_list_lock);
mutex_destroy(&thermal_governor_lock);
- mutex_destroy(&poweroff_lock);
return result;
}
postcore_initcall(thermal_init);
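The driver-local machinery (mutex, trigger flag, delayed work, forced kernel_power_off()) is replaced wholesale by the generic helper, which performs the same dance once, system-wide: kick off orderly_poweroff() and arm a backup that forces the machine down if the orderly path stalls. Assuming the <linux/reboot.h> signature from this kernel era:

	#include <linux/reboot.h>

	/* reason lands in the log; the delay bounds how long the
	 * orderly shutdown may take before being forced.
	 */
	hw_protection_shutdown("Temperature too high",
			       CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS);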
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ebe7cb70bfb6..ea0603b59309 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -770,7 +770,7 @@ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
}
/**
- * ti_bandgap_alert_init() - setup and initialize talert handling
+ * ti_bandgap_talert_init() - setup and initialize talert handling
* @bgp: pointer to struct ti_bandgap
* @pdev: pointer to device struct platform_device
*
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index 7288aaf01ae6..5631319f7b20 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
unsigned int retries = DMA_PORT_RETRIES;
- unsigned int offset;
-
- offset = address & 3;
- address = address & ~3;
do {
- u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ unsigned int offset;
+ size_t nbytes;
int ret;
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
ret = dma_port_flash_read_block(dma, address, dma->buf,
ALIGN(nbytes, 4));
if (ret) {
@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
return ret;
}
+ nbytes -= offset;
memcpy(buf, dma->buf + offset, nbytes);
size -= nbytes;
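The old code derived offset once, before the loop, then applied it to every block copy, even though only the first block can be misaligned; later iterations therefore skipped valid bytes. Recomputing offset each pass and clamping size + offset to the mailbox keeps every copy inside the staging buffer. A worked example, assuming the 64-byte mailbox (MAIL_DATA_DWORDS * 4) and that address advances by the copied count:

	/* Read 100 bytes starting at 0x1002:
	 *
	 * pass 1: offset = 0x1002 & 3 = 2
	 *         nbytes = min(100 + 2, 64) = 64 raw, 64 - 2 = 62 copied
	 *         address += 62 -> 0x1040, now dword aligned
	 * pass 2: offset = 0
	 *         nbytes = min(38 + 0, 64) = 38 copied, done
	 */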
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 680bc738dd66..671d72af8ba1 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
unsigned int retries = USB4_DATA_RETRIES;
unsigned int offset;
- offset = address & 3;
- address = address & ~3;
-
do {
- size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
unsigned int dwaddress, dwords;
u8 data[USB4_DATA_DWORDS * 4];
+ size_t nbytes;
int ret;
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
dwaddress = address / 4;
dwords = ALIGN(nbytes, 4) / 4;
@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
return ret;
}
+ nbytes -= offset;
memcpy(buf, data + offset, nbytes);
size -= nbytes;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 52bb21205bb6..6473361525d1 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -7,6 +7,7 @@
* Copyright (C) 2001 Russell King.
*/
+#include <linux/bits.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/dmaengine.h>
@@ -70,24 +71,25 @@ struct serial8250_config {
unsigned int flags;
};
-#define UART_CAP_FIFO (1 << 8) /* UART has FIFO */
-#define UART_CAP_EFR (1 << 9) /* UART has EFR */
-#define UART_CAP_SLEEP (1 << 10) /* UART has IER sleep */
-#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
-#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
-#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
-#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
-#define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */
-#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */
-#define UART_CAP_MINI (1 << 17) /* Mini UART on BCM283X family lacks:
+#define UART_CAP_FIFO BIT(8) /* UART has FIFO */
+#define UART_CAP_EFR BIT(9) /* UART has EFR */
+#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */
+#define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */
+#define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */
+#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */
+#define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */
+#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks:
* STOP PARITY EPAR SPAR WLEN5 WLEN6
*/
-#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
-#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
-#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
-#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */
+#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */
+#define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */
+#define UART_BUG_PARITY BIT(4) /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */
#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 61550f24a2d3..d035d08cb987 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -437,6 +437,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ port.bugs |= UART_BUG_TXRACE;
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 9e204f9b799a..a3a0154da567 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "APMC0D08", 0},
{ "AMD0020", 0 },
{ "AMDI0020", 0 },
+ { "AMDI0022", 0 },
{ "BRCM2032", 0 },
{ "HISI0031", 0 },
{ },
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 2f49c580139b..bd4e9f6ac29c 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
{
struct exar8250 *priv = pci_get_drvdata(pcidev);
struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
- struct platform_device *pdev = port->port.private_data;
+ struct platform_device *pdev;
+
+ pdev = port->port.private_data;
+ if (!pdev)
+ return;
device_remove_software_node(&pdev->dev);
platform_device_unregister(pdev);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 689d8227f95f..780cc99732b6 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -56,6 +56,8 @@ struct serial_private {
int line[];
};
+#define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e
+
static const struct pci_device_id pci_use_msi[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x1000) },
@@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = {
0xA000, 0x1000) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
0xA000, 0x1000) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ PCI_ANY_ID, PCI_ANY_ID) },
{ }
};
@@ -1998,6 +2002,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_hp_diva_setup,
},
/*
+ * HPE PCI serial device
+ */
+ {
+ .vendor = PCI_VENDOR_ID_HP_3PAR,
+ .device = PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_hp_diva_setup,
+ },
+ /*
* Intel
*/
{
@@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.uartclk = board->base_baud * 16;
- if (pci_match_id(pci_use_msi, dev)) {
- dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
- pci_set_master(dev);
- rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (board->flags & FL_NOIRQ) {
+ uart.port.irq = 0;
} else {
- dev_dbg(&dev->dev, "Using legacy interrupts\n");
- rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
- }
- if (rc < 0) {
- kfree(priv);
- priv = ERR_PTR(rc);
- goto err_deinit;
+ if (pci_match_id(pci_use_msi, dev)) {
+ dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+ pci_set_master(dev);
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ } else {
+ dev_dbg(&dev->dev, "Using legacy interrupts\n");
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ }
+ if (rc < 0) {
+ kfree(priv);
+ priv = ERR_PTR(rc);
+ goto err_deinit;
+ }
+
+ uart.port.irq = pci_irq_vector(dev, 0);
}
- uart.port.irq = pci_irq_vector(dev, 0);
uart.port.dev = &dev->dev;
for (i = 0; i < nr_ports; i++) {
@@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b2_1_115200 },
+ /* HPE PCI serial device */
+ { PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_115200 },
{ PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d45dab1ab316..fc5ab2032282 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1809,6 +1809,18 @@ void serial8250_tx_chars(struct uart_8250_port *up)
count = up->tx_loadsz;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ if (up->bugs & UART_BUG_TXRACE) {
+ /*
+ * The Aspeed BMC virtual UARTs have a bug where data
+ * may get stuck in the BMC's Tx FIFO from bursts of
+ * writes on the APB interface.
+ *
+ * Delay back-to-back writes by a read cycle to avoid
+ * stalling the VUART. Read a register that won't have
+ * side-effects and discard the result.
+ */
+ serial_in(up, UART_SCR);
+ }
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
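The workaround is the standard posted-write flush: a read on the same bus cannot complete until earlier writes have landed, so a dummy read of a side-effect-free register (the scratch register) paces back-to-back Tx writes. In miniature:

	serial_out(up, UART_TX, ch);	/* posted write into the Tx FIFO */
	(void)serial_in(up, UART_SCR);	/* read-back forces it to retire */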
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index d60abffab70e..6689d8add8f7 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -195,7 +195,6 @@ struct rp2_card {
void __iomem *bar0;
void __iomem *bar1;
spinlock_t card_lock;
- struct completion fw_loaded;
};
#define RP_ID(prod) PCI_VDEVICE(RP, (prod))
@@ -662,17 +661,10 @@ static void rp2_remove_ports(struct rp2_card *card)
card->initialized_ports = 0;
}
-static void rp2_fw_cb(const struct firmware *fw, void *context)
+static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
{
- struct rp2_card *card = context;
resource_size_t phys_base;
- int i, rc = -ENOENT;
-
- if (!fw) {
- dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
- RP2_FW_NAME);
- goto no_fw;
- }
+ int i, rc = 0;
phys_base = pci_resource_start(card->pdev, 1);
@@ -718,23 +710,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
card->initialized_ports++;
}
- release_firmware(fw);
-no_fw:
- /*
- * rp2_fw_cb() is called from a workqueue long after rp2_probe()
- * has already returned success. So if something failed here,
- * we'll just leave the now-dormant device in place until somebody
- * unbinds it.
- */
- if (rc)
- dev_warn(&card->pdev->dev, "driver initialization failed\n");
-
- complete(&card->fw_loaded);
+ return rc;
}
static int rp2_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ const struct firmware *fw;
struct rp2_card *card;
struct rp2_uart_port *ports;
void __iomem * const *bars;
@@ -745,7 +727,6 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
pci_set_drvdata(pdev, card);
spin_lock_init(&card->card_lock);
- init_completion(&card->fw_loaded);
rc = pcim_enable_device(pdev);
if (rc)
@@ -778,21 +759,23 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
card->ports = ports;
- rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
- IRQF_SHARED, DRV_NAME, card);
- if (rc)
+ rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
+ RP2_FW_NAME);
return rc;
+ }
- /*
- * Only catastrophic errors (e.g. ENOMEM) are reported here.
- * If the FW image is missing, we'll find out in rp2_fw_cb()
- * and print an error message.
- */
- rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
- GFP_KERNEL, card, rp2_fw_cb);
+ rc = rp2_load_firmware(card, fw);
+
+ release_firmware(fw);
+ if (rc < 0)
+ return rc;
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+ IRQF_SHARED, DRV_NAME, card);
if (rc)
return rc;
- dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
return 0;
}
@@ -801,7 +784,6 @@ static void rp2_remove(struct pci_dev *pdev)
{
struct rp2_card *card = pci_get_drvdata(pdev);
- wait_for_completion(&card->fw_loaded);
rp2_remove_ports(card);
}
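Loading the firmware synchronously removes the whole completion dance: a missing image now fails the probe cleanly instead of leaving a dormant, half-initialized device bound, and the IRQ is only requested once the ports exist. The probe-time pattern, as in the patch:

	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
	if (rc < 0)
		return rc;	/* probe fails, device stays unbound */

	rc = rp2_load_firmware(card, fw);
	release_firmware(fw);	/* always pairs with request_firmware() */
	if (rc < 0)
		return rc;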
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index bbae072a125d..222032792d6c 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
do {
lsr = tegra_uart_read(tup, UART_LSR);
- if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+ if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
break;
udelay(1);
} while (--tmout);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 87f7127b57e6..18ff85a83f80 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -863,9 +863,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
goto check_and_exit;
}
- retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
- if (retval && (change_irq || change_port))
- goto exit;
+ if (change_irq || change_port) {
+ retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
+ if (retval)
+ goto exit;
+ }
/*
* Ask the low level driver to verify the settings.
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ef37fdf37612..4baf1316ea72 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
{
unsigned int bits;
+ if (rx_trig >= port->fifosize)
+ rx_trig = port->fifosize - 1;
if (rx_trig < 1)
rx_trig = 1;
- if (rx_trig >= port->fifosize)
- rx_trig = port->fifosize;
/* HSCIF can be set to an arbitrary level. */
if (sci_getreg(port, HSRTRGR)->size) {
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 9b1bd417cec0..5281f8d3fb3d 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
else
mask = BIT(priv_ep->num);
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->tdl_beh, mask);
cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
case USB_ENDPOINT_XFER_INT:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
case USB_ENDPOINT_XFER_BULK:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
default:
@@ -3268,8 +3266,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
pm_runtime_get_sync(cdns->dev);
ret = cdns3_gadget_start(cdns);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(cdns->dev);
return ret;
+ }
/*
* Because interrupt line can be shared with other components in
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 56707b6b0f57..c083985e387b 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -422,17 +422,17 @@ unmap:
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
struct cdnsp_device *pdev = pep->pdev;
- int ret;
+ int ret_stop = 0;
+ int ret_rem;
trace_cdnsp_request_dequeue(preq);
- if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
- ret = cdnsp_cmd_stop_ep(pdev, pep);
- if (ret)
- return ret;
- }
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
+ ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
+
+ ret_rem = cdnsp_remove_request(pdev, preq, pep);
- return cdnsp_remove_request(pdev, preq, pep);
+ return ret_rem ? ret_rem : ret_stop;
}
static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 5f0513c96c04..68972746e363 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
struct cdnsp_device *pdev = (struct cdnsp_device *)data;
union cdnsp_trb *event_ring_deq;
+ unsigned long flags;
int counter = 0;
- spin_lock(&pdev->lock);
+ spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
cdnsp_died(pdev);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
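A threaded IRQ handler runs with hard interrupts enabled, unlike a top-half handler, so if pdev->lock can also be taken from a context that disables interrupts, a plain spin_lock() here can deadlock on the same CPU. The irqsave variant is correct regardless of who else takes the lock:

	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	/* ... drain the event ring ... */
	spin_unlock_irqrestore(&pdev->lock, flags);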
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c16d900cdaee..393f216b9161 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
ci->gadget.sg_supported = 1;
+ ci->gadget.irq = ci->irq;
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 4545b23bda3f..bac0f5458cab 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
int val;
unsigned long flags;
+ /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ /* TVDMSRC_DIS */
+ msleep(20);
+
/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDMSRC_ON */
+ msleep(40);
/*
* Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDPSRC_ON */
+ msleep(40);
/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index d1e4a7379beb..8e5490ac13a2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -21,8 +21,10 @@
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/poll.h>
+#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
+#include <linux/wwan.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/usb/cdc-wdm.h>
@@ -55,6 +57,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_SUSPENDING 8
#define WDM_RESETTING 9
#define WDM_OVERFLOW 10
+#define WDM_WWAN_IN_USE 11
#define WDM_MAX 16
@@ -106,6 +109,9 @@ struct wdm_device {
struct list_head device_list;
int (*manage_power)(struct usb_interface *, int);
+
+ enum wwan_port_type wwanp_type;
+ struct wwan_port *wwanp;
};
static struct usb_driver wdm_driver;
@@ -157,6 +163,8 @@ static void wdm_out_callback(struct urb *urb)
wake_up_all(&desc->wait);
}
+static void wdm_wwan_rx(struct wdm_device *desc, int length);
+
static void wdm_in_callback(struct urb *urb)
{
unsigned long flags;
@@ -192,6 +200,11 @@ static void wdm_in_callback(struct urb *urb)
}
}
+ if (test_bit(WDM_WWAN_IN_USE, &desc->flags)) {
+ wdm_wwan_rx(desc, length);
+ goto out;
+ }
+
/*
* only set a new error if there is no previous error.
* Errors are only cleared during read/open
@@ -226,6 +239,7 @@ skip_error:
set_bit(WDM_READ, &desc->flags);
wake_up(&desc->wait);
}
+out:
spin_unlock_irqrestore(&desc->iuspin, flags);
}
@@ -708,6 +722,11 @@ static int wdm_open(struct inode *inode, struct file *file)
goto out;
file->private_data = desc;
+ if (test_bit(WDM_WWAN_IN_USE, &desc->flags)) {
+ rv = -EBUSY;
+ goto out;
+ }
+
rv = usb_autopm_get_interface(desc->intf);
if (rv < 0) {
dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
@@ -804,6 +823,152 @@ static struct usb_class_driver wdm_class = {
.minor_base = WDM_MINOR_BASE,
};
+/* --- WWAN framework integration --- */
+#ifdef CONFIG_WWAN
+static int wdm_wwan_port_start(struct wwan_port *port)
+{
+ struct wdm_device *desc = wwan_port_get_drvdata(port);
+
+ /* The interface is both exposed via the WWAN framework and as a
+ * legacy usbmisc chardev. If chardev is already open, just fail
+ * to prevent concurrent usage. Otherwise, switch to WWAN mode.
+ */
+ mutex_lock(&wdm_mutex);
+ if (desc->count) {
+ mutex_unlock(&wdm_mutex);
+ return -EBUSY;
+ }
+ set_bit(WDM_WWAN_IN_USE, &desc->flags);
+ mutex_unlock(&wdm_mutex);
+
+ desc->manage_power(desc->intf, 1);
+
+ /* tx is allowed */
+ wwan_port_txon(port);
+
+ /* Start getting events */
+ return usb_submit_urb(desc->validity, GFP_KERNEL);
+}
+
+static void wdm_wwan_port_stop(struct wwan_port *port)
+{
+ struct wdm_device *desc = wwan_port_get_drvdata(port);
+
+ /* Stop all transfers and disable WWAN mode */
+ poison_urbs(desc);
+ desc->manage_power(desc->intf, 0);
+ clear_bit(WDM_READ, &desc->flags);
+ clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ unpoison_urbs(desc);
+}
+
+static void wdm_wwan_port_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct wdm_device *desc = skb_shinfo(skb)->destructor_arg;
+
+ usb_autopm_put_interface(desc->intf);
+ wwan_port_txon(desc->wwanp);
+ kfree_skb(skb);
+}
+
+static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct wdm_device *desc = wwan_port_get_drvdata(port);
+ struct usb_interface *intf = desc->intf;
+ struct usb_ctrlrequest *req = desc->orq;
+ int rv;
+
+ rv = usb_autopm_get_interface(intf);
+ if (rv)
+ return rv;
+
+ usb_fill_control_urb(
+ desc->command,
+ interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ (unsigned char *)req,
+ skb->data,
+ skb->len,
+ wdm_wwan_port_tx_complete,
+ skb
+ );
+
+ req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ req->wValue = 0;
+ req->wIndex = desc->inum;
+ req->wLength = cpu_to_le16(skb->len);
+
+ skb_shinfo(skb)->destructor_arg = desc;
+
+ rv = usb_submit_urb(desc->command, GFP_KERNEL);
+ if (rv)
+ usb_autopm_put_interface(intf);
+ else /* One transfer at a time, stop TX until URB completion */
+ wwan_port_txoff(port);
+
+ return rv;
+}
+
+static struct wwan_port_ops wdm_wwan_port_ops = {
+ .start = wdm_wwan_port_start,
+ .stop = wdm_wwan_port_stop,
+ .tx = wdm_wwan_port_tx,
+};
+
+static void wdm_wwan_init(struct wdm_device *desc)
+{
+ struct usb_interface *intf = desc->intf;
+ struct wwan_port *port;
+
+ /* Only register to WWAN core if protocol/type is known */
+ if (desc->wwanp_type == WWAN_PORT_UNKNOWN) {
+ dev_info(&intf->dev, "Unknown control protocol\n");
+ return;
+ }
+
+ port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, desc);
+ if (IS_ERR(port)) {
+ dev_err(&intf->dev, "%s: Unable to create WWAN port\n",
+ dev_name(intf->usb_dev));
+ return;
+ }
+
+ desc->wwanp = port;
+}
+
+static void wdm_wwan_deinit(struct wdm_device *desc)
+{
+ if (!desc->wwanp)
+ return;
+
+ wwan_remove_port(desc->wwanp);
+ desc->wwanp = NULL;
+}
+
+static void wdm_wwan_rx(struct wdm_device *desc, int length)
+{
+ struct wwan_port *port = desc->wwanp;
+ struct sk_buff *skb;
+
+ /* Forward data to WWAN port */
+ skb = alloc_skb(length, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ memcpy(skb_put(skb, length), desc->inbuf, length);
+ wwan_port_rx(port, skb);
+
+ /* inbuf has been copied, it is safe to check for outstanding data */
+ schedule_work(&desc->service_outs_intr);
+}
+#else /* CONFIG_WWAN */
+static void wdm_wwan_init(struct wdm_device *desc) {}
+static void wdm_wwan_deinit(struct wdm_device *desc) {}
+static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
+#endif /* CONFIG_WWAN */
+
/* --- error handling --- */
static void wdm_rxwork(struct work_struct *work)
{
@@ -848,7 +1013,8 @@ static void service_interrupt_work(struct work_struct *work)
/* --- hotplug --- */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
- u16 bufsize, int (*manage_power)(struct usb_interface *, int))
+ u16 bufsize, enum wwan_port_type type,
+ int (*manage_power)(struct usb_interface *, int))
{
int rv = -ENOMEM;
struct wdm_device *desc;
@@ -865,6 +1031,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
/* this will be expanded and needed in hardware endianness */
desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber);
desc->intf = intf;
+ desc->wwanp_type = type;
INIT_WORK(&desc->rxwork, wdm_rxwork);
INIT_WORK(&desc->service_outs_intr, service_interrupt_work);
@@ -945,6 +1112,9 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
goto err;
else
dev_info(&intf->dev, "%s: USB WDM device\n", dev_name(intf->usb_dev));
+
+ wdm_wwan_init(desc);
+
out:
return rv;
err:
@@ -989,7 +1159,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err;
ep = &iface->endpoint[0].desc;
- rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
+ rv = wdm_create(intf, ep, maxcom, WWAN_PORT_UNKNOWN, &wdm_manage_power);
err:
return rv;
@@ -1000,6 +1170,7 @@ err:
* @intf: usb interface the subdriver will associate with
* @ep: interrupt endpoint to monitor for notifications
* @bufsize: maximum message size to support for read/write
+ * @type: Type/protocol of the transported data (MBIM, QMI...)
* @manage_power: call-back invoked during open and release to
* manage the device's power
* Create WDM usb class character device and associate it with intf
@@ -1017,12 +1188,12 @@ err:
*/
struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep,
- int bufsize,
+ int bufsize, enum wwan_port_type type,
int (*manage_power)(struct usb_interface *, int))
{
int rv;
- rv = wdm_create(intf, ep, bufsize, manage_power);
+ rv = wdm_create(intf, ep, bufsize, type, manage_power);
if (rv < 0)
goto err;
@@ -1041,6 +1212,8 @@ static void wdm_disconnect(struct usb_interface *intf)
desc = wdm_find_device(intf);
mutex_lock(&wdm_mutex);
+ wdm_wwan_deinit(desc);
+
/* the spinlock makes sure no new urbs are generated in the callbacks */
spin_lock_irqsave(&desc->iuspin, flags);
set_bit(WDM_DISCONNECTING, &desc->flags);
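The WWAN integration boils down to one port object and three callbacks; the rest of the hunk is arbitration (the WDM_WWAN_IN_USE bit) so the legacy chardev and the WWAN port can never be open at the same time. A condensed sketch of the flow, using symbols the patch introduces:

	static struct wwan_port_ops ops = {
		.start = wdm_wwan_port_start,	/* claim device, start RX */
		.stop  = wdm_wwan_port_stop,	/* poison URBs, drop power */
		.tx    = wdm_wwan_port_tx,	/* one control URB in flight */
	};

	port = wwan_create_port(&intf->dev, desc->wwanp_type, &ops, desc);
	if (!IS_ERR(port))
		desc->wwanp = port;

	/* RX side: wdm_in_callback() copies inbuf into an skb and hands
	 * it up with wwan_port_rx(port, skb) - see wdm_wwan_rx() above.
	 */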
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 533236366a03..2218941d35a3 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
return ret;
- tbuf = kmalloc(len1, GFP_KERNEL);
+
+ /*
+ * len1 can be almost arbitrarily large. Don't WARN if it's
+ * too big, just fail the request.
+ */
+ tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
if (!tbuf) {
ret = -ENOMEM;
goto done;
@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (num_sgs) {
as->urb->sg = kmalloc_array(num_sgs,
sizeof(struct scatterlist),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->sg) {
ret = -ENOMEM;
goto error;
@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
(uurb_start - as->usbm->vm_start);
} else {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->transfer_buffer) {
ret = -ENOMEM;
goto error;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fc7d6cdacf16..df8e69e60aaf 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -41,6 +41,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define USB_PRODUCT_USB5534B 0x5534
+#define USB_VENDOR_CYPRESS 0x04b4
+#define USB_PRODUCT_CY7C65632 0x6570
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -5698,6 +5700,11 @@ static const struct usb_device_id hub_id_table[] = {
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_CYPRESS,
+ .idProduct = USB_PRODUCT_CY7C65632,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index b6e53d8212cd..4ac397e43e19 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1671,8 +1671,8 @@ static int dwc3_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
- dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_debugfs_exit(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
return 0;
}
-static void dwc3_shutdown(struct platform_device *pdev)
-{
- dwc3_remove(pdev);
-}
-
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
- .shutdown = dwc3_shutdown,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index d0ac89c5b317..d223c54115f4 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
extern void dwc3_debugfs_init(struct dwc3 *d);
extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{ }
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 7146ee2ac057..5dbbe53269d3 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
}
}
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
- struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
struct dentry *dir;
- dir = debugfs_create_dir(dep->name, parent);
+ dir = debugfs_create_dir(dep->name, dep->dwc->root);
dwc3_debugfs_create_endpoint_files(dep, dir);
}
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
- struct dentry *parent)
-{
- int i;
-
- for (i = 0; i < dwc->num_eps; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- dwc3_debugfs_create_endpoint_dir(dep, parent);
- }
-}
-
void dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", 0644, root, dwc,
&dwc3_link_state_fops);
- dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index bdf1f98dfad8..ffe301d6ea35 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
return PTR_ERR(priv->usb_glue_regmap);
/* Create a regmap for each USB2 PHY control register set */
- for (i = 0; i < priv->usb2_ports; i++) {
+ for (i = 0; i < priv->drvdata->num_phys; i++) {
struct regmap_config u2p_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
.max_register = U2P_R1,
};
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
"u2p-%d", i);
if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->usb_init(priv);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
}
/* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
+err_disable_regulator:
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 8b668ef46f7f..3cd294264372 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
epnum |= 1;
dep = dwc->eps[epnum];
+ if (dep == NULL)
+ return NULL;
+
if (dep->flags & DWC3_EP_ENABLED)
return dep;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 49ca5da5e279..f14c2aa83759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
req->start_sg = sg_next(s);
req->num_queued_sgs++;
+ req->num_pending_sgs--;
/*
* The number of pending SG entries may not correspond to the
@@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
* don't include unused SG entries.
*/
if (length == 0) {
- req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+ req->num_pending_sgs = 0;
break;
}
@@ -2260,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
- * Synchronize any pending event handling before executing the controller
- * halt routine.
+ * Synchronize and disable any further event handling while controller
+ * is being enabled/disabled.
*/
- if (!is_on) {
- dwc3_gadget_disable_irq(dwc);
- synchronize_irq(dwc->irq_gadget);
- }
+ disable_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
@@ -2304,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq_gadget);
+
pm_runtime_put(dwc->dev);
return ret;
@@ -2753,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
INIT_LIST_HEAD(&dep->started_list);
INIT_LIST_HEAD(&dep->cancelled_list);
+ dwc3_debugfs_create_endpoint_dir(dep);
+
return 0;
}
@@ -2796,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
+ debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
kfree(dep);
}
}
@@ -2873,15 +2876,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
struct scatterlist *sg = req->sg;
struct scatterlist *s;
- unsigned int pending = req->num_pending_sgs;
+ unsigned int num_queued = req->num_queued_sgs;
unsigned int i;
int ret = 0;
- for_each_sg(sg, s, pending, i) {
+ for_each_sg(sg, s, num_queued, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
req->sg = sg_next(s);
- req->num_pending_sgs--;
+ req->num_queued_sgs--;
ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
trb, event, status, true);
@@ -2904,7 +2907,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- return req->num_pending_sgs == 0;
+ return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2913,7 +2916,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
{
int ret;
- if (req->num_pending_sgs)
+ if (req->request.num_mapped_sgs)
ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
status);
else
@@ -4045,6 +4048,7 @@ err5:
dwc3_gadget_free_endpoints(dwc);
err4:
usb_put_gadget(dwc->gadget);
+ dwc->gadget = NULL;
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
@@ -4064,6 +4068,9 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
+ if (!dwc->gadget)
+ return;
+
usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
usb_put_gadget(dwc->gadget);
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 8bb25773b61e..05507606b2b4 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
{
struct usb_gadget *g = f->config->cdev->gadget;
+ /* If no super-speed-plus descriptors were provided, fall back to
+ * the super-speed ones. This avoids a NULL pointer dereference
+ * when a 5 Gbps capable gadget is used with a 10 Gbps capable
+ * config (device port + cable + host port).
+ */
+ if (!ssp)
+ ssp = ss;
+
if (fs) {
f->fs_descriptors = usb_copy_descriptors(fs);
if (!f->fs_descriptors)
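With this fallback in place, every function driver changed below can pass its super-speed table for the ssp slot too (passing NULL now gives the same result), instead of oopsing when a SuperSpeed Plus host enumerates a gadget that never defined SSP descriptors. The call shape after the change:

	status = usb_assign_descriptors(f, fs_descs, hs_descs,
					ss_descs, ss_descs /* SSP */);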
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 7f5cf488b2b1..ffe2486fce71 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ecm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
- ecm_ss_function, NULL);
+ ecm_ss_function, ecm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index cfcc4e81fb77..2cd9942707b4 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
- eem_ss_function, NULL);
+ eem_ss_function, eem_ss_function);
if (status)
goto fail;
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "unable to unframe EEM packet\n");
- continue;
+ goto next;
}
skb_trim(skb2, len - ETH_FCS_LEN);
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
GFP_ATOMIC);
if (unlikely(!skb3)) {
dev_kfree_skb_any(skb2);
- continue;
+ goto next;
}
dev_kfree_skb_any(skb2);
skb_queue_tail(list, skb3);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bf109191659a..d4844afeaffc 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
ffs->func = NULL;
}
+ /* Drain any pending AIO completions */
+ drain_workqueue(ffs->io_completion_wq);
+
if (!--opts->refcnt)
functionfs_unbind(ffs);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 1125f4715830..e55699308117 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors,
+ hidg_ss_descriptors);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index b56ad7c3838b..ae41f556eb75 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -207,7 +207,7 @@ autoconf_fail:
ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
- ss_loopback_descs, NULL);
+ ss_loopback_descs, ss_loopback_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 019bea8e09cc..855127249f24 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
data[1] = data[0];
- DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+ DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
ncm->notify_state = NCM_NOTIFY_CONNECT;
break;
}
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
ncm->ndp_dgram_count = 1;
/* Note: we skip opts->next_ndp_index */
- }
- /* Delay the timer. */
- hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL_SOFT);
+ /* Start the timer. */
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+ HRTIMER_MODE_REL_SOFT);
+ }
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index f47fdc1fa7f1..59d382fe1bbf 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1101,7 +1101,8 @@ autoconf_fail:
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, ss_printer_function, NULL);
+ hs_printer_function, ss_printer_function,
+ ss_printer_function);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 0739b05a0ef7..ee95e8f5f9d4 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
- eth_ss_function, NULL);
+ eth_ss_function, eth_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index e62713846350..1ed8ff0ac2d3 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
- gser_ss_function, NULL);
+ gser_ss_function, gser_ss_function);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 5a201ba7b155..1abf08e5164a 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -431,7 +431,8 @@ no_iso:
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_source_sink_descs,
- hs_source_sink_descs, ss_source_sink_descs, NULL);
+ hs_source_sink_descs, ss_source_sink_descs,
+ ss_source_sink_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 4d945254905d..51c1cae162d9 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
fs_subset_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
- ss_eth_function, NULL);
+ ss_eth_function, ss_eth_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7acb507946e6..de161ee0b1f9 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
- uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+ uasp_hs_function_desc, uasp_ss_function_desc,
+ uasp_ss_function_desc);
if (ret)
goto ep_fail;
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 35179543c327..34f4db554977 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -509,8 +509,7 @@ static irqreturn_t max3420_vbus_handler(int irq, void *dev_id)
? USB_STATE_POWERED : USB_STATE_NOTATTACHED);
spin_unlock_irqrestore(&udc->lock, flags);
- if (udc->thread_task &&
- udc->thread_task->state != TASK_RUNNING)
+ if (udc->thread_task)
wake_up_process(udc->thread_task);
return IRQ_HANDLED;
@@ -529,8 +528,7 @@ static irqreturn_t max3420_irq_handler(int irq, void *dev_id)
}
spin_unlock_irqrestore(&udc->lock, flags);
- if (udc->thread_task &&
- udc->thread_task->state != TASK_RUNNING)
+ if (udc->thread_task)
wake_up_process(udc->thread_task);
return IRQ_HANDLED;
@@ -1093,8 +1091,7 @@ static int max3420_wakeup(struct usb_gadget *gadget)
spin_unlock_irqrestore(&udc->lock, flags);
- if (udc->thread_task &&
- udc->thread_task->state != TASK_RUNNING)
+ if (udc->thread_task)
wake_up_process(udc->thread_task);
return ret;
}
@@ -1117,8 +1114,7 @@ static int max3420_udc_start(struct usb_gadget *gadget,
udc->todo |= UDC_START;
spin_unlock_irqrestore(&udc->lock, flags);
- if (udc->thread_task &&
- udc->thread_task->state != TASK_RUNNING)
+ if (udc->thread_task)
wake_up_process(udc->thread_task);
return 0;
@@ -1137,8 +1133,7 @@ static int max3420_udc_stop(struct usb_gadget *gadget)
udc->todo |= UDC_START;
spin_unlock_irqrestore(&udc->lock, flags);
- if (udc->thread_task &&
- udc->thread_task->state != TASK_RUNNING)
+ if (udc->thread_task)
wake_up_process(udc->thread_task);
return 0;
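
Every max3420 hunk (and the max3421-hcd hunk further down) drops the same open-coded state test. Peeking at another task's state word is racy, and the field is renamed to __state in this series so drivers can no longer reach it directly; wake_up_process() already does nothing for a task that is currently running. A sketch of the simplified wakeup, assuming the driver keeps its kthread pointer in private data:

static void example_kick_worker(struct task_struct *worker)
{
        /*
         * No state check needed: wake_up_process() is a no-op for a
         * TASK_RUNNING task, and checking here would race with the
         * scheduler anyway.
         */
        if (worker)
                wake_up_process(worker);
}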
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 0c418ce50ba0..f1b35a39d1ba 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
struct renesas_usb3_request *usb3_req)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
+ struct renesas_usb3_request *usb3_req_first;
unsigned long flags;
int ret = -EAGAIN;
u32 enable_bits = 0;
@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
spin_lock_irqsave(&usb3->lock, flags);
if (usb3_ep->halt || usb3_ep->started)
goto out;
- if (usb3_req != usb3_req_first)
+ usb3_req_first = __usb3_get_request(usb3_ep);
+ if (!usb3_req_first || usb3_req != usb3_req_first)
goto out;
if (usb3_pn_change(usb3, usb3_ep->num) < 0)
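
The renesas_usb3 change is about ordering: the old code read the queue head before taking usb3->lock, so the request could be dequeued in between and the comparison acted on a stale pointer. Fetching the head only while the lock is held, and tolerating an empty queue, closes the race. A generic sketch of the pattern, with hypothetical my_ep/my_req types:

struct my_req {
        struct list_head queue;
};

struct my_ep {
        spinlock_t lock;
        struct list_head queue;
};

/* Caller must hold ep->lock; returns NULL when the queue is empty. */
static struct my_req *my_ep_first_req(struct my_ep *ep)
{
        lockdep_assert_held(&ep->lock);
        return list_first_entry_or_null(&ep->queue, struct my_req, queue);
}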
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index afd9174d83b1..e7a8e0609853 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1169,8 +1169,7 @@ max3421_irq_handler(int irq, void *dev_id)
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
- if (max3421_hcd->spi_thread &&
- max3421_hcd->spi_thread->state != TASK_RUNNING)
+ if (max3421_hcd->spi_thread)
wake_up_process(max3421_hcd->spi_thread);
if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
disable_irq_nosync(spi->irq);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7bc18cf8042c..18c2bbddf080 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+ xhci->quirks |= XHCI_BROKEN_D3COLD;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* Systems with the TI redriver that loses port status change events
* need to have the registers polled during D3, so avoid D3cold.
*/
- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
pci_d3cold_disable(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a8e4189277da..6acd2329e08d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
cancelled_td_list) {
- /*
- * Doesn't matter what we pass for status, since the core will
- * just overwrite it (because the URB has been unlinked).
- */
ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
if (td->cancel_status == TD_CLEARED)
- xhci_td_cleanup(ep->xhci, td, ring, 0);
+ xhci_td_cleanup(ep->xhci, td, ring, td->status);
if (ep->xhci->xhc_state & XHCI_STATE_DYING)
return;
@@ -937,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
continue;
}
/*
- * If ring stopped on the TD we need to cancel, then we have to
+ * If a ring stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
+ * Rings halted due to STALL may show hw_deq is past the stalled
+ * TD, but still require a set TR Deq command to flush xHC cache.
*/
hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
td->urb->stream_id);
hw_deq &= ~0xf;
- if (trb_in_td(xhci, td->start_seg, td->first_trb,
+ if (td->cancel_status == TD_HALTED) {
+ cached_td = td;
+ } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, hw_deq, false)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 50bb91b6a4b8..c7387677a26a 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -917,7 +917,6 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
struct xhci_op_regs __iomem *op;
unsigned long timeout;
time64_t timestamp;
- struct tm time;
u64 address;
u32 value;
int err;
@@ -1014,11 +1013,8 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
}
timestamp = le32_to_cpu(header->fwimg_created_time);
- time64_to_tm(timestamp, 0, &time);
- dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
- time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
- time.tm_hour, time.tm_min, time.tm_sec);
+ dev_info(dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
return 0;
}
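
The tegra hunk replaces an open-coded time64_to_tm() plus a six-argument dev_info() with the %ptT printk extension, which formats a pointed-to time64_t as an ISO 8601 timestamp; going by the printk-formats documentation, the trailing 's' selects a space rather than a 'T' between date and time. A sketch:

static void example_log_build_time(void)
{
        time64_t ts = ktime_get_real_seconds();

        /* Prints e.g. "timestamp: 2021-06-01 12:34:56 UTC" */
        pr_info("timestamp: %ptTs UTC\n", &ts);
}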
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2595a8f057c4..e417f5ce13d1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD BIT_ULL(41)
unsigned int num_active_eps;
unsigned int limit_active_eps;
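
xHCI quirks are a 64-bit flag set: each new entry claims the next BIT_ULL() position in xhci.h, xhci_pci_quirks() sets bits when PCI IDs match, and call sites test either a single bit or an OR of several in one mask, as the suspend-path hunk above now does for D3cold. The idiom in isolation, with made-up quirk names:

#define EX_QUIRK_A      BIT_ULL(0)
#define EX_QUIRK_B      BIT_ULL(1)

static bool example_needs_workaround(u64 quirks)
{
        /* One test covers every quirk that requires the workaround. */
        return quirks & (EX_QUIRK_A | EX_QUIRK_B);
}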
diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
index b3cfe8666ea7..336653091e3b 100644
--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
+++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
return -EINVAL;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
pdata = devm_kzalloc(&pdev->dev,
sizeof(*pdata) +
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index a3dfc77578ea..26baba3ab7d7 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
/* Set speed */
retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
0x01, /* vendor request: set speed */
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
tv->speed, /* speed value */
- 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval) {
tv->speed = old;
dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
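
The trancevibrator fix makes the control transfer self-consistent: the request carries no data stage (NULL buffer, zero length) and writes state to the device, so the direction bit in bRequestType must be USB_DIR_OUT, agreeing with the usb_sndctrlpipe() the call already used, and USB_CTRL_SET_TIMEOUT is the matching timeout constant. A sketch of such a vendor set request (request code 0x01 is specific to this device; udev is a held struct usb_device):

static int example_set_speed(struct usb_device *udev, u16 speed)
{
        /* Host-to-device request without a data stage. */
        return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                               0x01, /* vendor request: set speed */
                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                               speed, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
}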
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index b5d661644263..748139d26263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
+ usb_put_dev(usbdev);
return 0;
probe_abort:
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 8f09a387b773..4c8f0112481f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
- break;
}
- fallthrough;
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ee595d1bea0a..fcb812bc832c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
u8 gpio_input;
#endif
u8 partnum;
+ u32 fw_version;
speed_t min_speed;
speed_t max_speed;
bool use_actual_rate;
+ bool no_flow_control;
};
enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
/* CP210X_VENDOR_SPECIFIC values */
#define CP210X_READ_2NCONFIG 0x000E
+#define CP210X_GET_FW_VER_2N 0x0010
#define CP210X_READ_LATCH 0x00C2
#define CP210X_GET_PARTNUM 0x370B
#define CP210X_GET_PORTCONFIG 0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
#define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587
#define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6)
+
/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
struct cp210x_gpio_write {
u8 mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
static void cp210x_set_flow_control(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_special_chars chars;
struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
u32 ctl_hs;
int ret;
+ /*
+ * Some CP2102N devices interpret ulXonLimit as ulFlowReplace (erratum
+ * CP2102N_E104). Report back that flow control is not supported.
+ */
+ if (priv->no_flow_control) {
+ tty->termios.c_cflag &= ~CRTSCTS;
+ tty->termios.c_iflag &= ~(IXON | IXOFF);
+ }
+
if (old_termios &&
C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
port_priv->crtscts = false;
}
- if (I_IXOFF(tty))
+ if (I_IXOFF(tty)) {
flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
- else
+
+ flow_ctl.ulXonLimit = cpu_to_le32(128);
+ flow_ctl.ulXoffLimit = cpu_to_le32(128);
+ } else {
flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+ }
if (I_IXON(tty))
flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
else
flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
- flow_ctl.ulXonLimit = cpu_to_le32(128);
- flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
ctl_hs, flow_repl);
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
/* 0 indicates GPIO mode, 1 is alternate function */
- priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+ /* QFN20 is special... */
+ if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE) /* GPIO 0 */
+ priv->gpio_altfunc |= BIT(0);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+ priv->gpio_altfunc |= BIT(1);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+ priv->gpio_altfunc |= BIT(2);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+ priv->gpio_altfunc |= BIT(3);
+ } else {
+ priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ }
if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
/*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
priv->use_actual_rate = use_actual_rate;
}
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 ver[3];
+ int ret;
+
+ ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+ ver, sizeof(ver));
+ if (ret)
+ return ret;
+
+ dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+ ver[0], ver[1], ver[2]);
+
+ priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+ return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ int ret;
+
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+ ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+ if (ret)
+ break;
+ if (priv->fw_version <= 0x10004)
+ priv->no_flow_control = true;
+ break;
+ default:
+ break;
+ }
+}
+
static int cp210x_attach(struct usb_serial *serial)
{
int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
usb_set_serial_data(serial, priv);
+ cp210x_determine_quirks(serial);
cp210x_init_max_speed(serial);
result = cp210x_gpio_init(serial);
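
cp210x_get_fw_version() packs the three version bytes into a single u32 so firmware releases compare with plain integer operators; 0x10004 decodes to 1.0.4, the last CP2102N release affected by the flow-control erratum. The encoding as a worked example:

static u32 example_pack_fw_version(const u8 ver[3])
{
        /* major.minor.patch -> 0x00MMmmpp */
        return ver[0] << 16 | ver[1] << 8 | ver[2];
}

/*
 * example_pack_fw_version((const u8 []){ 1, 0, 4 }) == 0x10004, so the
 * check "fw_version <= 0x10004" matches every release up to and
 * including 1.0.4.
 */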
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6f2659e59b2e..4a1f3a95d017 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1034,6 +1035,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* IDS GmbH devices */
+ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
+ { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 3d47c6d72256..add602bebd82 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -581,6 +581,7 @@
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID 0x7c93 /* OrionMX */
/*
* Synapse Wireless product ids (FTDI_VID)
@@ -1568,6 +1569,13 @@
#define UNJO_ISODEBUG_V1_PID 0x150D
/*
+ * IDS GmbH
+ */
+#define IDS_VID 0x2CAF
+#define IDS_SI31A_PID 0x13A2
+#define IDS_CM31A_PID 0x13A3
+
+/*
* U-Blox products (http://www.u-blox.com).
*/
#define UBLOX_VID 0x1546
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 83c62f920c50..41f1b872d277 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
*
* Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
*
@@ -22,10 +22,11 @@
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+ { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
.owner = THIS_MODULE,
.name = "omninet",
},
- .description = "ZyXEL - omni.net lcd plus usb",
+ .description = "ZyXEL - omni.net usb",
.id_table = id_table,
.num_bulk_out = 2,
.calc_num_ports = omninet_calc_num_ports,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3e79a543d3e7..7608584ef4fe 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index fd773d252691..940050c31482 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 0f681ddbfd28..6097ee8fccb2 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -158,6 +158,7 @@
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
+#define ADLINK_ND6530GC_PRODUCT_ID 0x653a
/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID 0x0b8c
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 5f2e7f668e68..067690dac24c 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 1,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port receive buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 0,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
int status;
/* power on unit */
- status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0xc2, 0x40, 0x8000, 0, NULL, 0,
QT2_USB_TIMEOUT);
if (status < 0) {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index caa46ac23db9..310db5abea9d 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -37,6 +37,7 @@
/* Vendor and product ids */
#define TI_VENDOR_ID 0x0451
#define IBM_VENDOR_ID 0x04b3
+#define STARTECH_VENDOR_ID 0x14b0
#define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543
#define IBM_454B_PRODUCT_ID 0x454b
@@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
@@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 9da22ae3006c..77dabd306ba8 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
bool match;
int nval;
u16 *val;
+ int ret;
int i;
/*
@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
if (!val)
return ERR_PTR(-ENOMEM);
- nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
- if (nval < 0) {
+ ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
+ if (ret < 0) {
kfree(val);
- return ERR_PTR(nval);
+ return ERR_PTR(ret);
}
for (i = 0; i < nval; i++) {
@@ -238,7 +239,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
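
The typec mux hunks fix two independent slips: the element count in nval was overwritten by the read call's return value, letting a negative errno flow onward as a count, and the device found in the mux path was converted with to_typec_switch() instead of to_typec_mux(). Keeping count and status in separate variables avoids the first class of bug entirely; a sketch assuming a u16 array property, as in the "svid" case here:

static u16 *example_read_svids(struct fwnode_handle *fwnode, int *count)
{
        u16 *val;
        int nval, ret;

        nval = fwnode_property_count_u16(fwnode, "svid");
        if (nval <= 0)
                return ERR_PTR(nval ? nval : -ENODATA);

        val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
        if (!val)
                return ERR_PTR(-ENOMEM);

        /* ret carries only status; nval keeps the element count. */
        ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
        if (ret < 0) {
                kfree(val);
                return ERR_PTR(ret);
        }

        *count = nval;
        return val;
}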
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 46a25b8db72e..ffa8aa12d5f1 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
acpi_dev_free_resource_list(&resource_list);
if (!pmc->iom_base) {
- put_device(&adev->dev);
+ acpi_dev_put(adev);
return -ENOMEM;
}
+ if (IS_ERR(pmc->iom_base)) {
+ acpi_dev_put(adev);
+ return PTR_ERR(pmc->iom_base);
+ }
+
pmc->iom_adev = adev;
return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
break;
ret = pmc_usb_register_port(pmc, i, fwnode);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
goto err_remove_ports;
+ }
}
platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return ret;
}
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 64133e586c64..63470cf7f4cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -401,6 +401,8 @@ struct tcpm_port {
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
+ u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+ unsigned int nr_snk_vdo_v1;
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
@@ -1547,33 +1549,43 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
- /* 6.4.4.3.1: Only respond as UFP (device) */
- if (port->data_role == TYPEC_DEVICE &&
+ svdm_version = PD_VDO_SVDM_VER(p[0]);
+ }
+
+ port->ams = DISCOVER_IDENTITY;
+ /*
+ * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+ * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+ * "wrong configuation" or "Unrecognized"
+ */
+ if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
port->nr_snk_vdo) {
- /*
- * Product Type DFP and Connector Type are not defined in SVDM
- * version 1.0 and shall be set to zero.
- */
- if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
- response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
- & ~IDH_CONN_MASK;
- else
- response[1] = port->snk_vdo[0];
- for (i = 1; i < port->nr_snk_vdo; i++)
- response[i + 1] = port->snk_vdo[i];
- rlen = port->nr_snk_vdo + 1;
+ if (svdm_version < SVDM_VER_2_0) {
+ for (i = 0; i < port->nr_snk_vdo_v1; i++)
+ response[i + 1] = port->snk_vdo_v1[i];
+ rlen = port->nr_snk_vdo_v1 + 1;
+
+ } else {
+ for (i = 0; i < port->nr_snk_vdo; i++)
+ response[i + 1] = port->snk_vdo[i];
+ rlen = port->nr_snk_vdo + 1;
+ }
}
break;
case CMD_DISCOVER_SVID:
+ port->ams = DISCOVER_SVIDS;
break;
case CMD_DISCOVER_MODES:
+ port->ams = DISCOVER_MODES;
break;
case CMD_ENTER_MODE:
+ port->ams = DFP_TO_UFP_ENTER_MODE;
break;
case CMD_EXIT_MODE:
+ port->ams = DFP_TO_UFP_EXIT_MODE;
break;
case CMD_ATTENTION:
/* Attention command does not have response */
@@ -1930,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+ tcpm_ams_finish(port);
+ } else {
tcpm_ams_finish(port);
}
break;
@@ -2176,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
if (!type) {
tcpm_log(port, "Alert message received with no type");
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
return;
}
/* Just handling non-battery alerts for now */
if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = GET_STATUS_SEND;
+ tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+ } else {
+ /*
+ * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+ * SinkTxOk in time.
+ */
+ port->ams = GETTING_SOURCE_SINK_STATUS;
tcpm_set_state(port, GET_STATUS_SEND, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
}
+ } else {
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
}
}
@@ -2287,6 +2307,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
bool frs_enable;
int ret;
+ if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
switch (type) {
case PD_DATA_SOURCE_CAP:
for (i = 0; i < cnt; i++)
@@ -2417,14 +2443,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- tcpm_handle_vdm_request(port, msg->payload, cnt);
+ if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
+ tcpm_handle_vdm_request(port, msg->payload, cnt);
+ else if (port->negotiated_rev > PD_REV20)
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
- tcpm_handle_alert(port, msg->payload, cnt);
+ if (port->state != SRC_READY && port->state != SNK_READY)
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ else
+ tcpm_handle_alert(port, msg->payload, cnt);
break;
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
@@ -2459,6 +2493,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
enum tcpm_state next_state;
+ /*
+ * Stop the VDM state machine if it is interrupted by another Message. NOT_SUPP is
+ * allowed while a VDM AMS is waiting for VDM responses and is handled later.
+ */
+ if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
switch (type) {
case PD_CTRL_GOOD_CRC:
case PD_CTRL_PING:
@@ -2717,7 +2761,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
enum pd_ext_msg_type type = pd_header_type_le(msg->header);
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
- if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+ /* Stop the VDM state machine if interrupted by another Message. */
+ if (tcpm_vdm_ams(port)) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
+ if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
tcpm_log(port, "Unchunked extended messages unsupported");
return;
@@ -2731,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
switch (type) {
case PD_EXT_STATUS:
- /*
- * If PPS related events raised then get PPS status to clear
- * (see USB PD 3.0 Spec, 6.5.2.4)
- */
- if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
- USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
- GETTING_SOURCE_SINK_STATUS, 0);
-
- else
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
- break;
case PD_EXT_PPS_STATUS:
- /*
- * For now the PPS status message is used to clear events
- * and nothing more.
- */
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+ } else {
+ /* unexpected Status or PPS_Status Message */
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ }
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
@@ -2811,7 +2854,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
"Data role mismatch, initiating error recovery");
tcpm_set_state(port, ERROR_RECOVERY, 0);
} else {
- if (msg->header & PD_HEADER_EXT_HDR)
+ if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
tcpm_pd_ext_msg_request(port, msg);
else if (cnt)
tcpm_pd_data_request(port, msg);
@@ -5914,6 +5957,22 @@ sink:
return ret;
}
+ /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+ if (port->nr_snk_vdo) {
+ ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ENODATA;
+
+ port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+ port->snk_vdo_v1,
+ port->nr_snk_vdo_v1);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -6279,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ hrtimer_cancel(&port->send_discover_timer);
+ hrtimer_cancel(&port->enable_frs_timer);
+ hrtimer_cancel(&port->vdm_state_machine_timer);
+ hrtimer_cancel(&port->state_machine_timer);
+
tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
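
Two of the tcpm hunks add the same missing conversion: PD message headers are little-endian on the wire and stored as __le16, so bit tests must byte-swap first or they silently misfire on big-endian machines. The shape of the fix (handle_extended() is a hypothetical stand-in for the caller's next step):

static void example_dispatch(const struct pd_message *msg)
{
        /* msg->header is __le16: convert before testing bits. */
        u16 header = le16_to_cpu(msg->header);

        if (header & PD_HEADER_EXT_HDR)
                handle_extended(msg); /* hypothetical handler */
}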
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 79ae63950050..5d125339687a 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
const u8 *data = (void *)msg;
int i;
- for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
data[i]);
if (ret)
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1d8b7df59ff4..b7d104c80d85 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -717,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_send_command(con->ucsi, command, NULL, 0);
/* 3. ACK connector change */
- clear_bit(EVENT_PENDING, &ucsi->flags);
ret = ucsi_acknowledge_connector_change(ucsi);
+ clear_bit(EVENT_PENDING, &ucsi->flags);
if (ret) {
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
goto out_unlock;
@@ -1253,6 +1253,7 @@ err_unregister:
}
err_reset:
+ memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
err:
return ret;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 189e4385df40..dda5dc6f7737 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -15,6 +15,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/mpfs.h>
#include "mlx5_vdpa.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_net *ndev;
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+ }
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
}
@@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
@@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mtu;
+ if (!is_zero_ether_addr(config->mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (err)
+ goto err_mtu;
+ }
+
mvdev->vdev.dma_dev = mdev->device;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
- goto err_mtu;
+ goto err_mpfs;
err = alloc_resources(ndev);
if (err)
@@ -2044,6 +2058,9 @@ err_reg:
free_resources(ndev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mpfs:
+ if (!is_zero_ether_addr(config->mac))
+ mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
put_device(&mvdev->vdev.dev);
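
The mlx5_vnet hunks are a textbook error-unwind extension: a new resource (the MPFS MAC entry) is acquired between two existing setup steps, so a new label enters the ladder at exactly that point and every later failure unwinds through it, releasing resources newest-first. The shape reduced to essentials (all acquire/release helpers hypothetical):

static int example_setup(struct ex_dev *dev)
{
        int err;

        err = acquire_a(dev);
        if (err)
                return err;

        err = acquire_b(dev); /* the newly inserted step */
        if (err)
                goto err_a;

        err = acquire_c(dev);
        if (err)
                goto err_b; /* later failures now unwind b too */

        return 0;

err_b:
        release_b(dev);
err_a:
        release_a(dev);
        return err;
}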
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 53ce78d7d07b..5e2e1b9a9fd3 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -2,6 +2,7 @@
config VFIO_PCI
tristate "VFIO support for PCI devices"
depends on VFIO && PCI && EVENTFD
+ depends on MMU
select VFIO_VIRQFD
select IRQ_BYPASS_MANAGER
help
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index d57f037f65b8..70e28efbc51f 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
if (len == 0xFF) {
len = vfio_ext_cap_len(vdev, ecap, epos);
if (len < 0)
- return ret;
+ return len;
}
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 361e5b57e369..470fcf7dac56 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -291,7 +291,7 @@ err_irq:
vfio_platform_regions_cleanup(vdev);
err_reg:
mutex_unlock(&driver_lock);
- module_put(THIS_MODULE);
+ module_put(vdev->parent_module);
return ret;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a0747c35a778..4fce73a8a650 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -567,7 +567,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
vaddr = untagged_addr(vaddr);
retry:
- vma = find_vma_intersection(mm, vaddr, vaddr + 1);
+ vma = vma_lookup(mm, vaddr);
if (vma && vma->vm_flags & VM_PFNMAP) {
ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
return 0;
}
- size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+ size = struct_size(cap_iovas, iova_ranges, iovas);
cap_iovas = kzalloc(size, GFP_KERNEL);
if (!cap_iovas)
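
Two self-contained cleanups in the vfio hunks. vma_lookup(mm, addr) returns a VMA only when addr actually falls inside it, a clearer spelling of the find_vma_intersection(mm, addr, addr + 1) it replaces; struct_size() computes the size of a structure with a trailing flexible array while saturating on overflow instead of wrapping. A sketch of the allocation idiom with hypothetical types:

struct ex_range {
        u64 start;
        u64 end;
};

struct ex_caps {
        u32 nr_ranges;
        struct ex_range ranges[]; /* flexible array member */
};

static struct ex_caps *example_alloc_caps(u32 nr)
{
        struct ex_caps *caps;

        /* sizeof(*caps) + nr * sizeof(caps->ranges[0]), overflow-checked */
        caps = kzalloc(struct_size(caps, ranges, nr), GFP_KERNEL);
        if (caps)
                caps->nr_ranges = nr;
        return caps;
}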
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df82b124170e..6414bd5741b8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -744,11 +744,9 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
if (copied != len)
return -EFAULT;
- xdp->data_hard_start = buf;
- xdp->data = buf + pad;
- xdp->data_end = xdp->data + len;
+ xdp_init_buff(xdp, buflen, NULL);
+ xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- xdp->frame_sz = buflen;
--net->refcnt_bias;
alloc_frag->offset += buflen;
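
xdp_init_buff()/xdp_prepare_buff() replace four open-coded field assignments with the helpers XDP producers are expected to use: init records the frame size and (optional) receive queue, prepare derives data, data_end and the metadata area from hard start, headroom and length. A sketch with the same buf/pad/len roles as in the vhost code:

static void example_fill_xdp(struct xdp_buff *xdp, void *buf,
                             int pad, int len, unsigned int buflen)
{
        /* Not tied to a receive queue, hence the NULL rxq. */
        xdp_init_buff(xdp, buflen, NULL);
        /* headroom = pad, data length = len, metadata supported */
        xdp_prepare_buff(xdp, buf, pad, len, true);
}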
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5e78fb719602..d38c996b4f46 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -31,7 +31,8 @@
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};
enum {
@@ -56,6 +57,7 @@ struct vhost_vsock {
atomic_t queued_replies;
u32 guest_cid;
+ bool seqpacket_allow;
};
static u32 vhost_transport_get_local_cid(void)
@@ -112,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
+ bool restore_flag = false;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -168,9 +171,26 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
- if (payload_len > iov_len - sizeof(pkt->hdr))
+ if (payload_len > iov_len - sizeof(pkt->hdr)) {
payload_len = iov_len - sizeof(pkt->hdr);
+ /* As we are copying pieces of a large packet's buffer into
+ * small rx buffers, the headers of the packets in the rx queue
+ * are created dynamically and initialized from the header of
+ * the current packet (except for the length). In the
+ * SOCK_SEQPACKET case we must also clear the record delimiter
+ * bit (VIRTIO_VSOCK_SEQ_EOR); otherwise, instead of one packet
+ * with the delimiter (which marks the end of the record), there
+ * would be a sequence of packets with the delimiter bit set.
+ * The bit is restored once the initialized header has been
+ * copied to the rx buffer.
+ */
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ restore_flag = true;
+ }
+ }
+
/* Set the correct length in the header */
pkt->hdr.len = cpu_to_le32(payload_len);
@@ -204,6 +224,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
+ if (restore_flag)
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
@@ -354,8 +377,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
- pkt->len = le32_to_cpu(pkt->hdr.len);
+ pkt->len = le32_to_cpu(pkt->hdr.len);
/* No payload */
if (!pkt->len)
@@ -398,6 +420,8 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
static struct virtio_transport vhost_transport = {
.transport = {
.module = THIS_MODULE,
@@ -424,6 +448,11 @@ static struct virtio_transport vhost_transport = {
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vhost_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
@@ -441,6 +470,22 @@ static struct virtio_transport vhost_transport = {
.send_pkt = vhost_transport_send_pkt,
};
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct vhost_vsock *vsock;
+ bool seqpacket_allow = false;
+
+ rcu_read_lock();
+ vsock = vhost_vsock_get(remote_cid);
+
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -689,7 +734,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
vsk->peer_shutdown = SHUTDOWN_MASK;
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
@@ -785,6 +830,9 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
goto err;
}
+ if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index b292887a2481..a591d291b231 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
get_page(page);
+
+ if (vmf->vma->vm_file)
+ page->mapping = vmf->vma->vm_file->f_mapping;
+ else
+ printk(KERN_ERR "no mapping available\n");
+
+ BUG_ON(!page->mapping);
page->index = vmf->pgoff;
vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+ if (!PageDirty(page))
+ SetPageDirty(page);
+ return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+ .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file)
+{
+ file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page;
+ int i;
BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
+
+ /* clear out the mapping that we set up */
+ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+ page = fb_deferred_io_page(info, i);
+ page->mapping = NULL;
+ }
+
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
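
The defio changes give framebuffer pages a real owner: the fault handler stamps page->mapping so that a later set_page_dirty() resolves to the driver's address_space_operations (whose only job is to mark the page, since there are no buffer heads to walk), and cleanup clears the mapping again so no page outlives the file with a stale pointer. The lifecycle in miniature:

/* At fault time: adopt the page into the file's mapping. */
static void example_adopt_page(struct vm_fault *vmf, struct page *page)
{
        get_page(page);
        page->mapping = vmf->vma->vm_file->f_mapping;
        page->index = vmf->pgoff;
        vmf->page = page;
}

/* At teardown: every adopted page must drop the mapping again. */
static void example_disown_page(struct page *page)
{
        page->mapping = NULL;
}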
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 072780b0e570..98f193078c05 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
if (res)
module_put(info->fbops->owner);
}
+#ifdef CONFIG_FB_DEFERRED_IO
+ if (info->fbdefio)
+ fb_deferred_io_open(info, inode, file);
+#endif
out:
unlock_fb_info(info);
if (res)
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index cc8e62ae93f6..bd3d07aa4f0e 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -558,7 +558,7 @@ static int hgafb_probe(struct platform_device *pdev)
int ret;
ret = hga_card_detect();
- if (!ret)
+ if (ret)
return ret;
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
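
The hgafb one-liner inverts a success test: hga_card_detect() follows the usual kernel convention of 0 on success and a negative errno on failure, so probe must bail out on non-zero, not on zero (the old test turned every successful detect into a failure). The convention in miniature, with a hypothetical detect helper:

static int example_probe(struct platform_device *pdev)
{
        int ret;

        ret = example_detect(pdev); /* 0 on success, -errno on failure */
        if (ret)
                return ret;

        dev_info(&pdev->dev, "device detected\n");
        return 0;
}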
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 510e9318854d..47dce91f788c 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -993,6 +993,23 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
+ /*
+ * The default page reporting order is @pageblock_order, which
+ * corresponds to 512MB on ARM64 when the 64KB base page size is
+ * used. Page reporting is not triggered unless the freed page
+ * lands in a free area at least that large, so set the page
+ * reporting order to 5, corresponding to 2MB. This also helps
+ * to avoid THP splitting when the host uses a 4KB base page
+ * size.
+ *
+ * Ideally, the page reporting order would be selected based on
+ * the host's base page size, but reporting that value needs
+ * more work; the hard-coded order is fine for now.
+ */
+#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
+ vb->pr_dev_info.order = 5;
+#endif
+
err = page_reporting_register(&vb->pr_dev_info);
if (err)
goto out_unregister_oom;
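
The arithmetic behind the hard-coded order: a reporting order n covers (1 << n) * PAGE_SIZE bytes, so order 5 with a 64KB base page is 2MB, whereas pageblock_order there corresponds to 512MB. As a one-line helper:

/* Reporting granularity in bytes for a given order. */
static unsigned long example_report_bytes(unsigned int order)
{
        return (1UL << order) * PAGE_SIZE;
}

/* With 64KB pages: example_report_bytes(5) == 2MB. */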
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 743377c5b173..73f2221f6222 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -174,6 +174,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n)
fallthrough;
case WD2:
zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
+ fallthrough;
default:
return;
}
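
This hunk and the musb change above are two faces of the same warning: with -Wimplicit-fallthrough every non-empty case must end in a break, a return, or the fallthrough pseudo-keyword. In musb the fall-through was unintended and becomes a break; here the fall into default is deliberate, so the annotation documents intent without changing behaviour. A generic sketch (register helpers hypothetical):

static void example_program(int which, int value)
{
        switch (which) {
        case 1:
                write_reg_a(value);
                fallthrough; /* case 1 programs register B as well */
        case 2:
                write_reg_b(value);
                fallthrough; /* nothing more to do either way */
        default:
                return;
        }
}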
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index fde9e739b436..391c774a1f67 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -54,6 +54,7 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/mipsregs.h>
#include <asm/uasm.h>
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 7bbfd58958bc..d7e361fb0548 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -642,6 +642,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
}
info->eoi_time = 0;
+
+ /* is_active hasn't been reset yet, do it now. */
+ smp_store_release(&info->is_active, 0);
do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}
@@ -811,6 +814,7 @@ static void xen_evtchn_close(evtchn_port_t port)
BUG();
}
+/* Not called for lateeoi events. */
static void event_handler_exit(struct irq_info *info)
{
smp_store_release(&info->is_active, 0);
@@ -1883,7 +1887,12 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
if (VALID_EVTCHN(evtchn)) {
do_mask(info, EVT_MASK_REASON_EOI_PENDING);
- event_handler_exit(info);
+ /*
+ * Don't call event_handler_exit().
+ * Need to keep is_active non-zero in order to ignore re-raised
+ * events after cpu affinity changes while a lateeoi is pending.
+ */
+ clear_evtchn(evtchn);
}
}
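
Two ordering details in the Xen hunk. smp_store_release() publishes is_active = 0 only after all earlier writes are visible, so a CPU that sees the cleared flag also sees the completed EOI bookkeeping; and lateeoi_ack_dynirq() deliberately keeps is_active set (plain clear_evtchn() instead of event_handler_exit()) so events re-raised during a CPU affinity change stay ignored until the EOI arrives. The release/acquire pairing in isolation, with a hypothetical ex_info:

struct ex_info {
        u64 eoi_time;
        u8 is_active;
};

static void example_publish(struct ex_info *info)
{
        info->eoi_time = 0; /* ordinary write, ordered... */
        smp_store_release(&info->is_active, 0); /* ...before this one */
}

static bool example_observe(struct ex_info *info)
{
        /* If this reads 0, every write before the release is visible. */
        return !smp_load_acquire(&info->is_active);
}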
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index fb7ee026d101..adbb3a1edcbf 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -73,6 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations adfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = adfs_readpage,
.writepage = adfs_writepage,
.write_begin = adfs_write_begin,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index d91b0133d95d..75ebd2b576ca 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -453,6 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations affs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = affs_readpage,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
@@ -833,6 +834,7 @@ err_bh:
}
const struct address_space_operations affs_aops_ofs = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
.write_begin = affs_write_begin_ofs,
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a4e9e6e07e93..d3c6bb22c5f4 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -322,6 +322,8 @@ static int afs_deliver_cb_callback(struct afs_call *call)
return ret;
call->unmarshall++;
+ fallthrough;
+
case 5:
break;
}
@@ -418,6 +420,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
r->node[loop] = ntohl(b[loop + 5]);
call->unmarshall++;
+ fallthrough;
case 2:
break;
@@ -530,6 +533,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
r->node[loop] = ntohl(b[loop + 5]);
call->unmarshall++;
+ fallthrough;
case 2:
break;
@@ -663,6 +667,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ fallthrough;
case 3:
break;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9fbe5a5ec9bd..78719f2f567e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1919,7 +1919,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
new_inode = d_inode(new_dentry);
if (new_inode) {
spin_lock(&new_inode->i_lock);
- if (new_inode->i_nlink > 0)
+ if (S_ISDIR(new_inode->i_mode))
+ clear_nlink(new_inode);
+ else if (new_inode->i_nlink > 0)
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 2f695a260442..dd3f45d906d2 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -388,6 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
req->file_size = vp->scb.status.size;
call->unmarshall++;
+ fallthrough;
case 5:
break;
@@ -1408,6 +1409,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("motd '%s'", p);
call->unmarshall++;
+ fallthrough;
case 8:
break;
@@ -1845,6 +1847,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
+ fallthrough;
case 6:
break;
@@ -1979,6 +1982,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
+ fallthrough;
case 4:
break;
diff --git a/fs/afs/main.c b/fs/afs/main.c
index b2975256dadb..179004b15566 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -203,8 +203,8 @@ static int __init afs_init(void)
goto error_fs;
afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
- if (IS_ERR(afs_proc_symlink)) {
- ret = PTR_ERR(afs_proc_symlink);
+ if (!afs_proc_symlink) {
+ ret = -ENOMEM;
goto error_proc;
}
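
The afs fix corrects a mismatched error idiom: proc_symlink(), like the other proc_* creation helpers, reports failure with NULL rather than an ERR_PTR, so the old IS_ERR() check could never fire and a failed creation sailed past it. The correct shape, with the caller supplying its own errno:

static int example_init_proc(void)
{
        struct proc_dir_entry *link;

        link = proc_symlink("fs/example", NULL, "../self/net/example");
        if (!link) /* NULL on failure, never an ERR_PTR */
                return -ENOMEM;

        return 0;
}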
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index dc9327332f06..00fca3c66ba6 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -593,6 +593,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (ret < 0)
return ret;
call->unmarshall = 6;
+ fallthrough;
case 6:
break;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3edb6204b937..3104b62c2082 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -118,6 +118,15 @@ int afs_write_end(struct file *file, struct address_space *mapping,
_enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
+ if (!PageUptodate(page)) {
+ if (copied < len) {
+ copied = 0;
+ goto out;
+ }
+
+ SetPageUptodate(page);
+ }
+
if (copied == 0)
goto out;
@@ -132,8 +141,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_sequnlock(&vnode->cb_lock);
}
- ASSERT(PageUptodate(page));
-
if (PagePrivate(page)) {
priv = page_private(page);
f = afs_page_dirty_from(page, priv);
@@ -730,7 +737,7 @@ static int afs_writepages_region(struct address_space *mapping,
return ret;
}
- start += ret * PAGE_SIZE;
+ start += ret;
cond_resched();
} while (wbc->nr_to_write > 0);
@@ -837,6 +844,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
unsigned long priv;
+ vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -848,14 +856,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page) &&
wait_on_page_fscache_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
#endif
if (wait_on_page_writeback_killable(page))
- return VM_FAULT_RETRY;
+ goto out;
if (lock_page_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
/* We mustn't change page->private until writeback is complete as that
* details the portion of the page we need to write back and we might
@@ -863,7 +871,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
*/
if (wait_on_page_writeback_killable(page) < 0) {
unlock_page(page);
- return VM_FAULT_RETRY;
+ goto out;
}
priv = afs_page_dirty(page, 0, thp_size(page));
@@ -877,8 +885,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
}
file_update_time(file);
+ ret = VM_FAULT_LOCKED;
+out:
sb_end_pagefault(inode->i_sb);
- return VM_FAULT_LOCKED;
+ return ret;
}
/*
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 0dceefc54b48..7f8544abf636 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -188,6 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations bfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = bfs_readpage,
.writepage = bfs_writepage,
.write_begin = bfs_write_begin,
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 3e84e9bb9084..145917f734fe 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -222,7 +222,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
fd_offset);
if (error != N_TXTADDR(ex))
@@ -230,7 +230,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
return error;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 187b3f2b9202..439ed81e755a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1070,7 +1070,7 @@ out_free_interp:
elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
!!interpreter, false);
- elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
+ elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
vaddr = elf_ppnt->p_vaddr;
/*
@@ -1537,7 +1537,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
{
const struct cred *cred;
unsigned int i, len;
-
+ unsigned int state;
+
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
@@ -1559,7 +1560,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
psinfo->pr_pgrp = task_pgrp_vnr(p);
psinfo->pr_sid = task_session_vnr(p);
- i = p->state ? ffz(~p->state) + 1 : 0;
+ state = READ_ONCE(p->__state);
+ i = state ? ffz(~state) + 1 : 0;
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
@@ -1571,7 +1573,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
-
+
return 0;
}
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 2c99b102c860..cf4028487dcc 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -928,7 +928,7 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
{
struct elf32_fdpic_loadseg *seg;
struct elf32_phdr *phdr;
- unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
+ unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0;
int loop, ret;
load_addr = params->load_addr;
@@ -948,12 +948,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
}
/* allocate one big anon block for everything */
- mflags = MAP_PRIVATE;
- if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
- mflags |= MAP_EXECUTABLE;
-
maddr = vm_mmap(NULL, load_addr, top - base,
- PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, 0);
if (IS_ERR_VALUE(maddr))
return (int) maddr;
@@ -1046,9 +1042,6 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
if (phdr->p_flags & PF_X) prot |= PROT_EXEC;
flags = MAP_PRIVATE | MAP_DENYWRITE;
- if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
- flags |= MAP_EXECUTABLE;
-
maddr = 0;
switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
@@ -1331,6 +1324,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
{
const struct cred *cred;
unsigned int i, len;
+ unsigned int state;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
@@ -1353,7 +1347,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
psinfo->pr_pgrp = task_pgrp_vnr(p);
psinfo->pr_sid = task_session_vnr(p);
- i = p->state ? ffz(~p->state) + 1 : 0;
+ state = READ_ONCE(p->__state);
+ i = state ? ffz(~state) + 1 : 0;
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index a1072c6a2341..5d776f80ee50 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -573,7 +573,7 @@ static int load_flat_file(struct linux_binprm *bprm,
pr_debug("ROM mapping of file (we hope)\n");
textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
- MAP_PRIVATE|MAP_EXECUTABLE, 0);
+ MAP_PRIVATE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
ret = textpos;
if (!textpos)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6cc4d4cfe0c2..ca8bf1869ca8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -895,7 +895,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
bdev = I_BDEV(inode);
- mutex_init(&bdev->bd_mutex);
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
bdev->bd_disk = disk;
@@ -1154,7 +1153,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
struct bd_holder_disk *holder;
int ret = 0;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
WARN_ON_ONCE(!bdev->bd_holder);
@@ -1199,7 +1198,7 @@ out_del:
out_free:
kfree(holder);
out_unlock:
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
@@ -1218,7 +1217,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder_disk *holder;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
holder = bd_find_holder_disk(bdev, disk);
@@ -1230,138 +1229,97 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
kfree(holder);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
-static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+static void blkdev_flush_mapping(struct block_device *bdev)
+{
+ WARN_ON_ONCE(bdev->bd_holders);
+ sync_blockdev(bdev);
+ kill_bdev(bdev);
+ bdev_write_inode(bdev);
+}
-int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
int ret = 0;
- lockdep_assert_held(&bdev->bd_mutex);
-
- if (!(disk->flags & GENHD_FL_UP))
- return -ENXIO;
-
-rescan:
- if (bdev->bd_part_count)
- return -EBUSY;
- sync_blockdev(bdev);
- invalidate_bdev(bdev);
- blk_drop_partitions(disk);
-
- clear_bit(GD_NEED_PART_SCAN, &disk->state);
-
- /*
- * Historically we only set the capacity to zero for devices that
- * support partitions (independ of actually having partitions created).
- * Doing that is rather inconsistent, but changing it broke legacy
- * udisks polling for legacy ide-cdrom devices. Use the crude check
- * below to get the sane behavior for most device while not breaking
- * userspace for this particular setup.
- */
- if (invalidate) {
- if (disk_part_scan_enabled(disk) ||
- !(disk->flags & GENHD_FL_REMOVABLE))
- set_capacity(disk, 0);
+ if (disk->fops->open) {
+ ret = disk->fops->open(bdev, mode);
+ if (ret) {
+ /* avoid ghost partitions on a removed medium */
+ if (ret == -ENOMEDIUM &&
+ test_bit(GD_NEED_PART_SCAN, &disk->state))
+ bdev_disk_changed(disk, true);
+ return ret;
+ }
}
- if (get_capacity(disk)) {
- ret = blk_add_partitions(disk, bdev);
- if (ret == -EAGAIN)
- goto rescan;
- } else if (invalidate) {
- /*
- * Tell userspace that the media / partition table may have
- * changed.
- */
- kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ if (!bdev->bd_openers) {
+ set_init_blocksize(bdev);
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
}
+ if (test_bit(GD_NEED_PART_SCAN, &disk->state))
+ bdev_disk_changed(disk, false);
+ bdev->bd_openers++;
+ return 0;
+}
- return ret;
+static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
+{
+ if (!--bdev->bd_openers)
+ blkdev_flush_mapping(bdev);
+ if (bdev->bd_disk->fops->release)
+ bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}
-/*
- * Only exported for loop and dasd for historic reasons. Don't use in new
- * code!
- */
-EXPORT_SYMBOL_GPL(bdev_disk_changed);
-/*
- * bd_mutex locking:
- *
- * mutex_lock(part->bd_mutex)
- * mutex_lock_nested(whole->bd_mutex, 1)
- */
-static int __blkdev_get(struct block_device *bdev, fmode_t mode)
+static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
- struct gendisk *disk = bdev->bd_disk;
- int ret = 0;
+ struct gendisk *disk = part->bd_disk;
+ struct block_device *whole;
+ int ret;
- if (!(disk->flags & GENHD_FL_UP))
- return -ENXIO;
+ if (part->bd_openers)
+ goto done;
- if (!bdev->bd_openers) {
- if (!bdev_is_partition(bdev)) {
- ret = 0;
- if (disk->fops->open)
- ret = disk->fops->open(bdev, mode);
+ whole = bdgrab(disk->part0);
+ ret = blkdev_get_whole(whole, mode);
+ if (ret)
+ goto out_put_whole;
- if (!ret)
- set_init_blocksize(bdev);
+ ret = -ENXIO;
+ if (!bdev_nr_sectors(part))
+ goto out_blkdev_put;
- /*
- * If the device is invalidated, rescan partition
- * if open succeeded or failed with -ENOMEDIUM.
- * The latter is necessary to prevent ghost
- * partitions on a removed medium.
- */
- if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
- (!ret || ret == -ENOMEDIUM))
- bdev_disk_changed(bdev, ret == -ENOMEDIUM);
+ disk->open_partitions++;
+ set_init_blocksize(part);
+ if (part->bd_bdi == &noop_backing_dev_info)
+ part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
+done:
+ part->bd_openers++;
+ return 0;
- if (ret)
- return ret;
- } else {
- struct block_device *whole = bdgrab(disk->part0);
-
- mutex_lock_nested(&whole->bd_mutex, 1);
- ret = __blkdev_get(whole, mode);
- if (ret) {
- mutex_unlock(&whole->bd_mutex);
- bdput(whole);
- return ret;
- }
- whole->bd_part_count++;
- mutex_unlock(&whole->bd_mutex);
+out_blkdev_put:
+ blkdev_put_whole(whole, mode);
+out_put_whole:
+ bdput(whole);
+ return ret;
+}
- if (!bdev_nr_sectors(bdev)) {
- __blkdev_put(whole, mode, 1);
- bdput(whole);
- return -ENXIO;
- }
- set_init_blocksize(bdev);
- }
+static void blkdev_put_part(struct block_device *part, fmode_t mode)
+{
+ struct block_device *whole = bdev_whole(part);
- if (bdev->bd_bdi == &noop_backing_dev_info)
- bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
- } else {
- if (!bdev_is_partition(bdev)) {
- if (bdev->bd_disk->fops->open)
- ret = bdev->bd_disk->fops->open(bdev, mode);
- /* the same as first opener case, read comment there */
- if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
- (!ret || ret == -ENOMEDIUM))
- bdev_disk_changed(bdev, ret == -ENOMEDIUM);
- if (ret)
- return ret;
- }
- }
- bdev->bd_openers++;
- return 0;
+ if (--part->bd_openers)
+ return;
+ blkdev_flush_mapping(part);
+ whole->bd_disk->open_partitions--;
+ blkdev_put_whole(whole, mode);
+ bdput(whole);
}
struct block_device *blkdev_get_no_open(dev_t dev)
@@ -1447,8 +1405,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
disk_block_events(disk);
- mutex_lock(&bdev->bd_mutex);
- ret =__blkdev_get(bdev, mode);
+ mutex_lock(&disk->open_mutex);
+ ret = -ENXIO;
+ if (!(disk->flags & GENHD_FL_UP))
+ goto abort_claiming;
+ if (bdev_is_partition(bdev))
+ ret = blkdev_get_part(bdev, mode);
+ else
+ ret = blkdev_get_whole(bdev, mode);
if (ret)
goto abort_claiming;
if (mode & FMODE_EXCL) {
@@ -1467,7 +1431,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
unblock_events = false;
}
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&disk->open_mutex);
if (unblock_events)
disk_unblock_events(disk);
@@ -1476,7 +1440,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
abort_claiming:
if (mode & FMODE_EXCL)
bd_abort_claiming(bdev, holder);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&disk->open_mutex);
disk_unblock_events(disk);
put_blkdev:
blkdev_put_no_open(bdev);
@@ -1551,10 +1515,9 @@ static int blkdev_open(struct inode * inode, struct file * filp)
return 0;
}
-static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
+void blkdev_put(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
- struct block_device *victim = NULL;
/*
* Sync early if it looks like we're the last one. If someone else
@@ -1566,41 +1529,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_openers == 1)
sync_blockdev(bdev);
- mutex_lock_nested(&bdev->bd_mutex, for_part);
- if (for_part)
- bdev->bd_part_count--;
-
- if (!--bdev->bd_openers) {
- WARN_ON_ONCE(bdev->bd_holders);
- sync_blockdev(bdev);
- kill_bdev(bdev);
- bdev_write_inode(bdev);
- if (bdev_is_partition(bdev))
- victim = bdev_whole(bdev);
- }
-
- if (!bdev_is_partition(bdev) && disk->fops->release)
- disk->fops->release(disk, mode);
- mutex_unlock(&bdev->bd_mutex);
- if (victim) {
- __blkdev_put(victim, mode, 1);
- bdput(victim);
- }
-}
-
-void blkdev_put(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
-
- mutex_lock(&bdev->bd_mutex);
-
+ mutex_lock(&disk->open_mutex);
if (mode & FMODE_EXCL) {
struct block_device *whole = bdev_whole(bdev);
bool bdev_free;
/*
* Release a claim on the device. The holder fields
- * are protected with bdev_lock. bd_mutex is to
+ * are protected with bdev_lock. open_mutex is to
* synchronize disk_holder unlinking.
*/
spin_lock(&bdev_lock);
@@ -1631,9 +1567,13 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
* from userland - e.g. eject(1).
*/
disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
- mutex_unlock(&bdev->bd_mutex);
- __blkdev_put(bdev, mode, 0);
+ if (bdev_is_partition(bdev))
+ blkdev_put_part(bdev, mode);
+ else
+ blkdev_put_whole(bdev, mode);
+ mutex_unlock(&disk->open_mutex);
+
blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
@@ -1754,6 +1694,7 @@ static int blkdev_writepages(struct address_space *mapping,
}
static const struct address_space_operations def_blk_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = blkdev_readpage,
.readahead = blkdev_readahead,
.writepage = blkdev_writepage,
@@ -1940,10 +1881,10 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
old_inode = inode;
bdev = I_BDEV(inode);
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
if (bdev->bd_openers)
func(bdev, arg);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
spin_lock(&blockdev_superblock->s_inode_list_lock);
}
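
The net effect of this block_dev.c refactor is that a single per-disk lock (disk->open_mutex) serializes both the whole-device and per-partition open counts, replacing the nested per-bdev bd_mutex scheme, and that a partition opener always pins the whole device first. A rough userspace analogy of the pairing (the names below are stand-ins, not kernel API):

    #include <pthread.h>
    #include <stdio.h>

    struct fake_disk {
            pthread_mutex_t open_mutex;     /* stands in for disk->open_mutex */
            int whole_openers;              /* stands in for bd_openers */
            int open_partitions;
    };

    static void get_whole(struct fake_disk *d)
    {
            d->whole_openers++;
    }

    static void put_whole(struct fake_disk *d)
    {
            if (--d->whole_openers == 0)
                    printf("last opener gone: flush the mapping\n");
    }

    static void get_part(struct fake_disk *d)
    {
            get_whole(d);                   /* a partition pins the whole device */
            d->open_partitions++;
    }

    static void put_part(struct fake_disk *d)
    {
            d->open_partitions--;
            put_whole(d);
    }

    int main(void)
    {
            struct fake_disk d = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

            pthread_mutex_lock(&d.open_mutex);  /* caller holds it, as in blkdev_get_by_dev() */
            get_part(&d);                       /* open a partition */
            get_whole(&d);                      /* open the whole device too */
            put_whole(&d);
            put_part(&d);                       /* prints the flush message */
            pthread_mutex_unlock(&d.open_mutex);
            return 0;
    }
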
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 68b95ad82126..520a0f6a7d9e 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -18,6 +18,8 @@ config BTRFS_FS
select RAID6_PQ
select XOR_BLOCKS
select SRCU
+ depends on !PPC_256K_PAGES # powerpc
+ depends on !PAGE_SIZE_256KB # hexagon
help
Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 117d423fdb93..7a8a2fc19533 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2675,7 +2675,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
*
* @ref_key: The same as @ref_key in handle_direct_tree_backref()
* @tree_key: The first key of this tree block.
- * @path: A clean (released) path, to avoid allocating path everytime
+ * @path: A clean (released) path, to avoid allocating path every time
the function gets called.
*/
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index aa57bdc8fc89..38b127b9edfc 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1399,7 +1399,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
btrfs_space_info_update_bytes_pinned(fs_info, space_info,
-block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
- __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
block_group->pinned = 0;
spin_unlock(&block_group->lock);
@@ -1491,7 +1490,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
struct btrfs_block_group *bg;
struct btrfs_space_info *space_info;
- int ret;
+ LIST_HEAD(again_list);
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
@@ -1502,6 +1501,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
mutex_lock(&fs_info->reclaim_bgs_lock);
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->reclaim_bgs)) {
+ int ret = 0;
+
bg = list_first_entry(&fs_info->reclaim_bgs,
struct btrfs_block_group,
bg_list);
@@ -1547,9 +1548,13 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
bg->start);
next:
- btrfs_put_block_group(bg);
spin_lock(&fs_info->unused_bgs_lock);
+ if (ret == -EAGAIN && list_empty(&bg->bg_list))
+ list_add_tail(&bg->bg_list, &again_list);
+ else
+ btrfs_put_block_group(bg);
}
+ list_splice_tail(&again_list, &fs_info->reclaim_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
btrfs_exclop_finish(fs_info);
@@ -2442,16 +2447,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
if (!--cache->ro) {
- num_bytes = cache->length - cache->reserved -
- cache->pinned - cache->bytes_super -
- cache->zone_unusable - cache->used;
- sinfo->bytes_readonly -= num_bytes;
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable = cache->alloc_offset - cache->used;
sinfo->bytes_zone_unusable += cache->zone_unusable;
sinfo->bytes_readonly -= cache->zone_unusable;
}
+ num_bytes = cache->length - cache->reserved -
+ cache->pinned - cache->bytes_super -
+ cache->zone_unusable - cache->used;
+ sinfo->bytes_readonly -= num_bytes;
list_del_init(&cache->ro_list);
}
spin_unlock(&cache->lock);
@@ -2505,7 +2510,7 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
struct extent_changeset *data_reserved = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
- u64 num_pages = 0;
+ u64 cache_size = 0;
int retries = 0;
int ret = 0;
@@ -2617,20 +2622,20 @@ again:
* taking up quite a bit since it's not folded into the other space
* cache.
*/
- num_pages = div_u64(block_group->length, SZ_256M);
- if (!num_pages)
- num_pages = 1;
+ cache_size = div_u64(block_group->length, SZ_256M);
+ if (!cache_size)
+ cache_size = 1;
- num_pages *= 16;
- num_pages *= PAGE_SIZE;
+ cache_size *= 16;
+ cache_size *= fs_info->sectorsize;
ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
- num_pages);
+ cache_size);
if (ret)
goto out_put;
- ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
- num_pages, num_pages,
+ ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
+ cache_size, cache_size,
&alloc_hint);
/*
* Our cache requires contiguous chunks so that we don't modify a bunch
@@ -3062,8 +3067,6 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- __btrfs_mod_total_bytes_pinned(cache->space_info,
- num_bytes);
set_extent_dirty(&trans->transaction->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
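
The cache_save_setup() hunk above only changes the sizing unit: the free-space cache is still sized at 16 units per full 256MiB of block group (minimum one step), but the unit is now the filesystem sectorsize instead of the CPU PAGE_SIZE, so the result tracks the on-disk sector size rather than the host page size. A standalone sketch of the arithmetic (inputs are made up):

    #include <stdio.h>

    #define SZ_256M (256ULL * 1024 * 1024)

    static unsigned long long cache_size(unsigned long long length,
                                         unsigned int sectorsize)
    {
            unsigned long long size = length / SZ_256M;

            if (!size)
                    size = 1;       /* at least one 256MiB step */
            return size * 16 * sectorsize;
    }

    int main(void)
    {
            /* a 1GiB block group with 4KiB sectors: 4 * 16 * 4096 bytes */
            printf("%llu\n", cache_size(4 * SZ_256M, 4096));
            return 0;
    }
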
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d17ac301032e..9a023ae0f98b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -149,7 +149,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
const u32 csum_size = fs_info->csum_size;
const u32 sectorsize = fs_info->sectorsize;
struct page *page;
- unsigned long i;
+ unsigned int i;
char *kaddr;
u8 csum[BTRFS_CSUM_SIZE];
struct compressed_bio *cb = bio->bi_private;
@@ -208,7 +208,7 @@ static void end_compressed_bio_read(struct bio *bio)
struct compressed_bio *cb = bio->bi_private;
struct inode *inode;
struct page *page;
- unsigned long index;
+ unsigned int index;
unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
int ret = 0;
@@ -334,7 +334,7 @@ static void end_compressed_bio_write(struct bio *bio)
struct compressed_bio *cb = bio->bi_private;
struct inode *inode;
struct page *page;
- unsigned long index;
+ unsigned int index;
if (bio->bi_status)
cb->errors = 1;
@@ -349,12 +349,10 @@ static void end_compressed_bio_write(struct bio *bio)
* call back into the FS and do all the end_io operations
*/
inode = cb->inode;
- cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
btrfs_record_physical_zoned(inode, cb->start, bio);
- btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
+ btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
cb->start, cb->start + cb->len - 1,
bio->bi_status == BLK_STS_OK);
- cb->compressed_pages[0]->mapping = NULL;
end_compressed_writeback(inode, cb);
/* note, our inode could be gone now */
@@ -387,10 +385,10 @@ out:
* the end io hooks.
*/
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
- unsigned long len, u64 disk_start,
- unsigned long compressed_len,
+ unsigned int len, u64 disk_start,
+ unsigned int compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages,
+ unsigned int nr_pages,
unsigned int write_flags,
struct cgroup_subsys_state *blkcg_css)
{
@@ -427,24 +425,16 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
bio->bi_end_io = end_compressed_bio_write;
if (use_append) {
- struct extent_map *em;
- struct map_lookup *map;
- struct block_device *bdev;
+ struct btrfs_device *device;
- em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
- if (IS_ERR(em)) {
+ device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
+ if (IS_ERR(device)) {
kfree(cb);
bio_put(bio);
return BLK_STS_NOTSUPP;
}
- map = em->map_lookup;
- /* We only support single profile for now */
- ASSERT(map->num_stripes == 1);
- bdev = map->stripes[0].dev->bdev;
-
- bio_set_dev(bio, bdev);
- free_extent_map(em);
+ bio_set_dev(bio, device->bdev);
}
if (blkcg_css) {
@@ -457,7 +447,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
bytes_left = compressed_len;
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
int submit = 0;
- int len;
+ int len = 0;
page = compressed_pages[pg_index];
page->mapping = inode->vfs_inode.i_mapping;
@@ -465,10 +455,17 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
0);
- if (pg_index == 0 && use_append)
- len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
- else
- len = bio_add_page(bio, page, PAGE_SIZE, 0);
+ /*
+ * Page can only be added to bio if the current bio fits in
+ * stripe.
+ */
+ if (!submit) {
+ if (pg_index == 0 && use_append)
+ len = bio_add_zone_append_page(bio, page,
+ PAGE_SIZE, 0);
+ else
+ len = bio_add_page(bio, page, PAGE_SIZE, 0);
+ }
page->mapping = NULL;
if (submit || len < PAGE_SIZE) {
@@ -508,7 +505,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
}
if (bytes_left < PAGE_SIZE) {
btrfs_info(fs_info,
- "bytes left %lu compress len %lu nr %lu",
+ "bytes left %lu compress len %u nr %u",
bytes_left, cb->compressed_len, cb->nr_pages);
}
bytes_left -= PAGE_SIZE;
@@ -670,9 +667,9 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
- unsigned long compressed_len;
- unsigned long nr_pages;
- unsigned long pg_index;
+ unsigned int compressed_len;
+ unsigned int nr_pages;
+ unsigned int pg_index;
struct page *page;
struct bio *comp_bio;
u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
@@ -1195,9 +1192,6 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
*
* @total_out is an in/out parameter, must be set to the input length and will
* be also used to return the total number of compressed bytes
- *
- * @max_out tells us the max number of bytes that we're allowed to
- * stuff into pages
*/
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
u64 start, struct page **pages,
@@ -1218,20 +1212,6 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
return ret;
}
-/*
- * pages_in is an array of pages with compressed data.
- *
- * disk_start is the starting logical offset of this array in the file
- *
- * orig_bio contains the pages from the file that we want to decompress into
- *
- * srclen is the number of bytes in pages_in
- *
- * The basic idea is that we have a bio that was created by readpages.
- * The pages in the bio are for the uncompressed data, and they may not
- * be contiguous. They all correspond to the range of bytes covered by
- * the compressed extent.
- */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
struct list_head *workspace;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 8001b700ea3a..c359f20920d0 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -31,6 +31,9 @@ struct compressed_bio {
/* number of bios pending for this compressed extent */
refcount_t pending_bios;
+ /* Number of compressed pages in the array */
+ unsigned int nr_pages;
+
/* the pages with the compressed data on them */
struct page **compressed_pages;
@@ -40,20 +43,17 @@ struct compressed_bio {
/* starting offset in the inode for our pages */
u64 start;
- /* number of bytes in the inode we're working on */
- unsigned long len;
-
- /* number of bytes on disk */
- unsigned long compressed_len;
+ /* Number of bytes in the inode we're working on */
+ unsigned int len;
- /* the compression algorithm for this bio */
- int compress_type;
+ /* Number of bytes on disk */
+ unsigned int compressed_len;
- /* number of compressed pages in the array */
- unsigned long nr_pages;
+ /* The compression algorithm for this bio */
+ u8 compress_type;
/* IO errors */
- int errors;
+ u8 errors;
int mirror_num;
/* for reads, this is the bio we are copying the data into */
@@ -91,10 +91,10 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
struct bio *bio);
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
- unsigned long len, u64 disk_start,
- unsigned long compressed_len,
+ unsigned int len, u64 disk_start,
+ unsigned int compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages,
+ unsigned int nr_pages,
unsigned int write_flags,
struct cgroup_subsys_state *blkcg_css);
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a484fb72a01f..4bc3ca2cbd7d 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -596,7 +596,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
trans->transid, fs_info->generation);
if (!should_cow_block(trans, root, buf)) {
- trans->dirty = true;
*cow_ret = buf;
return 0;
}
@@ -1788,10 +1787,8 @@ again:
* then we don't want to set the path blocking,
* so we test it here
*/
- if (!should_cow_block(trans, root, b)) {
- trans->dirty = true;
+ if (!should_cow_block(trans, root, b))
goto cow_done;
- }
/*
* must have write locks on this node and the
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9fb76829a281..e5e53e592d4f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -561,10 +561,16 @@ enum {
/*
* Indicate that balance has been set up from the ioctl and is in the
* main phase. The fs_info::balance_ctl is initialized.
- * Set and cleared while holding fs_info::balance_mutex.
*/
BTRFS_FS_BALANCE_RUNNING,
+ /*
+ * Indicate that relocation of a chunk has started; it's set per chunk
+ * and is toggled between chunks.
+ * Set, tested and cleared while holding fs_info::send_reloc_lock.
+ */
+ BTRFS_FS_RELOC_RUNNING,
+
/* Indicate that the cleaner thread is awake and doing something. */
BTRFS_FS_CLEANER_RUNNING,
@@ -817,8 +823,6 @@ struct btrfs_fs_info {
struct kobject *space_info_kobj;
struct kobject *qgroups_kobj;
- u64 total_pinned;
-
/* used to keep from writing metadata until there is a nice batch */
struct percpu_counter dirty_metadata_bytes;
struct percpu_counter delalloc_bytes;
@@ -871,6 +875,9 @@ struct btrfs_fs_info {
struct btrfs_balance_control *balance_ctl;
wait_queue_head_t balance_wait_q;
+ /* Cancellation requests for chunk relocation */
+ atomic_t reloc_cancel_req;
+
u32 data_chunk_allocations;
u32 metadata_ratio;
@@ -986,14 +993,15 @@ struct btrfs_fs_info {
struct crypto_shash *csum_shash;
+ spinlock_t send_reloc_lock;
/*
* Number of send operations in progress.
- * Updated while holding fs_info::balance_mutex.
+ * Updated while holding fs_info::send_reloc_lock.
*/
int send_in_progress;
- /* Type of exclusive operation running */
- unsigned long exclusive_operation;
+ /* Type of exclusive operation running, protected by super_lock */
+ enum btrfs_exclusive_operation exclusive_operation;
/*
* Zone size > 0 when in ZONED mode, otherwise it's used for a check
@@ -1375,38 +1383,39 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
*
* Note: don't forget to add new options to btrfs_show_options()
*/
-#define BTRFS_MOUNT_NODATASUM (1 << 0)
-#define BTRFS_MOUNT_NODATACOW (1 << 1)
-#define BTRFS_MOUNT_NOBARRIER (1 << 2)
-#define BTRFS_MOUNT_SSD (1 << 3)
-#define BTRFS_MOUNT_DEGRADED (1 << 4)
-#define BTRFS_MOUNT_COMPRESS (1 << 5)
-#define BTRFS_MOUNT_NOTREELOG (1 << 6)
-#define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7)
-#define BTRFS_MOUNT_SSD_SPREAD (1 << 8)
-#define BTRFS_MOUNT_NOSSD (1 << 9)
-#define BTRFS_MOUNT_DISCARD_SYNC (1 << 10)
-#define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11)
-#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
-#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
-#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
-#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
-#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
-/* bit 17 is free */
-#define BTRFS_MOUNT_USEBACKUPROOT (1 << 18)
-#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19)
-#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20)
-#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
-#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
-#define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23)
-#define BTRFS_MOUNT_FRAGMENT_DATA (1 << 24)
-#define BTRFS_MOUNT_FRAGMENT_METADATA (1 << 25)
-#define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26)
-#define BTRFS_MOUNT_NOLOGREPLAY (1 << 27)
-#define BTRFS_MOUNT_REF_VERIFY (1 << 28)
-#define BTRFS_MOUNT_DISCARD_ASYNC (1 << 29)
-#define BTRFS_MOUNT_IGNOREBADROOTS (1 << 30)
-#define BTRFS_MOUNT_IGNOREDATACSUMS (1 << 31)
+enum {
+ BTRFS_MOUNT_NODATASUM = (1UL << 0),
+ BTRFS_MOUNT_NODATACOW = (1UL << 1),
+ BTRFS_MOUNT_NOBARRIER = (1UL << 2),
+ BTRFS_MOUNT_SSD = (1UL << 3),
+ BTRFS_MOUNT_DEGRADED = (1UL << 4),
+ BTRFS_MOUNT_COMPRESS = (1UL << 5),
+ BTRFS_MOUNT_NOTREELOG = (1UL << 6),
+ BTRFS_MOUNT_FLUSHONCOMMIT = (1UL << 7),
+ BTRFS_MOUNT_SSD_SPREAD = (1UL << 8),
+ BTRFS_MOUNT_NOSSD = (1UL << 9),
+ BTRFS_MOUNT_DISCARD_SYNC = (1UL << 10),
+ BTRFS_MOUNT_FORCE_COMPRESS = (1UL << 11),
+ BTRFS_MOUNT_SPACE_CACHE = (1UL << 12),
+ BTRFS_MOUNT_CLEAR_CACHE = (1UL << 13),
+ BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1UL << 14),
+ BTRFS_MOUNT_ENOSPC_DEBUG = (1UL << 15),
+ BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
+ BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
+ BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
+ BTRFS_MOUNT_CHECK_INTEGRITY = (1UL << 19),
+ BTRFS_MOUNT_CHECK_INTEGRITY_DATA = (1UL << 20),
+ BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 21),
+ BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 22),
+ BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 23),
+ BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 24),
+ BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 25),
+ BTRFS_MOUNT_NOLOGREPLAY = (1UL << 26),
+ BTRFS_MOUNT_REF_VERIFY = (1UL << 27),
+ BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 28),
+ BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 29),
+ BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 30),
+};
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)
@@ -2216,11 +2225,13 @@ BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
+ /* Byte-swap the constant at compile time, root_item::flags is LE */
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}
static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
+ /* Byte-swap the constant at compile time, root_item::flags is LE */
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
@@ -2746,9 +2757,9 @@ enum btrfs_reserve_flush_enum {
/*
* Flush space by above mentioned methods and by:
* - Running delayed iputs
- * - Commiting transaction
+ * - Committing transaction
*
- * Can be interruped by fatal signal.
+ * Can be interrupted by a fatal signal.
*/
BTRFS_RESERVE_FLUSH_DATA,
BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
@@ -2758,7 +2769,7 @@ enum btrfs_reserve_flush_enum {
* Pretty much the same as FLUSH_ALL, but can also steal space from
* global rsv.
*
- * Can be interruped by fatal signal.
+ * Can be interrupted by a fatal signal.
*/
BTRFS_RESERVE_FLUSH_ALL_STEAL,
};
@@ -2774,7 +2785,6 @@ enum btrfs_flush_state {
ALLOC_CHUNK_FORCE = 8,
RUN_DELAYED_IPUTS = 9,
COMMIT_TRANS = 10,
- FORCE_COMMIT_TRANS = 11,
};
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
@@ -3100,8 +3110,8 @@ u64 btrfs_file_extent_end(const struct btrfs_path *path);
/* inode.c */
blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
-int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
- struct page *page, u64 start, u64 end);
+unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
+ struct page *page, u64 start, u64 end);
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
u64 start, u64 len);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
@@ -3125,7 +3135,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode, u64 new_size,
- u32 min_type);
+ u32 min_type, u64 *extents_found);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
@@ -3146,9 +3156,7 @@ void btrfs_split_delalloc_extent(struct inode *inode,
struct extent_state *orig, u64 split);
int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
unsigned long bio_flags);
-bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
- unsigned int size);
-void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
+void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3187,7 +3195,8 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
u64 start, u64 end, int *page_started, unsigned long *nr_written,
struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
-void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
+void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
+ struct page *page, u64 start,
u64 end, int uptodate);
extern const struct dentry_operations btrfs_dentry_operations;
extern const struct iomap_ops btrfs_dio_iomap_ops;
@@ -3222,6 +3231,9 @@ void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
enum btrfs_exclusive_operation type);
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type);
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
/* file.c */
@@ -3786,4 +3798,14 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
return fs_info->zoned != 0;
}
+/*
+ * We use page status Private2 to indicate there is an ordered extent with
+ * unfinished IO.
+ *
+ * Rename the Private2 accessors to Ordered, to improve readability.
+ */
+#define PageOrdered(page) PagePrivate2(page)
+#define SetPageOrdered(page) SetPagePrivate2(page)
+#define ClearPageOrdered(page) ClearPagePrivate2(page)
+
#endif
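
Moving the BTRFS_MOUNT_* bits from #defines into an enum keeps every existing bitmask test working unchanged while letting the compiler see the full set of values (dropping the previously free bit 17 renumbers the later flags, which is safe since these are runtime-only values, never persisted on disk). A standalone sketch of the flag pattern; the helper below only mimics the spirit of the kernel's btrfs_test_opt() macro:

    #include <stdio.h>

    enum {
            MOUNT_NODATASUM = (1UL << 0),   /* illustrative subset */
            MOUNT_COMPRESS  = (1UL << 5),
    };

    static int test_opt(unsigned long mount_opt, unsigned long flag)
    {
            return (mount_opt & flag) != 0;
    }

    int main(void)
    {
            unsigned long opts = MOUNT_COMPRESS;

            printf("compress: %d, nodatasum: %d\n",
                   test_opt(opts, MOUNT_COMPRESS),
                   test_opt(opts, MOUNT_NODATASUM));
            return 0;
    }
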
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 56642ca7af10..2059d1504149 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -89,7 +89,7 @@
* ->outstanding_extents += 1 (current value is 1)
*
* -> set_delalloc
- * ->outstanding_extents += 1 (currrent value is 2)
+ * ->outstanding_extents += 1 (current value is 2)
*
* -> btrfs_delalloc_release_extents()
* ->outstanding_extents -= 1 (current value is 1)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 1a88f6214ebc..257c1e18abd4 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -681,7 +681,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
{
struct btrfs_delayed_item *curr, *next;
int free_space;
- int total_data_size = 0, total_size = 0;
+ int total_size = 0;
struct extent_buffer *leaf;
char *data_ptr;
struct btrfs_key *keys;
@@ -706,7 +706,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
*/
while (total_size + next->data_len + sizeof(struct btrfs_item) <=
free_space) {
- total_data_size += next->data_len;
total_size += next->data_len + sizeof(struct btrfs_item);
list_add_tail(&next->tree_list, &head);
nitems++;
@@ -974,14 +973,16 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
- struct btrfs_delayed_root *delayed_root;
- ASSERT(delayed_node->root);
- clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
- delayed_node->count--;
+ if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
+ struct btrfs_delayed_root *delayed_root;
- delayed_root = delayed_node->root->fs_info->delayed_root;
- finish_one_item(delayed_root);
+ ASSERT(delayed_node->root);
+ delayed_node->count--;
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+ finish_one_item(delayed_root);
+ }
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
@@ -1009,12 +1010,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
nofs_flag = memalloc_nofs_save();
ret = btrfs_lookup_inode(trans, root, path, &key, mod);
memalloc_nofs_restore(nofs_flag);
- if (ret > 0) {
- btrfs_release_path(path);
- return -ENOENT;
- } else if (ret < 0) {
- return ret;
- }
+ if (ret > 0)
+ ret = -ENOENT;
+ if (ret < 0)
+ goto out;
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -1024,7 +1023,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
- goto no_iref;
+ goto out;
path->slots[0]++;
if (path->slots[0] >= btrfs_header_nritems(leaf))
@@ -1046,12 +1045,19 @@ again:
btrfs_del_item(trans, root, path);
out:
btrfs_release_delayed_iref(node);
-no_iref:
btrfs_release_path(path);
err_out:
btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
btrfs_release_delayed_inode(node);
+ /*
+ * If we fail to update the delayed inode we need to abort the
+ * transaction, because we could otherwise leave the inode with
+ * improper counts behind.
+ */
+ if (ret && ret != -ENOENT)
+ btrfs_abort_transaction(trans, ret);
+
return ret;
search:
@@ -1898,8 +1904,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
btrfs_release_delayed_item(prev_item);
}
- if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
- btrfs_release_delayed_iref(delayed_node);
+ btrfs_release_delayed_iref(delayed_node);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
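
The delayed-inode hunks fold the DEL_IREF flag test into btrfs_release_delayed_iref() itself via test_and_clear_bit(), so the release becomes idempotent and the callers (__btrfs_update_delayed_inode() and __btrfs_kill_delayed_node()) can drop their own test_bit() guards. A userspace sketch of the pattern, with a plain non-atomic stand-in for test_and_clear_bit():

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long flags = 1UL << 0;          /* DEL_IREF-style bit, set */

    static bool test_and_clear(unsigned long *f, int bit)
    {
            bool was_set = *f & (1UL << bit);

            *f &= ~(1UL << bit);
            return was_set;
    }

    static void release_iref(void)
    {
            if (test_and_clear(&flags, 0))
                    printf("released iref\n");      /* runs at most once */
    }

    int main(void)
    {
            release_iref();
            release_iref();                         /* second call is a no-op */
            return 0;
    }
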
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index c92d9d4f5f46..06bc842ecdb3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -641,7 +641,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs =
&trans->transaction->delayed_refs;
struct btrfs_fs_info *fs_info = trans->fs_info;
- u64 flags = btrfs_ref_head_to_space_flags(existing);
int old_ref_mod;
BUG_ON(existing->is_data != update->is_data);
@@ -711,26 +710,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
}
}
- /*
- * This handles the following conditions:
- *
- * 1. We had a ref mod of 0 or more and went negative, indicating that
- * we may be freeing space, so add our space to the
- * total_bytes_pinned counter.
- * 2. We were negative and went to 0 or positive, so no longer can say
- * that the space would be pinned, decrement our counter from the
- * total_bytes_pinned counter.
- * 3. We are now at 0 and have ->must_insert_reserved set, which means
- * this was a new allocation and then we dropped it, and thus must
- * add our space to the total_bytes_pinned counter.
- */
- if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
- btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
- else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
- btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
- else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
- btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
-
spin_unlock(&existing->lock);
}
@@ -835,17 +814,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
- u64 flags = btrfs_ref_head_to_space_flags(head_ref);
-
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
trans->delayed_ref_updates +=
btrfs_csum_bytes_to_leaves(trans->fs_info,
head_ref->num_bytes);
}
- if (head_ref->ref_mod < 0)
- btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
- head_ref->num_bytes);
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index d05f73530af7..d029be40ea6f 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -37,7 +37,7 @@
* - Write duplication
*
* All new writes will be written to both target and source devices, so even
- * if replace gets canceled, sources device still contans up-to-date data.
+ * if replace gets canceled, the source device still contains up-to-date data.
*
* Location: handle_ops_on_dev_replace() from __btrfs_map_block()
* Start: btrfs_dev_replace_start()
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 306ff20af70f..e1b7bd927d69 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -624,7 +624,7 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
* @fs_info: fs_info of interest
*
* The unused_bgs list needs to be punted to the discard lists because the
- * order of operations is changed. In the normal sychronous discard path, the
+ * order of operations is changed. In the normal synchronous discard path, the
* block groups are trimmed via a single large trim in transaction commit. This
* is ultimately what we are trying to avoid with asynchronous discard. Thus,
* it must be done before going down the unused_bgs path.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c9a3036c23bf..b117dd3b8172 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -241,7 +241,6 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
{
struct extent_state *cached_state = NULL;
int ret;
- bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
return 0;
@@ -249,9 +248,6 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
if (atomic)
return -EAGAIN;
- if (need_lock)
- btrfs_tree_read_lock(eb);
-
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state);
if (extent_buffer_uptodate(eb) &&
@@ -264,22 +260,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
eb->start,
parent_transid, btrfs_header_generation(eb));
ret = 1;
-
- /*
- * Things reading via commit roots that don't have normal protection,
- * like send, can have a really old block in cache that may point at a
- * block that has been freed and re-allocated. So don't clear uptodate
- * if we find an eb that is under IO (dirty/writeback) because we could
- * end up reading in the stale data and then writing it back out and
- * making everybody very sad.
- */
- if (!extent_buffer_under_io(eb))
- clear_extent_buffer_uptodate(eb);
+ clear_extent_buffer_uptodate(eb);
out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state);
- if (need_lock)
- btrfs_tree_read_unlock(eb);
return ret;
}
@@ -584,6 +568,7 @@ static int validate_extent_buffer(struct extent_buffer *eb)
const u32 csum_size = fs_info->csum_size;
u8 found_level;
u8 result[BTRFS_CSUM_SIZE];
+ const u8 *header_csum;
int ret = 0;
found_start = btrfs_header_bytenr(eb);
@@ -608,15 +593,14 @@ static int validate_extent_buffer(struct extent_buffer *eb)
}
csum_tree_block(eb, result);
+ header_csum = page_address(eb->pages[0]) +
+ get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
- if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
- u8 val[BTRFS_CSUM_SIZE] = { 0 };
-
- read_extent_buffer(eb, &val, 0, csum_size);
+ if (memcmp(result, header_csum, csum_size) != 0) {
btrfs_warn_rl(fs_info,
- "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
- fs_info->sb->s_id, eb->start,
- CSUM_FMT_VALUE(csum_size, val),
+ "checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
+ eb->start,
+ CSUM_FMT_VALUE(csum_size, header_csum),
CSUM_FMT_VALUE(csum_size, result),
btrfs_header_level(eb));
ret = -EUCLEAN;
@@ -917,23 +901,22 @@ static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
return btree_csum_one_bio(bio);
}
-static int check_async_write(struct btrfs_fs_info *fs_info,
+static bool should_async_write(struct btrfs_fs_info *fs_info,
struct btrfs_inode *bi)
{
if (btrfs_is_zoned(fs_info))
- return 0;
+ return false;
if (atomic_read(&bi->sync_writers))
- return 0;
+ return false;
if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
- return 0;
- return 1;
+ return false;
+ return true;
}
blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- int async = check_async_write(fs_info, BTRFS_I(inode));
blk_status_t ret;
if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
@@ -946,7 +929,7 @@ blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
if (ret)
goto out_w_error;
ret = btrfs_map_bio(fs_info, bio, mirror_num);
- } else if (!async) {
+ } else if (!should_async_write(fs_info, BTRFS_I(inode))) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
@@ -2252,6 +2235,7 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
atomic_set(&fs_info->balance_cancel_req, 0);
fs_info->balance_ctl = NULL;
init_waitqueue_head(&fs_info->balance_wait_q);
+ atomic_set(&fs_info->reloc_cancel_req, 0);
}
static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
@@ -2648,6 +2632,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+ BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+ "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+ fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ ret = -EINVAL;
+ }
+
+ if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+ memcmp(fs_info->fs_devices->metadata_uuid,
+ fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+ fs_info->super_copy->metadata_uuid,
+ fs_info->fs_devices->metadata_uuid);
+ ret = -EINVAL;
+ }
+
if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
@@ -2981,6 +2983,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->swapfile_pins_lock);
fs_info->swapfile_pins = RB_ROOT;
+ spin_lock_init(&fs_info->send_reloc_lock);
fs_info->send_in_progress = 0;
fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
@@ -3279,14 +3282,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
- ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE));
-
- if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
- ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid,
- BTRFS_FSID_SIZE));
- }
features = btrfs_super_flags(disk_super);
if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
@@ -3461,7 +3456,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
* At this point we know all the devices that make this filesystem,
* including the seed devices but we don't know yet if the replace
* target is required. So free devices that are not part of this
- * filesystem but skip the replace traget device which is checked
+ * filesystem but skip the replace target device which is checked
* below in btrfs_init_dev_replace().
*/
btrfs_free_extra_devids(fs_devices);
@@ -3588,8 +3583,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(fs_info, fs_devices,
btrfs_test_opt(fs_info,
- CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
- 1 : 0,
+ CHECK_INTEGRITY_DATA) ? 1 : 0,
fs_info->check_integrity_print_mask);
if (ret)
btrfs_warn(fs_info,
@@ -4686,9 +4680,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
cache->space_info->bytes_reserved -= head->num_bytes;
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- percpu_counter_add_batch(
- &cache->space_info->total_bytes_pinned,
- head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
btrfs_put_block_group(cache);
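
The validate_extent_buffer() hunk stops copying the stored checksum out of the buffer before comparing: it computes the offset of the csum field inside the first page and memcmp()s in place. A standalone sketch of that in-place comparison; the struct below is a stand-in, not the real btrfs_header layout:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_header {
            unsigned char csum[32];         /* checksum stored first, as in btrfs */
            unsigned char fsid[16];
    };

    int main(void)
    {
            unsigned char block[4096] = { 0 };      /* pretend on-disk metadata block */
            unsigned char computed[32] = { 0 };     /* pretend freshly computed csum */
            const unsigned char *stored;

            /* Point at the csum field inside the raw block; no copy needed. */
            stored = block + offsetof(struct fake_header, csum);
            printf("csum %s\n",
                   memcmp(computed, stored, sizeof(computed)) ? "mismatch" : "ok");
            return 0;
    }
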
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f1d15b68994a..d296483d148f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1425,7 +1425,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
* bytenr of the parent block. Since new extents are always
* created with indirect references, this will only be the case
* when relocating a shared extent. In that case, root_objectid
- * will be BTRFS_TREE_RELOC_OBJECTID. Otheriwse, parent must
+ * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
* be 0
*
* @root_objectid: The id of the root where this modification has originated,
@@ -1804,19 +1804,6 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
}
- /*
- * We were dropping refs, or had a new ref and dropped it, and thus must
- * adjust down our total_bytes_pinned, the space may or may not have
- * been pinned and so is accounted for properly in the pinned space by
- * now.
- */
- if (head->total_ref_mod < 0 ||
- (head->total_ref_mod == 0 && head->must_insert_reserved)) {
- u64 flags = btrfs_ref_head_to_space_flags(head);
-
- btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
- }
-
btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}
@@ -1868,7 +1855,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
btrfs_put_delayed_ref_head(head);
- return 0;
+ return ret;
}
static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
@@ -2551,7 +2538,6 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- __btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
@@ -2762,7 +2748,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
cache->pinned -= len;
btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
space_info->max_extent_size = 0;
- __btrfs_mod_total_bytes_pinned(space_info, -len);
if (cache->ro) {
space_info->bytes_readonly += len;
readonly = true;
@@ -4784,7 +4769,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
}
- trans->dirty = true;
/* this returns a buffer locked for blocking */
return buf;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index dee2dafbc872..9e81d25dea70 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -136,7 +136,7 @@ struct tree_entry {
};
struct extent_page_data {
- struct bio *bio;
+ struct btrfs_bio_ctrl bio_ctrl;
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
*/
@@ -185,10 +185,12 @@ int __must_check submit_one_bio(struct bio *bio, int mirror_num,
/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
- if (epd->bio) {
- epd->bio->bi_status = errno_to_blk_status(ret);
- bio_endio(epd->bio);
- epd->bio = NULL;
+ struct bio *bio = epd->bio_ctrl.bio;
+
+ if (bio) {
+ bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(bio);
+ epd->bio_ctrl.bio = NULL;
}
}
@@ -201,9 +203,10 @@ static void end_write_bio(struct extent_page_data *epd, int ret)
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
int ret = 0;
+ struct bio *bio = epd->bio_ctrl.bio;
- if (epd->bio) {
- ret = submit_one_bio(epd->bio, 0, 0);
+ if (bio) {
+ ret = submit_one_bio(bio, 0, 0);
/*
* Clean up of epd->bio is handled by its endio function.
* And endio is either triggered by successful bio execution
@@ -211,7 +214,7 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
* So at this point, no matter what happened, we don't need
* to clean up epd->bio.
*/
- epd->bio = NULL;
+ epd->bio_ctrl.bio = NULL;
}
return ret;
}
@@ -1805,10 +1808,130 @@ out:
return found;
}
+/*
+ * Process one page for __process_pages_contig().
+ *
+ * Return >0 if we hit @page == @locked_page.
+ * Return 0 if we updated the page status.
+ * Return -EAGAIN if we need to try again.
+ * (For the PAGE_LOCK case, when the page is dirty or no longer belongs to the mapping)
+ */
+static int process_one_page(struct btrfs_fs_info *fs_info,
+ struct address_space *mapping,
+ struct page *page, struct page *locked_page,
+ unsigned long page_ops, u64 start, u64 end)
+{
+ u32 len;
+
+ ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
+ len = end + 1 - start;
+
+ if (page_ops & PAGE_SET_ORDERED)
+ btrfs_page_clamp_set_ordered(fs_info, page, start, len);
+ if (page_ops & PAGE_SET_ERROR)
+ btrfs_page_clamp_set_error(fs_info, page, start, len);
+ if (page_ops & PAGE_START_WRITEBACK) {
+ btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
+ btrfs_page_clamp_set_writeback(fs_info, page, start, len);
+ }
+ if (page_ops & PAGE_END_WRITEBACK)
+ btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
+
+ if (page == locked_page)
+ return 1;
+
+ if (page_ops & PAGE_LOCK) {
+ int ret;
+
+ ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
+ if (ret)
+ return ret;
+ if (!PageDirty(page) || page->mapping != mapping) {
+ btrfs_page_end_writer_lock(fs_info, page, start, len);
+ return -EAGAIN;
+ }
+ }
+ if (page_ops & PAGE_UNLOCK)
+ btrfs_page_end_writer_lock(fs_info, page, start, len);
+ return 0;
+}
+
static int __process_pages_contig(struct address_space *mapping,
struct page *locked_page,
- pgoff_t start_index, pgoff_t end_index,
- unsigned long page_ops, pgoff_t *index_ret);
+ u64 start, u64 end, unsigned long page_ops,
+ u64 *processed_end)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
+ pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ unsigned long nr_pages = end_index - start_index + 1;
+ unsigned long pages_processed = 0;
+ struct page *pages[16];
+ int err = 0;
+ int i;
+
+ if (page_ops & PAGE_LOCK) {
+ ASSERT(page_ops == PAGE_LOCK);
+ ASSERT(processed_end && *processed_end == start);
+ }
+
+ if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
+ mapping_set_error(mapping, -EIO);
+
+ while (nr_pages > 0) {
+ int found_pages;
+
+ found_pages = find_get_pages_contig(mapping, index,
+ min_t(unsigned long,
+ nr_pages, ARRAY_SIZE(pages)), pages);
+ if (found_pages == 0) {
+ /*
+ * Only if we're going to lock these pages can we find
+ * nothing at @index.
+ */
+ ASSERT(page_ops & PAGE_LOCK);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ for (i = 0; i < found_pages; i++) {
+ int process_ret;
+
+ process_ret = process_one_page(fs_info, mapping,
+ pages[i], locked_page, page_ops,
+ start, end);
+ if (process_ret < 0) {
+ for (; i < found_pages; i++)
+ put_page(pages[i]);
+ err = -EAGAIN;
+ goto out;
+ }
+ put_page(pages[i]);
+ pages_processed++;
+ }
+ nr_pages -= found_pages;
+ index += found_pages;
+ cond_resched();
+ }
+out:
+ if (err && processed_end) {
+ /*
+ * Update @processed_end. I know this is awful since it has
+ * two different return value patterns (inclusive vs exclusive).
+ *
+ * But the exclusive pattern is necessary if @start is 0, or we
+ * would underflow and the check against @processed_end won't
+ * work as expected.
+ */
+ if (pages_processed)
+ *processed_end = min(end,
+ ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
+ else
+ *processed_end = start;
+ }
+ return err;
+}
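
Since the @processed_end comment above mixes two conventions, a standalone sketch of the exact rule may help: when at least one page was processed the value written back is inclusive, otherwise it stays at @start, which is what avoids the underflow an inclusive "start - 1" would hit at start == 0 (a PAGE_SHIFT of 12 is assumed below):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long long min_u64(unsigned long long a, unsigned long long b)
    {
            return a < b ? a : b;
    }

    static unsigned long long processed_end(unsigned long long start,
                                            unsigned long long end,
                                            unsigned long start_index,
                                            unsigned long pages_processed)
    {
            if (pages_processed)    /* inclusive end of the processed range */
                    return min_u64(end, ((unsigned long long)(start_index +
                                    pages_processed) << PAGE_SHIFT) - 1);
            return start;           /* exclusive: nothing processed yet */
    }

    int main(void)
    {
            printf("%llu\n", processed_end(0, 8191, 0, 0));  /* 0, no underflow */
            printf("%llu\n", processed_end(0, 8191, 0, 1));  /* 4095 */
            return 0;
    }
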
static noinline void __unlock_for_delalloc(struct inode *inode,
struct page *locked_page,
@@ -1821,7 +1944,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
if (index == locked_page->index && end_index == index)
return;
- __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
+ __process_pages_contig(inode->i_mapping, locked_page, start, end,
PAGE_UNLOCK, NULL);
}
@@ -1831,19 +1954,19 @@ static noinline int lock_delalloc_pages(struct inode *inode,
u64 delalloc_end)
{
unsigned long index = delalloc_start >> PAGE_SHIFT;
- unsigned long index_ret = index;
unsigned long end_index = delalloc_end >> PAGE_SHIFT;
+ u64 processed_end = delalloc_start;
int ret;
ASSERT(locked_page);
if (index == locked_page->index && index == end_index)
return 0;
- ret = __process_pages_contig(inode->i_mapping, locked_page, index,
- end_index, PAGE_LOCK, &index_ret);
- if (ret == -EAGAIN)
+ ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
+ delalloc_end, PAGE_LOCK, &processed_end);
+ if (ret == -EAGAIN && processed_end > delalloc_start)
__unlock_for_delalloc(inode, locked_page, delalloc_start,
- (u64)index_ret << PAGE_SHIFT);
+ processed_end);
return ret;
}
@@ -1936,84 +2059,6 @@ out_failed:
return found;
}
-static int __process_pages_contig(struct address_space *mapping,
- struct page *locked_page,
- pgoff_t start_index, pgoff_t end_index,
- unsigned long page_ops, pgoff_t *index_ret)
-{
- unsigned long nr_pages = end_index - start_index + 1;
- unsigned long pages_processed = 0;
- pgoff_t index = start_index;
- struct page *pages[16];
- unsigned ret;
- int err = 0;
- int i;
-
- if (page_ops & PAGE_LOCK) {
- ASSERT(page_ops == PAGE_LOCK);
- ASSERT(index_ret && *index_ret == start_index);
- }
-
- if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
- mapping_set_error(mapping, -EIO);
-
- while (nr_pages > 0) {
- ret = find_get_pages_contig(mapping, index,
- min_t(unsigned long,
- nr_pages, ARRAY_SIZE(pages)), pages);
- if (ret == 0) {
- /*
- * Only if we're going to lock these pages,
- * can we find nothing at @index.
- */
- ASSERT(page_ops & PAGE_LOCK);
- err = -EAGAIN;
- goto out;
- }
-
- for (i = 0; i < ret; i++) {
- if (page_ops & PAGE_SET_PRIVATE2)
- SetPagePrivate2(pages[i]);
-
- if (locked_page && pages[i] == locked_page) {
- put_page(pages[i]);
- pages_processed++;
- continue;
- }
- if (page_ops & PAGE_START_WRITEBACK) {
- clear_page_dirty_for_io(pages[i]);
- set_page_writeback(pages[i]);
- }
- if (page_ops & PAGE_SET_ERROR)
- SetPageError(pages[i]);
- if (page_ops & PAGE_END_WRITEBACK)
- end_page_writeback(pages[i]);
- if (page_ops & PAGE_UNLOCK)
- unlock_page(pages[i]);
- if (page_ops & PAGE_LOCK) {
- lock_page(pages[i]);
- if (!PageDirty(pages[i]) ||
- pages[i]->mapping != mapping) {
- unlock_page(pages[i]);
- for (; i < ret; i++)
- put_page(pages[i]);
- err = -EAGAIN;
- goto out;
- }
- }
- put_page(pages[i]);
- pages_processed++;
- }
- nr_pages -= ret;
- index += ret;
- cond_resched();
- }
-out:
- if (err && index_ret)
- *index_ret = start_index + pages_processed - 1;
- return err;
-}
-
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
u32 clear_bits, unsigned long page_ops)
@@ -2021,8 +2066,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- page_ops, NULL);
+ start, end, page_ops, NULL);
}
/*
@@ -2381,13 +2425,6 @@ int clean_io_failure(struct btrfs_fs_info *fs_info,
BUG_ON(!failrec->this_mirror);
- if (failrec->in_validation) {
- /* there was no real error, just free the record */
- btrfs_debug(fs_info,
- "clean_io_failure: freeing dummy error at %llu",
- failrec->start);
- goto out;
- }
if (sb_rdonly(fs_info->sb))
goto out;
@@ -2449,7 +2486,7 @@ void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
}
static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
- u64 start, u64 end)
+ u64 start)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct io_failure_record *failrec;
@@ -2457,15 +2494,15 @@ static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ const u32 sectorsize = fs_info->sectorsize;
int ret;
u64 logical;
failrec = get_state_failrec(failure_tree, start);
if (!IS_ERR(failrec)) {
btrfs_debug(fs_info,
- "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
- failrec->logical, failrec->start, failrec->len,
- failrec->in_validation);
+ "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
+ failrec->logical, failrec->start, failrec->len);
/*
* when data can be on disk more than twice, add to failrec here
* (e.g. with a list for failed_mirror) to make
@@ -2480,10 +2517,9 @@ static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode
return ERR_PTR(-ENOMEM);
failrec->start = start;
- failrec->len = end - start + 1;
+ failrec->len = sectorsize;
failrec->this_mirror = 0;
failrec->bio_flags = 0;
- failrec->in_validation = 0;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, failrec->len);
@@ -2519,12 +2555,13 @@ static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode
free_extent_map(em);
/* Set the bits in the private failure tree */
- ret = set_extent_bits(failure_tree, start, end,
+ ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
EXTENT_LOCKED | EXTENT_DIRTY);
if (ret >= 0) {
ret = set_state_failrec(failure_tree, start, failrec);
/* Set the bits in the inode's tree */
- ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
+ ret = set_extent_bits(tree, start, start + sectorsize - 1,
+ EXTENT_DAMAGED);
} else if (ret < 0) {
kfree(failrec);
return ERR_PTR(ret);
@@ -2533,7 +2570,7 @@ static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode
return failrec;
}
-static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
+static bool btrfs_check_repairable(struct inode *inode,
struct io_failure_record *failrec,
int failed_mirror)
{
@@ -2553,39 +2590,22 @@ static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
return false;
}
+ /* The failure record should only contain one sector */
+ ASSERT(failrec->len == fs_info->sectorsize);
+
/*
- * there are two premises:
- * a) deliver good data to the caller
- * b) correct the bad sectors on disk
+ * There are two premises:
+ * a) deliver good data to the caller
+ * b) correct the bad sectors on disk
+ *
+ * Since we're only doing repair for one sector, we only need to get
+ * a good copy of the failed sector and, if we succeed, we have set up
+ * everything for repair_io_failure to do the rest for us.
*/
- if (needs_validation) {
- /*
- * to fulfill b), we need to know the exact failing sectors, as
- * we don't want to rewrite any more than the failed ones. thus,
- * we need separate read requests for the failed bio
- *
- * if the following BUG_ON triggers, our validation request got
- * merged. we need separate requests for our algorithm to work.
- */
- BUG_ON(failrec->in_validation);
- failrec->in_validation = 1;
- failrec->this_mirror = failed_mirror;
- } else {
- /*
- * we're ready to fulfill a) and b) alongside. get a good copy
- * of the failed sector and if we succeed, we have setup
- * everything for repair_io_failure to do the rest for us.
- */
- if (failrec->in_validation) {
- BUG_ON(failrec->this_mirror != failed_mirror);
- failrec->in_validation = 0;
- failrec->this_mirror = 0;
- }
- failrec->failed_mirror = failed_mirror;
+ failrec->failed_mirror = failed_mirror;
+ failrec->this_mirror++;
+ if (failrec->this_mirror == failed_mirror)
failrec->this_mirror++;
- if (failrec->this_mirror == failed_mirror)
- failrec->this_mirror++;
- }
if (failrec->this_mirror > num_copies) {
btrfs_debug(fs_info,
@@ -2597,53 +2617,11 @@ static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
return true;
}
-static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
-{
- u64 len = 0;
- const u32 blocksize = inode->i_sb->s_blocksize;
-
- /*
- * If bi_status is BLK_STS_OK, then this was a checksum error, not an
- * I/O error. In this case, we already know exactly which sector was
- * bad, so we don't need to validate.
- */
- if (bio->bi_status == BLK_STS_OK)
- return false;
-
- /*
- * We need to validate each sector individually if the failed I/O was
- * for multiple sectors.
- *
- * There are a few possible bios that can end up here:
- * 1. A buffered read bio, which is not cloned.
- * 2. A direct I/O read bio, which is cloned.
- * 3. A (buffered or direct) repair bio, which is not cloned.
- *
- * For cloned bios (case 2), we can get the size from
- * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
- * it from the bvecs.
- */
- if (bio_flagged(bio, BIO_CLONED)) {
- if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
- return true;
- } else {
- struct bio_vec *bvec;
- int i;
-
- bio_for_each_bvec_all(bvec, bio, i) {
- len += bvec->bv_len;
- if (len > blocksize)
- return true;
- }
- }
- return false;
-}
-
-blk_status_t btrfs_submit_read_repair(struct inode *inode,
- struct bio *failed_bio, u32 bio_offset,
- struct page *page, unsigned int pgoff,
- u64 start, u64 end, int failed_mirror,
- submit_bio_hook_t *submit_bio_hook)
+int btrfs_repair_one_sector(struct inode *inode,
+ struct bio *failed_bio, u32 bio_offset,
+ struct page *page, unsigned int pgoff,
+ u64 start, int failed_mirror,
+ submit_bio_hook_t *submit_bio_hook)
{
struct io_failure_record *failrec;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2651,7 +2629,6 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
const int icsum = bio_offset >> fs_info->sectorsize_bits;
- bool need_validation;
struct bio *repair_bio;
struct btrfs_io_bio *repair_io_bio;
blk_status_t status;
@@ -2661,23 +2638,19 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
- failrec = btrfs_get_io_failure_record(inode, start, end);
+ failrec = btrfs_get_io_failure_record(inode, start);
if (IS_ERR(failrec))
- return errno_to_blk_status(PTR_ERR(failrec));
+ return PTR_ERR(failrec);
- need_validation = btrfs_io_needs_validation(inode, failed_bio);
- if (!btrfs_check_repairable(inode, need_validation, failrec,
- failed_mirror)) {
+ if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
free_io_failure(failure_tree, tree, failrec);
- return BLK_STS_IOERR;
+ return -EIO;
}
repair_bio = btrfs_io_bio_alloc(1);
repair_io_bio = btrfs_io_bio(repair_bio);
repair_bio->bi_opf = REQ_OP_READ;
- if (need_validation)
- repair_bio->bi_opf |= REQ_FAILFAST_DEV;
repair_bio->bi_end_io = failed_bio->bi_end_io;
repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
repair_bio->bi_private = failed_bio->bi_private;
@@ -2695,8 +2668,8 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
repair_io_bio->iter = repair_bio->bi_iter;
btrfs_debug(btrfs_sb(inode->i_sb),
-"repair read error: submitting new read to mirror %d, in_validation=%d",
- failrec->this_mirror, failrec->in_validation);
+ "repair read error: submitting new read to mirror %d",
+ failrec->this_mirror);
status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
failrec->bio_flags);
@@ -2704,17 +2677,114 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
free_io_failure(failure_tree, tree, failrec);
bio_put(repair_bio);
}
- return status;
+ return blk_status_to_errno(status);
+}
+
+static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+
+ ASSERT(page_offset(page) <= start &&
+ start + len <= page_offset(page) + PAGE_SIZE);
+
+ if (uptodate) {
+ btrfs_page_set_uptodate(fs_info, page, start, len);
+ } else {
+ btrfs_page_clear_uptodate(fs_info, page, start, len);
+ btrfs_page_set_error(fs_info, page, start, len);
+ }
+
+ if (fs_info->sectorsize == PAGE_SIZE)
+ unlock_page(page);
+ else
+ btrfs_subpage_end_reader(fs_info, page, start, len);
+}
+
+static blk_status_t submit_read_repair(struct inode *inode,
+ struct bio *failed_bio, u32 bio_offset,
+ struct page *page, unsigned int pgoff,
+ u64 start, u64 end, int failed_mirror,
+ unsigned int error_bitmap,
+ submit_bio_hook_t *submit_bio_hook)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const u32 sectorsize = fs_info->sectorsize;
+ const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
+ int error = 0;
+ int i;
+
+ BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
+
+ /* We're here because we had some read errors or csum mismatch */
+ ASSERT(error_bitmap);
+
+ /*
+ * We only get called on buffered IO, thus the page must be mapped
+ * and the bio must not be cloned.
+ */
+ ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
+
+ /* Iterate through all the sectors in the range */
+ for (i = 0; i < nr_bits; i++) {
+ const unsigned int offset = i * sectorsize;
+ struct extent_state *cached = NULL;
+ bool uptodate = false;
+ int ret;
+
+ if (!(error_bitmap & (1U << i))) {
+ /*
+ * This sector has no error; just end the page read
+ * and unlock the range.
+ */
+ uptodate = true;
+ goto next;
+ }
+
+ ret = btrfs_repair_one_sector(inode, failed_bio,
+ bio_offset + offset,
+ page, pgoff + offset, start + offset,
+ failed_mirror, submit_bio_hook);
+ if (!ret) {
+ /*
+ * We have submitted the read repair; the page release
+ * will be handled by the endio function of the
+ * submitted repair bio.
+ * Thus we don't need to do anything here.
+ */
+ continue;
+ }
+ /*
+ * Repair failed; just record the error but still continue,
+ * otherwise the remaining sectors will not be properly unlocked.
+ */
+ if (!error)
+ error = ret;
+next:
+ end_page_read(page, uptodate, start + offset, sectorsize);
+ if (uptodate)
+ set_extent_uptodate(&BTRFS_I(inode)->io_tree,
+ start + offset,
+ start + offset + sectorsize - 1,
+ &cached, GFP_ATOMIC);
+ unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
+ start + offset,
+ start + offset + sectorsize - 1,
+ &cached);
+ }
+ return errno_to_blk_status(error);
}
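
To illustrate the per-sector walk above: bit i of @error_bitmap corresponds to the sector at @start + i * sectorsize. A hedged userspace sketch (sector size, offsets, and bitmap value are made up for the example):

#include <stdio.h>

int main(void)
{
	const unsigned int sectorsize = 4096;      /* assumed sector size */
	unsigned long long start = 65536;          /* file offset of range */
	unsigned long long end = start + 4 * sectorsize - 1;
	unsigned int error_bitmap = 0x5;           /* sectors 0 and 2 bad */
	int nr_bits = (int)((end + 1 - start) / sectorsize);
	int i;

	for (i = 0; i < nr_bits; i++) {
		unsigned long long off = start +
			(unsigned long long)i * sectorsize;

		if (error_bitmap & (1U << i))
			printf("repair sector at offset %llu\n", off);
		else
			printf("sector at offset %llu is good\n", off);
	}
	return 0;
}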
/* lots and lots of room for performance fixes in the end_bio funcs */
void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
+ struct btrfs_inode *inode;
int uptodate = (err == 0);
int ret = 0;
- btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
+ ASSERT(page && page->mapping);
+ inode = BTRFS_I(page->mapping->host);
+ btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
if (!uptodate) {
ClearPageUptodate(page);
@@ -2747,25 +2817,20 @@ static void end_bio_extent_writepage(struct bio *bio)
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const u32 sectorsize = fs_info->sectorsize;
- /* We always issue full-page reads, but if some block
- * in a page fails to read, blk_update_request() will
- * advance bv_offset and adjust bv_len to compensate.
- * Print a warning for nonzero offsets, and an error
- * if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
- if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
- btrfs_err(fs_info,
- "partial page write in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else
- btrfs_info(fs_info,
- "incomplete page write in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- }
+ /* Our read/write should always be sector aligned. */
+ if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
+ btrfs_err(fs_info,
+ "partial page write in btrfs with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
+ else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
+ btrfs_info(fs_info,
+ "incomplete page write with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
- start = page_offset(page);
- end = start + bvec->bv_offset + bvec->bv_len - 1;
+ start = page_offset(page) + bvec->bv_offset;
+ end = start + bvec->bv_len - 1;
if (first_bvec) {
btrfs_record_physical_zoned(inode, start, bio);
@@ -2773,7 +2838,8 @@ static void end_bio_extent_writepage(struct bio *bio)
}
end_extent_writepage(page, error, start, end);
- end_page_writeback(page);
+
+ btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
}
bio_put(bio);
@@ -2862,30 +2928,6 @@ static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
}
-static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
-
- ASSERT(page_offset(page) <= start &&
- start + len <= page_offset(page) + PAGE_SIZE);
-
- if (uptodate) {
- btrfs_page_set_uptodate(fs_info, page, start, len);
- } else {
- btrfs_page_clear_uptodate(fs_info, page, start, len);
- btrfs_page_set_error(fs_info, page, start, len);
- }
-
- if (fs_info->sectorsize == PAGE_SIZE)
- unlock_page(page);
- else if (is_data_inode(page->mapping->host))
- /*
- * For subpage data, unlock the page if we're the last reader.
- * For subpage metadata, page lock is not utilized for read.
- */
- btrfs_subpage_end_reader(fs_info, page, start, len);
-}
-
/*
 * Find extent buffer for a given bytenr.
*
@@ -2929,7 +2971,6 @@ static struct extent_buffer *find_extent_buffer_readpage(
static void end_bio_extent_readpage(struct bio *bio)
{
struct bio_vec *bvec;
- int uptodate = !bio->bi_status;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct extent_io_tree *tree, *failure_tree;
struct processed_extent processed = { 0 };
@@ -2944,10 +2985,12 @@ static void end_bio_extent_readpage(struct bio *bio)
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
+ bool uptodate = !bio->bi_status;
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const u32 sectorsize = fs_info->sectorsize;
+ unsigned int error_bitmap = (unsigned int)-1;
u64 start;
u64 end;
u32 len;
@@ -2982,14 +3025,16 @@ static void end_bio_extent_readpage(struct bio *bio)
mirror = io_bio->mirror_num;
if (likely(uptodate)) {
- if (is_data_inode(inode))
- ret = btrfs_verify_data_csum(io_bio,
+ if (is_data_inode(inode)) {
+ error_bitmap = btrfs_verify_data_csum(io_bio,
bio_offset, page, start, end);
- else
+ ret = error_bitmap;
+ } else {
ret = btrfs_validate_metadata_buffer(io_bio,
page, start, end, mirror);
+ }
if (ret)
- uptodate = 0;
+ uptodate = false;
else
clean_io_failure(BTRFS_I(inode)->root->fs_info,
failure_tree, tree, start,
@@ -3001,27 +3046,18 @@ static void end_bio_extent_readpage(struct bio *bio)
goto readpage_ok;
if (is_data_inode(inode)) {
-
/*
- * The generic bio_readpage_error handles errors the
- * following way: If possible, new read requests are
- * created and submitted and will end up in
- * end_bio_extent_readpage as well (if we're lucky,
- * not in the !uptodate case). In that case it returns
- * 0 and we just go on with the next page in our bio.
- * If it can't handle the error it will return -EIO and
- * we remain responsible for that page.
+ * submit_read_repair() will handle all the good
+ * and bad sectors; we just continue to the next bvec.
*/
- if (!btrfs_submit_read_repair(inode, bio, bio_offset,
- page,
- start - page_offset(page),
- start, end, mirror,
- btrfs_submit_data_bio)) {
- uptodate = !bio->bi_status;
- ASSERT(bio_offset + len > bio_offset);
- bio_offset += len;
- continue;
- }
+ submit_read_repair(inode, bio, bio_offset, page,
+ start - page_offset(page), start,
+ end, mirror, error_bitmap,
+ btrfs_submit_data_bio);
+
+ ASSERT(bio_offset + len > bio_offset);
+ bio_offset += len;
+ continue;
} else {
struct extent_buffer *eb;
@@ -3151,42 +3187,99 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
*
* Return true if successfully page added. Otherwise, return false.
*/
-static bool btrfs_bio_add_page(struct bio *bio, struct page *page,
+static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
+ struct page *page,
u64 disk_bytenr, unsigned int size,
unsigned int pg_offset,
- unsigned long prev_bio_flags,
unsigned long bio_flags)
{
+ struct bio *bio = bio_ctrl->bio;
+ u32 bio_size = bio->bi_iter.bi_size;
const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
bool contig;
int ret;
- if (prev_bio_flags != bio_flags)
+ ASSERT(bio);
+ /* The limit should be calculated when bio_ctrl->bio is allocated */
+ ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
+ if (bio_ctrl->bio_flags != bio_flags)
return false;
- if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
+ if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
contig = bio->bi_iter.bi_sector == sector;
else
contig = bio_end_sector(bio) == sector;
if (!contig)
return false;
- if (btrfs_bio_fits_in_stripe(page, size, bio, bio_flags))
+ if (bio_size + size > bio_ctrl->len_to_oe_boundary ||
+ bio_size + size > bio_ctrl->len_to_stripe_boundary)
return false;
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- struct page *first_page = bio_first_bvec_all(bio)->bv_page;
-
- if (!btrfs_bio_fits_in_ordered_extent(first_page, bio, size))
- return false;
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND)
ret = bio_add_zone_append_page(bio, page, size, pg_offset);
- } else {
+ else
ret = bio_add_page(bio, page, size, pg_offset);
- }
return ret == size;
}
+static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
+ struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_io_geometry geom;
+ struct btrfs_ordered_extent *ordered;
+ struct extent_map *em;
+ u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
+ int ret;
+
+ /*
+ * Pages for compressed extents are never submitted to disk directly,
+ * thus they have no real boundary; just set both limits to U32_MAX.
+ *
+ * The real split happens for the compressed bio itself, in
+ * btrfs_submit_compressed_read/write().
+ */
+ if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
+ bio_ctrl->len_to_oe_boundary = U32_MAX;
+ bio_ctrl->len_to_stripe_boundary = U32_MAX;
+ return 0;
+ }
+ em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+ ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
+ logical, &geom);
+ free_extent_map(em);
+ if (ret < 0)
+ return ret;
+ if (geom.len > U32_MAX)
+ bio_ctrl->len_to_stripe_boundary = U32_MAX;
+ else
+ bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
+
+ if (!btrfs_is_zoned(fs_info) ||
+ bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
+ bio_ctrl->len_to_oe_boundary = U32_MAX;
+ return 0;
+ }
+
+ ASSERT(fs_info->max_zone_append_size > 0);
+ /* Ordered extent not yet created, so we're good */
+ ordered = btrfs_lookup_ordered_extent(inode, logical);
+ if (!ordered) {
+ bio_ctrl->len_to_oe_boundary = U32_MAX;
+ return 0;
+ }
+
+ bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
+ ordered->disk_bytenr + ordered->disk_num_bytes - logical);
+ btrfs_put_ordered_extent(ordered);
+ return 0;
+}
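
A minimal userspace sketch of how the two boundary lengths computed here bound bio growth in btrfs_bio_add_page(); the struct and helper below are stand-ins, not the kernel types:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the two limits carried in btrfs_bio_ctrl. */
struct bio_limits {
	uint32_t len_to_stripe_boundary;
	uint32_t len_to_oe_boundary;
};

/* Can @size more bytes join a bio that already holds @bio_size bytes? */
static int page_fits(const struct bio_limits *l, uint32_t bio_size,
		     uint32_t size)
{
	return bio_size + size <= l->len_to_oe_boundary &&
	       bio_size + size <= l->len_to_stripe_boundary;
}

int main(void)
{
	/* 64K to the stripe boundary, 16K to the ordered extent boundary. */
	struct bio_limits l = { 64 * 1024, 16 * 1024 };

	printf("%d\n", page_fits(&l, 12 * 1024, 4 * 1024)); /* 16K: fits */
	printf("%d\n", page_fits(&l, 12 * 1024, 8 * 1024)); /* 20K: crosses OE */
	return 0;
}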
+
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @wbc: optional writeback control for io accounting
@@ -3203,12 +3296,11 @@ static bool btrfs_bio_add_page(struct bio *bio, struct page *page,
*/
static int submit_extent_page(unsigned int opf,
struct writeback_control *wbc,
+ struct btrfs_bio_ctrl *bio_ctrl,
struct page *page, u64 disk_bytenr,
size_t size, unsigned long pg_offset,
- struct bio **bio_ret,
bio_end_io_t end_io_func,
int mirror_num,
- unsigned long prev_bio_flags,
unsigned long bio_flags,
bool force_bio_submit)
{
@@ -3219,19 +3311,19 @@ static int submit_extent_page(unsigned int opf,
struct extent_io_tree *tree = &inode->io_tree;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- ASSERT(bio_ret);
+ ASSERT(bio_ctrl);
- if (*bio_ret) {
- bio = *bio_ret;
+ ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
+ pg_offset + size <= PAGE_SIZE);
+ if (bio_ctrl->bio) {
+ bio = bio_ctrl->bio;
if (force_bio_submit ||
- !btrfs_bio_add_page(bio, page, disk_bytenr, io_size,
- pg_offset, prev_bio_flags, bio_flags)) {
- ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
- if (ret < 0) {
- *bio_ret = NULL;
+ !btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, io_size,
+ pg_offset, bio_flags)) {
+ ret = submit_one_bio(bio, mirror_num, bio_ctrl->bio_flags);
+ bio_ctrl->bio = NULL;
+ if (ret < 0)
return ret;
- }
- bio = NULL;
} else {
if (wbc)
wbc_account_cgroup_owner(wbc, page, io_size);
@@ -3254,22 +3346,18 @@ static int submit_extent_page(unsigned int opf,
wbc_account_cgroup_owner(wbc, page, io_size);
}
if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
- struct extent_map *em;
- struct map_lookup *map;
+ struct btrfs_device *device;
- em = btrfs_get_chunk_map(fs_info, disk_bytenr, io_size);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ device = btrfs_zoned_get_device(fs_info, disk_bytenr, io_size);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
- map = em->map_lookup;
- /* We only support single profile for now */
- ASSERT(map->num_stripes == 1);
- btrfs_io_bio(bio)->device = map->stripes[0].dev;
-
- free_extent_map(em);
+ btrfs_io_bio(bio)->device = device;
}
- *bio_ret = bio;
+ bio_ctrl->bio = bio;
+ bio_ctrl->bio_flags = bio_flags;
+ ret = calc_bio_boundaries(bio_ctrl, inode);
return ret;
}
@@ -3382,7 +3470,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* return 0 on success, otherwise return error
*/
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
- struct bio **bio, unsigned long *bio_flags,
+ struct btrfs_bio_ctrl *bio_ctrl,
unsigned int read_flags, u64 *prev_em_start)
{
struct inode *inode = page->mapping->host;
@@ -3558,15 +3646,13 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
}
ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
- page, disk_bytenr, iosize,
- pg_offset, bio,
+ bio_ctrl, page, disk_bytenr, iosize,
+ pg_offset,
end_bio_extent_readpage, 0,
- *bio_flags,
this_bio_flag,
force_bio_submit);
if (!ret) {
nr++;
- *bio_flags = this_bio_flag;
} else {
unlock_extent(tree, cur, cur + iosize - 1);
end_page_read(page, false, cur, iosize);
@@ -3580,11 +3666,10 @@ out:
}
static inline void contiguous_readpages(struct page *pages[], int nr_pages,
- u64 start, u64 end,
- struct extent_map **em_cached,
- struct bio **bio,
- unsigned long *bio_flags,
- u64 *prev_em_start)
+ u64 start, u64 end,
+ struct extent_map **em_cached,
+ struct btrfs_bio_ctrl *bio_ctrl,
+ u64 *prev_em_start)
{
struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
int index;
@@ -3592,7 +3677,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
for (index = 0; index < nr_pages; index++) {
- btrfs_do_readpage(pages[index], em_cached, bio, bio_flags,
+ btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
REQ_RAHEAD, prev_em_start);
put_page(pages[index]);
}
@@ -3680,6 +3765,54 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
}
/*
+ * Find the first byte we need to write.
+ *
+ * For subpage, one page can contain several sectors, and
+ * __extent_writepage_io() will just grab all extent maps in the page
+ * range and try to submit all non-inline/non-compressed extents.
+ *
+ * This is a big problem for subpage; we must not re-submit already
+ * written data at all.
+ * This function looks up the subpage dirty bitmap to find which range
+ * we really need to submit.
+ *
+ * Return the next dirty range in [@start, @end).
+ * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
+ */
+static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
+ struct page *page, u64 *start, u64 *end)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ u64 orig_start = *start;
+ /* Declare as unsigned long so we can use bitmap ops */
+ unsigned long dirty_bitmap;
+ unsigned long flags;
+ int nbits = (orig_start - page_offset(page)) >> fs_info->sectorsize_bits;
+ int range_start_bit = nbits;
+ int range_end_bit;
+
+ /*
+ * For the regular sector size == page size case, since one page only
+ * contains one sector, we return the page offset directly.
+ */
+ if (fs_info->sectorsize == PAGE_SIZE) {
+ *start = page_offset(page);
+ *end = page_offset(page) + PAGE_SIZE;
+ return;
+ }
+
+ /* We should have the page locked, but just in case */
+ spin_lock_irqsave(&subpage->lock, flags);
+ dirty_bitmap = subpage->dirty_bitmap;
+ spin_unlock_irqrestore(&subpage->lock, flags);
+
+ bitmap_next_set_region(&dirty_bitmap, &range_start_bit, &range_end_bit,
+ BTRFS_SUBPAGE_BITMAP_SIZE);
+ *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
+ *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
+}
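
The bitmap walk can be illustrated with a standalone sketch; the open-coded scan below emulates bitmap_next_set_region() for a 16-bit subpage bitmap (4K sectors in a 64K page, values assumed):

#include <stdio.h>

int main(void)
{
	const unsigned int sectorsize = 4096;     /* assumed: 4K sectors */
	const unsigned long long page_start = 0;  /* page_offset(page) */
	const int nbits = 16;                     /* 64K page / 4K sectors */
	unsigned int dirty_bitmap = 0x0c;         /* sectors 2-3 dirty */
	int start_bit = 0, end_bit;

	/* Emulate bitmap_next_set_region(): find the first set bit... */
	while (start_bit < nbits && !(dirty_bitmap & (1U << start_bit)))
		start_bit++;
	/* ...then the first clear bit after it. */
	end_bit = start_bit;
	while (end_bit < nbits && (dirty_bitmap & (1U << end_bit)))
		end_bit++;

	printf("next dirty range: [%llu, %llu)\n",
	       page_start + (unsigned long long)start_bit * sectorsize,
	       page_start + (unsigned long long)end_bit * sectorsize);
	return 0;
}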
+
+/*
* helper for __extent_writepage. This calls the writepage start hooks,
* and does the loop to map the page into extents and bios.
*
@@ -3696,7 +3829,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
int *nr_ret)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct extent_io_tree *tree = &inode->io_tree;
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
u64 cur = start;
@@ -3727,15 +3859,26 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
while (cur <= end) {
u64 disk_bytenr;
u64 em_end;
+ u64 dirty_range_start = cur;
+ u64 dirty_range_end;
u32 iosize;
if (cur >= i_size) {
- btrfs_writepage_endio_finish_ordered(page, cur, end, 1);
+ btrfs_writepage_endio_finish_ordered(inode, page, cur,
+ end, 1);
break;
}
+
+ find_next_dirty_byte(fs_info, page, &dirty_range_start,
+ &dirty_range_end);
+ if (cur < dirty_range_start) {
+ cur = dirty_range_start;
+ continue;
+ }
+
em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
if (IS_ERR_OR_NULL(em)) {
- SetPageError(page);
+ btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
ret = PTR_ERR_OR_ZERO(em);
break;
}
@@ -3750,8 +3893,11 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
disk_bytenr = em->block_start + extent_offset;
- /* Note that em_end from extent_map_end() is exclusive */
- iosize = min(em_end, end + 1) - cur;
+ /*
+ * Note that em_end from extent_map_end() and dirty_range_end from
+ * find_next_dirty_byte() are both exclusive.
+ */
+ iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
if (btrfs_use_zone_append(inode, em->block_start))
opf = REQ_OP_ZONE_APPEND;
@@ -3768,28 +3914,38 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
if (compressed)
nr++;
else
- btrfs_writepage_endio_finish_ordered(page, cur,
- cur + iosize - 1, 1);
+ btrfs_writepage_endio_finish_ordered(inode,
+ page, cur, cur + iosize - 1, 1);
cur += iosize;
continue;
}
- btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
+ btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
btrfs_err(inode->root->fs_info,
"page %lu not writeback, cur %llu end %llu",
page->index, cur, end);
}
- ret = submit_extent_page(opf | write_flags, wbc, page,
+ /*
+ * Although the PageDirty bit is cleared before entering this
+ * function, the subpage dirty bit is not.
+ * So clear the subpage dirty bit here, so that next time we won't
+ * submit the page for a range already written to disk.
+ */
+ btrfs_page_clear_dirty(fs_info, page, cur, iosize);
+
+ ret = submit_extent_page(opf | write_flags, wbc,
+ &epd->bio_ctrl, page,
disk_bytenr, iosize,
- cur - page_offset(page), &epd->bio,
+ cur - page_offset(page),
end_bio_extent_writepage,
- 0, 0, 0, false);
+ 0, 0, false);
if (ret) {
- SetPageError(page);
+ btrfs_page_set_error(fs_info, page, cur, iosize);
if (PageWriteback(page))
- end_page_writeback(page);
+ btrfs_page_clear_writeback(fs_info, page, cur,
+ iosize);
}
cur += iosize;
@@ -4098,12 +4254,15 @@ static struct extent_buffer *find_extent_buffer_nolock(
* Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
* after all extent buffers in the page has finished their writeback.
*/
-static void end_bio_subpage_eb_writepage(struct btrfs_fs_info *fs_info,
- struct bio *bio)
+static void end_bio_subpage_eb_writepage(struct bio *bio)
{
+ struct btrfs_fs_info *fs_info;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
+ fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
+ ASSERT(fs_info->sectorsize < PAGE_SIZE);
+
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
@@ -4154,16 +4313,11 @@ static void end_bio_subpage_eb_writepage(struct btrfs_fs_info *fs_info,
static void end_bio_extent_buffer_writepage(struct bio *bio)
{
- struct btrfs_fs_info *fs_info;
struct bio_vec *bvec;
struct extent_buffer *eb;
int done;
struct bvec_iter_all iter_all;
- fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
- if (fs_info->sectorsize < PAGE_SIZE)
- return end_bio_subpage_eb_writepage(fs_info, bio);
-
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
@@ -4189,12 +4343,34 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
bio_put(bio);
}
+static void prepare_eb_write(struct extent_buffer *eb)
+{
+ u32 nritems;
+ unsigned long start;
+ unsigned long end;
+
+ clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
+ atomic_set(&eb->io_pages, num_extent_pages(eb));
+
+ /* Set btree blocks beyond nritems to 0 to avoid stale content */
+ nritems = btrfs_header_nritems(eb);
+ if (btrfs_header_level(eb) > 0) {
+ end = btrfs_node_key_ptr_offset(nritems);
+ memzero_extent_buffer(eb, end, eb->len - end);
+ } else {
+ /*
+ * Leaf:
+ * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
+ */
+ start = btrfs_item_nr_offset(nritems);
+ end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
+ memzero_extent_buffer(eb, start, end - start);
+ }
+}
+
/*
* Unlike the work in write_one_eb(), we rely completely on extent locking.
* Page locking is only utilized at minimum to keep the VMM code happy.
- *
- * Caller should still call write_one_eb() other than this function directly.
- * As write_one_eb() has extra preparation before submitting the extent buffer.
*/
static int write_one_subpage_eb(struct extent_buffer *eb,
struct writeback_control *wbc,
@@ -4206,6 +4382,8 @@ static int write_one_subpage_eb(struct extent_buffer *eb,
bool no_dirty_ebs = false;
int ret;
+ prepare_eb_write(eb);
+
/* clear_page_dirty_for_io() in subpage helper needs page locked */
lock_page(page);
btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
@@ -4216,10 +4394,10 @@ static int write_one_subpage_eb(struct extent_buffer *eb,
if (no_dirty_ebs)
clear_page_dirty_for_io(page);
- ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, page,
- eb->start, eb->len, eb->start - page_offset(page),
- &epd->bio, end_bio_extent_buffer_writepage, 0, 0, 0,
- false);
+ ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
+ &epd->bio_ctrl, page, eb->start, eb->len,
+ eb->start - page_offset(page),
+ end_bio_subpage_eb_writepage, 0, 0, false);
if (ret) {
btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
set_btree_ioerr(page, eb);
@@ -4244,45 +4422,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct extent_page_data *epd)
{
u64 disk_bytenr = eb->start;
- u32 nritems;
int i, num_pages;
- unsigned long start, end;
unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
int ret = 0;
- clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
- num_pages = num_extent_pages(eb);
- atomic_set(&eb->io_pages, num_pages);
-
- /* set btree blocks beyond nritems with 0 to avoid stale content. */
- nritems = btrfs_header_nritems(eb);
- if (btrfs_header_level(eb) > 0) {
- end = btrfs_node_key_ptr_offset(nritems);
-
- memzero_extent_buffer(eb, end, eb->len - end);
- } else {
- /*
- * leaf:
- * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
- */
- start = btrfs_item_nr_offset(nritems);
- end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
- memzero_extent_buffer(eb, start, end - start);
- }
-
- if (eb->fs_info->sectorsize < PAGE_SIZE)
- return write_one_subpage_eb(eb, wbc, epd);
+ prepare_eb_write(eb);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- p, disk_bytenr, PAGE_SIZE, 0,
- &epd->bio,
+ &epd->bio_ctrl, p, disk_bytenr,
+ PAGE_SIZE, 0,
end_bio_extent_buffer_writepage,
- 0, 0, 0, false);
+ 0, 0, false);
if (ret) {
set_btree_ioerr(p, eb);
if (PageWriteback(p))
@@ -4386,7 +4542,7 @@ static int submit_eb_subpage(struct page *page,
free_extent_buffer(eb);
goto cleanup;
}
- ret = write_one_eb(eb, wbc, epd);
+ ret = write_one_subpage_eb(eb, wbc, epd);
free_extent_buffer(eb);
if (ret < 0)
goto cleanup;
@@ -4498,7 +4654,7 @@ int btree_write_cache_pages(struct address_space *mapping,
{
struct extent_buffer *eb_context = NULL;
struct extent_page_data epd = {
- .bio = NULL,
+ .bio_ctrl = { 0 },
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
@@ -4780,7 +4936,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
struct extent_page_data epd = {
- .bio = NULL,
+ .bio_ctrl = { 0 },
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
@@ -4807,7 +4963,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
PAGE_SHIFT;
struct extent_page_data epd = {
- .bio = NULL,
+ .bio_ctrl = { 0 },
.extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL,
};
@@ -4827,8 +4983,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
if (clear_page_dirty_for_io(page))
ret = __extent_writepage(page, &wbc_writepages, &epd);
else {
- btrfs_writepage_endio_finish_ordered(page, start,
- start + PAGE_SIZE - 1, 1);
+ btrfs_writepage_endio_finish_ordered(BTRFS_I(inode),
+ page, start, start + PAGE_SIZE - 1, 1);
unlock_page(page);
}
put_page(page);
@@ -4850,7 +5006,7 @@ int extent_writepages(struct address_space *mapping,
{
int ret = 0;
struct extent_page_data epd = {
- .bio = NULL,
+ .bio_ctrl = { 0 },
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
@@ -4867,8 +5023,7 @@ int extent_writepages(struct address_space *mapping,
void extent_readahead(struct readahead_control *rac)
{
- struct bio *bio = NULL;
- unsigned long bio_flags = 0;
+ struct btrfs_bio_ctrl bio_ctrl = { 0 };
struct page *pagepool[16];
struct extent_map *em_cached = NULL;
u64 prev_em_start = (u64)-1;
@@ -4879,14 +5034,14 @@ void extent_readahead(struct readahead_control *rac)
u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
contiguous_readpages(pagepool, nr, contig_start, contig_end,
- &em_cached, &bio, &bio_flags, &prev_em_start);
+ &em_cached, &bio_ctrl, &prev_em_start);
}
if (em_cached)
free_extent_map(em_cached);
- if (bio) {
- if (submit_one_bio(bio, 0, bio_flags))
+ if (bio_ctrl.bio) {
+ if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
return;
}
}
@@ -5429,6 +5584,12 @@ static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
subpage = (struct btrfs_subpage *)page->private;
if (atomic_read(&subpage->eb_refs))
return true;
+ /*
+ * Even if there are no eb refs here, we may still have an
+ * end_page_read() call relying on page::private.
+ */
+ if (atomic_read(&subpage->readers))
+ return true;
}
return false;
}
@@ -5489,7 +5650,7 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
/*
* We can only detach the page private if there are no other ebs in the
- * page range.
+ * page range and no unfinished IO.
*/
if (!page_range_has_eb(fs_info, page))
btrfs_detach_subpage(fs_info, page);
@@ -6176,7 +6337,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
struct btrfs_fs_info *fs_info = eb->fs_info;
struct extent_io_tree *io_tree;
struct page *page = eb->pages[0];
- struct bio *bio = NULL;
+ struct btrfs_bio_ctrl bio_ctrl = { 0 };
int ret = 0;
ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
@@ -6184,10 +6345,8 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
if (wait == WAIT_NONE) {
- ret = try_lock_extent(io_tree, eb->start,
- eb->start + eb->len - 1);
- if (ret <= 0)
- return ret;
+ if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
+ return -EAGAIN;
} else {
ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
if (ret < 0)
@@ -6209,9 +6368,11 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
check_buffer_tree_ref(eb);
btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
- ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, page, eb->start,
- eb->len, eb->start - page_offset(page), &bio,
- end_bio_extent_readpage, mirror_num, 0, 0,
+ btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
+ ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
+ page, eb->start, eb->len,
+ eb->start - page_offset(page),
+ end_bio_extent_readpage, mirror_num, 0,
true);
if (ret) {
/*
@@ -6221,10 +6382,11 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
*/
atomic_dec(&eb->io_pages);
}
- if (bio) {
+ if (bio_ctrl.bio) {
int tmp;
- tmp = submit_one_bio(bio, mirror_num, 0);
+ tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
+ bio_ctrl.bio = NULL;
if (tmp < 0)
return tmp;
}
@@ -6247,8 +6409,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
int all_uptodate = 1;
int num_pages;
unsigned long num_reads = 0;
- struct bio *bio = NULL;
- unsigned long bio_flags = 0;
+ struct btrfs_bio_ctrl bio_ctrl = { 0 };
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
@@ -6312,9 +6473,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
ClearPageError(page);
err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
- page, page_offset(page), PAGE_SIZE, 0,
- &bio, end_bio_extent_readpage,
- mirror_num, 0, 0, false);
+ &bio_ctrl, page, page_offset(page),
+ PAGE_SIZE, 0, end_bio_extent_readpage,
+ mirror_num, 0, false);
if (err) {
/*
* We failed to submit the bio so it's the
@@ -6331,8 +6492,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
}
}
- if (bio) {
- err = submit_one_bio(bio, mirror_num, bio_flags);
+ if (bio_ctrl.bio) {
+ err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
+ bio_ctrl.bio = NULL;
if (err)
return err;
}
@@ -6515,9 +6677,10 @@ void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
char *kaddr;
assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
- memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
- BTRFS_FSID_SIZE);
+ kaddr = page_address(eb->pages[0]) +
+ get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
+ chunk_tree_uuid));
+ memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
}
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
@@ -6525,9 +6688,9 @@ void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
char *kaddr;
assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
- memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
- BTRFS_FSID_SIZE);
+ kaddr = page_address(eb->pages[0]) +
+ get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
+ memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
}
void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 227215a5722c..62027f551b44 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -39,7 +39,7 @@ enum {
/* Page starts writeback, clear dirty bit and set writeback bit */
#define PAGE_START_WRITEBACK (1 << 1)
#define PAGE_END_WRITEBACK (1 << 2)
-#define PAGE_SET_PRIVATE2 (1 << 3)
+#define PAGE_SET_ORDERED (1 << 3)
#define PAGE_SET_ERROR (1 << 4)
#define PAGE_LOCK (1 << 5)
@@ -102,6 +102,17 @@ struct extent_buffer {
};
/*
+ * Structure to record info about the bio being assembled, and other info like
+ * how many bytes are left before the stripe/ordered extent boundary.
+ */
+struct btrfs_bio_ctrl {
+ struct bio *bio;
+ unsigned long bio_flags;
+ u32 len_to_stripe_boundary;
+ u32 len_to_oe_boundary;
+};
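
The callers converted in this patch all follow the same assemble-then-flush contract around this struct. A userspace mock of that pattern, with toy types that are not the kernel API, modeling only the ordered-extent boundary:

#include <stdio.h>

struct bio {
	unsigned int size;
};

struct btrfs_bio_ctrl_mock {
	struct bio *bio;
	unsigned int len_to_oe_boundary; /* only this limit is modeled */
};

static struct bio the_bio; /* static storage keeps the mock allocation-free */

static void submit_one_bio_mock(struct bio *bio)
{
	printf("submitting bio of %u bytes\n", bio->size);
	bio->size = 0;
}

/* Add @size bytes, flushing first when the boundary would be crossed. */
static void submit_extent_page_mock(struct btrfs_bio_ctrl_mock *ctrl,
				    unsigned int size)
{
	if (ctrl->bio && ctrl->bio->size + size > ctrl->len_to_oe_boundary) {
		submit_one_bio_mock(ctrl->bio);
		ctrl->bio = NULL;
	}
	if (!ctrl->bio)
		ctrl->bio = &the_bio;
	ctrl->bio->size += size;
}

int main(void)
{
	struct btrfs_bio_ctrl_mock ctrl = { 0, 16 * 1024 };
	int i;

	for (i = 0; i < 5; i++)
		submit_extent_page_mock(&ctrl, 4 * 1024);
	if (ctrl.bio) /* the caller flushes the tail, as in the patch */
		submit_one_bio_mock(ctrl.bio);
	return 0;
}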
+
+/*
* Structure to record how many bytes and which ranges are set/cleared
*/
struct extent_changeset {
@@ -169,7 +180,7 @@ int try_release_extent_buffer(struct page *page);
int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags);
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
- struct bio **bio, unsigned long *bio_flags,
+ struct btrfs_bio_ctrl *bio_ctrl,
unsigned int read_flags, u64 *prev_em_start);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
@@ -281,7 +292,7 @@ int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);
* When IO fails, either with EIO or csum verification fails, we
* try other mirrors that might have a good copy of the data. This
* io_failure_record is used to record state as we go through all the
- * mirrors. If another mirror has good data, the page is set up to date
+ * mirrors. If another mirror has good data, the sector is set up to date
* and things continue. If a good mirror can't be found, the original
* bio end_io callback is called to indicate things have failed.
*/
@@ -293,15 +304,13 @@ struct io_failure_record {
unsigned long bio_flags;
int this_mirror;
int failed_mirror;
- int in_validation;
};
-
-blk_status_t btrfs_submit_read_repair(struct inode *inode,
- struct bio *failed_bio, u32 bio_offset,
- struct page *page, unsigned int pgoff,
- u64 start, u64 end, int failed_mirror,
- submit_bio_hook_t *submit_bio_hook);
+int btrfs_repair_one_sector(struct inode *inode,
+ struct bio *failed_bio, u32 bio_offset,
+ struct page *page, unsigned int pgoff,
+ u64 start, int failed_mirror,
+ submit_bio_hook_t *submit_bio_hook);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 294602f139ef..df6631eefc65 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -618,7 +618,7 @@ fail:
* @file_start: offset in file this bio begins to describe
* @contig: Boolean. If true/1 means all bio vecs in this bio are
* contiguous and they begin at @file_start in the file. False/0
- * means this bio can contains potentially discontigous bio vecs
+ * means this bio can contain potentially discontiguous bio vecs
* so the logical offset of each should be calculated separately.
*/
blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
u64 end_byte = bytenr + len;
u64 csum_end;
struct extent_buffer *leaf;
- int ret;
+ int ret = 0;
const u32 csum_size = fs_info->csum_size;
u32 blocksize_bits = fs_info->sectorsize_bits;
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, root, path,
path->slots[0], del_nr);
if (ret)
- goto out;
+ break;
if (key.offset == bytenr)
break;
} else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_split_item(trans, root, path, &key, offset);
if (ret && ret != -EAGAIN) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ break;
}
+ ret = 0;
key.offset = end_byte - 1;
} else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
}
- ret = 0;
-out:
btrfs_free_path(path);
return ret;
}
+static int find_next_csum_offset(struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 *next_offset)
+{
+ const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+ struct btrfs_key found_key;
+ int slot = path->slots[0] + 1;
+ int ret;
+
+ if (nritems == 0 || slot >= nritems) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ *next_offset = (u64)-1;
+ return 0;
+ }
+ slot = path->slots[0];
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+ found_key.type != BTRFS_EXTENT_CSUM_KEY)
+ *next_offset = (u64)-1;
+ else
+ *next_offset = found_key.offset;
+
+ return 0;
+}
+
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
u64 total_bytes = 0;
u64 csum_offset;
u64 bytenr;
- u32 nritems;
u32 ins_size;
int index = 0;
int found_next;
@@ -981,26 +1011,10 @@ again:
goto insert;
}
} else {
- int slot = path->slots[0] + 1;
- /* we didn't find a csum item, insert one */
- nritems = btrfs_header_nritems(path->nodes[0]);
- if (!nritems || (path->slots[0] >= nritems - 1)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- found_next = 1;
- goto insert;
- }
- slot = path->slots[0];
- }
- btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
- if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
- found_key.type != BTRFS_EXTENT_CSUM_KEY) {
- found_next = 1;
- goto insert;
- }
- next_offset = found_key.offset;
+ /* We didn't find a csum item, insert one. */
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
found_next = 1;
goto insert;
}
@@ -1056,8 +1070,48 @@ extend_csum:
tmp = sums->len - total_bytes;
tmp >>= fs_info->sectorsize_bits;
WARN_ON(tmp < 1);
+ extend_nr = max_t(int, 1, tmp);
+
+ /*
+ * A log tree can already have checksum items with a subset of
+ * the checksums we are trying to log. This can happen after
+ * doing a sequence of partial writes into prealloc extents and
+ * fsyncs in between, with a full fsync logging a larger subrange
+ * of an extent for which a previous fast fsync logged a smaller
+ * subrange. And this happens in particular due to merging file
+ * extent items when we complete an ordered extent for a range
+ * covered by a prealloc extent - this is done at
+ * btrfs_mark_extent_written().
+ *
+ * So if we try to extend the previous checksum item, which has
+ * a range that ends at the start of the range we want to insert,
+ * make sure we don't extend beyond the start offset of the next
+ * checksum item. If we are at the last item in the leaf, then
+ * forget the optimization of extending and add a new checksum
+ * item - it is not worth the complexity of releasing the path,
+ * getting the first key for the next leaf, repeating the btree
+ * search, etc, because log trees are temporary anyway and it
+ * would only save a few bytes of leaf space.
+ */
+ if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (path->slots[0] + 1 >=
+ btrfs_header_nritems(path->nodes[0])) {
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
+ found_next = 1;
+ goto insert;
+ }
+
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
+
+ tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+ if (tmp <= INT_MAX)
+ extend_nr = min_t(int, extend_nr, tmp);
+ }
- extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size;
diff = min(diff,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
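
A worked example of the log-tree clamp above as standalone C (all numbers are assumed): with the next checksum item 8 sectors ahead, extend_nr drops from 32 to 8:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	const unsigned int sectorsize_bits = 12;    /* assumed 4K sectors */
	uint64_t bytenr = 1048576;                  /* start of our range */
	uint64_t next_offset = bytenr + (8 << 12);  /* next csum item */
	int extend_nr = 32;                         /* sectors we would add */
	uint64_t tmp = (next_offset - bytenr) >> sectorsize_bits;

	if (tmp <= INT_MAX && (int)tmp < extend_nr)
		extend_nr = (int)tmp;
	/* Only 8 sectors fit before the existing log tree csum item. */
	printf("extend_nr clamped to %d\n", extend_nr);
	return 0;
}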
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3b10d98b4ebb..28a05ba47060 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -28,6 +28,7 @@
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
+#include "subpage.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
@@ -482,6 +483,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
start_pos = round_down(pos, fs_info->sectorsize);
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
+ ASSERT(num_bytes <= U32_MAX);
end_of_last_block = start_pos + num_bytes - 1;
@@ -500,9 +502,10 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
- SetPageUptodate(p);
+
+ btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
ClearPageChecked(p);
- set_page_dirty(p);
+ btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
}
/*
@@ -1094,7 +1097,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int del_nr = 0;
int del_slot = 0;
int recow;
- int ret;
+ int ret = 0;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
@@ -1315,7 +1318,7 @@ again:
}
out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
/*
@@ -2483,6 +2486,17 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
const u64 lockend,
struct extent_state **cached_state)
{
+ /*
+ * For the subpage case, if the range is not at a page boundary, we
+ * could have pages at the leading/trailing part of the range.
+ * This could lead to an endless loop, since filemap_range_has_page()
+ * will always return true.
+ * So here we need to do extra page alignment for
+ * filemap_range_has_page().
+ */
+ const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
+ const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+
while (1) {
struct btrfs_ordered_extent *ordered;
int ret;
@@ -2503,7 +2517,7 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
(ordered->file_offset + ordered->num_bytes <= lockstart ||
ordered->file_offset > lockend)) &&
!filemap_range_has_page(inode->i_mapping,
- lockstart, lockend)) {
+ page_lockstart, page_lockend)) {
if (ordered)
btrfs_put_ordered_extent(ordered);
break;
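
A standalone sketch of that alignment, assuming 64K pages and an illustrative hole range:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 65536ULL /* assume 64K pages, the subpage case */

int main(void)
{
	/* Hole punch from 4K to 128K on a 64K-page system. */
	uint64_t lockstart = 4096, lockend = 131071;
	uint64_t page_lockstart = (lockstart + PAGE_SIZE - 1) &
				  ~(PAGE_SIZE - 1);
	uint64_t page_lockend = ((lockend + 1) & ~(PAGE_SIZE - 1)) - 1;

	/* Only the page fully inside the hole is checked; the first page,
	 * which merely overlaps it, can no longer make the loop spin. */
	printf("check pages in [%llu, %llu]\n",
	       (unsigned long long)page_lockstart,
	       (unsigned long long)page_lockend);
	return 0;
}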
@@ -3034,22 +3048,20 @@ struct falloc_range {
*/
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
- struct falloc_range *prev = NULL;
struct falloc_range *range = NULL;
- if (list_empty(head))
- goto insert;
-
- /*
- * As fallocate iterate by bytenr order, we only need to check
- * the last range.
- */
- prev = list_entry(head->prev, struct falloc_range, list);
- if (prev->start + prev->len == start) {
- prev->len += len;
- return 0;
+ if (!list_empty(head)) {
+ /*
+ * As fallocate iterates by bytenr order, we only need to check
+ * the last range.
+ */
+ range = list_last_entry(head, struct falloc_range, list);
+ if (range->start + range->len == start) {
+ range->len += len;
+ return 0;
+ }
}
-insert:
+
range = kmalloc(sizeof(*range), GFP_KERNEL);
if (!range)
return -ENOMEM;
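
A userspace sketch of the merge-with-last-range fast path shown in this hunk (the struct and values are illustrative):

#include <stdio.h>
#include <stdint.h>

struct range {
	uint64_t start, len;
};

int main(void)
{
	/* fallocate walks the file in increasing bytenr order, so only
	 * the last accumulated range can ever be mergeable. */
	struct range last = { 0, 4096 };
	uint64_t start = 4096, len = 8192;

	if (last.start + last.len == start) {
		last.len += len; /* contiguous: extend instead of allocating */
		printf("merged to [%llu, %llu)\n",
		       (unsigned long long)last.start,
		       (unsigned long long)(last.start + last.len));
	}
	return 0;
}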
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4806295116d8..2131ae5b9ed7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -327,7 +327,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
* need to check for -EAGAIN.
*/
ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
- 0, BTRFS_EXTENT_DATA_KEY);
+ 0, BTRFS_EXTENT_DATA_KEY, NULL);
if (ret)
goto fail;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 33f14573f2ec..e6eb20987351 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -51,6 +51,7 @@
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
+#include "subpage.h"
struct btrfs_iget_args {
u64 ino;
@@ -166,22 +167,47 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
struct page *page;
while (index <= end_index) {
+ /*
+ * For the locked page, we will call end_extent_writepage() on it
+ * in run_delalloc_range() for the error handling. That
+ * end_extent_writepage() function will call
+ * btrfs_mark_ordered_io_finished() to clear page Ordered and
+ * run the ordered extent accounting.
+ *
+ * Here we can't just clear the Ordered bit, or
+ * btrfs_mark_ordered_io_finished() would skip the accounting
+ * for the page range, and the ordered extent will never finish.
+ */
+ if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
+ index++;
+ continue;
+ }
page = find_get_page(inode->vfs_inode.i_mapping, index);
index++;
if (!page)
continue;
- ClearPagePrivate2(page);
+
+ /*
+ * Here we just clear all Ordered bits for every page in the
+ * range, then __endio_write_update_ordered() will handle
+ * the ordered extent accounting for the range.
+ */
+ btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
+ offset, bytes);
put_page(page);
}
+ /* The locked page covers the full range, so nothing needs to be done */
+ if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
+ return;
/*
* In case this page belongs to the delalloc range being instantiated
* then skip it, since the first page of a range is going to be
* properly cleaned up by the caller of run_delalloc_range
*/
if (page_start >= offset && page_end <= (offset + bytes - 1)) {
- offset += PAGE_SIZE;
- bytes -= PAGE_SIZE;
+ bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
+ offset = page_offset(locked_page) + PAGE_SIZE;
}
return __endio_write_update_ordered(inode, offset, bytes, false);
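
The offset/bytes adjustment above can be checked with a standalone sketch (the 4K page size and range values are assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL /* assumed page size for this example */

int main(void)
{
	/* Failed delalloc range [0, 16K); the locked page covers [0, 4K). */
	uint64_t offset = 0, bytes = 16384;
	uint64_t locked = 0; /* page_offset(locked_page) */

	if (bytes + offset > locked + PAGE_SIZE) {
		/* Trim the locked page off the front, mirroring the
		 * adjustment above; its range is finished elsewhere. */
		bytes = offset + bytes - locked - PAGE_SIZE;
		offset = locked + PAGE_SIZE;
	}
	printf("ordered accounting runs for [%llu, %llu)\n",
	       (unsigned long long)offset,
	       (unsigned long long)(offset + bytes));
	return 0;
}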
@@ -603,7 +629,7 @@ again:
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
- if (inode_need_compress(BTRFS_I(inode), start, end)) {
+ if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
WARN_ON(pages);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
@@ -946,7 +972,8 @@ retry:
const u64 end = start + async_extent->ram_size - 1;
p->mapping = inode->vfs_inode.i_mapping;
- btrfs_writepage_endio_finish_ordered(p, start, end, 0);
+ btrfs_writepage_endio_finish_ordered(inode, p, start,
+ end, 0);
p->mapping = NULL;
extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
@@ -1064,7 +1091,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* our outstanding extent for clearing delalloc for this
* range.
*/
- extent_clear_unlock_delalloc(inode, start, end, NULL,
+ extent_clear_unlock_delalloc(inode, start, end,
+ locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1072,6 +1100,19 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
*nr_written = *nr_written +
(end - start + PAGE_SIZE) / PAGE_SIZE;
*page_started = 1;
+ /*
+ * locked_page is locked by the caller of
+ * writepage_delalloc(), not locked by
+ * __process_pages_contig().
+ *
+ * We can't let __process_pages_contig() unlock it,
+ * as it doesn't have any subpage::writers recorded.
+ *
+ * Here we manually unlock the page, since the caller
+ * can't use page_started to determine if it's an
+ * inline extent or a compressed extent.
+ */
+ unlock_page(locked_page);
goto out;
} else if (ret < 0) {
goto out_unlock;
@@ -1150,15 +1191,16 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- /* we're not doing compressed IO, don't unlock the first
- * page (which the caller expects to stay locked), don't
- * clear any dirty bits and don't set any writeback bits
+ /*
+ * We're not doing compressed IO, don't unlock the first page
+ * (which the caller expects to stay locked), don't clear any
+ * dirty bits and don't set any writeback bits
*
- * Do set the Private2 bit so we know this page was properly
- * setup for writepage
+ * Do set the Ordered (Private2) bit so we know this page was
+	 * properly set up for writepage.
*/
page_ops = unlock ? PAGE_UNLOCK : 0;
- page_ops |= PAGE_SET_PRIVATE2;
+ page_ops |= PAGE_SET_ORDERED;
extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
locked_page,
@@ -1822,7 +1864,7 @@ out_check:
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
- PAGE_UNLOCK | PAGE_SET_PRIVATE2);
+ PAGE_UNLOCK | PAGE_SET_ORDERED);
cur_offset = extent_end;
@@ -2193,26 +2235,22 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 logical = bio->bi_iter.bi_sector << 9;
+ u32 bio_len = bio->bi_iter.bi_size;
struct extent_map *em;
- u64 length = 0;
- u64 map_length;
int ret = 0;
struct btrfs_io_geometry geom;
if (bio_flags & EXTENT_BIO_COMPRESSED)
return 0;
- length = bio->bi_iter.bi_size;
- map_length = length;
- em = btrfs_get_chunk_map(fs_info, logical, map_length);
+ em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
if (IS_ERR(em))
return PTR_ERR(em);
- ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical,
- map_length, &geom);
+ ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical, &geom);
if (ret < 0)
goto out;
- if (geom.len < length + size)
+ if (geom.len < bio_len + size)
ret = 1;
out:
free_extent_map(em);
@@ -2233,33 +2271,6 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
-bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
- unsigned int size)
-{
- struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_extent *ordered;
- u64 len = bio->bi_iter.bi_size + size;
- bool ret = true;
-
- ASSERT(btrfs_is_zoned(fs_info));
- ASSERT(fs_info->max_zone_append_size > 0);
- ASSERT(bio_op(bio) == REQ_OP_ZONE_APPEND);
-
- /* Ordered extent not yet created, so we're good */
- ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
- if (!ordered)
- return ret;
-
- if ((bio->bi_iter.bi_sector << SECTOR_SHIFT) + len >
- ordered->disk_bytenr + ordered->disk_num_bytes)
- ret = false;
-
- btrfs_put_ordered_extent(ordered);
-
- return ret;
-}
-
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
struct bio *bio, loff_t file_offset)
{
@@ -2601,7 +2612,7 @@ again:
lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
/* already ordered? We're done */
- if (PagePrivate2(page))
+ if (PageOrdered(page))
goto out_reserved;
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
@@ -2676,8 +2687,8 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_writepage_fixup *fixup;
- /* this page is properly in the ordered list */
- if (TestClearPagePrivate2(page))
+	/* This page already has an ordered extent covering it */
+ if (PageOrdered(page))
return 0;
/*
@@ -2773,7 +2784,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
/*
* If we dropped an inline extent here, we know the range where it is
* was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
- * number of bytes only for that range contaning the inline extent.
+ * number of bytes only for that range containing the inline extent.
 * The remainder of the range will be processed when clearing the
* EXTENT_DELALLOC_BIT bit through the ordered extent completion.
*/
@@ -3000,6 +3011,18 @@ out:
if (ret || truncated) {
u64 unwritten_start = start;
+ /*
+ * If we failed to finish this ordered extent for any reason we
+ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+ * extent, and mark the inode with the error if it wasn't
+ * already set. Any error during writeback would have already
+ * set the mapping error, so we need to set it if we're the ones
+ * marking this ordered extent as failed.
+ */
+ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+ &ordered_extent->flags))
+ mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
if (truncated)
unwritten_start += logical_len;
clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -3057,28 +3080,14 @@ static void finish_ordered_fn(struct btrfs_work *work)
btrfs_finish_ordered_io(ordered_extent);
}
-void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
+void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
+ struct page *page, u64 start,
u64 end, int uptodate)
{
- struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_extent *ordered_extent = NULL;
- struct btrfs_workqueue *wq;
+ trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
- trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
-
- ClearPagePrivate2(page);
- if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
- end - start + 1, uptodate))
- return;
-
- if (btrfs_is_free_space_inode(inode))
- wq = fs_info->endio_freespace_worker;
- else
- wq = fs_info->endio_write_workers;
-
- btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
- btrfs_queue_work(wq, &ordered_extent->work);
+ btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
+ finish_ordered_fn, uptodate);
}
/*
@@ -3140,15 +3149,19 @@ zeroit:
* @bio_offset: offset to the beginning of the bio (in bytes)
* @start: file offset of the range start
* @end: file offset of the range end (inclusive)
+ *
+ * Return a bitmap where a set bit means a csum mismatch and a cleared bit
+ * means the csum matched.
*/
-int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
- struct page *page, u64 start, u64 end)
+unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
+ struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
const u32 sectorsize = root->fs_info->sectorsize;
u32 pg_off;
+ unsigned int result = 0;
if (PageChecked(page)) {
ClearPageChecked(page);
@@ -3176,10 +3189,14 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
page_offset(page) + pg_off);
- if (ret < 0)
- return -EIO;
+ if (ret < 0) {
+ const int nr_bit = (pg_off - offset_in_page(start)) >>
+ root->fs_info->sectorsize_bits;
+
+ result |= (1U << nr_bit);
+ }
}
- return 0;
+ return result;
}
/*
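
A minimal caller-side sketch of the new bitmap convention (hypothetical;
assumes io_bio, page, start, end and fs_info are in scope as in the
function above):

	unsigned int error_bitmap;
	int nr_sectors = (end + 1 - start) >> fs_info->sectorsize_bits;
	int bit;

	error_bitmap = btrfs_verify_data_csum(io_bio, bio_offset, page,
					      start, end);
	for (bit = 0; bit < nr_sectors; bit++) {
		if (error_bitmap & (1U << bit)) {
			/* File offset of the sector that failed csum */
			u64 bad = start + ((u64)bit << fs_info->sectorsize_bits);
			/* ... schedule repair/retry for [bad, bad + sectorsize) ... */
		}
	}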
@@ -4097,7 +4114,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
* This is a placeholder inode for a subvolume we didn't have a
* reference to at the time of the snapshot creation. In the meantime
* we could have renamed the real subvol link into our snapshot, so
- * depending on btrfs_del_root_ref to return -ENOENT here is incorret.
+ * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
* Instead simply lookup the dir_index_item for this entry so we can
* remove it. Otherwise we know we have a ref to the root and we can
* call btrfs_del_root_ref, and it _shouldn't_ fail.
@@ -4452,20 +4469,36 @@ out:
#define NEED_TRUNCATE_BLOCK 1
/*
- * this can truncate away extent items, csum items and directory items.
- * It starts at a high offset and removes keys until it can't find
- * any higher than new_size
+ * Remove inode items from a given root.
*
- * csum items that cross the new i_size are truncated to the new size
- * as well.
+ * @trans: A transaction handle.
+ * @root: The root from which to remove items.
+ * @inode: The inode whose items we want to remove.
+ * @new_size: The new i_size for the inode. This is only applicable when
+ * @min_type is BTRFS_EXTENT_DATA_KEY, must be 0 otherwise.
+ * @min_type: The minimum key type to remove. All keys with a type
+ * greater than this value are removed and all keys with
+ * this type are removed only if their offset is >= @new_size.
+ * @extents_found: Output parameter that will contain the number of file
+ * extent items that were removed or adjusted to the new
+ * inode i_size. The caller is responsible for initializing
+ * the counter. Also, it can be NULL if the caller does not
+ * need this counter.
*
- * min_type is the minimum key type to truncate down to. If set to 0, this
- * will kill all the items on this inode, including the INODE_ITEM_KEY.
+ * Remove all keys associated with the inode from the given root that have a key
+ * with a type greater than or equal to @min_type. When @min_type has a value of
+ * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
+ * greater than or equal to @new_size. If a file extent item that starts before
+ * @new_size and ends after it is found, its length is adjusted.
+ *
+ * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is
+ * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode,
- u64 new_size, u32 min_type)
+ u64 new_size, u32 min_type,
+ u64 *extents_found)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
@@ -4611,6 +4644,9 @@ search_again:
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
+ if (extents_found != NULL)
+ (*extents_found)++;
+
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
@@ -4929,7 +4965,7 @@ again:
flush_dcache_page(page);
}
ClearPageChecked(page);
- set_page_dirty(page);
+ btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
if (only_release_metadata)
@@ -5443,7 +5479,7 @@ void btrfs_evict_inode(struct inode *inode)
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
- 0, 0);
+ 0, 0, NULL);
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
@@ -7925,19 +7961,17 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
btrfs_ino(BTRFS_I(inode)),
pgoff);
} else {
- blk_status_t status;
+ int ret;
ASSERT((start - io_bio->logical) < UINT_MAX);
- status = btrfs_submit_read_repair(inode,
- &io_bio->bio,
- start - io_bio->logical,
- bvec.bv_page, pgoff,
- start,
- start + sectorsize - 1,
- io_bio->mirror_num,
- submit_dio_repair_bio);
- if (status)
- err = status;
+ ret = btrfs_repair_one_sector(inode,
+ &io_bio->bio,
+ start - io_bio->logical,
+ bvec.bv_page, pgoff,
+ start, io_bio->mirror_num,
+ submit_dio_repair_bio);
+ if (ret)
+ err = errno_to_blk_status(ret);
}
start += sectorsize;
ASSERT(bio_offset + sectorsize > bio_offset);
@@ -7952,41 +7986,8 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
const u64 offset, const u64 bytes,
const bool uptodate)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_extent *ordered = NULL;
- struct btrfs_workqueue *wq;
- u64 ordered_offset = offset;
- u64 ordered_bytes = bytes;
- u64 last_offset;
-
- if (btrfs_is_free_space_inode(inode))
- wq = fs_info->endio_freespace_worker;
- else
- wq = fs_info->endio_write_workers;
-
- while (ordered_offset < offset + bytes) {
- last_offset = ordered_offset;
- if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
- &ordered_offset,
- ordered_bytes,
- uptodate)) {
- btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
- NULL);
- btrfs_queue_work(wq, &ordered->work);
- }
-
- /* No ordered extent found in the range, exit */
- if (ordered_offset == last_offset)
- return;
- /*
- * Our bio might span multiple ordered extents. In this case
- * we keep going until we have accounted the whole dio.
- */
- if (ordered_offset < offset + bytes) {
- ordered_bytes = offset + bytes - ordered_offset;
- ordered = NULL;
- }
- }
+ btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
+ finish_ordered_fn, uptodate);
}
static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
@@ -8160,7 +8161,7 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
goto out_err_em;
}
ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
- logical, submit_len, &geom);
+ logical, &geom);
if (ret) {
status = errno_to_blk_status(ret);
goto out_err_em;
@@ -8264,15 +8265,14 @@ int btrfs_readpage(struct file *file, struct page *page)
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
- unsigned long bio_flags = 0;
- struct bio *bio = NULL;
+ struct btrfs_bio_ctrl bio_ctrl = { 0 };
int ret;
btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
- ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL);
- if (bio)
- ret = submit_one_bio(bio, 0, bio_flags);
+ ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
+ if (bio_ctrl.bio)
+ ret = submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
return ret;
}
@@ -8341,9 +8341,9 @@ static int btrfs_migratepage(struct address_space *mapping,
if (page_has_private(page))
attach_page_private(newpage, detach_page_private(page));
- if (PagePrivate2(page)) {
- ClearPagePrivate2(page);
- SetPagePrivate2(newpage);
+ if (PageOrdered(page)) {
+ ClearPageOrdered(page);
+ SetPageOrdered(newpage);
}
if (mode != MIGRATE_SYNC_NO_COPY)
@@ -8358,27 +8358,42 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *tree = &inode->io_tree;
- struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_SIZE - 1;
- u64 start;
- u64 end;
+ u64 cur;
int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
- bool found_ordered = false;
- bool completed_ordered = false;
/*
- * we have the page locked, so new writeback can't start,
- * and the dirty bit won't be cleared while we are here.
+	 * We have the page locked, so no new ordered extent can be created
+	 * on this page, nor can a bio be submitted for it.
*
- * Wait for IO on this page so that we can safely clear
- * the PagePrivate2 bit and do ordered accounting
+	 * But an already submitted bio can still be finished on this page.
+	 * Furthermore, the endio function won't skip a page that already
+	 * has Ordered (Private2) cleared, so it's possible for endio and
+	 * invalidatepage to do the same ordered extent accounting twice
+	 * on one page.
+ *
+ * So here we wait for any submitted bios to finish, so that we won't
+ * do double ordered extent accounting on the same page.
*/
wait_on_page_writeback(page);
- if (offset) {
+ /*
+	 * For the subpage case, we have call sites like
+	 * btrfs_punch_hole_lock_range() which pass a range that is not
+	 * aligned to the sectorsize.
+	 * If the range doesn't cover the full page, we don't need to and
+	 * shouldn't clear page extent mapped, as page->private can still
+	 * record subpage dirty bits for other parts of the range.
+	 *
+	 * For cases that invalidate the full page even when the range
+	 * doesn't cover it, like invalidating the last page, we're still
+	 * safe to wait for the ordered extents to finish.
+ */
+ if (!(offset == 0 && length == PAGE_SIZE)) {
btrfs_releasepage(page, GFP_NOFS);
return;
}
@@ -8386,89 +8401,123 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
if (!inode_evicting)
lock_extent_bits(tree, page_start, page_end, &cached_state);
- start = page_start;
-again:
- ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
- if (ordered) {
- found_ordered = true;
- end = min(page_end,
- ordered->file_offset + ordered->num_bytes - 1);
+ cur = page_start;
+ while (cur < page_end) {
+ struct btrfs_ordered_extent *ordered;
+ bool delete_states;
+ u64 range_end;
+ u32 range_len;
+
+ ordered = btrfs_lookup_first_ordered_range(inode, cur,
+ page_end + 1 - cur);
+ if (!ordered) {
+ range_end = page_end;
+ /*
+ * No ordered extent covering this range, we are safe
+ * to delete all extent states in the range.
+ */
+ delete_states = true;
+ goto next;
+ }
+ if (ordered->file_offset > cur) {
+ /*
+ * There is a range between [cur, oe->file_offset) not
+ * covered by any ordered extent.
+ * We are safe to delete all extent states, and handle
+ * the ordered extent in the next iteration.
+ */
+ range_end = ordered->file_offset - 1;
+ delete_states = true;
+ goto next;
+ }
+
+ range_end = min(ordered->file_offset + ordered->num_bytes - 1,
+ page_end);
+ ASSERT(range_end + 1 - cur < U32_MAX);
+ range_len = range_end + 1 - cur;
+ if (!btrfs_page_test_ordered(fs_info, page, cur, range_len)) {
+ /*
+ * If Ordered (Private2) is cleared, it means endio has
+ * already been executed for the range.
+ * We can't delete the extent states as
+ * btrfs_finish_ordered_io() may still use some of them.
+ */
+ delete_states = false;
+ goto next;
+ }
+ btrfs_page_clear_ordered(fs_info, page, cur, range_len);
+
/*
* IO on this page will never be started, so we need to account
* for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
* here, must leave that up for the ordered extent completion.
+ *
+ * This will also unlock the range for incoming
+ * btrfs_finish_ordered_io().
*/
if (!inode_evicting)
- clear_extent_bit(tree, start, end,
+ clear_extent_bit(tree, cur, range_end,
EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state);
+
+ spin_lock_irq(&inode->ordered_tree.lock);
+ set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
+ ordered->truncated_len = min(ordered->truncated_len,
+ cur - ordered->file_offset);
+ spin_unlock_irq(&inode->ordered_tree.lock);
+
+ if (btrfs_dec_test_ordered_pending(inode, &ordered,
+ cur, range_end + 1 - cur, 1)) {
+ btrfs_finish_ordered_io(ordered);
+ /*
+ * The ordered extent has finished, now we're again
+ * safe to delete all extent states of the range.
+ */
+ delete_states = true;
+ } else {
+ /*
+ * btrfs_finish_ordered_io() will get executed by endio
+ * of other pages, thus we can't delete extent states
+ * anymore
+ */
+ delete_states = false;
+ }
+next:
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
/*
- * whoever cleared the private bit is responsible
- * for the finish_ordered_io
+ * Qgroup reserved space handler
+ * Sector(s) here will be either:
+ *
+ * 1) Already written to disk or bio already finished
+ * Then its QGROUP_RESERVED bit in io_tree is already cleared.
+ * Qgroup will be handled by its qgroup_record then.
+ * btrfs_qgroup_free_data() call will do nothing here.
+ *
+ * 2) Not written to disk yet
+		 *    Then the btrfs_qgroup_free_data() call will clear the
+		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
+		 *    reserved data space, since the IO will never happen for
+		 *    this page.
*/
- if (TestClearPagePrivate2(page)) {
- spin_lock_irq(&inode->ordered_tree.lock);
- set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
- ordered->truncated_len = min(ordered->truncated_len,
- start - ordered->file_offset);
- spin_unlock_irq(&inode->ordered_tree.lock);
-
- if (btrfs_dec_test_ordered_pending(inode, &ordered,
- start,
- end - start + 1, 1)) {
- btrfs_finish_ordered_io(ordered);
- completed_ordered = true;
- }
- }
- btrfs_put_ordered_extent(ordered);
+ btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
if (!inode_evicting) {
- cached_state = NULL;
- lock_extent_bits(tree, start, end,
- &cached_state);
+ clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
+ EXTENT_DELALLOC | EXTENT_UPTODATE |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
+ delete_states, &cached_state);
}
-
- start = end + 1;
- if (start < page_end)
- goto again;
+ cur = range_end + 1;
}
-
/*
- * Qgroup reserved space handler
- * Page here will be either
- * 1) Already written to disk or ordered extent already submitted
- * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
- * Qgroup will be handled by its qgroup_record then.
- * btrfs_qgroup_free_data() call will do nothing here.
- *
- * 2) Not written to disk yet
- * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
- * bit of its io_tree, and free the qgroup reserved data space.
- * Since the IO will never happen for this page.
+	 * We have iterated through all ordered extents of the page, so the
+	 * page should not have Ordered (Private2) anymore, or the above
+	 * iteration did something wrong.
*/
- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
- if (!inode_evicting) {
- bool delete = true;
-
- /*
- * If there's an ordered extent for this range and we have not
- * finished it ourselves, we must leave EXTENT_DELALLOC_NEW set
- * in the range for the ordered extent completion. We must also
- * not delete the range, otherwise we would lose that bit (and
- * any other bits set in the range). Make sure EXTENT_UPTODATE
- * is cleared if we don't delete, otherwise it can lead to
- * corruptions if the i_size is extented later.
- */
- if (found_ordered && !completed_ordered)
- delete = false;
- clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_UPTODATE |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
- delete, &cached_state);
-
+ ASSERT(!PageOrdered(page));
+ if (!inode_evicting)
__btrfs_releasepage(page, GFP_NOFS);
- }
-
ClearPageChecked(page);
clear_page_extent_mapped(page);
}
@@ -8614,8 +8663,8 @@ again:
flush_dcache_page(page);
}
ClearPageChecked(page);
- set_page_dirty(page);
- SetPageUptodate(page);
+ btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
+ btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
@@ -8649,6 +8698,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
struct btrfs_trans_handle *trans;
u64 mask = fs_info->sectorsize - 1;
u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
+ u64 extents_found = 0;
if (!skip_writeback) {
ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
@@ -8706,20 +8756,13 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
min_size, false);
BUG_ON(ret);
- /*
- * So if we truncate and then write and fsync we normally would just
- * write the extents that changed, which is a problem if we need to
- * first truncate that entire inode. So set this flag so we write out
- * all of the extents in the inode to the sync log so we're completely
- * safe.
- */
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
trans->block_rsv = rsv;
while (1) {
ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
inode->i_size,
- BTRFS_EXTENT_DATA_KEY);
+ BTRFS_EXTENT_DATA_KEY,
+ &extents_found);
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
break;
@@ -8781,6 +8824,22 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
}
out:
btrfs_free_block_rsv(fs_info, rsv);
+ /*
+ * So if we truncate and then write and fsync we normally would just
+ * write the extents that changed, which is a problem if we need to
+ * first truncate that entire inode. So set this flag so we write out
+ * all of the extents in the inode to the sync log so we're completely
+ * safe.
+ *
+ * If no extents were dropped or trimmed we don't need to force the next
+ * fsync to truncate all the inode's items from the log and re-log them
+ * all. This means the truncate operation did not change the file size,
+ * or changed it to a smaller size but there was only an implicit hole
+ * between the old i_size and the new i_size, and there were no prealloc
+ * extents beyond i_size to drop.
+ */
+ if (extents_found > 0)
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
return ret;
}
@@ -9076,6 +9135,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
int ret2;
bool root_log_pinned = false;
bool dest_log_pinned = false;
+ bool need_abort = false;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9135,6 +9195,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
old_idx);
if (ret)
goto out_fail;
+ need_abort = true;
}
/* And now for the dest. */
@@ -9150,8 +9211,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_ino,
btrfs_ino(BTRFS_I(old_dir)),
new_idx);
- if (ret)
+ if (ret) {
+ if (need_abort)
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
+ }
}
/* Update inode version and ctime/mtime. */
@@ -10182,17 +10246,21 @@ out:
return ret;
}
-void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
+void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
{
- struct inode *inode = tree->private_data;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
+ u32 len;
+ ASSERT(end + 1 - start <= U32_MAX);
+ len = end + 1 - start;
while (index <= end_index) {
- page = find_get_page(inode->i_mapping, index);
+ page = find_get_page(inode->vfs_inode.i_mapping, index);
ASSERT(page); /* Pages should be in the extent_io_tree */
- set_page_writeback(page);
+
+ btrfs_page_set_writeback(fs_info, page, start, len);
put_page(page);
index++;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5dc2fd843ae3..0ba98e08a029 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -353,15 +353,55 @@ update_flags:
return ret;
}
+/*
+ * Start exclusive operation @type, return true on success
+ */
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
enum btrfs_exclusive_operation type)
{
- return !cmpxchg(&fs_info->exclusive_operation, BTRFS_EXCLOP_NONE, type);
+ bool ret = false;
+
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
+ fs_info->exclusive_operation = type;
+ ret = true;
+ }
+ spin_unlock(&fs_info->super_lock);
+
+ return ret;
+}
+
+/*
+ * Conditionally allow entering the exclusive operation in case it's compatible
+ * with the running one. This must be paired with btrfs_exclop_start_unlock and
+ * btrfs_exclop_finish.
+ *
+ * Compatibility:
+ * - the same type is already running
+ * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
+ *   must first check the condition that would allow none -> @type
+ */
+bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type)
+{
+ spin_lock(&fs_info->super_lock);
+ if (fs_info->exclusive_operation == type)
+ return true;
+
+ spin_unlock(&fs_info->super_lock);
+ return false;
+}
+
+void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
+{
+ spin_unlock(&fs_info->super_lock);
}
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
+ spin_lock(&fs_info->super_lock);
WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
+ spin_unlock(&fs_info->super_lock);
sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}
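
A minimal sketch of the claim/release contract these helpers implement
(hypothetical caller, mirroring the ioctl call sites later in this patch):

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REMOVE))
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	/* ... exclusive operation claimed, do the work ... */
	btrfs_exclop_finish(fs_info);

	/* Peeking at a running op of the same type without finishing it: */
	if (btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_REMOVE)) {
		/* super_lock held: the running op can't flip to NONE here */
		btrfs_exclop_start_unlock(fs_info);
	}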
@@ -1455,7 +1495,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (btrfs_defrag_cancelled(fs_info)) {
btrfs_debug(fs_info, "defrag_file cancelled");
ret = -EAGAIN;
- break;
+ goto error;
}
if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
@@ -1533,6 +1573,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
}
}
+ ret = defrag_count;
+error:
if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
filemap_flush(inode->i_mapping);
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
@@ -1546,8 +1588,6 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
}
- ret = defrag_count;
-
out_ra:
if (do_compress) {
btrfs_inode_lock(inode, 0);
@@ -1560,6 +1600,48 @@ out_ra:
return ret;
}
+/*
+ * Try to start exclusive operation @type or cancel it if it's running.
+ *
+ * Return:
+ * 0 - normal mode, newly claimed op started
+ * >0 - normal mode, something else is running,
+ * return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
+ * ECANCELED - cancel mode, successful cancel
+ * ENOTCONN - cancel mode, operation not running anymore
+ */
+static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
+ enum btrfs_exclusive_operation type, bool cancel)
+{
+ if (!cancel) {
+ /* Start normal op */
+ if (!btrfs_exclop_start(fs_info, type))
+ return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+ /* Exclusive operation is now claimed */
+ return 0;
+ }
+
+ /* Cancel running op */
+ if (btrfs_exclop_start_try_lock(fs_info, type)) {
+ /*
+ * This blocks any exclop finish from setting it to NONE, so we
+ * request cancellation. Either it runs and we will wait for it,
+ * or it has finished and no waiting will happen.
+ */
+ atomic_inc(&fs_info->reloc_cancel_req);
+ btrfs_exclop_start_unlock(fs_info);
+
+ if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
+ wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
+ TASK_INTERRUPTIBLE);
+
+ return -ECANCELED;
+ }
+
+ /* Something else is running or none */
+ return -ENOTCONN;
+}
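
A sketch of how a caller translates these return codes (mirroring the
resize/remove ioctls below; the error propagation shown is illustrative):

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;	/* >0, -ECANCELED or -ENOTCONN go to user space */
	/* Exclusive operation claimed: do the resize */
	btrfs_exclop_finish(fs_info);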
+
static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
@@ -1577,6 +1659,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
char *devstr = NULL;
int ret = 0;
int mod = 0;
+ bool cancel;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1585,20 +1668,23 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (ret)
return ret;
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE)) {
- mnt_drop_write_file(file);
- return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
- }
-
+ /*
+ * Read the arguments before checking exclusivity to be able to
+	 * distinguish a regular resize from a cancel
+ */
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
- goto out;
+ goto out_drop;
}
-
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
-
sizestr = vol_args->name;
+ cancel = (strcmp("cancel", sizestr) == 0);
+ ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
+ if (ret)
+ goto out_free;
+ /* Exclusive operation is now claimed */
+
devstr = strchr(sizestr, ':');
if (devstr) {
sizestr = devstr + 1;
@@ -1606,10 +1692,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
devstr = vol_args->name;
ret = kstrtoull(devstr, 10, &devid);
if (ret)
- goto out_free;
+ goto out_finish;
if (!devid) {
ret = -EINVAL;
- goto out_free;
+ goto out_finish;
}
btrfs_info(fs_info, "resizing devid %llu", devid);
}
@@ -1619,7 +1705,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
btrfs_info(fs_info, "resizer unable to find device %llu",
devid);
ret = -ENODEV;
- goto out_free;
+ goto out_finish;
}
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
@@ -1627,7 +1713,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
"resizer unable to apply on readonly device %llu",
devid);
ret = -EPERM;
- goto out_free;
+ goto out_finish;
}
if (!strcmp(sizestr, "max"))
@@ -1643,13 +1729,13 @@ static noinline int btrfs_ioctl_resize(struct file *file,
new_size = memparse(sizestr, &retptr);
if (*retptr != '\0' || new_size == 0) {
ret = -EINVAL;
- goto out_free;
+ goto out_finish;
}
}
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -EPERM;
- goto out_free;
+ goto out_finish;
}
old_size = btrfs_device_get_total_bytes(device);
@@ -1657,24 +1743,24 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (mod < 0) {
if (new_size > old_size) {
ret = -EINVAL;
- goto out_free;
+ goto out_finish;
}
new_size = old_size - new_size;
} else if (mod > 0) {
if (new_size > ULLONG_MAX - old_size) {
ret = -ERANGE;
- goto out_free;
+ goto out_finish;
}
new_size = old_size + new_size;
}
if (new_size < SZ_256M) {
ret = -EINVAL;
- goto out_free;
+ goto out_finish;
}
if (new_size > device->bdev->bd_inode->i_size) {
ret = -EFBIG;
- goto out_free;
+ goto out_finish;
}
new_size = round_down(new_size, fs_info->sectorsize);
@@ -1683,7 +1769,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- goto out_free;
+ goto out_finish;
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans);
@@ -1696,10 +1782,11 @@ static noinline int btrfs_ioctl_resize(struct file *file,
"resize device %s (devid %llu) from %llu to %llu",
rcu_str_deref(device->name), device->devid,
old_size, new_size);
+out_finish:
+ btrfs_exclop_finish(fs_info);
out_free:
kfree(vol_args);
-out:
- btrfs_exclop_finish(fs_info);
+out_drop:
mnt_drop_write_file(file);
return ret;
}
@@ -2897,7 +2984,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
err = PTR_ERR(subvol_name_ptr);
goto free_parent;
}
- /* subvol_name_ptr is already NULL termined */
+ /* subvol_name_ptr is already nul terminated */
subvol_name = (char *)kbasename(subvol_name_ptr);
}
} else {
@@ -3119,6 +3206,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
+ bool cancel = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3137,18 +3225,22 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
ret = -EOPNOTSUPP;
goto out;
}
+ vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
+ if (!(vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) &&
+ strcmp("cancel", vol_args->name) == 0)
+ cancel = true;
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REMOVE)) {
- ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+ ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
+ cancel);
+ if (ret)
goto out;
- }
+ /* Exclusive operation is now claimed */
- if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
+ if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
- } else {
- vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
+ else
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
- }
+
btrfs_exclop_finish(fs_info);
if (!ret) {
@@ -3172,6 +3264,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
int ret;
+ bool cancel;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3180,25 +3273,24 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
if (ret)
return ret;
- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REMOVE)) {
- ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
- goto out_drop_write;
- }
-
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
- goto out;
+ goto out_drop_write;
}
-
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+ cancel = (strcmp("cancel", vol_args->name) == 0);
+
+ ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
+ cancel);
+ if (ret == 0) {
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+ if (!ret)
+ btrfs_info(fs_info, "disk deleted %s", vol_args->name);
+ btrfs_exclop_finish(fs_info);
+ }
- if (!ret)
- btrfs_info(fs_info, "disk deleted %s", vol_args->name);
kfree(vol_args);
-out:
- btrfs_exclop_finish(fs_info);
out_drop_write:
mnt_drop_write_file(file);
@@ -3551,7 +3643,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
goto out;
}
transid = trans->transid;
- ret = btrfs_commit_transaction_async(trans, 0);
+ ret = btrfs_commit_transaction_async(trans);
if (ret) {
btrfs_end_transaction(trans);
return ret;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5fafc5e89bb7..313d9d685adb 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -57,7 +57,7 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
/*
* Try-lock for read.
*
- * Retrun 1 if the rwlock has been taken, 0 otherwise
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
@@ -72,7 +72,7 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
/*
* Try-lock for write.
*
- * Retrun 1 if the rwlock has been taken, 0 otherwise
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6c413bb451a3..6eb41b7c0c84 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -16,6 +16,7 @@
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
+#include "subpage.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
@@ -300,81 +301,142 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
}
/*
- * Finish IO for one ordered extent across a given range. The range can
- * contain several ordered extents.
+ * Mark all ordered extents io inside the specified range finished.
*
- * @found_ret: Return the finished ordered extent
- * @file_offset: File offset for the finished IO
- * Will also be updated to one byte past the range that is
- * recordered as finished. This allows caller to walk forward.
- * @io_size: Length of the finish IO range
- * @uptodate: If the IO finished without problem
- *
- * Return true if any ordered extent is finished in the range, and update
- * @found_ret and @file_offset.
- * Return false otherwise.
+ * @page:	 The involved page for the operation.
+ * For uncompressed buffered IO, the page status also needs to be
+ * updated to indicate whether the pending ordered io is finished.
+ * Can be NULL for direct IO and compressed write.
+ *		 For these cases, the callers must ensure they won't execute
+ *		 the endio function twice.
+ * @finish_func: The function to be executed when all the IO of an ordered
+ *		 extent is finished.
*
- * NOTE: Although The range can cross multiple ordered extents, only one
- * ordered extent will be updated during one call. The caller is responsible to
- * iterate all ordered extents in the range.
+ * This function is called for endio, thus the range must have ordered
+ * extent(s) covering it.
*/
-bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **finished_ret,
- u64 *file_offset, u64 io_size, int uptodate)
+void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
+ struct page *page, u64 file_offset,
+ u64 num_bytes, btrfs_func_t finish_func,
+ bool uptodate)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_workqueue *wq;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- bool finished = false;
unsigned long flags;
- u64 dec_end;
- u64 dec_start;
- u64 to_dec;
+ u64 cur = file_offset;
+
+ if (btrfs_is_free_space_inode(inode))
+ wq = fs_info->endio_freespace_worker;
+ else
+ wq = fs_info->endio_write_workers;
+
+ if (page)
+ ASSERT(page->mapping && page_offset(page) <= file_offset &&
+ file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);
spin_lock_irqsave(&tree->lock, flags);
- node = tree_search(tree, *file_offset);
- if (!node)
- goto out;
+ while (cur < file_offset + num_bytes) {
+ u64 entry_end;
+ u64 end;
+ u32 len;
- entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (!in_range(*file_offset, entry->file_offset, entry->num_bytes))
- goto out;
+ node = tree_search(tree, cur);
+ /* No ordered extents at all */
+ if (!node)
+ break;
- dec_start = max(*file_offset, entry->file_offset);
- dec_end = min(*file_offset + io_size,
- entry->file_offset + entry->num_bytes);
- *file_offset = dec_end;
- if (dec_start > dec_end) {
- btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
- dec_start, dec_end);
- }
- to_dec = dec_end - dec_start;
- if (to_dec > entry->bytes_left) {
- btrfs_crit(fs_info,
- "bad ordered accounting left %llu size %llu",
- entry->bytes_left, to_dec);
- }
- entry->bytes_left -= to_dec;
- if (!uptodate)
- set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+ entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+ entry_end = entry->file_offset + entry->num_bytes;
+ /*
+ * |<-- OE --->| |
+ * cur
+ * Go to next OE.
+ */
+ if (cur >= entry_end) {
+ node = rb_next(node);
+ /* No more ordered extents, exit */
+ if (!node)
+ break;
+ entry = rb_entry(node, struct btrfs_ordered_extent,
+ rb_node);
+
+ /* Go to next ordered extent and continue */
+ cur = entry->file_offset;
+ continue;
+ }
+ /*
+ * | |<--- OE --->|
+ * cur
+ * Go to the start of OE.
+ */
+ if (cur < entry->file_offset) {
+ cur = entry->file_offset;
+ continue;
+ }
- if (entry->bytes_left == 0) {
/*
- * Ensure only one caller can set the flag and finished_ret
- * accordingly
+ * Now we are definitely inside one ordered extent.
+ *
+ * |<--- OE --->|
+ * |
+ * cur
*/
- finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
- /* test_and_set_bit implies a barrier */
- cond_wake_up_nomb(&entry->wait);
- }
-out:
- if (finished && finished_ret && entry) {
- *finished_ret = entry;
- refcount_inc(&entry->refs);
+ end = min(entry->file_offset + entry->num_bytes,
+ file_offset + num_bytes) - 1;
+ ASSERT(end + 1 - cur < U32_MAX);
+ len = end + 1 - cur;
+
+ if (page) {
+ /*
+			 * The Ordered (Private2) bit indicates whether we
+			 * still have pending IO for the ordered extent in
+			 * this range.
+			 *
+			 * If the bit is not set, skip to the next range.
+ */
+ if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
+ cur += len;
+ continue;
+ }
+ btrfs_page_clear_ordered(fs_info, page, cur, len);
+ }
+
+ /* Now we're fine to update the accounting */
+ if (unlikely(len > entry->bytes_left)) {
+ WARN_ON(1);
+ btrfs_crit(fs_info,
+"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
+ inode->root->root_key.objectid,
+ btrfs_ino(inode),
+ entry->file_offset,
+ entry->num_bytes,
+ len, entry->bytes_left);
+ entry->bytes_left = 0;
+ } else {
+ entry->bytes_left -= len;
+ }
+
+ if (!uptodate)
+ set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
+ /*
+		 * All the IO of the ordered extent is finished, so queue
+		 * finish_func for execution.
+ */
+ if (entry->bytes_left == 0) {
+ set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+ cond_wake_up(&entry->wait);
+ refcount_inc(&entry->refs);
+ spin_unlock_irqrestore(&tree->lock, flags);
+ btrfs_init_work(&entry->work, finish_func, NULL, NULL);
+ btrfs_queue_work(wq, &entry->work);
+ spin_lock_irqsave(&tree->lock, flags);
+ }
+ cur += len;
}
spin_unlock_irqrestore(&tree->lock, flags);
- return finished;
}
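
The two call modes described in the @page comment, as they appear
elsewhere in this patch (a sketch of existing usage, not new code):

	/* Buffered writeback endio: the page's Ordered bits gate accounting */
	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
				       finish_ordered_fn, uptodate);

	/* Direct IO / compressed write: no page, a single endio is guaranteed */
	btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
				       finish_ordered_fn, uptodate);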
/*
@@ -870,6 +932,81 @@ out:
}
/*
+ * Lookup the first ordered extent that overlaps the range
+ * [@file_offset, @file_offset + @len).
+ *
+ * The difference between this and btrfs_lookup_first_ordered_extent() is
+ * that this one won't return any ordered extent that does not overlap the range.
+ * And unlike btrfs_lookup_ordered_extent(), this function ensures the
+ * first overlapping ordered extent is returned.
+ */
+struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
+ struct btrfs_inode *inode, u64 file_offset, u64 len)
+{
+ struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
+ struct rb_node *node;
+ struct rb_node *cur;
+ struct rb_node *prev;
+ struct rb_node *next;
+ struct btrfs_ordered_extent *entry = NULL;
+
+ spin_lock_irq(&tree->lock);
+ node = tree->tree.rb_node;
+ /*
+ * Here we don't want to use tree_search() which will use tree->last
+ * and screw up the search order.
+ * And __tree_search() can't return the adjacent ordered extents
+	 * either, so we do our own search here.
+ */
+ while (node) {
+ entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+
+ if (file_offset < entry->file_offset) {
+ node = node->rb_left;
+ } else if (file_offset >= entry_end(entry)) {
+ node = node->rb_right;
+ } else {
+ /*
+ * Direct hit, got an ordered extent that starts at
+ * @file_offset
+ */
+ goto out;
+ }
+ }
+ if (!entry) {
+ /* Empty tree */
+ goto out;
+ }
+
+ cur = &entry->rb_node;
+ /* We got an entry around @file_offset, check adjacent entries */
+ if (entry->file_offset < file_offset) {
+ prev = cur;
+ next = rb_next(cur);
+ } else {
+ prev = rb_prev(cur);
+ next = cur;
+ }
+ if (prev) {
+ entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
+ if (range_overlaps(entry, file_offset, len))
+ goto out;
+ }
+ if (next) {
+ entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
+ if (range_overlaps(entry, file_offset, len))
+ goto out;
+ }
+ /* No ordered extent in the range */
+ entry = NULL;
+out:
+ if (entry)
+ refcount_inc(&entry->refs);
+ spin_unlock_irq(&tree->lock);
+ return entry;
+}
+
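A hypothetical usage sketch of the new lookup helper (the caller owns the
returned reference and must drop it):

	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_first_ordered_range(inode, file_offset, len);
	if (ordered) {
		/*
		 * ordered->file_offset may be smaller than file_offset;
		 * only overlap with [file_offset, file_offset + len) is
		 * guaranteed.
		 */
		btrfs_put_ordered_extent(ordered);
	}
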
+/*
* btrfs_flush_ordered_range - Lock the passed range and ensures all pending
* ordered extents in it are run to completion.
*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index e60c07f36427..566472004edd 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -172,13 +172,13 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
+void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
+ struct page *page, u64 file_offset,
+ u64 num_bytes, btrfs_func_t finish_func,
+ bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size, int uptodate);
-bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **finished_ret,
- u64 *file_offset, u64 io_size,
- int uptodate);
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
int type);
@@ -196,6 +196,8 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait);
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
+struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
+ struct btrfs_inode *inode, u64 file_offset, u64 len);
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode,
u64 file_offset,
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 2dcb1cb21634..b1cb5a8c2999 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -260,6 +260,10 @@ static int prop_compression_validate(const char *value, size_t len)
if (btrfs_compress_is_valid_type(value, len))
return 0;
+ if ((len == 2 && strncmp("no", value, 2) == 0) ||
+ (len == 4 && strncmp("none", value, 4) == 0))
+ return 0;
+
return -EINVAL;
}
@@ -269,7 +273,17 @@ static int prop_compression_apply(struct inode *inode, const char *value,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int type;
+ /* Reset to defaults */
if (len == 0) {
+ BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
+ BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
+ return 0;
+ }
+
+ /* Set NOCOMPRESS flag */
+ if ((len == 2 && strncmp("no", value, 2) == 0) ||
+ (len == 4 && strncmp("none", value, 4) == 0)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
BTRFS_I(inode)->prop_compress = BTRFS_COMPRESS_NONE;
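
From user space this should presumably be reachable as something like
"btrfs property set <file> compression none" (assuming btrfs-progs passes
the raw value through), which with this change maps to the NOCOMPRESS
flag above instead of failing validation with -EINVAL.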
@@ -348,7 +362,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
/*
* This is not strictly necessary as the property should be
- * valid, but in case it isn't, don't propagate it futher.
+ * valid, but in case it isn't, don't propagate it further.
*/
ret = h->validate(value, strlen(value));
if (ret)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 3ded812f522c..07ec06d4e972 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2521,7 +2521,7 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
int ret = 0;
/*
- * If quotas get disabled meanwhile, the resouces need to be freed and
+ * If quotas get disabled meanwhile, the resources need to be freed and
* we can't just exit here.
*/
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
@@ -3545,13 +3545,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
struct btrfs_trans_handle *trans;
int ret;
- /*
- * Can't hold an open transaction or we run the risk of deadlocking,
- * and can't either be under the context of a send operation (where
- * current->journal_info is set to BTRFS_SEND_TRANS_STUB), as that
- * would result in a crash when starting a transaction and does not
- * make sense either (send is a read-only operation).
- */
+ /* Can't hold an open transaction or we run the risk of deadlocking. */
ASSERT(current->journal_info == NULL);
if (WARN_ON(current->journal_info))
return 0;
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index d434dc78dadf..9b0814318e72 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -7,6 +7,7 @@
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"
+#include "subpage.h"
#define BTRFS_MAX_DEDUPE_LEN SZ_16M
@@ -52,7 +53,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
const u64 datal,
const u8 comp_type)
{
- const u64 block_size = btrfs_inode_sectorsize(inode);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ const u32 block_size = fs_info->sectorsize;
const u64 range_end = file_offset + block_size - 1;
const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
@@ -106,10 +108,12 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
if (comp_type == BTRFS_COMPRESS_NONE) {
- memcpy_to_page(page, 0, data_start, datal);
+ memcpy_to_page(page, offset_in_page(file_offset), data_start,
+ datal);
flush_dcache_page(page);
} else {
- ret = btrfs_decompress(comp_type, data_start, page, 0,
+ ret = btrfs_decompress(comp_type, data_start, page,
+ offset_in_page(file_offset),
inline_size, datal);
if (ret)
goto out_unlock;
@@ -133,9 +137,9 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
flush_dcache_page(page);
}
- SetPageUptodate(page);
+ btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
ClearPageChecked(page);
- set_page_dirty(page);
+ btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
if (page) {
unlock_page(page);
@@ -203,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst,
* inline extent's data to the page.
*/
ASSERT(key.offset > 0);
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal,
- comp_type);
- goto out;
+ goto copy_to_page;
}
} else if (i_size_read(dst) <= datal) {
struct btrfs_file_extent_item *ei;
@@ -222,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst,
BTRFS_FILE_EXTENT_INLINE)
goto copy_inline_extent;
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal, comp_type);
- goto out;
+ goto copy_to_page;
}
copy_inline_extent:
- ret = 0;
/*
* We have no extent items, or we have an extent at offset 0 which may
* or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +238,13 @@ copy_inline_extent:
* clone. Deal with all these cases by copying the inline extent
* data into the respective page at the destination inode.
*/
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal, comp_type);
- goto out;
+ goto copy_to_page;
}
+ /*
+ * Release path before starting a new transaction so we don't hold locks
+ * that would confuse lockdep.
+ */
btrfs_release_path(path);
/*
* If we end up here it means were copy the inline extent into a leaf
@@ -282,11 +282,6 @@ copy_inline_extent:
out:
if (!ret && !trans) {
/*
- * Release path before starting a new transaction so we don't
- * hold locks that would confuse lockdep.
- */
- btrfs_release_path(path);
- /*
* No transaction here means we copied the inline extent into a
* page of the destination inode.
*
@@ -306,6 +301,21 @@ out:
*trans_out = trans;
return ret;
+
+copy_to_page:
+ /*
+ * Release our path because we don't need it anymore and also because
+ * copy_inline_to_page() needs to reserve data and metadata, which may
+ * need to flush delalloc when we are low on available space and
+ * therefore cause a deadlock if writeback of an inline extent or an
+ * ordered extent completion needs to write to the same leaf.
+ */
+ btrfs_release_path(path);
+
+ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ inline_data, size, datal, comp_type);
+ goto out;
}
/**
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b70be2ac2e9e..fc831597cb22 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2876,11 +2876,12 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
}
/*
- * Allow error injection to test balance cancellation
+ * Allow error injection to test balance/relocation cancellation
*/
noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
{
return atomic_read(&fs_info->balance_cancel_req) ||
+ atomic_read(&fs_info->reloc_cancel_req) ||
fatal_signal_pending(current);
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
@@ -3780,6 +3781,60 @@ out:
return inode;
}
+/*
+ * Mark start of chunk relocation that is cancellable. Check if the cancellation
+ * has been requested meanwhile and don't start in that case.
+ *
+ * Return:
+ * 0 success
+ * -EINPROGRESS operation is already in progress, that's probably a bug
+ * -ECANCELED cancellation request was set before the operation started
+ *   -EAGAIN       cannot start because there are ongoing send operations
+ */
+static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
+{
+ spin_lock(&fs_info->send_reloc_lock);
+ if (fs_info->send_in_progress) {
+ btrfs_warn_rl(fs_info,
+"cannot run relocation while send operations are in progress (%d in progress)",
+ fs_info->send_in_progress);
+ spin_unlock(&fs_info->send_reloc_lock);
+ return -EAGAIN;
+ }
+ if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
+ /* This should not happen */
+ spin_unlock(&fs_info->send_reloc_lock);
+ btrfs_err(fs_info, "reloc already running, cannot start");
+ return -EINPROGRESS;
+ }
+ spin_unlock(&fs_info->send_reloc_lock);
+
+ if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
+ btrfs_info(fs_info, "chunk relocation canceled on start");
+ /*
+ * On cancel, clear all requests but let the caller mark
+ * the end after cleanup operations.
+ */
+ atomic_set(&fs_info->reloc_cancel_req, 0);
+ return -ECANCELED;
+ }
+ return 0;
+}
+
+/*
+ * Mark end of chunk relocation that is cancellable and wake any waiters.
+ */
+static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
+{
+ /* Requested after start, clear bit first so any waiters can continue */
+ if (atomic_read(&fs_info->reloc_cancel_req) > 0)
+ btrfs_info(fs_info, "chunk relocation canceled during operation");
+ spin_lock(&fs_info->send_reloc_lock);
+ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
+ spin_unlock(&fs_info->send_reloc_lock);
+ atomic_set(&fs_info->reloc_cancel_req, 0);
+}
+
static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
struct reloc_control *rc;
@@ -3862,6 +3917,12 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
return -ENOMEM;
}
+ ret = reloc_chunk_start(fs_info);
+ if (ret < 0) {
+ err = ret;
+ goto out_put_bg;
+ }
+
rc->extent_root = extent_root;
rc->block_group = bg;
@@ -3952,7 +4013,9 @@ out:
if (err && rw)
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
- btrfs_put_block_group(rc->block_group);
+out_put_bg:
+ btrfs_put_block_group(bg);
+ reloc_chunk_end(fs_info);
free_reloc_control(rc);
return err;
}
@@ -4073,6 +4136,12 @@ int btrfs_recover_relocation(struct btrfs_root *root)
goto out;
}
+ ret = reloc_chunk_start(fs_info);
+ if (ret < 0) {
+ err = ret;
+ goto out_end;
+ }
+
rc->extent_root = fs_info->extent_root;
set_reloc_control(rc);
@@ -4137,6 +4206,8 @@ out_clean:
err = ret;
out_unset:
unset_reloc_control(rc);
+out_end:
+ reloc_chunk_end(fs_info);
free_reloc_control(rc);
out:
free_reloc_roots(&reloc_roots);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 485cda3eb8d7..088641ba7a8e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -165,6 +165,10 @@ struct scrub_ctx {
int readonly;
int pages_per_rd_bio;
+ /* State of IO submission throttling affecting the associated device */
+ ktime_t throttle_deadline;
+ u64 throttle_sent;
+
int is_dev_replace;
u64 write_pointer;
@@ -605,6 +609,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
spin_lock_init(&sctx->list_lock);
spin_lock_init(&sctx->stat_lock);
init_waitqueue_head(&sctx->list_wait);
+ sctx->throttle_deadline = 0;
WARN_ON(sctx->wr_curr_bio != NULL);
mutex_init(&sctx->wr_lock);
@@ -626,7 +631,6 @@ nomem:
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
void *warn_ctx)
{
- u64 isize;
u32 nlink;
int ret;
int i;
@@ -662,7 +666,6 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
eb = swarn->path->nodes[0];
inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
struct btrfs_inode_item);
- isize = btrfs_inode_size(eb, inode_item);
nlink = btrfs_inode_nlink(eb, inode_item);
btrfs_release_path(swarn->path);
@@ -691,12 +694,12 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
btrfs_warn_in_rcu(fs_info,
-"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
+"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
swarn->errstr, swarn->logical,
rcu_str_deref(swarn->dev->name),
swarn->physical,
root, inum, offset,
- min(isize - offset, (u64)PAGE_SIZE), nlink,
+ fs_info->sectorsize, nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
btrfs_put_root(local_root);
@@ -885,25 +888,25 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
* read all mirrors one after the other. This includes to
* re-read the extent or metadata block that failed (that was
* the cause that this fixup code is called) another time,
- * page by page this time in order to know which pages
+ * sector by sector this time in order to know which sectors
* caused I/O errors and which ones are good (for all mirrors).
* It is the goal to handle the situation when more than one
* mirror contains I/O errors, but the errors do not
* overlap, i.e. the data can be repaired by selecting the
- * pages from those mirrors without I/O error on the
- * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
- * would be that mirror #1 has an I/O error on the first page,
- * the second page is good, and mirror #2 has an I/O error on
- * the second page, but the first page is good.
- * Then the first page of the first mirror can be repaired by
- * taking the first page of the second mirror, and the
- * second page of the second mirror can be repaired by
- * copying the contents of the 2nd page of the 1st mirror.
- * One more note: if the pages of one mirror contain I/O
+ * sectors from those mirrors without I/O error on the
+ * particular sectors. One example (with blocks >= 2 * sectorsize)
+ * would be that mirror #1 has an I/O error on the first sector,
+ * the second sector is good, and mirror #2 has an I/O error on
+ * the second sector, but the first sector is good.
+ * Then the first sector of the first mirror can be repaired by
+ * taking the first sector of the second mirror, and the
+ * second sector of the second mirror can be repaired by
+ * copying the contents of the 2nd sector of the 1st mirror.
+ * One more note: if the sectors of one mirror contain I/O
* errors, the checksum cannot be verified. In order to get
* the best data for repairing, the first attempt is to find
* a mirror without I/O errors and with a validated checksum.
- * Only if this is not possible, the pages are picked from
+ * Only if this is not possible, the sectors are picked from
* mirrors with I/O errors without considering the checksum.
* If the latter is the case, at the end, the checksum of the
* repaired area is verified in order to correctly maintain
@@ -1060,26 +1063,26 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
/*
* In case of I/O errors in the area that is supposed to be
- * repaired, continue by picking good copies of those pages.
- * Select the good pages from mirrors to rewrite bad pages from
+ * repaired, continue by picking good copies of those sectors.
+ * Select the good sectors from mirrors to rewrite bad sectors from
* the area to fix. Afterwards verify the checksum of the block
* that is supposed to be repaired. This verification step is
* only done for the purpose of statistic counting and for the
* final scrub report, whether errors remain.
* A perfect algorithm could make use of the checksum and try
- * all possible combinations of pages from the different mirrors
+ * all possible combinations of sectors from the different mirrors
* until the checksum verification succeeds. For example, when
- * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
+ * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
* of mirror #2 is readable but the final checksum test fails,
- * then the 2nd page of mirror #3 could be tried, whether now
+ * then the 2nd sector of mirror #3 could be tried, whether now
* the final checksum succeeds. But this would be a rare
* exception and is therefore not implemented. At least it is
* avoided that the good copy is overwritten.
* A more useful improvement would be to pick the sectors
* without I/O error based on sector sizes (512 bytes on legacy
- * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
+ * disks) instead of on sectorsize. Then maybe 512 byte of one
* mirror could be repaired by taking 512 byte of a different
- * mirror, even if other 512 byte sectors in the same PAGE_SIZE
+ * mirror, even if other 512 byte sectors in the same sectorsize
* area are unreadable.
*/
success = 1;
@@ -1260,7 +1263,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
{
struct scrub_ctx *sctx = original_sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
- u64 length = original_sblock->page_count * PAGE_SIZE;
+ u64 length = original_sblock->page_count * fs_info->sectorsize;
u64 logical = original_sblock->pagev[0]->logical;
u64 generation = original_sblock->pagev[0]->generation;
u64 flags = original_sblock->pagev[0]->flags;
@@ -1283,13 +1286,13 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
*/
while (length > 0) {
- sublen = min_t(u64, length, PAGE_SIZE);
+ sublen = min_t(u64, length, fs_info->sectorsize);
mapped_length = sublen;
bbio = NULL;
/*
- * with a length of PAGE_SIZE, each returned stripe
- * represents one mirror
+ * With a length of sectorsize, each returned stripe represents
+ * one mirror
*/
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
@@ -1480,7 +1483,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
bio = btrfs_io_bio_alloc(1);
bio_set_dev(bio, spage->dev->bdev);
- bio_add_page(bio, spage->page, PAGE_SIZE, 0);
+ bio_add_page(bio, spage->page, fs_info->sectorsize, 0);
bio->bi_iter.bi_sector = spage->physical >> 9;
bio->bi_opf = REQ_OP_READ;
@@ -1544,6 +1547,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
struct scrub_page *spage_good = sblock_good->pagev[page_num];
struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
+ const u32 sectorsize = fs_info->sectorsize;
BUG_ON(spage_bad->page == NULL);
BUG_ON(spage_good->page == NULL);
@@ -1563,8 +1567,8 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
bio->bi_iter.bi_sector = spage_bad->physical >> 9;
bio->bi_opf = REQ_OP_WRITE;
- ret = bio_add_page(bio, spage_good->page, PAGE_SIZE, 0);
- if (PAGE_SIZE != ret) {
+ ret = bio_add_page(bio, spage_good->page, sectorsize, 0);
+ if (ret != sectorsize) {
bio_put(bio);
return -EIO;
}
@@ -1642,6 +1646,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
{
struct scrub_bio *sbio;
int ret;
+ const u32 sectorsize = sctx->fs_info->sectorsize;
mutex_lock(&sctx->wr_lock);
again:
@@ -1681,16 +1686,16 @@ again:
bio->bi_iter.bi_sector = sbio->physical >> 9;
bio->bi_opf = REQ_OP_WRITE;
sbio->status = 0;
- } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
+ } else if (sbio->physical + sbio->page_count * sectorsize !=
spage->physical_for_dev_replace ||
- sbio->logical + sbio->page_count * PAGE_SIZE !=
+ sbio->logical + sbio->page_count * sectorsize !=
spage->logical) {
scrub_wr_submit(sctx);
goto again;
}
- ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
+ ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
+ if (ret != sectorsize) {
if (sbio->page_count < 1) {
bio_put(sbio->bio);
sbio->bio = NULL;
@@ -1729,7 +1734,8 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
btrfsic_submit_bio(sbio->bio);
if (btrfs_is_zoned(sctx->fs_info))
- sctx->write_pointer = sbio->physical + sbio->page_count * PAGE_SIZE;
+ sctx->write_pointer = sbio->physical + sbio->page_count *
+ sctx->fs_info->sectorsize;
}
static void scrub_wr_bio_end_io(struct bio *bio)
@@ -1988,6 +1994,65 @@ static void scrub_page_put(struct scrub_page *spage)
}
}
+/*
+ * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
+ * second. The limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
+ */
+static void scrub_throttle(struct scrub_ctx *sctx)
+{
+ const int time_slice = 1000;
+ struct scrub_bio *sbio;
+ struct btrfs_device *device;
+ s64 delta;
+ ktime_t now;
+ u32 div;
+ u64 bwlimit;
+
+ sbio = sctx->bios[sctx->curr];
+ device = sbio->dev;
+ bwlimit = READ_ONCE(device->scrub_speed_max);
+ if (bwlimit == 0)
+ return;
+
+ /*
+ * The slice is divided into intervals in which the IO is submitted; the
+ * interval count is derived from bwlimit and capped at 64.
+ */
+ div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
+ div = min_t(u32, 64, div);
+
+ /* Start new epoch, set deadline */
+ now = ktime_get();
+ if (sctx->throttle_deadline == 0) {
+ sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
+ sctx->throttle_sent = 0;
+ }
+
+ /* Still within the time slice? */
+ if (ktime_before(now, sctx->throttle_deadline)) {
+ /* If current bio is within the limit, send it */
+ sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
+ if (sctx->throttle_sent <= div_u64(bwlimit, div))
+ return;
+
+ /* We're over the limit, sleep for the rest of the slice */
+ delta = ktime_ms_delta(sctx->throttle_deadline, now);
+ } else {
+ /* New request after deadline, start new epoch */
+ delta = 0;
+ }
+
+ if (delta) {
+ long timeout;
+
+ timeout = div_u64(delta * HZ, 1000);
+ schedule_timeout_interruptible(timeout);
+ }
+
+ /* Next call will start the deadline period */
+ sctx->throttle_deadline = 0;
+}
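
As a worked example of the interval math above (numbers illustrative): with bwlimit = 100 MiB/s, div = min(64, max(1, 100M / 16M)) = 6, so each epoch lasts 1000 / 6 ≈ 166 ms and admits bwlimit / 6 ≈ 17.4 MiB of submitted IO before the thread sleeps out the remainder of the slice:

	#include <stdio.h>
	#include <stdint.h>

	/* Standalone sketch of the scrub_throttle() interval math */
	int main(void)
	{
		uint64_t bwlimit = 100ULL * 1024 * 1024;	/* 100 MiB/s */
		uint32_t div = bwlimit / (16 * 1024 * 1024);

		if (div < 1)
			div = 1;
		if (div > 64)
			div = 64;

		printf("intervals per slice: %u\n", div);	/* 6 */
		printf("interval length: %u ms\n", 1000 / div);	/* 166 */
		printf("bytes per interval: %llu\n",
		       (unsigned long long)(bwlimit / div));	/* 17476266 */
		return 0;
	}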
+
static void scrub_submit(struct scrub_ctx *sctx)
{
struct scrub_bio *sbio;
@@ -1995,6 +2060,8 @@ static void scrub_submit(struct scrub_ctx *sctx)
if (sctx->curr == -1)
return;
+ scrub_throttle(sctx);
+
sbio = sctx->bios[sctx->curr];
sctx->curr = -1;
scrub_pending_bio_inc(sctx);
@@ -2006,6 +2073,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
{
struct scrub_block *sblock = spage->sblock;
struct scrub_bio *sbio;
+ const u32 sectorsize = sctx->fs_info->sectorsize;
int ret;
again:
@@ -2044,9 +2112,9 @@ again:
bio->bi_iter.bi_sector = sbio->physical >> 9;
bio->bi_opf = REQ_OP_READ;
sbio->status = 0;
- } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
+ } else if (sbio->physical + sbio->page_count * sectorsize !=
spage->physical ||
- sbio->logical + sbio->page_count * PAGE_SIZE !=
+ sbio->logical + sbio->page_count * sectorsize !=
spage->logical ||
sbio->dev != spage->dev) {
scrub_submit(sctx);
@@ -2054,8 +2122,8 @@ again:
}
sbio->pagev[sbio->page_count] = spage;
- ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
+ ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
+ if (ret != sectorsize) {
if (sbio->page_count < 1) {
bio_put(sbio->bio);
sbio->bio = NULL;
@@ -2398,7 +2466,7 @@ static void scrub_block_complete(struct scrub_block *sblock)
if (sblock->sparity && corrupted && !sblock->data_corrected) {
u64 start = sblock->pagev[0]->logical;
u64 end = sblock->pagev[sblock->page_count - 1]->logical +
- PAGE_SIZE;
+ sblock->sctx->fs_info->sectorsize;
ASSERT(end - start <= U32_MAX);
scrub_parity_mark_sectors_error(sblock->sparity,
@@ -2418,7 +2486,7 @@ static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *su
* the csum into @csum.
*
* The search source is sctx->csum_list, which is a pre-populated list
- * storing bytenr ordered csum ranges. We're reponsible to cleanup any range
+ * storing bytenr ordered csum ranges. We're responsible to cleanup any range
* that is before @logical.
*
* Return 0 if there is no csum for the range.
@@ -3138,28 +3206,23 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
physical = map->stripes[num].physical;
offset = 0;
nstripes = div64_u64(length, map->stripe_len);
+ mirror_num = 1;
+ increment = map->stripe_len;
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
offset = map->stripe_len * num;
increment = map->stripe_len * map->num_stripes;
- mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
int factor = map->num_stripes / map->sub_stripes;
offset = map->stripe_len * (num / map->sub_stripes);
increment = map->stripe_len * factor;
mirror_num = num % map->sub_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
- increment = map->stripe_len;
mirror_num = num % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- increment = map->stripe_len;
mirror_num = num % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
get_raid56_logic_offset(physical, num, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map);
- mirror_num = 1;
- } else {
- increment = map->stripe_len;
- mirror_num = 1;
}
path = btrfs_alloc_path();
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index bd69db72acc5..6ac37ae6c811 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2078,16 +2078,6 @@ static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
}
/*
- * Removes the entry from the list and adds it back to the end. This marks the
- * entry as recently used so that name_cache_clean_unused does not remove it.
- */
-static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
-{
- list_del(&nce->list);
- list_add_tail(&nce->list, &sctx->name_cache_list);
-}
-
-/*
* Remove some entries from the beginning of name_cache_list.
*/
static void name_cache_clean_unused(struct send_ctx *sctx)
@@ -2147,7 +2137,13 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
kfree(nce);
nce = NULL;
} else {
- name_cache_used(sctx, nce);
+ /*
+ * Removes the entry from the list and adds it back to
+ * the end. This marks the entry as recently used so
+ * that name_cache_clean_unused does not remove it.
+ */
+ list_move_tail(&nce->list, &sctx->name_cache_list);
+
*parent_ino = nce->parent_ino;
*parent_gen = nce->parent_gen;
ret = fs_path_add(dest, nce->name, nce->name_len);
@@ -4064,6 +4060,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (ret < 0)
goto out;
} else {
+ /*
+ * If we previously orphanized a directory that
+ * collided with a new reference that we already
+ * processed, recompute the current path because
+ * that directory may be part of the path.
+ */
+ if (orphanized_dir) {
+ ret = refresh_ref_path(sctx, cur);
+ if (ret < 0)
+ goto out;
+ }
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
goto out;
@@ -6507,7 +6514,7 @@ static int changed_extent(struct send_ctx *sctx,
* updates the inode item, but it only changes the iversion (sequence
* field in the inode item) of the inode, so if a file is deduplicated
* the same amount of times in both the parent and send snapshots, its
- * iversion becames the same in both snapshots, whence the inode item is
+ * iversion becomes the same in both snapshots, whence the inode item is
* the same on both snapshots.
*/
if (sctx->cur_ino != sctx->cmp_key->objectid)
@@ -7409,23 +7416,21 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
if (ret)
goto out;
- mutex_lock(&fs_info->balance_mutex);
- if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
- mutex_unlock(&fs_info->balance_mutex);
+ spin_lock(&fs_info->send_reloc_lock);
+ if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
+ spin_unlock(&fs_info->send_reloc_lock);
btrfs_warn_rl(fs_info,
- "cannot run send because a balance operation is in progress");
+ "cannot run send because a relocation operation is in progress");
ret = -EAGAIN;
goto out;
}
fs_info->send_in_progress++;
- mutex_unlock(&fs_info->balance_mutex);
+ spin_unlock(&fs_info->send_reloc_lock);
- current->journal_info = BTRFS_SEND_TRANS_STUB;
ret = send_subvol(sctx);
- current->journal_info = NULL;
- mutex_lock(&fs_info->balance_mutex);
+ spin_lock(&fs_info->send_reloc_lock);
fs_info->send_in_progress--;
- mutex_unlock(&fs_info->balance_mutex);
+ spin_unlock(&fs_info->send_reloc_lock);
if (ret < 0)
goto out;
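
Note how this mirrors reloc_chunk_start() earlier in this diff: send tests BTRFS_FS_RELOC_RUNNING and bumps send_in_progress, relocation tests send_in_progress and sets BTRFS_FS_RELOC_RUNNING, and both do so under the same send_reloc_lock spinlock, so at most one of the two can ever be marked running. The heavier balance_mutex (and the BTRFS_SEND_TRANS_STUB journal_info marker) is no longer needed for this exclusion.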
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 2dc674b7c3b1..f79bf85f2439 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -133,18 +133,13 @@
* operations, however they won't be usable until the transaction commits.
*
* COMMIT_TRANS
- * may_commit_transaction() is the ultimate arbiter on whether we commit the
- * transaction or not. In order to avoid constantly churning we do all the
- * above flushing first and then commit the transaction as the last resort.
- * However we need to take into account things like pinned space that would
- * be freed, plus any delayed work we may not have gotten rid of in the case
- * of metadata.
- *
- * FORCE_COMMIT_TRANS
- * For use by the preemptive flusher. We use this to bypass the ticketing
- * checks in may_commit_transaction, as we have more information about the
- * overall state of the system and may want to commit the transaction ahead
- * of actual ENOSPC conditions.
+ * This will commit the transaction. Historically we had a lot of logic
+ * surrounding whether or not we'd commit the transaction, but this was born
+ * out of a pre-tickets era where we could end up committing the transaction
+ * thousands of times in a row without making progress. Now thanks to our
+ * ticketing system we know if we're not making progress and can error
+ * everybody out after a few commits rather than burning the disk hoping for
+ * a different answer.
*
* OVERCOMMIT
*
@@ -197,13 +192,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
if (!space_info)
return -ENOMEM;
- ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
- GFP_KERNEL);
- if (ret) {
- kfree(space_info);
- return ret;
- }
-
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
INIT_LIST_HEAD(&space_info->block_groups[i]);
init_rwsem(&space_info->groups_sem);
@@ -389,7 +377,7 @@ again:
ticket = list_first_entry(head, struct reserve_ticket, list);
- /* Check and see if our ticket can be satisified now. */
+ /* Check and see if our ticket can be satisfied now. */
if ((used + ticket->bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
flush)) {
@@ -495,7 +483,8 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
*/
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
- u64 to_reclaim, bool wait_ordered)
+ u64 to_reclaim, bool wait_ordered,
+ bool for_preempt)
{
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
@@ -532,7 +521,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
* ordered extents, otherwise we'll waste time trying to flush delalloc
* that likely won't give us the space back we need.
*/
- if (ordered_bytes > delalloc_bytes)
+ if (ordered_bytes > delalloc_bytes && !for_preempt)
wait_ordered = true;
loops = 0;
@@ -551,6 +540,14 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
break;
}
+ /*
+ * If this flush is for preemptive reclaim we only want a one-shot
+ * pass of delalloc flushing, so we can stop as soon as we decide we
+ * no longer need to.
+ */
+ if (for_preempt)
+ break;
+
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets) &&
list_empty(&space_info->priority_tickets)) {
@@ -566,109 +563,6 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
}
}
-/**
- * Possibly commit the transaction if its ok to
- *
- * @fs_info: the filesystem
- * @space_info: space_info we are checking for commit, either data or metadata
- *
- * This will check to make sure that committing the transaction will actually
- * get us somewhere and then commit the transaction if it does. Otherwise it
- * will return -ENOSPC.
- */
-static int may_commit_transaction(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
-{
- struct reserve_ticket *ticket = NULL;
- struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
- struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
- struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
- struct btrfs_trans_handle *trans;
- u64 reclaim_bytes = 0;
- u64 bytes_needed = 0;
- u64 cur_free_bytes = 0;
-
- trans = (struct btrfs_trans_handle *)current->journal_info;
- if (trans)
- return -EAGAIN;
-
- spin_lock(&space_info->lock);
- cur_free_bytes = btrfs_space_info_used(space_info, true);
- if (cur_free_bytes < space_info->total_bytes)
- cur_free_bytes = space_info->total_bytes - cur_free_bytes;
- else
- cur_free_bytes = 0;
-
- if (!list_empty(&space_info->priority_tickets))
- ticket = list_first_entry(&space_info->priority_tickets,
- struct reserve_ticket, list);
- else if (!list_empty(&space_info->tickets))
- ticket = list_first_entry(&space_info->tickets,
- struct reserve_ticket, list);
- if (ticket)
- bytes_needed = ticket->bytes;
-
- if (bytes_needed > cur_free_bytes)
- bytes_needed -= cur_free_bytes;
- else
- bytes_needed = 0;
- spin_unlock(&space_info->lock);
-
- if (!bytes_needed)
- return 0;
-
- trans = btrfs_join_transaction(fs_info->extent_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- /*
- * See if there is enough pinned space to make this reservation, or if
- * we have block groups that are going to be freed, allowing us to
- * possibly do a chunk allocation the next loop through.
- */
- if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
- __percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes_needed,
- BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
- goto commit;
-
- /*
- * See if there is some space in the delayed insertion reserve for this
- * reservation. If the space_info's don't match (like for DATA or
- * SYSTEM) then just go enospc, reclaiming this space won't recover any
- * space to satisfy those reservations.
- */
- if (space_info != delayed_rsv->space_info)
- goto enospc;
-
- spin_lock(&delayed_rsv->lock);
- reclaim_bytes += delayed_rsv->reserved;
- spin_unlock(&delayed_rsv->lock);
-
- spin_lock(&delayed_refs_rsv->lock);
- reclaim_bytes += delayed_refs_rsv->reserved;
- spin_unlock(&delayed_refs_rsv->lock);
-
- spin_lock(&trans_rsv->lock);
- reclaim_bytes += trans_rsv->reserved;
- spin_unlock(&trans_rsv->lock);
-
- if (reclaim_bytes >= bytes_needed)
- goto commit;
- bytes_needed -= reclaim_bytes;
-
- if (__percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes_needed,
- BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
- goto enospc;
-
-commit:
- return btrfs_commit_transaction(trans);
-enospc:
- btrfs_end_transaction(trans);
- return -ENOSPC;
-}
-
/*
* Try to flush some data based on policy set by @state. This is only advisory
* and may fail for various reasons. The caller is supposed to examine the
@@ -702,7 +596,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
case FLUSH_DELALLOC:
case FLUSH_DELALLOC_WAIT:
shrink_delalloc(fs_info, space_info, num_bytes,
- state == FLUSH_DELALLOC_WAIT);
+ state == FLUSH_DELALLOC_WAIT, for_preempt);
break;
case FLUSH_DELAYED_REFS_NR:
case FLUSH_DELAYED_REFS:
@@ -743,9 +637,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
btrfs_wait_on_delayed_iputs(fs_info);
break;
case COMMIT_TRANS:
- ret = may_commit_transaction(fs_info, space_info);
- break;
- case FORCE_COMMIT_TRANS:
+ ASSERT(current->journal_info == NULL);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -792,12 +684,14 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info)
{
+ u64 global_rsv_size = fs_info->global_block_rsv.reserved;
u64 ordered, delalloc;
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
u64 used;
/* If we're just plain full then async reclaim just slows us down. */
- if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
+ if ((space_info->bytes_used + space_info->bytes_reserved +
+ global_rsv_size) >= thresh)
return false;
/*
@@ -838,8 +732,10 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
thresh = calc_available_free_space(fs_info, space_info,
BTRFS_RESERVE_FLUSH_ALL);
- thresh += (space_info->total_bytes - space_info->bytes_used -
- space_info->bytes_reserved - space_info->bytes_readonly);
+ used = space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_readonly + global_rsv_size;
+ if (used < space_info->total_bytes)
+ thresh += space_info->total_bytes - used;
thresh >>= space_info->clamp;
used = space_info->bytes_pinned;
@@ -860,14 +756,20 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
* clearly be heavy enough to warrant preemptive flushing. In the case
* of heavy DIO or ordered reservations, preemptive flushing will just
* waste time and cause us to slow down.
+ *
+ * However, we want to make sure we truly are maxed out on ordered, so
+ * cut ordered in half, and if it's still higher than delalloc then we
+ * can keep flushing. This is to avoid the case where we start
+ * flushing, and now delalloc == ordered and we stop preemptively
+ * flushing when we could still have several gigs of delalloc to flush.
*/
- ordered = percpu_counter_read_positive(&fs_info->ordered_bytes);
+ ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
if (ordered >= delalloc)
used += fs_info->delayed_refs_rsv.reserved +
fs_info->delayed_block_rsv.reserved;
else
- used += space_info->bytes_may_use;
+ used += space_info->bytes_may_use - global_rsv_size;
return (used >= thresh && !btrfs_fs_closing(fs_info) &&
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
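
For example (figures illustrative): with 4 GiB counted in ordered_bytes and 3 GiB in delalloc_bytes, the halved ordered value is 2 GiB < 3 GiB, so the delalloc branch is still taken and preemptive flushing keeps targeting delalloc; without the halving, 4 GiB >= 3 GiB would have switched the accounting to the delayed rsvs even though gigabytes of delalloc remained flushable.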
@@ -921,7 +823,6 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
{
struct reserve_ticket *ticket;
u64 tickets_id = space_info->tickets_id;
- u64 first_ticket_bytes = 0;
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
@@ -937,21 +838,6 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
steal_from_global_rsv(fs_info, space_info, ticket))
return true;
- /*
- * may_commit_transaction will avoid committing the transaction
- * if it doesn't feel like the space reclaimed by the commit
- * would result in the ticket succeeding. However if we have a
- * smaller ticket in the queue it may be small enough to be
- * satisified by committing the transaction, so if any
- * subsequent ticket is smaller than the first ticket go ahead
- * and send us back for another loop through the enospc flushing
- * code.
- */
- if (first_ticket_bytes == 0)
- first_ticket_bytes = ticket->bytes;
- else if (first_ticket_bytes > ticket->bytes)
- return true;
-
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
btrfs_info(fs_info, "failing ticket with %llu bytes",
ticket->bytes);
@@ -1117,7 +1003,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
(delayed_block_rsv->reserved +
delayed_refs_rsv->reserved)) {
to_reclaim = space_info->bytes_pinned;
- flush = FORCE_COMMIT_TRANS;
+ flush = COMMIT_TRANS;
} else if (delayed_block_rsv->reserved >
delayed_refs_rsv->reserved) {
to_reclaim = delayed_block_rsv->reserved;
@@ -1171,28 +1057,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* immediately re-usable, it comes in the form of a delayed ref, which must be
* run and then the transaction must be committed.
*
- * FLUSH_DELAYED_REFS
- * The above two cases generate delayed refs that will affect
- * ->total_bytes_pinned. However this counter can be inconsistent with
- * reality if there are outstanding delayed refs. This is because we adjust
- * the counter based solely on the current set of delayed refs and disregard
- * any on-disk state which might include more refs. So for example, if we
- * have an extent with 2 references, but we only drop 1, we'll see that there
- * is a negative delayed ref count for the extent and assume that the space
- * will be freed, and thus increase ->total_bytes_pinned.
- *
- * Running the delayed refs gives us the actual real view of what will be
- * freed at the transaction commit time. This stage will not actually free
- * space for us, it just makes sure that may_commit_transaction() has all of
- * the information it needs to make the right decision.
- *
* COMMIT_TRANS
- * This is where we reclaim all of the pinned space generated by the previous
- * two stages. We will not commit the transaction if we don't think we're
- * likely to satisfy our request, which means if our current free space +
- * total_bytes_pinned < reservation we will not commit. This is why the
- * previous states are actually important, to make sure we know for sure
- * whether committing the transaction will allow us to make progress.
+ * This is where we reclaim all of the pinned space generated by running the
+ * iputs.
*
* ALLOC_CHUNK_FORCE
* For data we start with alloc chunk force, however we could have been full
@@ -1202,7 +1069,6 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_WAIT,
RUN_DELAYED_IPUTS,
- FLUSH_DELAYED_REFS,
COMMIT_TRANS,
ALLOC_CHUNK_FORCE,
};
@@ -1561,6 +1427,15 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
flush == BTRFS_RESERVE_FLUSH_DATA) {
list_add_tail(&ticket.list, &space_info->tickets);
if (!space_info->flush) {
+ /*
+ * We were forced to add a reserve ticket, so
+ * our preemptive flushing is unable to keep
+ * up. Clamp down on the threshold for the
+ * preemptive flushing in order to keep up with
+ * the workload.
+ */
+ maybe_clamp_preempt(fs_info, space_info);
+
space_info->flush = 1;
trace_btrfs_trigger_flush(fs_info,
space_info->flags,
@@ -1572,14 +1447,6 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
list_add_tail(&ticket.list,
&space_info->priority_tickets);
}
-
- /*
- * We were forced to add a reserve ticket, so our preemptive
- * flushing is unable to keep up. Clamp down on the threshold
- * for the preemptive flushing in order to keep up with the
- * workload.
- */
- maybe_clamp_preempt(fs_info, space_info);
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
used += orig_bytes;
/*
@@ -1588,8 +1455,8 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* the async reclaim as we will panic.
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
- need_preemptive_reclaim(fs_info, space_info) &&
- !work_busy(&fs_info->preempt_reclaim_work)) {
+ !work_busy(&fs_info->preempt_reclaim_work) &&
+ need_preemptive_reclaim(fs_info, space_info)) {
trace_btrfs_trigger_flush(fs_info, space_info->flags,
orig_bytes, flush, "preempt");
queue_work(system_unbound_wq,
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index b1a8ffb03b3e..cb5056472e79 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -43,18 +43,6 @@ struct btrfs_space_info {
u64 flags;
- /*
- * bytes_pinned is kept in line with what is actually pinned, as in
- * we've called update_block_group and dropped the bytes_used counter
- * and increased the bytes_pinned counter. However this means that
- * bytes_pinned does not reflect the bytes that will be pinned once the
- * delayed refs are flushed, so this counter is inc'ed every time we
- * call btrfs_free_extent so it is a realtime count of what will be
- * freed once the transaction is committed. It will be zeroed every
- * time the transaction commits.
- */
- struct percpu_counter total_bytes_pinned;
-
struct list_head list;
/* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
@@ -157,22 +145,4 @@ static inline void btrfs_space_info_free_bytes_may_use(
}
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
enum btrfs_reserve_flush_enum flush);
-
-static inline void __btrfs_mod_total_bytes_pinned(
- struct btrfs_space_info *space_info,
- s64 mod)
-{
- percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
-}
-
-static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
- u64 flags, s64 mod)
-{
- struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
-
- ASSERT(space_info);
- __btrfs_mod_total_bytes_pinned(space_info, mod);
-}
-
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 2d19089ab625..640bcd21bf28 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"
+#include "btrfs_inode.h"
/*
* Subpage (sectorsize < PAGE_SIZE) support overview:
@@ -110,10 +111,12 @@ int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
if (!*ret)
return -ENOMEM;
spin_lock_init(&(*ret)->lock);
- if (type == BTRFS_SUBPAGE_METADATA)
+ if (type == BTRFS_SUBPAGE_METADATA) {
atomic_set(&(*ret)->eb_refs, 0);
- else
+ } else {
atomic_set(&(*ret)->readers, 0);
+ atomic_set(&(*ret)->writers, 0);
+ }
return 0;
}
@@ -183,12 +186,10 @@ void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
{
struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
const int nbits = len >> fs_info->sectorsize_bits;
- int ret;
btrfs_subpage_assert(fs_info, page, start, len);
- ret = atomic_add_return(nbits, &subpage->readers);
- ASSERT(ret == nbits);
+ atomic_add(nbits, &subpage->readers);
}
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
@@ -196,10 +197,95 @@ void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
{
struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
const int nbits = len >> fs_info->sectorsize_bits;
+ bool is_data;
+ bool last;
btrfs_subpage_assert(fs_info, page, start, len);
+ is_data = is_data_inode(page->mapping->host);
ASSERT(atomic_read(&subpage->readers) >= nbits);
- if (atomic_sub_and_test(nbits, &subpage->readers))
+ last = atomic_sub_and_test(nbits, &subpage->readers);
+
+ /*
+ * For data we need to unlock the page if the last read has finished.
+ *
+ * Please don't replace @last with an atomic_sub_and_test() call
+ * inside the if () condition, as the atomic_sub_and_test() must
+ * always be executed.
+ */
+ if (is_data && last)
+ unlock_page(page);
+}
+
+static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
+{
+ u64 orig_start = *start;
+ u32 orig_len = *len;
+
+ *start = max_t(u64, page_offset(page), orig_start);
+ *len = min_t(u64, page_offset(page) + PAGE_SIZE,
+ orig_start + orig_len) - *start;
+}
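
For instance, assuming a 64K page whose page_offset() is 65536: a request for [61440, 77824) is clamped to start = max(65536, 61440) = 65536 and len = min(131072, 77824) - 65536 = 12288, i.e. only the part of the range that actually falls inside the page.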
+
+void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const int nbits = (len >> fs_info->sectorsize_bits);
+ int ret;
+
+ btrfs_subpage_assert(fs_info, page, start, len);
+
+ ASSERT(atomic_read(&subpage->readers) == 0);
+ ret = atomic_add_return(nbits, &subpage->writers);
+ ASSERT(ret == nbits);
+}
+
+bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const int nbits = (len >> fs_info->sectorsize_bits);
+
+ btrfs_subpage_assert(fs_info, page, start, len);
+
+ ASSERT(atomic_read(&subpage->writers) >= nbits);
+ return atomic_sub_and_test(nbits, &subpage->writers);
+}
+
+/*
+ * Lock a page for delalloc page writeback.
+ *
+ * Return -EAGAIN if the page is not properly initialized.
+ * Return 0 with the page locked and the writer counter updated.
+ *
+ * Even with 0 returned, the page still needs an extra check to make
+ * sure it's really the correct page, as the caller is using
+ * find_get_pages_contig(), which can race with page invalidation.
+ */
+int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
+ lock_page(page);
+ return 0;
+ }
+ lock_page(page);
+ if (!PagePrivate(page) || !page->private) {
+ unlock_page(page);
+ return -EAGAIN;
+ }
+ btrfs_subpage_clamp_range(page, &start, &len);
+ btrfs_subpage_start_writer(fs_info, page, start, len);
+ return 0;
+}
+
+void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
+ return unlock_page(page);
+ btrfs_subpage_clamp_range(page, &start, &len);
+ if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
unlock_page(page);
}
@@ -354,6 +440,32 @@ void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
spin_unlock_irqrestore(&subpage->lock, flags);
}
+void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->ordered_bitmap |= tmp;
+ SetPageOrdered(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->ordered_bitmap &= ~tmp;
+ if (subpage->ordered_bitmap == 0)
+ ClearPageOrdered(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
/*
* Unlike set/clear which is dependent on each page status, for test all bits
* are tested in the same way.
@@ -376,6 +488,7 @@ IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
+IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
/*
* Note that, in selftests (extent-io-tests), we can have empty fs_info passed
@@ -408,6 +521,34 @@ bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
return test_page_func(page); \
return btrfs_subpage_test_##name(fs_info, page, start, len); \
+} \
+void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+ set_page_func(page); \
+ return; \
+ } \
+ btrfs_subpage_clamp_range(page, &start, &len); \
+ btrfs_subpage_set_##name(fs_info, page, start, len); \
+} \
+void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+ clear_page_func(page); \
+ return; \
+ } \
+ btrfs_subpage_clamp_range(page, &start, &len); \
+ btrfs_subpage_clear_##name(fs_info, page, start, len); \
+} \
+bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
+ return test_page_func(page); \
+ btrfs_subpage_clamp_range(page, &start, &len); \
+ return btrfs_subpage_test_##name(fs_info, page, start, len); \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
PageUptodate);
@@ -416,3 +557,5 @@ IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
PageWriteback);
+IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
+ PageOrdered);
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index bfd626e955be..4d7aca85d915 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -22,6 +22,14 @@ struct btrfs_subpage {
u16 error_bitmap;
u16 dirty_bitmap;
u16 writeback_bitmap;
+ /*
+ * Both data and metadata need to track how many readers there are
+ * for the page.
+ * Data relies on @readers to unlock the page when the last reader
+ * finishes, while metadata doesn't need the page unlock but must
+ * prevent page::private from being cleared before the last
+ * end_page_read().
+ */
+ atomic_t readers;
union {
/*
* Structures only used by metadata
@@ -32,7 +40,10 @@ struct btrfs_subpage {
atomic_t eb_refs;
/* Structures only used by data */
struct {
- atomic_t readers;
+ atomic_t writers;
+
+ /* Track pending ordered extents in this sector */
+ u16 ordered_bitmap;
};
};
};
@@ -63,6 +74,15 @@ void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
struct page *page, u64 start, u32 len);
+void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+
/*
* Template for subpage related operations.
*
@@ -72,6 +92,10 @@ void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
* btrfs_page_*() are for call sites where the page can either be subpage
* specific or regular page. The function will handle both cases.
* But the range still needs to be inside the page.
+ *
+ * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
+ * need to be inside the page. Those functions will truncate the range
+ * automatically.
*/
#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
@@ -85,12 +109,19 @@ void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
struct page *page, u64 start, u32 len); \
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
struct page *page, u64 start, u32 len);
DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(error);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
+DECLARE_BTRFS_SUBPAGE_OPS(ordered);
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
struct page *page, u64 start, u32 len);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4a396c1147f1..d07b18b2b250 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -299,17 +299,6 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
WRITE_ONCE(trans->aborted, errno);
- /* Nothing used. The other threads that have joined this
- * transaction may be able to continue. */
- if (!trans->dirty && list_empty(&trans->new_bgs)) {
- const char *errstr;
-
- errstr = btrfs_decode_error(errno);
- btrfs_warn(fs_info,
- "%s:%d: Aborting unused transaction(%s).",
- function, line, errstr);
- return;
- }
WRITE_ONCE(trans->transaction->aborted, errno);
/* Wake up anybody who may be waiting on this transaction */
wake_up(&fs_info->transaction_wait);
@@ -945,8 +934,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_check_integrity_including_extent_data:
btrfs_info(info,
"enabling check integrity including extent data");
- btrfs_set_opt(info->mount_opt,
- CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
+ btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity:
@@ -1527,7 +1515,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
+ if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
seq_puts(seq, ",check_int_data");
else if (btrfs_test_opt(info, CHECK_INTEGRITY))
seq_puts(seq, ",check_int");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 436ac7b4b334..9d1d140118ff 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -429,7 +429,7 @@ static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
- return scnprintf(buf, PAGE_SIZE, "%lld\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
fs_info->discard_ctl.discard_bitmap_bytes);
}
BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);
@@ -451,7 +451,7 @@ static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
- return scnprintf(buf, PAGE_SIZE, "%lld\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
fs_info->discard_ctl.discard_extent_bytes);
}
BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);
@@ -665,15 +665,6 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
} \
BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
-static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
- struct kobj_attribute *a,
- char *buf)
-{
- struct btrfs_space_info *sinfo = to_space_info(kobj);
- s64 val = percpu_counter_sum(&sinfo->total_bytes_pinned);
- return scnprintf(buf, PAGE_SIZE, "%lld\n", val);
-}
-
SPACE_INFO_ATTR(flags);
SPACE_INFO_ATTR(total_bytes);
SPACE_INFO_ATTR(bytes_used);
@@ -684,8 +675,6 @@ SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(space_info, total_bytes_pinned,
- btrfs_space_info_show_total_bytes_pinned);
static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, flags),
@@ -698,7 +687,6 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
BTRFS_ATTR_PTR(space_info, disk_used),
BTRFS_ATTR_PTR(space_info, disk_total),
- BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
NULL,
};
ATTRIBUTE_GROUPS(space_info);
@@ -706,7 +694,6 @@ ATTRIBUTE_GROUPS(space_info);
static void space_info_release(struct kobject *kobj)
{
struct btrfs_space_info *sinfo = to_space_info(kobj);
- percpu_counter_destroy(&sinfo->total_bytes_pinned);
kfree(sinfo);
}
@@ -1455,6 +1442,33 @@ static ssize_t btrfs_devinfo_replace_target_show(struct kobject *kobj,
}
BTRFS_ATTR(devid, replace_target, btrfs_devinfo_replace_target_show);
+static ssize_t btrfs_devinfo_scrub_speed_max_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_device *device = container_of(kobj, struct btrfs_device,
+ devid_kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ READ_ONCE(device->scrub_speed_max));
+}
+
+static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_device *device = container_of(kobj, struct btrfs_device,
+ devid_kobj);
+ char *endptr;
+ unsigned long long limit;
+
+ limit = memparse(buf, &endptr);
+ WRITE_ONCE(device->scrub_speed_max, limit);
+ return len;
+}
+BTRFS_ATTR_RW(devid, scrub_speed_max, btrfs_devinfo_scrub_speed_max_show,
+ btrfs_devinfo_scrub_speed_max_store);
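
With this attribute in place the limit can be tuned at runtime, e.g. echo 100m > /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max; memparse() accepts the usual k/m/g suffixes, and writing 0 disables throttling, matching the early return in scrub_throttle().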
+
static ssize_t btrfs_devinfo_writeable_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
@@ -1468,10 +1482,40 @@ static ssize_t btrfs_devinfo_writeable_show(struct kobject *kobj,
}
BTRFS_ATTR(devid, writeable, btrfs_devinfo_writeable_show);
+static ssize_t btrfs_devinfo_error_stats_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_device *device = container_of(kobj, struct btrfs_device,
+ devid_kobj);
+
+ if (!device->dev_stats_valid)
+ return scnprintf(buf, PAGE_SIZE, "invalid\n");
+
+ /*
+ * Print all at once so we get a consistent snapshot of all values.
+ * Keep them in sync and in order of definition of
+ * btrfs_dev_stat_values.
+ */
+ return scnprintf(buf, PAGE_SIZE,
+ "write_errs %d\n"
+ "read_errs %d\n"
+ "flush_errs %d\n"
+ "corruption_errs %d\n"
+ "generation_errs %d\n",
+ btrfs_dev_stat_read(device, BTRFS_DEV_STAT_WRITE_ERRS),
+ btrfs_dev_stat_read(device, BTRFS_DEV_STAT_READ_ERRS),
+ btrfs_dev_stat_read(device, BTRFS_DEV_STAT_FLUSH_ERRS),
+ btrfs_dev_stat_read(device, BTRFS_DEV_STAT_CORRUPTION_ERRS),
+ btrfs_dev_stat_read(device, BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+BTRFS_ATTR(devid, error_stats, btrfs_devinfo_error_stats_show);
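
Reading the file, e.g. cat /sys/fs/btrfs/<UUID>/devinfo/<devid>/error_stats, returns the five counters on separate lines in the order defined above, or the single line "invalid" when the device statistics have not been initialized.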
+
static struct attribute *devid_attrs[] = {
+ BTRFS_ATTR_PTR(devid, error_stats),
BTRFS_ATTR_PTR(devid, in_fs_metadata),
BTRFS_ATTR_PTR(devid, missing),
BTRFS_ATTR_PTR(devid, replace_target),
+ BTRFS_ATTR_PTR(devid, scrub_speed_max),
BTRFS_ATTR_PTR(devid, writeable),
NULL
};
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index c0aefe6dee0b..319fed82d741 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -557,7 +557,7 @@ int btrfs_test_extent_map(void)
{
/*
* Test a chunk with 2 data stripes one of which
- * interesects the physical address of the super block
+ * intersects the physical address of the super block
* is correctly recognised.
*/
.raid_type = BTRFS_BLOCK_GROUP_RAID1,
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f75de9f6c0ad..50318231c1a8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -583,9 +583,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
bool do_chunk_alloc = false;
int ret;
- /* Send isn't supposed to start transactions. */
- ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
-
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return ERR_PTR(-EROFS);
@@ -1406,8 +1403,10 @@ int btrfs_defrag_root(struct btrfs_root *root)
while (1) {
trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
ret = btrfs_defrag_leaves(trans, root);
@@ -1476,7 +1475,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ return ret;
}
/*
@@ -1869,31 +1868,6 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
}
/*
- * wait for the current transaction commit to start and block subsequent
- * transaction joins
- */
-static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
- struct btrfs_transaction *trans)
-{
- wait_event(fs_info->transaction_blocked_wait,
- trans->state >= TRANS_STATE_COMMIT_START ||
- TRANS_ABORTED(trans));
-}
-
-/*
- * wait for the current transaction to start and then become unblocked.
- * caller holds ref.
- */
-static void wait_current_trans_commit_start_and_unblock(
- struct btrfs_fs_info *fs_info,
- struct btrfs_transaction *trans)
-{
- wait_event(fs_info->transaction_wait,
- trans->state >= TRANS_STATE_UNBLOCKED ||
- TRANS_ABORTED(trans));
-}
-
-/*
* commit transactions asynchronously. once btrfs_commit_transaction_async
* returns, any subsequent transaction will not be allowed to join.
*/
@@ -1920,8 +1894,7 @@ static void do_async_commit(struct work_struct *work)
kfree(ac);
}
-int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
- int wait_for_unblock)
+int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_async_commit *ac;
@@ -1953,13 +1926,13 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
schedule_work(&ac->work);
-
- /* wait for transaction to start and unblock */
- if (wait_for_unblock)
- wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
- else
- wait_current_trans_commit_start(fs_info, cur_trans);
-
+ /*
+ * Wait for the current transaction commit to start and block
+ * subsequent transaction joins
+ */
+ wait_event(fs_info->transaction_blocked_wait,
+ cur_trans->state >= TRANS_STATE_COMMIT_START ||
+ TRANS_ABORTED(cur_trans));
if (current->journal_info == trans)
current->journal_info = NULL;
@@ -2074,14 +2047,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
ASSERT(refcount_read(&trans->use_count) == 1);
- /*
- * Some places just start a transaction to commit it. We need to make
- * sure that if this commit fails that the abort code actually marks the
- * transaction as failed, so set trans->dirty to make the abort code do
- * the right thing.
- */
- trans->dirty = true;
-
/* Stop the commit early if ->aborted is set */
if (TRANS_ABORTED(cur_trans)) {
ret = cur_trans->aborted;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 364cfbb4c5c5..07d76029f598 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -122,8 +122,6 @@ struct btrfs_transaction {
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
-#define BTRFS_SEND_TRANS_STUB ((void *)1)
-
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
@@ -143,7 +141,6 @@ struct btrfs_trans_handle {
bool allocating_chunk;
bool can_flush_pending_bgs;
bool reloc_reserved;
- bool dirty;
bool in_fsync;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
@@ -227,8 +224,7 @@ void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
-int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
- int wait_for_unblock);
+int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 326be57f2828..cab451d19547 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ if (ret)
+ goto out;
}
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ if (ret)
+ goto out;
}
BTRFS_I(inode)->index_cnt = (u64)-1;
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
if (ret == 1) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ break;
btrfs_release_path(path);
inode = read_one_inode(root, key.offset);
- if (!inode)
- return -EIO;
+ if (!inode) {
+ ret = -EIO;
+ break;
+ }
ret = fixup_inode_link_count(trans, root, inode);
iput(inode);
if (ret)
- goto out;
+ break;
/*
* fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
*/
key.offset = (u64)-1;
}
- ret = 0;
-out:
btrfs_release_path(path);
return ret;
}
@@ -3297,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* begins and releases it only after writing its superblock.
*/
mutex_lock(&fs_info->tree_log_mutex);
+
+ /*
+ * The previous transaction writeout phase could have failed, and thus
+ * marked the fs in an error state. We must not commit here, as we
+ * could have updated our generation in the super_for_commit and
+ * writing the super here would result in transid mismatches. If there
+ * is an error here just bail.
+ */
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ ret = -EIO;
+ btrfs_set_log_full_commit(trans);
+ btrfs_abort_transaction(trans, ret);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ goto out_wake_log_root;
+ }
+
btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
@@ -4447,7 +4468,8 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
ret = btrfs_truncate_inode_items(trans,
root->log_root,
inode, truncate_offset,
- BTRFS_EXTENT_DATA_KEY);
+ BTRFS_EXTENT_DATA_KEY,
+ NULL);
} while (ret == -EAGAIN);
if (ret)
goto out;
@@ -5395,7 +5417,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
&inode->runtime_flags);
while(1) {
ret = btrfs_truncate_inode_items(trans,
- log, inode, 0, 0);
+ log, inode, 0, 0, NULL);
if (ret != -EAGAIN)
break;
}
@@ -5445,13 +5467,23 @@ log_extents:
btrfs_release_path(dst_path);
if (need_log_inode_item) {
err = log_inode_item(trans, log, dst_path, inode);
- if (!err && !xattrs_logged) {
+ if (err)
+ goto out_unlock;
+ /*
+ * If we are doing a fast fsync and the inode was logged before
+ * in this transaction, we don't need to log the xattrs because
+ * they were logged before. If xattrs were added, changed or
+ * deleted since the last time we logged the inode, then we have
+ * already logged them because the inode had the runtime flag
+ * BTRFS_INODE_COPY_EVERYTHING set.
+ */
+ if (!xattrs_logged && inode->logged_trans < trans->transid) {
err = btrfs_log_all_xattrs(trans, root, inode, path,
dst_path);
+ if (err)
+ goto out_unlock;
btrfs_release_path(path);
}
- if (err)
- goto out_unlock;
}
if (fast_search) {
ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
@@ -6350,6 +6382,7 @@ next:
error:
if (wc.trans)
btrfs_end_transaction(wc.trans);
+ clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 47d27059d064..807502cd6510 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -717,7 +717,7 @@ static struct btrfs_fs_devices *find_fsid_changed(
/*
* Handles the case where scanned device is part of an fs that had
- * multiple successful changes of FSID but curently device didn't
+ * multiple successful changes of FSID but currently device didn't
* observe it. Meaning our fsid will be different than theirs. We need
* to handle two subcases :
* 1 - The fs still continues to have different METADATA/FSID uuids.
@@ -1247,7 +1247,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
lockdep_assert_held(&uuid_mutex);
/*
* The device_list_mutex cannot be taken here in case opening the
- * underlying device takes further locks like bd_mutex.
+ * underlying device takes further locks like open_mutex.
*
* We also don't need the lock here as this is called during mount and
* exclusion is provided by uuid_mutex
@@ -1550,7 +1550,7 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
* check to ensure dev extents are not double allocated.
* This makes the function safe to allocate dev extents but may not report
* correct usable device space, as device extent freed in current transaction
- * is not reported as avaiable.
+ * is not reported as available.
*/
static int find_free_dev_extent_start(struct btrfs_device *device,
u64 num_bytes, u64 search_start, u64 *start,
@@ -4217,14 +4217,6 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
btrfs_bg_type_to_raid_name(data_target));
}
- if (fs_info->send_in_progress) {
- btrfs_warn_rl(fs_info,
-"cannot run balance while send operations are in progress (%d in progress)",
- fs_info->send_in_progress);
- ret = -EAGAIN;
- goto out;
- }
-
ret = insert_balance_item(fs_info, bctl);
if (ret && ret != -EEXIST)
goto out;
@@ -6127,17 +6119,17 @@ static bool need_full_stripe(enum btrfs_map_op op)
* @em: mapping containing the logical extent
* @op: type of operation - write or read
* @logical: address that we want to figure out the geometry of
- * @len: the length of IO we are going to perform, starting at @logical
* @io_geom: pointer used to return values
*
* Returns < 0 in case a chunk for the given logical address cannot be found,
* usually shouldn't happen unless @logical is corrupted, 0 otherwise.
*/
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
- enum btrfs_map_op op, u64 logical, u64 len,
+ enum btrfs_map_op op, u64 logical,
struct btrfs_io_geometry *io_geom)
{
struct map_lookup *map;
+ u64 len;
u64 offset;
u64 stripe_offset;
u64 stripe_nr;
@@ -6152,7 +6144,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
offset = logical - em->start;
/* Len of a stripe in a chunk */
stripe_len = map->stripe_len;
- /* Stripe wher this block falls in */
+ /* Stripe where this block falls in */
stripe_nr = div64_u64(offset, stripe_len);
/* Offset of stripe in the chunk */
stripe_offset = stripe_nr * stripe_len;
@@ -6243,7 +6235,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
em = btrfs_get_chunk_map(fs_info, logical, *length);
ASSERT(!IS_ERR(em));
- ret = btrfs_get_io_geometry(fs_info, em, op, logical, *length, &geom);
+ ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
if (ret < 0)
return ret;
@@ -6670,8 +6662,6 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
*
* If devid and uuid are both specified, the match must be exact, otherwise
* only devid is used.
- *
- * If @seed is true, traverse through the seed devices.
*/
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *uuid, u8 *fsid)
@@ -7865,7 +7855,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
ret = -EUCLEAN;
}
- /* Make sure no dev extent is beyond device bondary */
+ /* Make sure no dev extent is beyond device boundary */
dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (!dev) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 9c0d84e5ec06..c7fc7caf575c 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -143,6 +143,9 @@ struct btrfs_device {
struct completion kobj_unregister;
/* For sysfs/FSID/devinfo/devid/ */
struct kobject devid_kobj;
+
+ /* Bandwidth limit for scrub, in bytes per second */
+ u64 scrub_speed_max;
};
/*
@@ -443,7 +446,7 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
- enum btrfs_map_op op, u64 logical, u64 len,
+ enum btrfs_map_op op, u64 logical,
struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
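The functional addition in this header is scrub_speed_max, a per-device
bandwidth cap consumed by the scrub code (exposed, if memory serves, under
the device's sysfs devinfo directory; 0 means no limit). A rough sketch of
how such a cap turns into throttling, purely illustrative and not the actual
scrub implementation; every name except scrub_speed_max is hypothetical:

	u64 limit = READ_ONCE(device->scrub_speed_max);

	if (limit) {
		/* bytes the configured rate allows for the elapsed time */
		u64 allowed = div64_u64(limit * elapsed_ms, 1000);

		if (sent_bytes > allowed)
			msleep(div64_u64((sent_bytes - allowed) * 1000,
					 limit));
	}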
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 1bb8ee97aae0..297c0b1c0634 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -81,7 +81,7 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
* *: Special case, no superblock is written
* 0: Use write pointer of zones[0]
* 1: Use write pointer of zones[1]
- * C: Compare super blcoks from zones[0] and zones[1], use the latest
+ * C: Compare super blocks from zones[0] and zones[1], use the latest
* one determined by generation
* x: Invalid state
*/
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
return (u32)zone;
}
+static inline sector_t zone_start_sector(u32 zone_number,
+ struct block_device *bdev)
+{
+ return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+ struct btrfs_zoned_device_info *zone_info)
+{
+ return (u64)zone_number << zone_info->zone_size_shift;
+}
+
/*
* Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
* device into static sized chunks and fake a conventional zone on each of
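Both helpers express the same multiplication as a shift, one in sectors and
one in bytes. Worked example, assuming 256 MiB zones and 512-byte sectors:
zone_size_shift = ilog2(256 MiB) = 28, bdev_zone_sectors() = 2^28 / 512 =
524288 whose ilog2 is 19, and therefore:

	zone_start_physical(3, zinfo);	/* 3 << 28 = 805306368, i.e. 768 MiB */
	zone_start_sector(3, bdev);	/* 3 << 19 = 1572864 sectors,
					 * 1572864 * 512 = 805306368 bytes */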
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (sb_zone + 1 >= zone_info->nr_zones)
continue;
- sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
- ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+ ret = btrfs_get_dev_zones(device,
+ zone_start_physical(sb_zone, zone_info),
&zone_info->sb_zones[sb_pos],
&nr_zones);
if (ret)
@@ -421,7 +433,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
}
/*
- * If zones[0] is conventional, always use the beggining of the
+ * If zones[0] is conventional, always use the beginning of the
* zone to record superblock. No need to validate in that case.
*/
if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+ ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
zones);
if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
return -ENOENT;
return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- sb_zone << zone_sectors_shift,
+ zone_start_sector(sb_zone, bdev),
zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
if (!(end <= sb_zone ||
sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
have_sb = true;
- pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+ pos = zone_start_physical(
+ sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
break;
}
@@ -1127,6 +1140,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ btrfs_err_in_rcu(fs_info,
+ "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+ zone.start << SECTOR_SHIFT,
+ rcu_str_deref(device->name), device->devid);
ret = -EIO;
goto out;
}
@@ -1187,6 +1204,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
case 0: /* single */
+ if (alloc_offsets[0] == WP_MISSING_DEV) {
+ btrfs_err(fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ physical);
+ ret = -EIO;
+ goto out;
+ }
cache->alloc_offset = alloc_offsets[0];
break;
case BTRFS_BLOCK_GROUP_DUP:
@@ -1204,6 +1228,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
out:
+ if (cache->alloc_offset > fs_info->zone_size) {
+ btrfs_err(fs_info,
+ "zoned: invalid write pointer %llu in block group %llu",
+ cache->alloc_offset, cache->start);
+ ret = -EIO;
+ }
+
/* An extent is allocated after the write pointer */
if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
btrfs_err(fs_info,
@@ -1502,3 +1533,24 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
length = wp - physical_pos;
return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
+
+struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ struct btrfs_device *device;
+ struct extent_map *em;
+ struct map_lookup *map;
+
+ em = btrfs_get_chunk_map(fs_info, logical, length);
+ if (IS_ERR(em))
+ return ERR_CAST(em);
+
+ map = em->map_lookup;
+ /* We only support single profile for now */
+ ASSERT(map->num_stripes == 1);
+ device = map->stripes[0].dev;
+
+ free_extent_map(em);
+
+ return device;
+}
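btrfs_zoned_get_device() resolves the one device backing a logical range;
the ASSERT makes the current single-stripe-profile restriction explicit. A
hypothetical caller:

	struct btrfs_device *device;

	device = btrfs_zoned_get_device(fs_info, logical, length);
	if (IS_ERR(device))
		return PTR_ERR(device);	/* -EOPNOTSUPP without zoned support */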
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index e55d32595c2c..b0ae2608cb6b 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -65,6 +65,8 @@ void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
u64 physical_start, u64 physical_pos);
+struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -191,6 +193,13 @@ static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
return -EOPNOTSUPP;
}
+static inline struct btrfs_device *btrfs_zoned_get_device(
+ struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
diff --git a/fs/buffer.c b/fs/buffer.c
index ea48c01fb76b..6290c3afdba4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -589,31 +589,6 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
- * Mark the page dirty, and set it dirty in the page cache, and mark the inode
- * dirty.
- *
- * If warn is true, then emit a warning if the page is not uptodate and has
- * not been truncated.
- *
- * The caller must hold lock_page_memcg().
- */
-void __set_page_dirty(struct page *page, struct address_space *mapping,
- int warn)
-{
- unsigned long flags;
-
- xa_lock_irqsave(&mapping->i_pages, flags);
- if (page->mapping) { /* Race with truncate? */
- WARN_ON_ONCE(warn && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- __xa_set_mark(&mapping->i_pages, page_index(page),
- PAGECACHE_TAG_DIRTY);
- }
- xa_unlock_irqrestore(&mapping->i_pages, flags);
-}
-EXPORT_SYMBOL_GPL(__set_page_dirty);
-
-/*
* Add a page to the dirty page list.
*
* It is a sad fact of life that this function is called from several places
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 5624fae7a603..9ba79b6531fb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -668,14 +668,13 @@ out:
* Handle lookups for the hidden .snap directory.
*/
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err)
+ struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
/* .snap dir? */
- if (err == -ENOENT &&
- ceph_snap(parent) == CEPH_NOSNAP &&
+ if (ceph_snap(parent) == CEPH_NOSNAP &&
strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
struct dentry *res;
struct inode *inode = ceph_get_snapdir(parent);
@@ -742,7 +741,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
- struct dentry *res;
int op;
int mask;
int err;
@@ -793,12 +791,16 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
err = ceph_mdsc_do_request(mdsc, NULL, req);
- res = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(res)) {
- err = PTR_ERR(res);
- } else {
- dentry = res;
- err = 0;
+ if (err == -ENOENT) {
+ struct dentry *res;
+
+ res = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ } else {
+ dentry = res;
+ err = 0;
+ }
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 77fc037d5beb..d51af3698032 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -578,6 +578,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct inode *inode;
struct timespec64 now;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@@ -615,8 +616,10 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
ceph_file_layout_to_legacy(lo, &in.layout);
+ down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
req->r_fmode, NULL);
+ up_read(&mdsc->snap_rwsem);
if (ret) {
dout("%s failed to fill inode: %d\n", __func__, ret);
ceph_dir_clear_complete(dir);
@@ -739,14 +742,16 @@ retry:
err = ceph_mdsc_do_request(mdsc,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
- dentry = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- goto out_req;
+ if (err == -ENOENT) {
+ dentry = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out_req;
+ }
+ err = 0;
}
- err = 0;
- if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+ if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
if (d_in_lookup(dentry)) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e1c63adb196d..df0c8a724609 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -777,6 +777,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
umode_t mode = le32_to_cpu(info->mode);
dev_t rdev = le32_to_cpu(info->rdev);
+ lockdep_assert_held(&mdsc->snap_rwsem);
+
dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
inode, ceph_vinop(inode), le64_to_cpu(info->version),
ci->i_version);
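The assertion pins down the locking contract that the async-create path
above was missing: ceph_fill_inode() must run under snap_rwsem, which the
new down_read()/up_read() pair now guarantees. The check itself is free on
non-lockdep builds:

	/* Splats at runtime (CONFIG_LOCKDEP builds only) if the caller
	 * holds the rwsem in neither read nor write mode; compiles away
	 * otherwise.
	 */
	lockdep_assert_held(&mdsc->snap_rwsem);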
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index db80d89556b1..839e6b0239ee 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1218,7 +1218,7 @@ extern const struct dentry_operations ceph_dentry_ops;
extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err);
+ struct dentry *dentry);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index bf52e9326ebe..7364950a9ef4 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -19,6 +19,8 @@ config CIFS
select CRYPTO_LIB_DES
select KEYS
select DNS_RESOLVER
+ select ASN1
+ select OID_REGISTRY
help
This is the client VFS module for the SMB3 family of NAS protocols,
(including support for the most recent, most secure dialect SMB3.1.1)
@@ -57,6 +59,7 @@ config CIFS
config CIFS_STATS2
bool "Extended statistics"
depends on CIFS
+ default y
help
Enabling this option will allow more detailed statistics on SMB
request timing to be displayed in /proc/fs/cifs/DebugData and also
@@ -65,8 +68,7 @@ config CIFS_STATS2
for more details. These additional statistics may have a minor effect
on performance and memory utilization.
- Unless you are a developer or are doing network performance analysis
- or tuning, say N.
+ If unsure, say Y.
config CIFS_ALLOW_INSECURE_LEGACY
bool "Support legacy servers which use less secure dialects"
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 3ee3b7de4ded..87fcacdf3de7 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,12 +6,16 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_CIFS) += cifs.o
cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
- inode.o link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
+ inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
cifs_unicode.o nterr.o cifsencrypt.o \
readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
smb2ops.o smb2maperror.o smb2transport.o \
smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
- dns_resolve.o
+ dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o
+
+$(obj)/asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.h
+
+$(obj)/cifs_spnego_negtokeninit.asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.c $(obj)/cifs_spnego_negtokeninit.asn1.h
cifs-$(CONFIG_CIFS_XATTR) += xattr.o
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 3150c19cdc2f..b5724ef9f182 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -1,612 +1,63 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * The ASB.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
- * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
- *
- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
- */
#include <linux/module.h>
-#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include "cifspdu.h"
+#include <linux/oid_registry.h>
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cifsproto.h"
+#include "cifs_spnego_negtokeninit.asn1.h"
-/*****************************************************************************
- *
- * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* Class */
-#define ASN1_UNI 0 /* Universal */
-#define ASN1_APL 1 /* Application */
-#define ASN1_CTX 2 /* Context */
-#define ASN1_PRV 3 /* Private */
-
-/* Tag */
-#define ASN1_EOC 0 /* End Of Contents or N/A */
-#define ASN1_BOL 1 /* Boolean */
-#define ASN1_INT 2 /* Integer */
-#define ASN1_BTS 3 /* Bit String */
-#define ASN1_OTS 4 /* Octet String */
-#define ASN1_NUL 5 /* Null */
-#define ASN1_OJI 6 /* Object Identifier */
-#define ASN1_OJD 7 /* Object Description */
-#define ASN1_EXT 8 /* External */
-#define ASN1_ENUM 10 /* Enumerated */
-#define ASN1_SEQ 16 /* Sequence */
-#define ASN1_SET 17 /* Set */
-#define ASN1_NUMSTR 18 /* Numerical String */
-#define ASN1_PRNSTR 19 /* Printable String */
-#define ASN1_TEXSTR 20 /* Teletext String */
-#define ASN1_VIDSTR 21 /* Video String */
-#define ASN1_IA5STR 22 /* IA5 String */
-#define ASN1_UNITIM 23 /* Universal Time */
-#define ASN1_GENTIM 24 /* General Time */
-#define ASN1_GRASTR 25 /* Graphical String */
-#define ASN1_VISSTR 26 /* Visible String */
-#define ASN1_GENSTR 27 /* General String */
-
-/* Primitive / Constructed methods*/
-#define ASN1_PRI 0 /* Primitive */
-#define ASN1_CON 1 /* Constructed */
-
-/*
- * Error codes.
- */
-#define ASN1_ERR_NOERROR 0
-#define ASN1_ERR_DEC_EMPTY 2
-#define ASN1_ERR_DEC_EOC_MISMATCH 3
-#define ASN1_ERR_DEC_LENGTH_MISMATCH 4
-#define ASN1_ERR_DEC_BADVALUE 5
-
-#define SPNEGO_OID_LEN 7
-#define NTLMSSP_OID_LEN 10
-#define KRB5_OID_LEN 7
-#define KRB5U2U_OID_LEN 8
-#define MSKRB5_OID_LEN 7
-static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
-static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
-static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
-static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
-static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
-
-/*
- * ASN.1 context.
- */
-struct asn1_ctx {
- int error; /* Error condition */
- unsigned char *pointer; /* Octet just to be decoded */
- unsigned char *begin; /* First octet */
- unsigned char *end; /* Octet after last octet */
-};
-
-/*
- * Octet string (not null terminated)
- */
-struct asn1_octstr {
- unsigned char *data;
- unsigned int len;
-};
-
-static void
-asn1_open(struct asn1_ctx *ctx, unsigned char *buf, unsigned int len)
-{
- ctx->begin = buf;
- ctx->end = buf + len;
- ctx->pointer = buf;
- ctx->error = ASN1_ERR_NOERROR;
-}
-
-static unsigned char
-asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
-{
- if (ctx->pointer >= ctx->end) {
- ctx->error = ASN1_ERR_DEC_EMPTY;
- return 0;
- }
- *ch = *(ctx->pointer)++;
- return 1;
-}
-
-#if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */
-static unsigned char
-asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
-{
- unsigned char ch;
-
- if (ctx->pointer >= ctx->end) {
- ctx->error = ASN1_ERR_DEC_EMPTY;
- return 0;
- }
-
- ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
- if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
- *val = *(++(ctx->pointer)); /* value has enum value */
- else
- return 0;
-
- ctx->pointer++;
- return 1;
-}
-#endif
-
-static unsigned char
-asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
-{
- unsigned char ch;
-
- *tag = 0;
-
- do {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
- *tag <<= 7;
- *tag |= ch & 0x7F;
- } while ((ch & 0x80) == 0x80);
- return 1;
-}
-
-static unsigned char
-asn1_id_decode(struct asn1_ctx *ctx,
- unsigned int *cls, unsigned int *con, unsigned int *tag)
-{
- unsigned char ch;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *cls = (ch & 0xC0) >> 6;
- *con = (ch & 0x20) >> 5;
- *tag = (ch & 0x1F);
-
- if (*tag == 0x1F) {
- if (!asn1_tag_decode(ctx, tag))
- return 0;
- }
- return 1;
-}
-
-static unsigned char
-asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len)
-{
- unsigned char ch, cnt;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch == 0x80)
- *def = 0;
- else {
- *def = 1;
-
- if (ch < 0x80)
- *len = ch;
- else {
- cnt = (unsigned char) (ch & 0x7F);
- *len = 0;
-
- while (cnt > 0) {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
- *len <<= 8;
- *len |= ch;
- cnt--;
- }
- }
- }
-
- /* don't trust len bigger than ctx buffer */
- if (*len > ctx->end - ctx->pointer)
- return 0;
-
- return 1;
-}
-
-static unsigned char
-asn1_header_decode(struct asn1_ctx *ctx,
- unsigned char **eoc,
- unsigned int *cls, unsigned int *con, unsigned int *tag)
-{
- unsigned int def = 0;
- unsigned int len = 0;
-
- if (!asn1_id_decode(ctx, cls, con, tag))
- return 0;
-
- if (!asn1_length_decode(ctx, &def, &len))
- return 0;
-
- /* primitive shall be definite, indefinite shall be constructed */
- if (*con == ASN1_PRI && !def)
- return 0;
-
- if (def)
- *eoc = ctx->pointer + len;
- else
- *eoc = NULL;
- return 1;
-}
-
-static unsigned char
-asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
+int
+decode_negTokenInit(unsigned char *security_blob, int length,
+ struct TCP_Server_Info *server)
{
- unsigned char ch;
-
- if (eoc == NULL) {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch != 0x00) {
- ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch != 0x00) {
- ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
- return 0;
- }
- return 1;
- } else {
- if (ctx->pointer != eoc) {
- ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
- return 0;
- }
+ if (asn1_ber_decoder(&cifs_spnego_negtokeninit_decoder, server,
+ security_blob, length) == 0)
return 1;
- }
-}
-
-/* static unsigned char asn1_null_decode(struct asn1_ctx *ctx,
- unsigned char *eoc)
-{
- ctx->pointer = eoc;
- return 1;
-}
-
-static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
- unsigned char *eoc, long *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer = (signed char) ch;
- len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof(long)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
-}
-
-static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned int *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer = ch;
- if (ch == 0)
- len = 0;
else
- len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof(unsigned int)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
-}
-
-static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned long *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
return 0;
-
- *integer = ch;
- if (ch == 0)
- len = 0;
- else
- len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof(unsigned long)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
}
-static unsigned char
-asn1_octets_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned char **octets, unsigned int *len)
+int cifs_gssapi_this_mech(void *context, size_t hdrlen,
+ unsigned char tag, const void *value, size_t vlen)
{
- unsigned char *ptr;
-
- *len = 0;
-
- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
- if (*octets == NULL) {
- return 0;
- }
-
- ptr = *octets;
- while (ctx->pointer < eoc) {
- if (!asn1_octet_decode(ctx, (unsigned char *) ptr++)) {
- kfree(*octets);
- *octets = NULL;
- return 0;
- }
- (*len)++;
- }
- return 1;
-} */
-
-static unsigned char
-asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
-{
- unsigned char ch;
-
- *subid = 0;
-
- do {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *subid <<= 7;
- *subid |= ch & 0x7F;
- } while ((ch & 0x80) == 0x80);
- return 1;
-}
-
-static int
-asn1_oid_decode(struct asn1_ctx *ctx,
- unsigned char *eoc, unsigned long **oid, unsigned int *len)
-{
- unsigned long subid;
- unsigned int size;
- unsigned long *optr;
-
- size = eoc - ctx->pointer + 1;
-
- /* first subid actually encodes first two subids */
- if (size < 2 || size > UINT_MAX/sizeof(unsigned long))
- return 0;
-
- *oid = kmalloc_array(size, sizeof(unsigned long), GFP_ATOMIC);
- if (*oid == NULL)
- return 0;
-
- optr = *oid;
-
- if (!asn1_subid_decode(ctx, &subid)) {
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
-
- if (subid < 40) {
- optr[0] = 0;
- optr[1] = subid;
- } else if (subid < 80) {
- optr[0] = 1;
- optr[1] = subid - 40;
- } else {
- optr[0] = 2;
- optr[1] = subid - 80;
- }
-
- *len = 2;
- optr += 2;
+ enum OID oid;
- while (ctx->pointer < eoc) {
- if (++(*len) > size) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
+ oid = look_up_OID(value, vlen);
+ if (oid != OID_spnego) {
+ char buf[50];
- if (!asn1_subid_decode(ctx, optr++)) {
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
+ sprint_oid(value, vlen, buf, sizeof(buf));
+ cifs_dbg(FYI, "Error decoding negTokenInit header: unexpected OID %s\n",
+ buf);
+ return -EBADMSG;
}
- return 1;
+ return 0;
}
-static int
-compare_oid(unsigned long *oid1, unsigned int oid1len,
- unsigned long *oid2, unsigned int oid2len)
+int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
{
- unsigned int i;
+ struct TCP_Server_Info *server = context;
+ enum OID oid;
- if (oid1len != oid2len)
- return 0;
+ oid = look_up_OID(value, vlen);
+ if (oid == OID_mskrb5)
+ server->sec_mskerberos = true;
+ else if (oid == OID_krb5u2u)
+ server->sec_kerberosu2u = true;
+ else if (oid == OID_krb5)
+ server->sec_kerberos = true;
+ else if (oid == OID_ntlmssp)
+ server->sec_ntlmssp = true;
else {
- for (i = 0; i < oid1len; i++) {
- if (oid1[i] != oid2[i])
- return 0;
- }
- return 1;
- }
-}
-
- /* BB check for endian conversion issues here */
-
-int
-decode_negTokenInit(unsigned char *security_blob, int length,
- struct TCP_Server_Info *server)
-{
- struct asn1_ctx ctx;
- unsigned char *end;
- unsigned char *sequence_end;
- unsigned long *oid = NULL;
- unsigned int cls, con, tag, oidlen, rc;
-
- /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
-
- asn1_open(&ctx, security_blob, length);
+ char buf[50];
- /* GSSAPI header */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cifs_dbg(FYI, "Error decoding negTokenInit header\n");
- return 0;
- } else if ((cls != ASN1_APL) || (con != ASN1_CON)
- || (tag != ASN1_EOC)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d\n", cls, con, tag);
- return 0;
+ sprint_oid(value, vlen, buf, sizeof(buf));
+ cifs_dbg(FYI, "Decoding negTokenInit: unsupported OID %s\n",
+ buf);
}
-
- /* Check for SPNEGO OID -- remember to free obj->oid */
- rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
- if (rc) {
- if ((tag == ASN1_OJI) && (con == ASN1_PRI) &&
- (cls == ASN1_UNI)) {
- rc = asn1_oid_decode(&ctx, end, &oid, &oidlen);
- if (rc) {
- rc = compare_oid(oid, oidlen, SPNEGO_OID,
- SPNEGO_OID_LEN);
- kfree(oid);
- }
- } else
- rc = 0;
- }
-
- /* SPNEGO OID not present or garbled -- bail out */
- if (!rc) {
- cifs_dbg(FYI, "Error decoding negTokenInit header\n");
- return 0;
- }
-
- /* SPNEGO */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cifs_dbg(FYI, "Error decoding negTokenInit\n");
- return 0;
- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
- || (tag != ASN1_EOC)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
- cls, con, tag, end);
- return 0;
- }
-
- /* negTokenInit */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cifs_dbg(FYI, "Error decoding negTokenInit\n");
- return 0;
- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
- || (tag != ASN1_SEQ)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
- cls, con, tag, end);
- return 0;
- }
-
- /* sequence */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cifs_dbg(FYI, "Error decoding 2nd part of negTokenInit\n");
- return 0;
- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
- || (tag != ASN1_EOC)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
- cls, con, tag, end);
- return 0;
- }
-
- /* sequence of */
- if (asn1_header_decode
- (&ctx, &sequence_end, &cls, &con, &tag) == 0) {
- cifs_dbg(FYI, "Error decoding 2nd part of negTokenInit\n");
- return 0;
- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
- || (tag != ASN1_SEQ)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
- cls, con, tag, sequence_end);
- return 0;
- }
-
- /* list of security mechanisms */
- while (!asn1_eoc_decode(&ctx, sequence_end)) {
- rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
- if (!rc) {
- cifs_dbg(FYI, "Error decoding negTokenInit hdr exit2\n");
- return 0;
- }
- if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
- if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) {
-
- cifs_dbg(FYI, "OID len = %d oid = 0x%lx 0x%lx 0x%lx 0x%lx\n",
- oidlen, *oid, *(oid + 1), *(oid + 2),
- *(oid + 3));
-
- if (compare_oid(oid, oidlen, MSKRB5_OID,
- MSKRB5_OID_LEN))
- server->sec_mskerberos = true;
- else if (compare_oid(oid, oidlen, KRB5U2U_OID,
- KRB5U2U_OID_LEN))
- server->sec_kerberosu2u = true;
- else if (compare_oid(oid, oidlen, KRB5_OID,
- KRB5_OID_LEN))
- server->sec_kerberos = true;
- else if (compare_oid(oid, oidlen, NTLMSSP_OID,
- NTLMSSP_OID_LEN))
- server->sec_ntlmssp = true;
-
- kfree(oid);
- }
- } else {
- cifs_dbg(FYI, "Should be an oid what is going on?\n");
- }
- }
-
- /*
- * We currently ignore anything at the end of the SPNEGO blob after
- * the mechTypes have been parsed, since none of that info is
- * used at the moment.
- */
- return 1;
+ return 0;
}
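Net effect of this rewrite: the open-coded BER walker is gone and decoding
is driven by a table that scripts/asn1_compiler generates at build time from
cifs_spnego_negtokeninit.asn1 (hence the new Makefile dependencies earlier).
The ({...}) annotations in the grammar name action callbacks, which must all
match the decoder's fixed callback signature; the context pointer given to
asn1_ber_decoder() is passed through to every callback, which is how the
TCP_Server_Info reaches cifs_neg_token_init_mech_type(). In brief:

	/* fixed action-callback signature expected by the generated table */
	int action(void *context, size_t hdrlen, unsigned char tag,
		   const void *value, size_t vlen);

	/* decoding the blob then reduces to one call, 0 meaning success */
	ret = asn1_ber_decoder(&cifs_spnego_negtokeninit_decoder, server,
			       security_blob, length);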
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 488fe0ffc1ef..8a3b30ec860c 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cache.c - CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "fscache.h"
#include "cifs_debug.h"
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 68e8e5b27841..8857ac7e7a14 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -50,7 +50,6 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
void cifs_dump_mids(struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
- struct list_head *tmp;
struct mid_q_entry *mid_entry;
if (server == NULL)
@@ -58,8 +57,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
cifs_dbg(VFS, "Dump pending requests:\n");
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
mid_entry->mid_state,
le16_to_cpu(mid_entry->command),
@@ -168,7 +166,7 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
{
- struct list_head *stmp, *tmp, *tmp1, *tmp2;
+ struct list_head *tmp, *tmp1, *tmp2;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -183,9 +181,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
seq_printf(m, " <filename>\n");
#endif /* CIFS_DEBUG2 */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(stmp, &cifs_tcp_ses_list) {
- server = list_entry(stmp, struct TCP_Server_Info,
- tcp_ses_list);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
list_for_each(tmp1, &ses->tcon_list) {
@@ -220,7 +216,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
- struct list_head *tmp1, *tmp2, *tmp3;
+ struct list_head *tmp2, *tmp3;
struct mid_q_entry *mid_entry;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
@@ -278,11 +274,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp1, &cifs_tcp_ses_list) {
- server = list_entry(tmp1, struct TCP_Server_Info,
- tcp_ses_list);
-
- /* channel info will be printed as a part of sessions below */
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (server->is_channel)
continue;
@@ -563,7 +555,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_CIFS_STATS2
int j;
#endif /* STATS2 */
- struct list_head *tmp1, *tmp2, *tmp3;
+ struct list_head *tmp2, *tmp3;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -594,9 +586,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
i = 0;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp1, &cifs_tcp_ses_list) {
- server = list_entry(tmp1, struct TCP_Server_Info,
- tcp_ses_list);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
seq_printf(m, "\nMax requests in flight: %d", server->max_in_flight);
#ifdef CONFIG_CIFS_STATS2
seq_puts(m, "\nTotal time spent processing by command. Time ");
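The list-iteration cleanups in this file are all the same mechanical
substitution: list_for_each() plus a list_entry() (container_of) step
becomes the typed iterator, dropping the raw cursor variable. For reference:

	/* before */
	struct list_head *tmp;

	list_for_each(tmp, &server->pending_mid_q) {
		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
		/* use mid_entry */
	}

	/* after */
	list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
		/* use mid_entry */
	}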
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 5e66dab712d0..ee4ea2b60c0f 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -3,7 +3,7 @@
*
* Copyright (c) International Business Machines Corp., 2000,2002
* Modified by Steve French (sfrench@us.ibm.com)
-*/
+ */
#ifndef _H_CIFS_DEBUG
#define _H_CIFS_DEBUG
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index c87c37cf2914..ec57cdb1590f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -125,7 +125,7 @@ cifs_build_devname(char *nodename, const char *prepath)
* @sb_mountdata: parent/root DFS mount options (template)
* @fullpath: full path in UNC format
* @ref: optional server's referral
- *
+ * @devname: return the built cifs device name if the passed pointer is not NULL
* creates mount options for submount based on template options sb_mountdata
* and replacing unc,ip,prefixpath options with ones we've got from ref_unc.
*
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9c45b3a82ad9..4fd788586399 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifs_fs_sb.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
*/
#include <linux/rbtree.h>
@@ -72,11 +63,12 @@ struct cifs_sb_info {
char *prepath;
/*
- * Path initially provided by the mount call. We might connect
- * to something different via DFS but we want to keep it to do
- * failover properly.
+ * Canonical DFS path initially provided by the mount call. We might connect to something
+ * different via DFS but we want to keep it to do failover properly.
*/
char *origin_fullpath; /* \\HOST\SHARE\[OPTIONAL PATH] */
+ /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
+ uuid_t dfs_mount_id;
/*
* Indicate whether serverino option was turned off later
* (cifs_autodisable_serverino) in order to match new mounts.
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 4a97fe12006b..ef723be358af 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifs_ioctl.h
*
@@ -5,16 +6,6 @@
*
* Copyright (c) 2015 Steve French <steve.french@primarydata.com>
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
*/
struct smb_mnt_fs_info {
@@ -72,15 +63,28 @@ struct smb3_key_debug_info {
} __packed;
/*
- * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes)
- * is needed if GCM256 (stronger encryption) negotiated
+ * Dump variable-sized keys
*/
struct smb3_full_key_debug_info {
- __u64 Suid;
+ /* INPUT: size of userspace buffer */
+ __u32 in_size;
+
+ /*
+ * INPUT: 0 for current user, otherwise session to dump
+ * OUTPUT: session id that was dumped
+ */
+ __u64 session_id;
__u16 cipher_type;
- __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
- __u8 smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
- __u8 smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
+ __u8 session_key_length;
+ __u8 server_in_key_length;
+ __u8 server_out_key_length;
+ __u8 data[];
+ /*
+ * return this struct with the keys appended at the end:
+ * __u8 session_key[session_key_length];
+ * __u8 server_in_key[server_in_key_length];
+ * __u8 server_out_key[server_out_key_length];
+ */
} __packed;
struct smb3_notify {
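Since the struct is now variable-sized, userspace allocates a buffer, sets
in_size to the allocated size, and on return walks the three keys that the
kernel appends back to back in the documented order. A hypothetical
consumer, with info pointing at the returned buffer:

	__u8 *session_key    = info->data;
	__u8 *server_in_key  = session_key + info->session_key_length;
	__u8 *server_out_key = server_in_key + info->server_in_key_length;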
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 7b9b876b513b..8fa26a8530f8 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/list.h>
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index 31bef9ee078b..31387d0ea32e 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
*
@@ -5,19 +6,6 @@
* Author(s): Jeff Layton (jlayton@redhat.com)
* Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFS_SPNEGO_H
diff --git a/fs/cifs/cifs_spnego_negtokeninit.asn1 b/fs/cifs/cifs_spnego_negtokeninit.asn1
new file mode 100644
index 000000000000..181c083887d5
--- /dev/null
+++ b/fs/cifs/cifs_spnego_negtokeninit.asn1
@@ -0,0 +1,40 @@
+GSSAPI ::=
+ [APPLICATION 0] IMPLICIT SEQUENCE {
+ thisMech
+ OBJECT IDENTIFIER ({cifs_gssapi_this_mech}),
+ negotiationToken
+ NegotiationToken
+ }
+
+MechType ::= OBJECT IDENTIFIER ({cifs_neg_token_init_mech_type})
+
+MechTypeList ::= SEQUENCE OF MechType
+
+NegHints ::= SEQUENCE {
+ hintName
+ [0] GeneralString OPTIONAL,
+ hintAddress
+ [1] OCTET STRING OPTIONAL
+ }
+
+NegTokenInit2 ::=
+ SEQUENCE {
+ mechTypes
+ [0] MechTypeList OPTIONAL,
+ reqFlags
+ [1] BIT STRING OPTIONAL,
+ mechToken
+ [2] OCTET STRING OPTIONAL,
+ negHints
+ [3] NegHints OPTIONAL,
+ mechListMIC
+ [3] OCTET STRING OPTIONAL
+ }
+
+NegotiationToken ::=
+ CHOICE {
+ negTokenInit
+ [0] NegTokenInit2,
+ negTokenTarg
+ [1] ANY
+ }
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index d829b8bf833e..93b47818c6c2 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -447,15 +447,13 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
const struct sockaddr_storage *old,
struct sockaddr_storage *dst)
{
- __be16 port;
+ __be16 port = cpu_to_be16(CIFS_PORT);
if (old->ss_family == AF_INET) {
struct sockaddr_in *ipv4 = (struct sockaddr_in *)old;
port = ipv4->sin_port;
- }
-
- if (old->ss_family == AF_INET6) {
+ } else if (old->ss_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old;
port = ipv6->sin6_port;
@@ -465,9 +463,7 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
struct sockaddr_in *ipv4 = (struct sockaddr_in *)new;
ipv4->sin_port = port;
- }
-
- if (new->ss_family == AF_INET6) {
+ } else if (new->ss_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new;
ipv6->sin6_port = port;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 784407f9280f..388eb536cff1 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cifsacl.c
*
@@ -6,19 +7,6 @@
*
* Contains the routines for mapping CIFS/NTFS ACLs
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
@@ -409,7 +397,6 @@ try_upcall_to_get_id:
saved_cred = override_creds(root_cred);
sidkey = request_key(&cifs_idmap_key_type, sidstr, "");
if (IS_ERR(sidkey)) {
- rc = -EINVAL;
cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n",
__func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g');
goto out_revert_creds;
@@ -422,7 +409,6 @@ try_upcall_to_get_id:
*/
BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
if (sidkey->datalen != sizeof(uid_t)) {
- rc = -EIO;
cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
__func__, sidkey->datalen);
key_invalidate(sidkey);
@@ -1308,7 +1294,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
ndacl_ptr->revision =
dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
- ndacl_ptr->num_aces = dacl_ptr->num_aces;
+ ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
if (uid_valid(uid)) { /* chown */
uid_t id;
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index d9e704979d99..f8292bcf8594 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifsacl.h
*
* Copyright (c) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSACL_H
#define _CIFSACL_H
-
#define NUM_AUTHS (6) /* number of authority fields */
#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index b8f1ff9a83f3..ecf15d845dbd 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cifsencrypt.c
*
@@ -7,19 +8,6 @@
* Copyright (C) International Business Machines Corp., 2005,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2ffcb29d5c8f..9fb874dd8d24 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cifsfs.c
*
@@ -6,19 +7,6 @@
*
* Common Internet FileSystem (CIFS) client
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Note that BB means BUGBUG (ie something to fix eventually) */
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 6beddb108ba0..177f3e7ab86d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifsfs.h
*
* Copyright (c) International Business Machines Corp., 2002, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSFS_H
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8488d7024462..3100f8b66e60 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifsglob.h
*
@@ -5,16 +6,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
*/
#ifndef _CIFS_GLOB_H
#define _CIFS_GLOB_H
@@ -630,7 +621,7 @@ struct TCP_Server_Info {
/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
unsigned int capabilities; /* selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
- __u64 CurrentMid; /* multiplex id - rotating counter */
+ __u64 CurrentMid; /* multiplex id - rotating counter, protected by GlobalMid_Lock */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -896,7 +887,7 @@ struct cifs_ses {
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
- enum statusEnum status;
+ enum statusEnum status; /* updates protected by GlobalMid_Lock */
unsigned overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
@@ -1093,8 +1084,7 @@ struct cifs_tcon {
struct cached_fid crfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
- char *dfs_path;
- int remap:2;
+ char *dfs_path; /* canonical DFS path */
struct list_head ulist; /* cache update list */
#endif
};
@@ -1795,6 +1785,8 @@ require use of the stronger protocol */
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
* list operations on global DnotifyReqList
+ * updates to ses->status
+ * updates to server->CurrentMid
* tcp_ses_lock protects:
* list operations on tcp and SMB session lists
* tcon->open_file_lock protects the list of open files hanging off the tcon
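The two added lines record the same rule as the field annotations earlier in
this header (CurrentMid and ses->status both now name GlobalMid_Lock).
Applied to a status update, the documented pattern is simply (illustrative):

	spin_lock(&GlobalMid_Lock);
	ses->status = CifsExiting;
	spin_unlock(&GlobalMid_Lock);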
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b53a87db282f..0923f72d27e9 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifspdu.h
*
* Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSPDU_H
@@ -148,7 +136,8 @@
#define SMB3_SIGN_KEY_SIZE (16)
/*
- * Size of the smb3 encryption/decryption keys
+ * Size of the smb3 encryption/decryption key storage.
+ * This size is big enough to store any of the cipher key types.
*/
#define SMB3_ENC_DEC_KEY_SIZE (32)
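(The 32-byte storage is large enough for both the 16-byte AES-128 keys and the 32-byte AES-256 keys that SMB3 ciphers can negotiate.)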
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d30cba44ba29..e0def0f0714b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSPROTO_H
#define _CIFSPROTO_H
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 41f74163cc1c..58ebec4d4413 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/cifssmb.c
*
@@ -6,19 +7,6 @@
*
* Contains the routines for constructing the SMB PDUs themselves
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */
@@ -1220,7 +1208,7 @@ SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
int *pOplock, FILE_ALL_INFO *pfile_info,
const struct nls_table *nls_codepage, int remap)
{
- int rc = -EACCES;
+ int rc;
OPENX_REQ *pSMB = NULL;
OPENX_RSP *pSMBr = NULL;
int bytes_returned;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 495c395f9def..5d269f583dac 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/net.h>
@@ -368,13 +356,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
cifs_server_dbg(VFS, "%s: failed to update DFS target hint: rc = %d\n",
__func__, rc);
}
- rc = dfs_cache_update_vol(cifs_sb->origin_fullpath, server);
- if (rc) {
- cifs_server_dbg(VFS, "%s: failed to update vol info in DFS cache: rc = %d\n",
- __func__, rc);
- }
dfs_cache_free_tgts(&tgt_list);
-
}
cifs_put_tcp_super(sb);
@@ -1557,29 +1539,25 @@ out:
/**
* cifs_free_ipc - helper to release the session IPC tcon
*
- * Needs to be called everytime a session is destroyed
+ * Needs to be called every time a session is destroyed.
+ *
+ * On session close, the IPC is closed and the server must release all tcons of the session.
+ * No need to send a tree disconnect here.
+ *
+ * Besides, this will prevent the server from closing durable and resilient files on session
+ * close, as specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
*/
static int
cifs_free_ipc(struct cifs_ses *ses)
{
- int rc = 0, xid;
struct cifs_tcon *tcon = ses->tcon_ipc;
if (tcon == NULL)
return 0;
- if (ses->server->ops->tree_disconnect) {
- xid = get_xid();
- rc = ses->server->ops->tree_disconnect(xid, tcon);
- free_xid(xid);
- }
-
- if (rc)
- cifs_dbg(FYI, "failed to disconnect IPC tcon (rc=%d)\n", rc);
-
tconInfoFree(tcon);
ses->tcon_ipc = NULL;
- return rc;
+ return 0;
}
static struct cifs_ses *
@@ -1605,7 +1583,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
struct TCP_Server_Info *server = ses->server;
-
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
spin_lock(&cifs_tcp_ses_lock);
@@ -1613,13 +1590,20 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
spin_unlock(&cifs_tcp_ses_lock);
return;
}
+
+ cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+ cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE");
+
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ spin_lock(&GlobalMid_Lock);
if (ses->status == CifsGood)
ses->status = CifsExiting;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&GlobalMid_Lock);
cifs_free_ipc(ses);
@@ -1951,10 +1935,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- if (tcon->dfs_path)
- continue;
-#endif
+
if (!match_tcon(tcon, ctx))
continue;
++tcon->tc_count;
@@ -3017,9 +2998,8 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
-static inline int get_next_dfs_tgt(const char *path,
- struct dfs_cache_tgt_list *tgt_list,
- struct dfs_cache_tgt_iterator **tgt_it)
+static int get_next_dfs_tgt(struct dfs_cache_tgt_list *tgt_list,
+ struct dfs_cache_tgt_iterator **tgt_it)
{
if (!*tgt_it)
*tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
@@ -3059,6 +3039,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
struct cifs_ses **ses, struct cifs_tcon **tcon)
{
int rc;
+ char *npath = NULL;
struct dfs_cache_tgt_list tgt_list = {0};
struct dfs_cache_tgt_iterator *tgt_it = NULL;
struct smb3_fs_context tmp_ctx = {NULL};
@@ -3066,11 +3047,15 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
return -EOPNOTSUPP;
- cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, path, full_path);
+ npath = dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb));
+ if (IS_ERR(npath))
+ return PTR_ERR(npath);
+
+ cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, npath, full_path);
- rc = dfs_cache_noreq_find(path, NULL, &tgt_list);
+ rc = dfs_cache_noreq_find(npath, NULL, &tgt_list);
if (rc)
- return rc;
+ goto out;
/*
* We use a 'tmp_ctx' here because we need to pass it down to the mount_{get,put} functions to
* test connection against new DFS targets.
@@ -3084,11 +3069,11 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
char *fake_devname = NULL, *mdata = NULL;
/* Get next DFS target server - if any */
- rc = get_next_dfs_tgt(path, &tgt_list, &tgt_it);
+ rc = get_next_dfs_tgt(&tgt_list, &tgt_it);
if (rc)
break;
- rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
+ rc = dfs_cache_get_tgt_referral(npath, tgt_it, &ref);
if (rc)
break;
@@ -3137,6 +3122,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
}
out:
+ kfree(npath);
smb3_cleanup_fs_context_contents(&tmp_ctx);
dfs_cache_free_tgts(&tgt_list);
return rc;
@@ -3288,25 +3274,18 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static void set_root_ses(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+static void set_root_ses(struct cifs_sb_info *cifs_sb, const uuid_t *mount_id, struct cifs_ses *ses,
struct cifs_ses **root_ses)
{
if (ses) {
spin_lock(&cifs_tcp_ses_lock);
ses->ses_count++;
- if (ses->tcon_ipc)
- ses->tcon_ipc->remap = cifs_remap(cifs_sb);
spin_unlock(&cifs_tcp_ses_lock);
+ dfs_cache_add_refsrv_session(mount_id, ses);
}
*root_ses = ses;
}
-static void put_root_ses(struct cifs_ses *ses)
-{
- if (ses)
- cifs_put_smb_ses(ses);
-}
-
/* Set up next dfs prefix path in @dfs_path */
static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
const unsigned int xid, struct TCP_Server_Info *server,
@@ -3352,17 +3331,25 @@ out:
}
/* Check if resolved targets can handle any DFS referrals */
-static int is_referral_server(const char *ref_path, struct cifs_tcon *tcon, bool *ref_server)
+static int is_referral_server(const char *ref_path, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon, bool *ref_server)
{
int rc;
struct dfs_info3_param ref = {0};
+ cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
+
if (is_tcon_dfs(tcon)) {
*ref_server = true;
} else {
- cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
+ char *npath;
- rc = dfs_cache_noreq_find(ref_path, &ref, NULL);
+ npath = dfs_cache_canonical_path(ref_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
+ if (IS_ERR(npath))
+ return PTR_ERR(npath);
+
+ rc = dfs_cache_noreq_find(npath, &ref, NULL);
+ kfree(npath);
if (rc) {
cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
return rc;
@@ -3386,9 +3373,9 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
struct cifs_ses *ses = NULL, *root_ses = NULL;
struct cifs_tcon *tcon = NULL;
int count = 0;
+ uuid_t mount_id = {0};
char *ref_path = NULL, *full_path = NULL;
char *oldmnt = NULL;
- char *mntdata = NULL;
bool ref_server = false;
rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
@@ -3411,12 +3398,9 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
if (rc != -EREMOTE)
goto error;
}
- /* Save mount options */
- mntdata = kstrdup(cifs_sb->ctx->mount_options, GFP_KERNEL);
- if (!mntdata) {
- rc = -ENOMEM;
- goto error;
- }
+
+ ctx->nosharesock = true;
+
/* Get path of DFS root */
ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
if (IS_ERR(ref_path)) {
@@ -3425,7 +3409,8 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
}
- set_root_ses(cifs_sb, ses, &root_ses);
+ uuid_gen(&mount_id);
+ set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
do {
/* Save full path of last DFS path we used to resolve final target server */
kfree(full_path);
@@ -3456,13 +3441,11 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
continue;
/* Make sure that requests go through new root servers */
- rc = is_referral_server(ref_path + 1, tcon, &ref_server);
+ rc = is_referral_server(ref_path + 1, cifs_sb, tcon, &ref_server);
if (rc)
break;
- if (ref_server) {
- put_root_ses(root_ses);
- set_root_ses(cifs_sb, ses, &root_ses);
- }
+ if (ref_server)
+ set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
/* Get next dfs path and then continue chasing them if -EREMOTE */
rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
@@ -3471,12 +3454,10 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
rc = -ELOOP;
} while (rc == -EREMOTE);
- if (rc)
+ if (rc || !tcon)
goto error;
- put_root_ses(root_ses);
- root_ses = NULL;
+
kfree(ref_path);
- ref_path = NULL;
/*
* Store DFS full path in both superblock and tree connect structures.
*
@@ -3485,21 +3466,27 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
* links, the prefix path is included in both and may be changed during reconnect. See
* cifs_tree_connect().
*/
- cifs_sb->origin_fullpath = kstrdup(full_path, GFP_KERNEL);
- if (!cifs_sb->origin_fullpath) {
+ ref_path = dfs_cache_canonical_path(full_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
+ kfree(full_path);
+ full_path = NULL;
+
+ if (IS_ERR(ref_path)) {
+ rc = PTR_ERR(ref_path);
+ ref_path = NULL;
+ goto error;
+ }
+ cifs_sb->origin_fullpath = ref_path;
+
+ ref_path = kstrdup(cifs_sb->origin_fullpath, GFP_KERNEL);
+ if (!ref_path) {
rc = -ENOMEM;
goto error;
}
spin_lock(&cifs_tcp_ses_lock);
- tcon->dfs_path = full_path;
- full_path = NULL;
- tcon->remap = cifs_remap(cifs_sb);
+ tcon->dfs_path = ref_path;
+ ref_path = NULL;
spin_unlock(&cifs_tcp_ses_lock);
- /* Add original context for DFS cache to be used when refreshing referrals */
- rc = dfs_cache_add_vol(mntdata, ctx, cifs_sb->origin_fullpath);
- if (rc)
- goto error;
/*
* After reconnecting to a different server, unique ids won't
* match anymore, so we disable serverino. This prevents
@@ -3514,6 +3501,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
kfree(cifs_sb->prepath);
cifs_sb->prepath = ctx->prepath;
ctx->prepath = NULL;
+ uuid_copy(&cifs_sb->dfs_mount_id, &mount_id);
out:
free_xid(xid);
@@ -3523,9 +3511,8 @@ out:
error:
kfree(ref_path);
kfree(full_path);
- kfree(mntdata);
kfree(cifs_sb->origin_fullpath);
- put_root_ses(root_ses);
+ dfs_cache_put_refsrv_sessions(&mount_id);
mount_put_conns(cifs_sb, xid, server, ses, tcon);
return rc;
}
@@ -3755,7 +3742,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
kfree(cifs_sb->prepath);
#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_cache_del_vol(cifs_sb->origin_fullpath);
+ dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
kfree(cifs_sb->origin_fullpath);
#endif
call_rcu(&cifs_sb->rcu, delayed_free);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index b1fa30fefe1f..7c1769714609 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -11,6 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
+#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
@@ -18,15 +19,14 @@
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
-#include "fs_context.h"
#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
+#define CACHE_MIN_TTL 120 /* 2 minutes */
-#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
- DFSREF_STORAGE_SERVER))
+#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
struct cache_dfs_tgt {
char *name;
@@ -48,14 +48,15 @@ struct cache_entry {
struct cache_dfs_tgt *tgthint;
};
-struct vol_info {
- char *fullpath;
- spinlock_t ctx_lock;
- struct smb3_fs_context ctx;
- char *mntdata;
+/* List of referral server sessions per dfs mount */
+struct mount_group {
struct list_head list;
- struct list_head rlist;
- struct kref refcnt;
+ uuid_t id;
+ struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
+ int num_sessions;
+ spinlock_t lock;
+ struct list_head refresh_list;
+ struct kref refcount;
};
static struct kmem_cache *cache_slab __read_mostly;
@@ -64,7 +65,7 @@ static struct workqueue_struct *dfscache_wq __read_mostly;
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);
-static struct nls_table *cache_nlsc;
+static struct nls_table *cache_cp;
/*
* Number of entries in the cache
@@ -74,34 +75,145 @@ static atomic_t cache_count;
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
-static LIST_HEAD(vol_list);
-static DEFINE_SPINLOCK(vol_list_lock);
+static LIST_HEAD(mount_group_list);
+static DEFINE_MUTEX(mount_group_list_lock);
static void refresh_cache_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-static int get_normalized_path(const char *path, const char **npath)
+static void get_ipc_unc(const char *ref_path, char *ipc, size_t ipclen)
{
- if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
- return -EINVAL;
+ const char *host;
+ size_t len;
- if (*path == '\\') {
- *npath = path;
- } else {
- char *s = kstrdup(path, GFP_KERNEL);
- if (!s)
- return -ENOMEM;
- convert_delimiter(s, '\\');
- *npath = s;
+ extract_unc_hostname(ref_path, &host, &len);
+ scnprintf(ipc, ipclen, "\\\\%.*s\\IPC$", (int)len, host);
+}
+
+static struct cifs_ses *find_ipc_from_server_path(struct cifs_ses **ses, const char *path)
+{
+ char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
+
+ get_ipc_unc(path, unc, sizeof(unc));
+ for (; *ses; ses++) {
+ if (!strcasecmp(unc, (*ses)->tcon_ipc->treeName))
+ return *ses;
}
- return 0;
+ return ERR_PTR(-ENOENT);
+}
+
+static void __mount_group_release(struct mount_group *mg)
+{
+ int i;
+
+ for (i = 0; i < mg->num_sessions; i++)
+ cifs_put_smb_ses(mg->sessions[i]);
+ kfree(mg);
+}
+
+static void mount_group_release(struct kref *kref)
+{
+ struct mount_group *mg = container_of(kref, struct mount_group, refcount);
+
+ mutex_lock(&mount_group_list_lock);
+ list_del(&mg->list);
+ mutex_unlock(&mount_group_list_lock);
+ __mount_group_release(mg);
+}
+
+static struct mount_group *find_mount_group_locked(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ list_for_each_entry(mg, &mount_group_list, list) {
+ if (uuid_equal(&mg->id, id))
+ return mg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static struct mount_group *__get_mount_group_locked(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ mg = find_mount_group_locked(id);
+ if (!IS_ERR(mg))
+ return mg;
+
+ mg = kmalloc(sizeof(*mg), GFP_KERNEL);
+ if (!mg)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&mg->refcount);
+ uuid_copy(&mg->id, id);
+ mg->num_sessions = 0;
+ spin_lock_init(&mg->lock);
+ list_add(&mg->list, &mount_group_list);
+ return mg;
+}
+
+static struct mount_group *get_mount_group(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ mutex_lock(&mount_group_list_lock);
+ mg = __get_mount_group_locked(id);
+ if (!IS_ERR(mg))
+ kref_get(&mg->refcount);
+ mutex_unlock(&mount_group_list_lock);
+
+ return mg;
}
-static inline void free_normalized_path(const char *path, const char *npath)
+static void free_mount_group_list(void)
{
- if (path != npath)
- kfree(npath);
+ struct mount_group *mg, *tmp_mg;
+
+ list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
+ list_del_init(&mg->list);
+ __mount_group_release(mg);
+ }
+}
+
+/**
+ * dfs_cache_canonical_path - get a canonical DFS path
+ *
+ * @path: DFS path
+ * @cp: codepage
+ * @remap: mapping type
+ *
+ * Return the canonical path on success, otherwise an error pointer.
+ */
+char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
+{
+ char *tmp;
+ int plen = 0;
+ char *npath;
+
+ if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
+ tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
+ if (!tmp) {
+ cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
+ kfree(tmp);
+
+ if (!npath) {
+ cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ npath = kstrdup(path, GFP_KERNEL);
+ if (!npath)
+ return ERR_PTR(-ENOMEM);
+ }
+ convert_delimiter(npath, '\\');
+ return npath;
}
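Callers treat the result as an ERR_PTR-or-heap-string and must free it; a minimal usage sketch, mirroring the connect.c hunks earlier in this patch:

    char *npath;

    npath = dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb));
    if (IS_ERR(npath))
            return PTR_ERR(npath);
    /* ... look up npath in the referral cache ... */
    kfree(npath);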
static inline bool cache_entry_expired(const struct cache_entry *ce)
@@ -171,7 +283,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
- IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
+ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
list_for_each_entry(t, &ce->tlist, list) {
@@ -240,7 +352,7 @@ static inline void dump_ce(const struct cache_entry *ce)
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
ce->etime.tv_nsec,
ce->hdr_flags, ce->ref_flags,
- IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
+ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
dump_tgts(ce);
@@ -284,8 +396,7 @@ int dfs_cache_init(void)
int rc;
int i;
- dfscache_wq = alloc_workqueue("cifs-dfscache",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
+ dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
if (!dfscache_wq)
return -ENOMEM;
@@ -301,7 +412,9 @@ int dfs_cache_init(void)
INIT_HLIST_HEAD(&cache_htable[i]);
atomic_set(&cache_count, 0);
- cache_nlsc = load_nls_default();
+ cache_cp = load_nls("utf8");
+ if (!cache_cp)
+ cache_cp = load_nls_default();
cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
return 0;
@@ -311,23 +424,24 @@ out_destroy_wq:
return rc;
}
-static inline unsigned int cache_entry_hash(const void *data, int size)
+static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
- unsigned int h;
-
- h = jhash(data, size, 0);
- return h & (CACHE_HTABLE_SIZE - 1);
-}
-
-/* Check whether second path component of @path is SYSVOL or NETLOGON */
-static inline bool is_sysvol_or_netlogon(const char *path)
-{
- const char *s;
- char sep = path[0];
-
- s = strchr(path + 1, sep) + 1;
- return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
- !strncasecmp(s, "netlogon", strlen("netlogon"));
+ int i, clen;
+ const unsigned char *s = data;
+ wchar_t c;
+ unsigned int h = 0;
+
+ for (i = 0; i < size; i += clen) {
+ clen = cache_cp->char2uni(&s[i], size - i, &c);
+ if (unlikely(clen < 0)) {
+ cifs_dbg(VFS, "%s: can't convert char\n", __func__);
+ return clen;
+ }
+ c = cifs_toupper(c);
+ h = jhash(&c, sizeof(c), h);
+ }
+ *hash = h % CACHE_HTABLE_SIZE;
+ return 0;
}
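A sketch of how the reworked helper is consumed (this is the pattern used by lookup_cache_entry() below); unlike the old jhash-only version, it can fail when the data contains a byte sequence that is invalid in cache_cp:

    unsigned int hash;
    int rc;

    rc = cache_entry_hash(path, strlen(path), &hash);
    if (rc)
            return ERR_PTR(rc);
    return __lookup_cache_entry(path, hash, strlen(path));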
/* Return target hint of a DFS cache entry */
@@ -378,7 +492,7 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
{
int i;
- ce->ttl = refs[0].ttl;
+ ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
ce->etime = get_expire_time(ce->ttl);
ce->srvtype = refs[0].server_type;
ce->hdr_flags = refs[0].flags;
@@ -409,9 +523,7 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
}
/* Allocate a new cache entry */
-static struct cache_entry *alloc_cache_entry(const char *path,
- const struct dfs_info3_param *refs,
- int numrefs)
+static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
struct cache_entry *ce;
int rc;
@@ -420,11 +532,9 @@ static struct cache_entry *alloc_cache_entry(const char *path,
if (!ce)
return ERR_PTR(-ENOMEM);
- ce->path = kstrdup(path, GFP_KERNEL);
- if (!ce->path) {
- kmem_cache_free(cache_slab, ce);
- return ERR_PTR(-ENOMEM);
- }
+ ce->path = refs[0].path_name;
+ refs[0].path_name = NULL;
+
INIT_HLIST_NODE(&ce->hlist);
INIT_LIST_HEAD(&ce->tlist);
@@ -437,13 +547,14 @@ static struct cache_entry *alloc_cache_entry(const char *path,
return ce;
}
-/* Must be called with htable_rw_lock held */
-static void remove_oldest_entry(void)
+static void remove_oldest_entry_locked(void)
{
int i;
struct cache_entry *ce;
struct cache_entry *to_del = NULL;
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
@@ -467,12 +578,24 @@ static void remove_oldest_entry(void)
}
/* Add a new DFS cache entry */
-static int add_cache_entry(const char *path, unsigned int hash,
- struct dfs_info3_param *refs, int numrefs)
+static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
{
+ int rc;
struct cache_entry *ce;
+ unsigned int hash;
+
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
+ cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
+ remove_oldest_entry_locked();
+ }
- ce = alloc_cache_entry(path, refs, numrefs);
+ rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
+ if (rc)
+ return rc;
+
+ ce = alloc_cache_entry(refs, numrefs);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -486,65 +609,77 @@ static int add_cache_entry(const char *path, unsigned int hash,
}
spin_unlock(&cache_ttl_lock);
- down_write(&htable_rw_lock);
hlist_add_head(&ce->hlist, &cache_htable[hash]);
dump_ce(ce);
- up_write(&htable_rw_lock);
+
+ atomic_inc(&cache_count);
return 0;
}
-static struct cache_entry *__lookup_cache_entry(const char *path)
+/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
+static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
- struct cache_entry *ce;
- unsigned int h;
- bool found = false;
+ int i, l1, l2;
+ wchar_t c1, c2;
- h = cache_entry_hash(path, strlen(path));
+ if (len1 != len2)
+ return false;
- hlist_for_each_entry(ce, &cache_htable[h], hlist) {
- if (!strcasecmp(path, ce->path)) {
- found = true;
- dump_ce(ce);
- break;
+ for (i = 0; i < len1; i += l1) {
+ l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
+ l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
+ if (unlikely(l1 < 0 && l2 < 0)) {
+ if (s1[i] != s2[i])
+ return false;
+ l1 = 1;
+ continue;
}
+ if (l1 != l2)
+ return false;
+ if (cifs_toupper(c1) != cifs_toupper(c2))
+ return false;
}
+ return true;
+}
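For example, once both strings are canonicalized to the cache codepage with backslash delimiters, \\srv\dfs and \\SRV\DFS compare equal, since each character is upcased before comparison (the names here are illustrative).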
- if (!found)
- ce = ERR_PTR(-ENOENT);
- return ce;
+static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
+{
+ struct cache_entry *ce;
+
+ hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
+ if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
+ dump_ce(ce);
+ return ce;
+ }
+ }
+ return ERR_PTR(-EEXIST);
}
/*
- * Find a DFS cache entry in hash table and optionally check prefix path against
- * @path.
- * Use whole path components in the match.
- * Must be called with htable_rw_lock held.
+ * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
+ *
+ * Use whole path components in the match. Must be called with htable_rw_lock held.
*
- * Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return ERR_PTR(-EEXIST) if the entry is not found.
*/
-static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
+static struct cache_entry *lookup_cache_entry(const char *path)
{
- struct cache_entry *ce = ERR_PTR(-ENOENT);
- unsigned int h;
+ struct cache_entry *ce;
int cnt = 0;
- char *npath;
- char *s, *e;
- char sep;
-
- npath = kstrdup(path, GFP_KERNEL);
- if (!npath)
- return ERR_PTR(-ENOMEM);
+ const char *s = path, *e;
+ char sep = *s;
+ unsigned int hash;
+ int rc;
- s = npath;
- sep = *npath;
while ((s = strchr(s, sep)) && ++cnt < 3)
s++;
if (cnt < 3) {
- h = cache_entry_hash(path, strlen(path));
- ce = __lookup_cache_entry(path);
- goto out;
+ rc = cache_entry_hash(path, strlen(path), &hash);
+ if (rc)
+ return ERR_PTR(rc);
+ return __lookup_cache_entry(path, hash, strlen(path));
}
/*
* Handle paths that have more than two path components and are a complete prefix of the DFS
@@ -552,64 +687,29 @@ static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *ha
*
* See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
*/
- h = cache_entry_hash(npath, strlen(npath));
- e = npath + strlen(npath) - 1;
+ e = path + strlen(path) - 1;
while (e > s) {
- char tmp;
+ int len;
/* skip separators */
while (e > s && *e == sep)
e--;
if (e == s)
- goto out;
-
- tmp = *(e+1);
- *(e+1) = 0;
-
- ce = __lookup_cache_entry(npath);
- if (!IS_ERR(ce)) {
- h = cache_entry_hash(npath, strlen(npath));
break;
- }
- *(e+1) = tmp;
+ len = e + 1 - path;
+ rc = cache_entry_hash(path, len, &hash);
+ if (rc)
+ return ERR_PTR(rc);
+ ce = __lookup_cache_entry(path, hash, len);
+ if (!IS_ERR(ce))
+ return ce;
+
/* backward until separator */
while (e > s && *e != sep)
e--;
}
-out:
- if (hash)
- *hash = h;
- kfree(npath);
- return ce;
-}
-
-static void __vol_release(struct vol_info *vi)
-{
- kfree(vi->fullpath);
- kfree(vi->mntdata);
- smb3_cleanup_fs_context_contents(&vi->ctx);
- kfree(vi);
-}
-
-static void vol_release(struct kref *kref)
-{
- struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
-
- spin_lock(&vol_list_lock);
- list_del(&vi->list);
- spin_unlock(&vol_list_lock);
- __vol_release(vi);
-}
-
-static inline void free_vol_list(void)
-{
- struct vol_info *vi, *nvi;
-
- list_for_each_entry_safe(vi, nvi, &vol_list, list) {
- list_del_init(&vi->list);
- __vol_release(vi);
- }
+ return ERR_PTR(-EEXIST);
}
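As a worked example (with illustrative names): a lookup of \\dom\dfsroot\link\dir hashes and tries the whole path first, then progressively shorter whole-component prefixes, \\dom\dfsroot\link and then \\dom\dfsroot, stopping at the first cached match; paths with fewer than three components are only ever matched whole.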
/**
@@ -618,8 +718,8 @@ static inline void free_vol_list(void)
void dfs_cache_destroy(void)
{
cancel_delayed_work_sync(&refresh_task);
- unload_nls(cache_nlsc);
- free_vol_list();
+ unload_nls(cache_cp);
+ free_mount_group_list();
flush_cache_ents();
kmem_cache_destroy(cache_slab);
destroy_workqueue(dfscache_wq);
@@ -627,18 +727,14 @@ void dfs_cache_destroy(void)
cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
-/* Must be called with htable_rw_lock held */
-static int __update_cache_entry(const char *path,
- const struct dfs_info3_param *refs,
- int numrefs)
+/* Update a cache entry with the new referral in @refs */
+static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
+ int numrefs)
{
int rc;
- struct cache_entry *ce;
char *s, *th = NULL;
- ce = lookup_cache_entry(path, NULL);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
if (ce->tgthint) {
s = ce->tgthint->name;
@@ -657,37 +753,30 @@ static int __update_cache_entry(const char *path,
return rc;
}
-static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, struct dfs_info3_param **refs,
- int *numrefs)
+static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
+ struct dfs_info3_param **refs, int *numrefs)
{
- cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
+ int rc;
+ int i;
- if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
- return -EOPNOTSUPP;
- if (unlikely(!nls_codepage))
- return -EINVAL;
+ cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);
*refs = NULL;
*numrefs = 0;
- return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
- nls_codepage, remap);
-}
-
-/* Update an expired cache entry by getting a new DFS referral from server */
-static int update_cache_entry(const char *path,
- const struct dfs_info3_param *refs,
- int numrefs)
-{
-
- int rc;
+ if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
+ return -EOPNOTSUPP;
+ if (unlikely(!cache_cp))
+ return -EINVAL;
- down_write(&htable_rw_lock);
- rc = __update_cache_entry(path, refs, numrefs);
- up_write(&htable_rw_lock);
+ rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
+ NO_MAP_UNI_RSVD);
+ if (!rc) {
+ struct dfs_info3_param *ref = *refs;
+ for (i = 0; i < *numrefs; i++)
+ convert_delimiter(ref[i].path_name, '\\');
+ }
return rc;
}
@@ -697,15 +786,12 @@ static int update_cache_entry(const char *path,
* If the entry wasn't found, it will create a new one. Or if it was found but
* expired, then it will update the entry accordingly.
*
- * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
+ * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
* handle them properly.
*/
-static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, bool noreq)
+static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
{
int rc;
- unsigned int hash;
struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
@@ -713,62 +799,38 @@ static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
- down_read(&htable_rw_lock);
-
- ce = lookup_cache_entry(path, &hash);
-
- /*
- * If @noreq is set, no requests will be sent to the server. Just return
- * the cache entry.
- */
- if (noreq) {
- up_read(&htable_rw_lock);
- return PTR_ERR_OR_ZERO(ce);
- }
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
if (!IS_ERR(ce)) {
if (!cache_entry_expired(ce)) {
dump_ce(ce);
- up_read(&htable_rw_lock);
+ up_write(&htable_rw_lock);
return 0;
}
} else {
newent = true;
}
- up_read(&htable_rw_lock);
-
/*
- * No entry was found.
- *
- * Request a new DFS referral in order to create a new cache entry, or
- * updating an existing one.
+ * Either the entry was not found, or it is expired.
+ * Request a new DFS referral in order to create or update a cache entry.
*/
- rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
- &refs, &numrefs);
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
if (rc)
- return rc;
+ goto out_unlock;
dump_refs(refs, numrefs);
if (!newent) {
- rc = update_cache_entry(path, refs, numrefs);
- goto out_free_refs;
- }
-
- if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
- cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
- __func__, CACHE_MAX_ENTRIES);
- down_write(&htable_rw_lock);
- remove_oldest_entry();
- up_write(&htable_rw_lock);
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ goto out_unlock;
}
- rc = add_cache_entry(path, hash, refs, numrefs);
- if (!rc)
- atomic_inc(&cache_count);
+ rc = add_cache_entry_locked(refs, numrefs);
-out_free_refs:
+out_unlock:
+ up_write(&htable_rw_lock);
free_dfs_info_array(refs, numrefs);
return rc;
}
@@ -868,7 +930,7 @@ err_free_it:
* needs to be issued:
* @xid: syscall xid
* @ses: smb session to issue the request on
- * @nls_codepage: charset conversion
+ * @cp: codepage
* @remap: path character remapping type
* @path: path to lookup in DFS referral cache.
*
@@ -877,26 +939,25 @@ err_free_it:
*
* Return zero if the target was found, otherwise non-zero.
*/
-int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, struct dfs_info3_param *ref,
+int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
+ int remap, const char *path, struct dfs_info3_param *ref,
struct dfs_cache_tgt_list *tgt_list)
{
int rc;
const char *npath;
struct cache_entry *ce;
- rc = get_normalized_path(path, &npath);
- if (rc)
- return rc;
+ npath = dfs_cache_canonical_path(path, cp, remap);
+ if (IS_ERR(npath))
+ return PTR_ERR(npath);
- rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
+ rc = cache_refresh_path(xid, ses, npath);
if (rc)
goto out_free_path;
down_read(&htable_rw_lock);
- ce = lookup_cache_entry(npath, NULL);
+ ce = lookup_cache_entry(npath);
if (IS_ERR(ce)) {
up_read(&htable_rw_lock);
rc = PTR_ERR(ce);
@@ -913,7 +974,7 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
up_read(&htable_rw_lock);
out_free_path:
- free_normalized_path(path, npath);
+ kfree(npath);
return rc;
}
@@ -925,7 +986,7 @@ out_free_path:
* expired, nor create a new cache entry if @path hasn't been found. It heavily
* relies on an existing cache entry.
*
- * @path: path to lookup in the DFS referral cache.
+ * @path: canonical DFS path to lookup in the DFS referral cache.
* @ref: when non-NULL, store single DFS referral result in it.
* @tgt_list: when non-NULL, store complete DFS target list in it.
*
@@ -937,18 +998,13 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
struct dfs_cache_tgt_list *tgt_list)
{
int rc;
- const char *npath;
struct cache_entry *ce;
- rc = get_normalized_path(path, &npath);
- if (rc)
- return rc;
-
- cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
down_read(&htable_rw_lock);
- ce = lookup_cache_entry(npath, NULL);
+ ce = lookup_cache_entry(path);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out_unlock;
@@ -963,8 +1019,6 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
out_unlock:
up_read(&htable_rw_lock);
- free_normalized_path(path, npath);
-
return rc;
}
@@ -979,16 +1033,15 @@ out_unlock:
*
* @xid: syscall id
* @ses: smb session
- * @nls_codepage: charset conversion
+ * @cp: codepage
* @remap: type of character remapping for paths
- * @path: path to lookup in DFS referral cache.
+ * @path: path to lookup in DFS referral cache
* @it: DFS target iterator
*
* Return zero if the target hint was updated successfully, otherwise non-zero.
*/
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path,
+ const struct nls_table *cp, int remap, const char *path,
const struct dfs_cache_tgt_iterator *it)
{
int rc;
@@ -996,19 +1049,19 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
struct cache_entry *ce;
struct cache_dfs_tgt *t;
- rc = get_normalized_path(path, &npath);
- if (rc)
- return rc;
+ npath = dfs_cache_canonical_path(path, cp, remap);
+ if (IS_ERR(npath))
+ return PTR_ERR(npath);
cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
- rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
+ rc = cache_refresh_path(xid, ses, npath);
if (rc)
goto out_free_path;
down_write(&htable_rw_lock);
- ce = lookup_cache_entry(npath, NULL);
+ ce = lookup_cache_entry(npath);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out_unlock;
@@ -1031,8 +1084,7 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
out_unlock:
up_write(&htable_rw_lock);
out_free_path:
- free_normalized_path(path, npath);
-
+ kfree(npath);
return rc;
}
@@ -1044,32 +1096,26 @@ out_free_path:
* expired, nor create a new cache entry if @path hasn't been found. It heavily
* relies on an existing cache entry.
*
- * @path: path to lookup in DFS referral cache.
+ * @path: canonical DFS path to lookup in DFS referral cache.
* @it: target iterator which contains the target hint to update the cache
* entry with.
*
* Return zero if the target hint was updated successfully, otherwise non-zero.
*/
-int dfs_cache_noreq_update_tgthint(const char *path,
- const struct dfs_cache_tgt_iterator *it)
+int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
int rc;
- const char *npath;
struct cache_entry *ce;
struct cache_dfs_tgt *t;
if (!it)
return -EINVAL;
- rc = get_normalized_path(path, &npath);
- if (rc)
- return rc;
-
- cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
down_write(&htable_rw_lock);
- ce = lookup_cache_entry(npath, NULL);
+ ce = lookup_cache_entry(path);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out_unlock;
@@ -1092,8 +1138,6 @@ int dfs_cache_noreq_update_tgthint(const char *path,
out_unlock:
up_write(&htable_rw_lock);
- free_normalized_path(path, npath);
-
return rc;
}
@@ -1101,32 +1145,26 @@ out_unlock:
* dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
* target iterator (@it).
*
- * @path: path to lookup in DFS referral cache.
+ * @path: canonical DFS path to lookup in DFS referral cache.
* @it: DFS target iterator.
* @ref: DFS referral pointer to set up the gathered information.
*
* Return zero if the DFS referral was set up correctly, otherwise non-zero.
*/
-int dfs_cache_get_tgt_referral(const char *path,
- const struct dfs_cache_tgt_iterator *it,
+int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
struct dfs_info3_param *ref)
{
int rc;
- const char *npath;
struct cache_entry *ce;
if (!it || !ref)
return -EINVAL;
- rc = get_normalized_path(path, &npath);
- if (rc)
- return rc;
-
- cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
down_read(&htable_rw_lock);
- ce = lookup_cache_entry(npath, NULL);
+ ce = lookup_cache_entry(path);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out_unlock;
@@ -1138,132 +1176,55 @@ int dfs_cache_get_tgt_referral(const char *path,
out_unlock:
up_read(&htable_rw_lock);
- free_normalized_path(path, npath);
-
return rc;
}
/**
- * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
- * DFS cache refresh worker.
- *
- * @mntdata: mount data.
- * @ctx: cifs context.
- * @fullpath: origin full path.
+ * dfs_cache_add_refsrv_session - add SMB session of referral server
*
- * Return zero if context was set up correctly, otherwise non-zero.
+ * @mount_id: mount group uuid to lookup.
+ * @ses: reference counted SMB session of referral server.
*/
-int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
+void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
{
- int rc;
- struct vol_info *vi;
-
- if (!ctx || !fullpath || !mntdata)
- return -EINVAL;
-
- cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
-
- vi = kzalloc(sizeof(*vi), GFP_KERNEL);
- if (!vi)
- return -ENOMEM;
+ struct mount_group *mg;
- vi->fullpath = kstrdup(fullpath, GFP_KERNEL);
- if (!vi->fullpath) {
- rc = -ENOMEM;
- goto err_free_vi;
- }
-
- rc = smb3_fs_context_dup(&vi->ctx, ctx);
- if (rc)
- goto err_free_fullpath;
-
- vi->mntdata = mntdata;
- spin_lock_init(&vi->ctx_lock);
- kref_init(&vi->refcnt);
-
- spin_lock(&vol_list_lock);
- list_add_tail(&vi->list, &vol_list);
- spin_unlock(&vol_list_lock);
-
- return 0;
-
-err_free_fullpath:
- kfree(vi->fullpath);
-err_free_vi:
- kfree(vi);
- return rc;
-}
+ if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
+ return;
-/* Must be called with vol_list_lock held */
-static struct vol_info *find_vol(const char *fullpath)
-{
- struct vol_info *vi;
+ mg = get_mount_group(mount_id);
+ if (WARN_ON_ONCE(IS_ERR(mg)))
+ return;
- list_for_each_entry(vi, &vol_list, list) {
- cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
- if (!strcasecmp(vi->fullpath, fullpath))
- return vi;
- }
- return ERR_PTR(-ENOENT);
+ spin_lock(&mg->lock);
+ if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
+ mg->sessions[mg->num_sessions++] = ses;
+ spin_unlock(&mg->lock);
+ kref_put(&mg->refcount, mount_group_release);
}
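A minimal sketch of the intended lifetime pairing, mirroring the cifs_mount() and cifs_umount() hunks in connect.c above:

    uuid_t mount_id;

    uuid_gen(&mount_id);
    /* the mount group now owns a reference to the registered ses */
    dfs_cache_add_refsrv_session(&mount_id, ses);
    /* ... */
    /* at umount time: put every session registered under this id */
    dfs_cache_put_refsrv_sessions(&mount_id);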
/**
- * dfs_cache_update_vol - update vol info in DFS cache after failover
+ * dfs_cache_put_refsrv_sessions - put all referral server sessions
*
- * @fullpath: fullpath to look up in volume list.
- * @server: TCP ses pointer.
+ * Put all SMB sessions from the given mount group id.
*
- * Return zero if volume was updated, otherwise non-zero.
+ * @mount_id: mount group uuid to lookup.
*/
-int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
+void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
{
- struct vol_info *vi;
-
- if (!fullpath || !server)
- return -EINVAL;
-
- cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
-
- spin_lock(&vol_list_lock);
- vi = find_vol(fullpath);
- if (IS_ERR(vi)) {
- spin_unlock(&vol_list_lock);
- return PTR_ERR(vi);
- }
- kref_get(&vi->refcnt);
- spin_unlock(&vol_list_lock);
-
- cifs_dbg(FYI, "%s: updating volume info\n", __func__);
- spin_lock(&vi->ctx_lock);
- memcpy(&vi->ctx.dstaddr, &server->dstaddr,
- sizeof(vi->ctx.dstaddr));
- spin_unlock(&vi->ctx_lock);
+ struct mount_group *mg;
- kref_put(&vi->refcnt, vol_release);
-
- return 0;
-}
-
-/**
- * dfs_cache_del_vol - remove volume info in DFS cache during umount()
- *
- * @fullpath: fullpath to look up in volume list.
- */
-void dfs_cache_del_vol(const char *fullpath)
-{
- struct vol_info *vi;
-
- if (!fullpath || !*fullpath)
+ if (!mount_id || uuid_is_null(mount_id))
return;
- cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
-
- spin_lock(&vol_list_lock);
- vi = find_vol(fullpath);
- spin_unlock(&vol_list_lock);
-
- if (!IS_ERR(vi))
- kref_put(&vi->refcnt, vol_release);
+ mutex_lock(&mount_group_list_lock);
+ mg = find_mount_group_locked(mount_id);
+ if (IS_ERR(mg)) {
+ mutex_unlock(&mount_group_list_lock);
+ return;
+ }
+ mutex_unlock(&mount_group_list_lock);
+ kref_put(&mg->refcount, mount_group_release);
}
/**
@@ -1276,8 +1237,8 @@ void dfs_cache_del_vol(const char *fullpath)
*
* Return zero if target was parsed correctly, otherwise non-zero.
*/
-int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
- char **share, char **prefix)
+int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+ char **prefix)
{
char *s, sep, *p;
size_t len;
@@ -1332,278 +1293,190 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
-/* Get all tcons that are within a DFS namespace and can be refreshed */
-static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
+/*
+ * Refresh all active DFS mounts regardless of whether they are in the cache or not
+ * (the cache can be cleared).
+ */
+static void refresh_mounts(struct cifs_ses **sessions)
{
+ struct TCP_Server_Info *server;
struct cifs_ses *ses;
- struct cifs_tcon *tcon;
+ struct cifs_tcon *tcon, *ntcon;
+ struct list_head tcons;
+ unsigned int xid;
- INIT_LIST_HEAD(head);
+ INIT_LIST_HEAD(&tcons);
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
- if (!tcon->need_reconnect && !tcon->need_reopen_files &&
- tcon->dfs_path) {
- tcon->tc_count++;
- list_add_tail(&tcon->ulist, head);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->dfs_path) {
+ tcon->tc_count++;
+ list_add_tail(&tcon->ulist, &tcons);
+ }
}
}
- if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
- ses->tcon_ipc->dfs_path) {
- list_add_tail(&ses->tcon_ipc->ulist, head);
- }
}
spin_unlock(&cifs_tcp_ses_lock);
-}
-static bool is_dfs_link(const char *path)
-{
- char *s;
-
- s = strchr(path + 1, '\\');
- if (!s)
- return false;
- return !!strchr(s + 1, '\\');
-}
-
-static char *get_dfs_root(const char *path)
-{
- char *s, *npath;
-
- s = strchr(path + 1, '\\');
- if (!s)
- return ERR_PTR(-EINVAL);
-
- s = strchr(s + 1, '\\');
- if (!s)
- return ERR_PTR(-EINVAL);
-
- npath = kstrndup(path, s - path, GFP_KERNEL);
- if (!npath)
- return ERR_PTR(-ENOMEM);
+ list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ const char *path = tcon->dfs_path + 1;
+ struct cache_entry *ce;
+ struct dfs_info3_param *refs = NULL;
+ int numrefs = 0;
+ bool needs_refresh = false;
+ int rc = 0;
- return npath;
-}
+ list_del_init(&tcon->ulist);
-static inline void put_tcp_server(struct TCP_Server_Info *server)
-{
- cifs_put_tcp_session(server, 0);
-}
+ ses = find_ipc_from_server_path(sessions, path);
+ if (IS_ERR(ses))
+ goto next_tcon;
-static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
-{
- struct TCP_Server_Info *server;
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
+ up_read(&htable_rw_lock);
- server = cifs_find_tcp_session(ctx);
- if (IS_ERR_OR_NULL(server))
- return NULL;
+ if (!needs_refresh)
+ goto next_tcon;
+
+ xid = get_xid();
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ free_xid(xid);
+
+ /* Create or update a cache entry with the new referral */
+ if (!rc) {
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce))
+ add_cache_entry_locked(refs, numrefs);
+ else if (cache_entry_expired(ce))
+ update_cache_entry_locked(ce, refs, numrefs);
+ up_write(&htable_rw_lock);
+ }
- spin_lock(&GlobalMid_Lock);
- if (server->tcpStatus != CifsGood) {
- spin_unlock(&GlobalMid_Lock);
- put_tcp_server(server);
- return NULL;
+next_tcon:
+ free_dfs_info_array(refs, numrefs);
+ cifs_put_tcon(tcon);
}
- spin_unlock(&GlobalMid_Lock);
-
- return server;
}
-/* Find root SMB session out of a DFS link path */
-static struct cifs_ses *find_root_ses(struct vol_info *vi,
- struct cifs_tcon *tcon,
- const char *path)
+static void refresh_cache(struct cifs_ses **sessions)
{
- char *rpath;
- int rc;
- struct cache_entry *ce;
- struct dfs_info3_param ref = {0};
- char *mdata = NULL, *devname = NULL;
- struct TCP_Server_Info *server;
+ int i;
struct cifs_ses *ses;
- struct smb3_fs_context ctx = {NULL};
+ unsigned int xid;
+ char *ref_paths[CACHE_MAX_ENTRIES];
+ int count = 0;
+ struct cache_entry *ce;
- rpath = get_dfs_root(path);
- if (IS_ERR(rpath))
- return ERR_CAST(rpath);
+ /*
+ * Refresh all cached entries. Get all new referrals outside the critical section to avoid
+ * starvation while performing SMB2 IOCTLs on broken or slow connections.
+ * The cache entries may cover more paths than the active mounts
+ * (e.g. domain-based DFS referrals or multi-tier DFS setups).
+ */
down_read(&htable_rw_lock);
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
- ce = lookup_cache_entry(rpath, NULL);
- if (IS_ERR(ce)) {
- up_read(&htable_rw_lock);
- ses = ERR_CAST(ce);
- goto out;
- }
-
- rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
- if (rc) {
- up_read(&htable_rw_lock);
- ses = ERR_PTR(rc);
- goto out;
+ hlist_for_each_entry(ce, l, hlist) {
+ if (count == ARRAY_SIZE(ref_paths))
+ goto out_unlock;
+ if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) ||
+ IS_ERR(find_ipc_from_server_path(sessions, ce->path)))
+ continue;
+ ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC);
+ }
}
+out_unlock:
up_read(&htable_rw_lock);
- mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
- &devname);
- free_dfs_info_param(&ref);
-
- if (IS_ERR(mdata)) {
- ses = ERR_CAST(mdata);
- mdata = NULL;
- goto out;
- }
-
- rc = cifs_setup_volume_info(&ctx, NULL, devname);
-
- if (rc) {
- ses = ERR_PTR(rc);
- goto out;
- }
-
- server = get_tcp_server(&ctx);
- if (!server) {
- ses = ERR_PTR(-EHOSTDOWN);
- goto out;
- }
-
- ses = cifs_get_smb_ses(server, &ctx);
-
-out:
- smb3_cleanup_fs_context_contents(&ctx);
- kfree(mdata);
- kfree(rpath);
- kfree(devname);
-
- return ses;
-}
-
-/* Refresh DFS cache entry from a given tcon */
-static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
-{
- int rc = 0;
- unsigned int xid;
- const char *path, *npath;
- struct cache_entry *ce;
- struct cifs_ses *root_ses = NULL, *ses;
- struct dfs_info3_param *refs = NULL;
- int numrefs = 0;
-
- xid = get_xid();
-
- path = tcon->dfs_path + 1;
+ for (i = 0; i < count; i++) {
+ char *path = ref_paths[i];
+ struct dfs_info3_param *refs = NULL;
+ int numrefs = 0;
+ int rc = 0;
- rc = get_normalized_path(path, &npath);
- if (rc)
- goto out_free_xid;
-
- down_read(&htable_rw_lock);
-
- ce = lookup_cache_entry(npath, NULL);
- if (IS_ERR(ce)) {
- rc = PTR_ERR(ce);
- up_read(&htable_rw_lock);
- goto out_free_path;
- }
+ if (!path)
+ continue;
- if (!cache_entry_expired(ce)) {
- up_read(&htable_rw_lock);
- goto out_free_path;
- }
+ ses = find_ipc_from_server_path(sessions, path);
+ if (IS_ERR(ses))
+ goto next_referral;
- up_read(&htable_rw_lock);
+ xid = get_xid();
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ free_xid(xid);
- /* If it's a DFS Link, then use root SMB session for refreshing it */
- if (is_dfs_link(npath)) {
- ses = root_ses = find_root_ses(vi, tcon, npath);
- if (IS_ERR(ses)) {
- rc = PTR_ERR(ses);
- root_ses = NULL;
- goto out_free_path;
+ if (!rc) {
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ /*
+ * We need to re-check it because other tasks might have deleted or
+ * updated it.
+ */
+ if (!IS_ERR(ce) && cache_entry_expired(ce))
+ update_cache_entry_locked(ce, refs, numrefs);
+ up_write(&htable_rw_lock);
}
- } else {
- ses = tcon->ses;
- }
- rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
- &numrefs);
- if (!rc) {
- dump_refs(refs, numrefs);
- rc = update_cache_entry(npath, refs, numrefs);
+next_referral:
+ kfree(path);
free_dfs_info_array(refs, numrefs);
}
-
- if (root_ses)
- cifs_put_smb_ses(root_ses);
-
-out_free_path:
- free_normalized_path(path, npath);
-
-out_free_xid:
- free_xid(xid);
- return rc;
}
/*
- * Worker that will refresh DFS cache based on lowest TTL value from a DFS
+ * Worker that will refresh the DFS cache and active mounts based on the lowest TTL value from a DFS
* referral.
*/
static void refresh_cache_worker(struct work_struct *work)
{
- struct vol_info *vi, *nvi;
- struct TCP_Server_Info *server;
- LIST_HEAD(vols);
- LIST_HEAD(tcons);
- struct cifs_tcon *tcon, *ntcon;
- int rc;
-
- /*
- * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
- * for refreshing.
- */
- spin_lock(&vol_list_lock);
- list_for_each_entry(vi, &vol_list, list) {
- server = get_tcp_server(&vi->ctx);
- if (!server)
- continue;
-
- kref_get(&vi->refcnt);
- list_add_tail(&vi->rlist, &vols);
- put_tcp_server(server);
+ struct list_head mglist;
+ struct mount_group *mg, *tmp_mg;
+ struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+ int max_sessions = ARRAY_SIZE(sessions) - 1;
+ int i = 0, count;
+
+ INIT_LIST_HEAD(&mglist);
+
+ /* Get references of mount groups */
+ mutex_lock(&mount_group_list_lock);
+ list_for_each_entry(mg, &mount_group_list, list) {
+ kref_get(&mg->refcount);
+ list_add(&mg->refresh_list, &mglist);
}
- spin_unlock(&vol_list_lock);
-
- /* Walk through all TCONs and refresh any expired cache entry */
- list_for_each_entry_safe(vi, nvi, &vols, rlist) {
- spin_lock(&vi->ctx_lock);
- server = get_tcp_server(&vi->ctx);
- spin_unlock(&vi->ctx_lock);
+ mutex_unlock(&mount_group_list_lock);
- if (!server)
- goto next_vol;
-
- get_tcons(server, &tcons);
- rc = 0;
-
- list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
- /*
- * Skip tcp server if any of its tcons failed to refresh
- * (possibily due to reconnects).
- */
- if (!rc)
- rc = refresh_tcon(vi, tcon);
+ /* Fill in the local array with a NULL-terminated list of all referral server sessions */
+ list_for_each_entry(mg, &mglist, refresh_list) {
+ if (i >= max_sessions)
+ break;
- list_del_init(&tcon->ulist);
- cifs_put_tcon(tcon);
- }
+ spin_lock(&mg->lock);
+ if (i + mg->num_sessions > max_sessions)
+ count = max_sessions - i;
+ else
+ count = mg->num_sessions;
+ memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
+ spin_unlock(&mg->lock);
+ i += count;
+ }
- put_tcp_server(server);
+ if (sessions[0]) {
+ /* Refresh all active mounts and cached entries */
+ refresh_mounts(sessions);
+ refresh_cache(sessions);
+ }
-next_vol:
- list_del_init(&vi->rlist);
- kref_put(&vi->refcnt, vol_release);
+ list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
+ list_del_init(&mg->refresh_list);
+ kref_put(&mg->refcount, mount_group_release);
}
spin_lock(&cache_ttl_lock);
diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
index 1afc4f590c47..b29d3ae64829 100644
--- a/fs/cifs/dfs_cache.h
+++ b/fs/cifs/dfs_cache.h
@@ -10,6 +10,7 @@
#include <linux/nls.h>
#include <linux/list.h>
+#include <linux/uuid.h>
#include "cifsglob.h"
struct dfs_cache_tgt_list {
@@ -23,34 +24,26 @@ struct dfs_cache_tgt_iterator {
struct list_head it_list;
};
-extern int dfs_cache_init(void);
-extern void dfs_cache_destroy(void);
+int dfs_cache_init(void);
+void dfs_cache_destroy(void);
extern const struct proc_ops dfscache_proc_ops;
-extern int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, struct dfs_info3_param *ref,
- struct dfs_cache_tgt_list *tgt_list);
-extern int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
- struct dfs_cache_tgt_list *tgt_list);
-extern int dfs_cache_update_tgthint(const unsigned int xid,
- struct cifs_ses *ses,
- const struct nls_table *nls_codepage,
- int remap, const char *path,
- const struct dfs_cache_tgt_iterator *it);
-extern int
-dfs_cache_noreq_update_tgthint(const char *path,
- const struct dfs_cache_tgt_iterator *it);
-extern int dfs_cache_get_tgt_referral(const char *path,
- const struct dfs_cache_tgt_iterator *it,
- struct dfs_info3_param *ref);
-extern int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx,
- const char *fullpath);
-extern int dfs_cache_update_vol(const char *fullpath,
- struct TCP_Server_Info *server);
-extern void dfs_cache_del_vol(const char *fullpath);
-extern int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
- char **share, char **prefix);
+int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
+ int remap, const char *path, struct dfs_info3_param *ref,
+ struct dfs_cache_tgt_list *tgt_list);
+int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
+ struct dfs_cache_tgt_list *tgt_list);
+int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *cp, int remap, const char *path,
+ const struct dfs_cache_tgt_iterator *it);
+int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
+int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
+ struct dfs_info3_param *ref);
+int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+ char **prefix);
+void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
+void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
+char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
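The three new entry points replace the old per-volume tracking: a mount is now identified by a uuid_t, and the sessions used to talk to referral servers are registered against that id. A hedged sketch of the intended call pattern (the placement of these calls is an assumption inferred from the prototypes above):

/* Kernel-style sketch, not standalone. */
uuid_t mount_id;

uuid_gen(&mount_id);                            /* once per mount */

/* after a referral lookup that used session 'ses': */
dfs_cache_add_refsrv_session(&mount_id, ses);

/* at umount (or on mount failure), drop them all in one call: */
dfs_cache_put_refsrv_sessions(&mount_id);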
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 6bcd3e8f7cda..79402ca0ddfa 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/dir.c
*
@@ -6,19 +7,6 @@
* Copyright (C) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
@@ -396,10 +384,11 @@ cifs_create_set_dentry:
goto out_err;
}
- if (S_ISDIR(newinode->i_mode)) {
- rc = -EISDIR;
- goto out_err;
- }
+ if (newinode)
+ if (S_ISDIR(newinode->i_mode)) {
+ rc = -EISDIR;
+ goto out_err;
+ }
d_drop(direntry);
d_add(direntry, newinode);
@@ -630,6 +619,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
struct inode *newInode = NULL;
const char *full_path;
void *page;
+ int retry_count = 0;
xid = get_xid();
@@ -673,6 +663,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
full_path, d_inode(direntry));
+again:
if (pTcon->posix_extensions)
rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid);
else if (pTcon->unix_ext) {
@@ -687,6 +678,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
+ } else if (rc == -EAGAIN && retry_count++ < 10) {
+ goto again;
} else if (rc == -ENOENT) {
cifs_set_time(direntry, jiffies);
newInode = NULL;
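The retry_count guard turns what could be an unbounded -EAGAIN loop into at most ten extra attempts. The same shape in isolation (names hypothetical, kernel-style negative error codes assumed):

#include <errno.h>

/* Retry a transient -EAGAIN failure a bounded number of times. */
static int lookup_with_retry(int (*op)(void *arg), void *arg)
{
        int retry_count = 0;
        int rc;

        do {
                rc = op(arg);
        } while (rc == -EAGAIN && retry_count++ < 10);

        return rc;
}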
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 534cbba72789..d15b82d569ef 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/dns_resolve.c
*
@@ -10,19 +11,6 @@
* Contains the CIFS DFS upcall routines used for hostname to
* IP address translation.
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index d3f5d27f4d06..5be060b82b13 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
* Handles host name to IP address resolution
@@ -5,19 +6,6 @@
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _DNS_RESOLVE_H
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index eb0bb8ca8e63..747a540db954 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/export.c
*
@@ -8,19 +9,6 @@
*
* Operations related to support for exporting files via NFSD
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 379a427f3c2f..cd108607a070 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/file.c
*
@@ -7,19 +8,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/backing-dev.h>
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 20d24af33ee2..dd625033cd6b 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/fscache.c - CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "fscache.h"
#include "cifsglob.h"
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index e811f2dd7619..3d55cb2ef055 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/fscache.h - CIFS filesystem cache interface definitions
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1dfa57982522..b96b253e7635 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
@@ -367,9 +355,12 @@ cifs_get_file_info_unix(struct file *filp)
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, inode->i_sb);
rc = 0;
- }
+ } else
+ goto cifs_gfiunix_out;
rc = cifs_fattr_to_inode(inode, &fattr);
+
+cifs_gfiunix_out:
free_xid(xid);
return rc;
}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 28ec8d7c521a..42c6a0bac6c8 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/ioctl.c
*
@@ -6,19 +7,6 @@
* Copyright (C) International Business Machines Corp., 2005,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
@@ -33,6 +21,7 @@
#include "cifsfs.h"
#include "cifs_ioctl.h"
#include "smb2proto.h"
+#include "smb2glob.h"
#include <linux/btrfs.h>
static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
@@ -214,48 +203,112 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
return 0;
}
-static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg)
+static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
{
- struct smb3_full_key_debug_info pfull_key_inf;
- __u64 suid;
- struct list_head *tmp;
+ struct smb3_full_key_debug_info out;
struct cifs_ses *ses;
+ int rc = 0;
bool found = false;
+ u8 __user *end;
- if (!smb3_encryption_required(tcon))
- return -EOPNOTSUPP;
+ if (!smb3_encryption_required(tcon)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* copy user input into our output buffer */
+ if (copy_from_user(&out, in, sizeof(out))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!out.session_id) {
+ /* if ses id is 0, use current user session */
+ ses = tcon->ses;
+ } else {
+ /* otherwise if a session id is given, look for it in all our sessions */
+ struct cifs_ses *ses_it = NULL;
+ struct TCP_Server_Info *server_it = NULL;
- ses = tcon->ses; /* default to user id for current user */
- if (get_user(suid, (__u64 __user *)arg))
- suid = 0;
- if (suid) {
- /* search to see if there is a session with a matching SMB UID */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &tcon->ses->server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- if (ses->Suid == suid) {
- found = true;
- break;
+ list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+ if (ses_it->Suid == out.session_id) {
+ ses = ses_it;
+ /*
+ * Since we use the session outside the critical
+ * section, increment its refcount to make sure it
+ * won't be released from under us.
+ */
+ ses->ses_count++;
+ found = true;
+ goto search_end;
+ }
}
}
+search_end:
spin_unlock(&cifs_tcp_ses_lock);
- if (found == false)
- return -EINVAL;
- } /* else uses default user's SMB UID (ie current user) */
-
- pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type);
- pfull_key_inf.Suid = ses->Suid;
- memcpy(pfull_key_inf.auth_key, ses->auth_key.response,
- 16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
- memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey,
- 32 /* SMB3_ENC_DEC_KEY_SIZE */);
- memcpy(pfull_key_inf.smb3encryptionkey,
- ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */);
- if (copy_to_user((void __user *)arg, &pfull_key_inf,
- sizeof(struct smb3_full_key_debug_info)))
- return -EFAULT;
+ if (!found) {
+ rc = -ENOENT;
+ goto out;
+ }
+ }
- return 0;
+ switch (ses->server->cipher_type) {
+ case SMB2_ENCRYPTION_AES128_CCM:
+ case SMB2_ENCRYPTION_AES128_GCM:
+ out.session_key_length = CIFS_SESS_KEY_SIZE;
+ out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
+ break;
+ case SMB2_ENCRYPTION_AES256_CCM:
+ case SMB2_ENCRYPTION_AES256_GCM:
+ out.session_key_length = CIFS_SESS_KEY_SIZE;
+ out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* check if user buffer is big enough to store all the keys */
+ if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
+ + out.server_out_key_length) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ out.session_id = ses->Suid;
+ out.cipher_type = le16_to_cpu(ses->server->cipher_type);
+
+ /* overwrite user input with our output */
+ if (copy_to_user(in, &out, sizeof(out))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* append all the keys at the end of the user buffer */
+ end = in->data;
+ if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ end += out.session_key_length;
+
+ if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ end += out.server_in_key_length;
+
+ if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ if (found)
+ cifs_put_smb_ses(ses);
+ return rc;
}
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
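From user space the new contract is: fill in in_size and session_id, issue the ioctl, then read the variable-length keys that follow the header. A hypothetical caller; the struct layout below is inferred from the fields the handler touches, and the authoritative definition (plus the CIFS_DUMP_FULL_KEY request number) lives alongside the other CIFS ioctls in fs/cifs/cifs_ioctl.h:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

struct smb3_full_key_debug_info {       /* inferred layout, see cifs_ioctl.h */
        uint32_t in_size;               /* IN: total size of this buffer */
        uint64_t session_id;            /* IN: 0 = current session; OUT: id dumped */
        uint16_t cipher_type;           /* OUT */
        uint8_t  session_key_length;    /* OUT */
        uint8_t  server_in_key_length;  /* OUT */
        uint8_t  server_out_key_length; /* OUT */
        uint8_t  data[];                /* OUT: the three keys, concatenated */
};

static int dump_keys(int fd, unsigned long request /* CIFS_DUMP_FULL_KEY */)
{
        size_t size = sizeof(struct smb3_full_key_debug_info) + 128;
        struct smb3_full_key_debug_info *info = calloc(1, size);

        if (!info)
                return -1;
        info->in_size = size;
        info->session_id = 0;           /* dump the current user's session */
        if (ioctl(fd, request, info) < 0) {
                free(info);
                return -1;
        }
        printf("cipher 0x%x: %u-byte session key, %u/%u-byte server keys\n",
               info->cipher_type, info->session_key_length,
               info->server_in_key_length, info->server_out_key_length);
        free(info);
        return 0;
}

Oversizing the trailing buffer and letting the kernel report -ENOBUFS is what makes the interface work for both 128- and 256-bit ciphers without a new ioctl per key size.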
@@ -371,6 +424,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = -EOPNOTSUPP;
break;
case CIFS_DUMP_KEY:
+ /*
+ * Dump encryption keys. This is an old ioctl that only
+ * handles AES-128-{CCM,GCM}.
+ */
if (pSMBFile == NULL)
break;
if (!capable(CAP_SYS_ADMIN)) {
@@ -398,11 +455,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
else
rc = 0;
break;
- /*
- * Dump full key (32 bytes instead of 16 bytes) is
- * needed if GCM256 (stronger encryption) negotiated
- */
case CIFS_DUMP_FULL_KEY:
+ /*
+ * Dump encryption keys (handles any key sizes)
+ */
if (pSMBFile == NULL)
break;
if (!capable(CAP_SYS_ADMIN)) {
@@ -410,8 +466,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
break;
}
tcon = tlink_tcon(pSMBFile->tlink);
- rc = cifs_dump_full_key(tcon, arg);
-
+ rc = cifs_dump_full_key(tcon, (void __user *)arg);
break;
case CIFS_IOC_NOTIFY:
if (!S_ISDIR(inode->i_mode)) {
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 970fcf2adb08..f0a6d63bc08c 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 7207a63819cb..184138b4eb8c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
diff --git a/fs/cifs/netlink.c b/fs/cifs/netlink.c
index 5aaabe4cc0a7..291cb606f149 100644
--- a/fs/cifs/netlink.c
+++ b/fs/cifs/netlink.c
@@ -30,7 +30,7 @@ static const struct nla_policy cifs_genl_policy[CIFS_GENL_ATTR_MAX + 1] = {
[CIFS_GENL_ATTR_SWN_RESOURCE_NAME] = { .type = NLA_STRING},
};
-static struct genl_ops cifs_genl_ops[] = {
+static const struct genl_ops cifs_genl_ops[] = {
{
.cmd = CIFS_GENL_CMD_SWN_NOTIFY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 3079b38f0afb..378133ce8869 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define NTLMSSP_SIGNATURE "NTLMSSP"
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 63bfc533c9fb..bfee176b901d 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/readdir.c
*
@@ -7,19 +8,6 @@
* Copyright (C) Red Hat, Inc., 2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
@@ -321,7 +309,7 @@ static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
{
__dir_info_to_fattr(fattr, info);
- /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+ /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
if (fattr->cf_cifsattrs & ATTR_REPARSE)
fattr->cf_cifstag = le32_to_cpu(info->EaSize);
cifs_fill_common_info(fattr, cifs_sb);
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
index 8b69fcceb597..137f7c95afd6 100644
--- a/fs/cifs/rfc1002pdu.h
+++ b/fs/cifs/rfc1002pdu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/rfc1002pdu.h
*
@@ -6,19 +7,6 @@
* Copyright (c) International Business Machines Corp., 2004
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index a92a1fb7cb52..c5785fd3f52e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/sess.c
*
@@ -6,19 +7,6 @@
* Copyright (c) International Business Machines Corp., 2006, 2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "cifspdu.h"
@@ -195,7 +183,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
ses, iface->speed, iface->rdma_capable ? "yes" : "no",
&ipv4->sin_addr);
else
- cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI6)\n",
ses, iface->speed, iface->rdma_capable ? "yes" : "no",
&ipv6->sin6_addr);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 2fa3ba354cc9..c9d8a50062b8 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/smb2file.c
*
@@ -5,19 +6,6 @@
* Author(s): Steve French (sfrench@us.ibm.com),
* Pavel Shilovsky ((pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index d9a990c99121..d0e9f3782bd9 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smb2glob.h
*
@@ -9,16 +10,6 @@
* Jeremy Allison (jra@samba.org)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
*/
#ifndef _SMB2_GLOB_H
#define _SMB2_GLOB_H
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 9a61209a283e..957b2594f02e 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/smb2inode.c
*
@@ -6,19 +7,6 @@
* Author(s): Pavel Shilovsky (pshilovsky@samba.org),
* Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index c775682ee973..cea39bcecbab 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/smb2/smb2maperror.c
*
@@ -6,19 +7,6 @@
* Copyright (C) International Business Machines Corp., 2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include "cifsglob.h"
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 06d555d4da9a..668f77108831 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/smb2misc.c
*
@@ -6,19 +7,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/ctype.h>
#include "smb2pdu.h"
@@ -164,19 +152,16 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
struct smb2_transform_hdr *thdr =
(struct smb2_transform_hdr *)buf;
struct cifs_ses *ses = NULL;
- struct list_head *tmp;
/* decrypt frame now that it is completely read in */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &srvr->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each_entry(ses, &srvr->smb_ses_list, smb_ses_list) {
if (ses->Suid == thdr->SessionId)
break;
-
- ses = NULL;
}
spin_unlock(&cifs_tcp_ses_lock);
- if (ses == NULL) {
+ if (list_entry_is_head(ses, &srvr->smb_ses_list,
+ smb_ses_list)) {
cifs_dbg(VFS, "no decryption - session id not found\n");
return 1;
}
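The subtlety here is that list_for_each_entry() never leaves its cursor NULL: after a full traversal it points at the container of the list head itself, so the old `ses = NULL` sentinel has to become a list_entry_is_head() check. The idiom in isolation (kernel-style, not standalone):

struct cifs_ses *ses;

list_for_each_entry(ses, &srvr->smb_ses_list, smb_ses_list)
        if (ses->Suid == sid)
                break;          /* ses points at the match */

/* no break taken: ses is the bogus head "entry", not a real session */
if (list_entry_is_head(ses, &srvr->smb_ses_list, smb_ses_list))
        return NULL;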
@@ -548,7 +533,6 @@ static bool
smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
{
__u8 lease_state;
- struct list_head *tmp;
struct cifsFileInfo *cfile;
struct cifsInodeInfo *cinode;
int ack_req = le32_to_cpu(rsp->Flags &
@@ -556,8 +540,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
lease_state = le32_to_cpu(rsp->NewLeaseState);
- list_for_each(tmp, &tcon->openFileList) {
- cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
cinode = CIFS_I(d_inode(cfile->dentry));
if (memcmp(cinode->lease_key, rsp->LeaseKey,
@@ -618,7 +601,6 @@ static bool
smb2_is_valid_lease_break(char *buffer)
{
struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
- struct list_head *tmp, *tmp1, *tmp2;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -628,15 +610,9 @@ smb2_is_valid_lease_break(char *buffer)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &cifs_tcp_ses_list) {
- server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
-
- list_for_each(tmp1, &server->smb_ses_list) {
- ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
-
- list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifs_tcon,
- tcon_list);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
&tcon->stats.cifs_stats.num_oplock_brks);
@@ -687,7 +663,6 @@ bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
- struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct cifsInodeInfo *cinode;
@@ -710,16 +685,11 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
-
- list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp2, &tcon->openFileList) {
- cfile = list_entry(tmp2, struct cifsFileInfo,
- tlist);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
if (rsp->PersistentFid !=
cfile->fid.persistent_fid ||
rsp->VolatileFid !=
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 21ef51d338e0..e4c8f603dd58 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -388,7 +388,9 @@ smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
int rc;
+ spin_lock(&GlobalMid_Lock);
cifs_ses_server(ses)->CurrentMid = 0;
+ spin_unlock(&GlobalMid_Lock);
rc = SMB2_negotiate(xid, ses);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
@@ -2325,6 +2327,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+ int retry_count = 0;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -2372,10 +2375,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
+again:
rc = compound_send_recv(xid, tcon->ses, server,
flags, 2, rqst,
resp_buftype, rsp_iov);
+ if (rc == -EAGAIN && retry_count++ < 10)
+ goto again;
+
/* If the open failed there is nothing to do */
op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
@@ -3601,6 +3608,119 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
return rc;
}
+static int smb3_simple_fallocate_write_range(unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile,
+ loff_t off, loff_t len,
+ char *buf)
+{
+ struct cifs_io_parms io_parms = {0};
+ int nbytes;
+ struct kvec iov[2];
+
+ io_parms.netfid = cfile->fid.netfid;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.persistent_fid = cfile->fid.persistent_fid;
+ io_parms.volatile_fid = cfile->fid.volatile_fid;
+ io_parms.offset = off;
+ io_parms.length = len;
+
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = buf;
+ iov[1].iov_len = io_parms.length;
+ return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+}
+
+static int smb3_simple_fallocate_range(unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile,
+ loff_t off, loff_t len)
+{
+ struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
+ u32 out_data_len;
+ char *buf = NULL;
+ loff_t l;
+ int rc;
+
+ in_data.file_offset = cpu_to_le64(off);
+ in_data.length = cpu_to_le64(len);
+ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ FSCTL_QUERY_ALLOCATED_RANGES, true,
+ (char *)&in_data, sizeof(in_data),
+ 1024 * sizeof(struct file_allocated_range_buffer),
+ (char **)&out_data, &out_data_len);
+ if (rc)
+ goto out;
+ /*
+ * It is already all allocated
+ */
+ if (out_data_len == 0)
+ goto out;
+
+ buf = kzalloc(1024 * 1024, GFP_KERNEL);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ tmp_data = out_data;
+ while (len) {
+ /*
+ * The rest of the region is unmapped so write it all.
+ */
+ if (out_data_len == 0) {
+ rc = smb3_simple_fallocate_write_range(xid, tcon,
+ cfile, off, len, buf);
+ goto out;
+ }
+
+ if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (off < le64_to_cpu(tmp_data->file_offset)) {
+ /*
+ * We are at a hole. Write until the end of the region
+ * or until the next allocated data,
+ * whichever comes next.
+ */
+ l = le64_to_cpu(tmp_data->file_offset) - off;
+ if (len < l)
+ l = len;
+ rc = smb3_simple_fallocate_write_range(xid, tcon,
+ cfile, off, l, buf);
+ if (rc)
+ goto out;
+ off = off + l;
+ len = len - l;
+ if (len == 0)
+ goto out;
+ }
+ /*
+ * We are at a section of allocated data, just skip forward
+ * until the end of the data or the end of the region
+ * we are supposed to fallocate, whichever comes first.
+ */
+ l = le64_to_cpu(tmp_data->length);
+ if (len < l)
+ l = len;
+ off += l;
+ len -= l;
+
+ tmp_data = &tmp_data[1];
+ out_data_len -= sizeof(struct file_allocated_range_buffer);
+ }
+
+ out:
+ kfree(out_data);
+ kfree(buf);
+ return rc;
+}
+
+
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
loff_t off, loff_t len, bool keep_size)
{
@@ -3662,6 +3782,26 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
/*
+ * At this point, we are trying to fallocate an internal
+ * region of a sparse file. Since smb2 does not have a
+ * fallocate command, we have two options for emulating this.
+ * We can either turn the entire file non-sparse, which we
+ * only do if the fallocate covers virtually the whole file,
+ * or we can overwrite the region with zeroes using
+ * SMB2_write, which could be prohibitively expensive
+ * if len is large.
+ */
+ /*
+ * We are only trying to fallocate a small region so
+ * just write it out with zeroes.
+ */
+ if (len <= 1024 * 1024) {
+ rc = smb3_simple_fallocate_range(xid, tcon, cfile,
+ off, len);
+ goto out;
+ }
+
+ /*
* Check if falloc starts within first few pages of file
* and ends within a few pages of the end of file to
* ensure that most of file is being forced to be
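Stripped of the SMB2 plumbing, smb3_simple_fallocate_range() is a classic extent walk: FSCTL_QUERY_ALLOCATED_RANGES returns the allocated extents inside the region in ascending order, and every gap between them (plus any tail) gets a zero-fill write. A simplified model, assuming extents are clamped to the queried window (write_zeros() is a hypothetical stand-in for the SMB2_write of a zeroed buffer):

#include <stddef.h>
#include <stdint.h>

struct extent { uint64_t off, len; };    /* one allocated range */

static int fill_holes(uint64_t off, uint64_t len,
                      const struct extent *ext, size_t n,
                      int (*write_zeros)(uint64_t off, uint64_t len))
{
        for (size_t i = 0; len && i < n; i++) {
                if (off < ext[i].off) {          /* hole before this extent */
                        uint64_t l = ext[i].off - off;

                        if (l > len)
                                l = len;
                        int rc = write_zeros(off, l);
                        if (rc)
                                return rc;
                        off += l;
                        len -= l;
                }
                uint64_t l = ext[i].len;         /* skip the allocated data */

                if (l > len)
                        l = len;
                off += l;
                len -= l;
        }
        return len ? write_zeros(off, len) : 0;  /* trailing hole, if any */
}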
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 9f24eb88297a..962826dc3316 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/smb2pdu.c
*
@@ -8,19 +9,6 @@
*
* Contains the routines for constructing the SMB2 PDUs themselves
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
@@ -958,6 +946,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* Internal types */
server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+ /*
+ * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
+ * Set the cipher type manually.
+ */
+ if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+
security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
(struct smb2_sync_hdr *)rsp);
/*
@@ -1784,10 +1779,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
if (rc != 0) {
- if (tcon) {
- cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
- tcon->need_reconnect = true;
- }
+ cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
+ tcon->need_reconnect = true;
goto tcon_error_exit;
}
@@ -2899,7 +2892,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
#endif /* CIFS_DEBUG2 */
if (buf) {
- memcpy(buf, &rsp->CreationTime, 32);
+ buf->CreationTime = rsp->CreationTime;
+ buf->LastAccessTime = rsp->LastAccessTime;
+ buf->LastWriteTime = rsp->LastWriteTime;
+ buf->ChangeTime = rsp->ChangeTime;
buf->AllocationSize = rsp->AllocationSize;
buf->EndOfFile = rsp->EndofFile;
buf->Attributes = rsp->FileAttributes;
@@ -3477,6 +3473,8 @@ int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
NULL);
}
+#if 0
+/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
int
SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
@@ -3488,7 +3486,9 @@ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
return query_info(xid, tcon, persistent_fid, volatile_fid,
SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
+ /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
}
+#endif
int
SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
@@ -4491,7 +4491,7 @@ int posix_info_parse(const void *beg, const void *end,
{
int total_len = 0;
- int sid_len;
+ int owner_len, group_len;
int name_len;
const void *owner_sid;
const void *group_sid;
@@ -4514,17 +4514,17 @@ int posix_info_parse(const void *beg, const void *end,
/* check owner sid */
owner_sid = beg + total_len;
- sid_len = posix_info_sid_size(owner_sid, end);
- if (sid_len < 0)
+ owner_len = posix_info_sid_size(owner_sid, end);
+ if (owner_len < 0)
return -1;
- total_len += sid_len;
+ total_len += owner_len;
/* check group sid */
group_sid = beg + total_len;
- sid_len = posix_info_sid_size(group_sid, end);
- if (sid_len < 0)
+ group_len = posix_info_sid_size(group_sid, end);
+ if (group_len < 0)
return -1;
- total_len += sid_len;
+ total_len += group_len;
/* check name len */
if (beg + total_len + 4 > end)
@@ -4545,10 +4545,8 @@ int posix_info_parse(const void *beg, const void *end,
out->size = total_len;
out->name_len = name_len;
out->name = name;
- memcpy(&out->owner, owner_sid,
- posix_info_sid_size(owner_sid, end));
- memcpy(&out->group, group_sid,
- posix_info_sid_size(group_sid, end));
+ memcpy(&out->owner, owner_sid, owner_len);
+ memcpy(&out->group, group_sid, group_len);
}
return total_len;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6442dc1c292b..a5c48b85549a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smb2pdu.h
*
@@ -6,19 +7,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _SMB2PDU_H
@@ -276,7 +264,7 @@ struct share_redirect_error_context_rsp {
__le32 NotificationType;
__le32 ResourceNameOffset;
__le32 ResourceNameLength;
- __le16 Flags;
+ __le16 Reserved;
__le16 TargetType;
__le32 IPAddrCount;
struct move_dst_ipaddr IpAddrMoveList[];
@@ -1460,6 +1448,22 @@ struct smb2_echo_rsp {
#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+/*
+ * Valid FileInformation classes.
+ *
+ * Note that these are a subset of the (file) QUERY_INFO levels defined
+ * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
+ * we do not redefine them here)
+ *
+ * FileDirectoryInformation 0x01
+ * FileFullDirectoryInformation 0x02
+ * FileIdFullDirectoryInformation 0x26
+ * FileBothDirectoryInformation 0x03
+ * FileIdBothDirectoryInformation 0x25
+ * FileNamesInformation 0x0C
+ * FileIdExtdDirectoryInformation 0x3C
+ */
+
struct smb2_query_directory_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
@@ -1696,6 +1700,7 @@ struct smb3_fs_vol_info {
#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
#define FILE_STANDARD_LINK_INFORMATION 54
#define FILE_ID_INFORMATION 59
+#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60
struct smb2_file_internal_info {
__le64 IndexNumber;
@@ -1776,13 +1781,31 @@ struct smb2_file_network_open_info {
__le32 Reserved;
} __packed; /* level 34 Query also similar returned in close rsp and open rsp */
-/* See MS-FSCC 2.4.43 */
+/* See MS-FSCC 2.4.21 */
struct smb2_file_id_information {
__le64 VolumeSerialNumber;
__u64 PersistentFileId; /* opaque endianness */
__u64 VolatileFileId; /* opaque endianness */
} __packed; /* level 59 */
+/* See MS-FSCC 2.4.18 */
+struct smb2_file_id_extd_directory_info {
+ __le32 NextEntryOffset;
+ __u32 FileIndex;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 EndOfFile;
+ __le64 AllocationSize;
+ __le32 FileAttributes;
+ __le32 FileNameLength;
+ __le32 EaSize; /* EA size */
+ __le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
+ __le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bits */
+ char FileName[1];
+} __packed; /* level 60 */
+
extern char smb2_padding[7];
/* equivalent of the contents of SMB3.1.1 POSIX open context response */
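Entries at the smb2_file_id_extd_directory_info level come back as a packed run of variable-size records chained by NextEntryOffset, with 0 marking the last one. A sketch of the usual walk over such a buffer (header trimmed to the leading fields; endian conversion elided for brevity):

#include <stddef.h>
#include <stdint.h>

struct entry_hdr {                       /* leading fields of each record */
        uint32_t next_entry_offset;
        uint32_t file_index;
};

static void walk_entries(const uint8_t *buf, size_t buf_len,
                         void (*visit)(const struct entry_hdr *e))
{
        size_t pos = 0;

        while (pos + sizeof(struct entry_hdr) <= buf_len) {
                const struct entry_hdr *e =
                        (const struct entry_hdr *)(buf + pos);

                visit(e);
                if (!e->next_entry_offset)
                        break;           /* 0 == last entry in this response */
                pos += e->next_entry_offset;
        }
}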
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index a5f87b02cfaf..263767f644f8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smb2proto.h
*
@@ -6,19 +7,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _SMB2PROTO_H
#define _SMB2PROTO_H
@@ -64,8 +52,6 @@ extern void smb2_echo_request(struct work_struct *work);
extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
extern bool smb2_is_valid_oplock_break(char *buffer,
struct TCP_Server_Info *srv);
-extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
- __u64 ses_id);
extern int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
index 7505056e9580..0215ef36e240 100644
--- a/fs/cifs/smb2status.h
+++ b/fs/cifs/smb2status.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smb2status.h
*
@@ -7,19 +8,6 @@
* Copyright (c) International Business Machines Corp., 2009,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index e6fa76ab70be..6f7952ea4941 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/smb2transport.c
*
@@ -7,19 +8,6 @@
* Jeremy Allison (jra@samba.org) 2006
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
@@ -154,6 +142,7 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid != ses_id)
continue;
+ ++ses->ses_count;
return ses;
}
@@ -205,7 +194,14 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
return NULL;
}
tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ if (!tcon) {
+ cifs_put_smb_ses(ses);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return NULL;
+ }
spin_unlock(&cifs_tcp_ses_lock);
+ /* tcon already has a ref to ses, so we don't need ses anymore */
+ cifs_put_smb_ses(ses);
return tcon;
}
@@ -239,7 +235,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
if (rc) {
cifs_server_dbg(VFS,
"%s: sha256 alloc failed\n", __func__);
- return rc;
+ goto out;
}
shash = &sdesc->shash;
} else {
@@ -290,6 +286,8 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
out:
if (allocate_crypto)
cifs_free_hash(&hash, &sdesc);
+ if (ses)
+ cifs_put_smb_ses(ses);
return rc;
}
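With the lookup now taking a session reference, every caller owns a matching put. The pattern in isolation (kernel-style sketch using the names from this diff, error code illustrative):

/* Kernel-style sketch, not standalone. */
ses = smb2_find_smb_ses(server, ses_id);   /* lookup takes a ses reference */
if (!ses)
        return -ENOENT;
/* ... use ses ... */
out:
        cifs_put_smb_ses(ses);             /* drop it on every exit path */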
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 10dfe5006792..31ef64eb7fbb 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -572,8 +572,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
goto out;
}
- wait_for_completion_interruptible_timeout(
+ rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ /* e.g. if interrupted returns -ERESTARTSYS */
+ if (rc < 0) {
+ log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
+ goto out;
+ }
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
@@ -586,8 +591,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
goto out;
}
- wait_for_completion_interruptible_timeout(
+ rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ /* e.g. if interrupted returns -ERESTARTSYS */
+ if (rc < 0) {
+ log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc);
+ goto out;
+ }
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
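wait_for_completion_interruptible_timeout() has three outcomes, and the checks added above only handle the negative one; a sketch of the full decode (note that a true timeout returns 0 and would still fall through to ri_rc in the code above):

/* Kernel-style sketch, not standalone. */
long rc = wait_for_completion_interruptible_timeout(&done, timeout);

if (rc < 0)                /* interrupted by a signal: -ERESTARTSYS */
        return rc;
if (rc == 0)               /* timed out before the completion fired */
        return -ETIMEDOUT;
/* rc > 0: completed; rc is the jiffies left on the timeout */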
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 7f16cb825fe5..60189efb3236 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smberr.h
*
@@ -7,19 +8,6 @@
* See Error Codes section of the SNIA CIFS Specification
* for more information
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define SUCCESS 0x00 /* The request was successful. */
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index a0e84747f567..d0fc42061f49 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
*
* Copyright (c) International Business Machines Corp., 2002,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* IOCTL information */
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index d6df908dccad..dafcb6ab050d 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -12,6 +12,11 @@
#include <linux/tracepoint.h>
+/*
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
+ */
+
/* For logging errors in read or write */
DECLARE_EVENT_CLASS(smb3_rw_err_class,
TP_PROTO(unsigned int xid,
@@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
TP_ARGS(xid, func_name, rc),
TP_STRUCT__entry(
__field(unsigned int, xid)
- __field(const char *, func_name)
+ __string(func_name, func_name)
__field(int, rc)
),
TP_fast_assign(
__entry->xid = xid;
- __entry->func_name = func_name;
+ __assign_str(func_name, func_name);
__entry->rc = rc;
),
TP_printk("\t%s: xid=%u rc=%d",
- __entry->func_name, __entry->xid, __entry->rc)
+ __get_str(func_name), __entry->xid, __entry->rc)
)
#define DEFINE_SMB3_EXIT_ERR_EVENT(name) \
@@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
TP_ARGS(xid, func_name),
TP_STRUCT__entry(
__field(unsigned int, xid)
- __field(const char *, func_name)
+ __string(func_name, func_name)
),
TP_fast_assign(
__entry->xid = xid;
- __entry->func_name = func_name;
+ __assign_str(func_name, func_name);
),
TP_printk("\t%s: xid=%u",
- __entry->func_name, __entry->xid)
+ __get_str(func_name), __entry->xid)
)
#define DEFINE_SMB3_ENTER_EXIT_EVENT(name) \
@@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
TP_STRUCT__entry(
__field(__u64, currmid)
__field(__u64, conn_id)
- __field(char *, hostname)
+ __string(hostname, hostname)
),
TP_fast_assign(
__entry->currmid = currmid;
__entry->conn_id = conn_id;
- __entry->hostname = hostname;
+ __assign_str(hostname, hostname);
),
TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
__entry->conn_id,
- __entry->hostname,
+ __get_str(hostname),
__entry->currmid)
)
@@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_STRUCT__entry(
__field(__u64, currmid)
__field(__u64, conn_id)
- __field(char *, hostname)
+ __string(hostname, hostname)
__field(int, credits)
__field(int, credits_to_add)
__field(int, in_flight)
@@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_fast_assign(
__entry->currmid = currmid;
__entry->conn_id = conn_id;
- __entry->hostname = hostname;
+ __assign_str(hostname, hostname);
__entry->credits = credits;
__entry->credits_to_add = credits_to_add;
__entry->in_flight = in_flight;
@@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
"credits=%d credit_change=%d in_flight=%d",
__entry->conn_id,
- __entry->hostname,
+ __get_str(hostname),
__entry->currmid,
__entry->credits,
__entry->credits_to_add,
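The trace.h conversions above replace pointer fields with tracepoint-owned string copies: a __field(char *, hostname) records only the pointer, which is dereferenced later when the trace buffer is read, by which time the string may have been freed, while __string()/__assign_str()/__get_str() copy the characters into the ring buffer at trace time. The same hazard and fix, reduced to a standalone C sketch with hypothetical record types:

    #include <string.h>

    struct rec_ptr  { const char *hostname; };  /* dangles if the string is freed */
    struct rec_copy { char hostname[64]; };     /* snapshot taken at record time */

    static void record_unsafe(struct rec_ptr *r, const char *hostname)
    {
            r->hostname = hostname;             /* later readers may see freed memory */
    }

    static void record_safe(struct rec_copy *r, const char *hostname)
    {
            strncpy(r->hostname, hostname, sizeof(r->hostname) - 1);
            r->hostname[sizeof(r->hostname) - 1] = '\0';
    }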
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c1725b55f364..f65f9a692ca2 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/transport.c
*
@@ -5,19 +6,6 @@
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org) 2006.
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index aa3e8ca0457c..9ed481e79ce0 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* fs/cifs/xattr.c
*
* Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index eb5ec3e46283..b601610e9907 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -28,12 +28,6 @@
static struct lock_class_key default_group_class[MAX_LOCK_DEPTH];
#endif
-static const struct address_space_operations configfs_aops = {
- .readpage = simple_readpage,
- .write_begin = simple_write_begin,
- .write_end = simple_write_end,
-};
-
static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr,
};
@@ -114,7 +108,7 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
struct inode * inode = new_inode(s);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_mapping->a_ops = &configfs_aops;
+ inode->i_mapping->a_ops = &ram_aops;
inode->i_op = &configfs_inode_operations;
if (sd->s_iattr) {
diff --git a/fs/coredump.c b/fs/coredump.c
index 2868e3e171ae..c3d8fc14b993 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- return signal_pending(current);
+ return fatal_signal_pending(current) || freezing(current);
}
static void wait_for_dump_helpers(struct file *file)
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 6ca7d16593ff..d00455440d08 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -344,13 +344,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
offsetof(struct fscrypt_nokey_name, sha256));
BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
- if (hash) {
- nokey_name.dirhash[0] = hash;
- nokey_name.dirhash[1] = minor_hash;
- } else {
- nokey_name.dirhash[0] = 0;
- nokey_name.dirhash[1] = 0;
- }
+ nokey_name.dirhash[0] = hash;
+ nokey_name.dirhash[1] = minor_hash;
+
if (iname->len <= sizeof(nokey_name.bytes)) {
memcpy(nokey_name.bytes, iname->name, iname->len);
size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 261293fb7097..bca9c6658a7c 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -210,15 +210,40 @@ out_unlock:
return err;
}
+/*
+ * Derive a SipHash key from the given fscrypt master key and the given
+ * application-specific information string.
+ *
+ * Note that the KDF produces a byte array, but the SipHash APIs expect the key
+ * as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
+ * endianness swap in order to get the same results as on little endian CPUs.
+ */
+static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
+ u8 context, const u8 *info,
+ unsigned int infolen, siphash_key_t *key)
+{
+ int err;
+
+ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
+ (u8 *)key, sizeof(*key));
+ if (err)
+ return err;
+
+ BUILD_BUG_ON(sizeof(*key) != 16);
+ BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
+ le64_to_cpus(&key->key[0]);
+ le64_to_cpus(&key->key[1]);
+ return 0;
+}
+
int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
const struct fscrypt_master_key *mk)
{
int err;
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
- (u8 *)&ci->ci_dirhash_key,
- sizeof(ci->ci_dirhash_key));
+ err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
+ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
+ &ci->ci_dirhash_key);
if (err)
return err;
ci->ci_dirhash_key_initialized = true;
@@ -253,10 +278,9 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
if (mk->mk_ino_hash_key_initialized)
goto unlock;
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0,
- (u8 *)&mk->mk_ino_hash_key,
- sizeof(mk->mk_ino_hash_key));
+ err = fscrypt_derive_siphash_key(mk,
+ HKDF_CONTEXT_INODE_HASH_KEY,
+ NULL, 0, &mk->mk_ino_hash_key);
if (err)
goto unlock;
/* pairs with smp_load_acquire() above */
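The new fscrypt_derive_siphash_key() helper centralizes the endianness fix: the HKDF output is a byte stream, but siphash_key_t is a pair of host-endian 64-bit words, so without the le64_to_cpus() swap the same key bytes would hash differently on big endian CPUs. The equivalent conversion written out by hand, as a standalone sketch with hypothetical names:

    #include <stdint.h>

    struct sip_key { uint64_t k[2]; };

    /* interpret a 16-byte KDF output as two little-endian 64-bit words */
    static void kdf_bytes_to_sip_key(const uint8_t raw[16], struct sip_key *key)
    {
            int i;

            for (i = 0; i < 2; i++)
                    key->k[i] = (uint64_t)raw[8 * i]            |
                                (uint64_t)raw[8 * i + 1] <<  8  |
                                (uint64_t)raw[8 * i + 2] << 16  |
                                (uint64_t)raw[8 * i + 3] << 24  |
                                (uint64_t)raw[8 * i + 4] << 32  |
                                (uint64_t)raw[8 * i + 5] << 40  |
                                (uint64_t)raw[8 * i + 6] << 48  |
                                (uint64_t)raw[8 * i + 7] << 56;
    }

This is what le64_to_cpus() accomplishes in place: a no-op on little endian CPUs, a byte swap on big endian ones.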
diff --git a/fs/dax.c b/fs/dax.c
index 62352cbcf0f4..da41f9363568 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -488,10 +488,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
struct address_space *mapping, unsigned int order)
{
unsigned long index = xas->xa_index;
- bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
+ bool pmd_downgrade; /* splitting PMD entry into PTE entries? */
void *entry;
retry:
+ pmd_downgrade = false;
xas_lock_irq(xas);
entry = get_unlocked_entry(xas, order);
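The dax change is an instance of a general pitfall in goto-retry loops: a local that the loop body may mutate (here pmd_downgrade) must be re-initialized on the retry path, otherwise state from a failed attempt leaks into the next one. A minimal standalone illustration, with hypothetical names:

    #include <errno.h>
    #include <stdbool.h>

    int try_op(bool *downgrade);    /* hypothetical: may set *downgrade, may fail */

    int do_op_with_retry(void)
    {
            bool downgrade;         /* mutated per attempt */
            int err;
    retry:
            downgrade = false;      /* reset: don't leak state from a failed pass */
            err = try_op(&downgrade);
            if (err == -EAGAIN)
                    goto retry;
            return err;
    }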
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index e813acfaa6e8..ba7c01cd9a5d 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
copy[copy_len] = '\n';
- ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+ ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
kfree(copy);
return ret;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 1d252164d97b..8129a430d789 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
static int debugfs_setattr(struct user_namespace *mnt_userns,
struct dentry *dentry, struct iattr *ia)
{
- int ret = security_locked_down(LOCKDOWN_DEBUGFS);
+ int ret;
- if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
- return ret;
+ if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
+ ret = security_locked_down(LOCKDOWN_DEBUGFS);
+ if (ret)
+ return ret;
+ }
return simple_setattr(&init_user_ns, dentry, ia);
}
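The debugfs_setattr() rework changes evaluation order so the lockdown decision (and any audit noise it produces) only happens when mode/uid/gid are actually being changed; other attribute updates, such as timestamps, skip the hook entirely. The shape of the pattern, as a standalone sketch with hypothetical names and flag values:

    enum { ATTR_MODE = 1, ATTR_UID = 2, ATTR_GID = 4 };  /* hypothetical values */

    int policy_check(void);                 /* e.g. a lockdown-style hook */
    int do_setattr(unsigned int ia_valid);

    int setattr(unsigned int ia_valid)
    {
            int ret;

            if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
                    ret = policy_check();   /* consulted only when guarded bits change */
                    if (ret)
                            return ret;
            }
            return do_setattr(ia_valid);
    }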
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 88d95d96e36c..42eee2783756 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -20,6 +20,7 @@
#include <net/sock.h>
#include "config.h"
+#include "midcomms.h"
#include "lowcomms.h"
/*
@@ -79,6 +80,9 @@ struct dlm_cluster {
unsigned int cl_new_rsb_count;
unsigned int cl_recover_callbacks;
char cl_cluster_name[DLM_LOCKSPACE_LEN];
+
+ struct dlm_spaces *sps;
+ struct dlm_comms *cms;
};
static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
@@ -204,7 +208,7 @@ static int dlm_check_zero(unsigned int x)
static int dlm_check_buffer_size(unsigned int x)
{
- if (x < DEFAULT_BUFFER_SIZE)
+ if (x < DLM_MAX_SOCKET_BUFSIZE)
return -EINVAL;
return 0;
@@ -409,6 +413,9 @@ static struct config_group *make_cluster(struct config_group *g,
if (!cl || !sps || !cms)
goto fail;
+ cl->sps = sps;
+ cl->cms = cms;
+
config_group_init_type_name(&cl->group, name, &cluster_type);
config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
@@ -458,6 +465,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
static void release_cluster(struct config_item *i)
{
struct dlm_cluster *cl = config_item_to_cluster(i);
+
+ kfree(cl->sps);
+ kfree(cl->cms);
kfree(cl);
}
@@ -532,7 +542,7 @@ static void drop_comm(struct config_group *g, struct config_item *i)
struct dlm_comm *cm = config_item_to_comm(i);
if (local_comm == cm)
local_comm = NULL;
- dlm_lowcomms_close(cm->nodeid);
+ dlm_midcomms_close(cm->nodeid);
while (cm->addr_count--)
kfree(cm->addr[cm->addr_count]);
config_item_put(i);
@@ -942,7 +952,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_SCAN_SECS 5
#define DEFAULT_LOG_DEBUG 0
#define DEFAULT_LOG_INFO 1
-#define DEFAULT_PROTOCOL 0
+#define DEFAULT_PROTOCOL DLM_PROTO_TCP
#define DEFAULT_MARK 0
#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
#define DEFAULT_WAITWARN_US 0
@@ -952,7 +962,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
struct dlm_config_info dlm_config = {
.ci_tcp_port = DEFAULT_TCP_PORT,
- .ci_buffer_size = DEFAULT_BUFFER_SIZE,
+ .ci_buffer_size = DLM_MAX_SOCKET_BUFSIZE,
.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
.ci_recover_timer = DEFAULT_RECOVER_TIMER,
.ci_toss_secs = DEFAULT_TOSS_SECS,
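The sps/cms changes in config.c fix a memory leak: make_cluster() allocates the "spaces" and "comms" sub-groups, but release_cluster() previously freed only the cluster struct itself. Keeping the two pointers in struct dlm_cluster lets the release callback undo everything its constructor allocated. The pattern, reduced to a standalone sketch with hypothetical types:

    #include <stdlib.h>

    struct sub { int dummy; };
    struct owner {
            struct sub *a, *b;      /* keep what owner_make() allocated ... */
    };

    static struct owner *owner_make(void)
    {
            struct owner *o = calloc(1, sizeof(*o));

            if (!o)
                    return NULL;
            o->a = calloc(1, sizeof(*o->a));
            o->b = calloc(1, sizeof(*o->b));
            if (!o->a || !o->b) {
                    free(o->a);
                    free(o->b);
                    free(o);
                    return NULL;
            }
            return o;
    }

    static void owner_release(struct owner *o)
    {
            free(o->a);             /* ... so owner_release() can free it again */
            free(o->b);
            free(o);
    }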
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index d2cd4bd20313..df92b0a07fc6 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -12,7 +12,7 @@
#ifndef __CONFIG_DOT_H__
#define __CONFIG_DOT_H__
-#define DEFAULT_BUFFER_SIZE 4096
+#define DLM_MAX_SOCKET_BUFSIZE 4096
struct dlm_config_node {
int nodeid;
@@ -23,6 +23,9 @@ struct dlm_config_node {
#define DLM_MAX_ADDR_COUNT 3
+#define DLM_PROTO_TCP 0
+#define DLM_PROTO_SCTP 1
+
struct dlm_config_info {
int ci_tcp_port;
int ci_buffer_size;
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index d5bd990bcab8..47e9d57e4cae 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include "dlm_internal.h"
+#include "midcomms.h"
#include "lock.h"
#define DLM_DEBUG_BUF_LEN 4096
@@ -23,6 +24,7 @@ static char debug_buf[DLM_DEBUG_BUF_LEN];
static struct mutex debug_buf_lock;
static struct dentry *dlm_root;
+static struct dentry *dlm_comms;
static char *print_lockmode(int mode)
{
@@ -738,6 +740,57 @@ void dlm_delete_debug_file(struct dlm_ls *ls)
debugfs_remove(ls->ls_debug_toss_dentry);
}
+static int dlm_state_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "%s\n", dlm_midcomms_state(file->private));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dlm_state);
+
+static int dlm_flags_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "%lu\n", dlm_midcomms_flags(file->private));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dlm_flags);
+
+static int dlm_send_queue_cnt_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "%d\n", dlm_midcomms_send_queue_cnt(file->private));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dlm_send_queue_cnt);
+
+static int dlm_version_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "0x%08x\n", dlm_midcomms_version(file->private));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dlm_version);
+
+void *dlm_create_debug_comms_file(int nodeid, void *data)
+{
+ struct dentry *d_node;
+ char name[256];
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 256, "%d", nodeid);
+
+ d_node = debugfs_create_dir(name, dlm_comms);
+ debugfs_create_file("state", 0444, d_node, data, &dlm_state_fops);
+ debugfs_create_file("flags", 0444, d_node, data, &dlm_flags_fops);
+ debugfs_create_file("send_queue_count", 0444, d_node, data,
+ &dlm_send_queue_cnt_fops);
+ debugfs_create_file("version", 0444, d_node, data, &dlm_version_fops);
+
+ return d_node;
+}
+
+void dlm_delete_debug_comms_file(void *ctx)
+{
+ debugfs_remove(ctx);
+}
+
void dlm_create_debug_file(struct dlm_ls *ls)
{
char name[DLM_LOCKSPACE_LEN + 8];
@@ -797,6 +850,7 @@ void __init dlm_register_debugfs(void)
{
mutex_init(&debug_buf_lock);
dlm_root = debugfs_create_dir("dlm", NULL);
+ dlm_comms = debugfs_create_dir("comms", dlm_root);
}
void dlm_unregister_debugfs(void)
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 04fe9f525ac7..91d1ca3a121a 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -57,9 +57,12 @@ struct dlm_header;
struct dlm_message;
struct dlm_rcom;
struct dlm_mhandle;
+struct dlm_msg;
#define log_print(fmt, args...) \
printk(KERN_ERR "dlm: "fmt"\n" , ##args)
+#define log_print_ratelimited(fmt, args...) \
+ printk_ratelimited(KERN_ERR "dlm: "fmt"\n", ##args)
#define log_error(ls, fmt, args...) \
printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
@@ -368,23 +371,33 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
/* dlm_header is first element of all structs sent between nodes */
#define DLM_HEADER_MAJOR 0x00030000
-#define DLM_HEADER_MINOR 0x00000001
+#define DLM_HEADER_MINOR 0x00000002
+
+#define DLM_VERSION_3_1 0x00030001
+#define DLM_VERSION_3_2 0x00030002
#define DLM_HEADER_SLOTS 0x00000001
#define DLM_MSG 1
#define DLM_RCOM 2
+#define DLM_OPTS 3
+#define DLM_ACK 4
+#define DLM_FIN 5
struct dlm_header {
uint32_t h_version;
- uint32_t h_lockspace;
+ union {
+ /* for DLM_MSG and DLM_RCOM */
+ uint32_t h_lockspace;
+ /* for DLM_ACK and DLM_OPTS */
+ uint32_t h_seq;
+ } u;
uint32_t h_nodeid; /* nodeid of sender */
uint16_t h_length;
uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
uint8_t h_pad;
};
-
#define DLM_MSG_REQUEST 1
#define DLM_MSG_CONVERT 2
#define DLM_MSG_UNLOCK 3
@@ -452,10 +465,29 @@ struct dlm_rcom {
char rc_buf[];
};
+struct dlm_opt_header {
+ uint16_t t_type;
+ uint16_t t_length;
+ uint32_t o_pad;
+ /* need to be 8 byte aligned */
+ char t_value[];
+};
+
+/* encapsulation header */
+struct dlm_opts {
+ struct dlm_header o_header;
+ uint8_t o_nextcmd;
+ uint8_t o_pad;
+ uint16_t o_optlen;
+ uint32_t o_pad2;
+ char o_opts[];
+};
+
union dlm_packet {
struct dlm_header header; /* common to other two */
struct dlm_message message;
struct dlm_rcom rcom;
+ struct dlm_opts opts;
};
#define DLM_RSF_NEED_SLOTS 0x00000001
@@ -722,11 +754,15 @@ void dlm_register_debugfs(void);
void dlm_unregister_debugfs(void);
void dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
+void *dlm_create_debug_comms_file(int nodeid, void *data);
+void dlm_delete_debug_comms_file(void *ctx);
#else
static inline void dlm_register_debugfs(void) { }
static inline void dlm_unregister_debugfs(void) { }
static inline void dlm_create_debug_file(struct dlm_ls *ls) { }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
+static inline void *dlm_create_debug_comms_file(int nodeid, void *data) { return NULL; }
+static inline void dlm_delete_debug_comms_file(void *ctx) { }
#endif
#endif /* __DLM_INTERNAL_DOT_H__ */
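The new structs define the 3.2 on-wire framing: the outer dlm_header carries h_cmd = DLM_OPTS and a sequence number in u.h_seq, struct dlm_opts follows with o_optlen bytes of TLV options (struct dlm_opt_header), and the inner DLM_MSG/DLM_RCOM payload comes after that, its type announced in o_nextcmd. A hedged parsing sketch under those assumptions; real code must also convert fields from little endian and validate every length against the receive buffer:

    /* sketch only, assuming the structs above and host byte order */
    static const union dlm_packet *unwrap_opts(const union dlm_packet *p)
    {
            const struct dlm_opts *opts = &p->opts;

            if (p->header.h_cmd != DLM_OPTS)
                    return p;       /* not encapsulated (e.g. a 3.1 peer) */

            /* the inner message, of type opts->o_nextcmd, starts after the
             * encapsulation header plus o_optlen bytes of options
             */
            return (const union dlm_packet *)(opts->o_opts + opts->o_optlen);
    }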
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index b93df39d0915..c502c065d007 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -59,7 +59,7 @@
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
-#include "lowcomms.h"
+#include "midcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
@@ -3534,17 +3534,17 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
char *mb;
/* get_buffer gives us a message handle (mh) that we need to
- pass into lowcomms_commit and a message buffer (mb) that we
+ pass into midcomms_commit and a message buffer (mb) that we
write our data into */
- mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
if (!mh)
return -ENOBUFS;
ms = (struct dlm_message *) mb;
ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- ms->m_header.h_lockspace = ls->ls_global_id;
+ ms->m_header.u.h_lockspace = ls->ls_global_id;
ms->m_header.h_nodeid = dlm_our_nodeid();
ms->m_header.h_length = mb_len;
ms->m_header.h_cmd = DLM_MSG;
@@ -3589,7 +3589,7 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
{
dlm_message_out(ms);
- dlm_lowcomms_commit_buffer(mh);
+ dlm_midcomms_commit_mhandle(mh);
return 0;
}
@@ -5038,16 +5038,16 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
if (hd->h_nodeid != nodeid) {
log_print("invalid h_nodeid %d from %d lockspace %x",
- hd->h_nodeid, nodeid, hd->h_lockspace);
+ hd->h_nodeid, nodeid, hd->u.h_lockspace);
return;
}
- ls = dlm_find_lockspace_global(hd->h_lockspace);
+ ls = dlm_find_lockspace_global(hd->u.h_lockspace);
if (!ls) {
if (dlm_config.ci_log_debug) {
printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
"%u from %d cmd %d type %d\n",
- hd->h_lockspace, nodeid, hd->h_cmd, type);
+ hd->u.h_lockspace, nodeid, hd->h_cmd, type);
}
if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index c14cf2b7faab..d71aba8c3e64 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -16,6 +16,7 @@
#include "member.h"
#include "recoverd.h"
#include "dir.h"
+#include "midcomms.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
@@ -390,7 +391,7 @@ static int threads_start(void)
}
/* Thread for sending/receiving messages for all lockspace's */
- error = dlm_lowcomms_start();
+ error = dlm_midcomms_start();
if (error) {
log_print("cannot start dlm lowcomms %d", error);
goto scand_fail;
@@ -566,7 +567,12 @@ static int new_lockspace(const char *name, const char *cluster,
mutex_init(&ls->ls_requestqueue_mutex);
mutex_init(&ls->ls_clear_proc_locks);
- ls->ls_recover_buf = kmalloc(LOWCOMMS_MAX_TX_BUFFER_LEN, GFP_NOFS);
+ /* Due to backwards compatibility with 3.1 we need to use the maximum
+ * possible dlm message size to be sure the message will fit and
+ * we don't run into out of bounds issues. However the sending side
+ * on 3.2 might send less.
+ */
+ ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
if (!ls->ls_recover_buf)
goto out_lkbidr;
@@ -698,7 +704,7 @@ int dlm_new_lockspace(const char *name, const char *cluster,
error = 0;
if (!ls_count) {
dlm_scand_stop();
- dlm_lowcomms_shutdown();
+ dlm_midcomms_shutdown();
dlm_lowcomms_stop();
}
out:
@@ -787,7 +793,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
if (ls_count == 1) {
dlm_scand_stop();
- dlm_lowcomms_shutdown();
+ dlm_midcomms_shutdown();
}
dlm_callback_stop(ls);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 166e36fcf3e4..0ea9ae35da0b 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -59,7 +59,6 @@
#include "config.h"
#define NEEDED_RMEM (4*1024*1024)
-#define CONN_HASH_SIZE 32
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
@@ -79,14 +78,20 @@ struct connection {
#define CF_CLOSING 8
#define CF_SHUTDOWN 9
#define CF_CONNECTED 10
+#define CF_RECONNECT 11
+#define CF_DELAY_CONNECT 12
+#define CF_EOF 13
struct list_head writequeue; /* List of outgoing writequeue_entries */
spinlock_t writequeue_lock;
+ atomic_t writequeue_cnt;
void (*connect_action) (struct connection *); /* What to do to connect */
void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
+ bool (*eof_condition)(struct connection *con); /* What to do to eof check */
int retries;
#define MAX_CONNECT_RETRIES 3
struct hlist_node list;
struct connection *othercon;
+ struct connection *sendcon;
struct work_struct rwork; /* Receive workqueue */
struct work_struct swork; /* Send workqueue */
wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
@@ -113,7 +118,22 @@ struct writequeue_entry {
int len;
int end;
int users;
+ bool dirty;
struct connection *con;
+ struct list_head msgs;
+ struct kref ref;
+};
+
+struct dlm_msg {
+ struct writequeue_entry *entry;
+ struct dlm_msg *orig_msg;
+ bool retransmit;
+ void *ppc;
+ int len;
+ int idx; /* new()/commit() idx exchange */
+
+ struct list_head list;
+ struct kref ref;
};
struct dlm_node_addr {
@@ -155,33 +175,23 @@ static void sctp_connect_to_sock(struct connection *con);
static void tcp_connect_to_sock(struct connection *con);
static void dlm_tcp_shutdown(struct connection *con);
-/* This is deliberately very simple because most clusters have simple
- sequential nodeids, so we should be able to go straight to a connection
- struct in the array */
-static inline int nodeid_hash(int nodeid)
+static struct connection *__find_con(int nodeid, int r)
{
- return nodeid & (CONN_HASH_SIZE-1);
-}
-
-static struct connection *__find_con(int nodeid)
-{
- int r, idx;
struct connection *con;
- r = nodeid_hash(nodeid);
-
- idx = srcu_read_lock(&connections_srcu);
hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
- if (con->nodeid == nodeid) {
- srcu_read_unlock(&connections_srcu, idx);
+ if (con->nodeid == nodeid)
return con;
- }
}
- srcu_read_unlock(&connections_srcu, idx);
return NULL;
}
+static bool tcp_eof_condition(struct connection *con)
+{
+ return atomic_read(&con->writequeue_cnt);
+}
+
static int dlm_con_init(struct connection *con, int nodeid)
{
con->rx_buflen = dlm_config.ci_buffer_size;
@@ -193,15 +203,23 @@ static int dlm_con_init(struct connection *con, int nodeid)
mutex_init(&con->sock_mutex);
INIT_LIST_HEAD(&con->writequeue);
spin_lock_init(&con->writequeue_lock);
+ atomic_set(&con->writequeue_cnt, 0);
INIT_WORK(&con->swork, process_send_sockets);
INIT_WORK(&con->rwork, process_recv_sockets);
init_waitqueue_head(&con->shutdown_wait);
- if (dlm_config.ci_protocol == 0) {
+ switch (dlm_config.ci_protocol) {
+ case DLM_PROTO_TCP:
con->connect_action = tcp_connect_to_sock;
con->shutdown_action = dlm_tcp_shutdown;
- } else {
+ con->eof_condition = tcp_eof_condition;
+ break;
+ case DLM_PROTO_SCTP:
con->connect_action = sctp_connect_to_sock;
+ break;
+ default:
+ kfree(con->rx_buf);
+ return -EINVAL;
}
return 0;
@@ -216,7 +234,8 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
struct connection *con, *tmp;
int r, ret;
- con = __find_con(nodeid);
+ r = nodeid_hash(nodeid);
+ con = __find_con(nodeid, r);
if (con || !alloc)
return con;
@@ -230,8 +249,6 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
return NULL;
}
- r = nodeid_hash(nodeid);
-
spin_lock(&connections_lock);
/* Because multiple workqueues/threads calls this function it can
* race on multiple cpu's. Instead of locking hot path __find_con()
@@ -239,7 +256,7 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
* under protection of connections_lock. If this is the case we
* abort our connection creation and return the existing connection.
*/
- tmp = __find_con(nodeid);
+ tmp = __find_con(nodeid, r);
if (tmp) {
spin_unlock(&connections_lock);
kfree(con->rx_buf);
@@ -256,15 +273,13 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
- int i, idx;
+ int i;
struct connection *con;
- idx = srcu_read_lock(&connections_srcu);
for (i = 0; i < CONN_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(con, &connection_hash[i], list)
conn_func(con);
}
- srcu_read_unlock(&connections_srcu, idx);
}
static struct dlm_node_addr *find_node_addr(int nodeid)
@@ -462,6 +477,9 @@ static void lowcomms_data_ready(struct sock *sk)
static void lowcomms_listen_data_ready(struct sock *sk)
{
+ if (!dlm_allow_conn)
+ return;
+
queue_work(recv_workqueue, &listen_con.rwork);
}
@@ -518,14 +536,21 @@ static void lowcomms_state_change(struct sock *sk)
int dlm_lowcomms_connect_node(int nodeid)
{
struct connection *con;
+ int idx;
if (nodeid == dlm_our_nodeid())
return 0;
+ idx = srcu_read_lock(&connections_srcu);
con = nodeid2con(nodeid, GFP_NOFS);
- if (!con)
+ if (!con) {
+ srcu_read_unlock(&connections_srcu, idx);
return -ENOMEM;
+ }
+
lowcomms_connect_sock(con);
+ srcu_read_unlock(&connections_srcu, idx);
+
return 0;
}
@@ -587,6 +612,22 @@ static void lowcomms_error_report(struct sock *sk)
dlm_config.ci_tcp_port, sk->sk_err,
sk->sk_err_soft);
}
+
+ /* below sendcon only handling */
+ if (test_bit(CF_IS_OTHERCON, &con->flags))
+ con = con->sendcon;
+
+ switch (sk->sk_err) {
+ case ECONNREFUSED:
+ set_bit(CF_DELAY_CONNECT, &con->flags);
+ break;
+ default:
+ break;
+ }
+
+ if (!test_and_set_bit(CF_RECONNECT, &con->flags))
+ queue_work(send_workqueue, &con->swork);
+
out:
read_unlock_bh(&sk->sk_callback_lock);
if (orig_report)
@@ -669,6 +710,42 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
+static void dlm_page_release(struct kref *kref)
+{
+ struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
+ ref);
+
+ __free_page(e->page);
+ kfree(e);
+}
+
+static void dlm_msg_release(struct kref *kref)
+{
+ struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
+
+ kref_put(&msg->entry->ref, dlm_page_release);
+ kfree(msg);
+}
+
+static void free_entry(struct writequeue_entry *e)
+{
+ struct dlm_msg *msg, *tmp;
+
+ list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
+ if (msg->orig_msg) {
+ msg->orig_msg->retransmit = false;
+ kref_put(&msg->orig_msg->ref, dlm_msg_release);
+ }
+
+ list_del(&msg->list);
+ kref_put(&msg->ref, dlm_msg_release);
+ }
+
+ list_del(&e->list);
+ atomic_dec(&e->con->writequeue_cnt);
+ kref_put(&e->ref, dlm_page_release);
+}
+
static void dlm_close_sock(struct socket **sock)
{
if (*sock) {
@@ -683,6 +760,7 @@ static void close_connection(struct connection *con, bool and_other,
bool tx, bool rx)
{
bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
+ struct writequeue_entry *e;
if (tx && !closing && cancel_work_sync(&con->swork)) {
log_print("canceled swork for node %d", con->nodeid);
@@ -698,12 +776,35 @@ static void close_connection(struct connection *con, bool and_other,
if (con->othercon && and_other) {
/* Will only re-enter once. */
- close_connection(con->othercon, false, true, true);
+ close_connection(con->othercon, false, tx, rx);
+ }
+
+ /* If a writequeue entry was only partially sent, we drop the
+ * whole entry on reconnect; otherwise we would resume in the
+ * middle of a msg, which would confuse the other end.
+ *
+ * We can always drop messages because they get retransmitted, but
+ * what we cannot allow is to transmit half messages which may be
+ * processed at the other side.
+ *
+ * Our policy is to start from a clean state on disconnect; we don't
+ * know what was sent/received at the transport layer in this case.
+ */
+ spin_lock(&con->writequeue_lock);
+ if (!list_empty(&con->writequeue)) {
+ e = list_first_entry(&con->writequeue, struct writequeue_entry,
+ list);
+ if (e->dirty)
+ free_entry(e);
}
+ spin_unlock(&con->writequeue_lock);
con->rx_leftover = 0;
con->retries = 0;
clear_bit(CF_CONNECTED, &con->flags);
+ clear_bit(CF_DELAY_CONNECT, &con->flags);
+ clear_bit(CF_RECONNECT, &con->flags);
+ clear_bit(CF_EOF, &con->flags);
mutex_unlock(&con->sock_mutex);
clear_bit(CF_CLOSING, &con->flags);
}
@@ -841,19 +942,26 @@ out_resched:
return -EAGAIN;
out_close:
- mutex_unlock(&con->sock_mutex);
- if (ret != -EAGAIN) {
- /* Reconnect when there is something to send */
- close_connection(con, false, true, false);
- if (ret == 0) {
- log_print("connection %p got EOF from %d",
- con, con->nodeid);
+ if (ret == 0) {
+ log_print("connection %p got EOF from %d",
+ con, con->nodeid);
+
+ if (con->eof_condition && con->eof_condition(con)) {
+ set_bit(CF_EOF, &con->flags);
+ mutex_unlock(&con->sock_mutex);
+ } else {
+ mutex_unlock(&con->sock_mutex);
+ close_connection(con, false, true, false);
+
/* handling for tcp shutdown */
clear_bit(CF_SHUTDOWN, &con->flags);
wake_up(&con->shutdown_wait);
- /* signal to breaking receive worker */
- ret = -1;
}
+
+ /* signal to breaking receive worker */
+ ret = -1;
+ } else {
+ mutex_unlock(&con->sock_mutex);
}
return ret;
}
@@ -864,16 +972,12 @@ static int accept_from_sock(struct listen_connection *con)
int result;
struct sockaddr_storage peeraddr;
struct socket *newsock;
- int len;
+ int len, idx;
int nodeid;
struct connection *newcon;
struct connection *addcon;
unsigned int mark;
- if (!dlm_allow_conn) {
- return -1;
- }
-
if (!con->sock)
return -ENOTCONN;
@@ -907,8 +1011,10 @@ static int accept_from_sock(struct listen_connection *con)
* the same time and the connections cross on the wire.
* In this case we store the incoming one in "othercon"
*/
+ idx = srcu_read_lock(&connections_srcu);
newcon = nodeid2con(nodeid, GFP_NOFS);
if (!newcon) {
+ srcu_read_unlock(&connections_srcu, idx);
result = -ENOMEM;
goto accept_err;
}
@@ -924,6 +1030,7 @@ static int accept_from_sock(struct listen_connection *con)
if (!othercon) {
log_print("failed to allocate incoming socket");
mutex_unlock(&newcon->sock_mutex);
+ srcu_read_unlock(&connections_srcu, idx);
result = -ENOMEM;
goto accept_err;
}
@@ -932,11 +1039,14 @@ static int accept_from_sock(struct listen_connection *con)
if (result < 0) {
kfree(othercon);
mutex_unlock(&newcon->sock_mutex);
+ srcu_read_unlock(&connections_srcu, idx);
goto accept_err;
}
lockdep_set_subclass(&othercon->sock_mutex, 1);
+ set_bit(CF_IS_OTHERCON, &othercon->flags);
newcon->othercon = othercon;
+ othercon->sendcon = newcon;
} else {
/* close other sock con if we have something new */
close_connection(othercon, false, true, false);
@@ -966,6 +1076,8 @@ static int accept_from_sock(struct listen_connection *con)
if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
queue_work(recv_workqueue, &addcon->rwork);
+ srcu_read_unlock(&connections_srcu, idx);
+
return 0;
accept_err:
@@ -977,12 +1089,6 @@ accept_err:
return result;
}
-static void free_entry(struct writequeue_entry *e)
-{
- __free_page(e->page);
- kfree(e);
-}
-
/*
* writequeue_entry_complete - try to delete and free write queue entry
* @e: write queue entry to try to delete
@@ -994,11 +1100,11 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
e->offset += completed;
e->len -= completed;
+ /* signal that the page was partially transmitted */
+ e->dirty = true;
- if (e->len == 0 && e->users == 0) {
- list_del(&e->list);
+ if (e->len == 0 && e->users == 0)
free_entry(e);
- }
}
/*
@@ -1075,7 +1181,7 @@ static void sctp_connect_to_sock(struct connection *con)
make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
- log_print("connecting to %d", con->nodeid);
+ log_print_ratelimited("connecting to %d", con->nodeid);
/* Turn off Nagle's algorithm */
sctp_sock_set_nodelay(sock->sk);
@@ -1171,7 +1277,7 @@ static void tcp_connect_to_sock(struct connection *con)
make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
- log_print("connecting to %d", con->nodeid);
+ log_print_ratelimited("connecting to %d", con->nodeid);
/* Turn off Nagle's algorithm */
tcp_sock_set_nodelay(sock->sk);
@@ -1364,12 +1470,16 @@ static struct writequeue_entry *new_writequeue_entry(struct connection *con,
entry->con = con;
entry->users = 1;
+ kref_init(&entry->ref);
+ INIT_LIST_HEAD(&entry->msgs);
return entry;
}
static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
- gfp_t allocation, char **ppc)
+ gfp_t allocation, char **ppc,
+ void (*cb)(struct dlm_mhandle *mh),
+ struct dlm_mhandle *mh)
{
struct writequeue_entry *e;
@@ -1377,7 +1487,12 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
if (!list_empty(&con->writequeue)) {
e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
if (DLM_WQ_REMAIN_BYTES(e) >= len) {
+ kref_get(&e->ref);
+
*ppc = page_address(e->page) + e->end;
+ if (cb)
+ cb(mh);
+
e->end += len;
e->users++;
spin_unlock(&con->writequeue_lock);
@@ -1391,42 +1506,92 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
if (!e)
return NULL;
+ kref_get(&e->ref);
*ppc = page_address(e->page);
e->end += len;
+ atomic_inc(&con->writequeue_cnt);
spin_lock(&con->writequeue_lock);
+ if (cb)
+ cb(mh);
+
list_add_tail(&e->list, &con->writequeue);
spin_unlock(&con->writequeue_lock);
return e;
};
-void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
+static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
+ gfp_t allocation, char **ppc,
+ void (*cb)(struct dlm_mhandle *mh),
+ struct dlm_mhandle *mh)
+{
+ struct writequeue_entry *e;
+ struct dlm_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), allocation);
+ if (!msg)
+ return NULL;
+
+ kref_init(&msg->ref);
+
+ e = new_wq_entry(con, len, allocation, ppc, cb, mh);
+ if (!e) {
+ kfree(msg);
+ return NULL;
+ }
+
+ msg->ppc = *ppc;
+ msg->len = len;
+ msg->entry = e;
+
+ return msg;
+}
+
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
+ char **ppc, void (*cb)(struct dlm_mhandle *mh),
+ struct dlm_mhandle *mh)
{
struct connection *con;
+ struct dlm_msg *msg;
+ int idx;
- if (len > DEFAULT_BUFFER_SIZE ||
+ if (len > DLM_MAX_SOCKET_BUFSIZE ||
len < sizeof(struct dlm_header)) {
- BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
+ BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
log_print("failed to allocate a buffer of size %d", len);
WARN_ON(1);
return NULL;
}
+ idx = srcu_read_lock(&connections_srcu);
con = nodeid2con(nodeid, allocation);
- if (!con)
+ if (!con) {
+ srcu_read_unlock(&connections_srcu, idx);
return NULL;
+ }
- return new_wq_entry(con, len, allocation, ppc);
+ msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, mh);
+ if (!msg) {
+ srcu_read_unlock(&connections_srcu, idx);
+ return NULL;
+ }
+
+ /* we assume that on success, commit must be called */
+ msg->idx = idx;
+ return msg;
}
-void dlm_lowcomms_commit_buffer(void *mh)
+static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
- struct writequeue_entry *e = (struct writequeue_entry *)mh;
+ struct writequeue_entry *e = msg->entry;
struct connection *con = e->con;
int users;
spin_lock(&con->writequeue_lock);
+ kref_get(&msg->ref);
+ list_add(&msg->list, &e->msgs);
+
users = --e->users;
if (users)
goto out;
@@ -1442,6 +1607,42 @@ out:
return;
}
+void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
+{
+ _dlm_lowcomms_commit_msg(msg);
+ srcu_read_unlock(&connections_srcu, msg->idx);
+}
+
+void dlm_lowcomms_put_msg(struct dlm_msg *msg)
+{
+ kref_put(&msg->ref, dlm_msg_release);
+}
+
+/* does not hold connections_srcu, for workqueue usage only */
+int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
+{
+ struct dlm_msg *msg_resend;
+ char *ppc;
+
+ if (msg->retransmit)
+ return 1;
+
+ msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
+ GFP_ATOMIC, &ppc, NULL, NULL);
+ if (!msg_resend)
+ return -ENOMEM;
+
+ msg->retransmit = true;
+ kref_get(&msg->ref);
+ msg_resend->orig_msg = msg;
+
+ memcpy(ppc, msg->ppc, msg->len);
+ _dlm_lowcomms_commit_msg(msg_resend);
+ dlm_lowcomms_put_msg(msg_resend);
+
+ return 0;
+}
+
/* Send a message */
static void send_to_sock(struct connection *con)
{
@@ -1483,7 +1684,7 @@ static void send_to_sock(struct connection *con)
cond_resched();
goto out;
} else if (ret < 0)
- goto send_error;
+ goto out;
}
/* Don't starve people filling buffers */
@@ -1496,16 +1697,23 @@ static void send_to_sock(struct connection *con)
writequeue_entry_complete(e, ret);
}
spin_unlock(&con->writequeue_lock);
-out:
- mutex_unlock(&con->sock_mutex);
+
+ /* close if we got EOF */
+ if (test_and_clear_bit(CF_EOF, &con->flags)) {
+ mutex_unlock(&con->sock_mutex);
+ close_connection(con, false, false, true);
+
+ /* handling for tcp shutdown */
+ clear_bit(CF_SHUTDOWN, &con->flags);
+ wake_up(&con->shutdown_wait);
+ } else {
+ mutex_unlock(&con->sock_mutex);
+ }
+
return;
-send_error:
+out:
mutex_unlock(&con->sock_mutex);
- close_connection(con, false, false, true);
- /* Requeue the send work. When the work daemon runs again, it will try
- a new connection, then call this function again. */
- queue_work(send_workqueue, &con->swork);
return;
out_connect:
@@ -1520,7 +1728,6 @@ static void clean_one_writequeue(struct connection *con)
spin_lock(&con->writequeue_lock);
list_for_each_entry_safe(e, safe, &con->writequeue, list) {
- list_del(&e->list);
free_entry(e);
}
spin_unlock(&con->writequeue_lock);
@@ -1532,8 +1739,10 @@ int dlm_lowcomms_close(int nodeid)
{
struct connection *con;
struct dlm_node_addr *na;
+ int idx;
log_print("closing connection to node %d", nodeid);
+ idx = srcu_read_lock(&connections_srcu);
con = nodeid2con(nodeid, 0);
if (con) {
set_bit(CF_CLOSE, &con->flags);
@@ -1542,6 +1751,7 @@ int dlm_lowcomms_close(int nodeid)
if (con->othercon)
clean_one_writequeue(con->othercon);
}
+ srcu_read_unlock(&connections_srcu, idx);
spin_lock(&dlm_node_addrs_spin);
na = find_node_addr(nodeid);
@@ -1578,35 +1788,50 @@ static void process_send_sockets(struct work_struct *work)
{
struct connection *con = container_of(work, struct connection, swork);
+ WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));
+
clear_bit(CF_WRITE_PENDING, &con->flags);
- if (con->sock == NULL) /* not mutex protected so check it inside too */
+
+ if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
+ close_connection(con, false, false, true);
+ dlm_midcomms_unack_msg_resend(con->nodeid);
+ }
+
+ if (con->sock == NULL) { /* not mutex protected so check it inside too */
+ if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
+ msleep(1000);
con->connect_action(con);
+ }
if (!list_empty(&con->writequeue))
send_to_sock(con);
}
static void work_stop(void)
{
- if (recv_workqueue)
+ if (recv_workqueue) {
destroy_workqueue(recv_workqueue);
- if (send_workqueue)
+ recv_workqueue = NULL;
+ }
+
+ if (send_workqueue) {
destroy_workqueue(send_workqueue);
+ send_workqueue = NULL;
+ }
}
static int work_start(void)
{
- recv_workqueue = alloc_workqueue("dlm_recv",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
if (!recv_workqueue) {
log_print("can't start dlm_recv");
return -ENOMEM;
}
- send_workqueue = alloc_workqueue("dlm_send",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
if (!send_workqueue) {
log_print("can't start dlm_send");
destroy_workqueue(recv_workqueue);
+ recv_workqueue = NULL;
return -ENOMEM;
}
@@ -1621,6 +1846,8 @@ static void shutdown_conn(struct connection *con)
void dlm_lowcomms_shutdown(void)
{
+ int idx;
+
/* Set all the flags to prevent any
* socket activity.
*/
@@ -1633,7 +1860,9 @@ void dlm_lowcomms_shutdown(void)
dlm_close_sock(&listen_con.sock);
+ idx = srcu_read_lock(&connections_srcu);
foreach_conn(shutdown_conn);
+ srcu_read_unlock(&connections_srcu, idx);
}
static void _stop_conn(struct connection *con, bool and_other)
@@ -1682,7 +1911,7 @@ static void free_conn(struct connection *con)
static void work_flush(void)
{
- int ok, idx;
+ int ok;
int i;
struct connection *con;
@@ -1693,7 +1922,6 @@ static void work_flush(void)
flush_workqueue(recv_workqueue);
if (send_workqueue)
flush_workqueue(send_workqueue);
- idx = srcu_read_lock(&connections_srcu);
for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
hlist_for_each_entry_rcu(con, &connection_hash[i],
list) {
@@ -1707,14 +1935,17 @@ static void work_flush(void)
}
}
}
- srcu_read_unlock(&connections_srcu, idx);
} while (!ok);
}
void dlm_lowcomms_stop(void)
{
+ int idx;
+
+ idx = srcu_read_lock(&connections_srcu);
work_flush();
foreach_conn(free_conn);
+ srcu_read_unlock(&connections_srcu, idx);
work_stop();
deinit_local();
}
@@ -1738,15 +1969,24 @@ int dlm_lowcomms_start(void)
error = work_start();
if (error)
- goto fail;
+ goto fail_local;
dlm_allow_conn = 1;
/* Start listening */
- if (dlm_config.ci_protocol == 0)
+ switch (dlm_config.ci_protocol) {
+ case DLM_PROTO_TCP:
error = tcp_listen_for_all();
- else
+ break;
+ case DLM_PROTO_SCTP:
error = sctp_listen_for_all(&listen_con);
+ break;
+ default:
+ log_print("Invalid protocol identifier %d set",
+ dlm_config.ci_protocol);
+ error = -EINVAL;
+ break;
+ }
if (error)
goto fail_unlisten;
@@ -1755,6 +1995,9 @@ int dlm_lowcomms_start(void)
fail_unlisten:
dlm_allow_conn = 0;
dlm_close_sock(&listen_con.sock);
+ work_stop();
+fail_local:
+ deinit_local();
fail:
return error;
}
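The lifetime rules introduced in lowcomms.c chain two refcounts: each struct dlm_msg holds a kref on the writequeue entry whose page carries its bytes (dlm_msg_release() drops it via dlm_page_release()), so a page is freed only after it has been fully transmitted and every message placed on it has been released, including copies kept for retransmission. A toy model of that chaining, as a standalone sketch with hypothetical types:

    #include <stdlib.h>

    struct entry { int refs; };     /* the page lives as long as refs > 0 */
    struct msg   { int refs; struct entry *e; };

    static void entry_put(struct entry *e)
    {
            if (--e->refs == 0)
                    free(e);        /* last user: free the page */
    }

    static void msg_put(struct msg *m)
    {
            if (--m->refs == 0) {
                    entry_put(m->e);        /* the message drops its page ref */
                    free(m);
            }
    }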
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 48bbc4e18761..aaae7115c00d 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -12,7 +12,22 @@
#ifndef __LOWCOMMS_DOT_H__
#define __LOWCOMMS_DOT_H__
-#define LOWCOMMS_MAX_TX_BUFFER_LEN 4096
+#include "dlm_internal.h"
+
+#define DLM_MIDCOMMS_OPT_LEN sizeof(struct dlm_opts)
+#define DLM_MAX_APP_BUFSIZE (DLM_MAX_SOCKET_BUFSIZE - \
+ DLM_MIDCOMMS_OPT_LEN)
+
+#define CONN_HASH_SIZE 32
+
+/* This is deliberately very simple because most clusters have simple
+ * sequential nodeids, so we should be able to go straight to a connection
+ * struct in the array
+ */
+static inline int nodeid_hash(int nodeid)
+{
+ return nodeid & (CONN_HASH_SIZE-1);
+}
/* switch to check if dlm is running */
extern int dlm_allow_conn;
@@ -22,8 +37,12 @@ void dlm_lowcomms_shutdown(void);
void dlm_lowcomms_stop(void);
void dlm_lowcomms_exit(void);
int dlm_lowcomms_close(int nodeid);
-void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
-void dlm_lowcomms_commit_buffer(void *mh);
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
+ char **ppc, void (*cb)(struct dlm_mhandle *mh),
+ struct dlm_mhandle *mh);
+void dlm_lowcomms_commit_msg(struct dlm_msg *msg);
+void dlm_lowcomms_put_msg(struct dlm_msg *msg);
+int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
int dlm_lowcomms_connect_node(int nodeid);
int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
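The renamed constants encode the layering: lowcomms puts at most DLM_MAX_SOCKET_BUFSIZE bytes on the wire per message, and the layers above midcomms may only use what remains after the DLM_OPTS encapsulation header. With the structs shown earlier, sizeof(struct dlm_opts) works out to 24 bytes (16 for the embedded dlm_header plus 8), assuming no extra padding, so DLM_MAX_APP_BUFSIZE is 4096 - 24 = 4072 bytes. A compile-time restatement of that assumption:

    #define DLM_MAX_SOCKET_BUFSIZE 4096
    #define DLM_MIDCOMMS_OPT_LEN   24   /* sizeof(struct dlm_opts), assumed */
    #define DLM_MAX_APP_BUFSIZE    (DLM_MAX_SOCKET_BUFSIZE - DLM_MIDCOMMS_OPT_LEN)

    _Static_assert(DLM_MAX_APP_BUFSIZE == 4072,
                   "max application payload per wire message");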
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index ceef3f2074ff..d9e1e4170eb1 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -15,6 +15,7 @@
#include "recover.h"
#include "rcom.h"
#include "config.h"
+#include "midcomms.h"
#include "lowcomms.h"
int dlm_slots_version(struct dlm_header *h)
@@ -270,7 +271,7 @@ int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
log_slots(ls, gen, num, NULL, array, array_size);
- max_slots = (LOWCOMMS_MAX_TX_BUFFER_LEN - sizeof(struct dlm_rcom) -
+ max_slots = (DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom) -
sizeof(struct rcom_config)) / sizeof(struct rcom_slot);
if (num > max_slots) {
@@ -329,6 +330,7 @@ static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
memb->nodeid = node->nodeid;
memb->weight = node->weight;
memb->comm_seq = node->comm_seq;
+ dlm_midcomms_add_member(node->nodeid);
add_ordered_member(ls, memb);
ls->ls_num_nodes++;
return 0;
@@ -359,26 +361,34 @@ int dlm_is_removed(struct dlm_ls *ls, int nodeid)
return 0;
}
-static void clear_memb_list(struct list_head *head)
+static void clear_memb_list(struct list_head *head,
+ void (*after_del)(int nodeid))
{
struct dlm_member *memb;
while (!list_empty(head)) {
memb = list_entry(head->next, struct dlm_member, list);
list_del(&memb->list);
+ if (after_del)
+ after_del(memb->nodeid);
kfree(memb);
}
}
+static void clear_members_cb(int nodeid)
+{
+ dlm_midcomms_remove_member(nodeid);
+}
+
void dlm_clear_members(struct dlm_ls *ls)
{
- clear_memb_list(&ls->ls_nodes);
+ clear_memb_list(&ls->ls_nodes, clear_members_cb);
ls->ls_num_nodes = 0;
}
void dlm_clear_members_gone(struct dlm_ls *ls)
{
- clear_memb_list(&ls->ls_nodes_gone);
+ clear_memb_list(&ls->ls_nodes_gone, NULL);
}
static void make_member_array(struct dlm_ls *ls)
@@ -552,6 +562,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
neg++;
list_move(&memb->list, &ls->ls_nodes_gone);
+ dlm_midcomms_remove_member(memb->nodeid);
ls->ls_num_nodes--;
dlm_lsop_recover_slot(ls, memb);
}
@@ -576,12 +587,18 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
*neg_out = neg;
error = ping_members(ls);
- if (!error || error == -EPROTO) {
- /* new_lockspace() may be waiting to know if the config
- is good or bad */
- ls->ls_members_result = error;
- complete(&ls->ls_members_done);
- }
+ /* error -EINTR means that a new recovery action is triggered.
+ * We ignore this recovery action and let the new one run, which
+ * might have a new member configuration.
+ */
+ if (error == -EINTR)
+ error = 0;
+
+ /* new_lockspace() may be waiting to know if the config
+ * is good or bad
+ */
+ ls->ls_members_result = error;
+ complete(&ls->ls_members_done);
log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
return error;
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index 1c6654a21ec4..e3de268898ed 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -3,7 +3,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2021 Red Hat, Inc. All rights reserved.
**
**
*******************************************************************************
@@ -12,22 +12,866 @@
/*
* midcomms.c
*
- * This is the appallingly named "mid-level" comms layer.
+ * This is the appallingly named "mid-level" comms layer. It provides
+ * "reliable" communication at the application layer on top of the
+ * lowcomms transport layer.
*
- * Its purpose is to take packets from the "real" comms layer,
- * split them up into packets and pass them to the interested
- * part of the locking mechanism.
+ * How it works:
*
- * It also takes messages from the locking layer, formats them
- * into packets and sends them to the comms layer.
+ * Each node keeps track of all sent DLM messages in send_queue with a sequence
+ * number. The receiver sends a DLM_ACK message back for every DLM message
+ * received at the other side. If a reconnect happens in lowcomms we will send
+ * all unacknowledged dlm messages again. The receiving side drops any already
+ * received message by comparing sequence numbers.
+ *
+ * How version detection works:
+ *
+ * Because dlm has pre-configured node addresses on every side, it is in
+ * its nature that every side connects at start to transmit dlm messages,
+ * which ends in a race. However DLM_RCOM_NAMES, DLM_RCOM_STATUS
+ * and their replies are the first messages which are exchanged. Due to
+ * backwards compatibility these messages are not covered by the midcomms
+ * re-transmission layer; they have their own re-transmission handling in
+ * the dlm application layer. The version field of every node is set from
+ * these RCOM messages as soon as they arrive, while the node isn't yet
+ * part of the nodes hash. There is also logic to detect a version mismatch
+ * if something weird is going on or the first message isn't an expected one.
+ *
+ * Termination:
+ *
+ * The midcomms layer does a 4-way handshake for termination of the DLM
+ * protocol, like TCP does with its half-closed socket support. SCTP doesn't
+ * support half-closed sockets, so we do it at the DLM layer. Also socket
+ * shutdown() can be interrupted by e.g. a tcp reset itself. Additionally
+ * there is the othercon paradigm in lowcomms which cannot easily be removed
+ * without breaking backwards compatibility. A node cannot send anything to
+ * another node after a DLM_FIN message was sent. There is additional logic
+ * to print a warning if DLM tries to do so. There is state handling like
+ * RFC 793, but reduced to termination only. The "member removal event"
+ * describes the cluster manager removing the node from internal lists; at
+ * this point DLM does not send any message to the other node. There are two cases:
+ *
+ * 1. The cluster member was removed and we received a FIN
+ * OR
+ * 2. We received a FIN but the member was not removed yet
+ *
+ * One of these cases will do the CLOSE_WAIT to LAST_ACK change.
+ *
+ *
+ * +---------+
+ * | CLOSED |
+ * +---------+
+ * | add member/receive RCOM version
+ * | detection msg
+ * V
+ * +---------+
+ * | ESTAB |
+ * +---------+
+ * CLOSE | | rcv FIN
+ * ------- | | -------
+ * +---------+ snd FIN / \ snd ACK +---------+
+ * | FIN |<----------------- ------------------>| CLOSE |
+ * | WAIT-1 |------------------ | WAIT |
+ * +---------+ rcv FIN \ +---------+
+ * | rcv ACK of FIN ------- | CLOSE | member
+ * | -------------- snd ACK | ------- | removal
+ * V x V snd FIN V event
+ * +---------+ +---------+ +---------+
+ * |FINWAIT-2| | CLOSING | | LAST-ACK|
+ * +---------+ +---------+ +---------+
+ * | rcv ACK of FIN | rcv ACK of FIN |
+ * | rcv FIN -------------- | -------------- |
+ * | ------- x V x V
+ * \ snd ACK +---------+ +---------+
+ * ------------------------>| CLOSED | | CLOSED |
+ * +---------+ +---------+
+ *
+ * NOTE: any state can be interrupted by midcomms_close() and the state will
+ * be switched to CLOSED in case of fencing. There is also some timeout
+ * handling, arrived at by observation, for when we receive the version
+ * detection RCOM messages.
+ *
+ * Future improvements:
+ *
+ * There are some known issues/improvements in the dlm handling. Some
+ * of them should be done in a next major dlm version bump, which makes
+ * it incompatible with previous versions.
+ *
+ * Unaligned memory access:
+ *
+ * There are cases where the dlm message buffer length is not aligned
+ * to 8 bytes. However, it seems nobody has detected any problem with it.
+ * This can be fixed in the next major version bump of dlm.
+ *
+ * Version detection:
+ *
+ * The version detection and how it's done is related to backwards
+ * compatibility. There are better ways to handle it. However this
+ * should be changed in the next major version bump of dlm.
+ *
+ * Ack handling:
+ *
+ * Currently we send an ack message for every dlm message. However we
+ * can ack multiple dlm messages with one ack by just delaying the ack
+ * message. This would reduce some traffic but make drop detection slower.
+ *
+ * Tail Size checking:
+ *
+ * There is a message tail payload in e.g. DLM_MSG, however we don't
+ * check it against the message length yet with regard to the receive
+ * buffer length. That needs to be validated.
+ *
+ * Fencing bad nodes:
+ *
+ * On timeouts or weird sequence number behaviour we should send
+ * a fencing request to the cluster manager.
+ */
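/*
 * A compressed, standalone sketch (toy code, not the kernel
 * implementation, which uses RCU lists, krefs and locking) of the
 * sequence/ack bookkeeping described above: the sender keeps every
 * message queued until the peer acks it and replays the queue after
 * a reconnect; the receiver delivers each sequence number once,
 * acking everything it sees.
 */

#include <stdint.h>
#include <stdlib.h>

struct pending {
	uint32_t seq;
	struct pending *next;		/* unacked send queue */
};

struct peer {
	uint32_t seq_send;		/* next sequence number to assign */
	uint32_t seq_next;		/* next sequence number expected */
	struct pending *queue;
};

/* sender: remember the message until it is acknowledged */
static uint32_t queue_msg(struct peer *p)
{
	struct pending *m = malloc(sizeof(*m));

	if (!m)
		abort();		/* toy code: no error handling */
	m->seq = p->seq_send++;
	m->next = p->queue;
	p->queue = m;
	return m->seq;			/* transmitted along with the payload */
}

/* sender: an ack for 'seq' releases everything sent before it */
static void handle_ack(struct peer *p, uint32_t seq)
{
	struct pending **pp = &p->queue, *m;

	while ((m = *pp)) {
		if ((int32_t)(seq - m->seq) > 0) {	/* wraparound-safe compare */
			*pp = m->next;
			free(m);
		} else {
			pp = &m->next;
		}
	}
}

/* receiver: deliver each sequence number exactly once */
static int handle_msg(struct peer *p, uint32_t seq)
{
	if (seq != p->seq_next)
		return 0;	/* duplicate after a resend: ack it, drop it */
	p->seq_next++;
	return 1;		/* pass up to the application layer */
}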
+
+/* Debug switch to enable a 5 second sleep while waiting for a termination.
+ * This can be useful to test fencing while termination is running.
+ * This requires a setup with only gfs2 as dlm user, so that the
+ * last umount will terminate the connection.
+ *
+ * It is also useful for testing: while umount blocks for the 5 seconds,
+ * just press the reset button. With a lot of packet drops the termination
+ * process can take several seconds.
*/
+#define DLM_DEBUG_FENCE_TERMINATION 0
+
+#include <net/tcp.h>
#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
#include "lock.h"
+#include "util.h"
#include "midcomms.h"
+/* init value for sequence numbers, for testing purposes only, e.g. overflows */
+#define DLM_SEQ_INIT 0
+/* 3 minutes wait to sync ending of dlm */
+#define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(3 * 60 * 1000)
+#define DLM_VERSION_NOT_SET 0
+
+struct midcomms_node {
+ int nodeid;
+ uint32_t version;
+ uint32_t seq_send;
+ uint32_t seq_next;
+ /* These queues are unbound because we cannot drop any message in dlm.
+ * We could send a fence signal for a specific node to the cluster
+ * manager if the queue hits some maximum value, however this
+ * handling is not supported yet.
+ */
+ struct list_head send_queue;
+ spinlock_t send_queue_lock;
+ atomic_t send_queue_cnt;
+#define DLM_NODE_FLAG_CLOSE 1
+#define DLM_NODE_FLAG_STOP_TX 2
+#define DLM_NODE_FLAG_STOP_RX 3
+ unsigned long flags;
+ wait_queue_head_t shutdown_wait;
+
+ /* dlm tcp termination state */
+#define DLM_CLOSED 1
+#define DLM_ESTABLISHED 2
+#define DLM_FIN_WAIT1 3
+#define DLM_FIN_WAIT2 4
+#define DLM_CLOSE_WAIT 5
+#define DLM_LAST_ACK 6
+#define DLM_CLOSING 7
+ int state;
+ spinlock_t state_lock;
+
+ /* counts how many lockspaces are using this node
+ * this refcount is necessary to determine if the
+ * node wants to disconnect.
+ */
+ int users;
+
+ /* not protected by srcu, node_hash lifetime */
+ void *debugfs;
+
+ struct hlist_node hlist;
+ struct rcu_head rcu;
+};
+
+struct dlm_mhandle {
+ const struct dlm_header *inner_hd;
+ struct midcomms_node *node;
+ struct dlm_opts *opts;
+ struct dlm_msg *msg;
+ bool committed;
+ uint32_t seq;
+
+ void (*ack_rcv)(struct midcomms_node *node);
+
+ /* get_mhandle/commit srcu idx exchange */
+ int idx;
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+static struct hlist_head node_hash[CONN_HASH_SIZE];
+static DEFINE_SPINLOCK(nodes_lock);
+DEFINE_STATIC_SRCU(nodes_srcu);
+
+/* This mutex prevents midcomms_close() from running while
+ * stop() or remove() is running. I experienced invalid memory
+ * access behaviour when DLM_DEBUG_FENCE_TERMINATION is enabled
+ * and machines are reset. It would end in some double deletion
+ * in the nodes datastructure.
+ */
+static DEFINE_MUTEX(close_lock);
+
+static inline const char *dlm_state_str(int state)
+{
+ switch (state) {
+ case DLM_CLOSED:
+ return "CLOSED";
+ case DLM_ESTABLISHED:
+ return "ESTABLISHED";
+ case DLM_FIN_WAIT1:
+ return "FIN_WAIT1";
+ case DLM_FIN_WAIT2:
+ return "FIN_WAIT2";
+ case DLM_CLOSE_WAIT:
+ return "CLOSE_WAIT";
+ case DLM_LAST_ACK:
+ return "LAST_ACK";
+ case DLM_CLOSING:
+ return "CLOSING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char *dlm_midcomms_state(struct midcomms_node *node)
+{
+ return dlm_state_str(node->state);
+}
+
+unsigned long dlm_midcomms_flags(struct midcomms_node *node)
+{
+ return node->flags;
+}
+
+int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
+{
+ return atomic_read(&node->send_queue_cnt);
+}
+
+uint32_t dlm_midcomms_version(struct midcomms_node *node)
+{
+ return node->version;
+}
+
+static struct midcomms_node *__find_node(int nodeid, int r)
+{
+ struct midcomms_node *node;
+
+ hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
+ if (node->nodeid == nodeid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void dlm_mhandle_release(struct rcu_head *rcu)
+{
+ struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);
+
+ dlm_lowcomms_put_msg(mh->msg);
+ kfree(mh);
+}
+
+static void dlm_mhandle_delete(struct midcomms_node *node,
+ struct dlm_mhandle *mh)
+{
+ list_del_rcu(&mh->list);
+ atomic_dec(&node->send_queue_cnt);
+ call_rcu(&mh->rcu, dlm_mhandle_release);
+}
+
+static void dlm_send_queue_flush(struct midcomms_node *node)
+{
+ struct dlm_mhandle *mh;
+
+ pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
+
+ rcu_read_lock();
+ spin_lock(&node->send_queue_lock);
+ list_for_each_entry_rcu(mh, &node->send_queue, list) {
+ dlm_mhandle_delete(node, mh);
+ }
+ spin_unlock(&node->send_queue_lock);
+ rcu_read_unlock();
+}
+
+static void midcomms_node_reset(struct midcomms_node *node)
+{
+ pr_debug("reset node %d\n", node->nodeid);
+
+ node->seq_next = DLM_SEQ_INIT;
+ node->seq_send = DLM_SEQ_INIT;
+ node->version = DLM_VERSION_NOT_SET;
+ node->flags = 0;
+
+ dlm_send_queue_flush(node);
+ node->state = DLM_CLOSED;
+ wake_up(&node->shutdown_wait);
+}
+
+static struct midcomms_node *nodeid2node(int nodeid, gfp_t alloc)
+{
+ struct midcomms_node *node, *tmp;
+ int r = nodeid_hash(nodeid);
+
+ node = __find_node(nodeid, r);
+ if (node || !alloc)
+ return node;
+
+ node = kmalloc(sizeof(*node), alloc);
+ if (!node)
+ return NULL;
+
+ node->nodeid = nodeid;
+ spin_lock_init(&node->state_lock);
+ spin_lock_init(&node->send_queue_lock);
+ atomic_set(&node->send_queue_cnt, 0);
+ INIT_LIST_HEAD(&node->send_queue);
+ init_waitqueue_head(&node->shutdown_wait);
+ node->users = 0;
+ midcomms_node_reset(node);
+
+ spin_lock(&nodes_lock);
+ /* check again whether somebody else
+ * added the node in the meantime
+ */
+ tmp = __find_node(nodeid, r);
+ if (tmp) {
+ spin_unlock(&nodes_lock);
+ kfree(node);
+ return tmp;
+ }
+
+ hlist_add_head_rcu(&node->hlist, &node_hash[r]);
+ spin_unlock(&nodes_lock);
+
+ node->debugfs = dlm_create_debug_comms_file(nodeid, node);
+ return node;
+}
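+
+/* Note that nodeid2node() allocates outside nodes_lock on purpose:
+ * kmalloc() with the given gfp flags may sleep, while nodes_lock is a
+ * plain spinlock. The loser of a concurrent insert simply frees its
+ * copy again and returns the winner's node.
+ */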
+
+static int dlm_send_ack(int nodeid, uint32_t seq)
+{
+ int mb_len = sizeof(struct dlm_header);
+ struct dlm_header *m_header;
+ struct dlm_msg *msg;
+ char *ppc;
+
+ msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_NOFS, &ppc,
+ NULL, NULL);
+ if (!msg)
+ return -ENOMEM;
+
+ m_header = (struct dlm_header *)ppc;
+
+ m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ m_header->h_nodeid = dlm_our_nodeid();
+ m_header->h_length = mb_len;
+ m_header->h_cmd = DLM_ACK;
+ m_header->u.h_seq = seq;
+
+ header_out(m_header);
+ dlm_lowcomms_commit_msg(msg);
+ dlm_lowcomms_put_msg(msg);
+
+ return 0;
+}
+
+static int dlm_send_fin(struct midcomms_node *node,
+ void (*ack_rcv)(struct midcomms_node *node))
+{
+ int mb_len = sizeof(struct dlm_header);
+ struct dlm_header *m_header;
+ struct dlm_mhandle *mh;
+ char *ppc;
+
+ mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_NOFS, &ppc);
+ if (!mh)
+ return -ENOMEM;
+
+ mh->ack_rcv = ack_rcv;
+
+ m_header = (struct dlm_header *)ppc;
+
+ m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ m_header->h_nodeid = dlm_our_nodeid();
+ m_header->h_length = mb_len;
+ m_header->h_cmd = DLM_FIN;
+
+ header_out(m_header);
+
+ pr_debug("sending fin msg to node %d\n", node->nodeid);
+ dlm_midcomms_commit_mhandle(mh);
+ set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+
+ return 0;
+}
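+
+/* A FIN is sent like any other midcomms message: it consumes a sequence
+ * number, stays on the send queue until it is acked and gets
+ * retransmitted on reconnect. DLM_NODE_FLAG_STOP_TX only flags further
+ * transmit attempts as a bug, see the WARN_ON in
+ * dlm_midcomms_get_mhandle().
+ */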
+
+static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
+{
+ struct dlm_mhandle *mh;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(mh, &node->send_queue, list) {
+ if (before(mh->seq, seq)) {
+ if (mh->ack_rcv)
+ mh->ack_rcv(node);
+ } else {
+ /* send queue should be ordered */
+ break;
+ }
+ }
+
+ spin_lock(&node->send_queue_lock);
+ list_for_each_entry_rcu(mh, &node->send_queue, list) {
+ if (before(mh->seq, seq)) {
+ dlm_mhandle_delete(node, mh);
+ } else {
+ /* send queue should be ordered */
+ break;
+ }
+ }
+ spin_unlock(&node->send_queue_lock);
+ rcu_read_unlock();
+}
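+
+/* Acks are cumulative, as in TCP: an ack carrying seq acknowledges every
+ * queued message with a lower sequence number. The first pass above runs
+ * the ack_rcv callbacks, the second pass drops the acked entries from
+ * the send queue under send_queue_lock.
+ */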
+
+static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
+{
+ spin_lock(&node->state_lock);
+ pr_debug("receive passive fin ack from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+
+ switch (node->state) {
+ case DLM_LAST_ACK:
+ /* DLM_CLOSED */
+ midcomms_node_reset(node);
+ break;
+ case DLM_CLOSED:
+ /* not valid but somehow we got what we want */
+ wake_up(&node->shutdown_wait);
+ break;
+ default:
+ spin_unlock(&node->state_lock);
+ log_print("%s: unexpected state: %d\n",
+ __func__, node->state);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&node->state_lock);
+}
+
+static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ struct midcomms_node *node,
+ uint32_t seq)
+{
+ if (seq == node->seq_next) {
+ node->seq_next++;
+ /* send ack before fin */
+ dlm_send_ack(node->nodeid, node->seq_next);
+
+ switch (p->header.h_cmd) {
+ case DLM_FIN:
+ spin_lock(&node->state_lock);
+ pr_debug("receive fin msg from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+
+ switch (node->state) {
+ case DLM_ESTABLISHED:
+ node->state = DLM_CLOSE_WAIT;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ /* passive shutdown DLM_LAST_ACK case 1
+ * additionally we check whether the node is used by
+ * cluster manager events at all.
+ */
+ if (node->users == 0) {
+ node->state = DLM_LAST_ACK;
+ pr_debug("switch node %d to state %s case 1\n",
+ node->nodeid, dlm_state_str(node->state));
+ spin_unlock(&node->state_lock);
+ goto send_fin;
+ }
+ break;
+ case DLM_FIN_WAIT1:
+ node->state = DLM_CLOSING;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ case DLM_FIN_WAIT2:
+ midcomms_node_reset(node);
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ wake_up(&node->shutdown_wait);
+ break;
+ case DLM_LAST_ACK:
+ /* probably remove_member caught it, do nothing */
+ break;
+ default:
+ spin_unlock(&node->state_lock);
+ log_print("%s: unexpected state: %d\n",
+ __func__, node->state);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&node->state_lock);
+
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ break;
+ default:
+ WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ dlm_receive_buffer(p, node->nodeid);
+ break;
+ }
+ } else {
+ /* re-ack a message we already received by sending back the
+ * current node->seq_next number as the ack.
+ */
+ if (seq < node->seq_next)
+ dlm_send_ack(node->nodeid, node->seq_next);
+
+ log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
+ seq, node->seq_next, node->nodeid);
+ }
+
+ return;
+
+send_fin:
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+}
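+
+/* The receive side in short: only the message matching seq_next is
+ * accepted and acked. An older (duplicate) sequence number is re-acked
+ * with the current seq_next so the peer can drop it from its send queue;
+ * a newer one is ignored and has to be retransmitted in order by the
+ * peer.
+ */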
+
+static struct midcomms_node *
+dlm_midcomms_recv_node_lookup(int nodeid, const union dlm_packet *p,
+ uint16_t msglen, int (*cb)(struct midcomms_node *node))
+{
+ struct midcomms_node *node = NULL;
+ gfp_t allocation = 0;
+ int ret;
+
+ switch (p->header.h_cmd) {
+ case DLM_RCOM:
+ if (msglen < sizeof(struct dlm_rcom)) {
+ log_print("rcom msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ return NULL;
+ }
+
+ switch (le32_to_cpu(p->rcom.rc_type)) {
+ case DLM_RCOM_NAMES:
+ fallthrough;
+ case DLM_RCOM_NAMES_REPLY:
+ fallthrough;
+ case DLM_RCOM_STATUS:
+ fallthrough;
+ case DLM_RCOM_STATUS_REPLY:
+ node = nodeid2node(nodeid, 0);
+ if (node) {
+ spin_lock(&node->state_lock);
+ if (node->state != DLM_ESTABLISHED)
+ pr_debug("receive begin RCOM msg from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+
+ switch (node->state) {
+ case DLM_CLOSED:
+ node->state = DLM_ESTABLISHED;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ case DLM_ESTABLISHED:
+ break;
+ default:
+ /* invalid state, the passive shutdown
+ * must have failed; we try to reset and
+ * hope it will go on.
+ */
+ log_print("reset node %d because shutdown stuck",
+ node->nodeid);
+
+ midcomms_node_reset(node);
+ node->state = DLM_ESTABLISHED;
+ break;
+ }
+ spin_unlock(&node->state_lock);
+ }
+
+ allocation = GFP_NOFS;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ node = nodeid2node(nodeid, allocation);
+ if (!node) {
+ switch (p->header.h_cmd) {
+ case DLM_OPTS:
+ if (msglen < sizeof(struct dlm_opts)) {
+ log_print("opts msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ return NULL;
+ }
+
+ log_print_ratelimited("received dlm opts message nextcmd %d from node %d in an invalid sequence",
+ p->opts.o_nextcmd, nodeid);
+ break;
+ default:
+ log_print_ratelimited("received dlm message cmd %d from node %d in an invalid sequence",
+ p->header.h_cmd, nodeid);
+ break;
+ }
+
+ return NULL;
+ }
+
+ ret = cb(node);
+ if (ret < 0)
+ return NULL;
+
+ return node;
+}
+
+static int dlm_midcomms_version_check_3_2(struct midcomms_node *node)
+{
+ switch (node->version) {
+ case DLM_VERSION_NOT_SET:
+ node->version = DLM_VERSION_3_2;
+ log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
+ node->nodeid);
+ break;
+ case DLM_VERSION_3_2:
+ break;
+ default:
+ log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
+ DLM_VERSION_3_2, node->nodeid, node->version);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int dlm_opts_check_msglen(union dlm_packet *p, uint16_t msglen, int nodeid)
+{
+ int len = msglen;
+
+ /* we only trust outer header msglen because
+ * it's checked against receive buffer length.
+ */
+ if (len < sizeof(struct dlm_opts))
+ return -1;
+ len -= sizeof(struct dlm_opts);
+
+ if (len < le16_to_cpu(p->opts.o_optlen))
+ return -1;
+ len -= le16_to_cpu(p->opts.o_optlen);
+
+ switch (p->opts.o_nextcmd) {
+ case DLM_FIN:
+ if (len < sizeof(struct dlm_header)) {
+ log_print("fin too small: %d, will skip this message from node %d",
+ len, nodeid);
+ return -1;
+ }
+
+ break;
+ case DLM_MSG:
+ if (len < sizeof(struct dlm_message)) {
+ log_print("msg too small: %d, will skip this message from node %d",
+ msglen, nodeid);
+ return -1;
+ }
+
+ break;
+ case DLM_RCOM:
+ if (len < sizeof(struct dlm_rcom)) {
+ log_print("rcom msg too small: %d, will skip this message from node %d",
+ len, nodeid);
+ return -1;
+ }
+
+ break;
+ default:
+ log_print("unsupported o_nextcmd received: %u, will skip this message from node %d",
+ p->opts.o_nextcmd, nodeid);
+ return -1;
+ }
+
+ return 0;
+}
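+
+/* A sketch of the on-wire layout that is validated above:
+ *
+ *	| struct dlm_opts (outer header, h_cmd = DLM_OPTS, u.h_seq) |
+ *	| o_optlen bytes of options                                 |
+ *	| inner packet, o_nextcmd one of DLM_MSG/DLM_RCOM/DLM_FIN   |
+ *
+ * Only the outer h_length is trusted because it was checked against the
+ * receive buffer length; all inner lengths are re-checked.
+ */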
+
+static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
+{
+ uint16_t msglen = le16_to_cpu(p->header.h_length);
+ struct midcomms_node *node;
+ uint32_t seq;
+ int ret, idx;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
+ dlm_midcomms_version_check_3_2);
+ if (!node)
+ goto out;
+
+ switch (p->header.h_cmd) {
+ case DLM_RCOM:
+ /* we use these rcom messages to determine the version.
+ * they have their own retransmission handling and
+ * are the first messages of dlm.
+ *
+ * length already checked.
+ */
+ switch (le32_to_cpu(p->rcom.rc_type)) {
+ case DLM_RCOM_NAMES:
+ fallthrough;
+ case DLM_RCOM_NAMES_REPLY:
+ fallthrough;
+ case DLM_RCOM_STATUS:
+ fallthrough;
+ case DLM_RCOM_STATUS_REPLY:
+ break;
+ default:
+ log_print("unsupported rcom type received: %u, will skip this message from node %d",
+ le32_to_cpu(p->rcom.rc_type), nodeid);
+ goto out;
+ }
+
+ WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ dlm_receive_buffer(p, nodeid);
+ break;
+ case DLM_OPTS:
+ seq = le32_to_cpu(p->header.u.h_seq);
+
+ ret = dlm_opts_check_msglen(p, msglen, nodeid);
+ if (ret < 0) {
+ log_print("opts msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ goto out;
+ }
+
+ p = (union dlm_packet *)((unsigned char *)p->opts.o_opts +
+ le16_to_cpu(p->opts.o_optlen));
+
+ /* recheck the inner msglen just to be sure it's not garbage */
+ msglen = le16_to_cpu(p->header.h_length);
+ switch (p->header.h_cmd) {
+ case DLM_RCOM:
+ if (msglen < sizeof(struct dlm_rcom)) {
+ log_print("inner rcom msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ goto out;
+ }
+
+ break;
+ case DLM_MSG:
+ if (msglen < sizeof(struct dlm_message)) {
+ log_print("inner msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ goto out;
+ }
+
+ break;
+ case DLM_FIN:
+ if (msglen < sizeof(struct dlm_header)) {
+ log_print("inner fin too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ goto out;
+ }
+
+ break;
+ default:
+ log_print("unsupported inner h_cmd received: %u, will skip this message from node %d",
+ msglen, nodeid);
+ goto out;
+ }
+
+ dlm_midcomms_receive_buffer(p, node, seq);
+ break;
+ case DLM_ACK:
+ seq = le32_to_cpu(p->header.u.h_seq);
+ dlm_receive_ack(node, seq);
+ break;
+ default:
+ log_print("unsupported h_cmd received: %u, will skip this message from node %d",
+ p->header.h_cmd, nodeid);
+ break;
+ }
+
+out:
+ srcu_read_unlock(&nodes_srcu, idx);
+}
+
+static int dlm_midcomms_version_check_3_1(struct midcomms_node *node)
+{
+ switch (node->version) {
+ case DLM_VERSION_NOT_SET:
+ node->version = DLM_VERSION_3_1;
+ log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
+ node->nodeid);
+ break;
+ case DLM_VERSION_3_1:
+ break;
+ default:
+ log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
+ DLM_VERSION_3_1, node->nodeid, node->version);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void dlm_midcomms_receive_buffer_3_1(union dlm_packet *p, int nodeid)
+{
+ uint16_t msglen = le16_to_cpu(p->header.h_length);
+ struct midcomms_node *node;
+ int idx;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
+ dlm_midcomms_version_check_3_1);
+ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
+
+ switch (p->header.h_cmd) {
+ case DLM_RCOM:
+ /* length already checked */
+ break;
+ case DLM_MSG:
+ if (msglen < sizeof(struct dlm_message)) {
+ log_print("msg too small: %u, will skip this message from node %d",
+ msglen, nodeid);
+ return;
+ }
+
+ break;
+ default:
+ log_print("unsupported h_cmd received: %u, will skip this message from node %d",
+ p->header.h_cmd, nodeid);
+ return;
+ }
+
+ dlm_receive_buffer(p, nodeid);
+}
+
/*
* Called from the low-level comms layer to process a buffer of
* commands.
@@ -43,7 +887,7 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
while (len >= sizeof(struct dlm_header)) {
hd = (struct dlm_header *)ptr;
- /* no message should be more than DEFAULT_BUFFER_SIZE or
+ /* no message should be more than DLM_MAX_SOCKET_BUFSIZE or
* less than dlm_header size.
*
* Some messages does not have a 8 byte length boundary yet
@@ -55,7 +899,7 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
* the next major version bump.
*/
msglen = le16_to_cpu(hd->h_length);
- if (msglen > DEFAULT_BUFFER_SIZE ||
+ if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
msglen < sizeof(struct dlm_header)) {
log_print("received invalid length header: %u from node %d, will abort message parsing",
msglen, nodeid);
@@ -68,32 +912,19 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
if (msglen > len)
break;
- switch (hd->h_cmd) {
- case DLM_MSG:
- if (msglen < sizeof(struct dlm_message)) {
- log_print("dlm msg too small: %u, will skip this message",
- msglen);
- goto skip;
- }
-
+ switch (le32_to_cpu(hd->h_version)) {
+ case DLM_VERSION_3_1:
+ dlm_midcomms_receive_buffer_3_1((union dlm_packet *)ptr, nodeid);
break;
- case DLM_RCOM:
- if (msglen < sizeof(struct dlm_rcom)) {
- log_print("dlm rcom msg too small: %u, will skip this message",
- msglen);
- goto skip;
- }
-
+ case DLM_VERSION_3_2:
+ dlm_midcomms_receive_buffer_3_2((union dlm_packet *)ptr, nodeid);
break;
default:
- log_print("unsupported h_cmd received: %u, will skip this message",
- hd->h_cmd);
- goto skip;
+ log_print("received invalid version header: %u from node %d, will skip this message",
+ le32_to_cpu(hd->h_version), nodeid);
+ break;
}
- dlm_receive_buffer((union dlm_packet *)ptr, nodeid);
-
-skip:
ret += msglen;
len -= msglen;
ptr += msglen;
@@ -102,3 +933,455 @@ skip:
return ret;
}
+void dlm_midcomms_unack_msg_resend(int nodeid)
+{
+ struct midcomms_node *node;
+ struct dlm_mhandle *mh;
+ int idx, ret;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid, 0);
+ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ /* old protocol, we don't support retransmission on failure */
+ switch (node->version) {
+ case DLM_VERSION_3_2:
+ break;
+ default:
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(mh, &node->send_queue, list) {
+ if (!mh->committed)
+ continue;
+
+ ret = dlm_lowcomms_resend_msg(mh->msg);
+ if (!ret)
+ log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
+ mh->seq, node->nodeid);
+ }
+ rcu_read_unlock();
+ srcu_read_unlock(&nodes_srcu, idx);
+}
+
+static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
+ uint32_t seq)
+{
+ opts->o_header.h_cmd = DLM_OPTS;
+ opts->o_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ opts->o_header.h_nodeid = dlm_our_nodeid();
+ opts->o_header.h_length = DLM_MIDCOMMS_OPT_LEN + inner_len;
+ opts->o_header.u.h_seq = seq;
+ header_out(&opts->o_header);
+}
+
+static void midcomms_new_msg_cb(struct dlm_mhandle *mh)
+{
+ atomic_inc(&mh->node->send_queue_cnt);
+
+ spin_lock(&mh->node->send_queue_lock);
+ list_add_tail_rcu(&mh->list, &mh->node->send_queue);
+ spin_unlock(&mh->node->send_queue_lock);
+
+ mh->seq = mh->node->seq_send++;
+}
+
+static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
+ int len, gfp_t allocation, char **ppc)
+{
+ struct dlm_opts *opts;
+ struct dlm_msg *msg;
+
+ msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
+ allocation, ppc, midcomms_new_msg_cb, mh);
+ if (!msg)
+ return NULL;
+
+ opts = (struct dlm_opts *)*ppc;
+ mh->opts = opts;
+
+ /* add possible options here */
+ dlm_fill_opts_header(opts, len, mh->seq);
+
+ *ppc += sizeof(*opts);
+ mh->inner_hd = (const struct dlm_header *)*ppc;
+ return msg;
+}
+
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ gfp_t allocation, char **ppc)
+{
+ struct midcomms_node *node;
+ struct dlm_mhandle *mh;
+ struct dlm_msg *msg;
+ int idx;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid, 0);
+ if (!node) {
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+
+ /* this is a bug, however we go on and hope it will be resolved */
+ WARN_ON(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
+
+ mh = kzalloc(sizeof(*mh), GFP_NOFS);
+ if (!mh)
+ goto err;
+
+ mh->idx = idx;
+ mh->node = node;
+
+ switch (node->version) {
+ case DLM_VERSION_3_1:
+ msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
+ NULL, NULL);
+ if (!msg) {
+ kfree(mh);
+ goto err;
+ }
+
+ break;
+ case DLM_VERSION_3_2:
+ msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
+ ppc);
+ if (!msg) {
+ kfree(mh);
+ goto err;
+ }
+
+ break;
+ default:
+ kfree(mh);
+ WARN_ON(1);
+ goto err;
+ }
+
+ mh->msg = msg;
+
+ /* keep in mind that the caller must call
+ * dlm_midcomms_commit_mhandle(), which releases
+ * nodes_srcu using mh->idx; it is assumed
+ * here that the application will do so.
+ */
+ return mh;
+
+err:
+ srcu_read_unlock(&nodes_srcu, idx);
+ return NULL;
+}
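+
+/* A sketch of the typical caller pattern, compare dlm_send_fin() above:
+ *
+ *	mh = dlm_midcomms_get_mhandle(nodeid, len, GFP_NOFS, &ppc);
+ *	if (!mh)
+ *		return -ENOMEM;
+ *	... fill the message at ppc ...
+ *	dlm_midcomms_commit_mhandle(mh);
+ *
+ * The srcu read section entered here is left again in the commit call.
+ */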
+
+static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
+{
+ /* nexthdr chain for fast lookup */
+ mh->opts->o_nextcmd = mh->inner_hd->h_cmd;
+ mh->committed = true;
+ dlm_lowcomms_commit_msg(mh->msg);
+}
+
+void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
+{
+ switch (mh->node->version) {
+ case DLM_VERSION_3_1:
+ srcu_read_unlock(&nodes_srcu, mh->idx);
+
+ dlm_lowcomms_commit_msg(mh->msg);
+ dlm_lowcomms_put_msg(mh->msg);
+ /* mh is not part of rcu list in this case */
+ kfree(mh);
+ break;
+ case DLM_VERSION_3_2:
+ dlm_midcomms_commit_msg_3_2(mh);
+ srcu_read_unlock(&nodes_srcu, mh->idx);
+ break;
+ default:
+ srcu_read_unlock(&nodes_srcu, mh->idx);
+ WARN_ON(1);
+ break;
+ }
+}
+
+int dlm_midcomms_start(void)
+{
+ int i;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&node_hash[i]);
+
+ return dlm_lowcomms_start();
+}
+
+static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
+{
+ spin_lock(&node->state_lock);
+ pr_debug("receive active fin ack from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+
+ switch (node->state) {
+ case DLM_FIN_WAIT1:
+ node->state = DLM_FIN_WAIT2;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ case DLM_CLOSING:
+ midcomms_node_reset(node);
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ wake_up(&node->shutdown_wait);
+ break;
+ case DLM_CLOSED:
+ /* not valid but somehow we got what we want */
+ wake_up(&node->shutdown_wait);
+ break;
+ default:
+ spin_unlock(&node->state_lock);
+ log_print("%s: unexpected state: %d\n",
+ __func__, node->state);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&node->state_lock);
+}
+
+void dlm_midcomms_add_member(int nodeid)
+{
+ struct midcomms_node *node;
+ int idx;
+
+ if (nodeid == dlm_our_nodeid())
+ return;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid, GFP_NOFS);
+ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ spin_lock(&node->state_lock);
+ if (!node->users) {
+ pr_debug("receive add member from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ switch (node->state) {
+ case DLM_ESTABLISHED:
+ break;
+ case DLM_CLOSED:
+ node->state = DLM_ESTABLISHED;
+ pr_debug("switch node %d to state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ default:
+ /* invalid state, the passive shutdown
+ * must have failed; we try to reset and
+ * hope it will go on.
+ */
+ log_print("reset node %d because shutdown stuck",
+ node->nodeid);
+
+ midcomms_node_reset(node);
+ node->state = DLM_ESTABLISHED;
+ break;
+ }
+ }
+
+ node->users++;
+ pr_debug("users inc count %d\n", node->users);
+ spin_unlock(&node->state_lock);
+
+ srcu_read_unlock(&nodes_srcu, idx);
+}
+
+void dlm_midcomms_remove_member(int nodeid)
+{
+ struct midcomms_node *node;
+ int idx;
+
+ if (nodeid == dlm_our_nodeid())
+ return;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid, 0);
+ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ spin_lock(&node->state_lock);
+ node->users--;
+ pr_debug("users dec count %d\n", node->users);
+
+ /* the users count hitting zero means the
+ * other side is running dlm_midcomms_stop();
+ * we meet it to get a clean disconnect.
+ */
+ if (node->users == 0) {
+ pr_debug("receive remove member from node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ switch (node->state) {
+ case DLM_ESTABLISHED:
+ break;
+ case DLM_CLOSE_WAIT:
+ /* passive shutdown DLM_LAST_ACK case 2 */
+ node->state = DLM_LAST_ACK;
+ spin_unlock(&node->state_lock);
+
+ pr_debug("switch node %d to state %s case 2\n",
+ node->nodeid, dlm_state_str(node->state));
+ goto send_fin;
+ case DLM_LAST_ACK:
+ /* probably receive fin caught it, do nothing */
+ break;
+ case DLM_CLOSED:
+ /* already gone, do nothing */
+ break;
+ default:
+ log_print("%s: unexpected state: %d\n",
+ __func__, node->state);
+ break;
+ }
+ }
+ spin_unlock(&node->state_lock);
+
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+
+send_fin:
+ set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ srcu_read_unlock(&nodes_srcu, idx);
+}
+
+static void midcomms_node_release(struct rcu_head *rcu)
+{
+ struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+
+ WARN_ON(atomic_read(&node->send_queue_cnt));
+ kfree(node);
+}
+
+static void midcomms_shutdown(struct midcomms_node *node)
+{
+ int ret;
+
+ /* old protocol, we don't wait for pending operations */
+ switch (node->version) {
+ case DLM_VERSION_3_2:
+ break;
+ default:
+ return;
+ }
+
+ spin_lock(&node->state_lock);
+ pr_debug("receive active shutdown for node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ switch (node->state) {
+ case DLM_ESTABLISHED:
+ node->state = DLM_FIN_WAIT1;
+ pr_debug("switch node %d to state %s case 2\n",
+ node->nodeid, dlm_state_str(node->state));
+ break;
+ case DLM_CLOSED:
+ /* we have what we want */
+ spin_unlock(&node->state_lock);
+ return;
+ default:
+ /* too busy to enter DLM_FIN_WAIT1; wait in
+ * shutdown_wait until the passive side is done
+ * and we enter DLM_CLOSED.
+ */
+ break;
+ }
+ spin_unlock(&node->state_lock);
+
+ if (node->state == DLM_FIN_WAIT1) {
+ dlm_send_fin(node, dlm_act_fin_ack_rcv);
+
+ if (DLM_DEBUG_FENCE_TERMINATION)
+ msleep(5000);
+ }
+
+ /* wait for other side dlm + fin */
+ ret = wait_event_timeout(node->shutdown_wait,
+ node->state == DLM_CLOSED ||
+ test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
+ DLM_SHUTDOWN_TIMEOUT);
+ if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) {
+ pr_debug("active shutdown timed out for node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+ midcomms_node_reset(node);
+ return;
+ }
+
+ pr_debug("active shutdown done for node %d with state %s\n",
+ node->nodeid, dlm_state_str(node->state));
+}
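+
+/* If the peer never completes the handshake, the wait above gives up
+ * after DLM_SHUTDOWN_TIMEOUT (3 minutes) and falls back to a hard node
+ * reset; dlm_midcomms_close() can cut the wait short by setting
+ * DLM_NODE_FLAG_CLOSE and waking shutdown_wait.
+ */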
+
+void dlm_midcomms_shutdown(void)
+{
+ struct midcomms_node *node;
+ int i, idx;
+
+ mutex_lock(&close_lock);
+ idx = srcu_read_lock(&nodes_srcu);
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+ midcomms_shutdown(node);
+
+ dlm_delete_debug_comms_file(node->debugfs);
+
+ spin_lock(&nodes_lock);
+ hlist_del_rcu(&node->hlist);
+ spin_unlock(&nodes_lock);
+
+ call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
+ }
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
+ mutex_unlock(&close_lock);
+
+ dlm_lowcomms_shutdown();
+}
+
+int dlm_midcomms_close(int nodeid)
+{
+ struct midcomms_node *node;
+ int idx, ret;
+
+ if (nodeid == dlm_our_nodeid())
+ return 0;
+
+ idx = srcu_read_lock(&nodes_srcu);
+ /* Abort pending close/remove operation */
+ node = nodeid2node(nodeid, 0);
+ if (node) {
+ /* let shutdown waiters leave */
+ set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
+ wake_up(&node->shutdown_wait);
+ }
+ srcu_read_unlock(&nodes_srcu, idx);
+
+ synchronize_srcu(&nodes_srcu);
+
+ idx = srcu_read_lock(&nodes_srcu);
+ mutex_lock(&close_lock);
+ node = nodeid2node(nodeid, 0);
+ if (!node) {
+ mutex_unlock(&close_lock);
+ srcu_read_unlock(&nodes_srcu, idx);
+ return dlm_lowcomms_close(nodeid);
+ }
+
+ ret = dlm_lowcomms_close(nodeid);
+ spin_lock(&node->state_lock);
+ midcomms_node_reset(node);
+ spin_unlock(&node->state_lock);
+ srcu_read_unlock(&nodes_srcu, idx);
+ mutex_unlock(&close_lock);
+
+ return ret;
+}
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
index 61e90a921849..579abc6929be 100644
--- a/fs/dlm/midcomms.h
+++ b/fs/dlm/midcomms.h
@@ -12,7 +12,22 @@
#ifndef __MIDCOMMS_DOT_H__
#define __MIDCOMMS_DOT_H__
+struct midcomms_node;
+
int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ gfp_t allocation, char **ppc);
+void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh);
+int dlm_midcomms_close(int nodeid);
+int dlm_midcomms_start(void);
+void dlm_midcomms_shutdown(void);
+void dlm_midcomms_add_member(int nodeid);
+void dlm_midcomms_remove_member(int nodeid);
+void dlm_midcomms_unack_msg_resend(int nodeid);
+const char *dlm_midcomms_state(struct midcomms_node *node);
+unsigned long dlm_midcomms_flags(struct midcomms_node *node);
+int dlm_midcomms_send_queue_cnt(struct midcomms_node *node);
+uint32_t dlm_midcomms_version(struct midcomms_node *node);
#endif /* __MIDCOMMS_DOT_H__ */
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index f5b1bd65728d..5651933f54a4 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -27,25 +27,15 @@ static int rcom_response(struct dlm_ls *ls)
return test_bit(LSFL_RCOM_READY, &ls->ls_flags);
}
-static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
- struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
+static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
+ struct dlm_rcom **rc_ret, char *mb, int mb_len)
{
struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
- char *mb;
- int mb_len = sizeof(struct dlm_rcom) + len;
-
- mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
- if (!mh) {
- log_print("create_rcom to %d type %d len %d ENOBUFS",
- to_nodeid, type, len);
- return -ENOBUFS;
- }
rc = (struct dlm_rcom *) mb;
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- rc->rc_header.h_lockspace = ls->ls_global_id;
+ rc->rc_header.u.h_lockspace = ls->ls_global_id;
rc->rc_header.h_nodeid = dlm_our_nodeid();
rc->rc_header.h_length = mb_len;
rc->rc_header.h_cmd = DLM_RCOM;
@@ -56,16 +46,67 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
rc->rc_seq = ls->ls_recover_seq;
spin_unlock(&ls->ls_recover_lock);
- *mh_ret = mh;
*rc_ret = rc;
+}
+
+static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
+ struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
+{
+ int mb_len = sizeof(struct dlm_rcom) + len;
+ struct dlm_mhandle *mh;
+ char *mb;
+
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
+ if (!mh) {
+ log_print("%s to %d type %d len %d ENOBUFS",
+ __func__, to_nodeid, type, len);
+ return -ENOBUFS;
+ }
+
+ _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len);
+ *mh_ret = mh;
+ return 0;
+}
+
+static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
+ int len, struct dlm_rcom **rc_ret,
+ struct dlm_msg **msg_ret)
+{
+ int mb_len = sizeof(struct dlm_rcom) + len;
+ struct dlm_msg *msg;
+ char *mb;
+
+ msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, GFP_NOFS, &mb,
+ NULL, NULL);
+ if (!msg) {
+ log_print("create_rcom to %d type %d len %d ENOBUFS",
+ to_nodeid, type, len);
+ return -ENOBUFS;
+ }
+
+ _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len);
+ *msg_ret = msg;
return 0;
}
+static void _send_rcom(struct dlm_ls *ls, struct dlm_rcom *rc)
+{
+ dlm_rcom_out(rc);
+}
+
static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
struct dlm_rcom *rc)
{
- dlm_rcom_out(rc);
- dlm_lowcomms_commit_buffer(mh);
+ _send_rcom(ls, rc);
+ dlm_midcomms_commit_mhandle(mh);
+}
+
+static void send_rcom_stateless(struct dlm_ls *ls, struct dlm_msg *msg,
+ struct dlm_rcom *rc)
+{
+ _send_rcom(ls, rc);
+ dlm_lowcomms_commit_msg(msg);
+ dlm_lowcomms_put_msg(msg);
}
static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs,
@@ -141,7 +182,7 @@ static void disallow_sync_reply(struct dlm_ls *ls)
int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
{
struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
+ struct dlm_msg *msg;
int error = 0;
ls->ls_recover_nodeid = nodeid;
@@ -153,17 +194,17 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
}
retry:
- error = create_rcom(ls, nodeid, DLM_RCOM_STATUS,
- sizeof(struct rcom_status), &rc, &mh);
+ error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS,
+ sizeof(struct rcom_status), &rc, &msg);
if (error)
goto out;
set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags);
allow_sync_reply(ls, &rc->rc_id);
- memset(ls->ls_recover_buf, 0, LOWCOMMS_MAX_TX_BUFFER_LEN);
+ memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE);
- send_rcom(ls, mh, rc);
+ send_rcom_stateless(ls, msg, rc);
error = dlm_wait_function(ls, &rcom_response);
disallow_sync_reply(ls);
@@ -191,11 +232,11 @@ retry:
static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
struct rcom_status *rs;
uint32_t status;
int nodeid = rc_in->rc_header.h_nodeid;
int len = sizeof(struct rcom_config);
+ struct dlm_msg *msg;
int num_slots = 0;
int error;
@@ -218,8 +259,8 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
len += num_slots * sizeof(struct rcom_slot);
do_create:
- error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
- len, &rc, &mh);
+ error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS_REPLY,
+ len, &rc, &msg);
if (error)
return;
@@ -246,7 +287,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
spin_unlock(&ls->ls_recover_lock);
do_send:
- send_rcom(ls, mh, rc);
+ send_rcom_stateless(ls, msg, rc);
}
static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
@@ -271,21 +312,22 @@ static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
{
struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
+ struct dlm_msg *msg;
int error = 0;
ls->ls_recover_nodeid = nodeid;
retry:
- error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
+ error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES, last_len,
+ &rc, &msg);
if (error)
goto out;
memcpy(rc->rc_buf, last_name, last_len);
allow_sync_reply(ls, &rc->rc_id);
- memset(ls->ls_recover_buf, 0, LOWCOMMS_MAX_TX_BUFFER_LEN);
+ memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE);
- send_rcom(ls, mh, rc);
+ send_rcom_stateless(ls, msg, rc);
error = dlm_wait_function(ls, &rcom_response);
disallow_sync_reply(ls);
@@ -298,14 +340,15 @@ retry:
static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
int error, inlen, outlen, nodeid;
+ struct dlm_msg *msg;
nodeid = rc_in->rc_header.h_nodeid;
inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
- outlen = LOWCOMMS_MAX_TX_BUFFER_LEN - sizeof(struct dlm_rcom);
+ outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom);
- error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
+ error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen,
+ &rc, &msg);
if (error)
return;
rc->rc_id = rc_in->rc_id;
@@ -313,7 +356,7 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
nodeid);
- send_rcom(ls, mh, rc);
+ send_rcom_stateless(ls, msg, rc);
}
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
@@ -342,10 +385,6 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
- error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
- if (error)
- return;
-
/* Old code would send this special id to trigger a debug dump. */
if (rc_in->rc_id == 0xFFFFFFFF) {
log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
@@ -353,6 +392,10 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
return;
}
+ error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
+ if (error)
+ return;
+
error = dlm_master_lookup(ls, nodeid, rc_in->rc_buf, len,
DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL);
if (error)
@@ -458,14 +501,14 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
char *mb;
int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
- mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(nodeid, mb_len, GFP_NOFS, &mb);
if (!mh)
return -ENOBUFS;
rc = (struct dlm_rcom *) mb;
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
- rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace;
+ rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace;
rc->rc_header.h_nodeid = dlm_our_nodeid();
rc->rc_header.h_length = mb_len;
rc->rc_header.h_cmd = DLM_RCOM;
@@ -479,7 +522,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
rf->rf_lvblen = cpu_to_le32(~0U);
dlm_rcom_out(rc);
- dlm_lowcomms_commit_buffer(mh);
+ dlm_midcomms_commit_mhandle(mh);
return 0;
}
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
index cfd0d00b19ae..58acbcc2081a 100644
--- a/fs/dlm/util.c
+++ b/fs/dlm/util.c
@@ -20,18 +20,20 @@
#define DLM_ERRNO_ETIMEDOUT 110
#define DLM_ERRNO_EINPROGRESS 115
-static void header_out(struct dlm_header *hd)
+void header_out(struct dlm_header *hd)
{
hd->h_version = cpu_to_le32(hd->h_version);
- hd->h_lockspace = cpu_to_le32(hd->h_lockspace);
+ /* this also converts the other u32 members of the union */
+ hd->u.h_lockspace = cpu_to_le32(hd->u.h_lockspace);
hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
hd->h_length = cpu_to_le16(hd->h_length);
}
-static void header_in(struct dlm_header *hd)
+void header_in(struct dlm_header *hd)
{
hd->h_version = le32_to_cpu(hd->h_version);
- hd->h_lockspace = le32_to_cpu(hd->h_lockspace);
+ /* this also converts the other u32 members of the union */
+ hd->u.h_lockspace = le32_to_cpu(hd->u.h_lockspace);
hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
hd->h_length = le16_to_cpu(hd->h_length);
}
diff --git a/fs/dlm/util.h b/fs/dlm/util.h
index cc719ca9397e..d46f23c7a6a0 100644
--- a/fs/dlm/util.h
+++ b/fs/dlm/util.h
@@ -15,6 +15,8 @@ void dlm_message_out(struct dlm_message *ms);
void dlm_message_in(struct dlm_message *ms);
void dlm_rcom_out(struct dlm_rcom *rc);
void dlm_rcom_in(struct dlm_rcom *rc);
+void header_out(struct dlm_header *hd);
+void header_in(struct dlm_header *hd);
#endif
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 392e721b50a3..7d85e64ea62f 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -533,7 +533,20 @@ static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
return block;
}
+#include <linux/buffer_head.h>
+
const struct address_space_operations ecryptfs_aops = {
+ /*
+ * XXX: This is pretty broken for multiple reasons: ecryptfs does not
+ * actually use buffer_heads, and ecryptfs will crash without
+ * CONFIG_BLOCK. But it matches the behavior before the default for
+ * address_space_operations without the ->set_page_dirty method was
+ * cleaned up, so this is the best we can do without maintainer
+ * feedback.
+ */
+#ifdef CONFIG_BLOCK
+ .set_page_dirty = __set_page_dirty_buffers,
+#endif
.writepage = ecryptfs_writepage,
.readpage = ecryptfs_readpage,
.write_begin = ecryptfs_write_begin,
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 858b3339f381..906af0c1998c 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -75,4 +75,3 @@ config EROFS_FS_ZIP
Enable fixed-sized output compression for EROFS.
If you don't want to enable compression feature, say N.
-
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index aea129ddda74..3701c72bacb2 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H
@@ -85,4 +84,3 @@ int z_erofs_decompress(struct z_erofs_decompress_req *rq,
struct list_head *pagepool);
#endif
-
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index ebac756cb2a3..3787a5fb0a42 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "internal.h"
#include <linux/prefetch.h>
@@ -315,4 +314,3 @@ const struct address_space_operations erofs_raw_access_aops = {
.readahead = erofs_raw_access_readahead,
.bmap = erofs_bmap,
};
-
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 88e33addf229..a5bc4b1b7813 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "compress.h"
#include <linux/module.h>
@@ -407,4 +406,3 @@ int z_erofs_decompress(struct z_erofs_decompress_req *rq,
return z_erofs_shifted_transform(rq, pagepool);
return z_erofs_decompress_generic(rq, pagepool);
}
-
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
index 2776bb832127..eee9b0b31b63 100644
--- a/fs/erofs/dir.c
+++ b/fs/erofs/dir.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "internal.h"
@@ -139,4 +138,3 @@ const struct file_operations erofs_dir_fops = {
.read = generic_read_dir,
.iterate_shared = erofs_readdir,
};
-
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 8739d3adf51f..0f8da74570b4 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -4,7 +4,6 @@
*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_FS_H
#define __EROFS_FS_H
@@ -348,4 +347,3 @@ static inline void erofs_check_ondisk_layout_definitions(void)
}
#endif
-
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 7ed2d7391692..aa8a0d770ba3 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "xattr.h"
@@ -374,4 +373,3 @@ const struct inode_operations erofs_fast_symlink_iops = {
.listxattr = erofs_listxattr,
.get_acl = erofs_get_acl,
};
-
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index f92e3e32b9f4..543c2ff97d30 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H
@@ -469,4 +468,3 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#endif /* __EROFS_INTERNAL_H */
-
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index 3a81e1f7fc06..a8271ce5e13f 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "xattr.h"
@@ -247,4 +246,3 @@ const struct inode_operations erofs_dir_iops = {
.listxattr = erofs_listxattr,
.get_acl = erofs_get_acl,
};
-
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index bbf3bbd908e0..8fc6c04b54f4 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include <linux/module.h>
#include <linux/buffer_head.h>
@@ -285,6 +284,7 @@ static int erofs_read_superblock(struct super_block *sb)
goto out;
}
+ ret = -EINVAL;
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
@@ -751,4 +751,3 @@ module_exit(erofs_module_exit);
MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");
-
diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
index a72897c86744..64ceb7270b5c 100644
--- a/fs/erofs/tagptr.h
+++ b/fs/erofs/tagptr.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* A tagged pointer implementation
- *
- * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_FS_TAGPTR_H
#define __EROFS_FS_TAGPTR_H
@@ -107,4 +105,3 @@ tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
*ptptr; })
#endif /* __EROFS_FS_TAGPTR_H */
-
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 6758c5b19f7c..bd86067a63f7 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "internal.h"
#include <linux/pagevec.h>
@@ -278,4 +277,3 @@ void erofs_exit_shrinker(void)
unregister_shrinker(&erofs_shrinker_info);
}
#endif /* !CONFIG_EROFS_FS_ZIP */
-
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 47314a26767a..8dd54b420a1d 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include <linux/security.h>
#include "xattr.h"
@@ -709,4 +708,3 @@ struct posix_acl *erofs_get_acl(struct inode *inode, int type)
return acl;
}
#endif
-
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 815304bd335f..366dcb400525 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_XATTR_H
#define __EROFS_XATTR_H
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 78e4b598ecca..cb4d0889eca9 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "zdata.h"
#include "compress.h"
@@ -380,7 +379,6 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
enum z_erofs_page_type type)
{
int ret;
- bool occupied;
/* give priority for inplaceio */
if (clt->mode >= COLLECT_PRIMARY &&
@@ -388,8 +386,7 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
z_erofs_try_inplace_io(clt, page))
return 0;
- ret = z_erofs_pagevec_enqueue(&clt->vector,
- page, type, &occupied);
+ ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
clt->cl->vcnt += (unsigned int)ret;
return ret ? 0 : -EAGAIN;
@@ -1471,4 +1468,3 @@ const struct address_space_operations z_erofs_aops = {
.readpage = z_erofs_readpage,
.readahead = z_erofs_readahead,
};
-
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 942ee69dff6a..3a008f1b9f78 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index efaf32596b97..f68aea4baed7 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2018-2019 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#include "internal.h"
#include <asm/unaligned.h>
@@ -597,4 +596,3 @@ out:
DBG_BUGON(err < 0 && err != -ENOMEM);
return err;
}
-
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
index 1d67cbd38704..dfd7fe0503bb 100644
--- a/fs/erofs/zpvec.h
+++ b/fs/erofs/zpvec.h
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2018 HUAWEI, Inc.
* https://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
*/
#ifndef __EROFS_FS_ZPVEC_H
#define __EROFS_FS_ZPVEC_H
@@ -107,10 +106,8 @@ static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
struct page *page,
- enum z_erofs_page_type type,
- bool *occupied)
+ enum z_erofs_page_type type)
{
- *occupied = false;
if (!ctor->next && type)
if (ctor->index + 1 == ctor->nr)
return false;
@@ -125,7 +122,6 @@ static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
/* should remind that collector->next never equal to 1, 2 */
if (type == (uintptr_t)ctor->next) {
ctor->next = page;
- *occupied = true;
}
ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
return true;
@@ -154,4 +150,3 @@ z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
return tagptr_unfold_ptr(t);
}
#endif
-
diff --git a/fs/exec.c b/fs/exec.c
index 18594f11c31f..f2bcdbeb3afb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1360,6 +1360,10 @@ int begin_new_exec(struct linux_binprm * bprm)
WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
flush_signal_handlers(me, 0);
+ retval = set_cred_ucounts(bprm->cred);
+ if (retval < 0)
+ goto out_unlock;
+
/*
* install the new credentials for this executable
*/
@@ -1874,7 +1878,7 @@ static int do_execveat_common(int fd, struct filename *filename,
* whether NPROC limit is still exceeded.
*/
if ((current->flags & PF_NPROC_EXCEEDED) &&
- atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
+ is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
retval = -EAGAIN;
goto out_ret;
}
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 1803ef3220fd..ca37d4344361 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -491,6 +491,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations exfat_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = exfat_readpage,
.readahead = exfat_readahead,
.writepage = exfat_writepage,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 68178b2234bd..dadb121beb22 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -961,6 +961,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
}
const struct address_space_operations ext2_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = ext2_readpage,
.readahead = ext2_readahead,
.writepage = ext2_writepage,
@@ -975,6 +976,7 @@ const struct address_space_operations ext2_aops = {
};
const struct address_space_operations ext2_nobh_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = ext2_readpage,
.readahead = ext2_readahead,
.writepage = ext2_nobh_writepage,
@@ -990,7 +992,7 @@ const struct address_space_operations ext2_nobh_aops = {
static const struct address_space_operations ext2_dax_aops = {
.writepages = ext2_dax_writepages,
.direct_IO = noop_direct_IO,
- .set_page_dirty = noop_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
};
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 77c84d6f1af6..cbf37b2cf871 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
ext4_ext_mark_unwritten(ex2);
err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+ if (err != -ENOSPC && err != -EDQUOT)
+ goto out;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
ext4_ext_pblock(&orig_ex));
}
- if (err)
- goto fix_extent_len;
- /* update the extent length and mark as initialized */
- ex->ee_len = cpu_to_le16(ee_len);
- ext4_ext_try_to_merge(handle, inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
- if (err)
- goto fix_extent_len;
-
- /* update extent status tree */
- err = ext4_zeroout_es(inode, &zero_ex);
-
- goto out;
- } else if (err)
- goto fix_extent_len;
-
-out:
- ext4_ext_show_leaf(inode, path);
- return err;
+ if (!err) {
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (!err)
+ /* update extent status tree */
+ err = ext4_zeroout_es(inode, &zero_ex);
+ /* If we failed at this point, we don't know in which
+ * state the extent tree exactly is so don't try to fix
+ * length of the original extent as it may do even more
+ * damage.
+ */
+ goto out;
+ }
+ }
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
@@ -3260,6 +3260,9 @@ fix_extent_len:
*/
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
+out:
+ ext4_ext_show_leaf(inode, path);
+ return err;
}
/*
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index f98ca4f37ef6..e8195229c252 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -1288,28 +1288,29 @@ struct dentry_info_args {
};
static inline void tl_to_darg(struct dentry_info_args *darg,
- struct ext4_fc_tl *tl)
+ struct ext4_fc_tl *tl, u8 *val)
{
- struct ext4_fc_dentry_info *fcd;
+ struct ext4_fc_dentry_info fcd;
- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+ memcpy(&fcd, val, sizeof(fcd));
- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
- darg->ino = le32_to_cpu(fcd->fc_ino);
- darg->dname = fcd->fc_dname;
- darg->dname_len = ext4_fc_tag_len(tl) -
- sizeof(struct ext4_fc_dentry_info);
+ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
+ darg->ino = le32_to_cpu(fcd.fc_ino);
+ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
+ darg->dname_len = le16_to_cpu(tl->fc_len) -
+ sizeof(struct ext4_fc_dentry_info);
}
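+
+/* Note the pattern of this change: fast-commit tags in the journal
+ * buffer are not guaranteed to be naturally aligned, so instead of
+ * casting the raw buffer to struct pointers the fields are memcpy()ed
+ * into on-stack copies before use, presumably to keep replay safe on
+ * architectures that trap on unaligned loads.
+ */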
/* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode, *old_parent;
struct qstr entry;
struct dentry_info_args darg;
int ret = 0;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1399,13 +1400,14 @@ out:
}
/* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode;
struct dentry_info_args darg;
int ret = 0;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
/*
* Inode replay function
*/
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
- struct ext4_fc_inode *fc_inode;
+ struct ext4_fc_inode fc_inode;
struct ext4_inode *raw_inode;
struct ext4_inode *raw_fc_inode;
struct inode *inode = NULL;
@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
struct ext4_extent_header *eh;
- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+ memcpy(&fc_inode, val, sizeof(fc_inode));
- ino = le32_to_cpu(fc_inode->fc_ino);
+ ino = le32_to_cpu(fc_inode.fc_ino);
trace_ext4_fc_replay(sb, tag, ino, 0, 0);
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
ext4_fc_record_modified_inode(sb, ino);
- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+ raw_fc_inode = (struct ext4_inode *)
+ (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
if (ret)
goto out;
- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
raw_inode = ext4_raw_inode(&iloc);
memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
@@ -1547,14 +1551,15 @@ out:
* inode for which we are trying to create a dentry here, should already have
* been replayed before we start here.
*/
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
int ret = 0;
struct inode *inode = NULL;
struct inode *dir = NULL;
struct dentry_info_args darg;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
/* Replay add range tag */
static int ext4_fc_replay_add_range(struct super_block *sb,
- struct ext4_fc_tl *tl)
+ struct ext4_fc_tl *tl, u8 *val)
{
- struct ext4_fc_add_range *fc_add_ex;
+ struct ext4_fc_add_range fc_add_ex;
struct ext4_extent newex, *ex;
struct inode *inode;
ext4_lblk_t start, cur;
@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
struct ext4_ext_path *path = NULL;
int ret;
- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
- ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+ memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
+ ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
- EXT4_IGET_NORMAL);
+ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
jbd_debug(1, "Inode not found.");
return 0;
@@ -1762,32 +1766,33 @@ next:
/* Replay DEL_RANGE tag */
static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode;
- struct ext4_fc_del_range *lrange;
+ struct ext4_fc_del_range lrange;
struct ext4_map_blocks map;
ext4_lblk_t cur, remaining;
int ret;
- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
- cur = le32_to_cpu(lrange->fc_lblk);
- remaining = le32_to_cpu(lrange->fc_len);
+ memcpy(&lrange, val, sizeof(lrange));
+ cur = le32_to_cpu(lrange.fc_lblk);
+ remaining = le32_to_cpu(lrange.fc_len);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
- le32_to_cpu(lrange->fc_ino), cur, remaining);
+ le32_to_cpu(lrange.fc_ino), cur, remaining);
- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
return 0;
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
- inode->i_ino, le32_to_cpu(lrange->fc_lblk),
- le32_to_cpu(lrange->fc_len));
+ inode->i_ino, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_len));
while (remaining > 0) {
map.m_lblk = cur;
map.m_len = remaining;
@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
}
ret = ext4_punch_hole(inode,
- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits);
+ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
+ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits);
if (ret)
jbd_debug(1, "ext4_punch_hole returned %d", ret);
ext4_ext_replay_shrink_inode(inode,
@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_fc_replay_state *state;
int ret = JBD2_FC_REPLAY_CONTINUE;
- struct ext4_fc_add_range *ext;
- struct ext4_fc_tl *tl;
- struct ext4_fc_tail *tail;
- __u8 *start, *end;
- struct ext4_fc_head *head;
+ struct ext4_fc_add_range ext;
+ struct ext4_fc_tl tl;
+ struct ext4_fc_tail tail;
+ __u8 *start, *end, *cur, *val;
+ struct ext4_fc_head head;
struct ext4_extent *ex;
state = &sbi->s_fc_replay_state;
@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
}
state->fc_replay_expected_off++;
- fc_for_each_tl(start, end, tl) {
+ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+ memcpy(&tl, cur, sizeof(tl));
+ val = cur + sizeof(tl);
jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
- switch (le16_to_cpu(tl->fc_tag)) {
+ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
+ switch (le16_to_cpu(tl.fc_tag)) {
case EXT4_FC_TAG_ADD_RANGE:
- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
- ex = (struct ext4_extent *)&ext->fc_ex;
+ memcpy(&ext, val, sizeof(ext));
+ ex = (struct ext4_extent *)&ext.fc_ex;
ret = ext4_fc_record_regions(sb,
- le32_to_cpu(ext->fc_ino),
+ le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
ext4_ext_get_actual_len(ex));
if (ret < 0)
@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
case EXT4_FC_TAG_INODE:
case EXT4_FC_TAG_PAD:
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) + ext4_fc_tag_len(tl));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) + le16_to_cpu(tl.fc_len));
break;
case EXT4_FC_TAG_TAIL:
state->fc_cur_tag++;
- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) +
+ memcpy(&tail, val, sizeof(tail));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) +
offsetof(struct ext4_fc_tail,
fc_crc));
- if (le32_to_cpu(tail->fc_tid) == expected_tid &&
- le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+ if (le32_to_cpu(tail.fc_tid) == expected_tid &&
+ le32_to_cpu(tail.fc_crc) == state->fc_crc) {
state->fc_replay_num_tags = state->fc_cur_tag;
state->fc_regions_valid =
state->fc_regions_used;
@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
state->fc_crc = 0;
break;
case EXT4_FC_TAG_HEAD:
- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
- if (le32_to_cpu(head->fc_features) &
+ memcpy(&head, val, sizeof(head));
+ if (le32_to_cpu(head.fc_features) &
~EXT4_FC_SUPPORTED_FEATURES) {
ret = -EOPNOTSUPP;
break;
}
- if (le32_to_cpu(head->fc_tid) != expected_tid) {
+ if (le32_to_cpu(head.fc_tid) != expected_tid) {
ret = JBD2_FC_REPLAY_STOP;
break;
}
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) + ext4_fc_tag_len(tl));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) + le16_to_cpu(tl.fc_len));
break;
default:
ret = state->fc_replay_num_tags ?
@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_fc_tl *tl;
- __u8 *start, *end;
+ struct ext4_fc_tl tl;
+ __u8 *start, *end, *cur, *val;
int ret = JBD2_FC_REPLAY_CONTINUE;
struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
- struct ext4_fc_tail *tail;
+ struct ext4_fc_tail tail;
if (pass == PASS_SCAN) {
state->fc_current_pass = PASS_SCAN;
@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
start = (u8 *)bh->b_data;
end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
- fc_for_each_tl(start, end, tl) {
+ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+ memcpy(&tl, cur, sizeof(tl));
+ val = cur + sizeof(tl);
+
if (state->fc_replay_num_tags == 0) {
ret = JBD2_FC_REPLAY_STOP;
ext4_fc_set_bitmaps_and_counters(sb);
break;
}
jbd_debug(3, "Replay phase, tag:%s\n",
- tag2str(le16_to_cpu(tl->fc_tag)));
+ tag2str(le16_to_cpu(tl.fc_tag)));
state->fc_replay_num_tags--;
- switch (le16_to_cpu(tl->fc_tag)) {
+ switch (le16_to_cpu(tl.fc_tag)) {
case EXT4_FC_TAG_LINK:
- ret = ext4_fc_replay_link(sb, tl);
+ ret = ext4_fc_replay_link(sb, &tl, val);
break;
case EXT4_FC_TAG_UNLINK:
- ret = ext4_fc_replay_unlink(sb, tl);
+ ret = ext4_fc_replay_unlink(sb, &tl, val);
break;
case EXT4_FC_TAG_ADD_RANGE:
- ret = ext4_fc_replay_add_range(sb, tl);
+ ret = ext4_fc_replay_add_range(sb, &tl, val);
break;
case EXT4_FC_TAG_CREAT:
- ret = ext4_fc_replay_create(sb, tl);
+ ret = ext4_fc_replay_create(sb, &tl, val);
break;
case EXT4_FC_TAG_DEL_RANGE:
- ret = ext4_fc_replay_del_range(sb, tl);
+ ret = ext4_fc_replay_del_range(sb, &tl, val);
break;
case EXT4_FC_TAG_INODE:
- ret = ext4_fc_replay_inode(sb, tl);
+ ret = ext4_fc_replay_inode(sb, &tl, val);
break;
case EXT4_FC_TAG_PAD:
trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
- ext4_fc_tag_len(tl), 0);
+ le16_to_cpu(tl.fc_len), 0);
break;
case EXT4_FC_TAG_TAIL:
trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
- ext4_fc_tag_len(tl), 0);
- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+ le16_to_cpu(tl.fc_len), 0);
+ memcpy(&tail, val, sizeof(tail));
+ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
break;
case EXT4_FC_TAG_HEAD:
break;
default:
- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
- ext4_fc_tag_len(tl), 0);
+ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
+ le16_to_cpu(tl.fc_len), 0);
ret = -ECANCELED;
break;
}
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
index b77f70f55a62..937c381b4c85 100644
--- a/fs/ext4/fast_commit.h
+++ b/fs/ext4/fast_commit.h
@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
#define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
#endif
-#define fc_for_each_tl(__start, __end, __tl) \
- for (tl = (struct ext4_fc_tl *)(__start); \
- (__u8 *)tl < (__u8 *)(__end); \
- tl = (struct ext4_fc_tl *)((__u8 *)tl + \
- sizeof(struct ext4_fc_tl) + \
- + le16_to_cpu(tl->fc_len)))
-
static inline const char *tag2str(__u16 tag)
{
switch (tag) {
@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
}
}
-/* Get length of a particular tlv */
-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
-{
- return le16_to_cpu(tl->fc_len);
-}
-
-/* Get a pointer to "value" of a tlv */
-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
-{
- return (__u8 *)tl + sizeof(*tl);
-}
-
#endif /* __FAST_COMMIT_H__ */
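The replay-path changes above share one idea: instead of casting struct ext4_fc_tl (and the tag-specific payload structs) straight into the journal buffer, where they may sit at any byte offset, the code now memcpy()s each header and value into aligned stack copies and open-codes the old fc_for_each_tl() walk. A standalone sketch of that TLV walk, with hypothetical field names; it assumes glibc's le16toh() where the kernel uses le16_to_cpu(), and bounds the loop slightly more strictly than the kernel's cur < end check:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* hypothetical TLV header: 16-bit tag, 16-bit payload length,
 * little-endian on disk and possibly unaligned in the block */
struct tl_hdr {
        uint16_t tag;
        uint16_t len;
};

static void walk_tlvs(const uint8_t *start, const uint8_t *end)
{
        struct tl_hdr tl;
        const uint8_t *cur, *val;

        for (cur = start; cur + sizeof(tl) <= end;
             cur += sizeof(tl) + le16toh(tl.len)) {
                /* copy the header out instead of casting the buffer:
                 * safe on CPUs that trap on unaligned loads */
                memcpy(&tl, cur, sizeof(tl));
                val = cur + sizeof(tl);
                /* dispatch on le16toh(tl.tag); payload starts at val */
                (void)val;
        }
}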
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 81a17a3cd80e..9bab7fd4ccd5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (is_directory) {
count = ext4_used_dirs_count(sb, gdp) - 1;
ext4_used_dirs_set(sb, gdp, count);
- percpu_counter_dec(&sbi->s_dirs_counter);
+ if (percpu_counter_initialized(&sbi->s_dirs_counter))
+ percpu_counter_dec(&sbi->s_dirs_counter);
}
ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
EXT4_INODES_PER_GROUP(sb) / 8);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
- percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
+ percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
struct flex_groups *fg;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fe6045a46599..b8170a008590 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3701,7 +3701,7 @@ static const struct address_space_operations ext4_da_aops = {
static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
.direct_IO = noop_direct_IO,
- .set_page_dirty = noop_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.bmap = ext4_bmap,
.invalidatepage = noop_invalidatepage,
.swap_activate = ext4_iomap_swap_activate,
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 3239e6669e84..c2c22c2baac0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3217,7 +3217,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
*/
if (sbi->s_es->s_log_groups_per_flex >= 32) {
ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
- goto err_freesgi;
+ goto err_freebuddy;
}
sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index afb9d05a99ba..a4af26d4459a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1376,7 +1376,8 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
struct dx_hash_info *hinfo = &name->hinfo;
int len;
- if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+ if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+ (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
cf_name->name = NULL;
return 0;
}
@@ -1427,7 +1428,8 @@ static bool ext4_match(struct inode *parent,
#endif
#ifdef CONFIG_UNICODE
- if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+ if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+ (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
if (fname->cf_name.name) {
struct qstr cf = {.name = fname->cf_name.name,
.len = fname->cf_name.len};
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7dc94f3e18e6..d29f6aa7d96e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4462,14 +4462,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (sb->s_blocksize != blocksize) {
+ /*
+ * bh must be released before kill_bdev(), otherwise
+ * neither it nor its page will be freed. kill_bdev()
+ * is called by sb_set_blocksize().
+ */
+ brelse(bh);
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
ext4_msg(sb, KERN_ERR, "bad block size %d",
blocksize);
+ bh = NULL;
goto failed_mount;
}
- brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
@@ -5202,8 +5208,9 @@ failed_mount:
kfree(get_qf_name(sb, sbi, i));
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
- ext4_blkdev_remove(sbi);
+ /* ext4_blkdev_remove() calls kill_bdev(), so release bh before calling it. */
brelse(bh);
+ ext4_blkdev_remove(sbi);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 6f825dedc3d4..55fcab60a59a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -315,7 +315,9 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
+#endif
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
@@ -333,7 +335,9 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index bab9b202b496..de0c9b013a85 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -342,6 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations fat_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = fat_readpage,
.readahead = fat_readahead,
.writepage = fat_writepage,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e91980f49388..8c7e9e51a398 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -131,25 +131,6 @@ static bool inode_io_list_move_locked(struct inode *inode,
return false;
}
-/**
- * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
- * @inode: inode to be removed
- * @wb: bdi_writeback @inode is being removed from
- *
- * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
- * clear %WB_has_dirty_io if all are empty afterwards.
- */
-static void inode_io_list_del_locked(struct inode *inode,
- struct bdi_writeback *wb)
-{
- assert_spin_locked(&wb->list_lock);
- assert_spin_locked(&inode->i_lock);
-
- inode->i_state &= ~I_SYNC_QUEUED;
- list_del_init(&inode->i_io_list);
- wb_io_lists_depopulated(wb);
-}
-
static void wb_wakeup(struct bdi_writeback *wb)
{
spin_lock_bh(&wb->work_lock);
@@ -244,6 +225,13 @@ void wb_wait_for_completion(struct wb_completion *done)
/* one round can affect upto 5 slots */
#define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
+/*
+ * Maximum inodes per isw. A specific value has been chosen to make
+ * struct inode_switch_wbs_context fit into 1024 bytes kmalloc.
+ */
+#define WB_MAX_INODES_PER_ISW ((1024UL - sizeof(struct inode_switch_wbs_context)) \
+ / sizeof(struct inode *))
+
static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
@@ -279,6 +267,28 @@ void __inode_attach_wb(struct inode *inode, struct page *page)
EXPORT_SYMBOL_GPL(__inode_attach_wb);
/**
+ * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
+ * @inode: inode of interest with i_lock held
+ * @wb: target bdi_writeback
+ *
+ * Remove the inode from wb's io lists and, if necessary, put it onto the b_attached
+ * list. Only inodes attached to cgwb's are kept on this list.
+ */
+static void inode_cgwb_move_to_attached(struct inode *inode,
+ struct bdi_writeback *wb)
+{
+ assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
+
+ inode->i_state &= ~I_SYNC_QUEUED;
+ if (wb != &wb->bdi->wb)
+ list_move(&inode->i_io_list, &wb->b_attached);
+ else
+ list_del_init(&inode->i_io_list);
+ wb_io_lists_depopulated(wb);
+}
+
+/**
* locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
* @inode: inode of interest with i_lock held
*
@@ -332,11 +342,18 @@ static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
}
struct inode_switch_wbs_context {
- struct inode *inode;
- struct bdi_writeback *new_wb;
+ struct rcu_work work;
- struct rcu_head rcu_head;
- struct work_struct work;
+ /*
+ * Multiple inodes can be switched at once. The switching procedure
+ * consists of two parts, separated by a RCU grace period. To make
+ * sure that the second part is executed for each inode gone through
+ * the first part, all inode pointers are placed into a NULL-terminated
+ * array embedded into struct inode_switch_wbs_context. Otherwise
+ * an inode could be left in an inconsistent state.
+ */
+ struct bdi_writeback *new_wb;
+ struct inode *inodes[];
};
static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
@@ -349,50 +366,23 @@ static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
up_write(&bdi->wb_switch_rwsem);
}
-static void inode_switch_wbs_work_fn(struct work_struct *work)
+static bool inode_do_switch_wbs(struct inode *inode,
+ struct bdi_writeback *old_wb,
+ struct bdi_writeback *new_wb)
{
- struct inode_switch_wbs_context *isw =
- container_of(work, struct inode_switch_wbs_context, work);
- struct inode *inode = isw->inode;
- struct backing_dev_info *bdi = inode_to_bdi(inode);
struct address_space *mapping = inode->i_mapping;
- struct bdi_writeback *old_wb = inode->i_wb;
- struct bdi_writeback *new_wb = isw->new_wb;
XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
bool switched = false;
- /*
- * If @inode switches cgwb membership while sync_inodes_sb() is
- * being issued, sync_inodes_sb() might miss it. Synchronize.
- */
- down_read(&bdi->wb_switch_rwsem);
-
- /*
- * By the time control reaches here, RCU grace period has passed
- * since I_WB_SWITCH assertion and all wb stat update transactions
- * between unlocked_inode_to_wb_begin/end() are guaranteed to be
- * synchronizing against the i_pages lock.
- *
- * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
- * gives us exclusion against all wb related operations on @inode
- * including IO list manipulations and stat updates.
- */
- if (old_wb < new_wb) {
- spin_lock(&old_wb->list_lock);
- spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock(&new_wb->list_lock);
- spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
- }
spin_lock(&inode->i_lock);
xa_lock_irq(&mapping->i_pages);
/*
- * Once I_FREEING is visible under i_lock, the eviction path owns
- * the inode and we shouldn't modify ->i_io_list.
+ * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
+ * path owns the inode and we shouldn't modify ->i_io_list.
*/
- if (unlikely(inode->i_state & I_FREEING))
+ if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
goto skip_switch;
trace_inode_switch_wbs(inode, old_wb, new_wb);
@@ -419,21 +409,28 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
wb_get(new_wb);
/*
- * Transfer to @new_wb's IO list if necessary. The specific list
- * @inode was on is ignored and the inode is put on ->b_dirty which
- * is always correct including from ->b_dirty_time. The transfer
- * preserves @inode->dirtied_when ordering.
+ * Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
+ * the specific list @inode was on is ignored and the @inode is put on
+ * ->b_dirty which is always correct including from ->b_dirty_time.
+ * The transfer preserves @inode->dirtied_when ordering. If the @inode
+ * was clean, it means it was on the b_attached list, so move it onto
+ * the b_attached list of @new_wb.
*/
if (!list_empty(&inode->i_io_list)) {
- struct inode *pos;
-
- inode_io_list_del_locked(inode, old_wb);
inode->i_wb = new_wb;
- list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
- if (time_after_eq(inode->dirtied_when,
- pos->dirtied_when))
- break;
- inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
+
+ if (inode->i_state & I_DIRTY_ALL) {
+ struct inode *pos;
+
+ list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
+ if (time_after_eq(inode->dirtied_when,
+ pos->dirtied_when))
+ break;
+ inode_io_list_move_locked(inode, new_wb,
+ pos->i_io_list.prev);
+ } else {
+ inode_cgwb_move_to_attached(inode, new_wb);
+ }
} else {
inode->i_wb = new_wb;
}
@@ -452,31 +449,91 @@ skip_switch:
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&inode->i_lock);
+
+ return switched;
+}
+
+static void inode_switch_wbs_work_fn(struct work_struct *work)
+{
+ struct inode_switch_wbs_context *isw =
+ container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
+ struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
+ struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
+ struct bdi_writeback *new_wb = isw->new_wb;
+ unsigned long nr_switched = 0;
+ struct inode **inodep;
+
+ /*
+ * If @inode switches cgwb membership while sync_inodes_sb() is
+ * being issued, sync_inodes_sb() might miss it. Synchronize.
+ */
+ down_read(&bdi->wb_switch_rwsem);
+
+ /*
+ * By the time control reaches here, RCU grace period has passed
+ * since I_WB_SWITCH assertion and all wb stat update transactions
+ * between unlocked_inode_to_wb_begin/end() are guaranteed to be
+ * synchronizing against the i_pages lock.
+ *
+ * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
+ * gives us exclusion against all wb related operations on @inode
+ * including IO list manipulations and stat updates.
+ */
+ if (old_wb < new_wb) {
+ spin_lock(&old_wb->list_lock);
+ spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock(&new_wb->list_lock);
+ spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
+ }
+
+ for (inodep = isw->inodes; *inodep; inodep++) {
+ WARN_ON_ONCE((*inodep)->i_wb != old_wb);
+ if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
+ nr_switched++;
+ }
+
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
up_read(&bdi->wb_switch_rwsem);
- if (switched) {
+ if (nr_switched) {
wb_wakeup(new_wb);
- wb_put(old_wb);
+ wb_put_many(old_wb, nr_switched);
}
- wb_put(new_wb);
- iput(inode);
+ for (inodep = isw->inodes; *inodep; inodep++)
+ iput(*inodep);
+ wb_put(new_wb);
kfree(isw);
-
atomic_dec(&isw_nr_in_flight);
}
-static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
+static bool inode_prepare_wbs_switch(struct inode *inode,
+ struct bdi_writeback *new_wb)
{
- struct inode_switch_wbs_context *isw = container_of(rcu_head,
- struct inode_switch_wbs_context, rcu_head);
+ /*
+ * Paired with smp_mb() in cgroup_writeback_umount().
+ * isw_nr_in_flight must be increased before checking SB_ACTIVE and
+ * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
+ * in cgroup_writeback_umount() and the isw_wq will not be flushed.
+ */
+ smp_mb();
+
+ /* while holding I_WB_SWITCH, no one else can update the association */
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
+ inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
+ inode_to_wb(inode) == new_wb) {
+ spin_unlock(&inode->i_lock);
+ return false;
+ }
+ inode->i_state |= I_WB_SWITCH;
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
- /* needs to grab bh-unsafe locks, bounce to work item */
- INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
- queue_work(isw_wq, &isw->work);
+ return true;
}
/**
@@ -501,10 +558,12 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
return;
- isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
+ isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC);
if (!isw)
return;
+ atomic_inc(&isw_nr_in_flight);
+
/* find and pin the new wb */
rcu_read_lock();
memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
@@ -514,19 +573,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (!isw->new_wb)
goto out_free;
- /* while holding I_WB_SWITCH, no one else can update the association */
- spin_lock(&inode->i_lock);
- if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
- inode->i_state & (I_WB_SWITCH | I_FREEING) ||
- inode_to_wb(inode) == isw->new_wb) {
- spin_unlock(&inode->i_lock);
+ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
goto out_free;
- }
- inode->i_state |= I_WB_SWITCH;
- __iget(inode);
- spin_unlock(&inode->i_lock);
- isw->inode = inode;
+ isw->inodes[0] = inode;
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
@@ -534,18 +584,85 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* lock so that stat transfer can synchronize against them.
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
- call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-
- atomic_inc(&isw_nr_in_flight);
+ INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
+ queue_rcu_work(isw_wq, &isw->work);
return;
out_free:
+ atomic_dec(&isw_nr_in_flight);
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
}
/**
+ * cleanup_offline_cgwb - detach associated inodes
+ * @wb: target wb
+ *
+ * Switch all inodes attached to @wb to the nearest living ancestor's wb in order
+ * to eventually release the dying @wb. Returns %true if not all inodes were
+ * switched and the function has to be restarted.
+ */
+bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+{
+ struct cgroup_subsys_state *memcg_css;
+ struct inode_switch_wbs_context *isw;
+ struct inode *inode;
+ int nr;
+ bool restart = false;
+
+ isw = kzalloc(sizeof(*isw) + WB_MAX_INODES_PER_ISW *
+ sizeof(struct inode *), GFP_KERNEL);
+ if (!isw)
+ return restart;
+
+ atomic_inc(&isw_nr_in_flight);
+
+ for (memcg_css = wb->memcg_css->parent; memcg_css;
+ memcg_css = memcg_css->parent) {
+ isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
+ if (isw->new_wb)
+ break;
+ }
+ if (unlikely(!isw->new_wb))
+ isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
+
+ nr = 0;
+ spin_lock(&wb->list_lock);
+ list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ continue;
+
+ isw->inodes[nr++] = inode;
+
+ if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+ restart = true;
+ break;
+ }
+ }
+ spin_unlock(&wb->list_lock);
+
+ /* no attached inodes? bail out */
+ if (nr == 0) {
+ atomic_dec(&isw_nr_in_flight);
+ wb_put(isw->new_wb);
+ kfree(isw);
+ return restart;
+ }
+
+ /*
+ * In addition to synchronizing among switchers, I_WB_SWITCH tells
+ * the RCU protected stat update paths to grab the i_page
+ * lock so that stat transfer can synchronize against them.
+ * Let's continue after I_WB_SWITCH is guaranteed to be visible.
+ */
+ INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
+ queue_rcu_work(isw_wq, &isw->work);
+
+ return restart;
+}
+
+/**
* wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
* @wbc: writeback_control of interest
* @inode: target inode
@@ -1000,6 +1117,12 @@ out_bdi_put:
*/
void cgroup_writeback_umount(void)
{
+ /*
+ * SB_ACTIVE should be reliably cleared before checking
+ * isw_nr_in_flight, see generic_shutdown_super().
+ */
+ smp_mb();
+
if (atomic_read(&isw_nr_in_flight)) {
/*
* Use rcu_barrier() to wait for all pending callbacks to
@@ -1024,6 +1147,17 @@ fs_initcall(cgroup_writeback_init);
static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void inode_cgwb_move_to_attached(struct inode *inode,
+ struct bdi_writeback *wb)
+{
+ assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
+
+ inode->i_state &= ~I_SYNC_QUEUED;
+ list_del_init(&inode->i_io_list);
+ wb_io_lists_depopulated(wb);
+}
+
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
@@ -1124,7 +1258,11 @@ void inode_io_list_del(struct inode *inode)
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
- inode_io_list_del_locked(inode, wb);
+
+ inode->i_state &= ~I_SYNC_QUEUED;
+ list_del_init(&inode->i_io_list);
+ wb_io_lists_depopulated(wb);
+
spin_unlock(&inode->i_lock);
spin_unlock(&wb->list_lock);
}
@@ -1437,7 +1575,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
inode->i_state &= ~I_SYNC_QUEUED;
} else {
/* The inode is clean. Remove from writeback lists. */
- inode_io_list_del_locked(inode, wb);
+ inode_cgwb_move_to_attached(inode, wb);
}
}
@@ -1589,7 +1727,7 @@ static int writeback_single_inode(struct inode *inode,
* responsible for the writeback lists.
*/
if (!(inode->i_state & I_DIRTY_ALL))
- inode_io_list_del_locked(inode, wb);
+ inode_cgwb_move_to_attached(inode, wb);
spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
out:
@@ -2205,28 +2343,6 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
return ret;
}
-static noinline void block_dump___mark_inode_dirty(struct inode *inode)
-{
- if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
- struct dentry *dentry;
- const char *name = "?";
-
- dentry = d_find_alias(inode);
- if (dentry) {
- spin_lock(&dentry->d_lock);
- name = (const char *) dentry->d_name.name;
- }
- printk(KERN_DEBUG
- "%s(%d): dirtied inode %lu (%s) on %s\n",
- current->comm, task_pid_nr(current), inode->i_ino,
- name, inode->i_sb->s_id);
- if (dentry) {
- spin_unlock(&dentry->d_lock);
- dput(dentry);
- }
- }
-}
-
/**
* __mark_inode_dirty - internal function to mark an inode dirty
*
@@ -2296,9 +2412,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
(dirtytime && (inode->i_state & I_DIRTY_INODE)))
return;
- if (unlikely(block_dump))
- block_dump___mark_inode_dirty(inode);
-
spin_lock(&inode->i_lock);
if (dirtytime && (inode->i_state & I_DIRTY_INODE))
goto out_unlock_inode;
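The writeback rework above is the heart of this series: inode_switch_wbs_context grows a NULL-terminated flexible array of inodes, so one RCU grace period and one pair of list_lock round trips can cover a whole batch, and the hand-rolled call_rcu() plus work item is replaced with the rcu_work API. A hedged, generic sketch of that queue-after-grace-period shape; struct item, do_switch_one() and queue_switch_batch() are stand-ins, not kernel symbols:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct item;

static void do_switch_one(struct item *item)
{
        /* hypothetical per-item switch step */
        (void)item;
}

struct switch_ctx {
        struct rcu_work work;
        /* NULL-terminated so the handler finds the end of the batch
         * without carrying a separate count */
        struct item *items[];
};

static void switch_work_fn(struct work_struct *work)
{
        struct switch_ctx *ctx =
                container_of(to_rcu_work(work), struct switch_ctx, work);
        struct item **p;

        for (p = ctx->items; *p; p++)
                do_switch_one(*p);
        kfree(ctx);
}

static bool queue_switch_batch(struct item **items, int nr)
{
        struct switch_ctx *ctx;
        int i;

        /* one extra slot for the terminating NULL; kzalloc() zeroes it */
        ctx = kzalloc(struct_size(ctx, items, nr + 1), GFP_KERNEL);
        if (!ctx)
                return false;
        for (i = 0; i < nr; i++)
                ctx->items[i] = items[i];
        INIT_RCU_WORK(&ctx->work, switch_work_fn);
        /* the handler runs only after an RCU grace period has elapsed */
        return queue_rcu_work(system_wq, &ctx->work);
}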
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index ff99ab2a3c43..fb733eb5aead 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
+#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>
@@ -1329,7 +1330,7 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
static const struct address_space_operations fuse_dax_file_aops = {
.writepages = fuse_dax_writepages,
.direct_IO = noop_direct_IO,
- .set_page_dirty = noop_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
};
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 23b5be3db044..81d8f064126e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -784,7 +784,7 @@ static const struct address_space_operations gfs2_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readahead = gfs2_readahead,
- .set_page_dirty = iomap_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.bmap = gfs2_bmap,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0bcf11a9987b..ed8b67b21718 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -56,14 +56,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
u64 block, struct page *page)
{
struct inode *inode = &ip->i_inode;
- int release = 0;
-
- if (!page || page->index) {
- page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
- if (!page)
- return -ENOMEM;
- release = 1;
- }
if (!PageUptodate(page)) {
void *kaddr = kmap(page);
@@ -97,26 +89,10 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
gfs2_ordered_add_inode(ip);
}
- if (release) {
- unlock_page(page);
- put_page(page);
- }
-
return 0;
}
-/**
- * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
- * @ip: The GFS2 inode to unstuff
- * @page: The (optional) page. This is looked up if the @page is NULL
- *
- * This routine unstuffs a dinode and returns it to a "normal" state such
- * that the height can be grown in the traditional way.
- *
- * Returns: errno
- */
-
-int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
+static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *bh, *dibh;
struct gfs2_dinode *di;
@@ -124,11 +100,9 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
int isdir = gfs2_is_dir(ip);
int error;
- down_write(&ip->i_rw_mutex);
-
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
- goto out;
+ return error;
if (i_size_read(&ip->i_inode)) {
/* Get a free block, fill it with the stuffed data,
@@ -170,12 +144,38 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
out_brelse:
brelse(dibh);
+ return error;
+}
+
+/**
+ * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
+ * @ip: The GFS2 inode to unstuff
+ *
+ * This routine unstuffs a dinode and returns it to a "normal" state such
+ * that the height can be grown in the traditional way.
+ *
+ * Returns: errno
+ */
+
+int gfs2_unstuff_dinode(struct gfs2_inode *ip)
+{
+ struct inode *inode = &ip->i_inode;
+ struct page *page;
+ int error;
+
+ down_write(&ip->i_rw_mutex);
+ page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ error = -ENOMEM;
+ if (!page)
+ goto out;
+ error = __gfs2_unstuff_inode(ip, page);
+ unlock_page(page);
+ put_page(page);
out:
up_write(&ip->i_rw_mutex);
return error;
}
-
/**
* find_metapath - Find path through the metadata tree
* @sdp: The superblock
@@ -1079,7 +1079,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
goto out_trans_fail;
if (unstuff) {
- ret = gfs2_unstuff_dinode(ip, NULL);
+ ret = gfs2_unstuff_dinode(ip);
if (ret)
goto out_trans_end;
release_metapath(mp);
@@ -2143,7 +2143,7 @@ static int do_grow(struct inode *inode, u64 size)
goto do_grow_release;
if (unstuff) {
- error = gfs2_unstuff_dinode(ip, NULL);
+ error = gfs2_unstuff_dinode(ip);
if (error)
goto do_end_trans;
}
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 6676d863faef..53cce6c08e81 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -46,7 +46,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
extern const struct iomap_ops gfs2_iomap_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;
-extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_unstuff_dinode(struct gfs2_inode *ip);
extern int gfs2_block_map(struct inode *inode, sector_t lblock,
struct buffer_head *bh, int create);
extern int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 18f67b37d6f8..42b7dfffb5e7 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -172,7 +172,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
return -EINVAL;
if (gfs2_is_stuffed(ip)) {
- error = gfs2_unstuff_dinode(ip, NULL);
+ error = gfs2_unstuff_dinode(ip);
if (error)
return error;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index a0b542d84cd9..84ec053d43b4 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -210,7 +210,7 @@ void gfs2_set_inode_flags(struct inode *inode)
/**
* do_gfs2_set_flags - set flags on an inode
- * @filp: file pointer
+ * @inode: The inode
* @reqflags: The flags to set
* @mask: Indicates which flags are valid
* @fsflags: The FS_* inode flags passed in
@@ -427,22 +427,25 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
struct gfs2_alloc_parms ap = { .aflags = 0, };
u64 offset = page_offset(page);
unsigned int data_blocks, ind_blocks, rblocks;
+ vm_fault_t ret = VM_FAULT_LOCKED;
struct gfs2_holder gh;
unsigned int length;
loff_t size;
- int ret;
+ int err;
sb_start_pagefault(inode->i_sb);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- ret = gfs2_glock_nq(&gh);
- if (ret)
+ err = gfs2_glock_nq(&gh);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
goto out_uninit;
+ }
/* Check page index against inode size */
size = i_size_read(inode);
if (offset >= size) {
- ret = -EINVAL;
+ ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -450,8 +453,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
file_update_time(vmf->vma->vm_file);
/* page is wholly or partially inside EOF */
- if (offset > size - PAGE_SIZE)
- length = offset_in_page(size);
+ if (size - offset < PAGE_SIZE)
+ length = size - offset;
else
length = PAGE_SIZE;
@@ -469,24 +472,30 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
!gfs2_write_alloc_required(ip, offset, length)) {
lock_page(page);
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
- ret = -EAGAIN;
+ ret = VM_FAULT_NOPAGE;
unlock_page(page);
}
goto out_unlock;
}
- ret = gfs2_rindex_update(sdp);
- if (ret)
+ err = gfs2_rindex_update(sdp);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
goto out_unlock;
+ }
gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks;
- ret = gfs2_quota_lock_check(ip, &ap);
- if (ret)
+ err = gfs2_quota_lock_check(ip, &ap);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
goto out_unlock;
- ret = gfs2_inplace_reserve(ip, &ap);
- if (ret)
+ }
+ err = gfs2_inplace_reserve(ip, &ap);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
goto out_quota_unlock;
+ }
rblocks = RES_DINODE + ind_blocks;
if (gfs2_is_jdata(ip))
@@ -495,28 +504,38 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
rblocks += RES_STATFS + RES_QUOTA;
rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
}
- ret = gfs2_trans_begin(sdp, rblocks, 0);
- if (ret)
+ err = gfs2_trans_begin(sdp, rblocks, 0);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
goto out_trans_fail;
+ }
+
+ /* Unstuff, if required, and allocate backing blocks for page */
+ if (gfs2_is_stuffed(ip)) {
+ err = gfs2_unstuff_dinode(ip);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
+ goto out_trans_end;
+ }
+ }
lock_page(page);
- ret = -EAGAIN;
/* If truncated, we must retry the operation, we may have raced
* with the glock demotion code.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping)
- goto out_trans_end;
+ if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ ret = VM_FAULT_NOPAGE;
+ goto out_page_locked;
+ }
- /* Unstuff, if required, and allocate backing blocks for page */
- ret = 0;
- if (gfs2_is_stuffed(ip))
- ret = gfs2_unstuff_dinode(ip, page);
- if (ret == 0)
- ret = gfs2_allocate_page_backing(page, length);
+ err = gfs2_allocate_page_backing(page, length);
+ if (err)
+ ret = block_page_mkwrite_return(err);
-out_trans_end:
- if (ret)
+out_page_locked:
+ if (ret != VM_FAULT_LOCKED)
unlock_page(page);
+out_trans_end:
gfs2_trans_end(sdp);
out_trans_fail:
gfs2_inplace_release(ip);
@@ -526,12 +545,12 @@ out_unlock:
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
- if (ret == 0) {
+ if (ret == VM_FAULT_LOCKED) {
set_page_dirty(page);
wait_for_stable_page(page);
}
sb_end_pagefault(inode->i_sb);
- return block_page_mkwrite_return(ret);
+ return ret;
}
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
@@ -911,8 +930,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
current->backing_dev_info = inode_to_bdi(inode);
buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
current->backing_dev_info = NULL;
- if (unlikely(buffered <= 0))
+ if (unlikely(buffered <= 0)) {
+ if (!ret)
+ ret = buffered;
goto out_unlock;
+ }
/*
* We need to ensure that the page cache pages are written to
@@ -959,7 +981,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
gfs2_trans_add_meta(ip->i_gl, dibh);
if (gfs2_is_stuffed(ip)) {
- error = gfs2_unstuff_dinode(ip, NULL);
+ error = gfs2_unstuff_dinode(ip);
if (unlikely(error))
goto out;
}
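The gfs2_page_mkwrite() hunks above untangle a type confusion: a ->page_mkwrite() handler must return a vm_fault_t, not an errno, so the function now keeps int err for internal calls and converts at each failure site. A minimal sketch of that shape; take_locks_and_reserve() is a hypothetical helper:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static int take_locks_and_reserve(struct vm_fault *vmf)
{
        (void)vmf;
        return 0;       /* hypothetical: grab locks, reserve blocks */
}

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
        int err;

        err = take_locks_and_reserve(vmf);
        if (err)
                /* e.g. -ENOMEM -> VM_FAULT_OOM, -EAGAIN -> VM_FAULT_RETRY,
                 * -EFAULT -> VM_FAULT_NOPAGE, else VM_FAULT_SIGBUS */
                return block_page_mkwrite_return(err);

        /* on success the page is left locked and dirty */
        return VM_FAULT_LOCKED;
}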
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ea7fc5c641c7..1f3902ecdded 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -212,8 +212,7 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
spin_lock(&lru_lock);
- list_del(&gl->gl_lru);
- list_add_tail(&gl->gl_lru, &lru_list);
+ list_move_tail(&gl->gl_lru, &lru_list);
if (!test_bit(GLF_LRU, &gl->gl_flags)) {
set_bit(GLF_LRU, &gl->gl_flags);
@@ -582,6 +581,16 @@ out_locked:
spin_unlock(&gl->gl_lockref.lock);
}
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ if (gl == m_ip->i_gl)
+ return true;
+ return false;
+}
+
/**
* do_xmote - Calls the DLM to change the state of a lock
* @gl: The lock state
@@ -671,17 +680,25 @@ skip_inval:
* to see sd_log_error and withdraw, and in the meantime, requeue the
* work for later.
*
+ * We make a special exception for some system glocks, such as the
+ * system statfs inode glock, which needs to be granted before the
+ * gfs2_quotad daemon can exit, and that exit needs to finish before
+ * we can unmount the withdrawn file system.
+ *
* However, if we're just unlocking the lock (say, for unmount, when
* gfs2_gl_hash_clear calls clear_glock) and recovery is complete
* then it's okay to tell dlm to unlock it.
*/
if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
gfs2_withdraw_delayed(sdp);
- if (glock_blocked_by_withdraw(gl)) {
- if (target != LM_ST_UNLOCKED ||
- test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
+ if (glock_blocked_by_withdraw(gl) &&
+ (target != LM_ST_UNLOCKED ||
+ test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+ if (!is_system_glock(gl)) {
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
goto out;
+ } else {
+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
@@ -1466,9 +1483,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
+ spin_unlock(&gl->gl_lockref.lock);
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
+ spin_lock(&gl->gl_lockref.lock);
}
if (gh->gh_flags & GL_NOCACHE)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1775,6 +1794,7 @@ __acquires(&lru_lock)
while(!list_empty(list)) {
gl = list_first_entry(list, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
+ clear_bit(GLF_LRU, &gl->gl_flags);
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
@@ -1820,7 +1840,6 @@ static long gfs2_scan_glock_lru(int nr)
if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
- clear_bit(GLF_LRU, &gl->gl_flags);
freed++;
continue;
}
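The gfs2_glock_dq() hunk above fixes a sleep-under-spinlock bug: wait_on_bit() can block, so gl->gl_lockref.lock is dropped across the wait and retaken afterwards. The same pattern in a generic, hedged sketch; struct obj and OBJ_BUSY are placeholders:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>

struct obj {
        spinlock_t lock;
        unsigned long flags;
};
#define OBJ_BUSY 0

static void wait_until_idle(struct obj *obj)
{
        spin_lock(&obj->lock);
        if (test_bit(OBJ_BUSY, &obj->flags)) {
                /* never sleep with a spinlock held: drop, wait, retake */
                spin_unlock(&obj->lock);
                wait_on_bit(&obj->flags, OBJ_BUSY, TASK_UNINTERRUPTIBLE);
                spin_lock(&obj->lock);
                /* state may have changed while unlocked: revalidate */
        }
        spin_unlock(&obj->lock);
}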
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 454095e9fedf..54d3fbeb3002 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
struct timespec64 atime;
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
- bool is_new = ip->i_inode.i_flags & I_NEW;
+ bool is_new = ip->i_inode.i_state & I_NEW;
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
goto corrupt;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 97d54e581a7b..42c15cfc0821 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
}
/**
- * ail_drain - drain the ail lists after a withdraw
+ * gfs2_ail_drain - drain the ail lists after a withdraw
* @sdp: Pointer to GFS2 superblock
*/
-static void ail_drain(struct gfs2_sbd *sdp)
+void gfs2_ail_drain(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr;
@@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
list_del(&tr->tr_list);
gfs2_trans_free(sdp, tr);
}
+ gfs2_drain_revokes(sdp);
spin_unlock(&sdp->sd_ail_lock);
}
@@ -1162,7 +1163,6 @@ out_withdraw:
if (tr && list_empty(&tr->tr_list))
list_add(&tr->tr_list, &sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
- ail_drain(sdp); /* frees all transactions */
tr = NULL;
goto out_end;
}
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index eea58015710e..fc905c2af53c 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 221e7118cc3b..8ee05d25dfa6 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_write_page(sdp, page);
}
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
struct list_head *head = &sdp->sd_log_revokes;
struct gfs2_bufdata *bd;
@@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
}
}
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+ gfs2_drain_revokes(sdp);
+}
+
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, int pass)
{
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 31b6dd0d2e5d..f707601597dc 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, bool keep_cache);
+extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
return sdp->sd_ldptrs;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index d68184ebbfdd..7c9619997355 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -89,11 +89,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
}
const struct address_space_operations gfs2_meta_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
};
const struct address_space_operations gfs2_rgrp_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
};
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 826f77d9cff5..5f4504dd0875 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -687,6 +687,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
}
iput(pn);
+ pn = NULL;
ip = GFS2_I(sdp->sd_sc_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
&sdp->sd_sc_gh);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 94637c307cc8..be0997e24d60 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -825,7 +825,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
u64 size;
if (gfs2_is_stuffed(ip)) {
- err = gfs2_unstuff_dinode(ip, NULL);
+ err = gfs2_unstuff_dinode(ip);
if (err)
return err;
}
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 3e08027a6c81..f4325b44956d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
return;
+ gfs2_ail_drain(sdp); /* frees all transactions */
inode = sdp->sd_jdesc->jd_inode;
ip = GFS2_I(inode);
i_gl = ip->i_gl;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 3fc5cb346586..4a95a92546a0 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -159,6 +159,7 @@ static int hfs_writepages(struct address_space *mapping,
}
const struct address_space_operations hfs_btree_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
@@ -168,6 +169,7 @@ const struct address_space_operations hfs_btree_aops = {
};
const struct address_space_operations hfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 8ea447e5c470..70e8374ddac4 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -156,6 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping,
}
const struct address_space_operations hfsplus_btree_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
@@ -165,6 +166,7 @@ const struct address_space_operations hfsplus_btree_aops = {
};
const struct address_space_operations hfsplus_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 077c25128eb7..c3a49aacf20a 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -196,6 +196,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
const struct address_space_operations hpfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
.readahead = hpfs_readahead,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 55efd3dd04f6..926eeb9bf4eb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -735,6 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
__SetPageUptodate(page);
error = huge_add_to_page_cache(page, mapping, index);
if (unlikely(error)) {
+ restore_reserve_on_error(h, &pseudo_vma, addr, page);
put_page(page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out;
@@ -1445,7 +1446,7 @@ static int get_hstate_idx(int page_size_log)
* otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
*/
struct file *hugetlb_file_setup(const char *name, size_t size,
- vm_flags_t acctflag, struct user_struct **user,
+ vm_flags_t acctflag, struct ucounts **ucounts,
int creat_flags, int page_size_log)
{
struct inode *inode;
@@ -1457,20 +1458,20 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
if (hstate_idx < 0)
return ERR_PTR(-ENODEV);
- *user = NULL;
+ *ucounts = NULL;
mnt = hugetlbfs_vfsmount[hstate_idx];
if (!mnt)
return ERR_PTR(-ENOENT);
if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
- *user = current_user();
- if (user_shm_lock(size, *user)) {
+ *ucounts = current_ucounts();
+ if (user_shm_lock(size, *ucounts)) {
task_lock(current);
pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
current->comm, current->pid);
task_unlock(current);
} else {
- *user = NULL;
+ *ucounts = NULL;
return ERR_PTR(-EPERM);
}
}
@@ -1497,9 +1498,9 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
iput(inode);
out:
- if (*user) {
- user_shm_unlock(size, *user);
- *user = NULL;
+ if (*ucounts) {
+ user_shm_unlock(size, *ucounts);
+ *ucounts = NULL;
}
return file;
}
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5361a9b4b47b..b3e8624a37d0 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -979,13 +979,16 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
return cwd->wqe->wq == data;
}
+void io_wq_exit_start(struct io_wq *wq)
+{
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+}
+
static void io_wq_exit_workers(struct io_wq *wq)
{
struct callback_head *cb;
int node;
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
-
if (!wq->task)
return;
@@ -1003,13 +1006,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
struct io_wqe *wqe = wq->wqes[node];
io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
- spin_lock_irq(&wq->hash->wait.lock);
- list_del_init(&wq->wqes[node]->wait.entry);
- spin_unlock_irq(&wq->hash->wait.lock);
}
rcu_read_unlock();
io_worker_ref_put(wq);
wait_for_completion(&wq->worker_done);
+
+ for_each_node(node) {
+ spin_lock_irq(&wq->hash->wait.lock);
+ list_del_init(&wq->wqes[node]->wait.entry);
+ spin_unlock_irq(&wq->hash->wait.lock);
+ }
put_task_struct(wq->task);
wq->task = NULL;
}
@@ -1020,8 +1026,6 @@ static void io_wq_destroy(struct io_wq *wq)
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- io_wq_exit_workers(wq);
-
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
struct io_cb_cancel_data match = {
@@ -1036,16 +1040,13 @@ static void io_wq_destroy(struct io_wq *wq)
kfree(wq);
}
-void io_wq_put(struct io_wq *wq)
-{
- if (refcount_dec_and_test(&wq->refs))
- io_wq_destroy(wq);
-}
-
void io_wq_put_and_exit(struct io_wq *wq)
{
+ WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
io_wq_exit_workers(wq);
- io_wq_put(wq);
+ if (refcount_dec_and_test(&wq->refs))
+ io_wq_destroy(wq);
}
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 0e6d310999e8..af2df0680ee2 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -122,7 +122,7 @@ struct io_wq_data {
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-void io_wq_put(struct io_wq *wq);
+void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
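Splitting io_wq_exit_start() out of io_wq_exit_workers() lets the cancellation paths in fs/io_uring.c (next file) flag the wq for exit before waiting on inflight requests, while the actual teardown still happens last. A condensed sketch of the intended ordering, using only the two entry points exported by fs/io-wq.h:

#include "io-wq.h"

static void example_cancel_and_exit(struct io_wq *wq)
{
        io_wq_exit_start(wq);   /* phase 1: set IO_WQ_BIT_EXIT, no new workers */
        /* ... cancel inflight work and wait for requests to drain ... */
        io_wq_put_and_exit(wq); /* phase 2: reap workers, drop the reference */
}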
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5f82954004f6..fa8794c61af7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -783,6 +783,11 @@ struct io_task_work {
task_work_func_t func;
};
+enum {
+ IORING_RSRC_FILE = 0,
+ IORING_RSRC_BUFFER = 1,
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -8228,6 +8233,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
{
int i, ret;
+ imu->acct_pages = 0;
for (i = 0; i < nr_pages; i++) {
if (!PageCompound(pages[i])) {
imu->acct_pages++;
@@ -9039,11 +9045,16 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
struct io_tctx_node *node;
unsigned long index;
- tctx->io_wq = NULL;
xa_for_each(&tctx->xa, index, node)
io_uring_del_task_file(index);
- if (wq)
+ if (wq) {
+ /*
+ * Must be after io_uring_del_task_file() (removes nodes under
+ * uring_lock) to avoid a race with io_uring_try_cancel_iowq().
+ */
+ tctx->io_wq = NULL;
io_wq_put_and_exit(wq);
+ }
}
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
@@ -9078,6 +9089,9 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
if (!current->io_uring)
return;
+ if (tctx->io_wq)
+ io_wq_exit_start(tctx->io_wq);
+
WARN_ON_ONCE(!sqd || sqd->thread != current);
atomic_inc(&tctx->in_idle);
@@ -9112,6 +9126,9 @@ void __io_uring_cancel(struct files_struct *files)
DEFINE_WAIT(wait);
s64 inflight;
+ if (tctx->io_wq)
+ io_wq_exit_start(tctx->io_wq);
+
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
do {
@@ -9659,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+ IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+ IORING_FEAT_RSRC_TAGS;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -9899,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
}
static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned size)
+ unsigned size, unsigned type)
{
struct io_uring_rsrc_update2 up;
@@ -9907,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
- if (!up.nr)
+ if (!up.nr || up.resv)
return -EINVAL;
- return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+ return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int size)
+ unsigned int size, unsigned int type)
{
struct io_uring_rsrc_register rr;
@@ -9924,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
memset(&rr, 0, sizeof(rr));
if (copy_from_user(&rr, arg, size))
return -EFAULT;
- if (!rr.nr)
+ if (!rr.nr || rr.resv || rr.resv2)
return -EINVAL;
- switch (rr.type) {
+ switch (type) {
case IORING_RSRC_FILE:
return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
rr.nr, u64_to_user_ptr(rr.tags));
@@ -9949,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
case IORING_REGISTER_PROBE:
case IORING_REGISTER_PERSONALITY:
case IORING_UNREGISTER_PERSONALITY:
- case IORING_REGISTER_RSRC:
- case IORING_REGISTER_RSRC_UPDATE:
+ case IORING_REGISTER_FILES2:
+ case IORING_REGISTER_FILES_UPDATE2:
+ case IORING_REGISTER_BUFFERS2:
+ case IORING_REGISTER_BUFFERS_UPDATE:
return false;
default:
return true;
@@ -10076,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
case IORING_REGISTER_RESTRICTIONS:
ret = io_register_restrictions(ctx, arg, nr_args);
break;
- case IORING_REGISTER_RSRC:
- ret = io_register_rsrc(ctx, arg, nr_args);
+ case IORING_REGISTER_FILES2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_FILES_UPDATE2:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_BUFFERS2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
break;
- case IORING_REGISTER_RSRC_UPDATE:
- ret = io_register_rsrc_update(ctx, arg, nr_args);
+ case IORING_REGISTER_BUFFERS_UPDATE:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_BUFFER);
break;
default:
ret = -EINVAL;
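The user-visible side of these io_uring hunks: the not-yet-released IORING_REGISTER_RSRC/RSRC_UPDATE opcodes, which carried a type field in the struct, are split into IORING_REGISTER_FILES2/FILES_UPDATE2/BUFFERS2/BUFFERS_UPDATE before the 5.13 ABI freezes, with support advertised through IORING_FEAT_RSRC_TAGS. A hedged raw-syscall sketch, assuming a 5.13-era <linux/io_uring.h> and no liburing:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* tags: one __u64 per file, 0 meaning "untagged" */
static int register_files2(int ring_fd, const int *fds, const __u64 *tags,
                           unsigned int nr)
{
        struct io_uring_rsrc_register rr;

        memset(&rr, 0, sizeof(rr));     /* resv and resv2 must be zero */
        rr.nr = nr;
        rr.data = (unsigned long)fds;
        rr.tags = (unsigned long)tags;
        /* nr_args carries sizeof(rr) so the kernel can version the struct */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_FILES2, &rr, sizeof(rr));
}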
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9023717c5188..0065781935c7 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -640,31 +640,6 @@ out_no_page:
return status;
}
-int
-iomap_set_page_dirty(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
- int newly_dirty;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
-
- /*
- * Lock out page's memcg migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- if (newly_dirty)
- __set_page_dirty(page, mapping, 0);
- unlock_page_memcg(page);
-
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
-}
-EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
-
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct page *page)
{
@@ -684,7 +659,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
if (unlikely(copied < len && !PageUptodate(page)))
return 0;
iomap_set_range_uptodate(page, offset_in_page(pos), len);
- iomap_set_page_dirty(page);
+ __set_page_dirty_nobuffers(page);
return copied;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 6f65bfa9f18d..3663dd5a23bc 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -356,6 +356,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations jfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = jfs_readpage,
.readahead = jfs_readahead,
.writepage = jfs_writepage,
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index d73950fc3d57..26f2aa3586f9 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -17,12 +17,6 @@
#include "kernfs-internal.h"
-static const struct address_space_operations kernfs_aops = {
- .readpage = simple_readpage,
- .write_begin = simple_write_begin,
- .write_end = simple_write_end,
-};
-
static const struct inode_operations kernfs_iops = {
.permission = kernfs_iop_permission,
.setattr = kernfs_iop_setattr,
@@ -203,7 +197,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
{
kernfs_get(kn);
inode->i_private = kn;
- inode->i_mapping->a_ops = &kernfs_aops;
+ inode->i_mapping->a_ops = &ram_aops;
inode->i_op = &kernfs_iops;
inode->i_generation = kernfs_gen(kn);
diff --git a/fs/libfs.c b/fs/libfs.c
index e9b29c6ffccb..51b4de3b3447 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -512,7 +512,7 @@ int simple_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
EXPORT_SYMBOL(simple_setattr);
-int simple_readpage(struct file *file, struct page *page)
+static int simple_readpage(struct file *file, struct page *page)
{
clear_highpage(page);
flush_dcache_page(page);
@@ -520,7 +520,6 @@ int simple_readpage(struct file *file, struct page *page)
unlock_page(page);
return 0;
}
-EXPORT_SYMBOL(simple_readpage);
int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -568,7 +567,7 @@ EXPORT_SYMBOL(simple_write_begin);
*
* Use *ONLY* with simple_readpage()
*/
-int simple_write_end(struct file *file, struct address_space *mapping,
+static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
@@ -597,7 +596,17 @@ int simple_write_end(struct file *file, struct address_space *mapping,
return copied;
}
-EXPORT_SYMBOL(simple_write_end);
+
+/*
+ * Provides ramfs-style behavior: data in the pagecache, but no writeback.
+ */
+const struct address_space_operations ram_aops = {
+ .readpage = simple_readpage,
+ .write_begin = simple_write_begin,
+ .write_end = simple_write_end,
+ .set_page_dirty = __set_page_dirty_no_writeback,
+};
+EXPORT_SYMBOL(ram_aops);
/*
* the inodes created here are not hashed. If you use iunique to generate
@@ -1162,22 +1171,6 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL(noop_fsync);
-int noop_set_page_dirty(struct page *page)
-{
- /*
- * Unlike __set_page_dirty_no_writeback that handles dirty page
- * tracking in the page object, dax does all dirty tracking in
- * the inode address_space in response to mkwrite faults. In the
- * dax case we only need to worry about potentially dirty CPU
- * caches, not dirty page cache pages to write back.
- *
- * This callback is defined to prevent fallback to
- * __set_page_dirty_buffers() in set_page_dirty().
- */
- return 0;
-}
-EXPORT_SYMBOL_GPL(noop_set_page_dirty);
-
void noop_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
@@ -1208,19 +1201,10 @@ void kfree_link(void *p)
}
EXPORT_SYMBOL(kfree_link);
-/*
- * nop .set_page_dirty method so that people can use .page_mkwrite on
- * anon inodes.
- */
-static int anon_set_page_dirty(struct page *page)
-{
- return 0;
-};
-
struct inode *alloc_anon_inode(struct super_block *s)
{
static const struct address_space_operations anon_aops = {
- .set_page_dirty = anon_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
};
struct inode *inode = new_inode_pseudo(s);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index a532a99bbe81..a71f1cf894b9 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -442,6 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations minix_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = minix_readpage,
.writepage = minix_writepage,
.write_begin = minix_write_begin,
diff --git a/fs/namespace.c b/fs/namespace.c
index c3f1a78ba369..ab4174a3c802 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3464,9 +3464,10 @@ out_type:
return ret;
}
-#define FSMOUNT_VALID_FLAGS \
- (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
- MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME)
+#define FSMOUNT_VALID_FLAGS \
+ (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
+ MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
+ MOUNT_ATTR_NOSYMFOLLOW)
#define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
@@ -3487,6 +3488,8 @@ static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
mnt_flags |= MNT_NOEXEC;
if (attr_flags & MOUNT_ATTR_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
+ if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
+ mnt_flags |= MNT_NOSYMFOLLOW;
return mnt_flags;
}
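For reference, the newly accepted flag can be applied from userspace with mount_setattr(2). A minimal sketch, assuming headers that define MOUNT_ATTR_NOSYMFOLLOW and a kernel exposing the mount_setattr syscall:

#include <fcntl.h>		/* AT_FDCWD */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

/* Disallow following symlinks on the mount at @path. */
static int set_nosymfollow(const char *path)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set = MOUNT_ATTR_NOSYMFOLLOW;

	return syscall(__NR_mount_setattr, AT_FDCWD, path, 0 /* flags */,
		       &attr, sizeof(attr));
}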
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index 578112713703..b4db21022cb4 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NETFS_SUPPORT
- tristate "Support for network filesystem high-level I/O"
+ tristate
help
This option enables support for network filesystems, including
helpers for high-level buffered I/O, abstracting out read
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 193841d03de0..0b6cd3b8734c 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1011,12 +1011,42 @@ out:
}
EXPORT_SYMBOL(netfs_readpage);
-static void netfs_clear_thp(struct page *page)
+/**
+ * netfs_skip_page_read - prep a page for writing without reading first
+ * @page: page being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full page write
+ * - write that lies in a page that is completely beyond EOF
+ * - write that covers the page from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the page and return true. Otherwise, return false.
+ */
+static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
{
- unsigned int i;
+ struct inode *inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ size_t offset = offset_in_thp(page, pos);
+
+ /* Full page write */
+ if (offset == 0 && len >= thp_size(page))
+ return true;
+
+ /* pos beyond last page in the file */
+ if (pos - offset >= i_size)
+ goto zero_out;
+
+ /* Write that covers from the start of the page to EOF or beyond */
+ if (offset == 0 && (pos + len) >= i_size)
+ goto zero_out;
- for (i = 0; i < thp_nr_pages(page); i++)
- clear_highpage(page + i);
+ return false;
+zero_out:
+ zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+ return true;
}
/**
@@ -1024,7 +1054,7 @@ static void netfs_clear_thp(struct page *page)
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
- * @len: The length of the write in this page
+ * @len: The length of the write (may extend beyond the end of the page chosen)
* @flags: AOP_* flags
* @_page: Where to put the resultant page
* @_fsdata: Place for the netfs to store a cookie
@@ -1061,14 +1091,12 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = file_inode(file);
unsigned int debug_index = 0;
pgoff_t index = pos >> PAGE_SHIFT;
- int pos_in_page = pos & ~PAGE_MASK;
- loff_t size;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
- page = grab_cache_page_write_begin(mapping, index, 0);
+ page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
@@ -1090,13 +1118,8 @@ retry:
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
- size = i_size_read(inode);
if (!ops->is_cache_enabled(inode) &&
- ((pos_in_page == 0 && len == thp_size(page)) ||
- (pos >= size) ||
- (pos_in_page == 0 && (pos + len) >= size))) {
- netfs_clear_thp(page);
- SetPageUptodate(page);
+ netfs_skip_page_read(page, pos, len)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_page_no_wait;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index cfeaadf56bf0..330f65727c45 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
if (cl_init->hostname == NULL) {
WARN_ON(1);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* see if the client already exists */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index d158a500c25c..d2103852475f 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
if (unlikely(!p))
goto out_err;
fl->fh_array[i]->size = be32_to_cpup(p++);
- if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
printk(KERN_ERR "NFS: Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
goto out_err;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 93e60e921f92..bc0c698f3350 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = {
.set = param_set_nfs_timeout,
.get = param_get_nfs_timeout,
};
-#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int)
module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 065cb04222a1..543d916f79ab 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,7 @@ struct nfs4_exception {
struct inode *inode;
nfs4_stateid *stateid;
long timeout;
+ unsigned char task_is_privileged : 1;
unsigned char delay : 1,
recovering : 1,
retry : 1;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 889a9f4c0310..42719384e25f 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
*/
nfs_mark_client_ready(clp, -EPERM);
}
- nfs_put_client(clp);
clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+ nfs_put_client(clp);
return old;
error:
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 57b3821d975a..a1e5c6b85ded 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
case SEEK_HOLE:
case SEEK_DATA:
ret = nfs42_proc_llseek(filep, offset, whence);
- if (ret != -ENOTSUPP)
+ if (ret != -EOPNOTSUPP)
return ret;
fallthrough;
default:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 87d04f2c9385..e653654c10bc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
ret = nfs4_wait_clnt_recover(clp);
if (test_bit(NFS_MIG_FAILED, &server->mig_status))
return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -1706,7 +1710,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!signal_pending(current)) {
+ if (!fatal_signal_pending(current)) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -3487,7 +3491,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+ server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
- if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
- server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
memcpy(server->attr_bitmask_nl, res.attr_bitmask,
sizeof(server->attr_bitmask));
server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
do {
err = __nfs4_proc_set_acl(inode, buf, buflen);
trace_nfs4_set_acl(inode, err);
+ if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+ /*
+ * no need to retry since the kernel
+ * isn't involved in encoding the ACEs.
+ */
+ err = -EINVAL;
+ break;
+ }
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
} while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
struct nfs4_exception exception = {
.inode = data->inode,
.stateid = &data->stateid,
+ .task_is_privileged = data->args.seq_args.sa_privileged,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
- nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
nfs4_state_protect(server->nfs_client,
NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
}
}
+ if (!data->inode)
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 0);
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
&task_setup_data.rpc_client, &msg);
dprintk("--> %s\n", __func__);
+ lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!sync) {
- lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!lrp->inode) {
nfs4_layoutreturn_release(lrp);
return -EAGAIN;
}
task_setup_data.flags |= RPC_TASK_ASYNC;
}
- nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+ if (!lrp->inode)
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index eb1ef3462e84..ccef43e02b48 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
{ O_NOATIME, "O_NOATIME" }, \
{ O_CLOEXEC, "O_CLOEXEC" })
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
#define show_fmode_flags(mode) \
__print_flags(mode, "|", \
{ ((__force unsigned long)FMODE_READ), "READ" }, \
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6c20b28d9d7c..cf9cc62ec48e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev = NULL;
unsigned int size;
- if (mirror->pg_count != 0) {
- prev = nfs_list_entry(mirror->pg_list.prev);
- } else {
+ if (list_empty(&mirror->pg_list)) {
if (desc->pg_ops->pg_init)
desc->pg_ops->pg_init(desc, req);
if (desc->pg_error < 0)
return 0;
mirror->pg_base = req->wb_pgbase;
- }
+ mirror->pg_count = 0;
+ mirror->pg_recoalesce = 0;
+ } else
+ prev = nfs_list_entry(mirror->pg_list.prev);
if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
@@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
if (!list_empty(&mirror->pg_list)) {
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
- else
+ if (list_empty(&mirror->pg_list))
mirror->pg_bytes_written += mirror->pg_count;
}
- if (list_empty(&mirror->pg_list)) {
- mirror->pg_count = 0;
- mirror->pg_base = 0;
- }
}
static void
@@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
do {
list_splice_init(&mirror->pg_list, &head);
- mirror->pg_bytes_written -= mirror->pg_count;
- mirror->pg_count = 0;
- mirror->pg_base = 0;
- mirror->pg_recoalesce = 0;
while (!list_empty(&head)) {
struct nfs_page *req;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 03e0b34c4a64..2c01ee805306 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
{
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
LIST_HEAD(tmp_list);
const struct cred *cred;
nfs4_stateid stateid;
@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
}
valid_layout = pnfs_layout_is_valid(lo);
pnfs_clear_layoutcommit(ino, &tmp_list);
- pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+ pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
- if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
- struct pnfs_layout_range range = {
- .iomode = IOMODE_ANY,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
+ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
- }
/* Don't send a LAYOUTRETURN if list was initially empty */
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
- u64 rd_size = req->wb_bytes;
+ u64 rd_size;
pnfs_generic_pg_check_layout(pgio);
pnfs_generic_pg_check_range(pgio, req);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 19a212f9725d..fe58525cfed4 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = {
.set = param_set_portnr,
.get = param_get_uint,
};
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int)
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c0361ce45f62..97769fe4d588 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -434,6 +434,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.writepage = nilfs_mdt_write_page,
};
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 303d71430bdd..68e8d61e28dd 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -19,19 +19,6 @@
/* /sys/fs/<nilfs>/ */
static struct kset *nilfs_kset;
-#define NILFS_SHOW_TIME(time_t_val, buf) ({ \
- struct tm res; \
- int count = 0; \
- time64_to_tm(time_t_val, 0, &res); \
- res.tm_year += 1900; \
- res.tm_mon += 1; \
- count = scnprintf(buf, PAGE_SIZE, \
- "%ld-%.2d-%.2d %.2d:%.2d:%.2d\n", \
- res.tm_year, res.tm_mon, res.tm_mday, \
- res.tm_hour, res.tm_min, res.tm_sec);\
- count; \
-})
-
#define NILFS_DEV_INT_GROUP_OPS(name, parent_name) \
static ssize_t nilfs_##name##_attr_show(struct kobject *kobj, \
struct attribute *attr, char *buf) \
@@ -576,7 +563,7 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr,
ctime = nilfs->ns_ctime;
up_read(&nilfs->ns_segctor_sem);
- return NILFS_SHOW_TIME(ctime, buf);
+ return sysfs_emit(buf, "%ptTs\n", &ctime);
}
static ssize_t
@@ -604,7 +591,7 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr,
nongc_ctime = nilfs->ns_nongc_ctime;
up_read(&nilfs->ns_segctor_sem);
- return NILFS_SHOW_TIME(nongc_ctime, buf);
+ return sysfs_emit(buf, "%ptTs\n", &nongc_ctime);
}
static ssize_t
@@ -724,7 +711,7 @@ nilfs_superblock_sb_write_time_show(struct nilfs_superblock_attr *attr,
sbwtime = nilfs->ns_sbwtime;
up_read(&nilfs->ns_sem);
- return NILFS_SHOW_TIME(sbwtime, buf);
+ return sysfs_emit(buf, "%ptTs\n", &sbwtime);
}
static ssize_t
@@ -1053,6 +1040,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
nilfs_sysfs_delete_superblock_group(nilfs);
nilfs_sysfs_delete_segctor_group(nilfs);
kobject_del(&nilfs->ns_dev_kobj);
+ kobject_put(&nilfs->ns_dev_kobj);
kfree(nilfs->ns_dev_subgroups);
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 71fefb30e015..64864fb40b40 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
* events generated by the listener process itself, without disclosing
* the pids of other processes.
*/
- if (!capable(CAP_SYS_ADMIN) &&
+ if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
task_tgid(current) != event->pid)
metadata.pid = 0;
- if (path && path->mnt && path->dentry) {
+ /*
+ * For now, fid mode is required for an unprivileged listener and
+ * fid mode does not report fd in events. Keep this check anyway
+ * for safety in case the fid mode requirement is relaxed in the future
+ * to allow an unprivileged listener to get events with no fd and no fid.
+ */
+ if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+ path && path->mnt && path->dentry) {
fd = create_fd(group, path, &f);
if (fd < 0)
return fd;
@@ -464,7 +471,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
info_type, fanotify_info_name(info),
info->name_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
@@ -512,7 +519,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fanotify_event_object_fh(event),
info_type, dot, dot_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
@@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
int f_flags, fd;
unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
unsigned int class = flags & FANOTIFY_CLASS_BITS;
+ unsigned int internal_flags = 0;
pr_debug("%s: flags=%x event_f_flags=%x\n",
__func__, flags, event_f_flags);
@@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
*/
if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
return -EPERM;
+
+ /*
+ * Setting the internal flag FANOTIFY_UNPRIV on the group
+ * prevents setting mount/filesystem marks on this group and
+ * prevents reporting pid and open fd in events.
+ */
+ internal_flags |= FANOTIFY_UNPRIV;
}
#ifdef CONFIG_AUDITSYSCALL
@@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
goto out_destroy_group;
}
- group->fanotify_data.flags = flags;
+ group->fanotify_data.flags = flags | internal_flags;
group->memcg = get_mem_cgroup_from_mm(current->mm);
group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
@@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
group = f.file->private_data;
/*
- * An unprivileged user is not allowed to watch a mount point nor
- * a filesystem.
+ * An unprivileged user is not allowed to setup mount nor filesystem
+ * marks. This also includes setting up such marks by a group that
+ * was initialized by an unprivileged user.
*/
ret = -EPERM;
- if (!capable(CAP_SYS_ADMIN) &&
+ if ((!capable(CAP_SYS_ADMIN) ||
+ FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
mark_type != FAN_MARK_INODE)
goto fput_and_out;
@@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void)
max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
FANOTIFY_DEFAULT_MAX_USER_MARKS);
+ BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
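Taken together, these hunks let an unprivileged process create a fid-mode group while the kernel suppresses pid and fd reporting and rejects mount/filesystem marks. A hedged sketch of the resulting semantics (paths are illustrative):

#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	/* fid mode is required when the caller lacks CAP_SYS_ADMIN */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* inode marks remain allowed for an unprivileged group... */
	if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN, AT_FDCWD, "/tmp") < 0)
		perror("fanotify_mark(inode)");

	/* ...but mount marks now fail with EPERM */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT, FAN_OPEN,
			  AT_FDCWD, "/tmp") < 0)
		perror("fanotify_mark(mount)");

	return 0;
}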
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index a712b2aaa9ac..57f0d5d9f934 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
struct fsnotify_group *group = f->private_data;
seq_printf(m, "fanotify flags:%x event-flags:%x\n",
- group->fanotify_data.flags,
+ group->fanotify_data.flags & FANOTIFY_INIT_FLAGS,
group->fanotify_data.f_flags);
show_fdinfo(m, f, fanotify_fdinfo);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index f5c058b3192c..4474adb393ca 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -477,7 +477,7 @@ err_corrupt_attr:
}
file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
le16_to_cpu(attr->data.resident.value_offset));
- p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
+ p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length);
if (p2 < (u8*)attr || p2 > p)
goto err_corrupt_attr;
/* This attribute is ok, but is it in the $Extend directory? */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1294925ac94a..68d11c295dd3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -632,8 +632,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
}
if (PageUptodate(page)) {
- if (!buffer_uptodate(bh))
- set_buffer_uptodate(bh);
+ set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_new(bh) &&
ocfs2_should_read_blk(inode, page, block_start) &&
@@ -2454,6 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations ocfs2_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = ocfs2_readpage,
.readahead = ocfs2_readahead,
.writepage = ocfs2_writepage,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index e829c2595543..f89ffcbd585f 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1442,8 +1442,6 @@ void o2hb_init(void)
for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
INIT_LIST_HEAD(&o2hb_live_slots[i]);
- INIT_LIST_HEAD(&o2hb_node_events);
-
memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
memset(o2hb_region_bitmap, 0, sizeof(o2hb_region_bitmap));
memset(o2hb_live_region_bitmap, 0, sizeof(o2hb_live_region_bitmap));
@@ -1598,12 +1596,13 @@ static ssize_t o2hb_region_start_block_store(struct config_item *item,
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long long tmp;
char *p = (char *)page;
+ ssize_t ret;
if (reg->hr_bdev)
return -EINVAL;
- tmp = simple_strtoull(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
+ ret = kstrtoull(p, 0, &tmp);
+ if (ret)
return -EINVAL;
reg->hr_start_block = tmp;
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index bb82e6b1ff4e..625c92521416 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -824,7 +824,7 @@ static void __exit exit_o2nm(void)
static int __init init_o2nm(void)
{
- int ret = -1;
+ int ret;
o2hb_init();
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 4960a6de768d..9b88219febb5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2977,7 +2977,7 @@ static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
enum dlm_lockres_list idx;
- struct list_head *queue = &res->granted;
+ struct list_head *queue;
struct dlm_lock *lock;
int noderef;
u8 nodenum = O2NM_MAX_NODES;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f17c3d33fb18..775657943057 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1856,6 +1856,45 @@ out:
}
/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zeroing starts; it will be rounded up to the
+ * next block boundary.
+ * len: length to zero; it will be trimmed to the end of the current
+ * cluster if "start + len" extends past it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+ u64 start, u64 len)
+{
+ int ret;
+ u64 start_block, end_block, nr_blocks;
+ u64 p_block, offset;
+ u32 cluster, p_cluster, nr_clusters;
+ struct super_block *sb = inode->i_sb;
+ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+ if (start + len < end)
+ end = start + len;
+
+ start_block = ocfs2_blocks_for_bytes(sb, start);
+ end_block = ocfs2_blocks_for_bytes(sb, end);
+ nr_blocks = end_block - start_block;
+ if (!nr_blocks)
+ return 0;
+
+ cluster = ocfs2_bytes_to_clusters(sb, start);
+ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+ &nr_clusters, NULL);
+ if (ret)
+ return ret;
+ if (!p_cluster)
+ return 0;
+
+ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
+/*
* Parts of this function taken from xfs_change_file_space()
*/
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
{
int ret;
s64 llen;
- loff_t size;
+ loff_t size, orig_isize;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *di_bh = NULL;
handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
+ orig_isize = i_size_read(inode);
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
- sr->l_start += i_size_read(inode);
+ sr->l_start += orig_isize;
break;
default:
ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
default:
ret = -EINVAL;
}
+
+ /* zeroout eof blocks in the cluster. */
+ if (!ret && change_size && orig_isize < size) {
+ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+ size - orig_isize);
+ if (!ret)
+ i_size_write(inode, size);
+ }
up_write(&OCFS2_I(inode)->ip_alloc_sem);
if (ret) {
mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- if (change_size && i_size_read(inode) < size)
- i_size_write(inode, size);
-
inode->i_ctime = inode->i_mtime = current_time(inode);
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index 90b8d300c1ee..de56e6231af8 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -326,11 +326,7 @@ static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n",
p->fe_ino, p->fe_done,
ocfs2_filecheck_error(p->fe_status));
- if (ret < 0) {
- total = ret;
- break;
- }
- if (ret == remain) {
+ if (ret >= remain) {
/* snprintf() didn't fit */
total = -E2BIG;
break;
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index d50e8b8dfea4..16f1bfc407f2 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -500,11 +500,7 @@ static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
ret = snprintf(buf, remain, "%s\n",
p->sp_name);
- if (ret < 0) {
- total = ret;
- break;
- }
- if (ret == remain) {
+ if (ret >= remain) {
/* snprintf() didn't fit */
total = -E2BIG;
break;
@@ -531,7 +527,7 @@ static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
if (active_stack) {
ret = snprintf(buf, PAGE_SIZE, "%s\n",
active_stack->sp_name);
- if (ret == PAGE_SIZE)
+ if (ret >= PAGE_SIZE)
ret = -E2BIG;
}
spin_unlock(&ocfs2_stack_lock);
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 11e733aab25d..89725b15a64b 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -372,6 +372,7 @@ const struct inode_operations omfs_file_inops = {
};
const struct address_space_operations omfs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = omfs_readpage,
.readahead = omfs_readahead,
.writepage = omfs_writepage,
diff --git a/fs/open.c b/fs/open.c
index e53af13b5835..53bc0573c0ec 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1002,12 +1002,20 @@ inline struct open_how build_open_how(int flags, umode_t mode)
inline int build_open_flags(const struct open_how *how, struct open_flags *op)
{
- int flags = how->flags;
+ u64 flags = how->flags;
+ u64 strip = FMODE_NONOTIFY | O_CLOEXEC;
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
- /* Must never be set by userspace */
- flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
+ BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS),
+ "struct open_flags doesn't yet handle flags > 32 bits");
+
+ /*
+ * Strip flags that either shouldn't be set by userspace, like
+ * FMODE_NONOTIFY, or that aren't relevant in determining struct
+ * open_flags, like O_CLOEXEC.
+ */
+ flags &= ~strip;
/*
* Older syscalls implicitly clear all of the invalid flags or argument
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7ec59171f197..ee0ce8cecc4a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -284,7 +284,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
collect_sigign_sigcatch(p, &ignored, &caught);
num_threads = get_nr_threads(p);
rcu_read_lock(); /* FIXME: is this correct? */
- qsize = atomic_read(&__task_cred(p)->user->sigpending);
+ qsize = get_ucounts_value(task_ucounts(p), UCOUNT_RLIMIT_SIGPENDING);
rcu_read_unlock();
qlim = task_rlimit(p, RLIMIT_SIGPENDING);
unlock_task_sighand(p, &flags);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3851bfcdba56..9cbd915025ad 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2674,6 +2674,13 @@ out:
}
#ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+ return 0;
+}
+
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
@@ -2703,6 +2710,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
void *page;
int rv;
+ /* A task may only write when it was the opener. */
+ if (file->private_data != current->mm)
+ return -EPERM;
+
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (!task) {
@@ -2750,9 +2761,11 @@ out:
}
static const struct file_operations proc_pid_attr_operations = {
+ .open = proc_pid_attr_open,
.read = proc_pid_attr_read,
.write = proc_pid_attr_write,
.llseek = generic_file_llseek,
+ .release = mem_release,
};
#define LSM_DIR_OPS(LSM) \
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index 8468baee951d..f32878d9a39f 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -16,7 +16,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
get_avenrun(avnrun, FIXED_1/200, 0);
- seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
+ seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n",
LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index f25e8531fd27..6561a06ef905 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -200,8 +200,8 @@ static int show_stat(struct seq_file *p, void *v)
"\nctxt %llu\n"
"btime %llu\n"
"processes %lu\n"
- "procs_running %lu\n"
- "procs_blocked %lu\n",
+ "procs_running %u\n"
+ "procs_blocked %u\n",
nr_context_switches(),
(unsigned long long)boottime.tv_sec,
total_forks,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fc9784544b24..66965ad88d8b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1047,7 +1047,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
return false;
if (!is_cow_mapping(vma->vm_flags))
return false;
- if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+ if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
return false;
page = vm_normal_page(vma, addr, pte);
if (!page)
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 8adabde685f1..328da35da390 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -173,6 +173,7 @@ config PSTORE_BLK
tristate "Log panic/oops to a block device"
depends on PSTORE
depends on BLOCK
+ depends on BROKEN
select PSTORE_ZONE
default n
help
diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
index 4bb8a344957a..04ce58c939a0 100644
--- a/fs/pstore/blk.c
+++ b/fs/pstore/blk.c
@@ -8,15 +8,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "../../block/blk.h"
#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pstore_blk.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init_syscalls.h>
#include <linux/mount.h>
-#include <linux/uio.h>
static long kmsg_size = CONFIG_PSTORE_BLK_KMSG_SIZE;
module_param(kmsg_size, long, 0400);
@@ -57,27 +58,7 @@ MODULE_PARM_DESC(best_effort, "use best effort to write (i.e. do not require sto
/*
* blkdev - the block device to use for pstore storage
- *
- * Usually, this will be a partition of a block device.
- *
- * blkdev accepts the following variants:
- * 1) <hex_major><hex_minor> device number in hexadecimal representation,
- * with no leading 0x, for example b302.
- * 2) /dev/<disk_name> represents the device number of disk
- * 3) /dev/<disk_name><decimal> represents the device number
- * of partition - device number of disk plus the partition number
- * 4) /dev/<disk_name>p<decimal> - same as the above, that form is
- * used when disk name of partitioned disk ends on a digit.
- * 5) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the
- * unique id of a partition if the partition table provides it.
- * The UUID may be either an EFI/GPT UUID, or refer to an MSDOS
- * partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero-
- * filled hex representation of the 32-bit "NT disk signature", and PP
- * is a zero-filled hex representation of the 1-based partition number.
- * 6) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to
- * a partition with a known unique id.
- * 7) <major>:<minor> major and minor number of the device separated by
- * a colon.
+ * See Documentation/admin-guide/pstore-blk.rst for details.
*/
static char blkdev[80] = CONFIG_PSTORE_BLK_BLKDEV;
module_param_string(blkdev, blkdev, 80, 0400);
@@ -88,14 +69,8 @@ MODULE_PARM_DESC(blkdev, "block device for pstore storage");
* during the register/unregister functions.
*/
static DEFINE_MUTEX(pstore_blk_lock);
-static struct block_device *psblk_bdev;
-static struct pstore_zone_info *pstore_zone_info;
-
-struct bdev_info {
- dev_t devt;
- sector_t nr_sects;
- sector_t start_sect;
-};
+static struct file *psblk_file;
+static struct pstore_device_info *pstore_device_info;
#define check_size(name, alignsize) ({ \
long _##name_ = (name); \
@@ -108,57 +83,63 @@ struct bdev_info {
_##name_; \
})
+#define verify_size(name, alignsize, enabled) { \
+ long _##name_; \
+ if (enabled) \
+ _##name_ = check_size(name, alignsize); \
+ else \
+ _##name_ = 0; \
+ /* Synchronize module parameters with results. */ \
+ name = _##name_ / 1024; \
+ dev->zone.name = _##name_; \
+}
+
static int __register_pstore_device(struct pstore_device_info *dev)
{
int ret;
lockdep_assert_held(&pstore_blk_lock);
- if (!dev || !dev->total_size || !dev->read || !dev->write)
+ if (!dev) {
+ pr_err("NULL device info\n");
+ return -EINVAL;
+ }
+ if (!dev->zone.total_size) {
+ pr_err("zero sized device\n");
return -EINVAL;
+ }
+ if (!dev->zone.read) {
+ pr_err("no read handler for device\n");
+ return -EINVAL;
+ }
+ if (!dev->zone.write) {
+ pr_err("no write handler for device\n");
+ return -EINVAL;
+ }
/* someone already registered before */
- if (pstore_zone_info)
+ if (pstore_device_info)
return -EBUSY;
- pstore_zone_info = kzalloc(sizeof(struct pstore_zone_info), GFP_KERNEL);
- if (!pstore_zone_info)
- return -ENOMEM;
-
/* zero means not limit on which backends to attempt to store. */
if (!dev->flags)
dev->flags = UINT_MAX;
-#define verify_size(name, alignsize, enabled) { \
- long _##name_; \
- if (enabled) \
- _##name_ = check_size(name, alignsize); \
- else \
- _##name_ = 0; \
- name = _##name_ / 1024; \
- pstore_zone_info->name = _##name_; \
- }
-
+ /* Copy in module parameters. */
verify_size(kmsg_size, 4096, dev->flags & PSTORE_FLAGS_DMESG);
verify_size(pmsg_size, 4096, dev->flags & PSTORE_FLAGS_PMSG);
verify_size(console_size, 4096, dev->flags & PSTORE_FLAGS_CONSOLE);
verify_size(ftrace_size, 4096, dev->flags & PSTORE_FLAGS_FTRACE);
-#undef verify_size
-
- pstore_zone_info->total_size = dev->total_size;
- pstore_zone_info->max_reason = max_reason;
- pstore_zone_info->read = dev->read;
- pstore_zone_info->write = dev->write;
- pstore_zone_info->erase = dev->erase;
- pstore_zone_info->panic_write = dev->panic_write;
- pstore_zone_info->name = KBUILD_MODNAME;
- pstore_zone_info->owner = THIS_MODULE;
-
- ret = register_pstore_zone(pstore_zone_info);
- if (ret) {
- kfree(pstore_zone_info);
- pstore_zone_info = NULL;
- }
+ dev->zone.max_reason = max_reason;
+
+ /* Initialize required zone ownership details. */
+ dev->zone.name = KBUILD_MODNAME;
+ dev->zone.owner = THIS_MODULE;
+
+ ret = register_pstore_zone(&dev->zone);
+ if (ret == 0)
+ pstore_device_info = dev;
+
return ret;
}
/**
@@ -185,10 +166,9 @@ EXPORT_SYMBOL_GPL(register_pstore_device);
static void __unregister_pstore_device(struct pstore_device_info *dev)
{
lockdep_assert_held(&pstore_blk_lock);
- if (pstore_zone_info && pstore_zone_info->read == dev->read) {
- unregister_pstore_zone(pstore_zone_info);
- kfree(pstore_zone_info);
- pstore_zone_info = NULL;
+ if (pstore_device_info && pstore_device_info == dev) {
+ unregister_pstore_zone(&dev->zone);
+ pstore_device_info = NULL;
}
}
@@ -205,204 +185,59 @@ void unregister_pstore_device(struct pstore_device_info *dev)
}
EXPORT_SYMBOL_GPL(unregister_pstore_device);
-/**
- * psblk_get_bdev() - open block device
- *
- * @holder: Exclusive holder identifier
- * @info: Information about bdev to fill in
- *
- * Return: pointer to block device on success and others on error.
- *
- * On success, the returned block_device has reference count of one.
- */
-static struct block_device *psblk_get_bdev(void *holder,
- struct bdev_info *info)
-{
- struct block_device *bdev = ERR_PTR(-ENODEV);
- fmode_t mode = FMODE_READ | FMODE_WRITE;
- sector_t nr_sects;
-
- lockdep_assert_held(&pstore_blk_lock);
-
- if (pstore_zone_info)
- return ERR_PTR(-EBUSY);
-
- if (!blkdev[0])
- return ERR_PTR(-ENODEV);
-
- if (holder)
- mode |= FMODE_EXCL;
- bdev = blkdev_get_by_path(blkdev, mode, holder);
- if (IS_ERR(bdev)) {
- dev_t devt;
-
- devt = name_to_dev_t(blkdev);
- if (devt == 0)
- return ERR_PTR(-ENODEV);
- bdev = blkdev_get_by_dev(devt, mode, holder);
- if (IS_ERR(bdev))
- return bdev;
- }
-
- nr_sects = bdev_nr_sectors(bdev);
- if (!nr_sects) {
- pr_err("not enough space for '%s'\n", blkdev);
- blkdev_put(bdev, mode);
- return ERR_PTR(-ENOSPC);
- }
-
- if (info) {
- info->devt = bdev->bd_dev;
- info->nr_sects = nr_sects;
- info->start_sect = get_start_sect(bdev);
- }
-
- return bdev;
-}
-
-static void psblk_put_bdev(struct block_device *bdev, void *holder)
-{
- fmode_t mode = FMODE_READ | FMODE_WRITE;
-
- lockdep_assert_held(&pstore_blk_lock);
-
- if (!bdev)
- return;
-
- if (holder)
- mode |= FMODE_EXCL;
- blkdev_put(bdev, mode);
-}
-
static ssize_t psblk_generic_blk_read(char *buf, size_t bytes, loff_t pos)
{
- struct block_device *bdev = psblk_bdev;
- struct file file;
- struct kiocb kiocb;
- struct iov_iter iter;
- struct kvec iov = {.iov_base = buf, .iov_len = bytes};
-
- if (!bdev)
- return -ENODEV;
-
- memset(&file, 0, sizeof(struct file));
- file.f_mapping = bdev->bd_inode->i_mapping;
- file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
- file.f_inode = bdev->bd_inode;
- file_ra_state_init(&file.f_ra, file.f_mapping);
-
- init_sync_kiocb(&kiocb, &file);
- kiocb.ki_pos = pos;
- iov_iter_kvec(&iter, READ, &iov, 1, bytes);
-
- return generic_file_read_iter(&kiocb, &iter);
+ return kernel_read(psblk_file, buf, bytes, &pos);
}
static ssize_t psblk_generic_blk_write(const char *buf, size_t bytes,
loff_t pos)
{
- struct block_device *bdev = psblk_bdev;
- struct iov_iter iter;
- struct kiocb kiocb;
- struct file file;
- ssize_t ret;
- struct kvec iov = {.iov_base = (void *)buf, .iov_len = bytes};
-
- if (!bdev)
- return -ENODEV;
-
/* Console/Ftrace backend may handle buffer until flush dirty zones */
if (in_interrupt() || irqs_disabled())
return -EBUSY;
-
- memset(&file, 0, sizeof(struct file));
- file.f_mapping = bdev->bd_inode->i_mapping;
- file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
- file.f_inode = bdev->bd_inode;
-
- init_sync_kiocb(&kiocb, &file);
- kiocb.ki_pos = pos;
- iov_iter_kvec(&iter, WRITE, &iov, 1, bytes);
-
- inode_lock(bdev->bd_inode);
- ret = generic_write_checks(&kiocb, &iter);
- if (ret > 0)
- ret = generic_perform_write(&file, &iter, pos);
- inode_unlock(bdev->bd_inode);
-
- if (likely(ret > 0)) {
- const struct file_operations f_op = {.fsync = blkdev_fsync};
-
- file.f_op = &f_op;
- kiocb.ki_pos += ret;
- ret = generic_write_sync(&kiocb, ret);
- }
- return ret;
+ return kernel_write(psblk_file, buf, bytes, &pos);
}
/*
* This takes its configuration only from the module parameters now.
- * See psblk_get_bdev() and blkdev.
*/
-static int __register_pstore_blk(void)
+static int __register_pstore_blk(struct pstore_device_info *dev,
+ const char *devpath)
{
- char bdev_name[BDEVNAME_SIZE];
- struct block_device *bdev;
- struct pstore_device_info dev;
- struct bdev_info binfo;
- void *holder = blkdev;
+ struct inode *inode;
int ret = -ENODEV;
lockdep_assert_held(&pstore_blk_lock);
- /* hold bdev exclusively */
- memset(&binfo, 0, sizeof(binfo));
- bdev = psblk_get_bdev(holder, &binfo);
- if (IS_ERR(bdev)) {
- pr_err("failed to open '%s'!\n", blkdev);
- return PTR_ERR(bdev);
+ psblk_file = filp_open(devpath, O_RDWR | O_DSYNC | O_NOATIME | O_EXCL, 0);
+ if (IS_ERR(psblk_file)) {
+ ret = PTR_ERR(psblk_file);
+ pr_err("failed to open '%s': %d!\n", devpath, ret);
+ goto err;
}
- /* only allow driver matching the @blkdev */
- if (!binfo.devt) {
- pr_debug("no major\n");
- ret = -ENODEV;
- goto err_put_bdev;
+ inode = file_inode(psblk_file);
+ if (!S_ISBLK(inode->i_mode)) {
+ pr_err("'%s' is not block device!\n", devpath);
+ goto err_fput;
}
- /* psblk_bdev must be assigned before register to pstore/blk */
- psblk_bdev = bdev;
-
- memset(&dev, 0, sizeof(dev));
- dev.total_size = binfo.nr_sects << SECTOR_SHIFT;
- dev.read = psblk_generic_blk_read;
- dev.write = psblk_generic_blk_write;
+ inode = I_BDEV(psblk_file->f_mapping->host)->bd_inode;
+ dev->zone.total_size = i_size_read(inode);
- ret = __register_pstore_device(&dev);
+ ret = __register_pstore_device(dev);
if (ret)
- goto err_put_bdev;
+ goto err_fput;
- bdevname(bdev, bdev_name);
- pr_info("attached %s (no dedicated panic_write!)\n", bdev_name);
return 0;
-err_put_bdev:
- psblk_bdev = NULL;
- psblk_put_bdev(bdev, holder);
- return ret;
-}
-
-static void __unregister_pstore_blk(unsigned int major)
-{
- struct pstore_device_info dev = { .read = psblk_generic_blk_read };
- void *holder = blkdev;
+err_fput:
+ fput(psblk_file);
+err:
+ psblk_file = NULL;
- lockdep_assert_held(&pstore_blk_lock);
- if (psblk_bdev && MAJOR(psblk_bdev->bd_dev) == major) {
- __unregister_pstore_device(&dev);
- psblk_put_bdev(psblk_bdev, holder);
- psblk_bdev = NULL;
- }
+ return ret;
}
/* get information of pstore/blk */
@@ -419,13 +254,93 @@ int pstore_blk_get_config(struct pstore_blk_config *info)
}
EXPORT_SYMBOL_GPL(pstore_blk_get_config);
+
+#ifndef MODULE
+static const char devname[] = "/dev/pstore-blk";
+static __init const char *early_boot_devpath(const char *initial_devname)
+{
+ /*
+ * During early boot the real root file system hasn't been
+ * mounted yet, and no device nodes are present yet. Use the
+ * same scheme to find the device that we use for mounting
+ * the root file system.
+ */
+ dev_t dev = name_to_dev_t(initial_devname);
+
+ if (!dev) {
+ pr_err("failed to resolve '%s'!\n", initial_devname);
+ return initial_devname;
+ }
+
+ init_unlink(devname);
+ init_mknod(devname, S_IFBLK | 0600, new_encode_dev(dev));
+
+ return devname;
+}
+#else
+static inline const char *early_boot_devpath(const char *initial_devname)
+{
+ return initial_devname;
+}
+#endif
+
+static int __init __best_effort_init(void)
+{
+ struct pstore_device_info *best_effort_dev;
+ int ret;
+
+ /* No best-effort mode requested. */
+ if (!best_effort)
+ return 0;
+
+ /* Reject an empty blkdev. */
+ if (!blkdev[0]) {
+ pr_err("blkdev empty with best_effort=Y\n");
+ return -EINVAL;
+ }
+
+ best_effort_dev = kzalloc(sizeof(*best_effort_dev), GFP_KERNEL);
+ if (!best_effort_dev)
+ return -ENOMEM;
+
+ best_effort_dev->zone.read = psblk_generic_blk_read;
+ best_effort_dev->zone.write = psblk_generic_blk_write;
+
+ ret = __register_pstore_blk(best_effort_dev,
+ early_boot_devpath(blkdev));
+ if (ret)
+ kfree(best_effort_dev);
+ else
+ pr_info("attached %s (%zu) (no dedicated panic_write!)\n",
+ blkdev, best_effort_dev->zone.total_size);
+
+ return ret;
+}
+
+static void __exit __best_effort_exit(void)
+{
+ /*
+ * Currently, the only user of psblk_file is best_effort, so
+ * we can assume that pstore_device_info is associated with it.
+ * Once there are "real" blk devices, there will need to be a
+ * dedicated pstore_blk_info, etc.
+ */
+ if (psblk_file) {
+ struct pstore_device_info *dev = pstore_device_info;
+
+ __unregister_pstore_device(dev);
+ kfree(dev);
+ fput(psblk_file);
+ psblk_file = NULL;
+ }
+}
+
static int __init pstore_blk_init(void)
{
- int ret = 0;
+ int ret;
mutex_lock(&pstore_blk_lock);
- if (!pstore_zone_info && best_effort && blkdev[0])
- ret = __register_pstore_blk();
+ ret = __best_effort_init();
mutex_unlock(&pstore_blk_lock);
return ret;
@@ -435,15 +350,9 @@ late_initcall(pstore_blk_init);
static void __exit pstore_blk_exit(void)
{
mutex_lock(&pstore_blk_lock);
- if (psblk_bdev)
- __unregister_pstore_blk(MAJOR(psblk_bdev->bd_dev));
- else {
- struct pstore_device_info dev = { };
-
- if (pstore_zone_info)
- dev.read = pstore_zone_info->read;
- __unregister_pstore_device(&dev);
- }
+ __best_effort_exit();
+ /* If we've been asked to unload, unregister any remaining device. */
+ __unregister_pstore_device(pstore_device_info);
mutex_unlock(&pstore_blk_lock);
}
module_exit(pstore_blk_exit);
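Since the reworked driver still takes its configuration from module parameters, a typical best-effort setup is a kernel command line along these lines (the partition path is illustrative, and note the Kconfig hunk above gates PSTORE_BLK behind BROKEN for now):

	pstore_blk.blkdev=/dev/mmcblk0p7 pstore_blk.best_effort=Y pstore_blk.kmsg_size=64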
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 9ebd17d7befb..65e7e56005b8 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -53,13 +53,6 @@ struct ramfs_fs_info {
static const struct super_operations ramfs_ops;
static const struct inode_operations ramfs_dir_inode_operations;
-static const struct address_space_operations ramfs_aops = {
- .readpage = simple_readpage,
- .write_begin = simple_write_begin,
- .write_end = simple_write_end,
- .set_page_dirty = __set_page_dirty_no_writeback,
-};
-
struct inode *ramfs_get_inode(struct super_block *sb,
const struct inode *dir, umode_t mode, dev_t dev)
{
@@ -68,7 +61,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(&init_user_ns, inode, dir, mode);
- inode->i_mapping->a_ops = &ramfs_aops;
+ inode->i_mapping->a_ops = &ram_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 017db70d0f48..3d7a35d6a18b 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -132,6 +132,7 @@ int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
return IO_ERROR;
}
PATH_LAST_POSITION(path)--;
+ break;
case ITEM_FOUND:
break;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index b9e87ebb1060..855f0e87066d 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -226,8 +226,11 @@ out_free_bio:
bio_free_pages(bio);
bio_put(bio);
out:
- if (res < 0)
+ if (res < 0) {
ERROR("Failed to read block 0x%llx: %d\n", index, res);
+ if (msblk->panic_on_errors)
+ panic("squashfs read failed");
+ }
return res;
}
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 166e98806265..1e90c2575f9b 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -65,5 +65,6 @@ struct squashfs_sb_info {
unsigned int fragments;
int xattr_ids;
unsigned int ids;
+ bool panic_on_errors;
};
#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 88cc94be1076..60d6951915f4 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -18,9 +18,11 @@
#include <linux/fs.h>
#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -37,6 +39,51 @@
static struct file_system_type squashfs_fs_type;
static const struct super_operations squashfs_super_ops;
+enum Opt_errors {
+ Opt_errors_continue,
+ Opt_errors_panic,
+};
+
+enum squashfs_param {
+ Opt_errors,
+};
+
+struct squashfs_mount_opts {
+ enum Opt_errors errors;
+};
+
+static const struct constant_table squashfs_param_errors[] = {
+ {"continue", Opt_errors_continue },
+ {"panic", Opt_errors_panic },
+ {}
+};
+
+static const struct fs_parameter_spec squashfs_fs_parameters[] = {
+ fsparam_enum("errors", Opt_errors, squashfs_param_errors),
+ {}
+};
+
+static int squashfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct squashfs_mount_opts *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, squashfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_errors:
+ opts->errors = result.uint_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct squashfs_decompressor *supported_squashfs_filesystem(
struct fs_context *fc,
short major, short minor, short id)
@@ -67,6 +114,7 @@ static const struct squashfs_decompressor *supported_squashfs_filesystem(
static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct squashfs_mount_opts *opts = fc->fs_private;
struct squashfs_sb_info *msblk;
struct squashfs_super_block *sblk = NULL;
struct inode *root;
@@ -85,6 +133,8 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
}
msblk = sb->s_fs_info;
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
msblk->devblksize_log2 = ffz(~msblk->devblksize);
@@ -350,18 +400,52 @@ static int squashfs_get_tree(struct fs_context *fc)
static int squashfs_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ struct squashfs_mount_opts *opts = fc->fs_private;
+
sync_filesystem(fc->root->d_sb);
fc->sb_flags |= SB_RDONLY;
+
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
return 0;
}
+static void squashfs_free_fs_context(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
static const struct fs_context_operations squashfs_context_ops = {
.get_tree = squashfs_get_tree,
+ .free = squashfs_free_fs_context,
+ .parse_param = squashfs_parse_param,
.reconfigure = squashfs_reconfigure,
};
+static int squashfs_show_options(struct seq_file *s, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+ if (msblk->panic_on_errors)
+ seq_puts(s, ",errors=panic");
+ else
+ seq_puts(s, ",errors=continue");
+
+ return 0;
+}
+
static int squashfs_init_fs_context(struct fs_context *fc)
{
+ struct squashfs_mount_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ fc->fs_private = opts;
fc->ops = &squashfs_context_ops;
return 0;
}
@@ -481,6 +565,7 @@ static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
.name = "squashfs",
.init_fs_context = squashfs_init_fs_context,
+ .parameters = squashfs_fs_parameters,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV
};
@@ -491,6 +576,7 @@ static const struct super_operations squashfs_super_ops = {
.free_inode = squashfs_free_inode,
.statfs = squashfs_statfs,
.put_super = squashfs_put_super,
+ .show_options = squashfs_show_options,
};
module_init(init_squashfs_fs);
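
With the pieces above in place, squashfs gains an "errors=" mount
parameter: "errors=continue" (the default, since the option struct is
zero-allocated and Opt_errors_continue is 0) preserves the old behaviour
of returning the I/O error, while "errors=panic" sets panic_on_errors so
that a failed block read calls panic(). A usage sketch, assuming an
image file at image.squashfs:

	mount -t squashfs -o errors=panic image.squashfs /mnt
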
diff --git a/fs/super.c b/fs/super.c
index 11b7e7213fd1..91b7f156735b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1277,9 +1277,9 @@ int get_tree_bdev(struct fs_context *fc,
}
/*
- * s_umount nests inside bd_mutex during
+ * s_umount nests inside open_mutex during
* __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
+ * open_mutex and can't be called under s_umount. Drop
* s_umount temporarily. This is safe as we're
* holding an active reference.
*/
@@ -1352,9 +1352,9 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
}
/*
- * s_umount nests inside bd_mutex during
+ * s_umount nests inside open_mutex during
* __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
+ * open_mutex and can't be called under s_umount. Drop
* s_umount temporarily. This is safe as we're
* holding an active reference.
*/
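
For context, the code both comments annotate uses the same
drop-and-reacquire pattern (a simplified sketch of the surrounding
functions, not part of these hunks):

	up_write(&s->s_umount);
	blkdev_put(bdev, mode);
	down_write(&s->s_umount);

Only the name of the block-layer lock changed (bd_mutex became
open_mutex); the lock-ordering constraint itself is unchanged.
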
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 8b2e99b7bc9f..749385015a8d 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -495,6 +495,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations sysv_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = sysv_readpage,
.writepage = sysv_writepage,
.write_begin = sysv_write_begin,
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 2846dcd92197..1baff8ddb754 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -125,6 +125,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
}
const struct address_space_operations udf_adinicb_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
.write_begin = udf_adinicb_write_begin,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 0dd2f93ac048..4917670860a0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -235,6 +235,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations udf_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = udf_readpage,
.readahead = udf_readahead,
.writepage = udf_writepage,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index debc282c1bb4..ac628de69601 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -526,6 +526,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations ufs_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
.readpage = ufs_readpage,
.writepage = ufs_writepage,
.write_begin = ufs_write_begin,
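
These one-line .set_page_dirty additions for sysv, udf and ufs make an
implicit default explicit: before this series, set_page_dirty() in
mm/page-writeback.c silently fell back to __set_page_dirty_buffers()
whenever an address space left the method NULL, roughly:

	int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;

	if (!spd)
		spd = __set_page_dirty_buffers;	/* old implicit default */
	return (*spd)(page);

Wiring the method up explicitly lets that fallback be removed.
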
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 14f92285d04f..dd7a6c62b56f 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -337,7 +337,7 @@ out:
return ret;
}
-static inline long userfaultfd_get_blocking_state(unsigned int flags)
+static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
if (flags & FAULT_FLAG_INTERRUPTIBLE)
return TASK_INTERRUPTIBLE;
@@ -370,7 +370,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
struct userfaultfd_wait_queue uwq;
vm_fault_t ret = VM_FAULT_SIGBUS;
bool must_wait;
- long blocking_state;
+ unsigned int blocking_state;
/*
* We don't do userfault handling for the final child pid update.
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e32a1833d523..e0d5cdc57cc2 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -325,10 +325,22 @@ out:
error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0);
if (error2)
return error2;
- ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
- xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <=
- pag->pagf_freeblks + pag->pagf_flcount);
+
+ /*
+ * If there isn't enough space in the AG to satisfy the
+ * reservation, let the caller know that there wasn't enough
+ * space. Callers are responsible for deciding what to do
+ * next, since (in theory) we can stumble along with
+ * insufficient reservation if data blocks are being freed to
+ * replenish the AG's free space.
+ */
+ if (!error &&
+ xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+ xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
+ pag->pagf_freeblks + pag->pagf_flcount)
+ error = -ENOSPC;
}
+
return error;
}
@@ -354,7 +366,7 @@ xfs_ag_resv_alloc_extent(
break;
default:
ASSERT(0);
- /* fall through */
+ fallthrough;
case XFS_AG_RESV_NONE:
field = args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
XFS_TRANS_SB_FDBLOCKS;
@@ -396,7 +408,7 @@ xfs_ag_resv_free_extent(
break;
default:
ASSERT(0);
- /* fall through */
+ fallthrough;
case XFS_AG_RESV_NONE:
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
return;
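
The fallthrough conversions in this and the following hunks replace
comment-style annotations, which Clang's -Wimplicit-fallthrough does not
recognize, with the pseudo-keyword from
include/linux/compiler_attributes.h, defined roughly as:

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif
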
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 82b7cbb1f24f..af3d5f9271f6 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -3174,7 +3174,7 @@ xfs_alloc_vextent(
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
args->type = XFS_ALLOCTYPE_NEAR_BNO;
- /* FALLTHROUGH */
+ fallthrough;
case XFS_ALLOCTYPE_FIRST_AG:
/*
* Rotate through the allocation groups looking for a winner.
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7e3b9b01431e..a3e0e6f672d6 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -605,7 +605,6 @@ xfs_bmap_btree_to_extents(
ASSERT(cur);
ASSERT(whichfork != XFS_COW_FORK);
- ASSERT(!xfs_need_iread_extents(ifp));
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
@@ -5350,7 +5349,6 @@ __xfs_bunmapi(
xfs_fsblock_t sum;
xfs_filblks_t len = *rlen; /* length to unmap in file */
xfs_fileoff_t max_len;
- xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
xfs_fileoff_t end;
struct xfs_iext_cursor icur;
bool done = false;
@@ -5442,16 +5440,6 @@ __xfs_bunmapi(
del = got;
wasdel = isnullstartblock(del.br_startblock);
- /*
- * Make sure we don't touch multiple AGF headers out of order
- * in a single transaction, as that could cause AB-BA deadlocks.
- */
- if (!wasdel && !isrt) {
- agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
- if (prev_agno != NULLAGNUMBER && prev_agno > agno)
- break;
- prev_agno = agno;
- }
if (got.br_startoff < start) {
del.br_startoff = start;
del.br_blockcount -= start - got.br_startoff;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 83ac9771bfb5..747ec77912c3 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -282,7 +282,7 @@ xfs_da3_node_read_verify(
__this_address);
break;
}
- /* fall through */
+ fallthrough;
case XFS_DA_NODE_MAGIC:
fa = xfs_da3_node_verify(bp);
if (fa)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 5c9a7440d9e4..f3254a4f4cb4 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -559,8 +559,17 @@ xfs_dinode_calc_crc(
/*
* Validate di_extsize hint.
*
- * The rules are documented at xfs_ioctl_setattr_check_extsize().
- * These functions must be kept in sync with each other.
+ * 1. Extent size hint is only valid for directories and regular files.
+ * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
+ * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. Hint cannot be larger than MAXEXTLEN.
+ * 5. Can be changed on directories at any time.
+ * 6. Hint value of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ * For realtime files, this is the rt extent size.
+ * 8. For non-realtime files, the extent size hint must be limited
+ * to half the AG size to avoid alignment extending the extent beyond the
+ * limits of the AG.
*/
xfs_failaddr_t
xfs_inode_validate_extsize(
@@ -580,6 +589,28 @@ xfs_inode_validate_extsize(
inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
extsize_bytes = XFS_FSB_TO_B(mp, extsize);
+ /*
+ * This comment describes a historic gap in this verifier function.
+ *
+ * On older kernels, the extent size hint verifier doesn't check that
+ * the extent size hint is an integer multiple of the realtime extent
+ * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
+ * The verifier has always enforced the alignment rule for regular
+ * files with the REALTIME flag set.
+ *
+ * If a directory with a misaligned extent size hint is allowed to
+ * propagate that hint into a new regular realtime file, the result
+ * is that the inode cluster buffer verifier will trigger a corruption
+ * shutdown the next time it is run.
+ *
+ * Unfortunately, there could be filesystems with these misconfigured
+ * directories in the wild, so we cannot add a check to this verifier
+ * at this time because that will result in a new source of directory
+ * corruption errors when reading an existing filesystem. Instead, we
+ * permit the misconfiguration to pass through the verifiers so that
+ * callers of this function can correct and mitigate externally.
+ */
+
if (rt_flag)
blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
else
@@ -616,8 +647,15 @@ xfs_inode_validate_extsize(
/*
* Validate di_cowextsize hint.
*
- * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
- * These functions must be kept in sync with each other.
+ * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
+ * The inode does not have to have any shared blocks, but it must be a v3.
+ * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
+ * for a directory, the hint is propagated to new files.
+ * 3. Can be changed on files & directories at any time.
+ * 4. Hint value of 0 turns off hints, clears inode flags.
+ * 5. Extent size must be a multiple of the appropriate block size.
+ * 6. The extent size hint must be limited to half the AG size to avoid
+ * alignment extending the extent beyond the limits of the AG.
*/
xfs_failaddr_t
xfs_inode_validate_cowextsize(
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 78324e043e25..8d595a5c4abd 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -143,6 +143,23 @@ xfs_trans_log_inode(
}
/*
+ * Inode verifiers on older kernels don't check that the extent size
+ * hint is an integer multiple of the rt extent size on a directory
+ * with both rtinherit and extszinherit flags set. If we're logging a
+ * directory that is misconfigured in this way, clear the hint.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+ (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+ xfs_info_once(ip->i_mount,
+ "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
+ ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+ XFS_DIFLAG_EXTSZINHERIT);
+ ip->i_extsize = 0;
+ flags |= XFS_ILOG_CORE;
+ }
+
+ /*
* Record the specific change for fdatasync optimisation. This allows
* fdatasync to skip log forces for inodes that are only timestamp
* dirty.
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 7a2f9b5f2db5..f96e84793cc9 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -86,6 +86,7 @@ xchk_superblock(
case -ENOSYS:
case -EFBIG:
error = -EFSCORRUPTED;
+ fallthrough;
default:
break;
}
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index b5ebf1d1b4db..77d5c4a0f09f 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -271,7 +271,7 @@ xchk_bmap_iextent_xref(
case XFS_DATA_FORK:
if (xfs_is_reflink_inode(info->sc->ip))
break;
- /* fall through */
+ fallthrough;
case XFS_ATTR_FORK:
xchk_xref_is_not_shared(info->sc, agbno,
irec->br_blockcount);
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index a94bd8122c60..bd1172358964 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -44,7 +44,7 @@ __xchk_btree_process_error(
/* Note the badness but don't abort. */
sc->sm->sm_flags |= errflag;
*error = 0;
- /* fall through */
+ fallthrough;
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xchk_ifork_btree_op_error(sc, cur, level,
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index be38c960da85..6cc92291c46f 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -83,7 +83,7 @@ __xchk_process_error(
/* Note the badness but don't abort. */
sc->sm->sm_flags |= errflag;
*error = 0;
- /* fall through */
+ fallthrough;
default:
trace_xchk_op_error(sc, agno, bno, *error,
ret_ip);
@@ -136,7 +136,7 @@ __xchk_fblock_process_error(
/* Note the badness but don't abort. */
sc->sm->sm_flags |= errflag;
*error = 0;
- /* fall through */
+ fallthrough;
default:
trace_xchk_file_op_error(sc, whichfork, offset, *error,
ret_ip);
@@ -696,7 +696,7 @@ xchk_get_inode(
if (error)
return -ENOENT;
error = -EFSCORRUPTED;
- /* fall through */
+ fallthrough;
default:
trace_xchk_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 653f3280e1c1..9f0dbb47c82c 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -47,7 +47,7 @@ xchk_da_process_error(
/* Note the badness but don't abort. */
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
*error = 0;
- /* fall through */
+ fallthrough;
default:
trace_xchk_file_op_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo,
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index c2857d854c83..b8202dd08939 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -947,7 +947,7 @@ xrep_ino_dqattach(
xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
- /* fall through */
+ fallthrough;
case -ESRCH:
error = 0;
break;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 826caa6b4a5a..cb4e0fcf4c76 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -561,7 +561,7 @@ const struct address_space_operations xfs_address_space_operations = {
.readahead = xfs_vm_readahead,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .set_page_dirty = iomap_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.bmap = xfs_vm_bmap,
@@ -575,7 +575,7 @@ const struct address_space_operations xfs_address_space_operations = {
const struct address_space_operations xfs_dax_aops = {
.writepages = xfs_dax_writepages,
.direct_IO = noop_direct_IO,
- .set_page_dirty = noop_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
.swap_activate = xfs_iomap_swapfile_activate,
};
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 0936f3a96fe6..2988efa97e14 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -286,7 +286,7 @@ xfs_bmap_count_blocks(
*/
*count += btblocks - 1;
- /* fall through */
+ fallthrough;
case XFS_DINODE_FMT_EXTENTS:
*nextents = xfs_bmap_count_leaves(ifp, count);
break;
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 465fd9e048d4..1da59bdff245 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -84,7 +84,7 @@ xfs_fs_encode_fh(
case FILEID_INO32_GEN_PARENT:
fid->i32.parent_ino = XFS_I(parent)->i_ino;
fid->i32.parent_gen = parent->i_generation;
- /*FALLTHRU*/
+ fallthrough;
case FILEID_INO32_GEN:
fid->i32.ino = XFS_I(inode)->i_ino;
fid->i32.gen = inode->i_generation;
@@ -92,7 +92,7 @@ xfs_fs_encode_fh(
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
fid64->parent_ino = XFS_I(parent)->i_ino;
fid64->parent_gen = parent->i_generation;
- /*FALLTHRU*/
+ fallthrough;
case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
fid64->ino = XFS_I(inode)->i_ino;
fid64->gen = inode->i_generation;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 396ef36dcd0a..3c0749ab9e40 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -863,7 +863,7 @@ xfs_break_layouts(
error = xfs_break_dax_layouts(inode, &retry);
if (error || retry)
break;
- /* fall through */
+ fallthrough;
case BREAK_WRITE:
error = xfs_break_leased_layouts(inode, iolock, &retry);
break;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 0369eb22c1bb..1db99a909b23 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -690,6 +690,7 @@ xfs_inode_inherit_flags(
const struct xfs_inode *pip)
{
unsigned int di_flags = 0;
+ xfs_failaddr_t failaddr;
umode_t mode = VFS_I(ip)->i_mode;
if (S_ISDIR(mode)) {
@@ -729,6 +730,24 @@ xfs_inode_inherit_flags(
di_flags |= XFS_DIFLAG_FILESTREAM;
ip->i_diflags |= di_flags;
+
+ /*
+ * Inode verifiers on older kernels only checked that the extent size
+ * hint is an integer multiple of the rt extent size on realtime files.
+ * They did not check the hint alignment on a directory with both
+ * rtinherit and extszinherit flags set. If the misaligned hint is
+ * propagated from a directory into a new realtime file, new file
+ * allocations will fail due to math errors in the rt allocator and/or
+ * trip the verifiers. Validate the hint settings in the new file so
+ * that we don't let broken hints propagate.
+ */
+ failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
+ VFS_I(ip)->i_mode, ip->i_diflags);
+ if (failaddr) {
+ ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+ XFS_DIFLAG_EXTSZINHERIT);
+ ip->i_extsize = 0;
+ }
}
/* Propagate di_flags2 from a parent inode to a child inode. */
@@ -737,12 +756,22 @@ xfs_inode_inherit_flags2(
struct xfs_inode *ip,
const struct xfs_inode *pip)
{
+ xfs_failaddr_t failaddr;
+
if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
ip->i_cowextsize = pip->i_cowextsize;
}
if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
ip->i_diflags2 |= XFS_DIFLAG2_DAX;
+
+ /* Don't let invalid cowextsize hints propagate. */
+ failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
+ VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
+ if (failaddr) {
+ ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_cowextsize = 0;
+ }
}
/*
@@ -848,7 +877,7 @@ xfs_init_new_inode(
xfs_inode_inherit_flags(ip, pip);
if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
xfs_inode_inherit_flags2(ip, pip);
- /* FALLTHROUGH */
+ fallthrough;
case S_IFLNK:
ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_bytes = 0;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3925bfcb2365..7b1796cede10 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -558,7 +558,7 @@ xfs_ioc_attrmulti_one(
case ATTR_OP_REMOVE:
value = NULL;
*len = 0;
- /* fall through */
+ fallthrough;
case ATTR_OP_SET:
error = mnt_want_write_file(parfilp);
if (error)
@@ -1267,20 +1267,8 @@ out_error:
}
/*
- * extent size hint validation is somewhat cumbersome. Rules are:
- *
- * 1. extent size hint is only valid for directories and regular files
- * 2. FS_XFLAG_EXTSIZE is only valid for regular files
- * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
- * 4. can only be changed on regular files if no extents are allocated
- * 5. can be changed on directories at any time
- * 6. extsize hint of 0 turns off hints, clears inode flags.
- * 7. Extent size must be a multiple of the appropriate block size.
- * 8. for non-realtime files, the extent size hint must be limited
- * to half the AG size to avoid alignment extending the extent beyond the
- * limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_extsize.
+ * Validate a proposed extent size hint. For regular files, the hint can only
+ * be changed if no extents are allocated.
*/
static int
xfs_ioctl_setattr_check_extsize(
@@ -1288,86 +1276,65 @@ xfs_ioctl_setattr_check_extsize(
struct fileattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
- xfs_extlen_t size;
- xfs_fsblock_t extsize_fsb;
+ xfs_failaddr_t failaddr;
+ uint16_t new_diflags;
if (!fa->fsx_valid)
return 0;
if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
- ((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+ XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
return -EINVAL;
- if (fa->fsx_extsize == 0)
- return 0;
-
- extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
- if (extsize_fsb > MAXEXTLEN)
+ if (fa->fsx_extsize & mp->m_blockmask)
return -EINVAL;
- if (XFS_IS_REALTIME_INODE(ip) ||
- (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
- size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
- } else {
- size = mp->m_sb.sb_blocksize;
- if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+ new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+
+ /*
+ * Inode verifiers on older kernels don't check that the extent size
+ * hint is an integer multiple of the rt extent size on a directory
+ * with both rtinherit and extszinherit flags set. Don't let sysadmins
+ * misconfigure directories.
+ */
+ if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
+ unsigned int rtextsize_bytes;
+
+ rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
+ if (fa->fsx_extsize % rtextsize_bytes)
return -EINVAL;
}
- if (fa->fsx_extsize % size)
- return -EINVAL;
-
- return 0;
+ failaddr = xfs_inode_validate_extsize(ip->i_mount,
+ XFS_B_TO_FSB(mp, fa->fsx_extsize),
+ VFS_I(ip)->i_mode, new_diflags);
+ return failaddr != NULL ? -EINVAL : 0;
}
-/*
- * CoW extent size hint validation rules are:
- *
- * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
- * The inode does not have to have any shared blocks, but it must be a v3.
- * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
- * for a directory, the hint is propagated to new files.
- * 3. Can be changed on files & directories at any time.
- * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
- * 5. Extent size must be a multiple of the appropriate block size.
- * 6. The extent size hint must be limited to half the AG size to avoid
- * alignment extending the extent beyond the limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_cowextsize.
- */
static int
xfs_ioctl_setattr_check_cowextsize(
struct xfs_inode *ip,
struct fileattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
- xfs_extlen_t size;
- xfs_fsblock_t cowextsize_fsb;
+ xfs_failaddr_t failaddr;
+ uint64_t new_diflags2;
+ uint16_t new_diflags;
if (!fa->fsx_valid)
return 0;
- if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
- return 0;
-
- if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
+ if (fa->fsx_cowextsize & mp->m_blockmask)
return -EINVAL;
- if (fa->fsx_cowextsize == 0)
- return 0;
+ new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+ new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
- cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
- if (cowextsize_fsb > MAXEXTLEN)
- return -EINVAL;
-
- size = mp->m_sb.sb_blocksize;
- if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
- return -EINVAL;
-
- if (fa->fsx_cowextsize % size)
- return -EINVAL;
-
- return 0;
+ failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
+ XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
+ VFS_I(ip)->i_mode, new_diflags, new_diflags2);
+ return failaddr != NULL ? -EINVAL : 0;
}
static int
@@ -1544,7 +1511,7 @@ xfs_ioc_getbmap(
switch (cmd) {
case XFS_IOC_GETBMAPA:
bmx.bmv_iflags = BMV_IF_ATTRFORK;
- /*FALLTHRU*/
+ fallthrough;
case XFS_IOC_GETBMAP:
/* struct getbmap is a strict subset of struct getbmapx. */
recsize = sizeof(struct getbmap);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d154f42e2dc6..d8cd2583dedb 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1036,7 +1036,7 @@ retry:
prealloc_blocks = 0;
goto retry;
}
- /*FALLTHRU*/
+ fallthrough;
default:
goto out_unlock;
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c19a82adea1e..a002425377b5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2626,6 +2626,7 @@ xlog_covered_state(
case XLOG_STATE_COVER_IDLE:
if (iclogs_changed == 1)
return XLOG_STATE_COVER_IDLE;
+ fallthrough;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
break;
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 3c392b1512ac..bb9860ec9a93 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -2,6 +2,8 @@
#ifndef __XFS_MESSAGE_H
#define __XFS_MESSAGE_H 1
+#include <linux/once_lite.h>
+
struct xfs_mount;
extern __printf(2, 3)
@@ -41,16 +43,7 @@ do { \
} while (0)
#define xfs_printk_once(func, dev, fmt, ...) \
-({ \
- static bool __section(".data.once") __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- func(dev, fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(func, dev, fmt, ##__VA_ARGS__)
#define xfs_emerg_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_emerg, dev, fmt, ##__VA_ARGS__)
@@ -73,6 +66,8 @@ do { \
xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
#define xfs_notice_once(dev, fmt, ...) \
xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__)
+#define xfs_info_once(dev, fmt, ...) \
+ xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__)
void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
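
DO_ONCE_LITE() is the generic helper that replaces the open-coded macro
deleted above; as of this series it lives in include/linux/once_lite.h
and expands to roughly the same pattern:

	#define DO_ONCE_LITE(func, ...)					\
		DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)

	#define DO_ONCE_LITE_IF(condition, func, ...)			\
	({								\
		static bool __section(".data.once") __already_done;	\
		bool __ret_do_once = !__already_done;			\
									\
		if (unlikely(condition && !__already_done)) {		\
			__already_done = true;				\
			func(__VA_ARGS__);				\
		}							\
		unlikely(__ret_do_once);				\
	})
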
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 9aced0a00003..d11d032da0b4 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -294,7 +294,7 @@ xfs_trans_read_buf_map(
default:
if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
- /* fall through */
+ fallthrough;
case -ENOMEM:
case -EAGAIN:
return error;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index cd145d318b17..dbf03635869c 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -5,7 +5,7 @@
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*/
#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/magic.h>
#include <linux/iomap.h>
#include <linux/init.h>
@@ -185,7 +185,7 @@ static const struct address_space_operations zonefs_file_aops = {
.readahead = zonefs_readahead,
.writepage = zonefs_writepage,
.writepages = zonefs_writepages,
- .set_page_dirty = iomap_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.migratepage = iomap_migrate_page,
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 18197c16149f..3e8d969b22fe 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -207,4 +207,14 @@ struct acpi_pld_info {
#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK)
#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */
+/* Panel position defined in _PLD section of ACPI Specification 6.3 */
+
+#define ACPI_PLD_PANEL_TOP 0
+#define ACPI_PLD_PANEL_BOTTOM 1
+#define ACPI_PLD_PANEL_LEFT 2
+#define ACPI_PLD_PANEL_RIGHT 3
+#define ACPI_PLD_PANEL_FRONT 4
+#define ACPI_PLD_PANEL_BACK 5
+#define ACPI_PLD_PANEL_UNKNOWN 6
+
#endif /* ACBUFFER_H */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index e92f84fa8c68..0362cbb72359 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -188,6 +188,8 @@
#define ACPI_MAX_GSBUS_DATA_SIZE 255
#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
+#define ACPI_PRM_INPUT_BUFFER_SIZE 26
+
/* _sx_d and _sx_w control methods */
#define ACPI_NUM_sx_d_METHODS 4
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 3a82faac5767..c6bd4f9a80ba 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -280,6 +280,12 @@ struct acpi_device_power {
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
};
+struct acpi_dep_data {
+ struct list_head node;
+ acpi_handle supplier;
+ acpi_handle consumer;
+};
+
/* Performance Management */
struct acpi_device_perf_flags {
@@ -498,8 +504,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
*/
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
-void acpi_bus_put_acpi_device(struct acpi_device *adev);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -586,7 +590,7 @@ struct acpi_pci_root {
/* helper */
-bool acpi_dma_supported(struct acpi_device *adev);
+bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
u64 *size);
@@ -685,6 +689,8 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier);
struct acpi_device *
acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
struct acpi_device *
@@ -718,6 +724,13 @@ static inline void acpi_dev_put(struct acpi_device *adev)
{
put_device(&adev->dev);
}
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+
+static inline void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+ acpi_dev_put(adev);
+}
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index 40a91ce87e04..68e4d80c1b32 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -43,4 +43,4 @@ static inline void disable_hmat(void)
{
}
#endif /* CONFIG_ACPI_HMAT */
-#endif /* __ACP_NUMA_H */
+#endif /* __ACPI_NUMA_H */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index f8d44b06f3e3..a43335961e30 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20210331
+#define ACPI_CA_VERSION 0x20210604
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index ce59903c2695..ef2872dea01c 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -327,9 +327,20 @@ struct acpi_cedt_header {
enum acpi_cedt_type {
ACPI_CEDT_TYPE_CHBS = 0,
- ACPI_CEDT_TYPE_RESERVED = 1
+ ACPI_CEDT_TYPE_CFMWS = 1,
+ ACPI_CEDT_TYPE_RESERVED = 2,
};
+/* Values for version field above */
+
+#define ACPI_CEDT_CHBS_VERSION_CXL11 (0)
+#define ACPI_CEDT_CHBS_VERSION_CXL20 (1)
+
+/* Values for length field above */
+
+#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000)
+#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000)
+
/*
* CEDT subtables
*/
@@ -345,6 +356,34 @@ struct acpi_cedt_chbs {
u64 length;
};
+/* 1: CXL Fixed Memory Window Structure */
+
+struct acpi_cedt_cfmws {
+ struct acpi_cedt_header header;
+ u32 reserved1;
+ u64 base_hpa;
+ u64 window_size;
+ u8 interleave_ways;
+ u8 interleave_arithmetic;
+ u16 reserved2;
+ u32 granularity;
+ u16 restrictions;
+ u16 qtg_id;
+ u32 interleave_targets[];
+};
+
+/* Values for Interleave Arithmetic field above */
+
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0)
+
+/* Values for Restrictions field above */
+
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+
/*******************************************************************************
*
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 18cafe3ebddc..2069ac38a4e2 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -24,6 +24,7 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
@@ -39,11 +40,14 @@
#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
+#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -65,6 +69,20 @@
/*******************************************************************************
*
+ * BDAT - BIOS Data ACPI Table
+ *
+ * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
+ * Nov 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_bdat {
+ struct acpi_table_header header;
+ struct acpi_generic_address gas;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
@@ -446,6 +464,12 @@ struct acpi_ivrs_device_hid {
u8 uid_length;
};
+/* Values for uid_type above */
+
+#define ACPI_IVRS_UID_NOT_PRESENT 0
+#define ACPI_IVRS_UID_IS_INTEGER 1
+#define ACPI_IVRS_UID_IS_STRING 2
+
/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
struct acpi_ivrs_memory {
@@ -763,6 +787,20 @@ struct acpi_madt_multiproc_wakeup {
u64 base_address;
};
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
+
+struct acpi_madt_multiproc_wakeup_mailbox {
+ u16 command;
+ u16 reserved; /* reserved - must be zero */
+ u32 apic_id;
+ u64 wakeup_vector;
+ u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */
+ u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
+};
+
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+
/*
* Common flags fields for MADT subtables
*/
@@ -1675,6 +1713,48 @@ struct acpi_pptt_id {
/*******************************************************************************
*
+ * PRMT - Platform Runtime Mechanism Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_prmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_table_prmt_header {
+ u8 platform_guid[16];
+ u32 module_info_offset;
+ u32 module_info_count;
+};
+
+struct acpi_prmt_module_header {
+ u16 revision;
+ u16 length;
+};
+
+struct acpi_prmt_module_info {
+ u16 revision;
+ u16 length;
+ u8 module_guid[16];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 handler_info_count;
+ u32 handler_info_offset;
+ u64 mmio_list_pointer;
+};
+
+struct acpi_prmt_handler_info {
+ u16 revision;
+ u16 length;
+ u8 handler_guid[16];
+ u64 handler_address;
+ u64 static_data_buffer_address;
+ u64 acpi_param_buffer_address;
+};
+
+/*******************************************************************************
+ *
* RASF - RAS Feature Table (ACPI 5.0)
* Version 1
*
@@ -1771,6 +1851,32 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * RGRT - Regulatory Graphics Resource Table
+ * Version 1
+ *
+ * Conforms to "ACPI RGRT" available at:
+ * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/
+ *
+ ******************************************************************************/
+
+struct acpi_table_rgrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 version;
+ u8 image_type;
+ u8 reserved;
+ u8 image[0];
+};
+
+/* image_type values */
+
+enum acpi_rgrt_image_type {
+ ACPI_RGRT_TYPE_RESERVED0 = 0,
+ ACPI_RGRT_IMAGE_TYPE_PNG = 1,
+ ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
@@ -1899,6 +2005,37 @@ struct acpi_sdev_pcie_path {
u8 function;
};
+/*******************************************************************************
+ *
+ * SVKL - Storage Volume Key Location Table (ACPI 6.4)
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_svkl {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 count;
+};
+
+struct acpi_svkl_key {
+ u16 type;
+ u16 format;
+ u32 size;
+ u64 address;
+};
+
+enum acpi_svkl_type {
+ ACPI_SVKL_TYPE_MAIN_STORAGE = 0,
+ ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */
+};
+
+enum acpi_svkl_format {
+ ACPI_SVKL_FORMAT_RAW_BINARY = 0,
+ ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
+};
+
/* Reset to default packing */
#pragma pack()
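
A minimal sketch of how a consumer might walk the new SVKL table,
assuming the key array immediately follows the fixed header as the GHCI
specification describes; parse_key() is a hypothetical callback, not a
kernel API:

	static void svkl_for_each_key(struct acpi_table_svkl *svkl)
	{
		struct acpi_svkl_key *key = (struct acpi_svkl_key *)(svkl + 1);
		u32 i;

		for (i = 0; i < svkl->count; i++)
			parse_key(&key[i]);	/* hypothetical consumer */
	}
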
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 888b6cfeed91..bc45af52c93b 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -27,17 +27,13 @@ atomic_read(const atomic_t *v)
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
-#define atomic_read atomic_read
-#if defined(arch_atomic_read_acquire)
static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
-#define atomic_read_acquire atomic_read_acquire
-#endif
static __always_inline void
atomic_set(atomic_t *v, int i)
@@ -45,17 +41,13 @@ atomic_set(atomic_t *v, int i)
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
-#define atomic_set atomic_set
-#if defined(arch_atomic_set_release)
static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
-#define atomic_set_release atomic_set_release
-#endif
static __always_inline void
atomic_add(int i, atomic_t *v)
@@ -63,87 +55,62 @@ atomic_add(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
-#define atomic_add atomic_add
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
-#define atomic_add_return atomic_add_return
-#endif
-#if defined(arch_atomic_add_return_acquire)
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-#if defined(arch_atomic_add_return_release)
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-#if defined(arch_atomic_add_return_relaxed)
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-#if defined(arch_atomic_fetch_add_acquire)
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-#if defined(arch_atomic_fetch_add_release)
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-#if defined(arch_atomic_fetch_add_relaxed)
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
static __always_inline void
atomic_sub(int i, atomic_t *v)
@@ -151,267 +118,188 @@ atomic_sub(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
-#define atomic_sub atomic_sub
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
-#define atomic_sub_return atomic_sub_return
-#endif
-#if defined(arch_atomic_sub_return_acquire)
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-#if defined(arch_atomic_sub_return_release)
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-#if defined(arch_atomic_sub_return_relaxed)
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-#if defined(arch_atomic_fetch_sub_acquire)
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-#if defined(arch_atomic_fetch_sub_release)
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-#if defined(arch_atomic_fetch_sub_relaxed)
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-#if defined(arch_atomic_inc)
static __always_inline void
atomic_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_inc(v);
}
-#define atomic_inc atomic_inc
-#endif
-#if defined(arch_atomic_inc_return)
static __always_inline int
atomic_inc_return(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
-#define atomic_inc_return atomic_inc_return
-#endif
-#if defined(arch_atomic_inc_return_acquire)
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-#if defined(arch_atomic_inc_return_release)
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-#if defined(arch_atomic_inc_return_relaxed)
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-#if defined(arch_atomic_fetch_inc)
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-#if defined(arch_atomic_fetch_inc_acquire)
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-#if defined(arch_atomic_fetch_inc_release)
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-#if defined(arch_atomic_fetch_inc_relaxed)
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-#if defined(arch_atomic_dec)
static __always_inline void
atomic_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_dec(v);
}
-#define atomic_dec atomic_dec
-#endif
-#if defined(arch_atomic_dec_return)
static __always_inline int
atomic_dec_return(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
-#define atomic_dec_return atomic_dec_return
-#endif
-#if defined(arch_atomic_dec_return_acquire)
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-#if defined(arch_atomic_dec_return_release)
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-#if defined(arch_atomic_dec_return_relaxed)
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-#if defined(arch_atomic_fetch_dec)
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-#if defined(arch_atomic_fetch_dec_acquire)
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-#if defined(arch_atomic_fetch_dec_release)
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-#if defined(arch_atomic_fetch_dec_relaxed)
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
static __always_inline void
atomic_and(int i, atomic_t *v)
@@ -419,97 +307,69 @@ atomic_and(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
-#define atomic_and atomic_and
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-#if defined(arch_atomic_fetch_and_acquire)
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-#if defined(arch_atomic_fetch_and_release)
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-#if defined(arch_atomic_fetch_and_relaxed)
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-#if defined(arch_atomic_andnot)
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
-#define atomic_andnot atomic_andnot
-#endif
-#if defined(arch_atomic_fetch_andnot)
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-#if defined(arch_atomic_fetch_andnot_acquire)
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-#if defined(arch_atomic_fetch_andnot_release)
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-#if defined(arch_atomic_fetch_andnot_relaxed)
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
static __always_inline void
atomic_or(int i, atomic_t *v)
@@ -517,47 +377,34 @@ atomic_or(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
-#define atomic_or atomic_or
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-#if defined(arch_atomic_fetch_or_acquire)
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-#if defined(arch_atomic_fetch_or_release)
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-#if defined(arch_atomic_fetch_or_relaxed)
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
static __always_inline void
atomic_xor(int i, atomic_t *v)
@@ -565,129 +412,91 @@ atomic_xor(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
-#define atomic_xor atomic_xor
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-#if defined(arch_atomic_fetch_xor_acquire)
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-#if defined(arch_atomic_fetch_xor_release)
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-#if defined(arch_atomic_fetch_xor_relaxed)
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
-#define atomic_xchg atomic_xchg
-#endif
-#if defined(arch_atomic_xchg_acquire)
static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-#if defined(arch_atomic_xchg_release)
static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-#if defined(arch_atomic_xchg_relaxed)
static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-#if defined(arch_atomic_cmpxchg_acquire)
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-#if defined(arch_atomic_cmpxchg_release)
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-#if defined(arch_atomic_cmpxchg_relaxed)
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic_try_cmpxchg)
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
@@ -695,10 +504,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-#if defined(arch_atomic_try_cmpxchg_acquire)
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
@@ -706,10 +512,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-#if defined(arch_atomic_try_cmpxchg_release)
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
@@ -717,10 +520,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-#if defined(arch_atomic_try_cmpxchg_relaxed)
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
@@ -728,108 +528,76 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic_sub_and_test)
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-#if defined(arch_atomic_dec_and_test)
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-#if defined(arch_atomic_inc_and_test)
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-#if defined(arch_atomic_add_negative)
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
-#define atomic_add_negative atomic_add_negative
-#endif
-#if defined(arch_atomic_fetch_add_unless)
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-#if defined(arch_atomic_add_unless)
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
-#define atomic_add_unless atomic_add_unless
-#endif
-#if defined(arch_atomic_inc_not_zero)
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-#if defined(arch_atomic_inc_unless_negative)
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-#if defined(arch_atomic_dec_unless_positive)
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-#if defined(arch_atomic_dec_if_positive)
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
static __always_inline s64
atomic64_read(const atomic64_t *v)
@@ -837,17 +605,13 @@ atomic64_read(const atomic64_t *v)
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
-#define atomic64_read atomic64_read
-#if defined(arch_atomic64_read_acquire)
static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
@@ -855,17 +619,13 @@ atomic64_set(atomic64_t *v, s64 i)
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
-#define atomic64_set atomic64_set
-#if defined(arch_atomic64_set_release)
static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
-#define atomic64_set_release atomic64_set_release
-#endif
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
@@ -873,87 +633,62 @@ atomic64_add(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
-#define atomic64_add atomic64_add
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
-#define atomic64_add_return atomic64_add_return
-#endif
-#if defined(arch_atomic64_add_return_acquire)
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-#if defined(arch_atomic64_add_return_release)
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-#if defined(arch_atomic64_add_return_relaxed)
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-#if defined(arch_atomic64_fetch_add_acquire)
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-#if defined(arch_atomic64_fetch_add_release)
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-#if defined(arch_atomic64_fetch_add_relaxed)
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
@@ -961,267 +696,188 @@ atomic64_sub(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
-#define atomic64_sub atomic64_sub
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-#if defined(arch_atomic64_sub_return_acquire)
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-#if defined(arch_atomic64_sub_return_release)
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-#if defined(arch_atomic64_sub_return_relaxed)
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-#if defined(arch_atomic64_fetch_sub_acquire)
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-#if defined(arch_atomic64_fetch_sub_release)
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-#if defined(arch_atomic64_fetch_sub_relaxed)
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-#if defined(arch_atomic64_inc)
static __always_inline void
atomic64_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
-#define atomic64_inc atomic64_inc
-#endif
-#if defined(arch_atomic64_inc_return)
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-#if defined(arch_atomic64_inc_return_acquire)
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-#if defined(arch_atomic64_inc_return_release)
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-#if defined(arch_atomic64_inc_return_relaxed)
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-#if defined(arch_atomic64_fetch_inc)
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-#if defined(arch_atomic64_fetch_inc_acquire)
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-#if defined(arch_atomic64_fetch_inc_release)
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-#if defined(arch_atomic64_fetch_inc_relaxed)
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-#if defined(arch_atomic64_dec)
static __always_inline void
atomic64_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
-#define atomic64_dec atomic64_dec
-#endif
-#if defined(arch_atomic64_dec_return)
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-#if defined(arch_atomic64_dec_return_acquire)
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-#if defined(arch_atomic64_dec_return_release)
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-#if defined(arch_atomic64_dec_return_relaxed)
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-#if defined(arch_atomic64_fetch_dec)
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-#if defined(arch_atomic64_fetch_dec_acquire)
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-#if defined(arch_atomic64_fetch_dec_release)
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-#if defined(arch_atomic64_fetch_dec_relaxed)
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
@@ -1229,97 +885,69 @@ atomic64_and(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
-#define atomic64_and atomic64_and
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-#if defined(arch_atomic64_fetch_and_acquire)
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-#if defined(arch_atomic64_fetch_and_release)
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-#if defined(arch_atomic64_fetch_and_relaxed)
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-#if defined(arch_atomic64_andnot)
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
-#define atomic64_andnot atomic64_andnot
-#endif
-#if defined(arch_atomic64_fetch_andnot)
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-#if defined(arch_atomic64_fetch_andnot_acquire)
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-#if defined(arch_atomic64_fetch_andnot_release)
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-#if defined(arch_atomic64_fetch_andnot_relaxed)
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
@@ -1327,47 +955,34 @@ atomic64_or(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
-#define atomic64_or atomic64_or
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-#if defined(arch_atomic64_fetch_or_acquire)
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-#if defined(arch_atomic64_fetch_or_release)
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-#if defined(arch_atomic64_fetch_or_relaxed)
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
@@ -1375,129 +990,91 @@ atomic64_xor(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
-#define atomic64_xor atomic64_xor
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-#if defined(arch_atomic64_fetch_xor_acquire)
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-#if defined(arch_atomic64_fetch_xor_release)
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-#if defined(arch_atomic64_fetch_xor_relaxed)
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
-#define atomic64_xchg atomic64_xchg
-#endif
-#if defined(arch_atomic64_xchg_acquire)
static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-#if defined(arch_atomic64_xchg_release)
static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-#if defined(arch_atomic64_xchg_relaxed)
static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-#if defined(arch_atomic64_cmpxchg_acquire)
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-#if defined(arch_atomic64_cmpxchg_release)
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-#if defined(arch_atomic64_cmpxchg_relaxed)
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic64_try_cmpxchg)
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
@@ -1505,10 +1082,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-#if defined(arch_atomic64_try_cmpxchg_acquire)
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
@@ -1516,10 +1090,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-#if defined(arch_atomic64_try_cmpxchg_release)
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
@@ -1527,10 +1098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
@@ -1538,218 +1106,161 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic64_sub_and_test)
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-#if defined(arch_atomic64_dec_and_test)
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-#if defined(arch_atomic64_inc_and_test)
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-#if defined(arch_atomic64_add_negative)
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-#if defined(arch_atomic64_fetch_add_unless)
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-#if defined(arch_atomic64_add_unless)
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-#if defined(arch_atomic64_inc_not_zero)
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-#if defined(arch_atomic64_inc_unless_negative)
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-#if defined(arch_atomic64_dec_unless_positive)
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-#if defined(arch_atomic64_dec_if_positive)
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_acquire)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_release)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_relaxed)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_acquire)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_release)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_relaxed)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_acquire)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_release)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_relaxed)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg)
#define try_cmpxchg(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1758,9 +1269,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_acquire)
#define try_cmpxchg_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1769,9 +1278,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_release)
#define try_cmpxchg_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1780,9 +1287,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_relaxed)
#define try_cmpxchg_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1791,7 +1296,6 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
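
The try_cmpxchg() wrappers above instrument both the target and the expected-value pointer because, unlike plain cmpxchg(), try_cmpxchg() writes the observed value back through oldp on failure. A minimal usage sketch (the helper below is hypothetical, not part of this patch):

static void example_inc_clamped(atomic_t *v, int max)
{
	int old = atomic_read(v);

	do {
		if (old >= max)
			return;		/* already at the cap */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));
}
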
#define cmpxchg_local(ptr, ...) \
({ \
@@ -1830,4 +1334,4 @@ atomic64_dec_if_positive(atomic64_t *v)
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 4bec382e44520f4d8267e42620054db26a659ea3
+// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5
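
Every wrapper in this generated header has the same two-step shape: instrument the access for the sanitizers, then defer to the arch_-prefixed primitive. As a rough sketch, the instrumentation hook itself boils down to the following (reconstructed from <linux/instrumented.h>; see that header for the authoritative definition):

/* Sketch only; the kasan/kcsan hooks compile away when the sanitizers are off. */
static __always_inline void
instrument_atomic_read_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_read_write(v, size);
}
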
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 11f96f40f4a7..04b8be9f1a77 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
- * smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op interer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
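
For illustration, ATOMIC_FETCH_OP(add, +) above expands to the following cmpxchg retry loop (a direct expansion of the macro, closing brace included):

static inline int generic_atomic_fetch_add(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
		c = old;	/* lost the race; retry with the fresh value */

	return c;		/* value observed before the add */
}
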
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,87 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-#include <linux/irqflags.h>
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_GENERIC_ATOMIC_H */
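
With the generic_ renames and the arch_ defines above in place, the layering on an architecture that uses this header is uniform; the resulting call chain, sketched from the hunks in this patch:

/*
 * atomic_add(i, v)                      instrumented wrapper
 *   -> instrument_atomic_read_write(v, sizeof(*v))
 *   -> arch_atomic_add(i, v)            #define'd to generic_atomic_add
 *        -> generic_atomic_add(i, v)    cmpxchg loop (SMP) or
 *                                       irq-off section (UP)
 */
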
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
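
The generic_atomic64_* symbols stay out of line; they are provided by lib/atomic64.c, which guards each variable with a hashed spinlock. A simplified sketch of that scheme (lock_addr() is the library's internal hash helper; details abridged from the actual code):

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* per-variable hashed lock */
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
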
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index b402494883b6..bafc51f483c4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -140,39 +141,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_ON(1); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
-#define WARN_ONCE(condition, format...) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN(1, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
-#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_TAINT(1, taint, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
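
The three *_ONCE macros above now share DO_ONCE_LITE_IF() from the newly included <linux/once_lite.h>. Its shape can be reconstructed from the open-coded versions deleted in this hunk; roughly:

#define DO_ONCE_LITE_IF(condition, func, ...)				\
({									\
	static bool __section(".data.once") __already_done;		\
	bool __ret_do_once = !!(condition);				\
									\
	if (unlikely(__ret_do_once && !__already_done)) {		\
		__already_done = true;					\
		func(__VA_ARGS__);					\
	}								\
	unlikely(__ret_do_once);					\
})
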
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..380cdc824e4b 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes an u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..dca4419922a9 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
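
The net effect of the renames above is a clean fallback chain: an architecture that supplies its own arch_xchg() keeps it; otherwise arch_xchg defaults to generic_xchg(). How a plain xchg() call then resolves, sketched from this patch:

/*
 * xchg(ptr, x)
 *   -> instrument_atomic_write(ptr, sizeof(*ptr))     instrumented wrapper
 *   -> arch_xchg(ptr, x)                              -> generic_xchg
 *        -> __generic_xchg((unsigned long)(x), ptr, sizeof(*ptr))
 *           size-dispatched, irq-off critical section
 */
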
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 515c3fb06ab3..56348a541c50 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -194,6 +194,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
#define HV_STATUS_INVALID_ALIGNMENT 4
#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
#define HV_STATUS_OPERATION_DENIED 8
#define HV_STATUS_INSUFFICIENT_MEMORY 11
#define HV_STATUS_INVALID_PORT_ID 17
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 7637fb46ba4f..a2c8ed60233a 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -6,47 +6,18 @@
#ifndef __ASSEMBLY__
+/*
+ * Supports 3 memory models: FLATMEM, SPARSEMEM_VMEMMAP and classic SPARSEMEM.
+ */
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#ifndef arch_pfn_to_nid
-#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
-#endif
-
-#ifndef arch_local_page_offset
-#define arch_local_page_offset(pfn, nid) \
- ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*
- * supports 3 memory models.
- */
-#if defined(CONFIG_FLATMEM)
-
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#define __pfn_to_page(pfn) \
-({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(__pfn); \
- NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
-})
-
-#define __page_to_pfn(pg) \
-({ const struct page *__pg = (pg); \
- struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
- (unsigned long)(__pg - __pgdat->node_mem_map) + \
- __pgdat->node_start_pfn; \
-})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -70,7 +41,7 @@
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
-#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
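
With DISCONTIGMEM gone, the FLATMEM conversion is plain array arithmetic over mem_map. Written out as functions for clarity (the flat_* names are illustrative; the macros above are the real interface):

static inline struct page *flat_pfn_to_page(unsigned long pfn)
{
	return mem_map + (pfn - ARCH_PFN_OFFSET);
}

static inline unsigned long flat_page_to_pfn(const struct page *page)
{
	return (unsigned long)(page - mem_map) + ARCH_PFN_OFFSET;
}
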
/*
* Convert a physical address to a Page Frame Number and back
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index ce2cbb3c380f..2f6b1befb129 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -9,7 +9,6 @@
typedef struct { pgd_t pgd; } p4d_t;
#define P4D_SHIFT PGDIR_SHIFT
-#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define P4D_SIZE (1UL << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d683f5e6d791..b4d43a4af5f7 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 5aa8705df87e..4dbe715be65b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,7 +45,7 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #ifdef CONFIG_NUMA
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 40a9c101565e..17325416e2de 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -960,6 +960,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
+ *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h
index c395238d0922..9b435caa95fe 100644
--- a/include/clocksource/samsung_pwm.h
+++ b/include/clocksource/samsung_pwm.h
@@ -27,6 +27,7 @@ struct samsung_pwm_variant {
};
void samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant);
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant);
#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 4c61dade8835..f6da8a132639 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -74,6 +74,7 @@
#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
struct timer_regs {
+ u32 ocp_cfg;
u32 tidr;
u32 tier;
u32 twer;
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index e728469c4ccc..5af914c1ab8e 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -490,7 +490,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
- * - AEAD encryption output: assoc data || cipherntext || auth tag
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 86f0748009af..5f6841c73e5a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -96,6 +96,15 @@ struct scatter_walk {
unsigned int offset;
};
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -118,7 +127,6 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 3f06e40d063a..26cac19b0f46 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -28,7 +28,7 @@
* of a failed backlog request
* crypto-engine, in head position to keep order
* @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
+ * @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
* @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index b2bc1e46e86a..f140e4643949 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -458,7 +458,7 @@ int crypto_ahash_finup(struct ahash_request *req);
*
* Return:
* 0 if the message digest was successfully calculated;
- * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0a288dddcf5b..25806141db59 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen);
-
-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
-{
- return alg->setkey != shash_no_setkey;
-}
+bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index fef3ef65967f..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -106,6 +106,10 @@
* <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
/* SOP Product Type (UFP) */
#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
@@ -163,10 +167,10 @@
#define UFP_VDO_VER1_2 2
/* Device Capability */
-#define DEV_USB2_CAPABLE BIT(0)
-#define DEV_USB2_BILLBOARD BIT(1)
-#define DEV_USB3_CAPABLE BIT(2)
-#define DEV_USB4_CAPABLE BIT(3)
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
/* Connector Type */
#define UFP_RECEPTACLE 2
@@ -191,9 +195,9 @@
/* Alternate Modes */
#define UFP_ALTMODE_NOT_SUPP 0
-#define UFP_ALTMODE_TBT3 BIT(0)
-#define UFP_ALTMODE_RECFG BIT(1)
-#define UFP_ALTMODE_NO_RECFG BIT(2)
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
/* USB Highest Speed */
#define UFP_USB2_ONLY 0
@@ -217,9 +221,9 @@
* <4:0> :: Port number
*/
#define DFP_VDO_VER1_1 1
-#define HOST_USB2_CAPABLE BIT(0)
-#define HOST_USB3_CAPABLE BIT(1)
-#define HOST_USB4_CAPABLE BIT(2)
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
#define DFP_RECEPTACLE 2
#define DFP_CAPTIVE 3
@@ -228,7 +232,25 @@
| ((pnum) & 0x1f))
/*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -244,7 +266,7 @@
* <4:3> :: Reserved, Shall be set to zero
* <2:0> :: USB highest speed
*
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -266,7 +288,9 @@
#define CABLE_VDO_VER1_0 0
#define CABLE_VDO_VER1_3 3
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
#define CABLE_CAPTIVE 3
@@ -303,12 +327,22 @@
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
/* USB Highest Speed */
#define CABLE_USB2_ONLY 0
#define CABLE_USB32_GEN1 1
#define CABLE_USB32_4_GEN2 2
#define CABLE_USB4_GEN3 3
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
| ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
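
A hypothetical value built with the new PD Rev2.0 cable macro, using the field definitions above (all concrete values below are illustrative, not from a real product):

u32 example_cable_vdo =
	VDO_CABLE(1, 0,			/* HW ver 1, FW ver 0 */
		  CABLE_CTYPE,		/* Type-C to Type-C */
		  1,			/* latency: <10 ns (~1 m) */
		  0,			/* termination type */
		  0, 0, 0, 0,		/* SSTX/SSRX direction fixed */
		  CABLE_CURR_3A,	/* 3 A Vbus handling */
		  1,			/* Vbus through cable */
		  0,			/* no SOP" controller */
		  CABLE_USBSS_U31_GEN1);/* USB 3.1 Gen1 signaling */
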
@@ -374,6 +408,35 @@
| (iso) << 2 | (gen))
/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
* VPD VDO
* ---------
* <31:28> :: HW version
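A minimal usage sketch of the new PD Rev2.0 cable macro (illustrative values, not part of this patch): VDO_CABLE() packs the thirteen fields in the bit layout documented above, and PD_VDO_AMA_VCONN_REQ() extracts bit 4 of a previously received AMA VDO (ama_vdo below is an assumed local).

	/* Sketch: passive Type-C cable, <10ns latency, 3A Vbus, USB 3.1 Gen1 */
	u32 cable = VDO_CABLE(1, 0,			/* HW 1, FW 0 */
			      CABLE_CTYPE,		/* Type-C plug */
			      1,			/* latency code: <10ns (~1m) */
			      0,			/* termination type code */
			      0, 0, 0, 0,		/* SSTX/SSRX fixed, not cfgable */
			      CABLE_CURR_3A,		/* 3A Vbus handling */
			      1, 0,			/* Vbus through cable, no SOP" */
			      CABLE_USBSS_U31_GEN1);	/* USB 3.1 Gen1 signaling */

	/* AMA helper: does this adapter require VCONN? */
	bool needs_vconn = PD_VDO_AMA_VCONN_REQ(ama_vdo);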
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 49601c4b98b8..524d4789af22 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -515,8 +515,9 @@ kunit_find_resource(struct kunit *test,
void *match_data)
{
struct kunit_resource *res, *found = NULL;
+ unsigned long flags;
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
list_for_each_entry_reverse(res, &test->resources, node) {
if (match(test, res, (void *)match_data)) {
@@ -526,7 +527,7 @@ kunit_find_resource(struct kunit *test,
}
}
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
return found;
}
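The switch to the irqsave variants matters because a lock that may be taken with interrupts disabled must save and restore the caller's interrupt state rather than assume it; spin_lock_irqsave() disables interrupts while the lock is held and spin_unlock_irqrestore() puts them back exactly as found, so kunit_find_resource() can now be called from contexts that already have interrupts off. A sketch of the same discipline (the caller is hypothetical):

	static void walk_resources(struct kunit *test)
	{
		unsigned long flags;

		/* save current IRQ state, disable IRQs, take the lock */
		spin_lock_irqsave(&test->lock, flags);
		/* ... traverse test->resources ... */
		/* drop the lock and restore the saved IRQ state */
		spin_unlock_irqrestore(&test->lock, flags);
	}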
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ec621180ef09..e602d848fc1a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -72,6 +72,9 @@ struct vgic_global {
bool has_gicv4;
bool has_gicv4_1;
+ /* Pseudo GICv3 from outer space */
+ bool no_hw_deactivation;
+
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -89,6 +92,26 @@ enum vgic_irq_config {
VGIC_CONFIG_LEVEL
};
+/*
+ * Per-irq ops overriding some common behaviours.
+ *
+ * Always called in a non-preemptible section and the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
+ */
+struct irq_ops {
+ /* Per interrupt flags for special-cased interrupts */
+ unsigned long flags;
+
+#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */
+
+ /*
+ * Callback function pointer to in-kernel devices that can tell us the
+ * state of the input level of a mapped level-triggered IRQ faster than
+ * peeking into the physical GIC.
+ */
+ bool (*get_input_level)(int vintid);
+};
+
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
struct list_head lpi_list; /* Used to link all LPIs together */
@@ -126,21 +149,17 @@ struct vgic_irq {
u8 group; /* 0 == group 0, 1 == group 1 */
enum vgic_irq_config config; /* Level or edge */
- /*
- * Callback function pointer to in-kernel devices that can tell us the
- * state of the input level of mapped level-triggered IRQ faster than
- * peaking into the physical GIC.
- *
- * Always called in non-preemptible section and the functions can use
- * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
- * IRQs.
- */
- bool (*get_input_level)(int vintid);
+ struct irq_ops *ops;
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
+static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
+{
+ return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
+}
+
struct vgic_register_region;
struct vgic_its;
@@ -352,7 +371,7 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid));
+ u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
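How an in-kernel device might use the new per-IRQ ops (the my_* names and surrounding locals are hypothetical): setting VGIC_IRQ_SW_RESAMPLE asks the vgic to clear the active state in software for resampling, and the ops structure replaces the bare get_input_level callback previously passed to kvm_vgic_map_phys_irq().

	static bool my_dev_get_input_level(int vintid)
	{
		/* hypothetical device state; runs with preemption disabled */
		return my_dev_line_asserted(vintid);
	}

	static struct irq_ops my_dev_irq_ops = {
		.flags		  = VGIC_IRQ_SW_RESAMPLE,
		.get_input_level  = my_dev_get_input_level,
	};

	/* at mapping time, pass the ops struct instead of the callback: */
	ret = kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &my_dev_irq_ops);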
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c60745f657e9..b338613fb536 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -132,6 +132,7 @@ enum acpi_address_range_id {
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
@@ -550,6 +551,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_PRM_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
extern bool osc_sb_apei_support_acked;
@@ -666,7 +668,6 @@ extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
struct property_entry *);
@@ -710,6 +711,8 @@ static inline u64 acpi_arch_get_root_pointer(void)
}
#endif
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -765,7 +768,7 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -775,12 +778,12 @@ static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -911,7 +914,7 @@ acpi_create_platform_device(struct acpi_device *adev,
return NULL;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -965,6 +968,11 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -1004,6 +1012,7 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
#else
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1011,6 +1020,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return 0;
}
+static inline bool acpi_storage_d3(struct device *dev)
+{
+ return false;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -1096,6 +1109,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
@@ -1103,6 +1118,11 @@ static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
{
return false;
}
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev,
const char *name, int index)
{
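A sketch of how a storage driver might consult the new acpi_storage_d3() helper; the my_* helpers are hypothetical, and since the !CONFIG_ACPI stub above simply returns false, the non-ACPI path is unchanged:

	static int my_storage_suspend(struct device *dev)
	{
		/* platform firmware asks for D3 across suspend */
		if (acpi_storage_d3(dev))
			return my_enter_d3(dev);	/* hypothetical */

		return my_lightweight_park(dev);	/* hypothetical */
	}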
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..0a24ab7cb66f
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+ * Fall back to mdiobus_register() to register the bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
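Usage from a hypothetical MAC driver probe (bus is a previously allocated struct mii_bus, pdev the platform device): with CONFIG_ACPI_MDIO enabled the fwnode's children are scanned for PHYs, while the inline stub above keeps the call compiling and degrades to a plain mdiobus_register() when the option is off.

	ret = acpi_mdiobus_register(bus, dev_fwnode(&pdev->dev));
	if (ret)
		dev_err(&pdev->dev, "MDIO bus registration failed: %d\n", ret);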
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index f180240dc95f..11e555cfaecb 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
- SCALE_FREQ_SOURCE_CPPC,
};
struct scale_freq_data {
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 6861489a1890..7d1cabe15262 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -63,6 +63,9 @@
#define ARM_SMCCC_VERSION_1_0 0x10000
#define ARM_SMCCC_VERSION_1_1 0x10001
#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -216,6 +219,8 @@ u32 arm_smccc_get_version(void);
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+extern u64 smccc_has_sve_hint;
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -227,6 +232,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied parameters are copied from the
+ * structure to registers prior to the HVC instruction. The return values
+ * are updated with the content from registers on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied parameters are copied from the
+ * structure to registers prior to the SMC instruction. The return values
+ * are updated with the content from registers on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -241,6 +301,15 @@ struct arm_smccc_quirk {
};
/**
+ * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
+ *
+ * Sets the SMCCC hint bit to indicate if there is live state in the SVE
+ * registers; this modifies x0 in place and should never be called from C
+ * code.
+ */
+asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
+
+/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -297,6 +366,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
+/* nVHE hypervisor doesn't have a current thread so needs separate checks */
+#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
+
+#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
+ ARM64_SVE)
+#define smccc_sve_clobbers "x16", "x30", "cc",
+
+#else
+
+#define SMCCC_SVE_CHECK
+#define smccc_sve_clobbers
+
+#endif
+
#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
#define __count_args(...) \
@@ -364,7 +447,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define ___constraints(count) \
: __constraint_read_ ## count \
- : "memory"
+ : smccc_sve_clobbers "memory"
#define __constraints(count) ___constraints(count)
/*
@@ -379,7 +462,8 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3"); \
__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm volatile(inst "\n" : \
+ asm volatile(SMCCC_SVE_CHECK \
+ inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
__constraints(__count_args(__VA_ARGS__))); \
if (___res) \
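Invoking an SMCCC v1.2 call through the new extended-register interface (the function ID below is purely illustrative). Note that on SVE-capable kernels the invocation macros above also patch in a branch to __arm_smccc_sve_check via the ARM64_SVE alternative, so the SVE live-state hint bit is set transparently on x0.

	struct arm_smccc_1_2_regs args = {
		.a0 = ARM_SMCCC_VERSION_FUNC_ID,	/* illustrative function ID */
		/* .a1 .. .a17 available for arguments */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 .. res.a17 carry the return values */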
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..1b44f40c7700 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -13,7 +13,7 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
deleted file mode 100644
index 2a3f55d98be9..000000000000
--- a/include/linux/atomic-fallback.h
+++ /dev/null
@@ -1,2595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef xchg_relaxed
-#define xchg_acquire xchg
-#define xchg_release xchg
-#define xchg_relaxed xchg
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) \
- __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) \
- __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) \
- __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-
-#endif /* xchg_relaxed */
-
-#ifndef cmpxchg_relaxed
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-#define cmpxchg_relaxed cmpxchg
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg_relaxed */
-
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-#define cmpxchg64_relaxed cmpxchg64
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg64_relaxed */
-
-#ifndef try_cmpxchg_relaxed
-#ifdef try_cmpxchg
-#define try_cmpxchg_acquire try_cmpxchg
-#define try_cmpxchg_release try_cmpxchg
-#define try_cmpxchg_relaxed try_cmpxchg
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_acquire */
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_release((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_release */
-
-#ifndef try_cmpxchg_relaxed
-#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_relaxed */
-
-#else /* try_cmpxchg_relaxed */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(...) \
- __atomic_op_acquire(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(...) \
- __atomic_op_release(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(...) \
- __atomic_op_fence(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* try_cmpxchg_relaxed */
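For reference, the calling pattern these deleted try_cmpxchg() fallbacks served is unaffected by the removal (the atomic_*() API remains available; this file was merely one generated provider of it). A minimal sketch of the idiom:

	static inline void sketch_atomic_inc(atomic_t *v)
	{
		int old = atomic_read(v);

		/* on failure, try_cmpxchg() writes the observed value into
		 * 'old', so the loop retries with a fresh expectation */
		while (!atomic_try_cmpxchg(v, &old, old + 1))
			;
	}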
-
-#define arch_atomic_read atomic_read
-#define arch_atomic_read_acquire atomic_read_acquire
-
-#ifndef atomic_read_acquire
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-#define arch_atomic_set atomic_set
-#define arch_atomic_set_release atomic_set_release
-
-#ifndef atomic_set_release
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-#define arch_atomic_add atomic_add
-
-#define arch_atomic_add_return atomic_add_return
-#define arch_atomic_add_return_acquire atomic_add_return_acquire
-#define arch_atomic_add_return_release atomic_add_return_release
-#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
-
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-#define atomic_add_return_relaxed atomic_add_return
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#ifndef atomic_add_return_release
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#ifndef atomic_add_return
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#endif /* atomic_add_return_relaxed */
-
-#define arch_atomic_fetch_add atomic_fetch_add
-#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define arch_atomic_fetch_add_release atomic_fetch_add_release
-#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#ifndef atomic_fetch_add_release
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#ifndef atomic_fetch_add
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#endif /* atomic_fetch_add_relaxed */
-
-#define arch_atomic_sub atomic_sub
-
-#define arch_atomic_sub_return atomic_sub_return
-#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
-#define arch_atomic_sub_return_release atomic_sub_return_release
-#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
-
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-#define atomic_sub_return_relaxed atomic_sub_return
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#ifndef atomic_sub_return_release
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#ifndef atomic_sub_return
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#endif /* atomic_sub_return_relaxed */
-
-#define arch_atomic_fetch_sub atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
-#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#ifndef atomic_fetch_sub_release
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#ifndef atomic_fetch_sub
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#endif /* atomic_fetch_sub_relaxed */
-
-#define arch_atomic_inc atomic_inc
-
-#ifndef atomic_inc
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#define arch_atomic_inc_return atomic_inc_return
-#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
-#define arch_atomic_inc_return_release atomic_inc_return_release
-#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return_relaxed
-#ifdef atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#define atomic_inc_return_relaxed atomic_inc_return
-#endif /* atomic_inc_return */
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- return atomic_add_return(1, v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- return atomic_add_return_acquire(1, v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- return atomic_add_return_release(1, v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return_relaxed
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- return atomic_add_return_relaxed(1, v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#endif /* atomic_inc_return_relaxed */
-
-#define arch_atomic_fetch_inc atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
-#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc_relaxed
-#ifdef atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- return atomic_fetch_add(1, v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- return atomic_fetch_add_acquire(1, v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- return atomic_fetch_add_release(1, v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc_relaxed
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return atomic_fetch_add_relaxed(1, v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#endif /* atomic_fetch_inc_relaxed */
-
-#define arch_atomic_dec atomic_dec
-
-#ifndef atomic_dec
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#define arch_atomic_dec_return atomic_dec_return
-#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
-#define arch_atomic_dec_return_release atomic_dec_return_release
-#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return_relaxed
-#ifdef atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#define atomic_dec_return_relaxed atomic_dec_return
-#endif /* atomic_dec_return */
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- return atomic_sub_return(1, v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- return atomic_sub_return_acquire(1, v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- return atomic_sub_return_release(1, v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return_relaxed
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- return atomic_sub_return_relaxed(1, v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#endif /* atomic_dec_return_relaxed */
-
-#define arch_atomic_fetch_dec atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
-#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec_relaxed
-#ifdef atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- return atomic_fetch_sub(1, v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- return atomic_fetch_sub_acquire(1, v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- return atomic_fetch_sub_release(1, v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec_relaxed
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return atomic_fetch_sub_relaxed(1, v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#endif /* atomic_fetch_dec_relaxed */
-
-#define arch_atomic_and atomic_and
-
-#define arch_atomic_fetch_and atomic_fetch_and
-#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define arch_atomic_fetch_and_release atomic_fetch_and_release
-#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#ifndef atomic_fetch_and_release
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#ifndef atomic_fetch_and
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#endif /* atomic_fetch_and_relaxed */
-
-#define arch_atomic_andnot atomic_andnot
-
-#ifndef atomic_andnot
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#define arch_atomic_fetch_andnot atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot_relaxed
-#ifdef atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#endif /* atomic_fetch_andnot_relaxed */
-
-#define arch_atomic_or atomic_or
-
-#define arch_atomic_fetch_or atomic_fetch_or
-#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define arch_atomic_fetch_or_release atomic_fetch_or_release
-#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#ifndef atomic_fetch_or_release
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#ifndef atomic_fetch_or
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#endif /* atomic_fetch_or_relaxed */
-
-#define arch_atomic_xor atomic_xor
-
-#define arch_atomic_fetch_xor atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
-#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#ifndef atomic_fetch_xor_release
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#ifndef atomic_fetch_xor
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#endif /* atomic_fetch_xor_relaxed */
-
-#define arch_atomic_xchg atomic_xchg
-#define arch_atomic_xchg_acquire atomic_xchg_acquire
-#define arch_atomic_xchg_release atomic_xchg_release
-#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
-
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#define atomic_xchg_relaxed atomic_xchg
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#ifndef atomic_xchg_release
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#ifndef atomic_xchg
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#endif /* atomic_xchg_relaxed */
-
-#define arch_atomic_cmpxchg atomic_cmpxchg
-#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
-#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#ifndef atomic_cmpxchg_release
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#ifndef atomic_cmpxchg
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#endif /* atomic_cmpxchg_relaxed */
-
-#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-
-#ifndef atomic_try_cmpxchg_relaxed
-#ifdef atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg_relaxed
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#endif /* atomic_try_cmpxchg_relaxed */
-
-#define arch_atomic_sub_and_test atomic_sub_and_test
-
-#ifndef atomic_sub_and_test
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#define arch_atomic_dec_and_test atomic_dec_and_test
-
-#ifndef atomic_dec_and_test
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#define arch_atomic_inc_and_test atomic_inc_and_test
-
-#ifndef atomic_inc_and_test
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#define arch_atomic_add_negative atomic_add_negative
-
-#ifndef atomic_add_negative
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
-
-#ifndef atomic_fetch_add_unless
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#define arch_atomic_add_unless atomic_add_unless
-
-#ifndef atomic_add_unless
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#define arch_atomic_inc_not_zero atomic_inc_not_zero
-
-#ifndef atomic_inc_not_zero
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- return atomic_add_unless(v, 1, 0);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
-
-#ifndef atomic_inc_unless_negative
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
-
-#ifndef atomic_dec_unless_positive
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#define arch_atomic_dec_if_positive atomic_dec_if_positive
-
-#ifndef atomic_dec_if_positive
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#define arch_atomic64_read atomic64_read
-#define arch_atomic64_read_acquire atomic64_read_acquire
-
-#ifndef atomic64_read_acquire
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-#define arch_atomic64_set atomic64_set
-#define arch_atomic64_set_release atomic64_set_release
-
-#ifndef atomic64_set_release
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-#define arch_atomic64_add atomic64_add
-
-#define arch_atomic64_add_return atomic64_add_return
-#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
-#define arch_atomic64_add_return_release atomic64_add_return_release
-#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
-
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-#define atomic64_add_return_relaxed atomic64_add_return
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#ifndef atomic64_add_return_release
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#ifndef atomic64_add_return
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#endif /* atomic64_add_return_relaxed */
-
-#define arch_atomic64_fetch_add atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
-#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#ifndef atomic64_fetch_add_release
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#ifndef atomic64_fetch_add
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#endif /* atomic64_fetch_add_relaxed */
-
-#define arch_atomic64_sub atomic64_sub
-
-#define arch_atomic64_sub_return atomic64_sub_return
-#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define arch_atomic64_sub_return_release atomic64_sub_return_release
-#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#ifndef atomic64_sub_return_release
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#ifndef atomic64_sub_return
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#endif /* atomic64_sub_return_relaxed */
-
-#define arch_atomic64_fetch_sub atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#ifndef atomic64_fetch_sub_release
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#ifndef atomic64_fetch_sub
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#endif /* atomic64_fetch_sub_relaxed */
-
-#define arch_atomic64_inc atomic64_inc
-
-#ifndef atomic64_inc
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#define arch_atomic64_inc_return atomic64_inc_return
-#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
-#define arch_atomic64_inc_return_release atomic64_inc_return_release
-#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return_relaxed
-#ifdef atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- return atomic64_add_return(1, v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- return atomic64_add_return_acquire(1, v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- return atomic64_add_return_release(1, v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return atomic64_add_return_relaxed(1, v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#endif /* atomic64_inc_return_relaxed */
-
-#define arch_atomic64_fetch_inc atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
-#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc_relaxed
-#ifdef atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- return atomic64_fetch_add(1, v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return atomic64_fetch_add_acquire(1, v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- return atomic64_fetch_add_release(1, v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc_relaxed
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_add_relaxed(1, v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#endif /* atomic64_fetch_inc_relaxed */
-
-#define arch_atomic64_dec atomic64_dec
-
-#ifndef atomic64_dec
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#define arch_atomic64_dec_return atomic64_dec_return
-#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
-#define arch_atomic64_dec_return_release atomic64_dec_return_release
-#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return_relaxed
-#ifdef atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- return atomic64_sub_return(1, v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- return atomic64_sub_return_acquire(1, v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- return atomic64_sub_return_release(1, v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return atomic64_sub_return_relaxed(1, v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#endif /* atomic64_dec_return_relaxed */
-
-#define arch_atomic64_fetch_dec atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
-#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec_relaxed
-#ifdef atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- return atomic64_fetch_sub(1, v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return atomic64_fetch_sub_acquire(1, v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- return atomic64_fetch_sub_release(1, v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec_relaxed
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_sub_relaxed(1, v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#endif /* atomic64_fetch_dec_relaxed */
-
-#define arch_atomic64_and atomic64_and
-
-#define arch_atomic64_fetch_and atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
-#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#ifndef atomic64_fetch_and_release
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#ifndef atomic64_fetch_and
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#endif /* atomic64_fetch_and_relaxed */
-
-#define arch_atomic64_andnot atomic64_andnot
-
-#ifndef atomic64_andnot
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot_relaxed
-#ifdef atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#endif /* atomic64_fetch_andnot_relaxed */
-
-#define arch_atomic64_or atomic64_or
-
-#define arch_atomic64_fetch_or atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
-#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#ifndef atomic64_fetch_or_release
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#ifndef atomic64_fetch_or
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#endif /* atomic64_fetch_or_relaxed */
-
-#define arch_atomic64_xor atomic64_xor
-
-#define arch_atomic64_fetch_xor atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#ifndef atomic64_fetch_xor_release
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#ifndef atomic64_fetch_xor
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#endif /* atomic64_fetch_xor_relaxed */
-
-#define arch_atomic64_xchg atomic64_xchg
-#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
-#define arch_atomic64_xchg_release atomic64_xchg_release
-#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
-
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#define atomic64_xchg_relaxed atomic64_xchg
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#ifndef atomic64_xchg_release
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#ifndef atomic64_xchg
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#endif /* atomic64_xchg_relaxed */
-
-#define arch_atomic64_cmpxchg atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
-#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_cmpxchg_release
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#ifndef atomic64_cmpxchg
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#endif /* atomic64_cmpxchg_relaxed */
-
-#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-
-#ifndef atomic64_try_cmpxchg_relaxed
-#ifdef atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#endif /* atomic64_try_cmpxchg_relaxed */
-
-#define arch_atomic64_sub_and_test atomic64_sub_and_test
-
-#ifndef atomic64_sub_and_test
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#define arch_atomic64_dec_and_test atomic64_dec_and_test
-
-#ifndef atomic64_dec_and_test
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#define arch_atomic64_inc_and_test atomic64_inc_and_test
-
-#ifndef atomic64_inc_and_test
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#define arch_atomic64_add_negative atomic64_add_negative
-
-#ifndef atomic64_add_negative
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-#ifndef atomic64_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#define arch_atomic64_add_unless atomic64_add_unless
-
-#ifndef atomic64_add_unless
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
-
-#ifndef atomic64_inc_not_zero
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
-
-#ifndef atomic64_inc_unless_negative
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
-
-#ifndef atomic64_dec_unless_positive
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
-
-#ifndef atomic64_dec_if_positive
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// d78e6c293c661c15188f0ec05bce45188c8d5892
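
The _unless/_if_positive helpers in the removed header all rely on the same atomic64_try_cmpxchg() loop: on failure the current value is written back through *old, so the loop never re-reads explicitly. A minimal sketch of that pattern, with clamp_add64() as a made-up name for illustration only:

	/* hypothetical helper, not part of this patch */
	static inline void clamp_add64(atomic64_t *v, s64 i, s64 max)
	{
		s64 old = atomic64_read(v);

		do {
			if (old + i > max)
				return;		/* refuse to overshoot the limit */
		} while (!atomic64_try_cmpxchg(v, &old, old + i));
		/* a failed attempt updated 'old' to the latest value, so
		 * the next iteration recomputes against fresh state */
	}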
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 571a11008ab5..ed1d3ffd5b9d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,12 +77,8 @@
__ret; \
})
-#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
-#else
-#include <linux/atomic-fallback.h>
-#endif
#include <asm-generic/atomic-long.h>
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 565deea6ffe8..db0e099c2399 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -412,9 +412,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward, VF drivers should no longer set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY; it exists only to avoid breaking previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best-effort method
+ * of tracking the primary/device unicast address in this case, but there is no
+ * guarantee, and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_ADD_ETH_ADDR/VIRTCHNL_OP_DEL_ETH_ADDR, respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
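
Under the new convention, a VF driver adding its primary MAC filter sets @type explicitly; a sketch (assumes a netdev pointer in scope, and omits the VIRTCHNL_OP_ADD_ETH_ADDR message plumbing):

	struct virtchnl_ether_addr ea = {};

	ether_addr_copy(ea.addr, netdev->dev_addr);	/* ETH_ALEN bytes */
	ea.type = VIRTCHNL_ETHER_ADDR_PRIMARY;		/* PF tracks this address */
	/* extra unicast/multicast filters would use VIRTCHNL_ETHER_ADDR_EXTRA */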
@@ -830,6 +857,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
struct virtchnl_proto_hdrs {
u8 tunnel_level;
+ u8 pad[3];
/**
* specify where protocol header start from.
* 0 - from the outer layer
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index fff9367a6348..1d7edad9914f 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -154,6 +154,8 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
union {
struct work_struct release_work;
@@ -239,8 +241,9 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
- * wb_put - decrement a wb's refcount
+ * wb_put_many - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
if (WARN_ON_ONCE(!wb->bdi)) {
/*
@@ -251,7 +254,16 @@ static inline void wb_put(struct bdi_writeback *wb)
}
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -280,6 +292,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
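
wb_put_many() exists so a caller releasing a batch of references does it in one percpu operation; roughly, with nr_refs being however many references the caller accumulated:

	/* before: while (nr_refs--) wb_put(wb); */
	wb_put_many(wb, nr_refs);	/* one percpu_ref_put_many() underneath */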
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a0b4cfdf62a4..2203b686e1f0 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -44,9 +44,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-
#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
@@ -271,7 +268,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
- *bv = bio_iovec(bio);
+ *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
@@ -279,10 +276,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
struct bvec_iter iter = bio->bi_iter;
int idx;
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
+ bio_get_first_bvec(bio, bv);
+ if (bv->bv_len == bio->bi_iter.bi_size)
+ return; /* this bio only has a single bvec */
bio_advance_iter(bio, &iter, iter.bi_size);
@@ -822,4 +818,6 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio->bi_opf |= REQ_NOWAIT;
}
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+
#endif /* __LINUX_BIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 359486940fa0..fd2de2b422ed 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -426,19 +426,29 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
+#define blk_mq_alloc_disk(set, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata); \
+ \
+ if (!IS_ERR(__disk)) \
+ lockdep_init_map(&__disk->lockdep_map, \
+ "(bio completion)", &__key, 0); \
+ __disk; \
+})
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
+ void *queuedata);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
void *queuedata);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q,
- bool elevator_init);
-struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops,
- unsigned int queue_depth,
- unsigned int set_flags);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
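
A hedged sketch of the new allocation helper in a driver probe path ('mydev' is a made-up driver structure holding a populated blk_mq_tag_set):

	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&mydev->tag_set, mydev);
	if (IS_ERR(disk))
		return PTR_ERR(disk);
	/*
	 * The per-call-site lock_class_key in the macro gives each driver
	 * its own lockdep class for bio completion instead of one shared
	 * class, avoiding false-positive lockdep reports across drivers.
	 */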
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e5cf12f102a2..b05f2fd495fc 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -29,7 +29,6 @@ struct block_device {
int bd_openers;
struct inode * bd_inode; /* will die */
struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
void * bd_claiming;
struct device bd_device;
void * bd_holder;
@@ -40,9 +39,6 @@ struct block_device {
#endif
struct kobject *bd_holder_dir;
u8 bd_partno;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
-
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
struct gendisk * bd_disk;
struct backing_dev_info *bd_bdi;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2db0f376f5d9..103acc5228e7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -25,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/pm.h>
+#include <linux/sbitmap.h>
struct module;
struct scsi_ioctl_command;
@@ -493,6 +494,9 @@ struct request_queue {
atomic_t nr_active_requests_shared_sbitmap;
+ struct sbitmap_queue sched_bitmap_tags;
+ struct sbitmap_queue sched_breserved_tags;
+
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
@@ -1221,7 +1225,6 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
extern void blk_dump_rq_flags(struct request *, char *);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 02b02cb29ce2..f309fc1509f2 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -22,6 +22,7 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
+#include <linux/bpfptr.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -69,6 +70,8 @@ struct bpf_map_ops {
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
@@ -1428,7 +1431,7 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
-int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
@@ -1459,7 +1462,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
-int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
@@ -1479,8 +1482,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
- union bpf_attr __user *uattr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
@@ -1499,8 +1501,13 @@ int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
bool dev_map_can_have_prog(struct bpf_map *map);
void __cpu_map_flush(void);
@@ -1668,6 +1675,13 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
return 0;
}
+static inline
+int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
@@ -1677,6 +1691,14 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
return 0;
}
+static inline
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress)
+{
+ return 0;
+}
+
static inline void __cpu_map_flush(void)
{
}
@@ -1826,6 +1848,9 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -1851,6 +1876,13 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
+
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
@@ -1964,6 +1996,7 @@ extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -2015,6 +2048,7 @@ struct sk_reuseport_kern {
struct sk_buff *skb;
struct sock *sk;
struct sock *selected_sk;
+ struct sock *migrating_sk;
void *data_end;
u32 hash;
u32 reuseport_id;
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index b902c580c48d..24496bc28e7b 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -58,7 +58,7 @@ struct bpf_local_storage_data {
* from the object's bpf_local_storage.
*
* Put it in the same cacheline as the data to minimize
- * the number of cachelines access during the cache hit case.
+ * the number of cachelines accessed during the cache hit case.
*/
struct bpf_local_storage_map __rcu *smap;
u8 data[] __aligned(8);
@@ -71,7 +71,7 @@ struct bpf_local_storage_elem {
struct bpf_local_storage __rcu *local_storage;
struct rcu_head rcu;
/* 8 bytes hole */
- /* The data is stored in aother cacheline to minimize
+ /* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
struct bpf_local_storage_data sdata ____cacheline_aligned;
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index f883f01a5061..a9db1eae6796 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -77,6 +77,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
void *, void *)
#endif /* CONFIG_BPF_LSM */
#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 06841517ab1e..e774ecc1cd1f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -215,6 +215,13 @@ struct bpf_idx_pair {
u32 idx;
};
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
@@ -418,6 +425,7 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
struct {
int *insn_state;
int *insn_stack;
@@ -442,6 +450,7 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ bpfptr_t fd_array;
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..5cdeab497cb3
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *memdup_bpfptr(bpfptr_t src, size_t len)
+{
+ return memdup_sockptr((sockptr_t) src, len);
+}
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ return strncpy_from_sockptr(dst, (sockptr_t) src, count);
+}
+
+#endif /* _LINUX_BPFPTR_H */
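
One bpfptr_t-based helper can then serve both the classic syscall path (user memory) and BPF_PROG_TYPE_SYSCALL programs (kernel memory); a sketch, with struct my_cmd as a hypothetical command header:

	static int read_cmd(struct my_cmd *cmd, bpfptr_t uattr)
	{
		if (bpfptr_is_null(uattr))
			return -EINVAL;
		if (copy_from_bpfptr(cmd, uattr, sizeof(*cmd)))
			return -EFAULT;	/* same call for either address space */
		return 0;
	}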
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 3bac66e0183a..94a0c976c90f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -21,7 +21,7 @@ extern const struct file_operations btf_fops;
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
-int btf_new_fd(const union bpf_attr *attr);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 71b5d481c653..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -50,7 +50,7 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len);
@@ -104,6 +104,8 @@ struct ceph_auth_client {
struct mutex mutex;
};
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
struct ceph_auth_client *ceph_auth_init(const char *name,
const struct ceph_crypto_key *key,
const int *con_modes);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 559ee05f86b2..fb8f6d2cd104 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -232,7 +232,7 @@ struct css_set {
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -668,7 +668,7 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -757,7 +757,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
* On boot, sock_cgroup_data records the cgroup that the sock was created
* in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
+ * net_cls starts being used, the area is overridden to carry prioidx and/or
* classid. The two modes are distinguished by whether the lowest bit is
* set. Clear bit indicates cgroup pointer while set bit prioidx and
* classid.
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4f2f79de083e..6bc9c76680b2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -32,7 +32,7 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d6ab416ee2d2..1d42d4b17327 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -43,6 +43,8 @@ struct module;
* @shift: Cycle to nanosecond divisor (power of two)
* @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
* @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * Zero says to use default WATCHDOG_THRESHOLD.
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
@@ -98,6 +100,7 @@ struct clocksource {
u32 shift;
u64 max_idle_ns;
u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
@@ -137,7 +140,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -288,4 +291,7 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+extern ulong max_cswd_read_retries;
+void clocksource_verify_percpu(struct clocksource *cs);
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index df5b405e6305..b67261a1e3e9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
-#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
+#define __stringify_label(n) #n
+
+#define __annotate_reachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
-#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
+#define annotate_reachable() __annotate_reachable(__COUNTER__)
+
+#define __annotate_unreachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+
#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \
@@ -213,6 +219,16 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__v; \
})
+/*
+ * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
+ * instrumented C code with jump table addresses. Architectures that
+ * support CFI can define this macro to return the actual function address
+ * when needed.
+ */
+#ifndef function_nocfi
+#define function_nocfi(x) (x)
+#endif
+
#endif /* __KERNEL__ */
/*
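
function_nocfi() matters wherever the raw machine address must escape C, for example when handing an entry point to hardware or firmware; an illustrative sketch (my_entry() and write_boot_vector() are made-up names):

	unsigned long addr = (unsigned long)function_nocfi(my_entry);

	write_boot_vector(addr);	/* must be the real function body,
					 * not the CFI jump-table stub */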
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index c043b8d2b17b..2487be0e7199 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -27,15 +27,16 @@
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___assume_aligned__ 1
# define __GCC4_has_attribute___copy__ 0
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___no_caller_saved_registers__ 0
# define __GCC4_has_attribute___noclone__ 1
+# define __GCC4_has_attribute___no_profile_instrument_function__ 0
# define __GCC4_has_attribute___nonstring__ 0
-# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
-# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___no_sanitize_address__ 1
+# define __GCC4_has_attribute___no_sanitize_undefined__ 1
# define __GCC4_has_attribute___fallthrough__ 0
#endif
@@ -199,6 +200,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
@@ -238,6 +240,18 @@
#endif
/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
* clang: https://clang.llvm.org/docs/AttributeReference.html#id1
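
__no_profile keeps compiler-generated profiling counters (gcov/PGO-style instrumentation) out of functions that run before or outside the instrumented environment; a sketch with a hypothetical helper:

	static __no_profile void early_idle_poll(void)
	{
		/* a profile-counter update here could touch state that
		 * is not set up yet; the attribute suppresses it */
	}

As the compiler_types.h hunk below shows, the noinstr section now gets this attribute by default.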
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d29bda7f6ebd..d509169860f1 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -210,7 +210,7 @@ struct ftrace_likely_data {
/* Section for code which can't be instrumented at all */
#define noinstr \
noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address
+ __no_kcsan __no_sanitize_address __no_profile
#endif /* __KERNEL__ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 4a62b3980642..47e13582d9fc 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -54,7 +54,7 @@ enum cpuhp_state {
CPUHP_MM_MEMCQ_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 14971322e1a0..fcbc6885cc09 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -143,6 +143,7 @@ struct cred {
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
/* RCU deletion */
union {
@@ -169,6 +170,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
+extern int set_cred_ucounts(struct cred *);
/*
* check for validity of credentials
@@ -369,6 +371,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -385,6 +388,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index da5e0d74bb2f..855869e1fd32 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -643,32 +643,6 @@ struct crypto_comp {
struct crypto_tfm base;
};
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
-
-struct crypto_attr_u32 {
- u32 num;
-};
-
/*
* Transform user interface.
*/
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 2915f56ad421..edb5c186b0b7 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 21651f946751..af7e6eb50283 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -58,16 +58,22 @@ struct task_delay_info {
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
+extern int sysctl_delayacct(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
extern void __delayacct_blkio_end(struct task_struct *);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
@@ -114,6 +120,9 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
delayacct_set_flag(current, DELAYACCT_PF_BLKIO);
if (current->delays)
__delayacct_blkio_start();
@@ -121,19 +130,14 @@ static inline void delayacct_blkio_start(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (p->delays)
__delayacct_blkio_end(p);
delayacct_clear_flag(p, DELAYACCT_PF_BLKIO);
}
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
-}
-
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
{
if (tsk->delays)
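For illustration only (not part of the patch): a condensed sketch of the static-key pattern the new delayacct fast paths rely on. The sysctl handler flips the key at runtime, so a disabled fast path costs just a patched branch. All "example" names are hypothetical:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(example_key);

	static void example_set_enabled(bool enabled)
	{
		/* Patches every branch site in the kernel text. */
		if (enabled)
			static_branch_enable(&example_key);
		else
			static_branch_disable(&example_key);
	}

	static void example_fast_path(void)
	{
		/* When the key is disabled this branch is patched to
		 * fall straight through, with no load or compare.
		 */
		if (!static_branch_unlikely(&example_key))
			return;
		/* slow-path accounting work goes here */
	}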
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 6f009559ee54..82d3d46005a1 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -236,7 +236,7 @@ do { \
* using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
#define dev_WARN_ONCE(dev, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: " format, \
diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..4cd200f8b47a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -399,7 +399,7 @@ struct dev_links_info {
* along with subsystem-level and driver-level callbacks.
* @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/driver-api/pinctl.rst for details.
+ * See Documentation/driver-api/pin-control.rst for details.
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
@@ -570,7 +570,7 @@ struct device {
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
* @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
@@ -583,9 +583,7 @@ struct device_link {
u32 flags;
refcount_t rpm_active;
struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
+ struct work_struct rm_work;
bool supplier_preactivated; /* Owned by consumer probe. */
};
@@ -819,6 +817,7 @@ int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
static inline int dev_num_vf(struct device *dev)
{
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
index f40f77717a24..74891802200d 100644
--- a/include/linux/devm-helpers.h
+++ b/include/linux/devm-helpers.h
@@ -51,4 +51,29 @@ static inline int devm_delayed_work_autocancel(struct device *dev,
return devm_add_action(dev, devm_delayed_work_drop, w);
}
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device whose lifetime the work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when the driver is
+ * detached. A few drivers need to queue work which must be cancelled
+ * before the driver is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
+
#endif
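A minimal usage sketch (hypothetical "foo" driver, not part of the patch): the work item lives in device-managed memory and is cancelled automatically on detach, so the driver needs no explicit cleanup:

	struct foo_priv {
		struct work_struct work;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* deferred processing on driver-owned resources */
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		int ret;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* Cancelled via devm action before priv is freed. */
		ret = devm_work_autocancel(&pdev->dev, &priv->work, foo_work_fn);
		if (ret)
			return ret;

		schedule_work(&priv->work);
		return 0;
	}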
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index b12b05f1c8b4..1587961f1a7b 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -37,8 +37,6 @@ struct dsa_8021q_context {
#define DSA_8021Q_N_SUBVLAN 8
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
-
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
@@ -52,6 +50,9 @@ int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
+void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
+ int *subvlan);
+
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
@@ -70,78 +71,4 @@ bool vid_is_dsa_8021q_txvlan(u16 vid);
bool vid_is_dsa_8021q(u16 vid);
-#else
-
-int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port)
-{
- return 0;
-}
-
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci)
-{
- return NULL;
-}
-
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
-{
- return 0;
-}
-
-int dsa_8021q_rx_switch_id(u16 vid)
-{
- return 0;
-}
-
-int dsa_8021q_rx_source_port(u16 vid)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_subvlan(u16 vid)
-{
- return 0;
-}
-
-bool vid_is_dsa_8021q_rxvlan(u16 vid)
-{
- return false;
-}
-
-bool vid_is_dsa_8021q_txvlan(u16 vid)
-{
- return false;
-}
-
-bool vid_is_dsa_8021q(u16 vid)
-{
- return false;
-}
-
-#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
-
#endif /* _NET_DSA_8021Q_H */
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 1eb84562b311..b6089b88314c 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -14,6 +14,7 @@
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
@@ -44,11 +45,14 @@ struct sja1105_tagger_data {
*/
spinlock_t meta_lock;
unsigned long state;
+ u8 ts_id;
};
struct sja1105_skb_cb {
struct sk_buff *clone;
- u32 meta_tstamp;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
};
#define SJA1105_SKB_CB(skb) \
@@ -65,4 +69,24 @@ struct sja1105_port {
u16 xmit_tpid;
};
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
+#else
+
+static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
+ u8 ts_id, enum sja1110_meta_tstamp dir,
+ u64 tstamp)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+
#endif /* _NET_DSA_SJA1105_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index dcb2f9022c1d..ef9ceead3db1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -117,9 +117,11 @@ extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
+ struct list_head *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
+void elevator_init_mq(struct request_queue *q);
/*
* io scheduler registration
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 757fc60658fa..3f221dbf5f95 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -91,6 +91,8 @@ void em_dev_unregister_perf_domain(struct device *dev);
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ *                    might reflect the reduced frequency (due to thermal)
*
* This function must be used only for CPU devices. There is no validation,
* i.e. if the EM is a CPU type and has cpumask allocated. It is called from
@@ -100,7 +102,8 @@ void em_dev_unregister_perf_domain(struct device *dev);
* a capacity state satisfying the max utilization of the domain.
*/
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
unsigned long freq, scale_cpu;
struct em_perf_state *ps;
@@ -112,11 +115,17 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
/*
* In order to predict the performance state, map the utilization of
* the most utilized CPU of the performance domain to a requested
- * frequency, like schedutil.
+ * frequency, like schedutil. Take also into account that the real
+ * frequency might be set lower (due to thermal capping). Thus, clamp
+ * max utilization to the allowed CPU capacity before calculating
+ * effective frequency.
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
ps = &pd->table[pd->nr_perf_states - 1];
+
+ max_util = map_util_perf(max_util);
+ max_util = min(max_util, allowed_cpu_cap);
freq = map_util_freq(max_util, ps->frequency, scale_cpu);
/*
@@ -209,7 +218,8 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev)
return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
return 0;
}
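A worked example of the new clamp, with invented numbers and assuming the usual map_util_perf() (util + util/4) and map_util_freq() (freq * util / cap) definitions:

	/*
	 * scale_cpu = 1024, highest OPP frequency = 2,000,000 kHz,
	 * max_util = 700, allowed_cpu_cap = 800 (thermal cap).
	 *
	 *   map_util_perf(700)                  = 700 + 700/4 = 875
	 *   min(875, allowed_cpu_cap)           = 800
	 *   map_util_freq(800, 2000000, 1024)   ~= 1562500 kHz
	 *
	 * Without the clamp the request would have been
	 * 2000000 * 875 / 1024 ~= 1708984 kHz, an OPP the thermally
	 * capped CPU cannot actually run at, skewing the energy estimate.
	 */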
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 8b2b1d68b954..136b8d97d8c0 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
#define __LINUX_ENTRYKVM_H
#include <linux/entry-common.h>
+#include <linux/tick.h>
/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
static inline void xfer_to_guest_mode_prepare(void)
{
lockdep_assert_irqs_disabled();
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
}
/**
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e030f7510cd3..29dbb603bc91 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -401,12 +401,12 @@ struct ethtool_rmon_stats {
* required information to the driver.
*/
struct ethtool_module_eeprom {
- __u32 offset;
- __u32 length;
- __u8 page;
- __u8 bank;
- __u8 i2c_address;
- __u8 *data;
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
};
/**
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 8302bc29bb35..4c374be70247 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -23,18 +23,25 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
struct integrity_iint_cache *iint);
extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+extern int evm_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size);
extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
const char *xattr_name);
extern int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -71,7 +78,8 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
return;
}
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+static inline int evm_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size)
{
return 0;
@@ -85,7 +93,8 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
+static inline int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
const char *xattr_name)
{
return 0;
@@ -104,5 +113,22 @@ static inline int evm_inode_init_security(struct inode *inode,
return 0;
}
+static inline bool evm_revalidate_status(const char *xattr_name)
+{
+ return false;
+}
+
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+{
+ return false;
+}
+
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_EVM */
#endif /* LINUX_EVM_H */
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index bad41bcb25df..a16dbeced152 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
FANOTIFY_USER_INIT_FLAGS)
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
+
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a8dccd23c249..ecfbcc0553a5 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
/* drivers/video/fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 766fcd973beb..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -12,10 +12,6 @@
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
-/* List of all valid flags for the how->upgrade_mask argument: */
-#define VALID_UPGRADE_FLAGS \
- (UPGRADE_NOWRITE | UPGRADE_NOREAD)
-
/* List of all valid flags for the how->resolve argument: */
#define VALID_RESOLVE_FLAGS \
(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a09547bc7ba..472f97074da0 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -646,6 +646,7 @@ struct bpf_redirect_info {
u32 flags;
u32 tgt_index;
void *tgt_value;
+ struct bpf_map *map;
u32 map_id;
enum bpf_map_type map_type;
u32 kern_flags;
@@ -762,11 +763,9 @@ DECLARE_BPF_DISPATCHER(xdp)
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
+ /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
+ * under local_bh_disable(), which provides the needed RCU protection
+ * for accessing map entries.
*/
return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
}
@@ -995,11 +994,13 @@ void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
return NULL;
@@ -1464,17 +1465,19 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
-static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
+static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
+ u64 flags, const u64 flag_mask,
void *lookup_elem(struct bpf_map *map, u32 key))
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
/* Lower bits of the flags are used as return code on lookup failure */
- if (unlikely(flags > XDP_TX))
+ if (unlikely(flags & ~(action_mask | flag_mask)))
return XDP_ABORTED;
ri->tgt_value = lookup_elem(map, ifindex);
- if (unlikely(!ri->tgt_value)) {
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
/* If the lookup fails we want to clear out the state in the
* redirect_info struct completely, so that if an eBPF program
* performs multiple lookups, the last one always takes
@@ -1482,13 +1485,21 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind
*/
ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
ri->map_type = BPF_MAP_TYPE_UNSPEC;
- return flags;
+ return flags & action_mask;
}
ri->tgt_index = ifindex;
ri->map_id = map->id;
ri->map_type = map->map_type;
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
return XDP_REDIRECT;
}
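To make the flag split concrete, a caller-side sketch (hypothetical program, assuming libbpf's BTF-style map syntax): the low action bits of the third argument are returned verbatim when the lookup fails, while high bits such as BPF_F_BROADCAST select behaviour:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 64);
		__type(key, __u32);
		__type(value, __u32);
	} tx_port SEC(".maps");

	SEC("xdp")
	int xdp_redir(struct xdp_md *ctx)
	{
		/* XDP_PASS in the low bits: fall through to the stack
		 * when the ingress ifindex has no entry in the map.
		 *
		 * Broadcast to every map entry instead with:
		 *   return bpf_redirect_map(&tx_port, 0, BPF_F_BROADCAST);
		 */
		return bpf_redirect_map(&tx_port, ctx->ingress_ifindex, XDP_PASS);
	}

	char LICENSE[] SEC("license") = "GPL";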
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c3c88fdb9b2a..fad6663cd1b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3417,18 +3417,14 @@ extern int simple_rename(struct user_namespace *, struct inode *,
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
-extern int noop_set_page_dirty(struct page *page);
extern void noop_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
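A sketch of how a bus driver might walk child fwnodes and register PHYs through the new helper. The "foo" names are hypothetical, and reading the address from a "reg" property is an assumption about the firmware binding:

	static int foo_mdio_register_phys(struct mii_bus *bus,
					  struct fwnode_handle *fwnode)
	{
		struct fwnode_handle *child;
		u32 addr;
		int err;

		fwnode_for_each_child_node(fwnode, child) {
			if (fwnode_property_read_u32(child, "reg", &addr))
				continue;
			err = fwnode_mdiobus_register_phy(bus, child, addr);
			if (err) {
				/* drop the reference held by the iterator */
				fwnode_handle_put(child);
				return err;
			}
		}
		return 0;
	}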
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6fc26f7bdf71..13b34177cc85 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -153,6 +153,11 @@ struct gendisk {
unsigned long state;
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
+#define GD_QUEUE_REF 2
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
struct kobject *slave_dir;
struct timer_rand_state *random;
@@ -218,7 +223,6 @@ static inline void add_disk_no_queue_reg(struct gendisk *disk)
}
extern void del_gendisk(struct gendisk *gp);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
void set_disk_ro(struct gendisk *disk, bool read_only);
@@ -252,8 +256,7 @@ static inline sector_t get_capacity(struct gendisk *disk)
return bdev_nr_sectors(disk->part0);
}
-int bdev_disk_changed(struct block_device *bdev, bool invalidate);
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void blk_drop_partitions(struct gendisk *disk);
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
@@ -277,6 +280,28 @@ extern void put_disk(struct gendisk *disk);
#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Context: can sleep
+ */
+#define blk_alloc_disk(node_id) \
+({ \
+ struct gendisk *__disk = __blk_alloc_disk(node_id); \
+ static struct lock_class_key __key; \
+ \
+ if (__disk) \
+ lockdep_init_map(&__disk->lockdep_map, \
+ "(bio completion)", &__key, 0); \
+ __disk; \
+})
+struct gendisk *__blk_alloc_disk(int node);
+void blk_cleanup_disk(struct gendisk *disk);
+
int __register_blkdev(unsigned int major, const char *name,
void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
@@ -306,6 +331,7 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
}
#endif /* CONFIG_SYSFS */
+dev_t part_devt(struct gendisk *disk, u8 partno);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK
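For illustration, a sketch of the new allocation pattern for a BIO-based driver ("foo" names and the major-number registration are hypothetical): blk_alloc_disk() hands back a gendisk with its queue pre-initialized, and blk_cleanup_disk() tears both down at once:

	static int foo_major;	/* from register_blkdev(0, "foo"), elsewhere */

	static const struct block_device_operations foo_fops = {
		.owner = THIS_MODULE,
		/* a real BIO driver would also set .submit_bio */
	};

	static struct gendisk *foo_disk_create(void)
	{
		struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

		if (!disk)
			return NULL;

		disk->major = foo_major;
		disk->first_minor = 0;
		disk->minors = 1;
		disk->fops = &foo_fops;
		snprintf(disk->disk_name, sizeof(disk->disk_name), "foo0");
		set_capacity(disk, 2048);	/* 1 MiB in 512-byte sectors */
		add_disk(disk);
		return disk;
	}

	/* teardown pairs with both allocations at once:
	 *   del_gendisk(disk); blk_cleanup_disk(disk);
	 */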
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 11da8af06704..55b2ec1f965a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -53,8 +53,10 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x100000u
#define ___GFP_THISNODE 0x200000u
#define ___GFP_ACCOUNT 0x400000u
+#define ___GFP_ZEROTAGS 0x800000u
+#define ___GFP_SKIP_KASAN_POISON 0x1000000u
#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x800000u
+#define ___GFP_NOLOCKDEP 0x2000000u
#else
#define ___GFP_NOLOCKDEP 0
#endif
@@ -229,16 +231,25 @@ struct vm_area_struct;
* %__GFP_COMP address compound page metadata.
*
* %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if
+ * __GFP_ZERO is set.
+ *
+ * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned
+ * on deallocation. Typically used for userspace pages. Currently only has an
+ * effect in HW tags mode.
*/
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/**
@@ -319,7 +330,8 @@ struct vm_area_struct;
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
+ __GFP_SKIP_KASAN_POISON)
#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
@@ -494,8 +506,8 @@ static inline int gfp_zonelist(gfp_t flags)
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -536,6 +548,15 @@ alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_arr
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
}
+static inline unsigned long
+alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+{
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+}
+
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
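Usage sketch (hypothetical caller): the bulk allocator fills only NULL slots, so the array must start zeroed, and the return value is the number of populated entries, which may be less than requested:

	struct page *pages[16] = { NULL };	/* only NULL slots get filled */
	unsigned long filled;

	filled = alloc_pages_bulk_array_node(GFP_KERNEL, NUMA_NO_NODE,
					     ARRAY_SIZE(pages), pages);
	/* filled may be < 16; NUMA_NO_NODE falls back to numa_mem_id() */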
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c73b25bc9213..566feb56601f 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -692,6 +692,8 @@ int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
+
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
struct acpi_device;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 271021e20a3f..9e067f937dbc 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -102,6 +102,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -800,6 +801,7 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return whether the device may act as a wakeup source during system-suspend
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -824,6 +826,7 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ bool (*may_wakeup)(struct hid_device *hdev);
};
extern struct hid_ll_driver i2c_hid_ll_driver;
@@ -1150,6 +1153,22 @@ static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
}
/**
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system-suspend
+ *
+ * @hdev: hid device
+ */
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
+{
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
+
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
+
+ return false;
+}
+
+/**
* hid_hw_wait - wait for buffered io to complete
*
* @hdev: hid device
@@ -1167,8 +1186,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*/
static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
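A sketch of a transport driver consulting the new helper during suspend (hypothetical "foo" structure; a real ll_driver could instead supply its own ->may_wakeup and take the first branch inside hid_hw_may_wakeup()):

	struct foo_hid {
		struct hid_device *hdev;
		int irq;
	};

	static int foo_hid_suspend(struct foo_hid *foo)
	{
		/* Arm the wake IRQ only if the device may wake the
		 * system; otherwise quiesce it entirely.
		 */
		if (hid_hw_may_wakeup(foo->hdev))
			enable_irq_wake(foo->irq);
		else
			disable_irq(foo->irq);
		return 0;
	}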
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 832b49b50c7b..8c6e8e996c87 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -152,28 +152,24 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
* @vma: The VMA the page is to be allocated for
* @vaddr: The virtual address the page will be inserted into
*
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows can
+ * be migrated in the future using move_pages() or be reclaimed.
*
* An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
* implementation.
*/
static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (page)
clear_user_highpage(page, vaddr);
@@ -182,21 +178,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags,
}
#endif
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
-{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-}
-
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
@@ -204,6 +185,14 @@ static inline void clear_highpage(struct page *page)
kunmap_atomic(kaddr);
}
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
/*
* If we pass in a base or tail page, we can zero up to PAGE_SIZE.
* If we pass in a head page, we can zero up to the size of the compound page.
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 232e1bd507a7..9b0487c88571 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key);
-#define host1x_client_register(class) \
- ({ \
- static struct lock_class_key __key; \
- __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
})
int host1x_client_unregister(struct host1x_client *client);
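Per the note above, a sketch of the split init/register flow (hypothetical "foo" embedding): init once, fill in the client, then register with the low-level call so the client is not reinitialized:

	struct foo {
		struct host1x_client client;
	};

	static int foo_register(struct foo *foo)
	{
		int err;

		host1x_client_init(&foo->client);
		/* populate foo->client.ops / .dev between init and register */

		err = __host1x_client_register(&foo->client);
		if (err)
			host1x_client_exit(&foo->client);
		return err;
	}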
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9626fda5efce..2a8ebe6c222e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b92f25ccef58..8ba79dc64ab8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -149,6 +149,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
@@ -339,6 +340,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ return 0;
+}
+
static inline void putback_active_hugepage(struct page *page)
{
}
@@ -445,7 +451,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
+ struct ucounts **ucounts, int creat_flags,
int page_size_log);
static inline bool is_file_hugepages(struct file *file)
@@ -465,7 +471,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
+ struct ucounts **ucounts, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -604,6 +610,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page);
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
@@ -733,17 +741,6 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
@@ -980,11 +977,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d1e59dbef1dd..2e859d2f9609 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -181,6 +181,10 @@ struct hv_ring_buffer_info {
* being freed while the ring buffer is being accessed.
*/
struct mutex ring_buffer_mutex;
+
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
};
@@ -790,7 +794,11 @@ struct vmbus_requestor {
#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
+/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
struct vmbus_device {
u16 dev_type;
@@ -799,6 +807,8 @@ struct vmbus_device {
bool allowed_in_isolated;
};
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
struct vmbus_channel {
struct list_head listentry;
@@ -1018,13 +1028,21 @@ struct vmbus_channel {
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
/* request/transaction ids for VMBus */
struct vmbus_requestor requestor;
u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
};
-u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
-u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
@@ -1663,31 +1681,54 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
struct vmpacket_descriptor *
+hv_pkt_iter_first_raw(struct vmbus_channel *channel);
+
+struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt);
+ const struct vmpacket_descriptor *pkt,
+ bool copy);
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
-hv_pkt_iter_next(struct vmbus_channel *channel,
- const struct vmpacket_descriptor *pkt)
+hv_pkt_iter_next_pkt(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt,
+ bool copy)
{
struct vmpacket_descriptor *nxt;
- nxt = __hv_pkt_iter_next(channel, pkt);
+ nxt = __hv_pkt_iter_next(channel, pkt, copy);
if (!nxt)
hv_pkt_iter_close(channel);
return nxt;
}
+/*
+ * Get next packet descriptor without copying it out of the ring buffer
+ * If at end of list, return NULL and update host.
+ */
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next_raw(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
+{
+ return hv_pkt_iter_next_pkt(channel, pkt, false);
+}
+
+/*
+ * Get next packet descriptor from iterator
+ * If at end of list, return NULL and update host.
+ */
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
+{
+ return hv_pkt_iter_next_pkt(channel, pkt, true);
+}
+
#define foreach_vmbus_pkt(pkt, channel) \
for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt = hv_pkt_iter_next(channel, pkt))
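Consumption sketch (hypothetical handler; assumes the existing hv_pkt_data()/hv_pkt_datalen() accessors): with the default copying iterator each descriptor now lands in the channel's pkt_buffer, so it stays stable even if the host rewrites the ring; the _raw variants keep the old zero-copy behaviour:

	static void foo_handle_pkt(void *data, u32 len);

	static void foo_on_channel_cb(void *context)
	{
		struct vmbus_channel *chan = context;
		struct vmpacket_descriptor *desc;

		foreach_vmbus_pkt(desc, chan) {
			/* desc points at the channel's private copy, not
			 * at live ring memory, so the host cannot change
			 * it underneath us.
			 */
			foo_handle_pkt(hv_pkt_data(desc), hv_pkt_datalen(desc));
		}
	}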
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index 2c300689a51a..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1623 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blk-mq.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-/*
- * Probably not wise to fiddle with these
- */
-#define SUPPORT_VLB_SYNC 1
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
- void *special;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = BIT(0),
- IDE_SFLAG_RECALIBRATE = BIT(1),
- IDE_SFLAG_SET_MULTMODE = BIT(2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = BIT(1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = BIT(2),
- IDE_VALID_LBAL = BIT(3),
- IDE_VALID_LBAM = BIT(4),
- IDE_VALID_LBAH = BIT(5),
- IDE_VALID_DEVICE = BIT(6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = BIT(0),
- IDE_TFLAG_WRITE = BIT(1),
- IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
- IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = BIT(4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = BIT(5),
- IDE_TFLAG_FS = BIT(6),
- IDE_TFLAG_MULTI_PIO = BIT(7),
- IDE_TFLAG_SET_XFER = BIT(8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = BIT(0),
- IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
- IDE_FTFLAG_OUT_DATA = BIT(2),
- IDE_FTFLAG_IN_DATA = BIT(3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = BIT(0),
- PC_FLAG_SUPPRESS_ERROR = BIT(1),
- PC_FLAG_WAIT_FOR_DSC = BIT(2),
- PC_FLAG_DMA_OK = BIT(3),
- PC_FLAG_DMA_IN_PROGRESS = BIT(4),
- PC_FLAG_DMA_ERROR = BIT(5),
- PC_FLAG_WRITING = BIT(6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
- * those are more or less driver-specific and some of them are subject
- * to change/removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
- int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = BIT(1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = BIT(2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = BIT(6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = BIT(7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
- IDE_AFLAG_VERTOS_300_SSD = BIT(9),
- IDE_AFLAG_VERTOS_600_ESD = BIT(10),
- IDE_AFLAG_SANYO_3CD = BIT(11),
- IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
- IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
- IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = BIT(15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = BIT(16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = BIT(17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = BIT(18),
- /* 0 When the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = BIT(19),
- /* Device already opened */
- IDE_AFLAG_BUSY = BIT(20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = BIT(21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = BIT(22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
-
- IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = BIT(0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = BIT(1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = BIT(2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = BIT(3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = BIT(4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = BIT(5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = BIT(6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = BIT(7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = BIT(8),
- IDE_DFLAG_NOPROBE = BIT(9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = BIT(10),
- IDE_DFLAG_FORCED_GEOM = BIT(12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = BIT(13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = BIT(14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = BIT(15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = BIT(16),
- /* powermanagement told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = BIT(17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = BIT(18),
- IDE_DFLAG_POST_RESET = BIT(19),
- IDE_DFLAG_UDMA33_WARNED = BIT(20),
- IDE_DFLAG_LBA48 = BIT(21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = BIT(22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = BIT(23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
- IDE_DFLAG_LBA = BIT(25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = BIT(26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = BIT(27),
- IDE_DFLAG_MEDIA_CHANGED = BIT(28),
- /* write protect */
- IDE_DFLAG_WP = BIT(29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
- IDE_DFLAG_NIEN_QUIRK = BIT(31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- bool (*prep_rq)(struct ide_drive_s *, struct request *);
-
- struct blk_mq_tag_set tag_set;
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
- u8 dn; /* now wide spread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- bool sense_rq_active;
- struct request *sense_rq;
- struct request_sense sense_data;
-
- /* async sense insertion */
- struct work_struct rq_work;
- struct list_head rq_list;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
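All three helpers are container_of() wrappers. A hedged sketch of how a subdriver recovered its private object from a gendisk; the struct and its fields are hypothetical — only the convention that disk->private_data point at the `driver` member comes from the macro itself:

```c
/* Hypothetical subdriver private object, not the real ide-cd layout. */
struct example_cd_info {
	struct ide_driver *driver;	/* gendisk private_data points here */
	int speed;
};

static struct example_cd_info *example_from_disk(struct gendisk *disk)
{
	/* container_of() walks back from &obj->driver to the whole object */
	return ide_drv_g(disk, example_cd_info);
}
```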
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
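Host drivers typically filled in only the hooks their chipset needed and left the rest NULL. A minimal, hypothetical initializer (placeholder bodies, not a real chipset driver):

```c
static void example_set_pio_mode(struct hwif_s *hwif, ide_drive_t *drive)
{
	/* a real driver would program chipset timing registers for
	 * drive->pio_mode here
	 */
}

static u8 example_cable_detect(struct hwif_s *hwif)
{
	return ATA_CBL_PATA40;	/* pretend a 40-wire cable was found */
}

static const struct ide_port_ops example_port_ops = {
	.set_pio_mode	= example_set_pio_mode,
	.cable_detect	= example_cable_detect,
	/* all other hooks intentionally left NULL */
};
```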
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only needs to be
- * implemented for SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = BIT(0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC BIT(0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
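Expanding the macros by hand shows what gets generated. This is the mechanical expansion for the io_32bit field declared above (the real in-tree accessors add validation; this shows only what the macros themselves emit):

```c
/* Mechanical expansion of ide_devset_get(io_32bit, io_32bit): */
static int get_io_32bit(ide_drive_t *drive)
{
	return drive->io_32bit;
}

/* ...and of ide_devset_set(io_32bit, io_32bit): */
static int set_io_32bit(ide_drive_t *drive, int arg)
{
	drive->io_32bit = arg;
	return 0;
}

/* IDE_DEVSET(io_32bit, DS_SYNC, get_io_32bit, set_io_32bit) then emits
 * "static const struct ide_devset ide_devset_io_32bit", wiring the two
 * accessors and the DS_SYNC flag together.
 */
```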
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_ro_field(_name, _field) \
-ide_devset_get(_name, _field); \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- int (*show)(struct seq_file *, void *);
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-int ide_capacity_proc_show(struct seq_file *m, void *v);
-int ide_geometry_proc_show(struct seq_file *m, void *v);
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = BIT(0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = BIT(1),
- /* packet commands handling */
- IDE_DBG_PC = BIT(2),
- /* request handling */
- IDE_DBG_RQ = BIT(3),
- /* driver probing/setup */
- IDE_DBG_PROBE = BIT(4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
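A small usage sketch, assuming a driver that defines DRV_NAME and has a `drive` pointer in scope (the macro references `drive->debug_mask` implicitly):

```c
#define DRV_NAME "ide-example"	/* hypothetical driver name */

static void example_probe_step(ide_drive_t *drive)
{
	/* emitted only when IDE_DBG_PROBE is set in drive->debug_mask */
	__ide_debug_log(IDE_DBG_PROBE, "probing %s", drive->name);
}
```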
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped : In this case, the core calls us back again unless
- * the step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code, if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless the step has been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
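A schematic of the loop the comment above describes — not the literal ide-pm.c code; in particular, the way the ide_pm_state is fetched from the request is a placeholder:

```c
/* Schematic only: the real stepping logic lives in ide-pm.c/ide-io.c. */
static void example_pm_walk(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special; /* placeholder accessor */

	while (pm->pm_step != IDE_PM_COMPLETED) {
		ide_startstop_t ret = ide_start_power_step(drive, rq);

		if (ret == ide_started)
			return;	/* wait for the IRQ; the completion path
				 * advances pm_step and re-enters */
	}
	ide_complete_pm_rq(drive, rq);
}
```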
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gen_driver.owner field should be set to the module owner of this driver.
- * The gen_driver.name field should be set to the name of this driver.
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-void ide_insert_request_head(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset (ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
- REQ_IDETAPE_READ = BIT(2),
- REQ_IDETAPE_WRITE = BIT(3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three (ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(struct timer_list *t);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
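The magic constant decodes the PCI programming-interface byte (the low byte of dev->class): bit 0 reports native mode for the primary channel and bit 2 for the secondary, so `(dev->class & 5) != 5` means at least one channel is in legacy/compatibility mode. An equivalent, more explicit sketch:

```c
#include <linux/pci.h>

/* Equivalent, spelled-out form of the check above (sketch). */
static bool example_in_compat_mode(struct pci_dev *dev)
{
	u8 progif = dev->class & 0xff;		/* low byte is prog-if */
	bool primary_native   = progif & 0x01;	/* prog-if bit 0 */
	bool secondary_native = progif & 0x04;	/* prog-if bit 2 */

	return (dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
	       !(primary_native && secondary_native);
}
```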
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports not PCI ones. */
- IDE_HFLAG_ISA_PORTS = BIT(0),
- /* single port device */
- IDE_HFLAG_SINGLE = BIT(1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = BIT(3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
- /*
- * keep DMA setting when programming PIO mode, may be used only
- * for hosts which have separate PIO and DMA timings (ie. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = BIT(8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = BIT(9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = BIT(11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = BIT(13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = BIT(14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = BIT(15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = BIT(16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = BIT(17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = BIT(18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = BIT(20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = BIT(21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = BIT(22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = BIT(23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = BIT(24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = BIT(25),
- IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = BIT(27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = BIT(29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = BIT(30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if less than the default */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
-{
- return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
-{
- hwif->hwif_data = data;
-}
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = BIT(0),
- IDE_TIMING_ACT8B = BIT(1),
- IDE_TIMING_REC8B = BIT(2),
- IDE_TIMING_CYC8B = BIT(3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = BIT(4),
- IDE_TIMING_RECOVER = BIT(5),
- IDE_TIMING_CYCLE = BIT(6),
- IDE_TIMING_UDMA = BIT(7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
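A usage sketch for the iterators, assuming a populated `host`. Note that the `||` in the loop condition keeps iterating past empty slots, so the body must guard before dereferencing a port:

```c
static void example_walk(struct ide_host *host)
{
	ide_hwif_t *port;
	ide_drive_t *drive;
	int i, j;

	ide_host_for_each_port(i, port, host) {
		if (!port)
			continue;	/* unused port slots are NULL */

		ide_port_for_each_present_dev(j, drive, port)
			pr_info("%s: %s present\n", port->name, drive->name);
	}
}
```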
-
-
-#endif /* _IDE_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 2967437f1b11..a6730072d13a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2020 Intel Corporation
+ * Copyright (c) 2018 - 2021 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -2179,6 +2179,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0
#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
+
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
@@ -2933,6 +2935,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -3110,6 +3113,11 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+/* Timing Measurement protocol for time sync is set in the 7th bit of the 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index bf5c5f32c65e..b712217f7030 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -48,6 +48,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
case ARPHRD_RAWIP:
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 2cc35038a8ca..b651c5e32a28 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -67,10 +67,12 @@ int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack);
+ const void *ctx, bool adding, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -87,6 +89,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
@@ -95,9 +104,9 @@ static inline bool br_multicast_router(const struct net_device *dev)
{
return false;
}
-static inline int br_mdb_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb,
+static inline int br_mdb_replay(const struct net_device *br_dev,
+ const struct net_device *dev, const void *ctx,
+ bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
@@ -112,7 +121,8 @@ int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack);
+ const void *ctx, bool adding, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@@ -141,8 +151,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
}
static inline int br_vlan_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb,
+ struct net_device *dev, const void *ctx,
+ bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
@@ -156,9 +166,9 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
u8 br_port_get_stp_state(const struct net_device *dev);
-clock_t br_get_ageing_time(struct net_device *br_dev);
-int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
+int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
+ const void *ctx, bool adding, struct notifier_block *nb);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -183,14 +193,14 @@ static inline u8 br_port_get_stp_state(const struct net_device *dev)
return BR_STATE_DISABLED;
}
-static inline clock_t br_get_ageing_time(struct net_device *br_dev)
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
{
return 0;
}
-static inline int br_fdb_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb)
+static inline int br_fdb_replay(const struct net_device *br_dev,
+ const struct net_device *dev, const void *ctx,
+ bool adding, struct notifier_block *nb)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 4efb537f57f3..10e7521ecb6c 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_IF_RMNET_H_
@@ -12,10 +12,12 @@ struct rmnet_map_header {
} __aligned(1);
/* rmnet_map_header flags field:
- * PAD_LEN: number of pad bytes following packet data
- * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains a V5 CSUM header; 0 = no V5 CSUM header
*/
#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
#define MAP_CMD_FLAG BIT(7)
struct rmnet_map_dl_csum_trailer {
@@ -23,7 +25,7 @@ struct rmnet_map_dl_csum_trailer {
u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
__be16 csum_start_offset;
__be16 csum_length;
- __be16 csum_value;
+ __sum16 csum_value;
} __aligned(1);
/* rmnet_map_dl_csum_trailer flags field:
@@ -45,4 +47,26 @@ struct rmnet_map_ul_csum_header {
#define MAP_CSUM_UL_UDP_FLAG BIT(14)
#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ * 1 = for DL, the checksum was validated and found valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
#endif /* !(_LINUX_IF_RMNET_H_) */
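Putting the new v5 definitions together: a receive path first checks NEXT_HEADER in the MAP header, then the header type and validity bit in the trailing v5 header. A hedged sketch, not the rmnet driver's actual code:

```c
#include <linux/if_rmnet.h>
#include <linux/bitfield.h>

/* Sketch: decide whether hardware validated the inbound checksum. */
static bool example_map_v5_csum_valid(const struct rmnet_map_header *maph)
{
	const struct rmnet_map_v5_csum_header *v5;

	if (!(maph->flags & MAP_NEXT_HEADER_FLAG))
		return false;	/* no v5 header follows */

	v5 = (const void *)(maph + 1);
	if (u8_get_bits(v5->header_info, MAPV5_HDRINFO_HDR_TYPE_FMASK) !=
	    RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return false;

	return v5->csum_info & MAPV5_CSUMINFO_VALID_FLAG;
}
```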
diff --git a/include/linux/init.h b/include/linux/init.h
index 045ad1650ed1..d82b4b2e1d25 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -242,7 +242,8 @@ extern bool initcall_debug;
asm(".section \"" __sec "\", \"a\" \n" \
__stringify(__name) ": \n" \
".long " __stringify(__stub) " - . \n" \
- ".previous \n");
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
#else
#define ____define_initcall(fn, __unused, __name, __sec) \
static initcall_t __name __used \
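The added static_assert() turns a mistyped initcall into a compile-time error instead of a silently malformed section entry. A hypothetical module illustrating what it now rejects:

```c
#include <linux/init.h>

/* OK: matches initcall_t, i.e. int (*)(void). */
static int __init good_init(void)
{
	return 0;
}
device_initcall(good_init);

/*
 * This would now trip the static_assert at build time instead of
 * emitting a bogus entry:
 *
 *	static void __init bad_init(int unused) { }
 *	device_initcall(bad_init);
 */
```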
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index 93e2ad67fc10..fa2cd8c63dcc 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -4,13 +4,16 @@
#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
+#include <linux/stringify.h>
+
/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,12 +46,13 @@
 * To avoid this, have _end() be a NOP instruction; this ensures it will be
* part of the condition block and does not escape.
*/
-#define instrumentation_end() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
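The indirection exists because __COUNTER__ must be expanded before stringification; argument prescan in the helper macro does that, so the label and its `.long` reference agree on the same number. The same two-step pattern in isolation, as plain user-space C:

```c
#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* Without the extra expansion step, #x would yield the literal string
 * "__COUNTER__"; with it, each use stringifies a freshly expanded number.
 */
#define LABEL(c)	__stringify(c)

int main(void)
{
	puts(LABEL(__COUNTER__));	/* prints "0" */
	puts(LABEL(__COUNTER__));	/* prints "1" */
	return 0;
}
```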
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 2271939c5c31..2ea0f2f65ab6 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -13,6 +13,7 @@ enum integrity_status {
INTEGRITY_PASS = 0,
INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 0d6b4bc191c5..25e2b4e80502 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -8,11 +8,17 @@
#ifndef _INTEL_ISH_CLIENT_IF_H_
#define _INTEL_ISH_CLIENT_IF_H_
+#include <linux/device.h>
+#include <linux/uuid.h>
+
struct ishtp_cl_device;
struct ishtp_device;
struct ishtp_cl;
struct ishtp_fw_client;
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
/* Client state */
enum cl_state {
ISHTP_CL_INITIALIZING = 0,
@@ -36,7 +42,7 @@ struct ishtp_cl_driver {
const char *name;
const guid_t *guid;
int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
const struct dev_pm_ops *pm;
};
@@ -76,7 +82,7 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
/* Trace interface for clients */
-void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
 /* Get device pointer of PCI device for DMA access */
struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
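With the typed callback, clients no longer cast a bare void pointer before logging. A hedged usage sketch (the function and its parameters are illustrative):

```c
/* Sketch: fetch and use the now-typed trace callback. */
static void example_trace(struct ishtp_cl_device *cl_device,
			  struct ishtp_device *dev)
{
	ishtp_print_log log = ishtp_trace_callback(cl_device);

	if (log)
		log(dev, "client %s ready\n",
		    dev_name(ishtp_device(cl_device)));
}
```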
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 4777850a6dc7..2ed65b01c961 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -64,6 +64,8 @@
* IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
* Users will enable it explicitly by enable_irq() or enable_nmi()
* later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
+ * depends on IRQF_PERCPU.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -78,6 +80,7 @@
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -319,39 +322,8 @@ struct irq_affinity_desc {
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
-
-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Fails if cpumask does not contain an online CPU
- */
-static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
-{
- return __irq_set_affinity(irq, cpumask, false);
-}
-
-/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
- *
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
- */
-static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
-{
- return __irq_set_affinity(irq, cpumask, true);
-}
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index c87d0cb0de6d..479c1da3e221 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -159,7 +159,6 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
-int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 31b347c9f8dd..8e9a9ae471a6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,7 @@ enum irqchip_irq_state;
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
* IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -99,6 +100,7 @@ enum {
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index fa8c0455c352..1177f3a1aed5 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -7,8 +7,7 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
+#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
@@ -16,28 +15,6 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
-
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
- /* vlpi support */
- bool has_v4;
- /* rvpeid support */
- bool has_v4_1;
-};
-
-const struct gic_kvm_info *gic_get_kvm_info(void);
-
struct irq_domain;
struct fwnode_handle;
int gicv2m_init(struct fwnode_handle *parent_handle,
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..a75b2c7de69d
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No maintenance interrupt mask; no need to use the field above */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+ /* Deactivation impaired, subpar stuff */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index df4651250785..59aea39785bf 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -158,25 +158,21 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
+#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+int handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs);
-#ifdef CONFIG_IRQ_DOMAIN
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
struct pt_regs *regs);
#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 62a8e3d23829..23e4ee523576 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -41,13 +41,11 @@ struct fwnode_handle;
struct irq_domain;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
struct seq_file;
struct irq_affinity_desc;
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
-
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
/**
@@ -152,11 +150,10 @@ struct irq_domain_chip_generic;
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
*
* Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
+ * @revmap_size: Size of the linear map table @revmap[]
* @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * @revmap_mutex: Lock for the revmap
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
struct list_head link;
@@ -176,11 +173,10 @@ struct irq_domain {
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
- struct mutex revmap_tree_mutex;
- unsigned int linear_revmap[];
+ struct mutex revmap_mutex;
+ struct irq_data __rcu *revmap[];
};
/* Irq domain flags */
@@ -210,6 +206,9 @@ enum {
*/
IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -348,6 +347,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
{
return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
}
+
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
@@ -355,14 +356,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
-}
+
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+#endif
+
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
@@ -405,25 +402,37 @@ static inline unsigned int irq_create_mapping(struct irq_domain *host,
return irq_create_mapping_affinity(host, hwirq, NULL);
}
+extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ return __irq_resolve_mapping(domain, hwirq, NULL);
+}
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
- *
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
*/
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
+}
+
static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ return irq_find_mapping(domain, hwirq);
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
extern const struct irq_domain_ops irq_domain_simple_ops;
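After the rework, both lookups funnel through __irq_resolve_mapping(), and irq_linear_revmap() survives only as a compatibility wrapper, so existing callers keep working unchanged. A typical chained-handler lookup, sketched:

```c
/* Sketch: hwirq-to-virq demux in a chained handler, unchanged for callers. */
static void example_demux(struct irq_domain *domain, unsigned long pending)
{
	unsigned long hwirq;

	for_each_set_bit(hwirq, &pending, 32) {
		unsigned int virq = irq_find_mapping(domain, hwirq);

		if (virq)
			generic_handle_irq(virq);
	}
}
```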
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 05f5554d860f..48b9b2a82767 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
-static inline void jump_entry_set_init(struct jump_entry *entry)
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
- entry->key |= 2;
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
}
#endif
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b1678a61e6a7..5310e217bd74 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -17,7 +18,6 @@ struct task_struct;
/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
- bool report_expected;
bool report_found;
};
@@ -41,9 +41,9 @@ struct kunit_kasan_expectation {
#endif
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
-extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
-extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
@@ -79,14 +79,6 @@ static inline void kasan_disable_current(void) {}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
- bool is_kmalloc;
-};
-
#ifdef CONFIG_KASAN_HW_TAGS
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +93,14 @@ static inline bool kasan_has_integrated_init(void)
return kasan_enabled();
}
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
+void kasan_free_pages(struct page *page, unsigned int order);
+
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_enabled(void)
{
- return true;
+ return IS_ENABLED(CONFIG_KASAN);
}
static inline bool kasan_has_integrated_init(void)
@@ -113,8 +108,30 @@ static inline bool kasan_has_integrated_init(void)
return false;
}
+static __always_inline void kasan_alloc_pages(struct page *page,
+ unsigned int order, gfp_t flags)
+{
+ /* Only available for integrated init. */
+ BUILD_BUG();
+}
+
+static __always_inline void kasan_free_pages(struct page *page,
+ unsigned int order)
+{
+ /* Only available for integrated init. */
+ BUILD_BUG();
+}
+
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN
+
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+ bool is_kmalloc;
+};
+
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
@@ -130,20 +147,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
__kasan_unpoison_range(addr, size);
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_alloc_pages(page, order, init);
+ __kasan_poison_pages(page, order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_free_pages(page, order, init);
+ __kasan_unpoison_pages(page, order, init);
}
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +302,15 @@ void kasan_restore_multi_shot(bool enabled);
#else /* CONFIG_KASAN */
-static inline bool kasan_enabled(void)
-{
- return false;
-}
-static inline bool kasan_has_integrated_init(void)
-{
- return false;
-}
static inline slab_flags_t kasan_never_merge(void)
{
return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 15d8bad3d2f2..f2ad8a53f71f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -71,6 +71,18 @@
*/
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+/**
+ * upper_16_bits - return bits 16-31 of a number
+ * @n: the number we're accessing
+ */
+#define upper_16_bits(n) ((u16)((n) >> 16))
+
+/**
+ * lower_16_bits - return bits 0-15 of a number
+ * @n: the number we're accessing
+ */
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
+
struct completion;
struct pt_regs;
struct user;
@@ -357,6 +369,8 @@ int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
+extern int no_hash_pointers_enable(char *str);
+
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1883a4a9f16a..523ffc7bc3a8 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -54,8 +54,6 @@ struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -83,12 +81,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2484ed97e72f..346b0f269161 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -18,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @arg...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void set_kthread_struct(struct task_struct *p);
+
void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2f34487e21f2..ae7735b490b4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
@@ -27,6 +28,7 @@
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
+#include <linux/notifier.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -146,7 +148,7 @@ static inline bool is_error_page(struct page *page)
*/
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8
@@ -265,6 +267,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
return !!map->hva;
}
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -298,7 +305,6 @@ struct kvm_vcpu {
struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -335,6 +341,8 @@ struct kvm_vcpu {
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
struct kvm_dirty_ring dirty_ring;
};
@@ -517,6 +525,15 @@ struct kvm {
#endif /* KVM_HAVE_MMU_RWLOCK */
struct mutex slots_lock;
+
+ /*
+ * Protects the arch-specific fields of struct kvm_memory_slots in
+ * use by the VM. To be used under the slots_lock (above) or in a
+ * kvm->srcu critical section where acquiring the slots_lock would
+ * lead to deadlock with the synchronize_srcu in
+ * install_new_memslots.
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
@@ -579,6 +596,11 @@ struct kvm {
pid_t userspace_pid;
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -992,6 +1014,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
+
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif
@@ -1179,7 +1205,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+	 * The index was checked originally in search_memslots. To keep a
+	 * malicious guest from building a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -1230,26 +1264,104 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- struct kvm_stats_debugfs_item *dbgfs_item;
-};
-
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
+ const struct _kvm_stats_desc *desc;
enum kvm_stat_kind kind;
- int mode;
};
-#define KVM_DBGFS_GET_MODE(dbgfs_item) \
- ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
+};
-#define VM_STAT(n, x, ...) \
- { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
-#define VCPU_STAT(n, x, ...) \
- { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
+#define STATS_DESC_COMMON(type, unit, base, exp) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = 1
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, unit, base, exponent)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, unit, base, exponent)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, unit, base, exponent)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanoseconds */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns)
-extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a7580f69dda0..ed6a985c5680 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -76,5 +76,19 @@ struct kvm_mmu_memory_cache {
};
#endif
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+};
+
+#define KVM_STATS_NAME_SIZE 48
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5f550eb27f81..3fcd24236793 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1397,25 +1397,28 @@ extern struct device_attribute *ata_common_sdev_attrs[];
ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.dma_need_drain = ata_scsi_dma_need_drain, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity
-#define ATA_BASE_SHT(drv_name) \
+#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
.sdev_attrs = ata_common_sdev_attrs
#ifdef CONFIG_SATA_HOST
extern struct device_attribute *ata_ncq_sdev_attrs[];
#define ATA_NCQ_SHT(drv_name) \
- __ATA_BASE_SHT(drv_name), \
+ ATA_SUBBASE_SHT(drv_name), \
.sdev_attrs = ata_ncq_sdev_attrs, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 2ec9ff5a7fff..3e726ace5c62 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -52,7 +52,7 @@ enum lockdep_lock_type {
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
- * Currently main class (subclass == 0) and signle depth subclass
+ * Currently main class (subclass == 0) and single depth subclass
* are cached in lockdep_map. This optimization is mainly targeting
* on rq->lock. double_rq_lock() acquires this highly competitive with
* single depth.
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index cd23355d2271..17d02eda9538 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -48,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 04c01794de83..2adeea44c0d5 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -358,8 +358,7 @@ LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
-LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
struct xfrm_policy *xp, const struct flowi_common *flic)
LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index d5a983d65f05..44365aab043c 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -65,14 +65,10 @@ enum cmdq_code {
CMDQ_CODE_LOGIC = 0xa0,
};
-enum cmdq_cb_status {
- CMDQ_CB_NORMAL = 0,
- CMDQ_CB_ERROR
-};
-
struct cmdq_cb_data {
- enum cmdq_cb_status sta;
+ int sta;
void *data;
+ struct cmdq_pkt *pkt;
};
typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 66deb1fdc2ef..2928f03d6d46 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,6 +3,7 @@
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>
@@ -234,6 +235,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 5984fff3f175..552309342c38 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -50,7 +50,7 @@ struct memblock_region {
phys_addr_t base;
phys_addr_t size;
enum memblock_flags flags;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int nid;
#endif
};
@@ -347,7 +347,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -366,7 +366,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c193be760709..6d66037be646 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -192,7 +192,7 @@ enum memcg_kmem_state {
struct memcg_padding {
char x[0];
} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name;
+#define MEMCG_PADDING(name) struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif
@@ -349,8 +349,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
+ struct mem_cgroup_per_node *nodeinfo[];
};
/*
@@ -743,35 +742,18 @@ out:
/**
* mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
* @page: the page
- * @pgdat: pgdat of the page
*
* This function relies on page->mem_cgroup being stable.
*/
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
+ pg_data_t *pgdat = page_pgdat(page);
struct mem_cgroup *memcg = page_memcg(page);
VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
return mem_cgroup_lruvec(memcg, pgdat);
}
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
- struct lruvec *lruvec)
-{
- pg_data_t *pgdat = page_pgdat(page);
- const struct mem_cgroup *memcg;
- struct mem_cgroup_per_node *mz;
-
- if (mem_cgroup_disabled())
- return lruvec == &pgdat->__lruvec;
-
- mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- memcg = page_memcg(page) ? : root_mem_cgroup;
-
- return lruvec->pgdat == pgdat && mz->memcg == memcg;
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -1221,18 +1203,11 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
-{
- return &pgdat->__lruvec;
-}
-
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
- struct lruvec *lruvec)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
pg_data_t *pgdat = page_pgdat(page);
- return lruvec == &pgdat->__lruvec;
+ return &pgdat->__lruvec;
}
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
@@ -1255,6 +1230,12 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return NULL;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1516,12 +1497,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+{
+ return lruvec_pgdat(lruvec) == page_pgdat(page) &&
+ lruvec_memcg(lruvec) == page_memcg(page);
+}
+
/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
struct lruvec *locked_lruvec)
{
if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ if (page_matches_lruvec(page, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irq(locked_lruvec);
@@ -1535,7 +1523,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
struct lruvec *locked_lruvec, unsigned long *flags)
{
if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ if (page_matches_lruvec(page, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 601cbbc10370..32e3470708ed 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -31,7 +31,7 @@ struct pinctrl_map;
* @irq_flags: Mode for primary IRQ (defaults to active low)
* @gpio_base: Base GPIO number
* @gpio_configs: Array of GPIO configurations (See
- * Documentation/driver-api/pinctl.rst)
+ * Documentation/driver-api/pin-control.rst)
* @n_gpio_configs: Number of entries in gpio_configs
* @gpsw: General purpose switch mode setting. Depends on the external
* hardware connected to the switch. (See the SW1_MODE field
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
index c5a11b7458d4..68578e2019b0 100644
--- a/include/linux/mfd/mt6358/core.h
+++ b/include/linux/mfd/mt6358/core.h
@@ -6,12 +6,9 @@
#ifndef __MFD_MT6358_CORE_H__
#define __MFD_MT6358_CORE_H__
-#define MT6358_REG_WIDTH 16
-
struct irq_top_t {
int hwirq_base;
unsigned int num_int_regs;
- unsigned int num_int_bits;
unsigned int en_reg;
unsigned int en_reg_shift;
unsigned int sta_reg;
@@ -25,6 +22,7 @@ struct pmic_irq_data {
unsigned short top_int_status_reg;
bool *enable_hwirq;
bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
};
enum mt6358_irq_top_status_shift {
@@ -146,8 +144,8 @@ enum mt6358_irq_numbers {
{ \
.hwirq_base = MT6358_IRQ_##sp##_BASE, \
.num_int_regs = \
- ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
- .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
.en_reg = MT6358_##sp##_TOP_INT_CON0, \
.en_reg_shift = 0x6, \
.sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2135c9695918
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
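
The ADDR/MASK/SHIFT triplets above follow the usual MediaTek PMIC convention: read the register at *_ADDR, shift right by *_SHIFT, then mask with *_MASK. A minimal sketch of reading the VGPU11 SSHUB voltage selector over regmap (helper name hypothetical, regmap handle assumed to come from the MFD core):

    #include <linux/regmap.h>
    #include <linux/mfd/mt6359p/registers.h>

    /* Hypothetical helper: extract the VGPU11 SSHUB VOSEL field. */
    static int mt6359p_read_sshub_vosel(struct regmap *regmap, unsigned int *vosel)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(regmap, MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR, &val);
            if (ret)
                    return ret;

            *vosel = (val >> MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT) &
                     MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK;
            return 0;
    }
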
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 949268581b36..56f210eebc54 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -13,6 +13,7 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index c3748b53bf7d..068ae1c0f0e8 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -36,6 +36,7 @@
#define RTC_AL_MASK_DOW BIT(4)
#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
/* Min, Hour, Dom... register offset to RTC_TC_SEC */
#define RTC_OFFSET_SEC 0
#define RTC_OFFSET_MIN 1
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index a57af878fd0c..4a5966475a35 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -26,9 +26,7 @@ struct bd70528_data {
struct mutex rtc_timer_lock;
};
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
#define BD70528_LDO_VOLTS 0x20
#define BD70528_REG_BUCK1_EN 0x0F
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index c7ab69c87ee8..3b5f3a7db4bd 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -26,11 +26,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
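
The corrected constants above are selector counts, not bit masks: 0x100 is 256 selectors, 0x40 is 64, and so on. They end up as the number of voltages a regulator descriptor advertises. A hedged sketch (not the in-tree driver; fields illustrative only):

    #include <linux/regulator/driver.h>
    #include <linux/mfd/rohm-bd71828.h>

    /* Illustrative only: the count feeds regulator_desc.n_voltages. */
    static const struct regulator_desc bd71828_buck1_sketch = {
            .name = "buck1",
            .n_voltages = BD71828_BUCK1267_VOLTS, /* now 0x100 = 256 selectors */
            /* .ops, .linear_ranges, etc. omitted */
    };
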
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 416ee6dd2574..3d43c60b49fa 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -39,10 +39,26 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_KSZ8_P1_ERRATA 0x00000003
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+
#endif /* _MICREL_PHY_H */
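
The new KSZ886X_BMCR_* bits occupy the device-specific part of MII_BMCR. A minimal sketch of forcing the MDI pinout (helper name hypothetical; the real driver wires this into its MDI-X configuration path):

    #include <linux/mii.h>
    #include <linux/phy.h>
    #include <linux/micrel_phy.h>

    /* Hypothetical helper: disable auto MDI-X and force the MDI pinout. */
    static int ksz886x_force_mdi(struct phy_device *phydev)
    {
            return phy_modify(phydev, MII_BMCR,
                              KSZ886X_BMCR_DISABLE_AUTO_MDIX |
                              KSZ886X_BMCR_FORCE_MDI,
                              KSZ886X_BMCR_DISABLE_AUTO_MDIX |
                              KSZ886X_BMCR_FORCE_MDI);
    }
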
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 236a7d04f891..30bb59fe970c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 578c4ccae91c..0025913505ab 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1179,6 +1179,7 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_GENERAL_2 = 0x20,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1220,6 +1221,15 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap)
+
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
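
MLX5_CAP_GEN_2() indexes the new MLX5_CAP_GENERAL_2 capability page, which is only populated when the base capabilities advertise hca_cap_2 (the bit added to mlx5_ifc_cmd_hca_cap_bits later in this patch). A hedged usage sketch:

    #include <linux/mlx5/driver.h>

    /* Sketch: guard second-level capability reads on the hca_cap_2 bit. */
    static u8 max_reformat_insert_size(struct mlx5_core_dev *mdev)
    {
            if (!MLX5_CAP_GEN(mdev, hca_cap_2))
                    return 0;
            return MLX5_CAP_GEN_2(mdev, max_reformat_insert_size);
    }
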
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f8e8d7e90616..1efe37466969 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
enum {
MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+	/* Set during device detach to block any further device
+	 * creation/deletion on driver rescan. Unset during device attach.
+	 */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};
struct mlx5_adev {
@@ -550,6 +554,7 @@ struct mlx5_adev {
int idx;
};
+struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -602,6 +607,7 @@ struct mlx5_priv {
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
@@ -703,6 +709,27 @@ struct mlx5_hv_vhca;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MR_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_MTT_CACHE_ENTRY,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MR_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -731,7 +758,7 @@ struct mlx5_core_dev {
struct mutex intf_state_mutex;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
@@ -1083,18 +1110,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
-
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
-
/* Async-atomic event notifier used by mlx5 core to forward FW
* events received from the event queue to mlx5 consumers.
* Optimise event queue dispatching.
@@ -1148,15 +1163,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index e49d8c0d4f26..cea6ecb4b73e 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -16,6 +16,7 @@ struct mlx5_eq_param {
u8 irq_index;
int nent;
u64 mask[4];
+ cpumask_var_t affinity;
};
struct mlx5_eq *
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 17109b65c1ac..bc7db2e059eb 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -98,10 +98,11 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
u16 vport_num);
/* Reg C1 usage:
- * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) >
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
*
- * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is
- * encapsulation tunnel options, and the lowest 8 bits are used for zone id.
+ * The highest bit is reserved for other offloads as a marker bit, the next
+ * 12 bits of reg c1 are the encapsulation tunnel id, the next 11 bits are the
+ * encapsulation tunnel options, and the lowest 8 bits are used for the zone id.
*
* Zone id is used to restore CT flow when packet misses on chain.
*
@@ -109,16 +110,18 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
* on miss and to support inner header rewrite by means of implicit chain 0
* flows.
*/
+#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
-#define ESW_TUN_OPTS_BITS 12
+#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
-#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET)
-#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
-#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
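
With the reserved marker bit, the tunnel id, tunnel options and zone id now pack into the low 31 bits of reg C1. A small sketch decoding a value with the masks above (helper name hypothetical):

    #include <linux/mlx5/eswitch.h>

    /* Hypothetical helper: unpack the reg C1 layout documented above. */
    static void esw_decode_reg_c1(u32 reg_c1, u32 *tun_id, u32 *tun_opts, u32 *zone)
    {
            *zone = reg_c1 & ESW_ZONE_ID_MASK;
            *tun_opts = (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
            *tun_id = (reg_c1 & ESW_TUN_MASK) >>
                      (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);
    }
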
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1f51f4c3b1af..77746f7e35b8 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -87,6 +87,8 @@ enum {
FDB_BYPASS_PATH,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
FDB_PER_VPORT,
};
@@ -254,10 +256,16 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
+
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
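
Callers that previously passed type/size/data as separate arguments now bundle them in mlx5_pkt_reformat_params, which also carries the new param_0/param_1 fields used by the INSERT_HDR/REMOVE_HDR reformat types. A hedged call-site sketch (encap buffer assumed caller-provided):

    #include <linux/mlx5/fs.h>

    /* Sketch: allocate a VXLAN encap reformat context with the new API. */
    static struct mlx5_pkt_reformat *
    alloc_vxlan_reformat(struct mlx5_core_dev *mdev, void *encap_data,
                         size_t encap_size)
    {
            struct mlx5_pkt_reformat_params params = {
                    .type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
                    .size = encap_size,
                    .data = encap_data,
            };

            return mlx5_packet_reformat_alloc(mdev, &params,
                                              MLX5_FLOW_NAMESPACE_FDB);
    }
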
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d16eed6850e..e32a0d61929b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -435,7 +435,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_40[0x20];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 reserved_at_64[0x14];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -1289,6 +1292,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
enum {
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
@@ -1310,7 +1315,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
- u8 reserved_at_20[0x3];
+ u8 hca_cap_2[0x1];
+ u8 reserved_at_21[0x2];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@ -1730,6 +1736,17 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_7c0[0x40];
};
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0x740];
+};
+
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
@@ -3103,6 +3120,7 @@ struct mlx5_ifc_roce_addr_layout_bits {
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
@@ -3788,8 +3806,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -5783,12 +5801,14 @@ struct mlx5_ifc_query_eq_in_bits {
};
struct mlx5_ifc_packet_reformat_context_in_bits {
- u8 reserved_at_0[0x5];
- u8 reformat_type[0x3];
- u8 reserved_at_8[0xe];
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
u8 reformat_data[2][0x8];
u8 more_reformat_data[][0x8];
@@ -5828,12 +5848,20 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -5954,6 +5982,8 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -11053,6 +11083,11 @@ struct mlx5_ifc_create_sampler_obj_in_bits {
struct mlx5_ifc_sampler_obj_bits sampler_object;
};
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
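
The stubs make MPFS management transparent to callers when CONFIG_MLX5_MPFS is disabled. A minimal usage sketch (hypothetical caller):

    #include <linux/mlx5/mpfs.h>

    /* Sketch: pin a unicast MAC in the MPFS table, release it on teardown. */
    static int example_mac_setup(struct mlx5_core_dev *mdev, u8 *mac)
    {
            int err;

            err = mlx5_mpfs_add_mac(mdev, mac);
            if (err)
                    return err;

            /* ... use the MAC; then on teardown: */
            return mlx5_mpfs_del_mac(mdev, mac);
    }
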
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 028f442530cf..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c274f75efcf9..b8bc39237dac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -46,7 +46,7 @@ extern int sysctl_page_lock_unfairness;
void init_mm_internals(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
@@ -125,16 +125,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
/*
- * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
- * instrumented C code with jump table addresses. Architectures that
- * support CFI can define this macro to return the actual function address
- * when needed.
- */
-#ifndef function_nocfi
-#define function_nocfi(x) (x)
-#endif
-
-/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
* This macro should be defined within <asm/pgtable.h>.
@@ -234,7 +224,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp, void **shadowp);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#else
+#define nth_page(page,n) ((page) + (n))
+#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
@@ -1341,7 +1335,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
if (!is_cow_mapping(vma->vm_flags))
return false;
- if (!atomic_read(&vma->vm_mm->has_pinned))
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;
return page_maybe_dma_pinned(page);
@@ -1668,10 +1662,11 @@ struct address_space *page_mapping(struct page *page);
static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
- * Page index cannot be this large so this must be
- * a pfmemalloc page.
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
*/
- return page->index == -1UL;
+ return (uintptr_t)page->lru.next & BIT(1);
}
/*
@@ -1680,12 +1675,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
*/
static inline void set_page_pfmemalloc(struct page *page)
{
- page->index = -1UL;
+ page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
- page->index = 0;
+ page->lru.next = NULL;
}
/*
@@ -1709,8 +1704,8 @@ extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
@@ -1719,6 +1714,7 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ struct page *single_page; /* Locked page to be unmapped */
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1766,6 +1762,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
@@ -1786,6 +1783,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
@@ -1847,12 +1845,8 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
@@ -2417,7 +2411,7 @@ static inline unsigned long free_initmem_default(int poison)
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
- poison, "unused kernel");
+ poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
@@ -2457,7 +2451,7 @@ extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
@@ -2471,7 +2465,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
-extern void memmap_init_zone(struct zone *zone);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
@@ -2678,17 +2671,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+/**
+ * find_vma_intersection() - Look up the first VMA which intersects the interval
+ * @mm: The process address space.
+ * @start_addr: The inclusive start user address.
+ * @end_addr: The exclusive end user address.
+ *
+ * Return: The first VMA within the provided range, %NULL otherwise. Assumes
+ * start_addr < end_addr.
+ */
+static inline
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr,
+ unsigned long end_addr)
{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
+ struct vm_area_struct *vma = find_vma(mm, start_addr);
if (vma && end_addr <= vma->vm_start)
vma = NULL;
return vma;
}
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma = find_vma(mm, addr);
+
+ if (vma && addr < vma->vm_start)
+ vma = NULL;
+
+ return vma;
+}
+
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
unsigned long vm_start = vma->vm_start;
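
The difference from find_vma() is that vma_lookup() returns NULL unless the address actually falls inside a VMA, so callers no longer need the addr < vma->vm_start re-check. A short sketch (hypothetical helper; locking shown for completeness):

    #include <linux/mm.h>

    /* Hypothetical helper: true only if addr lies inside a mapped region. */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            bool mapped;

            mmap_read_lock(mm);
            mapped = vma_lookup(mm, addr) != NULL;
            mmap_read_unlock(mm);

            return mapped;
    }
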
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5aacc1c10a45..d33d97c69da9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -97,6 +97,13 @@ struct page {
};
struct { /* page_pool used by netstack */
/**
+ * @pp_magic: magic value to avoid recycling non
+ * page_pool allocated pages.
+ */
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ /**
* @dma_addr: might require a 64-bit value on
* 32-bit architectures.
*/
@@ -435,23 +442,6 @@ struct mm_struct {
*/
atomic_t mm_count;
- /**
- * @has_pinned: Whether this mm has pinned any pages. This can
- * be either replaced in the future by @pinned_vm when it
- * becomes stable, or grow into a counter on its own. We're
- * aggresive on this bit now - even if the pinned pages were
- * unpinned later on, we'll still keep this bit set for the
- * lifecycle of this mm just for simplicity.
- */
- atomic_t has_pinned;
-
- /**
- * @write_protect_seq: Locked when any thread is write
- * protecting pages mapped by this mm to enforce a later COW,
- * for instance during page table copying for fork().
- */
- seqcount_t write_protect_seq;
-
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
@@ -460,6 +450,18 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
+	/*
+	 * With some kernel configs, the current mmap_lock's offset inside
+	 * 'mm_struct' is 0x120, which is near-optimal: its two hot fields
+	 * 'count' and 'owner' sit in two different cachelines, so when
+	 * mmap_lock is highly contended and both fields are accessed
+	 * frequently, this layout helps to reduce cache bouncing.
+	 *
+	 * So please be careful with adding new fields before mmap_lock,
+	 * which can easily push the two fields into one cacheline.
+	 */
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -480,7 +482,15 @@ struct mm_struct {
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 629cefc4ecba..ebb09a964272 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -31,6 +31,8 @@
/*
* The historical set of flags that all mmap implementations implicitly
* support when a ->mmap_validate() op is not provided in file_operations.
+ *
+ * MAP_EXECUTABLE is completely ignored throughout the kernel.
*/
#define LEGACY_MAP_MASK (MAP_SHARED \
| MAP_PRIVATE \
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f9ad35dd6012..74e6c0624d27 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -139,6 +139,8 @@ struct sd_scr {
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -189,6 +191,25 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
};
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -290,6 +311,8 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c7e7b43600e9..0abd47e9ef9b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -632,6 +632,6 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
}
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 2236aa540faa..6727576a8755 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -29,6 +29,10 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+ /* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 5d0767cb424a..1935d4c72d10 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,7 @@ struct page;
struct vm_area_struct;
struct mm_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0d53eba1c383..265a32e1ff74 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
+#include <linux/local_lock.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
@@ -134,10 +135,10 @@ enum numa_stat_item {
NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
- NR_VM_NUMA_STAT_ITEMS
+ NR_VM_NUMA_EVENT_ITEMS
};
#else
-#define NR_VM_NUMA_STAT_ITEMS 0
+#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
@@ -332,29 +333,55 @@ enum zone_watermarks {
NR_WMARK
};
+/*
+ * One list per migratetype for each order up to and including
+ * PAGE_ALLOC_COSTLY_ORDER, plus one additional list per migratetype for
+ * pageblock-order pages if THP is configured.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_PCP_THP 1
+#else
+#define NR_PCP_THP 0
+#endif
+#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
+
+/*
+ * Shift to encode migratetype and order in the same integer, with order
+ * in the least significant bits.
+ */
+#define NR_PCP_ORDER_WIDTH 8
+#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
+
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
+/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
+ short free_factor; /* batch scaling factor during free */
+#ifdef CONFIG_NUMA
+ short expire; /* When 0, remote pagesets are drained */
+#endif
/* Lists of pages, one per migrate type stored on the pcp-lists */
- struct list_head lists[MIGRATE_PCPTYPES];
+ struct list_head lists[NR_PCP_LISTS];
};
-struct per_cpu_pageset {
- struct per_cpu_pages pcp;
-#ifdef CONFIG_NUMA
- s8 expire;
- u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
-#endif
+struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s8 stat_threshold;
+#endif
+#ifdef CONFIG_NUMA
+ /*
+ * Low priority inaccurate counters that are only folded
+ * on demand. Use a large type to avoid the overhead of
+ * folding during refresh_cpu_vm_stats.
+ */
+ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
@@ -484,7 +511,8 @@ struct zone {
int node;
#endif
struct pglist_data *zone_pgdat;
- struct per_cpu_pageset __percpu *pageset;
+ struct per_cpu_pages __percpu *per_cpu_pageset;
+ struct per_cpu_zonestat __percpu *per_cpu_zonestats;
/*
* the high and batch values are copied to individual pagesets for
* faster access
@@ -619,7 +647,7 @@ struct zone {
ZONE_PADDING(_pad3_)
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
- atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
+ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
@@ -637,6 +665,7 @@ enum zone_flags {
ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
* Cleared when kswapd is woken.
*/
+ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
};
static inline unsigned long zone_managed_pages(struct zone *zone)
@@ -738,10 +767,12 @@ struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+/*
+ * The array of struct pages for flatmem.
+ * It must be declared for SPARSEMEM as well because there are configurations
+ * that rely on that.
+ */
extern struct page *mem_map;
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
@@ -775,7 +806,7 @@ typedef struct pglist_data {
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones; /* number of populated zones in this node */
-#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
@@ -865,7 +896,7 @@ typedef struct pglist_data {
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLATMEM
#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
@@ -982,22 +1013,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {}
extern int movable_zone;
-#ifdef CONFIG_HIGHMEM
-static inline int zone_movable_is_highmem(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- return movable_zone == ZONE_HIGHMEM;
-#else
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
-#endif
-}
-#endif
-
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
- (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+ (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
@@ -1029,7 +1049,7 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
+int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
@@ -1037,21 +1057,21 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
-extern int percpu_pagelist_fraction;
+extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
@@ -1200,8 +1220,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#ifdef CONFIG_SPARSEMEM
/*
- * SECTION_SHIFT #bits space required to store a section #
- *
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
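
NR_PCP_ORDER_WIDTH and NR_PCP_ORDER_MASK describe how page_alloc.c packs an order and a migratetype into one integer. A hedged sketch of the arithmetic (helper names hypothetical; the in-tree code uses its own pindex helpers):

    /* Hypothetical helpers mirroring the documented encoding: order in the
     * low NR_PCP_ORDER_WIDTH bits, migratetype in the bits above it.
     */
    static inline int pcp_encode(int migratetype, unsigned int order)
    {
            return (migratetype << NR_PCP_ORDER_WIDTH) | order;
    }

    static inline unsigned int pcp_decode_order(int encoded)
    {
            return encoded & NR_PCP_ORDER_MASK;
    }

    static inline int pcp_decode_migratetype(int encoded)
    {
            return encoded >> NR_PCP_ORDER_WIDTH;
    }
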
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 7d45b5f989b0..8e291cfdaf06 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -447,6 +447,7 @@ struct hv_vmbus_device_id {
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data;
};
/* i2c */
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6bb92f26833e..6988956b8492 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -170,6 +170,28 @@ struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
+/*
+ * For erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
/**
* struct spinand_id - SPI NAND id structure
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index f41387a8969f..41f24b5241ab 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -4,6 +4,8 @@
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
+#include <linux/auxiliary_bus.h>
+
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
@@ -48,7 +50,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct i40e_qv_info qv_info[];
};
@@ -78,6 +80,7 @@ struct i40e_info {
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
+ struct auxiliary_device *aux_dev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
@@ -100,6 +103,11 @@ struct i40e_info {
u32 fw_build; /* firmware build number */
};
+struct i40e_auxiliary_device {
+ struct auxiliary_device aux_dev;
+ struct i40e_info *ldev;
+};
+
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
@@ -187,6 +195,8 @@ static inline bool i40e_client_is_registered(struct i40e_client *client)
return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
}
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
/* used by clients */
int i40e_register_client(struct i40e_client *client);
int i40e_unregister_client(struct i40e_client *client);
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
new file mode 100644
index 000000000000..e32f6712aee0
--- /dev/null
+++ b/include/linux/net/intel/iidc.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _IIDC_H_
+#define _IIDC_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dcbnl.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+enum iidc_event_type {
+ IIDC_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_EVENT_AFTER_MTU_CHANGE,
+ IIDC_EVENT_BEFORE_TC_CHANGE,
+ IIDC_EVENT_AFTER_TC_CHANGE,
+ IIDC_EVENT_CRIT_ERR,
+ IIDC_EVENT_NBITS /* must be last */
+};
+
+enum iidc_reset_type {
+ IIDC_PFR,
+ IIDC_CORER,
+ IIDC_GLOBR,
+};
+
+#define IIDC_MAX_USER_PRIORITY 8
+
+/* Struct to hold per RDMA Qset info */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid; /* Qset TEID */
+ u16 qs_handle; /* RDMA driver provides this */
+ u16 vport_id; /* VSI index */
+ u8 tc; /* TC branch the Qset should belong to */
+};
+
+struct iidc_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_qos_params {
+ struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+};
+
+struct iidc_event {
+ DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+ u32 reg;
+};
+
+struct ice_pf;
+
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+
+#define IIDC_RDMA_ROCE_NAME "roce"
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev. Each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+
+struct iidc_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct ice_pf *pf;
+};
+
+/* Structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ /* This event_handler is meant to be a blocking call. For instance,
+ * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
+ * return until the auxiliary driver is ready for the MTU change to
+ * happen.
+ */
+ void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+};
+
+#endif /* _IIDC_H_*/
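
A hedged sketch of the consumer side (names hypothetical): the RDMA auxiliary driver embeds its auxiliary_driver in iidc_auxiliary_drv so the ice core driver can container_of() its way to event_handler:

    #include <linux/net/intel/iidc.h>

    /* Hypothetical consumer: react to MTU changes signalled by the core. */
    static void my_event_handler(struct ice_pf *pf, struct iidc_event *event)
    {
            if (test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type)) {
                    /* quiesce RDMA traffic; do not return until ready */
            }
    }

    static struct iidc_auxiliary_drv my_iidc_drv = {
            /* .adrv.probe / .adrv.remove / .adrv.id_table omitted */
            .event_handler = my_event_handler,
    };
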
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 3de38d6a0aea..2c6b9e416225 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -93,7 +93,7 @@ enum {
/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
* in Documentation/networking/netdev-features.rst.
*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbc950b34df..eaf5bb008aa9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4114,7 +4114,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
@@ -4187,8 +4187,8 @@ unsigned long dev_trans_start(struct net_device *dev);
void __netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
-
void netif_carrier_off(struct net_device *dev);
+void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index f0f3a8354c3c..3fda1a508733 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -65,8 +65,8 @@ struct nf_hook_ops;
struct sock;
struct nf_hook_state {
- unsigned int hook;
- u_int8_t pf;
+ u8 hook;
+ u8 pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
@@ -77,12 +77,18 @@ struct nf_hook_state {
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
+enum nf_hook_ops_type {
+ NF_HOOK_OP_UNDEFINED,
+ NF_HOOK_OP_NF_TABLES,
+};
+
struct nf_hook_ops {
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
- u_int8_t pf;
+ u8 pf;
+ enum nf_hook_ops_type hook_ops_type:8;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 515ce53aa20d..241e005f290a 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -11,6 +11,7 @@ struct nfnl_info {
struct net *net;
struct sock *sk;
const struct nlmsghdr *nlh;
+ const struct nfgenmsg *nfmsg;
struct netlink_ext_ack *extack;
};
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 07c6ad8f2a02..28d7027cd460 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -36,8 +36,8 @@ struct xt_action_param {
const void *matchinfo, *targinfo;
};
const struct nf_hook_state *state;
- int fragoff;
unsigned int thoff;
+ u16 fragoff;
bool hotdrop;
};
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index edcbd60b88b9..b7c4c4130b65 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -636,8 +636,8 @@ struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
__u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
+ __le64 slba;
+ __le64 nlb;
__u8 guid[16];
__u8 rsvd48[16];
};
@@ -944,6 +944,13 @@ struct nvme_zone_mgmt_recv_cmd {
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
+ NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
+ NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
+ NVME_ZRASF_ZONE_STATE_FULL = 0x06,
+ NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
NVME_REPORT_ZONE_PARTIAL = 1,
};
@@ -1504,6 +1511,7 @@ enum {
NVME_SC_NS_WRITE_PROTECTED = 0x20,
NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_TRANSIENT_TR_ERR = 0x22,
+ NVME_SC_INVALID_IO_CMD_SET = 0x2C,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2b05e7f7c238..da633d34ab86 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -72,6 +72,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
{
return NULL;
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 461b7aa587ba..3d8db1f6a5db 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -54,6 +54,10 @@ enum OID {
OID_md4, /* 1.2.840.113549.2.4 */
OID_md5, /* 1.2.840.113549.2.5 */
+ OID_mskrb5, /* 1.2.840.48018.1.2.2 */
+ OID_krb5, /* 1.2.840.113554.1.2.2 */
+ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
+
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
@@ -62,6 +66,10 @@ enum OID {
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+
+ OID_spnego, /* 1.3.6.1.5.5.2 */
+
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
new file mode 100644
index 000000000000..861e606b820f
--- /dev/null
+++ b/include/linux/once_lite.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ONCE_LITE_H
+#define _LINUX_ONCE_LITE_H
+
+#include <linux/types.h>
+
+/* Call a function once. Similar to DO_ONCE(), but does not use jump label
+ * patching via static keys.
+ */
+#define DO_ONCE_LITE(func, ...) \
+ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
+#define DO_ONCE_LITE_IF(condition, func, ...) \
+ ({ \
+ static bool __section(".data.once") __already_done; \
+ bool __ret_do_once = !!(condition); \
+ \
+ if (unlikely(__ret_do_once && !__already_done)) { \
+ __already_done = true; \
+ func(__VA_ARGS__); \
+ } \
+ unlikely(__ret_do_once); \
+ })
+
+#endif /* _LINUX_ONCE_LITE_H */
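
DO_ONCE_LITE() replaces the static-key patching of DO_ONCE() with a plain flag in .data.once. A minimal usage sketch:

    #include <linux/once_lite.h>
    #include <linux/printk.h>

    /* Warn exactly once, no matter how often the slow path is taken. */
    static void handle_slow_path(void)
    {
            DO_ONCE_LITE(pr_warn, "falling back to slow path\n");
    }
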
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 04a34c08e0a6..458696550028 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -138,6 +138,9 @@ enum pageflags {
#ifdef CONFIG_64BIT
PG_arch_2,
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+ PG_skip_kasan_poison,
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -177,17 +180,17 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
-
-static inline struct page *compound_head(struct page *page)
+static inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
+ return head - 1;
+ return (unsigned long)page;
}
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
static __always_inline int PageTail(struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
@@ -443,6 +446,12 @@ TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
+#else
+PAGEFLAG_FALSE(SkipKASanPoison)
+#endif
+
/*
* PageReported() is used to track reported free pages within the Buddy
* allocator. We can use the non-atomic version of the test and set
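
[Editorial note, not part of the patch.] The reason compound_head() becomes a macro: the typeof() cast makes it const-preserving, so a const struct page * in yields a const struct page * out. The const-taking helpers converted later in this patch depend on that. Illustration only, restating page_count() from page_ref.h:

#include <linux/mm.h>

static inline int demo_page_count(const struct page *page)
{
	/* compound_head(page) has type const struct page * in this context */
	return atomic_read(&compound_head(page)->_refcount);
}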
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 3468794f83d2..719bfe5108c5 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, unsigned int nr);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason)
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
@@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index f3318f34fc54..7ad46f45df39 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+static inline int page_count(const struct page *page)
{
return atomic_read(&compound_head(page)->_refcount);
}
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
index 3b99e0ec24f2..fe648dfa3a7c 100644
--- a/include/linux/page_reporting.h
+++ b/include/linux/page_reporting.h
@@ -18,6 +18,9 @@ struct page_reporting_dev_info {
/* Current state of page reporting */
atomic_t state;
+
+ /* Minimal order of page reporting */
+ unsigned int order;
};
/* Tear-down and bring-up for page reporting devices */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fff52ad370c1..973fd731a520 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -54,7 +54,7 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long mask);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e89df447fae3..ed02aa522263 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -516,7 +516,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}
/*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
@@ -535,15 +535,16 @@ static inline pgoff_t page_to_index(struct page *page)
return pgoff;
}
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
+ if (unlikely(PageHuge(page)))
+ return hugetlb_basepage_index(page);
return page_to_index(page);
}
@@ -701,6 +702,10 @@ int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
+void __set_page_dirty(struct page *, struct address_space *, int warn);
+int __set_page_dirty_nobuffers(struct page *page);
+int __set_page_dirty_no_writeback(struct page *page);
+
void page_endio(struct page *page, bool is_write, int err);
/**
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c20211e59a57..24306504226a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */
static inline struct device_node *
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4c3fa5293d76..4bac1831de80 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -555,6 +555,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -631,6 +632,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index 2cb5188a7ef1..add077a81b21 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -10,37 +10,33 @@
#include <linux/phy.h>
#include <linux/phylink.h>
+#define NXP_SJA1105_XPCS_ID 0x00000010
+#define NXP_SJA1110_XPCS_ID 0x00000020
+
/* AN mode */
#define DW_AN_C73 1
#define DW_AN_C37_SGMII 2
+#define DW_2500BASEX 3
-struct mdio_xpcs_args {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- struct mii_bus *bus;
- int addr;
- int an_mode;
-};
+struct xpcs_id;
-struct mdio_xpcs_ops {
- int (*validate)(struct mdio_xpcs_args *xpcs,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*config)(struct mdio_xpcs_args *xpcs,
- const struct phylink_link_state *state);
- int (*get_state)(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state);
- int (*link_up)(struct mdio_xpcs_args *xpcs, int speed,
- phy_interface_t interface);
- int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
+struct dw_xpcs {
+ struct mdio_device *mdiodev;
+ const struct xpcs_id *id;
+ struct phylink_pcs pcs;
};
-#if IS_ENABLED(CONFIG_PCS_XPCS)
-struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
-#else
-static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
-{
- return NULL;
-}
-#endif
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ unsigned int mode);
+void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
+ struct phylink_link_state *state);
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
+ int enable);
+struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface);
+void xpcs_destroy(struct dw_xpcs *xpcs);
#endif /* __LINUX_PCS_XPCS_H */
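
[Editorial usage note, not part of the patch.] The ops table is gone; callers now create and hold a struct dw_xpcs directly. A rough sketch of the new lifecycle, assuming an existing mdio_device and a phylink-based MAC (function names hypothetical):

#include <linux/err.h>
#include <linux/pcs/pcs-xpcs.h>

static struct dw_xpcs *demo_attach_xpcs(struct mdio_device *mdiodev)
{
	struct dw_xpcs *xpcs;

	xpcs = xpcs_create(mdiodev, PHY_INTERFACE_MODE_SGMII);
	if (IS_ERR(xpcs))
		return xpcs;

	/* ... hand &xpcs->pcs to phylink via phylink_set_pcs() ... */
	return xpcs;
}

static void demo_detach_xpcs(struct dw_xpcs *xpcs)
{
	xpcs_destroy(xpcs);
}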
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f5a6a2f069ed..2d510ad750ed 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1576,6 +1576,12 @@ static struct perf_pmu_events_attr _var = { \
.event_str = _str, \
};
+#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
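
[Editorial usage note, not part of the patch.] PMU_EVENT_ATTR_ID() builds an anonymous perf_pmu_events_attr through a compound literal and yields the embedded struct attribute pointer, so event attributes can be declared inline in an attribute array. A hedged sketch with an invented event name and id:

#include <linux/perf_event.h>
#include <linux/sysfs.h>

static ssize_t demo_event_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

static struct attribute *demo_events_attrs[] = {
	PMU_EVENT_ATTR_ID(cycles, demo_event_show, 0x11),	/* hypothetical */
	NULL,
};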
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 46b13780c2c8..c32600c9e1ad 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 * To differentiate it from the macro pte_mkyoung, this macro is used on platforms
* where software maintains page access bit.
*/
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif
@@ -1584,4 +1592,26 @@ typedef unsigned int pgtbl_mod_mask;
#define pte_leaf_size(x) PAGE_SIZE
#endif
+/*
+ * Some architectures have MMUs that are configurable or selectable at boot
+ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
+ * helps to have a static maximum value.
+ */
+
+#ifndef MAX_PTRS_PER_PTE
+#define MAX_PTRS_PER_PTE PTRS_PER_PTE
+#endif
+
+#ifndef MAX_PTRS_PER_PMD
+#define MAX_PTRS_PER_PMD PTRS_PER_PMD
+#endif
+
+#ifndef MAX_PTRS_PER_PUD
+#define MAX_PTRS_PER_PUD PTRS_PER_PUD
+#endif
+
+#ifndef MAX_PTRS_PER_P4D
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+#endif
+
#endif /* _LINUX_PGTABLE_H */
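
[Editorial note, not part of the patch.] A minimal sketch of the intended use (declaration only): a statically allocated table sized for the boot-time worst case; the series appears to add these for KASAN's statically sized early shadow tables.

static pte_t demo_shadow_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;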
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 60d2b26026a2..3b80dc3ed68b 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -93,6 +93,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
* @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
* @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role
* @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
* @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
* @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
@@ -111,6 +112,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
* @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
* @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
* @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
* @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
* @PHY_INTERFACE_MODE_MAX: Book keeping
@@ -126,6 +128,7 @@ typedef enum {
PHY_INTERFACE_MODE_TBI,
PHY_INTERFACE_MODE_REVMII,
PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_REVRMII,
PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_RGMII_ID,
PHY_INTERFACE_MODE_RGMII_RXID,
@@ -145,6 +148,7 @@ typedef enum {
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
@@ -185,6 +189,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rev-mii";
case PHY_INTERFACE_MODE_RMII:
return "rmii";
+ case PHY_INTERFACE_MODE_REVRMII:
+ return "rev-rmii";
case PHY_INTERFACE_MODE_RGMII:
return "rgmii";
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -219,6 +225,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xaui";
case PHY_INTERFACE_MODE_10GBASER:
return "10gbase-r";
+ case PHY_INTERFACE_MODE_25GBASER:
+ return "25gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
@@ -496,6 +504,11 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
+ * Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * Bits [23:16] are currently reserved for future use.
+ * Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
* @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
@@ -1368,10 +1381,42 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
#if IS_ENABLED(CONFIG_PHYLIB)
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
+struct phy_device *device_phy_find_device(struct device *dev);
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
#else
+static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ return 0;
+}
+static inline
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+	return NULL;
+}
+
+static inline
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ return NULL;
+}
+
+static inline struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return NULL;
+}
+
+static inline
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
static inline
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
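
[Editorial usage note, not part of the patch.] A sketch of resolving a PHY through the new fwnode helpers (function name hypothetical): fwnode_get_phy_node() follows "phy-handle" and its legacy aliases, and the lookup returns a referenced device the caller must eventually drop.

#include <linux/phy.h>
#include <linux/property.h>

static struct phy_device *demo_find_phy(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *phy_node;
	struct phy_device *phydev;

	phy_node = fwnode_get_phy_node(fwnode);
	if (IS_ERR(phy_node))
		return NULL;

	phydev = fwnode_phy_find_device(phy_node);
	fwnode_handle_put(phy_node);
	return phydev;	/* caller owns a reference; put_device() when done */
}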
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index fd2acfd9b597..afb3ded0b691 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -441,6 +441,9 @@ void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags);
void phylink_disconnect_phy(struct phylink *);
void phylink_mac_change(struct phylink *, bool up);
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index e18ab3d5908f..5a96602a3316 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -89,7 +89,7 @@ struct pinctrl_map;
* it.
* @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
* value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
* section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h
deleted file mode 100644
index 81a388ff58cc..000000000000
--- a/include/linux/platform_data/spi-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#ifndef _ATH79_SPI_PLATFORM_H
-#define _ATH79_SPI_PLATFORM_H
-
-struct ath79_spi_platform_data {
- unsigned bus_num;
- unsigned num_chipselect;
-};
-
-#endif /* _ATH79_SPI_PLATFORM_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index fafc1beea504..9837fb011f2f 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,7 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index a6329003aee7..e5cbb6841f3a 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -2,7 +2,7 @@
/*
* Platform profile sysfs interface
*
- * See Documentation/ABI/testing/sysfs-platform_profile.rst for more
+ * See Documentation/userspace-api/sysfs-platform_profile.rst for more
* information.
*/
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index dfcfbcecc34b..21a0577305ef 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -198,6 +198,7 @@ struct generic_pm_domain_data {
struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
+ unsigned int rpm_pstate;
ktime_t next_wakeup;
void *data;
};
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 6c08a085367b..aab8b35e9f8a 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -380,6 +380,9 @@ static inline int pm_runtime_get(struct device *dev)
* The possible return values of this function are the same as for
* pm_runtime_resume() and the runtime PM usage counter of @dev remains
* incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
*/
static inline int pm_runtime_get_sync(struct device *dev)
{
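
[Editorial usage note, not part of the patch.] The pattern the added comment recommends, as a short sketch: pm_runtime_resume_and_get() drops the usage counter itself on failure, so the error path needs no pm_runtime_put_noidle().

#include <linux/pm_runtime.h>

static int demo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter not left incremented */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}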
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 12cbbf305969..fa9f08164c36 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -43,6 +43,36 @@
*/
#define PMBUS_NO_CAPABILITY BIT(2)
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the
+ * chip's PMBus controller to a known state after a failed register check.
+ * This can be done by reading a known register. By setting this flag the
+ * driver will try to read the STATUS register after each failed
+ * register check. This read may fail, but it will put the chip in a
+ * known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
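
[Editorial usage note, not part of the patch.] A hedged sketch of how a chip driver might opt in to the new flags via platform data. Names are hypothetical, and note that pmbus_do_probe() and struct pmbus_driver_info live in the hwmon PMBus core header (drivers/hwmon/pmbus/pmbus.h), not in this file.

#include <linux/i2c.h>
#include <linux/pmbus.h>

static struct pmbus_driver_info demo_info = {
	.pages = 1,
	/* ... formats and coefficients elided ... */
};

static int demo_pmbus_probe(struct i2c_client *client)
{
	struct pmbus_platform_data *pdata;

	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->flags = PMBUS_READ_STATUS_AFTER_FAILED_CHECK |
		       PMBUS_NO_WRITE_PROTECT;
	client->dev.platform_data = pdata;

	return pmbus_do_probe(client, &demo_info);
}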
diff --git a/include/linux/poison.h b/include/linux/poison.h
index aff1c9250c82..d62ef5a6b4e9 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -78,4 +78,7 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+
#endif
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index bbf4b4ad61df..056d31317e49 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m)
*/
static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
state->s1 = __seed(i, 2U);
state->s2 = __seed(i, 8U);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index fe7eb2351610..e834d78f0478 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/ratelimit_types.h>
+#include <linux/once_lite.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -206,6 +207,7 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
@@ -269,6 +271,10 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
+static inline void dump_stack_lvl(const char *log_lvl)
+{
+}
+
static inline void dump_stack(void)
{
}
@@ -282,6 +288,47 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif
+#ifdef CONFIG_SMP
+extern int __printk_cpu_trylock(void);
+extern void __printk_wait_on_cpu_lock(void);
+extern void __printk_cpu_unlock(void);
+
+/**
+ * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ * lock and disable interrupts.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_unlock_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ */
+#define printk_cpu_lock_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_trylock()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_wait_on_cpu_lock(); \
+ }
+
+/**
+ * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ */
+#define printk_cpu_unlock_irqrestore(flags) \
+ do { \
+ __printk_cpu_unlock(); \
+ local_irq_restore(flags); \
+	} while (0)
+
+#else
+
+#define printk_cpu_lock_irqsave(flags) ((void)flags)
+#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+
+#endif /* CONFIG_SMP */
+
extern int kptr_restrict;
/**
@@ -436,27 +483,9 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
-({ \
- static bool __section(".data.once") __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
-({ \
- static bool __section(".data.once") __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk_deferred(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
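
[Editorial usage note, not part of the patch.] A short sketch of the new cpu-reentrant lock in use, the same pattern dump_stack_lvl() adopts to keep multi-line output from interleaving across CPUs:

#include <linux/printk.h>

static void demo_dump(void)
{
	unsigned long flags;

	printk_cpu_lock_irqsave(flags);
	pr_info("begin report\n");
	/* ... multiple printk() lines that must not interleave ... */
	pr_info("end report\n");
	printk_cpu_unlock_irqrestore(flags);
}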
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644
index 000000000000..24da8364b919
--- /dev/null
+++ b/include/linux/prmt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+#else
+static inline void init_prmt(void) { }
+#endif
diff --git a/include/linux/property.h b/include/linux/property.h
index 0d876316e61d..073e680c35e2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -119,7 +119,7 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
-int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
unsigned int device_get_child_node_count(struct device *dev);
diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h
index 99564f93d774..924ca07aafbd 100644
--- a/include/linux/pstore_blk.h
+++ b/include/linux/pstore_blk.h
@@ -10,36 +10,15 @@
/**
* struct pstore_device_info - back-end pstore/blk driver structure.
*
- * @total_size: The total size in bytes pstore/blk can use. It must be greater
- * than 4096 and be multiple of 4096.
* @flags: Refer to macro starting with PSTORE_FLAGS defined in
* linux/pstore.h. It means what front-ends this device support.
* Zero means all backends for compatible.
- * @read: The general read operation. Both of the function parameters
- * @size and @offset are relative value to bock device (not the
- * whole disk).
- * On success, the number of bytes should be returned, others
- * means error.
- * @write: The same as @read, but the following error number:
- * -EBUSY means try to write again later.
- * -ENOMSG means to try next zone.
- * @erase: The general erase operation for device with special removing
- * job. Both of the function parameters @size and @offset are
- * relative value to storage.
- * Return 0 on success and others on failure.
- * @panic_write:The write operation only used for panic case. It's optional
- * if you do not care panic log. The parameters are relative
- * value to storage.
- * On success, the number of bytes should be returned, others
- * excluding -ENOMSG mean error. -ENOMSG means to try next zone.
+ * @zone: The struct pstore_zone_info details.
+ *
*/
struct pstore_device_info {
- unsigned long total_size;
unsigned int flags;
- pstore_zone_read_op read;
- pstore_zone_write_op write;
- pstore_zone_erase_op erase;
- pstore_zone_write_op panic_write;
+ struct pstore_zone_info zone;
};
int register_pstore_device(struct pstore_device_info *dev);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d47fd33b228..aba237c0b3a2 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -186,6 +186,32 @@ struct ptp_clock_event {
};
};
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
+ */
+ s64 ppb = 1 + ppm;
+
+ ppb *= 125;
+ ppb >>= 13;
+ return (long)ppb;
+}
+
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
/**
@@ -230,14 +256,6 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
- * scaled_ppm_to_ppb() - convert scaled ppm to ppb
- *
- * @ppm: Parts per million, but with a 16 bit binary fractional field
- */
-
-extern s32 scaled_ppm_to_ppb(long ppm);
-
-/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
* The caller must hold ptp_clock::pincfg_mux. Drivers do not have
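
[Editorial note, not part of the patch.] A worked check of the fixed-point conversion: 1 ppm in the 16.16 "scaled ppm" format is 1 << 16 = 65536, which must map to 1000 ppb. Exactly: 65536 * 1000 / 2^16 = 1000; as coded: ((1 + 65536) * 125) >> 13 = 8192125 >> 13 = 1000.

#include <linux/ptp_clock_kernel.h>

static long demo_one_ppm_in_ppb(void)
{
	return scaled_ppm_to_ppb(1 << 16);	/* == 1000 */
}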
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 7f73b26ed22e..a3fec2de512f 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -11,8 +11,8 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
#include <linux/bits.h>
#include <linux/compiler_types.h>
@@ -38,7 +38,6 @@ struct device_node;
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -60,7 +59,7 @@ struct device_node;
/* PXA27x, PXA3xx */
#define SSCR0_EDSS BIT(20) /* Extended data size select */
#define SSCR0_NCS BIT(21) /* Network clock select */
-#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
@@ -105,6 +104,9 @@ struct device_node;
#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
@@ -124,7 +126,7 @@ struct device_node;
#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -181,6 +183,21 @@ struct device_node;
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
+
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
@@ -203,8 +220,10 @@ enum pxa_ssp_type {
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
@@ -252,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -270,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
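
[Editorial usage note, not part of the patch.] A minimal sketch of what the new helpers are for (function name hypothetical): SSCR0 must be reprogrammed with the port disabled, so drivers bracket the rewrite with disable/enable.

#include <linux/pxa2xx_ssp.h>

static void demo_reprogram(struct ssp_device *ssp, u32 new_sscr0)
{
	pxa_ssp_disable(ssp);
	pxa_ssp_write_reg(ssp, SSCR0, new_sscr0 & ~SSCR0_SSE);
	pxa_ssp_enable(ssp);
}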
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 977807e1be53..0a3807e927c5 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -702,7 +702,7 @@ enum mf_mode {
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
new file mode 100644
index 000000000000..5a2ab0606308
--- /dev/null
+++ b/include/linux/qed/nvmetcp_common.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef __NVMETCP_COMMON__
+#define __NVMETCP_COMMON__
+
+#include "tcp_common.h"
+#include <linux/nvme-tcp.h>
+
+#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
+#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
+
+/* NVMeTCP firmware function init parameters */
+struct nvmetcp_spe_func_init {
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 flags;
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
+ u8 debug_flags;
+ __le16 reserved1;
+ u8 params;
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[5];
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
+
+/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
+struct nvmetcp_init_ramrod_params {
+ struct nvmetcp_spe_func_init nvmetcp_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+/* NVMeTCP Ramrod Command IDs */
+enum nvmetcp_ramrod_cmd_id {
+ NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_NVMETCP_RAMROD_CMD_ID
+};
+
+struct nvmetcp_glbl_queue_entry {
+ struct regpair cq_pbl_addr;
+ struct regpair reserved;
+};
+
+/* NVMeTCP conn level EQEs */
+enum nvmetcp_eqe_opcode {
+ NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
+ NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
+	NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
+ NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
+ NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
+ NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
+ NVMETCP_EVENT_TYPE_RESERVED0,
+ NVMETCP_EVENT_TYPE_RESERVED1,
+ NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
+ NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
+ MAX_NVMETCP_EQE_OPCODE
+};
+
+struct nvmetcp_conn_offload_section {
+ struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
+ __le16 cccid_max_range; /* CCCID max value - used for validation */
+ __le16 reserved[3];
+};
+
+/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
+struct nvmetcp_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 initial_ack;
+
+ struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
+};
+
+/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
+struct nvmetcp_spe_conn_offload {
+ __le16 reserved;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct nvmetcp_conn_offload_params nvmetcp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
+struct nvmetcp_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 reserved4[5];
+};
+
+/* NVMeTCP connection termination request */
+struct nvmetcp_spe_conn_termination {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair reserved3;
+ struct regpair reserved4;
+};
+
+struct nvmetcp_dif_flags {
+ u8 flags;
+};
+
+enum nvmetcp_wqe_type {
+ NVMETCP_WQE_TYPE_NORMAL,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH,
+ NVMETCP_WQE_TYPE_IC,
+ MAX_NVMETCP_WQE_TYPE
+};
+
+struct nvmetcp_wqe {
+ __le16 task_id;
+ u8 flags;
+#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
+#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
+#define NVMETCP_WQE_NUM_SGES_MASK 0xF
+#define NVMETCP_WQE_NUM_SGES_SHIFT 3
+#define NVMETCP_WQE_RESPONSE_MASK 0x1
+#define NVMETCP_WQE_RESPONSE_SHIFT 7
+ struct nvmetcp_dif_flags prot_flags;
+ __le32 contlen_cdbsize;
+#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
+#define NVMETCP_WQE_CONT_LEN_SHIFT 0
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
+};
+
+struct nvmetcp_host_cccid_itid_entry {
+ __le16 itid;
+};
+
+struct nvmetcp_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
+};
+
+struct nvmetcp_eqe_data {
+ __le16 icid;
+ __le16 conn_id;
+ __le16 reserved;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
+#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
+};
+
+enum nvmetcp_task_type {
+ NVMETCP_TASK_TYPE_HOST_WRITE,
+ NVMETCP_TASK_TYPE_HOST_READ,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
+ NVMETCP_TASK_TYPE_RESERVED0,
+ NVMETCP_TASK_TYPE_CLEANUP,
+ NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
+ MAX_NVMETCP_TASK_TYPE
+};
+
+struct nvmetcp_db_data {
+ u8 params;
+#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
+#define NVMETCP_DB_DATA_DEST_SHIFT 0
+#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
+#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
+#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
+#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */
+ __le16 sq_prod;
+};
+
+struct nvmetcp_fw_nvmf_cqe {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_icresp_mdata {
+ u8 digest;
+ u8 cpda;
+ __le16 pfv;
+ __le32 maxdata;
+ __le16 rsvd[4];
+};
+
+union nvmetcp_fw_cqe_data {
+ struct nvmetcp_fw_nvmf_cqe nvme_cqe;
+ struct nvmetcp_icresp_mdata icresp_mdata;
+};
+
+struct nvmetcp_fw_cqe {
+ __le16 conn_id;
+ u8 cqe_type;
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1;
+ union nvmetcp_fw_cqe_data cqe_data;
+ struct regpair task_opaque;
+ __le32 reserved[6];
+};
+
+enum nvmetcp_fw_cqes_type {
+ NVMETCP_FW_CQE_TYPE_NORMAL = 1,
+ NVMETCP_FW_CQE_TYPE_RESERVED0,
+ NVMETCP_FW_CQE_TYPE_RESERVED1,
+ NVMETCP_FW_CQE_TYPE_CLEANUP,
+ NVMETCP_FW_CQE_TYPE_DUMMY,
+ MAX_NVMETCP_FW_CQES_TYPE
+};
+
+struct ystorm_nvmetcp_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 resrved0;
+ __le32 buffer_offset;
+ __le16 cccid;
+ struct nvmetcp_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
+};
+
+struct ystorm_nvmetcp_task_rxmit_opt {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_task_hdr {
+ __le32 reg[18];
+};
+
+struct nvmetcp_task_hdr_aligned {
+ struct nvmetcp_task_hdr task_hdr;
+ __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
+};
+
+struct e5_tdif_task_context {
+ __le32 reserved[16];
+};
+
+struct e5_rdif_task_context {
+ __le32 reserved[12];
+};
+
+struct ystorm_nvmetcp_task_st_ctx {
+ struct ystorm_nvmetcp_task_state state;
+ struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
+ struct nvmetcp_task_hdr_aligned pdu_hdr;
+};
+
+struct mstorm_nvmetcp_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair reserved0;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct ustorm_nvmetcp_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair reserved0;
+ __le32 reg1_map;
+#define REG1_NUM_SGES_MASK 0xF
+#define REG1_NUM_SGES_SHIFT 0
+#define REG1_RESERVED1_MASK 0xFFFFFFF
+#define REG1_RESERVED1_SHIFT 4
+ u8 flags2;
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 reserved3;
+ __le16 tqe_opaque[2];
+ __le32 reserved5;
+ __le32 nvme_tcp_opaque_lo;
+ __le32 nvme_tcp_opaque_hi;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
+ u8 flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+ u8 cq_rss_number;
+};
+
+struct e5_ystorm_nvmetcp_task_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state_and_core_id */;
+ __le16 word0 /* icid */;
+ u8 flags0;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ __le32 TTT;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 e4_reserved7;
+};
+
+struct e5_mstorm_nvmetcp_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+ u8 flags3;
+ __le32 reg0;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 e4_reserved7;
+};
+
+struct e5_ustorm_nvmetcp_task_ag_ctx {
+ u8 reserved;
+ u8 state_and_core_id;
+ __le16 icid;
+ u8 flags0;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+ u8 flags4;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ u8 byte2;
+ u8 byte3;
+ u8 e4_reserved8;
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ __le16 word1;
+ __le16 next_tid;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct e5_nvmetcp_task_context {
+ struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
+ struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct e5_tdif_task_context tdif_context;
+ struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
+ struct regpair ustorm_ag_padding[2];
+ struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e5_rdif_task_context rdif_context;
+};
+
+#endif /* __NVMETCP_COMMON__*/
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 68d17a4fbf20..850b98991670 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -542,6 +542,22 @@ struct qed_iscsi_pf_params {
u8 bdq_pbl_num_entries[3];
};
+struct qed_nvmetcp_pf_params {
+ u64 glbl_q_params_addr;
+ u16 cq_num_entries;
+ u16 num_cons;
+ u16 num_tasks;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u16 min_rto;
+};
+
struct qed_rdma_pf_params {
/* Supplied to QED during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -560,6 +576,7 @@ struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_nvmetcp_pf_params nvmetcp_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -662,6 +679,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
QED_PROTOCOL_FCOE,
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index ea273ba1c991..ff808d248883 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -18,7 +18,7 @@
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TCP_ULP,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2,
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
new file mode 100644
index 000000000000..14671bc19ed1
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_IF_H
+#define _QED_NVMETCP_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+
+#define QED_NVMETCP_MAX_IO_SIZE 0x800000
+#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
+#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
+#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
+
+typedef int (*nvmetcp_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+
+struct qed_dev_nvmetcp_info {
+ struct qed_dev_info common;
+ u8 port_id; /* Physical port */
+ u8 num_cqs;
+};
+
+#define MAX_TID_BLOCKS_NVMETCP (512)
+struct qed_nvmetcp_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
+};
+
+struct qed_nvmetcp_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_nvmetcp_params_offload {
+ /* FW initializations */
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+ u16 nvmetcp_cccid_max_range;
+ u8 default_cq;
+
+ /* Networking and TCP stack initializations */
+ struct qed_nvmetcp_id_params src;
+ struct qed_nvmetcp_id_params dst;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 cwnd;
+ u16 mss;
+ u16 vlan_id;
+ bool timestamp_en;
+ bool delayed_ack_en;
+ bool tcp_keep_alive_en;
+ bool ecn_en;
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u8 ttl;
+ u8 tos_or_tc;
+ u8 rcv_wnd_scale;
+};
+
+struct qed_nvmetcp_params_update {
+ u32 max_io_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+
+ /* Placeholder: pfv, cpda, hpda */
+
+ bool hdr_digest_en;
+ bool data_digest_en;
+};
+
+struct qed_nvmetcp_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct nvmetcp_sge {
+ struct regpair sge_addr; /* SGE address */
+ __le32 sge_len; /* SGE length */
+ __le32 reserved;
+};
+
+/* IO path HSI function SGL params */
+struct storage_sgl_task_params {
+ struct nvmetcp_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (Without DIF, if exists) */
+ u32 rx_io_size; /* in bytes (Without DIF, if exists) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * return 0 on success, otherwise error value.
+ * @stop: nvmetcp in FW
+ * @param cdev
+ * return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ *		@return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all tasks in the sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
+ */
+struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context, nvmetcp_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*clear_all_filters)(struct qed_dev *cdev);
+
+ void (*init_read_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_write_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+
+ void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
+void qed_put_nvmetcp_ops(void);
+#endif
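
For orientation, a minimal consumer sketch of the ops table above; the driver name and flow are hypothetical (loosely qedn-style) and not part of this header:

/* Hypothetical consumer of qed_nvmetcp_ops; error handling abbreviated. */
static int qedn_probe_sketch(struct qed_dev *cdev)
{
	const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
	struct qed_dev_nvmetcp_info info;
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto err_put;

	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		goto err_put;

	/* offload_conn(), update_conn() and I/O submission would follow */

	ops->release_conn(cdev, handle);
	return 0;	/* keep ops; call qed_put_nvmetcp_ops() at teardown */

err_put:
	qed_put_nvmetcp_ops();
	return rc;
}
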
diff --git a/include/linux/qed/qed_nvmetcp_ip_services_if.h b/include/linux/qed/qed_nvmetcp_ip_services_if.h
new file mode 100644
index 000000000000..3604aee53796
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_ip_services_if.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright 2021 Marvell. All rights reserved.
+ */
+
+#ifndef _QED_IP_SERVICES_IF_H
+#define _QED_IP_SERVICES_IF_H
+
+#include <linux/types.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+#include <linux/inetdevice.h>
+
+int qed_route_ipv4(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev);
+int qed_route_ipv6(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev);
+void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id);
+struct pci_dev *qed_validate_ndev(struct net_device *ndev);
+void qed_return_tcp_port(struct socket *sock);
+int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
+ struct socket **sock, u16 *port);
+__be16 qed_get_in_port(struct sockaddr_storage *sa);
+
+#endif /* _QED_IP_SERVICES_IF_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9455476c5ba2..d7895b81264e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) \
+({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
+ rcu_check_sparse(p, __rcu); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+})
+
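
A usage sketch for the new helper: xchg() and friends operate on plain kernel pointers, so the __rcu pointer is unwrapped first. The surrounding structure is illustrative only:

/* Sketch: atomically take ownership of an RCU-published node. */
struct foo {
	int val;
};

static struct foo __rcu *foo_head;

static struct foo *foo_take(void)
{
	/* strip __rcu so the result can be used as a normal pointer */
	return unrcu_pointer(xchg(&foo_head, NULL));
}
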
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3734cd8f38a8..af907a3d68d1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -79,6 +79,7 @@ extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+void hw_protection_shutdown(const char *reason, int ms_until_forced);
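
A caller-side sketch for the new declaration; the trigger condition is assumed (e.g. a thermal or regulator protection path):

/* Sketch: orderly shutdown that escalates to a forced one. */
static void hypo_overtemp_protect(void)
{
	/*
	 * Ask for a graceful shutdown; if it has not completed within
	 * 100 ms the kernel forces the shutdown itself.
	 */
	hw_protection_shutdown("Thermal runaway detected", 100);
}
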
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f87a11a5cc4a..f5f08dd0a116 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct device_node;
struct i2c_client;
struct i3c_device;
struct irq_domain;
+struct mdio_device;
struct slim_device;
struct spi_device;
struct spmi_device;
@@ -502,6 +503,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* DEFAULT, BIG is assumed.
* @max_raw_read: Max raw read size that can be used on the bus.
* @max_raw_write: Max raw write size that can be used on the bus.
+ * @free_on_exit: kfree this on exit of regmap
*/
struct regmap_bus {
bool fast_io;
@@ -519,6 +521,7 @@ struct regmap_bus {
enum regmap_endian val_format_endian_default;
size_t max_raw_read;
size_t max_raw_write;
+ bool free_on_exit;
};
/*
@@ -538,6 +541,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -594,6 +601,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -698,6 +709,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* regmap_init_sccb() - Initialise register map
*
* @i2c: Device that will be interacted with
@@ -889,6 +913,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
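
A probe-time sketch for the MDIO variant; the 5-bit register / 16-bit value layout matches Clause-22 MDIO, but treat the config values and the driver shape as assumptions:

/* Sketch: MDIO-backed regmap in a (hypothetical) PHY driver probe. */
static const struct regmap_config hypo_mdio_regmap_cfg = {
	.reg_bits = 5,	/* Clause-22 register address width */
	.val_bits = 16,	/* MDIO registers are 16 bits wide */
};

static int hypo_phy_probe(struct mdio_device *mdiodev)
{
	struct regmap *map;

	map = devm_regmap_init_mdio(mdiodev, &hypo_mdio_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* all register accesses now go through the regmap core */
	return regmap_write(map, 0x00 /* BMCR */, 0x8000 /* soft reset */);
}
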
+/**
* devm_regmap_init_sccb() - Initialise managed register map
*
* @i2c: Device that will be interacted with
@@ -1411,6 +1449,7 @@ struct regmap_irq_sub_irq_map {
* @not_fixed_stride: Used when chip peripherals are not laid out with fixed
* stride. Must be used with sub_reg_offsets containing the
* offsets to each peripheral.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -1463,6 +1502,7 @@ struct regmap_irq_chip {
bool type_in_mask:1;
bool clear_on_unmask:1;
bool not_fixed_stride:1;
+ bool status_invert:1;
int num_regs;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 20e84a84fb77..f72ca73631be 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -119,6 +119,16 @@ struct regulator_dev;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but consumers
+ * should take a recovery action to prevent problems from escalating into
+ * errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
/*
* Regulator errors that can be queried using regulator_get_error_flags
@@ -138,6 +148,10 @@ struct regulator_dev;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 5f86824bd117..73291f280a23 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -52,7 +52,6 @@ struct regulator_coupler {
#ifdef CONFIG_REGULATOR
int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state);
@@ -69,10 +68,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler)
{
return 0;
}
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
static inline int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4ea520c248e9..4aec20387857 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,15 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
+enum regulator_detection_severity {
+ /* Hardware shut down voltage outputs if condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
+};
+
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
{ \
@@ -78,8 +87,25 @@ enum regulator_status {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ * REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ * REGULATOR_SEVERITY_ERR should indicate that the over-current situation
+ * is caused by an unrecoverable error but the HW does not perform an
+ * automatic shutdown.
+ * REGULATOR_SEVERITY_WARN should indicate a situation where the hardware
+ * is still believed not to be damaged but a board-specific recovery
+ * action is needed. If lim_uA is 0 the limit should not be changed but
+ * the detection should just be enabled/disabled as requested.
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for the same
+ * severities as over-current protection.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under-voltage situation detection.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -143,8 +169,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
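
An implementation sketch for one of the reworked callbacks; the register names, bit masks and uA-to-selector conversion are hypothetical:

/* Sketch: severity-aware over-current protection setter. */
#define HYPO_REG_OCP_SHUTDOWN	0x10	/* HW shuts the output down */
#define HYPO_REG_OCP_ERROR_IRQ	0x11
#define HYPO_REG_OCP_WARN_IRQ	0x12
#define HYPO_OCP_EN		BIT(7)

static int hypo_set_ocp(struct regulator_dev *rdev, int lim_uA,
			int severity, bool enable)
{
	unsigned int reg;

	switch (severity) {
	case REGULATOR_SEVERITY_PROT:
		reg = HYPO_REG_OCP_SHUTDOWN;
		break;
	case REGULATOR_SEVERITY_ERR:
		reg = HYPO_REG_OCP_ERROR_IRQ;
		break;
	case REGULATOR_SEVERITY_WARN:
		reg = HYPO_REG_OCP_WARN_IRQ;
		break;
	default:
		return -EINVAL;
	}

	if (!enable)
		return regmap_update_bits(rdev->regmap, reg, HYPO_OCP_EN, 0);

	/* lim_uA == 0: keep the current limit, only toggle detection */
	if (lim_uA) {
		int ret = regmap_write(rdev->regmap, reg + 1,
				       lim_uA / 100000 /* hypothetical selector */);

		if (ret)
			return ret;
	}

	return regmap_update_bits(rdev->regmap, reg, HYPO_OCP_EN, HYPO_OCP_EN);
}
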
@@ -413,6 +446,128 @@ struct regulator_config {
struct gpio_desc *ena_gpiod;
};
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Number of associated regulators.
+ * @data: Driver data pointer given in regulator_irq_desc.
+ * @opaque: Value storage for the IC driver. The core does not update this.
+ * ICs may want to store the status register value here at map_event and
+ * compare the contents in the 'renable' callback to see if new problems
+ * have been added to the status. If that is the case it may be
+ * desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON to allow the IRQ to fire again and to generate
+ * notifications for the new issues as well.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW-damaging condition it may
+ * be best to shut down the regulator(s) or reboot the SoC if error
+ * handling repeatedly fails. If fatal_cnt is given the IRQ
+ * handling is aborted if it fails for fatal_cnt times and die()
+ * callback (if populated) or BUG() is called to try to prevent
+ * further damage.
+ * @reread_ms: The time to wait before attempting to re-read the status
+ * at the worker if reading the IC fails. The status is re-read
+ * immediately if no time is specified.
+ * @irq_off_ms: The time for which the IRQ is kept disabled before
+ * re-evaluating the status, for devices which keep the IRQ disabled
+ * for the duration of the error. If this is not given the IRQ is left
+ * enabled and renable is not called.
+ * @skip_off: If set to true the IRQ handler will attempt to check if any of
+ * the associated regulators are enabled prior to taking other
+ * actions. If no regulators are enabled and this is set to true
+ * a spurious IRQ is assumed and IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that a high-priority WQ should be used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions
+ * fail fatal_cnt times this callback or BUG() is called. This
+ * callback should implement a final protection attempt like
+ * disabling the regulator. If protection succeeds this may
+ * return 0. If anything else is returned the core assumes final
+ * protection failed and calls BUG() as a last resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ * events / errors. NOTE: callback MUST initialize both the
+ * errors and notifs for all rdevs which it signals having
+ * active events as core does not clean the map data.
+ * REGULATOR_FAILED_RETRY can be returned to indicate that the
+ * status reading from IC failed. If this is repeated for
+ * fatal_cnt times the core will call die() callback or BUG()
+ * as a last resort to protect the HW.
+ * @renable: Optional callback to check status (if HW supports that) before
+ * re-enabling IRQ. If implemented this should clear the error
+ * flags so that errors fetched by regulator_get_error_flags()
+ * are updated. If callback is not implemented then errors are
+ * assumed to be cleared and IRQ is re-enabled.
+ * REGULATOR_FAILED_RETRY can be returned to
+ * indicate that the status reading from IC failed. If this is
+ * repeated for 'fatal_cnt' times the core will call die()
+ * callback or BUG() as a last resort to protect the HW.
+ * Returning zero indicates that the problem in HW has been solved
+ * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
+ * indicates the error condition is still active and keeps IRQ
+ * disabled. Please note that returning REGULATOR_ERROR_ON does
+ * not retrigger evaluating what events are active or resending
+ * notifications. If this is needed you probably want to return
+ * zero and allow IRQ to retrigger causing events to be
+ * re-evaluated and re-sent.
+ *
+ * This structure is used for registering regulator IRQ notification helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int irq_flags;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
+
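
To tie the pieces together, a sketch of a map_event implementation and its descriptor; the status register, bit layout and regmap are assumptions:

/* Sketch: map a (hypothetical) status register to regulator events. */
#define HYPO_REG_STATUS		0x20
#define HYPO_STATUS_OCP_WARN	BIT(0)

static struct regmap *hypo_regmap;	/* assumed set up at probe */

static int hypo_map_event(int irq, struct regulator_irq_data *rid,
			  unsigned long *dev_mask)
{
	struct regulator_err_state *state = &rid->states[0];
	unsigned int status;

	if (regmap_read(hypo_regmap, HYPO_REG_STATUS, &status))
		return REGULATOR_FAILED_RETRY;

	*dev_mask = 0;
	if (status & HYPO_STATUS_OCP_WARN) {
		*dev_mask = BIT(0);	/* event on rdev index 0 */
		state->notifs = REGULATOR_EVENT_OVER_CURRENT_WARN;
		state->errors = REGULATOR_ERROR_OVER_CURRENT_WARN;
	}

	return 0;
}

static const struct regulator_irq_desc hypo_irq_desc = {
	.name = "hypo-pmic",
	.fatal_cnt = 5,
	.map_event = hypo_map_event,
};

/*
 * At probe time, roughly:
 *	helper = devm_regulator_irq_helper(dev, &hypo_irq_desc, irq, 0,
 *					   REGULATOR_ERROR_OVER_CURRENT_WARN,
 *					   NULL, &rdev, 1);
 */
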
/*
* struct coupling_desc
*
@@ -477,6 +632,9 @@ struct regulator_dev {
/* time when this regulator was disabled last time */
ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
};
struct regulator_dev *
@@ -491,6 +649,16 @@ void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
@@ -540,6 +708,7 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
/*
* Helper functions intended to be used by regulator drivers prior registering
@@ -550,4 +719,14 @@ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 8a56f033b6cd..68b4a514a410 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -83,6 +83,14 @@ struct regulator_state {
bool changeable;
};
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
+};
+
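
A board-file sketch of the new constraint fields; all limit values are illustrative:

/* Sketch: constraints with tiered over-current notification limits. */
static struct regulation_constraints hypo_constraints = {
	.min_uV = 900000,
	.max_uV = 1100000,
	.over_current_detection = 1,
	.over_curr_limits = {
		.prot = 3000000,	/* uA: HW shuts down above this */
		.err  = 2500000,	/* uA: unrecoverable-error notification */
		.warn = 2000000,	/* uA: recovery-action notification */
	},
	.over_temp_detection = 1,
	.temp_limits = {
		/* keep the HW default limit, only enable warning detection */
		.warn = REGULATOR_NOTIF_LIMIT_ENABLE,
	},
};
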
/**
* struct regulation_constraints - regulator operating constraints.
*
@@ -100,6 +108,11 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
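
A sketch of an observer notifier using the new flag; the chosen registry, target category and callback body are assumptions:

/* Sketch: observe battery events without enabling/disabling them. */
static u32 hypo_notif_fn(struct ssam_event_notifier *nf,
			 const struct ssam_event *event)
{
	/* inspect event->command_id / event->data here */
	return 0;
}

static struct ssam_event_notifier hypo_observer = {
	.base.fn = hypo_notif_fn,
	.event.reg = SSAM_EVENT_REGISTRY_SAM,
	.event.id.target_category = SSAM_SSH_TC_BAT,
	.event.mask = SSAM_EVENT_MASK_TARGET,
	.flags = SSAM_EVENT_NOTIFIER_OBSERVER,
};

/*
 * ssam_notifier_register(ctrl, &hypo_observer) will not enable the event;
 * something else calls ssam_controller_event_enable()/..._disable() for it.
 */
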
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
* @max_spread: Max possible spread between coupled regulators
* @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -116,6 +129,11 @@ struct regulator_state {
* @pull_down: Enable pull down when regulator is disabled.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -172,6 +190,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -193,6 +215,10 @@ struct regulation_constraints {
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
/**
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..8d04e7deedc6 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,6 +91,7 @@ enum ttu_flags {
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 6f155f99aa16..4ab7bfc675f1 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1109,6 +1109,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2c881384517..ec8d07d88641 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -113,11 +113,13 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
+
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -132,14 +134,14 @@ struct task_group;
do { \
WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
+ WRITE_ONCE(current->__state, (state_value)); \
} while (0)
#define set_current_state(state_value) \
do { \
WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+ smp_store_mb(current->__state, (state_value)); \
} while (0)
#define set_special_state(state_value) \
@@ -148,7 +150,7 @@ struct task_group;
WARN_ON_ONCE(!is_special_task_state(state_value)); \
raw_spin_lock_irqsave(&current->pi_lock, flags); \
current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
+ WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
#else
@@ -190,10 +192,10 @@ struct task_group;
* Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
- current->state = (state_value)
+ WRITE_ONCE(current->__state, (state_value))
#define set_current_state(state_value) \
- smp_store_mb(current->state, (state_value))
+ smp_store_mb(current->__state, (state_value))
/*
* set_special_state() should be used for those states when the blocking task
@@ -205,12 +207,14 @@ struct task_group;
do { \
unsigned long flags; /* may shadow */ \
raw_spin_lock_irqsave(&current->pi_lock, flags); \
- current->state = (state_value); \
+ WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
#endif
+#define get_current_state() READ_ONCE(current->__state)
+
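
The canonical sleep loop, spelled with the accessors rather than touching the (now renamed) state field directly; the flag being waited on is illustrative:

/* Sketch: wait for a flag using set_current_state()/schedule(). */
static int hypo_wait_for_flag(atomic_t *flag)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(flag))
			break;
		if (signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			return -ERESTARTSYS;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
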
/* Task command name length: */
#define TASK_COMM_LEN 16
@@ -350,11 +354,19 @@ struct load_weight {
* Only for tasks we track a moving average of the past instantaneous
* estimated utilization. This allows to absorb sporadic drops in utilization
* of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the max value of util_est.enqueued for a task is 1024 (the
+ * PELT util_avg for a task), it is safe to use the MSB.
*/
struct util_est {
unsigned int enqueued;
unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
/*
@@ -654,8 +666,7 @@ struct task_struct {
*/
struct thread_info thread_info;
#endif
- /* -1 unrunnable, 0 runnable, >0 stopped: */
- volatile long state;
+ unsigned int __state;
/*
* This begins the randomizable portion of task_struct. Only
@@ -700,10 +711,17 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+
+#ifdef CONFIG_SCHED_CORE
+ struct rb_node core_node;
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
- struct sched_dl_entity dl;
#ifdef CONFIG_UCLAMP_TASK
/*
@@ -989,7 +1007,6 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
@@ -1513,7 +1530,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
static inline unsigned int task_state_index(struct task_struct *tsk)
{
- unsigned int tsk_state = READ_ONCE(tsk->state);
+ unsigned int tsk_state = READ_ONCE(tsk->__state);
unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
@@ -1821,10 +1838,10 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
#else
static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
return 1;
}
@@ -2011,6 +2028,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern bool sched_task_on_rq(struct task_struct *p);
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2172,4 +2191,14 @@ int sched_trace_rq_nr_running(struct rq *rq);
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+#endif
+
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index dfd82eab2902..4d9e3a656875 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can either be
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 6205578ab6ee..bdd31ab93bc5 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -26,7 +26,7 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)
{
- return (freq + (freq >> 2)) * util / cap;
+ return freq * util / cap;
}
static inline unsigned long map_util_perf(unsigned long util)
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index ae51f4529fc9..b5035afa2396 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
-extern void show_state_filter(unsigned long state_filter);
+extern void show_state_filter(unsigned int state_filter);
static inline void show_state(void)
{
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 34b21e971d77..57bde66d95f7 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -91,6 +91,16 @@ SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD)
SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
/*
+ * Domain members have different CPU capacities spanning all unique CPU
+ * capacity values.
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * all available CPU capacities are visible
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
* Domain members share CPU capacity (i.e. SMT)
*
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7f4278fa21fe..c9cf678c347d 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -382,7 +382,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
return task_sigpending(p) && __fatal_signal_pending(p);
}
-static inline int signal_pending_state(long state, struct task_struct *p)
+static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 568286411b43..0108a38bb64d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_STAT_H
#include <linux/percpu.h>
+#include <linux/kconfig.h>
/*
* Various counters maintained by the scheduler and fork(),
@@ -16,21 +17,14 @@ extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
-extern unsigned long nr_running(void);
+extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait(void);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
-#ifdef CONFIG_SCHEDSTATS
- return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
- extern int delayacct_on;
- return delayacct_on;
-#else
- return 0;
-#endif
+ return IS_ENABLED(CONFIG_SCHED_INFO);
}
#ifdef CONFIG_SCHEDSTATS
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 3632c5d6ec55..2462f7d07695 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -12,16 +12,9 @@
*/
struct user_struct {
refcount_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_EPOLL
atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
-#ifdef CONFIG_POSIX_MQUEUE
- /* protected by mq_lock */
- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
-#endif
- unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 528718e4ed52..835ee87ed792 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -14,7 +14,7 @@
* @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
* clocks.
* @read_sched_clock: Current clock source (or dummy source when suspended).
- * @mult: Multipler for scaled math conversion.
+ * @mult: Multiplier for scaled math conversion.
* @shift: Shift value for scaled math conversion.
*
* Care must be taken when updating this structure; it is read by
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index bb1926589693..a86e852507b3 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -98,6 +98,7 @@ enum sctp_cid {
SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
+ SCTP_CID_PAD = 0x84,
}; /* enum */
@@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk {
};
+/* A PAD chunk can be bundled with a HEARTBEAT chunk to probe the PMTU */
+struct sctp_pad_chunk {
+ struct sctp_chunkhdr uh;
+};
+
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
diff --git a/include/linux/security.h b/include/linux/security.h
index 06f7c50ce77f..24eda04221e9 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1681,7 +1681,7 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic);
@@ -1732,7 +1732,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
-static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f61e34fbaaea..37ded6b8fee6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -182,9 +182,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
-#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
-#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
-#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d82b6f396588..aa77dcd1646f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -65,7 +65,7 @@ extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
extern const struct address_space_operations shmem_aops;
static inline bool shmem_mapping(struct address_space *mapping)
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 201f88e3738b..5160fd45e5ca 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -267,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void exit_task_sigqueue_cache(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 68e06c75c5b2..34cb28b8f16c 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -13,6 +13,8 @@ typedef struct kernel_siginfo {
__SIGINFO;
} kernel_siginfo_t;
+struct ucounts;
+
/*
* Real Time signals may be queued.
*/
@@ -21,7 +23,7 @@ struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
- struct user_struct *user;
+ struct ucounts *ucounts;
};
/* flags values. */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbf820a50a39..b2db9cd9a73f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
+#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
@@ -667,6 +668,8 @@ typedef unsigned char *sk_buff_data_t;
* @head_frag: skb was allocated from page fragments,
* not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @pp_recycle: mark the packet for recycling instead of freeing (implies
+ * page_pool support in the driver)
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
@@ -791,10 +794,12 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- pfmemalloc:1;
+ pfmemalloc:1,
+ pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
#endif
+
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
*/
@@ -3081,12 +3086,20 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
/**
* __skb_frag_unref - release a reference on a paged fragment.
* @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
*
- * Releases a reference on the paged fragment @frag.
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag)
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- put_page(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && page_pool_return_skb_page(page))
+ return;
+#endif
+ put_page(page);
}
/**
@@ -3098,7 +3111,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag)
*/
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
- __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+ __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
}
/**
@@ -4697,5 +4710,21 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
#endif
}
+#ifdef CONFIG_PAGE_POOL
+static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
+ struct page_pool *pp)
+{
+ skb->pp_recycle = 1;
+ page_pool_store_mem_info(page, pp);
+}
+#endif
+
+static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
+{
+ if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
+ return false;
+ return page_pool_return_skb_page(virt_to_page(data));
+}
+
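
An Rx-path sketch of the new marking helper in a page_pool-backed driver; the function shape is hypothetical:

/* Sketch: mark a pool-backed skb so its pages return to the page_pool. */
static struct sk_buff *hypo_build_rx_skb(struct page_pool *pool,
					 struct page *page, unsigned int len)
{
	struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;

	skb_put(skb, len);
#ifdef CONFIG_PAGE_POOL
	/* on free, data/frags go back to @pool instead of put_page() */
	skb_mark_for_recycle(skb, page, pool);
#endif
	return skb;
}
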
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index aba0f0f429be..96f319099744 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -126,8 +126,6 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
- long timeo, int *err);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
@@ -348,7 +346,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
struct sock *sk = psock->sk;
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c97d788762c..083f3ce550bc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -305,9 +305,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
+ *
+ * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
+ * is for accounted but unreclaimable and non-dma objects. All the other
+ * kmem caches can have both accounted and unaccounted objects.
*/
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,
+#ifndef CONFIG_ZONE_DMA
+ KMALLOC_DMA = KMALLOC_NORMAL,
+#endif
+#ifndef CONFIG_MEMCG_KMEM
+ KMALLOC_CGROUP = KMALLOC_NORMAL,
+#else
+ KMALLOC_CGROUP,
+#endif
KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
@@ -319,24 +331,36 @@ enum kmalloc_cache_type {
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+/*
+ * Define gfp bits that should not be set for KMALLOC_NORMAL.
+ */
+#define KMALLOC_NOT_NORMAL_BITS \
+ (__GFP_RECLAIMABLE | \
+ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
+
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
-#ifdef CONFIG_ZONE_DMA
/*
* The most common case is KMALLOC_NORMAL, so test for it
- * with a single branch for both flags.
+ * with a single branch for all the relevant flags.
*/
- if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
+ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
return KMALLOC_NORMAL;
/*
- * At least one of the flags has to be set. If both are, __GFP_DMA
- * is more important.
+ * At least one of the flags has to be set. Their priorities in
+ * decreasing order are:
+ * 1) __GFP_DMA
+ * 2) __GFP_RECLAIMABLE
+ * 3) __GFP_ACCOUNT
*/
- return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
-#else
- return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
-#endif
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
+ return KMALLOC_DMA;
+ if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
+ return KMALLOC_RECLAIM;
+ else
+ return KMALLOC_CGROUP;
}
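
A standalone model of the selection order above (not kernel code; the flag bits are stand-ins), showing the priority __GFP_DMA > __GFP_RECLAIMABLE > __GFP_ACCOUNT:

/* Standalone model of kmalloc_type(); flag values are stand-ins. */
enum hypo_cache { HYPO_NORMAL, HYPO_DMA, HYPO_RECLAIM, HYPO_CGROUP };

#define F_DMA		0x1	/* models __GFP_DMA */
#define F_RECLAIMABLE	0x2	/* models __GFP_RECLAIMABLE */
#define F_ACCOUNT	0x4	/* models __GFP_ACCOUNT */
#define F_NOT_NORMAL	(F_DMA | F_RECLAIMABLE | F_ACCOUNT)

static enum hypo_cache hypo_pick_cache(unsigned int flags)
{
	if ((flags & F_NOT_NORMAL) == 0)
		return HYPO_NORMAL;	/* common case: one branch */
	if (flags & F_DMA)
		return HYPO_DMA;	/* highest priority */
	if (flags & F_RECLAIMABLE)
		return HYPO_RECLAIM;
	return HYPO_CGROUP;		/* accounted, unreclaimable */
}
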
/*
@@ -346,8 +370,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
* 1 = 65 .. 96 bytes
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
+ *
+ * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
+ * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
+ * Callers where !size_is_constant should only be test modules, where runtime
+ * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
*/
-static __always_inline unsigned int kmalloc_index(size_t size)
+static __always_inline unsigned int __kmalloc_index(size_t size,
+ bool size_is_constant)
{
if (!size)
return 0;
@@ -382,12 +412,17 @@ static __always_inline unsigned int kmalloc_index(size_t size)
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
- if (size <= 64 * 1024 * 1024) return 26;
- BUG();
+
+ if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000)
+ && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
+ BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
+ else
+ BUG();
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
+#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 31f00c7f4f59..eaab121ee575 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -2,8 +2,10 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
-#ifndef __linux_pxa2xx_spi_h
-#define __linux_pxa2xx_spi_h
+#ifndef __LINUX_SPI_PXA2XX_SPI_H
+#define __LINUX_SPI_PXA2XX_SPI_H
+
+#include <linux/types.h>
#include <linux/pxa2xx_ssp.h>
@@ -12,7 +14,10 @@
struct dma_chan;
-/* device.platform_data for SSP controller devices */
+/*
+ * The platform data for SSP controller devices
+ * (resides in device.platform_data).
+ */
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
@@ -28,8 +33,11 @@ struct pxa2xx_spi_controller {
struct ssp_device ssp;
};
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
+/*
+ * The controller specific data for SPI slave devices
+ * (resides in spi_board_info.controller_data),
+ * copied to spi_device.platform_data ... mostly for
+ * DMA tuning.
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
@@ -49,4 +57,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
-#endif
+
+#endif /* __LINUX_SPI_PXA2XX_SPI_H */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 2b65c9edc34e..85e2ff7b840d 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -250,6 +250,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
+ * @poll_status: poll the memory device status until (status & mask) == match,
+ * or until the timeout expires. It fills the data buffer with
+ * the last status value.
*
* This interface should be implemented by SPI controllers providing an
* high-level interface to execute SPI memory operation, which is usually the
@@ -274,6 +277,12 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+ int (*poll_status)(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms);
};
/**
@@ -369,6 +378,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms);
+
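
A caller-side sketch of the new poll helper; the RDSR opcode and WIP bit follow common SPI-NOR conventions but are assumptions here:

/* Sketch: wait until the flash WIP bit (bit 0 of RDSR) clears. */
static int hypo_spinor_wait_ready(struct spi_mem *mem)
{
	u8 status;
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),	/* RDSR */
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &status, 1));

	/* mask = BIT(0) (WIP), match = 0: done once WIP clears */
	return spi_mem_poll_status(mem, &op, BIT(0), 0,
				   10,		/* initial delay, us */
				   100,		/* polling delay, us */
				   500);	/* timeout, ms */
}
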
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 360a3bc767ca..97b8d12b5f2b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -299,6 +299,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
/* use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -586,6 +588,7 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -644,8 +647,8 @@ struct spi_controller {
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
/* statistics */
struct spi_statistics statistics;
@@ -1108,11 +1111,6 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
-
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
extern int spi_async_locked(struct spi_device *spi,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0db36360ef21..d5ae621d66ba 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -172,6 +172,18 @@ struct stmmac_fpe_cfg {
enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
};
+struct stmmac_safety_feature_cfg {
+ u32 tsoee;
+ u32 mrxpee;
+ u32 mestee;
+ u32 mrxee;
+ u32 mtxee;
+ u32 epsi;
+ u32 edpp;
+ u32 prtyen;
+ u32 tmouten;
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -184,6 +196,7 @@ struct plat_stmmacenet_data {
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
struct stmmac_fpe_cfg *fpe_cfg;
+ struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
int enh_desc;
@@ -210,6 +223,7 @@ struct plat_stmmacenet_data {
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
+ void (*speed_mode_2500)(struct net_device *ndev, void *priv);
void (*ptp_clk_freq_config)(void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
@@ -223,8 +237,10 @@ struct plat_stmmacenet_data {
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
unsigned int clk_ref_rate;
+ unsigned int mult_fact_100ns;
s32 ptp_max_adj;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
int has_gmac4;
bool has_sun8i;
@@ -249,5 +265,6 @@ struct plat_stmmacenet_data {
int msi_sfty_ue_vec;
int msi_rx_base_vec;
int msi_tx_base_vec;
+ bool use_phy_wol;
};
#endif
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index d81fe8b364d0..61b622e334ee 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -368,6 +368,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index 0806796eabcb..068e1982ad37 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -6,7 +6,7 @@
* managing access and communication to and from the SSAM EC, as well as main
* communication structures and definitions.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
@@ -796,6 +796,20 @@ enum ssam_event_mask {
SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
/**
+ * enum ssam_event_notifier_flags - Flags for event notifiers.
+ * @SSAM_EVENT_NOTIFIER_OBSERVER:
+ * The corresponding notifier acts as an observer. Registering a notifier
+ * with this flag set will not attempt to enable any event. Equally,
+ * unregistering will not attempt to disable any event. Note that a
+ * notifier with this flag may not even correspond to a certain event at
+ * all, only to a specific event target category. Event matching will not
+ * be influenced by this flag.
+ */
+enum ssam_event_notifier_flags {
+ SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0),
+};
+
+/**
* struct ssam_event_notifier - Notifier block for SSAM events.
* @base: The base notifier block with callback function and priority.
* @event: The event for which this block will receive notifications.
@@ -803,6 +817,7 @@ enum ssam_event_mask {
* @event.id: ID specifying the event.
* @event.mask: Flags determining how events are matched to the notifier.
* @event.flags: Flags used for enabling the event.
+ * @flags: Notifier flags (see &enum ssam_event_notifier_flags).
*/
struct ssam_event_notifier {
struct ssam_notifier_block base;
@@ -813,6 +828,8 @@ struct ssam_event_notifier {
enum ssam_event_mask mask;
u8 flags;
} event;
+
+ unsigned long flags;
};
int ssam_notifier_register(struct ssam_controller *ctrl,
@@ -821,4 +838,12 @@ int ssam_notifier_register(struct ssam_controller *ctrl,
int ssam_notifier_unregister(struct ssam_controller *ctrl,
struct ssam_event_notifier *n);
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
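Taken together, the observer flag and the explicit enable/disable calls above decouple notifier registration from event activation. A hedged sketch; the callback and the registry/ID values are placeholders:

static u32 my_notify(struct ssam_event_notifier *nf,
		     const struct ssam_event *event)
{
	/* observe only; returning 0 is assumed to mean "not consumed" */
	return 0;
}

struct ssam_event_notifier nf = {
	.base.fn   = my_notify,
	.event.reg = reg,	/* some struct ssam_event_registry */
	.event.id  = id,	/* some struct ssam_event_id */
	.flags     = SSAM_EVENT_NOTIFIER_OBSERVER,
};

ssam_notifier_register(ctrl, &nf);		/* does not enable any event */
ssam_controller_event_enable(ctrl, reg, id, 0);	/* enable it explicitly */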
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 6ff9c58b3e17..f636c5310321 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -7,7 +7,7 @@
* Provides support for non-platform/non-ACPI SSAM clients via dedicated
* subsystem.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 64276fbfa1d5..c3de43edcffa 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -6,7 +6,7 @@
* Surface System Aggregator Module (SSAM). Provides the interface for basic
* packet- and request-based communication with the SSAM EC via SSH.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 144727041e78..49b1dd2c100b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -177,7 +177,6 @@ enum {
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
- SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
@@ -240,6 +239,7 @@ struct swap_cluster_list {
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
+ struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
@@ -260,6 +260,7 @@ struct swap_info_struct {
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
+ struct completion comp; /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
atomic_t frontswap_pages; /* frontswap pages in-use counter */
@@ -445,6 +446,7 @@ extern void __delete_from_swap_cache(struct page *page,
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
+extern void free_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -511,7 +513,7 @@ sector_t swap_page_sector(struct page *page);
static inline void put_swap_device(struct swap_info_struct *si)
{
- rcu_read_unlock();
+ percpu_ref_put(&si->users);
}
#else /* CONFIG_SWAP */
@@ -526,6 +528,15 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
return NULL;
}
+static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
+{
+ return NULL;
+}
+
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+}
+
#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
@@ -541,6 +552,10 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
+static inline void free_swap_cache(struct page *page)
+{
+}
+
static inline void show_swap_cache_info(void)
{
}
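The SWP_VALID flag is gone because validity is now carried by the percpu_ref added to struct swap_info_struct above. The caller-side pattern, sketched from these declarations (get_swap_device() is assumed to return NULL once the device is being torn down):

struct swap_info_struct *si;

si = get_swap_device(entry);
if (!si)
	goto out;		/* stale entry or device going away */
/* ... the swap device is pinned and safe to use here ... */
put_swap_device(si);		/* drops the percpu_ref, not rcu_read_unlock() */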
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d9b7c9132c2f..5907205c712c 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -23,6 +23,16 @@
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
+/* Clear all flags, keeping only the swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
- if (pte_swp_uffd_wp(pte))
- pte = pte_swp_clear_uffd_wp(pte);
+ pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -323,6 +330,11 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
+static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
+{
+ return swp_offset(entry);
+}
+
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613c7eff..bfd571f18cfd 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
@@ -185,13 +186,17 @@ static inline bool tick_nohz_full_enabled(void)
return tick_nohz_full_running;
}
-static inline bool tick_nohz_full_cpu(int cpu)
-{
- if (!tick_nohz_full_enabled())
- return false;
-
- return cpumask_test_cpu(cpu, tick_nohz_full_mask);
-}
+/*
+ * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
+ * the cpu expression (typically smp_processor_id()) _after_ the static
+ * key.
+ */
+#define tick_nohz_full_cpu(_cpu) ({ \
+ bool __ret = false; \
+ if (tick_nohz_full_enabled()) \
+ __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \
+ __ret; \
+})
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
@@ -207,7 +212,7 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit);
-extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
@@ -252,11 +257,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk,
if (tick_nohz_full_enabled())
tick_nohz_dep_clear_task(tsk, bit);
}
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
if (tick_nohz_full_enabled())
- tick_nohz_dep_set_signal(signal, bit);
+ tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit)
@@ -284,7 +289,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
-static inline void tick_dep_set_signal(struct signal_struct *signal,
+static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit) { }
@@ -300,4 +305,10 @@ static inline void tick_nohz_task_switch(void)
__tick_nohz_task_switch();
}
+static inline void tick_nohz_user_enter_prepare(void)
+{
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ rcu_nocb_flush_deferred_wakeup();
+}
+
#endif
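The macro form of tick_nohz_full_cpu() matters for callers like tick_nohz_user_enter_prepare() just above: the argument is evaluated only behind the static key, so a call such as the sketch below never executes smp_processor_id() (including its preemption-debug checks) on kernels where nohz_full is not running; do_nohz_full_work() is a placeholder:

if (tick_nohz_full_cpu(smp_processor_id()))
	do_nohz_full_work();	/* argument evaluated only when enabled */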
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
index 9b895f93d8de..9f5a51f79ba5 100644
--- a/include/linux/usb/cdc-wdm.h
+++ b/include/linux/usb/cdc-wdm.h
@@ -12,11 +12,12 @@
#ifndef __LINUX_USB_CDC_WDM_H
#define __LINUX_USB_CDC_WDM_H
+#include <linux/wwan.h>
#include <uapi/linux/usb/cdc-wdm.h>
extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep,
- int bufsize,
+ int bufsize, enum wwan_port_type type,
int (*manage_power)(struct usb_interface *, int));
#endif /* __LINUX_USB_CDC_WDM_H */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index bf00259493e0..96b7ff66f074 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */
#define PD_T_SOURCE_ACTIVITY 45
#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */
#define PD_T_PS_TRANSITION 500
#define PD_T_SRC_TRANSITION 35
#define PD_T_DRP_SNK 40
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
index 0eb83ce19597..b517ebc8f0ff 100644
--- a/include/linux/usb/pd_ext_sdb.h
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
-#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
- USB_PD_EXT_SDB_EVENT_OTP | \
- USB_PD_EXT_SDB_EVENT_OVP)
-
#endif /* __LINUX_USB_PD_EXT_SDB_H */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 1d08dbbcfe32..eb70cabe6e7f 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -54,9 +54,15 @@ enum ucount_type {
UCOUNT_FANOTIFY_GROUPS,
UCOUNT_FANOTIFY_MARKS,
#endif
+ UCOUNT_RLIMIT_NPROC,
+ UCOUNT_RLIMIT_MSGQUEUE,
+ UCOUNT_RLIMIT_SIGPENDING,
+ UCOUNT_RLIMIT_MEMLOCK,
UCOUNT_COUNTS,
};
+#define MAX_PER_NAMESPACE_UCOUNTS UCOUNT_RLIMIT_NPROC
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -92,23 +98,42 @@ struct user_namespace {
struct ctl_table_header *sysctls;
#endif
struct ucounts *ucounts;
- int ucount_max[UCOUNT_COUNTS];
+ long ucount_max[UCOUNT_COUNTS];
} __randomize_layout;
struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
- int count;
- atomic_t ucount[UCOUNT_COUNTS];
+ atomic_t count;
+ atomic_long_t ucount[UCOUNT_COUNTS];
};
extern struct user_namespace init_user_ns;
+extern struct ucounts init_ucounts;
bool setup_userns_sysctls(struct user_namespace *ns);
void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
+struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
+struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
+void put_ucounts(struct ucounts *ucounts);
+
+static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type)
+{
+ return atomic_long_read(&ucounts->ucount[type]);
+}
+
+long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
+
+static inline void set_rlimit_ucount_max(struct user_namespace *ns,
+ enum ucount_type type, unsigned long max)
+{
+ ns->ucount_max[type] = max <= LONG_MAX ? max : LONG_MAX;
+}
#ifdef CONFIG_USER_NS
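A sketch of the charge/uncharge pattern these rlimit helpers suggest; the overflow convention, limit source, and error code are illustrative assumptions:

long count;

count = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, pages);
if (count == LONG_MAX ||	/* assumed overflow marker */
    is_ucounts_overlimit(ucounts, UCOUNT_RLIMIT_MEMLOCK,
			 task_rlimit(current, RLIMIT_MEMLOCK))) {
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, pages);
	return -ENOMEM;
}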
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index dc636b727179..35d7eedb5e8e 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -36,6 +36,7 @@ struct virtio_vsock_sock {
u32 rx_bytes;
u32 buf_alloc;
struct list_head rx_queue;
+ u32 msg_count;
};
struct virtio_vsock_pkt {
@@ -80,8 +81,17 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len, int flags);
+int
+virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4d668abb6391..bfaaf0b6fa76 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -135,6 +135,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller);
+void *vmalloc_no_huge(unsigned long size);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
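vmalloc_no_huge() complements the opportunistic huge-page vmalloc support; a usage sketch for a caller that needs guaranteed PAGE_SIZE mappings (for example, memory that will later be remapped or protected at page granularity):

void *buf = vmalloc_no_huge(size);	/* like vmalloc(), but never hugepage-mapped */
if (!buf)
	return -ENOMEM;
/* ... */
vfree(buf);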
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3299cd69e4ca..d6a6cf53b127 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -138,34 +138,27 @@ static inline void vm_events_fold_cpu(int cpu)
* Zone and node-based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
-extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
-static inline void zone_numa_state_add(long x, struct zone *zone,
- enum numa_stat_item item)
+static inline void zone_numa_event_add(long x, struct zone *zone,
+ enum numa_stat_item item)
{
- atomic_long_add(x, &zone->vm_numa_stat[item]);
- atomic_long_add(x, &vm_numa_stat[item]);
+ atomic_long_add(x, &zone->vm_numa_event[item]);
+ atomic_long_add(x, &vm_numa_event[item]);
}
-static inline unsigned long global_numa_state(enum numa_stat_item item)
+static inline unsigned long zone_numa_event_state(struct zone *zone,
+ enum numa_stat_item item)
{
- long x = atomic_long_read(&vm_numa_stat[item]);
-
- return x;
+ return atomic_long_read(&zone->vm_numa_event[item]);
}
-static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
- enum numa_stat_item item)
+static inline unsigned long
+global_numa_event_state(enum numa_stat_item item)
{
- long x = atomic_long_read(&zone->vm_numa_stat[item]);
- int cpu;
-
- for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
-
- return x;
+ return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
@@ -236,7 +229,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
- x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+ x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
if (x < 0)
x = 0;
@@ -245,18 +238,38 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
#ifdef CONFIG_NUMA
-extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
+/* See __count_vm_event comment on why raw_cpu_inc is used. */
+static inline void
+__count_numa_event(struct zone *zone, enum numa_stat_item item)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_inc(pzstats->vm_numa_event[item]);
+}
+
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+ raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
extern unsigned long sum_zone_node_page_state(int node,
enum zone_stat_item item);
-extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
+extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
enum node_stat_item item);
+extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
+static inline void fold_vm_numa_events(void)
+{
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
@@ -291,7 +304,7 @@ struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
loff_t *ppos);
-void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
@@ -399,7 +412,7 @@ static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
static inline void drain_zonestat(struct zone *zone,
- struct per_cpu_pageset *pset) { }
+ struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
@@ -428,7 +441,7 @@ static inline const char *numa_stat_name(enum numa_stat_item item)
static inline const char *node_stat_name(enum node_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
item];
}
@@ -440,7 +453,7 @@ static inline const char *lru_list_name(enum lru_list lru)
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
item];
}
@@ -449,7 +462,7 @@ static inline const char *writeback_stat_name(enum writeback_stat_item item)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
NR_VM_WRITEBACK_STAT_ITEMS +
item];
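A sketch of how the new NUMA event helpers fit together; NUMA_HIT is one of the existing enum numa_stat_item values:

/* hot path: cheap raw per-CPU increment */
__count_numa_event(zone, NUMA_HIT);

/* reporting path (assumed ordering): fold per-CPU deltas, then read */
fold_vm_numa_events();
hits = zone_numa_event_state(zone, NUMA_HIT);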
diff --git a/include/linux/wait.h b/include/linux/wait.h
index fe10e8570a52..6598ae35e1b5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1136,7 +1136,7 @@ do { \
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 8e5c5bb16e2d..667e86cfbdcf 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -221,6 +221,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages,
enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
+bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
* inode_attach_wb - associate an inode with its wb
@@ -360,7 +361,6 @@ extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
-extern int block_dump;
extern int laptop_mode;
int dirty_background_ratio_handler(struct ctl_table *table, int write,
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index aa05a253dcf9..9fac819f92e3 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -6,7 +6,10 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/poll.h>
#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
/**
* enum wwan_port_type - WWAN port types
@@ -15,7 +18,10 @@
* @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control
* @WWAN_PORT_QCDM: Qcom Modem diagnostic interface
* @WWAN_PORT_FIREHOSE: XML based command protocol
- * @WWAN_PORT_MAX: Number of supported port types
+ *
+ * @WWAN_PORT_MAX: Highest supported port type
+ * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
+ * @__WWAN_PORT_MAX: Internal use
*/
enum wwan_port_type {
WWAN_PORT_AT,
@@ -23,7 +29,12 @@ enum wwan_port_type {
WWAN_PORT_QMI,
WWAN_PORT_QCDM,
WWAN_PORT_FIREHOSE,
- WWAN_PORT_MAX,
+
+ /* Add new port types above this line */
+
+ __WWAN_PORT_MAX,
+ WWAN_PORT_MAX = __WWAN_PORT_MAX - 1,
+ WWAN_PORT_UNKNOWN,
};
struct wwan_port;
@@ -31,15 +42,23 @@ struct wwan_port;
/** struct wwan_port_ops - The WWAN port operations
* @start: The routine for starting the WWAN port device.
* @stop: The routine for stopping the WWAN port device.
- * @tx: The routine that sends WWAN port protocol data to the device.
+ * @tx: Non-blocking routine that sends WWAN port protocol data to the device.
+ * @tx_blocking: Optional blocking routine that sends WWAN port protocol data
+ * to the device.
+ * @tx_poll: Optional routine that sets additional TX poll flags.
*
* The wwan_port_ops structure contains a list of low-level operations
- * that control a WWAN port device. All functions are mandatory.
+ * that control a WWAN port device. All functions are mandatory unless specified.
*/
struct wwan_port_ops {
int (*start)(struct wwan_port *port);
void (*stop)(struct wwan_port *port);
int (*tx)(struct wwan_port *port, struct sk_buff *skb);
+
+ /* Optional operations */
+ int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);
+ __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
+ poll_table *wait);
};
/**
@@ -108,4 +127,48 @@ void wwan_port_txon(struct wwan_port *port);
*/
void *wwan_port_get_drvdata(struct wwan_port *port);
+/**
+ * struct wwan_netdev_priv - WWAN core network device private data
+ * @link_id: WWAN device data link id
+ * @drv_priv: driver private data area, size is determined in &wwan_ops
+ */
+struct wwan_netdev_priv {
+ u32 link_id;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+static inline void *wwan_netdev_drvpriv(struct net_device *dev)
+{
+ return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv;
+}
+
+/*
+ * Used to indicate that the WWAN core should not create a default network
+ * link.
+ */
+#define WWAN_NO_DEFAULT_LINK U32_MAX
+
+/**
+ * struct wwan_ops - WWAN device ops
+ * @priv_size: size of private netdev data area
+ * @setup: set up a new netdev
+ * @newlink: register the new netdev
+ * @dellink: remove the given netdev
+ */
+struct wwan_ops {
+ unsigned int priv_size;
+ void (*setup)(struct net_device *dev);
+ int (*newlink)(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack);
+ void (*dellink)(void *ctxt, struct net_device *dev,
+ struct list_head *head);
+};
+
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt, u32 def_link_id);
+
+void wwan_unregister_ops(struct device *parent);
+
#endif /* __WWAN_H */
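A condensed sketch of wiring a driver into the new link-management API above; every my_* name and the private struct are hypothetical:

struct my_link {
	u32 some_state;
};

static void my_setup(struct net_device *dev)
{
	/* hypothetical: assign netdev_ops, MTU, device type, ... */
}

static int my_newlink(void *ctxt, struct net_device *dev,
		      u32 if_id, struct netlink_ext_ack *extack)
{
	return register_netdevice(dev);
}

static void my_dellink(void *ctxt, struct net_device *dev,
		       struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static const struct wwan_ops my_wwan_ops = {
	.priv_size = sizeof(struct my_link),	/* sized into wwan_netdev_priv */
	.setup	   = my_setup,
	.newlink   = my_newlink,
	.dellink   = my_dellink,
};

/* pass WWAN_NO_DEFAULT_LINK to suppress the default network link */
err = wwan_register_ops(parent, &my_wwan_ops, ctxt, WWAN_NO_DEFAULT_LINK);

Inside such a driver, wwan_netdev_drvpriv(dev) then returns the struct my_link area.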
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
index b4cb2ef02f17..53c0038c792b 100644
--- a/include/media/hevc-ctrls.h
+++ b/include/media/hevc-ctrls.h
@@ -19,6 +19,7 @@
#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_CODEC_BASE + 1008)
#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_CODEC_BASE + 1009)
#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_BASE + 1010)
+#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_BASE + 1012)
#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_CODEC_BASE + 1015)
#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_CODEC_BASE + 1016)
@@ -26,6 +27,7 @@
#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
+#define V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS 0x0124
enum v4l2_mpeg_video_hevc_decode_mode {
V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
@@ -75,13 +77,12 @@ struct v4l2_ctrl_hevc_sps {
__u8 num_short_term_ref_pic_sets;
__u8 num_long_term_ref_pics_sps;
__u8 chroma_format_idc;
-
- __u8 padding;
+ __u8 sps_max_sub_layers_minus1;
__u64 flags;
};
-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
@@ -100,10 +101,14 @@ struct v4l2_ctrl_hevc_sps {
#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19)
+#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20)
struct v4l2_ctrl_hevc_pps {
/* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
__u8 num_extra_slice_header_bits;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
__s8 init_qp_minus26;
__u8 diff_cu_qp_delta_depth;
__s8 pps_cb_qp_offset;
@@ -160,6 +165,7 @@ struct v4l2_hevc_pred_weight_table {
#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
struct v4l2_ctrl_hevc_slice_params {
__u32 bit_size;
@@ -190,18 +196,10 @@ struct v4l2_ctrl_hevc_slice_params {
__u8 pic_struct;
/* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- __u8 num_active_dpb_entries;
__u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __u8 num_rps_poc_st_curr_before;
- __u8 num_rps_poc_st_curr_after;
- __u8 num_rps_poc_lt_curr;
-
- __u8 padding;
-
- /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
- struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 padding[5];
/* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
struct v4l2_hevc_pred_weight_table pred_weight_table;
@@ -209,4 +207,34 @@ struct v4l2_ctrl_hevc_slice_params {
__u64 flags;
};
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2
+#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4
+
+struct v4l2_ctrl_hevc_decode_params {
+ __s32 pic_order_cnt_val;
+ __u8 num_active_dpb_entries;
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 num_poc_st_curr_before;
+ __u8 num_poc_st_curr_after;
+ __u8 num_poc_lt_curr;
+ __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u64 flags;
+};
+
+/* MPEG-class control IDs specific to the Hantro driver as defined by V4L2 */
+#define V4L2_CID_CODEC_HANTRO_BASE (V4L2_CTRL_CLASS_CODEC | 0x1200)
+/*
+ * V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP -
+ * the number of bits to skip in the
+ * slice segment header.
+ * If non-IDR, the bits to be skipped go from syntax element "pic_output_flag"
+ * to before syntax element "slice_temporal_mvp_enabled_flag".
+ * If IDR, the skipped bits are just "pic_output_flag"
+ * (separate_colour_plane_flag is not supported).
+ */
+#define V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP (V4L2_CID_CODEC_HANTRO_BASE + 0)
+
#endif
diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
index b35ea6062596..2ab54d426c64 100644
--- a/include/media/media-dev-allocator.h
+++ b/include/media/media-dev-allocator.h
@@ -19,7 +19,7 @@
struct usb_device;
-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
+#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
/**
* media_device_usb_allocate() - Allocate and return struct &media device
*
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
deleted file mode 100644
index 2a4ae6701166..000000000000
--- a/include/media/mpeg2-ctrls.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * These are the MPEG2 state controls for use with stateless MPEG-2
- * codec drivers.
- *
- * It turns out that these structs are not stable yet and will undergo
- * more changes. So keep them private until they are stable and ready to
- * become part of the official public API.
- */
-
-#ifndef _MPEG2_CTRLS_H_
-#define _MPEG2_CTRLS_H_
-
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_CODEC_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_CODEC_BASE+251)
-
-/* enum v4l2_ctrl_type type values */
-#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
-#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
-
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
-
-struct v4l2_mpeg2_sequence {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
- __u16 horizontal_size;
- __u16 vertical_size;
- __u32 vbv_buffer_size;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u16 profile_and_level_indication;
- __u8 progressive_sequence;
- __u8 chroma_format;
-};
-
-struct v4l2_mpeg2_picture {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
- __u8 picture_coding_type;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
- __u8 f_code[2][2];
- __u8 intra_dc_precision;
- __u8 picture_structure;
- __u8 top_field_first;
- __u8 frame_pred_frame_dct;
- __u8 concealment_motion_vectors;
- __u8 q_scale_type;
- __u8 intra_vlc_format;
- __u8 alternate_scan;
- __u8 repeat_first_field;
- __u16 progressive_frame;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
- __u64 backward_ref_ts;
- __u64 forward_ref_ts;
-
- struct v4l2_mpeg2_sequence sequence;
- struct v4l2_mpeg2_picture picture;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u32 quantiser_scale_code;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
- __u8 load_intra_quantiser_matrix;
- __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix;
- __u8 load_chroma_non_intra_quantiser_matrix;
-
- __u8 intra_quantiser_matrix[64];
- __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64];
- __u8 chroma_non_intra_quantiser_matrix[64];
-};
-
-#endif
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index b5585d14fff4..793b54342dff 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -231,6 +231,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_CEC "rc-cec"
#define RC_MAP_CINERGY "rc-cinergy"
#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
+#define RC_MAP_CT_90405 "rc-ct-90405"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -312,7 +313,6 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index a5953b812878..575b59fbac77 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -17,7 +17,6 @@
* Include the stateless codec compound control definitions.
* This will move to the public headers once this API is fully stable.
*/
-#include <media/mpeg2-ctrls.h>
#include <media/hevc-ctrls.h>
/* forward references */
@@ -40,8 +39,9 @@ struct video_device;
* @p_u16: Pointer to a 16-bit unsigned value.
* @p_u32: Pointer to a 32-bit unsigned value.
* @p_char: Pointer to a string.
- * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
- * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure.
+ * @p_mpeg2_picture: Pointer to a MPEG2 picture structure.
+ * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure.
* @p_fwht_params: Pointer to a FWHT stateless parameters structure.
* @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps.
* @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps.
@@ -66,8 +66,9 @@ union v4l2_ctrl_ptr {
u16 *p_u16;
u32 *p_u32;
char *p_char;
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d0e9a5bdb08b..95f8bfd63273 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config {
* @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
* a direction argument if needed.
*
+ * @command: called by in-kernel drivers in order to call functions internal
+ * to subdev drivers that have a separate callback.
+ *
* @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
* used to provide support for private ioctls used on the driver.
*
@@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops {
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
+ long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
@@ -624,6 +628,19 @@ struct v4l2_subdev_pad_config {
};
/**
+ * struct v4l2_subdev_state - Used for storing subdev state information.
+ *
+ * @pads: &struct v4l2_subdev_pad_config array
+ *
+ * This structure only needs to be passed to the pad op if the 'which' field
+ * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
+ * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
+ */
+struct v4l2_subdev_state {
+ struct v4l2_subdev_pad_config *pads;
+};
+
+/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
* @init_cfg: initialize the pad config to default values
@@ -687,27 +704,27 @@ struct v4l2_subdev_pad_config {
*/
struct v4l2_subdev_pad_ops {
int (*init_cfg)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *state);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie);
int (*get_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*set_fmt)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
int (*get_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
int (*set_selection)(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
@@ -918,14 +935,14 @@ struct v4l2_subdev {
* struct v4l2_subdev_fh - Used for storing subdev information per file handle
*
* @vfh: pointer to &struct v4l2_fh
- * @pad: pointer to &struct v4l2_subdev_pad_config
+ * @state: pointer to &struct v4l2_subdev_state
* @owner: module pointer to the owner of this file handle
*/
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
struct module *owner;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct v4l2_subdev_pad_config *pad;
+ struct v4l2_subdev_state *state;
#endif
};
@@ -945,17 +962,17 @@ struct v4l2_subdev_fh {
* &struct v4l2_subdev_pad_config->try_fmt
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array
*/
static inline struct v4l2_mbus_framefmt *
v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_fmt;
+ return &state->pads[pad].try_fmt;
}
/**
@@ -963,17 +980,17 @@ v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
* &struct v4l2_subdev_pad_config->try_crop
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state.
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
*/
static inline struct v4l2_rect *
v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_crop;
+ return &state->pads[pad].try_crop;
}
/**
@@ -981,17 +998,17 @@ v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
* &struct v4l2_subdev_pad_config->try_compose
*
* @sd: pointer to &struct v4l2_subdev
- * @cfg: pointer to &struct v4l2_subdev_pad_config array.
- * @pad: index of the pad in the @cfg array.
+ * @state: pointer to &struct v4l2_subdev_state.
+ * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
*/
static inline struct v4l2_rect *
v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
- return &cfg[pad].try_compose;
+ return &state->pads[pad].try_compose;
}
#endif
@@ -1093,20 +1110,21 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
int v4l2_subdev_link_validate(struct media_link *link);
/**
- * v4l2_subdev_alloc_pad_config - Allocates memory for pad config
+ * v4l2_subdev_alloc_state - allocate v4l2_subdev_state
+ *
+ * @sd: pointer to &struct v4l2_subdev for which the state is being allocated.
*
- * @sd: pointer to struct v4l2_subdev
+ * Must call v4l2_subdev_free_state() when state is no longer needed.
*/
-struct
-v4l2_subdev_pad_config *v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+struct v4l2_subdev_state *v4l2_subdev_alloc_state(struct v4l2_subdev *sd);
/**
- * v4l2_subdev_free_pad_config - Frees memory allocated by
- * v4l2_subdev_alloc_pad_config().
+ * v4l2_subdev_free_state - free a v4l2_subdev_state
*
- * @cfg: pointer to &struct v4l2_subdev_pad_config
+ * @state: v4l2_subdev_state to be freed.
*/
-void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
+void v4l2_subdev_free_state(struct v4l2_subdev_state *state);
+
#endif /* CONFIG_MEDIA_CONTROLLER */
/**
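The mechanical effect of the v4l2_subdev_state conversion on a driver pad op, as a sketch (driver name hypothetical; the TRY path uses the accessors shown above):

static int my_get_fmt(struct v4l2_subdev *sd,
		      struct v4l2_subdev_state *state,
		      struct v4l2_subdev_format *format)
{
	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		format->format = *v4l2_subdev_get_try_format(sd, state,
							     format->pad);
		return 0;
	}

	/* ... fill format->format from the active hardware state ... */
	return 0;
}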
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index c203047eb834..b66585e304e2 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -262,6 +262,22 @@ int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name);
void vb2_queue_release(struct vb2_queue *q);
/**
+ * vb2_queue_change_type() - change the type of an inactive vb2_queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*)
+ *
+ * This function changes the type of the vb2_queue. This is only possible
+ * if the queue is not busy (i.e. no buffers have been allocated).
+ *
+ * vb2_queue_change_type() can be used to support multiple buffer types using
+ * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and
+ * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type()
+ * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus
+ * "lock" the buffer type until the buffers have been released.
+ */
+int vb2_queue_change_type(struct vb2_queue *q, unsigned int type);
+
+/**
* vb2_poll() - implements poll userspace operation
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @file: file argument passed to the poll file operation handler
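Following the vb2_queue_change_type() kerneldoc above, a driver supporting both capture and output on one queue might wrap vidioc_reqbufs like this (my_dev and its queue field are illustrative):

static int my_vidioc_reqbufs(struct file *file, void *priv,
			     struct v4l2_requestbuffers *p)
{
	struct my_dev *dev = video_drvdata(file);
	int ret;

	ret = vb2_queue_change_type(&dev->queue, p->type);
	if (ret)
		return ret;

	return vb2_ioctl_reqbufs(file, priv, p);
}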
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index b1c717286993..ab207677e0a8 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -135,6 +135,14 @@ struct vsock_transport {
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
+ /* SEQ_PACKET. */
+ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ int flags);
+ int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len);
+ bool (*seqpacket_allow)(u32 remote_cid);
+ u32 (*seqpacket_has_data)(struct vsock_sock *vsk);
+
/* Notification. */
int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
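For virtio-based transports, the new SEQPACKET hooks can simply point at the generic helpers added in include/linux/virtio_vsock.h earlier in this diff; only the allow callback is transport-specific (sketch, my_* names hypothetical):

static const struct vsock_transport my_transport = {
	/* ... existing stream/dgram/notification ops ... */
	.seqpacket_dequeue  = virtio_transport_seqpacket_dequeue,
	.seqpacket_enqueue  = virtio_transport_seqpacket_enqueue,
	.seqpacket_allow    = my_seqpacket_allow,	/* per-transport policy */
	.seqpacket_has_data = virtio_transport_seqpacket_has_data,
};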
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ea4ae551c426..b80415011dcd 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -36,7 +36,7 @@
#define HCI_MAX_AMP_ASSOC_SIZE 672
-#define HCI_MAX_CSB_DATA_SIZE 252
+#define HCI_MAX_CPB_DATA_SIZE 252
/* HCI dev events */
#define HCI_DEV_REG 1
@@ -339,6 +339,7 @@ enum {
#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
#define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
@@ -471,10 +472,10 @@ enum {
#define LMP_EXTFEATURES 0x80
/* Extended LMP features */
-#define LMP_CSB_MASTER 0x01
-#define LMP_CSB_SLAVE 0x02
-#define LMP_SYNC_TRAIN 0x04
-#define LMP_SYNC_SCAN 0x08
+#define LMP_CPB_CENTRAL 0x01
+#define LMP_CPB_PERIPHERAL 0x02
+#define LMP_SYNC_TRAIN 0x04
+#define LMP_SYNC_SCAN 0x08
#define LMP_SC 0x01
#define LMP_PING 0x02
@@ -488,7 +489,7 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
-#define HCI_LE_SLAVE_FEATURES 0x08
+#define HCI_LE_PERIPHERAL_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_LL_PRIVACY 0x40
@@ -497,8 +498,8 @@ enum {
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_CHAN_SEL_ALG2 0x40
-#define HCI_LE_CIS_MASTER 0x10
-#define HCI_LE_CIS_SLAVE 0x20
+#define HCI_LE_CIS_CENTRAL 0x10
+#define HCI_LE_CIS_PERIPHERAL 0x20
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -876,17 +877,17 @@ struct hci_rp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
-#define HCI_OP_SET_CSB 0x0441
-struct hci_cp_set_csb {
+#define HCI_OP_SET_CPB 0x0441
+struct hci_cp_set_cpb {
__u8 enable;
__u8 lt_addr;
__u8 lpo_allowed;
__le16 packet_type;
__le16 interval_min;
__le16 interval_max;
- __le16 csb_sv_tout;
+ __le16 cpb_sv_tout;
} __packed;
-struct hci_rp_set_csb {
+struct hci_rp_set_cpb {
__u8 status;
__u8 lt_addr;
__le16 interval;
@@ -1183,14 +1184,14 @@ struct hci_rp_delete_reserved_lt_addr {
__u8 lt_addr;
} __packed;
-#define HCI_OP_SET_CSB_DATA 0x0c76
-struct hci_cp_set_csb_data {
+#define HCI_OP_SET_CPB_DATA 0x0c76
+struct hci_cp_set_cpb_data {
__u8 lt_addr;
__u8 fragment;
__u8 data_length;
- __u8 data[HCI_MAX_CSB_DATA_SIZE];
+ __u8 data[HCI_MAX_CPB_DATA_SIZE];
} __packed;
-struct hci_rp_set_csb_data {
+struct hci_rp_set_cpb_data {
__u8 status;
__u8 lt_addr;
} __packed;
@@ -1504,7 +1505,7 @@ struct hci_cp_le_set_scan_enable {
} __packed;
#define HCI_LE_USE_PEER_ADDR 0x00
-#define HCI_LE_USE_WHITELIST 0x01
+#define HCI_LE_USE_ACCEPT_LIST 0x01
#define HCI_OP_LE_CREATE_CONN 0x200d
struct hci_cp_le_create_conn {
@@ -1524,22 +1525,22 @@ struct hci_cp_le_create_conn {
#define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e
-#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f
-struct hci_rp_le_read_white_list_size {
+#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f
+struct hci_rp_le_read_accept_list_size {
__u8 status;
__u8 size;
} __packed;
-#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010
+#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010
-#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011
-struct hci_cp_le_add_to_white_list {
+#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011
+struct hci_cp_le_add_to_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
-#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012
-struct hci_cp_le_del_from_white_list {
+#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012
+struct hci_cp_le_del_from_accept_list {
__u8 bdaddr_type;
bdaddr_t bdaddr;
} __packed;
@@ -1774,13 +1775,15 @@ struct hci_cp_ext_adv_set {
__u8 max_events;
} __packed;
+#define HCI_MAX_EXT_AD_LENGTH 251
+
#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
struct hci_cp_le_set_ext_adv_data {
__u8 handle;
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
@@ -1789,7 +1792,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[HCI_MAX_AD_LENGTH];
+ __u8 data[];
} __packed;
#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
@@ -1838,23 +1841,23 @@ struct hci_rp_le_read_iso_tx_sync {
#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
struct hci_cis_params {
__u8 cis_id;
- __le16 m_sdu;
- __le16 s_sdu;
- __u8 m_phy;
- __u8 s_phy;
- __u8 m_rtn;
- __u8 s_rtn;
+ __le16 c_sdu;
+ __le16 p_pdu;
+ __u8 c_phy;
+ __u8 p_phy;
+ __u8 c_rtn;
+ __u8 p_rtn;
} __packed;
struct hci_cp_le_set_cig_params {
__u8 cig_id;
- __u8 m_interval[3];
- __u8 s_interval[3];
- __u8 sca;
+ __u8 c_interval[3];
+ __u8 p_interval[3];
+ __u8 wc_sca;
__u8 packing;
__u8 framing;
- __le16 m_latency;
- __le16 s_latency;
+ __le16 c_latency;
+ __le16 p_latency;
__u8 num_cis;
struct hci_cis_params cis[];
} __packed;
@@ -2259,7 +2262,7 @@ struct hci_ev_sync_train_complete {
__u8 status;
} __packed;
-#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
@@ -2417,17 +2420,17 @@ struct hci_evt_le_cis_established {
__le16 handle;
__u8 cig_sync_delay[3];
__u8 cis_sync_delay[3];
- __u8 m_latency[3];
- __u8 s_latency[3];
- __u8 m_phy;
- __u8 s_phy;
+ __u8 c_latency[3];
+ __u8 p_latency[3];
+ __u8 c_phy;
+ __u8 p_phy;
__u8 nse;
- __u8 m_bn;
- __u8 s_bn;
- __u8 m_ft;
- __u8 s_ft;
- __le16 m_mtu;
- __le16 s_mtu;
+ __u8 c_bn;
+ __u8 p_bn;
+ __u8 c_ft;
+ __u8 p_ft;
+ __le16 c_mtu;
+ __le16 p_mtu;
__le16 interval;
} __packed;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index c73ac52af186..a53e94459ecd 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -122,7 +122,7 @@ struct hci_conn_hash {
unsigned int amp_num;
unsigned int sco_num;
unsigned int le_num;
- unsigned int le_num_slave;
+ unsigned int le_num_peripheral;
};
struct bdaddr_list {
@@ -228,9 +228,9 @@ struct adv_info {
__u16 remaining_time;
__u16 duration;
__u16 adv_data_len;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
__u16 scan_rsp_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
__s8 tx_power;
__u32 min_interval;
__u32 max_interval;
@@ -327,7 +327,7 @@ struct hci_dev {
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
- __u8 le_white_list_size;
+ __u8 le_accept_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8];
@@ -470,6 +470,7 @@ struct hci_dev {
struct delayed_work service_cache;
struct delayed_work cmd_timer;
+ struct delayed_work ncmd_timer;
struct work_struct rx_work;
struct work_struct cmd_work;
@@ -521,14 +522,14 @@ struct hci_dev {
struct hci_conn_hash conn_hash;
struct list_head mgmt_pending;
- struct list_head blacklist;
- struct list_head whitelist;
+ struct list_head reject_list;
+ struct list_head accept_list;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
- struct list_head le_white_list;
+ struct list_head le_accept_list;
struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
@@ -550,9 +551,9 @@ struct hci_dev {
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
__s8 adv_tx_power;
- __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 adv_data_len;
- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
__u8 scan_rsp_data_len;
struct list_head adv_instances;
@@ -893,7 +894,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num++;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave++;
+ h->le_num_peripheral++;
break;
case SCO_LINK:
case ESCO_LINK:
@@ -919,7 +920,7 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case LE_LINK:
h->le_num--;
if (c->role == HCI_ROLE_SLAVE)
- h->le_num_slave--;
+ h->le_num_peripheral--;
break;
case SCO_LINK:
case ESCO_LINK:
@@ -1393,8 +1394,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
-#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
-#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE)
+#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
+#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC)
@@ -1768,7 +1769,7 @@ void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
- u32 flags, u8 *name, u8 name_len);
+ u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index a7cffb069565..23a0524061b7 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -202,7 +202,7 @@ struct mgmt_cp_load_link_keys {
struct mgmt_ltk_info {
struct mgmt_addr_info addr;
__u8 type;
- __u8 master;
+ __u8 initiator;
__u8 enc_size;
__le16 ediv;
__le64 rand;
@@ -939,6 +939,7 @@ struct mgmt_ev_auth_failed {
#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
+#define MGMT_DEV_FOUND_INITIATED_CONN 0x08
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 019e998d944a..15335732e166 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -232,7 +232,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- u32 rr_tx_counter;
+ u32 __percpu *rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 48ecca8530ff..b655d8666f55 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
* The link_support layer is used to add any Link Layer specific
* framing.
*/
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer, int (**rcv_func)(
struct sk_buff *, struct net_device *,
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 2aa5e91d8457..8819ff4db35a 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* @fcs: Specify if checksum is used in CAIF Framing Layer.
* @head_room: Head space needed by link specific protocol.
*/
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index 14a55e03bb3c..67cce8757175 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -9,4 +9,5 @@
#include <net/caif/caif_layer.h>
struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5224f885a99a..161cdf7df1a0 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -22,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
#include <linux/net.h>
+#include <linux/rfkill.h>
#include <net/regulatory.h>
/**
@@ -370,11 +371,18 @@ struct ieee80211_sta_he_cap {
* @he_cap: holds the HE capabilities
* @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
* 6 GHz band channel (and 0 may be a valid value).
+ * @vendor_elems: vendor element(s) to advertise
+ * @vendor_elems.data: vendor element(s) data
+ * @vendor_elems.len: vendor element(s) length
*/
struct ieee80211_sband_iftype_data {
u16 types_mask;
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct {
+ const u8 *data;
+ unsigned int len;
+ } vendor_elems;
};
/**
@@ -534,18 +542,6 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
}
/**
- * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
- * @sband: the sband to search for the STA on
- *
- * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found
- */
-static inline const struct ieee80211_sta_he_cap *
-ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
-{
- return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
-}
-
-/**
* ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
* @sband: the sband to search for the STA on
* @iftype: the iftype to search for
@@ -906,6 +902,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
}
/**
+ * cfg80211_any_usable_channels - check for usable channels
+ * @wiphy: the wiphy to check for
+ * @band_mask: which bands to check on
+ * @prohibited_flags: which channels to not consider usable,
+ * %IEEE80211_CHAN_DISABLED is always taken into account
+ */
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+ unsigned long band_mask,
+ u32 prohibited_flags);
+
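A sketch of the intended use of the new helper: a driver deciding whether to bother setting up a band at all. The band and flag choices here are illustrative, not mandated by the API:

	/* skip 6 GHz setup when no channel there is actually usable */
	if (!cfg80211_any_usable_channels(wiphy, BIT(NL80211_BAND_6GHZ),
					  IEEE80211_CHAN_NO_IR |
					  IEEE80211_CHAN_RADAR))
		return;
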
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -1245,8 +1252,6 @@ struct cfg80211_csa_settings {
u8 count;
};
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
-
/**
* struct iface_combination_params - input parameters for interface combinations
*
@@ -3522,7 +3527,10 @@ struct cfg80211_pmsr_result {
* If neither @trigger_based nor @non_trigger_based is set,
* EDCA based ranging will be used.
* @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
- * @trigger_based or @non_trigger_based is set.
+ * @trigger_based or @non_trigger_based is set.
+ * @bss_color: the bss color of the responder. Optional. Set to zero to
+ * indicate the driver should set the BSS color. Only valid if
+ * @non_trigger_based or @trigger_based is set.
*
* See also nl80211 for the respective attribute documentation.
*/
@@ -3540,6 +3548,7 @@ struct cfg80211_pmsr_ftm_request_peer {
u8 burst_duration;
u8 ftms_per_burst;
u8 ftmr_retries;
+ u8 bss_color;
};
/**
@@ -4945,6 +4954,7 @@ struct wiphy_iftype_akm_suites {
* configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
+ * @rfkill: a pointer to the rfkill structure
*/
struct wiphy {
struct mutex mtx;
@@ -5087,6 +5097,8 @@ struct wiphy {
const struct cfg80211_sar_capa *sar_capa;
+ struct rfkill *rfkill;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5760,7 +5772,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset);
+ u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5772,7 +5784,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
@@ -6661,7 +6673,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
* wiphy_rfkill_stop_polling - stop polling rfkill
* @wiphy: the wiphy
*/
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
+static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+ rfkill_pause_polling(wiphy->rfkill);
+}
/**
* DOC: Vendor commands
@@ -8154,6 +8169,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
dev_notice(&(wiphy)->dev, format, ##args)
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...) \
+ dev_info_once(&(wiphy)->dev, format, ##args)
#define wiphy_err_ratelimited(wiphy, format, args...) \
dev_err_ratelimited(&(wiphy)->dev, format, ##args)
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 7c984cadfec4..57b738b78073 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -34,6 +34,7 @@ struct devlink_ops;
struct devlink {
struct list_head list;
struct list_head port_list;
+ struct list_head rate_list;
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
@@ -133,6 +134,24 @@ struct devlink_port_attrs {
};
};
+struct devlink_rate {
+ struct list_head list;
+ enum devlink_rate_type type;
+ struct devlink *devlink;
+ void *priv;
+ u64 tx_share;
+ u64 tx_max;
+
+ struct devlink_rate *parent;
+ union {
+ struct devlink_port *devlink_port;
+ struct {
+ char *name;
+ refcount_t refcnt;
+ };
+ };
+};
+
struct devlink_port {
struct list_head list;
struct list_head param_list;
@@ -152,6 +171,8 @@ struct devlink_port {
struct delayed_work type_warn_dw;
struct list_head reporter_list;
struct mutex reporters_lock; /* Protects reporter_list */
+
+ struct devlink_rate *devlink_rate;
};
struct devlink_port_new_attrs {
@@ -1327,6 +1348,16 @@ struct devlink_ops {
enum devlink_trap_action action,
struct netlink_ext_ack *extack);
/**
+ * @trap_drop_counter_get: Trap drop counter get function.
+ *
+ * Should be used by device drivers to report the number of packets
+ * that have been dropped and cannot be passed to the devlink
+ * subsystem by the underlying device.
+ */
+ int (*trap_drop_counter_get)(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
+ /**
* @trap_policer_init: Trap policer initialization function.
*
* Should be used by device drivers to initialize the trap policer in
@@ -1453,6 +1484,30 @@ struct devlink_ops {
struct devlink_port *port,
enum devlink_port_fn_state state,
struct netlink_ext_ack *extack);
+
+ /**
+ * Rate control callbacks.
+ */
+ int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack);
+ int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack);
+ int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack);
+ int (*rate_leaf_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
+ int (*rate_node_parent_set)(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -1512,6 +1567,9 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
u32 controller, u16 pf, u32 sf,
bool external);
+int devlink_rate_leaf_create(struct devlink_port *port, void *priv);
+void devlink_rate_leaf_destroy(struct devlink_port *devlink_port);
+void devlink_rate_nodes_destroy(struct devlink *devlink);
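
A driver plugs a port into the new rate hierarchy by creating a leaf and implementing the matching callbacks; the priv pointer passed to devlink_rate_leaf_create() is handed back in every rate_leaf_* op. A minimal sketch with hypothetical foo_* names:

	static int foo_rate_leaf_tx_max_set(struct devlink_rate *devlink_rate,
					    void *priv, u64 tx_max,
					    struct netlink_ext_ack *extack)
	{
		struct foo_vf *vf = priv;  /* stored by devlink_rate_leaf_create() */

		return foo_hw_set_max_rate(vf, tx_max);	/* hypothetical */
	}

	/* at port init time: */
	err = devlink_rate_leaf_create(&vf->dl_port, vf);
	if (err)
		return err;
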
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index e1a2610a0e06..33f40c1ec379 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -50,6 +50,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
+#define DSA_TAG_PROTO_SJA1110_VALUE 23
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -75,6 +76,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
+ DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
};
struct packet_type;
@@ -91,7 +93,8 @@ struct dsa_device_ops {
* as regular on the master net device.
*/
bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
- unsigned int overhead;
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
const char *name;
enum dsa_tag_protocol proto;
/* Some tagging protocols either mangle or shift the destination MAC
@@ -100,7 +103,6 @@ struct dsa_device_ops {
* its RX filter.
*/
bool promisc_on_master;
- bool tail_tag;
};
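
With the single overhead field split in two, a tail-tagging protocol now declares its tailroom explicitly instead of setting the removed tail_tag flag. Illustrative only; the ops name and tag size are hypothetical, and DSA_TAG_PROTO_NONE is just a placeholder:

	/* a trailer-based tagger: 4 bytes appended after the payload */
	static const struct dsa_device_ops foo_tag_ops = {
		.name			= "foo",
		.proto			= DSA_TAG_PROTO_NONE,
		.needed_headroom	= 0,
		.needed_tailroom	= 4,
	};
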
/* This structure defines the control interfaces that are overlayed by the
@@ -283,6 +285,12 @@ struct dsa_port {
*/
const struct dsa_netdevice_ops *netdev_ops;
+ /* List of MAC addresses that must be forwarded on this port.
+ * These are only valid on CPU ports and DSA links.
+ */
+ struct list_head fdbs;
+ struct list_head mdbs;
+
bool setup;
};
@@ -297,6 +305,13 @@ struct dsa_link {
struct list_head list;
};
+struct dsa_mac_addr {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+};
+
struct dsa_switch {
bool setup;
@@ -407,6 +422,21 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
return NULL;
}
+static inline bool dsa_port_is_dsa(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_port_is_user(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_USER;
+}
+
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
@@ -474,6 +504,32 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
+/* Return true if this is the local port used to reach the CPU port */
+static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
+{
+ if (dsa_is_unused_port(ds, port))
+ return false;
+
+ return port == dsa_upstream_port(ds, port);
+}
+
+/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
+ * that the routing port from @downstream_ds to @upstream_ds is also the port
+ * which @downstream_ds uses to reach its dedicated CPU.
+ */
+static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
+ struct dsa_switch *downstream_ds)
+{
+ int routing_port;
+
+ if (upstream_ds == downstream_ds)
+ return true;
+
+ routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
+
+ return dsa_is_upstream_port(downstream_ds, routing_port);
+}
+
static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
const struct dsa_switch *ds = dp->ds;
@@ -926,7 +982,7 @@ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
{
#if IS_ENABLED(CONFIG_NET_DSA)
const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
- int tag_len = ops->overhead;
+ int tag_len = ops->needed_headroom;
*offset = tag_len;
*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index dc5c1e69cd9f..69c9eabf8325 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -319,12 +319,14 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
if (flow_offload_has_one_action(action))
return true;
- flow_action_for_each(i, action_entry, action) {
- if (i && action_entry->hw_stats != last_hw_stats) {
- NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
- return false;
+ if (action) {
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats = action_entry->hw_stats;
}
- last_hw_stats = action_entry->hw_stats;
}
return true;
}
diff --git a/include/net/icmp.h b/include/net/icmp.h
index fd84adc47963..caddf4a59ad1 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -57,5 +57,6 @@ int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr);
#endif /* _ICMP_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 3c8c59471bc1..b06c2d02ec84 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -135,7 +135,7 @@ struct inet_connection_sock {
u32 icsk_user_timeout;
u64 icsk_ca_priv[104 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
+#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index e20874059f82..d9683bef8684 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,6 +31,7 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
+#include <net/lwtunnel.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
/* 'forwarding = true' case should always honour route mtu */
mtu = dst_metric_raw(dst, RTAX_MTU);
- if (mtu)
- return mtu;
+ if (!mtu)
+ mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
- return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
+ unsigned int mtu;
+
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
- return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}
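
Both IPv4 helpers (and the IPv6 ones changed below, which follow the same pattern) now subtract the lightweight-tunnel encapsulation headroom, so callers see the MTU actually available for payload. With illustrative numbers, assuming a device MTU of 1500 and an LWT encap consuming 8 bytes:

	mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);	/* 1500 */
	mtu -= lwtunnel_headroom(dst->lwtstate, mtu);		/* 1500 - 8 = 1492 */
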
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f51a118bfce8..f14149df5a65 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -265,11 +265,18 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
+ int mtu;
+
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+ if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+ mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
+ mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ } else
+ mtu = dst_mtu(skb_dst(skb));
+
+ return mtu;
}
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
@@ -317,7 +324,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
if (dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
}
mtu = IPV6_MIN_MTU;
@@ -327,7 +334,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
mtu = idev->cnf.mtu6;
rcu_read_unlock();
- return mtu;
+out:
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a914f33f3ed5..3ab2563b1a23 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -466,6 +466,49 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
+/* Fields used for sysctl_fib_multipath_hash_fields.
+ * Common to IPv4 and IPv6.
+ *
+ * Add new fields at the end. This is user API.
+ */
+#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0)
+#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1)
+#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2)
+#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3)
+#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4)
+#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10)
+#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11)
+
+#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+
+#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \
+ FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
+
+#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \
+ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \
+ FIB_MULTIPATH_HASH_FIELD_DST_IP | \
+ FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+
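Composing the mask in C mirrors what the sysctl value encodes. A sketch selecting the classic addresses-protocol-ports hash, which works out to 0x37 (the sysctl itself is wired up elsewhere in this series):

	u32 fields = FIB_MULTIPATH_HASH_FIELD_SRC_IP |
		     FIB_MULTIPATH_HASH_FIELD_DST_IP |
		     FIB_MULTIPATH_HASH_FIELD_IP_PROTO |
		     FIB_MULTIPATH_HASH_FIELD_SRC_PORT |
		     FIB_MULTIPATH_HASH_FIELD_DST_PORT;	/* == 0x37 */
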
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 448bf2b34759..f2d0ecc257bb 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -926,11 +926,19 @@ static inline int ip6_multipath_hash_policy(const struct net *net)
{
return net->ipv6.sysctl.multipath_hash_policy;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_fields;
+}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
{
return 0;
}
+static inline u32 ip6_multipath_hash_fields(const struct net *net)
+{
+ return 0;
+}
#endif
/*
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 445b66c6eb7e..d8a1d09a2141 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*/
#ifndef MAC80211_H
@@ -526,6 +526,7 @@ struct ieee80211_fils_discovery {
* @twt_responder: does this BSS support TWT requester (relevant for managed
* mode only, set if the AP advertises TWT responder role)
* @twt_protected: does this BSS support protected TWT frames
+ * @twt_broadcast: does this BSS support broadcast TWT
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -642,6 +643,7 @@ struct ieee80211_bss_conf {
bool twt_requester;
bool twt_responder;
bool twt_protected;
+ bool twt_broadcast;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -3345,6 +3347,21 @@ enum ieee80211_reconfig_type {
};
/**
+ * struct ieee80211_prep_tx_info - prepare TX information
+ * @duration: if non-zero, hint about the required duration,
+ * only used with the mgd_prepare_tx() method.
+ * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc)
+ * @success: whether the frame exchange was successful, only
+ * used with the mgd_complete_tx() method, and then only
+ * valid for auth and (re)assoc.
+ */
+struct ieee80211_prep_tx_info {
+ u16 duration;
+ u16 subtype;
+ u8 success:1;
+};
+
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -3756,9 +3773,13 @@ enum ieee80211_reconfig_type {
* frame in case that no beacon was heard from the AP/P2P GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
- * If duration is greater than zero, mac80211 hints to the driver the
- * duration for which the operation is requested.
+ * Additional information is passed in the &struct ieee80211_prep_tx_info
+ * data. If the duration it carries is greater than zero, mac80211 hints
+ * to the driver the duration for which the operation is requested.
* The callback is optional and can (should!) sleep.
+ * @mgd_complete_tx: Notify the driver that the response frame for a previously
+ * transmitted frame announced with @mgd_prepare_tx was received; the data
+ * is filled similarly to @mgd_prepare_tx, though the duration is not used.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
* a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -4109,7 +4130,10 @@ struct ieee80211_ops {
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u16 duration);
+ struct ieee80211_prep_tx_info *info);
+ void (*mgd_complete_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
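
A driver implementing the pair of callbacks now receives the same info structure on both sides of the exchange. A hypothetical stub; the foo_* session-protection helpers are assumptions standing in for driver-specific logic:

	static void foo_mgd_prepare_tx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_prep_tx_info *info)
	{
		/* stay on channel for the hinted duration, if any */
		if (info->duration)
			foo_schedule_session_protection(hw, vif, info->duration);
	}

	static void foo_mgd_complete_tx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_prep_tx_info *info)
	{
		/* info->subtype says which exchange ended; duration is unused */
		foo_cancel_session_protection(hw, vif);
	}
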
@@ -5537,7 +5561,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6184,6 +6208,11 @@ enum rate_control_capabilities {
* otherwise the NSS difference doesn't bother us.
*/
RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+ /**
+ * @RATE_CTRL_CAPA_AMPDU_TRIGGER:
+ * mac80211 should start A-MPDU sessions on tx
+ */
+ RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1),
};
struct rate_control_ops {
@@ -6392,7 +6421,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
/**
* ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- * of injected frames
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
* @skb: packet injected by userspace
* @dev: the &struct device of this 802.11 device
*/
@@ -6571,9 +6605,6 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
{
}
-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq, bool force);
-
/**
* ieee80211_schedule_txq - schedule a TXQ for transmission
*
@@ -6586,11 +6617,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
* The driver may call this function if it has buffered packets for
* this TXQ internally.
*/
-static inline void
-ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-{
- __ieee80211_schedule_txq(hw, txq, true);
-}
+void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
/**
* ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
@@ -6602,12 +6629,8 @@ ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
* The driver may set force=true if it has buffered packets for this TXQ
* internally.
*/
-static inline void
-ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
- bool force)
-{
- __ieee80211_schedule_txq(hw, txq, force);
-}
+void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+ bool force);
/**
* ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
@@ -6747,4 +6770,22 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
struct sk_buff *
ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_is_tx_data - check if frame is a data frame
+ *
+ * The function is used to check if a frame is a data frame. Frames with
+ * hardware encapsulation enabled are data frames.
+ *
+ * @skb: the frame to be transmitted.
+ */
+static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+
+ return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control);
+}
+
#endif /* MAC80211_H */
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 52874cdfe226..d6fa6b97f6ef 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -241,7 +241,7 @@ struct macsec_context {
struct macsec_rx_sc *rx_sc;
struct {
unsigned char assoc_num;
- u8 key[MACSEC_KEYID_LEN];
+ u8 key[MACSEC_MAX_KEY_LEN];
union {
struct macsec_rx_sa *rx_sa;
struct macsec_tx_sa *tx_sa;
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 83f23774b908..cb580b06152f 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -23,6 +23,7 @@ struct mptcp_ext {
u64 data_seq;
u32 subflow_seq;
u16 data_len;
+ __sum16 csum;
u8 use_map:1,
dsn64:1,
data_fin:1,
@@ -31,7 +32,8 @@ struct mptcp_ext {
mpc_map:1,
frozen:1,
reset_transient:1;
- u8 reset_reason:4;
+ u8 reset_reason:4,
+ csum_reqd:1;
};
#define MPTCP_RM_IDS_MAX 8
@@ -63,8 +65,10 @@ struct mptcp_out_options {
struct mptcp_rm_list rm_list;
u8 join_id;
u8 backup;
- u8 reset_reason:4;
- u8 reset_transient:1;
+ u8 reset_reason:4,
+ reset_transient:1,
+ csum_reqd:1,
+ allow_join_id0:1;
u32 nonce;
u64 thmac;
u32 token;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fa5887143f0d..12cf6d7ea62c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -32,6 +32,7 @@
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
+#include <net/netns/smc.h>
#include <net/netns/bpf.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
@@ -170,6 +171,9 @@ struct net {
struct sock *crypto_nlsk;
#endif
struct sock *diag_nlsk;
+#if IS_ENABLED(CONFIG_SMC)
+ struct netns_smc smc;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
@@ -184,6 +188,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -203,13 +210,22 @@ static inline void net_ns_get_ownership(const struct net *net,
}
static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 06dc6db70d18..cc663c68ddc4 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -346,6 +346,13 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
skb_set_nfct(skb, (unsigned long)ct | info);
}
+extern unsigned int nf_conntrack_net_id;
+
+static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
+{
+ return net_generic(net, nf_conntrack_net_id);
+}
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 96f9cf81f46b..1f47bef51722 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-__printf(3, 4) __cold
+__printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...);
-__printf(5, 6) __cold
+__printf(4, 5) __cold
void nf_l4proto_log_invalid(const struct sk_buff *skb,
- struct net *net,
- u16 pf, u8 protonum,
+ const struct nf_hook_state *state,
+ u8 protonum,
const char *fmt, ...);
#else
-static inline __printf(5, 6) __cold
-void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
- u16 pf, u8 protonum, const char *fmt, ...) {}
-static inline __printf(3, 4) __cold
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ u8 protonum,
+ const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 51d8eb99764d..a3647fadf1cc 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -157,7 +157,6 @@ enum nf_flow_flags {
NF_FLOW_HW,
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
- NF_FLOW_HW_REFRESH,
NF_FLOW_HW_PENDING,
};
@@ -178,6 +177,8 @@ struct flow_offload {
#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp (u32)jiffies
+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
+
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
return (__s32)(timeout - nf_flowtable_time_stamp);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 27eeb613bb4e..148f5d8ee5ab 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -23,35 +23,46 @@ struct module;
struct nft_pktinfo {
struct sk_buff *skb;
+ const struct nf_hook_state *state;
bool tprot_set;
u8 tprot;
- /* for x_tables compatibility */
- struct xt_action_param xt;
+ u16 fragoff;
+ unsigned int thoff;
};
+static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
+{
+ return pkt->state->sk;
+}
+
+static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt)
+{
+ return pkt->thoff;
+}
+
static inline struct net *nft_net(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->net;
+ return pkt->state->net;
}
static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->hook;
+ return pkt->state->hook;
}
static inline u8 nft_pf(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->pf;
+ return pkt->state->pf;
}
static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->in;
+ return pkt->state->in;
}
static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
{
- return pkt->xt.state->out;
+ return pkt->state->out;
}
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
@@ -59,16 +70,15 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->xt.state = state;
+ pkt->state = state;
}
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
pkt->tprot_set = false;
pkt->tprot = 0;
- pkt->xt.thoff = 0;
- pkt->xt.fragoff = 0;
+ pkt->thoff = 0;
+ pkt->fragoff = 0;
}
/**
@@ -1506,16 +1516,10 @@ struct nft_trans_chain {
struct nft_trans_table {
bool update;
- u8 state;
- u32 flags;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_state(trans) \
- (((struct nft_trans_table *)trans->data)->state)
-#define nft_trans_table_flags(trans) \
- (((struct nft_trans_table *)trans->data)->flags)
struct nft_trans_elem {
struct nft_set *set;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index fd10a7862fdc..0fa5a6d98a00 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -3,6 +3,7 @@
#define _NET_NF_TABLES_CORE_H
#include <net/netfilter/nf_tables.h>
+#include <linux/indirect_call_wrapper.h>
extern struct nft_expr_type nft_imm_type;
extern struct nft_expr_type nft_cmp_type;
@@ -15,6 +16,7 @@ extern struct nft_expr_type nft_range_type;
extern struct nft_expr_type nft_meta_type;
extern struct nft_expr_type nft_rt_type;
extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
#ifdef CONFIG_NETWORK_SECMARK
extern struct nft_object_type nft_secmark_obj_type;
@@ -88,6 +90,36 @@ extern const struct nft_set_type nft_set_bitmap_type;
extern const struct nft_set_type nft_set_pipapo_type;
extern const struct nft_set_type nft_set_pipapo_avx2_type;
+#ifdef CONFIG_RETPOLINE
+bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup_fast(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+#else
+static inline bool
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ return set->ops->lookup(net, set, key, ext);
+}
+#endif
+
+/* called from nft_pipapo_avx2.c */
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+/* called from nft_set_pipapo.c */
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+
struct nft_expr;
struct nft_regs;
struct nft_pktinfo;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 1f7bea39ad1b..eb4c094cd54d 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,26 +5,24 @@
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
-static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
{
struct iphdr *ip;
ip = ip_hdr(pkt->skb);
pkt->tprot_set = true;
pkt->tprot = ip->protocol;
- pkt->xt.thoff = ip_hdrlen(pkt->skb);
- pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+ pkt->thoff = ip_hdrlen(pkt->skb);
+ pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
u32 len, thoff;
- iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
- &_iph);
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
if (!iph)
return -1;
@@ -33,42 +31,40 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
len = ntohs(iph->tot_len);
thoff = iph->ihl * 4;
- if (skb->len < len)
+ if (pkt->skb->len < len)
return -1;
else if (len < thoff)
return -1;
pkt->tprot_set = true;
pkt->tprot = iph->protocol;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
}
-static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv4_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
}
-static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
{
struct iphdr *iph;
u32 len, thoff;
- if (!pskb_may_pull(skb, sizeof(*iph)))
+ if (!pskb_may_pull(pkt->skb, sizeof(*iph)))
return -1;
- iph = ip_hdr(skb);
+ iph = ip_hdr(pkt->skb);
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
len = ntohs(iph->tot_len);
thoff = iph->ihl * 4;
- if (skb->len < len) {
+ if (pkt->skb->len < len) {
__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
return -1;
} else if (len < thoff) {
@@ -77,8 +73,8 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt,
pkt->tprot_set = true;
pkt->tprot = iph->protocol;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+ pkt->thoff = thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 867de29f3f7a..7595e02b00ba 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -6,8 +6,7 @@
#include <net/ipv6.h>
#include <net/netfilter/nf_tables.h>
-static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
{
unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
@@ -15,18 +14,17 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
- nft_set_pktinfo_unspec(pkt, skb);
+ nft_set_pktinfo_unspec(pkt);
return;
}
pkt->tprot_set = true;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
}
-static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -36,8 +34,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
int protohdr;
u32 pkt_len;
- ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
- &_ip6h);
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
if (!ip6h)
return -1;
@@ -45,7 +43,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > skb->len)
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
@@ -54,8 +52,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
pkt->tprot_set = true;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
return 0;
#else
@@ -63,15 +61,13 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
#endif
}
-static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
- nft_set_pktinfo_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv6_validate(pkt) < 0)
+ nft_set_pktinfo_unspec(pkt);
}
-static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -82,15 +78,15 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt,
int protohdr;
u32 pkt_len;
- if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ if (!pskb_may_pull(pkt->skb, sizeof(*ip6h)))
return -1;
- ip6h = ipv6_hdr(skb);
+ ip6h = ipv6_hdr(pkt->skb);
if (ip6h->version != 6)
goto inhdr_error;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > skb->len) {
+ if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
idev = __in6_dev_get(nft_in(pkt));
__IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
return -1;
@@ -102,8 +98,8 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt,
pkt->tprot_set = true;
pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
+ pkt->thoff = thoff;
+ pkt->fragoff = frag_off;
return 0;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ad0a95c2335e..c3094b83a525 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -27,6 +27,10 @@ struct nf_tcp_net {
u8 tcp_loose;
u8 tcp_be_liberal;
u8 tcp_max_retrans;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+ unsigned int offload_pickup;
+#endif
};
enum udp_conntrack {
@@ -37,6 +41,10 @@ enum udp_conntrack {
struct nf_udp_net {
unsigned int timeouts[UDP_CT_MAX];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+ unsigned int offload_pickup;
+#endif
};
struct nf_icmp_net {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index f6af8d96d3c6..b8620519eace 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -126,6 +126,7 @@ struct netns_ipv4 {
u8 sysctl_tcp_syn_retries;
u8 sysctl_tcp_synack_retries;
u8 sysctl_tcp_syncookies;
+ u8 sysctl_tcp_migrate_req;
int sysctl_tcp_reordering;
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
@@ -210,6 +211,7 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ u32 sysctl_fib_multipath_hash_fields;
u8 sysctl_fib_multipath_use_neigh;
u8 sysctl_fib_multipath_hash_policy;
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 6153c8067009..bde0b7adb4a3 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -28,8 +28,9 @@ struct netns_sysctl_ipv6 {
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
- u8 bindv6only;
+ u32 multipath_hash_fields;
u8 multipath_hash_policy;
+ u8 bindv6only;
u8 flowlabel_consistency;
u8 auto_flowlabels;
int icmpv6_time;
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index a0f315effa94..40240722cdca 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -84,6 +84,9 @@ struct netns_sctp {
/* HB.interval - 30 seconds */
unsigned int hb_interval;
+ /* The interval for PLPMTUD probe timer */
+ unsigned int probe_interval;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
new file mode 100644
index 000000000000..ea8a9cf2619b
--- /dev/null
+++ b/include/net/netns/smc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_SMC_H__
+#define __NETNS_SMC_H__
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+
+struct smc_stats_rsn;
+struct smc_stats;
+struct netns_smc {
+ /* per cpu counters for SMC */
+ struct smc_stats __percpu *smc_stats;
+ /* protect fback_rsn */
+ struct mutex mutex_fback_rsn;
+ struct smc_stats_rsn *fback_rsn;
+};
+#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index e816b6a3ef2b..e946366e8ba5 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -42,6 +42,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bydst;
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
+ struct hlist_head __rcu *state_byseq;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index bd76e8e082c0..1df0f8074c9d 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index b4b6de909c93..3dd62dd73027 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -146,6 +146,8 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
return pool->p.dma_dir;
}
+bool page_pool_return_skb_page(struct page *page);
+
struct page_pool *page_pool_create(const struct page_pool_params *params);
#ifdef CONFIG_PAGE_POOL
@@ -251,4 +253,11 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
spin_unlock_bh(&pool->ring.producer_lock);
}
+/* Store mem_info on struct page and use it while recycling skb frags */
+static inline
+void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
+{
+ page->pp = pp;
+}
+
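A sketch of the driver-side half of the recycling flow; the skb path that consumes this via page_pool_return_skb_page() is wired up elsewhere in this series:

	/* after allocating a page for an RX frag, remember its pool so the
	 * skb-recycling path can return it instead of freeing it
	 */
	page = page_pool_dev_alloc_pages(pool);
	if (!page)
		return -ENOMEM;
	page_pool_store_mem_info(page, pool);
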
#endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 255e4f4b521f..ec7823921bd2 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->extack = extack;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+ struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+ if (tc_skb_ext)
+ memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+ return tc_skb_ext;
+}
+#endif
+
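A sketch of a caller, assuming the usual pattern of recording the chain index on a miss (chain is a real tc_skb_ext field; chain_index is a hypothetical local):

	struct tc_skb_ext *ext = tc_skb_ext_alloc(skb);

	if (!ext)
		return -ENOMEM;
	ext->chain = chain_index;	/* ext was zeroed by the helper */
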
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f5c1bee0cd6a..6d7b12cba015 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -128,12 +128,7 @@ void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
- /* NOLOCK qdisc must check 'state' under the qdisc seqlock
- * to avoid racing with dev_qdisc_reset()
- */
- if (!(q->flags & TCQ_F_NOLOCK) ||
- likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
- __qdisc_run(q);
+ __qdisc_run(q);
qdisc_run_end(q);
}
}
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 2b778e1d2d8f..f51c06ae365f 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -43,7 +43,6 @@ struct net_protocol {
int (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
- netns_ok:1,
/* does the protocol do more stringent
* icmp tag validation than simple
* socket lookup?
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 479f60ef54c0..384e800665f2 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -37,6 +37,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
+ * @alloc: netdev allocation function, may be %NULL; if set, it is used
+ * in place of alloc_netdev_mqs(), in which case @priv_size
+ * and @setup are unused. Returns a netdev or ERR_PTR().
* @priv_size: sizeof net_device private space
* @setup: net_device setup function
* @newlink: Function for configuring and registering a new device
@@ -63,6 +66,11 @@ struct rtnl_link_ops {
const char *kind;
size_t priv_size;
+ struct net_device *(*alloc)(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues);
void (*setup)(struct net_device *dev);
bool netns_refund;
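
A hypothetical link type using the new hook; when @alloc is set it owns the allocation, so @priv_size and @setup are bypassed (struct foo_priv and foo_setup are assumptions):

	static struct net_device *foo_alloc(struct nlattr *tb[], const char *ifname,
					    unsigned char name_assign_type,
					    unsigned int num_tx_queues,
					    unsigned int num_rx_queues)
	{
		struct net_device *dev;

		dev = alloc_netdev_mqs(sizeof(struct foo_priv), ifname,
				       name_assign_type, foo_setup,
				       num_tx_queues, num_rx_queues);
		/* contract is "netdev or ERR_PTR()", so map NULL to an error */
		return dev ? dev : ERR_PTR(-ENOMEM);
	}

	static struct rtnl_link_ops foo_link_ops __read_mostly = {
		.kind  = "foo",
		.alloc = foo_alloc,
	};
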
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f7a6e14491fb..9ed33e6840bd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,8 +36,16 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
+ __QDISC_STATE_DRAINING,
};
+#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \
+ QDISC_STATE_DRAINING)
+
struct qdisc_size_table {
struct rcu_head rcu;
struct list_head list;
@@ -109,8 +117,6 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
- /* for NOLOCK qdisc, true if there are no enqueued skbs */
- bool empty;
struct rcu_head rcu;
/* private data */
@@ -144,6 +150,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
+}
+
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
return q->flags & TCQ_F_CPUSTATS;
@@ -152,16 +163,49 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return READ_ONCE(qdisc->empty);
+ return nolock_qdisc_is_empty(qdisc);
return !READ_ONCE(qdisc->q.qlen);
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
- if (!spin_trylock(&qdisc->seqlock))
+ if (spin_trylock(&qdisc->seqlock))
+ return true;
+
+ /* Paired with smp_mb__after_atomic() to make sure
+ * STATE_MISSED checking is synchronized with clearing
+ * in pfifo_fast_dequeue().
+ */
+ smp_mb__before_atomic();
+
+ /* If the MISSED flag is set, it means another thread has
+ * set the MISSED flag before the second spin_trylock(), so
+ * we can return false here to avoid multiple CPUs doing
+ * the set_bit() and second spin_trylock() concurrently.
+ */
+ if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
return false;
- WRITE_ONCE(qdisc->empty, false);
+
+ /* Set the MISSED flag before the second spin_trylock();
+ * if that spin_trylock() returns false, it means another
+ * CPU holding the lock will do the dequeuing for us, or it
+ * will see the MISSED flag set after releasing the lock and
+ * reschedule net_tx_action() to do the dequeuing.
+ */
+ set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+ /* spin_trylock() only has load-acquire semantic, so use
+ * smp_mb__after_atomic() to ensure STATE_MISSED is set
+ * before doing the second spin_trylock().
+ */
+ smp_mb__after_atomic();
+
+ /* Retry in case the other CPU released the lock (at the end
+ * of qdisc_run_end()) before it could see the new flag.
+ */
+ return spin_trylock(&qdisc->seqlock);
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -175,9 +219,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- write_seqcount_end(&qdisc->running);
- if (qdisc->flags & TCQ_F_NOLOCK)
+ if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state)))
+ __netif_schedule(qdisc);
+ } else {
+ write_seqcount_end(&qdisc->running);
+ }
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 5e848884ff61..2058fabffbf6 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -59,6 +59,7 @@ enum sctp_verb {
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer. */
SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */
SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */
SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */
SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 14a0d22c9113..265fffa33dad 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -77,6 +77,7 @@ enum sctp_event_timeout {
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
SCTP_EVENT_TIMEOUT_HEARTBEAT,
SCTP_EVENT_TIMEOUT_RECONF,
+ SCTP_EVENT_TIMEOUT_PROBE,
SCTP_EVENT_TIMEOUT_SACK,
SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
@@ -200,6 +201,23 @@ enum sctp_sock_state {
SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
};
+enum sctp_plpmtud_state {
+ SCTP_PL_DISABLED,
+ SCTP_PL_BASE,
+ SCTP_PL_SEARCH,
+ SCTP_PL_COMPLETE,
+ SCTP_PL_ERROR,
+};
+
+#define SCTP_BASE_PLPMTU 1200
+#define SCTP_MAX_PLPMTU 9000
+#define SCTP_MIN_PLPMTU 512
+
+#define SCTP_MAX_PROBES 3
+
+#define SCTP_PL_BIG_STEP 32
+#define SCTP_PL_MIN_STEP 4
+
/* These functions map various type to printable names. */
const char *sctp_cname(const union sctp_subtype id); /* chunk types */
const char *sctp_oname(const union sctp_subtype id); /* other events */
@@ -424,4 +442,6 @@ enum {
*/
#define SCTP_AUTH_RANDOM_LENGTH 32
+#define SCTP_PROBE_TIMER_MIN 5000
+
#endif /* __sctp_constants_h__ */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 86f74f2fe6de..69bab88ad66b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -145,6 +145,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_transport *);
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb);
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
@@ -573,14 +575,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
/* Calculate max payload size given a MTU, or the total overhead if
* given MTU is zero
*/
-static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
- __u32 mtu, __u32 extra)
+static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp,
+ const struct sctp_transport *t,
+ __u32 mtu, __u32 extra)
{
__u32 overhead = sizeof(struct sctphdr) + extra;
if (sp) {
overhead += sp->pf->af->net_header_len;
- if (sp->udp_port)
+ if (sp->udp_port && (!t || t->encap_port))
overhead += sizeof(struct udphdr);
} else {
overhead += sizeof(struct ipv6hdr);
@@ -592,6 +595,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
return mtu ? mtu - overhead : overhead;
}
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
+{
+ return __sctp_mtu_payload(sp, NULL, mtu, extra);
+}
+
static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
{
return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
@@ -615,6 +624,48 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
}
+static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
+{
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0);
+}
+
+static inline void sctp_transport_pl_reset(struct sctp_transport *t)
+{
+ if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&
+ (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {
+ if (t->pl.state == SCTP_PL_DISABLED) {
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
+ }
+ } else {
+ if (t->pl.state != SCTP_PL_DISABLED) {
+ if (del_timer(&t->probe_timer))
+ sctp_transport_put(t);
+ t->pl.state = SCTP_PL_DISABLED;
+ }
+ }
+}
+
+static inline void sctp_transport_pl_update(struct sctp_transport *t)
+{
+ if (t->pl.state == SCTP_PL_DISABLED)
+ return;
+
+ if (del_timer(&t->probe_timer))
+ sctp_transport_put(t);
+
+ t->pl.state = SCTP_PL_BASE;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+}
+
+static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
+{
+ return t->pl.state != SCTP_PL_DISABLED;
+}
+
static inline bool sctp_newsk_ready(const struct sock *sk)
{
return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
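Editor's note: the behavioural point in __sctp_mtu_payload() is the new `(!t || t->encap_port)` test — with sp->udp_port set, the 8-byte UDP tunneling header is charged only when the given transport actually encapsulates, falling back to the old unconditional behaviour when no transport is passed. A standalone illustration of the arithmetic (IPv4 header sizes assumed; the SCTP common header is 12 bytes):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, sctphdr = 12, iphdr = 20, udphdr = 8;

	/* plain IPv4 transport: 1500 - (12 + 20) = 1468 bytes for chunks */
	printf("no encap:  %u\n", mtu - (sctphdr + iphdr));
	/* UDP-encapsulated transport: 1500 - (12 + 20 + 8) = 1460 bytes */
	printf("udp encap: %u\n", mtu - (sctphdr + iphdr + udphdr));
	return 0;
}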
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index fd223c94589a..2eb6d7c2c931 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -151,6 +151,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
/* Prototypes for timeout event state functions. */
sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_send_probe;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
@@ -225,11 +226,13 @@ struct sctp_chunk *sctp_make_new_encap_port(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
- const struct sctp_transport *transport);
+ const struct sctp_transport *transport,
+ __u32 probe_size);
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
const void *payload,
const size_t paylen);
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len);
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
__be16 cause_code, const void *payload,
@@ -310,6 +313,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
void sctp_generate_t3_rtx_event(struct timer_list *t);
void sctp_generate_heartbeat_event(struct timer_list *t);
void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_probe_event(struct timer_list *t);
void sctp_generate_proto_unreach_event(struct timer_list *t);
void sctp_ootb_pkt_free(struct sctp_packet *packet);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 1aa585216f34..32fc4a309df5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -177,6 +177,7 @@ struct sctp_sock {
* will be inherited by all new associations.
*/
__u32 hbinterval;
+ __u32 probe_interval;
__be16 udp_port;
__be16 encap_port;
@@ -385,6 +386,7 @@ struct sctp_sender_hb_info {
union sctp_addr daddr;
unsigned long sent_at;
__u64 hb_nonce;
+ __u32 probe_size;
};
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
@@ -461,7 +463,7 @@ struct sctp_af {
int saddr);
void (*from_sk) (union sctp_addr *,
struct sock *sk);
- void (*from_addr_param) (union sctp_addr *,
+ bool (*from_addr_param) (union sctp_addr *,
union sctp_addr_param *,
__be16 port, int iif);
int (*to_addr_param) (const union sctp_addr *,
@@ -656,6 +658,7 @@ struct sctp_chunk {
data_accepted:1, /* At least 1 chunk accepted */
auth:1, /* IN: was auth'ed | OUT: needs auth */
has_asconf:1, /* IN: have seen an asconf before */
+ pmtu_probe:1, /* Used by PLPMTUD, can be set in a HB chunk */
tsn_missing_report:2, /* Data chunk missing counter. */
fast_retransmit:2; /* Is this chunk fast retransmitted? */
};
@@ -858,6 +861,7 @@ struct sctp_transport {
* the destination address every heartbeat interval.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
/* SACK delay timeout */
unsigned long sackdelay;
@@ -934,6 +938,9 @@ struct sctp_transport {
/* Timer to handle reconf chunk rtx */
struct timer_list reconf_timer;
+ /* Timer to send a probe HB packet for PLPMTUD */
+ struct timer_list probe_timer;
+
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
@@ -976,6 +983,15 @@ struct sctp_transport {
char cacc_saw_newack;
} cacc;
+ struct {
+ __u16 pmtu;
+ __u16 probe_size;
+ __u16 probe_high;
+ __u8 probe_count:3;
+ __u8 raise_count:5;
+ __u8 state;
+ } pl; /* plpmtud related */
+
/* 64-bit random number sent with heartbeat. */
__u64 hb_nonce;
@@ -993,6 +1009,7 @@ void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_t3_rtx(struct sctp_transport *);
void sctp_transport_reset_hb_timer(struct sctp_transport *);
void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1007,6 +1024,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
+void sctp_transport_pl_recv(struct sctp_transport *t);
/* This is the structure we use to queue packets as they come into
@@ -1795,6 +1814,7 @@ struct sctp_association {
* will be inherited by all new transports.
*/
unsigned long hbinterval;
+ unsigned long probe_interval;
__be16 encap_port;
diff --git a/include/net/sock.h b/include/net/sock.h
index 42bc5e1a627f..8bdd80027ffb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}
static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}
@@ -2231,13 +2235,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
{
if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb_orphan(skb);
skb->destructor = sock_efree;
skb->sk = sk;
+ return true;
}
+ return false;
}
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
@@ -2264,12 +2270,19 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
static inline int sock_error(struct sock *sk)
{
int err;
- if (likely(!sk->sk_err))
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
return 0;
+
err = xchg(&sk->sk_err, 0);
return -err;
}
+void sk_error_report(struct sock *sk);
+
static inline unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
@@ -2741,6 +2754,9 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
void sock_def_readable(struct sock *sk);
int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
+void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
+int sock_set_timestamping(struct sock *sk, int optname, int val);
+
void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
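Editor's note: the sk_txhash and sk_err hunks above apply the same annotation discipline — lockless accesses that may legitimately race are wrapped in WRITE_ONCE()/READ_ONCE() (or data_race()) so KCSAN treats the race as intentional and the compiler cannot tear, fuse, or refetch the access. A minimal sketch of the pairing, using a hypothetical field:

#include <linux/compiler.h>
#include <linux/types.h>

struct example { u32 hash; };		/* hypothetical, for illustration */

static void writer_side(struct example *e, u32 val)
{
	WRITE_ONCE(e->hash, val);	/* pairs with the READ_ONCE() below */
}

static u32 reader_side(const struct example *e)
{
	return READ_ONCE(e->hash);	/* one untorn load; racing by design */
}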
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 505f1e18e9bf..473b0b0fa4ab 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -13,8 +13,9 @@ extern spinlock_t reuseport_lock;
struct sock_reuseport {
struct rcu_head rcu;
- u16 max_socks; /* length of socks */
- u16 num_socks; /* elements in socks */
+ u16 max_socks; /* length of socks */
+ u16 num_socks; /* elements in socks */
+ u16 num_closed_socks; /* closed elements in socks */
/* The last synq overflow event timestamp of this
* reuse->socks[] group.
*/
@@ -31,10 +32,14 @@ extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
+void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
+struct sock *reuseport_migrate_sock(struct sock *sk,
+ struct sock *migrating_sk,
+ struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index f1a5a9a3634d..e4cac9218ce1 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -202,6 +202,7 @@ enum switchdev_notifier_type {
struct switchdev_notifier_info {
struct net_device *dev;
struct netlink_ext_ack *extack;
+ const void *ctx;
};
struct switchdev_notifier_fdb_info {
@@ -268,19 +269,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack));
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj));
int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack));
#else
@@ -352,7 +353,7 @@ static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack))
{
@@ -363,7 +364,7 @@ static inline int
switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
return 0;
@@ -373,7 +374,7 @@ static inline int
switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index f051046ba034..f94b8bc26f9e 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@ struct tcf_vlan_params {
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
u8 tcfv_push_prio;
+ bool tcfv_push_prio_exists;
struct rcu_head rcu;
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d05193cb0d99..e668f1bf780d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -412,6 +412,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
+void tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss);
+void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
diff --git a/include/net/tls.h b/include/net/tls.h
index 3eccb525e8f7..be4b3e1cac46 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -79,8 +79,6 @@
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field) \
SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define __TLS_DEC_STATS(net, field) \
- __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field) \
SNMP_DEC_STATS((net)->mib.tls_statistics, field)
@@ -193,7 +191,11 @@ struct tls_offload_context_tx {
(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
enum tls_context_flags {
- TLS_RX_SYNC_RUNNING = 0,
+ /* tls_device_down was called after the netdev went down, device state
+ * was released, and kTLS works in software, even though rx_conf is
+ * still TLS_HW (needed for transition).
+ */
+ TLS_RX_DEV_DEGRADED = 0,
/* Unlike RX where resync is driven entirely by the core in TX only
* the driver knows when things went out of sync, so we need the flag
* to be atomic.
@@ -266,6 +268,7 @@ struct tls_context {
/* cache cold stuff */
struct proto *sk_proto;
+ struct sock *sk;
void (*sk_destruct)(struct sock *sk);
@@ -448,6 +451,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
@@ -463,7 +469,7 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
static inline void tls_err_abort(struct sock *sk, int err)
{
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
static inline bool tls_bigint_increment(unsigned char *seq, int len)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index a5bc214a49d9..5533f0ab2afc 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -170,6 +170,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
+struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9c0722c6d7ac..fff069d2ed1b 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -37,7 +37,7 @@ struct xdp_umem {
struct xsk_map {
struct bpf_map map;
spinlock_t lock; /* Synchronize map updates */
- struct xdp_sock *xsk_map[];
+ struct xdp_sock __rcu *xsk_map[];
};
struct xdp_sock {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index c58a6d4eb610..cbff7c2a9724 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -145,6 +145,12 @@ enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};
+enum xfrm_replay_mode {
+ XFRM_REPLAY_MODE_LEGACY,
+ XFRM_REPLAY_MODE_BMP,
+ XFRM_REPLAY_MODE_ESN,
+};
+
/* Full description of state of transformer. */
struct xfrm_state {
possible_net_t xs_net;
@@ -154,6 +160,7 @@ struct xfrm_state {
};
struct hlist_node bysrc;
struct hlist_node byspi;
+ struct hlist_node byseq;
refcount_t refcnt;
spinlock_t lock;
@@ -214,9 +221,8 @@ struct xfrm_state {
struct xfrm_replay_state preplay;
struct xfrm_replay_state_esn *preplay_esn;
- /* The functions for replay detection. */
- const struct xfrm_replay *repl;
-
+ /* replay detection mode */
+ enum xfrm_replay_mode repl_mode;
/* internal flag that only holds state for delayed aevent at the
* moment
*/
@@ -296,18 +302,6 @@ struct km_event {
struct net *net;
};
-struct xfrm_replay {
- void (*advance)(struct xfrm_state *x, __be32 net_seq);
- int (*check)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- int (*recheck)(struct xfrm_state *x,
- struct sk_buff *skb,
- __be32 net_seq);
- void (*notify)(struct xfrm_state *x, int event);
- int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
-};
-
struct xfrm_if_cb {
struct xfrm_if *(*decode_session)(struct sk_buff *skb,
unsigned short family);
@@ -387,7 +381,6 @@ void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
- char *description;
struct module *owner;
u8 proto;
u8 flags;
@@ -402,14 +395,12 @@ struct xfrm_type {
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
int (*reject)(struct xfrm_state *, struct sk_buff *,
const struct flowi *);
- int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
};
int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
struct xfrm_type_offload {
- char *description;
struct module *owner;
u8 proto;
void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
@@ -1024,6 +1015,7 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PROTOCOL 128
__u8 proto;
+ __u8 inner_ipproto;
};
struct sec_path {
@@ -1546,6 +1538,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x);
+u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
@@ -1570,7 +1563,6 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);
-int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
@@ -1581,7 +1573,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
}
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
@@ -1605,9 +1596,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr);
#ifdef CONFIG_XFRM
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
@@ -1721,6 +1709,12 @@ static inline int xfrm_policy_id2dir(u32 index)
}
#ifdef CONFIG_XFRM
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
+int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+void xfrm_replay_notify(struct xfrm_state *x, int event);
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+
static inline int xfrm_aevent_is_on(struct net *net)
{
struct sock *nlsk;
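Editor's note: with struct xfrm_replay gone, the per-state function pointers are replaced by the exported entry points above plus the repl_mode enum. A hedged sketch of the dispatch this implies — the per-mode helper names here are hypothetical, not from the patch:

int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq)
{
	switch (x->repl_mode) {
	case XFRM_REPLAY_MODE_LEGACY:
		return xfrm_replay_check_legacy(x, skb, net_seq); /* hypothetical */
	case XFRM_REPLAY_MODE_BMP:
		return xfrm_replay_check_bmp(x, skb, net_seq);	  /* hypothetical */
	case XFRM_REPLAY_MODE_ESN:
		return xfrm_replay_check_esn(x, skb, net_seq);	  /* hypothetical */
	}
	return -EINVAL;
}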
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index eaa8386dbc63..7a9a23e7a604 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
{
bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
- if (pool->dma_pages_cnt && cross_pg) {
+ if (likely(!cross_pg))
+ return false;
+
+ if (pool->dma_pages_cnt) {
return !(pool->dma_pages[addr >> PAGE_SHIFT] &
XSK_NEXT_PG_CONTIG_MASK);
}
- return false;
+
+ /* skb path */
+ return addr + len > pool->addrs_cnt;
}
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
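Editor's note: the rewrite of xp_desc_crosses_non_contig_pg() makes the page-cross test the fast path and only then consults the DMA page table (zero-copy) or addrs_cnt (skb path). A runnable illustration of the fast-path arithmetic, assuming a 4096-byte page:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption for the example */

static bool crosses_page(unsigned long addr, unsigned int len)
{
	return (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", crosses_page(4000, 200)); /* 1: 4000 + 200 = 4200 > 4096 */
	printf("%d\n", crosses_page(4096, 200)); /* 0: starts on a page boundary */
	return 0;
}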
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
new file mode 100644
index 000000000000..2b64c95f3be5
--- /dev/null
+++ b/include/soc/microchip/mpfs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Microchip PolarFire SoC (MPFS)
+ *
+ * Copyright (c) 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#ifndef __SOC_MPFS_H__
+#define __SOC_MPFS_H__
+
+#include <linux/types.h>
+#include <linux/of_device.h>
+
+struct mpfs_sys_controller;
+
+struct mpfs_mss_msg {
+ u8 cmd_opcode;
+ u16 cmd_data_size;
+ struct mpfs_mss_response *response;
+ u8 *cmd_data;
+ u16 mbox_offset;
+ u16 resp_offset;
+};
+
+struct mpfs_mss_response {
+ u32 resp_status;
+ u32 *resp_msg;
+ u16 resp_size;
+};
+
+#if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL)
+
+int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, void *msg);
+
+struct mpfs_sys_controller *mpfs_sys_controller_get(struct device_node *mailbox_node);
+
+#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
+
+#endif /* __SOC_MPFS_H__ */
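Editor's note: for context, a hedged usage sketch of the new system-controller API. The opcode, buffer sizes, and failure convention below are placeholders inferred from the structs above, not documented MSS commands:

#include <soc/microchip/mpfs.h>

static int mpfs_example_txn(struct device_node *mbox_np)
{
	struct mpfs_sys_controller *sysctrl;
	u32 resp_buf[4];
	struct mpfs_mss_response resp = {
		.resp_msg  = resp_buf,
		.resp_size = sizeof(resp_buf),
	};
	struct mpfs_mss_msg msg = {
		.cmd_opcode = 0x00,	/* placeholder opcode */
		.cmd_data_size = 0,	/* no command payload */
		.response = &resp,
	};

	sysctrl = mpfs_sys_controller_get(mbox_np);
	if (!sysctrl)			/* failure convention assumed */
		return -ENODEV;

	return mpfs_blocking_transaction(sysctrl, &msg);
}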
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 1358a0ceb4d0..0bc29c4516e7 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -81,7 +81,7 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */
#define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */
#define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */
-#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame follower */
+#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */
/* previous definitions kept for backwards-compatibility, do not use in new contributions */
#define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index a41dd8a0c730..c7237317a8b9 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -99,8 +99,7 @@ struct btrfs_space_info;
EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
- EM( COMMIT_TRANS, "COMMIT_TRANS") \
- EMe(FORCE_COMMIT_TRANS, "FORCE_COMMIT_TRANS")
+ EMe(COMMIT_TRANS, "COMMIT_TRANS")
/*
* First define the enums in the above macros to be exported to userspace via
@@ -654,34 +653,30 @@ DEFINE_EVENT(btrfs__writepage, __extent_writepage,
TRACE_EVENT(btrfs_writepage_end_io_hook,
- TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate),
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 end,
+ int uptodate),
- TP_ARGS(page, start, end, uptodate),
+ TP_ARGS(inode, start, end, uptodate),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( unsigned long, index )
__field( u64, start )
__field( u64, end )
__field( int, uptodate )
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
- __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host));
- __entry->index = page->index;
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid =
- BTRFS_I(page->mapping->host)->root->root_key.objectid;
+ __entry->root_objectid = inode->root->root_key.objectid;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
- "end=%llu uptodate=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->index,
- __entry->start,
+ __entry->ino, __entry->start,
__entry->end, __entry->uptodate)
);
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index c3d354702cb0..3d708dae1542 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class,
__entry->align = align;
),
- TP_printk("name=%s pfn=%lx page=%p count=%lu align=%u",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u",
__get_str(name),
__entry->pfn,
__entry->page,
@@ -60,7 +60,7 @@ TRACE_EVENT(cma_release,
__entry->count = count;
),
- TP_printk("name=%s pfn=%lx page=%p count=%lu",
+ TP_printk("name=%s pfn=0x%lx page=%p count=%lu",
__get_str(name),
__entry->pfn,
__entry->page,
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 796053e162d2..c47b63db124e 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -36,7 +36,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
__entry->s_dev = page->mapping->host->i_rdev;
),
- TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
+ TP_printk("dev %d:%d ino %lx page=%p pfn=0x%lx ofs=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
pfn_to_page(__entry->pfn),
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 829a75692cc0..ddc8c944f417 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -173,7 +173,7 @@ TRACE_EVENT(mm_page_free,
__entry->order = order;
),
- TP_printk("page=%p pfn=%lu order=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->order)
@@ -193,7 +193,7 @@ TRACE_EVENT(mm_page_free_batched,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page=%p pfn=%lu order=0",
+ TP_printk("page=%p pfn=0x%lx order=0",
pfn_to_page(__entry->pfn),
__entry->pfn)
);
@@ -219,7 +219,7 @@ TRACE_EVENT(mm_page_alloc,
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
@@ -245,7 +245,7 @@ DECLARE_EVENT_CLASS(mm_page,
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
__entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
@@ -278,7 +278,7 @@ TRACE_EVENT(mm_page_pcpu_drain,
__entry->migratetype = migratetype;
),
- TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+ TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -312,7 +312,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
get_pageblock_migratetype(page));
),
- TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+ TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
pfn_to_page(__entry->pfn),
__entry->pfn,
__entry->alloc_order,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 629c7a0eaff2..390270e00a1d 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -85,6 +85,12 @@
#define IF_HAVE_PG_ARCH_2(flag,string)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
+#endif
+
#define __def_pageflag_names \
{1UL << PG_locked, "locked" }, \
{1UL << PG_waiters, "waiters" }, \
@@ -112,7 +118,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
-IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" )
+IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \
+IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 775a46d0b0f0..6bf43176f14c 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -73,6 +73,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__field(u64, data_seq)
__field(u32, subflow_seq)
__field(u16, data_len)
+ __field(u16, csum)
__field(u8, use_map)
__field(u8, dsn64)
__field(u8, data_fin)
@@ -82,6 +83,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__field(u8, frozen)
__field(u8, reset_transient)
__field(u8, reset_reason)
+ __field(u8, csum_reqd)
),
TP_fast_assign(
@@ -89,6 +91,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__entry->data_seq = mpext->data_seq;
__entry->subflow_seq = mpext->subflow_seq;
__entry->data_len = mpext->data_len;
+ __entry->csum = (__force u16)mpext->csum;
__entry->use_map = mpext->use_map;
__entry->dsn64 = mpext->dsn64;
__entry->data_fin = mpext->data_fin;
@@ -98,16 +101,18 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
__entry->frozen = mpext->frozen;
__entry->reset_transient = mpext->reset_transient;
__entry->reset_reason = mpext->reset_reason;
+ __entry->csum_reqd = mpext->csum_reqd;
),
- TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u",
+ TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u",
__entry->data_ack, __entry->data_seq,
__entry->subflow_seq, __entry->data_len,
- __entry->use_map, __entry->dsn64,
- __entry->data_fin, __entry->use_ack,
- __entry->ack64, __entry->mpc_map,
- __entry->frozen, __entry->reset_transient,
- __entry->reset_reason)
+ __entry->csum, __entry->use_map,
+ __entry->dsn64, __entry->data_fin,
+ __entry->use_ack, __entry->ack64,
+ __entry->mpc_map, __entry->frozen,
+ __entry->reset_transient, __entry->reset_reason,
+ __entry->csum_reqd)
);
DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index ad0aa7f31675..ca534501158b 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -60,7 +60,7 @@ TRACE_EVENT(page_pool_state_release,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->release)
);
@@ -85,7 +85,7 @@ TRACE_EVENT(page_pool_state_hold,
__entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
__entry->pool, __entry->page, __entry->pfn, __entry->hold)
);
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index e1735fe7c76a..1d28431e85bd 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -46,7 +46,7 @@ TRACE_EVENT(mm_lru_insertion,
),
/* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s",
+ TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
__entry->page,
__entry->pfn,
__entry->lru,
@@ -75,7 +75,7 @@ TRACE_EVENT(mm_lru_activate,
),
/* Flag format is based on page-types.c formatting for pagemap */
- TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
+ TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn)
);
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index a966d4b5ab37..12c315782766 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -201,6 +201,66 @@ TRACE_EVENT(inet_sock_set_state,
show_tcp_state_name(__entry->newstate))
);
+TRACE_EVENT(inet_sk_error_report,
+
+ TP_PROTO(const struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __field(__u16, protocol)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->error = sk->sk_err;
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d",
+ show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->error)
+);
+
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 0dd9171d2ad8..c0d9844befd7 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy,
);
+TRACE_EVENT(spi_setup,
+ TP_PROTO(struct spi_device *spi, int status),
+ TP_ARGS(spi, status),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(unsigned int, bits_per_word)
+ __field(unsigned int, max_speed_hz)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->bits_per_word = spi->bits_per_word;
+ __entry->max_speed_hz = spi->max_speed_hz;
+ __entry->status = status;
+ ),
+
+ TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d",
+ __entry->bus_num, __entry->chip_select,
+ (__entry->mode & SPI_MODE_X_MASK),
+ (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (__entry->mode & SPI_3WIRE) ? "3wire, " : "",
+ (__entry->mode & SPI_LOOP) ? "loopback, " : "",
+ __entry->bits_per_word, __entry->max_speed_hz,
+ __entry->status)
+);
+
+TRACE_EVENT(spi_set_cs,
+ TP_PROTO(struct spi_device *spi, bool enable),
+ TP_ARGS(spi, enable),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->enable = enable;
+ ),
+
+ TP_printk("spi%d.%d %s%s",
+ __entry->bus_num, __entry->chip_select,
+ __entry->enable ? "activate" : "deactivate",
+ (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "")
+);
+
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ba94857eea11..521059d8dc0a 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -295,6 +295,82 @@ TRACE_EVENT(tcp_probe,
__entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
);
+#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
+ do { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in *v4 = (void *)__entry->saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->source; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \
+ v4 = (void *)__entry->daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = th->dest; \
+ v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ do { \
+ const struct iphdr *iph = ip_hdr(skb); \
+ \
+ if (iph->version == 6) { \
+ const struct tcphdr *th = (const struct tcphdr *)skb->data; \
+ struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->source; \
+ v6->sin6_addr = ipv6_hdr(skb)->saddr; \
+ v6 = (void *)__entry->daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = th->dest; \
+ v6->sin6_addr = ipv6_hdr(skb)->daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \
+ TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb)
+
+#endif
+
+/*
+ * tcp event with only skb
+ */
+DECLARE_EVENT_CLASS(tcp_event_skb,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS_SKB(__entry, skb);
+ ),
+
+ TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
+);
+
+DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
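Editor's note: tcp_bad_csum is the first user of the skb-only event class above. A hedged sketch of a call site — the surrounding drop logic is illustrative, not from this patch:

/* Assumes <linux/skbuff.h> and <trace/events/tcp.h> in the real file. */
static void example_rx_csum_check(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb)) {
		trace_tcp_bad_csum(skb);	/* logs src/dest from headers */
		kfree_skb(skb);			/* illustrative drop */
		return;
	}
	/* ... continue normal receive ... */
}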
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 2070df64958e..00d1180527d8 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -330,7 +330,7 @@ TRACE_EVENT(mm_vmscan_writepage,
page_is_file_lru(page));
),
- TP_printk("page=%p pfn=%lu flags=%s",
+ TP_printk("page=%p pfn=0x%lx flags=%s",
pfn_to_page(__entry->pfn),
__entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index 6782213778be..d0b3f0ea9ba1 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -9,9 +9,12 @@
#include <linux/tracepoint.h>
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET);
#define show_type(val) \
- __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" })
+ __print_symbolic(val, \
+ { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \
+ { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" })
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID);
TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST);
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index fcad3645a70b..c40fc97f9417 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -110,7 +110,11 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
u32 ifindex = 0, map_index = index;
if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+ /* Leave to_ifindex at 0 for a broadcast redirect,
+ * as tgt will be NULL.
+ */
+ if (tgt)
+ ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
ifindex = index;
map_index = 0;
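Editor's note: tgt can now be NULL because one redirect may target the whole devmap, via the BPF_F_BROADCAST / BPF_F_EXCLUDE_INGRESS flags added to bpf_redirect_map() in the uapi hunk further down. A minimal XDP program using them — the map name and sizing are ours:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 32);
	__type(key, __u32);
	__type(value, __u32);		/* target ifindex per slot */
} fwd_ports SEC(".maps");

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	/* key is ignored with BPF_F_BROADCAST; ingress port is skipped */
	return bpf_redirect_map(&fwd_ports, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";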
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 4dcd13d097a9..d588c244ec2f 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -122,6 +122,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
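Editor's note: SO_NETNS_COOKIE is read-only and returns the u64 cookie of the socket's network namespace. A minimal userspace sketch, with a fallback define for pre-5.14 headers:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71	/* fallback for older headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	uint64_t cookie = 0;
	socklen_t len = sizeof(cookie);

	if (fd >= 0 &&
	    getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) == 0)
		printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}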
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index cd2d8279a5e4..daa481729e9b 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -48,7 +48,7 @@
* 2500 - 2999 future user space (maybe integrity labels and related events)
*
* Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are
- * exclusively user space. 1300-2099 is kernel --> user space
+ * exclusively user space. 1300-2099 is kernel --> user space
* communication.
*/
#define AUDIT_GET 1000 /* Get status */
@@ -78,7 +78,7 @@
#define AUDIT_LAST_USER_MSG 1199
#define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */
#define AUDIT_LAST_USER_MSG2 2999
-
+
#define AUDIT_DAEMON_START 1200 /* Daemon startup record */
#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */
#define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index ec6d85a81744..bf9252c7381e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -527,6 +527,15 @@ union bpf_iter_link_info {
* Look up an element with the given *key* in the map referred to
* by the file descriptor *fd*, and if found, delete the element.
*
+ * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
+ * types, the *flags* argument needs to be set to 0, but for other
+ * map types, it may be specified as:
+ *
+ * **BPF_F_LOCK**
+ * Look up and delete the value of a spin-locked map
+ * without returning the lock. This must be specified if
+ * the elements contain a spinlock.
+ *
* The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
* implement this command as a "pop" operation, deleting the top
* element rather than one corresponding to *key*.
@@ -536,6 +545,10 @@ union bpf_iter_link_info {
* This command is only valid for the following map types:
* * **BPF_MAP_TYPE_QUEUE**
* * **BPF_MAP_TYPE_STACK**
+ * * **BPF_MAP_TYPE_HASH**
+ * * **BPF_MAP_TYPE_PERCPU_HASH**
+ * * **BPF_MAP_TYPE_LRU_HASH**
+ * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
*
* Return
* Returns zero on success. On error, -1 is returned and *errno*
@@ -837,6 +850,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -937,6 +951,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@@ -979,6 +994,8 @@ enum bpf_attach_type {
BPF_SK_LOOKUP,
BPF_XDP,
BPF_SK_SKB_VERDICT,
+ BPF_SK_REUSEPORT_SELECT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
__MAX_BPF_ATTACH_TYPE
};
@@ -1097,8 +1114,8 @@ enum bpf_link_type {
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD
- * insn[0].imm: map fd
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: 0
* insn[0].off: 0
* insn[1].off: 0
@@ -1106,15 +1123,19 @@ enum bpf_link_type {
* verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: offset into value
* insn[0].off: 0
* insn[1].off: 0
* ldimm64 rewrite: address of map[0]+offset
* verifier type: PTR_TO_MAP_VALUE
*/
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
* insn[0].imm: kernel btd id of VAR
* insn[1].imm: 0
@@ -1314,6 +1335,8 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
+ __u32 :32; /* pad */
+ __aligned_u64 fd_array; /* array of FDs */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -2534,8 +2557,12 @@ union bpf_attr {
* The lower two bits of *flags* are used as the return code if
* the map lookup fails. This is so that the return value can be
* one of the XDP program return codes up to **XDP_TX**, as chosen
- * by the caller. Any higher bits in the *flags* argument must be
- * unset.
+ * by the caller. The higher bits of *flags* can be set to
+ * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
+ *
+ * With BPF_F_BROADCAST the packet is broadcast to all the
+ * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
+ * interface is excluded when broadcasting.
*
* See also **bpf_redirect**\ (), which only supports redirecting
* to an ifindex, but doesn't require a map to do so.
@@ -4735,6 +4762,24 @@ union bpf_attr {
* be zero-terminated except when **str_size** is 0.
*
* Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ *
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -4903,6 +4948,9 @@ union bpf_attr {
FN(check_mtu), \
FN(for_each_map_elem), \
FN(snprintf), \
+ FN(sys_bpf), \
+ FN(btf_find_by_name_kind), \
+ FN(sys_close), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5080,6 +5128,12 @@ enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};
+/* Flags for bpf_redirect_map helper */
+enum {
+ BPF_F_BROADCAST = (1ULL << 3),
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+};
+
#define __bpf_md_ptr(type, name) \
union { \
type name; \
@@ -5364,6 +5418,20 @@ struct sk_reuseport_md {
__u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
+ /* When reuse->migrating_sk is NULL, it is selecting a sk for the
+ * new incoming connection request (e.g. selecting a listen sk for
+ * the received SYN in the TCP case). reuse->sk is one of the sk
+ * in the reuseport group. The bpf prog can use reuse->sk to learn
+ * the local listening ip/port without looking into the skb.
+ *
+ * When reuse->migrating_sk is not NULL, reuse->sk is closed and
+ * reuse->migrating_sk is the socket that needs to be migrated
+ * to another listening socket. migrating_sk could be a fullsock
+ * sk that is fully established or a reqsk that is in the middle
+ * of a 3-way handshake.
+ */
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(struct bpf_sock *, migrating_sk);
};
#define BPF_TAG_SIZE 8
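Editor's note: a hedged sketch of a program for the new BPF_SK_REUSEPORT_SELECT_OR_MIGRATE attach type, exercising the migrating_sk semantics described in the comment above. The map layout and slot policy are ours; the section name follows libbpf's convention for this attach type:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} listeners SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
	__u32 idx = 0;		/* trivial policy: new connections to slot 0 */

	if (md->migrating_sk)	/* NULL for plain SYN selection */
		idx = 1;	/* migrated children go to slot 1 */

	if (bpf_sk_select_reuseport(md, &listeners, &idx, 0))
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";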
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5df73001aad4..22cd037123fa 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -154,7 +154,7 @@ struct btrfs_scrub_progress {
__u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */
__u64 read_errors; /* # of read errors encountered (EIO) */
__u64 csum_errors; /* # of failed csum checks */
- __u64 verify_errors; /* # of occurences, where the metadata
+ __u64 verify_errors; /* # of occurrences, where the metadata
* of a tree block did not match the
* expected values, like generation or
* logical */
@@ -174,7 +174,7 @@ struct btrfs_scrub_progress {
__u64 last_physical; /* last physical address scrubbed. In
* case a scrub was aborted, this can
* be used to restart the scrub */
- __u64 unverified_errors; /* # of occurences where a read for a
+ __u64 unverified_errors; /* # of occurrences where a read for a
* full (64k) bio failed, but the re-
* check succeeded for each 4k piece.
* Intermittent error. */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 58d7cff9afb1..ccdb40fe40dc 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -59,7 +59,7 @@
/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL
-/* orhpan objectid for tracking unlinked/truncated files */
+/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
/* does write ahead logging to speed up fsyncs */
@@ -275,7 +275,7 @@
#define BTRFS_PERSISTENT_ITEM_KEY 249
/*
- * Persistantly stores the device replace state in the device tree.
+ * Persistently stores the device replace state in the device tree.
* The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
*/
#define BTRFS_DEV_REPLACE_KEY 250
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index c7535352fef6..90801ada2bbe 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -123,8 +123,8 @@ struct can_frame {
/*
* defined bits for canfd_frame.flags
*
- * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
- * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * The use of struct canfd_frame implies the FD Frame (FDF) bit to
+ * be set in the CAN frame bitstream on the wire. The FDF bit switch turns
* the CAN controllers bitstream processor into the CAN FD mode which creates
* two new options within the CAN FD frame specification:
*
@@ -135,9 +135,18 @@ struct can_frame {
* controller only the CANFD_BRS bit is relevant for real CAN controllers when
* building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
* sense for virtual CAN interfaces to test applications with echoed frames.
+ *
+ * The struct can_frame and struct canfd_frame intentionally share the same
+ * layout to be able to write CAN frame content into a CAN FD frame structure.
+ * When this is done the former differentiation via CAN_MTU / CANFD_MTU gets
+ * lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of
+ * using struct canfd_frame for mixed CAN / CAN FD content (dual use).
+ * N.B. the kernel APIs do NOT provide mixed CAN / CAN FD content inside
+ * struct canfd_frame, therefore the CANFD_FDF flag is disregarded by Linux.
*/
#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
+#define CANFD_FDF 0x04 /* mark CAN FD for dual use of struct canfd_frame */
/**
* struct canfd_frame - CAN flexible data rate frame structure
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 37590027b604..c3baaea0b8ef 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,7 +1665,7 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
if (*audio_out_compensated == 3 && msg->len >= 7)
*audio_out_delay = msg->msg[6];
else
- *audio_out_delay = 0;
+ *audio_out_delay = 1;
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index f6008b2fa60f..32f53a0069d6 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -126,6 +126,11 @@ enum devlink_command {
DEVLINK_CMD_HEALTH_REPORTER_TEST,
+ DEVLINK_CMD_RATE_GET, /* can dump */
+ DEVLINK_CMD_RATE_SET,
+ DEVLINK_CMD_RATE_NEW,
+ DEVLINK_CMD_RATE_DEL,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -206,6 +211,11 @@ enum devlink_port_flavour {
*/
};
+enum devlink_rate_type {
+ DEVLINK_RATE_TYPE_LEAF,
+ DEVLINK_RATE_TYPE_NODE,
+};
+
enum devlink_param_cmode {
DEVLINK_PARAM_CMODE_RUNTIME,
DEVLINK_PARAM_CMODE_DRIVERINIT,
@@ -534,6 +544,13 @@ enum devlink_attr {
DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */
DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_RATE_TYPE, /* u16 */
+ DEVLINK_ATTR_RATE_TX_SHARE, /* u64 */
+ DEVLINK_ATTR_RATE_TX_MAX, /* u64 */
+ DEVLINK_ATTR_RATE_NODE_NAME, /* string */
+ DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cfef6b08169a..67aa7134b301 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -233,7 +233,7 @@ enum tunable_id {
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -297,7 +297,7 @@ enum phy_tunable_id {
ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 825cfda1c5d5..c7135c9c37a5 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -675,7 +675,7 @@ enum {
ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */
ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */
ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */
- ETHTOOL_A_MODULE_EEPROM_DATA, /* nested */
+ ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */
__ETHTOOL_A_MODULE_EEPROM_CNT,
ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1)
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5e..235e5b2facaa 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_LOCK_PI2 13
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -32,6 +33,7 @@
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)
#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
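Editor's note: FUTEX_LOCK_PI2 takes the same arguments as FUTEX_LOCK_PI but honours FUTEX_CLOCK_REALTIME, so the absolute timeout can finally be CLOCK_MONOTONIC based (the default for the new op). A minimal wrapper sketch:

#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2 13	/* fallback for pre-5.14 headers */
#endif

/* Acquire a PI futex with an absolute CLOCK_MONOTONIC timeout. */
static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG,
		       0, abs_timeout, NULL, 0);
}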
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index c1da8244c5e1..163c0998aec9 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -20,7 +20,6 @@
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/in.h>
#include <linux/if.h>
#include <linux/in6.h>
@@ -154,7 +153,7 @@ struct icmp_ext_echo_iio {
struct {
struct icmp_ext_echo_ctype3_hdr ctype3_hdr;
union {
- struct in_addr ipv4_addr;
+ __be32 ipv4_addr;
struct in6_addr ipv6_addr;
} ip_addr;
} addr;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 13d59c51ef5b..6b56a7549531 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -627,6 +627,8 @@ enum {
MDBA_ROUTER_PATTR_UNSPEC,
MDBA_ROUTER_PATTR_TIMER,
MDBA_ROUTER_PATTR_TYPE,
+ MDBA_ROUTER_PATTR_INET_TIMER,
+ MDBA_ROUTER_PATTR_INET6_TIMER,
__MDBA_ROUTER_PATTR_MAX
};
#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cd5b382a4138..4882e81514b6 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -341,6 +341,13 @@ enum {
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+
__IFLA_MAX
};
@@ -1236,6 +1243,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index ee93428ced9a..225ec87d4f22 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -611,6 +611,7 @@
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index e1ae46683301..162ff99ed2cb 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -280,6 +280,7 @@ struct io_uring_params {
#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
+#define IORING_FEAT_RSRC_TAGS (1U << 10)
/*
* io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
IORING_UNREGISTER_PERSONALITY = 10,
IORING_REGISTER_RESTRICTIONS = 11,
IORING_REGISTER_ENABLE_RINGS = 12,
- IORING_REGISTER_RSRC = 13,
- IORING_REGISTER_RSRC_UPDATE = 14,
+
+ /* extended with tagging */
+ IORING_REGISTER_FILES2 = 13,
+ IORING_REGISTER_FILES_UPDATE2 = 14,
+ IORING_REGISTER_BUFFERS2 = 15,
+ IORING_REGISTER_BUFFERS_UPDATE = 16,
/* this goes last */
IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
-enum {
- IORING_RSRC_FILE = 0,
- IORING_RSRC_BUFFER = 1,
-};
-
struct io_uring_rsrc_register {
- __u32 type;
__u32 nr;
+ __u32 resv;
+ __u64 resv2;
__aligned_u64 data;
__aligned_u64 tags;
};
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
__u32 resv;
__aligned_u64 data;
__aligned_u64 tags;
- __u32 type;
__u32 nr;
+ __u32 resv2;
};
/* Skip updating fd indexes set to this value in the fd table */
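
With the generic type field gone, each resource kind gets its own register opcode, and nr_args carries the structure size so the layout stays extensible. A hedged sketch of registering a fixed-file table with tags through the raw syscall; register_files2() and its arguments are our own assumptions:

#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_files2(int ring_fd, const int *fds,
			   const unsigned long long *tags, unsigned int nr)
{
	struct io_uring_rsrc_register reg;

	memset(&reg, 0, sizeof(reg));		/* resv/resv2 must be zero */
	reg.nr = nr;
	reg.data = (unsigned long long)(uintptr_t)fds;
	reg.tags = (unsigned long long)(uintptr_t)tags;

	/* nr_args is the struct size, keeping the ABI extendible. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES2, &reg, sizeof(reg));
}
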
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3fd9a7e9d90c..d9e4aabcb31a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -279,6 +280,9 @@ struct kvm_xen_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -382,6 +386,25 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ } emulation_failure;
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -1082,6 +1105,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_SGX_ATTRIBUTE 196
#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1427,6 +1457,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1620,6 +1651,9 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
+
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
@@ -1879,8 +1913,8 @@ struct kvm_hyperv_eventfd {
* conversion after harvesting an entry. Also, it must not skip any
* dirty bits, so that dirty bits are always harvested in sequence.
*/
-#define KVM_DIRTY_GFN_F_DIRTY BIT(0)
-#define KVM_DIRTY_GFN_F_RESET BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
#define KVM_DIRTY_GFN_F_MASK 0x3
/*
@@ -1898,4 +1932,76 @@ struct kvm_dirty_gfn {
#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains the statistics
+ * name string including the trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file
+ * pointed to by the vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the
+ * file pointed to by the vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed to by the vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from the stats fd before any
+ * other reads. It is used by userspace to discover all the information about
+ * the vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
+
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistic.
+ * @flags: Annotations of the stats, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stat.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stat from the start of the stat structure
+ * in struct kvm or struct kvm_vcpu.
+ * @unused: Unused field for future usage. Always 0 for now.
+ * @name: The name string for the stats. Its size is indicated by the
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 unused;
+ char name[];
+};
+
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+
#endif /* __LINUX_KVM_H */
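
The intended read protocol for KVM_CAP_BINARY_STATS_FD follows directly from the header: fetch the fd, read the header at offset 0, then use its offsets to locate the id string, descriptors and data. A rough sketch, with error handling elided and the data-offset interpretation taken from the accompanying KVM documentation:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void dump_stats_layout(int vm_fd)
{
	struct kvm_stats_header hdr;
	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

	pread(stats_fd, &hdr, sizeof(hdr), 0);
	printf("%u stats; descriptors at %u, data at %u\n",
	       hdr.num_desc, hdr.desc_offset, hdr.data_offset);

	/* Each descriptor occupies sizeof(struct kvm_stats_desc) +
	 * hdr.name_size bytes; a stat's values are @size __u64s read
	 * from hdr.data_offset + desc->offset. */
	close(stats_fd);
}
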
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 8b86609849b9..960c7e93d1a9 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -29,6 +29,7 @@
#define KVM_HC_CLOCK_PAIRING 9
#define KVM_HC_SEND_IPI 10
#define KVM_HC_SCHED_YIELD 11
+#define KVM_HC_MAP_GPA_RANGE 12
/*
* hypercalls use architecture specific
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index c45a4eaea667..9919f2062b14 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* lirc.h - linux infrared remote control header file
- * last modified 2010/07/13 by Jarod Wilson
*/
#ifndef _LINUX_LIRC_H
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index e6524ead2b7b..dd7a166fdf9c 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -120,6 +120,7 @@ enum fsconfig_command {
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
/*
* mount_setattr()
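
MOUNT_ATTR_NOSYMFOLLOW gives mount_setattr() the same knob the MS_NOSYMFOLLOW mount flag provides. A sketch, assuming a libc that exposes the mount_setattr(2) syscall number:

#include <fcntl.h>
#include <linux/mount.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int make_nosymfollow(const char *path)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set = MOUNT_ATTR_NOSYMFOLLOW;

	/* Last argument is the structure size, for extensibility. */
	return syscall(SYS_mount_setattr, AT_FDCWD, path, 0,
		       &attr, sizeof(attr));
}
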
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 8eb3c0844bff..7b05f7102321 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -105,6 +105,7 @@ struct mptcp_info {
__u64 mptcpi_rcv_nxt;
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
+ __u8 mptcpi_csum_enabled;
};
/*
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 1fb4ca18ffbb..e94d1fa554cb 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -813,11 +813,13 @@ enum nft_exthdr_flags {
* @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers
 * @NFT_EXTHDR_OP_TCPOPT: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
+ * @NFT_EXTHDR_OP_SCTP: match against sctp chunks
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
+ NFT_EXTHDR_OP_SCTP,
__NFT_EXTHDR_OP_MAX
};
#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
@@ -1194,6 +1196,21 @@ enum nft_counter_attributes {
#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
/**
+ * enum nft_last_attributes - nf_tables last expression netlink attributes
+ *
+ * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32)
+ * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64)
+ */
+enum nft_last_attributes {
+ NFTA_LAST_UNSPEC,
+ NFTA_LAST_SET,
+ NFTA_LAST_MSECS,
+ NFTA_LAST_PAD,
+ __NFTA_LAST_MAX
+};
+#define NFTA_LAST_MAX (__NFTA_LAST_MAX - 1)
+
+/**
* enum nft_log_attributes - nf_tables log expression netlink attributes
*
* @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h
index 5bc960f220b3..6cd58cd2a6f0 100644
--- a/include/uapi/linux/netfilter/nfnetlink.h
+++ b/include/uapi/linux/netfilter/nfnetlink.h
@@ -60,7 +60,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_CTHELPER 9
#define NFNL_SUBSYS_NFTABLES 10
#define NFNL_SUBSYS_NFT_COMPAT 11
-#define NFNL_SUBSYS_COUNT 12
+#define NFNL_SUBSYS_HOOK 12
+#define NFNL_SUBSYS_COUNT 13
/* Reserved control nfnetlink messages */
#define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE
diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h
new file mode 100644
index 000000000000..912ec60b26b0
--- /dev/null
+++ b/include/uapi/linux/netfilter/nfnetlink_hook.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _NFNL_HOOK_H_
+#define _NFNL_HOOK_H_
+
+enum nfnl_hook_msg_types {
+ NFNL_MSG_HOOK_GET,
+ NFNL_MSG_HOOK_MAX,
+};
+
+/**
+ * enum nfnl_hook_attributes - netfilter hook netlink attributes
+ *
+ * @NFNLA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFNLA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFNLA_HOOK_DEV: netdevice name (NLA_STRING)
+ * @NFNLA_HOOK_FUNCTION_NAME: hook function name (NLA_STRING)
+ * @NFNLA_HOOK_MODULE_NAME: kernel module that registered this hook (NLA_STRING)
+ * @NFNLA_HOOK_CHAIN_INFO: basechain hook metadata (NLA_NESTED)
+ */
+enum nfnl_hook_attributes {
+ NFNLA_HOOK_UNSPEC,
+ NFNLA_HOOK_HOOKNUM,
+ NFNLA_HOOK_PRIORITY,
+ NFNLA_HOOK_DEV,
+ NFNLA_HOOK_FUNCTION_NAME,
+ NFNLA_HOOK_MODULE_NAME,
+ NFNLA_HOOK_CHAIN_INFO,
+ __NFNLA_HOOK_MAX
+};
+#define NFNLA_HOOK_MAX (__NFNLA_HOOK_MAX - 1)
+
+/**
+ * enum nfnl_hook_chain_info_attributes - chain description
+ *
+ * @NFNLA_HOOK_INFO_DESC: nft chain and table name (enum nft_table_attributes) (NLA_NESTED)
+ * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32)
+ */
+enum nfnl_hook_chain_info_attributes {
+ NFNLA_HOOK_INFO_UNSPEC,
+ NFNLA_HOOK_INFO_DESC,
+ NFNLA_HOOK_INFO_TYPE,
+ __NFNLA_HOOK_INFO_MAX,
+};
+#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
+
+/**
+ * enum nfnl_hook_chaintype - chain type
+ *
+ * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain
+ */
+enum nfnl_hook_chaintype {
+ NFNL_HOOK_TYPE_NFTABLES = 0x1,
+};
+
+#endif /* _NFNL_HOOK_H_ */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 3d94269bbfa8..4c0cde075c27 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -91,9 +91,10 @@ struct nlmsghdr {
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
-#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
+#define NLMSG_DATA(nlh) ((void *)(((char *)nlh) + NLMSG_HDRLEN))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
- (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
+ (struct nlmsghdr *)(((char *)(nlh)) + \
+ NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len))
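
The macro cleanup above is cosmetic (NLMSG_LENGTH(0) and NLMSG_HDRLEN are the same value) and their use is unchanged. For reference, the canonical receive loop these macros support:

#include <linux/netlink.h>

static void walk_messages(void *buf, int len)
{
	struct nlmsghdr *nlh;
	void *payload;

	for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;
		payload = NLMSG_DATA(nlh);	/* starts NLMSG_HDRLEN in */
		(void)payload;			/* dispatch on nlmsg_type */
	}
}
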
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f962c06e9818..db474994fa73 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -3654,6 +3654,8 @@ enum nl80211_mpath_info {
* defined
* @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16),
* given for all 6 GHz band channels
+ * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are
+ * advertised on this band/for this iftype (binary)
* @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
*/
enum nl80211_band_iftype_attr {
@@ -3665,6 +3667,7 @@ enum nl80211_band_iftype_attr {
NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
+ NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
/* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -6912,6 +6915,9 @@ enum nl80211_peer_measurement_ftm_capa {
* @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only
* valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or
* %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set.
+ * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
+ * responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
+ * or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -6931,6 +6937,7 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED,
NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
+ NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 18a9f59dc067..967d9c55323d 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -259,4 +259,12 @@ struct prctl_mm_map {
#define PR_PAC_SET_ENABLED_KEYS 60
#define PR_PAC_GET_ENABLED_KEYS 61
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+
#endif /* _LINUX_PRCTL_H */
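
The PR_SCHED_CORE sub-commands are driven as prctl(PR_SCHED_CORE, cmd, pid, type, 0). A hedged sketch of the create-and-share flow; the type argument (0 here) selects a single task and follows the kernel's enum pid_type numbering, an assumption taken from the core-scheduling documentation since this header does not export those constants:

#include <sys/prctl.h>
#include <sys/types.h>

#ifndef PR_SCHED_CORE			/* fallback for older sys/prctl.h */
#define PR_SCHED_CORE		62
#define PR_SCHED_CORE_CREATE	1
#define PR_SCHED_CORE_SHARE_TO	2
#endif

static int isolate_with(pid_t helper)
{
	/* Give the calling task a fresh core-scheduling cookie... */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
		return -1;
	/* ...then push that cookie to "helper", so only tasks sharing the
	 * cookie may run concurrently on SMT siblings of a core. */
	return prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, helper, 0, 0);
}
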
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index cb78e7a739da..c4ff1ebd8bcc 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -141,6 +141,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
#define SCTP_REMOTE_UDP_ENCAPS_PORT 132
+#define SCTP_PLPMTUD_PROBE_INTERVAL 133
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -1213,4 +1214,11 @@ enum sctp_sched_type {
SCTP_SS_MAX = SCTP_SS_RR
};
+/* Probe Interval socket option */
+struct sctp_probeinterval {
+ sctp_assoc_t spi_assoc_id;
+ struct sockaddr_storage spi_address;
+ __u32 spi_interval;
+};
+
#endif /* _UAPI_SCTP_H */
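
The struct pairs with the new SCTP_PLPMTUD_PROBE_INTERVAL option: a non-zero spi_interval (in milliseconds) enables PLPMTUD probing, zero disables it. A sketch, with sd and assoc assumed to come from an established SCTP socket:

#include <linux/sctp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_probe_interval(int sd, sctp_assoc_t assoc, __u32 msecs)
{
	struct sctp_probeinterval pi;

	memset(&pi, 0, sizeof(pi));	/* zero spi_address: all transports */
	pi.spi_assoc_id = assoc;
	pi.spi_interval = msecs;	/* 0 disables probing */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
			  &pi, sizeof(pi));
}
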
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 6ba18b82a02e..78074254ab98 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -115,6 +115,7 @@ struct seccomp_notif_resp {
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
/**
* struct seccomp_notif_addfd
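
SECCOMP_ADDFD_FLAG_SEND closes a race in the user-notification flow: previously a supervisor had to issue the addfd ioctl and then a separate SECCOMP_IOCTL_NOTIF_SEND, during which the target could be killed. A sketch of the combined operation; notify_fd and the request id are assumed to come from the usual SECCOMP_IOCTL_NOTIF_RECV handshake:

#include <linux/seccomp.h>
#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int addfd_and_send(int notify_fd, __u64 id, int srcfd)
{
	struct seccomp_notif_addfd addfd;

	memset(&addfd, 0, sizeof(addfd));
	addfd.id = id;				/* cookie from the request */
	addfd.srcfd = srcfd;
	addfd.flags = SECCOMP_ADDFD_FLAG_SEND;	/* install + reply at once */

	/* On success the return value is the fd number installed in the
	 * target, which also becomes the intercepted syscall's result. */
	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}
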
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
index 5ae3ace84de0..332b18f318f8 100644
--- a/include/uapi/linux/seg6_local.h
+++ b/include/uapi/linux/seg6_local.h
@@ -64,6 +64,8 @@ enum {
SEG6_LOCAL_ACTION_END_AM = 14,
/* custom BPF action */
SEG6_LOCAL_ACTION_END_BPF = 15,
+ /* decap and lookup of DA in v4 or v6 table */
+ SEG6_LOCAL_ACTION_END_DT46 = 16,
__SEG6_LOCAL_ACTION_MAX,
};
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 3e68da07fba2..0f7f87c70baf 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -47,6 +47,8 @@ enum {
SMC_NETLINK_GET_LGR_SMCD,
SMC_NETLINK_GET_DEV_SMCD,
SMC_NETLINK_GET_DEV_SMCR,
+ SMC_NETLINK_GET_STATS,
+ SMC_NETLINK_GET_FBACK_STATS,
};
/* SMC_GENL_FAMILY top level attributes */
@@ -58,6 +60,8 @@ enum {
SMC_GEN_LGR_SMCD, /* nest */
SMC_GEN_DEV_SMCD, /* nest */
SMC_GEN_DEV_SMCR, /* nest */
+ SMC_GEN_STATS, /* nest */
+ SMC_GEN_FBACK_STATS, /* nest */
__SMC_GEN_MAX,
SMC_GEN_MAX = __SMC_GEN_MAX - 1
};
@@ -159,4 +163,83 @@ enum {
SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1
};
+/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */
+/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */
+enum {
+ SMC_NLA_STATS_PLOAD_PAD,
+ SMC_NLA_STATS_PLOAD_8K, /* u64 */
+ SMC_NLA_STATS_PLOAD_16K, /* u64 */
+ SMC_NLA_STATS_PLOAD_32K, /* u64 */
+ SMC_NLA_STATS_PLOAD_64K, /* u64 */
+ SMC_NLA_STATS_PLOAD_128K, /* u64 */
+ SMC_NLA_STATS_PLOAD_256K, /* u64 */
+ SMC_NLA_STATS_PLOAD_512K, /* u64 */
+ SMC_NLA_STATS_PLOAD_1024K, /* u64 */
+ SMC_NLA_STATS_PLOAD_G_1024K, /* u64 */
+ __SMC_NLA_STATS_PLOAD_MAX,
+ SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1
+};
+
+/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */
+enum {
+ SMC_NLA_STATS_RMB_PAD,
+ SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_SIZE_SM_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_PEER_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_FULL_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_REUSE_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_ALLOC_CNT, /* u64 */
+ SMC_NLA_STATS_RMB_DGRADE_CNT, /* u64 */
+ __SMC_NLA_STATS_RMB_MAX,
+ SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1
+};
+
+/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */
+enum {
+ SMC_NLA_STATS_T_PAD,
+ SMC_NLA_STATS_T_TX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_SIZE, /* nest */
+ SMC_NLA_STATS_T_TXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_RXPLOAD_SIZE, /* nest */
+ SMC_NLA_STATS_T_TX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_RX_RMB_STATS, /* nest */
+ SMC_NLA_STATS_T_CLNT_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_CLNT_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V1_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SRV_V2_SUCC, /* u64 */
+ SMC_NLA_STATS_T_SENDPAGE_CNT, /* u64 */
+ SMC_NLA_STATS_T_SPLICE_CNT, /* u64 */
+ SMC_NLA_STATS_T_CORK_CNT, /* u64 */
+ SMC_NLA_STATS_T_NDLY_CNT, /* u64 */
+ SMC_NLA_STATS_T_URG_DATA_CNT, /* u64 */
+ SMC_NLA_STATS_T_RX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_TX_BYTES, /* u64 */
+ SMC_NLA_STATS_T_RX_CNT, /* u64 */
+ SMC_NLA_STATS_T_TX_CNT, /* u64 */
+ __SMC_NLA_STATS_T_MAX,
+ SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1
+};
+
+/* SMC_GEN_STATS attributes */
+enum {
+ SMC_NLA_STATS_PAD,
+ SMC_NLA_STATS_SMCD_TECH, /* nest */
+ SMC_NLA_STATS_SMCR_TECH, /* nest */
+ SMC_NLA_STATS_CLNT_HS_ERR_CNT, /* u64 */
+ SMC_NLA_STATS_SRV_HS_ERR_CNT, /* u64 */
+ __SMC_NLA_STATS_MAX,
+ SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1
+};
+
+/* SMC_GEN_FBACK_STATS attributes */
+enum {
+ SMC_NLA_FBACK_STATS_PAD,
+ SMC_NLA_FBACK_STATS_TYPE, /* u8 */
+ SMC_NLA_FBACK_STATS_SRV_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_CLNT_CNT, /* u64 */
+ SMC_NLA_FBACK_STATS_RSN_CODE, /* u32 */
+ SMC_NLA_FBACK_STATS_RSN_CNT, /* u16 */
+ __SMC_NLA_FBACK_STATS_MAX,
+ SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1
+};
#endif /* _UAPI_LINUX_SMC_H */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 26fc60ce9298..904909d020e2 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -290,6 +290,8 @@ enum
LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */
LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */
+ LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */
+ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
index fbcce04abfe9..08f46b60b151 100644
--- a/include/uapi/linux/surface_aggregator/cdev.h
+++ b/include/uapi/linux/surface_aggregator/cdev.h
@@ -6,7 +6,7 @@
* device. This device provides direct user-space access to the SSAM EC.
* Intended for debugging and development.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
@@ -73,6 +73,75 @@ struct ssam_cdev_request {
} response;
} __attribute__((__packed__));
-#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request)
+/**
+ * struct ssam_cdev_notifier_desc - Notifier descriptor.
+ * @priority: Priority value determining the order in which notifier
+ * callbacks will be called. A higher value means higher
+ * priority, i.e. the associated callback will be executed
+ * earlier than other (lower priority) callbacks.
+ * @target_category: The event target category for which this notifier should
+ * receive events.
+ *
+ * Specifies the notifier that should be registered or unregistered,
+ * specifically with which priority and for which target category of events.
+ */
+struct ssam_cdev_notifier_desc {
+ __s32 priority;
+ __u8 target_category;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event_desc - Event descriptor.
+ * @reg: Registry via which the event will be enabled/disabled.
+ * @reg.target_category: Target category for the event registry requests.
+ * @reg.target_id: Target ID for the event registry requests.
+ * @reg.cid_enable: Command ID for the event-enable request.
+ * @reg.cid_disable: Command ID for the event-disable request.
+ * @id: ID specifying the event.
+ * @id.target_category: Target category of the event source.
+ * @id.instance: Instance ID of the event source.
+ * @flags: Flags used for enabling the event.
+ *
+ * Specifies which event should be enabled/disabled and how to do that.
+ */
+struct ssam_cdev_event_desc {
+ struct {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 cid_enable;
+ __u8 cid_disable;
+ } reg;
+
+ struct {
+ __u8 target_category;
+ __u8 instance;
+ } id;
+
+ __u8 flags;
+} __attribute__((__packed__));
+
+/**
+ * struct ssam_cdev_event - SSAM event sent by the EC.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_cdev_event {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 length;
+ __u8 data[];
+} __attribute__((__packed__));
+
+#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request)
+#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc)
+#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc)
+#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc)
#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
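
Together the new ioctl pairs let a debugging client receive EC events: register a notifier for a target category, then enable the event source through its registry. A rough sketch; the priority and category values are placeholders:

#include <linux/surface_aggregator/cdev.h>
#include <sys/ioctl.h>

static int listen_for_category(int cdev_fd, __u8 tc)
{
	struct ssam_cdev_notifier_desc nd = {
		.priority = 1,			/* placeholder */
		.target_category = tc,
	};

	if (ioctl(cdev_fd, SSAM_CDEV_NOTIF_REGISTER, &nd))
		return -1;

	/* Matching events should then be readable from the cdev as
	 * struct ssam_cdev_event records followed by @length payload
	 * bytes (an assumption based on this event ABI). */
	return 0;
}
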
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index bafbeb1a2624..650480f41f1d 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -80,8 +80,8 @@
struct uffdio_zeropage)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
-#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \
- struct uffdio_continue)
+#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
+ struct uffdio_continue)
/* read() structure */
struct uffd_msg {
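
The direction fix matters because the kernel writes the result back into the request: struct uffdio_continue carries a mapped field reporting how many bytes were resolved. A minimal sketch of resolving a minor fault:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long resolve_minor_fault(int uffd, unsigned long addr,
				unsigned long len)
{
	struct uffdio_continue cont = {
		.range = { .start = addr, .len = len },
	};

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
		return -1;
	return cont.mapped;	/* written back by the kernel, hence _IOWR */
}
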
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index d43bec5f1afd..fdf97a6d7d18 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,7 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/const.h>
#include <linux/types.h>
/* Control classes */
@@ -1602,30 +1603,30 @@ struct v4l2_ctrl_h264_decode_params {
#define V4L2_FWHT_VERSION 3
/* Set if this is an interlaced format */
-#define V4L2_FWHT_FL_IS_INTERLACED BIT(0)
+#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0)
/* Set if this is a bottom-first (NTSC) interlaced format */
-#define V4L2_FWHT_FL_IS_BOTTOM_FIRST BIT(1)
+#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1)
/* Set if each 'frame' contains just one field */
-#define V4L2_FWHT_FL_IS_ALTERNATE BIT(2)
+#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2)
/*
* If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this
* 'frame' is the bottom field, else it is the top field.
*/
-#define V4L2_FWHT_FL_IS_BOTTOM_FIELD BIT(3)
+#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3)
/* Set if the Y' plane is uncompressed */
-#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED BIT(4)
+#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4)
/* Set if the Cb plane is uncompressed */
-#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED BIT(5)
+#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5)
/* Set if the Cr plane is uncompressed */
-#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED BIT(6)
+#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6)
/* Set if the chroma plane is full height, if cleared it is half height */
-#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT BIT(7)
+#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7)
/* Set if the chroma plane is full width, if cleared it is half width */
-#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH BIT(8)
+#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8)
/* Set if the alpha plane is uncompressed */
-#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9)
+#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9)
/* Set if this is an I Frame */
-#define V4L2_FWHT_FL_I_FRAME BIT(10)
+#define V4L2_FWHT_FL_I_FRAME _BITUL(10)
/* A 4-values flag - the number of components - 1 */
#define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
@@ -1862,6 +1863,118 @@ struct v4l2_ctrl_vp8_frame {
__u64 flags;
};
+/* Stateless MPEG-2 controls */
+
+#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01
+
+#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220)
+/**
+ * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header
+ *
+ * All the members on this structure match the sequence header and sequence
+ * extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * Fields horizontal_size, vertical_size and vbv_buffer_size are
+ * combinations of their respective _value and _extension syntax elements,
+ * as described in section 6.3.3 "Sequence header".
+ *
+ * @horizontal_size: combination of elements horizontal_size_value and
+ * horizontal_size_extension.
+ * @vertical_size: combination of elements vertical_size_value and
+ * vertical_size_extension.
+ * @vbv_buffer_size: combination of elements vbv_buffer_size_value and
+ * vbv_buffer_size_extension.
+ * @profile_and_level_indication: see MPEG-2 specification.
+ * @chroma_format: see MPEG-2 specification.
+ * @flags: see V4L2_MPEG2_SEQ_FLAG_{}.
+ */
+struct v4l2_ctrl_mpeg2_sequence {
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+ __u16 profile_and_level_indication;
+ __u8 chroma_format;
+ __u8 flags;
+};
+
+#define V4L2_MPEG2_PIC_CODING_TYPE_I 1
+#define V4L2_MPEG2_PIC_CODING_TYPE_P 2
+#define V4L2_MPEG2_PIC_CODING_TYPE_B 3
+#define V4L2_MPEG2_PIC_CODING_TYPE_D 4
+
+#define V4L2_MPEG2_PIC_TOP_FIELD 0x1
+#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2
+#define V4L2_MPEG2_PIC_FRAME 0x3
+
+#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001
+#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002
+#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004
+#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008
+#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010
+#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020
+#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040
+#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080
+
+#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221)
+/**
+ * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header
+ *
+ * All the members on this structure match the picture header and picture
+ * coding extension syntaxes as specified by the MPEG-2 specification.
+ *
+ * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for backward prediction.
+ * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as
+ * reference for forward prediction. These timestamps refer to the
+ * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns()
+ * to convert the struct timeval to a __u64.
+ * @flags: see V4L2_MPEG2_PIC_FLAG_{}.
+ * @f_code: see MPEG-2 specification.
+ * @picture_coding_type: see MPEG-2 specification.
+ * @picture_structure: see V4L2_MPEG2_PIC_{}_FIELD.
+ * @intra_dc_precision: see MPEG-2 specification.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_mpeg2_picture {
+ __u64 backward_ref_ts;
+ __u64 forward_ref_ts;
+ __u32 flags;
+ __u8 f_code[2][2];
+ __u8 picture_coding_type;
+ __u8 picture_structure;
+ __u8 intra_dc_precision;
+ __u8 reserved[5];
+};
+
+#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222)
+/**
+ * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation
+ *
+ * Quantisation matrices as specified by section 6.3.7
+ * "Quant matrix extension".
+ *
+ * @intra_quantiser_matrix: The quantisation matrix coefficients
+ * for intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for non-intra-coded frames, in zigzag scanning order. It is relevant
+ * for both luma and chroma components, although it can be superseded
+ * by the chroma-specific matrix for non-4:2:0 YUV formats.
+ * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients
+ * for the chrominance component of non-intra-coded frames, in zigzag scanning
+ * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
+ */
+struct v4l2_ctrl_mpeg2_quantisation {
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
+
#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 311a01cc5775..9260791b8438 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1747,6 +1747,9 @@ struct v4l2_ext_control {
struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params;
struct v4l2_ctrl_fwht_params __user *p_fwht_params;
struct v4l2_ctrl_vp8_frame __user *p_vp8_frame;
+ struct v4l2_ctrl_mpeg2_sequence __user *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture __user *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1807,6 +1810,10 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220,
V4L2_CTRL_TYPE_VP8_FRAME = 0x0240,
+
+ V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
+ V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
+ V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index f0c35ce8628c..4fe842c3a3a9 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -54,7 +54,7 @@
#define VIRTIO_ID_SOUND 25 /* virtio sound */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
-#define VIRTIO_ID_BT 28 /* virtio bluetooth */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
index 1d57ed3d84d2..3dd3555b2740 100644
--- a/include/uapi/linux/virtio_vsock.h
+++ b/include/uapi/linux/virtio_vsock.h
@@ -38,6 +38,9 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* The feature bitmap for virtio vsock */
+#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */
+
struct virtio_vsock_config {
__le64 guest_cid;
} __attribute__((packed));
@@ -65,6 +68,7 @@ struct virtio_vsock_hdr {
enum virtio_vsock_type {
VIRTIO_VSOCK_TYPE_STREAM = 1,
+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
};
enum virtio_vsock_op {
@@ -91,4 +95,9 @@ enum virtio_vsock_shutdown {
VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
};
+/* VIRTIO_VSOCK_OP_RW flags values */
+enum virtio_vsock_rw {
+ VIRTIO_VSOCK_SEQ_EOR = 1,
+};
+
#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
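
For guest userspace, the negotiated VIRTIO_VSOCK_F_SEQPACKET feature surfaces simply as SOCK_SEQPACKET support on AF_VSOCK sockets, with VIRTIO_VSOCK_SEQ_EOR marking record boundaries on the wire. A sketch of the client side:

#include <linux/vm_sockets.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_seqpacket(unsigned int cid, unsigned int port)
{
	struct sockaddr_vm addr;
	int sd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return -1;	/* transport lacks SEQPACKET support */

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = cid;
	addr.svm_port = port;

	if (connect(sd, (struct sockaddr *)&addr, sizeof(addr))) {
		close(sd);
		return -1;
	}
	return sd;	/* send()/recv() now preserve record boundaries */
}
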
diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h
new file mode 100644
index 000000000000..32a2720b4d11
--- /dev/null
+++ b/include/uapi/linux/wwan.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2021 Intel Corporation.
+ */
+#ifndef _UAPI_WWAN_H_
+#define _UAPI_WWAN_H_
+
+enum {
+ IFLA_WWAN_UNSPEC,
+ IFLA_WWAN_LINK_ID, /* u32 */
+
+ __IFLA_WWAN_MAX
+};
+#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1)
+
+#endif /* _UAPI_WWAN_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 1ea12c64e4c9..55f9f7738ebb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -83,6 +83,9 @@ config TOOLS_SUPPORT_RELR
config CC_HAS_ASM_INLINE
def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
+config CC_HAS_NO_PROFILE_FN_ATTR
+ def_bool $(success,echo '__attribute__((no_profile_instrument_function)) int x();' | $(CC) -x c - -c -o /dev/null -Werror)
+
config CONSTRUCTORS
bool
@@ -442,6 +445,7 @@ config AUDITSYSCALL
source "kernel/irq/Kconfig"
source "kernel/time/Kconfig"
+source "kernel/bpf/Kconfig"
source "kernel/Kconfig.preempt"
menu "CPU/Task time and stats accounting"
@@ -1713,46 +1717,6 @@ config KALLSYMS_BASE_RELATIVE
# syscall, maps, verifier
-config BPF_LSM
- bool "LSM Instrumentation with BPF"
- depends on BPF_EVENTS
- depends on BPF_SYSCALL
- depends on SECURITY
- depends on BPF_JIT
- help
- Enables instrumentation of the security hooks with eBPF programs for
- implementing dynamic MAC and Audit Policies.
-
- If you are unsure how to answer this question, answer N.
-
-config BPF_SYSCALL
- bool "Enable bpf() system call"
- select BPF
- select IRQ_WORK
- select TASKS_TRACE_RCU
- select BINARY_PRINTF
- select NET_SOCK_MSG if INET
- default n
- help
- Enable the bpf() system call that allows to manipulate eBPF
- programs and maps via file descriptors.
-
-config ARCH_WANT_DEFAULT_BPF_JIT
- bool
-
-config BPF_JIT_ALWAYS_ON
- bool "Permanently enable BPF JIT and remove BPF interpreter"
- depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
- help
- Enables BPF JIT and removes BPF interpreter to avoid
- speculative execution of BPF instructions by the interpreter
-
-config BPF_JIT_DEFAULT_ON
- def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
- depends on HAVE_EBPF_JIT && BPF_JIT
-
-source "kernel/bpf/preload/Kconfig"
-
config USERFAULTFD
bool "Enable userfaultfd() system call"
depends on MMU
diff --git a/init/do_mounts.c b/init/do_mounts.c
index a78e44ee6adb..74aede860de7 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -133,14 +133,8 @@ static dev_t devt_from_partuuid(const char *uuid_str)
* Attempt to find the requested partition by adding an offset
* to the partition number found by UUID.
*/
- struct block_device *part;
-
- part = bdget_disk(dev_to_disk(dev),
- dev_to_bdev(dev)->bd_partno + offset);
- if (part) {
- devt = part->bd_dev;
- bdput(part);
- }
+ devt = part_devt(dev_to_disk(dev),
+ dev_to_bdev(dev)->bd_partno + offset);
} else {
devt = dev->devt;
}
diff --git a/init/init_task.c b/init/init_task.c
index 8b08c2e19cbb..562f2ef8d157 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -71,7 +71,7 @@ struct task_struct init_task
.thread_info = INIT_THREAD_INFO(init_task),
.stack_refcount = REFCOUNT_INIT(1),
#endif
- .state = 0,
+ .__state = 0,
.stack = init_stack,
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
diff --git a/init/main.c b/init/main.c
index eb01e121d2f1..359358500e54 100644
--- a/init/main.c
+++ b/init/main.c
@@ -692,6 +692,7 @@ noinline void __ref rest_init(void)
*/
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ tsk->flags |= PF_NO_SETAFFINITY;
set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
rcu_read_unlock();
@@ -941,11 +942,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
* time - but meanwhile we still have a functioning scheduler.
*/
sched_init();
- /*
- * Disable preemption - early bootup scheduling is extremely
- * fragile until we cpu_idle() for the first time.
- */
- preempt_disable();
+
if (WARN(!irqs_disabled(),
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
@@ -1444,6 +1441,11 @@ static int __ref kernel_init(void *unused)
{
int ret;
+ /*
+ * Wait until kthreadd is all set-up.
+ */
+ wait_for_completion(&kthreadd_done);
+
kernel_init_freeable();
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
@@ -1524,11 +1526,6 @@ void __init console_on_rootfs(void)
static noinline void __init kernel_init_freeable(void)
{
- /*
- * Wait until kthreadd is all set-up.
- */
- wait_for_completion(&kthreadd_done);
-
/* Now the scheduler is fully set up and can do blocking allocations */
gfp_allowed_mask = __GFP_BITS_MASK;
@@ -1537,7 +1534,7 @@ static noinline void __init kernel_init_freeable(void)
*/
set_mems_allowed(node_states[N_MEMORY]);
- cad_pid = task_pid(current);
+ cad_pid = get_pid(task_pid(current));
smp_prepare_cpus(setup_max_cpus);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 4e4e61111500..5becca9be867 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -144,7 +144,7 @@ struct mqueue_inode_info {
struct pid *notify_owner;
u32 notify_self_exec_id;
struct user_namespace *notify_user_ns;
- struct user_struct *user; /* user who created, for accounting */
+ struct ucounts *ucounts; /* user who created, for accounting */
struct sock *notify_sock;
struct sk_buff *notify_cookie;
@@ -292,7 +292,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
struct ipc_namespace *ipc_ns, umode_t mode,
struct mq_attr *attr)
{
- struct user_struct *u = current_user();
struct inode *inode;
int ret = -ENOMEM;
@@ -321,7 +320,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
info->notify_owner = NULL;
info->notify_user_ns = NULL;
info->qsize = 0;
- info->user = NULL; /* set when all is ok */
+ info->ucounts = NULL; /* set when all is ok */
info->msg_tree = RB_ROOT;
info->msg_tree_rightmost = NULL;
info->node_cache = NULL;
@@ -371,19 +370,23 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
if (mq_bytes + mq_treesize < mq_bytes)
goto out_inode;
mq_bytes += mq_treesize;
- spin_lock(&mq_lock);
- if (u->mq_bytes + mq_bytes < u->mq_bytes ||
- u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
+ info->ucounts = get_ucounts(current_ucounts());
+ if (info->ucounts) {
+ long msgqueue;
+
+ spin_lock(&mq_lock);
+ msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
+ if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
+ dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
+ spin_unlock(&mq_lock);
+ put_ucounts(info->ucounts);
+ info->ucounts = NULL;
+ /* mqueue_evict_inode() releases info->messages */
+ ret = -EMFILE;
+ goto out_inode;
+ }
spin_unlock(&mq_lock);
- /* mqueue_evict_inode() releases info->messages */
- ret = -EMFILE;
- goto out_inode;
}
- u->mq_bytes += mq_bytes;
- spin_unlock(&mq_lock);
-
- /* all is ok */
- info->user = get_uid(u);
} else if (S_ISDIR(mode)) {
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
@@ -497,7 +500,6 @@ static void mqueue_free_inode(struct inode *inode)
static void mqueue_evict_inode(struct inode *inode)
{
struct mqueue_inode_info *info;
- struct user_struct *user;
struct ipc_namespace *ipc_ns;
struct msg_msg *msg, *nmsg;
LIST_HEAD(tmp_msg);
@@ -520,8 +522,7 @@ static void mqueue_evict_inode(struct inode *inode)
free_msg(msg);
}
- user = info->user;
- if (user) {
+ if (info->ucounts) {
unsigned long mq_bytes, mq_treesize;
/* Total amount of bytes accounted for the mqueue */
@@ -533,7 +534,7 @@ static void mqueue_evict_inode(struct inode *inode)
info->attr.mq_msgsize);
spin_lock(&mq_lock);
- user->mq_bytes -= mq_bytes;
+ dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
/*
* get_ns_from_inode() ensures that the
* (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
@@ -543,7 +544,8 @@ static void mqueue_evict_inode(struct inode *inode)
if (ipc_ns)
ipc_ns->mq_queues_count--;
spin_unlock(&mq_lock);
- free_uid(user);
+ put_ucounts(info->ucounts);
+ info->ucounts = NULL;
}
if (ipc_ns)
put_ipc_ns(ipc_ns);
diff --git a/ipc/shm.c b/ipc/shm.c
index febd88daba8c..003234fbbd17 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -60,7 +60,7 @@ struct shmid_kernel /* private to the kernel */
time64_t shm_ctim;
struct pid *shm_cprid;
struct pid *shm_lprid;
- struct user_struct *mlock_user;
+ struct ucounts *mlock_ucounts;
/* The task created the shm object. NULL if the task is dead. */
struct task_struct *shm_creator;
@@ -286,10 +286,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
shm_rmid(ns, shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
- shmem_lock(shm_file, 0, shp->mlock_user);
- else if (shp->mlock_user)
+ shmem_lock(shm_file, 0, shp->mlock_ucounts);
+ else if (shp->mlock_ucounts)
user_shm_unlock(i_size_read(file_inode(shm_file)),
- shp->mlock_user);
+ shp->mlock_ucounts);
fput(shm_file);
ipc_update_pid(&shp->shm_cprid, NULL);
ipc_update_pid(&shp->shm_lprid, NULL);
@@ -625,7 +625,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_perm.key = key;
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
- shp->mlock_user = NULL;
+ shp->mlock_ucounts = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(&shp->shm_perm);
@@ -650,7 +650,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, hugesize, acctflag,
- &shp->mlock_user, HUGETLB_SHMFS_INODE,
+ &shp->mlock_ucounts, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
/*
@@ -698,8 +698,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
no_id:
ipc_update_pid(&shp->shm_cprid, NULL);
ipc_update_pid(&shp->shm_lprid, NULL);
- if (is_file_hugepages(file) && shp->mlock_user)
- user_shm_unlock(size, shp->mlock_user);
+ if (is_file_hugepages(file) && shp->mlock_ucounts)
+ user_shm_unlock(size, shp->mlock_ucounts);
fput(file);
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
return error;
@@ -1105,12 +1105,12 @@ static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
goto out_unlock0;
if (cmd == SHM_LOCK) {
- struct user_struct *user = current_user();
+ struct ucounts *ucounts = current_ucounts();
- err = shmem_lock(shm_file, 1, user);
+ err = shmem_lock(shm_file, 1, ucounts);
if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
shp->shm_perm.mode |= SHM_LOCKED;
- shp->mlock_user = user;
+ shp->mlock_ucounts = ucounts;
}
goto out_unlock0;
}
@@ -1118,9 +1118,9 @@ static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
/* SHM_UNLOCK */
if (!(shp->shm_perm.mode & SHM_LOCKED))
goto out_unlock0;
- shmem_lock(shm_file, 0, shp->mlock_user);
+ shmem_lock(shm_file, 0, shp->mlock_ucounts);
shp->shm_perm.mode &= ~SHM_LOCKED;
- shp->mlock_user = NULL;
+ shp->mlock_ucounts = NULL;
get_file(shm_file);
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 416017301660..5876e30c5740 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -99,3 +99,23 @@ config PREEMPT_DYNAMIC
Interesting if you want the same pre-built kernel to be used for
both Server and Desktop workloads.
+
+config SCHED_CORE
+ bool "Core Scheduling for SMT"
+ depends on SCHED_SMT
+ help
+ This option permits Core Scheduling, a means of coordinated task
+ selection across SMT siblings. When enabled -- see
+ prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings
+ will execute a task from the same 'core group', forcing idle when no
+ matching task is found.
+
+ Use of this feature includes:
+ - mitigation of some (not all) SMT side channels;
+ - limiting SMT interference to improve determinism and/or performance.
+
+ SCHED_CORE is disabled by default. When it is enabled but unused,
+ which is the likely usage by Linux distributions, there should
+ be no measurable impact on performance.
+
diff --git a/kernel/audit.h b/kernel/audit.h
index 1522e100fd17..b565ea16c0a5 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* audit -- definition of audit_context structure and supporting types
+/* audit -- definition of audit_context structure and supporting types
*
* Copyright 2003-2004 Red Hat, Inc.
* Copyright 2005 Hewlett-Packard Development Company, L.P.
@@ -21,16 +21,16 @@
a per-task filter. At syscall entry, the audit_state is augmented by
the syscall filter. */
enum audit_state {
- AUDIT_DISABLED, /* Do not create per-task audit_context.
+ AUDIT_STATE_DISABLED, /* Do not create per-task audit_context.
* No syscall-specific audit records can
* be generated. */
- AUDIT_BUILD_CONTEXT, /* Create the per-task audit_context,
+ AUDIT_STATE_BUILD, /* Create the per-task audit_context,
* and fill it in at syscall
* entry time. This makes a full
* syscall record available if some
* other part of the kernel decides it
* should be recorded. */
- AUDIT_RECORD_CONTEXT /* Create the per-task audit_context,
+ AUDIT_STATE_RECORD /* Create the per-task audit_context,
* always fill it in at syscall entry
* time, and always write out the audit
* record at syscall exit time. */
@@ -322,7 +322,7 @@ static inline int audit_signal_info_syscall(struct task_struct *t)
return 0;
}
-#define audit_filter_inodes(t, c) AUDIT_DISABLED
+#define audit_filter_inodes(t, c) AUDIT_STATE_DISABLED
#endif /* CONFIG_AUDITSYSCALL */
extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 6c91902f4f45..b2be4e978ba3 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -689,8 +689,7 @@ void audit_trim_trees(void)
tree = container_of(cursor.next, struct audit_tree, list);
get_tree(tree);
- list_del(&cursor);
- list_add(&cursor, &tree->list);
+ list_move(&cursor, &tree->list);
mutex_unlock(&audit_filter_mutex);
err = kern_path(tree->pathname, 0, &path);
@@ -899,8 +898,7 @@ int audit_tag_tree(char *old, char *new)
tree = container_of(cursor.next, struct audit_tree, list);
get_tree(tree);
- list_del(&cursor);
- list_add(&cursor, &tree->list);
+ list_move(&cursor, &tree->list);
mutex_unlock(&audit_filter_mutex);
err = kern_path(tree->pathname, 0, &path2);
@@ -925,8 +923,7 @@ int audit_tag_tree(char *old, char *new)
mutex_lock(&audit_filter_mutex);
spin_lock(&hash_lock);
if (!tree->goner) {
- list_del(&tree->list);
- list_add(&tree->list, &tree_list);
+ list_move(&tree->list, &tree_list);
}
spin_unlock(&hash_lock);
put_tree(tree);
@@ -937,8 +934,7 @@ int audit_tag_tree(char *old, char *new)
tree = container_of(barrier.prev, struct audit_tree, list);
get_tree(tree);
- list_del(&tree->list);
- list_add(&tree->list, &barrier);
+ list_move(&tree->list, &barrier);
mutex_unlock(&audit_filter_mutex);
if (!failed) {
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 175ef6f3ea4e..8dd73a64f921 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -160,6 +160,7 @@ static const struct audit_nfcfgop_tab audit_nfcfgs[] = {
static int audit_match_perm(struct audit_context *ctx, int mask)
{
unsigned n;
+
if (unlikely(!ctx))
return 0;
n = ctx->major;
@@ -231,7 +232,7 @@ static void audit_set_auditable(struct audit_context *ctx)
{
if (!ctx->prio) {
ctx->prio = 1;
- ctx->current_state = AUDIT_RECORD_CONTEXT;
+ ctx->current_state = AUDIT_STATE_RECORD;
}
}
@@ -239,6 +240,7 @@ static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
struct audit_tree_refs *p = ctx->trees;
int left = ctx->tree_count;
+
if (likely(left)) {
p->c[--left] = chunk;
ctx->tree_count = left;
@@ -259,6 +261,7 @@ static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
static int grow_tree_refs(struct audit_context *ctx)
{
struct audit_tree_refs *p = ctx->trees;
+
ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
if (!ctx->trees) {
ctx->trees = p;
@@ -277,6 +280,7 @@ static void unroll_tree_refs(struct audit_context *ctx,
{
struct audit_tree_refs *q;
int n;
+
if (!p) {
/* we started with empty chain */
p = ctx->first_trees;
@@ -303,6 +307,7 @@ static void unroll_tree_refs(struct audit_context *ctx,
static void free_tree_refs(struct audit_context *ctx)
{
struct audit_tree_refs *p, *q;
+
for (p = ctx->first_trees; p; p = q) {
q = p->next;
kfree(p);
@@ -313,6 +318,7 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
struct audit_tree_refs *p;
int n;
+
if (!tree)
return 0;
/* full ones */
@@ -337,13 +343,13 @@ static int audit_compare_uid(kuid_t uid,
{
struct audit_names *n;
int rc;
-
+
if (name) {
rc = audit_uid_comparator(uid, f->op, name->uid);
if (rc)
return rc;
}
-
+
if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
rc = audit_uid_comparator(uid, f->op, n->uid);
@@ -361,13 +367,13 @@ static int audit_compare_gid(kgid_t gid,
{
struct audit_names *n;
int rc;
-
+
if (name) {
rc = audit_gid_comparator(gid, f->op, name->gid);
if (rc)
return rc;
}
-
+
if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
rc = audit_gid_comparator(gid, f->op, n->gid);
@@ -751,10 +757,10 @@ static int audit_filter_rules(struct task_struct *tsk,
}
switch (rule->action) {
case AUDIT_NEVER:
- *state = AUDIT_DISABLED;
+ *state = AUDIT_STATE_DISABLED;
break;
case AUDIT_ALWAYS:
- *state = AUDIT_RECORD_CONTEXT;
+ *state = AUDIT_STATE_RECORD;
break;
}
return 1;
@@ -773,14 +779,14 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
&state, true)) {
- if (state == AUDIT_RECORD_CONTEXT)
+ if (state == AUDIT_STATE_RECORD)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
rcu_read_unlock();
return state;
}
}
rcu_read_unlock();
- return AUDIT_BUILD_CONTEXT;
+ return AUDIT_STATE_BUILD;
}
static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
@@ -802,7 +808,7 @@ static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
/* At syscall exit time, this filter is called if the audit_state is
* not low enough that auditing cannot take place, but is also not
* high enough that we already know we have to write an audit record
- * (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT).
+ * (i.e., the state is AUDIT_STATE_BUILD).
*/
static void audit_filter_syscall(struct task_struct *tsk,
struct audit_context *ctx)
@@ -923,7 +929,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state)
if (!context)
return NULL;
context->state = state;
- context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
+ context->prio = state == AUDIT_STATE_RECORD ? ~0ULL : 0;
INIT_LIST_HEAD(&context->killed_trees);
INIT_LIST_HEAD(&context->names_list);
context->fds[0] = -1;
@@ -950,7 +956,7 @@ int audit_alloc(struct task_struct *tsk)
return 0; /* Return if not auditing. */
state = audit_filter_task(tsk, &key);
- if (state == AUDIT_DISABLED) {
+ if (state == AUDIT_STATE_DISABLED) {
clear_task_syscall_work(tsk, SYSCALL_AUDIT);
return 0;
}
@@ -1225,6 +1231,7 @@ static void show_special(struct audit_context *context, int *call_panic)
switch (context->type) {
case AUDIT_SOCKETCALL: {
int nargs = context->socketcall.nargs;
+
audit_log_format(ab, "nargs=%d", nargs);
for (i = 0; i < nargs; i++)
audit_log_format(ab, " a%d=%lx", i,
@@ -1240,6 +1247,7 @@ static void show_special(struct audit_context *context, int *call_panic)
if (osid) {
char *ctx = NULL;
u32 len;
+
if (security_secid_to_secctx(osid, &ctx, &len)) {
audit_log_format(ab, " osid=%u", osid);
*call_panic = 1;
@@ -1289,6 +1297,7 @@ static void show_special(struct audit_context *context, int *call_panic)
break;
case AUDIT_MQ_GETSETATTR: {
struct mq_attr *attr = &context->mq_getsetattr.mqstat;
+
audit_log_format(ab,
"mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld "
"mq_curmsgs=%ld ",
@@ -1325,6 +1334,7 @@ static void show_special(struct audit_context *context, int *call_panic)
static inline int audit_proctitle_rtrim(char *proctitle, int len)
{
char *end = proctitle + len - 1;
+
while (end > proctitle && !isprint(*end))
end--;
@@ -1513,6 +1523,7 @@ static void audit_log_exit(void)
case AUDIT_BPRM_FCAPS: {
struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
+
audit_log_format(ab, "fver=%x", axs->fcap_ver);
audit_log_cap(ab, "fp", &axs->fcap.permitted);
audit_log_cap(ab, "fi", &axs->fcap.inheritable);
@@ -1628,7 +1639,7 @@ void __audit_free(struct task_struct *tsk)
audit_filter_syscall(tsk, context);
audit_filter_inodes(tsk, context);
- if (context->current_state == AUDIT_RECORD_CONTEXT)
+ if (context->current_state == AUDIT_STATE_RECORD)
audit_log_exit();
}
@@ -1647,7 +1658,7 @@ void __audit_free(struct task_struct *tsk)
* Fill in audit context at syscall entry. This only happens if the
* audit context was created when the task was created and the state or
* filters demand the audit context be built. If the state from the
- * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT,
+ * per-task filter or from the per-syscall filter is AUDIT_STATE_RECORD,
* then the record will be written at syscall exit time (otherwise, it
* will only be written if another part of the kernel requests that it
* be written).
@@ -1664,11 +1675,11 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2,
BUG_ON(context->in_syscall || context->name_count);
state = context->state;
- if (state == AUDIT_DISABLED)
+ if (state == AUDIT_STATE_DISABLED)
return;
context->dummy = !audit_n_rules;
- if (!context->dummy && state == AUDIT_BUILD_CONTEXT) {
+ if (!context->dummy && state == AUDIT_STATE_BUILD) {
context->prio = 0;
if (auditd_test_task(current))
return;
@@ -1693,7 +1704,7 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2,
* @return_code: return value of the syscall
*
* Tear down after system call. If the audit context has been marked as
- * auditable (either because of the AUDIT_RECORD_CONTEXT state from
+ * auditable (either because of the AUDIT_STATE_RECORD state from
* filtering, or because some other part of the kernel wrote an audit
 * message), then write out the syscall information. In all cases,
* free the names stored from getname().
@@ -1735,12 +1746,12 @@ void __audit_syscall_exit(int success, long return_code)
audit_filter_syscall(current, context);
audit_filter_inodes(current, context);
- if (context->current_state == AUDIT_RECORD_CONTEXT)
+ if (context->current_state == AUDIT_STATE_RECORD)
audit_log_exit();
}
context->in_syscall = 0;
- context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
+ context->prio = context->state == AUDIT_STATE_RECORD ? ~0ULL : 0;
audit_free_module(context);
audit_free_names(context);
@@ -1753,7 +1764,7 @@ void __audit_syscall_exit(int success, long return_code)
context->sockaddr_len = 0;
context->type = 0;
context->fds[0] = -1;
- if (context->state != AUDIT_RECORD_CONTEXT) {
+ if (context->state != AUDIT_STATE_RECORD) {
kfree(context->filterkey);
context->filterkey = NULL;
}
@@ -1765,6 +1776,7 @@ static inline void handle_one(const struct inode *inode)
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
+
if (likely(!inode->i_fsnotify_marks))
return;
context = audit_context();
@@ -1806,8 +1818,10 @@ retry:
seq = read_seqbegin(&rename_lock);
for(;;) {
struct inode *inode = d_backing_inode(d);
+
if (inode && unlikely(inode->i_fsnotify_marks)) {
struct audit_chunk *chunk;
+
chunk = audit_tree_lookup(inode);
if (chunk) {
if (unlikely(!put_tree_ref(context, chunk))) {
@@ -2203,7 +2217,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
*serial = ctx->serial;
if (!ctx->prio) {
ctx->prio = 1;
- ctx->current_state = AUDIT_RECORD_CONTEXT;
+ ctx->current_state = AUDIT_STATE_RECORD;
}
return 1;
}
@@ -2285,6 +2299,7 @@ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
struct audit_context *context = audit_context();
+
context->mq_getsetattr.mqdes = mqdes;
context->mq_getsetattr.mqstat = *mqstat;
context->type = AUDIT_MQ_GETSETATTR;
@@ -2298,6 +2313,7 @@ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
void __audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
struct audit_context *context = audit_context();
+
context->ipc.uid = ipcp->uid;
context->ipc.gid = ipcp->gid;
context->ipc.mode = ipcp->mode;
@@ -2362,6 +2378,7 @@ int __audit_socketcall(int nargs, unsigned long *args)
void __audit_fd_pair(int fd1, int fd2)
{
struct audit_context *context = audit_context();
+
context->fds[0] = fd1;
context->fds[1] = fd2;
}
@@ -2379,6 +2396,7 @@ int __audit_sockaddr(int len, void *a)
if (!context->sockaddr) {
void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
+
if (!p)
return -ENOMEM;
context->sockaddr = p;
@@ -2510,6 +2528,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
void __audit_log_capset(const struct cred *new, const struct cred *old)
{
struct audit_context *context = audit_context();
+
context->capset.pid = task_tgid_nr(current);
context->capset.cap.effective = new->cap_effective;
context->capset.cap.inheritable = new->cap_effective;
@@ -2521,6 +2540,7 @@ void __audit_log_capset(const struct cred *new, const struct cred *old)
void __audit_mmap_fd(int fd, int flags)
{
struct audit_context *context = audit_context();
+
context->mmap.fd = fd;
context->mmap.flags = flags;
context->type = AUDIT_MMAP;
@@ -2686,6 +2706,7 @@ void audit_seccomp_actions_logged(const char *names, const char *old_names,
struct list_head *audit_killed_trees(void)
{
struct audit_context *ctx = audit_context();
+
if (likely(!ctx || !ctx->in_syscall))
return NULL;
return &ctx->killed_trees;
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
new file mode 100644
index 000000000000..bd04f4a44c01
--- /dev/null
+++ b/kernel/bpf/Kconfig
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# BPF interpreter that, for example, classic socket filters depend on.
+config BPF
+ bool
+
+# Used by archs to tell that they support BPF JIT compiler plus which
+# flavour. Only one of the two can be selected for a specific arch since
+# eBPF JIT supersedes the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+ bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
+ bool
+
+# Used by archs to tell that they want the BPF JIT compiler enabled by
+# default for kernels that were compiled with BPF JIT support.
+config ARCH_WANT_DEFAULT_BPF_JIT
+ bool
+
+menu "BPF subsystem"
+
+config BPF_SYSCALL
+ bool "Enable bpf() system call"
+ select BPF
+ select IRQ_WORK
+ select TASKS_TRACE_RCU
+ select BINARY_PRINTF
+ select NET_SOCK_MSG if INET
+ default n
+ help
+ Enable the bpf() system call that allows manipulating BPF programs
+ and maps via file descriptors.
+
+config BPF_JIT
+ bool "Enable BPF Just In Time compiler"
+ depends on BPF
+ depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
+ depends on MODULES
+ help
+ BPF programs are normally handled by a BPF interpreter. This option
+ allows the kernel to generate native code when a program is loaded
+ into the kernel. This will significantly speed up processing of BPF
+ programs.
+
+ Note, an admin should enable this feature by changing:
+ /proc/sys/net/core/bpf_jit_enable
+ /proc/sys/net/core/bpf_jit_harden (optional)
+ /proc/sys/net/core/bpf_jit_kallsyms (optional)
+
+config BPF_JIT_ALWAYS_ON
+ bool "Permanently enable BPF JIT and remove BPF interpreter"
+ depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+ help
+ Enables the BPF JIT and removes the BPF interpreter to avoid speculative
+ execution of BPF instructions by the interpreter.
+
+config BPF_JIT_DEFAULT_ON
+ def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
+ depends on HAVE_EBPF_JIT && BPF_JIT
+
+config BPF_UNPRIV_DEFAULT_OFF
+ bool "Disable unprivileged BPF by default"
+ depends on BPF_SYSCALL
+ help
+ Disables unprivileged BPF by default by setting the corresponding
+ /proc/sys/kernel/unprivileged_bpf_disabled knob to 2. An admin can
+ still re-enable it by setting it to 0 later on, or permanently
+ disable it by setting it to 1 (after which no transition back
+ to 0 is possible).
+
+source "kernel/bpf/preload/Kconfig"
+
+config BPF_LSM
+ bool "Enable BPF LSM Instrumentation"
+ depends on BPF_EVENTS
+ depends on BPF_SYSCALL
+ depends on SECURITY
+ depends on BPF_JIT
+ help
+ Enables instrumentation of the security hooks with BPF programs for
+ implementing dynamic MAC and Audit Policies.
+
+ If you are unsure how to answer this question, answer N.
+
+endmenu # "BPF subsystem"
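
A quick way to see which of the three states described above a running system is in; a minimal userspace sketch (the sysctl path comes from the help text above, everything else is illustrative):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
	int v = -1;

	if (f) {
		if (fscanf(f, "%d", &v) != 1)
			v = -1;
		fclose(f);
	}
	switch (v) {
	case 0: puts("unprivileged bpf() enabled"); break;
	case 1: puts("disabled until reboot"); break;
	case 2: puts("disabled, an admin may set it back to 0"); break;
	default: puts("could not read sysctl"); break;
	}
	return 0;
}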
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 2921ca39a93e..96ceed0e0fb5 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -72,7 +72,7 @@ void bpf_inode_storage_free(struct inode *inode)
return;
}
- /* Netiher the bpf_prog nor the bpf-map's syscall
+ /* Neither the bpf_prog nor the bpf-map's syscall
* could be modifying the local_storage->list now.
* Thus, no elem can be added-to or deleted-from the
* local_storage->list by the bpf_prog or by the bpf-map's syscall.
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 931870f9cf56..2d4fbdbb194e 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -473,15 +473,16 @@ bool bpf_link_is_iter(struct bpf_link *link)
return link->ops == &bpf_iter_link_lops;
}
-int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
+ struct bpf_prog *prog)
{
- union bpf_iter_link_info __user *ulinfo;
struct bpf_link_primer link_primer;
struct bpf_iter_target_info *tinfo;
union bpf_iter_link_info linfo;
struct bpf_iter_link *link;
u32 prog_btf_id, linfo_len;
bool existed = false;
+ bpfptr_t ulinfo;
int err;
if (attr->link_create.target_fd || attr->link_create.flags)
@@ -489,18 +490,18 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
memset(&linfo, 0, sizeof(union bpf_iter_link_info));
- ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
+ ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
linfo_len = attr->link_create.iter_info_len;
- if (!ulinfo ^ !linfo_len)
+ if (bpfptr_is_null(ulinfo) ^ !linfo_len)
return -EINVAL;
- if (ulinfo) {
+ if (!bpfptr_is_null(ulinfo)) {
err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
linfo_len);
if (err)
return err;
linfo_len = min_t(u32, linfo_len, sizeof(linfo));
- if (copy_from_user(&linfo, ulinfo, linfo_len))
+ if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
return -EFAULT;
}
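
The bpfptr_t type threaded through this and the following hunks lets one code path consume the attribute from either user memory (the classic bpf(2) entry) or kernel memory (the BPF_PROG_TYPE_SYSCALL path seen later in syscall.c). A simplified kernel-context sketch of the idea; the real definition lives in include/linux/bpfptr.h as a typedef of sockptr_t:

typedef struct {
	union {
		void		*kernel;
		void __user	*user;
	};
	bool		is_kernel : 1;
} bpfptr_t;

static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
{
	bpfptr_t p = { .is_kernel = is_kernel };

	if (is_kernel)
		p.kernel = (void *)(unsigned long)addr;
	else
		p.user = u64_to_user_ptr(addr);
	return p;
}

static inline bool bpfptr_is_null(bpfptr_t p)
{
	return p.is_kernel ? !p.kernel : !p.user;
}

static inline long copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
{
	if (src.is_kernel) {
		memcpy(dst, src.kernel, size);
		return 0;
	}
	return copy_from_user(dst, src.user, size) ? -EFAULT : 0;
}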
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 5efb2b24012c..06062370c3b8 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -107,10 +107,12 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_inode_storage_get_proto;
case BPF_FUNC_inode_storage_delete:
return &bpf_inode_storage_delete_proto;
+#ifdef CONFIG_NET
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
+#endif /* CONFIG_NET */
case BPF_FUNC_spin_lock:
return &bpf_spin_lock_proto;
case BPF_FUNC_spin_unlock:
@@ -125,7 +127,7 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
/* The set of hooks which are called without pagefaults disabled and are allowed
- * to "sleep" and thus can be used for sleeable BPF programs.
+ * to "sleep" and thus can be used for sleepable BPF programs.
*/
BTF_SET_START(sleepable_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 0600ed325fa0..cb4b72997d9b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -51,7 +51,7 @@
* The BTF type section contains a list of 'struct btf_type' objects.
* Each one describes a C type. Recall from the above section
* that a 'struct btf_type' object could be immediately followed by extra
- * data in order to desribe some particular C types.
+ * data in order to describe some particular C types.
*
* type_id:
* ~~~~~~~
@@ -1143,7 +1143,7 @@ static void *btf_show_obj_safe(struct btf_show *show,
/*
* We need a new copy to our safe object, either because we haven't
- * yet copied and are intializing safe data, or because the data
+ * yet copied and are initializing safe data, or because the data
* we want falls outside the boundaries of the safe object.
*/
if (!safe) {
@@ -3417,7 +3417,7 @@ static struct btf_kind_operations func_proto_ops = {
* BTF_KIND_FUNC_PROTO cannot be directly referred by
* a struct's member.
*
- * It should be a funciton pointer instead.
+ * It should be a function pointer instead.
* (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
*
* Hence, there is no btf_func_check_member().
@@ -4257,7 +4257,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
return 0;
}
-static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
+static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
u32 log_level, char __user *log_ubuf, u32 log_size)
{
struct btf_verifier_env *env = NULL;
@@ -4306,7 +4306,7 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
btf->data = data;
btf->data_size = btf_data_size;
- if (copy_from_user(data, btf_data, btf_data_size)) {
+ if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
err = -EFAULT;
goto errout;
}
@@ -5206,6 +5206,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
m->ret_size = ret;
for (i = 0; i < nargs; i++) {
+ if (i == nargs - 1 && args[i].type == 0) {
+ bpf_log(log,
+ "The function %s with variable args is unsupported.\n",
+ tname);
+ return -EINVAL;
+ }
ret = __get_type_size(btf, args[i].type, &t);
if (ret < 0) {
bpf_log(log,
@@ -5213,6 +5219,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
return -EINVAL;
}
+ if (ret == 0) {
+ bpf_log(log,
+ "The function %s has malformed void argument.\n",
+ tname);
+ return -EINVAL;
+ }
m->arg_size[i] = ret;
}
m->nr_args = nargs;
@@ -5780,12 +5792,12 @@ static int __btf_new_fd(struct btf *btf)
return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}
-int btf_new_fd(const union bpf_attr *attr)
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
{
struct btf *btf;
int ret;
- btf = btf_parse(u64_to_user_ptr(attr->btf),
+ btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
attr->btf_size, attr->btf_log_level,
u64_to_user_ptr(attr->btf_log_buf),
attr->btf_log_size);
@@ -6085,3 +6097,65 @@ struct module *btf_try_get_module(const struct btf *btf)
return res;
}
+
+BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
+{
+ struct btf *btf;
+ long ret;
+
+ if (flags)
+ return -EINVAL;
+
+ if (name_sz <= 1 || name[name_sz - 1])
+ return -EINVAL;
+
+ btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+
+ ret = btf_find_by_name_kind(btf, name, kind);
+ /* ret is never zero, since btf_find_by_name_kind returns
+ * positive btf_id or negative error.
+ */
+ if (ret < 0) {
+ struct btf *mod_btf;
+ int id;
+
+ /* If name is not found in vmlinux's BTF then search in module's BTFs */
+ spin_lock_bh(&btf_idr_lock);
+ idr_for_each_entry(&btf_idr, mod_btf, id) {
+ if (!btf_is_module(mod_btf))
+ continue;
+ /* linear search could be slow hence unlock/lock
+ * the IDR to avoid holding it for too long
+ */
+ btf_get(mod_btf);
+ spin_unlock_bh(&btf_idr_lock);
+ ret = btf_find_by_name_kind(mod_btf, name, kind);
+ if (ret > 0) {
+ int btf_obj_fd;
+
+ btf_obj_fd = __btf_new_fd(mod_btf);
+ if (btf_obj_fd < 0) {
+ btf_put(mod_btf);
+ return btf_obj_fd;
+ }
+ return ret | (((u64)btf_obj_fd) << 32);
+ }
+ spin_lock_bh(&btf_idr_lock);
+ btf_put(mod_btf);
+ }
+ spin_unlock_bh(&btf_idr_lock);
+ }
+ return ret;
+}
+
+const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
+ .func = bpf_btf_find_by_name_kind,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
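
Note the packed return convention of this helper: on success the low 32 bits carry the positive btf_id, and when the type was found in a module's BTF the high 32 bits carry a freshly created fd for that BTF object (zero for vmlinux BTF). A caller-side sketch of unpacking it:

/* Sketch: split the helper's packed s64 result (assumes ret > 0). */
static void unpack_find_result(s64 ret, u32 *btf_id, int *btf_obj_fd)
{
	*btf_id     = (u32)ret;		/* low 32 bits: type id */
	*btf_obj_fd = ret >> 32;	/* high 32 bits: module BTF fd, 0 for vmlinux */
}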
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5e31ee9f7512..034ad93a1ad7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1392,29 +1392,54 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
select_insn:
goto *jumptable[insn->code];
- /* ALU */
-#define ALU(OPCODE, OP) \
- ALU64_##OPCODE##_X: \
- DST = DST OP SRC; \
- CONT; \
- ALU_##OPCODE##_X: \
- DST = (u32) DST OP (u32) SRC; \
- CONT; \
- ALU64_##OPCODE##_K: \
- DST = DST OP IMM; \
- CONT; \
- ALU_##OPCODE##_K: \
- DST = (u32) DST OP (u32) IMM; \
+ /* Explicitly mask the register-based shift amounts with 63 or 31
+ * to avoid undefined behavior. Normally this won't affect the
+ * generated code, for example, in case of native 64 bit archs such
+ * as x86-64 or arm64, the compiler is optimizing the AND away for
+ * the interpreter. In case of JITs, each of the JIT backends compiles
+ * the BPF shift operations to machine instructions which produce
+ * implementation-defined results in such a case; the resulting
+ * contents of the register may be arbitrary, but program behaviour
+ * as a whole remains defined. In other words, in case of JIT backends,
+ * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
+ */
+ /* ALU (shifts) */
+#define SHT(OPCODE, OP) \
+ ALU64_##OPCODE##_X: \
+ DST = DST OP (SRC & 63); \
+ CONT; \
+ ALU_##OPCODE##_X: \
+ DST = (u32) DST OP ((u32) SRC & 31); \
+ CONT; \
+ ALU64_##OPCODE##_K: \
+ DST = DST OP IMM; \
+ CONT; \
+ ALU_##OPCODE##_K: \
+ DST = (u32) DST OP (u32) IMM; \
+ CONT;
+ /* ALU (rest) */
+#define ALU(OPCODE, OP) \
+ ALU64_##OPCODE##_X: \
+ DST = DST OP SRC; \
+ CONT; \
+ ALU_##OPCODE##_X: \
+ DST = (u32) DST OP (u32) SRC; \
+ CONT; \
+ ALU64_##OPCODE##_K: \
+ DST = DST OP IMM; \
+ CONT; \
+ ALU_##OPCODE##_K: \
+ DST = (u32) DST OP (u32) IMM; \
CONT;
-
ALU(ADD, +)
ALU(SUB, -)
ALU(AND, &)
ALU(OR, |)
- ALU(LSH, <<)
- ALU(RSH, >>)
ALU(XOR, ^)
ALU(MUL, *)
+ SHT(LSH, <<)
+ SHT(RSH, >>)
+#undef SHT
#undef ALU
ALU_NEG:
DST = (u32) -DST;
@@ -1439,13 +1464,13 @@ select_insn:
insn++;
CONT;
ALU_ARSH_X:
- DST = (u64) (u32) (((s32) DST) >> SRC);
+ DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
CONT;
ALU_ARSH_K:
DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
- (*(s64 *) &DST) >>= SRC;
+ (*(s64 *) &DST) >>= (SRC & 63);
CONT;
ALU64_ARSH_K:
(*(s64 *) &DST) >>= IMM;
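CONT;

The masking matters because shifting a value by at least its width is undefined behaviour in C, so an unmasked interpreter would be at the compiler's mercy; the AND pins the result to the wrap-around semantics the comment above describes. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = 1;
	uint32_t src = 70;	/* shift amount >= 64: UB if used directly */

	/* dst << src would be undefined; masking gives 70 & 63 == 6 */
	printf("%llu\n", (unsigned long long)(dst << (src & 63))); /* 64 */
	return 0;
}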
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 5dd3e866599a..480e936c54d0 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -74,7 +74,7 @@ struct bpf_cpu_map_entry {
struct bpf_cpu_map {
struct bpf_map map;
/* Below members specific for map type */
- struct bpf_cpu_map_entry **cpu_map;
+ struct bpf_cpu_map_entry __rcu **cpu_map;
};
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
@@ -469,7 +469,7 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
{
struct bpf_cpu_map_entry *old_rcpu;
- old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
+ old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
if (old_rcpu) {
call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
@@ -551,7 +551,7 @@ static void cpu_map_free(struct bpf_map *map)
for (i = 0; i < cmap->map.max_entries; i++) {
struct bpf_cpu_map_entry *rcpu;
- rcpu = READ_ONCE(cmap->cpu_map[i]);
+ rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
if (!rcpu)
continue;
@@ -562,6 +562,10 @@ static void cpu_map_free(struct bpf_map *map)
kfree(cmap);
}
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+ */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
@@ -570,7 +574,8 @@ static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
if (key >= map->max_entries)
return NULL;
- rcpu = READ_ONCE(cmap->cpu_map[key]);
+ rcpu = rcu_dereference_check(cmap->cpu_map[key],
+ rcu_read_lock_bh_held());
return rcpu;
}
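
This lockdep pattern recurs across cpumap, devmap, and hashtab in this series: XDP programs run inside NAPI with bottom halves disabled rather than under an explicit rcu_read_lock(), so a plain rcu_dereference() would splat. The generic shape of the fix, sketched in kernel context (elem/arr are placeholders):

/* Sketch: accept either an RCU read-side critical section (syscall
 * path) or BH-disabled context (XDP in NAPI) as the protection.
 */
struct elem;

static struct elem *lookup(struct elem __rcu **arr, u32 i, u32 max)
{
	if (i >= max)
		return NULL;
	return rcu_dereference_check(arr[i], rcu_read_lock_bh_held());
}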
@@ -601,7 +606,8 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
- return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
+ return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
+ __cpu_map_lookup_elem);
}
static int cpu_map_btf_id;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index aa516472ce46..2546dafd6672 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue {
struct list_head flush_node;
struct net_device *dev;
struct net_device *dev_rx;
+ struct bpf_prog *xdp_prog;
unsigned int count;
};
@@ -72,7 +73,7 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
struct bpf_map map;
- struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
+ struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
struct list_head list;
/* these are only used for DEVMAP_HASH type maps */
@@ -92,7 +93,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
int i;
struct hlist_head *hash;
- hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+ hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
if (hash != NULL)
for (i = 0; i < entries; i++)
INIT_HLIST_HEAD(&hash[i]);
@@ -143,7 +144,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
spin_lock_init(&dtab->index_lock);
} else {
- dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+ dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
@@ -197,6 +198,7 @@ static void dev_map_free(struct bpf_map *map)
list_del_rcu(&dtab->list);
spin_unlock(&dev_map_lock);
+ bpf_clear_redirect_map(map);
synchronize_rcu();
/* Make sure prior __dev_map_entry_free() have completed. */
@@ -224,7 +226,7 @@ static void dev_map_free(struct bpf_map *map)
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
- dev = dtab->netdev_map[i];
+ dev = rcu_dereference_raw(dtab->netdev_map[i]);
if (!dev)
continue;
@@ -257,6 +259,10 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+ */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
@@ -326,22 +332,69 @@ bool dev_map_can_have_prog(struct bpf_map *map)
return false;
}
+static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+ struct xdp_frame **frames, int n,
+ struct net_device *dev)
+{
+ struct xdp_txq_info txq = { .dev = dev };
+ struct xdp_buff xdp;
+ int i, nframes = 0;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ u32 act;
+ int err;
+
+ xdp_convert_frame_to_buff(xdpf, &xdp);
+ xdp.txq = &txq;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ err = xdp_update_frame_from_buff(&xdp, xdpf);
+ if (unlikely(err < 0))
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ frames[nframes++] = xdpf;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xdp_return_frame_rx_napi(xdpf);
+ break;
+ }
+ }
+ return nframes; /* sent frames count */
+}
+
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
+ unsigned int cnt = bq->count;
int sent = 0, err = 0;
+ int to_send = cnt;
int i;
- if (unlikely(!bq->count))
+ if (unlikely(!cnt))
return;
- for (i = 0; i < bq->count; i++) {
+ for (i = 0; i < cnt; i++) {
struct xdp_frame *xdpf = bq->q[i];
prefetch(xdpf);
}
- sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
+ if (bq->xdp_prog) {
+ to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
+ if (!to_send)
+ goto out;
+ }
+
+ sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
if (sent < 0) {
/* If ndo_xdp_xmit fails with an errno, no frames have
* been xmit'ed.
@@ -353,37 +406,34 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
/* If not all frames have been transmitted, it is our
* responsibility to free them
*/
- for (i = sent; unlikely(i < bq->count); i++)
+ for (i = sent; unlikely(i < to_send); i++)
xdp_return_frame_rx_napi(bq->q[i]);
- trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
- bq->dev_rx = NULL;
+out:
bq->count = 0;
- __list_del_clearprev(&bq->flush_node);
-}
-
-/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
- * from the driver before returning from its napi->poll() routine. The poll()
- * routine is called either from busy_poll context or net_rx_action signaled
- * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
- * net device can be torn down. On devmap tear down we ensure the flush list
- * is empty before completing to ensure all flush operations have completed.
- * When drivers update the bpf program they may need to ensure any flush ops
- * are also complete. Using synchronize_rcu or call_rcu will suffice for this
- * because both wait for napi context to exit.
+ trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
+}
+
+/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
+ * driver before returning from its napi->poll() routine. See the comment above
+ * xdp_do_flush() in filter.c.
*/
void __dev_flush(void)
{
struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
struct xdp_dev_bulk_queue *bq, *tmp;
- list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
bq_xmit_all(bq, XDP_XMIT_FLUSH);
+ bq->dev_rx = NULL;
+ bq->xdp_prog = NULL;
+ __list_del_clearprev(&bq->flush_node);
+ }
}
-/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
- * update happens in parallel here a dev_put wont happen until after reading the
- * ifindex.
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
*/
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -393,15 +443,17 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
if (key >= map->max_entries)
return NULL;
- obj = READ_ONCE(dtab->netdev_map[key]);
+ obj = rcu_dereference_check(dtab->netdev_map[key],
+ rcu_read_lock_bh_held());
return obj;
}
-/* Runs under RCU-read-side, plus in softirq under NAPI protection.
- * Thus, safe percpu variable access.
+/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
+ * variable access, and map elements stick around. See comment above
+ * xdp_do_flush() in filter.c.
*/
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
- struct net_device *dev_rx)
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
@@ -412,18 +464,22 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
/* Ingress dev_rx will be the same for all xdp_frames in the
 * bulk_queue, because the bq is stored per-CPU and must be flushed
 * at the end of the net_device driver's NAPI poll.
+ *
+ * Do the same with xdp_prog and flush_list since these fields
+ * are only ever modified together.
*/
- if (!bq->dev_rx)
+ if (!bq->dev_rx) {
bq->dev_rx = dev_rx;
+ bq->xdp_prog = xdp_prog;
+ list_add(&bq->flush_node, flush_list);
+ }
bq->q[bq->count++] = xdpf;
-
- if (!bq->flush_node.prev)
- list_add(&bq->flush_node, flush_list);
}
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
- struct net_device *dev_rx)
+ struct net_device *dev_rx,
+ struct bpf_prog *xdp_prog)
{
struct xdp_frame *xdpf;
int err;
@@ -439,55 +495,115 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
if (unlikely(!xdpf))
return -EOVERFLOW;
- bq_enqueue(dev, xdpf, dev_rx);
+ bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
return 0;
}
-static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
- struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
{
- struct xdp_txq_info txq = { .dev = dev };
- u32 act;
+ return __xdp_enqueue(dev, xdp, dev_rx, NULL);
+}
- xdp_set_data_meta_invalid(xdp);
- xdp->txq = &txq;
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ struct net_device *dev = dst->dev;
- act = bpf_prog_run_xdp(xdp_prog, xdp);
- switch (act) {
- case XDP_PASS:
- return xdp;
- case XDP_DROP:
- break;
- default:
- bpf_warn_invalid_xdp_action(act);
- fallthrough;
- case XDP_ABORTED:
- trace_xdp_exception(dev, xdp_prog, act);
- break;
- }
+ return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
+}
- xdp_return_buff(xdp);
- return NULL;
+static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
+ int exclude_ifindex)
+{
+ if (!obj || obj->dev->ifindex == exclude_ifindex ||
+ !obj->dev->netdev_ops->ndo_xdp_xmit)
+ return false;
+
+ if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
+ return false;
+
+ return true;
}
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
- struct net_device *dev_rx)
+static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
+ struct net_device *dev_rx,
+ struct xdp_frame *xdpf)
{
- return __xdp_enqueue(dev, xdp, dev_rx);
+ struct xdp_frame *nxdpf;
+
+ nxdpf = xdpf_clone(xdpf);
+ if (!nxdpf)
+ return -ENOMEM;
+
+ bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
+
+ return 0;
}
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
- struct net_device *dev_rx)
+int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
{
- struct net_device *dev = dst->dev;
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
+ struct bpf_dtab_netdev *dst, *last_dst = NULL;
+ struct hlist_head *head;
+ struct xdp_frame *xdpf;
+ unsigned int i;
+ int err;
- if (dst->xdp_prog) {
- xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
- if (!xdp)
- return 0;
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return -EOVERFLOW;
+
+ if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+ for (i = 0; i < map->max_entries; i++) {
+ dst = READ_ONCE(dtab->netdev_map[i]);
+ if (!is_valid_dst(dst, xdp, exclude_ifindex))
+ continue;
+
+ /* we only need n-1 clones; last_dst enqueued below */
+ if (!last_dst) {
+ last_dst = dst;
+ continue;
+ }
+
+ err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
+ if (err)
+ return err;
+
+ last_dst = dst;
+ }
+ } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
+ for (i = 0; i < dtab->n_buckets; i++) {
+ head = dev_map_index_hash(dtab, i);
+ hlist_for_each_entry_rcu(dst, head, index_hlist,
+ lockdep_is_held(&dtab->index_lock)) {
+ if (!is_valid_dst(dst, xdp, exclude_ifindex))
+ continue;
+
+ /* we only need n-1 clones; last_dst enqueued below */
+ if (!last_dst) {
+ last_dst = dst;
+ continue;
+ }
+
+ err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
+ if (err)
+ return err;
+
+ last_dst = dst;
+ }
+ }
}
- return __xdp_enqueue(dev, xdp, dev_rx);
+
+ /* consume the last copy of the frame */
+ if (last_dst)
+ bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
+ else
+ xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
+
+ return 0;
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
@@ -504,6 +620,87 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
return 0;
}
+static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ struct sk_buff *nskb;
+ int err;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ err = dev_map_generic_redirect(dst, nskb, xdp_prog);
+ if (unlikely(err)) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ return 0;
+}
+
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
+ struct bpf_dtab_netdev *dst, *last_dst = NULL;
+ struct hlist_head *head;
+ struct hlist_node *next;
+ unsigned int i;
+ int err;
+
+ if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+ for (i = 0; i < map->max_entries; i++) {
+ dst = READ_ONCE(dtab->netdev_map[i]);
+ if (!dst || dst->dev->ifindex == exclude_ifindex)
+ continue;
+
+ /* we only need n-1 clones; last_dst enqueued below */
+ if (!last_dst) {
+ last_dst = dst;
+ continue;
+ }
+
+ err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
+ if (err)
+ return err;
+
+ last_dst = dst;
+ }
+ } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
+ for (i = 0; i < dtab->n_buckets; i++) {
+ head = dev_map_index_hash(dtab, i);
+ hlist_for_each_entry_safe(dst, next, head, index_hlist) {
+ if (!dst || dst->dev->ifindex == exclude_ifindex)
+ continue;
+
+ /* we only need n-1 clones; last_dst enqueued below */
+ if (!last_dst) {
+ last_dst = dst;
+ continue;
+ }
+
+ err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
+ if (err)
+ return err;
+
+ last_dst = dst;
+ }
+ }
+ }
+
+ /* consume the first skb and return */
+ if (last_dst)
+ return dev_map_generic_redirect(last_dst, skb, xdp_prog);
+
+ /* dtab is empty */
+ consume_skb(skb);
+ return 0;
+}
+
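
With the multi-enqueue paths above and the BPF_F_BROADCAST / BPF_F_EXCLUDE_INGRESS flags registered further down, an XDP program can fan a frame out to every interface in a devmap while skipping the one it arrived on. A minimal libbpf-style sketch (map name and sizing are illustrative; requires a kernel with this series):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 32);
} forward_map SEC(".maps");

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	/* the key (second argument) is ignored when BPF_F_BROADCAST is set */
	return bpf_redirect_map(&forward_map, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";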
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
@@ -538,14 +735,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
if (k >= map->max_entries)
return -EINVAL;
- /* Use call_rcu() here to ensure any rcu critical sections have
- * completed as well as any flush operations because call_rcu
- * will wait for preempt-disable region to complete, NAPI in this
- * context. And additionally, the driver tear down ensures all
- * soft irqs are complete before removing the net device in the
- * case of dev_put equals zero.
- */
- old_dev = xchg(&dtab->netdev_map[k], NULL);
+ old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
return 0;
@@ -654,7 +844,7 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
* Remembering the driver side flush operation will happen before the
* net device is removed.
*/
- old_dev = xchg(&dtab->netdev_map[i], dev);
+ old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
@@ -730,12 +920,16 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
- return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
+ return __bpf_xdp_redirect_map(map, ifindex, flags,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+ __dev_map_lookup_elem);
}
static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
- return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
+ return __bpf_xdp_redirect_map(map, ifindex, flags,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+ __dev_map_hash_lookup_elem);
}
static int dev_map_btf_id;
@@ -830,10 +1024,10 @@ static int dev_map_notification(struct notifier_block *notifier,
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev, *odev;
- dev = READ_ONCE(dtab->netdev_map[i]);
+ dev = rcu_dereference(dtab->netdev_map[i]);
if (!dev || netdev != dev->dev)
continue;
- odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
+ odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
if (dev == odev)
call_rcu(&dev->rcu,
__dev_map_entry_free);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index d7ebb12ffffc..72c58cc516a3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -46,12 +46,12 @@
* events, kprobes and tracing to be invoked before the prior invocation
* from one of these contexts completed. sys_bpf() uses the same mechanism
* by pinning the task to the current CPU and incrementing the recursion
- * protection accross the map operation.
+ * protection across the map operation.
*
* This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
* operations like memory allocations (even with GFP_ATOMIC) from atomic
* contexts. This is required because even with GFP_ATOMIC the memory
- * allocator calls into code pathes which acquire locks with long held lock
+ * allocator calls into code paths which acquire locks with long held lock
* sections. To ensure the deterministic behaviour these locks are regular
* spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
* true atomic contexts on an RT kernel are the low level hardware
@@ -596,7 +596,8 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
struct htab_elem *l;
u32 hash, key_size;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -989,7 +990,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1082,7 +1084,8 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1148,7 +1151,8 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1202,7 +1206,8 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1276,7 +1281,8 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1311,7 +1317,8 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
key_size = map->key_size;
@@ -1401,6 +1408,100 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
+static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+ void *value, bool is_lru_map,
+ bool is_percpu, u64 flags)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct hlist_nulls_head *head;
+ unsigned long bflags;
+ struct htab_elem *l;
+ u32 hash, key_size;
+ struct bucket *b;
+ int ret;
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
+ b = __select_bucket(htab, hash);
+ head = &b->head;
+
+ ret = htab_lock_bucket(htab, b, hash, &bflags);
+ if (ret)
+ return ret;
+
+ l = lookup_elem_raw(head, hash, key, key_size);
+ if (!l) {
+ ret = -ENOENT;
+ } else {
+ if (is_percpu) {
+ u32 roundup_value_size = round_up(map->value_size, 8);
+ void __percpu *pptr;
+ int off = 0, cpu;
+
+ pptr = htab_elem_get_ptr(l, key_size);
+ for_each_possible_cpu(cpu) {
+ bpf_long_memcpy(value + off,
+ per_cpu_ptr(pptr, cpu),
+ roundup_value_size);
+ off += roundup_value_size;
+ }
+ } else {
+ u32 roundup_key_size = round_up(map->key_size, 8);
+
+ if (flags & BPF_F_LOCK)
+ copy_map_value_locked(map, value, l->key +
+ roundup_key_size,
+ true);
+ else
+ copy_map_value(map, value, l->key +
+ roundup_key_size);
+ check_and_init_map_lock(map, value);
+ }
+
+ hlist_nulls_del_rcu(&l->hash_node);
+ if (!is_lru_map)
+ free_htab_elem(htab, l);
+ }
+
+ htab_unlock_bucket(htab, b, hash, bflags);
+
+ if (is_lru_map && l)
+ bpf_lru_push_free(&htab->lru, &l->lru_node);
+
+ return ret;
+}
+
+static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
+ flags);
+}
+
+static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 flags)
+{
+ return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
+ flags);
+}
+
+static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
+ flags);
+}
+
+static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 flags)
+{
+ return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
+ flags);
+}
+
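
Userspace gains a matching atomic read-and-remove for hash-type maps via BPF_MAP_LOOKUP_AND_DELETE_ELEM. A hedged sketch using the libbpf wrapper added alongside this change (map fd, key and value types are assumed to match the map definition; older kernels return -ENOTSUPP):

#include <bpf/bpf.h>
#include <stdio.h>

/* Sketch: atomically consume one entry from a BPF_MAP_TYPE_HASH map. */
static int consume_entry(int map_fd, __u32 key, __u64 *value)
{
	int err = bpf_map_lookup_and_delete_elem_flags(map_fd, &key,
						       value, 0);
	if (err)
		fprintf(stderr, "lookup_and_delete failed: %d\n", err);
	return err;
}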
static int
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
const union bpf_attr *attr,
@@ -1934,6 +2035,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
.map_lookup_elem = htab_map_lookup_elem,
+ .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
.map_update_elem = htab_map_update_elem,
.map_delete_elem = htab_map_delete_elem,
.map_gen_lookup = htab_map_gen_lookup,
@@ -1954,6 +2056,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
.map_lookup_elem = htab_lru_map_lookup_elem,
+ .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
.map_update_elem = htab_lru_map_update_elem,
.map_delete_elem = htab_lru_map_delete_elem,
@@ -2077,6 +2180,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
.map_lookup_elem = htab_percpu_map_lookup_elem,
+ .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
.map_update_elem = htab_percpu_map_update_elem,
.map_delete_elem = htab_map_delete_elem,
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
@@ -2096,6 +2200,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
+ .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
.map_update_elem = htab_lru_percpu_map_update_elem,
.map_delete_elem = htab_lru_map_delete_elem,
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 544773970dbc..62cf00383910 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -14,6 +14,7 @@
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
+#include <linux/security.h>
#include "../../lib/kstrtox.h"
@@ -28,7 +29,7 @@
*/
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
return (unsigned long) map->ops->map_lookup_elem(map, key);
}
@@ -44,7 +45,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
void *, value, u64, flags)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
return map->ops->map_update_elem(map, key, value, flags);
}
@@ -61,7 +62,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
return map->ops->map_delete_elem(map, key);
}
@@ -692,38 +693,41 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
return -EINVAL;
}
-/* Per-cpu temp buffers which can be used by printf-like helpers for %s or %p
+/* Per-cpu temp buffers used by printf-like helpers to store the binary
+ * representation of the bprintf arguments.
*/
-#define MAX_PRINTF_BUF_LEN 512
+#define MAX_BPRINTF_BUF_LEN 512
-struct bpf_printf_buf {
- char tmp_buf[MAX_PRINTF_BUF_LEN];
+/* Support executing three nested bprintf helper calls on a given CPU */
+#define MAX_BPRINTF_NEST_LEVEL 3
+struct bpf_bprintf_buffers {
+ char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
-static DEFINE_PER_CPU(struct bpf_printf_buf, bpf_printf_buf);
-static DEFINE_PER_CPU(int, bpf_printf_buf_used);
+static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
+static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
static int try_get_fmt_tmp_buf(char **tmp_buf)
{
- struct bpf_printf_buf *bufs;
- int used;
+ struct bpf_bprintf_buffers *bufs;
+ int nest_level;
preempt_disable();
- used = this_cpu_inc_return(bpf_printf_buf_used);
- if (WARN_ON_ONCE(used > 1)) {
- this_cpu_dec(bpf_printf_buf_used);
+ nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
+ if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
+ this_cpu_dec(bpf_bprintf_nest_level);
preempt_enable();
return -EBUSY;
}
- bufs = this_cpu_ptr(&bpf_printf_buf);
- *tmp_buf = bufs->tmp_buf;
+ bufs = this_cpu_ptr(&bpf_bprintf_bufs);
+ *tmp_buf = bufs->tmp_bufs[nest_level - 1];
return 0;
}
void bpf_bprintf_cleanup(void)
{
- if (this_cpu_read(bpf_printf_buf_used)) {
- this_cpu_dec(bpf_printf_buf_used);
+ if (this_cpu_read(bpf_bprintf_nest_level)) {
+ this_cpu_dec(bpf_bprintf_nest_level);
preempt_enable();
}
}
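
The counter replaces a single in-use flag because a bprintf-family helper can itself be interrupted on the same CPU, e.g. by a tracing program firing from a kprobe or NMI while another call is mid-format; three buffers allow that depth of nesting. A userspace analogue of the reservation scheme (thread-local standing in for per-CPU):

#define MAX_NEST 3

static _Thread_local char bufs[MAX_NEST][512];
static _Thread_local int nest_level;

/* Analogue of try_get_fmt_tmp_buf(): each nested caller gets its own
 * buffer; deeper nesting is refused, mirroring the kernel's -EBUSY.
 */
static char *get_buf(void)
{
	if (++nest_level > MAX_NEST) {
		nest_level--;
		return NULL;
	}
	return bufs[nest_level - 1];
}

static void put_buf(void)
{
	if (nest_level)
		nest_level--;
}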
@@ -760,7 +764,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
return -EBUSY;
- tmp_buf_end = tmp_buf + MAX_PRINTF_BUF_LEN;
+ tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
*bin_args = (u32 *)tmp_buf;
}
@@ -1066,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return &bpf_probe_read_kernel_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return &bpf_probe_read_kernel_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_snprintf_btf:
return &bpf_snprintf_btf_proto;
case BPF_FUNC_snprintf:
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index b4ebd60a6c16..80da1db47c68 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -543,7 +543,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
return PTR_ERR(raw);
if (type == BPF_TYPE_PROG)
- ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
+ ret = bpf_prog_new_fd(raw);
else if (type == BPF_TYPE_MAP)
ret = bpf_map_new_fd(raw, f_flags);
else if (type == BPF_TYPE_LINK)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 1b7b8a6f34ee..423549d2c52e 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -232,7 +232,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
/* Start walking the trie from the root node ... */
- for (node = rcu_dereference(trie->root); node;) {
+ for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
+ node;) {
unsigned int next_bit;
size_t matchlen;
@@ -264,7 +265,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
* traverse down.
*/
next_bit = extract_bit(key->data, node->prefixlen);
- node = rcu_dereference(node->child[next_bit]);
+ node = rcu_dereference_check(node->child[next_bit],
+ rcu_read_lock_bh_held());
}
if (!found)
diff --git a/kernel/bpf/preload/iterators/iterators.bpf.c b/kernel/bpf/preload/iterators/iterators.bpf.c
index 52aa7b38e8b8..03af863314ea 100644
--- a/kernel/bpf/preload/iterators/iterators.bpf.c
+++ b/kernel/bpf/preload/iterators/iterators.bpf.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 4838922f723d..93a55391791a 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -102,7 +102,7 @@ static void reuseport_array_free(struct bpf_map *map)
/*
* ops->map_*_elem() will not be able to access this
* array now. Hence, this function only races with
- * bpf_sk_reuseport_detach() which was triggerred by
+ * bpf_sk_reuseport_detach() which was triggered by
* close() or disconnect().
*
* This function and bpf_sk_reuseport_detach() are
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index f25b719ac786..9e0c10c6892a 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -8,6 +8,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
+#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@@ -105,6 +106,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
if (rb) {
+ kmemleak_not_leak(pages);
rb->pages = pages;
rb->nr_pages = nr_pages;
return rb;
@@ -221,25 +223,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
return -ENOTSUPP;
}
-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
-{
- size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
-
- /* consumer page + producer page + 2 x data pages */
- return RINGBUF_POS_PAGES + 2 * data_pages;
-}
-
static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
struct bpf_ringbuf_map *rb_map;
- size_t mmap_sz;
rb_map = container_of(map, struct bpf_ringbuf_map, map);
- mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
-
- if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
- return -EINVAL;
+ if (vma->vm_flags & VM_WRITE) {
+ /* allow writable mapping for the consumer_pos only */
+ if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EPERM;
+ } else {
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+ /* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb,
vma->vm_pgoff + RINGBUF_PGOFF);
}
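
The asymmetric protection enforced here matches how the ring buffer pages are consumed from userspace: only the first page (consumer_pos) may be mapped writable, while the producer page and the double-mapped data pages stay read-only. A raw-mmap sketch of what a consumer does (libbpf's ring_buffer does this internally; sizes are illustrative):

#include <sys/mman.h>
#include <unistd.h>

/* Sketch: map a BPF ring buffer the way this check permits. */
static int map_ringbuf(int map_fd, size_t data_sz, void **cons, void **prod)
{
	long psz = sysconf(_SC_PAGESIZE);

	/* page 0: consumer_pos, the only page allowed PROT_WRITE */
	*cons = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
		     map_fd, 0);
	if (*cons == MAP_FAILED)
		return -1;
	/* page 1 onward: producer_pos + double-mapped data, read-only */
	*prod = mmap(NULL, psz + 2 * data_sz, PROT_READ, MAP_SHARED,
		     map_fd, psz);
	return *prod == MAP_FAILED ? -1 : 0;
}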
@@ -315,6 +312,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
return NULL;
len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
+ if (len > rb->mask + 1)
+ return NULL;
+
cons_pos = smp_load_acquire(&rb->consumer_pos);
if (in_nmi()) {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 941ca06d9dfa..e343f158e556 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -50,7 +50,8 @@ static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);
-int sysctl_unprivileged_bpf_disabled __read_mostly;
+int sysctl_unprivileged_bpf_disabled __read_mostly =
+ IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
@@ -72,11 +73,10 @@ static const struct bpf_map_ops * const bpf_map_types[] = {
* copy_from_user() call. However, this is not a concern since this function is
* meant to be a future-proofing of bits.
*/
-int bpf_check_uarg_tail_zero(void __user *uaddr,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
size_t expected_size,
size_t actual_size)
{
- unsigned char __user *addr = uaddr + expected_size;
int res;
if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
@@ -85,7 +85,12 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
if (actual_size <= expected_size)
return 0;
- res = check_zeroed_user(addr, actual_size - expected_size);
+ if (uaddr.is_kernel)
+ res = memchr_inv(uaddr.kernel + expected_size, 0,
+ actual_size - expected_size) == NULL;
+ else
+ res = check_zeroed_user(uaddr.user + expected_size,
+ actual_size - expected_size);
if (res < 0)
return res;
return res ? 0 : -E2BIG;
@@ -1004,6 +1009,17 @@ static void *__bpf_copy_key(void __user *ukey, u64 key_size)
return NULL;
}
+static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
+{
+ if (key_size)
+ return memdup_bpfptr(ukey, key_size);
+
+ if (!bpfptr_is_null(ukey))
+ return ERR_PTR(-EINVAL);
+
+ return NULL;
+}
+
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
@@ -1074,10 +1090,10 @@ err_put:
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
-static int map_update_elem(union bpf_attr *attr)
+static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
- void __user *ukey = u64_to_user_ptr(attr->key);
- void __user *uvalue = u64_to_user_ptr(attr->value);
+ bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
+ bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
@@ -1103,7 +1119,7 @@ static int map_update_elem(union bpf_attr *attr)
goto err_put;
}
- key = __bpf_copy_key(ukey, map->key_size);
+ key = ___bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@@ -1123,7 +1139,7 @@ static int map_update_elem(union bpf_attr *attr)
goto free_key;
err = -EFAULT;
- if (copy_from_user(value, uvalue, value_size) != 0)
+ if (copy_from_bpfptr(value, uvalue, value_size) != 0)
goto free_value;
err = bpf_map_update_value(map, f, key, value, attr->flags);
@@ -1468,7 +1484,7 @@ free_buf:
return err;
}
-#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
+#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
@@ -1484,6 +1500,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
return -EINVAL;
+ if (attr->flags & ~BPF_F_LOCK)
+ return -EINVAL;
+
f = fdget(ufd);
map = __bpf_map_get(f);
if (IS_ERR(map))
@@ -1494,24 +1513,47 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
goto err_put;
}
+ if (attr->flags &&
+ (map->map_type == BPF_MAP_TYPE_QUEUE ||
+ map->map_type == BPF_MAP_TYPE_STACK)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ if ((attr->flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(map)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
}
- value_size = map->value_size;
+ value_size = bpf_map_value_size(map);
err = -ENOMEM;
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
+ err = -ENOTSUPP;
if (map->map_type == BPF_MAP_TYPE_QUEUE ||
map->map_type == BPF_MAP_TYPE_STACK) {
err = map->ops->map_pop_elem(map, value);
- } else {
- err = -ENOTSUPP;
+ } else if (map->map_type == BPF_MAP_TYPE_HASH ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ if (!bpf_map_is_dev_bound(map)) {
+ bpf_disable_instrumentation();
+ rcu_read_lock();
+ err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
+ rcu_read_unlock();
+ bpf_enable_instrumentation();
+ }
}
if (err)
@@ -1931,6 +1973,11 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
attr->expected_attach_type =
BPF_CGROUP_INET_SOCK_CREATE;
break;
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ if (!attr->expected_attach_type)
+ attr->expected_attach_type =
+ BPF_SK_REUSEPORT_SELECT;
+ break;
}
}
@@ -2014,6 +2061,15 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
if (expected_attach_type == BPF_SK_LOOKUP)
return 0;
return -EINVAL;
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ switch (expected_attach_type) {
+ case BPF_SK_REUSEPORT_SELECT:
+ case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ case BPF_PROG_TYPE_SYSCALL:
case BPF_PROG_TYPE_EXT:
if (expected_attach_type)
return -EINVAL;
@@ -2073,9 +2129,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
+#define BPF_PROG_LOAD_LAST_FIELD fd_array
-static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
+static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog, *dst_prog = NULL;
@@ -2100,8 +2156,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
return -EPERM;
/* copy eBPF program license from user space */
- if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
- sizeof(license) - 1) < 0)
+ if (strncpy_from_bpfptr(license,
+ make_bpfptr(attr->license, uattr.is_kernel),
+ sizeof(license) - 1) < 0)
return -EFAULT;
license[sizeof(license) - 1] = 0;
@@ -2185,8 +2242,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
prog->len = attr->insn_cnt;
err = -EFAULT;
- if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
- bpf_prog_insn_size(prog)) != 0)
+ if (copy_from_bpfptr(prog->insns,
+ make_bpfptr(attr->insns, uattr.is_kernel),
+ bpf_prog_insn_size(prog)) != 0)
goto free_prog_sec;
prog->orig_prog = NULL;
@@ -3422,7 +3480,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
u32 ulen;
int err;
- err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+ err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@@ -3701,7 +3759,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
- err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+ err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@@ -3744,7 +3802,7 @@ static int bpf_btf_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
- err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
+ err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
if (err)
return err;
@@ -3761,7 +3819,7 @@ static int bpf_link_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
- err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+ err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@@ -3824,7 +3882,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
-static int bpf_btf_load(const union bpf_attr *attr)
+static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
{
if (CHECK_ATTR(BPF_BTF_LOAD))
return -EINVAL;
@@ -3832,7 +3890,7 @@ static int bpf_btf_load(const union bpf_attr *attr)
if (!bpf_capable())
return -EPERM;
- return btf_new_fd(attr);
+ return btf_new_fd(attr, uattr);
}
#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
@@ -4022,13 +4080,14 @@ err_put:
return err;
}
-static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
+ struct bpf_prog *prog)
{
if (attr->link_create.attach_type != prog->expected_attach_type)
return -EINVAL;
if (prog->expected_attach_type == BPF_TRACE_ITER)
- return bpf_iter_link_attach(attr, prog);
+ return bpf_iter_link_attach(attr, uattr, prog);
else if (prog->type == BPF_PROG_TYPE_EXT)
return bpf_tracing_prog_attach(prog,
attr->link_create.target_fd,
@@ -4037,7 +4096,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
}
#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
-static int link_create(union bpf_attr *attr)
+static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
@@ -4056,7 +4115,7 @@ static int link_create(union bpf_attr *attr)
goto out;
if (prog->type == BPF_PROG_TYPE_EXT) {
- ret = tracing_bpf_link_attach(attr, prog);
+ ret = tracing_bpf_link_attach(attr, uattr, prog);
goto out;
}
@@ -4077,7 +4136,7 @@ static int link_create(union bpf_attr *attr)
ret = cgroup_bpf_link_attach(attr, prog);
break;
case BPF_PROG_TYPE_TRACING:
- ret = tracing_bpf_link_attach(attr, prog);
+ ret = tracing_bpf_link_attach(attr, uattr, prog);
break;
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_SK_LOOKUP:
@@ -4365,7 +4424,7 @@ out_prog_put:
return ret;
}
-SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
+static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
int err;
@@ -4380,7 +4439,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
memset(&attr, 0, sizeof(attr));
- if (copy_from_user(&attr, uattr, size) != 0)
+ if (copy_from_bpfptr(&attr, uattr, size) != 0)
return -EFAULT;
err = security_bpf(cmd, &attr, size);
@@ -4395,7 +4454,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = map_lookup_elem(&attr);
break;
case BPF_MAP_UPDATE_ELEM:
- err = map_update_elem(&attr);
+ err = map_update_elem(&attr, uattr);
break;
case BPF_MAP_DELETE_ELEM:
err = map_delete_elem(&attr);
@@ -4422,21 +4481,21 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_prog_detach(&attr);
break;
case BPF_PROG_QUERY:
- err = bpf_prog_query(&attr, uattr);
+ err = bpf_prog_query(&attr, uattr.user);
break;
case BPF_PROG_TEST_RUN:
- err = bpf_prog_test_run(&attr, uattr);
+ err = bpf_prog_test_run(&attr, uattr.user);
break;
case BPF_PROG_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
+ err = bpf_obj_get_next_id(&attr, uattr.user,
&prog_idr, &prog_idr_lock);
break;
case BPF_MAP_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
+ err = bpf_obj_get_next_id(&attr, uattr.user,
&map_idr, &map_idr_lock);
break;
case BPF_BTF_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
+ err = bpf_obj_get_next_id(&attr, uattr.user,
&btf_idr, &btf_idr_lock);
break;
case BPF_PROG_GET_FD_BY_ID:
@@ -4446,38 +4505,38 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_map_get_fd_by_id(&attr);
break;
case BPF_OBJ_GET_INFO_BY_FD:
- err = bpf_obj_get_info_by_fd(&attr, uattr);
+ err = bpf_obj_get_info_by_fd(&attr, uattr.user);
break;
case BPF_RAW_TRACEPOINT_OPEN:
err = bpf_raw_tracepoint_open(&attr);
break;
case BPF_BTF_LOAD:
- err = bpf_btf_load(&attr);
+ err = bpf_btf_load(&attr, uattr);
break;
case BPF_BTF_GET_FD_BY_ID:
err = bpf_btf_get_fd_by_id(&attr);
break;
case BPF_TASK_FD_QUERY:
- err = bpf_task_fd_query(&attr, uattr);
+ err = bpf_task_fd_query(&attr, uattr.user);
break;
case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
err = map_lookup_and_delete_elem(&attr);
break;
case BPF_MAP_LOOKUP_BATCH:
- err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
+ err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
break;
case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
- err = bpf_map_do_batch(&attr, uattr,
+ err = bpf_map_do_batch(&attr, uattr.user,
BPF_MAP_LOOKUP_AND_DELETE_BATCH);
break;
case BPF_MAP_UPDATE_BATCH:
- err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
+ err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
break;
case BPF_MAP_DELETE_BATCH:
- err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
+ err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
break;
case BPF_LINK_CREATE:
- err = link_create(&attr);
+ err = link_create(&attr, uattr);
break;
case BPF_LINK_UPDATE:
err = link_update(&attr);
@@ -4486,7 +4545,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_link_get_fd_by_id(&attr);
break;
case BPF_LINK_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
+ err = bpf_obj_get_next_id(&attr, uattr.user,
&link_idr, &link_idr_lock);
break;
case BPF_ENABLE_STATS:
@@ -4508,3 +4567,94 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
return err;
}
+
+SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
+{
+ return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
+}
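
The refactoring above hinges on bpfptr_t, a tagged user/kernel pointer. A sketch of its shape (the real definition in include/linux/bpfptr.h is built on sockptr_t); one such type lets the same __sys_bpf() body copy attributes either from user space or from a kernel buffer supplied by a BPF_PROG_TYPE_SYSCALL program:

typedef struct {
	union {
		void		*kernel;
		void __user	*user;
	};
	bool		is_kernel;
} bpfptr_t;

#define USER_BPFPTR(p)		((bpfptr_t) { .user = (p) })
#define KERNEL_BPFPTR(p)	((bpfptr_t) { .kernel = (p), .is_kernel = true })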
+
+static bool syscall_prog_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= U16_MAX)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
+{
+ switch (cmd) {
+ case BPF_MAP_CREATE:
+ case BPF_MAP_UPDATE_ELEM:
+ case BPF_MAP_FREEZE:
+ case BPF_PROG_LOAD:
+ case BPF_BTF_LOAD:
+ break;
+ /* case BPF_PROG_TEST_RUN:
+ * is not part of this list to prevent recursive test_run
+ */
+ default:
+ return -EINVAL;
+ }
+ return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
+}
+
+static const struct bpf_func_proto bpf_sys_bpf_proto = {
+ .func = bpf_sys_bpf,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+};
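
A hedged sketch of what a BPF_PROG_TYPE_SYSCALL program using this helper could look like (modelled on the selftest style; assumes bpf/bpf_helpers.h and a generated stub for bpf_sys_bpf, and that the program is loaded sleepable, as the verifier change below enforces):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("syscall")
int create_array(void *ctx)
{
	union bpf_attr attr = {
		.map_type	= BPF_MAP_TYPE_ARRAY,
		.key_size	= 4,
		.value_size	= 8,
		.max_entries	= 1,
	};

	/* returns a map fd (>= 0) on success, like the syscall would */
	return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

char _license[] SEC("license") = "GPL";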
+
+const struct bpf_func_proto * __weak
+tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return bpf_base_func_proto(func_id);
+}
+
+BPF_CALL_1(bpf_sys_close, u32, fd)
+{
+	/* When a bpf program calls this helper there must not be
+	 * an outstanding fdget() whose matching fdput() has not completed.
+	/* When a bpf program calls this helper there must not be
+	 * an outstanding fdget() whose matching fdput() has not completed.
+ * This helper is allowed in the following callchain only:
+ * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
+ */
+ return close_fd(fd);
+}
+
+static const struct bpf_func_proto bpf_sys_close_proto = {
+ .func = bpf_sys_close,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_sys_bpf:
+ return &bpf_sys_bpf_proto;
+ case BPF_FUNC_btf_find_by_name_kind:
+ return &bpf_btf_find_by_name_kind_proto;
+ case BPF_FUNC_sys_close:
+ return &bpf_sys_close_proto;
+ default:
+ return tracing_prog_func_proto(func_id, prog);
+ }
+}
+
+const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
+ .get_func_proto = syscall_prog_func_proto,
+ .is_valid_access = syscall_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops bpf_syscall_prog_ops = {
+ .test_run = bpf_prog_test_run_syscall,
+};
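
And a companion user-space sketch for executing such a program once through the bpf_prog_test_run_syscall() path wired up above (raw syscall shown for clarity; a real loader would normally go through libbpf):

static long run_syscall_prog(int prog_fd, void *ctx, unsigned int ctx_size)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (unsigned long)ctx;
	attr.test.ctx_size_in = ctx_size;

	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}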
diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
index ceac5281bd31..3d7127f439a1 100644
--- a/kernel/bpf/tnum.c
+++ b/kernel/bpf/tnum.c
@@ -111,28 +111,31 @@ struct tnum tnum_xor(struct tnum a, struct tnum b)
return TNUM(v & ~mu, mu);
}
-/* half-multiply add: acc += (unknown * mask * value).
- * An intermediate step in the multiply algorithm.
+/* Generate partial products by multiplying each bit in the multiplier (tnum a)
+ * with the multiplicand (tnum b), and add the partial products after
+ * appropriately bit-shifting them. Instead of directly performing tnum addition
+ * on the generated partial products, equivalently, decompose each partial
+ * product into two tnums, consisting of the value-sum (acc_v) and the
+ * mask-sum (acc_m) and then perform tnum addition on them. The following paper
+ * explains the algorithm in more detail: https://arxiv.org/abs/2105.05398.
*/
-static struct tnum hma(struct tnum acc, u64 value, u64 mask)
-{
- while (mask) {
- if (mask & 1)
- acc = tnum_add(acc, TNUM(0, value));
- mask >>= 1;
- value <<= 1;
- }
- return acc;
-}
-
struct tnum tnum_mul(struct tnum a, struct tnum b)
{
- struct tnum acc;
- u64 pi;
-
- pi = a.value * b.value;
- acc = hma(TNUM(pi, 0), a.mask, b.mask | b.value);
- return hma(acc, b.mask, a.value);
+ u64 acc_v = a.value * b.value;
+ struct tnum acc_m = TNUM(0, 0);
+
+ while (a.value || a.mask) {
+ /* LSB of tnum a is a certain 1 */
+ if (a.value & 1)
+ acc_m = tnum_add(acc_m, TNUM(0, b.mask));
+ /* LSB of tnum a is uncertain */
+ else if (a.mask & 1)
+ acc_m = tnum_add(acc_m, TNUM(0, b.value | b.mask));
+ /* Note: nothing to add when the LSB of tnum a is a certain 0 */
+ a = tnum_rshift(a, 1);
+ b = tnum_lshift(b, 1);
+ }
+ return tnum_add(TNUM(acc_v, 0), acc_m);
}
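
A worked example of the new multiplication (not from the patch): take a = TNUM(4, 0), the constant 4, and b = TNUM(0, 3), i.e. {0, 1, 2, 3}. Then acc_v = 4 * 0 = 0, and the loop sees no set LSB until a has been shifted right twice, at which point the certain 1 adds TNUM(0, b.mask) with b already shifted left twice, giving acc_m = TNUM(0, 12). The final tnum_add(TNUM(0, 0), TNUM(0, 12)) = TNUM(0, 12) describes exactly the true product set {0, 4, 8, 12}: bits 0-1 known zero, bits 2-3 unknown.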
/* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 2d44b5aa0057..28a3630c48ee 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -552,7 +552,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
* __bpf_prog_enter returns:
* 0 - skip execution of the bpf prog
* 1 - execute bpf prog
- * [2..MAX_U64] - excute bpf prog and record execution time.
+ * [2..MAX_U64] - execute bpf prog and record execution time.
* This is start time.
*/
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 757476c91c98..be38bb930bf1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -47,7 +47,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
- * Since it's analyzing all pathes through the program, the length of the
+ * Since it's analyzing all paths through the program, the length of the
* analysis is limited to 64k insn, which may be hit even if total number of
* insn is less then 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
@@ -132,7 +132,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
- * returns ether pointer to map value or NULL.
+ * returns either pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
@@ -737,81 +737,104 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, "\n");
}
-#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
-static int copy_##NAME##_state(struct bpf_func_state *dst, \
- const struct bpf_func_state *src) \
-{ \
- if (!src->FIELD) \
- return 0; \
- if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
- /* internal bug, make state invalid to reject the program */ \
- memset(dst, 0, sizeof(*dst)); \
- return -EFAULT; \
- } \
- memcpy(dst->FIELD, src->FIELD, \
- sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
- return 0; \
-}
-/* copy_reference_state() */
-COPY_STATE_FN(reference, acquired_refs, refs, 1)
-/* copy_stack_state() */
-COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
-#undef COPY_STATE_FN
-
-#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
-static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
- bool copy_old) \
-{ \
- u32 old_size = state->COUNT; \
- struct bpf_##NAME##_state *new_##FIELD; \
- int slot = size / SIZE; \
- \
- if (size <= old_size || !size) { \
- if (copy_old) \
- return 0; \
- state->COUNT = slot * SIZE; \
- if (!size && old_size) { \
- kfree(state->FIELD); \
- state->FIELD = NULL; \
- } \
- return 0; \
- } \
- new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
- GFP_KERNEL); \
- if (!new_##FIELD) \
- return -ENOMEM; \
- if (copy_old) { \
- if (state->FIELD) \
- memcpy(new_##FIELD, state->FIELD, \
- sizeof(*new_##FIELD) * (old_size / SIZE)); \
- memset(new_##FIELD + old_size / SIZE, 0, \
- sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
- } \
- state->COUNT = slot * SIZE; \
- kfree(state->FIELD); \
- state->FIELD = new_##FIELD; \
- return 0; \
-}
-/* realloc_reference_state() */
-REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
-/* realloc_stack_state() */
-REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
-#undef REALLOC_STATE_FN
-
-/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
- * make it consume minimal amount of memory. check_stack_write() access from
- * the program calls into realloc_func_state() to grow the stack size.
- * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
- * which realloc_stack_state() copies over. It points to previous
- * bpf_verifier_state which is never reallocated.
+/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
+ * small to hold src. This is different from krealloc since we don't want to preserve
+ * the contents of dst.
+ *
+ * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
+ * not be allocated.
*/
-static int realloc_func_state(struct bpf_func_state *state, int stack_size,
- int refs_size, bool copy_old)
+static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
- int err = realloc_reference_state(state, refs_size, copy_old);
- if (err)
- return err;
- return realloc_stack_state(state, stack_size, copy_old);
+ size_t bytes;
+
+ if (ZERO_OR_NULL_PTR(src))
+ goto out;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ if (ksize(dst) < bytes) {
+ kfree(dst);
+ dst = kmalloc_track_caller(bytes, flags);
+ if (!dst)
+ return NULL;
+ }
+
+ memcpy(dst, src, bytes);
+out:
+ return dst ? dst : ZERO_SIZE_PTR;
+}
+
+/* resize an array from old_n items to new_n items. the array is reallocated if it's too
+ * small to hold new_n items. new items are zeroed out if the array grows.
+ *
+ * Contrary to krealloc_array, does not free arr if new_n is zero.
+ */
+static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
+{
+ if (!new_n || old_n == new_n)
+ goto out;
+
+ arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
+ if (!arr)
+ return NULL;
+
+ if (new_n > old_n)
+ memset(arr + old_n * size, 0, (new_n - old_n) * size);
+
+out:
+ return arr ? arr : ZERO_SIZE_PTR;
+}
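
An illustrative call pattern for the two helpers (src_refs and the element counts are hypothetical); note how copy_array() leans on ksize() to skip reallocation when the old buffer is already big enough:

	struct bpf_reference_state *refs = NULL;

	refs = copy_array(refs, src_refs, 4, sizeof(*refs), GFP_KERNEL);	/* allocates */
	refs = copy_array(refs, src_refs, 2, sizeof(*refs), GFP_KERNEL);	/* reuses: ksize(refs) suffices */
	refs = realloc_array(refs, 2, 6, sizeof(*refs));			/* grows, zeroes entries 2..5 */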
+
+static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
+{
+ dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
+ sizeof(struct bpf_reference_state), GFP_KERNEL);
+ if (!dst->refs)
+ return -ENOMEM;
+
+ dst->acquired_refs = src->acquired_refs;
+ return 0;
+}
+
+static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
+{
+ size_t n = src->allocated_stack / BPF_REG_SIZE;
+
+ dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
+ GFP_KERNEL);
+ if (!dst->stack)
+ return -ENOMEM;
+
+ dst->allocated_stack = src->allocated_stack;
+ return 0;
+}
+
+static int resize_reference_state(struct bpf_func_state *state, size_t n)
+{
+ state->refs = realloc_array(state->refs, state->acquired_refs, n,
+ sizeof(struct bpf_reference_state));
+ if (!state->refs)
+ return -ENOMEM;
+
+ state->acquired_refs = n;
+ return 0;
+}
+
+static int grow_stack_state(struct bpf_func_state *state, int size)
+{
+ size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
+
+ if (old_n >= n)
+ return 0;
+
+ state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
+ if (!state->stack)
+ return -ENOMEM;
+
+ state->allocated_stack = size;
+ return 0;
}
/* Acquire a pointer id from the env and update the state->refs to include
@@ -825,7 +848,7 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
int new_ofs = state->acquired_refs;
int id, err;
- err = realloc_reference_state(state, state->acquired_refs + 1, true);
+ err = resize_reference_state(state, state->acquired_refs + 1);
if (err)
return err;
id = ++env->id_gen;
@@ -854,18 +877,6 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
return -EINVAL;
}
-static int transfer_reference_state(struct bpf_func_state *dst,
- struct bpf_func_state *src)
-{
- int err = realloc_reference_state(dst, src->acquired_refs, false);
- if (err)
- return err;
- err = copy_reference_state(dst, src);
- if (err)
- return err;
- return 0;
-}
-
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
@@ -904,10 +915,6 @@ static int copy_func_state(struct bpf_func_state *dst,
{
int err;
- err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
- false);
- if (err)
- return err;
memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
err = copy_reference_state(dst, src);
if (err)
@@ -919,16 +926,13 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
- u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
int i, err;
- if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
- kfree(dst_state->jmp_history);
- dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
- if (!dst_state->jmp_history)
- return -ENOMEM;
- }
- memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
+ dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
+ src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
+ GFP_USER);
+ if (!dst_state->jmp_history)
+ return -ENOMEM;
dst_state->jmp_history_cnt = src->jmp_history_cnt;
/* if dst has more stack frames then src frame, free them */
@@ -2590,8 +2594,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
struct bpf_reg_state *reg = NULL;
- err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
- state->acquired_refs, true);
+ err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -2613,7 +2616,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
* stack slot address like [fp - 8]. Other spill of
- * scalar via different register has to be conervative.
+ * scalar via different register has to be conservative.
* Backtrack from here and mark all registers as precise
* that contributed into 'reg' being a constant.
*/
@@ -2753,8 +2756,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
if (value_reg && register_is_null(value_reg))
writing_zero = true;
- err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
- state->acquired_refs, true);
+ err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
if (err)
return err;
@@ -5629,7 +5631,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
subprog /* subprog number within this prog */);
/* Transfer references to the callee */
- err = transfer_reference_state(callee, caller);
+ err = copy_reference_state(callee, caller);
if (err)
return err;
@@ -5780,7 +5782,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
}
/* Transfer references to the caller */
- err = transfer_reference_state(caller, callee);
+ err = copy_reference_state(caller, callee);
if (err)
return err;
@@ -6409,18 +6411,10 @@ enum {
};
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
- const struct bpf_reg_state *off_reg,
- u32 *alu_limit, u8 opcode)
+ u32 *alu_limit, bool mask_to_left)
{
- bool off_is_neg = off_reg->smin_value < 0;
- bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
- (opcode == BPF_SUB && !off_is_neg);
u32 max = 0, ptr_limit = 0;
- if (!tnum_is_const(off_reg->var_off) &&
- (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
- return REASON_BOUNDS;
-
switch (ptr_reg->type) {
case PTR_TO_STACK:
/* Offset 0 is out-of-bounds, but acceptable start for the
@@ -6486,15 +6480,41 @@ static bool sanitize_needed(u8 opcode)
return opcode == BPF_ADD || opcode == BPF_SUB;
}
+struct bpf_sanitize_info {
+ struct bpf_insn_aux_data aux;
+ bool mask_to_left;
+};
+
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ u32 next_idx, u32 curr_idx)
+{
+ struct bpf_verifier_state *branch;
+ struct bpf_reg_state *regs;
+
+ branch = push_stack(env, next_idx, curr_idx, true);
+ if (branch && insn) {
+ regs = branch->frame[branch->curframe]->regs;
+ if (BPF_SRC(insn->code) == BPF_K) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ } else if (BPF_SRC(insn->code) == BPF_X) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->src_reg);
+ }
+ }
+ return branch;
+}
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg,
struct bpf_reg_state *dst_reg,
- struct bpf_insn_aux_data *tmp_aux,
+ struct bpf_sanitize_info *info,
const bool commit_window)
{
- struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
struct bpf_verifier_state *vstate = env->cur_state;
bool off_is_imm = tnum_is_const(off_reg->var_off);
bool off_is_neg = off_reg->smin_value < 0;
@@ -6515,7 +6535,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (vstate->speculative)
goto do_sim;
- err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (!commit_window) {
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+ return REASON_BOUNDS;
+
+ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+ }
+
+ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
if (err < 0)
return err;
@@ -6523,8 +6552,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
/* In commit phase we narrow the masking window based on
* the observed pointer move after the simulated operation.
*/
- alu_state = tmp_aux->alu_state;
- alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+ alu_state = info->aux.alu_state;
+ alu_limit = abs(info->aux.alu_limit - alu_limit);
} else {
alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
@@ -6539,8 +6568,12 @@ do_sim:
/* If we're in commit phase, we're done here given we already
* pushed the truncated dst_reg into the speculative verification
* stack.
+ *
+ * Also, when register is a known constant, we rewrite register-based
+ * operation to immediate-based, and thus do not need masking (and as
+ * a consequence, do not need to simulate the zero-truncation either).
*/
- if (commit_window)
+ if (commit_window || off_is_imm)
return 0;
/* Simulate and find potential out-of-bounds access under
@@ -6556,12 +6589,26 @@ do_sim:
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
- ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+ env->insn_idx);
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
return !ret ? REASON_STACK : 0;
}
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+
+ /* If we simulate paths under speculation, we don't update the
+ * insn as 'seen' such that when we verify unreachable paths in
+ * the non-speculative domain, sanitize_dead_code() can still
+ * rewrite/sanitize them.
+ */
+ if (!vstate->speculative)
+ env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
static int sanitize_err(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int reason,
const struct bpf_reg_state *off_reg,
@@ -6685,7 +6732,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
- struct bpf_insn_aux_data tmp_aux = {};
+ struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
int ret;
@@ -6754,7 +6801,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (sanitize_needed(opcode)) {
ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
- &tmp_aux, false);
+ &info, false);
if (ret < 0)
return sanitize_err(env, insn, ret, off_reg, dst_reg);
}
@@ -6895,7 +6942,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return -EACCES;
if (sanitize_needed(opcode)) {
ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
- &tmp_aux, true);
+ &info, true);
if (ret < 0)
return sanitize_err(env, insn, ret, off_reg, dst_reg);
}
@@ -7084,11 +7131,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
s32 smin_val = src_reg->s32_min_value;
u32 umax_val = src_reg->u32_max_value;
- /* Assuming scalar64_min_max_and will be called so its safe
- * to skip updating register for known 32-bit case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
@@ -7108,7 +7154,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
dst_reg->s32_min_value = dst_reg->u32_min_value;
dst_reg->s32_max_value = dst_reg->u32_max_value;
}
-
}
static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -7155,11 +7200,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
s32 smin_val = src_reg->s32_min_value;
u32 umin_val = src_reg->u32_min_value;
- /* Assuming scalar64_min_max_or will be called so it is safe
- * to skip updating register for known case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
@@ -7224,11 +7268,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
struct tnum var32_off = tnum_subreg(dst_reg->var_off);
s32 smin_val = src_reg->s32_min_value;
- /* Assuming scalar64_min_max_xor will be called so it is safe
- * to skip updating register for known case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get both minimum and maximum from the var32_off. */
dst_reg->u32_min_value = var32_off.value;
@@ -8744,14 +8787,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (err)
return err;
}
+
if (pred == 1) {
- /* only follow the goto, ignore fall-through */
+ /* Only follow the goto, ignore fall-through. If needed, push
+ * the fall-through branch for simulation under speculative
+ * execution.
+ */
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
- /* only follow fall-through branch, since
- * that's where the program will go
+ /* Only follow the fall-through branch, since that's where the
+ * program will go. If needed, push the goto branch for
+ * simulation under speculative execution.
*/
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
return 0;
}
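
For intuition, a sketch in plain C (not BPF bytecode) of the gadget shape this guards against; previously only the architecturally taken side of a known-outcome branch was walked:

static unsigned long gadget(unsigned long *array, unsigned long index,
			    unsigned long max)
{
	if (index > max)	/* branch the CPU may mispredict */
		return 0;
	return array[index];	/* reachable speculatively even when index > max */
}

With the hunks above, the verifier also pushes the dead branch as a speculative path, with the compared registers marked unknown, so sanitize_ptr_alu() still masks pointer arithmetic reachable only under misprediction.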
@@ -8913,12 +8970,14 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
mark_reg_known_zero(env, regs, insn->dst_reg);
dst_reg->map_ptr = map;
- if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
+ if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
+ insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
dst_reg->type = PTR_TO_MAP_VALUE;
dst_reg->off = aux->map_off;
if (map_value_has_spin_lock(map))
dst_reg->id = ++env->id_gen;
- } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
+ } else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
+ insn->src_reg == BPF_PSEUDO_MAP_IDX) {
dst_reg->type = CONST_PTR_TO_MAP;
} else {
verbose(env, "bpf verifier is misconfigured\n");
@@ -9049,7 +9108,7 @@ static int check_return_code(struct bpf_verifier_env *env)
!prog->aux->attach_func_proto->type)
return 0;
- /* eBPF calling convetion is such that R0 is used
+ /* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
@@ -9434,7 +9493,7 @@ static int check_abnormal_return(struct bpf_verifier_env *env)
static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ bpfptr_t uattr)
{
const struct btf_type *type, *func_proto, *ret_type;
u32 i, nfuncs, urec_size, min_size;
@@ -9443,7 +9502,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
struct bpf_func_info_aux *info_aux = NULL;
struct bpf_prog *prog;
const struct btf *btf;
- void __user *urecord;
+ bpfptr_t urecord;
u32 prev_offset = 0;
bool scalar_return;
int ret = -ENOMEM;
@@ -9471,7 +9530,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
prog = env->prog;
btf = prog->aux->btf;
- urecord = u64_to_user_ptr(attr->func_info);
+ urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
min_size = min_t(u32, krec_size, urec_size);
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
@@ -9489,13 +9548,15 @@ static int check_btf_func(struct bpf_verifier_env *env,
/* set the size kernel expects so loader can zero
* out the rest of the record.
*/
- if (put_user(min_size, &uattr->func_info_rec_size))
+ if (copy_to_bpfptr_offset(uattr,
+ offsetof(union bpf_attr, func_info_rec_size),
+ &min_size, sizeof(min_size)))
ret = -EFAULT;
}
goto err_free;
}
- if (copy_from_user(&krecord[i], urecord, min_size)) {
+ if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
ret = -EFAULT;
goto err_free;
}
@@ -9547,7 +9608,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
}
prev_offset = krecord[i].insn_off;
- urecord += urec_size;
+ bpfptr_add(&urecord, urec_size);
}
prog->aux->func_info = krecord;
@@ -9579,14 +9640,14 @@ static void adjust_btf_func(struct bpf_verifier_env *env)
static int check_btf_line(struct bpf_verifier_env *env,
const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ bpfptr_t uattr)
{
u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
struct bpf_subprog_info *sub;
struct bpf_line_info *linfo;
struct bpf_prog *prog;
const struct btf *btf;
- void __user *ulinfo;
+ bpfptr_t ulinfo;
int err;
nr_linfo = attr->line_info_cnt;
@@ -9612,7 +9673,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
s = 0;
sub = env->subprog_info;
- ulinfo = u64_to_user_ptr(attr->line_info);
+ ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
expected_size = sizeof(struct bpf_line_info);
ncopy = min_t(u32, expected_size, rec_size);
for (i = 0; i < nr_linfo; i++) {
@@ -9620,14 +9681,15 @@ static int check_btf_line(struct bpf_verifier_env *env,
if (err) {
if (err == -E2BIG) {
verbose(env, "nonzero tailing record in line_info");
- if (put_user(expected_size,
- &uattr->line_info_rec_size))
+ if (copy_to_bpfptr_offset(uattr,
+ offsetof(union bpf_attr, line_info_rec_size),
+ &expected_size, sizeof(expected_size)))
err = -EFAULT;
}
goto err_free;
}
- if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
+ if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
err = -EFAULT;
goto err_free;
}
@@ -9679,7 +9741,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
}
prev_offset = linfo[i].insn_off;
- ulinfo += rec_size;
+ bpfptr_add(&ulinfo, rec_size);
}
if (s != env->subprog_cnt) {
@@ -9701,7 +9763,7 @@ err_free:
static int check_btf_info(struct bpf_verifier_env *env,
const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+ bpfptr_t uattr)
{
struct btf *btf;
int err;
@@ -9746,13 +9808,6 @@ static bool range_within(struct bpf_reg_state *old,
old->s32_max_value >= cur->s32_max_value;
}
-/* Maximum number of register states that can exist at once */
-#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
-struct idpair {
- u32 old;
- u32 cur;
-};
-
/* If in the old state two registers had the same id, then they need to have
* the same id in the new state as well. But that id could be different from
* the old state, so we need to track the mapping from old to new ids.
@@ -9763,11 +9818,11 @@ struct idpair {
* So we look through our idmap to see if this old id has been seen before. If
* so, we require the new id to match; otherwise, we add the id pair to the map.
*/
-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
+static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
{
unsigned int i;
- for (i = 0; i < ID_MAP_SIZE; i++) {
+ for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
idmap[i].old = old_id;
@@ -9844,7 +9899,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
* Since the verifier pushes the branch states as it sees them while exploring
* the program the condition of walking the branch instruction for the second
* time means that all states below this branch were already explored and
- * their final liveness markes are already propagated.
+ * their final liveness marks are already propagated.
* Hence when the verifier completes the search of state list in is_state_visited()
* we can call this clean_live_states() function to mark all liveness states
* as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
@@ -9880,7 +9935,7 @@ next:
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
- struct idpair *idmap)
+ struct bpf_id_pair *idmap)
{
bool equal;
@@ -9998,7 +10053,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
- struct idpair *idmap)
+ struct bpf_id_pair *idmap)
{
int i, spi;
@@ -10095,32 +10150,23 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool func_states_equal(struct bpf_func_state *old,
+static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur)
{
- struct idpair *idmap;
- bool ret = false;
int i;
- idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
- /* If we failed to allocate the idmap, just say it's not safe */
- if (!idmap)
- return false;
-
- for (i = 0; i < MAX_BPF_REG; i++) {
- if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
- goto out_free;
- }
+ memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+ return false;
- if (!stacksafe(old, cur, idmap))
- goto out_free;
+ if (!stacksafe(old, cur, env->idmap_scratch))
+ return false;
if (!refsafe(old, cur))
- goto out_free;
- ret = true;
-out_free:
- kfree(idmap);
- return ret;
+ return false;
+
+ return true;
}
static bool states_equal(struct bpf_verifier_env *env,
@@ -10147,7 +10193,7 @@ static bool states_equal(struct bpf_verifier_env *env,
for (i = 0; i <= old->curframe; i++) {
if (old->frame[i]->callsite != cur->frame[i]->callsite)
return false;
- if (!func_states_equal(old->frame[i], cur->frame[i]))
+ if (!func_states_equal(env, old->frame[i], cur->frame[i]))
return false;
}
return true;
@@ -10624,7 +10670,7 @@ static int do_check(struct bpf_verifier_env *env)
}
regs = cur_regs(env);
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
prev_insn_idx = env->insn_idx;
if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10851,7 +10897,7 @@ process_bpf_exit:
return err;
env->insn_idx++;
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -11184,6 +11230,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
struct bpf_map *map;
struct fd f;
u64 addr;
+ u32 fd;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -11213,16 +11260,38 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
/* In final convert_pseudo_ld_imm64() step, this is
* converted into regular 64-bit imm load insn.
*/
- if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
- insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
- (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
- insn[1].imm != 0)) {
- verbose(env,
- "unrecognized bpf_ld_imm64 insn\n");
+ switch (insn[0].src_reg) {
+ case BPF_PSEUDO_MAP_VALUE:
+ case BPF_PSEUDO_MAP_IDX_VALUE:
+ break;
+ case BPF_PSEUDO_MAP_FD:
+ case BPF_PSEUDO_MAP_IDX:
+ if (insn[1].imm == 0)
+ break;
+ fallthrough;
+ default:
+ verbose(env, "unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
- f = fdget(insn[0].imm);
+ switch (insn[0].src_reg) {
+ case BPF_PSEUDO_MAP_IDX_VALUE:
+ case BPF_PSEUDO_MAP_IDX:
+ if (bpfptr_is_null(env->fd_array)) {
+ verbose(env, "fd_idx without fd_array is invalid\n");
+ return -EPROTO;
+ }
+ if (copy_from_bpfptr_offset(&fd, env->fd_array,
+ insn[0].imm * sizeof(fd),
+ sizeof(fd)))
+ return -EFAULT;
+ break;
+ default:
+ fd = insn[0].imm;
+ break;
+ }
+
+ f = fdget(fd);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
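
A hedged user-space sketch of the new indirection (the wrapper is hypothetical; fd_array is the attribute the patch adds as BPF_PROG_LOAD_LAST_FIELD): a ld_imm64 insn tagged BPF_PSEUDO_MAP_IDX carries an index into this array instead of a raw map fd, which lets a loader program built from gen_loader-style templates name maps without knowing their fds at template-build time:

static int load_with_fd_array(union bpf_attr *attr, int map_fd)
{
	static int fds[1];

	fds[0] = map_fd;	/* insn with src_reg == BPF_PSEUDO_MAP_IDX, imm == 0 resolves here */
	attr->fd_array = (unsigned long)fds;

	return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
}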
@@ -11237,7 +11306,8 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
}
aux = &env->insn_aux_data[i];
- if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
+ if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
+ insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
} else {
u32 off = insn[1].imm;
@@ -11360,6 +11430,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
struct bpf_insn *insn = new_prog->insnsi;
+ u32 old_seen = old_data[off].seen;
u32 prog_len;
int i;
@@ -11380,7 +11451,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++) {
- new_data[i].seen = env->pass_cnt;
+ /* Expand insni[off]'s seen count to the patched range. */
+ new_data[i].seen = old_seen;
new_data[i].zext_dst = insn_has_def32(env, insn + i);
}
env->insn_aux_data = new_data;
@@ -11402,7 +11474,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
}
}
-static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
+static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{
struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
int i, sz = prog->aux->size_poke_tab;
@@ -11410,6 +11482,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
for (i = 0; i < sz; i++) {
desc = &tab[i];
+ if (desc->insn_idx <= off)
+ continue;
desc->insn_idx += len - 1;
}
}
@@ -11430,7 +11504,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
if (adjust_insn_aux_data(env, new_prog, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
- adjust_poke_descs(new_prog, len);
+ adjust_poke_descs(new_prog, off, len);
return new_prog;
}
@@ -12449,7 +12523,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
- * conditional branch in the interpeter for every normal
+ * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
@@ -12704,6 +12778,9 @@ static void free_states(struct bpf_verifier_env *env)
* insn_aux_data was touched. These variables are compared to clear temporary
* data from failed pass. For testing and experiments do_check_common() can be
* run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
*/
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
{
@@ -13200,6 +13277,17 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
return 0;
}
+BTF_SET_START(btf_id_deny)
+BTF_ID_UNUSED
+#ifdef CONFIG_SMP
+BTF_ID(func, migrate_disable)
+BTF_ID(func, migrate_enable)
+#endif
+#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
+BTF_ID(func, rcu_read_unlock_strict)
+#endif
+BTF_SET_END(btf_id_deny)
+
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
@@ -13210,6 +13298,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
int ret;
u64 key;
+ if (prog->type == BPF_PROG_TYPE_SYSCALL) {
+ if (prog->aux->sleepable)
+ /* attach_btf_id checked to be zero already */
+ return 0;
+ verbose(env, "Syscall programs can only be sleepable\n");
+ return -EINVAL;
+ }
+
if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM) {
verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
@@ -13259,6 +13355,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
ret = bpf_lsm_verify_prog(&env->log, prog);
if (ret < 0)
return ret;
+ } else if (prog->type == BPF_PROG_TYPE_TRACING &&
+ btf_id_set_contains(&btf_id_deny, btf_id)) {
+ return -EINVAL;
}
key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
@@ -13281,8 +13380,7 @@ struct btf *bpf_get_btf_vmlinux(void)
return btf_vmlinux;
}
-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
- union bpf_attr __user *uattr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
{
u64 start_time = ktime_get_ns();
struct bpf_verifier_env *env;
@@ -13312,6 +13410,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
+ env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
is_priv = bpf_capable();
bpf_get_btf_vmlinux();
@@ -13358,12 +13457,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
- if (bpf_prog_is_dev_bound(env->prog->aux)) {
- ret = bpf_prog_offload_verifier_prep(env->prog);
- if (ret)
- goto skip_full_check;
- }
-
env->explored_states = kvcalloc(state_htab_size(env),
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
@@ -13391,6 +13484,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (ret < 0)
goto skip_full_check;
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ ret = bpf_prog_offload_verifier_prep(env->prog);
+ if (ret)
+ goto skip_full_check;
+ }
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 391aa570369b..ee93b6e89587 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -713,7 +713,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
css_task_iter_start(&cgrp->self, 0, &it);
while ((tsk = css_task_iter_next(&it))) {
- switch (tsk->state) {
+ switch (READ_ONCE(tsk->__state)) {
case TASK_RUNNING:
stats->nr_running++;
break;
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
struct cgroup *cgrp = kn->priv;
int ret;
+ /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+ if (strchr(new_name_str, '\n'))
+ return -EINVAL;
+
if (kernfs_type(kn) != KERNFS_DIR)
return -ENOTDIR;
if (kn->parent != new_parent)
@@ -1001,7 +1005,7 @@ static int check_cgroupfs_options(struct fs_context *fc)
ctx->subsys_mask &= enabled;
/*
- * In absense of 'none', 'name=' or subsystem name options,
+ * In absence of 'none', 'name=' and subsystem name options,
* let's default to 'all'.
*/
if (!ctx->subsys_mask && !ctx->none && !ctx->name)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e049edd66776..9cc8c3a686b1 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -468,7 +468,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
- * Find and get @cgrp's css assocaited with @ss. If the css doesn't exist
+ * Find and get @cgrp's css associated with @ss. If the css doesn't exist
* or is offline, %NULL is returned.
*/
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
@@ -577,6 +577,7 @@ out_unlock:
rcu_read_unlock();
return css;
}
+EXPORT_SYMBOL_GPL(cgroup_get_e_css);
static void cgroup_get_live(struct cgroup *cgrp)
{
@@ -1633,7 +1634,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
/**
* css_clear_dir - remove subsys files in a cgroup directory
- * @css: taget css
+ * @css: target css
*/
static void css_clear_dir(struct cgroup_subsys_state *css)
{
@@ -5350,7 +5351,7 @@ out_unlock:
/*
* This is called when the refcnt of a css is confirmed to be killed.
* css_tryget_online() is now guaranteed to fail. Tell the subsystem to
- * initate destruction and put the css ref from kill_css().
+ * initiate destruction and put the css ref from kill_css().
*/
static void css_killed_work_fn(struct work_struct *work)
{
@@ -5634,8 +5635,6 @@ int __init cgroup_init_early(void)
return 0;
}
-static u16 cgroup_disable_mask __initdata;
-
/**
* cgroup_init - cgroup initialization
*
@@ -5694,12 +5693,8 @@ int __init cgroup_init(void)
* disabled flag and cftype registration needs kmalloc,
* both of which aren't available during early_init.
*/
- if (cgroup_disable_mask & (1 << ssid)) {
- static_branch_disable(cgroup_subsys_enabled_key[ssid]);
- printk(KERN_INFO "Disabling %s control group subsystem\n",
- ss->name);
+ if (!cgroup_ssid_enabled(ssid))
continue;
- }
if (cgroup1_ssid_disabled(ssid))
printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
@@ -6058,7 +6053,7 @@ out_revert:
* @kargs: the arguments passed to create the child process
*
* This calls the cancel_fork() callbacks if a fork failed *after*
- * cgroup_can_fork() succeded and cleans up references we took to
+ * cgroup_can_fork() succeeded and cleans up references we took to
* prepare a new css_set for the child process in cgroup_can_fork().
*/
void cgroup_cancel_fork(struct task_struct *child,
@@ -6214,7 +6209,10 @@ static int __init cgroup_disable(char *str)
if (strcmp(token, ss->name) &&
strcmp(token, ss->legacy_name))
continue;
- cgroup_disable_mask |= 1 << i;
+
+ static_branch_disable(cgroup_subsys_enabled_key[i]);
+ pr_info("Disabling %s control group subsystem\n",
+ ss->name);
}
}
return 1;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index a945504c0ae7..adb5190c4429 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3376,7 +3376,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
}
/**
- * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
* @nodemask: the nodemask to be checked
*
* Are any of the nodes in the nodemask allowed in current->mems_allowed?
diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c
index ae042c347c64..3135406608c7 100644
--- a/kernel/cgroup/rdma.c
+++ b/kernel/cgroup/rdma.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdmacg_uncharge);
* This function follows charging resource in hierarchical way.
* It will fail if the charge would cause the new value to exceed the
* hierarchical limit.
- * Returns 0 if the charge succeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
+ * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
* Returns pointer to rdmacg for this resource when charging is successful.
*
* Charger needs to account resources on two criteria.
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 3a3fd2993a65..cee265cb535c 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -75,7 +75,7 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
* @root: root of the tree to traversal
* @cpu: target cpu
*
- * Walks the udpated rstat_cpu tree on @cpu from @root. %NULL @pos starts
+ * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
* the traversal and %NULL return indicates the end. During traversal,
* each returned cgroup is unlinked from the tree. Must be called with the
* matching cgroup_rstat_cpu_lock held.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e538518556f4..804b847912dc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -32,6 +32,7 @@
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
+#include <linux/cpuset.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
@@ -873,6 +874,52 @@ void __init cpuhp_threads_init(void)
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
+/*
+ * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
+ * protected region.
+ *
+ * The operation is still serialized against concurrent CPU hotplug via
+ * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
+ * serialized against other hotplug related activity like adding or
+ * removing of state callbacks and state instances, which invoke either the
+ * startup or the teardown callback of the affected state.
+ *
+ * This is required for subsystems which are unfixable vs. CPU hotplug and
+ * evade lock inversion problems by scheduling work which has to be
+ * completed _before_ cpu_up()/_cpu_down() returns.
+ *
+ * Don't even think about adding anything to this for any new code or even
+ * drivers. Its only purpose is to keep existing lock order trainwrecks
+ * working.
+ *
+ * For cpu_down() there might be valid reasons to finish cleanups which are
+ * not required to be done under cpu_hotplug_lock, but that's a different
+ * story and would be not invoked via this.
+ */
+static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
+{
+ /*
+ * cpusets delegate hotplug operations to a worker to "solve" the
+ * lock order problems. Wait for the worker, but only if tasks are
+ * _not_ frozen (suspend, hibernate) as that would wait forever.
+ *
+ * The wait is required because otherwise the hotplug operation
+ * returns with inconsistent state, which could even be observed in
+ * user space when a new CPU is brought up. The CPU plug uevent
+ * would be delivered and user space reacting on it would fail to
+ * move tasks to the newly plugged CPU up to the point where the
+ * work has finished because up to that point the newly plugged CPU
+ * is not assignable in cpusets/cgroups. On unplug that's not
+ * necessarily a visible issue, but it is still inconsistent state,
+ * which is the real problem which needs to be "fixed". This can't
+ * prevent the transient state between scheduling the work and
+ * returning from waiting for it.
+ */
+ if (!tasks_frozen)
+ cpuset_wait_for_hotplug();
+}
+
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
@@ -961,7 +1008,7 @@ static int takedown_cpu(unsigned int cpu)
int err;
/* Park the smpboot threads */
- kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
+ kthread_park(st->thread);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
@@ -977,7 +1024,7 @@ static int takedown_cpu(unsigned int cpu)
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
- kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
+ kthread_unpark(st->thread);
return err;
}
BUG_ON(cpu_online(cpu));
@@ -1108,6 +1155,7 @@ out:
*/
lockup_detector_cleanup();
arch_smt_update();
+ cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
@@ -1302,6 +1350,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
out:
cpus_write_unlock();
arch_smt_update();
+ cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 825284baaf46..da449c1cdca7 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -455,7 +455,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_SYMBOL(_stext);
VMCOREINFO_SYMBOL(vmap_area_list);
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(mem_map);
VMCOREINFO_SYMBOL(contig_page_data);
#endif
@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
VMCOREINFO_STRUCT_SIZE(mem_section);
VMCOREINFO_OFFSET(mem_section, section_mem_map);
+ VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
#endif
VMCOREINFO_STRUCT_SIZE(page);
@@ -483,7 +484,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(page, compound_head);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLATMEM
VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
diff --git a/kernel/cred.c b/kernel/cred.c
index e1d274cd741b..e6fd2b3fc31f 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -60,6 +60,7 @@ struct cred init_cred = {
.user = INIT_USER,
.user_ns = &init_user_ns,
.group_info = &init_groups,
+ .ucounts = &init_ucounts,
};
static inline void set_cred_subscribers(struct cred *cred, int n)
@@ -119,6 +120,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
if (cred->group_info)
put_group_info(cred->group_info);
free_uid(cred->user);
+ if (cred->ucounts)
+ put_ucounts(cred->ucounts);
put_user_ns(cred->user_ns);
kmem_cache_free(cred_jar, cred);
}
@@ -222,6 +225,7 @@ struct cred *cred_alloc_blank(void)
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
+ new->ucounts = get_ucounts(&init_ucounts);
if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
@@ -284,6 +288,11 @@ struct cred *prepare_creds(void)
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
+
+ new->ucounts = get_ucounts(new->ucounts);
+ if (!new->ucounts)
+ goto error;
+
validate_creds(new);
return new;
@@ -351,7 +360,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
kdebug("share_creds(%p{%d,%d})",
p->cred, atomic_read(&p->cred->usage),
read_cred_subscribers(p->cred));
- atomic_inc(&p->cred->user->processes);
+ inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
return 0;
}
@@ -363,6 +372,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
ret = create_user_ns(new);
if (ret < 0)
goto error_put;
+ ret = set_cred_ucounts(new);
+ if (ret < 0)
+ goto error_put;
}
#ifdef CONFIG_KEYS
@@ -384,8 +396,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
}
#endif
- atomic_inc(&new->user->processes);
p->cred = p->real_cred = get_cred(new);
+ inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
alter_cred_subscribers(new, 2);
validate_creds(new);
return 0;
@@ -485,12 +497,12 @@ int commit_creds(struct cred *new)
* in set_user().
*/
alter_cred_subscribers(new, 2);
- if (new->user != old->user)
- atomic_inc(&new->user->processes);
+ if (new->user != old->user || new->user_ns != old->user_ns)
+ inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user)
- atomic_dec(&old->user->processes);
+ dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
alter_cred_subscribers(old, -2);
/* send notifications */
@@ -653,6 +665,31 @@ int cred_fscmp(const struct cred *a, const struct cred *b)
}
EXPORT_SYMBOL(cred_fscmp);
+int set_cred_ucounts(struct cred *new)
+{
+ struct task_struct *task = current;
+ const struct cred *old = task->real_cred;
+ struct ucounts *old_ucounts = new->ucounts;
+
+ if (new->user == old->user && new->user_ns == old->user_ns)
+ return 0;
+
+ /*
+ * This optimization is needed because alloc_ucounts() uses locks
+ * for table lookups.
+ */
+ if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
+ return 0;
+
+ if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
+ return -EAGAIN;
+
+ if (old_ucounts)
+ put_ucounts(old_ucounts);
+
+ return 0;
+}
+
/*
* initialise the credentials stuff
*/
@@ -719,6 +756,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
+ new->ucounts = get_ucounts(new->ucounts);
+ if (!new->ucounts)
+ goto error;
+
put_cred(old);
validate_creds(new);
return new;
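
For orientation, the cred hunks above all implement one rule: every live struct cred owns exactly one reference on its ucounts. A minimal sketch of that flow, assuming only the alloc_ucounts()/get_ucounts()/put_ucounts() API visible in the hunks (illustrative, not part of the patch):

	/* Sketch: attach the right ucounts to a not-yet-committed cred. */
	static int example_attach_ucounts(struct cred *new)
	{
		/* Look up (or create) ucounts for this user/namespace pair. */
		struct ucounts *uc = alloc_ucounts(new->user_ns, new->euid);

		if (!uc)
			return -EAGAIN;

		/* Drop the reference inherited from the copied cred, if any. */
		if (new->ucounts)
			put_ucounts(new->ucounts);

		new->ucounts = uc;	/* the cred now owns one reference */
		return 0;
	}

put_cred_rcu() releases that reference, which is what keeps the UCOUNT_RLIMIT_NPROC counts balanced across copy_creds(), commit_creds() and exit().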
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 1baa96a2ecb8..622410c45da1 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2488,7 +2488,6 @@ static void kdb_sysinfo(struct sysinfo *val)
static int kdb_summary(int argc, const char **argv)
{
time64_t now;
- struct tm tm;
struct sysinfo val;
if (argc)
@@ -2502,13 +2501,7 @@ static int kdb_summary(int argc, const char **argv)
kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
now = __ktime_get_real_seconds();
- time64_to_tm(now, 0, &tm);
- kdb_printf("date %04ld-%02d-%02d %02d:%02d:%02d "
- "tz_minuteswest %d\n",
- 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec,
- sys_tz.tz_minuteswest);
-
+ kdb_printf("date %ptTs tz_minuteswest %d\n", &now, sys_tz.tz_minuteswest);
kdb_sysinfo(&val);
kdb_printf("uptime ");
if (val.uptime > (24*60*60)) {
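
The kdb change above relies on the vsprintf time64_t extension instead of open-coding the struct tm conversion. A hedged example of the same pattern (assuming %ptTs behaves as it does in the hunk, printing a time64_t as "YYYY-MM-DD HH:MM:SS" with a space separator):

	time64_t now = ktime_get_real_seconds();

	/* One specifier replaces time64_to_tm() plus a six-field printf. */
	pr_info("snapshot taken at %ptTs UTC\n", &now);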
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 91bb666d7c03..9f50d22d68e6 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -609,23 +609,25 @@ unsigned long kdb_task_state_string(const char *s)
*/
char kdb_task_state_char (const struct task_struct *p)
{
- int cpu;
- char state;
+ unsigned int p_state;
unsigned long tmp;
+ char state;
+ int cpu;
if (!p ||
copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long)))
return 'E';
cpu = kdb_process_cpu(p);
- state = (p->state == 0) ? 'R' :
- (p->state < 0) ? 'U' :
- (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
- (p->state & TASK_STOPPED) ? 'T' :
- (p->state & TASK_TRACED) ? 'C' :
+ p_state = READ_ONCE(p->__state);
+ state = (p_state == 0) ? 'R' :
+ (p_state < 0) ? 'U' :
+ (p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p_state & TASK_STOPPED) ? 'T' :
+ (p_state & TASK_TRACED) ? 'C' :
(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
(p->exit_state & EXIT_DEAD) ? 'E' :
- (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
if (is_idle_task(p)) {
/* Idle task. Is it really idle, apart from the kdb
* interrupt? */
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 27725754ac99..51530d5b15a8 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -7,30 +7,64 @@
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
-#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>
-int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */
-EXPORT_SYMBOL_GPL(delayacct_on);
+DEFINE_STATIC_KEY_FALSE(delayacct_key);
+int delayacct_on __read_mostly; /* Delay accounting turned on/off */
struct kmem_cache *delayacct_cache;
-static int __init delayacct_setup_disable(char *str)
+static void set_delayacct(bool enabled)
{
- delayacct_on = 0;
+ if (enabled) {
+ static_branch_enable(&delayacct_key);
+ delayacct_on = 1;
+ } else {
+ delayacct_on = 0;
+ static_branch_disable(&delayacct_key);
+ }
+}
+
+static int __init delayacct_setup_enable(char *str)
+{
+ delayacct_on = 1;
return 1;
}
-__setup("nodelayacct", delayacct_setup_disable);
+__setup("delayacct", delayacct_setup_enable);
void delayacct_init(void)
{
delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
delayacct_tsk_init(&init_task);
+ set_delayacct(delayacct_on);
}
+#ifdef CONFIG_PROC_SYSCTL
+int sysctl_delayacct(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int state = delayacct_on;
+ struct ctl_table t;
+ int err;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ t = *table;
+ t.data = &state;
+ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+ if (err < 0)
+ return err;
+ if (write)
+ set_delayacct(state);
+ return err;
+}
+#endif
+
void __delayacct_tsk_init(struct task_struct *tsk)
{
tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
@@ -42,10 +76,9 @@ void __delayacct_tsk_init(struct task_struct *tsk)
* Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
*/
-static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
- u32 *count)
+static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count)
{
- s64 ns = ktime_get_ns() - *start;
+ s64 ns = local_clock() - *start;
unsigned long flags;
if (ns > 0) {
@@ -58,7 +91,7 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
void __delayacct_blkio_start(void)
{
- current->delays->blkio_start = ktime_get_ns();
+ current->delays->blkio_start = local_clock();
}
/*
@@ -82,7 +115,7 @@ void __delayacct_blkio_end(struct task_struct *p)
delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}
-int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
u64 utime, stime, stimescaled, utimescaled;
unsigned long long t2, t3;
@@ -117,6 +150,9 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
d->cpu_run_virtual_total =
(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;
+ if (!tsk->delays)
+ return 0;
+
/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
raw_spin_lock_irqsave(&tsk->delays->lock, flags);
@@ -151,21 +187,20 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
void __delayacct_freepages_start(void)
{
- current->delays->freepages_start = ktime_get_ns();
+ current->delays->freepages_start = local_clock();
}
void __delayacct_freepages_end(void)
{
- delayacct_end(
- &current->delays->lock,
- &current->delays->freepages_start,
- &current->delays->freepages_delay,
- &current->delays->freepages_count);
+ delayacct_end(&current->delays->lock,
+ &current->delays->freepages_start,
+ &current->delays->freepages_delay,
+ &current->delays->freepages_count);
}
void __delayacct_thrashing_start(void)
{
- current->delays->thrashing_start = ktime_get_ns();
+ current->delays->thrashing_start = local_clock();
}
void __delayacct_thrashing_end(void)
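
The delayacct conversion above is the standard static-key toggle shape: a DEFINE_STATIC_KEY_FALSE() key, a privileged sysctl that flips it, and hot paths guarded by a static branch so the disabled case costs a NOP. A generic sketch of that shape, with invented names (example_key, do_expensive_accounting):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(example_key);

	static void example_set(bool enabled)
	{
		if (enabled)
			static_branch_enable(&example_key);
		else
			static_branch_disable(&example_key);
	}

	void example_hot_path(void)
	{
		/* Patched to a NOP while the key is disabled. */
		if (static_branch_unlikely(&example_key))
			do_expensive_accounting();
	}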
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8ca7d505d61c..e50df8d8f87e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -335,6 +335,14 @@ void __init swiotlb_exit(void)
}
/*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+ return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
+/*
* Bounce: copy the swiotlb buffer from or back to the original dma location
*/
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
@@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
+ unsigned int tlb_offset;
if (orig_addr == INVALID_PHYS_ADDR)
return;
+ tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+ swiotlb_align_offset(dev, orig_addr);
+
+ orig_addr += tlb_offset;
+ alloc_size -= tlb_offset;
+
if (size > alloc_size) {
dev_WARN_ONCE(dev, 1,
"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
@@ -391,14 +406,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
/*
- * Return the offset into a iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
- return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
-/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
*/
static inline unsigned long get_max_slots(unsigned long boundary_mask)
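
To see what the moved helper and the new tlb_offset computation buy, a worked example with invented numbers may help (IO_TLB_SIZE is 2 KiB, so the slot mask is 0x7ff):

	/*
	 * Illustrative values only:
	 *   orig_addr = 0x12345a10, dma_get_min_align_mask(dev) = 0xfff
	 *
	 * swiotlb_align_offset(dev, orig_addr)
	 *   = 0x12345a10 & 0xfff & 0x7ff = 0x210
	 *
	 * A partial sync may hand swiotlb_bounce() a tlb_addr that points
	 * into the middle of the mapping; rebasing keeps the copy aligned
	 * with the matching part of the original buffer:
	 *
	 *   tlb_offset  = (tlb_addr & 0x7ff) - 0x210;
	 *   orig_addr  += tlb_offset;
	 *   alloc_size -= tlb_offset;
	 */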
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index a0b3b04fb596..bf16395b9e13 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
+#include <linux/tick.h>
#include "common.h"
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
local_irq_disable_exit_to_user();
/* Check if any of the above work has queued a deferred wakeup */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
ti_work = READ_ONCE(current_thread_info()->flags);
}
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
lockdep_assert_irqs_disabled();
/* Flush pending rcuog wakeup before the last need_resched() check */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6fee4a7e88d7..464917096e73 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -132,6 +132,7 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
/**
* cpu_function_call - call a function on the cpu
+ * @cpu: target cpu on which to run the function
* @func: the function to be called
* @info: the function call argument
*
@@ -3821,9 +3822,16 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
- struct pmu *pmu = ctx->pmu;
+ struct pmu *pmu;
cpuctx = __get_cpu_context(ctx);
+
+ /*
+ * HACK: for HETEROGENEOUS the task context might have switched to a
+ * different PMU; force (re)set the context.
+ */
+ pmu = ctx->pmu = cpuctx->ctx.pmu;
+
if (cpuctx->task_ctx == ctx) {
if (cpuctx->sched_cb_usage)
__perf_pmu_sched_task(cpuctx, true);
@@ -4609,7 +4617,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
++ctx->pin_count;
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
return ctx;
}
@@ -6667,10 +6677,10 @@ out:
return data->aux_size;
}
-long perf_pmu_snapshot_aux(struct perf_buffer *rb,
- struct perf_event *event,
- struct perf_output_handle *handle,
- unsigned long size)
+static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
+ struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size)
{
unsigned long flags;
long ret;
@@ -8299,8 +8309,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
if (vma->vm_flags & VM_DENYWRITE)
flags |= MAP_DENYWRITE;
- if (vma->vm_flags & VM_MAYEXEC)
- flags |= MAP_EXECUTABLE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
if (is_vm_hugetlb_page(vma))
@@ -8680,13 +8688,12 @@ static void perf_event_switch(struct task_struct *task,
},
};
- if (!sched_in && task->state == TASK_RUNNING)
+ if (!sched_in && task->on_rq) {
switch_event.event_id.header.misc |=
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
+ }
- perf_iterate_sb(perf_event_switch_output,
- &switch_event,
- NULL);
+ perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
}
/*
@@ -11917,6 +11924,7 @@ again:
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader event fd
+ * @flags: perf event open flags
*/
SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
@@ -12373,6 +12381,8 @@ err_fd:
* @attr: attributes of the counter to create
* @cpu: cpu in which the counter is bound
* @task: task to profile (NULL for percpu)
+ * @overflow_handler: callback to trigger when we hit the event
+ * @context: context data that can be used in the overflow_handler callback
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b48d7039a015..835973444a1e 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -451,6 +451,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
* register_user_hw_breakpoint - register a hardware breakpoint for user space
* @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
+ * @context: context data that can be used in the triggered callback
* @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
struct perf_event *
@@ -550,6 +551,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
* register_wide_hw_breakpoint - register a wide breakpoint in the kernel
* @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
+ * @context: context data that can be used in the triggered callback
*
* @return a set of per_cpu pointers to perf events
*/
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6addc9780319..af24dc3febbe 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -453,6 +453,7 @@ static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
* that have fixed length instructions.
*
* uprobe_write_opcode - write the opcode at a given virtual address.
+ * @auprobe: arch specific probepoint information.
* @mm: the probed process address space.
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
@@ -2046,8 +2047,8 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
struct vm_area_struct *vma;
mmap_read_lock(mm);
- vma = find_vma(mm, bp_vaddr);
- if (vma && vma->vm_start <= bp_vaddr) {
+ vma = vma_lookup(mm, bp_vaddr);
+ if (vma) {
if (valid_vma(vma, false)) {
struct inode *inode = file_inode(vma->vm_file);
loff_t offset = vaddr_to_offset(vma, bp_vaddr);
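
The uprobes hunk is a mechanical conversion to vma_lookup(), which returns a VMA only if it actually contains the address. A short sketch of the equivalence the conversion relies on:

	/* Old idiom: find_vma() may return the next VMA *above* addr. */
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		do_something(vma);	/* addr really lies inside vma */

	/* New idiom: NULL unless vm_start <= addr < vm_end. */
	vma = vma_lookup(mm, addr);
	if (vma)
		do_something(vma);	/* same guarantee, stated once */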
diff --git a/kernel/exit.c b/kernel/exit.c
index fd1c04193e18..9a89e7f36acb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -162,7 +162,6 @@ static void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&sig->shared_pending);
tty_kref_put(tty);
}
- exit_task_sigqueue_cache(tsk);
}
static void delayed_put_task_struct(struct rcu_head *rhp)
@@ -189,7 +188,7 @@ repeat:
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
- atomic_dec(&__task_cred(p)->user->processes);
+ dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
rcu_read_unlock();
cgroup_release(p);
diff --git a/kernel/fork.c b/kernel/fork.c
index dc06afd725cb..bc94b2cc5995 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -425,7 +425,7 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
static void release_task_stack(struct task_struct *tsk)
{
- if (WARN_ON(tsk->state != TASK_DEAD))
+ if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
return; /* Better to leak the stack than to free prematurely */
account_kernel_stack(tsk, -1);
@@ -742,6 +742,7 @@ void __put_task_struct(struct task_struct *tsk)
exit_creds(tsk);
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
+ sched_core_free(tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
@@ -824,9 +825,14 @@ void __init fork_init(void)
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
- for (i = 0; i < UCOUNT_COUNTS; i++)
+ for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
init_user_ns.ucount_max[i] = max_threads/2;
+ set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
+ set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
+ set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
+ set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
+
#ifdef CONFIG_VMAP_STACK
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
NULL, free_vm_stack_cache);
@@ -1029,7 +1035,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
- atomic_set(&mm->has_pinned, 0);
atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
@@ -1977,8 +1982,7 @@ static __latent_entropy struct task_struct *copy_process(
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
- if (atomic_read(&p->real_cred->user->processes) >=
- task_rlimit(p, RLIMIT_NPROC)) {
+ if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
if (p->real_cred->user != INIT_USER &&
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
goto bad_fork_free;
@@ -1999,7 +2003,7 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cleanup_count;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
- p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
+ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
p->flags |= PF_FORKNOEXEC;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
@@ -2008,7 +2012,6 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
- p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -2250,6 +2253,8 @@ static __latent_entropy struct task_struct *copy_process(
klp_copy_process(p);
+ sched_core_fork(p);
+
spin_lock(&current->sighand->siglock);
/*
@@ -2337,6 +2342,7 @@ static __latent_entropy struct task_struct *copy_process(
return p;
bad_fork_cancel_cgroup:
+ sched_core_free(p);
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
cgroup_cancel_fork(p, args);
@@ -2385,10 +2391,10 @@ bad_fork_cleanup_threadgroup_lock:
#endif
delayacct_tsk_free(p);
bad_fork_cleanup_count:
- atomic_dec(&p->cred->user->processes);
+ dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
exit_creds(p);
bad_fork_free:
- p->state = TASK_DEAD;
+ WRITE_ONCE(p->__state, TASK_DEAD);
put_task_stack(p);
delayed_free_task(p);
fork_out:
@@ -2408,7 +2414,7 @@ static inline void init_idle_pids(struct task_struct *idle)
}
}
-struct task_struct *fork_idle(int cpu)
+struct task_struct * __init fork_idle(int cpu)
{
struct task_struct *task;
struct kernel_clone_args args = {
@@ -2998,6 +3004,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (err)
goto bad_unshare_cleanup_cred;
+ if (new_cred) {
+ err = set_cred_ucounts(new_cred);
+ if (err)
+ goto bad_unshare_cleanup_cred;
+ }
+
if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
if (do_sysvsem) {
/*
diff --git a/kernel/freezer.c b/kernel/freezer.c
index dc520f01f99d..45ab36ffd0e7 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -58,7 +58,7 @@ bool __refrigerator(bool check_kthr_stop)
/* Hmm, should we be allowed to suspend when there are realtime
processes around? */
bool was_frozen = false;
- long save = current->state;
+ unsigned int save = get_current_state();
pr_debug("%s entered refrigerator\n", current->comm);
diff --git a/kernel/futex.c b/kernel/futex.c
index 4938a00bc785..2ecb07575055 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -35,7 +35,6 @@
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
-#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
@@ -650,7 +649,7 @@ again:
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
- key->shared.pgoff = basepage_index(tail);
+ key->shared.pgoff = page_to_pgoff(tail);
rcu_read_unlock();
}
@@ -1728,12 +1727,9 @@ retry_private:
return ret;
}
- if (!(flags & FLAGS_SHARED)) {
- cond_resched();
- goto retry_private;
- }
-
cond_resched();
+ if (!(flags & FLAGS_SHARED))
+ goto retry_private;
goto retry;
}
@@ -1874,7 +1870,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
- * the bit unecessarily as it will force the subsequent unlock to enter
+ * the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
@@ -2103,7 +2099,7 @@ retry_private:
continue;
/*
- * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+ * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
@@ -2318,7 +2314,7 @@ retry:
}
/*
- * PI futexes can not be requeued and must remove themself from the
+ * PI futexes can not be requeued and must remove themselves from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
*/
static void unqueue_me_pi(struct futex_q *q)
@@ -2786,7 +2782,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
if (refill_pi_state_cache())
return -ENOMEM;
- to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
+ to = futex_setup_timer(time, &timeout, flags, 0);
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
@@ -2903,7 +2899,7 @@ no_block:
*/
res = fixup_owner(uaddr, &q, !ret);
/*
- * If fixup_owner() returned an error, proprogate that. If it acquired
+ * If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
@@ -3280,7 +3276,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
- * If fixup_owner() returned an error, proprogate that. If it
+ * If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
@@ -3678,7 +3674,7 @@ void futex_exec_release(struct task_struct *tsk)
{
/*
* The state handling is done for consistency, but in the case of
- * exec() there is no way to prevent futher damage as the PID stays
+ * exec() there is no way to prevent further damage as the PID stays
* the same. But for the unlikely and arguably buggy case that a
* futex is held on exec(), this provides at least as much state
 * consistency protection as is possible.
@@ -3710,12 +3706,14 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
- if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
+ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI &&
+ cmd != FUTEX_LOCK_PI2)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
@@ -3742,6 +3740,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
+ flags |= FLAGS_CLOCKRT;
+ fallthrough;
+ case FUTEX_LOCK_PI2:
return futex_lock_pi(uaddr, flags, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
@@ -3762,6 +3763,7 @@ static __always_inline bool futex_cmd_has_timeout(u32 cmd)
switch (cmd) {
case FUTEX_WAIT:
case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
case FUTEX_WAIT_BITSET:
case FUTEX_WAIT_REQUEUE_PI:
return true;
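
FUTEX_LOCK_PI2 exists because plain FUTEX_LOCK_PI is hardwired to CLOCK_REALTIME (the fallthrough above makes that explicit); the new op defaults to CLOCK_MONOTONIC and honors FUTEX_CLOCK_REALTIME. A hedged userspace sketch, assuming a uapi header that already defines FUTEX_LOCK_PI2:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	static long futex_lock_pi2(unsigned int *uaddr,
				   const struct timespec *abs_monotonic)
	{
		/* val is unused for the PI ops; the timeout is absolute.
		 * OR FUTEX_CLOCK_REALTIME into the op for a realtime clock. */
		return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0,
			       abs_monotonic, NULL, 0);
	}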
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 58f87a3092f3..053447183ac5 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -5,6 +5,7 @@ config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
depends on !CC_IS_CLANG || CLANG_VERSION >= 110000
+ depends on !ARCH_WANTS_NO_INSTR || CC_HAS_NO_PROFILE_FN_ATTR
select CONSTRUCTORS
default n
help
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 396ebaebea3f..b0ce8b3f3822 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -196,7 +196,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
last_break = jiffies;
}
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
- if (t->state == TASK_UNINTERRUPTIBLE)
+ if (READ_ONCE(t->__state) == TASK_UNINTERRUPTIBLE)
check_hung_task(t, timeout);
}
unlock:
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index d79ef2493a28..fbc54c2a7f23 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -70,6 +70,11 @@ config IRQ_DOMAIN_HIERARCHY
bool
select IRQ_DOMAIN
+# Support for obsolete non-mapping irq domains
+config IRQ_DOMAIN_NOMAP
+ bool
+ select IRQ_DOMAIN
+
# Support for hierarchical fasteoi+edge and fasteoi+level handlers
config IRQ_FASTEOI_HIERARCHY_HANDLERS
bool
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 8cc8e5713287..7f04c7d8296e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -481,7 +481,7 @@ void handle_nested_irq(unsigned int irq)
for_each_action_of_desc(desc, action)
action_ret |= action->thread_fn(action->irq, action->dev_id);
- if (!noirqdebug)
+ if (!irq_settings_no_debug(desc))
note_interrupt(desc, action_ret);
raw_spin_lock_irq(&desc->lock);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 762a928e18f9..221d80c31e94 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -197,7 +197,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
add_interrupt_randomness(desc->irq_data.irq, flags);
- if (!noirqdebug)
+ if (!irq_settings_no_debug(desc))
note_interrupt(desc, retval);
return retval;
}
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 4a617d7312a4..f4dd5186858a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -632,14 +632,8 @@ void irq_init_desc(unsigned int irq)
#endif /* !CONFIG_SPARSE_IRQ */
-/**
- * generic_handle_irq - Invoke the handler for a particular irq
- * @irq: The irq number to handle
- *
- */
-int generic_handle_irq(unsigned int irq)
+int handle_irq_desc(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *data;
if (!desc)
@@ -652,11 +646,40 @@ int generic_handle_irq(unsigned int irq)
generic_handle_irq_desc(desc);
return 0;
}
+EXPORT_SYMBOL_GPL(handle_irq_desc);
+
+/**
+ * generic_handle_irq - Invoke the handler for a particular irq
+ * @irq: The irq number to handle
+ *
+ */
+int generic_handle_irq(unsigned int irq)
+{
+ return handle_irq_desc(irq_to_desc(irq));
+}
EXPORT_SYMBOL_GPL(generic_handle_irq);
+#ifdef CONFIG_IRQ_DOMAIN
+/**
+ * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
+ * to a domain, usually for a non-root interrupt
+ * controller
+ * @domain: The domain where to perform the lookup
+ * @hwirq: The HW irq number to convert to a logical one
+ *
+ * Returns: 0 on success, or -EINVAL if conversion has failed
+ *
+ */
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
+{
+ return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
+}
+EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
+
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
- * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
+ * handle_domain_irq - Invoke the handler for a HW irq belonging to a domain,
+ * usually for a root interrupt controller
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
* @lookup: Whether to perform the domain lookup or not
@@ -664,37 +687,27 @@ EXPORT_SYMBOL_GPL(generic_handle_irq);
*
* Returns: 0 on success, or -EINVAL if conversion has failed
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs)
+int handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned int irq = hwirq;
+ struct irq_desc *desc;
int ret = 0;
irq_enter();
-#ifdef CONFIG_IRQ_DOMAIN
- if (lookup)
- irq = irq_find_mapping(domain, hwirq);
-#endif
-
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (unlikely(!irq || irq >= nr_irqs)) {
- ack_bad_irq(irq);
+ /* The irqdomain code provides boundary checks */
+ desc = irq_resolve_mapping(domain, hwirq);
+ if (likely(desc))
+ handle_irq_desc(desc);
+ else
ret = -EINVAL;
- } else {
- generic_handle_irq(irq);
- }
irq_exit();
set_irq_regs(old_regs);
return ret;
}
-#ifdef CONFIG_IRQ_DOMAIN
/**
* handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
* @domain: The domain where to perform the lookup
@@ -709,7 +722,7 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned int irq;
+ struct irq_desc *desc;
int ret = 0;
/*
@@ -717,14 +730,14 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
*/
WARN_ON(!in_nmi());
- irq = irq_find_mapping(domain, hwirq);
+ desc = irq_resolve_mapping(domain, hwirq);
/*
* ack_bad_irq is not NMI-safe, just report
* an invalid interrupt.
*/
- if (likely(irq))
- generic_handle_irq(irq);
+ if (likely(desc))
+ handle_irq_desc(desc);
else
ret = -EINVAL;
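
With irq_resolve_mapping() and handle_irq_desc() in place, the new generic_handle_domain_irq() lets chained interrupt controllers skip the hwirq-to-virq round trip entirely. A hedged sketch of a typical mux handler (the example_* names and MUX_STATUS register are invented):

	static void example_mux_handler(struct irq_desc *desc)
	{
		struct example_mux *mux = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned long pending;
		unsigned int hwirq;

		chained_irq_enter(chip, desc);

		pending = readl(mux->base + MUX_STATUS);
		for_each_set_bit(hwirq, &pending, mux->nr_irqs)
			/* Resolve hwirq in mux->domain and run its handler. */
			generic_handle_domain_irq(mux->domain, hwirq);

		chained_irq_exit(chip, desc);
	}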
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 6284443b87ec..51c483ce2447 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -146,7 +146,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
static atomic_t unknown_domains;
- domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
+ if (WARN_ON((size && direct_max) ||
+ (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max)))
+ return NULL;
+
+ domain = kzalloc_node(struct_size(domain, revmap, size),
GFP_KERNEL, of_node_to_nid(to_of_node(fwnode)));
if (!domain)
return NULL;
@@ -209,12 +213,18 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
- mutex_init(&domain->revmap_tree_mutex);
+ mutex_init(&domain->revmap_mutex);
domain->ops = ops;
domain->host_data = host_data;
domain->hwirq_max = hwirq_max;
+
+ if (direct_max) {
+ size = direct_max;
+ domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP;
+ }
+
domain->revmap_size = size;
- domain->revmap_direct_max_irq = direct_max;
+
irq_domain_check_hierarchy(domain);
mutex_lock(&irq_domain_mutex);
@@ -482,29 +492,39 @@ struct irq_domain *irq_get_default_host(void)
return irq_default_domain;
}
+static bool irq_domain_is_nomap(struct irq_domain *domain)
+{
+ return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) &&
+ (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP);
+}
+
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- if (hwirq < domain->revmap_size) {
- domain->linear_revmap[hwirq] = 0;
- } else {
- mutex_lock(&domain->revmap_tree_mutex);
+ if (irq_domain_is_nomap(domain))
+ return;
+
+ mutex_lock(&domain->revmap_mutex);
+ if (hwirq < domain->revmap_size)
+ rcu_assign_pointer(domain->revmap[hwirq], NULL);
+ else
radix_tree_delete(&domain->revmap_tree, hwirq);
- mutex_unlock(&domain->revmap_tree_mutex);
- }
+ mutex_unlock(&domain->revmap_mutex);
}
static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
- if (hwirq < domain->revmap_size) {
- domain->linear_revmap[hwirq] = irq_data->irq;
- } else {
- mutex_lock(&domain->revmap_tree_mutex);
+ if (irq_domain_is_nomap(domain))
+ return;
+
+ mutex_lock(&domain->revmap_mutex);
+ if (hwirq < domain->revmap_size)
+ rcu_assign_pointer(domain->revmap[hwirq], irq_data);
+ else
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
- mutex_unlock(&domain->revmap_tree_mutex);
- }
+ mutex_unlock(&domain->revmap_mutex);
}
static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
@@ -604,6 +624,7 @@ void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
/**
* irq_create_direct_mapping() - Allocate an irq for direct mapping
* @domain: domain to allocate the irq for or NULL for default domain
@@ -628,9 +649,9 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
pr_debug("create_direct virq allocation failed\n");
return 0;
}
- if (virq >= domain->revmap_direct_max_irq) {
+ if (virq >= domain->revmap_size) {
pr_err("ERROR: no free irqs available below %i maximum\n",
- domain->revmap_direct_max_irq);
+ domain->revmap_size);
irq_free_desc(virq);
return 0;
}
@@ -644,6 +665,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+#endif
/**
* irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
@@ -862,37 +884,53 @@ void irq_dispose_mapping(unsigned int virq)
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
- * irq_find_mapping() - Find a linux irq from a hw irq number.
+ * __irq_resolve_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
+ * @irq: optional pointer to return the Linux irq if required
+ *
+ * Returns the interrupt descriptor.
*/
-unsigned int irq_find_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq)
{
+ struct irq_desc *desc = NULL;
struct irq_data *data;
/* Look for default domain if necessary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL)
- return 0;
+ return desc;
+
+ if (irq_domain_is_nomap(domain)) {
+ if (hwirq < domain->revmap_size) {
+ data = irq_domain_get_irq_data(domain, hwirq);
+ if (data && data->hwirq == hwirq)
+ desc = irq_data_to_desc(data);
+ }
- if (hwirq < domain->revmap_direct_max_irq) {
- data = irq_domain_get_irq_data(domain, hwirq);
- if (data && data->hwirq == hwirq)
- return hwirq;
+ return desc;
}
+ rcu_read_lock();
/* Check if the hwirq is in the linear revmap. */
if (hwirq < domain->revmap_size)
- return domain->linear_revmap[hwirq];
+ data = rcu_dereference(domain->revmap[hwirq]);
+ else
+ data = radix_tree_lookup(&domain->revmap_tree, hwirq);
+
+ if (likely(data)) {
+ desc = irq_data_to_desc(data);
+ if (irq)
+ *irq = data->irq;
+ }
- rcu_read_lock();
- data = radix_tree_lookup(&domain->revmap_tree, hwirq);
rcu_read_unlock();
- return data ? data->irq : 0;
+ return desc;
}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
+EXPORT_SYMBOL_GPL(__irq_resolve_mapping);
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
@@ -1468,15 +1506,20 @@ static void irq_domain_fix_revmap(struct irq_data *d)
{
void __rcu **slot;
- if (d->hwirq < d->domain->revmap_size)
- return; /* Not using radix tree. */
+ if (irq_domain_is_nomap(d->domain))
+ return;
/* Fix up the revmap. */
- mutex_lock(&d->domain->revmap_tree_mutex);
- slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
- if (slot)
- radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
- mutex_unlock(&d->domain->revmap_tree_mutex);
+ mutex_lock(&d->domain->revmap_mutex);
+ if (d->hwirq < d->domain->revmap_size) {
+ /* Not using radix tree */
+ rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
+ } else {
+ slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+ if (slot)
+ radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+ }
+ mutex_unlock(&d->domain->revmap_mutex);
}
/**
@@ -1828,8 +1871,7 @@ static void
irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
{
seq_printf(m, "%*sname: %s\n", ind, "", d->name);
- seq_printf(m, "%*ssize: %u\n", ind + 1, "",
- d->revmap_size + d->revmap_direct_max_irq);
+ seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size);
seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
if (d->ops && d->ops->debug_show)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c14356543d9..ef30b4762947 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -441,7 +441,8 @@ out_unlock:
return ret;
}
-int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
+ bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -456,6 +457,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
return ret;
}
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, false);
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity);
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, true);
+}
+EXPORT_SYMBOL_GPL(irq_force_affinity);
+
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
unsigned long flags;
@@ -1686,8 +1717,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (new->flags & IRQF_PERCPU) {
irqd_set(&desc->irq_data, IRQD_PER_CPU);
irq_settings_set_per_cpu(desc);
+ if (new->flags & IRQF_NO_DEBUG)
+ irq_settings_set_no_debug(desc);
}
+ if (noirqdebug)
+ irq_settings_set_no_debug(desc);
+
if (new->flags & IRQF_ONESHOT)
desc->istate |= IRQS_ONESHOT;
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 403378b9947b..7b7efb1a114b 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -18,6 +18,7 @@ enum {
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
_IRQ_HIDDEN = IRQ_HIDDEN,
+ _IRQ_NO_DEBUG = IRQ_NO_DEBUG,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -33,6 +34,7 @@ enum {
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
#define IRQ_HIDDEN GOT_YOU_MORON
+#define IRQ_NO_DEBUG GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -174,3 +176,13 @@ static inline bool irq_settings_is_hidden(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_HIDDEN;
}
+
+static inline void irq_settings_set_no_debug(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_DEBUG;
+}
+
+static inline bool irq_settings_no_debug(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_DEBUG;
+}
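
IRQF_NO_DEBUG gives individual interrupts the opt-out that the global noirqdebug command line applies to everything; per the manage.c hunk it is honored for per-CPU interrupts. A hedged sketch of a requester, assuming the flag-taking per-CPU request helper added in the same series (__request_percpu_irq()):

	/* Opt a well-understood per-CPU timer IRQ out of the spurious
	 * interrupt debug machinery, which would only add overhead here. */
	err = __request_percpu_irq(virq, example_timer_handler,
				   IRQF_NO_DEBUG, "example-timer",
				   per_cpu_dev_id);
	if (err)
		return err;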
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 23a7a0ba1388..db8c248ebc8c 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
if (!irq_work_claim(work))
return false;
- /*record irq_work call stack in order to print it in KASAN reports*/
- kasan_record_aux_stack(work);
-
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
__irq_work_queue_local(work);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index ba39fbb1f8e7..bdb0681bece8 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (jump_entry_code(entry) <= (unsigned long)end &&
- jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+ jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
return 1;
return 0;
@@ -483,13 +483,14 @@ void __init jump_label_init(void)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
+ bool in_init;
/* rewrite NOPs */
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
- if (init_section_contains((void *)jump_entry_code(iter), 1))
- jump_entry_set_init(iter);
+ in_init = init_section_contains((void *)jump_entry_code(iter), 1);
+ jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
@@ -634,9 +635,10 @@ static int jump_label_add_module(struct module *mod)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
+ bool in_init;
- if (within_module_init(jump_entry_code(iter), mod))
- jump_entry_set_init(iter);
+ in_init = within_module_init(jump_entry_code(iter), mod);
+ jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 13dce3c664d6..56016e8e7461 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -460,7 +460,7 @@ static void set_other_info_task_blocking(unsigned long *flags,
* We may be instrumenting a code-path where current->state is already
* something other than TASK_RUNNING.
*/
- const bool is_running = current->state == TASK_RUNNING;
+ const bool is_running = task_is_running(current);
/*
* To avoid deadlock in case we are in an interrupt here and this is a
* race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
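
task_is_running(), used here and in the lockdep and hung_task hunks above, is the accessor this series converts direct ->state readers to; under the __state rename it is essentially:

	/* Sketch of the helper (matches its use in these hunks). */
	#define task_is_running(task) \
		(READ_ONCE((task)->__state) == TASK_RUNNING)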
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 745f08fdd7a6..e41385afe79d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1183,23 +1183,6 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(aggr_post_handler);
-static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
- int trapnr)
-{
- struct kprobe *cur = __this_cpu_read(kprobe_instance);
-
- /*
- * if we faulted "during" the execution of a user specified
- * probe handler, invoke just that probe's fault handler
- */
- if (cur && cur->fault_handler) {
- if (cur->fault_handler(cur, regs, trapnr))
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(aggr_fault_handler);
-
/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
@@ -1330,7 +1313,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
ap->addr = p->addr;
ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
ap->pre_handler = aggr_pre_handler;
- ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
@@ -2014,7 +1996,6 @@ int register_kretprobe(struct kretprobe *rp)
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
- rp->kp.fault_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index fe3f2a40d61e..5b37a8567168 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -68,16 +68,6 @@ enum KTHREAD_BITS {
KTHREAD_SHOULD_PARK,
};
-static inline void set_kthread_struct(void *kthread)
-{
- /*
- * We abuse ->set_child_tid to avoid the new member and because it
- * can't be wrongly copied by copy_process(). We also rely on fact
- * that the caller can't exec, so PF_KTHREAD can't be cleared.
- */
- current->set_child_tid = (__force void __user *)kthread;
-}
-
static inline struct kthread *to_kthread(struct task_struct *k)
{
WARN_ON(!(k->flags & PF_KTHREAD));
@@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
return kthread;
}
+void set_kthread_struct(struct task_struct *p)
+{
+ struct kthread *kthread;
+
+ if (__to_kthread(p))
+ return;
+
+ kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
+ /*
+ * We abuse ->set_child_tid to avoid the new member and because it
+ * can't be wrongly copied by copy_process(). We also rely on the fact
+ * that the caller can't exec, so PF_KTHREAD can't be cleared.
+ */
+ p->set_child_tid = (__force void __user *)kthread;
+}
+
void free_kthread_struct(struct task_struct *k)
{
struct kthread *kthread;
@@ -272,8 +278,8 @@ static int kthread(void *_create)
struct kthread *self;
int ret;
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- set_kthread_struct(self);
+ set_kthread_struct(current);
+ self = to_kthread(current);
/* If user was SIGKILLed, I release the structure. */
done = xchg(&create->done, NULL);
@@ -451,7 +457,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
}
EXPORT_SYMBOL(kthread_create_on_node);
-static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
+static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
unsigned long flags;
@@ -467,7 +473,7 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mas
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
__kthread_bind_mask(p, cpumask_of(cpu), state);
}
@@ -1093,8 +1099,38 @@ void kthread_flush_work(struct kthread_work *work)
EXPORT_SYMBOL_GPL(kthread_flush_work);
/*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and can
+ * no longer manipulate the work list_head.
+ *
+ * The function is called under worker->lock. The lock is temporarily
+ * released, but the timer cannot be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+ unsigned long *flags)
+{
+ struct kthread_delayed_work *dwork =
+ container_of(work, struct kthread_delayed_work, work);
+ struct kthread_worker *worker = work->worker;
+
+ /*
+ * del_timer_sync() must be called to make sure that the timer
+ * callback is not running. The lock must be temporarily released
+ * to avoid a deadlock with the callback. In the meantime,
+ * any queuing is blocked by setting the canceling counter.
+ */
+ work->canceling++;
+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
+ del_timer_sync(&dwork->timer);
+ raw_spin_lock_irqsave(&worker->lock, *flags);
+ work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
*
* The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
@@ -1102,28 +1138,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
* Return: %true if @work was pending and successfully canceled,
* %false if @work was not pending
*/
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
- unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
{
- /* Try to cancel the timer if exists. */
- if (is_dwork) {
- struct kthread_delayed_work *dwork =
- container_of(work, struct kthread_delayed_work, work);
- struct kthread_worker *worker = work->worker;
-
- /*
- * del_timer_sync() must be called to make sure that the timer
- * callback is not running. The lock must be temporary released
- * to avoid a deadlock with the callback. In the meantime,
- * any queuing is blocked by setting the canceling counter.
- */
- work->canceling++;
- raw_spin_unlock_irqrestore(&worker->lock, *flags);
- del_timer_sync(&dwork->timer);
- raw_spin_lock_irqsave(&worker->lock, *flags);
- work->canceling--;
- }
-
/*
* Try to remove the work from a worker list. It might either
* be from worker->work_list or from worker->delayed_work_list.
@@ -1146,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
* modify @dwork's timer so that it expires after @delay. If @delay is zero,
* @work is guaranteed to be queued immediately.
*
- * Return: %true if @dwork was pending and its timer was modified,
- * %false otherwise.
+ * Return: %false if @dwork was idle and queued, %true otherwise.
*
* A special case is when the work is being canceled in parallel.
* It might be caused either by the real kthread_cancel_delayed_work_sync()
* or yet another kthread_mod_delayed_work() call. We let the other command
- * win and return %false here. The caller is supposed to synchronize these
- * operations a reasonable way.
+ * win and return %true here. The return value can be used for reference
+ * counting and the number of queued works stays the same. Either way, the
+ * caller is supposed to synchronize these operations in a reasonable way.
*
* This function is safe to call from any context including IRQ handler.
* See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
@@ -1165,22 +1181,39 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
{
struct kthread_work *work = &dwork->work;
unsigned long flags;
- int ret = false;
+ int ret;
raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
- if (!work->worker)
+ if (!work->worker) {
+ ret = false;
goto fast_queue;
+ }
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);
- /* Do not fight with another command that is canceling this work. */
- if (work->canceling)
+ /*
+ * Temporarily cancel the work but do not fight with another command
+ * that is canceling the work as well.
+ *
+ * It is a bit tricky because of possible races with another
+ * mod_delayed_work() and cancel_delayed_work() callers.
+ *
+ * The timer must be canceled first because worker->lock is released
+ * when doing so. But the work can be removed from the queue (list)
+ * only when it can be queued again so that the return value can
+ * be used for reference counting.
+ */
+ kthread_cancel_delayed_work_timer(work, &flags);
+ if (work->canceling) {
+ /* The number of works in the queue does not change. */
+ ret = true;
goto out;
+ }
+ ret = __kthread_cancel_work(work);
- ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
@@ -1202,7 +1235,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
- ret = __kthread_cancel_work(work, is_dwork, &flags);
+ if (is_dwork)
+ kthread_cancel_delayed_work_timer(work, &flags);
+
+ ret = __kthread_cancel_work(work);
if (worker->current_work != work)
goto out_fast;
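
The reworked kthread_mod_delayed_work() return convention exists so callers can hold exactly one reference per queued instance. A hedged sketch of the intended caller pattern (example_* names invented; mirrors how workqueue users pair mod_delayed_work() with refcounting):

	static void example_rearm(struct example *ex, unsigned long delay)
	{
		kref_get(&ex->ref);	/* reference for the (re)queued work */

		/*
		 * %true means a pending or canceling instance already owns
		 * a reference, so the queue depth did not grow: drop ours.
		 */
		if (kthread_mod_delayed_work(ex->worker, &ex->dwork, delay))
			kref_put(&ex->ref, example_release);
	}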
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7641bd407239..e97d08001437 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -760,7 +760,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
*/
- if (p->state == TASK_RUNNING && p != current)
+ if (p != current && task_is_running(p))
return;
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
@@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class)
}
/* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
@@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
struct lock_class *class;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ instrumentation_begin();
debug_locks_off();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
+ instrumentation_end();
return NULL;
}
@@ -2304,7 +2306,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
}
/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
+ * Dependency path printing:
+ *
+ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
+ * printing out each lock in the dependency path will help on understanding how
+ * the deadlock could happen. Here are some details about dependency path
+ * printing:
+ *
+ * 1) A lock_list can be either forwards or backwards for a lock dependency,
+ * for a lock dependency A -> B, there are two lock_lists:
+ *
+ * a) lock_list in the ->locks_after list of A, whose ->class is B and
+ * ->links_to is A. In this case, we can say the lock_list is
+ * "A -> B" (forwards case).
+ *
+ * b) lock_list in the ->locks_before list of B, whose ->class is A
+ * and ->links_to is B. In this case, we can say the lock_list is
+ * "B <- A" (bacwards case).
+ *
+ * The ->trace of both a) and b) point to the call trace where B was
+ * acquired with A held.
+ *
+ * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
+ * represent a certain lock dependency, it only provides an initial entry
+ * for BFS. For example, BFS may introduce a "helper" lock_list whose
+ * ->class is A, as a result BFS will search all dependencies starting with
+ * A, e.g. A -> B or A -> C.
+ *
+ * The notation of a forwards helper lock_list is like "-> A", which means
+ * we should search the forwards dependencies starting with "A", e.g A -> B
+ * or A -> C.
+ *
+ * The notation of a backwards helper lock_list is like "<- B", which means
+ * we should search the backwards dependencies ending with "B", e.g.
+ * B <- A or B <- C.
+ */
+
+/*
+ * printk the shortest lock dependencies from @root to @leaf in reverse order.
+ *
+ * We have a lock dependency path as follows:
+ *
+ * @root @leaf
+ * | |
+ * V V
+ * ->parent ->parent
+ * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
+ * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
+ *
+ * , so it's natural that we start from @leaf and print every ->class and
+ * ->trace until we reach the @root.
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
@@ -2332,6 +2383,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
} while (entry && (depth >= 0));
}
+/*
+ * printk the shortest lock dependencies from @leaf to @root.
+ *
+ * We have a lock dependency path (from a backwards search) as follows:
+ *
+ * @leaf @root
+ * | |
+ * V V
+ * ->parent ->parent
+ * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
+ * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
+ *
+ * , so when we iterate from @leaf to @root, we actually print the lock
+ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
+ *
+ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
+ * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
+ * trace of L1 in the dependency path, which is alright, because most of the
+ * time we can figure out where L1 is held from the call trace of L2.
+ */
+static void __used
+print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
+ struct lock_list *root)
+{
+ struct lock_list *entry = leaf;
+ const struct lock_trace *trace = NULL;
+ int depth;
+
+ /* compute depth from the tree generated by BFS */
+ depth = get_lock_depth(leaf);
+
+ do {
+ print_lock_class_header(entry->class, depth);
+ if (trace) {
+ printk("%*s ... acquired at:\n", depth, "");
+ print_lock_trace(trace, 2);
+ printk("\n");
+ }
+
+ /*
+ * Record the pointer to the trace for the next lock_list
+ * entry, see the comments for the function.
+ */
+ trace = entry->trace;
+
+ if (depth == 0 && (entry != root)) {
+ printk("lockdep:%s bad path found in chain graph\n", __func__);
+ break;
+ }
+
+ entry = get_lock_parent(entry);
+ depth--;
+ } while (entry && (depth >= 0));
+}
+
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
struct lock_list *unsafe_entry,
@@ -2446,10 +2552,7 @@ print_bad_irq_dependency(struct task_struct *curr,
lockdep_print_held_locks(curr);
pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
- prev_root->trace = save_trace();
- if (!prev_root->trace)
- return;
- print_shortest_lock_dependencies(backwards_entry, prev_root);
+ print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
@@ -2667,8 +2770,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
* Step 3: we found a bad match! Now retrieve a lock from the backward
* list whose usage mask matches the exclusive usage mask from the
* lock found on the forward list.
+ *
+ * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
+ * the following case:
+ *
+ * When trying to add A -> B to the graph, we find that there is a
+ * hardirq-safe L such that L -> ... -> A, and another hardirq-unsafe M
+ * such that B -> ... -> M. However, M is **softirq-safe**; if we use the
+ * exact inverted bits of M's usage_mask, we will find another lock N that
+ * is **softirq-unsafe** with N -> ... -> A, however N -> ... -> M will not
+ * cause an inversion deadlock.
*/
- backward_mask = original_mask(target_entry1->class->usage_mask);
+ backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
if (bfs_error(ret)) {
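
The LOCKF_ENABLED_IRQ_ALL filter above is easiest to see with a toy model of
the mask arithmetic. The bit values and the swap below are made up for
illustration (the real LOCKF_* masks are generated per irq context); only
the effect of the AND matters:

	#include <stdio.h>

	#define USED_IN_HARDIRQ	0x1UL	/* hardirq-safe */
	#define USED_IN_SOFTIRQ	0x2UL	/* softirq-safe */
	#define ENABLED_HARDIRQ	0x4UL	/* hardirq-unsafe */
	#define ENABLED_SOFTIRQ	0x8UL	/* softirq-unsafe */
	#define ENABLED_IRQ_ALL	(ENABLED_HARDIRQ | ENABLED_SOFTIRQ)

	/* Invert each usage bit into its exclusive counterpart. */
	static unsigned long original_mask(unsigned long mask)
	{
		return ((mask & 0x3UL) << 2) | ((mask & 0xcUL) >> 2);
	}

	int main(void)
	{
		/* M is hardirq-unsafe but also softirq-safe. */
		unsigned long m = ENABLED_HARDIRQ | USED_IN_SOFTIRQ;

		/* 0x9: would also match softirq-unsafe locks (the spurious N) */
		printf("unfiltered: %#lx\n", original_mask(m));
		/* 0x1: matches only hardirq-safe locks, as intended */
		printf("filtered:   %#lx\n", original_mask(m & ENABLED_IRQ_ALL));
		return 0;
	}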
@@ -2718,7 +2831,7 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
* <target> or not. If it can, <src> -> <target> dependency is already
* in the graph.
*
- * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
+ * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
* any error appears in the bfs search.
*/
static noinline enum bfs_result
@@ -4577,7 +4690,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
u8 curr_inner;
int depth;
- if (!curr->lockdep_depth || !next_inner || next->trylock)
+ if (!next_inner || next->trylock)
return 0;
if (!next_outer)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 013e1b08a1bf..d2df5e68b503 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -923,7 +923,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
* Lock a mutex (possibly interruptible), slowpath:
*/
static __always_inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
@@ -1098,14 +1098,14 @@ err_early_kill:
}
static int __sched
-__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
{
return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}
static int __sched
-__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx)
{
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 406818196a9f..b5d9bb5202c6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1135,7 +1135,7 @@ void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
*
* Must be called with lock->wait_lock held and interrupts disabled
*/
-static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
struct hrtimer_sleeper *timeout,
struct rt_mutex_waiter *waiter)
{
@@ -1190,7 +1190,7 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
/*
* Slow path lock function:
*/
-static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state,
+static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk)
{
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 809b0016d344..16bfbb10c74d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -889,7 +889,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
* Wait for the read lock to be granted
*/
static struct rw_semaphore __sched *
-rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
+rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
long adjustment = -RWSEM_READER_BIAS;
long rcnt = (count >> RWSEM_READER_SHIFT);
diff --git a/kernel/module.c b/kernel/module.c
index 7e78dfabca97..927d46cb8eb9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
#endif
}
+#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
+void set_module_sig_enforced(void)
+{
+ sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
* on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
}
EXPORT_SYMBOL(is_module_sig_enforced);
-void set_module_sig_enforced(void)
-{
- sig_enforce = true;
-}
-
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
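
The sig_enforce rearrangement is the usual Kconfig-gating shape: when the
feature is compiled out, the variable becomes a compile-time constant so
every user still builds and the dead branches fold away. A minimal sketch of
the same pattern with invented names:

	#include <stdbool.h>
	#include <stdio.h>

	#ifdef CONFIG_FROB_SIG
	static bool frob_enforce;
	void set_frob_enforced(void) { frob_enforce = true; }
	#else
	#define frob_enforce false	/* constant-folds every user below */
	#endif

	bool is_frob_enforced(void)
	{
		return frob_enforce;	/* compiles in both configurations */
	}

	int main(void)
	{
		printf("enforced: %d\n", is_frob_enforced());
		return 0;
	}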
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6bfe3ead10ad..a12779650f15 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -98,20 +98,20 @@ config PM_STD_PARTITION
default ""
help
The default resume partition is the partition that the suspend-
- to-disk implementation will look for a suspended disk image.
+ to-disk implementation will look for a suspended disk image.
- The partition specified here will be different for almost every user.
+ The partition specified here will be different for almost every user.
It should be a valid swap partition (at least for now) that is turned
- on before suspending.
+ on before suspending.
The partition specified can be overridden by specifying:
- resume=/dev/<other device>
+ resume=/dev/<other device>
- which will set the resume partition to the device specified.
+ which will set the resume partition to the device specified.
Note there is currently not a way to specify which device to save the
- suspended image to. It will simply pick the first available swap
+ suspended image to. It will simply pick the first available swap
device.
config PM_SLEEP
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 50cc63534486..37401c99b7d7 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/power/process.c - Functions for starting/stopping processes on
+ * drivers/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1a221dcb3c01..f7a986078213 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -331,7 +331,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
*
* Memory bitmap is a structure consisting of many linked lists of
* objects. The main list's elements are of type struct zone_bitmap
- * and each of them corresonds to one zone. For each zone bitmap
+ * and each of them corresponds to one zone. For each zone bitmap
* object there is a list of objects of type struct bm_block that
* represent each blocks of bitmap in which information is stored.
*
@@ -1146,7 +1146,7 @@ int create_basic_memory_bitmaps(void)
Free_second_object:
kfree(bm2);
Free_first_bitmap:
- memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+ memory_bm_free(bm1, PG_UNSAFE_CLEAR);
Free_first_object:
kfree(bm1);
return -ENOMEM;
@@ -1500,7 +1500,7 @@ static struct memory_bitmap copy_bm;
/**
* swsusp_free - Free pages allocated for hibernation image.
*
- * Image pages are alocated before snapshot creation, so they need to be
+ * Image pages are allocated before snapshot creation, so they need to be
* released after resume.
*/
void swsusp_free(void)
@@ -2326,7 +2326,7 @@ static struct memory_bitmap *safe_highmem_bm;
* (@nr_highmem_p points to the variable containing the number of highmem image
* pages). The pages that are "safe" (ie. will not be overwritten when the
* hibernation image is restored entirely) have the corresponding bits set in
- * @bm (it must be unitialized).
+ * @bm (it must be uninitialized).
*
* NOTE: This function should not be called if there are no highmem image pages.
*/
@@ -2483,7 +2483,7 @@ static inline void free_highmem_data(void) {}
/**
* prepare_image - Make room for loading hibernation image.
- * @new_bm: Unitialized memory bitmap structure.
+ * @new_bm: Uninitialized memory bitmap structure.
* @bm: Memory bitmap with unsafe pages marked.
*
* Use @bm to mark the pages that will be overwritten in the process of
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index bea3cb8afa11..3cb89baebc79 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1125,7 +1125,7 @@ struct dec_data {
};
/**
- * Deompression function that runs in its own thread.
+ * Decompression function that runs in its own thread.
*/
static int lzo_decompress_threadfn(void *data)
{
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 421c35571797..142a58d124d9 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ * spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+ do {
+ cpu_relax();
+ } while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ * spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+ int cpu;
+ int old;
+
+ cpu = smp_processor_id();
+
+ /*
+ * Guarantee loads and stores from this CPU when it is the lock owner
+ * are _not_ visible to the previous lock owner. This pairs with
+ * __printk_cpu_unlock:B.
+ *
+ * Memory barrier involvement:
+ *
+ * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+ * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+ *
+ * Relies on:
+ *
+ * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+ * of the previous CPU
+ * matching
+ * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+ * of this CPU
+ */
+ old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+ cpu); /* LMM(__printk_cpu_trylock:A) */
+ if (old == -1) {
+ /*
+ * This CPU is now the owner and begins loading/storing
+ * data: LMM(__printk_cpu_trylock:B)
+ */
+ return 1;
+
+ } else if (old == cpu) {
+ /* This CPU is already the owner. */
+ atomic_inc(&printk_cpulock_nested);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+ if (atomic_read(&printk_cpulock_nested)) {
+ atomic_dec(&printk_cpulock_nested);
+ return;
+ }
+
+ /*
+ * This CPU is finished loading/storing data:
+ * LMM(__printk_cpu_unlock:A)
+ */
+
+ /*
+ * Guarantee loads and stores from this CPU when it was the
+ * lock owner are visible to the next lock owner. This pairs
+ * with __printk_cpu_trylock:A.
+ *
+ * Memory barrier involvement:
+ *
+ * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+ * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+ *
+ * Relies on:
+ *
+ * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+ * of this CPU
+ * matching
+ * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+ * of the next CPU
+ */
+ atomic_set_release(&printk_cpulock_owner,
+ -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
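
The new lock is compact enough to model in userspace. A C11 sketch of the
same algorithm, with caller-supplied ids standing in for smp_processor_id()
and the kernel's LMM annotations reduced to acquire/release orderings
(illustrative only, not the kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int owner = -1;	/* models printk_cpulock_owner */
	static atomic_int nested;	/* models printk_cpulock_nested */

	static int cpu_trylock(int cpu)
	{
		int old = -1;

		/* ACQUIRE pairs with the RELEASE in cpu_unlock(). */
		if (atomic_compare_exchange_strong_explicit(&owner, &old, cpu,
							    memory_order_acquire,
							    memory_order_relaxed))
			return 1;	/* we took the lock */

		if (old == cpu) {	/* re-entry on the owning cpu */
			atomic_fetch_add(&nested, 1);
			return 1;
		}
		return 0;
	}

	static void cpu_unlock(void)
	{
		if (atomic_load(&nested)) {
			atomic_fetch_sub(&nested, 1);
			return;
		}
		/* RELEASE: publish our stores to the next owner. */
		atomic_store_explicit(&owner, -1, memory_order_release);
	}

	int main(void)
	{
		if (cpu_trylock(0)) {
			if (cpu_trylock(0))	/* nested acquisition succeeds */
				cpu_unlock();
			cpu_unlock();
		}
		printf("owner: %d\n", atomic_load(&owner));	/* -1 again */
		return 0;
	}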
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 7a1414622051..94232186fccb 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
/* No obstacles. */
return vprintk_default(fmt, args);
}
+EXPORT_SYMBOL(vprintk);
void __init printk_safe_init(void)
{
@@ -411,4 +412,3 @@ void __init printk_safe_init(void)
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
-EXPORT_SYMBOL(vprintk);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2997ca600d18..f8589bf8d7dc 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -197,7 +197,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
!__fatal_signal_pending(task)) {
- task->state = __TASK_TRACED;
+ WRITE_ONCE(task->__state, __TASK_TRACED);
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
@@ -207,7 +207,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
static void ptrace_unfreeze_traced(struct task_struct *task)
{
- if (task->state != __TASK_TRACED)
+ if (READ_ONCE(task->__state) != __TASK_TRACED)
return;
WARN_ON(!task->ptrace || task->parent != current);
@@ -217,11 +217,11 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
* Recheck state under the lock to close this race.
*/
spin_lock_irq(&task->sighand->siglock);
- if (task->state == __TASK_TRACED) {
+ if (READ_ONCE(task->__state) == __TASK_TRACED) {
if (__fatal_signal_pending(task))
wake_up_state(task, __TASK_TRACED);
else
- task->state = TASK_TRACED;
+ WRITE_ONCE(task->__state, TASK_TRACED);
}
spin_unlock_irq(&task->sighand->siglock);
}
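
These ptrace hunks are part of the tree-wide rename of ->state to ->__state,
with every lockless access funnelled through READ_ONCE()/WRITE_ONCE(). For
scalar fields those helpers boil down to roughly the following (simplified;
the real macros add type checking):

	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

The volatile access forces exactly one load or store and keeps the compiler
from tearing, caching, or re-reading the value across the racy checks above.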
@@ -256,7 +256,7 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
*/
read_lock(&tasklist_lock);
if (child->ptrace && child->parent == current) {
- WARN_ON(child->state == __TASK_TRACED);
+ WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
/*
* child->sighand can't be NULL, release_task()
* does ptrace_unlink() before __exit_signal().
@@ -273,7 +273,7 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
* ptrace_stop() changes ->state back to TASK_RUNNING,
* so we should not worry about leaking __TASK_TRACED.
*/
- WARN_ON(child->state == __TASK_TRACED);
+ WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
ret = -ESRCH;
}
}
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 29d2f4c647d3..194b9c145c40 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1831,10 +1831,10 @@ rcu_torture_stats_print(void)
srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
&flags, &gp_seq);
wtp = READ_ONCE(writer_task);
- pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
+ pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
rcu_torture_writer_state_getname(),
rcu_torture_writer_state, gp_seq, flags,
- wtp == NULL ? ~0UL : wtp->state,
+ wtp == NULL ? ~0U : wtp->__state,
wtp == NULL ? -1 : (int)task_cpu(wtp));
if (!splatted && wtp) {
sched_show_task(wtp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index ad0156b86937..4d6962048c30 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2768,7 +2768,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
- return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+ return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 59b95cc5cbdf..acb2288063b5 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -460,12 +460,12 @@ static void rcu_check_gp_kthread_starvation(void)
if (rcu_is_gp_kthread_starving(&j)) {
cpu = gpk ? task_cpu(gpk) : -1;
- pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+ pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
rcu_state.name, j,
(long)rcu_seq_current(&rcu_state.gp_seq),
data_race(rcu_state.gp_flags),
gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
- gpk ? gpk->state : ~0, cpu);
+ gpk ? gpk->__state : ~0, cpu);
if (gpk) {
pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
pr_err("RCU grace-period kthread stack dump:\n");
@@ -503,12 +503,12 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
gpk && !READ_ONCE(gpk->on_rq)) {
cpu = task_cpu(gpk);
- pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx\n",
+ pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
rcu_state.name, (jiffies - jiffies_fqs),
(long)rcu_seq_current(&rcu_state.gp_seq),
data_race(rcu_state.gp_flags),
gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
- gpk->state);
+ gpk->__state);
pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
}
@@ -735,9 +735,9 @@ void show_rcu_gp_kthreads(void)
ja = j - data_race(rcu_state.gp_activity);
jr = j - data_race(rcu_state.gp_req_activity);
jw = j - data_race(rcu_state.gp_wake_time);
- pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
+ pr_info("%s: wait state: %s(%d) ->state: %#x delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
- rcu_state.gp_state, t ? t->state : 0x1ffffL,
+ rcu_state.gp_state, t ? t->__state : 0x1ffff,
ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
(long)data_race(rcu_state.gp_seq),
(long)data_race(rcu_get_root()->gp_seq_needed),
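
The RCU hunks are mechanical fallout of the same type change: ->state was a
long printed with %#lx and sentinelled with ~0UL, while ->__state is an
unsigned int, so the specifier becomes %#x and the sentinel ~0U. The
specifier has to follow the type, e.g.:

	#include <stdio.h>

	int main(void)
	{
		unsigned int state = 0x402;	/* arbitrary state bits */

		printf("->state=%#x\n", state);	/* %#lx here would be undefined */
		return 0;
	}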
diff --git a/kernel/reboot.c b/kernel/reboot.c
index a6ad5eb2fa73..f7440c0c7e43 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "reboot: " fmt
+#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
@@ -518,6 +519,84 @@ void orderly_reboot(void)
}
EXPORT_SYMBOL_GPL(orderly_reboot);
+/**
+ * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
+ * @work: work_struct associated with the emergency poweroff function
+ *
+ * This function is called in very critical situations to force
+ * a kernel poweroff after a configurable timeout has expired.
+ */
+static void hw_failure_emergency_poweroff_func(struct work_struct *work)
+{
+ /*
+ * We have reached here after the emergency shutdown waiting period has
+ * expired. This means orderly_poweroff has not been able to shut off
+ * the system for some reason.
+ *
+ * Try to shut down the system immediately using kernel_power_off(),
+ * if a poweroff handler is populated.
+ */
+ pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
+ kernel_power_off();
+
+ /*
+ * Worst of the worst case trigger emergency restart
+ */
+ pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
+ emergency_restart();
+}
+
+static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
+ hw_failure_emergency_poweroff_func);
+
+/**
+ * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
+ *
+ * This may be called from any critical situation to trigger a system shutdown
+ * after a given period of time. If the delay is zero or negative, no
+ * forced shutdown is scheduled.
+ */
+static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
+{
+ if (poweroff_delay_ms <= 0)
+ return;
+ schedule_delayed_work(&hw_failure_emergency_poweroff_work,
+ msecs_to_jiffies(poweroff_delay_ms));
+}
+
+/**
+ * hw_protection_shutdown - Trigger an emergency system poweroff
+ *
+ * @reason: Reason of emergency shutdown to be printed.
+ * @ms_until_forced: Time to wait for orderly shutdown before triggering a
+ * forced shutdown. Negative value disables the forced
+ * shutdown.
+ *
+ * Initiate an emergency system shutdown in order to protect hardware from
+ * further damage. Usage examples include thermal protection or voltage or
+ * current regulator failures.
+ * NOTE: The request is ignored if protection shutdown is already pending even
+ * if the previous request has given a large timeout for forced shutdown.
+ * Can be called from any context.
+ */
+void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ static atomic_t allow_proceed = ATOMIC_INIT(1);
+
+ pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
+
+ /* Shutdown should be initiated only once. */
+ if (!atomic_dec_and_test(&allow_proceed))
+ return;
+
+ /*
+ * Queue a backup emergency shutdown in the event of
+ * orderly_poweroff failure
+ */
+ hw_failure_emergency_poweroff(ms_until_forced);
+ orderly_poweroff(true);
+}
+EXPORT_SYMBOL_GPL(hw_protection_shutdown);
+
static int __init reboot_setup(char *str)
{
for (;;) {
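
For context, a hedged sketch of how a driver would consume the new helper
(hypothetical handler, not from this patch):

	#include <linux/reboot.h>

	/* On a critical over-temperature trip: start an orderly shutdown
	 * now, force poweroff if it has not completed within 5 seconds. */
	static void my_critical_temp_handler(void)
	{
		hw_protection_shutdown("Core temperature above limit", 5000);
	}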
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 5fc9c9b70862..978fcfca5871 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_MEMBARRIER) += membarrier.o
obj-$(CONFIG_CPU_ISOLATION) += isolation.o
obj-$(CONFIG_PSI) += psi.o
+obj-$(CONFIG_SCHED_CORE) += core_sched.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5226cc26a095..cf16f8fda9a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -84,6 +84,272 @@ unsigned int sysctl_sched_rt_period = 1000000;
__read_mostly int scheduler_running;
+#ifdef CONFIG_SCHED_CORE
+
+DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+/* kernel prio, less is more */
+static inline int __task_prio(struct task_struct *p)
+{
+ if (p->sched_class == &stop_sched_class) /* trumps deadline */
+ return -2;
+
+ if (rt_prio(p->prio)) /* includes deadline */
+ return p->prio; /* [-1, 99] */
+
+ if (p->sched_class == &idle_sched_class)
+ return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
+
+ return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
+}
+
+/*
+ * l(a,b)
+ * le(a,b) := !l(b,a)
+ * g(a,b) := l(b,a)
+ * ge(a,b) := !l(a,b)
+ */
+
+/* real prio, less is less */
+static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+{
+
+ int pa = __task_prio(a), pb = __task_prio(b);
+
+ if (-pa < -pb)
+ return true;
+
+ if (-pb < -pa)
+ return false;
+
+ if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
+ return !dl_time_before(a->dl.deadline, b->dl.deadline);
+
+ if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
+ return cfs_prio_less(a, b, in_fi);
+
+ return false;
+}
+
+static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+{
+ if (a->core_cookie < b->core_cookie)
+ return true;
+
+ if (a->core_cookie > b->core_cookie)
+ return false;
+
+ /* flip prio, so high prio is leftmost */
+ if (prio_less(b, a, task_rq(a)->core->core_forceidle))
+ return true;
+
+ return false;
+}
+
+#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
+
+static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
+{
+ return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
+}
+
+static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
+{
+ const struct task_struct *p = __node_2_sc(node);
+ unsigned long cookie = (unsigned long)key;
+
+ if (cookie < p->core_cookie)
+ return -1;
+
+ if (cookie > p->core_cookie)
+ return 1;
+
+ return 0;
+}
+
+void sched_core_enqueue(struct rq *rq, struct task_struct *p)
+{
+ rq->core->core_task_seq++;
+
+ if (!p->core_cookie)
+ return;
+
+ rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
+}
+
+void sched_core_dequeue(struct rq *rq, struct task_struct *p)
+{
+ rq->core->core_task_seq++;
+
+ if (!sched_core_enqueued(p))
+ return;
+
+ rb_erase(&p->core_node, &rq->core_tree);
+ RB_CLEAR_NODE(&p->core_node);
+}
+
+/*
+ * Find left-most (aka, highest priority) task matching @cookie.
+ */
+static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
+{
+ struct rb_node *node;
+
+ node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
+ /*
+ * The idle task always matches any cookie!
+ */
+ if (!node)
+ return idle_sched_class.pick_task(rq);
+
+ return __node_2_sc(node);
+}
+
+static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
+{
+ struct rb_node *node = &p->core_node;
+
+ node = rb_next(node);
+ if (!node)
+ return NULL;
+
+ p = container_of(node, struct task_struct, core_node);
+ if (p->core_cookie != cookie)
+ return NULL;
+
+ return p;
+}
+
+/*
+ * Magic required such that:
+ *
+ * raw_spin_rq_lock(rq);
+ * ...
+ * raw_spin_rq_unlock(rq);
+ *
+ * ends up locking and unlocking the _same_ lock, and all CPUs
+ * always agree on what rq has what lock.
+ *
+ * XXX entirely possible to selectively enable cores, don't bother for now.
+ */
+
+static DEFINE_MUTEX(sched_core_mutex);
+static atomic_t sched_core_count;
+static struct cpumask sched_core_mask;
+
+static void __sched_core_flip(bool enabled)
+{
+ int cpu, t, i;
+
+ cpus_read_lock();
+
+ /*
+ * Toggle the online cores, one by one.
+ */
+ cpumask_copy(&sched_core_mask, cpu_online_mask);
+ for_each_cpu(cpu, &sched_core_mask) {
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+
+ i = 0;
+ local_irq_disable();
+ for_each_cpu(t, smt_mask) {
+ /* supports up to SMT8 */
+ raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
+ }
+
+ for_each_cpu(t, smt_mask)
+ cpu_rq(t)->core_enabled = enabled;
+
+ for_each_cpu(t, smt_mask)
+ raw_spin_unlock(&cpu_rq(t)->__lock);
+ local_irq_enable();
+
+ cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
+ }
+
+ /*
+ * Toggle the offline CPUs.
+ */
+ cpumask_copy(&sched_core_mask, cpu_possible_mask);
+ cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask);
+
+ for_each_cpu(cpu, &sched_core_mask)
+ cpu_rq(cpu)->core_enabled = enabled;
+
+ cpus_read_unlock();
+}
+
+static void sched_core_assert_empty(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
+}
+
+static void __sched_core_enable(void)
+{
+ static_branch_enable(&__sched_core_enabled);
+ /*
+ * Ensure all previous instances of raw_spin_rq_*lock() have finished
+ * and future ones will observe !sched_core_disabled().
+ */
+ synchronize_rcu();
+ __sched_core_flip(true);
+ sched_core_assert_empty();
+}
+
+static void __sched_core_disable(void)
+{
+ sched_core_assert_empty();
+ __sched_core_flip(false);
+ static_branch_disable(&__sched_core_enabled);
+}
+
+void sched_core_get(void)
+{
+ if (atomic_inc_not_zero(&sched_core_count))
+ return;
+
+ mutex_lock(&sched_core_mutex);
+ if (!atomic_read(&sched_core_count))
+ __sched_core_enable();
+
+ smp_mb__before_atomic();
+ atomic_inc(&sched_core_count);
+ mutex_unlock(&sched_core_mutex);
+}
+
+static void __sched_core_put(struct work_struct *work)
+{
+ if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
+ __sched_core_disable();
+ mutex_unlock(&sched_core_mutex);
+ }
+}
+
+void sched_core_put(void)
+{
+ static DECLARE_WORK(_work, __sched_core_put);
+
+ /*
+ * "There can be only one"
+ *
+ * Either this is the last one, or we don't actually need to do any
+ * 'work'. If it is the last *again*, we rely on
+ * WORK_STRUCT_PENDING_BIT.
+ */
+ if (!atomic_add_unless(&sched_core_count, -1, 1))
+ schedule_work(&_work);
+}
+
+#else /* !CONFIG_SCHED_CORE */
+
+static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
+static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_CORE */
+
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
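
sched_core_get()/sched_core_put() above are the classic "atomic refcount
with mutex-serialized 0<->1 transitions" shape. A compact userspace model of
that shape (invented names; the kernel additionally defers the put() work to
a workqueue since it can be called from atomic context):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int count;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void feature_enable(void)  { puts("enable");  }
	static void feature_disable(void) { puts("disable"); }

	static void feature_get(void)
	{
		int old = atomic_load(&count);

		/* Fast path: already enabled, just take a reference. */
		while (old > 0)
			if (atomic_compare_exchange_weak(&count, &old, old + 1))
				return;

		/* Slow path: serialize the 0 -> 1 transition. */
		pthread_mutex_lock(&lock);
		if (atomic_load(&count) == 0)
			feature_enable();
		atomic_fetch_add(&count, 1);
		pthread_mutex_unlock(&lock);
	}

	static void feature_put(void)
	{
		/* Simplified: always takes the mutex on the way down. */
		pthread_mutex_lock(&lock);
		if (atomic_fetch_sub(&count, 1) == 1)
			feature_disable();
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		feature_get();
		feature_get();	/* fast path */
		feature_put();
		feature_put();	/* drops to zero: disable */
		return 0;
	}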
@@ -184,6 +450,79 @@ int sysctl_sched_rt_runtime = 950000;
*
*/
+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+{
+ raw_spinlock_t *lock;
+
+ /* Matches synchronize_rcu() in __sched_core_enable() */
+ preempt_disable();
+ if (sched_core_disabled()) {
+ raw_spin_lock_nested(&rq->__lock, subclass);
+ /* preempt_count *MUST* be > 1 */
+ preempt_enable_no_resched();
+ return;
+ }
+
+ for (;;) {
+ lock = __rq_lockp(rq);
+ raw_spin_lock_nested(lock, subclass);
+ if (likely(lock == __rq_lockp(rq))) {
+ /* preempt_count *MUST* be > 1 */
+ preempt_enable_no_resched();
+ return;
+ }
+ raw_spin_unlock(lock);
+ }
+}
+
+bool raw_spin_rq_trylock(struct rq *rq)
+{
+ raw_spinlock_t *lock;
+ bool ret;
+
+ /* Matches synchronize_rcu() in __sched_core_enable() */
+ preempt_disable();
+ if (sched_core_disabled()) {
+ ret = raw_spin_trylock(&rq->__lock);
+ preempt_enable();
+ return ret;
+ }
+
+ for (;;) {
+ lock = __rq_lockp(rq);
+ ret = raw_spin_trylock(lock);
+ if (!ret || (likely(lock == __rq_lockp(rq)))) {
+ preempt_enable();
+ return ret;
+ }
+ raw_spin_unlock(lock);
+ }
+}
+
+void raw_spin_rq_unlock(struct rq *rq)
+{
+ raw_spin_unlock(rq_lockp(rq));
+}
+
+#ifdef CONFIG_SMP
+/*
+ * double_rq_lock - safely lock two runqueues
+ */
+void double_rq_lock(struct rq *rq1, struct rq *rq2)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (rq_order_less(rq2, rq1))
+ swap(rq1, rq2);
+
+ raw_spin_rq_lock(rq1);
+ if (__rq_lockp(rq1) == __rq_lockp(rq2))
+ return;
+
+ raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+}
+#endif
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/
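
raw_spin_rq_lock_nested() and raw_spin_rq_trylock() above both use the
"acquire, then re-check the lock pointer" idiom: enabling core scheduling
can repoint a runqueue's lock to the core-wide one between the load and the
acquisition. The idiom in isolation, as a userspace sketch (illustrative;
the kernel versions also fold in the preemption dance shown above):

	#include <pthread.h>
	#include <stdatomic.h>

	struct obj {
		pthread_mutex_t *_Atomic lock;	/* may be repointed at runtime */
	};

	static void obj_lock(struct obj *o)
	{
		for (;;) {
			pthread_mutex_t *l = atomic_load(&o->lock);

			pthread_mutex_lock(l);
			/* Still the object's lock? Then we're done. */
			if (l == atomic_load(&o->lock))
				return;
			/* Repointed while we slept: drop and retry. */
			pthread_mutex_unlock(l);
		}
	}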
@@ -196,12 +535,12 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
for (;;) {
rq = task_rq(p);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq_pin_lock(rq, rf);
return rq;
}
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
@@ -220,7 +559,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
rq = task_rq(p);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
/*
* move_queued_task() task_rq_lock()
*
@@ -242,7 +581,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
rq_pin_lock(rq, rf);
return rq;
}
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
while (unlikely(task_on_rq_migrating(p)))
@@ -312,7 +651,7 @@ void update_rq_clock(struct rq *rq)
{
s64 delta;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (rq->clock_update_flags & RQCF_ACT_SKIP)
return;
@@ -585,7 +924,6 @@ void wake_up_q(struct wake_q_head *head)
struct task_struct *task;
task = container_of(node, struct task_struct, wake_q);
- BUG_ON(!task);
/* Task can safely be re-inserted now: */
node = node->next;
task->wake_q.next = NULL;
@@ -611,7 +949,7 @@ void resched_curr(struct rq *rq)
struct task_struct *curr = rq->curr;
int cpu;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (test_tsk_need_resched(curr))
return;
@@ -635,10 +973,10 @@ void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_rq_lock_irqsave(rq, flags);
if (cpu_online(cpu) || cpu == smp_processor_id())
resched_curr(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_rq_unlock_irqrestore(rq, flags);
}
#ifdef CONFIG_SMP
@@ -1065,9 +1403,10 @@ static void uclamp_sync_util_min_rt_default(void)
static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
+ /* Copy by value as we could modify it */
struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
- struct uclamp_se uc_max;
+ unsigned int tg_min, tg_max, value;
/*
* Tasks in autogroups or root task group will be
@@ -1078,9 +1417,11 @@ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
if (task_group(p) == &root_task_group)
return uc_req;
- uc_max = task_group(p)->uclamp[clamp_id];
- if (uc_req.value > uc_max.value || !uc_req.user_defined)
- return uc_max;
+ tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
+ tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
+ value = uc_req.value;
+ value = clamp(value, tg_min, tg_max);
+ uclamp_se_set(&uc_req, value, false);
#endif
return uc_req;
@@ -1137,7 +1478,7 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
struct uclamp_se *uc_se = &p->uclamp[clamp_id];
struct uclamp_bucket *bucket;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
/* Update task effective clamp */
p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
@@ -1177,7 +1518,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
unsigned int bkt_clamp;
unsigned int rq_clamp;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
/*
* If sched_uclamp_used was enabled after task @p was enqueued,
@@ -1279,8 +1620,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
}
static inline void
-uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
+uclamp_update_active(struct task_struct *p)
{
+ enum uclamp_id clamp_id;
struct rq_flags rf;
struct rq *rq;
@@ -1300,9 +1642,11 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
* affecting a valid clamp bucket, the next time it's enqueued,
* it will already see the updated clamp bucket value.
*/
- if (p->uclamp[clamp_id].active) {
- uclamp_rq_dec_id(rq, p, clamp_id);
- uclamp_rq_inc_id(rq, p, clamp_id);
+ for_each_clamp_id(clamp_id) {
+ if (p->uclamp[clamp_id].active) {
+ uclamp_rq_dec_id(rq, p, clamp_id);
+ uclamp_rq_inc_id(rq, p, clamp_id);
+ }
}
task_rq_unlock(rq, p, &rf);
@@ -1310,20 +1654,14 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
-uclamp_update_active_tasks(struct cgroup_subsys_state *css,
- unsigned int clamps)
+uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
- enum uclamp_id clamp_id;
struct css_task_iter it;
struct task_struct *p;
css_task_iter_start(css, 0, &it);
- while ((p = css_task_iter_next(&it))) {
- for_each_clamp_id(clamp_id) {
- if ((0x1 << clamp_id) & clamps)
- uclamp_update_active(p, clamp_id);
- }
- }
+ while ((p = css_task_iter_next(&it)))
+ uclamp_update_active(p);
css_task_iter_end(&it);
}
@@ -1590,27 +1928,38 @@ static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */
+bool sched_task_on_rq(struct task_struct *p)
+{
+ return task_on_rq_queued(p);
+}
+
static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (!(flags & ENQUEUE_NOCLOCK))
update_rq_clock(rq);
if (!(flags & ENQUEUE_RESTORE)) {
- sched_info_queued(rq, p);
+ sched_info_enqueue(rq, p);
psi_enqueue(p, flags & ENQUEUE_WAKEUP);
}
uclamp_rq_inc(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+
+ if (sched_core_enabled(rq))
+ sched_core_enqueue(rq, p);
}
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
+ if (sched_core_enabled(rq))
+ sched_core_dequeue(rq, p);
+
if (!(flags & DEQUEUE_NOCLOCK))
update_rq_clock(rq);
if (!(flags & DEQUEUE_SAVE)) {
- sched_info_dequeued(rq, p);
+ sched_info_dequeue(rq, p);
psi_dequeue(p, flags & DEQUEUE_SLEEP);
}
@@ -1850,7 +2199,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int new_cpu)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
deactivate_task(rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, new_cpu);
@@ -1916,7 +2265,6 @@ static int migration_cpu_stop(void *data)
struct migration_arg *arg = data;
struct set_affinity_pending *pending = arg->pending;
struct task_struct *p = arg->task;
- int dest_cpu = arg->dest_cpu;
struct rq *rq = this_rq();
bool complete = false;
struct rq_flags rf;
@@ -1954,19 +2302,15 @@ static int migration_cpu_stop(void *data)
if (pending) {
p->migration_pending = NULL;
complete = true;
- }
- if (dest_cpu < 0) {
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
goto out;
-
- dest_cpu = cpumask_any_distribute(&p->cpus_mask);
}
if (task_on_rq_queued(p))
- rq = __migrate_task(rq, &rf, p, dest_cpu);
+ rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
else
- p->wake_cpu = dest_cpu;
+ p->wake_cpu = arg->dest_cpu;
/*
* XXX __migrate_task() can fail, at which point we might end
@@ -2024,7 +2368,7 @@ int push_cpu_stop(void *arg)
struct task_struct *p = arg;
raw_spin_lock_irq(&p->pi_lock);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
if (task_rq(p) != rq)
goto out_unlock;
@@ -2054,7 +2398,7 @@ int push_cpu_stop(void *arg)
out_unlock:
rq->push_busy = false;
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -2107,7 +2451,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
* Because __kthread_bind() calls this on blocked tasks without
* holding rq->lock.
*/
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
}
if (running)
@@ -2249,7 +2593,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
init_completion(&my_pending.done);
my_pending.arg = (struct migration_arg) {
.task = p,
- .dest_cpu = -1, /* any */
+ .dest_cpu = dest_cpu,
.pending = &my_pending,
};
@@ -2257,6 +2601,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
} else {
pending = p->migration_pending;
refcount_inc(&pending->refs);
+ /*
+ * Affinity has changed, but we've already installed a
+ * pending. migration_cpu_stop() *must* see this, else
+ * we risk a completion of the pending despite having a
+ * task on a disallowed CPU.
+ *
+ * Serialized by p->pi_lock, so this is safe.
+ */
+ pending->arg.dest_cpu = dest_cpu;
}
}
pending = p->migration_pending;
@@ -2277,7 +2630,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
return -EINVAL;
}
- if (task_running(rq, p) || p->state == TASK_WAKING) {
+ if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
/*
* MIGRATE_ENABLE gets here because 'p == current', but for
* anything else we cannot do is_migration_disabled(), punt
@@ -2420,19 +2773,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
+ unsigned int state = READ_ONCE(p->__state);
+
/*
* We should never call set_task_cpu() on a blocked task,
* ttwu() will sort out the placement.
*/
- WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
- !p->on_rq);
+ WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
/*
* Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
* because schedstat_wait_{start,end} rebase migrating task's wait_start
* time relying on p->on_rq.
*/
- WARN_ON_ONCE(p->state == TASK_RUNNING &&
+ WARN_ON_ONCE(state == TASK_RUNNING &&
p->sched_class == &fair_sched_class &&
(p->on_rq && !task_on_rq_migrating(p)));
@@ -2448,7 +2802,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
* task_rq_lock().
*/
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
- lockdep_is_held(&task_rq(p)->lock)));
+ lockdep_is_held(__rq_lockp(task_rq(p)))));
#endif
/*
* Clearly, migrating tasks to offline CPUs is a fairly daft thing.
@@ -2604,7 +2958,7 @@ out:
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
-unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
int running, queued;
struct rq_flags rf;
@@ -2632,7 +2986,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
return 0;
cpu_relax();
}
@@ -2647,7 +3001,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
- if (!match_state || p->state == match_state)
+ if (!match_state || READ_ONCE(p->__state) == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
@@ -2956,7 +3310,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
struct rq_flags *rf)
{
check_preempt_curr(rq, p, wake_flags);
- p->state = TASK_RUNNING;
+ WRITE_ONCE(p->__state, TASK_RUNNING);
trace_sched_wakeup(p);
#ifdef CONFIG_SMP
@@ -2979,6 +3333,9 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
if (rq->avg_idle > max)
rq->avg_idle = max;
+ rq->wake_stamp = jiffies;
+ rq->wake_avg_idle = rq->avg_idle / 2;
+
rq->idle_stamp = 0;
}
#endif
@@ -2990,7 +3347,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
{
int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (p->sched_contributes_to_load)
rq->nr_uninterruptible--;
@@ -3345,12 +3702,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* - we're serialized against set_special_state() by virtue of
* it disabling IRQs (this allows not taking ->pi_lock).
*/
- if (!(p->state & state))
+ if (!(READ_ONCE(p->__state) & state))
goto out;
success = 1;
trace_sched_waking(p);
- p->state = TASK_RUNNING;
+ WRITE_ONCE(p->__state, TASK_RUNNING);
trace_sched_wakeup(p);
goto out;
}
@@ -3363,7 +3720,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
- if (!(p->state & state))
+ if (!(READ_ONCE(p->__state) & state))
goto unlock;
trace_sched_waking(p);
@@ -3429,7 +3786,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* TASK_WAKING such that we can unlock p->pi_lock before doing the
* enqueue, such as ttwu_queue_wakelist().
*/
- p->state = TASK_WAKING;
+ WRITE_ONCE(p->__state, TASK_WAKING);
/*
* If the owning (remote) CPU is still in the middle of schedule() with
@@ -3522,7 +3879,7 @@ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct t
ret = func(p, arg);
rq_unlock(rq, &rf);
} else {
- switch (p->state) {
+ switch (READ_ONCE(p->__state)) {
case TASK_RUNNING:
case TASK_WAKING:
break;
@@ -3648,7 +4005,6 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
#ifdef CONFIG_SCHEDSTATS
DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-static bool __initdata __sched_schedstats = false;
static void set_schedstats(bool enabled)
{
@@ -3672,16 +4028,11 @@ static int __init setup_schedstats(char *str)
if (!str)
goto out;
- /*
- * This code is called before jump labels have been set up, so we can't
- * change the static branch directly just yet. Instead set a temporary
- * variable so init_schedstats() can do it later.
- */
if (!strcmp(str, "enable")) {
- __sched_schedstats = true;
+ set_schedstats(true);
ret = 1;
} else if (!strcmp(str, "disable")) {
- __sched_schedstats = false;
+ set_schedstats(false);
ret = 1;
}
out:
@@ -3692,11 +4043,6 @@ out:
}
__setup("schedstats=", setup_schedstats);
-static void __init init_schedstats(void)
-{
- set_schedstats(__sched_schedstats);
-}
-
#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
@@ -3718,8 +4064,6 @@ int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
return err;
}
#endif /* CONFIG_PROC_SYSCTL */
-#else /* !CONFIG_SCHEDSTATS */
-static inline void init_schedstats(void) {}
#endif /* CONFIG_SCHEDSTATS */
/*
@@ -3735,7 +4079,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
- p->state = TASK_NEW;
+ p->__state = TASK_NEW;
/*
* Make sure we do not leak PI boosting priority to the child.
@@ -3841,7 +4185,7 @@ void wake_up_new_task(struct task_struct *p)
struct rq *rq;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
- p->state = TASK_RUNNING;
+ WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -4001,7 +4345,7 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
void (*func)(struct rq *rq);
struct callback_head *next;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
while (head) {
func = (void (*)(struct rq *))head->func;
@@ -4024,7 +4368,7 @@ static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
{
struct callback_head *head = rq->balance_callback;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (head)
rq->balance_callback = NULL;
@@ -4041,9 +4385,9 @@ static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
unsigned long flags;
if (unlikely(head)) {
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_rq_lock_irqsave(rq, flags);
do_balance_callbacks(rq, head);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_rq_unlock_irqrestore(rq, flags);
}
}
@@ -4074,10 +4418,10 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
* do an early lockdep release here:
*/
rq_unpin_lock(rq, rf);
- spin_release(&rq->lock.dep_map, _THIS_IP_);
+ spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
- rq->lock.owner = next;
+ rq_lockp(rq)->owner = next;
#endif
}
@@ -4088,9 +4432,9 @@ static inline void finish_lock_switch(struct rq *rq)
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+ spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
__balance_callbacks(rq);
- raw_spin_unlock_irq(&rq->lock);
+ raw_spin_rq_unlock_irq(rq);
}
/*
@@ -4203,10 +4547,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
- prev_state = prev->state;
+ prev_state = READ_ONCE(prev->__state);
vtime_task_switch(prev);
perf_event_task_sched_in(prev, current);
finish_task(prev);
+ tick_nohz_task_switch();
finish_lock_switch(rq);
finish_arch_post_lock_switch();
kcov_finish_switch(current);
@@ -4252,7 +4597,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
put_task_struct_rcu_user(prev);
}
- tick_nohz_task_switch();
return rq;
}
@@ -4348,9 +4692,9 @@ context_switch(struct rq *rq, struct task_struct *prev,
* externally visible scheduler statistics: current number of runnable
* threads, total number of context switches performed since bootup.
*/
-unsigned long nr_running(void)
+unsigned int nr_running(void)
{
- unsigned long i, sum = 0;
+ unsigned int i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->nr_running;
@@ -4395,7 +4739,7 @@ unsigned long long nr_context_switches(void)
* it does become runnable.
*/
-unsigned long nr_iowait_cpu(int cpu)
+unsigned int nr_iowait_cpu(int cpu)
{
return atomic_read(&cpu_rq(cpu)->nr_iowait);
}
@@ -4430,9 +4774,9 @@ unsigned long nr_iowait_cpu(int cpu)
* Task CPU affinities can make all that even more 'interesting'.
*/
-unsigned long nr_iowait(void)
+unsigned int nr_iowait(void)
{
- unsigned long i, sum = 0;
+ unsigned int i, sum = 0;
for_each_possible_cpu(i)
sum += nr_iowait_cpu(i);
@@ -4897,7 +5241,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- if (!preempt && prev->state && prev->non_block_count) {
+ if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
prev->comm, prev->pid, prev->non_block_count);
dump_stack();
@@ -4943,7 +5287,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
* Pick up the highest-prio task:
*/
static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
const struct sched_class *class;
struct task_struct *p;
@@ -4961,7 +5305,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (unlikely(p == RETRY_TASK))
goto restart;
- /* Assumes fair_sched_class->next == idle_sched_class */
+ /* Assume the next prioritized class is idle_sched_class */
if (!p) {
put_prev_task(rq, prev);
p = pick_next_task_idle(rq);
@@ -4983,6 +5327,455 @@ restart:
BUG();
}
+#ifdef CONFIG_SCHED_CORE
+static inline bool is_task_rq_idle(struct task_struct *t)
+{
+ return (task_rq(t)->idle == t);
+}
+
+static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
+{
+ return is_task_rq_idle(a) || (a->core_cookie == cookie);
+}
+
+static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
+{
+ if (is_task_rq_idle(a) || is_task_rq_idle(b))
+ return true;
+
+ return a->core_cookie == b->core_cookie;
+}
+
+// XXX fairness/fwd progress conditions
+/*
+ * Returns
+ * - NULL if there is no runnable task for this class.
+ * - the highest priority task for this runqueue if it matches
+ * rq->core->core_cookie or its priority is greater than max.
+ * - Else returns idle_task.
+ */
+static struct task_struct *
+pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi)
+{
+ struct task_struct *class_pick, *cookie_pick;
+ unsigned long cookie = rq->core->core_cookie;
+
+ class_pick = class->pick_task(rq);
+ if (!class_pick)
+ return NULL;
+
+ if (!cookie) {
+ /*
+ * If class_pick is tagged, return it only if it has
+ * higher priority than max.
+ */
+ if (max && class_pick->core_cookie &&
+ prio_less(class_pick, max, in_fi))
+ return idle_sched_class.pick_task(rq);
+
+ return class_pick;
+ }
+
+ /*
+ * If class_pick is idle or matches cookie, return early.
+ */
+ if (cookie_equals(class_pick, cookie))
+ return class_pick;
+
+ cookie_pick = sched_core_find(rq, cookie);
+
+ /*
+ * If class > max && class > cookie, it is the highest priority task on
+ * the core (so far) and it must be selected; otherwise we must go with
+ * the cookie pick in order to satisfy the constraint.
+ */
+ if (prio_less(cookie_pick, class_pick, in_fi) &&
+ (!max || prio_less(max, class_pick, in_fi)))
+ return class_pick;
+
+ return cookie_pick;
+}
+
+extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
+
+static struct task_struct *
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+ struct task_struct *next, *max = NULL;
+ const struct sched_class *class;
+ const struct cpumask *smt_mask;
+ bool fi_before = false;
+ int i, j, cpu, occ = 0;
+ bool need_sync;
+
+ if (!sched_core_enabled(rq))
+ return __pick_next_task(rq, prev, rf);
+
+ cpu = cpu_of(rq);
+
+ /* Stopper task is switching into idle, no need for core-wide selection. */
+ if (cpu_is_offline(cpu)) {
+ /*
+ * Reset core_pick so that we don't enter the fastpath when
+ * coming online. core_pick would already be migrated to
+ * another cpu during offline.
+ */
+ rq->core_pick = NULL;
+ return __pick_next_task(rq, prev, rf);
+ }
+
+ /*
+ * If there were no {en,de}queues since we picked (IOW, the task
+ * pointers are all still valid), and we haven't scheduled the last
+ * pick yet, do so now.
+ *
+ * rq->core_pick can be NULL if no selection was made for a CPU because
+ * it was either offline or went offline during a sibling's core-wide
+ * selection. In this case, do a core-wide selection.
+ */
+ if (rq->core->core_pick_seq == rq->core->core_task_seq &&
+ rq->core->core_pick_seq != rq->core_sched_seq &&
+ rq->core_pick) {
+ WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
+
+ next = rq->core_pick;
+ if (next != prev) {
+ put_prev_task(rq, prev);
+ set_next_task(rq, next);
+ }
+
+ rq->core_pick = NULL;
+ return next;
+ }
+
+ put_prev_task_balance(rq, prev, rf);
+
+ smt_mask = cpu_smt_mask(cpu);
+ need_sync = !!rq->core->core_cookie;
+
+ /* reset state */
+ rq->core->core_cookie = 0UL;
+ if (rq->core->core_forceidle) {
+ need_sync = true;
+ fi_before = true;
+ rq->core->core_forceidle = false;
+ }
+
+ /*
+ * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
+ *
+ * @task_seq guards the task state ({en,de}queues)
+ * @pick_seq is the @task_seq we did a selection on
+ * @sched_seq is the @pick_seq we scheduled
+ *
+ * However, preemptions can cause multiple picks on the same task set.
+ * 'Fix' this by also increasing @task_seq for every pick.
+ */
+ rq->core->core_task_seq++;
+
+ /*
+ * Optimize for the common case where this CPU has no cookies
+ * and there are no cookied tasks running on siblings.
+ */
+ if (!need_sync) {
+ for_each_class(class) {
+ next = class->pick_task(rq);
+ if (next)
+ break;
+ }
+
+ if (!next->core_cookie) {
+ rq->core_pick = NULL;
+ /*
+ * For robustness, update the min_vruntime_fi for
+ * unconstrained picks as well.
+ */
+ WARN_ON_ONCE(fi_before);
+ task_vruntime_update(rq, next, false);
+ goto done;
+ }
+ }
+
+ for_each_cpu(i, smt_mask) {
+ struct rq *rq_i = cpu_rq(i);
+
+ rq_i->core_pick = NULL;
+
+ if (i != cpu)
+ update_rq_clock(rq_i);
+ }
+
+ /*
+ * Try and select tasks for each sibling in descending sched_class
+ * order.
+ */
+ for_each_class(class) {
+again:
+ for_each_cpu_wrap(i, smt_mask, cpu) {
+ struct rq *rq_i = cpu_rq(i);
+ struct task_struct *p;
+
+ if (rq_i->core_pick)
+ continue;
+
+ /*
+ * If this sibling doesn't yet have a suitable task to
+ * run, ask for the most eligible task, given the
+ * highest priority task already selected for this
+ * core.
+ */
+ p = pick_task(rq_i, class, max, fi_before);
+ if (!p)
+ continue;
+
+ if (!is_task_rq_idle(p))
+ occ++;
+
+ rq_i->core_pick = p;
+ if (rq_i->idle == p && rq_i->nr_running) {
+ rq->core->core_forceidle = true;
+ if (!fi_before)
+ rq->core->core_forceidle_seq++;
+ }
+
+ /*
+ * If this new candidate is of higher priority than the
+ * previous, and they're incompatible, we need to wipe
+ * the slate and start over. pick_task makes sure that
+ * p's priority is more than max if it doesn't match
+ * max's cookie.
+ *
+ * NOTE: this is a linear max-filter and is thus bounded
+ * in execution time.
+ */
+ if (!max || !cookie_match(max, p)) {
+ struct task_struct *old_max = max;
+
+ rq->core->core_cookie = p->core_cookie;
+ max = p;
+
+ if (old_max) {
+ rq->core->core_forceidle = false;
+ for_each_cpu(j, smt_mask) {
+ if (j == i)
+ continue;
+
+ cpu_rq(j)->core_pick = NULL;
+ }
+ occ = 1;
+ goto again;
+ }
+ }
+ }
+ }
+
+ rq->core->core_pick_seq = rq->core->core_task_seq;
+ next = rq->core_pick;
+ rq->core_sched_seq = rq->core->core_pick_seq;
+
+ /* Something should have been selected for current CPU */
+ WARN_ON_ONCE(!next);
+
+ /*
+ * Reschedule siblings
+ *
+ * NOTE: L1TF -- at this point we're no longer running the old task and
+ * sending an IPI (below) ensures the sibling will no longer be running
+ * their task. This ensures there is no inter-sibling overlap between
+ * non-matching user state.
+ */
+ for_each_cpu(i, smt_mask) {
+ struct rq *rq_i = cpu_rq(i);
+
+ /*
+ * An online sibling might have gone offline before a task
+ * could be picked for it, or it might be offline but later
+ * happen to come online, but it's too late and nothing was
+ * picked for it. That's OK - it will pick tasks for itself,
+ * so ignore it.
+ */
+ if (!rq_i->core_pick)
+ continue;
+
+ /*
+ * Update for new !FI->FI transitions, or if continuing to be in !FI:
+ * fi_before fi update?
+ * 0 0 1
+ * 0 1 1
+ * 1 0 1
+ * 1 1 0
+ */
+ if (!(fi_before && rq->core->core_forceidle))
+ task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);
+
+ rq_i->core_pick->core_occupation = occ;
+
+ if (i == cpu) {
+ rq_i->core_pick = NULL;
+ continue;
+ }
+
+ /* Did we break L1TF mitigation requirements? */
+ WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
+
+ if (rq_i->curr == rq_i->core_pick) {
+ rq_i->core_pick = NULL;
+ continue;
+ }
+
+ resched_curr(rq_i);
+ }
+
+done:
+ set_next_task(rq, next);
+ return next;
+}
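/*
 * Illustrative sketch of the linear max-filter used by the sibling loop
 * above; the types and helper below are invented for the example and do
 * not exist in the kernel. Each restart strictly raises the max's
 * priority, so the number of restarts is bounded by the candidate count.
 */
struct pick_candidate {
	unsigned long	cookie;
	int		prio;
};

static int max_filter_pick(const struct pick_candidate *c, int nr)
{
	int i, max = -1;
again:
	for (i = 0; i < nr; i++) {
		if (max >= 0 && c[i].cookie == c[max].cookie)
			continue;		/* compatible with current max */
		if (max < 0 || c[i].prio > c[max].prio) {
			max = i;		/* incompatible, higher prio: new max */
			goto again;		/* wipe the slate and start over */
		}
	}
	return max;
}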
+
+static bool try_steal_cookie(int this, int that)
+{
+ struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
+ struct task_struct *p;
+ unsigned long cookie;
+ bool success = false;
+
+ local_irq_disable();
+ double_rq_lock(dst, src);
+
+ cookie = dst->core->core_cookie;
+ if (!cookie)
+ goto unlock;
+
+ if (dst->curr != dst->idle)
+ goto unlock;
+
+ p = sched_core_find(src, cookie);
+ if (p == src->idle)
+ goto unlock;
+
+ do {
+ if (p == src->core_pick || p == src->curr)
+ goto next;
+
+ if (!cpumask_test_cpu(this, &p->cpus_mask))
+ goto next;
+
+ if (p->core_occupation > dst->idle->core_occupation)
+ goto next;
+
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ deactivate_task(src, p, 0);
+ set_task_cpu(p, this);
+ activate_task(dst, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+
+ resched_curr(dst);
+
+ success = true;
+ break;
+
+next:
+ p = sched_core_next(p, cookie);
+ } while (p);
+
+unlock:
+ double_rq_unlock(dst, src);
+ local_irq_enable();
+
+ return success;
+}
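/*
 * Sketch of the steal admission test above (illustrative; the helper name
 * is invented): a forced-idle destination may pull a cookie-matched task
 * only if the task is allowed on that CPU and was no better off on its
 * previous core, as measured by core_occupation.
 */
static bool may_steal(struct task_struct *p, struct rq *dst, int this_cpu)
{
	return cpumask_test_cpu(this_cpu, &p->cpus_mask) &&
	       p->core_occupation <= dst->idle->core_occupation;
}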
+
+static bool steal_cookie_task(int cpu, struct sched_domain *sd)
+{
+ int i;
+
+ for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
+ if (i == cpu)
+ continue;
+
+ if (need_resched())
+ break;
+
+ if (try_steal_cookie(cpu, i))
+ return true;
+ }
+
+ return false;
+}
+
+static void sched_core_balance(struct rq *rq)
+{
+ struct sched_domain *sd;
+ int cpu = cpu_of(rq);
+
+ preempt_disable();
+ rcu_read_lock();
+ raw_spin_rq_unlock_irq(rq);
+ for_each_domain(cpu, sd) {
+ if (need_resched())
+ break;
+
+ if (steal_cookie_task(cpu, sd))
+ break;
+ }
+ raw_spin_rq_lock_irq(rq);
+ rcu_read_unlock();
+ preempt_enable();
+}
+
+static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+
+void queue_core_balance(struct rq *rq)
+{
+ if (!sched_core_enabled(rq))
+ return;
+
+ if (!rq->core->core_cookie)
+ return;
+
+ if (!rq->nr_running) /* not forced idle */
+ return;
+
+ queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
+}
+
+static inline void sched_core_cpu_starting(unsigned int cpu)
+{
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ struct rq *rq, *core_rq = NULL;
+ int i;
+
+ core_rq = cpu_rq(cpu)->core;
+
+ if (!core_rq) {
+ for_each_cpu(i, smt_mask) {
+ rq = cpu_rq(i);
+ if (rq->core && rq->core == rq)
+ core_rq = rq;
+ }
+
+ if (!core_rq)
+ core_rq = cpu_rq(cpu);
+
+ for_each_cpu(i, smt_mask) {
+ rq = cpu_rq(i);
+
+ WARN_ON_ONCE(rq->core && rq->core != core_rq);
+ rq->core = core_rq;
+ }
+ }
+}
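/*
 * Sketch of the invariant established above, for a 2-way SMT core with
 * CPUs c0 and c1 (illustrative; the helper is invented): both siblings
 * share one core-wide rq, and that rq designates itself, so core_cookie
 * and the pick/task sequence counters have a single home per core.
 */
static void check_core_invariant(int c0, int c1)
{
	WARN_ON_ONCE(cpu_rq(c0)->core != cpu_rq(c1)->core);
	WARN_ON_ONCE(cpu_rq(c0)->core->core != cpu_rq(c0)->core);
}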
+#else /* !CONFIG_SCHED_CORE */
+
+static inline void sched_core_cpu_starting(unsigned int cpu) {}
+
+static struct task_struct *
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+ return __pick_next_task(rq, prev, rf);
+}
+
+#endif /* CONFIG_SCHED_CORE */
+
/*
* __schedule() is the main scheduler function.
*
@@ -5074,10 +5867,10 @@ static void __sched notrace __schedule(bool preempt)
* - we form a control dependency vs deactivate_task() below.
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
- prev_state = prev->state;
+ prev_state = READ_ONCE(prev->__state);
if (!preempt && prev_state) {
if (signal_pending_state(prev_state, prev)) {
- prev->state = TASK_RUNNING;
+ WRITE_ONCE(prev->__state, TASK_RUNNING);
} else {
prev->sched_contributes_to_load =
(prev_state & TASK_UNINTERRUPTIBLE) &&
@@ -5150,7 +5943,7 @@ static void __sched notrace __schedule(bool preempt)
rq_unpin_lock(rq, &rf);
__balance_callbacks(rq);
- raw_spin_unlock_irq(&rq->lock);
+ raw_spin_rq_unlock_irq(rq);
}
}
@@ -5174,7 +5967,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
{
unsigned int task_flags;
- if (!tsk->state)
+ if (task_is_running(tsk))
return;
task_flags = tsk->flags;
@@ -5249,7 +6042,7 @@ void __sched schedule_idle(void)
* current task can be in any other state. Note, idle is always in the
* TASK_RUNNING state.
*/
- WARN_ON_ONCE(current->state);
+ WARN_ON_ONCE(current->__state);
do {
__schedule(false);
} while (need_resched());
@@ -5692,7 +6485,7 @@ out_unlock:
rq_unpin_lock(rq, &rf);
__balance_callbacks(rq);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
preempt_enable();
}
@@ -6389,7 +7182,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, false, true);
}
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
@@ -7149,7 +7941,7 @@ again:
if (curr->sched_class != p->sched_class)
goto out_unlock;
- if (task_running(p_rq, p) || p->state)
+ if (task_running(p_rq, p) || !task_is_running(p))
goto out_unlock;
yielded = curr->sched_class->yield_to_task(rq, p);
@@ -7352,7 +8144,7 @@ void sched_show_task(struct task_struct *p)
pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
@@ -7376,26 +8168,28 @@ EXPORT_SYMBOL_GPL(sched_show_task);
static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
+ unsigned int state = READ_ONCE(p->__state);
+
/* no filter, everything matches */
if (!state_filter)
return true;
/* filter, but doesn't match */
- if (!(p->state & state_filter))
+ if (!(state & state_filter))
return false;
/*
* When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
* TASK_KILLABLE).
*/
- if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
+ if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE)
return false;
return true;
}
-void show_state_filter(unsigned long state_filter)
+void show_state_filter(unsigned int state_filter)
{
struct task_struct *g, *p;
@@ -7434,19 +8228,32 @@ void show_state_filter(unsigned long state_filter)
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void init_idle(struct task_struct *idle, int cpu)
+void __init init_idle(struct task_struct *idle, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
__sched_fork(0, idle);
+ /*
+ * The idle task doesn't need the kthread struct to function, but it
+ * is dressed up as a per-CPU kthread and thus needs to play the part
+ * if we want to avoid special-casing it in code that deals with per-CPU
+ * kthreads.
+ */
+ set_kthread_struct(idle);
+
raw_spin_lock_irqsave(&idle->pi_lock, flags);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
- idle->state = TASK_RUNNING;
+ idle->__state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
- idle->flags |= PF_IDLE;
+ /*
+ * PF_KTHREAD should already be set at this point; regardless, make it
+ * look like a proper per-CPU kthread.
+ */
+ idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
+ kthread_set_per_cpu(idle, cpu);
scs_task_reset(idle);
kasan_unpoison_task_stack(idle);
@@ -7480,7 +8287,7 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
idle->on_cpu = 1;
#endif
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
@@ -7646,7 +8453,7 @@ static void balance_push(struct rq *rq)
{
struct task_struct *push_task = rq->curr;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
SCHED_WARN_ON(rq->cpu != smp_processor_id());
/*
@@ -7663,12 +8470,8 @@ static void balance_push(struct rq *rq)
/*
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
- *
- * XXX: the idle task does not match kthread_is_per_cpu() due to
- * histerical raisins.
*/
- if (rq->idle == push_task ||
- kthread_is_per_cpu(push_task) ||
+ if (kthread_is_per_cpu(push_task) ||
is_migration_disabled(push_task)) {
/*
@@ -7684,9 +8487,9 @@ static void balance_push(struct rq *rq)
*/
if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
rcuwait_active(&rq->hotplug_wait)) {
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
rcuwait_wake_up(&rq->hotplug_wait);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
}
return;
}
@@ -7696,7 +8499,7 @@ static void balance_push(struct rq *rq)
* Temporarily drop rq->lock such that we can wake-up the stop task.
* Both preemption and IRQs are still disabled.
*/
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
this_cpu_ptr(&push_work));
/*
@@ -7704,7 +8507,7 @@ static void balance_push(struct rq *rq)
* schedule(). The next pick is obviously going to be the stop task
* which kthread_is_per_cpu() and will push this task away.
*/
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
}
static void balance_push_set(int cpu, bool on)
@@ -7948,6 +8751,7 @@ static void sched_rq_cpu_starting(unsigned int cpu)
int sched_cpu_starting(unsigned int cpu)
{
+ sched_core_cpu_starting(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
return 0;
@@ -7994,7 +8798,7 @@ static void dump_rq_tasks(struct rq *rq, const char *loglvl)
struct task_struct *g, *p;
int cpu = cpu_of(rq);
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
for_each_process_thread(g, p) {
@@ -8046,6 +8850,7 @@ void __init sched_init_smp(void)
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
BUG();
+ current->flags &= ~PF_NO_SETAFFINITY;
sched_init_granularity();
init_sched_rt_class();
@@ -8167,7 +8972,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- raw_spin_lock_init(&rq->lock);
+ raw_spin_lock_init(&rq->__lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -8215,6 +9020,8 @@ void __init sched_init(void)
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
+ rq->wake_stamp = jiffies;
+ rq->wake_avg_idle = rq->avg_idle;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -8232,6 +9039,16 @@ void __init sched_init(void)
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
atomic_set(&rq->nr_iowait, 0);
+
+#ifdef CONFIG_SCHED_CORE
+ rq->core = NULL;
+ rq->core_pick = NULL;
+ rq->core_enabled = 0;
+ rq->core_tree = RB_ROOT;
+ rq->core_forceidle = false;
+
+ rq->core_cookie = 0UL;
+#endif
}
set_load_weight(&init_task, false);
@@ -8258,8 +9075,6 @@ void __init sched_init(void)
#endif
init_sched_fair_class();
- init_schedstats();
-
psi_init();
init_uclamp();
@@ -8277,15 +9092,15 @@ static inline int preempt_count_equals(int preempt_offset)
void __might_sleep(const char *file, int line, int preempt_offset)
{
+ unsigned int state = get_current_state();
/*
* Blocking primitives will set (and therefore destroy) current->state,
* since we will exit with TASK_RUNNING make sure we enter with it,
* otherwise we will destroy state.
*/
- WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+ WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
"do not call blocking ops when !TASK_RUNNING; "
- "state=%lx set at [<%p>] %pS\n",
- current->state,
+ "state=%x set at [<%p>] %pS\n", state,
(void *)current->task_state_change,
(void *)current->task_state_change);
@@ -8681,7 +9496,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
#ifdef CONFIG_UCLAMP_TASK_GROUP
/* Propagate the effective uclamp value for the new group */
+ mutex_lock(&uclamp_mutex);
+ rcu_read_lock();
cpu_util_update_eff(css);
+ rcu_read_unlock();
+ mutex_unlock(&uclamp_mutex);
#endif
return 0;
@@ -8742,7 +9561,7 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
* has happened. This would lead to problems with PELT, due to
* move wanting to detach+attach while we're not attached yet.
*/
- if (task->state == TASK_NEW)
+ if (READ_ONCE(task->__state) == TASK_NEW)
ret = -EINVAL;
raw_spin_unlock_irq(&task->pi_lock);
@@ -8771,6 +9590,9 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
enum uclamp_id clamp_id;
unsigned int clamps;
+ lockdep_assert_held(&uclamp_mutex);
+ SCHED_WARN_ON(!rcu_read_lock_held());
+
css_for_each_descendant_pre(css, top_css) {
uc_parent = css_tg(css)->parent
? css_tg(css)->parent->uclamp : NULL;
@@ -8803,7 +9625,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
}
/* Immediately update descendants RUNNABLE tasks */
- uclamp_update_active_tasks(css, clamps);
+ uclamp_update_active_tasks(css);
}
}
@@ -8962,7 +9784,8 @@ static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
-static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
+static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
+ u64 burst)
{
int i, ret = 0, runtime_enabled, runtime_was_enabled;
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
@@ -8992,6 +9815,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
if (quota != RUNTIME_INF && quota > max_cfs_runtime)
return -EINVAL;
+ if (quota != RUNTIME_INF && (burst > quota ||
+ burst + quota > max_cfs_runtime))
+ return -EINVAL;
+
/*
* Prevent race between setting of cfs_rq->runtime_enabled and
* unthrottle_offline_cfs_rqs().
@@ -9013,6 +9840,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
+ cfs_b->burst = burst;
__refill_cfs_bandwidth_runtime(cfs_b);
@@ -9046,9 +9874,10 @@ out_unlock:
static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
- u64 quota, period;
+ u64 quota, period, burst;
period = ktime_to_ns(tg->cfs_bandwidth.period);
+ burst = tg->cfs_bandwidth.burst;
if (cfs_quota_us < 0)
quota = RUNTIME_INF;
else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
@@ -9056,7 +9885,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
else
return -EINVAL;
- return tg_set_cfs_bandwidth(tg, period, quota);
+ return tg_set_cfs_bandwidth(tg, period, quota, burst);
}
static long tg_get_cfs_quota(struct task_group *tg)
@@ -9074,15 +9903,16 @@ static long tg_get_cfs_quota(struct task_group *tg)
static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
- u64 quota, period;
+ u64 quota, period, burst;
if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
period = (u64)cfs_period_us * NSEC_PER_USEC;
quota = tg->cfs_bandwidth.quota;
+ burst = tg->cfs_bandwidth.burst;
- return tg_set_cfs_bandwidth(tg, period, quota);
+ return tg_set_cfs_bandwidth(tg, period, quota, burst);
}
static long tg_get_cfs_period(struct task_group *tg)
@@ -9095,6 +9925,30 @@ static long tg_get_cfs_period(struct task_group *tg)
return cfs_period_us;
}
+static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
+{
+ u64 quota, period, burst;
+
+ if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
+ return -EINVAL;
+
+ burst = (u64)cfs_burst_us * NSEC_PER_USEC;
+ period = ktime_to_ns(tg->cfs_bandwidth.period);
+ quota = tg->cfs_bandwidth.quota;
+
+ return tg_set_cfs_bandwidth(tg, period, quota, burst);
+}
+
+static long tg_get_cfs_burst(struct task_group *tg)
+{
+ u64 burst_us;
+
+ burst_us = tg->cfs_bandwidth.burst;
+ do_div(burst_us, NSEC_PER_USEC);
+
+ return burst_us;
+}
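/*
 * Userspace sketch of configuring a burst through the cgroup v2 cpu
 * controller files added later in this patch (illustrative; the cgroup
 * path is an example). Values are in microseconds: 100ms of quota per
 * 100ms period, plus up to 50ms of accumulated burst.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;

	f = fopen("/sys/fs/cgroup/demo/cpu.max", "w");
	if (f) { fprintf(f, "100000 100000\n"); fclose(f); }

	f = fopen("/sys/fs/cgroup/demo/cpu.max.burst", "w");
	if (f) { fprintf(f, "50000\n"); fclose(f); }

	return 0;
}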
+
static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
@@ -9119,6 +9973,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
return tg_set_cfs_period(css_tg(css), cfs_period_us);
}
+static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ return tg_get_cfs_burst(css_tg(css));
+}
+
+static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 cfs_burst_us)
+{
+ return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
+}
+
struct cfs_schedulable_data {
struct task_group *tg;
u64 period, quota;
@@ -9272,6 +10138,11 @@ static struct cftype cpu_legacy_files[] = {
.write_u64 = cpu_cfs_period_write_u64,
},
{
+ .name = "cfs_burst_us",
+ .read_u64 = cpu_cfs_burst_read_u64,
+ .write_u64 = cpu_cfs_burst_write_u64,
+ },
+ {
.name = "stat",
.seq_show = cpu_cfs_stat_show,
},
@@ -9436,12 +10307,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
{
struct task_group *tg = css_tg(of_css(of));
u64 period = tg_get_cfs_period(tg);
+ u64 burst = tg_get_cfs_burst(tg);
u64 quota;
int ret;
ret = cpu_period_quota_parse(buf, &period, &quota);
if (!ret)
- ret = tg_set_cfs_bandwidth(tg, period, quota);
+ ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
return ret ?: nbytes;
}
#endif
@@ -9468,6 +10340,12 @@ static struct cftype cpu_files[] = {
.seq_show = cpu_max_show,
.write = cpu_max_write,
},
+ {
+ .name = "max.burst",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_u64 = cpu_cfs_burst_read_u64,
+ .write_u64 = cpu_cfs_burst_write_u64,
+ },
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
{
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
new file mode 100644
index 000000000000..9a80e9a474c0
--- /dev/null
+++ b/kernel/sched/core_sched.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/prctl.h>
+#include "sched.h"
+
+/*
+ * A simple wrapper around refcount. An allocated sched_core_cookie's
+ * address is used to compute the cookie of the task.
+ */
+struct sched_core_cookie {
+ refcount_t refcnt;
+};
+
+unsigned long sched_core_alloc_cookie(void)
+{
+ struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
+ if (!ck)
+ return 0;
+
+ refcount_set(&ck->refcnt, 1);
+ sched_core_get();
+
+ return (unsigned long)ck;
+}
+
+void sched_core_put_cookie(unsigned long cookie)
+{
+ struct sched_core_cookie *ptr = (void *)cookie;
+
+ if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
+ kfree(ptr);
+ sched_core_put();
+ }
+}
+
+unsigned long sched_core_get_cookie(unsigned long cookie)
+{
+ struct sched_core_cookie *ptr = (void *)cookie;
+
+ if (ptr)
+ refcount_inc(&ptr->refcnt);
+
+ return cookie;
+}
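/*
 * Sketch of the intended cookie lifecycle (illustrative; the function is
 * invented for the example): a cookie is the address of a refcounted
 * allocation, so cookie equality is pointer equality and every task
 * holding the cookie holds one reference.
 */
static void cookie_lifecycle_example(void)
{
	unsigned long ck = sched_core_alloc_cookie();	/* refcnt == 1 */

	if (!ck)
		return;

	sched_core_get_cookie(ck);	/* second user: refcnt == 2 */
	sched_core_put_cookie(ck);	/* refcnt == 1 */
	sched_core_put_cookie(ck);	/* refcnt == 0: freed, sched_core_put() */
}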
+
+/*
+ * sched_core_update_cookie - replace the cookie on a task
+ * @p: the task to update
+ * @cookie: the new cookie
+ *
+ * Effectively exchange the task cookie; caller is responsible for lifetimes on
+ * both ends.
+ *
+ * Returns: the old cookie
+ */
+unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie)
+{
+ unsigned long old_cookie;
+ struct rq_flags rf;
+ struct rq *rq;
+ bool enqueued;
+
+ rq = task_rq_lock(p, &rf);
+
+ /*
+ * Since creating a cookie implies sched_core_get(), and since we cannot
+ * set a cookie until after we've created it (and, likewise, cannot
+ * destroy a cookie until after we've removed it), core scheduling must
+ * be enabled here.
+ */
+ SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));
+
+ enqueued = sched_core_enqueued(p);
+ if (enqueued)
+ sched_core_dequeue(rq, p);
+
+ old_cookie = p->core_cookie;
+ p->core_cookie = cookie;
+
+ if (enqueued)
+ sched_core_enqueue(rq, p);
+
+ /*
+ * If task is currently running, it may not be compatible anymore after
+ * the cookie change, so enter the scheduler on its CPU to schedule it
+ * away.
+ */
+ if (task_running(rq, p))
+ resched_curr(rq);
+
+ task_rq_unlock(rq, p, &rf);
+
+ return old_cookie;
+}
+
+static unsigned long sched_core_clone_cookie(struct task_struct *p)
+{
+ unsigned long cookie, flags;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ cookie = sched_core_get_cookie(p->core_cookie);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ return cookie;
+}
+
+void sched_core_fork(struct task_struct *p)
+{
+ RB_CLEAR_NODE(&p->core_node);
+ p->core_cookie = sched_core_clone_cookie(current);
+}
+
+void sched_core_free(struct task_struct *p)
+{
+ sched_core_put_cookie(p->core_cookie);
+}
+
+static void __sched_core_set(struct task_struct *p, unsigned long cookie)
+{
+ cookie = sched_core_get_cookie(cookie);
+ cookie = sched_core_update_cookie(p, cookie);
+ sched_core_put_cookie(cookie);
+}
+
+/* Called from prctl interface: PR_SCHED_CORE */
+int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr)
+{
+ unsigned long cookie = 0, id = 0;
+ struct task_struct *task, *p;
+ struct pid *grp;
+ int err = 0;
+
+ if (!static_branch_likely(&sched_smt_present))
+ return -ENODEV;
+
+ if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
+ (cmd != PR_SCHED_CORE_GET && uaddr))
+ return -EINVAL;
+
+ rcu_read_lock();
+ if (pid == 0) {
+ task = current;
+ } else {
+ task = find_task_by_vpid(pid);
+ if (!task) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+ }
+ get_task_struct(task);
+ rcu_read_unlock();
+
+ /*
+ * Check if this process has the right to modify the specified
+ * process. Use the regular "ptrace_may_access()" checks.
+ */
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ switch (cmd) {
+ case PR_SCHED_CORE_GET:
+ if (type != PIDTYPE_PID || uaddr & 7) {
+ err = -EINVAL;
+ goto out;
+ }
+ cookie = sched_core_clone_cookie(task);
+ if (cookie) {
+ /* XXX improve ? */
+ ptr_to_hashval((void *)cookie, &id);
+ }
+ err = put_user(id, (u64 __user *)uaddr);
+ goto out;
+
+ case PR_SCHED_CORE_CREATE:
+ cookie = sched_core_alloc_cookie();
+ if (!cookie) {
+ err = -ENOMEM;
+ goto out;
+ }
+ break;
+
+ case PR_SCHED_CORE_SHARE_TO:
+ cookie = sched_core_clone_cookie(current);
+ break;
+
+ case PR_SCHED_CORE_SHARE_FROM:
+ if (type != PIDTYPE_PID) {
+ err = -EINVAL;
+ goto out;
+ }
+ cookie = sched_core_clone_cookie(task);
+ __sched_core_set(current, cookie);
+ goto out;
+
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (type == PIDTYPE_PID) {
+ __sched_core_set(task, cookie);
+ goto out;
+ }
+
+ read_lock(&tasklist_lock);
+ grp = task_pid_type(task, type);
+
+ do_each_pid_thread(grp, type, p) {
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
+ err = -EPERM;
+ goto out_tasklist;
+ }
+ } while_each_pid_thread(grp, type, p);
+
+ do_each_pid_thread(grp, type, p) {
+ __sched_core_set(p, cookie);
+ } while_each_pid_thread(grp, type, p);
+out_tasklist:
+ read_unlock(&tasklist_lock);
+
+out:
+ sched_core_put_cookie(cookie);
+ put_task_struct(task);
+ return err;
+}
+
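/*
 * Userspace sketch of the prctl() interface above (illustrative; assumes
 * the PR_SCHED_CORE* constants from the matching <linux/prctl.h>): create
 * a cookie for the current task, then read back its hashed id. Note that
 * GET requires an 8-byte-aligned user pointer and PIDTYPE_PID scope, and
 * the whole interface returns -ENODEV without SMT.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	uint64_t id;

	/* New cookie for this task; pid 0 = current, type 0 = PIDTYPE_PID. */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
		perror("PR_SCHED_CORE_CREATE");

	/* GET writes a stable hash of the cookie to the user pointer. */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, 0, (unsigned long)&id))
		perror("PR_SCHED_CORE_GET");
	else
		printf("cookie id: %#llx\n", (unsigned long long)id);

	return 0;
}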
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 104a1bade14f..893eece65bfd 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -112,7 +112,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_lock_irq(cpu_rq(cpu));
#endif
if (index == CPUACCT_STAT_NSTATS) {
@@ -126,7 +126,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
}
#ifndef CONFIG_64BIT
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_unlock_irq(cpu_rq(cpu));
#endif
return data;
@@ -141,14 +141,14 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_lock_irq(cpu_rq(cpu));
#endif
for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
cpuusage->usages[i] = val;
#ifndef CONFIG_64BIT
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_unlock_irq(cpu_rq(cpu));
#endif
}
@@ -253,13 +253,13 @@ static int cpuacct_all_seq_show(struct seq_file *m, void *V)
* Take rq->lock to make 64-bit read safe on 32-bit
* platforms.
*/
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_lock_irq(cpu_rq(cpu));
#endif
seq_printf(m, " %llu", cpuusage->usages[index]);
#ifndef CONFIG_64BIT
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_rq_unlock_irq(cpu_rq(cpu));
#endif
}
seq_puts(m, "\n");
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 4f09afd2f321..57124614363d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -151,6 +151,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
+ util = map_util_perf(util);
freq = map_util_freq(util, freq, max);
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9a2989749b8d..aaacd6cfd42f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -157,7 +157,7 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+ lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->running_bw += dl_bw;
SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
@@ -170,7 +170,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+ lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->running_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
if (dl_rq->running_bw > old)
@@ -184,7 +184,7 @@ void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+ lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->this_bw += dl_bw;
SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}
@@ -194,7 +194,7 @@ void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+ lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->this_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
if (dl_rq->this_bw > old)
@@ -348,10 +348,10 @@ static void task_non_contending(struct task_struct *p)
if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se, dl_rq);
- if (!dl_task(p) || p->state == TASK_DEAD) {
+ if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- if (p->state == TASK_DEAD)
+ if (READ_ONCE(p->__state) == TASK_DEAD)
sub_rq_bw(&p->dl, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
@@ -987,7 +987,7 @@ static int start_dl_timer(struct task_struct *p)
ktime_t now, act;
s64 delta;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
/*
* We want the timer to fire at the deadline, but considering
@@ -1097,9 +1097,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* If the runqueue is no longer available, migrate the
* task elsewhere. This necessarily changes rq.
*/
- lockdep_unpin_lock(&rq->lock, rf.cookie);
+ lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
rq = dl_task_offline_migration(rq, p);
- rf.cookie = lockdep_pin_lock(&rq->lock);
+ rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
update_rq_clock(rq);
/*
@@ -1355,10 +1355,10 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
sched_clock_tick();
update_rq_clock(rq);
- if (!dl_task(p) || p->state == TASK_DEAD) {
+ if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
+ if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
dl_se->dl_non_contending = 0;
@@ -1722,7 +1722,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
{
struct rq *rq;
- if (p->state != TASK_WAKING)
+ if (READ_ONCE(p->__state) != TASK_WAKING)
return;
rq = task_rq(p);
@@ -1731,7 +1731,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
* from try_to_wake_up(). Hence, p->pi_lock is locked, but
* rq->lock is not... So, lock it
*/
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
if (p->dl.dl_non_contending) {
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
@@ -1746,7 +1746,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
put_task_struct(p);
}
sub_rq_bw(&p->dl, &rq->dl);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
@@ -1852,7 +1852,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
return rb_entry(left, struct sched_dl_entity, rb_node);
}
-static struct task_struct *pick_next_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
@@ -1864,7 +1864,18 @@ static struct task_struct *pick_next_task_dl(struct rq *rq)
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
- set_next_task_dl(rq, p, true);
+
+ return p;
+}
+
+static struct task_struct *pick_next_task_dl(struct rq *rq)
+{
+ struct task_struct *p;
+
+ p = pick_task_dl(rq);
+ if (p)
+ set_next_task_dl(rq, p, true);
+
return p;
}
@@ -2291,10 +2302,10 @@ skip:
double_unlock_balance(this_rq, src_rq);
if (push_task) {
- raw_spin_unlock(&this_rq->lock);
+ raw_spin_rq_unlock(this_rq);
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
push_task, &src_rq->push_work);
- raw_spin_lock(&this_rq->lock);
+ raw_spin_rq_lock(this_rq);
}
}
@@ -2486,6 +2497,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
+ } else {
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
}
}
@@ -2539,6 +2552,7 @@ DEFINE_SCHED_CLASS(dl) = {
#ifdef CONFIG_SMP
.balance = balance_dl,
+ .pick_task = pick_task_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 9c882f20803e..0c5ec2776ddf 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -576,7 +576,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_rq_lock_irqsave(rq, flags);
if (rb_first_cached(&cfs_rq->tasks_timeline))
MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -584,7 +584,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_rq_unlock_irqrestore(rq, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
+#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
P(se.avg.util_est.ewma);
- P(se.avg.util_est.enqueued);
+ PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3248e24a90b0..fb469b26b00a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -268,33 +268,11 @@ const struct sched_class fair_sched_class;
*/
#ifdef CONFIG_FAIR_GROUP_SCHED
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- SCHED_WARN_ON(!entity_is_task(se));
- return container_of(se, struct task_struct, se);
-}
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
- return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
- return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return grp->my_q;
-}
-
static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
if (!path)
@@ -455,33 +433,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
#else /* !CONFIG_FAIR_GROUP_SCHED */
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
-
#define for_each_sched_entity(se) \
for (; se; se = NULL)
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
- return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
- struct task_struct *p = task_of(se);
- struct rq *rq = task_rq(p);
-
- return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return NULL;
-}
-
static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
if (path)
@@ -1039,11 +993,14 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
+ unsigned int state;
- if (tsk->state & TASK_INTERRUPTIBLE)
+ /* XXX racy against TTWU */
+ state = READ_ONCE(tsk->__state);
+ if (state & TASK_INTERRUPTIBLE)
__schedstat_set(se->statistics.sleep_start,
rq_clock(rq_of(cfs_rq)));
- if (tsk->state & TASK_UNINTERRUPTIBLE)
+ if (state & TASK_UNINTERRUPTIBLE)
__schedstat_set(se->statistics.block_start,
rq_clock(rq_of(cfs_rq)));
}
@@ -1107,7 +1064,7 @@ struct numa_group {
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
return rcu_dereference_check(p->numa_group, p == current ||
- (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
+ (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
}
static struct numa_group *deref_curr_numa_group(struct task_struct *p)
@@ -3139,7 +3096,7 @@ void reweight_task(struct task_struct *p, int prio)
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (1)
- * \Sum grq->load.weight
+ * \Sum grq->load.weight
*
* Now, because computing that sum is prohibitively expensive to compute (been
* there, done that) we approximate it with this average stuff. The average
@@ -3153,7 +3110,7 @@ void reweight_task(struct task_struct *p, int prio)
*
* tg->weight * grq->avg.load_avg
* ge->load.weight = ------------------------------ (3)
- * tg->load_avg
+ * tg->load_avg
*
* Where: tg->load_avg ~= \Sum grq->avg.load_avg
*
@@ -3169,7 +3126,7 @@ void reweight_task(struct task_struct *p, int prio)
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- = tg->weight (4)
- * grp->load.weight
+ * grp->load.weight
*
* That is, the sum collapses because all other CPUs are idle; the UP scenario.
*
@@ -3188,7 +3145,7 @@ void reweight_task(struct task_struct *p, int prio)
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (6)
- * tg_load_avg'
+ * tg_load_avg'
*
* Where:
*
@@ -3298,6 +3255,61 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
#ifdef CONFIG_SMP
#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+ struct cfs_rq *prev_cfs_rq;
+ struct list_head *prev;
+
+ if (cfs_rq->on_list) {
+ prev = cfs_rq->leaf_cfs_rq_list.prev;
+ } else {
+ struct rq *rq = rq_of(cfs_rq);
+
+ prev = rq->tmp_alone_branch;
+ }
+
+ prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+ return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+ if (cfs_rq->load.weight)
+ return false;
+
+ if (cfs_rq->avg.load_sum)
+ return false;
+
+ if (cfs_rq->avg.util_sum)
+ return false;
+
+ if (cfs_rq->avg.runnable_sum)
+ return false;
+
+ if (child_cfs_rq_on_list(cfs_rq))
+ return false;
+
+ /*
+ * _avg must be zero when _sum is zero, because _avg = _sum / divider.
+ * Make sure that rounding and/or propagation of PELT values never
+ * break this.
+ */
+ SCHED_WARN_ON(cfs_rq->avg.load_avg ||
+ cfs_rq->avg.util_avg ||
+ cfs_rq->avg.runnable_avg);
+
+ return true;
+}
+
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
@@ -3499,10 +3511,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
- s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3549,13 +3560,16 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, divider);
- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- delta_avg = load_avg - se->avg.load_avg;
-
se->avg.load_sum = runnable_sum;
+
+ delta = load_avg - se->avg.load_avg;
+ if (!delta)
+ return;
+
se->avg.load_avg = load_avg;
- add_positive(&cfs_rq->avg.load_avg, delta_avg);
- add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3671,15 +3685,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
r = removed_load;
sub_positive(&sa->load_avg, r);
- sub_positive(&sa->load_sum, r * divider);
+ sa->load_sum = sa->load_avg * divider;
r = removed_util;
sub_positive(&sa->util_avg, r);
- sub_positive(&sa->util_sum, r * divider);
+ sa->util_sum = sa->util_avg * divider;
r = removed_runnable;
sub_positive(&sa->runnable_avg, r);
- sub_positive(&sa->runnable_sum, r * divider);
+ sa->runnable_sum = sa->runnable_avg * divider;
/*
* removed_runnable is the unweighted version of removed_load so we
@@ -3766,11 +3780,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ /*
+ * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+ * See ___update_load_avg() for details.
+ */
+ u32 divider = get_pelt_divider(&cfs_rq->avg);
+
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+ cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
@@ -3902,7 +3922,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+ return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
}
static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4022,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
- ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+ ue.enqueued = task_util(p);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
@@ -4051,6 +4071,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
+ ue.enqueued |= UTIL_AVG_UNCHANGED;
WRITE_ONCE(p->se.avg.util_est, ue);
trace_sched_util_est_se_tp(&p->se);
@@ -4085,6 +4106,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
#else /* CONFIG_SMP */
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+ return true;
+}
+
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
@@ -4419,6 +4445,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ clear_buddies(cfs_rq, se);
+
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
@@ -4478,7 +4506,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
* Avoid running the skip buddy, if running something else can
* be done without getting too unfair.
*/
- if (cfs_rq->skip == se) {
+ if (cfs_rq->skip && cfs_rq->skip == se) {
struct sched_entity *second;
if (se == curr) {
@@ -4505,8 +4533,6 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
se = cfs_rq->last;
}
- clear_buddies(cfs_rq, se);
-
return se;
}
@@ -4628,8 +4654,11 @@ static inline u64 sched_cfs_bandwidth_slice(void)
*/
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
- if (cfs_b->quota != RUNTIME_INF)
- cfs_b->runtime = cfs_b->quota;
+ if (unlikely(cfs_b->quota == RUNTIME_INF))
+ return;
+
+ cfs_b->runtime += cfs_b->quota;
+ cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
}
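/*
 * Worked example of the refill above (illustrative numbers; the helper is
 * invented): with quota = 100ms, burst = 50ms and 30ms of runtime carried
 * over from an under-used period, the group starts the new period with
 * min(30ms + 100ms, 100ms + 50ms) = 130ms, i.e. it may briefly run 30ms
 * beyond its plain quota, financed by earlier under-use.
 */
static u64 burst_refill_example(void)
{
	u64 quota   = 100 * NSEC_PER_MSEC;
	u64 burst   =  50 * NSEC_PER_MSEC;
	u64 runtime =  30 * NSEC_PER_MSEC;	/* carried over */

	runtime += quota;			/* 130ms */
	return min(runtime, quota + burst);	/* capped at 150ms -> 130ms */
}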
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4743,8 +4772,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
cfs_rq->throttled_clock_task;
- /* Add cfs_rq with already running entity in the list */
- if (cfs_rq->nr_running >= 1)
+ /* Add cfs_rq with load or one or more already running entities to the list */
+ if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
list_add_leaf_cfs_rq(cfs_rq);
}
@@ -4990,6 +5019,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->nr_periods += overrun;
+ /* Refill extra burst quota even if cfs_b->idle */
+ __refill_cfs_bandwidth_runtime(cfs_b);
+
/*
* idle depends on !throttled (for the case of a large deficit), and if
* we're going inactive then everything else can be deferred
@@ -4997,8 +5029,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
if (cfs_b->idle && !throttled)
goto out_deactivate;
- __refill_cfs_bandwidth_runtime(cfs_b);
-
if (!throttled) {
/* mark as potentially idle for the upcoming period */
cfs_b->idle = 1;
@@ -5248,6 +5278,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
if (new < max_cfs_quota_period) {
cfs_b->period = ns_to_ktime(new);
cfs_b->quota *= 2;
+ cfs_b->burst *= 2;
pr_warn_ratelimited(
"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
@@ -5279,6 +5310,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b->runtime = 0;
cfs_b->quota = RUNTIME_INF;
cfs_b->period = ns_to_ktime(default_cfs_period());
+ cfs_b->burst = 0;
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
@@ -5328,7 +5360,7 @@ static void __maybe_unused update_runtime_enabled(struct rq *rq)
{
struct task_group *tg;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
@@ -5347,7 +5379,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
{
struct task_group *tg;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
@@ -5935,11 +5967,15 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
+ struct rq *rq = cpu_rq(i);
+
+ if (!sched_core_cookie_match(rq, p))
+ continue;
+
if (sched_idle_cpu(i))
return i;
if (available_idle_cpu(i)) {
- struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
if (idle && idle->exit_latency < min_exit_latency) {
/*
@@ -6025,9 +6061,10 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
return new_cpu;
}
-static inline int __select_idle_cpu(int cpu)
+static inline int __select_idle_cpu(int cpu, struct task_struct *p)
{
- if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+ if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
+ sched_cpu_cookie_match(cpu_rq(cpu), p))
return cpu;
return -1;
@@ -6097,7 +6134,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
int cpu;
if (!static_branch_likely(&sched_smt_present))
- return __select_idle_cpu(core);
+ return __select_idle_cpu(core, p);
for_each_cpu(cpu, cpu_smt_mask(core)) {
if (!available_idle_cpu(cpu)) {
@@ -6153,7 +6190,7 @@ static inline bool test_idle_cores(int cpu, bool def)
static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
- return __select_idle_cpu(core);
+ return __select_idle_cpu(core, p);
}
static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
@@ -6172,9 +6209,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
int i, cpu, idle_cpu = -1, nr = INT_MAX;
+ struct rq *this_rq = this_rq();
int this = smp_processor_id();
struct sched_domain *this_sd;
- u64 time;
+ u64 time = 0;
this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
@@ -6184,12 +6222,21 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
if (sched_feat(SIS_PROP) && !has_idle_core) {
u64 avg_cost, avg_idle, span_avg;
+ unsigned long now = jiffies;
/*
- * Due to large variance we need a large fuzz factor;
- * hackbench in particularly is sensitive here.
+ * If we're busy, the assumption that the last idle period
+ * predicts the future is flawed; age away the remaining
+ * predicted idle time.
*/
- avg_idle = this_rq()->avg_idle / 512;
+ if (unlikely(this_rq->wake_stamp < now)) {
+ while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
+ this_rq->wake_stamp++;
+ this_rq->wake_avg_idle >>= 1;
+ }
+ }
+
+ avg_idle = this_rq->wake_avg_idle;
avg_cost = this_sd->avg_scan_cost + 1;
span_avg = sd->span_weight * avg_idle;
@@ -6210,7 +6257,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
} else {
if (!--nr)
return -1;
- idle_cpu = __select_idle_cpu(cpu);
+ idle_cpu = __select_idle_cpu(cpu, p);
if ((unsigned int)idle_cpu < nr_cpumask_bits)
break;
}
@@ -6221,6 +6268,13 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
if (sched_feat(SIS_PROP) && !has_idle_core) {
time = cpu_clock(this) - time;
+
+ /*
+ * Account for the scan cost of wakeups against the average
+ * idle time.
+ */
+ this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
+
update_avg(&this_sd->avg_scan_cost, time);
}
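/*
 * Worked example of the aging above (illustrative; the helper is
 * invented): with wake_avg_idle = 80us and a wake_stamp four jiffies
 * stale, the prediction halves once per busy jiffy, 80 -> 40 -> 20 ->
 * 10 -> 5us, shrinking the SIS_PROP scan window accordingly.
 */
static u64 age_wake_avg_idle(u64 wake_avg_idle, unsigned long stale)
{
	while (stale-- && wake_avg_idle)
		wake_avg_idle >>= 1;

	return wake_avg_idle;
}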
@@ -6288,6 +6342,11 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
task_util = uclamp_task_util(p);
}
+ /*
+ * Using the per-CPU select_idle_mask requires IRQs to be disabled.
+ */
+ lockdep_assert_irqs_disabled();
+
if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
asym_fits_capacity(task_util, target))
return target;
@@ -6563,8 +6622,11 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
struct cpumask *pd_mask = perf_domain_span(pd);
unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
unsigned long max_util = 0, sum_util = 0;
+ unsigned long _cpu_cap = cpu_cap;
int cpu;
+ _cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask));
+
/*
* The capacity state of CPUs of the current rd can be driven by CPUs
* of another rd if they belong to the same pd. So, account for the
@@ -6600,8 +6662,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
* is already enough to scale the EM reported power
* consumption at the (eventually clamped) cpu_capacity.
*/
- sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
- ENERGY_UTIL, NULL);
+ cpu_util = effective_cpu_util(cpu, util_running, cpu_cap,
+ ENERGY_UTIL, NULL);
+
+ sum_util += min(cpu_util, _cpu_cap);
/*
* Performance domain frequency: utilization clamping
@@ -6612,10 +6676,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
*/
cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
FREQUENCY_UTIL, tsk);
- max_util = max(max_util, cpu_util);
+ max_util = max(max_util, min(cpu_util, _cpu_cap));
}
- return em_cpu_energy(pd->em_pd, max_util, sum_util);
+ return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap);
}
/*
@@ -6661,15 +6725,15 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
{
unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int cpu, best_energy_cpu = prev_cpu, target = -1;
unsigned long cpu_cap, util, base_energy = 0;
- int cpu, best_energy_cpu = prev_cpu;
struct sched_domain *sd;
struct perf_domain *pd;
rcu_read_lock();
pd = rcu_dereference(rd->pd);
if (!pd || READ_ONCE(rd->overutilized))
- goto fail;
+ goto unlock;
/*
* Energy-aware wake-up happens on the lowest sched_domain starting
@@ -6679,7 +6743,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
sd = sd->parent;
if (!sd)
- goto fail;
+ goto unlock;
+
+ target = prev_cpu;
sync_entity_load_avg(&p->se);
if (!task_util_est(p))
@@ -6687,13 +6753,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
for (; pd; pd = pd->next) {
unsigned long cur_delta, spare_cap, max_spare_cap = 0;
+ bool compute_prev_delta = false;
unsigned long base_energy_pd;
int max_spare_cap_cpu = -1;
- /* Compute the 'base' energy of the pd, without @p */
- base_energy_pd = compute_energy(p, -1, pd);
- base_energy += base_energy_pd;
-
for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
@@ -6714,26 +6777,40 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
if (!fits_capacity(util, cpu_cap))
continue;
- /* Always use prev_cpu as a candidate. */
if (cpu == prev_cpu) {
- prev_delta = compute_energy(p, prev_cpu, pd);
- prev_delta -= base_energy_pd;
- best_delta = min(best_delta, prev_delta);
- }
-
- /*
- * Find the CPU with the maximum spare capacity in
- * the performance domain
- */
- if (spare_cap > max_spare_cap) {
+ /* Always use prev_cpu as a candidate. */
+ compute_prev_delta = true;
+ } else if (spare_cap > max_spare_cap) {
+ /*
+ * Find the CPU with the maximum spare capacity
+ * in the performance domain.
+ */
max_spare_cap = spare_cap;
max_spare_cap_cpu = cpu;
}
}
- /* Evaluate the energy impact of using this CPU. */
- if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
+ if (max_spare_cap_cpu < 0 && !compute_prev_delta)
+ continue;
+
+ /* Compute the 'base' energy of the pd, without @p */
+ base_energy_pd = compute_energy(p, -1, pd);
+ base_energy += base_energy_pd;
+
+ /* Evaluate the energy impact of using prev_cpu. */
+ if (compute_prev_delta) {
+ prev_delta = compute_energy(p, prev_cpu, pd);
+ if (prev_delta < base_energy_pd)
+ goto unlock;
+ prev_delta -= base_energy_pd;
+ best_delta = min(best_delta, prev_delta);
+ }
+
+ /* Evaluate the energy impact of using max_spare_cap_cpu. */
+ if (max_spare_cap_cpu >= 0) {
cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
+ if (cur_delta < base_energy_pd)
+ goto unlock;
cur_delta -= base_energy_pd;
if (cur_delta < best_delta) {
best_delta = cur_delta;
@@ -6741,25 +6818,22 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
}
}
}
-unlock:
rcu_read_unlock();
/*
* Pick the best CPU if prev_cpu cannot be used, or if it saves at
* least 6% of the energy used by prev_cpu.
*/
- if (prev_delta == ULONG_MAX)
- return best_energy_cpu;
-
- if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
- return best_energy_cpu;
+ if ((prev_delta == ULONG_MAX) ||
+ (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
+ target = best_energy_cpu;
- return prev_cpu;
+ return target;
-fail:
+unlock:
rcu_read_unlock();
- return -1;
+ return target;
}
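/*
 * Worked example of the ~6% margin above (illustrative numbers; the
 * helper is invented): with base_energy = 1000 and prev_delta = 200,
 * best_delta must undercut prev_delta by more than
 * (200 + 1000) >> 4 = 75 units before the wakeup leaves prev_cpu;
 * the shift by four approximates a 6.25% threshold.
 */
static bool worth_leaving_prev(unsigned long prev_delta,
			       unsigned long best_delta,
			       unsigned long base_energy)
{
	return prev_delta == ULONG_MAX ||
	       (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4);
}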
/*
@@ -6771,8 +6845,6 @@ fail:
* certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
*
* Returns the target CPU number.
- *
- * preempt must be disabled.
*/
static int
select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
@@ -6785,6 +6857,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
+ /*
+ * Holding p->pi_lock is required for a stable ->cpus_allowed.
+ */
+ lockdep_assert_held(&p->pi_lock);
if (wake_flags & WF_TTWU) {
record_wakee(p);
@@ -6849,7 +6925,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
* min_vruntime -- the latter is done by enqueue_entity() when placing
* the task on the new runqueue.
*/
- if (p->state == TASK_WAKING) {
+ if (READ_ONCE(p->__state) == TASK_WAKING) {
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 min_vruntime;
@@ -6874,7 +6950,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
* In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
* rq->lock and can modify state directly.
*/
- lockdep_assert_held(&task_rq(p)->lock);
+ lockdep_assert_rq_held(task_rq(p));
detach_entity_cfs_rq(&p->se);
} else {
@@ -7078,6 +7154,39 @@ preempt:
set_last_buddy(se);
}
+#ifdef CONFIG_SMP
+static struct task_struct *pick_task_fair(struct rq *rq)
+{
+ struct sched_entity *se;
+ struct cfs_rq *cfs_rq;
+
+again:
+ cfs_rq = &rq->cfs;
+ if (!cfs_rq->nr_running)
+ return NULL;
+
+ do {
+ struct sched_entity *curr = cfs_rq->curr;
+
+ /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
+ if (curr) {
+ if (curr->on_rq)
+ update_curr(cfs_rq);
+ else
+ curr = NULL;
+
+ if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+ goto again;
+ }
+
+ se = pick_next_entity(cfs_rq, curr);
+ cfs_rq = group_cfs_rq(se);
+ } while (cfs_rq);
+
+ return task_of(se);
+}
+#endif
+
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
@@ -7501,7 +7610,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
{
s64 delta;
- lockdep_assert_held(&env->src_rq->lock);
+ lockdep_assert_rq_held(env->src_rq);
if (p->sched_class != &fair_sched_class)
return 0;
@@ -7523,6 +7632,14 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (sysctl_sched_migration_cost == -1)
return 1;
+
+ /*
+ * Don't migrate task if the task's cookie does not match
+ * with the destination CPU's core cookie.
+ */
+ if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
+ return 1;
+
if (sysctl_sched_migration_cost == 0)
return 0;
@@ -7599,7 +7716,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
- lockdep_assert_held(&env->src_rq->lock);
+ lockdep_assert_rq_held(env->src_rq);
/*
* We do not migrate tasks that are:
@@ -7688,7 +7805,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
*/
static void detach_task(struct task_struct *p, struct lb_env *env)
{
- lockdep_assert_held(&env->src_rq->lock);
+ lockdep_assert_rq_held(env->src_rq);
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, env->dst_cpu);
@@ -7704,7 +7821,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
{
struct task_struct *p;
- lockdep_assert_held(&env->src_rq->lock);
+ lockdep_assert_rq_held(env->src_rq);
list_for_each_entry_reverse(p,
&env->src_rq->cfs_tasks, se.group_node) {
@@ -7740,7 +7857,7 @@ static int detach_tasks(struct lb_env *env)
struct task_struct *p;
int detached = 0;
- lockdep_assert_held(&env->src_rq->lock);
+ lockdep_assert_rq_held(env->src_rq);
/*
* Source run queue has been emptied by another CPU, clear
@@ -7870,7 +7987,7 @@ next:
*/
static void attach_task(struct rq *rq, struct task_struct *p)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
BUG_ON(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK);
@@ -7990,23 +8107,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
#ifdef CONFIG_FAIR_GROUP_SCHED
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
- if (cfs_rq->load.weight)
- return false;
-
- if (cfs_rq->avg.load_sum)
- return false;
-
- if (cfs_rq->avg.util_sum)
- return false;
-
- if (cfs_rq->avg.runnable_sum)
- return false;
-
- return true;
-}
-
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq, *pos;
@@ -8030,7 +8130,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(cfs_rq_of(se), se, 0);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
@@ -8853,6 +8953,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
p->cpus_ptr))
continue;
+ /* Skip over this group if no cookie matched */
+ if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
+ continue;
+
local_group = cpumask_test_cpu(this_cpu,
sched_group_span(group));
@@ -9781,7 +9885,7 @@ more_balance:
if (need_active_balance(&env)) {
unsigned long flags;
- raw_spin_lock_irqsave(&busiest->lock, flags);
+ raw_spin_rq_lock_irqsave(busiest, flags);
/*
* Don't kick the active_load_balance_cpu_stop,
@@ -9789,8 +9893,7 @@ more_balance:
* moved to this_cpu:
*/
if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
- raw_spin_unlock_irqrestore(&busiest->lock,
- flags);
+ raw_spin_rq_unlock_irqrestore(busiest, flags);
goto out_one_pinned;
}
@@ -9807,7 +9910,7 @@ more_balance:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- raw_spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_rq_unlock_irqrestore(busiest, flags);
if (active_balance) {
stop_one_cpu_nowait(cpu_of(busiest),
@@ -10592,6 +10695,14 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
u64 curr_cost = 0;
update_misfit_status(NULL, this_rq);
+
+ /*
+ * There is a task waiting to run. No need to search for one.
+ * Return 0; the task will be enqueued when switching to idle.
+ */
+ if (this_rq->ttwu_pending)
+ return 0;
+
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
@@ -10624,7 +10735,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
goto out;
}
- raw_spin_unlock(&this_rq->lock);
+ raw_spin_rq_unlock(this_rq);
update_blocked_averages(this_cpu);
rcu_read_lock();
@@ -10657,12 +10768,13 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
* Stop searching for tasks to pull if there are
* now runnable tasks on this rq.
*/
- if (pulled_task || this_rq->nr_running > 0)
+ if (pulled_task || this_rq->nr_running > 0 ||
+ this_rq->ttwu_pending)
break;
}
rcu_read_unlock();
- raw_spin_lock(&this_rq->lock);
+ raw_spin_rq_lock(this_rq);
if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost;
@@ -10755,6 +10867,119 @@ static void rq_offline_fair(struct rq *rq)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
+{
+ u64 slice = sched_slice(cfs_rq_of(se), se);
+ u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+
+ return (rtime * min_nr_tasks > slice);
+}
+
+#define MIN_NR_TASKS_DURING_FORCEIDLE 2
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
+{
+ if (!sched_core_enabled(rq))
+ return;
+
+ /*
+ * If runqueue has only one task which used up its slice and
+ * if the sibling is forced idle, then trigger schedule to
+ * give forced idle task a chance.
+ *
+ * sched_slice() considers only this active rq and it gets the
+ * whole slice. But during force idle, we have siblings acting
+ * like a single runqueue and hence we need to consider runnable
+ * tasks on this CPU and the forced idle CPU. Ideally, we should
+ * go through the forced idle rq, but that would be a perf hit.
+ * We can assume that the forced idle CPU has at least
+ * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
+ * if we need to give up the CPU.
+ */
+ if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
+ __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+ resched_curr(rq);
+}
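+
The multiply in __entity_slice_used() stands in for a division: rtime * min_nr_tasks > slice asks whether rtime exceeds slice/min_nr_tasks without dividing. A standalone illustration with assumed numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NR_TASKS_DURING_FORCEIDLE 2

/* Model of the slice-used test: true once rtime > slice / min_nr_tasks. */
static bool entity_slice_used(uint64_t slice_ns, uint64_t rtime_ns)
{
    return rtime_ns * MIN_NR_TASKS_DURING_FORCEIDLE > slice_ns;
}

int main(void)
{
    uint64_t slice = 4000000;   /* assumed 4 ms slice */

    printf("%d\n", entity_slice_used(slice, 1500000));  /* 0: 1.5 ms <= 2 ms */
    printf("%d\n", entity_slice_used(slice, 2500000));  /* 1: 2.5 ms > 2 ms */
    return 0;
}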
+
+/*
+ * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
+ */
+static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
+{
+ for_each_sched_entity(se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ if (forceidle) {
+ if (cfs_rq->forceidle_seq == fi_seq)
+ break;
+ cfs_rq->forceidle_seq = fi_seq;
+ }
+
+ cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
+ }
+}
+
+void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
+{
+ struct sched_entity *se = &p->se;
+
+ if (p->sched_class != &fair_sched_class)
+ return;
+
+ se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
+}
+
+bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+{
+ struct rq *rq = task_rq(a);
+ struct sched_entity *sea = &a->se;
+ struct sched_entity *seb = &b->se;
+ struct cfs_rq *cfs_rqa;
+ struct cfs_rq *cfs_rqb;
+ s64 delta;
+
+ SCHED_WARN_ON(task_rq(b)->core != rq->core);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /*
+ * Find an se in the hierarchy for tasks a and b, such that the se's
+ * are immediate siblings.
+ */
+ while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
+ int sea_depth = sea->depth;
+ int seb_depth = seb->depth;
+
+ if (sea_depth >= seb_depth)
+ sea = parent_entity(sea);
+ if (sea_depth <= seb_depth)
+ seb = parent_entity(seb);
+ }
+
+ se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
+ se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
+
+ cfs_rqa = sea->cfs_rq;
+ cfs_rqb = seb->cfs_rq;
+#else
+ cfs_rqa = &task_rq(a)->cfs;
+ cfs_rqb = &task_rq(b)->cfs;
+#endif
+
+ /*
+ * Find delta after normalizing se's vruntime with its cfs_rq's
+ * min_vruntime_fi, which would have been updated in prior calls
+ * to se_fi_update().
+ */
+ delta = (s64)(sea->vruntime - seb->vruntime) +
+ (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
+
+ return delta > 0;
+}
+#else
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
+#endif
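
cfs_prio_less() only compares vruntimes after shifting each by its own runqueue's min_vruntime_fi, giving both a common zero point across SMT siblings. A minimal sketch of that normalization with invented values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the normalized vruntime comparison in cfs_prio_less(). */
static bool prio_less(uint64_t vr_a, uint64_t min_fi_a,
                      uint64_t vr_b, uint64_t min_fi_b)
{
    /* (vr_a - min_fi_a) vs (vr_b - min_fi_b), rearranged to avoid underflow */
    int64_t delta = (int64_t)(vr_a - vr_b) + (int64_t)(min_fi_b - min_fi_a);

    return delta > 0;   /* true: a has consumed more, so b should run */
}

int main(void)
{
    /* a ran 100 units past its base, b only 50 past its (different) base */
    printf("%d\n", prio_less(1100, 1000, 2050, 2000));  /* prints 1 */
    return 0;
}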
+
/*
* scheduler tick hitting a task of our scheduling class.
*
@@ -10778,6 +11003,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
+
+ task_tick_core(rq, curr);
}
/*
@@ -10863,7 +11090,7 @@ static inline bool vruntime_normalized(struct task_struct *p)
* waiting for actually being woken up by sched_ttwu_pending().
*/
if (!se->sum_exec_runtime ||
- (p->state == TASK_WAKING && p->sched_remote_wakeup))
+ (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
return true;
return false;
@@ -11149,9 +11376,9 @@ void unregister_fair_sched_group(struct task_group *tg)
rq = cpu_rq(cpu);
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_rq_lock_irqsave(rq, flags);
list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_rq_unlock_irqrestore(rq, flags);
}
}
@@ -11273,6 +11500,7 @@ DEFINE_SCHED_CLASS(fair) = {
#ifdef CONFIG_SMP
.balance = balance_fair,
+ .pick_task = pick_task_fair,
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 7ca3d3d86c2a..912b47aa99d8 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -437,8 +437,16 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
+ queue_core_balance(rq);
}
+#ifdef CONFIG_SMP
+static struct task_struct *pick_task_idle(struct rq *rq)
+{
+ return rq->idle;
+}
+#endif
+
struct task_struct *pick_next_task_idle(struct rq *rq)
{
struct task_struct *next = rq->idle;
@@ -455,10 +463,10 @@ struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
- raw_spin_unlock_irq(&rq->lock);
+ raw_spin_rq_unlock_irq(rq);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
- raw_spin_lock_irq(&rq->lock);
+ raw_spin_rq_lock_irq(rq);
}
/*
@@ -506,6 +514,7 @@ DEFINE_SCHED_CLASS(idle) = {
#ifdef CONFIG_SMP
.balance = balance_idle,
+ .pick_task = pick_task_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 5a6ea03f9882..7f06eaf12818 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -81,11 +81,9 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
{
cpumask_var_t non_housekeeping_mask;
cpumask_var_t tmp;
- int err;
alloc_bootmem_cpumask_var(&non_housekeeping_mask);
- err = cpulist_parse(str, non_housekeeping_mask);
- if (err < 0 || cpumask_last(non_housekeeping_mask) >= nr_cpu_ids) {
+ if (cpulist_parse(str, non_housekeeping_mask) < 0) {
pr_warn("Housekeeping: nohz_full= or isolcpus= incorrect CPU range\n");
free_bootmem_cpumask_var(non_housekeeping_mask);
return 0;
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 1c79896f1bc0..954b229868d9 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -81,7 +81,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
long nr_active, delta = 0;
nr_active = this_rq->nr_running - adjust;
- nr_active += (long)this_rq->nr_uninterruptible;
+ nr_active += (int)this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
delta = nr_active - this_rq->calc_load_active;
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 1462846d244e..e06071bf3472 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
static inline void cfs_se_util_change(struct sched_avg *avg)
{
unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
if (!sched_feat(UTIL_EST))
return;
- /* Avoid store if the flag has been already set */
+ /* Avoid store if the flag has already been reset */
enqueued = avg->util_est.enqueued;
if (!(enqueued & UTIL_AVG_UNCHANGED))
return;
@@ -141,7 +132,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq)
static inline u64 rq_clock_pelt(struct rq *rq)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
assert_clock_updated(rq);
return rq->clock_pelt - rq->lost_idle_time;
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index cc25a3cff41f..58b36d17a09a 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -182,6 +182,8 @@ struct psi_group psi_system = {
static void psi_avgs_work(struct work_struct *work);
+static void poll_timer_fn(struct timer_list *t);
+
static void group_init(struct psi_group *group)
{
int cpu;
@@ -201,6 +203,8 @@ static void group_init(struct psi_group *group)
memset(group->polling_total, 0, sizeof(group->polling_total));
group->polling_next_update = ULLONG_MAX;
group->polling_until = 0;
+ init_waitqueue_head(&group->poll_wait);
+ timer_setup(&group->poll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->poll_task, NULL);
}
@@ -1157,9 +1161,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
return ERR_CAST(task);
}
atomic_set(&group->poll_wakeup, 0);
- init_waitqueue_head(&group->poll_wait);
wake_up_process(task);
- timer_setup(&group->poll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->poll_task, task);
}
@@ -1211,6 +1213,7 @@ static void psi_trigger_destroy(struct kref *ref)
group->poll_task,
lockdep_is_held(&group->trigger_lock));
rcu_assign_pointer(group->poll_task, NULL);
+ del_timer(&group->poll_timer);
}
}
@@ -1223,17 +1226,14 @@ static void psi_trigger_destroy(struct kref *ref)
*/
synchronize_rcu();
/*
- * Destroy the kworker after releasing trigger_lock to prevent a
+ * Stop kthread 'psimon' after releasing trigger_lock to prevent a
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (task_to_destroy) {
/*
* After the RCU grace period has expired, the worker
* can no longer be found through group->poll_task.
- * But it might have been already scheduled before
- * that - deschedule it cleanly before destroying it.
*/
- del_timer_sync(&group->poll_timer);
kthread_stop(task_to_destroy);
}
kfree(t);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c286e5ba3c94..3daf42a0f462 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -888,7 +888,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (skip)
continue;
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
update_rq_clock(rq);
if (rt_rq->rt_time) {
@@ -926,7 +926,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
}
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
@@ -1626,7 +1626,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
return rt_task_of(rt_se);
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *pick_task_rt(struct rq *rq)
{
struct task_struct *p;
@@ -1634,7 +1634,17 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
return NULL;
p = _pick_next_task_rt(rq);
- set_next_task_rt(rq, p, true);
+
+ return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+ struct task_struct *p = pick_task_rt(rq);
+
+ if (p)
+ set_next_task_rt(rq, p, true);
+
return p;
}
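
This is the same refactor applied to the fair, idle and stop classes in this series: a side-effect-free pick_task() that core scheduling can run against a remote runqueue, with pick_next_task() layering the set_next_task() commit on top for the local path. A hypothetical standalone sketch of the shape:

#include <stdio.h>

struct runq {
    int nr_running;
    int next;   /* id of the runnable task, if any */
    int curr;   /* id of the committed current task */
};

/* Side-effect free: safe to call on a runqueue we do not own. */
static int pick_task(struct runq *rq)
{
    return rq->nr_running ? rq->next : -1;
}

/* Local path: also commits the choice, like set_next_task_rt() above. */
static int pick_next_task(struct runq *rq)
{
    int t = pick_task(rq);

    if (t >= 0)
        rq->curr = t;
    return t;
}

int main(void)
{
    struct runq rq = { .nr_running = 1, .next = 42, .curr = -1 };

    printf("%d %d\n", pick_next_task(&rq), rq.curr);    /* 42 42 */
    return 0;
}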
@@ -1894,10 +1904,10 @@ retry:
*/
push_task = get_push_task(rq);
if (push_task) {
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
push_task, &rq->push_work);
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
}
return 0;
@@ -2122,10 +2132,10 @@ void rto_push_irq_work_func(struct irq_work *work)
* When it gets updated, a check is made if a push is possible.
*/
if (has_pushable_tasks(rq)) {
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
while (push_rt_task(rq, true))
;
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
}
raw_spin_lock(&rd->rto_lock);
@@ -2243,10 +2253,10 @@ skip:
double_unlock_balance(this_rq, src_rq);
if (push_task) {
- raw_spin_unlock(&this_rq->lock);
+ raw_spin_rq_unlock(this_rq);
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
push_task, &src_rq->push_work);
- raw_spin_lock(&this_rq->lock);
+ raw_spin_rq_lock(this_rq);
}
}
@@ -2331,13 +2341,20 @@ void __init init_sched_rt_class(void)
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
/*
- * If we are already running, then there's nothing
- * that needs to be done. But if we are not running
- * we may need to preempt the current running task.
- * If that current running task is also an RT task
+ * If we are running, update the avg_rt tracking, as the running time
+ * will from now on be accounted into the latter.
+ */
+ if (task_current(rq, p)) {
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+ return;
+ }
+
+ /*
+ * If we are not running we may need to preempt the current
+ * running task. If that current running task is also an RT task
* then see if we can move to another run queue.
*/
- if (task_on_rq_queued(p) && rq->curr != p) {
+ if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rt_queue_push_tasks(rq);
@@ -2483,6 +2500,7 @@ DEFINE_SCHED_CLASS(rt) = {
#ifdef CONFIG_SMP
.balance = balance_rt,
+ .pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a189bec13729..c80d42e9589b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -366,6 +366,7 @@ struct cfs_bandwidth {
ktime_t period;
u64 quota;
u64 runtime;
+ u64 burst;
s64 hierarchical_quota;
u8 idle;
@@ -526,6 +527,11 @@ struct cfs_rq {
u64 exec_clock;
u64 min_vruntime;
+#ifdef CONFIG_SCHED_CORE
+ unsigned int forceidle_seq;
+ u64 min_vruntime_fi;
+#endif
+
#ifndef CONFIG_64BIT
u64 min_vruntime_copy;
#endif
@@ -631,8 +637,8 @@ struct rt_rq {
} highest_prio;
#endif
#ifdef CONFIG_SMP
- unsigned long rt_nr_migratory;
- unsigned long rt_nr_total;
+ unsigned int rt_nr_migratory;
+ unsigned int rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
@@ -646,7 +652,7 @@ struct rt_rq {
raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
- unsigned long rt_nr_boosted;
+ unsigned int rt_nr_boosted;
struct rq *rq;
struct task_group *tg;
@@ -663,7 +669,7 @@ struct dl_rq {
/* runqueue is an rbtree, ordered by deadline */
struct rb_root_cached root;
- unsigned long dl_nr_running;
+ unsigned int dl_nr_running;
#ifdef CONFIG_SMP
/*
@@ -677,7 +683,7 @@ struct dl_rq {
u64 next;
} earliest_dl;
- unsigned long dl_nr_migratory;
+ unsigned int dl_nr_migratory;
int overloaded;
/*
@@ -905,7 +911,7 @@ DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
*/
struct rq {
/* runqueue lock: */
- raw_spinlock_t lock;
+ raw_spinlock_t __lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -955,7 +961,7 @@ struct rq {
* one CPU and if it got migrated afterwards it may decrease
* it on another CPU. Always updated under the runqueue lock:
*/
- unsigned long nr_uninterruptible;
+ unsigned int nr_uninterruptible;
struct task_struct __rcu *curr;
struct task_struct *idle;
@@ -1017,6 +1023,9 @@ struct rq {
u64 idle_stamp;
u64 avg_idle;
+ unsigned long wake_stamp;
+ u64 wake_avg_idle;
+
/* This is used to determine avg_idle's max value */
u64 max_idle_balance_cost;
@@ -1075,6 +1084,22 @@ struct rq {
#endif
unsigned int push_busy;
struct cpu_stop_work push_work;
+
+#ifdef CONFIG_SCHED_CORE
+ /* per rq */
+ struct rq *core;
+ struct task_struct *core_pick;
+ unsigned int core_enabled;
+ unsigned int core_sched_seq;
+ struct rb_root core_tree;
+
+ /* shared state */
+ unsigned int core_task_seq;
+ unsigned int core_pick_seq;
+ unsigned long core_cookie;
+ unsigned char core_forceidle;
+ unsigned int core_forceidle_seq;
+#endif
};
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1113,6 +1138,206 @@ static inline bool is_migration_disabled(struct task_struct *p)
#endif
}
+struct sched_group;
+#ifdef CONFIG_SCHED_CORE
+static inline struct cpumask *sched_group_span(struct sched_group *sg);
+
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+ return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
+}
+
+static inline bool sched_core_disabled(void)
+{
+ return !static_branch_unlikely(&__sched_core_enabled);
+}
+
+/*
+ * Be careful with this function; not for general use. The return value isn't
+ * stable unless you actually hold a relevant rq->__lock.
+ */
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+ if (sched_core_enabled(rq))
+ return &rq->core->__lock;
+
+ return &rq->__lock;
+}
+
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+ if (rq->core_enabled)
+ return &rq->core->__lock;
+
+ return &rq->__lock;
+}
+
+bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+
+/*
+ * Helpers to check if the CPU's core cookie matches the task's cookie
+ * when core scheduling is enabled.
+ * A special case is that the task's cookie always matches the CPU's core
+ * cookie if the CPU is in an idle core.
+ */
+static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+{
+ /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+ if (!sched_core_enabled(rq))
+ return true;
+
+ return rq->core->core_cookie == p->core_cookie;
+}
+
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
+{
+ bool idle_core = true;
+ int cpu;
+
+ /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+ if (!sched_core_enabled(rq))
+ return true;
+
+ for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
+ if (!available_idle_cpu(cpu)) {
+ idle_core = false;
+ break;
+ }
+ }
+
+ /*
+ * A CPU in an idle core is always the best choice for tasks with
+ * cookies.
+ */
+ return idle_core || rq->core->core_cookie == p->core_cookie;
+}
+
+static inline bool sched_group_cookie_match(struct rq *rq,
+ struct task_struct *p,
+ struct sched_group *group)
+{
+ int cpu;
+
+ /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+ if (!sched_core_enabled(rq))
+ return true;
+
+ for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
+ if (sched_core_cookie_match(rq, p))
+ return true;
+ }
+ return false;
+}
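+
The three helpers form a hierarchy: a per-CPU cookie compare, a per-core compare with an idle-core exception, and a per-group scan over allowed CPUs. A condensed userspace sketch of the idle-core special case, with a hypothetical data layout rather than the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct core {
    unsigned long cookie;       /* cookie currently running on this core */
    bool all_siblings_idle;
};

/* Model of sched_core_cookie_match(): idle cores accept any cookie. */
static bool core_cookie_match(const struct core *c, unsigned long task_cookie)
{
    return c->all_siblings_idle || c->cookie == task_cookie;
}

int main(void)
{
    struct core busy = { .cookie = 7, .all_siblings_idle = false };
    struct core idle = { .cookie = 0, .all_siblings_idle = true };

    printf("%d %d %d\n",
           core_cookie_match(&busy, 7),     /* 1: matching cookie */
           core_cookie_match(&busy, 9),     /* 0: mismatch */
           core_cookie_match(&idle, 9));    /* 1: idle core */
    return 0;
}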
+
+extern void queue_core_balance(struct rq *rq);
+
+static inline bool sched_core_enqueued(struct task_struct *p)
+{
+ return !RB_EMPTY_NODE(&p->core_node);
+}
+
+extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
+extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
+
+extern void sched_core_get(void);
+extern void sched_core_put(void);
+
+extern unsigned long sched_core_alloc_cookie(void);
+extern void sched_core_put_cookie(unsigned long cookie);
+extern unsigned long sched_core_get_cookie(unsigned long cookie);
+extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);
+
+#else /* !CONFIG_SCHED_CORE */
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+ return false;
+}
+
+static inline bool sched_core_disabled(void)
+{
+ return true;
+}
+
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+ return &rq->__lock;
+}
+
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+ return &rq->__lock;
+}
+
+static inline void queue_core_balance(struct rq *rq)
+{
+}
+
+static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+{
+ return true;
+}
+
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
+{
+ return true;
+}
+
+static inline bool sched_group_cookie_match(struct rq *rq,
+ struct task_struct *p,
+ struct sched_group *group)
+{
+ return true;
+}
+#endif /* CONFIG_SCHED_CORE */
+
+static inline void lockdep_assert_rq_held(struct rq *rq)
+{
+ lockdep_assert_held(__rq_lockp(rq));
+}
+
+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
+extern bool raw_spin_rq_trylock(struct rq *rq);
+extern void raw_spin_rq_unlock(struct rq *rq);
+
+static inline void raw_spin_rq_lock(struct rq *rq)
+{
+ raw_spin_rq_lock_nested(rq, 0);
+}
+
+static inline void raw_spin_rq_lock_irq(struct rq *rq)
+{
+ local_irq_disable();
+ raw_spin_rq_lock(rq);
+}
+
+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
+{
+ raw_spin_rq_unlock(rq);
+ local_irq_enable();
+}
+
+static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ raw_spin_rq_lock(rq);
+ return flags;
+}
+
+static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
+{
+ raw_spin_rq_unlock(rq);
+ local_irq_restore(flags);
+}
+
+#define raw_spin_rq_lock_irqsave(rq, flags) \
+do { \
+ flags = _raw_spin_rq_lock_irqsave(rq); \
+} while (0)
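
raw_spin_rq_lock_irqsave() has to be a macro because it assigns to the caller's flags lvalue; having the helper return the saved flags by value keeps the macro body a single statement. The same shape as a generic, self-contained sketch:

#include <stdio.h>

/* Stand-in for _raw_spin_rq_lock_irqsave(): save state, lock, return state. */
static unsigned long _lock_save(void)
{
    unsigned long flags = 0xa5;     /* pretend saved IRQ state */
    /* ... disable interrupts and take the lock here ... */
    return flags;
}

#define lock_irqsave(flags)     \
do {                            \
    flags = _lock_save();       \
} while (0)

int main(void)
{
    unsigned long flags;

    lock_irqsave(flags);        /* assigns through the macro */
    printf("%lx\n", flags);     /* a5 */
    return 0;
}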
+
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
@@ -1134,6 +1359,57 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+ SCHED_WARN_ON(!entity_is_task(se));
+ return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+ return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+ return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+ return grp->my_q;
+}
+
+#else
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+ return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+ return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct rq *rq = task_rq(p);
+
+ return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+ return NULL;
+}
+#endif
+
extern void update_rq_clock(struct rq *rq);
static inline u64 __rq_clock_broken(struct rq *rq)
@@ -1179,7 +1455,7 @@ static inline void assert_clock_updated(struct rq *rq)
static inline u64 rq_clock(struct rq *rq)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
assert_clock_updated(rq);
return rq->clock;
@@ -1187,7 +1463,7 @@ static inline u64 rq_clock(struct rq *rq)
static inline u64 rq_clock_task(struct rq *rq)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
assert_clock_updated(rq);
return rq->clock_task;
@@ -1213,7 +1489,7 @@ static inline u64 rq_clock_thermal(struct rq *rq)
static inline void rq_clock_skip_update(struct rq *rq)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
rq->clock_update_flags |= RQCF_REQ_SKIP;
}
@@ -1223,7 +1499,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
*/
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}
@@ -1254,7 +1530,7 @@ extern struct callback_head balance_push_callback;
*/
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
- rf->cookie = lockdep_pin_lock(&rq->lock);
+ rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
#ifdef CONFIG_SCHED_DEBUG
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
@@ -1272,12 +1548,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
rf->clock_update_flags = RQCF_UPDATED;
#endif
- lockdep_unpin_lock(&rq->lock, rf->cookie);
+ lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}
static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
- lockdep_repin_lock(&rq->lock, rf->cookie);
+ lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
#ifdef CONFIG_SCHED_DEBUG
/*
@@ -1298,7 +1574,7 @@ static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
}
static inline void
@@ -1307,7 +1583,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
__releases(p->pi_lock)
{
rq_unpin_lock(rq, rf);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
@@ -1315,7 +1591,7 @@ static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
- raw_spin_lock_irqsave(&rq->lock, rf->flags);
+ raw_spin_rq_lock_irqsave(rq, rf->flags);
rq_pin_lock(rq, rf);
}
@@ -1323,7 +1599,7 @@ static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
- raw_spin_lock_irq(&rq->lock);
+ raw_spin_rq_lock_irq(rq);
rq_pin_lock(rq, rf);
}
@@ -1331,7 +1607,7 @@ static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
rq_pin_lock(rq, rf);
}
@@ -1339,7 +1615,7 @@ static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
- raw_spin_lock(&rq->lock);
+ raw_spin_rq_lock(rq);
rq_repin_lock(rq, rf);
}
@@ -1348,7 +1624,7 @@ rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
- raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+ raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}
static inline void
@@ -1356,7 +1632,7 @@ rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
- raw_spin_unlock_irq(&rq->lock);
+ raw_spin_rq_unlock_irq(rq);
}
static inline void
@@ -1364,7 +1640,7 @@ rq_unlock(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
- raw_spin_unlock(&rq->lock);
+ raw_spin_rq_unlock(rq);
}
static inline struct rq *
@@ -1429,7 +1705,7 @@ queue_balance_callback(struct rq *rq,
struct callback_head *head,
void (*func)(struct rq *rq))
{
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
return;
@@ -1844,6 +2120,9 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
+
+ struct task_struct * (*pick_task)(struct rq *rq);
+
void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -1893,7 +2172,6 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
- WARN_ON_ONCE(rq->curr != next);
next->sched_class->set_next_task(rq, next, false);
}
@@ -1969,7 +2247,7 @@ static inline struct task_struct *get_push_task(struct rq *rq)
{
struct task_struct *p = rq->curr;
- lockdep_assert_held(&rq->lock);
+ lockdep_assert_rq_held(rq);
if (rq->push_busy)
return NULL;
@@ -2181,10 +2459,38 @@ unsigned long arch_scale_freq_capacity(int cpu)
}
#endif
+
#ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPTION
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
+{
+#ifdef CONFIG_SCHED_CORE
+ /*
+ * In order to not have {0,2},{1,3} turn into an AB-BA,
+ * order by core-id first and cpu-id second.
+ *
+ * Notably:
+ *
+ * double_rq_lock(0,3); will take core-0, core-1 lock
+ * double_rq_lock(1,2); will take core-1, core-0 lock
+ *
+ * when only cpu-id is considered.
+ */
+ if (rq1->core->cpu < rq2->core->cpu)
+ return true;
+ if (rq1->core->cpu > rq2->core->cpu)
+ return false;
+
+ /*
+ * __sched_core_flip() relies on SMT having cpu-id lock order.
+ */
+#endif
+ return rq1->cpu < rq2->cpu;
+}
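+
A standalone sketch of the (core-id, cpu-id) ordering, using the {0,2},{1,3} SMT layout from the comment (an assumed topology):

#include <stdbool.h>
#include <stdio.h>

struct rq { int core; int cpu; };

/* Model of rq_order_less(): core-id first, cpu-id second. */
static bool rq_order_less(const struct rq *a, const struct rq *b)
{
    if (a->core != b->core)
        return a->core < b->core;
    return a->cpu < b->cpu;
}

int main(void)
{
    /* cpus 0,2 share core 0; cpus 1,3 share core 1 */
    struct rq cpu0 = { 0, 0 }, cpu1 = { 1, 1 }, cpu2 = { 0, 2 }, cpu3 = { 1, 3 };

    /* double_rq_lock(0,3) and double_rq_lock(1,2) now agree: core 0 first */
    printf("%d %d\n", rq_order_less(&cpu0, &cpu3), rq_order_less(&cpu2, &cpu1));
    return 0;
}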
+
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+#ifdef CONFIG_PREEMPTION
/*
* fair double_lock_balance: Safely acquires both rq->locks in a fair
@@ -2199,7 +2505,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- raw_spin_unlock(&this_rq->lock);
+ raw_spin_rq_unlock(this_rq);
double_rq_lock(this_rq, busiest);
return 1;
@@ -2218,20 +2524,21 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- int ret = 0;
-
- if (unlikely(!raw_spin_trylock(&busiest->lock))) {
- if (busiest < this_rq) {
- raw_spin_unlock(&this_rq->lock);
- raw_spin_lock(&busiest->lock);
- raw_spin_lock_nested(&this_rq->lock,
- SINGLE_DEPTH_NESTING);
- ret = 1;
- } else
- raw_spin_lock_nested(&busiest->lock,
- SINGLE_DEPTH_NESTING);
+ if (__rq_lockp(this_rq) == __rq_lockp(busiest))
+ return 0;
+
+ if (likely(raw_spin_rq_trylock(busiest)))
+ return 0;
+
+ if (rq_order_less(this_rq, busiest)) {
+ raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
+ return 0;
}
- return ret;
+
+ raw_spin_rq_unlock(this_rq);
+ double_rq_lock(this_rq, busiest);
+
+ return 1;
}
#endif /* CONFIG_PREEMPTION */
@@ -2241,11 +2548,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
*/
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
- if (unlikely(!irqs_disabled())) {
- /* printk() doesn't work well under rq->lock */
- raw_spin_unlock(&this_rq->lock);
- BUG_ON(1);
- }
+ lockdep_assert_irqs_disabled();
return _double_lock_balance(this_rq, busiest);
}
@@ -2253,8 +2556,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- raw_spin_unlock(&busiest->lock);
- lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+ if (__rq_lockp(this_rq) != __rq_lockp(busiest))
+ raw_spin_rq_unlock(busiest);
+ lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
}
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
@@ -2285,31 +2589,6 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
}
/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
- __acquires(rq1->lock)
- __acquires(rq2->lock)
-{
- BUG_ON(!irqs_disabled());
- if (rq1 == rq2) {
- raw_spin_lock(&rq1->lock);
- __acquire(rq2->lock); /* Fake it out ;) */
- } else {
- if (rq1 < rq2) {
- raw_spin_lock(&rq1->lock);
- raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
- } else {
- raw_spin_lock(&rq2->lock);
- raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
- }
- }
-}
-
-/*
* double_rq_unlock - safely unlock two runqueues
*
* Note this does not restore interrupts like task_rq_unlock,
@@ -2319,11 +2598,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- raw_spin_unlock(&rq1->lock);
- if (rq1 != rq2)
- raw_spin_unlock(&rq2->lock);
+ if (__rq_lockp(rq1) != __rq_lockp(rq2))
+ raw_spin_rq_unlock(rq2);
else
__release(rq2->lock);
+ raw_spin_rq_unlock(rq1);
}
extern void set_rq_online (struct rq *rq);
@@ -2344,7 +2623,7 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
BUG_ON(rq1 != rq2);
- raw_spin_lock(&rq1->lock);
+ raw_spin_rq_lock(rq1);
__acquire(rq2->lock); /* Fake it out ;) */
}
@@ -2359,7 +2638,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq2->lock)
{
BUG_ON(rq1 != rq2);
- raw_spin_unlock(&rq1->lock);
+ raw_spin_rq_unlock(rq1);
__release(rq2->lock);
}
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index dc218e9f4558..d8f8eb0c655b 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -25,7 +25,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
}
static inline void
-rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
+rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
if (rq)
rq->rq_sched_info.run_delay += delta;
@@ -42,7 +42,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
-static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
+static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled() 0
# define __schedstat_inc(var) do { } while (0)
@@ -150,29 +150,24 @@ static inline void psi_sched_switch(struct task_struct *prev,
#endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO
-static inline void sched_info_reset_dequeued(struct task_struct *t)
-{
- t->sched_info.last_queued = 0;
-}
-
/*
* We are interested in knowing how long it was from the *first* time a
* task was queued to the time that it finally hit a CPU; we call this routine
* from dequeue_task() to account for possible rq->clock skew across CPUs. The
* delta taken on each CPU would annul the skew.
*/
-static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
+static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
- unsigned long long now = rq_clock(rq), delta = 0;
+ unsigned long long delta = 0;
- if (sched_info_on()) {
- if (t->sched_info.last_queued)
- delta = now - t->sched_info.last_queued;
- }
- sched_info_reset_dequeued(t);
+ if (!t->sched_info.last_queued)
+ return;
+
+ delta = rq_clock(rq) - t->sched_info.last_queued;
+ t->sched_info.last_queued = 0;
t->sched_info.run_delay += delta;
- rq_sched_info_dequeued(rq, delta);
+ rq_sched_info_dequeue(rq, delta);
}
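
A toy version of the simplified bookkeeping above, with invented timestamps: the first enqueue stamps last_queued, later enqueues are ignored, and dequeue folds the wait into run_delay:

#include <stdio.h>

struct sched_info { unsigned long long last_queued, run_delay; };

static void info_enqueue(struct sched_info *si, unsigned long long now)
{
    if (!si->last_queued)       /* keep the *first* queue time only */
        si->last_queued = now;
}

static void info_dequeue(struct sched_info *si, unsigned long long now)
{
    if (!si->last_queued)
        return;
    si->run_delay += now - si->last_queued;
    si->last_queued = 0;
}

int main(void)
{
    struct sched_info si = { 0, 0 };

    info_enqueue(&si, 100);
    info_enqueue(&si, 150);             /* ignored: already queued */
    info_dequeue(&si, 400);
    printf("%llu\n", si.run_delay);     /* 300 */
    return 0;
}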
/*
@@ -182,11 +177,14 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
*/
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
- unsigned long long now = rq_clock(rq), delta = 0;
+ unsigned long long now, delta = 0;
+
+ if (!t->sched_info.last_queued)
+ return;
- if (t->sched_info.last_queued)
- delta = now - t->sched_info.last_queued;
- sched_info_reset_dequeued(t);
+ now = rq_clock(rq);
+ delta = now - t->sched_info.last_queued;
+ t->sched_info.last_queued = 0;
t->sched_info.run_delay += delta;
t->sched_info.last_arrival = now;
t->sched_info.pcount++;
@@ -197,14 +195,12 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
/*
* This function is only called from enqueue_task(), but also only updates
* the timestamp if it is not already set. It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
+ * sched_info_dequeue() will clear that stamp when appropriate.
*/
-static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
+static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
- if (sched_info_on()) {
- if (!t->sched_info.last_queued)
- t->sched_info.last_queued = rq_clock(rq);
- }
+ if (!t->sched_info.last_queued)
+ t->sched_info.last_queued = rq_clock(rq);
}
/*
@@ -212,7 +208,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
* due, typically, to expiring its time slice (this may also be called when
* switching to the idle task). Now we can calculate how long we ran.
* Also, if the process is still in the TASK_RUNNING state, call
- * sched_info_queued() to mark that it has now again started waiting on
+ * sched_info_enqueue() to mark that it has now again started waiting on
* the runqueue.
*/
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
@@ -221,8 +217,8 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
rq_sched_info_depart(rq, delta);
- if (t->state == TASK_RUNNING)
- sched_info_queued(rq, t);
+ if (task_is_running(t))
+ sched_info_enqueue(rq, t);
}
/*
@@ -231,7 +227,7 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
* the idle task.) We are only called when prev != next.
*/
static inline void
-__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
/*
* prev now departs the CPU. It's not interesting to record
@@ -245,18 +241,8 @@ __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct
sched_info_arrive(rq, next);
}
-static inline void
-sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
-{
- if (sched_info_on())
- __sched_info_switch(rq, prev, next);
-}
-
#else /* !CONFIG_SCHED_INFO: */
-# define sched_info_queued(rq, t) do { } while (0)
-# define sched_info_reset_dequeued(t) do { } while (0)
-# define sched_info_dequeued(rq, t) do { } while (0)
-# define sched_info_depart(rq, t) do { } while (0)
-# define sched_info_arrive(rq, next) do { } while (0)
+# define sched_info_enqueue(rq, t) do { } while (0)
+# define sched_info_dequeue(rq, t) do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 55f39125c0e1..f988ebe3febb 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -34,15 +34,24 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir
stop->se.exec_start = rq_clock_task(rq);
}
-static struct task_struct *pick_next_task_stop(struct rq *rq)
+static struct task_struct *pick_task_stop(struct rq *rq)
{
if (!sched_stop_runnable(rq))
return NULL;
- set_next_task_stop(rq, rq->stop, true);
return rq->stop;
}
+static struct task_struct *pick_next_task_stop(struct rq *rq)
+{
+ struct task_struct *p = pick_task_stop(rq);
+
+ if (p)
+ set_next_task_stop(rq, p, true);
+
+ return p;
+}
+
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
@@ -123,6 +132,7 @@ DEFINE_SCHED_CLASS(stop) = {
#ifdef CONFIG_SMP
.balance = balance_stop,
+ .pick_task = pick_task_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 55a0a243e871..b77ad49dc14f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -467,7 +467,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_rq_lock_irqsave(rq, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_rq_unlock_irqrestore(rq, flags);
if (old_rd)
call_rcu(&old_rd->rcu, free_rootdomain);
@@ -675,7 +675,7 @@ static void update_top_cache_domain(int cpu)
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
- sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
+ sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}
@@ -1267,6 +1267,116 @@ next:
}
/*
+ * Asymmetric CPU capacity bits
+ */
+struct asym_cap_data {
+ struct list_head link;
+ unsigned long capacity;
+ unsigned long cpus[];
+};
+
+/*
+ * Set of available CPUs grouped by their corresponding capacities.
+ * Each list entry contains a CPU mask reflecting CPUs that share the same
+ * capacity.
+ * The lifespan of this data is unlimited.
+ */
+static LIST_HEAD(asym_cap_list);
+
+#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
+
+/*
+ * Verify whether there is any CPU capacity asymmetry in a given sched domain.
+ * Provides sd_flags reflecting the asymmetry scope.
+ */
+static inline int
+asym_cpu_capacity_classify(const struct cpumask *sd_span,
+ const struct cpumask *cpu_map)
+{
+ struct asym_cap_data *entry;
+ int count = 0, miss = 0;
+
+ /*
+ * Count how many unique CPU capacities this domain spans across
+ * (compare sched_domain CPUs mask with ones representing available
+ * CPU capacities). Take into account CPUs that might be offline:
+ * skip those.
+ */
+ list_for_each_entry(entry, &asym_cap_list, link) {
+ if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
+ ++count;
+ else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
+ ++miss;
+ }
+
+ WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
+
+ /* No asymmetry detected */
+ if (count < 2)
+ return 0;
+ /* Some of the available CPU capacity values have not been detected */
+ if (miss)
+ return SD_ASYM_CPUCAPACITY;
+
+ /* Full asymmetry */
+ return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
+
+}
+
+static inline void asym_cpu_capacity_update_data(int cpu)
+{
+ unsigned long capacity = arch_scale_cpu_capacity(cpu);
+ struct asym_cap_data *entry = NULL;
+
+ list_for_each_entry(entry, &asym_cap_list, link) {
+ if (capacity == entry->capacity)
+ goto done;
+ }
+
+ entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
+ if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
+ return;
+ entry->capacity = capacity;
+ list_add(&entry->link, &asym_cap_list);
+done:
+ __cpumask_set_cpu(cpu, cpu_capacity_span(entry));
+}
+
+/*
+ * Build up/update the list of CPUs grouped by their capacities.
+ * An update requires an explicit request to rebuild sched domains,
+ * with state indicating CPU topology changes.
+ */
+static void asym_cpu_capacity_scan(void)
+{
+ struct asym_cap_data *entry, *next;
+ int cpu;
+
+ list_for_each_entry(entry, &asym_cap_list, link)
+ cpumask_clear(cpu_capacity_span(entry));
+
+ for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN))
+ asym_cpu_capacity_update_data(cpu);
+
+ list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
+ if (cpumask_empty(cpu_capacity_span(entry))) {
+ list_del(&entry->link);
+ kfree(entry);
+ }
+ }
+
+ /*
+ * Only one capacity value has been detected, i.e. this system is symmetric.
+ * No need to keep this data around.
+ */
+ if (list_is_singular(&asym_cap_list)) {
+ entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
+ list_del(&entry->link);
+ kfree(entry);
+ }
+}
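+
asym_cpu_capacity_scan() amounts to building a map from each distinct capacity value to the set of CPUs that have it, then discarding the whole thing when only one capacity survives. A compact sketch of the same grouping, with fixed arrays standing in for the kernel's list and cpumasks, and assumed capacity values:

#include <stdio.h>

#define NR_CPUS 6

int main(void)
{
    /* assumed per-CPU capacities: two little cores, four big */
    unsigned long cap[NR_CPUS] = { 512, 512, 1024, 1024, 1024, 1024 };
    unsigned long uniq[NR_CPUS];
    unsigned long mask[NR_CPUS] = { 0 };    /* CPU bitmask per capacity */
    int nr_uniq = 0;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        int i;

        for (i = 0; i < nr_uniq; i++)
            if (uniq[i] == cap[cpu])
                break;
        if (i == nr_uniq)
            uniq[nr_uniq++] = cap[cpu];
        mask[i] |= 1UL << cpu;
    }

    /* a single surviving entry would mean a symmetric system: drop the data */
    for (int i = 0; i < nr_uniq; i++)
        printf("capacity %lu: cpus 0x%lx\n", uniq[i], mask[i]);
    return 0;
}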
+
+/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
*/
@@ -1399,11 +1509,12 @@ int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map,
- struct sched_domain *child, int dflags, int cpu)
+ struct sched_domain *child, int cpu)
{
struct sd_data *sdd = &tl->data;
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
int sd_id, sd_weight, sd_flags = 0;
+ struct cpumask *sd_span;
#ifdef CONFIG_NUMA
/*
@@ -1420,9 +1531,6 @@ sd_init(struct sched_domain_topology_level *tl,
"wrong sd_flags in topology description\n"))
sd_flags &= TOPOLOGY_SD_FLAGS;
- /* Apply detected topology flags */
- sd_flags |= dflags;
-
*sd = (struct sched_domain){
.min_interval = sd_weight,
.max_interval = 2*sd_weight,
@@ -1454,13 +1562,19 @@ sd_init(struct sched_domain_topology_level *tl,
#endif
};
- cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
- sd_id = cpumask_first(sched_domain_span(sd));
+ sd_span = sched_domain_span(sd);
+ cpumask_and(sd_span, cpu_map, tl->mask(cpu));
+ sd_id = cpumask_first(sd_span);
+
+ sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
+
+ WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
+ (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
+ "CPU capacity asymmetry not supported on SMT\n");
/*
* Convert topological properties into behaviour.
*/
-
/* Don't attempt to spread across CPUs of different capacities. */
if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
sd->child->flags &= ~SD_PREFER_SIBLING;
@@ -1926,9 +2040,9 @@ static void __sdt_free(const struct cpumask *cpu_map)
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
- struct sched_domain *child, int dflags, int cpu)
+ struct sched_domain *child, int cpu)
{
- struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
+ struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
if (child) {
sd->level = child->level + 1;
@@ -1991,65 +2105,6 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl,
}
/*
- * Find the sched_domain_topology_level where all CPU capacities are visible
- * for all CPUs.
- */
-static struct sched_domain_topology_level
-*asym_cpu_capacity_level(const struct cpumask *cpu_map)
-{
- int i, j, asym_level = 0;
- bool asym = false;
- struct sched_domain_topology_level *tl, *asym_tl = NULL;
- unsigned long cap;
-
- /* Is there any asymmetry? */
- cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
-
- for_each_cpu(i, cpu_map) {
- if (arch_scale_cpu_capacity(i) != cap) {
- asym = true;
- break;
- }
- }
-
- if (!asym)
- return NULL;
-
- /*
- * Examine topology from all CPU's point of views to detect the lowest
- * sched_domain_topology_level where a highest capacity CPU is visible
- * to everyone.
- */
- for_each_cpu(i, cpu_map) {
- unsigned long max_capacity = arch_scale_cpu_capacity(i);
- int tl_id = 0;
-
- for_each_sd_topology(tl) {
- if (tl_id < asym_level)
- goto next_level;
-
- for_each_cpu_and(j, tl->mask(i), cpu_map) {
- unsigned long capacity;
-
- capacity = arch_scale_cpu_capacity(j);
-
- if (capacity <= max_capacity)
- continue;
-
- max_capacity = capacity;
- asym_level = tl_id;
- asym_tl = tl;
- }
-next_level:
- tl_id++;
- }
- }
-
- return asym_tl;
-}
-
-
-/*
* Build sched domains for a given set of CPUs and attach the sched domains
* to the individual CPUs
*/
@@ -2061,7 +2116,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
struct s_data d;
struct rq *rq = NULL;
int i, ret = -ENOMEM;
- struct sched_domain_topology_level *tl_asym;
bool has_asym = false;
if (WARN_ON(cpumask_empty(cpu_map)))
@@ -2071,24 +2125,19 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
if (alloc_state != sa_rootdomain)
goto error;
- tl_asym = asym_cpu_capacity_level(cpu_map);
-
/* Set up domains for CPUs specified by the cpu_map: */
for_each_cpu(i, cpu_map) {
struct sched_domain_topology_level *tl;
- int dflags = 0;
sd = NULL;
for_each_sd_topology(tl) {
- if (tl == tl_asym) {
- dflags |= SD_ASYM_CPUCAPACITY;
- has_asym = true;
- }
if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
goto error;
- sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
+ sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+
+ has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
if (tl == sched_domain_topology)
*per_cpu_ptr(d.sd, i) = sd;
@@ -2217,6 +2266,7 @@ int sched_init_domains(const struct cpumask *cpu_map)
zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
arch_update_cpu_topology();
+ asym_cpu_capacity_scan();
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
@@ -2299,6 +2349,9 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
/* Let the architecture update CPU core mappings: */
new_topology = arch_update_cpu_topology();
+ /* Trigger rebuilding CPU capacity asymmetry data */
+ if (new_topology)
+ asym_cpu_capacity_scan();
if (!doms_new) {
WARN_ON_ONCE(dattr_new);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 183cc6ae68a6..76577d1642a5 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -264,17 +264,22 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent
}
EXPORT_SYMBOL(prepare_to_wait);
-void
+/* Returns true if we are the first waiter in the queue, false otherwise. */
+bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
+ bool was_empty = false;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
- if (list_empty(&wq_entry->entry))
+ if (list_empty(&wq_entry->entry)) {
+ was_empty = list_empty(&wq_head->head);
__add_wait_queue_entry_tail(wq_head, wq_entry);
+ }
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
+ return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
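
Returning whether the queue was empty lets a caller learn "am I the head waiter?" without a second locked walk of the list. A self-contained userspace analogue of that return value, using a simplified singly linked queue rather than the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

struct waiter { struct waiter *next; };
struct waitq { struct waiter *head; };

/* Append an exclusive waiter; report whether we became the first one. */
static bool prepare_wait_exclusive(struct waitq *q, struct waiter *w)
{
    bool was_empty = (q->head == NULL);
    struct waiter **pp = &q->head;

    while (*pp)
        pp = &(*pp)->next;
    w->next = NULL;
    *pp = w;
    return was_empty;
}

int main(void)
{
    struct waitq q = { 0 };
    struct waiter a, b;

    printf("%d %d\n", prepare_wait_exclusive(&q, &a),
           prepare_wait_exclusive(&q, &b));     /* 1 0 */
    return 0;
}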
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 6ecd3f3a52b5..057e17f3215d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -107,6 +107,7 @@ struct seccomp_knotif {
* installing process should allocate the fd as normal.
* @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC
* is allowed.
+ * @ioctl_flags: The flags used for the seccomp_addfd ioctl.
* @ret: The return value of the installing process. It is set to the fd num
* upon success (>= 0).
* @completion: Indicates that the installing process has completed fd
@@ -118,6 +119,7 @@ struct seccomp_kaddfd {
struct file *file;
int fd;
unsigned int flags;
+ __u32 ioctl_flags;
union {
bool setfd;
@@ -1065,18 +1067,37 @@ static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
return filter->notif->next_id++;
}
-static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd)
+static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_knotif *n)
{
+ int fd;
+
/*
* Remove the notification, and reset the list pointers, indicating
* that it has been handled.
*/
list_del_init(&addfd->list);
if (!addfd->setfd)
- addfd->ret = receive_fd(addfd->file, addfd->flags);
+ fd = receive_fd(addfd->file, addfd->flags);
else
- addfd->ret = receive_fd_replace(addfd->fd, addfd->file,
- addfd->flags);
+ fd = receive_fd_replace(addfd->fd, addfd->file, addfd->flags);
+ addfd->ret = fd;
+
+ if (addfd->ioctl_flags & SECCOMP_ADDFD_FLAG_SEND) {
+ /* If we fail reset and return an error to the notifier */
+ if (fd < 0) {
+ n->state = SECCOMP_NOTIFY_SENT;
+ } else {
+ /* Return the FD we just added */
+ n->flags = 0;
+ n->error = 0;
+ n->val = fd;
+ }
+ }
+
+ /*
+ * Mark the notification as completed. From this point, the addfd memory
+ * might be invalidated and we can't safely read it anymore.
+ */
complete(&addfd->completion);
}
@@ -1105,28 +1126,30 @@ static int seccomp_do_user_notification(int this_syscall,
up(&match->notif->request);
wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
- mutex_unlock(&match->notify_lock);
/*
* This is where we wait for a reply from userspace.
*/
-wait:
- err = wait_for_completion_interruptible(&n.ready);
- mutex_lock(&match->notify_lock);
- if (err == 0) {
- /* Check if we were woken up by a addfd message */
+ do {
+ mutex_unlock(&match->notify_lock);
+ err = wait_for_completion_interruptible(&n.ready);
+ mutex_lock(&match->notify_lock);
+ if (err != 0)
+ goto interrupted;
+
addfd = list_first_entry_or_null(&n.addfd,
struct seccomp_kaddfd, list);
- if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
- seccomp_handle_addfd(addfd);
- mutex_unlock(&match->notify_lock);
- goto wait;
- }
- ret = n.val;
- err = n.error;
- flags = n.flags;
- }
+ /* Check if we were woken up by an addfd message */
+ if (addfd)
+ seccomp_handle_addfd(addfd, &n);
+ } while (n.state != SECCOMP_NOTIFY_REPLIED);
+
+ ret = n.val;
+ err = n.error;
+ flags = n.flags;
+
+interrupted:
/* If there were any pending addfd calls, clear them out */
list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
/* The process went away before we got a chance to handle it */
@@ -1579,7 +1602,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
if (addfd.newfd_flags & ~O_CLOEXEC)
return -EINVAL;
- if (addfd.flags & ~SECCOMP_ADDFD_FLAG_SETFD)
+ if (addfd.flags & ~(SECCOMP_ADDFD_FLAG_SETFD | SECCOMP_ADDFD_FLAG_SEND))
return -EINVAL;
if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD))
@@ -1589,6 +1612,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
if (!kaddfd.file)
return -EBADF;
+ kaddfd.ioctl_flags = addfd.flags;
kaddfd.flags = addfd.newfd_flags;
kaddfd.setfd = addfd.flags & SECCOMP_ADDFD_FLAG_SETFD;
kaddfd.fd = addfd.newfd;
@@ -1614,6 +1638,23 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
goto out_unlock;
}
+ if (addfd.flags & SECCOMP_ADDFD_FLAG_SEND) {
+ /*
+ * Disallow queuing an atomic addfd + send reply while there are
+ * some addfd requests still to process.
+ *
+ * There is no clear reason to support it, and this allows us to keep
+ * the loop on the other side straightforward.
+ */
+ if (!list_empty(&knotif->addfd)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Allow exactly one reply */
+ knotif->state = SECCOMP_NOTIFY_REPLIED;
+ }
+
list_add(&kaddfd.list, &knotif->addfd);
complete(&knotif->ready);
mutex_unlock(&filter->notify_lock);
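
From the supervisor side, SECCOMP_ADDFD_FLAG_SEND makes the addfd ioctl install the fd in the target and answer the notification in one atomic step, so no separate SECCOMP_IOCTL_NOTIF_SEND round trip is needed. A sketch of a supervisor using it; this is a fragment that assumes uapi headers carrying this patch and elides error handling:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* Install srcfd in the target and reply to notification 'id' in one ioctl. */
static int addfd_and_reply(int notify_fd, __u64 id, int srcfd)
{
    struct seccomp_notif_addfd addfd;

    memset(&addfd, 0, sizeof(addfd));
    addfd.id = id;                          /* notification being answered */
    addfd.srcfd = srcfd;                    /* fd to copy into the target */
    addfd.newfd_flags = O_CLOEXEC;
    addfd.flags = SECCOMP_ADDFD_FLAG_SEND;  /* install + reply atomically */

    /* on success, returns the fd number allocated in the target */
    return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}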
diff --git a/kernel/signal.c b/kernel/signal.c
index f7c6ffcbd044..de0920353d30 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -412,8 +412,8 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
int override_rlimit, const unsigned int sigqueue_flags)
{
struct sigqueue *q = NULL;
- struct user_struct *user;
- int sigpending;
+ struct ucounts *ucounts = NULL;
+ long sigpending;
/*
* Protect access to @t credentials. This can go away when all
@@ -424,77 +424,38 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
* changes from/to zero.
*/
rcu_read_lock();
- user = __task_cred(t)->user;
- sigpending = atomic_inc_return(&user->sigpending);
+ ucounts = task_ucounts(t);
+ sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
if (sigpending == 1)
- get_uid(user);
+ ucounts = get_ucounts(ucounts);
rcu_read_unlock();
- if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
- /*
- * Preallocation does not hold sighand::siglock so it can't
- * use the cache. The lockless caching requires that only
- * one consumer and only one producer run at a time.
- */
- q = READ_ONCE(t->sigqueue_cache);
- if (!q || sigqueue_flags)
- q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
- else
- WRITE_ONCE(t->sigqueue_cache, NULL);
+ if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+ q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
} else {
print_dropped_signal(sig);
}
if (unlikely(q == NULL)) {
- if (atomic_dec_and_test(&user->sigpending))
- free_uid(user);
+ if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
+ put_ucounts(ucounts);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = sigqueue_flags;
- q->user = user;
+ q->ucounts = ucounts;
}
-
return q;
}
-void exit_task_sigqueue_cache(struct task_struct *tsk)
-{
- /* Race free because @tsk is mopped up */
- struct sigqueue *q = tsk->sigqueue_cache;
-
- if (q) {
- tsk->sigqueue_cache = NULL;
- /*
- * Hand it back to the cache as the task might
- * be self reaping which would leak the object.
- */
- kmem_cache_free(sigqueue_cachep, q);
- }
-}
-
-static void sigqueue_cache_or_free(struct sigqueue *q)
-{
- /*
- * Cache one sigqueue per task. This pairs with the consumer side
- * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
- * compiler from store tearing and to tell KCSAN that the data race
- * is intentional when run without holding current->sighand->siglock,
- * which is fine as current obviously cannot run __sigqueue_free()
- * concurrently.
- */
- if (!READ_ONCE(current->sigqueue_cache))
- WRITE_ONCE(current->sigqueue_cache, q);
- else
- kmem_cache_free(sigqueue_cachep, q);
-}
-
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
- if (atomic_dec_and_test(&q->user->sigpending))
- free_uid(q->user);
- sigqueue_cache_or_free(q);
+ if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
+ put_ucounts(q->ucounts);
+ q->ucounts = NULL;
+ }
+ kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
@@ -4719,7 +4680,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
}
new_t = kdb_prev_t != t;
kdb_prev_t = t;
- if (t->state != TASK_RUNNING && new_t) {
+ if (!task_is_running(t) && new_t) {
spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index f25208e8df83..e4163042c4d6 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu)
if (!tsk)
return ERR_PTR(-ENOMEM);
- init_idle(tsk, cpu);
return tsk;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4992853ef53d..f3a012179f47 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -76,7 +76,7 @@ static void wakeup_softirqd(void)
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
- if (tsk && tsk->state != TASK_RUNNING)
+ if (tsk)
wake_up_process(tsk);
}
@@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned long pending)
if (pending & SOFTIRQ_NOW_MASK)
return false;
- return tsk && (tsk->state == TASK_RUNNING) &&
- !__kthread_should_park(tsk);
+ return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/kernel/sys.c b/kernel/sys.c
index 3a583a29815f..ef1a78f5d71c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -479,7 +479,7 @@ static int set_user(struct cred *new)
* for programs doing set*uid()+execve() by harmlessly deferring the
* failure to the execve() stage.
*/
- if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
+ if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
new_user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
else
@@ -558,6 +558,10 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
if (retval < 0)
goto error;
+ retval = set_cred_ucounts(new);
+ if (retval < 0)
+ goto error;
+
return commit_creds(new);
error:
@@ -616,6 +620,10 @@ long __sys_setuid(uid_t uid)
if (retval < 0)
goto error;
+ retval = set_cred_ucounts(new);
+ if (retval < 0)
+ goto error;
+
return commit_creds(new);
error:
@@ -691,6 +699,10 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (retval < 0)
goto error;
+ retval = set_cred_ucounts(new);
+ if (retval < 0)
+ goto error;
+
return commit_creds(new);
error:
@@ -2550,6 +2562,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = set_syscall_user_dispatch(arg2, arg3, arg4,
(char __user *) arg5);
break;
+#ifdef CONFIG_SCHED_CORE
+ case PR_SCHED_CORE:
+ error = sched_core_share_pid(arg2, arg3, arg4, arg5);
+ break;
+#endif
default:
error = -EINVAL;
break;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 14edf84cc571..e1aa24e1545c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -71,6 +71,7 @@
#include <linux/coredump.h>
#include <linux/latencytop.h>
#include <linux/pid.h>
+#include <linux/delayacct.h>
#include "../lib/kstrtox.h"
@@ -225,7 +226,27 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
mutex_unlock(&bpf_stats_enabled_mutex);
return ret;
}
-#endif
+
+static int bpf_unpriv_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, unpriv_enable = *(int *)table->data;
+ bool locked_state = unpriv_enable == 1;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &unpriv_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ if (locked_state && unpriv_enable != 1)
+ return -EPERM;
+ *(int *)table->data = unpriv_enable;
+ }
+ return ret;
+}
+#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
/*
* /proc/sys support
@@ -1727,6 +1748,17 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif /* CONFIG_SCHEDSTATS */
+#ifdef CONFIG_TASK_DELAY_ACCT
+ {
+ .procname = "task_delayacct",
+ .data = NULL,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sysctl_delayacct,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif /* CONFIG_TASK_DELAY_ACCT */
#ifdef CONFIG_NUMA_BALANCING
{
.procname = "numa_balancing",
@@ -2600,10 +2632,9 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_unprivileged_bpf_disabled,
.maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
.mode = 0644,
- /* only handle a transition from default "0" to "1" */
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ONE,
- .extra2 = SYSCTL_ONE,
+ .proc_handler = bpf_unpriv_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &two,
},
{
.procname = "bpf_stats_enabled",
@@ -2890,11 +2921,11 @@ static struct ctl_table vm_table[] = {
.extra2 = &one_thousand,
},
{
- .procname = "percpu_pagelist_fraction",
- .data = &percpu_pagelist_fraction,
- .maxlen = sizeof(percpu_pagelist_fraction),
+ .procname = "percpu_pagelist_high_fraction",
+ .data = &percpu_pagelist_high_fraction,
+ .maxlen = sizeof(percpu_pagelist_high_fraction),
.mode = 0644,
- .proc_handler = percpu_pagelist_fraction_sysctl_handler,
+ .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
.extra1 = SYSCTL_ZERO,
},
{
@@ -2932,14 +2963,6 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "block_dump",
- .data = &block_dump,
- .maxlen = sizeof(block_dump),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
.procname = "vfs_cache_pressure",
.data = &sysctl_vfs_cache_pressure,
.maxlen = sizeof(sysctl_vfs_cache_pressure),
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 83e158d016ba..04bfd62f5e5c 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -64,6 +64,15 @@ config LEGACY_TIMER_TICK
lack support for the generic clockevent framework.
New platforms should use generic clockevents instead.
+config TIME_KUNIT_TEST
+ tristate "KUnit test for kernel/time functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test kernel/time library functions.
+
+ If unsure, say N.
+
if GENERIC_CLOCKEVENTS
menu "Timers subsystem"
@@ -117,13 +126,14 @@ config NO_HZ_FULL
the task mostly runs in userspace and has little kernel activity.
You need to fill up the nohz_full boot parameter with the
- desired range of dynticks CPUs.
+ desired range of dynticks CPUs to use it. This is implemented at
+ the expense of some overhead in user <-> kernel transitions:
+ syscalls, exceptions and interrupts.
- This is implemented at the expense of some overhead in user <-> kernel
- transitions: syscalls, exceptions and interrupts. Even when it's
- dynamically off.
+ By default, without passing the nohz_full parameter, this behaves just
+ like NO_HZ_IDLE.
- Say N.
+ If you are building a distro kernel, say Y.
endchoice
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 1fb1c1ef6a19..7e875e63ff3b 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -21,3 +21,5 @@ obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
obj-$(CONFIG_TIME_NS) += namespace.o
+obj-$(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) += clocksource-wdtest.o
+obj-$(CONFIG_TIME_KUNIT_TEST) += time_test.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index f5490222e134..003ccf338d20 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -347,8 +347,7 @@ static void clockevents_notify_released(void)
while (!list_empty(&clockevents_released)) {
dev = list_entry(clockevents_released.next,
struct clock_event_device, list);
- list_del(&dev->list);
- list_add(&dev->list, &clockevent_devices);
+ list_move(&dev->list, &clockevent_devices);
tick_check_new_device(dev);
}
}
@@ -576,8 +575,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
if (old) {
module_put(old->owner);
clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
- list_del(&old->list);
- list_add(&old->list, &clockevents_released);
+ list_move(&old->list, &clockevents_released);
}
if (new) {
@@ -629,6 +627,7 @@ void tick_offline_cpu(unsigned int cpu)
/**
* tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
+ * @cpu: The dead CPU
*/
void tick_cleanup_dead_cpu(int cpu)
{
@@ -668,9 +667,9 @@ static struct bus_type clockevents_subsys = {
static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);
-static ssize_t sysfs_show_current_tick_dev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t current_device_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct tick_device *td;
ssize_t count = 0;
@@ -682,12 +681,12 @@ static ssize_t sysfs_show_current_tick_dev(struct device *dev,
raw_spin_unlock_irq(&clockevents_lock);
return count;
}
-static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);
+static DEVICE_ATTR_RO(current_device);
/* We don't support the abomination of removable broadcast devices */
-static ssize_t sysfs_unbind_tick_dev(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t unbind_device_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
char name[CS_NAME_LEN];
ssize_t ret = sysfs_get_uname(buf, name, count);
@@ -714,7 +713,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev,
mutex_unlock(&clockevents_mutex);
return ret ? ret : count;
}
-static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
+static DEVICE_ATTR_WO(unbind_device);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c
new file mode 100644
index 000000000000..01df12395c0e
--- /dev/null
+++ b/kernel/time/clocksource-wdtest.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Unit test for the clocksource watchdog.
+ *
+ * Copyright (C) 2021 Facebook, Inc.
+ *
+ * Author: Paul E. McKenney <paulmck@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
+#include <linux/tick.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/prandom.h>
+#include <linux/cpu.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
+
+static int holdoff = IS_BUILTIN(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) ? 10 : 0;
+module_param(holdoff, int, 0444);
+MODULE_PARM_DESC(holdoff, "Time to wait to start test (s).");
+
+/* Watchdog kthread's task_struct pointer for debug purposes. */
+static struct task_struct *wdtest_task;
+
+static u64 wdtest_jiffies_read(struct clocksource *cs)
+{
+ return (u64)jiffies;
+}
+
+/* Assume HZ > 100. */
+#define JIFFIES_SHIFT 8
+
+static struct clocksource clocksource_wdtest_jiffies = {
+ .name = "wdtest-jiffies",
+ .rating = 1, /* lowest valid rating */
+ .uncertainty_margin = TICK_NSEC,
+ .read = wdtest_jiffies_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_MUST_VERIFY,
+ .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */
+ .shift = JIFFIES_SHIFT,
+ .max_cycles = 10,
+};
+
+static int wdtest_ktime_read_ndelays;
+static bool wdtest_ktime_read_fuzz;
+
+static u64 wdtest_ktime_read(struct clocksource *cs)
+{
+ int wkrn = READ_ONCE(wdtest_ktime_read_ndelays);
+ static int sign = 1;
+ u64 ret;
+
+ if (wkrn) {
+ udelay(cs->uncertainty_margin / 250);
+ WRITE_ONCE(wdtest_ktime_read_ndelays, wkrn - 1);
+ }
+ ret = ktime_get_real_fast_ns();
+ if (READ_ONCE(wdtest_ktime_read_fuzz)) {
+ sign = -sign;
+ ret = ret + sign * 100 * NSEC_PER_MSEC;
+ }
+ return ret;
+}
+
+static void wdtest_ktime_cs_mark_unstable(struct clocksource *cs)
+{
+ pr_info("--- Marking %s unstable due to clocksource watchdog.\n", cs->name);
+}
+
+#define KTIME_FLAGS (CLOCK_SOURCE_IS_CONTINUOUS | \
+ CLOCK_SOURCE_VALID_FOR_HRES | \
+ CLOCK_SOURCE_MUST_VERIFY | \
+ CLOCK_SOURCE_VERIFY_PERCPU)
+
+static struct clocksource clocksource_wdtest_ktime = {
+ .name = "wdtest-ktime",
+ .rating = 300,
+ .read = wdtest_ktime_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = KTIME_FLAGS,
+ .mark_unstable = wdtest_ktime_cs_mark_unstable,
+ .list = LIST_HEAD_INIT(clocksource_wdtest_ktime.list),
+};
+
+/* Reset the clocksource if needed. */
+static void wdtest_ktime_clocksource_reset(void)
+{
+ if (clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE) {
+ clocksource_unregister(&clocksource_wdtest_ktime);
+ clocksource_wdtest_ktime.flags = KTIME_FLAGS;
+ schedule_timeout_uninterruptible(HZ / 10);
+ clocksource_register_khz(&clocksource_wdtest_ktime, 1000 * 1000);
+ }
+}
+
+/* Run the specified series of watchdog tests. */
+static int wdtest_func(void *arg)
+{
+ unsigned long j1, j2;
+ char *s;
+ int i;
+
+ schedule_timeout_uninterruptible(holdoff * HZ);
+
+ /*
+ * Verify that jiffies-like clocksources get the manually
+ * specified uncertainty margin.
+ */
+ pr_info("--- Verify jiffies-like uncertainty margin.\n");
+ __clocksource_register(&clocksource_wdtest_jiffies);
+ WARN_ON_ONCE(clocksource_wdtest_jiffies.uncertainty_margin != TICK_NSEC);
+
+ j1 = clocksource_wdtest_jiffies.read(&clocksource_wdtest_jiffies);
+ schedule_timeout_uninterruptible(HZ);
+ j2 = clocksource_wdtest_jiffies.read(&clocksource_wdtest_jiffies);
+ WARN_ON_ONCE(j1 == j2);
+
+ clocksource_unregister(&clocksource_wdtest_jiffies);
+
+ /*
+ * Verify that tsc-like clocksources are assigned a reasonable
+ * uncertainty margin.
+ */
+ pr_info("--- Verify tsc-like uncertainty margin.\n");
+ clocksource_register_khz(&clocksource_wdtest_ktime, 1000 * 1000);
+ WARN_ON_ONCE(clocksource_wdtest_ktime.uncertainty_margin < NSEC_PER_USEC);
+
+ j1 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime);
+ udelay(1);
+ j2 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime);
+ pr_info("--- tsc-like times: %lu - %lu = %lu.\n", j2, j1, j2 - j1);
+ WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC));
+
+ /* Verify tsc-like stability with various numbers of errors injected. */
+ for (i = 0; i <= max_cswd_read_retries + 1; i++) {
+ if (i <= 1 && i < max_cswd_read_retries)
+ s = "";
+ else if (i <= max_cswd_read_retries)
+ s = ", expect message";
+ else
+ s = ", expect clock skew";
+ pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s);
+ WRITE_ONCE(wdtest_ktime_read_ndelays, i);
+ schedule_timeout_uninterruptible(2 * HZ);
+ WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays));
+ WARN_ON_ONCE((i <= max_cswd_read_retries) !=
+ !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE));
+ wdtest_ktime_clocksource_reset();
+ }
+
+ /* Verify tsc-like stability with clock-value-fuzz error injection. */
+ pr_info("--- Watchdog clock-value-fuzz error injection, expect clock skew and per-CPU mismatches.\n");
+ WRITE_ONCE(wdtest_ktime_read_fuzz, true);
+ schedule_timeout_uninterruptible(2 * HZ);
+ WARN_ON_ONCE(!(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE));
+ clocksource_verify_percpu(&clocksource_wdtest_ktime);
+ WRITE_ONCE(wdtest_ktime_read_fuzz, false);
+
+ clocksource_unregister(&clocksource_wdtest_ktime);
+
+ pr_info("--- Done with test.\n");
+ return 0;
+}
+
+static void wdtest_print_module_parms(void)
+{
+ pr_alert("--- holdoff=%d\n", holdoff);
+}
+
+/* Cleanup function. */
+static void clocksource_wdtest_cleanup(void)
+{
+}
+
+static int __init clocksource_wdtest_init(void)
+{
+ int ret = 0;
+
+ wdtest_print_module_parms();
+
+ /* Create watchdog-test task. */
+ wdtest_task = kthread_run(wdtest_func, NULL, "wdtest");
+ if (IS_ERR(wdtest_task)) {
+ ret = PTR_ERR(wdtest_task);
+ pr_warn("%s: Failed to create wdtest kthread.\n", __func__);
+ wdtest_task = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+module_init(clocksource_wdtest_init);
+module_exit(clocksource_wdtest_cleanup);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 2cd902592fc1..b89c76e1c02c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -14,6 +14,8 @@
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
+#include <linux/prandom.h>
+#include <linux/cpu.h>
#include "tick-internal.h"
#include "timekeeping_internal.h"
@@ -93,6 +95,20 @@ static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;
+/*
+ * Threshold: 0.0312s, when doubled: 0.0625s.
+ * Also a default for cs->uncertainty_margin when registering clocks.
+ */
+#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
+
+/*
+ * Maximum permissible delay between two readouts of the watchdog
+ * clocksource surrounding a read of the clocksource being validated.
+ * This delay could be due to SMIs, NMIs, or VCPU preemptions. Used as
+ * a lower bound for cs->uncertainty_margin values when registering clocks.
+ */
+#define WATCHDOG_MAX_SKEW (50 * NSEC_PER_USEC)
+
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);
@@ -119,10 +135,9 @@ static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);
/*
- * Interval: 0.5sec Threshold: 0.0625s
+ * Interval: 0.5sec.
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
-#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
static void clocksource_watchdog_work(struct work_struct *work)
{
@@ -184,12 +199,164 @@ void clocksource_mark_unstable(struct clocksource *cs)
spin_unlock_irqrestore(&watchdog_lock, flags);
}
+ulong max_cswd_read_retries = 3;
+module_param(max_cswd_read_retries, ulong, 0644);
+EXPORT_SYMBOL_GPL(max_cswd_read_retries);
+static int verify_n_cpus = 8;
+module_param(verify_n_cpus, int, 0644);
+
+static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
+{
+ unsigned int nretries;
+ u64 wd_end, wd_delta;
+ int64_t wd_delay;
+
+ for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
+ local_irq_disable();
+ *wdnow = watchdog->read(watchdog);
+ *csnow = cs->read(cs);
+ wd_end = watchdog->read(watchdog);
+ local_irq_enable();
+
+ wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
+ wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
+ watchdog->shift);
+ if (wd_delay <= WATCHDOG_MAX_SKEW) {
+ if (nretries > 1 || nretries >= max_cswd_read_retries) {
+ pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
+ smp_processor_id(), watchdog->name, nretries);
+ }
+ return true;
+ }
+ }
+
+ pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
+ smp_processor_id(), watchdog->name, wd_delay, nretries);
+ return false;
+}
+
+static u64 csnow_mid;
+static cpumask_t cpus_ahead;
+static cpumask_t cpus_behind;
+static cpumask_t cpus_chosen;
+
+static void clocksource_verify_choose_cpus(void)
+{
+ int cpu, i, n = verify_n_cpus;
+
+ if (n < 0) {
+ /* Check all of the CPUs. */
+ cpumask_copy(&cpus_chosen, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
+ return;
+ }
+
+ /* If no checking desired, or no other CPU to check, leave. */
+ cpumask_clear(&cpus_chosen);
+ if (n == 0 || num_online_cpus() <= 1)
+ return;
+
+ /* Make sure to select at least one CPU other than the current CPU. */
+ cpu = cpumask_next(-1, cpu_online_mask);
+ if (cpu == smp_processor_id())
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ return;
+ cpumask_set_cpu(cpu, &cpus_chosen);
+
+ /* Force a sane value for the boot parameter. */
+ if (n > nr_cpu_ids)
+ n = nr_cpu_ids;
+
+ /*
+ * Randomly select the specified number of CPUs. If the same
+ * CPU is selected multiple times, that CPU is checked only once,
+ * and no replacement CPU is selected. This gracefully handles
+ * situations where verify_n_cpus is greater than the number of
+ * CPUs that are currently online.
+ */
+ for (i = 1; i < n; i++) {
+ cpu = prandom_u32() % nr_cpu_ids;
+ cpu = cpumask_next(cpu - 1, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_next(-1, cpu_online_mask);
+ if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ cpumask_set_cpu(cpu, &cpus_chosen);
+ }
+
+ /* Don't verify ourselves. */
+ cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
+}
+
+static void clocksource_verify_one_cpu(void *csin)
+{
+ struct clocksource *cs = (struct clocksource *)csin;
+
+ csnow_mid = cs->read(cs);
+}
+
+void clocksource_verify_percpu(struct clocksource *cs)
+{
+ int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
+ u64 csnow_begin, csnow_end;
+ int cpu, testcpu;
+ s64 delta;
+
+ if (verify_n_cpus == 0)
+ return;
+ cpumask_clear(&cpus_ahead);
+ cpumask_clear(&cpus_behind);
+ get_online_cpus();
+ preempt_disable();
+ clocksource_verify_choose_cpus();
+ if (cpumask_weight(&cpus_chosen) == 0) {
+ preempt_enable();
+ put_online_cpus();
+ pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+ return;
+ }
+ testcpu = smp_processor_id();
+ pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+ for_each_cpu(cpu, &cpus_chosen) {
+ if (cpu == testcpu)
+ continue;
+ csnow_begin = cs->read(cs);
+ smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
+ csnow_end = cs->read(cs);
+ delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
+ if (delta < 0)
+ cpumask_set_cpu(cpu, &cpus_behind);
+ delta = (csnow_end - csnow_mid) & cs->mask;
+ if (delta < 0)
+ cpumask_set_cpu(cpu, &cpus_ahead);
+ delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
+ cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+ if (cs_nsec > cs_nsec_max)
+ cs_nsec_max = cs_nsec;
+ if (cs_nsec < cs_nsec_min)
+ cs_nsec_min = cs_nsec;
+ }
+ preempt_enable();
+ put_online_cpus();
+ if (!cpumask_empty(&cpus_ahead))
+ pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+ cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
+ if (!cpumask_empty(&cpus_behind))
+ pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
+ cpumask_pr_args(&cpus_behind), testcpu, cs->name);
+ if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
+ pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
+ testcpu, cs_nsec_min, cs_nsec_max, cs->name);
+}
+EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
+
static void clocksource_watchdog(struct timer_list *unused)
{
- struct clocksource *cs;
u64 csnow, wdnow, cslast, wdlast, delta;
- int64_t wd_nsec, cs_nsec;
int next_cpu, reset_pending;
+ int64_t wd_nsec, cs_nsec;
+ struct clocksource *cs;
+ u32 md;
spin_lock(&watchdog_lock);
if (!watchdog_running)
@@ -206,10 +373,11 @@ static void clocksource_watchdog(struct timer_list *unused)
continue;
}
- local_irq_disable();
- csnow = cs->read(cs);
- wdnow = watchdog->read(watchdog);
- local_irq_enable();
+ if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
+ /* Clock readout unreliable, so give it up. */
+ __clocksource_unstable(cs);
+ continue;
+ }
/* Clocksource initialized ? */
if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
@@ -235,13 +403,20 @@ static void clocksource_watchdog(struct timer_list *unused)
continue;
/* Check the deviation from the watchdog clocksource. */
- if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
+ md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ if (abs(cs_nsec - wd_nsec) > md) {
pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
smp_processor_id(), cs->name);
- pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
- watchdog->name, wdnow, wdlast, watchdog->mask);
- pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
- cs->name, csnow, cslast, cs->mask);
+ pr_warn(" '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
+ watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
+ pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
+ cs->name, cs_nsec, csnow, cslast, cs->mask);
+ if (curr_clocksource == cs)
+ pr_warn(" '%s' is current clocksource.\n", cs->name);
+ else if (curr_clocksource)
+ pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
+ else
+ pr_warn(" No current clocksource.\n");
__clocksource_unstable(cs);
continue;
}
@@ -407,6 +582,12 @@ static int __clocksource_watchdog_kthread(void)
unsigned long flags;
int select = 0;
+ /* Do any required per-CPU skew verification. */
+ if (curr_clocksource &&
+ curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
+ curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
+ clocksource_verify_percpu(curr_clocksource);
+
spin_lock_irqsave(&watchdog_lock, flags);
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
@@ -876,6 +1057,26 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC / scale, sec * scale);
}
+
+ /*
+ * If the uncertainty margin is not specified, calculate it.
+ * If both scale and freq are non-zero, calculate the clock
+ * period, bounded below by 2*WATCHDOG_MAX_SKEW. However,
+ * if either of scale or freq is zero, be very conservative and
+ * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
+ * uncertainty margin. Allow stupidly small uncertainty margins
+ * to be specified by the caller for testing purposes, but warn
+ * to discourage production use of this capability.
+ */
+ if (scale && freq && !cs->uncertainty_margin) {
+ cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
+ if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
+ cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
+ } else if (!cs->uncertainty_margin) {
+ cs->uncertainty_margin = WATCHDOG_THRESHOLD;
+ }
+ WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
+
/*
* Ensure clocksources that have large 'mult' values don't overflow
* when adjusted.
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a492e4da69ba..01935aafdb46 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -49,13 +49,14 @@ static u64 jiffies_read(struct clocksource *cs)
* for "tick-less" systems.
*/
static struct clocksource clocksource_jiffies = {
- .name = "jiffies",
- .rating = 1, /* lowest valid rating*/
- .read = jiffies_read,
- .mask = CLOCKSOURCE_MASK(32),
- .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */
- .shift = JIFFIES_SHIFT,
- .max_cycles = 10,
+ .name = "jiffies",
+ .rating = 1, /* lowest valid rating */
+ .uncertainty_margin = 32 * NSEC_PER_MSEC,
+ .read = jiffies_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */
+ .shift = JIFFIES_SHIFT,
+ .max_cycles = 10,
};
__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3bb96a8b49c9..29a5e54e6e10 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -523,7 +523,7 @@ static void arm_timer(struct k_itimer *timer, struct task_struct *p)
if (CPUCLOCK_PERTHREAD(timer->it_clock))
tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
else
- tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
+ tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
}
/*
@@ -1358,7 +1358,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
if (*newval < *nextevt)
*nextevt = *newval;
- tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
+ tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index a44055228796..f7fe6fe36173 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -33,6 +33,8 @@ static int tick_broadcast_forced;
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
#ifdef CONFIG_TICK_ONESHOT
+static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);
+
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -61,6 +63,13 @@ struct cpumask *tick_get_broadcast_mask(void)
return tick_broadcast_mask;
}
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
+
+const struct clock_event_device *tick_get_wakeup_device(int cpu)
+{
+ return tick_get_oneshot_wakeup_device(cpu);
+}
+
/*
* Start the device in periodic mode
*/
@@ -88,13 +97,75 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
return !curdev || newdev->rating > curdev->rating;
}
+#ifdef CONFIG_TICK_ONESHOT
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+ return per_cpu(tick_oneshot_wakeup_device, cpu);
+}
+
+static void tick_oneshot_wakeup_handler(struct clock_event_device *wd)
+{
+ /*
+ * If we woke up early and the tick was reprogrammed in the
+ * meantime then this may be spurious but harmless.
+ */
+ tick_receive_broadcast();
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+ int cpu)
+{
+ struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu);
+
+ if (!newdev)
+ goto set_device;
+
+ if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+ return false;
+
+ if (!(newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
+ !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return false;
+
+ if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+ return false;
+
+ if (curdev && newdev->rating <= curdev->rating)
+ return false;
+
+ if (!try_module_get(newdev->owner))
+ return false;
+
+ newdev->event_handler = tick_oneshot_wakeup_handler;
+set_device:
+ clockevents_exchange_device(curdev, newdev);
+ per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
+ return true;
+}
+#else
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+ return NULL;
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+ int cpu)
+{
+ return false;
+}
+#endif
+
/*
* Conditionally install/replace broadcast device
*/
-void tick_install_broadcast_device(struct clock_event_device *dev)
+void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
{
struct clock_event_device *cur = tick_broadcast_device.evtdev;
+ if (tick_set_oneshot_wakeup_device(dev, cpu))
+ return;
+
if (!tick_check_broadcast_device(cur, dev))
return;
@@ -253,7 +324,6 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
return ret;
}
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
@@ -268,7 +338,6 @@ int tick_receive_broadcast(void)
evt->event_handler(evt);
return 0;
}
-#endif
/*
* Broadcast the event to the cpus, which are set in the mask (mangled).
@@ -719,24 +788,16 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
-int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+static int ___tick_broadcast_oneshot_control(enum tick_broadcast_state state,
+ struct tick_device *td,
+ int cpu)
{
- struct clock_event_device *bc, *dev;
- int cpu, ret = 0;
+ struct clock_event_device *bc, *dev = td->evtdev;
+ int ret = 0;
ktime_t now;
- /*
- * If there is no broadcast device, tell the caller not to go
- * into deep idle.
- */
- if (!tick_broadcast_device.evtdev)
- return -EBUSY;
-
- dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
-
raw_spin_lock(&tick_broadcast_lock);
bc = tick_broadcast_device.evtdev;
- cpu = smp_processor_id();
if (state == TICK_BROADCAST_ENTER) {
/*
@@ -865,6 +926,53 @@ out:
return ret;
}
+static int tick_oneshot_wakeup_control(enum tick_broadcast_state state,
+ struct tick_device *td,
+ int cpu)
+{
+ struct clock_event_device *dev, *wd;
+
+ dev = td->evtdev;
+ if (td->mode != TICKDEV_MODE_ONESHOT)
+ return -EINVAL;
+
+ wd = tick_get_oneshot_wakeup_device(cpu);
+ if (!wd)
+ return -ENODEV;
+
+ switch (state) {
+ case TICK_BROADCAST_ENTER:
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+ clockevents_switch_state(wd, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_program_event(wd, dev->next_event, 1);
+ break;
+ case TICK_BROADCAST_EXIT:
+ /* We may have transitioned to oneshot mode while idle */
+ if (clockevent_get_state(wd) != CLOCK_EVT_STATE_ONESHOT)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+ int cpu = smp_processor_id();
+
+ if (!tick_oneshot_wakeup_control(state, td, cpu))
+ return 0;
+
+ if (tick_broadcast_device.evtdev)
+ return ___tick_broadcast_oneshot_control(state, td, cpu);
+
+ /*
+ * If there is no broadcast or wakeup device, tell the caller not
+ * to go into deep idle.
+ */
+ return -EBUSY;
+}
+
/*
* Reset the one shot broadcast for a cpu
*
@@ -991,6 +1099,9 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
*/
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
+ if (tick_get_oneshot_wakeup_device(cpu))
+ tick_set_oneshot_wakeup_device(NULL, cpu);
+
/*
* Clear the broadcast masks for the dead cpu, but do not stop
* the broadcast device!
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index e15bc0ef1912..d663249652ef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -373,7 +373,7 @@ out_bc:
/*
* Can the new device be used as a broadcast device ?
*/
- tick_install_broadcast_device(newdev);
+ tick_install_broadcast_device(newdev, cpu);
}
/**
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7a981c9e87a4..6a742a29e545 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -61,7 +61,7 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
/* Broadcasting support */
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
-extern void tick_install_broadcast_device(struct clock_event_device *dev);
+extern void tick_install_broadcast_device(struct clock_event_device *dev, int cpu);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
@@ -71,8 +71,9 @@ extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadc
extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);
+extern const struct clock_event_device *tick_get_wakeup_device(int cpu);
# else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST: */
-static inline void tick_install_broadcast_device(struct clock_event_device *dev) { }
+static inline void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { }
static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 828b091501ca..6bffe5af8cb1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;
@@ -322,6 +323,46 @@ void tick_nohz_full_kick_cpu(int cpu)
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
+static void tick_nohz_kick_task(struct task_struct *tsk)
+{
+ int cpu;
+
+ /*
+ * If the task is not running, run_posix_cpu_timers()
+ * has nothing to elapse, so the IPI can be spared.
+ *
+ * activate_task() STORE p->tick_dep_mask
+ * STORE p->on_rq
+ * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or())
+ * LOCK rq->lock LOAD p->on_rq
+ * smp_mb__after_spin_lock()
+ * tick_nohz_task_switch()
+ * LOAD p->tick_dep_mask
+ */
+ if (!sched_task_on_rq(tsk))
+ return;
+
+ /*
+ * If the task concurrently migrates to another CPU,
+ * we guarantee it sees the new tick dependency upon
+ * schedule.
+ *
+ * set_task_cpu(p, cpu);
+ * STORE p->cpu = @cpu
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock
+ * smp_mb__after_spin_lock() STORE p->tick_dep_mask
+ * tick_nohz_task_switch() smp_mb() (atomic_fetch_or())
+ * LOAD p->tick_dep_mask LOAD p->cpu
+ */
+ cpu = task_cpu(tsk);
+
+ preempt_disable();
+ if (cpu_online(cpu))
+ tick_nohz_full_kick_cpu(cpu);
+ preempt_enable();
+}
+
/*
* Kick all full dynticks CPUs in order to force these to re-evaluate
* their dependency on the tick and restart it if necessary.
@@ -404,19 +445,8 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
*/
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
- if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) {
- if (tsk == current) {
- preempt_disable();
- tick_nohz_full_kick();
- preempt_enable();
- } else {
- /*
- * Some future tick_nohz_full_kick_task()
- * should optimize this.
- */
- tick_nohz_full_kick_all();
- }
- }
+ if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
+ tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
@@ -430,9 +460,20 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
* Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
* per process timers.
*/
-void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
+void tick_nohz_dep_set_signal(struct task_struct *tsk,
+ enum tick_dep_bits bit)
{
- tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
+ int prev;
+ struct signal_struct *sig = tsk->signal;
+
+ prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
+ if (!prev) {
+ struct task_struct *t;
+
+ lockdep_assert_held(&tsk->sighand->siglock);
+ __for_each_thread(sig, t)
+ tick_nohz_kick_task(t);
+ }
}
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
@@ -447,13 +488,10 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
*/
void __tick_nohz_task_switch(void)
{
- unsigned long flags;
struct tick_sched *ts;
- local_irq_save(flags);
-
if (!tick_nohz_full_cpu(smp_processor_id()))
- goto out;
+ return;
ts = this_cpu_ptr(&tick_cpu_sched);
@@ -462,8 +500,6 @@ void __tick_nohz_task_switch(void)
atomic_read(&current->signal->tick_dep_mask))
tick_nohz_full_kick();
}
-out:
- local_irq_restore(flags);
}
/* Get the boot-time nohz CPU list from the kernel parameters. */
@@ -921,27 +957,31 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
* Cancel the scheduled timer and restore the tick
*/
ts->tick_stopped = 0;
- ts->idle_exittime = now;
-
tick_nohz_restart(ts, now);
}
-static void tick_nohz_full_update_tick(struct tick_sched *ts)
+static void __tick_nohz_full_update_tick(struct tick_sched *ts,
+ ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id();
- if (!tick_nohz_full_cpu(cpu))
+ if (can_stop_full_tick(cpu, ts))
+ tick_nohz_stop_sched_tick(ts, cpu);
+ else if (ts->tick_stopped)
+ tick_nohz_restart_sched_tick(ts, now);
+#endif
+}
+
+static void tick_nohz_full_update_tick(struct tick_sched *ts)
+{
+ if (!tick_nohz_full_cpu(smp_processor_id()))
return;
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
return;
- if (can_stop_full_tick(cpu, ts))
- tick_nohz_stop_sched_tick(ts, cpu);
- else if (ts->tick_stopped)
- tick_nohz_restart_sched_tick(ts, ktime_get());
-#endif
+ __tick_nohz_full_update_tick(ts, ktime_get());
}
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
@@ -1188,11 +1228,13 @@ unsigned long tick_nohz_get_idle_calls(void)
return ts->idle_calls;
}
-static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
+static void tick_nohz_account_idle_time(struct tick_sched *ts,
+ ktime_t now)
{
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
unsigned long ticks;
+ ts->idle_exittime = now;
+
if (vtime_accounting_enabled_this_cpu())
return;
/*
@@ -1206,21 +1248,27 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
*/
if (ticks && ticks < LONG_MAX)
account_idle_ticks(ticks);
-#endif
}
-static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
+void tick_nohz_idle_restart_tick(void)
{
- tick_nohz_restart_sched_tick(ts, now);
- tick_nohz_account_idle_ticks(ts);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ if (ts->tick_stopped) {
+ ktime_t now = ktime_get();
+ tick_nohz_restart_sched_tick(ts, now);
+ tick_nohz_account_idle_time(ts, now);
+ }
}
-void tick_nohz_idle_restart_tick(void)
+static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
- struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ __tick_nohz_full_update_tick(ts, now);
+ else
+ tick_nohz_restart_sched_tick(ts, now);
- if (ts->tick_stopped)
- __tick_nohz_idle_restart_tick(ts, ktime_get());
+ tick_nohz_account_idle_time(ts, now);
}
/**
@@ -1252,7 +1300,7 @@ void tick_nohz_idle_exit(void)
tick_nohz_stop_idle(ts, now);
if (tick_stopped)
- __tick_nohz_idle_restart_tick(ts, now);
+ tick_nohz_idle_update_tick(ts, now);
local_irq_enable();
}
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
new file mode 100644
index 000000000000..831e8e779ace
--- /dev/null
+++ b/kernel/time/time_test.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: LGPL-2.1+
+
+#include <kunit/test.h>
+#include <linux/time.h>
+
+/*
+ * Traditional implementation of leap year evaluation.
+ */
+static bool is_leap(long year)
+{
+ return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
+}
+
+/*
+ * Gets the last day of a month.
+ */
+static int last_day_of_month(long year, int month)
+{
+ if (month == 2)
+ return 28 + is_leap(year);
+ if (month == 4 || month == 6 || month == 9 || month == 11)
+ return 30;
+ return 31;
+}
+
+/*
+ * Advances a date by one day.
+ */
+static void advance_date(long *year, int *month, int *mday, int *yday)
+{
+ if (*mday != last_day_of_month(*year, *month)) {
+ ++*mday;
+ ++*yday;
+ return;
+ }
+
+ *mday = 1;
+ if (*month != 12) {
+ ++*month;
+ ++*yday;
+ return;
+ }
+
+ *month = 1;
+ *yday = 0;
+ ++*year;
+}
+
+/*
+ * Checks every day in a 160000-year interval centered at 1970-01-01
+ * against the expected result.
+ */
+static void time64_to_tm_test_date_range(struct kunit *test)
+{
+ /*
+ * 80000 years = (80000 / 400) * 400 years
+ * = (80000 / 400) * 146097 days
+ * = (80000 / 400) * 146097 * 86400 seconds
+ */
+ time64_t total_secs = ((time64_t) 80000) / 400 * 146097 * 86400;
+ long year = 1970 - 80000;
+ int month = 1;
+ int mdday = 1;
+ int yday = 0;
+
+ struct tm result;
+ time64_t secs;
+ s64 days;
+
+ for (secs = -total_secs; secs <= total_secs; secs += 86400) {
+
+ time64_to_tm(secs, 0, &result);
+
+ days = div_s64(secs, 86400);
+
+ #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \
+ year, month, mdday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, mdday, result.tm_mday, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG);
+
+ advance_date(&year, &month, &mdday, &yday);
+ }
+}
+
+static struct kunit_case time_test_cases[] = {
+ KUNIT_CASE(time64_to_tm_test_date_range),
+ {}
+};
+
+static struct kunit_suite time_test_suite = {
+ .name = "time_test_cases",
+ .test_cases = time_test_cases,
+};
+
+kunit_test_suite(time_test_suite);
+MODULE_LICENSE("GPL");
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 62e3b46717a6..59b922c826e7 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -22,47 +22,16 @@
/*
* Converts the calendar time to broken-down time representation
- * Based on code from glibc-2.6
*
* 2009-7-14:
* Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com>
+ * 2021-06-02:
+ * Reimplemented by Cassio Neri <cassio.neri@gmail.com>
*/
#include <linux/time.h>
#include <linux/module.h>
-
-/*
- * Nonzero if YEAR is a leap year (every 4 years,
- * except every 100th isn't, and every 400th is).
- */
-static int __isleap(long year)
-{
- return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0);
-}
-
-/* do a mathdiv for long type */
-static long math_div(long a, long b)
-{
- return a / b - (a % b < 0);
-}
-
-/* How many leap years between y1 and y2, y1 must less or equal to y2 */
-static long leaps_between(long y1, long y2)
-{
- long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100)
- + math_div(y1 - 1, 400);
- long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100)
- + math_div(y2 - 1, 400);
- return leaps2 - leaps1;
-}
-
-/* How many days come before each month (0-12). */
-static const unsigned short __mon_yday[2][13] = {
- /* Normal years. */
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
- /* Leap years. */
- {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
-};
+#include <linux/kernel.h>
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
@@ -77,9 +46,11 @@ static const unsigned short __mon_yday[2][13] = {
*/
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
- long days, rem, y;
+ u32 u32tmp, day_of_century, year_of_century, day_of_year, month, day;
+ u64 u64tmp, udays, century, year;
+ bool is_Jan_or_Feb, is_leap_year;
+ long days, rem;
int remainder;
- const unsigned short *ip;
days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder);
rem = remainder;
@@ -103,27 +74,68 @@ void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
if (result->tm_wday < 0)
result->tm_wday += 7;
- y = 1970;
-
- while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
- /* Guess a corrected year, assuming 365 days per year. */
- long yg = y + math_div(days, 365);
-
- /* Adjust DAYS and Y to match the guessed year. */
- days -= (yg - y) * 365 + leaps_between(y, yg);
- y = yg;
- }
-
- result->tm_year = y - 1900;
-
- result->tm_yday = days;
-
- ip = __mon_yday[__isleap(y)];
- for (y = 11; days < ip[y]; y--)
- continue;
- days -= ip[y];
-
- result->tm_mon = y;
- result->tm_mday = days + 1;
+ /*
+ * The following algorithm is, basically, Proposition 6.3 of Neri
+ * and Schneider [1]. In a few words: it works on the computational
+ * (fictitious) calendar where the year starts in March, month = 2
+ * (*), and finishes in February, month = 13. This calendar is
+ * mathematically convenient because the day of the year does not
+ * depend on whether the year is leap or not. For instance:
+ *
+ * March 1st 0th day of the year;
+ * ...
+ * April 1st 31st day of the year;
+ * ...
+ * January 1st 306th day of the year; (Important!)
+ * ...
+ * February 28th 364th day of the year;
+ * February 29th 365th day of the year (if it exists).
+ *
+ * After having worked out the date in the computational calendar
+ * (using just arithmetic) it's easy to convert it to the
+ * corresponding date in the Gregorian calendar.
+ *
+ * [1] "Euclidean Affine Functions and Applications to Calendar
+ * Algorithms". https://arxiv.org/abs/2102.06959
+ *
+ * (*) The numbering of months follows tm more closely and thus
+ * is slightly different from [1].
+ */
+
+ udays = ((u64) days) + 2305843009213814918ULL;
+
+ u64tmp = 4 * udays + 3;
+ century = div64_u64_rem(u64tmp, 146097, &u64tmp);
+ day_of_century = (u32) (u64tmp / 4);
+
+ u32tmp = 4 * day_of_century + 3;
+ u64tmp = 2939745ULL * u32tmp;
+ year_of_century = upper_32_bits(u64tmp);
+ day_of_year = lower_32_bits(u64tmp) / 2939745 / 4;
+
+ year = 100 * century + year_of_century;
+ is_leap_year = year_of_century ? !(year_of_century % 4) : !(century % 4);
+
+ u32tmp = 2141 * day_of_year + 132377;
+ month = u32tmp >> 16;
+ day = ((u16) u32tmp) / 2141;
+
+ /*
+ * Recall that January 1st is the 306-th day of the year in the
+ * computational (not Gregorian) calendar.
+ */
+ is_Jan_or_Feb = day_of_year >= 306;
+
+ /* Convert to the Gregorian calendar and adjust to Unix time. */
+ year = year + is_Jan_or_Feb - 6313183731940000ULL;
+ month = is_Jan_or_Feb ? month - 12 : month;
+ day = day + 1;
+ day_of_year += is_Jan_or_Feb ? -306 : 31 + 28 + is_leap_year;
+
+ /* Convert to tm's format. */
+ result->tm_year = (long) (year - 1900);
+ result->tm_mon = (int) month;
+ result->tm_mday = (int) day;
+ result->tm_yday = (int) day_of_year;
}
EXPORT_SYMBOL(time64_to_tm);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index d111adf4a0cb..467087d7bdb6 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1879,7 +1879,7 @@ signed long __sched schedule_timeout(signed long timeout)
printk(KERN_ERR "schedule_timeout: wrong timeout "
"value %lx\n", timeout);
dump_stack();
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
goto out;
}
}
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 6939140ab7c5..ed7d6ad694fb 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -228,6 +228,14 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
SEQ_printf(m, " event_handler: %ps\n", dev->event_handler);
SEQ_printf(m, "\n");
SEQ_printf(m, " retries: %lu\n", dev->retries);
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ if (cpu >= 0) {
+ const struct clock_event_device *wd = tick_get_wakeup_device(cpu);
+
+ SEQ_printf(m, "Wakeup Device: %s\n", wd ? wd->name : "<NULL>");
+ }
+#endif
SEQ_printf(m, "\n");
}
@@ -248,7 +256,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
static inline void timer_list_header(struct seq_file *m, u64 now)
{
- SEQ_printf(m, "Timer List Version: v0.8\n");
+ SEQ_printf(m, "Timer List Version: v0.9\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
SEQ_printf(m, "\n");
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d2d7cf6cfe83..64bd2d84367f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
+ int ret;
- if (unlikely(ret < 0))
- goto fail;
ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
- goto fail;
- return ret;
-fail:
- memset(dst, 0, size);
+ memset(dst, 0, size);
return ret;
}
@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
- if (unlikely(ret < 0))
- goto fail;
+ int ret;
/*
* The strncpy_from_kernel_nofault() call will likely not fill the
@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
*/
ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
- goto fail;
-
- return ret;
-fail:
- memset(dst, 0, size);
+ memset(dst, 0, size);
return ret;
}
@@ -1011,20 +999,26 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return &bpf_probe_read_kernel_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return &bpf_probe_read_kernel_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
case BPF_FUNC_probe_read:
- return &bpf_probe_read_compat_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_compat_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
+ case BPF_FUNC_get_current_ancestor_cgroup_id:
+ return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
case BPF_FUNC_send_signal:
return &bpf_send_signal_proto;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e8a3fde7104..72ef4dccbcc4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
+ char ins[MCOUNT_INSN_SIZE];
int i;
+ if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+ printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+ return;
+ }
+
printk(KERN_CONT "%s", fmt);
for (i = 0; i < MCOUNT_INSN_SIZE; i++)
- printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
}
enum ftrace_bug_type ftrace_bug_type;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a21ef9cd2aae..d23a09d3eb37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
};
static struct saved_cmdlines_buffer *savedcmd;
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
static inline char *get_saved_cmdlines(int idx)
{
return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
{
if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
return true;
- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
- return true;
if (!__this_cpu_read(trace_taskinfo_save))
return true;
return false;
@@ -2736,7 +2731,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
@@ -3998,9 +3993,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-EBUSY);
#endif
- if (!iter->snapshot)
- atomic_inc(&trace_record_taskinfo_disabled);
-
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
@@ -4043,9 +4035,6 @@ static void s_stop(struct seq_file *m, void *p)
return;
#endif
- if (!iter->snapshot)
- atomic_dec(&trace_record_taskinfo_disabled);
-
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
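
The corrected reservation bound above accounts for everything a buffered event stores in the per-CPU page: the ring_buffer_event header, the u32 length word kept in entry->array[0], and the payload itself. As a sketch (names as in the hunk):

	static inline bool buffered_event_fits(size_t len,
					       const struct ring_buffer_event *entry)
	{
		/* header + length word + payload must all fit in one page */
		return len < PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]);
	}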
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cd80d046c7a5..d5d8c088a55d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -20,6 +20,7 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
+#include <linux/once_lite.h>
#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h> /* For NR_SYSCALLS */
@@ -99,16 +100,8 @@ enum trace_type {
#include "trace_entries.h"
/* Use this for memory failure errors */
-#define MEM_FAIL(condition, fmt, ...) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- pr_err("ERROR: " fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define MEM_FAIL(condition, fmt, ...) \
+ DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
/*
* syscalls are special, and need special handling, this is why
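
MEM_FAIL now delegates its print-once logic to DO_ONCE_LITE_IF() from the newly included <linux/once_lite.h>. The macro expands to essentially the open-coded block it replaces (paraphrased; see the header for the authoritative definition):

	#define DO_ONCE_LITE_IF(condition, func, ...)			\
	({								\
		static bool __section(".data.once") __already_done;	\
		bool __ret_do_once = !!(condition);			\
									\
		if (unlikely(__ret_do_once && !__already_done)) {	\
			__already_done = true;				\
			func(__VA_ARGS__);				\
		}							\
		unlikely(__ret_do_once);				\
	})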
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index c1637f90c8a3..4702efb00ff2 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
prev_time = READ_ONCE(trace_clock_struct.prev_time);
now = sched_clock_cpu(this_cpu);
- /* Make sure that now is always greater than prev_time */
+ /* Make sure that now is always greater than or equal to prev_time */
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
/*
* If in an NMI context then dont risk lockups and simply return
@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
/* Reread prev_time in case it was already updated */
prev_time = READ_ONCE(trace_clock_struct.prev_time);
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
trace_clock_struct.prev_time = now;
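
Two details worth noting in the clamp above: casting the u64 difference to s64 keeps the ordering test correct across counter wraparound, and returning prev_time instead of prev_time + 1 makes the global clock monotonically non-decreasing without inventing time that never passed. The wrap-safe idiom in isolation:

	/*
	 * Wrap-safe "a happened before b" for 64-bit timestamps; valid as
	 * long as the two values are within 2^63 of each other.
	 */
	static inline bool ts_before(u64 a, u64 b)
	{
		return (s64)(a - b) < 0;
	}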
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 8d8874f1c35e..87799e2379bd 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -8,6 +8,12 @@
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>
+struct ucounts init_ucounts = {
+ .ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
+ .count = ATOMIC_INIT(1),
+};
+
#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);
@@ -78,6 +84,10 @@ static struct ctl_table user_table[] = {
UCOUNT_ENTRY("max_fanotify_groups"),
UCOUNT_ENTRY("max_fanotify_marks"),
#endif
+ { },
+ { },
+ { },
+ { },
{ }
};
#endif /* CONFIG_SYSCTL */
@@ -129,7 +139,24 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struc
return NULL;
}
-static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
+static void hlist_add_ucounts(struct ucounts *ucounts)
+{
+ struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+ spin_lock_irq(&ucounts_lock);
+ hlist_add_head(&ucounts->node, hashent);
+ spin_unlock_irq(&ucounts_lock);
+}
+
+struct ucounts *get_ucounts(struct ucounts *ucounts)
+{
+ if (ucounts && atomic_add_negative(1, &ucounts->count)) {
+ put_ucounts(ucounts);
+ ucounts = NULL;
+ }
+ return ucounts;
+}
+
+struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
struct ucounts *ucounts, *new;
@@ -145,7 +172,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
new->ns = ns;
new->uid = uid;
- new->count = 0;
+ atomic_set(&new->count, 1);
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -153,40 +180,35 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
kfree(new);
} else {
hlist_add_head(&new->node, hashent);
- ucounts = new;
+ spin_unlock_irq(&ucounts_lock);
+ return new;
}
}
- if (ucounts->count == INT_MAX)
- ucounts = NULL;
- else
- ucounts->count += 1;
spin_unlock_irq(&ucounts_lock);
+ ucounts = get_ucounts(ucounts);
return ucounts;
}
-static void put_ucounts(struct ucounts *ucounts)
+void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- spin_lock_irqsave(&ucounts_lock, flags);
- ucounts->count -= 1;
- if (!ucounts->count)
+ if (atomic_dec_and_test(&ucounts->count)) {
+ spin_lock_irqsave(&ucounts_lock, flags);
hlist_del_init(&ucounts->node);
- else
- ucounts = NULL;
- spin_unlock_irqrestore(&ucounts_lock, flags);
-
- kfree(ucounts);
+ spin_unlock_irqrestore(&ucounts_lock, flags);
+ kfree(ucounts);
+ }
}
-static inline bool atomic_inc_below(atomic_t *v, int u)
+static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
{
- int c, old;
- c = atomic_read(v);
+ long c, old;
+ c = atomic_long_read(v);
for (;;) {
if (unlikely(c >= u))
return false;
- old = atomic_cmpxchg(v, c, c+1);
+ old = atomic_long_cmpxchg(v, c, c+1);
if (likely(old == c))
return true;
c = old;
@@ -198,19 +220,19 @@ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
{
struct ucounts *ucounts, *iter, *bad;
struct user_namespace *tns;
- ucounts = get_ucounts(ns, uid);
+ ucounts = alloc_ucounts(ns, uid);
for (iter = ucounts; iter; iter = tns->ucounts) {
- int max;
+ long max;
tns = iter->ns;
max = READ_ONCE(tns->ucount_max[type]);
- if (!atomic_inc_below(&iter->ucount[type], max))
+ if (!atomic_long_inc_below(&iter->ucount[type], max))
goto fail;
}
return ucounts;
fail:
bad = iter;
for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
- atomic_dec(&iter->ucount[type]);
+ atomic_long_dec(&iter->ucount[type]);
put_ucounts(ucounts);
return NULL;
@@ -220,12 +242,54 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
struct ucounts *iter;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- int dec = atomic_dec_if_positive(&iter->ucount[type]);
+ long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
WARN_ON_ONCE(dec < 0);
}
put_ucounts(ucounts);
}
+long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
+{
+ struct ucounts *iter;
+ long ret = 0;
+
+ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+ long max = READ_ONCE(iter->ns->ucount_max[type]);
+ long new = atomic_long_add_return(v, &iter->ucount[type]);
+ if (new < 0 || new > max)
+ ret = LONG_MAX;
+ else if (iter == ucounts)
+ ret = new;
+ }
+ return ret;
+}
+
+bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
+{
+ struct ucounts *iter;
+ long new = -1; /* Silence compiler warning */
+ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+ long dec = atomic_long_add_return(-v, &iter->ucount[type]);
+ WARN_ON_ONCE(dec < 0);
+ if (iter == ucounts)
+ new = dec;
+ }
+ return (new == 0);
+}
+
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
+{
+ struct ucounts *iter;
+ if (get_ucounts_value(ucounts, type) > max)
+ return true;
+ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+ max = READ_ONCE(iter->ns->ucount_max[type]);
+ if (get_ucounts_value(iter, type) > max)
+ return true;
+ }
+ return false;
+}
+
static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
@@ -241,6 +305,8 @@ static __init int user_namespace_sysctl_init(void)
BUG_ON(!user_header);
BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
+ hlist_add_ucounts(&init_ucounts);
+ inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1);
return 0;
}
subsys_initcall(user_namespace_sysctl_init);
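
The reworked ucounts API splits lookup/creation (alloc_ucounts(), which returns with count == 1), reference taking (get_ucounts(), which fails with NULL when atomic_add_negative() sees the counter saturate), and release (put_ucounts(), which frees on the final put). A hedged caller sketch, with a hypothetical function name:

	static int charge_one_proc(struct user_namespace *ns, kuid_t uid)
	{
		struct ucounts *uc = alloc_ucounts(ns, uid);	/* ref == 1 */

		if (!uc)
			return -ENOMEM;
		if (inc_rlimit_ucounts(uc, UCOUNT_RLIMIT_NPROC, 1) == LONG_MAX) {
			/* over a limit somewhere up the namespace chain: undo */
			dec_rlimit_ucounts(uc, UCOUNT_RLIMIT_NPROC, 1);
			put_ucounts(uc);
			return -EAGAIN;
		}
		return 0;
	}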
diff --git a/kernel/user.c b/kernel/user.c
index a2478cddf536..c82399c1618a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -98,9 +98,6 @@ static DEFINE_SPINLOCK(uidhash_lock);
/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
.__count = REFCOUNT_INIT(1),
- .processes = ATOMIC_INIT(1),
- .sigpending = ATOMIC_INIT(0),
- .locked_shm = 0,
.uid = GLOBAL_ROOT_UID,
.ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8d62863721b0..ef82d401dde8 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -119,9 +119,13 @@ int create_user_ns(struct cred *new)
ns->owner = owner;
ns->group = group;
INIT_WORK(&ns->work, free_user_ns);
- for (i = 0; i < UCOUNT_COUNTS; i++) {
+ for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) {
ns->ucount_max[i] = INT_MAX;
}
+ set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC));
+ set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
+ set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
+ set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
ns->ucounts = ucounts;
/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
@@ -1340,6 +1344,9 @@ static int userns_install(struct nsset *nsset, struct ns_common *ns)
put_user_ns(cred->user_ns);
set_cred_user_ns(cred, get_user_ns(user_ns));
+ if (set_cred_ucounts(cred) < 0)
+ return -EINVAL;
+
return 0;
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 92d3bcc5a5e0..ad912511a0c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -92,7 +92,7 @@ __setup("nmi_watchdog=", hardlockup_panic_setup);
* own hardlockup detector.
*
* watchdog_nmi_enable/disable can be implemented to start and stop when
- * softlockup watchdog threads start and stop. The arch must select the
+ * the softlockup watchdog starts and stops. The arch must select the
* SOFTLOCKUP_DETECTOR Kconfig.
*/
int __weak watchdog_nmi_enable(unsigned int cpu)
@@ -335,7 +335,7 @@ static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
/*
- * The watchdog thread function - touches the timestamp.
+ * The watchdog feed function - touches the timestamp.
*
* It only runs once every sample_period seconds (4 seconds by
* default) to reset the softlockup timestamp. If this gets delayed
@@ -558,11 +558,7 @@ static void lockup_detector_reconfigure(void)
}
/*
- * Create the watchdog thread infrastructure and configure the detector(s).
- *
- * The threads are not unparked as watchdog_allowed_mask is empty. When
- * the threads are successfully initialized, take the proper locks and
- * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ * Create the watchdog infrastructure and configure the detector(s).
*/
static __init void lockup_detector_setup(void)
{
@@ -628,7 +624,7 @@ void lockup_detector_soft_poweroff(void)
#ifdef CONFIG_SYSCTL
-/* Propagate any changes to the watchdog threads */
+/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
/* Remove impossible cpus to keep sysctl output clean. */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b19d759e55a5..50142fc08902 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
+#include <linux/kvm_para.h>
#include "workqueue_internal.h"
@@ -5772,6 +5773,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
{
unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
bool lockup_detected = false;
+ unsigned long now = jiffies;
struct worker_pool *pool;
int pi;
@@ -5786,6 +5788,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
if (list_empty(&pool->worklist))
continue;
+ /*
+ * If a virtual machine is stopped by the host, it can look to
+ * the watchdog like a stall.
+ */
+ kvm_check_and_clear_guest_paused();
+
/* get the latest of pool and touched timestamps */
if (pool->cpu >= 0)
touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
@@ -5799,12 +5807,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
ts = touched;
/* did we stall? */
- if (time_after(jiffies, ts + thresh)) {
+ if (time_after(now, ts + thresh)) {
lockup_detected = true;
pr_emerg("BUG: workqueue lockup - pool");
pr_cont_pool_info(pool);
pr_cont(" stuck for %us!\n",
- jiffies_to_msecs(jiffies - pool_ts) / 1000);
+ jiffies_to_msecs(now - pool_ts) / 1000);
}
}
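
Two small hardenings in the wq_watchdog hunks: jiffies is sampled once into now so the stall test and the printed duration agree, and kvm_check_and_clear_guest_paused() keeps a host-side VM pause from being misread as a stall. The snapshot idiom in isolation (hypothetical helper):

	static void report_if_stalled(unsigned long ts, unsigned long thresh)
	{
		unsigned long now = jiffies;	/* one sample for test and report */

		if (time_after(now, ts + thresh))
			pr_emerg("stuck for %us!\n",
				 jiffies_to_msecs(now - ts) / 1000);
	}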
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 678c13967580..1c9857fdb1a0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -313,6 +313,9 @@ config DEBUG_INFO_BTF
config PAHOLE_HAS_SPLIT_BTF
def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
+config PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT
+ def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "122")
+
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
@@ -1372,7 +1375,6 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
@@ -2181,6 +2183,9 @@ config TEST_KSTRTOX
config TEST_PRINTF
tristate "Test printf() family of functions at runtime"
+config TEST_SCANF
+ tristate "Test scanf() family of functions at runtime"
+
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -2429,6 +2434,18 @@ config BITS_TEST
If unsure, say N.
+config SLUB_KUNIT_TEST
+ tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+ depends on SLUB_DEBUG && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the SLUB allocator unit test, which exercises
+ SLUB cache debugging functionality.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2571,6 +2588,18 @@ config TEST_FPU
If unsure, say N.
+config TEST_CLOCKSOURCE_WATCHDOG
+ tristate "Test clocksource watchdog in kernel space"
+ depends on CLOCKSOURCE_WATCHDOG
+ help
+ Enable this option to create a kernel module that will trigger
+ a test of the clocksource watchdog. This module may be loaded
+ via modprobe or insmod, in which case it will run when
+ loaded, or it may be built in, in which case it will run
+ shortly after boot.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index cffc2ebbf185..1e2d10f86011 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -12,6 +12,13 @@ config HAVE_ARCH_KASAN_HW_TAGS
config HAVE_ARCH_KASAN_VMALLOC
bool
+config ARCH_DISABLE_KASAN_INLINE
+ bool
+ help
+ An architecture might not support inline instrumentation.
+ When this option is selected, inline and stack instrumentation are
+ disabled.
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -130,6 +137,7 @@ config KASAN_OUTLINE
config KASAN_INLINE
bool "Inline instrumentation"
+ depends on !ARCH_DISABLE_KASAN_INLINE
help
Compiler directly inserts code checking shadow memory before
memory accesses. This is faster than outline (in some workloads
@@ -141,6 +149,7 @@ endchoice
config KASAN_STACK
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
+ depends on !ARCH_DISABLE_KASAN_INLINE
default y if CC_IS_GCC
help
The LLVM stack address sanitizer has a known problem that
@@ -154,10 +163,13 @@ config KASAN_STACK
but clang users can still enable it for builds without
CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
to use and enabled by default.
+ If the architecture disables inline instrumentation, stack
+ instrumentation is also disabled as it adds inline-style
+ instrumentation that is run unconditionally.
-config KASAN_SW_TAGS_IDENTIFY
+config KASAN_TAGS_IDENTIFY
bool "Enable memory corruption identification"
- depends on KASAN_SW_TAGS
+ depends on KASAN_SW_TAGS || KASAN_HW_TAGS
help
This option enables best-effort identification of bug type
(use-after-free or out-of-bounds) at the cost of increased
diff --git a/lib/Makefile b/lib/Makefile
index 2cc359ec1fdd..6d765d5fb8ac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
+obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
@@ -354,5 +355,6 @@ obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..3df653994177 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(s64 a, atomic64_t *v) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return val;
}
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
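
Renaming the spinlock-based library symbols with a generic_ prefix frees the atomic64_* namespace for the arch interface; presumably the asm-generic header is updated in the same series to map onto them, along these lines (sketch, not shown in this excerpt):

	/* include/asm-generic/atomic64.h (sketch) */
	extern s64 generic_atomic64_read(const atomic64_t *v);
	extern void generic_atomic64_set(atomic64_t *v, s64 i);

	#define arch_atomic64_read	generic_atomic64_read
	#define arch_atomic64_set	generic_atomic64_set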
diff --git a/lib/crc64.c b/lib/crc64.c
index 47cfa054827f..9f852a89ee2a 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
/**
* crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
* @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- or the previous crc64 value if computing incrementally.
+ * or the previous crc64 value if computing incrementally.
* @p: pointer to buffer over which CRC64 is run
* @len: length of buffer @p
*/
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 06d3135bd184..a75ee30b77cb 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
* Generic 'turn off all lock debugging' function:
*/
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index f5a33b6f773f..27f16872320d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -73,10 +73,10 @@ void show_regs_print_info(const char *log_lvl)
dump_stack_print_info(log_lvl);
}
-static void __dump_stack(void)
+static void __dump_stack(const char *log_lvl)
{
- dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL, KERN_DEFAULT);
+ dump_stack_print_info(log_lvl);
+ show_stack(NULL, NULL, log_lvl);
}
/**
@@ -84,50 +84,22 @@ static void __dump_stack(void)
*
* Architectures can override this implementation by implementing its own.
*/
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
-asmlinkage __visible void dump_stack(void)
+asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
unsigned long flags;
- int was_locked;
- int old;
- int cpu;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
-retry:
- local_irq_save(flags);
- cpu = smp_processor_id();
- old = atomic_cmpxchg(&dump_lock, -1, cpu);
- if (old == -1) {
- was_locked = 0;
- } else if (old == cpu) {
- was_locked = 1;
- } else {
- local_irq_restore(flags);
- /*
- * Wait for the lock to release before jumping to
- * atomic_cmpxchg() in order to mitigate the thundering herd
- * problem.
- */
- do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
- goto retry;
- }
-
- __dump_stack();
-
- if (!was_locked)
- atomic_set(&dump_lock, -1);
-
- local_irq_restore(flags);
+ printk_cpu_lock_irqsave(flags);
+ __dump_stack(log_lvl);
+ printk_cpu_unlock_irqrestore(flags);
}
-#else
+EXPORT_SYMBOL(dump_stack_lvl);
+
asmlinkage __visible void dump_stack(void)
{
- __dump_stack();
+ dump_stack_lvl(KERN_DEFAULT);
}
-#endif
EXPORT_SYMBOL(dump_stack);
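
dump_stack_lvl() lets callers choose the console log level rather than always printing at KERN_DEFAULT, and the hand-rolled recursion-aware lock is replaced by the printk CPU lock. Typical use (sketch, hypothetical helper):

	static void warn_with_backtrace(const char *what)
	{
		pr_warn("%s\n", what);
		dump_stack_lvl(KERN_WARNING);	/* backtrace at the matching level */
	}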
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index a118b0b1e9b2..0b5fe8b41173 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -39,20 +39,22 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
/*
* Convert non-negative integer string representation in explicitly given radix
- * to an integer.
+ * to an integer. A maximum of max_chars characters will be converted.
+ *
* Return the number of characters consumed, possibly or-ed with the overflow bit.
* If overflow occurs, result integer (incorrect) is still returned.
*
* Don't you dare use this function.
*/
-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
+ size_t max_chars)
{
unsigned long long res;
unsigned int rv;
res = 0;
rv = 0;
- while (1) {
+ while (max_chars--) {
unsigned int c = *s;
unsigned int lc = c | 0x20; /* don't tolower() this line */
unsigned int val;
@@ -82,6 +84,11 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
return rv;
}
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+{
+ return _parse_integer_limit(s, base, p, INT_MAX);
+}
+
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
unsigned long long _res;
diff --git a/lib/kstrtox.h b/lib/kstrtox.h
index 3b4637bcd254..158c400ca865 100644
--- a/lib/kstrtox.h
+++ b/lib/kstrtox.h
@@ -4,6 +4,8 @@
#define KSTRTOX_OVERFLOW (1U << 31)
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
+ size_t max_chars);
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
#endif
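
_parse_integer_limit() exists so callers can honour a scanf-style field width by capping how many characters the integer parser may consume; _parse_integer() becomes the INT_MAX-bounded special case. A hedged caller sketch:

	/* Parse at most width hex digits starting at s. */
	static int parse_hex_field(const char *s, size_t width,
				   unsigned long long *out)
	{
		unsigned int rv = _parse_integer_limit(s, 16, out, width);

		if (rv & KSTRTOX_OVERFLOW)
			return -ERANGE;
		return rv;	/* number of characters consumed */
	}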
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..45f068864d76 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -475,6 +475,7 @@ int kunit_add_resource(struct kunit *test,
void *data)
{
int ret = 0;
+ unsigned long flags;
res->free = free;
kref_init(&res->refcount);
@@ -487,10 +488,10 @@ int kunit_add_resource(struct kunit *test,
res->data = data;
}
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
list_add_tail(&res->node, &test->resources);
/* refcount for list is established by kref_init() */
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
return ret;
}
@@ -548,9 +549,11 @@ EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
- spin_lock(&test->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
list_del(&res->node);
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_put_resource(res);
}
EXPORT_SYMBOL_GPL(kunit_remove_resource);
@@ -630,6 +633,7 @@ EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
struct kunit_resource *res;
+ unsigned long flags;
/*
* test->resources is a stack - each allocation must be freed in the
@@ -641,9 +645,9 @@ void kunit_cleanup(struct kunit *test)
* protect against the current node being deleted, not the next.
*/
while (true) {
- spin_lock(&test->lock);
+ spin_lock_irqsave(&test->lock, flags);
if (list_empty(&test->resources)) {
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
break;
}
res = list_last_entry(&test->resources,
@@ -654,7 +658,7 @@ void kunit_cleanup(struct kunit *test)
* resource, and this can't happen if the test->lock
* is held.
*/
- spin_unlock(&test->lock);
+ spin_unlock_irqrestore(&test->lock, flags);
kunit_remove_resource(test, res);
}
current->kunit_test = NULL;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 2d85abac1744..161108e5d2fe 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -53,6 +53,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
#define LOCKTYPE_LL 0x40
+#define LOCKTYPE_SPECIAL 0x80
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -194,6 +195,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
__irq_enter(); \
+ lockdep_hardirq_threaded(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
@@ -2492,16 +2494,6 @@ static void rcu_sched_exit(int *_)
int rcu_sched_guard_##name __guard(rcu_sched_exit); \
rcu_read_lock_sched();
-static void rcu_callback_exit(int *_)
-{
- rcu_lock_release(&rcu_callback_map);
-}
-
-#define RCU_CALLBACK_CONTEXT(name, ...) \
- int rcu_callback_guard_##name __guard(rcu_callback_exit); \
- rcu_lock_acquire(&rcu_callback_map);
-
-
static void raw_spinlock_exit(raw_spinlock_t **lock)
{
raw_spin_unlock(*lock);
@@ -2558,8 +2550,6 @@ static void __maybe_unused inner##_in_##outer(void) \
* ---------------+-------+----------+------+-------
* RCU_BH | o | o | o | x
* ---------------+-------+----------+------+-------
- * RCU_CALLBACK | o | o | o | x
- * ---------------+-------+----------+------+-------
* RCU_SCHED | o | o | x | x
* ---------------+-------+----------+------+-------
* RAW_SPIN | o | o | x | x
@@ -2576,7 +2566,6 @@ GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
@@ -2638,10 +2627,6 @@ static void wait_context_tests(void)
DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
pr_cont("\n");
- print_testname("in RCU callback context");
- DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
- pr_cont("\n");
-
print_testname("in RCU-sched context");
DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
pr_cont("\n");
@@ -2744,6 +2729,66 @@ static void local_lock_tests(void)
pr_cont("\n");
}
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+ /* mutex_A is hardirq-unsafe and softirq-unsafe */
+ /* mutex_A -> lock_C */
+ mutex_lock(&mutex_A);
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+ mutex_unlock(&mutex_A);
+
+ /* lock_A is hardirq-safe */
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT();
+
+ /* lock_A -> lock_B */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B);
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ /* lock_B -> lock_C */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_B);
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ spin_unlock(&lock_B);
+ HARDIRQ_ENABLE();
+
+ /* lock_D is softirq-safe */
+ SOFTIRQ_ENTER();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_EXIT();
+
+ /* And lock_D is hardirq-unsafe */
+ SOFTIRQ_DISABLE();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_ENABLE();
+
+ /*
+ * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+ * deadlock.
+ *
+ * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+ * hardirq-unsafe, deadlock.
+ */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+}
+
void locking_selftest(void)
{
/*
@@ -2872,6 +2917,10 @@ void locking_selftest(void)
local_lock_tests();
+ print_testname("hardirq_unsafe_softirq_safe");
+ dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+ pr_cont("\n");
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a1071cdefb5a..af9302141bcf 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
- if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ if (data->force_atomic || percpu_ref_is_dying(ref))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
@@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ WARN_ONCE(percpu_ref_is_dying(ref),
"%s called more than once on %ps!", __func__,
ref->data->release);
@@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(!percpu_ref_is_dying(ref));
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
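
The three hunks above replace open-coded tests of the __PERCPU_REF_DEAD bit with the existing accessor, which is simply (quoted from memory, from include/linux/percpu-refcount.h):

	static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
	{
		return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
	}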
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000000..8662dc6cb509
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = 0x12;
+
+ /*
+ * Expecting three errors: one for the corrupted freechain, one for
+ * the wrong count of objects in use, and a third reported while
+ * fixing the broken cache.
+ */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+ /*
+ * Try to repair the corrupted freepointer.
+ * Still expecting two errors: the first for the wrong count of
+ * objects in use, the second reported while fixing the broken
+ * cache.
+ */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
+ SLAB_POISON, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
+ SLAB_RED_ZONE, NULL);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 1c1dbd300325..046ac6297c78 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -19,11 +19,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
if (irqs_disabled())
goto out;
- /*
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
- if (current->nr_cpus_allowed == 1)
+ if (is_percpu_thread())
goto out;
#ifdef CONFIG_SMP
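
check_preemption_disabled() previously waived only tasks with a single allowed CPU; is_percpu_thread() additionally requires that userspace cannot move the task. Its definition is approximately (sketch from memory):

	static inline bool is_percpu_thread(void)
	{
	#ifdef CONFIG_SMP
		return (current->flags & PF_NO_SETAFFINITY) &&
		       (current->nr_cpus_allowed == 1);
	#else
		return true;
	#endif
	}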
diff --git a/lib/syscall.c b/lib/syscall.c
index ba13e924c430..006e256d2264 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -68,13 +68,13 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
*/
int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
- long state;
unsigned long ncsw;
+ unsigned int state;
if (target == current)
return collect_syscall(target, info);
- state = target->state;
+ state = READ_ONCE(target->__state);
if (unlikely(!state))
return -EAGAIN;
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 80a78877bd93..15f2e2db77bc 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -686,9 +686,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start ||
- !(vma->vm_flags & VM_READ)) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
ret = -EINVAL;
goto out;
}
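
vma_lookup() folds the find_vma()-plus-range-check idiom removed above into one call: find_vma() returns the first VMA that ends after addr, which may also begin above it. The helper is equivalent to:

	static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
							unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* Only accept a VMA that actually contains addr. */
		if (vma && addr < vma->vm_start)
			vma = NULL;

		return vma;
	}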
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index cacbbbdef768..44e08f4d9c52 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -55,7 +55,6 @@ static int kasan_test_init(struct kunit *test)
multishot = kasan_save_enable_multi_shot();
kasan_set_tagging_report_once(false);
fail_data.report_found = false;
- fail_data.report_expected = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
"kasan_data", &fail_data);
return 0;
@@ -94,20 +93,20 @@ static void kasan_test_exit(struct kunit *test)
!kasan_async_mode_enabled()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
- WRITE_ONCE(fail_data.report_expected, true); \
barrier(); \
expression; \
barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
+ if (!READ_ONCE(fail_data.report_found)) { \
+ KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
+ "expected in \"" #expression \
+ "\", but none occurred"); \
+ } \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
if (READ_ONCE(fail_data.report_found)) \
kasan_enable_tagging_sync(); \
migrate_enable(); \
} \
WRITE_ONCE(fail_data.report_found, false); \
- WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
diff --git a/lib/test_printf.c b/lib/test_printf.c
index ec0d5976bb69..8ac71aee46af 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -528,6 +528,11 @@ time_and_date(void)
test("0119-00-04T15:32:23", "%ptTr", &t);
test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
+
+ test("2019-01-04 15:32:23", "%ptTs", &t);
+ test("0119-00-04 15:32:23", "%ptTsr", &t);
+ test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
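
The new cases exercise an 's' modifier for %ptT that separates date and time with a space instead of the ISO 'T'. Usage sketch:

	time64_t t = ktime_get_real_seconds();

	pr_info("%ptTs\n", &t);	/* "2019-01-04 15:32:23" style output */
	pr_info("%ptT\n", &t);	/* "2019-01-04T15:32:23" style output */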
static void __init
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
new file mode 100644
index 000000000000..48ff5747a4da
--- /dev/null
+++ b/lib/test_scanf.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for sscanf facility.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define BUF_SIZE 1024
+
+KSTM_MODULE_GLOBALS();
+static char *test_buffer __initdata;
+static char *fmt_buffer __initdata;
+static struct rnd_state rnd_state __initdata;
+
+typedef int (*check_fn)(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap);
+
+static void __scanf(4, 6) __init
+_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
+ int n_args, ...)
+{
+ va_list ap, ap_copy;
+ int ret;
+
+ total_tests++;
+
+ va_start(ap, n_args);
+ va_copy(ap_copy, ap);
+ ret = vsscanf(string, fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (ret != n_args) {
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
+ string, fmt, ret, n_args);
+ goto fail;
+ }
+
+ ret = (*fn)(check_data, string, fmt, n_args, ap);
+ if (ret)
+ goto fail;
+
+ va_end(ap);
+
+ return;
+
+fail:
+ failed_tests++;
+ va_end(ap);
+}
+
+#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
+do { \
+ pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
+ for (; n_args > 0; n_args--, expect++) { \
+ typeof(*expect) got = *va_arg(ap, typeof(expect)); \
+ pr_debug("\t" arg_fmt "\n", got); \
+ if (got != *expect) { \
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
+ str, fmt, *expect, got); \
+ return 1; \
+ } \
+ } \
+ return 0; \
+} while (0)
+
+static int __init check_ull(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long long *pval = check_data;
+
+ _check_numbers_template("%llu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ll(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long long *pval = check_data;
+
+ _check_numbers_template("%lld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ulong(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long *pval = check_data;
+
+ _check_numbers_template("%lu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_long(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long *pval = check_data;
+
+ _check_numbers_template("%ld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uint(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned int *pval = check_data;
+
+ _check_numbers_template("%u", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_int(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const int *pval = check_data;
+
+ _check_numbers_template("%d", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ushort(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned short *pval = check_data;
+
+ _check_numbers_template("%hu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_short(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const short *pval = check_data;
+
+ _check_numbers_template("%hd", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uchar(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned char *pval = check_data;
+
+ _check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_char(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const signed char *pval = check_data;
+
+ _check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
+}
+
+/* Selection of interesting numbers to test, copied from test-kstrtox.c */
+static const unsigned long long numbers[] __initconst = {
+ 0x0ULL,
+ 0x1ULL,
+ 0x7fULL,
+ 0x80ULL,
+ 0x81ULL,
+ 0xffULL,
+ 0x100ULL,
+ 0x101ULL,
+ 0x7fffULL,
+ 0x8000ULL,
+ 0x8001ULL,
+ 0xffffULL,
+ 0x10000ULL,
+ 0x10001ULL,
+ 0x7fffffffULL,
+ 0x80000000ULL,
+ 0x80000001ULL,
+ 0xffffffffULL,
+ 0x100000000ULL,
+ 0x100000001ULL,
+ 0x7fffffffffffffffULL,
+ 0x8000000000000000ULL,
+ 0x8000000000000001ULL,
+ 0xfffffffffffffffeULL,
+ 0xffffffffffffffffULL,
+};
+
+#define value_representable_in_type(T, val) \
+(is_signed_type(T) \
+ ? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
+ : ((unsigned long long)(val) <= type_max(T)))
+
+
+#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
+do { \
+ const T expect_val = (T)(val); \
+ T result = ~expect_val; /* should be overwritten */ \
+ \
+ snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
+ _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+} while (0)
+
+#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ if (value_representable_in_type(T, numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ numbers[i], fn); \
+ \
+ if (value_representable_in_type(T, -numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ -numbers[i], fn); \
+ } \
+} while (0)
+
+static void __init numbers_simple(void)
+{
+ simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
+ simple_numbers_loop(long long, "%lld", "lld", check_ll);
+ simple_numbers_loop(long long, "%lld", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "%llx", "llx", check_ll);
+ simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
+
+ simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
+ simple_numbers_loop(long, "%ld", "ld", check_long);
+ simple_numbers_loop(long, "%ld", "li", check_long);
+ simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "%lx", "lx", check_long);
+ simple_numbers_loop(long, "0x%lx", "li", check_long);
+ simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "0x%lx", "lx", check_long);
+
+ simple_numbers_loop(unsigned int, "%u", "u", check_uint);
+ simple_numbers_loop(int, "%d", "d", check_int);
+ simple_numbers_loop(int, "%d", "i", check_int);
+ simple_numbers_loop(unsigned int, "%x", "x", check_uint);
+ simple_numbers_loop(int, "%x", "x", check_int);
+ simple_numbers_loop(int, "0x%x", "i", check_int);
+ simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
+ simple_numbers_loop(int, "0x%x", "x", check_int);
+
+ simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
+ simple_numbers_loop(short, "%hd", "hd", check_short);
+ simple_numbers_loop(short, "%hd", "hi", check_short);
+ simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "%hx", "hx", check_short);
+ simple_numbers_loop(short, "0x%hx", "hi", check_short);
+ simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "0x%hx", "hx", check_short);
+
+ simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
+ simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
+ simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
+ simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
+}
+
+/*
+ * This gives a better variety of number "lengths" in a small sample than
+ * the raw prandom*() functions (not mathematically rigorous!).
+ * Variability of length and value is more important than perfect randomness.
+ */
+static u32 __init next_test_random(u32 max_bits)
+{
+ u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
+
+ return prandom_u32_state(&rnd_state) & (UINT_MAX >> (32 - n_bits));
+}
+
+static unsigned long long __init next_test_random_ull(void)
+{
+ u32 rand1 = prandom_u32_state(&rnd_state);
+ u32 n_bits = (hweight32(rand1) * 3) % 64;
+ u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
+
+ return val & (ULLONG_MAX >> (64 - n_bits));
+}
+
+#define random_for_type(T) \
+ ((T)(sizeof(T) <= sizeof(u32) \
+ ? next_test_random(BITS_PER_TYPE(T)) \
+ : next_test_random_ull()))
+
+/*
+ * Define a pattern of negative and positive numbers to ensure we get
+ * some of both within the small number of samples in a test string.
+ */
+#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
+
+#define fill_random_array(arr) \
+do { \
+ unsigned int neg_pattern = NEGATIVES_PATTERN; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
+ (arr)[i] = random_for_type(typeof((arr)[0])); \
+ if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
+ (arr)[i] = -(arr)[i]; \
+ } \
+} while (0)
+
+/*
+ * Convenience wrapper around snprintf() to append at buf_pos in buf,
+ * updating buf_pos and returning the number of characters appended.
+ * On error buf_pos is not changed and return value is 0.
+ */
+static int __init __printf(4, 5)
+append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
+{
+ va_list ap;
+ int field_len;
+
+ va_start(ap, val_fmt);
+ field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
+ va_end(ap);
+
+ if (field_len < 0)
+ field_len = 0;
+
+ *buf_pos += field_len;
+
+ return field_len;
+}
+
+/*
+ * Convenience function to append the field delimiter string
+ * to both the value string and format string buffers.
+ */
+static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+ char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
+ const char *delim_str)
+{
+ append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
+ append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
+}
+
+#define test_array_8(fn, check_data, string, fmt, arr) \
+do { \
+ BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
+ _test(fn, check_data, string, fmt, 8, \
+ &(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
+ &(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
+} while (0)
+
+#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, \
+ field_sep); \
+ \
+ append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
+do { \
+ char full_fmt[16]; \
+ \
+ snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
+ numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
+} while (0)
+
+#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, val_len, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
+ \
+ val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
+ expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
+ "%%%u%s", val_len, scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+static void __init numbers_list(const char *delim)
+{
+ numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_8(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_8(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+
+ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_8(long, "%ld", delim, "ld", check_long);
+ numbers_list_8(long, "%ld", delim, "li", check_long);
+ numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_8(long, "0x%lx", delim, "li", check_long);
+
+ numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_8(int, "%d", delim, "d", check_int);
+ numbers_list_8(int, "%d", delim, "i", check_int);
+ numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_8(int, "0x%x", delim, "i", check_int);
+
+ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_8(short, "%hd", delim, "hd", check_short);
+ numbers_list_8(short, "%hd", delim, "hi", check_short);
+ numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+
+ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * maximum possible digits for the given type and base.
+ */
+static void __init numbers_list_field_width_typemax(const char *delim)
+{
+ numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
+ numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
+ numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
+ numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+
+#if BITS_PER_LONG == 64
+ numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
+#else
+ numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
+#endif
+
+ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
+ numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
+ numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
+ numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
+ numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
+ numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+
+ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
+ numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
+ numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
+ numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+
+ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
+ numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
+ numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
+ numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * exact length of the corresponding value digits in the string being scanned.
+ */
+static void __init numbers_list_field_width_val_width(const char *delim)
+{
+ numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+
+ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_val_width(long, "%ld", delim, "ld", check_long);
+ numbers_list_val_width(long, "%ld", delim, "li", check_long);
+ numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+
+ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_val_width(int, "%d", delim, "d", check_int);
+ numbers_list_val_width(int, "%d", delim, "i", check_int);
+ numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+
+ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_val_width(short, "%hd", delim, "hd", check_short);
+ numbers_list_val_width(short, "%hd", delim, "hi", check_short);
+ numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+
+ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * Slice a continuous string of digits without field delimiters, containing
+ * numbers of varying length, using the field width to extract each group
+ * of digits. For example the hex values c0,3,bf01,303 would have a
+ * string representation of "c03bf01303" and be extracted with "%2x%1x%4x%3x".
+ */
+static void __init numbers_slice(void)
+{
+ numbers_list_field_width_val_width("");
+}
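
The slicing above can be reproduced from userland, which is where the expected
behaviour was cross-checked; a minimal sketch using the values from the
comment:

#include <stdio.h>

int main(void)
{
	unsigned int a, b, c, d;

	/* Slice "c03bf01303" into c0, 3, bf01, 303 purely by field width */
	if (sscanf("c03bf01303", "%2x%1x%4x%3x", &a, &b, &c, &d) == 4)
		printf("%x %x %x %x\n", a, b, c, d);	/* c0 3 bf01 303 */
	return 0;
}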
+
+#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
+do { \
+ const T expect[2] = { expect0, expect1 }; \
+ T result[2] = {~expect[0], ~expect[1]}; \
+ \
+ _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+} while (0)
+
+/*
+ * Number prefix is >= field width.
+ * Expected behaviour is derived from testing userland sscanf.
+ */
+static void __init numbers_prefix_overflow(void)
+{
+ /*
+ * Negative decimal with a field width of 1 should quit scanning
+ * and return 0.
+ */
+ test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
+
+ test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
+
+ /*
+ * 0x prefix in a field of width 1: 0 is a valid digit so it should
+ * convert. The next field scan starts at the 'x', which isn't a digit,
+ * so the scan quits with one field converted.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
+ test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
+
+ /*
+ * 0x prefix in a field of width 2 using %x conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x".
+ * Both fields will convert.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
+
+ /*
+ * 0x prefix in a field of width 2 using %i conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x",
+ * which will convert if it can be interpreted as decimal but will
+ * fail if it contains any hex digits (since there is no 0x prefix).
+ */
+ test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
+ test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
+ test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
+ test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
+ test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
+
+ test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
+}
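
Since the expected values above were derived from userland sscanf, the
prefix-overflow cases can be sanity-checked the same way; a minimal sketch:

#include <stdio.h>

int main(void)
{
	unsigned int a = ~0u, b = ~0u;
	int n;

	/* Width 2 consumes "0x": first field converts to 0, second to 0xa7 */
	n = sscanf("0xA7", "%2x%x", &a, &b);
	printf("n=%d a=%x b=%x\n", n, a, b);	/* n=2 a=0 b=a7 */

	/* Width 1 leaves the scan at 'x', so only one field converts */
	n = sscanf("0xA7", "%1x%x", &a, &b);
	printf("n=%d a=%x\n", n, a);		/* n=1 a=0 */
	return 0;
}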
+
+#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
+do { \
+ T got; \
+ char *endp; \
+ int len; \
+ bool fail = false; \
+ \
+ total_tests++; \
+ len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
+ got = (fn)(test_buffer, &endp, base); \
+ pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
+ if (got != (expect)) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
+ test_buffer, base, got, expect); \
+ } else if (endp != test_buffer + len) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
+ } \
+ \
+ if (fail) \
+ failed_tests++; \
+} while (0)
+
+#define test_simple_strtoxx(T, fn, gen_fmt, base) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ _test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
+ \
+ if (is_signed_type(T)) \
+ _test_simple_strtoxx(T, fn, gen_fmt, \
+ -(T)numbers[i], base); \
+ } \
+} while (0)
+
+static void __init test_simple_strtoull(void)
+{
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoll(void)
+{
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
+ test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoul(void)
+{
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
+}
+
+static void __init test_simple_strtol(void)
+{
+ test_simple_strtoxx(long, simple_strtol, "%ld", 10);
+ test_simple_strtoxx(long, simple_strtol, "%ld", 0);
+ test_simple_strtoxx(long, simple_strtol, "%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
+}
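
What these loops exercise is the simple_strtoxx() contract: the value
round-trips through snprintf() and back, and endp lands on the first
unconsumed character. A hedged sketch of that contract in kernel context:

	char *endp;
	unsigned long long v;

	v = simple_strtoull("0x1a2b", &endp, 0); /* base 0: radix from prefix */
	/* v == 0x1a2b, endp points at the terminating '\0' */

	v = simple_strtoull("123abc", &endp, 10);
	/* v == 123, endp points at 'a': trailing junk stays unconsumed */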
+
+/* Selection of common delimiters/separators between numbers in a string. */
+static const char * const number_delimiters[] __initconst = {
+ " ", ":", ",", "-", "/",
+};
+
+static void __init test_numbers(void)
+{
+ int i;
+
+ /* String containing only one number. */
+ numbers_simple();
+
+ /* String with multiple numbers separated by delimiter. */
+ for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
+ numbers_list(number_delimiters[i]);
+
+ /* Field width may be longer than actual field digits. */
+ numbers_list_field_width_typemax(number_delimiters[i]);
+
+ /* Each field width exactly length of actual field digits. */
+ numbers_list_field_width_val_width(number_delimiters[i]);
+ }
+
+ /* Slice continuous sequence of digits using field widths. */
+ numbers_slice();
+
+ numbers_prefix_overflow();
+}
+
+static void __init selftest(void)
+{
+ test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!test_buffer)
+ return;
+
+ fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!fmt_buffer) {
+ kfree(test_buffer);
+ return;
+ }
+
+ prandom_seed_state(&rnd_state, 3141592653589793238ULL);
+
+ test_numbers();
+
+ test_simple_strtoull();
+ test_simple_strtoll();
+ test_simple_strtoul();
+ test_simple_strtol();
+
+ kfree(fmt_buffer);
+ kfree(test_buffer);
+}
+
+KSTM_MODULE_LOADERS(test_scanf);
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f0c35d9b65bf..e5c7afbf7405 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -53,6 +53,31 @@
#include <linux/string_helpers.h>
#include "kstrtox.h"
+static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
+ char **endp, unsigned int base)
+{
+ const char *cp;
+ unsigned long long result = 0ULL;
+ size_t prefix_chars;
+ unsigned int rv;
+
+ cp = _parse_integer_fixup_radix(startp, &base);
+ prefix_chars = cp - startp;
+ if (prefix_chars < max_chars) {
+ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
+ /* FIXME */
+ cp += (rv & ~KSTRTOX_OVERFLOW);
+ } else {
+ /* Field too short for prefix + digit, skip over without converting */
+ cp = startp + max_chars;
+ }
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
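
An illustrative walk-through of the prefix accounting (values invented):

/*
 * simple_strntoull("0x12", 2, &endp, 0)
 *   _parse_integer_fixup_radix(): cp = "12", base = 16, prefix_chars = 2
 *   prefix_chars == max_chars:    field skipped, result = 0, endp = start + 2
 *
 * simple_strntoull("0x12", 3, &endp, 0)
 *   one char left after prefix:   parses "1", result = 1, endp = start + 3
 */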
+
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
@@ -63,18 +88,7 @@
*/
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result;
- unsigned int rv;
-
- cp = _parse_integer_fixup_radix(cp, &base);
- rv = _parse_integer(cp, base, &result);
- /* FIXME */
- cp += (rv & ~KSTRTOX_OVERFLOW);
-
- if (endp)
- *endp = (char *)cp;
-
- return result;
+ return simple_strntoull(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoull);
@@ -109,6 +123,21 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtol);
+static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
+ unsigned int base)
+{
+ /*
+ * simple_strntoull() safely handles receiving max_chars==0 in the
+ * case cp[0] == '-' && max_chars == 1.
+ * If max_chars == 0 we can drop straight through to simple_strntoull();
+ * the content of *cp is then irrelevant.
+ */
+ if (*cp == '-' && max_chars > 0)
+ return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
+
+ return simple_strntoull(cp, max_chars, endp, base);
+}
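
The edge case the comment guards against, walked through (illustrative):

/*
 * simple_strntoll("-5", 1, &endp, 10)
 *   '-' seen and max_chars > 0: recurse as -simple_strntoull("5", 0, ...)
 *   prefix_chars (0) is not < max_chars (0): field skipped,
 *   result = 0, endp = start + 1 (just past the '-').
 * vsscanf() additionally bails out early for this case; see the
 * field_width == 1 check in the hunk further below.
 */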
+
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
@@ -119,10 +148,7 @@ EXPORT_SYMBOL(simple_strtol);
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if (*cp == '-')
- return -simple_strtoull(cp + 1, endp, base);
-
- return simple_strtoull(cp, endp, base);
+ return simple_strntoll(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoll);
@@ -1834,7 +1860,8 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
- bool raw = false;
+ bool raw = false, iso8601_separator = true;
+ bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
@@ -1851,14 +1878,25 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
break;
}
- raw = fmt[count] == 'r';
+ do {
+ switch (fmt[count++]) {
+ case 'r':
+ raw = true;
+ break;
+ case 's':
+ iso8601_separator = false;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
- /* Respect ISO 8601 */
if (buf < end)
- *buf = 'T';
+ *buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
@@ -2186,7 +2224,7 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
-static int __init no_hash_pointers_enable(char *str)
+int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
return 0;
@@ -2298,7 +2336,7 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
- * - 't[RT][dt][r]' For time and date as represented by:
+ * - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
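
The new 's' suffix swaps the ISO 8601 'T' separator for a space when both date
and time are printed; an illustrative use (timestamp invented):

	/* "2021-06-29T13:45:00" - ISO 8601 separator by default */
	printk("%ptR\n", &tm);
	/* "2021-06-29 13:45:00" - 's' selects a space instead */
	printk("%ptRs\n", &tm);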
@@ -3565,8 +3603,12 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
str = skip_spaces(str);
digit = *str;
- if (is_sign && digit == '-')
+ if (is_sign && digit == '-') {
+ if (field_width == 1)
+ break;
+
digit = *(str + 1);
+ }
if (!digit
|| (base == 16 && !isxdigit(digit))
@@ -3576,25 +3618,13 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
if (is_sign)
- val.s = qualifier != 'L' ?
- simple_strtol(str, &next, base) :
- simple_strtoll(str, &next, base);
+ val.s = simple_strntoll(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
else
- val.u = qualifier != 'L' ?
- simple_strtoul(str, &next, base) :
- simple_strtoull(str, &next, base);
-
- if (field_width > 0 && next - str > field_width) {
- if (base == 0)
- _parse_integer_fixup_radix(str, &base);
- while (next - str > field_width) {
- if (is_sign)
- val.s = div_s64(val.s, base);
- else
- val.u = div_u64(val.u, base);
- --next;
- }
- }
+ val.u = simple_strntoull(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
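
With the width passed down to simple_strntoll()/simple_strntoull(), a numeric
field now stops converting at its width instead of being converted in full and
divided back down. The observable behaviour, illustratively:

	int a, b;

	/* Two adjacent fields sliced out of one digit run by width alone */
	if (sscanf("123456", "%4d%2d", &a, &b) == 2)
		pr_info("a=%d b=%d\n", a, b);	/* a=1234 b=56 */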
diff --git a/mm/Kconfig b/mm/Kconfig
index 02d44e3420f5..ded98fb859ab 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -19,7 +19,7 @@ choice
config FLATMEM_MANUAL
bool "Flat Memory"
- depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
+ depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
help
This option is best suited for non-NUMA systems with
flat address space. The FLATMEM is the most efficient
@@ -32,21 +32,6 @@ config FLATMEM_MANUAL
If unsure, choose this option (Flat Memory) over any other.
-config DISCONTIGMEM_MANUAL
- bool "Discontiguous Memory"
- depends on ARCH_DISCONTIGMEM_ENABLE
- help
- This option provides enhanced support for discontiguous
- memory systems, over FLATMEM. These systems have holes
- in their physical address spaces, and this option provides
- more efficient handling of these holes.
-
- Although "Discontiguous Memory" is still used by several
- architectures, it is considered deprecated in favor of
- "Sparse Memory".
-
- If unsure, choose "Sparse Memory" over this option.
-
config SPARSEMEM_MANUAL
bool "Sparse Memory"
depends on ARCH_SPARSEMEM_ENABLE
@@ -62,30 +47,13 @@ config SPARSEMEM_MANUAL
endchoice
-config DISCONTIGMEM
- def_bool y
- depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
-
config SPARSEMEM
def_bool y
depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
config FLATMEM
def_bool y
- depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
-
-config FLAT_NODE_MEM_MAP
- def_bool y
- depends on !SPARSEMEM
-
-#
-# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
-# to represent different areas of memory. This variable allows
-# those dependencies to exist individually.
-#
-config NEED_MULTIPLE_NODES
- def_bool y
- depends on DISCONTIGMEM || NUMA
+ depends on !SPARSEMEM || FLATMEM_MANUAL
#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 576220acd686..271f2ca862c8 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -371,12 +371,16 @@ static void wb_exit(struct bdi_writeback *wb)
#include <linux/memcontrol.h>
/*
- * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
- * bdi->cgwb_tree is also RCU protected.
+ * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
+ * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected.
*/
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;
+static LIST_HEAD(offline_cgwbs);
+static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
+static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
+
static void cgwb_release_workfn(struct work_struct *work)
{
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -395,7 +399,13 @@ static void cgwb_release_workfn(struct work_struct *work)
fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt);
+
+ spin_lock_irq(&cgwb_lock);
+ list_del(&wb->offline_node);
+ spin_unlock_irq(&cgwb_lock);
+
wb_exit(wb);
+ WARN_ON_ONCE(!list_empty(&wb->b_attached));
kfree_rcu(wb, rcu);
}
@@ -413,6 +423,7 @@ static void cgwb_kill(struct bdi_writeback *wb)
WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
list_del(&wb->memcg_node);
list_del(&wb->blkcg_node);
+ list_add(&wb->offline_node, &offline_cgwbs);
percpu_ref_kill(&wb->refcnt);
}
@@ -472,6 +483,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
wb->memcg_css = memcg_css;
wb->blkcg_css = blkcg_css;
+ INIT_LIST_HEAD(&wb->b_attached);
INIT_WORK(&wb->release_work, cgwb_release_workfn);
set_bit(WB_registered, &wb->state);
@@ -633,6 +645,54 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
mutex_unlock(&bdi->cgwb_release_mutex);
}
+/*
+ * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
+ *
+ * Try to release dying cgwbs by switching attached inodes to the nearest
+ * living ancestor's writeback. Processed wbs are placed at the end
+ * of the list to guarantee forward progress.
+ */
+static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
+{
+ struct bdi_writeback *wb;
+ LIST_HEAD(processed);
+
+ spin_lock_irq(&cgwb_lock);
+
+ while (!list_empty(&offline_cgwbs)) {
+ wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
+ offline_node);
+ list_move(&wb->offline_node, &processed);
+
+ /*
+ * If wb is dirty, cleaning up the writeback by switching
+ * attached inodes will result in an effective removal of any
+ * bandwidth restrictions, which isn't the goal. Instead,
+ * cleanup can be postponed until the next run, by which time
+ * the I/O is likely to have completed. If some inodes are
+ * re-dirtied in the meantime, they will eventually be switched
+ * to a new cgwb.
+ */
+ if (wb_has_dirty_io(wb))
+ continue;
+
+ if (!wb_tryget(wb))
+ continue;
+
+ spin_unlock_irq(&cgwb_lock);
+ while (cleanup_offline_cgwb(wb))
+ cond_resched();
+ spin_lock_irq(&cgwb_lock);
+
+ wb_put(wb);
+ }
+
+ if (!list_empty(&processed))
+ list_splice_tail(&processed, &offline_cgwbs);
+
+ spin_unlock_irq(&cgwb_lock);
+}
+
/**
* wb_memcg_offline - kill all wb's associated with a memcg being offlined
* @memcg: memcg being offlined
@@ -649,6 +709,8 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
cgwb_kill(wb);
memcg_cgwb_list->next = NULL; /* prevent new wb's */
spin_unlock_irq(&cgwb_lock);
+
+ queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}
/**
diff --git a/mm/compaction.c b/mm/compaction.c
index 84fde270ae74..3a509fbf2bea 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1028,7 +1028,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (!TestClearPageLRU(page))
goto isolate_fail_put;
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ lruvec = mem_cgroup_page_lruvec(page);
/* If we already hold the lock, we can skip some rechecking */
if (lruvec != locked) {
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory(int order)
static bool kswapd_is_running(pg_data_t *pgdat)
{
- return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+ return pgdat->kswapd && task_is_running(pgdat->kswapd);
}
/*
diff --git a/mm/debug.c b/mm/debug.c
index 0bdda8407f71..e73fe0a8ec3d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,11 +42,10 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
-void __dump_page(struct page *page, const char *reason)
+static void __dump_page(struct page *page)
{
struct page *head = compound_head(page);
struct address_space *mapping;
- bool page_poisoned = PagePoisoned(page);
bool compound = PageCompound(page);
/*
* Accessing the pageblock without the zone lock. It could change to
@@ -58,16 +57,6 @@ void __dump_page(struct page *page, const char *reason)
int mapcount;
char *type = "";
- /*
- * If struct page is poisoned don't access Page*() functions as that
- * leads to recursive loop. Page*() check for poisoned pages, and calls
- * dump_page() when detected.
- */
- if (page_poisoned) {
- pr_warn("page:%px is uninitialized and poisoned", page);
- goto hex_only;
- }
-
if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
/*
* Corrupt page, so we cannot call page_mapping. Instead, do a
@@ -173,8 +162,6 @@ out_mapping:
pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
page_cma ? " CMA" : "");
-
-hex_only:
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
@@ -182,14 +169,16 @@ hex_only:
print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), head,
sizeof(struct page), false);
-
- if (reason)
- pr_warn("page dumped because: %s\n", reason);
}
void dump_page(struct page *page, const char *reason)
{
- __dump_page(page, reason);
+ if (PagePoisoned(page))
+ pr_warn("page:%p is uninitialized and poisoned", page);
+ else
+ __dump_page(page);
+ if (reason)
+ pr_warn("page dumped because: %s\n", reason);
dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 05efe98a9ac2..92bfc37300df 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -146,13 +146,14 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
pgprot_t prot = protection_map[idx];
- pmd_t pmd = pfn_pmd(pfn, prot);
unsigned long val = idx, *ptr = &val;
+ pmd_t pmd;
if (!has_transparent_hugepage())
return;
pr_debug("Validating PMD basic (%pGv)\n", ptr);
+ pmd = pfn_pmd(pfn, prot);
/*
* This test needs to be executed after the given page table entry
@@ -185,14 +186,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
unsigned long pfn, unsigned long vaddr,
pgprot_t prot, pgtable_t pgtable)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
if (!has_transparent_hugepage())
return;
pr_debug("Validating PMD advanced\n");
/* Align the address wrt HPAGE_PMD_SIZE */
- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+ vaddr &= HPAGE_PMD_MASK;
pgtable_trans_huge_deposit(mm, pmdp, pgtable);
@@ -232,9 +233,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
+
+ if (!has_transparent_hugepage())
+ return;
pr_debug("Validating PMD leaf\n");
+ pmd = pfn_pmd(pfn, prot);
+
/*
* PMD based THP is a leaf entry.
*/
@@ -267,12 +273,16 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
return;
+ if (!has_transparent_hugepage())
+ return;
+
pr_debug("Validating PMD saved write\n");
+ pmd = pfn_pmd(pfn, prot);
WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}
@@ -281,13 +291,14 @@ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
pgprot_t prot = protection_map[idx];
- pud_t pud = pfn_pud(pfn, prot);
unsigned long val = idx, *ptr = &val;
+ pud_t pud;
if (!has_transparent_hugepage())
return;
pr_debug("Validating PUD basic (%pGv)\n", ptr);
+ pud = pfn_pud(pfn, prot);
/*
* This test needs to be executed after the given page table entry
@@ -323,15 +334,16 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
unsigned long pfn, unsigned long vaddr,
pgprot_t prot)
{
- pud_t pud = pfn_pud(pfn, prot);
+ pud_t pud;
if (!has_transparent_hugepage())
return;
pr_debug("Validating PUD advanced\n");
/* Align the address wrt HPAGE_PUD_SIZE */
- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+ vaddr &= HPAGE_PUD_MASK;
+ pud = pfn_pud(pfn, prot);
set_pud_at(mm, vaddr, pudp, pud);
pudp_set_wrprotect(mm, vaddr, pudp);
pud = READ_ONCE(*pudp);
@@ -370,9 +382,13 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
- pud_t pud = pfn_pud(pfn, prot);
+ pud_t pud;
+
+ if (!has_transparent_hugepage())
+ return;
pr_debug("Validating PUD leaf\n");
+ pud = pfn_pud(pfn, prot);
/*
* PUD based THP is a leaf entry.
*/
@@ -654,12 +670,16 @@ static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
+ pmd_t pmd;
if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
return;
+ if (!has_transparent_hugepage())
+ return;
+
pr_debug("Validating PMD protnone\n");
+ pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
WARN_ON(!pmd_protnone(pmd));
WARN_ON(!pmd_present(pmd));
}
@@ -679,18 +699,26 @@ static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
+
+ if (!has_transparent_hugepage())
+ return;
pr_debug("Validating PMD devmap\n");
+ pmd = pfn_pmd(pfn, prot);
WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
- pud_t pud = pfn_pud(pfn, prot);
+ pud_t pud;
+
+ if (!has_transparent_hugepage())
+ return;
pr_debug("Validating PUD devmap\n");
+ pud = pfn_pud(pfn, prot);
WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
@@ -733,25 +761,33 @@ static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
return;
+ if (!has_transparent_hugepage())
+ return;
+
pr_debug("Validating PMD soft dirty\n");
+ pmd = pfn_pmd(pfn, prot);
WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
- pmd_t pmd = pfn_pmd(pfn, prot);
+ pmd_t pmd;
if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
return;
+ if (!has_transparent_hugepage())
+ return;
+
pr_debug("Validating PMD swap soft dirty\n");
+ pmd = pfn_pmd(pfn, prot);
WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
@@ -780,6 +816,9 @@ static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
swp_entry_t swp;
pmd_t pmd;
+ if (!has_transparent_hugepage())
+ return;
+
pr_debug("Validating PMD swap\n");
pmd = pfn_pmd(pfn, prot);
swp = __pmd_to_swp_entry(pmd);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 16483f86360e..64b537b3ccb0 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -62,8 +62,7 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);
-static ssize_t
-show_pools(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
unsigned temp;
unsigned size;
@@ -103,7 +102,7 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
return PAGE_SIZE - size;
}
-static DEVICE_ATTR(pools, 0444, show_pools, NULL);
+static DEVICE_ATTR_RO(pools);
/**
* dma_pool_create - Creates a pool of consistent memory blocks, for dma.
diff --git a/mm/filemap.c b/mm/filemap.c
index 66f7e9fdfbc4..ac82a93d4f38 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -872,7 +872,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
page->index = offset;
if (!huge) {
- error = mem_cgroup_charge(page, current->mm, gfp);
+ error = mem_cgroup_charge(page, NULL, gfp);
if (error)
goto error;
charged = true;
diff --git a/mm/gup.c b/mm/gup.c
index 3ded6a5f26b2..8651309f8ec3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -44,6 +44,23 @@ static void hpage_pincount_sub(struct page *page, int refs)
atomic_sub(refs, compound_pincount_ptr(page));
}
+/* Equivalent to calling put_page() @refs times. */
+static void put_page_refs(struct page *page, int refs)
+{
+#ifdef CONFIG_DEBUG_VM
+ if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
+ return;
+#endif
+
+ /*
+ * Calling put_page() for each ref is unnecessarily slow. Only the last
+ * ref needs a put_page().
+ */
+ if (refs > 1)
+ page_ref_sub(page, refs - 1);
+ put_page(page);
+}
+
/*
* Return the compound head page with ref appropriately incremented,
* or NULL if that failed.
@@ -56,6 +73,21 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
return NULL;
if (unlikely(!page_cache_add_speculative(head, refs)))
return NULL;
+
+ /*
+ * At this point we have a stable reference to the head page; but it
+ * could be that between the compound_head() lookup and the refcount
+ * increment, the compound page was split, in which case we'd end up
+ * holding a reference on a page that has nothing to do with the page
+ * we were given anymore.
+ * So now that the head page is stable, recheck that the pages still
+ * belong together.
+ */
+ if (unlikely(compound_head(page) != head)) {
+ put_page_refs(head, refs);
+ return NULL;
+ }
+
return head;
}
@@ -96,6 +128,14 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
return NULL;
/*
+ * CAUTION: Don't use compound_head() on the page before this
+ * point; the result won't be stable.
+ */
+ page = try_get_compound_head(page, refs);
+ if (!page)
+ return NULL;
+
+ /*
* When pinning a compound page of order > 1 (which is what
* hpage_pincount_available() checks for), use an exact count to
* track it, via hpage_pincount_add/_sub().
@@ -103,15 +143,10 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
* However, be sure to *also* increment the normal page refcount
* field at least once, so that the page really is pinned.
*/
- if (!hpage_pincount_available(page))
- refs *= GUP_PIN_COUNTING_BIAS;
-
- page = try_get_compound_head(page, refs);
- if (!page)
- return NULL;
-
if (hpage_pincount_available(page))
hpage_pincount_add(page, refs);
+ else
+ page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
orig_refs);
@@ -135,14 +170,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
refs *= GUP_PIN_COUNTING_BIAS;
}
- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
- /*
- * Calling put_page() for each ref is unnecessarily slow. Only the last
- * ref needs a put_page().
- */
- if (refs > 1)
- page_ref_sub(page, refs - 1);
- put_page(page);
+ put_page_refs(page, refs);
}
/**
@@ -392,6 +420,17 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
}
EXPORT_SYMBOL(unpin_user_pages);
+/*
+ * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
+ * lifecycle. Avoid setting the bit unless necessary, or it might cause write
+ * cache bouncing on large SMP machines for concurrent pinned gups.
+ */
+static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
+{
+ if (!test_bit(MMF_HAS_PINNED, mm_flags))
+ set_bit(MMF_HAS_PINNED, mm_flags);
+}
+
#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
@@ -1293,7 +1332,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
}
if (flags & FOLL_PIN)
- atomic_set(&mm->has_pinned, 1);
+ mm_set_has_pinned_flag(&mm->flags);
/*
* FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
@@ -2614,7 +2653,7 @@ static int internal_get_user_pages_fast(unsigned long start,
return -EINVAL;
if (gup_flags & FOLL_PIN)
- atomic_set(&current->mm->has_pinned, 1);
+ mm_set_has_pinned_flag(&current->mm->flags);
if (!(gup_flags & FOLL_FAST_ONLY))
might_lock_read(&current->mm->mmap_lock);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 63ed6b25deaa..6d2a0119fc58 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
@@ -98,6 +99,7 @@ retry:
__free_pages(zero_page, compound_order(zero_page));
goto retry;
}
+ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
+ WRITE_ONCE(huge_zero_pfn, ~0UL);
__free_pages(zero_page, compound_order(zero_page));
return HPAGE_PMD_NR;
}
@@ -2044,7 +2047,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
count_vm_event(THP_SPLIT_PMD);
if (!vma_is_anonymous(vma)) {
- _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
* just go ahead and zap it
@@ -2053,16 +2056,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
zap_deposited_table(mm, pmd);
if (vma_is_special_huge(vma))
return;
- page = pmd_page(_pmd);
- if (!PageDirty(page) && pmd_dirty(_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, true);
- put_page(page);
+ if (unlikely(is_pmd_migration_entry(old_pmd))) {
+ swp_entry_t entry;
+
+ entry = pmd_to_swp_entry(old_pmd);
+ page = migration_entry_to_page(entry);
+ } else {
+ page = pmd_page(old_pmd);
+ if (!PageDirty(page) && pmd_dirty(old_pmd))
+ set_page_dirty(page);
+ if (!PageReferenced(page) && pmd_young(old_pmd))
+ SetPageReferenced(page);
+ page_remove_rmap(page, true);
+ put_page(page);
+ }
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ }
+
+ if (is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2338,17 +2350,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
- bool unmap_success;
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;
- unmap_success = try_to_unmap(page, ttu_flags);
- VM_BUG_ON_PAGE(!unmap_success, page);
+ try_to_unmap(page, ttu_flags);
+
+ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
static void remap_page(struct page *page, unsigned int nr)
@@ -2659,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct deferred_split *ds_queue = get_deferred_split_queue(head);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
- int count, mapcount, extra_pins, ret;
+ int extra_pins, ret;
pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2718,7 +2730,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
unmap_page(head);
- VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
@@ -2736,9 +2747,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- count = page_count(head);
- mapcount = total_mapcount(head);
- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+ if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
@@ -2758,16 +2767,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
__split_huge_page(page, list, end);
ret = 0;
} else {
- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
- pr_alert("total_mapcount: %u, page_count(): %u\n",
- mapcount, count);
- if (PageTail(page))
- dump_page(head, NULL);
- dump_page(page, "total_mapcount(head) > 0");
- BUG();
- }
spin_unlock(&ds_queue->split_queue_lock);
-fail: if (mapping)
+fail:
+ if (mapping)
xa_unlock(&mapping->i_pages);
local_irq_enable();
remap_page(head, thp_nr_pages(head));
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 95918f410c0f..103f1187043f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1588,15 +1588,12 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
return NULL;
}
-pgoff_t __basepage_index(struct page *page)
+pgoff_t hugetlb_basepage_index(struct page *page)
{
struct page *page_head = compound_head(page);
pgoff_t index = page_index(page_head);
unsigned long compound_idx;
- if (!PageHuge(page_head))
- return page_index(page);
-
if (compound_order(page_head) >= MAX_ORDER)
compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
else
@@ -1793,7 +1790,7 @@ retry:
SetPageHWPoison(page);
ClearPageHWPoison(head);
}
- remove_hugetlb_page(h, page, false);
+ remove_hugetlb_page(h, head, false);
h->max_huge_pages--;
spin_unlock_irq(&hugetlb_lock);
update_and_free_page(h, head);
@@ -2121,12 +2118,18 @@ out:
* be restored when a newly allocated huge page must be freed. It is
* to be called after calling vma_needs_reservation to determine if a
* reservation exists.
+ *
+ * vma_del_reservation is used in error paths where an entry in the reserve
+ * map was created during huge page allocation and must be removed. It is to
+ * be called after calling vma_needs_reservation to determine if a reservation
+ * exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
VMA_ADD_RESV,
+ VMA_DEL_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -2170,11 +2173,21 @@ static long __vma_reservation_common(struct hstate *h,
ret = region_del(resv, idx, idx + 1);
}
break;
+ case VMA_DEL_RESV:
+ if (vma->vm_flags & VM_MAYSHARE) {
+ region_abort(resv, idx, idx + 1, 1);
+ ret = region_del(resv, idx, idx + 1);
+ } else {
+ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+ /* region_add calls of range 1 should never fail. */
+ VM_BUG_ON(ret < 0);
+ }
+ break;
default:
BUG();
}
- if (vma->vm_flags & VM_MAYSHARE)
+ if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
return ret;
/*
* We know private mapping must have HPAGE_RESV_OWNER set.
@@ -2222,25 +2235,39 @@ static long vma_add_reservation(struct hstate *h,
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
+static long vma_del_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
+}
+
/*
- * This routine is called to restore a reservation on error paths. In the
- * specific error paths, a huge page was allocated (via alloc_huge_page)
- * and is about to be freed. If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set
- * HPageRestoreReserve in the newly allocated page. When the page is freed
- * via free_huge_page, the global reservation count will be incremented if
- * HPageRestoreReserve is set. However, free_huge_page can not adjust the
- * reserve map. Adjust the reserve map here to be consistent with global
- * reserve count adjustments to be made by free_huge_page.
+ * This routine is called to restore reservation information on error paths.
+ * It should ONLY be called for pages allocated via alloc_huge_page(), and
+ * the hugetlb mutex should remain held when calling this routine.
+ *
+ * It handles two specific cases:
+ * 1) A reservation was in place and the page consumed the reservation.
+ * HPageRestoreReserve is set in the page.
+ * 2) No reservation was in place for the page, so HPageRestoreReserve is
+ * not set. However, alloc_huge_page always updates the reserve map.
+ *
+ * In case 1, free_huge_page later in the error path will increment the
+ * global reserve count. But, free_huge_page does not have enough context
+ * to adjust the reservation map. This case deals primarily with private
+ * mappings. Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page. Make sure the
+ * reserve map indicates there is a reservation present.
+ *
+ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
*/
-static void restore_reserve_on_error(struct hstate *h,
- struct vm_area_struct *vma, unsigned long address,
- struct page *page)
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page)
{
- if (unlikely(HPageRestoreReserve(page))) {
- long rc = vma_needs_reservation(h, vma, address);
+ long rc = vma_needs_reservation(h, vma, address);
- if (unlikely(rc < 0)) {
+ if (HPageRestoreReserve(page)) {
+ if (unlikely(rc < 0))
/*
* Rare out of memory condition in reserve map
* manipulation. Clear HPageRestoreReserve so that
@@ -2253,16 +2280,57 @@ static void restore_reserve_on_error(struct hstate *h,
* accounting of reserve counts.
*/
ClearHPageRestoreReserve(page);
- } else if (rc) {
- rc = vma_add_reservation(h, vma, address);
- if (unlikely(rc < 0))
+ else if (rc)
+ (void)vma_add_reservation(h, vma, address);
+ else
+ vma_end_reservation(h, vma, address);
+ } else {
+ if (!rc) {
+ /*
+ * This indicates there is an entry in the reserve map
+ * added by alloc_huge_page. We know it was added
+ * before the alloc_huge_page call, otherwise
+ * HPageRestoreReserve would be set on the page.
+ * Remove the entry so that a subsequent allocation
+ * does not consume a reservation.
+ */
+ rc = vma_del_reservation(h, vma, address);
+ if (rc < 0)
+ /*
+ * VERY rare out of memory condition. Since
+ * we can not delete the entry, set
+ * HPageRestoreReserve so that the reserve
+ * count will be incremented when the page
+ * is freed. This reserve will be consumed
+ * on a subsequent allocation.
+ */
+ SetHPageRestoreReserve(page);
+ } else if (rc < 0) {
+ /*
+ * Rare out of memory condition from
+ * vma_needs_reservation call. Memory allocation is
+ * only attempted if a new entry is needed. Therefore,
+ * this implies there is not an entry in the
+ * reserve map.
+ *
+ * For shared mappings, no entry in the map indicates
+ * no reservation. We are done.
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE))
/*
- * See above comment about rare out of
- * memory condition.
+ * For private mappings, no entry indicates
+ * a reservation is present. Since we can
+ * not add an entry, set HPageRestoreReserve
+ * on the page so the reserve count will be
+ * incremented when freed. This reserve will
+ * be consumed on a subsequent allocation.
*/
- ClearHPageRestoreReserve(page);
+ SetHPageRestoreReserve(page);
} else
- vma_end_reservation(h, vma, address);
+ /*
+ * No reservation present, do nothing
+ */
+ vma_end_reservation(h, vma, address);
}
}
@@ -4037,6 +4105,8 @@ again:
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
if (!pte_same(src_pte_old, entry)) {
+ restore_reserve_on_error(h, vma, addr,
+ new);
put_page(new);
/* dst_entry won't change as in child */
goto again;
@@ -4889,10 +4959,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
if (!page)
goto out;
} else if (!*pagep) {
- ret = -ENOMEM;
+ /* If a page already exists, then it's UFFDIO_COPY for
+ * a non-missing case. Return -EEXIST.
+ */
+ if (vm_shared &&
+ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
page = alloc_huge_page(dst_vma, dst_addr, 0);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ ret = -ENOMEM;
goto out;
+ }
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
@@ -4996,6 +5076,7 @@ out_release_unlock:
if (vm_shared || is_continue)
unlock_page(page);
out_release_nounlock:
+ restore_reserve_on_error(h, dst_vma, dst_addr, page);
put_page(page);
goto out;
}
@@ -5847,6 +5928,23 @@ unlock:
return ret;
}
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ int ret = 0;
+
+ *hugetlb = false;
+ spin_lock_irq(&hugetlb_lock);
+ if (PageHeadHuge(page)) {
+ *hugetlb = true;
+ if (HPageFreed(page) || HPageMigratable(page))
+ ret = get_page_unless_zero(page);
+ else
+ ret = -EBUSY;
+ }
+ spin_unlock_irq(&hugetlb_lock);
+ return ret;
+}
+
void putback_active_hugepage(struct page *page)
{
spin_lock_irq(&hugetlb_lock);
diff --git a/mm/internal.h b/mm/internal.h
index 2f1182948aa6..6ec2cea9926b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -116,6 +116,11 @@ extern void putback_lru_page(struct page *page);
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
/*
+ * in mm/memcontrol.c:
+ */
+extern bool cgroup_memory_nokmem;
+
+/*
* in mm/page_alloc.c
*/
@@ -198,10 +203,10 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern int user_min_free_kbytes;
-extern void free_unref_page(struct page *page);
+extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);
-extern void zone_pcp_update(struct zone *zone);
+extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
@@ -384,27 +389,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
*/
static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
{
- pgoff_t pgoff = page_to_pgoff(page);
- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page);
+ if (pgoff >= vma->vm_pgoff) {
+ address = vma->vm_start +
+ ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address >= vma->vm_end)
+ address = -EFAULT;
+ } else if (PageHead(page) &&
+ pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+ /* Test above avoids possibility of wrap to 0 on 32-bit */
+ address = vma->vm_start;
+ } else {
+ address = -EFAULT;
+ }
+ return address;
}
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
{
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
-
- /* page should be within @vma mapping range */
- VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
- return max(start, vma->vm_start);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page) + compound_nr(page);
+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address > vma->vm_end)
+ address = vma->vm_end;
+ return address;
}
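
A worked example of the pgoff arithmetic (values invented, 4K pages):

/*
 * page_to_pgoff(page) = 0x120
 * vma->vm_pgoff       = 0x100, vma->vm_start = 0x7f0000000000
 *
 * vma_address():     0x7f0000000000 + ((0x120 - 0x100) << 12)
 *                    = 0x7f0000020000  (or -EFAULT if >= vm_end)
 *
 * vma_address_end(), for a 512-page compound head at the same pgoff:
 *                    0x7f0000000000 + ((0x120 + 0x200 - 0x100) << 12)
 *                    = 0x7f0000220000  (clamped to vm_end if beyond it)
 */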
static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 9fe39a66388a..adcd9acaef61 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -37,5 +37,5 @@ CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
obj-$(CONFIG_KASAN) := common.o report.o
obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
-obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o
-obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o
+obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
+obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6bb87f2acd4e..2baf121fb8c5 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -51,11 +51,14 @@ void kasan_enable_current(void)
{
current->kasan_depth++;
}
+EXPORT_SYMBOL(kasan_enable_current);
void kasan_disable_current(void)
{
current->kasan_depth--;
}
+EXPORT_SYMBOL(kasan_disable_current);
+
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
void __kasan_unpoison_range(const void *address, size_t size)
@@ -97,7 +100,7 @@ slab_flags_t __kasan_never_merge(void)
return 0;
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
@@ -111,7 +114,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
if (likely(!PageHighMem(page)))
kasan_poison(page_address(page), PAGE_SIZE << order,
@@ -328,6 +331,9 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
u8 tag;
void *tagged_object;
+ if (!kasan_arch_is_ready())
+ return false;
+
tag = get_tag(object);
tagged_object = object;
object = kasan_reset_tag(object);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 53cbf28859b5..c3f5ba7a294a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -163,6 +163,9 @@ static __always_inline bool check_region_inline(unsigned long addr,
size_t size, bool write,
unsigned long ret_ip)
{
+ if (!kasan_arch_is_ready())
+ return true;
+
if (unlikely(size == 0))
return true;
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4004388b4e4b..4ea8c368b5b8 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -216,26 +216,36 @@ void __init kasan_init_hw_tags(void)
pr_info("KernelAddressSanitizer initialized\n");
}
-void kasan_set_free_info(struct kmem_cache *cache,
- void *object, u8 tag)
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
{
- struct kasan_alloc_meta *alloc_meta;
+ /*
+ * This condition should match the one in post_alloc_hook() in
+ * page_alloc.c.
+ */
+ bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+ if (flags & __GFP_SKIP_KASAN_POISON)
+ SetPageSkipKASanPoison(page);
+
+ if (flags & __GFP_ZEROTAGS) {
+ int i;
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
+ for (i = 0; i != 1 << order; ++i)
+ tag_clear_highpage(page + i);
+ } else {
+ kasan_unpoison_pages(page, order, init);
+ }
}
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag)
+void kasan_free_pages(struct page *page, unsigned int order)
{
- struct kasan_alloc_meta *alloc_meta;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return NULL;
+ /*
+ * This condition should match the one in free_pages_prepare() in
+ * page_alloc.c.
+ */
+ bool init = want_init_on_free();
- return &alloc_meta->free_track[0];
+ kasan_poison_pages(page, order, init);
}
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index c4605ac9837b..cc64ed6858c6 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -41,7 +41,7 @@ static inline bool kasan_p4d_table(pgd_t pgd)
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
-pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
+pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
@@ -53,7 +53,7 @@ static inline bool kasan_pud_table(p4d_t p4d)
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
+pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
@@ -64,7 +64,7 @@ static inline bool kasan_pmd_table(pud_t pud)
return false;
}
#endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
__page_aligned_bss;
static inline bool kasan_pte_table(pmd_t pmd)
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
/**
* kasan_populate_early_shadow - populate shadow memory region with
* kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
*/
int __ref kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8f450bc28045..98e3059bfea4 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -153,7 +153,7 @@ struct kasan_track {
depot_stack_handle_t stack;
};
-#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+#if defined(CONFIG_KASAN_TAGS_IDENTIFY) && defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_NR_FREE_STACKS 5
#else
#define KASAN_NR_FREE_STACKS 1
@@ -170,7 +170,7 @@ struct kasan_alloc_meta {
#else
struct kasan_track free_track[KASAN_NR_FREE_STACKS];
#endif
-#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+#ifdef CONFIG_KASAN_TAGS_IDENTIFY
u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
u8 free_track_idx;
#endif
@@ -449,6 +449,12 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
#endif /* CONFIG_KASAN_GENERIC */
+#ifndef kasan_arch_is_ready
+static inline bool kasan_arch_is_ready(void) { return true; }
+#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
+#error kasan_arch_is_ready only works in KASAN generic outline mode!
+#endif
+
/*
* Exported functions for interfaces called from assembly or from generated
* code. Declarations here to avoid warning about missing declarations.
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 14bd51ea2348..8fff1825b22c 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -230,7 +230,7 @@ static void print_address_description(void *addr, u8 tag)
{
struct page *page = kasan_addr_to_page(addr);
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
pr_err("\n");
if (page && PageSlab(page)) {
@@ -375,7 +375,7 @@ void kasan_report_async(void)
pr_err("BUG: KASAN: invalid-access\n");
pr_err("Asynchronous mode enabled: no access details available\n");
pr_err("\n");
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
end_report(&flags, 0);
}
#endif /* CONFIG_KASAN_HW_TAGS */
@@ -420,7 +420,7 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
pr_err("\n");
print_memory_metadata(info.first_bad_addr);
} else {
- dump_stack();
+ dump_stack_lvl(KERN_ERR);
}
end_report(&flags, addr);
diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c
index 42b2168755d6..5dbbbb930e7a 100644
--- a/mm/kasan/report_hw_tags.c
+++ b/mm/kasan/report_hw_tags.c
@@ -15,11 +15,6 @@
#include "kasan.h"
-const char *kasan_get_bug_type(struct kasan_access_info *info)
-{
- return "invalid-access";
-}
-
void *kasan_find_first_bad_addr(void *addr, size_t size)
{
return kasan_reset_tag(addr);
diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
index 3d20d3451d9e..d2298c357834 100644
--- a/mm/kasan/report_sw_tags.c
+++ b/mm/kasan/report_sw_tags.c
@@ -29,49 +29,6 @@
#include "kasan.h"
#include "../slab.h"
-const char *kasan_get_bug_type(struct kasan_access_info *info)
-{
-#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
- struct kasan_alloc_meta *alloc_meta;
- struct kmem_cache *cache;
- struct page *page;
- const void *addr;
- void *object;
- u8 tag;
- int i;
-
- tag = get_tag(info->access_addr);
- addr = kasan_reset_tag(info->access_addr);
- page = kasan_addr_to_page(addr);
- if (page && PageSlab(page)) {
- cache = page->slab_cache;
- object = nearest_obj(cache, page, (void *)addr);
- alloc_meta = kasan_get_alloc_meta(cache, object);
-
- if (alloc_meta) {
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- return "use-after-free";
- }
- }
- return "out-of-bounds";
- }
-
-#endif
- /*
- * If access_size is a negative number, then it has reason to be
- * defined as out-of-bounds bug type.
- *
- * Casting negative numbers to size_t would indeed turn up as
- * a large size_t and its value will be larger than ULONG_MAX/2,
- * so that this can qualify as out-of-bounds.
- */
- if (info->access_addr + info->access_size < info->access_addr)
- return "out-of-bounds";
-
- return "invalid-access";
-}
-
void *kasan_find_first_bad_addr(void *addr, size_t size)
{
u8 tag = get_tag(addr);
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
new file mode 100644
index 000000000000..8a319fc16dab
--- /dev/null
+++ b/mm/kasan/report_tags.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2020 Google, Inc.
+ */
+
+#include "kasan.h"
+#include "../slab.h"
+
+const char *kasan_get_bug_type(struct kasan_access_info *info)
+{
+#ifdef CONFIG_KASAN_TAGS_IDENTIFY
+ struct kasan_alloc_meta *alloc_meta;
+ struct kmem_cache *cache;
+ struct page *page;
+ const void *addr;
+ void *object;
+ u8 tag;
+ int i;
+
+ tag = get_tag(info->access_addr);
+ addr = kasan_reset_tag(info->access_addr);
+ page = kasan_addr_to_page(addr);
+ if (page && PageSlab(page)) {
+ cache = page->slab_cache;
+ object = nearest_obj(cache, page, (void *)addr);
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+
+ if (alloc_meta) {
+ for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+ if (alloc_meta->free_pointer_tag[i] == tag)
+ return "use-after-free";
+ }
+ }
+ return "out-of-bounds";
+ }
+#endif
+
+	/*
+	 * If access_size is a negative number, it makes sense to report
+	 * it as an out-of-bounds bug.
+	 *
+	 * A negative number cast to size_t turns up as a huge value
+	 * larger than ULONG_MAX/2, so the wrap-around check below
+	 * classifies it as out-of-bounds.
+	 */
+ if (info->access_addr + info->access_size < info->access_addr)
+ return "out-of-bounds";
+
+ return "invalid-access";
+}
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 082ee5b6d9a1..8d95ee52d019 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -73,6 +73,9 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
void *shadow_start, *shadow_end;
+ if (!kasan_arch_is_ready())
+ return;
+
/*
* Perform shadow offset calculation based on untagged address, as
* some of the callers (e.g. kasan_poison_object_data) pass tagged
@@ -99,6 +102,9 @@ EXPORT_SYMBOL(kasan_poison);
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
+ if (!kasan_arch_is_ready())
+ return;
+
if (size & KASAN_GRANULE_MASK) {
u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
*shadow = size & KASAN_GRANULE_MASK;
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 9df8e7f69e87..bd3f540feb47 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -167,43 +167,9 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
}
EXPORT_SYMBOL(__hwasan_tag_memory);
-void kasan_set_free_info(struct kmem_cache *cache,
- void *object, u8 tag)
+void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
+ unsigned long ret_ip)
{
- struct kasan_alloc_meta *alloc_meta;
- u8 idx = 0;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return;
-
-#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
- idx = alloc_meta->free_track_idx;
- alloc_meta->free_pointer_tag[idx] = tag;
- alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
-#endif
-
- kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
-}
-
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag)
-{
- struct kasan_alloc_meta *alloc_meta;
- int i = 0;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return NULL;
-
-#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- break;
- }
- if (i == KASAN_NR_FREE_STACKS)
- i = alloc_meta->free_track_idx;
-#endif
-
- return &alloc_meta->free_track[i];
+ kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
+ ret_ip);
}
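
kasan_tag_mismatch() is called from compiler-generated instrumentation; in the HWASan-style encoding decoded above, bits 0-3 of access_info carry log2 of the access size and bit 4 the write flag. A standalone sketch of the same decoding (illustration only):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	static void decode_access_info(unsigned long access_info)
	{
		size_t size = 1UL << (access_info & 0xf); /* bits 0-3: log2(size) */
		bool is_write = access_info & 0x10;       /* bit 4: write flag */

		printf("%s of %zu byte(s)\n", is_write ? "write" : "read", size);
	}

	int main(void)
	{
		decode_access_info(0x13); /* write, 8 bytes */
		decode_access_info(0x02); /* read, 4 bytes */
		return 0;
	}
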
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
new file mode 100644
index 000000000000..8f48b9502a17
--- /dev/null
+++ b/mm/kasan/tags.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common tag-based KASAN code.
+ *
+ * Copyright (c) 2018 Google, Inc.
+ * Copyright (c) 2020 Google, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "kasan.h"
+
+void kasan_set_free_info(struct kmem_cache *cache,
+ void *object, u8 tag)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ u8 idx = 0;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return;
+
+#ifdef CONFIG_KASAN_TAGS_IDENTIFY
+ idx = alloc_meta->free_track_idx;
+ alloc_meta->free_pointer_tag[idx] = tag;
+ alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
+#endif
+
+ kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
+}
+
+struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
+ void *object, u8 tag)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ int i = 0;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return NULL;
+
+#ifdef CONFIG_KASAN_TAGS_IDENTIFY
+ for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+ if (alloc_meta->free_pointer_tag[i] == tag)
+ break;
+ }
+ if (i == KASAN_NR_FREE_STACKS)
+ i = alloc_meta->free_track_idx;
+#endif
+
+ return &alloc_meta->free_track[i];
+}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index e18fbbd5d9b4..4d21ac44d5d3 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
* During low activity with no allocations we might wait a
* while; let's avoid the hung task warning.
*/
- wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
- sysctl_hung_task_timeout_secs * HZ / 2);
+ wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+ sysctl_hung_task_timeout_secs * HZ / 2);
} else {
- wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
}
/* Disable static key and reset timer. */
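
The *_idle waiters sleep in TASK_IDLE rather than TASK_UNINTERRUPTIBLE, so a long gap between allocations neither inflates the load average nor trips the hung-task detector (which only flags plain TASK_UNINTERRUPTIBLE sleeps). The distinction, sketched (simplified, not the in-tree definitions):

	#define TASK_UNINTERRUPTIBLE	0x0002
	#define TASK_NOLOAD		0x0400
	/* idle == uninterruptible, but exempt from loadavg and hung-task checks */
	#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
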
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 4acf4251ee04..7f24b9bcb2ec 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -197,7 +197,7 @@ static void test_cache_destroy(void)
static inline size_t kmalloc_cache_alignment(size_t size)
{
- return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align;
+ return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}
/* Must always inline to match stack trace against caller. */
@@ -267,7 +267,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
if (is_kfence_address(alloc)) {
struct page *page = virt_to_head_page(alloc);
- struct kmem_cache *s = test_cache ?: kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)];
+ struct kmem_cache *s = test_cache ?:
+ kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
/*
* Verify that various helpers return the right values
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 92a2d4885808..228a2fbe0657 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -219,7 +219,7 @@ static struct task_struct *scan_thread;
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
-static signed long jiffies_scan_wait;
+static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
@@ -1567,7 +1567,7 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
- signed long timeout = jiffies_scan_wait;
+ signed long timeout = READ_ONCE(jiffies_scan_wait);
mutex_lock(&scan_mutex);
kmemleak_scan();
@@ -1807,14 +1807,20 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
else if (strncmp(buf, "scan=off", 8) == 0)
stop_scan_thread();
else if (strncmp(buf, "scan=", 5) == 0) {
- unsigned long secs;
+ unsigned secs;
+ unsigned long msecs;
- ret = kstrtoul(buf + 5, 0, &secs);
+ ret = kstrtouint(buf + 5, 0, &secs);
if (ret < 0)
goto out;
+
+ msecs = secs * MSEC_PER_SEC;
+ if (msecs > UINT_MAX)
+ msecs = UINT_MAX;
+
stop_scan_thread();
- if (secs) {
- jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
+ if (msecs) {
+ WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
start_scan_thread();
}
} else if (strncmp(buf, "scan", 4) == 0)
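
Reading the interval into an unsigned int and clamping the millisecond product keeps the value within what msecs_to_jiffies() accepts. A standalone model of the clamping arithmetic (assumes a 64-bit unsigned long; illustration only):

	#include <limits.h>
	#include <stdio.h>

	#define MSEC_PER_SEC 1000UL

	/* seconds -> milliseconds, saturating at UINT_MAX */
	static unsigned long secs_to_clamped_msecs(unsigned int secs)
	{
		unsigned long msecs = (unsigned long)secs * MSEC_PER_SEC;

		return msecs > UINT_MAX ? UINT_MAX : msecs;
	}

	int main(void)
	{
		printf("%lu\n", secs_to_clamped_msecs(600));      /* 600000 */
		printf("%lu\n", secs_to_clamped_msecs(UINT_MAX)); /* clamped to 4294967295 */
		return 0;
	}
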
diff --git a/mm/ksm.c b/mm/ksm.c
index 2f3aaeb34a42..3fa9bc8a67cf 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -521,10 +521,8 @@ static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
struct vm_area_struct *vma;
if (ksm_test_exit(mm))
return NULL;
- vma = find_vma(mm, addr);
- if (!vma || vma->vm_start > addr)
- return NULL;
- if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
return NULL;
return vma;
}
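
This and the later conversions rely on vma_lookup() returning a VMA only when the address actually falls inside it, folding the open-coded vm_start check into the lookup. Roughly (a sketch of the semantics, not necessarily the exact in-tree definition):

	static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
							unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/*
		 * find_vma() returns the first VMA ending above addr; reject
		 * it when addr lies in the gap below its start.
		 */
		if (vma && addr < vma->vm_start)
			vma = NULL;
		return vma;
	}
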
diff --git a/mm/memblock.c b/mm/memblock.c
index afaefa8fc6ab..123feef5259d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,7 +92,7 @@
* system initialization completes.
*/
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif
@@ -607,7 +607,7 @@ repeat:
* area, insert that portion.
*/
if (rbase > base) {
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
WARN_ON(nid != memblock_get_region_node(rgn));
#endif
WARN_ON(flags != rgn->flags);
@@ -1205,7 +1205,7 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int start_rgn, end_rgn;
int i, ret;
@@ -1849,7 +1849,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
size = rgn->size;
end = base + size - 1;
flags = rgn->flags;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
if (memblock_get_region_node(rgn) != MAX_NUMNODES)
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64ada9e650a5..4ee243ce6135 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -78,12 +78,13 @@ struct mem_cgroup *root_mem_cgroup __read_mostly;
/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
+EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;
/* Kernel memory accounting disabled? */
-static bool cgroup_memory_nokmem;
+bool cgroup_memory_nokmem;
/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
@@ -261,7 +262,6 @@ static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
static void obj_cgroup_release(struct percpu_ref *ref)
{
struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
- struct mem_cgroup *memcg;
unsigned int nr_bytes;
unsigned int nr_pages;
unsigned long flags;
@@ -290,12 +290,11 @@ static void obj_cgroup_release(struct percpu_ref *ref)
WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
nr_pages = nr_bytes >> PAGE_SHIFT;
- spin_lock_irqsave(&css_set_lock, flags);
- memcg = obj_cgroup_memcg(objcg);
if (nr_pages)
obj_cgroup_uncharge_pages(objcg, nr_pages);
+
+ spin_lock_irqsave(&css_set_lock, flags);
list_del(&objcg->list);
- mem_cgroup_put(memcg);
spin_unlock_irqrestore(&css_set_lock, flags);
percpu_ref_exit(ref);
@@ -330,17 +329,12 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
spin_lock_irq(&css_set_lock);
- /* Move active objcg to the parent's list */
- xchg(&objcg->memcg, parent);
- css_get(&parent->css);
- list_add(&objcg->list, &parent->objcg_list);
-
- /* Move already reparented objcgs to the parent's list */
- list_for_each_entry(iter, &memcg->objcg_list, list) {
- css_get(&parent->css);
- xchg(&iter->memcg, parent);
- css_put(&memcg->css);
- }
+ /* 1) Ready to reparent active objcg. */
+ list_add(&objcg->list, &memcg->objcg_list);
+ /* 2) Reparent active objcg and already reparented objcgs to parent. */
+ list_for_each_entry(iter, &memcg->objcg_list, list)
+ WRITE_ONCE(iter->memcg, parent);
+ /* 3) Move already reparented objcgs to the parent's list */
list_splice(&memcg->objcg_list, &parent->objcg_list);
spin_unlock_irq(&css_set_lock);
@@ -782,6 +776,24 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
rcu_read_unlock();
}
+/*
+ * mod_objcg_mlstate() may be called with irqs enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+ struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ mod_memcg_lruvec_state(lruvec, idx, nr);
+ rcu_read_unlock();
+}
+
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
@@ -886,13 +898,24 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
}
EXPORT_SYMBOL(mem_cgroup_from_task);
+static __always_inline struct mem_cgroup *active_memcg(void)
+{
+ if (in_interrupt())
+ return this_cpu_read(int_active_memcg);
+ else
+ return current->active_memcg;
+}
+
/**
* get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
* @mm: mm from which memcg should be extracted. It can be NULL.
*
- * Obtain a reference on mm->memcg and returns it if successful. Otherwise
- * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
- * returned.
+ * Obtain a reference on mm->memcg and return it if successful. If mm
+ * is NULL, the memcg is chosen as follows:
+ * 1) The active memcg, if set.
+ * 2) current->mm->memcg, if available.
+ * 3) The root memcg.
+ * If mem_cgroup is disabled, NULL is returned.
*/
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
@@ -901,34 +924,38 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
if (mem_cgroup_disabled())
return NULL;
+ /*
+ * Page cache insertions can happen without an
+ * actual mm context, e.g. during disk probing
+ * on boot, loopback IO, acct() writes etc.
+ *
+ * No need to css_get on root memcg as the reference
+ * counting is disabled on the root level in the
+ * cgroup core. See CSS_NO_REF.
+ */
+ if (unlikely(!mm)) {
+ memcg = active_memcg();
+ if (unlikely(memcg)) {
+ /* remote memcg must hold a ref */
+ css_get(&memcg->css);
+ return memcg;
+ }
+ mm = current->mm;
+ if (unlikely(!mm))
+ return root_mem_cgroup;
+ }
+
rcu_read_lock();
do {
- /*
- * Page cache insertions can happen without an
- * actual mm context, e.g. during disk probing
- * on boot, loopback IO, acct() writes etc.
- */
- if (unlikely(!mm))
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
memcg = root_mem_cgroup;
- else {
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
- memcg = root_mem_cgroup;
- }
} while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
-static __always_inline struct mem_cgroup *active_memcg(void)
-{
- if (in_interrupt())
- return this_cpu_read(int_active_memcg);
- else
- return current->active_memcg;
-}
-
static __always_inline bool memcg_kmem_bypass(void)
{
/* Allow remote memcg charging from any context. */
@@ -1178,9 +1205,8 @@ void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
struct lruvec *lock_page_lruvec(struct page *page)
{
struct lruvec *lruvec;
- struct pglist_data *pgdat = page_pgdat(page);
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ lruvec = mem_cgroup_page_lruvec(page);
spin_lock(&lruvec->lru_lock);
lruvec_memcg_debug(lruvec, page);
@@ -1191,9 +1217,8 @@ struct lruvec *lock_page_lruvec(struct page *page)
struct lruvec *lock_page_lruvec_irq(struct page *page)
{
struct lruvec *lruvec;
- struct pglist_data *pgdat = page_pgdat(page);
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ lruvec = mem_cgroup_page_lruvec(page);
spin_lock_irq(&lruvec->lru_lock);
lruvec_memcg_debug(lruvec, page);
@@ -1204,9 +1229,8 @@ struct lruvec *lock_page_lruvec_irq(struct page *page)
struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
{
struct lruvec *lruvec;
- struct pglist_data *pgdat = page_pgdat(page);
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ lruvec = mem_cgroup_page_lruvec(page);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
lruvec_memcg_debug(lruvec, page);
@@ -2040,14 +2064,23 @@ void unlock_page_memcg(struct page *page)
}
EXPORT_SYMBOL(unlock_page_memcg);
-struct memcg_stock_pcp {
- struct mem_cgroup *cached; /* this never be root cgroup */
- unsigned int nr_pages;
-
+struct obj_stock {
#ifdef CONFIG_MEMCG_KMEM
struct obj_cgroup *cached_objcg;
+ struct pglist_data *cached_pgdat;
unsigned int nr_bytes;
+ int nr_slab_reclaimable_b;
+ int nr_slab_unreclaimable_b;
+#else
+ int dummy[0];
#endif
+};
+
+struct memcg_stock_pcp {
+	struct mem_cgroup *cached; /* this is never the root cgroup */
+ unsigned int nr_pages;
+ struct obj_stock task_obj;
+ struct obj_stock irq_obj;
struct work_struct work;
unsigned long flags;
@@ -2057,12 +2090,12 @@ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
#ifdef CONFIG_MEMCG_KMEM
-static void drain_obj_stock(struct memcg_stock_pcp *stock);
+static void drain_obj_stock(struct obj_stock *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
struct mem_cgroup *root_memcg);
#else
-static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
+static inline void drain_obj_stock(struct obj_stock *stock)
{
}
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
@@ -2072,6 +2105,41 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
}
#endif
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence otherwise used to access the object stock is slow, so to optimize
+ * for user context access there are now two object stocks, one for task
+ * context and one for interrupt context.
+ *
+ * The task context object stock can be accessed by merely disabling
+ * preemption, which is cheap in a non-preempt kernel. The interrupt context
+ * object stock can only be accessed after disabling interrupts. Task context
+ * code can access the interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+ struct memcg_stock_pcp *stock;
+
+ if (likely(in_task())) {
+ *pflags = 0UL;
+ preempt_disable();
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->task_obj;
+ }
+
+ local_irq_save(*pflags);
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+ if (likely(in_task()))
+ preempt_enable();
+ else
+ local_irq_restore(flags);
+}
+
/**
* consume_stock: Try to consume stocked charge on this cpu.
* @memcg: memcg to consume from.
@@ -2138,7 +2206,9 @@ static void drain_local_stock(struct work_struct *dummy)
local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
- drain_obj_stock(stock);
+ drain_obj_stock(&stock->irq_obj);
+ if (in_task())
+ drain_obj_stock(&stock->task_obj);
drain_stock(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
@@ -2504,8 +2574,8 @@ out:
css_put(&memcg->css);
}
-static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
- unsigned int nr_pages)
+static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ unsigned int nr_pages)
{
unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
int nr_retries = MAX_RECLAIM_RETRIES;
@@ -2517,8 +2587,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
bool drained = false;
unsigned long pflags;
- if (mem_cgroup_is_root(memcg))
- return 0;
retry:
if (consume_stock(memcg, nr_pages))
return 0;
@@ -2698,6 +2766,15 @@ done_restock:
return 0;
}
+static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ unsigned int nr_pages)
+{
+ if (mem_cgroup_is_root(memcg))
+ return 0;
+
+ return try_charge_memcg(memcg, gfp_mask, nr_pages);
+}
+
#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
@@ -2739,6 +2816,13 @@ retry:
}
#ifdef CONFIG_MEMCG_KMEM
+/*
+ * The allocated objcg pointers array is not accounted directly.
+ * Moreover, it should not come from a DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page)
{
@@ -2746,6 +2830,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
unsigned long memcg_data;
void *vec;
+ gfp &= ~OBJCGS_CLEAR_MASK;
vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
page_to_nid(page));
if (!vec)
@@ -2925,7 +3010,7 @@ static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
memcg = get_mem_cgroup_from_objcg(objcg);
- ret = try_charge(memcg, gfp, nr_pages);
+ ret = try_charge_memcg(memcg, gfp, nr_pages);
if (ret)
goto out;
@@ -2995,26 +3080,81 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
obj_cgroup_put(objcg);
}
+void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr)
+{
+ unsigned long flags;
+ struct obj_stock *stock = get_obj_stock(&flags);
+ int *bytes;
+
+ /*
+ * Save vmstat data in stock and skip vmstat array update unless
+ * accumulating over a page of vmstat data or when pgdat or idx
+ * changes.
+ */
+ if (stock->cached_objcg != objcg) {
+ drain_obj_stock(stock);
+ obj_cgroup_get(objcg);
+ stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ stock->cached_objcg = objcg;
+ stock->cached_pgdat = pgdat;
+ } else if (stock->cached_pgdat != pgdat) {
+ /* Flush the existing cached vmstat data */
+ if (stock->nr_slab_reclaimable_b) {
+ mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+ stock->nr_slab_reclaimable_b);
+ stock->nr_slab_reclaimable_b = 0;
+ }
+ if (stock->nr_slab_unreclaimable_b) {
+ mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+ stock->nr_slab_unreclaimable_b);
+ stock->nr_slab_unreclaimable_b = 0;
+ }
+ stock->cached_pgdat = pgdat;
+ }
+
+ bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
+ : &stock->nr_slab_unreclaimable_b;
+ /*
+	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
+	 * cached locally at least once before being pushed out.
+ */
+ if (!*bytes) {
+ *bytes = nr;
+ nr = 0;
+ } else {
+ *bytes += nr;
+ if (abs(*bytes) > PAGE_SIZE) {
+ nr = *bytes;
+ *bytes = 0;
+ } else {
+ nr = 0;
+ }
+ }
+ if (nr)
+ mod_objcg_mlstate(objcg, pgdat, idx, nr);
+
+ put_obj_stock(flags);
+}
+
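
The batching above always caches a delta at least once and only touches the shared vmstat counters once more than a page's worth of bytes has accumulated (or the objcg/pgdat key changes). A standalone model of just that accumulate-and-flush logic (simplified, illustration only):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096

	static int cached_bytes;	/* stands in for stock->nr_slab_*_b */

	static void flush(int nr)
	{
		printf("flush %d bytes to shared counter\n", nr);
	}

	static void mod_state_batched(int nr)
	{
		if (!cached_bytes) {
			cached_bytes = nr;	/* cache at least once */
			return;
		}
		cached_bytes += nr;
		if (abs(cached_bytes) > PAGE_SIZE) {
			flush(cached_bytes);
			cached_bytes = 0;
		}
	}

	int main(void)
	{
		mod_state_batched(512);		/* cached */
		mod_state_batched(4000);	/* 4512 > PAGE_SIZE: flushed */
		mod_state_batched(-64);		/* cached again */
		return 0;
	}
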
static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
- struct memcg_stock_pcp *stock;
unsigned long flags;
+ struct obj_stock *stock = get_obj_stock(&flags);
bool ret = false;
- local_irq_save(flags);
-
- stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
stock->nr_bytes -= nr_bytes;
ret = true;
}
- local_irq_restore(flags);
+ put_obj_stock(flags);
return ret;
}
-static void drain_obj_stock(struct memcg_stock_pcp *stock)
+static void drain_obj_stock(struct obj_stock *stock)
{
struct obj_cgroup *old = stock->cached_objcg;
@@ -3042,6 +3182,25 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
stock->nr_bytes = 0;
}
+ /*
+ * Flush the vmstat data in current stock
+ */
+ if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
+ if (stock->nr_slab_reclaimable_b) {
+ mod_objcg_mlstate(old, stock->cached_pgdat,
+ NR_SLAB_RECLAIMABLE_B,
+ stock->nr_slab_reclaimable_b);
+ stock->nr_slab_reclaimable_b = 0;
+ }
+ if (stock->nr_slab_unreclaimable_b) {
+ mod_objcg_mlstate(old, stock->cached_pgdat,
+ NR_SLAB_UNRECLAIMABLE_B,
+ stock->nr_slab_unreclaimable_b);
+ stock->nr_slab_unreclaimable_b = 0;
+ }
+ stock->cached_pgdat = NULL;
+ }
+
obj_cgroup_put(old);
stock->cached_objcg = NULL;
}
@@ -3051,8 +3210,13 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
{
struct mem_cgroup *memcg;
- if (stock->cached_objcg) {
- memcg = obj_cgroup_memcg(stock->cached_objcg);
+ if (in_task() && stock->task_obj.cached_objcg) {
+ memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
+ if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
+ return true;
+ }
+ if (stock->irq_obj.cached_objcg) {
+ memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
return true;
}
@@ -3060,26 +3224,32 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
return false;
}
-static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
+ bool allow_uncharge)
{
- struct memcg_stock_pcp *stock;
unsigned long flags;
+ struct obj_stock *stock = get_obj_stock(&flags);
+ unsigned int nr_pages = 0;
- local_irq_save(flags);
-
- stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
drain_obj_stock(stock);
obj_cgroup_get(objcg);
stock->cached_objcg = objcg;
- stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
+ stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ allow_uncharge = true; /* Allow uncharge when objcg changes */
}
stock->nr_bytes += nr_bytes;
- if (stock->nr_bytes > PAGE_SIZE)
- drain_obj_stock(stock);
+ if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
+ nr_pages = stock->nr_bytes >> PAGE_SHIFT;
+ stock->nr_bytes &= (PAGE_SIZE - 1);
+ }
- local_irq_restore(flags);
+ put_obj_stock(flags);
+
+ if (nr_pages)
+ obj_cgroup_uncharge_pages(objcg, nr_pages);
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3091,14 +3261,27 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
return 0;
/*
- * In theory, memcg->nr_charged_bytes can have enough
+ * In theory, objcg->nr_charged_bytes can have enough
* pre-charged bytes to satisfy the allocation. However,
- * flushing memcg->nr_charged_bytes requires two atomic
- * operations, and memcg->nr_charged_bytes can't be big,
- * so it's better to ignore it and try grab some new pages.
- * memcg->nr_charged_bytes will be flushed in
- * refill_obj_stock(), called from this function or
- * independently later.
+ * flushing objcg->nr_charged_bytes requires two atomic
+ * operations, and objcg->nr_charged_bytes can't be big.
+ * The shared objcg->nr_charged_bytes can also become a
+ * performance bottleneck if all tasks of the same memcg are
+ * trying to update it. So it's better to ignore it and try
+ * grab some new pages. The stock's nr_bytes will be flushed to
+ * objcg->nr_charged_bytes later on when objcg changes.
+ *
+ * The stock's nr_bytes may contain enough pre-charged bytes
+ * to allow one less page from being charged, but we can't rely
+ * on the pre-charged bytes not being changed outside of
+ * consume_obj_stock() or refill_obj_stock(). So ignore those
+ * pre-charged bytes as well when charging pages. To avoid a
+ * page uncharge right after a page charge, we set the
+ * allow_uncharge flag to false when calling refill_obj_stock()
+ * to temporarily allow the pre-charged bytes to exceed the page
+ * size limit. The maximum reachable value of the pre-charged
+ * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
+ * race.
*/
nr_pages = size >> PAGE_SHIFT;
nr_bytes = size & (PAGE_SIZE - 1);
@@ -3108,14 +3291,14 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
if (!ret && nr_bytes)
- refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
+ refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
return ret;
}
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
- refill_obj_stock(objcg, size);
+ refill_obj_stock(objcg, size, true);
}
#endif /* CONFIG_MEMCG_KMEM */
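
A worked example of the page/byte split done in obj_cgroup_charge() above: charging 9000 bytes with 4096-byte pages gives nr_pages = 2 and nr_bytes = 808; since nr_bytes is non-zero a third page is charged, and the leftover PAGE_SIZE - nr_bytes = 3288 bytes go back to the stock with uncharging disabled. The arithmetic, standalone (illustrative values only):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long size = 9000;
		unsigned long nr_pages = size >> PAGE_SHIFT;	 /* 2 */
		unsigned long nr_bytes = size & (PAGE_SIZE - 1); /* 808 */

		if (nr_bytes)
			nr_pages++;	/* charge a third, partially used page */

		printf("charge %lu pages, refill stock with %lu bytes\n",
		       nr_pages, nr_bytes ? PAGE_SIZE - nr_bytes : 0);
		return 0;
	}
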
@@ -6541,7 +6724,8 @@ out:
* @gfp_mask: reclaim mode
*
* Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary.
+ * pages according to @gfp_mask if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
*
* Do not use this for pages allocated for swapin.
*
@@ -6671,6 +6855,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
unsigned long nr_pages;
struct mem_cgroup *memcg;
struct obj_cgroup *objcg;
+ bool use_objcg = PageMemcgKmem(page);
VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -6679,7 +6864,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
* page memcg or objcg at this point, we have fully
* exclusive access to the page.
*/
- if (PageMemcgKmem(page)) {
+ if (use_objcg) {
objcg = __page_objcg(page);
/*
* This get matches the put at the end of the function and
@@ -6707,7 +6892,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
nr_pages = compound_nr(page);
- if (PageMemcgKmem(page)) {
+ if (use_objcg) {
ug->nr_memory += nr_pages;
ug->nr_kmem += nr_pages;
@@ -6806,9 +6991,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
/* Force-charge the new page. The old one will be freed soon */
nr_pages = thp_nr_pages(newpage);
- page_counter_charge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_charge(&memcg->memsw, nr_pages);
+ if (!mem_cgroup_is_root(memcg)) {
+ page_counter_charge(&memcg->memory, nr_pages);
+ if (do_memsw_account())
+ page_counter_charge(&memcg->memsw, nr_pages);
+ }
css_get(&memcg->css);
commit_charge(newpage, memcg);
diff --git a/mm/memfd.c b/mm/memfd.c
index 2647c898990c..081dd33e6a61 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -297,9 +297,9 @@ SYSCALL_DEFINE2(memfd_create,
}
if (flags & MFD_HUGETLB) {
- struct user_struct *user = NULL;
+ struct ucounts *ucounts = NULL;
- file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
+ file = hugetlb_file_setup(name, 0, VM_NORESERVE, &ucounts,
HUGETLB_ANONHUGE_INODE,
(flags >> MFD_HUGE_SHIFT) &
MFD_HUGE_MASK);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..e5a1531f7f4e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -56,6 +56,7 @@
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
+#include <linux/pagewalk.h>
#include "internal.h"
#include "ras/ras_event.h"
@@ -554,6 +555,148 @@ static void collect_procs(struct page *page, struct list_head *tokill,
collect_procs_file(page, tokill, force_early);
}
+struct hwp_walk {
+ struct to_kill tk;
+ unsigned long pfn;
+ int flags;
+};
+
+static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
+{
+ tk->addr = addr;
+ tk->size_shift = shift;
+}
+
+static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
+ unsigned long poisoned_pfn, struct to_kill *tk)
+{
+ unsigned long pfn = 0;
+
+ if (pte_present(pte)) {
+ pfn = pte_pfn(pte);
+ } else {
+ swp_entry_t swp = pte_to_swp_entry(pte);
+
+ if (is_hwpoison_entry(swp))
+ pfn = hwpoison_entry_to_pfn(swp);
+ }
+
+ if (!pfn || pfn != poisoned_pfn)
+ return 0;
+
+ set_to_kill(tk, addr, shift);
+ return 1;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
+ struct hwp_walk *hwp)
+{
+ pmd_t pmd = *pmdp;
+ unsigned long pfn;
+ unsigned long hwpoison_vaddr;
+
+ if (!pmd_present(pmd))
+ return 0;
+ pfn = pmd_pfn(pmd);
+ if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
+ hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
+ set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
+ return 1;
+ }
+ return 0;
+}
+#else
+static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
+ struct hwp_walk *hwp)
+{
+ return 0;
+}
+#endif
+
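
check_hwpoisoned_pmd_entry() recovers the user virtual address of the poisoned subpage from its offset within the huge page. A quick standalone check of that arithmetic (all values assumed for illustration):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long addr = 0x7f0000000000UL;	/* THP mapping start */
		unsigned long pfn = 0x100000;		/* head PFN of the THP */
		unsigned long poisoned = 0x100005;	/* 5 subpages in */
		unsigned long vaddr = addr + ((poisoned - pfn) << PAGE_SHIFT);

		assert(vaddr == addr + 5 * 4096);
		printf("SIGBUS address: %#lx\n", vaddr);
		return 0;
	}
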
+static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
+ int ret = 0;
+ pte_t *ptep;
+ spinlock_t *ptl;
+
+ ptl = pmd_trans_huge_lock(pmdp, walk->vma);
+ if (ptl) {
+ ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
+ spin_unlock(ptl);
+ goto out;
+ }
+
+ if (pmd_trans_unstable(pmdp))
+ goto out;
+
+ ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl);
+ for (; addr != end; ptep++, addr += PAGE_SIZE) {
+ ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
+ hwp->pfn, &hwp->tk);
+ if (ret == 1)
+ break;
+ }
+ pte_unmap_unlock(ptep - 1, ptl);
+out:
+ cond_resched();
+ return ret;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
+ pte_t pte = huge_ptep_get(ptep);
+ struct hstate *h = hstate_vma(walk->vma);
+
+ return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
+ hwp->pfn, &hwp->tk);
+}
+#else
+#define hwpoison_hugetlb_range NULL
+#endif
+
+static struct mm_walk_ops hwp_walk_ops = {
+ .pmd_entry = hwpoison_pte_range,
+ .hugetlb_entry = hwpoison_hugetlb_range,
+};
+
+/*
+ * Sends SIGBUS to the current process with error info.
+ *
+ * This function is intended to handle "Action Required" MCEs on already
+ * hardware poisoned pages. They could happen, for example, when
+ * memory_failure() failed to unmap the error page at the first call, or
+ * when multiple local machine checks happened on different CPUs.
+ *
+ * The MCE handler currently has no easy access to the error virtual address,
+ * so this function walks the page tables to find it. The returned virtual
+ * address is correct in most cases, but it can be wrong when the application
+ * process has multiple entries mapping the error page.
+ */
+static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
+ int flags)
+{
+ int ret;
+ struct hwp_walk priv = {
+ .pfn = pfn,
+ };
+ priv.tk.tsk = p;
+
+ mmap_read_lock(p->mm);
+ ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
+ (void *)&priv);
+ if (ret == 1 && priv.tk.addr)
+ kill_proc(&priv.tk, pfn, flags);
+ mmap_read_unlock(p->mm);
+ return ret ? -EFAULT : -EHWPOISON;
+}
+
static const char *action_name[] = {
[MF_IGNORED] = "Ignored",
[MF_FAILED] = "Failed",
@@ -658,6 +801,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
*/
static int me_kernel(struct page *p, unsigned long pfn)
{
+ unlock_page(p);
return MF_IGNORED;
}
@@ -667,6 +811,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
static int me_unknown(struct page *p, unsigned long pfn)
{
pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
+ unlock_page(p);
return MF_FAILED;
}
@@ -675,6 +820,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
*/
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
+ int ret;
struct address_space *mapping;
delete_from_lru_cache(p);
@@ -683,8 +829,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
* For anonymous pages we're done the only reference left
* should be the one m_f() holds.
*/
- if (PageAnon(p))
- return MF_RECOVERED;
+ if (PageAnon(p)) {
+ ret = MF_RECOVERED;
+ goto out;
+ }
/*
* Now truncate the page in the page cache. This is really
@@ -698,7 +846,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
/*
* Page has been torn down in the meanwhile
*/
- return MF_FAILED;
+ ret = MF_FAILED;
+ goto out;
}
/*
@@ -706,7 +855,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
*
* Open: to take i_mutex or not for this? Right now we don't.
*/
- return truncate_error_page(p, pfn, mapping);
+ ret = truncate_error_page(p, pfn, mapping);
+out:
+ unlock_page(p);
+ return ret;
}
/*
@@ -782,24 +934,26 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
*/
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
+ int ret;
+
ClearPageDirty(p);
/* Trigger EIO in shmem: */
ClearPageUptodate(p);
- if (!delete_from_lru_cache(p))
- return MF_DELAYED;
- else
- return MF_FAILED;
+ ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+ unlock_page(p);
+ return ret;
}
static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
+ int ret;
+
delete_from_swap_cache(p);
- if (!delete_from_lru_cache(p))
- return MF_RECOVERED;
- else
- return MF_FAILED;
+ ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+ unlock_page(p);
+ return ret;
}
/*
@@ -820,6 +974,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
mapping = page_mapping(hpage);
if (mapping) {
res = truncate_error_page(hpage, pfn, mapping);
+ unlock_page(hpage);
} else {
res = MF_FAILED;
unlock_page(hpage);
@@ -834,7 +989,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
page_ref_inc(p);
res = MF_RECOVERED;
}
- lock_page(hpage);
}
return res;
@@ -866,6 +1020,8 @@ static struct page_state {
unsigned long mask;
unsigned long res;
enum mf_action_page_type type;
+
+ /* Callback ->action() has to unlock the relevant page inside it. */
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
{ reserved, reserved, MF_MSG_KERNEL, me_kernel },
@@ -929,6 +1085,7 @@ static int page_action(struct page_state *ps, struct page *p,
int result;
int count;
+ /* page p should be unlocked after returning from ps->action(). */
result = ps->action(p, pfn);
count = page_count(p) - 1;
@@ -949,18 +1106,36 @@ static int page_action(struct page_state *ps, struct page *p,
return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}
-/**
- * __get_hwpoison_page() - Get refcount for memory error handling:
- * @page: raw error page (hit by memory error)
- *
- * Return: return 0 if failed to grab the refcount, otherwise true (some
- * non-zero value.)
+/*
+ * Return true if the page type of a given page is supported by the hwpoison
+ * mechanism (though handling may still fail), otherwise false. This function
+ * does not return true for hugetlb or device memory pages, so it's assumed
+ * to be called only in contexts where such pages never occur.
*/
+static inline bool HWPoisonHandlable(struct page *page)
+{
+ return PageLRU(page) || __PageMovable(page);
+}
+
static int __get_hwpoison_page(struct page *page)
{
struct page *head = compound_head(page);
+ int ret = 0;
+ bool hugetlb = false;
+
+ ret = get_hwpoison_huge_page(head, &hugetlb);
+ if (hugetlb)
+ return ret;
- if (!PageHuge(head) && PageTransHuge(head)) {
+ /*
+	 * This check prevents calling get_page_unless_zero() for any
+	 * unsupported type of page in order to reduce the risk of
+ * unexpected races caused by taking a page refcount.
+ */
+ if (!HWPoisonHandlable(head))
+ return 0;
+
+ if (PageTransHuge(head)) {
/*
* Non anonymous thp exists only in allocation/free time. We
* can't handle such a case correctly, so let's give it up.
@@ -986,15 +1161,6 @@ static int __get_hwpoison_page(struct page *page)
return 0;
}
-/*
- * Safely get reference count of an arbitrary page.
- *
- * Returns 0 for a free page, 1 for an in-use page,
- * -EIO for a page-type we cannot handle and -EBUSY if we raced with an
- * allocation.
- * We only incremented refcount in case the page was already in-use and it
- * is a known type we can handle.
- */
static int get_any_page(struct page *p, unsigned long flags)
{
int ret = 0, pass = 0;
@@ -1004,50 +1170,77 @@ static int get_any_page(struct page *p, unsigned long flags)
count_increased = true;
try_again:
- if (!count_increased && !__get_hwpoison_page(p)) {
- if (page_count(p)) {
- /* We raced with an allocation, retry. */
- if (pass++ < 3)
- goto try_again;
- ret = -EBUSY;
- } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
- /* We raced with put_page, retry. */
+ if (!count_increased) {
+ ret = __get_hwpoison_page(p);
+ if (!ret) {
+ if (page_count(p)) {
+ /* We raced with an allocation, retry. */
+ if (pass++ < 3)
+ goto try_again;
+ ret = -EBUSY;
+ } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
+ /* We raced with put_page, retry. */
+ if (pass++ < 3)
+ goto try_again;
+ ret = -EIO;
+ }
+ goto out;
+ } else if (ret == -EBUSY) {
+ /* We raced with freeing huge page to buddy, retry. */
if (pass++ < 3)
goto try_again;
- ret = -EIO;
+ goto out;
}
+ }
+
+ if (PageHuge(p) || HWPoisonHandlable(p)) {
+ ret = 1;
} else {
- if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
- ret = 1;
- } else {
- /*
- * A page we cannot handle. Check whether we can turn
- * it into something we can handle.
- */
- if (pass++ < 3) {
- put_page(p);
- shake_page(p, 1);
- count_increased = false;
- goto try_again;
- }
+ /*
+ * A page we cannot handle. Check whether we can turn
+ * it into something we can handle.
+ */
+ if (pass++ < 3) {
put_page(p);
- ret = -EIO;
+ shake_page(p, 1);
+ count_increased = false;
+ goto try_again;
}
+ put_page(p);
+ ret = -EIO;
}
-
+out:
return ret;
}
-static int get_hwpoison_page(struct page *p, unsigned long flags,
- enum mf_flags ctxt)
+/**
+ * get_hwpoison_page() - Get refcount for memory error handling
+ * @p: Raw error page (hit by memory error)
+ * @flags: Flags controlling behavior of error handling
+ *
+ * get_hwpoison_page() takes a refcount on an error page in order to handle
+ * the memory error on it, after checking that the page is in a well-defined
+ * state (i.e. a page type on which we can successfully handle the memory
+ * error, such as an LRU page or a hugetlb page).
+ *
+ * Memory error handling can be triggered at any time on any type of page,
+ * so it's prone to racing with the typical memory management lifecycle
+ * (e.g. allocation and free). To avoid such races, get_hwpoison_page()
+ * takes extra care with the error page's state (as done in
+ * __get_hwpoison_page()), and has some retry logic in get_any_page().
+ *
+ * Return: 0 on failure,
+ * 1 on success for in-use pages in a well-defined state,
+ * -EIO for pages on which we can not handle memory errors,
+ * -EBUSY when get_hwpoison_page() has raced with page lifecycle
+ * operations like allocation and free.
+ */
+static int get_hwpoison_page(struct page *p, unsigned long flags)
{
int ret;
zone_pcp_disable(page_zone(p));
- if (ctxt == MF_SOFT_OFFLINE)
- ret = get_any_page(p, flags);
- else
- ret = __get_hwpoison_page(p);
+ ret = get_any_page(p, flags);
zone_pcp_enable(page_zone(p));
return ret;
@@ -1228,32 +1421,41 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
if (TestSetPageHWPoison(head)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
- return 0;
+ res = -EHWPOISON;
+ if (flags & MF_ACTION_REQUIRED)
+ res = kill_accessing_process(current, page_to_pfn(head), flags);
+ return res;
}
num_poisoned_pages_inc();
- if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
- /*
- * Check "filter hit" and "race with other subpage."
- */
- lock_page(head);
- if (PageHWPoison(head)) {
- if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != head && TestSetPageHWPoison(head))) {
- num_poisoned_pages_dec();
- unlock_page(head);
- return 0;
+ if (!(flags & MF_COUNT_INCREASED)) {
+ res = get_hwpoison_page(p, flags);
+ if (!res) {
+ /*
+ * Check "filter hit" and "race with other subpage."
+ */
+ lock_page(head);
+ if (PageHWPoison(head)) {
+ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != head && TestSetPageHWPoison(head))) {
+ num_poisoned_pages_dec();
+ unlock_page(head);
+ return 0;
+ }
}
+ unlock_page(head);
+ res = MF_FAILED;
+ if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+ page_ref_inc(p);
+ res = MF_RECOVERED;
+ }
+ action_result(pfn, MF_MSG_FREE_HUGE, res);
+ return res == MF_RECOVERED ? 0 : -EBUSY;
+ } else if (res < 0) {
+ action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
+ return -EBUSY;
}
- unlock_page(head);
- res = MF_FAILED;
- if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
- page_ref_inc(p);
- res = MF_RECOVERED;
- }
- action_result(pfn, MF_MSG_FREE_HUGE, res);
- return res == MF_RECOVERED ? 0 : -EBUSY;
}
lock_page(head);
@@ -1288,7 +1490,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
goto out;
}
- res = identify_page_state(pfn, p, page_flags);
+ return identify_page_state(pfn, p, page_flags);
out:
unlock_page(head);
return res;
@@ -1404,9 +1606,10 @@ int memory_failure(unsigned long pfn, int flags)
struct page *hpage;
struct page *orig_head;
struct dev_pagemap *pgmap;
- int res;
+ int res = 0;
unsigned long page_flags;
bool retry = true;
+ static DEFINE_MUTEX(mf_mutex);
if (!sysctl_memory_failure_recovery)
panic("Memory failure on page %lx", pfn);
@@ -1424,13 +1627,21 @@ int memory_failure(unsigned long pfn, int flags)
return -ENXIO;
}
+ mutex_lock(&mf_mutex);
+
try_again:
- if (PageHuge(p))
- return memory_failure_hugetlb(pfn, flags);
+ if (PageHuge(p)) {
+ res = memory_failure_hugetlb(pfn, flags);
+ goto unlock_mutex;
+ }
+
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
- return 0;
+ res = -EHWPOISON;
+ if (flags & MF_ACTION_REQUIRED)
+ res = kill_accessing_process(current, pfn, flags);
+ goto unlock_mutex;
}
orig_head = hpage = compound_head(p);
@@ -1447,33 +1658,42 @@ try_again:
* In fact it's dangerous to directly bump up page count from 0,
* that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
*/
- if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
- if (is_free_buddy_page(p)) {
- if (take_page_off_buddy(p)) {
- page_ref_inc(p);
- res = MF_RECOVERED;
- } else {
- /* We lost the race, try again */
- if (retry) {
- ClearPageHWPoison(p);
- num_poisoned_pages_dec();
- retry = false;
- goto try_again;
+ if (!(flags & MF_COUNT_INCREASED)) {
+ res = get_hwpoison_page(p, flags);
+ if (!res) {
+ if (is_free_buddy_page(p)) {
+ if (take_page_off_buddy(p)) {
+ page_ref_inc(p);
+ res = MF_RECOVERED;
+ } else {
+ /* We lost the race, try again */
+ if (retry) {
+ ClearPageHWPoison(p);
+ num_poisoned_pages_dec();
+ retry = false;
+ goto try_again;
+ }
+ res = MF_FAILED;
}
- res = MF_FAILED;
+ action_result(pfn, MF_MSG_BUDDY, res);
+ res = res == MF_RECOVERED ? 0 : -EBUSY;
+ } else {
+ action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
+ res = -EBUSY;
}
- action_result(pfn, MF_MSG_BUDDY, res);
- return res == MF_RECOVERED ? 0 : -EBUSY;
- } else {
- action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
- return -EBUSY;
+ goto unlock_mutex;
+ } else if (res < 0) {
+ action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
+ res = -EBUSY;
+ goto unlock_mutex;
}
}
if (PageTransHuge(hpage)) {
if (try_to_split_thp_page(p, "Memory Failure") < 0) {
action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
- return -EBUSY;
+ res = -EBUSY;
+ goto unlock_mutex;
}
VM_BUG_ON_PAGE(!page_count(p), p);
}
@@ -1497,7 +1717,7 @@ try_again:
if (PageCompound(p) && compound_head(p) != orig_head) {
action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
/*
@@ -1517,17 +1737,22 @@ try_again:
num_poisoned_pages_dec();
unlock_page(p);
put_page(p);
- return 0;
+ goto unlock_mutex;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
num_poisoned_pages_dec();
unlock_page(p);
put_page(p);
- return 0;
+ goto unlock_mutex;
}
- if (!PageTransTail(p) && !PageLRU(p))
+ /*
+	 * __munlock_pagevec may clear a writeback page's LRU flag without
+	 * the page lock. We need to wait for writeback completion on this
+	 * page, or it may trigger a vfs BUG while evicting the inode.
+ */
+ if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
goto identify_page_state;
/*
@@ -1543,7 +1768,7 @@ try_again:
if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
/*
@@ -1552,13 +1777,17 @@ try_again:
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
identify_page_state:
res = identify_page_state(pfn, p, page_flags);
-out:
+ mutex_unlock(&mf_mutex);
+ return res;
+unlock_page:
unlock_page(p);
+unlock_mutex:
+ mutex_unlock(&mf_mutex);
return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
@@ -1735,7 +1964,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
- if (!get_hwpoison_page(p, flags, 0)) {
+ if (!get_hwpoison_page(p, flags)) {
if (TestClearPageHWPoison(p))
num_poisoned_pages_dec();
unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
@@ -1951,7 +2180,7 @@ int soft_offline_page(unsigned long pfn, int flags)
retry:
get_online_mems();
- ret = get_hwpoison_page(page, flags, MF_SOFT_OFFLINE);
+ ret = get_hwpoison_page(page, flags);
put_online_mems();
if (ret > 0) {
diff --git a/mm/memory.c b/mm/memory.c
index 730daa00952b..48c4576df898 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -90,8 +90,7 @@
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-/* use the per-pgdat data instead for discontigmem - mbligh */
+#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
@@ -1361,7 +1360,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
+ } else if (details && details->single_page &&
+ PageTransCompound(details->single_page) &&
+ next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+ spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+ /*
+ * Take and drop THP pmd lock so that we cannot return
+ * prematurely, while zap_huge_pmd() has cleared *pmd,
+ * but not yet decremented compound_mapcount().
+ */
+ spin_unlock(ptl);
}
+
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
@@ -2939,6 +2949,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
+ entry = pte_sw_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/*
@@ -3011,6 +3022,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
munlock_vma_page(old_page);
unlock_page(old_page);
}
+ if (page_copied)
+ free_swap_cache(old_page);
put_page(old_page);
}
return page_copied ? VM_FAULT_WRITE : 0;
@@ -3035,7 +3048,7 @@ oom:
* The function expects the page to be locked or other protection against
* concurrent faults / writeback (such as DAX radix tree locks).
*
- * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
+ * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
* we acquired PTE lock.
*/
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
@@ -3236,6 +3249,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
}
/**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmaped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_page()
+ * to unmap it finally.
+ */
+void unmap_mapping_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct zap_details details = { };
+
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(PageTail(page));
+
+ details.check_mapping = mapping;
+ details.first_index = page->index;
+ details.last_index = page->index + thp_nr_pages(page) - 1;
+ details.single_page = page;
+
+ i_mmap_lock_write(mapping);
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+ i_mmap_unlock_write(mapping);
+}
+
+/**
* unmap_mapping_pages() - Unmap pages from processes.
* @mapping: The address space containing pages to be unmapped.
* @start: Index of first page to be unmapped.
@@ -3311,6 +3354,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL, *swapcache;
+ struct swap_info_struct *si = NULL;
swp_entry_t entry;
pte_t pte;
int locked;
@@ -3338,14 +3382,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out;
}
+ /* Prevent swapoff from happening to us. */
+ si = get_swap_device(entry);
+ if (unlikely(!si))
+ goto out;
delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry, vma, vmf->address);
swapcache = page;
if (!page) {
- struct swap_info_struct *si = swp_swap_info(entry);
-
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
/* skip swapcache */
@@ -3514,6 +3560,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
+ if (si)
+ put_swap_device(si);
return ret;
out_nomap:
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -3525,6 +3573,8 @@ out_release:
unlock_page(swapcache);
put_page(swapcache);
}
+ if (si)
+ put_swap_device(si);
return ret;
}
@@ -3602,6 +3652,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
+ entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
@@ -3786,6 +3837,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
if (prefault && arch_wants_old_prefaulted_pte())
entry = pte_mkold(entry);
+ else
+ entry = pte_sw_mkyoung(entry);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -4940,8 +4993,8 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
- vma = find_vma(mm, addr);
- if (!vma || vma->vm_start > addr)
+ vma = vma_lookup(mm, addr);
+ if (!vma)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70620d0dd923..974a565797d8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -961,7 +961,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
node_states_set_node(nid, &arg);
if (need_zonelists_rebuild)
build_all_zonelists(NULL);
- zone_pcp_update(zone);
/* Basic onlining is complete, allow allocation of onlined pages. */
undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
@@ -974,6 +973,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
*/
shuffle_zone(zone);
+ /* reinitialise watermarks and update pcp limits */
init_per_zone_wmark_min();
kswapd_run(nid);
@@ -1829,13 +1829,13 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
adjust_present_page_count(zone, -nr_pages);
+ /* reinitialise watermarks and update pcp limits */
init_per_zone_wmark_min();
if (!populated_zone(zone)) {
zone_pcp_reset(zone);
build_all_zonelists(NULL);
- } else
- zone_pcp_update(zone);
+ }
node_states_clear_node(node, &arg);
if (arg.status_change_nid >= 0) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d79fa299b70c..b5d95bf1025d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -975,7 +975,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
* want to return MPOL_DEFAULT in this case.
*/
mmap_read_lock(mm);
- vma = find_vma_intersection(mm, addr, addr+1);
+ vma = vma_lookup(mm, addr);
if (!vma) {
mmap_read_unlock(mm);
return -EFAULT;
@@ -2150,7 +2150,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
return page;
if (page && page_to_nid(page) == nid) {
preempt_disable();
- __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
+ __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
preempt_enable();
}
return page;
diff --git a/mm/mempool.c b/mm/mempool.c
index a258cf4de575..0b8afbec3e35 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_slab_free_mempool(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_free_pages(element, (unsigned long)pool->pool_data, false);
+ kasan_poison_pages(element, (unsigned long)pool->pool_data,
+ false);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_unpoison_range(element, __ksize(element));
else if (pool->alloc == mempool_alloc_pages)
- kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
+ kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+ false);
}
static __always_inline void add_element(mempool_t *pool, void *element)
diff --git a/mm/migrate.c b/mm/migrate.c
index b234c3f3acb7..380ca57b9031 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -295,6 +295,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
goto out;
page = migration_entry_to_page(entry);
+ page = compound_head(page);
/*
* Once page cache replacement of page migration started, page_count
@@ -1833,8 +1834,8 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
struct page *page;
int err = -EFAULT;
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start)
+ vma = vma_lookup(mm, addr);
+ if (!vma)
goto set_status;
/* FOLL_DUMP to ignore special (like zero) pages */
diff --git a/mm/mlock.c b/mm/mlock.c
index df590fda5688..e338ebc4ad29 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -817,9 +817,10 @@ SYSCALL_DEFINE0(munlockall)
*/
static DEFINE_SPINLOCK(shmlock_user_lock);
-int user_shm_lock(size_t size, struct user_struct *user)
+int user_shm_lock(size_t size, struct ucounts *ucounts)
{
unsigned long lock_limit, locked;
+ long memlock;
int allowed = 0;
locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -828,21 +829,26 @@ int user_shm_lock(size_t size, struct user_struct *user)
allowed = 1;
lock_limit >>= PAGE_SHIFT;
spin_lock(&shmlock_user_lock);
- if (!allowed &&
- locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
+ memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
+
+ if (!allowed && (memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
+ goto out;
+ }
+ if (!get_ucounts(ucounts)) {
+ dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
goto out;
- get_uid(user);
- user->locked_shm += locked;
+ }
allowed = 1;
out:
spin_unlock(&shmlock_user_lock);
return allowed;
}
-void user_shm_unlock(size_t size, struct user_struct *user)
+void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
spin_lock(&shmlock_user_lock);
- user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
spin_unlock(&shmlock_user_lock);
- free_uid(user);
+ put_ucounts(ucounts);
}
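The ucounts conversion above charges the rlimit counter first and rolls the charge back on failure. A hedged sketch of that charge-then-unwind shape, simplified by omitting the RLIM_INFINITY fast path:

/* Sketch of the charge-then-unwind idiom used above. LONG_MAX from
 * inc_rlimit_ucounts() signals counter overflow; on any failure the
 * charge is rolled back before giving up.
 */
static bool charge_memlock_sketch(struct ucounts *ucounts, long pages,
				  unsigned long limit)
{
	long total = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, pages);

	if ((total == LONG_MAX || total > limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, pages);
		return false;
	}
	if (!get_ucounts(ucounts)) {	/* hold the ucounts while locked */
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, pages);
		return false;
	}
	return true;
}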
diff --git a/mm/mmap.c b/mm/mmap.c
index 0584e540246e..aa9de981b659 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1457,9 +1457,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
return addr;
if (flags & MAP_FIXED_NOREPLACE) {
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (vma && vma->vm_start < addr + len)
+ if (find_vma_intersection(mm, addr, addr + len))
return -EEXIST;
}
@@ -1611,7 +1609,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
goto out_fput;
}
} else if (flags & MAP_HUGETLB) {
- struct user_struct *user = NULL;
+ struct ucounts *ucounts = NULL;
struct hstate *hs;
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
@@ -1627,13 +1625,13 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
*/
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
- &user, HUGETLB_ANONHUGE_INODE,
+ &ucounts, HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ flags &= ~MAP_DENYWRITE;
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
@@ -2802,6 +2800,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return __split_vma(mm, vma, addr, new_below);
}
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+ struct mm_struct *mm = start->vm_mm;
+ struct vm_area_struct *tmp = start;
+
+ while (tmp && tmp->vm_start < limit) {
+ if (tmp->vm_flags & VM_LOCKED) {
+ mm->locked_vm -= vma_pages(tmp);
+ munlock_vma_pages_all(tmp);
+ }
+
+ tmp = tmp->vm_next;
+ }
+}
+
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
@@ -2828,16 +2842,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
*/
arch_unmap(mm, start, end);
- /* Find the first overlapping VMA */
- vma = find_vma(mm, start);
+ /* Find the first overlapping VMA where start < vma->vm_end */
+ vma = find_vma_intersection(mm, start, end);
if (!vma)
return 0;
prev = vma->vm_prev;
- /* we have start < vma->vm_end */
-
- /* if it doesn't overlap, we have nothing.. */
- if (vma->vm_start >= end)
- return 0;
/*
* If we need to split any vma, do it now to save pain later.
@@ -2890,17 +2899,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
/*
* unlock any mlock()ed ranges before detaching vmas
*/
- if (mm->locked_vm) {
- struct vm_area_struct *tmp = vma;
- while (tmp && tmp->vm_start < end) {
- if (tmp->vm_flags & VM_LOCKED) {
- mm->locked_vm -= vma_pages(tmp);
- munlock_vma_pages_all(tmp);
- }
-
- tmp = tmp->vm_next;
- }
- }
+ if (mm->locked_vm)
+ unlock_range(vma, end);
/* Detach vmas from rbtree */
if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3185,14 +3185,8 @@ void exit_mmap(struct mm_struct *mm)
mmap_write_unlock(mm);
}
- if (mm->locked_vm) {
- vma = mm->mmap;
- while (vma) {
- if (vma->vm_flags & VM_LOCKED)
- munlock_vma_pages_all(vma);
- vma = vma->vm_next;
- }
- }
+ if (mm->locked_vm)
+ unlock_range(mm->mmap, ULONG_MAX);
arch_exit_mmap(mm);
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index dcdde4f722a4..2ae3f33b85b1 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -11,6 +11,7 @@
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
+#include <linux/local_lock.h>
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
@@ -39,21 +40,30 @@ static int reg_refcount; /* Protected by reg_lock. */
*/
#define CONTEXT_COUNT 4
-static DEFINE_PER_CPU(char __rcu *, memcg_path_buf);
+struct memcg_path {
+ local_lock_t lock;
+ char __rcu *buf;
+ local_t buf_idx;
+};
+static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+ .buf_idx = LOCAL_INIT(0),
+};
+
static char **tmp_bufs;
-static DEFINE_PER_CPU(int, memcg_path_buf_idx);
/* Called with reg_lock held. */
static void free_memcg_path_bufs(void)
{
+ struct memcg_path *memcg_path;
int cpu;
char **old = tmp_bufs;
for_each_possible_cpu(cpu) {
- *(old++) = rcu_dereference_protected(
- per_cpu(memcg_path_buf, cpu),
+ memcg_path = per_cpu_ptr(&memcg_paths, cpu);
+ *(old++) = rcu_dereference_protected(memcg_path->buf,
lockdep_is_held(&reg_lock));
- rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
+ rcu_assign_pointer(memcg_path->buf, NULL);
}
/* Wait for inflight memcg_path_buf users to finish. */
@@ -88,7 +98,7 @@ int trace_mmap_lock_reg(void)
new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
if (new == NULL)
goto out_fail_free;
- rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), new);
+ rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
/* Don't need to wait for inflights, they'd have gotten NULL. */
}
@@ -122,23 +132,24 @@ out:
static inline char *get_memcg_path_buf(void)
{
+ struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
char *buf;
int idx;
rcu_read_lock();
- buf = rcu_dereference(*this_cpu_ptr(&memcg_path_buf));
+ buf = rcu_dereference(memcg_path->buf);
if (buf == NULL) {
rcu_read_unlock();
return NULL;
}
- idx = this_cpu_add_return(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE) -
+ idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
MEMCG_PATH_BUF_SIZE;
return &buf[idx];
}
static inline void put_memcg_path_buf(void)
{
- this_cpu_sub(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE);
+ local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
rcu_read_unlock();
}
@@ -179,14 +190,14 @@ out:
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
do { \
const char *memcg_path; \
- preempt_disable(); \
+ local_lock(&memcg_paths.lock); \
memcg_path = get_mm_memcg_path(mm); \
trace_mmap_lock_##type(mm, \
memcg_path != NULL ? memcg_path : "", \
##__VA_ARGS__); \
if (likely(memcg_path != NULL)) \
put_memcg_path_buf(); \
- preempt_enable(); \
+ local_unlock(&memcg_paths.lock); \
} while (0)
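Replacing preempt_disable() with a local_lock keeps the per-CPU buffer exclusive on PREEMPT_RT, where disabling preemption alone is not enough. A generic sketch of the pattern, not tied to this file:

/* Generic local_lock sketch: serializes access to per-CPU data such
 * as the buffer index above while remaining PREEMPT_RT friendly.
 */
#include <linux/local_lock.h>

struct pcpu_scratch {
	local_lock_t lock;
	int value;
};
static DEFINE_PER_CPU(struct pcpu_scratch, scratch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void use_scratch(void)
{
	local_lock(&scratch.lock);	/* pins this CPU's instance */
	this_cpu_ptr(&scratch)->value++;
	local_unlock(&scratch.lock);
}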
#else /* !CONFIG_MEMCG */
diff --git a/mm/mremap.c b/mm/mremap.c
index 47c255b60150..a369a6100698 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -634,10 +634,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
unsigned long *p)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma = find_vma(mm, addr);
+ struct vm_area_struct *vma;
unsigned long pgoff;
- if (!vma || vma->vm_start > addr)
+ vma = vma_lookup(mm, addr);
+ if (!vma)
return ERR_PTR(-EFAULT);
/*
diff --git a/mm/nommu.c b/mm/nommu.c
index 85a3a68dffb6..affda71641ca 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1296,7 +1296,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
goto out;
}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ flags &= ~MAP_DENYWRITE;
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0062d5c57d41..9f63548f247c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -32,7 +32,6 @@
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
-#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
@@ -109,11 +108,6 @@ EXPORT_SYMBOL_GPL(dirty_writeback_interval);
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
- * Flag that makes the machine dump writes/reads and block dirtyings.
- */
-int block_dump;
-
-/*
* Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
* a full sync is triggered after this time elapses without any disk activity.
*/
@@ -845,7 +839,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
* ^ pos_ratio
* |
* | |<===== global dirty control scope ======>|
- * 2.0 .............*
+ * 2.0 * * * * * * *
* | .*
* | . *
* | . *
@@ -1869,10 +1863,9 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
* which was newly dirtied. The function will periodically check the system's
* dirty state and will initiate writeback if needed.
*
- * On really big machines, get_writeback_state is expensive, so try to avoid
- * calling it too often (ratelimiting). But once we're over the dirty memory
- * limit we decrease the ratelimiting by a lot, to prevent individual processes
- * from overshooting the limit by (ratelimit_pages) each.
+ * Once we're over the dirty memory limit we decrease the ratelimiting
+ * by a lot, to prevent individual processes from overshooting the limit
+ * by (ratelimit_pages) each.
*/
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
@@ -1945,6 +1938,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
struct dirty_throttle_control * const gdtc = &gdtc_stor;
struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
&mdtc_stor : NULL;
+ unsigned long reclaimable;
+ unsigned long thresh;
/*
* Similar to balance_dirty_pages() but ignores pages being written
@@ -1957,8 +1952,13 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (gdtc->dirty > gdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) >
- wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
+ if (thresh < 2 * wb_stat_error())
+ reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
+ else
+ reclaimable = wb_stat(wb, WB_RECLAIMABLE);
+
+ if (reclaimable > thresh)
return true;
if (mdtc) {
@@ -1972,8 +1972,13 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (mdtc->dirty > mdtc->bg_thresh)
return true;
- if (wb_stat(wb, WB_RECLAIMABLE) >
- wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
+ if (thresh < 2 * wb_stat_error())
+ reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
+ else
+ reclaimable = wb_stat(wb, WB_RECLAIMABLE);
+
+ if (reclaimable > thresh)
return true;
}
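The rationale for falling back to wb_stat_sum(): an approximate per-CPU read can be off by up to the stat error per CPU, so when the threshold itself is within twice that error the cheap read could flip the comparison. A toy userspace model of the bound, with all numbers assumed:

#include <stdio.h>

/* Toy model: an approximate per-CPU counter read can be off by up to
 * one error quantum per CPU. When the threshold is within twice that
 * total error, only the exact (costlier) sum is trustworthy.
 */
int main(void)
{
	long ncpus = 8, per_cpu_err = 32;	/* assumed values */
	long exact = 400;			/* true reclaimable pages */
	long max_err = ncpus * per_cpu_err;
	long thresh = 450;			/* thresh < 2 * stat error */

	printf("approx read lies in [%ld, %ld]; exact = %ld, thresh = %ld\n",
	       exact - max_err, exact + max_err, exact, thresh);
	printf("approx may say %s-threshold, exact says %s-threshold\n",
	       exact + max_err > thresh ? "over" : "under",
	       exact > thresh ? "over" : "under");
	return 0;
}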
@@ -2045,8 +2050,6 @@ void laptop_sync_completion(void)
/*
* If ratelimit_pages is too high then we can get into dirty-data overload
* if a large number of processes all perform writes at the same time.
- * If it is too low then SMP machines will call the (expensive)
- * get_writeback_state too often.
*
* Here we set ratelimit_pages to a level which ensures that when all CPUs are
* dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
@@ -2409,6 +2412,7 @@ int __set_page_dirty_no_writeback(struct page *page)
return !TestSetPageDirty(page);
return 0;
}
+EXPORT_SYMBOL(__set_page_dirty_no_writeback);
/*
* Helper function for set_page_dirty family.
@@ -2417,7 +2421,8 @@ int __set_page_dirty_no_writeback(struct page *page)
*
* NOTE: This relies on being atomic wrt interrupts.
*/
-void account_page_dirtied(struct page *page, struct address_space *mapping)
+static void account_page_dirtied(struct page *page,
+ struct address_space *mapping)
{
struct inode *inode = mapping->host;
@@ -2436,7 +2441,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
inc_wb_stat(wb, WB_DIRTIED);
task_io_account_write(PAGE_SIZE);
current->nr_dirtied++;
- this_cpu_inc(bdp_ratelimits);
+ __this_cpu_inc(bdp_ratelimits);
mem_cgroup_track_foreign_dirty(page, wb);
}
@@ -2459,6 +2464,30 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
}
/*
+ * Mark the page dirty, and set it dirty in the page cache, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ *
+ * The caller must hold lock_page_memcg().
+ */
+void __set_page_dirty(struct page *page, struct address_space *mapping,
+ int warn)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&mapping->i_pages, flags);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(warn && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ __xa_set_mark(&mapping->i_pages, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+ }
+ xa_unlock_irqrestore(&mapping->i_pages, flags);
+}
+
+/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* the xarray.
*
@@ -2475,20 +2504,12 @@ int __set_page_dirty_nobuffers(struct page *page)
lock_page_memcg(page);
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
- unsigned long flags;
if (!mapping) {
unlock_page_memcg(page);
return 1;
}
-
- xa_lock_irqsave(&mapping->i_pages, flags);
- BUG_ON(page_mapping(page) != mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- __xa_set_mark(&mapping->i_pages, page_index(page),
- PAGECACHE_TAG_DIRTY);
- xa_unlock_irqrestore(&mapping->i_pages, flags);
+ __set_page_dirty(page, mapping, !PagePrivate(page));
unlock_page_memcg(page);
if (mapping->host) {
@@ -2546,13 +2567,9 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
/*
* Dirty a page.
*
- * For pages with a mapping this should be done under the page lock
- * for the benefit of asynchronous memory errors who prefer a consistent
- * dirty state. This rule can be broken in some special cases,
- * but should be better not to.
- *
- * If the mapping doesn't provide a set_page_dirty a_op, then
- * just fall through and assume that it wants buffer_heads.
+ * For pages with a mapping this should be done under the page lock for the
+ * benefit of asynchronous memory-error handling, which prefers a consistent
+ * dirty state. This rule can be broken in some special cases, but it is
+ * better not to.
*/
int set_page_dirty(struct page *page)
{
@@ -2560,7 +2577,6 @@ int set_page_dirty(struct page *page)
page = compound_head(page);
if (likely(mapping)) {
- int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
/*
* readahead/lru_deactivate_page could remain
* PG_readahead/PG_reclaim due to race with end_page_writeback
@@ -2573,11 +2589,7 @@ int set_page_dirty(struct page *page)
*/
if (PageReclaim(page))
ClearPageReclaim(page);
-#ifdef CONFIG_BLOCK
- if (!spd)
- spd = __set_page_dirty_buffers;
-#endif
- return (*spd)(page);
+ return mapping->a_ops->set_page_dirty(page);
}
if (!PageDirty(page)) {
if (!TestSetPageDirty(page))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..0817d88383d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -120,7 +120,25 @@ typedef int __bitwise fpi_t;
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
-#define MIN_PERCPU_PAGELIST_FRACTION (8)
+#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
+
+struct pagesets {
+ local_lock_t lock;
+#if defined(CONFIG_DEBUG_INFO_BTF) && \
+ !defined(CONFIG_DEBUG_LOCK_ALLOC) && \
+ !defined(CONFIG_PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT)
+ /*
+ * pahole 1.21 and earlier gets confused by zero-sized per-CPU
+ * variables and produces invalid BTF. Ensure that
+ * sizeof(struct pagesets) != 0 for older versions of pahole.
+ */
+ char __pahole_hack;
+ #warning "pahole too old to support zero-sized struct pagesets"
+#endif
+};
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
@@ -175,7 +193,7 @@ EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
-int percpu_pagelist_fraction;
+int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);
@@ -331,20 +349,7 @@ compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
-#ifdef CONFIG_DISCONTIGMEM
-/*
- * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
- * are not on separate NUMA nodes. Functionally this works but with
- * watermark_boost_factor, it can reclaim prematurely as the ranges can be
- * quite small. By default, do not boost watermarks on discontigmem as in
- * many cases very high-order allocations like THP are likely to be
- * unsupported and the premature reclaim offsets the advantage of long-term
- * fragmentation avoidance.
- */
-int watermark_boost_factor __read_mostly;
-#else
int watermark_boost_factor __read_mostly = 15000;
-#endif
int watermark_scale_factor = 10;
static unsigned long nr_kernel_pages __initdata;
@@ -382,7 +387,7 @@ int page_group_by_mobility_disabled __read_mostly;
static DEFINE_STATIC_KEY_TRUE(deferred_pages);
/*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
* has completed. Poisoning pages during deferred memory init will greatly
* lengthen the process and cause problem in large memory systems as the
* deferred pages initialization is done with interrupt disabled.
@@ -394,15 +399,12 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
* on-demand allocation and then freed again before the deferred pages
* initialization is done, but this is not likely to happen.
*/
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
- if (static_branch_unlikely(&deferred_pages))
- return;
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- (fpi_flags & FPI_SKIP_KASAN_POISON))
- return;
- kasan_free_pages(page, order, init);
+ return static_branch_unlikely(&deferred_pages) ||
+ (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+ PageSkipKASanPoison(page);
}
/* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +455,11 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
return false;
}
#else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- (fpi_flags & FPI_SKIP_KASAN_POISON))
- return;
- kasan_free_pages(page, order, init);
+ return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+ PageSkipKASanPoison(page);
}
static inline bool early_page_uninitialised(unsigned long pfn)
@@ -474,7 +474,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
#endif
/* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct page *page,
+static inline unsigned long *get_pageblock_bitmap(const struct page *page,
unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
@@ -484,7 +484,7 @@ static inline unsigned long *get_pageblock_bitmap(struct page *page,
#endif /* CONFIG_SPARSEMEM */
}
-static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
+static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
@@ -495,7 +495,7 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
}
static __always_inline
-unsigned long __get_pfnblock_flags_mask(struct page *page,
+unsigned long __get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long mask)
{
@@ -520,13 +520,14 @@ unsigned long __get_pfnblock_flags_mask(struct page *page,
*
* Return: pageblock_bits flags
*/
-unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
- unsigned long mask)
+unsigned long get_pfnblock_flags_mask(const struct page *page,
+ unsigned long pfn, unsigned long mask)
{
return __get_pfnblock_flags_mask(page, pfn, mask);
}
-static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
+static __always_inline int get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn)
{
return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}
@@ -658,8 +659,7 @@ static void bad_page(struct page *page, const char *reason)
pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
- __dump_page(page, reason);
- dump_page_owner(page);
+ dump_page(page, reason);
print_modules();
dump_stack();
@@ -669,6 +669,57 @@ out:
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
+static inline unsigned int order_to_pindex(int migratetype, int order)
+{
+ int base = order;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ VM_BUG_ON(order != pageblock_order);
+ base = PAGE_ALLOC_COSTLY_ORDER + 1;
+ }
+#else
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+#endif
+
+ return (MIGRATE_PCPTYPES * base) + migratetype;
+}
+
+static inline int pindex_to_order(unsigned int pindex)
+{
+ int order = pindex / MIGRATE_PCPTYPES;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ order = pageblock_order;
+ VM_BUG_ON(order != pageblock_order);
+ }
+#else
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+#endif
+
+ return order;
+}
+
+static inline bool pcp_allowed_order(unsigned int order)
+{
+ if (order <= PAGE_ALLOC_COSTLY_ORDER)
+ return true;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (order == pageblock_order)
+ return true;
+#endif
+ return false;
+}
+
+static inline void free_the_page(struct page *page, unsigned int order)
+{
+ if (pcp_allowed_order(order)) /* Via pcp? */
+ free_unref_page(page, order);
+ else
+ __free_pages_ok(page, order, FPI_NONE);
+}
+
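The order<->pindex mapping introduced above round-trips by construction; the following standalone sketch replicates it with assumed constants (MIGRATE_PCPTYPES, PAGE_ALLOC_COSTLY_ORDER and a typical x86-64 pageblock_order) and checks the round trip:

#include <assert.h>
#include <stdio.h>

#define MIGRATE_PCPTYPES	3	/* assumed, as in mmzone.h */
#define PAGE_ALLOC_COSTLY_ORDER	3
#define pageblock_order		9	/* typical x86-64 THP order */

static unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

	if (order > PAGE_ALLOC_COSTLY_ORDER)	/* the single THP slot */
		base = PAGE_ALLOC_COSTLY_ORDER + 1;
	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		order = pageblock_order;
	return order;
}

int main(void)
{
	for (int mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
		for (int order = 0; order <= PAGE_ALLOC_COSTLY_ORDER; order++)
			assert(pindex_to_order(order_to_pindex(mt, order)) == order);
		assert(pindex_to_order(order_to_pindex(mt, pageblock_order)) ==
		       pageblock_order);
	}
	puts("order<->pindex round-trip holds");
	return 0;
}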
/*
* Higher-order pages are called "compound pages". They are structured thusly:
*
@@ -687,7 +738,7 @@ out:
void free_compound_page(struct page *page)
{
mem_cgroup_uncharge(page);
- __free_pages_ok(page, compound_order(page), FPI_NONE);
+ free_the_page(page, compound_order(page));
}
void prep_compound_page(struct page *page, unsigned int order)
@@ -1226,10 +1277,16 @@ out:
return ret;
}
-static void kernel_init_free_pages(struct page *page, int numpages)
+static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
{
int i;
+ if (zero_tags) {
+ for (i = 0; i < numpages; i++)
+ tag_clear_highpage(page + i);
+ return;
+ }
+
/* s390's use of memset() could override KASAN redzones. */
kasan_disable_current();
for (i = 0; i < numpages; i++) {
@@ -1245,7 +1302,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
- bool init;
+ bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1314,10 +1371,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
- init = want_init_on_free();
- if (init && !kasan_has_integrated_init())
- kernel_init_free_pages(page, 1 << order);
- kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+ if (kasan_has_integrated_init()) {
+ if (!skip_kasan_poison)
+ kasan_free_pages(page, order);
+ } else {
+ bool init = want_init_on_free();
+
+ if (init)
+ kernel_init_free_pages(page, 1 << order, false);
+ if (!skip_kasan_poison)
+ kasan_poison_pages(page, order, init);
+ }
/*
* arch_free_page() can make the page's contents inaccessible. s390
@@ -1337,9 +1401,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
* to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
* moved from pcp lists to free lists.
*/
-static bool free_pcp_prepare(struct page *page)
+static bool free_pcp_prepare(struct page *page, unsigned int order)
{
- return free_pages_prepare(page, 0, true, FPI_NONE);
+ return free_pages_prepare(page, order, true, FPI_NONE);
}
static bool bulkfree_pcp_prepare(struct page *page)
@@ -1356,12 +1420,12 @@ static bool bulkfree_pcp_prepare(struct page *page)
* debug_pagealloc enabled, they are checked also immediately when being freed
* to the pcp lists.
*/
-static bool free_pcp_prepare(struct page *page)
+static bool free_pcp_prepare(struct page *page, unsigned int order)
{
if (debug_pagealloc_enabled_static())
- return free_pages_prepare(page, 0, true, FPI_NONE);
+ return free_pages_prepare(page, order, true, FPI_NONE);
else
- return free_pages_prepare(page, 0, false, FPI_NONE);
+ return free_pages_prepare(page, order, false, FPI_NONE);
}
static bool bulkfree_pcp_prepare(struct page *page)
@@ -1393,8 +1457,10 @@ static inline void prefetch_buddy(struct page *page)
static void free_pcppages_bulk(struct zone *zone, int count,
struct per_cpu_pages *pcp)
{
- int migratetype = 0;
+ int pindex = 0;
int batch_free = 0;
+ int nr_freed = 0;
+ unsigned int order;
int prefetch_nr = READ_ONCE(pcp->batch);
bool isolated_pageblocks;
struct page *page, *tmp;
@@ -1405,7 +1471,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
* below while (list_empty(list)) loop.
*/
count = min(pcp->count, count);
- while (count) {
+ while (count > 0) {
struct list_head *list;
/*
@@ -1417,24 +1483,31 @@ static void free_pcppages_bulk(struct zone *zone, int count,
*/
do {
batch_free++;
- if (++migratetype == MIGRATE_PCPTYPES)
- migratetype = 0;
- list = &pcp->lists[migratetype];
+ if (++pindex == NR_PCP_LISTS)
+ pindex = 0;
+ list = &pcp->lists[pindex];
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
- if (batch_free == MIGRATE_PCPTYPES)
+ if (batch_free == NR_PCP_LISTS)
batch_free = count;
+ order = pindex_to_order(pindex);
+ BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
do {
page = list_last_entry(list, struct page, lru);
/* must delete to avoid corrupting pcp list */
list_del(&page->lru);
- pcp->count--;
+ nr_freed += 1 << order;
+ count -= 1 << order;
if (bulkfree_pcp_prepare(page))
continue;
+ /* Encode order with the migratetype */
+ page->index <<= NR_PCP_ORDER_WIDTH;
+ page->index |= order;
+
list_add_tail(&page->lru, &head);
/*
@@ -1450,9 +1523,14 @@ static void free_pcppages_bulk(struct zone *zone, int count,
prefetch_buddy(page);
prefetch_nr--;
}
- } while (--count && --batch_free && !list_empty(list));
+ } while (count > 0 && --batch_free && !list_empty(list));
}
+ pcp->count -= nr_freed;
+ /*
+ * local_lock_irq held so equivalent to spin_lock_irqsave for
+ * both PREEMPT_RT and non-PREEMPT_RT configurations.
+ */
spin_lock(&zone->lock);
isolated_pageblocks = has_isolate_pageblock(zone);
@@ -1462,14 +1540,19 @@ static void free_pcppages_bulk(struct zone *zone, int count,
*/
list_for_each_entry_safe(page, tmp, &head, lru) {
int mt = get_pcppage_migratetype(page);
+
+ /* mt has been encoded with the order (see above) */
+ order = mt & NR_PCP_ORDER_MASK;
+ mt >>= NR_PCP_ORDER_WIDTH;
+
/* MIGRATE_ISOLATE page should not go to pcplists */
VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
/* Pageblock could have been isolated meanwhile */
if (unlikely(isolated_pageblocks))
mt = get_pageblock_migratetype(page);
- __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
- trace_mm_page_pcpu_drain(page, 0, mt);
+ __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
+ trace_mm_page_pcpu_drain(page, order, mt);
}
spin_unlock(&zone->lock);
}
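Because pages of several orders now share the pcp lists, the bulk-free path temporarily packs the order into the low bits of page->index next to the migratetype. The bit layout can be sanity-checked standalone; the WIDTH value here is an assumption consistent with the BUILD_BUG_ON above:

#include <assert.h>
#include <stdio.h>

#define NR_PCP_ORDER_WIDTH	4	/* assumed; must fit MAX_ORDER */
#define NR_PCP_ORDER_MASK	((1 << NR_PCP_ORDER_WIDTH) - 1)

int main(void)
{
	unsigned long index = 2;	/* stands in for the migratetype */
	unsigned int order = 9;		/* e.g. a THP-sized free */

	/* Encode as free_pcppages_bulk() does before batching */
	index <<= NR_PCP_ORDER_WIDTH;
	index |= order;

	/* Decode as the zone-lock section does */
	assert((index & NR_PCP_ORDER_MASK) == order);
	assert((index >> NR_PCP_ORDER_WIDTH) == 2);
	printf("encoded 0x%lx -> mt %lu, order %lu\n", index,
	       index >> NR_PCP_ORDER_WIDTH, index & NR_PCP_ORDER_MASK);
	return 0;
}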
@@ -1479,13 +1562,15 @@ static void free_one_page(struct zone *zone,
unsigned int order,
int migratetype, fpi_t fpi_flags)
{
- spin_lock(&zone->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
if (unlikely(has_isolate_pageblock(zone) ||
is_migrate_isolate(migratetype))) {
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
- spin_unlock(&zone->lock);
+ spin_unlock_irqrestore(&zone->lock, flags);
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1568,16 +1653,22 @@ static void __free_pages_ok(struct page *page, unsigned int order,
unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
+ struct zone *zone = page_zone(page);
if (!free_pages_prepare(page, order, true, fpi_flags))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
+
+ spin_lock_irqsave(&zone->lock, flags);
+ if (unlikely(has_isolate_pageblock(zone) ||
+ is_migrate_isolate(migratetype))) {
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ }
+ __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
__count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, pfn, order, migratetype,
- fpi_flags);
- local_irq_restore(flags);
}
void __free_pages_core(struct page *page, unsigned int order)
@@ -1609,7 +1700,7 @@ void __free_pages_core(struct page *page, unsigned int order)
__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
}
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
/*
* During memory init memblocks map pfns to nids. The search is expensive and
@@ -1659,7 +1750,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
return nid;
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
void __init memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
@@ -2147,14 +2238,6 @@ void __init page_alloc_init_late(void)
wait_for_completion(&pgdat_init_all_done_comp);
/*
- * The number of managed pages has changed due to the initialisation
- * so the pcpu batch and high limits needs to be updated or the limits
- * will be artificially small.
- */
- for_each_populated_zone(zone)
- zone_pcp_update(zone);
-
- /*
* We initialized the rest of the deferred pages. Permanently disable
* on-demand struct page initialization.
*/
@@ -2324,8 +2407,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
- bool init;
-
set_page_private(page, 0);
set_page_refcounted(page);
@@ -2344,10 +2425,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
* kasan_alloc_pages and kernel_init_free_pages must be
* kept together to avoid discrepancies in behavior.
*/
- init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
- kasan_alloc_pages(page, order, init);
- if (init && !kasan_has_integrated_init())
- kernel_init_free_pages(page, 1 << order);
+ if (kasan_has_integrated_init()) {
+ kasan_alloc_pages(page, order, gfp_flags);
+ } else {
+ bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+ kasan_unpoison_pages(page, order, init);
+ if (init)
+ kernel_init_free_pages(page, 1 << order,
+ gfp_flags & __GFP_ZEROTAGS);
+ }
set_page_owner(page, order, gfp_flags);
}
@@ -2955,6 +3042,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
{
int i, allocated = 0;
+ /*
+ * local_lock_irq held so equivalent to spin_lock_irqsave for
+ * both PREEMPT_RT and non-PREEMPT_RT configurations.
+ */
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
@@ -3007,12 +3098,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
unsigned long flags;
int to_drain, batch;
- local_irq_save(flags);
+ local_lock_irqsave(&pagesets.lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
free_pcppages_bulk(zone, to_drain, pcp);
- local_irq_restore(flags);
+ local_unlock_irqrestore(&pagesets.lock, flags);
}
#endif
@@ -3026,16 +3117,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
unsigned long flags;
- struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- local_irq_save(flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
+ local_lock_irqsave(&pagesets.lock, flags);
- pcp = &pset->pcp;
+ pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
if (pcp->count)
free_pcppages_bulk(zone, pcp->count, pcp);
- local_irq_restore(flags);
+
+ local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
@@ -3133,7 +3223,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
* disables preemption as part of its processing
*/
for_each_online_cpu(cpu) {
- struct per_cpu_pageset *pcp;
+ struct per_cpu_pages *pcp;
struct zone *z;
bool has_pcps = false;
@@ -3144,13 +3234,13 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
*/
has_pcps = true;
} else if (zone) {
- pcp = per_cpu_ptr(zone->pageset, cpu);
- if (pcp->pcp.count)
+ pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ if (pcp->count)
has_pcps = true;
} else {
for_each_populated_zone(z) {
- pcp = per_cpu_ptr(z->pageset, cpu);
- if (pcp->pcp.count) {
+ pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
+ if (pcp->count) {
has_pcps = true;
break;
}
@@ -3243,11 +3333,12 @@ void mark_free_pages(struct zone *zone)
}
#endif /* CONFIG_PM */
-static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
+ unsigned int order)
{
int migratetype;
- if (!free_pcp_prepare(page))
+ if (!free_pcp_prepare(page, order))
return false;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -3255,52 +3346,99 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
return true;
}
-static void free_unref_page_commit(struct page *page, unsigned long pfn)
+static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
+{
+ int min_nr_free, max_nr_free;
+
+ /* Check for PCP disabled or boot pageset */
+ if (unlikely(high < batch))
+ return 1;
+
+ /* Leave at least pcp->batch pages on the list */
+ min_nr_free = batch;
+ max_nr_free = high - batch;
+
+ /*
+ * Double the number of pages freed each time there is subsequent
+ * freeing of pages without any allocation.
+ */
+ batch <<= pcp->free_factor;
+ if (batch < max_nr_free)
+ pcp->free_factor++;
+ batch = clamp(batch, min_nr_free, max_nr_free);
+
+ return batch;
+}
+
+static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
+{
+ int high = READ_ONCE(pcp->high);
+
+ if (unlikely(!high))
+ return 0;
+
+ if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
+ return high;
+
+ /*
+ * If reclaim is active, limit the number of pages that can be
+ * stored on pcp lists
+ */
+ return min(READ_ONCE(pcp->batch) << 2, high);
+}
+
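nr_pcp_free() doubles the drain size on each consecutive free without an intervening allocation, clamped between batch and high - batch. A standalone replica showing the ramp, with the pcp tuning values assumed:

#include <stdio.h>

/* Userspace replica of nr_pcp_free(): consecutive frees double the
 * drain size, clamped so at least one batch stays cached and the
 * drain never exceeds high - batch.
 */
static int nr_pcp_free(int *free_factor, int high, int batch)
{
	int min_nr_free, max_nr_free;

	if (high < batch)		/* PCP disabled or boot pageset */
		return 1;

	min_nr_free = batch;
	max_nr_free = high - batch;

	batch <<= *free_factor;		/* doubles on consecutive frees */
	if (batch < max_nr_free)
		(*free_factor)++;
	if (batch < min_nr_free)
		batch = min_nr_free;
	if (batch > max_nr_free)
		batch = max_nr_free;
	return batch;
}

int main(void)
{
	int free_factor = 0, high = 512, batch = 32;	/* assumed tuning */

	for (int i = 0; i < 6; i++)
		printf("drain %d pages (free_factor now %d)\n",
		       nr_pcp_free(&free_factor, high, batch), free_factor);
	return 0;
}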
+static void free_unref_page_commit(struct page *page, unsigned long pfn,
+ int migratetype, unsigned int order)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
- int migratetype;
+ int high;
+ int pindex;
- migratetype = get_pcppage_migratetype(page);
__count_vm_event(PGFREE);
+ pcp = this_cpu_ptr(zone->per_cpu_pageset);
+ pindex = order_to_pindex(migratetype, order);
+ list_add(&page->lru, &pcp->lists[pindex]);
+ pcp->count += 1 << order;
+ high = nr_pcp_high(pcp, zone);
+ if (pcp->count >= high) {
+ int batch = READ_ONCE(pcp->batch);
+
+ free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
+ }
+}
+
+/*
+ * Free a pcp page
+ */
+void free_unref_page(struct page *page, unsigned int order)
+{
+ unsigned long flags;
+ unsigned long pfn = page_to_pfn(page);
+ int migratetype;
+
+ if (!free_unref_page_prepare(page, pfn, order))
+ return;
/*
* We only track unmovable, reclaimable and movable on pcp lists.
- * Free ISOLATE pages back to the allocator because they are being
+ * Place ISOLATE pages on the isolated list because they are being
* offlined but treat HIGHATOMIC as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
- if (migratetype >= MIGRATE_PCPTYPES) {
+ migratetype = get_pcppage_migratetype(page);
+ if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
- free_one_page(zone, page, pfn, 0, migratetype,
- FPI_NONE);
+ free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
return;
}
migratetype = MIGRATE_MOVABLE;
}
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list_add(&page->lru, &pcp->lists[migratetype]);
- pcp->count++;
- if (pcp->count >= READ_ONCE(pcp->high))
- free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
-}
-
-/*
- * Free a 0-order page
- */
-void free_unref_page(struct page *page)
-{
- unsigned long flags;
- unsigned long pfn = page_to_pfn(page);
-
- if (!free_unref_page_prepare(page, pfn))
- return;
-
- local_irq_save(flags);
- free_unref_page_commit(page, pfn);
- local_irq_restore(flags);
+ local_lock_irqsave(&pagesets.lock, flags);
+ free_unref_page_commit(page, pfn, migratetype, order);
+ local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
@@ -3311,34 +3449,56 @@ void free_unref_page_list(struct list_head *list)
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
+ int migratetype;
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
pfn = page_to_pfn(page);
- if (!free_unref_page_prepare(page, pfn))
+ if (!free_unref_page_prepare(page, pfn, 0))
list_del(&page->lru);
+
+ /*
+ * Free isolated pages directly to the allocator, see
+ * comment in free_unref_page.
+ */
+ migratetype = get_pcppage_migratetype(page);
+ if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
+ list_del(&page->lru);
+ free_one_page(page_zone(page), page, pfn, 0,
+ migratetype, FPI_NONE);
+ continue;
+ }
+
+ /*
+ * Non-isolated types over MIGRATE_PCPTYPES get added
+ * to the MIGRATE_MOVABLE pcp list.
+ */
+ set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+ }
+
set_page_private(page, pfn);
}
- local_irq_save(flags);
+ local_lock_irqsave(&pagesets.lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
- unsigned long pfn = page_private(page);
-
+ pfn = page_private(page);
set_page_private(page, 0);
+ migratetype = get_pcppage_migratetype(page);
trace_mm_page_free_batched(page);
- free_unref_page_commit(page, pfn);
+ free_unref_page_commit(page, pfn, migratetype, 0);
/*
* Guard against excessive IRQ disabled times when we get
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(&pagesets.lock, flags);
batch_count = 0;
- local_irq_save(flags);
+ local_lock_irqsave(&pagesets.lock, flags);
}
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
@@ -3437,7 +3597,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
*
* Must be called with interrupts disabled.
*/
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+ long nr_account)
{
#ifdef CONFIG_NUMA
enum numa_stat_item local_stat = NUMA_LOCAL;
@@ -3450,18 +3611,19 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
local_stat = NUMA_OTHER;
if (zone_to_nid(z) == zone_to_nid(preferred_zone))
- __inc_numa_state(z, NUMA_HIT);
+ __count_numa_events(z, NUMA_HIT, nr_account);
else {
- __inc_numa_state(z, NUMA_MISS);
- __inc_numa_state(preferred_zone, NUMA_FOREIGN);
+ __count_numa_events(z, NUMA_MISS, nr_account);
+ __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
}
- __inc_numa_state(z, local_stat);
+ __count_numa_events(z, local_stat, nr_account);
#endif
}
/* Remove page from the per-cpu list, caller must protect the list */
static inline
-struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
+struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
+ int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list)
@@ -3470,16 +3632,30 @@ struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
do {
if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- READ_ONCE(pcp->batch), list,
+ int batch = READ_ONCE(pcp->batch);
+ int alloced;
+
+ /*
+ * Scale batch relative to order if batch implies
+ * free pages can be stored on the PCP. Batch can
+ * be 1 for small zones or for boot pagesets which
+ * should never store free pages as the pages may
+ * belong to arbitrary zones.
+ */
+ if (batch > 1)
+ batch = max(batch >> order, 2);
+ alloced = rmqueue_bulk(zone, order,
+ batch, list,
migratetype, alloc_flags);
+
+ pcp->count += alloced << order;
if (unlikely(list_empty(list)))
return NULL;
}
page = list_first_entry(list, struct page, lru);
list_del(&page->lru);
- pcp->count--;
+ pcp->count -= 1 << order;
} while (check_new_pcp(page));
return page;
@@ -3487,23 +3663,31 @@ struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
- struct zone *zone, gfp_t gfp_flags,
- int migratetype, unsigned int alloc_flags)
+ struct zone *zone, unsigned int order,
+ gfp_t gfp_flags, int migratetype,
+ unsigned int alloc_flags)
{
struct per_cpu_pages *pcp;
struct list_head *list;
struct page *page;
unsigned long flags;
- local_irq_save(flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+ local_lock_irqsave(&pagesets.lock, flags);
+
+ /*
+ * On allocation, reduce the number of pages that are batch freed.
+ * See nr_pcp_free() where free_factor is increased for subsequent
+ * frees.
+ */
+ pcp = this_cpu_ptr(zone->per_cpu_pageset);
+ pcp->free_factor >>= 1;
+ list = &pcp->lists[order_to_pindex(migratetype, order)];
+ page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
+ local_unlock_irqrestore(&pagesets.lock, flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
- zone_statistics(preferred_zone, zone);
+ zone_statistics(preferred_zone, zone, 1);
}
- local_irq_restore(flags);
return page;
}
@@ -3519,15 +3703,15 @@ struct page *rmqueue(struct zone *preferred_zone,
unsigned long flags;
struct page *page;
- if (likely(order == 0)) {
+ if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA area and
* we need to skip it when CMA area isn't allowed.
*/
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
migratetype != MIGRATE_MOVABLE) {
- page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
- migratetype, alloc_flags);
+ page = rmqueue_pcplist(preferred_zone, zone, order,
+ gfp_flags, migratetype, alloc_flags);
goto out;
}
}
@@ -3555,15 +3739,15 @@ struct page *rmqueue(struct zone *preferred_zone,
if (!page)
page = __rmqueue(zone, order, migratetype, alloc_flags);
} while (page && check_new_pages(page, order));
- spin_unlock(&zone->lock);
if (!page)
goto failed;
+
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
+ spin_unlock_irqrestore(&zone->lock, flags);
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone);
- local_irq_restore(flags);
+ zone_statistics(preferred_zone, zone, 1);
out:
/* Separate test+clear to avoid unnecessary atomics */
@@ -3576,7 +3760,7 @@ out:
return page;
failed:
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
}
@@ -4252,6 +4436,9 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
if (!order)
return false;
+ if (fatal_signal_pending(current))
+ return false;
+
if (compaction_made_progress(compact_result))
(*compaction_retries)++;
@@ -5044,7 +5231,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
- int nr_populated = 0;
+ int nr_populated = 0, nr_account = 0;
if (unlikely(nr_pages <= 0))
return 0;
@@ -5053,9 +5240,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
*/
- while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
+ while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* Already populated array? */
+ if (unlikely(page_array && nr_pages - nr_populated == 0))
+ return nr_populated;
+
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
@@ -5097,9 +5288,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
goto failed;
/* Attempt the batch allocation */
- local_irq_save(flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- pcp_list = &pcp->lists[ac.migratetype];
+ local_lock_irqsave(&pagesets.lock, flags);
+ pcp = this_cpu_ptr(zone->per_cpu_pageset);
+ pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
while (nr_populated < nr_pages) {
@@ -5109,7 +5300,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
continue;
}
- page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
+ page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list);
if (unlikely(!page)) {
/* Try and get at least one page */
@@ -5117,15 +5308,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
goto failed_irq;
break;
}
-
- /*
- * Ideally this would be batched but the best way to do
- * that cheaply is to first convert zone_statistics to
- * be inaccurate per-cpu counter like vm_events to avoid
- * a RMW cycle then do the accounting with IRQs enabled.
- */
- __count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
- zone_statistics(ac.preferred_zoneref->zone, zone);
+ nr_account++;
prep_new_page(page, 0, gfp, 0);
if (page_list)
@@ -5135,12 +5318,15 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nr_populated++;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(&pagesets.lock, flags);
+
+ __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
+ zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
return nr_populated;
failed_irq:
- local_irq_restore(flags);
+ local_unlock_irqrestore(&pagesets.lock, flags);
failed:
page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
@@ -5247,14 +5433,6 @@ unsigned long get_zeroed_page(gfp_t gfp_mask)
}
EXPORT_SYMBOL(get_zeroed_page);
-static inline void free_the_page(struct page *page, unsigned int order)
-{
- if (order == 0) /* Via pcp? */
- free_unref_page(page);
- else
- __free_pages_ok(page, order, FPI_NONE);
-}
-
/**
* __free_pages - Free pages allocated with alloc_pages().
* @page: The page pointer returned from alloc_pages().
@@ -5713,7 +5891,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
continue;
for_each_online_cpu(cpu)
- free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+ free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
}
printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
@@ -5805,7 +5983,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
free_pcp = 0;
for_each_online_cpu(cpu)
- free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+ free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
show_node(zone);
printk(KERN_CONT
@@ -5846,7 +6024,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(zone_page_state(zone, NR_MLOCK)),
K(zone_page_state(zone, NR_BOUNCE)),
K(free_pcp),
- K(this_cpu_read(zone->pageset->pcp.count)),
+ K(this_cpu_read(zone->per_cpu_pageset->count)),
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
@@ -6173,11 +6351,12 @@ static void build_zonelists(pg_data_t *pgdat)
* not check if the processor is online before following the pageset pointer.
* Other parts of the kernel may not check if the zone is available.
*/
-static void pageset_init(struct per_cpu_pageset *p);
+static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
/* These effectively disable the pcplists in the boot pageset completely */
#define BOOT_PAGESET_HIGH 0
#define BOOT_PAGESET_BATCH 1
-static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static void __build_all_zonelists(void *data)
@@ -6244,7 +6423,7 @@ build_all_zonelists_init(void)
* (a chicken-egg dilemma).
*/
for_each_possible_cpu(cpu)
- pageset_init(&per_cpu(boot_pageset, cpu));
+ per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
mminit_verify_zonelist();
cpuset_init_current_mems_allowed();
@@ -6396,7 +6575,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
return;
/*
- * The call to memmap_init_zone should have already taken care
+ * The call to memmap_init should have already taken care
* of the pages reserved for the memmap, so we can just jump to
* the end of that region and start processing the device pages.
*/
@@ -6457,11 +6636,11 @@ static void __meminit zone_init_free_lists(struct zone *zone)
}
}
-#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
+#if !defined(CONFIG_FLATMEM)
/*
* Only struct pages that correspond to ranges defined by memblock.memory
* are zeroed and initialized by going through __init_single_page() during
- * memmap_init_zone().
+ * memmap_init_zone_range().
*
* But, there could be struct pages that correspond to holes in
* memblock.memory. This can happen because of the following reasons:
@@ -6480,9 +6659,9 @@ static void __meminit zone_init_free_lists(struct zone *zone)
* zone/node above the hole except for the trailing pages in the last
* section that will be appended to the zone/node below.
*/
-static u64 __meminit init_unavailable_range(unsigned long spfn,
- unsigned long epfn,
- int zone, int node)
+static void __init init_unavailable_range(unsigned long spfn,
+ unsigned long epfn,
+ int zone, int node)
{
unsigned long pfn;
u64 pgcnt = 0;
@@ -6498,56 +6677,77 @@ static u64 __meminit init_unavailable_range(unsigned long spfn,
pgcnt++;
}
- return pgcnt;
+ if (pgcnt)
+ pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
+ node, zone_names[zone], pgcnt);
}
#else
-static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
- int zone, int node)
+static inline void init_unavailable_range(unsigned long spfn,
+ unsigned long epfn,
+ int zone, int node)
{
- return 0;
}
#endif
-void __meminit __weak memmap_init_zone(struct zone *zone)
+static void __init memmap_init_zone_range(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long *hole_pfn)
{
unsigned long zone_start_pfn = zone->zone_start_pfn;
unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
- int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
- static unsigned long hole_pfn;
+ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+
+ start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
+ end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
+
+ if (start_pfn >= end_pfn)
+ return;
+
+ memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
+ zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+
+ if (*hole_pfn < start_pfn)
+ init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
+
+ *hole_pfn = end_pfn;
+}
+
+static void __init memmap_init(void)
+{
unsigned long start_pfn, end_pfn;
- u64 pgcnt = 0;
+ unsigned long hole_pfn = 0;
+ int i, j, zone_id, nid;
- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
- start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
- end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ struct pglist_data *node = NODE_DATA(nid);
- if (end_pfn > start_pfn)
- memmap_init_range(end_pfn - start_pfn, nid,
- zone_id, start_pfn, zone_end_pfn,
- MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zone *zone = node->node_zones + j;
- if (hole_pfn < start_pfn)
- pgcnt += init_unavailable_range(hole_pfn, start_pfn,
- zone_id, nid);
- hole_pfn = end_pfn;
+ if (!populated_zone(zone))
+ continue;
+
+ memmap_init_zone_range(zone, start_pfn, end_pfn,
+ &hole_pfn);
+ zone_id = j;
+ }
}
#ifdef CONFIG_SPARSEMEM
/*
- * Initialize the hole in the range [zone_end_pfn, section_end].
- * If zone boundary falls in the middle of a section, this hole
- * will be re-initialized during the call to this function for the
- * higher zone.
+ * Initialize the memory map for the hole in the range [memory_end,
+ * section_end]. Append the pages in this hole to the highest zone
+ * in the last node.
+ * The call to init_unavailable_range() is outside the ifdef to
+ * silence the compiler warning about zone_id being set but not used;
+ * for FLATMEM it is a nop anyway.
*/
- end_pfn = round_up(zone_end_pfn, PAGES_PER_SECTION);
+ end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
if (hole_pfn < end_pfn)
- pgcnt += init_unavailable_range(hole_pfn, end_pfn,
- zone_id, nid);
#endif
-
- if (pgcnt)
- pr_info(" %s zone: %llu pages in unavailable ranges\n",
- zone->name, pgcnt);
+ init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
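
The rewritten memmap_init() visits each memblock.memory range once and intersects it with every populated zone via the clamp pattern in memmap_init_zone_range(). A minimal userspace sketch of that intersection arithmetic (PFN values are made up, and clamp is re-implemented here to keep the sketch standalone):

```c
/*
 * Userspace sketch of the range-vs-zone intersection done by
 * memmap_init_zone_range(); PFN values are illustrative only.
 */
#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long zone_start = 0x1000, zone_end = 0x8000; /* zone span */
	unsigned long start = 0x0800, end = 0x2000;  /* memblock.memory range */

	start = clamp_ul(start, zone_start, zone_end);
	end = clamp_ul(end, zone_start, zone_end);

	if (start < end)	/* mirrors the start_pfn >= end_pfn bail-out */
		printf("init pfns [%#lx, %#lx)\n", start, end); /* [0x1000, 0x2000) */
	return 0;
}
```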
static int zone_batchsize(struct zone *zone)
@@ -6556,13 +6756,12 @@ static int zone_batchsize(struct zone *zone)
int batch;
/*
- * The per-cpu-pages pools are set to around 1000th of the
- * size of the zone.
+ * The number of pages to batch allocate is either ~0.1%
+ * of the zone or 1MB, whichever is smaller. The batch
+ * size strikes a balance between allocation latency
+ * and zone lock contention.
*/
- batch = zone_managed_pages(zone) / 1024;
- /* But no more than a meg. */
- if (batch * PAGE_SIZE > 1024 * 1024)
- batch = (1024 * 1024) / PAGE_SIZE;
+ batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
batch /= 4; /* We effectively *= 4 below */
if (batch < 1)
batch = 1;
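
As a worked check of the new one-liner: for a zone with 2 GiB of managed memory and 4 KiB pages, ~0.1% of the zone is 512 pages, the 1 MiB cap is 256 pages, and the divide-by-four leaves a batch of 64. A standalone sketch of the same arithmetic, assuming 4 KiB pages:

```c
/* Userspace check of the pcp batch sizing above; numbers illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long managed = (2UL << 30) / page_size;	/* 2 GiB zone */
	unsigned long batch = managed >> 10;			/* ~0.1% of zone */

	if (batch > (1024 * 1024) / page_size)			/* cap at 1 MiB */
		batch = (1024 * 1024) / page_size;
	batch /= 4;						/* callers *= 4 */
	if (batch < 1)
		batch = 1;
	printf("batch = %lu pages\n", batch);			/* prints 64 */
	return 0;
}
```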
@@ -6599,6 +6798,54 @@ static int zone_batchsize(struct zone *zone)
#endif
}
+static int zone_highsize(struct zone *zone, int batch, int cpu_online)
+{
+#ifdef CONFIG_MMU
+ int high;
+ int nr_split_cpus;
+ unsigned long total_pages;
+
+ if (!percpu_pagelist_high_fraction) {
+ /*
+ * By default, the high value of the pcp is based on the zone
+ * low watermark so that if they are full then background
+ * reclaim will not be started prematurely.
+ */
+ total_pages = low_wmark_pages(zone);
+ } else {
+ /*
+ * If percpu_pagelist_high_fraction is configured, the high
+ * value is based on a fraction of the managed pages in the
+ * zone.
+ */
+ total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
+ }
+
+ /*
+ * Split the high value across all online CPUs local to the zone. Note
+ * that early in boot CPUs may not be online yet, and that during
+ * CPU hotplug the cpumask is not yet updated when a CPU is being
+ * onlined. For memory nodes that have no CPUs, split pcp->high across
+ * all online CPUs to mitigate the risk that reclaim is triggered
+ * prematurely due to pages stored on pcp lists.
+ */
+ nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
+ if (!nr_split_cpus)
+ nr_split_cpus = num_online_cpus();
+ high = total_pages / nr_split_cpus;
+
+ /*
+ * Ensure high is at least batch*4. The multiple is based on the
+ * historical relationship between high and batch.
+ */
+ high = max(high, batch << 2);
+
+ return high;
+#else
+ return 0;
+#endif
+}
+
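A sketch of the split zone_highsize() performs on its default path, with illustrative inputs (a 16384-page low watermark and eight node-local CPUs; all numbers are made up):

```c
/* Userspace sketch of zone_highsize() with invented inputs. */
#include <stdio.h>

int main(void)
{
	int batch = 64, cpu_online = 0;
	unsigned long low_wmark = 16384;	/* zone low watermark, pages */
	int node_cpus = 8;			/* CPUs local to the zone's node */

	int nr_split = node_cpus + cpu_online;
	if (!nr_split)
		nr_split = 1;	/* stand-in for the num_online_cpus() fallback */

	int high = low_wmark / nr_split;	/* 2048 */
	if (high < batch << 2)			/* keep high >= 4 * batch */
		high = batch << 2;
	printf("pcp->high = %d pages\n", high);	/* prints 2048 */
	return 0;
}
```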
/*
* pcp->high and pcp->batch values are related and generally batch is lower
* than high. They are also related to pcp->count such that count is lower
@@ -6622,16 +6869,15 @@ static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
WRITE_ONCE(pcp->high, high);
}
-static void pageset_init(struct per_cpu_pageset *p)
+static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
{
- struct per_cpu_pages *pcp;
- int migratetype;
+ int pindex;
- memset(p, 0, sizeof(*p));
+ memset(pcp, 0, sizeof(*pcp));
+ memset(pzstats, 0, sizeof(*pzstats));
- pcp = &p->pcp;
- for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
- INIT_LIST_HEAD(&pcp->lists[migratetype]);
+ for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
+ INIT_LIST_HEAD(&pcp->lists[pindex]);
/*
* Set batch and high values safe for a boot pageset. A true percpu
@@ -6641,38 +6887,31 @@ static void pageset_init(struct per_cpu_pageset *p)
*/
pcp->high = BOOT_PAGESET_HIGH;
pcp->batch = BOOT_PAGESET_BATCH;
+ pcp->free_factor = 0;
}
static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
unsigned long batch)
{
- struct per_cpu_pageset *p;
+ struct per_cpu_pages *pcp;
int cpu;
for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(zone->pageset, cpu);
- pageset_update(&p->pcp, high, batch);
+ pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ pageset_update(pcp, high, batch);
}
}
/*
* Calculate and set new high and batch values for all per-cpu pagesets of a
- * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
+ * zone based on the zone's size.
*/
-static void zone_set_pageset_high_and_batch(struct zone *zone)
+static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
{
- unsigned long new_high, new_batch;
+ int new_high, new_batch;
- if (percpu_pagelist_fraction) {
- new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
- new_batch = max(1UL, new_high / 4);
- if ((new_high / 4) > (PAGE_SHIFT * 8))
- new_batch = PAGE_SHIFT * 8;
- } else {
- new_batch = zone_batchsize(zone);
- new_high = 6 * new_batch;
- new_batch = max(1UL, 1 * new_batch);
- }
+ new_batch = max(1, zone_batchsize(zone));
+ new_high = zone_highsize(zone, new_batch, cpu_online);
if (zone->pageset_high == new_high &&
zone->pageset_batch == new_batch)
@@ -6686,16 +6925,23 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
void __meminit setup_zone_pageset(struct zone *zone)
{
- struct per_cpu_pageset *p;
int cpu;
- zone->pageset = alloc_percpu(struct per_cpu_pageset);
+ /* Size may be 0 on !SMP && !NUMA */
+ if (sizeof(struct per_cpu_zonestat) > 0)
+ zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
+
+ zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(zone->pageset, cpu);
- pageset_init(p);
+ struct per_cpu_pages *pcp;
+ struct per_cpu_zonestat *pzstats;
+
+ pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
+ per_cpu_pages_init(pcp, pzstats);
}
- zone_set_pageset_high_and_batch(zone);
+ zone_set_pageset_high_and_batch(zone, 0);
}
/*
@@ -6719,9 +6965,9 @@ void __init setup_per_cpu_pageset(void)
* the nodes these zones are associated with.
*/
for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
- memset(pcp->vm_numa_stat_diff, 0,
- sizeof(pcp->vm_numa_stat_diff));
+ struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
+ memset(pzstats->vm_numa_event, 0,
+ sizeof(pzstats->vm_numa_event));
}
#endif
@@ -6737,14 +6983,14 @@ static __meminit void zone_pcp_init(struct zone *zone)
* relies on the ability of the linker to provide the
* offset of a (static) per cpu variable into the per cpu area.
*/
- zone->pageset = &boot_pageset;
+ zone->per_cpu_pageset = &boot_pageset;
+ zone->per_cpu_zonestats = &boot_zonestats;
zone->pageset_high = BOOT_PAGESET_HIGH;
zone->pageset_batch = BOOT_PAGESET_BATCH;
if (populated_zone(zone))
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
- zone->name, zone->present_pages,
- zone_batchsize(zone));
+ pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
+ zone->present_pages, zone_batchsize(zone));
}
void __meminit init_currently_empty_zone(struct zone *zone,
@@ -7014,8 +7260,7 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
pgdat->node_spanned_pages = totalpages;
pgdat->node_present_pages = realtotalpages;
- printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
- realtotalpages);
+ pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
@@ -7215,19 +7460,17 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
if (freesize >= memmap_pages) {
freesize -= memmap_pages;
if (memmap_pages)
- printk(KERN_DEBUG
- " %s zone: %lu pages used for memmap\n",
- zone_names[j], memmap_pages);
+ pr_debug(" %s zone: %lu pages used for memmap\n",
+ zone_names[j], memmap_pages);
} else
- pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
+ pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
zone_names[j], memmap_pages, freesize);
}
/* Account for reserved pages */
if (j == 0 && freesize > dma_reserve) {
freesize -= dma_reserve;
- printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
- zone_names[0], dma_reserve);
+ pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
}
if (!is_highmem_idx(j))
@@ -7250,11 +7493,10 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
set_pageblock_order();
setup_usemap(zone);
init_currently_empty_zone(zone, zone->zone_start_pfn, size);
- memmap_init_zone(zone);
}
}
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLATMEM
static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
unsigned long __maybe_unused start = 0;
@@ -7289,7 +7531,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
__func__, pgdat->node_id, (unsigned long)pgdat,
(unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
/*
* With no DISCONTIG, the global mem_map is just set as node 0's
*/
@@ -7302,7 +7544,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
}
#else
static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
-#endif /* CONFIG_FLAT_NODE_MEM_MAP */
+#endif /* CONFIG_FLATMEM */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
@@ -7776,6 +8018,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
node_set_state(nid, N_MEMORY);
check_for_memory(pgdat, nid);
}
+
+ memmap_init();
}
static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -7952,6 +8196,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
static int page_alloc_cpu_dead(unsigned int cpu)
{
+ struct zone *zone;
lru_add_drain_cpu(cpu);
drain_pages(cpu);
@@ -7972,6 +8217,19 @@ static int page_alloc_cpu_dead(unsigned int cpu)
* race with what we are doing.
*/
cpu_vm_stats_fold(cpu);
+
+ for_each_populated_zone(zone)
+ zone_pcp_update(zone, 0);
+
+ return 0;
+}
+
+static int page_alloc_cpu_online(unsigned int cpu)
+{
+ struct zone *zone;
+
+ for_each_populated_zone(zone)
+ zone_pcp_update(zone, 1);
return 0;
}
@@ -7997,8 +8255,9 @@ void __init page_alloc_init(void)
hashdist = 0;
#endif
- ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
- "mm/page_alloc:dead", NULL,
+ ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
+ "mm/page_alloc:pcp",
+ page_alloc_cpu_online,
page_alloc_cpu_dead);
WARN_ON(ret < 0);
}
@@ -8061,14 +8320,14 @@ static void setup_per_zone_lowmem_reserve(void)
unsigned long managed_pages = 0;
for (j = i + 1; j < MAX_NR_ZONES; j++) {
- if (clear) {
- zone->lowmem_reserve[j] = 0;
- } else {
- struct zone *upper_zone = &pgdat->node_zones[j];
+ struct zone *upper_zone = &pgdat->node_zones[j];
- managed_pages += zone_managed_pages(upper_zone);
+ managed_pages += zone_managed_pages(upper_zone);
+
+ if (clear)
+ zone->lowmem_reserve[j] = 0;
+ else
zone->lowmem_reserve[j] = managed_pages / ratio;
- }
}
}
}
@@ -8148,11 +8407,19 @@ static void __setup_per_zone_wmarks(void)
*/
void setup_per_zone_wmarks(void)
{
+ struct zone *zone;
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
__setup_per_zone_wmarks();
spin_unlock(&lock);
+
+ /*
+ * The watermark sizes have changed, so update the pcpu batch
+ * and high limits or the limits may be inappropriate.
+ */
+ for_each_zone(zone)
+ zone_pcp_update(zone, 0);
}
/*
@@ -8331,38 +8598,38 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
}
/*
- * percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu. It is the fraction of total pages in each zone that a hot per cpu
+ * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
+ * cpu. It is the fraction of total pages in each zone that a hot per cpu
* pagelist can have before it gets flushed back to buddy allocator.
*/
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length, loff_t *ppos)
+int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
+ int write, void *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
- int old_percpu_pagelist_fraction;
+ int old_percpu_pagelist_high_fraction;
int ret;
mutex_lock(&pcp_batch_high_lock);
- old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+ old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (!write || ret < 0)
goto out;
/* Sanity checking to avoid pcp imbalance */
- if (percpu_pagelist_fraction &&
- percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
- percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+ if (percpu_pagelist_high_fraction &&
+ percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
+ percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
ret = -EINVAL;
goto out;
}
/* No change? */
- if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+ if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
goto out;
for_each_populated_zone(zone)
- zone_set_pageset_high_and_batch(zone);
+ zone_set_pageset_high_and_batch(zone, 0);
out:
mutex_unlock(&pcp_batch_high_lock);
return ret;
@@ -8717,7 +8984,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
lru_cache_enable();
if (ret < 0) {
- alloc_contig_dump_pages(&cc->migratepages);
+ if (ret == -EBUSY)
+ alloc_contig_dump_pages(&cc->migratepages);
putback_movable_pages(&cc->migratepages);
return ret;
}
@@ -8990,10 +9258,10 @@ EXPORT_SYMBOL(free_contig_range);
* The zone indicated has a new number of managed_pages; batch sizes and percpu
* page high values need to be recalculated.
*/
-void __meminit zone_pcp_update(struct zone *zone)
+void zone_pcp_update(struct zone *zone, int cpu_online)
{
mutex_lock(&pcp_batch_high_lock);
- zone_set_pageset_high_and_batch(zone);
+ zone_set_pageset_high_and_batch(zone, cpu_online);
mutex_unlock(&pcp_batch_high_lock);
}
@@ -9021,15 +9289,17 @@ void zone_pcp_enable(struct zone *zone)
void zone_pcp_reset(struct zone *zone)
{
int cpu;
- struct per_cpu_pageset *pset;
+ struct per_cpu_zonestat *pzstats;
- if (zone->pageset != &boot_pageset) {
+ if (zone->per_cpu_pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
- pset = per_cpu_ptr(zone->pageset, cpu);
- drain_zonestat(zone, pset);
+ pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
+ drain_zonestat(zone, pzstats);
}
- free_percpu(zone->pageset);
- zone->pageset = &boot_pageset;
+ free_percpu(zone->per_cpu_pageset);
+ free_percpu(zone->per_cpu_zonestats);
+ zone->per_cpu_pageset = &boot_pageset;
+ zone->per_cpu_zonestats = &boot_zonestats;
}
}
@@ -9158,6 +9428,8 @@ bool take_page_off_buddy(struct page *page)
del_page_from_free_list(page_head, zone, page_order);
break_down_buddy_pages(zone, page_head, page, 0,
page_order, migratetype);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, -1, migratetype);
ret = true;
break;
}
diff --git a/mm/page_ext.c b/mm/page_ext.c
index df6f74aac8e1..293b2685fc48 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -191,7 +191,7 @@ fail:
panic("Out of memory");
}
-#else /* CONFIG_FLAT_NODE_MEM_MAP */
+#else /* CONFIG_FLATMEM */
struct page_ext *lookup_page_ext(const struct page *page)
{
diff --git a/mm/page_owner.c b/mm/page_owner.c
index adfabb560eb9..f51a57e92aa3 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -392,7 +392,7 @@ err:
return -ENOMEM;
}
-void __dump_page_owner(struct page *page)
+void __dump_page_owner(const struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index c50d93ffa252..382958eef8a9 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -4,12 +4,17 @@
#include <linux/page_reporting.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include "page_reporting.h"
#include "internal.h"
+unsigned int page_reporting_order = MAX_ORDER;
+module_param(page_reporting_order, uint, 0644);
+MODULE_PARM_DESC(page_reporting_order, "Set page reporting order");
+
#define PAGE_REPORTING_DELAY (2 * HZ)
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;
@@ -31,8 +36,8 @@ __page_reporting_request(struct page_reporting_dev_info *prdev)
return;
/*
- * If reporting is already active there is nothing we need to do.
- * Test against 0 as that represents PAGE_REPORTING_IDLE.
+ * If reporting is already active there is nothing we need to do.
+ * Test against 0 as that represents PAGE_REPORTING_IDLE.
*/
state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
if (state != PAGE_REPORTING_IDLE)
@@ -229,7 +234,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
/* Generate minimum watermark to be able to guarantee progress */
watermark = low_wmark_pages(zone) +
- (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);
+ (PAGE_REPORTING_CAPACITY << page_reporting_order);
/*
* Cancel request if insufficient free memory or if we failed
@@ -239,7 +244,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
return err;
/* Process each free list starting from lowest order/mt */
- for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
+ for (order = page_reporting_order; order < MAX_ORDER; order++) {
for (mt = 0; mt < MIGRATE_TYPES; mt++) {
/* We do not pull pages from the isolate free list */
if (is_migrate_isolate(mt))
@@ -324,6 +329,12 @@ int page_reporting_register(struct page_reporting_dev_info *prdev)
goto err_out;
}
+ /*
+ * Update the page reporting order if it's specified by the driver.
+ * Otherwise, it falls back to @pageblock_order.
+ */
+ page_reporting_order = prdev->order ? : pageblock_order;
+
/* initialize state and work structures */
atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
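
For context, a reporting driver opts into a non-default granularity by setting ->order before registering. A sketch of the driver side; the ->report() callback signature is assumed from include/linux/page_reporting.h as of this series:

```c
/* Driver-side sketch; the hypervisor hand-off is elided. */
#include <linux/page_reporting.h>
#include <linux/scatterlist.h>

static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	/* Hand the free ranges in @sgl to the back-end here. */
	return 0;
}

static struct page_reporting_dev_info demo_prdev = {
	.report	= demo_report,
	.order	= 9,	/* report 2 MiB chunks instead of pageblock_order */
};

/* page_reporting_register(&demo_prdev) then picks up order 9 via
 * page_reporting_order = prdev->order ? : pageblock_order; */
```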
diff --git a/mm/page_reporting.h b/mm/page_reporting.h
index 2c385dd4ddbd..c51dbc228b94 100644
--- a/mm/page_reporting.h
+++ b/mm/page_reporting.h
@@ -10,10 +10,9 @@
#include <linux/pgtable.h>
#include <linux/scatterlist.h>
-#define PAGE_REPORTING_MIN_ORDER pageblock_order
-
#ifdef CONFIG_PAGE_REPORTING
DECLARE_STATIC_KEY_FALSE(page_reporting_enabled);
+extern unsigned int page_reporting_order;
void __page_reporting_notify(void);
static inline bool page_reported(struct page *page)
@@ -38,7 +37,7 @@ static inline void page_reporting_notify_free(unsigned int order)
return;
/* Determine if we have crossed reporting threshold */
- if (order < PAGE_REPORTING_MIN_ORDER)
+ if (order < page_reporting_order)
return;
/* This will add a few cycles, but should be called infrequently */
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 2cf01d933f13..a4435311754b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -116,6 +116,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return pfn_is_match(pvmw->page, pfn);
}
+static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
+{
+ pvmw->address = (pvmw->address + size) & ~(size - 1);
+ if (!pvmw->address)
+ pvmw->address = ULONG_MAX;
+}
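
step_forward() rounds the address up to the next size-aligned boundary (size must be a power of two) and pins a wrapped result to ULONG_MAX so the scan loop below terminates. A standalone check, assuming x86_64's 2 MiB PMD_SIZE:

```c
/* Userspace check of the step_forward() rounding. */
#include <stdio.h>
#include <limits.h>

static unsigned long step(unsigned long addr, unsigned long size)
{
	addr = (addr + size) & ~(size - 1);
	return addr ? addr : ULONG_MAX;
}

int main(void)
{
	printf("%#lx\n", step(0x201234, 0x200000));	 /* 0x400000: next PMD */
	printf("%#lx\n", step(ULONG_MAX - 1, 0x200000)); /* wraps -> ULONG_MAX */
	return 0;
}
```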
+
/**
* page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
* @pvmw->address
@@ -144,6 +151,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
struct mm_struct *mm = pvmw->vma->vm_mm;
struct page *page = pvmw->page;
+ unsigned long end;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
@@ -153,10 +161,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->pmd && !pvmw->pte)
return not_found(pvmw);
- if (pvmw->pte)
- goto next_pte;
+ if (unlikely(PageHuge(page))) {
+ /* The only possible mapping was handled on last iteration */
+ if (pvmw->pte)
+ return not_found(pvmw);
- if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
if (!pvmw->pte)
@@ -168,78 +177,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
return true;
}
-restart:
- pgd = pgd_offset(mm, pvmw->address);
- if (!pgd_present(*pgd))
- return false;
- p4d = p4d_offset(pgd, pvmw->address);
- if (!p4d_present(*p4d))
- return false;
- pud = pud_offset(p4d, pvmw->address);
- if (!pud_present(*pud))
- return false;
- pvmw->pmd = pmd_offset(pud, pvmw->address);
+
/*
- * Make sure the pmd value isn't cached in a register by the
- * compiler and used as a stale value after we've observed a
- * subsequent update.
+ * Seeking to the next pte only makes sense for THP.
+ * But more important than that optimization is to filter out
+ * any PageKsm page, whose page->index misleads vma_address()
+ * and vma_address_end() to disaster.
*/
- pmde = READ_ONCE(*pvmw->pmd);
- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
- pvmw->ptl = pmd_lock(mm, pvmw->pmd);
- if (likely(pmd_trans_huge(*pvmw->pmd))) {
- if (pvmw->flags & PVMW_MIGRATION)
- return not_found(pvmw);
- if (pmd_page(*pvmw->pmd) != page)
- return not_found(pvmw);
- return true;
- } else if (!pmd_present(*pvmw->pmd)) {
- if (thp_migration_supported()) {
- if (!(pvmw->flags & PVMW_MIGRATION))
+ end = PageTransCompound(page) ?
+ vma_address_end(page, pvmw->vma) :
+ pvmw->address + PAGE_SIZE;
+ if (pvmw->pte)
+ goto next_pte;
+restart:
+ do {
+ pgd = pgd_offset(mm, pvmw->address);
+ if (!pgd_present(*pgd)) {
+ step_forward(pvmw, PGDIR_SIZE);
+ continue;
+ }
+ p4d = p4d_offset(pgd, pvmw->address);
+ if (!p4d_present(*p4d)) {
+ step_forward(pvmw, P4D_SIZE);
+ continue;
+ }
+ pud = pud_offset(p4d, pvmw->address);
+ if (!pud_present(*pud)) {
+ step_forward(pvmw, PUD_SIZE);
+ continue;
+ }
+
+ pvmw->pmd = pmd_offset(pud, pvmw->address);
+ /*
+ * Make sure the pmd value isn't cached in a register by the
+ * compiler and used as a stale value after we've observed a
+ * subsequent update.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ pmde = *pvmw->pmd;
+ if (likely(pmd_trans_huge(pmde))) {
+ if (pvmw->flags & PVMW_MIGRATION)
return not_found(pvmw);
- if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
- swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+ if (pmd_page(pmde) != page)
+ return not_found(pvmw);
+ return true;
+ }
+ if (!pmd_present(pmde)) {
+ swp_entry_t entry;
- if (migration_entry_to_page(entry) != page)
- return not_found(pvmw);
- return true;
- }
+ if (!thp_migration_supported() ||
+ !(pvmw->flags & PVMW_MIGRATION))
+ return not_found(pvmw);
+ entry = pmd_to_swp_entry(pmde);
+ if (!is_migration_entry(entry) ||
+ migration_entry_to_page(entry) != page)
+ return not_found(pvmw);
+ return true;
}
- return not_found(pvmw);
- } else {
/* THP pmd was split under us: handle on pte level */
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
+ } else if (!pmd_present(pmde)) {
+ /*
+ * If PVMW_SYNC, take and drop THP pmd lock so that we
+ * cannot return prematurely, while zap_huge_pmd() has
+ * cleared *pmd but not decremented compound_mapcount().
+ */
+ if ((pvmw->flags & PVMW_SYNC) &&
+ PageTransCompound(page)) {
+ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+ spin_unlock(ptl);
+ }
+ step_forward(pvmw, PMD_SIZE);
+ continue;
}
- } else if (!pmd_present(pmde)) {
- return false;
- }
- if (!map_pte(pvmw))
- goto next_pte;
- while (1) {
+ if (!map_pte(pvmw))
+ goto next_pte;
+this_pte:
if (check_pte(pvmw))
return true;
next_pte:
- /* Seek to next pte only makes sense for THP */
- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
- return not_found(pvmw);
do {
pvmw->address += PAGE_SIZE;
- if (pvmw->address >= pvmw->vma->vm_end ||
- pvmw->address >=
- __vma_address(pvmw->page, pvmw->vma) +
- thp_size(pvmw->page))
+ if (pvmw->address >= end)
return not_found(pvmw);
/* Did we cross page table boundary? */
- if (pvmw->address % PMD_SIZE == 0) {
- pte_unmap(pvmw->pte);
+ if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
if (pvmw->ptl) {
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
}
+ pte_unmap(pvmw->pte);
+ pvmw->pte = NULL;
goto restart;
- } else {
- pvmw->pte++;
+ }
+ pvmw->pte++;
+ if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
+ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+ spin_lock(pvmw->ptl);
}
} while (pte_none(*pvmw->pte));
@@ -247,7 +286,10 @@ next_pte:
pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
spin_lock(pvmw->ptl);
}
- }
+ goto this_pte;
+ } while (pvmw->address < end);
+
+ return false;
}
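
For reference, a caller-side sketch of the walk API as the rmap users below consume it; on each true return either pvmw.pte or pvmw.pmd is valid with its lock held (error handling elided, names illustrative):

```c
/* Caller-side sketch: visit every place @page is mapped in @vma. */
static void demo_walk(struct page *page, struct vm_area_struct *vma,
		      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* pvmw.pte points at a mapped pte, ptl held */
		} else {
			/* pvmw.pmd maps the page as a THP, pmd lock held */
		}
	}
	/* the walk drops its own locks when it returns false */
}
```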
/**
@@ -266,14 +308,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.vma = vma,
.flags = PVMW_SYNC,
};
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
- if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+ pvmw.address = vma_address(page, vma);
+ if (pvmw.address == -EFAULT)
return 0;
- pvmw.address = max(start, vma->vm_start);
if (!page_vma_mapped_walk(&pvmw))
return 0;
page_vma_mapped_walk_done(&pvmw);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index e81640d9f177..9b3db11a4d1d 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,6 +58,45 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return err;
}
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk, int pdshift)
+{
+ int err = 0;
+ const struct mm_walk_ops *ops = walk->ops;
+ int shift = hugepd_shift(*phpd);
+ int page_size = 1 << shift;
+
+ if (!ops->pte_entry)
+ return 0;
+
+ if (addr & (page_size - 1))
+ return 0;
+
+ for (;;) {
+ pte_t *pte;
+
+ spin_lock(&walk->mm->page_table_lock);
+ pte = hugepte_offset(*phpd, addr, pdshift);
+ err = ops->pte_entry(pte, addr, addr + page_size, walk);
+ spin_unlock(&walk->mm->page_table_lock);
+
+ if (err)
+ break;
+ if (addr >= end - page_size)
+ break;
+ addr += page_size;
+ }
+ return err;
+}
+#else
+static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk, int pdshift)
+{
+ return 0;
+}
+#endif
+
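The hugepd path above only fires when the walker supplies a ->pte_entry callback. A minimal sketch of such a walker, assuming the walk_page_range() signature of this kernel:

```c
/* Sketch of a walker that sees hugepd-backed pages via ->pte_entry. */
#include <linux/pagewalk.h>

static int demo_pte_entry(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	/* @next - @addr is the mapping size: PAGE_SIZE for normal ptes,
	 * the huge page size when called from walk_hugepd_range(). */
	return 0;
}

static const struct mm_walk_ops demo_ops = {
	.pte_entry = demo_pte_entry,
};

/* Called with mmap_read_lock(mm) held:
 * walk_page_range(mm, start, end, &demo_ops, NULL); */
```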
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
@@ -108,7 +147,10 @@ again:
goto again;
}
- err = walk_pte_range(pmd, addr, next, walk);
+ if (is_hugepd(__hugepd(pmd_val(*pmd))))
+ err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
+ else
+ err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
} while (pmd++, addr = next, addr != end);
@@ -157,7 +199,10 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
if (pud_none(*pud))
goto again;
- err = walk_pmd_range(pud, addr, next, walk);
+ if (is_hugepd(__hugepd(pud_val(*pud))))
+ err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
+ else
+ err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
} while (pud++, addr = next, addr != end);
@@ -189,7 +234,9 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
if (err)
break;
}
- if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+ if (is_hugepd(__hugepd(p4d_val(*p4d))))
+ err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
+ else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
err = walk_pud_range(p4d, addr, next, walk);
if (err)
break;
@@ -224,8 +271,9 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
if (err)
break;
}
- if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
- ops->pte_entry)
+ if (is_hugepd(__hugepd(pgd_val(*pgd))))
+ err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
+ else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c2210e1cdb51..4e640baf9794 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(!pmd_present(*pmdp));
- /* Below assumes pmd_present() is true */
- VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+ !pmd_devmap(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
diff --git a/mm/rmap.c b/mm/rmap.c
index 693a610e181d..e05c300048e6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
- unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
- } else if (page->mapping) {
- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
- return -EFAULT;
- } else
+ } else if (!vma->vm_file) {
return -EFAULT;
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
return -EFAULT;
- return address;
+ }
+
+ return vma_address(page, vma);
}
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- min(vma->vm_end, address + page_size(page)));
+ vma_address_end(page, vma));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ if (flags & TTU_SYNC)
+ pvmw.flags = PVMW_SYNC;
+
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
return true;
@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
+ range.end = PageKsm(page) ?
+ address + PAGE_SIZE : vma_address_end(page, vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
- address,
- min(vma->vm_end, address + page_size(page)));
+ address, range.end);
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
else
rmap_walk(page, &rwc);
- return !page_mapcount(page) ? true : false;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ return !page_mapcount(page);
}
/**
@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
diff --git a/mm/shmem.c b/mm/shmem.c
index 5d46611cba8d..6268b9b4e41a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1695,8 +1695,9 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
- struct page *page;
+ struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
+ struct swap_info_struct *si;
+ struct page *page = NULL;
swp_entry_t swap;
int error;
@@ -1704,6 +1705,12 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
swap = radix_to_swp_entry(*pagep);
*pagep = NULL;
+ /* Prevent swapoff from happening to us. */
+ si = get_swap_device(swap);
+ if (!si) {
+ error = -EINVAL;
+ goto failed;
+ }
/* Look it up and read it in.. */
page = lookup_swap_cache(swap, NULL, 0);
if (!page) {
@@ -1765,6 +1772,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
swap_free(swap);
*pagep = page;
+ if (si)
+ put_swap_device(si);
return 0;
failed:
if (!shmem_confirm_swap(mapping, index, swap))
@@ -1775,6 +1784,9 @@ unlock:
put_page(page);
}
+ if (si)
+ put_swap_device(si);
+
return error;
}
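
The guard this hunk adds, shown in isolation: pin the swap device across the sleepable lookup so a concurrent swapoff cannot free it underneath us. A sketch with a hypothetical caller, not the full shmem path:

```c
/* Sketch of the get_swap_device()/put_swap_device() guard pattern. */
static int demo_swapin(swp_entry_t swap)
{
	struct swap_info_struct *si;
	int error = 0;

	si = get_swap_device(swap);	/* pins the swap device, or NULL */
	if (!si)
		return -EINVAL;		/* the entry went away under us */

	/* ... lookup_swap_cache()/shmem_swapin() may now sleep safely ... */

	put_swap_device(si);		/* drop the reference */
	return error;
}
```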
@@ -1816,7 +1828,7 @@ repeat:
}
sbinfo = SHMEM_SB(inode->i_sb);
- charge_mm = vma ? vma->vm_mm : current->mm;
+ charge_mm = vma ? vma->vm_mm : NULL;
page = pagecache_get_page(mapping, index,
FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
@@ -2227,7 +2239,7 @@ static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
}
#endif
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
struct inode *inode = file_inode(file);
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2239,13 +2251,13 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
* no serialization needed when called from shm_destroy().
*/
if (lock && !(info->flags & VM_LOCKED)) {
- if (!user_shm_lock(inode->i_size, user))
+ if (!user_shm_lock(inode->i_size, ucounts))
goto out_nomem;
info->flags |= VM_LOCKED;
mapping_set_unevictable(file->f_mapping);
}
- if (!lock && (info->flags & VM_LOCKED) && user) {
- user_shm_unlock(inode->i_size, user);
+ if (!lock && (info->flags & VM_LOCKED) && ucounts) {
+ user_shm_unlock(inode->i_size, ucounts);
info->flags &= ~VM_LOCKED;
mapping_clear_unevictable(file->f_mapping);
}
@@ -4092,7 +4104,7 @@ int shmem_unuse(unsigned int type, bool frontswap,
return 0;
}
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
return 0;
}
diff --git a/mm/slab.h b/mm/slab.h
index 18c1927cd196..7b60ef2f32c3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -215,6 +215,7 @@ DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
+long validate_slab_cache(struct kmem_cache *s);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
@@ -239,6 +240,8 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page);
+void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr);
static inline void memcg_free_page_obj_cgroups(struct page *page)
{
@@ -283,20 +286,6 @@ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
return true;
}
-static inline void mod_objcg_state(struct obj_cgroup *objcg,
- struct pglist_data *pgdat,
- enum node_stat_item idx, int nr)
-{
- struct mem_cgroup *memcg;
- struct lruvec *lruvec;
-
- rcu_read_lock();
- memcg = obj_cgroup_memcg(objcg);
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- mod_memcg_lruvec_state(lruvec, idx, nr);
- rcu_read_unlock();
-}
-
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
struct obj_cgroup *objcg,
gfp_t flags, size_t size,
@@ -309,7 +298,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
if (!memcg_kmem_enabled() || !objcg)
return;
- flags &= ~__GFP_ACCOUNT;
for (i = 0; i < size; i++) {
if (likely(p[i])) {
page = virt_to_head_page(p[i]);
@@ -630,6 +618,12 @@ static inline bool slab_want_init_on_free(struct kmem_cache *c)
return false;
}
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+void debugfs_slab_release(struct kmem_cache *);
+#else
+static inline void debugfs_slab_release(struct kmem_cache *s) { }
+#endif
+
#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a4a571428c51..c126e6f6b5a5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -97,8 +97,7 @@ EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
- if (!name || in_interrupt() || size < sizeof(void *) ||
- size > KMALLOC_MAX_SIZE) {
+ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
pr_err("kmem_cache_create(%s) integrity check failed\n", name);
return -EINVAL;
}
@@ -378,11 +377,11 @@ out_unlock:
if (err) {
if (flags & SLAB_PANIC)
- panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
- name, err);
+ panic("%s: Failed to create slab '%s'. Error %d\n",
+ __func__, name, err);
else {
- pr_warn("kmem_cache_create(%s) failed with error %d\n",
- name, err);
+ pr_warn("%s(%s) failed with error %d\n",
+ __func__, name, err);
dump_stack();
}
return NULL;
@@ -449,6 +448,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
rcu_barrier();
list_for_each_entry_safe(s, s2, &to_destroy, list) {
+ debugfs_slab_release(s);
kfence_shutdown_cache(s);
#ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_release(s);
@@ -476,6 +476,7 @@ static int shutdown_cache(struct kmem_cache *s)
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
kfence_shutdown_cache(s);
+ debugfs_slab_release(s);
#ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_unlink(s);
sysfs_slab_release(s);
@@ -509,8 +510,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
err = shutdown_cache(s);
if (err) {
- pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
- s->name);
+ pr_err("%s %s: Slab cache still has objects\n",
+ __func__, s->name);
dump_stack();
}
out_unlock:
@@ -737,26 +738,30 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
}
#ifdef CONFIG_ZONE_DMA
-#define INIT_KMALLOC_INFO(__size, __short_size) \
-{ \
- .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
- .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
- .name[KMALLOC_DMA] = "dma-kmalloc-" #__short_size, \
- .size = __size, \
-}
+#define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
+#define KMALLOC_DMA_NAME(sz)
+#endif
+
+#ifdef CONFIG_MEMCG_KMEM
+#define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
+#else
+#define KMALLOC_CGROUP_NAME(sz)
+#endif
+
#define INIT_KMALLOC_INFO(__size, __short_size) \
{ \
.name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
+ KMALLOC_CGROUP_NAME(__short_size) \
+ KMALLOC_DMA_NAME(__short_size) \
.size = __size, \
}
-#endif
/*
* kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
- * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
- * kmalloc-67108864.
+ * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
+ * kmalloc-32M.
*/
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
INIT_KMALLOC_INFO(0, 0),
@@ -784,8 +789,7 @@ const struct kmalloc_info_struct kmalloc_info[] __initconst = {
INIT_KMALLOC_INFO(4194304, 4M),
INIT_KMALLOC_INFO(8388608, 8M),
INIT_KMALLOC_INFO(16777216, 16M),
- INIT_KMALLOC_INFO(33554432, 32M),
- INIT_KMALLOC_INFO(67108864, 64M)
+ INIT_KMALLOC_INFO(33554432, 32M)
};
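
With both CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM enabled, one table entry now carries four names. INIT_KMALLOC_INFO(32, 32) expands to approximately:

```c
/* Approximate expansion of INIT_KMALLOC_INFO(32, 32) with both
 * CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM enabled. */
{
	.name[KMALLOC_NORMAL]	= "kmalloc-32",
	.name[KMALLOC_RECLAIM]	= "kmalloc-rcl-32",
	.name[KMALLOC_CGROUP]	= "kmalloc-cg-32",
	.name[KMALLOC_DMA]	= "dma-kmalloc-32",
	.size			= 32,
},
```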
/*
@@ -838,13 +842,27 @@ void __init setup_kmalloc_cache_index_table(void)
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
- if (type == KMALLOC_RECLAIM)
+ if (type == KMALLOC_RECLAIM) {
flags |= SLAB_RECLAIM_ACCOUNT;
+ } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
+ if (cgroup_memory_nokmem) {
+ kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
+ return;
+ }
+ flags |= SLAB_ACCOUNT;
+ }
kmalloc_caches[type][idx] = create_kmalloc_cache(
kmalloc_info[idx].name[type],
kmalloc_info[idx].size, flags, 0,
kmalloc_info[idx].size);
+
+ /*
+ * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
+ * KMALLOC_NORMAL caches.
+ */
+ if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
+ kmalloc_caches[type][idx]->refcount = -1;
}
/*
@@ -857,6 +875,9 @@ void __init create_kmalloc_caches(slab_flags_t flags)
int i;
enum kmalloc_cache_type type;
+ /*
+ * Includes KMALLOC_CGROUP when CONFIG_MEMCG_KMEM is defined
+ */
for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
if (!kmalloc_caches[type][i])
diff --git a/mm/slub.c b/mm/slub.c
index 3f96e099817a..3bc8b940c933 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
+#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
@@ -35,7 +36,9 @@
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
+#include <kunit/test.h>
+#include <linux/debugfs.h>
#include <trace/events/kmem.h>
#include "internal.h"
@@ -116,12 +119,26 @@
*/
#ifdef CONFIG_SLUB_DEBUG
+
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-#endif
+
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
+
+#else /* CONFIG_SLUB_DEBUG */
+
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SLUB_DEBUG */
static inline bool kmem_cache_debug(struct kmem_cache *s)
{
@@ -153,9 +170,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
* - Variable sizing of the per node arrays
*/
-/* Enable to test recovery from slab corruption on boot */
-#undef SLUB_RESILIENCY_TEST
-
/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG
@@ -225,6 +239,12 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
#endif
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+static void debugfs_slab_add(struct kmem_cache *);
+#else
+static inline void debugfs_slab_add(struct kmem_cache *s) { }
+#endif
+
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
@@ -448,6 +468,26 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
+#if IS_ENABLED(CONFIG_KUNIT)
+static bool slab_add_kunit_errors(void)
+{
+ struct kunit_resource *resource;
+
+ if (likely(!current->kunit_test))
+ return false;
+
+ resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
+ if (!resource)
+ return false;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+ return true;
+}
+#else
+static inline bool slab_add_kunit_errors(void) { return false; }
+#endif
+
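slab_add_kunit_errors() redirects would-be reports into a counter owned by the running test instead of tainting the kernel. The test-side contract, modelled on the slub KUnit test from the same series (treat the details as an assumption here):

```c
/* Sketch of the test side: register a "slab_errors" resource that
 * slab_add_kunit_errors() finds by name and increments. */
#include <kunit/test.h>

static struct kunit_resource resource;
static int slab_errors;

static int test_init(struct kunit *test)
{
	slab_errors = 0;
	kunit_add_named_resource(test, NULL, NULL, &resource,
				 "slab_errors", &slab_errors);
	return 0;
}

/* A test case then provokes corruption in a dedicated cache and
 * asserts on slab_errors, e.g. KUNIT_EXPECT_EQ(test, 2, slab_errors). */
```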
/*
* Determine a map of object in use on a page.
*
@@ -668,16 +708,18 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
pr_err("=============================================================================\n");
pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
pr_err("-----------------------------------------------------------------------------\n\n");
-
- add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
va_end(args);
}
+__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
struct va_format vaf;
va_list args;
+ if (slab_add_kunit_errors())
+ return;
+
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -712,15 +754,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
s->red_left_pad);
else if (p > addr + 16)
print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section(KERN_ERR, "Object ", p,
+ print_section(KERN_ERR, "Object ", p,
min_t(unsigned int, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
off = get_info_end(s);
@@ -732,7 +774,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section(KERN_ERR, "Padding ", p + off,
+ print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
dump_stack();
@@ -741,8 +783,12 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
+ if (slab_add_kunit_errors())
+ return;
+
slab_bug(s, "%s", reason);
print_trailer(s, page, object);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
@@ -751,12 +797,16 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
va_list args;
char buf[100];
+ if (slab_add_kunit_errors())
+ return;
+
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
slab_bug(s, "%s", buf);
print_page_info(page);
dump_stack();
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -778,7 +828,7 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
- slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+ slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
memset(from, data, to - from);
}
@@ -800,12 +850,17 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
while (end > fault && end[-1] == value)
end--;
+ if (slab_add_kunit_errors())
+ goto skip_bug_print;
+
slab_bug(s, "%s overwritten", what);
pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
fault, end - 1, fault - addr,
fault[0], value);
print_trailer(s, page, object);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+skip_bug_print:
restore_bytes(s, what, value, fault, end);
return 0;
}
@@ -909,11 +964,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
u8 *endobject = object + s->object_size;
if (s->flags & SLAB_RED_ZONE) {
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Left Redzone",
object - s->red_left_pad, val, s->red_left_pad))
return 0;
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Right Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
} else {
@@ -928,7 +983,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, page, p, "Poison",
+ !check_bytes_and_report(s, page, p, "End Poison",
p + s->object_size - 1, POISON_END, 1)))
return 0;
/*
@@ -1027,13 +1082,13 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
page->objects, max_objects);
page->objects = max_objects;
- slab_fix(s, "Number of objects adjusted.");
+ slab_fix(s, "Number of objects adjusted");
}
if (page->inuse != page->objects - nr) {
slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
page->inuse, page->objects - nr);
page->inuse = page->objects - nr;
- slab_fix(s, "Object count adjusted.");
+ slab_fix(s, "Object count adjusted");
}
return search == NULL;
}
@@ -1397,6 +1452,8 @@ static int __init setup_slub_debug(char *str)
out:
if (slub_debug != 0 || slub_debug_string)
static_branch_enable(&slub_debug_enabled);
+ else
+ static_branch_disable(&slub_debug_enabled);
if ((static_branch_unlikely(&init_on_alloc) ||
static_branch_unlikely(&init_on_free)) &&
(slub_debug & SLAB_POISON))
@@ -3689,7 +3746,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
- unsigned int freepointer_area;
unsigned int order;
/*
@@ -3698,13 +3754,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* the possible location of the free pointer.
*/
size = ALIGN(size, sizeof(void *));
- /*
- * This is the area of the object where a freepointer can be
- * safely written. If redzoning adds more to the inuse size, we
- * can't use that portion for writing the freepointer, so
- * s->offset must be limited within this for the general case.
- */
- freepointer_area = size;
#ifdef CONFIG_SLUB_DEBUG
/*
@@ -3730,19 +3779,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* With that we have determined the number of bytes in actual use
- * by the object. This is the potential offset to the free pointer.
+ * by the object and redzoning.
*/
s->inuse = size;
- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
- s->ctor)) {
+ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+ s->ctor) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
* kmem_cache_free.
*
* This is the case if we do RCU, have a constructor or
- * destructor or are poisoning the objects.
+ * destructor, are poisoning the objects, or are
+ * redzoning an object smaller than sizeof(void *).
*
* The assumption that s->offset >= s->inuse means free
* pointer is outside of the object is used in the
@@ -3751,13 +3802,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
s->offset = size;
size += sizeof(void *);
- } else if (freepointer_area > sizeof(void *)) {
+ } else {
/*
* Store freelist pointer near middle of object to keep
* it away from the edges of the object to avoid small
* sized over/underflows from neighboring allocations.
*/
- s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+ s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
}
#ifdef CONFIG_SLUB_DEBUG
@@ -4458,6 +4509,10 @@ void __init kmem_cache_init(void)
if (debug_guardpage_minorder())
slub_max_order = 0;
+ /* Print slub debugging pointers without hashing */
+ if (__slub_debug_enabled())
+ no_hash_pointers_enable(NULL);
+
kmem_cache_node = &boot_kmem_cache_node;
kmem_cache = &boot_kmem_cache;
@@ -4546,6 +4601,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
if (err)
__kmem_cache_release(s);
+ if (s->flags & SLAB_STORE_USER)
+ debugfs_slab_add(s);
+
return err;
}
@@ -4654,9 +4712,11 @@ static int validate_slab_node(struct kmem_cache *s,
validate_slab(s, page);
count++;
}
- if (count != n->nr_partial)
+ if (count != n->nr_partial) {
pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
s->name, count, n->nr_partial);
+ slab_add_kunit_errors();
+ }
if (!(s->flags & SLAB_STORE_USER))
goto out;
@@ -4665,16 +4725,18 @@ static int validate_slab_node(struct kmem_cache *s,
validate_slab(s, page);
count++;
}
- if (count != atomic_long_read(&n->nr_slabs))
+ if (count != atomic_long_read(&n->nr_slabs)) {
pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
s->name, count, atomic_long_read(&n->nr_slabs));
+ slab_add_kunit_errors();
+ }
out:
spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
-static long validate_slab_cache(struct kmem_cache *s)
+long validate_slab_cache(struct kmem_cache *s)
{
int node;
unsigned long count = 0;
@@ -4686,6 +4748,9 @@ static long validate_slab_cache(struct kmem_cache *s)
return count;
}
+EXPORT_SYMBOL(validate_slab_cache);
+
+#ifdef CONFIG_DEBUG_FS
/*
* Generate lists of code addresses where slabcache objects are allocated
* and freed.
@@ -4709,6 +4774,8 @@ struct loc_track {
struct location *loc;
};
+static struct dentry *slab_debugfs_root;
+
static void free_loc_track(struct loc_track *t)
{
if (t->max)
@@ -4825,144 +4892,9 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
add_location(t, s, get_track(s, p, alloc));
put_map(map);
}
-
-static int list_locations(struct kmem_cache *s, char *buf,
- enum track_item alloc)
-{
- int len = 0;
- unsigned long i;
- struct loc_track t = { 0, 0, NULL };
- int node;
- struct kmem_cache_node *n;
-
- if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
- GFP_KERNEL)) {
- return sysfs_emit(buf, "Out of memory\n");
- }
- /* Push back cpu slabs */
- flush_all(s);
-
- for_each_kmem_cache_node(s, node, n) {
- unsigned long flags;
- struct page *page;
-
- if (!atomic_long_read(&n->nr_slabs))
- continue;
-
- spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, slab_list)
- process_slab(&t, s, page, alloc);
- list_for_each_entry(page, &n->full, slab_list)
- process_slab(&t, s, page, alloc);
- spin_unlock_irqrestore(&n->list_lock, flags);
- }
-
- for (i = 0; i < t.count; i++) {
- struct location *l = &t.loc[i];
-
- len += sysfs_emit_at(buf, len, "%7ld ", l->count);
-
- if (l->addr)
- len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr);
- else
- len += sysfs_emit_at(buf, len, "<not-available>");
-
- if (l->sum_time != l->min_time)
- len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld",
- l->min_time,
- (long)div_u64(l->sum_time,
- l->count),
- l->max_time);
- else
- len += sysfs_emit_at(buf, len, " age=%ld", l->min_time);
-
- if (l->min_pid != l->max_pid)
- len += sysfs_emit_at(buf, len, " pid=%ld-%ld",
- l->min_pid, l->max_pid);
- else
- len += sysfs_emit_at(buf, len, " pid=%ld",
- l->min_pid);
-
- if (num_online_cpus() > 1 &&
- !cpumask_empty(to_cpumask(l->cpus)))
- len += sysfs_emit_at(buf, len, " cpus=%*pbl",
- cpumask_pr_args(to_cpumask(l->cpus)));
-
- if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
- len += sysfs_emit_at(buf, len, " nodes=%*pbl",
- nodemask_pr_args(&l->nodes));
-
- len += sysfs_emit_at(buf, len, "\n");
- }
-
- free_loc_track(&t);
- if (!t.count)
- len += sysfs_emit_at(buf, len, "No data\n");
-
- return len;
-}
+#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SLUB_DEBUG */
-#ifdef SLUB_RESILIENCY_TEST
-static void __init resiliency_test(void)
-{
- u8 *p;
- int type = KMALLOC_NORMAL;
-
- BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
-
- pr_err("SLUB resiliency testing\n");
- pr_err("-----------------------\n");
- pr_err("A. Corruption after allocation\n");
-
- p = kzalloc(16, GFP_KERNEL);
- p[16] = 0x12;
- pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
- p + 16);
-
- validate_slab_cache(kmalloc_caches[type][4]);
-
- /* Hmmm... The next two are dangerous */
- p = kzalloc(32, GFP_KERNEL);
- p[32 + sizeof(void *)] = 0x34;
- pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
- p);
- pr_err("If allocated object is overwritten then not detectable\n\n");
-
- validate_slab_cache(kmalloc_caches[type][5]);
- p = kzalloc(64, GFP_KERNEL);
- p += 64 + (get_cycles() & 0xff) * sizeof(void *);
- *p = 0x56;
- pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
- p);
- pr_err("If allocated object is overwritten then not detectable\n\n");
- validate_slab_cache(kmalloc_caches[type][6]);
-
- pr_err("\nB. Corruption after free\n");
- p = kzalloc(128, GFP_KERNEL);
- kfree(p);
- *p = 0x78;
- pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
- validate_slab_cache(kmalloc_caches[type][7]);
-
- p = kzalloc(256, GFP_KERNEL);
- kfree(p);
- p[50] = 0x9a;
- pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
- validate_slab_cache(kmalloc_caches[type][8]);
-
- p = kzalloc(512, GFP_KERNEL);
- kfree(p);
- p[512] = 0xab;
- pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
- validate_slab_cache(kmalloc_caches[type][9]);
-}
-#else
-#ifdef CONFIG_SYSFS
-static void resiliency_test(void) {};
-#endif
-#endif /* SLUB_RESILIENCY_TEST */
-
#ifdef CONFIG_SYSFS
enum slab_stat_type {
SL_ALL, /* All slabs */
@@ -5350,21 +5282,6 @@ static ssize_t validate_store(struct kmem_cache *s,
}
SLAB_ATTR(validate);
-static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return -ENOSYS;
- return list_locations(s, buf, TRACK_ALLOC);
-}
-SLAB_ATTR_RO(alloc_calls);
-
-static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return -ENOSYS;
- return list_locations(s, buf, TRACK_FREE);
-}
-SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_FAILSLAB
@@ -5528,8 +5445,6 @@ static struct attribute *slab_attrs[] = {
&poison_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
- &alloc_calls_attr.attr,
- &free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
@@ -5811,13 +5726,179 @@ static int __init slab_sysfs_init(void)
}
mutex_unlock(&slab_mutex);
- resiliency_test();
return 0;
}
__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */
+#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
+static int slab_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct location *l;
+ unsigned int idx = *(unsigned int *)v;
+ struct loc_track *t = seq->private;
+
+ if (idx < t->count) {
+ l = &t->loc[idx];
+
+ seq_printf(seq, "%7ld ", l->count);
+
+ if (l->addr)
+ seq_printf(seq, "%pS", (void *)l->addr);
+ else
+ seq_puts(seq, "<not-available>");
+
+ if (l->sum_time != l->min_time) {
+ seq_printf(seq, " age=%ld/%llu/%ld",
+ l->min_time, div_u64(l->sum_time, l->count),
+ l->max_time);
+ } else
+ seq_printf(seq, " age=%ld", l->min_time);
+
+ if (l->min_pid != l->max_pid)
+ seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
+ else
+ seq_printf(seq, " pid=%ld",
+ l->min_pid);
+
+ if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
+ seq_printf(seq, " cpus=%*pbl",
+ cpumask_pr_args(to_cpumask(l->cpus)));
+
+ if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
+ seq_printf(seq, " nodes=%*pbl",
+ nodemask_pr_args(&l->nodes));
+
+ seq_puts(seq, "\n");
+ }
+
+ if (!idx && !t->count)
+ seq_puts(seq, "No data\n");
+
+ return 0;
+}
+
+static void slab_debugfs_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
+{
+ struct loc_track *t = seq->private;
+
+ v = ppos;
+ ++*ppos;
+ if (*ppos <= t->count)
+ return v;
+
+ return NULL;
+}
+
+static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
+{
+ return ppos;
+}
+
+static const struct seq_operations slab_debugfs_sops = {
+ .start = slab_debugfs_start,
+ .next = slab_debugfs_next,
+ .stop = slab_debugfs_stop,
+ .show = slab_debugfs_show,
+};
+
+static int slab_debug_trace_open(struct inode *inode, struct file *filep)
+{
+ struct kmem_cache_node *n;
+ enum track_item alloc;
+ int node;
+ struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
+ sizeof(struct loc_track));
+ struct kmem_cache *s = file_inode(filep)->i_private;
+
+ if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
+ alloc = TRACK_ALLOC;
+ else
+ alloc = TRACK_FREE;
+
+ if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Push back cpu slabs */
+ flush_all(s);
+
+ for_each_kmem_cache_node(s, node, n) {
+ unsigned long flags;
+ struct page *page;
+
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, slab_list)
+ process_slab(t, s, page, alloc);
+ list_for_each_entry(page, &n->full, slab_list)
+ process_slab(t, s, page, alloc);
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ return 0;
+}
+
+static int slab_debug_trace_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct loc_track *t = seq->private;
+
+ free_loc_track(t);
+ return seq_release_private(inode, file);
+}
+
+static const struct file_operations slab_debugfs_fops = {
+ .open = slab_debug_trace_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = slab_debug_trace_release,
+};
+
+static void debugfs_slab_add(struct kmem_cache *s)
+{
+ struct dentry *slab_cache_dir;
+
+ if (unlikely(!slab_debugfs_root))
+ return;
+
+ slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
+
+ debugfs_create_file("alloc_traces", 0400,
+ slab_cache_dir, s, &slab_debugfs_fops);
+
+ debugfs_create_file("free_traces", 0400,
+ slab_cache_dir, s, &slab_debugfs_fops);
+}
+
+void debugfs_slab_release(struct kmem_cache *s)
+{
+ debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+}
+
+static int __init slab_debugfs_init(void)
+{
+ struct kmem_cache *s;
+
+ slab_debugfs_root = debugfs_create_dir("slab", NULL);
+
+ list_for_each_entry(s, &slab_caches, list)
+ if (s->flags & SLAB_STORE_USER)
+ debugfs_slab_add(s);
+
+ return 0;
+}
+__initcall(slab_debugfs_init);
+#endif
/*
* The /proc/slabinfo ABI
*/
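The alloc_calls and free_calls files removed above were constrained by the one-page sysfs buffer; the replacement streams the same SLAB_STORE_USER location tracking through debugfs via seq_file, with no size cap. A minimal sketch of the seq_file idiom used here, iterating an in-memory array by position, where ->start/->next hand the cursor to ->show (demo_items and demo_nr are hypothetical, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/seq_file.h>

	static const int demo_items[] = { 1, 2, 3 };
	static const unsigned int demo_nr = ARRAY_SIZE(demo_items);

	static void *demo_start(struct seq_file *seq, loff_t *ppos)
	{
		/* Hand the position itself to ->show(); it bounds-checks. */
		return ppos;
	}

	static void *demo_next(struct seq_file *seq, void *v, loff_t *ppos)
	{
		++*ppos;
		return *ppos < demo_nr ? ppos : NULL;
	}

	static void demo_stop(struct seq_file *seq, void *v)
	{
	}

	static int demo_show(struct seq_file *seq, void *v)
	{
		loff_t idx = *(loff_t *)v;

		if (idx < demo_nr)
			seq_printf(seq, "%d\n", demo_items[idx]);
		return 0;
	}

	static const struct seq_operations demo_sops = {
		.start	= demo_start,
		.next	= demo_next,
		.stop	= demo_stop,
		.show	= demo_show,
	};

slab_debug_trace_open() wires the equivalent operations up with __seq_open_private(), which also allocates the loc_track cursor that slab_debug_trace_release() frees again.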
diff --git a/mm/sparse.c b/mm/sparse.c
index b2ada9dc00cb..7272f7a1449d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -344,6 +344,15 @@ size_t mem_section_usage_size(void)
return sizeof(struct mem_section_usage) + usemap_size();
}
+static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
+{
+#ifndef CONFIG_NUMA
+ return __pa_symbol(pgdat);
+#else
+ return __pa(pgdat);
+#endif
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
@@ -362,7 +371,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+ goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
@@ -390,7 +399,7 @@ static void __init check_usemap_section_nr(int nid,
}
usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
- pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+ pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
if (usemap_snr == pgdat_snr)
return;
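pgdat_to_phys() exists because, with !CONFIG_NUMA, NODE_DATA(0) points at the statically allocated contig_page_data inside the kernel image rather than at allocator memory; on architectures such as arm64, where the kernel image may sit outside the linear map, __pa() is only valid for linear-map addresses and image symbols must go through __pa_symbol(). A sketch of the distinction (assumes a !CONFIG_NUMA build, so that contig_page_data is defined):

	void *heap = kmalloc(64, GFP_KERNEL);

	phys_addr_t a = __pa(heap);			/* linear-map address  */
	phys_addr_t b = __pa_symbol(&contig_page_data);	/* kernel-image symbol */
	kfree(heap);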
diff --git a/mm/swap.c b/mm/swap.c
index dfb48cf9c2c9..6c11db780467 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -95,7 +95,7 @@ static void __put_single_page(struct page *page)
{
__page_cache_release(page);
mem_cgroup_uncharge(page);
- free_unref_page(page);
+ free_unref_page(page, 0);
}
static void __put_compound_page(struct page *page)
@@ -313,7 +313,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
void lru_note_cost_page(struct page *page)
{
- lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
+ lru_note_cost(mem_cgroup_page_lruvec(page),
page_is_file_lru(page), thp_nr_pages(page));
}
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 6248d1030a9b..a66f3e0ec973 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -43,8 +43,6 @@ static DEFINE_MUTEX(swap_slots_cache_mutex);
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
static void __drain_swap_slots_cache(unsigned int type);
-static void deactivate_swap_slots_cache(void);
-static void reactivate_swap_slots_cache(void);
#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 272ea2108c9d..c56aa9ac050d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -114,8 +114,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
SetPageSwapCache(page);
do {
- unsigned long nr_shadows = 0;
-
xas_lock_irq(&xas);
xas_create_range(&xas);
if (xas_error(&xas))
@@ -124,7 +122,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
old = xas_load(&xas);
if (xa_is_value(old)) {
- nr_shadows++;
if (shadowp)
*shadowp = old;
}
@@ -260,7 +257,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
void *old;
for (;;) {
- unsigned long nr_shadows = 0;
swp_entry_t entry = swp_entry(type, curr);
struct address_space *address_space = swap_address_space(entry);
XA_STATE(xas, &address_space->i_pages, curr);
@@ -270,7 +266,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
if (!xa_is_value(old))
continue;
xas_store(&xas, NULL);
- nr_shadows++;
}
xa_unlock_irq(&address_space->i_pages);
@@ -291,7 +286,7 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
* try_to_free_swap() _with_ the lock.
* - Marcelo
*/
-static inline void free_swap_cache(struct page *page)
+void free_swap_cache(struct page *page)
{
if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
try_to_free_swap(page);
@@ -698,7 +693,12 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
void exit_swap_address_space(unsigned int type)
{
- kvfree(swapper_spaces[type]);
+ int i;
+ struct address_space *spaces = swapper_spaces[type];
+
+ for (i = 0; i < nr_swapper_spaces[type]; i++)
+ VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
+ kvfree(spaces);
nr_swapper_spaces[type] = 0;
swapper_spaces[type] = NULL;
}
@@ -721,7 +721,6 @@ static void swap_ra_info(struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
- swp_entry_t entry;
unsigned long faddr, pfn, fpfn;
unsigned long start, end;
pte_t *pte, *orig_pte;
@@ -739,11 +738,6 @@ static void swap_ra_info(struct vm_fault *vmf,
faddr = vmf->address;
orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
- entry = pte_to_swp_entry(*pte);
- if ((unlikely(non_swap_entry(entry)))) {
- pte_unmap(orig_pte);
- return;
- }
fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..e898c879a434 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -39,6 +39,7 @@
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>
+#include <linux/completion.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
@@ -99,11 +100,10 @@ atomic_t nr_rotate_swap = ATOMIC_INIT(0);
static struct swap_info_struct *swap_type_to_swap_info(int type)
{
- if (type >= READ_ONCE(nr_swapfiles))
+ if (type >= MAX_SWAPFILES)
return NULL;
- smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
- return READ_ONCE(swap_info[type]);
+ return READ_ONCE(swap_info[type]); /* rcu_dereference() */
}
static inline unsigned char swap_count(unsigned char ent)
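The READ_ONCE() above is annotated as an open-coded rcu_dereference() because alloc_swap_info() (further down in this file) now publishes new entries with smp_store_release(); bounding the index by MAX_SWAPFILES instead of nr_swapfiles works because unused slots simply hold NULL, so the old smp_rmb() pairing is no longer needed. The underlying publish/subscribe pairing, sketched with a hypothetical table[], init_object() and use_object():

	/* writer */
	p = kvzalloc(sizeof(*p), GFP_KERNEL);
	init_object(p);				/* all fields written ...       */
	smp_store_release(&table[i], p);	/* ... before the pointer lands */

	/* reader */
	p = READ_ONCE(table[i]);		/* dependency-ordered load      */
	if (p)
		use_object(p);			/* sees initialized fields      */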
@@ -452,10 +452,10 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
unsigned int idx)
{
/*
- * If scan_swap_map() can't find a free cluster, it will check
+ * If scan_swap_map_slots() can't find a free cluster, it will check
* si->swap_map directly. To make sure the discarding cluster isn't
- * taken by scan_swap_map(), mark the swap entries bad (occupied). It
- * will be cleared after discard
+ * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
+ * It will be cleared after discard
*/
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
@@ -511,6 +511,14 @@ static void swap_discard_work(struct work_struct *work)
spin_unlock(&si->lock);
}
+static void swap_users_ref_free(struct percpu_ref *ref)
+{
+ struct swap_info_struct *si;
+
+ si = container_of(ref, struct swap_info_struct, users);
+ complete(&si->comp);
+}
+
static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
{
struct swap_cluster_info *ci = si->cluster_info;
@@ -580,7 +588,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
}
/*
- * It's possible scan_swap_map() uses a free cluster in the middle of free
+ * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
* cluster list. Avoiding such abuse to avoid list corruption.
*/
static bool
@@ -1028,21 +1036,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
swap_range_free(si, offset, SWAPFILE_CLUSTER);
}
-static unsigned long scan_swap_map(struct swap_info_struct *si,
- unsigned char usage)
-{
- swp_entry_t entry;
- int n_ret;
-
- n_ret = scan_swap_map_slots(si, usage, 1, &entry);
-
- if (n_ret)
- return swp_offset(entry);
- else
- return 0;
-
-}
-
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
{
unsigned long size = swap_entry_size(entry_size);
@@ -1105,14 +1098,14 @@ start_over:
nextsi:
/*
* if we got here, it's likely that si was almost full before,
- * and since scan_swap_map() can drop the si->lock, multiple
- * callers probably all tried to get a page from the same si
- * and it filled up before we could get one; or, the si filled
- * up between us dropping swap_avail_lock and taking si->lock.
- * Since we dropped the swap_avail_lock, the swap_avail_head
- * list may have been modified; so if next is still in the
- * swap_avail_head list then try it, otherwise start over
- * if we have not gotten any slots.
+ * and since scan_swap_map_slots() can drop the si->lock,
+ * multiple callers probably all tried to get a page from the
+ * same si and it filled up before we could get one; or, the si
+ * filled up between us dropping swap_avail_lock and taking
+ * si->lock. Since we dropped the swap_avail_lock, the
+ * swap_avail_head list may have been modified; so if next is
+ * still in the swap_avail_head list then try it, otherwise
+ * start over if we have not gotten any slots.
*/
if (plist_node_empty(&next->avail_lists[node]))
goto start_over;
@@ -1128,30 +1121,6 @@ noswap:
return n_ret;
}
-/* The only caller of this function is now suspend routine */
-swp_entry_t get_swap_page_of_type(int type)
-{
- struct swap_info_struct *si = swap_type_to_swap_info(type);
- pgoff_t offset;
-
- if (!si)
- goto fail;
-
- spin_lock(&si->lock);
- if (si->flags & SWP_WRITEOK) {
- /* This is called for allocating swap entry, not cache */
- offset = scan_swap_map(si, 1);
- if (offset) {
- atomic_long_dec(&nr_swap_pages);
- spin_unlock(&si->lock);
- return swp_entry(type, offset);
- }
- }
- spin_unlock(&si->lock);
-fail:
- return (swp_entry_t) {0};
-}
-
static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
@@ -1270,18 +1239,12 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
* via preventing the swap device from being swapoff, until
* put_swap_device() is called. Otherwise return NULL.
*
- * The entirety of the RCU read critical section must come before the
- * return from or after the call to synchronize_rcu() in
- * enable_swap_info() or swapoff(). So if "si->flags & SWP_VALID" is
- * true, the si->map, si->cluster_info, etc. must be valid in the
- * critical section.
- *
* Notice that swapoff or swapoff+swapon can still happen before the
- * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
- * in put_swap_device() if there isn't any other way to prevent
- * swapoff, such as page lock, page table lock, etc. The caller must
- * be prepared for that. For example, the following situation is
- * possible.
+ * percpu_ref_tryget_live() in get_swap_device() or after the
+ * percpu_ref_put() in put_swap_device() if there isn't any other way
+ * to prevent swapoff, such as page lock, page table lock, etc. The
+ * caller must be prepared for that. For example, the following
+ * situation is possible.
*
* CPU1 CPU2
* do_swap_page()
@@ -1309,21 +1272,27 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
si = swp_swap_info(entry);
if (!si)
goto bad_nofile;
-
- rcu_read_lock();
- if (data_race(!(si->flags & SWP_VALID)))
- goto unlock_out;
+ if (!percpu_ref_tryget_live(&si->users))
+ goto out;
+ /*
+ * Guarantee the si->users are checked before accessing other
+ * fields of swap_info_struct.
+ *
+ * Paired with the spin_unlock() after setup_swap_info() in
+ * enable_swap_info().
+ */
+ smp_rmb();
offset = swp_offset(entry);
if (offset >= si->max)
- goto unlock_out;
+ goto put_out;
return si;
bad_nofile:
pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
return NULL;
-unlock_out:
- rcu_read_unlock();
+put_out:
+ percpu_ref_put(&si->users);
return NULL;
}
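With the SWP_VALID flag gone, the stability guarantee for swap_map, cluster_info and friends is carried entirely by the per-device percpu_ref. The intended calling convention, sketched from the comment above (process_entry() is a placeholder):

	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (si) {
		/*
		 * Fields of *si are stable here: swapoff cannot
		 * complete until the reference is dropped.
		 */
		process_entry(si, entry);
		put_swap_device(si);	/* percpu_ref_put(&si->users) */
	}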
@@ -1803,6 +1772,24 @@ int free_swap_and_cache(swp_entry_t entry)
}
#ifdef CONFIG_HIBERNATION
+
+swp_entry_t get_swap_page_of_type(int type)
+{
+ struct swap_info_struct *si = swap_type_to_swap_info(type);
+ swp_entry_t entry = {0};
+
+ if (!si)
+ goto fail;
+
+ /* This is called for allocating swap entry, not cache */
+ spin_lock(&si->lock);
+ if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
+ atomic_long_dec(&nr_swap_pages);
+ spin_unlock(&si->lock);
+fail:
+ return entry;
+}
+
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -1900,7 +1887,7 @@ unsigned int count_swap_pages(int type, int free)
static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
- return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
+ return pte_same(pte_swp_clear_flags(pte), swp_pte);
}
/*
@@ -2466,7 +2453,7 @@ static void setup_swap_info(struct swap_info_struct *p, int prio,
static void _enable_swap_info(struct swap_info_struct *p)
{
- p->flags |= SWP_WRITEOK | SWP_VALID;
+ p->flags |= SWP_WRITEOK;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
@@ -2497,10 +2484,9 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
/*
- * Guarantee swap_map, cluster_info, etc. fields are valid
- * between get/put_swap_device() if SWP_VALID bit is set
+ * Finished initializing swap device, now it's safe to reference it.
*/
- synchronize_rcu();
+ percpu_ref_resurrect(&p->users);
spin_lock(&swap_lock);
spin_lock(&p->lock);
_enable_swap_info(p);
@@ -2616,16 +2602,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
reenable_swap_slots_cache_unlock();
- spin_lock(&swap_lock);
- spin_lock(&p->lock);
- p->flags &= ~SWP_VALID; /* mark swap device as invalid */
- spin_unlock(&p->lock);
- spin_unlock(&swap_lock);
/*
- * wait for swap operations protected by get/put_swap_device()
- * to complete
+ * Wait for swap operations protected by get/put_swap_device()
+ * to complete.
+ *
+	 * We need synchronize_rcu() here to protect access to the
+	 * swap cache data structure.
*/
+ percpu_ref_kill(&p->users);
synchronize_rcu();
+ wait_for_completion(&p->comp);
flush_work(&p->discard_work);
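The swapoff path now follows the standard percpu_ref teardown: kill the reference so new percpu_ref_tryget_live() callers fail, then wait for the release callback to signal that the last in-flight user is gone. The generic shape of that idiom, on a hypothetical struct my_dev:

	struct my_dev {
		struct percpu_ref users;
		struct completion released;
	};

	static void my_dev_release(struct percpu_ref *ref)
	{
		struct my_dev *d = container_of(ref, struct my_dev, users);

		complete(&d->released);		/* last reference dropped */
	}

	static void my_dev_teardown(struct my_dev *d)
	{
		percpu_ref_kill(&d->users);	/* new tryget_live() fails */
		wait_for_completion(&d->released);
		percpu_ref_exit(&d->users);
		kfree(d);
	}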
@@ -2641,7 +2627,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_lock(&p->lock);
drain_mmlist();
- /* wait for anyone still in scan_swap_map */
+ /* wait for anyone still in scan_swap_map_slots */
p->highest_bit = 0; /* cuts scans short */
while (p->flags >= SWP_SCANNING) {
spin_unlock(&p->lock);
@@ -2857,6 +2843,12 @@ static struct swap_info_struct *alloc_swap_info(void)
if (!p)
return ERR_PTR(-ENOMEM);
+ if (percpu_ref_init(&p->users, swap_users_ref_free,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
+ kvfree(p);
+ return ERR_PTR(-ENOMEM);
+ }
+
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
if (!(swap_info[type]->flags & SWP_USED))
@@ -2864,19 +2856,18 @@ static struct swap_info_struct *alloc_swap_info(void)
}
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
+ percpu_ref_exit(&p->users);
kvfree(p);
return ERR_PTR(-EPERM);
}
if (type >= nr_swapfiles) {
p->type = type;
- WRITE_ONCE(swap_info[type], p);
/*
- * Write swap_info[type] before nr_swapfiles, in case a
- * racing procfs swap_start() or swap_next() is reading them.
- * (We never shrink nr_swapfiles, we never free this entry.)
+ * Publish the swap_info_struct after initializing it.
+ * Note that kvzalloc() above zeroes all its fields.
*/
- smp_wmb();
- WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
+ smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
+ nr_swapfiles++;
} else {
defer = p;
p = swap_info[type];
@@ -2891,9 +2882,13 @@ static struct swap_info_struct *alloc_swap_info(void)
plist_node_init(&p->avail_lists[i], 0);
p->flags = SWP_USED;
spin_unlock(&swap_lock);
- kvfree(defer);
+ if (defer) {
+ percpu_ref_exit(&defer->users);
+ kvfree(defer);
+ }
spin_lock_init(&p->lock);
spin_lock_init(&p->cont_lock);
+ init_completion(&p->comp);
return p;
}
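Note PERCPU_REF_INIT_DEAD above: the reference starts out dead, so get_swap_device() fails on a device that has been allocated but not yet enabled, and enable_swap_info() flips it live with percpu_ref_resurrect() only once setup is finished. Continuing the hypothetical my_dev sketch from earlier:

	if (percpu_ref_init(&d->users, my_dev_release,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL))
		return -ENOMEM;
	init_completion(&d->released);

	/* ... fully initialize the device ... */

	percpu_ref_resurrect(&d->users);	/* tryget_live() now succeeds */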
diff --git a/mm/truncate.c b/mm/truncate.c
index 95af244b112a..234ddd879caa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -167,13 +167,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static void
-truncate_cleanup_page(struct address_space *mapping, struct page *page)
+static void truncate_cleanup_page(struct page *page)
{
- if (page_mapped(page)) {
- unsigned int nr = thp_nr_pages(page);
- unmap_mapping_pages(mapping, page->index, nr, false);
- }
+ if (page_mapped(page))
+ unmap_mapping_page(page);
if (page_has_private(page))
do_invalidatepage(page, 0, thp_size(page));
@@ -218,7 +215,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return -EIO;
- truncate_cleanup_page(mapping, page);
+ truncate_cleanup_page(page);
delete_from_page_cache(page);
return 0;
}
@@ -325,7 +322,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
index = indices[pagevec_count(&pvec) - 1] + 1;
truncate_exceptional_pvec_entries(mapping, &pvec, indices);
for (i = 0; i < pagevec_count(&pvec); i++)
- truncate_cleanup_page(mapping, pvec.pages[i]);
+ truncate_cleanup_page(pvec.pages[i]);
delete_from_page_cache_batch(mapping, &pvec);
for (i = 0; i < pagevec_count(&pvec); i++)
unlock_page(pvec.pages[i]);
@@ -639,6 +636,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
+ if (!did_range_unmap && page_mapped(page)) {
+ /*
+ * If page is mapped, before taking its lock,
+ * zap the rest of the file in one hit.
+ */
+ unmap_mapping_pages(mapping, index,
+ (1 + end - index), false);
+ did_range_unmap = 1;
+ }
+
lock_page(page);
WARN_ON(page_to_index(page) != index);
if (page->mapping != mapping) {
@@ -646,23 +653,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
wait_on_page_writeback(page);
- if (page_mapped(page)) {
- if (!did_range_unmap) {
- /*
- * Zap the rest of the file in one hit.
- */
- unmap_mapping_pages(mapping, index,
- (1 + end - index), false);
- did_range_unmap = 1;
- } else {
- /*
- * Just zap this page
- */
- unmap_mapping_pages(mapping, index,
- 1, false);
- }
- }
+
+ if (page_mapped(page))
+ unmap_mapping_page(page);
BUG_ON(page_mapped(page));
+
ret2 = do_launder_page(mapping, page);
if (ret2 == 0) {
if (!invalidate_complete_page2(mapping, page))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a13ac524f6ff..b2ec7f751bd0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2344,15 +2344,16 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
- unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, const void *caller)
+ unsigned long align, unsigned long shift, unsigned long flags,
+ unsigned long start, unsigned long end, int node,
+ gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
unsigned long requested_size = size;
BUG_ON(in_interrupt());
- size = PAGE_ALIGN(size);
+ size = ALIGN(size, 1ul << shift);
if (unlikely(!size))
return NULL;
@@ -2384,8 +2385,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
const void *caller)
{
- return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
- GFP_KERNEL, caller);
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
@@ -2401,7 +2402,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
- return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+ VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
}
@@ -2409,7 +2411,8 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
- return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+ VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
}
@@ -2564,6 +2567,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
BUG_ON(!page);
__free_pages(page, page_order);
+ cond_resched();
}
atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2755,6 +2759,54 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */
+static inline unsigned int
+vm_area_alloc_pages(gfp_t gfp, int nid,
+ unsigned int order, unsigned long nr_pages, struct page **pages)
+{
+ unsigned int nr_allocated = 0;
+
+ /*
+	 * For order-0 pages we make use of the bulk allocator. If the
+	 * page array comes back only partly populated (or not at all)
+	 * due to allocation failures, fall back to the single page
+	 * allocator, which is more permissive.
+ */
+ if (!order)
+ nr_allocated = alloc_pages_bulk_array_node(
+ gfp, nid, nr_pages, pages);
+ else
+ /*
+		 * Compound pages are required by remap_vmalloc_page()
+		 * for high-order pages.
+ */
+ gfp |= __GFP_COMP;
+
+ /* High-order pages or fallback path if "bulk" fails. */
+ while (nr_allocated < nr_pages) {
+ struct page *page;
+ int i;
+
+ page = alloc_pages_node(nid, gfp, order);
+ if (unlikely(!page))
+ break;
+
+ /*
+ * Careful, we allocate and map page-order pages, but
+ * tracking is done per PAGE_SIZE page so as to keep the
+ * vm_struct APIs independent of the physical/mapped size.
+ */
+ for (i = 0; i < (1U << order); i++)
+ pages[nr_allocated + i] = page + i;
+
+ if (gfpflags_allow_blocking(gfp))
+ cond_resched();
+
+ nr_allocated += 1U << order;
+ }
+
+ return nr_allocated;
+}
+
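vm_area_alloc_pages() is the first vmalloc-side user of the bulk page allocator added in 5.13. A standalone sketch of the fill-then-fallback pattern it implements (the 16-entry array is arbitrary; alloc_pages_bulk_array() only fills NULL slots and returns the number of populated entries):

	struct page *pages[16] = { NULL, };
	unsigned long got, want = ARRAY_SIZE(pages);

	/* One call may fill the whole array; under memory pressure it
	 * can return early with only part of it populated. */
	got = alloc_pages_bulk_array(GFP_KERNEL, want, pages);

	/* Fall back to the single-page allocator for the rest. */
	while (got < want) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		pages[got++] = page;
	}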
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, unsigned int page_shift,
int node)
@@ -2765,8 +2817,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
unsigned long array_size;
unsigned int nr_small_pages = size >> PAGE_SHIFT;
unsigned int page_order;
- struct page **pages;
- unsigned int i;
array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
gfp_mask |= __GFP_NOWARN;
@@ -2775,62 +2825,44 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+ area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
area->caller);
} else {
- pages = kmalloc_node(array_size, nested_gfp, node);
+ area->pages = kmalloc_node(array_size, nested_gfp, node);
}
- if (!pages) {
- free_vm_area(area);
+ if (!area->pages) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "page array size %lu allocation failed",
- nr_small_pages * PAGE_SIZE, array_size);
+ "vmalloc error: size %lu, failed to allocated page array size %lu",
+ nr_small_pages * PAGE_SIZE, array_size);
+ free_vm_area(area);
return NULL;
}
- area->pages = pages;
- area->nr_pages = nr_small_pages;
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
-
page_order = vm_area_page_order(area);
- /*
- * Careful, we allocate and map page_order pages, but tracking is done
- * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
- * the physical/mapped size.
- */
- for (i = 0; i < area->nr_pages; i += 1U << page_order) {
- struct page *page;
- int p;
-
- /* Compound pages required for remap_vmalloc_page */
- page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
- if (unlikely(!page)) {
- /* Successfully allocated i pages, free them in __vfree() */
- area->nr_pages = i;
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "page order %u allocation failed",
- area->nr_pages * PAGE_SIZE, page_order);
- goto fail;
- }
+ area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
+ page_order, nr_small_pages, area->pages);
- for (p = 0; p < (1U << page_order); p++)
- area->pages[i + p] = page + p;
+ atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (gfpflags_allow_blocking(gfp_mask))
- cond_resched();
+ /*
+	 * If not enough pages were obtained to satisfy the
+	 * allocation request, free whatever was allocated via __vfree().
+ */
+ if (area->nr_pages != nr_small_pages) {
+ warn_alloc(gfp_mask, NULL,
+ "vmalloc error: size %lu, page order %u, failed to allocate pages",
+ area->nr_pages * PAGE_SIZE, page_order);
+ goto fail;
}
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) {
+ if (vmap_pages_range(addr, addr + size, prot, area->pages,
+ page_shift) < 0) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "failed to map pages",
- area->nr_pages * PAGE_SIZE);
+ "vmalloc error: size %lu, failed to map pages",
+ area->nr_pages * PAGE_SIZE);
goto fail;
}
@@ -2875,8 +2907,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if ((size >> PAGE_SHIFT) > totalram_pages()) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "exceeds total pages", real_size);
+ "vmalloc error: size %lu, exceeds total pages",
+ real_size);
return NULL;
}
@@ -2902,13 +2934,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
}
again:
- size = PAGE_ALIGN(size);
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
- vm_flags, start, end, node, gfp_mask, caller);
+ area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+ VM_UNINITIALIZED | vm_flags, start, end, node,
+ gfp_mask, caller);
if (!area) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "vm_struct allocation failed", real_size);
+ "vmalloc error: size %lu, vm_struct allocation failed",
+ real_size);
goto fail;
}
@@ -2923,6 +2955,7 @@ again:
*/
clear_vm_uninitialized_flag(area);
+ size = PAGE_ALIGN(size);
kmemleak_vmalloc(area, size, gfp_mask);
return addr;
@@ -2999,6 +3032,23 @@ void *vmalloc(unsigned long size)
EXPORT_SYMBOL(vmalloc);
/**
+ * vmalloc_no_huge - allocate virtually contiguous memory using small pages
+ * @size: allocation size
+ *
+ * Allocate enough non-huge pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+void *vmalloc_no_huge(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(vmalloc_no_huge);
+
+/**
* vzalloc - allocate virtually contiguous memory with zero fill
* @size: allocation size
*
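vmalloc_no_huge() gives callers an explicit opt-out from the huge mappings vmalloc may now create. One plausible user is a buffer whose protections are later changed at PAGE_SIZE granularity, which a PMD-mapped area cannot support; a sketch, assuming an architecture that provides set_memory_ro():

	void *buf = vmalloc_no_huge(len);

	if (!buf)
		return -ENOMEM;
	/* Safe: the area is backed by base pages only. */
	set_memory_ro((unsigned long)buf, len >> PAGE_SHIFT);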
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5199b9696bab..d7c3cb8688dd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2015,8 +2015,8 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
*
* Returns the number of pages moved to the given lruvec.
*/
-static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
- struct list_head *list)
+static unsigned int move_pages_to_lru(struct lruvec *lruvec,
+ struct list_head *list)
{
int nr_pages, nr_moved = 0;
LIST_HEAD(pages_to_free);
@@ -2063,7 +2063,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+ VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
add_page_to_lru_list(page, lruvec);
nr_pages = thp_nr_pages(page);
nr_moved += nr_pages;
@@ -2096,7 +2096,7 @@ static int current_may_throttle(void)
* shrink_inactive_list() is a helper for shrink_node(). It returns the number
* of reclaimed pages
*/
-static noinline_for_stack unsigned long
+static unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, enum lru_list lru)
{
@@ -3722,6 +3722,38 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
return sc->nr_scanned >= sc->nr_to_reclaim;
}
+/* Page allocator PCP high watermark is lowered if reclaim is active. */
+static inline void
+update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
+{
+ int i;
+ struct zone *zone;
+
+ for (i = 0; i <= highest_zoneidx; i++) {
+ zone = pgdat->node_zones + i;
+
+ if (!managed_zone(zone))
+ continue;
+
+ if (active)
+ set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
+ else
+ clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
+ }
+}
+
+static inline void
+set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
+{
+ update_reclaim_active(pgdat, highest_zoneidx, true);
+}
+
+static inline void
+clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
+{
+ update_reclaim_active(pgdat, highest_zoneidx, false);
+}
+
/*
* For kswapd, balance_pgdat() will reclaim pages across a node from zones
* that are eligible for use by the caller until at least one zone is
@@ -3774,6 +3806,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
boosted = nr_boost_reclaim;
restart:
+ set_reclaim_active(pgdat, highest_zoneidx);
sc.priority = DEF_PRIORITY;
do {
unsigned long nr_reclaimed = sc.nr_reclaimed;
@@ -3907,6 +3940,8 @@ restart:
pgdat->kswapd_failures++;
out:
+ clear_reclaim_active(pgdat, highest_zoneidx);
+
/* If reclaim was boosted, account for the reclaim done in this pass */
if (boosted) {
unsigned long flags;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cccee36b289c..b0534e068166 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -31,8 +31,6 @@
#include "internal.h"
-#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
-
#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
@@ -41,11 +39,12 @@ static void zero_zone_numa_counters(struct zone *zone)
{
int item, cpu;
- for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
- atomic_long_set(&zone->vm_numa_stat[item], 0);
- for_each_online_cpu(cpu)
- per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
+ for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
+ atomic_long_set(&zone->vm_numa_event[item], 0);
+ for_each_online_cpu(cpu) {
+ per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
= 0;
+ }
}
}
@@ -63,8 +62,8 @@ static void zero_global_numa_counters(void)
{
int item;
- for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
- atomic_long_set(&vm_numa_stat[item], 0);
+ for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
+ atomic_long_set(&vm_numa_event[item], 0);
}
static void invalid_numa_statistics(void)
@@ -161,10 +160,9 @@ void vm_events_fold_cpu(int cpu)
* vm_stat contains the global counters
*/
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
-atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
+atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
-EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);
#ifdef CONFIG_SMP
@@ -266,7 +264,7 @@ void refresh_zone_stat_thresholds(void)
for_each_online_cpu(cpu) {
int pgdat_threshold;
- per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
= threshold;
/* Base nodestat threshold on the largest populated zone. */
@@ -303,7 +301,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
threshold = (*calculate_pressure)(zone);
for_each_online_cpu(cpu)
- per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
= threshold;
}
}
@@ -316,7 +314,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long delta)
{
- struct per_cpu_pageset __percpu *pcp = zone->pageset;
+ struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
s8 __percpu *p = pcp->vm_stat_diff + item;
long x;
long t;
@@ -389,7 +387,7 @@ EXPORT_SYMBOL(__mod_node_page_state);
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset __percpu *pcp = zone->pageset;
+ struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -435,7 +433,7 @@ EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset __percpu *pcp = zone->pageset;
+ struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -495,7 +493,7 @@ EXPORT_SYMBOL(__dec_node_page_state);
static inline void mod_zone_state(struct zone *zone,
enum zone_stat_item item, long delta, int overstep_mode)
{
- struct per_cpu_pageset __percpu *pcp = zone->pageset;
+ struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
s8 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;
@@ -706,8 +704,7 @@ EXPORT_SYMBOL(dec_node_page_state);
* Fold a differential into the global counters.
* Returns the number of counters updated.
*/
-#ifdef CONFIG_NUMA
-static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
+static int fold_diff(int *zone_diff, int *node_diff)
{
int i;
int changes = 0;
@@ -718,12 +715,6 @@ static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
changes++;
}
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- if (numa_diff[i]) {
- atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
- changes++;
- }
-
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
if (node_diff[i]) {
atomic_long_add(node_diff[i], &vm_node_stat[i]);
@@ -731,26 +722,34 @@ static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
}
return changes;
}
-#else
-static int fold_diff(int *zone_diff, int *node_diff)
+
+#ifdef CONFIG_NUMA
+static void fold_vm_zone_numa_events(struct zone *zone)
{
- int i;
- int changes = 0;
+ unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
+ int cpu;
+ enum numa_stat_item item;
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (zone_diff[i]) {
- atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
- changes++;
- }
+ for_each_online_cpu(cpu) {
+ struct per_cpu_zonestat *pzstats;
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- if (node_diff[i]) {
- atomic_long_add(node_diff[i], &vm_node_stat[i]);
- changes++;
+ pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
+ for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
+ zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
}
- return changes;
+
+ for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
+ zone_numa_event_add(zone_numa_events[item], zone, item);
+}
+
+void fold_vm_numa_events(void)
+{
+ struct zone *zone;
+
+ for_each_populated_zone(zone)
+ fold_vm_zone_numa_events(zone);
}
-#endif /* CONFIG_NUMA */
+#endif
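fold_vm_zone_numa_events() replaces the old threshold-driven vm_numa_stat_diff machinery: NUMA counters are now plain per-cpu event counts, summed only when a reader asks, and xchg() lets the reader take-and-zero each CPU's count without any lock. The core idiom, with a hypothetical per-cpu counter:

	static DEFINE_PER_CPU(unsigned long, pending);
	static atomic_long_t total;

	static void fold_pending(void)
	{
		unsigned long sum = 0;
		int cpu;

		/* xchg() atomically takes each CPU's count and zeroes
		 * it, so concurrent increments are deferred, not lost. */
		for_each_online_cpu(cpu)
			sum += xchg(per_cpu_ptr(&pending, cpu), 0);

		atomic_long_add(sum, &total);
	}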
/*
* Update the zone counters for the current cpu.
@@ -774,41 +773,30 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
struct zone *zone;
int i;
int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
-#ifdef CONFIG_NUMA
- int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
-#endif
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
int changes = 0;
for_each_populated_zone(zone) {
- struct per_cpu_pageset __percpu *p = zone->pageset;
+ struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+#ifdef CONFIG_NUMA
+ struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
+#endif
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
int v;
- v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+ v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
if (v) {
atomic_long_add(v, &zone->vm_stat[i]);
global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
- __this_cpu_write(p->expire, 3);
+ __this_cpu_write(pcp->expire, 3);
#endif
}
}
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
- int v;
-
- v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
- if (v) {
-
- atomic_long_add(v, &zone->vm_numa_stat[i]);
- global_numa_diff[i] += v;
- __this_cpu_write(p->expire, 3);
- }
- }
if (do_pagesets) {
cond_resched();
@@ -819,23 +807,23 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!__this_cpu_read(p->expire) ||
- !__this_cpu_read(p->pcp.count))
+ if (!__this_cpu_read(pcp->expire) ||
+ !__this_cpu_read(pcp->count))
continue;
/*
* We never drain zones local to this processor.
*/
if (zone_to_nid(zone) == numa_node_id()) {
- __this_cpu_write(p->expire, 0);
+ __this_cpu_write(pcp->expire, 0);
continue;
}
- if (__this_cpu_dec_return(p->expire))
+ if (__this_cpu_dec_return(pcp->expire))
continue;
- if (__this_cpu_read(p->pcp.count)) {
- drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
+ if (__this_cpu_read(pcp->count)) {
+ drain_zone_pages(zone, this_cpu_ptr(pcp));
changes++;
}
}
@@ -856,12 +844,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
}
}
-#ifdef CONFIG_NUMA
- changes += fold_diff(global_zone_diff, global_numa_diff,
- global_node_diff);
-#else
changes += fold_diff(global_zone_diff, global_node_diff);
-#endif
return changes;
}
@@ -876,36 +859,33 @@ void cpu_vm_stats_fold(int cpu)
struct zone *zone;
int i;
int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
-#ifdef CONFIG_NUMA
- int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
-#endif
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
for_each_populated_zone(zone) {
- struct per_cpu_pageset *p;
+ struct per_cpu_zonestat *pzstats;
- p = per_cpu_ptr(zone->pageset, cpu);
+ pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (p->vm_stat_diff[i]) {
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+ if (pzstats->vm_stat_diff[i]) {
int v;
- v = p->vm_stat_diff[i];
- p->vm_stat_diff[i] = 0;
+ v = pzstats->vm_stat_diff[i];
+ pzstats->vm_stat_diff[i] = 0;
atomic_long_add(v, &zone->vm_stat[i]);
global_zone_diff[i] += v;
}
-
+ }
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- if (p->vm_numa_stat_diff[i]) {
- int v;
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
+ if (pzstats->vm_numa_event[i]) {
+ unsigned long v;
- v = p->vm_numa_stat_diff[i];
- p->vm_numa_stat_diff[i] = 0;
- atomic_long_add(v, &zone->vm_numa_stat[i]);
- global_numa_diff[i] += v;
+ v = pzstats->vm_numa_event[i];
+ pzstats->vm_numa_event[i] = 0;
+ zone_numa_event_add(v, zone, i);
}
+ }
#endif
}
@@ -925,58 +905,39 @@ void cpu_vm_stats_fold(int cpu)
}
}
-#ifdef CONFIG_NUMA
- fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
-#else
fold_diff(global_zone_diff, global_node_diff);
-#endif
}
/*
* this is only called if !populated_zone(zone), which implies no other users of
* pset->vm_stat_diff[] exist.
*/
-void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
{
+ unsigned long v;
int i;
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (pset->vm_stat_diff[i]) {
- int v = pset->vm_stat_diff[i];
- pset->vm_stat_diff[i] = 0;
- atomic_long_add(v, &zone->vm_stat[i]);
- atomic_long_add(v, &vm_zone_stat[i]);
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+ if (pzstats->vm_stat_diff[i]) {
+ v = pzstats->vm_stat_diff[i];
+ pzstats->vm_stat_diff[i] = 0;
+ zone_page_state_add(v, zone, i);
}
+ }
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- if (pset->vm_numa_stat_diff[i]) {
- int v = pset->vm_numa_stat_diff[i];
-
- pset->vm_numa_stat_diff[i] = 0;
- atomic_long_add(v, &zone->vm_numa_stat[i]);
- atomic_long_add(v, &vm_numa_stat[i]);
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
+ if (pzstats->vm_numa_event[i]) {
+ v = pzstats->vm_numa_event[i];
+ pzstats->vm_numa_event[i] = 0;
+ zone_numa_event_add(v, zone, i);
}
+ }
#endif
}
#endif
#ifdef CONFIG_NUMA
-void __inc_numa_state(struct zone *zone,
- enum numa_stat_item item)
-{
- struct per_cpu_pageset __percpu *pcp = zone->pageset;
- u16 __percpu *p = pcp->vm_numa_stat_diff + item;
- u16 v;
-
- v = __this_cpu_inc_return(*p);
-
- if (unlikely(v > NUMA_STATS_THRESHOLD)) {
- zone_numa_state_add(v, zone, item);
- __this_cpu_write(*p, 0);
- }
-}
-
/*
* Determine the per node value of a stat item. This function
* is called frequently in a NUMA machine, so try to be as
@@ -995,19 +956,16 @@ unsigned long sum_zone_node_page_state(int node,
return count;
}
-/*
- * Determine the per node value of a numa stat item. To avoid deviation,
- * the per cpu stat number in vm_numa_stat_diff[] is also included.
- */
-unsigned long sum_zone_numa_state(int node,
+/* Determine the per node value of a numa stat item. */
+unsigned long sum_zone_numa_event_state(int node,
enum numa_stat_item item)
{
struct zone *zones = NODE_DATA(node)->node_zones;
- int i;
unsigned long count = 0;
+ int i;
for (i = 0; i < MAX_NR_ZONES; i++)
- count += zone_numa_state_snapshot(zones + i, item);
+ count += zone_numa_event_state(zones + i, item);
return count;
}
@@ -1686,28 +1644,30 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
zone_page_state(zone, i));
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
- zone_numa_state_snapshot(zone, i));
+ zone_numa_event_state(zone, i));
#endif
seq_printf(m, "\n pagesets");
for_each_online_cpu(i) {
- struct per_cpu_pageset *pageset;
+ struct per_cpu_pages *pcp;
+ struct per_cpu_zonestat __maybe_unused *pzstats;
- pageset = per_cpu_ptr(zone->pageset, i);
+ pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
"\n high: %i"
"\n batch: %i",
i,
- pageset->pcp.count,
- pageset->pcp.high,
- pageset->pcp.batch);
+ pcp->count,
+ pcp->high,
+ pcp->batch);
#ifdef CONFIG_SMP
+ pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
seq_printf(m, "\n vm stats threshold: %d",
- pageset->stat_threshold);
+ pzstats->stat_threshold);
#endif
}
seq_printf(m,
@@ -1740,7 +1700,7 @@ static const struct seq_operations zoneinfo_op = {
};
#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
- NR_VM_NUMA_STAT_ITEMS + \
+ NR_VM_NUMA_EVENT_ITEMS + \
NR_VM_NODE_STAT_ITEMS + \
NR_VM_WRITEBACK_STAT_ITEMS + \
(IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
@@ -1755,6 +1715,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
return NULL;
BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
+ fold_vm_numa_events();
v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
m->private = v;
if (!v)
@@ -1764,9 +1725,9 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
v += NR_VM_ZONE_STAT_ITEMS;
#ifdef CONFIG_NUMA
- for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- v[i] = global_numa_state(i);
- v += NR_VM_NUMA_STAT_ITEMS;
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
+ v[i] = global_numa_event_state(i);
+ v += NR_VM_NUMA_EVENT_ITEMS;
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
@@ -1927,19 +1888,16 @@ static bool need_update(int cpu)
struct zone *zone;
for_each_populated_zone(zone) {
- struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
+ struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
struct per_cpu_nodestat *n;
+
/*
* The fast way of checking if there are any vmstat diffs.
*/
- if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
- sizeof(p->vm_stat_diff[0])))
- return true;
-#ifdef CONFIG_NUMA
- if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
- sizeof(p->vm_numa_stat_diff[0])))
+ if (memchr_inv(pzstats->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
+ sizeof(pzstats->vm_stat_diff[0])))
return true;
-#endif
+
if (last_pgdat == zone->zone_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
diff --git a/mm/workingset.c b/mm/workingset.c
index b7cdeca5a76d..4f7a306ce75a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -408,7 +408,7 @@ void workingset_activation(struct page *page)
memcg = page_memcg_rcu(page);
if (!mem_cgroup_disabled() && !memcg)
goto out;
- lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+ lruvec = mem_cgroup_page_lruvec(page);
workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
rcu_read_unlock();
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index fb3d3262dc1a..4cdf8416869d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -638,7 +638,8 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
case GET_VLAN_REALDEV_NAME_CMD:
err = 0;
- vlan_dev_get_realdev_name(dev, args.u.device2);
+ vlan_dev_get_realdev_name(dev, args.u.device2,
+ sizeof(args.u.device2));
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index fa3ad3d4d58c..1a705a4ef7fa 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -108,7 +108,8 @@ static inline netdev_features_t vlan_tnl_features(struct net_device *real_dev)
netdev_features_t ret;
ret = real_dev->hw_enc_features &
- (NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO | NETIF_F_GSO_ENCAP_ALL);
+ (NETIF_F_CSUM_MASK | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_GSO_ENCAP_ALL);
if ((ret & NETIF_F_GSO_ENCAP_ALL) && (ret & NETIF_F_CSUM_MASK))
return (ret & ~NETIF_F_CSUM_MASK) | NETIF_F_HW_CSUM;
@@ -129,7 +130,8 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
-void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result,
+ size_t size);
int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4db3f0621959..a0367b37512d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -239,9 +239,9 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
return 0;
}
-void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result, size_t size)
{
- strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+ strscpy_pad(result, vlan_dev_priv(dev)->real_dev->name, size);
}
bool vlan_dev_inherit_address(struct net_device *dev,
@@ -360,7 +360,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ifreq ifrr;
int err = -EOPNOTSUPP;
- strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
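The strncpy() calls replaced above had two classic problems: no guaranteed NUL termination when the source fills the bound, and, with the old hard-coded 23, no guarantee that the tail of the destination was initialized before being copied back to user space. strscpy_pad() fixes both, as in this sketch:

	char name[IFNAMSIZ];

	/* Always NUL-terminated; the unused tail is zero-filled, so no
	 * stale stack bytes can leak through copy_to_user(). */
	strscpy_pad(name, real_dev->name, sizeof(name));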
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 93f2f8654882..2bbd7dce0f1d 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -99,7 +99,7 @@ static unsigned int rest_of_page(void *data)
* @client: client instance
*
* This reclaims a channel by freeing its resources and
- * reseting its inuse flag.
+ * resetting its inuse flag.
*
*/
@@ -463,7 +463,7 @@ req_retry_pinned:
* For example TREAD have 11.
* 11 is the read/write header = PDU Header(7) + IO Size (4).
* Arrange in such a way that server places header in the
- * alloced memory and payload onto the user buffer.
+ * allocated memory and payload onto the user buffer.
*/
in = pack_sg_list(chan->sg, out,
VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
@@ -760,7 +760,7 @@ static struct p9_trans_module p9_virtio_trans = {
.cancelled = p9_virtio_cancelled,
/*
* We leave one entry for input and one entry for response
- * headers. We also skip one more entry to accomodate, address
+	 * headers. We also skip one more entry to accommodate addresses
	 * that are not at a page boundary, which can result in an extra
* page in zero copy.
*/
diff --git a/net/Kconfig b/net/Kconfig
index f5ee7c65e6b4..c7392c449b25 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -302,21 +302,6 @@ config BQL
select DQL
default y
-config BPF_JIT
- bool "enable BPF Just In Time compiler"
- depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
- depends on MODULES
- help
- Berkeley Packet Filter filtering capabilities are normally handled
- by an interpreter. This option allows kernel to generate a native
- code when filter is loaded in memory. This should speedup
- packet sniffing (libpcap/tcpdump).
-
- Note, admin should enable this feature changing:
- /proc/sys/net/core/bpf_jit_enable
- /proc/sys/net/core/bpf_jit_harden (optional)
- /proc/sys/net/core/bpf_jit_kallsyms (optional)
-
config BPF_STREAM_PARSER
bool "enable BPF STREAM_PARSER"
depends on INET
@@ -470,15 +455,3 @@ config ETHTOOL_NETLINK
e.g. notification messages.
endif # if NET
-
-# Used by archs to tell that they support BPF JIT compiler plus which flavour.
-# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
-# the cBPF JIT.
-
-# Classic BPF JIT (cBPF)
-config HAVE_CBPF_JIT
- bool
-
-# Extended BPF JIT (eBPF)
-config HAVE_EBPF_JIT
- bool
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index be18af481d7d..c7236daa2415 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
if (a && a->status & ATIF_PROBE) {
a->status |= ATIF_PROBE_FAIL;
/*
- * we do not respond to probe or request packets for
+ * we do not respond to probe or request packets of
* this address while we are probing this address
*/
goto unlock;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index ebda397fa95a..8ade5a4ceaf5 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -707,7 +707,7 @@ static int atif_ioctl(int cmd, void __user *arg)
/*
* Phase 1 is fine on LocalTalk but we don't do
- * EtherTalk phase 1. Anyone wanting to add it go ahead.
+ * EtherTalk phase 1. Anyone wanting to add it, go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
@@ -828,7 +828,7 @@ static int atif_ioctl(int cmd, void __user *arg)
nr = (struct atalk_netrange *)&(atif->nets);
/*
* Phase 1 is fine on Localtalk but we don't do
- * Ethertalk phase 1. Anyone wanting to add it go ahead.
+ * Ethertalk phase 1. Anyone wanting to add it, go ahead.
*/
if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
return -EPROTONOSUPPORT;
@@ -2018,7 +2018,7 @@ module_init(atalk_init);
* by the network device layer.
*
* Ergo, before the AppleTalk module can be removed, all AppleTalk
- * sockets be closed from user space.
+ * sockets should be closed from user space.
*/
static void __exit atalk_exit(void)
{
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index aa1b57161f3b..0fdbdfd19474 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -11,7 +11,7 @@
#define to_atm_dev(cldev) container_of(cldev, struct atm_dev, class_dev)
-static ssize_t show_type(struct device *cdev,
+static ssize_t type_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
@@ -19,7 +19,7 @@ static ssize_t show_type(struct device *cdev,
return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
}
-static ssize_t show_address(struct device *cdev,
+static ssize_t address_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
@@ -27,7 +27,7 @@ static ssize_t show_address(struct device *cdev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
}
-static ssize_t show_atmaddress(struct device *cdev,
+static ssize_t atmaddress_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
unsigned long flags;
@@ -50,7 +50,7 @@ static ssize_t show_atmaddress(struct device *cdev,
return count;
}
-static ssize_t show_atmindex(struct device *cdev,
+static ssize_t atmindex_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
@@ -58,7 +58,7 @@ static ssize_t show_atmindex(struct device *cdev,
return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
}
-static ssize_t show_carrier(struct device *cdev,
+static ssize_t carrier_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
@@ -67,7 +67,7 @@ static ssize_t show_carrier(struct device *cdev,
adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
}
-static ssize_t show_link_rate(struct device *cdev,
+static ssize_t link_rate_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
@@ -90,12 +90,12 @@ static ssize_t show_link_rate(struct device *cdev,
return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
}
-static DEVICE_ATTR(address, 0444, show_address, NULL);
-static DEVICE_ATTR(atmaddress, 0444, show_atmaddress, NULL);
-static DEVICE_ATTR(atmindex, 0444, show_atmindex, NULL);
-static DEVICE_ATTR(carrier, 0444, show_carrier, NULL);
-static DEVICE_ATTR(type, 0444, show_type, NULL);
-static DEVICE_ATTR(link_rate, 0444, show_link_rate, NULL);
+static DEVICE_ATTR_RO(address);
+static DEVICE_ATTR_RO(atmaddress);
+static DEVICE_ATTR_RO(atmindex);
+static DEVICE_ATTR_RO(carrier);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(link_rate);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
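The rename from show_<name>() to <name>_show() is what makes DEVICE_ATTR_RO() usable: the macro derives the callback name from the attribute name. Roughly, for a hypothetical attribute foo:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
	/* Expands to approximately:
	 *	struct device_attribute dev_attr_foo =
	 *		__ATTR(foo, 0444, foo_show, NULL);
	 */
	static DEVICE_ATTR_RO(foo);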
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 3e17a5ecaa94..dd2a8dabed84 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -93,8 +93,8 @@ struct br2684_dev {
* This lock should be held for writing any time the list of devices or
* their attached vcc's could be altered. It should be held for reading
* any time these are being queried. Note that we sometimes need to
- * do read-locking under interrupt context, so write locking must block
- * the current CPU's interrupts
+ * do read-locking under interrupt context, so write locking must block
+ * the current CPU's interrupts.
*/
static DEFINE_RWLOCK(devs_lock);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 53236986dfe0..2b2d33eeaf20 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -52,10 +52,8 @@ static struct atm_dev *__alloc_atm_dev(const char *type)
static struct atm_dev *__atm_dev_lookup(int number)
{
struct atm_dev *dev;
- struct list_head *p;
- list_for_each(p, &atm_devs) {
- dev = list_entry(p, struct atm_dev, dev_list);
+ list_for_each_entry(dev, &atm_devs, dev_list) {
if (dev->number == number) {
atm_dev_hold(dev);
return dev;
@@ -215,8 +213,7 @@ int atm_getnames(void __user *buf, int __user *iobuf_len)
return -ENOMEM;
}
tmp_p = tmp_buf;
- list_for_each(p, &atm_devs) {
- dev = list_entry(p, struct atm_dev, dev_list);
+ list_for_each_entry(dev, &atm_devs, dev_list) {
*tmp_p++ = dev->number;
}
mutex_unlock(&atm_dev_mutex);
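
Both resources.c hunks apply the same simplification: list_for_each_entry() folds the list_entry()/container_of() step into the iterator, so the raw struct list_head cursor becomes unnecessary. The pattern as a self-contained sketch (struct item and find_item() are made up for illustration):

	#include <linux/list.h>

	struct item {
		int number;
		struct list_head list;	/* linked into the items list */
	};

	static LIST_HEAD(items);

	static struct item *find_item(int number)
	{
		struct item *it;

		/* The iterator yields the containing structure directly,
		 * hiding the list_entry() call the old loop did by hand.
		 */
		list_for_each_entry(it, &items, list) {
			if (it->number == number)
				return it;
		}
		return NULL;
	}
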
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 789f257be24f..12022378f892 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
if (WARN_ON(!forw_packet->if_outgoing))
return;
- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
return;
+ }
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
return;
@@ -1849,6 +1851,8 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
orig_node->orig) ||
nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
neigh_node->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ neigh_node->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
neigh_node->if_incoming->net_dev->ifindex) ||
nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) ||
@@ -2078,6 +2082,8 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
hardif_neigh->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ hardif_neigh->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
hardif_neigh->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
@@ -2459,6 +2465,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid,
router->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
router->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ router->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
gw_node->bandwidth_down) ||
nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP,
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e1ca2b8c3152..b98aea958e3d 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -146,6 +146,8 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
hardif_neigh->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ hardif_neigh->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
hardif_neigh->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
@@ -298,6 +300,8 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) ||
nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
neigh_node->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ neigh_node->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
neigh_node->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) ||
@@ -739,6 +743,12 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid,
goto out;
}
+ if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ router->if_incoming->net_dev->ifindex)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
gw_node->bandwidth_down)) {
genlmsg_cancel(msg, hdr);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 7dc133cfc363..63d42dcc9324 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -395,7 +395,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
break;
case BATADV_CLAIM_TYPE_ANNOUNCE:
/* announcement frame
- * set HW SRC to the special mac containg the crc
+ * set HW SRC to the special mac containing the crc
*/
ether_addr_copy(hw_src, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
@@ -1040,7 +1040,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
/* lets see if this originator is in our mesh */
orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
- /* dont accept claims from gateways which are not in
+ /* don't accept claims from gateways which are not in
* the same mesh or group.
*/
if (!orig_node)
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 5c22955bb9d5..8673a265995f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -52,7 +52,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
void batadv_bla_status_update(struct net_device *net_dev);
int batadv_bla_init(struct batadv_priv *bat_priv);
void batadv_bla_free(struct batadv_priv *bat_priv);
-int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
#ifdef CONFIG_BATMAN_ADV_DAT
bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
unsigned short vid);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4a6a25d551a8..55d97e18aa4a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -9,7 +9,6 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
-#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_arp.h>
@@ -403,7 +402,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
goto out;
}
- /* >1 neighbors -> (re)brodcast */
+ /* >1 neighbors -> (re)broadcast */
if (rcu_dereference(hlist_next_rcu(first)))
goto out;
@@ -678,43 +677,16 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_master_del_slave() - remove hard_iface from the current master iface
- * @slave: the interface enslaved in another master
- * @master: the master from which slave has to be removed
- *
- * Invoke ndo_del_slave on master passing slave as argument. In this way the
- * slave is free'd and the master can correctly change its internal state.
- *
- * Return: 0 on success, a negative value representing the error otherwise
- */
-static int batadv_master_del_slave(struct batadv_hard_iface *slave,
- struct net_device *master)
-{
- int ret;
-
- if (!master)
- return 0;
-
- ret = -EBUSY;
- if (master->netdev_ops->ndo_del_slave)
- ret = master->netdev_ops->ndo_del_slave(master, slave->net_dev);
-
- return ret;
-}
-
-/**
* batadv_hardif_enable_interface() - Enslave hard interface to soft interface
* @hard_iface: hard interface to add to soft interface
- * @net: the applicable net namespace
- * @iface_name: name of the soft interface
+ * @soft_iface: netdev struct of the mesh interface
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
- struct net *net, const char *iface_name)
+ struct net_device *soft_iface)
{
struct batadv_priv *bat_priv;
- struct net_device *soft_iface, *master;
__be16 ethertype = htons(ETH_P_BATMAN);
int max_header_len = batadv_max_header_len();
int ret;
@@ -724,35 +696,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
kref_get(&hard_iface->refcount);
- soft_iface = dev_get_by_name(net, iface_name);
-
- if (!soft_iface) {
- soft_iface = batadv_softif_create(net, iface_name);
-
- if (!soft_iface) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* dev_get_by_name() increases the reference counter for us */
- dev_hold(soft_iface);
- }
-
- if (!batadv_softif_is_valid(soft_iface)) {
- pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
- soft_iface->name);
- ret = -EINVAL;
- goto err_dev;
- }
-
- /* check if the interface is enslaved in another virtual one and
- * in that case unlink it first
- */
- master = netdev_master_upper_dev_get(hard_iface->net_dev);
- ret = batadv_master_del_slave(hard_iface, master);
- if (ret)
- goto err_dev;
-
+ dev_hold(soft_iface);
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -810,7 +754,6 @@ err_upper:
err_dev:
hard_iface->soft_iface = NULL;
dev_put(soft_iface);
-err:
batadv_hardif_put(hard_iface);
return ret;
}
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 83d11b46a9d8..8cb2a1f10080 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -16,7 +16,6 @@
#include <linux/rcupdate.h>
#include <linux/stddef.h>
#include <linux/types.h>
-#include <net/net_namespace.h>
/**
* enum batadv_hard_if_state - State of a hard interface
@@ -75,7 +74,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface);
struct batadv_hard_iface*
batadv_hardif_get_by_netdev(const struct net_device *net_dev);
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
- struct net *net, const char *iface_name);
+ struct net_device *soft_iface);
void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 46696759f194..fb251c385a1b 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -18,7 +18,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
-/* callback to a compare function. should compare 2 element datas for their
+/* callback to a compare function. should compare 2 element data for their
* keys
*
* Return: true if same and false if not same
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 8f0102b71656..014235fd4681 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2021.1"
+#define BATADV_SOURCE_VERSION "2021.2"
#endif
/* B.A.T.M.A.N. parameters */
@@ -88,7 +88,6 @@
/* number of packets to send for broadcasts on different interface types */
#define BATADV_NUM_BCASTS_DEFAULT 1
#define BATADV_NUM_BCASTS_WIRELESS 3
-#define BATADV_NUM_BCASTS_MAX 3
/* length of the single packet used by the TP meter */
#define BATADV_TP_PACKET_LEN ETH_DATA_LEN
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 1d63c8cbbfe7..923e2197c2db 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -193,53 +193,22 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
* BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
* The former two OR'd: no multicast router is present
*/
-#if IS_ENABLED(CONFIG_IPV6)
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
struct net_device *bridge)
{
- struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
struct net_device *dev = bat_priv->soft_iface;
- struct br_ip_list *br_ip_entry, *tmp;
- u8 flags = BATADV_MCAST_WANT_NO_RTR6;
- int ret;
+ u8 flags = BATADV_NO_FLAGS;
if (!bridge)
return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
- /* TODO: ask the bridge if a multicast router is present (the bridge
- * is capable of performing proper RFC4286 multicast router
- * discovery) instead of searching for a ff02::2 listener here
- */
- ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
- if (ret < 0)
- return BATADV_NO_FLAGS;
-
- list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
- /* the bridge snooping does not maintain IPv4 link-local
- * addresses - therefore we won't find any IPv4 multicast router
- * address here, only IPv6 ones
- */
- if (br_ip_entry->addr.proto == htons(ETH_P_IPV6) &&
- ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.dst.ip6))
- flags &= ~BATADV_MCAST_WANT_NO_RTR6;
-
- list_del(&br_ip_entry->list);
- kfree(br_ip_entry);
- }
+ if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
+ flags |= BATADV_MCAST_WANT_NO_RTR4;
+ if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
+ flags |= BATADV_MCAST_WANT_NO_RTR6;
return flags;
}
-#else
-static inline u8
-batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
- struct net_device *bridge)
-{
- if (bridge)
- return BATADV_NO_FLAGS;
- else
- return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
-}
-#endif
/**
* batadv_mcast_mla_rtr_flags_get() - get multicast router flags
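
With br_multicast_has_router_adjacent() doing the RFC 4286 router discovery inside the bridge, the flag computation collapses into two independent checks and the IPv6-only #ifdef split can go. The flag semantics from the kernel-doc above, spelled out as an illustration:

	/* WANT_NO_RTR flag combinations (illustrative summary):
	 *
	 *   WANT_NO_RTR4 | WANT_NO_RTR6 -> no multicast router present
	 *   WANT_NO_RTR4 only           -> an IPv6 mcast router is adjacent
	 *   WANT_NO_RTR6 only           -> an IPv4 mcast router is adjacent
	 *   neither bit                 -> routers for both families
	 */
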
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index f317d206b411..b6cc746e01a6 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -814,6 +814,10 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg,
bat_priv->soft_iface->ifindex))
goto nla_put_failure;
+ if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
+ bat_priv->soft_iface->name))
+ goto nla_put_failure;
+
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
net_dev->ifindex) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
@@ -1045,6 +1049,10 @@ static int batadv_netlink_vlan_fill(struct sk_buff *msg,
bat_priv->soft_iface->ifindex))
goto nla_put_failure;
+ if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
+ bat_priv->soft_iface->name))
+ goto nla_put_failure;
+
if (nla_put_u32(msg, BATADV_ATTR_VLANID, vlan->vid & VLAN_VID_MASK))
goto nla_put_failure;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 40f5cffde6a3..bb9e93e3d98c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1182,9 +1182,9 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(*bcast_packet);
- int ret = NET_RX_DROP;
s32 seq_diff;
u32 seqno;
+ int ret;
/* drop packet if it has not necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -1210,7 +1210,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto free_skb;
- if (bcast_packet->ttl < 2)
+ if (bcast_packet->ttl-- < 2)
goto free_skb;
orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
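
This hunk pairs with the soft-interface.c change further down (bcast_packet->ttl = BATADV_TTL - 1): the TTL decrement moves out of the queuing path, where the removed send.c code mutated the copied skb, and into the receive check itself. A comment-only sketch of the combined flow, with values purely illustrative:

	/* sender (soft-interface.c): locally generated packets start one
	 * lower because they never pass through the receive-path check:
	 *	bcast_packet->ttl = BATADV_TTL - 1;
	 *
	 * receiver (routing.c): the post-decrement tests the old value and
	 * stores the value the rebroadcast copy will carry:
	 *	if (bcast_packet->ttl-- < 2)
	 *		goto free_skb;
	 */
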
@@ -1249,7 +1249,9 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
/* rebroadcast packet */
- batadv_add_bcast_packet_to_list(bat_priv, skb, 1, false);
+ ret = batadv_forw_bcast_packet(bat_priv, skb, 0, false);
+ if (ret == NETDEV_TX_BUSY)
+ goto free_skb;
/* don't hand the broadcast up if it is from an originator
* from the same backbone.
@@ -1275,6 +1277,7 @@ spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
free_skb:
kfree_skb(skb);
+ ret = NET_RX_DROP;
out:
if (orig_node)
batadv_orig_node_put(orig_node);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 157abe92d827..0b9dd29d3b6a 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -737,57 +737,48 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
}
/**
- * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
+ * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
+ * @if_in: the interface where the packet was received on
+ * @if_out: the outgoing interface to queue on
*
- * add a broadcast packet to the queue and setup timers. broadcast packets
+ * Adds a broadcast packet to the queue and sets up timers. Broadcast packets
* are sent multiple times to increase probability for being received.
*
- * The skb is not consumed, so the caller should make sure that the
- * skb is freed.
- *
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
*/
-int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
- const struct sk_buff *skb,
- unsigned long delay,
- bool own_packet)
+static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet,
+ struct batadv_hard_iface *if_in,
+ struct batadv_hard_iface *if_out)
{
- struct batadv_hard_iface *primary_if;
struct batadv_forw_packet *forw_packet;
- struct batadv_bcast_packet *bcast_packet;
+ unsigned long send_time = jiffies;
struct sk_buff *newskb;
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto err;
-
newskb = skb_copy(skb, GFP_ATOMIC);
- if (!newskb) {
- batadv_hardif_put(primary_if);
+ if (!newskb)
goto err;
- }
- forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
+ forw_packet = batadv_forw_packet_alloc(if_in, if_out,
&bat_priv->bcast_queue_left,
bat_priv, newskb);
- batadv_hardif_put(primary_if);
if (!forw_packet)
goto err_packet_free;
- /* as we have a copy now, it is safe to decrease the TTL */
- bcast_packet = (struct batadv_bcast_packet *)newskb->data;
- bcast_packet->ttl--;
-
forw_packet->own = own_packet;
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
- batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
+ send_time += delay ? delay : msecs_to_jiffies(5);
+
+ batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time);
return NETDEV_TX_OK;
err_packet_free:
@@ -797,9 +788,219 @@ err:
}
/**
+ * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ * @if_in: the interface where the packet was received on
+ * @if_out: the outgoing interface to forward to
+ *
+ * Transmits a broadcast packet on the specified interface either immediately
+ * or if a delay is given after that. Furthermore, queues additional
+ * retransmissions if this interface is a wireless one.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet,
+ struct batadv_hard_iface *if_in,
+ struct batadv_hard_iface *if_out)
+{
+ unsigned int num_bcasts = if_out->num_bcasts;
+ struct sk_buff *newskb;
+ int ret = NETDEV_TX_OK;
+
+ if (!delay) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb)
+ return NETDEV_TX_BUSY;
+
+ batadv_send_broadcast_skb(newskb, if_out);
+ num_bcasts--;
+ }
+
+ /* delayed broadcast or rebroadcasts? */
+ if (num_bcasts >= 1) {
+ BATADV_SKB_CB(skb)->num_bcasts = num_bcasts;
+
+ ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,
+ own_packet, if_in,
+ if_out);
+ }
+
+ return ret;
+}
+
+/**
+ * batadv_send_no_broadcast() - check whether (re)broadcast is necessary
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to check
+ * @own_packet: true if it is a self-generated broadcast packet
+ * @if_out: the outgoing interface checked and considered for (re)broadcast
+ *
+ * Return: False if a packet needs to be (re)broadcasted on the given interface,
+ * true otherwise.
+ */
+static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, bool own_packet,
+ struct batadv_hard_iface *if_out)
+{
+ struct batadv_hardif_neigh_node *neigh_node = NULL;
+ struct batadv_bcast_packet *bcast_packet;
+ u8 *orig_neigh;
+ u8 *neigh_addr;
+ char *type;
+ int ret;
+
+ if (!own_packet) {
+ neigh_addr = eth_hdr(skb)->h_source;
+ neigh_node = batadv_hardif_neigh_get(if_out,
+ neigh_addr);
+ }
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ orig_neigh = neigh_node ? neigh_node->orig : NULL;
+
+ ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig,
+ orig_neigh);
+
+ if (neigh_node)
+ batadv_hardif_neigh_put(neigh_node);
+
+ /* ok, may broadcast */
+ if (!ret)
+ return false;
+
+ /* no broadcast */
+ switch (ret) {
+ case BATADV_HARDIF_BCAST_NORECIPIENT:
+ type = "no neighbor";
+ break;
+ case BATADV_HARDIF_BCAST_DUPFWD:
+ type = "single neighbor is source";
+ break;
+ case BATADV_HARDIF_BCAST_DUPORIG:
+ type = "single neighbor is originator";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "BCAST packet from orig %pM on %s suppressed: %s\n",
+ bcast_packet->orig,
+ if_out->net_dev->name, type);
+
+ return true;
+}
+
+/**
+ * __batadv_forw_bcast_packet() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or if a delay is given
+ * after that. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * This call clones the given skb, hence the caller needs to take into
+ * account that the data segment of the given skb might not be
+ * modifiable anymore.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface *primary_if;
+ int ret = NETDEV_TX_OK;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return NETDEV_TX_BUSY;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ if (batadv_send_no_broadcast(bat_priv, skb, own_packet,
+ hard_iface)) {
+ batadv_hardif_put(hard_iface);
+ continue;
+ }
+
+ ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,
+ own_packet, primary_if,
+ hard_iface);
+ batadv_hardif_put(hard_iface);
+
+ if (ret == NETDEV_TX_BUSY)
+ break;
+ }
+ rcu_read_unlock();
+
+ batadv_hardif_put(primary_if);
+ return ret;
+}
+
+/**
+ * batadv_forw_bcast_packet() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or if a delay is given
+ * after that. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+}
+
+/**
+ * batadv_send_bcast_packet() - send and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or if a delay is given
+ * after that. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * Consumes the provided skb.
+ */
+void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+ consume_skb(skb);
+}
+
+/**
* batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
* @forw_packet: the forwarding packet to check
- * @hard_iface: the interface to check on
*
* Checks whether a given packet has any (re)transmissions left on the provided
* interface.
@@ -811,28 +1012,20 @@ err:
* Return: True if (re)transmissions are left, false otherwise.
*/
static bool
-batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
- struct batadv_hard_iface *hard_iface)
+batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet)
{
- unsigned int max;
-
- if (hard_iface)
- max = hard_iface->num_bcasts;
- else
- max = BATADV_NUM_BCASTS_MAX;
-
- return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
+ return BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
}
/**
- * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ * batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a
* packet
- * @forw_packet: the packet to increase the counter for
+ * @forw_packet: the packet to decrease the counter for
*/
static void
-batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
+batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet)
{
- BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
+ BATADV_SKB_CB(forw_packet->skb)->num_bcasts--;
}
/**
@@ -843,30 +1036,30 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
*/
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
- return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
+ unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
+
+ return num_bcasts != forw_packet->if_outgoing->num_bcasts;
}
+/**
+ * batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet
+ * @work: work queue item
+ *
+ * Transmits a queued broadcast packet and if necessary reschedules it.
+ */
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
- struct batadv_hard_iface *hard_iface;
- struct batadv_hardif_neigh_node *neigh_node;
- struct delayed_work *delayed_work;
+ unsigned long send_time = jiffies + msecs_to_jiffies(5);
struct batadv_forw_packet *forw_packet;
- struct batadv_bcast_packet *bcast_packet;
- struct sk_buff *skb1;
- struct net_device *soft_iface;
+ struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
- unsigned long send_time = jiffies + msecs_to_jiffies(5);
+ struct sk_buff *skb1;
bool dropped = false;
- u8 *neigh_addr;
- u8 *orig_neigh;
- int ret = 0;
delayed_work = to_delayed_work(work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
delayed_work);
- soft_iface = forw_packet->if_incoming->soft_iface;
- bat_priv = netdev_priv(soft_iface);
+ bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
dropped = true;
@@ -878,76 +1071,15 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
goto out;
}
- bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;
-
- /* rebroadcast packet */
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->soft_iface != soft_iface)
- continue;
-
- if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
- continue;
-
- if (forw_packet->own) {
- neigh_node = NULL;
- } else {
- neigh_addr = eth_hdr(forw_packet->skb)->h_source;
- neigh_node = batadv_hardif_neigh_get(hard_iface,
- neigh_addr);
- }
-
- orig_neigh = neigh_node ? neigh_node->orig : NULL;
-
- ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
- orig_neigh);
-
- if (ret) {
- char *type;
-
- switch (ret) {
- case BATADV_HARDIF_BCAST_NORECIPIENT:
- type = "no neighbor";
- break;
- case BATADV_HARDIF_BCAST_DUPFWD:
- type = "single neighbor is source";
- break;
- case BATADV_HARDIF_BCAST_DUPORIG:
- type = "single neighbor is originator";
- break;
- default:
- type = "unknown";
- }
-
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
- bcast_packet->orig,
- hard_iface->net_dev->name, type);
-
- if (neigh_node)
- batadv_hardif_neigh_put(neigh_node);
-
- continue;
- }
-
- if (neigh_node)
- batadv_hardif_neigh_put(neigh_node);
-
- if (!kref_get_unless_zero(&hard_iface->refcount))
- continue;
-
- /* send a copy of the saved skb */
- skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb1)
- batadv_send_broadcast_skb(skb1, hard_iface);
-
- batadv_hardif_put(hard_iface);
- }
- rcu_read_unlock();
+ /* send a copy of the saved skb */
+ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (!skb1)
+ goto out;
- batadv_forw_packet_bcasts_inc(forw_packet);
+ batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing);
+ batadv_forw_packet_bcasts_dec(forw_packet);
- /* if we still have some more bcasts to send */
- if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
+ if (batadv_forw_packet_bcasts_left(forw_packet)) {
batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
send_time);
return;
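
Taken together, the send.c hunks invert the bookkeeping in BATADV_SKB_CB: num_bcasts used to count transmissions already performed and was compared against a per-interface maximum (or BATADV_NUM_BCASTS_MAX when no interface was known); it is now pre-loaded with the transmissions still owed on the specific outgoing interface and counts down to zero. A simplified sketch of the resulting per-packet scheduler; the struct and helper names below are stand-ins, not the kernel's identifiers:

	struct forw_packet {
		struct sk_buff *skb;
		struct hard_iface *if_outgoing;
		unsigned int num_bcasts_left;	/* stand-in for the cb field */
	};

	static void send_outstanding(struct forw_packet *fp)
	{
		transmit_copy(fp);		/* one copy on fp->if_outgoing */
		fp->num_bcasts_left--;		/* was: ...->num_bcasts++ */

		if (fp->num_bcasts_left)	/* retransmissions still owed */
			requeue(fp, jiffies + msecs_to_jiffies(5));
		else
			release(fp);
	}
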
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 2b0daf8b2bc4..08af251b765c 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -39,10 +39,14 @@ int batadv_send_broadcast_skb(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface);
int batadv_send_unicast_skb(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node);
-int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
- const struct sk_buff *skb,
- unsigned long delay,
- bool own_packet);
+int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet);
+void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet);
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6b8181bc3122..ae368a42a4ad 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -26,7 +26,6 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
-#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -37,6 +36,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <net/net_namespace.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -191,7 +191,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
struct vlan_ethhdr *vhdr;
unsigned int header_len = 0;
int data_len = skb->len, ret;
- unsigned long brd_delay = 1;
+ unsigned long brd_delay = 0;
bool do_bcast = false, client_added;
unsigned short vid;
u32 seqno;
@@ -330,7 +330,7 @@ send:
bcast_packet = (struct batadv_bcast_packet *)skb->data;
bcast_packet->version = BATADV_COMPAT_VERSION;
- bcast_packet->ttl = BATADV_TTL;
+ bcast_packet->ttl = BATADV_TTL - 1;
/* batman packet type: broadcast */
bcast_packet->packet_type = BATADV_BCAST;
@@ -346,13 +346,7 @@ send:
seqno = atomic_inc_return(&bat_priv->bcast_seqno);
bcast_packet->seqno = htonl(seqno);
- batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay, true);
-
- /* a copy is stored in the bcast list, therefore removing
- * the original skb.
- */
- consume_skb(skb);
-
+ batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
/* unicast packet */
} else {
/* DHCP packets going to a server will use the GW feature */
@@ -848,14 +842,13 @@ static int batadv_softif_slave_add(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct batadv_hard_iface *hard_iface;
- struct net *net = dev_net(dev);
int ret = -EINVAL;
hard_iface = batadv_hardif_get_by_netdev(slave_dev);
if (!hard_iface || hard_iface->soft_iface)
goto out;
- ret = batadv_hardif_enable_interface(hard_iface, net, dev->name);
+ ret = batadv_hardif_enable_interface(hard_iface, dev);
out:
if (hard_iface)
@@ -1093,38 +1086,6 @@ static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
}
/**
- * batadv_softif_create() - Create and register soft interface
- * @net: the applicable net namespace
- * @name: name of the new soft interface
- *
- * Return: newly allocated soft_interface, NULL on errors
- */
-struct net_device *batadv_softif_create(struct net *net, const char *name)
-{
- struct net_device *soft_iface;
- int ret;
-
- soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
- NET_NAME_UNKNOWN, batadv_softif_init_early);
- if (!soft_iface)
- return NULL;
-
- dev_net_set(soft_iface, net);
-
- soft_iface->rtnl_link_ops = &batadv_link_ops;
-
- ret = register_netdevice(soft_iface);
- if (ret < 0) {
- pr_err("Unable to register the batman interface '%s': %i\n",
- name, ret);
- free_netdev(soft_iface);
- return NULL;
- }
-
- return soft_iface;
-}
-
-/**
* batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
* netlink
* @soft_iface: the to-be-removed batman-adv interface
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 38b0ad182584..67a2ddd6832f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -12,14 +12,12 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
-#include <net/net_namespace.h>
#include <net/rtnetlink.h>
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node);
-struct net_device *batadv_softif_create(struct net *net, const char *name);
bool batadv_softif_is_valid(const struct net_device *net_dev);
extern struct rtnl_link_ops batadv_link_ops;
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 97617d02c8f9..fd164a248569 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -103,34 +103,6 @@ static inline bool peer_del(struct lowpan_btle_dev *dev,
return false;
}
-static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
- bdaddr_t *ba, __u8 type)
-{
- struct lowpan_peer *peer;
-
- BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
- ba, type);
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(peer, &dev->peers, list) {
- BT_DBG("dst addr %pMR dst type %d",
- &peer->chan->dst, peer->chan->dst_type);
-
- if (bacmp(&peer->chan->dst, ba))
- continue;
-
- if (type == peer->chan->dst_type) {
- rcu_read_unlock();
- return peer;
- }
- }
-
- rcu_read_unlock();
-
- return NULL;
-}
-
static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
@@ -195,7 +167,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
rcu_read_lock();
list_for_each_entry_rcu(peer, &dev->peers, list) {
- BT_DBG("dst addr %pMR dst type %d ip %pI6c",
+ BT_DBG("dst addr %pMR dst type %u ip %pI6c",
&peer->chan->dst, peer->chan->dst_type,
&peer->peer_addr);
@@ -506,7 +478,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
local_skb = skb_clone(skb, GFP_ATOMIC);
- BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
+ BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
netdev->name,
&pentry->chan->dst, pentry->chan->dst_type,
&pentry->peer_addr, pentry->chan);
@@ -549,7 +521,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
if (err) {
if (lowpan_cb(skb)->chan) {
- BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
+ BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
netdev->name, &addr, addr_type,
&lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
@@ -691,7 +663,7 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
struct net_device *netdev;
- int err = 0;
+ int err;
netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
@@ -818,7 +790,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
BT_DBG("dev %p removing %speer %p", dev,
last ? "last " : "1 ", peer);
- BT_DBG("chan %p orig refcnt %d", chan,
+ BT_DBG("chan %p orig refcnt %u", chan,
kref_read(&chan->kref));
l2cap_chan_put(chan);
@@ -907,14 +879,6 @@ static const struct l2cap_ops bt_6lowpan_chan_ops = {
.set_shutdown = l2cap_chan_no_set_shutdown,
};
-static inline __u8 bdaddr_type(__u8 type)
-{
- if (type == ADDR_LE_DEV_PUBLIC)
- return BDADDR_LE_PUBLIC;
- else
- return BDADDR_LE_RANDOM;
-}
-
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
struct l2cap_chan *chan;
@@ -940,7 +904,7 @@ static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
struct lowpan_peer *peer;
- BT_DBG("conn %p dst type %d", conn, dst_type);
+ BT_DBG("conn %p dst type %u", conn, dst_type);
peer = lookup_peer(conn);
if (!peer)
@@ -972,7 +936,7 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
- BT_DBG("chan %p src type %d", chan, chan->src_type);
+ BT_DBG("chan %p src type %u", chan, chan->src_type);
err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
if (err) {
@@ -1013,7 +977,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
*conn = (struct l2cap_conn *)hcon->l2cap_data;
- BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
+ BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);
return 0;
}
@@ -1155,7 +1119,7 @@ static ssize_t lowpan_control_write(struct file *fp,
return -EALREADY;
}
- BT_DBG("conn %p dst %pMR type %d user %d", conn,
+ BT_DBG("conn %p dst %pMR type %d user %u", conn,
&conn->hcon->dst, conn->hcon->dst_type,
addr_type);
}
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 463bad58478b..1fcc482397c3 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -120,7 +120,7 @@ static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*rej))
return -EINVAL;
- BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+ BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
skb_pull(skb, sizeof(*rej));
@@ -219,7 +219,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
cl = (void *) skb->data;
while (len >= sizeof(*cl)) {
- BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
+ BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
cl->status);
if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
@@ -273,7 +273,7 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cl *cl = (void *) skb->data;
while (skb->len >= sizeof(*cl)) {
- BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+ BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
cl->status);
cl = skb_pull(skb, sizeof(*cl));
}
@@ -302,7 +302,7 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
- BT_DBG("id %d", req->id);
+ BT_DBG("id %u", req->id);
hdev = hci_dev_get(req->id);
if (!hdev || hdev->dev_type != HCI_AMP) {
@@ -344,7 +344,7 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*rsp))
return -EINVAL;
- BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status);
+ BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
if (rsp->status)
return -EINVAL;
@@ -373,7 +373,7 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
- BT_DBG("id %d", req->id);
+ BT_DBG("id %u", req->id);
/* Make sure that other request is not processed */
tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
@@ -423,7 +423,7 @@ static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
assoc_len = len - sizeof(*rsp);
- BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
+ BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
assoc_len);
if (rsp->status)
@@ -457,7 +457,7 @@ static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
if (!hcon)
goto done;
- BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id);
+ BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
mgr->bredr_chan->remote_amp_id = rsp->id;
@@ -481,7 +481,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
- BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+ BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
memset(&rsp, 0, sizeof(rsp));
@@ -562,7 +562,7 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
- BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+ BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
memset(&rsp, 0, sizeof(rsp));
@@ -599,7 +599,7 @@ send_rsp:
static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
- BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
+ BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
skb_pull(skb, le16_to_cpu(hdr->len));
return 0;
@@ -620,7 +620,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
hdr = (void *) skb->data;
len = le16_to_cpu(hdr->len);
- BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
+ BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
skb_pull(skb, sizeof(*hdr));
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index be2d469d6369..2134f92bd7ac 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -78,7 +78,7 @@ struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
{
struct amp_ctrl *ctrl;
- BT_DBG("mgr %p id %d", mgr, id);
+ BT_DBG("mgr %p id %u", mgr, id);
mutex_lock(&mgr->amp_ctrls_lock);
list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
@@ -179,7 +179,7 @@ int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
/* Legacy key */
if (conn->key_type < 3) {
- bt_dev_err(hdev, "legacy key type %d", conn->key_type);
+ bt_dev_err(hdev, "legacy key type %u", conn->key_type);
return -EACCES;
}
@@ -257,7 +257,7 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
struct hci_request req;
int err;
- BT_DBG("%s handle %d", hdev->name, phy_handle);
+ BT_DBG("%s handle %u", hdev->name, phy_handle);
cp.phy_handle = phy_handle;
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 43c284158f63..72f47b372705 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -126,8 +126,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
f[i].start = get_unaligned_be16(data++);
f[i].end = get_unaligned_be16(data++);
- BT_DBG("proto filter start %d end %d",
- f[i].start, f[i].end);
+ BT_DBG("proto filter start %u end %u",
+ f[i].start, f[i].end);
}
if (i < BNEP_MAX_PROTO_FILTERS)
@@ -266,7 +266,7 @@ static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
break;
}
- BT_DBG("type 0x%x len %d", h->type, h->len);
+ BT_DBG("type 0x%x len %u", h->type, h->len);
switch (h->type & BNEP_TYPE_MASK) {
case BNEP_EXT_CONTROL:
@@ -424,7 +424,7 @@ static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
int len = 0, il = 0;
u8 type = 0;
- BT_DBG("skb %p dev %p type %d", skb, skb->dev, skb->pkt_type);
+ BT_DBG("skb %p dev %p type %u", skb, skb->dev, skb->pkt_type);
if (!skb->dev) {
/* Control frame sent by us */
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index eb41556002e3..f3bedc3b613a 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -74,7 +74,7 @@ static struct cmtp_application *cmtp_application_add(struct cmtp_session *sessio
{
struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
- BT_DBG("session %p application %p appl %d", session, app, appl);
+ BT_DBG("session %p application %p appl %u", session, app, appl);
if (!app)
return NULL;
@@ -135,7 +135,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
{
struct cmtp_scb *scb = (void *) skb->cb;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %p skb %p len %u", session, skb, skb->len);
scb->id = -1;
scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3);
@@ -152,7 +152,7 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
struct sk_buff *skb;
unsigned char *s;
- BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
+ BT_DBG("session %p subcmd 0x%02x appl %u msgnum %u", session, subcmd, appl, msgnum);
skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
if (!skb) {
@@ -188,7 +188,7 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
__u16 appl, msgnum, func, info;
__u32 controller;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %p skb %p len %u", session, skb, skb->len);
switch (CAPIMSG_SUBCOMMAND(skb->data)) {
case CAPI_CONF:
@@ -321,7 +321,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
__u16 appl;
__u32 contr;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %p skb %p len %u", session, skb, skb->len);
if (skb->len < CAPI_MSG_BASELEN)
return;
@@ -344,7 +344,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
appl = application->appl;
CAPIMSG_SETAPPID(skb->data, appl);
} else {
- BT_ERR("Can't find application with id %d", appl);
+ BT_ERR("Can't find application with id %u", appl);
kfree_skb(skb);
return;
}
@@ -385,8 +385,8 @@ static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_
unsigned char buf[8];
int err = 0, nconn, want = rp->level3cnt;
- BT_DBG("ctrl %p appl %d level3cnt %d datablkcnt %d datablklen %d",
- ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
+ BT_DBG("ctrl %p appl %u level3cnt %u datablkcnt %u datablklen %u",
+ ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
application = cmtp_application_add(session, appl);
if (!application) {
@@ -450,7 +450,7 @@ static void cmtp_release_appl(struct capi_ctr *ctrl, __u16 appl)
struct cmtp_session *session = ctrl->driverdata;
struct cmtp_application *application;
- BT_DBG("ctrl %p appl %d", ctrl, appl);
+ BT_DBG("ctrl %p appl %u", ctrl, appl);
application = cmtp_application_get(session, CMTP_APPLID, appl);
if (!application) {
@@ -483,7 +483,7 @@ static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
application = cmtp_application_get(session, CMTP_APPLID, appl);
if ((!application) || (application->state != BT_CONNECTED)) {
- BT_ERR("Can't find application with id %d", appl);
+ BT_ERR("Can't find application with id %u", appl);
return CAPI_ILLAPPNR;
}
@@ -515,7 +515,7 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
seq_printf(m, "ctrl %d\n", session->num);
list_for_each_entry(app, &session->applications, list) {
- seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
+ seq_printf(m, "appl %u -> %u\n", app->appl, app->mapping);
}
return 0;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 07cfa3249f83..0a2d78e811cf 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -392,6 +392,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
if (!(session->flags & BIT(CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
if (err < 0) {
+ /* Caller will call fput in case of failure, and so
+ * will cmtp_session kthread.
+ */
+ get_file(session->sock->file);
+
atomic_inc(&session->terminate);
wake_up_interruptible(sk_sleep(session->sock->sk));
up_write(&cmtp_session_sem);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 88ec08978ff4..2b5059a56cda 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -257,7 +257,7 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
BT_DBG("hcon %p", conn);
- /* When we are master of an established connection and it enters
+ /* When we are central of an established connection and it enters
* the disconnect timeout, then go ahead and try to read the
* current clock offset. Processing of the result is done
* within the event handling and hci_clock_offset_evt function.
@@ -758,7 +758,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
conn->state = BT_CLOSED;
/* If the status indicates successful cancellation of
- * the attempt (i.e. Unkown Connection Id) there's no point of
+ * the attempt (i.e. Unknown Connection Id) there's no point of
* notifying failure since we'll go back to keep trying to
* connect. The only exception is explicit connect requests
* where a timeout + cancel does indicate an actual failure.
@@ -1109,9 +1109,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
hci_req_init(&req, hdev);
- /* Disable advertising if we're active. For master role
+ /* Disable advertising if we're active. For central role
* connections most controllers will refuse to connect if
- * advertising is enabled, and for slave role connections we
+ * advertising is enabled, and for peripheral role connections we
* anyway have to disable it in order to start directed
* advertising. Any registered advertisements will be
* re-enabled after the connection attempt is finished.
@@ -1119,7 +1119,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_pause_adv_instances(&req);
- /* If requested to connect as slave use directed advertising */
+ /* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
/* If we're active scanning most controllers are unable
* to initiate advertising. Simply reject the attempt.
@@ -1842,7 +1842,7 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
* Table 6.2: Packets defined for synchronous, asynchronous, and
- * CSB logical transport types.
+ * CPB logical transport types.
*/
switch (conn->type) {
case SCO_LINK:
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fd12f1652bdf..2560ed2f144d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -545,24 +545,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
bool changed = false;
- /* If Connectionless Slave Broadcast master role is supported
+ /* If Connectionless Peripheral Broadcast central role is supported
* enable all necessary events for it.
*/
- if (lmp_csb_master_capable(hdev)) {
+ if (lmp_cpb_central_capable(hdev)) {
events[1] |= 0x40; /* Triggered Clock Capture */
events[1] |= 0x80; /* Synchronization Train Complete */
- events[2] |= 0x10; /* Slave Page Response Timeout */
- events[2] |= 0x20; /* CSB Channel Map Change */
+ events[2] |= 0x10; /* Peripheral Page Response Timeout */
+ events[2] |= 0x20; /* CPB Channel Map Change */
changed = true;
}
- /* If Connectionless Slave Broadcast slave role is supported
+ /* If Connectionless Peripheral Broadcast peripheral role is supported
* enable all necessary events for it.
*/
- if (lmp_csb_slave_capable(hdev)) {
+ if (lmp_cpb_peripheral_capable(hdev)) {
events[2] |= 0x01; /* Synchronization Train Received */
- events[2] |= 0x02; /* CSB Receive */
- events[2] |= 0x04; /* CSB Timeout */
+ events[2] |= 0x02; /* CPB Receive */
+ events[2] |= 0x04; /* CPB Timeout */
events[2] |= 0x08; /* Truncated Page Complete */
changed = true;
}
@@ -648,7 +648,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
*/
/* If the controller supports Extended Scanner Filter
- * Policies, enable the correspondig event.
+ * Policies, enable the corresponding event.
*/
if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
events[1] |= 0x04; /* LE Direct Advertising
@@ -749,14 +749,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
}
if (hdev->commands[26] & 0x40) {
- /* Read LE White List Size */
- hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
+ /* Read LE Accept List Size */
+ hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
0, NULL);
}
if (hdev->commands[26] & 0x80) {
- /* Clear LE White List */
- hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
+ /* Clear LE Accept List */
+ hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
}
if (hdev->commands[34] & 0x40) {
@@ -1454,7 +1454,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
}
/* Check for valid public address or a configured static
- * random adddress, but let the HCI setup proceed to
+ * random address, but let the HCI setup proceed to
* be able to determine if there is a public address
* or not.
*
@@ -1610,8 +1610,13 @@ setup_failed:
} else {
/* Init failed, cleanup */
flush_work(&hdev->tx_work);
- flush_work(&hdev->cmd_work);
+
+ /* Since hci_rx_work() is possible to awake new cmd_work
+ * it should be flushed first to avoid unexpected call of
+ * hci_cmd_work()
+ */
flush_work(&hdev->rx_work);
+ flush_work(&hdev->cmd_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -1716,15 +1721,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- test_bit(HCI_UP, &hdev->flags)) {
- /* Execute vendor specific shutdown routine */
- if (hdev->shutdown)
- hdev->shutdown(hdev);
- }
-
cancel_delayed_work(&hdev->power_off);
+ cancel_delayed_work(&hdev->ncmd_timer);
hci_request_cancel_all(hdev);
hci_req_sync_lock(hdev);
@@ -1800,6 +1798,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
clear_bit(HCI_INIT, &hdev->flags);
}
+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ hdev->shutdown(hdev);
+ }
+
/* flush cmd work */
flush_work(&hdev->cmd_work);
@@ -2772,6 +2778,24 @@ static void hci_cmd_timeout(struct work_struct *work)
queue_work(hdev->workqueue, &hdev->cmd_work);
}
+/* HCI ncmd timer function */
+static void hci_ncmd_timeout(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ ncmd_timer.work);
+
+ bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
+
+ /* During HCI_INIT phase no events can be injected if the ncmd timer
+ * triggers since the procedure has its own timeout handling.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags))
+ return;
+
+ /* This is an irrecoverable state, inject hardware error event */
+ hci_reset_dev(hdev);
+}
+
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type)
{
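
The new ncmd timer acts as a watchdog for a wedged controller: HCI command-complete and command-status events carry the number of commands the controller will still accept (ncmd), and if that value stays at zero the host can never send another command, so after a timeout the core injects a synthetic Hardware Error event via hci_reset_dev() to force a reset. The arming side lives in hci_event.c, outside this excerpt; presumably it looks roughly like the sketch below (handle_ncmd() is a hypothetical name and HCI_NCMD_TIMEOUT an assumed constant):

	/* Sketch of the arming logic assumed to sit in the event path */
	static void handle_ncmd(struct hci_dev *hdev, u8 ncmd)
	{
		if (!ncmd)
			queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
					   HCI_NCMD_TIMEOUT);
		else
			cancel_delayed_work(&hdev->ncmd_timer);
	}
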
@@ -3544,7 +3568,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
continue;
- /* If trying to estabilish one time connection to disabled
+ /* If trying to establish one time connection to disabled
* device, leave the params, but mark them as just once.
*/
if (params->explicit_connect) {
@@ -3689,13 +3713,13 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
/* Suspend consists of two actions:
* - First, disconnect everything and make the controller not
* connectable (disabling scanning)
- * - Second, program event filter/whitelist and enable scan
+ * - Second, program event filter/accept list and enable scan
*/
ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
if (!ret)
state = BT_SUSPEND_DISCONNECT;
- /* Only configure whitelist if disconnect succeeded and wake
+ /* Only configure accept list if disconnect succeeded and wake
* isn't being prevented.
*/
if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
@@ -3803,14 +3827,14 @@ struct hci_dev *hci_alloc_dev(void)
mutex_init(&hdev->req_lock);
INIT_LIST_HEAD(&hdev->mgmt_pending);
- INIT_LIST_HEAD(&hdev->blacklist);
- INIT_LIST_HEAD(&hdev->whitelist);
+ INIT_LIST_HEAD(&hdev->reject_list);
+ INIT_LIST_HEAD(&hdev->accept_list);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->identity_resolving_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
- INIT_LIST_HEAD(&hdev->le_white_list);
+ INIT_LIST_HEAD(&hdev->le_accept_list);
INIT_LIST_HEAD(&hdev->le_resolv_list);
INIT_LIST_HEAD(&hdev->le_conn_params);
INIT_LIST_HEAD(&hdev->pend_le_conns);
@@ -3836,6 +3860,7 @@ struct hci_dev *hci_alloc_dev(void)
init_waitqueue_head(&hdev->suspend_wait_q);
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
+ INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
hci_request_setup(hdev);
@@ -4022,8 +4047,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
destroy_workqueue(hdev->req_workqueue);
hci_dev_lock(hdev);
- hci_bdaddr_list_clear(&hdev->blacklist);
- hci_bdaddr_list_clear(&hdev->whitelist);
+ hci_bdaddr_list_clear(&hdev->reject_list);
+ hci_bdaddr_list_clear(&hdev->accept_list);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
@@ -4031,7 +4056,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_remote_oob_data_clear(hdev);
hci_adv_instances_clear(hdev);
hci_adv_monitors_clear(hdev);
- hci_bdaddr_list_clear(&hdev->le_white_list);
+ hci_bdaddr_list_clear(&hdev->le_accept_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list);
hci_conn_params_clear_all(hdev);
hci_discovery_filter_clear(hdev);
@@ -4073,6 +4098,8 @@ int hci_reset_dev(struct hci_dev *hdev)
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb_put_data(skb, hw_err, 3);
+ bt_dev_err(hdev, "Injecting HCI hardware error event");
+
/* Send Hardware Error to upper stack */
return hci_recv_frame(hdev, skb);
}
@@ -4279,7 +4306,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
-/* Send HCI command and wait for command commplete event */
+/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 47f4f21fbc1a..841393389f7b 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -125,7 +125,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
struct bdaddr_list *b;
hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->whitelist, list)
+ list_for_each_entry(b, &hdev->accept_list, list)
seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
list_for_each_entry(p, &hdev->le_conn_params, list) {
seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
@@ -144,7 +144,7 @@ static int blacklist_show(struct seq_file *f, void *p)
struct bdaddr_list *b;
hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->blacklist, list)
+ list_for_each_entry(b, &hdev->reject_list, list)
seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
hci_dev_unlock(hdev);
@@ -784,7 +784,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
struct bdaddr_list *b;
hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->le_white_list, list)
+ list_for_each_entry(b, &hdev->le_accept_list, list)
seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
hci_dev_unlock(hdev);
@@ -1195,7 +1195,7 @@ void hci_debugfs_create_le(struct hci_dev *hdev)
&force_static_address_fops);
debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
- &hdev->le_white_list_size);
+ &hdev->le_accept_list_size);
debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
&white_list_fops);
debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs,
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 016b2999f219..1c3018202564 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -236,7 +236,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hdev->ssp_debug_mode = 0;
- hci_bdaddr_list_clear(&hdev->le_white_list);
+ hci_bdaddr_list_clear(&hdev->le_accept_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
@@ -1492,21 +1492,21 @@ static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
hdev->le_num_of_adv_sets = rp->num_of_sets;
}
-static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
+ struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
if (rp->status)
return;
- hdev->le_white_list_size = rp->size;
+ hdev->le_accept_list_size = rp->size;
}
-static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -1515,13 +1515,13 @@ static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
if (status)
return;
- hci_bdaddr_list_clear(&hdev->le_white_list);
+ hci_bdaddr_list_clear(&hdev->le_accept_list);
}
-static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_cp_le_add_to_white_list *sent;
+ struct hci_cp_le_add_to_accept_list *sent;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1529,18 +1529,18 @@ static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
if (status)
return;
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
if (!sent)
return;
- hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
- sent->bdaddr_type);
+ hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
+ sent->bdaddr_type);
}
-static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_cp_le_del_from_white_list *sent;
+ struct hci_cp_le_del_from_accept_list *sent;
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1548,11 +1548,11 @@ static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
if (status)
return;
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
if (!sent)
return;
- hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
+ hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
sent->bdaddr_type);
}
@@ -2069,7 +2069,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
if (conn &&
(conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, 0, name, name_len);
+ mgmt_device_connected(hdev, conn, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
return;
@@ -2367,7 +2367,7 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
/* We don't want the connection attempt to stick around
* indefinitely since LE doesn't have a page timeout concept
* like BR/EDR. Set a timer for any connection that doesn't use
- * the white list for connecting.
+ * the accept list for connecting.
*/
if (filter_policy == HCI_LE_USE_PEER_ADDR)
queue_delayed_work(conn->hdev->workqueue,
@@ -2623,7 +2623,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* only used during suspend.
*/
if (ev->link_type == ACL_LINK &&
- hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
+ hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
&ev->bdaddr,
BDADDR_BREDR)) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
@@ -2745,19 +2745,19 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
return;
}
- /* Require HCI_CONNECTABLE or a whitelist entry to accept the
+ /* Require HCI_CONNECTABLE or an accept list entry to accept the
* connection. These features are only touched through mgmt so
* only do the checks if HCI_MGMT is set.
*/
if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
- !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
+ !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
return;
@@ -2795,9 +2795,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
bacpy(&cp.bdaddr, &ev->bdaddr);
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
- cp.role = 0x00; /* Become master */
+ cp.role = 0x00; /* Become central */
else
- cp.role = 0x01; /* Remain slave */
+ cp.role = 0x01; /* Remain peripheral */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else if (!(flags & HCI_PROTO_DEFER)) {
@@ -3256,7 +3256,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, 0, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3268,6 +3268,23 @@ unlock:
hci_dev_unlock(hdev);
}
+static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev,
+ u16 opcode, u8 ncmd)
+{
+ if (opcode != HCI_OP_NOP)
+ cancel_delayed_work(&hdev->cmd_timer);
+
+ if (!test_bit(HCI_RESET, &hdev->flags)) {
+ if (ncmd) {
+ cancel_delayed_work(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+ } else {
+ schedule_delayed_work(&hdev->ncmd_timer,
+ HCI_NCMD_TIMEOUT);
+ }
+ }
+}
+
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
@@ -3521,20 +3538,20 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_set_scan_enable(hdev, skb);
break;
- case HCI_OP_LE_READ_WHITE_LIST_SIZE:
- hci_cc_le_read_white_list_size(hdev, skb);
+ case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
+ hci_cc_le_read_accept_list_size(hdev, skb);
break;
- case HCI_OP_LE_CLEAR_WHITE_LIST:
- hci_cc_le_clear_white_list(hdev, skb);
+ case HCI_OP_LE_CLEAR_ACCEPT_LIST:
+ hci_cc_le_clear_accept_list(hdev, skb);
break;
- case HCI_OP_LE_ADD_TO_WHITE_LIST:
- hci_cc_le_add_to_white_list(hdev, skb);
+ case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
+ hci_cc_le_add_to_accept_list(hdev, skb);
break;
- case HCI_OP_LE_DEL_FROM_WHITE_LIST:
- hci_cc_le_del_from_white_list(hdev, skb);
+ case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
+ hci_cc_le_del_from_accept_list(hdev, skb);
break;
case HCI_OP_LE_READ_SUPPORTED_STATES:
@@ -3630,11 +3647,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
break;
}
- if (*opcode != HCI_OP_NOP)
- cancel_delayed_work(&hdev->cmd_timer);
-
- if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
- atomic_set(&hdev->cmd_cnt, 1);
+ handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
req_complete_skb);
@@ -3735,11 +3748,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
break;
}
- if (*opcode != HCI_OP_NOP)
- cancel_delayed_work(&hdev->cmd_timer);
-
- if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
- atomic_set(&hdev->cmd_cnt, 1);
+ handle_cmd_cnt_and_timer(hdev, *opcode, ev->ncmd);
/* Indicate request completion if the command failed. Also, if
* we're not waiting for a special event and we get a success
@@ -4330,7 +4339,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, 0, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -4404,12 +4413,12 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
- switch (conn->setting & SCO_AIRMODE_MASK) {
- case SCO_AIRMODE_CVSD:
+ switch (ev->air_mode) {
+ case 0x02:
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
break;
- case SCO_AIRMODE_TRANSP:
+ case 0x03:
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
break;
@@ -5122,8 +5131,8 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->dst_type = bdaddr_type;
/* If we didn't have a hci_conn object previously
- * but we're in master role this must be something
- * initiated using a white list. Since white list based
+ * but we're in central role this must be something
+ * initiated using an accept list. Since accept list based
* connections are not "first class citizens" we don't
* have full tracking of them. Therefore, we go ahead
* with a "best effort" approach of determining the
@@ -5187,6 +5196,23 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->dst_type = irk->addr_type;
}
+ /* When controller based address resolution is in use, the new
+ * address types 0x02 and 0x03 are used. These types need to be
+ * converted back into either public or random address type.
+ */
+ if (use_ll_privacy(hdev) &&
+ hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
+ hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
+ switch (conn->dst_type) {
+ case ADDR_LE_DEV_PUBLIC_RESOLVED:
+ conn->dst_type = ADDR_LE_DEV_PUBLIC;
+ break;
+ case ADDR_LE_DEV_RANDOM_RESOLVED:
+ conn->dst_type = ADDR_LE_DEV_RANDOM;
+ break;
+ }
+ }
+
if (status) {
hci_le_conn_failed(conn, status);
goto unlock;
@@ -5198,13 +5224,13 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
addr_type = BDADDR_LE_RANDOM;
/* Drop the connection if the device is blocked */
- if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
hci_conn_drop(conn);
goto unlock;
}
if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, 0, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = handle;
@@ -5217,17 +5243,17 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
- /* The remote features procedure is defined for master
+ /* The remote features procedure is defined for central
* role only. So only in case of an initiated connection
* request the remote features.
*
- * If the local controller supports slave-initiated features
- * exchange, then requesting the remote features in slave
+ * If the local controller supports peripheral-initiated features
+ * exchange, then requesting the remote features in peripheral
* role is possible. Otherwise just transition into the
* connected state without requesting the remote features.
*/
if (conn->out ||
- (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
+ (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
struct hci_cp_le_read_remote_features cp;
cp.handle = __cpu_to_le16(conn->handle);
@@ -5296,8 +5322,19 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
- if (ev->status)
+ if (ev->status) {
+ struct adv_info *adv;
+
+ adv = hci_find_adv_instance(hdev, ev->handle);
+ if (!adv)
+ return;
+
+ /* Remove advertising as it has been terminated */
+ hci_remove_adv_instance(hdev, ev->handle);
+ mgmt_advertising_removed(NULL, hdev, ev->handle);
+
return;
+ }
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
if (conn) {
@@ -5354,13 +5391,13 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
return NULL;
/* Ignore if the device is blocked */
- if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
return NULL;
/* Most controller will fail if we try to create new connections
- * while we have an existing one in slave role.
+ * while we have an existing one in peripheral role.
*/
- if (hdev->conn_hash.le_num_slave > 0 &&
+ if (hdev->conn_hash.le_num_peripheral > 0 &&
(!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
!(hdev->le_states[3] & 0x10)))
return NULL;
@@ -5378,7 +5415,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
case HCI_AUTO_CONN_DIRECT:
/* Only devices advertising with ADV_DIRECT_IND are
* triggering a connection attempt. This is allowing
- * incoming connections from slave devices.
+ * incoming connections from peripheral devices.
*/
if (adv_type != LE_ADV_DIRECT_IND)
return NULL;
@@ -5386,8 +5423,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
case HCI_AUTO_CONN_ALWAYS:
/* Devices advertising with ADV_IND or ADV_DIRECT_IND
* are triggering a connection attempt. This means
- * that incoming connections from slave device are
- * accepted and also outgoing connections to slave
+ * that incoming connections from peripheral device are
+ * accepted and also outgoing connections to peripheral
* devices are established when found.
*/
break;
@@ -5441,7 +5478,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
struct hci_conn *conn;
bool match;
u32 flags;
- u8 *ptr, real_len;
+ u8 *ptr;
switch (type) {
case LE_ADV_IND:
@@ -5472,14 +5509,10 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
break;
}
- real_len = ptr - data;
-
- /* Adjust for actual length */
- if (len != real_len) {
- bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
- len, real_len);
- len = real_len;
- }
+ /* Adjust for the actual length. This handles the case where the
+ * remote device advertises with an incorrect data length.
+ */
+ len = ptr - data;
/* If the direct address is present, then this report is from
* a LE Direct Advertising Report event. In that case it is
@@ -5752,7 +5785,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
if (conn->state == BT_CONFIG) {
__u8 status;
- /* If the local controller supports slave-initiated
+ /* If the local controller supports peripheral-initiated
* features exchange, but the remote controller does
* not, then it is possible that the error code 0x1a
* for unsupported remote feature gets returned.
@@ -5761,8 +5794,8 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
* transition into connected state and mark it as
* successful.
*/
- if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
- !conn->out && ev->status == 0x1a)
+ if (!conn->out && ev->status == 0x1a &&
+ (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
status = ev->status;
@@ -6032,7 +6065,7 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
return true;
}
- /* Check if request ended in Command Status - no way to retreive
+ /* Check if request ended in Command Status - no way to retrieve
* any extra parameters in this case.
*/
if (hdr->evt == HCI_EV_CMD_STATUS)
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index fa9125b782f8..1d14adc023e9 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -745,17 +745,17 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
}
}
-static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
- u8 bdaddr_type)
+static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
+ u8 bdaddr_type)
{
- struct hci_cp_le_del_from_white_list cp;
+ struct hci_cp_le_del_from_accept_list cp;
cp.bdaddr_type = bdaddr_type;
bacpy(&cp.bdaddr, bdaddr);
- bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
+ bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(req->hdev) &&
hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
@@ -774,31 +774,31 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
}
}
-/* Adds connection to white list if needed. On error, returns -1. */
-static int add_to_white_list(struct hci_request *req,
- struct hci_conn_params *params, u8 *num_entries,
- bool allow_rpa)
+/* Adds connection to accept list if needed. On error, returns -1. */
+static int add_to_accept_list(struct hci_request *req,
+ struct hci_conn_params *params, u8 *num_entries,
+ bool allow_rpa)
{
- struct hci_cp_le_add_to_white_list cp;
+ struct hci_cp_le_add_to_accept_list cp;
struct hci_dev *hdev = req->hdev;
- /* Already in white list */
- if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
+ /* Already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
params->addr_type))
return 0;
/* Select filter policy to accept all advertising */
- if (*num_entries >= hdev->le_white_list_size)
+ if (*num_entries >= hdev->le_accept_list_size)
return -1;
- /* White list can not be used with RPAs */
+ /* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1;
}
- /* During suspend, only wakeable devices can be in whitelist */
+ /* During suspend, only wakeable devices can be in accept list */
if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
params->current_flags))
return 0;
@@ -807,9 +807,9 @@ static int add_to_white_list(struct hci_request *req,
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
- bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
@@ -837,15 +837,15 @@ static int add_to_white_list(struct hci_request *req,
return 0;
}
-static u8 update_white_list(struct hci_request *req)
+static u8 update_accept_list(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
struct bdaddr_list *b;
u8 num_entries = 0;
bool pend_conn, pend_report;
- /* We allow whitelisting even with RPAs in suspend. In the worst case,
- * we won't be able to wake from devices that use the privacy1.2
+ /* We allow usage of accept list even with RPAs in suspend. In the worst
+ * case, we won't be able to wake from devices that use the privacy1.2
* features. Additionally, once we support privacy1.2 and IRK
* offloading, we can update this to also check for those conditions.
*/
@@ -855,13 +855,13 @@ static u8 update_white_list(struct hci_request *req)
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
allow_rpa = true;
- /* Go through the current white list programmed into the
+ /* Go through the current accept list programmed into the
* controller one by one and check if that address is still
* in the list of pending connections or list of devices to
* report. If not present in either list, then queue the
* command to remove it from the controller.
*/
- list_for_each_entry(b, &hdev->le_white_list, list) {
+ list_for_each_entry(b, &hdev->le_accept_list, list) {
pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
&b->bdaddr,
b->bdaddr_type);
@@ -870,14 +870,14 @@ static u8 update_white_list(struct hci_request *req)
b->bdaddr_type);
/* If the device is not likely to connect or report,
- * remove it from the whitelist.
+ * remove it from the accept list.
*/
if (!pend_conn && !pend_report) {
- del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
+ del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
continue;
}
- /* White list can not be used with RPAs */
+ /* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
@@ -887,27 +887,27 @@ static u8 update_white_list(struct hci_request *req)
num_entries++;
}
- /* Since all no longer valid white list entries have been
+ /* Since all no longer valid accept list entries have been
* removed, walk through the list of pending connections
* and ensure that any new device gets programmed into
* the controller.
*
* If the list of the devices is larger than the list of
- * available white list entries in the controller, then
+ * available accept list entries in the controller, then
* just abort and return the filter policy value to not use the
- * white list.
+ * accept list.
*/
list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (add_to_white_list(req, params, &num_entries, allow_rpa))
+ if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
/* After adding all new pending connections, walk through
* the list of pending reports and also add these to the
- * white list if there is still space. Abort if space runs out.
+ * accept list if there is still space. Abort if space runs out.
*/
list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (add_to_white_list(req, params, &num_entries, allow_rpa))
+ if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
@@ -921,7 +921,7 @@ static u8 update_white_list(struct hci_request *req)
hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00;
- /* Select filter policy to use white list */
+ /* Select filter policy to use accept list */
return 0x01;
}
@@ -932,7 +932,7 @@ static bool scan_use_rpa(struct hci_dev *hdev)
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
u16 window, u8 own_addr_type, u8 filter_policy,
- bool addr_resolv)
+ bool filter_dup, bool addr_resolv)
{
struct hci_dev *hdev = req->hdev;
@@ -997,7 +997,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
ext_enable_cp.enable = LE_SCAN_ENABLE;
- ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ ext_enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(ext_enable_cp), &ext_enable_cp);
@@ -1016,7 +1016,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
&enable_cp);
}
@@ -1053,6 +1053,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 own_addr_type;
u8 filter_policy;
u16 window, interval;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
/* Background scanning should run with address resolution */
bool addr_resolv = true;
@@ -1076,20 +1078,20 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
return;
bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
- /* Adding or removing entries from the white list must
+ /* Adding or removing entries from the accept list must
* happen before enabling scanning. The controller does
- * not allow white list modification while scanning.
+ * not allow accept list modification while scanning.
*/
- filter_policy = update_white_list(req);
+ filter_policy = update_accept_list(req);
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
* for handling directed advertising.
*
- * So instead of using filter polices 0x00 (no whitelist)
- * and 0x01 (whitelist enabled) use the new filter policies
- * 0x02 (no whitelist) and 0x03 (whitelist enabled).
+ * So instead of using filter policies 0x00 (no accept list)
+ * and 0x01 (accept list enabled) use the new filter policies
+ * 0x02 (no accept list) and 0x03 (accept list enabled).
*/
if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
@@ -1106,14 +1108,30 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
} else if (hci_is_adv_monitoring(hdev)) {
window = hdev->le_scan_window_adv_monitor;
interval = hdev->le_scan_int_adv_monitor;
+
+ /* Disable the duplicates filter when scanning on behalf of an
+ * advertisement monitor, for the following reasons.
+ *
+ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
+ * controllers ignore RSSI_Sampling_Period when the duplicates
+ * filter is enabled.
+ *
+ * For SW pattern filtering, when not doing interleaved scanning
+ * the duplicates filter must be disabled; otherwise the host can
+ * receive only one advertisement per peer and cannot tell whether
+ * a peer is still in range.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
}
- bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
+ bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
+ filter_policy);
hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
- own_addr_type, filter_policy, addr_resolv);
+ own_addr_type, filter_policy, filter_dup,
+ addr_resolv);
}
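The new parameter simply threads through to the (extended) scan enable command; a condensed sketch of the call shape after this change, with the monitor check deciding the filter:

	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	if (hci_is_adv_monitoring(hdev))
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);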
static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
@@ -1163,7 +1181,7 @@ static void hci_req_set_event_filter(struct hci_request *req)
/* Always clear event filter when starting */
hci_req_clear_event_filter(req);
- list_for_each_entry(b, &hdev->whitelist, list) {
+ list_for_each_entry(b, &hdev->accept_list, list) {
if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
b->current_flags))
continue;
@@ -1502,13 +1520,14 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
if (hci_conn_num(hdev, LE_LINK) == 0)
return true;
- /* Check le_states if there is any connection in slave role. */
- if (hdev->conn_hash.le_num_slave > 0) {
- /* Slave connection state and non connectable mode bit 20. */
+ /* Check le_states if there is any connection in peripheral role. */
+ if (hdev->conn_hash.le_num_peripheral > 0) {
+ /* Peripheral connection state and non connectable mode bit 20.
+ */
if (!connectable && !(hdev->le_states[2] & 0x10))
return false;
- /* Slave connection state and connectable mode bit 38
+ /* Peripheral connection state and connectable mode bit 38
* and scannable bit 21.
*/
if (connectable && (!(hdev->le_states[4] & 0x40) ||
@@ -1516,13 +1535,13 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
return false;
}
- /* Check le_states if there is any connection in master role. */
- if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
- /* Master connection state and non connectable mode bit 18. */
+ /* Check le_states if there is any connection in central role. */
+ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
+ /* Central connection state and non connectable mode bit 18. */
if (!connectable && !(hdev->le_states[2] & 0x02))
return false;
- /* Master connection state and connectable mode bit 35 and
+ /* Central connection state and connectable mode bit 35 and
* scannable 19.
*/
if (connectable && (!(hdev->le_states[4] & 0x08) ||
@@ -1697,30 +1716,33 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
return;
if (ext_adv_capable(hdev)) {
- struct hci_cp_le_set_ext_scan_rsp_data cp;
+ struct {
+ struct hci_cp_le_set_ext_scan_rsp_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
- memset(&cp, 0, sizeof(cp));
+ memset(&pdu, 0, sizeof(pdu));
if (instance)
len = create_instance_scan_rsp_data(hdev, instance,
- cp.data);
+ pdu.data);
else
- len = create_default_scan_rsp_data(hdev, cp.data);
+ len = create_default_scan_rsp_data(hdev, pdu.data);
if (hdev->scan_rsp_data_len == len &&
- !memcmp(cp.data, hdev->scan_rsp_data, len))
+ !memcmp(pdu.data, hdev->scan_rsp_data, len))
return;
- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ memcpy(hdev->scan_rsp_data, pdu.data, len);
hdev->scan_rsp_data_len = len;
- cp.handle = instance;
- cp.length = len;
- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu.cp.handle = instance;
+ pdu.cp.length = len;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
- &cp);
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp);
} else {
struct hci_cp_le_set_scan_rsp_data cp;
@@ -1843,26 +1865,30 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
return;
if (ext_adv_capable(hdev)) {
- struct hci_cp_le_set_ext_adv_data cp;
+ struct {
+ struct hci_cp_le_set_ext_adv_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
- memset(&cp, 0, sizeof(cp));
+ memset(&pdu, 0, sizeof(pdu));
- len = create_instance_adv_data(hdev, instance, cp.data);
+ len = create_instance_adv_data(hdev, instance, pdu.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
+ memcmp(pdu.data, hdev->adv_data, len) == 0)
return;
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ memcpy(hdev->adv_data, pdu.data, len);
hdev->adv_data_len = len;
- cp.length = len;
- cp.handle = instance;
- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu.cp.length = len;
+ pdu.cp.handle = instance;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp);
} else {
struct hci_cp_le_set_adv_data cp;
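The struct-plus-buffer pattern introduced above avoids sending the full HCI_MAX_EXT_AD_LENGTH buffer every time; only the fixed header plus the bytes actually used go on the wire. A condensed sketch (build_data() is a hypothetical stand-in for the create_*_data() helpers):

	struct {
		struct hci_cp_le_set_ext_adv_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;

	memset(&pdu, 0, sizeof(pdu));
	len = build_data(hdev, instance, pdu.data);	/* hypothetical helper */

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
		    sizeof(pdu.cp) + len, &pdu.cp);	/* header + used bytes */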
@@ -2605,11 +2631,11 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
return 0;
}
-static bool disconnected_whitelist_entries(struct hci_dev *hdev)
+static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
struct bdaddr_list *b;
- list_for_each_entry(b, &hdev->whitelist, list) {
+ list_for_each_entry(b, &hdev->accept_list, list) {
struct hci_conn *conn;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
@@ -2641,7 +2667,7 @@ void __hci_req_update_scan(struct hci_request *req)
return;
if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
- disconnected_whitelist_entries(hdev))
+ disconnected_accept_list_entries(hdev))
scan = SCAN_PAGE;
else
scan = SCAN_DISABLED;
@@ -3133,8 +3159,10 @@ static int active_scan(struct hci_request *req, unsigned long opt)
uint16_t interval = opt;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
- /* White list is not used for discovery */
+ /* Accept list is not used for discovery */
u8 filter_policy = 0x00;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
/* Discovery doesn't require controller address resolution */
bool addr_resolv = false;
int err;
@@ -3159,9 +3187,26 @@ static int active_scan(struct hci_request *req, unsigned long opt)
if (err < 0)
own_addr_type = ADDR_LE_DEV_PUBLIC;
+ if (hci_is_adv_monitoring(hdev)) {
+ /* The duplicates filter should be disabled while an
+ * advertisement monitor is active; otherwise AdvMon can only
+ * receive one advertisement per peer during active scanning
+ * and might wrongly report those peers as lost.
+ *
+ * Note that controllers differ in what they treat as a
+ * |duplicate|: some match on address alone, others on address
+ * plus RSSI. In the latter case disabling the filter is not
+ * strictly necessary, but since active scanning typically runs
+ * only for a short period, the power impact is negligible.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ }
+
hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
hdev->le_scan_window_discovery, own_addr_type,
- filter_policy, addr_resolv);
+ filter_policy, filter_dup, addr_resolv);
return 0;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 251b9128f530..b04a5a02ecf3 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, &hci_sk_list.head) {
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
sk->sk_err = EPIPE;
@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
hci_dev_put(hdev);
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
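On the locking change above: bh_lock_sock_nested() takes the raw BH spinlock, while lock_sock() is the sleeping owner lock meant for process context such as this event handler; packets arriving in the meantime land on the socket backlog and run at unlock. The usage shape, roughly:

	lock_sock(sk);		/* process context; may sleep */
	/* ... detach hci_pi(sk)->hdev, set sk->sk_err ... */
	release_sock(sk);	/* drops the lock and runs the backlog */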
@@ -892,7 +892,7 @@ static int hci_sock_release(struct socket *sock)
return 0;
}
-static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
int err;
@@ -902,14 +902,14 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
hci_dev_lock(hdev);
- err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
+ err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
hci_dev_unlock(hdev);
return err;
}
-static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
int err;
@@ -919,7 +919,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
hci_dev_lock(hdev);
- err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
+ err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
hci_dev_unlock(hdev);
@@ -959,12 +959,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return hci_sock_blacklist_add(hdev, (void __user *)arg);
+ return hci_sock_reject_list_add(hdev, (void __user *)arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return hci_sock_blacklist_del(hdev, (void __user *)arg);
+ return hci_sock_reject_list_del(hdev, (void __user *)arg);
}
return -ENOIOCTLCMD;
@@ -1130,7 +1130,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (!hci_sock_gen_cookie(sk)) {
/* In the case when a cookie has already been assigned,
* then there has already been an ioctl issued against
- * an unbound socket and with that triggerd an open
+ * an unbound socket and with that triggered an open
* notification. Send a close notification first to
* allow the state transition to bounded.
*/
@@ -1326,9 +1326,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
if (!hci_sock_gen_cookie(sk)) {
/* In the case when a cookie has already been
- * assigned, this socket will transtion from
+ * assigned, this socket will transition from
* a raw socket into a control socket. To
- * allow for a clean transtion, send the
+ * allow for a clean transition, send the
* close notification first.
*/
skb = create_monitor_ctrl_close(sk);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0db48c812662..80848dfc01db 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -508,7 +508,7 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
int done_with_skb = 1;
- BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
+ BT_DBG("session %p skb %p len %u param 0x%02x", session, skb, skb->len, param);
switch (param) {
case HIDP_DATA_RTYPE_INPUT:
@@ -553,7 +553,7 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
unsigned char hdr, type, param;
int free_skb = 1;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %p skb %p len %u", session, skb, skb->len);
hdr = skb->data[0];
skb_pull(skb, 1);
@@ -589,7 +589,7 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
{
unsigned char hdr;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %p skb %p len %u", session, skb, skb->len);
hdr = skb->data[0];
skb_pull(skb, 1);
@@ -794,7 +794,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
- /* True if device is blacklisted in drivers/hid/hid-quirks.c */
+ /* True if device is blocked in drivers/hid/hid-quirks.c */
if (hid_ignore(hid)) {
hid_destroy_device(session->hid);
session->hid = NULL;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b6a88b8256c7..77ba68209dbd 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1691,7 +1691,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
if (hcon->out)
smp_conn_security(hcon, hcon->pending_sec_level);
- /* For LE slave connections, make sure the connection interval
+ /* For LE peripheral connections, make sure the connection interval
* is in the range of the minimum and maximum interval that has
* been configured for this connection. If not, then trigger
* the connection update procedure.
@@ -4237,7 +4237,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
hci_dev_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
- mgmt_device_connected(hdev, hcon, 0, NULL, 0);
+ mgmt_device_connected(hdev, hcon, NULL, 0);
hci_dev_unlock(hdev);
l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
@@ -6066,7 +6066,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
struct hci_conn *hcon = conn->hcon;
u16 mtu, mps, credits, result;
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *tmp;
int err = 0, sec_level;
int i = 0;
@@ -6085,7 +6085,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
cmd_len -= sizeof(*rsp);
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
u16 dcid;
if (chan->ident != cmd->ident ||
@@ -6248,7 +6248,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
u8 *data)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *tmp;
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
u16 result;
@@ -6262,7 +6262,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
if (!result)
return 0;
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
if (chan->ident != cmd->ident)
continue;
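The switch to the _safe iterator above matters because channels can be unlinked from conn->chan_l while the response is being processed; the safe variant caches the next entry up front. Minimal shape of the pattern:

	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;
		/* chan may now be deleted; tmp keeps the walk valid */
	}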
@@ -7662,7 +7662,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
* at least ensure that we ignore incoming data from them.
*/
if (hcon->type == LE_LINK &&
- hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
+ hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
bdaddr_dst_type(hcon))) {
kfree_skb(skb);
return;
@@ -8119,7 +8119,7 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
dst_type = bdaddr_dst_type(hcon);
/* If device is blocked, do not create channels for it */
- if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
return;
/* Find fixed channels and notify them of the new connection. We
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f9be7f9084d6..3663f880df11 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -40,7 +40,7 @@
#include "msft.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 20
+#define MGMT_REVISION 21
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -252,12 +252,15 @@ static const u8 mgmt_status_table[] = {
MGMT_STATUS_TIMEOUT, /* Instant Passed */
MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
MGMT_STATUS_REJECTED, /* QoS Rejected */
MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
MGMT_STATUS_REJECTED, /* Insufficient Security */
MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_FAILED, /* Slot Violation */
MGMT_STATUS_FAILED, /* Role Switch Failed */
MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
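The placeholder entries are needed because the table is indexed directly by HCI status code; dropping a reserved code would shift every subsequent mapping. The lookup is essentially this (a sketch consistent with the existing mgmt_status() helper, which is not shown in this diff):

	static u8 mgmt_status(u8 hci_status)
	{
		if (hci_status < ARRAY_SIZE(mgmt_status_table))
			return mgmt_status_table[hci_status];

		return MGMT_STATUS_FAILED;
	}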
@@ -2956,7 +2959,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* When pairing a new device, it is expected to remember
* this device for future connections. Adding the connection
* parameter information ahead of time allows tracking
- * of the slave preferred values and will speed up any
+ * of the peripheral preferred values and will speed up any
* further connection establishment.
*
* If connection parameters already exist, then they
@@ -3341,7 +3344,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* The name is stored in the scan response data and so
- * no need to udpate the advertising data here.
+ * no need to update the advertising data here.
*/
if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
@@ -4058,8 +4061,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ memset(&rp, 0, sizeof(rp));
+
if (cp->addr.type == BDADDR_BREDR) {
- br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
+ br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
&cp->addr.bdaddr,
cp->addr.type);
if (!br_params)
@@ -4127,7 +4132,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
if (cp->addr.type == BDADDR_BREDR) {
- br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
+ br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
&cp->addr.bdaddr,
cp->addr.type);
@@ -4274,7 +4279,7 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
done:
hci_dev_unlock(hdev);
- bt_dev_dbg(hdev, "add monitor %d complete, status %d",
+ bt_dev_dbg(hdev, "add monitor %d complete, status %u",
rp.monitor_handle, status);
return err;
@@ -4499,7 +4504,7 @@ int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
done:
hci_dev_unlock(hdev);
- bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
+ bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
rp.monitor_handle, status);
return err;
@@ -4829,7 +4834,7 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
- bt_dev_dbg(hdev, "status %d", status);
+ bt_dev_dbg(hdev, "status %u", status);
hci_dev_lock(hdev);
@@ -5085,7 +5090,7 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
- bt_dev_dbg(hdev, "status %d", status);
+ bt_dev_dbg(hdev, "status %u", status);
hci_dev_lock(hdev);
@@ -5204,7 +5209,7 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
+ err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
cp->addr.type);
if (err < 0) {
status = MGMT_STATUS_FAILED;
@@ -5240,7 +5245,7 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
+ err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
cp->addr.type);
if (err < 0) {
status = MGMT_STATUS_INVALID_PARAMS;
@@ -5298,7 +5303,7 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
u16 opcode)
{
- bt_dev_dbg(hdev, "status %d", status);
+ bt_dev_dbg(hdev, "status %u", status);
}
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
@@ -6164,7 +6169,7 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
- if (key->master != 0x00 && key->master != 0x01)
+ if (key->initiator != 0x00 && key->initiator != 0x01)
return false;
switch (key->addr.type) {
@@ -6242,11 +6247,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
- type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
+ type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
break;
case MGMT_LTK_AUTHENTICATED:
authenticated = 0x01;
- type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
+ type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
break;
case MGMT_LTK_P256_UNAUTH:
authenticated = 0x00;
@@ -6342,7 +6347,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
handle = __le16_to_cpu(cp->handle);
conn = hci_conn_hash_lookup_handle(hdev, handle);
if (!conn) {
- bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
+ bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
handle);
goto unlock;
}
@@ -6731,7 +6736,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
+ err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
&cp->addr.bdaddr,
cp->addr.type, 0);
if (err)
@@ -6829,7 +6834,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
}
if (cp->addr.type == BDADDR_BREDR) {
- err = hci_bdaddr_list_del(&hdev->whitelist,
+ err = hci_bdaddr_list_del(&hdev->accept_list,
&cp->addr.bdaddr,
cp->addr.type);
if (err) {
@@ -6900,7 +6905,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
+ list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
list_del(&b->list);
kfree(b);
@@ -7585,6 +7590,9 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
cur_len = data[i];
+ if (!cur_len)
+ continue;
+
if (data[i + 1] == EIR_FLAGS &&
(!is_adv_data || flags_managed(adv_flags)))
return false;
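The new zero-length check above hardens the classic length-prefixed AD/EIR walk: a zero length byte must advance the cursor by exactly one instead of being interpreted as a field header. The loop shape:

	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)		/* padding/malformed: skip one byte */
			continue;

		/* data[i + 1] is the AD type, the payload follows */
	}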
@@ -7646,7 +7654,7 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status,
struct adv_info *adv_instance, *n;
u8 instance;
- bt_dev_dbg(hdev, "status %d", status);
+ bt_dev_dbg(hdev, "status %u", status);
hci_dev_lock(hdev);
@@ -8176,7 +8184,7 @@ static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
struct mgmt_cp_remove_advertising *cp;
struct mgmt_rp_remove_advertising rp;
- bt_dev_dbg(hdev, "status %d", status);
+ bt_dev_dbg(hdev, "status %u", status);
hci_dev_lock(hdev);
@@ -8641,7 +8649,7 @@ static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
switch (ltk->type) {
case SMP_LTK:
- case SMP_LTK_SLAVE:
+ case SMP_LTK_RESPONDER:
if (ltk->authenticated)
return MGMT_LTK_AUTHENTICATED;
return MGMT_LTK_UNAUTHENTICATED;
@@ -8687,7 +8695,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.key.rand = key->rand;
if (key->type == SMP_LTK)
- ev.key.master = 1;
+ ev.key.initiator = 1;
/* Make sure we copy only the significant bytes based on the
* encryption key size, and set the rest of the value to zeroes.
@@ -8767,15 +8775,19 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
- u32 flags, u8 *name, u8 name_len)
+ u8 *name, u8 name_len)
{
char buf[512];
struct mgmt_ev_device_connected *ev = (void *) buf;
u16 eir_len = 0;
+ u32 flags = 0;
bacpy(&ev->addr.bdaddr, &conn->dst);
ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+ if (conn->out)
+ flags |= MGMT_DEV_FOUND_INITIATED_CONN;
+
ev->flags = __cpu_to_le32(flags);
/* We must ensure that the EIR Data fields are ordered and
diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c
index 1deb0ca7a929..6ef701c27da4 100644
--- a/net/bluetooth/mgmt_config.c
+++ b/net/bluetooth/mgmt_config.c
@@ -146,7 +146,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
if (buffer_left < exp_len) {
- bt_dev_warn(hdev, "invalid len left %d, exp >= %d",
+ bt_dev_warn(hdev, "invalid len left %u, exp >= %u",
buffer_left, exp_len);
return mgmt_cmd_status(sk, hdev->id,
@@ -198,7 +198,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (exp_type_len && len != exp_type_len) {
- bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d",
+ bt_dev_warn(hdev, "invalid length %d, exp %zu for type %u",
len, exp_type_len, type);
return mgmt_cmd_status(sk, hdev->id,
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index e28f15439ce4..b4bfae41e8a5 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -34,12 +34,12 @@ struct msft_le_monitor_advertisement_pattern {
__u8 length;
__u8 data_type;
__u8 start_byte;
- __u8 pattern[0];
+ __u8 pattern[];
};
struct msft_le_monitor_advertisement_pattern_data {
__u8 count;
- __u8 data[0];
+ __u8 data[];
};
struct msft_cp_le_monitor_advertisement {
@@ -49,7 +49,7 @@ struct msft_cp_le_monitor_advertisement {
__u8 rssi_low_interval;
__u8 rssi_sampling_period;
__u8 cond_type;
- __u8 data[0];
+ __u8 data[];
} __packed;
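The [0] -> [] conversions adopt C99 flexible array members, which let the compiler and fortify checks see the real bounds instead of the old zero-length GNU extension. Allocation for such a struct is sized with struct_size() from <linux/overflow.h>; a sketch, where data_len is an illustrative payload length:

	struct msft_cp_le_monitor_advertisement *cp;

	cp = kzalloc(struct_size(cp, data, data_len), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;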
struct msft_rp_le_monitor_advertisement {
@@ -311,7 +311,7 @@ static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
if (!monitor) {
- bt_dev_err(hdev, "msft add advmon: monitor %d is not found!",
+ bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
msft->pending_add_handle);
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index a58584949a95..8cb53e10a985 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -198,20 +198,22 @@ static void rfcomm_reparent_device(struct rfcomm_dev *dev)
hci_dev_put(hdev);
}
-static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
+static ssize_t address_show(struct device *tty_dev,
+ struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
return sprintf(buf, "%pMR\n", &dev->dst);
}
-static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
+static ssize_t channel_show(struct device *tty_dev,
+ struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
return sprintf(buf, "%d\n", dev->channel);
}
-static DEVICE_ATTR(address, 0444, show_address, NULL);
-static DEVICE_ATTR(channel, 0444, show_channel, NULL);
+static DEVICE_ATTR_RO(address);
+static DEVICE_ATTR_RO(channel);
static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
struct rfcomm_dlc *dlc)
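The renames to the <attr>_show convention above are what make DEVICE_ATTR_RO() applicable: the macro generates a 0444 attribute named dev_attr_<attr> wired to <attr>_show(). Roughly:

	/* DEVICE_ATTR_RO(channel) expands approximately to: */
	static struct device_attribute dev_attr_channel = {
		.attr = { .name = "channel", .mode = 0444 },
		.show = channel_show,
	};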
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 3bd41563f118..d9a4e88dacbb 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -310,7 +310,7 @@ static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
if (!sk)
goto drop;
- BT_DBG("sk %p len %d", sk, skb->len);
+ BT_DBG("sk %p len %u", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
@@ -905,7 +905,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
opts.mtu = sco_pi(sk)->conn->mtu;
- BT_DBG("mtu %d", opts.mtu);
+ BT_DBG("mtu %u", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
@@ -1167,7 +1167,7 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
- BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
@@ -1196,7 +1196,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
if (!conn)
goto drop;
- BT_DBG("conn %p len %d", conn, skb->len);
+ BT_DBG("conn %p len %u", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 372e3b25aaa4..11f853d0500f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -40,7 +40,7 @@
((struct smp_dev *)((struct l2cap_chan *)((hdev)->smp_data))->data)
/* Low-level debug macros to be used for stuff that we don't want
- * accidentially in dmesg, i.e. the values of the various crypto keys
+ * accidentally in dmesg, i.e. the values of the various crypto keys
* and the inputs & outputs of crypto functions.
*/
#ifdef DEBUG
@@ -111,9 +111,9 @@ struct smp_chan {
u8 id_addr_type;
u8 irk[16];
struct smp_csrk *csrk;
- struct smp_csrk *slave_csrk;
+ struct smp_csrk *responder_csrk;
struct smp_ltk *ltk;
- struct smp_ltk *slave_ltk;
+ struct smp_ltk *responder_ltk;
struct smp_irk *remote_irk;
u8 *link_key;
unsigned long flags;
@@ -560,7 +560,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
return err;
/* This is unlikely, but we need to check that
- * we didn't accidentially generate a debug key.
+ * we didn't accidentally generate a debug key.
*/
if (crypto_memneq(smp->local_pk, debug_pk, 64))
break;
@@ -753,7 +753,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
mgmt_smp_complete(hcon, complete);
kfree_sensitive(smp->csrk);
- kfree_sensitive(smp->slave_csrk);
+ kfree_sensitive(smp->responder_csrk);
kfree_sensitive(smp->link_key);
crypto_free_shash(smp->tfm_cmac);
@@ -776,9 +776,9 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
kfree_rcu(smp->ltk, rcu);
}
- if (smp->slave_ltk) {
- list_del_rcu(&smp->slave_ltk->list);
- kfree_rcu(smp->slave_ltk, rcu);
+ if (smp->responder_ltk) {
+ list_del_rcu(&smp->responder_ltk->list);
+ kfree_rcu(smp->responder_ltk, rcu);
}
if (smp->remote_irk) {
@@ -859,7 +859,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
memset(smp->tk, 0, sizeof(smp->tk));
clear_bit(SMP_FLAG_TK_VALID, &smp->flags);
- bt_dev_dbg(hcon->hdev, "auth:%d lcl:%d rem:%d", auth, local_io,
+ bt_dev_dbg(hcon->hdev, "auth:%u lcl:%u rem:%u", auth, local_io,
remote_io);
/* If neither side wants MITM, either "just" confirm an incoming
@@ -909,8 +909,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
hcon->pending_sec_level = BT_SECURITY_HIGH;
}
- /* If both devices have Keyoard-Display I/O, the master
- * Confirms and the slave Enters the passkey.
+ /* If both devices have Keyboard-Display I/O, the initiator
+ * Confirms and the responder Enters the passkey.
*/
if (smp->method == OVERLAP) {
if (hcon->role == HCI_ROLE_MASTER)
@@ -925,7 +925,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
get_random_bytes(&passkey, sizeof(passkey));
passkey %= 1000000;
put_unaligned_le32(passkey, smp->tk);
- bt_dev_dbg(hcon->hdev, "PassKey: %d", passkey);
+ bt_dev_dbg(hcon->hdev, "PassKey: %u", passkey);
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
}
@@ -979,7 +979,7 @@ static u8 smp_random(struct smp_chan *smp)
int ret;
bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
- conn->hcon->out ? "master" : "slave");
+ conn->hcon->out ? "initiator" : "responder");
ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
hcon->init_addr_type, &hcon->init_addr,
@@ -1021,8 +1021,8 @@ static u8 smp_random(struct smp_chan *smp)
else
auth = 0;
- /* Even though there's no _SLAVE suffix this is the
- * slave STK we're adding for later lookup (the master
+ /* Even though there's no _RESPONDER suffix this is the
+ * responder STK we're adding for later lookup (the initiator
* STK never needs to be stored).
*/
hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
@@ -1077,10 +1077,10 @@ static void smp_notify_keys(struct l2cap_conn *conn)
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
- if (smp->slave_csrk) {
- smp->slave_csrk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->slave_csrk->bdaddr, &hcon->dst);
- mgmt_new_csrk(hdev, smp->slave_csrk, persistent);
+ if (smp->responder_csrk) {
+ smp->responder_csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
@@ -1089,10 +1089,10 @@ static void smp_notify_keys(struct l2cap_conn *conn)
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
- if (smp->slave_ltk) {
- smp->slave_ltk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
- mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
+ if (smp->responder_ltk) {
+ smp->responder_ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
}
if (smp->link_key) {
@@ -1272,7 +1272,7 @@ static void smp_distribute_keys(struct smp_chan *smp)
if (*keydist & SMP_DIST_ENC_KEY) {
struct smp_cmd_encrypt_info enc;
- struct smp_cmd_master_ident ident;
+ struct smp_cmd_initiator_ident ident;
struct smp_ltk *ltk;
u8 authenticated;
__le16 ediv;
@@ -1293,14 +1293,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
authenticated = hcon->sec_level == BT_SECURITY_HIGH;
ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
- SMP_LTK_SLAVE, authenticated, enc.ltk,
+ SMP_LTK_RESPONDER, authenticated, enc.ltk,
smp->enc_key_size, ediv, rand);
- smp->slave_ltk = ltk;
+ smp->responder_ltk = ltk;
ident.ediv = ediv;
ident.rand = rand;
- smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
+ smp_send_cmd(conn, SMP_CMD_INITIATOR_IDENT, sizeof(ident),
+ &ident);
*keydist &= ~SMP_DIST_ENC_KEY;
}
@@ -1343,7 +1344,7 @@ static void smp_distribute_keys(struct smp_chan *smp)
csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED;
memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
}
- smp->slave_csrk = csrk;
+ smp->responder_csrk = csrk;
smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
@@ -1654,7 +1655,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
case MGMT_OP_USER_PASSKEY_REPLY:
value = le32_to_cpu(passkey);
memset(smp->tk, 0, sizeof(smp->tk));
- bt_dev_dbg(conn->hcon->hdev, "PassKey: %d", value);
+ bt_dev_dbg(conn->hcon->hdev, "PassKey: %u", value);
put_unaligned_le32(value, smp->tk);
fallthrough;
case MGMT_OP_USER_CONFIRM_REPLY:
@@ -1902,7 +1903,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
return SMP_UNSPECIFIED;
/* This is unlikely, but we need to check that
- * we didn't accidentially generate a debug key.
+ * we didn't accidentally generate a debug key.
*/
if (crypto_memneq(smp->local_pk, debug_pk, 64))
break;
@@ -2048,7 +2049,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
struct smp_cmd_pairing *req, *rsp;
u8 auth;
- /* The issue is only observed when we're in slave role */
+ /* The issue is only observed when we're in responder role */
if (hcon->out)
return SMP_UNSPECIFIED;
@@ -2084,7 +2085,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- bt_dev_dbg(hdev, "conn %p %s", conn, hcon->out ? "master" : "slave");
+ bt_dev_dbg(hdev, "conn %p %s", conn,
+ hcon->out ? "initiator" : "responder");
if (skb->len < sizeof(smp->pcnf))
return SMP_INVALID_PARAMS;
@@ -2251,7 +2253,7 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
hci_le_start_enc(hcon, key->ediv, key->rand, key->val, key->enc_size);
hcon->enc_key_size = key->enc_size;
- /* We never store STKs for master role, so clear this flag */
+ /* We never store STKs for initiator role, so clear this flag */
clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
return true;
@@ -2467,7 +2469,7 @@ int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Set keys to NULL to make sure smp_failure() does not try to
* remove and free already invalidated rcu list entries. */
smp->ltk = NULL;
- smp->slave_ltk = NULL;
+ smp->responder_ltk = NULL;
smp->remote_irk = NULL;
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
@@ -2503,7 +2505,7 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_INVALID_PARAMS;
}
- SMP_ALLOW_CMD(smp, SMP_CMD_MASTER_IDENT);
+ SMP_ALLOW_CMD(smp, SMP_CMD_INITIATOR_IDENT);
skb_pull(skb, sizeof(*rp));
@@ -2512,9 +2514,9 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
-static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
+static int smp_cmd_initiator_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_cmd_initiator_ident *rp = (void *)skb->data;
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
struct hci_dev *hdev = conn->hcon->hdev;
@@ -2913,7 +2915,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
- /* Slave sends DHKey check as response to master */
+ /* Responder sends DHKey check as response to initiator */
sc_dhkey_check(smp);
}
@@ -3000,8 +3002,8 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
reason = smp_cmd_encrypt_info(conn, skb);
break;
- case SMP_CMD_MASTER_IDENT:
- reason = smp_cmd_master_ident(conn, skb);
+ case SMP_CMD_INITIATOR_IDENT:
+ reason = smp_cmd_initiator_ident(conn, skb);
break;
case SMP_CMD_IDENT_INFO:
@@ -3081,7 +3083,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
return;
- /* Only master may initiate SMP over BR/EDR */
+ /* Only initiator may initiate SMP over BR/EDR */
if (hcon->role != HCI_ROLE_MASTER)
return;
@@ -3229,7 +3231,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
- bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+ BT_DBG("pchan %p", pchan);
chan = l2cap_chan_create();
if (!chan)
@@ -3250,7 +3252,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
*/
atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
- bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+ BT_DBG("created chan %p", chan);
return chan;
}
@@ -3354,7 +3356,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
{
struct smp_dev *smp;
- bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+ BT_DBG("chan %p", chan);
smp = chan->data;
if (smp) {
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index fc35a8bf358e..87a59ec2c9f0 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -79,8 +79,8 @@ struct smp_cmd_encrypt_info {
__u8 ltk[16];
} __packed;
-#define SMP_CMD_MASTER_IDENT 0x07
-struct smp_cmd_master_ident {
+#define SMP_CMD_INITIATOR_IDENT 0x07
+struct smp_cmd_initiator_ident {
__le16 ediv;
__le64 rand;
} __packed;
@@ -146,7 +146,7 @@ struct smp_cmd_keypress_notify {
enum {
SMP_STK,
SMP_LTK,
- SMP_LTK_SLAVE,
+ SMP_LTK_RESPONDER,
SMP_LTK_P256,
SMP_LTK_P256_DEBUG,
};
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index a5d72c48fb66..aa47af349ba8 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -409,7 +409,7 @@ static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
return ERR_PTR(-ENOMEM);
if (data_in) {
- err = bpf_check_uarg_tail_zero(data_in, max_size, size);
+ err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
if (err) {
kfree(data);
return ERR_PTR(err);
@@ -918,3 +918,46 @@ out:
kfree(user_ctx);
return ret;
}
+
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+ __u32 ctx_size_in = kattr->test.ctx_size_in;
+ void *ctx = NULL;
+ u32 retval;
+ int err = 0;
+
+ /* doesn't support data_in/out, ctx_out, duration, repeat or flags */
+ if (kattr->test.data_in || kattr->test.data_out ||
+ kattr->test.ctx_out || kattr->test.duration ||
+ kattr->test.repeat || kattr->test.flags)
+ return -EINVAL;
+
+ if (ctx_size_in < prog->aux->max_ctx_offset ||
+ ctx_size_in > U16_MAX)
+ return -EINVAL;
+
+ if (ctx_size_in) {
+ ctx = kzalloc(ctx_size_in, GFP_USER);
+ if (!ctx)
+ return -ENOMEM;
+ if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
+ err = -EFAULT;
+ goto out;
+ }
+ }
+ retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+
+ if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
+ err = -EFAULT;
+ goto out;
+ }
+ if (ctx_size_in)
+ if (copy_to_user(ctx_in, ctx, ctx_size_in))
+ err = -EFAULT;
+out:
+ kfree(ctx);
+ return err;
+}
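The new bpf_prog_test_run_syscall() only honours ctx_in/ctx_size_in and returns the program's return value; the context buffer is also copied back, so the program can report through it. A minimal userspace sketch of driving it, assuming prog_fd refers to an already-loaded BPF_PROG_TYPE_SYSCALL program:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int test_run_syscall_prog(int prog_fd, void *ctx, __u32 ctx_size,
                                     __u32 *retval)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.ctx_in = (unsigned long)ctx;  /* copied in and back out */
        attr.test.ctx_size_in = ctx_size;       /* must cover max_ctx_offset */

        if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
                return -1;
        *retval = attr.test.retval;
        return 0;
    }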
diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
index 05e1cfc1e5cd..291a92546246 100644
--- a/net/bpfilter/main.c
+++ b/net/bpfilter/main.c
@@ -57,7 +57,7 @@ int main(void)
{
debug_f = fopen("/dev/kmsg", "w");
setvbuf(debug_f, 0, _IOLBF, 0);
- fprintf(debug_f, "Started bpfilter\n");
+ fprintf(debug_f, "<5>Started bpfilter\n");
loop();
fclose(debug_f);
return 0;
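The added "<5>" is the syslog level prefix that /dev/kmsg parses (0 = emerg through 7 = debug, so 5 = notice); without it the write lands at the default log level. The convention in isolation:

    /* writes to /dev/kmsg may start with "<N>" to select the severity */
    FILE *f = fopen("/dev/kmsg", "w");

    if (f) {
        fprintf(f, "<5>example: a notice-level message\n");
        fclose(f);
    }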
diff --git a/net/bridge/br_cfm.c b/net/bridge/br_cfm.c
index 001064f7583d..a3c755d0a09d 100644
--- a/net/bridge/br_cfm.c
+++ b/net/bridge/br_cfm.c
@@ -142,7 +142,7 @@ static void br_cfm_notify(int event, const struct net_bridge_port *port)
{
u32 filter = RTEXT_FILTER_CFM_STATUS;
- return br_info_notify(event, port->br, NULL, filter);
+ br_info_notify(event, port->br, NULL, filter);
}
static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 698b79747d32..2b862cffc03a 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -440,9 +440,14 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
if (!port)
ret = 0;
else {
+ const struct net_bridge_port *dst = NULL;
+
fdb = br_fdb_find_rcu(port->br, addr, 0);
- ret = fdb && fdb->dst && fdb->dst->dev != dev &&
- fdb->dst->state == BR_STATE_FORWARDING;
+ if (fdb)
+ dst = READ_ONCE(fdb->dst);
+
+ ret = dst && dst->dev != dev &&
+ dst->state == BR_STATE_FORWARDING;
}
rcu_read_unlock();
@@ -509,7 +514,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
- fdb->dst = source;
+ WRITE_ONCE(fdb->dst, source);
fdb->key.vlan_id = vid;
fdb->flags = flags;
fdb->updated = fdb->used = jiffies;
@@ -600,10 +605,10 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
}
/* fastpath: update of existing entry */
- if (unlikely(source != fdb->dst &&
+ if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
- br_switchdev_fdb_notify(fdb, RTM_DELNEIGH);
- fdb->dst = source;
+ br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
+ WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
@@ -650,6 +655,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb,
u32 portid, u32 seq, int type, unsigned int flags)
{
+ const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
@@ -665,7 +671,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_pad2 = 0;
ndm->ndm_flags = 0;
ndm->ndm_type = 0;
- ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
+ ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(br, fdb);
if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
@@ -727,8 +733,9 @@ static inline size_t fdb_nlmsg_size(void)
}
static int br_fdb_replay_one(struct notifier_block *nb,
- struct net_bridge_fdb_entry *fdb,
- struct net_device *dev)
+ const struct net_bridge_fdb_entry *fdb,
+ struct net_device *dev, unsigned long action,
+ const void *ctx)
{
struct switchdev_notifier_fdb_info item;
int err;
@@ -737,35 +744,46 @@ static int br_fdb_replay_one(struct notifier_block *nb,
item.vid = fdb->key.vlan_id;
item.added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
item.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
+ item.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
item.info.dev = dev;
+ item.info.ctx = ctx;
- err = nb->notifier_call(nb, SWITCHDEV_FDB_ADD_TO_DEVICE, &item);
+ err = nb->notifier_call(nb, action, &item);
return notifier_to_errno(err);
}
-int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb)
+int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
+ const void *ctx, bool adding, struct notifier_block *nb)
{
struct net_bridge_fdb_entry *fdb;
struct net_bridge *br;
+ unsigned long action;
int err = 0;
- if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
+ if (!netif_is_bridge_master(br_dev))
+ return -EINVAL;
+
+ if (!netif_is_bridge_port(dev) && !netif_is_bridge_master(dev))
return -EINVAL;
br = netdev_priv(br_dev);
+ if (adding)
+ action = SWITCHDEV_FDB_ADD_TO_DEVICE;
+ else
+ action = SWITCHDEV_FDB_DEL_TO_DEVICE;
+
rcu_read_lock();
hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
- struct net_bridge_port *dst = READ_ONCE(fdb->dst);
+ const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
struct net_device *dst_dev;
dst_dev = dst ? dst->dev : br->dev;
if (dst_dev != br_dev && dst_dev != dev)
continue;
- err = br_fdb_replay_one(nb, fdb, dst_dev);
+ err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx);
if (err)
break;
}
@@ -785,7 +803,7 @@ static void fdb_notify(struct net_bridge *br,
int err = -ENOBUFS;
if (swdev_notify)
- br_switchdev_fdb_notify(fdb, type);
+ br_switchdev_fdb_notify(br, fdb, type);
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
@@ -955,8 +973,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (flags & NLM_F_EXCL)
return -EEXIST;
- if (fdb->dst != source) {
- fdb->dst = source;
+ if (READ_ONCE(fdb->dst) != source) {
+ WRITE_ONCE(fdb->dst, source);
modified = true;
}
}
@@ -1123,7 +1141,7 @@ static int fdb_delete_by_addr_and_port(struct net_bridge *br,
struct net_bridge_fdb_entry *fdb;
fdb = br_fdb_find(br, addr, vlan);
- if (!fdb || fdb->dst != p)
+ if (!fdb || READ_ONCE(fdb->dst) != p)
return -ENOENT;
fdb_delete(br, fdb, true);
@@ -1272,8 +1290,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
} else {
fdb->updated = jiffies;
- if (fdb->dst != p) {
- fdb->dst = p;
+ if (READ_ONCE(fdb->dst) != p) {
+ WRITE_ONCE(fdb->dst, p);
modified = true;
}
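All remaining plain loads and stores of fdb->dst are converted to READ_ONCE()/WRITE_ONCE() pairs, so lockless readers see either the old or the new port pointer, never a torn or re-loaded one. The essential shape of the pattern (a reduced sketch, not the bridge code itself; use() stands in for the caller's logic):

    /* writer, under the FDB lock */
    WRITE_ONCE(fdb->dst, source);

    /* lockless reader: snapshot once, then use only the snapshot */
    const struct net_bridge_port *dst = READ_ONCE(fdb->dst);

    if (dst && dst->state == BR_STATE_FORWARDING)
        use(dst->dev);  /* never re-read fdb->dst here */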
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6e9b049ae521..07856362538f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -276,7 +276,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
bool allow_mode_include = true;
struct hlist_node *rp;
- rp = rcu_dereference(hlist_first_rcu(&br->router_list));
+ rp = br_multicast_get_first_rport_node(br, skb);
+
if (mdst) {
p = rcu_dereference(mdst->ports);
if (br_multicast_should_handle_mode(br, mdst->addr.proto) &&
@@ -290,7 +291,7 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct net_bridge_port *port, *lport, *rport;
lport = p ? p->key.port : NULL;
- rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);
+ rport = br_multicast_rport_from_node_skb(rp, skb);
if ((unsigned long)lport > (unsigned long)rport) {
port = lport;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8875e953ac53..1f506309efa8 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -132,7 +132,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br, eth_hdr(skb), mdst)) {
if ((mdst && mdst->host_joined) ||
- br_multicast_is_router(br)) {
+ br_multicast_is_router(br, skb)) {
local_rcv = true;
br->dev->stats.multicast++;
}
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 95fa4af0e8dd..17a720b4473f 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -16,31 +16,76 @@
#include "br_private.h"
+static bool br_rports_have_mc_router(struct net_bridge *br)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return !hlist_empty(&br->ip4_mc_router_list) ||
+ !hlist_empty(&br->ip6_mc_router_list);
+#else
+ return !hlist_empty(&br->ip4_mc_router_list);
+#endif
+}
+
+static bool
+br_ip4_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
+{
+ *timer = br_timer_value(&port->ip4_mc_router_timer);
+ return !hlist_unhashed(&port->ip4_rlist);
+}
+
+static bool
+br_ip6_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ *timer = br_timer_value(&port->ip6_mc_router_timer);
+ return !hlist_unhashed(&port->ip6_rlist);
+#else
+ *timer = 0;
+ return false;
+#endif
+}
+
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_port *p;
+ bool have_ip4_mc_rtr, have_ip6_mc_rtr;
+ unsigned long ip4_timer, ip6_timer;
struct nlattr *nest, *port_nest;
+ struct net_bridge_port *p;
+
+ if (!br->multicast_router)
+ return 0;
- if (!br->multicast_router || hlist_empty(&br->router_list))
+ if (!br_rports_have_mc_router(br))
return 0;
nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
if (nest == NULL)
return -EMSGSIZE;
- hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
- if (!p)
+ list_for_each_entry_rcu(p, &br->port_list, list) {
+ have_ip4_mc_rtr = br_ip4_rports_get_timer(p, &ip4_timer);
+ have_ip6_mc_rtr = br_ip6_rports_get_timer(p, &ip6_timer);
+
+ if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
continue;
+
port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
if (!port_nest)
goto fail;
+
if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
- br_timer_value(&p->multicast_router_timer)) ||
+ max(ip4_timer, ip6_timer)) ||
nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
- p->multicast_router)) {
+ p->multicast_router) ||
+ (have_ip4_mc_rtr &&
+ nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
+ ip4_timer)) ||
+ (have_ip6_mc_rtr &&
+ nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
+ ip6_timer))) {
nla_nest_cancel(skb, port_nest);
goto fail;
}
@@ -522,19 +567,21 @@ static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
}
static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
- struct switchdev_obj_port_mdb *mdb,
+ const struct switchdev_obj_port_mdb *mdb,
+ unsigned long action, const void *ctx,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_port_obj_info obj_info = {
.info = {
.dev = dev,
.extack = extack,
+ .ctx = ctx,
},
.obj = &mdb->obj,
};
int err;
- err = nb->notifier_call(nb, SWITCHDEV_PORT_OBJ_ADD, &obj_info);
+ err = nb->notifier_call(nb, action, &obj_info);
return notifier_to_errno(err);
}
@@ -558,11 +605,13 @@ static int br_mdb_queue_one(struct list_head *mdb_list,
}
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack)
+ const void *ctx, bool adding, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- struct net_bridge_mdb_entry *mp;
+ const struct net_bridge_mdb_entry *mp;
struct switchdev_obj *obj, *tmp;
struct net_bridge *br;
+ unsigned long action;
LIST_HEAD(mdb_list);
int err = 0;
@@ -587,8 +636,8 @@ int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
rcu_read_lock();
hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
- struct net_bridge_port_group __rcu **pp;
- struct net_bridge_port_group *p;
+ struct net_bridge_port_group __rcu * const *pp;
+ const struct net_bridge_port_group *p;
if (mp->host_joined) {
err = br_mdb_queue_one(&mdb_list,
@@ -617,9 +666,14 @@ int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
rcu_read_unlock();
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
list_for_each_entry(obj, &mdb_list, list) {
err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- extack);
+ action, ctx, extack);
if (err)
goto out_free_mdb;
}
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index cd2b1e424e54..fd2de35ffb3c 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -204,6 +204,33 @@ static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
br_mrp_skb_common(skb, mrp);
+
+ /* In case the node behaves as an MRA, the Test frame needs to carry
+ * an Option TLV, which in turn includes a sub-option TLV of type
+ * AUTO_MGR.
+ */
+ if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
+ struct br_mrp_sub_option1_hdr *sub_opt = NULL;
+ struct br_mrp_tlv_hdr *sub_tlv = NULL;
+ struct br_mrp_oui_hdr *oui = NULL;
+ u8 length;
+
+ length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(*oui) +
+ MRP_OPT_PADDING;
+ br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);
+
+ oui = skb_put(skb, sizeof(*oui));
+ memset(oui, 0x0, sizeof(*oui));
+ sub_opt = skb_put(skb, sizeof(*sub_opt));
+ memset(sub_opt, 0x0, sizeof(*sub_opt));
+
+ sub_tlv = skb_put(skb, sizeof(*sub_tlv));
+ sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;
+
+ /* 32 bit alignment shall be ensured, therefore add 2 bytes */
+ skb_put(skb, MRP_OPT_PADDING);
+ }
+
br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
return skb;
@@ -627,8 +654,7 @@ int br_mrp_set_ring_state(struct net_bridge *br,
if (!mrp)
return -EINVAL;
- if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
- state->ring_state != BR_MRP_RING_STATE_CLOSED)
+ if (mrp->ring_state != state->ring_state)
mrp->ring_transitions++;
mrp->ring_state = state->ring_state;
@@ -715,8 +741,7 @@ int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
if (!mrp)
return -EINVAL;
- if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
- state->in_state != BR_MRP_IN_STATE_CLOSED)
+ if (mrp->in_state != state->in_state)
mrp->in_transitions++;
mrp->in_state = state->in_state;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 226bb05c3b42..53c3a9d80d9c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -51,8 +51,8 @@ static const struct rhashtable_params br_sg_port_rht_params = {
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
-static void br_multicast_add_router(struct net_bridge *br,
- struct net_bridge_port *port);
+static void br_ip4_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
@@ -60,7 +60,10 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);
-static void __del_port_router(struct net_bridge_port *p);
+static void
+br_multicast_rport_del_notify(struct net_bridge_port *p, bool deleted);
+static void br_ip6_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
@@ -1354,23 +1357,64 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
}
#endif
-static void br_multicast_router_expired(struct timer_list *t)
+static bool br_multicast_rport_del(struct hlist_node *rlist)
+{
+ if (hlist_unhashed(rlist))
+ return false;
+
+ hlist_del_init_rcu(rlist);
+ return true;
+}
+
+static bool br_ip4_multicast_rport_del(struct net_bridge_port *p)
+{
+ return br_multicast_rport_del(&p->ip4_rlist);
+}
+
+static bool br_ip6_multicast_rport_del(struct net_bridge_port *p)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ return br_multicast_rport_del(&p->ip6_rlist);
+#else
+ return false;
+#endif
+}
+
+static void br_multicast_router_expired(struct net_bridge_port *port,
+ struct timer_list *t,
+ struct hlist_node *rlist)
{
- struct net_bridge_port *port =
- from_timer(port, t, multicast_router_timer);
struct net_bridge *br = port->br;
+ bool del;
spin_lock(&br->multicast_lock);
if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
port->multicast_router == MDB_RTR_TYPE_PERM ||
- timer_pending(&port->multicast_router_timer))
+ timer_pending(t))
goto out;
- __del_port_router(port);
+ del = br_multicast_rport_del(rlist);
+ br_multicast_rport_del_notify(port, del);
out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_router_expired(struct timer_list *t)
+{
+ struct net_bridge_port *port = from_timer(port, t, ip4_mc_router_timer);
+
+ br_multicast_router_expired(port, t, &port->ip4_rlist);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_router_expired(struct timer_list *t)
+{
+ struct net_bridge_port *port = from_timer(port, t, ip6_mc_router_timer);
+
+ br_multicast_router_expired(port, t, &port->ip6_rlist);
+}
+#endif
+
static void br_mc_router_state_change(struct net_bridge *p,
bool is_mc_router)
{
@@ -1384,14 +1428,14 @@ static void br_mc_router_state_change(struct net_bridge *p,
switchdev_port_attr_set(p->dev, &attr, NULL);
}
-static void br_multicast_local_router_expired(struct timer_list *t)
+static void br_multicast_local_router_expired(struct net_bridge *br,
+ struct timer_list *timer)
{
- struct net_bridge *br = from_timer(br, t, multicast_router_timer);
-
spin_lock(&br->multicast_lock);
if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
br->multicast_router == MDB_RTR_TYPE_PERM ||
- timer_pending(&br->multicast_router_timer))
+ br_ip4_multicast_is_router(br) ||
+ br_ip6_multicast_is_router(br))
goto out;
br_mc_router_state_change(br, false);
@@ -1399,6 +1443,22 @@ out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_local_router_expired(struct timer_list *t)
+{
+ struct net_bridge *br = from_timer(br, t, ip4_mc_router_timer);
+
+ br_multicast_local_router_expired(br, t);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_local_router_expired(struct timer_list *t)
+{
+ struct net_bridge *br = from_timer(br, t, ip6_mc_router_timer);
+
+ br_multicast_local_router_expired(br, t);
+}
+#endif
+
static void br_multicast_querier_expired(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
@@ -1613,11 +1673,13 @@ int br_multicast_add_port(struct net_bridge_port *port)
port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
- timer_setup(&port->multicast_router_timer,
- br_multicast_router_expired, 0);
+ timer_setup(&port->ip4_mc_router_timer,
+ br_ip4_multicast_router_expired, 0);
timer_setup(&port->ip4_own_query.timer,
br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
+ timer_setup(&port->ip6_mc_router_timer,
+ br_ip6_multicast_router_expired, 0);
timer_setup(&port->ip6_own_query.timer,
br_ip6_multicast_port_query_expired, 0);
#endif
@@ -1649,7 +1711,10 @@ void br_multicast_del_port(struct net_bridge_port *port)
hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
br_multicast_gc(&deleted_head);
- del_timer_sync(&port->multicast_router_timer);
+ del_timer_sync(&port->ip4_mc_router_timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer_sync(&port->ip6_mc_router_timer);
+#endif
free_percpu(port->mcast_stats);
}
@@ -1673,9 +1738,10 @@ static void __br_multicast_enable_port(struct net_bridge_port *port)
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_own_query);
#endif
- if (port->multicast_router == MDB_RTR_TYPE_PERM &&
- hlist_unhashed(&port->rlist))
- br_multicast_add_router(br, port);
+ if (port->multicast_router == MDB_RTR_TYPE_PERM) {
+ br_ip4_multicast_add_router(br, port);
+ br_ip6_multicast_add_router(br, port);
+ }
}
void br_multicast_enable_port(struct net_bridge_port *port)
@@ -1692,19 +1758,22 @@ void br_multicast_disable_port(struct net_bridge_port *port)
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
struct hlist_node *n;
+ bool del = false;
spin_lock(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
br_multicast_find_del_pg(br, pg);
- __del_port_router(port);
-
- del_timer(&port->multicast_router_timer);
+ del |= br_ip4_multicast_rport_del(port);
+ del_timer(&port->ip4_mc_router_timer);
del_timer(&port->ip4_own_query.timer);
+ del |= br_ip6_multicast_rport_del(port);
#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&port->ip6_mc_router_timer);
del_timer(&port->ip6_own_query.timer);
#endif
+ br_multicast_rport_del_notify(port, del);
spin_unlock(&br->multicast_lock);
}
@@ -2615,22 +2684,6 @@ update:
}
#endif
-static bool br_multicast_select_querier(struct net_bridge *br,
- struct net_bridge_port *port,
- struct br_ip *saddr)
-{
- switch (saddr->proto) {
- case htons(ETH_P_IP):
- return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
-#if IS_ENABLED(CONFIG_IPV6)
- case htons(ETH_P_IPV6):
- return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
-#endif
- }
-
- return false;
-}
-
static void
br_multicast_update_query_timer(struct net_bridge *br,
struct bridge_mcast_other_query *query,
@@ -2655,45 +2708,122 @@ static void br_port_mc_router_state_change(struct net_bridge_port *p,
switchdev_port_attr_set(p->dev, &attr, NULL);
}
-/*
- * Add port to router_list
+static struct net_bridge_port *
+br_multicast_rport_from_node(struct net_bridge *br,
+ struct hlist_head *mc_router_list,
+ struct hlist_node *rlist)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (mc_router_list == &br->ip6_mc_router_list)
+ return hlist_entry(rlist, struct net_bridge_port, ip6_rlist);
+#endif
+ return hlist_entry(rlist, struct net_bridge_port, ip4_rlist);
+}
+
+static struct hlist_node *
+br_multicast_get_rport_slot(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct hlist_head *mc_router_list)
+
+{
+ struct hlist_node *slot = NULL;
+ struct net_bridge_port *p;
+ struct hlist_node *rlist;
+
+ hlist_for_each(rlist, mc_router_list) {
+ p = br_multicast_rport_from_node(br, mc_router_list, rlist);
+
+ if ((unsigned long)port >= (unsigned long)p)
+ break;
+
+ slot = rlist;
+ }
+
+ return slot;
+}
+
+static bool br_multicast_no_router_otherpf(struct net_bridge_port *port,
+ struct hlist_node *rnode)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (rnode != &port->ip6_rlist)
+ return hlist_unhashed(&port->ip6_rlist);
+ else
+ return hlist_unhashed(&port->ip4_rlist);
+#else
+ return true;
+#endif
+}
+
+/* Add port to router_list
* list is maintained ordered by pointer value
* and locked by br->multicast_lock and RCU
*/
static void br_multicast_add_router(struct net_bridge *br,
- struct net_bridge_port *port)
+ struct net_bridge_port *port,
+ struct hlist_node *rlist,
+ struct hlist_head *mc_router_list)
{
- struct net_bridge_port *p;
- struct hlist_node *slot = NULL;
+ struct hlist_node *slot;
- if (!hlist_unhashed(&port->rlist))
+ if (!hlist_unhashed(rlist))
return;
- hlist_for_each_entry(p, &br->router_list, rlist) {
- if ((unsigned long) port >= (unsigned long) p)
- break;
- slot = &p->rlist;
- }
+ slot = br_multicast_get_rport_slot(br, port, mc_router_list);
if (slot)
- hlist_add_behind_rcu(&port->rlist, slot);
+ hlist_add_behind_rcu(rlist, slot);
else
- hlist_add_head_rcu(&port->rlist, &br->router_list);
- br_rtr_notify(br->dev, port, RTM_NEWMDB);
- br_port_mc_router_state_change(port, true);
+ hlist_add_head_rcu(rlist, mc_router_list);
+
+ /* For backwards compatibility for now, only notify if we
+ * switched from no IPv4/IPv6 multicast router to a new
+ * IPv4 or IPv6 multicast router.
+ */
+ if (br_multicast_no_router_otherpf(port, rlist)) {
+ br_rtr_notify(br->dev, port, RTM_NEWMDB);
+ br_port_mc_router_state_change(port, true);
+ }
+}
+
+/* Add port to router_list
+ * list is maintained ordered by pointer value
+ * and locked by br->multicast_lock and RCU
+ */
+static void br_ip4_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+ br_multicast_add_router(br, port, &port->ip4_rlist,
+ &br->ip4_mc_router_list);
+}
+
+/* Add port to router_list
+ * list is maintained ordered by pointer value
+ * and locked by br->multicast_lock and RCU
+ */
+static void br_ip6_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ br_multicast_add_router(br, port, &port->ip6_rlist,
+ &br->ip6_mc_router_list);
+#endif
}
static void br_multicast_mark_router(struct net_bridge *br,
- struct net_bridge_port *port)
+ struct net_bridge_port *port,
+ struct timer_list *timer,
+ struct hlist_node *rlist,
+ struct hlist_head *mc_router_list)
{
unsigned long now = jiffies;
if (!port) {
if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
- if (!timer_pending(&br->multicast_router_timer))
+ if (!br_ip4_multicast_is_router(br) &&
+ !br_ip6_multicast_is_router(br))
br_mc_router_state_change(br, true);
- mod_timer(&br->multicast_router_timer,
- now + br->multicast_querier_interval);
+ mod_timer(timer, now + br->multicast_querier_interval);
}
return;
}
@@ -2702,24 +2832,71 @@ static void br_multicast_mark_router(struct net_bridge *br,
port->multicast_router == MDB_RTR_TYPE_PERM)
return;
- br_multicast_add_router(br, port);
+ br_multicast_add_router(br, port, rlist, mc_router_list);
+ mod_timer(timer, now + br->multicast_querier_interval);
+}
+
+static void br_ip4_multicast_mark_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+ struct timer_list *timer = &br->ip4_mc_router_timer;
+ struct hlist_node *rlist = NULL;
+
+ if (port) {
+ timer = &port->ip4_mc_router_timer;
+ rlist = &port->ip4_rlist;
+ }
- mod_timer(&port->multicast_router_timer,
- now + br->multicast_querier_interval);
+ br_multicast_mark_router(br, port, timer, rlist,
+ &br->ip4_mc_router_list);
}
-static void br_multicast_query_received(struct net_bridge *br,
- struct net_bridge_port *port,
- struct bridge_mcast_other_query *query,
- struct br_ip *saddr,
- unsigned long max_delay)
+static void br_ip6_multicast_mark_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct timer_list *timer = &br->ip6_mc_router_timer;
+ struct hlist_node *rlist = NULL;
+
+ if (port) {
+ timer = &port->ip6_mc_router_timer;
+ rlist = &port->ip6_rlist;
+ }
+
+ br_multicast_mark_router(br, port, timer, rlist,
+ &br->ip6_mc_router_list);
+#endif
+}
+
+static void
+br_ip4_multicast_query_received(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct bridge_mcast_other_query *query,
+ struct br_ip *saddr,
+ unsigned long max_delay)
+{
+ if (!br_ip4_multicast_select_querier(br, port, saddr->src.ip4))
+ return;
+
+ br_multicast_update_query_timer(br, query, max_delay);
+ br_ip4_multicast_mark_router(br, port);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+br_ip6_multicast_query_received(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct bridge_mcast_other_query *query,
+ struct br_ip *saddr,
+ unsigned long max_delay)
{
- if (!br_multicast_select_querier(br, port, saddr))
+ if (!br_ip6_multicast_select_querier(br, port, &saddr->src.ip6))
return;
br_multicast_update_query_timer(br, query, max_delay);
- br_multicast_mark_router(br, port);
+ br_ip6_multicast_mark_router(br, port);
}
+#endif
static void br_ip4_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
@@ -2768,8 +2945,8 @@ static void br_ip4_multicast_query(struct net_bridge *br,
saddr.proto = htons(ETH_P_IP);
saddr.src.ip4 = iph->saddr;
- br_multicast_query_received(br, port, &br->ip4_other_query,
- &saddr, max_delay);
+ br_ip4_multicast_query_received(br, port, &br->ip4_other_query,
+ &saddr, max_delay);
goto out;
}
@@ -2856,8 +3033,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
saddr.proto = htons(ETH_P_IPV6);
saddr.src.ip6 = ipv6_hdr(skb)->saddr;
- br_multicast_query_received(br, port, &br->ip6_other_query,
- &saddr, max_delay);
+ br_ip6_multicast_query_received(br, port, &br->ip6_other_query,
+ &saddr, max_delay);
goto out;
} else if (!group) {
goto out;
@@ -3087,7 +3264,7 @@ static void br_multicast_pim(struct net_bridge *br,
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
return;
- br_multicast_mark_router(br, port);
+ br_ip4_multicast_mark_router(br, port);
}
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
@@ -3098,7 +3275,7 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
return -ENOMSG;
- br_multicast_mark_router(br, port);
+ br_ip4_multicast_mark_router(br, port);
return 0;
}
@@ -3166,7 +3343,7 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
return;
- br_multicast_mark_router(br, port);
+ br_ip6_multicast_mark_router(br, port);
}
static int br_multicast_ipv6_rcv(struct net_bridge *br,
@@ -3316,13 +3493,15 @@ void br_multicast_init(struct net_bridge *br)
br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
spin_lock_init(&br->multicast_lock);
- timer_setup(&br->multicast_router_timer,
- br_multicast_local_router_expired, 0);
+ timer_setup(&br->ip4_mc_router_timer,
+ br_ip4_multicast_local_router_expired, 0);
timer_setup(&br->ip4_other_query.timer,
br_ip4_multicast_querier_expired, 0);
timer_setup(&br->ip4_own_query.timer,
br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
+ timer_setup(&br->ip6_mc_router_timer,
+ br_ip6_multicast_local_router_expired, 0);
timer_setup(&br->ip6_other_query.timer,
br_ip6_multicast_querier_expired, 0);
timer_setup(&br->ip6_own_query.timer,
@@ -3416,10 +3595,11 @@ void br_multicast_open(struct net_bridge *br)
void br_multicast_stop(struct net_bridge *br)
{
- del_timer_sync(&br->multicast_router_timer);
+ del_timer_sync(&br->ip4_mc_router_timer);
del_timer_sync(&br->ip4_other_query.timer);
del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
+ del_timer_sync(&br->ip6_mc_router_timer);
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
@@ -3453,7 +3633,10 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
case MDB_RTR_TYPE_DISABLED:
case MDB_RTR_TYPE_PERM:
br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
- del_timer(&br->multicast_router_timer);
+ del_timer(&br->ip4_mc_router_timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&br->ip6_mc_router_timer);
+#endif
br->multicast_router = val;
err = 0;
break;
@@ -3470,11 +3653,22 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
return err;
}
-static void __del_port_router(struct net_bridge_port *p)
+static void
+br_multicast_rport_del_notify(struct net_bridge_port *p, bool deleted)
{
- if (hlist_unhashed(&p->rlist))
+ if (!deleted)
+ return;
+
+ /* For backwards compatibility for now, only notify if there is
+ * no multicast router anymore for both IPv4 and IPv6.
+ */
+ if (!hlist_unhashed(&p->ip4_rlist))
return;
- hlist_del_init_rcu(&p->rlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!hlist_unhashed(&p->ip6_rlist))
+ return;
+#endif
+
br_rtr_notify(p->br->dev, p, RTM_DELMDB);
br_port_mc_router_state_change(p, false);
@@ -3488,34 +3682,52 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
struct net_bridge *br = p->br;
unsigned long now = jiffies;
int err = -EINVAL;
+ bool del = false;
spin_lock(&br->multicast_lock);
if (p->multicast_router == val) {
/* Refresh the temp router port timer */
- if (p->multicast_router == MDB_RTR_TYPE_TEMP)
- mod_timer(&p->multicast_router_timer,
+ if (p->multicast_router == MDB_RTR_TYPE_TEMP) {
+ mod_timer(&p->ip4_mc_router_timer,
now + br->multicast_querier_interval);
+#if IS_ENABLED(CONFIG_IPV6)
+ mod_timer(&p->ip6_mc_router_timer,
+ now + br->multicast_querier_interval);
+#endif
+ }
err = 0;
goto unlock;
}
switch (val) {
case MDB_RTR_TYPE_DISABLED:
p->multicast_router = MDB_RTR_TYPE_DISABLED;
- __del_port_router(p);
- del_timer(&p->multicast_router_timer);
+ del |= br_ip4_multicast_rport_del(p);
+ del_timer(&p->ip4_mc_router_timer);
+ del |= br_ip6_multicast_rport_del(p);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&p->ip6_mc_router_timer);
+#endif
+ br_multicast_rport_del_notify(p, del);
break;
case MDB_RTR_TYPE_TEMP_QUERY:
p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
- __del_port_router(p);
+ del |= br_ip4_multicast_rport_del(p);
+ del |= br_ip6_multicast_rport_del(p);
+ br_multicast_rport_del_notify(p, del);
break;
case MDB_RTR_TYPE_PERM:
p->multicast_router = MDB_RTR_TYPE_PERM;
- del_timer(&p->multicast_router_timer);
- br_multicast_add_router(br, p);
+ del_timer(&p->ip4_mc_router_timer);
+ br_ip4_multicast_add_router(br, p);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&p->ip6_mc_router_timer);
+#endif
+ br_ip6_multicast_add_router(br, p);
break;
case MDB_RTR_TYPE_TEMP:
p->multicast_router = MDB_RTR_TYPE_TEMP;
- br_multicast_mark_router(br, p);
+ br_ip4_multicast_mark_router(br, p);
+ br_ip6_multicast_mark_router(br, p);
break;
default:
goto unlock;
@@ -3621,7 +3833,7 @@ bool br_multicast_router(const struct net_device *dev)
bool is_router;
spin_lock_bh(&br->multicast_lock);
- is_router = br_multicast_is_router(br);
+ is_router = br_multicast_is_router(br, NULL);
spin_unlock_bh(&br->multicast_lock);
return is_router;
}
@@ -3842,6 +4054,61 @@ unlock:
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
+/**
+ * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
+ * @dev: The bridge port adjacent to which to check for a multicast router
+ * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
+ *
+ * Checks whether the given interface has a bridge on top and if so returns
+ * true if a multicast router is behind one of the other ports of this
+ * bridge. Otherwise returns false.
+ */
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
+{
+ struct net_bridge_port *port, *p;
+ bool ret = false;
+
+ rcu_read_lock();
+ port = br_port_get_check_rcu(dev);
+ if (!port)
+ goto unlock;
+
+ switch (proto) {
+ case ETH_P_IP:
+ hlist_for_each_entry_rcu(p, &port->br->ip4_mc_router_list,
+ ip4_rlist) {
+ if (p == port)
+ continue;
+
+ ret = true;
+ goto unlock;
+ }
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case ETH_P_IPV6:
+ hlist_for_each_entry_rcu(p, &port->br->ip6_mc_router_list,
+ ip6_rlist) {
+ if (p == port)
+ continue;
+
+ ret = true;
+ goto unlock;
+ }
+ break;
+#endif
+ default:
+ /* when compiled without IPv6 support, be conservative and
+ * always assume presence of an IPv6 multicast router
+ */
+ ret = true;
+ }
+
+unlock:
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
+
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
const struct sk_buff *skb, u8 type, u8 dir)
{
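br_multicast_has_router_adjacent() is exported for stacked users such as mesh protocols that want to suppress multicast flooding towards a port when a router already sits behind a different bridge port. A hypothetical caller (soft_iface and flood_to_mesh() are assumed names, for illustration only):

    /* flood IGMP traffic only if no IPv4 multicast router is reachable
     * through one of the other bridge ports
     */
    if (!br_multicast_has_router_adjacent(soft_iface, ETH_P_IP))
        flood_to_mesh(skb);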
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e4e6e991313e..8642e56059fb 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1644,7 +1644,6 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
p = br_port_get_rtnl(dev);
if (!p)
return 0;
- br = p->br;
vg = nbp_vlan_group(p);
break;
default:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 7ce8a77cc6b6..2b48b204205e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
#endif
struct br_tunnel_info {
- __be64 tunnel_id;
- struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
+ struct metadata_dst __rcu *tunnel_dst;
};
/* private vlan flags */
@@ -307,16 +307,18 @@ struct net_bridge_port {
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_own_query ip4_own_query;
+ struct timer_list ip4_mc_router_timer;
+ struct hlist_node ip4_rlist;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_own_query ip6_own_query;
+ struct timer_list ip6_mc_router_timer;
+ struct hlist_node ip6_rlist;
#endif /* IS_ENABLED(CONFIG_IPV6) */
u32 multicast_eht_hosts_limit;
u32 multicast_eht_hosts_cnt;
unsigned char multicast_router;
struct bridge_mcast_stats __percpu *mcast_stats;
- struct timer_list multicast_router_timer;
struct hlist_head mglist;
- struct hlist_node rlist;
#endif
#ifdef CONFIG_SYSFS
@@ -449,14 +451,16 @@ struct net_bridge {
struct hlist_head mcast_gc_list;
struct hlist_head mdb_list;
- struct hlist_head router_list;
- struct timer_list multicast_router_timer;
+ struct hlist_head ip4_mc_router_list;
+ struct timer_list ip4_mc_router_timer;
struct bridge_mcast_other_query ip4_other_query;
struct bridge_mcast_own_query ip4_own_query;
struct bridge_mcast_querier ip4_querier;
struct bridge_mcast_stats __percpu *mcast_stats;
#if IS_ENABLED(CONFIG_IPV6)
+ struct hlist_head ip6_mc_router_list;
+ struct timer_list ip6_mc_router_timer;
struct bridge_mcast_other_query ip6_other_query;
struct bridge_mcast_own_query ip6_own_query;
struct bridge_mcast_querier ip6_querier;
@@ -864,11 +868,58 @@ static inline bool br_group_is_l2(const struct br_ip *group)
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
-static inline bool br_multicast_is_router(struct net_bridge *br)
+static inline struct hlist_node *
+br_multicast_get_first_rport_node(struct net_bridge *b, struct sk_buff *skb) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (skb->protocol == htons(ETH_P_IPV6))
+ return rcu_dereference(hlist_first_rcu(&b->ip6_mc_router_list));
+#endif
+ return rcu_dereference(hlist_first_rcu(&b->ip4_mc_router_list));
+}
+
+static inline struct net_bridge_port *
+br_multicast_rport_from_node_skb(struct hlist_node *rp, struct sk_buff *skb) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (skb->protocol == htons(ETH_P_IPV6))
+ return hlist_entry_safe(rp, struct net_bridge_port, ip6_rlist);
+#endif
+ return hlist_entry_safe(rp, struct net_bridge_port, ip4_rlist);
+}
+
+static inline bool br_ip4_multicast_is_router(struct net_bridge *br)
+{
+ return timer_pending(&br->ip4_mc_router_timer);
+}
+
+static inline bool br_ip6_multicast_is_router(struct net_bridge *br)
{
- return br->multicast_router == 2 ||
- (br->multicast_router == 1 &&
- timer_pending(&br->multicast_router_timer));
+#if IS_ENABLED(CONFIG_IPV6)
+ return timer_pending(&br->ip6_mc_router_timer);
+#else
+ return false;
+#endif
+}
+
+static inline bool
+br_multicast_is_router(struct net_bridge *br, struct sk_buff *skb)
+{
+ switch (br->multicast_router) {
+ case MDB_RTR_TYPE_PERM:
+ return true;
+ case MDB_RTR_TYPE_TEMP_QUERY:
+ if (skb) {
+ if (skb->protocol == htons(ETH_P_IP))
+ return br_ip4_multicast_is_router(br);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return br_ip6_multicast_is_router(br);
+ } else {
+ return br_ip4_multicast_is_router(br) ||
+ br_ip6_multicast_is_router(br);
+ }
+ fallthrough;
+ default:
+ return false;
+ }
}
static inline bool
@@ -1017,7 +1068,8 @@ static inline void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
{
}
-static inline bool br_multicast_is_router(struct net_bridge *br)
+static inline bool br_multicast_is_router(struct net_bridge *br,
+ struct sk_buff *skb)
{
return false;
}
@@ -1602,8 +1654,8 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
unsigned long mask,
struct netlink_ext_ack *extack);
-void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
- int type);
+void br_switchdev_fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type);
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
struct netlink_ext_ack *extack);
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid);
@@ -1650,7 +1702,8 @@ static inline int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
}
static inline void
-br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+br_switchdev_fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type)
{
}
diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h
index 9559aa2750fb..bda8e1896712 100644
--- a/net/bridge/br_private_mrp.h
+++ b/net/bridge/br_private_mrp.h
@@ -6,6 +6,8 @@
#include "br_private.h"
#include <uapi/linux/mrp_bridge.h>
+#define MRP_OPT_PADDING 0x2
+
struct br_mrp {
/* list of mrp instances */
struct hlist_node list;
@@ -134,4 +136,13 @@ struct br_mrp_in_test_hdr {
__be32 timestamp;
} __attribute__((__packed__));
+struct br_mrp_oui_hdr {
+ __u8 oui[MRP_OUI_LENGTH];
+};
+
+struct br_mrp_sub_option1_hdr {
+ __u8 type;
+ __u8 data[MRP_MANUFACTURE_DATA_LENGTH];
+};
+
#endif /* _BR_PRIVATE_MRP_H */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 3dafb6143cff..1d80f34a139c 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -639,9 +639,9 @@ int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
return 0;
}
-clock_t br_get_ageing_time(struct net_device *br_dev)
+clock_t br_get_ageing_time(const struct net_device *br_dev)
{
- struct net_bridge *br;
+ const struct net_bridge *br;
if (!netif_is_bridge_master(br_dev))
return 0;
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index a5e601e41cb9..d3adee0f91f9 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -108,8 +108,11 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
}
void
-br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+br_switchdev_fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type)
{
+ const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
+ struct net_device *dev = dst ? dst->dev : br->dev;
struct switchdev_notifier_fdb_info info = {
.addr = fdb->key.addr.addr,
.vid = fdb->key.vlan_id,
@@ -118,17 +121,14 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags),
};
- if (!fdb->dst)
- return;
-
switch (type) {
case RTM_DELNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
- fdb->dst->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
case RTM_NEWNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
- fdb->dst->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
}
}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index da3256a3eed0..a08e9f193009 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -113,9 +113,7 @@ static void __vlan_add_list(struct net_bridge_vlan *v)
headp = &vg->vlan_list;
list_for_each_prev(hpos, headp) {
vent = list_entry(hpos, struct net_bridge_vlan, vlist);
- if (v->vid < vent->vid)
- continue;
- else
+ if (v->vid >= vent->vid)
break;
}
list_add_rcu(&v->vlist, hpos);
@@ -1809,28 +1807,32 @@ out_kfree:
static int br_vlan_replay_one(struct notifier_block *nb,
struct net_device *dev,
struct switchdev_obj_port_vlan *vlan,
+ const void *ctx, unsigned long action,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_port_obj_info obj_info = {
.info = {
.dev = dev,
.extack = extack,
+ .ctx = ctx,
},
.obj = &vlan->obj,
};
int err;
- err = nb->notifier_call(nb, SWITCHDEV_PORT_OBJ_ADD, &obj_info);
+ err = nb->notifier_call(nb, action, &obj_info);
return notifier_to_errno(err);
}
int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack)
+ const void *ctx, bool adding, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
struct net_bridge_port *p;
struct net_bridge *br;
+ unsigned long action;
int err = 0;
u16 pvid;
@@ -1857,6 +1859,11 @@ int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
if (!vg)
return 0;
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
@@ -1870,7 +1877,7 @@ int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
if (!br_vlan_should_use(v))
continue;
- err = br_vlan_replay_one(nb, dev, &vlan, extack);
+ err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
if (err)
return err;
}
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 0d3a8c01552e..01017448ebde 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
br_vlan_tunnel_rht_params);
}
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+ struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+ WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+ RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+ dst_release(&tdst->dst);
+}
+
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
- if (!vlan->tinfo.tunnel_dst)
+ if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
return;
rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
- vlan->tinfo.tunnel_id = 0;
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
+ vlan_tunnel_info_release(vlan);
}
static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan, u32 tun_id)
{
- struct metadata_dst *metadata = NULL;
+ struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
int err;
- if (vlan->tinfo.tunnel_dst)
+ if (metadata)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return -EINVAL;
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
- vlan->tinfo.tunnel_dst = metadata;
- vlan->tinfo.tunnel_id = key;
+ rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+ WRITE_ONCE(vlan->tinfo.tunnel_id, key);
err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return 0;
out:
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
- vlan->tinfo.tunnel_id = 0;
+ vlan_tunnel_info_release(vlan);
return err;
}
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
+ struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
int err;
- if (!vlan || !vlan->tinfo.tunnel_id)
+ if (!vlan)
return 0;
- if (unlikely(!skb_vlan_tag_present(skb)))
+ tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+ if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
return 0;
skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
if (err)
return err;
- skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+ tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+ if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+ skb_dst_set(skb, &tunnel_dst->dst);
return 0;
}
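The tunnel_dst conversion follows the standard RCU publish/read split: writers under RTNL publish with rcu_assign_pointer() (or clear with RCU_INIT_POINTER(), with WRITE_ONCE() for the id), while the datapath takes its own dst reference before use. A minimal sketch of the two sides, using only the primitives above (the bridge datapath already runs under rcu_read_lock()):

/* writer, RTNL held */
rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
WRITE_ONCE(vlan->tinfo.tunnel_id, key);

/* reader, RCU read side */
tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
	skb_dst_set(skb, &tunnel_dst->dst);	/* skb now owns a reference */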
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index c10e5a55758d..440139706130 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
caifd_put(caifd);
}
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer,
int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
enum cfcnfg_phy_preference pref;
struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
struct caif_device_entry_list *caifdevs;
+ int res;
caifdevs = caif_device_list(dev_net(dev));
caifd = caif_device_alloc(dev);
if (!caifd)
- return;
+ return -ENOMEM;
*layer = &caifd->layer;
spin_lock_init(&caifd->flow_lock);
@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
strlcpy(caifd->layer.name, dev->name,
sizeof(caifd->layer.name));
caifd->layer.transmit = transmit;
- cfcnfg_add_phy_layer(cfg,
+ res = cfcnfg_add_phy_layer(cfg,
dev,
&caifd->layer,
pref,
@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
mutex_unlock(&caifdevs->lock);
if (rcv_func)
*rcv_func = receive;
+ return res;
}
EXPORT_SYMBOL(caif_enroll_dev);
@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
struct cflayer *layer, *link_support;
int head_room = 0;
struct caif_device_entry_list *caifdevs;
+ int res;
cfg = get_cfcnfg(dev_net(dev));
caifdevs = caif_device_list(dev_net(dev));
@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
break;
}
}
- caif_enroll_dev(dev, caifdev, link_support, head_room,
+ res = caif_enroll_dev(dev, caifdev, link_support, head_room,
&layer, NULL);
+ if (res)
+ cfserl_release(link_support);
caifdev->flowctrl = dev_flowctrl;
break;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3ad0a1df6712..647554c9813b 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -243,7 +243,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
cf_sk->sk.sk_err = ECONNRESET;
set_rx_flow_on(cf_sk);
- cf_sk->sk.sk_error_report(&cf_sk->sk);
+ sk_error_report(&cf_sk->sk);
break;
default:
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index a0116b9503d9..b02e1292f7f1 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
return (struct cflayer *) this;
}
+static void cfusbl_release(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
static struct packet_type caif_usb_type __read_mostly = {
.type = cpu_to_be16(ETH_P_802_EX1),
};
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
struct cflayer *layer, *link_support;
struct usbnet *usbnet;
struct usb_device *usbdev;
+ int res;
/* Check whether we have a NCM device, and find its VID/PID. */
if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
if (dev->num_tx_queues > 1)
pr_warn("USB device uses more than one tx queue\n");
- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
&layer, &caif_usb_type.func);
+ if (res)
+ goto err;
+
if (!pack_added)
dev_add_pack(&caif_usb_type);
pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
strlcpy(layer->name, dev->name, sizeof(layer->name));
return 0;
+err:
+ cfusbl_release(link_support);
+ return res;
}
static struct notifier_block caif_device_notifier = {
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 399239a14420..23267c8db7c4 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -450,7 +450,7 @@ unlock:
rcu_read_unlock();
}
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
{
struct cflayer *frml;
struct cfcnfg_phyinfo *phyinfo = NULL;
- int i;
+ int i, res = 0;
u8 phyid;
mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
+ res = -EEXIST;
goto out;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
- if (!phyinfo)
- goto out_err;
+ if (!phyinfo) {
+ res = -ENOMEM;
+ goto out;
+ }
phy_layer->id = phyid;
phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:
frml = cffrml_create(phyid, fcs);
- if (!frml)
+ if (!frml) {
+ res = -ENOMEM;
goto out_err;
+ }
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
@@ -511,11 +516,12 @@ got_phyid:
list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
mutex_unlock(&cnfg->lock);
- return;
+ return res;
out_err:
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
+ return res;
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
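With cfcnfg_add_phy_layer() and caif_enroll_dev() now returning errors, the link-support layer stays owned by the caller until enrollment succeeds, which is why the notifier paths above free it on failure. The expected caller pattern, as used in caif_dev.c and caif_usb.c:

res = caif_enroll_dev(dev, caifdev, link_support, head_room,
		      &layer, NULL);
if (res)
	cfserl_release(link_support);	/* plain kfree() of the layer */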
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index e11725a4bb0e..40cd57ad0a0f 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid);
+void cfserl_release(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
struct cflayer *cfserl_create(int instance, bool use_stx)
{
struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index fadc7c8a3107..37b67194c0df 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,8 +76,6 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
u8 buf;
priv = container_of(layr, struct chnl_net, chnl);
- if (!priv)
- return -EINVAL;
skb = (struct sk_buff *) cfpkt_tonative(pkt);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 909b9e684e04..508f67de0b80 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
struct list_head rx_ops;
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@ struct bcm_sock {
char procname [32]; /* inode number in decimal with \0 */
};
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = TX_EXPIRED;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
/* this element is not throttled anymore */
data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
}
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -778,6 +785,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
bcm_rx_handler, op);
list_del(&op->list);
+ synchronize_rcu();
bcm_remove_op(op);
return 1; /* done */
}
@@ -1378,20 +1386,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/*
* notification handler for netdevice status changes
*/
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
struct sock *sk = &bo->sk;
struct bcm_op *op;
int notify_enodev = 0;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
switch (msg) {
@@ -1415,7 +1418,7 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
if (notify_enodev) {
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
break;
@@ -1423,10 +1426,31 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
if (bo->bound && bo->ifindex == dev->ifindex) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
+}
+
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&bcm_notifier_lock);
+ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+ spin_unlock(&bcm_notifier_lock);
+ bcm_notify(bcm_busy_notifier, msg, dev);
+ spin_lock(&bcm_notifier_lock);
+ }
+ bcm_busy_notifier = NULL;
+ spin_unlock(&bcm_notifier_lock);
return NOTIFY_DONE;
}
@@ -1446,9 +1470,9 @@ static int bcm_init(struct sock *sk)
INIT_LIST_HEAD(&bo->rx_ops);
/* set notifier */
- bo->notifier.notifier_call = bcm_notifier;
-
- register_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ list_add_tail(&bo->notifier, &bcm_notifier_list);
+ spin_unlock(&bcm_notifier_lock);
return 0;
}
@@ -1471,7 +1495,14 @@ static int bcm_release(struct socket *sock)
/* remove bcm_ops, timer, rx_unregister(), etc. */
- unregister_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ while (bcm_busy_notifier == bo) {
+ spin_unlock(&bcm_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&bcm_notifier_lock);
+ }
+ list_del(&bo->notifier);
+ spin_unlock(&bcm_notifier_lock);
lock_sock(sk);
@@ -1503,9 +1534,13 @@ static int bcm_release(struct socket *sock)
REGMASK(op->can_id),
bcm_rx_handler, op);
- bcm_remove_op(op);
}
+ synchronize_rcu();
+
+ list_for_each_entry_safe(op, next, &bo->rx_ops, list)
+ bcm_remove_op(op);
+
#if IS_ENABLED(CONFIG_PROC_FS)
/* remove procfs entry */
if (net->can.bcmproc_dir && bo->bcm_proc_read)
@@ -1692,6 +1727,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
.exit = canbcm_pernet_exit,
};
+static struct notifier_block canbcm_notifier = {
+ .notifier_call = bcm_notifier
+};
+
static int __init bcm_module_init(void)
{
int err;
@@ -1705,12 +1744,14 @@ static int __init bcm_module_init(void)
}
register_pernet_subsys(&canbcm_pernet_ops);
+ register_netdevice_notifier(&canbcm_notifier);
return 0;
}
static void __exit bcm_module_exit(void)
{
can_proto_unregister(&bcm_can_proto);
+ unregister_netdevice_notifier(&canbcm_notifier);
unregister_pernet_subsys(&canbcm_pernet_ops);
}
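bcm (and isotp/raw below) replace one netdevice notifier per socket with a single module-wide notifier walking a socket list. The global bcm_busy_notifier doubles as the list cursor and as a busy marker: the callback drops the spinlock around each per-socket callout, and a socket being released spins until the cursor has moved past it, as in the hunk above:

spin_lock(&bcm_notifier_lock);
while (bcm_busy_notifier == bo) {	/* our socket is being notified */
	spin_unlock(&bcm_notifier_lock);
	schedule_timeout_uninterruptible(1);
	spin_lock(&bcm_notifier_lock);
}
list_del(&bo->notifier);		/* now safe to unlink */
spin_unlock(&bcm_notifier_lock);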
diff --git a/net/can/gw.c b/net/can/gw.c
index ba4124805602..d8861e862f15 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -596,6 +596,7 @@ static int cgw_notifier(struct notifier_block *nb,
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
}
}
@@ -1154,6 +1155,7 @@ static void cgw_remove_all_jobs(struct net *net)
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
}
}
@@ -1222,6 +1224,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
err = 0;
break;
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 9f94ad3caee9..caaa532ece94 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -143,10 +143,14 @@ struct isotp_sock {
u32 force_tx_stmin;
u32 force_rx_stmin;
struct tpcon rx, tx;
- struct notifier_block notifier;
+ struct list_head notifier;
wait_queue_head_t wait;
};
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
static inline struct isotp_sock *isotp_sk(const struct sock *sk)
{
return (struct isotp_sock *)sk;
@@ -164,7 +168,7 @@ static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
/* report 'connection timed out' */
sk->sk_err = ETIMEDOUT;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
/* reset rx state */
so->rx.state = ISOTP_IDLE;
@@ -221,8 +225,8 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
can_send_ret = can_send(nskb, 1);
if (can_send_ret)
- pr_notice_once("can-isotp: %s: can_send_ret %d\n",
- __func__, can_send_ret);
+ pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+ __func__, ERR_PTR(can_send_ret));
dev_put(dev);
@@ -335,7 +339,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
@@ -388,7 +392,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
/* overflow on receiver side - report 'message too long' */
sk->sk_err = EMSGSIZE;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
fallthrough;
default:
@@ -416,7 +420,7 @@ static int isotp_rcv_sf(struct sock *sk, struct canfd_frame *cf, int pcilen,
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return 1;
}
@@ -531,7 +535,7 @@ static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
/* wrong sn detected - report 'illegal byte sequence' */
sk->sk_err = EILSEQ;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
/* reset rx state */
so->rx.state = ISOTP_IDLE;
@@ -555,7 +559,7 @@ static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return 1;
}
@@ -754,7 +758,7 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
/* report 'communication error on send' */
sk->sk_err = ECOMM;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
/* reset tx state */
so->tx.state = ISOTP_IDLE;
@@ -797,10 +801,12 @@ isotp_tx_burst:
can_skb_set_owner(skb, sk);
can_send_ret = can_send(skb, 1);
- if (can_send_ret)
- pr_notice_once("can-isotp: %s: can_send_ret %d\n",
- __func__, can_send_ret);
-
+ if (can_send_ret) {
+ pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+ __func__, ERR_PTR(can_send_ret));
+ if (can_send_ret == -ENOBUFS)
+ pr_notice_once("can-isotp: tx queue is full, increasing txqueuelen may prevent this error\n");
+ }
if (so->tx.idx >= so->tx.len) {
/* we are done */
so->tx.state = ISOTP_IDLE;
@@ -946,8 +952,8 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
err = can_send(skb, 1);
dev_put(dev);
if (err) {
- pr_notice_once("can-isotp: %s: can_send_ret %d\n",
- __func__, err);
+ pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+ __func__, ERR_PTR(err));
return err;
}
@@ -1013,13 +1019,17 @@ static int isotp_release(struct socket *sock)
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
- unregister_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ while (isotp_busy_notifier == so) {
+ spin_unlock(&isotp_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&isotp_notifier_lock);
+ }
+ list_del(&so->notifier);
+ spin_unlock(&isotp_notifier_lock);
lock_sock(sk);
- hrtimer_cancel(&so->txtimer);
- hrtimer_cancel(&so->rxtimer);
-
/* remove current filters & unregister */
if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
if (so->ifindex) {
@@ -1031,10 +1041,14 @@ static int isotp_release(struct socket *sock)
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
dev_put(dev);
+ synchronize_rcu();
}
}
}
+ hrtimer_cancel(&so->txtimer);
+ hrtimer_cancel(&so->rxtimer);
+
so->ifindex = 0;
so->bound = 0;
@@ -1062,27 +1076,31 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
+ if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+ return -EADDRNOTAVAIL;
+
+ if (!addr->can_ifindex)
+ return -ENODEV;
+
+ lock_sock(sk);
+
/* do not register frame reception for functional addressing */
if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
do_rx_reg = 0;
/* do not validate rx address for functional addressing */
if (do_rx_reg) {
- if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
- return -EADDRNOTAVAIL;
+ if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
- if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
- return -EADDRNOTAVAIL;
+ if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
}
- if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
- return -EADDRNOTAVAIL;
-
- if (!addr->can_ifindex)
- return -ENODEV;
-
- lock_sock(sk);
-
if (so->bound && addr->can_ifindex == so->ifindex &&
addr->can_addr.tp.rx_id == so->rxid &&
addr->can_addr.tp.tx_id == so->txid)
@@ -1140,7 +1158,7 @@ out:
if (notify_enetdown) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
return err;
@@ -1164,16 +1182,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
return ISOTP_MIN_NAMELEN;
}
-static int isotp_setsockopt(struct socket *sock, int level, int optname,
+static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
int ret = 0;
- if (level != SOL_CAN_ISOTP)
- return -EINVAL;
-
if (so->bound)
return -EISCONN;
@@ -1248,6 +1263,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
return ret;
}
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ if (level != SOL_CAN_ISOTP)
+ return -EINVAL;
+
+ lock_sock(sk);
+ ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen);
+ release_sock(sk);
+ return ret;
+}
+
static int isotp_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -1300,21 +1331,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
struct sock *sk = &so->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (so->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -1331,16 +1357,37 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
break;
case NETDEV_DOWN:
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
break;
}
+}
+
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&isotp_notifier_lock);
+ list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+ spin_unlock(&isotp_notifier_lock);
+ isotp_notify(isotp_busy_notifier, msg, dev);
+ spin_lock(&isotp_notifier_lock);
+ }
+ isotp_busy_notifier = NULL;
+ spin_unlock(&isotp_notifier_lock);
return NOTIFY_DONE;
}
@@ -1377,8 +1424,9 @@ static int isotp_init(struct sock *sk)
init_waitqueue_head(&so->wait);
- so->notifier.notifier_call = isotp_notifier;
- register_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ list_add_tail(&so->notifier, &isotp_notifier_list);
+ spin_unlock(&isotp_notifier_lock);
return 0;
}
@@ -1425,6 +1473,10 @@ static const struct can_proto isotp_can_proto = {
.prot = &isotp_proto,
};
+static struct notifier_block canisotp_notifier = {
+ .notifier_call = isotp_notifier
+};
+
static __init int isotp_module_init(void)
{
int err;
@@ -1433,7 +1485,9 @@ static __init int isotp_module_init(void)
err = can_proto_register(&isotp_can_proto);
if (err < 0)
- pr_err("can: registration of isotp protocol failed\n");
+ pr_err("can: registration of isotp protocol failed %pe\n", ERR_PTR(err));
+ else
+ register_netdevice_notifier(&canisotp_notifier);
return err;
}
@@ -1441,6 +1495,7 @@ static __init int isotp_module_init(void)
static __exit void isotp_module_exit(void)
{
can_proto_unregister(&isotp_can_proto);
+ unregister_netdevice_notifier(&canisotp_notifier);
}
module_init(isotp_module_init);
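Note the reordered teardown in isotp_release() above: the RX filter is removed and an RCU grace period observed before the hrtimers are cancelled, since a concurrent isotp_rcv() may still re-arm them until the grace period ends. In outline (sketch, argument lists abbreviated):

can_rx_unregister(..., isotp_rcv, sk);	/* stop new RX callbacks */
synchronize_rcu();			/* wait out in-flight isotp_rcv() */
hrtimer_cancel(&so->txtimer);		/* cannot be re-armed anymore */
hrtimer_cancel(&so->rxtimer);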
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index da3a7a7bcff2..08c8606cfd9c 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -193,6 +193,10 @@ static void j1939_can_rx_unregister(struct j1939_priv *priv)
can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv);
+ /* The last reference of priv is dropped by the RCU deferred
+ * j1939_sk_sock_destruct() of the last socket, so we can
+ * safely drop this reference here.
+ */
j1939_priv_put(priv);
}
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 56aa66147d5a..54f6d521492f 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -398,6 +398,9 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+
+ /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_destruct = j1939_sk_sock_destruct;
sk->sk_protocol = CAN_J1939;
@@ -673,7 +676,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case SO_J1939_FILTER:
- if (!sockptr_is_null(optval)) {
+ if (!sockptr_is_null(optval) && optlen != 0) {
struct j1939_filter *f;
int c;
@@ -1009,7 +1012,7 @@ void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
@@ -1189,7 +1192,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
list_for_each_entry(jsk, &priv->j1939_socks, list) {
jsk->sk.sk_err = error_code;
if (!sock_flag(&jsk->sk, SOCK_DEAD))
- jsk->sk.sk_error_report(&jsk->sk);
+ sk_error_report(&jsk->sk);
j1939_sk_queue_drop_all(priv, jsk, error_code);
}
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index e09d087ba240..c3946c355882 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
if ((do_skcb->offset + do_skb->len) < offset_start) {
__skb_unlink(do_skb, &session->skb_queue);
+ /* drop ref taken in j1939_session_skb_queue() */
+ skb_unref(do_skb);
+
kfree_skb(do_skb);
}
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
skcb->flags |= J1939_ECU_LOCAL_SRC;
+ skb_get(skb);
skb_queue_tail(&session->skb_queue, skb);
}
static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
- unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+ unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
skb = do_skb;
}
}
+
+ if (skb)
+ skb_get(skb);
+
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
if (!skb)
@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
return skb;
}
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
{
unsigned int offset_start;
offset_start = session->pkt.dpo * 7;
- return j1939_session_skb_find_by_offset(session, offset_start);
+ return j1939_session_skb_get_by_offset(session, offset_start);
}
/* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
int ret = 0;
u8 dat[8];
- se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
netdev_err_once(priv->ndev,
"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
- return -EOVERFLOW;
+ ret = -EOVERFLOW;
+ goto out_free;
}
if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
if (pkt_done)
j1939_tp_set_rxtimeout(session, 250);
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
return ret;
}
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
static int j1939_simple_txnext(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
- struct sk_buff *se_skb = j1939_session_skb_find(session);
+ struct sk_buff *se_skb = j1939_session_skb_get(session);
struct sk_buff *skb;
int ret;
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
return 0;
skb = skb_clone(se_skb, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
can_skb_set_owner(skb, se_skb->sk);
@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
ret = j1939_send_one(priv, skb);
if (ret)
- return ret;
+ goto out_free;
j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
j1939_sk_queue_activate_next(session);
- return 0;
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
+ return ret;
}
static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
struct sk_buff *skb;
if (!session->transmission) {
- skb = j1939_session_skb_find(session);
+ skb = j1939_session_skb_get(session);
/* distribute among j1939 receivers */
j1939_sk_recv(session->priv, skb);
+ consume_skb(skb);
}
j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb;
- struct sk_buff *se_skb;
+ struct sk_buff *se_skb = NULL;
const u8 *dat;
u8 *tpdat;
int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
goto out_session_cancel;
}
- se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
j1939_tp_set_rxtimeout(session, 250);
}
session->last_cmd = 0xff;
+ consume_skb(se_skb);
j1939_session_put(session);
return;
out_session_cancel:
+ kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
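The _find to _get rename encodes the new reference rule: the lookup returns the skb with an extra reference taken under the queue lock, so concurrent queue pruning cannot free it under the caller. Every caller then pairs the get with a release, as the hunks above do:

se_skb = j1939_session_skb_get(session);
if (!se_skb)
	return -ENOBUFS;
/* ... use se_skb ... */
if (ret)
	kfree_skb(se_skb);	/* error path: drop the reference */
else
	consume_skb(se_skb);	/* success: release without drop accounting */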
diff --git a/net/can/proc.c b/net/can/proc.c
index d1fe49e6f16d..b3099f0a3cb8 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -99,8 +99,6 @@ static void can_init_stats(struct net *net)
static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
unsigned long count)
{
- unsigned long rate;
-
if (oldjif == newjif)
return 0;
@@ -111,9 +109,7 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
return 99999999;
}
- rate = (count * HZ) / (newjif - oldjif);
-
- return rate;
+ return (count * HZ) / (newjif - oldjif);
}
void can_stat_update(struct timer_list *t)
diff --git a/net/can/raw.c b/net/can/raw.c
index 139d9471ddcf..ed4fcb7ab0c3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -83,7 +83,7 @@ struct raw_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
int loopback;
int recv_own_msgs;
int fd_frames;
@@ -95,6 +95,10 @@ struct raw_sock {
struct uniqframe __percpu *uniq;
};
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
/* Return pointer to store the extra msg flags for raw_recvmsg().
* We use the space of one unsigned int beyond the 'struct sockaddr_can'
* in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
return err;
}
-static int raw_notifier(struct notifier_block *nb,
- unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
struct sock *sk = &ro->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (ro->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -296,16 +295,37 @@ static int raw_notifier(struct notifier_block *nb,
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
break;
case NETDEV_DOWN:
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
break;
}
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+ spin_lock(&raw_notifier_lock);
+ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+ spin_unlock(&raw_notifier_lock);
+ raw_notify(raw_busy_notifier, msg, dev);
+ spin_lock(&raw_notifier_lock);
+ }
+ raw_busy_notifier = NULL;
+ spin_unlock(&raw_notifier_lock);
return NOTIFY_DONE;
}
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
return -ENOMEM;
/* set notifier */
- ro->notifier.notifier_call = raw_notifier;
-
- register_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ list_add_tail(&ro->notifier, &raw_notifier_list);
+ spin_unlock(&raw_notifier_lock);
return 0;
}
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
ro = raw_sk(sk);
- unregister_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ while (raw_busy_notifier == ro) {
+ spin_unlock(&raw_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&raw_notifier_lock);
+ }
+ list_del(&ro->notifier);
+ spin_unlock(&raw_notifier_lock);
lock_sock(sk);
@@ -461,7 +488,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (notify_enetdown) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
return err;
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
.prot = &raw_proto,
};
+static struct notifier_block canraw_notifier = {
+ .notifier_call = raw_notifier
+};
+
static __init int raw_module_init(void)
{
int err;
@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
err = can_proto_register(&raw_can_proto);
if (err < 0)
pr_err("can: registration of raw protocol failed\n");
+ else
+ register_netdevice_notifier(&canraw_notifier);
return err;
}
@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
static __exit void raw_module_exit(void)
{
can_proto_unregister(&raw_can_proto);
+ unregister_netdevice_notifier(&canraw_notifier);
}
module_init(raw_module_init);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index de407e8feb97..d2b268a1838e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -36,7 +36,7 @@ static int init_protocol(struct ceph_auth_client *ac, int proto)
}
}
-static void set_global_id(struct ceph_auth_client *ac, u64 global_id)
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
{
dout("%s global_id %llu\n", __func__, global_id);
@@ -260,19 +260,22 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
ac->negotiating = false;
}
- ret = ac->ops->handle_reply(ac, result, payload, payload_end,
+ if (result) {
+ pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+ ceph_auth_proto_name(ac->protocol), result);
+ ret = result;
+ goto out;
+ }
+
+ ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
NULL, NULL, NULL, NULL);
if (ret == -EAGAIN) {
ret = build_request(ac, true, reply_buf, reply_len);
goto out;
} else if (ret) {
- pr_err("auth protocol '%s' mauth authentication failed: %d\n",
- ceph_auth_proto_name(ac->protocol), result);
goto out;
}
- set_global_id(ac, global_id);
-
out:
mutex_unlock(&ac->mutex);
return ret;
@@ -498,11 +501,10 @@ int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
int ret;
mutex_lock(&ac->mutex);
- ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
+ ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
- if (!ret)
- set_global_id(ac, global_id);
+ WARN_ON(ret == -EAGAIN || ret > 0);
mutex_unlock(&ac->mutex);
return ret;
}
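The generic code thus no longer stores global_id itself: a nonzero monitor result fails fast, and each protocol's handle_reply() receives the server-supplied global_id and calls ceph_auth_set_global_id() only once the reply has actually been verified (auth_none.c and auth_x.c below). The per-protocol contract, in sketch form:

static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
			void *buf, void *end, u8 *session_key,
			int *session_key_len, u8 *con_secret,
			int *con_secret_len)
{
	/* ... decode and verify the reply ... */
	ceph_auth_set_global_id(ac, global_id);	/* only after verification */
	return 0;
}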
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 70e86e462250..097e9f8d87a7 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -69,7 +69,7 @@ static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
* the generic auth code decode the global_id, and we carry no actual
* authenticate state, so nothing happens here.
*/
-static int handle_reply(struct ceph_auth_client *ac, int result,
+static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len)
@@ -77,7 +77,8 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
struct ceph_auth_none_info *xi = ac->private;
xi->starting = false;
- return result;
+ ceph_auth_set_global_id(ac, global_id);
+ return 0;
}
static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 79641c4afee9..b71b1635916e 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -597,7 +597,7 @@ bad:
return -EINVAL;
}
-static int handle_auth_session_key(struct ceph_auth_client *ac,
+static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
void **p, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
@@ -613,6 +613,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
if (ret)
return ret;
+ ceph_auth_set_global_id(ac, global_id);
if (*p == end) {
/* pre-nautilus (or didn't request service tickets!) */
WARN_ON(session_key || con_secret);
@@ -661,7 +662,7 @@ e_inval:
return -EINVAL;
}
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
+static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
@@ -669,13 +670,11 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
struct ceph_x_info *xi = ac->private;
struct ceph_x_ticket_handler *th;
int len = end - buf;
+ int result;
void *p;
int op;
int ret;
- if (result)
- return result; /* XXX hmm? */
-
if (xi->starting) {
/* it's a hello */
struct ceph_x_server_challenge *sc = buf;
@@ -697,9 +696,9 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
switch (op) {
case CEPHX_GET_AUTH_SESSION_KEY:
/* AUTH ticket + [connection secret] + service tickets */
- ret = handle_auth_session_key(ac, &p, end, session_key,
- session_key_len, con_secret,
- con_secret_len);
+ ret = handle_auth_session_key(ac, global_id, &p, end,
+ session_key, session_key_len,
+ con_secret, con_secret_len);
break;
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
diff --git a/net/ceph/auth_x_protocol.h b/net/ceph/auth_x_protocol.h
index 792fcb974dc3..9c60feeb1bcb 100644
--- a/net/ceph/auth_x_protocol.h
+++ b/net/ceph/auth_x_protocol.h
@@ -87,7 +87,7 @@ struct ceph_x_authorize_reply {
/*
- * encyption bundle
+ * encryption bundle
*/
#define CEPHX_ENC_MAGIC 0xff009cad8826aa55ull
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 195ceb8afb06..013cbdb6cfe2 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1508,7 +1508,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
return get_generic_reply(con, hdr, skip);
/*
- * Older OSDs don't set reply tid even if the orignal
+ * Older OSDs don't set reply tid even if the original
* request had a non-zero tid. Work around this weirdness
* by allocating a new message.
*/
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index c959320c4775..75b738083523 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1309,7 +1309,7 @@ static int get_osdmap_client_data_v(void **p, void *end,
return -EINVAL;
}
- /* old osdmap enconding */
+ /* old osdmap encoding */
struct_v = 0;
}
@@ -3010,7 +3010,7 @@ static bool is_valid_crush_name(const char *name)
* parent, returns 0.
*
* Does a linear search, as there are no parent pointers of any
- * kind. Note that the result is ambigous for items that occur
+ * kind. Note that the result is ambiguous for items that occur
* multiple times in the map.
*/
static int get_immediate_parent(struct crush_map *c, int id,
diff --git a/net/compat.c b/net/compat.c
index ddd15af3a283..210fc3b4d0d8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
if (kcmlen > stackbuf_size)
kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
if (kcmsg == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
/* Now copy them over neatly. */
memset(kcmsg, 0, kcmlen);
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index cc3712ad8716..f564f82e91d9 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -524,8 +524,7 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
nr_maps++;
}
- diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
- GFP_KERNEL);
+ diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
if (!diag)
return ERR_PTR(-ENOMEM);
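struct_size() (from <linux/overflow.h>) replaces the open-coded multiply-and-add and saturates on overflow instead of wrapping. The general idiom for a struct with a trailing flexible array, shown on a hypothetical type:

struct diag_like {
	int nr_maps;
	struct bpf_map *maps[];		/* flexible array member */
};

struct diag_like *d;

d = kzalloc(struct_size(d, maps, nr_maps), GFP_KERNEL);
if (!d)
	return ERR_PTR(-ENOMEM);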
diff --git a/net/core/dev.c b/net/core/dev.c
index 222b1d322c96..c253c2aafe97 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,7 @@
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
+#include <linux/once_lite.h>
#include "net-sysfs.h"
@@ -3487,13 +3488,16 @@ EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
+static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
+{
+ pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ skb_dump(KERN_ERR, skb, true);
+ dump_stack();
+}
+
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- if (net_ratelimit()) {
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
- skb_dump(KERN_ERR, skb, true);
- dump_stack();
- }
+ DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
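DO_ONCE_LITE() (from the new <linux/once_lite.h>) runs its callee at most once per call site, guarded by a static flag with no locking, so the csum-failure dump now fires exactly once per boot rather than being rate limited. Usage is just the function plus its arguments, e.g. on a hypothetical reporter:

static void report_once(struct net_device *dev, int code)
{
	pr_err("%s: failure, code %d\n", dev->name, code);
}

DO_ONCE_LITE(report_once, dev, err);	/* later calls are no-ops */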
@@ -3852,9 +3856,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
+ if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
+ qdisc_run_begin(q)) {
+ /* Retest nolock_qdisc_is_empty() under q->seqlock to
+ * guard against racing with requeuing.
+ */
+ if (unlikely(!nolock_qdisc_is_empty(q))) {
+ rc = q->enqueue(skb, q, &to_free) &
+ NET_XMIT_MASK;
+ __qdisc_run(q);
+ qdisc_run_end(q);
+
+ goto no_lock_out;
+ }
+
+ qdisc_bstats_cpu_update(q, skb);
+ if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
+ !nolock_qdisc_is_empty(q))
+ __qdisc_run(q);
+
+ qdisc_run_end(q);
+ return NET_XMIT_SUCCESS;
+ }
+
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
qdisc_run(q);
+no_lock_out:
if (unlikely(to_free))
kfree_skb_list(to_free);
return rc;
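The TCQ_F_CAN_BYPASS branch gives lockless qdiscs the same empty-queue bypass the locked path has long had: if the queue looks empty and q->seqlock is taken, the skb goes straight to the driver via sch_direct_xmit(); rechecking nolock_qdisc_is_empty() under the seqlock catches skbs requeued in the window before the lock. In outline:

/* lockless qdisc transmit:
 *   empty && qdisc_run_begin(q) succeeded?
 *     still empty  -> sch_direct_xmit() straight to the driver
 *     raced/refill -> enqueue + __qdisc_run()
 *   else           -> plain enqueue + qdisc_run()
 */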
@@ -4362,7 +4390,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
* makes sure to proceed with napi polling
* if the thread is explicitly woken from here.
*/
- if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
+ if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
@@ -5025,25 +5053,43 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
+ rcu_read_lock();
+
while (head) {
struct Qdisc *q = head;
spinlock_t *root_lock = NULL;
head = head->next_sched;
- if (!(q->flags & TCQ_F_NOLOCK)) {
- root_lock = qdisc_lock(q);
- spin_lock(root_lock);
- }
/* We need to make sure head->next_sched is read
* before clearing __QDISC_STATE_SCHED
*/
smp_mb__before_atomic();
+
+ if (!(q->flags & TCQ_F_NOLOCK)) {
+ root_lock = qdisc_lock(q);
+ spin_lock(root_lock);
+ } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))) {
+ /* There is a synchronize_net() between
+ * STATE_DEACTIVATED flag being set and
+ * qdisc_reset()/some_qdisc_is_busy() in
+ * dev_deactivate(), so we can safely bail out
+ * early here to avoid data race between
+ * qdisc_deactivate() and some_qdisc_is_busy()
+ * for lockless qdisc.
+ */
+ clear_bit(__QDISC_STATE_SCHED, &q->state);
+ continue;
+ }
+
clear_bit(__QDISC_STATE_SCHED, &q->state);
qdisc_run(q);
if (root_lock)
spin_unlock(root_lock);
}
+
+ rcu_read_unlock();
}
xfrm_dev_backlog(sd);
@@ -5258,9 +5304,9 @@ another_round:
if (static_branch_unlikely(&generic_xdp_needed_key)) {
int ret2;
- preempt_disable();
+ migrate_disable();
ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
- preempt_enable();
+ migrate_enable();
if (ret2 != XDP_PASS) {
ret = NET_RX_DROP;
@@ -6501,11 +6547,18 @@ EXPORT_SYMBOL(napi_schedule_prep);
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
*
- * Variant of __napi_schedule() assuming hard irqs are masked
+ * Variant of __napi_schedule() assuming hard irqs are masked.
+ *
+ * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
+ * because the interrupt disabled assumption might not be true
+ * due to force-threaded interrupts and spinlock substitution.
*/
void __napi_schedule_irqoff(struct napi_struct *n)
{
- ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ else
+ __napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
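Since IS_ENABLED() folds to a compile-time constant, non-RT kernels keep the old direct ____napi_schedule() path at zero cost; only PREEMPT_RT builds take the __napi_schedule() route, whose internal local_irq_save() restores the hard-irqs-off assumption that force-threaded handlers and sleeping spinlocks no longer guarantee. Its upstream body, for reference:

void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);		/* re-establish the assumption */
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}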
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 4eb969518ee0..8fdd04f00fd7 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -190,6 +190,80 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
return devlink_port_get_from_attrs(devlink, info->attrs);
}
+static inline bool
+devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
+{
+ return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
+}
+
+static inline bool
+devlink_rate_is_node(struct devlink_rate *devlink_rate)
+{
+ return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
+}
+
+static struct devlink_rate *
+devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ struct devlink_rate *devlink_rate;
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
+ if (IS_ERR(devlink_port))
+ return ERR_CAST(devlink_port);
+ devlink_rate = devlink_port->devlink_rate;
+ return devlink_rate ?: ERR_PTR(-ENODEV);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
+{
+ struct devlink_rate *devlink_rate;
+
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ if (devlink_rate_is_node(devlink_rate) &&
+ !strcmp(node_name, devlink_rate->name))
+ return devlink_rate;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+{
+ const char *rate_node_name;
+ size_t len;
+
+ if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+ return ERR_PTR(-EINVAL);
+ rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
+ len = strlen(rate_node_name);
+ /* Name cannot be empty or a decimal number */
+ if (!len || strspn(rate_node_name, "0123456789") == len)
+ return ERR_PTR(-EINVAL);
+
+ return devlink_rate_node_get_by_name(devlink, rate_node_name);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ return devlink_rate_node_get_from_attrs(devlink, info->attrs);
+}
+
+static struct devlink_rate *
+devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+
+ if (attrs[DEVLINK_ATTR_PORT_INDEX])
+ return devlink_rate_leaf_get_from_info(devlink, info);
+ else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+ return devlink_rate_node_get_from_info(devlink, info);
+ else
+ return ERR_PTR(-EINVAL);
+}
+
struct devlink_sb {
struct list_head list;
unsigned int index;
@@ -408,12 +482,14 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
+#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
+#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
/* The per devlink instance lock is taken by default in the pre-doit
* operation, yet several commands do not require this. The global
* devlink lock is taken and protects from disruption by user-calls.
*/
-#define DEVLINK_NL_FLAG_NO_LOCK BIT(2)
+#define DEVLINK_NL_FLAG_NO_LOCK BIT(4)
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
@@ -442,6 +518,24 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
devlink_port = devlink_port_get_from_info(devlink, info);
if (!IS_ERR(devlink_port))
info->user_ptr[1] = devlink_port;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
+ struct devlink_rate *devlink_rate;
+
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate)) {
+ err = PTR_ERR(devlink_rate);
+ goto unlock;
+ }
+ info->user_ptr[1] = devlink_rate;
+ } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
+ struct devlink_rate *rate_node;
+
+ rate_node = devlink_rate_node_get_from_info(devlink, info);
+ if (IS_ERR(rate_node)) {
+ err = PTR_ERR(rate_node);
+ goto unlock;
+ }
+ info->user_ptr[1] = rate_node;
}
return 0;
@@ -705,7 +799,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
attrs->phys.port_number))
return -EMSGSIZE;
@@ -749,6 +842,56 @@ devlink_port_fn_hw_addr_fill(struct devlink *devlink, const struct devlink_ops *
return 0;
}
+static int devlink_nl_rate_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_rate *devlink_rate,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags,
+ struct netlink_ext_ack *extack)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
+ goto nla_put_failure;
+
+ if (devlink_rate_is_leaf(devlink_rate)) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ devlink_rate->devlink_port->index))
+ goto nla_put_failure;
+ } else if (devlink_rate_is_node(devlink_rate)) {
+ if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
+ devlink_rate->name))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
+ devlink_rate->tx_share, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
+ devlink_rate->tx_max, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (devlink_rate->parent)
+ if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
+ devlink_rate->parent->name))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
static bool
devlink_port_fn_state_valid(enum devlink_port_fn_state state)
{
@@ -920,6 +1063,111 @@ static void devlink_port_notify(struct devlink_port *devlink_port,
msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
+static void devlink_rate_notify(struct devlink_rate *devlink_rate,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = devlink_rate->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_RATE_NEW &&
+ cmd != DEVLINK_CMD_RATE_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_rate_fill(msg, devlink, devlink_rate,
+ cmd, 0, 0, 0, NULL);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_rate *devlink_rate;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err = 0;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
+ u32 id = NETLINK_CB(cb->skb).portid;
+
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_rate_fill(msg, devlink,
+ devlink_rate,
+ cmd, id,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, NULL);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_rate *devlink_rate = info->user_ptr[1];
+ struct devlink *devlink = devlink_rate->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_rate_fill(msg, devlink, devlink_rate,
+ DEVLINK_CMD_RATE_NEW,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static bool
+devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent)
+{
+ while (parent) {
+ if (parent == devlink_rate)
+ return true;
+ parent = parent->parent;
+ }
+ return false;
+}
+
static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
@@ -1340,6 +1588,255 @@ static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
return devlink->ops->port_del(devlink, port_index, extack);
}
+static int
+devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
+ struct genl_info *info,
+ struct nlattr *nla_parent)
+{
+ struct devlink *devlink = devlink_rate->devlink;
+ const char *parent_name = nla_data(nla_parent);
+ const struct devlink_ops *ops = devlink->ops;
+ size_t len = strlen(parent_name);
+ struct devlink_rate *parent;
+ int err = -EOPNOTSUPP;
+
+ parent = devlink_rate->parent;
+ if (parent && len) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Rate object already has a parent.");
+ return -EBUSY;
+ } else if (parent && !len) {
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_parent_set(devlink_rate, NULL,
+ devlink_rate->priv, NULL,
+ info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_parent_set(devlink_rate, NULL,
+ devlink_rate->priv, NULL,
+ info->extack);
+ if (err)
+ return err;
+
+ refcount_dec(&parent->refcnt);
+ devlink_rate->parent = NULL;
+ } else if (!parent && len) {
+ parent = devlink_rate_node_get_by_name(devlink, parent_name);
+ if (IS_ERR(parent))
+ return -ENODEV;
+
+ if (parent == devlink_rate) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Parent to self is not allowed");
+ return -EINVAL;
+ }
+
+ if (devlink_rate_is_node(devlink_rate) &&
+ devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Node is already a parent of parent node.");
+ return -EEXIST;
+ }
+
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_parent_set(devlink_rate, parent,
+ devlink_rate->priv, parent->priv,
+ info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_parent_set(devlink_rate, parent,
+ devlink_rate->priv, parent->priv,
+ info->extack);
+ if (err)
+ return err;
+
+ refcount_inc(&parent->refcnt);
+ devlink_rate->parent = parent;
+ }
+
+ return 0;
+}
+
+static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
+ const struct devlink_ops *ops,
+ struct genl_info *info)
+{
+ struct nlattr *nla_parent, **attrs = info->attrs;
+ int err = -EOPNOTSUPP;
+ u64 rate;
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ if (err)
+ return err;
+ devlink_rate->tx_share = rate;
+ }
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ if (err)
+ return err;
+ devlink_rate->tx_max = rate;
+ }
+
+ nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
+ if (nla_parent) {
+ err = devlink_nl_rate_parent_node_set(devlink_rate, info,
+ nla_parent);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
+ struct genl_info *info,
+ enum devlink_rate_type type)
+{
+ struct nlattr **attrs = info->attrs;
+
+ if (type == DEVLINK_RATE_TYPE_LEAF) {
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the leaves");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the leaves");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+ !ops->rate_leaf_parent_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the leafs");
+ return false;
+ }
+ } else if (type == DEVLINK_RATE_TYPE_NODE) {
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+ !ops->rate_node_parent_set) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the nodes");
+ return false;
+ }
+ } else {
+ WARN(1, "Unknown type of rate object");
+ return false;
+ }
+
+ return true;
+}
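[Editor's note: the ops probed above are driver-provided callbacks. A minimal sketch of what a driver might register, with signatures inferred from the call sites in devlink_nl_rate_set() and devlink_nl_rate_parent_node_set() above; all foo_* names and hardware helpers are hypothetical:]

	/* Hypothetical driver-side rate callbacks (sketch, not from this patch). */
	static int foo_rate_leaf_tx_max_set(struct devlink_rate *rate, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
	{
		struct foo_vport *vport = priv;	/* priv from devlink_rate_leaf_create() */

		return foo_hw_vport_set_tx_max(vport, tx_max);
	}

	static int foo_rate_node_new(struct devlink_rate *node, void **priv,
				     struct netlink_ext_ack *extack)
	{
		struct foo_sched_group *grp = foo_hw_group_alloc();

		if (!grp)
			return -ENOMEM;
		*priv = grp;	/* handed back as 'priv' in the later node ops */
		return 0;
	}

	static const struct devlink_ops foo_devlink_ops = {
		.rate_leaf_tx_max_set	= foo_rate_leaf_tx_max_set,
		.rate_node_new		= foo_rate_node_new,
		/* rate_node_del, rate_{leaf,node}_tx_share_set and
		 * rate_{leaf,node}_parent_set omitted for brevity
		 */
	};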
+
+static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_rate *devlink_rate = info->user_ptr[1];
+ struct devlink *devlink = devlink_rate->devlink;
+ const struct devlink_ops *ops = devlink->ops;
+ int err;
+
+ if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
+ return -EOPNOTSUPP;
+
+ err = devlink_nl_rate_set(devlink_rate, ops, info);
+
+ if (!err)
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+ return err;
+}
+
+static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *rate_node;
+ const struct devlink_ops *ops;
+ int err;
+
+ ops = devlink->ops;
+ if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Rate nodes aren't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
+ return -EOPNOTSUPP;
+
+ rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
+ if (!IS_ERR(rate_node))
+ return -EEXIST;
+ else if (rate_node == ERR_PTR(-EINVAL))
+ return -EINVAL;
+
+ rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
+ if (!rate_node)
+ return -ENOMEM;
+
+ rate_node->devlink = devlink;
+ rate_node->type = DEVLINK_RATE_TYPE_NODE;
+ rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
+ if (!rate_node->name) {
+ err = -ENOMEM;
+ goto err_strdup;
+ }
+
+ err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
+ if (err)
+ goto err_node_new;
+
+ err = devlink_nl_rate_set(rate_node, ops, info);
+ if (err)
+ goto err_rate_set;
+
+ refcount_set(&rate_node->refcnt, 1);
+ list_add(&rate_node->list, &devlink->rate_list);
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+ return 0;
+
+err_rate_set:
+ ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+err_node_new:
+ kfree(rate_node->name);
+err_strdup:
+ kfree(rate_node);
+ return err;
+}
+
+static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_rate *rate_node = info->user_ptr[1];
+ struct devlink *devlink = rate_node->devlink;
+ const struct devlink_ops *ops = devlink->ops;
+ int err;
+
+ if (refcount_read(&rate_node->refcnt) > 1) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Node has children. Cannot delete node.");
+ return -EBUSY;
+ }
+
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+ err = ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+ if (rate_node->parent)
+ refcount_dec(&rate_node->parent->refcnt);
+ list_del(&rate_node->list);
+ kfree(rate_node->name);
+ kfree(rate_node);
+ return err;
+}
+
static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
enum devlink_command cmd, u32 portid,
@@ -2208,6 +2705,23 @@ static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
+static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_rate *devlink_rate;
+
+ /* Take the lock to sync with devlink_rate_nodes_destroy() */
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list)
+ if (devlink_rate_is_node(devlink_rate)) {
+ mutex_unlock(&devlink->lock);
+ NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
+ return -EBUSY;
+ }
+ mutex_unlock(&devlink->lock);
+ return 0;
+}
+
static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
@@ -2222,6 +2736,9 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
if (!ops->eswitch_mode_set)
return -EOPNOTSUPP;
mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+ err = devlink_rate_nodes_check(devlink, mode, info->extack);
+ if (err)
+ return err;
err = ops->eswitch_mode_set(devlink, mode, info->extack);
if (err)
return err;
@@ -6995,8 +7512,9 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
}
}
-static int devlink_trap_stats_put(struct sk_buff *msg,
- struct devlink_stats __percpu *trap_stats)
+static int
+devlink_trap_group_stats_put(struct sk_buff *msg,
+ struct devlink_stats __percpu *trap_stats)
{
struct devlink_stats stats;
struct nlattr *attr;
@@ -7024,6 +7542,50 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_item *trap_item)
+{
+ struct devlink_stats stats;
+ struct nlattr *attr;
+ u64 drops = 0;
+ int err;
+
+ if (devlink->ops->trap_drop_counter_get) {
+ err = devlink->ops->trap_drop_counter_get(devlink,
+ trap_item->trap,
+ &drops);
+ if (err)
+ return err;
+ }
+
+ devlink_trap_stats_read(trap_item->stats, &stats);
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (devlink->ops->trap_drop_counter_get &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+ stats.rx_packets, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+ stats.rx_bytes, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
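[Editor's note: trap_drop_counter_get is a new driver op; its signature can be read directly off the call above. A hypothetical implementation might look like:]

	/* Sketch of a driver op backing the hardware drop counter (names invented). */
	static int foo_trap_drop_counter_get(struct devlink *devlink,
					     const struct devlink_trap *trap,
					     u64 *p_drops)
	{
		struct foo_priv *priv = devlink_priv(devlink);

		*p_drops = foo_hw_read_trap_drops(priv, trap->id);
		return 0;
	}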
+
static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
const struct devlink_trap_item *trap_item,
enum devlink_command cmd, u32 portid, u32 seq,
@@ -7061,7 +7623,7 @@ static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
if (err)
goto nla_put_failure;
- err = devlink_trap_stats_put(msg, trap_item->stats);
+ err = devlink_trap_stats_put(msg, devlink, trap_item);
if (err)
goto nla_put_failure;
@@ -7278,7 +7840,7 @@ devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
group_item->policer_item->policer->id))
goto nla_put_failure;
- err = devlink_trap_stats_put(msg, group_item->stats);
+ err = devlink_trap_group_stats_put(msg, group_item->stats);
if (err)
goto nla_put_failure;
@@ -7802,6 +8364,11 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
[DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
};
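[Editor's note: these five attributes plus the four DEVLINK_CMD_RATE_* ops registered below form the new rate uAPI. With the matching iproute2 support from the same series (the command syntax is the editor's assumption, not part of this patch), usage is roughly:

	devlink port function rate set pci/0000:03:00.0/1 tx_share 5Gbit tx_max 10Gbit
	devlink port function rate add pci/0000:03:00.0/group1
	devlink port function rate set pci/0000:03:00.0/1 parent group1
	devlink port function rate set pci/0000:03:00.0/1 noparent
	devlink port function rate del pci/0000:03:00.0/group1

An empty DEVLINK_ATTR_RATE_PARENT_NODE_NAME string ("noparent") unsets the parent, per devlink_nl_rate_parent_node_set() above.]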
static const struct genl_small_ops devlink_nl_ops[] = {
@@ -7828,6 +8395,30 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
+ .cmd = DEVLINK_CMD_RATE_GET,
+ .doit = devlink_nl_cmd_rate_get_doit,
+ .dumpit = devlink_nl_cmd_rate_get_dumpit,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_SET,
+ .doit = devlink_nl_cmd_rate_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_NEW,
+ .doit = devlink_nl_cmd_rate_new_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_DEL,
+ .doit = devlink_nl_cmd_rate_del_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE,
+ },
+ {
.cmd = DEVLINK_CMD_PORT_SPLIT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_split_doit,
@@ -8202,6 +8793,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
__devlink_net_set(devlink, &init_net);
INIT_LIST_HEAD(&devlink->port_list);
+ INIT_LIST_HEAD(&devlink->rate_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
INIT_LIST_HEAD(&devlink->resource_list);
@@ -8304,6 +8896,7 @@ void devlink_free(struct devlink *devlink)
WARN_ON(!list_empty(&devlink->resource_list));
WARN_ON(!list_empty(&devlink->dpipe_table_list));
WARN_ON(!list_empty(&devlink->sb_list));
+ WARN_ON(!list_empty(&devlink->rate_list));
WARN_ON(!list_empty(&devlink->port_list));
xa_destroy(&devlink->snapshot_ids);
@@ -8620,6 +9213,110 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
+/**
+ * devlink_rate_leaf_create - create devlink rate leaf
+ *
+ * @devlink_port: devlink port object to create rate object on
+ * @priv: driver private data
+ *
+ * Create devlink rate object of type leaf on provided @devlink_port.
+ * Emits a warning (call trace) if @devlink_port already has a devlink rate object.
+ *
+ * Context: Takes and releases the devlink->lock mutex.
+ *
+ * Return: -ENOMEM if the rate object allocation failed, 0 otherwise.
+ */
+int
+devlink_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
+{
+ struct devlink *devlink = devlink_port->devlink;
+ struct devlink_rate *devlink_rate;
+
+ devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
+ if (!devlink_rate)
+ return -ENOMEM;
+
+ mutex_lock(&devlink->lock);
+ WARN_ON(devlink_port->devlink_rate);
+ devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
+ devlink_rate->devlink = devlink;
+ devlink_rate->devlink_port = devlink_port;
+ devlink_rate->priv = priv;
+ list_add_tail(&devlink_rate->list, &devlink->rate_list);
+ devlink_port->devlink_rate = devlink_rate;
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+ mutex_unlock(&devlink->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_rate_leaf_create);
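[Editor's note: a sketch of the intended driver usage, assuming a driver that registers one devlink port per vport; the foo_* types are hypothetical:]

	static int foo_vport_init(struct devlink *devlink, struct foo_vport *vport)
	{
		int err;

		err = devlink_port_register(devlink, &vport->dl_port, vport->index);
		if (err)
			return err;

		/* vport becomes the 'priv' passed back in the rate_leaf_* ops */
		err = devlink_rate_leaf_create(&vport->dl_port, vport);
		if (err)
			devlink_port_unregister(&vport->dl_port);
		return err;
	}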
+
+/**
+ * devlink_rate_leaf_destroy - destroy devlink rate leaf
+ *
+ * @devlink_port: devlink port linked to the rate object
+ *
+ * Context: Takes and releases the devlink->lock mutex.
+ */
+void devlink_rate_leaf_destroy(struct devlink_port *devlink_port)
+{
+ struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
+ struct devlink *devlink = devlink_port->devlink;
+
+ if (!devlink_rate)
+ return;
+
+ mutex_lock(&devlink->lock);
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+ if (devlink_rate->parent)
+ refcount_dec(&devlink_rate->parent->refcnt);
+ list_del(&devlink_rate->list);
+ devlink_port->devlink_rate = NULL;
+ mutex_unlock(&devlink->lock);
+ kfree(devlink_rate);
+}
+EXPORT_SYMBOL_GPL(devlink_rate_leaf_destroy);
+
+/**
+ * devlink_rate_nodes_destroy - destroy all devlink rate nodes on device
+ *
+ * @devlink: devlink instance
+ *
+ * Unset the parent of all rate objects and destroy all rate nodes
+ * on the specified device.
+ *
+ * Context: Takes and releases the devlink->lock mutex.
+ */
+void devlink_rate_nodes_destroy(struct devlink *devlink)
+{
+ struct devlink_rate *devlink_rate, *tmp;
+ const struct devlink_ops *ops = devlink->ops;
+
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ if (!devlink_rate->parent)
+ continue;
+
+ refcount_dec(&devlink_rate->parent->refcnt);
+ if (devlink_rate_is_leaf(devlink_rate))
+ ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
+ NULL, NULL);
+ else if (devlink_rate_is_node(devlink_rate))
+ ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
+ NULL, NULL);
+ }
+ list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
+ if (devlink_rate_is_node(devlink_rate)) {
+ ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
+ list_del(&devlink_rate->list);
+ kfree(devlink_rate->name);
+ kfree(devlink_rate);
+ }
+ }
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_rate_nodes_destroy);
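[Editor's note: this pairs with devlink_rate_nodes_check() earlier in the patch: the netlink path refuses an eswitch mode change while nodes exist, and this helper lets a driver tear the whole hierarchy down on paths it controls, e.g. driver unload or eswitch teardown. A hypothetical sketch:]

	static void foo_eswitch_disable(struct foo_eswitch *esw)
	{
		/* drop all node objects before leaving switchdev mode */
		devlink_rate_nodes_destroy(priv_to_devlink(esw->dev));
		/* ...vport teardown, legacy mode restore, etc... */
	}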
+
static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
char *name, size_t len)
{
@@ -8632,12 +9329,11 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ n = snprintf(name, len, "p%u", attrs->phys.port_number);
+ if (n < len && attrs->split)
+ n += snprintf(name + n, len - n, "s%u",
+ attrs->phys.split_subport_number);
- if (!attrs->split)
- n = snprintf(name, len, "p%u", attrs->phys.port_number);
- else
- n = snprintf(name, len, "p%us%u",
- attrs->phys.port_number,
- attrs->phys.split_subport_number);
+
break;
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index cd80ffed6d26..a9f937975080 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
{
struct net *net;
struct sk_buff *skb;
- int err = -ENOBUFS;
+ int err = -ENOMEM;
net = ops->fro_net;
skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
diff --git a/net/core/filter.c b/net/core/filter.c
index cae56d08a670..d70187ce851b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -17,6 +17,7 @@
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
*/
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -41,7 +42,6 @@
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
-#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
@@ -3241,9 +3241,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
- return -ENOTSUPP;
-
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
@@ -3255,19 +3252,11 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* SKB_GSO_TCPV4 needs to be changed into
- * SKB_GSO_TCPV6.
- */
+ /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
if (shinfo->gso_type & SKB_GSO_TCPV4) {
shinfo->gso_type &= ~SKB_GSO_TCPV4;
shinfo->gso_type |= SKB_GSO_TCPV6;
}
-
- /* Due to IPv6 header, MSS needs to be downgraded. */
- skb_decrease_gso_size(shinfo, len_diff);
- /* Header must be checked, and gso_segs recomputed. */
- shinfo->gso_type |= SKB_GSO_DODGY;
- shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IPV6);
@@ -3282,9 +3271,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
- return -ENOTSUPP;
-
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
@@ -3296,19 +3282,11 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* SKB_GSO_TCPV6 needs to be changed into
- * SKB_GSO_TCPV4.
- */
+ /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
if (shinfo->gso_type & SKB_GSO_TCPV6) {
shinfo->gso_type &= ~SKB_GSO_TCPV6;
shinfo->gso_type |= SKB_GSO_TCPV4;
}
-
- /* Due to IPv4 header, MSS can be upgraded. */
- skb_increase_gso_size(shinfo, len_diff);
- /* Header must be checked, and gso_segs recomputed. */
- shinfo->gso_type |= SKB_GSO_DODGY;
- shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IP);
@@ -3784,6 +3762,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
__skb_push(skb, head_room);
memset(skb->data, 0, head_room);
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
}
return ret;
@@ -3918,6 +3897,34 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
.arg2_type = ARG_ANYTHING,
};
+/* XDP_REDIRECT works by a three-step process, implemented in the functions
+ * below:
+ *
+ * 1. The bpf_redirect() and bpf_redirect_map() helpers will lookup the target
+ * of the redirect and store it (along with some other metadata) in a per-CPU
+ * struct bpf_redirect_info.
+ *
+ * 2. When the program returns the XDP_REDIRECT return code, the driver will
+ * call xdp_do_redirect() which will use the information in struct
+ * bpf_redirect_info to actually enqueue the frame into a map type-specific
+ * bulk queue structure.
+ *
+ * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(),
+ * which will flush all the different bulk queues, thus completing the
+ * redirect.
+ *
+ * Pointers to the map entries will be kept around for this whole sequence of
+ * steps, protected by RCU. However, there is no top-level rcu_read_lock() in
+ * the core code; instead, the RCU protection relies on everything happening
+ * inside a single NAPI poll sequence, which means it's between a pair of calls
+ * to local_bh_disable()/local_bh_enable().
+ *
+ * The map entries are marked as __rcu and the map code makes sure to
+ * dereference those pointers with rcu_dereference_check() in a way that works
+ * for both sections that hold an rcu_read_lock() and sections that are
+ * called from NAPI without a separate rcu_read_lock(). The code below does not
+ * use RCU annotations, but relies on those in the map code.
+ */
void xdp_do_flush(void)
{
__dev_flush();
@@ -3926,6 +3933,23 @@ void xdp_do_flush(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush);
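[Editor's note: a sketch of the three-step flow described in the comment above, as seen from a driver's NAPI poll; everything foo_* is invented for illustration, and budget accounting/napi_complete handling is elided:]

	static int foo_napi_poll(struct napi_struct *napi, int budget)
	{
		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
		int done = 0;

		while (done < budget) {
			struct xdp_buff xdp;

			if (!foo_ring_fetch(ring, &xdp))	/* hypothetical RX fetch */
				break;

			switch (bpf_prog_run_xdp(ring->xdp_prog, &xdp)) {
			case XDP_REDIRECT:
				/* step 1 ran inside the prog; step 2 enqueues.
				 * On failure the frame should be dropped (elided).
				 */
				xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog);
				break;
			/* XDP_PASS / XDP_TX / XDP_DROP handling elided */
			}
			done++;
		}

		/* step 3: flush the per-CPU bulk queues before leaving NAPI */
		xdp_do_flush();
		return done;
	}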
+void bpf_clear_redirect_map(struct bpf_map *map)
+{
+ struct bpf_redirect_info *ri;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+ /* Avoid polluting remote cacheline due to writes if
+ * not needed. Once we pass this test, we need the
+ * cmpxchg() to make sure it hasn't been changed in
+ * the meantime by remote CPU.
+ */
+ if (unlikely(READ_ONCE(ri->map) == map))
+ cmpxchg(&ri->map, map, NULL);
+ }
+}
+
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
@@ -3933,6 +3957,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ struct bpf_map *map;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
@@ -3942,7 +3967,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- err = dev_map_enqueue(fwd, xdp, dev);
+ map = READ_ONCE(ri->map);
+ if (unlikely(map)) {
+ WRITE_ONCE(ri->map, NULL);
+ err = dev_map_enqueue_multi(xdp, dev, map,
+ ri->flags & BPF_F_EXCLUDE_INGRESS);
+ } else {
+ err = dev_map_enqueue(fwd, xdp, dev);
+ }
break;
case BPF_MAP_TYPE_CPUMAP:
err = cpu_map_enqueue(fwd, xdp, dev);
@@ -3984,13 +4016,21 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
enum bpf_map_type map_type, u32 map_id)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_map *map;
int err;
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- err = dev_map_generic_redirect(fwd, skb, xdp_prog);
+ map = READ_ONCE(ri->map);
+ if (unlikely(map)) {
+ WRITE_ONCE(ri->map, NULL);
+ err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
+ ri->flags & BPF_F_EXCLUDE_INGRESS);
+ } else {
+ err = dev_map_generic_redirect(fwd, skb, xdp_prog);
+ }
if (unlikely(err))
goto err;
break;
@@ -10007,11 +10047,13 @@ out:
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
struct sock_reuseport *reuse,
struct sock *sk, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
reuse_kern->skb = skb;
reuse_kern->sk = sk;
reuse_kern->selected_sk = NULL;
+ reuse_kern->migrating_sk = migrating_sk;
reuse_kern->data_end = skb->data + skb_headlen(skb);
reuse_kern->hash = hash;
reuse_kern->reuseport_id = reuse->reuseport_id;
@@ -10020,12 +10062,13 @@ static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
struct sk_reuseport_kern reuse_kern;
enum sk_action action;
- bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
+ bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, migrating_sk, hash);
action = BPF_PROG_RUN(prog, &reuse_kern);
if (action == SK_PASS)
@@ -10135,6 +10178,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
return &sk_reuseport_load_bytes_proto;
case BPF_FUNC_skb_load_bytes_relative:
return &sk_reuseport_load_bytes_relative_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_ptr_cookie_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -10164,6 +10209,14 @@ sk_reuseport_is_valid_access(int off, int size,
case offsetof(struct sk_reuseport_md, hash):
return size == size_default;
+ case offsetof(struct sk_reuseport_md, sk):
+ info->reg_type = PTR_TO_SOCKET;
+ return size == sizeof(__u64);
+
+ case offsetof(struct sk_reuseport_md, migrating_sk):
+ info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
+ return size == sizeof(__u64);
+
/* Fields that allow narrowing */
case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
if (size < sizeof_field(struct sk_buff, protocol))
@@ -10236,6 +10289,14 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct sk_reuseport_md, bind_inany):
SK_REUSEPORT_LOAD_FIELD(bind_inany);
break;
+
+ case offsetof(struct sk_reuseport_md, sk):
+ SK_REUSEPORT_LOAD_FIELD(sk);
+ break;
+
+ case offsetof(struct sk_reuseport_md, migrating_sk):
+ SK_REUSEPORT_LOAD_FIELD(migrating_sk);
+ break;
}
return insn - insn_buf;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3ed7c98a98e1..2aadbfc5193b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -943,8 +943,8 @@ bool __skb_flow_dissect(const struct net *net,
int offset = 0;
ops = skb->dev->dsa_ptr->tag_ops;
- /* Tail taggers don't break flow dissection */
- if (!ops->tail_tag) {
+ /* Only DSA header taggers break flow dissection */
+ if (ops->needed_headroom) {
if (ops->flow_dissect)
ops->flow_dissect(skb, &proto, &offset);
else
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 98f20efbfadf..53e85c70c6e5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
write_lock(&n->lock);
if ((n->nud_state == NUD_FAILED) ||
+ (n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
time_after(tref, n->updated))
@@ -3141,7 +3142,7 @@ static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
struct pneigh_entry *pn = NULL;
- int bucket = state->bucket;
+ int bucket;
state->flags |= NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 283ddb2dbc7d..c40cd8dd75c7 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -60,3 +60,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
+EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_bad_csum);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 43b6ac4c4439..9b5a767eddd5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
}
EXPORT_SYMBOL_GPL(__put_net);
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
struct net *get_net_ns_by_fd(int fd)
{
struct file *file;
@@ -660,14 +672,8 @@ struct net *get_net_ns_by_fd(int fd)
fput(file);
return net;
}
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c310c7c1cef7..0a6b04714558 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -36,6 +36,7 @@
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
+#include <linux/kconfig.h>
/*
* We maintain a small pool of fully-sized skbs, to make sure the
@@ -389,7 +390,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
static atomic_t ip_ident;
struct ipv6hdr *ip6h;
- WARN_ON_ONCE(!irqs_disabled());
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ WARN_ON_ONCE(!irqs_disabled());
udp_len = len + sizeof(*udph);
if (np->ipv6)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 3c4c4c7a0402..5e4eb45b139c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
+#include <linux/poison.h>
#include <trace/events/page_pool.h>
@@ -221,6 +222,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
return NULL;
}
+ page->pp_magic |= PP_SIGNATURE;
+
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
@@ -263,6 +266,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
put_page(page);
continue;
}
+ page->pp_magic |= PP_SIGNATURE;
pool->alloc.cache[pool->alloc.count++] = page;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -341,6 +345,8 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
DMA_ATTR_SKIP_CPU_SYNC);
page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
+ page->pp_magic = 0;
+
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
*/
@@ -622,3 +628,25 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
}
}
EXPORT_SYMBOL(page_pool_update_nid);
+
+bool page_pool_return_skb_page(struct page *page)
+{
+ struct page_pool *pp;
+
+ page = compound_head(page);
+ if (unlikely(page->pp_magic != PP_SIGNATURE))
+ return false;
+
+ pp = page->pp;
+
+ /* The driver set this to its memory recycling info. Reset it on recycle.
+ * This will *not* work for NICs using a split-page memory model.
+ * The page will be returned to the pool here regardless of the
+ * 'flipped' fragment being in use or not.
+ */
+ page->pp = NULL;
+ page_pool_put_full_page(pp, page, false);
+
+ return true;
+}
+EXPORT_SYMBOL(page_pool_return_skb_page);
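[Editor's note: the producer side of this path is the driver marking its skbs. In this series that is done with skb_mark_for_recycle(), which sets skb->pp_recycle and stores the pool in page->pp; the three-argument form of that helper is assumed here, and everything other than the page_pool/skb API is invented:]

	static struct sk_buff *foo_build_rx_skb(struct foo_ring *ring,
						struct page *page, u32 len)
	{
		struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

		if (!skb)
			return NULL;

		skb_reserve(skb, ring->rx_headroom);
		skb_put(skb, len);
		/* let skb_free_head()/__skb_frag_unref() recycle the page */
		skb_mark_for_recycle(skb, page, ring->page_pool);
		return skb;
	}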
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3fba429f1f57..7e258d255e90 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -467,7 +467,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(struct pktgen_net *pn);
static void pktgen_reset_all_threads(struct pktgen_net *pn);
-static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn);
+static void pktgen_stop_all_threads(struct pktgen_net *pn);
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
@@ -516,14 +516,11 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
data[count - 1] = 0; /* Strip trailing '\n' and terminate string */
if (!strcmp(data, "stop"))
- pktgen_stop_all_threads_ifs(pn);
-
+ pktgen_stop_all_threads(pn);
else if (!strcmp(data, "start"))
pktgen_run_all_threads(pn);
-
else if (!strcmp(data, "reset"))
pktgen_reset_all_threads(pn);
-
else
return -EINVAL;
@@ -3027,20 +3024,25 @@ static void pktgen_run(struct pktgen_thread *t)
t->control &= ~(T_STOP);
}
-static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
+static void pktgen_handle_all_threads(struct pktgen_net *pn, u32 flags)
{
struct pktgen_thread *t;
- func_enter();
-
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list)
- t->control |= T_STOP;
+ t->control |= (flags);
mutex_unlock(&pktgen_thread_lock);
}
+static void pktgen_stop_all_threads(struct pktgen_net *pn)
+{
+ func_enter();
+
+ pktgen_handle_all_threads(pn, T_STOP);
+}
+
static int thread_is_running(const struct pktgen_thread *t)
{
const struct pktgen_dev *pkt_dev;
@@ -3103,16 +3105,9 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
static void pktgen_run_all_threads(struct pktgen_net *pn)
{
- struct pktgen_thread *t;
-
func_enter();
- mutex_lock(&pktgen_thread_lock);
-
- list_for_each_entry(t, &pn->pktgen_threads, th_list)
- t->control |= (T_RUN);
-
- mutex_unlock(&pktgen_thread_lock);
+ pktgen_handle_all_threads(pn, T_RUN);
/* Propagate thread->control */
schedule_timeout_interruptible(msecs_to_jiffies(125));
@@ -3122,16 +3117,9 @@ static void pktgen_run_all_threads(struct pktgen_net *pn)
static void pktgen_reset_all_threads(struct pktgen_net *pn)
{
- struct pktgen_thread *t;
-
func_enter();
- mutex_lock(&pktgen_thread_lock);
-
- list_for_each_entry(t, &pn->pktgen_threads, th_list)
- t->control |= (T_REMDEVALL);
-
- mutex_unlock(&pktgen_thread_lock);
+ pktgen_handle_all_threads(pn, T_REMDEVALL);
/* Propagate thread->control */
schedule_timeout_interruptible(msecs_to_jiffies(125));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 714d5fa38546..f6af3e74fc44 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -9,7 +9,7 @@
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Fixes:
- * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
+ * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
*/
#include <linux/bitops.h>
@@ -234,7 +234,7 @@ unlock:
* @msgtype: rtnetlink message type
* @doit: Function pointer called for each request message
* @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
*
* Like rtnl_register, but for use by removable modules.
*/
@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(rtnl_register_module);
* @msgtype: rtnetlink message type
* @doit: Function pointer called for each request message
* @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
*
* Registers the specified function pointers (at least one of them has
* to be non-NULL) to be called whenever a request message for the
@@ -376,12 +376,12 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
if (rtnl_link_ops_get(ops->kind))
return -EEXIST;
- /* The check for setup is here because if ops
+ /* The check for alloc/setup is here because if ops
* does not have that filled up, it is not possible
* to use the ops for creating device. So do not
* fill up dellink as well. That disables rtnl_dellink.
*/
- if (ops->setup && !ops->dellink)
+ if ((ops->alloc || ops->setup) && !ops->dellink)
ops->dellink = unregister_netdevice_queue;
list_add_tail(&ops->list, &link_ops);
@@ -543,7 +543,9 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
const struct rtnl_af_ops *ops;
- list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
+ ASSERT_RTNL();
+
+ list_for_each_entry(ops, &rtnl_af_ops, list) {
if (ops->family == family)
return ops;
}
@@ -1819,6 +1821,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (rtnl_fill_prop_list(skb, dev))
goto nla_put_failure;
+ if (dev->dev.parent &&
+ nla_put_string(skb, IFLA_PARENT_DEV_NAME,
+ dev_name(dev->dev.parent)))
+ goto nla_put_failure;
+
+ if (dev->dev.parent && dev->dev.parent->bus &&
+ nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
+ dev->dev.parent->bus->name))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -1878,6 +1890,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
[IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
+ [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2274,27 +2287,18 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
- rcu_read_lock();
af_ops = rtnl_af_lookup(nla_type(af));
- if (!af_ops) {
- rcu_read_unlock();
+ if (!af_ops)
return -EAFNOSUPPORT;
- }
- if (!af_ops->set_link_af) {
- rcu_read_unlock();
+ if (!af_ops->set_link_af)
return -EOPNOTSUPP;
- }
if (af_ops->validate_link_af) {
err = af_ops->validate_link_af(dev, af);
- if (err < 0) {
- rcu_read_unlock();
+ if (err < 0)
return err;
- }
}
-
- rcu_read_unlock();
}
}
@@ -2574,7 +2578,7 @@ static int do_set_proto_down(struct net_device *dev,
if (nl_proto_down) {
proto_down = nla_get_u8(nl_proto_down);
- /* Dont turn off protodown if there are active reasons */
+ /* Don't turn off protodown if there are active reasons */
if (!proto_down && dev->proto_down_reason) {
NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
return -EBUSY;
@@ -2868,17 +2872,12 @@ static int do_setlink(const struct sk_buff *skb,
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
- rcu_read_lock();
-
BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
err = af_ops->set_link_af(dev, af, extack);
- if (err < 0) {
- rcu_read_unlock();
+ if (err < 0)
goto errout;
- }
- rcu_read_unlock();
status |= DO_SETLINK_NOTIFY;
}
}
@@ -3177,8 +3176,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
return ERR_PTR(-EINVAL);
}
- dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
- ops->setup, num_tx_queues, num_rx_queues);
+ if (ops->alloc) {
+ dev = ops->alloc(tb, ifname, name_assign_type,
+ num_tx_queues, num_rx_queues);
+ if (IS_ERR(dev))
+ return dev;
+ } else {
+ dev = alloc_netdev_mqs(ops->priv_size, ifname,
+ name_assign_type, ops->setup,
+ num_tx_queues, num_rx_queues);
+ }
+
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -3411,7 +3419,7 @@ replay:
return -EOPNOTSUPP;
}
- if (!ops->setup)
+ if (!ops->alloc && !ops->setup)
return -EOPNOTSUPP;
if (!ifname[0]) {
@@ -3939,12 +3947,12 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
* implement its own handler for this.
*/
if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- pr_info("%s: FDB only supports static addresses\n", dev->name);
+ netdev_info(dev, "default FDB implementation only supports local addresses\n");
return err;
}
if (vid) {
- pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
return err;
}
@@ -4078,7 +4086,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
* implement its own handler for this.
*/
if (!(ndm->ndm_state & NUD_PERMANENT)) {
- pr_info("%s: FDB only supports static addresses\n", dev->name);
+ netdev_info(dev, "default FDB implementation only supports local addresses\n");
return err;
}
@@ -4842,6 +4850,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
if (err < 0)
goto errout;
+ /* Notification info is only filled for bridge ports, not the bridge
+ * device itself. Therefore, a zero notification length is valid and
+ * should not result in an error.
+ */
if (!skb->len)
goto errout;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ad22870298c..12aabcda6db2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -70,6 +70,7 @@
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
+#include <net/page_pool.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
@@ -645,10 +646,13 @@ static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
- if (skb->head_frag)
+ if (skb->head_frag) {
+ if (skb_pp_recycle(skb, head))
+ return;
skb_free_frag(head);
- else
+ } else {
kfree(head);
+ }
}
static void skb_release_data(struct sk_buff *skb)
@@ -664,7 +668,7 @@ static void skb_release_data(struct sk_buff *skb)
skb_zcopy_clear(skb, true);
for (i = 0; i < shinfo->nr_frags; i++)
- __skb_frag_unref(&shinfo->frags[i]);
+ __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
if (shinfo->frag_list)
kfree_skb_list(shinfo->frag_list);
@@ -1046,6 +1050,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->nohdr = 0;
n->peeked = 0;
C(pfmemalloc);
+ C(pp_recycle);
n->destructor = NULL;
C(tail);
C(end);
@@ -1253,6 +1258,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
struct sock *sk = skb->sk;
struct sk_buff_head *q;
unsigned long flags;
+ bool is_zerocopy;
u32 lo, hi;
u16 len;
@@ -1267,6 +1273,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
len = uarg->len;
lo = uarg->id;
hi = uarg->id + len - 1;
+ is_zerocopy = uarg->zerocopy;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
@@ -1274,7 +1281,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
serr->ee.ee_data = hi;
serr->ee.ee_info = lo;
- if (!uarg->zerocopy)
+ if (!is_zerocopy)
serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
q = &sk->sk_error_queue;
@@ -1287,7 +1294,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
}
spin_unlock_irqrestore(&q->lock, flags);
- sk->sk_error_report(sk);
+ sk_error_report(sk);
release:
consume_skb(skb);
@@ -3495,7 +3502,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, skb_frag_size(fragfrom));
- __skb_frag_unref(fragfrom);
+ __skb_frag_unref(fragfrom, skb->pp_recycle);
}
/* Reposition in the original skb */
@@ -4678,7 +4685,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4709,7 +4716,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
sk->sk_err = 0;
if (skb_next)
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return skb;
}
@@ -5285,6 +5292,13 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (skb_cloned(to))
return false;
+ /* The page pool signature of struct page will eventually figure out
+ * which pages can be recycled or not but for now let's prohibit slab
+ * allocated and page_pool allocated SKBs from being coalesced.
+ */
+ if (to->pp_recycle != from->pp_recycle)
+ return false;
+
if (len <= skb_tailroom(to)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 43ce17a6a585..9b6160a191f8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -399,29 +399,6 @@ out:
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
- long timeo, int *err)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int ret = 0;
-
- if (sk->sk_shutdown & RCV_SHUTDOWN)
- return 1;
-
- if (!timeo)
- return ret;
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- ret = sk_wait_event(sk, &timeo,
- !list_empty(&psock->ingress_msg) ||
- !skb_queue_empty(&sk->sk_receive_queue), &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
- return ret;
-}
-EXPORT_SYMBOL_GPL(sk_msg_wait_data);
-
/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags)
@@ -601,6 +578,12 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
return sk_psock_skb_ingress(psock, skb);
}
+static void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
+}
+
static void sk_psock_backlog(struct work_struct *work)
{
struct sk_psock *psock = container_of(work, struct sk_psock, work);
@@ -640,7 +623,7 @@ start:
/* Hard errors break pipe and stop xmit. */
sk_psock_report_error(psock, ret ? -ret : EPIPE);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
goto end;
}
off += ret;
@@ -731,7 +714,7 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
skb_bpf_redirect_clear(skb);
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
}
__sk_psock_purge_ingress_msg(psock);
}
@@ -847,7 +830,7 @@ out:
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
-static void sk_psock_skb_redirect(struct sk_buff *skb)
+static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;
@@ -857,8 +840,8 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
* return code, but then didn't set a redirect interface.
*/
if (unlikely(!sk_other)) {
- kfree_skb(skb);
- return;
+ sock_drop(from->sk, skb);
+ return -EIO;
}
psock_other = sk_psock(sk_other);
/* This error indicates the socket is being torn down or had another
@@ -866,26 +849,30 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
* a socket that is in this state so we drop the skb.
*/
if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
- kfree_skb(skb);
- return;
+ skb_bpf_redirect_clear(skb);
+ sock_drop(from->sk, skb);
+ return -EIO;
}
spin_lock_bh(&psock_other->ingress_lock);
if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
spin_unlock_bh(&psock_other->ingress_lock);
- kfree_skb(skb);
- return;
+ skb_bpf_redirect_clear(skb);
+ sock_drop(from->sk, skb);
+ return -EIO;
}
skb_queue_tail(&psock_other->ingress_skb, skb);
schedule_work(&psock_other->work);
spin_unlock_bh(&psock_other->ingress_lock);
+ return 0;
}
-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
+ struct sk_psock *from, int verdict)
{
switch (verdict) {
case __SK_REDIRECT:
- sk_psock_skb_redirect(skb);
+ sk_psock_skb_redirect(from, skb);
break;
case __SK_PASS:
case __SK_DROP:
@@ -909,20 +896,21 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
- sk_psock_tls_verdict_apply(skb, psock->sk, ret);
+ sk_psock_tls_verdict_apply(skb, psock, ret);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
-static void sk_psock_verdict_apply(struct sk_psock *psock,
- struct sk_buff *skb, int verdict)
+static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ int verdict)
{
struct sock *sk_other;
- int err = -EIO;
+ int err = 0;
switch (verdict) {
case __SK_PASS:
+ err = -EIO;
sk_other = psock->sk;
if (sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
@@ -945,18 +933,25 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_queue_tail(&psock->ingress_skb, skb);
schedule_work(&psock->work);
+ err = 0;
}
spin_unlock_bh(&psock->ingress_lock);
+ if (err < 0) {
+ skb_bpf_redirect_clear(skb);
+ goto out_free;
+ }
}
break;
case __SK_REDIRECT:
- sk_psock_skb_redirect(skb);
+ err = sk_psock_skb_redirect(psock, skb);
break;
case __SK_DROP:
default:
out_free:
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
}
+
+ return err;
}
static void sk_psock_write_space(struct sock *sk)
@@ -988,7 +983,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
sk = strp->sk;
psock = sk_psock(sk);
if (unlikely(!psock)) {
- kfree_skb(skb);
+ sock_drop(sk, skb);
goto out;
}
prog = READ_ONCE(psock->progs.stream_verdict);
@@ -1109,7 +1104,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
psock = sk_psock(sk);
if (unlikely(!psock)) {
len = 0;
- kfree_skb(skb);
+ sock_drop(sk, skb);
goto out;
}
prog = READ_ONCE(psock->progs.stream_verdict);
@@ -1123,7 +1118,8 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
- sk_psock_verdict_apply(psock, skb, ret);
+ if (sk_psock_verdict_apply(psock, skb, ret) < 0)
+ len = 0;
out:
rcu_read_unlock();
return len;
diff --git a/net/core/sock.c b/net/core/sock.c
index c761c4a0b66b..ba1c0f75cd45 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -331,6 +331,22 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(__sk_backlog_rcv);
+void sk_error_report(struct sock *sk)
+{
+ sk->sk_error_report(sk);
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ fallthrough;
+ case AF_INET6:
+ trace_inet_sk_error_report(sk);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(sk_error_report);
+
static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
struct __kernel_sock_timeval tv;
@@ -776,6 +792,58 @@ void sock_enable_timestamps(struct sock *sk)
}
EXPORT_SYMBOL(sock_enable_timestamps);
+void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
+{
+ switch (optname) {
+ case SO_TIMESTAMP_OLD:
+ __sock_set_timestamps(sk, valbool, false, false);
+ break;
+ case SO_TIMESTAMP_NEW:
+ __sock_set_timestamps(sk, valbool, true, false);
+ break;
+ case SO_TIMESTAMPNS_OLD:
+ __sock_set_timestamps(sk, valbool, false, true);
+ break;
+ case SO_TIMESTAMPNS_NEW:
+ __sock_set_timestamps(sk, valbool, true, true);
+ break;
+ }
+}
+
+int sock_set_timestamping(struct sock *sk, int optname, int val)
+{
+ if (val & ~SOF_TIMESTAMPING_MASK)
+ return -EINVAL;
+
+ if (val & SOF_TIMESTAMPING_OPT_ID &&
+ !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ sk->sk_type == SOCK_STREAM) {
+ if ((1 << sk->sk_state) &
+ (TCPF_CLOSE | TCPF_LISTEN))
+ return -EINVAL;
+ sk->sk_tskey = tcp_sk(sk)->snd_una;
+ } else {
+ sk->sk_tskey = 0;
+ }
+ }
+
+ if (val & SOF_TIMESTAMPING_OPT_STATS &&
+ !(val & SOF_TIMESTAMPING_OPT_TSONLY))
+ return -EINVAL;
+
+ sk->sk_tsflags = val;
+ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
+
+ if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+ sock_enable_timestamp(sk,
+ SOCK_TIMESTAMPING_RX_SOFTWARE);
+ else
+ sock_disable_timestamp(sk,
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+ return 0;
+}
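[Editor's note: the userspace view of the checks above, as a hedged example. In particular, OPT_ID on a TCP socket is rejected in the CLOSE/LISTEN states because sk_tskey is seeded from snd_una, and OPT_STATS requires OPT_TSONLY:]

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	static int enable_tx_timestamps(int fd)	/* fd: connected TCP socket */
	{
		int val = SOF_TIMESTAMPING_TX_SOFTWARE |
			  SOF_TIMESTAMPING_SOFTWARE |
			  SOF_TIMESTAMPING_OPT_ID |
			  SOF_TIMESTAMPING_OPT_TSONLY |
			  SOF_TIMESTAMPING_OPT_STATS;

		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				  &val, sizeof(val));
	}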
+
void sock_set_keepalive(struct sock *sk)
{
lock_sock(sk);
@@ -815,10 +883,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
}
EXPORT_SYMBOL(sock_set_rcvbuf);
+static void __sock_set_mark(struct sock *sk, u32 val)
+{
+ if (val != sk->sk_mark) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
+}
+
void sock_set_mark(struct sock *sk, u32 val)
{
lock_sock(sk);
- sk->sk_mark = val;
+ __sock_set_mark(sk, val);
release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);
@@ -989,54 +1065,15 @@ set_sndbuf:
break;
case SO_TIMESTAMP_OLD:
- __sock_set_timestamps(sk, valbool, false, false);
- break;
case SO_TIMESTAMP_NEW:
- __sock_set_timestamps(sk, valbool, true, false);
- break;
case SO_TIMESTAMPNS_OLD:
- __sock_set_timestamps(sk, valbool, false, true);
- break;
case SO_TIMESTAMPNS_NEW:
- __sock_set_timestamps(sk, valbool, true, true);
+ sock_set_timestamp(sk, optname, valbool);
break;
+
case SO_TIMESTAMPING_NEW:
case SO_TIMESTAMPING_OLD:
- if (val & ~SOF_TIMESTAMPING_MASK) {
- ret = -EINVAL;
- break;
- }
-
- if (val & SOF_TIMESTAMPING_OPT_ID &&
- !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
- if (sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM) {
- if ((1 << sk->sk_state) &
- (TCPF_CLOSE | TCPF_LISTEN)) {
- ret = -EINVAL;
- break;
- }
- sk->sk_tskey = tcp_sk(sk)->snd_una;
- } else {
- sk->sk_tskey = 0;
- }
- }
-
- if (val & SOF_TIMESTAMPING_OPT_STATS &&
- !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
- ret = -EINVAL;
- break;
- }
-
- sk->sk_tsflags = val;
- sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
-
- if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
- sock_enable_timestamp(sk,
- SOCK_TIMESTAMPING_RX_SOFTWARE);
- else
- sock_disable_timestamp(sk,
- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+ ret = sock_set_timestamping(sk, optname, val);
break;
case SO_RCVLOWAT:
@@ -1126,10 +1163,10 @@ set_sndbuf:
case SO_MARK:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
- } else if (val != sk->sk_mark) {
- sk->sk_mark = val;
- sk_dst_reset(sk);
+ break;
}
+
+ __sock_set_mark(sk, val);
break;
case SO_RXQ_OVFL:
@@ -1614,6 +1651,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_bound_dev_if;
break;
+ case SO_NETNS_COOKIE:
+ lv = sizeof(u64);
+ if (len != lv)
+ return -EINVAL;
+ v.val64 = sock_net(sk)->net_cookie;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -2132,10 +2176,10 @@ void skb_orphan_partial(struct sk_buff *skb)
if (skb_is_tcp_pure_ack(skb))
return;
- if (can_skb_orphan_partial(skb))
- skb_set_owner_sk_safe(skb, skb->sk);
- else
- skb_orphan(skb);
+ if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
+ return;
+
+ skb_orphan(skb);
}
EXPORT_SYMBOL(skb_orphan_partial);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 6f1b82b8ad49..60decd6420ca 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
bpf_map_init_from_attr(&stab->map, attr);
raw_spin_lock_init(&stab->lock);
- stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+ stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
sizeof(struct sock *),
stab->map.numa_node);
if (!stab->sks) {
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index b065f0a103ed..3f00a28fe762 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -6,6 +6,7 @@
* selecting the socket index from the array of available sockets.
*/
+#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
@@ -17,6 +18,74 @@
DEFINE_SPINLOCK(reuseport_lock);
static DEFINE_IDA(reuseport_ida);
+static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
+ struct sock_reuseport *reuse, bool bind_inany);
+
+static int reuseport_sock_index(struct sock *sk,
+ const struct sock_reuseport *reuse,
+ bool closed)
+{
+ int left, right;
+
+ if (!closed) {
+ left = 0;
+ right = reuse->num_socks;
+ } else {
+ left = reuse->max_socks - reuse->num_closed_socks;
+ right = reuse->max_socks;
+ }
+
+ for (; left < right; left++)
+ if (reuse->socks[left] == sk)
+ return left;
+ return -1;
+}
+
+static void __reuseport_add_sock(struct sock *sk,
+ struct sock_reuseport *reuse)
+{
+ reuse->socks[reuse->num_socks] = sk;
+ /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
+ smp_wmb();
+ reuse->num_socks++;
+}
+
+static bool __reuseport_detach_sock(struct sock *sk,
+ struct sock_reuseport *reuse)
+{
+ int i = reuseport_sock_index(sk, reuse, false);
+
+ if (i == -1)
+ return false;
+
+ reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
+ reuse->num_socks--;
+
+ return true;
+}
+
+static void __reuseport_add_closed_sock(struct sock *sk,
+ struct sock_reuseport *reuse)
+{
+ reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
+}
+
+static bool __reuseport_detach_closed_sock(struct sock *sk,
+ struct sock_reuseport *reuse)
+{
+ int i = reuseport_sock_index(sk, reuse, true);
+
+ if (i == -1)
+ return false;
+
+ reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
+
+ return true;
+}
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
@@ -49,6 +118,12 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
if (reuse) {
+ if (reuse->num_closed_socks) {
+ /* sk was shutdown()ed before */
+ ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
+ goto out;
+ }
+
/* Only set reuse->bind_inany if the bind_inany is true.
* Otherwise, it will overwrite the reuse->bind_inany
* which was set by the bind/hash path.
@@ -72,9 +147,9 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
}
reuse->reuseport_id = id;
+ reuse->bind_inany = bind_inany;
reuse->socks[0] = sk;
reuse->num_socks = 1;
- reuse->bind_inany = bind_inany;
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
out:
@@ -90,14 +165,30 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
u32 more_socks_size, i;
more_socks_size = reuse->max_socks * 2U;
- if (more_socks_size > U16_MAX)
+ if (more_socks_size > U16_MAX) {
+ if (reuse->num_closed_socks) {
+ /* Make room by removing a closed sk.
+ * The child has already been migrated.
+ * Only reqsk left at this point.
+ */
+ struct sock *sk;
+
+ sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
+ RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
+ __reuseport_detach_closed_sock(sk, reuse);
+
+ return reuse;
+ }
+
return NULL;
+ }
more_reuse = __reuseport_alloc(more_socks_size);
if (!more_reuse)
return NULL;
more_reuse->num_socks = reuse->num_socks;
+ more_reuse->num_closed_socks = reuse->num_closed_socks;
more_reuse->prog = reuse->prog;
more_reuse->reuseport_id = reuse->reuseport_id;
more_reuse->bind_inany = reuse->bind_inany;
@@ -105,9 +196,13 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
+ memcpy(more_reuse->socks +
+ (more_reuse->max_socks - more_reuse->num_closed_socks),
+ reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
+ reuse->num_closed_socks * sizeof(struct sock *));
more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);
- for (i = 0; i < reuse->num_socks; ++i)
+ for (i = 0; i < reuse->max_socks; ++i)
rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
more_reuse);
@@ -152,13 +247,21 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock));
+ lockdep_is_held(&reuseport_lock));
+ if (old_reuse && old_reuse->num_closed_socks) {
+ /* sk was shutdown()ed before */
+ int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);
+
+ spin_unlock_bh(&reuseport_lock);
+ return err;
+ }
+
if (old_reuse && old_reuse->num_socks != 1) {
spin_unlock_bh(&reuseport_lock);
return -EBUSY;
}
- if (reuse->num_socks == reuse->max_socks) {
+ if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
reuse = reuseport_grow(reuse);
if (!reuse) {
spin_unlock_bh(&reuseport_lock);
@@ -166,10 +269,7 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
}
}
- reuse->socks[reuse->num_socks] = sk;
- /* paired with smp_rmb() in reuseport_select_sock() */
- smp_wmb();
- reuse->num_socks++;
+ __reuseport_add_sock(sk, reuse);
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
spin_unlock_bh(&reuseport_lock);
@@ -180,15 +280,77 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
}
EXPORT_SYMBOL(reuseport_add_sock);
+static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
+ struct sock_reuseport *reuse, bool bind_inany)
+{
+ if (old_reuse == reuse) {
+ /* If sk was in the same reuseport group, just pop sk out of
+ * the closed section and push sk into the listening section.
+ */
+ __reuseport_detach_closed_sock(sk, old_reuse);
+ __reuseport_add_sock(sk, old_reuse);
+ return 0;
+ }
+
+ if (!reuse) {
+ /* In bind()/listen() path, we cannot carry over the eBPF prog
+ * for the shutdown()ed socket. In setsockopt() path, we should
+ * not change the eBPF prog of listening sockets by attaching a
+ * prog to the shutdown()ed socket. Thus, we will allocate a new
+ * reuseport group and detach sk from the old group.
+ */
+ int id;
+
+ reuse = __reuseport_alloc(INIT_SOCKS);
+ if (!reuse)
+ return -ENOMEM;
+
+ id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
+ if (id < 0) {
+ kfree(reuse);
+ return id;
+ }
+
+ reuse->reuseport_id = id;
+ reuse->bind_inany = bind_inany;
+ } else {
+ /* Move sk from the old group to the new one if
+ * - all the other listeners in the old group were close()d or
+ * shutdown()ed, and then sk2 has listen()ed on the same port
+ * OR
+ * - sk listen()ed without bind() (or with autobind), was
+ * shutdown()ed, and then listen()s on another port which
+ * sk2 listen()s on.
+ */
+ if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
+ reuse = reuseport_grow(reuse);
+ if (!reuse)
+ return -ENOMEM;
+ }
+ }
+
+ __reuseport_detach_closed_sock(sk, old_reuse);
+ __reuseport_add_sock(sk, reuse);
+ rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
+
+ if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
+ call_rcu(&old_reuse->rcu, reuseport_free_rcu);
+
+ return 0;
+}
+
void reuseport_detach_sock(struct sock *sk)
{
struct sock_reuseport *reuse;
- int i;
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
+ /* reuseport_grow() has detached a closed sk */
+ if (!reuse)
+ goto out;
+
/* Notify the bpf side. The sk may be added to a sockarray
* map. If so, sockarray logic will remove it from the map.
*
@@ -201,19 +363,52 @@ void reuseport_detach_sock(struct sock *sk)
rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
- for (i = 0; i < reuse->num_socks; i++) {
- if (reuse->socks[i] == sk) {
- reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
- reuse->num_socks--;
- if (reuse->num_socks == 0)
- call_rcu(&reuse->rcu, reuseport_free_rcu);
- break;
- }
- }
+ if (!__reuseport_detach_closed_sock(sk, reuse))
+ __reuseport_detach_sock(sk, reuse);
+
+ if (reuse->num_socks + reuse->num_closed_socks == 0)
+ call_rcu(&reuse->rcu, reuseport_free_rcu);
+
+out:
spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
+void reuseport_stop_listen_sock(struct sock *sk)
+{
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ struct sock_reuseport *reuse;
+ struct bpf_prog *prog;
+
+ spin_lock_bh(&reuseport_lock);
+
+ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ prog = rcu_dereference_protected(reuse->prog,
+ lockdep_is_held(&reuseport_lock));
+
+ if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req ||
+ (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
+ /* Migration capable, move sk from the listening section
+ * to the closed section.
+ */
+ bpf_sk_reuseport_detach(sk);
+
+ __reuseport_detach_sock(sk, reuse);
+ __reuseport_add_closed_sock(sk, reuse);
+
+ spin_unlock_bh(&reuseport_lock);
+ return;
+ }
+
+ spin_unlock_bh(&reuseport_lock);
+ }
+
+	/* Not capable of migration; detach immediately */
+ reuseport_detach_sock(sk);
+}
+EXPORT_SYMBOL(reuseport_stop_listen_sock);
+
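The caller of reuseport_stop_listen_sock() is not in this hunk; a hedged sketch of where the TCP teardown path would invoke it (the helper name below is hypothetical, and inet_csk_listen_stop() is the expected real call site):

	/* Illustrative only: park the listener in the closed section so
	 * its pending children can still be migrated, then drain the
	 * accept queue.
	 */
	static void example_listen_teardown(struct sock *sk)
	{
		reuseport_stop_listen_sock(sk);
		/* ... migrate or drop the sockets in the accept queue ... */
	}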
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
struct bpf_prog *prog, struct sk_buff *skb,
int hdr_len)
@@ -244,6 +439,23 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
return reuse->socks[index];
}
+static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
+ u32 hash, u16 num_socks)
+{
+ int i, j;
+
+ i = j = reciprocal_scale(hash, num_socks);
+ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+ i++;
+ if (i >= num_socks)
+ i = 0;
+ if (i == j)
+ return NULL;
+ }
+
+ return reuse->socks[i];
+}
+
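The selection above relies on reciprocal_scale(), the kernel's division-free range reduction:

	/* reciprocal_scale(h, n) == (u32)(((u64)h * n) >> 32), mapping a
	 * 32-bit hash onto [0, n); e.g. reciprocal_scale(0x80000000, 4) == 2.
	 */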
/**
* reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
* @sk: First socket in the group.
@@ -274,32 +486,21 @@ struct sock *reuseport_select_sock(struct sock *sk,
prog = rcu_dereference(reuse->prog);
socks = READ_ONCE(reuse->num_socks);
if (likely(socks)) {
- /* paired with smp_wmb() in reuseport_add_sock() */
+ /* paired with smp_wmb() in __reuseport_add_sock() */
smp_rmb();
if (!prog || !skb)
goto select_by_hash;
if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
- sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
+ sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
else
sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
select_by_hash:
/* no bpf or invalid bpf result: fall back to hash usage */
- if (!sk2) {
- int i, j;
-
- i = j = reciprocal_scale(hash, socks);
- while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
- i++;
- if (i >= socks)
- i = 0;
- if (i == j)
- goto out;
- }
- sk2 = reuse->socks[i];
- }
+ if (!sk2)
+ sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
}
out:
@@ -308,14 +509,90 @@ out:
}
EXPORT_SYMBOL(reuseport_select_sock);
+/**
+ * reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
+ * @sk: close()ed or shutdown()ed socket in the group.
+ * @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
+ * NEW_SYN_RECV request socket during 3WHS.
+ * @skb: skb to run through BPF filter.
+ * Returns a socket (with sk_refcnt +1) that should accept the child socket
+ * (or NULL on error).
+ */
+struct sock *reuseport_migrate_sock(struct sock *sk,
+ struct sock *migrating_sk,
+ struct sk_buff *skb)
+{
+ struct sock_reuseport *reuse;
+ struct sock *nsk = NULL;
+ bool allocated = false;
+ struct bpf_prog *prog;
+ u16 socks;
+ u32 hash;
+
+ rcu_read_lock();
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (!reuse)
+ goto out;
+
+ socks = READ_ONCE(reuse->num_socks);
+ if (unlikely(!socks))
+ goto failure;
+
+ /* paired with smp_wmb() in __reuseport_add_sock() */
+ smp_rmb();
+
+ hash = migrating_sk->sk_hash;
+ prog = rcu_dereference(reuse->prog);
+ if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
+ if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req)
+ goto select_by_hash;
+ goto failure;
+ }
+
+ if (!skb) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ goto failure;
+ allocated = true;
+ }
+
+ nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);
+
+ if (allocated)
+ kfree_skb(skb);
+
+select_by_hash:
+ if (!nsk)
+ nsk = reuseport_select_sock_by_hash(reuse, hash, socks);
+
+ if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
+ nsk = NULL;
+ goto failure;
+ }
+
+out:
+ rcu_read_unlock();
+ return nsk;
+
+failure:
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+ goto out;
+}
+EXPORT_SYMBOL(reuseport_migrate_sock);
+
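reuseport_migrate_sock()'s callers live outside this file; a hedged sketch of the request-socket case (req and req_to_sk() are the usual TCP names, the surrounding control flow is illustrative):

	/* Illustrative: re-home a request socket whose listener went away. */
	struct sock *nsk;

	nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
	if (!nsk)
		goto drop;	/* already counted as LINUX_MIB_TCPMIGRATEREQFAILURE */
	/* ... hand the request over to nsk's accept queue ... */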
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
struct sock_reuseport *reuse;
struct bpf_prog *old_prog;
- if (sk_unhashed(sk) && sk->sk_reuseport) {
- int err = reuseport_alloc(sk, false);
+ if (sk_unhashed(sk)) {
+ int err;
+ if (!sk->sk_reuseport)
+ return -EINVAL;
+
+ err = reuseport_alloc(sk, false);
if (err)
return err;
} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
@@ -341,13 +618,24 @@ int reuseport_detach_prog(struct sock *sk)
struct sock_reuseport *reuse;
struct bpf_prog *old_prog;
- if (!rcu_access_pointer(sk->sk_reuseport_cb))
- return sk->sk_reuseport ? -ENOENT : -EINVAL;
-
old_prog = NULL;
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
+
+ /* reuse must be checked after acquiring the reuseport_lock
+ * because reuseport_grow() can detach a closed sk.
+ */
+ if (!reuse) {
+ spin_unlock_bh(&reuseport_lock);
+ return sk->sk_reuseport ? -ENOENT : -EINVAL;
+ }
+
+ if (sk_unhashed(sk) && reuse->num_closed_socks) {
+ spin_unlock_bh(&reuseport_lock);
+ return -ENOENT;
+ }
+
old_prog = rcu_replace_pointer(reuse->prog, old_prog,
lockdep_is_held(&reuseport_lock));
spin_unlock_bh(&reuseport_lock);
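For reference, a minimal BPF program of the BPF_SK_REUSEPORT_SELECT_OR_MIGRATE flavour that the code above dispatches to. This is an illustration, not part of the diff: the map name and the fallback policy are arbitrary choices.

/* When md->migrating_sk is set, the kernel is asking where to migrate a
 * child or request socket from a closed listener; otherwise this is an
 * ordinary SO_REUSEPORT selection.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} target_map SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
	__u32 zero = 0;

	if (!md->migrating_sk)
		return SK_PASS;	/* normal selection: fall back to hash */

	if (bpf_sk_select_reuseport(md, &target_map, &zero, 0))
		return SK_DROP;	/* counted as a migration failure */

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";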
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 858276e72c68..cc92ccb38432 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -113,8 +113,13 @@ static void mem_allocator_disconnect(void *allocator)
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
struct xdp_mem_allocator *xa;
+ int type = xdp_rxq->mem.type;
int id = xdp_rxq->mem.id;
+ /* Reset mem info to defaults */
+ xdp_rxq->mem.id = 0;
+ xdp_rxq->mem.type = 0;
+
if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
WARN(1, "Missing register, driver bug");
return;
@@ -123,7 +128,7 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
if (id == 0)
return;
- if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
+ if (type == MEM_TYPE_PAGE_POOL) {
rcu_read_lock();
xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
page_pool_destroy(xa->page_pool);
@@ -144,10 +149,6 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
xdp_rxq->dev = NULL;
-
- /* Reset mem info to defaults */
- xdp_rxq->mem.id = 0;
- xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
@@ -584,3 +585,31 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
+
+struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
+{
+ unsigned int headroom, totalsize;
+ struct xdp_frame *nxdpf;
+ struct page *page;
+ void *addr;
+
+ headroom = xdpf->headroom + sizeof(*xdpf);
+ totalsize = headroom + xdpf->len;
+
+ if (unlikely(totalsize > PAGE_SIZE))
+ return NULL;
+ page = dev_alloc_page();
+ if (!page)
+ return NULL;
+ addr = page_to_virt(page);
+
+ memcpy(addr, xdpf, totalsize);
+
+ nxdpf = addr;
+ nxdpf->data = addr + headroom;
+ nxdpf->frame_sz = PAGE_SIZE;
+ nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ nxdpf->mem.id = 0;
+
+ return nxdpf;
+}
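xdpf_clone() gains its first user elsewhere in this series (multi-destination XDP redirect); a hedged sketch of the intended pattern, with bq_enqueue() standing in for whatever enqueue primitive the real caller uses:

	/* Illustrative: every destination but the last needs its own copy,
	 * since each device consumes the frame it is handed.
	 */
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (unlikely(!nxdpf))
		return -ENOMEM;
	bq_enqueue(dev, nxdpf, dev_rx);	/* hypothetical enqueue helper */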
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 653e3bc9c87b..b441ab330fd3 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1381,7 +1381,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
if (!skb)
- return -ENOBUFS;
+ return -ENOMEM;
if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
err = dcbnl_ieee_fill(skb, dev);
@@ -1781,7 +1781,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
nlh->nlmsg_flags, &reply_nlh);
if (!reply_skb)
- return -ENOBUFS;
+ return -ENOMEM;
ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
if (ret < 0) {
@@ -2075,8 +2075,6 @@ EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
static int __init dcbnl_init(void)
{
- INIT_LIST_HEAD(&dcb_app_list);
-
rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index e2a337fa9ff7..92a8c6bea316 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -688,6 +688,7 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
/**
* tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100%
+ * @loss_event_rate: loss event rate to invert
* When @loss_event_rate is large, there is a chance that p is truncated to 0.
* To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0.
*/
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ffc601a3b329..0ea29270d7e5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -329,7 +329,7 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
dccp_done(sk);
} else
@@ -356,7 +356,7 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
} else /* Only an error on timeout */
sk->sk_err_soft = err;
out:
@@ -977,7 +977,6 @@ static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
- .netns_ok = 1,
.icmp_strict_tag_validation = 1,
};
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6f5304db5a67..fa663518fa0e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -172,7 +172,7 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
* Wake people up to see the error
* (see connect in sock.c)
*/
- sk->sk_error_report(sk);
+ sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
@@ -181,7 +181,7 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
} else
sk->sk_err_soft = err;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 6d705d90c614..7eb0fb231940 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -302,7 +302,7 @@ int dccp_disconnect(struct sock *sk, int flags)
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return 0;
}
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index db768f223ef7..27a3b37acd2e 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -20,7 +20,7 @@ int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;
static void dccp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
dccp_done(sk);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 1a12912b88d6..7ab788f41a3f 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -870,7 +870,7 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
/*
* Read out ack data here, this applies equally
- * to data, other data, link serivce and both
+ * to data, other data, link service and both
* ack data and ack otherdata.
*/
dn_process_ack(sk, skb, other);
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 00f2ed721ec1..eadc89583168 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -179,7 +179,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
scp->nsp_srtt = 1;
/*
- * Add new rtt varience to smoothed varience
+	 * Add new rtt variance to smoothed variance
*/
delta >>= 1;
rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 32b1bed8ae51..729d3de6020d 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -604,7 +604,7 @@ drop_it:
static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
{
/*
- * I know we drop the packet here, but thats considered success in
+ * I know we drop the packet here, but that's considered success in
* this case
*/
kfree_skb(skb);
diff --git a/net/devres.c b/net/devres.c
index 1f9be2133787..5ccf6ca311dc 100644
--- a/net/devres.c
+++ b/net/devres.c
@@ -60,7 +60,7 @@ static int netdev_devres_match(struct device *dev, void *this, void *match_data)
* @ndev: device to register
*
* This is a devres variant of register_netdev() for which the unregister
- * function will be call automatically when the managing device is
+ * function will be called automatically when the managing device is
* detached. Note: the net_device used must also be resource managed by
* the same struct device.
*/
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index b71e87909f0e..185629f27f80 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -219,21 +219,6 @@ static void dsa_tree_put(struct dsa_switch_tree *dst)
kref_put(&dst->refcount, dsa_tree_release);
}
-static bool dsa_port_is_dsa(struct dsa_port *port)
-{
- return port->type == DSA_PORT_TYPE_DSA;
-}
-
-static bool dsa_port_is_cpu(struct dsa_port *port)
-{
- return port->type == DSA_PORT_TYPE_CPU;
-}
-
-static bool dsa_port_is_user(struct dsa_port *dp)
-{
- return dp->type == DSA_PORT_TYPE_USER;
-}
-
static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
struct device_node *dn)
{
@@ -363,6 +348,9 @@ static int dsa_port_setup(struct dsa_port *dp)
if (dp->setup)
return 0;
+ INIT_LIST_HEAD(&dp->fdbs);
+ INIT_LIST_HEAD(&dp->mdbs);
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -458,6 +446,7 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
@@ -483,6 +472,16 @@ static void dsa_port_teardown(struct dsa_port *dp)
break;
}
+ list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
+ list_del(&a->list);
+ kfree(a);
+ }
+
dp->setup = false;
}
@@ -1259,6 +1258,13 @@ static int dsa_switch_parse_member_of(struct dsa_switch *ds,
if (!ds->dst)
return -ENOMEM;
+ if (dsa_switch_find(ds->dst->index, ds->index)) {
+ dev_err(ds->dev,
+ "A DSA switch with index %d already exists in tree %d\n",
+ ds->index, ds->dst->index);
+ return -EEXIST;
+ }
+
return 0;
}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 92282de54230..f201c33980bf 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -20,6 +20,8 @@ enum {
DSA_NOTIFIER_BRIDGE_LEAVE,
DSA_NOTIFIER_FDB_ADD,
DSA_NOTIFIER_FDB_DEL,
+ DSA_NOTIFIER_HOST_FDB_ADD,
+ DSA_NOTIFIER_HOST_FDB_DEL,
DSA_NOTIFIER_HSR_JOIN,
DSA_NOTIFIER_HSR_LEAVE,
DSA_NOTIFIER_LAG_CHANGE,
@@ -27,6 +29,8 @@ enum {
DSA_NOTIFIER_LAG_LEAVE,
DSA_NOTIFIER_MDB_ADD,
DSA_NOTIFIER_MDB_DEL,
+ DSA_NOTIFIER_HOST_MDB_ADD,
+ DSA_NOTIFIER_HOST_MDB_DEL,
DSA_NOTIFIER_VLAN_ADD,
DSA_NOTIFIER_VLAN_DEL,
DSA_NOTIFIER_MTU,
@@ -84,7 +88,7 @@ struct dsa_notifier_vlan_info {
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
- bool propagate_upstream;
+ bool targeted_match;
int sw_index;
int port;
int mtu;
@@ -112,6 +116,7 @@ struct dsa_notifier_mrp_ring_role_info {
struct dsa_switchdev_event_work {
struct dsa_switch *ds;
int port;
+ struct net_device *dev;
struct work_struct work;
unsigned long event;
/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
@@ -119,6 +124,7 @@ struct dsa_switchdev_event_work {
*/
unsigned char addr[ETH_ALEN];
u16 vid;
+ bool host_addr;
};
/* DSA_NOTIFIER_HSR_* */
@@ -154,6 +160,11 @@ const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
+static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
+{
+ return ops->needed_headroom + ops->needed_tailroom;
+}
+
/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
@@ -183,28 +194,40 @@ void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
struct netlink_ext_ack *extack);
+int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
+ struct netlink_ext_ack *extack);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack);
+int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool propagate_upstream);
+ bool targeted_match);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
+int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 052a977914a6..3fc90e36772d 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -147,8 +147,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count;
- unsigned int i;
+ int mcount = 0, count, i;
uint8_t pfx[4];
uint8_t *ndata;
@@ -178,6 +177,8 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
*/
ds->ops->get_strings(ds, port, stringset, ndata);
count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return;
for (i = 0; i < count; i++) {
memmove(ndata + (i * len + sizeof(pfx)),
ndata + i * len, len - sizeof(pfx));
@@ -345,10 +346,12 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
- int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
+ const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
struct device_link *consumer_link;
- int ret;
+ int mtu, ret;
+
+ mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
/* The DSA master must use SET_NETDEV_DEV for this to work. */
consumer_link = device_link_add(ds->dev, dev->dev.parent,
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 6379d66a6bb3..28b45b7e66df 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -194,26 +194,63 @@ static int dsa_port_switchdev_sync(struct dsa_port *dp,
if (err && err != -EOPNOTSUPP)
return err;
- err = br_mdb_replay(br, brport_dev,
- &dsa_slave_switchdev_blocking_notifier,
- extack);
+ err = br_mdb_replay(br, brport_dev, dp, true,
+ &dsa_slave_switchdev_blocking_notifier, extack);
if (err && err != -EOPNOTSUPP)
return err;
- err = br_fdb_replay(br, brport_dev, &dsa_slave_switchdev_notifier);
+ /* Forwarding and termination FDB entries on the port */
+ err = br_fdb_replay(br, brport_dev, dp, true,
+ &dsa_slave_switchdev_notifier);
if (err && err != -EOPNOTSUPP)
return err;
- err = br_vlan_replay(br, brport_dev,
- &dsa_slave_switchdev_blocking_notifier,
- extack);
+ /* Termination FDB entries on the bridge itself */
+ err = br_fdb_replay(br, br, dp, true, &dsa_slave_switchdev_notifier);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ err = br_vlan_replay(br, brport_dev, dp, true,
+ &dsa_slave_switchdev_blocking_notifier, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ return 0;
+}
+
+static int dsa_port_switchdev_unsync_objs(struct dsa_port *dp,
+ struct net_device *br,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
+ int err;
+
+ /* Delete the switchdev objects left on this port */
+ err = br_mdb_replay(br, brport_dev, dp, false,
+ &dsa_slave_switchdev_blocking_notifier, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ /* Forwarding and termination FDB entries on the port */
+ err = br_fdb_replay(br, brport_dev, dp, false,
+ &dsa_slave_switchdev_notifier);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ /* Termination FDB entries on the bridge itself */
+ err = br_fdb_replay(br, br, dp, false, &dsa_slave_switchdev_notifier);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ err = br_vlan_replay(br, brport_dev, dp, false,
+ &dsa_slave_switchdev_blocking_notifier, extack);
if (err && err != -EOPNOTSUPP)
return err;
return 0;
}
-static void dsa_port_switchdev_unsync(struct dsa_port *dp)
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
/* Configure the port for standalone mode (no address learning,
* flood everything).
@@ -279,6 +316,12 @@ out_rollback:
return err;
}
+int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
+ struct netlink_ext_ack *extack)
+{
+ return dsa_port_switchdev_unsync_objs(dp, br, extack);
+}
+
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
@@ -298,7 +341,7 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
if (err)
pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
- dsa_port_switchdev_unsync(dp);
+ dsa_port_switchdev_unsync_attrs(dp);
}
int dsa_port_lag_change(struct dsa_port *dp,
@@ -366,6 +409,15 @@ err_lag_join:
return err;
}
+int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag,
+ struct netlink_ext_ack *extack)
+{
+ if (dp->bridge_dev)
+ return dsa_port_pre_bridge_leave(dp, dp->bridge_dev, extack);
+
+ return 0;
+}
+
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
struct dsa_notifier_lag_info info = {
@@ -567,11 +619,11 @@ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
}
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool propagate_upstream)
+ bool targeted_match)
{
struct dsa_notifier_mtu_info info = {
.sw_index = dp->ds->index,
- .propagate_upstream = propagate_upstream,
+ .targeted_match = targeted_match,
.port = dp->index,
.mtu = new_mtu,
};
@@ -606,6 +658,44 @@ int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}
+int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .addr = addr,
+ .vid = vid,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dev_uc_add(cpu_dp->master, addr);
+ if (err)
+ return err;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
+}
+
+int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .addr = addr,
+ .vid = vid,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dev_uc_del(cpu_dp->master, addr);
+ if (err)
+ return err;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
+}
+
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
struct dsa_switch *ds = dp->ds;
@@ -641,6 +731,42 @@ int dsa_port_mdb_del(const struct dsa_port *dp,
return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}
+int dsa_port_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .mdb = mdb,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dev_mc_add(cpu_dp->master, mdb->addr);
+ if (err)
+ return err;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
+}
+
+int dsa_port_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .mdb = mdb,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dev_mc_del(cpu_dp->master, mdb->addr);
+ if (err)
+ return err;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
+}
+
int dsa_port_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8c0f3c6ab365..ffbba1e71551 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -271,13 +271,16 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
-static int dsa_slave_port_attr_set(struct net_device *dev,
+static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int ret;
+ if (ctx && ctx != dp)
+ return 0;
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
@@ -394,13 +397,16 @@ static int dsa_slave_vlan_add(struct net_device *dev,
return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
}
-static int dsa_slave_port_obj_add(struct net_device *dev,
+static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int err;
+ if (ctx && ctx != dp)
+ return 0;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_MDB:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -412,10 +418,7 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
return -EOPNOTSUPP;
- /* DSA can directly translate this to a normal MDB add,
- * but on the CPU port.
- */
- err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -469,12 +472,15 @@ static int dsa_slave_vlan_del(struct net_device *dev,
return 0;
}
-static int dsa_slave_port_obj_del(struct net_device *dev,
+static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int err;
+ if (ctx && ctx != dp)
+ return 0;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_MDB:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -486,10 +492,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
return -EOPNOTSUPP;
- /* DSA can directly translate this to a normal MDB add,
- * but on the CPU port.
- */
- err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -776,13 +779,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
- int count;
+ int count = 0;
- count = 4;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, dp->index, sset);
+ if (ds->ops->get_sset_count) {
+ count = ds->ops->get_sset_count(ds, dp->index, sset);
+ if (count < 0)
+ return count;
+ }
- return count;
+ return count + 4;
} else if (sset == ETH_SS_TEST) {
return net_selftest_get_count();
}
@@ -1526,6 +1531,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
+ struct dsa_port *dp_iter;
struct dsa_port *cpu_dp;
int port = p->dp->index;
int largest_mtu = 0;
@@ -1533,31 +1539,31 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
int old_master_mtu;
int mtu_limit;
int cpu_mtu;
- int err, i;
+ int err;
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
- for (i = 0; i < ds->num_ports; i++) {
+ list_for_each_entry(dp_iter, &ds->dst->ports, list) {
int slave_mtu;
- if (!dsa_is_user_port(ds, i))
+ if (!dsa_port_is_user(dp_iter))
continue;
/* During probe, this function will be called for each slave
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
- if (!dsa_to_port(ds, i)->slave)
+ if (!dp_iter->slave)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
- if (i == port)
+ if (dp_iter == dp)
slave_mtu = new_mtu;
else
- slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+ slave_mtu = dp_iter->slave->mtu;
if (largest_mtu < slave_mtu)
largest_mtu = slave_mtu;
@@ -1567,7 +1573,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
old_master_mtu = master->mtu;
- new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
+ new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
if (new_master_mtu > mtu_limit)
return -ERANGE;
@@ -1583,14 +1589,15 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
goto out_master_failed;
/* We only need to propagate the MTU of the CPU port to
- * upstream switches.
+ * upstream switches, so create a non-targeted notifier which
+ * updates all switches.
*/
- err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+ err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
if (err)
goto out_cpu_failed;
}
- err = dsa_port_mtu_change(dp, new_mtu, false);
+ err = dsa_port_mtu_change(dp, new_mtu, true);
if (err)
goto out_port_failed;
@@ -1603,8 +1610,8 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
out_port_failed:
if (new_master_mtu != old_master_mtu)
dsa_port_mtu_change(cpu_dp, old_master_mtu -
- cpu_dp->tag_ops->overhead,
- true);
+ dsa_tag_protocol_overhead(cpu_dp->tag_ops),
+ false);
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
dev_set_mtu(master, old_master_mtu);
@@ -1638,27 +1645,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.self_test = dsa_slave_net_selftest,
};
-/* legacy way, bypassing the bridge *****************************************/
-static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid,
- u16 flags,
- struct netlink_ext_ack *extack)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_fdb_add(dp, addr, vid);
-}
-
-static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_fdb_del(dp, addr, vid);
-}
-
static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -1700,8 +1686,6 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_fdb_add = dsa_legacy_fdb_add,
- .ndo_fdb_del = dsa_legacy_fdb_del,
.ndo_fdb_dump = dsa_slave_fdb_dump,
.ndo_do_ioctl = dsa_slave_ioctl,
.ndo_get_iflink = dsa_slave_get_iflink,
@@ -1747,7 +1731,8 @@ static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
}
/* slave device setup *******************************************************/
-static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
+static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
+ u32 flags)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct dsa_switch *ds = dp->ds;
@@ -1758,6 +1743,8 @@ static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
return -ENODEV;
}
+ slave_dev->phydev->dev_flags |= flags;
+
return phylink_connect_phy(dp->pl, slave_dev->phydev);
}
@@ -1802,7 +1789,7 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
/* We could not connect to a designated PHY or SFP, so try to
* use the switch internal MDIO bus instead
*/
- ret = dsa_slave_phy_connect(slave_dev, dp->index);
+ ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
if (ret) {
netdev_err(slave_dev,
"failed to connect to port %d: %d\n",
@@ -1822,10 +1809,8 @@ void dsa_slave_setup_tagger(struct net_device *slave)
const struct dsa_port *cpu_dp = dp->cpu_dp;
struct net_device *master = cpu_dp->master;
- if (cpu_dp->tag_ops->tail_tag)
- slave->needed_tailroom = cpu_dp->tag_ops->overhead;
- else
- slave->needed_headroom = cpu_dp->tag_ops->overhead;
+ slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
+ slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
/* Try to save one extra realloc later in the TX path (in the master)
* by also inheriting the master's needed headroom and tailroom.
* The 8021q driver also does this.
@@ -2063,6 +2048,26 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
+static int dsa_slave_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ err = dsa_port_pre_bridge_leave(dp, info->upper_dev, extack);
+ else if (netif_is_lag_master(info->upper_dev) && !info->linking)
+ err = dsa_port_pre_lag_leave(dp, info->upper_dev, extack);
+ /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
+ * meaningfully enslaved to a bridge yet
+ */
+
+ return notifier_from_errno(err);
+}
+
static int
dsa_slave_lag_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
@@ -2089,6 +2094,35 @@ dsa_slave_lag_changeupper(struct net_device *dev,
return err;
}
+/* Same as dsa_slave_lag_changeupper() except that it calls
+ * dsa_slave_prechangeupper()
+ */
+static int
+dsa_slave_lag_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+ int err = NOTIFY_DONE;
+ struct dsa_port *dp;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!dsa_slave_dev_check(lower))
+ continue;
+
+ dp = dsa_slave_to_port(lower);
+ if (!dp->lag_dev)
+ /* Software LAG */
+ continue;
+
+ err = dsa_slave_prechangeupper(lower, info);
+ if (notifier_to_errno(err))
+ break;
+ }
+
+ return err;
+}
+
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
@@ -2152,6 +2186,32 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
return NOTIFY_DONE;
}
+static int
+dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
+ int err;
+
+ if (!dsa_slave_dev_check(dev))
+ return dsa_prevent_bridging_8021q_upper(dev, info);
+
+ dp = dsa_slave_to_port(dev);
+ ds = dp->ds;
+
+ if (ds->ops->port_prechangeupper) {
+ err = ds->ops->port_prechangeupper(ds, dp->index, info);
+ if (err)
+ return notifier_from_errno(err);
+ }
+
+ if (is_vlan_dev(info->upper_dev))
+ return dsa_slave_check_8021q_upper(dev, info);
+
+ return NOTIFY_DONE;
+}
+
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2160,24 +2220,18 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
- struct dsa_switch *ds;
- struct dsa_port *dp;
int err;
- if (!dsa_slave_dev_check(dev))
- return dsa_prevent_bridging_8021q_upper(dev, ptr);
+ err = dsa_slave_prechangeupper_sanity_check(dev, info);
+ if (err != NOTIFY_DONE)
+ return err;
- dp = dsa_slave_to_port(dev);
- ds = dp->ds;
+ if (dsa_slave_dev_check(dev))
+ return dsa_slave_prechangeupper(dev, ptr);
- if (ds->ops->port_prechangeupper) {
- err = ds->ops->port_prechangeupper(ds, dp->index, info);
- if (err)
- return notifier_from_errno(err);
- }
+ if (netif_is_lag_master(dev))
+ return dsa_slave_lag_prechangeupper(dev, ptr);
- if (is_vlan_dev(info->upper_dev))
- return dsa_slave_check_8021q_upper(dev, ptr);
break;
}
case NETDEV_CHANGEUPPER:
@@ -2261,8 +2315,12 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
rtnl_lock();
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
- err = dsa_port_fdb_add(dp, switchdev_work->addr,
- switchdev_work->vid);
+ if (switchdev_work->host_addr)
+ err = dsa_port_host_fdb_add(dp, switchdev_work->addr,
+ switchdev_work->vid);
+ else
+ err = dsa_port_fdb_add(dp, switchdev_work->addr,
+ switchdev_work->vid);
if (err) {
dev_err(ds->dev,
"port %d failed to add %pM vid %d to fdb: %d\n",
@@ -2274,8 +2332,12 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
- err = dsa_port_fdb_del(dp, switchdev_work->addr,
- switchdev_work->vid);
+ if (switchdev_work->host_addr)
+ err = dsa_port_host_fdb_del(dp, switchdev_work->addr,
+ switchdev_work->vid);
+ else
+ err = dsa_port_fdb_del(dp, switchdev_work->addr,
+ switchdev_work->vid);
if (err) {
dev_err(ds->dev,
"port %d failed to delete %pM vid %d from fdb: %d\n",
@@ -2287,9 +2349,8 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
}
rtnl_unlock();
+ dev_put(switchdev_work->dev);
kfree(switchdev_work);
- if (dsa_is_user_port(ds, dp->index))
- dev_put(dp->slave);
}
static int dsa_lower_dev_walk(struct net_device *lower_dev,
@@ -2321,6 +2382,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
const struct switchdev_notifier_fdb_info *fdb_info;
struct dsa_switchdev_event_work *switchdev_work;
+ bool host_addr = false;
struct dsa_port *dp;
int err;
@@ -2335,19 +2397,28 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
fdb_info = ptr;
if (dsa_slave_dev_check(dev)) {
- if (!fdb_info->added_by_user || fdb_info->is_local)
- return NOTIFY_OK;
-
dp = dsa_slave_to_port(dev);
+
+ if (fdb_info->is_local)
+ host_addr = true;
+ else if (!fdb_info->added_by_user)
+ return NOTIFY_OK;
} else {
- /* Snoop addresses learnt on foreign interfaces
- * bridged with us, for switches that don't
- * automatically learn SA from CPU-injected traffic
+ /* Snoop addresses added to foreign interfaces
+ * bridged with us, or the bridge
+ * itself. Dynamically learned addresses can
+ * also be added for switches that don't
+ * automatically learn SA from CPU-injected
+ * traffic.
*/
struct net_device *br_dev;
struct dsa_slave_priv *p;
- br_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (netif_is_bridge_master(dev))
+ br_dev = dev;
+ else
+ br_dev = netdev_master_upper_dev_get_rcu(dev);
+
if (!br_dev)
return NOTIFY_DONE;
@@ -2358,17 +2429,30 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
if (!p)
return NOTIFY_DONE;
- dp = p->dp->cpu_dp;
+ dp = p->dp;
+ host_addr = fdb_info->is_local;
- if (!dp->ds->assisted_learning_on_cpu_port)
+ /* FDB entries learned by the software bridge should
+ * be installed as host addresses only if the driver
+ * requests assisted learning.
+ * On the other hand, FDB entries for local termination
+ * should always be installed.
+ */
+ if (!fdb_info->added_by_user && !fdb_info->is_local &&
+ !dp->ds->assisted_learning_on_cpu_port)
return NOTIFY_DONE;
/* When the bridge learns an address on an offloaded
* LAG we don't want to send traffic to the CPU, the
* other ports bridged with the LAG should be able to
* autonomously forward towards it.
+ * On the other hand, if the address is local
+ * (therefore not learned) then we want to trap it to
+ * the CPU regardless of whether the interface it
+ * belongs to is offloaded or not.
*/
- if (dsa_tree_offloads_bridge_port(dp->ds->dst, dev))
+ if (dsa_tree_offloads_bridge_port(dp->ds->dst, dev) &&
+ !fdb_info->is_local)
return NOTIFY_DONE;
}
@@ -2384,14 +2468,15 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
switchdev_work->ds = dp->ds;
switchdev_work->port = dp->index;
switchdev_work->event = event;
+ switchdev_work->dev = dev;
ether_addr_copy(switchdev_work->addr,
fdb_info->addr);
switchdev_work->vid = fdb_info->vid;
+ switchdev_work->host_addr = host_addr;
- /* Hold a reference on the slave for dsa_fdb_offload_notify */
- if (dsa_is_user_port(dp->ds, dp->index))
- dev_hold(dev);
+ /* Hold a reference for dsa_fdb_offload_notify */
+ dev_hold(dev);
dsa_schedule_work(&switchdev_work->work);
break;
default:
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 9bf8e20ecdf3..af71b8638098 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -52,10 +52,13 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
struct dsa_notifier_mtu_info *info)
{
- if (ds->index == info->sw_index)
- return (port == info->port) || dsa_is_dsa_port(ds, port);
+ if (ds->index == info->sw_index && port == info->port)
+ return true;
- if (!info->propagate_upstream)
+ /* Do not propagate to other switches in the tree if the notifier was
+ * targeted for a single switch.
+ */
+ if (info->targeted_match)
return false;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
@@ -151,6 +154,214 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
return 0;
}
+/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
+ * DSA links) that sit between the targeted port on which the notifier was
+ * emitted and its dedicated CPU port.
+ */
+static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
+ int info_sw_index, int info_port)
+{
+ struct dsa_port *targeted_dp, *cpu_dp;
+ struct dsa_switch *targeted_ds;
+
+ targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
+ targeted_dp = dsa_to_port(targeted_ds, info_port);
+ cpu_dp = targeted_dp->cpu_dp;
+
+ if (dsa_switch_is_upstream_of(ds, targeted_ds))
+ return port == dsa_towards_port(ds, cpu_dp->ds->index,
+ cpu_dp->index);
+
+ return false;
+}
+
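A worked example of the match above, on a hypothetical topology:

	/* Assume a daisy chain CPU <-> sw0 <-> sw1, with the notifier
	 * emitted for sw1 port 2.  Then:
	 *  - on sw1, dsa_towards_port() yields its upstream DSA link,
	 *    so that port matches;
	 *  - on sw0, it yields the CPU port itself, so that matches;
	 *  - sw1 port 2 never matches, keeping the host address off the
	 *    user port and only on the path towards the host.
	 */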
+static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_mac_addr *a;
+
+ list_for_each_entry(a, addr_list, list)
+ if (ether_addr_equal(a->addr, addr) && a->vid == vid)
+ return a;
+
+ return NULL;
+}
+
+static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_mdb_add(ds, port, mdb);
+
+ a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
+ if (a) {
+ refcount_inc(&a->refcount);
+ return 0;
+ }
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ err = ds->ops->port_mdb_add(ds, port, mdb);
+ if (err) {
+ kfree(a);
+ return err;
+ }
+
+ ether_addr_copy(a->addr, mdb->addr);
+ a->vid = mdb->vid;
+ refcount_set(&a->refcount, 1);
+ list_add_tail(&a->list, &dp->mdbs);
+
+ return 0;
+}
+
+static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_mdb_del(ds, port, mdb);
+
+ a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
+ if (!a)
+ return -ENOENT;
+
+ if (!refcount_dec_and_test(&a->refcount))
+ return 0;
+
+ err = ds->ops->port_mdb_del(ds, port, mdb);
+ if (err) {
+ refcount_inc(&a->refcount);
+ return err;
+ }
+
+ list_del(&a->list);
+ kfree(a);
+
+ return 0;
+}
+
+static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_fdb_add(ds, port, addr, vid);
+
+ a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
+ if (a) {
+ refcount_inc(&a->refcount);
+ return 0;
+ }
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ err = ds->ops->port_fdb_add(ds, port, addr, vid);
+ if (err) {
+ kfree(a);
+ return err;
+ }
+
+ ether_addr_copy(a->addr, addr);
+ a->vid = vid;
+ refcount_set(&a->refcount, 1);
+ list_add_tail(&a->list, &dp->fdbs);
+
+ return 0;
+}
+
+static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_fdb_del(ds, port, addr, vid);
+
+ a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
+ if (!a)
+ return -ENOENT;
+
+ if (!refcount_dec_and_test(&a->refcount))
+ return 0;
+
+ err = ds->ops->port_fdb_del(ds, port, addr, vid);
+ if (err) {
+ refcount_inc(&a->refcount);
+ return err;
+ }
+
+ list_del(&a->list);
+ kfree(a);
+
+ return 0;
+}
+
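The effect of the refcounting above, spelled out for a shared CPU port (the port, address and vid below are arbitrary):

	/* Two user ports behind the same CPU port request one host address: */
	dsa_switch_do_fdb_add(ds, cpu, addr, vid);	/* hw write, refcount = 1 */
	dsa_switch_do_fdb_add(ds, cpu, addr, vid);	/* refcount = 2, no hw access */
	dsa_switch_do_fdb_del(ds, cpu, addr, vid);	/* refcount = 1, entry kept */
	dsa_switch_do_fdb_del(ds, cpu, addr, vid);	/* refcount = 0, hw delete */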
+static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ int err = 0;
+ int port;
+
+ if (!ds->ops->port_fdb_add)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_host_address_match(ds, port, info->sw_index,
+ info->port)) {
+ err = dsa_switch_do_fdb_add(ds, port, info->addr,
+ info->vid);
+ if (err)
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ int err = 0;
+ int port;
+
+ if (!ds->ops->port_fdb_del)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_host_address_match(ds, port, info->sw_index,
+ info->port)) {
+ err = dsa_switch_do_fdb_del(ds, port, info->addr,
+ info->vid);
+ if (err)
+ break;
+ }
+ }
+
+ return err;
+}
+
static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
@@ -159,7 +370,7 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds,
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
+ return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
@@ -170,7 +381,7 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds,
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+ return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
}
static int dsa_switch_hsr_join(struct dsa_switch *ds,
@@ -232,21 +443,31 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
return 0;
}
-static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_mdb_info *info)
+static int dsa_switch_mdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
{
- if (ds->index == info->sw_index && port == info->port)
- return true;
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
- if (dsa_is_dsa_port(ds, port))
- return true;
+ if (!ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
- return false;
+ return dsa_switch_do_mdb_add(ds, port, info->mdb);
}
-static int dsa_switch_mdb_add(struct dsa_switch *ds,
+static int dsa_switch_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
+
+ if (!ds->ops->port_mdb_del)
+ return -EOPNOTSUPP;
+
+ return dsa_switch_do_mdb_del(ds, port, info->mdb);
+}
+
+static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
int err = 0;
int port;
@@ -254,8 +475,9 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
return -EOPNOTSUPP;
for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_mdb_match(ds, port, info)) {
- err = ds->ops->port_mdb_add(ds, port, info->mdb);
+ if (dsa_switch_host_address_match(ds, port, info->sw_index,
+ info->port)) {
+ err = dsa_switch_do_mdb_add(ds, port, info->mdb);
if (err)
break;
}
@@ -264,16 +486,25 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
return err;
}
-static int dsa_switch_mdb_del(struct dsa_switch *ds,
- struct dsa_notifier_mdb_info *info)
+static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
{
+ int err = 0;
+ int port;
+
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
- if (ds->index == info->sw_index)
- return ds->ops->port_mdb_del(ds, info->port, info->mdb);
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_host_address_match(ds, port, info->sw_index,
+ info->port)) {
+ err = dsa_switch_do_mdb_del(ds, port, info->mdb);
+ if (err)
+ break;
+ }
+ }
- return 0;
+ return err;
}
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
@@ -364,36 +595,16 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
return 0;
}
-static bool dsa_switch_mrp_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_mrp_info *info)
-{
- if (ds->index == info->sw_index && port == info->port)
- return true;
-
- if (dsa_is_dsa_port(ds, port))
- return true;
-
- return false;
-}
-
static int dsa_switch_mrp_add(struct dsa_switch *ds,
struct dsa_notifier_mrp_info *info)
{
- int err = 0;
- int port;
-
if (!ds->ops->port_mrp_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_mrp_match(ds, port, info)) {
- err = ds->ops->port_mrp_add(ds, port, info->mrp);
- if (err)
- break;
- }
- }
+ if (ds->index == info->sw_index)
+ return ds->ops->port_mrp_add(ds, info->port, info->mrp);
- return err;
+ return 0;
}
static int dsa_switch_mrp_del(struct dsa_switch *ds,
@@ -408,39 +619,18 @@ static int dsa_switch_mrp_del(struct dsa_switch *ds,
return 0;
}
-static bool
-dsa_switch_mrp_ring_role_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_mrp_ring_role_info *info)
-{
- if (ds->index == info->sw_index && port == info->port)
- return true;
-
- if (dsa_is_dsa_port(ds, port))
- return true;
-
- return false;
-}
-
static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
struct dsa_notifier_mrp_ring_role_info *info)
{
- int err = 0;
- int port;
-
if (!ds->ops->port_mrp_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_mrp_ring_role_match(ds, port, info)) {
- err = ds->ops->port_mrp_add_ring_role(ds, port,
- info->mrp);
- if (err)
- break;
- }
- }
+ if (ds->index == info->sw_index)
+ return ds->ops->port_mrp_add_ring_role(ds, info->port,
+ info->mrp);
- return err;
+ return 0;
}
static int
@@ -479,6 +669,12 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_FDB_DEL:
err = dsa_switch_fdb_del(ds, info);
break;
+ case DSA_NOTIFIER_HOST_FDB_ADD:
+ err = dsa_switch_host_fdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_HOST_FDB_DEL:
+ err = dsa_switch_host_fdb_del(ds, info);
+ break;
case DSA_NOTIFIER_HSR_JOIN:
err = dsa_switch_hsr_join(ds, info);
break;
@@ -500,6 +696,12 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_MDB_DEL:
err = dsa_switch_mdb_del(ds, info);
break;
+ case DSA_NOTIFIER_HOST_MDB_ADD:
+ err = dsa_switch_host_mdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_HOST_MDB_DEL:
+ err = dsa_switch_host_mdb_del(ds, info);
+ break;
case DSA_NOTIFIER_VLAN_ADD:
err = dsa_switch_vlan_add(ds, info);
break;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 008c1ec6e20c..4aa29f90ecea 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -64,7 +64,7 @@
#define DSA_8021Q_SUBVLAN_HI_SHIFT 9
#define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
#define DSA_8021Q_SUBVLAN_LO_SHIFT 4
-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3)
+#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
#define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
#define DSA_8021Q_SUBVLAN(x) \
@@ -471,4 +471,27 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
+void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
+ int *subvlan)
+{
+ u16 vid, tci;
+
+ skb_push_rcsum(skb, ETH_HLEN);
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ __skb_vlan_pop(skb, &tci);
+ }
+ skb_pull_rcsum(skb, ETH_HLEN);
+
+ vid = tci & VLAN_VID_MASK;
+
+ *source_port = dsa_8021q_rx_source_port(vid);
+ *switch_id = dsa_8021q_rx_switch_id(vid);
+ *subvlan = dsa_8021q_rx_subvlan(vid);
+ skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
+
MODULE_LICENSE("GPL v2");
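The mask fix at the top of this file is easy to verify by hand: with DSA_8021Q_SUBVLAN_LO_SHIFT equal to 4, a two-bit field must occupy VID bits 5:4.

	/* GENMASK(5, 4) == 0x30: exactly bits 5:4, matching a field shifted
	 * left by 4.  The old GENMASK(4, 3) == 0x18 sat one bit too low,
	 * straddling bit 3, which belongs to the 4-bit source port field
	 * (bits 3:0), so subvlan and port could corrupt each other.
	 */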
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
index 002cf7f952e2..0efae1a372b3 100644
--- a/net/dsa/tag_ar9331.c
+++ b/net/dsa/tag_ar9331.c
@@ -85,7 +85,7 @@ static const struct dsa_device_ops ar9331_netdev_ops = {
.proto = DSA_TAG_PROTO_AR9331,
.xmit = ar9331_tag_xmit,
.rcv = ar9331_tag_rcv,
- .overhead = AR9331_HDR_LEN,
+ .needed_headroom = AR9331_HDR_LEN,
};
MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 40e9f3098c8d..0750af951fc9 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -205,7 +205,7 @@ static const struct dsa_device_ops brcm_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM,
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
- .overhead = BRCM_TAG_LEN,
+ .needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_netdev_ops);
@@ -286,7 +286,7 @@ static const struct dsa_device_ops brcm_legacy_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM_LEGACY,
.xmit = brcm_leg_tag_xmit,
.rcv = brcm_leg_tag_rcv,
- .overhead = BRCM_LEG_TAG_LEN,
+ .needed_headroom = BRCM_LEG_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
@@ -314,7 +314,7 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM_PREPEND,
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
- .overhead = BRCM_TAG_LEN,
+ .needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 7e7b7decdf39..a822355afc90 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -303,7 +303,7 @@ static const struct dsa_device_ops dsa_netdev_ops = {
.proto = DSA_TAG_PROTO_DSA,
.xmit = dsa_xmit,
.rcv = dsa_rcv,
- .overhead = DSA_HLEN,
+ .needed_headroom = DSA_HLEN,
};
DSA_TAG_DRIVER(dsa_netdev_ops);
@@ -346,7 +346,7 @@ static const struct dsa_device_ops edsa_netdev_ops = {
.proto = DSA_TAG_PROTO_EDSA,
.xmit = edsa_xmit,
.rcv = edsa_rcv,
- .overhead = EDSA_HLEN,
+ .needed_headroom = EDSA_HLEN,
};
DSA_TAG_DRIVER(edsa_netdev_ops);
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index 2f5bd5e338ab..5985dab06ab8 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -103,7 +103,7 @@ static const struct dsa_device_ops gswip_netdev_ops = {
.proto = DSA_TAG_PROTO_GSWIP,
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
- .overhead = GSWIP_RX_HEADER_LEN,
+ .needed_headroom = GSWIP_RX_HEADER_LEN,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index a09805c8e1ab..424130f85f59 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -54,8 +54,7 @@ static const struct dsa_device_ops hellcreek_netdev_ops = {
.proto = DSA_TAG_PROTO_HELLCREEK,
.xmit = hellcreek_xmit,
.rcv = hellcreek_rcv,
- .overhead = HELLCREEK_TAG_LEN,
- .tail_tag = true,
+ .needed_tailroom = HELLCREEK_TAG_LEN,
};
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 4820dbcedfa2..53565f48934c 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -77,8 +77,7 @@ static const struct dsa_device_ops ksz8795_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ8795,
.xmit = ksz8795_xmit,
.rcv = ksz8795_rcv,
- .overhead = KSZ_INGRESS_TAG_LEN,
- .tail_tag = true,
+ .needed_tailroom = KSZ_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz8795_netdev_ops);
@@ -149,8 +148,7 @@ static const struct dsa_device_ops ksz9477_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9477,
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
- .overhead = KSZ9477_INGRESS_TAG_LEN,
- .tail_tag = true,
+ .needed_tailroom = KSZ9477_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
@@ -183,8 +181,7 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9893,
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
- .overhead = KSZ_INGRESS_TAG_LEN,
- .tail_tag = true,
+ .needed_tailroom = KSZ_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index aa1318dccaf0..26207ef39ebc 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -125,7 +125,7 @@ static const struct dsa_device_ops lan9303_netdev_ops = {
.proto = DSA_TAG_PROTO_LAN9303,
.xmit = lan9303_xmit,
.rcv = lan9303_rcv,
- .overhead = LAN9303_TAG_LEN,
+ .needed_headroom = LAN9303_TAG_LEN,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index f9b2966d1936..cc3ba864ad5b 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -102,7 +102,7 @@ static const struct dsa_device_ops mtk_netdev_ops = {
.proto = DSA_TAG_PROTO_MTK,
.xmit = mtk_tag_xmit,
.rcv = mtk_tag_rcv,
- .overhead = MTK_HDR_LEN,
+ .needed_headroom = MTK_HDR_LEN,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 91f0fd1242cd..190f4bfd3bef 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -143,7 +143,7 @@ static const struct dsa_device_ops ocelot_netdev_ops = {
.proto = DSA_TAG_PROTO_OCELOT,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
- .overhead = OCELOT_TOTAL_TAG_LEN,
+ .needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
@@ -155,7 +155,7 @@ static const struct dsa_device_ops seville_netdev_ops = {
.proto = DSA_TAG_PROTO_SEVILLE,
.xmit = seville_xmit,
.rcv = ocelot_rcv,
- .overhead = OCELOT_TOTAL_TAG_LEN,
+ .needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 62a93303bd63..85ac85c3af8c 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -41,29 +41,15 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
- int src_port, switch_id, qos_class;
- u16 vid, tci;
+ int src_port, switch_id, subvlan;
- skb_push_rcsum(skb, ETH_HLEN);
- if (skb_vlan_tag_present(skb)) {
- tci = skb_vlan_tag_get(skb);
- __vlan_hwaccel_clear_tag(skb);
- } else {
- __skb_vlan_pop(skb, &tci);
- }
- skb_pull_rcsum(skb, ETH_HLEN);
-
- vid = tci & VLAN_VID_MASK;
- src_port = dsa_8021q_rx_source_port(vid);
- switch_id = dsa_8021q_rx_switch_id(vid);
- qos_class = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ dsa_8021q_rcv(skb, &src_port, &switch_id, &subvlan);
skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
if (!skb->dev)
return NULL;
skb->offload_fwd_mark = 1;
- skb->priority = qos_class;
return skb;
}
@@ -73,7 +59,7 @@ static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.proto = DSA_TAG_PROTO_OCELOT_8021Q,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
- .overhead = VLAN_HLEN,
+ .needed_headroom = VLAN_HLEN,
.promisc_on_master = true,
};
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 88181b52f480..693bda013065 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -91,7 +91,7 @@ static const struct dsa_device_ops qca_netdev_ops = {
.proto = DSA_TAG_PROTO_QCA,
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
- .overhead = QCA_HDR_LEN,
+ .needed_headroom = QCA_HDR_LEN,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index cf8ac316f4c7..57c46b4ab2b3 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -124,7 +124,7 @@ static const struct dsa_device_ops rtl4a_netdev_ops = {
.proto = DSA_TAG_PROTO_RTL4_A,
.xmit = rtl4a_tag_xmit,
.rcv = rtl4a_tag_rcv,
- .overhead = RTL4_A_HDR_LEN,
+ .needed_headroom = RTL4_A_HDR_LEN,
};
module_dsa_tag_driver(rtl4a_netdev_ops);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 50496013cdb7..9c2df9ece01b 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -7,6 +7,52 @@
#include <linux/packing.h>
#include "dsa_priv.h"
+/* Is this a TX or an RX header? */
+#define SJA1110_HEADER_HOST_TO_SWITCH BIT(15)
+
+/* RX header */
+#define SJA1110_RX_HEADER_IS_METADATA BIT(14)
+#define SJA1110_RX_HEADER_HOST_ONLY BIT(13)
+#define SJA1110_RX_HEADER_HAS_TRAILER BIT(12)
+
+/* Trap-to-host format (no trailer present) */
+#define SJA1110_RX_HEADER_SRC_PORT(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SJA1110_RX_HEADER_SWITCH_ID(x) ((x) & GENMASK(3, 0))
+
+/* Timestamp format (trailer present) */
+#define SJA1110_RX_HEADER_TRAILER_POS(x) ((x) & GENMASK(11, 0))
+
+#define SJA1110_RX_TRAILER_SWITCH_ID(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SJA1110_RX_TRAILER_SRC_PORT(x) ((x) & GENMASK(3, 0))
+
+/* Meta frame format (for 2-step TX timestamps) */
+#define SJA1110_RX_HEADER_N_TS(x) (((x) & GENMASK(8, 4)) >> 4)
+
+/* TX header */
+#define SJA1110_TX_HEADER_UPDATE_TC BIT(14)
+#define SJA1110_TX_HEADER_TAKE_TS BIT(13)
+#define SJA1110_TX_HEADER_TAKE_TS_CASC BIT(12)
+#define SJA1110_TX_HEADER_HAS_TRAILER BIT(11)
+
+/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
+#define SJA1110_TX_HEADER_PRIO(x) (((x) << 7) & GENMASK(10, 7))
+#define SJA1110_TX_HEADER_TSTAMP_ID(x) ((x) & GENMASK(7, 0))
+
+/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
+#define SJA1110_TX_HEADER_TRAILER_POS(x) ((x) & GENMASK(10, 0))
+
+#define SJA1110_TX_TRAILER_TSTAMP_ID(x) (((x) << 24) & GENMASK(31, 24))
+#define SJA1110_TX_TRAILER_PRIO(x) (((x) << 21) & GENMASK(23, 21))
+#define SJA1110_TX_TRAILER_SWITCHID(x) (((x) << 12) & GENMASK(15, 12))
+#define SJA1110_TX_TRAILER_DESTPORTS(x) (((x) << 1) & GENMASK(11, 1))
+
+#define SJA1110_META_TSTAMP_SIZE 10
+
+#define SJA1110_HEADER_LEN 4
+#define SJA1110_RX_TRAILER_LEN 13
+#define SJA1110_TX_TRAILER_LEN 4
+#define SJA1110_MAX_PADDING_LEN 15
+
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
@@ -140,6 +186,57 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
+static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ struct dsa_port *dp = dsa_slave_to_port(netdev);
+ u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ struct ethhdr *eth_hdr;
+ __be32 *tx_trailer;
+ __be16 *tx_header;
+ int trailer_pos;
+
+ /* Transmitting control packets is done using in-band control
+ * extensions, while data packets are transmitted using
+ * tag_8021q TX VLANs.
+ */
+ if (likely(!sja1105_is_link_local(skb)))
+ return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp->priv),
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+
+ skb_push(skb, SJA1110_HEADER_LEN);
+
+ /* Move Ethernet header to the left, making space for DSA tag */
+ memmove(skb->data, skb->data + SJA1110_HEADER_LEN, 2 * ETH_ALEN);
+
+ trailer_pos = skb->len;
+
+ /* On TX, skb->data points to skb_mac_header(skb) */
+ eth_hdr = (struct ethhdr *)skb->data;
+ tx_header = (__be16 *)(eth_hdr + 1);
+ tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);
+
+ eth_hdr->h_proto = htons(ETH_P_SJA1110);
+
+ *tx_header = htons(SJA1110_HEADER_HOST_TO_SWITCH |
+ SJA1110_TX_HEADER_HAS_TRAILER |
+ SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
+ *tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
+ SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
+ SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
+ if (clone) {
+ u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;
+
+ *tx_header |= htons(SJA1110_TX_HEADER_TAKE_TS);
+ *tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
+ }
+
+ return skb;
+}
+
static void sja1105_transfer_meta(struct sk_buff *skb,
const struct sja1105_meta *meta)
{
@@ -147,7 +244,7 @@ static void sja1105_transfer_meta(struct sk_buff *skb,
hdr->h_dest[3] = meta->dmac_byte_3;
hdr->h_dest[4] = meta->dmac_byte_4;
- SJA1105_SKB_CB(skb)->meta_tstamp = meta->tstamp;
+ SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}
/* This is a simple state machine which follows the hardware mechanism of
@@ -275,46 +372,38 @@ static void sja1105_decode_subvlan(struct sk_buff *skb, u16 subvlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}
+static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
+{
+ u16 tpid = ntohs(eth_hdr(skb)->h_proto);
+
+ return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
+ skb_vlan_tag_present(skb);
+}
+
+static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
+{
+ return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
+}
+
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
+ int source_port, switch_id, subvlan = 0;
struct sja1105_meta meta = {0};
- int source_port, switch_id;
struct ethhdr *hdr;
- u16 tpid, vid, tci;
bool is_link_local;
- u16 subvlan = 0;
- bool is_tagged;
bool is_meta;
hdr = eth_hdr(skb);
- tpid = ntohs(hdr->h_proto);
- is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
- skb_vlan_tag_present(skb));
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
skb->offload_fwd_mark = 1;
- if (is_tagged) {
+ if (sja1105_skb_has_tag_8021q(skb)) {
/* Normal traffic path. */
- skb_push_rcsum(skb, ETH_HLEN);
- if (skb_vlan_tag_present(skb)) {
- tci = skb_vlan_tag_get(skb);
- __vlan_hwaccel_clear_tag(skb);
- } else {
- __skb_vlan_pop(skb, &tci);
- }
- skb_pull_rcsum(skb, ETH_HLEN);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- vid = tci & VLAN_VID_MASK;
- source_port = dsa_8021q_rx_source_port(vid);
- switch_id = dsa_8021q_rx_switch_id(vid);
- skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- subvlan = dsa_8021q_rx_subvlan(vid);
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
} else if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
@@ -346,6 +435,138 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
+static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
+{
+ int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
+ int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
+ struct net_device *master = skb->dev;
+ struct dsa_port *cpu_dp;
+ u8 *buf = skb->data + 2;
+ struct dsa_switch *ds;
+ int i;
+
+ cpu_dp = master->dsa_ptr;
+ ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
+ if (!ds) {
+ net_err_ratelimited("%s: cannot find switch id %d\n",
+ master->name, switch_id);
+ return NULL;
+ }
+
+ for (i = 0; i <= n_ts; i++) {
+ u8 ts_id, source_port, dir;
+ u64 tstamp;
+
+ ts_id = buf[0];
+ source_port = (buf[1] & GENMASK(7, 4)) >> 4;
+ dir = (buf[1] & BIT(3)) >> 3;
+ tstamp = be64_to_cpu(*(__be64 *)(buf + 2));
+
+ sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
+ tstamp);
+
+ buf += SJA1110_META_TSTAMP_SIZE;
+ }
+
+ /* Discard the meta frame, we've consumed the timestamps it contained */
+ return NULL;
+}
+
+static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
+ int *source_port,
+ int *switch_id)
+{
+ u16 rx_header;
+
+ if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
+ return NULL;
+
+ /* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
+ * what we need because the caller has checked the EtherType (which is
+ * located 2 bytes back) and we just need a pointer to the header that
+ * comes afterwards.
+ */
+ rx_header = ntohs(*(__be16 *)skb->data);
+
+ if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
+ return sja1110_rcv_meta(skb, rx_header);
+
+ /* Timestamp frame, we have a trailer */
+ if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
+ int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
+ u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
+ u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
+ u8 last_byte = rx_trailer[12];
+
+ /* The timestamp is unaligned, so we need to use packing()
+ * to get it
+ */
+ packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);
+
+ *source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
+ *switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);
+
+ /* skb->len counts from skb->data, while start_of_padding
+ * counts from the destination MAC address. Right now skb->data
+ * is still as set by the DSA master, so to trim away the
+ * padding and trailer we need to account for the fact that
+ * skb->data points to skb_mac_header(skb) + ETH_HLEN.
+ */
+ pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
+ /* Trap-to-host frame, no timestamp trailer */
+ } else {
+ *source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
+ *switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
+ }
+
+ /* Advance skb->data past the DSA header */
+ skb_pull_rcsum(skb, SJA1110_HEADER_LEN);
+
+ /* Remove the DSA header */
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - SJA1110_HEADER_LEN,
+ 2 * ETH_ALEN);
+
+ /* With skb->data in its final place, update the MAC header
+ * so that eth_hdr() continues to work properly.
+ */
+ skb_set_mac_header(skb, -ETH_HLEN);
+
+ return skb;
+}
+
+static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct packet_type *pt)
+{
+ int source_port = -1, switch_id = -1, subvlan = 0;
+
+ skb->offload_fwd_mark = 1;
+
+ if (sja1110_skb_has_inband_control_extension(skb)) {
+ skb = sja1110_rcv_inband_control_extension(skb, &source_port,
+ &switch_id);
+ if (!skb)
+ return NULL;
+ }
+
+ /* Packets with in-band control extensions might still have RX VLANs */
+ if (likely(sja1105_skb_has_tag_8021q(skb)))
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
+
+ skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ if (!skb->dev) {
+ netdev_warn(netdev,
+ "Couldn't decode source port %d and switch id %d\n",
+ source_port, switch_id);
+ return NULL;
+ }
+
+ if (subvlan)
+ sja1105_decode_subvlan(skb, subvlan);
+
+ return skb;
+}
+
static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
int *offset)
{
@@ -356,18 +577,53 @@ static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
dsa_tag_generic_flow_dissect(skb, proto, offset);
}
+static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ /* Management frames have 2 DSA tags on RX, so the needed_headroom we
+ * declared is fine for the generic dissector adjustment procedure.
+ */
+ if (unlikely(sja1105_is_link_local(skb)))
+ return dsa_tag_generic_flow_dissect(skb, proto, offset);
+
+ /* For the rest, there is a single DSA tag, the tag_8021q one */
+ *offset = VLAN_HLEN;
+ *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
+}
+
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = "sja1105",
.proto = DSA_TAG_PROTO_SJA1105,
.xmit = sja1105_xmit,
.rcv = sja1105_rcv,
.filter = sja1105_filter,
- .overhead = VLAN_HLEN,
+ .needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
.promisc_on_master = true,
};
-MODULE_LICENSE("GPL v2");
+DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);
-module_dsa_tag_driver(sja1105_netdev_ops);
+static const struct dsa_device_ops sja1110_netdev_ops = {
+ .name = "sja1110",
+ .proto = DSA_TAG_PROTO_SJA1110,
+ .xmit = sja1110_xmit,
+ .rcv = sja1110_rcv,
+ .filter = sja1105_filter,
+ .flow_dissect = sja1110_flow_dissect,
+ .needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
+ .needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
+};
+
+DSA_TAG_DRIVER(sja1110_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);
+
+static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
+ &DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
+ &DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
+};
+
+module_dsa_tag_drivers(sja1105_tag_driver_array);
+
+MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 5b97ede56a0f..ba73804340a5 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -55,8 +55,7 @@ static const struct dsa_device_ops trailer_netdev_ops = {
.proto = DSA_TAG_PROTO_TRAILER,
.xmit = trailer_xmit,
.rcv = trailer_rcv,
- .overhead = 4,
- .tail_tag = true,
+ .needed_tailroom = 4,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c
index 858cdf9d2913..a31ff7fcb45f 100644
--- a/net/dsa/tag_xrs700x.c
+++ b/net/dsa/tag_xrs700x.c
@@ -56,8 +56,7 @@ static const struct dsa_device_ops xrs700x_netdev_ops = {
.proto = DSA_TAG_PROTO_XRS700X,
.xmit = xrs700x_xmit,
.rcv = xrs700x_rcv,
- .overhead = 1,
- .tail_tag = true,
+ .needed_tailroom = 1,
};
MODULE_LICENSE("GPL");
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 2a6733a6449a..7e6b37a54add 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
if (dev->sfp_bus)
return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
- if (ops->get_module_info)
+ if (ops->get_module_eeprom_by_page)
return ops->get_module_eeprom_by_page(dev, page_data, extack);
return -EOPNOTSUPP;
@@ -159,9 +159,6 @@ static int eeprom_parse_request(struct ethnl_req_info *req_info, struct nlattr *
request->offset = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_OFFSET]);
request->length = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_LENGTH]);
- if (!request->length)
- return -EINVAL;
-
/* The following set of conditions limit the API to only dump 1/2
* EEPROM page without crossing low page boundary located at offset 128.
* This means user may only request dumps of length limited to 128 from
@@ -180,10 +177,6 @@ static int eeprom_parse_request(struct ethnl_req_info *req_info, struct nlattr *
NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH],
"reading cross half page boundary is illegal");
return -EINVAL;
- } else if (request->offset >= ETH_MODULE_EEPROM_PAGE_LEN * 2) {
- NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_OFFSET],
- "offset is out of bounds");
- return -EINVAL;
} else if (request->offset + request->length > ETH_MODULE_EEPROM_PAGE_LEN * 2) {
NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH],
"reading cross page boundary is illegal");
@@ -236,8 +229,10 @@ const struct ethnl_request_ops ethnl_module_eeprom_request_ops = {
const struct nla_policy ethnl_module_eeprom_get_policy[] = {
[ETHTOOL_A_MODULE_EEPROM_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
- [ETHTOOL_A_MODULE_EEPROM_OFFSET] = { .type = NLA_U32 },
- [ETHTOOL_A_MODULE_EEPROM_LENGTH] = { .type = NLA_U32 },
+ [ETHTOOL_A_MODULE_EEPROM_OFFSET] =
+ NLA_POLICY_MAX(NLA_U32, ETH_MODULE_EEPROM_PAGE_LEN * 2 - 1),
+ [ETHTOOL_A_MODULE_EEPROM_LENGTH] =
+ NLA_POLICY_RANGE(NLA_U32, 1, ETH_MODULE_EEPROM_PAGE_LEN),
[ETHTOOL_A_MODULE_EEPROM_PAGE] = { .type = NLA_U8 },
[ETHTOOL_A_MODULE_EEPROM_BANK] = { .type = NLA_U8 },
[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS] =
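The policy change above is the companion to the two checks deleted from eeprom_parse_request(): bounds that used to be open-coded in the handler now live in the policy table, where the netlink core rejects bad attributes before the handler ever runs. The idiom in isolation, with hypothetical attribute names:

/* Editorial sketch, not from the patch: range validation expressed
 * declaratively. EXAMPLE_ATTR_* are hypothetical attributes.
 */
enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_OFFSET,
	EXAMPLE_ATTR_LENGTH,
	__EXAMPLE_ATTR_MAX,
};

static const struct nla_policy example_policy[__EXAMPLE_ATTR_MAX] = {
	/* offset must stay within the two 128-byte halves of a page */
	[EXAMPLE_ATTR_OFFSET]	= NLA_POLICY_MAX(NLA_U32, 2 * 128 - 1),
	/* zero-length requests never reach the handler */
	[EXAMPLE_ATTR_LENGTH]	= NLA_POLICY_RANGE(NLA_U32, 1, 128),
};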
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 3fa7a394eabf..baa5d10043cb 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
if (eeprom.offset + eeprom.len > total_len)
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
return -EFAULT;
test.len = test_len;
- data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+ data = kcalloc(test_len, sizeof(u64), GFP_USER);
if (!data)
return -ENOMEM;
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_phy_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
if (phy_drv_tunable) {
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 88d8a0243f35..a7346346114f 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -315,9 +315,9 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
struct ethnl_req_info *req_info = NULL;
const u8 cmd = info->genlhdr->cmd;
const struct ethnl_request_ops *ops;
+ int hdr_len, reply_len;
struct sk_buff *rskb;
void *reply_payload;
- int reply_len;
int ret;
ops = ethnl_default_requests[cmd];
@@ -346,15 +346,20 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
ret = ops->reply_size(req_info, reply_data);
if (ret < 0)
goto err_cleanup;
- reply_len = ret + ethnl_reply_header_size();
+ reply_len = ret;
ret = -ENOMEM;
- rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd,
+ rskb = ethnl_reply_init(reply_len + ethnl_reply_header_size(),
+ req_info->dev, ops->reply_cmd,
ops->hdr_attr, info, &reply_payload);
if (!rskb)
goto err_cleanup;
+ hdr_len = rskb->len;
ret = ops->fill_reply(rskb, req_info, reply_data);
if (ret < 0)
goto err_msg;
+ WARN_ONCE(rskb->len - hdr_len > reply_len,
+ "ethnl cmd %d: calculated reply length %d, but consumed %d\n",
+ cmd, reply_len, rskb->len - hdr_len);
if (ops->cleanup_data)
ops->cleanup_data(reply_data);
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 8abcbc10796c..3e25a47fd482 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -138,7 +138,7 @@ static inline void ethnl_update_bool32(u32 *dst, const struct nlattr *attr,
}
/**
- * ethnl_update_binary() - update binary data from NLA_BINARY atribute
+ * ethnl_update_binary() - update binary data from NLA_BINARY attribute
* @dst: value to update
* @len: destination buffer length
* @attr: netlink attribute with new value or null
@@ -380,7 +380,7 @@ extern const struct nla_policy ethnl_cable_test_tdr_act_policy[ETHTOOL_A_CABLE_T
extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INFO_HEADER + 1];
extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
-extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_DATA + 1];
+extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index b7642dc96d50..ec07f5765e03 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -119,7 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
*/
memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
- memset(&data->ctrl_stats, 0xff, sizeof(data->mac_stats));
+ memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index b3029fff715d..2d51b7ab4dc5 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
int len = 0;
int ret;
+ len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
for (i = 0; i < ETH_SS_COUNT; i++) {
const struct strset_info *set_info = &data->sets[i];
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index bfcdc75fc01e..26c32407f029 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (master) {
skb->dev = master->dev;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
hsr_forward_skb(skb, master);
} else {
atomic_long_inc(&dev->tx_dropped);
@@ -259,6 +260,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
goto out;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 6852e9bccf5b..ceb8afb2a62f 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -474,8 +474,8 @@ static void handle_std_frame(struct sk_buff *skb,
}
}
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame)
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
{
struct hsr_port *port = frame->port_rcv;
struct hsr_priv *hsr = port->hsr;
@@ -483,20 +483,26 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return -EINVAL;
+
/* HSR tagged frame :- Data or Supervision */
frame->skb_std = NULL;
frame->skb_prp = NULL;
frame->skb_hsr = skb;
frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
- return;
+ return 0;
}
/* Standard frame or PRP from master port */
handle_std_frame(skb, frame);
+
+ return 0;
}
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame)
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
{
/* Supervision frame */
struct prp_rct *rct = skb_get_PRP_rct(skb);
@@ -507,9 +513,11 @@ void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
frame->skb_std = NULL;
frame->skb_prp = skb;
frame->sequence_nr = prp_get_skb_sequence_nr(rct);
- return;
+ return 0;
}
handle_std_frame(skb, frame);
+
+ return 0;
}
static int fill_frame_info(struct hsr_frame_info *frame,
@@ -519,9 +527,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
struct hsr_vlan_ethhdr *vlan_hdr;
struct ethhdr *ethhdr;
__be16 proto;
+ int ret;
- /* Check if skb contains hsr_ethhdr */
- if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ /* Check if skb contains ethhdr */
+ if (skb->mac_len < sizeof(struct ethhdr))
return -EINVAL;
memset(frame, 0, sizeof(*frame));
@@ -548,7 +557,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
frame->is_from_san = false;
frame->port_rcv = port;
- hsr->proto_ops->fill_frame_info(proto, skb, frame);
+ ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
+ if (ret)
+ return ret;
+
check_local_dest(port->hsr, skb, frame);
return 0;
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
index b6acaafa83fc..206636750b30 100644
--- a/net/hsr/hsr_forward.h
+++ b/net/hsr/hsr_forward.h
@@ -24,8 +24,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port);
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
#endif /* __HSR_FORWARD_H */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index bb1351c38397..e31949479305 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -397,7 +397,8 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
* ensures entries of restarted nodes gets pruned so that they can
* re-register and resume communications.
*/
- if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
+ if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
+ seq_nr_before(sequence_nr, node->seq_out[port->type]))
return;
node->time_in[port->type] = jiffies;
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 8f264672b70b..53d1f7a82463 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -186,8 +186,8 @@ struct hsr_proto_ops {
struct hsr_port *port);
struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
struct hsr_port *port);
- void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
+ int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
bool (*invalid_dan_ingress_frame)(__be16 protocol);
void (*update_san_info)(struct hsr_node *node, bool is_sup);
};
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index c5227d42faf5..b70e6bbf6021 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -60,12 +60,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
goto finish_pass;
skb_push(skb, ETH_HLEN);
-
- if (skb_mac_header(skb) != skb->data) {
- WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
- __func__, __LINE__, port->dev->name);
- goto finish_consume;
- }
+ skb_reset_mac_header(skb);
+ if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+ protocol == htons(ETH_P_HSR))
+ skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+ skb_reset_mac_len(skb);
hsr_forward_skb(skb, port);
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 0c1b0770c59e..29bf97640166 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
be32_to_cpu(params.frame_counter)) ||
- ieee802154_llsec_fill_key_id(msg, &params.out_key))
+ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+ rc = -ENOBUFS;
goto out_free;
+ }
dev_put(dev);
@@ -1184,7 +1186,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *dpos;
struct ieee802154_llsec_device_key *kpos;
- int rc = 0, idx = 0, idx2;
+ int idx = 0, idx2;
list_for_each_entry(dpos, &data->table->devices, list) {
if (idx++ < data->s_idx)
@@ -1200,7 +1202,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
data->nlmsg_seq,
dpos->hwaddr, kpos,
data->dev)) {
- return rc = -EMSGSIZE;
+ return -EMSGSIZE;
}
data->s_idx2++;
@@ -1209,7 +1211,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
data->s_idx++;
}
- return rc;
+ return 0;
}
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 2cdc7e63fe17..88215b5c93aa 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
}
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+ rc = -EMSGSIZE;
goto nla_put_failure;
+ }
dev_put(dev);
wpan_phy_put(phy);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 05f6bd89a7dd..0cf2374c143b 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
return -EINVAL;
- if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
- !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
- !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
- attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
return -EINVAL;
addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
switch (addr->mode) {
case NL802154_DEV_ADDR_SHORT:
+ if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
+ return -EINVAL;
addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
break;
case NL802154_DEV_ADDR_EXTENDED:
+ if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
+ return -EINVAL;
addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
break;
default:
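The nl802154 hunk above fixes a mode-dependent validation bug: accepting "either address attribute is present" up front allowed, say, a SHORT mode paired with only an EXTENDED attribute, after which nla_get_le16() would read an attribute that was never supplied. The corrected shape, reduced to a self-contained sketch in which all EXAMPLE_* names are hypothetical:

/* Editorial sketch: validate mode-specific attributes inside the
 * switch on the mode, never before it.
 */
struct example_addr {
	u32 mode;
	__le16 short_addr;
	__le64 extended_addr;
};

static int example_parse_addr(struct nlattr **attrs,
			      struct example_addr *addr)
{
	if (!attrs[EXAMPLE_ADDR_ATTR_MODE])
		return -EINVAL;

	addr->mode = nla_get_u32(attrs[EXAMPLE_ADDR_ATTR_MODE]);

	switch (addr->mode) {
	case EXAMPLE_ADDR_SHORT:
		/* required by this mode only */
		if (!attrs[EXAMPLE_ADDR_ATTR_SHORT])
			return -EINVAL;
		addr->short_addr = nla_get_le16(attrs[EXAMPLE_ADDR_ATTR_SHORT]);
		break;
	case EXAMPLE_ADDR_EXTENDED:
		if (!attrs[EXAMPLE_ADDR_ATTR_EXTENDED])
			return -EINVAL;
		addr->extended_addr = nla_get_le64(attrs[EXAMPLE_ADDR_ATTR_EXTENDED]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}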
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f17870ee558b..54648181dd56 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -318,7 +318,7 @@ lookup_protocol:
WARN_ON(!answer_prot->slab);
- err = -ENOBUFS;
+ err = -ENOMEM;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
return err;
}
- if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+ if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->connect(sk, uaddr, addr_len);
}
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
- if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+ if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
@@ -1720,7 +1720,6 @@ EXPORT_SYMBOL_GPL(snmp_fold_field64);
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
- .netns_ok = 1,
};
#endif
@@ -1733,7 +1732,6 @@ static struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
- .netns_ok = 1,
.icmp_strict_tag_validation = 1,
};
@@ -1746,14 +1744,12 @@ static struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
- .netns_ok = 1,
};
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
.no_policy = 1,
- .netns_ok = 1,
};
static __net_init int ipv4_mib_init_net(struct net *net)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 36ed85bf2ad5..6eea1e9e998d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -450,6 +450,7 @@ static int ah4_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
+ break;
case ICMP_REDIRECT:
break;
default:
@@ -554,7 +555,6 @@ static int ah4_rcv_cb(struct sk_buff *skb, int err)
static const struct xfrm_type ah_type =
{
- .description = "AH4",
.owner = THIS_MODULE,
.proto = IPPROTO_AH,
.flags = XFRM_TYPE_REPLAY_PROT,
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index dff4f0eb96b0..9e41eff4a685 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -185,6 +185,7 @@ BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
+#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
@@ -213,6 +214,7 @@ BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bfaf327e9d12..099259fc826a 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -187,8 +187,7 @@ static int __init cipso_v4_cache_init(void)
* cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
*
* Description:
- * Invalidates and frees any entries in the CIPSO cache. Returns zero on
- * success and negative values on failure.
+ * Invalidates and frees any entries in the CIPSO cache.
*
*/
void cipso_v4_cache_invalidate(void)
@@ -472,6 +471,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
kfree(doi_def->map.std->lvl.local);
kfree(doi_def->map.std->cat.cipso);
kfree(doi_def->map.std->cat.local);
+ kfree(doi_def->map.std);
break;
}
kfree(doi_def);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e35f68da40a..73721a4448bd 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1955,7 +1955,7 @@ static int inet_validate_link_af(const struct net_device *dev,
struct nlattr *a, *tb[IFLA_INET_MAX+1];
int err, rem;
- if (dev && !__in_dev_get_rcu(dev))
+ if (dev && !__in_dev_get_rtnl(dev))
return -EAFNOSUPPORT;
err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
@@ -1981,7 +1981,7 @@ static int inet_validate_link_af(const struct net_device *dev,
static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
- struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
struct nlattr *a, *tb[IFLA_INET_MAX+1];
int rem;
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 35803ab7ac80..a09e36c4a413 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
@@ -982,6 +982,7 @@ static int esp4_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
+ break;
case ICMP_REDIRECT:
break;
default:
@@ -1198,7 +1199,6 @@ static int esp4_rcv_cb(struct sk_buff *skb, int err)
static const struct xfrm_type esp_type =
{
- .description = "ESP4",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 33687cf58286..8e4e9aa12130 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -33,12 +33,11 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
struct xfrm_state *x;
__be32 seq;
__be32 spi;
- int err;
if (!pskb_pull(skb, offset))
return NULL;
- if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+ if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
goto out;
xo = xfrm_offload(skb);
@@ -343,7 +342,6 @@ static const struct net_offload esp4_offload = {
};
static const struct xfrm_type_offload esp_type_offload = {
- .description = "ESP4 OFFLOAD",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.input_tail = esp_input_tail,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 84bb707bd88d..a933bd6345b1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -371,6 +371,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_proto = 0;
fl4.fl4_sport = 0;
fl4.fl4_dport = 0;
+ } else {
+ swap(fl4.fl4_sport, fl4.fl4_dport);
}
if (fib_lookup(net, &fl4, &res, 0))
@@ -1122,10 +1124,8 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
prefix, ifa->ifa_prefixlen, prim,
ifa->ifa_rt_priority);
- /* Add network specific broadcasts, when it takes a sense */
+ /* Add the network broadcast address, when it makes sense */
if (ifa->ifa_prefixlen < 31) {
- fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32,
- prim, 0);
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
32, prim, 0);
}
@@ -1516,6 +1516,12 @@ static int __net_init ip_fib_net_init(struct net *net)
if (err)
return err;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ /* Default to 3-tuple */
+ net->ipv4.sysctl_fib_multipath_hash_fields =
+ FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK;
+#endif
+
/* Avoid false sharing : Use at least a full cache line */
size = max_t(size_t, size, L1_CACHE_BYTES);
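The swap() added above is the substantive change in this file: __fib_validate_source() looks up the route back toward the sender, so when the L4 ports are kept (rather than zeroed as in the branch above it), they must be mirrored along with the addresses. Spelled out below; the address assignments are an assumption about the surrounding function, which this hunk does not show:

/* Editorial sketch: the reverse flow key for source validation.
 * A packet received as saddr:sport -> daddr:dport is validated by
 * looking up the return path daddr:dport -> saddr:sport.
 */
	fl4.daddr = src;			/* route back toward the sender */
	fl4.saddr = dst;
	swap(fl4.fl4_sport, fl4.fl4_dport);	/* mirror the ports too */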
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index b58db1ca4bfb..e184bcb19943 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -25,7 +25,7 @@ struct fib_alias {
#define FA_S_ACCESSED 0x01
-/* Dont write on fa_state unless needed, to keep it shared on all cpus */
+/* Don't write on fa_state unless needed, to keep it shared on all cpus */
static inline void fib_alias_accessed(struct fib_alias *fa)
{
if (!(fa->fa_state & FA_S_ACCESSED))
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a632b66bc13a..4c0c33e4710d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1874,6 +1874,7 @@ static int call_fib_nh_notifiers(struct fib_nh *nh,
(nh->fib_nh_flags & RTNH_F_DEAD))
return call_fib4_notifiers(dev_net(nh->fib_nh_dev),
event_type, &info.info);
+ break;
default:
break;
}
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 5d1e6fe9d838..cbb2b4bb0dfa 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -195,7 +195,6 @@ static int gre_err(struct sk_buff *skb, u32 info)
static const struct net_protocol net_gre_protocol = {
.handler = gre_rcv,
.err_handler = gre_err,
- .netns_ok = 1,
};
static int __init gre_init(void)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 7b6931a4d775..c695d294a5df 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
+ /* if we don't have a source address at this point, fall back to the
+ * dummy address instead of sending out a packet with a source address
+ * of 0.0.0.0
+ */
+ if (!fl4.saddr)
+ fl4.saddr = htonl(INADDR_DUMMY);
+
icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
@@ -986,14 +993,8 @@ static bool icmp_redirect(struct sk_buff *skb)
static bool icmp_echo(struct sk_buff *skb)
{
- struct icmp_ext_hdr *ext_hdr, _ext_hdr;
- struct icmp_ext_echo_iio *iio, _iio;
struct icmp_bxm icmp_param;
- struct net_device *dev;
- char buff[IFNAMSIZ];
struct net *net;
- u16 ident_len;
- u8 status;
net = dev_net(skb_dst(skb)->dev);
/* should there be an ICMP stat for ignored echos? */
@@ -1006,20 +1007,46 @@ static bool icmp_echo(struct sk_buff *skb)
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
- if (icmp_param.data.icmph.type == ICMP_ECHO) {
+ if (icmp_param.data.icmph.type == ICMP_ECHO)
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
- goto send_reply;
- }
- if (!net->ipv4.sysctl_icmp_echo_enable_probe)
+ else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
return true;
+
+ icmp_reply(&icmp_param, skb);
+ return true;
+}
+
+/* Helper for icmp_echo and icmpv6_echo_reply.
+ * Searches for the net_device that matches the PROBE interface
+ * identifier and builds the PROBE reply message in icmphdr.
+ *
+ * Returns false if PROBE responses are disabled via sysctl.
+ */
+
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+{
+ struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+ struct icmp_ext_echo_iio *iio, _iio;
+ struct net *net = dev_net(skb->dev);
+ struct net_device *dev;
+ char buff[IFNAMSIZ];
+ u16 ident_len;
+ u8 status;
+
+ if (!net->ipv4.sysctl_icmp_echo_enable_probe)
+ return false;
+
/* We currently only support probing interfaces on the proxy node
* Check to ensure L-bit is set
*/
- if (!(ntohs(icmp_param.data.icmph.un.echo.sequence) & 1))
- return true;
+ if (!(ntohs(icmphdr->un.echo.sequence) & 1))
+ return false;
/* Clear status bits in reply message */
- icmp_param.data.icmph.un.echo.sequence &= htons(0xFF00);
- icmp_param.data.icmph.type = ICMP_EXT_ECHOREPLY;
+ icmphdr->un.echo.sequence &= htons(0xFF00);
+ if (icmphdr->type == ICMP_EXT_ECHO)
+ icmphdr->type = ICMP_EXT_ECHOREPLY;
+ else
+ icmphdr->type = ICMPV6_EXT_ECHO_REPLY;
ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr);
/* Size of iio is class_type dependent.
* Only check header here and assign length based on ctype in the switch statement
@@ -1059,7 +1086,7 @@ static bool icmp_echo(struct sk_buff *skb)
if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
sizeof(struct in_addr))
goto send_mal_query;
- dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr.s_addr);
+ dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case ICMP_AFI_IP6:
@@ -1080,8 +1107,8 @@ static bool icmp_echo(struct sk_buff *skb)
goto send_mal_query;
}
if (!dev) {
- icmp_param.data.icmph.code = ICMP_EXT_CODE_NO_IF;
- goto send_reply;
+ icmphdr->code = ICMP_EXT_CODE_NO_IF;
+ return true;
}
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
@@ -1091,14 +1118,13 @@ static bool icmp_echo(struct sk_buff *skb)
if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;
dev_put(dev);
- icmp_param.data.icmph.un.echo.sequence |= htons(status);
-send_reply:
- icmp_reply(&icmp_param, skb);
- return true;
+ icmphdr->un.echo.sequence |= htons(status);
+ return true;
send_mal_query:
- icmp_param.data.icmph.code = ICMP_EXT_CODE_MAL_QUERY;
- goto send_reply;
+ icmphdr->code = ICMP_EXT_CODE_MAL_QUERY;
+ return true;
}
+EXPORT_SYMBOL_GPL(icmp_build_probe);
/*
* Handle ICMP Timestamp requests.
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7b272bbed2b4..6b3c558a4f23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
+ ip_mc_clear_src(i);
ip_ma_put(i);
}
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fd472eae4f5c..754013fa393b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -135,10 +135,18 @@ static int inet_csk_bind_conflict(const struct sock *sk,
bool relax, bool reuseport_ok)
{
struct sock *sk2;
+ bool reuseport_cb_ok;
bool reuse = sk->sk_reuse;
bool reuseport = !!sk->sk_reuseport;
+ struct sock_reuseport *reuseport_cb;
kuid_t uid = sock_i_uid((struct sock *)sk);
+ rcu_read_lock();
+ reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
+ /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
+ reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
+ rcu_read_unlock();
+
/*
* Unlike other sk lookup places we do not check
* for sk_net here, since _all_ the socks listed
@@ -156,14 +164,14 @@ static int inet_csk_bind_conflict(const struct sock *sk,
if ((!relax ||
(!reuseport_ok &&
reuseport && sk2->sk_reuseport &&
- !rcu_access_pointer(sk->sk_reuseport_cb) &&
+ reuseport_cb_ok &&
(sk2->sk_state == TCP_TIME_WAIT ||
uid_eq(uid, sock_i_uid(sk2))))) &&
inet_rcv_saddr_equal(sk, sk2, true))
break;
} else if (!reuseport_ok ||
!reuseport || !sk2->sk_reuseport ||
- rcu_access_pointer(sk->sk_reuseport_cb) ||
+ !reuseport_cb_ok ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(uid, sock_i_uid(sk2)))) {
if (inet_rcv_saddr_equal(sk, sk2, true))
@@ -687,6 +695,66 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
+static struct request_sock *inet_reqsk_clone(struct request_sock *req,
+ struct sock *sk)
+{
+ struct sock *req_sk, *nreq_sk;
+ struct request_sock *nreq;
+
+ nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+ if (!nreq) {
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
+ /* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
+ sock_put(sk);
+ return NULL;
+ }
+
+ req_sk = req_to_sk(req);
+ nreq_sk = req_to_sk(nreq);
+
+ memcpy(nreq_sk, req_sk,
+ offsetof(struct sock, sk_dontcopy_begin));
+ memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
+ req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
+
+ sk_node_init(&nreq_sk->sk_node);
+ nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
+#ifdef CONFIG_XPS
+ nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
+#endif
+ nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
+
+ nreq->rsk_listener = sk;
+
+ /* We need not acquire fastopenq->lock
+ * because the child socket is locked in inet_csk_listen_stop().
+ */
+ if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
+ rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
+
+ return nreq;
+}
+
+static void reqsk_queue_migrated(struct request_sock_queue *queue,
+ const struct request_sock *req)
+{
+ if (req->num_timeout == 0)
+ atomic_inc(&queue->young);
+ atomic_inc(&queue->qlen);
+}
+
+static void reqsk_migrate_reset(struct request_sock *req)
+{
+ req->saved_syn = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ inet_rsk(req)->ipv6_opt = NULL;
+ inet_rsk(req)->pktopts = NULL;
+#else
+ inet_rsk(req)->ireq_opt = NULL;
+#endif
+}
+
/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
@@ -727,15 +795,39 @@ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
static void reqsk_timer_handler(struct timer_list *t)
{
struct request_sock *req = from_timer(req, t, rsk_timer);
+ struct request_sock *nreq = NULL, *oreq = req;
struct sock *sk_listener = req->rsk_listener;
- struct net *net = sock_net(sk_listener);
- struct inet_connection_sock *icsk = inet_csk(sk_listener);
- struct request_sock_queue *queue = &icsk->icsk_accept_queue;
+ struct inet_connection_sock *icsk;
+ struct request_sock_queue *queue;
+ struct net *net;
int max_syn_ack_retries, qlen, expire = 0, resend = 0;
- if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
- goto drop;
+ if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
+ struct sock *nsk;
+
+ nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
+ if (!nsk)
+ goto drop;
+ nreq = inet_reqsk_clone(req, nsk);
+ if (!nreq)
+ goto drop;
+
+ /* The new timer for the cloned req can drop references out of
+ * the refcount of 2 set below by calling
+ * inet_csk_reqsk_queue_drop_and_put(), so hold one extra count
+ * to prevent use-after-free and call reqsk_put() just before
+ * return.
+ */
+ refcount_set(&nreq->rsk_refcnt, 2 + 1);
+ timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+ reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
+
+ req = nreq;
+ sk_listener = nsk;
+ }
+
+ icsk = inet_csk(sk_listener);
+ net = sock_net(sk_listener);
max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
/* Normally all the openreqs are young and become mature
* (i.e. converted to established socket) for first timeout.
@@ -754,6 +846,7 @@ static void reqsk_timer_handler(struct timer_list *t)
* embrions; and abort old ones without pity, if old
* ones are about to clog our table.
*/
+ queue = &icsk->icsk_accept_queue;
qlen = reqsk_queue_len(queue);
if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
int young = reqsk_queue_len_young(queue) << 1;
@@ -778,10 +871,39 @@ static void reqsk_timer_handler(struct timer_list *t)
atomic_dec(&queue->young);
timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
mod_timer(&req->rsk_timer, jiffies + timeo);
+
+ if (!nreq)
+ return;
+
+ if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
+ /* delete timer */
+ inet_csk_reqsk_queue_drop(sk_listener, nreq);
+ goto no_ownership;
+ }
+
+ __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
+ reqsk_migrate_reset(oreq);
+ reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
+ reqsk_put(oreq);
+
+ reqsk_put(nreq);
return;
}
+
+ /* Even if we can clone the req, we may not need to retransmit any more
+ * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
+ * CPU may win the "own_req" race so that inet_ehash_insert() fails.
+ */
+ if (nreq) {
+ __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
+no_ownership:
+ reqsk_migrate_reset(nreq);
+ reqsk_queue_removed(queue, nreq);
+ __reqsk_free(nreq);
+ }
+
drop:
- inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
+ inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
static void reqsk_queue_hash_req(struct request_sock *req,
@@ -997,12 +1119,42 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req, bool own_req)
{
if (own_req) {
- inet_csk_reqsk_queue_drop(sk, req);
- reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
- if (inet_csk_reqsk_queue_add(sk, req, child))
+ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+ reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
+
+ if (sk != req->rsk_listener) {
+ /* another listening sk has been selected,
+ * migrate the req to it.
+ */
+ struct request_sock *nreq;
+
+ /* hold a refcnt for the nreq->rsk_listener
+ * which is assigned in inet_reqsk_clone()
+ */
+ sock_hold(sk);
+ nreq = inet_reqsk_clone(req, sk);
+ if (!nreq) {
+ inet_child_forget(sk, req, child);
+ goto child_put;
+ }
+
+ refcount_set(&nreq->rsk_refcnt, 1);
+ if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
+ reqsk_migrate_reset(req);
+ reqsk_put(req);
+ return child;
+ }
+
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+ reqsk_migrate_reset(nreq);
+ __reqsk_free(nreq);
+ } else if (inet_csk_reqsk_queue_add(sk, req, child)) {
return child;
+ }
}
/* Too bad, another child took ownership of the request, undo. */
+child_put:
bh_unlock_sock(child);
sock_put(child);
return NULL;
@@ -1028,14 +1180,40 @@ void inet_csk_listen_stop(struct sock *sk)
* of the variants now. --ANK
*/
while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
- struct sock *child = req->sk;
+ struct sock *child = req->sk, *nsk;
+ struct request_sock *nreq;
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
sock_hold(child);
+ nsk = reuseport_migrate_sock(sk, child, NULL);
+ if (nsk) {
+ nreq = inet_reqsk_clone(req, nsk);
+ if (nreq) {
+ refcount_set(&nreq->rsk_refcnt, 1);
+
+ if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
+ __NET_INC_STATS(sock_net(nsk),
+ LINUX_MIB_TCPMIGRATEREQSUCCESS);
+ reqsk_migrate_reset(req);
+ } else {
+ __NET_INC_STATS(sock_net(nsk),
+ LINUX_MIB_TCPMIGRATEREQFAILURE);
+ reqsk_migrate_reset(nreq);
+ __reqsk_free(nreq);
+ }
+
+ /* inet_csk_reqsk_queue_add() has already
+ * called inet_child_forget() in the failure case.
+ */
+ goto skip_child_forget;
+ }
+ }
+
inet_child_forget(sk, req, child);
+skip_child_forget:
reqsk_put(req);
bh_unlock_sock(child);
local_bh_enable();
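The two hunks above (reqsk_timer_handler() and inet_csk_listen_stop()) follow the same migration choreography, which is easiest to see condensed. The refcount breakdown below is an editorial reading of refcount_set(&nreq->rsk_refcnt, 2 + 1) in the timer path, not text from the patch:

/* Editorial condensation of the timer-path migration above */
nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
nreq = inet_reqsk_clone(req, nsk);	/* shallow copy, new listener */
refcount_set(&nreq->rsk_refcnt, 2 + 1);	/* ehash + timer + this path */
timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
/* ... SYN+ACK retransmit bookkeeping elided ... */
inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL);
					/* clone replaces the original */
reqsk_migrate_reset(oreq);	/* buffers now owned by the clone */
reqsk_put(oreq);		/* drop the original req */
reqsk_put(nreq);		/* drop this path's temporary ref */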
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 93474b1bea4e..e65f4ef024a4 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -416,7 +416,7 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
static int inet_twsk_diag_fill(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
- u16 nlmsg_flags)
+ u16 nlmsg_flags, bool net_admin)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct inet_diag_msg *r;
@@ -444,6 +444,12 @@ static int inet_twsk_diag_fill(struct sock *sk,
r->idiag_uid = 0;
r->idiag_inode = 0;
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+ tw->tw_mark)) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
+
nlmsg_end(skb, nlh);
return 0;
}
@@ -494,7 +500,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
u16 nlmsg_flags, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
+ return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
@@ -801,6 +807,8 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
entry.mark = sk->sk_mark;
else if (sk->sk_state == TCP_NEW_SYN_RECV)
entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
+ else if (sk->sk_state == TCP_TIME_WAIT)
+ entry.mark = inet_twsk(sk)->tw_mark;
else
entry.mark = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c96866a53a66..80aeaf9e6e16 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -697,7 +697,7 @@ void inet_unhash(struct sock *sk)
goto unlock;
if (rcu_access_pointer(sk->sk_reuseport_cb))
- reuseport_detach_sock(sk);
+ reuseport_stop_listen_sock(sk);
if (ilb) {
inet_unhash2(hashinfo, sk);
ilb->count--;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a68bf4c6fe9b..12dca0c85f3c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -107,6 +107,8 @@ module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static const struct header_ops ipgre_header_ops;
+
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
u32 id, u32 index,
@@ -364,7 +366,10 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
raw_proto, false) < 0)
goto drop;
- if (tunnel->dev->type != ARPHRD_NONE)
+ /* Special case for ipgre_header_parse(), which expects the
+ * mac_header to point to the outer IP header.
+ */
+ if (tunnel->dev->header_ops == &ipgre_header_ops)
skb_pop_mac_header(skb);
else
skb_reset_mac_header(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c3efc7d658f6..8d8a8da3ae7e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1054,7 +1054,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
- unsigned int alloclen;
+ unsigned int alloclen, alloc_extra;
unsigned int pagedlen;
struct sk_buff *skb_prev;
alloc_new_skb:
@@ -1074,35 +1074,39 @@ alloc_new_skb:
fraglen = datalen + fragheaderlen;
pagedlen = 0;
+ alloc_extra = hh_len + 15;
+ alloc_extra += exthdrlen;
+
+ /* The last fragment gets additional space at tail.
+ * Note, with MSG_MORE we overallocate on fragments,
+ * because we have no idea what fragment will be
+ * the last.
+ */
+ if (datalen == length + fraggap)
+ alloc_extra += rt->dst.trailer_len;
+
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
- else if (!paged)
+ else if (!paged &&
+ (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+ !(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
else {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
}
- alloclen += exthdrlen;
-
- /* The last fragment gets additional space at tail.
- * Note, with MSG_MORE we overallocate on fragments,
- * because we have no idea what fragment will be
- * the last.
- */
- if (datalen == length + fraggap)
- alloclen += rt->dst.trailer_len;
+ alloclen += alloc_extra;
if (transhdrlen) {
- skb = sock_alloc_send_skb(sk,
- alloclen + hh_len + 15,
+ skb = sock_alloc_send_skb(sk, alloclen,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
- skb = alloc_skb(alloclen + hh_len + 15,
+ skb = alloc_skb(alloclen,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
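
This hunk (and its ip6_output.c twin further down) folds the fixed per-skb overhead -- link-layer headroom, extension headers, and the trailer reserved for the last fragment -- into alloc_extra once, and only attempts a fully linear skb when fraglen plus that overhead still fits a reasonable allocation. A hedged model of the sizing decision, with illustrative values for SKB_MAX_ALLOC and MAX_HEADER:

	#include <stdbool.h>

	#define SKB_MAX_ALLOC 16064	/* illustrative value, not the kernel's */
	#define MAX_HEADER 128		/* illustrative value, not the kernel's */

	static unsigned int alloc_len(unsigned int fraglen, unsigned int hh_len,
				      unsigned int exthdrlen, unsigned int trailer_len,
				      bool last_frag, bool dev_sg, bool paged,
				      unsigned int mtu, bool msg_more,
				      unsigned int *pagedlen)
	{
		unsigned int alloc_extra = hh_len + 15 + exthdrlen;
		unsigned int alloclen;

		if (last_frag)			/* only the last frag needs the trailer */
			alloc_extra += trailer_len;

		*pagedlen = 0;
		if (msg_more && !dev_sg)
			alloclen = mtu;
		else if (!paged && (fraglen + alloc_extra < SKB_MAX_ALLOC || !dev_sg))
			alloclen = fraglen;	/* whole fragment stays linear */
		else {
			alloclen = fraglen < MAX_HEADER ? fraglen : MAX_HEADER;
			*pagedlen = fraglen - alloclen;	/* rest goes to pages */
		}
		return alloclen + alloc_extra;
	}
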
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 31c6c6d99d5e..eb560eecee08 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -351,6 +351,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
+ break;
case ICMP_REDIRECT:
break;
default:
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index b42683212c65..366094c1ce6c 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -31,6 +31,7 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
+ break;
case ICMP_REDIRECT:
break;
default:
@@ -152,7 +153,6 @@ static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
}
static const struct xfrm_type ipcomp_type = {
- .description = "IPCOMP4",
.owner = THIS_MODULE,
.proto = IPPROTO_COMP,
.init_state = ipcomp4_init_state,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index bc2f6ca97152..816d8aad5a68 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
/*
- * Copy BOOTP-supplied string if not already set.
+ * Copy BOOTP-supplied string
*/
static int __init ic_bootp_string(char *dest, char *src, int len, int max)
{
@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
}
break;
case 12: /* Host name */
- ic_bootp_string(utsname()->nodename, ext+1, *ext,
- __NEW_UTS_LEN);
- ic_host_name_set = 1;
+ if (!ic_host_name_set) {
+ ic_bootp_string(utsname()->nodename, ext+1, *ext,
+ __NEW_UTS_LEN);
+ ic_host_name_set = 1;
+ }
break;
case 15: /* Domain name (DNS) */
- ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+ if (!ic_domain[0])
+ ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
break;
case 17: /* Root path */
if (!root_server_path[0])
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index d5bfa087c23a..266c65577ba6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -242,6 +242,8 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
if (!tun_dst)
return 0;
}
+ skb_reset_mac_header(skb);
+
return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 939792a38814..7b12a40dd465 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1317,7 +1317,7 @@ static void mroute_clean_tables(struct mr_table *mrt, int flags)
}
/* called from ip_ra_control(), before an RCU grace period,
- * we dont need to call synchronize_rcu() here
+ * we don't need to call synchronize_rcu() here
*/
static void mrtsock_destruct(struct sock *sk)
{
@@ -1938,7 +1938,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
struct mfc_cache *cache_proxy;
- /* For an (*,G) entry, we only check that the incomming
+ /* For an (*,G) entry, we only check that the incoming
* interface is part of the static tree.
*/
cache_proxy = mr_mfc_find_any_parent(mrt, vif);
@@ -3007,7 +3007,6 @@ static const struct seq_operations ipmr_mfc_seq_ops = {
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
.handler = pim_rcv,
- .netns_ok = 1,
};
#endif
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index ff437e4ed6db..55fc23a8f7a7 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -27,7 +27,7 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr,
nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset(nft_net(pkt), pkt->xt.state->sk, pkt->skb,
+ nf_send_reset(nft_net(pkt), nft_sk(pkt), pkt->skb,
nft_hook(pkt));
break;
default:
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1c9f71a37258..1e44a43acfe2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -573,7 +573,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
}
}
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
out:
sock_put(sk);
}
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
struct sock *sk;
struct net *net = dev_net(skb->dev);
struct icmphdr *icmph = icmp_hdr(skb);
+ bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@ -968,14 +969,15 @@ bool ping_rcv(struct sk_buff *skb)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
- if (skb2)
- ping_queue_rcv_skb(sk, skb2);
+ if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+ rc = true;
sock_put(sk);
- return true;
}
- pr_debug("no socket, dropping\n");
- return false;
+ if (!rc)
+ pr_debug("no socket, dropping\n");
+
+ return rc;
}
EXPORT_SYMBOL_GPL(ping_rcv);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6d46297a99f8..b0d3a09dc84e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -295,6 +295,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
+ SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
+ SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 9a8c0892622b..6913979948d7 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -31,12 +31,6 @@ EXPORT_SYMBOL(inet_offloads);
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- if (!prot->netns_ok) {
- pr_err("Protocol %u is not namespace aware, cannot register.\n",
- protocol);
- return -EINVAL;
- }
-
return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
NULL, prot) ? 0 : -1;
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 50a73178d63a..bb446e60cf58 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -280,7 +280,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
if (inet->recverr || harderr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
@@ -929,7 +929,7 @@ int raw_abort(struct sock *sk, int err)
lock_sock(sk);
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
__udp_disconnect(sk, 0);
release_sock(sk);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f6787c55f6ab..99c06944501a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1306,7 +1306,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
mtu = READ_ONCE(dst->dev->mtu);
@@ -1315,6 +1315,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
+out:
mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
@@ -1906,13 +1907,128 @@ out:
hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
+static u32 fib_multipath_custom_hash_outer(const struct net *net,
+ const struct sk_buff *skb,
+ bool *p_has_inner)
+{
+ u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ struct flow_keys keys, hash_keys;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
+
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ hash_keys.basic.ip_proto = keys.basic.ip_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ hash_keys.ports.src = keys.ports.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ hash_keys.ports.dst = keys.ports.dst;
+
+ *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
+ return flow_hash_from_keys(&hash_keys);
+}
+
+static u32 fib_multipath_custom_hash_inner(const struct net *net,
+ const struct sk_buff *skb,
+ bool has_inner)
+{
+ u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ struct flow_keys keys, hash_keys;
+
+ /* We assume the packet carries an encapsulation, but if none was
+ * encountered during dissection of the outer flow, then there is no
+ * point in calling the flow dissector again.
+ */
+ if (!has_inner)
+ return 0;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ skb_flow_dissect_flow_keys(skb, &keys, 0);
+
+ if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
+ return 0;
+
+ if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+ } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
+ hash_keys.tags.flow_label = keys.tags.flow_label;
+ }
+
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ hash_keys.basic.ip_proto = keys.basic.ip_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
+ hash_keys.ports.src = keys.ports.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+ hash_keys.ports.dst = keys.ports.dst;
+
+ return flow_hash_from_keys(&hash_keys);
+}
+
+static u32 fib_multipath_custom_hash_skb(const struct net *net,
+ const struct sk_buff *skb)
+{
+ u32 mhash, mhash_inner;
+ bool has_inner = true;
+
+ mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
+ mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
+
+ return jhash_2words(mhash, mhash_inner, 0);
+}
+
+static u32 fib_multipath_custom_hash_fl4(const struct net *net,
+ const struct flowi4 *fl4)
+{
+ u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ struct flow_keys hash_keys;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ hash_keys.addrs.v4addrs.src = fl4->saddr;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ hash_keys.addrs.v4addrs.dst = fl4->daddr;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ hash_keys.basic.ip_proto = fl4->flowi4_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ hash_keys.ports.src = fl4->fl4_sport;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ hash_keys.ports.dst = fl4->fl4_dport;
+
+ return flow_hash_from_keys(&hash_keys);
+}
+
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys)
{
u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
struct flow_keys hash_keys;
- u32 mhash;
+ u32 mhash = 0;
switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
case 0:
@@ -1924,6 +2040,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
+ mhash = flow_hash_from_keys(&hash_keys);
break;
case 1:
/* skb is currently provided only when forwarding */
@@ -1957,6 +2074,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.ports.dst = fl4->fl4_dport;
hash_keys.basic.ip_proto = fl4->flowi4_proto;
}
+ mhash = flow_hash_from_keys(&hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -1987,9 +2105,15 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
hash_keys.addrs.v4addrs.src = fl4->saddr;
hash_keys.addrs.v4addrs.dst = fl4->daddr;
}
+ mhash = flow_hash_from_keys(&hash_keys);
+ break;
+ case 3:
+ if (skb)
+ mhash = fib_multipath_custom_hash_skb(net, skb);
+ else
+ mhash = fib_multipath_custom_hash_fl4(net, fl4);
break;
}
- mhash = flow_hash_from_keys(&hash_keys);
if (multipath_hash)
mhash = jhash_2words(mhash, multipath_hash, 0);
@@ -2056,6 +2180,19 @@ martian_source:
return err;
}
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+ const struct fib_result *res)
+{
+ struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+ struct net_device *dev = NULL;
+
+ if (nhc)
+ dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+ return dev ? : net->loopback_dev;
+}
+
/*
* NOTE. We drop all the packets that has local source
* addresses, because every properly looped back packet
@@ -2212,7 +2349,7 @@ local_input:
}
}
- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type,
IN_DEV_ORCONF(in_dev, NOPOLICY), false);
if (!rth)
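
Policy 3 is the new "custom" multipath hash: the fib_multipath_hash_fields sysctl (added below) is a bitmask selecting which outer/inner fields feed the hash. A toy model of the field-selection idea -- the bit values, flow struct, and mix function are stand-ins, not the kernel's FIB_MULTIPATH_HASH_FIELD_* definitions or flow_hash_from_keys():

	#include <stdint.h>

	#define HASH_FIELD_SRC_IP	(1U << 0)
	#define HASH_FIELD_DST_IP	(1U << 1)
	#define HASH_FIELD_IP_PROTO	(1U << 2)
	#define HASH_FIELD_SRC_PORT	(1U << 3)
	#define HASH_FIELD_DST_PORT	(1U << 4)

	struct flow {
		uint32_t saddr, daddr;
		uint16_t sport, dport;
		uint8_t proto;
	};

	static uint32_t mix(uint32_t h, uint32_t v)
	{
		return h ^ (v + 0x9e3779b9 + (h << 6) + (h >> 2)); /* toy combiner */
	}

	static uint32_t multipath_hash(const struct flow *fl, uint32_t fields)
	{
		uint32_t h = 0;

		if (fields & HASH_FIELD_SRC_IP)
			h = mix(h, fl->saddr);
		if (fields & HASH_FIELD_DST_IP)
			h = mix(h, fl->daddr);
		if (fields & HASH_FIELD_IP_PROTO)
			h = mix(h, fl->proto);
		if (fields & HASH_FIELD_SRC_PORT)
			h = mix(h, fl->sport);
		if (fields & HASH_FIELD_DST_PORT)
			h = mix(h, fl->dport);
		return h;
	}
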
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index a62934b9f15a..6f1e64d49232 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -19,6 +19,7 @@
#include <net/snmp.h>
#include <net/icmp.h>
#include <net/ip.h>
+#include <net/ip_fib.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -29,6 +30,7 @@
#include <net/netevent.h>
static int two = 2;
+static int three __maybe_unused = 3;
static int four = 4;
static int thousand = 1000;
static int tcp_retr1_max = 255;
@@ -48,6 +50,8 @@ static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static u32 u32_max_div_HZ = UINT_MAX / HZ;
static int one_day_secs = 24 * 3600;
+static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
+ FIB_MULTIPATH_HASH_FIELD_ALL_MASK;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -461,6 +465,22 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
return ret;
}
+
+static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct net *net;
+ int ret;
+
+ net = container_of(table->data, struct net,
+ ipv4.sysctl_fib_multipath_hash_fields);
+ ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);
+
+ return ret;
+}
#endif
static struct ctl_table ipv4_table[] = {
@@ -941,6 +961,15 @@ static struct ctl_table ipv4_net_table[] = {
},
#endif
{
+ .procname = "tcp_migrate_req",
+ .data = &init_net.ipv4.sysctl_tcp_migrate_req,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
+ {
.procname = "tcp_reordering",
.data = &init_net.ipv4.sysctl_tcp_reordering,
.maxlen = sizeof(int),
@@ -1050,7 +1079,16 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = &three,
+ },
+ {
+ .procname = "fib_multipath_hash_fields",
+ .data = &init_net.ipv4.sysctl_fib_multipath_hash_fields,
+ .maxlen = sizeof(u32),
+ .mode = 0644,
+ .proc_handler = proc_fib_multipath_hash_fields,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &fib_multipath_hash_fields_all_mask,
},
#endif
{
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f1c1f9e3de72..d5ab5f243640 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1738,8 +1738,8 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
}
EXPORT_SYMBOL(tcp_set_rcvlowat);
-static void tcp_update_recv_tstamps(struct sk_buff *skb,
- struct scm_timestamping_internal *tss)
+void tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss)
{
if (skb->tstamp)
tss->ts[0] = ktime_to_timespec64(skb->tstamp);
@@ -2024,8 +2024,6 @@ static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
}
#define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS)
-static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
- struct scm_timestamping_internal *tss);
static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
struct tcp_zerocopy_receive *zc,
struct scm_timestamping_internal *tss)
@@ -2095,8 +2093,8 @@ static int tcp_zerocopy_receive(struct sock *sk,
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, address);
- if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
+ vma = vma_lookup(current->mm, address);
+ if (!vma || vma->vm_ops != &tcp_vm_ops) {
mmap_read_unlock(current->mm);
return -EINVAL;
}
@@ -2197,8 +2195,8 @@ out:
#endif
/* Similar to __sock_recv_timestamp, but does not require an skb */
-static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
- struct scm_timestamping_internal *tss)
+void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping_internal *tss)
{
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
bool has_timestamping = false;
@@ -3061,7 +3059,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_frag.offset = 0;
}
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(tcp_disconnect);
@@ -4450,7 +4448,7 @@ int tcp_abort(struct sock *sk, int err)
sk->sk_err = err;
/* This barrier is coupled with smp_rmb() in tcp_poll() */
smp_wmb();
- sk->sk_error_report(sk);
+ sk_error_report(sk);
if (tcp_need_reset(sk->sk_state))
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index ad9d17923fc5..f26916a62f25 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -163,6 +163,28 @@ static bool tcp_bpf_stream_read(const struct sock *sk)
return !empty;
}
+static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ long timeo)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 1;
+
+ if (!timeo)
+ return ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = sk_wait_event(sk, &timeo,
+ !list_empty(&psock->ingress_msg) ||
+ !skb_queue_empty(&sk->sk_receive_queue), &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
@@ -184,11 +206,11 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
- int data, err = 0;
long timeo;
+ int data;
timeo = sock_rcvtimeo(sk, nonblock);
- data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
+ data = tcp_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
@@ -196,14 +218,9 @@ msg_bytes_ready:
sk_psock_put(sk, psock);
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
}
- if (err) {
- ret = err;
- goto out;
- }
copied = -EAGAIN;
}
ret = copied;
-out:
release_sock(sk);
sk_psock_put(sk, psock);
return ret;
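
tcp_msg_wait_data() replaces the shared sk_msg_wait_data() with a TCP-local helper that also watches sk_receive_queue (udp_bpf.c below grows a UDP twin). Its contract: report readable immediately on shutdown, never sleep when the timeout is zero, otherwise block until data arrives or the timeout expires. A rough userspace analogue using pthreads rather than the kernel wait API:

	#include <pthread.h>
	#include <stdbool.h>
	#include <time.h>

	struct waitq {
		pthread_mutex_t mu;
		pthread_cond_t cv;
		bool shutdown;	/* stand-in for RCV_SHUTDOWN */
		int queued;	/* stand-in for ingress_msg / sk_receive_queue */
	};

	static int msg_wait_data(struct waitq *w, long timeo_ms)
	{
		struct timespec ts;
		int ret = 0;

		pthread_mutex_lock(&w->mu);
		if (w->shutdown) {
			pthread_mutex_unlock(&w->mu);
			return 1;	/* peer shut down: report readable */
		}
		if (!timeo_ms)
			goto out;	/* non-blocking: report no data */

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeo_ms / 1000;
		ts.tv_nsec += (timeo_ms % 1000) * 1000000L;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		while (!w->queued && !w->shutdown) {
			if (pthread_cond_timedwait(&w->cv, &w->mu, &ts))
				break;	/* timed out */
		}
		ret = w->queued || w->shutdown;
	out:
		pthread_mutex_unlock(&w->mu);
		return ret;
	}
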
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index af2814c9342a..47c32604d38f 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -526,7 +526,7 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
if (!tfo_da_times)
return false;
- /* Limit timout to max: 2^6 * initial timeout */
+ /* Limit timeout to max: 2^6 * initial timeout */
multiplier = 1 << min(tfo_da_times - 1, 6);
timeout = multiplier * tfo_bh_timeout * HZ;
if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4cf4dd532d1c..e6ca5a1f3b59 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2816,8 +2816,17 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
*rexmit = REXMIT_LOST;
}
+static bool tcp_force_fast_retransmit(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ return after(tcp_highest_sack_seq(tp),
+ tp->snd_una + tp->reordering * tp->mss_cache);
+}
+
/* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
+ bool *do_lost)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2842,7 +2851,9 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
- return true;
+ } else {
+ /* Partial ACK arrived. Force fast retransmit. */
+ *do_lost = tcp_force_fast_retransmit(sk);
}
return false;
}
@@ -2866,14 +2877,6 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
}
}
-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- return after(tcp_highest_sack_seq(tp),
- tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -2943,17 +2946,21 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp))
tcp_add_reno_sack(sk, num_dupack, ece_ack);
- } else {
- if (tcp_try_undo_partial(sk, prior_snd_una))
- return;
- /* Partial ACK arrived. Force fast retransmit. */
- do_lost = tcp_force_fast_retransmit(sk);
- }
- if (tcp_try_undo_dsack(sk)) {
- tcp_try_keep_open(sk);
+ } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
return;
- }
+
+ if (tcp_try_undo_dsack(sk))
+ tcp_try_keep_open(sk);
+
tcp_identify_packet_loss(sk, ack_flag);
+ if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+ if (!tcp_time_to_recover(sk, flag))
+ return;
+ /* Undo reverts the recovery state. If loss is evident,
+ * start a new recovery (e.g. reordering then loss).
+ */
+ tcp_enter_recovery(sk, ece_ack);
+ }
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, num_dupack, rexmit);
@@ -4263,7 +4270,7 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
tcp_done(sk);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
/*
@@ -5885,6 +5892,7 @@ step5:
return;
csum_error:
+ trace_tcp_bad_csum(skb);
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
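
tcp_force_fast_retransmit() moves up so the partial-ACK path can fill *do_lost in place. The predicate compares TCP sequence numbers, which wrap, so it relies on the kernel's after() rather than a plain '>'. A self-contained model (parameter names are stand-ins for the tcp_sock fields):

	#include <stdbool.h>
	#include <stdint.h>

	/* Wraparound-safe "a comes after b", as in the kernel's after(). */
	static bool seq_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	/* Force fast retransmit once the highest SACKed sequence is more
	 * than one reordering window past snd_una. */
	static bool force_fast_retransmit(uint32_t highest_sack, uint32_t snd_una,
					  uint32_t reordering, uint32_t mss)
	{
		return seq_after(highest_sack, snd_una + reordering * mss);
	}
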
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 312184cead57..e66ad6bfe808 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -585,7 +585,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
tcp_done(sk);
} else {
@@ -613,7 +613,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
} else { /* Only an error on timeout */
sk->sk_err_soft = err;
}
@@ -1731,6 +1731,7 @@ discard:
return 0;
csum_err:
+ trace_tcp_bad_csum(skb);
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
@@ -1801,6 +1802,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (unlikely(tcp_checksum_complete(skb))) {
bh_unlock_sock(sk);
+ trace_tcp_bad_csum(skb);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
return true;
@@ -2000,13 +2002,21 @@ process:
goto csum_error;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
- inet_csk_reqsk_queue_drop_and_put(sk, req);
- goto lookup;
+ nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
+ if (!nsk) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
+ sk = nsk;
+ /* reuseport_migrate_sock() has already taken one sk_refcnt
+ * before returning.
+ */
+ } else {
+ /* We own a reference on the listener, increase it again
+ * as we might lose it too soon.
+ */
+ sock_hold(sk);
}
- /* We own a reference on the listener, increase it again
- * as we might lose it too soon.
- */
- sock_hold(sk);
refcounted = true;
nsk = NULL;
if (!tcp_filter(sk, skb)) {
@@ -2098,6 +2108,7 @@ no_tcp_socket:
if (tcp_checksum_complete(skb)) {
csum_error:
+ trace_tcp_bad_csum(skb);
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7513ba45553d..0a4f3f16140a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -775,8 +775,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
goto listen_overflow;
if (own_req && rsk_drop_req(req)) {
- reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
- inet_csk_reqsk_queue_drop_and_put(sk, req);
+ reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
+ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
return child;
}
@@ -786,6 +786,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
+ if (sk != req->rsk_listener)
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
inet_rsk(req)->acked = 1;
return NULL;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4ef08079ccfa..20cf4a98c69d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -68,7 +68,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
tcp_write_queue_purge(sk);
tcp_done(sk);
@@ -441,7 +441,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
* This function gets called when the kernel timer for a TCP packet
* of this socket expires.
*
- * It handles retransmission, timer adjustment and other necesarry measures.
+ * It handles retransmission, timer adjustment and other necessary measures.
*
* Returns: Nothing (void)
*/
@@ -766,7 +766,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
if (!sock_owned_by_user(sk)) {
if (tp->compressed_ack) {
/* Since we have to send one ack finally,
- * substract one from tp->compressed_ack to keep
+ * subtract one from tp->compressed_ack to keep
* LINUX_MIB_TCPACKCOMPRESSED accurate.
*/
tp->compressed_ack--;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 3bb448761ca3..07c4c93b9fdb 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -221,7 +221,7 @@ static struct tcp_congestion_ops tcp_yeah __read_mostly = {
static int __init tcp_yeah_register(void)
{
- BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
+ BUILD_BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
tcp_register_congestion_control(&tcp_yeah);
return 0;
}
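
Converting BUG_ON() to BUILD_BUG_ON() turns the size check from a crash at registration time into a build failure. The same idea in standard C11, with a hypothetical state struct and an illustrative size constant:

	#define CA_PRIV_SIZE (16 * sizeof(unsigned long))	/* illustrative */

	struct yeah_state { unsigned long vegas[8], extra[4]; };	/* hypothetical */

	/* Fails at compile time, not at runtime, if the private state ever
	 * outgrows the reserved area -- the guarantee BUILD_BUG_ON gives. */
	_Static_assert(sizeof(struct yeah_state) <= CA_PRIV_SIZE,
		       "congestion-control state no longer fits the private area");
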
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index e44aaf41a138..5048c47c79b2 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -218,7 +218,6 @@ static const struct net_protocol tunnel4_protocol = {
.handler = tunnel4_rcv,
.err_handler = tunnel4_err,
.no_policy = 1,
- .netns_ok = 1,
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -226,7 +225,6 @@ static const struct net_protocol tunnel64_protocol = {
.handler = tunnel64_rcv,
.err_handler = tunnel64_err,
.no_policy = 1,
- .netns_ok = 1,
};
#endif
@@ -235,7 +233,6 @@ static const struct net_protocol tunnelmpls4_protocol = {
.handler = tunnelmpls4_rcv,
.err_handler = tunnelmpls4_err,
.no_policy = 1,
- .netns_ok = 1,
};
#endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 15f5504adf5b..62682807b4b2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -776,7 +776,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
out:
return 0;
}
@@ -1798,11 +1798,13 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
if (used <= 0) {
if (!copied)
copied = used;
+ kfree_skb(skb);
break;
} else if (used <= skb->len) {
copied += used;
}
+ kfree_skb(skb);
if (!desc->count)
break;
}
@@ -2607,6 +2609,9 @@ void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2862,17 @@ int udp_abort(struct sock *sk, int err)
{
lock_sock(sk);
+ /* udp{v6}_destroy_sock() sets SOCK_DEAD under the sk lock;
+ * avoid racing with close().
+ */
+ if (sock_flag(sk, SOCK_DEAD))
+ goto out;
+
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
__udp_disconnect(sk, 0);
+out:
release_sock(sk);
return 0;
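
The SOCK_DEAD dance above is a classic close-vs-async-abort race fix: destroy marks the socket dead while holding the lock, and abort re-checks the flag under the same lock before touching anything. A userspace analogue of the pattern, with a mutex standing in for lock_sock():

	#include <pthread.h>
	#include <stdbool.h>

	struct fake_sock {
		pthread_mutex_t lock;	/* stands in for lock_sock()/release_sock() */
		bool dead;		/* stands in for SOCK_DEAD */
		int err;
	};

	static void sock_destroy(struct fake_sock *sk)
	{
		pthread_mutex_lock(&sk->lock);
		sk->dead = true;	/* from here on, close() has won */
		/* ... flush pending frames ... */
		pthread_mutex_unlock(&sk->lock);
	}

	static void sock_abort(struct fake_sock *sk, int err)
	{
		pthread_mutex_lock(&sk->lock);
		if (sk->dead)
			goto out;	/* lost the race with close(): do nothing */
		sk->err = err;
		/* ... report the error and disconnect ... */
	out:
		pthread_mutex_unlock(&sk->lock);
	}
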
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index 954c4591a6fd..45b8782aec0c 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -21,6 +21,45 @@ static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len);
}
+static bool udp_sk_has_data(struct sock *sk)
+{
+ return !skb_queue_empty(&udp_sk(sk)->reader_queue) ||
+ !skb_queue_empty(&sk->sk_receive_queue);
+}
+
+static bool psock_has_data(struct sk_psock *psock)
+{
+ return !skb_queue_empty(&psock->ingress_skb) ||
+ !sk_psock_queue_empty(psock);
+}
+
+#define udp_msg_has_data(__sk, __psock) \
+ ({ udp_sk_has_data(__sk) || psock_has_data(__psock); })
+
+static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ long timeo)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 1;
+
+ if (!timeo)
+ return ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = udp_msg_has_data(sk, psock);
+ if (!ret) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ ret = udp_msg_has_data(sk, psock);
+ }
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
@@ -34,8 +73,7 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (unlikely(!psock))
return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- lock_sock(sk);
- if (sk_psock_queue_empty(psock)) {
+ if (!psock_has_data(psock)) {
ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
goto out;
}
@@ -43,26 +81,21 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
- int data, err = 0;
long timeo;
+ int data;
timeo = sock_rcvtimeo(sk, nonblock);
- data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
+ data = udp_msg_wait_data(sk, psock, timeo);
if (data) {
- if (!sk_psock_queue_empty(psock))
+ if (psock_has_data(psock))
goto msg_bytes_ready;
ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
goto out;
}
- if (err) {
- ret = err;
- goto out;
- }
copied = -EAGAIN;
}
ret = copied;
out:
- release_sock(sk);
sk_psock_put(sk, psock);
return ret;
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index bd8773b49e72..cd1cd68adeec 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -31,7 +31,6 @@ static const struct net_protocol udplite_protocol = {
.handler = udplite_rcv,
.err_handler = udplite_err,
.no_policy = 1,
- .netns_ok = 1,
};
struct proto udplite_prot = {
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index ea595c8549c7..2fe5860c21d6 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -181,21 +181,18 @@ static const struct net_protocol esp4_protocol = {
.handler = xfrm4_esp_rcv,
.err_handler = xfrm4_esp_err,
.no_policy = 1,
- .netns_ok = 1,
};
static const struct net_protocol ah4_protocol = {
.handler = xfrm4_ah_rcv,
.err_handler = xfrm4_ah_err,
.no_policy = 1,
- .netns_ok = 1,
};
static const struct net_protocol ipcomp4_protocol = {
.handler = xfrm4_ipcomp_rcv,
.err_handler = xfrm4_ipcomp_err,
.no_policy = 1,
- .netns_ok = 1,
};
static const struct xfrm_input_afinfo xfrm4_input_afinfo = {
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index fb0648e7fb32..f4555a88f86b 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -42,7 +42,6 @@ static void ipip_destroy(struct xfrm_state *x)
}
static const struct xfrm_type ipip_type = {
- .description = "IPIP",
.owner = THIS_MODULE,
.proto = IPPROTO_IPIP,
.init_state = ipip_init_state,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b0ef65eb9bd2..3bf685fe64b9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET6_TOKEN]) {
err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
@@ -6903,10 +6903,10 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "addr_gen_mode",
- .data = &ipv6_devconf.addr_gen_mode,
- .maxlen = sizeof(int),
- .mode = 0644,
+ .procname = "addr_gen_mode",
+ .data = &ipv6_devconf.addr_gen_mode,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = addrconf_sysctl_addr_gen_mode,
},
{
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 20d492da725a..828e62514260 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -755,7 +755,6 @@ static int ah6_rcv_cb(struct sk_buff *skb, int err)
}
static const struct xfrm_type ah6_type = {
- .description = "AH6",
.owner = THIS_MODULE,
.proto = IPPROTO_AH,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -763,7 +762,6 @@ static const struct xfrm_type ah6_type = {
.destructor = ah6_destroy,
.input = ah6_input,
.output = ah6_output,
- .hdr_offset = xfrm6_find_1stfragopt,
};
static struct xfrm6_protocol ah6_protocol = {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 393ae2b78e7d..ed2f061b8768 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
@@ -1243,7 +1243,6 @@ static int esp6_rcv_cb(struct sk_buff *skb, int err)
}
static const struct xfrm_type esp6_type = {
- .description = "ESP6",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -1251,7 +1250,6 @@ static const struct xfrm_type esp6_type = {
.destructor = esp6_destroy,
.input = esp6_input,
.output = esp6_output,
- .hdr_offset = xfrm6_find_1stfragopt,
};
static struct xfrm6_protocol esp6_protocol = {
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 40ed4fcf1cf4..a349d4798077 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -377,7 +377,6 @@ static const struct net_offload esp6_offload = {
};
static const struct xfrm_type_offload esp6_type_offload = {
- .description = "ESP6 OFFLOAD",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.input_tail = esp6_input_tail,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 56e479d158b7..26882e165c9e 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -135,18 +135,23 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
len -= 2;
while (len > 0) {
- int optlen = nh[off + 1] + 2;
- int i;
+ int optlen, i;
- switch (nh[off]) {
- case IPV6_TLV_PAD1:
- optlen = 1;
+ if (nh[off] == IPV6_TLV_PAD1) {
padlen++;
if (padlen > 7)
goto bad;
- break;
+ off++;
+ len--;
+ continue;
+ }
+ if (len < 2)
+ goto bad;
+ optlen = nh[off + 1] + 2;
+ if (optlen > len)
+ goto bad;
- case IPV6_TLV_PADN:
+ if (nh[off] == IPV6_TLV_PADN) {
/* RFC 2460 states that the purpose of PadN is
* to align the containing header to multiples
* of 8. 7 is therefore the highest valid value.
@@ -163,12 +168,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
if (nh[off + i] != 0)
goto bad;
}
- break;
-
- default: /* Other TLV code so scan list */
- if (optlen > len)
- goto bad;
-
+ } else {
tlv_count++;
if (tlv_count > max_count)
goto bad;
@@ -188,7 +188,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
return false;
padlen = 0;
- break;
}
off += optlen;
len -= optlen;
@@ -306,7 +305,7 @@ fail_and_free:
#endif
if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
- init_net.ipv6.sysctl.max_dst_opts_cnt)) {
+ net->ipv6.sysctl.max_dst_opts_cnt)) {
skb->transport_header += extlen;
opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
@@ -1037,7 +1036,7 @@ fail_and_free:
opt->flags |= IP6SKB_HOPBYHOP;
if (ip6_parse_tlv(tlvprochopopt_lst, skb,
- init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
+ net->ipv6.sysctl.max_hbh_opts_cnt)) {
skb->transport_header += extlen;
opt = IP6CB(skb);
opt->nhoff = sizeof(struct ipv6hdr);
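
The rewritten ip6_parse_tlv() loop handles Pad1 before any length byte is read and validates both the two-byte minimum and the declared option length against the remaining buffer, so a truncated option can no longer be read past the end of the header. A standalone model of the new control flow (per-option dispatch and the tlv_count limit are elided):

	#include <stdbool.h>
	#include <stdint.h>

	#define TLV_PAD1 0
	#define TLV_PADN 1

	static bool parse_tlv(const uint8_t *nh, int off, int len)
	{
		int padlen = 0;

		while (len > 0) {
			int optlen;

			if (nh[off] == TLV_PAD1) {	/* single byte, no length */
				if (++padlen > 7)
					return false;	/* too much padding */
				off++;
				len--;
				continue;
			}
			if (len < 2)			/* no room for a length byte */
				return false;
			optlen = nh[off + 1] + 2;
			if (optlen > len)		/* option overruns the header */
				return false;

			if (nh[off] == TLV_PADN) {
				padlen += optlen;
				if (padlen > 7)		/* RFC 2460: 7 is the max */
					return false;
				for (int i = 2; i < optlen; i++)
					if (nh[off + i] != 0)	/* RFC 4942: zeroes only */
						return false;
			} else {
				/* a real option would be dispatched here */
				padlen = 0;
			}
			off += optlen;
			len -= optlen;
		}
		return len == 0;
	}
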
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8f9a83314de7..40f3e4f9f33a 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -467,7 +467,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
static int __net_init fib6_rules_net_init(struct net *net)
{
struct fib_rules_ops *ops;
- int err = -ENOMEM;
+ int err;
ops = fib_rules_register(&fib6_rules_ops_template, net);
if (IS_ERR(ops))
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index e8398ffb5e35..a7c31ab67c5d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -725,6 +725,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
struct ipcm6_cookie ipc6;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
bool acast;
+ u8 type;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
net->ipv6.sysctl.icmpv6_echo_ignore_multicast)
@@ -740,8 +741,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
!(net->ipv6.sysctl.anycast_src_echo_reply && acast))
saddr = NULL;
+ if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
+ type = ICMPV6_EXT_ECHO_REPLY;
+ else
+ type = ICMPV6_ECHO_REPLY;
+
memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
- tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
+ tmp_hdr.icmp6_type = type;
memset(&fl6, 0, sizeof(fl6));
if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES)
@@ -752,7 +758,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (saddr)
fl6.saddr = *saddr;
fl6.flowi6_oif = icmp6_iif(skb);
- fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+ fl6.fl6_icmp_type = type;
fl6.flowi6_mark = mark;
fl6.flowi6_uid = sock_net_uid(net, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
@@ -783,13 +789,17 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
msg.skb = skb;
msg.offset = 0;
- msg.type = ICMPV6_ECHO_REPLY;
+ msg.type = type;
ipcm6_init_sk(&ipc6, np);
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
ipc6.sockc.mark = mark;
+ if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
+ if (!icmp_build_probe(skb, (struct icmphdr *)&tmp_hdr))
+ goto out_dst_release;
+
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), &ipc6, &fl6,
@@ -911,6 +921,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
icmpv6_echo_reply(skb);
break;
+ case ICMPV6_EXT_ECHO_REQUEST:
+ if (!net->ipv6.sysctl.icmpv6_echo_ignore_all &&
+ net->ipv4.sysctl_icmp_echo_enable_probe)
+ icmpv6_echo_reply(skb);
+ break;
case ICMPV6_ECHO_REPLY:
success = ping_rcv(skb);
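
With this, icmpv6_echo_reply() also serves RFC 8335 PROBE: the reply type is derived once from the request type and threaded through the header template, flow key, and message. The mapping in isolation, with type values taken from the IANA ICMPv6 registry:

	#include <stdint.h>

	#define ICMPV6_ECHO_REQUEST	128
	#define ICMPV6_ECHO_REPLY	129
	#define ICMPV6_EXT_ECHO_REQUEST	160	/* RFC 8335 PROBE */
	#define ICMPV6_EXT_ECHO_REPLY	161

	static uint8_t reply_type(uint8_t req_type)
	{
		return req_type == ICMPV6_EXT_ECHO_REQUEST ? ICMPV6_EXT_ECHO_REPLY
							   : ICMPV6_ECHO_REPLY;
	}
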
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 679699e953f1..2d650dc24349 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -32,6 +32,7 @@
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -2355,6 +2356,10 @@ static int __net_init fib6_net_init(struct net *net)
if (err)
return err;
+ /* Default to 3-tuple */
+ net->ipv6.sysctl.multipath_hash_fields =
+ FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK;
+
spin_lock_init(&net->ipv6.fib6_gc_lock);
rwlock_init(&net->ipv6.fib6_walker_lock);
INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
@@ -2362,7 +2367,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
if (!net->ipv6.rt6_stats)
- goto out_timer;
+ goto out_notifier;
/* Avoid false sharing : Use at least a full cache line */
size = max_t(size_t, size, L1_CACHE_BYTES);
@@ -2407,7 +2412,7 @@ out_fib_table_hash:
kfree(net->ipv6.fib_table_hash);
out_rt6_stats:
kfree(net->ipv6.rt6_stats);
-out_timer:
+out_notifier:
fib6_notifier_exit(net);
return -ENOMEM;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ff4f9ebcf7f6..984050f35c61 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1055,13 +1055,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
* ip6_route_output will fail given src=any saddr, though, so
* that's why we try it again later.
*/
- if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
+ if (ipv6_addr_any(&fl6->saddr)) {
struct fib6_info *from;
struct rt6_info *rt;
- bool had_dst = *dst != NULL;
- if (!had_dst)
- *dst = ip6_route_output(net, sk, fl6);
+ *dst = ip6_route_output(net, sk, fl6);
rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
rcu_read_lock();
@@ -1078,7 +1076,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
* never existed and let the SA-enabled version take
* over.
*/
- if (!had_dst && (*dst)->error) {
+ if ((*dst)->error) {
dst_release(*dst);
*dst = NULL;
}
@@ -1555,7 +1553,7 @@ emsgsize:
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
- unsigned int alloclen;
+ unsigned int alloclen, alloc_extra;
unsigned int pagedlen;
alloc_new_skb:
/* There's no room in the current skb */
@@ -1582,17 +1580,28 @@ alloc_new_skb:
fraglen = datalen + fragheaderlen;
pagedlen = 0;
+ alloc_extra = hh_len;
+ alloc_extra += dst_exthdrlen;
+ alloc_extra += rt->dst.trailer_len;
+
+ /* We just reserve space for the fragment header.
+ * Note: this may be an overallocation if the message
+ * (without MSG_MORE) fits into the MTU.
+ */
+ alloc_extra += sizeof(struct frag_hdr);
+
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
- else if (!paged)
+ else if (!paged &&
+ (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+ !(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
else {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
}
-
- alloclen += dst_exthdrlen;
+ alloclen += alloc_extra;
if (datalen != length + fraggap) {
/*
@@ -1602,30 +1611,21 @@ alloc_new_skb:
datalen += rt->dst.trailer_len;
}
- alloclen += rt->dst.trailer_len;
fraglen = datalen + fragheaderlen;
- /*
- * We just reserve space for fragment header.
- * Note: this may be overallocation if the message
- * (without MSG_MORE) fits into the MTU.
- */
- alloclen += sizeof(struct frag_hdr);
-
copy = datalen - transhdrlen - fraggap - pagedlen;
if (copy < 0) {
err = -EINVAL;
goto error;
}
if (transhdrlen) {
- skb = sock_alloc_send_skb(sk,
- alloclen + hh_len,
+ skb = sock_alloc_send_skb(sk, alloclen,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
- skb = alloc_skb(alloclen + hh_len,
+ skb = alloc_skb(alloclen,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 288bafded998..322698d9fcf4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -837,6 +837,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
skb->dev = tunnel->dev;
+ skb_reset_mac_header(skb);
}
skb_reset_network_header(skb);
@@ -1239,8 +1240,6 @@ route_lookup:
if (max_headroom > dev->needed_headroom)
dev->needed_headroom = max_headroom;
- skb_set_inner_ipproto(skb, proto);
-
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
return err;
@@ -1377,6 +1376,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
+ skb_set_inner_ipproto(skb, protocol);
+
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
protocol);
if (err != 0) {
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index daef890460b7..15f984be3570 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -172,14 +172,12 @@ static int ipcomp6_rcv_cb(struct sk_buff *skb, int err)
}
static const struct xfrm_type ipcomp6_type = {
- .description = "IPCOMP6",
.owner = THIS_MODULE,
.proto = IPPROTO_COMP,
.init_state = ipcomp6_init_state,
.destructor = ipcomp_destroy,
.input = ipcomp_input,
.output = ipcomp_output,
- .hdr_offset = xfrm6_find_1stfragopt,
};
static struct xfrm6_protocol ipcomp6_protocol = {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 0d59efb6b49e..54ec163fbafa 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1729,26 +1729,26 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
+ u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
+ 2, 0, 0, IPV6_TLV_PADN, 0 };
struct net_device *dev = idev->dev;
- struct net *net = dev_net(dev);
- struct sock *sk = net->ipv6.igmp_sk;
- struct sk_buff *skb;
- struct mld2_report *pmr;
- struct in6_addr addr_buf;
- const struct in6_addr *saddr;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- unsigned int size = mtu + hlen + tlen;
+ struct net *net = dev_net(dev);
+ const struct in6_addr *saddr;
+ struct in6_addr addr_buf;
+ struct mld2_report *pmr;
+ struct sk_buff *skb;
+ unsigned int size;
+ struct sock *sk;
int err;
- u8 ra[8] = { IPPROTO_ICMPV6, 0,
- IPV6_TLV_ROUTERALERT, 2, 0, 0,
- IPV6_TLV_PADN, 0 };
- /* we assume size > sizeof(ra) here */
- /* limit our allocations to order-0 page */
- size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+ sk = net->ipv6.igmp_sk;
+ /* we assume size > sizeof(ra) here.
+ * Also try not to allocate high-order pages for a big MTU.
+ */
+ size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
skb = sock_alloc_send_skb(sk, size, 1, &err);
-
if (!skb)
return NULL;
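
The new sizing caps the linear part of the MLD report at half a page, so large MTUs (e.g. 9k jumbo frames) no longer force high-order page allocations. A one-function model, assuming a 4096-byte page for illustration:

	#define PAGE_SIZE 4096U		/* assumption, for illustration only */

	static unsigned int mld_skb_size(unsigned int mtu, unsigned int hlen,
					 unsigned int tlen)
	{
		unsigned int linear = mtu < PAGE_SIZE / 2 ? mtu : PAGE_SIZE / 2;

		return linear + hlen + tlen;	/* payload + head/tailroom */
	}
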
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 878fcec14949..aeb35d26e474 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -247,54 +247,6 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
return err;
}
-static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
- u8 **nexthdr)
-{
- u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
- const unsigned char *nh = skb_network_header(skb);
- unsigned int packet_len = skb_tail_pointer(skb) -
- skb_network_header(skb);
- int found_rhdr = 0;
-
- *nexthdr = &ipv6_hdr(skb)->nexthdr;
-
- while (offset + 1 <= packet_len) {
-
- switch (**nexthdr) {
- case NEXTHDR_HOP:
- break;
- case NEXTHDR_ROUTING:
- found_rhdr = 1;
- break;
- case NEXTHDR_DEST:
- /*
- * HAO MUST NOT appear more than once.
- * XXX: It is better to try to find by the end of
- * XXX: packet if HAO exists.
- */
- if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
- net_dbg_ratelimited("mip6: hao exists already, override\n");
- return offset;
- }
-
- if (found_rhdr)
- return offset;
-
- break;
- default:
- return offset;
- }
-
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr *)(nh + offset);
- }
-
- return offset;
-}
-
static int mip6_destopt_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
@@ -324,7 +276,6 @@ static void mip6_destopt_destroy(struct xfrm_state *x)
}
static const struct xfrm_type mip6_destopt_type = {
- .description = "MIP6DESTOPT",
.owner = THIS_MODULE,
.proto = IPPROTO_DSTOPTS,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR,
@@ -333,7 +284,6 @@ static const struct xfrm_type mip6_destopt_type = {
.input = mip6_destopt_input,
.output = mip6_destopt_output,
.reject = mip6_destopt_reject,
- .hdr_offset = mip6_destopt_offset,
};
static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
@@ -383,53 +333,6 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
-static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
- u8 **nexthdr)
-{
- u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
- const unsigned char *nh = skb_network_header(skb);
- unsigned int packet_len = skb_tail_pointer(skb) -
- skb_network_header(skb);
- int found_rhdr = 0;
-
- *nexthdr = &ipv6_hdr(skb)->nexthdr;
-
- while (offset + 1 <= packet_len) {
-
- switch (**nexthdr) {
- case NEXTHDR_HOP:
- break;
- case NEXTHDR_ROUTING:
- if (offset + 3 <= packet_len) {
- struct ipv6_rt_hdr *rt;
- rt = (struct ipv6_rt_hdr *)(nh + offset);
- if (rt->type != 0)
- return offset;
- }
- found_rhdr = 1;
- break;
- case NEXTHDR_DEST:
- if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
- return offset;
-
- if (found_rhdr)
- return offset;
-
- break;
- default:
- return offset;
- }
-
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr *)(nh + offset);
- }
-
- return offset;
-}
-
static int mip6_rthdr_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
@@ -456,7 +359,6 @@ static void mip6_rthdr_destroy(struct xfrm_state *x)
}
static const struct xfrm_type mip6_rthdr_type = {
- .description = "MIP6RT",
.owner = THIS_MODULE,
.proto = IPPROTO_ROUTING,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR,
@@ -464,7 +366,6 @@ static const struct xfrm_type mip6_rthdr_type = {
.destructor = mip6_rthdr_destroy,
.input = mip6_rthdr_input,
.output = mip6_rthdr_output,
- .hdr_offset = mip6_rthdr_offset,
};
static int __init mip6_init(void)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e810a23baf99..de2cf3943b91 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -51,7 +51,7 @@ ip6_packet_match(const struct sk_buff *skb,
const char *outdev,
const struct ip6t_ip6 *ip6info,
unsigned int *protoff,
- int *fragoff, bool *hotdrop)
+ u16 *fragoff, bool *hotdrop)
{
unsigned long ret;
const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index e204163c7036..92f3235fa287 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
}
EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+ if (likely(next != IPPROTO_ICMPV6))
+ return false;
+
+ if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+ return false;
+
+ return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
- nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
- nft_fib_store_result(dest, priv, nft_in(pkt));
- return;
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+ nft_hook(pkt) == NF_INET_INGRESS) {
+ if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+ nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+ nft_fib_store_result(dest, priv, nft_in(pkt));
+ return;
+ }
}
*dest = 0;
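
The new exemption covers duplicate address detection: a DAD neighbour solicitation is sent from the unspecified address to a link-local solicited-node destination before the interface has any usable source address, so a strict fib lookup would otherwise drop it. The same test modelled with the socket-API address macros (the kernel checks ipv6_addr_type() flags instead):

	#include <netinet/in.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool is_dad_probe(uint8_t nexthdr, const struct in6_addr *saddr,
				 const struct in6_addr *daddr)
	{
		if (nexthdr != IPPROTO_ICMPV6)
			return false;
		if (!IN6_IS_ADDR_UNSPECIFIED(saddr))	/* must come from :: */
			return false;
		return IN6_IS_ADDR_MC_LINKLOCAL(daddr) ||
		       IN6_IS_ADDR_LINKLOCAL(daddr);
	}
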
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 7969d1f3018d..ed69c768797e 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -28,7 +28,7 @@ static void nft_reject_ipv6_eval(const struct nft_expr *expr,
nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset6(nft_net(pkt), pkt->xt.state->sk, pkt->skb,
+ nf_send_reset6(nft_net(pkt), nft_sk(pkt), pkt->skb,
nft_hook(pkt));
break;
default:
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index af36acc1a644..2880dc7d9a49 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -15,29 +15,11 @@ static u32 __ipv6_select_ident(struct net *net,
const struct in6_addr *dst,
const struct in6_addr *src)
{
- const struct {
- struct in6_addr dst;
- struct in6_addr src;
- } __aligned(SIPHASH_ALIGNMENT) combined = {
- .dst = *dst,
- .src = *src,
- };
- u32 hash, id;
-
- /* Note the following code is not safe, but this is okay. */
- if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
- get_random_bytes(&net->ipv4.ip_id_key,
- sizeof(net->ipv4.ip_id_key));
-
- hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
-
- /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
- * set the hight order instead thus minimizing possible future
- * collisions.
- */
- id = ip_idents_reserve(hash, 1);
- if (unlikely(!id))
- id = 1 << 31;
+ u32 id;
+
+ do {
+ id = prandom_u32();
+ } while (!id);
return id;
}
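
The replacement above simply draws pseudo-random fragment IDs, redrawing on zero since an ID of zero means "unset". A minimal stand-alone sketch of the same loop, with rand() as a stand-in for the kernel's prandom_u32():

#include <stdint.h>
#include <stdlib.h>

/* Sketch only: rand() stands in for prandom_u32(); in the kernel the
 * value is a full 32-bit pseudo-random number.
 */
static uint32_t select_ident_demo(void)
{
	uint32_t id;

	do {
		id = (uint32_t)rand();
	} while (!id);	/* 0 means "unset", so redraw */

	return id;
}
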
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index bf3646b57c68..60f1e4f5be5a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -354,7 +354,7 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
if (np->recverr || harderr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 47a0dc46cbdb..28e44782c94d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (!(fhdr->frag_off & htons(0xFFF9))) {
+ if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
/* It is not a fragmented frame */
skb->transport_header += sizeof(struct frag_hdr);
__IP6_INC_STATS(net,
@@ -351,6 +351,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
+ sizeof(struct ipv6hdr);
return 1;
}
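
The frag_max_size bookkeeping added above records, for a frame that carries a fragment header but neither an offset nor the MF bit, the full reassembled size: the IPv6 payload length plus the fixed 40-byte header. A hedged illustration of that arithmetic:

#include <stdint.h>

/* Illustrative only: 40 is sizeof(struct ipv6hdr); payload_len is the
 * host-order IPv6 payload length from the header.
 */
static uint32_t demo_frag_max_size(uint16_t payload_len)
{
	return (uint32_t)payload_len + 40;
}
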
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a22822bdbf39..7b756a7dc036 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2326,12 +2326,131 @@ out:
}
}
+static u32 rt6_multipath_custom_hash_outer(const struct net *net,
+ const struct sk_buff *skb,
+ bool *p_has_inner)
+{
+ u32 hash_fields = ip6_multipath_hash_fields(net);
+ struct flow_keys keys, hash_keys;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
+
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ hash_keys.basic.ip_proto = keys.basic.ip_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+ hash_keys.tags.flow_label = keys.tags.flow_label;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ hash_keys.ports.src = keys.ports.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ hash_keys.ports.dst = keys.ports.dst;
+
+ *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
+ return flow_hash_from_keys(&hash_keys);
+}
+
+static u32 rt6_multipath_custom_hash_inner(const struct net *net,
+ const struct sk_buff *skb,
+ bool has_inner)
+{
+ u32 hash_fields = ip6_multipath_hash_fields(net);
+ struct flow_keys keys, hash_keys;
+
+ /* We assume the packet carries an encapsulation, but if none was
+ * encountered during dissection of the outer flow, then there is no
+ * point in calling the flow dissector again.
+ */
+ if (!has_inner)
+ return 0;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ skb_flow_dissect_flow_keys(skb, &keys, 0);
+
+ if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
+ return 0;
+
+ if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+ } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
+ hash_keys.tags.flow_label = keys.tags.flow_label;
+ }
+
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ hash_keys.basic.ip_proto = keys.basic.ip_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
+ hash_keys.ports.src = keys.ports.src;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+ hash_keys.ports.dst = keys.ports.dst;
+
+ return flow_hash_from_keys(&hash_keys);
+}
+
+static u32 rt6_multipath_custom_hash_skb(const struct net *net,
+ const struct sk_buff *skb)
+{
+ u32 mhash, mhash_inner;
+ bool has_inner = true;
+
+ mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
+ mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
+
+ return jhash_2words(mhash, mhash_inner, 0);
+}
+
+static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
+ const struct flowi6 *fl6)
+{
+ u32 hash_fields = ip6_multipath_hash_fields(net);
+ struct flow_keys hash_keys;
+
+ if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+ return 0;
+
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ hash_keys.addrs.v6addrs.src = fl6->saddr;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ hash_keys.addrs.v6addrs.dst = fl6->daddr;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ hash_keys.basic.ip_proto = fl6->flowi6_proto;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+ hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ hash_keys.ports.src = fl6->fl6_sport;
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ hash_keys.ports.dst = fl6->fl6_dport;
+
+ return flow_hash_from_keys(&hash_keys);
+}
+
/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
const struct sk_buff *skb, struct flow_keys *flkeys)
{
struct flow_keys hash_keys;
- u32 mhash;
+ u32 mhash = 0;
switch (ip6_multipath_hash_policy(net)) {
case 0:
@@ -2345,6 +2464,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
+ mhash = flow_hash_from_keys(&hash_keys);
break;
case 1:
if (skb) {
@@ -2376,6 +2496,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.ports.dst = fl6->fl6_dport;
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
+ mhash = flow_hash_from_keys(&hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
@@ -2412,9 +2533,15 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->flowi6_proto;
}
+ mhash = flow_hash_from_keys(&hash_keys);
+ break;
+ case 3:
+ if (skb)
+ mhash = rt6_multipath_custom_hash_skb(net, skb);
+ else
+ mhash = rt6_multipath_custom_hash_fl6(net, fl6);
break;
}
- mhash = flow_hash_from_keys(&hash_keys);
return mhash >> 1;
}
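
Policy 3 above hashes only the fields enabled in the fib_multipath_hash_fields bitmask. A rough stand-alone sketch of that selection pattern; the bit names mirror FIB_MULTIPATH_HASH_FIELD_*, but the helper and its mixing constants are hypothetical (the kernel uses flow_hash_from_keys()):

#include <stdint.h>

#define DEMO_FIELD_SRC_IP	(1U << 0)
#define DEMO_FIELD_DST_IP	(1U << 1)
#define DEMO_FIELD_IP_PROTO	(1U << 2)

struct demo_flow {
	uint32_t src;
	uint32_t dst;
	uint8_t proto;
};

/* hypothetical: fold only the sysctl-enabled fields into the hash */
static uint32_t demo_custom_hash(uint32_t hash_fields,
				 const struct demo_flow *f)
{
	uint32_t h = 0;

	if (hash_fields & DEMO_FIELD_SRC_IP)
		h ^= f->src * 2654435761u;	/* multiplicative mixing */
	if (hash_fields & DEMO_FIELD_DST_IP)
		h ^= f->dst * 2246822519u;
	if (hash_fields & DEMO_FIELD_IP_PROTO)
		h ^= (uint32_t)f->proto * 3266489917u;

	return h;
}
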
@@ -3673,11 +3800,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (nh) {
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
- goto out;
+ goto out_free;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
- goto out;
+ goto out_free;
}
rt->nh = nh;
fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3714,6 +3841,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
out:
fib6_info_release(rt);
return ERR_PTR(err);
+out_free:
+ ip_fib_metrics_put(rt->fib6_metrics);
+ kfree(rt);
+ return ERR_PTR(err);
}
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 4ff38cb08f4b..60bf3b877957 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -87,10 +87,10 @@ struct seg6_end_dt_info {
int vrf_ifindex;
int vrf_table;
- /* tunneled packet proto and family (IPv4 or IPv6) */
- __be16 proto;
+ /* tunneled packet family (IPv4 or IPv6).
+ * Protocol and header length are inferred from family.
+ */
u16 family;
- int hdrlen;
};
struct pcpu_seg6_local_counters {
@@ -521,19 +521,6 @@ static int __seg6_end_dt_vrf_build(struct seg6_local_lwt *slwt, const void *cfg,
info->net = net;
info->vrf_ifindex = vrf_ifindex;
- switch (family) {
- case AF_INET:
- info->proto = htons(ETH_P_IP);
- info->hdrlen = sizeof(struct iphdr);
- break;
- case AF_INET6:
- info->proto = htons(ETH_P_IPV6);
- info->hdrlen = sizeof(struct ipv6hdr);
- break;
- default:
- return -EINVAL;
- }
-
info->family = family;
info->mode = DT_VRF_MODE;
@@ -622,22 +609,44 @@ error:
}
static struct sk_buff *end_dt_vrf_core(struct sk_buff *skb,
- struct seg6_local_lwt *slwt)
+ struct seg6_local_lwt *slwt, u16 family)
{
struct seg6_end_dt_info *info = &slwt->dt_info;
struct net_device *vrf;
+ __be16 protocol;
+ int hdrlen;
vrf = end_dt_get_vrf_rcu(skb, info);
if (unlikely(!vrf))
goto drop;
- skb->protocol = info->proto;
+ switch (family) {
+ case AF_INET:
+ protocol = htons(ETH_P_IP);
+ hdrlen = sizeof(struct iphdr);
+ break;
+ case AF_INET6:
+ protocol = htons(ETH_P_IPV6);
+ hdrlen = sizeof(struct ipv6hdr);
+ break;
+ case AF_UNSPEC:
+ fallthrough;
+ default:
+ goto drop;
+ }
+
+ if (unlikely(info->family != AF_UNSPEC && info->family != family)) {
+ pr_warn_once("seg6local: SRv6 End.DT* family mismatch");
+ goto drop;
+ }
+
+ skb->protocol = protocol;
skb_dst_drop(skb);
- skb_set_transport_header(skb, info->hdrlen);
+ skb_set_transport_header(skb, hdrlen);
- return end_dt_vrf_rcv(skb, info->family, vrf);
+ return end_dt_vrf_rcv(skb, family, vrf);
drop:
kfree_skb(skb);
@@ -656,7 +665,7 @@ static int input_action_end_dt4(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
- skb = end_dt_vrf_core(skb, slwt);
+ skb = end_dt_vrf_core(skb, slwt, AF_INET);
if (!skb)
/* packet has been processed and consumed by the VRF */
return 0;
@@ -739,7 +748,7 @@ static int input_action_end_dt6(struct sk_buff *skb,
goto legacy_mode;
/* DT6_VRF_MODE */
- skb = end_dt_vrf_core(skb, slwt);
+ skb = end_dt_vrf_core(skb, slwt, AF_INET6);
if (!skb)
/* packet has been processed and consumed by the VRF */
return 0;
@@ -767,6 +776,36 @@ drop:
return -EINVAL;
}
+#ifdef CONFIG_NET_L3_MASTER_DEV
+static int seg6_end_dt46_build(struct seg6_local_lwt *slwt, const void *cfg,
+ struct netlink_ext_ack *extack)
+{
+ return __seg6_end_dt_vrf_build(slwt, cfg, AF_UNSPEC, extack);
+}
+
+static int input_action_end_dt46(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ unsigned int off = 0;
+ int nexthdr;
+
+ nexthdr = ipv6_find_hdr(skb, &off, -1, NULL, NULL);
+ if (unlikely(nexthdr < 0))
+ goto drop;
+
+ switch (nexthdr) {
+ case IPPROTO_IPIP:
+ return input_action_end_dt4(skb, slwt);
+ case IPPROTO_IPV6:
+ return input_action_end_dt6(skb, slwt);
+ }
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+#endif
+
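
End.DT46 above is a thin dispatcher: it peeks at the inner protocol and reuses the existing DT4/DT6 handlers. A condensed sketch of that shape (the helper name and return codes are illustrative only):

/* Sketch of the dispatch pattern; 4 and 41 are the protocol numbers the
 * real code matches via IPPROTO_IPIP and IPPROTO_IPV6.
 */
static int demo_end_dt46_dispatch(int inner_proto)
{
	switch (inner_proto) {
	case 4:		/* IPPROTO_IPIP: IPv4 inner packet */
		return 4;	/* would call input_action_end_dt4() */
	case 41:	/* IPPROTO_IPV6: IPv6 inner packet */
		return 6;	/* would call input_action_end_dt6() */
	default:
		return -1;	/* drop */
	}
}
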
/* push an SRH on top of the current one */
static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
@@ -969,6 +1008,17 @@ static struct seg6_action_desc seg6_action_table[] = {
.input = input_action_end_dt6,
},
{
+ .action = SEG6_LOCAL_ACTION_END_DT46,
+ .attrs = SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE),
+ .optattrs = SEG6_F_LOCAL_COUNTERS,
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ .input = input_action_end_dt46,
+ .slwt_ops = {
+ .build_state = seg6_end_dt46_build,
+ },
+#endif
+ },
+ {
.action = SEG6_LOCAL_ACTION_END_B6,
.attrs = SEG6_F_ATTR(SEG6_LOCAL_SRH),
.optattrs = SEG6_F_LOCAL_COUNTERS,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index aa98294a3ad3..df5bea818410 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -271,6 +271,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (ipip6_tunnel_create(dev) < 0)
goto failed_free;
+ if (!parms->name[0])
+ strcpy(parms->name, dev->name);
+
return nt;
failed_free:
@@ -707,6 +710,8 @@ static int ipip6_rcv(struct sk_buff *skb)
* old iph is no longer valid
*/
iph = (const struct iphdr *)skb_mac_header(skb);
+ skb_reset_mac_header(skb);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -777,6 +782,8 @@ static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
tpi = &ipip_tpi;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
+ skb_reset_mac_header(skb);
+
return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
}
@@ -970,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (df) {
mtu = dst_mtu(&rt->dst) - t_hlen;
- if (mtu < 68) {
+ if (mtu < IPV4_MIN_MTU) {
dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 27102c3d6e1d..d7cf26f730d7 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,13 +17,17 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
#include <net/netevent.h>
+#include <net/ip_fib.h>
#ifdef CONFIG_NETLABEL
#include <net/calipso.h>
#endif
static int two = 2;
+static int three = 3;
static int flowlabel_reflect_max = 0x7;
static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+static u32 rt6_multipath_hash_fields_all_mask =
+ FIB_MULTIPATH_HASH_FIELD_ALL_MASK;
static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -40,6 +44,22 @@ static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
return ret;
}
+static int
+proc_rt6_multipath_hash_fields(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct net *net;
+ int ret;
+
+ net = container_of(table->data, struct net,
+ ipv6.sysctl.multipath_hash_fields);
+ ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ call_netevent_notifiers(NETEVENT_IPV6_MPATH_HASH_UPDATE, net);
+
+ return ret;
+}
+
static struct ctl_table ipv6_table_template[] = {
{
.procname = "bindv6only",
@@ -149,7 +169,16 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_rt6_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = &three,
+ },
+ {
+ .procname = "fib_multipath_hash_fields",
+ .data = &init_net.ipv6.sysctl.multipath_hash_fields,
+ .maxlen = sizeof(u32),
+ .mode = 0644,
+ .proc_handler = proc_rt6_multipath_hash_fields,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &rt6_multipath_hash_fields_all_mask,
},
{
.procname = "seg6_flowlabel",
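
Taken together with the route.c changes, the new sysctl is exercised by selecting policy 3 and then writing a field mask. A hedged user-space sketch (paths follow the table above; the mask value 0x0007 is illustrative):

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* policy 3 = custom field selection (the new maximum above) */
	write_sysctl("/proc/sys/net/ipv6/fib_multipath_hash_policy", "3");
	/* e.g. source IP | destination IP | IP protocol */
	write_sysctl("/proc/sys/net/ipv6/fib_multipath_hash_fields", "0x0007");
	return 0;
}
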
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5f47c0b6e3de..578ab6305c3f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -467,7 +467,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
- sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+ sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
} else
@@ -486,7 +486,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
} else
sk->sk_err_soft = err;
@@ -1538,6 +1538,7 @@ discard:
kfree_skb(skb);
return 0;
csum_err:
+ trace_tcp_bad_csum(skb);
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
@@ -1663,10 +1664,18 @@ process:
goto csum_error;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
- inet_csk_reqsk_queue_drop_and_put(sk, req);
- goto lookup;
+ nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
+ if (!nsk) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
+ sk = nsk;
+		/* reuseport_migrate_sock() has already taken one sk_refcnt
+		 * before returning.
+ */
+ } else {
+ sock_hold(sk);
}
- sock_hold(sk);
refcounted = true;
nsk = NULL;
if (!tcp_filter(sk, skb)) {
@@ -1754,6 +1763,7 @@ no_tcp_socket:
if (tcp_checksum_complete(skb)) {
csum_error:
+ trace_tcp_bad_csum(skb);
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 199b080d418a..368972dbd919 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -610,7 +610,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
out:
return 0;
}
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8b84d534b19d..57fa27c1cdf9 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -16,13 +16,6 @@
#include <net/ip6_route.h>
#include <net/xfrm.h>
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
- u8 **prevhdr)
-{
- return ip6_find_1stfragopt(skb, prevhdr);
-}
-EXPORT_SYMBOL(xfrm6_find_1stfragopt);
-
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
{
struct flowi6 fl6;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f696d46e6910..2b31112c0856 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -291,7 +291,6 @@ static void xfrm6_tunnel_destroy(struct xfrm_state *x)
}
static const struct xfrm_type xfrm6_tunnel_type = {
- .description = "IP6IP6",
.owner = THIS_MODULE,
.proto = IPPROTO_IPV6,
.init_state = xfrm6_tunnel_init_state,
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 0fdb389c3390..44453b35c7b7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -44,6 +44,7 @@ static struct proto iucv_proto = {
};
static struct iucv_interface *pr_iucv;
+static struct iucv_handler af_iucv_handler;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
@@ -91,28 +92,11 @@ static void iucv_sock_close(struct sock *sk);
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
-/* Call Back functions */
-static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
-static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
-static void iucv_callback_connack(struct iucv_path *, u8 *);
-static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
-static void iucv_callback_connrej(struct iucv_path *, u8 *);
-static void iucv_callback_shutdown(struct iucv_path *, u8 *);
-
static struct iucv_sock_list iucv_sk_list = {
.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
.autobind_name = ATOMIC_INIT(0)
};
-static struct iucv_handler af_iucv_handler = {
- .path_pending = iucv_callback_connreq,
- .path_complete = iucv_callback_connack,
- .path_severed = iucv_callback_connrej,
- .message_pending = iucv_callback_rx,
- .message_complete = iucv_callback_txdone,
- .path_quiesced = iucv_callback_shutdown,
-};
-
static inline void high_nmcpy(unsigned char *dst, char *src)
{
memcpy(dst, src, 8);
@@ -1817,6 +1801,15 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
+static struct iucv_handler af_iucv_handler = {
+ .path_pending = iucv_callback_connreq,
+ .path_complete = iucv_callback_connack,
+ .path_severed = iucv_callback_connrej,
+ .message_pending = iucv_callback_rx,
+ .message_complete = iucv_callback_txdone,
+ .path_quiesced = iucv_callback_shutdown,
+};
+
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 6201965bd822..11a715d76a4f 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -47,7 +47,7 @@ static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
static void report_csk_error(struct sock *csk, int err)
{
csk->sk_err = EPIPE;
- csk->sk_error_report(csk);
+ sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ef9b4ac03e7b..de24a7d474df 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -141,7 +141,6 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
struct pfkey_sock *pfk;
- int err;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -150,10 +149,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
if (protocol != PF_KEY_V2)
return -EPROTONOSUPPORT;
- err = -ENOMEM;
sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
if (sk == NULL)
- goto out;
+ return -ENOMEM;
pfk = pfkey_sk(sk);
mutex_init(&pfk->dump_lock);
@@ -169,8 +167,6 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
pfkey_insert(sk);
return 0;
-out:
- return err;
}
static int pfkey_release(struct socket *sock)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 97ae1255fcb6..b3edafa5fba4 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -488,7 +488,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
}
- /* We dont need to clone dst here, it is guaranteed to not disappear.
+ /* We don't need to clone dst here, it is guaranteed to not disappear.
* __dev_xmit_skb() might force a refcount if needed.
*/
skb_dst_set_noref(skb, &rt->dst);
@@ -635,7 +635,6 @@ static struct inet_protosw l2tp_ip_protosw = {
static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
- .netns_ok = 1,
};
static int __init l2tp_ip_init(void)
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index aea85f91f059..bf35710127dd 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -226,7 +226,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
/* If the first two bytes are 0xFF03, consider that it is the PPP's
* Address and Control fields and skip them. The L2TP module has always
* worked this way, although, in theory, the use of these fields should
- * be negociated and handled at the PPP layer. These fields are
+ * be negotiated and handled at the PPP layer. These fields are
* constant: 0xFF is the All-Stations Address and 0x03 the Unnumbered
* Information command with Poll/Final bit set to zero (RFC 1662).
*/
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 1078e14f1acf..0971ca48ba15 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -80,11 +80,9 @@ static void __lapb_insert_cb(struct lapb_cb *lapb)
static struct lapb_cb *__lapb_devtostruct(struct net_device *dev)
{
- struct list_head *entry;
struct lapb_cb *lapb, *use = NULL;
- list_for_each(entry, &lapb_list) {
- lapb = list_entry(entry, struct lapb_cb, node);
+ list_for_each_entry(lapb, &lapb_list, node) {
if (lapb->dev == dev) {
use = lapb;
break;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7a99892e5aba..84cc7733ea66 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1442,6 +1442,38 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
#endif
}
+static void sta_apply_airtime_params(struct ieee80211_local *local,
+ struct sta_info *sta,
+ struct station_parameters *params)
+{
+ u8 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct airtime_sched_info *air_sched = &local->airtime[ac];
+ struct airtime_info *air_info = &sta->airtime[ac];
+ struct txq_info *txqi;
+ u8 tid;
+
+ spin_lock_bh(&air_sched->lock);
+ for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
+ if (air_info->weight == params->airtime_weight ||
+ !sta->sta.txq[tid] ||
+ ac != ieee80211_ac_from_tid(tid))
+ continue;
+
+ airtime_weight_set(air_info, params->airtime_weight);
+
+ txqi = to_txq_info(sta->sta.txq[tid]);
+ if (RB_EMPTY_NODE(&txqi->schedule_order))
+ continue;
+
+ ieee80211_update_airtime_weight(local, air_sched,
+ 0, true);
+ }
+ spin_unlock_bh(&air_sched->lock);
+ }
+}
+
static int sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
@@ -1629,7 +1661,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta_apply_mesh_params(local, sta, params);
if (params->airtime_weight)
- sta->airtime_weight = params->airtime_weight;
+ sta_apply_airtime_params(local, sta, params);
+
/* set the STA state after all sta info from usermode has been set */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
@@ -1693,15 +1726,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
test_sta_flag(sta, WLAN_STA_ASSOC))
rate_control_rate_init(sta);
- err = sta_info_insert_rcu(sta);
- if (err) {
- rcu_read_unlock();
- return err;
- }
-
- rcu_read_unlock();
-
- return 0;
+ return sta_info_insert(sta);
}
static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 907bb1f748a1..76fc36a68750 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
+ * Copyright 2020 - 2021 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -308,8 +309,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
* the max of min required widths of all the interfaces bound to this
* channel context.
*/
-void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
enum nl80211_chan_width max_bw;
struct cfg80211_chan_def min_def;
@@ -326,7 +327,7 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
ctx->conf.def.width == NL80211_CHAN_WIDTH_16 ||
ctx->conf.radar_enabled) {
ctx->conf.min_def = ctx->conf.def;
- return;
+ return 0;
}
max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);
@@ -337,17 +338,21 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
ieee80211_chandef_downgrade(&min_def);
if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
- return;
+ return 0;
ctx->conf.min_def = min_def;
if (!ctx->driver_present)
- return;
+ return 0;
- drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
+ return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
}
+/* Calling this function assumes that the station vif has already been
+ * updated to the latest changes by calling ieee80211_vif_update_chandef.
+ */
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+ struct ieee80211_chanctx *ctx,
+ bool narrowed)
{
struct sta_info *sta;
struct ieee80211_supported_band *sband =
@@ -366,9 +371,16 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
continue;
new_sta_bw = ieee80211_sta_cur_vht_bw(sta);
+
+	/* nothing changed */
if (new_sta_bw == sta->sta.bandwidth)
continue;
+		/* vif changed to a narrower BW but a narrower BW for the
+		 * station wasn't requested, or vice versa */
+ if ((new_sta_bw < sta->sta.bandwidth) == !narrowed)
+ continue;
+
sta->sta.bandwidth = new_sta_bw;
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_BW_CHANGED);
@@ -376,21 +388,34 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
rcu_read_unlock();
}
-static void ieee80211_change_chanctx(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *chandef)
+/*
+ * recalc the min required chan width of the channel context, which is
+ * the max of min required widths of all the interfaces bound to this
+ * channel context.
+ */
+void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
- enum nl80211_chan_width width;
+ u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx);
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
- ieee80211_recalc_chanctx_min_def(local, ctx);
+ if (!changed)
return;
- }
- WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+	/* check whether the BW narrowed */
+ ieee80211_chan_bw_change(local, ctx, true);
- width = ctx->conf.def.width;
- ctx->conf.def = *chandef;
+ drv_change_chanctx(local, ctx, changed);
+
+	/* check whether the BW widened */
+ ieee80211_chan_bw_change(local, ctx, false);
+}
+
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ struct ieee80211_chanctx *old_ctx,
+ const struct cfg80211_chan_def *chandef)
+{
+ u32 changed;
/* expected to handle only 20/40/80/160 channel widths */
switch (chandef->width) {
@@ -405,19 +430,33 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
WARN_ON(1);
}
- if (chandef->width < width)
- ieee80211_chan_bw_change(local, ctx);
+	/* Check whether the BW narrowed - we do this _before_ calling
+	 * recalc_chanctx_min_def because we may not reach that call, e.g.
+	 * when a new context was added for the first time with all
+	 * parameters already up to date.
+	 */
+ ieee80211_chan_bw_change(local, old_ctx, true);
+
+ if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+ ieee80211_recalc_chanctx_min_def(local, ctx);
+ return;
+ }
- drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
- ieee80211_recalc_chanctx_min_def(local, ctx);
+ WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+
+ ctx->conf.def = *chandef;
+
+ /* check if min chanctx also changed */
+ changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
+ _ieee80211_recalc_chanctx_min_def(local, ctx);
+ drv_change_chanctx(local, ctx, changed);
if (!local->use_chanctx) {
local->_oper_chandef = *chandef;
ieee80211_hw_config(local, 0);
}
- if (chandef->width > width)
- ieee80211_chan_bw_change(local, ctx);
+	/* check whether the BW widened */
+ ieee80211_chan_bw_change(local, old_ctx, false);
}
static struct ieee80211_chanctx *
@@ -450,7 +489,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (!compat)
continue;
- ieee80211_change_chanctx(local, ctx, compat);
+ ieee80211_change_chanctx(local, ctx, ctx, compat);
return ctx;
}
@@ -679,7 +718,7 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
if (!compat)
return;
- ieee80211_change_chanctx(local, ctx, compat);
+ ieee80211_change_chanctx(local, ctx, ctx, compat);
}
static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
@@ -1107,13 +1146,12 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!chandef))
return -EINVAL;
- if (old_ctx->conf.def.width > new_ctx->conf.def.width)
- ieee80211_chan_bw_change(local, new_ctx);
+ if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
+ changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_change_chanctx(local, new_ctx, chandef);
+ ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
- if (old_ctx->conf.def.width < new_ctx->conf.def.width)
- ieee80211_chan_bw_change(local, new_ctx);
+ ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1142,14 +1180,9 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
ieee80211_free_chanctx(local, old_ctx);
- if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
- changed = BSS_CHANGED_BANDWIDTH;
-
- ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
-
+ ieee80211_recalc_chanctx_min_def(local, new_ctx);
ieee80211_recalc_smps_chanctx(local, new_ctx);
ieee80211_recalc_radar_chanctx(local, new_ctx);
- ieee80211_recalc_chanctx_min_def(local, new_ctx);
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);
@@ -1188,7 +1221,7 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!chandef))
return -EINVAL;
- ieee80211_change_chanctx(local, new_ctx, chandef);
+ ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
list_del(&sdata->reserved_chanctx_list);
sdata->reserved_chanctx = NULL;
@@ -1505,7 +1538,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
ieee80211_recalc_smps_chanctx(local, ctx);
ieee80211_recalc_radar_chanctx(local, ctx);
ieee80211_recalc_chanctx_min_def(local, ctx);
- ieee80211_chan_bw_change(local, ctx);
list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
reserved_chanctx_list) {
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 9245c0421bda..8dbfe325ee66 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct file *file,
"VI %u %u\n"
"BE %u %u\n"
"BK %u %u\n",
- local->aql_txq_limit_low[IEEE80211_AC_VO],
- local->aql_txq_limit_high[IEEE80211_AC_VO],
- local->aql_txq_limit_low[IEEE80211_AC_VI],
- local->aql_txq_limit_high[IEEE80211_AC_VI],
- local->aql_txq_limit_low[IEEE80211_AC_BE],
- local->aql_txq_limit_high[IEEE80211_AC_BE],
- local->aql_txq_limit_low[IEEE80211_AC_BK],
- local->aql_txq_limit_high[IEEE80211_AC_BK]);
+ local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
+ local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
+ local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
+ local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
+ local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
+ local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
+ local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
+ local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
return simple_read_from_buffer(user_buf, count, ppos,
buf, len);
}
@@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struct file *file,
if (ac >= IEEE80211_NUM_ACS)
return -EINVAL;
- q_limit_low_old = local->aql_txq_limit_low[ac];
- q_limit_high_old = local->aql_txq_limit_high[ac];
+ q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
+ q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
- local->aql_txq_limit_low[ac] = q_limit_low;
- local->aql_txq_limit_high[ac] = q_limit_high;
+ local->airtime[ac].aql_txq_limit_low = q_limit_low;
+ local->airtime[ac].aql_txq_limit_high = q_limit_high;
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
@@ -382,15 +382,62 @@ static const struct file_operations force_tx_status_ops = {
.llseek = default_llseek,
};
+static ssize_t airtime_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[200];
+ u64 v_t[IEEE80211_NUM_ACS];
+ u64 wt[IEEE80211_NUM_ACS];
+ int len = 0, ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ spin_lock_bh(&local->airtime[ac].lock);
+ v_t[ac] = local->airtime[ac].v_t;
+ wt[ac] = local->airtime[ac].weight_sum;
+ spin_unlock_bh(&local->airtime[ac].lock);
+ }
+ len = scnprintf(buf, sizeof(buf),
+ "\tVO VI BE BK\n"
+ "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
+ "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
+ v_t[0],
+ v_t[1],
+ v_t[2],
+ v_t[3],
+ wt[0],
+ wt[1],
+ wt[2],
+ wt[3]);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, len);
+}
+
+static const struct file_operations airtime_ops = {
+ .read = airtime_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
#ifdef CONFIG_PM
static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
+ int ret;
rtnl_lock();
+ wiphy_lock(local->hw.wiphy);
__ieee80211_suspend(&local->hw, NULL);
- __ieee80211_resume(&local->hw);
+ ret = __ieee80211_resume(&local->hw);
+ wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
return count;
@@ -625,7 +672,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (local->ops->wake_tx_queue)
DEBUGFS_ADD_MODE(aqm, 0600);
- DEBUGFS_ADD_MODE(airtime_flags, 0600);
+ if (wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
+ DEBUGFS_ADD_MODE(airtime, 0600);
+ DEBUGFS_ADD_MODE(airtime_flags, 0600);
+ }
DEBUGFS_ADD(aql_txq_limit);
debugfs_create_u32("aql_threshold", 0600,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 0ad3860852ff..db724fc10a5f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -57,7 +57,6 @@ static ssize_t ieee80211_if_write(
return -EFAULT;
buf[count] = '\0';
- ret = -ENODEV;
rtnl_lock();
ret = (*write)(sdata, buf, count);
rtnl_unlock();
@@ -513,6 +512,34 @@ static ssize_t ieee80211_if_fmt_aqm(
}
IEEE80211_IF_FILE_R(aqm);
+static ssize_t ieee80211_if_fmt_airtime(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_txq *txq = sdata->vif.txq;
+ struct airtime_info *air_info;
+ int len;
+
+ if (!txq)
+ return 0;
+
+ spin_lock_bh(&local->airtime[txq->ac].lock);
+ air_info = to_airtime_info(txq);
+ len = scnprintf(buf,
+ buflen,
+ "RX: %llu us\nTX: %llu us\nWeight: %u\n"
+ "Virt-T: %lld us\n",
+ air_info->rx_airtime,
+ air_info->tx_airtime,
+ air_info->weight,
+ air_info->v_t);
+ spin_unlock_bh(&local->airtime[txq->ac].lock);
+
+ return len;
+}
+
+IEEE80211_IF_FILE_R(airtime);
+
IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
/* IBSS attributes */
@@ -658,8 +685,10 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
if (sdata->local->ops->wake_tx_queue &&
sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_NAN)
+ sdata->vif.type != NL80211_IFTYPE_NAN) {
DEBUGFS_ADD(aqm);
+ DEBUGFS_ADD(airtime);
+ }
}
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 936c9dfa86c8..8be28cfd6f64 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
size_t bufsz = 400;
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
u64 rx_airtime = 0, tx_airtime = 0;
- s64 deficit[IEEE80211_NUM_ACS];
+ u64 v_t[IEEE80211_NUM_ACS];
ssize_t rv;
int ac;
@@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->airtime[ac].lock);
rx_airtime += sta->airtime[ac].rx_airtime;
tx_airtime += sta->airtime[ac].tx_airtime;
- deficit[ac] = sta->airtime[ac].deficit;
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ v_t[ac] = sta->airtime[ac].v_t;
+ spin_unlock_bh(&local->airtime[ac].lock);
}
p += scnprintf(p, bufsz + buf - p,
"RX: %llu us\nTX: %llu us\nWeight: %u\n"
- "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
- rx_airtime, tx_airtime, sta->airtime_weight,
- deficit[0], deficit[1], deficit[2], deficit[3]);
+ "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
+ rx_airtime, tx_airtime, sta->airtime[0].weight,
+ v_t[0], v_t[1], v_t[2], v_t[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
@@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->airtime[ac].lock);
sta->airtime[ac].rx_airtime = 0;
sta->airtime[ac].tx_airtime = 0;
- sta->airtime[ac].deficit = sta->airtime_weight;
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ sta->airtime[ac].v_t = 0;
+ spin_unlock_bh(&local->airtime[ac].lock);
}
return count;
@@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
return -ENOMEM;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- spin_lock_bh(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->airtime[ac].lock);
q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ spin_unlock_bh(&local->airtime[ac].lock);
q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 604ca59937f0..bcb7cc06db3d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019 Intel Corporation
+* Copyright (C) 2018 - 2019, 2021 Intel Corporation
*/
#ifndef __MAC80211_DRIVER_OPS
@@ -821,7 +821,7 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- u16 duration)
+ struct ieee80211_prep_tx_info *info)
{
might_sleep();
@@ -829,9 +829,27 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
return;
WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
- trace_drv_mgd_prepare_tx(local, sdata, duration);
+ trace_drv_mgd_prepare_tx(local, sdata, info->duration,
+ info->subtype, info->success);
if (local->ops->mgd_prepare_tx)
- local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, duration);
+ local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, info);
+ trace_drv_return_void(local);
+}
+
+static inline void drv_mgd_complete_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_prep_tx_info *info)
+{
+ might_sleep();
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+ trace_drv_mgd_complete_tx(local, sdata, info->duration,
+ info->subtype, info->success);
+ if (local->ops->mgd_complete_tx)
+ local->ops->mgd_complete_tx(&local->hw, &sdata->vif, info);
trace_drv_return_void(local);
}
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 0c0b970835ce..c05af7018f79 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -111,7 +111,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
- struct ieee80211_sta_he_cap own_he_cap = sband->iftype_data->he_cap;
+ struct ieee80211_sta_he_cap own_he_cap;
struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
u8 he_ppe_size;
u8 mcs_nss_size;
@@ -120,9 +120,13 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
memset(he_cap, 0, sizeof(*he_cap));
- if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
+ if (!he_cap_ie ||
+ !ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif)))
return;
+ own_he_cap = sband->iftype_data->he_cap;
+
/* Make sure size is OK */
mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
he_ppe_size =
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 3d62a80b5790..2eb7641f5556 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -555,17 +555,15 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP))
+ if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
return;
- if (vif->type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.driver_smps_mode == smps_mode)
- return;
- sdata->u.mgd.driver_smps_mode = smps_mode;
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.mgd.request_smps_work);
- }
+ if (sdata->u.mgd.driver_smps_mode == smps_mode)
+ return;
+
+ sdata->u.mgd.driver_smps_mode = smps_mode;
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.request_smps_work);
}
/* this might change ... don't want non-open drivers using it */
EXPORT_SYMBOL_GPL(ieee80211_request_smps);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8fcbaa1eedf3..22549b95d1aa 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -50,12 +50,6 @@ struct ieee80211_local;
#define IEEE80211_ENCRYPT_HEADROOM 8
#define IEEE80211_ENCRYPT_TAILROOM 18
-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
- * reception of at least three fragmented frames. This limit can be increased
- * by changing this define, at the cost of slower frame reassembly and
- * increased memory use (about 2 kB of RAM per entry). */
-#define IEEE80211_FRAGMENT_MAX 4
-
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -88,18 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
-struct ieee80211_fragment_entry {
- struct sk_buff_head skb_list;
- unsigned long first_frag_time;
- u16 seq;
- u16 extra_len;
- u16 last_frag;
- u8 rx_queue;
- bool check_sequential_pn; /* needed for CCMP/GCMP */
- u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
-};
-
-
struct ieee80211_bss {
u32 device_ts_beacon, device_ts_presp;
@@ -241,8 +223,15 @@ struct ieee80211_rx_data {
*/
int security_idx;
- u32 tkip_iv32;
- u16 tkip_iv16;
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ } ccm_gcm;
+ };
};
struct ieee80211_csa_settings {
@@ -842,17 +831,16 @@ enum txq_info_flags {
* @def_flow: used as a fallback flow when a packet destined to @tin hashes to
* a fq_flow which is already owned by a different tin
* @def_cvars: codel vars for @def_flow
- * @frags: used to keep fragments created after dequeue
* @schedule_order: used with ieee80211_local->active_txqs
- * @schedule_round: counter to prevent infinite loops on TXQ scheduling
+ * @frags: used to keep fragments created after dequeue
*/
struct txq_info {
struct fq_tin tin;
struct codel_vars def_cvars;
struct codel_stats cstats;
+ struct rb_node schedule_order;
+
struct sk_buff_head frags;
- struct list_head schedule_order;
- u16 schedule_round;
unsigned long flags;
/* keep last! */
@@ -902,9 +890,7 @@ struct ieee80211_sub_if_data {
char name[IFNAMSIZ];
- /* Fragment table for host-based reassembly */
- struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
- unsigned int fragment_next;
+ struct ieee80211_fragment_cache frags;
/* TID bitmap for NoAck policy */
u16 noack_map;
@@ -931,6 +917,8 @@ struct ieee80211_sub_if_data {
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
struct mac80211_qos_map __rcu *qos_map;
+ struct airtime_info airtime[IEEE80211_NUM_ACS];
+
struct work_struct csa_finalize_work;
bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
struct cfg80211_chan_def csa_chandef;
@@ -1143,6 +1131,44 @@ enum mac80211_scan_state {
SCAN_ABORT,
};
+/**
+ * struct airtime_sched_info - state used for airtime scheduling and AQL
+ *
+ * @lock: spinlock that protects all the fields in this struct
+ * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
+ * @schedule_pos: the current position maintained while a driver walks the tree
+ * with ieee80211_next_txq()
+ * @active_list: list of struct airtime_info structs that were active within
+ * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
+ * weight_sum
+ * @last_weight_update: used for rate limiting walking active_list
+ * @last_schedule_activity: tracks the last time a transmission was
+ *	scheduled; used for catching up v_t if no stations are eligible
+ *	for transmission.
+ * @v_t: global virtual time; queues with v_t < this are eligible for
+ * transmission
+ * @weight_sum: total sum of the weights of all active stations, used
+ *	for dividing airtime
+ * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
+ * path - see comment above
+ * IEEE80211_RECIPROCAL_DIVISOR_64)
+ * @aql_txq_limit_low: AQL limit when total outstanding airtime
+ * is < IEEE80211_AQL_THRESHOLD
+ * @aql_txq_limit_high: AQL limit when total outstanding airtime
+ * is > IEEE80211_AQL_THRESHOLD
+ */
+struct airtime_sched_info {
+ spinlock_t lock;
+ struct rb_root_cached active_txqs;
+ struct rb_node *schedule_pos;
+ struct list_head active_list;
+ u64 last_weight_update;
+ u64 last_schedule_activity;
+ u64 v_t;
+ u64 weight_sum;
+ u64 weight_sum_reciprocal;
+ u32 aql_txq_limit_low;
+ u32 aql_txq_limit_high;
+};
DECLARE_STATIC_KEY_FALSE(aql_disable);
struct ieee80211_local {
@@ -1156,13 +1182,8 @@ struct ieee80211_local {
struct codel_params cparams;
/* protects active_txqs and txqi->schedule_order */
- spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
- struct list_head active_txqs[IEEE80211_NUM_ACS];
- u16 schedule_round[IEEE80211_NUM_ACS];
-
+ struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
u16 airtime_flags;
- u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
- u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
u32 aql_threshold;
atomic_t aql_total_pending_airtime;
@@ -1427,10 +1448,6 @@ struct ieee80211_local {
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
-
- /* TDLS channel switch */
- struct work_struct tdls_chsw_work;
- struct sk_buff_head skb_queue_tdls_chsw;
};
static inline struct ieee80211_sub_if_data *
@@ -1455,7 +1472,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON_ONCE(!chanctx_conf)) {
+ if (!chanctx_conf) {
rcu_read_unlock();
return NULL;
}
@@ -1580,6 +1597,125 @@ static inline bool txq_has_queue(struct ieee80211_txq *txq)
return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
}
+static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
+
+ if (txq->sta) {
+ sta = container_of(txq->sta, struct sta_info, sta);
+ return &sta->airtime[txq->ac];
+ }
+
+ sdata = vif_to_sdata(txq->vif);
+ return &sdata->airtime[txq->ac];
+}
+
+/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
+ * airtime weight calculations. There are two different weights to keep track
+ * of: The per-station weight and the sum of weights per phy.
+ *
+ * For the per-station weights (kept in airtime_info below), we use 32-bit
+ * reciprocals with a divisor of 2^19. This lets us keep the multiplications and
+ * divisions for the station weights as 32-bit operations at the cost of a bit
+ * of rounding error for high weights; but the choice of divisor keeps rounding
+ * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
+ * reported at a time.
+ *
+ * For the per-phy sum of weights the values can get higher, so we use 64-bit
+ * operations for those with a 32-bit divisor, which should avoid any
+ * significant rounding errors.
+ */
+#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
+#define IEEE80211_RECIPROCAL_SHIFT_64 32
+#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
+#define IEEE80211_RECIPROCAL_SHIFT_32 19
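
As a worked example of the comment above, dividing an airtime sample by a station weight becomes a multiply by the precomputed reciprocal plus a shift. A stand-alone sketch under the 32-bit parameters (the 64-bit intermediate below is for clarity; the kernel keeps the per-station math in 32 bits at the cost of the rounding error described):

#include <stdint.h>

/* airtime / weight ~= (airtime * (2^19 / weight)) >> 19; weight != 0 */
static uint32_t demo_weight_scale(uint32_t airtime_us, uint16_t weight)
{
	uint32_t reciprocal = 0x80000U / weight;	/* 2^19 / weight */

	return (uint32_t)(((uint64_t)airtime_us * reciprocal) >> 19);
}
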
+
+static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
+{
+ if (air_info->weight == weight)
+ return;
+
+ air_info->weight = weight;
+ if (weight) {
+ air_info->weight_reciprocal =
+ IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
+ } else {
+ air_info->weight_reciprocal = 0;
+ }
+}
+
+static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
+ int weight_sum)
+{
+ if (air_sched->weight_sum == weight_sum)
+ return;
+
+ air_sched->weight_sum = weight_sum;
+ if (air_sched->weight_sum) {
+ air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
+ do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
+ } else {
+ air_sched->weight_sum_reciprocal = 0;
+ }
+}
+
+/* A problem when trying to enforce airtime fairness is that we want to divide
+ * the airtime between the currently *active* stations. However, basing this on
+ * the instantaneous queue state of stations doesn't work, as queues tend to
+ * oscillate very quickly between empty and occupied, leading to the scheduler
+ * thinking only a single station is active when deciding whether to allow
+ * transmission (and thus not throttling correctly).
+ *
+ * To fix this we use a timer-based notion of activity: a station is considered
+ * active if it has been scheduled within the last 100 ms; we keep a separate
+ * list of all the stations considered active in this manner, and lazily update
+ * the total weight of active stations from this list (filtering the stations in
+ * the list by their 'last active' time).
+ *
+ * We add one additional safeguard to guard against stations that manage to get
+ * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
+ * up any airtime. Such stations would be able to get priority for an extended
+ * period of time if they do start transmitting at full capacity again, and so
+ * we add an explicit maximum for how far behind a station is allowed to fall in
+ * the virtual airtime domain. This limit is set to a relatively high value of
+ * 20 ms because the main mechanism for catching up idle stations is the active
+ * state as described above; i.e., the hard limit should only be hit in
+ * pathological cases.
+ */
+#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
+#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
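
A small sketch of the hard limit described above: an idle station's virtual time is never allowed to lag the global virtual time by more than AIRTIME_MAX_BEHIND, so it cannot bank unbounded priority while idle. The helper name and the clamping site are hypothetical:

#include <stdint.h>

#define DEMO_MAX_BEHIND 20000ULL	/* 20 ms, as AIRTIME_MAX_BEHIND */

static uint64_t demo_clamp_v_t(uint64_t sta_v_t, uint64_t global_v_t)
{
	/* catch an idle station's v_t up to within the allowed window */
	if (global_v_t > DEMO_MAX_BEHIND &&
	    sta_v_t < global_v_t - DEMO_MAX_BEHIND)
		return global_v_t - DEMO_MAX_BEHIND;

	return sta_v_t;
}
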
+
+static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
+{
+ return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
+}
+
+static inline void airtime_set_active(struct airtime_sched_info *air_sched,
+ struct airtime_info *air_info, u64 now)
+{
+ air_info->last_scheduled = now;
+ air_sched->last_schedule_activity = now;
+ list_move_tail(&air_info->list, &air_sched->active_list);
+}
+
+static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
+ u64 v_t, u64 now)
+{
+ air_sched->v_t = v_t;
+ return true;
+}
+
+static inline void init_airtime_info(struct airtime_info *air_info,
+ struct airtime_sched_info *air_sched)
+{
+ atomic_set(&air_info->aql_tx_pending, 0);
+ air_info->aql_limit_low = air_sched->aql_txq_limit_low;
+ air_info->aql_limit_high = air_sched->aql_txq_limit_high;
+ airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
+ INIT_LIST_HEAD(&air_info->list);
+}
+
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
{
return ether_addr_equal(raddr, addr) ||
@@ -1822,6 +1958,14 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
u64 *cookie);
int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len);
+void ieee80211_resort_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq,
+ bool purge);
+void ieee80211_update_airtime_weight(struct ieee80211_local *local,
+ struct airtime_sched_info *air_sched,
+ u64 now, bool force);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -1892,7 +2036,6 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
-void ieee80211_sta_set_rx_nss(struct sta_info *sta);
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -2300,9 +2443,13 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
-void ieee80211_tdls_chsw_work(struct work_struct *wk);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason);
+void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+
+
const char *ieee80211_get_reason_code_string(u16 reason_code);
u16 ieee80211_encode_usf(int val);
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
@@ -2320,4 +2467,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
#define debug_noinline
#endif
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
+
#endif /* IEEE80211_I_H */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7032a2b59249..1e5e9fc45523 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -476,14 +476,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
GFP_KERNEL);
}
- /* APs need special treatment */
if (sdata->vif.type == NL80211_IFTYPE_AP) {
- struct ieee80211_sub_if_data *vlan, *tmpsdata;
-
- /* down all dependent devices, that is VLANs */
- list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
- u.vlan.list)
- dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/* remove all packets in parent bc_buf pointing to this dev */
@@ -641,6 +634,15 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* close all dependent VLAN interfaces before locking wiphy */
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ struct ieee80211_sub_if_data *vlan, *tmpsdata;
+
+ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+ u.vlan.list)
+ dev_close(vlan->dev);
+ }
+
wiphy_lock(sdata->local->hw.wiphy);
ieee80211_do_stop(sdata, true);
wiphy_unlock(sdata->local->hw.wiphy);
@@ -677,16 +679,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
*/
static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
- int i;
-
/* free extra data */
ieee80211_free_keys(sdata, false);
ieee80211_debugfs_remove_netdev(sdata);
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- __skb_queue_purge(&sdata->fragments[i].skb_list);
- sdata->fragment_next = 0;
+ ieee80211_destroy_frag_cache(&sdata->frags);
if (ieee80211_vif_is_mesh(&sdata->vif))
ieee80211_mesh_teardown_sdata(sdata);
@@ -1320,13 +1318,130 @@ static void ieee80211_if_setup_no_queue(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
}
+static void ieee80211_iface_process_skb(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+ struct sta_info *sta;
+ int len = skb->len;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+ if (sta) {
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ ieee80211_process_addba_request(local, sta,
+ mgmt, len);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ ieee80211_process_addba_resp(local, sta,
+ mgmt, len);
+ break;
+ case WLAN_ACTION_DELBA:
+ ieee80211_process_delba(sdata, sta,
+ mgmt, len);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_VHT) {
+ switch (mgmt->u.action.u.vht_group_notif.action_code) {
+ case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+ struct ieee80211_rx_status *status;
+ enum nl80211_band band;
+ struct sta_info *sta;
+ u8 opmode;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ band = status->band;
+ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+
+ if (sta)
+ ieee80211_vht_handle_opmode(sdata, sta, opmode,
+ band);
+
+ mutex_unlock(&local->sta_mtx);
+ break;
+ }
+ case WLAN_VHT_ACTION_GROUPID_MGMT:
+ ieee80211_process_mu_groups(sdata, mgmt);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ } else if (ieee80211_is_ext(mgmt->frame_control)) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ ieee80211_sta_rx_queued_ext(sdata, skb);
+ else
+ WARN_ON(1);
+ } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *)mgmt;
+ struct sta_info *sta;
+
+ /*
+ * So the frame isn't mgmt, but frame_control
+ * is at the right place anyway, of course, so
+ * the if statement is correct.
+ *
+ * Warn if we have other data frame types here,
+ * they must not get here.
+ */
+ WARN_ON(hdr->frame_control &
+ cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
+ WARN_ON(!(hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)));
+ /*
+ * This was a fragment of a frame, received while
+ * a block-ack session was active. That cannot be
+ * right, so terminate the session.
+ */
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+ if (sta) {
+ u16 tid = ieee80211_get_tid(hdr);
+
+ __ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP,
+ true);
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+ break;
+ default:
+ WARN(1, "frame for unexpected interface type");
+ break;
+ }
+}
+
static void ieee80211_iface_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data, work);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
- struct sta_info *sta;
if (!ieee80211_sdata_running(sdata))
return;
@@ -1339,116 +1454,12 @@ static void ieee80211_iface_work(struct work_struct *work)
/* first process frames */
while ((skb = skb_dequeue(&sdata->skb_queue))) {
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
-
kcov_remote_start_common(skb_get_kcov_handle(skb));
- if (ieee80211_is_action(mgmt->frame_control) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK) {
- int len = skb->len;
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, mgmt->sa);
- if (sta) {
- switch (mgmt->u.action.u.addba_req.action_code) {
- case WLAN_ACTION_ADDBA_REQ:
- ieee80211_process_addba_request(
- local, sta, mgmt, len);
- break;
- case WLAN_ACTION_ADDBA_RESP:
- ieee80211_process_addba_resp(local, sta,
- mgmt, len);
- break;
- case WLAN_ACTION_DELBA:
- ieee80211_process_delba(sdata, sta,
- mgmt, len);
- break;
- default:
- WARN_ON(1);
- break;
- }
- }
- mutex_unlock(&local->sta_mtx);
- } else if (ieee80211_is_action(mgmt->frame_control) &&
- mgmt->u.action.category == WLAN_CATEGORY_VHT) {
- switch (mgmt->u.action.u.vht_group_notif.action_code) {
- case WLAN_VHT_ACTION_OPMODE_NOTIF: {
- struct ieee80211_rx_status *status;
- enum nl80211_band band;
- u8 opmode;
-
- status = IEEE80211_SKB_RXCB(skb);
- band = status->band;
- opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, mgmt->sa);
-
- if (sta)
- ieee80211_vht_handle_opmode(sdata, sta,
- opmode,
- band);
-
- mutex_unlock(&local->sta_mtx);
- break;
- }
- case WLAN_VHT_ACTION_GROUPID_MGMT:
- ieee80211_process_mu_groups(sdata, mgmt);
- break;
- default:
- WARN_ON(1);
- break;
- }
- } else if (ieee80211_is_ext(mgmt->frame_control)) {
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_sta_rx_queued_ext(sdata, skb);
- else
- WARN_ON(1);
- } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
- struct ieee80211_hdr *hdr = (void *)mgmt;
- /*
- * So the frame isn't mgmt, but frame_control
- * is at the right place anyway, of course, so
- * the if statement is correct.
- *
- * Warn if we have other data frame types here,
- * they must not get here.
- */
- WARN_ON(hdr->frame_control &
- cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
- WARN_ON(!(hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG)));
- /*
- * This was a fragment of a frame, received while
- * a block-ack session was active. That cannot be
- * right, so terminate the session.
- */
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, mgmt->sa);
- if (sta) {
- u16 tid = ieee80211_get_tid(hdr);
-
- __ieee80211_stop_rx_ba_session(
- sta, tid, WLAN_BACK_RECIPIENT,
- WLAN_REASON_QSTA_REQUIRE_SETUP,
- true);
- }
- mutex_unlock(&local->sta_mtx);
- } else switch (sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- ieee80211_sta_rx_queued_mgmt(sdata, skb);
- break;
- case NL80211_IFTYPE_ADHOC:
- ieee80211_ibss_rx_queued_mgmt(sdata, skb);
- break;
- case NL80211_IFTYPE_MESH_POINT:
- if (!ieee80211_vif_is_mesh(&sdata->vif))
- break;
- ieee80211_mesh_rx_queued_mgmt(sdata, skb);
- break;
- default:
- WARN(1, "frame for unexpected interface type");
- break;
- }
+ if (skb->protocol == cpu_to_be16(ETH_P_TDLS))
+ ieee80211_process_tdls_channel_switch(sdata, skb);
+ else
+ ieee80211_iface_process_skb(local, sdata, skb);
kfree_skb(skb);
kcov_remote_stop();
@@ -1595,6 +1606,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
+ if (!list_empty(&sdata->u.ap.vlans))
+ return -EBUSY;
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_OCB:
@@ -1930,8 +1944,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- skb_queue_head_init(&sdata->fragments[i].skb_list);
+ ieee80211_init_frag_cache(&sdata->frags);
INIT_LIST_HEAD(&sdata->key_list);
@@ -1964,6 +1977,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
}
}
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
+
ieee80211_set_default_queues(sdata);
sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 56c068cb49c4..f695fc80088b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
+ static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
key->sdata = sdata;
key->sta = sta;
+ /*
+ * Assign a unique ID to every key so we can easily prevent mixed
+ * key and fragment cache attacks.
+ */
+ key->color = atomic_inc_return(&key_color);
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 7ad72e9b4991..1e326c89d721 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -128,6 +128,8 @@ struct ieee80211_key {
} debugfs;
#endif
+ unsigned int color;
+
/*
* key config, must be last because it contains key
* material as variable length member
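The key color introduced in this hunk is nothing more than a process-wide, monotonically increasing ID; the fragment cache later compares it to reject fragments protected by a different key than the first fragment. A minimal user-space sketch of the idea, with hypothetical names (the real checks live in ieee80211_rx_h_defragment()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint key_color_counter;

struct key {
	unsigned int color; /* unique per installed key */
};

struct frag_entry {
	bool is_protected;      /* the first fragment was protected */
	unsigned int key_color; /* color of the key that protected it */
};

static void key_install(struct key *k)
{
	/* mirrors key->color = atomic_inc_return(&key_color) */
	k->color = atomic_fetch_add(&key_color_counter, 1u) + 1u;
}

/* later fragments must be protected by the very same key */
static bool frag_key_matches(const struct frag_entry *e, const struct key *k)
{
	return !e->is_protected || (k && k->color == e->key_color);
}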
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index b275c8853074..6de8d0ad5497 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -259,7 +259,6 @@ static void tpt_trig_timer(struct timer_list *t)
{
struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
struct ieee80211_local *local = tpt_trig->local;
- struct led_classdev *led_cdev;
unsigned long on, off, tpt;
int i;
@@ -283,10 +282,7 @@ static void tpt_trig_timer(struct timer_list *t)
}
}
- read_lock(&local->tpt_led.leddev_list_lock);
- list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
- led_blink_set(led_cdev, &on, &off);
- read_unlock(&local->tpt_led.leddev_list_lock);
+ led_trigger_blink(&local->tpt_led, &on, &off);
}
const char *
@@ -341,7 +337,6 @@ static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local)
static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
{
struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
- struct led_classdev *led_cdev;
if (!tpt_trig->running)
return;
@@ -349,10 +344,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
tpt_trig->running = false;
del_timer_sync(&tpt_trig->timer);
- read_lock(&local->tpt_led.leddev_list_lock);
- list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
- led_set_brightness(led_cdev, LED_OFF);
- read_unlock(&local->tpt_led.leddev_list_lock);
+ led_trigger_event(&local->tpt_led, LED_OFF);
}
void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 62145e5f9628..05f4c3c72619 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -252,18 +252,18 @@ static void ieee80211_restart_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, restart_work);
struct ieee80211_sub_if_data *sdata;
+ int ret;
/* wait for scan work complete */
flush_workqueue(local->workqueue);
flush_work(&local->sched_scan_stopped_work);
+ flush_work(&local->radar_detected_work);
+
+ rtnl_lock();
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
"%s called with hardware scan in progress\n", __func__);
- flush_work(&local->radar_detected_work);
- /* we might do interface manipulations, so need both */
- rtnl_lock();
- wiphy_lock(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
/*
* XXX: there may be more work for other vif types and even
@@ -301,8 +301,12 @@ static void ieee80211_restart_work(struct work_struct *work)
/* wait for all packet processing to be done */
synchronize_net();
- ieee80211_reconfig(local);
+ ret = ieee80211_reconfig(local);
wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
}
@@ -701,10 +705,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->queue_stop_reason_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- INIT_LIST_HEAD(&local->active_txqs[i]);
- spin_lock_init(&local->active_txq_lock[i]);
- local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
- local->aql_txq_limit_high[i] =
+ struct airtime_sched_info *air_sched = &local->airtime[i];
+
+ air_sched->active_txqs = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&air_sched->active_list);
+ spin_lock_init(&air_sched->lock);
+ air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
+ air_sched->aql_txq_limit_high =
IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
}
@@ -734,8 +741,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
- INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
-
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
@@ -752,7 +757,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
- skb_queue_head_init(&local->skb_queue_tdls_chsw);
ieee80211_alloc_led_names(local);
@@ -1009,8 +1013,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (!supp_he)
- supp_he = !!ieee80211_get_he_sta_cap(sband);
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+
+ iftd = &sband->iftype_data[i];
+
+ supp_he = supp_he || (iftd && iftd->he_cap.has_he);
+ }
/* HT, VHT, HE require QoS, thus >= 4 queues */
if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS &&
@@ -1384,7 +1393,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&local->roc_work);
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
- cancel_work_sync(&local->tdls_chsw_work);
flush_work(&local->sched_scan_stopped_work);
flush_work(&local->radar_detected_work);
@@ -1396,7 +1404,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
- skb_queue_purge(&local->skb_queue_tdls_chsw);
wiphy_unregister(local->hw.wiphy);
destroy_workqueue(local->workqueue);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 40492d1bd8fd..77080b4f87b8 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -134,7 +134,7 @@ struct mesh_path {
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containging all mesh_path objects
+ * @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3db514c4c63a..a05b615deb51 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1124,7 +1124,7 @@ enddiscovery:
* forwarding information is found.
*
* Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
- * skb is freeed here if no mpath could be allocated.
+ * skb is freed here if no mpath could be allocated.
*/
int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 620ecf922408..efbefcbac3ac 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -122,7 +122,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
hdr = (struct ieee80211_hdr *) skb->data;
/* we preserve the previous mesh header and only add
- * the new addreses */
+ * the new addresses */
mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
mshdr->flags = MESH_FLAGS_AE_A5_A6;
memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index aca26df7587d..a6915847d78a 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -150,7 +150,7 @@ out:
* mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
* mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
* selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
- * is selected if all peers in our 20/40MHz MBSS support HT and atleast one
+ * is selected if all peers in our 20/40MHz MBSS support HT and at least one
* HT20 peer is present. Otherwise no-protection mode is selected.
*/
static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2480bd0577bb..a00f11a33699 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*/
#include <linux/delay.h>
@@ -371,7 +371,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
u16 ht_opmode;
u32 flags;
- enum ieee80211_sta_rx_bandwidth new_sta_bw;
u32 vht_cap_info = 0;
int ret;
@@ -385,7 +384,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* don't check HE if we associated as non-HE station */
if (ifmgd->flags & IEEE80211_STA_DISABLE_HE ||
- !ieee80211_get_he_sta_cap(sband))
+ !ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif)))
+
he_oper = NULL;
if (WARN_ON_ONCE(!sta))
@@ -445,40 +446,13 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
IEEE80211_STA_DISABLE_160MHZ)) ||
!cfg80211_chandef_valid(&chandef)) {
sdata_info(sdata,
- "AP %pM changed bandwidth in a way we can't support - disconnect\n",
- ifmgd->bssid);
+ "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
+ ifmgd->bssid, flags, ifmgd->flags);
return -EINVAL;
}
- switch (chandef.width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- new_sta_bw = IEEE80211_STA_RX_BW_20;
- break;
- case NL80211_CHAN_WIDTH_40:
- new_sta_bw = IEEE80211_STA_RX_BW_40;
- break;
- case NL80211_CHAN_WIDTH_80:
- new_sta_bw = IEEE80211_STA_RX_BW_80;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
- new_sta_bw = IEEE80211_STA_RX_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- if (new_sta_bw > sta->cur_max_bandwidth)
- new_sta_bw = sta->cur_max_bandwidth;
-
- if (new_sta_bw < sta->sta.bandwidth) {
- sta->sta.bandwidth = new_sta_bw;
- rate_control_rate_update(local, sband, sta,
- IEEE80211_RC_BW_CHANGED);
- }
-
ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
+
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
@@ -486,12 +460,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
return ret;
}
- if (new_sta_bw > sta->sta.bandwidth) {
- sta->sta.bandwidth = new_sta_bw;
- rate_control_rate_update(local, sband, sta,
- IEEE80211_RC_BW_CHANGED);
- }
-
return 0;
}
@@ -617,7 +585,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
/*
- * If some other vif is using the MU-MIMO capablity we cannot associate
+ * If some other vif is using the MU-MIMO capability we cannot associate
* using MU-MIMO - this will lead to contradictions in the group-id
* mechanism.
* Ownership is defined since association request, in order to avoid
@@ -676,7 +644,8 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
- he_cap = ieee80211_get_he_sta_cap(sband);
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
if (!he_cap || !reg_cap)
return;
@@ -712,6 +681,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
u32 rates = 0;
__le16 listen_int;
struct element *ext_capa = NULL;
+ enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
+ const struct ieee80211_sband_iftype_data *iftd;
+ struct ieee80211_prep_tx_info info = {};
/* we know it's writable, cast away the const */
if (assoc_data->ie_len)
@@ -756,6 +728,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
}
+ iftd = ieee80211_get_sband_iftype_data(sband, iftype);
+
skb = alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + /* bit too much but doesn't matter */
2 + assoc_data->ssid_len + /* SSID */
@@ -770,7 +744,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
assoc_data->ie_len + /* extra IEs */
(assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) +
- 9, /* WMM */
+ 9 + /* WMM */
+ (iftd ? iftd->vendor_elems.len : 0),
GFP_KERNEL);
if (!skb)
return;
@@ -810,12 +785,14 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
mgmt->u.reassoc_req.listen_interval = listen_int;
memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
ETH_ALEN);
+ info.subtype = IEEE80211_STYPE_REASSOC_REQ;
} else {
skb_put(skb, 4);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ASSOC_REQ);
mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
mgmt->u.assoc_req.listen_interval = listen_int;
+ info.subtype = IEEE80211_STYPE_ASSOC_REQ;
}
/* SSID */
@@ -1043,6 +1020,9 @@ skip_rates:
ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb);
}
+ if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
+ skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
+
/* add any remaining custom (i.e. vendor specific here) IEs */
if (assoc_data->ie_len) {
noffset = assoc_data->ie_len;
@@ -1060,7 +1040,7 @@ skip_rates:
ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
ifmgd->assoc_req_ies_len = pos - ie_start;
- drv_mgd_prepare_tx(local, sdata, 0);
+ drv_mgd_prepare_tx(local, sdata, &info);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
@@ -1094,11 +1074,6 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- /* Don't send NDPs when STA is connected HE */
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
- return;
-
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
!ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
if (!skb)
@@ -1130,10 +1105,6 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
- /* Don't send NDPs when connected HE */
- if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
- return;
-
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
if (!skb)
return;
@@ -1183,10 +1154,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
*/
if (sdata->reserved_chanctx) {
- struct ieee80211_supported_band *sband = NULL;
- struct sta_info *mgd_sta = NULL;
- enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
-
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
@@ -1195,48 +1162,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (sdata->reserved_ready)
goto out;
- if (sdata->vif.bss_conf.chandef.width !=
- sdata->csa_chandef.width) {
- /*
- * For managed interface, we need to also update the AP
- * station bandwidth and align the rate scale algorithm
- * on the bandwidth change. Here we only consider the
- * bandwidth of the new channel definition (as channel
- * switch flow does not have the full HT/VHT/HE
- * information), assuming that if additional changes are
- * required they would be done as part of the processing
- * of the next beacon from the AP.
- */
- switch (sdata->csa_chandef.width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- default:
- bw = IEEE80211_STA_RX_BW_20;
- break;
- case NL80211_CHAN_WIDTH_40:
- bw = IEEE80211_STA_RX_BW_40;
- break;
- case NL80211_CHAN_WIDTH_80:
- bw = IEEE80211_STA_RX_BW_80;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
- bw = IEEE80211_STA_RX_BW_160;
- break;
- }
-
- mgd_sta = sta_info_get(sdata, ifmgd->bssid);
- sband =
- local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
- }
-
- if (sdata->vif.bss_conf.chandef.width >
- sdata->csa_chandef.width) {
- mgd_sta->sta.bandwidth = bw;
- rate_control_rate_update(local, sband, mgd_sta,
- IEEE80211_RC_BW_CHANGED);
- }
-
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
@@ -1247,13 +1172,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
- if (sdata->vif.bss_conf.chandef.width <
- sdata->csa_chandef.width) {
- mgd_sta->sta.bandwidth = bw;
- rate_control_rate_update(local, sband, mgd_sta,
- IEEE80211_RC_BW_CHANGED);
- }
-
goto out;
}
@@ -2341,6 +2259,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
u32 changed = 0;
+ struct ieee80211_prep_tx_info info = {
+ .subtype = stype,
+ };
sdata_assert_lock(sdata);
@@ -2390,8 +2311,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
* driver requested so.
*/
if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) &&
- !ifmgd->have_beacon)
- drv_mgd_prepare_tx(sdata->local, sdata, 0);
+ !ifmgd->have_beacon) {
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
+ }
ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid,
ifmgd->bssid, stype, reason,
@@ -2402,6 +2324,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
if (tx)
ieee80211_flush_queues(local, sdata, false);
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
+
/* clear bssid only after building the needed mgmt frames */
eth_zero_addr(ifmgd->bssid);
@@ -2617,10 +2541,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
ifmgd->nullfunc_failed = false;
- if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
- ifmgd->probe_send_count--;
- else
- ieee80211_send_nullfunc(sdata->local, sdata, false);
+ ieee80211_send_nullfunc(sdata->local, sdata, false);
} else {
int ssid_len;
@@ -2952,6 +2873,9 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
u8 *pos;
struct ieee802_11_elems elems;
u32 tx_flags = 0;
+ struct ieee80211_prep_tx_info info = {
+ .subtype = IEEE80211_STYPE_AUTH,
+ };
pos = mgmt->u.auth.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
@@ -2959,7 +2883,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
if (!elems.challenge)
return;
auth_data->expected_transaction = 4;
- drv_mgd_prepare_tx(sdata->local, sdata, 0);
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
@@ -3012,6 +2936,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
.type = MLME_EVENT,
.u.mlme.data = AUTH_EVENT,
};
+ struct ieee80211_prep_tx_info info = {
+ .subtype = IEEE80211_STYPE_AUTH,
+ };
sdata_assert_lock(sdata);
@@ -3040,7 +2967,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
auth_transaction,
ifmgd->auth_data->expected_transaction);
- return;
+ goto notify_driver;
}
if (status_code != WLAN_STATUS_SUCCESS) {
@@ -3051,7 +2978,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
(auth_transaction == 1 &&
(status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
status_code == WLAN_STATUS_SAE_PK))))
- return;
+ goto notify_driver;
sdata_info(sdata, "%pM denied authentication (status %d)\n",
mgmt->sa, status_code);
@@ -3059,7 +2986,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
event.u.mlme.status = MLME_DENIED;
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
- return;
+ goto notify_driver;
}
switch (ifmgd->auth_data->algorithm) {
@@ -3081,10 +3008,11 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
default:
WARN_ONCE(1, "invalid auth alg %d",
ifmgd->auth_data->algorithm);
- return;
+ goto notify_driver;
}
event.u.mlme.status = MLME_SUCCESS;
+ info.success = 1;
drv_event_callback(sdata->local, sdata, &event);
if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE ||
(auth_transaction == 2 &&
@@ -3098,6 +3026,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
}
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+notify_driver:
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
}
#define case_WLAN(type) \
@@ -3314,6 +3244,23 @@ static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta)
+{
+ const struct ieee80211_sta_he_cap *own_he_cap =
+ ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
+
+ return bss_conf->he_support &&
+ (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT) &&
+ own_he_cap &&
+ (own_he_cap->he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT);
+}
+
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss,
struct ieee80211_mgmt *mgmt, size_t len,
@@ -3529,6 +3476,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
bss_conf->twt_protected = false;
}
+ bss_conf->twt_broadcast =
+ ieee80211_twt_bcast_support(sdata, bss_conf, sband, sta);
+
if (bss_conf->he_support) {
bss_conf->he_bss_color.color =
le32_get_bits(elems->he_operation->he_oper_params,
@@ -3699,6 +3649,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
.type = MLME_EVENT,
.u.mlme.data = ASSOC_EVENT,
};
+ struct ieee80211_prep_tx_info info = {};
sdata_assert_lock(sdata);
@@ -3728,6 +3679,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
aid = 0; /* TODO */
}
+ /*
+ * Note: this may not be perfect, the AP might misbehave - if
+ * anyone needs to rely on a completion notification with the
+ * exact right subtype, then we need to track what we actually
+ * transmitted.
+ */
+ info.subtype = reassoc ? IEEE80211_STYPE_REASSOC_REQ :
+ IEEE80211_STYPE_ASSOC_REQ;
+
sdata_info(sdata,
"RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
reassoc ? "Rea" : "A", mgmt->sa,
@@ -3753,7 +3713,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
assoc_data->timeout_started = true;
if (ms > IEEE80211_ASSOC_TIMEOUT)
run_again(sdata, assoc_data->timeout);
- return;
+ goto notify_driver;
}
if (status_code != WLAN_STATUS_SUCCESS) {
@@ -3768,7 +3728,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, false, false);
cfg80211_assoc_timeout(sdata->dev, cbss);
- return;
+ goto notify_driver;
}
event.u.mlme.status = MLME_SUCCESS;
drv_event_callback(sdata->local, sdata, &event);
@@ -3786,10 +3746,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
if (sdata->tx_conf[ac].uapsd)
uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
+
+ info.success = 1;
}
cfg80211_rx_assoc_resp(sdata->dev, cbss, (u8 *)mgmt, len, uapsd_queues,
ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len);
+notify_driver:
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -4062,10 +4026,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (elems.mbssid_config_ie)
bss_conf->profile_periodicity =
elems.mbssid_config_ie->profile_periodicity;
+ else
+ bss_conf->profile_periodicity = 0;
if (elems.ext_capab_len >= 11 &&
(elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
bss_conf->ema_ap = true;
+ else
+ bss_conf->ema_ap = false;
/* continue assoc process */
ifmgd->assoc_data->timeout = jiffies;
@@ -4404,7 +4372,9 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
u32 tx_flags = 0;
u16 trans = 1;
u16 status = 0;
- u16 prepare_tx_duration = 0;
+ struct ieee80211_prep_tx_info info = {
+ .subtype = IEEE80211_STYPE_AUTH,
+ };
sdata_assert_lock(sdata);
@@ -4427,10 +4397,9 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
}
if (auth_data->algorithm == WLAN_AUTH_SAE)
- prepare_tx_duration =
- jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
+ info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
- drv_mgd_prepare_tx(local, sdata, prepare_tx_duration);
+ drv_mgd_prepare_tx(local, sdata, &info);
sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
auth_data->bss->bssid, auth_data->tries,
@@ -4925,11 +4894,13 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
}
static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband,
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
const struct ieee80211_he_operation *he_op)
{
const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_sta_cap(sband);
+ ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
u16 ap_min_req_set;
int i;
@@ -5023,7 +4994,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
- if (!ieee80211_get_he_sta_cap(sband))
+ if (!ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif)))
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
rcu_read_lock();
@@ -5081,7 +5053,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
else
he_oper = NULL;
- if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper))
+ if (!ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
@@ -5651,15 +5623,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2 * FILS_NONCE_LEN);
assoc_data->bss = req->bss;
-
- if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
- if (ifmgd->powersave)
- sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- } else
- sdata->smps_mode = ifmgd->req_smps;
-
assoc_data->capability = req->bss->capability;
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
@@ -5766,6 +5729,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (err)
goto err_clear;
+ if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
+ if (ifmgd->powersave)
+ sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ sdata->smps_mode = IEEE80211_SMPS_OFF;
+ } else {
+ sdata->smps_mode = ifmgd->req_smps;
+ }
+
rcu_read_lock();
beacon_ies = rcu_dereference(req->bss->beacon_ies);
@@ -5802,12 +5774,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 3)
sdata->vif.bss_conf.profile_periodicity = elem->data[2];
+ else
+ sdata->vif.bss_conf.profile_periodicity = 0;
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 11 &&
(elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
sdata->vif.bss_conf.ema_ap = true;
+ else
+ sdata->vif.bss_conf.ema_ap = false;
} else {
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
@@ -5846,6 +5822,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx = !req->local_state_change;
+ struct ieee80211_prep_tx_info info = {
+ .subtype = IEEE80211_STYPE_DEAUTH,
+ };
if (ifmgd->auth_data &&
ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) {
@@ -5854,7 +5833,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
- drv_mgd_prepare_tx(sdata->local, sdata, 0);
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
@@ -5863,7 +5842,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
-
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}
@@ -5874,7 +5853,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
- drv_mgd_prepare_tx(sdata->local, sdata, 0);
+ drv_mgd_prepare_tx(sdata->local, sdata, &info);
ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
@@ -5898,6 +5877,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 63652c39c8e0..e5935e3d7a07 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -297,15 +297,11 @@ void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata)
static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- __le16 fc;
-
- fc = hdr->frame_control;
return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_USE_MINRATE)) ||
- !ieee80211_is_data(fc);
+ !ieee80211_is_tx_data(skb);
}
static void rc_send_low_basicrate(struct ieee80211_tx_rate *rate,
@@ -396,6 +392,10 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
int mcast_rate;
bool use_basicrate = false;
+ if (ieee80211_is_tx_data(txrc->skb) &&
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return false;
+
if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
__rate_control_send_low(txrc->hw, sband, pubsta, info,
txrc->rate_idx_mask);
@@ -870,7 +870,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
int max_rates)
{
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_supported_band *sband;
@@ -882,7 +881,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
sdata = vif_to_sdata(vif);
sband = sdata->local->hw.wiphy->bands[info->band];
- if (ieee80211_is_data(hdr->frame_control))
+ if (ieee80211_is_tx_data(skb))
rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
if (dest[0].idx < 0)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 6487b05da6fa..72b44d4c42d0 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -434,7 +434,7 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
unsigned int nsecs = 0, overhead = mi->overhead;
unsigned int ampdu_len = 1;
- /* do not account throughput if sucess prob is below 10% */
+ /* do not account throughput if success prob is below 10% */
if (prob_avg < MINSTREL_FRAC(10, 100))
return 0;
@@ -1176,29 +1176,6 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
}
static void
-minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
- u16 tid;
-
- if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
- return;
-
- if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
- return;
-
- if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
- return;
-
- tid = ieee80211_get_tid(hdr);
- if (likely(sta->ampdu_mlme.tid_tx[tid]))
- return;
-
- ieee80211_start_tx_ba_session(pubsta, tid, 0);
-}
-
-static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
void *priv_sta, struct ieee80211_tx_status *st)
{
@@ -1211,6 +1188,10 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
bool last, update = false;
int i;
+ /* Ignore packets that were sent with the noAck flag */
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -1498,10 +1479,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct minstrel_priv *mp = priv;
u16 sample_idx;
- if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate)))
- minstrel_aggr_check(sta, txrc->skb);
-
info->flags |= mi->tx_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1514,7 +1491,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
return;
- if (time_is_before_jiffies(mi->sample_time))
+ if (time_is_after_jiffies(mi->sample_time))
return;
mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
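The one-character fix above flips time_is_before_jiffies() to time_is_after_jiffies(): the function now returns early while the sample deadline is still in the future, instead of returning exactly when sampling is due. Both macros reduce to a wraparound-safe signed comparison, roughly as in this user-space sketch (not the kernel's actual macro definitions):

#include <stdbool.h>

typedef unsigned long jiffies_t;

/* wraparound-safe "a is after b", like the kernel's time_after() */
static bool time_after_sketch(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

/*
 * time_is_after_jiffies(t) is time_after(t, jiffies), i.e. t is still
 * in the future; the fixed code skips sampling until then.
 */
static bool skip_sampling(jiffies_t sample_time, jiffies_t now)
{
	return time_after_sketch(sample_time, now);
}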
@@ -1907,6 +1884,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
+ .capa = RATE_CTRL_CAPA_AMPDU_TRIGGER,
.tx_status_ext = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
.rate_init = minstrel_ht_rate_init,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 62047e93e217..771921c057e8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -214,6 +214,24 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
return len;
}
+static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ if (sta)
+ sta->rx_stats.packets++;
+}
+
+static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ skb->protocol = 0;
+ __ieee80211_queue_skb_to_iface(sdata, sta, skb);
+}
+
static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
int rtap_space)
@@ -254,8 +272,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
if (!skb)
return;
- skb_queue_tail(&sdata->skb_queue, skb);
- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ ieee80211_queue_skb_to_iface(sdata, NULL, skb);
}
/*
@@ -1339,7 +1356,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
struct sk_buff_head *frames)
{
struct sk_buff *skb = rx->skb;
- struct ieee80211_local *local = rx->local;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct sta_info *sta = rx->sta;
struct tid_ampdu_rx *tid_agg_rx;
@@ -1391,8 +1407,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- skb_queue_tail(&rx->sdata->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &rx->sdata->work);
+ ieee80211_queue_skb_to_iface(rx->sdata, NULL, skb);
return;
}
@@ -1563,12 +1578,8 @@ static void sta_ps_start(struct sta_info *sta)
for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
struct ieee80211_txq *txq = sta->sta.txq[tid];
- struct txq_info *txqi = to_txq_info(txq);
- spin_lock(&local->active_txq_lock[txq->ac]);
- if (!list_empty(&txqi->schedule_order))
- list_del_init(&txqi->schedule_order);
- spin_unlock(&local->active_txq_lock[txq->ac]);
+ ieee80211_unschedule_txq(&local->hw, txq, false);
if (txq_has_queue(txq))
set_bit(tid, &sta->txq_buffered_tids);
@@ -2123,19 +2134,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return result;
}
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ skb_queue_head_init(&cache->entries[i].skb_list);
+}
+
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ __skb_queue_purge(&cache->entries[i].skb_list);
+}
+
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq, int rx_queue,
struct sk_buff **skb)
{
struct ieee80211_fragment_entry *entry;
- entry = &sdata->fragments[sdata->fragment_next++];
- if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
- sdata->fragment_next = 0;
+ entry = &cache->entries[cache->next++];
+ if (cache->next >= IEEE80211_FRAGMENT_MAX)
+ cache->next = 0;
- if (!skb_queue_empty(&entry->skb_list))
- __skb_queue_purge(&entry->skb_list);
+ __skb_queue_purge(&entry->skb_list);
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -2150,14 +2176,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
}
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq,
int rx_queue, struct ieee80211_hdr *hdr)
{
struct ieee80211_fragment_entry *entry;
int i, idx;
- idx = sdata->fragment_next;
+ idx = cache->next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
struct sk_buff *f_skb;
@@ -2166,7 +2192,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
if (idx < 0)
idx = IEEE80211_FRAGMENT_MAX - 1;
- entry = &sdata->fragments[idx];
+ entry = &cache->entries[idx];
if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
entry->rx_queue != rx_queue ||
entry->last_frag + 1 != frag)
@@ -2194,15 +2220,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
+{
+ return rx->key &&
+ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+ ieee80211_has_protected(fc);
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
+ struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
struct ieee80211_hdr *hdr;
u16 sc;
__le16 fc;
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -2213,14 +2251,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (is_multicast_ether_addr(hdr->addr1)) {
- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
- goto out_no_led;
- }
+ if (rx->sta)
+ cache = &rx->sta->frags;
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
+ if (is_multicast_ether_addr(hdr->addr1))
+ return RX_DROP_MONITOR;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -2236,20 +2275,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (frag == 0) {
/* This is the first fragment of a new frame. */
- entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_add(cache, frag, seq,
rx->seqno_idx, &(rx->skb));
- if (rx->key &&
- (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
- ieee80211_has_protected(fc)) {
+ if (requires_sequential_pn(rx, fc)) {
int queue = rx->security_idx;
/* Store CCMP/GCMP PN so that we can verify that the
* next fragment has a sequential PN value.
*/
entry->check_sequential_pn = true;
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
@@ -2261,6 +2297,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sizeof(rx->key->u.gcmp.rx_pn[queue]));
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
IEEE80211_GCMP_PN_LEN);
+ } else if (rx->key &&
+ (ieee80211_has_protected(fc) ||
+ (status->flag & RX_FLAG_DECRYPTED))) {
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
}
return RX_QUEUED;
}
@@ -2268,7 +2309,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is a fragment for a frame that should already be pending in
* fragment cache. Add this fragment to the end of the pending entry.
*/
- entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_find(cache, frag, seq,
rx->seqno_idx, hdr);
if (!entry) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2283,25 +2324,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
- int queue;
- if (!rx->key ||
- (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+ if (!requires_sequential_pn(rx, fc))
+ return RX_DROP_UNUSABLE;
+
+ /* Prevent mixed key and fragment cache attacks */
+ if (entry->key_color != rx->key->color)
return RX_DROP_UNUSABLE;
+
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
pn[i]++;
if (pn[i])
break;
}
- queue = rx->security_idx;
- rpn = rx->key->u.ccmp.rx_pn[queue];
+
+ rpn = rx->ccm_gcm.pn;
if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
+ } else if (entry->is_protected &&
+ (!rx->key ||
+ (!ieee80211_has_protected(fc) &&
+ !(status->flag & RX_FLAG_DECRYPTED)) ||
+ rx->key->color != entry->key_color)) {
+ /* Drop this as a mixed key or fragment cache attack, even
+ * though for TKIP the Michael MIC should protect us, and
+ * WEP is a lost cause anyway.
+ */
+ return RX_DROP_UNUSABLE;
+ } else if (entry->is_protected && rx->key &&
+ entry->key_color != rx->key->color &&
+ (status->flag & RX_FLAG_DECRYPTED)) {
+ return RX_DROP_UNUSABLE;
}
skb_pull(rx->skb, ieee80211_hdrlen(fc));
@@ -2330,7 +2385,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
out:
ieee80211_led_rx(rx->local);
- out_no_led:
if (rx->sta)
rx->sta->rx_stats.packets++;
return RX_CONTINUE;
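In the check_sequential_pn path of this hunk, every subsequent fragment's CCMP/GCMP packet number must equal the previous fragment's PN plus one. A self-contained sketch of the increment-and-compare, assuming a 6-byte big-endian PN as CCMP uses:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PN_LEN 6

/* increment a big-endian PN in place, carrying from the last byte */
static void pn_increment(uint8_t pn[PN_LEN])
{
	int i;

	for (i = PN_LEN - 1; i >= 0; i--) {
		if (++pn[i])
			break; /* no carry needed */
	}
}

/* true if rx_pn is exactly last_pn + 1 */
static bool pn_is_sequential(const uint8_t last_pn[PN_LEN],
			     const uint8_t rx_pn[PN_LEN])
{
	uint8_t expect[PN_LEN];

	memcpy(expect, last_pn, PN_LEN);
	pn_increment(expect);
	return memcmp(expect, rx_pn, PN_LEN) == 0;
}

Any gap or reuse in the PN sequence drops the whole pending frame, which is what blocks the mixed fragment cache attacks described above.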
@@ -2494,13 +2548,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
/*
- * Allow EAPOL frames to us/the PAE group address regardless
- * of whether the frame was encrypted or not.
+ * Allow EAPOL frames to us/the PAE group address regardless of
+ * whether the frame was encrypted or not, and always disallow
+ * all other destination addresses for them.
*/
- if (ehdr->h_proto == rx->sdata->control_port_protocol &&
- (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
- ether_addr_equal(ehdr->h_dest, pae_group_addr)))
- return true;
+ if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
+ return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+ ether_addr_equal(ehdr->h_dest, pae_group_addr);
if (ieee80211_802_1x_port_control(rx) ||
ieee80211_drop_unencrypted(rx, fc))
@@ -2525,8 +2579,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
+ struct ethhdr *ehdr = (void *)skb_mac_header(skb);
+
memset(skb->cb, 0, sizeof(skb->cb));
+ /*
+ * 802.1X over 802.11 requires that the authenticator address
+ * be used for EAPOL frames. However, 802.1X allows the use of
+ * the PAE group address instead. If the interface is part of
+ * a bridge and we pass the frame with the PAE group address,
+ * then the bridge will forward it to the network (even if the
+ * client was not associated yet), which isn't supposed to
+ * happen.
+ * To avoid that, rewrite the destination address to our own
+ * address, so that the authenticator (e.g. hostapd) will see
+ * the frame, but the bridge won't forward it anywhere else. Note
+ * that due to earlier filtering, the only other address can
+ * be the PAE group address.
+ */
+ if (unlikely(skb->protocol == sdata->control_port_protocol &&
+ !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
+ ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
+
/* deliver to local stack */
if (rx->list)
list_add_tail(&skb->list, rx->list);
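The rewrite in this hunk can be summarized briefly: after ieee80211_frame_allowed() has filtered destinations, an EAPOL frame can only be addressed to the interface itself or to the PAE group address, so unconditionally retargeting any non-local destination is safe. A user-space sketch with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

/*
 * Retarget an EAPOL frame's destination to our own address so that a
 * bridge will hand it to the local authenticator instead of flooding
 * it; due to the earlier filtering, the only other possible destination
 * is the PAE group address 01:80:C2:00:00:03.
 */
static void eapol_fix_dest(uint8_t dest[ETH_ALEN],
			   const uint8_t own_addr[ETH_ALEN],
			   bool is_eapol)
{
	if (is_eapol && memcmp(dest, own_addr, ETH_ALEN) != 0)
		memcpy(dest, own_addr, ETH_ALEN);
}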
@@ -2566,6 +2640,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+ ehdr->h_proto != rx->sdata->control_port_protocol &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest) &&
ieee80211_vif_get_num_mcast_if(sdata) != 0) {
@@ -2675,7 +2750,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type,
- data_offset))
+ data_offset, true))
return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@ -2732,6 +2807,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
+ if (rx->key) {
+ /*
+ * We should not receive A-MSDUs on pre-HT connections,
+ * and HT connections cannot use old ciphers. Thus drop
+ * them, as in those cases we couldn't even have SPP
+ * A-MSDUs or such.
+ */
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ return RX_DROP_UNUSABLE;
+ default:
+ break;
+ }
+ }
+
return __ieee80211_rx_h_amsdu(rx, 0);
}
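
The cipher gate added here reads naturally as a standalone predicate. A minimal sketch, assuming the standard 00-0F-AC cipher suite selectors from include/linux/ieee80211.h; cipher_allows_amsdu() is a hypothetical helper, not a kernel function:

#include <stdbool.h>
#include <stdint.h>

/* Standard 00-0F-AC cipher suite selectors */
#define WLAN_CIPHER_SUITE_WEP40		0x000FAC01
#define WLAN_CIPHER_SUITE_TKIP		0x000FAC02
#define WLAN_CIPHER_SUITE_WEP104	0x000FAC05

/* The pre-HT ciphers predate A-MSDU support, so an A-MSDU arriving
 * under them is never legitimate and can be dropped outright. */
static bool cipher_allows_amsdu(uint32_t cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
		return false;
	default:
		return true;
	}
}
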
@@ -2928,11 +3020,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
tf->category == WLAN_CATEGORY_TDLS &&
(tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
- skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
- schedule_work(&local->tdls_chsw_work);
- if (rx->sta)
- rx->sta->rx_stats.packets++;
-
+ rx->skb->protocol = cpu_to_be16(ETH_P_TDLS);
+ __ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
return RX_QUEUED;
}
}
@@ -3412,10 +3501,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
return RX_QUEUED;
queue:
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
- if (rx->sta)
- rx->sta->rx_stats.packets++;
+ ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
return RX_QUEUED;
}
@@ -3563,10 +3649,7 @@ ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
/* for now only beacons are ext, so queue them */
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&rx->local->hw, &sdata->work);
- if (rx->sta)
- rx->sta->rx_stats.packets++;
+ ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
return RX_QUEUED;
}
@@ -3623,11 +3706,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
}
- /* queue up frame and kick off work to process it */
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&rx->local->hw, &sdata->work);
- if (rx->sta)
- rx->sta->rx_stats.packets++;
+ ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
return RX_QUEUED;
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d4cc9ac2d703..6b50cb5e0e3c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+
+ if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control) &&
+ !ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return;
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
- if (skb->len < 15)
- return;
- } else if (skb->len < 24 ||
- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
- !ieee80211_is_beacon(mgmt->frame_control)))
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon);
+ }
+
+ if (skb->len < min_hdr_len)
return;
sdata1 = rcu_dereference(local->scan_sdata);
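
The offsetof() trick above turns the minimum-length check into "the frame must at least reach the variable-length IEs". A reduced, hypothetical model of the same idea — the real struct ieee80211_mgmt in include/linux/ieee80211.h is a packed union over every management frame body, so this cut-down layout is only illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct mgmt_probe_resp {
	uint8_t  hdr[24];	/* frame control ... sequence control */
	uint64_t timestamp;
	uint16_t beacon_int;
	uint16_t capab_info;
	uint8_t  variable[];	/* information elements start here */
} __attribute__((packed));

/* A frame shorter than the fixed part cannot carry valid IEs */
static bool scan_frame_long_enough(size_t frame_len)
{
	return frame_len >= offsetof(struct mgmt_probe_resp, variable);
}
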
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ec6973ee88ef..a5505ee51229 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
@@ -392,6 +392,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
u64_stats_init(&sta->rx_stats.syncp);
+ ieee80211_init_frag_cache(&sta->frags);
+
sta->sta_state = IEEE80211_STA_NONE;
/* Mark TID as unreserved */
@@ -423,15 +425,11 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (sta_prepare_rate_control(local, sta, gfp))
goto free_txq;
- sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
- sta->airtime[i].deficit = sta->airtime_weight;
- atomic_set(&sta->airtime[i].aql_tx_pending, 0);
- sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
- sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
+ init_airtime_info(&sta->airtime[i], &local->airtime[i]);
}
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
@@ -1102,6 +1100,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
ieee80211_sta_debugfs_remove(sta);
+ ieee80211_destroy_frag_cache(&sta->frags);
+
cleanup_single_sta(sta);
}
@@ -1394,11 +1394,6 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
struct ieee80211_tx_info *info;
struct ieee80211_chanctx_conf *chanctx_conf;
- /* Don't send NDPs when STA is connected HE */
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
- return;
-
if (qos) {
fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1893,24 +1888,59 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
-void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
- u32 tx_airtime, u32 rx_airtime)
+void ieee80211_register_airtime(struct ieee80211_txq *txq,
+ u32 tx_airtime, u32 rx_airtime)
{
- struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
- struct ieee80211_local *local = sta->sdata->local;
- u8 ac = ieee80211_ac_from_tid(tid);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+ struct ieee80211_local *local = sdata->local;
+ u64 weight_sum, weight_sum_reciprocal;
+ struct airtime_sched_info *air_sched;
+ struct airtime_info *air_info;
u32 airtime = 0;
- if (sta->local->airtime_flags & AIRTIME_USE_TX)
+ air_sched = &local->airtime[txq->ac];
+ air_info = to_airtime_info(txq);
+
+ if (local->airtime_flags & AIRTIME_USE_TX)
airtime += tx_airtime;
- if (sta->local->airtime_flags & AIRTIME_USE_RX)
+ if (local->airtime_flags & AIRTIME_USE_RX)
airtime += rx_airtime;
- spin_lock_bh(&local->active_txq_lock[ac]);
- sta->airtime[ac].tx_airtime += tx_airtime;
- sta->airtime[ac].rx_airtime += rx_airtime;
- sta->airtime[ac].deficit -= airtime;
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ /* Weights scale so the unit weight is 256 */
+ airtime <<= 8;
+
+ spin_lock_bh(&air_sched->lock);
+
+ air_info->tx_airtime += tx_airtime;
+ air_info->rx_airtime += rx_airtime;
+
+ if (air_sched->weight_sum) {
+ weight_sum = air_sched->weight_sum;
+ weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
+ } else {
+ weight_sum = air_info->weight;
+ weight_sum_reciprocal = air_info->weight_reciprocal;
+ }
+
+ /* Round the calculation of global vt */
+ air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
+ weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
+ air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
+ air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
+ ieee80211_resort_txq(&local->hw, txq);
+
+ spin_unlock_bh(&air_sched->lock);
+}
+
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+ u32 tx_airtime, u32 rx_airtime)
+{
+ struct ieee80211_txq *txq = pubsta->txq[tid];
+
+ if (!txq)
+ return;
+
+ ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);
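
The v_t bookkeeping above divides accumulated airtime by the weight (or weight sum) without a hardware divide, using a precomputed reciprocal and adding half the divisor for round-to-nearest. A hedged userspace model of that arithmetic — the constant and names are illustrative, and mac80211 keeps separate 32- and 64-bit shift widths that this sketch glosses over:

#include <stdint.h>

#define RECIPROCAL_SHIFT 32	/* illustrative single width */

/* Precompute 2^shift / weight once, so the hot path needs no division */
static uint64_t weight_reciprocal(uint32_t weight)
{
	return ((1ULL << RECIPROCAL_SHIFT) + weight - 1) / weight;
}

/* v_t += airtime / weight, rounded to nearest by adding half the divisor,
 * realized as a multiply-and-shift (overflow handling elided; the kernel's
 * width choices address it) */
static uint64_t advance_v_t(uint64_t v_t, uint32_t airtime_scaled,
			    uint32_t weight, uint64_t recip)
{
	return v_t + (((uint64_t)airtime_scaled + (weight >> 1)) * recip
		      >> RECIPROCAL_SHIFT);
}
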
@@ -2089,10 +2119,9 @@ static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
- struct ieee80211_local *local = sta->local;
int cpu;
- if (!ieee80211_hw_check(&local->hw, USES_RSS))
+ if (!sta->pcpu_rx_stats)
return stats;
for_each_possible_cpu(cpu) {
@@ -2192,9 +2221,7 @@ static void sta_set_tidstats(struct sta_info *sta,
int cpu;
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- if (!ieee80211_hw_check(&local->hw, USES_RSS))
- tidstats->rx_msdu +=
- sta_get_tidstats_msdu(&sta->rx_stats, tid);
+ tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);
if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
@@ -2273,7 +2300,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
drv_sta_statistics(local, sdata, &sta->sta, sinfo);
-
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
@@ -2308,8 +2334,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
- if (!ieee80211_hw_check(&local->hw, USES_RSS))
- sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+ sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
@@ -2359,7 +2384,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
- sinfo->airtime_weight = sta->airtime_weight;
+ sinfo->airtime_weight = sta->airtime[0].weight;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 78b9d0c7cc58..ba2796782008 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -135,18 +135,25 @@ enum ieee80211_agg_stop_reason {
#define AIRTIME_USE_TX BIT(0)
#define AIRTIME_USE_RX BIT(1)
+
struct airtime_info {
u64 rx_airtime;
u64 tx_airtime;
- s64 deficit;
+ u64 v_t;
+ u64 last_scheduled;
+ struct list_head list;
atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
u32 aql_limit_low;
u32 aql_limit_high;
+ u32 weight_reciprocal;
+ u16 weight;
};
void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
struct sta_info *sta, u8 ac,
u16 tx_airtime, bool tx_completed);
+void ieee80211_register_airtime(struct ieee80211_txq *txq,
+ u32 tx_airtime, u32 rx_airtime);
struct sta_info;
@@ -439,6 +446,34 @@ struct ieee80211_sta_rx_stats {
};
/*
+ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
+ * reception of at least one MSDU per access category per associated STA"
+ * on APs, or "at least one MSDU per access category" on other interface types.
+ *
+ * This limit can be increased by changing this define, at the cost of slower
+ * frame reassembly and increased memory use while fragments are pending.
+ */
+#define IEEE80211_FRAGMENT_MAX 4
+
+struct ieee80211_fragment_entry {
+ struct sk_buff_head skb_list;
+ unsigned long first_frag_time;
+ u16 seq;
+ u16 extra_len;
+ u16 last_frag;
+ u8 rx_queue;
+ u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
+ is_protected:1;
+ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ unsigned int key_color;
+};
+
+struct ieee80211_fragment_cache {
+ struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
+ unsigned int next;
+};
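
A hedged sketch of how a fixed-size cache like this can recycle its oldest slot round-robin; frag_cache_take() is hypothetical, and the kernel's actual reuse and lookup logic lives in net/mac80211/rx.c:

#include <string.h>

#define FRAGMENT_MAX 4

struct frag_entry { unsigned int seq; /* ...per-entry state... */ };

struct frag_cache {
	struct frag_entry entries[FRAGMENT_MAX];
	unsigned int next;
};

/* When all slots are busy, the oldest one is simply overwritten */
static struct frag_entry *frag_cache_take(struct frag_cache *c)
{
	struct frag_entry *e = &c->entries[c->next];

	c->next = (c->next + 1) % FRAGMENT_MAX;
	memset(e, 0, sizeof(*e));
	return e;
}
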
+
+/*
* The bandwidth threshold below which the per-station CoDel parameters will be
* scaled to be more lenient (to prevent starvation of slow stations). This
* value will be scaled by the number of active stations when it is being
@@ -487,7 +522,6 @@ struct ieee80211_sta_rx_stats {
* @tid_seq: per-TID sequence numbers for sending to this STA
* @airtime: per-AC struct airtime_info describing airtime statistics for this
* station
- * @airtime_weight: station weight for airtime fairness calculation purposes
* @ampdu_mlme: A-MPDU state machine state
* @mesh: mesh STA information
* @debugfs_dir: debug filesystem directory dentry
@@ -531,6 +565,7 @@ struct ieee80211_sta_rx_stats {
* @status_stats.last_ack_signal: last ACK signal
* @status_stats.ack_signal_filled: last ACK signal validity
* @status_stats.avg_ack_signal: average ACK signal
+ * @frags: fragment cache
*/
struct sta_info {
/* General information, mostly static */
@@ -617,7 +652,6 @@ struct sta_info {
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
struct airtime_info airtime[IEEE80211_NUM_ACS];
- u16 airtime_weight;
/*
* Aggregation information, locked with lock.
@@ -639,6 +673,8 @@ struct sta_info {
struct cfg80211_chan_def tdls_chandef;
+ struct ieee80211_fragment_cache frags;
+
/* keep last! */
struct ieee80211_sta sta;
};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9baf185ee4c7..bae321ff77f6 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -970,6 +970,25 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
+ } else if (wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_txq *txq;
+ u32 airtime;
+
+ /* Account airtime to multicast queue */
+ sdata = ieee80211_sdata_from_skb(local, skb);
+
+ if (sdata && (txq = sdata->vif.txq)) {
+ airtime = info->status.tx_time ?:
+ ieee80211_calc_expected_tx_airtime(hw,
+ &sdata->vif,
+ NULL,
+ skb->len,
+ false);
+
+ ieee80211_register_airtime(txq, airtime, 0);
+ }
}
/* SNMP counters
@@ -1006,12 +1025,11 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
local->ps_sdata && !(local->scanning)) {
- if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if (info->flags & IEEE80211_TX_STAT_ACK)
local->ps_sdata->u.mgd.flags |=
IEEE80211_STA_NULLFUNC_ACKED;
- } else
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(10));
+ mod_timer(&local->dynamic_ps_timer,
+ jiffies + msecs_to_jiffies(10));
}
ieee80211_report_used_skb(local, skb, false);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index f91d02b81b92..45e532ad1215 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1920,7 +1920,7 @@ out:
return ret;
}
-static void
+void
ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -1971,32 +1971,6 @@ void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
rcu_read_unlock();
}
-void ieee80211_tdls_chsw_work(struct work_struct *wk)
-{
- struct ieee80211_local *local =
- container_of(wk, struct ieee80211_local, tdls_chsw_work);
- struct ieee80211_sub_if_data *sdata;
- struct sk_buff *skb;
- struct ieee80211_tdls_data *tf;
-
- wiphy_lock(local->hw.wiphy);
- while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
- tf = (struct ieee80211_tdls_data *)skb->data;
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata) ||
- sdata->vif.type != NL80211_IFTYPE_STATION ||
- !ether_addr_equal(tf->da, sdata->vif.addr))
- continue;
-
- ieee80211_process_tdls_channel_switch(sdata, skb);
- break;
- }
-
- kfree_skb(skb);
- }
- wiphy_unlock(local->hw.wiphy);
-}
-
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason)
{
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 8fcc39056402..f6ef15366938 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2020 Intel Corporation
+* Copyright (C) 2018 - 2021 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -1461,31 +1461,52 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
TP_ARGS(local, sta, tids, num_frames, reason, more_data)
);
-TRACE_EVENT(drv_mgd_prepare_tx,
+DECLARE_EVENT_CLASS(mgd_prepare_complete_tx_evt,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- u16 duration),
+ u16 duration, u16 subtype, bool success),
- TP_ARGS(local, sdata, duration),
+ TP_ARGS(local, sdata, duration, subtype, success),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
__field(u32, duration)
+ __field(u16, subtype)
+ __field(u8, success)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
__entry->duration = duration;
+ __entry->subtype = subtype;
+ __entry->success = success;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " duration: %u",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration
+ LOCAL_PR_FMT VIF_PR_FMT " duration: %u, subtype:0x%x, success:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration,
+ __entry->subtype, __entry->success
)
);
+DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_prepare_tx,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 duration, u16 subtype, bool success),
+
+ TP_ARGS(local, sdata, duration, subtype, success)
+);
+
+DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_complete_tx,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 duration, u16 subtype, bool success),
+
+ TP_ARGS(local, sdata, duration, subtype, success)
+);
+
DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0b719f3d2dec..e96981144358 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -18,6 +18,7 @@
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
+#include <linux/timekeeping.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
@@ -666,6 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
u32 len;
struct ieee80211_tx_rate_control txrc;
struct ieee80211_sta_rates *ratetbl = NULL;
+ bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
bool assoc = false;
memset(&txrc, 0, sizeof(txrc));
@@ -707,7 +709,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
* just wants a probe response.
*/
if (tx->sdata->vif.bss_conf.use_short_preamble &&
- (ieee80211_is_data(hdr->frame_control) ||
+ (ieee80211_is_tx_data(tx->skb) ||
(tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = true;
@@ -729,7 +731,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
"%pM on %d GHz band\n",
- tx->sdata->name, hdr->addr1,
+ tx->sdata->name,
+ encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1,
info->band ? 5 : 2))
return TX_DROP;
@@ -763,7 +766,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = tx->rate;
- if (tx->sta && ieee80211_is_data(hdr->frame_control))
+ if (tx->sta && ieee80211_is_tx_data(tx->skb))
tx->sta->tx_stats.last_rate = txrc.reported_rate;
} else if (tx->sta)
tx->sta->tx_stats.last_rate = txrc.reported_rate;
@@ -1447,7 +1450,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
- INIT_LIST_HEAD(&txqi->schedule_order);
+ RB_CLEAR_NODE(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
@@ -1491,9 +1494,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
spin_unlock_bh(&fq->lock);
- spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
- list_del_init(&txqi->schedule_order);
- spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
+ ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
}
void ieee80211_txq_set_params(struct ieee80211_local *local)
@@ -1768,8 +1769,6 @@ static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
CALL_TXH(ieee80211_tx_h_ps_buf);
CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
CALL_TXH(ieee80211_tx_h_select_key);
- if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
- CALL_TXH(ieee80211_tx_h_rate_ctrl);
txh_done:
if (unlikely(res == TX_DROP)) {
@@ -1802,6 +1801,9 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
goto txh_done;
}
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
CALL_TXH(ieee80211_tx_h_fragment);
@@ -2014,6 +2016,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
ieee80211_tx(sdata, sta, skb, false);
}
+static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *)skb->data;
+
+ /* check for not even having the fixed radiotap header part */
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ return false; /* too short to be possibly valid */
+
+ /* is it a header version we can trust to find length from? */
+ if (unlikely(rthdr->it_version))
+ return false; /* only version 0 is supported */
+
+ /* does the skb contain enough to deliver on the alleged length? */
+ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+ return false; /* skb too short for claimed rt header extent */
+
+ return true;
+}
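
The three checks map directly onto the radiotap wire format: an 8-byte fixed header whose little-endian it_len claims the full header extent. A standalone sketch, assuming a little-endian host and with radiotap_len_ok() as an illustrative name:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct radiotap_header {
	uint8_t  it_version;	/* must be 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* little-endian total header length */
	uint32_t it_present;
} __attribute__((packed));

static bool radiotap_len_ok(const uint8_t *buf, size_t buflen)
{
	const struct radiotap_header *rt = (const void *)buf;

	if (buflen < sizeof(*rt))
		return false;		/* too short for the fixed part */
	if (rt->it_version != 0)
		return false;		/* only version 0 is defined */
	return buflen >= rt->it_len;	/* claimed extent must fit */
}
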
+
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2022,8 +2044,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[info->band];
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
u16 txflags;
@@ -2036,17 +2056,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
u8 vht_mcs = 0, vht_nss = 0;
int i;
- /* check for not even having the fixed radiotap header part */
- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
- return false; /* too short to be possibly valid */
-
- /* is it a header version we can trust to find length from? */
- if (unlikely(rthdr->it_version))
- return false; /* only version 0 is supported */
-
- /* does the skb contain enough to deliver on the alleged length? */
- if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
- return false; /* skb too short for claimed rt header extent */
+ if (!ieee80211_validate_radiotap_len(skb))
+ return false;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_DONTFRAG;
@@ -2186,6 +2197,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
return false;
if (rate_found) {
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[info->band];
+
info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -2199,7 +2213,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(info->control.rates, vht_mcs,
vht_nss);
- } else {
+ } else if (sband) {
for (i = 0; i < sband->n_bitrates; i++) {
if (rate * 5 != sband->bitrates[i].bitrate)
continue;
@@ -2236,8 +2250,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
- /* Sanity-check and process the injection radiotap header */
- if (!ieee80211_parse_tx_radiotap(skb, dev))
+ /* Sanity-check the length of the radiotap header */
+ if (!ieee80211_validate_radiotap_len(skb))
goto fail;
/* we now know there is a radiotap header with a length we can use */
@@ -2351,6 +2365,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
ieee80211_select_queue_80211(sdata, skb, hdr);
skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
+ /*
+ * Process the radiotap header. This will now take into account the
+ * selected chandef above to accurately set injection rates and
+ * retransmissions.
+ */
+ if (!ieee80211_parse_tx_radiotap(skb, dev))
+ goto fail_rcu;
+
/* remove the injection radiotap header */
skb_pull(skb, len_rthdr);
@@ -3264,6 +3286,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
return false;
+ if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ return false;
+
if (skb_is_gso(skb))
return false;
@@ -3369,15 +3394,21 @@ out:
* Can be called while the sta lock is held. Anything that can cause packets to
* be generated will cause deadlock!
*/
-static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 pn_offs,
- struct ieee80211_key *key,
- struct sk_buff *skb)
+static ieee80211_tx_result
+ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 pn_offs,
+ struct ieee80211_key *key,
+ struct ieee80211_tx_data *tx)
{
+ struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 tid = IEEE80211_NUM_TIDS;
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) &&
+ ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE)
+ return TX_DROP;
+
if (key)
info->control.hw_key = &key->conf;
@@ -3426,6 +3457,8 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
break;
}
}
+
+ return TX_CONTINUE;
}
static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
@@ -3529,24 +3562,17 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
tx.sta = sta;
tx.key = fast_tx->key;
- if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
- tx.skb = skb;
- r = ieee80211_tx_h_rate_ctrl(&tx);
- skb = tx.skb;
- tx.skb = NULL;
-
- if (r != TX_CONTINUE) {
- if (r != TX_QUEUED)
- kfree_skb(skb);
- return true;
- }
- }
-
if (ieee80211_queue_skb(local, sdata, sta, skb))
return true;
- ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
- fast_tx->key, skb);
+ tx.skb = skb;
+ r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
+ fast_tx->key, &tx);
+ tx.skb = NULL;
+ if (r == TX_DROP) {
+ kfree_skb(skb);
+ return true;
+ }
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -3651,8 +3677,16 @@ begin:
else
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+ r = ieee80211_tx_h_rate_ctrl(&tx);
+ if (r != TX_CONTINUE) {
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ }
+ }
goto encap_out;
+ }
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
struct sta_info *sta = container_of(txq->sta, struct sta_info,
@@ -3663,8 +3697,12 @@ begin:
(tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
pn_offs = ieee80211_hdrlen(hdr->frame_control);
- ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
- tx.key, skb);
+ r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
+ tx.key, &tx);
+ if (r != TX_CONTINUE) {
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ }
} else {
if (invoke_tx_handlers_late(&tx))
goto begin;
@@ -3744,102 +3782,259 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct airtime_sched_info *air_sched;
+ u64 now = ktime_get_boottime_ns();
struct ieee80211_txq *ret = NULL;
- struct txq_info *txqi = NULL, *head = NULL;
- bool found_eligible_txq = false;
+ struct airtime_info *air_info;
+ struct txq_info *txqi = NULL;
+ struct rb_node *node;
+ bool first = false;
- spin_lock_bh(&local->active_txq_lock[ac]);
+ air_sched = &local->airtime[ac];
+ spin_lock_bh(&air_sched->lock);
- begin:
- txqi = list_first_entry_or_null(&local->active_txqs[ac],
- struct txq_info,
- schedule_order);
- if (!txqi)
+ node = air_sched->schedule_pos;
+
+begin:
+ if (!node) {
+ node = rb_first_cached(&air_sched->active_txqs);
+ first = true;
+ } else {
+ node = rb_next(node);
+ }
+
+ if (!node)
goto out;
- if (txqi == head) {
- if (!found_eligible_txq)
- goto out;
- else
- found_eligible_txq = false;
+ txqi = container_of(node, struct txq_info, schedule_order);
+ air_info = to_airtime_info(&txqi->txq);
+
+ if (air_info->v_t > air_sched->v_t &&
+ (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
+ goto out;
+
+ if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
+ first = false;
+ goto begin;
}
- if (!head)
- head = txqi;
+ air_sched->schedule_pos = node;
+ air_sched->last_schedule_activity = now;
+ ret = &txqi->txq;
+out:
+ spin_unlock_bh(&air_sched->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_next_txq);
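
On the consumer side, a driver walks this rb-tree through the unchanged public API. A hedged sketch of the canonical pull loop, following the schedule_start/next_txq/return_txq contract; a real driver also checks hardware queue depth (and AQL via ieee80211_txq_airtime_check()) before dequeueing, and submits the skb to its DMA ring instead of freeing it:

#include <net/mac80211.h>

static void example_drv_pull_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;
	struct sk_buff *skb;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac))) {
		while ((skb = ieee80211_tx_dequeue(hw, txq)))
			dev_kfree_skb(skb);	/* placeholder for hw submit */
		/* re-file the queue; with force=false an empty queue
		 * is unscheduled, per ieee80211_return_txq() above */
		ieee80211_return_txq(hw, txq, false);
	}
	ieee80211_txq_schedule_end(hw, ac);
}
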
- if (txqi->txq.sta) {
- struct sta_info *sta = container_of(txqi->txq.sta,
- struct sta_info, sta);
- bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
- s64 deficit = sta->airtime[txqi->txq.ac].deficit;
+static void __ieee80211_insert_txq(struct rb_root_cached *root,
+ struct txq_info *txqi)
+{
+ struct rb_node **new = &root->rb_root.rb_node;
+ struct airtime_info *old_air, *new_air;
+ struct rb_node *parent = NULL;
+ struct txq_info *__txqi;
+ bool leftmost = true;
+
+ while (*new) {
+ parent = *new;
+ __txqi = rb_entry(parent, struct txq_info, schedule_order);
+ old_air = to_airtime_info(&__txqi->txq);
+ new_air = to_airtime_info(&txqi->txq);
+
+ if (new_air->v_t <= old_air->v_t) {
+ new = &parent->rb_left;
+ } else {
+ new = &parent->rb_right;
+ leftmost = false;
+ }
+ }
- if (aql_check)
- found_eligible_txq = true;
+ rb_link_node(&txqi->schedule_order, parent, new);
+ rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
+}
- if (deficit < 0)
- sta->airtime[txqi->txq.ac].deficit +=
- sta->airtime_weight;
+void ieee80211_resort_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct airtime_info *air_info = to_airtime_info(txq);
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = to_txq_info(txq);
+ struct airtime_sched_info *air_sched;
- if (deficit < 0 || !aql_check) {
- list_move_tail(&txqi->schedule_order,
- &local->active_txqs[txqi->txq.ac]);
- goto begin;
+ air_sched = &local->airtime[txq->ac];
+
+ lockdep_assert_held(&air_sched->lock);
+
+ if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
+ struct airtime_info *a_prev = NULL, *a_next = NULL;
+ struct txq_info *t_prev, *t_next;
+ struct rb_node *n_prev, *n_next;
+
+ /* Erasing a node can cause an expensive rebalancing operation,
+ * so we check the previous and next nodes first and only remove
+ * and re-insert if the current node is not already in the
+ * correct position.
+ */
+ if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
+ t_prev = container_of(n_prev, struct txq_info,
+ schedule_order);
+ a_prev = to_airtime_info(&t_prev->txq);
+ }
+
+ if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
+ t_next = container_of(n_next, struct txq_info,
+ schedule_order);
+ a_next = to_airtime_info(&t_next->txq);
}
+
+ if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
+ (!a_next || a_next->v_t > air_info->v_t))
+ return;
+
+ if (air_sched->schedule_pos == &txqi->schedule_order)
+ air_sched->schedule_pos = n_prev;
+
+ rb_erase_cached(&txqi->schedule_order,
+ &air_sched->active_txqs);
+ RB_CLEAR_NODE(&txqi->schedule_order);
+ __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
}
+}
+
+void ieee80211_update_airtime_weight(struct ieee80211_local *local,
+ struct airtime_sched_info *air_sched,
+ u64 now, bool force)
+{
+ struct airtime_info *air_info, *tmp;
+ u64 weight_sum = 0;
+
+ if (unlikely(!now))
+ now = ktime_get_boottime_ns();
+ lockdep_assert_held(&air_sched->lock);
+
+ if (!force && (air_sched->last_weight_update <
+ now - AIRTIME_ACTIVE_DURATION))
+ return;
+
+ list_for_each_entry_safe(air_info, tmp,
+ &air_sched->active_list, list) {
+ if (airtime_is_active(air_info, now))
+ weight_sum += air_info->weight;
+ else
+ list_del_init(&air_info->list);
+ }
+ airtime_weight_sum_set(air_sched, weight_sum);
+ air_sched->last_weight_update = now;
+}
- if (txqi->schedule_round == local->schedule_round[ac])
+void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+ __acquires(txq_lock) __releases(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = to_txq_info(txq);
+ struct airtime_sched_info *air_sched;
+ u64 now = ktime_get_boottime_ns();
+ struct airtime_info *air_info;
+ u8 ac = txq->ac;
+ bool was_active;
+
+ air_sched = &local->airtime[ac];
+ air_info = to_airtime_info(txq);
+
+ spin_lock_bh(&air_sched->lock);
+ was_active = airtime_is_active(air_info, now);
+ airtime_set_active(air_sched, air_info, now);
+
+ if (!RB_EMPTY_NODE(&txqi->schedule_order))
goto out;
- list_del_init(&txqi->schedule_order);
- txqi->schedule_round = local->schedule_round[ac];
- ret = &txqi->txq;
+ /* If the station has been inactive for a while, catch up its v_t so it
+ * doesn't get indefinite priority; see comment above the definition of
+ * AIRTIME_MAX_BEHIND.
+ */
+ if ((!was_active && air_info->v_t < air_sched->v_t) ||
+ air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
+ air_info->v_t = air_sched->v_t;
+
+ ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
+ __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
out:
- spin_unlock_bh(&local->active_txq_lock[ac]);
- return ret;
+ spin_unlock_bh(&air_sched->lock);
}
-EXPORT_SYMBOL(ieee80211_next_txq);
+EXPORT_SYMBOL(ieee80211_schedule_txq);
-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq,
- bool force)
+static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq,
+ bool purge)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
+ struct airtime_sched_info *air_sched;
+ struct airtime_info *air_info;
- spin_lock_bh(&local->active_txq_lock[txq->ac]);
-
- if (list_empty(&txqi->schedule_order) &&
- (force || !skb_queue_empty(&txqi->frags) ||
- txqi->tin.backlog_packets)) {
- /* If airtime accounting is active, always enqueue STAs at the
- * head of the list to ensure that they only get moved to the
- * back by the airtime DRR scheduler once they have a negative
- * deficit. A station that already has a negative deficit will
- * get immediately moved to the back of the list on the next
- * call to ieee80211_next_txq().
- */
- if (txqi->txq.sta && local->airtime_flags &&
- wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
- list_add(&txqi->schedule_order,
- &local->active_txqs[txq->ac]);
- else
- list_add_tail(&txqi->schedule_order,
- &local->active_txqs[txq->ac]);
+ air_sched = &local->airtime[txq->ac];
+ air_info = to_airtime_info(&txqi->txq);
+
+ lockdep_assert_held(&air_sched->lock);
+
+ if (purge) {
+ list_del_init(&air_info->list);
+ ieee80211_update_airtime_weight(local, air_sched, 0, true);
}
- spin_unlock_bh(&local->active_txq_lock[txq->ac]);
+ if (RB_EMPTY_NODE(&txqi->schedule_order))
+ return;
+
+ if (air_sched->schedule_pos == &txqi->schedule_order)
+ air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
+
+ if (!purge)
+ airtime_set_active(air_sched, air_info,
+ ktime_get_boottime_ns());
+
+ rb_erase_cached(&txqi->schedule_order,
+ &air_sched->active_txqs);
+ RB_CLEAR_NODE(&txqi->schedule_order);
}
-EXPORT_SYMBOL(__ieee80211_schedule_txq);
+
+void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq,
+ bool purge)
+ __acquires(txq_lock) __releases(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_lock_bh(&local->airtime[txq->ac].lock);
+ __ieee80211_unschedule_txq(hw, txq, purge);
+ spin_unlock_bh(&local->airtime[txq->ac].lock);
+}
+
+void ieee80211_return_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq, bool force)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = to_txq_info(txq);
+
+ spin_lock_bh(&local->airtime[txq->ac].lock);
+
+ if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
+ !txq_has_queue(txq))
+ __ieee80211_unschedule_txq(hw, txq, false);
+
+ spin_unlock_bh(&local->airtime[txq->ac].lock);
+}
+EXPORT_SYMBOL(ieee80211_return_txq);
DEFINE_STATIC_KEY_FALSE(aql_disable);
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
- struct sta_info *sta;
+ struct airtime_info *air_info = to_airtime_info(txq);
struct ieee80211_local *local = hw_to_local(hw);
if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
@@ -3854,15 +4049,12 @@ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
return true;
- sta = container_of(txq->sta, struct sta_info, sta);
- if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
- sta->airtime[txq->ac].aql_limit_low)
+ if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
return true;
if (atomic_read(&local->aql_total_pending_airtime) <
local->aql_threshold &&
- atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
- sta->airtime[txq->ac].aql_limit_high)
+ atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
return true;
return false;
@@ -3872,63 +4064,85 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_check);
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
+ struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
struct ieee80211_local *local = hw_to_local(hw);
- struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
- struct sta_info *sta;
- u8 ac = txq->ac;
+ struct airtime_sched_info *air_sched;
+ struct airtime_info *air_info;
+ struct rb_node *node = NULL;
+ bool ret = false;
+ u64 now;
- spin_lock_bh(&local->active_txq_lock[ac]);
- if (!txqi->txq.sta)
- goto out;
+ if (!ieee80211_txq_airtime_check(hw, txq))
+ return false;
+
+ air_sched = &local->airtime[txq->ac];
+ spin_lock_bh(&air_sched->lock);
- if (list_empty(&txqi->schedule_order))
+ if (RB_EMPTY_NODE(&txqi->schedule_order))
goto out;
- list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
- schedule_order) {
- if (iter == txqi)
- break;
+ now = ktime_get_boottime_ns();
- if (!iter->txq.sta) {
- list_move_tail(&iter->schedule_order,
- &local->active_txqs[ac]);
- continue;
- }
- sta = container_of(iter->txq.sta, struct sta_info, sta);
- if (sta->airtime[ac].deficit < 0)
- sta->airtime[ac].deficit += sta->airtime_weight;
- list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
- }
+ /* Like in ieee80211_next_txq(), make sure the first station in the
+ * scheduling order is eligible for transmission to avoid starvation.
+ */
+ node = rb_first_cached(&air_sched->active_txqs);
+ if (node) {
+ first_txqi = container_of(node, struct txq_info,
+ schedule_order);
+ air_info = to_airtime_info(&first_txqi->txq);
- sta = container_of(txqi->txq.sta, struct sta_info, sta);
- if (sta->airtime[ac].deficit >= 0)
- goto out;
+ if (air_sched->v_t < air_info->v_t)
+ airtime_catchup_v_t(air_sched, air_info->v_t, now);
+ }
- sta->airtime[ac].deficit += sta->airtime_weight;
- list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ air_info = to_airtime_info(&txqi->txq);
+ if (air_info->v_t <= air_sched->v_t) {
+ air_sched->last_schedule_activity = now;
+ ret = true;
+ }
- return false;
out:
- if (!list_empty(&txqi->schedule_order))
- list_del_init(&txqi->schedule_order);
- spin_unlock_bh(&local->active_txq_lock[ac]);
-
- return true;
+ spin_unlock_bh(&air_sched->lock);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct airtime_sched_info *air_sched = &local->airtime[ac];
- spin_lock_bh(&local->active_txq_lock[ac]);
- local->schedule_round[ac]++;
- spin_unlock_bh(&local->active_txq_lock[ac]);
+ spin_lock_bh(&air_sched->lock);
+ air_sched->schedule_pos = NULL;
+ spin_unlock_bh(&air_sched->lock);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ struct rate_control_ref *ref = sdata->local->rate_ctrl;
+ u16 tid;
+
+ if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+ return;
+
+ if (!sta || !sta->sta.ht_cap.ht_supported ||
+ !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+ skb->protocol == sdata->control_port_protocol)
+ return;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ return;
+
+ ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags,
@@ -3959,6 +4173,8 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
skb_get_hash(skb);
}
+ ieee80211_aggr_check(sdata, sta, skb);
+
if (sta) {
struct ieee80211_fast_tx *fast_tx;
@@ -4222,6 +4438,8 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
memset(info, 0, sizeof(*info));
+ ieee80211_aggr_check(sdata, sta, skb);
+
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (tid_tx) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0a0481f5af48..05e96212b104 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*
* utilities for mac80211
*/
@@ -947,7 +947,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
switch (elem->data[0]) {
case WLAN_EID_EXT_HE_MU_EDCA:
- if (len == sizeof(*elems->mu_edca_param_set)) {
+ if (len >= sizeof(*elems->mu_edca_param_set)) {
elems->mu_edca_param_set = data;
if (crc)
*crc = crc32_be(*crc, (void *)elem,
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
}
break;
case WLAN_EID_EXT_UORA:
- if (len == 1)
+ if (len >= 1)
elems->uora_element = data;
break;
case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->max_channel_switch_time = data;
break;
case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len == sizeof(*elems->mbssid_config_ie))
+ if (len >= sizeof(*elems->mbssid_config_ie))
elems->mbssid_config_ie = data;
break;
case WLAN_EID_EXT_HE_SPR:
@@ -985,7 +985,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len == sizeof(*elems->he_6ghz_capa))
+ if (len >= sizeof(*elems->he_6ghz_capa))
elems->he_6ghz_capa = data;
break;
}
@@ -1074,14 +1074,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
switch (id) {
case WLAN_EID_LINK_ID:
- if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
elem_parse_failed = true;
break;
}
elems->lnk_id = (void *)(pos - 2);
break;
case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
elem_parse_failed = true;
break;
}
@@ -1244,7 +1244,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->sec_chan_offs = (void *)pos;
break;
case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen !=
+ if (elen <
sizeof(*elems->mesh_chansw_params_ie)) {
elem_parse_failed = true;
break;
@@ -1253,7 +1253,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
break;
case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
if (!action ||
- elen != sizeof(*elems->wide_bw_chansw_ie)) {
+ elen < sizeof(*elems->wide_bw_chansw_ie)) {
elem_parse_failed = true;
break;
}
@@ -1272,7 +1272,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
pos, elen);
if (ie) {
- if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
+ if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
elems->wide_bw_chansw_ie =
(void *)(ie + 2);
else
@@ -1316,7 +1316,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->cisco_dtpc_elem = pos;
break;
case WLAN_EID_ADDBA_EXT:
- if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
elem_parse_failed = true;
break;
}
@@ -1342,7 +1342,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem, elems);
break;
case WLAN_EID_S1G_CAPABILITIES:
- if (elen == sizeof(*elems->s1g_capab))
+ if (elen >= sizeof(*elems->s1g_capab))
elems->s1g_capab = (void *)pos;
else
elem_parse_failed = true;
@@ -1693,7 +1693,10 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
- WARN_ON(err);
+ if (WARN_ON(err)) {
+ kfree_skb(skb);
+ return;
+ }
}
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
@@ -1934,13 +1937,26 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
*offset = noffset;
}
- he_cap = ieee80211_get_he_sta_cap(sband);
- if (he_cap) {
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
+ if (he_cap &&
+ cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ IEEE80211_CHAN_NO_HE)) {
pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
if (!pos)
goto out_err;
+ }
+
+ if (cfg80211_any_usable_channels(local->hw.wiphy,
+ BIT(NL80211_BAND_6GHZ),
+ IEEE80211_CHAN_NO_HE)) {
+ struct ieee80211_supported_band *sband6;
+
+ sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
+ he_cap = ieee80211_get_he_iftype_cap(sband6,
+ ieee80211_vif_type_p2p(&sdata->vif));
- if (sband->band == NL80211_BAND_6GHZ) {
+ if (he_cap) {
enum nl80211_iftype iftype =
ieee80211_vif_type_p2p(&sdata->vif);
__le16 cap = ieee80211_get_he_6ghz_capa(sband, iftype);
@@ -2178,8 +2194,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
list_for_each_entry(ctx, &local->chanctx_list, list)
ctx->driver_present = false;
mutex_unlock(&local->chanctx_mtx);
-
- cfg80211_shutdown_all_interfaces(local->hw.wiphy);
}
static void ieee80211_assign_chanctx(struct ieee80211_local *local,
@@ -2946,12 +2960,15 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
u8 *pos;
u16 cap;
- sband = ieee80211_get_sband(sdata);
- if (!sband)
+ if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
+ BIT(NL80211_BAND_6GHZ),
+ IEEE80211_CHAN_NO_HE))
return;
+ sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
+
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
- if (WARN_ON(!iftd))
+ if (!iftd)
return;
/* Check for device HE 6 GHz capability before adding element */
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 91bf32af55e9..bca47fad5a16 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -3,6 +3,7 @@
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
+ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
return RX_CONTINUE;
@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
hdr->addr1, hwaccel, rx->security_idx,
- &rx->tkip_iv32,
- &rx->tkip_iv16);
+ &rx->tkip.iv32,
+ &rx->tkip.iv16);
if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
@@ -553,6 +554,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove CCMP header and MIC */
@@ -781,6 +784,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove GCMP header and MIC */
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 96ba616f59bf..7d738bd06f2c 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -4,7 +4,9 @@
* Copyright (c) 2019, Tessares SA.
*/
+#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
+#endif
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -15,10 +17,14 @@
static int mptcp_pernet_id;
struct mptcp_pernet {
+#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_hdr;
+#endif
- int mptcp_enabled;
+ u8 mptcp_enabled;
unsigned int add_addr_timeout;
+ u8 checksum_enabled;
+ u8 allow_join_initial_addr_port;
};
static struct mptcp_pernet *mptcp_get_pernet(struct net *net)
@@ -36,15 +42,36 @@ unsigned int mptcp_get_add_addr_timeout(struct net *net)
return mptcp_get_pernet(net)->add_addr_timeout;
}
+int mptcp_is_checksum_enabled(struct net *net)
+{
+ return mptcp_get_pernet(net)->checksum_enabled;
+}
+
+int mptcp_allow_join_id0(struct net *net)
+{
+ return mptcp_get_pernet(net)->allow_join_initial_addr_port;
+}
+
+static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
+{
+ pernet->mptcp_enabled = 1;
+ pernet->add_addr_timeout = TCP_RTO_MAX;
+ pernet->checksum_enabled = 0;
+ pernet->allow_join_initial_addr_port = 1;
+}
+
+#ifdef CONFIG_SYSCTL
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
- .maxlen = sizeof(int),
+ .maxlen = sizeof(u8),
.mode = 0644,
/* users with CAP_NET_ADMIN or root (not and) can change this
* value, the same as other sysctls in the 'net' tree.
*/
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
},
{
.procname = "add_addr_timeout",
@@ -52,15 +79,25 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "checksum_enabled",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
+ {
+ .procname = "allow_join_initial_addr_port",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE
+ },
{}
};
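
For reference, the proc_dou8vec_minmax + SYSCTL_ZERO/SYSCTL_ONE pattern used above is the standard way to clamp a byte-sized knob to 0/1. A minimal hypothetical table entry in the same style (example_enabled is not an MPTCP sysctl):

#include <linux/sysctl.h>

static u8 example_enabled = 1;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_enabled",
		.data		= &example_enabled,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler	= proc_dou8vec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};
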
-static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
-{
- pernet->mptcp_enabled = 1;
- pernet->add_addr_timeout = TCP_RTO_MAX;
-}
-
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
{
struct ctl_table_header *hdr;
@@ -75,6 +112,8 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[0].data = &pernet->mptcp_enabled;
table[1].data = &pernet->add_addr_timeout;
+ table[2].data = &pernet->checksum_enabled;
+ table[3].data = &pernet->allow_join_initial_addr_port;
hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
if (!hdr)
@@ -100,6 +139,17 @@ static void mptcp_pernet_del_table(struct mptcp_pernet *pernet)
kfree(table);
}
+#else
+
+static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
+{
+ return 0;
+}
+
+static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}
+
+#endif /* CONFIG_SYSCTL */
+
static int __net_init mptcp_net_init(struct net *net)
{
struct mptcp_pernet *pernet = mptcp_get_pernet(net);
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index eb2dc6dbe212..52ea2517e856 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -25,6 +25,8 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+ SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
+ SNMP_MIB_ITEM("DataCsumErr", MPTCP_MIB_DATACSUMERR),
SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),
SNMP_MIB_ITEM("OFOMerge", MPTCP_MIB_OFOMERGE),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index f0da4f060fe1..193466c9b549 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -18,6 +18,8 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
+ MPTCP_MIB_DSSTCPMISMATCH, /* DSS-mapping did not map with TCP's sequence numbers */
+ MPTCP_MIB_DATACSUMERR, /* The data checksum failed */
MPTCP_MIB_OFOQUEUETAIL, /* Segments inserted into OoO queue tail */
MPTCP_MIB_OFOQUEUE, /* Segments inserted into OoO queue */
MPTCP_MIB_OFOMERGE, /* Segments merged in OoO queue */
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index f16d9b5ee978..8f88ddeab6a2 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -144,6 +144,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
info->mptcpi_write_seq = READ_ONCE(msk->write_seq);
info->mptcpi_snd_una = READ_ONCE(msk->snd_una);
info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
+ info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
unlock_sock_fast(sk, slow);
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 99fc21406168..b5850afea343 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -44,7 +44,20 @@ static void mptcp_parse_option(const struct sk_buff *skb,
else
expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
}
- if (opsize != expected_opsize)
+
+ /* Cf. RFC 8684 Section 3.3.0:
+ * If a checksum is present but its use had
+ * not been negotiated in the MP_CAPABLE handshake, the receiver MUST
+ * close the subflow with a RST, as it is not behaving as negotiated.
+ * If a checksum is not present when its use has been negotiated, the
+ * receiver MUST close the subflow with a RST, as it is considered
+ * broken.
+ * We still parse options with a mismatching csum presence, so that
+ * later in subflow_data_ready we can trigger the reset.
+ */
+ if (opsize != expected_opsize &&
+ (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA ||
+ opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM))
break;
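
The size check now accepts exactly one extra variant. A sketch of the acceptance rule as a standalone predicate, with the option lengths written out as I read the kernel's defines (22 and 24 bytes; treat the values as illustrative):

#include <stdbool.h>

#define MPC_ACK_DATA		22	/* MP_CAPABLE ACK carrying data */
#define MPC_ACK_DATA_CSUM	24	/* + 2 bytes of DSS checksum */

/* Accept the exact expected size, or the data+checksum variant, so a
 * checksum-presence mismatch is parsed now and punished with a RST later
 * in subflow_data_ready rather than being silently ignored. */
static bool mpc_opsize_ok(unsigned int opsize, unsigned int expected)
{
	if (opsize == expected)
		return true;
	return expected == MPC_ACK_DATA && opsize == MPC_ACK_DATA_CSUM;
}
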
/* try to be gentle vs future versions on the initial syn */
@@ -66,16 +79,12 @@ static void mptcp_parse_option(const struct sk_buff *skb,
* host requires the use of checksums, checksums MUST be used.
* In other words, the only way for checksums not to be used
* is if both hosts in their SYNs set A=0."
- *
- * Section 3.3.0:
- * "If a checksum is not present when its use has been
- * negotiated, the receiver MUST close the subflow with a RST as
- * it is considered broken."
- *
- * We don't implement DSS checksum - fall back to TCP.
*/
if (flags & MPTCP_CAP_CHECKSUM_REQD)
- break;
+ mp_opt->csum_reqd = 1;
+
+ if (flags & MPTCP_CAP_DENY_JOIN_ID0)
+ mp_opt->deny_join_id0 = 1;
mp_opt->mp_capable = 1;
if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
@@ -86,7 +95,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rcvr_key = get_unaligned_be64(ptr);
ptr += 8;
}
- if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
+ if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) {
/* Section 3.1.:
* "the data parameters in a MP_CAPABLE are semantically
* equivalent to those in a DSS option and can be used
@@ -98,9 +107,14 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
}
- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
+ if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
+ mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+ mp_opt->csum_reqd = 1;
+ ptr += 2;
+ }
+ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
version, flags, opsize, mp_opt->sndr_key,
- mp_opt->rcvr_key, mp_opt->data_len);
+ mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
break;
case MPTCPOPT_MP_JOIN:
@@ -130,7 +144,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
pr_debug("MP_JOIN hmac");
} else {
- pr_warn("MP_JOIN bad option size");
mp_opt->mp_join = 0;
}
break;
@@ -172,10 +185,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
}
- /* RFC 6824, Section 3.3:
- * If a checksum is present, but its use had
- * not been negotiated in the MP_CAPABLE handshake,
- * the checksum field MUST be ignored.
+ /* Always parse any csum presence combination; we enforce the
+ * RFC 8684 Section 3.3.0 checks later in subflow_data_ready.
*/
if (opsize != expected_opsize &&
opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
@@ -210,9 +221,15 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
+ if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
+ mp_opt->csum_reqd = 1;
+ mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+ ptr += 2;
+ }
+
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
mp_opt->data_seq, mp_opt->subflow_seq,
- mp_opt->data_len);
+ mp_opt->data_len, mp_opt->csum_reqd, mp_opt->csum);
}
break;
@@ -324,9 +341,12 @@ static void mptcp_parse_option(const struct sk_buff *skb,
}
}
-void mptcp_get_options(const struct sk_buff *skb,
+void mptcp_get_options(const struct sock *sk,
+ const struct sk_buff *skb,
struct mptcp_options_received *mp_opt)
{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
const struct tcphdr *th = tcp_hdr(skb);
const unsigned char *ptr;
int length;
@@ -342,6 +362,8 @@ void mptcp_get_options(const struct sk_buff *skb,
mp_opt->dss = 0;
mp_opt->mp_prio = 0;
mp_opt->reset = 0;
+ mp_opt->csum_reqd = READ_ONCE(msk->csum_enabled);
+ mp_opt->deny_join_id0 = 0;
length = (th->doff * 4) - sizeof(struct tcphdr);
ptr = (const unsigned char *)(th + 1);
@@ -357,6 +379,8 @@ void mptcp_get_options(const struct sk_buff *skb,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
@@ -381,6 +405,8 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
if (subflow->request_mptcp) {
opts->suboptions = OPTION_MPTCP_MPC_SYN;
+ opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk));
+ opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
@@ -436,8 +462,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
struct mptcp_out_options *opts)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_ext *mpext;
unsigned int data_len;
+ u8 len;
/* When skb is not available, we'd better over-estimate the emitted
* options len. A full DSS option (28 bytes) is longer than
@@ -466,16 +494,27 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
opts->suboptions = OPTION_MPTCP_MPC_ACK;
opts->sndr_key = subflow->local_key;
opts->rcvr_key = subflow->remote_key;
+ opts->csum_reqd = READ_ONCE(msk->csum_enabled);
+ opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
/* Section 3.1.
* The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
* packets that start the first subflow of an MPTCP connection,
* as well as the first packet that carries data
*/
- if (data_len > 0)
- *size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
- else
+ if (data_len > 0) {
+ len = TCPOLEN_MPTCP_MPC_ACK_DATA;
+ if (opts->csum_reqd) {
+ /* we need to propagate more info to csum the pseudo hdr */
+ opts->ext_copy.data_seq = mpext->data_seq;
+ opts->ext_copy.subflow_seq = mpext->subflow_seq;
+ opts->ext_copy.csum = mpext->csum;
+ len += TCPOLEN_MPTCP_DSS_CHECKSUM;
+ }
+ *size = ALIGN(len, 4);
+ } else {
*size = TCPOLEN_MPTCP_MPC_ACK;
+ }
pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
subflow, subflow->local_key, subflow->remote_key,
@@ -536,18 +575,21 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
bool ret = false;
u64 ack_seq;
+ opts->csum_reqd = READ_ONCE(msk->csum_enabled);
mpext = skb ? mptcp_get_ext(skb) : NULL;
if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
- unsigned int map_size;
+ unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
- map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
+ if (mpext) {
+ if (opts->csum_reqd)
+ map_size += TCPOLEN_MPTCP_DSS_CHECKSUM;
- remaining -= map_size;
- dss_size = map_size;
- if (mpext)
opts->ext_copy = *mpext;
+ }
+ remaining -= map_size;
+ dss_size = map_size;
if (skb && snd_data_fin_enable)
mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
ret = true;
@@ -790,6 +832,8 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
if (subflow_req->mp_capable) {
opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
opts->sndr_key = subflow_req->local_key;
+ opts->csum_reqd = subflow_req->csum_reqd;
+ opts->allow_join_id0 = subflow_req->allow_join_id0;
*size = TCPOLEN_MPTCP_MPC_SYNACK;
pr_debug("subflow_req=%p, local_key=%llu",
subflow_req, subflow_req->local_key);
@@ -868,6 +912,9 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
return false;
}
+ if (mp_opt->deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+
if (unlikely(!READ_ONCE(msk->pm.server_side)))
pr_warn_once("bogus mpc option on established client sk");
mptcp_subflow_fully_established(subflow, mp_opt);
@@ -895,19 +942,20 @@ reset:
return false;
}
-static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
{
- u32 old_ack32, cur_ack32;
-
- if (use_64bit)
- return cur_ack;
-
- old_ack32 = (u32)old_ack;
- cur_ack32 = (u32)cur_ack;
- cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
- if (unlikely(before(cur_ack32, old_ack32)))
- return cur_ack + (1LL << 32);
- return cur_ack;
+ u32 old_seq32, cur_seq32;
+
+ old_seq32 = (u32)old_seq;
+ cur_seq32 = (u32)cur_seq;
+ cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;
+ if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32)))
+ return cur_seq + (1LL << 32);
+
+ /* reverse wrap could happen, too */
+ if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32)))
+ return cur_seq - (1LL << 32);
+ return cur_seq;
}
static void ack_update_msk(struct mptcp_sock *msk,
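As a sanity check on the wrap handling, a userspace re-implementation sketch with sample values; before()/after() are re-derived here as signed 32-bit compares, and the test inputs are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* signed 32-bit wrap-around compares, mirroring before()/after() */
    static int before32(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static int after32(uint32_t a, uint32_t b) { return before32(b, a); }

    static uint64_t expand_seq(uint64_t old_seq, uint64_t cur_seq)
    {
            uint32_t old32 = (uint32_t)old_seq, cur32 = (uint32_t)cur_seq;
            uint64_t ret = (old_seq & 0xffffffff00000000ULL) + cur32;

            if (cur32 < old32 && before32(old32, cur32))
                    return ret + (1ULL << 32);      /* forward wrap */
            if (cur32 > old32 && after32(old32, cur32))
                    return ret - (1ULL << 32);      /* reverse wrap */
            return ret;
    }

    int main(void)
    {
            /* forward wrap: the low word restarts just past 2^32 */
            printf("%llx\n", (unsigned long long)expand_seq(0x1fffffff0ULL, 0x10));
            /* in-window advance: the same high word is kept */
            printf("%llx\n", (unsigned long long)expand_seq(0x100001000ULL, 0x2000));
            return 0;       /* prints 200000010 and 100002000 */
    }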
@@ -925,7 +973,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
* more dangerous than missing an ack
*/
old_snd_una = msk->snd_una;
- new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
+ new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
/* ACK for data not even sent yet? Ignore. */
if (after64(new_snd_una, snd_nxt))
@@ -962,7 +1010,7 @@ bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool us
return false;
WRITE_ONCE(msk->rcv_data_fin_seq,
- expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
+ mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
WRITE_ONCE(msk->rcv_data_fin, 1);
return true;
@@ -1008,7 +1056,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
return;
}
- mptcp_get_options(skb, &mp_opt);
+ mptcp_get_options(sk, skb, &mp_opt);
if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
return;
@@ -1024,7 +1072,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
} else {
mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
- mptcp_pm_del_add_timer(msk, &mp_opt.addr);
+ mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
}
@@ -1100,6 +1148,10 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
}
mpext->data_len = mp_opt.data_len;
mpext->use_map = 1;
+ mpext->csum_reqd = mp_opt.csum_reqd;
+
+ if (mpext->csum_reqd)
+ mpext->csum = mp_opt.csum;
}
}
@@ -1119,25 +1171,53 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
+static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+{
+ struct csum_pseudo_header header;
+ __wsum csum;
+
+ /* cf. RFC 8684 Section 3.3.1:
+ * the data sequence number used in the pseudo-header is
+ * always the 64-bit value, irrespective of what length is used in the
+ * DSS option itself.
+ */
+ header.data_seq = cpu_to_be64(mpext->data_seq);
+ header.subflow_seq = htonl(mpext->subflow_seq);
+ header.data_len = htons(mpext->data_len);
+ header.csum = 0;
+
+ csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum));
+ return (__force u16)csum_fold(csum);
+}
+
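A standalone sketch of the RFC 8684 Section 3.3.1 fold, using plain one's-complement helpers in place of the kernel csum primitives; the mapping values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    static void put_be64(uint8_t *p, uint64_t v)
    {
            int i;

            for (i = 0; i < 8; i++)
                    p[i] = v >> (56 - 8 * i);
    }

    /* 16-bit one's complement accumulation, big-endian byte pairing */
    static uint32_t ocsum(uint32_t sum, const uint8_t *p, int len)
    {
            for (; len > 1; p += 2, len -= 2)
                    sum += (uint32_t)p[0] << 8 | p[1];
            if (len)
                    sum += (uint32_t)p[0] << 8;
            return sum;
    }

    static uint16_t csum_fold16(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* hypothetical mapping: data_seq=1000, subflow_seq=1, 3 bytes */
            uint8_t hdr[16] = { 0 }, data[3] = { 'a', 'b', 'c' };

            put_be64(hdr, 1000);                       /* 64-bit data_seq */
            put_be64(hdr + 8, 1ULL << 32 | 3 << 16);   /* ssn=1, len=3, csum=0 */
            printf("DSS csum: 0x%04x\n",
                   csum_fold16(ocsum(ocsum(0, hdr, 16), data, 3)));
            return 0;
    }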
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts)
{
if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
- u8 len;
+ u8 len, flag = MPTCP_CAP_HMAC_SHA256;
- if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
+ if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
len = TCPOLEN_MPTCP_MPC_SYN;
- else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
+ } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
len = TCPOLEN_MPTCP_MPC_SYNACK;
- else if (opts->ext_copy.data_len)
+ } else if (opts->ext_copy.data_len) {
len = TCPOLEN_MPTCP_MPC_ACK_DATA;
- else
+ if (opts->csum_reqd)
+ len += TCPOLEN_MPTCP_DSS_CHECKSUM;
+ } else {
len = TCPOLEN_MPTCP_MPC_ACK;
+ }
+
+ if (opts->csum_reqd)
+ flag |= MPTCP_CAP_CHECKSUM_REQD;
+
+ if (!opts->allow_join_id0)
+ flag |= MPTCP_CAP_DENY_JOIN_ID0;
*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
MPTCP_SUPPORTED_VERSION,
- MPTCP_CAP_HMAC_SHA256);
+ flag);
if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
opts->suboptions))
@@ -1153,8 +1233,13 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
if (!opts->ext_copy.data_len)
goto mp_capable_done;
- put_unaligned_be32(opts->ext_copy.data_len << 16 |
- TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+ if (opts->csum_reqd) {
+ put_unaligned_be32(opts->ext_copy.data_len << 16 |
+ mptcp_make_csum(&opts->ext_copy), ptr);
+ } else {
+ put_unaligned_be32(opts->ext_copy.data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+ }
ptr += 1;
}
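The trailing 32-bit word thus carries data_len in the upper half and either the DSS checksum or two NOP options in the lower half; a toy illustration of the packing, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_NOP 1    /* the one-byte TCP no-op option */

    int main(void)
    {
            uint16_t data_len = 1400, csum = 0xbeef;    /* hypothetical */

            printf("%08x\n", (uint32_t)data_len << 16 | csum);
            printf("%08x\n", (uint32_t)data_len << 16 |
                             TCPOPT_NOP << 8 | TCPOPT_NOP);
            return 0;       /* 0578beef / 05780101, big-endian on the wire */
    }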
@@ -1306,6 +1391,9 @@ mp_capable_done:
flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
if (mpext->data_fin)
flags |= MPTCP_DSS_DATA_FIN;
+
+ if (opts->csum_reqd)
+ len += TCPOLEN_MPTCP_DSS_CHECKSUM;
}
*ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);
@@ -1325,8 +1413,13 @@ mp_capable_done:
ptr += 2;
put_unaligned_be32(mpext->subflow_seq, ptr);
ptr += 1;
- put_unaligned_be32(mpext->data_len << 16 |
- TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+ if (opts->csum_reqd) {
+ put_unaligned_be32(mpext->data_len << 16 |
+ mptcp_make_csum(mpext), ptr);
+ } else {
+ put_unaligned_be32(mpext->data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+ }
}
}
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 9d00fa6d22e9..639271e09604 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -320,6 +320,7 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
WRITE_ONCE(msk->pm.addr_signal, 0);
WRITE_ONCE(msk->pm.accept_addr, false);
WRITE_ONCE(msk->pm.accept_subflow, false);
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
msk->pm.status = 0;
spin_lock_init(&msk->pm.lock);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 6ba040897738..d2591ebf01d9 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -346,18 +346,18 @@ out:
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr)
+ struct mptcp_addr_info *addr, bool check_id)
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
- if (entry)
+ if (entry && (!check_id || entry->addr.id == addr->id))
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
spin_unlock_bh(&msk->pm.lock);
- if (entry)
+ if (entry && (!check_id || entry->addr.id == addr->id))
sk_stop_timer_sync(sk, &entry->add_timer);
return entry;
@@ -451,7 +451,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check if should create a new subflow */
if (msk->pm.local_addr_used < local_addr_max &&
- msk->pm.subflows < subflows_max) {
+ msk->pm.subflows < subflows_max &&
+ !READ_ONCE(msk->pm.remote_deny_join_id0)) {
local = select_local_address(pernet, msk);
if (local) {
struct mptcp_addr_info remote = { 0 };
@@ -540,6 +541,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
if (subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
spin_unlock_bh(&msk->pm.lock);
pr_debug("send ack for %s%s%s",
@@ -547,9 +549,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
- lock_sock(ssk);
+ slow = lock_sock_fast(ssk);
tcp_send_ack(ssk);
- release_sock(ssk);
+ unlock_sock_fast(ssk, slow);
spin_lock_bh(&msk->pm.lock);
}
}
@@ -566,6 +568,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
struct sock *sk = (struct sock *)msk;
struct mptcp_addr_info local;
+ bool slow;
local_address((struct sock_common *)ssk, &local);
if (!addresses_equal(&local, addr, addr->port))
@@ -578,9 +581,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
spin_unlock_bh(&msk->pm.lock);
pr_debug("send ack for mp_prio");
- lock_sock(ssk);
+ slow = lock_sock_fast(ssk);
tcp_send_ack(ssk);
- release_sock(ssk);
+ unlock_sock_fast(ssk, slow);
spin_lock_bh(&msk->pm.lock);
return 0;
@@ -971,8 +974,14 @@ skip_family:
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
- if (tb[MPTCP_PM_ADDR_ATTR_PORT])
+ if (tb[MPTCP_PM_ADDR_ATTR_PORT]) {
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, attr,
+ "flags must have signal when using port");
+ return -EINVAL;
+ }
entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
+ }
return 0;
}
@@ -1064,7 +1073,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
- entry = mptcp_pm_del_add_timer(msk, addr);
+ entry = mptcp_pm_del_add_timer(msk, addr, false);
if (entry) {
list_del(&entry->list);
kfree(entry);
@@ -1913,10 +1922,13 @@ static int __net_init pm_nl_init_net(struct net *net)
struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
- __reset_counters(pernet);
pernet->next_id = 1;
- bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
spin_lock_init(&pernet->lock);
+
+ /* No need to initialize other pernet fields, the struct is zeroed at
+ * allocation time.
+ */
+
return 0;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 29a2d690d8d5..7a5afa8c6866 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -39,10 +39,15 @@ struct mptcp_skb_cb {
u64 map_seq;
u64 end_seq;
u32 offset;
+ u8 has_rxtstamp:1;
};
#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
+enum {
+ MPTCP_CMSG_TS = BIT(0),
+};
+
static struct percpu_counter mptcp_sockets_allocated;
static void __mptcp_destroy_sock(struct sock *sk);
@@ -272,6 +277,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = (struct sock *)msk;
struct sk_buff *tail;
+ bool has_rxtstamp;
__skb_unlink(skb, &ssk->sk_receive_queue);
@@ -280,13 +286,17 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
/* try to fetch required memory from subflow */
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- if (ssk->sk_forward_alloc < skb->truesize)
- goto drop;
- __sk_mem_reclaim(ssk, skb->truesize);
- if (!sk_rmem_schedule(sk, skb, skb->truesize))
+ int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
+
+ if (ssk->sk_forward_alloc < amount)
goto drop;
+
+ ssk->sk_forward_alloc -= amount;
+ sk->sk_forward_alloc += amount;
}
+ has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
/* the skb map_seq accounts for the skb offset:
* mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
* value
@@ -294,6 +304,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
MPTCP_SKB_CB(skb)->offset = offset;
+ MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
/* in sequence */
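The reserve moved between ssk and sk above is rounded up to whole accounting quanta; a sketch of the arithmetic, assuming the page-sized SK_MEM_QUANTUM of this kernel generation:

    #include <stdio.h>

    #define SK_MEM_QUANTUM          4096    /* assumed: PAGE_SIZE */
    #define SK_MEM_QUANTUM_SHIFT    12

    static int sk_mem_pages(int amt)
    {
            return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
    }

    int main(void)
    {
            int truesize = 3000;    /* hypothetical skb->truesize */

            /* 3000 bytes of truesize move 4096 bytes of forward alloc */
            printf("%d\n", sk_mem_pages(truesize) << SK_MEM_QUANTUM_SHIFT);
            return 0;
    }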
@@ -422,56 +433,55 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
- lock_sock(ssk);
+ slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk))
tcp_send_ack(ssk);
- release_sock(ssk);
+ unlock_sock_fast(ssk, slow);
}
}
-static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
{
- int ret;
+ bool slow;
- lock_sock(ssk);
- ret = tcp_can_send_ack(ssk);
- if (ret)
+ slow = lock_sock_fast(ssk);
+ if (tcp_can_send_ack(ssk))
tcp_cleanup_rbuf(ssk, 1);
- release_sock(ssk);
- return ret;
+ unlock_sock_fast(ssk, slow);
+}
+
+static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
+{
+ const struct inet_connection_sock *icsk = inet_csk(ssk);
+ u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
+ const struct tcp_sock *tp = tcp_sk(ssk);
+
+ return (ack_pending & ICSK_ACK_SCHED) &&
+ ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
+ READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
+ (rx_empty && ack_pending &
+ (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{
- struct sock *ack_hint = READ_ONCE(msk->ack_hint);
int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- bool cleanup;
+ int space = __mptcp_space(sk);
+ bool cleanup, rx_empty;
- /* this is a simple superset of what tcp_cleanup_rbuf() implements
- * so that we don't have to acquire the ssk socket lock most of the time
- * to do actually nothing
- */
- cleanup = __mptcp_space(sk) - old_space >= max(0, old_space);
- if (!cleanup)
- return;
+ cleanup = (space > 0) && (space >= (old_space << 1));
+ rx_empty = !atomic_read(&sk->sk_rmem_alloc);
- /* if the hinted ssk is still active, try to use it */
- if (likely(ack_hint)) {
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk))
- return;
- }
+ if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
+ mptcp_subflow_cleanup_rbuf(ssk);
}
-
- /* otherwise pick the first active subflow */
- mptcp_for_each_subflow(msk, subflow)
- if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow)))
- return;
}
static bool mptcp_check_data_fin(struct sock *sk)
@@ -616,7 +626,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
break;
}
} while (more_data_avail);
- WRITE_ONCE(msk->ack_hint, ssk);
*bytes += moved;
return done;
@@ -668,18 +677,19 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
/* In most cases we will be able to lock the mptcp socket. If it's already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
- if (inet_sk_state_load(sk) == TCP_CLOSE)
- return;
-
- mptcp_data_lock(sk);
-
__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
__mptcp_ofo_queue(msk);
+ if (unlikely(ssk->sk_err)) {
+ if (!sock_owned_by_user(sk))
+ __mptcp_error_report(sk);
+ else
+ set_bit(MPTCP_ERROR_REPORT, &msk->flags);
+ }
/* If the moves have caught up with the DATA_FIN sequence number
* it's time to ack the DATA_FIN and change socket state, but
@@ -688,7 +698,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
*/
if (mptcp_pending_data_fin(sk, NULL))
mptcp_schedule_work(sk);
- mptcp_data_unlock(sk);
+ return moved > 0;
}
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
@@ -696,7 +706,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
int sk_rbuf, ssk_rbuf;
- bool wake;
/* The peer can send data while we are shutting down this
* subflow at msk destruction time, but we must avoid enqueuing
@@ -705,28 +714,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (unlikely(subflow->disposable))
return;
- /* move_skbs_to_msk below can legitly clear the data_avail flag,
- * but we will need later to properly woke the reader, cache its
- * value
- */
- wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
- if (wake)
- set_bit(MPTCP_DATA_READY, &msk->flags);
-
ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
if (unlikely(ssk_rbuf > sk_rbuf))
sk_rbuf = ssk_rbuf;
- /* over limit? can't append more skbs to msk */
+ /* over limit? can't append more skbs to msk; also, no need to wake up */
if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
- goto wake;
-
- move_skbs_to_msk(msk, ssk);
+ return;
-wake:
- if (wake)
+ /* Wake up the reader only for in-sequence data */
+ mptcp_data_lock(sk);
+ if (move_skbs_to_msk(msk, ssk)) {
+ set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
+ }
+ mptcp_data_unlock(sk);
}
static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
@@ -858,7 +861,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
sock_owned_by_me(sk);
mptcp_for_each_subflow(msk, subflow) {
- if (subflow->data_avail)
+ if (READ_ONCE(subflow->data_avail))
return mptcp_subflow_tcp_sock(subflow);
}
@@ -879,31 +882,29 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
!mpext->frozen;
}
+/* we can append data to the given data frag if:
+ * - there is space available in the backing page_frag
+ * - the data frag tail matches the current page_frag free offset
+ * - the data frag end sequence number matches the current write seq
+ */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
const struct page_frag *pfrag,
const struct mptcp_data_frag *df)
{
return df && pfrag->page == df->page &&
pfrag->size - pfrag->offset > 0 &&
+ pfrag->offset == (df->offset + df->data_len) &&
df->data_seq + df->data_len == msk->write_seq;
}
-static int mptcp_wmem_with_overhead(struct sock *sk, int size)
+static int mptcp_wmem_with_overhead(int size)
{
- struct mptcp_sock *msk = mptcp_sk(sk);
- int ret, skbs;
-
- ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
- skbs = (msk->tx_pending_data + size) / msk->size_goal_cache;
- if (skbs < msk->skb_tx_cache.qlen)
- return ret;
-
- return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER);
+ return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
}
static void __mptcp_wmem_reserve(struct sock *sk, int size)
{
- int amount = mptcp_wmem_with_overhead(sk, size);
+ int amount = mptcp_wmem_with_overhead(size);
struct mptcp_sock *msk = mptcp_sk(sk);
WARN_ON_ONCE(msk->wmem_reserved);
@@ -941,6 +942,10 @@ static void __mptcp_update_wmem(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
if (!msk->wmem_reserved)
return;
@@ -1079,10 +1084,20 @@ out:
static void __mptcp_clean_una_wakeup(struct sock *sk)
{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
__mptcp_clean_una(sk);
mptcp_write_space(sk);
}
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ __mptcp_clean_una_wakeup(sk);
+ mptcp_data_unlock(sk);
+}
+
static void mptcp_enter_memory_pressure(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -1184,49 +1199,8 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
return NULL;
}
-static bool mptcp_tx_cache_refill(struct sock *sk, int size,
- struct sk_buff_head *skbs, int *total_ts)
-{
- struct mptcp_sock *msk = mptcp_sk(sk);
- struct sk_buff *skb;
- int space_needed;
-
- if (unlikely(tcp_under_memory_pressure(sk))) {
- mptcp_mem_reclaim_partial(sk);
-
- /* under pressure pre-allocate at most a single skb */
- if (msk->skb_tx_cache.qlen)
- return true;
- space_needed = msk->size_goal_cache;
- } else {
- space_needed = msk->tx_pending_data + size -
- msk->skb_tx_cache.qlen * msk->size_goal_cache;
- }
-
- while (space_needed > 0) {
- skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation);
- if (unlikely(!skb)) {
- /* under memory pressure, try to pass the caller a
- * single skb to allow forward progress
- */
- while (skbs->qlen > 1) {
- skb = __skb_dequeue_tail(skbs);
- *total_ts -= skb->truesize;
- __kfree_skb(skb);
- }
- return skbs->qlen > 0;
- }
-
- *total_ts += skb->truesize;
- __skb_queue_tail(skbs, skb);
- space_needed -= msk->size_goal_cache;
- }
- return true;
-}
-
static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
- struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *skb;
if (ssk->sk_tx_skb_cache) {
@@ -1237,22 +1211,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
return true;
}
- skb = skb_peek(&msk->skb_tx_cache);
- if (skb) {
- if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
- skb = __skb_dequeue(&msk->skb_tx_cache);
- if (WARN_ON_ONCE(!skb))
- return false;
-
- mptcp_wmem_uncharge(sk, skb->truesize);
- ssk->sk_tx_skb_cache = skb;
- return true;
- }
-
- /* over memory limit, no point to try to allocate a new skb */
- return false;
- }
-
skb = __mptcp_do_alloc_tx_skb(sk, gfp);
if (!skb)
return false;
@@ -1268,7 +1226,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
{
return !ssk->sk_tx_skb_cache &&
- !skb_peek(&mptcp_sk(sk)->skb_tx_cache) &&
tcp_under_memory_pressure(sk);
}
@@ -1279,6 +1236,18 @@ static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
}
+/* note: only the newly appended bytes are checksummed here; their sum
+ * is merged into the existing DSS csum at the proper block offset
+ */
+static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
+{
+ struct mptcp_ext *mpext = mptcp_get_ext(skb);
+ __wsum csum = ~csum_unfold(mpext->csum);
+ int offset = skb->len - added;
+
+ mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
+}
+
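The incremental update relies on one's-complement sums composing across blocks; a userspace sketch showing that folding in only the appended bytes matches a full recompute (the kernel's csum_block_add() additionally byte-rotates the partial sum when the offset is odd, which the even offset here sidesteps):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ocsum(uint32_t sum, const uint8_t *p, int len)
    {
            for (; len > 1; p += 2, len -= 2)
                    sum += (uint32_t)p[0] << 8 | p[1];
            if (len)
                    sum += (uint32_t)p[0] << 8;
            return sum;
    }

    static uint16_t fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    int main(void)
    {
            uint8_t buf[] = "abcdefgh";     /* hypothetical payload */

            /* whole buffer vs. 6 bytes plus 2 appended at (even) offset 6 */
            printf("%04x %04x\n", fold(ocsum(0, buf, 8)),
                   fold(ocsum(ocsum(0, buf, 6), buf + 6, 2)));
            return 0;       /* both halves print the same sum */
    }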
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
struct mptcp_data_frag *dfrag,
struct mptcp_sendmsg_info *info)
@@ -1299,7 +1268,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
/* compute send limit */
info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
avail_size = info->size_goal;
- msk->size_goal_cache = info->size_goal;
skb = tcp_write_queue_tail(ssk);
if (skb) {
/* Limit the write to the size available in the
@@ -1373,10 +1341,14 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
if (zero_window_probe) {
mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
mpext->frozen = 1;
- ret = 0;
+ if (READ_ONCE(msk->csum_enabled))
+ mptcp_update_data_checksum(tail, ret);
tcp_push_pending_frames(ssk);
+ return 0;
}
out:
+ if (READ_ONCE(msk->csum_enabled))
+ mptcp_update_data_checksum(tail, ret);
mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
return ret;
}
@@ -1644,7 +1616,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
while (msg_data_left(msg)) {
int total_ts, frag_truesize = 0;
struct mptcp_data_frag *dfrag;
- struct sk_buff_head skbs;
bool dfrag_collapsed;
size_t psize, offset;
@@ -1677,16 +1648,10 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
psize = pfrag->size - offset;
psize = min_t(size_t, psize, msg_data_left(msg));
total_ts = psize + frag_truesize;
- __skb_queue_head_init(&skbs);
- if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts))
- goto wait_for_memory;
- if (!mptcp_wmem_alloc(sk, total_ts)) {
- __skb_queue_purge(&skbs);
+ if (!mptcp_wmem_alloc(sk, total_ts))
goto wait_for_memory;
- }
- skb_queue_splice_tail(&skbs, &msk->skb_tx_cache);
if (copy_page_from_iter(dfrag->page, offset, psize,
&msg->msg_iter) != psize) {
mptcp_wmem_uncharge(sk, psize + frag_truesize);
@@ -1743,7 +1708,7 @@ static void mptcp_wait_data(struct sock *sk, long *timeo)
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, timeo,
- test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
+ test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1751,7 +1716,9 @@ static void mptcp_wait_data(struct sock *sk, long *timeo)
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
- size_t len, int flags)
+ size_t len, int flags,
+ struct scm_timestamping_internal *tss,
+ int *cmsg_flags)
{
struct sk_buff *skb, *tmp;
int copied = 0;
@@ -1771,6 +1738,11 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
}
}
+ if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
+ tcp_update_recv_tstamps(skb, tss);
+ *cmsg_flags |= MPTCP_CMSG_TS;
+ }
+
copied += count;
if (count < data_len) {
@@ -1934,7 +1906,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
__mptcp_update_rmem(sk);
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
mptcp_data_unlock(sk);
- tcp_cleanup_rbuf(ssk, moved);
+
+ if (unlikely(ssk->sk_err))
+ __mptcp_error_report(sk);
unlock_sock_fast(ssk, slowpath);
} while (!done);
@@ -1947,7 +1921,6 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
ret |= __mptcp_ofo_queue(msk);
__mptcp_splice_receive_queue(sk);
mptcp_data_unlock(sk);
- mptcp_cleanup_rbuf(msk);
}
if (ret)
mptcp_check_data_fin((struct sock *)msk);
@@ -1958,7 +1931,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- int copied = 0;
+ struct scm_timestamping_internal tss;
+ int copied = 0, cmsg_flags = 0;
int target;
long timeo;
@@ -1980,7 +1954,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
while (copied < len) {
int bytes_read;
- bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags);
+ bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
if (unlikely(bytes_read < 0)) {
if (!copied)
copied = bytes_read;
@@ -2056,11 +2030,14 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
*/
if (unlikely(__mptcp_move_skbs(msk)))
set_bit(MPTCP_DATA_READY, &msk->flags);
- } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
- /* data to read but mptcp_wait_data() cleared DATA_READY */
- set_bit(MPTCP_DATA_READY, &msk->flags);
}
+
out_err:
+ if (cmsg_flags && copied >= 0) {
+ if (cmsg_flags & MPTCP_CMSG_TS)
+ tcp_recv_timestamp(msg, sk, &tss);
+ }
+
pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
msk, test_bit(MPTCP_DATA_READY, &msk->flags),
skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
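From userspace this surfaces through the usual timestamp cmsg; a hedged sketch (fd is assumed to be a connected MPTCP socket, and the classic SO_TIMESTAMPNS/SCM_TIMESTAMPNS pair is used for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static void read_with_tstamp(int fd)
    {
            char data[2048], ctrl[CMSG_SPACE(sizeof(struct timespec))];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
            };
            struct cmsghdr *cm;
            int on = 1;

            setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));
            if (recvmsg(fd, &msg, 0) <= 0)
                    return;
            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    if (cm->cmsg_level == SOL_SOCKET &&
                        cm->cmsg_type == SCM_TIMESTAMPNS) {
                            struct timespec ts;

                            memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
                            printf("rx at %lld.%09ld\n",
                                   (long long)ts.tv_sec, ts.tv_nsec);
                    }
            }
    }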
@@ -2192,9 +2169,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
if (ssk == msk->last_snd)
msk->last_snd = NULL;
- if (ssk == msk->ack_hint)
- msk->ack_hint = NULL;
-
if (ssk == msk->first)
msk->first = NULL;
@@ -2266,13 +2240,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
- lock_sock(tcp_sk);
+ slow = lock_sock_fast(tcp_sk);
if (tcp_sk->sk_state != TCP_CLOSE) {
tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
tcp_set_state(tcp_sk, TCP_CLOSE);
}
- release_sock(tcp_sk);
+ unlock_sock_fast(tcp_sk, slow);
}
inet_sk_state_store(sk, TCP_CLOSE);
@@ -2293,7 +2268,7 @@ static void __mptcp_retrans(struct sock *sk)
struct sock *ssk;
int ret;
- __mptcp_clean_una_wakeup(sk);
+ mptcp_clean_una_wakeup(sk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag) {
if (mptcp_data_fin_enabled(msk)) {
@@ -2317,8 +2292,8 @@ static void __mptcp_retrans(struct sock *sk)
/* limit retransmission to the bytes already sent on some subflows */
info.sent = 0;
- info.limit = dfrag->already_sent;
- while (info.sent < dfrag->already_sent) {
+ info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
+ while (info.sent < info.limit) {
if (!mptcp_alloc_tx_skb(sk, ssk))
break;
@@ -2330,9 +2305,11 @@ static void __mptcp_retrans(struct sock *sk)
copied += ret;
info.sent += ret;
}
- if (copied)
+ if (copied) {
+ dfrag->already_sent = max(dfrag->already_sent, info.sent);
tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
info.size_goal);
+ }
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
@@ -2400,17 +2377,15 @@ static int __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->rtx_queue);
INIT_WORK(&msk->work, mptcp_worker);
__skb_queue_head_init(&msk->receive_queue);
- __skb_queue_head_init(&msk->skb_tx_cache);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
msk->wmem_reserved = 0;
msk->rmem_released = 0;
msk->tx_pending_data = 0;
- msk->size_goal_cache = TCP_BASE_MSS;
- msk->ack_hint = NULL;
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+ WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_init(msk);
@@ -2418,13 +2393,12 @@ static int __mptcp_init_sock(struct sock *sk)
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
- tcp_assign_congestion_control(sk);
-
return 0;
}
static int mptcp_init_sock(struct sock *sk)
{
+ struct inet_connection_sock *icsk = inet_csk(sk);
struct net *net = sock_net(sk);
int ret;
@@ -2442,6 +2416,16 @@ static int mptcp_init_sock(struct sock *sk)
if (ret)
return ret;
+ /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
+ * propagate the correct value
+ */
+ tcp_assign_congestion_control(sk);
+ strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+ /* no need to keep a reference to the ops, the name will suffice */
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = NULL;
+
sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
@@ -2453,15 +2437,10 @@ static void __mptcp_clear_xmit(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
- struct sk_buff *skb;
WRITE_ONCE(msk->first_pending, NULL);
list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
dfrag_clear(sk, dfrag);
- while ((skb = __skb_dequeue(&msk->skb_tx_cache)) != NULL) {
- sk->sk_forward_alloc += skb->truesize;
- kfree_skb(skb);
- }
}
static void mptcp_cancel_work(struct sock *sk)
@@ -2616,7 +2595,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
- tcp_cleanup_congestion_control(sk);
sk_refcnt_debug_release(sk);
mptcp_dispose_initial_subflow(msk);
sock_put(sk);
@@ -2743,6 +2721,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->token = subflow_req->token;
msk->subflow = NULL;
WRITE_ONCE(msk->fully_established, false);
+ if (mp_opt->csum_reqd)
+ WRITE_ONCE(msk->csum_enabled, true);
msk->write_seq = subflow_req->idsn + 1;
msk->snd_nxt = msk->write_seq;
@@ -2916,6 +2896,11 @@ static void mptcp_release_cb(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
}
+ /* be sure to set the current sk state before taking actions
+ * depending on sk_state
+ */
+ if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+ __mptcp_set_connected(sk);
if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
__mptcp_clean_una_wakeup(sk);
if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index edc0128730df..426ed80fe72f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -68,6 +68,8 @@
#define TCPOLEN_MPTCP_FASTCLOSE 12
#define TCPOLEN_MPTCP_RST 4
+#define TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM (TCPOLEN_MPTCP_DSS_CHECKSUM + TCPOLEN_MPTCP_MPC_ACK_DATA)
+
/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP BIT(0)
#define MPTCPOPT_HMAC_LEN 20
@@ -77,8 +79,9 @@
#define MPTCP_VERSION_MASK (0x0F)
#define MPTCP_CAP_CHECKSUM_REQD BIT(7)
#define MPTCP_CAP_EXTENSIBILITY BIT(6)
+#define MPTCP_CAP_DENY_JOIN_ID0 BIT(5)
#define MPTCP_CAP_HMAC_SHA256 BIT(0)
-#define MPTCP_CAP_FLAG_MASK (0x3F)
+#define MPTCP_CAP_FLAG_MASK (0x1F)
/* MPTCP DSS flags */
#define MPTCP_DSS_DATA_FIN BIT(4)
@@ -109,6 +112,7 @@
#define MPTCP_ERROR_REPORT 8
#define MPTCP_RETRANSMIT 9
#define MPTCP_WORK_SYNC_SETSOCKOPT 10
+#define MPTCP_CONNECTED 11
static inline bool before64(__u64 seq1, __u64 seq2)
{
@@ -124,6 +128,7 @@ struct mptcp_options_received {
u64 data_seq;
u32 subflow_seq;
u16 data_len;
+ __sum16 csum;
u16 mp_capable : 1,
mp_join : 1,
fastclose : 1,
@@ -133,7 +138,9 @@ struct mptcp_options_received {
rm_addr : 1,
mp_prio : 1,
echo : 1,
- backup : 1;
+ csum_reqd : 1,
+ backup : 1,
+ deny_join_id0 : 1;
u32 token;
u32 nonce;
u64 thmac;
@@ -188,6 +195,7 @@ struct mptcp_pm_data {
bool work_pending;
bool accept_addr;
bool accept_subflow;
+ bool remote_deny_join_id0;
u8 add_addr_signaled;
u8 add_addr_accepted;
u8 local_addr_used;
@@ -234,15 +242,13 @@ struct mptcp_sock {
bool snd_data_fin_enable;
bool rcv_fastclose;
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
+ bool csum_enabled;
spinlock_t join_list_lock;
- struct sock *ack_hint;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
struct sk_buff_head receive_queue;
- struct sk_buff_head skb_tx_cache; /* this is wmem accounted */
int tx_pending_data;
- int size_goal_cache;
struct list_head conn_list;
struct list_head rtx_queue;
struct mptcp_data_frag *first_pending;
@@ -258,6 +264,7 @@ struct mptcp_sock {
} rcvq_space;
u32 setsockopt_seq;
+ char ca_name[TCP_CA_NAME_MAX];
};
#define mptcp_lock_sock(___sk, cb) do { \
@@ -334,11 +341,20 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}
+struct csum_pseudo_header {
+ __be64 data_seq;
+ __be32 subflow_seq;
+ __be16 data_len;
+ __sum16 csum;
+};
+
struct mptcp_subflow_request_sock {
struct tcp_request_sock sk;
u16 mp_capable : 1,
mp_join : 1,
- backup : 1;
+ backup : 1,
+ csum_reqd : 1,
+ allow_join_id0 : 1;
u8 local_id;
u8 remote_id;
u64 local_key;
@@ -361,7 +377,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
enum mptcp_data_avail {
MPTCP_SUBFLOW_NODATA,
MPTCP_SUBFLOW_DATA_AVAIL,
- MPTCP_SUBFLOW_OOO_DATA
};
struct mptcp_delegated_action {
@@ -386,6 +401,8 @@ struct mptcp_subflow_context {
u32 map_subflow_seq;
u32 ssn_offset;
u32 map_data_len;
+ __wsum map_data_csum;
+ u32 map_csum_len;
u32 request_mptcp : 1, /* send MP_CAPABLE */
request_join : 1, /* send MP_JOIN */
request_bkup : 1,
@@ -395,6 +412,8 @@ struct mptcp_subflow_context {
pm_notified : 1, /* PM hook called for established status */
conn_finished : 1,
map_valid : 1,
+ map_csum_reqd : 1,
+ map_data_fin : 1,
mpc_map : 1,
backup : 1,
send_mp_prio : 1,
@@ -524,6 +543,8 @@ static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *su
int mptcp_is_enabled(struct net *net);
unsigned int mptcp_get_add_addr_timeout(struct net *net);
+int mptcp_is_checksum_enabled(struct net *net);
+int mptcp_allow_join_id0(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
@@ -575,10 +596,12 @@ int __init mptcp_proto_v6_init(void);
struct sock *mptcp_sk_clone(const struct sock *sk,
const struct mptcp_options_received *mp_opt,
struct request_sock *req);
-void mptcp_get_options(const struct sk_buff *skb,
+void mptcp_get_options(const struct sock *sk,
+ const struct sk_buff *skb,
struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk);
+void __mptcp_set_connected(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
@@ -593,6 +616,14 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
int mptcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *option);
+u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq);
+static inline u64 mptcp_expand_seq(u64 old_seq, u64 cur_seq, bool use_64bit)
+{
+ if (use_64bit)
+ return cur_seq;
+
+ return __mptcp_expand_seq(old_seq, cur_seq);
+}
void __mptcp_check_push(struct sock *sk, struct sock *ssk);
void __mptcp_data_acked(struct sock *sk);
void __mptcp_error_report(struct sock *sk);
@@ -626,6 +657,8 @@ static inline void mptcp_write_space(struct sock *sk)
void mptcp_destroy_common(struct mptcp_sock *msk);
+#define MPTCP_TOKEN_MAX_RETRIES 4
+
void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
{
@@ -671,7 +704,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr);
+ struct mptcp_addr_info *addr, bool check_id);
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
struct mptcp_addr_info *addr);
@@ -752,9 +785,6 @@ unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
-int mptcp_setsockopt(struct sock *sk, int level, int optname,
- sockptr_t optval, unsigned int optlen);
-
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 00d941b66c1e..092d1f635d27 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -140,6 +140,43 @@ static void mptcp_so_incoming_cpu(struct mptcp_sock *msk, int val)
mptcp_sol_socket_sync_intval(msk, SO_INCOMING_CPU, val);
}
+static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optname, int val)
+{
+ sockptr_t optval = KERNEL_SOCKPTR(&val);
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int ret;
+
+ ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
+ optval, sizeof(val));
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow = lock_sock_fast(ssk);
+
+ switch (optname) {
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
+ case SO_TIMESTAMPNS_OLD:
+ case SO_TIMESTAMPNS_NEW:
+ sock_set_timestamp(ssk, optname, !!val);
+ break;
+ case SO_TIMESTAMPING_NEW:
+ case SO_TIMESTAMPING_OLD:
+ sock_set_timestamping(ssk, optname, val);
+ break;
+ }
+
+ unlock_sock_fast(ssk, slow);
+ }
+
+ release_sock(sk);
+ return 0;
+}
+
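Usage-wise, a single setsockopt on the MPTCP-level socket is now fanned out to every subflow; a minimal hedged example requesting software RX timestamps:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/net_tstamp.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    int main(void)
    {
            int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
                        SOF_TIMESTAMPING_SOFTWARE;
            int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

            if (fd < 0)
                    return 1;
            /* applied to the msk and fanned out to each current subflow */
            if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                           &flags, sizeof(flags)))
                    perror("SO_TIMESTAMPING");
            return 0;
    }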
static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -164,6 +201,13 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
case SO_INCOMING_CPU:
mptcp_so_incoming_cpu(msk, val);
return 0;
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
+ case SO_TIMESTAMPNS_OLD:
+ case SO_TIMESTAMPNS_NEW:
+ case SO_TIMESTAMPING_OLD:
+ case SO_TIMESTAMPING_NEW:
+ return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val);
}
return -ENOPROTOOPT;
@@ -251,9 +295,23 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
case SO_MARK:
case SO_INCOMING_CPU:
case SO_DEBUG:
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
+ case SO_TIMESTAMPNS_OLD:
+ case SO_TIMESTAMPNS_NEW:
+ case SO_TIMESTAMPING_OLD:
+ case SO_TIMESTAMPING_NEW:
return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen);
case SO_LINGER:
return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen);
+ case SO_RCVLOWAT:
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ case SO_BUSY_POLL:
+ case SO_PREFER_BUSY_POLL:
+ case SO_BUSY_POLL_BUDGET:
+ /* No need to copy: only relevant for msk */
+ return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
case SO_NO_CHECK:
case SO_DONTROUTE:
case SO_BROADCAST:
@@ -267,7 +325,24 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
return 0;
}
- return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
+ /* SO_OOBINLINE is not supported, let's avoid the related mess
+ * SO_ATTACH_FILTER, SO_ATTACH_BPF, SO_ATTACH_REUSEPORT_CBPF,
+ * SO_DETACH_REUSEPORT_BPF, SO_DETACH_FILTER, SO_LOCK_FILTER,
+ * we must be careful with subflows
+ *
+ * SO_ATTACH_REUSEPORT_EBPF is not supported, as it explicitly
+ * checks the sk_protocol field
+ *
+ * SO_PEEK_OFF is unsupported, as it is for plain TCP
+ * SO_MAX_PACING_RATE is unsupported, we must be careful with subflows
+ * SO_CNX_ADVICE is currently unsupported, could possibly be relevant,
+ * but likely needs careful design
+ *
+ * SO_ZEROCOPY is currently unsupported, TODO in sendmsg
+ * SO_TXTIME is currently unsupported
+ */
+
+ return -EOPNOTSUPP;
}
static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
@@ -299,72 +374,6 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
static bool mptcp_supported_sockopt(int level, int optname)
{
- if (level == SOL_SOCKET) {
- switch (optname) {
- case SO_DEBUG:
- case SO_REUSEPORT:
- case SO_REUSEADDR:
-
- /* the following ones need a better implementation,
- * but are quite common we want to preserve them
- */
- case SO_BINDTODEVICE:
- case SO_SNDBUF:
- case SO_SNDBUFFORCE:
- case SO_RCVBUF:
- case SO_RCVBUFFORCE:
- case SO_KEEPALIVE:
- case SO_PRIORITY:
- case SO_LINGER:
- case SO_TIMESTAMP_OLD:
- case SO_TIMESTAMP_NEW:
- case SO_TIMESTAMPNS_OLD:
- case SO_TIMESTAMPNS_NEW:
- case SO_TIMESTAMPING_OLD:
- case SO_TIMESTAMPING_NEW:
- case SO_RCVLOWAT:
- case SO_RCVTIMEO_OLD:
- case SO_RCVTIMEO_NEW:
- case SO_SNDTIMEO_OLD:
- case SO_SNDTIMEO_NEW:
- case SO_MARK:
- case SO_INCOMING_CPU:
- case SO_BINDTOIFINDEX:
- case SO_BUSY_POLL:
- case SO_PREFER_BUSY_POLL:
- case SO_BUSY_POLL_BUDGET:
-
- /* next ones are no-op for plain TCP */
- case SO_NO_CHECK:
- case SO_DONTROUTE:
- case SO_BROADCAST:
- case SO_BSDCOMPAT:
- case SO_PASSCRED:
- case SO_PASSSEC:
- case SO_RXQ_OVFL:
- case SO_WIFI_STATUS:
- case SO_NOFCS:
- case SO_SELECT_ERR_QUEUE:
- return true;
- }
-
- /* SO_OOBINLINE is not supported, let's avoid the related mess */
- /* SO_ATTACH_FILTER, SO_ATTACH_BPF, SO_ATTACH_REUSEPORT_CBPF,
- * SO_DETACH_REUSEPORT_BPF, SO_DETACH_FILTER, SO_LOCK_FILTER,
- * we must be careful with subflows
- */
- /* SO_ATTACH_REUSEPORT_EBPF is not supported, at it checks
- * explicitly the sk_protocol field
- */
- /* SO_PEEK_OFF is unsupported, as it is for plain TCP */
- /* SO_MAX_PACING_RATE is unsupported, we must be careful with subflows */
- /* SO_CNX_ADVICE is currently unsupported, could possibly be relevant,
- * but likely needs careful design
- */
- /* SO_ZEROCOPY is currently unsupported, TODO in sndmsg */
- /* SO_TXTIME is currently unsupported */
- return false;
- }
if (level == SOL_IP) {
switch (optname) {
/* should work fine */
@@ -547,7 +556,7 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
}
if (ret == 0)
- tcp_set_congestion_control(sk, name, false, cap_net_admin);
+ strcpy(msk->ca_name, name);
release_sock(sk);
return ret;
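Hence the congestion algorithm is only recorded by name at the MPTCP level and applied to subflows during option sync; a hedged userspace example selecting it:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    int main(void)
    {
            const char name[] = "cubic";    /* any ca known to the host */
            int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

            if (fd < 0)
                    return 1;
            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                           name, strlen(name)))
                    perror("TCP_CONGESTION");
            return 0;
    }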
@@ -574,12 +583,12 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
pr_debug("msk=%p", msk);
- if (!mptcp_supported_sockopt(level, optname))
- return -ENOPROTOOPT;
-
if (level == SOL_SOCKET)
return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+ if (!mptcp_supported_sockopt(level, optname))
+ return -ENOPROTOOPT;
+
/* @@ the meaning of setsockopt() when the socket is connected and
* there are multiple subflows is not yet defined. It is up to the
* MPTCP-level socket to configure the subflows until the subflow
@@ -705,7 +714,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
- tcp_set_congestion_control(ssk, inet_csk(sk)->icsk_ca_ops->name, false, true);
+ tcp_set_congestion_control(ssk, msk->ca_name, false, true);
}
static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a5ede357cfbc..66d0b1893d26 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -108,6 +108,8 @@ static void subflow_init_req(struct request_sock *req, const struct sock *sk_lis
subflow_req->mp_capable = 0;
subflow_req->mp_join = 0;
+ subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
+ subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
subflow_req->msk = NULL;
mptcp_token_init_request(req);
}
@@ -150,7 +152,7 @@ static int subflow_check_req(struct request_sock *req,
return -EINVAL;
#endif
- mptcp_get_options(skb, &mp_opt);
+ mptcp_get_options(sk_listener, skb, &mp_opt);
if (mp_opt.mp_capable) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
@@ -162,7 +164,7 @@ static int subflow_check_req(struct request_sock *req,
}
if (mp_opt.mp_capable && listener->request_mptcp) {
- int err, retries = 4;
+ int err, retries = MPTCP_TOKEN_MAX_RETRIES;
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
@@ -247,7 +249,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
int err;
subflow_init_req(req, sk_listener);
- mptcp_get_options(skb, &mp_opt);
+ mptcp_get_options(sk_listener, skb, &mp_opt);
if (mp_opt.mp_capable && mp_opt.mp_join)
return -EINVAL;
@@ -371,6 +373,24 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}
+void __mptcp_set_connected(struct sock *sk)
+{
+ if (sk->sk_state == TCP_SYN_SENT) {
+ inet_sk_state_store(sk, TCP_ESTABLISHED);
+ sk->sk_state_change(sk);
+ }
+}
+
+static void mptcp_set_connected(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_set_connected(sk);
+ else
+ set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -379,10 +399,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
/* be sure no special action on any packet other than syn-ack */
if (subflow->conn_finished)
@@ -394,7 +410,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
- mptcp_get_options(skb, &mp_opt);
+ mptcp_get_options(sk, skb, &mp_opt);
if (subflow->request_mptcp) {
if (!mp_opt.mp_capable) {
MPTCP_INC_STATS(sock_net(sk),
@@ -404,6 +420,10 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
goto fallback;
}
+ if (mp_opt.csum_reqd)
+ WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
+ if (mp_opt.deny_join_id0)
+ WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
subflow->mp_capable = 1;
subflow->can_ack = 1;
subflow->remote_key = mp_opt.sndr_key;
@@ -411,6 +431,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->remote_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
+ mptcp_set_connected(parent);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -430,15 +451,15 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
goto do_reset;
}
+ if (!mptcp_finish_join(sk))
+ goto do_reset;
+
subflow_generate_hmac(subflow->local_key, subflow->remote_key,
subflow->local_nonce,
subflow->remote_nonce,
hmac);
memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
- if (!mptcp_finish_join(sk))
- goto do_reset;
-
subflow->mp_join = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
@@ -451,6 +472,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
} else if (mptcp_check_fallback(sk)) {
fallback:
mptcp_rcv_space_init(mptcp_sk(parent), sk);
+ mptcp_set_connected(parent);
}
return;
@@ -558,6 +580,7 @@ static void mptcp_sock_destruct(struct sock *sk)
static void mptcp_force_close(struct sock *sk)
{
+ /* the msk is not yet exposed to user-space */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
}
@@ -630,26 +653,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
/* if the sk is MP_CAPABLE, we try to fetch the client key */
if (subflow_req->mp_capable) {
- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
- /* here we can receive and accept an in-window,
- * out-of-order pkt, which will not carry the MP_CAPABLE
- * opt even on mptcp enabled paths
- */
- goto create_msk;
- }
-
- mptcp_get_options(skb, &mp_opt);
+ /* we can receive and accept an in-window, out-of-order pkt,
+ * which may not carry the MP_CAPABLE opt even on mptcp enabled
+ * paths: always try to extract the peer key, and fall back
+ * for packets missing it.
+ * Even OoO DSS packets coming legitimately after dropped or
+ * reordered MPC will cause fallback, but we don't have other
+ * options.
+ */
+ mptcp_get_options(sk, skb, &mp_opt);
if (!mp_opt.mp_capable) {
fallback = true;
goto create_child;
}
-create_msk:
new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
if (!new_msk)
fallback = true;
} else if (subflow_req->mp_join) {
- mptcp_get_options(skb, &mp_opt);
+ mptcp_get_options(sk, skb, &mp_opt);
if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
!mptcp_can_accept_new_subflow(subflow_req->msk)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
@@ -776,19 +798,10 @@ enum mapping_status {
MAPPING_DUMMY
};
-static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
-{
- if ((u32)seq == (u32)old_seq)
- return old_seq;
-
- /* Assume map covers data not mapped yet. */
- return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
-}
-
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
- ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
@@ -813,22 +826,104 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
/* Mapping covers data later in the subflow stream,
* currently unsupported.
*/
- warn_bad_map(subflow, ssn);
+ dbg_bad_map(subflow, ssn);
return false;
}
if (unlikely(!before(ssn, subflow->map_subflow_seq +
subflow->map_data_len))) {
/* Mapping covers only past subflow data, invalid */
- warn_bad_map(subflow, ssn + skb->len);
+ dbg_bad_map(subflow, ssn);
return false;
}
return true;
}
+static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
+ bool csum_reqd)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct csum_pseudo_header header;
+ u32 offset, seq, delta;
+ __wsum csum;
+ int len;
+
+ if (!csum_reqd)
+ return MAPPING_OK;
+
+ /* mapping already validated on previous traversal */
+ if (subflow->map_csum_len == subflow->map_data_len)
+ return MAPPING_OK;
+
+ /* traverse the receive queue, ensuring it contains a full
+ * DSS mapping and accumulating the related csum.
+ * Preserve the accumulated csum across multiple calls, to compute
+ * the csum only once
+ */
+ delta = subflow->map_data_len - subflow->map_csum_len;
+ for (;;) {
+ seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
+ offset = seq - TCP_SKB_CB(skb)->seq;
+
+ /* if the current skb has not been accounted yet, csum its contents
+ * up to the amount covered by the current DSS
+ */
+ if (offset < skb->len) {
+ len = min(skb->len - offset, delta);
+ csum = skb_checksum(skb, offset, len, 0);
+ subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
+ subflow->map_csum_len);
+
+ delta -= len;
+ subflow->map_csum_len += len;
+ }
+ if (delta == 0)
+ break;
+
+ if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
+ /* if this subflow is closed, the partial mapping
+ * will never be completed; flush the pending skbs, so
+ * that subflow_sched_work_if_closed() can kick in
+ */
+ if (unlikely(ssk->sk_state == TCP_CLOSE))
+ while ((skb = skb_peek(&ssk->sk_receive_queue)))
+ sk_eat_skb(ssk, skb);
+
+ /* not enough data to validate the csum */
+ return MAPPING_EMPTY;
+ }
+
+ /* the DSS mapping for next skbs will be validated later,
+ * when a get_mapping_status call processes such skb
+ */
+ skb = skb->next;
+ }
+
+ /* note that 'map_data_len' accounts only for the carried data and does
+ * not include the eventual seq increment due to the DATA_FIN,
+ * while the pseudo-header requires the original DSS data length,
+ * including it
+ */
+ header.data_seq = cpu_to_be64(subflow->map_seq);
+ header.subflow_seq = htonl(subflow->map_subflow_seq);
+ header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
+ header.csum = 0;
+
+ csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
+ if (unlikely(csum_fold(csum))) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
+ return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
+ }
+
+ return MAPPING_OK;
+}
+
static enum mapping_status get_mapping_status(struct sock *ssk,
struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool csum_reqd = READ_ONCE(msk->csum_enabled);
struct mptcp_ext *mpext;
struct sk_buff *skb;
u16 data_len;
@@ -867,7 +962,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len = mpext->data_len;
if (data_len == 0) {
- pr_err("Infinite mapping not handled");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
return MAPPING_INVALID;
}
@@ -909,22 +1003,17 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len--;
}
- if (!mpext->dsn64) {
- map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
- mpext->data_seq);
- pr_debug("expanded seq=%llu", subflow->map_seq);
- } else {
- map_seq = mpext->data_seq;
- }
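+ /* mptcp_expand_seq() reconstructs the full 64-bit DSN from the
+ * msk-level ack_seq when the peer carried only the lower 32 bits
+ * (dsn64 not set)
+ */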
+ map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
if (subflow->map_valid) {
/* Allow replacing only with an identical map */
if (subflow->map_seq == map_seq &&
subflow->map_subflow_seq == mpext->subflow_seq &&
- subflow->map_data_len == data_len) {
+ subflow->map_data_len == data_len &&
+ subflow->map_csum_reqd == mpext->csum_reqd) {
skb_ext_del(skb, SKB_EXT_MPTCP);
- return MAPPING_OK;
+ goto validate_csum;
}
/* If this skb data are fully covered by the current mapping,
@@ -936,27 +1025,41 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
}
/* will validate the next map after consuming the current one */
- return MAPPING_OK;
+ goto validate_csum;
}
subflow->map_seq = map_seq;
subflow->map_subflow_seq = mpext->subflow_seq;
subflow->map_data_len = data_len;
subflow->map_valid = 1;
+ subflow->map_data_fin = mpext->data_fin;
subflow->mpc_map = mpext->mpc_map;
- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
+ subflow->map_csum_reqd = mpext->csum_reqd;
+ subflow->map_csum_len = 0;
+ subflow->map_data_csum = csum_unfold(mpext->csum);
+
+ /* cf. RFC 8684 Section 3.3 */
+ if (unlikely(subflow->map_csum_reqd != csum_reqd))
+ return MAPPING_INVALID;
+
+ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
subflow->map_seq, subflow->map_subflow_seq,
- subflow->map_data_len);
+ subflow->map_data_len, subflow->map_csum_reqd,
+ subflow->map_data_csum);
validate_seq:
/* we revalidate the mapping on each new skb, because we must ensure
* the current skb is completely covered by the available mapping
*/
- if (!validate_mapping(ssk, skb))
+ if (!validate_mapping(ssk, skb)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
return MAPPING_INVALID;
+ }
skb_ext_del(skb, SKB_EXT_MPTCP);
- return MAPPING_OK;
+
+validate_csum:
+ return validate_data_csum(ssk, skb, csum_reqd);
}
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
@@ -1002,7 +1105,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
struct sk_buff *skb;
if (!skb_peek(&ssk->sk_receive_queue))
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
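+ /* data_avail is read locklessly outside the subflow lock, hence
+ * the WRITE_ONCE() annotation on every update
+ */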
if (subflow->data_avail)
return true;
@@ -1013,21 +1116,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
status = get_mapping_status(ssk, msk);
trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
- if (status == MAPPING_INVALID) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
- if (status == MAPPING_DUMMY) {
- __mptcp_do_fallback(msk);
- skb = skb_peek(&ssk->sk_receive_queue);
- subflow->map_valid = 1;
- subflow->map_seq = READ_ONCE(msk->ack_seq);
- subflow->map_data_len = skb->len;
- subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
- subflow->ssn_offset;
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- return true;
- }
+ if (unlikely(status == MAPPING_INVALID))
+ goto fallback;
+
+ if (unlikely(status == MAPPING_DUMMY))
+ goto fallback;
if (status != MAPPING_OK)
goto no_data;
@@ -1040,10 +1133,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
* MP_CAPABLE-based mapping
*/
if (unlikely(!READ_ONCE(msk->can_ack))) {
- if (!subflow->mpc_map) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
+ if (!subflow->mpc_map)
+ goto fallback;
WRITE_ONCE(msk->remote_key, subflow->remote_key);
WRITE_ONCE(msk->ack_seq, subflow->map_seq);
WRITE_ONCE(msk->can_ack, true);
@@ -1053,35 +1144,43 @@ static bool subflow_check_data_avail(struct sock *ssk)
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
ack_seq);
- if (ack_seq == old_ack) {
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- break;
- } else if (after64(ack_seq, old_ack)) {
- subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
- break;
+ if (unlikely(before64(ack_seq, old_ack))) {
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ continue;
}
- /* only accept in-sequence mapping. Old values are spurious
- * retransmission
- */
- mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ break;
}
return true;
no_data:
subflow_sched_work_if_closed(msk, ssk);
return false;
-fatal:
- /* fatal protocol error, close the socket */
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
- ssk->sk_error_report(ssk);
- tcp_set_state(ssk, TCP_CLOSE);
- subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMPTCP;
- tcp_send_active_reset(ssk, GFP_ATOMIC);
- subflow->data_avail = 0;
- return false;
+
+fallback:
+ /* RFC 8684 section 3.7. */
+ if (subflow->mp_join || subflow->fully_established) {
+ /* fatal protocol error, close the socket.
+ * subflow_error_report() will introduce the appropriate barriers
+ */
+ ssk->sk_err = EBADMSG;
+ tcp_set_state(ssk, TCP_CLOSE);
+ subflow->reset_transient = 0;
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ WRITE_ONCE(subflow->data_avail, 0);
+ return false;
+ }
+
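+ /* dummy mapping case: fall back to plain TCP and synthesize an
+ * in-sequence DSS mapping covering the skb at the head of the
+ * receive queue
+ */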
+ __mptcp_do_fallback(msk);
+ skb = skb_peek(&ssk->sk_receive_queue);
+ subflow->map_valid = 1;
+ subflow->map_seq = READ_ONCE(msk->ack_seq);
+ subflow->map_data_len = skb->len;
+ subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ return true;
}
bool mptcp_subflow_data_available(struct sock *sk)
@@ -1092,7 +1191,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
if (subflow->map_valid &&
mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
subflow->map_valid = 0;
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
pr_debug("Done with mapping: seq=%u data_len=%u",
subflow->map_subflow_seq,
@@ -1120,41 +1219,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
*full_space = tcp_full_space(sk);
}
-static void subflow_data_ready(struct sock *sk)
-{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- u16 state = 1 << inet_sk_state_load(sk);
- struct sock *parent = subflow->conn;
- struct mptcp_sock *msk;
-
- msk = mptcp_sk(parent);
- if (state & TCPF_LISTEN) {
- /* MPJ subflow are removed from accept queue before reaching here,
- * avoid stray wakeups
- */
- if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
- return;
-
- set_bit(MPTCP_DATA_READY, &msk->flags);
- parent->sk_data_ready(parent);
- return;
- }
-
- WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
- !subflow->mp_join && !(state & TCPF_CLOSE));
-
- if (mptcp_subflow_data_available(sk))
- mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
- struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
- mptcp_propagate_sndbuf(sk, ssk);
- mptcp_write_space(sk);
-}
-
void __mptcp_error_report(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -1178,7 +1242,7 @@ void __mptcp_error_report(struct sock *sk)
/* This barrier is coupled with smp_rmb() in mptcp_poll() */
smp_wmb();
- sk->sk_error_report(sk);
+ sk_error_report(sk);
break;
}
}
@@ -1195,6 +1259,43 @@ static void subflow_error_report(struct sock *ssk)
mptcp_data_unlock(sk);
}
+static void subflow_data_ready(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ u16 state = 1 << inet_sk_state_load(sk);
+ struct sock *parent = subflow->conn;
+ struct mptcp_sock *msk;
+
+ msk = mptcp_sk(parent);
+ if (state & TCPF_LISTEN) {
+ /* MPJ subflows are removed from the accept queue before reaching here,
+ * avoid stray wakeups
+ */
+ if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+ return;
+
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+ parent->sk_data_ready(parent);
+ return;
+ }
+
+ WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+ !subflow->mp_join && !(state & TCPF_CLOSE));
+
+ if (mptcp_subflow_data_available(sk))
+ mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+ mptcp_propagate_sndbuf(sk, ssk);
+ mptcp_write_space(sk);
+}
+
static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
@@ -1493,10 +1594,7 @@ static void subflow_state_change(struct sock *sk)
mptcp_rcv_space_init(mptcp_sk(parent), sk);
pr_fallback(mptcp_sk(parent));
subflow->conn_finished = 1;
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
+ mptcp_set_connected(parent);
}
/* as recvmsg() does not acquire the subflow socket for ssk selection
@@ -1505,6 +1603,8 @@ static void subflow_state_change(struct sock *sk)
*/
if (mptcp_subflow_data_available(sk))
mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
subflow_sched_work_if_closed(mptcp_sk(parent), sk);
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index 8f0270a780ce..a98e554b034f 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -33,7 +33,6 @@
#include <net/mptcp.h>
#include "protocol.h"
-#define TOKEN_MAX_RETRIES 4
#define TOKEN_MAX_CHAIN_LEN 4
struct token_bucket {
@@ -153,12 +152,9 @@ int mptcp_token_new_connect(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
- int retries = TOKEN_MAX_RETRIES;
+ int retries = MPTCP_TOKEN_MAX_RETRIES;
struct token_bucket *bucket;
- pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
- sk, subflow->local_key, subflow->token, subflow->idsn);
-
again:
mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
&subflow->idsn);
@@ -172,6 +168,9 @@ again:
goto again;
}
+ pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
+ sk, subflow->local_key, subflow->token, subflow->idsn);
+
WRITE_ONCE(msk->token, subflow->token);
__sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
bucket->chain_len++;
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 49031f804276..cbbb0de4750a 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -238,7 +238,7 @@ struct ncsi_package {
struct ncsi_dev_priv *ndp; /* NCSI device */
spinlock_t lock; /* Protect the package */
unsigned int channel_num; /* Number of channels */
- struct list_head channels; /* List of chanels */
+ struct list_head channels; /* List of channels */
struct list_head node; /* Form list of packages */
bool multi_channel; /* Enable multiple channels */
@@ -339,7 +339,7 @@ struct ncsi_cmd_arg {
unsigned char type; /* Command in the NCSI packet */
unsigned char id; /* Request ID (sequence number) */
unsigned char package; /* Destination package ID */
- unsigned char channel; /* Detination channel ID or 0x1f */
+ unsigned char channel; /* Destination channel ID or 0x1f */
unsigned short payload; /* Command packet payload length */
unsigned int req_flags; /* NCSI request properties */
union {
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index ffff8da707b8..ca04b6df1341 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -627,7 +627,7 @@ static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
return 0;
}
-/* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
+/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
* packet.
*/
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56a2531a3402..54395266339d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -19,6 +19,16 @@ config NETFILTER_FAMILY_BRIDGE
config NETFILTER_FAMILY_ARP
bool
+config NETFILTER_NETLINK_HOOK
+ tristate "Netfilter base hook dump support"
+ depends on NETFILTER_ADVANCED
+ depends on NF_TABLES
+ select NETFILTER_NETLINK
+ help
+ If this option is enabled, the kernel will include support
+ to list the base netfilter hooks via NFNETLINK.
+ This is helpful for debugging.
+
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"
depends on NETFILTER_ADVANCED
@@ -816,7 +826,7 @@ config NETFILTER_XT_TARGET_CLASSIFY
the priority of a packet. Some qdiscs can use this value for
classification, among these are:
- atm, cbq, dsmark, pfifo_fast, htb, prio
+ atm, cbq, dsmark, pfifo_fast, htb, prio
To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index e80e010354b1..049890e00a3d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
obj-$(CONFIG_NETFILTER_NETLINK_OSF) += nfnetlink_osf.o
+obj-$(CONFIG_NETFILTER_NETLINK_HOOK) += nfnetlink_hook.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
@@ -73,7 +74,7 @@ obj-$(CONFIG_NF_DUP_NETDEV) += nf_dup_netdev.o
nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
- nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
+ nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
nft_chain_route.o nf_tables_offload.o \
nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
nft_set_pipapo.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index de2d20c37cda..16ae92054baa 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1685,8 +1685,8 @@ static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
};
static int
-call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
- struct nlattr *tb[], enum ipset_adt adt,
+call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+ struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt,
u32 flags, bool use_lineno)
{
int ret;
@@ -1738,8 +1738,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
*errline = lineno;
- netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
+ nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
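+ /* nfnetlink_unicast() already maps -EAGAIN to -ENOBUFS, so the
+ * callers no longer need the open-coded loop-avoidance fixup
+ */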
/* Signal netlink not to send its ACK/errmsg. */
return -EINTR;
}
@@ -1783,7 +1782,7 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
attr[IPSET_ATTR_DATA],
set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
- ret = call_ad(ctnl, skb, set, tb, adt, flags,
+ ret = call_ad(net, ctnl, skb, set, tb, adt, flags,
use_lineno);
} else {
int nla_rem;
@@ -1794,7 +1793,7 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
- ret = call_ad(ctnl, skb, set, tb, adt,
+ ret = call_ad(net, ctnl, skb, set, tb, adt,
flags, use_lineno);
if (ret < 0)
return ret;
@@ -1859,7 +1858,6 @@ static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info,
const struct ip_set *set;
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
- int ret = 0;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME]))
@@ -1885,12 +1883,7 @@ static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info,
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret < 0)
- return ret;
-
- return 0;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
@@ -1945,12 +1938,7 @@ static int ip_set_type(struct sk_buff *skb, const struct nfnl_info *info,
nlmsg_end(skb2, nlh2);
pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret < 0)
- return ret;
-
- return 0;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
@@ -1971,7 +1959,6 @@ static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info,
{
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
- int ret = 0;
if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
return -IPSET_ERR_PROTOCOL;
@@ -1990,12 +1977,7 @@ static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info,
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret < 0)
- return ret;
-
- return 0;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
@@ -2014,7 +1996,6 @@ static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info,
struct nlmsghdr *nlh2;
ip_set_id_t id = IPSET_INVALID_ID;
const struct ip_set *set;
- int ret = 0;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_SETNAME]))
@@ -2038,12 +2019,7 @@ static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info,
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret < 0)
- return ret;
-
- return 0;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
@@ -2065,7 +2041,6 @@ static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info,
struct nlmsghdr *nlh2;
ip_set_id_t id = IPSET_INVALID_ID;
const struct ip_set *set;
- int ret = 0;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_INDEX]))
@@ -2091,12 +2066,7 @@ static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info,
goto nla_put_failure;
nlmsg_end(skb2, nlh2);
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret < 0)
- return ret;
-
- return 0;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index d61886874940..271da8447b29 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -318,7 +318,7 @@ config IP_VS_MH_TAB_INDEX
comment 'IPVS application helper'
config IP_VS_FTP
- tristate "FTP protocol helper"
+ tristate "FTP protocol helper"
depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
NF_CONNTRACK_FTP
select IP_VS_NFCT
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index d45dbcba8b49..c25097092a06 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
svc->port = u->port;
svc->fwmark = u->fwmark;
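+ /* IP_VS_SVC_F_HASHED is kernel-internal hashing state; mask it
+ * so userspace cannot forge it
+ */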
- svc->flags = u->flags;
+ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
svc->ipvs = ipvs;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e0befcf8113a..96ba19fc8155 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -55,8 +55,6 @@
#include "nf_internals.h"
-extern unsigned int nf_conntrack_net_id;
-
__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);
@@ -87,8 +85,6 @@ static __read_mostly bool nf_conntrack_locks_all;
static struct conntrack_gc_work conntrack_gc_work;
-extern unsigned int nf_conntrack_net_id;
-
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
/* 1) Acquire the lock */
@@ -1404,7 +1400,7 @@ static void gc_worker(struct work_struct *work)
continue;
net = nf_ct_net(tmp);
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) < nf_conntrack_max95)
continue;
@@ -1484,7 +1480,7 @@ __nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_tuple *repl,
gfp_t gfp, u32 hash)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
unsigned int ct_count;
struct nf_conn *ct;
@@ -1556,7 +1552,7 @@ void nf_conntrack_free(struct nf_conn *ct)
nf_ct_ext_destroy(ct);
kmem_cache_free(nf_conntrack_cachep, ct);
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
smp_mb__before_atomic();
atomic_dec(&cnet->count);
@@ -1614,7 +1610,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
GFP_ATOMIC);
local_bh_disable();
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
if (cnet->expect_count) {
spin_lock(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
@@ -2317,7 +2313,7 @@ __nf_ct_unconfirmed_destroy(struct net *net)
void nf_ct_unconfirmed_destroy(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
might_sleep();
@@ -2333,7 +2329,7 @@ void nf_ct_iterate_cleanup_net(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct iter_data d;
might_sleep();
@@ -2367,7 +2363,7 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
down_read(&net_rwsem);
for_each_net(net) {
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) == 0)
continue;
@@ -2449,7 +2445,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
nf_ct_iterate_cleanup(kill_all, net, 0, 0);
if (atomic_read(&cnet->count) != 0)
@@ -2733,7 +2729,7 @@ void nf_conntrack_init_end(void)
int nf_conntrack_init_net(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
int ret = -ENOMEM;
int cpu;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 759d87aef95f..296e4a171bd1 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -27,8 +27,6 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
-extern unsigned int nf_conntrack_net_id;
-
static DEFINE_MUTEX(nf_ct_ecache_mutex);
#define ECACHE_RETRY_WAIT (HZ/10)
@@ -348,7 +346,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (state == NFCT_ECACHE_DESTROY_FAIL &&
!delayed_work_pending(&cnet->ecache_dwork)) {
@@ -371,7 +369,7 @@ static const struct nf_ct_ext_type event_extend = {
void nf_conntrack_ecache_pernet_init(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
net->ct.sysctl_events = nf_ct_events;
cnet->ct_net = &net->ct;
@@ -380,7 +378,7 @@ void nf_conntrack_ecache_pernet_init(struct net *net)
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
cancel_delayed_work_sync(&cnet->ecache_dwork);
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index efdd391b3f72..1e851bc2e61a 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -43,8 +43,6 @@ unsigned int nf_ct_expect_max __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;
-extern unsigned int nf_conntrack_net_id;
-
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 portid, int report)
@@ -58,7 +56,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
hlist_del_rcu(&exp->hnode);
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
cnet->expect_count--;
hlist_del_rcu(&exp->lnode);
@@ -123,7 +121,7 @@ __nf_ct_expect_find(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_conntrack_expect *i;
unsigned int h;
@@ -164,7 +162,7 @@ nf_ct_find_expectation(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_conntrack_expect *i, *exp = NULL;
unsigned int h;
@@ -397,7 +395,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
master_help->expecting[exp->class]++;
hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
cnet->expect_count++;
NF_CT_STAT_INC(net, expect_create);
@@ -468,7 +466,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
}
}
- cnet = net_generic(net, nf_conntrack_net_id);
+ cnet = nf_ct_pernet(net);
if (cnet->expect_count >= nf_ct_expect_max) {
net_warn_ratelimited("nf_conntrack: expectation table full\n");
ret = -EMFILE;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index aafaff00baf1..2eb31ffb3d14 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -194,7 +194,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
if (tcpdatalen == 4) { /* Separate TPKT header */
/* Netmeeting sends TPKT header and data separately */
pr_debug("nf_ct_h323: separate TPKT header indicates "
- "there will be TPKT data of %hu bytes\n",
+ "there will be TPKT data of %d bytes\n",
tpktlen - 4);
info->tpkt_len[dir] = tpktlen - 4;
return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index ac396cc8bfae..ae4488a13c70 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -43,8 +43,6 @@ MODULE_PARM_DESC(nf_conntrack_helper,
static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
static struct list_head nf_ct_nat_helpers __read_mostly;
-extern unsigned int nf_conntrack_net_id;
-
/* Stupid hash, but collision free for the default registrations of the
* helpers currently in the kernel. */
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
@@ -214,7 +212,7 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
static struct nf_conntrack_helper *
nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (!cnet->sysctl_auto_assign_helper) {
if (cnet->auto_assign_helper_warned)
@@ -560,7 +558,7 @@ static const struct nf_ct_ext_type helper_extend = {
void nf_conntrack_helper_pernet_init(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 8690fc07030f..4e1a9dba7077 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1528,7 +1528,7 @@ static int ctnetlink_del_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
+ u8 family = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
@@ -1541,12 +1541,12 @@ static int ctnetlink_del_conntrack(struct sk_buff *skb,
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
- nfmsg->nfgen_family, &zone);
+ family, &zone);
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
- nfmsg->nfgen_family, &zone);
+ family, &zone);
else {
- u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+ u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC;
return ctnetlink_flush_conntrack(info->net, cda,
NETLINK_CB(skb).portid,
@@ -1586,8 +1586,7 @@ static int ctnetlink_get_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int8_t u3 = nfmsg->nfgen_family;
+ u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
@@ -1628,9 +1627,8 @@ static int ctnetlink_get_conntrack(struct sk_buff *skb,
ct = nf_ct_tuplehash_to_ctrack(h);
- err = -ENOMEM;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL) {
+ if (!skb2) {
nf_ct_put(ct);
return -ENOMEM;
}
@@ -1640,21 +1638,12 @@ static int ctnetlink_get_conntrack(struct sk_buff *skb,
NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
true, 0);
nf_ct_put(ct);
- if (err <= 0)
- goto free;
-
- err = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (err < 0)
- goto out;
-
- return 0;
+ if (err <= 0) {
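+ /* the fill routine only fails here for lack of room in the
+ * freshly allocated skb, hence -ENOMEM
+ */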
+ kfree_skb(skb2);
+ return -ENOMEM;
+ }
-free:
- kfree_skb(skb2);
-out:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static int ctnetlink_done_list(struct netlink_callback *cb)
@@ -2373,10 +2362,9 @@ static int ctnetlink_new_conntrack(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct nf_conntrack_tuple otuple, rtuple;
struct nf_conntrack_tuple_hash *h = NULL;
- u_int8_t u3 = nfmsg->nfgen_family;
+ u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_zone zone;
struct nf_conn *ct;
int err;
@@ -2590,21 +2578,12 @@ static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
sock_net(skb->sk));
- if (err <= 0)
- goto free;
-
- err = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (err < 0)
- goto out;
-
- return 0;
+ if (err <= 0) {
+ kfree_skb(skb2);
+ return -ENOMEM;
+ }
-free:
- kfree_skb(skb2);
-out:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
@@ -3278,8 +3257,7 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int8_t u3 = nfmsg->nfgen_family;
+ u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct nf_conntrack_zone zone;
@@ -3329,11 +3307,10 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
}
}
- err = -ENOMEM;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL) {
+ if (!skb2) {
nf_ct_expect_put(exp);
- goto out;
+ return -ENOMEM;
}
rcu_read_lock();
@@ -3342,21 +3319,12 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
exp);
rcu_read_unlock();
nf_ct_expect_put(exp);
- if (err <= 0)
- goto free;
-
- err = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (err < 0)
- goto out;
-
- return 0;
+ if (err <= 0) {
+ kfree_skb(skb2);
+ return -ENOMEM;
+ }
-free:
- kfree_skb(skb2);
-out:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
@@ -3378,8 +3346,7 @@ static int ctnetlink_del_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int8_t u3 = nfmsg->nfgen_family;
+ u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
@@ -3630,8 +3597,7 @@ static int ctnetlink_new_expect(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
{
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int8_t u3 = nfmsg->nfgen_family;
+ u_int8_t u3 = info->nfmsg->nfgen_family;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct nf_conntrack_zone zone;
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 89e5bac384d7..55647409a9be 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -42,17 +42,16 @@
#include <net/ipv6.h>
#include <net/inet_frag.h>
-extern unsigned int nf_conntrack_net_id;
-
static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
-__printf(5, 6)
+__printf(4, 5)
void nf_l4proto_log_invalid(const struct sk_buff *skb,
- struct net *net,
- u16 pf, u8 protonum,
+ const struct nf_hook_state *state,
+ u8 protonum,
const char *fmt, ...)
{
+ struct net *net = state->net;
struct va_format vaf;
va_list args;
@@ -64,15 +63,16 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
vaf.fmt = fmt;
vaf.va = &args;
- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
- "nf_ct_proto_%d: %pV ", protonum, &vaf);
+ nf_log_packet(net, state->pf, 0, skb, state->in, state->out,
+ NULL, "nf_ct_proto_%d: %pV ", protonum, &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(nf_l4proto_log_invalid);
-__printf(3, 4)
+__printf(4, 5)
void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
+ const struct nf_hook_state *state,
const char *fmt, ...)
{
struct va_format vaf;
@@ -87,7 +87,7 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
vaf.fmt = fmt;
vaf.va = &args;
- nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct),
+ nf_l4proto_log_invalid(skb, state,
nf_ct_protonum(ct), "%pV", &vaf);
va_end(args);
}
@@ -446,7 +446,7 @@ static struct nf_ct_bridge_info *nf_ct_bridge_info;
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
bool fixup_needed = false, retry = true;
int err = 0;
retry:
@@ -531,7 +531,7 @@ retry:
static void nf_ct_netns_do_put(struct net *net, u8 nfproto)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
mutex_lock(&nf_ct_proto_mutex);
switch (nfproto) {
@@ -664,7 +664,7 @@ int nf_conntrack_proto_init(void)
#if IS_ENABLED(CONFIG_IPV6)
cleanup_sockopt:
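+ /* reached only when so_getorigdst6 failed to register; drop the
+ * IPv4 sockopt registered earlier
+ */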
- nf_unregister_sockopt(&so_getorigdst6);
+ nf_unregister_sockopt(&so_getorigdst);
#endif
return ret;
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 4f33307fa3cf..c1557d47ccd1 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -382,7 +382,8 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- const struct dccp_hdr *dh)
+ const struct dccp_hdr *dh,
+ const struct nf_hook_state *hook_state)
{
struct net *net = nf_ct_net(ct);
struct nf_dccp_net *dn;
@@ -414,7 +415,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
out_invalid:
- nf_ct_l4proto_log_invalid(skb, ct, "%s", msg);
+ nf_ct_l4proto_log_invalid(skb, ct, hook_state, "%s", msg);
return false;
}
@@ -464,8 +465,7 @@ static bool dccp_error(const struct dccp_hdr *dh,
}
return false;
out_invalid:
- nf_l4proto_log_invalid(skb, state->net, state->pf,
- IPPROTO_DCCP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
return true;
}
@@ -488,7 +488,7 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
return -NF_ACCEPT;
type = dh->dccph_type;
- if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
+ if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
return -NF_ACCEPT;
if (type == DCCP_PKT_RESET &&
@@ -543,11 +543,11 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
ct->proto.dccp.last_pkt = type;
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet");
+ nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid packet");
return NF_ACCEPT;
case CT_DCCP_INVALID:
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition");
+ nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid state transition");
return -NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 4efd8741c105..b38b7164acd5 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -170,12 +170,12 @@ int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
if (state->pf == AF_INET) {
- nf_l4proto_log_invalid(skb, state->net, state->pf,
+ nf_l4proto_log_invalid(skb, state,
l4proto,
"outer daddr %pI4 != inner %pI4",
&outer_daddr->ip, &ct_daddr->ip);
} else if (state->pf == AF_INET6) {
- nf_l4proto_log_invalid(skb, state->net, state->pf,
+ nf_l4proto_log_invalid(skb, state,
l4proto,
"outer daddr %pI6 != inner %pI6",
&outer_daddr->ip6, &ct_daddr->ip6);
@@ -197,8 +197,7 @@ static void icmp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
- nf_l4proto_log_invalid(skb, state->net, state->pf,
- IPPROTO_ICMP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg);
}
/* Small and modified version of icmp_rcv */
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index facd8c64ec4e..61e3b05cf02c 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -126,8 +126,7 @@ static void icmpv6_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
- nf_l4proto_log_invalid(skb, state->net, state->pf,
- IPPROTO_ICMPV6, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
}
int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index fb8dc02e502f..2394238d01c9 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -351,7 +351,7 @@ static bool sctp_error(struct sk_buff *skb,
}
return false;
out_invalid:
- nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_SCTP, "%s", logmsg);
return true;
}
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 34e22416a721..f7e8baf59b51 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -446,14 +446,15 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
-static bool tcp_in_window(const struct nf_conn *ct,
- struct ip_ct_tcp *state,
+static bool tcp_in_window(struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned int index,
const struct sk_buff *skb,
unsigned int dataoff,
- const struct tcphdr *tcph)
+ const struct tcphdr *tcph,
+ const struct nf_hook_state *hook_state)
{
+ struct ip_ct_tcp *state = &ct->proto.tcp;
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
@@ -670,7 +671,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
tn->tcp_be_liberal)
res = true;
if (!res) {
- nf_ct_l4proto_log_invalid(skb, ct,
+ nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"%s",
before(seq, sender->td_maxend + 1) ?
in_recv_win ?
@@ -710,7 +711,7 @@ static void tcp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
- nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
}
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
@@ -970,7 +971,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
IP_CT_EXP_CHALLENGE_ACK;
}
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct,
+ nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d ignored, state %s",
index, dir,
tcp_conntrack_names[old_state]);
@@ -995,7 +996,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
dir, get_conntrack_index(th), old_state);
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
+ nf_ct_l4proto_log_invalid(skb, ct, state, "invalid state");
return -NF_ACCEPT;
case TCP_CONNTRACK_TIME_WAIT:
/* RFC 5961 compliance causes the stack to send "challenge-ACK"
@@ -1010,7 +1011,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
/* Detected RFC5961 challenge ACK */
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
+ nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
return NF_ACCEPT; /* Don't change state */
}
break;
@@ -1035,7 +1036,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
+ nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
return -NF_ACCEPT;
}
@@ -1079,8 +1080,8 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
break;
}
- if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
- skb, dataoff, th)) {
+ if (!tcp_in_window(ct, dir, index,
+ skb, dataoff, th, state)) {
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
}
@@ -1441,6 +1442,11 @@ void nf_conntrack_tcp_init_net(struct net *net)
* will be started.
*/
tn->tcp_max_retrans = 3;
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
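+ /* defaults: 30s while the flow is offloaded, 120s pickup window
+ * once it returns to the classic conntrack path
+ */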
+ tn->offload_timeout = 30 * HZ;
+ tn->offload_pickup = 120 * HZ;
+#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index af402f458ee0..698fee49e732 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -38,8 +38,7 @@ static void udp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
- nf_l4proto_log_invalid(skb, state->net, state->pf,
- IPPROTO_UDP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_UDP, "%s", msg);
}
static bool udp_error(struct sk_buff *skb,
@@ -130,8 +129,7 @@ static void udplite_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
- nf_l4proto_log_invalid(skb, state->net, state->pf,
- IPPROTO_UDPLITE, "%s", msg);
+ nf_l4proto_log_invalid(skb, state, IPPROTO_UDPLITE, "%s", msg);
}
static bool udplite_error(struct sk_buff *skb,
@@ -270,6 +268,11 @@ void nf_conntrack_udp_init_net(struct net *net)
for (i = 0; i < UDP_CT_MAX; i++)
un->timeouts[i] = udp_timeouts[i];
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ un->offload_timeout = 30 * HZ;
+ un->offload_pickup = 30 * HZ;
+#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index aaa55246d0ca..f57a951c9b5e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -512,9 +512,7 @@ static void nf_conntrack_standalone_fini_proc(struct net *net)
u32 nf_conntrack_count(const struct net *net)
{
- const struct nf_conntrack_net *cnet;
-
- cnet = net_generic(net, nf_conntrack_net_id);
+ const struct nf_conntrack_net *cnet = nf_ct_pernet(net);
return atomic_read(&cnet->count);
}
@@ -575,11 +573,19 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
+#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
+#endif
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
#ifdef CONFIG_NF_CT_PROTO_SCTP
@@ -762,6 +768,20 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD] = {
+ .procname = "nf_flowtable_tcp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
+ .procname = "nf_flowtable_tcp_pickup",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
[NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
.procname = "nf_conntrack_tcp_loose",
.maxlen = sizeof(u8),
@@ -796,6 +816,20 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
+ .procname = "nf_flowtable_udp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
+ .procname = "nf_flowtable_udp_pickup",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
.procname = "nf_conntrack_icmp_timeout",
.maxlen = sizeof(unsigned int),
@@ -971,6 +1005,12 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
#undef XASSIGN
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
+#endif
+
}
static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
@@ -1032,7 +1072,7 @@ static void nf_conntrack_standalone_init_gre_sysctl(struct net *net,
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct nf_udp_net *un = nf_udp_pernet(net);
struct ctl_table *table;
@@ -1059,6 +1099,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
+#endif
nf_conntrack_standalone_init_tcp_sysctl(net, table);
nf_conntrack_standalone_init_sctp_sysctl(net, table);
@@ -1085,7 +1129,7 @@ out_unregister_netfilter:
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
- struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
struct ctl_table *table;
table = cnet->sysctl_header->ctl_table_arg;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 39c02d1aeedf..1e50908b1b7e 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -178,12 +178,10 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
tcp->seen[1].td_maxwin = 0;
}
-#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
-#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
unsigned int timeout;
@@ -191,12 +189,17 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (!l4proto)
return;
- if (l4num == IPPROTO_TCP)
- timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
- else if (l4num == IPPROTO_UDP)
- timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
- else
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+ timeout = tn->offload_pickup;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+
+ timeout = tn->offload_pickup;
+ } else {
return;
+ }
if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
ct->timeout = nfct_time_stamp + timeout;
@@ -268,11 +271,35 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = {
.automatic_shrinking = true,
};
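+/* pick the per-netns protocol offload timeout when available, falling
+ * back to the generic NF_FLOW_TIMEOUT
+ */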
+unsigned long flow_offload_get_timeout(struct flow_offload *flow)
+{
+ const struct nf_conntrack_l4proto *l4proto;
+ unsigned long timeout = NF_FLOW_TIMEOUT;
+ struct net *net = nf_ct_net(flow->ct);
+ int l4num = nf_ct_protonum(flow->ct);
+
+ l4proto = nf_ct_l4proto_find(l4num);
+ if (!l4proto)
+ return timeout;
+
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+ timeout = tn->offload_timeout;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+
+ timeout = tn->offload_timeout;
+ }
+
+ return timeout;
+}
+
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
int err;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
@@ -304,10 +331,9 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
- if (likely(!nf_flowtable_hw_offload(flow_table) ||
- !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
+ if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
nf_flow_offload_add(flow_table, flow);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 2af7bdb38407..f92006cec94c 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -902,10 +902,11 @@ static void flow_offload_work_add(struct flow_offload_work *offload)
err = flow_offload_rule_add(offload, flow_rule);
if (err < 0)
- set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
- else
- set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+ goto out;
+
+ set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+out:
nf_flow_offload_destroy(flow_rule);
}
@@ -936,7 +937,7 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
offload->flow->timeout = max_t(u64, offload->flow->timeout,
- lastused + NF_FLOW_TIMEOUT);
+ lastused + flow_offload_get_timeout(offload->flow));
if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
if (stats[0].pkts)
@@ -1040,7 +1041,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
__s32 delta;
delta = nf_flow_timeout_delta(flow->timeout);
- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
+ if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
return;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b100c04a0e43..3d6d49420db8 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
int length = (th->doff * 4) - sizeof(*th);
u8 buf[40], *ptr;
+ if (unlikely(length < 0))
+ return false;
+
ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
if (ptr == NULL)
return false;
@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
length--;
continue;
default:
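+ /* ensure the option's length octet is within bounds
+ * before reading it
+ */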
+ if (length < 2)
+ return true;
opsize = *ptr++;
if (opsize < 2)
return true;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d63d2d8f769c..390d4466567f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -571,7 +571,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
table->family == family &&
nft_active_genmask(table, genmask)) {
if (nft_table_has_owner(table) &&
- table->nlpid != nlpid)
+ nlpid && table->nlpid != nlpid)
return ERR_PTR(-EPERM);
return table;
@@ -583,7 +583,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
const struct nlattr *nla,
- u8 genmask)
+ u8 genmask, u32 nlpid)
{
struct nftables_pernet *nft_net;
struct nft_table *table;
@@ -591,8 +591,13 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
nft_net = nft_pernet(net);
list_for_each_entry(table, &nft_net->tables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
- nft_active_genmask(table, genmask))
+ nft_active_genmask(table, genmask)) {
+ if (nft_table_has_owner(table) &&
+ nlpid && table->nlpid != nlpid)
+ return ERR_PTR(-EPERM);
+
return table;
+ }
}
return ERR_PTR(-ENOENT);
@@ -736,7 +741,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
goto nla_put_failure;
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
- nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+ nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ htonl(table->flags & NFT_TABLE_F_MASK)) ||
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
NFTA_TABLE_PAD))
@@ -861,10 +867,9 @@ static int nft_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
static int nf_tables_gettable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_table *table;
struct net *net = info->net;
struct sk_buff *skb2;
@@ -947,20 +952,22 @@ err_register_hooks:
static void nf_tables_table_disable(struct net *net, struct nft_table *table)
{
+ table->flags &= ~NFT_TABLE_F_DORMANT;
nft_table_disable(net, table, 0);
+ table->flags |= NFT_TABLE_F_DORMANT;
}
-enum {
- NFT_TABLE_STATE_UNCHANGED = 0,
- NFT_TABLE_STATE_DORMANT,
- NFT_TABLE_STATE_WAKEUP
-};
+#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
+ __NFT_TABLE_F_WAS_AWAKEN)
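+
+/* internal bits recording whether the table was dormant or awake before
+ * this transaction, so that commit/abort can finalize or undo the base
+ * hook (un)registration exactly once
+ */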
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
u32 flags;
- int ret = 0;
+ int ret;
if (!ctx->nla[NFTA_TABLE_FLAGS])
return 0;
@@ -985,21 +992,27 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((flags & NFT_TABLE_F_DORMANT) &&
!(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
- nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+ ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(ctx->net, ctx->table);
- if (ret >= 0)
- nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
+ ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+ ret = nf_tables_table_enable(ctx->net, ctx->table);
+ if (ret < 0)
+ goto err_register_hooks;
+
+ ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
+ }
}
- if (ret < 0)
- goto err;
- nft_trans_table_flags(trans) = flags;
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
+
return 0;
-err:
+
+err_register_hooks:
nft_trans_destroy(trans);
return ret;
}
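
The rewrite above drops the three-state transaction enum and instead records, in internal flag bits above NFT_TABLE_F_MASK, what the table's state was before the first change in a batch; later updates in the same batch see __NFT_TABLE_F_UPDATE already set and skip re-registering hooks. A minimal standalone model of that bookkeeping, with hypothetical F_* names (the commit and abort hunks near the end of this patch consume the bits the same way):

#include <stdint.h>
#include <stdio.h>

#define F_DORMANT       0x1                     /* user-visible flag */
#define F_MASK          0x1
#define F_INTERNAL      (F_MASK + 1)
#define F_WAS_DORMANT   (F_INTERNAL << 0)
#define F_WAS_AWAKEN    (F_INTERNAL << 1)
#define F_UPDATE        (F_WAS_DORMANT | F_WAS_AWAKEN)

static void update(uint32_t *flags, int want_dormant)
{
        if (want_dormant && !(*flags & F_DORMANT)) {
                *flags |= F_DORMANT;
                if (!(*flags & F_UPDATE))
                        *flags |= F_WAS_AWAKEN;   /* remember: was awake */
        } else if (!want_dormant && (*flags & F_DORMANT)) {
                *flags &= ~F_DORMANT;
                if (!(*flags & F_UPDATE))
                        *flags |= F_WAS_DORMANT;  /* remember: was dormant */
        }
}

static void abort_batch(uint32_t *flags)
{
        if (*flags & F_WAS_DORMANT)               /* restore pre-batch state */
                *flags |= F_DORMANT;
        else if (*flags & F_WAS_AWAKEN)
                *flags &= ~F_DORMANT;
        *flags &= ~F_UPDATE;
}

int main(void)
{
        uint32_t flags = 0;                       /* table starts awake */

        update(&flags, 1);                        /* batch: go dormant */
        update(&flags, 0);                        /* same batch: wake again */
        abort_batch(&flags);                      /* back to pre-batch state */
        printf("dormant=%d\n", !!(flags & F_DORMANT));
        return 0;
}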
@@ -1059,10 +1072,9 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
@@ -1254,10 +1266,9 @@ out:
static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
@@ -1270,7 +1281,8 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_TABLE_HANDLE]) {
attr = nla[NFTA_TABLE_HANDLE];
- table = nft_table_lookup_byhandle(net, attr, genmask);
+ table = nft_table_lookup_byhandle(net, attr, genmask,
+ NETLINK_CB(skb).portid);
} else {
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask,
@@ -1627,10 +1639,9 @@ done:
static int nf_tables_getchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_chain *chain;
struct net *net = info->net;
struct nft_table *table;
@@ -1905,7 +1916,7 @@ static int nft_chain_parse_netdev(struct net *net,
static int nft_chain_parse_hook(struct net *net,
const struct nlattr * const nla[],
struct nft_chain_hook *hook, u8 family,
- bool autoload)
+ struct netlink_ext_ack *extack, bool autoload)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nlattr *ha[NFTA_HOOK_MAX + 1];
@@ -1935,8 +1946,10 @@ static int nft_chain_parse_hook(struct net *net,
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
family, autoload);
- if (IS_ERR(type))
+ if (IS_ERR(type)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return PTR_ERR(type);
+ }
}
if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
@@ -1945,8 +1958,11 @@ static int nft_chain_parse_hook(struct net *net,
hook->priority <= NF_IP_PRI_CONNTRACK)
return -EOPNOTSUPP;
- if (!try_module_get(type->owner))
+ if (!try_module_get(type->owner)) {
+ if (nla[NFTA_CHAIN_TYPE])
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return -ENOENT;
+ }
hook->type = type;
@@ -2001,11 +2017,12 @@ static void nft_basechain_hook_init(struct nf_hook_ops *ops, u8 family,
const struct nft_chain_hook *hook,
struct nft_chain *chain)
{
- ops->pf = family;
- ops->hooknum = hook->num;
- ops->priority = hook->priority;
- ops->priv = chain;
- ops->hook = hook->type->hooks[ops->hooknum];
+ ops->pf = family;
+ ops->hooknum = hook->num;
+ ops->priority = hook->priority;
+ ops->priv = chain;
+ ops->hook = hook->type->hooks[ops->hooknum];
+ ops->hook_ops_type = NF_HOOK_OP_NF_TABLES;
}
static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
@@ -2057,7 +2074,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
static u64 chain_id;
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
- u8 policy, u32 flags)
+ u8 policy, u32 flags,
+ struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -2079,7 +2097,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (flags & NFT_CHAIN_BINDING)
return -EOPNOTSUPP;
- err = nft_chain_parse_hook(net, nla, &hook, family, true);
+ err = nft_chain_parse_hook(net, nla, &hook, family, extack,
+ true);
if (err < 0)
return err;
@@ -2234,7 +2253,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EEXIST;
}
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
- false);
+ extack, false);
if (err < 0)
return err;
@@ -2355,10 +2374,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct nft_chain *chain = NULL;
struct net *net = info->net;
const struct nlattr *attr;
@@ -2447,16 +2465,15 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
extack);
}
- return nf_tables_addchain(&ctx, family, genmask, policy, flags);
+ return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
}
static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
@@ -3080,10 +3097,9 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)
static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_chain *chain;
const struct nft_rule *rule;
struct net *net = info->net;
@@ -3221,15 +3237,14 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
unsigned int size, i, n, ulen = 0, usize = 0;
u8 genmask = nft_genmask_next(info->net);
struct nft_rule *rule, *old_rule = NULL;
struct nft_expr_info *expr_info = NULL;
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
+ struct nft_flow_rule *flow = NULL;
struct net *net = info->net;
- struct nft_flow_rule *flow;
struct nft_userdata *udata;
struct nft_table *table;
struct nft_chain *chain;
@@ -3324,12 +3339,14 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
err = -EINVAL;
if (nla_type(tmp) != NFTA_LIST_ELEM)
- goto err1;
+ goto err_release_expr;
if (n == NFT_RULE_MAXEXPRS)
- goto err1;
+ goto err_release_expr;
err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
- if (err < 0)
- goto err1;
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, tmp);
+ goto err_release_expr;
+ }
size += expr_info[n].ops->size;
n++;
}
@@ -3337,7 +3354,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
/* Check for overflow of dlen field */
err = -EFBIG;
if (size >= 1 << 12)
- goto err1;
+ goto err_release_expr;
if (nla[NFTA_RULE_USERDATA]) {
ulen = nla_len(nla[NFTA_RULE_USERDATA]);
@@ -3348,7 +3365,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
err = -ENOMEM;
rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
if (rule == NULL)
- goto err1;
+ goto err_release_expr;
nft_activate_next(net, rule);
@@ -3367,7 +3384,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_newexpr(&ctx, &expr_info[i], expr);
if (err < 0) {
NL_SET_BAD_ATTR(extack, expr_info[i].attr);
- goto err2;
+ goto err_release_rule;
}
if (expr_info[i].ops->validate)
@@ -3377,16 +3394,24 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
expr = nft_expr_next(expr);
}
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
+ flow = nft_flow_rule_create(net, rule);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto err_release_rule;
+ }
+ }
+
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
err = nft_delrule(&ctx, old_rule);
if (err < 0) {
nft_trans_destroy(trans);
- goto err2;
+ goto err_destroy_flow_rule;
}
list_add_tail_rcu(&rule->list, &old_rule->list);
@@ -3394,7 +3419,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (!trans) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
if (info->nlh->nlmsg_flags & NLM_F_APPEND) {
@@ -3412,21 +3437,19 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
kvfree(expr_info);
chain->use++;
+ if (flow)
+ nft_trans_flow_rule(trans) = flow;
+
if (nft_net->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
- if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
- flow = nft_flow_rule_create(net, rule);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- nft_trans_flow_rule(trans) = flow;
- }
-
return 0;
-err2:
+
+err_destroy_flow_rule:
+ nft_flow_rule_destroy(flow);
+err_release_rule:
nf_tables_rule_release(&ctx, rule);
-err1:
+err_release_expr:
for (i = 0; i < n; i++) {
if (expr_info[i].ops) {
module_put(expr_info[i].ops->type->owner);
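
The relabelled error path above follows the usual kernel convention: each goto label names the first thing it releases, and the labels unwind in reverse order of acquisition. That discipline is what lets the flow-rule creation move ahead of the transaction allocation while keeping every failure path leak-free. A generic standalone sketch of the pattern, with hypothetical alloc_a()/alloc_b() stand-ins:

#include <errno.h>
#include <stdlib.h>

static void *alloc_a(void) { return malloc(16); }
static void *alloc_b(void) { return malloc(16); }

/* Sketch: each failure jumps to the label that undoes all prior steps. */
static int setup(void **out_a, void **out_b)
{
        void *a, *b;
        int err;

        a = alloc_a();
        if (!a)
                return -ENOMEM;

        b = alloc_b();
        if (!b) {
                err = -ENOMEM;
                goto err_release_a;     /* label names what gets undone */
        }

        *out_a = a;
        *out_b = b;
        return 0;

err_release_a:
        free(a);
        return err;
}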
@@ -3459,15 +3482,15 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
- int family = nfmsg->nfgen_family, err = 0;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
struct nft_chain *chain = NULL;
struct net *net = info->net;
struct nft_table *table;
struct nft_rule *rule;
struct nft_ctx ctx;
+ int err = 0;
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
NETLINK_CB(skb).portid);
@@ -3647,30 +3670,6 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_CONCAT] = { .type = NLA_NESTED },
};
-static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
- const struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const nla[],
- struct netlink_ext_ack *extack,
- u8 genmask, u32 nlpid)
-{
- const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- int family = nfmsg->nfgen_family;
- struct nft_table *table = NULL;
-
- if (nla[NFTA_SET_TABLE] != NULL) {
- table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
- genmask, nlpid);
- if (IS_ERR(table)) {
- NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
- return PTR_ERR(table);
- }
- }
-
- nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
- return 0;
-}
-
static struct nft_set *nft_set_lookup(const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
@@ -4050,20 +4049,26 @@ static int nf_tables_dump_sets_done(struct netlink_callback *cb)
static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
+ u8 family = info->nfmsg->nfgen_family;
+ struct nft_table *table = NULL;
struct net *net = info->net;
const struct nft_set *set;
struct sk_buff *skb2;
struct nft_ctx ctx;
int err;
- /* Verify existence before starting dump */
- err = nft_ctx_init_from_setattr(&ctx, net, skb, info->nlh, nla, extack,
- genmask, 0);
- if (err < 0)
- return err;
+ if (nla[NFTA_SET_TABLE]) {
+ table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
+ genmask, 0);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
+ return PTR_ERR(table);
+ }
+ }
+
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
@@ -4078,12 +4083,12 @@ static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
}
/* Only accept unspec with dump */
- if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
if (!nla[NFTA_SET_TABLE])
return -EINVAL;
- set = nft_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
+ set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -4171,11 +4176,10 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
u32 ktype, dtype, flags, policy, gc_int, objtype;
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_set_ops *ops;
struct nft_expr *expr = NULL;
struct net *net = info->net;
@@ -4346,13 +4350,45 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_set_alloc_name(&ctx, set, name);
kfree(name);
if (err < 0)
- goto err_set_alloc_name;
+ goto err_set_name;
+
+ udata = NULL;
+ if (udlen) {
+ udata = set->data + size;
+ nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+ }
+
+ INIT_LIST_HEAD(&set->bindings);
+ INIT_LIST_HEAD(&set->catchall_list);
+ set->table = table;
+ write_pnet(&set->net, net);
+ set->ops = ops;
+ set->ktype = ktype;
+ set->klen = desc.klen;
+ set->dtype = dtype;
+ set->objtype = objtype;
+ set->dlen = desc.dlen;
+ set->flags = flags;
+ set->size = desc.size;
+ set->policy = policy;
+ set->udlen = udlen;
+ set->udata = udata;
+ set->timeout = timeout;
+ set->gc_int = gc_int;
+
+ set->field_count = desc.field_count;
+ for (i = 0; i < desc.field_count; i++)
+ set->field_len[i] = desc.field_len[i];
+
+ err = ops->init(set, &desc, nla);
+ if (err < 0)
+ goto err_set_init;
if (nla[NFTA_SET_EXPR]) {
expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
set->exprs[0] = expr;
set->num_exprs++;
@@ -4363,75 +4399,44 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(flags & NFT_SET_EXPR)) {
err = -EINVAL;
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
err = -E2BIG;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_init;
+ goto err_set_expr_alloc;
}
set->exprs[i++] = expr;
set->num_exprs++;
}
}
- udata = NULL;
- if (udlen) {
- udata = set->data + size;
- nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
- }
-
- INIT_LIST_HEAD(&set->bindings);
- INIT_LIST_HEAD(&set->catchall_list);
- set->table = table;
- write_pnet(&set->net, net);
- set->ops = ops;
- set->ktype = ktype;
- set->klen = desc.klen;
- set->dtype = dtype;
- set->objtype = objtype;
- set->dlen = desc.dlen;
- set->flags = flags;
- set->size = desc.size;
- set->policy = policy;
- set->udlen = udlen;
- set->udata = udata;
- set->timeout = timeout;
- set->gc_int = gc_int;
set->handle = nf_tables_alloc_handle(table);
- set->field_count = desc.field_count;
- for (i = 0; i < desc.field_count; i++)
- set->field_len[i] = desc.field_len[i];
-
- err = ops->init(set, &desc, nla);
- if (err < 0)
- goto err_set_init;
-
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
- goto err_set_trans;
+ goto err_set_expr_alloc;
list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
-err_set_trans:
- ops->destroy(set);
-err_set_init:
+err_set_expr_alloc:
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(&ctx, set->exprs[i]);
-err_set_alloc_name:
+
+ ops->destroy(set);
+err_set_init:
kfree(set->name);
err_set_name:
kvfree(set);
@@ -4475,31 +4480,31 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
+ struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
- int err;
- if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
- if (nla[NFTA_SET_TABLE] == NULL)
- return -EINVAL;
- err = nft_ctx_init_from_setattr(&ctx, net, skb, info->nlh, nla, extack,
- genmask, NETLINK_CB(skb).portid);
- if (err < 0)
- return err;
+ table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
+ genmask, NETLINK_CB(skb).portid);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
+ return PTR_ERR(table);
+ }
if (nla[NFTA_SET_HANDLE]) {
attr = nla[NFTA_SET_HANDLE];
- set = nft_set_lookup_byhandle(ctx.table, attr, genmask);
+ set = nft_set_lookup_byhandle(table, attr, genmask);
} else {
attr = nla[NFTA_SET_NAME];
- set = nft_set_lookup(ctx.table, attr, genmask);
+ set = nft_set_lookup(table, attr, genmask);
}
if (IS_ERR(set)) {
@@ -4513,6 +4518,8 @@ static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
return -EBUSY;
}
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+
return nft_delset(&ctx, set);
}
@@ -4714,28 +4721,6 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
-static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
- const struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const nla[],
- struct netlink_ext_ack *extack,
- u8 genmask, u32 nlpid)
-{
- const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- int family = nfmsg->nfgen_family;
- struct nft_table *table;
-
- table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
- genmask, nlpid);
- if (IS_ERR(table)) {
- NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
- return PTR_ERR(table);
- }
-
- nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
- return 0;
-}
-
static int nft_set_elem_expr_dump(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_ext *ext)
@@ -5193,21 +5178,27 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
+ struct nft_table *table;
struct nft_set *set;
struct nlattr *attr;
struct nft_ctx ctx;
int rem, err = 0;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack,
- genmask, NETLINK_CB(skb).portid);
- if (err < 0)
- return err;
+ table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+ genmask, NETLINK_CB(skb).portid);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ return PTR_ERR(table);
+ }
- set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_set_start,
@@ -5976,8 +5967,10 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
struct nftables_pernet *nft_net = nft_pernet(info->net);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
+ struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
int rem, err;
@@ -5985,12 +5978,14 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack,
- genmask, NETLINK_CB(skb).portid);
- if (err < 0)
- return err;
+ table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+ genmask, NETLINK_CB(skb).portid);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ return PTR_ERR(table);
+ }
- set = nft_set_lookup_global(net, ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
+ set = nft_set_lookup_global(net, table, nla[NFTA_SET_ELEM_LIST_SET],
nla[NFTA_SET_ELEM_LIST_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -5998,6 +5993,8 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
return -EBUSY;
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
if (err < 0)
@@ -6005,7 +6002,7 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
}
if (nft_net->validate_state == NFT_VALIDATE_DO)
- return nft_table_validate(net, ctx.table);
+ return nft_table_validate(net, table);
return 0;
}
@@ -6243,23 +6240,29 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
+ struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
int rem, err = 0;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack,
- genmask, NETLINK_CB(skb).portid);
- if (err < 0)
- return err;
+ table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+ genmask, NETLINK_CB(skb).portid);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ return PTR_ERR(table);
+ }
- set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
return -EBUSY;
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+
if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS])
return nft_set_flush(&ctx, set, genmask);
@@ -6527,11 +6530,10 @@ err_free_trans:
static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_object_type *type;
- int family = nfmsg->nfgen_family;
struct net *net = info->net;
struct nft_table *table;
struct nft_object *obj;
@@ -6783,10 +6785,9 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
const struct nft_table *table;
struct net *net = info->net;
struct nft_object *obj;
@@ -6873,10 +6874,9 @@ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct net *net = info->net;
const struct nlattr *attr;
struct nft_table *table;
@@ -7304,12 +7304,11 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
struct nft_flowtable_hook flowtable_hook;
u8 genmask = nft_genmask_next(info->net);
+ u8 family = info->nfmsg->nfgen_family;
const struct nf_flowtable_type *type;
- int family = nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
struct nft_hook *hook, *next;
struct net *net = info->net;
@@ -7493,10 +7492,9 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
struct net *net = info->net;
const struct nlattr *attr;
@@ -7688,9 +7686,8 @@ static int nf_tables_getflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
- const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
u8 genmask = nft_genmask_cur(info->net);
- int family = nfmsg->nfgen_family;
+ u8 family = info->nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
const struct nft_table *table;
struct net *net = info->net;
@@ -8547,10 +8544,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
nf_tables_table_disable(net, trans->ctx.table);
- trans->ctx.table->flags = nft_trans_table_flags(trans);
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
nft_clear(net, trans->ctx.table);
}
@@ -8768,9 +8769,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
nf_tables_table_disable(net, trans->ctx.table);
-
+ trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+ }
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
list_del_rcu(&trans->ctx.table->list);
@@ -8808,11 +8817,16 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
+ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
trans->ctx.chain->use++;
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
+ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index dbc2e945c98e..866cfba04d6c 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -81,7 +81,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
else {
if (!pkt->tprot_set)
return false;
- ptr = skb_network_header(skb) + pkt->xt.thoff;
+ ptr = skb_network_header(skb) + nft_thoff(pkt);
}
ptr += priv->offset;
@@ -268,6 +268,7 @@ static struct nft_expr_type *nft_basic_types[] = {
&nft_meta_type,
&nft_rt_type,
&nft_exthdr_type,
+ &nft_last_type,
};
static struct nft_object_type *nft_basic_objects[] = {
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index a48c5fd53a80..b58d73a96523 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -54,15 +54,10 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow)
{
struct nft_flow_match *match = &flow->match;
- struct nft_offload_ethertype ethertype;
-
- if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
- match->key.basic.n_proto != htons(ETH_P_8021Q) &&
- match->key.basic.n_proto != htons(ETH_P_8021AD))
- return;
-
- ethertype.value = match->key.basic.n_proto;
- ethertype.mask = match->mask.basic.n_proto;
+ struct nft_offload_ethertype ethertype = {
+ .value = match->key.basic.n_proto,
+ .mask = match->mask.basic.n_proto,
+ };
if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
(match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
@@ -76,7 +71,9 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
offsetof(struct nft_flow_key, cvlan);
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
- } else {
+ } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
+ (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
+ match->key.basic.n_proto == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.vlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
@@ -594,23 +591,6 @@ int nft_flow_rule_offload_commit(struct net *net)
}
}
- list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
- continue;
-
- switch (trans->msg_type) {
- case NFT_MSG_NEWRULE:
- case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
- continue;
-
- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
- break;
- default:
- break;
- }
- }
-
return err;
}
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index 0cf3278007ba..e4fe2f0780eb 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -113,17 +113,17 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
int off = skb_network_offset(skb);
unsigned int len, nh_end;
- nh_end = pkt->tprot_set ? pkt->xt.thoff : skb->len;
+ nh_end = pkt->tprot_set ? nft_thoff(pkt) : skb->len;
len = min_t(unsigned int, nh_end - skb_network_offset(skb),
NFT_TRACETYPE_NETWORK_HSIZE);
if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
return -1;
if (pkt->tprot_set) {
- len = min_t(unsigned int, skb->len - pkt->xt.thoff,
+ len = min_t(unsigned int, skb->len - nft_thoff(pkt),
NFT_TRACETYPE_TRANSPORT_HSIZE);
if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
- pkt->xt.thoff, len))
+ nft_thoff(pkt), len))
return -1;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e8dbd8379027..7e2c8dd01408 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -68,6 +68,7 @@ static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
+ [NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
@@ -256,6 +257,7 @@ replay:
.net = net,
.sk = nfnlnet->nfnl,
.nlh = nlh,
+ .nfmsg = nlmsg_data(nlh),
.extack = extack,
};
@@ -491,6 +493,7 @@ replay_abort:
.net = net,
.sk = nfnlnet->nfnl,
.nlh = nlh,
+ .nfmsg = nlmsg_data(nlh),
.extack = &extack,
};
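
Caching the nfgenmsg payload pointer in nfnl_info at dispatch time, as the two hunks above do, is what allows the many handler hunks in this patch to drop their per-function nlmsg_data() boilerplate. A stand-in sketch with stub types (not the kernel structs) of the shape of that change:

/* Stub types only: the payload pointer is computed once where the
 * request info is built, instead of in every handler. */
struct nfgenmsg_stub { unsigned char nfgen_family; };
struct nfnl_info_stub {
        const void *nlh;
        const struct nfgenmsg_stub *nfmsg;  /* cached nlmsg_data(nlh) */
};

static unsigned char handler_family(const struct nfnl_info_stub *info)
{
        return info->nfmsg->nfgen_family;   /* no nlmsg_data() call here */
}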
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 3c8cf8748cfb..505f46a32173 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -314,14 +314,11 @@ static int nfnl_acct_get(struct sk_buff *skb, const struct nfnl_info *info,
kfree_skb(skb2);
break;
}
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
+ ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
+ break;
}
+
return ret;
}
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 322ac5dd5402..5c622f55c9d6 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -380,10 +380,14 @@ static int
nfnl_cthelper_update(const struct nlattr * const tb[],
struct nf_conntrack_helper *helper)
{
+ u32 size;
int ret;
- if (tb[NFCTH_PRIV_DATA_LEN])
- return -EBUSY;
+ if (tb[NFCTH_PRIV_DATA_LEN]) {
+ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+ if (size != helper->data_len)
+ return -EBUSY;
+ }
if (tb[NFCTH_POLICY]) {
ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
@@ -663,14 +667,10 @@ static int nfnl_cthelper_get(struct sk_buff *skb, const struct nfnl_info *info,
break;
}
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
-
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
+ ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
+ break;
}
+
return ret;
}
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 38848ad68899..c57673d499be 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -287,14 +287,11 @@ static int cttimeout_get_timeout(struct sk_buff *skb,
kfree_skb(skb2);
break;
}
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
+ ret = nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
+ break;
}
+
return ret;
}
@@ -427,9 +424,9 @@ static int cttimeout_default_get(struct sk_buff *skb,
const struct nf_conntrack_l4proto *l4proto;
unsigned int *timeouts = NULL;
struct sk_buff *skb2;
- int ret, err;
__u16 l3num;
__u8 l4num;
+ int ret;
if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
return -EINVAL;
@@ -438,9 +435,8 @@ static int cttimeout_default_get(struct sk_buff *skb,
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find(l4num);
- err = -EOPNOTSUPP;
if (l4proto->l4proto != l4num)
- goto err;
+ return -EOPNOTSUPP;
switch (l4proto->l4proto) {
case IPPROTO_ICMP:
@@ -480,13 +476,11 @@ static int cttimeout_default_get(struct sk_buff *skb,
}
if (!timeouts)
- goto err;
+ return -EOPNOTSUPP;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!skb2)
+ return -ENOMEM;
ret = cttimeout_default_fill_info(info->net, skb2,
NETLINK_CB(skb).portid,
@@ -496,18 +490,10 @@ static int cttimeout_default_get(struct sk_buff *skb,
l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
- err = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
- ret = netlink_unicast(info->sk, skb2, NETLINK_CB(skb).portid,
- MSG_DONTWAIT);
- if (ret > 0)
- ret = 0;
- /* this avoids a loop in nfnetlink. */
- return ret == -EAGAIN ? -ENOBUFS : ret;
-err:
- return err;
+ return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
new file mode 100644
index 000000000000..50b4e3c9347a
--- /dev/null
+++ b/net/netfilter/nfnetlink_hook.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021 Red Hat GmbH
+ *
+ * Author: Florian Westphal <fw@strlen.de>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+
+#include <linux/netfilter.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_hook.h>
+
+#include <net/netfilter/nf_tables.h>
+#include <net/sock.h>
+
+static const struct nla_policy nfnl_hook_nla_policy[NFNLA_HOOK_MAX + 1] = {
+ [NFNLA_HOOK_HOOKNUM] = { .type = NLA_U32 },
+ [NFNLA_HOOK_PRIORITY] = { .type = NLA_U32 },
+ [NFNLA_HOOK_DEV] = { .type = NLA_STRING,
+ .len = IFNAMSIZ - 1 },
+ [NFNLA_HOOK_FUNCTION_NAME] = { .type = NLA_NUL_STRING,
+ .len = KSYM_NAME_LEN, },
+ [NFNLA_HOOK_MODULE_NAME] = { .type = NLA_NUL_STRING,
+ .len = MODULE_NAME_LEN, },
+ [NFNLA_HOOK_CHAIN_INFO] = { .type = NLA_NESTED, },
+};
+
+static int nf_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *c)
+{
+ int err;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rcu_read_unlock();
+ err = netlink_dump_start(nlsk, skb, nlh, c);
+ rcu_read_lock();
+ module_put(THIS_MODULE);
+
+ return err;
+}
+
+struct nfnl_dump_hook_data {
+ char devname[IFNAMSIZ];
+ unsigned long headv;
+ u8 hook;
+};
+
+static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
+ const struct nfnl_dump_hook_data *ctx,
+ unsigned int seq,
+ const struct nf_hook_ops *ops)
+{
+ struct net *net = sock_net(nlskb->sk);
+ struct nlattr *nest, *nest2;
+ struct nft_chain *chain;
+ int ret = 0;
+
+ if (ops->hook_ops_type != NF_HOOK_OP_NF_TABLES)
+ return 0;
+
+ chain = ops->priv;
+ if (WARN_ON_ONCE(!chain))
+ return 0;
+
+ if (!nft_is_active(net, chain))
+ return 0;
+
+ nest = nla_nest_start(nlskb, NFNLA_HOOK_CHAIN_INFO);
+ if (!nest)
+ return -EMSGSIZE;
+
+ ret = nla_put_be32(nlskb, NFNLA_HOOK_INFO_TYPE,
+ htonl(NFNL_HOOK_TYPE_NFTABLES));
+ if (ret)
+ goto cancel_nest;
+
+ nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
+ if (!nest2)
+ goto cancel_nest;
+
+ ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name);
+ if (ret)
+ goto cancel_nest;
+
+ ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, chain->name);
+ if (ret)
+ goto cancel_nest;
+
+ nla_nest_end(nlskb, nest2);
+ nla_nest_end(nlskb, nest);
+ return ret;
+
+cancel_nest:
+ nla_nest_cancel(nlskb, nest);
+ return -EMSGSIZE;
+}
+
+static int nfnl_hook_dump_one(struct sk_buff *nlskb,
+ const struct nfnl_dump_hook_data *ctx,
+ const struct nf_hook_ops *ops,
+ unsigned int seq)
+{
+ u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
+ unsigned int portid = NETLINK_CB(nlskb).portid;
+ struct nlmsghdr *nlh;
+ int ret = -EMSGSIZE;
+#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
+ char *module_name;
+#endif
+ nlh = nfnl_msg_put(nlskb, portid, seq, event,
+ NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0);
+ if (!nlh)
+ goto nla_put_failure;
+
+#ifdef CONFIG_KALLSYMS
+ ret = snprintf(sym, sizeof(sym), "%ps", ops->hook);
+ if (ret >= sizeof(sym)) {
+ ret = -EINVAL;
+ goto nla_put_failure;
+ }
+
+ module_name = strstr(sym, " [");
+ if (module_name) {
+ char *end;
+
+ module_name += 2;
+ end = strchr(module_name, ']');
+ if (end) {
+ *end = 0;
+
+ ret = nla_put_string(nlskb, NFNLA_HOOK_MODULE_NAME, module_name);
+ if (ret)
+ goto nla_put_failure;
+ }
+ }
+
+ ret = nla_put_string(nlskb, NFNLA_HOOK_FUNCTION_NAME, sym);
+ if (ret)
+ goto nla_put_failure;
+#endif
+
+ ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum));
+ if (ret)
+ goto nla_put_failure;
+
+ ret = nla_put_be32(nlskb, NFNLA_HOOK_PRIORITY, htonl(ops->priority));
+ if (ret)
+ goto nla_put_failure;
+
+ ret = nfnl_hook_put_nft_chain_info(nlskb, ctx, seq, ops);
+ if (ret)
+ goto nla_put_failure;
+
+ nlmsg_end(nlskb, nlh);
+ return 0;
+nla_put_failure:
+ nlmsg_trim(nlskb, nlh);
+ return ret;
+}
+
+static const struct nf_hook_entries *
+nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
+{
+ const struct nf_hook_entries *hook_head = NULL;
+ struct net_device *netdev;
+
+ switch (pf) {
+ case NFPROTO_IPV4:
+ if (hook >= ARRAY_SIZE(net->nf.hooks_ipv4))
+ return ERR_PTR(-EINVAL);
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ if (hook >= ARRAY_SIZE(net->nf.hooks_ipv6))
+ return ERR_PTR(-EINVAL);
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ if (hook >= ARRAY_SIZE(net->nf.hooks_arp))
+ return ERR_PTR(-EINVAL);
+ hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+ break;
+ case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ if (hook >= ARRAY_SIZE(net->nf.hooks_bridge))
+ return ERR_PTR(-EINVAL);
+ hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+ break;
+#if IS_ENABLED(CONFIG_DECNET)
+ case NFPROTO_DECNET:
+ if (hook >= ARRAY_SIZE(net->nf.hooks_decnet))
+ return ERR_PTR(-EINVAL);
+ hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
+ break;
+#endif
+#ifdef CONFIG_NETFILTER_INGRESS
+ case NFPROTO_NETDEV:
+ if (hook != NF_NETDEV_INGRESS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ netdev = dev_get_by_name_rcu(net, dev);
+ if (!netdev)
+ return ERR_PTR(-ENODEV);
+
+ return rcu_dereference(netdev->nf_hooks_ingress);
+#endif
+ default:
+ return ERR_PTR(-EPROTONOSUPPORT);
+ }
+
+ return hook_head;
+}
+
+static int nfnl_hook_dump(struct sk_buff *nlskb,
+ struct netlink_callback *cb)
+{
+ struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ struct nfnl_dump_hook_data *ctx = cb->data;
+ int err, family = nfmsg->nfgen_family;
+ struct net *net = sock_net(nlskb->sk);
+ struct nf_hook_ops * const *ops;
+ const struct nf_hook_entries *e;
+ unsigned int i = cb->args[0];
+
+ rcu_read_lock();
+
+ e = nfnl_hook_entries_head(family, ctx->hook, net, ctx->devname);
+ if (!e)
+ goto done;
+
+ if (IS_ERR(e)) {
+ cb->seq++;
+ goto done;
+ }
+
+ if ((unsigned long)e != ctx->headv || i >= e->num_hook_entries)
+ cb->seq++;
+
+ ops = nf_hook_entries_get_hook_ops(e);
+
+ for (; i < e->num_hook_entries; i++) {
+ err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq);
+ if (err)
+ break;
+ }
+
+done:
+ nl_dump_check_consistent(cb, nlmsg_hdr(nlskb));
+ rcu_read_unlock();
+ cb->args[0] = i;
+ return nlskb->len;
+}
+
+static int nfnl_hook_dump_start(struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ const struct nlattr * const *nla = cb->data;
+ struct nfnl_dump_hook_data *ctx = NULL;
+ struct net *net = sock_net(cb->skb->sk);
+ u8 family = nfmsg->nfgen_family;
+ char name[IFNAMSIZ] = "";
+ const void *head;
+ u32 hooknum;
+
+ hooknum = ntohl(nla_get_be32(nla[NFNLA_HOOK_HOOKNUM]));
+ if (hooknum > 255)
+ return -EINVAL;
+
+ if (family == NFPROTO_NETDEV) {
+ if (!nla[NFNLA_HOOK_DEV])
+ return -EINVAL;
+
+ nla_strscpy(name, nla[NFNLA_HOOK_DEV], sizeof(name));
+ }
+
+ rcu_read_lock();
+ /* Not dereferenced; for consistency check only */
+ head = nfnl_hook_entries_head(family, hooknum, net, name);
+ rcu_read_unlock();
+
+ if (head && IS_ERR(head))
+ return PTR_ERR(head);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ strscpy(ctx->devname, name, sizeof(ctx->devname));
+ ctx->headv = (unsigned long)head;
+ ctx->hook = hooknum;
+
+ cb->seq = 1;
+ cb->data = ctx;
+
+ return 0;
+}
+
+static int nfnl_hook_dump_stop(struct netlink_callback *cb)
+{
+ kfree(cb->data);
+ return 0;
+}
+
+static int nfnl_hook_get(struct sk_buff *skb,
+ const struct nfnl_info *info,
+ const struct nlattr * const nla[])
+{
+ if (!nla[NFNLA_HOOK_HOOKNUM])
+ return -EINVAL;
+
+ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = nfnl_hook_dump_start,
+ .done = nfnl_hook_dump_stop,
+ .dump = nfnl_hook_dump,
+ .module = THIS_MODULE,
+ .data = (void *)nla,
+ };
+
+ return nf_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct nfnl_callback nfnl_hook_cb[NFNL_MSG_HOOK_MAX] = {
+ [NFNL_MSG_HOOK_GET] = {
+ .call = nfnl_hook_get,
+ .type = NFNL_CB_RCU,
+ .attr_count = NFNLA_HOOK_MAX,
+ .policy = nfnl_hook_nla_policy
+ },
+};
+
+static const struct nfnetlink_subsystem nfhook_subsys = {
+ .name = "nfhook",
+ .subsys_id = NFNL_SUBSYS_HOOK,
+ .cb_count = NFNL_MSG_HOOK_MAX,
+ .cb = nfnl_hook_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_HOOK);
+
+static int __init nfnetlink_hook_init(void)
+{
+ return nfnetlink_subsys_register(&nfhook_subsys);
+}
+
+static void __exit nfnetlink_hook_exit(void)
+{
+ nfnetlink_subsys_unregister(&nfhook_subsys);
+}
+
+module_init(nfnetlink_hook_init);
+module_exit(nfnetlink_hook_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("nfnetlink_hook: list registered netfilter hooks");
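
One detail of the new file worth noting: nfnl_hook_dump_one() recovers the owning module from kallsyms-style "%ps" output, which renders a symbol as "function [module]" for module code and as a bare "function" otherwise. A userspace sketch of that parsing (unlike the kernel code above, this sketch also terminates the function name at the separating space):

#include <stdio.h>
#include <string.h>

static void split_symbol(char *sym, const char **func, const char **module)
{
        char *p = strstr(sym, " [");

        *func = sym;
        *module = NULL;
        if (p) {
                char *end;

                *p = '\0';              /* cut the function name */
                p += 2;                 /* skip " [" */
                end = strchr(p, ']');
                if (end) {
                        *end = '\0';
                        *module = p;
                }
        }
}

int main(void)
{
        char sym[] = "nft_do_chain [nf_tables]";
        const char *func, *module;

        split_symbol(sym, &func, &module);
        printf("func=%s module=%s\n", func, module ? module : "(builtin)");
        return 0;
}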
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 587086b18c36..691ef4cffdd9 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -871,15 +871,14 @@ static int nfulnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfula[])
{
struct nfnl_log_net *log = nfnl_log_pernet(info->net);
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int16_t group_num = ntohs(nfmsg->res_id);
+ u_int16_t group_num = ntohs(info->nfmsg->res_id);
struct nfulnl_msg_config_cmd *cmd = NULL;
struct nfulnl_instance *inst;
u16 flags = 0;
int ret = 0;
if (nfula[NFULA_CFG_CMD]) {
- u_int8_t pf = nfmsg->nfgen_family;
+ u_int8_t pf = info->nfmsg->nfgen_family;
cmd = nla_data(nfula[NFULA_CFG_CMD]);
/* Commands without queue context */
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f37a575ebd7f..f774de0fc24f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1051,8 +1051,7 @@ static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u16 queue_num = ntohs(nfmsg->res_id);
+ u16 queue_num = ntohs(info->nfmsg->res_id);
struct nf_queue_entry *entry, *tmp;
struct nfqnl_msg_verdict_hdr *vhdr;
struct nfqnl_instance *queue;
@@ -1160,8 +1159,7 @@ static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int16_t queue_num = ntohs(nfmsg->res_id);
+ u_int16_t queue_num = ntohs(info->nfmsg->res_id);
struct nfqnl_msg_verdict_hdr *vhdr;
enum ip_conntrack_info ctinfo;
struct nfqnl_instance *queue;
@@ -1243,8 +1241,7 @@ static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nfqa[])
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
- struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
- u_int16_t queue_num = ntohs(nfmsg->res_id);
+ u_int16_t queue_num = ntohs(info->nfmsg->res_id);
struct nfqnl_msg_config_cmd *cmd = NULL;
struct nfqnl_instance *queue;
__u32 flags = 0, mask = 0;
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 363bdd7044ec..5b02408a920b 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -18,7 +18,7 @@ static unsigned int nft_do_chain_ipv4(void *priv,
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv4(&pkt, skb);
+ nft_set_pktinfo_ipv4(&pkt);
return nft_do_chain(&pkt, priv);
}
@@ -62,7 +62,7 @@ static unsigned int nft_do_chain_arp(void *priv, struct sk_buff *skb,
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_unspec(&pkt, skb);
+ nft_set_pktinfo_unspec(&pkt);
return nft_do_chain(&pkt, priv);
}
@@ -102,7 +102,7 @@ static unsigned int nft_do_chain_ipv6(void *priv,
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv6(&pkt, skb);
+ nft_set_pktinfo_ipv6(&pkt);
return nft_do_chain(&pkt, priv);
}
@@ -149,10 +149,10 @@ static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
switch (state->pf) {
case NFPROTO_IPV4:
- nft_set_pktinfo_ipv4(&pkt, skb);
+ nft_set_pktinfo_ipv4(&pkt);
break;
case NFPROTO_IPV6:
- nft_set_pktinfo_ipv6(&pkt, skb);
+ nft_set_pktinfo_ipv6(&pkt);
break;
default:
break;
@@ -174,7 +174,7 @@ static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb,
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
- if (nft_set_pktinfo_ipv4_ingress(&pkt, skb) < 0)
+ if (nft_set_pktinfo_ipv4_ingress(&pkt) < 0)
return NF_DROP;
break;
case htons(ETH_P_IPV6):
@@ -182,7 +182,7 @@ static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb,
ingress_state.hook = NF_INET_INGRESS;
nft_set_pktinfo(&pkt, skb, &ingress_state);
- if (nft_set_pktinfo_ipv6_ingress(&pkt, skb) < 0)
+ if (nft_set_pktinfo_ipv6_ingress(&pkt) < 0)
return NF_DROP;
break;
default:
@@ -238,13 +238,13 @@ nft_do_chain_bridge(void *priv,
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
- nft_set_pktinfo_ipv4_validate(&pkt, skb);
+ nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
- nft_set_pktinfo_ipv6_validate(&pkt, skb);
+ nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
- nft_set_pktinfo_unspec(&pkt, skb);
+ nft_set_pktinfo_unspec(&pkt);
break;
}
@@ -293,13 +293,13 @@ static unsigned int nft_do_chain_netdev(void *priv, struct sk_buff *skb,
switch (skb->protocol) {
case htons(ETH_P_IP):
- nft_set_pktinfo_ipv4_validate(&pkt, skb);
+ nft_set_pktinfo_ipv4_validate(&pkt);
break;
case htons(ETH_P_IPV6):
- nft_set_pktinfo_ipv6_validate(&pkt, skb);
+ nft_set_pktinfo_ipv6_validate(&pkt);
break;
default:
- nft_set_pktinfo_unspec(&pkt, skb);
+ nft_set_pktinfo_unspec(&pkt);
break;
}
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index eac4a901233f..98e4946100c5 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -17,12 +17,12 @@ static unsigned int nft_nat_do_chain(void *priv, struct sk_buff *skb,
switch (state->pf) {
#ifdef CONFIG_NF_TABLES_IPV4
case NFPROTO_IPV4:
- nft_set_pktinfo_ipv4(&pkt, skb);
+ nft_set_pktinfo_ipv4(&pkt);
break;
#endif
#ifdef CONFIG_NF_TABLES_IPV6
case NFPROTO_IPV6:
- nft_set_pktinfo_ipv6(&pkt, skb);
+ nft_set_pktinfo_ipv6(&pkt);
break;
#endif
default:
diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c
index edd02cda57fc..925db0dce48d 100644
--- a/net/netfilter/nft_chain_route.c
+++ b/net/netfilter/nft_chain_route.c
@@ -26,7 +26,7 @@ static unsigned int nf_route_table_hook4(void *priv,
u8 tos;
nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv4(&pkt, skb);
+ nft_set_pktinfo_ipv4(&pkt);
mark = skb->mark;
iph = ip_hdr(skb);
@@ -74,7 +74,7 @@ static unsigned int nf_route_table_hook6(void *priv,
int err;
nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv6(&pkt, skb);
+ nft_set_pktinfo_ipv6(&pkt);
/* save source/dest address, mark, hoplimit, flowlabel, priority */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 5415ab14400d..639c337c885b 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -57,8 +57,13 @@ union nft_entry {
};
static inline void
-nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
+nft_compat_set_par(struct xt_action_param *par,
+ const struct nft_pktinfo *pkt,
+ const void *xt, const void *xt_info)
{
+ par->state = pkt->state;
+ par->thoff = nft_thoff(pkt);
+ par->fragoff = pkt->fragoff;
par->target = xt;
par->targinfo = xt_info;
par->hotdrop = false;
@@ -71,13 +76,14 @@ static void nft_target_eval_xt(const struct nft_expr *expr,
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
struct sk_buff *skb = pkt->skb;
+ struct xt_action_param xt;
int ret;
- nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+ nft_compat_set_par(&xt, pkt, target, info);
- ret = target->target(skb, &pkt->xt);
+ ret = target->target(skb, &xt);
- if (pkt->xt.hotdrop)
+ if (xt.hotdrop)
ret = NF_DROP;
switch (ret) {
@@ -97,13 +103,14 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
struct sk_buff *skb = pkt->skb;
+ struct xt_action_param xt;
int ret;
- nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+ nft_compat_set_par(&xt, pkt, target, info);
- ret = target->target(skb, &pkt->xt);
+ ret = target->target(skb, &xt);
- if (pkt->xt.hotdrop)
+ if (xt.hotdrop)
ret = NF_DROP;
switch (ret) {
@@ -350,13 +357,14 @@ static void __nft_match_eval(const struct nft_expr *expr,
{
struct xt_match *match = expr->ops->data;
struct sk_buff *skb = pkt->skb;
+ struct xt_action_param xt;
bool ret;
- nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
+ nft_compat_set_par(&xt, pkt, match, info);
- ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
+ ret = match->match(skb, &xt);
- if (pkt->xt.hotdrop) {
+ if (xt.hotdrop) {
regs->verdict.code = NF_DROP;
return;
}
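
The three evaluation paths above stop casting away const on scratch state embedded in the shared nft_pktinfo and instead build xt_action_param on the caller's stack, copying in only what the xtables callee needs. A stand-in sketch of that pattern (stub types, not the kernel structs):

/* Stub types: the shared context stays read-only; each evaluation
 * fills private, writable scratch state on its own stack. */
struct pkt_ctx { int thoff; };                  /* shared, const in callers */
struct action_param { int thoff; int hotdrop; };

static void fill_param(struct action_param *par, const struct pkt_ctx *pkt)
{
        par->thoff = pkt->thoff;        /* copy what the callee needs */
        par->hotdrop = 0;               /* private, safely writable */
}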
@@ -617,7 +625,7 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const tb[])
{
- struct nfgenmsg *nfmsg;
+ u8 family = info->nfmsg->nfgen_family;
const char *name, *fmt;
struct sk_buff *skb2;
int ret = 0, target;
@@ -632,9 +640,7 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
- nfmsg = nlmsg_data(info->nlh);
-
- switch(nfmsg->nfgen_family) {
+ switch(family) {
case AF_INET:
fmt = "ipt_%s";
break;
@@ -648,8 +654,7 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
fmt = "arpt_%s";
break;
default:
- pr_err("nft_compat: unsupported protocol %d\n",
- nfmsg->nfgen_family);
+ pr_err("nft_compat: unsupported protocol %d\n", family);
return -EINVAL;
}
@@ -657,9 +662,8 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
return -EINVAL;
rcu_read_unlock();
- try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
- rev, target, &ret),
- fmt, name);
+ try_then_request_module(xt_find_revision(family, name, rev, target, &ret),
+ fmt, name);
if (ret < 0)
goto out_put;
@@ -674,8 +678,7 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
info->nlh->nlmsg_seq,
NFNL_MSG_TYPE(info->nlh->nlmsg_type),
NFNL_MSG_COMPAT_GET,
- nfmsg->nfgen_family,
- name, ret, target) <= 0) {
+ family, name, ret, target) <= 0) {
kfree_skb(skb2);
goto out_put;
}
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 0592a9456084..337e22d8b40b 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1217,7 +1217,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
struct nf_conn *ct;
ct = nf_ct_get(pkt->skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_UNTRACKED) {
+ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
regs->verdict.code = NFT_BREAK;
return;
}
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index f64f0017e9a5..af4ee874a067 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -10,8 +10,10 @@
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
+#include <linux/sctp.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
+#include <net/sctp/sctp.h>
#include <net/tcp.h>
struct nft_exthdr {
@@ -42,6 +44,9 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
unsigned int offset = 0;
int err;
+ if (pkt->skb->protocol != htons(ETH_P_IPV6))
+ goto err;
+
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
nft_reg_store8(dest, err >= 0);
@@ -162,10 +167,10 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
{
struct tcphdr *tcph;
- if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
+ if (pkt->tprot != IPPROTO_TCP)
return NULL;
- tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer);
+ tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
if (!tcph)
return NULL;
@@ -173,7 +178,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
return NULL;
- return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer);
+ return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer);
}
static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
@@ -249,7 +254,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
return;
if (skb_ensure_writable(pkt->skb,
- pkt->xt.thoff + i + priv->len))
+ nft_thoff(pkt) + i + priv->len))
return;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
@@ -300,6 +305,48 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
}
}
+static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr);
+ struct nft_exthdr *priv = nft_expr_priv(expr);
+ u32 *dest = &regs->data[priv->dreg];
+ const struct sctp_chunkhdr *sch;
+ struct sctp_chunkhdr _sch;
+
+ if (pkt->tprot != IPPROTO_SCTP)
+ goto err;
+
+ do {
+ sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
+ if (!sch || !sch->length)
+ break;
+
+ if (sch->type == priv->type) {
+ if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+ nft_reg_store8(dest, true);
+ return;
+ }
+ if (priv->offset + priv->len > ntohs(sch->length) ||
+ offset + ntohs(sch->length) > pkt->skb->len)
+ break;
+
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (skb_copy_bits(pkt->skb, offset + priv->offset,
+ dest, priv->len) < 0)
+ break;
+ return;
+ }
+ offset += SCTP_PAD4(ntohs(sch->length));
+ } while (offset < pkt->skb->len);
+err:
+ if (priv->flags & NFT_EXTHDR_F_PRESENT)
+ nft_reg_store8(dest, false);
+ else
+ regs->verdict.code = NFT_BREAK;
+}
+
static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
[NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
[NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
@@ -499,6 +546,14 @@ static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
.dump = nft_exthdr_dump_set,
};
+static const struct nft_expr_ops nft_exthdr_sctp_ops = {
+ .type = &nft_exthdr_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+ .eval = nft_exthdr_sctp_eval,
+ .init = nft_exthdr_init,
+ .dump = nft_exthdr_dump,
+};
+
static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
@@ -529,6 +584,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
return &nft_exthdr_ipv4_ops;
}
break;
+ case NFT_EXTHDR_OP_SCTP:
+ if (tb[NFTA_EXTHDR_DREG])
+ return &nft_exthdr_sctp_ops;
+ break;
}
return ERR_PTR(-EOPNOTSUPP);
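
The new nft_exthdr_sctp_eval() above walks SCTP chunks, which are TLVs whose length field excludes the padding that rounds each chunk up to a 4-byte boundary (hence SCTP_PAD4). A self-contained user-space sketch of the same walk, with an invented sample buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define PAD4(n) (((n) + 3) & ~3u)

struct chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;                 /* big-endian, includes this header */
};

static int find_chunk(const uint8_t *buf, size_t len, uint8_t want)
{
	size_t off = 0;

	while (off + sizeof(struct chunkhdr) <= len) {
		struct chunkhdr ch;

		memcpy(&ch, buf + off, sizeof(ch));
		uint16_t clen = ntohs(ch.length);
		if (clen < sizeof(ch) || off + clen > len)
			break;           /* truncated or bogus chunk */
		if (ch.type == want)
			return (int)off;
		off += PAD4(clen);       /* step over the padding too */
	}
	return -1;
}

int main(void)
{
	/* two chunks: type 1, 6 bytes (padded to 8), then type 0, 4 bytes */
	uint8_t pkt[] = { 1, 0, 0, 6, 0xaa, 0xbb, 0, 0,
			  0, 0, 0, 4 };

	printf("type-0 chunk at offset %d\n", find_chunk(pkt, sizeof(pkt), 0));
	return 0;
}
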
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 4843dd2b410c..0af34ad41479 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -291,7 +291,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
case IPPROTO_TCP:
- tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+ tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
sizeof(_tcph), &_tcph);
if (unlikely(!tcph || tcph->fin || tcph->rst))
goto out;
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
new file mode 100644
index 000000000000..913ac45167f2
--- /dev/null
+++ b/net/netfilter/nft_last.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_last_priv {
+ unsigned long last_jiffies;
+ unsigned int last_set;
+};
+
+static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
+ [NFTA_LAST_SET] = { .type = NLA_U32 },
+ [NFTA_LAST_MSECS] = { .type = NLA_U64 },
+};
+
+static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_last_priv *priv = nft_expr_priv(expr);
+ u64 last_jiffies;
+ int err;
+
+ if (tb[NFTA_LAST_MSECS]) {
+ err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
+ if (err < 0)
+ return err;
+
+ priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
+ priv->last_set = 1;
+ }
+
+ return 0;
+}
+
+static void nft_last_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
+{
+ struct nft_last_priv *priv = nft_expr_priv(expr);
+
+ priv->last_jiffies = jiffies;
+ priv->last_set = 1;
+}
+
+static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_last_priv *priv = nft_expr_priv(expr);
+ __be64 msecs;
+
+ if (time_before(jiffies, priv->last_jiffies))
+ priv->last_set = 0;
+
+ if (priv->last_set)
+ msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+ else
+ msecs = 0;
+
+ if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+ nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static const struct nft_expr_ops nft_last_ops = {
+ .type = &nft_last_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
+ .eval = nft_last_eval,
+ .init = nft_last_init,
+ .dump = nft_last_dump,
+};
+
+struct nft_expr_type nft_last_type __read_mostly = {
+ .name = "last",
+ .ops = &nft_last_ops,
+ .policy = nft_last_policy,
+ .maxattr = NFTA_LAST_MAX,
+ .flags = NFT_EXPR_STATEFUL,
+ .owner = THIS_MODULE,
+};
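
The new "last" expression above does nothing but stamp the time of the most recent evaluation and report, on dump, how long ago that was. A sketch of that idea with CLOCK_MONOTONIC standing in for jiffies (the 50 ms sleep is only there to make the output non-trivial):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec last_seen;
static int last_set;

static void eval(void)                    /* per-packet hot path */
{
	clock_gettime(CLOCK_MONOTONIC, &last_seen);
	last_set = 1;
}

static long long dump_msecs_ago(void)     /* userspace-visible state */
{
	struct timespec now;

	if (!last_set)
		return -1;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - last_seen.tv_sec) * 1000LL +
	       (now.tv_nsec - last_seen.tv_nsec) / 1000000LL;
}

int main(void)
{
	eval();
	usleep(50 * 1000);
	printf("last packet seen %lld ms ago\n", dump_msecs_ago());
	return 0;
}
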
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index a479f8a1270c..90becbf5bff3 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -23,6 +23,37 @@ struct nft_lookup {
struct nft_set_binding binding;
};
+#ifdef CONFIG_RETPOLINE
+bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ if (set->ops == &nft_set_hash_fast_type.ops)
+ return nft_hash_lookup_fast(net, set, key, ext);
+ if (set->ops == &nft_set_hash_type.ops)
+ return nft_hash_lookup(net, set, key, ext);
+
+ if (set->ops == &nft_set_rhash_type.ops)
+ return nft_rhash_lookup(net, set, key, ext);
+
+ if (set->ops == &nft_set_bitmap_type.ops)
+ return nft_bitmap_lookup(net, set, key, ext);
+
+ if (set->ops == &nft_set_pipapo_type.ops)
+ return nft_pipapo_lookup(net, set, key, ext);
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+ if (set->ops == &nft_set_pipapo_avx2_type.ops)
+ return nft_pipapo_avx2_lookup(net, set, key, ext);
+#endif
+
+ if (set->ops == &nft_set_rbtree_type.ops)
+ return nft_rbtree_lookup(net, set, key, ext);
+
+ WARN_ON_ONCE(1);
+ return set->ops->lookup(net, set, key, ext);
+}
+EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+#endif
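
The CONFIG_RETPOLINE block above trades one indirect call for a chain of pointer comparisons and direct calls, which is cheaper when indirect branches are retpolined. A stripped-down sketch of that dispatch shape; the set types here are invented:

#include <stdio.h>

struct set_ops { int (*lookup)(int key); };

static int hash_lookup(int key)   { return key % 7 == 0; }
static int rbtree_lookup(int key) { return key % 5 == 0; }

static const struct set_ops hash_ops   = { .lookup = hash_lookup };
static const struct set_ops rbtree_ops = { .lookup = rbtree_lookup };

/* branch on the ops identity so the compiler emits direct calls */
static int do_lookup(const struct set_ops *ops, int key)
{
	if (ops == &hash_ops)
		return hash_lookup(key);
	if (ops == &rbtree_ops)
		return rbtree_lookup(key);
	return ops->lookup(key);   /* fallback: the (retpolined) indirect call */
}

int main(void)
{
	printf("%d %d\n", do_lookup(&hash_ops, 14), do_lookup(&rbtree_ops, 14));
	return 0;
}
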
+
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -33,8 +64,8 @@ void nft_lookup_eval(const struct nft_expr *expr,
const struct net *net = nft_net(pkt);
bool found;
- found = set->ops->lookup(net, set, &regs->data[priv->sreg], &ext) ^
- priv->invert;
+ found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext) ^
+ priv->invert;
if (!found) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 7e47edee88ee..94b2327e71dc 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -9,7 +9,7 @@
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
#define nft_objref_priv(expr) *((struct nft_object **)nft_expr_priv(expr))
@@ -110,7 +110,7 @@ static void nft_objref_map_eval(const struct nft_expr *expr,
struct nft_object *obj;
bool found;
- found = set->ops->lookup(net, set, &regs->data[priv->sreg], &ext);
+ found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext);
if (!found) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index ac61f708b82d..d82677e83400 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
struct nf_osf_data data;
struct tcphdr _tcph;
+ if (pkt->tprot != IPPROTO_TCP) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
tcp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(struct tcphdr), &_tcph);
if (!tcp) {
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 501c5b24cc39..a44b14f6c0dc 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -110,7 +110,7 @@ void nft_payload_eval(const struct nft_expr *expr,
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!pkt->tprot_set)
goto err;
- offset = pkt->xt.thoff;
+ offset = nft_thoff(pkt);
break;
default:
BUG();
@@ -507,7 +507,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
*l4csum_offset = offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
- if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
+ if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
return -1;
fallthrough;
case IPPROTO_UDPLITE:
@@ -520,7 +520,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
return -1;
}
- *l4csum_offset += pkt->xt.thoff;
+ *l4csum_offset += nft_thoff(pkt);
return 0;
}
@@ -612,7 +612,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!pkt->tprot_set)
goto err;
- offset = pkt->xt.thoff;
+ offset = nft_thoff(pkt);
break;
default:
BUG();
@@ -643,7 +643,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
pkt->tprot == IPPROTO_SCTP &&
skb->ip_summed != CHECKSUM_PARTIAL) {
- if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
+ if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
goto err;
}
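
The repeated pkt->xt.thoff accesses above all become nft_thoff(pkt), hiding the field behind an accessor now that xt_action_param no longer lives in the pktinfo. The helper is presumably just a getter over the relocated offset; a stand-in version under that assumption (the struct here is a stub, not the real nft_pktinfo):

#include <stdio.h>

struct nft_pktinfo_stub {
	unsigned int thoff;              /* transport header offset */
	/* ... */
};

static inline unsigned int nft_thoff(const struct nft_pktinfo_stub *pkt)
{
	return pkt->thoff;
}

int main(void)
{
	struct nft_pktinfo_stub pkt = { .thoff = 20 };

	printf("thoff=%u\n", nft_thoff(&pkt));
	return 0;
}
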
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 95090186ee90..554caf967baa 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -28,7 +28,7 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset(nft_net(pkt), pkt->xt.state->sk,
+ nf_send_reset(nft_net(pkt), nft_sk(pkt),
pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
@@ -45,7 +45,7 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
priv->icmp_code, nft_hook(pkt));
break;
case NFT_REJECT_TCP_RST:
- nf_send_reset6(nft_net(pkt), pkt->xt.state->sk,
+ nf_send_reset6(nft_net(pkt), nft_sk(pkt),
pkt->skb, nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 2a81ea421819..e7ae5914971e 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -73,8 +73,9 @@ nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
return (bitmap[idx] & (0x3 << off)) & (genmask << off);
}
-static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+INDIRECT_CALLABLE_SCOPE
+bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
const struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 7b3d0a78c569..df40314de21f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -74,8 +74,9 @@ static const struct rhashtable_params nft_rhash_params = {
.automatic_shrinking = true,
};
-static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+INDIRECT_CALLABLE_SCOPE
+bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_rhash *priv = nft_set_priv(set);
const struct nft_rhash_elem *he;
@@ -446,8 +447,9 @@ struct nft_hash_elem {
struct nft_set_ext ext;
};
-static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+INDIRECT_CALLABLE_SCOPE
+bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
@@ -484,9 +486,10 @@ static void *nft_hash_get(const struct net *net, const struct nft_set *set,
return ERR_PTR(-ENOENT);
}
-static bool nft_hash_lookup_fast(const struct net *net,
- const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+INDIRECT_CALLABLE_SCOPE
+bool nft_hash_lookup_fast(const struct net *net,
+ const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 528a2d7ca991..dce866d93fee 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -408,8 +408,8 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
*
* Return: true on match, false otherwise.
*/
-static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
unsigned long *res_map, *fill_map;
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index d65ae0e23028..e517663e0cd1 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -142,7 +142,6 @@ static void nft_pipapo_avx2_fill(unsigned long *data, int start, int len)
* @map: Bitmap to be scanned for set bits
* @dst: Destination bitmap
* @mt: Mapping table containing bit set specifiers
- * @len: Length of bitmap in longs
* @last: Return index of first set bit, if this is the last field
*
* This is an alternative implementation of pipapo_refill() suitable for usage
@@ -1109,7 +1108,7 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
* nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
* @net: Network namespace
* @set: nftables API set representation
- * @elem: nftables API element representation containing key data
+ * @key: nftables API element representation containing key data
* @ext: nftables API extension pointer, filled with matching reference
*
* For more details, see DOC: Theory of Operation in nft_set_pipapo.c.
@@ -1131,10 +1130,18 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
bool map_index;
int i, ret = 0;
+ if (unlikely(!irq_fpu_usable()))
+ return nft_pipapo_lookup(net, set, key, ext);
+
m = rcu_dereference(priv->match);
- /* This also protects access to all data related to scratch maps */
- kernel_fpu_begin();
+ /* This also protects access to all data related to scratch maps.
+ *
+ * Note that we don't need a valid MXCSR state for any of the
+ * operations we use here, so pass 0 as mask and spare a LDMXCSR
+ * instruction.
+ */
+ kernel_fpu_begin_mask(0);
scratch = *raw_cpu_ptr(m->scratch_aligned);
if (unlikely(!scratch)) {
diff --git a/net/netfilter/nft_set_pipapo_avx2.h b/net/netfilter/nft_set_pipapo_avx2.h
index 394bcb704db7..dbb6aaca8a7a 100644
--- a/net/netfilter/nft_set_pipapo_avx2.h
+++ b/net/netfilter/nft_set_pipapo_avx2.h
@@ -5,8 +5,6 @@
#include <asm/fpu/xstate.h>
#define NFT_PIPAPO_ALIGN (XSAVE_YMM_SIZE / BITS_PER_BYTE)
-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est);
#endif /* defined(CONFIG_X86_64) && !defined(CONFIG_UML) */
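
The pipapo_avx2 hunk above adds a bail-out to the scalar lookup when the FPU cannot be used, and skips the MXCSR load otherwise. In user space the analogous guard is a CPU feature check; __builtin_cpu_supports() is a real GCC/clang builtin, everything else in this sketch is invented:

#include <stdio.h>

static int lookup_scalar(int key) { return key & 1; }

static int lookup_avx2(int key)
{
	/* stand-in for the vectorized path */
	return key & 1;
}

static int do_lookup(int key)
{
	if (!__builtin_cpu_supports("avx2"))   /* like !irq_fpu_usable() */
		return lookup_scalar(key);
	return lookup_avx2(key);
}

int main(void)
{
	printf("%d\n", do_lookup(3));
	return 0;
}
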
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 9e36eb4a7429..d600a566da32 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -107,8 +107,9 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
return false;
}
-static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+INDIRECT_CALLABLE_SCOPE
+bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_rbtree *priv = nft_set_priv(set);
unsigned int seq = read_seqcount_begin(&priv->count);
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 4fda8b3f1762..a0109fa1e92d 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -109,7 +109,7 @@ static void nft_synproxy_do_eval(const struct nft_synproxy *priv,
{
struct synproxy_options opts = {};
struct sk_buff *skb = pkt->skb;
- int thoff = pkt->xt.thoff;
+ int thoff = nft_thoff(pkt);
const struct tcphdr *tcp;
struct tcphdr _tcph;
@@ -123,7 +123,7 @@ static void nft_synproxy_do_eval(const struct nft_synproxy *priv,
return;
}
- tcp = skb_header_pointer(skb, pkt->xt.thoff,
+ tcp = skb_header_pointer(skb, thoff,
sizeof(struct tcphdr),
&_tcph);
if (!tcp) {
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index accef672088c..b5b09a902c7a 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -30,6 +30,12 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
__be16 tport = 0;
struct sock *sk;
+ if (pkt->tprot != IPPROTO_TCP &&
+ pkt->tprot != IPPROTO_UDP) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
if (!hp) {
regs->verdict.code = NFT_BREAK;
@@ -82,16 +88,17 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
const struct nft_tproxy *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
const struct ipv6hdr *iph = ipv6_hdr(skb);
- struct in6_addr taddr;
- int thoff = pkt->xt.thoff;
+ int thoff = nft_thoff(pkt);
struct udphdr _hdr, *hp;
+ struct in6_addr taddr;
__be16 tport = 0;
struct sock *sk;
int l4proto;
memset(&taddr, 0, sizeof(taddr));
- if (!pkt->tprot_set) {
+ if (pkt->tprot != IPPROTO_TCP &&
+ pkt->tprot != IPPROTO_UDP) {
regs->verdict.code = NFT_BREAK;
return;
}
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index 9cdc16b0d0d8..b6a015aee0ce 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -117,7 +117,7 @@ static int audit_tg_check(const struct xt_tgchk_param *par)
const struct xt_audit_info *info = par->targinfo;
if (info->type > XT_AUDIT_TYPE_MAX) {
- pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n",
+ pr_info_ratelimited("Audit type out of range (valid range: 0..%u)\n",
XT_AUDIT_TYPE_MAX);
return -ERANGE;
}
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index d4deee39158b..12404d221026 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -172,7 +172,6 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
goto err2;
}
- ret = 0;
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
GFP_KERNEL)) {
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 24d4afb9988d..8b4fd27857f2 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -8,16 +8,14 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_limit.h>
struct xt_limit_priv {
- spinlock_t lock;
unsigned long prev;
- uint32_t credit;
+ u32 credit;
};
MODULE_LICENSE("GPL");
@@ -66,22 +64,31 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv = r->master;
- unsigned long now = jiffies;
-
- spin_lock_bh(&priv->lock);
- priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
- if (priv->credit > r->credit_cap)
- priv->credit = r->credit_cap;
-
- if (priv->credit >= r->cost) {
- /* We're not limited. */
- priv->credit -= r->cost;
- spin_unlock_bh(&priv->lock);
- return true;
- }
-
- spin_unlock_bh(&priv->lock);
- return false;
+ unsigned long now;
+ u32 old_credit, new_credit, credit_increase = 0;
+ bool ret;
+
+ /* fastpath if there is nothing to update */
+ if ((READ_ONCE(priv->credit) < r->cost) && (READ_ONCE(priv->prev) == jiffies))
+ return false;
+
+ do {
+ now = jiffies;
+ credit_increase += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
+ old_credit = READ_ONCE(priv->credit);
+ new_credit = old_credit;
+ new_credit += credit_increase;
+ if (new_credit > r->credit_cap)
+ new_credit = r->credit_cap;
+ if (new_credit >= r->cost) {
+ ret = true;
+ new_credit -= r->cost;
+ } else {
+ ret = false;
+ }
+ } while (cmpxchg(&priv->credit, old_credit, new_credit) != old_credit);
+
+ return ret;
}
/* Precision saver. */
@@ -122,7 +129,6 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
r->credit_cap = priv->credit; /* Credits full. */
r->cost = user2credits(r->avg);
}
- spin_lock_init(&priv->lock);
return 0;
}
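
The xt_limit rewrite above drops the per-rule spinlock in favour of a cmpxchg() retry loop on the credit counter: compute the refilled bucket, try to publish it, and redo the computation if another CPU raced in between. The same token-bucket logic with C11 atomics (the constants are arbitrary):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CREDIT_CAP 10
#define COST        3

static _Atomic unsigned int credit = CREDIT_CAP;

static bool limit_allow(unsigned int refill)
{
	unsigned int old, new;
	bool ok;

	old = atomic_load_explicit(&credit, memory_order_relaxed);
	do {
		new = old + refill;
		if (new > CREDIT_CAP)
			new = CREDIT_CAP;
		ok = new >= COST;
		if (ok)
			new -= COST;     /* spend credits only when allowed */
		/* on failure 'old' is refreshed and the loop recomputes */
	} while (!atomic_compare_exchange_weak(&credit, &old, new));

	return ok;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("packet %d: %s\n", i, limit_allow(1) ? "pass" : "drop");
	return 0;
}
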
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index f28c8947c730..91a19c3ea1a3 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -105,7 +105,7 @@ static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NLBL_CALIPSO_A_MTYPE])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
case CALIPSO_MAP_PASS:
ret_val = netlbl_calipso_add_pass(info, &audit_info);
@@ -287,7 +287,7 @@ static int netlbl_calipso_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_CALIPSO_A_DOI])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
cb_arg.doi = nla_get_u32(info->attrs[NLBL_CALIPSO_A_DOI]);
cb_arg.audit_info = &audit_info;
ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain,
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 4f50a64315cf..baf235721c43 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -410,7 +410,7 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NLBL_CIPSOV4_A_MTYPE])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
case CIPSO_V4_MAP_TRANS:
ret_val = netlbl_cipsov4_add_std(info, &audit_info);
@@ -709,7 +709,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_CIPSOV4_A_DOI])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
cb_arg.doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
cb_arg.audit_info = &audit_info;
ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain,
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index dc8c39f51f7d..8158a25972b4 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -929,7 +929,7 @@ struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
* @cb_arg: argument for the callback function
*
* Description:
- * Interate over the domain mapping hash table, skipping the first @skip_bkt
+ * Iterate over the domain mapping hash table, skipping the first @skip_bkt
* buckets and @skip_chain entries. For each entry in the table call
* @callback, if @callback returns a negative value stop 'walking' through the
* table and return. Updates the values in @skip_bkt and @skip_chain on
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 5e1239cef000..beb0e573266d 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -719,7 +719,7 @@ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
* it in @bitmap. The @offset must be aligned to an unsigned long and will be
* updated on return if different from what was requested; if the catmap is
* empty at the requested offset and beyond, the @offset is set to (u32)-1.
- * Returns zero on sucess, negative values on failure.
+ * Returns zero on success, negative values on failure.
*
*/
int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index ca52f5085989..032b7d7b32c7 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -76,6 +76,7 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
static int netlbl_mgmt_add_common(struct genl_info *info,
struct netlbl_audit *audit_info)
{
+ void *pmap = NULL;
int ret_val = -EINVAL;
struct netlbl_domaddr_map *addrmap = NULL;
struct cipso_v4_doi *cipsov4 = NULL;
@@ -175,6 +176,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_free_addrmap;
}
+ pmap = map;
map->list.addr = addr->s_addr & mask->s_addr;
map->list.mask = mask->s_addr;
map->list.valid = 1;
@@ -183,10 +185,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->def.cipso = cipsov4;
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
- if (ret_val != 0) {
- kfree(map);
- goto add_free_addrmap;
- }
+ if (ret_val != 0)
+ goto add_free_map;
entry->family = AF_INET;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -223,6 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_free_addrmap;
}
+ pmap = map;
map->list.addr = *addr;
map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
@@ -235,10 +236,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->def.calipso = calipso;
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
- if (ret_val != 0) {
- kfree(map);
- goto add_free_addrmap;
- }
+ if (ret_val != 0)
+ goto add_free_map;
entry->family = AF_INET6;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -248,10 +247,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = netlbl_domhsh_add(entry, audit_info);
if (ret_val != 0)
- goto add_free_addrmap;
+ goto add_free_map;
return 0;
+add_free_map:
+ kfree(pmap);
add_free_addrmap:
kfree(addrmap);
add_doi_put_def:
@@ -434,7 +435,7 @@ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
(info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL)))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_mgmt_add_common(info, &audit_info);
}
@@ -457,7 +458,7 @@ static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_MGMT_A_DOMAIN])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]);
return netlbl_domhsh_remove(domain, AF_UNSPEC, &audit_info);
@@ -557,7 +558,7 @@ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
(info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL)))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_mgmt_add_common(info, &audit_info);
}
@@ -576,7 +577,7 @@ static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info)
{
struct netlbl_audit audit_info;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_domhsh_remove_default(AF_UNSPEC, &audit_info);
}
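
The pmap changes above fix a leak in a goto-style unwind: once the map was linked into a list, the old code had no single owner left to free it on a later failure. Tracking the most recently allocated object in one pointer keeps every exit path correct. A skeleton of the pattern, with invented allocation steps:

#include <stdlib.h>

struct map { int dummy; };

static int add_entry(int fail_at)
{
	void *pmap = NULL;
	struct map *addrmap = NULL, *map = NULL;
	int ret = -1;

	addrmap = calloc(1, sizeof(*addrmap));
	if (!addrmap)
		goto out;

	map = calloc(1, sizeof(*map));
	if (!map)
		goto free_addrmap;
	pmap = map;                      /* remember the newest allocation */

	if (fail_at == 1)                /* e.g. list insertion failed */
		goto free_map;
	if (fail_at == 2)                /* e.g. hash insertion failed */
		goto free_map;

	/* success: in the kernel these are now owned elsewhere; here, clean up */
	free(map);
	free(addrmap);
	return 0;

free_map:
	free(pmap);
free_addrmap:
	free(addrmap);
out:
	return ret;
}

int main(void)
{
	add_entry(0);
	add_entry(2);
	return 0;
}
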
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 3e6ac9b790b1..2483df0bbd7c 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -814,7 +814,7 @@ static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NLBL_UNLABEL_A_ACPTFLG]) {
value = nla_get_u8(info->attrs[NLBL_UNLABEL_A_ACPTFLG]);
if (value == 1 || value == 0) {
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
netlbl_unlabel_acceptflg_set(value, &audit_info);
return 0;
}
@@ -897,7 +897,7 @@ static int netlbl_unlabel_staticadd(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -947,7 +947,7 @@ static int netlbl_unlabel_staticadddef(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -994,7 +994,7 @@ static int netlbl_unlabel_staticremove(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -1034,7 +1034,7 @@ static int netlbl_unlabel_staticremovedef(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index b9ba8112b3c5..6190cbf94bf0 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -28,11 +28,9 @@
/**
* netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg
- * @skb: the packet
* @audit_info: NetLabel audit information
*/
-static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
- struct netlbl_audit *audit_info)
+static inline void netlbl_netlink_auditinfo(struct netlbl_audit *audit_info)
{
security_task_getsecid_subj(current, &audit_info->secid);
audit_info->loginuid = audit_get_loginuid(current);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3a62f97acf39..d233ac4a91b6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -351,7 +351,7 @@ static void netlink_overrun(struct sock *sk)
if (!test_and_set_bit(NETLINK_S_CONGESTED,
&nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
atomic_inc(&sk->sk_drops);
@@ -461,11 +461,13 @@ void netlink_table_ungrab(void)
static inline void
netlink_lock_table(void)
{
+ unsigned long flags;
+
/* read_lock() synchronizes us to netlink_table_grab */
- read_lock(&nl_table_lock);
+ read_lock_irqsave(&nl_table_lock, flags);
atomic_inc(&nl_table_users);
- read_unlock(&nl_table_lock);
+ read_unlock_irqrestore(&nl_table_lock, flags);
}
static inline void
@@ -1574,7 +1576,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
}
sk->sk_err = p->code;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
out:
return ret;
}
@@ -2010,7 +2012,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = -ret;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
@@ -2437,7 +2439,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
if (!skb) {
NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
- NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk);
+ sk_error_report(NETLINK_CB(in_skb).sk);
return;
}
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 78da5eab252a..de0456073dc0 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -266,6 +266,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
fallthrough;
case 2:
re_sort_routes(nr_node, 0, 1);
+ break;
case 1:
break;
}
@@ -359,6 +360,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
fallthrough;
case 1:
nr_node->routes[1] = nr_node->routes[2];
+ fallthrough;
case 2:
break;
}
@@ -482,6 +484,7 @@ static int nr_dec_obs(void)
fallthrough;
case 1:
s->routes[1] = s->routes[2];
+ break;
case 2:
break;
}
@@ -529,6 +532,7 @@ void nr_rt_device_down(struct net_device *dev)
fallthrough;
case 1:
t->routes[1] = t->routes[2];
+ break;
case 2:
break;
}
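
Several nr_route.c hunks above replace implicit switch fall-through with either an explicit break or a fallthrough marker; that distinction is what lets -Wimplicit-fallthrough separate intended cascades from accidental ones. In plain C the marker is the attribute the kernel's fallthrough macro expands to:

#include <stdio.h>

static int routes_left(int count)
{
	int kept = count;

	switch (count) {
	case 3:
		kept--;                       /* drop one route */
		__attribute__((fallthrough)); /* intentional: keep cascading */
	case 2:
		kept--;
		break;                        /* explicit stop, not a typo */
	case 1:
		break;
	default:
		break;
	}
	return kept;
}

int main(void)
{
	printf("%d %d %d\n", routes_left(3), routes_left(2), routes_left(1));
	return 0;
}
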
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index e02b9befce0b..3a89bd9b89fc 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -34,7 +34,7 @@ static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
* HCI command execution completion callback.
* err will be a standard linux error (may be converted from HCI response)
* skb contains the response data and must be disposed, or may be NULL if
- * an error occured
+ * an error occurred
*/
static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
{
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 43811b5219b5..3481941be70b 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -705,7 +705,7 @@ static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
/*
* TODO: Check RF Error indicator to make sure data is valid.
* It seems that HCI cmd can complete without error, but data
- * can be invalid if an RF error occured? Ignore for now.
+ * can be invalid if an RF error occurred? Ignore for now.
*/
if (err == 0)
skb_trim(skb, skb->len - 1); /* RF Err ind */
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index c0c8fea3a186..1e3a90049da9 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -406,7 +406,7 @@ static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
case SHDLC_NEGOTIATING:
case SHDLC_CONNECTING:
/*
- * We sent RSET, but chip wants to negociate or we
+ * We sent RSET, but chip wants to negotiate or we
	 * got RSET before we managed to send out ours.
*/
if (skb->len > 0)
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 53dbe733f998..6cfd30fc0798 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (!llcp_sock->service_name) {
nfc_llcp_local_put(llcp_sock->local);
llcp_sock->local = NULL;
+ llcp_sock->dev = NULL;
ret = -ENOMEM;
goto put_dev;
}
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->local = NULL;
kfree(llcp_sock->service_name);
llcp_sock->service_name = NULL;
+ llcp_sock->dev = NULL;
ret = -EADDRINUSE;
goto put_dev;
}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9a585332ea84..da7fe9db1b00 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
void nci_free_device(struct nci_dev *ndev)
{
nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 6b275a387a92..d6732e5e8958 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -161,8 +161,6 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
*(u8 *)skb_push(skb, 1) = data_type;
do {
- len = conn_info->max_pkt_payload_len;
-
/* If last packet add NCI_HFP_NO_CHAINING */
if (i + conn_info->max_pkt_payload_len -
(skb->len + 1) >= data_len) {
@@ -792,3 +790,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
return hdev;
}
+
+void nci_hci_deallocate(struct nci_dev *ndev)
+{
+ kfree(ndev->hci_dev);
+}
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 9c7eb8455ba8..5e39640becdb 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -49,7 +49,7 @@ static void rawsock_report_error(struct sock *sk, int err)
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_err = -err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
rawsock_write_queue_purge(sk);
}
@@ -329,7 +329,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW) {
- if (!capable(CAP_NET_RAW))
+ if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
sock->ops = &rawsock_raw_ops;
} else {
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 41109c326f3a..28982630bef3 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -13,6 +13,7 @@ openvswitch-y := \
flow_netlink.o \
flow_table.o \
meter.o \
+ openvswitch_trace.o \
vport.o \
vport-internal_dev.o \
vport-netdev.o
@@ -24,3 +25,5 @@ endif
obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o
+
+CFLAGS_openvswitch_trace.o = -I$(src)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 77d924ab8cdb..ef15d9eb4774 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -30,6 +30,7 @@
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
+#include "openvswitch_trace.h"
struct deferred_action {
struct sk_buff *skb;
@@ -1242,6 +1243,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
a = nla_next(a, &rem)) {
int err = 0;
+ if (trace_ovs_do_execute_action_enabled())
+ trace_ovs_do_execute_action(dp, skb, key, a, rem);
+
switch (nla_type(a)) {
case OVS_ACTION_ATTR_OUTPUT: {
int port = nla_get_u32(a);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index cadb6a29b285..1b5eae57bc90 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -967,8 +967,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
/* Associate skb with specified zone. */
if (tmpl) {
- if (skb_nfct(skb))
- nf_conntrack_put(skb_nfct(skb));
+ nf_conntrack_put(skb_nfct(skb));
nf_conntrack_get(&tmpl->ct_general);
nf_ct_set(skb, tmpl, IP_CT_NEW);
}
@@ -1329,11 +1328,9 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
- if (skb_nfct(skb)) {
- nf_conntrack_put(skb_nfct(skb));
- nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
- ovs_ct_fill_key(skb, key, false);
- }
+ nf_conntrack_put(skb_nfct(skb));
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+ ovs_ct_fill_key(skb, key, false);
return 0;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 9d6ef6cb9b26..bc164b35e67d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -43,6 +43,7 @@
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
+#include "openvswitch_trace.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -275,6 +276,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
struct dp_stats_percpu *stats;
int err;
+ if (trace_ovs_dp_upcall_enabled())
+ trace_ovs_dp_upcall(dp, skb, key, upcall_info);
+
if (upcall_info->portid == 0) {
err = -ENOTCONN;
goto err;
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 96b524ceabca..896b8f5bc885 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -611,6 +611,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
spin_lock(&meter->lock);
long_delta_ms = (now_ms - meter->used); /* ms */
+ if (long_delta_ms < 0) {
+ /* This condition means that we have several threads fighting
+ * for a meter lock, and the one who received the packets a
+ * bit later wins. Assuming that all racing threads received
+ * packets at the same time to avoid overflow.
+ */
+ long_delta_ms = 0;
+ }
/* Make sure delta_ms will not be too large, so that bucket will not
* wrap around below.
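
A sketch of the clamp added above: when several threads race for the meter lock, a late-comer can observe now_ms earlier than the stored "used" stamp, so the signed delta must be floored at zero before it feeds the bucket refill.

#include <stdio.h>

static long refill_delta(long now_ms, long used_ms)
{
	long delta = now_ms - used_ms;

	if (delta < 0)       /* another thread already advanced 'used' */
		delta = 0;
	return delta;
}

int main(void)
{
	printf("%ld %ld\n", refill_delta(1005, 1000), refill_delta(1000, 1005));
	return 0;
}
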
diff --git a/net/openvswitch/openvswitch_trace.c b/net/openvswitch/openvswitch_trace.c
new file mode 100644
index 000000000000..62c5f7d6f023
--- /dev/null
+++ b/net/openvswitch/openvswitch_trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* bug in tracepoint.h, it should include this */
+#include <linux/module.h>
+
+/* sparse isn't too happy with all macros... */
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "openvswitch_trace.h"
+
+#endif
diff --git a/net/openvswitch/openvswitch_trace.h b/net/openvswitch/openvswitch_trace.h
new file mode 100644
index 000000000000..3eb35d9eb700
--- /dev/null
+++ b/net/openvswitch/openvswitch_trace.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM openvswitch
+
+#if !defined(_TRACE_OPENVSWITCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OPENVSWITCH_H
+
+#include <linux/tracepoint.h>
+
+#include "datapath.h"
+
+TRACE_EVENT(ovs_do_execute_action,
+
+ TP_PROTO(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key, const struct nlattr *a, int rem),
+
+ TP_ARGS(dp, skb, key, a, rem),
+
+ TP_STRUCT__entry(
+ __field( void *, dpaddr )
+ __string( dp_name, ovs_dp_name(dp) )
+ __string( dev_name, skb->dev->name )
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __field( unsigned int, data_len )
+ __field( unsigned int, truesize )
+ __field( u8, nr_frags )
+ __field( u16, gso_size )
+ __field( u16, gso_type )
+ __field( u32, ovs_flow_hash )
+ __field( u32, recirc_id )
+ __field( void *, keyaddr )
+ __field( u16, key_eth_type )
+ __field( u8, key_ct_state )
+ __field( u8, key_ct_orig_proto )
+ __field( u16, key_ct_zone )
+ __field( unsigned int, flow_key_valid )
+ __field( u8, action_type )
+ __field( unsigned int, action_len )
+ __field( void *, action_data )
+ __field( u8, is_last )
+ ),
+
+ TP_fast_assign(
+ __entry->dpaddr = dp;
+ __assign_str(dp_name, ovs_dp_name(dp));
+ __assign_str(dev_name, skb->dev->name);
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->truesize = skb->truesize;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->ovs_flow_hash = key->ovs_flow_hash;
+ __entry->recirc_id = key->recirc_id;
+ __entry->keyaddr = key;
+ __entry->key_eth_type = key->eth.type;
+ __entry->key_ct_state = key->ct_state;
+ __entry->key_ct_orig_proto = key->ct_orig_proto;
+ __entry->key_ct_zone = key->ct_zone;
+ __entry->flow_key_valid = !(key->mac_proto & SW_FLOW_KEY_INVALID);
+ __entry->action_type = nla_type(a);
+ __entry->action_len = nla_len(a);
+ __entry->action_data = nla_data(a);
+ __entry->is_last = nla_is_last(a, rem);
+ ),
+
+ TP_printk("dpaddr=%p dp_name=%s dev=%s skbaddr=%p len=%u data_len=%u truesize=%u nr_frags=%d gso_size=%d gso_type=%#x ovs_flow_hash=0x%08x recirc_id=0x%08x keyaddr=%p eth_type=0x%04x ct_state=%02x ct_orig_proto=%02x ct_Zone=%04x flow_key_valid=%d action_type=%u action_len=%u action_data=%p is_last=%d",
+ __entry->dpaddr, __get_str(dp_name), __get_str(dev_name),
+ __entry->skbaddr, __entry->len, __entry->data_len,
+ __entry->truesize, __entry->nr_frags, __entry->gso_size,
+ __entry->gso_type, __entry->ovs_flow_hash,
+ __entry->recirc_id, __entry->keyaddr, __entry->key_eth_type,
+ __entry->key_ct_state, __entry->key_ct_orig_proto,
+ __entry->key_ct_zone,
+ __entry->flow_key_valid,
+ __entry->action_type, __entry->action_len,
+ __entry->action_data, __entry->is_last)
+);
+
+TRACE_EVENT(ovs_dp_upcall,
+
+ TP_PROTO(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_key *key,
+ const struct dp_upcall_info *upcall_info),
+
+ TP_ARGS(dp, skb, key, upcall_info),
+
+ TP_STRUCT__entry(
+ __field( void *, dpaddr )
+ __string( dp_name, ovs_dp_name(dp) )
+ __string( dev_name, skb->dev->name )
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __field( unsigned int, data_len )
+ __field( unsigned int, truesize )
+ __field( u8, nr_frags )
+ __field( u16, gso_size )
+ __field( u16, gso_type )
+ __field( u32, ovs_flow_hash )
+ __field( u32, recirc_id )
+ __field( const void *, keyaddr )
+ __field( u16, key_eth_type )
+ __field( u8, key_ct_state )
+ __field( u8, key_ct_orig_proto )
+ __field( u16, key_ct_zone )
+ __field( unsigned int, flow_key_valid )
+ __field( u8, upcall_cmd )
+ __field( u32, upcall_port )
+ __field( u16, upcall_mru )
+ ),
+
+ TP_fast_assign(
+ __entry->dpaddr = dp;
+ __assign_str(dp_name, ovs_dp_name(dp));
+ __assign_str(dev_name, skb->dev->name);
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->truesize = skb->truesize;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->ovs_flow_hash = key->ovs_flow_hash;
+ __entry->recirc_id = key->recirc_id;
+ __entry->keyaddr = key;
+ __entry->key_eth_type = key->eth.type;
+ __entry->key_ct_state = key->ct_state;
+ __entry->key_ct_orig_proto = key->ct_orig_proto;
+ __entry->key_ct_zone = key->ct_zone;
+ __entry->flow_key_valid = !(key->mac_proto & SW_FLOW_KEY_INVALID);
+ __entry->upcall_cmd = upcall_info->cmd;
+ __entry->upcall_port = upcall_info->portid;
+ __entry->upcall_mru = upcall_info->mru;
+ ),
+
+ TP_printk("dpaddr=%p dp_name=%s dev=%s skbaddr=%p len=%u data_len=%u truesize=%u nr_frags=%d gso_size=%d gso_type=%#x ovs_flow_hash=0x%08x recirc_id=0x%08x keyaddr=%p eth_type=0x%04x ct_state=%02x ct_orig_proto=%02x ct_zone=%04x flow_key_valid=%d upcall_cmd=%u upcall_port=%u upcall_mru=%u",
+ __entry->dpaddr, __get_str(dp_name), __get_str(dev_name),
+ __entry->skbaddr, __entry->len, __entry->data_len,
+ __entry->truesize, __entry->nr_frags, __entry->gso_size,
+ __entry->gso_type, __entry->ovs_flow_hash,
+ __entry->recirc_id, __entry->keyaddr, __entry->key_eth_type,
+ __entry->key_ct_state, __entry->key_ct_orig_proto,
+ __entry->key_ct_zone,
+ __entry->flow_key_valid,
+ __entry->upcall_cmd, __entry->upcall_port,
+ __entry->upcall_mru)
+);
+
+#endif /* _TRACE_OPENVSWITCH_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE openvswitch_trace
+#include <trace/define_trace.h>
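
The call sites for these events (see the actions.c and datapath.c hunks earlier) test trace_..._enabled() before invoking the tracepoint, so the argument marshalling costs nothing when tracing is off. A user-space rendition of that guard, with the kernel's static branch replaced by a plain flag:

#include <stdbool.h>
#include <stdio.h>

static bool trace_enabled;               /* kernel: static branch, ~free */

static void trace_do_execute_action(int action_type, unsigned int len)
{
	fprintf(stderr, "action type=%d len=%u\n", action_type, len);
}

static void do_execute_action(int action_type, unsigned int len)
{
	if (trace_enabled)               /* guard: skip arg setup when off */
		trace_do_execute_action(action_type, len);
	/* ... actual action processing ... */
}

int main(void)
{
	do_execute_action(1, 64);        /* silent */
	trace_enabled = true;
	do_execute_action(2, 128);       /* traced */
	return 0;
}
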
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba96db1880ea..57a1971f29e5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -422,7 +422,8 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
return TP_STATUS_TS_RAW_HARDWARE;
- if (ktime_to_timespec64_cond(skb->tstamp, ts))
+ if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
+ ktime_to_timespec64_cond(skb->tstamp, ts))
return TP_STATUS_TS_SOFTWARE;
return 0;
@@ -1655,6 +1656,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
case PACKET_FANOUT_ROLLOVER:
if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
return -EINVAL;
+ break;
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
@@ -2340,7 +2342,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
- if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
+ /* Always timestamp; prefer an existing software timestamp taken
+ * closer to the time of capture.
+ */
+ ts_status = tpacket_get_timestamp(skb, &ts,
+ po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
+ if (!ts_status)
ktime_get_real_ts64(&ts);
status |= ts_status;
@@ -2677,7 +2684,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2890,7 +2897,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -3028,10 +3035,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- if (po->tx_ring.pg_vec)
+ /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+ * tpacket_snd() will redo the check safely.
+ */
+ if (data_race(po->tx_ring.pg_vec))
return tpacket_snd(po, msg);
- else
- return packet_snd(sock, msg, len);
+
+ return packet_snd(sock, msg, len);
}
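
The af_packet hunks wrap po->num and po->ifindex in READ_ONCE()/WRITE_ONCE() and mark the lockless pg_vec test data_race(), documenting that the concurrent plain accesses are intended (and silencing KCSAN). The closest portable analogue is a relaxed C11 atomic, sketched below with an invented protocol field:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned short proto_num;

static void bind_proto(unsigned short proto)    /* writer, under a lock */
{
	atomic_store_explicit(&proto_num, proto, memory_order_relaxed);
}

static unsigned short peek_proto(void)          /* lockless reader */
{
	return atomic_load_explicit(&proto_num, memory_order_relaxed);
}

int main(void)
{
	bind_proto(0x0800);
	printf("proto=0x%04x\n", peek_proto());
	return 0;
}
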
/*
@@ -3162,7 +3172,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
/* prevents packet_notifier() from calling
* register_prot_hook()
*/
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
@@ -3172,17 +3182,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
}
BUG_ON(po->running);
- po->num = proto;
+ WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
if (unlikely(unlisted)) {
dev_put(dev);
po->prot_hook.dev = NULL;
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
po->prot_hook.dev = dev;
- po->ifindex = dev ? dev->ifindex : 0;
+ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
}
}
@@ -3197,7 +3207,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
out_unlock:
@@ -3496,7 +3506,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
uaddr->sa_family = AF_PACKET;
memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
@@ -3511,16 +3521,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
+ int ifindex;
if (peer)
return -EOPNOTSUPP;
+ ifindex = READ_ONCE(po->ifindex);
sll->sll_family = AF_PACKET;
- sll->sll_ifindex = po->ifindex;
- sll->sll_protocol = po->num;
+ sll->sll_ifindex = ifindex;
+ sll->sll_protocol = READ_ONCE(po->num);
sll->sll_pkttype = 0;
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
@@ -3923,12 +3935,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
return -EFAULT;
lock_sock(sk);
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
- ret = -EBUSY;
- } else {
+ if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
po->tp_tx_has_off = !!val;
- ret = 0;
- }
+
release_sock(sk);
return 0;
}
@@ -4095,11 +4104,11 @@ static int packet_notifier(struct notifier_block *this,
__unregister_prot_hook(sk, false);
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@@ -4405,7 +4414,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
was_running = po->running;
num = po->num;
if (was_running) {
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
@@ -4440,7 +4449,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
spin_lock(&po->bind_lock);
if (was_running) {
- po->num = num;
+ WRITE_ONCE(po->num, num);
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
@@ -4610,8 +4619,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
s,
refcount_read(&s->sk_refcnt),
s->sk_type,
- ntohs(po->num),
- po->ifindex,
+ ntohs(READ_ONCE(po->num)),
+ READ_ONCE(po->ifindex),
po->running,
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 8d00dfe8139e..1990d496fcfc 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -775,8 +775,10 @@ int qrtr_ns_init(void)
}
qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
- if (!qrtr_ns.workqueue)
+ if (!qrtr_ns.workqueue) {
+ ret = -ENOMEM;
goto err_sock;
+ }
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c0477bec09bd..e6f4a6202f82 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -436,7 +436,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
- unsigned int size;
+ size_t size;
unsigned int ver;
size_t hdrlen;
@@ -751,7 +751,7 @@ static void qrtr_reset_ports(void)
xa_for_each_start(&qrtr_ports, index, ipc, 1) {
sock_hold(&ipc->sk);
ipc->sk.sk_err = ENETRESET;
- ipc->sk.sk_error_report(&ipc->sk);
+ sk_error_report(&ipc->sk);
sock_put(&ipc->sk);
}
rcu_read_unlock();
diff --git a/net/rds/connection.c b/net/rds/connection.c
index f2fcab182095..a3bc4b54d491 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -240,12 +240,23 @@ static struct rds_connection *__rds_conn_create(struct net *net,
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
- if (is_outgoing && trans->t_prefer_loopback) {
- /* "outgoing" connection - and the transport
- * says it wants the connection handled by the
- * loopback transport. This is what TCP does.
- */
- trans = &rds_loop_transport;
+ if (trans->t_prefer_loopback) {
+ if (likely(is_outgoing)) {
+ /* "outgoing" connection to local address.
+ * Protocol says it wants the connection
+ * handled by the loopback transport.
+ * This is what TCP does.
+ */
+ trans = &rds_loop_transport;
+ } else {
+ /* No transport currently in use
+ * should end up here, but if it
+ * does, reset/destroy the connection.
+ */
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
}
}
diff --git a/net/rds/ib_ring.c b/net/rds/ib_ring.c
index ff97e8eda858..006b2e441418 100644
--- a/net/rds/ib_ring.c
+++ b/net/rds/ib_ring.c
@@ -141,7 +141,7 @@ int rds_ib_ring_low(struct rds_ib_work_ring *ring)
}
/*
- * returns the oldest alloced ring entry. This will be the next one
+ * returns the oldest allocated ring entry. This will be the next one
* freed. This can't be called if there are none allocated.
*/
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring)
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 4db109fb6ec2..5b426dc3634d 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (rds_cmsg_recv(inc, msg, rs)) {
ret = -EFAULT;
- goto out;
+ break;
}
rds_recvmsg_zcookie(rs, msg);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 43db0eca911f..abf19c0e3ba0 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -313,8 +313,8 @@ out:
}
#endif
-static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
- __u32 scope_id)
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id)
{
struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index bad9cf49d565..dc8d745d6857 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -59,7 +59,8 @@ u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
void rds_tcp_accept_work(struct sock *sk);
-
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id);
/* tcp_connect.c */
int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 4e64598176b0..5461d77fff4f 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -78,6 +78,7 @@ void rds_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
rds_conn_path_drop(cp, false);
+ break;
default:
break;
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 101cf14215a0..09cadd556d1e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -167,6 +167,12 @@ int rds_tcp_accept_one(struct socket *sock)
}
#endif
+ if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
+ /* local address connection is only allowed via loopback */
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
conn = rds_conn_create(sock_net(sock->sk),
my_addr, peer_addr,
&rds_tcp_transport, 0, GFP_KERNEL, dev_if);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 42c5ff1eda95..f4ee13da90c7 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -177,7 +177,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
goto out;
}
tc->t_tinc = tinc;
- rdsdebug("alloced tinc %p\n", tinc);
+ rdsdebug("allocated tinc %p\n", tinc);
rds_inc_path_init(&tinc->ti_inc, cp,
&cp->cp_conn->c_faddr);
tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 32dc50f0a303..1f424cbfcbb4 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -208,6 +208,7 @@ void rds_send_worker(struct work_struct *work)
case -ENOMEM:
rds_stats_inc(s_send_delayed_retry);
queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
+ break;
default:
break;
}
@@ -232,6 +233,7 @@ void rds_recv_worker(struct work_struct *work)
case -ENOMEM:
rds_stats_inc(s_recv_delayed_retry);
queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
+ break;
default:
break;
}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 41671af6b33f..2b5f89713e36 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -471,6 +471,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
switch (rx->sk.sk_state) {
case RXRPC_UNBOUND:
rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+ break;
case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
break;
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index 3ce6d628cd75..19e929c7c38b 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -77,7 +77,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
}
/*
- * Process event packets targetted at a local endpoint.
+ * Process event packets targeted at a local endpoint.
*/
void rxrpc_process_local_events(struct rxrpc_local *local)
{
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f6d5755d669e..d17a66aab8ee 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -381,7 +381,8 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
}
mutex_unlock(&idrinfo->lock);
- if (nla_put_u32(skb, TCA_FCNT, n_i))
+ ret = nla_put_u32(skb, TCA_FCNT, n_i);
+ if (ret)
goto nla_put_failure;
nla_nest_end(skb, nest);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index e48e980c3b93..e409a0005717 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -43,7 +43,6 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
tcf_lastuse_update(&prog->tcf_tm);
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
- rcu_read_lock();
filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
@@ -56,7 +55,6 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
}
if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
skb_orphan(skb);
- rcu_read_unlock();
/* A BPF program may overwrite the default action opcode.
* Similarly as in cls_bpf, if filter_res == -1 we use the
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ec7a1c438df9..a656baa321fe 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
}
err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
- if (err == NF_ACCEPT &&
- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
- if (maniptype == NF_NAT_MANIP_SRC)
- maniptype = NF_NAT_MANIP_DST;
- else
- maniptype = NF_NAT_MANIP_SRC;
-
- err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+ if (ct->status & IPS_SRC_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ct_nat_execute(skb, ct, ctinfo, range,
+ maniptype);
+ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ err = ct_nat_execute(skb, ct, ctinfo, NULL,
+ NF_NAT_MANIP_SRC);
+ }
}
return err;
#else
@@ -984,7 +989,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
if (!cached) {
- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+ if (tcf_ct_flow_table_lookup(p, skb, family)) {
skip_add = true;
goto do_nat;
}
@@ -1022,10 +1027,11 @@ do_nat:
* even if the connection is already confirmed.
*/
nf_conntrack_confirm(skb);
- } else if (!skip_add) {
- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
}
+ if (!skip_add)
+ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
out_push:
skb_push_rcsum(skb, nh_ofs);
@@ -1202,9 +1208,6 @@ static int tcf_ct_fill_params(struct net *net,
sizeof(p->zone));
}
- if (p->zone == NF_CT_DEFAULT_ZONE_ID)
- return 0;
-
nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
if (!tmpl) {
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 1cac3c6fbb49..71f2015c70ca 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -70,7 +70,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
/* replace the vid */
tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
/* replace prio bits, if tcfv_push_prio specified */
- if (p->tcfv_push_prio) {
+ if (p->tcfv_push_prio_exists) {
tci &= ~VLAN_PRIO_MASK;
tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
}
@@ -121,6 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, vlan_net_id);
struct nlattr *tb[TCA_VLAN_MAX + 1];
struct tcf_chain *goto_ch = NULL;
+ bool push_prio_exists = false;
struct tcf_vlan_params *p;
struct tc_vlan *parm;
struct tcf_vlan *v;
@@ -189,7 +190,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
push_proto = htons(ETH_P_8021Q);
}
- if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
+ push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
+ if (push_prio_exists)
push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
break;
case TCA_VLAN_ACT_POP_ETH:
@@ -241,6 +243,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
p->tcfv_action = action;
p->tcfv_push_vid = push_vid;
p->tcfv_push_prio = push_prio;
+ p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
p->tcfv_push_proto = push_proto;
if (action == TCA_VLAN_ACT_PUSH_ETH) {
@@ -304,8 +307,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
p->tcfv_push_proto) ||
- (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
- p->tcfv_push_prio))))
+ (p->tcfv_push_prio_exists &&
+ nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY, p->tcfv_push_prio))))
goto nla_put_failure;
if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 40fbea626dfd..d73b5c5514a9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1531,7 +1531,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
- const int max_reclassify_loop = 4;
+ const int max_reclassify_loop = 16;
const struct tcf_proto *first_tp;
int limit = 0;
@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
/* If we missed on some chain */
if (ret == TC_ACT_UNSPEC && last_executed_chain) {
- ext = skb_ext_add(skb, TC_SKB_EXT);
+ ext = tc_skb_ext_alloc(skb);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
ext->chain = last_executed_chain;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 6e3e63db0e01..fa739efa59f4 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -85,8 +85,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog;
int ret = -1;
- /* Needed here for accessing maps. */
- rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
int filter_res;
@@ -131,7 +129,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
break;
}
- rcu_read_unlock();
return ret;
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 2e288f88ff02..27a4b6dbcf57 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -7,7 +7,7 @@
/*
Comparing to general packet classification problem,
- RSVP needs only sevaral relatively simple rules:
+ RSVP needs only several relatively simple rules:
* (dst, protocol) are always specified,
so that we are able to hash them.
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index c4007b9cd16d..5b274534264c 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
int i, err = 0;
cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!cp->perfect)
return -ENOMEM;
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index f885bea5b452..4ce681361851 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -141,7 +141,7 @@ errout:
EXPORT_SYMBOL(tcf_em_register);
/**
- * tcf_em_unregister - unregster and extended match
+ * tcf_em_unregister - unregister an extended match
*
* @ops: ematch operations lookup table
*
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7d37638ee1c7..951542843cab 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
}
tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
- if (!tcph)
+ if (!tcph || tcph->doff < 5)
return NULL;
return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
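
Both sch_cake option-walk hunks above add the same guard: before reading the opsize byte, at least two bytes (kind + length) must remain, or a truncated TCP option list would be read past its end. A minimal userspace reduction of that walk -- plain C with made-up sample data, not kernel code -- looks like this:

#include <stdint.h>
#include <stdio.h>

static void walk_tcp_options(const uint8_t *ptr, int length)
{
	while (length > 0) {
		unsigned int opcode = *ptr++;
		unsigned int opsize;

		if (opcode == 0)	/* TCPOPT_EOL: end of option list */
			break;
		if (opcode == 1) {	/* TCPOPT_NOP: one byte, keep going */
			length--;
			continue;
		}
		if (length < 2)		/* the added guard: no room for opsize */
			break;
		opsize = *ptr++;
		if (opsize < 2 || opsize > (unsigned int)length)
			break;		/* malformed or truncated option */
		printf("option kind %u, size %u\n", opcode, opsize);
		ptr += opsize - 2;
		length -= opsize;
	}
}

int main(void)
{
	/* NOP, NOP, then a truncated option: kind 8 with no length byte */
	const uint8_t opts[] = { 1, 1, 8 };

	walk_tcp_options(opts, sizeof(opts));
	return 0;
}

Without the length < 2 check, the third iteration would read one byte past the end of opts[]; with it, the walk stops cleanly.
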
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
/* List of known Diffserv codepoints:
*
- * Least Effort (CS1)
+ * Least Effort (CS1, LE)
* Best Effort (CS0)
* Max Reliability & LLT "Lo" (TOS1)
* Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Total 25 codepoints.
*/
-/* List of traffic classes in RFC 4594:
+/* List of traffic classes in RFC 4594, updated by RFC 8622:
* (roughly descending order of contended priority)
* (roughly ascending order of uncontended throughput)
*
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Ops, Admin, Management (CS2,TOS1) - eg. ssh
* Standard Service (CS0 & unrecognised codepoints)
* High Throughput Data (AF1x,TOS2) - eg. web traffic
- * Low Priority Data (CS1) - eg. BitTorrent
+ * Low Priority Data (CS1,LE) - eg. BitTorrent
* Total 12 traffic classes.
*/
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
* Video Streaming (AF4x, AF3x, CS3)
* Bog Standard (CS0 etc.)
* High Throughput (AF1x, TOS2)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 8 traffic classes.
*/
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
* Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
* Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
* Best Effort (CS0, AF1x, TOS2, and those not specified)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 4 traffic classes.
*/
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
static int cake_config_diffserv3(struct Qdisc *sch)
{
/* Simplified Diffserv structure with 3 tins.
- * Low Priority (CS1)
+ * Low Priority (CS1, LE)
* Best Effort
* Latency Sensitive (TOS4, VA, EF, CS6, CS7)
*/
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index cd2748e2d4a2..d320bcfb2da2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- qdisc_reset(p->q);
+ if (p->q)
+ qdisc_reset(p->q);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 949163fe68af..cac684952edc 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Classifies packet into corresponding flow */
idx = fq_pie_classify(skb, sch, &ret);
- sel_flow = &q->flows[idx];
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ qdisc_qstats_drop(sch);
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
+ idx--;
+ sel_flow = &q->flows[idx];
/* Checks whether adding a new packet would exceed memory limit */
get_pie_cb(skb)->mem_usage = skb->truesize;
memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
goto flow_error;
}
q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
- if (!q->flows_cnt || q->flows_cnt >= 65536) {
+ if (!q->flows_cnt || q->flows_cnt > 65536) {
NL_SET_ERR_MSG_MOD(extack,
- "Number of flows must range in [1..65535]");
+ "Number of flows must range in [1..65536]");
goto flow_error;
}
}
@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
- u16 idx;
+ u32 idx;
root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
int err;
- u16 idx;
+ u32 idx;
pie_params_init(&q->p_params);
sch->limit = 10 * 1024;
@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static void fq_pie_reset(struct Qdisc *sch)
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
- u16 idx;
+ u32 idx;
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
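
The enqueue hunk above moves fq_pie_classify() to a 1-based result so that 0 can signal "drop", and the caller decrements before indexing q->flows[]; that is also why idx widens from u16 to u32 once up to 65536 flows are allowed. A toy reduction of the convention (userspace C, invented hash value):

#include <stdio.h>

#define FLOWS_CNT 1024u			/* stand-in for q->flows_cnt */

/* returns 0 for "drop", or a 1-based flow number in 1..FLOWS_CNT */
static unsigned int classify(unsigned int hash)
{
	return (hash % FLOWS_CNT) + 1;
}

int main(void)
{
	unsigned int idx = classify(4242);

	if (idx == 0) {			/* classifier requested a drop */
		puts("drop");
		return 0;
	}
	idx--;				/* back to a 0-based array index */
	printf("enqueue into flows[%u]\n", idx);
	return 0;
}
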
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 44991ea726fc..d9ac60ffe927 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,6 +35,27 @@
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+ const struct netdev_queue *txq)
+{
+ clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+ /* Make sure the below netif_xmit_frozen_or_stopped()
+ * checking happens after clearing STATE_MISSED.
+ */
+ smp_mb__after_atomic();
+
+ /* Re-check netif_xmit_frozen_or_stopped(): the clear_bit()
+ * above may have just erased a STATE_MISSED that was set by
+ * netif_tx_wake_queue()'s rescheduling of net_tx_action(),
+ * so derive the bit from the queue state again.
+ */
+ if (!netif_xmit_frozen_or_stopped(txq))
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+ else
+ set_bit(__QDISC_STATE_DRAINING, &q->state);
+}
+
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
@@ -74,6 +95,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
}
} else {
skb = SKB_XOFF_MAGIC;
+ qdisc_maybe_clear_missed(q, txq);
}
}
@@ -144,9 +166,13 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
skb = next;
}
- if (lock)
+
+ if (lock) {
spin_unlock(lock);
- __netif_schedule(q);
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+ } else {
+ __netif_schedule(q);
+ }
}
static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -242,6 +268,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
}
} else {
skb = NULL;
+ qdisc_maybe_clear_missed(q, txq);
}
if (lock)
spin_unlock(lock);
@@ -251,8 +278,10 @@ validate:
*validate = true;
if ((q->flags & TCQ_F_ONETXQUEUE) &&
- netif_xmit_frozen_or_stopped(txq))
+ netif_xmit_frozen_or_stopped(txq)) {
+ qdisc_maybe_clear_missed(q, txq);
return skb;
+ }
skb = qdisc_dequeue_skb_bad_txq(q);
if (unlikely(skb)) {
@@ -311,6 +340,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ else
+ qdisc_maybe_clear_missed(q, txq);
HARD_TX_UNLOCK(dev, txq);
} else {
@@ -384,7 +415,11 @@ void __qdisc_run(struct Qdisc *q)
while (qdisc_restart(q, &packets)) {
quota -= packets;
if (quota <= 0) {
- __netif_schedule(q);
+ if (q->flags & TCQ_F_NOLOCK)
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+ else
+ __netif_schedule(q);
+
break;
}
}
@@ -515,6 +550,24 @@ void netif_carrier_off(struct net_device *dev)
}
EXPORT_SYMBOL(netif_carrier_off);
+/**
+ * netif_carrier_event - report carrier state event
+ * @dev: network device
+ *
+ * Device has detected a carrier event but the carrier state wasn't changed.
+ * Use in drivers when querying carrier state asynchronously, to avoid missing
+ * events (link flaps) if link recovers before it's queried.
+ */
+void netif_carrier_event(struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_UNINITIALIZED)
+ return;
+ atomic_inc(&dev->carrier_up_count);
+ atomic_inc(&dev->carrier_down_count);
+ linkwatch_fire_event(dev);
+}
+EXPORT_SYMBOL_GPL(netif_carrier_event);
+
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
@@ -640,8 +693,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct sk_buff *skb = NULL;
+ bool need_retry = true;
int band;
+retry:
for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
struct skb_array *q = band2list(priv, band);
@@ -652,8 +707,24 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
}
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
- } else {
- WRITE_ONCE(qdisc->empty, true);
+ } else if (need_retry &&
+ READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
+ /* Delay clearing the STATE_MISSED here to reduce
+ * the overhead of the second spin_trylock() in
+ * qdisc_run_begin() and __netif_schedule() calling
+ * in qdisc_run_end().
+ */
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
+
+ /* Make sure dequeuing happens after clearing
+ * STATE_MISSED.
+ */
+ smp_mb__after_atomic();
+
+ need_retry = false;
+
+ goto retry;
}
return skb;
@@ -854,7 +925,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
- sch->empty = true;
dev_hold(dev);
refcount_set(&sch->refcnt, 1);
@@ -1158,8 +1228,11 @@ static void dev_reset_queue(struct net_device *dev,
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock)
+ if (nolock) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
spin_unlock_bh(&qdisc->seqlock);
+ }
}
static bool some_qdisc_is_busy(struct net_device *dev)
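
qdisc_maybe_clear_missed() above is a clear-then-recheck pattern: drop the MISSED bit, force ordering, then re-derive the bit from the queue state so a setter racing with the clear is never lost. A hypothetical userspace reduction with C11 atomics (names invented, ordering simplified to seq_cst):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool missed;
static atomic_bool queue_stopped;

static void maybe_clear_missed(void)
{
	atomic_store(&missed, false);

	/* full fence, standing in for smp_mb__after_atomic() */
	atomic_thread_fence(memory_order_seq_cst);

	/* Re-check the queue: if it is runnable, restore the flag so a
	 * concurrent waker's set is not swallowed by the clear above.
	 */
	if (!atomic_load(&queue_stopped))
		atomic_store(&missed, true);
}

int main(void)
{
	atomic_store(&queue_stopped, false);
	atomic_store(&missed, true);
	maybe_clear_missed();
	printf("missed=%d\n", atomic_load(&missed));
	return 0;
}
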
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index f4132dc25ac0..621dc6afde8f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -6,7 +6,7 @@
*
* 991129: - Bug fix with grio mode
* - a better sing. AvgQ mode with Grio(WRED)
- * - A finer grained VQ dequeue based on sugestion
+ * - A finer grained VQ dequeue based on suggestion
* from Ren Liu
* - More error checks
*
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 081c11d5717c..5f7ac27a5264 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -52,7 +52,7 @@
*/
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
@@ -273,6 +273,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
/**
* htb_add_to_id_tree - adds class to the round robin list
+ * @root: the root of the tree
+ * @cl: the class to add
+ * @prio: the given prio in class
*
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
@@ -298,6 +301,9 @@ static void htb_add_to_id_tree(struct rb_root *root,
/**
* htb_add_to_wait_tree - adds class to the event queue with delay
+ * @q: the priority event queue
+ * @cl: the class to add
+ * @delay: delay in microseconds
*
* The class is added to priority event queue to indicate that class will
* change its mode in cl->pq_key microseconds. Make sure that class is not
@@ -331,6 +337,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
/**
* htb_next_rb_node - finds next node in binary tree
+ * @n: the current node in binary tree
*
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
@@ -342,6 +349,9 @@ static inline void htb_next_rb_node(struct rb_node **n)
/**
* htb_add_class_to_row - add class to its row
+ * @q: the priority event queue
+ * @cl: the class to add
+ * @mask: the given priorities of the class, as a bitmap
*
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
@@ -371,6 +381,9 @@ static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
/**
* htb_remove_class_from_row - removes class from its row
+ * @q: the priority event queue
+ * @cl: the class to remove
+ * @mask: the given priorities of the class, as a bitmap
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
@@ -398,6 +411,8 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
/**
* htb_activate_prios - creates active classe's feed chain
+ * @q: the priority event queue
+ * @cl: the class to activate
*
* The class is connected to ancestors and/or appropriate rows
* for priorities it is participating on. cl->cmode must be new
@@ -433,6 +448,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
/**
* htb_deactivate_prios - remove class from feed chain
+ * @q: the priority event queue
+ * @cl: the class to deactivate
*
* cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
@@ -493,6 +510,8 @@ static inline s64 htb_hiwater(const struct htb_class *cl)
/**
* htb_class_mode - computes and returns current class mode
+ * @cl: the target class
+ * @diff: diff time in microseconds
*
* It computes cl's mode at time cl->t_c+diff and returns it. If mode
* is not HTB_CAN_SEND then cl->pq_key is updated to time difference
@@ -521,9 +540,12 @@ htb_class_mode(struct htb_class *cl, s64 *diff)
/**
* htb_change_class_mode - changes classe's mode
+ * @q: the priority event queue
+ * @cl: the target class
+ * @diff: diff time in microseconds
*
* This should be the only way how to change classe's mode under normal
- * cirsumstances. Routine will update feed lists linkage, change mode
+ * circumstances. Routine will update feed lists linkage, change mode
* and add class to the wait event queue if appropriate. New mode should
* be different from old one and cl->pq_key has to be valid if changing
* to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
@@ -553,6 +575,8 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
/**
* htb_activate - inserts leaf cl into appropriate active feeds
+ * @q: the priority event queue
+ * @cl: the target class
*
* Routine learns (new) priority of leaf and activates feed chain
* for the prio. It can be called on already active leaf safely.
@@ -570,6 +594,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
/**
* htb_deactivate - remove leaf cl from active feeds
+ * @q: the priority event queue
+ * @cl: the target class
*
* Make sure that leaf is active. In the other words it can't be called
* with non-active leaf. It also removes class from the drop list.
@@ -649,6 +675,10 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
+ * @q: the priority event queue
+ * @cl: the class to start iterating from
+ * @level: the minimum level to account
+ * @skb: the socket buffer
*
* Routine assumes that packet "bytes" long was dequeued from leaf cl
* borrowing from "level". It accounts bytes to ceil leaky bucket for
@@ -698,6 +728,9 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
/**
* htb_do_events - make mode changes to classes at the level
+ * @q: the priority event queue
+ * @level: which wait_pq in 'q->hlevel'
+ * @start: start jiffies
*
* Scans event queue for pending events and applies them. Returns time of
* next pending event (0 for no event in pq, q->now for too many events).
@@ -766,6 +799,8 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
/**
* htb_lookup_leaf - returns next leaf class in DRR order
+ * @hprio: the current one
+ * @prio: which prio in class
*
* Find leaf where current feed pointers points to.
*/
@@ -1488,7 +1523,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
struct Qdisc *old_q;
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
- qdisc_refcount_inc(new_q);
+ if (new_q)
+ qdisc_refcount_inc(new_q);
old_q = htb_graft_helper(dev_queue, new_q);
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}
@@ -1675,10 +1711,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
cl->parent->common.classid,
NULL);
if (q->offload) {
- if (new_q) {
+ if (new_q)
htb_set_lockdep_class_child(new_q);
- htb_parent_to_leaf_offload(sch, dev_queue, new_q);
- }
+ htb_parent_to_leaf_offload(sch, dev_queue, new_q);
}
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1db9d4a2ef5e..b692a0de1ad5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl->qdisc != &noop_qdisc)
qdisc_hash_add(cl->qdisc, true);
- sch_tree_lock(sch);
- qdisc_class_hash_insert(&q->clhash, &cl->common);
- sch_tree_unlock(sch);
-
- qdisc_class_hash_grow(sch, &q->clhash);
set_change_agg:
sch_tree_lock(sch);
@@ -507,8 +502,11 @@ set_change_agg:
}
if (existing)
qfq_deact_rm_from_agg(q, cl);
+ else
+ qdisc_class_hash_insert(&q->clhash, &cl->common);
qfq_add_to_agg(q, new_agg, cl);
sch_tree_unlock(sch);
+ qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 5c91df52b8c2..66fe2b82af9a 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -114,9 +114,6 @@ static void taprio_free_sched_cb(struct rcu_head *head)
struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
struct sched_entry *entry, *n;
- if (!sched)
- return;
-
list_for_each_entry_safe(entry, n, &sched->entries, list) {
list_del(&entry->list);
kfree(entry);
@@ -438,6 +435,11 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child;
int queue;
+ if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
+ WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
+ return qdisc_drop(skb, sch, to_free);
+ }
+
queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue];
@@ -529,23 +531,7 @@ static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
- struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
-
- if (unlikely(!child))
- continue;
-
- skb = child->ops->peek(child);
- if (!skb)
- continue;
-
- return skb;
- }
+ WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");
return NULL;
}
@@ -654,27 +640,7 @@ done:
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
- struct taprio_sched *q = qdisc_priv(sch);
- struct net_device *dev = qdisc_dev(sch);
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct Qdisc *child = q->qdiscs[i];
-
- if (unlikely(!child))
- continue;
-
- skb = child->ops->dequeue(child);
- if (unlikely(!skb))
- continue;
-
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
-
- return skb;
- }
+ WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");
return NULL;
}
@@ -1759,6 +1725,37 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
return taprio_change(sch, opt, extack);
}
+static void taprio_attach(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx;
+
+ /* Attach underlying qdisc */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ struct Qdisc *qdisc = q->qdiscs[ntx];
+ struct Qdisc *old;
+
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_hash_add(qdisc, false);
+ } else {
+ old = dev_graft_qdisc(qdisc->dev_queue, sch);
+ qdisc_refcount_inc(sch);
+ }
+ if (old)
+ qdisc_put(old);
+ }
+
+ /* access to the child qdiscs is not needed in offload mode */
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ kfree(q->qdiscs);
+ q->qdiscs = NULL;
+ }
+}
+
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
unsigned long cl)
{
@@ -1785,8 +1782,12 @@ static int taprio_graft(struct Qdisc *sch, unsigned long cl,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- *old = q->qdiscs[cl - 1];
- q->qdiscs[cl - 1] = new;
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ *old = dev_graft_qdisc(dev_queue, new);
+ } else {
+ *old = q->qdiscs[cl - 1];
+ q->qdiscs[cl - 1] = new;
+ }
if (new)
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
@@ -2020,6 +2021,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.change = taprio_change,
.destroy = taprio_destroy,
.reset = taprio_reset,
+ .attach = taprio_attach,
.peek = taprio_peek,
.dequeue = taprio_dequeue,
.enqueue = taprio_enqueue,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 336df4b36655..be29da09cc7a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -98,6 +98,7 @@ static struct sctp_association *sctp_association_init(
* sock configured value.
*/
asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+ asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);
asoc->encap_port = sp->encap_port;
@@ -625,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
* association configured value.
*/
peer->hbinterval = asoc->hbinterval;
+ peer->probe_interval = asoc->probe_interval;
peer->encap_port = asoc->encap_port;
@@ -714,6 +716,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
return NULL;
}
+ sctp_transport_pl_reset(peer);
+
/* Attach the remote transport to our asoc. */
list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
asoc->peer.transport_count++;
@@ -812,6 +816,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
spc_state = SCTP_ADDR_CONFIRMED;
transport->state = SCTP_ACTIVE;
+ sctp_transport_pl_reset(transport);
break;
case SCTP_TRANSPORT_DOWN:
@@ -821,6 +826,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
*/
if (transport->state != SCTP_UNCONFIRMED) {
transport->state = SCTP_INACTIVE;
+ sctp_transport_pl_reset(transport);
spc_state = SCTP_ADDR_UNREACHABLE;
} else {
sctp_transport_dst_release(transport);
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 53e5ed79f63f..59e653b528b1 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -270,22 +270,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
rawaddr = (union sctp_addr_param *)raw_addr_list;
af = sctp_get_af_specific(param_type2af(param->type));
- if (unlikely(!af)) {
+ if (unlikely(!af) ||
+ !af->from_addr_param(&addr, rawaddr, htons(port), 0)) {
retval = -EINVAL;
- sctp_bind_addr_clean(bp);
- break;
+ goto out_err;
}
- af->from_addr_param(&addr, rawaddr, htons(port), 0);
if (sctp_bind_addr_state(bp, &addr) != -1)
goto next;
retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
SCTP_ADDR_SRC, gfp);
- if (retval) {
+ if (retval)
/* Can't finish building the list, clean up. */
- sctp_bind_addr_clean(bp);
- break;
- }
+ goto out_err;
next:
len = ntohs(param->length);
@@ -294,6 +291,12 @@ next:
}
return retval;
+
+out_err:
+ if (retval)
+ sctp_bind_addr_clean(bp);
+
+ return retval;
}
/********************************************************************
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index c4d9c7feffb9..ccd773e4c371 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -154,6 +154,7 @@ static const char *const sctp_timer_tbl[] = {
"TIMEOUT_T5_SHUTDOWN_GUARD",
"TIMEOUT_HEARTBEAT",
"TIMEOUT_RECONF",
+ "TIMEOUT_PROBE",
"TIMEOUT_SACK",
"TIMEOUT_AUTOCLOSE",
};
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d508f6f3dd08..eb3c2a34a31c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -385,7 +385,9 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
struct sctp_transport *t, __u32 pmtu)
{
- if (!t || (t->pathmtu <= pmtu))
+ if (!t ||
+ (t->pathmtu <= pmtu &&
+ t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
return;
if (sock_owned_by_user(sk)) {
@@ -554,6 +556,50 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
sctp_transport_put(t);
}
+static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_PARAMETERPROB:
+ err = EPROTO;
+ break;
+ case ICMP_DEST_UNREACH:
+ if (code > NR_ICMP_UNREACH)
+ return;
+ if (code == ICMP_FRAG_NEEDED) {
+ sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
+ return;
+ }
+ if (code == ICMP_PROT_UNREACH) {
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
+ }
+ err = icmp_err_convert[code].errno;
+ break;
+ case ICMP_TIME_EXCEEDED:
+ if (code == ICMP_EXC_FRAGTIME)
+ return;
+
+ err = EHOSTUNREACH;
+ break;
+ case ICMP_REDIRECT:
+ sctp_icmp_redirect(sk, t, skb);
+ return;
+ default:
+ return;
+ }
+ if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
+ sk->sk_err = err;
+ sk_error_report(sk);
+ } else { /* Only an error on timeout */
+ sk->sk_err_soft = err;
+ }
+}
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -572,22 +618,19 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
int sctp_v4_err(struct sk_buff *skb, __u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
- const int ihlen = iph->ihl * 4;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
- struct sock *sk;
- struct sctp_association *asoc = NULL;
+ struct net *net = dev_net(skb->dev);
struct sctp_transport *transport;
- struct inet_sock *inet;
+ struct sctp_association *asoc;
__u16 saveip, savesctp;
- int err;
- struct net *net = dev_net(skb->dev);
+ struct sock *sk;
/* Fix up skb to look at the embedded net header. */
saveip = skb->network_header;
savesctp = skb->transport_header;
skb_reset_network_header(skb);
- skb_set_transport_header(skb, ihlen);
+ skb_set_transport_header(skb, iph->ihl * 4);
sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
/* Put back, the original values. */
skb->network_header = saveip;
@@ -596,59 +639,41 @@ int sctp_v4_err(struct sk_buff *skb, __u32 info)
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return -ENOENT;
}
- /* Warning: The sock lock is held. Remember to call
- * sctp_err_finish!
- */
- switch (type) {
- case ICMP_PARAMETERPROB:
- err = EPROTO;
- break;
- case ICMP_DEST_UNREACH:
- if (code > NR_ICMP_UNREACH)
- goto out_unlock;
+ sctp_v4_err_handle(transport, skb, type, code, info);
+ sctp_err_finish(sk, transport);
- /* PMTU discovery (RFC1191) */
- if (ICMP_FRAG_NEEDED == code) {
- sctp_icmp_frag_needed(sk, asoc, transport,
- SCTP_TRUNC4(info));
- goto out_unlock;
- } else {
- if (ICMP_PROT_UNREACH == code) {
- sctp_icmp_proto_unreachable(sk, asoc,
- transport);
- goto out_unlock;
- }
- }
- err = icmp_err_convert[code].errno;
- break;
- case ICMP_TIME_EXCEEDED:
- /* Ignore any time exceeded errors due to fragment reassembly
- * timeouts.
- */
- if (ICMP_EXC_FRAGTIME == code)
- goto out_unlock;
+ return 0;
+}
- err = EHOSTUNREACH;
- break;
- case ICMP_REDIRECT:
- sctp_icmp_redirect(sk, transport, skb);
- /* Fall through to out_unlock. */
- default:
- goto out_unlock;
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ struct icmphdr *hdr;
+ __u32 info = 0;
+
+ skb->transport_header += sizeof(struct udphdr);
+ sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
+ if (!sk) {
+ __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+ return -ENOENT;
}
- inet = inet_sk(sk);
- if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->sk_err = err;
- sk->sk_error_report(sk);
- } else { /* Only an error on timeout */
- sk->sk_err_soft = err;
+ skb->transport_header -= sizeof(struct udphdr);
+ hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
+ if (hdr->type == ICMP_REDIRECT) {
+ /* can't be handled without outer iphdr known, leave it to udp_err */
+ sctp_err_finish(sk, t);
+ return 0;
}
+ if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
+ info = ntohs(hdr->un.frag.mtu);
+ sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);
-out_unlock:
- sctp_err_finish(sk, transport);
- return 0;
+ sctp_err_finish(sk, t);
+ return 1;
}
/*
@@ -1131,7 +1156,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
if (!af)
continue;
- af->from_addr_param(paddr, params.addr, sh->source, 0);
+ if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
+ continue;
asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
if (asoc)
@@ -1167,6 +1193,9 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
union sctp_addr_param *param;
union sctp_addr paddr;
+ if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
+ return NULL;
+
/* Skip over the ADDIP header and find the Address parameter */
param = (union sctp_addr_param *)(asconf + 1);
@@ -1174,7 +1203,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
- af->from_addr_param(&paddr, param, peer_port, 0);
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
+ return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
@@ -1236,6 +1266,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
net, ch, laddr,
sctp_hdr(skb)->source,
transportp);
+ break;
default:
break;
}
@@ -1245,7 +1276,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
ch = (struct sctp_chunkhdr *)ch_end;
chunk_num++;
- } while (ch_end < skb_tail_pointer(skb));
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
return asoc;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bd08807c9e44..e48dd909dee5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -122,54 +122,28 @@ static struct notifier_block sctp_inet6addr_notifier = {
.notifier_call = sctp_inet6addr_event,
};
-/* ICMP error handler. */
-static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
{
- struct inet6_dev *idev;
- struct sock *sk;
- struct sctp_association *asoc;
- struct sctp_transport *transport;
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
struct ipv6_pinfo *np;
- __u16 saveip, savesctp;
- int err, ret = 0;
- struct net *net = dev_net(skb->dev);
-
- idev = in6_dev_get(skb->dev);
-
- /* Fix up skb to look at the embedded net header. */
- saveip = skb->network_header;
- savesctp = skb->transport_header;
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, offset);
- sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
- /* Put back, the original pointers. */
- skb->network_header = saveip;
- skb->transport_header = savesctp;
- if (!sk) {
- __ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
- ret = -ENOENT;
- goto out;
- }
-
- /* Warning: The sock lock is held. Remember to call
- * sctp_err_finish!
- */
+ int err = 0;
switch (type) {
case ICMPV6_PKT_TOOBIG:
if (ip6_sk_accept_pmtu(sk))
- sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
- goto out_unlock;
+ sctp_icmp_frag_needed(sk, asoc, t, info);
+ return;
case ICMPV6_PARAMPROB:
if (ICMPV6_UNK_NEXTHDR == code) {
- sctp_icmp_proto_unreachable(sk, asoc, transport);
- goto out_unlock;
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
}
break;
case NDISC_REDIRECT:
- sctp_icmp_redirect(sk, transport, skb);
- goto out_unlock;
+ sctp_icmp_redirect(sk, t, skb);
+ return;
default:
break;
}
@@ -178,18 +152,70 @@ static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
icmpv6_err_convert(type, code, &err);
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
- sk->sk_error_report(sk);
- } else { /* Only an error on timeout */
+ sk_error_report(sk);
+ } else {
sk->sk_err_soft = err;
}
+}
+
+/* ICMP error handler. */
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_transport *transport;
+ struct sctp_association *asoc;
+ __u16 saveip, savesctp;
+ struct sock *sk;
+
+ /* Fix up skb to look at the embedded net header. */
+ saveip = skb->network_header;
+ savesctp = skb->transport_header;
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, offset);
+ sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+ /* Put back, the original pointers. */
+ skb->network_header = saveip;
+ skb->transport_header = savesctp;
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return -ENOENT;
+ }
-out_unlock:
+ sctp_v6_err_handle(transport, skb, type, code, ntohl(info));
sctp_err_finish(sk, transport);
-out:
- if (likely(idev != NULL))
- in6_dev_put(idev);
- return ret;
+ return 0;
+}
+
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ struct icmp6hdr *hdr;
+ __u32 info = 0;
+
+ skb->transport_header += sizeof(struct udphdr);
+ sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &t);
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return -ENOENT;
+ }
+
+ skb->transport_header -= sizeof(struct udphdr);
+ hdr = (struct icmp6hdr *)(skb_network_header(skb) - sizeof(struct icmp6hdr));
+ if (hdr->icmp6_type == NDISC_REDIRECT) {
+ /* can't be handled without outer ip6hdr known, leave it to udpv6_err */
+ sctp_err_finish(sk, t);
+ return 0;
+ }
+ if (hdr->icmp6_type == ICMPV6_PKT_TOOBIG)
+ info = ntohl(hdr->icmp6_mtu);
+ sctp_v6_err_handle(t, skb, hdr->icmp6_type, hdr->icmp6_code, info);
+
+ sctp_err_finish(sk, t);
+ return 1;
}
static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
@@ -551,15 +577,20 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}
/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v6_from_addr_param(union sctp_addr *addr,
+static bool sctp_v6_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param))
+ return false;
+
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0; /* BUG */
addr->v6.sin6_addr = param->v6.addr;
addr->v6.sin6_scope_id = iif;
+
+ return true;
}
/* Initialize an address parameter from a sctp_addr and return the length
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a6aa17df09ef..9032ce60d50e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -103,7 +103,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_transport_route(tp, NULL, sp);
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
- } else if (!sctp_transport_pmtu_check(tp)) {
+ } else if (!sctp_transport_pl_enabled(tp) &&
+ !sctp_transport_pmtu_check(tp)) {
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
}
@@ -211,6 +212,30 @@ enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
return retval;
}
+/* Try to bundle a pad chunk into a packet with a heartbeat chunk for PLPMTUD probe */
+static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
+{
+ struct sctp_transport *t = pkt->transport;
+ struct sctp_chunk *pad;
+ int overhead = 0;
+
+ if (!chunk->pmtu_probe)
+ return SCTP_XMIT_OK;
+
+ /* calculate the Padding Data size for the pad chunk */
+ overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
+ pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
+ if (!pad)
+ return SCTP_XMIT_DELAY;
+
+ list_add_tail(&pad->list, &pkt->chunk_list);
+ pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
+ chunk->transport = t;
+
+ return SCTP_XMIT_OK;
+}
+
/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
struct sctp_chunk *chunk)
@@ -382,6 +407,10 @@ enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
goto finish;
retval = __sctp_packet_append_chunk(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = sctp_packet_bundle_pad(packet, chunk);
finish:
return retval;
@@ -553,7 +582,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
sk = chunk->skb->sk;
/* check gso */
- if (packet->size > tp->pathmtu && !packet->ipfragok) {
+ if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
if (!sk_can_gso(sk)) {
pr_err_once("Trying to GSO but underlying device doesn't support it.");
goto out;
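
sctp_packet_bundle_pad() above sizes the PAD chunk so that the probe packet as a whole lands exactly on pl.probe_size. A worked example of the arithmetic (plain C; the header and payload sizes are illustrative stand-ins, not the kernel's real struct sizes):

#include <stdio.h>

int main(void)
{
	int probe_size = 1200;		/* hypothetical pl.probe_size */
	int sctphdr = 12;		/* common SCTP header */
	int hb_chunk = 4 + 40;		/* HEARTBEAT chunk header + hb info */
	int pad_hdr = 4;		/* PAD chunk header */
	int pad_data = probe_size - (sctphdr + hb_chunk + pad_hdr);

	printf("padding data bytes = %d\n", pad_data);	/* 1140 here */
	return 0;
}
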
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 5cb1aa5f067b..ff47091c385e 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -769,7 +769,11 @@ static int sctp_packet_singleton(struct sctp_transport *transport,
sctp_packet_init(&singleton, transport, sport, dport);
sctp_packet_config(&singleton, vtag, 0);
- sctp_packet_append_chunk(&singleton, chunk);
+ if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ return -ENOMEM;
+ }
return sctp_packet_transmit(&singleton, gfp);
}
@@ -929,8 +933,13 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
one_packet = 1;
fallthrough;
- case SCTP_CID_SACK:
case SCTP_CID_HEARTBEAT:
+ if (chunk->pmtu_probe) {
+ sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
+ break;
+ }
+ fallthrough;
+ case SCTP_CID_SACK:
case SCTP_CID_SHUTDOWN:
case SCTP_CID_ECN_ECNE:
case SCTP_CID_ASCONF:
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6f2bbfeec3a4..3c1fbf38f4f7 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -254,14 +254,19 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}
/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v4_from_addr_param(union sctp_addr *addr,
+static bool sctp_v4_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
+ return false;
+
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = port;
addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+
+ return true;
}
/* Initialize an address parameter from a sctp_addr and return the length
@@ -850,23 +855,6 @@ static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static int sctp_udp_err_lookup(struct sock *sk, struct sk_buff *skb)
-{
- struct sctp_association *asoc;
- struct sctp_transport *t;
- int family;
-
- skb->transport_header += sizeof(struct udphdr);
- family = (ip_hdr(skb)->version == 4) ? AF_INET : AF_INET6;
- sk = sctp_err_lookup(dev_net(skb->dev), family, skb, sctp_hdr(skb),
- &asoc, &t);
- if (!sk)
- return -ENOENT;
-
- sctp_err_finish(sk, t);
- return 0;
-}
-
int sctp_udp_sock_start(struct net *net)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
@@ -885,7 +873,7 @@ int sctp_udp_sock_start(struct net *net)
tuncfg.encap_type = 1;
tuncfg.encap_rcv = sctp_udp_rcv;
- tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+ tuncfg.encap_err_lookup = sctp_udp_v4_err;
setup_udp_tunnel_sock(net, sock, &tuncfg);
net->sctp.udp4_sock = sock->sk;
@@ -907,7 +895,7 @@ int sctp_udp_sock_start(struct net *net)
tuncfg.encap_type = 1;
tuncfg.encap_rcv = sctp_udp_rcv;
- tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+ tuncfg.encap_err_lookup = sctp_udp_v6_err;
setup_udp_tunnel_sock(net, sock, &tuncfg);
net->sctp.udp6_sock = sock->sk;
#endif
@@ -1171,7 +1159,6 @@ static const struct net_protocol sctp_protocol = {
.handler = sctp4_rcv,
.err_handler = sctp_v4_err,
.no_policy = 1,
- .netns_ok = 1,
.icmp_strict_tag_validation = 1,
};
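
Across the SCTP hunks above, from_addr_param() gains a bool return and every caller now bails out instead of parsing a parameter whose self-declared length is shorter than the structure about to be read. The essence, as a standalone sketch (made-up parameter struct, not the kernel's):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct addr_param {
	uint16_t type;
	uint16_t length;	/* network byte order, covers whole param */
	uint8_t  addr[4];
};

static bool from_addr_param(uint32_t *out, const struct addr_param *p)
{
	if (ntohs(p->length) < sizeof(*p))
		return false;	/* truncated parameter: reject it */
	memcpy(out, p->addr, sizeof(*out));
	return true;
}

int main(void)
{
	/* claims to be 4 bytes long, far less than the 8 it must cover */
	struct addr_param p = { .type = htons(5), .length = htons(4) };
	uint32_t a;

	printf("accepted=%d\n", from_addr_param(&a, &p));	/* prints 0 */
	return 0;
}
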
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 5b44d228b6ca..6c08e5048d38 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1160,7 +1160,8 @@ nodata:
/* Make a HEARTBEAT chunk. */
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
- const struct sctp_transport *transport)
+ const struct sctp_transport *transport,
+ __u32 probe_size)
{
struct sctp_sender_hb_info hbinfo;
struct sctp_chunk *retval;
@@ -1176,6 +1177,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
hbinfo.hb_nonce = transport->hb_nonce;
+ hbinfo.probe_size = probe_size;
/* Cast away the 'const', as this is just telling the chunk
* what transport it belongs to.
@@ -1183,6 +1185,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
retval->transport = (struct sctp_transport *) transport;
retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
&hbinfo);
+ retval->pmtu_probe = !!probe_size;
nodata:
return retval;
@@ -1218,6 +1221,32 @@ nodata:
return retval;
}
+/* RFC4820 3. Padding Chunk (PAD)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0x84 | Flags=0 | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * \ Padding Data /
+ * / \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_PAD, 0, len, GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ skb_put_zero(retval->skb, len);
+ retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + len);
+ retval->chunk_end = skb_tail_pointer(retval->skb);
+
+ return retval;
+}
+
/* Create an Operation Error chunk with the specified space reserved.
* This routine can be used for containing multiple causes in the chunk.
*/
@@ -2166,9 +2195,16 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
break;
case SCTP_PARAM_SET_PRIMARY:
- if (ep->asconf_enable)
- break;
- goto unhandled;
+ if (!ep->asconf_enable)
+ goto unhandled;
+
+ if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) +
+ sizeof(struct sctp_paramhdr)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
@@ -2346,11 +2382,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* Process the initialization parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
- if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
- param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ if (!src_match &&
+ (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr,
- chunk->sctp_hdr->source, 0);
+ if (!af->from_addr_param(&addr, param.addr,
+ chunk->sctp_hdr->source, 0))
+ continue;
if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
src_match = 1;
}
@@ -2531,7 +2569,8 @@ static int sctp_process_param(struct sctp_association *asoc,
break;
do_addr_param:
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0))
+ break;
scope = sctp_scope(peer_addr);
if (sctp_in_scope(net, &addr, scope))
if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
@@ -2632,15 +2671,13 @@ do_addr_param:
addr_param = param.v + sizeof(struct sctp_addip_param);
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- if (af == NULL)
+ if (!af)
break;
- af->from_addr_param(&addr, addr_param,
- htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0))
+ break;
- /* if the address is invalid, we can't process it.
- * XXX: see spec for what to do.
- */
if (!af->addr_valid(&addr, NULL, NULL))
break;
@@ -3054,7 +3091,8 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
if (unlikely(!af))
return SCTP_ERROR_DNS_FAILED;
- af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0))
+ return SCTP_ERROR_DNS_FAILED;
/* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast
* or multicast address.
@@ -3331,7 +3369,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
/* We have checked the packet before, so we do not check again. */
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0))
+ return;
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index ce15d590a615..b3815b568e8e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -471,6 +471,38 @@ out_unlock:
sctp_transport_put(transport);
}
+/* Handle the timeout of the probe timer. */
+void sctp_generate_probe_event(struct timer_list *t)
+{
+ struct sctp_transport *transport = from_timer(transport, t, probe_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
+ asoc->state, asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(struct timer_list *t)
{
@@ -1641,6 +1673,11 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
sctp_cmd_hb_timers_stop(commands, asoc);
break;
+ case SCTP_CMD_PROBE_TIMER_UPDATE:
+ t = cmd->obj.transport;
+ sctp_transport_reset_probe_timer(t);
+ break;
+
case SCTP_CMD_REPORT_ERROR:
error = cmd->obj.error;
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index fd1e319eda00..09a8f23ec709 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -361,7 +361,7 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
/* If the INIT is coming toward a closing socket, we'll send back
* and ABORT. Essentially, this catches the race of INIT being
- * backloged to the socket at the same time as the user isses close().
+ * backlogged to the socket at the same time as the user issues close().
* Since the socket and all its associations are going away, we
* can treat this OOTB
*/
@@ -608,8 +608,8 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
- /* SCTP-AUTH: genereate the assocition shared keys so that
- * we can potentially signe the COOKIE-ECHO.
+ /* SCTP-AUTH: generate the association shared keys so that
+ * we can potentially sign the COOKIE-ECHO.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
@@ -787,7 +787,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
goto nomem_init;
/* SCTP-AUTH: Now that we've populate required fields in
- * sctp_process_init, set up the assocaition shared keys as
+ * sctp_process_init, set up the association shared keys as
* necessary so that we can potentially authenticate the ACK
*/
error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
@@ -838,7 +838,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
/* Add all the state machine commands now since we've created
* everything. This way we don't introduce memory corruptions
- * during side-effect processing and correclty count established
+ * during side-effect processing and correctly count established
* associations.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@@ -923,7 +923,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
commands);
/* Reset init error count upon receipt of COOKIE-ACK,
- * to avoid problems with the managemement of this
+ * to avoid problems with the management of this
* counter in stale cookie situations when a transition back
* from the COOKIE-ECHOED state to the COOKIE-WAIT
* state is performed.
@@ -1004,7 +1004,7 @@ static enum sctp_disposition sctp_sf_heartbeat(
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
- reply = sctp_make_heartbeat(asoc, transport);
+ reply = sctp_make_heartbeat(asoc, transport, 0);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
@@ -1095,6 +1095,32 @@ enum sctp_disposition sctp_sf_send_reconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+/* Send a HEARTBEAT chunk with padding for PLPMTUD. */
+enum sctp_disposition sctp_sf_send_probe(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *)arg;
+ struct sctp_chunk *reply;
+
+ if (!sctp_transport_pl_enabled(transport))
+ return SCTP_DISPOSITION_CONSUME;
+
+ sctp_transport_pl_send(transport);
+
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
+ SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
/*
 * Process a heartbeat request.
*
@@ -1243,6 +1269,18 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
if (hbinfo->hb_nonce != link->hb_nonce)
return SCTP_DISPOSITION_DISCARD;
+ if (hbinfo->probe_size) {
+ if (hbinfo->probe_size != link->pl.probe_size ||
+ !sctp_transport_pl_enabled(link))
+ return SCTP_DISPOSITION_DISCARD;
+
+ sctp_transport_pl_recv(link);
+ if (link->pl.state == SCTP_PL_COMPLETE)
+ return SCTP_DISPOSITION_CONSUME;
+
+ return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
+ }
+
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
@@ -2950,7 +2988,7 @@ enum sctp_disposition sctp_sf_do_9_2_reshutack(
commands);
/* Since we are not going to really process this INIT, there
- * is no point in verifying chunk boundries. Just generate
+ * is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
@@ -3560,7 +3598,7 @@ enum sctp_disposition sctp_sf_do_9_2_final(struct net *net,
goto nomem_chunk;
/* Do all the commands now (after allocation), so that we
- * have consistent state if memory allocation failes
+ * have consistent state if memory allocation fails
*/
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
@@ -3747,7 +3785,7 @@ static enum sctp_disposition sctp_sf_shut_8_4_5(
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* We need to discard the rest of the packet to prevent
- * potential bomming attacks from additional bundled chunks.
+ * potential bombing attacks from additional bundled chunks.
* This is documented in SCTP Threats ID.
*/
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -4257,7 +4295,7 @@ gen_shutdown:
}
/*
- * SCTP-AUTH Section 6.3 Receiving authenticated chukns
+ * SCTP-AUTH Section 6.3 Receiving authenticated chunks
*
* The receiver MUST use the HMAC algorithm indicated in the HMAC
* Identifier field. If this algorithm was not specified by the
@@ -4812,7 +4850,7 @@ static enum sctp_disposition sctp_sf_violation_ctsn(
/* Handle protocol violation of an invalid chunk bundling. For example,
* when we have an association and we receive bundled INIT-ACK, or
- * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle"
+ * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
* statement from the specs. Additionally, there might be an attacker
* on the path and we may not want to continue this communication.
*/
@@ -5208,7 +5246,7 @@ enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown(
* Inputs
* (endpoint, asoc)
*
- * The RFC does not explcitly address this issue, but is the route through the
+ * The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues a shutdown while in COOKIE_ECHOED state.
*
* Outputs
@@ -5932,7 +5970,7 @@ enum sctp_disposition sctp_sf_t1_cookie_timer_expire(
/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
- * An endpoint should limit the number of retransmission of the
+ * An endpoint should limit the number of retransmissions of the
* SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
* If this threshold is exceeded the endpoint should destroy the TCB and
* MUST report the peer endpoint unreachable to the upper layer (and
@@ -6010,7 +6048,7 @@ nomem:
}
/*
- * ADDIP Section 4.1 ASCONF CHunk Procedures
+ * ADDIP Section 4.1 ASCONF Chunk Procedures
* If the T4 RTO timer expires the endpoint should do B1 to B5
*/
enum sctp_disposition sctp_sf_t4_timer_expire(
@@ -6441,7 +6479,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
chunk->ecn_ce_done = 1;
if (af->is_ce(sctp_gso_headskb(chunk->skb))) {
- /* Do real work as sideffect. */
+ /* Do real work as side effect. */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
SCTP_U32(tsn));
}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 88ea87f4f0e7..1816a4410b2b 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -527,6 +527,26 @@ auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
}; /*state_fn_t auth_chunk_event_table[][] */
static const struct sctp_sm_table_entry
+pad_chunk_event_table[SCTP_STATE_NUM_STATES] = {
+ /* SCTP_STATE_CLOSED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_COOKIE_WAIT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_COOKIE_ECHOED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_ESTABLISHED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_PENDING */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+}; /* chunk pad */
+
+static const struct sctp_sm_table_entry
chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_CLOSED */
TYPE_SCTP_FUNC(sctp_sf_ootb),
@@ -947,6 +967,25 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
}
+#define TYPE_SCTP_EVENT_TIMEOUT_PROBE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_send_probe), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
static const struct sctp_sm_table_entry
timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_EVENT_TIMEOUT_NONE,
@@ -958,6 +997,7 @@ timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
TYPE_SCTP_EVENT_TIMEOUT_RECONF,
+ TYPE_SCTP_EVENT_TIMEOUT_PROBE,
TYPE_SCTP_EVENT_TIMEOUT_SACK,
TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
@@ -992,6 +1032,9 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
case SCTP_CID_AUTH:
return &auth_chunk_event_table[0][state];
+
+ case SCTP_CID_PAD:
+ return &pad_chunk_event_table[state];
}
return &chunk_event_table_unknown[state];
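
Both tables added above follow the same dispatch convention: the state machine indexes a per-event array by association state and calls that cell's handler, so a new chunk or timeout type only has to supply its column of handlers. A toy sketch of the table-driven dispatch, with illustrative names and just two states:

#include <stdio.h>

enum state { CLOSED, ESTABLISHED, NUM_STATES };

typedef const char *(*handler_t)(void);

static const char *discard(void)      { return "discard"; }
static const char *send_probe(void)   { return "send probe"; }
static const char *timer_ignore(void) { return "ignore"; }

/* PROBE timeout only does work in ESTABLISHED, mirroring
 * TYPE_SCTP_EVENT_TIMEOUT_PROBE above. */
static const handler_t probe_timeout[NUM_STATES] = {
    [CLOSED]      = timer_ignore,
    [ESTABLISHED] = send_probe,
};

/* PAD chunks are discarded in every state, mirroring
 * pad_chunk_event_table above. */
static const handler_t pad_chunk[NUM_STATES] = {
    [CLOSED]      = discard,
    [ESTABLISHED] = discard,
};

int main(void)
{
    printf("PROBE in ESTABLISHED: %s\n", probe_timeout[ESTABLISHED]());
    printf("PAD in CLOSED: %s\n", pad_chunk[CLOSED]());
    return 0;
}
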
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 40f9f6c4a0a1..e64e01f61b11 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2496,6 +2496,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
sctp_assoc_sync_pmtu(asoc);
}
+ sctp_transport_pl_reset(trans);
} else if (asoc) {
asoc->param_flags =
(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
@@ -4473,6 +4474,7 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
transports)
t->encap_port = encap_port;
+ asoc->encap_port = encap_port;
return 0;
}
@@ -4480,6 +4482,61 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
return 0;
}
+static int sctp_setsockopt_probe_interval(struct sock *sk,
+ struct sctp_probeinterval *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __u32 probe_interval;
+
+ if (optlen != sizeof(*params))
+ return -EINVAL;
+
+ probe_interval = params->spi_interval;
+ if (probe_interval && probe_interval < SCTP_PROBE_TIMER_MIN)
+ return -EINVAL;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params->spi_address)) {
+ t = sctp_addr_id2transport(sk, &params->spi_address,
+ params->spi_assoc_id);
+ if (!t)
+ return -EINVAL;
+
+ t->probe_interval = msecs_to_jiffies(probe_interval);
+ sctp_transport_pl_reset(t);
+ return 0;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params->spi_assoc_id);
+ if (!asoc && params->spi_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* If changes are for association, also apply probe_interval to
+ * each transport.
+ */
+ if (asoc) {
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+ t->probe_interval = msecs_to_jiffies(probe_interval);
+ sctp_transport_pl_reset(t);
+ }
+
+ asoc->probe_interval = msecs_to_jiffies(probe_interval);
+ return 0;
+ }
+
+ sctp_sk(sk)->probe_interval = probe_interval;
+ return 0;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4702,6 +4759,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_REMOTE_UDP_ENCAPS_PORT:
retval = sctp_setsockopt_encap_port(sk, kopt, optlen);
break;
+ case SCTP_PLPMTUD_PROBE_INTERVAL:
+ retval = sctp_setsockopt_probe_interval(sk, kopt, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -4988,6 +5048,7 @@ static int sctp_init_sock(struct sock *sk)
atomic_set(&sp->pd_mode, 0);
skb_queue_head_init(&sp->pd_lobby);
sp->frag_interleave = 0;
+ sp->probe_interval = net->sctp.probe_interval;
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
@@ -7904,6 +7965,66 @@ out:
return 0;
}
+static int sctp_getsockopt_probe_interval(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_probeinterval params;
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __u32 probe_interval;
+
+ if (len < sizeof(params))
+ return -EINVAL;
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spi_address)) {
+ t = sctp_addr_id2transport(sk, &params.spi_address,
+ params.spi_assoc_id);
+ if (!t) {
+ pr_debug("%s: failed no transport\n", __func__);
+ return -EINVAL;
+ }
+
+ probe_interval = jiffies_to_msecs(t->probe_interval);
+ goto out;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params.spi_assoc_id);
+ if (!asoc && params.spi_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ pr_debug("%s: failed no association\n", __func__);
+ return -EINVAL;
+ }
+
+ if (asoc) {
+ probe_interval = jiffies_to_msecs(asoc->probe_interval);
+ goto out;
+ }
+
+ probe_interval = sctp_sk(sk)->probe_interval;
+
+out:
+ params.spi_interval = probe_interval;
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -8127,6 +8248,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_REMOTE_UDP_ENCAPS_PORT:
retval = sctp_getsockopt_encap_port(sk, len, optval, optlen);
break;
+ case SCTP_PLPMTUD_PROBE_INTERVAL:
+ retval = sctp_getsockopt_probe_interval(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
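
For reference, the new option is exercised from user space roughly as follows. This is a hedged sketch: it assumes lksctp-tools headers built against a kernel carrying this series, and the struct sctp_probeinterval layout is inferred from the spi_* accesses above (the uapi definition is not part of this hunk). The interval is in milliseconds; 0 disables probing for the chosen scope (transport, association, or socket).

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
    struct sctp_probeinterval pi;
    socklen_t len = sizeof(pi);
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&pi, 0, sizeof(pi));
    pi.spi_assoc_id = SCTP_FUTURE_ASSOC;  /* socket-wide default */
    /* spi_address stays zeroed (INADDR_ANY): not a per-transport
     * request, so the per-socket path above is taken */
    pi.spi_interval = 60000;              /* probe every 60s, in ms */
    if (setsockopt(fd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
                   &pi, sizeof(pi)) < 0)
        perror("setsockopt");
    if (getsockopt(fd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
                   &pi, &len) == 0)
        printf("probe interval: %u ms\n", pi.spi_interval);
    return 0;
}
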
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e92df779af73..b46a416787ec 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -55,6 +55,8 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table sctp_table[] = {
{
@@ -294,6 +296,13 @@ static struct ctl_table sctp_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "plpmtud_probe_interval",
+ .data = &init_net.sctp.probe_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_probe_interval,
+ },
+ {
.procname = "udp_port",
.data = &init_net.sctp.udp_port,
.maxlen = sizeof(int),
@@ -307,7 +316,7 @@ static struct ctl_table sctp_net_table[] = {
.data = &init_net.sctp.encap_port,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &udp_port_max,
},
@@ -539,6 +548,32 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
return ret;
}
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.probe_interval;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ if (new_value && new_value < SCTP_PROBE_TIMER_MIN)
+ return -EINVAL;
+
+ net->sctp.probe_interval = new_value;
+ }
+
+ return ret;
+}
+
int sctp_sysctl_net_register(struct net *net)
{
struct ctl_table *table;
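
The same default can also be set per network namespace through the new knob; the path below follows the usual net.sctp.* sysctl layout implied by sctp_net_table. A minimal sketch: 0 disables PLPMTUD, and any nonzero value below SCTP_PROBE_TIMER_MIN milliseconds is rejected by the handler above.

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/sctp/plpmtud_probe_interval", "w");

    if (!f) {
        perror("fopen");
        return 1;
    }
    fprintf(f, "60000\n");  /* probe every 60 seconds */
    fclose(f);
    return 0;
}
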
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bf0ac467e757..5f23804f21c7 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -75,6 +75,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
+ timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
timer_setup(&peer->proto_unreach_timer,
sctp_generate_proto_unreach_event, 0);
@@ -131,6 +132,9 @@ void sctp_transport_free(struct sctp_transport *transport)
if (del_timer(&transport->reconf_timer))
sctp_transport_put(transport);
+ if (del_timer(&transport->probe_timer))
+ sctp_transport_put(transport);
+
/* Delete the ICMP proto unreachable timer if it's active. */
if (del_timer(&transport->proto_unreach_timer))
sctp_transport_put(transport);
@@ -207,6 +211,15 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
sctp_transport_hold(transport);
}
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
+{
+ if (timer_pending(&transport->probe_timer))
+ return;
+ if (!mod_timer(&transport->probe_timer,
+ jiffies + transport->probe_interval))
+ sctp_transport_hold(transport);
+}
+
/* This transport has been assigned to an association.
* Initialize fields from the association or from the sock itself.
* Register the reference count in the association.
@@ -241,12 +254,143 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = sctp_dst_mtu(transport->dst);
else
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+
+ sctp_transport_pl_update(transport);
+}
+
+void sctp_transport_pl_send(struct sctp_transport *t)
+{
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+ if (t->pl.probe_count < SCTP_MAX_PROBES) {
+ t->pl.probe_count++;
+ return;
+ }
+
+ if (t->pl.state == SCTP_PL_BASE) {
+ if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
+ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+ t->pl.pmtu = SCTP_MIN_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ }
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+ t->pl.state = SCTP_PL_BASE; /* Search -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_high = 0;
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ } else { /* Normal probe failure. */
+ t->pl.probe_high = t->pl.probe_size;
+ t->pl.probe_size = t->pl.pmtu;
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+ t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ }
+ }
+ t->pl.probe_count = 1;
+}
+
+void sctp_transport_pl_recv(struct sctp_transport *t)
+{
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+ t->pl.pmtu = t->pl.probe_size;
+ t->pl.probe_count = 0;
+ if (t->pl.state == SCTP_PL_BASE) {
+ t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
+ t->pl.probe_size += SCTP_PL_BIG_STEP;
+ } else if (t->pl.state == SCTP_PL_ERROR) {
+ t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */
+
+ t->pl.pmtu = t->pl.probe_size;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ t->pl.probe_size += SCTP_PL_BIG_STEP;
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (!t->pl.probe_high) {
+ t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+ SCTP_MAX_PLPMTU);
+ return;
+ }
+ t->pl.probe_size += SCTP_PL_MIN_STEP;
+ if (t->pl.probe_size >= t->pl.probe_high) {
+ t->pl.probe_high = 0;
+ t->pl.raise_count = 0;
+ t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
+
+ t->pl.probe_size = t->pl.pmtu;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
+ /* Raise probe_size again after 30 * interval in Search Complete */
+ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+ t->pl.probe_size += SCTP_PL_MIN_STEP;
+ }
+}
+
+static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
+{
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);
+
+ if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
+ return false;
+
+ if (t->pl.state == SCTP_PL_BASE) {
+ if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
+ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+ t->pl.pmtu = SCTP_MIN_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ }
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+ t->pl.state = SCTP_PL_BASE; /* Search -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_count = 0;
+
+ t->pl.probe_high = 0;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
+ t->pl.probe_size = pmtu;
+ t->pl.probe_count = 0;
+
+ return false;
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+ t->pl.state = SCTP_PL_BASE; /* Complete -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_count = 0;
+
+ t->pl.probe_high = 0;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ }
+ }
+
+ return true;
}
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
- struct dst_entry *dst = sctp_transport_dst_check(t);
struct sock *sk = t->asoc->base.sk;
+ struct dst_entry *dst;
bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -257,6 +401,10 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
}
pmtu = SCTP_TRUNC4(pmtu);
+ if (sctp_transport_pl_enabled(t))
+ return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));
+
+ dst = sctp_transport_dst_check(t);
if (dst) {
struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
union sctp_addr addr;
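
Taken together, sctp_transport_pl_send()/_recv()/_toobig() implement the DPLPMTUD search: confirm a base size, grow in big steps until a probe is lost, then close in with small steps below that ceiling. The user-space model below walks the same Base -> Search -> Complete path under two simplifying assumptions: a probe above the path MTU is lost immediately (the kernel requires SCTP_MAX_PROBES misses first) and the base size always succeeds (the Base -> Error leg is omitted). The base/step constants mirror the ones this series appears to use (1200, 32, 4 from constants.h, which is not shown in this hunk).

#include <stdio.h>

enum pl_state { PL_BASE, PL_SEARCH, PL_COMPLETE };

int main(void)
{
    const int path_pmtu = 1400;        /* what the fake path can carry */
    int state = PL_BASE, pmtu = 1200, probe = 1200, high = 0;

    while (state != PL_COMPLETE) {
        if (probe <= path_pmtu) {      /* probe acked: grow */
            pmtu = probe;
            if (state == PL_BASE) {
                state = PL_SEARCH;     /* Base -> Search */
                probe += 32;           /* big steps first */
            } else if (!high) {
                probe += 32;
            } else {
                probe += 4;            /* fine steps below the ceiling */
                if (probe >= high) {   /* Search -> Complete */
                    state = PL_COMPLETE;
                    probe = pmtu;
                }
            }
        } else {                       /* probe lost: record ceiling */
            high = probe;
            probe = pmtu;              /* restart fine search from pmtu */
        }
    }
    printf("converged: pmtu=%d\n", pmtu);  /* 1400 for this path */
    return 0;
}
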
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 77e54fe42b1c..99a0186cba5b 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
-smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5eff7cccceff..898389611ae8 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -49,6 +49,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
+#include "smc_stats.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -508,9 +509,44 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->r0.qp_mtu;
}
-static void smc_switch_to_fallback(struct smc_sock *smc)
+static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
+ struct smc_stats_fback *fback_arr)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
+ if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
+ fback_arr[cnt].count++;
+ break;
+ }
+ if (!fback_arr[cnt].fback_code) {
+ fback_arr[cnt].fback_code = smc->fallback_rsn;
+ fback_arr[cnt].count++;
+ break;
+ }
+ }
+}
+
+static void smc_stat_fallback(struct smc_sock *smc)
+{
+ struct net *net = sock_net(&smc->sk);
+
+ mutex_lock(&net->smc.mutex_fback_rsn);
+ if (smc->listen_smc) {
+ smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
+ net->smc.fback_rsn->srv_fback_cnt++;
+ } else {
+ smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
+ net->smc.fback_rsn->clnt_fback_cnt++;
+ }
+ mutex_unlock(&net->smc.mutex_fback_rsn);
+}
+
+static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
smc->use_fallback = true;
+ smc->fallback_rsn = reason_code;
+ smc_stat_fallback(smc);
if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
smc->clcsock->file = smc->sk.sk_socket->file;
smc->clcsock->file->private_data = smc->clcsock;
@@ -522,8 +558,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(smc, reason_code);
smc_copy_sock_settings_to_clc(smc);
smc->connect_nonblock = 0;
if (smc->sk.sk_state == SMC_INIT)
@@ -535,9 +570,11 @@ static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
u8 version)
{
+ struct net *net = sock_net(&smc->sk);
int rc;
if (reason_code < 0) { /* error, fallback is not possible */
+ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return reason_code;
@@ -545,6 +582,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
if (reason_code != SMC_CLC_DECL_PEERDECL) {
rc = smc_clc_send_decline(smc, reason_code, version);
if (rc < 0) {
+ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return rc;
@@ -992,6 +1030,7 @@ static int __smc_connect(struct smc_sock *smc)
if (rc)
goto vlan_cleanup;
+ SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
smc_connect_ism_vlan_cleanup(smc, ini);
kfree(buf);
kfree(ini);
@@ -1307,7 +1346,9 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
static void smc_listen_out_err(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
+ struct net *net = sock_net(newsmcsk);
+ this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
@@ -1325,8 +1366,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_listen_out_err(new_smc);
return;
}
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(new_smc, reason_code);
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
smc_listen_out_err(new_smc);
@@ -1699,8 +1739,7 @@ static void smc_listen_work(struct work_struct *work)
/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
+ smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
smc_listen_out_connected(new_smc);
return;
}
@@ -1778,6 +1817,7 @@ static void smc_listen_work(struct work_struct *work)
}
smc_conn_save_peer_info(new_smc, cclc);
smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
goto out_free;
out_unlock:
@@ -1984,18 +2024,19 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
goto out;
}
}
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
- else
+ } else {
rc = smc_tx_sendmsg(smc, msg, len);
+ SMC_STAT_TX_PAYLOAD(smc, len, rc);
+ }
out:
release_sock(sk);
return rc;
@@ -2030,6 +2071,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
} else {
msg->msg_namelen = 0;
rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
+ SMC_STAT_RX_PAYLOAD(smc, rc, rc);
}
out:
@@ -2176,7 +2218,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
optval, optlen);
if (smc->clcsock->sk->sk_err) {
sk->sk_err = smc->clcsock->sk->sk_err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
if (optlen < sizeof(int))
@@ -2194,8 +2236,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
}
@@ -2204,18 +2245,22 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (val)
+ if (val) {
+ SMC_STAT_INC(smc, ndly_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_CORK:
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (!val)
+ if (!val) {
+ SMC_STAT_INC(smc, cork_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_DEFER_ACCEPT:
@@ -2338,11 +2383,13 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
goto out;
}
release_sock(sk);
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = kernel_sendpage(smc->clcsock, page, offset,
size, flags);
- else
+ } else {
+ SMC_STAT_INC(smc, sendpage_cnt);
rc = sock_no_sendpage(sock, page, offset, size, flags);
+ }
out:
return rc;
@@ -2391,6 +2438,7 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
flags = MSG_DONTWAIT;
else
flags = 0;
+ SMC_STAT_INC(smc, splice_cnt);
rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
}
out:
@@ -2479,6 +2527,16 @@ static void __net_exit smc_net_exit(struct net *net)
smc_pnet_net_exit(net);
}
+static __net_init int smc_net_stat_init(struct net *net)
+{
+ return smc_stats_init(net);
+}
+
+static void __net_exit smc_net_stat_exit(struct net *net)
+{
+ smc_stats_exit(net);
+}
+
static struct pernet_operations smc_net_ops = {
.init = smc_net_init,
.exit = smc_net_exit,
@@ -2486,6 +2544,11 @@ static struct pernet_operations smc_net_ops = {
.size = sizeof(struct smc_net),
};
+static struct pernet_operations smc_net_stat_ops = {
+ .init = smc_net_stat_init,
+ .exit = smc_net_stat_exit,
+};
+
static int __init smc_init(void)
{
int rc;
@@ -2494,6 +2557,10 @@ static int __init smc_init(void)
if (rc)
return rc;
+ rc = register_pernet_subsys(&smc_net_stat_ops);
+ if (rc)
+ return rc;
+
smc_ism_init();
smc_clc_init();
@@ -2595,6 +2662,7 @@ static void __exit smc_exit(void)
proto_unregister(&smc_proto);
smc_pnet_exit();
smc_nl_exit();
+ unregister_pernet_subsys(&smc_net_stat_ops);
unregister_pernet_subsys(&smc_net_ops);
rcu_barrier();
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 0df85a12651e..cd0d7c908b2a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -33,6 +33,7 @@
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
+#include "smc_stats.h"
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
@@ -1235,20 +1236,6 @@ static void smc_lgr_free(struct smc_link_group *lgr)
kfree(lgr);
}
-static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
-{
- int i;
-
- for (i = 0; i < SMC_RMBE_SIZES; i++) {
- struct smc_buf_desc *buf_desc;
-
- list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
- buf_desc->len += sizeof(struct smcd_cdc_msg);
- smc_ism_unregister_dmb(lgr->smcd, buf_desc);
- }
- }
-}
-
static void smc_sk_wake_ups(struct smc_sock *smc)
{
smc->sk.sk_write_space(&smc->sk);
@@ -1285,7 +1272,6 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
if (lgr->is_smcd) {
smc_ism_signal_shutdown(lgr);
- smcd_unregister_all_dmbs(lgr);
} else {
u32 rsn = lgr->llc_termination_rsn;
@@ -2044,6 +2030,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
struct smc_link_group *lgr = conn->lgr;
struct list_head *buf_list;
int bufsize, bufsize_short;
+ bool is_dgraded = false;
struct mutex *lock; /* lock buffer list */
int sk_buf_size;
@@ -2071,6 +2058,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
/* check for reusable slot in the link group */
buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
if (buf_desc) {
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
memset(buf_desc->cpu_addr, 0, bufsize);
break; /* found reusable slot */
}
@@ -2082,9 +2071,16 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
if (PTR_ERR(buf_desc) == -ENOMEM)
break;
- if (IS_ERR(buf_desc))
+ if (IS_ERR(buf_desc)) {
+ if (!is_dgraded) {
+ is_dgraded = true;
+ SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
+ }
continue;
+ }
+ SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
buf_desc->used = 1;
mutex_lock(lock);
list_add(&buf_desc->list, buf_list);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 94b31f2551bc..9cb2df289963 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -429,6 +429,8 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
int smcd_register_dev(struct smcd_dev *smcd)
{
+ int rc;
+
mutex_lock(&smcd_dev_list.mutex);
if (list_empty(&smcd_dev_list.list)) {
u8 *system_eid = NULL;
@@ -448,7 +450,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
dev_name(&smcd->dev), smcd->pnetid,
smcd->pnetid_by_user ? " (user defined)" : "");
- return device_add(&smcd->dev);
+ rc = device_add(&smcd->dev);
+ if (rc) {
+ mutex_lock(&smcd_dev_list.mutex);
+ list_del(&smcd->list);
+ mutex_unlock(&smcd_dev_list.mutex);
+ }
+
+ return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);
@@ -461,7 +470,6 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
mutex_unlock(&smcd_dev_list.mutex);
smcd->going_away = 1;
smc_smcd_terminate_all(smcd);
- flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
device_del(&smcd->dev);
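
The smcd_register_dev() fix above is the classic publish-then-unwind pattern: the device goes onto the global list first, so a failing device_add() must take it back off before returning, or later lookups would walk a list entry for a dead device. A generic illustration with stand-in names (not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct dev { struct dev *next; const char *name; };
static struct dev *dev_list;

static void list_add_stub(struct dev *d) { d->next = dev_list; dev_list = d; }

static void list_del_stub(struct dev *d)
{
    struct dev **p;

    for (p = &dev_list; *p; p = &(*p)->next)
        if (*p == d) { *p = d->next; return; }
}

static int device_add_stub(struct dev *d, bool fail)
{
    (void)d;
    return fail ? -1 : 0;   /* stands in for device_add() */
}

static int register_dev(struct dev *d, bool fail)
{
    int rc;

    list_add_stub(d);       /* visible to lookups from here on */
    rc = device_add_stub(d, fail);
    if (rc)
        list_del_stub(d);   /* unwind: never leave a dead entry */
    return rc;
}

int main(void)
{
    struct dev d = { .name = "ism0" };

    if (register_dev(&d, true))
        printf("register failed, list head: %p\n", (void *)dev_list);
    return 0;
}
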
diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c
index 140419a19dbf..6fb6f96c1d17 100644
--- a/net/smc/smc_netlink.c
+++ b/net/smc/smc_netlink.c
@@ -19,6 +19,7 @@
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_ib.h"
+#include "smc_stats.h"
#include "smc_netlink.h"
#define SMC_CMD_MAX_ATTR 1
@@ -55,6 +56,16 @@ static const struct genl_ops smc_gen_nl_ops[] = {
/* can be retrieved by unprivileged users */
.dumpit = smcr_nl_get_device,
},
+ {
+ .cmd = SMC_NETLINK_GET_STATS,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_get_stats,
+ },
+ {
+ .cmd = SMC_NETLINK_GET_FBACK_STATS,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_get_fback_stats,
+ },
};
static const struct nla_policy smc_gen_nl_policy[2] = {
diff --git a/net/smc/smc_netlink.h b/net/smc/smc_netlink.h
index 3477265cba6c..5ce2c0a89ccd 100644
--- a/net/smc/smc_netlink.h
+++ b/net/smc/smc_netlink.h
@@ -18,7 +18,7 @@
extern struct genl_family smc_gen_nl_family;
struct smc_nl_dmp_ctx {
- int pos[2];
+ int pos[3];
};
static inline struct smc_nl_dmp_ctx *smc_nl_dmp_ctx(struct netlink_callback *c)
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index fcfac59f8b72..170b733bc736 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -21,6 +21,7 @@
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
+#include "smc_stats.h"
/* callback implementation to wakeup consumers blocked with smc_rx_wait().
* indirectly called by smc_cdc_msg_recv_action().
@@ -227,6 +228,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
conn->urg_state == SMC_URG_READ)
return -EINVAL;
+ SMC_STAT_INC(smc, urg_data_cnt);
if (conn->urg_state == SMC_URG_VALID) {
if (!(flags & MSG_PEEK))
smc->conn.urg_state = SMC_URG_READ;
@@ -303,6 +305,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ readable = atomic_read(&conn->bytes_to_rcv);
+ if (readable >= conn->rmb_desc->len)
+ SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);
+
+ if (len < readable)
+ SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
diff --git a/net/smc/smc_stats.c b/net/smc/smc_stats.c
new file mode 100644
index 000000000000..e80e34f7ac15
--- /dev/null
+++ b/net/smc/smc_stats.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * SMC statistics netlink routines
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Author(s): Guvenc Gulce
+ */
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/ctype.h>
+#include <linux/smc.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include "smc_netlink.h"
+#include "smc_stats.h"
+
+int smc_stats_init(struct net *net)
+{
+ net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
+ if (!net->smc.fback_rsn)
+ goto err_fback;
+ net->smc.smc_stats = alloc_percpu(struct smc_stats);
+ if (!net->smc.smc_stats)
+ goto err_stats;
+ mutex_init(&net->smc.mutex_fback_rsn);
+ return 0;
+
+err_stats:
+ kfree(net->smc.fback_rsn);
+err_fback:
+ return -ENOMEM;
+}
+
+void smc_stats_exit(struct net *net)
+{
+ kfree(net->smc.fback_rsn);
+ if (net->smc.smc_stats)
+ free_percpu(net->smc.smc_stats);
+}
+
+static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech,
+ int type)
+{
+ struct smc_stats_rmbcnt *stats_rmb_cnt;
+ struct nlattr *attrs;
+
+ if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
+ stats_rmb_cnt = &stats->smc[tech].rmb_tx;
+ else
+ stats_rmb_cnt = &stats->smc[tech].rmb_rx;
+
+ attrs = nla_nest_start(skb, type);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
+ stats_rmb_cnt->reuse_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
+ stats_rmb_cnt->buf_size_small_peer_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
+ stats_rmb_cnt->buf_size_small_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
+ stats_rmb_cnt->buf_full_peer_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
+ stats_rmb_cnt->buf_full_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
+ stats_rmb_cnt->alloc_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
+ stats_rmb_cnt->dgrade_cnt,
+ SMC_NLA_STATS_RMB_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech,
+ int type)
+{
+ struct smc_stats_memsize *stats_pload;
+ struct nlattr *attrs;
+
+ if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
+ stats_pload = &stats->smc[tech].tx_pd;
+ else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
+ stats_pload = &stats->smc[tech].rx_pd;
+ else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
+ stats_pload = &stats->smc[tech].tx_rmbsize;
+ else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
+ stats_pload = &stats->smc[tech].rx_rmbsize;
+ else
+ goto errout;
+
+ attrs = nla_nest_start(skb, type);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
+ stats_pload->buf[SMC_BUF_8K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
+ stats_pload->buf[SMC_BUF_16K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
+ stats_pload->buf[SMC_BUF_32K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
+ stats_pload->buf[SMC_BUF_64K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
+ stats_pload->buf[SMC_BUF_128K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
+ stats_pload->buf[SMC_BUF_256K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
+ stats_pload->buf[SMC_BUF_512K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
+ stats_pload->buf[SMC_BUF_1024K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
+ stats_pload->buf[SMC_BUF_G_1024K],
+ SMC_NLA_STATS_PLOAD_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
+ struct smc_stats *stats, int tech)
+{
+ struct smc_stats_tech *smc_tech;
+ struct nlattr *attrs;
+
+ smc_tech = &stats->smc[tech];
+ if (tech == SMC_TYPE_D)
+ attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
+ else
+ attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);
+
+ if (!attrs)
+ goto errout;
+ if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TX_RMB_STATS))
+ goto errattr;
+ if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RX_RMB_STATS))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TXPLOAD_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RXPLOAD_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_TX_RMB_SIZE))
+ goto errattr;
+ if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
+ SMC_NLA_STATS_T_RX_RMB_SIZE))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
+ smc_tech->clnt_v1_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
+ smc_tech->clnt_v2_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
+ smc_tech->srv_v1_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
+ smc_tech->srv_v2_succ_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
+ smc_tech->rx_bytes,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
+ smc_tech->tx_bytes,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
+ smc_tech->rx_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
+ smc_tech->tx_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
+ smc_tech->sendpage_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
+ smc_tech->cork_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
+ smc_tech->ndly_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
+ smc_tech->splice_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
+ smc_tech->urg_data_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ return 0;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ return -EMSGSIZE;
+}
+
+int smc_nl_get_stats(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ struct smc_stats *stats;
+ struct nlattr *attrs;
+ int cpu, i, size;
+ void *nlh;
+ u64 *src;
+ u64 *sum;
+
+ if (cb_ctx->pos[0])
+ goto errmsg;
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_STATS);
+ if (!nlh)
+ goto errmsg;
+
+ attrs = nla_nest_start(skb, SMC_GEN_STATS);
+ if (!attrs)
+ goto errnest;
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ goto erralloc;
+ size = sizeof(*stats) / sizeof(u64);
+ for_each_possible_cpu(cpu) {
+ src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
+ sum = (u64 *)stats;
+ for (i = 0; i < size; i++)
+ *(sum++) += *(src++);
+ }
+ if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
+ goto errattr;
+ if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
+ stats->clnt_hshake_err_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
+ stats->srv_hshake_err_cnt,
+ SMC_NLA_STATS_PAD))
+ goto errattr;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ cb_ctx->pos[0] = 1;
+ kfree(stats);
+ return skb->len;
+
+errattr:
+ kfree(stats);
+erralloc:
+ nla_nest_cancel(skb, attrs);
+errnest:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return skb->len;
+}
+
+static int smc_nl_get_fback_details(struct sk_buff *skb,
+ struct netlink_callback *cb, int pos,
+ bool is_srv)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ int cnt_reported = cb_ctx->pos[2];
+ struct smc_stats_fback *trgt_arr;
+ struct nlattr *attrs;
+ int rc = 0;
+ void *nlh;
+
+ if (is_srv)
+ trgt_arr = &net->smc.fback_rsn->srv[0];
+ else
+ trgt_arr = &net->smc.fback_rsn->clnt[0];
+ if (!trgt_arr[pos].fback_code)
+ return -ENODATA;
+ nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_GET_FBACK_STATS);
+ if (!nlh)
+ goto errmsg;
+ attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
+ if (!attrs)
+ goto errout;
+ if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
+ goto errattr;
+ if (!cnt_reported) {
+ if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
+ net->smc.fback_rsn->srv_fback_cnt,
+ SMC_NLA_FBACK_STATS_PAD))
+ goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
+ net->smc.fback_rsn->clnt_fback_cnt,
+ SMC_NLA_FBACK_STATS_PAD))
+ goto errattr;
+ cnt_reported = 1;
+ }
+
+ if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
+ trgt_arr[pos].fback_code))
+ goto errattr;
+ if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
+ trgt_arr[pos].count))
+ goto errattr;
+
+ cb_ctx->pos[2] = cnt_reported;
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, nlh);
+ return rc;
+
+errattr:
+ nla_nest_cancel(skb, attrs);
+errout:
+ genlmsg_cancel(skb, nlh);
+errmsg:
+ return -EMSGSIZE;
+}
+
+int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ int rc_srv = 0, rc_clnt = 0, k;
+ int skip_serv = cb_ctx->pos[1];
+ int snum = cb_ctx->pos[0];
+ bool is_srv = true;
+
+ mutex_lock(&net->smc.mutex_fback_rsn);
+ for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
+ if (k < snum)
+ continue;
+ if (!skip_serv) {
+ rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
+ if (rc_srv && rc_srv != -ENODATA)
+ break;
+ } else {
+ skip_serv = 0;
+ }
+ rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
+ if (rc_clnt && rc_clnt != -ENODATA) {
+ skip_serv = 1;
+ break;
+ }
+ if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
+ break;
+ }
+ mutex_unlock(&net->smc.mutex_fback_rsn);
+ cb_ctx->pos[1] = skip_serv;
+ cb_ctx->pos[0] = k;
+ return skb->len;
+}
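
smc_nl_get_stats() above folds the per-CPU copies by treating struct smc_stats as a flat array of u64, which is only sound because the struct (including its nested structs) consists of nothing but u64 counters. A hedged user-space demonstration of the same trick with a stand-in struct:

#include <stdint.h>
#include <stdio.h>

struct stats {              /* must contain only u64-sized fields */
    uint64_t rx_cnt;
    uint64_t tx_cnt;
    uint64_t err_cnt;
};

int main(void)
{
    struct stats percpu[2] = {
        { .rx_cnt = 1, .tx_cnt = 2, .err_cnt = 0 },
        { .rx_cnt = 3, .tx_cnt = 4, .err_cnt = 1 },
    };
    struct stats sum = { 0 };
    size_t n = sizeof(sum) / sizeof(uint64_t);

    for (int cpu = 0; cpu < 2; cpu++) {
        uint64_t *src = (uint64_t *)&percpu[cpu];
        uint64_t *dst = (uint64_t *)&sum;

        for (size_t i = 0; i < n; i++)
            dst[i] += src[i];   /* field-wise sum via the u64 view */
    }
    printf("rx=%llu tx=%llu err=%llu\n",
           (unsigned long long)sum.rx_cnt,
           (unsigned long long)sum.tx_cnt,
           (unsigned long long)sum.err_cnt);
    return 0;
}
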
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
new file mode 100644
index 000000000000..84b7ecd8c05c
--- /dev/null
+++ b/net/smc/smc_stats.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Macros for SMC statistics
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Author(s): Guvenc Gulce
+ */
+
+#ifndef NET_SMC_SMC_STATS_H_
+#define NET_SMC_SMC_STATS_H_
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/ctype.h>
+#include <linux/smc.h>
+
+#include "smc_clc.h"
+
+#define SMC_MAX_FBACK_RSN_CNT 30
+
+enum {
+ SMC_BUF_8K,
+ SMC_BUF_16K,
+ SMC_BUF_32K,
+ SMC_BUF_64K,
+ SMC_BUF_128K,
+ SMC_BUF_256K,
+ SMC_BUF_512K,
+ SMC_BUF_1024K,
+ SMC_BUF_G_1024K,
+ SMC_BUF_MAX,
+};
+
+struct smc_stats_fback {
+ int fback_code;
+ u16 count;
+};
+
+struct smc_stats_rsn {
+ struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
+ struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
+ u64 srv_fback_cnt;
+ u64 clnt_fback_cnt;
+};
+
+struct smc_stats_rmbcnt {
+ u64 buf_size_small_peer_cnt;
+ u64 buf_size_small_cnt;
+ u64 buf_full_peer_cnt;
+ u64 buf_full_cnt;
+ u64 reuse_cnt;
+ u64 alloc_cnt;
+ u64 dgrade_cnt;
+};
+
+struct smc_stats_memsize {
+ u64 buf[SMC_BUF_MAX];
+};
+
+struct smc_stats_tech {
+ struct smc_stats_memsize tx_rmbsize;
+ struct smc_stats_memsize rx_rmbsize;
+ struct smc_stats_memsize tx_pd;
+ struct smc_stats_memsize rx_pd;
+ struct smc_stats_rmbcnt rmb_tx;
+ struct smc_stats_rmbcnt rmb_rx;
+ u64 clnt_v1_succ_cnt;
+ u64 clnt_v2_succ_cnt;
+ u64 srv_v1_succ_cnt;
+ u64 srv_v2_succ_cnt;
+ u64 sendpage_cnt;
+ u64 urg_data_cnt;
+ u64 splice_cnt;
+ u64 cork_cnt;
+ u64 ndly_cnt;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 rx_cnt;
+ u64 tx_cnt;
+};
+
+struct smc_stats {
+ struct smc_stats_tech smc[2];
+ u64 clnt_hshake_err_cnt;
+ u64 srv_hshake_err_cnt;
+};
+
+#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
+do { \
+ typeof(_smc_stats) stats = (_smc_stats); \
+ typeof(_tech) t = (_tech); \
+ typeof(_len) l = (_len); \
+ int _pos = fls64((l) >> 13); \
+ typeof(_rc) r = (_rc); \
+ int m = SMC_BUF_MAX - 1; \
+ this_cpu_inc((*stats).smc[t].key ## _cnt); \
+ if (r <= 0) \
+ break; \
+ _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
+ this_cpu_add((*stats).smc[t].key ## _bytes, r); \
+} \
+while (0)
+
+#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ struct net *_net = sock_net(&__smc->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(length) _len = (length); \
+ typeof(rcode) _rc = (rcode); \
+ bool is_smcd = !__smc->conn.lnk; \
+ if (is_smcd) \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
+ else \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
+} \
+while (0)
+
+#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ struct net *_net = sock_net(&__smc->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(length) _len = (length); \
+ typeof(rcode) _rc = (rcode); \
+ bool is_smcd = !__smc->conn.lnk; \
+ if (is_smcd) \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
+ else \
+ SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
+} \
+while (0)
+
+#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
+do { \
+ typeof(_len) _l = (_len); \
+ typeof(_tech) t = (_tech); \
+ int _pos = fls((_l) >> 13); \
+ int m = SMC_BUF_MAX - 1; \
+ _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
+} \
+while (0)
+
+#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
+ this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)
+
+#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
+do { \
+ struct net *_net = sock_net(&(_smc)->sk); \
+ struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
+ typeof(_is_smcd) is_d = (_is_smcd); \
+ typeof(_is_rx) is_r = (_is_rx); \
+ typeof(_len) l = (_len); \
+ if ((is_d) && (is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
+ if ((is_d) && !(is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
+ if (!(is_d) && (is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
+ if (!(is_d) && !(is_r)) \
+ SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
+} \
+while (0)
+
+#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
+do { \
+ struct net *net = sock_net(&(_smc)->sk); \
+ struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
+ typeof(_is_smcd) is_d = (_is_smcd); \
+ typeof(_is_rx) is_r = (_is_rx); \
+ if ((is_d) && (is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
+ if ((is_d) && !(is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
+ if (!(is_d) && (is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
+ if (!(is_d) && !(is_r)) \
+ SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
+} \
+while (0)
+
+#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
+ SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)
+
+#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)
+
+#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)
+
+#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)
+
+#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
+ SMC_STAT_RMB(smc, buf_full, is_smcd, true)
+
+#define SMC_STAT_INC(_smc, type) \
+do { \
+ typeof(_smc) __smc = _smc; \
+ bool is_smcd = !(__smc)->conn.lnk; \
+ struct net *net = sock_net(&(__smc)->sk); \
+ struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
+ if ((is_smcd)) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
+ else \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
+} \
+while (0)
+
+#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
+do { \
+ typeof(_aclc) acl = (_aclc); \
+ bool is_v2 = (acl->hdr.version == SMC_V2); \
+ bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
+ struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
+ if (is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
+ else if (is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
+ else if (!is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
+ else if (!is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
+} \
+while (0)
+
+#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
+do { \
+ typeof(_ini) i = (_ini); \
+ bool is_v2 = (i->smcd_version & SMC_V2); \
+ bool is_smcd = (i->is_smcd); \
+ typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
+ if (is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
+ else if (is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
+ else if (!is_v2 && is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
+ else if (!is_v2 && !is_smcd) \
+ this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
+} \
+while (0)
+
+int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_stats_init(struct net *net);
+void smc_stats_exit(struct net *net);
+
+#endif /* NET_SMC_SMC_STATS_H_ */
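
The payload and RMB-size macros above bucket a length into the 8K..1024K histogram slots with fls64(len >> 13); the extra "len == 1 << (pos + 12)" test pulls exact powers of two down into their own slot rather than the next one up. A small user-space check of that index math, with fls64 recreated via __builtin_clzll:

#include <stdint.h>
#include <stdio.h>

#define SMC_BUF_MAX 9   /* 8K..1024K plus one ">1024K" slot */

static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

static int bucket(uint64_t len)
{
    int pos = fls64_(len >> 13);
    int m = SMC_BUF_MAX - 1;

    return pos < m ? (len == 1ULL << (pos + 12) ? pos - 1 : pos) : m;
}

int main(void)
{
    /* 8K exactly -> slot 0; 12K rounds up into the 16K slot (1);
     * 16K exactly -> slot 1; 2M overflows into the last slot (8). */
    printf("%d %d %d %d\n",
           bucket(8192), bucket(12288), bucket(16384), bucket(1 << 21));
    return 0;
}
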
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 4532c16bf85e..289025cd545a 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -27,6 +27,7 @@
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
+#include "smc_stats.h"
#define SMC_TX_WORK_DELAY 0
#define SMC_TX_CORK_DELAY (HZ >> 2) /* 250 ms */
@@ -45,6 +46,8 @@ static void smc_tx_write_space(struct sock *sk)
/* similar to sk_stream_write_space */
if (atomic_read(&smc->conn.sndbuf_space) && sock) {
+ if (test_bit(SOCK_NOSPACE, &sock->flags))
+ SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
@@ -151,9 +154,19 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
goto out_err;
}
+ if (sk->sk_state == SMC_INIT)
+ return -ENOTCONN;
+
+ if (len > conn->sndbuf_desc->len)
+ SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);
+
+ if (len > conn->peer_rmbe_size)
+ SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);
+
+ if (msg->msg_flags & MSG_OOB)
+ SMC_STAT_INC(smc, urg_data_cnt);
+
while (msg_data_left(msg)) {
- if (sk->sk_state == SMC_INIT)
- return -ENOTCONN;
if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
(smc->sk.sk_err == ECONNABORTED) ||
conn->killed)
@@ -419,8 +432,12 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
/* destination: RMBE */
/* cf. snd_wnd */
rmbespace = atomic_read(&conn->peer_rmbe_space);
- if (rmbespace <= 0)
+ if (rmbespace <= 0) {
+ struct smc_sock *smc = container_of(conn, struct smc_sock,
+ conn);
+ SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
return 0;
+ }
smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
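The rmbespace hunk above recovers the owning smc_sock from the embedded smc_connection via container_of before bumping the tx_peer_full counter. A self-contained sketch of that recovery step, with the structs reduced to stand-ins:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of, as found in the kernel headers. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct smc_connection { int peer_rmbe_space; };
struct smc_sock { int placeholder; struct smc_connection conn; };

int main(void)
{
        struct smc_sock s = { .conn = { .peer_rmbe_space = 0 } };
        struct smc_connection *conn = &s.conn;

        /* Same recovery step the hunk performs before recording
         * the tx_peer_full statistic. */
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

        printf("recovered smc_sock at %p (expected %p)\n",
               (void *)smc, (void *)&s);
        return 0;
}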
diff --git a/net/socket.c b/net/socket.c
index 27e3e7d53f8e..bd9233da2497 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -165,6 +165,54 @@ static const struct file_operations socket_file_ops = {
.show_fdinfo = sock_show_fdinfo,
};
+static const char * const pf_family_names[] = {
+ [PF_UNSPEC] = "PF_UNSPEC",
+ [PF_UNIX] = "PF_UNIX/PF_LOCAL",
+ [PF_INET] = "PF_INET",
+ [PF_AX25] = "PF_AX25",
+ [PF_IPX] = "PF_IPX",
+ [PF_APPLETALK] = "PF_APPLETALK",
+ [PF_NETROM] = "PF_NETROM",
+ [PF_BRIDGE] = "PF_BRIDGE",
+ [PF_ATMPVC] = "PF_ATMPVC",
+ [PF_X25] = "PF_X25",
+ [PF_INET6] = "PF_INET6",
+ [PF_ROSE] = "PF_ROSE",
+ [PF_DECnet] = "PF_DECnet",
+ [PF_NETBEUI] = "PF_NETBEUI",
+ [PF_SECURITY] = "PF_SECURITY",
+ [PF_KEY] = "PF_KEY",
+ [PF_NETLINK] = "PF_NETLINK/PF_ROUTE",
+ [PF_PACKET] = "PF_PACKET",
+ [PF_ASH] = "PF_ASH",
+ [PF_ECONET] = "PF_ECONET",
+ [PF_ATMSVC] = "PF_ATMSVC",
+ [PF_RDS] = "PF_RDS",
+ [PF_SNA] = "PF_SNA",
+ [PF_IRDA] = "PF_IRDA",
+ [PF_PPPOX] = "PF_PPPOX",
+ [PF_WANPIPE] = "PF_WANPIPE",
+ [PF_LLC] = "PF_LLC",
+ [PF_IB] = "PF_IB",
+ [PF_MPLS] = "PF_MPLS",
+ [PF_CAN] = "PF_CAN",
+ [PF_TIPC] = "PF_TIPC",
+ [PF_BLUETOOTH] = "PF_BLUETOOTH",
+ [PF_IUCV] = "PF_IUCV",
+ [PF_RXRPC] = "PF_RXRPC",
+ [PF_ISDN] = "PF_ISDN",
+ [PF_PHONET] = "PF_PHONET",
+ [PF_IEEE802154] = "PF_IEEE802154",
+ [PF_CAIF] = "PF_CAIF",
+ [PF_ALG] = "PF_ALG",
+ [PF_NFC] = "PF_NFC",
+ [PF_VSOCK] = "PF_VSOCK",
+ [PF_KCM] = "PF_KCM",
+ [PF_QIPCRTR] = "PF_QIPCRTR",
+ [PF_SMC] = "PF_SMC",
+ [PF_XDP] = "PF_XDP",
+};
+
/*
* The protocol list. Each protocol is registered in here.
*/
@@ -1072,19 +1120,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
* what to do with it - that's up to the protocol still.
*/
-/**
- * get_net_ns - increment the refcount of the network namespace
- * @ns: common namespace (net)
- *
- * Returns the net's common namespace.
- */
-
-struct ns_common *get_net_ns(struct ns_common *ns)
-{
- return &get_net(container_of(ns, struct net, ns))->ns;
-}
-EXPORT_SYMBOL_GPL(get_net_ns);
-
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
@@ -2988,7 +3023,7 @@ int sock_register(const struct net_proto_family *ops)
}
spin_unlock(&net_family_lock);
- pr_info("NET: Registered protocol family %d\n", ops->family);
+ pr_info("NET: Registered %s protocol family\n", pf_family_names[ops->family]);
return err;
}
EXPORT_SYMBOL(sock_register);
@@ -3016,7 +3051,7 @@ void sock_unregister(int family)
synchronize_rcu();
- pr_info("NET: Unregistered protocol family %d\n", family);
+ pr_info("NET: Unregistered %s protocol family\n", pf_family_names[family]);
}
EXPORT_SYMBOL(sock_unregister);
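pf_family_names is a designated-initializer array, so any family number without an entry would be a NULL slot; sock_register() only runs for families the kernel defines, but a defensive caller would bounds-check and fall back to the numeric form. A user-space sketch of such a hypothetical lookup helper (family_name is not a kernel function, and the shortened table below leaves gaps on purpose):

#include <stdio.h>

#define NPROTO 46

static const char * const pf_family_names[NPROTO] = {
        [0]  = "PF_UNSPEC",
        [1]  = "PF_UNIX/PF_LOCAL",
        [2]  = "PF_INET",
        /* ... gaps stay NULL in this sketch ... */
        [44] = "PF_XDP",
};

/* Hypothetical defensive lookup: fall back to the numeric form
 * for out-of-range or unnamed families. */
static const char *family_name(int family, char *buf, size_t len)
{
        if (family < 0 || family >= NPROTO || !pf_family_names[family]) {
                snprintf(buf, len, "PF_%d", family);
                return buf;
        }
        return pf_family_names[family];
}

int main(void)
{
        char buf[32];

        printf("NET: Registered %s protocol family\n",
               family_name(2, buf, sizeof(buf)));
        printf("NET: Registered %s protocol family\n",
               family_name(13, buf, sizeof(buf))); /* unnamed slot here */
        return 0;
}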
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index b3815c1e8f2e..9c0343568d2a 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -58,7 +58,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
/* Report an error on the lower socket */
sk->sk_err = -err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f555d335e910..42623d6b8f0e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
return;
}
- /*
- * Even though there was an error, we may have acquired
- * a request slot somehow. Make sure not to leak it.
- */
- if (task->tk_rqstp)
- xprt_release(task);
-
switch (status) {
case -ENOMEM:
rpc_delay(task, HZ >> 2);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e5b5a960a69b..3509a7f139b9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -70,6 +70,7 @@
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
static void xprt_destroy(struct rpc_xprt *xprt);
+static void xprt_request_init(struct rpc_task *task);
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
@@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task)
spin_unlock(&xprt->queue_lock);
}
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+ if (task->tk_rqstp)
+ xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
set_bit(XPRT_CONGESTED, &xprt->state);
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
+}
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
+
+static bool __xprt_set_rq(struct rpc_task *task, void *data)
+{
+ struct rpc_rqst *req = data;
+
+ if (task->tk_rqstp == NULL) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ task->tk_rqstp = req;
+ return true;
+ }
+ return false;
}
-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (rpc_wake_up_next(&xprt->backlog) == NULL)
+ if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
clear_bit(XPRT_CONGESTED, &xprt->state);
+ return false;
+ }
+ return true;
}
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
@@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
goto out;
spin_lock(&xprt->reserve_lock);
if (test_bit(XPRT_CONGESTED, &xprt->state)) {
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ xprt_add_backlog(xprt, task);
ret = true;
}
spin_unlock(&xprt->reserve_lock);
@@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
spin_lock(&xprt->reserve_lock);
- if (!xprt_dynamic_free_slot(xprt, req)) {
+ if (!xprt_wake_up_backlog(xprt, req) &&
+ !xprt_dynamic_free_slot(xprt, req)) {
memset(req, 0, sizeof(*req)); /* mark unused */
list_add(&req->rq_list, &xprt->free);
}
- xprt_wake_up_backlog(xprt);
spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);
@@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task)
xdr_free_bvec(&req->rq_snd_buf);
if (req->rq_cred != NULL)
put_rpccred(req->rq_cred);
- task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
+ task->tk_rqstp = NULL;
if (likely(!bc_prealloc(req)))
xprt->ops->free_slot(xprt, req);
else
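The reworked xprt_free_slot() offers a freed request slot to the first backlog waiter before it ever reaches the free list, via the __xprt_set_rq callback. A toy user-space model of that direct hand-off, with illustrative names and no locking:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct waiter {
        struct waiter *next;
        void *slot;             /* filled in by the hand-off */
};

static struct waiter *backlog;

/* Before returning a slot to the pool, offer it to the first
 * waiter so it never touches the free list at all. */
static bool wake_up_backlog(void *slot)
{
        struct waiter *w = backlog;

        if (!w)
                return false;   /* nobody waiting: caller pools the slot */
        backlog = w->next;
        w->slot = slot;         /* direct hand-off, no free-list round trip */
        return true;
}

int main(void)
{
        struct waiter w = { .next = NULL, .slot = NULL };
        int slot = 42;

        backlog = &w;
        if (!wake_up_backlog(&slot))
                printf("slot returned to free list\n");
        else
                printf("slot handed to waiter: %d\n", *(int *)w.slot);
        return 0;
}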
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 78c075a68c04..1b4073131c6f 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -7,13 +7,13 @@
* Trond Myklebust <trond.myklebust@primarydata.com>
*
*/
+#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
-#include <asm/cmpxchg.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/addr.h>
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 649f7d8b9733..c335c1361564 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -628,8 +628,9 @@ out_mapping_err:
return false;
}
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
*/
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct xdr_buf *xdr)
{
- struct kvec *tail = &xdr->tail[0];
-
if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
return false;
- /* If there is a Read chunk, the page list is handled
+ /* If there is a Read chunk, the page list is being handled
* via explicit RDMA, and thus is skipped here.
*/
- if (tail->iov_len) {
- if (!rpcrdma_prepare_tail_iov(req, xdr,
- offset_in_page(tail->iov_base),
- tail->iov_len))
+ /* Do not include the tail if it is only an XDR pad */
+ if (xdr->tail[0].iov_len > 3) {
+ unsigned int page_base, len;
+
+ /* If the content in the page list is an odd length,
+ * xdr_write_pages() adds a pad at the beginning of
+ * the tail iovec. Force the tail's non-pad content to
+ * land at the next XDR position in the Send message.
+ */
+ page_base = offset_in_page(xdr->tail[0].iov_base);
+ len = xdr->tail[0].iov_len;
+ page_base += len & 3;
+ len -= len & 3;
+ if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
return false;
kref_get(&req->rl_kref);
}
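The tail trimming in rpcrdma_prepare_readch() is easiest to see with numbers: assuming the non-pad tail content is itself XDR-aligned, len & 3 equals the size of the leading pad, so the mapped region starts that many bytes later and shrinks by the same amount. A standalone check with illustrative values:

#include <stdio.h>

int main(void)
{
        /* Mirror of the trim above: skip the leading XDR pad so the
         * mapped tail starts on the next XDR (4-byte) position. */
        unsigned int page_base = 100;   /* offset of tail in its page */
        unsigned int len = 7;           /* tail length incl. 3-byte pad */

        page_base += len & 3;           /* 100 + 3 -> 103 */
        len -= len & 3;                 /* 7 - 3  -> 4   */

        printf("map %u bytes at page offset %u\n", len, page_base);
        return 0;
}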
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 09953597d055..19a49d26b1e4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
return;
out_sleep:
- set_bit(XPRT_CONGESTED, &xprt->state);
- rpc_sleep_on(&xprt->backlog, task, NULL);
task->tk_status = -EAGAIN;
+ xprt_add_backlog(xprt, task);
}
/**
@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
struct rpcrdma_xprt *r_xprt =
container_of(xprt, struct rpcrdma_xprt, rx_xprt);
- memset(rqst, 0, sizeof(*rqst));
- rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
- if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
- clear_bit(XPRT_CONGESTED, &xprt->state);
+ rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+ if (!xprt_wake_up_backlog(xprt, rqst)) {
+ memset(rqst, 0, sizeof(*rqst));
+ rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+ }
}
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 1e965a380896..649c23518ec0 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1201,6 +1201,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
}
/**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+ if (req->rl_reply) {
+ rpcrdma_rep_put(buffers, req->rl_reply);
+ req->rl_reply = NULL;
+ }
+}
+
+/**
* rpcrdma_buffer_get - Get a request buffer
* @buffers: Buffer pool from which to obtain a buffer
*
@@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
*/
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
- if (req->rl_reply)
- rpcrdma_rep_put(buffers, req->rl_reply);
- req->rl_reply = NULL;
+ rpcrdma_reply_put(buffers, req);
spin_lock(&buffers->rb_lock);
list_add(&req->rl_list, &buffers->rb_send_bufs);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 436ad7312614..5d231d94e944 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
struct rpcrdma_req *req);
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
gfp_t flags);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 47aa47a2b07c..316d04945587 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
kernel_sock_shutdown(transport->sock, SHUT_RDWR);
return -ENOTCONN;
}
+ if (!transport->inet)
+ return -ENOTCONN;
xs_pktdump("packet data:",
req->rq_svec->iov_base,
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 89a36db47ab4..070698dd19bc 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -381,19 +381,20 @@ EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
static int __switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack))
{
+ struct switchdev_notifier_info *info = &port_obj_info->info;
struct netlink_ext_ack *extack;
struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
- extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ extack = switchdev_notifier_info_to_extack(info);
if (check_cb(dev)) {
- err = add_cb(dev, port_obj_info->obj, extack);
+ err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
if (err != -EOPNOTSUPP)
port_obj_info->handled = true;
return err;
@@ -422,7 +423,7 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*add_cb)(struct net_device *dev,
+ int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack))
{
@@ -439,15 +440,16 @@ EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
static int __switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
+ struct switchdev_notifier_info *info = &port_obj_info->info;
struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
if (check_cb(dev)) {
- err = del_cb(dev, port_obj_info->obj);
+ err = del_cb(dev, info->ctx, port_obj_info->obj);
if (err != -EOPNOTSUPP)
port_obj_info->handled = true;
return err;
@@ -476,7 +478,7 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
- int (*del_cb)(struct net_device *dev,
+ int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
int err;
@@ -492,19 +494,20 @@ EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
static int __switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
+ struct switchdev_notifier_info *info = &port_attr_info->info;
struct netlink_ext_ack *extack;
struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
- extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
+ extack = switchdev_notifier_info_to_extack(info);
if (check_cb(dev)) {
- err = set_cb(dev, port_attr_info->attr, extack);
+ err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
if (err != -EOPNOTSUPP)
port_attr_info->handled = true;
return err;
@@ -533,7 +536,7 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
- int (*set_cb)(struct net_device *dev,
+ int (*set_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
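All four switchdev helper signatures gain the same opaque const void *ctx argument, forwarded from the notifier info to the driver callback. A minimal sketch of a callback conforming to the new shape, with the kernel types reduced to local stand-ins:

#include <stdio.h>

/* Stand-ins for the kernel types so the shape compiles standalone. */
struct net_device { const char *name; };
struct switchdev_obj { int id; };
struct netlink_ext_ack { const char *msg; };

/* The new callback shape: dev, opaque ctx, object, extack. A driver
 * with no use for ctx simply ignores it. */
static int example_port_obj_add(struct net_device *dev, const void *ctx,
                                const struct switchdev_obj *obj,
                                struct netlink_ext_ack *extack)
{
        (void)ctx;      /* opaque cookie from the notifier info */
        (void)extack;
        printf("%s: add obj %d\n", dev->name, obj->id);
        return 0;
}

int main(void)
{
        struct net_device dev = { .name = "sw0p1" };
        struct switchdev_obj obj = { .id = 7 };

        return example_port_obj_add(&dev, NULL, &obj, NULL);
}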
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index d4beca895992..593846d25214 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -699,7 +699,7 @@ int tipc_bcast_init(struct net *net)
spin_lock_init(&tipc_net(net)->bclock);
if (!tipc_link_bc_create(net, 0, 0, NULL,
- FB_MTU,
+ one_page_mtu,
BCLINK_WIN_DEFAULT,
BCLINK_WIN_DEFAULT,
0,
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 5cc1f0307215..3f4542e0f065 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
tn->trial_addr = 0;
tn->addr_trial_end = 0;
tn->capabilities = TIPC_NODE_CAPABILITIES;
- INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
+ INIT_WORK(&tn->work, tipc_net_finalize_work);
memset(tn->node_id, 0, sizeof(tn->node_id));
memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
@@ -110,7 +110,7 @@ static void __net_exit tipc_exit_net(struct net *net)
tipc_detach_loopback(net);
/* Make sure the tipc_net_finalize_work() finished */
- cancel_work_sync(&tn->final_work.work);
+ cancel_work_sync(&tn->work);
tipc_net_stop(net);
tipc_bcast_stop(net);
@@ -119,6 +119,8 @@ static void __net_exit tipc_exit_net(struct net *net)
#ifdef CONFIG_TIPC_CRYPTO
tipc_crypto_stop(&tipc_net(net)->crypto_tx);
#endif
+ while (atomic_read(&tn->wq_count))
+ cond_resched();
}
static void __net_exit tipc_pernet_pre_exit(struct net *net)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 03de7b213f55..0a3f7a70a50a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -91,12 +91,6 @@ extern unsigned int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
extern int sysctl_tipc_named_timeout __read_mostly;
-struct tipc_net_work {
- struct work_struct work;
- struct net *net;
- u32 addr;
-};
-
struct tipc_net {
u8 node_id[NODE_ID_LEN];
u32 node_addr;
@@ -148,7 +142,9 @@ struct tipc_net {
struct tipc_crypto *crypto_tx;
#endif
/* Work item for net finalize */
- struct tipc_net_work final_work;
+ struct work_struct work;
+ /* The number of work items currently scheduled */
+ atomic_t wq_count;
};
static inline struct tipc_net *tipc_net(struct net *net)
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5380f605b851..da69e1abf68f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -168,7 +168,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
/* Apply trial address if we just left trial period */
if (!trial && !self) {
- tipc_sched_net_finalize(net, tn->trial_addr);
+ schedule_work(&tn->work);
msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
}
@@ -308,7 +308,7 @@ static void tipc_disc_timeout(struct timer_list *t)
if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
spin_unlock_bh(&d->lock);
- tipc_sched_net_finalize(net, tn->trial_addr);
+ schedule_work(&tn->work);
return;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 115109259430..cf586840caeb 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -372,6 +372,11 @@ char tipc_link_plane(struct tipc_link *l)
return l->net_plane;
}
+struct net *tipc_link_net(struct tipc_link *l)
+{
+ return l->net;
+}
+
void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
l->peer_caps = capabilities;
@@ -649,6 +654,7 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
break;
case LINK_FAILOVER_BEGIN_EVT:
l->state = LINK_FAILINGOVER;
+ break;
case LINK_FAILURE_EVT:
case LINK_RESET_EVT:
case LINK_ESTABLISH_EVT:
@@ -907,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
dnode, l->addr, dport, 0, 0);
if (!skb)
- return -ENOBUFS;
+ return -ENOMEM;
msg_set_dest_droppable(buf_msg(skb), true);
TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb_queue_tail(&l->wakeupq, skb);
@@ -1025,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l)
*
* Consumes the buffer chain.
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE, -ENOBUFS or -ENOMEM
*/
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
@@ -1083,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (!_skb) {
kfree_skb(skb);
__skb_queue_purge(list);
- return -ENOBUFS;
+ return -ENOMEM;
}
__skb_queue_tail(transmq, skb);
tipc_link_set_skb_retransmit_time(skb, l);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index fc07232c9a12..a16f401fdabd 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3f0a25345a7c..5c9fd4791c4b 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,19 +41,18 @@
#include "name_table.h"
#include "crypto.h"
+#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
-#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
+#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-#define BUF_TAILROOM 16
+#define BUF_OVERHEAD BUF_HEADROOM
#endif
-static unsigned int align(unsigned int i)
-{
- return (i + 3) & ~3u;
-}
+const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/**
* tipc_buf_acquire - creates a TIPC message buffer
@@ -69,13 +68,8 @@ static unsigned int align(unsigned int i)
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
-#ifdef CONFIG_TIPC_CRYPTO
- unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
-#else
- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-#endif
- skb = alloc_skb_fclone(buf_size, gfp);
+ skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@@ -149,18 +143,13 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (unlikely(head))
goto err;
*buf = NULL;
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ goto err;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
head = *headbuf = frag;
TIPC_SKB_CB(head)->tail = NULL;
- if (skb_is_nonlinear(head)) {
- skb_walk_frags(head, tail) {
- TIPC_SKB_CB(head)->tail = tail;
- }
- } else {
- skb_frag_list_init(head);
- }
return 0;
}
@@ -400,7 +389,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
if (unlikely(!skb)) {
if (pktmax != MAX_MSG_SIZE)
return -ENOMEM;
- rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
+ rc = tipc_msg_build(mhdr, m, offset, dsz,
+ one_page_mtu, list);
if (rc != dsz)
return rc;
if (tipc_msg_assemble(list))
@@ -495,7 +485,7 @@ static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
msz = msg_size(msg);
bsz = msg_size(bmsg);
- offset = align(bsz);
+ offset = BUF_ALIGN(bsz);
pad = offset - bsz;
if (unlikely(skb_tailroom(bskb) < (pad + msz)))
@@ -552,7 +542,7 @@ bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
/* Make a new bundle of the two messages if possible */
tsz = msg_size(buf_msg(tskb));
- if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
+ if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
return true;
if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
GFP_ATOMIC)))
@@ -611,7 +601,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
if (unlikely(!tipc_msg_validate(iskb)))
goto none;
- *pos += align(imsz);
+ *pos += BUF_ALIGN(imsz);
return true;
none:
kfree_skb(skb);
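one_page_mtu sizes a TIPC buffer so that headroom, payload and the skb_shared_info trailer all fit in a single page. A standalone recomputation with stand-in constants (the real values depend on PAGE_SIZE, LL_MAX_HEADER, CONFIG_TIPC_CRYPTO and the cache-line rounding done by SKB_DATA_ALIGN):

#include <stdio.h>

#define PAGE_SIZE_X     4096u
#define SMP_ALIGN_X     64u     /* SKB_DATA_ALIGN cache-line rounding */
#define BUF_OVERHEAD_X  176u    /* stand-in for BUF_HEADROOM (+ tag) */
#define SHINFO_X        320u    /* stand-in sizeof(skb_shared_info) */

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned int mtu = PAGE_SIZE_X
                - ALIGN_UP(BUF_OVERHEAD_X, SMP_ALIGN_X)
                - ALIGN_UP(SHINFO_X, SMP_ALIGN_X);

        /* 4096 - 192 - 320 = 3584 with these stand-in values */
        printf("one_page_mtu (illustrative) = %u\n", mtu);
        return 0;
}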
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 5d64596ba987..64ae4c4c44f8 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -99,9 +99,10 @@ struct plist;
#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-#define FB_MTU 3744
#define TIPC_MEDIA_INFO_OFFSET 5
+extern const int one_page_mtu;
+
struct tipc_skb_cb {
union {
struct {
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index fecab516bf41..01396dd1c899 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -673,12 +673,12 @@ exit:
* Returns a list of local sockets
*/
void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
- bool exact, struct list_head *dports)
+ struct list_head *dports)
{
struct service_range *sr;
struct tipc_service *sc;
struct publication *p;
- u32 scope = ua->scope;
+ u8 scope = ua->scope;
rcu_read_lock();
sc = tipc_service_find(net, ua);
@@ -688,7 +688,7 @@ void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
spin_lock_bh(&sc->lock);
service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
list_for_each_entry(p, &sr->local_publ, local_publ) {
- if (p->scope == scope || (!exact && p->scope < scope))
+ if (scope == p->scope || scope == TIPC_ANY_SCOPE)
tipc_dest_push(dports, 0, p->sk.ref);
}
}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index c7c9a3ddd420..259f95e3d99c 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -51,6 +51,8 @@ struct tipc_uaddr;
#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1)
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
+#define TIPC_ANY_SCOPE 10 /* Both node and cluster scope will match */
+
/**
* struct publication - info about a published service address or range
* @sr: service range represented by this publication
@@ -113,7 +115,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
bool tipc_nametbl_lookup_anycast(struct net *net, struct tipc_uaddr *ua,
struct tipc_socket_addr *sk);
void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
- bool exact, struct list_head *dports);
+ struct list_head *dports);
void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua,
struct tipc_nlist *nodes);
bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua,
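With TIPC_ANY_SCOPE, the multicast socket lookup reduces to a single predicate: exact scope match, or the caller asked for any scope. A compact sketch (scope values as in the TIPC uapi, TIPC_ANY_SCOPE from the hunk above):

#include <stdio.h>
#include <stdbool.h>

#define TIPC_CLUSTER_SCOPE  2
#define TIPC_NODE_SCOPE     3
#define TIPC_ANY_SCOPE      10  /* matches both of the above */

/* The lookup predicate after this change: an exact scope match,
 * or the caller passed TIPC_ANY_SCOPE. */
static bool scope_matches(unsigned char wanted, unsigned char published)
{
        return wanted == published || wanted == TIPC_ANY_SCOPE;
}

int main(void)
{
        printf("%d\n", scope_matches(TIPC_ANY_SCOPE, TIPC_NODE_SCOPE));
        printf("%d\n", scope_matches(TIPC_CLUSTER_SCOPE, TIPC_NODE_SCOPE));
        return 0;
}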
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a130195af188..0e95572e56b4 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -41,6 +41,7 @@
#include "socket.h"
#include "node.h"
#include "bcast.h"
+#include "link.h"
#include "netlink.h"
#include "monitor.h"
@@ -142,19 +143,9 @@ static void tipc_net_finalize(struct net *net, u32 addr)
void tipc_net_finalize_work(struct work_struct *work)
{
- struct tipc_net_work *fwork;
+ struct tipc_net *tn = container_of(work, struct tipc_net, work);
- fwork = container_of(work, struct tipc_net_work, work);
- tipc_net_finalize(fwork->net, fwork->addr);
-}
-
-void tipc_sched_net_finalize(struct net *net, u32 addr)
-{
- struct tipc_net *tn = tipc_net(net);
-
- tn->final_work.net = net;
- tn->final_work.addr = addr;
- schedule_work(&tn->final_work.work);
+ tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
}
void tipc_net_stop(struct net *net)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8217905348f4..9947b7dfe1d2 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -423,18 +423,18 @@ static void tipc_node_write_unlock(struct tipc_node *n)
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, n->addr, n->capabilities);
+ tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, n->addr, n->capabilities);
+ tipc_named_node_up(net, sk.node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, n->addr, bearer_id);
- tipc_nametbl_publish(net, &ua, &sk, n->link_id);
+ tipc_mon_peer_up(net, sk.node, bearer_id);
+ tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, n->addr, bearer_id);
- tipc_nametbl_withdraw(net, &ua, &sk, n->link_id);
+ tipc_mon_peer_down(net, sk.node, bearer_id);
+ tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
@@ -1214,7 +1214,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
/* Peer has changed i/f address without rebooting.
* If so, the link will reset soon, and the next
* discovery will be accepted. So we can ignore it.
- * It may also be an cloned or malicious peer having
+ * It may also be a cloned or malicious peer having
* chosen the same node address and signature as an
* existing one.
* Ignore requests until the link goes down, if ever.
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 58935cd0d068..34a97ea36cc8 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -73,9 +73,6 @@ struct sockaddr_pair {
/**
* struct tipc_sock - TIPC socket structure
* @sk: socket - interacts with 'port' and with user via the socket API
- * @conn_type: TIPC type used when connection was established
- * @conn_instance: TIPC instance used when connection was established
- * @published: non-zero if port has one or more associated names
* @max_pkt: maximum packet size "hint" used when building messages sent by port
* @maxnagle: maximum size of msg which can be subject to nagle
* @portid: unique port identity in TIPC socket hash table
@@ -106,11 +103,11 @@ struct sockaddr_pair {
* @expect_ack: whether this TIPC socket is expecting an ack
* @nodelay: setsockopt() TIPC_NODELAY setting
* @group_is_open: TIPC socket group is fully open (FIXME)
+ * @published: true if port has one or more associated names
+ * @conn_addrtype: address type used when establishing connection
*/
struct tipc_sock {
struct sock sk;
- u32 conn_type;
- u32 conn_instance;
u32 max_pkt;
u32 maxnagle;
u32 portid;
@@ -141,6 +138,7 @@ struct tipc_sock {
bool nodelay;
bool group_is_open;
bool published;
+ u8 conn_addrtype;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -664,7 +662,7 @@ static int tipc_release(struct socket *sock)
* @skaddr: socket address describing name(s) and desired operation
* @alen: size of socket address data structure
*
- * Name and name sequence binding is indicated using a positive scope value;
+ * Name and name sequence binding are indicated using a positive scope value;
* a negative scope value unbinds the specified name. Specifying no name
* (i.e. a socket address length of 0) unbinds all names from the socket.
*
@@ -1202,12 +1200,12 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct tipc_msg *hdr;
struct tipc_uaddr ua;
int user, mtyp, hlen;
- bool exact;
__skb_queue_head_init(&tmpq);
INIT_LIST_HEAD(&dports);
ua.addrtype = TIPC_SERVICE_RANGE;
+ /* tipc_skb_peek() increments the head skb's reference counter */
skb = tipc_skb_peek(arrvq, &inputq->lock);
for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
hdr = buf_msg(skb);
@@ -1216,6 +1214,12 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
onode = msg_orignode(hdr);
ua.sr.type = msg_nametype(hdr);
+ ua.sr.lower = msg_namelower(hdr);
+ ua.sr.upper = msg_nameupper(hdr);
+ if (onode == self)
+ ua.scope = TIPC_ANY_SCOPE;
+ else
+ ua.scope = TIPC_CLUSTER_SCOPE;
if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
spin_lock_bh(&inputq->lock);
@@ -1233,20 +1237,10 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
ua.sr.lower = 0;
ua.sr.upper = ~0;
ua.scope = msg_lookup_scope(hdr);
- exact = true;
- } else {
- /* TIPC_NODE_SCOPE means "any scope" in this context */
- if (onode == self)
- ua.scope = TIPC_NODE_SCOPE;
- else
- ua.scope = TIPC_CLUSTER_SCOPE;
- exact = false;
- ua.sr.lower = msg_namelower(hdr);
- ua.sr.upper = msg_nameupper(hdr);
}
/* Create destination port list: */
- tipc_nametbl_lookup_mcast_sockets(net, &ua, exact, &dports);
+ tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
/* Clone message per destination */
while (tipc_dest_pop(&dports, NULL, &portid)) {
@@ -1258,11 +1252,12 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
}
pr_warn("Failed to clone mcast rcv buffer\n");
}
- /* Append to inputq if not already done by other thread */
+ /* Append clones to inputq only if skb is still head of arrvq */
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
- __skb_dequeue(arrvq);
+ /* Decrement the skb's refcnt */
+ kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
__skb_queue_purge(&tmpq);
@@ -1460,10 +1455,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
return -EISCONN;
if (tsk->published)
return -EOPNOTSUPP;
- if (atype == TIPC_SERVICE_ADDR) {
- tsk->conn_type = ua->sa.type;
- tsk->conn_instance = ua->sa.instance;
- }
+ if (atype == TIPC_SERVICE_ADDR)
+ tsk->conn_addrtype = atype;
msg_set_syn(hdr, 1);
}
@@ -1734,67 +1727,58 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
struct tipc_sock *tsk)
{
- struct tipc_msg *msg;
- u32 anc_data[3];
- u32 err;
- u32 dest_type;
- int has_name;
- int res;
+ struct tipc_msg *hdr;
+ u32 data[3] = {0,};
+ bool has_addr;
+ int dlen, rc;
if (likely(m->msg_controllen == 0))
return 0;
- msg = buf_msg(skb);
- /* Optionally capture errored message object(s) */
- err = msg ? msg_errcode(msg) : 0;
- if (unlikely(err)) {
- anc_data[0] = err;
- anc_data[1] = msg_data_sz(msg);
- res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
- if (res)
- return res;
- if (anc_data[1]) {
- if (skb_linearize(skb))
- return -ENOMEM;
- msg = buf_msg(skb);
- res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
- msg_data(msg));
- if (res)
- return res;
- }
+ hdr = buf_msg(skb);
+ dlen = msg_data_sz(hdr);
+
+ /* Capture errored message object, if any */
+ if (msg_errcode(hdr)) {
+ if (skb_linearize(skb))
+ return -ENOMEM;
+ hdr = buf_msg(skb);
+ data[0] = msg_errcode(hdr);
+ data[1] = dlen;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
+ if (rc || !dlen)
+ return rc;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
+ if (rc)
+ return rc;
}
- /* Optionally capture message destination object */
- dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
- switch (dest_type) {
+ /* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
+ switch (msg_type(hdr)) {
case TIPC_NAMED_MSG:
- has_name = 1;
- anc_data[0] = msg_nametype(msg);
- anc_data[1] = msg_namelower(msg);
- anc_data[2] = msg_namelower(msg);
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = data[1];
break;
case TIPC_MCAST_MSG:
- has_name = 1;
- anc_data[0] = msg_nametype(msg);
- anc_data[1] = msg_namelower(msg);
- anc_data[2] = msg_nameupper(msg);
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = msg_nameupper(hdr);
break;
case TIPC_CONN_MSG:
- has_name = (tsk->conn_type != 0);
- anc_data[0] = tsk->conn_type;
- anc_data[1] = tsk->conn_instance;
- anc_data[2] = tsk->conn_instance;
+ has_addr = !!tsk->conn_addrtype;
+ data[0] = msg_nametype(&tsk->phdr);
+ data[1] = msg_nameinst(&tsk->phdr);
+ data[2] = data[1];
break;
default:
- has_name = 0;
+ has_addr = false;
}
- if (has_name) {
- res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
- if (res)
- return res;
- }
-
- return 0;
+ if (!has_addr)
+ return 0;
+ return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
}
static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
@@ -2747,8 +2731,9 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
tsk_set_importance(new_sk, msg_importance(msg));
if (msg_named(msg)) {
- new_tsock->conn_type = msg_nametype(msg);
- new_tsock->conn_instance = msg_nameinst(msg);
+ new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
+ msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
+ msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
}
/*
@@ -3452,13 +3437,14 @@ void tipc_socket_stop(void)
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
- u32 peer_node;
- u32 peer_port;
+ u32 peer_node, peer_port;
+ u32 conn_type, conn_instance;
struct nlattr *nest;
peer_node = tsk_peer_node(tsk);
peer_port = tsk_peer_port(tsk);
-
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
if (!nest)
return -EMSGSIZE;
@@ -3468,12 +3454,12 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
goto msg_full;
- if (tsk->conn_type != 0) {
+ if (tsk->conn_addrtype != 0) {
if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
goto msg_full;
- if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
+ if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
goto msg_full;
- if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
+ if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
goto msg_full;
}
nla_nest_end(skb, nest);
@@ -3863,9 +3849,9 @@ bool tipc_sk_filtering(struct sock *sk)
}
if (!tipc_sk_type_connectionless(sk)) {
- type = tsk->conn_type;
- lower = tsk->conn_instance;
- upper = tsk->conn_instance;
+ type = msg_nametype(&tsk->phdr);
+ lower = msg_nameinst(&tsk->phdr);
+ upper = lower;
}
if ((_type && _type != type) || (_lower && _lower != lower) ||
@@ -3930,6 +3916,7 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
{
int i = 0;
size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
+ u32 conn_type, conn_instance;
struct tipc_sock *tsk;
struct publication *p;
bool tsk_connected;
@@ -3950,8 +3937,10 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
if (tsk_connected) {
i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
- i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
- i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
+ i += scnprintf(buf + i, sz - i, " %u", conn_type);
+ i += scnprintf(buf + i, sz - i, " %u", conn_instance);
}
i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
if (tsk->published) {
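The rewritten tipc_sk_anc_data_recv() emits the same ancillary objects as before: TIPC_ERRINFO as two u32s (error code, returned-data length) and TIPC_DESTNAME as three u32s (service type, lower and upper instance, with lower == upper for named and connection messages). A sketch of the 12-byte TIPC_DESTNAME layout a receiver would read out of the cmsg (struct name illustrative; the kernel just writes three u32s):

#include <stdio.h>
#include <stdint.h>

struct tipc_destname {
        uint32_t type;          /* service type */
        uint32_t lower;         /* lower instance */
        uint32_t upper;         /* upper instance */
};

int main(void)
{
        struct tipc_destname d = { .type = 4711, .lower = 5, .upper = 5 };

        printf("sizeof = %zu (matches the 12 in put_cmsg)\n", sizeof(d));
        printf("service %u, instance range %u-%u\n",
               d.type, d.lower, d.upper);
        return 0;
}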
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8e00d739f03a..05d49ad81290 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -66,7 +66,7 @@ static void tipc_sub_send_event(struct tipc_subscription *sub,
/**
* tipc_sub_check_overlap - test for subscription overlap with the given values
* @subscribed: the service range subscribed for
- * @found: the service range we are checning for match
+ * @found: the service range we are checking for match
*
* Returns true if there is overlap, otherwise false.
*/
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index e556d2cdc064..c2bb818704c8 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -814,6 +814,7 @@ static void cleanup_bearer(struct work_struct *work)
kfree_rcu(rcast, rcu);
}
+ atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
dst_cache_destroy(&ub->rcast.dst_cache);
udp_tunnel_sock_release(ub->ubsock);
synchronize_net();
@@ -834,6 +835,7 @@ static void tipc_udp_disable(struct tipc_bearer *b)
RCU_INIT_POINTER(ub->bearer, NULL);
/* sock_release need to be done outside of rtnl lock */
+ atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
INIT_WORK(&ub->work, cleanup_bearer);
schedule_work(&ub->work);
}
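The wq_count pairing spans two files: tipc_udp_disable() increments before scheduling cleanup_bearer(), the work item decrements when done, and tipc_exit_net() spins with cond_resched() until the count drains. A user-space model of the same gate using C11 atomics (schedule_work() collapsed to a direct call):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int wq_count;

static void cleanup_bearer_work(void)
{
        /* ... release resources ... */
        atomic_fetch_sub(&wq_count, 1);
}

static void disable_bearer(void)
{
        atomic_fetch_add(&wq_count, 1);
        cleanup_bearer_work();  /* stands in for schedule_work() */
}

int main(void)
{
        disable_bearer();
        while (atomic_load(&wq_count))
                ;               /* cond_resched() in the kernel loop */
        printf("all bearer cleanups drained\n");
        return 0;
}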
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 76a6f8c2eec4..b932469ee69c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
@@ -127,7 +128,7 @@ static void destroy_record(struct tls_record_info *record)
int i;
for (i = 0; i < record->num_frags; i++)
- __skb_frag_unref(&record->frags[i]);
+ __skb_frag_unref(&record->frags[i], false);
kfree(record);
}
@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
- return;
-
trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+ rcu_read_lock();
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+ rcu_read_unlock();
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}
@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
if (tls_ctx->rx_conf != TLS_HW)
return;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+ return;
prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
ctx->sw.decrypted |= is_decrypted;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+ if (likely(is_encrypted || is_decrypted))
+ return 0;
+
+ /* After tls_device_down disables the offload, the next SKB will
+ * likely have initial fragments decrypted, and final ones not
+ * decrypted. We need to reencrypt that single SKB.
+ */
+ return tls_device_reencrypt(sk, skb);
+ }
+
/* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
spin_unlock_irqrestore(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &list, list) {
+ /* Stop offloaded TX and switch to the fallback.
+ * tls_is_sk_tx_device_offloaded will return false.
+ */
+ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+ /* Stop the RX and TX resync.
+ * tls_dev_resync must not be called after tls_dev_del.
+ */
+ WRITE_ONCE(ctx->netdev, NULL);
+
+ /* Start skipping the RX resync logic completely. */
+ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+ /* Sync with inflight packets. After this point:
+ * TX: no non-encrypted packets will be passed to the driver.
+ * RX: resync requests from the driver will be ignored.
+ */
+ synchronize_net();
+
+ /* Release the offload context on the driver side. */
if (ctx->tx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
!test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
- WRITE_ONCE(ctx->netdev, NULL);
- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
- usleep_range(10, 200);
+
dev_put(netdev);
- list_del_init(&ctx->list);
- if (refcount_dec_and_test(&ctx->refcount))
- tls_device_free_ctx(ctx);
+ /* Move the context to a separate list for two reasons:
+ * 1. When the context is deallocated, list_del is called.
+ * 2. It's no longer an offloaded context, so we don't want to
+ * run offload-specific code on this context.
+ */
+ spin_lock_irqsave(&tls_device_lock, flags);
+ list_move_tail(&ctx->list, &tls_device_down_list);
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ /* Device contexts for RX and TX will be freed on sk_destruct
+ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ */
}
up_write(&device_offload_lock);
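The resync path now relies on RCU instead of the TLS_RX_SYNC_RUNNING bit: readers sample ctx->netdev under rcu_read_lock(), and tls_device_down() publishes NULL and then calls synchronize_net() so no reader can still hold the old pointer. A toy model of that ordering, with the RCU primitives reduced to stubs:

#include <stdio.h>

struct ctx { struct dev *netdev; };
struct dev { const char *name; };

static void rcu_read_lock(void)   { }
static void rcu_read_unlock(void) { }
static void synchronize_net(void) { /* waits out all readers */ }

static void resync(struct ctx *c)
{
        rcu_read_lock();
        struct dev *d = c->netdev;      /* READ_ONCE in the kernel */
        if (d)
                printf("resync via %s\n", d->name);
        rcu_read_unlock();
}

int main(void)
{
        struct dev d = { .name = "eth0" };
        struct ctx c = { .netdev = &d };

        resync(&c);
        c.netdev = NULL;        /* WRITE_ONCE(ctx->netdev, NULL) */
        synchronize_net();      /* no reader can still see &d */
        resync(&c);             /* safely skips the device */
        return 0;
}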
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index cacf040872c7..e40bedd112b6 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return tls_sw_fallback(sk, skb);
+}
+
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
return tls_sw_fallback(skb->sk, skb);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 47b7c5334c34..fde56ff49163 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ ctx->sk = sk;
return ctx;
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1dcb34dfd56b..4feb95e34b64 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -37,6 +37,7 @@
#include <linux/sched/signal.h>
#include <linux/module.h>
+#include <linux/splice.h>
#include <crypto/aead.h>
#include <net/strparser.h>
@@ -1152,7 +1153,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
int ret = 0;
bool eor;
- eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
+ eor = !(flags & MSG_SENDPAGE_NOTLAST);
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/* Call the sk_stream functions to manage the sndbuf mem. */
@@ -1281,7 +1282,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
}
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
- int flags, long timeo, int *err)
+ bool nonblock, long timeo, int *err)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1306,7 +1307,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
if (sock_flag(sk, SOCK_DONE))
return NULL;
- if ((flags & MSG_DONTWAIT) || !timeo) {
+ if (nonblock || !timeo) {
*err = -EAGAIN;
return NULL;
}
@@ -1786,7 +1787,7 @@ int tls_sw_recvmsg(struct sock *sk,
bool async_capable;
bool async = false;
- skb = tls_wait_data(sk, psock, flags, timeo, &err);
+ skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
if (!skb) {
if (psock) {
int ret = sk_msg_recvmsg(sk, psock, msg, len,
@@ -1990,9 +1991,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags, timeo, &err);
+ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
if (!skb)
goto splice_read_end;
@@ -2018,8 +2019,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (copied < 0)
goto splice_read_end;
- if (likely(!(flags & MSG_PEEK)))
- tls_sw_advance_skb(sk, skb, copied);
+ tls_sw_advance_skb(sk, skb, copied);
splice_read_end:
release_sock(sk);
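The tls_wait_data() nonblock parameter matters because splice flags and MSG_* flags overlap numerically: SPLICE_F_NONBLOCK shares the value of MSG_PEEK, not MSG_DONTWAIT, so the old checks both misfired. A standalone demonstration (flag values from the uapi headers):

#include <stdio.h>

#define MSG_PEEK            0x02
#define MSG_DONTWAIT        0x40
#define SPLICE_F_NONBLOCK   0x02

int main(void)
{
        unsigned int flags = SPLICE_F_NONBLOCK;

        /* Old code: never true for a non-blocking splice... */
        printf("flags & MSG_DONTWAIT = %u\n", flags & MSG_DONTWAIT);
        /* ...but accidentally looked like a peek, so the record
         * was never consumed. */
        printf("flags & MSG_PEEK     = %u\n", flags & MSG_PEEK);
        return 0;
}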
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5a31307ceb76..23c92ad15c61 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -262,6 +262,14 @@ static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
sk_add_node(sk, list);
}
+static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
+ unsigned hash)
+{
+ __unix_remove_socket(sk);
+ smp_store_release(&unix_sk(sk)->addr, addr);
+ __unix_insert_socket(&unix_socket_table[hash], sk);
+}
+
static inline void unix_remove_socket(struct sock *sk)
{
spin_lock(&unix_table_lock);
@@ -278,11 +286,11 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
static struct sock *__unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
- int len, int type, unsigned int hash)
+ int len, unsigned int hash)
{
struct sock *s;
- sk_for_each(s, &unix_socket_table[hash ^ type]) {
+ sk_for_each(s, &unix_socket_table[hash]) {
struct unix_sock *u = unix_sk(s);
if (!net_eq(sock_net(s), net))
@@ -297,13 +305,12 @@ static struct sock *__unix_find_socket_byname(struct net *net,
static inline struct sock *unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
- int len, int type,
- unsigned int hash)
+ int len, unsigned int hash)
{
struct sock *s;
spin_lock(&unix_table_lock);
- s = __unix_find_socket_byname(net, sunname, len, type, hash);
+ s = __unix_find_socket_byname(net, sunname, len, hash);
if (s)
sock_hold(s);
spin_unlock(&unix_table_lock);
@@ -484,7 +491,7 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
*/
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
other->sk_err = ECONNRESET;
- other->sk_error_report(other);
+ sk_error_report(other);
}
}
}
@@ -535,12 +542,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
+
+ skpair = unix_peer(sk);
+ unix_peer(sk) = NULL;
+
unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
- skpair = unix_peer(sk);
-
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
@@ -555,7 +564,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
- unix_peer(sk) = NULL;
}
/* Try to flush out this socket. Throw out buffers at least */
@@ -890,12 +898,12 @@ static int unix_autobind(struct socket *sock)
retry:
addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
+ addr->hash ^= sk->sk_type;
spin_lock(&unix_table_lock);
ordernum = (ordernum+1)&0xFFFFF;
- if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
- addr->hash)) {
+ if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) {
spin_unlock(&unix_table_lock);
/*
* __unix_find_socket_byname() may take long time if many names
@@ -910,11 +918,8 @@ retry:
}
goto retry;
}
- addr->hash ^= sk->sk_type;
- __unix_remove_socket(sk);
- smp_store_release(&u->addr, addr);
- __unix_insert_socket(&unix_socket_table[addr->hash], sk);
+ __unix_set_addr(sk, addr, addr->hash);
spin_unlock(&unix_table_lock);
err = 0;
@@ -959,7 +964,7 @@ static struct sock *unix_find_other(struct net *net,
}
} else {
err = -ECONNREFUSED;
- u = unix_find_socket_byname(net, sunname, len, type, hash);
+ u = unix_find_socket_byname(net, sunname, len, type ^ hash);
if (u) {
struct dentry *dentry;
dentry = unix_sk(u)->path.dentry;
@@ -977,125 +982,125 @@ fail:
return NULL;
}
-static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+static int unix_bind_bsd(struct sock *sk, struct unix_address *addr)
{
+ struct unix_sock *u = unix_sk(sk);
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+ struct user_namespace *ns; // barf...
+ struct path parent;
struct dentry *dentry;
- struct path path;
- int err = 0;
+ unsigned int hash;
+ int err;
+
/*
* Get the parent directory, calculate the hash for last
* component.
*/
- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
- err = PTR_ERR(dentry);
+ dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
if (IS_ERR(dentry))
- return err;
+ return PTR_ERR(dentry);
+ ns = mnt_user_ns(parent.mnt);
/*
* All right, let's create it.
*/
- err = security_path_mknod(&path, dentry, mode, 0);
- if (!err) {
- err = vfs_mknod(mnt_user_ns(path.mnt), d_inode(path.dentry),
- dentry, mode, 0);
- if (!err) {
- res->mnt = mntget(path.mnt);
- res->dentry = dget(dentry);
- }
- }
- done_path_create(&path, dentry);
+ err = security_path_mknod(&parent, dentry, mode, 0);
+ if (!err)
+ err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
+ if (err)
+ goto out;
+ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+ goto out_unlink;
+ if (u->addr)
+ goto out_unlock;
+
+ addr->hash = UNIX_HASH_SIZE;
+ hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ spin_lock(&unix_table_lock);
+ u->path.mnt = mntget(parent.mnt);
+ u->path.dentry = dget(dentry);
+ __unix_set_addr(sk, addr, hash);
+ spin_unlock(&unix_table_lock);
+ mutex_unlock(&u->bindlock);
+ done_path_create(&parent, dentry);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&u->bindlock);
+ err = -EINVAL;
+out_unlink:
+ /* failed after successful mknod? unlink what we'd created... */
+ vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
+out:
+ done_path_create(&parent, dentry);
return err;
}
+static int unix_bind_abstract(struct sock *sk, struct unix_address *addr)
+{
+ struct unix_sock *u = unix_sk(sk);
+ int err;
+
+ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+ return err;
+
+ if (u->addr) {
+ mutex_unlock(&u->bindlock);
+ return -EINVAL;
+ }
+
+ spin_lock(&unix_table_lock);
+ if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
+ addr->hash)) {
+ spin_unlock(&unix_table_lock);
+ mutex_unlock(&u->bindlock);
+ return -EADDRINUSE;
+ }
+ __unix_set_addr(sk, addr, addr->hash);
+ spin_unlock(&unix_table_lock);
+ mutex_unlock(&u->bindlock);
+ return 0;
+}
+
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
- struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
int err;
unsigned int hash;
struct unix_address *addr;
- struct hlist_head *list;
- struct path path = { };
- err = -EINVAL;
if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
sunaddr->sun_family != AF_UNIX)
- goto out;
+ return -EINVAL;
- if (addr_len == sizeof(short)) {
- err = unix_autobind(sock);
- goto out;
- }
+ if (addr_len == sizeof(short))
+ return unix_autobind(sock);
err = unix_mkname(sunaddr, addr_len, &hash);
if (err < 0)
- goto out;
+ return err;
addr_len = err;
-
- if (sun_path[0]) {
- umode_t mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(sun_path, mode, &path);
- if (err) {
- if (err == -EEXIST)
- err = -EADDRINUSE;
- goto out;
- }
- }
-
- err = mutex_lock_interruptible(&u->bindlock);
- if (err)
- goto out_put;
-
- err = -EINVAL;
- if (u->addr)
- goto out_up;
-
- err = -ENOMEM;
addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
if (!addr)
- goto out_up;
+ return -ENOMEM;
memcpy(addr->name, sunaddr, addr_len);
addr->len = addr_len;
addr->hash = hash ^ sk->sk_type;
refcount_set(&addr->refcnt, 1);
- if (sun_path[0]) {
- addr->hash = UNIX_HASH_SIZE;
- hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
- spin_lock(&unix_table_lock);
- u->path = path;
- list = &unix_socket_table[hash];
- } else {
- spin_lock(&unix_table_lock);
- err = -EADDRINUSE;
- if (__unix_find_socket_byname(net, sunaddr, addr_len,
- sk->sk_type, hash)) {
- unix_release_addr(addr);
- goto out_unlock;
- }
-
- list = &unix_socket_table[addr->hash];
- }
-
- err = 0;
- __unix_remove_socket(sk);
- smp_store_release(&u->addr, addr);
- __unix_insert_socket(list, sk);
-
-out_unlock:
- spin_unlock(&unix_table_lock);
-out_up:
- mutex_unlock(&u->bindlock);
-out_put:
+ if (sun_path[0])
+ err = unix_bind_bsd(sk, addr);
+ else
+ err = unix_bind_abstract(sk, addr);
if (err)
- path_put(&path);
-out:
- return err;
+ unix_release_addr(addr);
+ return err == -EEXIST ? -EADDRINUSE : err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
@@ -1392,7 +1397,7 @@ restart:
unix_state_unlock(sk);
- /* take ten and and send info to listening sock */
+ /* take ten and send info to listening sock */
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
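After the bind rework, the socket type is folded into addr->hash once, when the address is set up, instead of being XOR-ed into every table lookup. A sketch of that folding with a stand-in hash function (unix_hash_fold() really checksums the name):

#include <stdio.h>

#define UNIX_HASH_SIZE 256

static unsigned int name_hash(const char *name)
{
        unsigned int h = 5381;  /* djb2, standing in for csum fold */

        while (*name)
                h = h * 33 + (unsigned char)*name++;
        return h & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
        int sk_type = 1;        /* SOCK_STREAM */
        unsigned int hash = name_hash("@autobind-05a3f");

        hash ^= sk_type;        /* folded once, at address setup */
        printf("bucket = %u\n", hash);
        return 0;
}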
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 92a72f0e0d94..3e02cc3b24f8 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -415,8 +415,8 @@ static void vsock_deassign_transport(struct vsock_sock *vsk)
/* Assign a transport to a socket and call the .init transport callback.
*
- * Note: for stream socket this must be called when vsk->remote_addr is set
- * (e.g. during the connect() or when a connection request on a listener
+ * Note: for a connection oriented socket this must be called when vsk->remote_addr
+ * is set (e.g. during the connect() or when a connection request on a listener
* socket is received).
* The vsk->remote_addr is used to decide which transport to use:
* - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
@@ -452,6 +452,7 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
new_transport = transport_dgram;
break;
case SOCK_STREAM:
+ case SOCK_SEQPACKET:
if (vsock_use_local_transport(remote_cid))
new_transport = transport_local;
else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
@@ -469,10 +470,10 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
return 0;
/* transport->release() must be called with sock lock acquired.
- * This path can only be taken during vsock_stream_connect(),
- * where we have already held the sock lock.
- * In the other cases, this function is called on a new socket
- * which is not assigned to any transport.
+ * This path can only be taken during vsock_connect(), where we
+ * have already held the sock lock. In the other cases, this
+ * function is called on a new socket which is not assigned to
+ * any transport.
*/
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
@@ -484,6 +485,14 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
if (!new_transport || !try_module_get(new_transport->module))
return -ENODEV;
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ if (!new_transport->seqpacket_allow ||
+ !new_transport->seqpacket_allow(remote_cid)) {
+ module_put(new_transport->module);
+ return -ESOCKTNOSUPPORT;
+ }
+ }
+
ret = new_transport->init(vsk, psk);
if (ret) {
module_put(new_transport->module);
@@ -604,8 +613,8 @@ out:
/**** SOCKET OPERATIONS ****/
-static int __vsock_bind_stream(struct vsock_sock *vsk,
- struct sockaddr_vm *addr)
+static int __vsock_bind_connectible(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr)
{
static u32 port;
struct sockaddr_vm new_addr;
@@ -649,9 +658,10 @@ static int __vsock_bind_stream(struct vsock_sock *vsk,
vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
- /* Remove stream sockets from the unbound list and add them to the hash
- * table for easy lookup by its address. The unbound list is simply an
- * extra entry at the end of the hash table, a trick used by AF_UNIX.
+ /* Remove connection-oriented sockets from the unbound list and add them
+ * to the hash table for easy lookup by address. The unbound list
+ * is simply an extra entry at the end of the hash table, a trick used
+ * by AF_UNIX.
*/
__vsock_remove_bound(vsk);
__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
@@ -684,8 +694,9 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
switch (sk->sk_socket->type) {
case SOCK_STREAM:
+ case SOCK_SEQPACKET:
spin_lock_bh(&vsock_table_lock);
- retval = __vsock_bind_stream(vsk, addr);
+ retval = __vsock_bind_connectible(vsk, addr);
spin_unlock_bh(&vsock_table_lock);
break;
@@ -768,6 +779,11 @@ static struct sock *__vsock_create(struct net *net,
return sk;
}
+static bool sock_type_connectible(u16 type)
+{
+ return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
+}
+
static void __vsock_release(struct sock *sk, int level)
{
if (sk) {
@@ -786,7 +802,7 @@ static void __vsock_release(struct sock *sk, int level)
if (vsk->transport)
vsk->transport->release(vsk);
- else if (sk->sk_type == SOCK_STREAM)
+ else if (sock_type_connectible(sk->sk_type))
vsock_remove_sock(vsk);
sock_orphan(sk);
@@ -844,6 +860,16 @@ s64 vsock_stream_has_data(struct vsock_sock *vsk)
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
+static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ if (sk->sk_type == SOCK_SEQPACKET)
+ return vsk->transport->seqpacket_has_data(vsk);
+ else
+ return vsock_stream_has_data(vsk);
+}
+
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
return vsk->transport->stream_has_space(vsk);
@@ -937,10 +963,10 @@ static int vsock_shutdown(struct socket *sock, int mode)
if ((mode & ~SHUTDOWN_MASK) || !mode)
return -EINVAL;
- /* If this is a STREAM socket and it is not connected then bail out
- * immediately. If it is a DGRAM socket then we must first kick the
- * socket so that it wakes up from any sleeping calls, for example
- * recv(), and then afterwards return the error.
+ /* If this is a connection-oriented socket and it is not connected, then
+ * bail out immediately. If it is a DGRAM socket then we must first
+ * kick the socket so that it wakes up from any sleeping calls, for
+ * example recv(), and then afterwards return the error.
*/
sk = sock->sk;
@@ -948,7 +974,7 @@ static int vsock_shutdown(struct socket *sock, int mode)
lock_sock(sk);
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
- if (sk->sk_type == SOCK_STREAM)
+ if (sock_type_connectible(sk->sk_type))
goto out;
} else {
sock->state = SS_DISCONNECTING;
@@ -961,7 +987,7 @@ static int vsock_shutdown(struct socket *sock, int mode)
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
- if (sk->sk_type == SOCK_STREAM) {
+ if (sock_type_connectible(sk->sk_type)) {
sock_reset_flag(sk, SOCK_DONE);
vsock_send_shutdown(sk, mode);
}
@@ -1016,7 +1042,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
- } else if (sock->type == SOCK_STREAM) {
+ } else if (sock_type_connectible(sk->sk_type)) {
const struct vsock_transport *transport;
lock_sock(sk);
@@ -1255,7 +1281,7 @@ static void vsock_connect_timeout(struct work_struct *work)
(sk->sk_shutdown != SHUTDOWN_MASK)) {
sk->sk_state = TCP_CLOSE;
sk->sk_err = ETIMEDOUT;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
vsock_transport_cancel_pkt(vsk);
}
release_sock(sk);
@@ -1263,8 +1289,8 @@ static void vsock_connect_timeout(struct work_struct *work)
sock_put(sk);
}
-static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
- int addr_len, int flags)
+static int vsock_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len, int flags)
{
int err;
struct sock *sk;
@@ -1369,7 +1395,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- sk->sk_state = TCP_CLOSE;
+ sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
goto out_wait;
@@ -1414,7 +1440,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
lock_sock(listener);
- if (sock->type != SOCK_STREAM) {
+ if (!sock_type_connectible(sock->type)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1491,7 +1517,7 @@ static int vsock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sock->type != SOCK_STREAM) {
+ if (!sock_type_connectible(sk->sk_type)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1535,11 +1561,11 @@ static void vsock_update_buffer_size(struct vsock_sock *vsk,
vsk->buffer_size = val;
}
-static int vsock_stream_setsockopt(struct socket *sock,
- int level,
- int optname,
- sockptr_t optval,
- unsigned int optlen)
+static int vsock_connectible_setsockopt(struct socket *sock,
+ int level,
+ int optname,
+ sockptr_t optval,
+ unsigned int optlen)
{
int err;
struct sock *sk;
@@ -1617,10 +1643,10 @@ exit:
return err;
}
-static int vsock_stream_getsockopt(struct socket *sock,
- int level, int optname,
- char __user *optval,
- int __user *optlen)
+static int vsock_connectible_getsockopt(struct socket *sock,
+ int level, int optname,
+ char __user *optval,
+ int __user *optlen)
{
int err;
int len;
@@ -1688,8 +1714,8 @@ static int vsock_stream_getsockopt(struct socket *sock,
return 0;
}
-static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
- size_t len)
+static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
{
struct sock *sk;
struct vsock_sock *vsk;
@@ -1712,7 +1738,9 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
transport = vsk->transport;
- /* Callers should not provide a destination with stream sockets. */
+ /* Callers should not provide a destination with connection-oriented
+ * sockets.
+ */
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out;
@@ -1803,9 +1831,13 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
* responsibility to check how many bytes we were able to send.
*/
- written = transport->stream_enqueue(
- vsk, msg,
- len - total_written);
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ written = transport->seqpacket_enqueue(vsk,
+ msg, len - total_written);
+ } else {
+ written = transport->stream_enqueue(vsk,
+ msg, len - total_written);
+ }
if (written < 0) {
err = -ENOMEM;
goto out_err;
@@ -1821,72 +1853,98 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
out_err:
- if (total_written > 0)
- err = total_written;
+ if (total_written > 0) {
+ /* Return number of written bytes only if:
+ * 1) SOCK_STREAM socket.
+ * 2) SOCK_SEQPACKET socket when whole buffer is sent.
+ */
+ if (sk->sk_type == SOCK_STREAM || total_written == len)
+ err = total_written;
+ }
out:
release_sock(sk);
return err;
}
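
The out_err handling above gives SOCK_SEQPACKET an all-or-nothing return: a partially transmitted record reports the error, never a short count, while SOCK_STREAM keeps the usual partial-write semantics. A small userspace sketch of what a caller may rely on (assumes 'fd' is a connected SEQPACKET vsock):

    #include <sys/types.h>
    #include <sys/socket.h>

    static int send_record(int fd, const void *buf, size_t len)
    {
        ssize_t n = send(fd, buf, len, 0);

        /* For SOCK_SEQPACKET, a successful return is always the
         * full record length; partial delivery surfaces as n < 0.
         */
        return n < 0 ? -1 : 0;
    }
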
-
-static int
-vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+static int vsock_connectible_wait_data(struct sock *sk,
+ struct wait_queue_entry *wait,
+ long timeout,
+ struct vsock_transport_recv_notify_data *recv_data,
+ size_t target)
{
- struct sock *sk;
- struct vsock_sock *vsk;
const struct vsock_transport *transport;
+ struct vsock_sock *vsk;
+ s64 data;
int err;
- size_t target;
- ssize_t copied;
- long timeout;
- struct vsock_transport_recv_notify_data recv_data;
- DEFINE_WAIT(wait);
-
- sk = sock->sk;
vsk = vsock_sk(sk);
err = 0;
+ transport = vsk->transport;
- lock_sock(sk);
+ while ((data = vsock_connectible_has_data(vsk)) == 0) {
+ prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
- transport = vsk->transport;
+ if (sk->sk_err != 0 ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ (vsk->peer_shutdown & SEND_SHUTDOWN)) {
+ break;
+ }
- if (!transport || sk->sk_state != TCP_ESTABLISHED) {
- /* Recvmsg is supposed to return 0 if a peer performs an
- * orderly shutdown. Differentiate between that case and when a
- * peer has not connected or a local shutdown occurred with the
- * SOCK_DONE flag.
- */
- if (sock_flag(sk, SOCK_DONE))
- err = 0;
- else
- err = -ENOTCONN;
+ /* Don't wait for non-blocking sockets. */
+ if (timeout == 0) {
+ err = -EAGAIN;
+ break;
+ }
- goto out;
- }
+ if (recv_data) {
+ err = transport->notify_recv_pre_block(vsk, target, recv_data);
+ if (err < 0)
+ break;
+ }
- if (flags & MSG_OOB) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ release_sock(sk);
+ timeout = schedule_timeout(timeout);
+ lock_sock(sk);
- /* We don't check peer_shutdown flag here since peer may actually shut
- * down, but there can be data in the queue that a local socket can
- * receive.
- */
- if (sk->sk_shutdown & RCV_SHUTDOWN) {
- err = 0;
- goto out;
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeout);
+ break;
+ } else if (timeout == 0) {
+ err = -EAGAIN;
+ break;
+ }
}
- /* It is valid on Linux to pass in a zero-length receive buffer. This
- * is not an error. We may as well bail out now.
+ finish_wait(sk_sleep(sk), wait);
+
+ if (err)
+ return err;
+
+ /* Internal transport error when checking for available
+ * data. XXX This should be changed to a connection
+ * reset in a later change.
*/
- if (!len) {
- err = 0;
- goto out;
- }
+ if (data < 0)
+ return -ENOMEM;
+
+ return data;
+}
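
vsock_connectible_wait_data() above is a distillation of the standard socket wait loop. Stripped of the vsock specifics, the shape is roughly the sketch below (kernel idiom only, not a standalone function; data_available() stands for any readiness predicate):

    long timeout = sock_rcvtimeo(sk, noblock);
    int err = 0;
    DEFINE_WAIT(wait);

    while (!data_available(sk)) {
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        if (!timeout) {                     /* non-blocking caller */
            err = -EAGAIN;
            break;
        }
        release_sock(sk);                   /* sleep without the lock */
        timeout = schedule_timeout(timeout);
        lock_sock(sk);
        if (signal_pending(current)) {
            err = sock_intr_errno(timeout);
            break;
        }
    }
    finish_wait(sk_sleep(sk), &wait);
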
+
+static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags)
+{
+ struct vsock_transport_recv_notify_data recv_data;
+ const struct vsock_transport *transport;
+ struct vsock_sock *vsk;
+ ssize_t copied;
+ size_t target;
+ long timeout;
+ int err;
+
+ DEFINE_WAIT(wait);
+
+ vsk = vsock_sk(sk);
+ transport = vsk->transport;
/* We must not copy less than target bytes into the user's buffer
* before returning successfully, so we wait for the consume queue to
@@ -1908,94 +1966,158 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
while (1) {
- s64 ready;
+ ssize_t read;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- ready = vsock_stream_has_data(vsk);
+ err = vsock_connectible_wait_data(sk, &wait, timeout,
+ &recv_data, target);
+ if (err <= 0)
+ break;
- if (ready == 0) {
- if (sk->sk_err != 0 ||
- (sk->sk_shutdown & RCV_SHUTDOWN) ||
- (vsk->peer_shutdown & SEND_SHUTDOWN)) {
- finish_wait(sk_sleep(sk), &wait);
- break;
- }
- /* Don't wait for non-blocking sockets. */
- if (timeout == 0) {
- err = -EAGAIN;
- finish_wait(sk_sleep(sk), &wait);
- break;
- }
+ err = transport->notify_recv_pre_dequeue(vsk, target,
+ &recv_data);
+ if (err < 0)
+ break;
- err = transport->notify_recv_pre_block(
- vsk, target, &recv_data);
- if (err < 0) {
- finish_wait(sk_sleep(sk), &wait);
- break;
- }
- release_sock(sk);
- timeout = schedule_timeout(timeout);
- lock_sock(sk);
+ read = transport->stream_dequeue(vsk, msg, len - copied, flags);
+ if (read < 0) {
+ err = -ENOMEM;
+ break;
+ }
- if (signal_pending(current)) {
- err = sock_intr_errno(timeout);
- finish_wait(sk_sleep(sk), &wait);
- break;
- } else if (timeout == 0) {
- err = -EAGAIN;
- finish_wait(sk_sleep(sk), &wait);
- break;
- }
- } else {
- ssize_t read;
+ copied += read;
- finish_wait(sk_sleep(sk), &wait);
+ err = transport->notify_recv_post_dequeue(vsk, target, read,
+ !(flags & MSG_PEEK), &recv_data);
+ if (err < 0)
+ goto out;
- if (ready < 0) {
- /* Invalid queue pair content. XXX This should
- * be changed to a connection reset in a later
- * change.
- */
+ if (read >= target || flags & MSG_PEEK)
+ break;
- err = -ENOMEM;
- goto out;
- }
+ target -= read;
+ }
- err = transport->notify_recv_pre_dequeue(
- vsk, target, &recv_data);
- if (err < 0)
- break;
+ if (sk->sk_err)
+ err = -sk->sk_err;
+ else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ err = 0;
- read = transport->stream_dequeue(
- vsk, msg,
- len - copied, flags);
- if (read < 0) {
- err = -ENOMEM;
- break;
- }
+ if (copied > 0)
+ err = copied;
+
+out:
+ return err;
+}
- copied += read;
+static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags)
+{
+ const struct vsock_transport *transport;
+ struct vsock_sock *vsk;
+ ssize_t record_len;
+ long timeout;
+ int err = 0;
+ DEFINE_WAIT(wait);
- err = transport->notify_recv_post_dequeue(
- vsk, target, read,
- !(flags & MSG_PEEK), &recv_data);
- if (err < 0)
- goto out;
+ vsk = vsock_sk(sk);
+ transport = vsk->transport;
- if (read >= target || flags & MSG_PEEK)
- break;
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
- target -= read;
- }
+ err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
+ if (err <= 0)
+ goto out;
+
+ record_len = transport->seqpacket_dequeue(vsk, msg, flags);
+
+ if (record_len < 0) {
+ err = -ENOMEM;
+ goto out;
}
- if (sk->sk_err)
+ if (sk->sk_err) {
err = -sk->sk_err;
- else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ } else if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = 0;
+ } else {
+ /* The user set MSG_TRUNC, so return the real length
+ * of the packet.
+ */
+ if (flags & MSG_TRUNC)
+ err = record_len;
+ else
+ err = len - msg_data_left(msg);
- if (copied > 0)
- err = copied;
+ /* Always set MSG_TRUNC if the real length of the packet
+ * is bigger than the user's buffer.
+ */
+ if (record_len > len)
+ msg->msg_flags |= MSG_TRUNC;
+ }
+
+out:
+ return err;
+}
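
The MSG_TRUNC handling above lets a receiver learn the true record length even with an undersized buffer. A userspace sketch (assumes 'fd' is a connected SOCK_SEQPACKET vsock):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Returns the full record length; bytes beyond 'cap' are dropped. */
    static ssize_t recv_record(int fd, void *buf, size_t cap)
    {
        ssize_t n = recv(fd, buf, cap, MSG_TRUNC);

        if (n > (ssize_t)cap)
            fprintf(stderr, "record truncated: %zd bytes total\n", n);
        return n;
    }
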
+
+static int
+vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct sock *sk;
+ struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
+ int err;
+
+ DEFINE_WAIT(wait);
+
+ sk = sock->sk;
+ vsk = vsock_sk(sk);
+ err = 0;
+
+ lock_sock(sk);
+
+ transport = vsk->transport;
+
+ if (!transport || sk->sk_state != TCP_ESTABLISHED) {
+ /* Recvmsg is supposed to return 0 if a peer performs an
+ * orderly shutdown. Differentiate between that case and when a
+ * peer has not connected or a local shutdown occurred with the
+ * SOCK_DONE flag.
+ */
+ if (sock_flag(sk, SOCK_DONE))
+ err = 0;
+ else
+ err = -ENOTCONN;
+
+ goto out;
+ }
+
+ if (flags & MSG_OOB) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* We don't check peer_shutdown flag here since peer may actually shut
+ * down, but there can be data in the queue that a local socket can
+ * receive.
+ */
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ err = 0;
+ goto out;
+ }
+
+ /* It is valid on Linux to pass in a zero-length receive buffer. This
+ * is not an error. We may as well bail out now.
+ */
+ if (!len) {
+ err = 0;
+ goto out;
+ }
+
+ if (sk->sk_type == SOCK_STREAM)
+ err = __vsock_stream_recvmsg(sk, msg, len, flags);
+ else
+ err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);
out:
release_sock(sk);
@@ -2007,7 +2129,7 @@ static const struct proto_ops vsock_stream_ops = {
.owner = THIS_MODULE,
.release = vsock_release,
.bind = vsock_bind,
- .connect = vsock_stream_connect,
+ .connect = vsock_connect,
.socketpair = sock_no_socketpair,
.accept = vsock_accept,
.getname = vsock_getname,
@@ -2015,10 +2137,31 @@ static const struct proto_ops vsock_stream_ops = {
.ioctl = sock_no_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
- .setsockopt = vsock_stream_setsockopt,
- .getsockopt = vsock_stream_getsockopt,
- .sendmsg = vsock_stream_sendmsg,
- .recvmsg = vsock_stream_recvmsg,
+ .setsockopt = vsock_connectible_setsockopt,
+ .getsockopt = vsock_connectible_getsockopt,
+ .sendmsg = vsock_connectible_sendmsg,
+ .recvmsg = vsock_connectible_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops vsock_seqpacket_ops = {
+ .family = PF_VSOCK,
+ .owner = THIS_MODULE,
+ .release = vsock_release,
+ .bind = vsock_bind,
+ .connect = vsock_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = vsock_accept,
+ .getname = vsock_getname,
+ .poll = vsock_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = vsock_listen,
+ .shutdown = vsock_shutdown,
+ .setsockopt = vsock_connectible_setsockopt,
+ .getsockopt = vsock_connectible_getsockopt,
+ .sendmsg = vsock_connectible_sendmsg,
+ .recvmsg = vsock_connectible_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
@@ -2043,6 +2186,9 @@ static int vsock_create(struct net *net, struct socket *sock,
case SOCK_STREAM:
sock->ops = &vsock_stream_ops;
break;
+ case SOCK_SEQPACKET:
+ sock->ops = &vsock_seqpacket_ops;
+ break;
default:
return -ESOCKTNOSUPPORT;
}
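
With the dispatch above in place, userspace can request the new socket type directly. A sketch of opening a SEQPACKET vsock connection (hypothetical CID and port; on kernels or transports without this support, socket() or connect() fails with ESOCKTNOSUPPORT):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/vm_sockets.h>

    static int open_seqpacket(unsigned int cid, unsigned int port)
    {
        struct sockaddr_vm addr = {
            .svm_family = AF_VSOCK,
            .svm_cid    = cid,
            .svm_port   = port,
        };
        int fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);

        if (fd < 0)
            return -1;          /* type not supported by this kernel */
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect");  /* e.g. transport lacks seqpacket */
            close(fd);
            return -1;
        }
        return fd;
    }
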
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index cc3bae2659e7..19189cf30a72 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -596,7 +596,7 @@ static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
return -EOPNOTSUPP;
if (need_refill) {
- hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
+ hvs->recv_desc = hv_pkt_iter_first_raw(hvs->chan);
ret = hvs_update_recv_data(hvs);
if (ret)
return ret;
@@ -610,7 +610,7 @@ static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
hvs->recv_data_len -= to_read;
if (hvs->recv_data_len == 0) {
- hvs->recv_desc = hv_pkt_iter_next(hvs->chan, hvs->recv_desc);
+ hvs->recv_desc = hv_pkt_iter_next_raw(hvs->chan, hvs->recv_desc);
if (hvs->recv_desc) {
ret = hvs_update_recv_data(hvs);
if (ret)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 2700a63ab095..e0c2c992ad9c 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -62,6 +62,7 @@ struct virtio_vsock {
struct virtio_vsock_event event_list[8];
u32 guest_cid;
+ bool seqpacket_allow;
};
static u32 virtio_transport_get_local_cid(void)
@@ -359,7 +360,7 @@ static void virtio_vsock_reset_sock(struct sock *sk)
lock_sock(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
release_sock(sk);
}
@@ -443,6 +444,8 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
+static bool virtio_transport_seqpacket_allow(u32 remote_cid);
+
static struct virtio_transport virtio_transport = {
.transport = {
.module = THIS_MODULE,
@@ -469,6 +472,11 @@ static struct virtio_transport virtio_transport = {
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = virtio_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
@@ -485,6 +493,21 @@ static struct virtio_transport virtio_transport = {
.send_pkt = virtio_transport_send_pkt,
};
+static bool virtio_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct virtio_vsock *vsock;
+ bool seqpacket_allow;
+
+ seqpacket_allow = false;
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
static void virtio_transport_rx_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
@@ -608,10 +631,14 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
+ if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
vdev->priv = vsock;
rcu_assign_pointer(the_virtio_vsock, vsock);
mutex_unlock(&the_virtio_vsock_mutex);
+
return 0;
out:
@@ -695,6 +722,7 @@ static struct virtio_device_id id_table[] = {
};
static unsigned int features[] = {
+ VIRTIO_VSOCK_F_SEQPACKET
};
static struct virtio_driver virtio_vsock_driver = {
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 902cb6dd710b..169ba8b72a63 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -74,6 +74,10 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
err = memcpy_from_msg(pkt->buf, info->msg, len);
if (err)
goto out;
+
+ if (msg_data_left(info->msg) == 0 &&
+ info->type == VIRTIO_VSOCK_TYPE_SEQPACKET)
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
}
trace_virtio_transport_alloc_pkt(src_cid, src_port,
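
The hunk above establishes the SEQPACKET framing invariant: when a message is split across several packets, only the packet that drains the message carries VIRTIO_VSOCK_SEQ_EOR. An illustrative (non-kernel) sketch of that split:

    #include <stddef.h>
    #include <linux/virtio_vsock.h>     /* VIRTIO_VSOCK_SEQ_EOR */

    struct frag { size_t len; unsigned int flags; };

    static size_t split_message(size_t msg_len, size_t max_pkt,
                                struct frag *out)
    {
        size_t n = 0;

        while (msg_len) {
            size_t chunk = msg_len < max_pkt ? msg_len : max_pkt;

            out[n].len = chunk;
            msg_len -= chunk;
            /* EOR marks the record boundary on the last fragment only. */
            out[n].flags = msg_len ? 0 : VIRTIO_VSOCK_SEQ_EOR;
            n++;
        }
        return n;
    }
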
@@ -165,6 +169,14 @@ void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
+static u16 virtio_transport_get_type(struct sock *sk)
+{
+ if (sk->sk_type == SOCK_STREAM)
+ return VIRTIO_VSOCK_TYPE_STREAM;
+ else
+ return VIRTIO_VSOCK_TYPE_SEQPACKET;
+}
+
/* This function can only be used on connecting/connected sockets,
* since a socket assigned to a transport is required.
*
@@ -179,6 +191,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt *pkt;
u32 pkt_len = info->pkt_len;
+ info->type = virtio_transport_get_type(sk_vsock(vsk));
+
t_ops = virtio_transport_get_ops(vsk);
if (unlikely(!t_ops))
return -EFAULT;
@@ -269,13 +283,10 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
-static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
- int type,
- struct virtio_vsock_hdr *hdr)
+static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
- .type = type,
.vsk = vsk,
};
@@ -383,11 +394,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
* messages, we set the limit to a high value. TODO: experiment
* with different values.
*/
- if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
- virtio_transport_send_credit_update(vsk,
- VIRTIO_VSOCK_TYPE_STREAM,
- NULL);
- }
+ if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
+ virtio_transport_send_credit_update(vsk);
return total;
@@ -397,6 +405,75 @@ out:
return err;
}
+static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ int flags)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt;
+ int dequeued_len = 0;
+ size_t user_buf_len = msg_data_left(msg);
+ bool msg_ready = false;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ if (vvs->msg_count == 0) {
+ spin_unlock_bh(&vvs->rx_lock);
+ return 0;
+ }
+
+ while (!msg_ready) {
+ pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
+
+ if (dequeued_len >= 0) {
+ size_t pkt_len;
+ size_t bytes_to_copy;
+
+ pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
+ bytes_to_copy = min(user_buf_len, pkt_len);
+
+ if (bytes_to_copy) {
+ int err;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+ err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
+ if (err) {
+ /* Copying the message failed. The rest of the
+ * fragments will be freed without copying.
+ */
+ dequeued_len = err;
+ } else {
+ user_buf_len -= bytes_to_copy;
+ }
+
+ spin_lock_bh(&vvs->rx_lock);
+ }
+
+ if (dequeued_len >= 0)
+ dequeued_len += pkt_len;
+ }
+
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ msg_ready = true;
+ vvs->msg_count--;
+ }
+
+ virtio_transport_dec_rx_pkt(vvs, pkt);
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+
+ spin_unlock_bh(&vvs->rx_lock);
+
+ virtio_transport_send_credit_update(vsk);
+
+ return dequeued_len;
+}
+
ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
@@ -409,6 +486,38 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ int flags)
+{
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
+
+ return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
+
+int
+virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ spin_lock_bh(&vvs->tx_lock);
+
+ if (len > vvs->peer_buf_alloc) {
+ spin_unlock_bh(&vvs->tx_lock);
+ return -EMSGSIZE;
+ }
+
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return virtio_transport_stream_enqueue(vsk, msg, len);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
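
The peer_buf_alloc check above means a SEQPACKET record must fit the peer's receive buffer in one piece; oversized records are refused rather than silently split across record boundaries. Userspace sketch of the resulting failure mode:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    static int send_big_record(int fd, const void *buf, size_t len)
    {
        if (send(fd, buf, len, 0) >= 0)
            return 0;
        if (errno == EMSGSIZE)          /* record > peer buffer */
            fprintf(stderr, "record too large for peer buffer\n");
        return -1;
    }
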
+
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
@@ -431,6 +540,19 @@ s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
+u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ u32 msg_count;
+
+ spin_lock_bh(&vvs->rx_lock);
+ msg_count = vvs->msg_count;
+ spin_unlock_bh(&vvs->rx_lock);
+
+ return msg_count;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
+
static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
@@ -496,8 +618,7 @@ void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
vvs->buf_alloc = *val;
- virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
- NULL);
+ virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
@@ -624,7 +745,6 @@ int virtio_transport_connect(struct vsock_sock *vsk)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_REQUEST,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
.vsk = vsk,
};
@@ -636,7 +756,6 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_SHUTDOWN,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
.flags = (mode & RCV_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
(mode & SEND_SHUTDOWN ?
@@ -665,7 +784,6 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RW,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
.msg = msg,
.pkt_len = len,
.vsk = vsk,
@@ -688,7 +806,6 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
.reply = !!pkt,
.vsk = vsk,
};
@@ -848,7 +965,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
struct sock *sk = &vsk->sk;
bool remove_sock = true;
- if (sk->sk_type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
remove_sock = virtio_transport_close(vsk);
if (remove_sock) {
@@ -890,7 +1007,7 @@ destroy:
virtio_transport_reset(vsk, pkt);
sk->sk_state = TCP_CLOSE;
sk->sk_err = skerr;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return err;
}
@@ -912,6 +1029,9 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
goto out;
}
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+ vvs->msg_count++;
+
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
* payload.
@@ -923,13 +1043,18 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
struct virtio_vsock_pkt, list);
/* If there is space in the last packet queued, we copy the
- * new packet in its buffer.
+ * new packet in its buffer. We avoid this if the last packet
+ * queued has VIRTIO_VSOCK_SEQ_EOR set, because it is the
+ * delimiter of a SEQPACKET record, so 'pkt' is the first
+ * packet of a new record.
*/
- if (pkt->len <= last_pkt->buf_len - last_pkt->len) {
+ if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
+ !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)) {
memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
pkt->len);
last_pkt->len += pkt->len;
free_pkt = true;
+ last_pkt->hdr.flags |= pkt->hdr.flags;
goto out;
}
}
@@ -1000,7 +1125,6 @@ virtio_transport_send_response(struct vsock_sock *vsk,
{
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RESPONSE,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
.remote_port = le32_to_cpu(pkt->hdr.src_port),
.reply = true,
@@ -1096,6 +1220,12 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
return 0;
}
+static bool virtio_transport_valid_type(u16 type)
+{
+ return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
+ (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
+}
+
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
@@ -1121,7 +1251,7 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
le32_to_cpu(pkt->hdr.buf_alloc),
le32_to_cpu(pkt->hdr.fwd_cnt));
- if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
+ if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
(void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
@@ -1138,6 +1268,12 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
}
}
+ if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
+ (void)virtio_transport_reset_no_sock(t, pkt);
+ sock_put(sk);
+ goto free_pkt;
+ }
+
vsk = vsock_sk(sk);
lock_sock(sk);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c99bc4ce78e2..7aef34e32bdf 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -831,7 +831,7 @@ static void vmci_transport_handle_detach(struct sock *sk)
sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return;
}
sk->sk_state = TCP_CLOSE;
@@ -1248,7 +1248,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
vsock_remove_pending(listener, pending);
vsock_enqueue_accept(listener, pending);
- /* Callers of accept() will be be waiting on the listening socket, not
+ /* Callers of accept() will be waiting on the listening socket, not
* the pending socket.
*/
listener->sk_data_ready(listener);
@@ -1365,7 +1365,7 @@ destroy:
sk->sk_state = TCP_CLOSE;
sk->sk_err = skerr;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
return err;
}
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index a45f7ffca8c5..169a8cf65b39 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -63,6 +63,8 @@ static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
return 0;
}
+static bool vsock_loopback_seqpacket_allow(u32 remote_cid);
+
static struct virtio_transport loopback_transport = {
.transport = {
.module = THIS_MODULE,
@@ -89,6 +91,11 @@ static struct virtio_transport loopback_transport = {
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vsock_loopback_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
@@ -105,6 +112,11 @@ static struct virtio_transport loopback_transport = {
.send_pkt = vsock_loopback_send_pkt,
};
+static bool vsock_loopback_seqpacket_allow(u32 remote_cid)
+{
+ return true;
+}
+
static void vsock_loopback_work(struct work_struct *work)
{
struct vsock_loopback *vsock =
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 2eee93985ab0..af590ae606b6 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
@$(kecho) " GEN $@"
@(echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
- cat $^ ; \
+ echo | cat - $^ ; \
echo '};'; \
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) > $@
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 285b8076054b..869c43d4414c 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2020 Intel Corporation
+ * Copyright 2018-2021 Intel Corporation
*/
#include <linux/export.h>
@@ -942,7 +942,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_edmg *edmg_cap;
u32 width, control_freq, cap;
- bool support_80_80 = false;
+ bool ext_nss_cap, support_80_80 = false;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -950,6 +950,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap;
+ ext_nss_cap = __le16_to_cpu(vht_cap->vht_mcs.tx_highest) &
+ IEEE80211_VHT_EXT_NSS_BW_CAPABLE;
if (edmg_cap->channels &&
!cfg80211_edmg_usable(wiphy,
@@ -1015,7 +1017,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
(cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
(cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
- u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1;
+ (ext_nss_cap &&
+ u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1);
if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80)
return false;
fallthrough;
@@ -1037,7 +1040,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ &&
- !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
+ !(ext_nss_cap &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)))
return false;
break;
default:
@@ -1335,3 +1339,34 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
WARN_ON(1);
}
}
+
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+ unsigned long sband_mask,
+ u32 prohibited_flags)
+{
+ int idx;
+
+ prohibited_flags |= IEEE80211_CHAN_DISABLED;
+
+ for_each_set_bit(idx, &sband_mask, NUM_NL80211_BANDS) {
+ struct ieee80211_supported_band *sband = wiphy->bands[idx];
+ int chanidx;
+
+ if (!sband)
+ continue;
+
+ for (chanidx = 0; chanidx < sband->n_channels; chanidx++) {
+ struct ieee80211_channel *chan;
+
+ chan = &sband->channels[chanidx];
+
+ if (chan->flags & prohibited_flags)
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(cfg80211_any_usable_channels);
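
A hedged sketch of how a driver might use the new helper, e.g. to avoid enabling a band on which nothing is actually usable (the band mask and prohibited flags are illustrative):

    if (!cfg80211_any_usable_channels(wiphy, BIT(NL80211_BAND_6GHZ),
                                      IEEE80211_CHAN_NO_IR |
                                      IEEE80211_CHAN_RADAR))
        pr_info("no usable 6 GHz channels, leaving the band disabled\n");
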
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6fbf7537faf5..03323121ca50 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -532,11 +532,11 @@ use_default_name:
wiphy_net_set(&rdev->wiphy, &init_net);
rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
- rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
- &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
- &rdev->rfkill_ops, rdev);
+ rdev->wiphy.rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
+ &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
+ &rdev->rfkill_ops, rdev);
- if (!rdev->rfkill) {
+ if (!rdev->wiphy.rfkill) {
wiphy_free(&rdev->wiphy);
return NULL;
}
@@ -589,14 +589,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(!c->num_different_channels))
return -EINVAL;
- /*
- * Put a sane limit on maximum number of different
- * channels to simplify channel accounting code.
- */
- if (WARN_ON(c->num_different_channels >
- CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
- return -EINVAL;
-
/* DFS only works on one channel. */
if (WARN_ON(c->radar_detect_widths &&
(c->num_different_channels > 1)))
@@ -936,9 +928,6 @@ int wiphy_register(struct wiphy *wiphy)
return res;
}
- /* set up regulatory info */
- wiphy_regulatory_register(wiphy);
-
list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
@@ -949,6 +938,9 @@ int wiphy_register(struct wiphy *wiphy)
cfg80211_debugfs_rdev_add(rdev);
nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+ /* set up regulatory info */
+ wiphy_regulatory_register(wiphy);
+
if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
struct regulatory_request request;
@@ -993,10 +985,10 @@ int wiphy_register(struct wiphy *wiphy)
rdev->wiphy.registered = true;
rtnl_unlock();
- res = rfkill_register(rdev->rfkill);
+ res = rfkill_register(rdev->wiphy.rfkill);
if (res) {
- rfkill_destroy(rdev->rfkill);
- rdev->rfkill = NULL;
+ rfkill_destroy(rdev->wiphy.rfkill);
+ rdev->wiphy.rfkill = NULL;
wiphy_unregister(&rdev->wiphy);
return res;
}
@@ -1012,18 +1004,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
if (!rdev->ops->rfkill_poll)
return;
rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
- rfkill_resume_polling(rdev->rfkill);
+ rfkill_resume_polling(wiphy->rfkill);
}
EXPORT_SYMBOL(wiphy_rfkill_start_polling);
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
-{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-
- rfkill_pause_polling(rdev->rfkill);
-}
-EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
-
void wiphy_unregister(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -1035,8 +1019,8 @@ void wiphy_unregister(struct wiphy *wiphy)
wiphy_unlock(&rdev->wiphy);
__count == 0; }));
- if (rdev->rfkill)
- rfkill_unregister(rdev->rfkill);
+ if (rdev->wiphy.rfkill)
+ rfkill_unregister(rdev->wiphy.rfkill);
rtnl_lock();
wiphy_lock(&rdev->wiphy);
@@ -1088,7 +1072,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
{
struct cfg80211_internal_bss *scan, *tmp;
struct cfg80211_beacon_registration *reg, *treg;
- rfkill_destroy(rdev->rfkill);
+ rfkill_destroy(rdev->wiphy.rfkill);
list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
list_del(&reg->list);
kfree(reg);
@@ -1110,7 +1094,7 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- if (rfkill_set_hw_state_reason(rdev->rfkill, blocked, reason))
+ if (rfkill_set_hw_state_reason(wiphy->rfkill, blocked, reason))
schedule_work(&rdev->rfkill_block);
}
EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
@@ -1340,6 +1324,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
rdev->devlist_generation++;
wdev->registered = true;
+ if (wdev->netdev &&
+ sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
+ "phy80211"))
+ pr_err("failed to add phy80211 symlink to netdev!\n");
+
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
}
@@ -1365,14 +1354,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
if (ret)
goto out;
- if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
- "phy80211")) {
- pr_err("failed to add phy80211 symlink to netdev!\n");
- unregister_netdevice(dev);
- ret = -EINVAL;
- goto out;
- }
-
cfg80211_register_wdev(rdev, wdev);
ret = 0;
out:
@@ -1506,7 +1487,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
wdev->use_4addr, 0))
return notifier_from_errno(-EOPNOTSUPP);
- if (rfkill_blocked(rdev->rfkill))
+ if (rfkill_blocked(rdev->wiphy.rfkill))
return notifier_from_errno(-ERFKILL);
break;
default:
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a7d19b4b40ac..b35d0db12f1d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -27,7 +27,6 @@ struct cfg80211_registered_device {
/* rfkill support */
struct rfkill_ops rfkill_ops;
- struct rfkill *rfkill;
struct work_struct rfkill_block;
/* ISO / IEC 3166 alpha2 for which this device is receiving
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fc9286afe3c9..50eb405b0690 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -330,7 +330,7 @@ nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = {
};
static const struct nla_policy
-nl80211_psmr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
+nl80211_pmsr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
[NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR,
[NL80211_PMSR_PEER_ATTR_CHAN] = NLA_POLICY_NESTED(nl80211_policy),
[NL80211_PMSR_PEER_ATTR_REQ] =
@@ -345,7 +345,7 @@ nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = {
[NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT },
[NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT },
[NL80211_PMSR_ATTR_PEERS] =
- NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy),
+ NLA_POLICY_NESTED_ARRAY(nl80211_pmsr_peer_attr_policy),
};
static const struct nla_policy
@@ -1731,6 +1731,11 @@ nl80211_send_iftype_data(struct sk_buff *msg,
&iftdata->he_6ghz_capa))
return -ENOBUFS;
+ if (iftdata->vendor_elems.data && iftdata->vendor_elems.len &&
+ nla_put(msg, NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
+ iftdata->vendor_elems.len, iftdata->vendor_elems.data))
+ return -ENOBUFS;
+
return 0;
}
@@ -4781,11 +4786,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
sband->ht_cap.mcs.rx_mask,
sizeof(mask->control[i].ht_mcs));
- if (!sband->vht_cap.vht_supported)
- continue;
-
- vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ if (sband->vht_cap.vht_supported) {
+ vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ }
he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
if (!he_cap)
@@ -13042,7 +13046,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
if (wdev_running(wdev))
return 0;
- if (rfkill_blocked(rdev->rfkill))
+ if (rfkill_blocked(rdev->wiphy.rfkill))
return -ERFKILL;
err = rdev_start_p2p_device(rdev, wdev);
@@ -13084,7 +13088,7 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
if (wdev_running(wdev))
return -EEXIST;
- if (rfkill_blocked(rdev->rfkill))
+ if (rfkill_blocked(rdev->wiphy.rfkill))
return -ERFKILL;
if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF])
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index 6bdd96408022..328cf54bda82 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -168,6 +168,18 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]) {
+ if (!out->ftm.non_trigger_based && !out->ftm.trigger_based) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR],
+ "FTM: BSS color set for EDCA based ranging");
+ return -EINVAL;
+ }
+
+ out->ftm.bss_color =
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]);
+ }
+
return 0;
}
@@ -334,6 +346,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
struct sk_buff *msg;
void *hdr;
@@ -364,9 +377,20 @@ free_msg:
nlmsg_free(msg);
free_request:
spin_lock_bh(&wdev->pmsr_lock);
- list_del(&req->list);
+ /*
+ * cfg80211_pmsr_process_abort() may have already moved this request
+ * to the free list, and will free it later. In this case, don't free
+ * it here.
+ */
+ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
+ if (tmp == req) {
+ list_del(&req->list);
+ to_free = req;
+ break;
+ }
+ }
spin_unlock_bh(&wdev->pmsr_lock);
- kfree(req);
+ kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
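
The shape of the fix above is the usual "free only if still on the list" pattern: both racing paths take the lock, but only the one that still finds the request on the list may unlink and free it. A generic sketch (struct and variable names illustrative):

    struct req *tmp, *next, *to_free = NULL;

    spin_lock_bh(&lock);
    list_for_each_entry_safe(tmp, next, &pending, list) {
        if (tmp == req) {
            list_del(&req->list);
            to_free = req;      /* this path won the race */
            break;
        }
    }
    spin_unlock_bh(&lock);
    kfree(to_free);             /* no-op when the other path won */
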
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 8b1358d04ca2..b1d37f582dc6 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -464,8 +464,18 @@ static inline int rdev_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_assoc_request *req)
{
+ const struct cfg80211_bss_ies *bss_ies;
int ret;
- trace_rdev_assoc(&rdev->wiphy, dev, req);
+
+ /*
+ * Note: we might not trace exactly the data that's processed,
+ * due to races and the driver/mac80211 getting a newer copy.
+ */
+ rcu_read_lock();
+ bss_ies = rcu_dereference(req->bss->ies);
+ trace_rdev_assoc(&rdev->wiphy, dev, req, bss_ies);
+ rcu_read_unlock();
+
ret = rdev->ops->assoc(&rdev->wiphy, dev, req);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0406ce7334fa..c2d0ff7f089f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3975,7 +3975,9 @@ static int __regulatory_set_wiphy_regd(struct wiphy *wiphy,
"wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n"))
return -EPERM;
- if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected\n")) {
+ if (WARN(!is_valid_rd(rd),
+ "Invalid regulatory domain detected: %c%c\n",
+ rd->alpha2[0], rd->alpha2[1])) {
print_regdomain_info(rd);
return -EINVAL;
}
@@ -4049,6 +4051,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
wiphy_update_regulatory(wiphy, lr->initiator);
wiphy_all_share_dfs_chan_state(wiphy);
+ reg_process_self_managed_hints();
}
void wiphy_regulatory_deregister(struct wiphy *wiphy)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 4f06c1825029..f03c7ac8e184 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -618,7 +618,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
freq = ieee80211_channel_to_frequency(ap_info->channel, band);
- if (end - pos < count * ap_info->tbtt_info_len)
+ if (end - pos < count * length)
break;
/*
@@ -630,7 +630,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
if (band != NL80211_BAND_6GHZ ||
(length != IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM &&
length < IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM)) {
- pos += count * ap_info->tbtt_info_len;
+ pos += count * length;
continue;
}
@@ -653,7 +653,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
kfree(entry);
}
- pos += ap_info->tbtt_info_len;
+ pos += length;
}
}
@@ -757,7 +757,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
}
request = kzalloc(struct_size(request, channels, n_channels) +
- sizeof(*request->scan_6ghz_params) * count,
+ sizeof(*request->scan_6ghz_params) * count +
+ sizeof(*request->ssids) * rdev_req->n_ssids,
GFP_KERNEL);
if (!request) {
cfg80211_free_coloc_ap_list(&coloc_ap_list);
@@ -848,10 +849,19 @@ skip:
if (request->n_channels) {
struct cfg80211_scan_request *old = rdev->int_scan_req;
-
rdev->int_scan_req = request;
/*
+ * Add the ssids from the parent scan request to the new scan
+ * request, so the driver will be able to use them in its
+ * probe requests to discover hidden APs on PSC channels.
+ */
+ request->ssids = (void *)&request->channels[request->n_channels];
+ request->n_ssids = rdev_req->n_ssids;
+ memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
+ request->n_ssids);
+
+ /*
* If this scan follows a previous scan, save the scan start
* info from the first part of the scan
*/
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9b959e3b09c6..0c3f05c9be27 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
wiphy_unlock(&rdev->wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(&rdev->wiphy);
+
rtnl_unlock();
return ret;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 76b777d5903f..440bce5f0274 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1195,8 +1195,9 @@ TRACE_EVENT(rdev_auth,
TRACE_EVENT(rdev_assoc,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_assoc_request *req),
- TP_ARGS(wiphy, netdev, req),
+ struct cfg80211_assoc_request *req,
+ const struct cfg80211_bss_ies *bss_ies),
+ TP_ARGS(wiphy, netdev, req, bss_ies),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -1204,6 +1205,17 @@ TRACE_EVENT(rdev_assoc,
MAC_ENTRY(prev_bssid)
__field(bool, use_mfp)
__field(u32, flags)
+ __dynamic_array(u8, bss_elements, bss_ies->len)
+ __field(bool, bss_elements_bcon)
+ __field(u64, bss_elements_tsf)
+ __dynamic_array(u8, elements, req->ie_len)
+ __array(u8, ht_capa, sizeof(struct ieee80211_ht_cap))
+ __array(u8, ht_capa_mask, sizeof(struct ieee80211_ht_cap))
+ __array(u8, vht_capa, sizeof(struct ieee80211_vht_cap))
+ __array(u8, vht_capa_mask, sizeof(struct ieee80211_vht_cap))
+ __dynamic_array(u8, fils_kek, req->fils_kek_len)
+ __dynamic_array(u8, fils_nonces,
+ req->fils_nonces ? 2 * FILS_NONCE_LEN : 0)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -1215,6 +1227,26 @@ TRACE_EVENT(rdev_assoc,
MAC_ASSIGN(prev_bssid, req->prev_bssid);
__entry->use_mfp = req->use_mfp;
__entry->flags = req->flags;
+ if (bss_ies->len)
+ memcpy(__get_dynamic_array(bss_elements),
+ bss_ies->data, bss_ies->len);
+ __entry->bss_elements_bcon = bss_ies->from_beacon;
+ __entry->bss_elements_tsf = bss_ies->tsf;
+ if (req->ie)
+ memcpy(__get_dynamic_array(elements),
+ req->ie, req->ie_len);
+ memcpy(__entry->ht_capa, &req->ht_capa, sizeof(req->ht_capa));
+ memcpy(__entry->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(req->ht_capa_mask));
+ memcpy(__entry->vht_capa, &req->vht_capa, sizeof(req->vht_capa));
+ memcpy(__entry->vht_capa_mask, &req->vht_capa_mask,
+ sizeof(req->vht_capa_mask));
+ if (req->fils_kek)
+ memcpy(__get_dynamic_array(fils_kek),
+ req->fils_kek, req->fils_kek_len);
+ if (req->fils_nonces)
+ memcpy(__get_dynamic_array(fils_nonces),
+ req->fils_nonces, 2 * FILS_NONCE_LEN);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u",
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 382c5262d997..18dba3d7c638 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset)
+ u8 data_offset, bool is_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
tmp.h_proto = payload.proto;
- if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+ if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header)))
@@ -771,6 +771,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
+ /* mitigate A-MSDU aggregation injection attacks */
+ if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ goto purge;
offset += sizeof(struct ethhdr);
last = remaining <= subframe_len + padding;
@@ -1056,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_MESH_POINT:
/* mesh should be handled? */
break;
+ case NL80211_IFTYPE_OCB:
+ cfg80211_leave_ocb(rdev, dev);
+ break;
default:
break;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a8320dc59af7..a32065d600a1 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -902,7 +902,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
/* only change when not disabling */
if (!data->txpower.disabled) {
- rfkill_set_sw_state(rdev->rfkill, false);
+ rfkill_set_sw_state(rdev->wiphy.rfkill, false);
if (data->txpower.fixed) {
/*
@@ -927,7 +927,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
}
}
} else {
- if (rfkill_set_sw_state(rdev->rfkill, true))
+ if (rfkill_set_sw_state(rdev->wiphy.rfkill, true))
schedule_work(&rdev->rfkill_block);
return 0;
}
@@ -963,7 +963,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
/* well... oh well */
data->txpower.fixed = 1;
- data->txpower.disabled = rfkill_blocked(rdev->rfkill);
+ data->txpower.disabled = rfkill_blocked(rdev->wiphy.rfkill);
data->txpower.value = val;
data->txpower.flags = IW_TXPOW_DBM;
@@ -1167,7 +1167,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- bool ps = wdev->ps;
+ bool ps;
int timeout = wdev->ps_timeout;
int err;
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 33bef22e44e9..b379a0371653 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device * dev,
return -EOPNOTSUPP;
/* Just do it */
- memcpy(&(spydata->spy_thr_low), &(threshold->low),
- 2 * sizeof(struct iw_quality));
+ spydata->spy_thr_low = threshold->low;
+ spydata->spy_thr_high = threshold->high;
/* Clear flag */
memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
@@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device * dev,
return -EOPNOTSUPP;
/* Just do it */
- memcpy(&(threshold->low), &(spydata->spy_thr_low),
- 2 * sizeof(struct iw_quality));
+ threshold->low = spydata->spy_thr_low;
+ threshold->high = spydata->spy_thr_high;
return 0;
}
@@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device * dev,
memcpy(threshold.addr.sa_data, address, ETH_ALEN);
threshold.addr.sa_family = ARPHRD_ETHER;
/* Copy stats */
- memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality));
+ threshold.qual = *wstats;
/* Copy also thresholds */
- memcpy(&(threshold.low), &(spydata->spy_thr_low),
- 2 * sizeof(struct iw_quality));
+ threshold.low = spydata->spy_thr_low;
+ threshold.high = spydata->spy_thr_high;
/* Send event to user space */
wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 44d6566dd23e..3583354a7d7f 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -366,7 +366,7 @@ static void x25_destroy_timer(struct timer_list *t)
/*
* This is called from user mode and the timers. Thus it protects itself
- * against interrupt users but doesn't worry about being called during
+ * against interrupting users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
* Not static as it's used by the timer
@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
if (protocol)
goto out;
- rc = -ENOBUFS;
+ rc = -ENOMEM;
if ((sk = x25_alloc_socket(net, kern)) == NULL)
goto out;
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index d48ad6d29197..21b30b56e889 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -19,7 +19,6 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
{
struct x25_route *rt;
struct x25_neigh *neigh_new = NULL;
- struct list_head *entry;
struct x25_forward *x25_frwd, *new_frwd;
struct sk_buff *skbn;
short same_lci = 0;
@@ -46,8 +45,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
* established LCI? It shouldn't happen, just in case..
*/
read_lock_bh(&x25_forward_list_lock);
- list_for_each(entry, &x25_forward_list) {
- x25_frwd = list_entry(entry, struct x25_forward, node);
+ list_for_each_entry(x25_frwd, &x25_forward_list, node) {
if (x25_frwd->lci == lci) {
pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
same_lci = 1;
@@ -92,15 +90,13 @@ out_no_route:
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
struct x25_forward *frwd;
- struct list_head *entry;
struct net_device *peer = NULL;
struct x25_neigh *nb;
struct sk_buff *skbn;
int rc = 0;
read_lock_bh(&x25_forward_list_lock);
- list_for_each(entry, &x25_forward_list) {
- frwd = list_entry(entry, struct x25_forward, node);
+ list_for_each_entry(frwd, &x25_forward_list, node) {
if (frwd->lci == lci) {
/* The call is established, either side can send */
if (from->dev == frwd->dev1) {
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 57a81100c5da..5460b9146dd8 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -332,12 +332,9 @@ void x25_link_device_down(struct net_device *dev)
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
struct x25_neigh *nb, *use = NULL;
- struct list_head *entry;
read_lock_bh(&x25_neigh_list_lock);
- list_for_each(entry, &x25_neigh_list) {
- nb = list_entry(entry, struct x25_neigh, node);
-
+ list_for_each_entry(nb, &x25_neigh_list, node) {
if (nb->dev == dev) {
use = nb;
break;
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 9fbe4bb38d94..647f325ed867 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -27,14 +27,11 @@ static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
struct net_device *dev)
{
struct x25_route *rt;
- struct list_head *entry;
int rc = -EINVAL;
write_lock_bh(&x25_route_list_lock);
- list_for_each(entry, &x25_route_list) {
- rt = list_entry(entry, struct x25_route, node);
-
+ list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, address, sigdigits) &&
rt->sigdigits == sigdigits)
goto out;
@@ -78,14 +75,11 @@ static int x25_del_route(struct x25_address *address, unsigned int sigdigits,
struct net_device *dev)
{
struct x25_route *rt;
- struct list_head *entry;
int rc = -EINVAL;
write_lock_bh(&x25_route_list_lock);
- list_for_each(entry, &x25_route_list) {
- rt = list_entry(entry, struct x25_route, node);
-
+ list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, address, sigdigits) &&
rt->sigdigits == sigdigits && rt->dev == dev) {
__x25_remove_route(rt);
@@ -141,13 +135,10 @@ struct net_device *x25_dev_get(char *devname)
struct x25_route *x25_get_route(struct x25_address *addr)
{
struct x25_route *rt, *use = NULL;
- struct list_head *entry;
read_lock_bh(&x25_route_list_lock);
- list_for_each(entry, &x25_route_list) {
- rt = list_entry(entry, struct x25_route, node);
-
+ list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, addr, rt->sigdigits)) {
if (!use)
use = rt;
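
Note: the x25 hunks convert open-coded list_for_each() plus list_entry() walks to list_for_each_entry(), dropping the scratch struct list_head pointer. A kernel-context sketch of the idiom, with made-up types:

#include <linux/list.h>

struct route {		/* hypothetical; node is the embedded list_head */
	struct list_head node;
	int lci;
};

static struct route *find_route(struct list_head *head, int lci)
{
	struct route *rt;

	/* list_for_each_entry() hides the container_of() step that the
	 * old list_for_each() + list_entry() pair spelled out by hand. */
	list_for_each_entry(rt, head, node) {
		if (rt->lci == lci)
			return rt;
	}
	return NULL;
}
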
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 56a28a686988..f01ef6bda390 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -27,7 +27,7 @@ static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
- kfree(umem->pgs);
+ kvfree(umem->pgs);
umem->pgs = NULL;
}
@@ -99,8 +99,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
long npgs;
int err;
- umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
- GFP_KERNEL | __GFP_NOWARN);
+ umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN);
if (!umem->pgs)
return -ENOMEM;
@@ -123,7 +122,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
out_pin:
xdp_umem_unpin_pages(umem);
out_pgs:
- kfree(umem->pgs);
+ kvfree(umem->pgs);
umem->pgs = NULL;
return err;
}
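
Note: the xdp_umem hunks switch the pinned-page array to kvcalloc()/kvfree(). The array size is derived from a user-supplied UMEM, so a large request could fail under kcalloc() even when vmalloc space was available. A kernel-context sketch of the pattern:

#include <linux/mm.h>		/* kvcalloc(), kvfree() */

static struct page **alloc_page_array(u32 npgs)
{
	/* kvcalloc() tries kmalloc() first and falls back to vmalloc()
	 * when the user-sized array is too large for the slab allocator;
	 * kvfree() releases either kind, so the free side must match. */
	return kvcalloc(npgs, sizeof(struct page *),
			GFP_KERNEL | __GFP_NOWARN);
}
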
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index cd62d4ba87a9..d6b500dc4208 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -749,7 +749,7 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
- struct xdp_sock ***map_entry)
+ struct xdp_sock __rcu ***map_entry)
{
struct xsk_map *map = NULL;
struct xsk_map_node *node;
@@ -785,7 +785,7 @@ static void xsk_delete_from_maps(struct xdp_sock *xs)
* might be updates to the map between
* xsk_get_map_list_entry() and xsk_map_try_sock_delete().
*/
- struct xdp_sock **map_entry = NULL;
+ struct xdp_sock __rcu **map_entry = NULL;
struct xsk_map *map;
while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
@@ -1313,7 +1313,7 @@ static int xsk_notifier(struct notifier_block *this,
if (xs->dev == dev) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
+ sk_error_report(sk);
xsk_unbind_dev(xs);
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index edcf249ad1f1..a4bc4749faac 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -31,7 +31,7 @@ struct xdp_mmap_offsets_v1 {
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
- struct xdp_sock **map_entry;
+ struct xdp_sock __rcu **map_entry;
};
static inline struct xdp_sock *xdp_sk(struct sock *sk)
@@ -40,7 +40,7 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
}
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
- struct xdp_sock **map_entry);
+ struct xdp_sock __rcu **map_entry);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9d2a89d793c0..9ae13cccfb28 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -128,12 +128,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 chunk;
-
- if (desc->len > pool->chunk_size)
- return false;
+ u64 chunk, chunk_end;
chunk = xp_aligned_extract_addr(pool, desc->addr);
+ if (likely(desc->len)) {
+ chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
+ if (chunk != chunk_end)
+ return false;
+ }
+
if (chunk >= pool->addrs_cnt)
return false;
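
Note: the new xp_aligned_validate_desc() logic rejects descriptors whose payload straddles a chunk boundary. Checking len <= chunk_size alone accepted a buffer that starts near the end of one chunk and spills into the next. A standalone worked example, assuming 2048-byte chunks:

/* With 2048-byte chunks, addr = 2000, len = 100 passes the old
 * "len <= chunk_size" test but spans bytes 2000..2099, i.e. two chunks:
 *   first byte: 2000 / 2048        = chunk 0
 *   last byte:  (2000 + 99) / 2048 = chunk 1   -> must be rejected
 */
static int desc_within_one_chunk(unsigned long addr, unsigned long len,
				 unsigned long chunk_size)
{
	if (!len)
		return 1;	/* zero-length: nothing to straddle */
	return addr / chunk_size == (addr + len - 1) / chunk_size;
}
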
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 67b4ce504852..2e48d0e094d9 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -12,7 +12,7 @@
#include "xsk.h"
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
- struct xdp_sock **map_entry)
+ struct xdp_sock __rcu **map_entry)
{
struct xsk_map_node *node;
@@ -42,7 +42,7 @@ static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
}
static void xsk_map_sock_delete(struct xdp_sock *xs,
- struct xdp_sock **map_entry)
+ struct xdp_sock __rcu **map_entry)
{
struct xsk_map_node *n, *tmp;
@@ -124,6 +124,10 @@ static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
return insn - insn_buf;
}
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+ */
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
@@ -131,12 +135,11 @@ static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
if (key >= map->max_entries)
return NULL;
- return READ_ONCE(m->xsk_map[key]);
+ return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
return __xsk_map_lookup_elem(map, *(u32 *)key);
}
@@ -149,7 +152,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *xs, *old_xs, **map_entry;
+ struct xdp_sock __rcu **map_entry;
+ struct xdp_sock *xs, *old_xs;
u32 i = *(u32 *)key, fd = *(u32 *)value;
struct xsk_map_node *node;
struct socket *sock;
@@ -179,7 +183,7 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
}
spin_lock_bh(&m->lock);
- old_xs = READ_ONCE(*map_entry);
+ old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
if (old_xs == xs) {
err = 0;
goto out;
@@ -191,7 +195,7 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
goto out;
}
xsk_map_sock_add(xs, node);
- WRITE_ONCE(*map_entry, xs);
+ rcu_assign_pointer(*map_entry, xs);
if (old_xs)
xsk_map_sock_delete(old_xs, map_entry);
spin_unlock_bh(&m->lock);
@@ -208,7 +212,8 @@ out:
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *old_xs, **map_entry;
+ struct xdp_sock __rcu **map_entry;
+ struct xdp_sock *old_xs;
int k = *(u32 *)key;
if (k >= map->max_entries)
@@ -216,7 +221,7 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
spin_lock_bh(&m->lock);
map_entry = &m->xsk_map[k];
- old_xs = xchg(map_entry, NULL);
+ old_xs = unrcu_pointer(xchg(map_entry, NULL));
if (old_xs)
xsk_map_sock_delete(old_xs, map_entry);
spin_unlock_bh(&m->lock);
@@ -226,15 +231,16 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
- return __bpf_xdp_redirect_map(map, ifindex, flags, __xsk_map_lookup_elem);
+ return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
+ __xsk_map_lookup_elem);
}
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
- struct xdp_sock **map_entry)
+ struct xdp_sock __rcu **map_entry)
{
spin_lock_bh(&map->lock);
- if (READ_ONCE(*map_entry) == xs) {
- WRITE_ONCE(*map_entry, NULL);
+ if (rcu_access_pointer(*map_entry) == xs) {
+ rcu_assign_pointer(*map_entry, NULL);
xsk_map_sock_delete(xs, map_entry);
}
spin_unlock_bh(&map->lock);
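
Note: the xskmap hunks mark the map slots __rcu so sparse enforces the access discipline: readers use rcu_dereference_check() (RCU or BH context), writers publish with rcu_assign_pointer() under the map lock and inspect the old value with rcu_dereference_protected(). A kernel-context sketch of the pattern, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item;

struct slot_table {
	spinlock_t lock;
	struct item __rcu *slots[64];
};

/* Reader: runs under rcu_read_lock() or, from NAPI, local_bh_disable();
 * the _check form keeps lockdep happy with either. */
static struct item *slot_lookup(struct slot_table *t, u32 i)
{
	return rcu_dereference_check(t->slots[i], rcu_read_lock_bh_held());
}

/* Writer: publication under the table lock. The displaced pointer may
 * only be freed after a grace period (e.g. via kfree_rcu()). */
static void slot_replace(struct slot_table *t, u32 i, struct item *new)
{
	struct item *old;

	spin_lock_bh(&t->lock);
	old = rcu_dereference_protected(t->slots[i],
					lockdep_is_held(&t->lock));
	rcu_assign_pointer(t->slots[i], new);
	spin_unlock_bh(&t->lock);
	(void)old;
}
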
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 6d6917b68856..e843b0d9e2a6 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -268,6 +268,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->num_exthdrs = 0;
xso->flags = 0;
xso->dev = NULL;
+ xso->real_dev = NULL;
dev_put(dev);
if (err != -EOPNOTSUPP)
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index ce66323102f9..d12bb906c9c9 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -131,6 +131,13 @@ __xfrm_spi_hash(const xfrm_address_t *daddr, __be32 spi, u8 proto,
return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
}
+static inline unsigned int
+__xfrm_seq_hash(u32 seq, unsigned int hmask)
+{
+ unsigned int h = seq;
+ return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
+}
+
static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
{
return (index ^ (index >> 8)) & hmask;
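
Note: __xfrm_seq_hash() reuses the fold-and-mask scheme of the neighbouring hashes: xor the value with right-shifted copies of itself so high bits influence the low bits that survive the mask (hmask is one less than a power-of-two table size). A standalone illustration:

#include <stdio.h>

static unsigned int seq_hash(unsigned int seq, unsigned int hmask)
{
	unsigned int h = seq;

	/* Fold the high bits down so values differing only above bit 10
	 * still land in different buckets after masking. */
	return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
}

int main(void)
{
	/* hmask = 1023 for a 1024-bucket table */
	printf("bucket %u\n", seq_hash(0x12345678u, 1023));
	return 0;
}
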
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1158cd0311d7..3df0861d4390 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -612,7 +612,7 @@ lock:
goto drop_unlock;
}
- if (x->repl->check(x, skb, seq)) {
+ if (xfrm_replay_check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
@@ -660,12 +660,12 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (x->repl->recheck(x, skb, seq)) {
+ if (xfrm_replay_recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
- x->repl->advance(x, seq);
+ xfrm_replay_advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 8831f5a9e992..41de46b5ffa9 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -432,6 +432,7 @@ static int xfrmi4_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return 0;
+ break;
case ICMP_REDIRECT:
break;
default:
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index e4cb0ff4dcf4..229544bc70c2 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -77,6 +77,83 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
+{
+ const unsigned char *nh = skb_network_header(skb);
+ unsigned int offset = sizeof(struct ipv6hdr);
+ unsigned int packet_len;
+ int found_rhdr = 0;
+
+ packet_len = skb_tail_pointer(skb) - nh;
+ *nexthdr = &ipv6_hdr(skb)->nexthdr;
+
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
+
+ switch (**nexthdr) {
+ case NEXTHDR_HOP:
+ break;
+ case NEXTHDR_ROUTING:
+ if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
+ struct ipv6_rt_hdr *rt;
+
+ rt = (struct ipv6_rt_hdr *)(nh + offset);
+ if (rt->type != 0)
+ return offset;
+ }
+ found_rhdr = 1;
+ break;
+ case NEXTHDR_DEST:
+ /* HAO MUST NOT appear more than once.
+ * XXX: It is better to try to find by the end of
+ * XXX: packet if HAO exists.
+ */
+ if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
+ net_dbg_ratelimited("mip6: hao exists already, override\n");
+ return offset;
+ }
+
+ if (found_rhdr)
+ return offset;
+
+ break;
+ default:
+ return offset;
+ }
+
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
+ exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+ offset);
+ offset += ipv6_optlen(exthdr);
+ if (offset > IPV6_MAXPLEN)
+ return -EINVAL;
+ *nexthdr = &exthdr->nexthdr;
+ }
+
+ return -EINVAL;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
+{
+ switch (x->type->proto) {
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_ROUTING:
+ return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
+#endif
+ default:
+ break;
+ }
+
+ return ip6_find_1stfragopt(skb, prevhdr);
+}
+#endif
+
/* Add encapsulation header.
*
* The IP header and mutable extension headers will be moved forward to make
@@ -92,7 +169,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
iph = ipv6_hdr(skb);
skb_set_inner_transport_header(skb, skb_transport_offset(skb));
- hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
if (hdr_len < 0)
return hdr_len;
skb_set_mac_header(skb,
@@ -122,7 +199,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
iph = ipv6_hdr(skb);
- hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
if (hdr_len < 0)
return hdr_len;
skb_set_mac_header(skb,
@@ -448,7 +525,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
goto error;
}
- err = x->repl->overflow(x, skb);
+ err = xfrm_replay_overflow(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
goto error;
@@ -565,6 +642,42 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
return 0;
}
+/* For partial checksum offload, the outer header checksum is calculated
+ * by software and the inner header checksum is calculated by hardware.
+ * This requires hardware to know the inner packet type to calculate
+ * the inner header checksum. Save inner ip protocol here to avoid
+ * traversing the packet in the vendor's xmit code.
+ * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
+ * get the ip protocol from the IP header.
+ */
+static void xfrm_get_inner_ipproto(struct sk_buff *skb)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ const struct ethhdr *eth;
+
+ if (!xo)
+ return;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ xo->inner_ipproto = skb->inner_ipproto;
+ return;
+ }
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IPV6:
+ xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
+ break;
+ case ETH_P_IP:
+ xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
+ break;
+ }
+}
+
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -594,12 +707,15 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
return -ENOMEM;
}
- skb->encapsulation = 1;
sp->olen++;
sp->xvec[sp->len++] = x;
xfrm_state_hold(x);
+ if (skb->encapsulation)
+ xfrm_get_inner_ipproto(skb);
+ skb->encapsulation = 1;
+
if (skb_is_gso(skb)) {
if (skb->inner_protocol)
return xfrm_output_gso(net, sk, skb);
@@ -711,15 +827,8 @@ out:
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
- unsigned int ptr = 0;
int err;
- if (x->outer_mode.encap == XFRM_MODE_BEET &&
- ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
- net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
- return -EAFNOSUPPORT;
- }
-
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;
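
Note: xfrm_get_inner_ipproto() above records the inner transport protocol before xfrm marks the skb as encapsulated, so a NIC doing partial checksum offload knows where the inner checksum lives without reparsing. A standalone sketch of the ETHER-encap branch, assuming plain, untagged inner headers (offsets 9 and 6 are the standard IPv4 protocol and IPv6 nexthdr fields):

#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

struct eth { uint8_t dst[6], src[6]; uint16_t proto; };

/* pkt points at the inner Ethernet header; returns the inner IP
 * protocol number, or -1 if it is not plain IPv4/IPv6. */
static int inner_ipproto(const uint8_t *pkt)
{
	const struct eth *e = (const void *)pkt;

	switch (ntohs(e->proto)) {
	case ETH_P_IP:
		return pkt[sizeof(*e) + 9];	/* iphdr->protocol */
	case ETH_P_IPV6:
		return pkt[sizeof(*e) + 6];	/* ipv6hdr->nexthdr */
	default:
		return -1;
	}
}
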
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ce500f847b99..827d84255021 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1902,8 +1902,7 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
match = xfrm_selector_match(sel, fl, family);
if (match)
- ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
- dir);
+ ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
return ret;
}
@@ -2092,12 +2091,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if (unlikely(!daddr || !saddr))
return NULL;
- rcu_read_lock();
retry:
- do {
- sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
- chain = policy_hash_direct(net, daddr, saddr, family, dir);
- } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
+ sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
+ rcu_read_lock();
+
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
+ rcu_read_unlock();
+ goto retry;
+ }
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2128,11 +2130,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
skip_inexact:
- if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
+ rcu_read_unlock();
goto retry;
+ }
- if (ret && !xfrm_pol_hold_rcu(ret))
+ if (ret && !xfrm_pol_hold_rcu(ret)) {
+ rcu_read_unlock();
goto retry;
+ }
fail:
rcu_read_unlock();
@@ -2181,8 +2187,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
goto out;
}
err = security_xfrm_policy_lookup(pol->security,
- fl->flowi_secid,
- dir);
+ fl->flowi_secid);
if (!err) {
if (!xfrm_pol_hold_rcu(pol))
goto again;
@@ -3247,7 +3252,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
/*
* 0 or more than 0 is returned when validation is succeeded (either bypass
- * because of optional transport mode, or next index of the mathced secpath
+ * because of optional transport mode, or next index of the matched secpath
* state with the template.
* -1 is returned when no matching template is found.
* Otherwise "-2 - errored_index" is returned.
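
Note: the policy-lookup hunks reorder the seqcount sample ahead of rcu_read_lock() and make every retry path drop the RCU read lock before looping, so repeated hash resizes can no longer nest read-side critical sections. A minimal sketch of the corrected shape, with hypothetical helpers (hash_lookup(), try_hold()) standing in for the xfrm internals:

#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct entry;
static struct entry *hash_lookup(u32 key);	/* hypothetical */
static bool try_hold(struct entry *e);		/* hypothetical refcount */

static seqcount_t table_gen;	/* bumped by the resize path */

static struct entry *table_lookup(u32 key)
{
	struct entry *e;
	unsigned int seq;

retry:
	/* Sample the generation first, then enter the RCU section; on
	 * any retry, unlock before jumping back so sections never nest. */
	seq = read_seqcount_begin(&table_gen);
	rcu_read_lock();

	e = hash_lookup(key);
	if (read_seqcount_retry(&table_gen, seq) ||
	    (e && !try_hold(e))) {
		rcu_read_unlock();
		goto retry;
	}

	rcu_read_unlock();
	return e;
}
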
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index c6a4338a0d08..9277d81b344c 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -34,8 +34,11 @@ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
return seq_hi;
}
EXPORT_SYMBOL(xfrm_replay_seqhi);
-;
-static void xfrm_replay_notify(struct xfrm_state *x, int event)
+
+static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event);
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event);
+
+void xfrm_replay_notify(struct xfrm_state *x, int event)
{
struct km_event c;
/* we send notify messages in case
@@ -48,6 +51,17 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
* The state structure must be locked!
*/
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ xfrm_replay_notify_bmp(x, event);
+ return;
+ case XFRM_REPLAY_MODE_ESN:
+ xfrm_replay_notify_esn(x, event);
+ return;
+ }
+
switch (event) {
case XFRM_REPLAY_UPDATE:
if (!x->replay_maxdiff ||
@@ -81,7 +95,7 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
x->xflags &= ~XFRM_TIME_DEFER;
}
-static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+static int __xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct net *net = xs_net(x);
@@ -98,14 +112,14 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
return err;
}
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
-static int xfrm_replay_check(struct xfrm_state *x,
- struct sk_buff *skb, __be32 net_seq)
+static int xfrm_replay_check_legacy(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
{
u32 diff;
u32 seq = ntohl(net_seq);
@@ -136,14 +150,26 @@ err:
return -EINVAL;
}
-static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
+static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq);
+static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq);
+
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
- u32 diff;
- u32 seq = ntohl(net_seq);
+ u32 diff, seq;
+
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ return xfrm_replay_advance_bmp(x, net_seq);
+ case XFRM_REPLAY_MODE_ESN:
+ return xfrm_replay_advance_esn(x, net_seq);
+ }
if (!x->props.replay_window)
return;
+ seq = ntohl(net_seq);
if (seq > x->replay.seq) {
diff = seq - x->replay.seq;
if (diff < x->props.replay_window)
@@ -157,7 +183,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
}
if (xfrm_aevent_is_on(xs_net(x)))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -178,7 +204,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
return err;
}
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
@@ -273,7 +299,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
replay_esn->bmp[nr] |= (1U << bitnr);
if (xfrm_aevent_is_on(xs_net(x)))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -416,7 +442,7 @@ static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
}
}
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
@@ -481,6 +507,21 @@ err:
return -EINVAL;
}
+int xfrm_replay_check(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ return xfrm_replay_check_bmp(x, skb, net_seq);
+ case XFRM_REPLAY_MODE_ESN:
+ return xfrm_replay_check_esn(x, skb, net_seq);
+ }
+
+ return xfrm_replay_check_legacy(x, skb, net_seq);
+}
+
static int xfrm_replay_recheck_esn(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
@@ -493,6 +534,22 @@ static int xfrm_replay_recheck_esn(struct xfrm_state *x,
return xfrm_replay_check_esn(x, skb, net_seq);
}
+int xfrm_replay_recheck(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ /* no special recheck treatment */
+ return xfrm_replay_check_bmp(x, skb, net_seq);
+ case XFRM_REPLAY_MODE_ESN:
+ return xfrm_replay_recheck_esn(x, skb, net_seq);
+ }
+
+ return xfrm_replay_check_legacy(x, skb, net_seq);
+}
+
static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
{
unsigned int bitnr, nr, i;
@@ -548,7 +605,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
replay_esn->bmp[nr] |= (1U << bitnr);
if (xfrm_aevent_is_on(xs_net(x)))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
#ifdef CONFIG_XFRM_OFFLOAD
@@ -560,7 +617,7 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk
__u32 oseq = x->replay.oseq;
if (!xo)
- return xfrm_replay_overflow(x, skb);
+ return __xfrm_replay_overflow(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
@@ -585,7 +642,7 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk
x->replay.oseq = oseq;
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
@@ -625,7 +682,7 @@ static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff
}
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
@@ -674,59 +731,39 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
replay_esn->oseq = oseq;
if (xfrm_aevent_is_on(net))
- x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
-static const struct xfrm_replay xfrm_replay_legacy = {
- .advance = xfrm_replay_advance,
- .check = xfrm_replay_check,
- .recheck = xfrm_replay_check,
- .notify = xfrm_replay_notify,
- .overflow = xfrm_replay_overflow_offload,
-};
-
-static const struct xfrm_replay xfrm_replay_bmp = {
- .advance = xfrm_replay_advance_bmp,
- .check = xfrm_replay_check_bmp,
- .recheck = xfrm_replay_check_bmp,
- .notify = xfrm_replay_notify_bmp,
- .overflow = xfrm_replay_overflow_offload_bmp,
-};
-
-static const struct xfrm_replay xfrm_replay_esn = {
- .advance = xfrm_replay_advance_esn,
- .check = xfrm_replay_check_esn,
- .recheck = xfrm_replay_recheck_esn,
- .notify = xfrm_replay_notify_esn,
- .overflow = xfrm_replay_overflow_offload_esn,
-};
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ return xfrm_replay_overflow_offload_bmp(x, skb);
+ case XFRM_REPLAY_MODE_ESN:
+ return xfrm_replay_overflow_offload_esn(x, skb);
+ }
+
+ return xfrm_replay_overflow_offload(x, skb);
+}
#else
-static const struct xfrm_replay xfrm_replay_legacy = {
- .advance = xfrm_replay_advance,
- .check = xfrm_replay_check,
- .recheck = xfrm_replay_check,
- .notify = xfrm_replay_notify,
- .overflow = xfrm_replay_overflow,
-};
-
-static const struct xfrm_replay xfrm_replay_bmp = {
- .advance = xfrm_replay_advance_bmp,
- .check = xfrm_replay_check_bmp,
- .recheck = xfrm_replay_check_bmp,
- .notify = xfrm_replay_notify_bmp,
- .overflow = xfrm_replay_overflow_bmp,
-};
-
-static const struct xfrm_replay xfrm_replay_esn = {
- .advance = xfrm_replay_advance_esn,
- .check = xfrm_replay_check_esn,
- .recheck = xfrm_replay_recheck_esn,
- .notify = xfrm_replay_notify_esn,
- .overflow = xfrm_replay_overflow_esn,
-};
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+ switch (x->repl_mode) {
+ case XFRM_REPLAY_MODE_LEGACY:
+ break;
+ case XFRM_REPLAY_MODE_BMP:
+ return xfrm_replay_overflow_bmp(x, skb);
+ case XFRM_REPLAY_MODE_ESN:
+ return xfrm_replay_overflow_esn(x, skb);
+ }
+
+ return __xfrm_replay_overflow(x, skb);
+}
#endif
int xfrm_init_replay(struct xfrm_state *x)
@@ -741,12 +778,12 @@ int xfrm_init_replay(struct xfrm_state *x)
if (x->props.flags & XFRM_STATE_ESN) {
if (replay_esn->replay_window == 0)
return -EINVAL;
- x->repl = &xfrm_replay_esn;
+ x->repl_mode = XFRM_REPLAY_MODE_ESN;
} else {
- x->repl = &xfrm_replay_bmp;
+ x->repl_mode = XFRM_REPLAY_MODE_BMP;
}
} else {
- x->repl = &xfrm_replay_legacy;
+ x->repl_mode = XFRM_REPLAY_MODE_LEGACY;
}
return 0;
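
Note: the xfrm_replay hunks drop the per-state struct xfrm_replay ops vector in favour of an enum repl_mode plus switch dispatch; on retpoline kernels this turns an indirect call per packet into predictable direct calls. A sketch of the transformation, with hypothetical names:

#include <linux/types.h>

enum replay_mode { MODE_LEGACY, MODE_BMP, MODE_ESN };
struct state { enum replay_mode repl_mode; };

static int replay_check_legacy(struct state *x, u32 seq);
static int replay_check_bmp(struct state *x, u32 seq);
static int replay_check_esn(struct state *x, u32 seq);

/* Before: err = x->repl->check(x, skb, seq) -- one indirect call per
 * packet, costly under retpolines. After: direct calls selected by a
 * switch on a small enum stored in the state. */
static int replay_check(struct state *x, u32 seq)
{
	switch (x->repl_mode) {
	case MODE_BMP:
		return replay_check_bmp(x, seq);
	case MODE_ESN:
		return replay_check_esn(x, seq);
	case MODE_LEGACY:
	default:
		return replay_check_legacy(x, seq);
	}
}
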
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 4496f7efa220..a2f4001221d1 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -78,10 +78,16 @@ xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
+static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
+{
+ return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
+}
+
static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
struct hlist_head *nsrctable,
struct hlist_head *nspitable,
+ struct hlist_head *nseqtable,
unsigned int nhashmask)
{
struct hlist_node *tmp;
@@ -106,6 +112,11 @@ static void xfrm_hash_transfer(struct hlist_head *list,
nhashmask);
hlist_add_head_rcu(&x->byspi, nspitable + h);
}
+
+ if (x->km.seq) {
+ h = __xfrm_seq_hash(x->km.seq, nhashmask);
+ hlist_add_head_rcu(&x->byseq, nseqtable + h);
+ }
}
}
@@ -117,7 +128,7 @@ static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
- struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
+ struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
unsigned long nsize, osize;
unsigned int nhashmask, ohashmask;
int i;
@@ -137,6 +148,13 @@ static void xfrm_hash_resize(struct work_struct *work)
xfrm_hash_free(nsrc, nsize);
return;
}
+ nseq = xfrm_hash_alloc(nsize);
+ if (!nseq) {
+ xfrm_hash_free(ndst, nsize);
+ xfrm_hash_free(nsrc, nsize);
+ xfrm_hash_free(nspi, nsize);
+ return;
+ }
spin_lock_bh(&net->xfrm.xfrm_state_lock);
write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
@@ -144,15 +162,17 @@ static void xfrm_hash_resize(struct work_struct *work)
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
for (i = net->xfrm.state_hmask; i >= 0; i--)
- xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
+ xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);
osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
+ oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
ohashmask = net->xfrm.state_hmask;
rcu_assign_pointer(net->xfrm.state_bydst, ndst);
rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
rcu_assign_pointer(net->xfrm.state_byspi, nspi);
+ rcu_assign_pointer(net->xfrm.state_byseq, nseq);
net->xfrm.state_hmask = nhashmask;
write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
@@ -165,6 +185,7 @@ static void xfrm_hash_resize(struct work_struct *work)
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
+ xfrm_hash_free(oseq, osize);
}
static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
@@ -621,6 +642,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
+ INIT_HLIST_NODE(&x->byseq);
hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
x->mtimer.function = xfrm_timer_handler;
timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
@@ -664,6 +686,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
list_del(&x->km.all);
hlist_del_rcu(&x->bydst);
hlist_del_rcu(&x->bysrc);
+ if (x->km.seq)
+ hlist_del_rcu(&x->byseq);
if (x->id.spi)
hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
@@ -1148,6 +1172,10 @@ found:
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
+ if (x->km.seq) {
+ h = xfrm_seq_hash(net, x->km.seq);
+ hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+ }
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
hrtimer_start(&x->mtimer,
ktime_set(net->xfrm.sysctl_acq_expires, 0),
@@ -1263,6 +1291,12 @@ static void __xfrm_state_insert(struct xfrm_state *x)
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
+ if (x->km.seq) {
+ h = xfrm_seq_hash(net, x->km.seq);
+
+ hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+ }
+
hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
@@ -1932,20 +1966,18 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
- int i;
-
- for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct xfrm_state *x;
+ unsigned int h = xfrm_seq_hash(net, seq);
+ struct xfrm_state *x;
- hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
- if (x->km.seq == seq &&
- (mark & x->mark.m) == x->mark.v &&
- x->km.state == XFRM_STATE_ACQ) {
- xfrm_state_hold(x);
- return x;
- }
+ hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
+ if (x->km.seq == seq &&
+ (mark & x->mark.m) == x->mark.v &&
+ x->km.state == XFRM_STATE_ACQ) {
+ xfrm_state_hold(x);
+ return x;
}
}
+
return NULL;
}
@@ -2145,7 +2177,7 @@ static void xfrm_replay_timer_handler(struct timer_list *t)
if (x->km.state == XFRM_STATE_VALID) {
if (xfrm_aevent_is_on(xs_net(x)))
- x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
+ xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
else
x->xflags |= XFRM_TIME_DEFER;
}
@@ -2518,7 +2550,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
struct crypto_aead *aead;
@@ -2549,7 +2581,17 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
+EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
+
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+{
+ mtu = __xfrm_state_mtu(x, mtu);
+
+ if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
+ return IPV6_MIN_MTU;
+
+ return mtu;
+}
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
{
@@ -2660,6 +2702,9 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_byspi = xfrm_hash_alloc(sz);
if (!net->xfrm.state_byspi)
goto out_byspi;
+ net->xfrm.state_byseq = xfrm_hash_alloc(sz);
+ if (!net->xfrm.state_byseq)
+ goto out_byseq;
net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
net->xfrm.state_num = 0;
@@ -2669,6 +2714,8 @@ int __net_init xfrm_state_init(struct net *net)
&net->xfrm.xfrm_state_lock);
return 0;
+out_byseq:
+ xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
@@ -2688,6 +2735,8 @@ void xfrm_state_fini(struct net *net)
WARN_ON(!list_empty(&net->xfrm.state_all));
sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
+ WARN_ON(!hlist_empty(net->xfrm.state_byseq));
+ xfrm_hash_free(net->xfrm.state_byseq, sz);
WARN_ON(!hlist_empty(net->xfrm.state_byspi));
xfrm_hash_free(net->xfrm.state_byspi, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
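
Note: the new state_byseq table turns __xfrm_find_acq_byseq() from a walk of every bydst bucket into a single-chain lookup; states join the byseq chain only when km.seq is non-zero, and the resize path transfers and frees the fourth table alongside the other three. A kernel-context sketch of the lookup, with hypothetical types:

#include <linux/rculist.h>
#include <linux/types.h>

struct state {
	u32 seq;
	struct hlist_node byseq;
};

struct net_tbl {
	unsigned int hmask;		/* buckets - 1 */
	struct hlist_head *byseq;
};

/* Caller holds rcu_read_lock(); before this change the equivalent
 * lookup scanned every bucket of the bydst table. */
static struct state *find_by_seq(struct net_tbl *t, u32 seq)
{
	unsigned int h = (seq ^ (seq >> 10) ^ (seq >> 20)) & t->hmask;
	struct state *x;

	hlist_for_each_entry_rcu(x, &t->byseq[h], byseq) {
		if (x->seq == seq)
			return x;
	}
	return NULL;
}
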
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f0aecee4d539..b47d613409b7 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -580,6 +580,20 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
copy_from_user_state(x, p);
+ if (attrs[XFRMA_ENCAP]) {
+ x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
+ sizeof(*x->encap), GFP_KERNEL);
+ if (x->encap == NULL)
+ goto error;
+ }
+
+ if (attrs[XFRMA_COADDR]) {
+ x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
+ sizeof(*x->coaddr), GFP_KERNEL);
+ if (x->coaddr == NULL)
+ goto error;
+ }
+
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
@@ -600,23 +614,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
attrs[XFRMA_ALG_COMP])))
goto error;
- if (attrs[XFRMA_ENCAP]) {
- x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
- sizeof(*x->encap), GFP_KERNEL);
- if (x->encap == NULL)
- goto error;
- }
-
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
- if (attrs[XFRMA_COADDR]) {
- x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
- sizeof(*x->coaddr), GFP_KERNEL);
- if (x->coaddr == NULL)
- goto error;
- }
-
xfrm_mark_get(attrs, &x->mark);
xfrm_smark_init(attrs, &x->props.smark);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 45ceca4e2c70..520434ea966f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -41,6 +41,7 @@ tprogs-y += test_map_in_map
tprogs-y += per_socket_stats_example
tprogs-y += xdp_redirect
tprogs-y += xdp_redirect_map
+tprogs-y += xdp_redirect_map_multi
tprogs-y += xdp_redirect_cpu
tprogs-y += xdp_monitor
tprogs-y += xdp_rxq_info
@@ -99,6 +100,7 @@ test_map_in_map-objs := test_map_in_map_user.o
per_socket_stats_example-objs := cookie_uid_helper_example.o
xdp_redirect-objs := xdp_redirect_user.o
xdp_redirect_map-objs := xdp_redirect_map_user.o
+xdp_redirect_map_multi-objs := xdp_redirect_map_multi_user.o
xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
xdp_monitor-objs := xdp_monitor_user.o
xdp_rxq_info-objs := xdp_rxq_info_user.o
@@ -160,6 +162,7 @@ always-y += tcp_tos_reflect_kern.o
always-y += tcp_dumpstats_kern.o
always-y += xdp_redirect_kern.o
always-y += xdp_redirect_map_kern.o
+always-y += xdp_redirect_map_multi_kern.o
always-y += xdp_redirect_cpu_kern.o
always-y += xdp_monitor_kern.o
always-y += xdp_rxq_info_kern.o
diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
index 26dcd4dde946..9b193231024a 100644
--- a/samples/bpf/ibumad_kern.c
+++ b/samples/bpf/ibumad_kern.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/**
+/*
* ibumad BPF sample kernel side
*
* This program is free software; you can redistribute it and/or
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
index d83d8102f489..0746ca516097 100644
--- a/samples/bpf/ibumad_user.c
+++ b/samples/bpf/ibumad_user.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/**
+/*
* ibumad BPF sample user side
*
* This program is free software; you can redistribute it and/or
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index a78025b0026b..c9a0ca8351fd 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -396,7 +396,7 @@ int main(int argc, char **argv)
* on different systems with different compilers. The right way is
* to parse the ELF file. We took a shortcut here.
*/
- uprobe_file_offset = (__u64)main - (__u64)&__executable_start;
+ uprobe_file_offset = (unsigned long)main - (unsigned long)&__executable_start;
CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
uprobe_file_offset, 0x0, false,
BPF_FD_TYPE_UPROBE,
diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c
index 74a4583d0d86..00061261a8da 100644
--- a/samples/bpf/xdp_fwd_user.c
+++ b/samples/bpf/xdp_fwd_user.c
@@ -67,6 +67,8 @@ static void usage(const char *prog)
"usage: %s [OPTS] interface-list\n"
"\nOPTS:\n"
" -d detach program\n"
+ " -S use skb-mode\n"
+ " -F force loading prog\n"
" -D direct table lookups (skip fib rules)\n",
prog);
}
diff --git a/samples/bpf/xdp_redirect_map_multi_kern.c b/samples/bpf/xdp_redirect_map_multi_kern.c
new file mode 100644
index 000000000000..71aa23d1cb2b
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map_multi_kern.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 32);
+} forward_map_general SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 32);
+} forward_map_native SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 1);
+} rxcnt SEC(".maps");
+
+/* map to store egress interfaces mac addresses, set the
+ * max_entries to 1 and extend it in user space prog.
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, __be64);
+ __uint(max_entries, 1);
+} mac_map SEC(".maps");
+
+static int xdp_redirect_map(struct xdp_md *ctx, void *forward_map)
+{
+ long *value;
+ u32 key = 0;
+
+ /* count packet in global counter */
+ value = bpf_map_lookup_elem(&rxcnt, &key);
+ if (value)
+ *value += 1;
+
+ return bpf_redirect_map(forward_map, key,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+}
+
+SEC("xdp_redirect_general")
+int xdp_redirect_map_general(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &forward_map_general);
+}
+
+SEC("xdp_redirect_native")
+int xdp_redirect_map_native(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &forward_map_native);
+}
+
+SEC("xdp_devmap/map_prog")
+int xdp_devmap_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = ctx->egress_ifindex;
+ struct ethhdr *eth = data;
+ __be64 *mac;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ mac = bpf_map_lookup_elem(&mac_map, &key);
+ if (mac)
+ __builtin_memcpy(eth->h_source, mac, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_map_multi_user.c b/samples/bpf/xdp_redirect_map_multi_user.c
new file mode 100644
index 000000000000..84cdbbed20b7
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map_multi_user.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/resource.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "bpf_util.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define MAX_IFACE_NUM 32
+
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int ifaces[MAX_IFACE_NUM] = {};
+static int rxcnt_map_fd;
+
+static void int_exit(int sig)
+{
+ __u32 prog_id = 0;
+ int i;
+
+ for (i = 0; ifaces[i] > 0; i++) {
+ if (bpf_get_link_xdp_id(ifaces[i], &prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id)
+ bpf_set_link_xdp_fd(ifaces[i], -1, xdp_flags);
+ }
+
+ exit(0);
+}
+
+static void poll_stats(int interval)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ __u64 values[nr_cpus], prev[nr_cpus];
+
+ memset(prev, 0, sizeof(prev));
+
+ while (1) {
+ __u64 sum = 0;
+ __u32 key = 0;
+ int i;
+
+ sleep(interval);
+ assert(bpf_map_lookup_elem(rxcnt_map_fd, &key, values) == 0);
+ for (i = 0; i < nr_cpus; i++)
+ sum += (values[i] - prev[i]);
+ if (sum)
+ printf("Forwarding %10llu pkt/s\n", sum / interval);
+ memcpy(prev, values, sizeof(values));
+ }
+}
+
+static int get_mac_addr(unsigned int ifindex, void *mac_addr)
+{
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr;
+ int fd, ret = -1;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return ret;
+
+ if (!if_indextoname(ifindex, ifname))
+ goto err_out;
+
+ strcpy(ifr.ifr_name, ifname);
+
+ if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
+ goto err_out;
+
+ memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));
+ ret = 0;
+
+err_out:
+ close(fd);
+ return ret;
+}
+
+static int update_mac_map(struct bpf_object *obj)
+{
+ int i, ret = -1, mac_map_fd;
+ unsigned char mac_addr[6];
+ unsigned int ifindex;
+
+ mac_map_fd = bpf_object__find_map_fd_by_name(obj, "mac_map");
+ if (mac_map_fd < 0) {
+ printf("find mac map fd failed\n");
+ return ret;
+ }
+
+ for (i = 0; ifaces[i] > 0; i++) {
+ ifindex = ifaces[i];
+
+ ret = get_mac_addr(ifindex, mac_addr);
+ if (ret < 0) {
+ printf("get interface %d mac failed\n", ifindex);
+ return ret;
+ }
+
+ ret = bpf_map_update_elem(mac_map_fd, &ifindex, mac_addr, 0);
+ if (ret) {
+ perror("bpf_update_elem mac_map_fd");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "usage: %s [OPTS] <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n"
+ "OPTS:\n"
+ " -S use skb-mode\n"
+ " -N enforce native mode\n"
+ " -F force loading prog\n"
+ " -X load xdp program on egress\n",
+ prog);
+}
+
+int main(int argc, char **argv)
+{
+ int i, ret, opt, forward_map_fd, max_ifindex = 0;
+ struct bpf_program *ingress_prog, *egress_prog;
+ int ingress_prog_fd, egress_prog_fd = 0;
+ struct bpf_devmap_val devmap_val;
+ bool attach_egress_prog = false;
+ char ifname[IF_NAMESIZE];
+ struct bpf_map *mac_map;
+ struct bpf_object *obj;
+ unsigned int ifindex;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "SNFX")) != -1) {
+ switch (opt) {
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'N':
+ /* default, set below */
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ case 'X':
+ attach_egress_prog = true;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) {
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+ } else if (attach_egress_prog) {
+ printf("Load xdp program on egress with SKB mode not supported yet\n");
+ return 1;
+ }
+
+ if (optind == argc) {
+ printf("usage: %s <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n", argv[0]);
+ return 1;
+ }
+
+ printf("Get interfaces");
+ for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
+ ifaces[i] = if_nametoindex(argv[optind + i]);
+ if (!ifaces[i])
+ ifaces[i] = strtoul(argv[optind + i], NULL, 0);
+ if (!if_indextoname(ifaces[i], ifname)) {
+ perror("Invalid interface name or i");
+ return 1;
+ }
+
+ /* Find the largest index number */
+ if (ifaces[i] > max_ifindex)
+ max_ifindex = ifaces[i];
+
+ printf(" %d", ifaces[i]);
+ }
+ printf("\n");
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open(filename);
+ if (libbpf_get_error(obj)) {
+ printf("ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto err_out;
+ }
+
+ /* Reset the map size to max ifindex + 1 */
+ if (attach_egress_prog) {
+ mac_map = bpf_object__find_map_by_name(obj, "mac_map");
+ ret = bpf_map__resize(mac_map, max_ifindex + 1);
+ if (ret < 0) {
+ printf("ERROR: reset mac map size failed\n");
+ goto err_out;
+ }
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("ERROR: loading BPF object file failed\n");
+ goto err_out;
+ }
+
+ if (xdp_flags & XDP_FLAGS_SKB_MODE) {
+ ingress_prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_general");
+ forward_map_fd = bpf_object__find_map_fd_by_name(obj, "forward_map_general");
+ } else {
+ ingress_prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_native");
+ forward_map_fd = bpf_object__find_map_fd_by_name(obj, "forward_map_native");
+ }
+ if (!ingress_prog || forward_map_fd < 0) {
+ printf("finding ingress_prog/forward_map in obj file failed\n");
+ goto err_out;
+ }
+
+ ingress_prog_fd = bpf_program__fd(ingress_prog);
+ if (ingress_prog_fd < 0) {
+ printf("find ingress_prog fd failed\n");
+ goto err_out;
+ }
+
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ if (rxcnt_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
+ goto err_out;
+ }
+
+ if (attach_egress_prog) {
+ /* Update mac_map with all egress interfaces' mac addr */
+ if (update_mac_map(obj) < 0) {
+ printf("Error: update mac map failed");
+ goto err_out;
+ }
+
+ /* Find egress prog fd */
+ egress_prog = bpf_object__find_program_by_name(obj, "xdp_devmap_prog");
+ if (!egress_prog) {
+ printf("finding egress_prog in obj file failed\n");
+ goto err_out;
+ }
+ egress_prog_fd = bpf_program__fd(egress_prog);
+ if (egress_prog_fd < 0) {
+ printf("find egress_prog fd failed\n");
+ goto err_out;
+ }
+ }
+
+ /* Remove attached program when program is interrupted or killed */
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ /* Init forward multicast groups */
+ for (i = 0; ifaces[i] > 0; i++) {
+ ifindex = ifaces[i];
+
+ /* bind prog_fd to each interface */
+ ret = bpf_set_link_xdp_fd(ifindex, ingress_prog_fd, xdp_flags);
+ if (ret) {
+ printf("Set xdp fd failed on %d\n", ifindex);
+ goto err_out;
+ }
+
+ /* Add all the interfaces to forward group and attach
+ * egress devmap program if it exists
+ */
+ devmap_val.ifindex = ifindex;
+ devmap_val.bpf_prog.fd = egress_prog_fd;
+ ret = bpf_map_update_elem(forward_map_fd, &ifindex, &devmap_val, 0);
+ if (ret) {
+ perror("bpf_map_update_elem forward_map");
+ goto err_out;
+ }
+ }
+
+ poll_stats(2);
+
+ return 0;
+
+err_out:
+ return 1;
+}
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
index 41d705c3a1f7..93854e135134 100644
--- a/samples/bpf/xdp_redirect_user.c
+++ b/samples/bpf/xdp_redirect_user.c
@@ -130,7 +130,7 @@ int main(int argc, char **argv)
if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
xdp_flags |= XDP_FLAGS_DRV_MODE;
- if (optind == argc) {
+ if (optind + 2 != argc) {
printf("usage: %s <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n", argv[0]);
return 1;
}
@@ -213,5 +213,5 @@ int main(int argc, char **argv)
poll_stats(2, ifindex_out);
out:
- return 0;
+ return ret;
}
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 706475e004cb..495e09897bd3 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -103,7 +103,8 @@ static void usage(const char *prog)
fprintf(stderr,
"%s: %s [OPTS] <ifname|ifindex>\n\n"
"OPTS:\n"
- " -F force loading prog\n",
+ " -F force loading prog\n"
+ " -S use skb-mode\n",
__func__, prog);
}
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index aa696854be78..53e300f860bb 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1255,7 +1255,7 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
for (i = 0; i < batch_size; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
idx + i);
- tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
tx_desc->len = PKT_SIZE;
}
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index c495664c0a9b..f991a66b5b02 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -10,6 +10,8 @@
* whenever kernel_clone() is invoked to create a new process.
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
@@ -27,32 +29,31 @@ static struct kprobe kp = {
static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_X86
- pr_info("<%s> pre_handler: p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->ip, regs->flags);
#endif
#ifdef CONFIG_PPC
- pr_info("<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->nip, regs->msr);
#endif
#ifdef CONFIG_MIPS
- pr_info("<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
#endif
#ifdef CONFIG_ARM64
- pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx,"
- " pstate = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
#endif
#ifdef CONFIG_ARM
- pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, cpsr = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, cpsr = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->ARM_pc, (long)regs->ARM_cpsr);
#endif
#ifdef CONFIG_RISCV
- pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, status = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->epc, regs->status);
#endif
#ifdef CONFIG_S390
- pr_info("<%s> pre_handler: p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
+ pr_info("<%s> p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->psw.addr, regs->flags);
#endif
@@ -65,55 +66,40 @@ static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
#ifdef CONFIG_X86
- pr_info("<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
#ifdef CONFIG_PPC
- pr_info("<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->msr);
#endif
#ifdef CONFIG_MIPS
- pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_status);
#endif
#ifdef CONFIG_ARM64
- pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pstate);
#endif
#ifdef CONFIG_ARM
- pr_info("<%s> post_handler: p->addr = 0x%p, cpsr = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, cpsr = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->ARM_cpsr);
#endif
#ifdef CONFIG_RISCV
- pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
+ pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->status);
#endif
#ifdef CONFIG_S390
- pr_info("<%s> pre_handler: p->addr, 0x%p, flags = 0x%lx\n",
+ pr_info("<%s> p->addr, 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
}
-/*
- * fault_handler: this is called if an exception is generated for any
- * instruction within the pre- or post-handler, or when Kprobes
- * single-steps the probed instruction.
- */
-static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
-{
- pr_info("fault_handler: p->addr = 0x%p, trap #%dn", p->addr, trapnr);
- /* Return 0 because we don't handle the fault. */
- return 0;
-}
-/* NOKPROBE_SYMBOL() is also available */
-NOKPROBE_SYMBOL(handler_fault);
-
static int __init kprobe_init(void)
{
int ret;
kp.pre_handler = handler_pre;
kp.post_handler = handler_post;
- kp.fault_handler = handler_fault;
ret = register_kprobe(&kp);
if (ret < 0) {
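
Note: the sample drops its fault_handler because the kprobes core removed that callback; a probe now supplies only pre/post handlers. A minimal module-style sketch of registration on such a kernel:

#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = { .symbol_name = "kernel_clone" };

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static int __init probe_init(void)
{
	kp.pre_handler = pre;	/* no .fault_handler member anymore */
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");
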
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
index b4c1b371e4b8..81906f199454 100644
--- a/samples/pktgen/parameters.sh
+++ b/samples/pktgen/parameters.sh
@@ -11,6 +11,7 @@ function usage() {
echo " -d : (\$DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed"
echo " -m : (\$DST_MAC) destination MAC-addr"
echo " -p : (\$DST_PORT) destination PORT range (e.g. 433-444) is also allowed"
+ echo " -k : (\$UDP_CSUM) enable UDP tx checksum"
echo " -t : (\$THREADS) threads to start"
echo " -f : (\$F_THREAD) index of first thread (zero indexed CPU number)"
echo " -c : (\$SKB_CLONE) SKB clones send before alloc new SKB"
@@ -26,7 +27,7 @@ function usage() {
## --- Parse command line arguments / parameters ---
## echo "Commandline options:"
-while getopts "s:i:d:m:p:f:t:c:n:b:w:vxh6a" option; do
+while getopts "s:i:d:m:p:f:t:c:n:b:w:vxh6ak" option; do
case $option in
i) # interface
export DEV=$OPTARG
@@ -88,6 +89,10 @@ while getopts "s:i:d:m:p:f:t:c:n:b:w:vxh6a" option; do
export APPEND=yes
info "Append mode: APPEND=$APPEND"
;;
+ k)
+ export UDP_CSUM=yes
+ info "UDP tx checksum: UDP_CSUM=$UDP_CSUM"
+ ;;
h|?|*)
usage;
err 2 "[ERROR] Unknown parameters!!!"
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
index a09f3422fbcc..246cfe02bb82 100755
--- a/samples/pktgen/pktgen_sample01_simple.sh
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -72,6 +72,8 @@ if [ -n "$DST_PORT" ]; then
pg_set $DEV "udp_dst_max $UDP_DST_MAX"
fi
+[ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Setup random UDP port src range
pg_set $DEV "flag UDPSRC_RND"
pg_set $DEV "udp_src_min $UDP_SRC_MIN"
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
index acae8ede0d6c..c6af3d9d5171 100755
--- a/samples/pktgen/pktgen_sample02_multiqueue.sh
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -75,6 +75,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Setup random UDP port src range
pg_set $dev "flag UDPSRC_RND"
pg_set $dev "udp_src_min $UDP_SRC_MIN"
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
index 5adcf954de73..ab87de440277 100755
--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -73,6 +73,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Setup burst, for easy testing -b 0 disable bursting
# (internally in pktgen default and minimum burst=1)
if [[ ${BURST} -ne 0 ]]; then
diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
index ddce876635aa..56c5f5af350f 100755
--- a/samples/pktgen/pktgen_sample04_many_flows.sh
+++ b/samples/pktgen/pktgen_sample04_many_flows.sh
@@ -72,6 +72,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Randomize source IP-addresses
pg_set $dev "flag IPSRC_RND"
pg_set $dev "src_min $SRC_MIN"
diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
index 4a65fe2fcee9..6e0effabca59 100755
--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
+++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
@@ -62,6 +62,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Setup source IP-addresses based on thread number
pg_set $dev "src_min 198.18.$((thread+1)).1"
pg_set $dev "src_max 198.18.$((thread+1)).1"
diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
index 10f1da571f40..7c27923083a6 100755
--- a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
+++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
@@ -92,6 +92,8 @@ for ((i = 0; i < $THREADS; i++)); do
pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
# Setup random UDP port src range
pg_set $dev "flag UDPSRC_RND"
pg_set $dev "udp_src_min $UDP_SRC_MIN"
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
index 21dbf63d6e41..9ec93d90e8a5 100644
--- a/samples/vfio-mdev/mdpy-fb.c
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
if (format != DRM_FORMAT_XRGB8888) {
pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
format, DRM_FORMAT_XRGB8888);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
if (width < 100 || width > 10000) {
pci_err(pdev, "width (%d) out of range\n", width);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
if (height < 100 || height > 10000) {
pci_err(pdev, "height (%d) out of range\n", height);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
width, height);
info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
- if (!info)
+ if (!info) {
+ ret = -ENOMEM;
goto err_release_regions;
+ }
pci_set_drvdata(pdev, info);
par = info->par;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 949f723efe53..34d257653fb4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -268,7 +268,8 @@ define rule_as_o_S
endef
# Built-in and composite module parts
-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
+.SECONDEXPANSION:
+$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
$(call if_changed_rule,cc_o_c)
$(call cmd,force_checksrc)
@@ -349,7 +350,7 @@ cmd_modversions_S = \
fi
endif
-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
+$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
$(call if_changed_rule,as_o_S)
targets += $(filter-out $(subdir-builtin), $(real-obj-y))
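Note: escaping the prerequisite as $$(objtool_dep) together with
.SECONDEXPANSION defers its expansion until a target is matched, so a value
that depends on the object being built is honored. A toy Makefile fragment,
unrelated to kbuild, showing the mechanism:

	.SECONDEXPANSION:
	extra_main := main.h
	# $$(extra_$$*) is expanded a second time at match time, so main.o
	# additionally depends on main.h while other objects add nothing.
	%.o: %.c $$(extra_$$*)
		$(CC) -c $< -o $@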
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 3d791908ed36..801c415bac59 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -50,6 +50,7 @@ endif
CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
$(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
$(call cc-param,hwasan-use-short-granules=0) \
+ $(call cc-param,hwasan-inline-all-checks=0) \
$(instrumentation_flags)
endif # CONFIG_KASAN_SW_TAGS
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index dd87cea9fba7..a7883e455290 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M] $@
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
if [ -f vmlinux ]; then \
- LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
+ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
else \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi;
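Note: the quoting matters because make expands $(OBJCOPY) before the shell
parses the recipe. With a hypothetical OBJCOPY := ccache objcopy, the unquoted
recipe reaches the shell as:

	LLVM_OBJCOPY=ccache objcopy pahole -J --btf_base vmlinux mod.ko
	# the shell runs "objcopy", with pahole demoted to an argument

whereas the quoted form keeps the whole string inside the assignment:

	LLVM_OBJCOPY="ccache objcopy" pahole -J --btf_base vmlinux mod.ko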
diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh
index 82748d42ecc5..9c7fbd4bcbce 100755
--- a/scripts/atomic/check-atomics.sh
+++ b/scripts/atomic/check-atomics.sh
@@ -17,7 +17,6 @@ cat <<EOF |
asm-generic/atomic-instrumented.h
asm-generic/atomic-long.h
linux/atomic-arch-fallback.h
-linux/atomic-fallback.h
EOF
while read header; do
OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index 5766ffcec7c5..b0c45aee19d7 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -41,34 +41,6 @@ gen_params_checks()
done
}
-# gen_guard(meta, atomic, pfx, name, sfx, order)
-gen_guard()
-{
- local meta="$1"; shift
- local atomic="$1"; shift
- local pfx="$1"; shift
- local name="$1"; shift
- local sfx="$1"; shift
- local order="$1"; shift
-
- local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
-
- local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
-
- # We definitely need a preprocessor symbol for this atomic if it is an
- # ordering variant, or if there's a generic fallback.
- if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
- printf "defined(${atomicname})"
- return
- fi
-
- # If this is a base variant, but a relaxed variant *may* exist, then we
- # only have a preprocessor symbol if the relaxed variant isn't defined
- if meta_has_relaxed "${meta}"; then
- printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
- fi
-}
-
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
@@ -82,16 +54,12 @@ gen_proto_order_variant()
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
- local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
-
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local checks="$(gen_params_checks "${meta}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
- [ ! -z "${guard}" ] && printf "#if ${guard}\n"
-
cat <<EOF
static __always_inline ${ret}
${atomicname}(${params})
@@ -99,11 +67,8 @@ ${atomicname}(${params})
${checks}
${retstmt}arch_${atomicname}(${args});
}
-#define ${atomicname} ${atomicname}
EOF
- [ ! -z "${guard}" ] && printf "#endif\n"
-
printf "\n"
}
@@ -139,19 +104,6 @@ EOF
fi
}
-gen_optional_xchg()
-{
- local name="$1"; shift
- local sfx="$1"; shift
- local guard="defined(arch_${name}${sfx})"
-
- [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
-
- printf "#if ${guard}\n"
- gen_xchg "${name}${sfx}" ""
- printf "#endif\n\n"
-}
-
cat << EOF
// SPDX-License-Identifier: GPL-2.0
@@ -188,7 +140,8 @@ done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
for order in "" "_acquire" "_release" "_relaxed"; do
- gen_optional_xchg "${xchg}" "${order}"
+ gen_xchg "${xchg}${order}" ""
+ printf "\n"
done
done
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index d29e159ef489..f776a574224d 100755
--- a/scripts/atomic/gen-atomics.sh
+++ b/scripts/atomic/gen-atomics.sh
@@ -11,7 +11,6 @@ cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
-gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
index c71832b2312b..7187ea5e5149 100755
--- a/scripts/documentation-file-ref-check
+++ b/scripts/documentation-file-ref-check
@@ -24,7 +24,7 @@ my $help = 0;
my $fix = 0;
my $warn = 0;
-if (! -d ".git") {
+if (! -e ".git") {
printf "Warning: can't check if file exists, as this is not a git tree\n";
exit 0;
}
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 911c72a2dbc4..1a5fea0519eb 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -601,12 +601,12 @@ if (defined($ENV{'LMC_KEEP'})) {
sub in_preserved_kconfigs {
my $kconfig = $config2kfile{$_[0]};
if (!defined($kconfig)) {
- return 0;
+ return 0;
}
foreach my $excl (@preserved_kconfigs) {
- if($kconfig =~ /^$excl/) {
- return 1;
- }
+ if($kconfig =~ /^$excl/) {
+ return 1;
+ }
}
return 0;
}
@@ -629,52 +629,52 @@ foreach my $line (@config_file) {
}
if (/CONFIG_MODULE_SIG_KEY="(.+)"/) {
- my $orig_cert = $1;
- my $default_cert = "certs/signing_key.pem";
-
- # Check that the logic in this script still matches the one in Kconfig
- if (!defined($depends{"MODULE_SIG_KEY"}) ||
- $depends{"MODULE_SIG_KEY"} !~ /"\Q$default_cert\E"/) {
- print STDERR "WARNING: MODULE_SIG_KEY assertion failure, ",
- "update needed to ", __FILE__, " line ", __LINE__, "\n";
- print;
- } elsif ($orig_cert ne $default_cert && ! -f $orig_cert) {
- print STDERR "Module signature verification enabled but ",
- "module signing key \"$orig_cert\" not found. Resetting ",
- "signing key to default value.\n";
- print "CONFIG_MODULE_SIG_KEY=\"$default_cert\"\n";
- } else {
- print;
- }
- next;
+ my $orig_cert = $1;
+ my $default_cert = "certs/signing_key.pem";
+
+ # Check that the logic in this script still matches the one in Kconfig
+ if (!defined($depends{"MODULE_SIG_KEY"}) ||
+ $depends{"MODULE_SIG_KEY"} !~ /"\Q$default_cert\E"/) {
+ print STDERR "WARNING: MODULE_SIG_KEY assertion failure, ",
+ "update needed to ", __FILE__, " line ", __LINE__, "\n";
+ print;
+ } elsif ($orig_cert ne $default_cert && ! -f $orig_cert) {
+ print STDERR "Module signature verification enabled but ",
+ "module signing key \"$orig_cert\" not found. Resetting ",
+ "signing key to default value.\n";
+ print "CONFIG_MODULE_SIG_KEY=\"$default_cert\"\n";
+ } else {
+ print;
+ }
+ next;
}
if (/CONFIG_SYSTEM_TRUSTED_KEYS="(.+)"/) {
- my $orig_keys = $1;
-
- if (! -f $orig_keys) {
- print STDERR "System keyring enabled but keys \"$orig_keys\" ",
- "not found. Resetting keys to default value.\n";
- print "CONFIG_SYSTEM_TRUSTED_KEYS=\"\"\n";
- } else {
- print;
- }
- next;
+ my $orig_keys = $1;
+
+ if (! -f $orig_keys) {
+ print STDERR "System keyring enabled but keys \"$orig_keys\" ",
+ "not found. Resetting keys to default value.\n";
+ print "CONFIG_SYSTEM_TRUSTED_KEYS=\"\"\n";
+ } else {
+ print;
+ }
+ next;
}
if (/^(CONFIG.*)=(m|y)/) {
- if (in_preserved_kconfigs($1)) {
- dprint "Preserve config $1";
- print;
- next;
- }
+ if (in_preserved_kconfigs($1)) {
+ dprint "Preserve config $1";
+ print;
+ next;
+ }
if (defined($configs{$1})) {
if ($localyesconfig) {
- $setconfigs{$1} = 'y';
+ $setconfigs{$1} = 'y';
print "$1=y\n";
next;
} else {
- $setconfigs{$1} = $2;
+ $setconfigs{$1} = $2;
}
} elsif ($2 eq "m") {
print "# $1 is not set\n";
@@ -702,3 +702,5 @@ foreach my $module (keys(%modules)) {
print STDERR "\n";
}
}
+
+# vim: softtabstop=4
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 4840e748fca8..7c4a6a507ac4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -406,6 +406,8 @@ my $doc_inline_sect = '\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)';
my $doc_inline_end = '^\s*\*/\s*$';
my $doc_inline_oneline = '^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$';
my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;';
+my $function_pointer = qr{([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)};
+my $attribute = qr{__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)}i;
my %parameterdescs;
my %parameterdesc_start_lines;
@@ -694,7 +696,7 @@ sub output_function_man(%) {
$post = ");";
}
$type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
+ if ($type =~ m/$function_pointer/) {
# pointer-to-function
print ".BI \"" . $parenth . $1 . "\" " . " \") (" . $2 . ")" . $post . "\"\n";
} else {
@@ -974,7 +976,7 @@ sub output_function_rst(%) {
$count++;
$type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
+ if ($type =~ m/$function_pointer/) {
# pointer-to-function
print $1 . $parameter . ") (" . $2 . ")";
} else {
@@ -1211,7 +1213,9 @@ sub dump_struct($$) {
my $members;
my $type = qr{struct|union};
# For capturing struct/union definition body, i.e. "{members*}qualifiers*"
- my $definition_body = qr{\{(.*)\}(?:\s*(?:__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*};
+ my $qualifiers = qr{$attribute|__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned};
+ my $definition_body = qr{\{(.*)\}\s*$qualifiers*};
+ my $struct_members = qr{($type)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;};
if ($x =~ /($type)\s+(\w+)\s*$definition_body/) {
$decl_type = $1;
@@ -1235,27 +1239,27 @@ sub dump_struct($$) {
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
# strip attributes
- $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi;
+ $members =~ s/\s*$attribute/ /gi;
$members =~ s/\s*__aligned\s*\([^;]*\)/ /gos;
$members =~ s/\s*__packed\s*/ /gos;
$members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos;
$members =~ s/\s*____cacheline_aligned_in_smp/ /gos;
$members =~ s/\s*____cacheline_aligned/ /gos;
+ my $args = qr{([^,)]+)};
# replace DECLARE_BITMAP
$members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos;
- $members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
+ $members =~ s/DECLARE_BITMAP\s*\($args,\s*$args\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
# replace DECLARE_HASHTABLE
- $members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
+ $members =~ s/DECLARE_HASHTABLE\s*\($args,\s*$args\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
# replace DECLARE_KFIFO
- $members =~ s/DECLARE_KFIFO\s*\(([^,)]+),\s*([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos;
+ $members =~ s/DECLARE_KFIFO\s*\($args,\s*$args,\s*$args\)/$2 \*$1/gos;
# replace DECLARE_KFIFO_PTR
- $members =~ s/DECLARE_KFIFO_PTR\s*\(([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos;
-
+ $members =~ s/DECLARE_KFIFO_PTR\s*\($args,\s*$args\)/$2 \*$1/gos;
my $declaration = $members;
# Split nested struct/union elements as newer ones
- while ($members =~ m/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/) {
+ while ($members =~ m/$struct_members/) {
my $newmember;
my $maintype = $1;
my $ids = $4;
@@ -1315,7 +1319,7 @@ sub dump_struct($$) {
}
}
}
- $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/$newmember/;
+ $members =~ s/$struct_members/$newmember/;
}
# Ignore other nested elements, like enums
@@ -1555,8 +1559,9 @@ sub create_parameterlist($$$$) {
my $param;
# temporarily replace commas inside function pointer definition
- while ($args =~ /(\([^\),]+),/) {
- $args =~ s/(\([^\),]+),/$1#/g;
+ my $arg_expr = qr{\([^\),]+};
+ while ($args =~ /$arg_expr,/) {
+ $args =~ s/($arg_expr),/$1#/g;
}
foreach my $arg (split($splitter, $args)) {
@@ -1707,7 +1712,7 @@ sub check_sections($$$$$) {
foreach $px (0 .. $#prms) {
$prm_clean = $prms[$px];
$prm_clean =~ s/\[.*\]//;
- $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
+ $prm_clean =~ s/$attribute//i;
# ignore array size in a parameter string;
# however, the original param string may contain
# spaces, e.g.: addr[6 + 2]
@@ -1809,8 +1814,14 @@ sub dump_function($$) {
# - parport_register_device (function pointer parameters)
# - atomic_set (macro)
# - pci_match_device, __copy_to_user (long return type)
-
- if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
+ my $name = qr{[a-zA-Z0-9_~:]+};
+ my $prototype_end1 = qr{[^\(]*};
+ my $prototype_end2 = qr{[^\{]*};
+ my $prototype_end = qr{\(($prototype_end1|$prototype_end2)\)};
+ my $type1 = qr{[\w\s]+};
+ my $type2 = qr{$type1\*+};
+
+ if ($define && $prototype =~ m/^()($name)\s+/) {
# This is an object-like macro, it has no return type and no parameter
# list.
# Function-like macros are not allowed to have spaces between
@@ -1818,23 +1829,9 @@ sub dump_function($$) {
$return_type = $1;
$declaration_name = $2;
$noret = 1;
- } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s*\*+\s*\w+\s*\*+\s*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/) {
+ } elsif ($prototype =~ m/^()($name)\s*$prototype_end/ ||
+ $prototype =~ m/^($type1)\s+($name)\s*$prototype_end/ ||
+ $prototype =~ m/^($type2+)\s*($name)\s*$prototype_end/) {
$return_type = $1;
$declaration_name = $2;
my $args = $3;
@@ -2111,12 +2108,12 @@ sub process_name($$) {
} elsif (/$doc_decl/o) {
$identifier = $1;
my $is_kernel_comment = 0;
- my $decl_start = qr{\s*\*};
+ my $decl_start = qr{$doc_com};
# test for pointer declaration type, foo * bar() - desc
my $fn_type = qr{\w+\s*\*\s*};
my $parenthesis = qr{\(\w*\)};
my $decl_end = qr{[-:].*};
- if (/^$decl_start\s*([\w\s]+?)$parenthesis?\s*$decl_end?$/) {
+ if (/^$decl_start([\w\s]+?)$parenthesis?\s*$decl_end?$/) {
$identifier = $1;
}
if ($identifier =~ m/^(struct|union|enum|typedef)\b\s*(\S*)/) {
@@ -2126,8 +2123,8 @@ sub process_name($$) {
}
# Look for foo() or static void foo() - description; or misspelt
# identifier
- elsif (/^$decl_start\s*$fn_type?(\w+)\s*$parenthesis?\s*$decl_end?$/ ||
- /^$decl_start\s*$fn_type?(\w+.*)$parenthesis?\s*$decl_end$/) {
+ elsif (/^$decl_start$fn_type?(\w+)\s*$parenthesis?\s*$decl_end?$/ ||
+ /^$decl_start$fn_type?(\w+.*)$parenthesis?\s*$decl_end$/) {
$identifier = $1;
$decl_type = 'function';
$identifier =~ s/^define\s+//;
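Note: interpolating a compiled qr// pattern into a match or substitution
behaves exactly like the literal it replaced, so the refactor above is purely
structural. A standalone Perl demonstration (not kernel-doc code):

	my $attribute = qr{__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)}i;
	my $decl = 'int x __attribute__((unused));';
	$decl =~ s/\s*$attribute/ /gi;	# same strip dump_struct() performs
	print "$decl\n";		# -> "int x ;"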
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index f4de4c97015b..475faa15854e 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -235,12 +235,16 @@ gen_btf()
vmlinux_link ${1}
+ if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
+ # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
+ extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars"
+ fi
if [ "${pahole_ver}" -ge "121" ]; then
extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
fi
info "BTF" ${2}
- LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${extra_paholeopt} ${1}
+ LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
# Create ${2} which contains just .BTF section but no symbols. Add
# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index f9b19524da11..1e9baa5c4fc6 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
Elf32_Word const *symtab_shndx)
{
unsigned long offset;
+ unsigned short shndx = w2(sym->st_shndx);
int index;
- if (sym->st_shndx != SHN_XINDEX)
- return w2(sym->st_shndx);
+ if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
+ return shndx;
- offset = (unsigned long)sym - (unsigned long)symtab;
- index = offset / sizeof(*sym);
+ if (shndx == SHN_XINDEX) {
+ offset = (unsigned long)sym - (unsigned long)symtab;
+ index = offset / sizeof(*sym);
- return w(symtab_shndx[index]);
+ return w(symtab_shndx[index]);
+ }
+
+ return 0;
}
static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
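Note on the index ranges: ordinary ELF section indexes live strictly below
SHN_LORESERVE; everything at or above it (SHN_ABS, SHN_COMMON, SHN_XINDEX and
friends) is reserved and names no real section, which is why the rewritten
helper returns 0 for anything it does not recognize. A standalone userspace
illustration of the ordinary-range test (recordmcount itself reads st_shndx
through its w2() wrapper):

	#include <elf.h>

	static int is_ordinary_shndx(unsigned short shndx)
	{
		return shndx > SHN_UNDEF && shndx < SHN_LORESERVE;
	}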
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 7b6a01291598..17fdc620d548 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -22,6 +22,7 @@ absolut||absolute
absoulte||absolute
acccess||access
acceess||access
+accelaration||acceleration
acceleratoin||acceleration
accelleration||acceleration
accesing||accessing
@@ -264,6 +265,7 @@ calucate||calculate
calulate||calculate
cancelation||cancellation
cancle||cancel
+canot||cannot
capabilites||capabilities
capabilties||capabilities
capabilty||capability
@@ -494,7 +496,10 @@ digial||digital
dimention||dimension
dimesions||dimensions
diconnected||disconnected
+disabed||disabled
+disble||disable
disgest||digest
+disired||desired
dispalying||displaying
diplay||display
directon||direction
@@ -710,6 +715,7 @@ havind||having
heirarchically||hierarchically
heirarchy||hierarchy
helpfull||helpful
+hearbeat||heartbeat
heterogenous||heterogeneous
hexdecimal||hexadecimal
hybernate||hibernate
@@ -989,6 +995,7 @@ notications||notifications
notifcations||notifications
notifed||notified
notity||notify
+nubmer||number
numebr||number
numner||number
obtaion||obtain
@@ -1014,8 +1021,10 @@ ommiting||omitting
ommitted||omitted
onself||oneself
ony||only
+openning||opening
operatione||operation
opertaions||operations
+opportunies||opportunities
optionnal||optional
optmizations||optimizations
orientatied||orientated
@@ -1111,6 +1120,7 @@ prefitler||prefilter
preform||perform
premption||preemption
prepaired||prepared
+prepate||prepare
preperation||preparation
preprare||prepare
pressre||pressure
@@ -1123,6 +1133,7 @@ privilaged||privileged
privilage||privilege
priviledge||privilege
priviledges||privileges
+privleges||privileges
probaly||probably
procceed||proceed
proccesors||processors
@@ -1167,6 +1178,7 @@ promixity||proximity
psudo||pseudo
psuedo||pseudo
psychadelic||psychedelic
+purgable||purgeable
pwoer||power
queing||queuing
quering||querying
@@ -1180,6 +1192,7 @@ receieve||receive
recepient||recipient
recevied||received
receving||receiving
+recievd||received
recieved||received
recieve||receive
reciever||receiver
@@ -1228,6 +1241,7 @@ reponse||response
representaion||representation
reqeust||request
reqister||register
+requed||requeued
requestied||requested
requiere||require
requirment||requirement
@@ -1332,6 +1346,7 @@ singal||signal
singed||signed
sleeped||slept
sliped||slipped
+softwade||software
softwares||software
soley||solely
souce||source
@@ -1510,6 +1525,7 @@ unintialized||uninitialized
unitialized||uninitialized
unkmown||unknown
unknonw||unknown
+unknouwn||unknown
unknow||unknown
unkown||unknown
unamed||unnamed
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index fe92020d67e3..288e86a9d1e5 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -22,16 +22,18 @@ my $need = 0;
my $optional = 0;
my $need_symlink = 0;
my $need_sphinx = 0;
-my $need_venv = 0;
+my $need_pip = 0;
my $need_virtualenv = 0;
my $rec_sphinx_upgrade = 0;
my $install = "";
my $virtenv_dir = "";
my $python_cmd = "";
+my $activate_cmd;
my $min_version;
my $cur_version;
my $rec_version = "1.7.9"; # PDF won't build here
my $min_pdf_version = "2.4.4"; # Min version where pdf builds
+my $latest_avail_ver;
#
# Command line arguments
@@ -319,10 +321,7 @@ sub check_sphinx()
return;
}
- if ($cur_version lt $rec_version) {
- $rec_sphinx_upgrade = 1;
- return;
- }
+ return if ($cur_version lt $rec_version);
# On version check mode, just assume Sphinx has all mandatory deps
exit (0) if ($version_check);
@@ -701,6 +700,162 @@ sub deactivate_help()
printf "\tdeactivate\n";
}
+sub get_virtenv()
+{
+ my $ver;
+ my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate";
+ my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate";
+
+ @activates = sort {$b cmp $a} @activates;
+
+ foreach my $f (@activates) {
+ next if ($f lt $min_activate);
+
+ my $sphinx_cmd = $f;
+ $sphinx_cmd =~ s/activate/sphinx-build/;
+ next if (! -f $sphinx_cmd);
+
+ my $ver = get_sphinx_version($sphinx_cmd);
+ if ($need_sphinx && ($ver ge $min_version)) {
+ return ($f, $ver);
+ } elsif ($ver gt $cur_version) {
+ return ($f, $ver);
+ }
+ }
+ return ("", "");
+}
+
+sub recommend_sphinx_upgrade()
+{
+ my $venv_ver;
+
+ # Avoid running sphinx-builds from venv if $cur_version is good
+ if ($cur_version && ($cur_version ge $rec_version)) {
+ $latest_avail_ver = $cur_version;
+ return;
+ }
+
+ # Get the highest version from sphinx_*/bin/sphinx-build and the
+ # corresponding command to activate the venv/virtenv
+	($activate_cmd, $venv_ver) = get_virtenv();
+
+ # Store the highest version from Sphinx existing virtualenvs
+ if (($activate_cmd ne "") && ($venv_ver gt $cur_version)) {
+ $latest_avail_ver = $venv_ver;
+ } else {
+ $latest_avail_ver = $cur_version if ($cur_version);
+ }
+
+	# As we don't know the Sphinx package version, and there are no
+	# virtual environments, don't check whether upgrades are needed
+ if (!$virtualenv) {
+ return if (!$latest_avail_ver);
+ }
+
+	# Either there is already a virtual env or a new one should be created
+ $need_pip = 1;
+
+	# Record whether the recommendation is an upgrade rather than a fresh install
+ if ($latest_avail_ver lt $rec_version) {
+ $rec_sphinx_upgrade = 1;
+ }
+}
+
+#
+# The logic here is complex, as it has to deal with different versions:
+# - minimal supported version;
+# - minimal PDF version;
+# - recommended version.
+# It also needs to work with both distro packages and venv/virtualenv setups
+sub recommend_sphinx_version($)
+{
+ my $virtualenv_cmd = shift;
+
+ if ($latest_avail_ver lt $min_pdf_version) {
+ print "note: If you want pdf, you need at least Sphinx $min_pdf_version.\n";
+ }
+
+ # Version is OK. Nothing to do.
+ return if ($cur_version && ($cur_version ge $rec_version));
+
+ if (!$need_sphinx) {
+ # sphinx-build is present and its version is >= $min_version
+
+		# Only recommend enabling a newer virtenv version if it makes sense.
+ if ($latest_avail_ver gt $cur_version) {
+ printf "\nYou may also use the newer Sphinx version $latest_avail_ver with:\n";
+ printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/);
+ printf "\t. $activate_cmd\n";
+ deactivate_help();
+
+ return;
+ }
+ return if ($latest_avail_ver ge $rec_version);
+ }
+
+ if (!$virtualenv) {
+		# No Sphinx either via a package or via virtualenv. As we can't
+		# compare the versions here, just return, recommending that the
+		# user install it from the distro package.
+ return if (!$latest_avail_ver);
+
+		# The user doesn't want a virtualenv recommendation, but already
+		# has a newer version installed via virtualenv.
+		# So, print the commands to enable it.
+ if ($latest_avail_ver gt $cur_version) {
+ printf "\nYou may also use the Sphinx virtualenv version $latest_avail_ver with:\n";
+ printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/);
+ printf "\t. $activate_cmd\n";
+ deactivate_help();
+
+ return;
+ }
+ print "\n";
+ } else {
+ $need++ if ($need_sphinx);
+ }
+
+ # Suggest newer versions if current ones are too old
+ if ($latest_avail_ver && $cur_version ge $min_version) {
+ # If there's a good enough version, ask the user to enable it
+ if ($latest_avail_ver ge $rec_version) {
+ printf "\nNeed to activate Sphinx (version $latest_avail_ver) on virtualenv with:\n";
+ printf "\t. $activate_cmd\n";
+ deactivate_help();
+
+ return;
+ }
+
+ # Version is above the minimal required one, but may be
+ # below the recommended one. So, print warnings/notes
+
+ if ($latest_avail_ver lt $rec_version) {
+ print "Warning: It is recommended at least Sphinx version $rec_version.\n";
+ }
+ }
+
+	# At this point, either Sphinx is needed or an upgrade is recommended;
+	# in both cases, via pip
+
+ if ($rec_sphinx_upgrade) {
+ if (!$virtualenv) {
+ print "Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n";
+ } else {
+ print "To upgrade Sphinx, use:\n\n";
+ }
+ } else {
+ print "Sphinx needs to be installed either as a package or via pip/pypi with:\n";
+ }
+
+ $python_cmd = find_python_no_venv();
+
+ printf "\t$virtualenv_cmd $virtenv_dir\n";
+
+ printf "\t. $virtenv_dir/bin/activate\n";
+ printf "\tpip install -r $requirement_file\n";
+ deactivate_help();
+}
+
sub check_needs()
{
# Check if Sphinx is already accessible from current environment
@@ -722,15 +877,14 @@ sub check_needs()
if ($virtualenv) {
my $tmp = qx($python_cmd --version 2>&1);
if ($tmp =~ m/(\d+\.)(\d+\.)/) {
- if ($1 >= 3 && $2 >= 3) {
- $need_venv = 1; # python 3.3 or upper
- } else {
- $need_virtualenv = 1;
- }
if ($1 < 3) {
# Fail if it finds python2 (or worse)
die "Python 3 is required to build the kernel docs\n";
}
+ if ($1 == 3 && $2 < 3) {
+			# Need Python 3.3 or newer for venv
+ $need_virtualenv = 1;
+ }
} else {
die "Warning: couldn't identify $python_cmd version!";
}
@@ -739,14 +893,22 @@ sub check_needs()
}
}
- # Set virtualenv command line, if python < 3.3
+ recommend_sphinx_upgrade();
+
my $virtualenv_cmd;
- if ($need_virtualenv) {
- $virtualenv_cmd = findprog("virtualenv-3");
- $virtualenv_cmd = findprog("virtualenv-3.5") if (!$virtualenv_cmd);
- if (!$virtualenv_cmd) {
- check_program("virtualenv", 0);
- $virtualenv_cmd = "virtualenv";
+
+ if ($need_pip) {
+ # Set virtualenv command line, if python < 3.3
+ if ($need_virtualenv) {
+ $virtualenv_cmd = findprog("virtualenv-3");
+ $virtualenv_cmd = findprog("virtualenv-3.5") if (!$virtualenv_cmd);
+ if (!$virtualenv_cmd) {
+ check_program("virtualenv", 0);
+ $virtualenv_cmd = "virtualenv";
+ }
+ } else {
+ $virtualenv_cmd = "$python_cmd -m venv";
+ check_python_module("ensurepip", 0);
}
}
@@ -763,10 +925,6 @@ sub check_needs()
check_program("rsvg-convert", 2) if ($pdf);
check_program("latexmk", 2) if ($pdf);
- if ($need_sphinx || $rec_sphinx_upgrade) {
- check_python_module("ensurepip", 0) if ($need_venv);
- }
-
# Do distro-specific checks and output distro-install commands
check_distros();
@@ -784,67 +942,7 @@ sub check_needs()
which("sphinx-build-3");
}
- # NOTE: if the system has a too old Sphinx version installed,
- # it will recommend installing a newer version using virtualenv
-
- if ($need_sphinx || $rec_sphinx_upgrade) {
- my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate";
- my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate";
-
- if ($cur_version lt $rec_version) {
- print "Warning: It is recommended at least Sphinx version $rec_version.\n";
- print " If you want pdf, you need at least $min_pdf_version.\n";
- }
- if ($cur_version lt $min_pdf_version) {
- print "Note: It is recommended at least Sphinx version $min_pdf_version if you need PDF support.\n";
- }
- @activates = sort {$b cmp $a} @activates;
- my ($activate, $ver);
- foreach my $f (@activates) {
- next if ($f lt $min_activate);
-
- my $sphinx_cmd = $f;
- $sphinx_cmd =~ s/activate/sphinx-build/;
- next if (! -f $sphinx_cmd);
-
- $ver = get_sphinx_version($sphinx_cmd);
- if ($need_sphinx && ($ver ge $min_version)) {
- $activate = $f;
- last;
- } elsif ($ver gt $cur_version) {
- $activate = $f;
- last;
- }
- }
- if ($activate ne "") {
- if ($need_sphinx) {
- printf "\nNeed to activate Sphinx (version $ver) on virtualenv with:\n";
- printf "\t. $activate\n";
- deactivate_help();
- exit (1);
- } else {
- printf "\nYou may also use a newer Sphinx (version $ver) with:\n";
- printf "\tdeactivate && . $activate\n";
- }
- } else {
- my $rec_activate = "$virtenv_dir/bin/activate";
-
- print "To upgrade Sphinx, use:\n\n" if ($rec_sphinx_upgrade);
-
- $python_cmd = find_python_no_venv();
-
- if ($need_venv) {
- printf "\t$python_cmd -m venv $virtenv_dir\n";
- } else {
- printf "\t$virtualenv_cmd $virtenv_dir\n";
- }
- printf "\t. $rec_activate\n";
- printf "\tpip install -r $requirement_file\n";
- deactivate_help();
-
- $need++ if (!$rec_sphinx_upgrade);
- }
- }
+ recommend_sphinx_version($virtualenv_cmd);
printf "\n";
print "All optional dependencies are met.\n" if (!$optional);
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
index 45e8aa360b45..cb55878bd5b8 100755
--- a/scripts/tools-support-relr.sh
+++ b/scripts/tools-support-relr.sh
@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
+ --use-android-relr-tags -o $tmp_file
# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index f2fef2b5ed51..0d44f41d16f8 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -29,6 +29,7 @@
struct xattr_list {
struct list_head list;
char *name;
+ bool enabled;
};
extern int evm_initialized;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index d76b006cbcc4..0450d79afdc8 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -10,6 +10,8 @@
* Using root's kernel master key (kmk), calculate the HMAC
*/
+#define pr_fmt(fmt) "EVM: "fmt
+
#include <linux/export.h>
#include <linux/crypto.h>
#include <linux/xattr.h>
@@ -175,6 +177,30 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
type != EVM_XATTR_PORTABLE_DIGSIG)
crypto_shash_update(desc, (u8 *)&inode->i_sb->s_uuid, UUID_SIZE);
crypto_shash_final(desc, digest);
+
+ pr_debug("hmac_misc: (%zu) [%*phN]\n", sizeof(struct h_misc),
+ (int)sizeof(struct h_misc), &hmac_misc);
+}
+
+/*
+ * Dump large security xattr values as a continuous ASCII hexadecimal string.
+ * (pr_debug is limited to 64 bytes.)
+ */
+static void dump_security_xattr(const char *prefix, const void *src,
+ size_t count)
+{
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+ char *asciihex, *p;
+
+ p = asciihex = kmalloc(count * 2 + 1, GFP_KERNEL);
+ if (!asciihex)
+ return;
+
+ p = bin2hex(p, src, count);
+ *p = 0;
+ pr_debug("%s: (%zu) %.*s\n", prefix, count, (int)count * 2, asciihex);
+ kfree(asciihex);
+#endif
}
/*
@@ -196,7 +222,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
size_t xattr_size = 0;
char *xattr_value = NULL;
int error;
- int size;
+ int size, user_space_size;
bool ima_present = false;
if (!(inode->i_opflags & IOP_XATTR) ||
@@ -216,6 +242,13 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
is_ima = true;
+ /*
+ * Skip non-enabled xattrs for locally calculated
+ * signatures/HMACs.
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG && !xattr->enabled)
+ continue;
+
if ((req_xattr_name && req_xattr_value)
&& !strcmp(xattr->name, req_xattr_name)) {
error = 0;
@@ -223,6 +256,16 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
req_xattr_value_len);
if (is_ima)
ima_present = true;
+
+ if (req_xattr_value_len < 64)
+ pr_debug("%s: (%zu) [%*phN]\n", req_xattr_name,
+ req_xattr_value_len,
+ (int)req_xattr_value_len,
+ req_xattr_value);
+ else
+ dump_security_xattr(req_xattr_name,
+ req_xattr_value,
+ req_xattr_value_len);
continue;
}
size = vfs_getxattr_alloc(&init_user_ns, dentry, xattr->name,
@@ -234,11 +277,24 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
if (size < 0)
continue;
+ user_space_size = vfs_getxattr(&init_user_ns, dentry,
+ xattr->name, NULL, 0);
+ if (user_space_size != size)
+ pr_debug("file %s: xattr %s size mismatch (kernel: %d, user: %d)\n",
+ dentry->d_name.name, xattr->name, size,
+ user_space_size);
error = 0;
xattr_size = size;
crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
if (is_ima)
ima_present = true;
+
+ if (xattr_size < 64)
+ pr_debug("%s: (%zu) [%*phN]", xattr->name, xattr_size,
+ (int)xattr_size, xattr_value);
+ else
+ dump_security_xattr(xattr->name, xattr_value,
+ xattr_size);
}
hmac_add_misc(desc, inode, type, data->digest);
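Note: the pr_debug() calls above rely on the kernel's %*phN specifier, which
prints a buffer as contiguous hex but caps output at 64 bytes; that limit is
why dump_security_xattr() exists for larger values. A tiny illustration:

	u8 d[4] = { 0xde, 0xad, 0xbe, 0xef };

	pr_debug("digest: [%*phN]\n", (int)sizeof(d), d);
	/* -> "digest: [deadbeef]" when dynamic debug is enabled */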
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 0de367aaa2d3..1c8435dfabee 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -11,6 +11,8 @@
* evm_inode_removexattr, and evm_verifyxattr
*/
+#define pr_fmt(fmt) "EVM: "fmt
+
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/audit.h>
@@ -18,6 +20,7 @@
#include <linux/integrity.h>
#include <linux/evm.h>
#include <linux/magic.h>
+#include <linux/posix_acl_xattr.h>
#include <crypto/hash.h>
#include <crypto/hash_info.h>
@@ -27,29 +30,50 @@
int evm_initialized;
static const char * const integrity_status_msg[] = {
- "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
+ "pass", "pass_immutable", "fail", "fail_immutable", "no_label",
+ "no_xattrs", "unknown"
};
int evm_hmac_attrs;
static struct xattr_list evm_config_default_xattrnames[] = {
+ {.name = XATTR_NAME_SELINUX,
#ifdef CONFIG_SECURITY_SELINUX
- {.name = XATTR_NAME_SELINUX},
+ .enabled = true
#endif
+ },
+ {.name = XATTR_NAME_SMACK,
#ifdef CONFIG_SECURITY_SMACK
- {.name = XATTR_NAME_SMACK},
+ .enabled = true
+#endif
+ },
+ {.name = XATTR_NAME_SMACKEXEC,
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
- {.name = XATTR_NAME_SMACKEXEC},
- {.name = XATTR_NAME_SMACKTRANSMUTE},
- {.name = XATTR_NAME_SMACKMMAP},
+ .enabled = true
#endif
+ },
+ {.name = XATTR_NAME_SMACKTRANSMUTE,
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+ .enabled = true
+#endif
+ },
+ {.name = XATTR_NAME_SMACKMMAP,
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+ .enabled = true
#endif
+ },
+ {.name = XATTR_NAME_APPARMOR,
#ifdef CONFIG_SECURITY_APPARMOR
- {.name = XATTR_NAME_APPARMOR},
+ .enabled = true
#endif
+ },
+ {.name = XATTR_NAME_IMA,
#ifdef CONFIG_IMA_APPRAISE
- {.name = XATTR_NAME_IMA},
+ .enabled = true
#endif
- {.name = XATTR_NAME_CAPS},
+ },
+ {.name = XATTR_NAME_CAPS,
+ .enabled = true
+ },
};
LIST_HEAD(evm_config_xattrnames);
@@ -74,7 +98,9 @@ static void __init evm_init_config(void)
pr_info("Initialising EVM extended attributes:\n");
for (i = 0; i < xattrs; i++) {
- pr_info("%s\n", evm_config_default_xattrnames[i].name);
+ pr_info("%s%s\n", evm_config_default_xattrnames[i].name,
+ !evm_config_default_xattrnames[i].enabled ?
+ " (disabled)" : "");
list_add_tail(&evm_config_default_xattrnames[i].list,
&evm_config_xattrnames);
}
@@ -90,6 +116,24 @@ static bool evm_key_loaded(void)
return (bool)(evm_initialized & EVM_KEY_MASK);
}
+/*
+ * This function determines whether or not it is safe to ignore verification
+ * errors, based on the ability of EVM to calculate HMACs. If the HMAC key
+ * is not loaded, and it cannot be loaded in the future due to the
+ * EVM_SETUP_COMPLETE initialization flag, allowing an operation despite the
+ * attrs/xattrs being found invalid will not make them valid.
+ */
+static bool evm_hmac_disabled(void)
+{
+ if (evm_initialized & EVM_INIT_HMAC)
+ return false;
+
+ if (!(evm_initialized & EVM_SETUP_COMPLETE))
+ return false;
+
+ return true;
+}
+
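Note: an informal summary of the new helper's decision, mirroring the code
above:

	/*
	 *   EVM_INIT_HMAC set                   -> false: HMAC usable, enforce
	 *   no HMAC key, setup not yet complete -> false: a key may still load
	 *   no HMAC key, EVM_SETUP_COMPLETE set -> true:  an HMAC can never be
	 *                                                 computed, so failing
	 *                                                 xattrs prove nothing
	 */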
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
@@ -137,7 +181,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
enum integrity_status evm_status = INTEGRITY_PASS;
struct evm_digest digest;
struct inode *inode;
- int rc, xattr_len;
+ int rc, xattr_len, evm_immutable = 0;
if (iint && (iint->evm_status == INTEGRITY_PASS ||
iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
@@ -182,8 +226,10 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
if (rc)
rc = -EINVAL;
break;
- case EVM_IMA_XATTR_DIGSIG:
case EVM_XATTR_PORTABLE_DIGSIG:
+ evm_immutable = 1;
+ fallthrough;
+ case EVM_IMA_XATTR_DIGSIG:
/* accept xattr with non-empty signature field */
if (xattr_len <= sizeof(struct signature_v2_hdr)) {
evm_status = INTEGRITY_FAIL;
@@ -220,9 +266,16 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
break;
}
- if (rc)
- evm_status = (rc == -ENODATA) ?
- INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+ if (rc) {
+ if (rc == -ENODATA)
+ evm_status = INTEGRITY_NOXATTRS;
+ else if (evm_immutable)
+ evm_status = INTEGRITY_FAIL_IMMUTABLE;
+ else
+ evm_status = INTEGRITY_FAIL;
+ }
+ pr_debug("digest: (%d) [%*phN]\n", digest.hdr.length, digest.hdr.length,
+ digest.digest);
out:
if (iint)
iint->evm_status = evm_status;
@@ -230,7 +283,8 @@ out:
return evm_status;
}
-static int evm_protected_xattr(const char *req_xattr_name)
+static int evm_protected_xattr_common(const char *req_xattr_name,
+ bool all_xattrs)
{
int namelen;
int found = 0;
@@ -238,6 +292,9 @@ static int evm_protected_xattr(const char *req_xattr_name)
namelen = strlen(req_xattr_name);
list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ if (!all_xattrs && !xattr->enabled)
+ continue;
+
if ((strlen(xattr->name) == namelen)
&& (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
found = 1;
@@ -254,6 +311,85 @@ static int evm_protected_xattr(const char *req_xattr_name)
return found;
}
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ return evm_protected_xattr_common(req_xattr_name, false);
+}
+
+int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+{
+ return evm_protected_xattr_common(req_xattr_name, true);
+}
+
+/**
+ * evm_read_protected_xattrs - read EVM protected xattr names, lengths, values
+ * @dentry: dentry of the read xattrs
+ * @inode: inode of the read xattrs
+ * @buffer: buffer that xattr names, lengths or values are copied to
+ * @buffer_size: size of buffer
+ * @type: n: names, l: lengths, v: values
+ * @canonical_fmt: data format (true: little endian, false: native format)
+ *
+ * Read protected xattr names (separated by |), lengths (u32) or values for a
+ * given dentry and return the total size of copied data. If buffer is NULL,
+ * just return the total size.
+ *
+ * Returns the total size on success, a negative value on error.
+ */
+int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type, bool canonical_fmt)
+{
+ struct xattr_list *xattr;
+ int rc, size, total_size = 0;
+
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ rc = __vfs_getxattr(dentry, d_backing_inode(dentry),
+ xattr->name, NULL, 0);
+ if (rc < 0 && rc == -ENODATA)
+ continue;
+ else if (rc < 0)
+ return rc;
+
+ switch (type) {
+ case 'n':
+ size = strlen(xattr->name) + 1;
+ if (buffer) {
+ if (total_size)
+ *(buffer + total_size - 1) = '|';
+
+ memcpy(buffer + total_size, xattr->name, size);
+ }
+ break;
+ case 'l':
+ size = sizeof(u32);
+ if (buffer) {
+ if (canonical_fmt)
+ rc = (__force int)cpu_to_le32(rc);
+
+ *(u32 *)(buffer + total_size) = rc;
+ }
+ break;
+ case 'v':
+ size = rc;
+ if (buffer) {
+ rc = __vfs_getxattr(dentry,
+ d_backing_inode(dentry), xattr->name,
+ buffer + total_size,
+ buffer_size - total_size);
+ if (rc < 0)
+ return rc;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ total_size += size;
+ }
+
+ return total_size;
+}
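Note: a hypothetical caller following the size-query convention documented
above (not part of the patch):

	int size = evm_read_protected_xattrs(dentry, NULL, 0, 'n', false);

	if (size > 0) {
		u8 *buf = kmalloc(size, GFP_KERNEL);

		if (buf && evm_read_protected_xattrs(dentry, buf, size,
						     'n', false) == size)
			pr_info("protected xattrs: %.*s\n", size, buf);
		kfree(buf);
	}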
+
/**
* evm_verifyxattr - verify the integrity of the requested xattr
* @dentry: object of the verify xattr
@@ -305,6 +441,92 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
}
/*
+ * evm_xattr_acl_change - check if passed ACL changes the inode mode
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Check if passed ACL changes the inode mode, which is protected by EVM.
+ *
+ * Returns 1 if passed ACL causes inode mode change, 0 otherwise.
+ */
+static int evm_xattr_acl_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+ umode_t mode;
+ struct posix_acl *acl = NULL, *acl_res;
+ struct inode *inode = d_backing_inode(dentry);
+ int rc;
+
+ /*
+ * user_ns is not relevant here, ACL_USER/ACL_GROUP don't have impact
+ * on the inode mode (see posix_acl_equiv_mode()).
+ */
+ acl = posix_acl_from_xattr(&init_user_ns, xattr_value, xattr_value_len);
+ if (IS_ERR_OR_NULL(acl))
+ return 1;
+
+ acl_res = acl;
+ /*
+ * Passing mnt_userns is necessary to correctly determine the GID in
+ * an idmapped mount, as the GID is used to clear the setgid bit in
+ * the inode mode.
+ */
+ rc = posix_acl_update_mode(mnt_userns, inode, &mode, &acl_res);
+
+ posix_acl_release(acl);
+
+ if (rc)
+ return 1;
+
+ if (inode->i_mode != mode)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * evm_xattr_change - check if passed xattr value differs from current value
+ * @mnt_userns: user namespace of the idmapped mount
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Check if passed xattr value differs from current value.
+ *
+ * Returns 1 if passed xattr value differs from current value, 0 otherwise.
+ */
+static int evm_xattr_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ char *xattr_data = NULL;
+ int rc = 0;
+
+ if (posix_xattr_acl(xattr_name))
+ return evm_xattr_acl_change(mnt_userns, dentry, xattr_name,
+ xattr_value, xattr_value_len);
+
+ rc = vfs_getxattr_alloc(&init_user_ns, dentry, xattr_name, &xattr_data,
+ 0, GFP_NOFS);
+ if (rc < 0)
+ return 1;
+
+ if (rc == xattr_value_len)
+ rc = !!memcmp(xattr_value, xattr_data, rc);
+ else
+ rc = 1;
+
+ kfree(xattr_data);
+ return rc;
+}
+
+/*
* evm_protect_xattr - protect the EVM extended attribute
*
* Prevent security.evm from being modified or removed without the
@@ -316,7 +538,8 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
* For posix xattr acls only, permit security.evm, even if it currently
* doesn't exist, to be updated unless the EVM signature is immutable.
*/
-static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+static int evm_protect_xattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
enum integrity_status evm_status;
@@ -338,6 +561,10 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
if (evm_status == INTEGRITY_NOXATTRS) {
struct integrity_iint_cache *iint;
+ /* Exception if the HMAC is not going to be calculated. */
+ if (evm_hmac_disabled())
+ return 0;
+
iint = integrity_iint_find(d_backing_inode(dentry));
if (iint && (iint->flags & IMA_NEW_FILE))
return 0;
@@ -354,7 +581,25 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
-EPERM, 0);
}
out:
- if (evm_status != INTEGRITY_PASS)
+ /* Exception if the HMAC is not going to be calculated. */
+ if (evm_hmac_disabled() && (evm_status == INTEGRITY_NOLABEL ||
+ evm_status == INTEGRITY_UNKNOWN))
+ return 0;
+
+ /*
+ * Writing other xattrs is safe for portable signatures, as portable
+ * signatures are immutable and can never be updated.
+ */
+ if (evm_status == INTEGRITY_FAIL_IMMUTABLE)
+ return 0;
+
+ if (evm_status == INTEGRITY_PASS_IMMUTABLE &&
+ !evm_xattr_change(mnt_userns, dentry, xattr_name, xattr_value,
+ xattr_value_len))
+ return 0;
+
+ if (evm_status != INTEGRITY_PASS &&
+ evm_status != INTEGRITY_PASS_IMMUTABLE)
integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
dentry->d_name.name, "appraise_metadata",
integrity_status_msg[evm_status],
@@ -364,6 +609,7 @@ out:
/**
* evm_inode_setxattr - protect the EVM extended attribute
+ * @mnt_userns: user namespace of the idmapped mount
* @dentry: pointer to the affected dentry
* @xattr_name: pointer to the affected extended attribute name
* @xattr_value: pointer to the new extended attribute value
@@ -375,8 +621,9 @@ out:
* userspace from writing the HMAC value. Writing 'security.evm'
* requires CAP_SYS_ADMIN privileges.
*/
-int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
- const void *xattr_value, size_t xattr_value_len)
+int evm_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *xattr_name, const void *xattr_value,
+ size_t xattr_value_len)
{
const struct evm_ima_xattr_data *xattr_data = xattr_value;
@@ -393,19 +640,21 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
return -EPERM;
}
- return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ return evm_protect_xattr(mnt_userns, dentry, xattr_name, xattr_value,
xattr_value_len);
}
/**
* evm_inode_removexattr - protect the EVM extended attribute
+ * @mnt_userns: user namespace of the idmapped mount
* @dentry: pointer to the affected dentry
* @xattr_name: pointer to the affected extended attribute name
*
* Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
* the current value is valid.
*/
-int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+int evm_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *xattr_name)
{
/* Policy permits modification of the protected xattrs even though
* there's no HMAC key loaded
@@ -413,7 +662,7 @@ int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
return 0;
- return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+ return evm_protect_xattr(mnt_userns, dentry, xattr_name, NULL, 0);
}
static void evm_reset_status(struct inode *inode)
@@ -426,6 +675,31 @@ static void evm_reset_status(struct inode *inode)
}
/**
+ * evm_revalidate_status - report whether EVM status re-validation is necessary
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Report whether callers of evm_verifyxattr() should re-validate the
+ * EVM status.
+ *
+ * Return true if re-validation is necessary, false otherwise.
+ */
+bool evm_revalidate_status(const char *xattr_name)
+{
+ if (!evm_key_loaded())
+ return false;
+
+ /* evm_inode_post_setattr() passes NULL */
+ if (!xattr_name)
+ return true;
+
+ if (!evm_protected_xattr(xattr_name) && !posix_xattr_acl(xattr_name) &&
+ strcmp(xattr_name, XATTR_NAME_EVM))
+ return false;
+
+ return true;
+}
+
+/**
* evm_inode_post_setxattr - update 'security.evm' to reflect the changes
* @dentry: pointer to the affected dentry
* @xattr_name: pointer to the affected extended attribute name
@@ -441,12 +715,17 @@ static void evm_reset_status(struct inode *inode)
void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
- if (!evm_key_loaded() || (!evm_protected_xattr(xattr_name)
- && !posix_xattr_acl(xattr_name)))
+ if (!evm_revalidate_status(xattr_name))
return;
evm_reset_status(dentry->d_inode);
+ if (!strcmp(xattr_name, XATTR_NAME_EVM))
+ return;
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
+ return;
+
evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
}
@@ -462,14 +741,33 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
*/
void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
{
- if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
+ if (!evm_revalidate_status(xattr_name))
return;
evm_reset_status(dentry->d_inode);
+ if (!strcmp(xattr_name, XATTR_NAME_EVM))
+ return;
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
+ return;
+
evm_update_evmxattr(dentry, xattr_name, NULL, 0);
}
+static int evm_attr_change(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ unsigned int ia_valid = attr->ia_valid;
+
+ if ((!(ia_valid & ATTR_UID) || uid_eq(attr->ia_uid, inode->i_uid)) &&
+ (!(ia_valid & ATTR_GID) || gid_eq(attr->ia_gid, inode->i_gid)) &&
+ (!(ia_valid & ATTR_MODE) || attr->ia_mode == inode->i_mode))
+ return 0;
+
+ return 1;
+}
+
/**
* evm_inode_setattr - prevent updating an invalid EVM extended attribute
* @dentry: pointer to the affected dentry
@@ -491,9 +789,21 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return 0;
evm_status = evm_verify_current_integrity(dentry);
+ /*
+ * Writing attrs is safe for portable signatures, as portable signatures
+ * are immutable and can never be updated.
+ */
if ((evm_status == INTEGRITY_PASS) ||
- (evm_status == INTEGRITY_NOXATTRS))
+ (evm_status == INTEGRITY_NOXATTRS) ||
+ (evm_status == INTEGRITY_FAIL_IMMUTABLE) ||
+ (evm_hmac_disabled() && (evm_status == INTEGRITY_NOLABEL ||
+ evm_status == INTEGRITY_UNKNOWN)))
return 0;
+
+ if (evm_status == INTEGRITY_PASS_IMMUTABLE &&
+ !evm_attr_change(dentry, attr))
+ return 0;
+
integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
dentry->d_name.name, "appraise_metadata",
integrity_status_msg[evm_status], -EPERM, 0);
@@ -513,7 +823,12 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
*/
void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
{
- if (!evm_key_loaded())
+ if (!evm_revalidate_status(NULL))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ if (!(evm_initialized & EVM_INIT_HMAC))
return;
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
@@ -521,7 +836,7 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
}
/*
- * evm_inode_init_security - initializes security.evm
+ * evm_inode_init_security - initializes security.evm HMAC value
*/
int evm_inode_init_security(struct inode *inode,
const struct xattr *lsm_xattr,
@@ -530,7 +845,8 @@ int evm_inode_init_security(struct inode *inode,
struct evm_xattr *xattr_data;
int rc;
- if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
+ if (!(evm_initialized & EVM_INIT_HMAC) ||
+ !evm_protected_xattr(lsm_xattr->name))
return 0;
xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index bbc85637e18b..8a9db7dfca7e 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -66,12 +66,13 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
static ssize_t evm_write_key(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int i, ret;
+ unsigned int i;
+ int ret;
if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
return -EPERM;
- ret = kstrtoint_from_user(buf, count, 0, &i);
+ ret = kstrtouint_from_user(buf, count, 0, &i);
if (ret)
return ret;
@@ -80,12 +81,12 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
if (!i || (i & ~EVM_INIT_MASK) != 0)
return -EINVAL;
- /* Don't allow a request to freshly enable metadata writes if
- * keys are loaded.
+ /*
+ * Don't allow a request to enable metadata writes if
+ * an HMAC key is loaded.
*/
if ((i & EVM_ALLOW_METADATA_WRITES) &&
- ((evm_initialized & EVM_KEY_MASK) != 0) &&
- !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
+ (evm_initialized & EVM_INIT_HMAC) != 0)
return -EPERM;
if (i & EVM_INIT_HMAC) {
@@ -138,8 +139,12 @@ static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
if (rc)
return -ERESTARTSYS;
- list_for_each_entry(xattr, &evm_config_xattrnames, list)
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ if (!xattr->enabled)
+ continue;
+
size += strlen(xattr->name) + 1;
+ }
temp = kmalloc(size + 1, GFP_KERNEL);
if (!temp) {
@@ -148,6 +153,9 @@ static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
}
list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ if (!xattr->enabled)
+ continue;
+
sprintf(temp + offset, "%s\n", xattr->name);
offset += strlen(xattr->name) + 1;
}
@@ -189,7 +197,7 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
ab = audit_log_start(audit_context(), GFP_KERNEL,
AUDIT_INTEGRITY_EVM_XATTR);
- if (!ab)
+ if (!ab && IS_ENABLED(CONFIG_AUDIT))
return -ENOMEM;
xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
@@ -198,6 +206,7 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
goto out;
}
+ xattr->enabled = true;
xattr->name = memdup_user_nul(buf, count);
if (IS_ERR(xattr->name)) {
err = PTR_ERR(xattr->name);
@@ -244,6 +253,10 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
list_for_each_entry(tmp, &evm_config_xattrnames, list) {
if (strcmp(xattr->name, tmp->name) == 0) {
err = -EEXIST;
+ if (!tmp->enabled) {
+ tmp->enabled = true;
+ err = count;
+ }
mutex_unlock(&xattr_list_mutex);
goto out;
}
@@ -255,7 +268,7 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
audit_log_end(ab);
return count;
out:
- audit_log_format(ab, " res=%d", err);
+ audit_log_format(ab, " res=%d", (err < 0) ? err : 0);
audit_log_end(ab);
if (xattr) {
kfree(xattr->name);
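
Two patterns recur in the evm_secfs.c hunks above: evm_write_key() validates the requested flags against a mask and refuses to enable metadata writes once an HMAC key is loaded, and evm_write_xattrs() soft-deletes list entries via an 'enabled' flag instead of removing them. A userspace sketch of the flag validation, with illustrative constants rather than the kernel's values:

#include <stdio.h>
#include <errno.h>

#define INIT_HMAC		0x1
#define INIT_X509		0x2
#define ALLOW_METADATA_WRITES	0x4
#define INIT_MASK	(INIT_HMAC | INIT_X509 | ALLOW_METADATA_WRITES)

static int write_key(unsigned int *state, unsigned int i)
{
	/* reject zero and any bit outside the allowed mask */
	if (!i || (i & ~INIT_MASK) != 0)
		return -EINVAL;
	/* once an HMAC key is loaded, metadata writes stay locked out */
	if ((i & ALLOW_METADATA_WRITES) && (*state & INIT_HMAC) != 0)
		return -EPERM;
	*state |= i;
	return 0;
}

int main(void)
{
	unsigned int state = 0;

	printf("%d\n", write_key(&state, INIT_HMAC));		  /* 0 */
	printf("%d\n", write_key(&state, ALLOW_METADATA_WRITES)); /* -EPERM */
	printf("%d\n", write_key(&state, 0x80));		  /* -EINVAL */
	return 0;
}

The 'enabled' flag serves the same purpose for the xattr list: a previously removed name can be re-activated on a later write, and readers simply skip disabled entries.
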
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index fca8a9409e4a..8638976f7990 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -208,7 +208,9 @@ int integrity_kernel_read(struct file *file, loff_t offset,
void __init integrity_load_keys(void)
{
ima_load_x509();
- evm_load_x509();
+
+ if (!IS_ENABLED(CONFIG_IMA_LOAD_X509))
+ evm_load_x509();
}
static int __init integrity_fs_init(void)
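
The iint.c hunk above gates the EVM key load on IS_ENABLED(CONFIG_IMA_LOAD_X509), because (per the ima_init.c hunk later in this patch) ima_load_x509() now loads the EVM key itself in that configuration. A sketch of the IS_ENABLED() pattern with a simplified stand-in macro (the kernel version also handles =m and undefined symbols):

#include <stdio.h>

#define CONFIG_IMA_LOAD_X509 0	/* flip to 1 to change the path */
#define IS_ENABLED(x) (x)

static void evm_load_x509_stub(void) { puts("EVM key loaded here"); }

int main(void)
{
	/* mirror integrity_load_keys(): only load the EVM key here
	 * when IMA will not do it itself from ima_load_x509() */
	if (!IS_ENABLED(CONFIG_IMA_LOAD_X509))
		evm_load_x509_stub();
	return 0;
}

Because the condition is an ordinary C expression, the compiler discards the dead branch entirely, giving Kconfig-driven behavior without preprocessor #ifdef blocks.
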
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 12e9250c1bec..d0ceada99243 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -334,3 +334,10 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT
help
This option is selected by architectures to enable secure and/or
trusted boot based on IMA runtime policies.
+
+config IMA_DISABLE_HTABLE
+ bool "Disable htable to allow measurement of duplicate records"
+ depends on IMA
+ default n
+ help
+ This option disables the hash table that IMA uses to skip already-measured content, allowing duplicate records in the measurement list.
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 4e5eb0236278..ef9dcfce45d4 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -242,12 +242,16 @@ static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint,
hash_start = 1;
fallthrough;
case IMA_XATTR_DIGEST:
- if (iint->flags & IMA_DIGSIG_REQUIRED) {
- *cause = "IMA-signature-required";
- *status = INTEGRITY_FAIL;
- break;
+ if (*status != INTEGRITY_PASS_IMMUTABLE) {
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ } else {
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
}
- clear_bit(IMA_DIGSIG, &iint->atomic_flags);
if (xattr_len - sizeof(xattr_value->type) - hash_start >=
iint->ima_hash->length)
/*
@@ -416,6 +420,10 @@ int ima_appraise_measurement(enum ima_hooks func,
case INTEGRITY_NOLABEL: /* No security.evm xattr. */
cause = "missing-HMAC";
goto out;
+ case INTEGRITY_FAIL_IMMUTABLE:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ cause = "invalid-fail-immutable";
+ goto out;
case INTEGRITY_FAIL: /* Invalid HMAC/signature. */
cause = "invalid-HMAC";
goto out;
@@ -459,9 +467,12 @@ out:
status = INTEGRITY_PASS;
}
- /* Permit new files with file signatures, but without data. */
+ /*
+ * Permit new files with file/EVM portable signatures, but
+ * without data.
+ */
if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE &&
- xattr_value && xattr_value->type == EVM_IMA_XATTR_DIGSIG) {
+ test_bit(IMA_DIGSIG, &iint->atomic_flags)) {
status = INTEGRITY_PASS;
}
@@ -522,8 +533,6 @@ void ima_inode_post_setattr(struct user_namespace *mnt_userns,
return;
action = ima_must_appraise(mnt_userns, inode, MAY_ACCESS, POST_SETATTR);
- if (!action)
- __vfs_removexattr(&init_user_ns, dentry, XATTR_NAME_IMA);
iint = integrity_iint_find(inode);
if (iint) {
set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
@@ -570,6 +579,7 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int digsig = 0;
int result;
result = ima_protect_xattr(dentry, xattr_name, xattr_value,
@@ -577,9 +587,14 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
if (result == 1) {
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
- ima_reset_appraise_flags(d_backing_inode(dentry),
- xvalue->type == EVM_IMA_XATTR_DIGSIG);
- result = 0;
+ digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
+ } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) {
+ digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG);
+ }
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
+ ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ if (result == 1)
+ result = 0;
}
return result;
}
@@ -589,9 +604,10 @@ int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
int result;
result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
- if (result == 1) {
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
ima_reset_appraise_flags(d_backing_inode(dentry), 0);
- result = 0;
+ if (result == 1)
+ result = 0;
}
return result;
}
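
The ima_appraise.c hunks above add a distinct path for immutable EVM signatures: an INTEGRITY_FAIL_IMMUTABLE verdict sets the IMA_DIGSIG flag and reports its own audit cause. A sketch of that status dispatch, with illustrative enum values rather than the kernel's:

#include <stdio.h>

enum integrity { PASS, NOLABEL, FAIL_IMMUTABLE, FAIL };

static const char *appraise(enum integrity st, unsigned long *flags)
{
	switch (st) {
	case NOLABEL:
		return "missing-HMAC";
	case FAIL_IMMUTABLE:
		*flags |= 1UL;	/* mirrors set_bit(IMA_DIGSIG, ...) */
		return "invalid-fail-immutable";
	case FAIL:
		return "invalid-HMAC";
	default:
		return NULL;	/* nothing to report */
	}
}

int main(void)
{
	unsigned long flags = 0;
	const char *cause = appraise(FAIL_IMMUTABLE, &flags);

	printf("cause=%s flags=%lx\n", cause ? cause : "none", flags);
	return 0;
}
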
diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c
index 1fb0b0e09559..c985418698a4 100644
--- a/security/integrity/ima/ima_asymmetric_keys.c
+++ b/security/integrity/ima/ima_asymmetric_keys.c
@@ -11,6 +11,7 @@
#include <keys/asymmetric-type.h>
#include <linux/user_namespace.h>
+#include <linux/ima.h>
#include "ima.h"
/**
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index f6a7e9643b54..a7206cc1d7d1 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -598,8 +598,8 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
u8 *data_to_hash = field_data[i].data;
u32 datalen = field_data[i].len;
- u32 datalen_to_hash =
- !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
+ u32 datalen_to_hash = !ima_canonical_fmt ?
+ datalen : (__force u32)cpu_to_le32(datalen);
if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
rc = crypto_shash_update(shash,
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ea8ff8a07b36..3d8e9d5db5aa 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -147,7 +147,7 @@ int ima_measurements_show(struct seq_file *m, void *v)
* PCR used defaults to the same (config option) in
* little-endian format, unless set in policy
*/
- pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
+ pcr = !ima_canonical_fmt ? e->pcr : (__force u32)cpu_to_le32(e->pcr);
ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
@@ -155,7 +155,7 @@ int ima_measurements_show(struct seq_file *m, void *v)
/* 3rd: template name size */
namelen = !ima_canonical_fmt ? strlen(template_name) :
- cpu_to_le32(strlen(template_name));
+ (__force u32)cpu_to_le32(strlen(template_name));
ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
@@ -167,7 +167,7 @@ int ima_measurements_show(struct seq_file *m, void *v)
if (!is_ima_template) {
template_data_len = !ima_canonical_fmt ? e->template_data_len :
- cpu_to_le32(e->template_data_len);
+ (__force u32)cpu_to_le32(e->template_data_len);
ima_putc(m, &template_data_len, sizeof(e->template_data_len));
}
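
The ima_crypto.c and ima_fs.c hunks above only add __force annotations to silence sparse; the underlying behavior is that integer fields are converted to little-endian when the canonical measurement-list format is requested. A userspace sketch, where htole32() stands in for the kernel's cpu_to_le32():

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

static void put_u32(FILE *out, uint32_t v, int canonical)
{
	uint32_t wire = canonical ? htole32(v) : v;

	fwrite(&wire, sizeof(wire), 1, out);
}

int main(void)
{
	put_u32(stdout, 11, 1);		/* e.g. a PCR index, canonical */
	put_u32(stdout, 42, 0);		/* native byte order */
	return 0;
}
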
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 6e8742916d1d..5076a7d9d23e 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -108,6 +108,10 @@ void __init ima_load_x509(void)
ima_policy_flag &= ~unset_flags;
integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH);
+
+ /* Load the EVM key as well, while appraisal is disabled */
+ evm_load_x509();
+
ima_policy_flag |= unset_flags;
}
#endif
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 667887665823..f799cc278a9a 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/kexec.h>
#include <linux/of.h>
+#include <linux/ima.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 906c1d8e0b71..287b90509006 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -433,7 +433,7 @@ int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot)
inode = file_inode(vma->vm_file);
action = ima_get_action(file_mnt_user_ns(vma->vm_file), inode,
current_cred(), secid, MAY_EXEC, MMAP_CHECK,
- &pcr, &template, 0);
+ &pcr, &template, NULL);
/* Is the mmap'ed file in policy? */
if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK)))
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index c096ef8945c7..532da87ce519 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -168,7 +168,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
int result = 0, tpmresult = 0;
mutex_lock(&ima_extend_list_mutex);
- if (!violation) {
+ if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
if (ima_lookup_digest_entry(digest, entry->pcr)) {
audit_cause = "hash_exists";
result = -EEXIST;
@@ -176,7 +176,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
}
- result = ima_add_digest_entry(entry, 1);
+ result = ima_add_digest_entry(entry,
+ !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
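
The ima_queue.c hunk above shows what CONFIG_IMA_DISABLE_HTABLE compiles out: a lookup-before-insert that rejects duplicate digests with -EEXIST. A sketch of that dedup logic, with a linear list standing in for the kernel's hash table:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NDIGESTS	8
#define DIGEST_LEN	4

static unsigned char table[NDIGESTS][DIGEST_LEN];
static int nentries;

static int add_entry(const unsigned char *digest, int dedup)
{
	int i;

	if (dedup)
		for (i = 0; i < nentries; i++)
			if (!memcmp(table[i], digest, DIGEST_LEN))
				return -EEXIST;	/* "hash_exists" */
	if (nentries == NDIGESTS)
		return -ENOMEM;
	memcpy(table[nentries++], digest, DIGEST_LEN);
	return 0;
}

int main(void)
{
	unsigned char d[DIGEST_LEN] = { 1, 2, 3, 4 };

	printf("%d\n", add_entry(d, 1));	/* 0: added */
	printf("%d\n", add_entry(d, 1));	/* -EEXIST: duplicate */
	printf("%d\n", add_entry(d, 0));	/* 0: dedup disabled */
	return 0;
}
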
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 4e081e650047..694560396be0 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -22,6 +22,8 @@ static struct ima_template_desc builtin_templates[] = {
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
{.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
{.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"},
+ {.name = "evm-sig",
+ .fmt = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
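
The new "evm-sig" entry above is declared, like every builtin template, as a '|'-separated format string that decomposes into field identifiers. A small illustrative tokenizer showing how such a format splits into the fields registered in supported_fields:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fmt[] = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|"
		     "xattrvalues|iuid|igid|imode";
	char *tok;

	for (tok = strtok(fmt, "|"); tok; tok = strtok(NULL, "|"))
		printf("field: %s\n", tok);
	return 0;
}
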
@@ -45,6 +47,23 @@ static const struct ima_template_field supported_fields[] = {
.field_show = ima_show_template_digest_ng},
{.field_id = "modsig", .field_init = ima_eventmodsig_init,
.field_show = ima_show_template_sig},
+ {.field_id = "evmsig", .field_init = ima_eventevmsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "iuid", .field_init = ima_eventinodeuid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "igid", .field_init = ima_eventinodegid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "imode", .field_init = ima_eventinodemode_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "xattrnames",
+ .field_init = ima_eventinodexattrnames_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "xattrlengths",
+ .field_init = ima_eventinodexattrlengths_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "xattrvalues",
+ .field_init = ima_eventinodexattrvalues_init,
+ .field_show = ima_show_template_sig},
};
/*
@@ -52,7 +71,8 @@ static const struct ima_template_field supported_fields[] = {
* need to be accounted for since they shouldn't be defined in the same template
* description as 'd-ng' and 'n-ng' respectively.
*/
-#define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf|d-modisg|modsig")
+#define MAX_TEMPLATE_NAME_LEN \
+ sizeof("d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode")
static struct ima_template_desc *ima_template;
static struct ima_template_desc *ima_buf_template;
@@ -403,9 +423,9 @@ int ima_restore_measurement_list(loff_t size, void *buf)
return 0;
if (ima_canonical_fmt) {
- khdr->version = le16_to_cpu(khdr->version);
- khdr->count = le64_to_cpu(khdr->count);
- khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
+ khdr->version = le16_to_cpu((__force __le16)khdr->version);
+ khdr->count = le64_to_cpu((__force __le64)khdr->count);
+ khdr->buffer_size = le64_to_cpu((__force __le64)khdr->buffer_size);
}
if (khdr->version != 1) {
@@ -495,7 +515,7 @@ int ima_restore_measurement_list(loff_t size, void *buf)
}
entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
- le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
+ le32_to_cpu(*(__le32 *)(hdr[HDR_PCR].data));
ret = ima_restore_measurement_entry(entry);
if (ret < 0)
break;
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index c022ee9e2a4e..ca017cae73eb 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -10,6 +10,8 @@
*/
#include "ima_template_lib.h"
+#include <linux/xattr.h>
+#include <linux/evm.h>
static bool ima_template_hash_algo_allowed(u8 algo)
{
@@ -23,7 +25,8 @@ enum data_formats {
DATA_FMT_DIGEST = 0,
DATA_FMT_DIGEST_WITH_ALGO,
DATA_FMT_STRING,
- DATA_FMT_HEX
+ DATA_FMT_HEX,
+ DATA_FMT_UINT
};
static int ima_write_template_field_data(const void *data, const u32 datalen,
@@ -87,6 +90,36 @@ static void ima_show_template_data_ascii(struct seq_file *m,
case DATA_FMT_STRING:
seq_printf(m, "%s", buf_ptr);
break;
+ case DATA_FMT_UINT:
+ switch (field_data->len) {
+ case sizeof(u8):
+ seq_printf(m, "%u", *(u8 *)buf_ptr);
+ break;
+ case sizeof(u16):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le16_to_cpu(*(__le16 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u16 *)buf_ptr);
+ break;
+ case sizeof(u32):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le32_to_cpu(*(__le32 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u32 *)buf_ptr);
+ break;
+ case sizeof(u64):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%llu",
+ le64_to_cpu(*(__le64 *)buf_ptr));
+ else
+ seq_printf(m, "%llu", *(u64 *)buf_ptr);
+ break;
+ default:
+ break;
+ }
+ break;
default:
break;
}
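
A userspace model of the DATA_FMT_UINT printer added above: dispatch on the stored field width and undo the little-endian conversion when the canonical format is in effect. leXXtoh() stands in for the kernel's leXX_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

static void show_uint(const void *buf, size_t len, int canonical)
{
	switch (len) {
	case sizeof(uint8_t): {
		uint8_t v;
		memcpy(&v, buf, sizeof(v));
		printf("%lu\n", (unsigned long)v);
		break;
	}
	case sizeof(uint16_t): {
		uint16_t v;
		memcpy(&v, buf, sizeof(v));
		printf("%lu\n", (unsigned long)(canonical ? le16toh(v) : v));
		break;
	}
	case sizeof(uint32_t): {
		uint32_t v;
		memcpy(&v, buf, sizeof(v));
		printf("%lu\n", (unsigned long)(canonical ? le32toh(v) : v));
		break;
	}
	case sizeof(uint64_t): {
		uint64_t v;
		memcpy(&v, buf, sizeof(v));
		printf("%llu\n",
		       (unsigned long long)(canonical ? le64toh(v) : v));
		break;
	}
	default:
		break;	/* unknown width: print nothing */
	}
}

int main(void)
{
	uint32_t mode = htole32(0644);

	show_uint(&mode, sizeof(mode), 1);
	return 0;
}
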
@@ -101,7 +134,8 @@ static void ima_show_template_data_binary(struct seq_file *m,
strlen(field_data->data) : field_data->len;
if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
- u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
+ u32 field_len = !ima_canonical_fmt ?
+ len : (__force u32)cpu_to_le32(len);
ima_putc(m, &field_len, sizeof(field_len));
}
@@ -162,6 +196,12 @@ void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
}
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_UINT, field_data);
+}
+
/**
* ima_parse_buf() - Parses lengths and data from an input buffer
* @bufstartp: Buffer start address.
@@ -188,9 +228,10 @@ int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
if (bufp > (bufendp - sizeof(u32)))
break;
- fields[i].len = *(u32 *)bufp;
if (ima_canonical_fmt)
- fields[i].len = le32_to_cpu(fields[i].len);
+ fields[i].len = le32_to_cpu(*(__le32 *)bufp);
+ else
+ fields[i].len = *(u32 *)bufp;
bufp += sizeof(u32);
}
@@ -438,7 +479,7 @@ int ima_eventsig_init(struct ima_event_data *event_data,
struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
- return 0;
+ return ima_eventevmsig_init(event_data, field_data);
return ima_write_template_field_data(xattr_value, event_data->xattr_len,
DATA_FMT_HEX, field_data);
@@ -484,3 +525,163 @@ int ima_eventmodsig_init(struct ima_event_data *event_data,
return ima_write_template_field_data(data, data_len, DATA_FMT_HEX,
field_data);
}
+
+/*
+ * ima_eventevmsig_init - include the EVM portable signature as part of the
+ * template data
+ */
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ int rc = 0;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = vfs_getxattr_alloc(&init_user_ns, file_dentry(event_data->file),
+ XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0)
+ return 0;
+
+ if (xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG) {
+ kfree(xattr_data);
+ return 0;
+ }
+
+ rc = ima_write_template_field_data((char *)xattr_data, rc, DATA_FMT_HEX,
+ field_data);
+ kfree(xattr_data);
+ return rc;
+}
+
+static int ima_eventinodedac_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool get_uid)
+{
+ unsigned int id;
+
+ if (!event_data->file)
+ return 0;
+
+ if (get_uid)
+ id = i_uid_read(file_inode(event_data->file));
+ else
+ id = i_gid_read(file_inode(event_data->file));
+
+ if (ima_canonical_fmt) {
+ if (sizeof(id) == sizeof(u16))
+ id = (__force u16)cpu_to_le16(id);
+ else
+ id = (__force u32)cpu_to_le32(id);
+ }
+
+ return ima_write_template_field_data((void *)&id, sizeof(id),
+ DATA_FMT_UINT, field_data);
+}
+
+/*
+ * ima_eventinodeuid_init - include the inode UID as part of the template
+ * data
+ */
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, true);
+}
+
+/*
+ * ima_eventinodegid_init - include the inode GID as part of the template
+ * data
+ */
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventinodemode_init - include the inode mode as part of the template
+ * data
+ */
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct inode *inode;
+ u16 mode;
+
+ if (!event_data->file)
+ return 0;
+
+ inode = file_inode(event_data->file);
+ mode = inode->i_mode;
+ if (ima_canonical_fmt)
+ mode = (__force u16)cpu_to_le16(mode);
+
+ return ima_write_template_field_data((char *)&mode, sizeof(mode),
+ DATA_FMT_UINT, field_data);
+}
+
+static int ima_eventinodexattrs_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ char type)
+{
+ u8 *buffer = NULL;
+ int rc;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), NULL, 0,
+ type, ima_canonical_fmt);
+ if (rc < 0)
+ return 0;
+
+ buffer = kmalloc(rc, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), buffer,
+ rc, type, ima_canonical_fmt);
+ if (rc < 0) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = ima_write_template_field_data((char *)buffer, rc, DATA_FMT_HEX,
+ field_data);
+out:
+ kfree(buffer);
+ return rc;
+}
+
+/*
+ * ima_eventinodexattrnames_init - include a list of xattr names as part of the
+ * template data
+ */
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'n');
+}
+
+/*
+ * ima_eventinodexattrlengths_init - include a list of xattr lengths as part of
+ * the template data
+ */
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'l');
+}
+
+/*
+ * ima_eventinodexattrvalues_init - include a list of xattr values as part of
+ * the template data
+ */
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'v');
+}
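
The three xattr field initializers above share one helper keyed by a type character ('n' for names, 'l' for lengths, 'v' for values) instead of three near-identical bodies. A sketch of that selector convention, with canned data where the kernel reads protected xattrs:

#include <stdio.h>
#include <string.h>

static int read_xattrs_common(char type)
{
	static const char *names[] = { "security.ima", "security.selinux" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		switch (type) {
		case 'n':
			printf("%s\n", names[i]);
			break;
		case 'l':
			printf("%zu\n", strlen(names[i]));
			break;
		case 'v':
			printf("<value of %s>\n", names[i]);
			break;
		}
	}
	return 0;
}

int main(void)
{
	read_xattrs_common('n');
	read_xattrs_common('l');
	return 0;
}
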
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
index 6b3b880637a0..c71f1de95753 100644
--- a/security/integrity/ima/ima_template_lib.h
+++ b/security/integrity/ima/ima_template_lib.h
@@ -27,6 +27,8 @@ void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
int maxfields, struct ima_field_data *fields, int *curfields,
unsigned long *len_mask, int enforce_mask, char *bufname);
@@ -46,4 +48,18 @@ int ima_eventbuf_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventmodsig_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 82ce14933513..5a5016ef43b0 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -119,7 +119,6 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
return -EINVAL;
ad->u.net->v6info.saddr = ip6->saddr;
ad->u.net->v6info.daddr = ip6->daddr;
- ret = 0;
/* IPv6 can have several extension headers before the transport header;
* skip them */
offset = skb_network_offset(skb);
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
index 1079c6d54784..963f4ad9cb66 100644
--- a/security/safesetid/lsm.c
+++ b/security/safesetid/lsm.c
@@ -22,7 +22,7 @@
#include "lsm.h"
/* Flag indicating whether initialization completed */
-int safesetid_initialized;
+int safesetid_initialized __initdata;
struct setid_ruleset __rcu *safesetid_setuid_rules;
struct setid_ruleset __rcu *safesetid_setgid_rules;
diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h
index bde8c43a3767..d346f4849cea 100644
--- a/security/safesetid/lsm.h
+++ b/security/safesetid/lsm.h
@@ -19,7 +19,7 @@
#include <linux/hashtable.h>
/* Flag indicating whether initialization completed */
-extern int safesetid_initialized;
+extern int safesetid_initialized __initdata;
enum sid_policy_type {
SIDPOL_DEFAULT, /* source ID is unaffected by policy */
diff --git a/security/security.c b/security/security.c
index b38155b2de83..09533cbb7221 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1354,7 +1354,7 @@ int security_inode_setxattr(struct user_namespace *mnt_userns,
ret = ima_inode_setxattr(dentry, name, value, size);
if (ret)
return ret;
- return evm_inode_setxattr(dentry, name, value, size);
+ return evm_inode_setxattr(mnt_userns, dentry, name, value, size);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -1399,7 +1399,7 @@ int security_inode_removexattr(struct user_namespace *mnt_userns,
ret = ima_inode_removexattr(dentry, name);
if (ret)
return ret;
- return evm_inode_removexattr(dentry, name);
+ return evm_inode_removexattr(mnt_userns, dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
@@ -2466,9 +2466,9 @@ void security_xfrm_state_free(struct xfrm_state *x)
call_void_hook(xfrm_state_free_security, x);
}
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
- return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid, dir);
+ return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid);
}
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index ad451cf9375e..97f4c944a20f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -297,26 +297,27 @@ static struct avc_xperms_decision_node
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->dontaudit)
goto error;
}
@@ -344,7 +345,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
{
struct avc_xperms_node *xp_node;
- xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -500,7 +501,7 @@ static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!node)
goto out;
@@ -758,7 +759,11 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
}
}
-/* This is the slow part of avc audit with big stack footprint */
+/*
+ * This is the slow part of avc audit with big stack footprint.
+ * Note that it is non-blocking and can be called from under
+ * rcu_read_lock().
+ */
noinline int slow_avc_audit(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
@@ -819,13 +824,13 @@ out:
}
/**
- * avc_update_node Update an AVC entry
+ * avc_update_node - Update an AVC entry
* @event : Updating event
* @perms : Permission mask bits
* @ssid,@tsid,@tclass : identifier of an AVC entry
* @seqno : sequence number when decision was made
* @xpd: extended_perms_decision to be added to the node
- * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
+ * @flags: the AVC_* flags, e.g. AVC_EXTENDED_PERMS, or 0.
*
* If a valid AVC entry doesn't exist, this function returns -ENOENT.
* If the internal kmalloc() call returns NULL, this function returns -ENOMEM.
@@ -844,21 +849,6 @@ static int avc_update_node(struct selinux_avc *avc,
struct hlist_head *head;
spinlock_t *lock;
- /*
- * If we are in a non-blocking code path, e.g. VFS RCU walk,
- * then we must not add permissions to a cache entry
- * because we will not audit the denial. Otherwise,
- * during the subsequent blocking retry (e.g. VFS ref walk), we
- * will find the permissions already granted in the cache entry
- * and won't audit anything at all, leading to silent denials in
- * permissive mode that only appear when in enforcing mode.
- *
- * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
- * and selinux_inode_permission().
- */
- if (flags & AVC_NONBLOCKING)
- return 0;
-
node = avc_alloc_node(avc);
if (!node) {
rc = -ENOMEM;
@@ -1119,7 +1109,7 @@ decision:
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @flags: AVC_STRICT, AVC_NONBLOCKING, or 0
+ * @flags: AVC_STRICT or 0
* @avd: access vector decisions
*
* Check the AVC to determine whether the @requested permissions are granted
@@ -1190,26 +1180,7 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
&avd);
rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
- auditdata, 0);
- if (rc2)
- return rc2;
- return rc;
-}
-
-int avc_has_perm_flags(struct selinux_state *state,
- u32 ssid, u32 tsid, u16 tclass, u32 requested,
- struct common_audit_data *auditdata,
- int flags)
-{
- struct av_decision avd;
- int rc, rc2;
-
- rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
- (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
- &avd);
-
- rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
- auditdata, flags);
+ auditdata);
if (rc2)
return rc2;
return rc;
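
With AVC_NONBLOCKING and avc_has_perm_flags() removed above, the permission path collapses to one unconditional decision step followed by one unconditional audit step, with an audit failure taking precedence. A model of that flow, all names illustrative:

#include <stdio.h>
#include <errno.h>

static int decide(unsigned int requested, unsigned int allowed)
{
	return (requested & ~allowed) ? -EACCES : 0;
}

static int audit(int rc)
{
	printf("audit: access %s\n", rc ? "denied" : "granted");
	return 0;	/* an audit failure would be returned instead */
}

static int has_perm(unsigned int requested, unsigned int allowed)
{
	int rc = decide(requested, allowed);
	int rc2 = audit(rc);

	return rc2 ? rc2 : rc;
}

int main(void)
{
	printf("%d\n", has_perm(0x3, 0x1));	/* -EACCES, audited */
	return 0;
}
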
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index eaea837d89d1..b0032c42333e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1676,7 +1676,7 @@ static int cred_has_capability(const struct cred *cred,
sid, sid, sclass, av, 0, &avd);
if (!(opts & CAP_OPT_NOAUDIT)) {
int rc2 = avc_audit(&selinux_state,
- sid, sid, sclass, av, &avd, rc, &ad, 0);
+ sid, sid, sclass, av, &avd, rc, &ad);
if (rc2)
return rc2;
}
@@ -3153,9 +3153,8 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
if (IS_ERR(isec))
return PTR_ERR(isec);
- return avc_has_perm_flags(&selinux_state,
- sid, isec->sid, isec->sclass, FILE__READ, &ad,
- rcu ? MAY_NOT_BLOCK : 0);
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, FILE__READ, &ad);
}
static noinline int audit_inode_permission(struct inode *inode,
@@ -3164,17 +3163,13 @@ static noinline int audit_inode_permission(struct inode *inode,
{
struct common_audit_data ad;
struct inode_security_struct *isec = selinux_inode(inode);
- int rc;
ad.type = LSM_AUDIT_DATA_INODE;
ad.u.inode = inode;
- rc = slow_avc_audit(&selinux_state,
+ return slow_avc_audit(&selinux_state,
current_sid(), isec->sid, isec->sclass, perms,
audited, denied, result, &ad);
- if (rc)
- return rc;
- return 0;
}
static int selinux_inode_permission(struct inode *inode, int mask)
@@ -3209,8 +3204,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
return PTR_ERR(isec);
rc = avc_has_perm_noaudit(&selinux_state,
- sid, isec->sid, isec->sclass, perms,
- no_block ? AVC_NONBLOCKING : 0,
+ sid, isec->sid, isec->sclass, perms, 0,
&avd);
audited = avc_audit_required(perms, &avd, rc,
from_access ? FILE__AUDIT_ACCESS : 0,
@@ -3218,10 +3212,6 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (likely(!audited))
return rc;
- /* fall back to ref-walk if we have to generate audit */
- if (no_block)
- return -ECHILD;
-
rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
if (rc2)
return rc2;
@@ -6850,7 +6840,7 @@ static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name,
return err;
ad.type = LSM_AUDIT_DATA_IBENDPORT;
- strncpy(ibendport.dev_name, dev_name, sizeof(ibendport.dev_name));
+ ibendport.dev_name = dev_name;
ibendport.port = port_num;
ad.u.ibendport = &ibendport;
return avc_has_perm(&selinux_state,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index cf4cc3ef959b..00f78be48283 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -111,7 +111,6 @@ int slow_avc_audit(struct selinux_state *state,
* @avd: access vector decisions
* @result: result from avc_has_perm_noaudit
* @a: auxiliary audit data
- * @flags: VFS walk flags
*
* Audit the granting or denial of permissions in accordance
* with the policy. This function is typically called by
@@ -127,16 +126,12 @@ static inline int avc_audit(struct selinux_state *state,
u16 tclass, u32 requested,
struct av_decision *avd,
int result,
- struct common_audit_data *a,
- int flags)
+ struct common_audit_data *a)
{
u32 audited, denied;
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
- /* fall back to ref-walk if we have to generate audit */
- if (flags & MAY_NOT_BLOCK)
- return -ECHILD;
return slow_avc_audit(state, ssid, tsid, tclass,
requested, audited, denied, result,
a);
@@ -144,7 +139,6 @@ static inline int avc_audit(struct selinux_state *state,
#define AVC_STRICT 1 /* Ignore permissive mode. */
#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
-#define AVC_NONBLOCKING 4 /* non blocking */
int avc_has_perm_noaudit(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
@@ -155,11 +149,6 @@ int avc_has_perm(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata);
-int avc_has_perm_flags(struct selinux_state *state,
- u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct common_audit_data *auditdata,
- int flags);
int avc_has_extended_perms(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass, u32 requested,
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 0a6f34a7a971..74159400eeee 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -23,7 +23,7 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic);
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 75df32906055..c97695ae508f 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -29,7 +29,7 @@ static struct kmem_cache *avtab_xperms_cachep __ro_after_init;
/* Based on MurmurHash3, written by Austin Appleby and placed in the
* public domain.
*/
-static inline int avtab_hash(struct avtab_key *keyp, u32 mask)
+static inline int avtab_hash(const struct avtab_key *keyp, u32 mask)
{
static const u32 c1 = 0xcc9e2d51;
static const u32 c2 = 0x1b873593;
@@ -68,7 +68,7 @@ static inline int avtab_hash(struct avtab_key *keyp, u32 mask)
static struct avtab_node*
avtab_insert_node(struct avtab *h, int hvalue,
struct avtab_node *prev, struct avtab_node *cur,
- struct avtab_key *key, struct avtab_datum *datum)
+ const struct avtab_key *key, const struct avtab_datum *datum)
{
struct avtab_node *newnode;
struct avtab_extended_perms *xperms;
@@ -103,7 +103,8 @@ avtab_insert_node(struct avtab *h, int hvalue,
return newnode;
}
-static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
+static int avtab_insert(struct avtab *h, const struct avtab_key *key,
+ const struct avtab_datum *datum)
{
int hvalue;
struct avtab_node *prev, *cur, *newnode;
@@ -147,8 +148,9 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
* key/specified mask into the table, as needed by the conditional avtab.
* It also returns a pointer to the node inserted.
*/
-struct avtab_node *
-avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
+struct avtab_node *avtab_insert_nonunique(struct avtab *h,
+ const struct avtab_key *key,
+ const struct avtab_datum *datum)
{
int hvalue;
struct avtab_node *prev, *cur;
@@ -178,7 +180,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
return avtab_insert_node(h, hvalue, prev, cur, key, datum);
}
-struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
+struct avtab_datum *avtab_search(struct avtab *h, const struct avtab_key *key)
{
int hvalue;
struct avtab_node *cur;
@@ -213,8 +215,8 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
/* This search function returns a node pointer, and can be used in
* conjunction with avtab_search_next_node()
*/
-struct avtab_node*
-avtab_search_node(struct avtab *h, struct avtab_key *key)
+struct avtab_node *avtab_search_node(struct avtab *h,
+ const struct avtab_key *key)
{
int hvalue;
struct avtab_node *cur;
@@ -396,8 +398,8 @@ static uint16_t spec_order[] = {
};
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
- int (*insertf)(struct avtab *a, struct avtab_key *k,
- struct avtab_datum *d, void *p),
+ int (*insertf)(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p),
void *p)
{
__le16 buf16[4];
@@ -557,8 +559,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
return insertf(a, &key, &datum, p);
}
-static int avtab_insertf(struct avtab *a, struct avtab_key *k,
- struct avtab_datum *d, void *p)
+static int avtab_insertf(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p)
{
return avtab_insert(a, k, d);
}
@@ -607,7 +609,7 @@ bad:
goto out;
}
-int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
+int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp)
{
__le16 buf16[4];
__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index f2eeb36265d1..d3ebea8d146f 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -90,24 +90,26 @@ struct avtab {
void avtab_init(struct avtab *h);
int avtab_alloc(struct avtab *, u32);
int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
-struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
+struct avtab_datum *avtab_search(struct avtab *h, const struct avtab_key *k);
void avtab_destroy(struct avtab *h);
void avtab_hash_eval(struct avtab *h, char *tag);
struct policydb;
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
- int (*insert)(struct avtab *a, struct avtab_key *k,
- struct avtab_datum *d, void *p),
+ int (*insert)(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *p),
void *p);
int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
-int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
+int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp);
int avtab_write(struct policydb *p, struct avtab *a, void *fp);
-struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
- struct avtab_datum *datum);
+struct avtab_node *avtab_insert_nonunique(struct avtab *h,
+ const struct avtab_key *key,
+ const struct avtab_datum *datum);
-struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key);
+struct avtab_node *avtab_search_node(struct avtab *h,
+ const struct avtab_key *key);
struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 1ef74c085f2b..2ec6e5cd25d9 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -254,7 +254,8 @@ struct cond_insertf_data {
struct cond_av_list *other;
};
-static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr)
+static int cond_insertf(struct avtab *a, const struct avtab_key *k,
+ const struct avtab_datum *d, void *ptr)
{
struct cond_insertf_data *data = ptr;
struct policydb *p = data->p;
@@ -628,7 +629,8 @@ static int cond_dup_av_list(struct cond_av_list *new,
static int duplicate_policydb_cond_list(struct policydb *newp,
struct policydb *origp)
{
- int rc, i, j;
+ int rc;
+ u32 i;
rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
if (rc)
@@ -648,12 +650,12 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
newp->cond_list_len++;
newn->cur_state = orign->cur_state;
- newn->expr.nodes = kcalloc(orign->expr.len,
- sizeof(*newn->expr.nodes), GFP_KERNEL);
+ newn->expr.nodes = kmemdup(orign->expr.nodes,
+ orign->expr.len * sizeof(*orign->expr.nodes),
+ GFP_KERNEL);
if (!newn->expr.nodes)
goto error;
- for (j = 0; j < orign->expr.len; j++)
- newn->expr.nodes[j] = orign->expr.nodes[j];
+
newn->expr.len = orign->expr.len;
rc = cond_dup_av_list(&newn->true_list, &orign->true_list,
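
The conditional.c hunk above replaces a zeroing allocation plus a hand-written element-copy loop with a single kmemdup() call. A userspace equivalent of that conversion:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);	/* allocate and copy in one step */
	return p;
}

int main(void)
{
	int orig[] = { 1, 2, 3, 4 };
	int *copy = memdup(orig, sizeof(orig));

	if (!copy)
		return 1;
	printf("%d %d\n", copy[0], copy[3]);
	free(copy);
	return 0;
}
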
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 9fccf417006b..defc5ef35c66 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -2589,7 +2589,6 @@ int policydb_read(struct policydb *p, void *fp)
if (rc)
goto bad;
- rc = -EINVAL;
rtk->role = le32_to_cpu(buf[0]);
rtk->type = le32_to_cpu(buf[1]);
rtd->new_role = le32_to_cpu(buf[2]);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 0a5ce001609b..d84c77f370dc 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -859,6 +859,7 @@ int security_validate_transition(struct selinux_state *state,
* It returns 0, if @newsid is bounded by @oldsid.
* Otherwise, it returns error code.
*
+ * @state: SELinux state
* @oldsid : current security identifier
* @newsid : destinated security identifier
*/
@@ -1098,6 +1099,7 @@ allow:
/**
* security_compute_av - Compute access vector decisions.
+ * @state: SELinux state
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
@@ -1386,6 +1388,7 @@ out_unlock:
/**
* security_sid_to_context - Obtain a context for a given SID.
+ * @state: SELinux state
* @sid: security identifier, SID
* @scontext: security context
* @scontext_len: length in bytes
@@ -1411,6 +1414,7 @@ int security_sid_to_context_force(struct selinux_state *state, u32 sid,
/**
* security_sid_to_context_inval - Obtain a context for a given SID if it
* is invalid.
+ * @state: SELinux state
* @sid: security identifier, SID
* @scontext: security context
* @scontext_len: length in bytes
@@ -1587,6 +1591,7 @@ out:
/**
* security_context_to_sid - Obtain a SID for a given security context.
+ * @state: SELinux state
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
@@ -1616,6 +1621,7 @@ int security_context_str_to_sid(struct selinux_state *state,
* security_context_to_sid_default - Obtain a SID for a given security context,
* falling back to specified default if needed.
*
+ * @state: SELinux state
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
@@ -1907,6 +1913,7 @@ out:
/**
* security_transition_sid - Compute the SID for a new subject/object.
+ * @state: SELinux state
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
@@ -1962,6 +1969,7 @@ int security_member_sid(struct selinux_state *state,
/**
* security_change_sid - Compute the SID for object relabeling.
+ * @state: SELinux state
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
@@ -2060,7 +2068,6 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
context_init(newc);
/* Convert the user. */
- rc = -EINVAL;
usrdatum = symtab_search(&args->newp->p_users,
sym_name(args->oldp,
SYM_USERS, oldc->user - 1));
@@ -2069,7 +2076,6 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
newc->user = usrdatum->value;
/* Convert the role. */
- rc = -EINVAL;
role = symtab_search(&args->newp->p_roles,
sym_name(args->oldp, SYM_ROLES, oldc->role - 1));
if (!role)
@@ -2077,7 +2083,6 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
newc->role = role->value;
/* Convert the type. */
- rc = -EINVAL;
typdatum = symtab_search(&args->newp->p_types,
sym_name(args->oldp,
SYM_TYPES, oldc->type - 1));
@@ -2101,7 +2106,6 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
oc = args->newp->ocontexts[OCON_ISID];
while (oc && oc->sid[0] != SECINITSID_UNLABELED)
oc = oc->next;
- rc = -EINVAL;
if (!oc) {
pr_err("SELinux: unable to look up"
" the initial SIDs list\n");
@@ -2264,6 +2268,7 @@ void selinux_policy_commit(struct selinux_state *state,
/**
* security_load_policy - Load a security policy configuration.
+ * @state: SELinux state
* @data: binary policy data
* @len: length of data in bytes
*
@@ -2371,6 +2376,7 @@ err_policy:
/**
* security_port_sid - Obtain the SID for a port.
+ * @state: SELinux state
* @protocol: protocol number
* @port: port number
* @out_sid: security identifier
@@ -2427,7 +2433,8 @@ out:
}
/**
- * security_pkey_sid - Obtain the SID for a pkey.
+ * security_ib_pkey_sid - Obtain the SID for a pkey.
+ * @state: SELinux state
* @subnet_prefix: Subnet Prefix
* @pkey_num: pkey number
* @out_sid: security identifier
@@ -2486,6 +2493,7 @@ out:
/**
* security_ib_endport_sid - Obtain the SID for a subnet management interface.
+ * @state: SELinux state
* @dev_name: device name
* @port: port number
* @out_sid: security identifier
@@ -2544,6 +2552,7 @@ out:
/**
* security_netif_sid - Obtain the SID for a network interface.
+ * @state: SELinux state
* @name: interface name
* @if_sid: interface SID
*/
@@ -2618,6 +2627,7 @@ static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
/**
* security_node_sid - Obtain the SID for a node (host).
+ * @state: SELinux state
* @domain: communication domain aka address family
* @addrp: address
* @addrlen: address length in bytes
@@ -2711,6 +2721,7 @@ out:
/**
* security_get_user_sids - Obtain reachable SIDs for a user.
+ * @state: SELinux state
* @fromsid: starting SID
* @username: username
* @sids: array of reachable SIDs for user
@@ -2903,6 +2914,7 @@ out:
/**
* security_genfs_sid - Obtain a SID for a file in a filesystem
+ * @state: SELinux state
* @fstype: filesystem type
* @path: path from root of mount
* @sclass: file security class
@@ -2947,6 +2959,7 @@ int selinux_policy_genfs_sid(struct selinux_policy *policy,
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
+ * @state: SELinux state
* @sb: superblock in question
*/
int security_fs_use(struct selinux_state *state, struct super_block *sb)
@@ -3286,6 +3299,7 @@ out_unlock:
/**
* security_net_peersid_resolve - Compare and resolve two network peer SIDs
+ * @state: SELinux state
* @nlbl_sid: NetLabel SID
* @nlbl_type: NetLabel labeling protocol type
* @xfrm_sid: XFRM SID
@@ -3510,6 +3524,7 @@ int security_get_allow_unknown(struct selinux_state *state)
/**
* security_policycap_supported - Check for a specific policy capability
+ * @state: SELinux state
* @req_cap: capability
*
* Description:
@@ -3844,6 +3859,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
/**
* security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
+ * @state: SELinux state
* @secattr: the NetLabel packet security attributes
* @sid: the SELinux SID
*
@@ -3926,6 +3942,7 @@ out:
/**
* security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
+ * @state: SELinux state
* @sid: the SELinux SID
* @secattr: the NetLabel packet security attributes
*
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 634f3db24da6..be83e5ce4469 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -150,7 +150,7 @@ static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
* LSM hook implementation that authorizes that a flow can use a xfrm policy
* rule.
*/
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
int rc;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 7eabb448acab..1f391f6a3d47 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -332,7 +332,7 @@ static void smack_log_callback(struct audit_buffer *ab, void *a)
* @object_label : smack label of the object being accessed
* @request: requested permissions
* @result: result from smk_access
- * @a: auxiliary audit data
+ * @ad: auxiliary audit data
*
* Audit the granting or denial of permissions in accordance
* with the policy.
@@ -396,6 +396,7 @@ struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
/**
* smk_insert_entry - insert a smack label into a hash map,
+ * @skp: smack label
*
* this function must be called under smack_known_lock
*/
@@ -476,8 +477,10 @@ char *smk_parse_smack(const char *string, int len)
/**
* smk_netlbl_mls - convert a catset to netlabel mls categories
+ * @level: MLS sensitivity level
* @catset: the Smack categories
* @sap: where to put the netlabel categories
+ * @len: number of bytes for the levels in a CIPSO IP option
*
* Allocates and fills attr.mls
* Returns 0 on success, error code on failure.
@@ -688,10 +691,9 @@ bool smack_privileged_cred(int cap, const struct cred *cred)
bool smack_privileged(int cap)
{
/*
- * Kernel threads may not have credentials we can use.
- * The io_uring kernel threads do have reliable credentials.
+ * All kernel tasks are privileged
*/
- if ((current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD)
+ if (unlikely(current->flags & PF_KTHREAD))
return true;
return smack_privileged_cred(cap, current_cred());
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 22ded2c26089..3a75d2a8f517 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -380,7 +380,7 @@ static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
* @data: string to be parsed, null terminated
* @rule: Will be filled with Smack parsed rule
* @import: if non-zero, import labels
- * @tokens: numer of substrings expected in data
+ * @tokens: number of substrings expected in data
*
* Returns number of processed bytes on success, -ERRNO on failure.
*/
@@ -855,6 +855,8 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
if (format == SMK_FIXED24_FMT &&
(count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
return -EINVAL;
+ if (count > PAGE_SIZE)
+ return -EINVAL;
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
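
The smackfs hunk above rejects oversized writes before duplicating the user buffer, so a huge count cannot drive a huge allocation. A sketch of that bounds check, with LIMIT playing the role of PAGE_SIZE:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define LIMIT 4096

static char *dup_bounded(const char *buf, size_t count)
{
	char *data;

	if (count > LIMIT) {	/* validate before allocating */
		errno = EINVAL;
		return NULL;
	}
	data = malloc(count + 1);
	if (!data)
		return NULL;
	memcpy(data, buf, count);
	data[count] = '\0';
	return data;
}

int main(void)
{
	char *s = dup_bounded("rule", 4);

	printf("%s\n", s ? s : "rejected");
	free(s);
	return 0;
}
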
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index b51bad121c11..d79bf07e16be 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -320,6 +320,7 @@ static unsigned int tomoyo_log_count;
* @ns: Pointer to "struct tomoyo_policy_namespace".
* @profile: Profile number.
* @index: Index number of functionality.
+ * @matched_acl: Pointer to "struct tomoyo_acl_info".
* @is_granted: True if granted log, false otherwise.
*
* Returns true if this request should be audited, false otherwise.
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 065f4941c4d8..a2705798476f 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -139,6 +139,7 @@ static int tomoyo_open(struct inode *inode, struct file *file)
/**
* tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
*
+ * @inode: Pointer to "struct inode".
* @file: Pointer to "struct file".
*
*/
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 1f3cd432d830..b6a31901f289 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -63,7 +63,7 @@ static void tomoyo_bprm_committed_creds(struct linux_binprm *bprm)
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/**
- * tomoyo_bprm_for_exec - Target for security_bprm_creds_for_exec().
+ * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
*
* @bprm: Pointer to "struct linux_binprm".
*
@@ -113,8 +113,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
/**
* tomoyo_inode_getattr - Target for security_inode_getattr().
*
- * @mnt: Pointer to "struct vfsmount".
- * @dentry: Pointer to "struct dentry".
+ * @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
@@ -300,8 +299,7 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
/**
* tomoyo_file_open - Target for security_file_open().
*
- * @f: Pointer to "struct file".
- * @cred: Pointer to "struct cred".
+ * @f: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
@@ -487,8 +485,8 @@ struct lsm_blob_sizes tomoyo_blob_sizes __lsm_ro_after_init = {
/**
* tomoyo_task_alloc - Target for security_task_alloc().
*
- * @task: Pointer to "struct task_struct".
- * @flags: clone() flags.
+ * @task: Pointer to "struct task_struct".
+ * @clone_flags: clone() flags.
*
* Returns 0.
*/
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index e89cac913583..1da2e3722b12 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -83,8 +83,8 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
/**
* tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss.
*
- * @time: Seconds since 1970/01/01 00:00:00.
- * @stamp: Pointer to "struct tomoyo_time".
+ * @time64: Seconds since 1970/01/01 00:00:00.
+ * @stamp: Pointer to "struct tomoyo_time".
*
* Returns nothing.
*/
diff --git a/sound/core/control_led.c b/sound/core/control_led.c
index 25f57c14f294..a90e31dbde61 100644
--- a/sound/core/control_led.c
+++ b/sound/core/control_led.c
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
#define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
>> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
+#define to_led_card_dev(_dev) \
+ container_of(_dev, struct snd_ctl_led_card, dev)
+
enum snd_ctl_led_mode {
MODE_FOLLOW_MUTE = 0,
MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
snd_ctl_led_refresh();
}
+static void snd_ctl_led_card_release(struct device *dev)
+{
+ struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+ kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
/*
* sysfs
*/
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
led_card->number = card->number;
led_card->led = led;
device_initialize(&led_card->dev);
+ led_card->dev.release = snd_ctl_led_card_release;
if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
goto cerr;
led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
put_device(&led_card->dev);
cerr2:
printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
- kfree(led_card);
}
}
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
snprintf(link_name, sizeof(link_name), "led-%s", led->name);
sysfs_remove_link(&card->ctl_dev.kobj, link_name);
sysfs_remove_link(&led_card->dev.kobj, "card");
- device_del(&led_card->dev);
- kfree(led_card);
+ device_unregister(&led_card->dev);
led->cards[card->number] = NULL;
}
}
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
device_initialize(&snd_ctl_led_dev);
snd_ctl_led_dev.class = sound_class;
+ snd_ctl_led_dev.release = snd_ctl_led_dev_release;
dev_set_name(&snd_ctl_led_dev, "ctl-led");
if (device_add(&snd_ctl_led_dev)) {
put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
INIT_LIST_HEAD(&led->controls);
device_initialize(&led->dev);
led->dev.parent = &snd_ctl_led_dev;
+ led->dev.release = snd_ctl_led_release;
led->dev.groups = snd_ctl_led_dev_attr_groups;
dev_set_name(&led->dev, led->name);
if (device_add(&led->dev)) {
put_device(&led->dev);
for (; group > 0; group--) {
led = &snd_ctl_leds[group - 1];
- device_del(&led->dev);
+ device_unregister(&led->dev);
}
- device_del(&snd_ctl_led_dev);
+ device_unregister(&snd_ctl_led_dev);
return -ENOMEM;
}
}
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
}
for (group = 0; group < MAX_LED; group++) {
led = &snd_ctl_leds[group];
- device_del(&led->dev);
+ device_unregister(&led->dev);
}
- device_del(&snd_ctl_led_dev);
+ device_unregister(&snd_ctl_led_dev);
snd_ctl_led_clean(NULL);
}
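
The control_led.c changes above move freeing into device release callbacks and switch device_del()+kfree() to device_unregister(), so the embedding object is freed only when the last reference drops. A model of that lifetime rule, with plain refcounting standing in for the driver core:

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refs;
	void (*release)(struct dev *);
};

struct led_card {
	struct dev dev;		/* first member, so the cast below is valid */
	int number;
};

static void led_card_release(struct dev *d)
{
	/* container_of() in the kernel */
	struct led_card *card = (struct led_card *)d;

	printf("freeing card%d\n", card->number);
	free(card);
}

static void put_dev(struct dev *d)
{
	if (--d->refs == 0)
		d->release(d);	/* last put frees via release() */
}

int main(void)
{
	struct led_card *card = malloc(sizeof(*card));

	if (!card)
		return 1;
	card->number = 0;
	card->dev.refs = 1;
	card->dev.release = led_card_release;
	put_dev(&card->dev);
	return 0;
}
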
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 1645e4142e30..9863be6fd43e 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
return err;
}
spin_lock_irq(&tmr->lock);
- tmr->timeri = t;
+ if (tmr->timeri)
+ err = -EBUSY;
+ else
+ tmr->timeri = t;
spin_unlock_irq(&tmr->lock);
+ if (err < 0) {
+ snd_timer_close(t);
+ snd_timer_instance_free(t);
+ return err;
+ }
return 0;
}
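
The seq_timer.c fix above claims the slot under the lock and, if it was already taken, tears the new instance down outside the lock and returns -EBUSY instead of overwriting. A pthreads sketch of that claim-then-cleanup pattern:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;

static int open_timer(void)
{
	void *t = malloc(16);	/* stand-in for a timer instance */
	int err = 0;

	if (!t)
		return -ENOMEM;
	pthread_mutex_lock(&lock);
	if (slot)
		err = -EBUSY;	/* someone raced us to the slot */
	else
		slot = t;
	pthread_mutex_unlock(&lock);
	if (err < 0) {
		free(t);	/* close/free outside the lock */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", open_timer());	/* 0 */
	printf("%d\n", open_timer());	/* -EBUSY */
	return 0;
}
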
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 6898b1ac0d7f..92b7008fcdb8 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
+ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ts, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event, &tstamp, resolution);
}
/* start/continue a master timer */
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index e0faa6601966..5805c5de39fb 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -804,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
static inline void cancel_stream(struct amdtp_stream *s)
{
s->packet_index = -1;
- if (current_work() == &s->period_work)
+ if (in_interrupt())
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index ab5ff7867eb9..d8be146793ee 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51c8,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cc,
+ },
#endif
};
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a31009afc025..5462f771c2f9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int hda_codec_pm_prepare(struct device *dev)
{
+ dev->power.power_state = PMSG_SUSPEND;
return pm_runtime_suspended(dev);
}
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
+ /* If no other pm-functions are called between prepare() and complete() */
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+ dev->power.power_state = PMSG_RESUME;
+
if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
hda_codec_need_resume(codec) || codec->forced_resume))
pm_request_resume(dev);
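
/*
 * Contract behind the two hunks above (sketch): a positive return from a
 * .prepare() callback tells the PM core it may keep a runtime-suspended
 * device suspended across system sleep ("direct-complete"), in which case
 * no suspend/resume callbacks run in between. Stamping PMSG_SUSPEND in
 * prepare() and testing it in complete() is how the driver detects that
 * shortcut and restores power_state.
 */
static int example_pm_prepare(struct device *dev)
{
	dev->power.power_state = PMSG_SUSPEND;
	return pm_runtime_suspended(dev);	/* > 0: may stay suspended */
}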
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b638fc2ef6f7..1f8018f9ce57 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new cap_sw_temp = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Switch",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = cap_sw_info,
.get = cap_sw_get,
.put = cap_sw_put,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 79ade335c8a0..470753b36c8a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
/* Alderlake-P */
{ PCI_DEVICE(0x8086, 0x51c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Alderlake-M */
+ { PCI_DEVICE(0x8086, 0x51cc),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 726507d0b04c..8629e84fef23 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
break;
case HDA_FIXUP_ACT_PROBE:
- /* Set initial volume on Bullseye to -26 dB */
- if (codec->fixup_id == CS8409_BULLSEYE)
- snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
- HDA_INPUT, 0, 0xff, 0x19);
+ /* Set initial DMIC volume to -26 dB */
+ snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+ HDA_INPUT, 0, 0xff, 0x19);
snd_hda_gen_add_kctl(&spec->gen,
NULL, &cs8409_cs42l42_hp_volume_mixer);
snd_hda_gen_add_kctl(&spec->gen,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 552e2cb73291..ab5113cccffa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2603,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
{}
};
+static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
+ {0x14, 0x01014010},
+ {0x15, 0x01011012},
+ {0x16, 0x01016011},
+ {0x18, 0x01a19040},
+ {0x19, 0x02a19050},
+ {0x1a, 0x0181304f},
+ {0x1b, 0x0221401f},
+ {0x1e, 0x01456130}),
+ SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950,
+ {0x14, 0x01015010},
+ {0x15, 0x01011012},
+ {0x16, 0x01011011},
+ {0x18, 0x01a11040},
+ {0x19, 0x02a19050},
+ {0x1a, 0x0181104f},
+ {0x1b, 0x0221401f},
+ {0x1e, 0x01451130}),
+ {}
+};
+
/*
* BIOS auto configuration
*/
@@ -2644,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec)
snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
alc882_fixups);
+ snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
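
/*
 * How the new pin table is matched (sketch; the entry below abridges the
 * first row of alc882_pin_fixup_tbl): snd_hda_pick_pin_fixup() applies a
 * fixup only when the codec ID, the PCI subsystem vendor, and every
 * listed BIOS default pin configuration match, which distinguishes boards
 * whose subsystem IDs are too generic for plain snd_pci_quirk matching.
 */
SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
		  {0x14, 0x01014010},	/* rear line-out */
		  {0x1b, 0x0221401f}),	/* front headphone */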
@@ -6543,6 +6566,9 @@ enum {
ALC295_FIXUP_ASUS_DACS,
ALC295_FIXUP_HP_OMEN,
ALC285_FIXUP_HP_SPECTRE_X360,
+ ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
+ ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+ ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8109,6 +8135,27 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
},
+ [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_ideapad_s740_coef,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ },
+ [ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_no_shutup,
+ .chained = true,
+ .chain_id = ALC283_FIXUP_HEADSET_MIC,
+ },
+ [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8145,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8266,12 +8314,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8290,7 +8341,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -8427,7 +8484,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
- SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -8477,6 +8534,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8692,6 +8750,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index f22bb2bdf527..8148b0d22e88 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -235,10 +235,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
return ret;
}
- if (!adata->play_stream && !adata->capture_stream &&
- !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
- rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-
i2s_data->acp3x_base = adata->acp3x_base;
runtime->private_data = i2s_data;
return ret;
@@ -365,12 +361,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
}
}
- /* Disable ACP irq, when the current stream is being closed and
- * another stream is also not active.
- */
- if (!adata->play_stream && !adata->capture_stream &&
- !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
- rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
return 0;
}
diff --git a/sound/soc/amd/raven/acp3x.h b/sound/soc/amd/raven/acp3x.h
index 03fe93913e12..c3f0c8b7545d 100644
--- a/sound/soc/amd/raven/acp3x.h
+++ b/sound/soc/amd/raven/acp3x.h
@@ -77,6 +77,7 @@
#define ACP_POWER_OFF_IN_PROGRESS 0x03
#define ACP3x_ITER_IRER_SAMP_LEN_MASK 0x38
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
struct acp3x_platform_info {
u16 play_i2s_instance;
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
index d3536fd6a124..a013a607b3d4 100644
--- a/sound/soc/amd/raven/pci-acp3x.c
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base)
return -ETIMEDOUT;
}
+static void acp3x_enable_interrupts(void __iomem *acp_base)
+{
+ rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
+static void acp3x_disable_interrupts(void __iomem *acp_base)
+{
+ rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+ mmACP_EXTERNAL_INTR_STAT);
+ rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL);
+ rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
static int acp3x_init(struct acp3x_dev_data *adata)
{
void __iomem *acp3x_base = adata->acp3x_base;
@@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata)
pr_err("ACP3x reset failed\n");
return ret;
}
+ acp3x_enable_interrupts(acp3x_base);
return 0;
}
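
/*
 * Net effect on the IRQ lifecycle (sketch; the example_* name is
 * illustrative): the enable bit is now written once when the controller
 * comes up instead of toggling on every PCM open/close, and teardown
 * clears any latched status bits before masking so a stale interrupt
 * cannot fire after deinit.
 */
static int example_acp3x_bringup(struct acp3x_dev_data *adata)
{
	int ret = acp3x_reset(adata->acp3x_base);

	if (ret)
		return ret;
	acp3x_enable_interrupts(adata->acp3x_base);	/* once per init */
	return 0;
}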
@@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base)
{
int ret;
+ acp3x_disable_interrupts(acp3x_base);
/* Reset */
ret = acp3x_reset(acp3x_base);
if (ret) {
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index 34aed80db0eb..37d4600b6f2c 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -307,7 +307,7 @@ static struct snd_soc_dai_driver ak5558_dai = {
};
static struct snd_soc_dai_driver ak5552_dai = {
- .name = "ak5558-aif",
+ .name = "ak5552-aif",
.capture = {
.stream_name = "Capture",
.channels_min = 1,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index f4067230ac42..88e79b9f52ed 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -261,6 +261,9 @@ static const struct regmap_config cs35l32_regmap = {
.readable_reg = cs35l32_readable_register,
.precious_reg = cs35l32_precious_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
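
/*
 * Why .use_single_read/.use_single_write here and in the sibling Cirrus
 * codec patches below (sketch): these parts do not support the
 * auto-incrementing burst transfers regmap issues by default, so regmap
 * must split every multi-register access into one bus transaction per
 * register. A minimal illustrative config:
 */
static const struct regmap_config example_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.use_single_read = true,	/* no burst reads */
	.use_single_write = true,	/* no burst writes */
};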
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 7ad7b733af9b..e8f3dcfd144d 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS35L33 Device ID (%X). Expected ID %X\n",
devid, CS35L33_CHIP_ID);
+ ret = -EINVAL;
goto err_enable;
}
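
/*
 * The one-line fix above closes a classic error-path bug (sketch): at the
 * point of the ID mismatch, ret still holds 0 from the preceding
 * successful call, so jumping to the cleanup label without assigning an
 * error code made probe() report success for an unrecognized device.
 */
if (devid != CS35L33_CHIP_ID) {
	dev_err(&i2c_client->dev, "wrong device ID\n");
	ret = -EINVAL;		/* without this, the function returns 0 */
	goto err_enable;
}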
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 110ee2d06358..3d3c3c34dfe2 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -800,6 +800,9 @@ static struct regmap_config cs35l34_regmap = {
.readable_reg = cs35l34_readable_register,
.precious_reg = cs35l34_precious_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index bf982e145e94..77473c226f9e 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -399,6 +399,9 @@ static const struct regmap_config cs42l42_regmap = {
.reg_defaults = cs42l42_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index c44a5cdb796e..7cdffdf6b8cf 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1175,7 +1175,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
struct cs42l56_platform_data *pdata =
dev_get_platdata(&i2c_client->dev);
int ret, i;
- unsigned int devid = 0;
+ unsigned int devid;
unsigned int alpha_rev, metal_rev;
unsigned int reg;
@@ -1245,6 +1245,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
}
ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+ return ret;
+ }
+
devid = reg & CS42L56_CHIP_ID_MASK;
if (devid != CS42L56_DEVID) {
dev_err(&i2c_client->dev,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index c3f974ec78e5..e92bacaab53f 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1268,6 +1268,9 @@ static const struct regmap_config cs42l73_regmap = {
.volatile_reg = cs42l73_volatile_register,
.readable_reg = cs42l73_readable_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 3d67cbf9eaaa..abe0cc0bc03a 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -912,6 +912,9 @@ static struct regmap_config cs53l30_regmap = {
.writeable_reg = cs53l30_writeable_register,
.readable_reg = cs53l30_readable_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs53l30_i2c_probe(struct i2c_client *client,
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index bd3c523a8617..13009d08b09a 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -2181,10 +2181,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component)
ret);
goto err;
}
-
- da7219->dai_clks[i] = devm_clk_hw_get_clk(dev, dai_clk_hw, NULL);
- if (IS_ERR(da7219->dai_clks[i]))
- return PTR_ERR(da7219->dai_clks[i]);
+ da7219->dai_clks[i] = dai_clk_hw->clk;
/* For DT setup onecell data, otherwise create lookup */
if (np) {
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index b0ebfc8d180c..171ab7f519c0 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -3579,6 +3579,7 @@ static const struct of_device_id rx_macro_dt_match[] = {
{ .compatible = "qcom,sm8250-lpass-rx-macro" },
{ }
};
+MODULE_DEVICE_TABLE(of, rx_macro_dt_match);
static struct platform_driver rx_macro_driver = {
.driver = {
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
index acd2fbc0ca7c..27a0d5defd27 100644
--- a/sound/soc/codecs/lpass-tx-macro.c
+++ b/sound/soc/codecs/lpass-tx-macro.c
@@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = {
{ .compatible = "qcom,sm8250-lpass-tx-macro" },
{ }
};
+MODULE_DEVICE_TABLE(of, tx_macro_dt_match);
static struct platform_driver tx_macro_driver = {
.driver = {
.name = "tx_macro",
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 4be24e7f51c8..f8e49e45ce33 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -41,6 +41,7 @@ struct max98088_priv {
enum max98088_type devtype;
struct max98088_pdata *pdata;
struct clk *mclk;
+ unsigned char mclk_prescaler;
unsigned int sysclk;
struct max98088_cdata dai[2];
int eq_textcnt;
@@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
& M98088_DAI_MAS) {
+ unsigned long pclk;
+
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
- do_div(ni, (unsigned long long int)max98088->sysclk);
+ pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+ ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
@@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
& M98088_DAI_MAS) {
+ unsigned long pclk;
+
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
- do_div(ni, (unsigned long long int)max98088->sysclk);
+ pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+ ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
@@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
*/
if ((freq >= 10000000) && (freq < 20000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
+ max98088->mclk_prescaler = 1;
} else if ((freq >= 20000000) && (freq < 30000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
+ max98088->mclk_prescaler = 2;
} else {
dev_err(component->dev, "Invalid master clock frequency\n");
return -EINVAL;
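
/*
 * Worked example for the NI fix above: with a 24.576 MHz MCLK the
 * 20-30 MHz branch programs a /2 prescaler, so PCLK = 12.288 MHz. For a
 * 48 kHz master-mode stream:
 *
 *	NI = 65536 * 48 * 48000 / 12288000 = 12288 (0x3000)
 *
 * The old code divided by sysclk itself, which halved NI whenever the
 * prescaler was 2 and detuned the generated clocks.
 */
pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
ni = DIV_ROUND_CLOSEST_ULL(65536ULL * 48 * 48000, pclk);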
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 438fa18bcb55..9408ee63cb26 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3388,44 +3388,30 @@ static int rt5645_probe(struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
- int ret = 0;
rt5645->component = component;
switch (rt5645->codec_type) {
case CODEC_TYPE_RT5645:
- ret = snd_soc_dapm_new_controls(dapm,
+ snd_soc_dapm_new_controls(dapm,
rt5645_specific_dapm_widgets,
ARRAY_SIZE(rt5645_specific_dapm_widgets));
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5645_specific_dapm_routes,
ARRAY_SIZE(rt5645_specific_dapm_routes));
- if (ret < 0)
- goto exit;
-
if (rt5645->v_id < 3) {
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5645_old_dapm_routes,
ARRAY_SIZE(rt5645_old_dapm_routes));
- if (ret < 0)
- goto exit;
}
break;
case CODEC_TYPE_RT5650:
- ret = snd_soc_dapm_new_controls(dapm,
+ snd_soc_dapm_new_controls(dapm,
rt5650_specific_dapm_widgets,
ARRAY_SIZE(rt5650_specific_dapm_widgets));
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5650_specific_dapm_routes,
ARRAY_SIZE(rt5650_specific_dapm_routes));
- if (ret < 0)
- goto exit;
break;
}
@@ -3433,17 +3419,9 @@ static int rt5645_probe(struct snd_soc_component *component)
/* for JD function */
if (rt5645->pdata.jd_mode) {
- ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_sync(dapm);
- if (ret < 0)
- goto exit;
+ snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+ snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+ snd_soc_dapm_sync(dapm);
}
if (rt5645->pdata.long_name)
@@ -3454,14 +3432,9 @@ static int rt5645_probe(struct snd_soc_component *component)
GFP_KERNEL);
if (!rt5645->eq_param)
- ret = -ENOMEM;
-exit:
- /*
- * If there was an error above, everything will be cleaned up by the
- * caller if we return an error here. This will be done with a later
- * call to rt5645_remove().
- */
- return ret;
+ return -ENOMEM;
+
+ return 0;
}
static void rt5645_remove(struct snd_soc_component *component)
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 87f5709fe2cc..4a50b169fe03 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
NULL, 0),
- SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
- NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+ 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+ NULL, 0),
SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
RT5659_PWR_VREF3_BIT, 0, NULL, 0),
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
/* Input Side */
- SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
- 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
static int rt5659_probe(struct snd_soc_component *component)
{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
rt5659->component = component;
+ switch (rt5659->pdata.jd_src) {
+ case RT5659_JD_HDA_HEADER:
+ break;
+
+ default:
+ snd_soc_dapm_new_controls(dapm,
+ rt5659_particular_dapm_widgets,
+ ARRAY_SIZE(rt5659_particular_dapm_widgets));
+ break;
+ }
+
return 0;
}
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index fed80c8f994f..e78ba3b064c4 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
index cc36739f7fcf..24a084e0b48a 100644
--- a/sound/soc/codecs/rt711-sdca.c
+++ b/sound/soc/codecs/rt711-sdca.c
@@ -683,13 +683,13 @@ static int rt711_sdca_set_fu1e_capture_ctl(struct rt711_sdca_priv *rt711)
ch_r = (rt711->fu1e_dapm_mute || rt711->fu1e_mixer_r_mute) ? 0x01 : 0x00;
err = regmap_write(rt711->regmap,
- SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
RT711_SDCA_CTL_FU_MUTE, CH_L), ch_l);
if (err < 0)
return err;
err = regmap_write(rt711->regmap,
- SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
RT711_SDCA_CTL_FU_MUTE, CH_R), ch_r);
if (err < 0)
return err;
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index ffdf7e559515..82a24e330065 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -408,6 +408,7 @@ static const struct of_device_id sti_sas_dev_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, sti_sas_dev_match);
static int sti_sas_driver_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
index 81866aeb3fbf..55b2a1f52ca3 100644
--- a/sound/soc/codecs/tas2562.h
+++ b/sound/soc/codecs/tas2562.h
@@ -57,13 +57,13 @@
#define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
#define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
#define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
#define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
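
/*
 * Why the sample-rate values above gain "<< 1" (sketch): the rate field
 * occupies TDM_CFG0 bits 3:1 (GENMASK(3, 1)), but the old values were
 * aligned to bit 0, so masked writes landed the code one bit too low. A
 * hypothetical caller, now correct without open-coded shifting:
 */
snd_soc_component_update_bits(component, TAS2562_TDM_CFG0,
			      TAS2562_TDM_CFG0_SAMPRATE_MASK,
			      TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ);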
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 0917d65d6921..556c284f49dd 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -119,6 +119,7 @@ config SND_SOC_FSL_RPMSG
tristate "NXP Audio Base On RPMSG support"
depends on COMMON_CLK
depends on RPMSG
+ depends on SND_IMX_SOC || SND_IMX_SOC = n
select SND_SOC_IMX_RPMSG if SND_IMX_SOC != n
help
Say Y if you want to add rpmsg audio support for the Freescale CPUs.
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index c62bfd1c3ac7..4f55b316cf0f 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
/* Initialize sound card */
priv->pdev = pdev;
priv->card.dev = &pdev->dev;
+ priv->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&priv->card, "model");
if (ret) {
snprintf(priv->name, sizeof(priv->name), "%s-audio",
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 2c8a2fcb7922..5e71382467e8 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -209,7 +209,7 @@ static void graph_parse_mclk_fs(struct device_node *top,
static int graph_parse_node(struct asoc_simple_priv *priv,
struct device_node *ep,
struct link_info *li,
- int is_cpu)
+ int *cpu)
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
@@ -217,9 +217,9 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct snd_soc_dai_link_component *dlc;
struct asoc_simple_dai *dai;
- int ret, single = 0;
+ int ret;
- if (is_cpu) {
+ if (cpu) {
dlc = asoc_link_to_cpu(dai_link, 0);
dai = simple_props_to_dai_cpu(dai_props, 0);
} else {
@@ -229,7 +229,7 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
graph_parse_mclk_fs(top, ep, dai_props);
- ret = asoc_simple_parse_dai(ep, dlc, &single);
+ ret = asoc_simple_parse_dai(ep, dlc, cpu);
if (ret < 0)
return ret;
@@ -241,9 +241,6 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
if (ret < 0)
return ret;
- if (is_cpu)
- asoc_simple_canonicalize_cpu(dlc, single);
-
return 0;
}
@@ -276,33 +273,29 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct link_info *li)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_card *card = simple_priv_to_card(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct device_node *top = dev->of_node;
struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
- struct device_node *port;
- struct device_node *ports;
- struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
- struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
char dai_name[64];
int ret;
- port = of_get_parent(ep);
- ports = of_get_parent(port);
-
dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
if (li->cpu) {
+ struct snd_soc_card *card = simple_priv_to_card(priv);
+ struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+ int is_single_links = 0;
+
/* Codec is dummy */
/* FE settings */
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
- ret = graph_parse_node(priv, cpu_ep, li, 1);
+ ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
if (ret)
- goto out_put_node;
+ return ret;
snprintf(dai_name, sizeof(dai_name),
"fe.%pOFP.%s", cpus->of_node, cpus->dai_name);
@@ -318,8 +311,13 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
*/
if (card->component_chaining && !soc_component_is_pcm(cpus))
dai_link->no_pcm = 1;
+
+ asoc_simple_canonicalize_cpu(cpus, is_single_links);
} else {
- struct snd_soc_codec_conf *cconf;
+ struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
+ struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
+ struct device_node *port;
+ struct device_node *ports;
/* CPU is dummy */
@@ -327,22 +325,25 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup;
- cconf = simple_props_to_codec_conf(dai_props, 0);
-
- ret = graph_parse_node(priv, codec_ep, li, 0);
+ ret = graph_parse_node(priv, codec_ep, li, NULL);
if (ret < 0)
- goto out_put_node;
+ return ret;
snprintf(dai_name, sizeof(dai_name),
"be.%pOFP.%s", codecs->of_node, codecs->dai_name);
/* check "prefix" from top node */
+ port = of_get_parent(ep);
+ ports = of_get_parent(port);
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
"prefix");
if (of_node_name_eq(ports, "ports"))
snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node,
"prefix");
+
+ of_node_put(ports);
+ of_node_put(port);
}
graph_parse_convert(dev, ep, &dai_props->adata);
@@ -351,11 +352,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
-out_put_node:
li->link++;
- of_node_put(ports);
- of_node_put(port);
return ret;
}
@@ -369,20 +367,23 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
char dai_name[64];
- int ret;
+ int ret, is_single_links = 0;
dev_dbg(dev, "link_of (%pOF)\n", cpu_ep);
- ret = graph_parse_node(priv, cpu_ep, li, 1);
+ ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
if (ret < 0)
return ret;
- ret = graph_parse_node(priv, codec_ep, li, 0);
+ ret = graph_parse_node(priv, codec_ep, li, NULL);
if (ret < 0)
return ret;
snprintf(dai_name, sizeof(dai_name),
"%s-%s", cpus->dai_name, codecs->dai_name);
+
+ asoc_simple_canonicalize_cpu(cpus, is_single_links);
+
ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
if (ret < 0)
return ret;
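
/*
 * Refcounting note on the audio-graph refactor above (sketch):
 * of_get_parent() returns its result with an elevated refcount, so each
 * successful call must be paired with exactly one of_node_put(). Scoping
 * port/ports to the single branch that uses them keeps early-return error
 * paths from leaking or double-dropping references.
 */
port = of_get_parent(ep);		/* takes a reference */
ports = of_get_parent(port);		/* takes another */
/* ... use port and ports ... */
of_node_put(ports);			/* drop in reverse order */
of_node_put(port);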
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index a1373be4558f..0015f534d42d 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -93,12 +93,11 @@ static void simple_parse_convert(struct device *dev,
}
static void simple_parse_mclk_fs(struct device_node *top,
- struct device_node *cpu,
- struct device_node *codec,
+ struct device_node *np,
struct simple_dai_props *props,
char *prefix)
{
- struct device_node *node = of_get_parent(cpu);
+ struct device_node *node = of_get_parent(np);
char prop[128];
snprintf(prop, sizeof(prop), "%smclk-fs", PREFIX);
@@ -106,12 +105,71 @@ static void simple_parse_mclk_fs(struct device_node *top,
snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
of_property_read_u32(node, prop, &props->mclk_fs);
- of_property_read_u32(cpu, prop, &props->mclk_fs);
- of_property_read_u32(codec, prop, &props->mclk_fs);
+ of_property_read_u32(np, prop, &props->mclk_fs);
of_node_put(node);
}
+static int simple_parse_node(struct asoc_simple_priv *priv,
+ struct device_node *np,
+ struct link_info *li,
+ char *prefix,
+ int *cpu)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *top = dev->of_node;
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
+ struct snd_soc_dai_link_component *dlc;
+ struct asoc_simple_dai *dai;
+ int ret;
+
+ if (cpu) {
+ dlc = asoc_link_to_cpu(dai_link, 0);
+ dai = simple_props_to_dai_cpu(dai_props, 0);
+ } else {
+ dlc = asoc_link_to_codec(dai_link, 0);
+ dai = simple_props_to_dai_codec(dai_props, 0);
+ }
+
+ simple_parse_mclk_fs(top, np, dai_props, prefix);
+
+ ret = asoc_simple_parse_dai(np, dlc, cpu);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_parse_clk(dev, np, dai, dlc);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_parse_tdm(np, dai);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int simple_link_init(struct asoc_simple_priv *priv,
+ struct device_node *node,
+ struct device_node *codec,
+ struct link_info *li,
+ char *prefix, char *name)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ int ret;
+
+ ret = asoc_simple_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
+ if (ret < 0)
+ return 0;
+
+ dai_link->init = asoc_simple_dai_init;
+ dai_link->ops = &simple_ops;
+
+ return asoc_simple_set_dailink_name(dev, dai_link, name);
+}
+
static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct device_node *np,
struct device_node *codec,
@@ -121,24 +179,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct asoc_simple_dai *dai;
- struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
- struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
- struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
struct device_node *top = dev->of_node;
struct device_node *node = of_get_parent(np);
char *prefix = "";
+ char dai_name[64];
int ret;
dev_dbg(dev, "link_of DPCM (%pOF)\n", np);
- li->link++;
-
/* For single DAI link & old style of DT node */
if (is_top)
prefix = PREFIX;
if (li->cpu) {
+ struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
int is_single_links = 0;
/* Codec is dummy */
@@ -147,25 +202,16 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
- dai = simple_props_to_dai_cpu(dai_props, 0);
-
- ret = asoc_simple_parse_dai(np, cpus, &is_single_links);
- if (ret)
- goto out_put_node;
-
- ret = asoc_simple_parse_clk(dev, np, dai, cpus);
+ ret = simple_parse_node(priv, np, li, prefix, &is_single_links);
if (ret < 0)
goto out_put_node;
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "fe.%s",
- cpus->dai_name);
- if (ret < 0)
- goto out_put_node;
+ snprintf(dai_name, sizeof(dai_name), "fe.%s", cpus->dai_name);
asoc_simple_canonicalize_cpu(cpus, is_single_links);
asoc_simple_canonicalize_platform(platforms, cpus);
} else {
+ struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
struct snd_soc_codec_conf *cconf;
/* CPU is dummy */
@@ -174,22 +220,13 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup;
- dai = simple_props_to_dai_codec(dai_props, 0);
cconf = simple_props_to_codec_conf(dai_props, 0);
- ret = asoc_simple_parse_dai(np, codecs, NULL);
+ ret = simple_parse_node(priv, np, li, prefix, NULL);
if (ret < 0)
goto out_put_node;
- ret = asoc_simple_parse_clk(dev, np, dai, codecs);
- if (ret < 0)
- goto out_put_node;
-
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "be.%s",
- codecs->dai_name);
- if (ret < 0)
- goto out_put_node;
+ snprintf(dai_name, sizeof(dai_name), "be.%s", codecs->dai_name);
/* check "prefix" from top node */
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -201,23 +238,14 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
}
simple_parse_convert(dev, np, &dai_props->adata);
- simple_parse_mclk_fs(top, np, codec, dai_props, prefix);
-
- ret = asoc_simple_parse_tdm(np, dai);
- if (ret)
- goto out_put_node;
-
- ret = asoc_simple_parse_daifmt(dev, node, codec,
- prefix, &dai_link->dai_fmt);
- if (ret < 0)
- goto out_put_node;
snd_soc_dai_link_set_capabilities(dai_link);
- dai_link->ops = &simple_ops;
- dai_link->init = asoc_simple_dai_init;
+ ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
out_put_node:
+ li->link++;
+
of_node_put(node);
return ret;
}
@@ -230,23 +258,19 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
{
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct asoc_simple_dai *cpu_dai = simple_props_to_dai_cpu(dai_props, 0);
- struct asoc_simple_dai *codec_dai = simple_props_to_dai_codec(dai_props, 0);
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
- struct device_node *top = dev->of_node;
struct device_node *cpu = NULL;
struct device_node *node = NULL;
struct device_node *plat = NULL;
+ char dai_name[64];
char prop[128];
char *prefix = "";
int ret, single_cpu = 0;
cpu = np;
node = of_get_parent(np);
- li->link++;
dev_dbg(dev, "link_of (%pOF)\n", node);
@@ -257,18 +281,11 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
snprintf(prop, sizeof(prop), "%splat", prefix);
plat = of_get_child_by_name(node, prop);
- ret = asoc_simple_parse_daifmt(dev, node, codec,
- prefix, &dai_link->dai_fmt);
- if (ret < 0)
- goto dai_link_of_err;
-
- simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix);
-
- ret = asoc_simple_parse_dai(cpu, cpus, &single_cpu);
+ ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_dai(codec, codecs, NULL);
+ ret = simple_parse_node(priv, codec, li, prefix, NULL);
if (ret < 0)
goto dai_link_of_err;
@@ -276,39 +293,20 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_tdm(cpu, cpu_dai);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_tdm(codec, codec_dai);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_clk(dev, cpu, cpu_dai, cpus);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_clk(dev, codec, codec_dai, codecs);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "%s-%s",
- cpus->dai_name,
- codecs->dai_name);
- if (ret < 0)
- goto dai_link_of_err;
-
- dai_link->ops = &simple_ops;
- dai_link->init = asoc_simple_dai_init;
+ snprintf(dai_name, sizeof(dai_name),
+ "%s-%s", cpus->dai_name, codecs->dai_name);
asoc_simple_canonicalize_cpu(cpus, single_cpu);
asoc_simple_canonicalize_platform(platforms, cpus);
+ ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
+
dai_link_of_err:
of_node_put(plat);
of_node_put(node);
+ li->link++;
+
return ret;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index df2f5d55e8ff..22dbd9d93c1e 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Glavey TM800A550L */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Lenovo Miix 3-830 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Linx Linx7 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b941adcbb8f9..939e7e28486a 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -61,22 +61,6 @@ static void dump_registers(struct ssp_device *ssp)
pxa_ssp_read_reg(ssp, SSACD));
}
-static void pxa_ssp_enable(struct ssp_device *ssp)
-{
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
-static void pxa_ssp_disable(struct ssp_device *ssp)
-{
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4,
int out, struct snd_dmaengine_dai_dma_data *dma)
{
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index c62d2612e8f5..a6e95db6b3fb 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+ /*
+ * Ensure LRCLK is disabled even in device node validation.
+ * Disabling it again has no effect if the lpass_cpu_daiops_trigger()
+ * suspend path already disabled it.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+ else
+ regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+ /*
+ * BCLK may not have been enabled if lpass_cpu_daiops_prepare was never
+ * called before lpass_cpu_daiops_shutdown. The clk_disable here is
+ * paired with the clk_enable in lpass_cpu_daiops_prepare.
+ */
+ if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+ drvdata->mi2s_was_prepared[dai->driver->id] = false;
+ clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+ }
+
clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
}
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /*
+ * Ensure lpass BCLK/LRCLK is enabled during
+ * device resume as lpass_cpu_daiops_prepare() is not called
+ * after the device resumes. We don't check mi2s_was_prepared before
+ * enable/disable BCLK in trigger events because:
+ * 1. These trigger events are paired, so the BCLK
+ * enable_count is balanced.
+ * 2. the BCLK can be shared (ex: headset and headset mic),
+ * we need to increase the enable_count so that we don't
+ * turn off the shared BCLK while other devices are using
+ * it.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /*
+ * To ensure lpass BCLK/LRCLK is disabled during
+ * device suspend.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
+ int ret;
+
+ /*
+ * Ensure the lpass BCLK/LRCLK enable bit is set before playback/capture
+ * data flow starts. This allows other codecs to have some delay before
+ * the data flow.
+ * (ex: to drop start up pop noise before capture starts).
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+ else
+ ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+ if (ret) {
+ dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
+ * be called multiple times. It's paired with the clk_disable in
+ * lpass_cpu_daiops_shutdown.
+ */
+ if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+ ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+ if (ret) {
+ dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+ return ret;
+ }
+ drvdata->mi2s_was_prepared[dai->driver->id] = true;
+ }
+ return 0;
+}
+
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
.set_sysclk = lpass_cpu_daiops_set_sysclk,
.startup = lpass_cpu_daiops_startup,
.shutdown = lpass_cpu_daiops_shutdown,
.hw_params = lpass_cpu_daiops_hw_params,
.trigger = lpass_cpu_daiops_trigger,
+ .prepare = lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
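
/*
 * Clock pairing in the lpass-cpu changes above (sketch): startup() does
 * the sleepable clk_prepare(), .prepare() does the fast clk_enable()
 * guarded by mi2s_was_prepared so repeated prepare calls cannot unbalance
 * the enable count, and shutdown() undoes only the steps that actually
 * ran:
 */
if (drvdata->mi2s_was_prepared[id]) {
	drvdata->mi2s_was_prepared[id] = false;
	clk_disable(drvdata->mi2s_bit_clk[id]);	/* pairs with .prepare */
}
clk_unprepare(drvdata->mi2s_bit_clk[id]);	/* pairs with .startup */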
@@ -835,18 +914,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
if (dai_id == LPASS_DP_RX)
continue;
- drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
+ drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
variant->dai_osr_clk_names[i]);
- if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
- dev_warn(dev,
- "%s() error getting optional %s: %ld\n",
- __func__,
- variant->dai_osr_clk_names[i],
- PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
-
- drvdata->mi2s_osr_clk[dai_id] = NULL;
- }
-
drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
variant->dai_bit_clk_names[i]);
if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 83b2e08ade06..7f72214404ba 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -67,6 +67,10 @@ struct lpass_data {
/* MI2S SD lines to use for playback/capture */
unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+ /* Tracks whether the MI2S prepare dai_ops callback has been called */
+ bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
int hdmi_port_enable;
/* low-power audio interface (LPAIF) registers */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c0904acb935..a76974ccfce1 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
return NULL;
name = devm_kstrdup(dev, devname, GFP_KERNEL);
+ if (!name)
+ return NULL;
/* are we a "%s.%d" name (platform and SPI components) */
found = strstr(name, dev->driver->name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 73076d425efb..4893a56208e0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
* @src: older version of pcm as a source
* @pcm: latest version of pcm created from the source
*
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
*/
static int pcm_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
* @src: old version of physical link config as a source
* @link: latest version of physical link config created from the source
*
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. Users need to free the returned link config manually.
*/
static int link_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
* @src: old version of manifest as a source
* @manifest: latest version of manifest created from the source
*
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need to free the returned manifest manually.
*/
static int manifest_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_manifest *src,
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 8d7bab433fb3..c1f9f0f58464 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -421,11 +421,16 @@ static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
struct sof_ipc_dai_config *config;
struct snd_sof_dai *sof_dai;
struct sof_ipc_reply reply;
int ret;
+ /* DAI_CONFIG IPC during hw_params is not supported in older firmware */
+ if (v->abi_version < SOF_ABI_VER(3, 18, 0))
+ return 0;
+
list_for_each_entry(sof_dai, &sdev->dai_list, list) {
if (!sof_dai->cpu_dai_name || !sof_dai->dai_config)
continue;
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index fd265803f7bc..c83fb6255961 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -256,6 +256,7 @@ suspend:
/* reset FW state */
sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+ sdev->enabled_cores_mask = 0;
return ret;
}
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index c1561237ee24..3aa1cf262402 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -484,10 +484,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
dev_err(dev, "mclk register returned %d\n", ret);
return ret;
}
-
- sai->sai_mclk = devm_clk_hw_get_clk(dev, hw, NULL);
- if (IS_ERR(sai->sai_mclk))
- return PTR_ERR(sai->sai_mclk);
+ sai->sai_mclk = hw->clk;
/* register mclk provider */
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
diff --git a/sound/usb/format.c b/sound/usb/format.c
index e6ff317a6785..2287f8c65315 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
if (snd_BUG_ON(altsetting >= 64 - 8))
return false;
- err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
UAC2_AS_VAL_ALT_SETTINGS << 8,
iface, &raw_data, sizeof(raw_data));
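
/*
 * The pipe fixes here and in the Scarlett mixer below follow one USB rule
 * (sketch; request/value/index/buf/size are placeholder names): a control
 * transfer whose request type has USB_DIR_IN set must be submitted on the
 * receive control pipe, otherwise the core flags the URB as having a
 * bogus direction.
 */
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), request,
		      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
		      value, index, buf, size);	/* IN: rcvctrlpipe */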
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index fda66b2dbb01..37ad77524c0b 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3060,7 +3060,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
- err = snd_scarlett_gen2_controls_create(mixer);
+ err = snd_scarlett_gen2_init(mixer);
break;
case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 560c2ade829d..4caf379d5b99 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -635,7 +635,7 @@ static int scarlett2_usb(
/* send a second message to get the response */
err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
0,
@@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
return usb_submit_urb(mixer->urb, GFP_KERNEL);
}
-/* Entry point */
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
+ const struct scarlett2_device_info *info)
{
- const struct scarlett2_device_info *info;
int err;
- /* only use UAC_VERSION_2 */
- if (!mixer->protocol)
- return 0;
-
- switch (mixer->chip->usb_id) {
- case USB_ID(0x1235, 0x8203):
- info = &s6i6_gen2_info;
- break;
- case USB_ID(0x1235, 0x8204):
- info = &s18i8_gen2_info;
- break;
- case USB_ID(0x1235, 0x8201):
- info = &s18i20_gen2_info;
- break;
- default: /* device not (yet) supported */
- return -EINVAL;
- }
-
- if (!(mixer->chip->setup & SCARLETT2_ENABLE)) {
- usb_audio_err(mixer->chip,
- "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
- "use options snd_usb_audio device_setup=1 "
- "to enable and report any issues to g@b4.vu");
- return 0;
- }
-
/* Initialise private data, routing, sequence number */
err = scarlett2_init_private(mixer, info);
if (err < 0)
@@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ const struct scarlett2_device_info *info;
+ int err;
+
+ /* only use UAC_VERSION_2 */
+ if (!mixer->protocol)
+ return 0;
+
+ switch (chip->usb_id) {
+ case USB_ID(0x1235, 0x8203):
+ info = &s6i6_gen2_info;
+ break;
+ case USB_ID(0x1235, 0x8204):
+ info = &s18i8_gen2_info;
+ break;
+ case USB_ID(0x1235, 0x8201):
+ info = &s18i20_gen2_info;
+ break;
+ default: /* device not (yet) supported */
+ return -EINVAL;
+ }
+
+ if (!(chip->setup & SCARLETT2_ENABLE)) {
+ usb_audio_info(chip,
+ "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+ "use options snd_usb_audio vid=0x%04x pid=0x%04x "
+ "device_setup=1 to enable and report any issues "
+ "to g@b4.vu",
+ USB_ID_VENDOR(chip->usb_id),
+ USB_ID_PRODUCT(chip->usb_id));
+ return 0;
+ }
+
+ usb_audio_info(chip,
+ "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
+ USB_ID_PRODUCT(chip->usb_id));
+
+ err = snd_scarlett_gen2_controls_create(mixer, info);
+ if (err < 0)
+ usb_audio_err(mixer->chip,
+ "Error initialising Scarlett Mixer Driver: %d",
+ err);
+
+ return err;
+}
diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
index 52e1dad77afd..668c6b0cb50a 100644
--- a/sound/usb/mixer_scarlett_gen2.h
+++ b/sound/usb/mixer_scarlett_gen2.h
@@ -2,6 +2,6 @@
#ifndef __USB_MIXER_SCARLETT_GEN2_H
#define __USB_MIXER_SCARLETT_GEN2_H
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer);
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
#endif /* __USB_MIXER_SCARLETT_GEN2_H */
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d0f4ecd616cf
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+ PERF_REG_MIPS_PC,
+ PERF_REG_MIPS_R1,
+ PERF_REG_MIPS_R2,
+ PERF_REG_MIPS_R3,
+ PERF_REG_MIPS_R4,
+ PERF_REG_MIPS_R5,
+ PERF_REG_MIPS_R6,
+ PERF_REG_MIPS_R7,
+ PERF_REG_MIPS_R8,
+ PERF_REG_MIPS_R9,
+ PERF_REG_MIPS_R10,
+ PERF_REG_MIPS_R11,
+ PERF_REG_MIPS_R12,
+ PERF_REG_MIPS_R13,
+ PERF_REG_MIPS_R14,
+ PERF_REG_MIPS_R15,
+ PERF_REG_MIPS_R16,
+ PERF_REG_MIPS_R17,
+ PERF_REG_MIPS_R18,
+ PERF_REG_MIPS_R19,
+ PERF_REG_MIPS_R20,
+ PERF_REG_MIPS_R21,
+ PERF_REG_MIPS_R22,
+ PERF_REG_MIPS_R23,
+ PERF_REG_MIPS_R24,
+ PERF_REG_MIPS_R25,
+ PERF_REG_MIPS_R26,
+ PERF_REG_MIPS_R27,
+ PERF_REG_MIPS_R28,
+ PERF_REG_MIPS_R29,
+ PERF_REG_MIPS_R30,
+ PERF_REG_MIPS_R31,
+ PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
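The new header lets perf tooling decode MIPS register samples by mirroring the kernel-side enumeration. As an illustration (the register choice is arbitrary), a perf_event_attr requesting the PC and the stack pointer, r29, in user-space register samples:

	struct perf_event_attr attr = {
		.size             = sizeof(attr),
		.sample_type      = PERF_SAMPLE_REGS_USER,
		.sample_regs_user = (1ULL << PERF_REG_MIPS_PC) |
				    (1ULL << PERF_REG_MIPS_R29),
	};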
diff --git a/tools/arch/x86/include/asm/asm.h b/tools/arch/x86/include/asm/asm.h
new file mode 100644
index 000000000000..3ad3da9a7d97
--- /dev/null
+++ b/tools/arch/x86/include/asm/asm.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ASM_H
+#define _ASM_X86_ASM_H
+
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
+#else
+#include <linux/stringify.h>
+# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
+# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
+# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
+#endif
+
+#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
+
+#ifndef __x86_64__
+/* 32 bit */
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+#else
+/* 64 bit */
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+#endif
+
+#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
+ inst##q##__VA_ARGS__)
+#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
+
+#define _ASM_PTR __ASM_SEL(.long, .quad)
+#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
+
+#define _ASM_MOV __ASM_SIZE(mov)
+#define _ASM_INC __ASM_SIZE(inc)
+#define _ASM_DEC __ASM_SIZE(dec)
+#define _ASM_ADD __ASM_SIZE(add)
+#define _ASM_SUB __ASM_SIZE(sub)
+#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
+
+#define _ASM_AX __ASM_REG(ax)
+#define _ASM_BX __ASM_REG(bx)
+#define _ASM_CX __ASM_REG(cx)
+#define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
+
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1 _ASM_AX
+#define _ASM_ARG2 _ASM_DX
+#define _ASM_ARG3 _ASM_CX
+
+#define _ASM_ARG1L eax
+#define _ASM_ARG2L edx
+#define _ASM_ARG3L ecx
+
+#define _ASM_ARG1W ax
+#define _ASM_ARG2W dx
+#define _ASM_ARG3W cx
+
+#define _ASM_ARG1B al
+#define _ASM_ARG2B dl
+#define _ASM_ARG3B cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1 _ASM_DI
+#define _ASM_ARG2 _ASM_SI
+#define _ASM_ARG3 _ASM_DX
+#define _ASM_ARG4 _ASM_CX
+#define _ASM_ARG5 r8
+#define _ASM_ARG6 r9
+
+#define _ASM_ARG1Q rdi
+#define _ASM_ARG2Q rsi
+#define _ASM_ARG3Q rdx
+#define _ASM_ARG4Q rcx
+#define _ASM_ARG5Q r8
+#define _ASM_ARG6Q r9
+
+#define _ASM_ARG1L edi
+#define _ASM_ARG2L esi
+#define _ASM_ARG3L edx
+#define _ASM_ARG4L ecx
+#define _ASM_ARG5L r8d
+#define _ASM_ARG6L r9d
+
+#define _ASM_ARG1W di
+#define _ASM_ARG2W si
+#define _ASM_ARG3W dx
+#define _ASM_ARG4W cx
+#define _ASM_ARG5W r8w
+#define _ASM_ARG6W r9w
+
+#define _ASM_ARG1B dil
+#define _ASM_ARG2B sil
+#define _ASM_ARG3B dl
+#define _ASM_ARG4B cl
+#define _ASM_ARG5B r8b
+#define _ASM_ARG6B r9b
+
+#endif
+
+/*
+ * Macros to generate condition code outputs from inline assembly.
+ * The output operand must be of type "bool".
+ */
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
+# define CC_OUT(c) "=@cc" #c
+#else
+# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
+# define CC_OUT(c) [_cc_ ## c] "=qm"
+#endif
+
+#ifdef __KERNEL__
+
+/* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ .pushsection "__ex_table","a" ; \
+ .balign 4 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
+ .long (handler) - . ; \
+ .popsection
+
+# define _ASM_EXTABLE(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
+# define _ASM_EXTABLE_FAULT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+# ifdef CONFIG_KPROBES
+# define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist","aw" ; \
+ _ASM_ALIGN ; \
+ _ASM_PTR (entry); \
+ .popsection
+# else
+# define _ASM_NOKPROBE(entry)
+# endif
+
+#else /* ! __ASSEMBLY__ */
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 4\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
+ " .popsection\n"
+
+# define _ASM_EXTABLE(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
+# define _ASM_EXTABLE_FAULT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_ASM_H */
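CC_SET()/CC_OUT() above paper over the compiler split on flag outputs: with __GCC_ASM_FLAG_OUTPUTS__ the condition code becomes a direct "=@cc<cond>" output, otherwise a set<cond> instruction materialises it. A minimal sketch of the idiom, assuming the macros above (the decrement is an arbitrary example):

	static inline bool dec_and_test(unsigned long *v)
	{
		bool zero;

		/* CC_SET(e) emits "sete" only on the fallback path;
		 * CC_OUT(e) binds ZF to "zero" on either path */
		asm volatile(_ASM_DEC "%[cnt]" CC_SET(e)
			     : [cnt] "+m" (*v), CC_OUT(e) (zero));
		return zero;
	}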
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/tools/arch/x86/include/asm/irq_vectors.h b/tools/arch/x86/include/asm/irq_vectors.h
index 889f8b1b5b7f..43dcb9284208 100644
--- a/tools/arch/x86/include/asm/irq_vectors.h
+++ b/tools/arch/x86/include/asm/irq_vectors.h
@@ -26,8 +26,8 @@
* This file enumerates the exact layout of them:
*/
+/* This is used as an interrupt vector when programming the APIC. */
#define NMI_VECTOR 0x02
-#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start at 0x20.
@@ -84,7 +84,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xf5
+/* 0xf5 - unused, was UV_BAU_MESSAGE */
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */
@@ -114,6 +114,9 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
+#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+
/*
* Size the maximum number of interrupts.
*
diff --git a/tools/arch/x86/include/asm/nops.h b/tools/arch/x86/include/asm/nops.h
index c1e5e818ba16..c5573eaa5bb9 100644
--- a/tools/arch/x86/include/asm/nops.h
+++ b/tools/arch/x86/include/asm/nops.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_NOPS_H
#define _ASM_X86_NOPS_H
+#include <asm/asm.h>
+
/*
* Define nops for use with alternative() and for tracing.
*/
@@ -57,20 +59,14 @@
#endif /* CONFIG_64BIT */
-#ifdef __ASSEMBLY__
-#define _ASM_MK_NOP(x) .byte x
-#else
-#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
-#endif
-
-#define ASM_NOP1 _ASM_MK_NOP(BYTES_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(BYTES_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(BYTES_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(BYTES_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(BYTES_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(BYTES_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(BYTES_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(BYTES_NOP8)
+#define ASM_NOP1 _ASM_BYTES(BYTES_NOP1)
+#define ASM_NOP2 _ASM_BYTES(BYTES_NOP2)
+#define ASM_NOP3 _ASM_BYTES(BYTES_NOP3)
+#define ASM_NOP4 _ASM_BYTES(BYTES_NOP4)
+#define ASM_NOP5 _ASM_BYTES(BYTES_NOP5)
+#define ASM_NOP6 _ASM_BYTES(BYTES_NOP6)
+#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
+#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
#define ASM_NOP_MAX 8
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 5a3022c8af82..0662f644aad9 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -437,6 +437,8 @@ struct kvm_vmx_nested_state_hdr {
__u16 flags;
} smm;
+ __u16 pad;
+
__u32 flags;
__u64 preemption_timer_deadline;
};
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
index 078cbd2ba651..de7f30f99af3 100644
--- a/tools/bootconfig/include/linux/bootconfig.h
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -4,4 +4,8 @@
#include "../../../../include/linux/bootconfig.h"
+#ifndef fallthrough
+# define fallthrough
+#endif
+
#endif
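The empty fallback above exists because the tools/ build does not pull in the kernel's compiler_attributes.h, where fallthrough normally expands to __attribute__((__fallthrough__)); defining it to nothing keeps annotated switch cases compiling either way. The construct it must accept looks like this (the state names and handlers are placeholders):

	switch (state) {
	case STATE_KEY:
		handle_key();
		fallthrough;	/* deliberate fall-through into node handling */
	case STATE_NODE:
		handle_node();
		break;
	}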
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 7362bef1a368..6cd6080cac04 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
}
/* TODO: Ensure the @path is an initramfs/initrd image */
if (fstat(fd, &stat) < 0) {
+ ret = -errno;
pr_err("Failed to get the size of %s\n", path);
goto out;
}
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 790944c35602..baee8591ac76 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -30,7 +30,8 @@ CGROUP COMMANDS
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
-| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** }
+| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
+| **sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@@ -106,6 +107,7 @@ DESCRIPTION
**getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
**getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
**getsockname6** call to getsockname(2) for an inet6 socket (since 5.8).
+ **sock_release** closing a userspace inet socket (since 5.9).
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
Detach *PROG* from the cgroup *CGROUP* and attach type
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 358c7309d419..fe1b38e7e887 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -44,7 +44,7 @@ PROG COMMANDS
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt** |
+| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
| }
| *ATTACH_TYPE* := {
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index b3073ae84018..d73232be1e99 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -136,7 +136,7 @@ endif
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
@@ -180,6 +180,9 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
+$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
+ $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
+
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index d67518bcbd44..cc33c5824a2f 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -478,7 +478,7 @@ _bpftool()
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt struct_ops \
+ cgroup/setsockopt cgroup/sock_release struct_ops \
fentry fexit freplace sk_lookup" -- \
"$cur" ) )
return 0
@@ -1021,7 +1021,7 @@ _bpftool()
device bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
getpeername4 getpeername6 getsockname4 getsockname6 \
sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
- setsockopt'
+ setsockopt sock_release'
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag name'
case $prev in
@@ -1032,7 +1032,7 @@ _bpftool()
ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
post_bind4|post_bind6|connect4|connect6|getpeername4|\
getpeername6|getsockname4|getsockname6|sendmsg4|sendmsg6|\
- recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt)
+ recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt|sock_release)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index d901cc1b904a..6e53b1d393f4 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -28,7 +28,8 @@
" connect6 | getpeername4 | getpeername6 |\n" \
" getsockname4 | getsockname6 | sendmsg4 |\n" \
" sendmsg6 | recvmsg4 | recvmsg6 |\n" \
- " sysctl | getsockopt | setsockopt }"
+ " sysctl | getsockopt | setsockopt |\n" \
+ " sock_release }"
static unsigned int query_flags;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 31ade77f5ef8..1d71ff8c52fa 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -18,6 +18,7 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
+#include <bpf/bpf_gen_internal.h>
#include "json_writer.h"
#include "main.h"
@@ -106,8 +107,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
if (strcmp(sec_name, ".data") == 0) {
sec_ident = "data";
+ strip_mods = true;
} else if (strcmp(sec_name, ".bss") == 0) {
sec_ident = "bss";
+ strip_mods = true;
} else if (strcmp(sec_name, ".rodata") == 0) {
sec_ident = "rodata";
strip_mods = true;
@@ -129,6 +132,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off);
@@ -268,6 +275,327 @@ static void codegen(const char *template, ...)
free(s);
}
+static void print_hex(const char *data, int data_sz)
+{
+ int i, len;
+
+ for (i = 0, len = 0; i < data_sz; i++) {
+ int w = data[i] ? 4 : 2;
+
+ len += w;
+ if (len > 78) {
+ printf("\\\n");
+ len = w;
+ }
+ if (!data[i])
+ printf("\\0");
+ else
+ printf("\\x%02x", (unsigned char)data[i]);
+ }
+}
+
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t map_sz;
+
+ map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
+ map_sz = roundup(map_sz, page_sz);
+ return map_sz;
+}
+
+static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *tp_name;
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__%2$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int prog_fd = skel->progs.%2$s.prog_fd; \n\
+ ", obj_name, bpf_program__name(prog));
+
+ switch (bpf_program__get_type(prog)) {
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
+ printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
+ break;
+ default:
+ printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
+ break;
+ }
+ codegen("\
+ \n\
+ \n\
+ if (fd > 0) \n\
+ skel->links.%1$s_fd = fd; \n\
+ return fd; \n\
+ } \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int ret = 0; \n\
+ \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
+ ", obj_name, bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ return ret < 0 ? ret : 0; \n\
+ } \n\
+ \n\
+ static inline void \n\
+ %1$s__detach(struct %1$s *skel) \n\
+ { \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->links.%1$s_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ } \n\
+ ");
+}
+
+static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+ struct bpf_map *map;
+
+ codegen("\
+ \n\
+ static void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ %1$s__detach(skel); \n\
+ ",
+ obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->progs.%1$s.prog_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ const char *ident;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+ if (bpf_map__is_internal(map) &&
+ (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ printf("\tmunmap(skel->%1$s, %2$zd);\n",
+ ident, bpf_map_mmap_sz(map));
+ codegen("\
+ \n\
+ skel_closenz(skel->maps.%1$s.map_fd); \n\
+ ", ident);
+ }
+ codegen("\
+ \n\
+ free(skel); \n\
+ } \n\
+ ",
+ obj_name);
+}
+
+static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
+{
+ struct bpf_object_load_attr load_attr = {};
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
+ struct bpf_map *map;
+ int err = 0;
+
+ err = bpf_object__gen_loader(obj, &opts);
+ if (err)
+ return err;
+
+ load_attr.obj = obj;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ load_attr.log_level = 1 + 2 + 4;
+
+ err = bpf_object__load_xattr(&load_attr);
+ if (err) {
+ p_err("failed to load object file");
+ goto out;
+ }
+ /* If there was no error during load then gen_loader_opts
+ * are populated with the loader program.
+ */
+
+ /* finish generating 'struct skel' */
+ codegen("\
+ \n\
+ }; \n\
+ ", obj_name);
+
+
+ codegen_attach_detach(obj, obj_name);
+
+ codegen_destroy(obj, obj_name);
+
+ codegen("\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = calloc(sizeof(*skel), 1); \n\
+ if (!skel) \n\
+ goto cleanup; \n\
+ skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
+ ",
+ obj_name, opts.data_sz);
+ bpf_object__for_each_map(map, obj) {
+ const char *ident;
+ const void *mmap_data = NULL;
+ size_t mmap_size = 0;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+
+ if (!bpf_map__is_internal(map) ||
+ !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ continue;
+
+ codegen("\
+ \n\
+ skel->%1$s = \n\
+ mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
+ if (skel->%1$s == (void *) -1) \n\
+ goto cleanup; \n\
+ memcpy(skel->%1$s, (void *)\"\\ \n\
+ ", ident, bpf_map_mmap_sz(map));
+ mmap_data = bpf_map__initial_value(map, &mmap_size);
+ print_hex(mmap_data, mmap_size);
+ printf("\", %2$zd);\n"
+ "\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
+ ident, mmap_size);
+ }
+ codegen("\
+ \n\
+ return skel; \n\
+ cleanup: \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__load(struct %1$s *skel) \n\
+ { \n\
+ struct bpf_load_and_run_opts opts = {}; \n\
+ int err; \n\
+ \n\
+ opts.ctx = (struct bpf_loader_ctx *)skel; \n\
+ opts.data_sz = %2$d; \n\
+ opts.data = (void *)\"\\ \n\
+ ",
+ obj_name, opts.data_sz);
+ print_hex(opts.data, opts.data_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ ");
+
+ codegen("\
+ \n\
+ opts.insns_sz = %d; \n\
+ opts.insns = (void *)\"\\ \n\
+ ",
+ opts.insns_sz);
+ print_hex(opts.insns, opts.insns_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ err = bpf_load_and_run(&opts); \n\
+ if (err < 0) \n\
+ return err; \n\
+ ", obj_name);
+ bpf_object__for_each_map(map, obj) {
+ const char *ident, *mmap_flags;
+
+ ident = get_map_ident(map);
+ if (!ident)
+ continue;
+
+ if (!bpf_map__is_internal(map) ||
+ !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+ continue;
+ if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
+ mmap_flags = "PROT_READ";
+ else
+ mmap_flags = "PROT_READ | PROT_WRITE";
+
+ printf("\tskel->%1$s =\n"
+ "\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
+ "\t\t\tskel->maps.%1$s.map_fd, 0);\n",
+ ident, bpf_map_mmap_sz(map), mmap_flags);
+ }
+ codegen("\
+ \n\
+ return 0; \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_and_load(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = %1$s__open(); \n\
+ if (!skel) \n\
+ return NULL; \n\
+ if (%1$s__load(skel)) { \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ return skel; \n\
+ } \n\
+ ", obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ return err;
+}
+
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
@@ -277,7 +605,7 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj = NULL;
const char *file, *ident;
struct bpf_program *prog;
- int fd, len, err = -1;
+ int fd, err = -1;
struct bpf_map *map;
struct btf *btf;
struct stat st;
@@ -359,7 +687,25 @@ static int do_skeleton(int argc, char **argv)
}
get_header_guard(header_guard, obj_name);
- codegen("\
+ if (use_loader) {
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ /* THIS FILE IS AUTOGENERATED! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <stdlib.h> \n\
+ #include <bpf/bpf.h> \n\
+ #include <bpf/skel_internal.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_loader_ctx ctx; \n\
+ ",
+ obj_name, header_guard
+ );
+ } else {
+ codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
@@ -367,6 +713,7 @@ static int do_skeleton(int argc, char **argv)
#ifndef %2$s \n\
#define %2$s \n\
\n\
+ #include <errno.h> \n\
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
@@ -375,7 +722,8 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj; \n\
",
obj_name, header_guard
- );
+ );
+ }
if (map_cnt) {
printf("\tstruct {\n");
@@ -383,7 +731,10 @@ static int do_skeleton(int argc, char **argv)
ident = get_map_ident(map);
if (!ident)
continue;
- printf("\t\tstruct bpf_map *%s;\n", ident);
+ if (use_loader)
+ printf("\t\tstruct bpf_map_desc %s;\n", ident);
+ else
+ printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
@@ -391,14 +742,22 @@ static int do_skeleton(int argc, char **argv)
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_program *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tstruct bpf_prog_desc %s;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_link *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tint %s_fd;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_link *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} links;\n");
}
@@ -409,6 +768,10 @@ static int do_skeleton(int argc, char **argv)
if (err)
goto out;
}
+ if (use_loader) {
+ err = gen_trace(obj, obj_name, header_guard);
+ goto out;
+ }
codegen("\
\n\
@@ -431,18 +794,23 @@ static int do_skeleton(int argc, char **argv)
%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
{ \n\
struct %1$s *obj; \n\
+ int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
- if (!obj) \n\
+ if (!obj) { \n\
+ errno = ENOMEM; \n\
return NULL; \n\
- if (%1$s__create_skeleton(obj)) \n\
- goto err; \n\
- if (bpf_object__open_skeleton(obj->skeleton, opts)) \n\
- goto err; \n\
+ } \n\
+ \n\
+ err = %1$s__create_skeleton(obj); \n\
+ err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\
+ if (err) \n\
+ goto err_out; \n\
\n\
return obj; \n\
- err: \n\
+ err_out: \n\
%1$s__destroy(obj); \n\
+ errno = -err; \n\
return NULL; \n\
} \n\
\n\
@@ -462,12 +830,15 @@ static int do_skeleton(int argc, char **argv)
%1$s__open_and_load(void) \n\
{ \n\
struct %1$s *obj; \n\
+ int err; \n\
\n\
obj = %1$s__open(); \n\
if (!obj) \n\
return NULL; \n\
- if (%1$s__load(obj)) { \n\
+ err = %1$s__load(obj); \n\
+ if (err) { \n\
%1$s__destroy(obj); \n\
+ errno = -err; \n\
return NULL; \n\
} \n\
return obj; \n\
@@ -498,7 +869,7 @@ static int do_skeleton(int argc, char **argv)
\n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) \n\
- return -1; \n\
+ goto err; \n\
obj->skeleton = s; \n\
\n\
s->sz = sizeof(*s); \n\
@@ -578,19 +949,7 @@ static int do_skeleton(int argc, char **argv)
file_sz);
/* embed contents of BPF object file */
- for (i = 0, len = 0; i < file_sz; i++) {
- int w = obj_data[i] ? 4 : 2;
-
- len += w;
- if (len > 78) {
- printf("\\\n");
- len = w;
- }
- if (!obj_data[i])
- printf("\\0");
- else
- printf("\\x%02x", (unsigned char)obj_data[i]);
- }
+ print_hex(obj_data, file_sz);
codegen("\
\n\
@@ -599,7 +958,7 @@ static int do_skeleton(int argc, char **argv)
return 0; \n\
err: \n\
bpf_object__destroy_skeleton(s); \n\
- return -1; \n\
+ return -ENOMEM; \n\
} \n\
\n\
#endif /* %s */ \n\
@@ -636,7 +995,7 @@ static int do_object(int argc, char **argv)
while (argc) {
file = GET_ARG();
- err = bpf_linker__add_file(linker, file);
+ err = bpf_linker__add_file(linker, file, NULL);
if (err) {
p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
goto out;
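Taken together, gen_trace() and the use_loader branches above generate a "light skeleton": instead of embedding the ELF and linking against libbpf's loader at runtime, the header carries hex-encoded loader instructions plus data, and bpf_load_and_run() replays them through the new syscall program type. A hedged sketch of the consumer side; the object name and header file are hypothetical, but the function names follow the codegen shown above:

	#include <errno.h>
	#include "myprog.skel.h"	/* produced by: bpftool gen skeleton -L myprog.bpf.o */

	int run_myprog(void)
	{
		struct myprog *skel;
		int err;

		skel = myprog__open_and_load();	/* mmaps data sections, runs the loader prog */
		if (!skel)
			return -errno;
		err = myprog__attach(skel);	/* auto-attach for raw_tp/tracing progs only */
		if (err < 0)
			goto out;
		/* ... read/write skel->bss, skel->data, skel->maps ... */
	out:
		myprog__destroy(skel);
		return err;
	}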
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d9afb730136a..3ddfd4843738 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -29,6 +29,7 @@ bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
+bool use_loader;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@@ -340,8 +341,10 @@ static int do_batch(int argc, char **argv)
n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
if (!n_argc)
continue;
- if (n_argc < 0)
+ if (n_argc < 0) {
+ err = n_argc;
goto err_close;
+ }
if (json_output) {
jsonw_start_object(json_wtr);
@@ -392,6 +395,7 @@ int main(int argc, char **argv)
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
+ { "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
@@ -409,7 +413,7 @@ int main(int argc, char **argv)
hash_init(link_table.table);
opterr = 0;
- while ((opt = getopt_long(argc, argv, "VhpjfmndB:",
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -452,6 +456,9 @@ int main(int argc, char **argv)
return -1;
}
break;
+ case 'L':
+ use_loader = true;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 76e91641262b..c1cf29798b99 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -90,6 +90,7 @@ extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
+extern bool use_loader;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 3f067d2d7584..cc48726740ad 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -16,6 +16,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
+#include <dirent.h>
#include <linux/err.h>
#include <linux/perf_event.h>
@@ -24,6 +25,8 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#include <bpf/bpf_gen_internal.h>
+#include <bpf/skel_internal.h>
#include "cfg.h"
#include "main.h"
@@ -1499,7 +1502,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
obj = bpf_object__open_file(file, &open_opts);
- if (IS_ERR_OR_NULL(obj)) {
+ if (libbpf_get_error(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
@@ -1645,8 +1648,110 @@ err_free_reuse_maps:
return -1;
}
+static int count_open_fds(void)
+{
+ DIR *dp = opendir("/proc/self/fd");
+ struct dirent *de;
+ int cnt = -3;
+
+ if (!dp)
+ return -1;
+
+ while ((de = readdir(dp)))
+ cnt++;
+
+ closedir(dp);
+ return cnt;
+}
+
+static int try_loader(struct gen_loader_opts *gen)
+{
+ struct bpf_load_and_run_opts opts = {};
+ struct bpf_loader_ctx *ctx;
+ int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
+ sizeof(struct bpf_prog_desc));
+ int log_buf_sz = (1u << 24) - 1;
+ int err, fds_before, fd_delta;
+ char *log_buf;
+
+ ctx = alloca(ctx_sz);
+ memset(ctx, 0, ctx_sz);
+ ctx->sz = ctx_sz;
+ ctx->log_level = 1;
+ ctx->log_size = log_buf_sz;
+ log_buf = malloc(log_buf_sz);
+ if (!log_buf)
+ return -ENOMEM;
+ ctx->log_buf = (long) log_buf;
+ opts.ctx = ctx;
+ opts.data = gen->data;
+ opts.data_sz = gen->data_sz;
+ opts.insns = gen->insns;
+ opts.insns_sz = gen->insns_sz;
+ fds_before = count_open_fds();
+ err = bpf_load_and_run(&opts);
+ fd_delta = count_open_fds() - fds_before;
+ if (err < 0) {
+ fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
+ if (fd_delta)
+ fprintf(stderr, "loader prog leaked %d FDs\n",
+ fd_delta);
+ }
+ free(log_buf);
+ return err;
+}
+
+static int do_loader(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
+ struct bpf_object_load_attr load_attr = {};
+ struct bpf_object *obj;
+ const char *file;
+ int err = 0;
+
+ if (!REQ_ARGS(1))
+ return -1;
+ file = GET_ARG();
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (libbpf_get_error(obj)) {
+ p_err("failed to open object file");
+ goto err_close_obj;
+ }
+
+ err = bpf_object__gen_loader(obj, &gen);
+ if (err)
+ goto err_close_obj;
+
+ load_attr.obj = obj;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ load_attr.log_level = 1 + 2 + 4;
+
+ err = bpf_object__load_xattr(&load_attr);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
+ }
+
+ if (verifier_logs) {
+ struct dump_data dd = {};
+
+ kernel_syms_load(&dd);
+ dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
+ kernel_syms_destroy(&dd);
+ }
+ err = try_loader(&gen);
+err_close_obj:
+ bpf_object__close(obj);
+ return err;
+}
+
static int do_load(int argc, char **argv)
{
+ if (use_loader)
+ return do_loader(argc, argv);
return load_with_options(argc, argv, true);
}
@@ -2138,7 +2243,7 @@ static int do_help(int argc, char **argv)
" cgroup/getpeername4 | cgroup/getpeername6 |\n"
" cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
" cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
- " cgroup/getsockopt | cgroup/setsockopt |\n"
+ " cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 6fc3e6f7f40c..f1f32e21d5cd 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -196,6 +196,9 @@ static const char *print_imm(void *private_data,
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_FUNC)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"subprog[%+d]", insn->imm);
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 7550fd9c3188..3ad9301b0f00 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -655,6 +655,9 @@ static int symbols_patch(struct object *obj)
if (sets_patch(obj))
return -1;
+ /* Set type to ensure endian translation occurs. */
+ obj->efile.idlist->d_type = ELF_T_WORD;
+
elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
err = elf_update(obj->efile.elf, ELF_C_WRITE);
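The d_type assignment above is what makes cross-endian builds work: libelf only byte-swaps a buffer on elf_update() when it knows the element type, and raw buffers default to ELF_T_BYTE, which is never translated. The pattern in isolation (scn standing for the section whose IDs were patched):

	Elf_Data *data = elf_getdata(scn, NULL);

	data->d_type = ELF_T_WORD;		/* 32-bit words: eligible for endian translation */
	elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
	elf_update(elf, ELF_C_WRITE);		/* emits target-endian words */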
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
index 719f18b1edf0..f1af27ce9f20 100755
--- a/tools/debugging/kernel-chktaint
+++ b/tools/debugging/kernel-chktaint
@@ -196,7 +196,7 @@ else
fi
echo "For a more detailed explanation of the various taint flags see"
-echo " Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel sources"
+echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources"
echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
echo "Raw taint value as int/string: $taint/'$out'"
#EOF#
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index ec6d85a81744..bf9252c7381e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -527,6 +527,15 @@ union bpf_iter_link_info {
* Look up an element with the given *key* in the map referred to
* by the file descriptor *fd*, and if found, delete the element.
*
+ * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
+ * types, the *flags* argument needs to be set to 0, but for other
+ * map types, it may be specified as:
+ *
+ * **BPF_F_LOCK**
+ * Look up and delete the value of a spin-locked map
+ * without returning the lock. This must be specified if
+ * the elements contain a spinlock.
+ *
* The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
* implement this command as a "pop" operation, deleting the top
* element rather than one corresponding to *key*.
@@ -536,6 +545,10 @@ union bpf_iter_link_info {
* This command is only valid for the following map types:
* * **BPF_MAP_TYPE_QUEUE**
* * **BPF_MAP_TYPE_STACK**
+ * * **BPF_MAP_TYPE_HASH**
+ * * **BPF_MAP_TYPE_PERCPU_HASH**
+ * * **BPF_MAP_TYPE_LRU_HASH**
+ * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
*
* Return
* Returns zero on success. On error, -1 is returned and *errno*
@@ -837,6 +850,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -937,6 +951,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@@ -979,6 +994,8 @@ enum bpf_attach_type {
BPF_SK_LOOKUP,
BPF_XDP,
BPF_SK_SKB_VERDICT,
+ BPF_SK_REUSEPORT_SELECT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
__MAX_BPF_ATTACH_TYPE
};
@@ -1097,8 +1114,8 @@ enum bpf_link_type {
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD
- * insn[0].imm: map fd
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: 0
* insn[0].off: 0
* insn[1].off: 0
@@ -1106,15 +1123,19 @@ enum bpf_link_type {
* verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
* insn[1].imm: offset into value
* insn[0].off: 0
* insn[1].off: 0
* ldimm64 rewrite: address of map[0]+offset
* verifier type: PTR_TO_MAP_VALUE
*/
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
* insn[0].imm: kernel btd id of VAR
* insn[1].imm: 0
@@ -1314,6 +1335,8 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
+ __u32 :32; /* pad */
+ __aligned_u64 fd_array; /* array of FDs */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -2534,8 +2557,12 @@ union bpf_attr {
* The lower two bits of *flags* are used as the return code if
* the map lookup fails. This is so that the return value can be
* one of the XDP program return codes up to **XDP_TX**, as chosen
- * by the caller. Any higher bits in the *flags* argument must be
- * unset.
+ * by the caller. The higher bits of *flags* can be set to
+ * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
+ *
+ * With BPF_F_BROADCAST the packet is broadcast to all the
+ * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
+ * interface is excluded from the broadcast.
*
* See also **bpf_redirect**\ (), which only supports redirecting
* to an ifindex, but doesn't require a map to do so.
@@ -4735,6 +4762,24 @@ union bpf_attr {
* be zero-terminated except when **str_size** is 0.
*
* Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ *
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -4903,6 +4948,9 @@ union bpf_attr {
FN(check_mtu), \
FN(for_each_map_elem), \
FN(snprintf), \
+ FN(sys_bpf), \
+ FN(btf_find_by_name_kind), \
+ FN(sys_close), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5080,6 +5128,12 @@ enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};
+/* Flags for bpf_redirect_map helper */
+enum {
+ BPF_F_BROADCAST = (1ULL << 3),
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+};
+
#define __bpf_md_ptr(type, name) \
union { \
type name; \
@@ -5364,6 +5418,20 @@ struct sk_reuseport_md {
__u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
+ /* When reuse->migrating_sk is NULL, it is selecting a sk for the
+ * new incoming connection request (e.g. selecting a listen sk for
+ * the received SYN in the TCP case). reuse->sk is one of the sk
+ * in the reuseport group. The bpf prog can use reuse->sk to learn
+ * the local listening ip/port without looking into the skb.
+ *
+ * When reuse->migrating_sk is not NULL, reuse->sk is closed and
+ * reuse->migrating_sk is the socket that needs to be migrated
+ * to another listening socket. migrating_sk could be a fullsock
+ * sk that is fully established or a reqsk that is in the middle
+ * of a 3-way handshake.
+ */
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(struct bpf_sock *, migrating_sk);
};
#define BPF_TAG_SIZE 8
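The documentation hunk above extends lookup-and-delete to the hash map family, with BPF_F_LOCK required whenever values embed a bpf_spin_lock. Combined with the bpf_map_lookup_and_delete_elem_flags() wrapper added to libbpf later in this diff, usage looks roughly like this (the value layout is an assumption):

	struct val {
		struct bpf_spin_lock lock;
		__s64 counter;
	};

	struct val v;
	__u32 key = 0;
	int err;

	/* atomically read and remove the element under its spin lock */
	err = bpf_map_lookup_and_delete_elem_flags(map_fd, &key, &v, BPF_F_LOCK);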
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index f44eb0a04afd..4c32e97dcdf0 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -185,7 +185,7 @@ struct fsxattr {
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 3fd9a7e9d90c..79d9c44d1ad7 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
* conversion after harvesting an entry. Also, it must not skip any
* dirty bits, so that dirty bits are always harvested in sequence.
*/
-#define KVM_DIRTY_GFN_F_DIRTY BIT(0)
-#define KVM_DIRTY_GFN_F_RESET BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
#define KVM_DIRTY_GFN_F_MASK 0x3
/*
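BIT() comes from the kernel-internal linux/bits.h and is off limits in UAPI headers, hence the switch to _BITUL() from the linux/const.h include added at the top of this hunk. For reference, that helper chain expands as:

	#define _AC(X, Y)	__AC(X, Y)	/* pastes the UL suffix in C, drops it in asm */
	#define _UL(x)		(_AC(x, UL))
	#define _BITUL(x)	(_UL(1) << (x))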
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index bf8143505c49..f92880a15645 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -464,7 +464,7 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
- * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
*/
__u64 sig_data;
};
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 18a9f59dc067..967d9c55323d 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -259,4 +259,12 @@ struct prctl_mm_map {
#define PR_PAC_SET_ENABLED_KEYS 60
#define PR_PAC_GET_ENABLED_KEYS 61
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+
#endif /* _LINUX_PRCTL_H */
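These constants are the userspace half of core scheduling: tasks that share a cookie may share an SMT core. A hedged usage sketch; the fourth argument is the pid scope (enum pid_type, 0 meaning a single task) and the fifth must be 0 for everything except GET, which takes a pointer to receive the cookie:

	#include <sys/prctl.h>

	/* give the calling task a fresh cookie, then push it to a child */
	prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0);
	prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, child_pid, 0, 0);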
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 9b057cc7650a..430f6874fa41 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,3 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o ringbuf.o strset.o linker.o
+ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index e43e1896cb4b..ec14aa725bb0 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -223,18 +223,14 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
+INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
+ bpf_helpers.h $(BPF_HELPER_DEFS) bpf_tracing.h \
+ bpf_endian.h bpf_core_read.h skel_internal.h
+
install_headers: $(BPF_HELPER_DEFS)
- $(call QUIET_INSTALL, headers) \
- $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,btf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf_common.h,$(prefix)/include/bpf,644); \
- $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
- $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \
- $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
+ $(call QUIET_INSTALL, headers) \
+ $(foreach hdr,$(INSTALL_HEADERS), \
+ $(call do_install,$(hdr),$(prefix)/include/bpf,644);)
install_pkgconfig: $(PC_FILE)
$(call QUIET_INSTALL, $(PC_FILE)) \
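Every return path in the bpf.c hunks that follow is funnelled through libbpf_err()/libbpf_err_errno() so callers can rely on errno being set alongside the negative return. A sketch of those helpers' semantics, paraphrasing libbpf's internal header (treat the exact bodies as an assumption):

	/* take a -errno style result, publish it via errno, pass it through */
	static inline int libbpf_err(int ret)
	{
		if (ret < 0)
			errno = -ret;
		return ret;
	}

	/* the syscall wrapper already set errno; normalise the return to -errno */
	static inline int libbpf_err_errno(int ret)
	{
		return ret < 0 ? -errno : ret;
	}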
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index bba48ff4c5c0..86dcac44f32f 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -80,6 +80,7 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
union bpf_attr attr;
+ int fd;
memset(&attr, '\0', sizeof(attr));
@@ -102,7 +103,8 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
else
attr.inner_map_fd = create_attr->inner_map_fd;
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
@@ -160,6 +162,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
__u32 map_flags, int node)
{
union bpf_attr attr;
+ int fd;
memset(&attr, '\0', sizeof(attr));
@@ -178,7 +181,8 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
attr.numa_node = node;
}
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
@@ -222,10 +226,10 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
int fd;
if (!load_attr->log_buf != !load_attr->log_buf_sz)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
@@ -281,8 +285,10 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
load_attr->func_info_cnt,
load_attr->func_info_rec_size,
attr.func_info_rec_size);
- if (!finfo)
+ if (!finfo) {
+ errno = E2BIG;
goto done;
+ }
attr.func_info = ptr_to_u64(finfo);
attr.func_info_rec_size = load_attr->func_info_rec_size;
@@ -293,8 +299,10 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
load_attr->line_info_cnt,
load_attr->line_info_rec_size,
attr.line_info_rec_size);
- if (!linfo)
+ if (!linfo) {
+ errno = E2BIG;
goto done;
+ }
attr.line_info = ptr_to_u64(linfo);
attr.line_info_rec_size = load_attr->line_info_rec_size;
@@ -318,9 +326,10 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
+ /* free() doesn't affect errno, so we don't need to restore it */
free(finfo);
free(linfo);
- return fd;
+ return libbpf_err_errno(fd);
}
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
@@ -329,7 +338,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
struct bpf_prog_load_params p = {};
if (!load_attr || !log_buf != !log_buf_sz)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
p.prog_type = load_attr->prog_type;
p.expected_attach_type = load_attr->expected_attach_type;
@@ -391,6 +400,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
int log_level)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
@@ -404,13 +414,15 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
- return sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
@@ -418,24 +430,28 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
attr.value = ptr_to_u64(value);
attr.flags = flags;
- return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
- return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
@@ -443,17 +459,33 @@ int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
attr.value = ptr_to_u64(value);
attr.flags = flags;
- return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+
+ ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
+}
+
+int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
+{
+ union bpf_attr attr;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
+ attr.flags = flags;
return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}
@@ -461,34 +493,40 @@ int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
- return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
- return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_freeze(int fd)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
- return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
@@ -500,7 +538,7 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
int ret;
if (!OPTS_VALID(opts, bpf_map_batch_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.batch.map_fd = fd;
@@ -515,7 +553,7 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
ret = sys_bpf(cmd, &attr, sizeof(attr));
*count = attr.batch.count;
- return ret;
+ return libbpf_err_errno(ret);
}
int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
@@ -552,22 +590,26 @@ int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
- return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_obj_get(const char *pathname)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
- return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
@@ -585,9 +627,10 @@ int bpf_prog_attach_xattr(int prog_fd, int target_fd,
const struct bpf_prog_attach_opts *opts)
{
union bpf_attr attr;
+ int ret;
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
@@ -596,30 +639,35 @@ int bpf_prog_attach_xattr(int prog_fd, int target_fd,
attr.attach_flags = OPTS_GET(opts, flags, 0);
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
- return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_type = type;
- return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
- return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_link_create(int prog_fd, int target_fd,
@@ -628,15 +676,16 @@ int bpf_link_create(int prog_fd, int target_fd,
{
__u32 target_btf_id, iter_info_len;
union bpf_attr attr;
+ int fd;
if (!OPTS_VALID(opts, bpf_link_create_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
iter_info_len = OPTS_GET(opts, iter_info_len, 0);
target_btf_id = OPTS_GET(opts, target_btf_id, 0);
if (iter_info_len && target_btf_id)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.link_create.prog_fd = prog_fd;
@@ -652,26 +701,30 @@ int bpf_link_create(int prog_fd, int target_fd,
attr.link_create.target_btf_id = target_btf_id;
}
- return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_link_detach(int link_fd)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.link_detach.link_fd = link_fd;
- return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_link_update(int link_fd, int new_prog_fd,
const struct bpf_link_update_opts *opts)
{
union bpf_attr attr;
+ int ret;
if (!OPTS_VALID(opts, bpf_link_update_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.link_update.link_fd = link_fd;
@@ -679,17 +732,20 @@ int bpf_link_update(int link_fd, int new_prog_fd,
attr.link_update.flags = OPTS_GET(opts, flags, 0);
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
- return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_iter_create(int link_fd)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.iter_create.link_fd = link_fd;
- return sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
@@ -706,10 +762,12 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
attr.query.prog_ids = ptr_to_u64(prog_ids);
ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
+
if (attach_flags)
*attach_flags = attr.query.attach_flags;
*prog_cnt = attr.query.prog_cnt;
- return ret;
+
+ return libbpf_err_errno(ret);
}
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
@@ -727,13 +785,15 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
attr.test.repeat = repeat;
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+
if (size_out)
*size_out = attr.test.data_size_out;
if (retval)
*retval = attr.test.retval;
if (duration)
*duration = attr.test.duration;
- return ret;
+
+ return libbpf_err_errno(ret);
}
int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
@@ -742,7 +802,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
int ret;
if (!test_attr->data_out && test_attr->data_size_out > 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
@@ -757,11 +817,13 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
attr.test.repeat = test_attr->repeat;
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+
test_attr->data_size_out = attr.test.data_size_out;
test_attr->ctx_size_out = attr.test.ctx_size_out;
test_attr->retval = attr.test.retval;
test_attr->duration = attr.test.duration;
- return ret;
+
+ return libbpf_err_errno(ret);
}
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
@@ -770,7 +832,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
int ret;
if (!OPTS_VALID(opts, bpf_test_run_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
@@ -788,11 +850,13 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+
OPTS_SET(opts, data_size_out, attr.test.data_size_out);
OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
OPTS_SET(opts, duration, attr.test.duration);
OPTS_SET(opts, retval, attr.test.retval);
- return ret;
+
+ return libbpf_err_errno(ret);
}
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
@@ -807,7 +871,7 @@ static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
if (!err)
*next_id = attr.next_id;
- return err;
+ return libbpf_err_errno(err);
}
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
@@ -833,41 +897,49 @@ int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
int bpf_prog_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
- return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_map_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.map_id = id;
- return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_btf_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
- return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_link_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.link_id = id;
- return sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
@@ -881,21 +953,24 @@ int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
attr.info.info = ptr_to_u64(info);
err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
+
if (!err)
*info_len = attr.info.info_len;
- return err;
+ return libbpf_err_errno(err);
}
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
- return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
@@ -915,12 +990,13 @@ retry:
}
fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
- if (fd == -1 && !do_log && log_buf && log_buf_size) {
+
+ if (fd < 0 && !do_log && log_buf && log_buf_size) {
do_log = true;
goto retry;
}
- return fd;
+ return libbpf_err_errno(fd);
}
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
@@ -937,37 +1013,42 @@ int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
attr.task_fd_query.buf_len = *buf_len;
err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
+
*buf_len = attr.task_fd_query.buf_len;
*prog_id = attr.task_fd_query.prog_id;
*fd_type = attr.task_fd_query.fd_type;
*probe_offset = attr.task_fd_query.probe_offset;
*probe_addr = attr.task_fd_query.probe_addr;
- return err;
+ return libbpf_err_errno(err);
}
int bpf_enable_stats(enum bpf_stats_type type)
{
union bpf_attr attr;
+ int fd;
memset(&attr, 0, sizeof(attr));
attr.enable_stats.type = type;
- return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
+ return libbpf_err_errno(fd);
}
int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts)
{
union bpf_attr attr;
+ int ret;
if (!OPTS_VALID(opts, bpf_prog_bind_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr));
attr.prog_bind_map.prog_fd = prog_fd;
attr.prog_bind_map.map_fd = map_fd;
attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
- return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 875dde20d56e..4f758f8f50cd 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -124,6 +124,8 @@ LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
__u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
void *value);
+LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key,
+ void *value, __u64 flags);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);
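A hedged sketch of the new flags variant declared above (map fd, key, and value buffer are assumed to be set up; BPF_F_LOCK is one plausible flag, used with spin-lock-protected map values):

	__u64 flags = BPF_F_LOCK;
	int err = bpf_map_lookup_and_delete_elem_flags(map_fd, &key, &value, flags);

	if (err < 0)
		fprintf(stderr, "lookup_and_delete: %s\n", strerror(errno));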
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
new file mode 100644
index 000000000000..615400391e57
--- /dev/null
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __BPF_GEN_INTERNAL_H
+#define __BPF_GEN_INTERNAL_H
+
+struct ksym_relo_desc {
+ const char *name;
+ int kind;
+ int insn_idx;
+};
+
+struct bpf_gen {
+ struct gen_loader_opts *opts;
+ void *data_start;
+ void *data_cur;
+ void *insn_start;
+ void *insn_cur;
+ ssize_t cleanup_label;
+ __u32 nr_progs;
+ __u32 nr_maps;
+ int log_level;
+ int error;
+ struct ksym_relo_desc *relos;
+ int relo_cnt;
+ char attach_target[128];
+ int attach_kind;
+};
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level);
+int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__free(struct bpf_gen *gen);
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
+void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
+struct bpf_prog_load_params;
+void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
+
+#endif
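The header only declares the generator surface; a rough driving sequence, assuming the caller heap-allocates the struct and supplies a filled-in struct gen_loader_opts, would be:

	struct bpf_gen *gen = calloc(1, sizeof(*gen));
	int err;

	if (!gen)
		return -ENOMEM;
	gen->opts = opts;                      /* caller-provided gen_loader_opts */
	bpf_gen__init(gen, 1 /* log_level */);
	bpf_gen__load_btf(gen, btf_data, btf_size);
	bpf_gen__map_create(gen, &map_attr, 0 /* map_idx */);
	bpf_gen__prog_load(gen, &load_attr, 0 /* prog_idx */);
	err = bpf_gen__finish(gen);            /* on success, opts->insns/opts->data are filled */
	/* consume opts->insns and opts->data before freeing */
	bpf_gen__free(gen);                    /* releases the insn/data buffers and gen itself */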
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 9720dc0b4605..b9987c3efa3c 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -158,4 +158,70 @@ enum libbpf_tristate {
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
+#ifndef ___bpf_concat
+#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#endif
+#ifndef ___bpf_narg
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+
+#define ___bpf_fill0(arr, p, x) do {} while (0)
+#define ___bpf_fill1(arr, p, x) arr[p] = x
+#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
+#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
+#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
+#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
+#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
+#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
+#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
+#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
+#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
+#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
+#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
+#define ___bpf_fill(arr, args...) \
+ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
+
+/*
+ * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+ * in a structure.
+ */
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/*
+ * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
+ * an array of u64.
+ */
+#define BPF_SNPRINTF(out, out_size, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_snprintf(out, out_size, ___fmt, \
+ ___param, sizeof(___param)); \
+})
+
#endif
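Both macros are intended for BPF program code; a small iterator sketch (program name and the use of vmlinux.h are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	char buf[32];

	if (!task)
		return 0;
	/* variadic args are packed into the hidden u64 array by ___bpf_fill */
	BPF_SEQ_PRINTF(seq, "pid %d comm %s\n", task->pid, task->comm);
	BPF_SNPRINTF(buf, sizeof(buf), "pid=%d", task->pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";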
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 3ed1a27b5f7c..5c503096ef43 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -106,7 +106,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
nr_linfo = info->nr_line_info;
if (!nr_linfo)
- return NULL;
+ return errno = EINVAL, NULL;
/*
* The min size that bpf_prog_linfo has to access for
@@ -114,11 +114,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
*/
if (info->line_info_rec_size <
offsetof(struct bpf_line_info, file_name_off))
- return NULL;
+ return errno = EINVAL, NULL;
prog_linfo = calloc(1, sizeof(*prog_linfo));
if (!prog_linfo)
- return NULL;
+ return errno = ENOMEM, NULL;
/* Copy xlated line_info */
prog_linfo->nr_linfo = nr_linfo;
@@ -174,7 +174,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
err_free:
bpf_prog_linfo__free(prog_linfo);
- return NULL;
+ return errno = EINVAL, NULL;
}
const struct bpf_line_info *
@@ -186,11 +186,11 @@ bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
const __u64 *jited_linfo;
if (func_idx >= prog_linfo->nr_jited_func)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx];
if (nr_skip >= nr_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip;
jited_rec_size = prog_linfo->jited_rec_size;
@@ -198,7 +198,7 @@ bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
(start * jited_rec_size);
jited_linfo = raw_jited_linfo;
if (addr < *jited_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo -= nr_skip;
rec_size = prog_linfo->rec_size;
@@ -225,13 +225,13 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
nr_linfo = prog_linfo->nr_linfo;
if (nr_skip >= nr_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
rec_size = prog_linfo->rec_size;
raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size);
linfo = raw_linfo;
if (insn_off < linfo->insn_off)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo -= nr_skip;
for (i = 0; i < nr_linfo; i++) {
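For callers of these NULL-returning APIs, errno now distinguishes the failure mode. A hedged sketch:

	struct bpf_prog_linfo *linfo = bpf_prog_linfo__new(info);

	if (!linfo) {
		/* per the conversions above: EINVAL for bad or missing
		 * line info, ENOMEM on allocation failure
		 */
		fprintf(stderr, "prog_linfo: %s\n", strerror(errno));
		return -errno;
	}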
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 8c954ebc0c7c..d6bfbe009296 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -25,26 +25,35 @@
#define bpf_target_sparc
#define bpf_target_defined
#else
- #undef bpf_target_defined
-#endif
/* Fall back to what the compiler says */
-#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
+ #define bpf_target_defined
#elif defined(__s390__)
#define bpf_target_s390
+ #define bpf_target_defined
#elif defined(__arm__)
#define bpf_target_arm
+ #define bpf_target_defined
#elif defined(__aarch64__)
#define bpf_target_arm64
+ #define bpf_target_defined
#elif defined(__mips__)
#define bpf_target_mips
+ #define bpf_target_defined
#elif defined(__powerpc__)
#define bpf_target_powerpc
+ #define bpf_target_defined
#elif defined(__sparc__)
#define bpf_target_sparc
+ #define bpf_target_defined
+#endif /* no compiler target */
+
#endif
+
+#ifndef __BPF_TARGET_MISSING
+#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
#endif
#if defined(bpf_target_x86)
@@ -287,7 +296,7 @@ struct pt_regs;
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#else
+#elif defined(bpf_target_defined)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
@@ -295,13 +304,48 @@ struct pt_regs;
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
+#if !defined(bpf_target_defined)
+
+#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#endif /* !defined(bpf_target_defined) */
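With these stubs in place, a program that touches pt_regs without any target arch no longer miscompiles silently; a sketch like the one below, built with neither -D__TARGET_ARCH_xxx nor a recognized compiler target, trips the _Pragma and fails with the "Must specify a BPF target arch" diagnostic:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int probe(struct pt_regs *ctx)
{
	long dfd = PT_REGS_PARM1(ctx); /* expands to the GCC error pragma */

	return 0;
}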
+
+#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#endif
+#ifndef ___bpf_narg
#define ___bpf_narg(...) \
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-#define ___bpf_empty(...) \
- ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+#endif
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
@@ -413,56 +457,4 @@ typeof(name(0)) name(struct pt_regs *ctx) \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
-#define ___bpf_fill0(arr, p, x) do {} while (0)
-#define ___bpf_fill1(arr, p, x) arr[p] = x
-#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
-#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
-#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
-#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
-#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
-#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
-#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
-#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
-#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
-#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
-#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
-#define ___bpf_fill(arr, args...) \
- ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
-
-/*
- * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
- * in a structure.
- */
-#define BPF_SEQ_PRINTF(seq, fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
- ___param, sizeof(___param)); \
-})
-
-/*
- * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
- * an array of u64.
- */
-#define BPF_SNPRINTF(out, out_size, fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_snprintf(out, out_size, ___fmt, \
- ___param, sizeof(___param)); \
-})
-
#endif
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d57e13a13798..b46760b93bb4 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -443,7 +443,7 @@ struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id >= btf->start_id + btf->nr_types)
- return NULL;
+ return errno = EINVAL, NULL;
return btf_type_by_id((struct btf *)btf, type_id);
}
@@ -510,7 +510,7 @@ size_t btf__pointer_size(const struct btf *btf)
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
if (ptr_sz != 4 && ptr_sz != 8)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
btf->ptr_sz = ptr_sz;
return 0;
}
@@ -537,7 +537,7 @@ enum btf_endianness btf__endianness(const struct btf *btf)
int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
{
if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
if (!btf->swapped_endian) {
@@ -568,8 +568,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
int i;
t = btf__type_by_id(btf, type_id);
- for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
- i++) {
+ for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_STRUCT:
@@ -592,12 +591,12 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
case BTF_KIND_ARRAY:
array = btf_array(t);
if (nelems && array->nelems > UINT32_MAX / nelems)
- return -E2BIG;
+ return libbpf_err(-E2BIG);
nelems *= array->nelems;
type_id = array->type;
break;
default:
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
t = btf__type_by_id(btf, type_id);
@@ -605,9 +604,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
done:
if (size < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (nelems && size > UINT32_MAX / nelems)
- return -E2BIG;
+ return libbpf_err(-E2BIG);
return nelems * size;
}
@@ -640,7 +639,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
for (i = 0; i < vlen; i++, m++) {
align = btf__align_of(btf, m->type);
if (align <= 0)
- return align;
+ return libbpf_err(align);
max_align = max(max_align, align);
}
@@ -648,7 +647,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
}
default:
pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
- return 0;
+ return errno = EINVAL, 0;
}
}
@@ -667,7 +666,7 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
}
if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
return type_id;
}
@@ -687,7 +686,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
return i;
}
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
@@ -709,7 +708,7 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return i;
}
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
static bool btf_is_modifiable(const struct btf *btf)
@@ -785,12 +784,12 @@ static struct btf *btf_new_empty(struct btf *base_btf)
struct btf *btf__new_empty(void)
{
- return btf_new_empty(NULL);
+ return libbpf_ptr(btf_new_empty(NULL));
}
struct btf *btf__new_empty_split(struct btf *base_btf)
{
- return btf_new_empty(base_btf);
+ return libbpf_ptr(btf_new_empty(base_btf));
}
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
@@ -846,7 +845,7 @@ done:
struct btf *btf__new(const void *data, __u32 size)
{
- return btf_new(data, size, NULL);
+ return libbpf_ptr(btf_new(data, size, NULL));
}
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
@@ -937,7 +936,8 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
goto done;
}
btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
- if (IS_ERR(btf))
+ err = libbpf_get_error(btf);
+ if (err)
goto done;
switch (gelf_getclass(elf)) {
@@ -953,9 +953,9 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
}
if (btf_ext && btf_ext_data) {
- *btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size);
- if (IS_ERR(*btf_ext))
+ *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
+ err = libbpf_get_error(*btf_ext);
+ if (err)
goto done;
} else if (btf_ext) {
*btf_ext = NULL;
@@ -965,30 +965,24 @@ done:
elf_end(elf);
close(fd);
- if (err)
- return ERR_PTR(err);
- /*
- * btf is always parsed before btf_ext, so no need to clean up
- * btf_ext, if btf loading failed
- */
- if (IS_ERR(btf))
+ if (!err)
return btf;
- if (btf_ext && IS_ERR(*btf_ext)) {
- btf__free(btf);
- err = PTR_ERR(*btf_ext);
- return ERR_PTR(err);
- }
- return btf;
+
+ if (btf_ext)
+ btf_ext__free(*btf_ext);
+ btf__free(btf);
+
+ return ERR_PTR(err);
}
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
- return btf_parse_elf(path, NULL, btf_ext);
+ return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}
struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
- return btf_parse_elf(path, base_btf, NULL);
+ return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}
static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
@@ -1056,36 +1050,39 @@ err_out:
struct btf *btf__parse_raw(const char *path)
{
- return btf_parse_raw(path, NULL);
+ return libbpf_ptr(btf_parse_raw(path, NULL));
}
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
- return btf_parse_raw(path, base_btf);
+ return libbpf_ptr(btf_parse_raw(path, base_btf));
}
static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
{
struct btf *btf;
+ int err;
if (btf_ext)
*btf_ext = NULL;
btf = btf_parse_raw(path, base_btf);
- if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO)
+ err = libbpf_get_error(btf);
+ if (!err)
return btf;
-
+ if (err != -EPROTO)
+ return ERR_PTR(err);
return btf_parse_elf(path, base_btf, btf_ext);
}
struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
- return btf_parse(path, NULL, btf_ext);
+ return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}
struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
- return btf_parse(path, base_btf, NULL);
+ return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
static int compare_vsi_off(const void *_a, const void *_b)
@@ -1178,7 +1175,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
}
}
- return err;
+ return libbpf_err(err);
}
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
@@ -1191,13 +1188,13 @@ int btf__load(struct btf *btf)
int err = 0;
if (btf->fd >= 0)
- return -EEXIST;
+ return libbpf_err(-EEXIST);
retry_load:
if (log_buf_size) {
log_buf = malloc(log_buf_size);
if (!log_buf)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
*log_buf = 0;
}
@@ -1229,7 +1226,7 @@ retry_load:
done:
free(log_buf);
- return err;
+ return libbpf_err(err);
}
int btf__fd(const struct btf *btf)
@@ -1305,7 +1302,7 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
if (!data)
- return NULL;
+ return errno = ENOMEM, NULL;
btf->raw_size = data_sz;
if (btf->swapped_endian)
@@ -1323,7 +1320,7 @@ const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
else if (offset - btf->start_str_off < btf->hdr->str_len)
return btf_strs_data(btf) + (offset - btf->start_str_off);
else
- return NULL;
+ return errno = EINVAL, NULL;
}
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
@@ -1388,17 +1385,20 @@ exit_free:
int btf__get_from_id(__u32 id, struct btf **btf)
{
struct btf *res;
- int btf_fd;
+ int err, btf_fd;
*btf = NULL;
btf_fd = bpf_btf_get_fd_by_id(id);
if (btf_fd < 0)
- return -errno;
+ return libbpf_err(-errno);
res = btf_get_from_fd(btf_fd, NULL);
+ err = libbpf_get_error(res);
+
close(btf_fd);
- if (IS_ERR(res))
- return PTR_ERR(res);
+
+ if (err)
+ return libbpf_err(err);
*btf = res;
return 0;
@@ -1415,31 +1415,30 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
__s64 key_size, value_size;
__s32 container_id;
- if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
- max_name) {
+ if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) {
pr_warn("map:%s length of '____btf_map_%s' is too long\n",
map_name, map_name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
container_id = btf__find_by_name(btf, container_name);
if (container_id < 0) {
pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
map_name, container_name);
- return container_id;
+ return libbpf_err(container_id);
}
container_type = btf__type_by_id(btf, container_id);
if (!container_type) {
pr_warn("map:%s cannot find BTF type for container_id:%u\n",
map_name, container_id);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
pr_warn("map:%s container_name:%s is an invalid container struct\n",
map_name, container_name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
key = btf_members(container_type);
@@ -1448,25 +1447,25 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
key_size = btf__resolve_size(btf, key->type);
if (key_size < 0) {
pr_warn("map:%s invalid BTF key_type_size\n", map_name);
- return key_size;
+ return libbpf_err(key_size);
}
if (expected_key_size != key_size) {
pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
map_name, (__u32)key_size, expected_key_size);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
value_size = btf__resolve_size(btf, value->type);
if (value_size < 0) {
pr_warn("map:%s invalid BTF value_type_size\n", map_name);
- return value_size;
+ return libbpf_err(value_size);
}
if (expected_value_size != value_size) {
pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
map_name, (__u32)value_size, expected_value_size);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
*key_type_id = key->type;
@@ -1563,11 +1562,11 @@ int btf__find_str(struct btf *btf, const char *s)
/* BTF needs to be in a modifiable state to build string lookup index */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
off = strset__find_str(btf->strs_set, s);
if (off < 0)
- return off;
+ return libbpf_err(off);
return btf->start_str_off + off;
}
@@ -1588,11 +1587,11 @@ int btf__add_str(struct btf *btf, const char *s)
}
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
off = strset__add_str(btf->strs_set, s);
if (off < 0)
- return off;
+ return libbpf_err(off);
btf->hdr->str_len = strset__data_size(btf->strs_set);
@@ -1616,7 +1615,7 @@ static int btf_commit_type(struct btf *btf, int data_sz)
err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
if (err)
- return err;
+ return libbpf_err(err);
btf->hdr->type_len += data_sz;
btf->hdr->str_off += data_sz;
@@ -1653,21 +1652,21 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
sz = btf_type_size(src_type);
if (sz < 0)
- return sz;
+ return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
memcpy(t, src_type, sz);
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
if (err)
- return err;
+ return libbpf_err(err);
return btf_commit_type(btf, sz);
}
@@ -1688,21 +1687,21 @@ int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding
/* non-empty name */
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* byte_sz must be power of 2 */
if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type) + sizeof(int);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
/* if something goes wrong later, we might end up with an extra string,
* but that shouldn't be a problem, because BTF can't be constructed
@@ -1736,20 +1735,20 @@ int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
/* non-empty name */
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* byte_sz must be one of the explicitly allowed values */
if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
byte_sz != 16)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
name_off = btf__add_str(btf, name);
if (name_off < 0)
@@ -1780,15 +1779,15 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref
int sz, name_off = 0;
if (validate_type_id(ref_type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name);
@@ -1831,15 +1830,15 @@ int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 n
int sz;
if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type) + sizeof(struct btf_array);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
t->name_off = 0;
t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
@@ -1860,12 +1859,12 @@ static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32
int sz, name_off = 0;
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name);
@@ -1943,30 +1942,30 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
/* last type should be union/struct */
if (btf->nr_types == 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
t = btf_last_type(btf);
if (!btf_is_composite(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (validate_type_id(type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* best-effort bit field offset/size enforcement */
is_bitfield = bit_size || (bit_offset % 8 != 0);
if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* only offset 0 is allowed for unions */
if (btf_is_union(t) && bit_offset)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_member);
m = btf_add_type_mem(btf, sz);
if (!m)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name);
@@ -2008,15 +2007,15 @@ int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
/* byte_sz must be power of 2 */
if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name);
@@ -2048,25 +2047,25 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
/* last type should be BTF_KIND_ENUM */
if (btf->nr_types == 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
t = btf_last_type(btf);
if (!btf_is_enum(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* non-empty name */
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (value < INT_MIN || value > UINT_MAX)
- return -E2BIG;
+ return libbpf_err(-E2BIG);
/* decompose and invalidate raw data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_enum);
v = btf_add_type_mem(btf, sz);
if (!v)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
name_off = btf__add_str(btf, name);
if (name_off < 0)
@@ -2096,7 +2095,7 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
{
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
switch (fwd_kind) {
case BTF_FWD_STRUCT:
@@ -2117,7 +2116,7 @@ int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
*/
return btf__add_enum(btf, name, sizeof(int));
default:
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
}
@@ -2132,7 +2131,7 @@ int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
{
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
}
@@ -2187,10 +2186,10 @@ int btf__add_func(struct btf *btf, const char *name,
int id;
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
linkage != BTF_FUNC_EXTERN)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
if (id > 0) {
@@ -2198,7 +2197,7 @@ int btf__add_func(struct btf *btf, const char *name,
t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
}
- return id;
+ return libbpf_err(id);
}
/*
@@ -2219,15 +2218,15 @@ int btf__add_func_proto(struct btf *btf, int ret_type_id)
int sz;
if (validate_type_id(ret_type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
/* start out with vlen=0; this will be adjusted when adding enum
* values, if necessary
@@ -2254,23 +2253,23 @@ int btf__add_func_param(struct btf *btf, const char *name, int type_id)
int sz, name_off = 0;
if (validate_type_id(type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* last type should be BTF_KIND_FUNC_PROTO */
if (btf->nr_types == 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
t = btf_last_type(btf);
if (!btf_is_func_proto(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_param);
p = btf_add_type_mem(btf, sz);
if (!p)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name);
@@ -2308,21 +2307,21 @@ int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
/* non-empty name */
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
linkage != BTF_VAR_GLOBAL_EXTERN)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (validate_type_id(type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type) + sizeof(struct btf_var);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
name_off = btf__add_str(btf, name);
if (name_off < 0)
@@ -2357,15 +2356,15 @@ int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
/* non-empty name */
if (!name || !name[0])
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz);
if (!t)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
name_off = btf__add_str(btf, name);
if (name_off < 0)
@@ -2397,22 +2396,22 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
/* last type should be BTF_KIND_DATASEC */
if (btf->nr_types == 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
t = btf_last_type(btf);
if (!btf_is_datasec(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (validate_type_id(var_type_id))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* decompose and invalidate raw data */
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_var_secinfo);
v = btf_add_type_mem(btf, sz);
if (!v)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
v->type = var_type_id;
v->offset = offset;
@@ -2614,11 +2613,11 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
err = btf_ext_parse_hdr(data, size);
if (err)
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
btf_ext = calloc(1, sizeof(struct btf_ext));
if (!btf_ext)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
btf_ext->data_size = size;
btf_ext->data = malloc(size);
@@ -2628,9 +2627,11 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
- if (btf_ext->hdr->hdr_len <
- offsetofend(struct btf_ext_header, line_info_len))
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
+ err = -EINVAL;
goto done;
+ }
+
err = btf_ext_setup_func_info(btf_ext);
if (err)
goto done;
@@ -2639,8 +2640,11 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (err)
goto done;
- if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) {
+ err = -EINVAL;
goto done;
+ }
+
err = btf_ext_setup_core_relos(btf_ext);
if (err)
goto done;
@@ -2648,7 +2652,7 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
done:
if (err) {
btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
return btf_ext;
@@ -2687,7 +2691,7 @@ static int btf_ext_reloc_info(const struct btf *btf,
existing_len = (*cnt) * record_size;
data = realloc(*info, existing_len + records_len);
if (!data)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
memcpy(data + existing_len, sinfo->data, records_len);
/* adjust insn_off only, the rest data will be passed
@@ -2697,15 +2701,14 @@ static int btf_ext_reloc_info(const struct btf *btf,
__u32 *insn_off;
insn_off = data + existing_len + (i * record_size);
- *insn_off = *insn_off / sizeof(struct bpf_insn) +
- insns_cnt;
+ *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
}
*info = data;
*cnt += sinfo->num_info;
return 0;
}
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
int btf_ext__reloc_func_info(const struct btf *btf,
@@ -2894,11 +2897,11 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
if (IS_ERR(d)) {
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (btf_ensure_modifiable(btf))
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
err = btf_dedup_prep(d);
if (err) {
@@ -2938,7 +2941,7 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
done:
btf_dedup_free(d);
- return err;
+ return libbpf_err(err);
}
#define BTF_UNPROCESSED_ID ((__u32)-1)
@@ -4411,7 +4414,7 @@ struct btf *libbpf_find_kernel_btf(void)
char path[PATH_MAX + 1];
struct utsname buf;
struct btf *btf;
- int i;
+ int i, err;
uname(&buf);
@@ -4425,17 +4428,16 @@ struct btf *libbpf_find_kernel_btf(void)
btf = btf__parse_raw(path);
else
btf = btf__parse_elf(path, NULL);
-
- pr_debug("loading kernel BTF '%s': %ld\n",
- path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
- if (IS_ERR(btf))
+ err = libbpf_get_error(btf);
+ pr_debug("loading kernel BTF '%s': %d\n", path, err);
+ if (err)
continue;
return btf;
}
pr_warn("failed to find valid kernel BTF\n");
- return ERR_PTR(-ESRCH);
+ return libbpf_err_ptr(-ESRCH);
}
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
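For the pointer-returning BTF APIs converted above, libbpf_get_error() keeps working in both error modes, and in the new mode a plain NULL check plus errno suffices. A sketch:

	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	long err = libbpf_get_error(btf);  /* handles ERR_PTR- and NULL-style errors */

	if (err) {
		fprintf(stderr, "parse: %s\n", strerror(-err));
		return err;
	}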
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 5e2809d685bf..5dc6b5172bb3 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -128,7 +128,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d = calloc(1, sizeof(struct btf_dump));
if (!d)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
d->btf = btf;
d->btf_ext = btf_ext;
@@ -156,7 +156,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
return d;
err:
btf_dump__free(d);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
static int btf_dump_resize(struct btf_dump *d)
@@ -236,16 +236,16 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
int err, i;
if (id > btf__get_nr_types(d->btf))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = btf_dump_resize(d);
if (err)
- return err;
+ return libbpf_err(err);
d->emit_queue_cnt = 0;
err = btf_dump_order_type(d, id, false);
if (err < 0)
- return err;
+ return libbpf_err(err);
for (i = 0; i < d->emit_queue_cnt; i++)
btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
@@ -1075,11 +1075,11 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
int lvl, err;
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = btf_dump_resize(d);
if (err)
- return -EINVAL;
+ return libbpf_err(err);
fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
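btf_dump__new() follows the same convention; a minimal dumper sketch (callback signature as in this era of the API):

static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int dump_one(const struct btf *btf, __u32 id)
{
	struct btf_dump *d = btf_dump__new(btf, NULL, NULL, print_cb);

	if (libbpf_get_error(d))
		return -errno;
	btf_dump__dump_type(d, id);  /* emits the C definition of type id */
	btf_dump__free(d);
	return 0;
}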
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
new file mode 100644
index 000000000000..8df718a6b142
--- /dev/null
+++ b/tools/lib/bpf/gen_loader.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/filter.h>
+#include "btf.h"
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+#include "bpf_gen_internal.h"
+#include "skel_internal.h"
+
+#define MAX_USED_MAPS 64
+#define MAX_USED_PROGS 32
+
+/* The following structure describes the stack layout of the loader program.
+ * In addition, R6 contains the pointer to the context.
+ * R7 contains the result of the last sys_bpf command (typically error or FD).
+ * R9 contains the result of the last sys_close command.
+ *
+ * Naming convention:
+ * ctx - bpf program context
+ * stack - bpf program stack
+ * blob - bpf_attr-s, strings, insns, map data;
+ *        all the bytes that the loader prog will use for read/write.
+ */
+struct loader_stack {
+ __u32 btf_fd;
+ __u32 map_fd[MAX_USED_MAPS];
+ __u32 prog_fd[MAX_USED_PROGS];
+ __u32 inner_map_fd;
+};
+
+#define stack_off(field) \
+ (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))
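+/* e.g. sizeof(struct loader_stack) is 4 + 64 * 4 + 32 * 4 + 4 = 392 bytes,
+ * so stack_off(btf_fd) is -392 and stack_off(inner_map_fd) is -4: every
+ * field sits at a negative offset from the frame pointer R10.
+ */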
+
+#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
+
+static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->insn_cur - gen->insn_start;
+ void *insn_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ insn_start = realloc(gen->insn_start, off + size);
+ if (!insn_start) {
+ gen->error = -ENOMEM;
+ free(gen->insn_start);
+ gen->insn_start = NULL;
+ return -ENOMEM;
+ }
+ gen->insn_start = insn_start;
+ gen->insn_cur = insn_start + off;
+ return 0;
+}
+
+static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->data_cur - gen->data_start;
+ void *data_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ data_start = realloc(gen->data_start, off + size);
+ if (!data_start) {
+ gen->error = -ENOMEM;
+ free(gen->data_start);
+ gen->data_start = NULL;
+ return -ENOMEM;
+ }
+ gen->data_start = data_start;
+ gen->data_cur = data_start + off;
+ return 0;
+}
+
+static void emit(struct bpf_gen *gen, struct bpf_insn insn)
+{
+ if (realloc_insn_buf(gen, sizeof(insn)))
+ return;
+ memcpy(gen->insn_cur, &insn, sizeof(insn));
+ gen->insn_cur += sizeof(insn);
+}
+
+static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
+{
+ emit(gen, insn1);
+ emit(gen, insn2);
+}
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level)
+{
+ size_t stack_sz = sizeof(struct loader_stack);
+ int i;
+
+ gen->log_level = log_level;
+ /* save ctx pointer into R6 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
+
+ /* bzero stack */
+ emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
+ emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
+
+ /* jump over cleanup code */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
+ /* size of cleanup code below */
+ (stack_sz / 4) * 3 + 2));
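+ /* with stack_sz = 392 that skips 98 * 3 + 2 = 296 insns: three per
+ * saved FD in the loop below, plus the trailing mov + exit
+ */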
+
+ /* remember the label where all error branches will jump to */
+ gen->cleanup_label = gen->insn_cur - gen->insn_start;
+ /* emit cleanup code: close all temp FDs */
+ for (i = 0; i < stack_sz; i += 4) {
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ }
+ /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
+ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ emit(gen, BPF_EXIT_INSN());
+}
+
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
+{
+ void *prev;
+
+ if (realloc_data_buf(gen, size))
+ return 0;
+ prev = gen->data_cur;
+ memcpy(gen->data_cur, data, size);
+ gen->data_cur += size;
+ return prev - gen->data_start;
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+ switch (sz) {
+ case 8: return BPF_DW;
+ case 4: return BPF_W;
+ case 2: return BPF_H;
+ case 1: return BPF_B;
+ default: return -1;
+ }
+}
+
+/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
+static void emit_rel_store(struct bpf_gen *gen, int off, int data)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, data));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+}
+
+/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
+static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
+{
+ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
+ emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
+ bool check_non_zero)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
+ if (check_non_zero)
+ /* If the value in ctx is zero, don't update the blob.
+ * For example: when ctx->map.max_entries == 0, keep the default max_entries from bpf.c.
+ */
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
+}
+
+static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
+{
+ emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, attr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
+ /* remember the result in R7 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+}
+
+static bool is_simm16(__s64 value)
+{
+ return value == (__s64)(__s16)value;
+}
+
+static void emit_check_err(struct bpf_gen *gen)
+{
+ __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
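+ /* byte distance back to cleanup_label, scaled to 8-byte insns; the
+ * extra -1 is because jump offsets are relative to the next insn
+ */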
+
+ /* R7 contains result of last sys_bpf command.
+ * if (R7 < 0) goto cleanup;
+ */
+ if (is_simm16(off)) {
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
+ } else {
+ gen->error = -ERANGE;
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
+ }
+}
+
+/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
+static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
+ const char *fmt, va_list args)
+{
+ char buf[1024];
+ int addr, len, ret;
+
+ if (!gen->log_level)
+ return;
+ ret = vsnprintf(buf, sizeof(buf), fmt, args);
+ if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
+ /* Special case to accommodate the common debug_ret() usage:
+ * it saves callers from having to specify BPF_REG_7 and append
+ * " r=%%d" to their format strings explicitly.
+ */
+ strcat(buf, " r=%d");
+ len = strlen(buf) + 1;
+ addr = add_data(gen, buf, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, addr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ if (reg1 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
+ if (reg2 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
+}
+
+static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, reg1, reg2, fmt, args);
+ va_end(args);
+}
+
+static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, BPF_REG_7, -1, fmt, args);
+ va_end(args);
+}
+
+static void __emit_sys_close(struct bpf_gen *gen)
+{
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
+ /* 2 is the number of the following insns
+ * 6 is the additional insns emitted by debug_regs when logging is enabled
+ */
+ 2 + (gen->log_level ? 6 : 0)));
+ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
+}
+
+static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
+ __emit_sys_close(gen);
+}
+
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
+ __emit_sys_close(gen);
+}
+
+int bpf_gen__finish(struct bpf_gen *gen)
+{
+ int i;
+
+ emit_sys_close_stack(gen, stack_off(btf_fd));
+ for (i = 0; i < gen->nr_progs; i++)
+ move_stack2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * gen->nr_maps +
+ sizeof(struct bpf_prog_desc) * i +
+ offsetof(struct bpf_prog_desc, prog_fd), 4,
+ stack_off(prog_fd[i]));
+ for (i = 0; i < gen->nr_maps; i++)
+ move_stack2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * i +
+ offsetof(struct bpf_map_desc, map_fd), 4,
+ stack_off(map_fd[i]));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
+ emit(gen, BPF_EXIT_INSN());
+ pr_debug("gen: finish %d\n", gen->error);
+ if (!gen->error) {
+ struct gen_loader_opts *opts = gen->opts;
+
+ opts->insns = gen->insn_start;
+ opts->insns_sz = gen->insn_cur - gen->insn_start;
+ opts->data = gen->data_start;
+ opts->data_sz = gen->data_cur - gen->data_start;
+ }
+ return gen->error;
+}
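+
+/* Illustrative layout of the user-visible context that the two loops
+ * above fill in (types as used elsewhere in this patch):
+ *
+ *	struct bpf_loader_ctx   ctx;                <- log_level/size/buf
+ *	struct bpf_map_desc     maps[nr_maps];      <- map_fd per map
+ *	struct bpf_prog_desc    progs[nr_progs];    <- prog_fd per prog
+ *
+ * move_stack2ctx() copies each fd saved on the loader's stack into
+ * its descriptor so that user space can read the fds back.
+ */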
+
+void bpf_gen__free(struct bpf_gen *gen)
+{
+ if (!gen)
+ return;
+ free(gen->data_start);
+ free(gen->insn_start);
+ free(gen);
+}
+
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
+ __u32 btf_raw_size)
+{
+ int attr_size = offsetofend(union bpf_attr, btf_log_level);
+ int btf_data, btf_load_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: load_btf: size %d\n", btf_raw_size);
+ btf_data = add_data(gen, btf_raw_data, btf_raw_size);
+
+ attr.btf_size = btf_raw_size;
+ btf_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with a pointer to the BTF data */
+ emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
+ /* emit BTF_LOAD command */
+ emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
+ debug_ret(gen, "btf_load size %d", btf_raw_size);
+ emit_check_err(gen);
+ /* remember btf_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
+}
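+
+/* The pattern above repeats for every command this file emits: stage
+ * a template union bpf_attr in the data blob, patch individual fields
+ * at load time (from the user context or from fds saved on the
+ * stack), invoke sys_bpf, then error-check and/or stash the result
+ * held in R7.
+ */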
+
+void bpf_gen__map_create(struct bpf_gen *gen,
+ struct bpf_create_map_attr *map_attr, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
+ bool close_inner_map_fd = false;
+ int map_create_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ attr.map_type = map_attr->map_type;
+ attr.key_size = map_attr->key_size;
+ attr.value_size = map_attr->value_size;
+ attr.map_flags = map_attr->map_flags;
+ memcpy(attr.map_name, map_attr->name,
+ min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
+ attr.numa_node = map_attr->numa_node;
+ attr.map_ifindex = map_attr->map_ifindex;
+ attr.max_entries = map_attr->max_entries;
+ switch (attr.map_type) {
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_STACK_TRACE:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_RINGBUF:
+ break;
+ default:
+ attr.btf_key_type_id = map_attr->btf_key_type_id;
+ attr.btf_value_type_id = map_attr->btf_value_type_id;
+ }
+
+ pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
+ attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);
+
+ map_create_attr = add_data(gen, &attr, attr_size);
+ if (attr.btf_value_type_id)
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
+ stack_off(btf_fd));
+ switch (attr.map_type) {
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
+ stack_off(inner_map_fd));
+ close_inner_map_fd = true;
+ break;
+ default:
+ break;
+ }
+ /* conditionally update max_entries */
+ if (map_idx >= 0)
+ move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, max_entries),
+ true /* check that max_entries != 0 */);
+ /* emit MAP_CREATE command */
+ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
+ debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
+ attr.map_name, map_idx, map_attr->map_type, attr.value_size,
+ attr.btf_value_type_id);
+ emit_check_err(gen);
+ /* remember map_fd in the stack, if successful */
+ if (map_idx < 0) {
+ /* This bpf_gen__map_create() function is called with map_idx >= 0
+ * for all maps that libbpf loading logic tracks.
+ * It's called with -1 to create an inner map.
+ */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(inner_map_fd)));
+ } else if (map_idx != gen->nr_maps) {
+ gen->error = -EDOM; /* internal bug */
+ return;
+ } else {
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(map_fd[map_idx])));
+ gen->nr_maps++;
+ }
+ if (close_inner_map_fd)
+ emit_sys_close_stack(gen, stack_off(inner_map_fd));
+}
+
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
+ enum bpf_attach_type type)
+{
+ const char *prefix;
+ int kind, ret;
+
+ btf_get_kernel_prefix_kind(type, &prefix, &kind);
+ gen->attach_kind = kind;
+ ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
+ prefix, attach_name);
+ if (ret >= sizeof(gen->attach_target))
+ gen->error = -ENOSPC;
+}
+
+static void emit_find_attach_target(struct bpf_gen *gen)
+{
+ int name, len = strlen(gen->attach_target) + 1;
+
+ pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
+ name = add_data(gen, gen->attach_target, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)",
+ gen->attach_target, gen->attach_kind);
+ emit_check_err(gen);
+ /* if successful, btf_id is in lower 32-bit of R7 and
+ * btf_obj_fd is in upper 32-bit
+ */
+}
+
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
+ int insn_idx)
+{
+ struct ksym_relo_desc *relo;
+
+ relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
+ if (!relo) {
+ gen->error = -ENOMEM;
+ return;
+ }
+ gen->relos = relo;
+ relo += gen->relo_cnt;
+ relo->name = name;
+ relo->kind = kind;
+ relo->insn_idx = insn_idx;
+ gen->relo_cnt++;
+}
+
+static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+{
+ int name, insn, len = strlen(relo->name) + 1;
+
+ pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
+ name = add_data(gen, relo->name, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
+ emit_check_err(gen);
+ /* store btf_id into insn[insn_idx].imm */
+ insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
+ offsetof(struct bpf_insn, imm);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, insn));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
+ if (relo->kind == BTF_KIND_VAR) {
+ /* store btf_obj_fd into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ sizeof(struct bpf_insn)));
+ }
+}
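+
+/* Sketch of what the stores above patch for a BTF_KIND_VAR ksym,
+ * which occupies a two-insn ld_imm64:
+ *
+ *	insn[insn_idx].imm     = btf_id;        (low 32 bits of R7)
+ *	insn[insn_idx + 1].imm = btf_obj_fd;    (high 32 bits of R7)
+ *
+ * which is why R7 is right-shifted by 32 before the second store.
+ */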
+
+static void emit_relos(struct bpf_gen *gen, int insns)
+{
+ int i;
+
+ for (i = 0; i < gen->relo_cnt; i++)
+ emit_relo(gen, gen->relos + i, insns);
+}
+
+static void cleanup_relos(struct bpf_gen *gen, int insns)
+{
+ int i, insn;
+
+ for (i = 0; i < gen->relo_cnt; i++) {
+ if (gen->relos[i].kind != BTF_KIND_VAR)
+ continue;
+ /* close fd recorded in insn[insn_idx + 1].imm */
+ insn = insns +
+ sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
+ offsetof(struct bpf_insn, imm);
+ emit_sys_close_blob(gen, insn);
+ }
+ if (gen->relo_cnt) {
+ free(gen->relos);
+ gen->relo_cnt = 0;
+ gen->relos = NULL;
+ }
+}
+
+void bpf_gen__prog_load(struct bpf_gen *gen,
+ struct bpf_prog_load_params *load_attr, int prog_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, fd_array);
+ int prog_load_attr, license, insns, func_info, line_info;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
+ load_attr->prog_type, load_attr->insn_cnt);
+ /* add license string to blob of bytes */
+ license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
+ /* add insns to blob of bytes */
+ insns = add_data(gen, load_attr->insns,
+ load_attr->insn_cnt * sizeof(struct bpf_insn));
+
+ attr.prog_type = load_attr->prog_type;
+ attr.expected_attach_type = load_attr->expected_attach_type;
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.kern_version = 0;
+ attr.insn_cnt = (__u32)load_attr->insn_cnt;
+ attr.prog_flags = load_attr->prog_flags;
+
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+ func_info = add_data(gen, load_attr->func_info,
+ attr.func_info_cnt * attr.func_info_rec_size);
+
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_cnt = load_attr->line_info_cnt;
+ line_info = add_data(gen, load_attr->line_info,
+ attr.line_info_cnt * attr.line_info_rec_size);
+
+ memcpy(attr.prog_name, load_attr->name,
+ min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+ prog_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with a pointer to license */
+ emit_rel_store(gen, attr_field(prog_load_attr, license), license);
+
+ /* populate union bpf_attr with a pointer to instructions */
+ emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
+
+ /* populate union bpf_attr with a pointer to func_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
+
+ /* populate union bpf_attr with a pointer to line_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
+
+ /* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
+ emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
+ stack_off(map_fd[0]));
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
+ stack_off(btf_fd));
+ if (gen->attach_kind) {
+ emit_find_attach_target(gen);
+ /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, prog_load_attr));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_id)));
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_obj_fd)));
+ }
+ emit_relos(gen, insns);
+ /* emit PROG_LOAD command */
+ emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
+ debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
+ /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
+ cleanup_relos(gen, insns);
+ if (gen->attach_kind)
+ emit_sys_close_blob(gen,
+ attr_field(prog_load_attr, attach_btf_obj_fd));
+ emit_check_err(gen);
+ /* remember prog_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(prog_fd[gen->nr_progs])));
+ gen->nr_progs++;
+}
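+
+/* Note on fd_array above: map fds are saved on the loader's stack in
+ * creation order, so pointing fd_array at stack_off(map_fd[0]) lets
+ * BPF_PSEUDO_MAP_IDX{,_VALUE} relocations in the program being loaded
+ * resolve map_idx -> map_fd without further patching.
+ */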
+
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
+ __u32 value_size)
+{
+ int attr_size = offsetofend(union bpf_attr, flags);
+ int map_update_attr, value, key;
+ union bpf_attr attr;
+ int zero = 0;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_update_elem: idx %d\n", map_idx);
+
+ value = add_data(gen, pvalue, value_size);
+ key = add_data(gen, &zero, sizeof(zero));
+
+ /* if (map_desc[map_idx].initial_value)
+ * copy_from_user(value, initial_value, value_size);
+ */
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, initial_value)));
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, value));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
+
+ map_update_attr = add_data(gen, &attr, attr_size);
+ move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
+ stack_off(map_fd[map_idx]));
+ emit_rel_store(gen, attr_field(map_update_attr, key), key);
+ emit_rel_store(gen, attr_field(map_update_attr, value), value);
+ /* emit MAP_UPDATE_ELEM command */
+ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
+ debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
+ emit_check_err(gen);
+}
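+
+/* The JEQ above skips exactly 4 insns when initial_value is NULL:
+ * the ld_imm64 (2 insns), the MOV64_IMM and the copy_from_user call,
+ * leaving the value pre-staged in the blob unchanged.
+ */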
+
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, map_fd);
+ int map_freeze_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_freeze: idx %d\n", map_idx);
+ map_freeze_attr = add_data(gen, &attr, attr_size);
+ move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
+ stack_off(map_fd[map_idx]));
+ /* emit MAP_FREEZE command */
+ emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
+ debug_ret(gen, "map_freeze");
+ emit_check_err(gen);
+}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e2a3cf437814..1e04ce724240 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -54,6 +54,7 @@
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
+#include "bpf_gen_internal.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
@@ -150,6 +151,23 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
+/* this goes away in libbpf 1.0 */
+enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;
+
+int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
+{
+ /* __LIBBPF_STRICT_LAST is the last used power-of-2 value + 1, so to
+ * get the bit mask of all valid modes we compensate for that +1 by
+ * subtracting one, and (2*x - 1) then yields the full mask
+ */
+ if (mode != LIBBPF_STRICT_ALL
+ && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
+ return errno = EINVAL, -EINVAL;
+
+ libbpf_mode = mode;
+ return 0;
+}
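+
+/* Worked example of the mask math (hypothetical flag values): with
+ * flags 0x1 and 0x2 defined, __LIBBPF_STRICT_LAST is 0x2 + 1 = 3, so
+ * the mask is (3 - 1) * 2 - 1 = 3 = 0b11, exactly the bits of all
+ * defined flags; any mode bit outside it is rejected with -EINVAL.
+ */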
+
enum kern_feature_id {
/* v4.14: kernel support for program & map names. */
FEAT_PROG_NAME,
@@ -178,7 +196,7 @@ enum kern_feature_id {
__FEAT_CNT,
};
-static bool kernel_supports(enum kern_feature_id feat_id);
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
enum reloc_type {
RELO_LD64,
@@ -432,6 +450,8 @@ struct bpf_object {
bool loaded;
bool has_subcalls;
+ struct bpf_gen *gen_loader;
+
/*
* Information when doing elf related work. Only valid if fd
* is valid.
@@ -677,6 +697,11 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
+ if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
+ return -ENOTSUP;
+ }
+
pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
@@ -700,13 +725,14 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;
- /* if function is a global/weak symbol, but has hidden
- * visibility (STV_HIDDEN), mark its BTF FUNC as static to
- * enable more permissive BPF verification mode with more
- * outside context available to BPF verifier
+ /* if function is a global/weak symbol, but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
+ * as static to enable more permissive BPF verification mode
+ * with more outside context available to BPF verifier
*/
if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
- && GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
+ && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
+ || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@@ -1794,7 +1820,6 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (!symbols)
return -EINVAL;
-
scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
data = elf_sec_data(obj, scn);
if (!scn || !data) {
@@ -1854,6 +1879,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return -LIBBPF_ERRNO__FORMAT;
}
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
+ || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
+ return -ENOTSUP;
+ }
+
map->libbpf_type = LIBBPF_MAP_UNSPEC;
map->sec_idx = sym.st_shndx;
map->sec_offset = sym.st_value;
@@ -2261,6 +2292,16 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
pr_debug("map '%s': found inner map definition.\n", map->name);
}
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+ switch (linkage) {
+ case BTF_VAR_STATIC: return "static";
+ case BTF_VAR_GLOBAL_ALLOCATED: return "global";
+ case BTF_VAR_GLOBAL_EXTERN: return "extern";
+ default: return "unknown";
+ }
+}
+
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
@@ -2293,10 +2334,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map_name, btf_kind_str(var));
return -EINVAL;
}
- if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
- var_extra->linkage != BTF_VAR_STATIC) {
- pr_warn("map '%s': unsupported var linkage %u.\n",
- map_name, var_extra->linkage);
+ if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
+ pr_warn("map '%s': unsupported map linkage %s.\n",
+ map_name, btf_var_linkage_str(var_extra->linkage));
return -EOPNOTSUPP;
}
@@ -2425,10 +2465,8 @@ static int bpf_object__init_maps(struct bpf_object *obj,
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
err = err ?: bpf_object__init_struct_ops_maps(obj);
- if (err)
- return err;
- return 0;
+ return err;
}
static bool section_have_execinstr(struct bpf_object *obj, int idx)
@@ -2443,20 +2481,20 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
- bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
- bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
- bool has_float = kernel_supports(FEAT_BTF_FLOAT);
- bool has_func = kernel_supports(FEAT_BTF_FUNC);
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global || !has_float;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
- bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
- bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
- bool has_float = kernel_supports(FEAT_BTF_FLOAT);
- bool has_func = kernel_supports(FEAT_BTF_FUNC);
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@@ -2539,16 +2577,14 @@ static int bpf_object__init_btf(struct bpf_object *obj,
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
- if (IS_ERR(obj->btf)) {
- err = PTR_ERR(obj->btf);
+ err = libbpf_get_error(obj->btf);
+ if (err) {
obj->btf = NULL;
- pr_warn("Error loading ELF section %s: %d.\n",
- BTF_ELF_SEC, err);
+ pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
goto out;
}
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
- err = 0;
}
if (btf_ext_data) {
if (!obj->btf) {
@@ -2556,11 +2592,11 @@ static int bpf_object__init_btf(struct bpf_object *obj,
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
goto out;
}
- obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size);
- if (IS_ERR(obj->btf_ext)) {
- pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
+ obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
+ err = libbpf_get_error(obj->btf_ext);
+ if (err) {
+ pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
+ BTF_EXT_ELF_SEC, err);
obj->btf_ext = NULL;
goto out;
}
@@ -2637,15 +2673,15 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
int err;
/* btf_vmlinux could be loaded earlier */
- if (obj->btf_vmlinux)
+ if (obj->btf_vmlinux || obj->gen_loader)
return 0;
if (!force && !obj_needs_vmlinux_btf(obj))
return 0;
obj->btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(obj->btf_vmlinux)) {
- err = PTR_ERR(obj->btf_vmlinux);
+ err = libbpf_get_error(obj->btf_vmlinux);
+ if (err) {
pr_warn("Error loading vmlinux BTF: %d\n", err);
obj->btf_vmlinux = NULL;
return err;
@@ -2662,7 +2698,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
- if (!kernel_supports(FEAT_BTF)) {
+ if (!kernel_supports(obj, FEAT_BTF)) {
if (kernel_needs_btf(obj)) {
err = -EOPNOTSUPP;
goto report;
@@ -2711,15 +2747,29 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
/* clone BTF to sanitize a copy and leave the original intact */
raw_data = btf__get_raw_data(obj->btf, &sz);
kern_btf = btf__new(raw_data, sz);
- if (IS_ERR(kern_btf))
- return PTR_ERR(kern_btf);
+ err = libbpf_get_error(kern_btf);
+ if (err)
+ return err;
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
bpf_object__sanitize_btf(obj, kern_btf);
}
- err = btf__load(kern_btf);
+ if (obj->gen_loader) {
+ __u32 raw_size = 0;
+ const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
+
+ if (!raw_data)
+ return -ENOMEM;
+ bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
+ /* Pretend to have valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ btf__set_fd(kern_btf, 0);
+ } else {
+ err = btf__load(kern_btf);
+ }
if (sanitize) {
if (!err) {
/* move fd to libbpf's BTF */
@@ -3216,6 +3266,9 @@ static int add_dummy_ksym_var(struct btf *btf)
const struct btf_var_secinfo *vs;
const struct btf_type *sec;
+ if (!btf)
+ return 0;
+
sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
BTF_KIND_DATASEC);
if (sec_btf_id < 0)
@@ -3470,7 +3523,7 @@ bpf_object__find_program_by_title(const struct bpf_object *obj,
if (pos->sec_name && !strcmp(pos->sec_name, title))
return pos;
}
- return NULL;
+ return errno = ENOENT, NULL;
}
static bool prog_is_subprog(const struct bpf_object *obj,
@@ -3503,7 +3556,7 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
if (!strcmp(prog->name, name))
return prog;
}
- return NULL;
+ return errno = ENOENT, NULL;
}
static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
@@ -3850,11 +3903,11 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err)
- return err;
+ return libbpf_err(err);
new_name = strdup(info.name);
if (!new_name)
- return -errno;
+ return libbpf_err(-errno);
new_fd = open("/", O_RDONLY | O_CLOEXEC);
if (new_fd < 0) {
@@ -3892,7 +3945,7 @@ err_close_new_fd:
close(new_fd);
err_free_new_name:
free(new_name);
- return err;
+ return libbpf_err(err);
}
__u32 bpf_map__max_entries(const struct bpf_map *map)
@@ -3903,7 +3956,7 @@ __u32 bpf_map__max_entries(const struct bpf_map *map)
struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
{
if (!bpf_map_type__is_map_in_map(map->def.type))
- return NULL;
+ return errno = EINVAL, NULL;
return map->inner_map;
}
@@ -3911,7 +3964,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->def.max_entries = max_entries;
return 0;
}
@@ -3919,7 +3972,7 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
if (!map || !max_entries)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
return bpf_map__set_max_entries(map, max_entries);
}
@@ -3935,6 +3988,9 @@ bpf_object__probe_loading(struct bpf_object *obj)
};
int ret;
+ if (obj->gen_loader)
+ return 0;
+
/* make sure basic loading works */
memset(&attr, 0, sizeof(attr));
@@ -3945,6 +4001,10 @@ bpf_object__probe_loading(struct bpf_object *obj)
ret = bpf_load_program_xattr(&attr, NULL, 0);
if (ret < 0) {
+ attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
+ ret = bpf_load_program_xattr(&attr, NULL, 0);
+ }
+ if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
@@ -4290,11 +4350,17 @@ static struct kern_feature_desc {
},
};
-static bool kernel_supports(enum kern_feature_id feat_id)
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
struct kern_feature_desc *feat = &feature_probes[feat_id];
int ret;
+ if (obj->gen_loader)
+ /* When generating the loader program, assume the latest
+ * kernel to avoid doing extra prog_load and map_create
+ * feature-probing syscalls.
+ */
+ return true;
+
if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
ret = feat->probe();
if (ret > 0) {
@@ -4377,6 +4443,13 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
+ if (obj->gen_loader) {
+ bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
+ map->mmaped, map->def.value_size);
+ if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
+ bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
+ return 0;
+ }
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -4402,14 +4475,14 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
-static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
+static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
struct bpf_create_map_attr create_attr;
struct bpf_map_def *def = &map->def;
memset(&create_attr, 0, sizeof(create_attr));
- if (kernel_supports(FEAT_PROG_NAME))
+ if (kernel_supports(obj, FEAT_PROG_NAME))
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -4450,7 +4523,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
if (map->inner_map) {
int err;
- err = bpf_object__create_map(obj, map->inner_map);
+ err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
map->name, err);
@@ -4462,7 +4535,15 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
create_attr.inner_map_fd = map->inner_map_fd;
}
- map->fd = bpf_create_map_xattr(&create_attr);
+ if (obj->gen_loader) {
+ bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
+ /* Pretend to have valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ map->fd = 0;
+ } else {
+ map->fd = bpf_create_map_xattr(&create_attr);
+ }
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
char *cp, errmsg[STRERR_BUFSIZE];
@@ -4483,6 +4564,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return -errno;
if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
+ if (obj->gen_loader)
+ map->inner_map->fd = -1;
bpf_map__destroy(map->inner_map);
zfree(&map->inner_map);
}
@@ -4490,11 +4573,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
-static int init_map_slots(struct bpf_map *map)
+static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
{
const struct bpf_map *targ_map;
unsigned int i;
- int fd, err;
+ int fd, err = 0;
for (i = 0; i < map->init_slots_sz; i++) {
if (!map->init_slots[i])
@@ -4502,7 +4585,13 @@ static int init_map_slots(struct bpf_map *map)
targ_map = map->init_slots[i];
fd = bpf_map__fd(targ_map);
- err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ if (obj->gen_loader) {
+ pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n",
+ map - obj->maps, i, targ_map - obj->maps);
+ return -ENOTSUP;
+ } else {
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ }
if (err) {
err = -errno;
pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
@@ -4544,7 +4633,7 @@ bpf_object__create_maps(struct bpf_object *obj)
pr_debug("map '%s': skipping creation (preset fd=%d)\n",
map->name, map->fd);
} else {
- err = bpf_object__create_map(obj, map);
+ err = bpf_object__create_map(obj, map, false);
if (err)
goto err_out;
@@ -4560,7 +4649,7 @@ bpf_object__create_maps(struct bpf_object *obj)
}
if (map->init_slots_sz) {
- err = init_map_slots(map);
+ err = init_map_slots(obj, map);
if (err < 0) {
zclose(map->fd);
goto err_out;
@@ -4970,11 +5059,14 @@ static int load_module_btfs(struct bpf_object *obj)
if (obj->btf_modules_loaded)
return 0;
+ if (obj->gen_loader)
+ return 0;
+
/* don't do this again, even if we find no module BTFs */
obj->btf_modules_loaded = true;
/* kernel too old to support module BTFs */
- if (!kernel_supports(FEAT_MODULE_BTF))
+ if (!kernel_supports(obj, FEAT_MODULE_BTF))
return 0;
while (true) {
@@ -5015,10 +5107,10 @@ static int load_module_btfs(struct bpf_object *obj)
}
btf = btf_get_from_fd(fd, obj->btf_vmlinux);
- if (IS_ERR(btf)) {
- pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
- name, id, PTR_ERR(btf));
- err = PTR_ERR(btf);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
+ name, id, err);
goto err_out;
}
@@ -6117,6 +6209,12 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
if (str_is_empty(spec_str))
return -EINVAL;
+ if (prog->obj->gen_loader) {
+ pr_warn("// TODO core_relo: prog %td insn[%d] %s %s kind %d\n",
+ prog - prog->obj->programs, relo->insn_off / 8,
+ local_name, spec_str, relo->kind);
+ return -ENOTSUP;
+ }
err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
@@ -6272,8 +6370,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
if (targ_btf_path) {
obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
- if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
- err = PTR_ERR(obj->btf_vmlinux_override);
+ err = libbpf_get_error(obj->btf_vmlinux_override);
+ if (err) {
pr_warn("failed to parse target BTF: %d\n", err);
return err;
}
@@ -6368,19 +6466,34 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
switch (relo->type) {
case RELO_LD64:
- insn[0].src_reg = BPF_PSEUDO_MAP_FD;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
+ insn[0].imm = relo->map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_FD;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ }
break;
case RELO_DATA:
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[1].imm = insn[0].imm + relo->sym_off;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = relo->map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ }
break;
case RELO_EXTERN_VAR:
ext = &obj->externs[relo->sym_off];
if (ext->type == EXT_KCFG) {
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = obj->kconfig_map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ }
insn[1].imm = ext->kcfg.data_off;
} else /* EXT_KSYM */ {
if (ext->ksym.type_id) { /* typed ksyms */
@@ -6399,11 +6512,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
insn[0].imm = ext->ksym.kernel_btf_id;
break;
case RELO_SUBPROG_ADDR:
- insn[0].src_reg = BPF_PSEUDO_FUNC;
- /* will be handled as a follow up pass */
+ if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
+ pr_warn("prog '%s': relo #%d: bad insn\n",
+ prog->name, i);
+ return -EINVAL;
+ }
+ /* handled already */
break;
case RELO_CALL:
- /* will be handled as a follow up pass */
+ /* handled already */
break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
@@ -6494,7 +6611,7 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
* support func/line info
*/
- if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
+ if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
return 0;
/* only attempt func info relocation if main program's func_info
@@ -6572,6 +6689,30 @@ static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, si
sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}
+static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+ int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
+ struct reloc_desc *relos;
+ int i;
+
+ if (main_prog == subprog)
+ return 0;
+ relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+ sizeof(*relos) * subprog->nr_reloc);
+
+ for (i = main_prog->nr_reloc; i < new_cnt; i++)
+ relos[i].insn_idx += subprog->sub_insn_off;
+ /* After insn_idx adjustment the 'relos' array is still sorted
+ * by insn_idx and doesn't break bsearch.
+ */
+ main_prog->reloc_desc = relos;
+ main_prog->nr_reloc = new_cnt;
+ return 0;
+}
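+
+/* Illustrative: if a subprog was appended at sub_insn_off == 100 and
+ * had a relo at its local insn #5, the copied relo now refers to insn
+ * #105 of the main program; subprogs are appended at increasing
+ * offsets, so the array stays sorted for bsearch().
+ */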
+
static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *prog)
@@ -6592,6 +6733,11 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
continue;
relo = find_prog_insn_relo(prog, insn_idx);
+ if (relo && relo->type == RELO_EXTERN_FUNC)
+ /* kfunc relocations will be handled later
+ * in bpf_object__relocate_data()
+ */
+ continue;
if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
prog->name, insn_idx, relo->type);
@@ -6666,6 +6812,10 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);
+ /* The subprog insns are now appended. Append its relos too. */
+ err = append_subprog_relos(main_prog, subprog);
+ if (err)
+ return err;
err = bpf_object__reloc_code(obj, main_prog, subprog);
if (err)
return err;
@@ -6795,11 +6945,25 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
return 0;
}
+static void
+bpf_object__free_relocs(struct bpf_object *obj)
+{
+ struct bpf_program *prog;
+ int i;
+
+ /* free up relocation descriptors */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ zfree(&prog->reloc_desc);
+ prog->nr_reloc = 0;
+ }
+}
+
static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
- size_t i;
+ size_t i, j;
int err;
if (obj->btf_ext) {
@@ -6810,23 +6974,32 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* relocate data references first for all programs and sub-programs,
- * as they don't change relative to code locations, so subsequent
- * subprogram processing won't need to re-calculate any of them
+
+ /* Before relocating calls, pre-process relocations and mark
+ * the few ld_imm64 instructions that point to subprogs.
+ * Otherwise bpf_object__reloc_code() would later have to consider
+ * all ld_imm64 insns as relocation candidates. That would
+ * slow down relocation, since the number of find_prog_insn_relo()
+ * calls would increase and most of them would fail to find a relo.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- err = bpf_object__relocate_data(obj, prog);
- if (err) {
- pr_warn("prog '%s': failed to relocate data references: %d\n",
- prog->name, err);
- return err;
+ for (j = 0; j < prog->nr_reloc; j++) {
+ struct reloc_desc *relo = &prog->reloc_desc[j];
+ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+
+ /* mark the insn, so it's recognized by insn_is_pseudo_func() */
+ if (relo->type == RELO_SUBPROG_ADDR)
+ insn[0].src_reg = BPF_PSEUDO_FUNC;
}
}
- /* now relocate subprogram calls and append used subprograms to main
+
+ /* relocate subprogram calls and append used subprograms to main
* programs; each copy of subprogram code needs to be relocated
* differently for each main program, because its code location might
- * have changed
+ * have changed.
+ * Append subprog relos to main programs to allow data relos to be
+ * processed after text is completely relocated.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
@@ -6843,12 +7016,20 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* free up relocation descriptors */
+ /* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- zfree(&prog->reloc_desc);
- prog->nr_reloc = 0;
+ if (prog_is_subprog(obj, prog))
+ continue;
+ err = bpf_object__relocate_data(obj, prog);
+ if (err) {
+ pr_warn("prog '%s': failed to relocate data references: %d\n",
+ prog->name, err);
+ return err;
+ }
}
+ if (!obj->gen_loader)
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -7037,6 +7218,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
enum bpf_func_id func_id;
int i;
+ if (obj->gen_loader)
+ return 0;
+
for (i = 0; i < prog->insns_cnt; i++, insn++) {
if (!insn_is_helper_call(insn, &func_id))
continue;
@@ -7048,12 +7232,12 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
switch (func_id) {
case BPF_FUNC_probe_read_kernel:
case BPF_FUNC_probe_read_user:
- if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read;
break;
case BPF_FUNC_probe_read_kernel_str:
case BPF_FUNC_probe_read_user_str:
- if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read_str;
break;
default:
@@ -7088,12 +7272,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
- if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
+ if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
prog->sec_def->is_exp_attach_type_optional)
load_attr.expected_attach_type = 0;
else
load_attr.expected_attach_type = prog->expected_attach_type;
- if (kernel_supports(FEAT_PROG_NAME))
+ if (kernel_supports(prog->obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
@@ -7109,7 +7293,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
- if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
+ if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -7121,6 +7305,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ if (prog->obj->gen_loader) {
+ bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
+ prog - prog->obj->programs);
+ *pfd = -1;
+ return 0;
+ }
retry_load:
if (log_buf_size) {
log_buf = malloc(log_buf_size);
@@ -7139,7 +7329,7 @@ retry_load:
pr_debug("verifier log:\n%s", log_buf);
if (prog->obj->rodata_map_idx >= 0 &&
- kernel_supports(FEAT_PROG_BIND_MAP)) {
+ kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
struct bpf_map *rodata_map =
&prog->obj->maps[prog->obj->rodata_map_idx];
@@ -7198,6 +7388,38 @@ out:
return ret;
}
+static int bpf_program__record_externs(struct bpf_program *prog)
+{
+ struct bpf_object *obj = prog->obj;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ struct reloc_desc *relo = &prog->reloc_desc[i];
+ struct extern_desc *ext = &obj->externs[relo->sym_off];
+
+ switch (relo->type) {
+ case RELO_EXTERN_VAR:
+ if (ext->type != EXT_KSYM)
+ continue;
+ if (!ext->ksym.type_id) {
+ pr_warn("typeless ksym %s is not supported yet\n",
+ ext->name);
+ return -ENOTSUP;
+ }
+ bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
+ relo->insn_idx);
+ break;
+ case RELO_EXTERN_FUNC:
+ bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
+ relo->insn_idx);
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
@@ -7206,7 +7428,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (prog->obj->loaded) {
pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if ((prog->type == BPF_PROG_TYPE_TRACING ||
@@ -7216,7 +7438,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
if (err)
- return err;
+ return libbpf_err(err);
prog->attach_btf_obj_fd = btf_obj_fd;
prog->attach_btf_id = btf_type_id;
@@ -7226,13 +7448,13 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (prog->preprocessor) {
pr_warn("Internal error: can't load program '%s'\n",
prog->name);
- return -LIBBPF_ERRNO__INTERNAL;
+ return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
}
prog->instances.fds = malloc(sizeof(int));
if (!prog->instances.fds) {
pr_warn("Not enough memory for BPF fds\n");
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
}
prog->instances.nr = 1;
prog->instances.fds[0] = -1;
@@ -7243,6 +7465,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
prog->name, prog->instances.nr);
}
+ if (prog->obj->gen_loader)
+ bpf_program__record_externs(prog);
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
if (!err)
@@ -7289,7 +7513,7 @@ out:
pr_warn("failed to load program '%s'\n", prog->name);
zfree(&prog->insns);
prog->insns_cnt = 0;
- return err;
+ return libbpf_err(err);
}
static int
@@ -7319,6 +7543,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
if (err)
return err;
}
+ if (obj->gen_loader)
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -7420,7 +7646,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
- return __bpf_object__open_xattr(attr, 0);
+ return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -7430,18 +7656,18 @@ struct bpf_object *bpf_object__open(const char *path)
.prog_type = BPF_PROG_TYPE_UNSPEC,
};
- return bpf_object__open_xattr(&attr);
+ return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
}
struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
{
if (!path)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
pr_debug("loading %s\n", path);
- return __bpf_object__open(path, NULL, 0, opts);
+ return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
}
struct bpf_object *
@@ -7449,9 +7675,9 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
if (!obj_buf || obj_buf_sz == 0)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
- return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
+ return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
}
struct bpf_object *
@@ -7466,9 +7692,9 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
/* returning NULL is wrong, but backwards-compatible */
if (!obj_buf || obj_buf_sz == 0)
- return NULL;
+ return errno = EINVAL, NULL;
- return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
+ return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
}
int bpf_object__unload(struct bpf_object *obj)
@@ -7476,7 +7702,7 @@ int bpf_object__unload(struct bpf_object *obj)
size_t i;
if (!obj)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
for (i = 0; i < obj->nr_maps; i++) {
zclose(obj->maps[i].fd);
@@ -7497,11 +7723,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
- if (!kernel_supports(FEAT_GLOBAL_DATA)) {
+ if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
- if (!kernel_supports(FEAT_ARRAY_MMAP))
+ if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
m->def.map_flags ^= BPF_F_MMAPABLE;
}
@@ -7699,6 +7925,12 @@ static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
if (ext->type != EXT_KSYM || !ext->ksym.type_id)
continue;
+ if (obj->gen_loader) {
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = 0;
+ ext->ksym.kernel_btf_id = 0;
+ continue;
+ }
t = btf__type_by_id(obj->btf, ext->btf_id);
if (btf_is_var(t))
err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
@@ -7803,16 +8035,19 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
int err, i;
if (!attr)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
obj = attr->obj;
if (!obj)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (obj->loaded) {
pr_warn("object '%s': load can't be attempted twice\n", obj->name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
+ if (obj->gen_loader)
+ bpf_gen__init(obj->gen_loader, attr->log_level);
+
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@@ -7823,6 +8058,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
+ if (obj->gen_loader) {
+ /* reset FDs */
+ btf__set_fd(obj->btf, -1);
+ for (i = 0; i < obj->nr_maps; i++)
+ obj->maps[i].fd = -1;
+ if (!err)
+ err = bpf_gen__finish(obj->gen_loader);
+ }
+
/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
@@ -7849,7 +8093,7 @@ out:
bpf_object__unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
- return err;
+ return libbpf_err(err);
}
int bpf_object__load(struct bpf_object *obj)
@@ -7921,28 +8165,28 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
err = make_parent_dir(path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
if (prog == NULL) {
pr_warn("invalid program pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
instance, prog->name, prog->instances.nr);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
- return err;
+ return libbpf_err(err);
}
pr_debug("pinned program '%s'\n", path);
@@ -7956,22 +8200,23 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
if (prog == NULL) {
pr_warn("invalid program pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
instance, prog->name, prog->instances.nr);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
err = unlink(path);
if (err != 0)
- return -errno;
+ return libbpf_err(-errno);
+
pr_debug("unpinned program '%s'\n", path);
return 0;
@@ -7983,20 +8228,20 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
err = make_parent_dir(path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
if (prog == NULL) {
pr_warn("invalid program pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (prog->instances.nr <= 0) {
pr_warn("no instances of prog %s to pin\n", prog->name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (prog->instances.nr == 1) {
@@ -8040,7 +8285,7 @@ err_unpin:
rmdir(path);
- return err;
+ return libbpf_err(err);
}
int bpf_program__unpin(struct bpf_program *prog, const char *path)
@@ -8049,16 +8294,16 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
if (prog == NULL) {
pr_warn("invalid program pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (prog->instances.nr <= 0) {
pr_warn("no instances of prog %s to pin\n", prog->name);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (prog->instances.nr == 1) {
@@ -8072,9 +8317,9 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
if (len < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ return libbpf_err(-ENAMETOOLONG);
err = bpf_program__unpin_instance(prog, buf, i);
if (err)
@@ -8083,7 +8328,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
err = rmdir(path);
if (err)
- return -errno;
+ return libbpf_err(-errno);
return 0;
}
@@ -8095,14 +8340,14 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
if (map == NULL) {
pr_warn("invalid map pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->pin_path) {
if (path && strcmp(path, map->pin_path)) {
pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
bpf_map__name(map), map->pin_path, path);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
} else if (map->pinned) {
pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
bpf_map__name(map), map->pin_path);
@@ -8112,10 +8357,10 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
if (!path) {
pr_warn("missing a path to pin map '%s' at\n",
bpf_map__name(map));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
} else if (map->pinned) {
pr_warn("map '%s' already pinned\n", bpf_map__name(map));
- return -EEXIST;
+ return libbpf_err(-EEXIST);
}
map->pin_path = strdup(path);
@@ -8127,11 +8372,11 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
err = make_parent_dir(map->pin_path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(map->pin_path);
if (err)
- return err;
+ return libbpf_err(err);
if (bpf_obj_pin(map->fd, map->pin_path)) {
err = -errno;
@@ -8146,7 +8391,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
out_err:
cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
pr_warn("failed to pin map: %s\n", cp);
- return err;
+ return libbpf_err(err);
}
int bpf_map__unpin(struct bpf_map *map, const char *path)
@@ -8155,29 +8400,29 @@ int bpf_map__unpin(struct bpf_map *map, const char *path)
if (map == NULL) {
pr_warn("invalid map pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->pin_path) {
if (path && strcmp(path, map->pin_path)) {
pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
bpf_map__name(map), map->pin_path, path);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
path = map->pin_path;
} else if (!path) {
pr_warn("no path to unpin map '%s' from\n",
bpf_map__name(map));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
err = unlink(path);
if (err != 0)
- return -errno;
+ return libbpf_err(-errno);
map->pinned = false;
pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
@@ -8192,7 +8437,7 @@ int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
if (path) {
new = strdup(path);
if (!new)
- return -errno;
+ return libbpf_err(-errno);
}
free(map->pin_path);
@@ -8226,11 +8471,11 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
if (!obj->loaded) {
pr_warn("object not yet loaded; load it first\n");
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
bpf_object__for_each_map(map, obj) {
@@ -8270,7 +8515,7 @@ err_unpin_maps:
bpf_map__unpin(map, NULL);
}
- return err;
+ return libbpf_err(err);
}
int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
@@ -8279,7 +8524,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
bpf_object__for_each_map(map, obj) {
char *pin_path = NULL;
@@ -8291,9 +8536,9 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
len = snprintf(buf, PATH_MAX, "%s/%s", path,
bpf_map__name(map));
if (len < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ return libbpf_err(-ENAMETOOLONG);
sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
@@ -8302,7 +8547,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
err = bpf_map__unpin(map, pin_path);
if (err)
- return err;
+ return libbpf_err(err);
}
return 0;
@@ -8314,11 +8559,11 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
if (!obj->loaded) {
pr_warn("object not yet loaded; load it first\n");
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
bpf_object__for_each_program(prog, obj) {
@@ -8357,7 +8602,7 @@ err_unpin_programs:
bpf_program__unpin(prog, buf);
}
- return err;
+ return libbpf_err(err);
}
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
@@ -8366,7 +8611,7 @@ int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
@@ -8375,13 +8620,13 @@ int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
len = snprintf(buf, PATH_MAX, "%s/%s", path,
prog->pin_name);
if (len < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ return libbpf_err(-ENAMETOOLONG);
err = bpf_program__unpin(prog, buf);
if (err)
- return err;
+ return libbpf_err(err);
}
return 0;
@@ -8393,12 +8638,12 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
err = bpf_object__pin_maps(obj, path);
if (err)
- return err;
+ return libbpf_err(err);
err = bpf_object__pin_programs(obj, path);
if (err) {
bpf_object__unpin_maps(obj, path);
- return err;
+ return libbpf_err(err);
}
return 0;
@@ -8448,6 +8693,7 @@ void bpf_object__close(struct bpf_object *obj)
if (obj->clear_priv)
obj->clear_priv(obj, obj->priv);
+ bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
btf__free(obj->btf);
@@ -8494,7 +8740,7 @@ bpf_object__next(struct bpf_object *prev)
const char *bpf_object__name(const struct bpf_object *obj)
{
- return obj ? obj->name : ERR_PTR(-EINVAL);
+ return obj ? obj->name : libbpf_err_ptr(-EINVAL);
}
unsigned int bpf_object__kversion(const struct bpf_object *obj)
@@ -8515,7 +8761,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
if (obj->loaded)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
obj->kern_version = kern_version;
@@ -8535,7 +8781,23 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv,
void *bpf_object__priv(const struct bpf_object *obj)
{
- return obj ? obj->priv : ERR_PTR(-EINVAL);
+ return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
+}
+
+int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
+{
+ struct bpf_gen *gen;
+
+ if (!opts)
+ return -EFAULT;
+ if (!OPTS_VALID(opts, gen_loader_opts))
+ return -EINVAL;
+ gen = calloc(sizeof(*gen), 1);
+ if (!gen)
+ return -ENOMEM;
+ gen->opts = opts;
+ obj->gen_loader = gen;
+ return 0;
}
static struct bpf_program *
@@ -8555,7 +8817,7 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
if (p->obj != obj) {
pr_warn("error: program handler doesn't match object\n");
- return NULL;
+ return errno = EINVAL, NULL;
}
idx = (p - obj->programs) + (forward ? 1 : -1);
@@ -8601,7 +8863,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv,
void *bpf_program__priv(const struct bpf_program *prog)
{
- return prog ? prog->priv : ERR_PTR(-EINVAL);
+ return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
}
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
@@ -8628,7 +8890,7 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
title = strdup(title);
if (!title) {
pr_warn("failed to strdup program title\n");
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
}
}
@@ -8643,7 +8905,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
if (prog->obj->loaded)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
prog->load = autoload;
return 0;
@@ -8665,17 +8927,17 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
int *instances_fds;
if (nr_instances <= 0 || !prep)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (prog->instances.nr > 0 || prog->instances.fds) {
pr_warn("Can't set pre-processor after loading\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
instances_fds = malloc(sizeof(int) * nr_instances);
if (!instances_fds) {
pr_warn("alloc memory failed for fds\n");
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
}
/* fill all fds with -1 */
@@ -8692,19 +8954,19 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
int fd;
if (!prog)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (n >= prog->instances.nr || n < 0) {
pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
n, prog->name, prog->instances.nr);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
fd = prog->instances.fds[n];
if (fd < 0) {
pr_warn("%dth instance of program '%s' is invalid\n",
n, prog->name);
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
return fd;
@@ -8730,7 +8992,7 @@ static bool bpf_program__is_type(const struct bpf_program *prog,
int bpf_program__set_##NAME(struct bpf_program *prog) \
{ \
if (!prog) \
- return -EINVAL; \
+ return libbpf_err(-EINVAL); \
bpf_program__set_type(prog, TYPE); \
return 0; \
} \
@@ -8820,7 +9082,10 @@ static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
static const struct bpf_sec_def section_defs[] = {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
- BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
+ BPF_EAPROG_SEC("sk_reuseport/migrate", BPF_PROG_TYPE_SK_REUSEPORT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
+ BPF_EAPROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT,
+ BPF_SK_REUSEPORT_SELECT),
SEC_DEF("kprobe/", KPROBE,
.attach_fn = attach_kprobe),
BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
@@ -8884,6 +9149,8 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_ITER,
.is_attach_btf = true,
.attach_fn = attach_iter),
+ SEC_DEF("syscall", SYSCALL,
+ .is_sleepable = true),
BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
BPF_XDP_DEVMAP),
BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
@@ -9015,7 +9282,7 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
char *type_names;
if (!name)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
sec_def = find_sec_def(name);
if (sec_def) {
@@ -9031,7 +9298,7 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
free(type_names);
}
- return -ESRCH;
+ return libbpf_err(-ESRCH);
}
static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
@@ -9173,6 +9440,28 @@ invalid_prog:
#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind)
+{
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ *prefix = BTF_TRACE_PREFIX;
+ *kind = BTF_KIND_TYPEDEF;
+ break;
+ case BPF_LSM_MAC:
+ *prefix = BTF_LSM_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ case BPF_TRACE_ITER:
+ *prefix = BTF_ITER_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ default:
+ *prefix = "";
+ *kind = BTF_KIND_FUNC;
+ }
+}
+
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind)
{
@@ -9193,21 +9482,11 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
static inline int find_attach_btf_id(struct btf *btf, const char *name,
enum bpf_attach_type attach_type)
{
- int err;
+ const char *prefix;
+ int kind;
- if (attach_type == BPF_TRACE_RAW_TP)
- err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
- BTF_KIND_TYPEDEF);
- else if (attach_type == BPF_LSM_MAC)
- err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
- BTF_KIND_FUNC);
- else if (attach_type == BPF_TRACE_ITER)
- err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
- BTF_KIND_FUNC);
- else
- err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
-
- return err;
+ btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
+ return find_btf_by_prefix_kind(btf, prefix, name, kind);
}
int libbpf_find_vmlinux_btf_id(const char *name,
@@ -9217,9 +9496,10 @@ int libbpf_find_vmlinux_btf_id(const char *name,
int err;
btf = libbpf_find_kernel_btf();
- if (IS_ERR(btf)) {
+ err = libbpf_get_error(btf);
+ if (err) {
pr_warn("vmlinux BTF is not found\n");
- return -EINVAL;
+ return libbpf_err(err);
}
err = find_attach_btf_id(btf, name, attach_type);
@@ -9227,7 +9507,7 @@ int libbpf_find_vmlinux_btf_id(const char *name,
pr_warn("%s is not found in vmlinux BTF\n", name);
btf__free(btf);
- return err;
+ return libbpf_err(err);
}
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
@@ -9238,10 +9518,11 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
int err = -EINVAL;
info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
- if (IS_ERR_OR_NULL(info_linear)) {
+ err = libbpf_get_error(info_linear);
+ if (err) {
pr_warn("failed get_prog_info_linear for FD %d\n",
attach_prog_fd);
- return -EINVAL;
+ return err;
}
info = &info_linear->info;
if (!info->btf_id) {
@@ -9306,7 +9587,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
__u32 attach_prog_fd = prog->attach_prog_fd;
const char *name = prog->sec_name, *attach_name;
const struct bpf_sec_def *sec = NULL;
- int i, err;
+ int i, err = 0;
if (!name)
return -EINVAL;
@@ -9341,7 +9622,13 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
}
/* kernel/module BTF ID */
- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ if (prog->obj->gen_loader) {
+ bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
+ *btf_obj_fd = 0;
+ *btf_type_id = 1;
+ } else {
+ err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ }
if (err) {
pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
return err;
@@ -9356,13 +9643,13 @@ int libbpf_attach_type_by_name(const char *name,
int i;
if (!name)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
continue;
if (!section_defs[i].is_attachable)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
*attach_type = section_defs[i].expected_attach_type;
return 0;
}
@@ -9373,17 +9660,17 @@ int libbpf_attach_type_by_name(const char *name,
free(type_names);
}
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
int bpf_map__fd(const struct bpf_map *map)
{
- return map ? map->fd : -EINVAL;
+ return map ? map->fd : libbpf_err(-EINVAL);
}
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
- return map ? &map->def : ERR_PTR(-EINVAL);
+ return map ? &map->def : libbpf_err_ptr(-EINVAL);
}
const char *bpf_map__name(const struct bpf_map *map)
@@ -9399,7 +9686,7 @@ enum bpf_map_type bpf_map__type(const struct bpf_map *map)
int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->def.type = type;
return 0;
}
@@ -9412,7 +9699,7 @@ __u32 bpf_map__map_flags(const struct bpf_map *map)
int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->def.map_flags = flags;
return 0;
}
@@ -9425,7 +9712,7 @@ __u32 bpf_map__numa_node(const struct bpf_map *map)
int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->numa_node = numa_node;
return 0;
}
@@ -9438,7 +9725,7 @@ __u32 bpf_map__key_size(const struct bpf_map *map)
int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->def.key_size = size;
return 0;
}
@@ -9451,7 +9738,7 @@ __u32 bpf_map__value_size(const struct bpf_map *map)
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->def.value_size = size;
return 0;
}
@@ -9470,7 +9757,7 @@ int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv)
{
if (!map)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (map->priv) {
if (map->clear_priv)
@@ -9484,7 +9771,7 @@ int bpf_map__set_priv(struct bpf_map *map, void *priv,
void *bpf_map__priv(const struct bpf_map *map)
{
- return map ? map->priv : ERR_PTR(-EINVAL);
+ return map ? map->priv : libbpf_err_ptr(-EINVAL);
}
int bpf_map__set_initial_value(struct bpf_map *map,
@@ -9492,12 +9779,20 @@ int bpf_map__set_initial_value(struct bpf_map *map,
{
if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
size != map->def.value_size || map->fd >= 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
return 0;
}
+const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+{
+ if (!map->mmaped)
+ return NULL;
+ *psize = map->def.value_size;
+ return map->mmaped;
+}
+
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
@@ -9516,7 +9811,7 @@ __u32 bpf_map__ifindex(const struct bpf_map *map)
int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
if (map->fd >= 0)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
map->map_ifindex = ifindex;
return 0;
}
@@ -9525,11 +9820,11 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
if (!bpf_map_type__is_map_in_map(map->def.type)) {
pr_warn("error: unsupported map type\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->inner_map_fd != -1) {
pr_warn("error: inner_map_fd already specified\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
zfree(&map->inner_map);
map->inner_map_fd = fd;
@@ -9543,7 +9838,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *s, *e;
if (!obj || !obj->maps)
- return NULL;
+ return errno = EINVAL, NULL;
s = obj->maps;
e = obj->maps + obj->nr_maps;
@@ -9551,7 +9846,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
if ((m < s) || (m >= e)) {
pr_warn("error in %s: map handler doesn't belong to object\n",
__func__);
- return NULL;
+ return errno = EINVAL, NULL;
}
idx = (m - obj->maps) + i;
@@ -9590,7 +9885,7 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
if (pos->name && !strcmp(pos->name, name))
return pos;
}
- return NULL;
+ return errno = ENOENT, NULL;
}
int
@@ -9602,12 +9897,23 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
- return ERR_PTR(-ENOTSUP);
+ return libbpf_err_ptr(-ENOTSUP);
}
long libbpf_get_error(const void *ptr)
{
- return PTR_ERR_OR_ZERO(ptr);
+ if (!IS_ERR_OR_NULL(ptr))
+ return 0;
+
+ if (IS_ERR(ptr))
+ errno = -PTR_ERR(ptr);
+
+ /* If ptr == NULL, then errno should already be set by the failing
+ * API, because libbpf never returns NULL on success and it now always
+ * sets errno on error, so no extra errno handling is needed for the
+ * ptr == NULL case.
+ */
+ return -errno;
}
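With libbpf_get_error() reworked this way, callers can use it uniformly for both the legacy ERR_PTR() convention and the new NULL-plus-errno one. A minimal caller-side sketch (prog is assumed to be a loaded bpf_program; illustrative only, not part of the patch):

	struct bpf_link *link;
	long err;

	link = bpf_program__attach(prog);
	err = libbpf_get_error(link);	/* 0 on success, -Exx on failure */
	if (err) {
		fprintf(stderr, "attach failed: %ld\n", err);
		return err;
	}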
int bpf_prog_load(const char *file, enum bpf_prog_type type,
@@ -9633,16 +9939,17 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
int err;
if (!attr)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (!attr->file)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
open_attr.file = attr->file;
open_attr.prog_type = attr->prog_type;
obj = bpf_object__open_xattr(&open_attr);
- if (IS_ERR_OR_NULL(obj))
- return -ENOENT;
+ err = libbpf_get_error(obj);
+ if (err)
+ return libbpf_err(-ENOENT);
bpf_object__for_each_program(prog, obj) {
enum bpf_attach_type attach_type = attr->expected_attach_type;
@@ -9662,7 +9969,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
* didn't provide a fallback type, too bad...
*/
bpf_object__close(obj);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
prog->prog_ifindex = attr->ifindex;
@@ -9680,13 +9987,13 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
if (!first_prog) {
pr_warn("object file doesn't contain bpf program\n");
bpf_object__close(obj);
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
err = bpf_object__load(obj);
if (err) {
bpf_object__close(obj);
- return err;
+ return libbpf_err(err);
}
*pobj = obj;
@@ -9705,7 +10012,10 @@ struct bpf_link {
/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
- return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+ int ret;
+
+ ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+ return libbpf_err_errno(ret);
}
/* Release "ownership" of underlying BPF resource (typically, BPF program
@@ -9738,7 +10048,7 @@ int bpf_link__destroy(struct bpf_link *link)
free(link->pin_path);
free(link);
- return err;
+ return libbpf_err(err);
}
int bpf_link__fd(const struct bpf_link *link)
@@ -9753,7 +10063,7 @@ const char *bpf_link__pin_path(const struct bpf_link *link)
static int bpf_link__detach_fd(struct bpf_link *link)
{
- return close(link->fd);
+ return libbpf_err_errno(close(link->fd));
}
struct bpf_link *bpf_link__open(const char *path)
@@ -9765,13 +10075,13 @@ struct bpf_link *bpf_link__open(const char *path)
if (fd < 0) {
fd = -errno;
pr_warn("failed to open link at %s: %d\n", path, fd);
- return ERR_PTR(fd);
+ return libbpf_err_ptr(fd);
}
link = calloc(1, sizeof(*link));
if (!link) {
close(fd);
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
}
link->detach = &bpf_link__detach_fd;
link->fd = fd;
@@ -9779,7 +10089,7 @@ struct bpf_link *bpf_link__open(const char *path)
link->pin_path = strdup(path);
if (!link->pin_path) {
bpf_link__destroy(link);
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
}
return link;
@@ -9795,22 +10105,22 @@ int bpf_link__pin(struct bpf_link *link, const char *path)
int err;
if (link->pin_path)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
err = make_parent_dir(path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
link->pin_path = strdup(path);
if (!link->pin_path)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (bpf_obj_pin(link->fd, link->pin_path)) {
err = -errno;
zfree(&link->pin_path);
- return err;
+ return libbpf_err(err);
}
pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
@@ -9822,11 +10132,11 @@ int bpf_link__unpin(struct bpf_link *link)
int err;
if (!link->pin_path)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = unlink(link->pin_path);
if (err != 0)
- return -errno;
+ return libbpf_err_errno(err);
pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
zfree(&link->pin_path);
@@ -9842,11 +10152,10 @@ static int bpf_link__detach_perf_event(struct bpf_link *link)
err = -errno;
close(link->fd);
- return err;
+ return libbpf_err(err);
}
-struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
- int pfd)
+struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
{
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
@@ -9855,18 +10164,18 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
if (pfd < 0) {
pr_warn("prog '%s': invalid perf event FD %d\n",
prog->name, pfd);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_perf_event;
link->fd = pfd;
@@ -9878,14 +10187,14 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
if (err == -EPROTO)
pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
prog->name, pfd);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
free(link);
pr_warn("prog '%s': failed to enable pfd %d: %s\n",
prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
return link;
}
@@ -10009,16 +10318,16 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ return libbpf_err_ptr(err);
}
return link;
}
@@ -10051,17 +10360,17 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ return libbpf_err_ptr(err);
}
return link;
}
@@ -10129,16 +10438,16 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
prog->name, tp_category, tp_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
prog->name, tp_category, tp_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ return libbpf_err_ptr(err);
}
return link;
}
@@ -10151,20 +10460,19 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
sec_name = strdup(prog->sec_name);
if (!sec_name)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
/* extract "tp/<category>/<name>" */
tp_cat = sec_name + sec->len;
tp_name = strchr(tp_cat, '/');
if (!tp_name) {
- link = ERR_PTR(-EINVAL);
- goto out;
+ free(sec_name);
+ return libbpf_err_ptr(-EINVAL);
}
*tp_name = '\0';
tp_name++;
link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
-out:
free(sec_name);
return link;
}
@@ -10179,12 +10487,12 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
@@ -10193,7 +10501,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
free(link);
pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
link->fd = pfd;
return link;
@@ -10217,12 +10525,12 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
@@ -10231,7 +10539,7 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
free(link);
pr_warn("prog '%s': failed to attach: %s\n",
prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
link->fd = pfd;
return (struct bpf_link *)link;
@@ -10259,12 +10567,6 @@ static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
return bpf_program__attach_lsm(prog);
}
-static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
-{
- return bpf_program__attach_iter(prog, NULL);
-}
-
static struct bpf_link *
bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
const char *target_name)
@@ -10279,12 +10581,12 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
attach_type = bpf_program__get_expected_attach_type(prog);
@@ -10295,7 +10597,7 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
pr_warn("prog '%s': failed to attach to %s: %s\n",
prog->name, target_name,
libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
- return ERR_PTR(link_fd);
+ return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
return link;
@@ -10328,19 +10630,19 @@ struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
if (!!target_fd != !!attach_func_name) {
pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
if (prog->type != BPF_PROG_TYPE_EXT) {
pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
if (target_fd) {
btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
if (btf_id < 0)
- return ERR_PTR(btf_id);
+ return libbpf_err_ptr(btf_id);
return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
} else {
@@ -10362,7 +10664,7 @@ bpf_program__attach_iter(struct bpf_program *prog,
__u32 target_fd = 0;
if (!OPTS_VALID(opts, bpf_iter_attach_opts))
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
@@ -10370,12 +10672,12 @@ bpf_program__attach_iter(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
@@ -10385,19 +10687,25 @@ bpf_program__attach_iter(struct bpf_program *prog,
free(link);
pr_warn("prog '%s': failed to attach to iterator: %s\n",
prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
- return ERR_PTR(link_fd);
+ return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
return link;
}
+static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
+ struct bpf_program *prog)
+{
+ return bpf_program__attach_iter(prog, NULL);
+}
+
struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
sec_def = find_sec_def(prog->sec_name);
if (!sec_def || !sec_def->attach_fn)
- return ERR_PTR(-ESRCH);
+ return libbpf_err_ptr(-ESRCH);
return sec_def->attach_fn(sec_def, prog);
}
@@ -10420,11 +10728,11 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
int err;
if (!bpf_map__is_struct_ops(map) || map->fd == -1)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
st_ops = map->st_ops;
for (i = 0; i < btf_vlen(st_ops->type); i++) {
@@ -10444,7 +10752,7 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
if (err) {
err = -errno;
free(link);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
link->detach = bpf_link__detach_struct_ops;
@@ -10498,7 +10806,7 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
}
ring_buffer_write_tail(header, data_tail);
- return ret;
+ return libbpf_err(ret);
}
struct perf_buffer;
@@ -10651,7 +10959,7 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
p.lost_cb = opts ? opts->lost_cb : NULL;
p.ctx = opts ? opts->ctx : NULL;
- return __perf_buffer__new(map_fd, page_cnt, &p);
+ return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
struct perf_buffer *
@@ -10667,7 +10975,7 @@ perf_buffer__new_raw(int map_fd, size_t page_cnt,
p.cpus = opts->cpus;
p.map_keys = opts->map_keys;
- return __perf_buffer__new(map_fd, page_cnt, &p);
+ return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
@@ -10888,16 +11196,19 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
int i, cnt, err;
cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
+ if (cnt < 0)
+ return libbpf_err_errno(cnt);
+
for (i = 0; i < cnt; i++) {
struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
pr_warn("error while processing records: %d\n", err);
- return err;
+ return libbpf_err(err);
}
}
- return cnt < 0 ? -errno : cnt;
+ return cnt;
}
/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
@@ -10918,11 +11229,11 @@ int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
struct perf_cpu_buf *cpu_buf;
if (buf_idx >= pb->cpu_cnt)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
cpu_buf = pb->cpu_bufs[buf_idx];
if (!cpu_buf)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
return cpu_buf->fd;
}
@@ -10940,11 +11251,11 @@ int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
struct perf_cpu_buf *cpu_buf;
if (buf_idx >= pb->cpu_cnt)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
cpu_buf = pb->cpu_bufs[buf_idx];
if (!cpu_buf)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
return perf_buffer__process_records(pb, cpu_buf);
}
@@ -10962,7 +11273,7 @@ int perf_buffer__consume(struct perf_buffer *pb)
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
- return err;
+ return libbpf_err(err);
}
}
return 0;
@@ -11074,13 +11385,13 @@ bpf_program__get_prog_info_linear(int fd, __u64 arrays)
void *ptr;
if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
/* step 1: get array dimensions */
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
- return ERR_PTR(-EFAULT);
+ return libbpf_err_ptr(-EFAULT);
}
/* step 2: calculate total size of all arrays */
@@ -11112,7 +11423,7 @@ bpf_program__get_prog_info_linear(int fd, __u64 arrays)
data_len = roundup(data_len, sizeof(__u64));
info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
if (!info_linear)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
/* step 4: fill data to info_linear->info */
info_linear->arrays = arrays;
@@ -11144,7 +11455,7 @@ bpf_program__get_prog_info_linear(int fd, __u64 arrays)
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
free(info_linear);
- return ERR_PTR(-EFAULT);
+ return libbpf_err_ptr(-EFAULT);
}
/* step 6: verify the data */
@@ -11223,26 +11534,26 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
int btf_obj_fd = 0, btf_id = 0, err;
if (!prog || attach_prog_fd < 0 || !attach_func_name)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (prog->obj->loaded)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (attach_prog_fd) {
btf_id = libbpf_find_prog_btf_id(attach_func_name,
attach_prog_fd);
if (btf_id < 0)
- return btf_id;
+ return libbpf_err(btf_id);
} else {
/* load btf_vmlinux, if not yet */
err = bpf_object__load_vmlinux_btf(prog->obj, true);
if (err)
- return err;
+ return libbpf_err(err);
err = find_kernel_btf_id(prog->obj, attach_func_name,
prog->expected_attach_type,
&btf_obj_fd, &btf_id);
if (err)
- return err;
+ return libbpf_err(err);
}
prog->attach_btf_id = btf_id;
@@ -11341,7 +11652,7 @@ int libbpf_num_possible_cpus(void)
err = parse_cpu_mask_file(fcpu, &mask, &n);
if (err)
- return err;
+ return libbpf_err(err);
tmp_cpus = 0;
for (i = 0; i < n; i++) {
@@ -11361,7 +11672,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
.object_name = s->name,
);
struct bpf_object *obj;
- int i;
+ int i, err;
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
@@ -11376,10 +11687,11 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- if (IS_ERR(obj)) {
- pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
- s->name, PTR_ERR(obj));
- return PTR_ERR(obj);
+ err = libbpf_get_error(obj);
+ if (err) {
+ pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
+ s->name, err);
+ return libbpf_err(err);
}
*s->obj = obj;
@@ -11392,7 +11704,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
pr_warn("failed to find skeleton map '%s'\n", name);
- return -ESRCH;
+ return libbpf_err(-ESRCH);
}
/* externs shouldn't be pre-setup from user code */
@@ -11407,7 +11719,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
pr_warn("failed to find skeleton program '%s'\n", name);
- return -ESRCH;
+ return libbpf_err(-ESRCH);
}
}
@@ -11421,7 +11733,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
err = bpf_object__load(*s->obj);
if (err) {
pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
- return err;
+ return libbpf_err(err);
}
for (i = 0; i < s->map_cnt; i++) {
@@ -11460,7 +11772,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
*mmaped = NULL;
pr_warn("failed to re-mmap() map '%s': %d\n",
bpf_map__name(map), err);
- return err;
+ return libbpf_err(err);
}
}
@@ -11469,7 +11781,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
- int i;
+ int i, err;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program *prog = *s->progs[i].prog;
@@ -11484,10 +11796,11 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
continue;
*link = sec_def->attach_fn(sec_def, prog);
- if (IS_ERR(*link)) {
- pr_warn("failed to auto-attach program '%s': %ld\n",
- bpf_program__name(prog), PTR_ERR(*link));
- return PTR_ERR(*link);
+ err = libbpf_get_error(*link);
+ if (err) {
+ pr_warn("failed to auto-attach program '%s': %d\n",
+ bpf_program__name(prog), err);
+ return libbpf_err(err);
}
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index bec4e6a6e31d..6e61342ba56c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -18,6 +18,7 @@
#include <linux/bpf.h>
#include "libbpf_common.h"
+#include "libbpf_legacy.h"
#ifdef __cplusplus
extern "C" {
@@ -471,6 +472,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
+LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
@@ -498,6 +500,7 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
+/* XDP-related API */
struct xdp_link_info {
__u32 prog_id;
__u32 drv_prog_id;
@@ -520,6 +523,49 @@ LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags);
+/* TC-related API */
+enum bpf_tc_attach_point {
+ BPF_TC_INGRESS = 1 << 0,
+ BPF_TC_EGRESS = 1 << 1,
+ BPF_TC_CUSTOM = 1 << 2,
+};
+
+#define BPF_TC_PARENT(a, b) \
+ ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))
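/* illustrative example (not from the patch): BPF_TC_PARENT(1, 0) yields
 * 0x00010000, the usual tc "1:0" major:minor handle encoding, which is
 * the form the parent field expects for BPF_TC_CUSTOM attach points
 */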
+
+enum bpf_tc_flags {
+ BPF_TC_F_REPLACE = 1 << 0,
+};
+
+struct bpf_tc_hook {
+ size_t sz;
+ int ifindex;
+ enum bpf_tc_attach_point attach_point;
+ __u32 parent;
+ size_t :0;
+};
+#define bpf_tc_hook__last_field parent
+
+struct bpf_tc_opts {
+ size_t sz;
+ int prog_fd;
+ __u32 flags;
+ __u32 prog_id;
+ __u32 handle;
+ __u32 priority;
+ size_t :0;
+};
+#define bpf_tc_opts__last_field priority
+
+LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
+
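A minimal attach sketch against this API (ifindex and prog_fd are placeholder values; DECLARE_LIBBPF_OPTS fills in the sz fields):

	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
			    .prog_fd = prog_fd);
	int err;

	err = bpf_tc_hook_create(&hook);	/* sets up the clsact qdisc */
	if (err && err != -EEXIST)		/* a pre-existing hook is fine */
		return err;
	err = bpf_tc_attach(&hook, &opts);	/* fills opts.prog_id on success */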
/* Ring buffer APIs */
struct ring_buffer;
@@ -756,6 +802,18 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
+struct gen_loader_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+ const char *data;
+ const char *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+};
+
+#define gen_loader_opts__last_field insns_sz
+LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
+ struct gen_loader_opts *opts);
+
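A rough sketch of how a skeleton generator might drive this API (assumed flow; obj is an already-opened bpf_object and error paths are shortened):

	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	int err;

	err = bpf_object__gen_loader(obj, &gen);
	if (err)
		return err;
	err = bpf_object__load(obj);	/* records loader steps instead of doing syscalls */
	if (err)
		return err;
	/* gen.insns/gen.insns_sz and gen.data/gen.data_sz now hold the
	 * generated loader program and its data blob
	 */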
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
@@ -768,10 +826,18 @@ struct bpf_linker_opts {
};
#define bpf_linker_opts__last_field sz
+struct bpf_linker_file_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+};
+#define bpf_linker_file_opts__last_field sz
+
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
-LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename);
+LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
+ const char *filename,
+ const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b9b29baf1df8..944c99d1ded3 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -361,4 +361,17 @@ LIBBPF_0.4.0 {
bpf_linker__new;
bpf_map__inner_map;
bpf_object__set_kversion;
+ bpf_tc_attach;
+ bpf_tc_detach;
+ bpf_tc_hook_create;
+ bpf_tc_hook_destroy;
+ bpf_tc_query;
} LIBBPF_0.3.0;
+
+LIBBPF_0.5.0 {
+ global:
+ bpf_map__initial_value;
+ bpf_map_lookup_and_delete_elem_flags;
+ bpf_object__gen_loader;
+ libbpf_set_strict_mode;
+} LIBBPF_0.4.0;
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index 0afb51f7a919..96f67a772a1b 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -12,6 +12,7 @@
#include <string.h>
#include "libbpf.h"
+#include "libbpf_internal.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
@@ -39,7 +40,7 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
int libbpf_strerror(int err, char *buf, size_t size)
{
if (!buf || !size)
- return -1;
+ return libbpf_err(-EINVAL);
err = err > 0 ? err : -err;
@@ -48,7 +49,7 @@ int libbpf_strerror(int err, char *buf, size_t size)
ret = strerror_r(err, buf, size);
buf[size - 1] = '\0';
- return ret;
+ return libbpf_err_errno(ret);
}
if (err < __LIBBPF_ERRNO__END) {
@@ -62,5 +63,5 @@ int libbpf_strerror(int err, char *buf, size_t size)
snprintf(buf, size, "Unknown libbpf error %d", err);
buf[size - 1] = '\0';
- return -1;
+ return libbpf_err(-ENOENT);
}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index ee426226928f..016ca7cb4f8a 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -11,6 +11,9 @@
#include <stdlib.h>
#include <limits.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "libbpf_legacy.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
@@ -28,6 +31,12 @@
#ifndef R_BPF_64_64
#define R_BPF_64_64 1
#endif
+#ifndef R_BPF_64_ABS64
+#define R_BPF_64_ABS64 2
+#endif
+#ifndef R_BPF_64_ABS32
+#define R_BPF_64_ABS32 3
+#endif
#ifndef R_BPF_64_32
#define R_BPF_64_32 10
#endif
@@ -41,6 +50,11 @@
#define ELF_C_READ_MMAP ELF_C_READ
#endif
+/* Older libelf versions all end up in this expression, for both 32 and 64 bit */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
@@ -258,6 +272,8 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind);
struct btf_ext_info {
/*
@@ -428,4 +444,54 @@ int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ct
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
+extern enum libbpf_strict_mode libbpf_mode;
+
+/* handle directly returned errors */
+static inline int libbpf_err(int ret)
+{
+ if (ret < 0)
+ errno = -ret;
+ return ret;
+}
+
+/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
+ * strict mode settings
+ */
+static inline int libbpf_err_errno(int ret)
+{
+ if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
+ /* errno is already assumed to be set on error */
+ return ret < 0 ? -errno : ret;
+
+ /* legacy: on error return -1 directly and don't touch errno */
+ return ret;
+}
+
+/* handle errors for pointer-returning APIs; err is assumed to always be < 0 */
+static inline void *libbpf_err_ptr(int err)
+{
+ /* set errno on error; this doesn't break anything */
+ errno = -err;
+
+ if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
+ return NULL;
+
+ /* legacy: encode err as ptr */
+ return ERR_PTR(err);
+}
+
+/* handle the result of pointer-returning APIs (possibly ERR_PTR-encoded) */
+static inline void *libbpf_ptr(void *ret)
+{
+ /* set errno on error; this doesn't break anything */
+ if (IS_ERR(ret))
+ errno = -PTR_ERR(ret);
+
+ if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
+ return IS_ERR(ret) ? NULL : ret;
+
+ /* legacy: pass-through original pointer */
+ return ret;
+}
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
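Taken together, a typical int-returning libbpf API built on these helpers looks roughly like the following sketch (bpf_foo_bar() and do_foo() are hypothetical names, not part of the patch):

	int bpf_foo_bar(int fd)
	{
		int ret;

		if (fd < 0)
			return libbpf_err(-EINVAL);	/* errno = EINVAL, returns -EINVAL */

		ret = do_foo(fd);		/* assumed to return -1 + errno on failure */
		return libbpf_err_errno(ret);	/* -Exx in strict mode, else -1 */
	}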
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
new file mode 100644
index 000000000000..df0d03dcffab
--- /dev/null
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Libbpf legacy APIs (either discouraged or deprecated, as mentioned in [0])
+ *
+ * [0] https://docs.google.com/document/d/1UyjTZuPFWiPFyKk1tV5an11_iaRuec6U-ZESZ54nNTY
+ *
+ * Copyright (C) 2021 Facebook
+ */
+#ifndef __LIBBPF_LEGACY_BPF_H
+#define __LIBBPF_LEGACY_BPF_H
+
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "libbpf_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum libbpf_strict_mode {
+ /* Turn on all supported strict features of libbpf to simulate libbpf
+ * v1.0 behavior.
+ * This will be the default behavior in libbpf v1.0.
+ */
+ LIBBPF_STRICT_ALL = 0xffffffff,
+
+ /*
+ * Disable any libbpf 1.0 behaviors. This is the default before libbpf
+ * v1.0. It won't be supported anymore in v1.0; please update your
+ * code so that it handles LIBBPF_STRICT_ALL mode before libbpf v1.0.
+ */
+ LIBBPF_STRICT_NONE = 0x00,
+ /*
+ * Return NULL pointers on error, not ERR_PTR(err).
+ * Additionally, libbpf always sets errno to the corresponding Exx
+ * (positive) error code.
+ */
+ LIBBPF_STRICT_CLEAN_PTRS = 0x01,
+ /*
+ * Return actual error codes from low-level APIs directly, not just -1.
+ * Additionally, libbpf always sets errno to the corresponding Exx
+ * (positive) error code.
+ */
+ LIBBPF_STRICT_DIRECT_ERRS = 0x02,
+
+ __LIBBPF_STRICT_LAST,
+};
+
+LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_LEGACY_BPF_H */
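A minimal application-side sketch of opting into the v1.0 semantics (the object path is a placeholder):

	#include <errno.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		struct bpf_object *obj;

		libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

		obj = bpf_object__open_file("prog.bpf.o", NULL);
		if (!obj) {	/* clean NULL + errno, no ERR_PTR() decoding */
			fprintf(stderr, "open failed: %d\n", errno);
			return 1;
		}
		bpf_object__close(obj);
		return 0;
	}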
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9de084b1c699..10911a8cad0f 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -158,7 +158,9 @@ struct bpf_linker {
static int init_output_elf(struct bpf_linker *linker, const char *file);
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj);
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec);
@@ -218,16 +220,16 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
int err;
if (!OPTS_VALID(opts, bpf_linker_opts))
- return NULL;
+ return errno = EINVAL, NULL;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn_elf("libelf initialization failed");
- return NULL;
+ return errno = EINVAL, NULL;
}
linker = calloc(1, sizeof(*linker));
if (!linker)
- return NULL;
+ return errno = ENOMEM, NULL;
linker->fd = -1;
@@ -239,7 +241,7 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
err_out:
bpf_linker__free(linker);
- return NULL;
+ return errno = -err, NULL;
}
static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name)
@@ -435,15 +437,19 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
-int bpf_linker__add_file(struct bpf_linker *linker, const char *filename)
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts)
{
struct src_obj obj = {};
int err = 0;
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
if (!linker->elf)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
- err = err ?: linker_load_obj_file(linker, filename, &obj);
+ err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@@ -461,7 +467,7 @@ int bpf_linker__add_file(struct bpf_linker *linker, const char *filename)
if (obj.fd >= 0)
close(obj.fd);
- return err;
+ return libbpf_err(err);
}
static bool is_dwarf_sec_name(const char *name)
@@ -529,7 +535,9 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj)
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
const int host_endianness = ELFDATA2LSB;
@@ -884,7 +892,8 @@ static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *se
size_t sym_idx = ELF64_R_SYM(relo->r_info);
size_t sym_type = ELF64_R_TYPE(relo->r_info);
- if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32) {
+ if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32 &&
+ sym_type != R_BPF_64_ABS64 && sym_type != R_BPF_64_ABS32) {
pr_warn("ELF relo #%d in section #%zu has unexpected type %zu in %s\n",
i, sec->sec_idx, sym_type, obj->filename);
return -EINVAL;
@@ -1780,7 +1789,7 @@ static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
/* libelf doesn't provide setters for ST_VISIBILITY,
* but it is stored in the lower 2 bits of st_other
*/
- sym->st_other &= 0x03;
+ sym->st_other &= ~0x03;
sym->st_other |= sym_vis;
}
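/* worked example of the mask fix above (illustrative values): with
 * st_other = 0x43, i.e. an arch-specific STO_* flag in the upper bits
 * plus stale visibility STV_PROTECTED (3) in the low two, the old
 * "&= 0x03" discarded the flag and kept the stale visibility, while
 * "&= ~0x03" keeps 0x40 and clears just the two visibility bits before
 * OR-ing in sym_vis
 */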
@@ -2539,11 +2548,11 @@ int bpf_linker__finalize(struct bpf_linker *linker)
int err, i;
if (!linker->elf)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = finalize_btf(linker);
if (err)
- return err;
+ return libbpf_err(err);
/* Finalize strings */
strs_sz = strset__data_size(linker->strtab_strs);
@@ -2575,14 +2584,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
err = -errno;
pr_warn_elf("failed to finalize ELF layout");
- return err;
+ return libbpf_err(err);
}
/* Write out final ELF contents */
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
err = -errno;
pr_warn_elf("failed to write ELF contents");
- return err;
+ return libbpf_err(err);
}
elf_end(linker->elf);
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index d2cb28e9ef52..39f25e09b51e 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -4,7 +4,10 @@
#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
+#include <arpa/inet.h>
#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <errno.h>
@@ -73,9 +76,20 @@ cleanup:
return ret;
}
-static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
- __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
- void *cookie)
+static void libbpf_netlink_close(int sock)
+{
+ close(sock);
+}
+
+enum {
+ NL_CONT,
+ NL_NEXT,
+ NL_DONE,
+};
+
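/* verdicts a parse callback may return: NL_CONT keeps walking the current
 * receive buffer, NL_NEXT restarts recv() for the next message batch,
 * NL_DONE ends the dump successfully, and any other value is propagated
 * as an error
 */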
+static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
+ __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
+ void *cookie)
{
bool multipart = true;
struct nlmsgerr *err;
@@ -84,6 +98,7 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
int len, ret;
while (multipart) {
+start:
multipart = false;
len = recv(sock, buf, sizeof(buf), 0);
if (len < 0) {
@@ -121,8 +136,16 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
}
if (_fn) {
ret = _fn(nh, fn, cookie);
- if (ret)
+ switch (ret) {
+ case NL_CONT:
+ break;
+ case NL_NEXT:
+ goto start;
+ case NL_DONE:
+ return 0;
+ default:
return ret;
+ }
}
}
}
@@ -131,95 +154,92 @@ done:
return ret;
}
-static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
- __u32 flags)
+static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
+ __dump_nlmsg_t parse_msg,
+ libbpf_dump_nlmsg_t parse_attr,
+ void *cookie)
{
- int sock, seq = 0, ret;
- struct nlattr *nla, *nla_xdp;
- struct {
- struct nlmsghdr nh;
- struct ifinfomsg ifinfo;
- char attrbuf[64];
- } req;
__u32 nl_pid = 0;
+ int sock, ret;
sock = libbpf_netlink_open(&nl_pid);
if (sock < 0)
return sock;
+ req->nh.nlmsg_pid = 0;
+ req->nh.nlmsg_seq = time(NULL);
+
+ if (send(sock, req, req->nh.nlmsg_len, 0) < 0) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = libbpf_netlink_recv(sock, nl_pid, req->nh.nlmsg_seq,
+ parse_msg, parse_attr, cookie);
+out:
+ libbpf_netlink_close(sock);
+ return ret;
+}
+
+static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
+ __u32 flags)
+{
+ struct nlattr *nla;
+ int ret;
+ struct libbpf_nla_req req;
+
memset(&req, 0, sizeof(req));
- req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
- req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.nh.nlmsg_type = RTM_SETLINK;
- req.nh.nlmsg_pid = 0;
- req.nh.nlmsg_seq = ++seq;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_SETLINK;
req.ifinfo.ifi_family = AF_UNSPEC;
- req.ifinfo.ifi_index = ifindex;
-
- /* started nested attribute for XDP */
- nla = (struct nlattr *)(((char *)&req)
- + NLMSG_ALIGN(req.nh.nlmsg_len));
- nla->nla_type = NLA_F_NESTED | IFLA_XDP;
- nla->nla_len = NLA_HDRLEN;
-
- /* add XDP fd */
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
- nla->nla_len += nla_xdp->nla_len;
-
- /* if user passed in any flags, add those too */
+ req.ifinfo.ifi_index = ifindex;
+
+ nla = nlattr_begin_nested(&req, IFLA_XDP);
+ if (!nla)
+ return -EMSGSIZE;
+ ret = nlattr_add(&req, IFLA_XDP_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
if (flags) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FLAGS;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
- nla->nla_len += nla_xdp->nla_len;
+ ret = nlattr_add(&req, IFLA_XDP_FLAGS, &flags, sizeof(flags));
+ if (ret < 0)
+ return ret;
}
-
if (flags & XDP_FLAGS_REPLACE) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
- nla->nla_len += nla_xdp->nla_len;
+ ret = nlattr_add(&req, IFLA_XDP_EXPECTED_FD, &old_fd,
+ sizeof(old_fd));
+ if (ret < 0)
+ return ret;
}
+ nlattr_end_nested(&req, nla);
- req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
-
- if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
- ret = -errno;
- goto cleanup;
- }
- ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL);
-
-cleanup:
- close(sock);
- return ret;
+ return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
}
int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
const struct bpf_xdp_set_link_opts *opts)
{
- int old_fd = -1;
+ int old_fd = -1, ret;
if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (OPTS_HAS(opts, old_fd)) {
old_fd = OPTS_GET(opts, old_fd, -1);
flags |= XDP_FLAGS_REPLACE;
}
- return __bpf_set_link_xdp_fd_replace(ifindex, fd,
- old_fd,
- flags);
+ ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
+ return libbpf_err(ret);
}
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
- return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
+ int ret;
+
+ ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
+ return libbpf_err(ret);
}
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
@@ -231,6 +251,7 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+
if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
@@ -282,34 +303,33 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
-static int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
-
int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags)
{
struct xdp_id_md xdp_id = {};
- int sock, ret;
- __u32 nl_pid = 0;
__u32 mask;
+ int ret;
+ struct libbpf_nla_req req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nh.nlmsg_type = RTM_GETLINK,
+ .nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifinfo.ifi_family = AF_PACKET,
+ };
if (flags & ~XDP_FLAGS_MASK || !info_size)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
/* Check whether the single {HW,DRV,SKB} mode is set */
flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE);
mask = flags - 1;
if (flags && flags & mask)
- return -EINVAL;
-
- sock = libbpf_netlink_open(&nl_pid);
- if (sock < 0)
- return sock;
+ return libbpf_err(-EINVAL);
xdp_id.ifindex = ifindex;
xdp_id.flags = flags;
- ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
+ ret = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
+ get_xdp_info, &xdp_id);
if (!ret) {
size_t sz = min(info_size, sizeof(xdp_id.info));
@@ -317,8 +337,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
memset((void *) info + sz, 0, info_size - sz);
}
- close(sock);
- return ret;
+ return libbpf_err(ret);
}
static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
@@ -346,27 +365,394 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
if (!ret)
*prog_id = get_xdp_id(&info, flags);
- return ret;
+ return libbpf_err(ret);
}
-int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
+typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
+
+static int clsact_config(struct libbpf_nla_req *req)
{
- struct {
- struct nlmsghdr nlh;
- struct ifinfomsg ifm;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
- .nlh.nlmsg_type = RTM_GETLINK,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .ifm.ifi_family = AF_PACKET,
- };
- int seq = time(NULL);
+ req->tc.tcm_parent = TC_H_CLSACT;
+ req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
+
+ return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact"));
+}
+
+static int attach_point_to_config(struct bpf_tc_hook *hook,
+ qdisc_config_t *config)
+{
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ if (OPTS_GET(hook, parent, 0))
+ return -EINVAL;
+ *config = &clsact_config;
+ return 0;
+ case BPF_TC_CUSTOM:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point,
+ __u32 *parent)
+{
+ switch (attach_point) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ if (*parent)
+ return -EINVAL;
+ *parent = TC_H_MAKE(TC_H_CLSACT,
+ attach_point == BPF_TC_INGRESS ?
+ TC_H_MIN_INGRESS : TC_H_MIN_EGRESS);
+ break;
+ case BPF_TC_CUSTOM:
+ if (!*parent)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
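
tc_get_tcm_parent() folds the attach point into the netlink parent handle with TC_H_MAKE(). A standalone sketch of the values it produces for the two clsact hooks (constants come from <linux/pkt_sched.h>):

#include <stdio.h>
#include <linux/pkt_sched.h>

int main(void)
{
	unsigned int ingress = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
	unsigned int egress  = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);

	/* Prints fffffff2 and fffffff3, i.e. ffff:fff2 and ffff:fff3
	 * in tc(8) notation: the clsact pseudo-qdisc's two hooks. */
	printf("ingress %x egress %x\n", ingress, egress);
	return 0;
}
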
+
+static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
+{
+ qdisc_config_t config;
+ int ret;
+ struct libbpf_nla_req req;
+
+ ret = attach_point_to_config(hook, &config);
+ if (ret < 0)
+ return ret;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
+ req.nh.nlmsg_type = cmd;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
+
+ ret = config(&req);
+ if (ret < 0)
+ return ret;
+
+ return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+}
+
+static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
+{
+ return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL);
+}
+
+static int tc_qdisc_delete(struct bpf_tc_hook *hook)
+{
+ return tc_qdisc_modify(hook, RTM_DELQDISC, 0);
+}
+
+int bpf_tc_hook_create(struct bpf_tc_hook *hook)
+{
+ int ret;
+
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return libbpf_err(-EINVAL);
+
+ ret = tc_qdisc_create_excl(hook);
+ return libbpf_err(ret);
+}
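
A usage sketch for the new hook API (the ifindex is an assumption; -EEXIST is tolerated because the qdisc is created with NLM_F_EXCL and may already be installed):

#include <bpf/libbpf.h>

static int setup_clsact(int ifindex)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
			    .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
	int err;

	err = bpf_tc_hook_create(&hook);
	/* -EEXIST means a clsact qdisc was already present, which is
	 * typically fine for callers that only need the hooks. */
	if (err && err != -EEXIST)
		return err;
	return 0;
}
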
+
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush);
+
+int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
+{
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return libbpf_err(-EINVAL);
+
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ return libbpf_err(__bpf_tc_detach(hook, NULL, true));
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ return libbpf_err(tc_qdisc_delete(hook));
+ case BPF_TC_CUSTOM:
+ return libbpf_err(-EOPNOTSUPP);
+ default:
+ return libbpf_err(-EINVAL);
+ }
+}
+
+struct bpf_cb_ctx {
+ struct bpf_tc_opts *opts;
+ bool processed;
+};
+
+static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb,
+ bool unicast)
+{
+ struct nlattr *tbb[TCA_BPF_MAX + 1];
+ struct bpf_cb_ctx *info = cookie;
+
+ if (!info || !info->opts)
+ return -EINVAL;
+ if (unicast && info->processed)
+ return -EINVAL;
+ if (!tb[TCA_OPTIONS])
+ return NL_CONT;
+
+ libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL);
+ if (!tbb[TCA_BPF_ID])
+ return -EINVAL;
+
+ OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID]));
+ OPTS_SET(info->opts, handle, tc->tcm_handle);
+ OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16);
+
+ info->processed = true;
+ return unicast ? NL_NEXT : NL_DONE;
+}
+
+static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct tcmsg *tc = NLMSG_DATA(nh);
+ struct nlattr *tb[TCA_MAX + 1];
+
+ libbpf_nla_parse(tb, TCA_MAX,
+ (struct nlattr *)((void *)tc + NLMSG_ALIGN(sizeof(*tc))),
+ NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL);
+ if (!tb[TCA_KIND])
+ return NL_CONT;
+ return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO);
+}
+
+static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ char name[256];
+ int len, ret;
+
+ ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (ret < 0)
+ return ret;
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ ret = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
+ len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id);
+ if (len < 0)
return -errno;
+ if (len >= sizeof(name))
+ return -ENAMETOOLONG;
+ return nlattr_add(req, TCA_BPF_NAME, name, len + 1);
+}
+
+int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct libbpf_nla_req req;
+ struct nlattr *nla;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return libbpf_err(-EINVAL);
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || !prog_fd || prog_id)
+ return libbpf_err(-EINVAL);
+ if (priority > UINT16_MAX)
+ return libbpf_err(-EINVAL);
+ if (flags & ~BPF_TC_F_REPLACE)
+ return libbpf_err(-EINVAL);
+
+ flags = (flags & BPF_TC_F_REPLACE) ? NLM_F_REPLACE : NLM_F_EXCL;
+ protocol = ETH_P_ALL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE |
+ NLM_F_ECHO | flags;
+ req.nh.nlmsg_type = RTM_NEWTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return libbpf_err(ret);
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return libbpf_err(ret);
+ nla = nlattr_begin_nested(&req, TCA_OPTIONS);
+ if (!nla)
+ return libbpf_err(-EMSGSIZE);
+ ret = tc_add_fd_and_name(&req, prog_fd);
+ if (ret < 0)
+ return libbpf_err(ret);
+ bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
+ ret = nlattr_add(&req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags));
+ if (ret < 0)
+ return libbpf_err(ret);
+ nlattr_end_nested(&req, nla);
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ if (ret < 0)
+ return libbpf_err(ret);
+ if (!info.processed)
+ return libbpf_err(-ENOENT);
+ return ret;
+}
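
bpf_tc_attach() above sends RTM_NEWTFILTER with NLM_F_ECHO so the kernel's reply can fill in attributes the caller left unset. A hedged usage sketch (prog_fd is assumed to be a loaded BPF_PROG_TYPE_SCHED_CLS program):

#include <bpf/libbpf.h>

static int attach_tc_ingress(int ifindex, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1,
			    .priority = 1, .prog_fd = prog_fd);
	int err;

	err = bpf_tc_attach(&hook, &opts);
	if (err)
		return err;
	/* The echoed reply populated opts.prog_id via get_tc_info(). */
	return (int)opts.prog_id;
}
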
+
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush)
+{
+ __u32 protocol = 0, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct libbpf_nla_req req;
+
+ if (!hook ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return -EINVAL;
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id)
+ return -EINVAL;
+ if (priority > UINT16_MAX)
+ return -EINVAL;
+ if (!flush) {
+ if (!handle || !priority)
+ return -EINVAL;
+ protocol = ETH_P_ALL;
+ } else {
+ if (handle || priority)
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_DELTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ if (!flush) {
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+ }
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return ret;
+ req.tc.tcm_parent = parent;
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
- dump_link_nlmsg, cookie);
+ if (!flush) {
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return ret;
+ }
+
+ return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+}
+
+int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts)
+{
+ int ret;
+
+ if (!opts)
+ return libbpf_err(-EINVAL);
+
+ ret = __bpf_tc_detach(hook, opts, false);
+ return libbpf_err(ret);
+}
+
+int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct libbpf_nla_req req;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return libbpf_err(-EINVAL);
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id ||
+ !handle || !priority)
+ return libbpf_err(-EINVAL);
+ if (priority > UINT16_MAX)
+ return libbpf_err(-EINVAL);
+
+ protocol = ETH_P_ALL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_GETTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return libbpf_err(ret);
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return libbpf_err(ret);
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ if (ret < 0)
+ return libbpf_err(ret);
+ if (!info.processed)
+ return libbpf_err(-ENOENT);
+ return ret;
}
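
bpf_tc_query() and bpf_tc_detach() pair naturally, but note the validation above: detach rejects a non-zero prog_id, so an opts struct that went through a query must be scrubbed first. A sketch, using the same hypothetical handle/priority as the attach example:

#include <bpf/libbpf.h>

static int remove_tc_ingress(int ifindex)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
	int err;

	err = bpf_tc_query(&hook, &opts);	/* fills opts.prog_id */
	if (err)
		return err;

	/* __bpf_tc_detach() insists on prog_id == 0, so clear it. */
	opts.prog_id = 0;
	return bpf_tc_detach(&hook, &opts);
}
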
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index b607fa9852b1..f57e77a6e40f 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -27,7 +27,7 @@ static struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
int totlen = NLA_ALIGN(nla->nla_len);
*remaining -= totlen;
- return (struct nlattr *) ((char *) nla + totlen);
+ return (struct nlattr *)((void *)nla + totlen);
}
static int nla_ok(const struct nlattr *nla, int remaining)
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 6cc3ac91690f..4d15ae2ff812 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -10,7 +10,11 @@
#define __LIBBPF_NLATTR_H
#include <stdint.h>
+#include <string.h>
+#include <errno.h>
#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@@ -49,6 +53,15 @@ struct libbpf_nla_policy {
uint16_t maxlen;
};
+struct libbpf_nla_req {
+ struct nlmsghdr nh;
+ union {
+ struct ifinfomsg ifinfo;
+ struct tcmsg tc;
+ };
+ char buf[128];
+};
+
/**
* @ingroup attr
* Iterate over a stream of attributes
@@ -68,7 +81,7 @@ struct libbpf_nla_policy {
*/
static inline void *libbpf_nla_data(const struct nlattr *nla)
{
- return (char *) nla + NLA_HDRLEN;
+ return (void *)nla + NLA_HDRLEN;
}
static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
@@ -103,4 +116,49 @@ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);
+static inline struct nlattr *nla_data(struct nlattr *nla)
+{
+ return (struct nlattr *)((void *)nla + NLA_HDRLEN);
+}
+
+static inline struct nlattr *req_tail(struct libbpf_nla_req *req)
+{
+ return (struct nlattr *)((void *)req + NLMSG_ALIGN(req->nh.nlmsg_len));
+}
+
+static inline int nlattr_add(struct libbpf_nla_req *req, int type,
+ const void *data, int len)
+{
+ struct nlattr *nla;
+
+ if (NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > sizeof(*req))
+ return -EMSGSIZE;
+ if (!!data != !!len)
+ return -EINVAL;
+
+ nla = req_tail(req);
+ nla->nla_type = type;
+ nla->nla_len = NLA_HDRLEN + len;
+ if (data)
+ memcpy(nla_data(nla), data, len);
+ req->nh.nlmsg_len = NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(nla->nla_len);
+ return 0;
+}
+
+static inline struct nlattr *nlattr_begin_nested(struct libbpf_nla_req *req, int type)
+{
+ struct nlattr *tail;
+
+ tail = req_tail(req);
+ if (nlattr_add(req, type | NLA_F_NESTED, NULL, 0))
+ return NULL;
+ return tail;
+}
+
+static inline void nlattr_end_nested(struct libbpf_nla_req *req,
+ struct nlattr *tail)
+{
+ tail->nla_len = (void *)req_tail(req) - (void *)tail;
+}
+
#endif /* __LIBBPF_NLATTR_H */
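
The fixed-size libbpf_nla_req plus the helpers above replace ad-hoc buffer arithmetic when composing requests. A sketch of the nesting pattern as bpf_tc_attach() uses it (TCA_* constants come from <linux/pkt_cls.h> and <linux/rtnetlink.h>; fill_bpf_opts is a hypothetical name):

#include <linux/pkt_cls.h>
#include "nlattr.h"

static int fill_bpf_opts(struct libbpf_nla_req *req, int fd)
{
	struct nlattr *nest;
	int err;

	nest = nlattr_begin_nested(req, TCA_OPTIONS);
	if (!nest)
		return -EMSGSIZE;	/* the 128-byte buf is exhausted */
	err = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd));
	if (err < 0)
		return err;
	/* Patch the nest header's nla_len to span the payload. */
	nlattr_end_nested(req, nest);
	return 0;
}
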
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 1d80ad4e0de8..8bc117bcc7bc 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -69,23 +69,23 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
err = -errno;
pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
map_fd, err);
- return err;
+ return libbpf_err(err);
}
if (info.type != BPF_MAP_TYPE_RINGBUF) {
pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
map_fd);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
if (!tmp)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
rb->rings = tmp;
tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
if (!tmp)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
rb->events = tmp;
r = &rb->rings[rb->ring_cnt];
@@ -103,7 +103,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
err = -errno;
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
map_fd, err);
- return err;
+ return libbpf_err(err);
}
r->consumer_pos = tmp;
@@ -118,7 +118,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
ringbuf_unmap_ring(rb, r);
pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
map_fd, err);
- return err;
+ return libbpf_err(err);
}
r->producer_pos = tmp;
r->data = tmp + rb->page_size;
@@ -133,7 +133,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
ringbuf_unmap_ring(rb, r);
pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
map_fd, err);
- return err;
+ return libbpf_err(err);
}
rb->ring_cnt++;
@@ -165,11 +165,11 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
int err;
if (!OPTS_VALID(opts, ring_buffer_opts))
- return NULL;
+ return errno = EINVAL, NULL;
rb = calloc(1, sizeof(*rb));
if (!rb)
- return NULL;
+ return errno = ENOMEM, NULL;
rb->page_size = getpagesize();
@@ -188,7 +188,7 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
err_out:
ring_buffer__free(rb);
- return NULL;
+ return errno = -err, NULL;
}
static inline int roundup_len(__u32 len)
@@ -260,7 +260,7 @@ int ring_buffer__consume(struct ring_buffer *rb)
err = ringbuf_process_ring(ring);
if (err < 0)
- return err;
+ return libbpf_err(err);
res += err;
}
if (res > INT_MAX)
@@ -279,7 +279,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
if (cnt < 0)
- return -errno;
+ return libbpf_err(-errno);
for (i = 0; i < cnt; i++) {
__u32 ring_id = rb->events[i].data.fd;
@@ -287,7 +287,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
err = ringbuf_process_ring(ring);
if (err < 0)
- return err;
+ return libbpf_err(err);
res += err;
}
if (res > INT_MAX)
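
With these changes the NULL-returning constructors also set errno, so callers no longer need out-parameters to learn why creation failed. A usage sketch (map_fd is assumed to be a BPF_MAP_TYPE_RINGBUF map fd):

#include <errno.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	return 0;	/* consume the record and keep going */
}

static int poll_once(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;	/* errno set by the new convention */

	err = ring_buffer__poll(rb, 100 /* ms */);
	ring_buffer__free(rb);
	return err;
}
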
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
new file mode 100644
index 000000000000..b22b50c1b173
--- /dev/null
+++ b/tools/lib/bpf/skel_internal.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __SKEL_INTERNAL_H
+#define __SKEL_INTERNAL_H
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+
+/* This file is a base header for auto-generated *.lskel.h files.
+ * Its contents will change and may become part of auto-generation in the future.
+ *
+ * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature-dependent
+ * and will change from one version of libbpf to another, depending on the
+ * features requested during loader program generation.
+ */
+struct bpf_map_desc {
+ union {
+ /* input for the loader prog */
+ struct {
+ __aligned_u64 initial_value;
+ __u32 max_entries;
+ };
+ /* output of the loader prog */
+ struct {
+ int map_fd;
+ };
+ };
+};
+struct bpf_prog_desc {
+ int prog_fd;
+};
+
+struct bpf_loader_ctx {
+ size_t sz;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+};
+
+struct bpf_load_and_run_opts {
+ struct bpf_loader_ctx *ctx;
+ const void *data;
+ const void *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+ const char *errstr;
+};
+
+static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ return syscall(__NR_bpf, cmd, attr, size);
+}
+
+static inline int skel_closenz(int fd)
+{
+ if (fd > 0)
+ return close(fd);
+ return -EINVAL;
+}
+
+static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
+{
+ int map_fd = -1, prog_fd = -1, key = 0, err;
+ union bpf_attr attr;
+
+ map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4,
+ opts->data_sz, 1, 0);
+ if (map_fd < 0) {
+ opts->errstr = "failed to create loader map";
+ err = -errno;
+ goto out;
+ }
+
+ err = bpf_map_update_elem(map_fd, &key, opts->data, 0);
+ if (err < 0) {
+ opts->errstr = "failed to update loader map";
+ err = -errno;
+ goto out;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SYSCALL;
+ attr.insns = (long) opts->insns;
+ attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
+ attr.license = (long) "Dual BSD/GPL";
+ memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
+ attr.fd_array = (long) &map_fd;
+ attr.log_level = opts->ctx->log_level;
+ attr.log_size = opts->ctx->log_size;
+ attr.log_buf = opts->ctx->log_buf;
+ attr.prog_flags = BPF_F_SLEEPABLE;
+ prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ if (prog_fd < 0) {
+ opts->errstr = "failed to load loader prog";
+ err = -errno;
+ goto out;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.test.prog_fd = prog_fd;
+ attr.test.ctx_in = (long) opts->ctx;
+ attr.test.ctx_size_in = opts->ctx->sz;
+ err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
+ if (err < 0 || (int)attr.test.retval < 0) {
+ opts->errstr = "failed to execute loader prog";
+ if (err < 0)
+ err = -errno;
+ else
+ err = (int)attr.test.retval;
+ goto out;
+ }
+ err = 0;
+out:
+ if (map_fd >= 0)
+ close(map_fd);
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return err;
+}
+
+#endif
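
The generated *.lskel.h files are expected to wrap bpf_load_and_run() roughly as below; the function name and parameters here are hypothetical, and only the opts wiring mirrors the struct defined above:

/* Hypothetical shape of a generated light-skeleton load function. */
static inline int example_lskel__load(struct bpf_loader_ctx *ctx,
				      const void *data, __u32 data_sz,
				      const void *insns, __u32 insns_sz)
{
	struct bpf_load_and_run_opts opts = {
		.ctx = ctx,		/* log_level/log_buf live here */
		.data = data,		/* blob for the loader map */
		.data_sz = data_sz,
		.insns = insns,		/* the loader program itself */
		.insns_sz = insns_sz,
	};

	return bpf_load_and_run(&opts);
}
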
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 6061431ee04c..e9b619aa0cdf 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -1094,7 +1094,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_put_ctx;
}
if (xsk->fd == umem->fd)
- umem->rx_ring_setup_done = true;
+ umem->tx_ring_setup_done = true;
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
diff --git a/tools/lib/traceevent/plugins/plugin_kvm.c b/tools/lib/traceevent/plugins/plugin_kvm.c
index 51ceeb9147eb..9ce7b4b68e3f 100644
--- a/tools/lib/traceevent/plugins/plugin_kvm.c
+++ b/tools/lib/traceevent/plugins/plugin_kvm.c
@@ -366,7 +366,7 @@ union kvm_mmu_page_role {
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned nxe:1;
+ unsigned efer_nx:1;
unsigned cr0_wp:1;
unsigned smep_and_not_wp:1;
unsigned smap_and_not_wp:1;
@@ -403,7 +403,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
access_str[role.access],
role.invalid ? " invalid" : "",
role.cr4_pae ? "" : "!",
- role.nxe ? "" : "!",
+ role.efer_nx ? "" : "!",
role.cr0_wp ? "" : "!",
role.smep_and_not_wp ? " smep" : "",
role.smap_and_not_wp ? " smap" : "",
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 24295d39713b..bc821056aba9 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
sec = find_section_by_name(elf, ".altinstructions");
if (!sec) {
sec = elf_create_section(elf, ".altinstructions",
- SHF_WRITE, size, 0);
+ SHF_ALLOC, size, 0);
if (!sec) {
WARN_ELF("elf_create_section");
@@ -747,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC)
+ continue;
+
if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
continue;
diff --git a/tools/objtool/arch/x86/include/arch/special.h b/tools/objtool/arch/x86/include/arch/special.h
index 14271cca0c74..f2918f789a0a 100644
--- a/tools/objtool/arch/x86/include/arch/special.h
+++ b/tools/objtool/arch/x86/include/arch/special.h
@@ -9,6 +9,7 @@
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
+#define JUMP_KEY_OFFSET 8
#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9ed1a4cd00dc..e5947fbb9e7a 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1225,15 +1225,41 @@ static int handle_jump_alt(struct objtool_file *file,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- if (orig_insn->type == INSN_NOP)
- return 0;
+ if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
+ orig_insn->type != INSN_NOP) {
- if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
WARN_FUNC("unsupported instruction at jump label",
orig_insn->sec, orig_insn->offset);
return -1;
}
+ if (special_alt->key_addend & 2) {
+ struct reloc *reloc = insn_reloc(file, orig_insn);
+
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+ elf_write_insn(file->elf, orig_insn->sec,
+ orig_insn->offset, orig_insn->len,
+ arch_nop_insn(orig_insn->len));
+ orig_insn->type = INSN_NOP;
+ }
+
+ if (orig_insn->type == INSN_NOP) {
+ if (orig_insn->len == 2)
+ file->jl_nop_short++;
+ else
+ file->jl_nop_long++;
+
+ return 0;
+ }
+
+ if (orig_insn->len == 2)
+ file->jl_short++;
+ else
+ file->jl_long++;
+
*new_insn = list_next_entry(orig_insn, list);
return 0;
}
@@ -1314,6 +1340,12 @@ static int add_special_section_alts(struct objtool_file *file)
free(special_alt);
}
+ if (stats) {
+ printf("jl\\\tNOP\tJMP\n");
+ printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
+ printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
+ }
+
out:
return ret;
}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 743c2e9d0f56..8676c7598728 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -9,6 +9,7 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -27,21 +28,27 @@ static inline u32 str_hash(const char *str)
return jhash(str, strlen(str), 0);
}
-static inline int elf_hash_bits(void)
-{
- return vmlinux ? ELF_HASH_BITS : 16;
-}
+#define __elf_table(name) (elf->name##_hash)
+#define __elf_bits(name) (elf->name##_bits)
-#define elf_hash_add(hashtable, node, key) \
- hlist_add_head(node, &hashtable[hash_min(key, elf_hash_bits())])
+#define elf_hash_add(name, node, key) \
+ hlist_add_head(node, &__elf_table(name)[hash_min(key, __elf_bits(name))])
-static void elf_hash_init(struct hlist_head *table)
-{
- __hash_init(table, 1U << elf_hash_bits());
-}
+#define elf_hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &__elf_table(name)[hash_min(key, __elf_bits(name))], member)
-#define elf_hash_for_each_possible(name, obj, member, key) \
- hlist_for_each_entry(obj, &name[hash_min(key, elf_hash_bits())], member)
+#define elf_alloc_hash(name, size) \
+({ \
+ __elf_bits(name) = max(10, ilog2(size)); \
+ __elf_table(name) = mmap(NULL, sizeof(struct hlist_head) << __elf_bits(name), \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANON, -1, 0); \
+ if (__elf_table(name) == (void *)-1L) { \
+ WARN("mmap fail " #name); \
+ __elf_table(name) = NULL; \
+ } \
+ __elf_table(name); \
+})
static bool symbol_to_offset(struct rb_node *a, const struct rb_node *b)
{
@@ -80,9 +87,10 @@ struct section *find_section_by_name(const struct elf *elf, const char *name)
{
struct section *sec;
- elf_hash_for_each_possible(elf->section_name_hash, sec, name_hash, str_hash(name))
+ elf_hash_for_each_possible(section_name, sec, name_hash, str_hash(name)) {
if (!strcmp(sec->name, name))
return sec;
+ }
return NULL;
}
@@ -92,9 +100,10 @@ static struct section *find_section_by_index(struct elf *elf,
{
struct section *sec;
- elf_hash_for_each_possible(elf->section_hash, sec, hash, idx)
+ elf_hash_for_each_possible(section, sec, hash, idx) {
if (sec->idx == idx)
return sec;
+ }
return NULL;
}
@@ -103,9 +112,10 @@ static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
struct symbol *sym;
- elf_hash_for_each_possible(elf->symbol_hash, sym, hash, idx)
+ elf_hash_for_each_possible(symbol, sym, hash, idx) {
if (sym->idx == idx)
return sym;
+ }
return NULL;
}
@@ -170,9 +180,10 @@ struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
struct symbol *sym;
- elf_hash_for_each_possible(elf->symbol_name_hash, sym, name_hash, str_hash(name))
+ elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
if (!strcmp(sym->name, name))
return sym;
+ }
return NULL;
}
@@ -189,8 +200,8 @@ struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *se
sec = sec->reloc;
for_offset_range(o, offset, offset + len) {
- elf_hash_for_each_possible(elf->reloc_hash, reloc, hash,
- sec_offset_hash(sec, o)) {
+ elf_hash_for_each_possible(reloc, reloc, hash,
+ sec_offset_hash(sec, o)) {
if (reloc->sec != sec)
continue;
@@ -228,6 +239,10 @@ static int read_sections(struct elf *elf)
return -1;
}
+ if (!elf_alloc_hash(section, sections_nr) ||
+ !elf_alloc_hash(section_name, sections_nr))
+ return -1;
+
for (i = 0; i < sections_nr; i++) {
sec = malloc(sizeof(*sec));
if (!sec) {
@@ -273,13 +288,18 @@ static int read_sections(struct elf *elf)
}
sec->len = sec->sh.sh_size;
+ if (sec->sh.sh_flags & SHF_EXECINSTR)
+ elf->text_size += sec->len;
+
list_add_tail(&sec->list, &elf->sections);
- elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
- elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
}
- if (stats)
+ if (stats) {
printf("nr_sections: %lu\n", (unsigned long)sections_nr);
+ printf("section_bits: %d\n", elf->section_bits);
+ }
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
@@ -308,8 +328,8 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
else
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
- elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
- elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+ elf_hash_add(symbol, &sym->hash, sym->idx);
+ elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
/*
* Don't store empty STT_NOTYPE symbols in the rbtree. They
@@ -329,19 +349,25 @@ static int read_symbols(struct elf *elf)
Elf32_Word shndx;
symtab = find_section_by_name(elf, ".symtab");
- if (!symtab) {
+ if (symtab) {
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx)
+ shndx_data = symtab_shndx->data;
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ } else {
/*
* A missing symbol table is actually possible if it's an empty
- * .o file. This can happen for thunk_64.o.
+ * .o file. This can happen for thunk_64.o. Make sure to at
+ * least allocate the symbol hash tables so we can do symbol
+ * lookups without crashing.
*/
- return 0;
+ symbols_nr = 0;
}
- symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
- if (symtab_shndx)
- shndx_data = symtab_shndx->data;
-
- symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ if (!elf_alloc_hash(symbol, symbols_nr) ||
+ !elf_alloc_hash(symbol_name, symbols_nr))
+ return -1;
for (i = 0; i < symbols_nr; i++) {
sym = malloc(sizeof(*sym));
@@ -389,8 +415,10 @@ static int read_symbols(struct elf *elf)
elf_add_symbol(elf, sym);
}
- if (stats)
+ if (stats) {
printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);
+ printf("symbol_bits: %d\n", elf->symbol_bits);
+ }
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
@@ -479,7 +507,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
reloc->addend = addend;
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
- elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
sec->reloc->changed = true;
@@ -556,6 +584,9 @@ static int read_relocs(struct elf *elf)
unsigned int symndx;
unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;
+ if (!elf_alloc_hash(reloc, elf->text_size / 16))
+ return -1;
+
list_for_each_entry(sec, &elf->sections, list) {
if ((sec->sh.sh_type != SHT_RELA) &&
(sec->sh.sh_type != SHT_REL))
@@ -600,7 +631,7 @@ static int read_relocs(struct elf *elf)
}
list_add_tail(&reloc->list, &sec->reloc_list);
- elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
nr_reloc++;
}
@@ -611,6 +642,7 @@ static int read_relocs(struct elf *elf)
if (stats) {
printf("max_reloc: %lu\n", max_reloc);
printf("tot_reloc: %lu\n", tot_reloc);
+ printf("reloc_bits: %d\n", elf->reloc_bits);
}
return 0;
@@ -632,12 +664,6 @@ struct elf *elf_open_read(const char *name, int flags)
INIT_LIST_HEAD(&elf->sections);
- elf_hash_init(elf->symbol_hash);
- elf_hash_init(elf->symbol_name_hash);
- elf_hash_init(elf->section_hash);
- elf_hash_init(elf->section_name_hash);
- elf_hash_init(elf->reloc_hash);
-
elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
@@ -717,7 +743,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
{
- struct section *symtab;
+ struct section *symtab, *symtab_shndx;
struct symbol *sym;
Elf_Data *data;
Elf_Scn *s;
@@ -769,6 +795,29 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
symtab->len += data->d_size;
symtab->changed = true;
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx) {
+ s = elf_getscn(elf->elf, symtab_shndx->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return NULL;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+ data->d_size = sizeof(Elf32_Word);
+ data->d_align = 4;
+ data->d_type = ELF_T_WORD;
+
+ symtab_shndx->len += 4;
+ symtab_shndx->changed = true;
+ }
+
sym->sec = find_section_by_index(elf, 0);
elf_add_symbol(elf, sym);
@@ -851,8 +900,8 @@ struct section *elf_create_section(struct elf *elf, const char *name,
return NULL;
list_add_tail(&sec->list, &elf->sections);
- elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
- elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
elf->changed = true;
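
elf_alloc_hash() replaces the fixed ELF_HASH_BITS tables with ones sized per object: at least 2^10 buckets, otherwise roughly one bucket per element (with text_size/16 as the reloc estimate). A standalone sketch of the sizing rule, with ilog2() approximated by a shift loop since the kernel macro is not available here:

#include <stddef.h>

static int hash_bits(size_t nr_elems)
{
	int bits = 0;

	while (nr_elems >> 1) {		/* floor(log2(nr_elems)) */
		nr_elems >>= 1;
		bits++;
	}
	return bits < 10 ? 10 : bits;	/* max(10, ilog2(size)) */
}
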
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 45e5ede363b0..e34395047530 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -83,12 +83,20 @@ struct elf {
int fd;
bool changed;
char *name;
+ unsigned int text_size;
struct list_head sections;
- DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(symbol_name_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(section_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(section_name_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(reloc_hash, ELF_HASH_BITS);
+
+ int symbol_bits;
+ int symbol_name_bits;
+ int section_bits;
+ int section_name_bits;
+ int reloc_bits;
+
+ struct hlist_head *symbol_hash;
+ struct hlist_head *symbol_name_hash;
+ struct hlist_head *section_hash;
+ struct hlist_head *section_name_hash;
+ struct hlist_head *reloc_hash;
};
#define OFFSET_STRIDE_BITS 4
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index e4084afb2304..24fa83634de4 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -22,6 +22,9 @@ struct objtool_file {
struct list_head static_call_list;
struct list_head mcount_loc_list;
bool ignore_unreachables, c_file, hints, rodata;
+
+ unsigned long jl_short, jl_long;
+ unsigned long jl_nop_short, jl_nop_long;
};
struct objtool_file *objtool_open_read(const char *_objname);
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index 8a09f4e9d480..dc4721e19002 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -27,6 +27,7 @@ struct special_alt {
unsigned long new_off;
unsigned int orig_len, new_len; /* group only */
+ u8 key_addend;
};
int special_get_alts(struct elf *elf, struct list_head *alts);
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 07b21cfabf5c..bc925cf19e2d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -23,6 +23,7 @@ struct special_entry {
unsigned char size, orig, new;
unsigned char orig_len, new_len; /* group only */
unsigned char feature; /* ALTERNATIVE macro CPU feature */
+ unsigned char key; /* jump_label key */
};
struct special_entry entries[] = {
@@ -42,6 +43,7 @@ struct special_entry entries[] = {
.size = JUMP_ENTRY_SIZE,
.orig = JUMP_ORIG_OFFSET,
.new = JUMP_NEW_OFFSET,
+ .key = JUMP_KEY_OFFSET,
},
{
.sec = "__ex_table",
@@ -122,6 +124,18 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
alt->new_off -= 0x7ffffff0;
}
+ if (entry->key) {
+ struct reloc *key_reloc;
+
+ key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
+ if (!key_reloc) {
+ WARN_FUNC("can't find key reloc",
+ sec, offset + entry->key);
+ return -1;
+ }
+ alt->key_addend = key_reloc->addend;
+ }
+
return 0;
}
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 1dcec73c910c..bcf3eca5afbe 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -108,9 +108,9 @@ displayed as follows:
perf script --itrace=ibxwpe -F+flags
-The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
-system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
-in transaction, respectively.
+The flags are "bcrosyiABExgh" which stand for branch, call, return, conditional,
+system, asynchronous, interrupt, transaction abort, trace begin, trace end,
+in transaction, VM-entry, and VM-exit, respectively.
perf script also supports higher level ways to dump instruction traces:
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 5b8b61075039..48a5f5b26dd4 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -183,14 +183,15 @@ OPTIONS
At this point usage is displayed, and perf-script exits.
The flags field is synthesized and may have a value when Instruction
- Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
+ Trace decoding. The flags are "bcrosyiABExgh" which stand for branch,
call, return, conditional, system, asynchronous, interrupt,
- transaction abort, trace begin, trace end, and in transaction,
+ transaction abort, trace begin, trace end, in transaction, VM-Entry, and VM-Exit,
respectively. Known combinations of flags are printed more nicely e.g.
"call" for "bc", "return" for "br", "jcc" for "bo", "jmp" for "b",
"int" for "bci", "iret" for "bri", "syscall" for "bcs", "sysret" for "brs",
"async" for "by", "hw int" for "bcyi", "tx abrt" for "bA", "tr strt" for "bB",
- "tr end" for "bE". However the "x" flag will be display separately in those
+ "tr end" for "bE", "vmentry" for "bcg", "vmexit" for "bch".
+ However, the "x" flag will be displayed separately in those
cases, e.g. "jcc (x)" for a conditional branch within a transaction.
The callindent field is synthesized and may have a value when
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 406a9519145e..73df23dd664c 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -90,7 +90,6 @@ endif
ifeq ($(ARCH),mips)
NO_PERF_REGS := 0
CFLAGS += -I$(OUTPUT)arch/mips/include/generated
- CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
LIBUNWIND_LIBS = -lunwind -lunwind-mips
endif
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 9974f5f8e49b..9cd1c34f31b5 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-443 n64 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 2e68fbb57cc6..8f052ff4058c 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 7e4a2aba366d..0690263df1dd 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index ecd551b08d05..ce18119ea0d0 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 87f5b1a4a7fa..833405c27dae 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -80,6 +80,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (!perf_header__has_feat(&session->header, HEADER_BUILD_ID))
with_hits = true;
+ if (zstd_init(&(session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3337b5f93336..84803abeb942 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv)
rec->no_buildid = true;
}
+ if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+ pr_err("Kernel has no cgroup sampling support.\n");
+ err = -EINVAL;
+ goto out_opts;
+ }
+
if (rec->opts.kcore)
rec->data.is_dir = true;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5a830ae09418..f9f74a514315 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -572,7 +572,8 @@ static int enable_counters(void)
* - we have initial delay configured
*/
if (!target__none(&target) || stat_config.initial_delay) {
- evlist__enable(evsel_list);
+ if (!all_counters_use_bpf)
+ evlist__enable(evsel_list);
if (stat_config.initial_delay > 0)
pr_info(EVLIST_ENABLED_MSG);
}
@@ -581,13 +582,19 @@ static int enable_counters(void)
static void disable_counters(void)
{
+ struct evsel *counter;
+
/*
* If we don't have tracee (attaching to task or cpu), counters may
* still be running. To get accurate group ratios, we must stop groups
* from counting before reading their constituent counters.
*/
- if (!target__none(&target))
- evlist__disable(evsel_list);
+ if (!target__none(&target)) {
+ evlist__for_each_entry(evsel_list, counter)
+ bpf_counter__disable(counter);
+ if (!all_counters_use_bpf)
+ evlist__disable(evsel_list);
+ }
}
static volatile int workload_exec_errno;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index dd8ff287e930..c783558332b8 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
arch/arm/include/uapi/asm/perf_regs.h
arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
arch/powerpc/include/uapi/asm/perf_regs.h
arch/s390/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/perf_regs.h
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 20cb91ef06ff..2f6b67189b42 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -443,6 +443,8 @@ int main(int argc, const char **argv)
const char *cmd;
char sbuf[STRERR_BUFSIZE];
+ perf_debug_setup();
+
/* libsubcmd init */
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);
@@ -531,8 +533,6 @@ int main(int argc, const char **argv)
*/
pthread__block_sigwinch();
- perf_debug_setup();
-
while (1) {
static int done_help;
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
index 616f29098c71..605be14f441c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -1,46 +1,56 @@
[
{
- "EventCode": "1003C",
+ "EventCode": "0x1003C",
"EventName": "PM_EXEC_STALL_DMISS_L2L3",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
},
{
- "EventCode": "34056",
+ "EventCode": "0x1E054",
+ "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+ },
+ {
+ "EventCode": "0x34054",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+ },
+ {
+ "EventCode": "0x34056",
"EventName": "PM_EXEC_STALL_LOAD_FINISH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
},
{
- "EventCode": "3006C",
+ "EventCode": "0x3006C",
"EventName": "PM_RUN_CYC_SMT2_MODE",
"BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
},
{
- "EventCode": "300F4",
+ "EventCode": "0x300F4",
"EventName": "PM_RUN_INST_CMPL_CONC",
"BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
},
{
- "EventCode": "4C016",
+ "EventCode": "0x4C016",
"EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
},
{
- "EventCode": "4D014",
+ "EventCode": "0x4D014",
"EventName": "PM_EXEC_STALL_LOAD",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
},
{
- "EventCode": "4D016",
+ "EventCode": "0x4D016",
"EventName": "PM_EXEC_STALL_PTESYNC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
},
{
- "EventCode": "401EA",
+ "EventCode": "0x401EA",
"EventName": "PM_THRESH_EXC_128",
"BriefDescription": "Threshold counter exceeded a value of 128."
},
{
- "EventCode": "400F6",
+ "EventCode": "0x400F6",
"EventName": "PM_BR_MPRED_CMPL",
"BriefDescription": "A mispredicted branch completed. Includes direction and target."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
index 703cd431ae5b..54acb55e2c8c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
@@ -1,6 +1,6 @@
[
{
- "EventCode": "4016E",
+ "EventCode": "0x4016E",
"EventName": "PM_THRESH_NOT_MET",
"BriefDescription": "Threshold counter did not meet threshold."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
index eac8609dcc90..558f9530f54e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -1,216 +1,246 @@
[
{
- "EventCode": "10004",
+ "EventCode": "0x10004",
"EventName": "PM_EXEC_STALL_TRANSLATION",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
},
{
- "EventCode": "10010",
+ "EventCode": "0x10006",
+ "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+ },
+ {
+ "EventCode": "0x10010",
"EventName": "PM_PMC4_OVERFLOW",
"BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
},
{
- "EventCode": "10020",
+ "EventCode": "0x10020",
"EventName": "PM_PMC4_REWIND",
"BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
},
{
- "EventCode": "10038",
+ "EventCode": "0x10038",
"EventName": "PM_DISP_STALL_TRANSLATION",
"BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
},
{
- "EventCode": "1003A",
+ "EventCode": "0x1003A",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
},
{
- "EventCode": "1E050",
+ "EventCode": "0x1D05E",
+ "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+ },
+ {
+ "EventCode": "0x1E050",
"EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
},
{
- "EventCode": "1F054",
+ "EventCode": "0x1F054",
"EventName": "PM_DTLB_HIT",
"BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
},
{
- "EventCode": "101E8",
+ "EventCode": "0x10064",
+ "EventName": "PM_DISP_STALL_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ },
+ {
+ "EventCode": "0x101E8",
"EventName": "PM_THRESH_EXC_256",
"BriefDescription": "Threshold counter exceeded a count of 256."
},
{
- "EventCode": "101EC",
+ "EventCode": "0x101EC",
"EventName": "PM_THRESH_MET",
"BriefDescription": "Threshold exceeded."
},
{
- "EventCode": "100F2",
+ "EventCode": "0x100F2",
"EventName": "PM_1PLUS_PPC_CMPL",
"BriefDescription": "Cycles in which at least one instruction is completed by this thread."
},
{
- "EventCode": "100F6",
+ "EventCode": "0x100F6",
"EventName": "PM_IERAT_MISS",
"BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
},
{
- "EventCode": "100F8",
+ "EventCode": "0x100F8",
"EventName": "PM_DISP_STALL_CYC",
"BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
},
{
- "EventCode": "20114",
+ "EventCode": "0x20006",
+ "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+ },
+ {
+ "EventCode": "0x20114",
"EventName": "PM_MRK_L2_RC_DISP",
"BriefDescription": "Marked instruction RC dispatched in L2."
},
{
- "EventCode": "2C010",
+ "EventCode": "0x2C010",
"EventName": "PM_EXEC_STALL_LSU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
},
{
- "EventCode": "2C016",
+ "EventCode": "0x2C016",
"EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
"BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
},
{
- "EventCode": "2C01E",
+ "EventCode": "0x2C01E",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
},
{
- "EventCode": "2D01A",
+ "EventCode": "0x2D01A",
"EventName": "PM_DISP_STALL_IC_MISS",
"BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
},
{
- "EventCode": "2D01C",
- "EventName": "PM_CMPL_STALL_STCX",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
- },
- {
- "EventCode": "2E018",
+ "EventCode": "0x2E018",
"EventName": "PM_DISP_STALL_FETCH",
"BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
},
{
- "EventCode": "2E01A",
+ "EventCode": "0x2E01A",
"EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
},
{
- "EventCode": "2C142",
+ "EventCode": "0x2C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "24050",
+ "EventCode": "0x24050",
"EventName": "PM_IOPS_DISP",
"BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
},
{
- "EventCode": "2405E",
+ "EventCode": "0x2405E",
"EventName": "PM_ISSUE_CANCEL",
"BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
},
{
- "EventCode": "200FA",
+ "EventCode": "0x200FA",
"EventName": "PM_BR_TAKEN_CMPL",
"BriefDescription": "Branch Taken instruction completed."
},
{
- "EventCode": "30012",
+ "EventCode": "0x30004",
+ "EventName": "PM_DISP_STALL_FLUSH",
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+ },
+ {
+ "EventCode": "0x3000A",
+ "EventName": "PM_DISP_STALL_ITLB_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+ },
+ {
+ "EventCode": "0x30012",
"EventName": "PM_FLUSH_COMPLETION",
"BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
},
{
- "EventCode": "30014",
+ "EventCode": "0x30014",
"EventName": "PM_EXEC_STALL_STORE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
},
{
- "EventCode": "30018",
+ "EventCode": "0x30018",
"EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
},
{
- "EventCode": "30026",
+ "EventCode": "0x30026",
"EventName": "PM_EXEC_STALL_STORE_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
},
{
- "EventCode": "3012A",
+ "EventCode": "0x3012A",
"EventName": "PM_MRK_L2_RC_DONE",
"BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
},
{
- "EventCode": "3F046",
+ "EventCode": "0x3F046",
"EventName": "PM_ITLB_HIT_1G",
"BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "34058",
+ "EventCode": "0x34058",
"EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
"BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
},
{
- "EventCode": "3D05C",
+ "EventCode": "0x3D05C",
"EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
},
{
- "EventCode": "3E052",
+ "EventCode": "0x3E052",
"EventName": "PM_DISP_STALL_IC_L3",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
},
{
- "EventCode": "3E054",
+ "EventCode": "0x3E054",
"EventName": "PM_LD_MISS_L1",
"BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
{
- "EventCode": "301EA",
+ "EventCode": "0x301EA",
"EventName": "PM_THRESH_EXC_1024",
"BriefDescription": "Threshold counter exceeded a value of 1024."
},
{
- "EventCode": "300FA",
+ "EventCode": "0x300FA",
"EventName": "PM_INST_FROM_L3MISS",
"BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
},
{
- "EventCode": "40006",
+ "EventCode": "0x40006",
"EventName": "PM_ISSUE_KILL",
"BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
},
{
- "EventCode": "40116",
+ "EventCode": "0x40116",
"EventName": "PM_MRK_LARX_FIN",
"BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "4C010",
+ "EventCode": "0x4C010",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
},
{
- "EventCode": "4D01E",
+ "EventCode": "0x4D01E",
"EventName": "PM_DISP_STALL_BR_MPRED",
"BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
},
{
- "EventCode": "4E010",
+ "EventCode": "0x4E010",
"EventName": "PM_DISP_STALL_IC_L3MISS",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
},
{
- "EventCode": "4E01A",
+ "EventCode": "0x4E01A",
"EventName": "PM_DISP_STALL_HELD_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
},
{
- "EventCode": "44056",
+ "EventCode": "0x4003C",
+ "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ },
+ {
+ "EventCode": "0x44056",
"EventName": "PM_VECTOR_ST_CMPL",
"BriefDescription": "Vector store instructions completed."
}
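
All of the EventCode changes in these POWER10 JSON files add an explicit "0x" prefix so the codes are unambiguously parsed as hexadecimal. As a minimal illustration (not perf's own code), the sketch below opens one such raw event through perf_event_open(2); the choice of 0x1E058 (PM_STCX_FAIL_FIN) is arbitrary, and the event only exists on POWER10 hardware, so the open is expected to fail elsewhere:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc provides no perf_event_open() symbol. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1E058;	/* hex raw code, as the JSON now spells it */

	long fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* expected off POWER10 */
		return 1;
	}
	close(fd);
	return 0;
}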
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
index 016d8de0e14a..b5a0d6521963 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/locks.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
@@ -1,11 +1,11 @@
[
{
- "EventCode": "1E058",
+ "EventCode": "0x1E058",
"EventName": "PM_STCX_FAIL_FIN",
"BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "4E050",
+ "EventCode": "0x4E050",
"EventName": "PM_STCX_PASS_FIN",
"BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
index 93a5a5910648..58b5dfe3a273 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -1,146 +1,141 @@
[
{
- "EventCode": "1002C",
+ "EventCode": "0x1002C",
"EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
"BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
},
{
- "EventCode": "10132",
+ "EventCode": "0x10132",
"EventName": "PM_MRK_INST_ISSUED",
"BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
},
{
- "EventCode": "101E0",
+ "EventCode": "0x101E0",
"EventName": "PM_MRK_INST_DISP",
"BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
},
{
- "EventCode": "101E2",
+ "EventCode": "0x101E2",
"EventName": "PM_MRK_BR_TAKEN_CMPL",
"BriefDescription": "Marked Branch Taken instruction completed."
},
{
- "EventCode": "20112",
+ "EventCode": "0x20112",
"EventName": "PM_MRK_NTF_FIN",
"BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
},
{
- "EventCode": "2C01C",
+ "EventCode": "0x2C01C",
"EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
},
{
- "EventCode": "20138",
+ "EventCode": "0x20138",
"EventName": "PM_MRK_ST_NEST",
"BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
},
{
- "EventCode": "2013A",
+ "EventCode": "0x2013A",
"EventName": "PM_MRK_BRU_FIN",
"BriefDescription": "Marked Branch instruction finished."
},
{
- "EventCode": "2C144",
+ "EventCode": "0x2C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
},
{
- "EventCode": "24156",
+ "EventCode": "0x24156",
"EventName": "PM_MRK_STCX_FIN",
"BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "24158",
+ "EventCode": "0x24158",
"EventName": "PM_MRK_INST",
"BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
},
{
- "EventCode": "2415C",
+ "EventCode": "0x2415C",
"EventName": "PM_MRK_BR_CMPL",
"BriefDescription": "A marked branch completed. All branches are included."
},
{
- "EventCode": "200FD",
+ "EventCode": "0x200FD",
"EventName": "PM_L1_ICACHE_MISS",
"BriefDescription": "Demand iCache Miss."
},
{
- "EventCode": "30130",
+ "EventCode": "0x30130",
"EventName": "PM_MRK_INST_FIN",
"BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
},
{
- "EventCode": "34146",
+ "EventCode": "0x34146",
"EventName": "PM_MRK_LD_CMPL",
"BriefDescription": "Marked loads completed."
},
{
- "EventCode": "3E158",
+ "EventCode": "0x3E158",
"EventName": "PM_MRK_STCX_FAIL",
"BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "3E15A",
+ "EventCode": "0x3E15A",
"EventName": "PM_MRK_ST_FIN",
"BriefDescription": "The marked instruction was a store of any kind."
},
{
- "EventCode": "30068",
+ "EventCode": "0x30068",
"EventName": "PM_L1_ICACHE_RELOADED_PREF",
"BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
},
{
- "EventCode": "301E4",
+ "EventCode": "0x301E4",
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
},
{
- "EventCode": "300F6",
+ "EventCode": "0x300F6",
"EventName": "PM_LD_DEMAND_MISS_L1",
"BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
},
{
- "EventCode": "300FE",
+ "EventCode": "0x300FE",
"EventName": "PM_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
},
{
- "EventCode": "40012",
+ "EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
"BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
},
{
- "EventCode": "40134",
+ "EventCode": "0x40134",
"EventName": "PM_MRK_INST_TIMEO",
"BriefDescription": "Marked instruction finish timeout (instruction was lost)."
},
{
- "EventCode": "4003C",
- "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
- },
- {
- "EventCode": "4505A",
+ "EventCode": "0x4505A",
"EventName": "PM_SP_FLOP_CMPL",
"BriefDescription": "Single Precision floating point instructions completed."
},
{
- "EventCode": "4D058",
+ "EventCode": "0x4D058",
"EventName": "PM_VECTOR_FLOP_CMPL",
"BriefDescription": "Vector floating point instructions completed."
},
{
- "EventCode": "4D05A",
+ "EventCode": "0x4D05A",
"EventName": "PM_NON_MATH_FLOP_CMPL",
"BriefDescription": "Non Math instructions completed."
},
{
- "EventCode": "401E0",
+ "EventCode": "0x401E0",
"EventName": "PM_MRK_INST_CMPL",
"BriefDescription": "marked instruction completed."
},
{
- "EventCode": "400FE",
+ "EventCode": "0x400FE",
"EventName": "PM_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
index b01141eeebee..843b51f531e9 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -1,191 +1,186 @@
[
{
- "EventCode": "1000A",
+ "EventCode": "0x1000A",
"EventName": "PM_PMC3_REWIND",
"BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
},
{
- "EventCode": "1C040",
+ "EventCode": "0x1C040",
"EventName": "PM_XFER_FROM_SRC_PMC1",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "1C142",
+ "EventCode": "0x1C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "1C144",
+ "EventCode": "0x1C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
},
{
- "EventCode": "1C056",
+ "EventCode": "0x1C056",
"EventName": "PM_DERAT_MISS_4K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1C058",
+ "EventCode": "0x1C058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1C05C",
+ "EventCode": "0x1C05C",
"EventName": "PM_DTLB_MISS_2M",
"BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1E056",
+ "EventCode": "0x1E056",
"EventName": "PM_EXEC_STALL_STORE_PIPE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
},
{
- "EventCode": "1F150",
+ "EventCode": "0x1F150",
"EventName": "PM_MRK_ST_L2_CYC",
"BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
},
{
- "EventCode": "10062",
+ "EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
"BriefDescription": "Cycles L3 miss was pending for this thread."
},
{
- "EventCode": "20010",
+ "EventCode": "0x20010",
"EventName": "PM_PMC1_OVERFLOW",
"BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
},
{
- "EventCode": "2001A",
+ "EventCode": "0x2001A",
"EventName": "PM_ITLB_HIT",
"BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2003E",
+ "EventCode": "0x2003E",
"EventName": "PM_PTESYNC_FIN",
"BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
},
{
- "EventCode": "2C040",
+ "EventCode": "0x2C040",
"EventName": "PM_XFER_FROM_SRC_PMC2",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "2C054",
+ "EventCode": "0x2C054",
"EventName": "PM_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2C056",
+ "EventCode": "0x2C056",
"EventName": "PM_DTLB_MISS_4K",
"BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2D154",
+ "EventCode": "0x2D154",
"EventName": "PM_MRK_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "200F6",
+ "EventCode": "0x200F6",
"EventName": "PM_DERAT_MISS",
"BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3000A",
- "EventName": "PM_DISP_STALL_ITLB_MISS",
- "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
- },
- {
- "EventCode": "30016",
+ "EventCode": "0x30016",
"EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
},
{
- "EventCode": "3C040",
+ "EventCode": "0x3C040",
"EventName": "PM_XFER_FROM_SRC_PMC3",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "3C142",
+ "EventCode": "0x3C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "3C144",
+ "EventCode": "0x3C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
},
{
- "EventCode": "3C054",
+ "EventCode": "0x3C054",
"EventName": "PM_DERAT_MISS_16M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3C056",
+ "EventCode": "0x3C056",
"EventName": "PM_DTLB_MISS_64K",
"BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3C058",
+ "EventCode": "0x3C058",
"EventName": "PM_LARX_FIN",
"BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "301E2",
+ "EventCode": "0x301E2",
"EventName": "PM_MRK_ST_CMPL",
"BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
},
{
- "EventCode": "300FC",
+ "EventCode": "0x300FC",
"EventName": "PM_DTLB_MISS",
"BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
},
{
- "EventCode": "4D02C",
+ "EventCode": "0x4D02C",
"EventName": "PM_PMC1_REWIND",
"BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
},
{
- "EventCode": "4003E",
+ "EventCode": "0x4003E",
"EventName": "PM_LD_CMPL",
"BriefDescription": "Loads completed."
},
{
- "EventCode": "4C040",
+ "EventCode": "0x4C040",
"EventName": "PM_XFER_FROM_SRC_PMC4",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "4C142",
+ "EventCode": "0x4C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "4C144",
+ "EventCode": "0x4C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
},
{
- "EventCode": "4C056",
+ "EventCode": "0x4C056",
"EventName": "PM_DTLB_MISS_16M",
"BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4C05A",
+ "EventCode": "0x4C05A",
"EventName": "PM_DTLB_MISS_1G",
"BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4C15E",
+ "EventCode": "0x4C15E",
"EventName": "PM_MRK_DTLB_MISS_64K",
"BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4D056",
+ "EventCode": "0x4D056",
"EventName": "PM_NON_FMA_FLOP_CMPL",
"BriefDescription": "Non FMA instruction completed."
},
{
- "EventCode": "40164",
+ "EventCode": "0x40164",
"EventName": "PM_MRK_DERAT_MISS_2M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index a119e56cbf1c..7d0de1a2860b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -1,296 +1,271 @@
[
{
- "EventCode": "10016",
+ "EventCode": "0x10016",
"EventName": "PM_VSU0_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 0."
},
{
- "EventCode": "1001C",
+ "EventCode": "0x1001C",
"EventName": "PM_ULTRAVISOR_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
},
{
- "EventCode": "100F0",
+ "EventCode": "0x100F0",
"EventName": "PM_CYC",
"BriefDescription": "Processor cycles."
},
{
- "EventCode": "10134",
+ "EventCode": "0x10134",
"EventName": "PM_MRK_ST_DONE_L2",
"BriefDescription": "Marked stores completed in L2 (RC machine done)."
},
{
- "EventCode": "1505E",
+ "EventCode": "0x1505E",
"EventName": "PM_LD_HIT_L1",
"BriefDescription": "Loads that finished without experiencing an L1 miss."
},
{
- "EventCode": "1D05E",
- "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
- },
- {
- "EventCode": "1E054",
- "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
- },
- {
- "EventCode": "1E05A",
- "EventName": "PM_CMPL_STALL_LWSYNC",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
- },
- {
- "EventCode": "1F056",
+ "EventCode": "0x1F056",
"EventName": "PM_DISP_SS0_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
},
{
- "EventCode": "1F15C",
+ "EventCode": "0x1F15C",
"EventName": "PM_MRK_STCX_L2_CYC",
"BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
},
{
- "EventCode": "10066",
+ "EventCode": "0x10066",
"EventName": "PM_ADJUNCT_CYC",
"BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
},
{
- "EventCode": "101E4",
+ "EventCode": "0x101E4",
"EventName": "PM_MRK_L1_ICACHE_MISS",
"BriefDescription": "Marked Instruction suffered an icache Miss."
},
{
- "EventCode": "101EA",
+ "EventCode": "0x101EA",
"EventName": "PM_MRK_L1_RELOAD_VALID",
"BriefDescription": "Marked demand reload."
},
{
- "EventCode": "100F4",
+ "EventCode": "0x100F4",
"EventName": "PM_FLOP_CMPL",
"BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
},
{
- "EventCode": "100FA",
+ "EventCode": "0x100FA",
"EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
"BriefDescription": "Cycles when at least one thread has the run latch set."
},
{
- "EventCode": "100FC",
+ "EventCode": "0x100FC",
"EventName": "PM_LD_REF_L1",
"BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
},
{
- "EventCode": "20006",
- "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
- },
- {
- "EventCode": "2000C",
+ "EventCode": "0x2000C",
"EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
"BriefDescription": "Cycles when the run latch is set for all threads."
},
{
- "EventCode": "2E010",
+ "EventCode": "0x2E010",
"EventName": "PM_ADJUNCT_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
},
{
- "EventCode": "2E014",
+ "EventCode": "0x2E014",
"EventName": "PM_STCX_FIN",
"BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "20130",
+ "EventCode": "0x20130",
"EventName": "PM_MRK_INST_DECODED",
"BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
},
{
- "EventCode": "20132",
+ "EventCode": "0x20132",
"EventName": "PM_MRK_DFU_ISSUE",
"BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
},
{
- "EventCode": "20134",
+ "EventCode": "0x20134",
"EventName": "PM_MRK_FXU_ISSUE",
"BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
},
{
- "EventCode": "2505C",
+ "EventCode": "0x2505C",
"EventName": "PM_VSU_ISSUE",
"BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
},
{
- "EventCode": "2F054",
+ "EventCode": "0x2F054",
"EventName": "PM_DISP_SS1_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
},
{
- "EventCode": "2F056",
+ "EventCode": "0x2F056",
"EventName": "PM_DISP_SS1_4_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
},
{
- "EventCode": "2006C",
+ "EventCode": "0x2006C",
"EventName": "PM_RUN_CYC_SMT4_MODE",
"BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
},
{
- "EventCode": "201E0",
+ "EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
},
{
- "EventCode": "201E4",
+ "EventCode": "0x201E4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
},
{
- "EventCode": "201E8",
+ "EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512."
},
{
- "EventCode": "200F2",
+ "EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
"BriefDescription": "PowerPC instructions dispatched."
},
{
- "EventCode": "30132",
+ "EventCode": "0x30132",
"EventName": "PM_MRK_VSU_FIN",
"BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
},
{
- "EventCode": "30038",
+ "EventCode": "0x30038",
"EventName": "PM_EXEC_STALL_DMISS_LMEM",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
},
{
- "EventCode": "3F04A",
+ "EventCode": "0x3F04A",
"EventName": "PM_LSU_ST5_FIN",
"BriefDescription": "LSU Finished an internal operation in ST2 port."
},
{
- "EventCode": "34054",
- "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
- },
- {
- "EventCode": "3405A",
+ "EventCode": "0x3405A",
"EventName": "PM_PRIVILEGED_INST_CMPL",
"BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
},
{
- "EventCode": "3F150",
+ "EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_CYC",
"BriefDescription": "cycles to drain st from core to L2."
},
{
- "EventCode": "3F054",
+ "EventCode": "0x3F054",
"EventName": "PM_DISP_SS0_4_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
},
{
- "EventCode": "3F056",
+ "EventCode": "0x3F056",
"EventName": "PM_DISP_SS0_8_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
},
{
- "EventCode": "30162",
+ "EventCode": "0x30162",
"EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
"BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
},
{
- "EventCode": "40114",
+ "EventCode": "0x40114",
"EventName": "PM_MRK_START_PROBE_NOP_DISP",
"BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
},
{
- "EventCode": "4001C",
+ "EventCode": "0x4001C",
"EventName": "PM_VSU_FIN",
"BriefDescription": "VSU instructions finished."
},
{
- "EventCode": "4C01A",
+ "EventCode": "0x4C01A",
"EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
},
{
- "EventCode": "4D012",
+ "EventCode": "0x4D012",
"EventName": "PM_PMC3_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
},
{
- "EventCode": "4D022",
+ "EventCode": "0x4D022",
"EventName": "PM_HYPERVISOR_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
},
{
- "EventCode": "4D026",
+ "EventCode": "0x4D026",
"EventName": "PM_ULTRAVISOR_CYC",
"BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
},
{
- "EventCode": "4D028",
+ "EventCode": "0x4D028",
"EventName": "PM_PRIVILEGED_CYC",
"BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
},
{
- "EventCode": "40030",
+ "EventCode": "0x40030",
"EventName": "PM_INST_FIN",
"BriefDescription": "Instructions finished."
},
{
- "EventCode": "44146",
+ "EventCode": "0x44146",
"EventName": "PM_MRK_STCX_CORE_CYC",
"BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
},
{
- "EventCode": "44054",
+ "EventCode": "0x44054",
"EventName": "PM_VECTOR_LD_CMPL",
"BriefDescription": "Vector load instructions completed."
},
{
- "EventCode": "45054",
+ "EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
},
{
- "EventCode": "45056",
+ "EventCode": "0x45056",
"EventName": "PM_SCALAR_FLOP_CMPL",
"BriefDescription": "Scalar floating point instructions completed."
},
{
- "EventCode": "4505C",
+ "EventCode": "0x4505C",
"EventName": "PM_MATH_FLOP_CMPL",
"BriefDescription": "Math floating point instructions completed."
},
{
- "EventCode": "4D05E",
+ "EventCode": "0x4D05E",
"EventName": "PM_BR_CMPL",
"BriefDescription": "A branch completed. All branches are included."
},
{
- "EventCode": "4E15E",
+ "EventCode": "0x4E15E",
"EventName": "PM_MRK_INST_FLUSHED",
"BriefDescription": "The marked instruction was flushed."
},
{
- "EventCode": "401E6",
+ "EventCode": "0x401E6",
"EventName": "PM_MRK_INST_FROM_L3MISS",
"BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
},
{
- "EventCode": "401E8",
+ "EventCode": "0x401E8",
"EventName": "PM_MRK_DATA_FROM_L2MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
},
{
- "EventCode": "400F0",
+ "EventCode": "0x400F0",
"EventName": "PM_LD_DEMAND_MISS_L1_FIN",
"BriefDescription": "Load Missed L1, counted at finish time."
},
{
- "EventCode": "400FA",
+ "EventCode": "0x400FA",
"EventName": "PM_RUN_INST_CMPL",
"BriefDescription": "Completed PowerPC instructions gated by the run latch."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
index b61b5cc157ee..b8aded6045fa 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -1,296 +1,291 @@
[
{
- "EventCode": "100FE",
+ "EventCode": "0x100FE",
"EventName": "PM_INST_CMPL",
"BriefDescription": "PowerPC instructions completed."
},
{
- "EventCode": "10006",
- "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
- },
- {
- "EventCode": "1000C",
+ "EventCode": "0x1000C",
"EventName": "PM_LSU_LD0_FIN",
"BriefDescription": "LSU Finished an internal operation in LD0 port."
},
{
- "EventCode": "1000E",
+ "EventCode": "0x1000E",
"EventName": "PM_MMA_ISSUED",
"BriefDescription": "MMA instructions issued."
},
{
- "EventCode": "10012",
+ "EventCode": "0x10012",
"EventName": "PM_LSU_ST0_FIN",
"BriefDescription": "LSU Finished an internal operation in ST0 port."
},
{
- "EventCode": "10014",
+ "EventCode": "0x10014",
"EventName": "PM_LSU_ST4_FIN",
"BriefDescription": "LSU Finished an internal operation in ST4 port."
},
{
- "EventCode": "10018",
+ "EventCode": "0x10018",
"EventName": "PM_IC_DEMAND_CYC",
"BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
},
{
- "EventCode": "10022",
+ "EventCode": "0x10022",
"EventName": "PM_PMC2_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
},
{
- "EventCode": "10024",
+ "EventCode": "0x10024",
"EventName": "PM_PMC5_OVERFLOW",
"BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
},
{
- "EventCode": "10058",
+ "EventCode": "0x10058",
"EventName": "PM_EXEC_STALL_FIN_AT_DISP",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
},
{
- "EventCode": "1005A",
+ "EventCode": "0x1005A",
"EventName": "PM_FLUSH_MPRED",
"BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
},
{
- "EventCode": "1C05A",
+ "EventCode": "0x1C05A",
"EventName": "PM_DERAT_MISS_2M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "10064",
- "EventName": "PM_DISP_STALL_IC_L2",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ "EventCode": "0x1E05A",
+ "EventName": "PM_CMPL_STALL_LWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
},
{
- "EventCode": "10068",
+ "EventCode": "0x10068",
"EventName": "PM_BR_FIN",
"BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
},
{
- "EventCode": "1006A",
+ "EventCode": "0x1006A",
"EventName": "PM_FX_LSU_FIN",
"BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
},
{
- "EventCode": "1006C",
+ "EventCode": "0x1006C",
"EventName": "PM_RUN_CYC_ST_MODE",
"BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
},
{
- "EventCode": "20004",
+ "EventCode": "0x20004",
"EventName": "PM_ISSUE_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
},
{
- "EventCode": "2000A",
+ "EventCode": "0x2000A",
"EventName": "PM_HYPERVISOR_CYC",
"BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
},
{
- "EventCode": "2000E",
+ "EventCode": "0x2000E",
"EventName": "PM_LSU_LD1_FIN",
"BriefDescription": "LSU Finished an internal operation in LD1 port."
},
{
- "EventCode": "2C014",
+ "EventCode": "0x2C014",
"EventName": "PM_CMPL_STALL_SPECIAL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
},
{
- "EventCode": "2C018",
+ "EventCode": "0x2C018",
"EventName": "PM_EXEC_STALL_DMISS_L3MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
},
{
- "EventCode": "2D010",
+ "EventCode": "0x2D010",
"EventName": "PM_LSU_ST1_FIN",
"BriefDescription": "LSU Finished an internal operation in ST1 port."
},
{
- "EventCode": "2D012",
+ "EventCode": "0x2D012",
"EventName": "PM_VSU1_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 1."
},
{
- "EventCode": "2D018",
+ "EventCode": "0x2D018",
"EventName": "PM_EXEC_STALL_VSU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
},
{
- "EventCode": "2E01E",
+ "EventCode": "0x2D01C",
+ "EventName": "PM_CMPL_STALL_STCX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+ },
+ {
+ "EventCode": "0x2E01E",
"EventName": "PM_EXEC_STALL_NTC_FLUSH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
},
{
- "EventCode": "2013C",
+ "EventCode": "0x2013C",
"EventName": "PM_MRK_FX_LSU_FIN",
"BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
},
{
- "EventCode": "2405A",
+ "EventCode": "0x2405A",
"EventName": "PM_NTC_FIN",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
},
{
- "EventCode": "201E2",
+ "EventCode": "0x201E2",
"EventName": "PM_MRK_LD_MISS_L1",
"BriefDescription": "Marked DL1 Demand Miss counted at finish time."
},
{
- "EventCode": "200F4",
+ "EventCode": "0x200F4",
"EventName": "PM_RUN_CYC",
"BriefDescription": "Processor cycles gated by the run latch."
},
{
- "EventCode": "30004",
- "EventName": "PM_DISP_STALL_FLUSH",
- "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
- },
- {
- "EventCode": "30008",
+ "EventCode": "0x30008",
"EventName": "PM_EXEC_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
},
{
- "EventCode": "3001A",
+ "EventCode": "0x3001A",
"EventName": "PM_LSU_ST2_FIN",
"BriefDescription": "LSU Finished an internal operation in ST2 port."
},
{
- "EventCode": "30020",
+ "EventCode": "0x30020",
"EventName": "PM_PMC2_REWIND",
"BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
},
{
- "EventCode": "30022",
+ "EventCode": "0x30022",
"EventName": "PM_PMC4_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
},
{
- "EventCode": "30024",
+ "EventCode": "0x30024",
"EventName": "PM_PMC6_OVERFLOW",
"BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
},
{
- "EventCode": "30028",
+ "EventCode": "0x30028",
"EventName": "PM_CMPL_STALL_MEM_ECC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
},
{
- "EventCode": "30036",
+ "EventCode": "0x30036",
"EventName": "PM_EXEC_STALL_SIMPLE_FX",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
},
{
- "EventCode": "3003A",
+ "EventCode": "0x3003A",
"EventName": "PM_CMPL_STALL_EXCEPTION",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
},
{
- "EventCode": "3F044",
+ "EventCode": "0x3F044",
"EventName": "PM_VSU2_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 2."
},
{
- "EventCode": "30058",
+ "EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
"BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
},
{
- "EventCode": "3D058",
+ "EventCode": "0x3D058",
"EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
"BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
},
{
- "EventCode": "30066",
+ "EventCode": "0x30066",
"EventName": "PM_LSU_FIN",
"BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
},
{
- "EventCode": "40004",
+ "EventCode": "0x40004",
"EventName": "PM_FXU_ISSUE",
"BriefDescription": "A fixed point instruction was issued to the VSU."
},
{
- "EventCode": "40008",
+ "EventCode": "0x40008",
"EventName": "PM_NTC_ALL_FIN",
"BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
},
{
- "EventCode": "40010",
+ "EventCode": "0x40010",
"EventName": "PM_PMC3_OVERFLOW",
"BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
},
{
- "EventCode": "4C012",
+ "EventCode": "0x4C012",
"EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
},
{
- "EventCode": "4C018",
+ "EventCode": "0x4C018",
"EventName": "PM_CMPL_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
},
{
- "EventCode": "4C01E",
+ "EventCode": "0x4C01E",
"EventName": "PM_LSU_ST3_FIN",
"BriefDescription": "LSU Finished an internal operation in ST3 port."
},
{
- "EventCode": "4D018",
+ "EventCode": "0x4D018",
"EventName": "PM_EXEC_STALL_BRU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
},
{
- "EventCode": "4D01A",
+ "EventCode": "0x4D01A",
"EventName": "PM_CMPL_STALL_HWSYNC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
},
{
- "EventCode": "4D01C",
+ "EventCode": "0x4D01C",
"EventName": "PM_EXEC_STALL_TLBIEL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
},
{
- "EventCode": "4E012",
+ "EventCode": "0x4E012",
"EventName": "PM_EXEC_STALL_UNKNOWN",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
},
{
- "EventCode": "4D020",
+ "EventCode": "0x4D020",
"EventName": "PM_VSU3_ISSUE",
"BriefDescription": "VSU instruction was issued to VSU pipe 3."
},
{
- "EventCode": "40132",
+ "EventCode": "0x40132",
"EventName": "PM_MRK_LSU_FIN",
"BriefDescription": "LSU marked instruction finish."
},
{
- "EventCode": "45058",
+ "EventCode": "0x45058",
"EventName": "PM_IC_MISS_CMPL",
"BriefDescription": "Non-speculative icache miss, counted at completion."
},
{
- "EventCode": "4D050",
+ "EventCode": "0x4D050",
"EventName": "PM_VSU_NON_FLOP_CMPL",
"BriefDescription": "Non-floating point VSU instructions completed."
},
{
- "EventCode": "4D052",
+ "EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
"BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
},
{
- "EventCode": "400F2",
+ "EventCode": "0x400F2",
"EventName": "PM_1PLUS_PPC_DISP",
"BriefDescription": "Cycles at least one Instr Dispatched."
},
{
- "EventCode": "400F8",
+ "EventCode": "0x400F8",
"EventName": "PM_FLUSH",
"BriefDescription": "Flush (any type)."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index ea122a91ceb0..b5d1bd39cfb2 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -1,21 +1,21 @@
[
{
- "EventCode": "301E8",
+ "EventCode": "0x301E8",
"EventName": "PM_THRESH_EXC_64",
"BriefDescription": "Threshold counter exceeded a value of 64."
},
{
- "EventCode": "45050",
+ "EventCode": "0x45050",
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
},
{
- "EventCode": "45052",
+ "EventCode": "0x45052",
"EventName": "PM_4FLOP_CMPL",
"BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
},
{
- "EventCode": "4D054",
+ "EventCode": "0x4D054",
"EventName": "PM_8FLOP_CMPL",
"BriefDescription": "Four Double Precision vector instructions completed."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
index 5a714e3dd71a..db3766dca07c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -1,56 +1,56 @@
[
{
- "EventCode": "1F15E",
+ "EventCode": "0x1F15E",
"EventName": "PM_MRK_START_PROBE_NOP_CMPL",
"BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
},
{
- "EventCode": "20016",
+ "EventCode": "0x20016",
"EventName": "PM_ST_FIN",
"BriefDescription": "Store finish count. Includes speculative activity."
},
{
- "EventCode": "20018",
+ "EventCode": "0x20018",
"EventName": "PM_ST_FWD",
"BriefDescription": "Store forwards that finished."
},
{
- "EventCode": "2011C",
+ "EventCode": "0x2011C",
"EventName": "PM_MRK_NTF_CYC",
"BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
},
{
- "EventCode": "2E01C",
+ "EventCode": "0x2E01C",
"EventName": "PM_EXEC_STALL_TLBIE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
},
{
- "EventCode": "201E6",
+ "EventCode": "0x201E6",
"EventName": "PM_THRESH_EXC_32",
"BriefDescription": "Threshold counter exceeded a value of 32."
},
{
- "EventCode": "200F0",
+ "EventCode": "0x200F0",
"EventName": "PM_ST_CMPL",
"BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
},
{
- "EventCode": "200FE",
+ "EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
},
{
- "EventCode": "30010",
+ "EventCode": "0x30010",
"EventName": "PM_PMC2_OVERFLOW",
"BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
},
{
- "EventCode": "4D010",
+ "EventCode": "0x4D010",
"EventName": "PM_PMC1_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
},
{
- "EventCode": "4D05C",
+ "EventCode": "0x4D05C",
"EventName": "PM_DPP_FLOP_CMPL",
"BriefDescription": "Double-Precision or Quad-Precision instructions completed."
}
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 7422b0ea8790..9604446f8360 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -960,7 +960,7 @@ static int get_maxfds(void)
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
- return min((int)rlim.rlim_max / 2, 512);
+ return min(rlim.rlim_max / 2, (rlim_t)512);
return 512;
}
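
The get_maxfds() fix matters when RLIMIT_NOFILE is unlimited: on LP64 Linux rlim_t is a 64-bit unsigned type, so casting RLIM_INFINITY to int yields -1 and the old min((int)rlim.rlim_max / 2, 512) collapsed to 0. A standalone sketch of the corrected logic (hypothetical helper name, not the jevents source):

#include <stdio.h>
#include <sys/resource.h>

static int get_maxfds_fixed(void)
{
	struct rlimit rlim;

	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
		rlim_t half = rlim.rlim_max / 2;	/* stays 64-bit */

		/* Promote the constant instead of truncating the limit. */
		return (int)(half < (rlim_t)512 ? half : (rlim_t)512);
	}
	return 512;
}

int main(void)
{
	/* With the hard fd limit unlimited, the old (int) cast gave 0 here. */
	printf("maxfds = %d\n", get_maxfds_fixed());
	return 0;
}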
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 7daa8bb70a5a..711d4f9f5645 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -91,6 +91,11 @@
from __future__ import print_function
import sys
+# Only change warnings if the python -W option was not used
+if not sys.warnoptions:
+ import warnings
+ # PySide2 causes deprecation warnings, ignore them.
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
import argparse
import weakref
import threading
@@ -125,8 +130,9 @@ if pyside_version_1:
from PySide.QtGui import *
from PySide.QtSql import *
-from decimal import *
-from ctypes import *
+from decimal import Decimal, ROUND_HALF_UP
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+ c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
@@ -3868,7 +3874,7 @@ def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
if with_hdr:
model = indexes[0].model()
for col in range(min_col, max_col + 1):
- val = model.headerData(col, Qt.Horizontal)
+ val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
if as_csv:
text += sep + ToCSValue(val)
sep = ","
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 4a7b8deef3fd..8c10955eff93 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -16,7 +16,7 @@ pinned=0
exclusive=0
exclude_user=0
exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
exclude_idle=0
mmap=1
comm=1
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index 76a53126efdf..d4b0ef74defc 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -131,8 +131,8 @@ static int test__pfm_group(void)
},
{
.events = "{},{instructions}",
- .nr_events = 0,
- .nr_groups = 0,
+ .nr_events = 1,
+ .nr_groups = 1,
},
{
.events = "{instructions},{instructions}",
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 22eb31e48ca7..2f9948b3d943 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -11,9 +11,9 @@ compare_number()
second_num=$2
# upper bound is first_num * 110%
- upper=$(( $first_num + $first_num / 10 ))
+ upper=$(expr $first_num + $first_num / 10 )
# lower bound is first_num * 90%
- lower=$(( $first_num - $first_num / 10 ))
+ lower=$(expr $first_num - $first_num / 10 )
if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
echo "The difference between $first_num and $second_num are greater than 10%."
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index ddb52f748c8e..5ed674a2f55e 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -451,10 +451,10 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
goto out;
}
- err = -1;
link = bpf_program__attach(skel->progs.on_switch);
- if (!link) {
+ if (IS_ERR(link)) {
pr_err("Failed to attach leader program\n");
+ err = PTR_ERR(link);
goto out;
}
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
if (evsel->bperf_leader_link_fd < 0 &&
- bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+ bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+ err = -1;
goto out;
-
+ }
/*
* The bpf_link holds reference to the leader program, and the
* leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
/* Step 2: load the follower skeleton */
evsel->follower_skel = bperf_follower_bpf__open();
if (!evsel->follower_skel) {
+ err = -1;
pr_err("Failed to open follower skeleton\n");
goto out;
}
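
The bpf_counter.c hunks propagate real error codes instead of a blanket -1, and they match the libbpf of this era, where bpf_program__attach() reports failure through an error-encoded pointer rather than NULL. A self-contained sketch of that IS_ERR()/PTR_ERR() convention, using stand-in macros rather than the real tools/include/linux/err.h helpers:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel-style helpers in tools/include/linux/err.h. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)    { return IS_ERR_VALUE(p); }

/* Hypothetical attach helper: like bpf_program__attach() at the time of
 * this patch, it returns a valid pointer on success or an errno encoded
 * into the pointer on failure. */
static void *fake_attach(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-EINVAL) : (void *)&dummy;
}

int main(void)
{
	void *link = fake_attach(1);

	if (IS_ERR(link)) {
		int err = (int)PTR_ERR(link);	/* the real errno, not -1 */

		fprintf(stderr, "attach failed: %d\n", err);
		return 1;
	}
	return 0;
}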
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index b2f4920e19a6..7d2ba8419b0c 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
if ((tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) &&
die_compare_name(die_mem, fvp->name) &&
- /* Does the DIE have location information or external instance? */
+ /*
+ * Does the DIE have location information or const value
+ * or external instance?
+ */
(dwarf_attr(die_mem, DW_AT_external, &attr) ||
- dwarf_attr(die_mem, DW_AT_location, &attr)))
+ dwarf_attr(die_mem, DW_AT_location, &attr) ||
+ dwarf_attr(die_mem, DW_AT_const_value, &attr)))
return DIE_FIND_CB_END;
if (dwarf_haspc(die_mem, fvp->addr))
return DIE_FIND_CB_CONTINUE;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 9130f6fad8d5..bc5e4f294e9e 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
+ free(node->info_linear);
free(node);
}
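
The env.c hunk plugs a leak: each rb-tree node owns a separately allocated info_linear buffer, which must be freed before the node itself. A minimal sketch of the ownership rule, with hypothetical names rather than perf's bpf_prog_info_node:

#include <stdlib.h>

/* Hypothetical node mirroring the shape of perf's bpf_prog_info_node:
 * the node owns a separately allocated buffer. */
struct info_node {
	void *info_linear;	/* owned allocation */
};

static void info_node__delete(struct info_node *node)
{
	free(node->info_linear);	/* free owned members first */
	free(node);			/* then the node itself */
}

int main(void)
{
	struct info_node *node = malloc(sizeof(*node));

	if (!node)
		return 1;
	node->info_linear = malloc(128);
	info_node__delete(node);	/* both allocations released */
	return 0;
}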
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8a62fb39e365..19ad64f2bd83 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -100,7 +100,7 @@ enum {
PERF_IP_FLAG_VMEXIT = 1ULL << 12,
};
-#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
+#define PERF_IP_FLAG_CHARS "bcrosyiABExgh"
#define PERF_BRANCH_MASK (\
PERF_IP_FLAG_BRANCH |\
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6e5c41528c7d..6ea3e677dc1e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
if (affinity__setup(&affinity) < 0)
return;
- evlist__for_each_entry(evlist, pos)
- bpf_counter__disable(pos);
-
/* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4a3cd1b5bb33..a8d8463f8ee5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
+ evsel->use_config_name = orig->use_config_name;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 75cf5dbfe208..bdad52a06438 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,8 +83,10 @@ struct evsel {
bool collect_stat;
bool weak_group;
bool bpf_counter;
+ bool use_config_name;
int bpf_fd;
struct bpf_object *bpf_obj;
+ struct list_head config_terms;
};
/*
@@ -116,10 +118,8 @@ struct evsel {
bool merged_stat;
bool reset_group;
bool errored;
- bool use_config_name;
struct hashmap *per_pkg_mask;
struct evsel *leader;
- struct list_head config_terms;
int err;
int cpu_iter;
struct {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 8c59677bee13..20ad663978cc 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1146,6 +1146,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type = INTEL_PT_TRANSACTION;
+ if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
+ decoder->state.type |= INTEL_PT_BRANCH;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
@@ -1220,8 +1222,10 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
return 0;
if (err == -EAGAIN ||
intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
+ bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
+
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- if (intel_pt_fup_event(decoder))
+ if (intel_pt_fup_event(decoder) && no_tip)
return 0;
return -EAGAIN;
}
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 8658d42ce57a..0dfec8761b9a 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -707,8 +707,10 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
*ip += intel_pt_insn->length;
- if (to_ip && *ip == to_ip)
+ if (to_ip && *ip == to_ip) {
+ intel_pt_insn->length = 0;
goto out_no_cache;
+ }
if (*ip >= al.map->end)
break;
@@ -1198,6 +1200,7 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
+ ptq->insn_len = 0;
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
@@ -1211,7 +1214,6 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
- ptq->insn_len = 0;
} else {
if (ptq->state->from_ip)
ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 3ff4936a15a4..da19be7da284 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -776,10 +776,10 @@ static int machine__process_ksymbol_register(struct machine *machine,
if (dso) {
dso->kernel = DSO_SPACE__KERNEL;
map = map__new2(0, dso);
+ dso__put(dso);
}
if (!dso || !map) {
- dso__put(dso);
return -ENOMEM;
}
@@ -792,6 +792,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
maps__insert(&machine->kmaps, map);
+ map__put(map);
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
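
Both hunks in machine__process_ksymbol_register() apply the same reference-counting rule: once the next owner holds its own reference (map__new2() takes one on the dso, maps__insert() takes one on the map), the local reference is dropped immediately, on success and failure paths alike. A fragment-style sketch with the names from the diff (error handling elided; dso__new() assumed from the surrounding function):

	dso = dso__new(event->ksymbol.name);	/* local reference           */
	map = map__new2(0, dso);		/* map takes its own dso ref */
	dso__put(dso);				/* drop ours immediately     */

	maps__insert(&machine->kmaps, map);	/* kmaps takes its own map ref */
	map__put(map);				/* drop ours immediately       */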
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8336dd8e8098..d3cf2dee36c8 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
return false;
}
-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
{
if (!ev1->pmu_name || !ev2->pmu_name)
- return false;
+ return true;
return !strcmp(ev1->pmu_name, ev2->pmu_name);
}
@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader &&
- evsel_same_pmu(ev->leader, metric_events[i]->leader))
+ evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
@@ -1073,16 +1073,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
ret = resolve_metric(d->metric_no_group,
d->metric_list, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
*(d->has_match) = true;
- return *d->ret;
+out:
+ *(d->ret) = ret;
+ return ret;
}
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
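
The metricgroup change funnels every exit of the iterator callback through a single out: label so that the status the caller sees via *(d->ret) always matches the return value; previously an early return could leave it stale. A hedged sketch of the pattern, with hypothetical helpers:

	/* Single-exit pattern: every path stores its status in the
	 * out-parameter the caller inspects after iteration stops.
	 */
	static int iter_cb(void *arg, int *out_ret)
	{
		int ret;

		ret = step_one(arg);	/* hypothetical helpers */
		if (ret)
			goto out;
		ret = step_two(arg);
	out:
		*out_ret = ret;		/* caller sees the same status */
		return ret;
	}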
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4dad14265b81..84108c17f48d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -150,6 +150,10 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
.symbol = "bpf-output",
.alias = "",
},
+ [PERF_COUNT_SW_CGROUP_SWITCHES] = {
+ .symbol = "cgroup-switches",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -2928,9 +2932,14 @@ restart:
}
for (i = 0; i < max; i++, syms++) {
+ /*
+ * A new attr.config may still be missing from this table (its
+ * slot is NULL), so skip it; the most recent example was
+ * PERF_COUNT_SW_CGROUP_SWITCHES.
+ */
+ if (syms->symbol == NULL)
+ continue;
- if (event_glob != NULL && syms->symbol != NULL &&
- !(strglobmatch(syms->symbol, event_glob) ||
+ if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
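
With the event_symbols_sw[] slot filled in, the event can be requested by name (e.g. perf stat -e cgroup-switches); underneath it maps to a plain software perf event. A minimal sketch of what the name resolves to at the syscall level (assumes Linux 5.13+ UAPI headers, which define PERF_COUNT_SW_CGROUP_SWITCHES):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	int open_cgroup_switches(pid_t pid, int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CGROUP_SWITCHES;	/* needs Linux >= 5.13 */
		return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
	}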
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index fb8646cc3e83..923849024b15 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -347,6 +347,7 @@ emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EM
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
+cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CGROUP_SWITCHES); }
/*
* We have to handle the kernel PMU event cycles-ct/cycles-t/mem-loads/mem-stores separately.
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index 829af17a0867..020411682a3c 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
evsel->core.attr.build_id = 1;
}
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+ evsel->core.attr.cgroup = 1;
+}
+
bool perf_can_sample_identifier(void)
{
return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
{
return perf_probe_api(perf_probe_build_id);
}
+
+bool perf_can_record_cgroup(void)
+{
+ return perf_probe_api(perf_probe_cgroup);
+}
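
perf_probe_api() follows the usual feature-probe idiom: set the candidate attribute bit on a throwaway event, try to open it, and treat rejection as "kernel too old". A standalone, hypothetical rendering of that idiom (not the actual perf_probe_api() implementation, which probes against a real event/CPU list):

	#include <linux/perf_event.h>
	#include <stdbool.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* The kernel rejects attr bits it does not know, so a failed open
	 * on an otherwise-valid dummy event means the feature is missing.
	 */
	static bool kernel_supports_attr_cgroup(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_DUMMY;
		attr.cgroup = 1;		/* the bit under test */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return false;
		close(fd);
		return true;
	}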
diff --git a/tools/perf/util/perf_api_probe.h b/tools/perf/util/perf_api_probe.h
index f12ca55f509a..b104168efb15 100644
--- a/tools/perf/util/perf_api_probe.h
+++ b/tools/perf/util/perf_api_probe.h
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
bool perf_can_record_text_poke_events(void);
bool perf_can_sample_identifier(void);
bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
#endif // __PERF_API_PROBE_H
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index d735acb6c29c..6eef6dfeaa57 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -62,8 +62,16 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
}
/* no event */
- if (*q == '\0')
+ if (*q == '\0') {
+ if (*sep == '}') {
+ if (grp_evt < 0) {
+ ui__error("cannot close a non-existing event group\n");
+ goto error;
+ }
+ grp_evt--;
+ }
continue;
+ }
memset(&attr, 0, sizeof(attr));
event_attr_init(&attr);
@@ -107,6 +115,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
grp_evt = -1;
}
}
+ free(p_orig);
return 0;
error:
free(p_orig);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 866f2d514d72..b029c29ce227 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
immediate_value_is_supported()) {
Dwarf_Sword snum;
+ if (!tvar)
+ return 0;
+
dwarf_formsdata(&attr, &snum);
ret = asprintf(&tvar->value, "\\%ld", (long)snum);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 106b3d60881a..e59242c361ce 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
+ buf += hdr_sz;
rest = event->header.size - hdr_sz;
if (readn(fd, buf, rest) != (ssize_t)rest)
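
The one-line fix makes perf_session__peek_event() advance the destination pointer past the header it already read, so the event payload lands after the header instead of overwriting it. The resulting two-phase read, as a fragment (error checks elided; names as in the function):

	union perf_event *event = (union perf_event *)buf;
	size_t hdr_sz = sizeof(struct perf_event_header);

	readn(fd, buf, hdr_sz);				/* phase 1: header  */
	buf += hdr_sz;					/* advance past it  */
	readn(fd, buf, event->header.size - hdr_sz);	/* phase 2: payload */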
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index a76fff5e7d83..ca326f98c7a2 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
char *config;
int ret = 0;
- if (counter->uniquified_name ||
+ if (counter->uniquified_name || counter->use_config_name ||
!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
strlen(counter->pmu_name)))
return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
}
} else {
if (perf_pmu__has_hybrid()) {
- if (!counter->use_config_name) {
- ret = asprintf(&new_name, "%s/%s/",
- counter->pmu_name, counter->name);
- }
+ ret = asprintf(&new_name, "%s/%s/",
+ counter->pmu_name, counter->name);
} else {
ret = asprintf(&new_name, "%s [%s]",
counter->name, counter->pmu_name);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 4c56aa837434..a73345730ba9 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
list_del_init(&pos->note_list);
+ zfree(&pos->args);
zfree(&pos->name);
zfree(&pos->provider);
free(pos);
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index ab940c508ef0..bf9fd3549a1d 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -15,7 +15,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.9";
+static const char *version_str = "v1.10";
static const int supported_api_ver = 1;
static struct isst_if_platform_info isst_platform_info;
static char *progname;
@@ -106,6 +106,22 @@ int is_skx_based_platform(void)
return 0;
}
+int is_spr_platform(void)
+{
+ if (cpu_model == 0x8F)
+ return 1;
+
+ return 0;
+}
+
+int is_icx_platform(void)
+{
+ if (cpu_model == 0x6A || cpu_model == 0x6C)
+ return 1;
+
+ return 0;
+}
+
static int update_cpu_model(void)
{
unsigned int ebx, ecx, edx;
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 6a26d5769984..4431c8a0d40a 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -201,6 +201,7 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
{
unsigned int resp;
int ret;
+
ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ,
0, config_index, &resp);
if (ret) {
@@ -209,6 +210,20 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
}
ctdp_level->mem_freq = resp & GENMASK(7, 0);
+ if (is_spr_platform()) {
+ ctdp_level->mem_freq *= 200;
+ } else if (is_icx_platform()) {
+ if (ctdp_level->mem_freq < 7) {
+ ctdp_level->mem_freq = (12 - ctdp_level->mem_freq) * 133.33 * 2 * 10;
+ ctdp_level->mem_freq /= 10;
+ if (ctdp_level->mem_freq % 10 > 5)
+ ctdp_level->mem_freq++;
+ } else {
+ ctdp_level->mem_freq = 0;
+ }
+ } else {
+ ctdp_level->mem_freq = 0;
+ }
debug_printf(
"cpu:%d ctdp:%d CONFIG_TDP_GET_MEM_FREQ resp:%x uncore mem_freq:%d\n",
cpu, config_index, resp, ctdp_level->mem_freq);
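
The decode differs per platform: on SPR the mailbox value is a multiple of 200 MHz, while on ICX an encoded value v in 0..6 maps to (12 - v) * 133.33 * 2 MHz, and the extra *10 / 10 pass with the "% 10 > 5" check rounds the truncated result up. Worked numbers, as a sketch of the ICX branch:

	/* Encoded value 0 -> 12 * 133.33 * 2 = 3199.92 MHz: truncation
	 * gives 3199, and the "% 10 > 5" check bumps it to the nominal
	 * 3200. Encoded value 4 -> 8 * 266.66 = ~2133 MHz (DDR4-2133).
	 */
	int decode_icx_mem_freq(int v)	/* v: raw mailbox value, 0..6 */
	{
		int mhz = (12 - v) * 133.33 * 2 * 10;	/* 31999 for v == 0 */

		mhz /= 10;				/* 3199             */
		if (mhz % 10 > 5)			/* round 3199 up    */
			mhz++;				/* -> 3200          */
		return mhz;
	}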
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index 3bf1820c0da1..f97d8859ada7 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -446,7 +446,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
if (ctdp_level->mem_freq) {
snprintf(header, sizeof(header), "mem-frequency(MHz)");
snprintf(value, sizeof(value), "%d",
- ctdp_level->mem_freq * DISP_FREQ_MULTIPLIER);
+ ctdp_level->mem_freq);
format_and_print(outf, level + 2, header, value);
}
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
index 0cac6c54be87..1aa15d5ea57c 100644
--- a/tools/power/x86/intel-speed-select/isst.h
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -257,5 +257,7 @@ extern int get_cpufreq_base_freq(int cpu);
extern int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap);
extern void isst_display_error_info_message(int error, char *msg, int arg_valid, int arg);
extern int is_skx_based_platform(void);
+extern int is_spr_platform(void);
+extern int is_icx_platform(void);
extern void isst_trl_display_information(int cpu, FILE *outf, unsigned long long trl);
#endif
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index bc3299a20338..fb010a35d61a 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -49,6 +49,7 @@ TARGETS += proc
TARGETS += pstore
TARGETS += ptrace
TARGETS += openat2
+TARGETS += rlimits
TARGETS += rseq
TARGETS += rtc
TARGETS += seccomp
diff --git a/tools/testing/selftests/arm64/fp/sve-probe-vls.c b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
index b29cbc642c57..76e138525d55 100644
--- a/tools/testing/selftests/arm64/fp/sve-probe-vls.c
+++ b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
@@ -25,7 +25,7 @@ int main(int argc, char **argv)
ksft_set_plan(2);
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
- ksft_exit_skip("SVE not available");
+ ksft_exit_skip("SVE not available\n");
/*
* Enumerate up to SVE_VQ_MAX vector lengths
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4866f6a21901..addcfd8b615e 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -10,6 +10,7 @@ FEATURE-DUMP.libbpf
fixdep
test_dev_cgroup
/test_progs*
+!test_progs.h
test_verifier_log
feature
test_sock
@@ -30,10 +31,13 @@ test_sysctl
xdping
test_cpp
*.skel.h
+*.lskel.h
/no_alu32
/bpf_gcc
/tools
/runqslower
/bench
*.ko
+*.tmp
xdpxceiver
+xdp_redirect_multi
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 511259c2c6c5..f405b20c1e6c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -54,6 +54,7 @@ TEST_FILES = xsk_prereqs.sh \
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
test_xdp_redirect.sh \
+ test_xdp_redirect_multi.sh \
test_xdp_meta.sh \
test_xdp_veth.sh \
test_offload.py \
@@ -84,7 +85,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
- xdpxceiver
+ xdpxceiver xdp_redirect_multi
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
@@ -312,6 +313,10 @@ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
+LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
+ test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c
+SKEL_BLACKLIST += $$(LSKELS)
+
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
@@ -339,6 +344,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS)))
+TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
@@ -380,6 +386,14 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
+ $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
+
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
@@ -409,6 +423,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_LSKELS) \
$(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
@@ -516,6 +531,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature \
- $(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc bpf_testmod.ko)
+ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs
index ccf260021e83..eb6a4fea8c79 100644
--- a/tools/testing/selftests/bpf/Makefile.docs
+++ b/tools/testing/selftests/bpf/Makefile.docs
@@ -52,7 +52,8 @@ $(OUTPUT)%.$2: $(OUTPUT)%.rst
ifndef RST2MAN_DEP
$$(error "rst2man not found, but required to generate man pages")
endif
- $$(QUIET_GEN)rst2man $$< > $$@
+ $$(QUIET_GEN)rst2man --exit-status=1 $$< > $$@.tmp
+ $$(QUIET_GEN)mv $$@.tmp $$@
docs-clean-$1:
$$(call QUIET_CLEAN, eBPF_$1-manpage)
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 3353778c30f8..8deec1ca9150 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -202,3 +202,22 @@ generate valid BTF information for weak variables. Please make sure you use
Clang that contains the fix.
__ https://reviews.llvm.org/D100362
+
+Clang relocation changes
+========================
+
+Clang 13 patch `clang reloc patch`_ changed how relocations are
+emitted: existing relocation types were split into more specific
+types, and each new type corresponds to exactly one way of resolving
+the relocation. See `kernel llvm reloc`_ for a fuller explanation and
+some examples. When clang 13 is used with an old libbpf that has
+static linker support, the build fails with::
+
+ libbpf: ELF relo #0 in section #6 has unexpected type 2 in .../bpf_tcp_nogpl.o
+
+Here, ``type 2`` refers to the new relocation type ``R_BPF_64_ABS64``.
+To fix this issue, use a newer libbpf.
+
+.. Links
+.. _clang reloc patch: https://reviews.llvm.org/D102712
+.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 332ed2f7b402..6ea15b93a2f8 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -43,6 +43,7 @@ void setup_libbpf()
{
int err;
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
libbpf_set_print(libbpf_print_fn);
err = bump_memlock_rlimit();
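
libbpf_set_strict_mode(LIBBPF_STRICT_ALL) opts the benchmarks into libbpf's 1.0 error convention: failing APIs return NULL and set errno rather than returning an encoded ERR_PTR, which is why every IS_ERR(link) test below becomes a plain !link test. Fragment mirroring the converted code:

	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	link = bpf_program__attach(prog);
	if (!link) {			/* strict mode: NULL + errno */
		fprintf(stderr, "failed to attach program: %d\n", -errno);
		exit(1);
	}

The destructors (bpf_link__destroy(), btf__free()) are no-ops for NULL and error values, which is also why later hunks drop the IS_ERR() guards around cleanup calls.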
diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
index a967674098ad..c7ec114eca56 100644
--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
+++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
@@ -65,7 +65,7 @@ static void attach_bpf(struct bpf_program *prog)
struct bpf_link *link;
link = bpf_program__attach(prog);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index bde6c9d4cbd4..d167bffac679 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -181,7 +181,7 @@ static void ringbuf_libbpf_setup()
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
@@ -271,7 +271,7 @@ static void ringbuf_custom_setup()
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
@@ -430,7 +430,7 @@ static void perfbuf_libbpf_setup()
}
link = bpf_program__attach(ctx->skel->progs.bench_perfbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 2a0b6c9885a4..f41a491a8cc0 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -60,7 +60,7 @@ static void attach_bpf(struct bpf_program *prog)
struct bpf_link *link;
link = bpf_program__attach(prog);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 12ee40284da0..2060bc122c53 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -40,7 +40,7 @@ struct ipv6_packet pkt_v6 = {
.tcp.doff = 5,
};
-static int settimeo(int fd, int timeout_ms)
+int settimeo(int fd, int timeout_ms)
{
struct timeval timeout = { .tv_sec = 3 };
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 7205f8afdba1..5e0d51c07b63 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -33,6 +33,7 @@ struct ipv6_packet {
} __packed;
extern struct ipv6_packet pkt_v6;
+int settimeo(int fd, int timeout_ms);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int connect_to_fd(int server_fd, int timeout_ms);
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
index 21efe7bbf10d..ba0e1efe5a45 100644
--- a/tools/testing/selftests/bpf/prog_tests/atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -2,19 +2,19 @@
#include <test_progs.h>
-#include "atomics.skel.h"
+#include "atomics.lskel.h"
static void test_add(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.add);
- if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__add__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(add)"))
return;
- prog_fd = bpf_program__fd(skel->progs.add);
+ prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -33,20 +33,20 @@ static void test_add(struct atomics *skel)
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_sub(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.sub);
- if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__sub__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
return;
- prog_fd = bpf_program__fd(skel->progs.sub);
+ prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run sub",
@@ -66,20 +66,20 @@ static void test_sub(struct atomics *skel)
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_and(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.and);
- if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__and__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(and)"))
return;
- prog_fd = bpf_program__fd(skel->progs.and);
+ prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run and",
@@ -94,20 +94,20 @@ static void test_and(struct atomics *skel)
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_or(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.or);
- if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__or__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(or)"))
return;
- prog_fd = bpf_program__fd(skel->progs.or);
+ prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run or",
@@ -123,20 +123,20 @@ static void test_or(struct atomics *skel)
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_xor(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.xor);
- if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__xor__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
return;
- prog_fd = bpf_program__fd(skel->progs.xor);
+ prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run xor",
@@ -151,20 +151,20 @@ static void test_xor(struct atomics *skel)
ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_cmpxchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.cmpxchg);
- if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__cmpxchg__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
return;
- prog_fd = bpf_program__fd(skel->progs.cmpxchg);
+ prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -180,20 +180,20 @@ static void test_cmpxchg(struct atomics *skel)
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
static void test_xchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
- link = bpf_program__attach(skel->progs.xchg);
- if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
+ link_fd = atomics__xchg__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
return;
- prog_fd = bpf_program__fd(skel->progs.xchg);
+ prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@@ -207,7 +207,7 @@ static void test_xchg(struct atomics *skel)
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
cleanup:
- bpf_link__destroy(link);
+ close(link_fd);
}
void test_atomics(void)
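
The atomics test now uses the light skeleton generated by "bpftool gen skeleton -L" (see the Makefile hunk earlier): attach helpers return a raw fd rather than a struct bpf_link pointer, and prog_fd is read straight off the skeleton. Fragment with the generated names from the diff:

	int link_fd = atomics__add__attach(skel);

	if (!ASSERT_GT(link_fd, 0, "attach(add)"))	/* fd > 0 on success */
		return;
	bpf_prog_test_run(skel->progs.add.prog_fd, 1, NULL, 0,
			  NULL, NULL, &retval, &duration);
	close(link_fd);					/* fd, not bpf_link__destroy() */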
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9dc4e3dfbcf3..ec11e20d2b92 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -85,16 +85,14 @@ void test_attach_probe(void)
kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
- if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
- "err %ld\n", PTR_ERR(kprobe_link)))
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
goto cleanup;
skel->links.handle_kprobe = kprobe_link;
kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
true /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
- if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
- "err %ld\n", PTR_ERR(kretprobe_link)))
+ if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
@@ -103,8 +101,7 @@ void test_attach_probe(void)
0 /* self pid */,
"/proc/self/exe",
uprobe_offset);
- if (CHECK(IS_ERR(uprobe_link), "attach_uprobe",
- "err %ld\n", PTR_ERR(uprobe_link)))
+ if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
@@ -113,8 +110,7 @@ void test_attach_probe(void)
-1 /* any pid */,
"/proc/self/exe",
uprobe_offset);
- if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe",
- "err %ld\n", PTR_ERR(uretprobe_link)))
+ if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 2d3590cfb5e1..1f1aade56504 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -47,7 +47,7 @@ static void do_dummy_read(struct bpf_program *prog)
int iter_fd, len;
link = bpf_program__attach_iter(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -201,7 +201,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
int ret = 0;
link = bpf_program__attach_iter(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
return ret;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -396,7 +396,7 @@ static void test_file_iter(void)
return;
link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
/* unlink this path if it exists. */
@@ -502,7 +502,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
skel->bss->map2_id = map_info.id;
link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto free_map2;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -607,14 +607,12 @@ static void test_bpf_hash_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter",
- "attach_iter for hashmap2 unexpected succeeded\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter",
- "attach_iter for hashmap3 unexpected succeeded\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
/* hashmap1 should be good, update map values here */
@@ -636,7 +634,7 @@ static void test_bpf_hash_map(void)
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -727,7 +725,7 @@ static void test_bpf_percpu_hash_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -798,7 +796,7 @@ static void test_bpf_array_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -894,7 +892,7 @@ static void test_bpf_percpu_array_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -957,7 +955,7 @@ static void test_bpf_sk_storage_delete(void)
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
&opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -1075,7 +1073,7 @@ static void test_bpf_sk_storage_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -1128,7 +1126,7 @@ static void test_rdonly_buf_out_of_bound(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
bpf_link__destroy(link);
bpf_iter_test_kern5__destroy(skel);
@@ -1186,8 +1184,7 @@ static void test_task_vma(void)
skel->links.proc_maps = bpf_program__attach_iter(
skel->progs.proc_maps, NULL);
- if (CHECK(IS_ERR(skel->links.proc_maps), "bpf_program__attach_iter",
- "attach iterator failed\n")) {
+ if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
skel->links.proc_maps = NULL;
goto out;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index e25917f04602..efe1e979affb 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -82,7 +82,7 @@ static void *server(void *arg)
bytes, total_bytes, nr_sent, errno);
done:
- if (fd != -1)
+ if (fd >= 0)
close(fd);
if (err) {
WRITE_ONCE(stop, 1);
@@ -191,8 +191,7 @@ static void test_cubic(void)
return;
link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
bpf_cubic__destroy(cubic_skel);
return;
}
@@ -213,8 +212,7 @@ static void test_dctcp(void)
return;
link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
bpf_dctcp__destroy(dctcp_skel);
return;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 0457ae32b270..857e3f26086f 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3811,7 +3811,7 @@ static void do_test_raw(unsigned int test_num)
always_log);
free(raw_btf);
- err = ((btf_fd == -1) != test->btf_load_err);
+ err = ((btf_fd < 0) != test->btf_load_err);
if (CHECK(err, "btf_fd:%d test->btf_load_err:%u",
btf_fd, test->btf_load_err) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
@@ -3820,7 +3820,7 @@ static void do_test_raw(unsigned int test_num)
goto done;
}
- if (err || btf_fd == -1)
+ if (err || btf_fd < 0)
goto done;
create_attr.name = test->map_name;
@@ -3834,16 +3834,16 @@ static void do_test_raw(unsigned int test_num)
map_fd = bpf_create_map_xattr(&create_attr);
- err = ((map_fd == -1) != test->map_create_err);
+ err = ((map_fd < 0) != test->map_create_err);
CHECK(err, "map_fd:%d test->map_create_err:%u",
map_fd, test->map_create_err);
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
}
@@ -3941,7 +3941,7 @@ static int test_big_btf_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
always_log);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -3987,7 +3987,7 @@ done:
free(raw_btf);
free(user_btf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
return err;
@@ -4029,7 +4029,7 @@ static int test_btf_id(unsigned int test_num)
btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
always_log);
- if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4043,7 +4043,7 @@ static int test_btf_id(unsigned int test_num)
}
btf_fd[1] = bpf_btf_get_fd_by_id(info[0].id);
- if (CHECK(btf_fd[1] == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd[1] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4071,7 +4071,7 @@ static int test_btf_id(unsigned int test_num)
create_attr.btf_value_type_id = 2;
map_fd = bpf_create_map_xattr(&create_attr);
- if (CHECK(map_fd == -1, "errno:%d", errno)) {
+ if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4094,7 +4094,7 @@ static int test_btf_id(unsigned int test_num)
/* Test BTF ID is removed from the kernel */
btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
- if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4105,7 +4105,7 @@ static int test_btf_id(unsigned int test_num)
close(map_fd);
map_fd = -1;
btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
- if (CHECK(btf_fd[0] != -1, "BTF lingers")) {
+ if (CHECK(btf_fd[0] >= 0, "BTF lingers")) {
err = -1;
goto done;
}
@@ -4117,11 +4117,11 @@ done:
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
for (i = 0; i < 2; i++) {
free(user_btf[i]);
- if (btf_fd[i] != -1)
+ if (btf_fd[i] >= 0)
close(btf_fd[i]);
}
@@ -4166,7 +4166,7 @@ static void do_test_get_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
always_log);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd <= 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4212,7 +4212,7 @@ done:
free(raw_btf);
free(user_btf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
}
@@ -4249,8 +4249,9 @@ static void do_test_file(unsigned int test_num)
return;
btf = btf__parse_elf(test->file, &btf_ext);
- if (IS_ERR(btf)) {
- if (PTR_ERR(btf) == -ENOENT) {
+ err = libbpf_get_error(btf);
+ if (err) {
+ if (err == -ENOENT) {
printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
test__skip();
return;
@@ -4263,7 +4264,8 @@ static void do_test_file(unsigned int test_num)
btf_ext__free(btf_ext);
obj = bpf_object__open(test->file);
- if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "obj: %d", err))
return;
prog = bpf_program__next(NULL, obj);
@@ -4298,7 +4300,7 @@ static void do_test_file(unsigned int test_num)
info_len = sizeof(struct bpf_prog_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -4330,7 +4332,7 @@ static void do_test_file(unsigned int test_num)
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -4886,7 +4888,7 @@ static void do_test_pprint(int test_num)
always_log);
free(raw_btf);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4901,7 +4903,7 @@ static void do_test_pprint(int test_num)
create_attr.btf_value_type_id = test->value_type_id;
map_fd = bpf_create_map_xattr(&create_attr);
- if (CHECK(map_fd == -1, "errno:%d", errno)) {
+ if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4982,7 +4984,7 @@ static void do_test_pprint(int test_num)
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
- if (err == -1)
+ if (err < 0)
goto done;
}
@@ -4998,7 +5000,7 @@ static void do_test_pprint(int test_num)
cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
- if (err == -1)
+ if (err < 0)
goto done;
cmapv = cmapv + rounded_value_size;
@@ -5036,9 +5038,9 @@ done:
fprintf(stderr, "OK");
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
if (pin_file)
fclose(pin_file);
@@ -5950,7 +5952,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* get necessary lens */
info_len = sizeof(struct bpf_prog_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
@@ -5980,7 +5982,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -6044,7 +6046,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
info_len = sizeof(struct bpf_prog_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "err:%d errno:%d", err, errno)) {
+ if (CHECK(err < 0, "err:%d errno:%d", err, errno)) {
err = -1;
goto done;
}
@@ -6123,7 +6125,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
* Only recheck the info.*line_info* fields.
* Other fields are not the concern of this test.
*/
- if (CHECK(err == -1 ||
+ if (CHECK(err < 0 ||
info.nr_line_info != cnt ||
(jited_cnt && !info.jited_line_info) ||
info.nr_jited_line_info != jited_cnt ||
@@ -6260,7 +6262,7 @@ static void do_test_info_raw(unsigned int test_num)
always_log);
free(raw_btf);
- if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) {
err = -1;
goto done;
}
@@ -6273,7 +6275,8 @@ static void do_test_info_raw(unsigned int test_num)
patched_linfo = patch_name_tbd(test->line_info,
test->str_sec, linfo_str_off,
test->str_sec_size, &linfo_size);
- if (IS_ERR(patched_linfo)) {
+ err = libbpf_get_error(patched_linfo);
+ if (err) {
fprintf(stderr, "error in creating raw bpf_line_info");
err = -1;
goto done;
@@ -6297,7 +6300,7 @@ static void do_test_info_raw(unsigned int test_num)
}
prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
- err = ((prog_fd == -1) != test->expected_prog_load_failure);
+ err = ((prog_fd < 0) != test->expected_prog_load_failure);
if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d",
prog_fd, test->expected_prog_load_failure, errno) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
@@ -6306,7 +6309,7 @@ static void do_test_info_raw(unsigned int test_num)
goto done;
}
- if (prog_fd == -1)
+ if (prog_fd < 0)
goto done;
err = test_get_finfo(test, prog_fd);
@@ -6323,12 +6326,12 @@ done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (prog_fd != -1)
+ if (prog_fd >= 0)
close(prog_fd);
- if (!IS_ERR(patched_linfo))
+ if (!libbpf_get_error(patched_linfo))
free(patched_linfo);
}
@@ -6839,9 +6842,9 @@ static void do_test_dedup(unsigned int test_num)
return;
test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ err = libbpf_get_error(test_btf);
free(raw_btf);
- if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
- PTR_ERR(test_btf))) {
+ if (CHECK(err, "invalid test_btf errno:%d", err)) {
err = -1;
goto done;
}
@@ -6853,9 +6856,9 @@ static void do_test_dedup(unsigned int test_num)
if (!raw_btf)
return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ err = libbpf_get_error(expect_btf);
free(raw_btf);
- if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
- PTR_ERR(expect_btf))) {
+ if (CHECK(err, "invalid expect_btf errno:%d", err)) {
err = -1;
goto done;
}
@@ -6966,10 +6969,8 @@ static void do_test_dedup(unsigned int test_num)
}
done:
- if (!IS_ERR(test_btf))
- btf__free(test_btf);
- if (!IS_ERR(expect_btf))
- btf__free(expect_btf);
+ btf__free(test_btf);
+ btf__free(expect_btf);
}
void test_btf(void)
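
The btf.c conversion replaces IS_ERR()/PTR_ERR() with libbpf_get_error(), which returns 0 for a valid object and a negative errno-style code otherwise, independent of the error-reporting mode in effect. Fragment of the check as used above:

	struct btf *btf = btf__new(raw_btf, raw_btf_size);
	int err = libbpf_get_error(btf);

	if (err) {
		fprintf(stderr, "btf__new failed: %d\n", err);
		btf = NULL;	/* never dereference an error value */
	}
	btf__free(btf);		/* safe: no-op for NULL/error values */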
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 5e129dc2073c..1b90e684ff13 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -32,8 +32,9 @@ static int btf_dump_all_types(const struct btf *btf,
int err = 0, id;
d = btf_dump__new(btf, NULL, opts, btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ err = libbpf_get_error(d);
+ if (err)
+ return err;
for (id = 1; id <= type_cnt; id++) {
err = btf_dump__dump_type(d, id);
@@ -56,8 +57,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
snprintf(test_file, sizeof(test_file), "%s.o", t->file);
btf = btf__parse_elf(test_file, NULL);
- if (CHECK(IS_ERR(btf), "btf_parse_elf",
- "failed to load test BTF: %ld\n", PTR_ERR(btf))) {
+ if (!ASSERT_OK_PTR(btf, "btf_parse_elf")) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
index f36da15b134f..022c7d89d6f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_write.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -4,8 +4,6 @@
#include <bpf/btf.h>
#include "btf_helpers.h"
-static int duration = 0;
-
void test_btf_write() {
const struct btf_var_secinfo *vi;
const struct btf_type *t;
@@ -16,7 +14,7 @@ void test_btf_write() {
int id, err, str_off;
btf = btf__new_empty();
- if (CHECK(IS_ERR(btf), "new_empty", "failed: %ld\n", PTR_ERR(btf)))
+ if (!ASSERT_OK_PTR(btf, "new_empty"))
return;
str_off = btf__find_str(btf, "int");
diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
index 643dfa35419c..876be0ecb654 100644
--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
@@ -102,8 +102,7 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_link = bpf_program__attach_cgroup(obj->progs.egress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_link), "parent-cg-attach",
- "err %ld", PTR_ERR(parent_link)))
+ if (!ASSERT_OK_PTR(parent_link, "parent-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -126,8 +125,7 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
*/
child_link = bpf_program__attach_cgroup(obj->progs.egress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_link), "child-cg-attach",
- "err %ld", PTR_ERR(child_link)))
+ if (!ASSERT_OK_PTR(child_link, "child-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -147,10 +145,8 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_link))
- bpf_link__destroy(parent_link);
- if (!IS_ERR(child_link))
- bpf_link__destroy(child_link);
+ bpf_link__destroy(parent_link);
+ bpf_link__destroy(child_link);
cg_storage_multi_egress_only__destroy(obj);
}
@@ -176,18 +172,15 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
- "err %ld", PTR_ERR(parent_egress1_link)))
+ if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
- "err %ld", PTR_ERR(parent_egress2_link)))
+ if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
- "err %ld", PTR_ERR(parent_ingress_link)))
+ if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -221,18 +214,15 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
- "err %ld", PTR_ERR(child_egress1_link)))
+ if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
- "err %ld", PTR_ERR(child_egress2_link)))
+ if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
- "err %ld", PTR_ERR(child_ingress_link)))
+ if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -264,18 +254,12 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_egress1_link))
- bpf_link__destroy(parent_egress1_link);
- if (!IS_ERR(parent_egress2_link))
- bpf_link__destroy(parent_egress2_link);
- if (!IS_ERR(parent_ingress_link))
- bpf_link__destroy(parent_ingress_link);
- if (!IS_ERR(child_egress1_link))
- bpf_link__destroy(child_egress1_link);
- if (!IS_ERR(child_egress2_link))
- bpf_link__destroy(child_egress2_link);
- if (!IS_ERR(child_ingress_link))
- bpf_link__destroy(child_ingress_link);
+ bpf_link__destroy(parent_egress1_link);
+ bpf_link__destroy(parent_egress2_link);
+ bpf_link__destroy(parent_ingress_link);
+ bpf_link__destroy(child_egress1_link);
+ bpf_link__destroy(child_egress2_link);
+ bpf_link__destroy(child_ingress_link);
cg_storage_multi_isolated__destroy(obj);
}
@@ -301,18 +285,15 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
- "err %ld", PTR_ERR(parent_egress1_link)))
+ if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
- "err %ld", PTR_ERR(parent_egress2_link)))
+ if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
- "err %ld", PTR_ERR(parent_ingress_link)))
+ if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -338,18 +319,15 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
- "err %ld", PTR_ERR(child_egress1_link)))
+ if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
- "err %ld", PTR_ERR(child_egress2_link)))
+ if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
- "err %ld", PTR_ERR(child_ingress_link)))
+ if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -375,18 +353,12 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_egress1_link))
- bpf_link__destroy(parent_egress1_link);
- if (!IS_ERR(parent_egress2_link))
- bpf_link__destroy(parent_egress2_link);
- if (!IS_ERR(parent_ingress_link))
- bpf_link__destroy(parent_ingress_link);
- if (!IS_ERR(child_egress1_link))
- bpf_link__destroy(child_egress1_link);
- if (!IS_ERR(child_egress2_link))
- bpf_link__destroy(child_egress2_link);
- if (!IS_ERR(child_ingress_link))
- bpf_link__destroy(child_ingress_link);
+ bpf_link__destroy(parent_egress1_link);
+ bpf_link__destroy(parent_egress2_link);
+ bpf_link__destroy(parent_ingress_link);
+ bpf_link__destroy(child_egress1_link);
+ bpf_link__destroy(child_egress2_link);
+ bpf_link__destroy(child_ingress_link);
cg_storage_multi_shared__destroy(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index 0a1fc9816cef..20bb8831dda6 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -167,7 +167,7 @@ void test_cgroup_attach_multi(void)
prog_cnt = 2;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
- prog_ids, &prog_cnt) != -1);
+ prog_ids, &prog_cnt) >= 0);
CHECK_FAIL(errno != ENOSPC);
CHECK_FAIL(prog_cnt != 4);
/* check that prog_ids are returned even when buffer is too small */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
index 736796e56ed1..9091524131d6 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
@@ -65,8 +65,7 @@ void test_cgroup_link(void)
for (i = 0; i < cg_nr; i++) {
links[i] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[i].fd);
- if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
- i, PTR_ERR(links[i])))
+ if (!ASSERT_OK_PTR(links[i], "cg_attach"))
goto cleanup;
}
@@ -121,8 +120,7 @@ void test_cgroup_link(void)
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
- PTR_ERR(links[last_cg])))
+ if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr + 1, 0);
@@ -147,7 +145,7 @@ void test_cgroup_link(void)
/* attempt to mix in with multi-attach bpf_link */
tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
+ if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) {
bpf_link__destroy(tmp_link);
goto cleanup;
}
@@ -165,8 +163,7 @@ void test_cgroup_link(void)
/* attach back link-based one */
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
- PTR_ERR(links[last_cg])))
+ if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr, 0);
@@ -249,8 +246,7 @@ cleanup:
BPF_CGROUP_INET_EGRESS);
for (i = 0; i < cg_nr; i++) {
- if (!IS_ERR(links[i]))
- bpf_link__destroy(links[i]);
+ bpf_link__destroy(links[i]);
}
test_cgroup_link__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
index 464edc1c1708..b9dc4ec655b5 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
@@ -60,7 +60,7 @@ static void run_cgroup_bpf_test(const char *cg_path, int out_sk)
goto cleanup;
link = bpf_program__attach_cgroup(skel->progs.ingress_lookup, cgfd);
- if (CHECK(IS_ERR(link), "cgroup_attach", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "cgroup_attach"))
goto cleanup;
run_lookup_test(&skel->bss->g_serv_port, out_sk);
diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
index b62a39315336..012068f33a0a 100644
--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
@@ -53,7 +53,7 @@ static void test_check_mtu_xdp_attach(void)
prog = skel->progs.xdp_use_helper_basic;
link = bpf_program__attach_xdp(prog, IFINDEX_LO);
- if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto out;
skel->links.xdp_use_helper_basic = link;
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 607710826dca..d02e064c535f 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -369,8 +369,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
const char *name;
int i;
- if (CHECK(IS_ERR(local_btf), "local_btf", "failed: %ld\n", PTR_ERR(local_btf)) ||
- CHECK(IS_ERR(targ_btf), "targ_btf", "failed: %ld\n", PTR_ERR(targ_btf))) {
+ if (!ASSERT_OK_PTR(local_btf, "local_btf") || !ASSERT_OK_PTR(targ_btf, "targ_btf")) {
btf__free(local_btf);
btf__free(targ_btf);
return -EINVAL;
@@ -848,8 +847,7 @@ void test_core_reloc(void)
}
obj = bpf_object__open_file(test_case->bpf_obj_file, NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
- test_case->bpf_obj_file, PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
continue;
probe_name = "raw_tracepoint/sys_enter";
@@ -899,8 +897,7 @@ void test_core_reloc(void)
data->my_pid_tgid = my_pid_tgid;
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
/* trigger test run */
@@ -941,10 +938,8 @@ cleanup:
CHECK_FAIL(munmap(mmap_data, mmap_sz));
mmap_data = NULL;
}
- if (!IS_ERR_OR_NULL(link)) {
- bpf_link__destroy(link);
- link = NULL;
- }
+ bpf_link__destroy(link);
+ link = NULL;
bpf_object__close(obj);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 109d0345a2be..91154c2ba256 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
-#include "fexit_test.skel.h"
+#include "fentry_test.lskel.h"
+#include "fexit_test.lskel.h"
void test_fentry_fexit(void)
{
@@ -26,7 +26,7 @@ void test_fentry_fexit(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 7cb111b11995..174c89e7456e 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
+#include "fentry_test.lskel.h"
static int fentry_test(struct fentry_test *fentry_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
__u64 *result;
err = fentry_test__attach(fentry_skel);
@@ -15,11 +15,11 @@ static int fentry_test(struct fentry_test *fentry_skel)
return err;
/* Check that already linked program can't be attached again. */
- link = bpf_program__attach(fentry_skel->progs.test1);
- if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
+ link_fd = fentry_test__test1__attach(fentry_skel);
+ if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
return -1;
- prog_fd = bpf_program__fd(fentry_skel->progs.test1);
+ prog_fd = fentry_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");
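For readers unfamiliar with the .lskel.h headers these tests now include: light skeletons, generated with "bpftool gen skeleton -L", expose each program as a struct with a raw prog_fd field and emit per-program attach helpers that return a link fd rather than a struct bpf_link *. A rough sketch of the resulting calling convention, reusing the generated names visible in the hunk above:

#include <unistd.h>
#include "fentry_test.lskel.h"

static int attach_once(struct fentry_test *skel)
{
	int link_fd, prog_fd;

	link_fd = fentry_test__test1__attach(skel);
	if (link_fd < 0)	/* negative errno, not an ERR_PTR */
		return link_fd;

	prog_fd = skel->progs.test1.prog_fd;	/* raw fd, no bpf_program__fd() */
	/* ... exercise the program via bpf_prog_test_run(prog_fd, ...) ... */
	close(link_fd);
	return 0;
}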
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 63990842d20f..73b4c76e6b86 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -146,10 +146,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
close_prog:
for (i = 0; i < prog_cnt; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- if (!IS_ERR_OR_NULL(obj))
- bpf_object__close(obj);
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
bpf_object__close(tgt_obj);
free(link);
free(prog);
@@ -231,7 +229,7 @@ static int test_second_attach(struct bpf_object *obj)
return err;
link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name);
- if (CHECK(IS_ERR(link), "second_link", "failed to attach second link prog_fd %d tgt_fd %d\n", bpf_program__fd(prog), tgt_fd))
+ if (!ASSERT_OK_PTR(link, "second_link"))
goto out;
err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
@@ -283,9 +281,7 @@ static void test_fmod_ret_freplace(void)
opts.attach_prog_fd = pkt_fd;
freplace_obj = bpf_object__open_file(freplace_name, &opts);
- if (CHECK(IS_ERR_OR_NULL(freplace_obj), "freplace_obj_open",
- "failed to open %s: %ld\n", freplace_name,
- PTR_ERR(freplace_obj)))
+ if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open"))
goto out;
err = bpf_object__load(freplace_obj);
@@ -294,14 +290,12 @@ static void test_fmod_ret_freplace(void)
prog = bpf_program__next(NULL, freplace_obj);
freplace_link = bpf_program__attach_trace(prog);
- if (CHECK(IS_ERR(freplace_link), "freplace_attach_trace", "failed to link\n"))
+ if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace"))
goto out;
opts.attach_prog_fd = bpf_program__fd(prog);
fmod_obj = bpf_object__open_file(fmod_ret_name, &opts);
- if (CHECK(IS_ERR_OR_NULL(fmod_obj), "fmod_obj_open",
- "failed to open %s: %ld\n", fmod_ret_name,
- PTR_ERR(fmod_obj)))
+ if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open"))
goto out;
err = bpf_object__load(fmod_obj);
@@ -350,9 +344,7 @@ static void test_obj_load_failure_common(const char *obj_file,
);
obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open %s: %ld\n", obj_file,
- PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
/* It should fail to load the program */
@@ -361,8 +353,7 @@ static void test_obj_load_failure_common(const char *obj_file,
goto close_prog;
close_prog:
- if (!IS_ERR_OR_NULL(obj))
- bpf_object__close(obj);
+ bpf_object__close(obj);
bpf_object__close(pkt_obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
index ccc7e8a34ab6..4e7f4b42ea29 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
@@ -6,7 +6,7 @@
#include <time.h>
#include <sys/mman.h>
#include <sys/syscall.h>
-#include "fexit_sleep.skel.h"
+#include "fexit_sleep.lskel.h"
static int do_sleep(void *skel)
{
@@ -58,8 +58,8 @@ void test_fexit_sleep(void)
* waiting for percpu_ref_kill to confirm). The other one
* will be freed quickly.
*/
- close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
- close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
+ close(fexit_skel->progs.nanosleep_fentry.prog_fd);
+ close(fexit_skel->progs.nanosleep_fexit.prog_fd);
fexit_sleep__detach(fexit_skel);
/* kill the thread to unwind sys_nanosleep stack through the trampoline */
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index 6792e41f7f69..af3dba726701 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fexit_test.skel.h"
+#include "fexit_test.lskel.h"
static int fexit_test(struct fexit_test *fexit_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_link *link;
+ int link_fd;
__u64 *result;
err = fexit_test__attach(fexit_skel);
@@ -15,11 +15,11 @@ static int fexit_test(struct fexit_test *fexit_skel)
return err;
/* Check that already linked program can't be attached again. */
- link = bpf_program__attach(fexit_skel->progs.test1);
- if (!ASSERT_ERR_PTR(link, "fexit_attach_link"))
+ link_fd = fexit_test__test1__attach(fexit_skel);
+ if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
return -1;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index cd6dc80edf18..225714f71ac6 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -541,7 +541,7 @@ static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
return;
link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
- if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_netns"))
goto out_close;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
index 172c586b6996..3931ede5c534 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
@@ -134,9 +134,9 @@ static void test_link_create_link_create(int netns, int prog1, int prog2)
/* Expect failure creating link when another link exists */
errno = 0;
link2 = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
- if (CHECK_FAIL(link2 != -1 || errno != E2BIG))
+ if (CHECK_FAIL(link2 >= 0 || errno != E2BIG))
perror("bpf_prog_attach(prog2) expected E2BIG");
- if (link2 != -1)
+ if (link2 >= 0)
close(link2);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -159,9 +159,9 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
/* Expect failure creating link when prog attached */
errno = 0;
link = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
- if (CHECK_FAIL(link != -1 || errno != EEXIST))
+ if (CHECK_FAIL(link >= 0 || errno != EEXIST))
perror("bpf_link_create(prog2) expected EEXIST");
- if (link != -1)
+ if (link >= 0)
close(link);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -623,7 +623,7 @@ static void run_tests(int netns)
}
out_close:
for (i = 0; i < ARRAY_SIZE(progs); i++) {
- if (progs[i] != -1)
+ if (progs[i] >= 0)
CHECK_FAIL(close(progs[i]));
}
}
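These hunks apply the same strict-mode convention to fd-returning APIs: a failed bpf_link_create() (or prog/map creation) returns some negative value with errno set, not necessarily exactly -1, so "!= -1" tests are replaced by sign checks. A sketch, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>

static int flow_link_create(int prog_fd, int netns_fd)
{
	int link_fd;

	link_fd = bpf_link_create(prog_fd, netns_fd, BPF_FLOW_DISSECTOR, NULL);
	if (link_fd < 0)	/* was: link_fd == -1 */
		fprintf(stderr, "bpf_link_create: %d\n", -errno);
	return link_fd;
}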
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
index 925722217edf..522237aa4470 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -121,12 +121,12 @@ void test_get_stack_raw_tp(void)
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
pb_opts.sample_cb = get_stack_print_output;
pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
/* trigger some syscall action */
@@ -141,9 +141,7 @@ void test_get_stack_raw_tp(void)
}
close_prog:
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
- if (!IS_ERR_OR_NULL(pb))
- perf_buffer__free(pb);
+ bpf_link__destroy(link);
+ perf_buffer__free(pb);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
index d884b2ed5bc5..8d5a6023a1bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
@@ -48,8 +48,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_no_callchain",
- "should have failed\n");
+ ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain");
close(pmu_fd);
/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
@@ -65,8 +64,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event_callchain",
- "err: %ld\n", PTR_ERR(skel->links.oncpu));
+ ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
close(pmu_fd);
/* add exclude_callchain_kernel, attach should fail */
@@ -82,8 +80,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_exclude_callchain_kernel",
- "should have failed\n");
+ ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel");
close(pmu_fd);
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/hashmap.c b/tools/testing/selftests/bpf/prog_tests/hashmap.c
index 428d488830c6..4747ab18f97f 100644
--- a/tools/testing/selftests/bpf/prog_tests/hashmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/hashmap.c
@@ -48,8 +48,7 @@ static void test_hashmap_generic(void)
struct hashmap *map;
map = hashmap__new(hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
for (i = 0; i < ELEM_CNT; i++) {
@@ -267,8 +266,7 @@ static void test_hashmap_multimap(void)
/* force collisions */
map = hashmap__new(collision_hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
/* set up multimap:
@@ -339,8 +337,7 @@ static void test_hashmap_empty()
/* force collisions */
map = hashmap__new(hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
goto cleanup;
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index d65107919998..ddfb6bf97152 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -97,15 +97,13 @@ void test_kfree_skb(void)
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
link_fentry = bpf_program__attach_trace(fentry);
- if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n",
- PTR_ERR(link_fentry)))
+ if (!ASSERT_OK_PTR(link_fentry, "attach fentry"))
goto close_prog;
link_fexit = bpf_program__attach_trace(fexit);
- if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n",
- PTR_ERR(link_fexit)))
+ if (!ASSERT_OK_PTR(link_fexit, "attach fexit"))
goto close_prog;
perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map");
@@ -116,7 +114,7 @@ void test_kfree_skb(void)
pb_opts.sample_cb = on_sample;
pb_opts.ctx = &passed;
pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
memcpy(skb.cb, &cb, sizeof(cb));
@@ -144,12 +142,9 @@ void test_kfree_skb(void)
CHECK_FAIL(!test_ok[0] || !test_ok[1]);
close_prog:
perf_buffer__free(pb);
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
- if (!IS_ERR_OR_NULL(link_fentry))
- bpf_link__destroy(link_fentry);
- if (!IS_ERR_OR_NULL(link_fexit))
- bpf_link__destroy(link_fexit);
+ bpf_link__destroy(link);
+ bpf_link__destroy(link_fentry);
+ bpf_link__destroy(link_fexit);
bpf_object__close(obj);
bpf_object__close(obj2);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 7fc0951ee75f..30a7b9b837bf 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
-#include "kfunc_call_test.skel.h"
+#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
static void test_main(void)
@@ -14,13 +14,13 @@ static void test_main(void)
if (!ASSERT_OK_PTR(skel, "skel"))
return;
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
+ prog_fd = skel->progs.kfunc_call_test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test1)");
ASSERT_EQ(retval, 12, "test1-retval");
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2);
+ prog_fd = skel->progs.kfunc_call_test2.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test2)");
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index b58b775d19f3..67bebd324147 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -87,8 +87,7 @@ void test_ksyms_btf(void)
struct btf *btf;
btf = libbpf_find_kernel_btf();
- if (CHECK(IS_ERR(btf), "btf_exists", "failed to load kernel BTF: %ld\n",
- PTR_ERR(btf)))
+ if (!ASSERT_OK_PTR(btf, "btf_exists"))
return;
percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
index 4c232b456479..2cd5cded543f 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -4,7 +4,7 @@
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
-#include "test_ksyms_module.skel.h"
+#include "test_ksyms_module.lskel.h"
static int duration;
diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
index a743288cf384..6fc97c45f71e 100644
--- a/tools/testing/selftests/bpf/prog_tests/link_pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
@@ -17,7 +17,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
int err, i;
link = bpf_program__attach(prog);
- if (CHECK(IS_ERR(link), "link_attach", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
bss->in = 1;
@@ -51,7 +51,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
/* re-open link from BPFFS */
link = bpf_link__open(link_pin_path);
- if (CHECK(IS_ERR(link), "link_open", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_open"))
goto cleanup;
CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2",
@@ -84,8 +84,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i);
cleanup:
- if (!IS_ERR(link))
- bpf_link__destroy(link);
+ bpf_link__destroy(link);
}
void test_link_pinning(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
new file mode 100644
index 000000000000..beebfa9730e1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+#include "test_lookup_and_delete.skel.h"
+
+#define START_VALUE 1234
+#define NEW_VALUE 4321
+#define MAX_ENTRIES 2
+
+static int duration;
+static int nr_cpus;
+
+static int fill_values(int map_fd)
+{
+ __u64 key, value = START_VALUE;
+ int err;
+
+ for (key = 1; key < MAX_ENTRIES + 1; key++) {
+ err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int fill_values_percpu(int map_fd)
+{
+ __u64 key, value[nr_cpus];
+ int i, err;
+
+ for (i = 0; i < nr_cpus; i++)
+ value[i] = START_VALUE;
+
+ for (key = 1; key < MAX_ENTRIES + 1; key++) {
+ err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
+ int *map_fd)
+{
+ struct test_lookup_and_delete *skel;
+ int err;
+
+ skel = test_lookup_and_delete__open();
+ if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
+ return NULL;
+
+ err = bpf_map__set_type(skel->maps.hash_map, map_type);
+ if (!ASSERT_OK(err, "bpf_map__set_type"))
+ goto cleanup;
+
+ err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
+ if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+ goto cleanup;
+
+ err = test_lookup_and_delete__load(skel);
+ if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
+ goto cleanup;
+
+ *map_fd = bpf_map__fd(skel->maps.hash_map);
+ if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+ return NULL;
+}
+
+/* Triggers BPF program that updates map with given key and value */
+static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
+ __u64 value)
+{
+ int err;
+
+ skel->bss->set_pid = getpid();
+ skel->bss->set_key = key;
+ skel->bss->set_value = value;
+
+ err = test_lookup_and_delete__attach(skel);
+ if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
+ return -1;
+
+ syscall(__NR_getpgid);
+
+ test_lookup_and_delete__detach(skel);
+
+ return 0;
+}
+
+static void test_lookup_and_delete_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, value;
+ int map_fd, err;
+
+ /* Setup program and fill the map. */
+ skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values(map_fd);
+ if (!ASSERT_OK(err, "fill_values"))
+ goto cleanup;
+
+ /* Lookup and delete element. */
+ key = 1;
+ err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Fetched value should match the initially set value. */
+ if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
+ "unexpected value=%lld\n", value))
+ goto cleanup;
+
+ /* Check that the entry is nonexistent. */
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_percpu_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, val, value[nr_cpus];
+ int map_fd, err, i;
+
+ /* Setup program and fill the map. */
+ skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values_percpu(map_fd);
+ if (!ASSERT_OK(err, "fill_values_percpu"))
+ goto cleanup;
+
+ /* Lookup and delete element. */
+ key = 1;
+ err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ for (i = 0; i < nr_cpus; i++) {
+ val = value[i];
+
+ /* Fetched value should match the initially set value. */
+ if (CHECK(val != START_VALUE, "map value",
+ "unexpected for cpu %d: %lld\n", i, val))
+ goto cleanup;
+ }
+
+ /* Check that the entry is nonexistent. */
+ err = bpf_map_lookup_elem(map_fd, &key, value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_lru_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, value;
+ int map_fd, err;
+
+ /* Setup program and fill the LRU map. */
+ skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values(map_fd);
+ if (!ASSERT_OK(err, "fill_values"))
+ goto cleanup;
+
+ /* Insert new element at key=3, should reuse LRU element. */
+ key = 3;
+ err = trigger_tp(skel, key, NEW_VALUE);
+ if (!ASSERT_OK(err, "trigger_tp"))
+ goto cleanup;
+
+ /* Lookup and delete element 3. */
+ err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Value should match the new value. */
+ if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
+ "unexpected value=%lld\n", value))
+ goto cleanup;
+
+ /* Check that entries 3 and 1 are nonexistent. */
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ key = 1;
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_lru_percpu_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, val, value[nr_cpus];
+ int map_fd, err, i, cpucnt = 0;
+
+ /* Setup program and fill the LRU map. */
+ skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values_percpu(map_fd);
+ if (!ASSERT_OK(err, "fill_values_percpu"))
+ goto cleanup;
+
+ /* Insert new element at key=3, should reuse LRU element 1. */
+ key = 3;
+ err = trigger_tp(skel, key, NEW_VALUE);
+ if (!ASSERT_OK(err, "trigger_tp"))
+ goto cleanup;
+
+ /* Clear the value buffer. */
+ for (i = 0; i < nr_cpus; i++)
+ value[i] = 0;
+
+ /* Lookup and delete element 3. */
+ err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Check if only one CPU has set the value. */
+ for (i = 0; i < nr_cpus; i++) {
+ val = value[i];
+ if (val) {
+ if (CHECK(val != NEW_VALUE, "map value",
+ "unexpected for cpu %d: %lld\n", i, val))
+ goto cleanup;
+ cpucnt++;
+ }
+ }
+ if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
+ cpucnt))
+ goto cleanup;
+
+ /* Check that entries 3 and 1 are nonexistent. */
+ err = bpf_map_lookup_elem(map_fd, &key, value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ key = 1;
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+void test_lookup_and_delete(void)
+{
+ nr_cpus = bpf_num_possible_cpus();
+
+ if (test__start_subtest("lookup_and_delete"))
+ test_lookup_and_delete_hash();
+ if (test__start_subtest("lookup_and_delete_percpu"))
+ test_lookup_and_delete_percpu_hash();
+ if (test__start_subtest("lookup_and_delete_lru"))
+ test_lookup_and_delete_lru_hash();
+ if (test__start_subtest("lookup_and_delete_lru_percpu"))
+ test_lookup_and_delete_lru_percpu_hash();
+}
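As a standalone illustration of the command this new test exercises: BPF_MAP_LOOKUP_AND_DELETE_ELEM atomically fetches a value and removes its entry, so an immediately following lookup fails with ENOENT. A minimal userspace sketch, not part of the patch, assuming a kernel where the command is supported for plain hash maps (the capability this series adds):

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

static int demo_lookup_and_delete(void)
{
	__u64 key = 1, value = 1234, out;
	int map_fd, err;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key),
				sizeof(value), 2, 0);
	if (map_fd < 0)
		return map_fd;

	bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);

	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &out);
	if (!err)
		printf("fetched %llu; the entry is now gone\n",
		       (unsigned long long)out);

	close(map_fd);
	return err;
}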
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
new file mode 100644
index 000000000000..59adb4715394
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check if we can migrate child sockets.
+ *
+ * 1. call listen() for 4 server sockets.
+ * 2. call connect() for 25 client sockets.
+ * 3. call listen() for 1 server socket. (migration target)
+ * 4. update a map to migrate all child sockets
+ * to the last server socket (migrate_map[cookie] = 4)
+ * 5. call shutdown() for the first 4 server sockets
+ * and migrate the requests in the accept queue
+ * to the last server socket.
+ * 6. call listen() for the second server socket.
+ * 7. call shutdown() for the last server
+ * and migrate the requests in the accept queue
+ * to the second server socket.
+ * 8. call listen() for the last server.
+ * 9. call shutdown() for the second server
+ * and migrate the requests in the accept queue
+ * to the last server socket.
+ * 10. call accept() for the last server socket.
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_progs.h"
+#include "test_migrate_reuseport.skel.h"
+#include "network_helpers.h"
+
+#ifndef TCP_FASTOPEN_CONNECT
+#define TCP_FASTOPEN_CONNECT 30
+#endif
+
+#define IFINDEX_LO 1
+
+#define NR_SERVERS 5
+#define NR_CLIENTS (NR_SERVERS * 5)
+#define MIGRATED_TO (NR_SERVERS - 1)
+
+/* fastopenq->max_qlen and sk->sk_max_ack_backlog */
+#define QLEN (NR_CLIENTS * 5)
+
+#define MSG "Hello World\0"
+#define MSGLEN 12
+
+static struct migrate_reuseport_test_case {
+ const char *name;
+ __s64 servers[NR_SERVERS];
+ __s64 clients[NR_CLIENTS];
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ int family;
+ int state;
+ bool drop_ack;
+ bool expire_synack_timer;
+ bool fastopen;
+ struct bpf_link *link;
+} test_cases[] = {
+ {
+ .name = "IPv4 TCP_ESTABLISHED inet_csk_listen_stop",
+ .family = AF_INET,
+ .state = BPF_TCP_ESTABLISHED,
+ .drop_ack = false,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv4 TCP_SYN_RECV inet_csk_listen_stop",
+ .family = AF_INET,
+ .state = BPF_TCP_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = true,
+ },
+ {
+ .name = "IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler",
+ .family = AF_INET,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = true,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv4 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
+ .family = AF_INET,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_ESTABLISHED inet_csk_listen_stop",
+ .family = AF_INET6,
+ .state = BPF_TCP_ESTABLISHED,
+ .drop_ack = false,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_SYN_RECV inet_csk_listen_stop",
+ .family = AF_INET6,
+ .state = BPF_TCP_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = true,
+ },
+ {
+ .name = "IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler",
+ .family = AF_INET6,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = true,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
+ .family = AF_INET6,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ }
+};
+
+static void init_fds(__s64 fds[], int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ fds[i] = -1;
+}
+
+static void close_fds(__s64 fds[], int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (fds[i] != -1) {
+ close(fds[i]);
+ fds[i] = -1;
+ }
+ }
+}
+
+static int setup_fastopen(char *buf, int size, int *saved_len, bool restore)
+{
+ int err = 0, fd, len;
+
+ fd = open("/proc/sys/net/ipv4/tcp_fastopen", O_RDWR);
+ if (!ASSERT_NEQ(fd, -1, "open"))
+ return -1;
+
+ if (restore) {
+ len = write(fd, buf, *saved_len);
+ if (!ASSERT_EQ(len, *saved_len, "write - restore"))
+ err = -1;
+ } else {
+ *saved_len = read(fd, buf, size);
+ if (!ASSERT_GE(*saved_len, 1, "read")) {
+ err = -1;
+ goto close;
+ }
+
+ err = lseek(fd, 0, SEEK_SET);
+ if (!ASSERT_OK(err, "lseek"))
+ goto close;
+
+ /* (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE |
+ * TFO_CLIENT_NO_COOKIE | TFO_SERVER_COOKIE_NOT_REQD)
+ */
+ len = write(fd, "519", 3);
+ if (!ASSERT_EQ(len, 3, "write - setup"))
+ err = -1;
+ }
+
+close:
+ close(fd);
+
+ return err;
+}
+
+static int drop_ack(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ if (test_case->family == AF_INET)
+ skel->bss->server_port = ((struct sockaddr_in *)
+ &test_case->addr)->sin_port;
+ else
+ skel->bss->server_port = ((struct sockaddr_in6 *)
+ &test_case->addr)->sin6_port;
+
+ test_case->link = bpf_program__attach_xdp(skel->progs.drop_ack,
+ IFINDEX_LO);
+ if (!ASSERT_OK_PTR(test_case->link, "bpf_program__attach_xdp"))
+ return -1;
+
+ return 0;
+}
+
+static int pass_ack(struct migrate_reuseport_test_case *test_case)
+{
+ int err;
+
+ err = bpf_link__detach(test_case->link);
+ if (!ASSERT_OK(err, "bpf_link__detach"))
+ return -1;
+
+ test_case->link = NULL;
+
+ return 0;
+}
+
+static int start_servers(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int i, err, prog_fd, reuseport = 1, qlen = QLEN;
+
+ prog_fd = bpf_program__fd(skel->progs.migrate_reuseport);
+
+ make_sockaddr(test_case->family,
+ test_case->family == AF_INET ? "127.0.0.1" : "::1", 0,
+ &test_case->addr, &test_case->addrlen);
+
+ for (i = 0; i < NR_SERVERS; i++) {
+ test_case->servers[i] = socket(test_case->family, SOCK_STREAM,
+ IPPROTO_TCP);
+ if (!ASSERT_NEQ(test_case->servers[i], -1, "socket"))
+ return -1;
+
+ err = setsockopt(test_case->servers[i], SOL_SOCKET,
+ SO_REUSEPORT, &reuseport, sizeof(reuseport));
+ if (!ASSERT_OK(err, "setsockopt - SO_REUSEPORT"))
+ return -1;
+
+ err = bind(test_case->servers[i],
+ (struct sockaddr *)&test_case->addr,
+ test_case->addrlen);
+ if (!ASSERT_OK(err, "bind"))
+ return -1;
+
+ if (i == 0) {
+ err = setsockopt(test_case->servers[i], SOL_SOCKET,
+ SO_ATTACH_REUSEPORT_EBPF,
+ &prog_fd, sizeof(prog_fd));
+ if (!ASSERT_OK(err,
+ "setsockopt - SO_ATTACH_REUSEPORT_EBPF"))
+ return -1;
+
+ err = getsockname(test_case->servers[i],
+ (struct sockaddr *)&test_case->addr,
+ &test_case->addrlen);
+ if (!ASSERT_OK(err, "getsockname"))
+ return -1;
+ }
+
+ if (test_case->fastopen) {
+ err = setsockopt(test_case->servers[i],
+ SOL_TCP, TCP_FASTOPEN,
+ &qlen, sizeof(qlen));
+ if (!ASSERT_OK(err, "setsockopt - TCP_FASTOPEN"))
+ return -1;
+ }
+
+ /* All requests will be tied to the first four listeners */
+ if (i != MIGRATED_TO) {
+ err = listen(test_case->servers[i], qlen);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_clients(struct migrate_reuseport_test_case *test_case)
+{
+ char buf[MSGLEN] = MSG;
+ int i, err;
+
+ for (i = 0; i < NR_CLIENTS; i++) {
+ test_case->clients[i] = socket(test_case->family, SOCK_STREAM,
+ IPPROTO_TCP);
+ if (!ASSERT_NEQ(test_case->clients[i], -1, "socket"))
+ return -1;
+
+ /* The attached XDP program drops only the final ACK, so
+ * clients will transition to TCP_ESTABLISHED immediately.
+ */
+ err = settimeo(test_case->clients[i], 100);
+ if (!ASSERT_OK(err, "settimeo"))
+ return -1;
+
+ if (test_case->fastopen) {
+ int fastopen = 1;
+
+ err = setsockopt(test_case->clients[i], IPPROTO_TCP,
+ TCP_FASTOPEN_CONNECT, &fastopen,
+ sizeof(fastopen));
+ if (!ASSERT_OK(err,
+ "setsockopt - TCP_FASTOPEN_CONNECT"))
+ return -1;
+ }
+
+ err = connect(test_case->clients[i],
+ (struct sockaddr *)&test_case->addr,
+ test_case->addrlen);
+ if (!ASSERT_OK(err, "connect"))
+ return -1;
+
+ err = write(test_case->clients[i], buf, MSGLEN);
+ if (!ASSERT_EQ(err, MSGLEN, "write"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int update_maps(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int i, err, migrated_to = MIGRATED_TO;
+ int reuseport_map_fd, migrate_map_fd;
+ __u64 value;
+
+ reuseport_map_fd = bpf_map__fd(skel->maps.reuseport_map);
+ migrate_map_fd = bpf_map__fd(skel->maps.migrate_map);
+
+ for (i = 0; i < NR_SERVERS; i++) {
+ value = (__u64)test_case->servers[i];
+ err = bpf_map_update_elem(reuseport_map_fd, &i, &value,
+ BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem - reuseport_map"))
+ return -1;
+
+ err = bpf_map_lookup_elem(reuseport_map_fd, &i, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem - reuseport_map"))
+ return -1;
+
+ err = bpf_map_update_elem(migrate_map_fd, &value, &migrated_to,
+ BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem - migrate_map"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int migrate_dance(struct migrate_reuseport_test_case *test_case)
+{
+ int i, err;
+
+ /* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests
+ * to the last listener based on eBPF.
+ */
+ for (i = 0; i < MIGRATED_TO; i++) {
+ err = shutdown(test_case->servers[i], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+ }
+
+ /* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */
+ if (test_case->state == BPF_TCP_NEW_SYN_RECV)
+ return 0;
+
+ /* Note that we use the second listener instead of the
+ * first one here.
+ *
+ * The first listener is bind()ed with port 0, and
+ * SOCK_BINDPORT_LOCK is not set in sk_userlocks, so
+ * calling listen() again will bind() the first listener
+ * on a new ephemeral port and detach it from the existing
+ * reuseport group. (See: __inet_bind(), tcp_set_state())
+ *
+ * OTOH, the second one is bind()ed with a specific port,
+ * and SOCK_BINDPORT_LOCK is set. Thus, re-listen() will
+ * resurrect the listener on the existing reuseport group.
+ */
+ err = listen(test_case->servers[1], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+
+ /* Migrate from the last listener to the second one.
+ *
+ * All listeners have been detached from the reuseport_map,
+ * so from here on migration is done by the kernel's random pick.
+ */
+ err = shutdown(test_case->servers[MIGRATED_TO], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+
+ /* Back to the existing reuseport group */
+ err = listen(test_case->servers[MIGRATED_TO], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+
+ /* Migrate back to the last one from the second one */
+ err = shutdown(test_case->servers[1], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+
+ return 0;
+}
+
+static void count_requests(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = sizeof(addr);
+ int err, cnt = 0, client;
+ char buf[MSGLEN];
+
+ err = settimeo(test_case->servers[MIGRATED_TO], 4000);
+ if (!ASSERT_OK(err, "settimeo"))
+ goto out;
+
+ for (; cnt < NR_CLIENTS; cnt++) {
+ client = accept(test_case->servers[MIGRATED_TO],
+ (struct sockaddr *)&addr, &len);
+ if (!ASSERT_NEQ(client, -1, "accept"))
+ goto out;
+
+ memset(buf, 0, MSGLEN);
+ read(client, &buf, MSGLEN);
+ close(client);
+
+ if (!ASSERT_STREQ(buf, MSG, "read"))
+ goto out;
+ }
+
+out:
+ ASSERT_EQ(cnt, NR_CLIENTS, "count in userspace");
+
+ switch (test_case->state) {
+ case BPF_TCP_ESTABLISHED:
+ cnt = skel->bss->migrated_at_close;
+ break;
+ case BPF_TCP_SYN_RECV:
+ cnt = skel->bss->migrated_at_close_fastopen;
+ break;
+ case BPF_TCP_NEW_SYN_RECV:
+ if (test_case->expire_synack_timer)
+ cnt = skel->bss->migrated_at_send_synack;
+ else
+ cnt = skel->bss->migrated_at_recv_ack;
+ break;
+ default:
+ cnt = 0;
+ }
+
+ ASSERT_EQ(cnt, NR_CLIENTS, "count in BPF prog");
+}
+
+static void run_test(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int err, saved_len;
+ char buf[16];
+
+ skel->bss->migrated_at_close = 0;
+ skel->bss->migrated_at_close_fastopen = 0;
+ skel->bss->migrated_at_send_synack = 0;
+ skel->bss->migrated_at_recv_ack = 0;
+
+ init_fds(test_case->servers, NR_SERVERS);
+ init_fds(test_case->clients, NR_CLIENTS);
+
+ if (test_case->fastopen) {
+ memset(buf, 0, sizeof(buf));
+
+ err = setup_fastopen(buf, sizeof(buf), &saved_len, false);
+ if (!ASSERT_OK(err, "setup_fastopen - setup"))
+ return;
+ }
+
+ err = start_servers(test_case, skel);
+ if (!ASSERT_OK(err, "start_servers"))
+ goto close_servers;
+
+ if (test_case->drop_ack) {
+ /* Drop the final ACK of the 3-way handshake and keep the
+ * in-flight requests stuck in TCP_SYN_RECV or TCP_NEW_SYN_RECV.
+ */
+ err = drop_ack(test_case, skel);
+ if (!ASSERT_OK(err, "drop_ack"))
+ goto close_servers;
+ }
+
+ /* Tie requests to the first four listeners */
+ err = start_clients(test_case);
+ if (!ASSERT_OK(err, "start_clients"))
+ goto close_clients;
+
+ err = listen(test_case->servers[MIGRATED_TO], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ goto close_clients;
+
+ err = update_maps(test_case, skel);
+ if (!ASSERT_OK(err, "update_maps"))
+ goto close_clients;
+
+ /* Migrate the requests in the accept queue only.
+ * TCP_NEW_SYN_RECV requests are not migrated at this point.
+ */
+ err = migrate_dance(test_case);
+ if (!ASSERT_OK(err, "migrate_dance"))
+ goto close_clients;
+
+ if (test_case->expire_synack_timer) {
+ /* Wait for SYN+ACK timers to expire so that
+ * reqsk_timer_handler() migrates TCP_NEW_SYN_RECV requests.
+ */
+ sleep(1);
+ }
+
+ if (test_case->link) {
+ /* Resume 3WHS and migrate TCP_NEW_SYN_RECV requests */
+ err = pass_ack(test_case);
+ if (!ASSERT_OK(err, "pass_ack"))
+ goto close_clients;
+ }
+
+ count_requests(test_case, skel);
+
+close_clients:
+ close_fds(test_case->clients, NR_CLIENTS);
+
+ if (test_case->link) {
+ err = pass_ack(test_case);
+ ASSERT_OK(err, "pass_ack - clean up");
+ }
+
+close_servers:
+ close_fds(test_case->servers, NR_SERVERS);
+
+ if (test_case->fastopen) {
+ err = setup_fastopen(buf, sizeof(buf), &saved_len, true);
+ ASSERT_OK(err, "setup_fastopen - restore");
+ }
+}
+
+void test_migrate_reuseport(void)
+{
+ struct test_migrate_reuseport *skel;
+ int i;
+
+ skel = test_migrate_reuseport__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ test__start_subtest(test_cases[i].name);
+ run_test(&test_cases[i], skel);
+ }
+
+ test_migrate_reuseport__destroy(skel);
+}
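One subtlety in update_maps() above deserves spelling out: for a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, userspace stores a listener by writing its fd, but reading the slot back yields the socket cookie; that is why the test looks each element up again and keys migrate_map by the returned value. A sketch of the round trip, not part of the patch and assuming valid fds:

#include <bpf/bpf.h>

static int publish_listener(int reuseport_map_fd, int migrate_map_fd,
			    int idx, int listener_fd, int target)
{
	__u64 value = (__u64)listener_fd;
	int err;

	/* Store the socket: the kernel keeps a reference, not the fd. */
	err = bpf_map_update_elem(reuseport_map_fd, &idx, &value, BPF_NOEXIST);
	if (err)
		return err;

	/* Read back: value now holds the socket cookie, not the fd. */
	err = bpf_map_lookup_elem(reuseport_map_fd, &idx, &value);
	if (err)
		return err;

	/* Key the migration policy by that cookie. */
	return bpf_map_update_elem(migrate_map_fd, &value, &target, BPF_NOEXIST);
}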
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
index e178416bddad..6194b776a28b 100644
--- a/tools/testing/selftests/bpf/prog_tests/obj_name.c
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -38,13 +38,13 @@ void test_obj_name(void)
fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
+ (!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-prog-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
- if (fd != -1)
+ if (fd >= 0)
close(fd);
/* test different attr.map_name during BPF_MAP_CREATE */
@@ -59,13 +59,13 @@ void test_obj_name(void)
memcpy(attr.map_name, tests[i].name, ncopy);
fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
+ (!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-map-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
- if (fd != -1)
+ if (fd >= 0)
close(fd);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
index e35c444902a7..12c4f45cee1a 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -74,7 +74,7 @@ static void test_perf_branches_common(int perf_fd,
/* attach perf_event */
link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
- if (CHECK(IS_ERR(link), "attach_perf_event", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto out_destroy_skel;
/* generate some branches on cpu 0 */
@@ -119,7 +119,7 @@ static void test_perf_branches_hw(void)
* Some setups don't support branch records (virtual machines, !x86),
* so skip test in this case.
*/
- if (pfd == -1) {
+ if (pfd < 0) {
if (errno == ENOENT || errno == EOPNOTSUPP) {
printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
__func__);
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
index ca9f0895ec84..6490e9673002 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
@@ -80,7 +80,7 @@ void test_perf_buffer(void)
pb_opts.sample_cb = on_sample;
pb_opts.ctx = &cpu_seen;
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out_close;
CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
index 72c3690844fb..33144c9432ae 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
@@ -97,8 +97,7 @@ void test_perf_event_stackmap(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.oncpu))) {
+ if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 7aecfd9e87d1..95bd12097358 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -15,7 +15,7 @@ void test_probe_user(void)
static const int zero = 0;
obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
@@ -33,11 +33,8 @@ void test_probe_user(void)
goto cleanup;
kprobe_link = bpf_program__attach(kprobe_prog);
- if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
- "err %ld\n", PTR_ERR(kprobe_link))) {
- kprobe_link = NULL;
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
goto cleanup;
- }
memset(&curr, 0, sizeof(curr));
in->sin_family = AF_INET;
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
index 131d7f7eeb42..89fc98faf19e 100644
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -46,7 +46,7 @@ void test_prog_run_xattr(void)
tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
+ CHECK_ATTR(err >= 0 || errno != ENOSPC || tattr.retval, "run",
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
@@ -78,6 +78,6 @@ void test_prog_run_xattr(void)
cleanup:
if (skel)
test_pkt_access__destroy(skel);
- if (stats_fd != -1)
+ if (stats_fd >= 0)
close(stats_fd);
}
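For context on the ENOSPC case above: bpf_prog_test_run_xattr() fails when data_out cannot hold the resulting packet, yet it still writes the required length back into data_size_out; the hunks only change how that failure is detected. A sketch of the truncation case, not part of the patch, assuming prog_fd refers to a loaded pass-through program:

#include <errno.h>
#include <bpf/bpf.h>

static int run_truncated(int prog_fd, void *pkt, __u32 pkt_sz, void *small_buf)
{
	struct bpf_prog_test_run_attr tattr = {
		.prog_fd = prog_fd,
		.data_in = pkt,
		.data_size_in = pkt_sz,
		.data_out = small_buf,
		.data_size_out = pkt_sz / 2,	/* deliberately too small */
	};
	int err;

	err = bpf_prog_test_run_xattr(&tattr);
	/* expect err < 0, errno == ENOSPC, tattr.data_size_out == pkt_sz */
	return (err < 0 && errno == ENOSPC) ? 0 : -1;
}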
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
index c5fb191874ac..41720a62c4fa 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
@@ -77,7 +77,7 @@ void test_raw_tp_test_run(void)
/* invalid cpu ID should fail with ENXIO */
opts.cpu = 0xffffffff;
err = bpf_prog_test_run_opts(prog_fd, &opts);
- CHECK(err != -1 || errno != ENXIO,
+ CHECK(err >= 0 || errno != ENXIO,
"test_run_opts_fail",
"should failed with ENXIO\n");
@@ -85,7 +85,7 @@ void test_raw_tp_test_run(void)
opts.cpu = 1;
opts.flags = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts);
- CHECK(err != -1 || errno != EINVAL,
+ CHECK(err >= 0 || errno != EINVAL,
"test_run_opts_fail",
"should failed with EINVAL\n");
diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
index 563e12120e77..5f9eaa3ab584 100644
--- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -30,7 +30,7 @@ void test_rdonly_maps(void)
struct bss bss;
obj = bpf_object__open_file(file, NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
return;
err = bpf_object__load(obj);
@@ -58,11 +58,8 @@ void test_rdonly_maps(void)
goto cleanup;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
- t->prog_name, PTR_ERR(link))) {
- link = NULL;
+ if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
- }
/* trigger probe */
usleep(1);
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index ac1ee10cffd8..de2688166696 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -15,7 +15,7 @@ void test_reference_tracking(void)
int err = 0;
obj = bpf_object__open_file(file, &open_opts);
- if (CHECK_FAIL(IS_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index d3c2de2c24d1..f62361306f6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -76,7 +76,7 @@ __resolve_symbol(struct btf *btf, int type_id)
}
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
- if (test_symbols[i].id != -1)
+ if (test_symbols[i].id >= 0)
continue;
if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index de78617f6550..4706cee84360 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -12,7 +12,7 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include "test_ringbuf.skel.h"
+#include "test_ringbuf.lskel.h"
#define EDONE 7777
@@ -86,25 +86,70 @@ void test_ringbuf(void)
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
pthread_t thread;
long bg_ret = -1;
- int err, cnt;
+ int err, cnt, rb_fd;
int page_size = getpagesize();
+ void *mmap_ptr, *tmp_ptr;
skel = test_ringbuf__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
- err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
- if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
- goto cleanup;
+ skel->maps.ringbuf.max_entries = page_size;
err = test_ringbuf__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
+ rb_fd = skel->maps.ringbuf.map_fd;
+ /* good read/write cons_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
+ tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
+ if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
+ goto cleanup;
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ /* bad writeable prod_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
+ ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
+
+ /* bad writeable data pages */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
+ ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
+ mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
+
+ /* good read-only pages */
+ mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
+
+ /* good read-only pages with initial offset */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
- ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
+ ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
process_sample, NULL, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
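The new mmap checks above pin down the user-visible layout of a BPF ring buffer: page 0 holds the consumer counter and is the only page userspace may map writable; page 1 holds the producer counter; the (double-mapped) data pages start at offset 2 * page_size; and both the producer and data pages are read-only from userspace, with later mprotect(PROT_WRITE)/mremap() escalation refused as well. A sketch of a valid mapping under those assumptions, not part of the patch:

#include <sys/mman.h>
#include <unistd.h>

static void *map_ringbuf(int rb_fd, void **cons_out)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *cons, *ro;

	/* consumer page: the one read-write mapping that is allowed */
	cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    rb_fd, 0);
	if (cons == MAP_FAILED)
		return MAP_FAILED;
	*cons_out = cons;

	/* producer page plus double-mapped data pages: read-only;
	 * requesting PROT_WRITE here would fail with EPERM.
	 */
	ro = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
		  rb_fd, page_size);
	return ro;
}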
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index cef63e703924..167cd8a2edfd 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -63,7 +63,7 @@ void test_ringbuf_multi(void)
goto cleanup;
proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0);
- if (CHECK(proto_fd == -1, "bpf_create_map", "bpf_create_map failed\n"))
+ if (CHECK(proto_fd < 0, "bpf_create_map", "bpf_create_map failed\n"))
goto cleanup;
err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 821b4146b7b6..4efd337d6a3c 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -78,7 +78,7 @@ static int create_maps(enum bpf_map_type inner_type)
attr.max_entries = REUSEPORT_ARRAY_SIZE;
reuseport_array = bpf_create_map_xattr(&attr);
- RET_ERR(reuseport_array == -1, "creating reuseport_array",
+ RET_ERR(reuseport_array < 0, "creating reuseport_array",
"reuseport_array:%d errno:%d\n", reuseport_array, errno);
/* Creating outer_map */
@@ -89,7 +89,7 @@ static int create_maps(enum bpf_map_type inner_type)
attr.max_entries = 1;
attr.inner_map_fd = reuseport_array;
outer_map = bpf_create_map_xattr(&attr);
- RET_ERR(outer_map == -1, "creating outer_map",
+ RET_ERR(outer_map < 0, "creating outer_map",
"outer_map:%d errno:%d\n", outer_map, errno);
return 0;
@@ -102,8 +102,9 @@ static int prepare_bpf_obj(void)
int err;
obj = bpf_object__open("test_select_reuseport_kern.o");
- RET_ERR(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o",
- "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj));
+ err = libbpf_get_error(obj);
+ RET_ERR(err, "open test_select_reuseport_kern.o",
+ "obj:%p PTR_ERR(obj):%d\n", obj, err);
map = bpf_object__find_map_by_name(obj, "outer_map");
RET_ERR(!map, "find outer_map", "!map\n");
@@ -116,31 +117,31 @@ static int prepare_bpf_obj(void)
prog = bpf_program__next(NULL, obj);
RET_ERR(!prog, "get first bpf_program", "!prog\n");
select_by_skb_data_prog = bpf_program__fd(prog);
- RET_ERR(select_by_skb_data_prog == -1, "get prog fd",
+ RET_ERR(select_by_skb_data_prog < 0, "get prog fd",
"select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
map = bpf_object__find_map_by_name(obj, "result_map");
RET_ERR(!map, "find result_map", "!map\n");
result_map = bpf_map__fd(map);
- RET_ERR(result_map == -1, "get result_map fd",
+ RET_ERR(result_map < 0, "get result_map fd",
"result_map:%d\n", result_map);
map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
RET_ERR(!map, "find tmp_index_ovr_map\n", "!map");
tmp_index_ovr_map = bpf_map__fd(map);
- RET_ERR(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd",
+ RET_ERR(tmp_index_ovr_map < 0, "get tmp_index_ovr_map fd",
"tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
map = bpf_object__find_map_by_name(obj, "linum_map");
RET_ERR(!map, "find linum_map", "!map\n");
linum_map = bpf_map__fd(map);
- RET_ERR(linum_map == -1, "get linum_map fd",
+ RET_ERR(linum_map < 0, "get linum_map fd",
"linum_map:%d\n", linum_map);
map = bpf_object__find_map_by_name(obj, "data_check_map");
RET_ERR(!map, "find data_check_map", "!map\n");
data_check_map = bpf_map__fd(map);
- RET_ERR(data_check_map == -1, "get data_check_map fd",
+ RET_ERR(data_check_map < 0, "get data_check_map fd",
"data_check_map:%d\n", data_check_map);
return 0;
@@ -237,7 +238,7 @@ static long get_linum(void)
int err;
err = bpf_map_lookup_elem(linum_map, &index_zero, &linum);
- RET_ERR(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n",
+ RET_ERR(err < 0, "lookup_elem(linum_map)", "err:%d errno:%d\n",
err, errno);
return linum;
@@ -254,11 +255,11 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
addrlen = sizeof(cli_sa);
err = getsockname(cli_fd, (struct sockaddr *)&cli_sa,
&addrlen);
- RET_IF(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n",
+ RET_IF(err < 0, "getsockname(cli_fd)", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem(data_check_map, &index_zero, &result);
- RET_IF(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
+ RET_IF(err < 0, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
err, errno);
if (type == SOCK_STREAM) {
@@ -347,7 +348,7 @@ static void check_results(void)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &results[i]);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
}
@@ -524,12 +525,12 @@ static void test_syncookie(int type, sa_family_t family)
*/
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero,
&tmp_index, BPF_ANY);
- RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)",
+ RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, 1)",
"err:%d errno:%d\n", err, errno);
do_test(type, family, &cmd, PASS);
err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero,
&tmp_index);
- RET_IF(err == -1 || tmp_index != -1,
+ RET_IF(err < 0 || tmp_index >= 0,
"lookup_elem(tmp_index_ovr_map)",
"err:%d errno:%d tmp_index:%d\n",
err, errno, tmp_index);
@@ -569,7 +570,7 @@ static void test_detach_bpf(int type, sa_family_t family)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_before += tmp;
}
@@ -584,7 +585,7 @@ static void test_detach_bpf(int type, sa_family_t family)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_after += tmp;
}
@@ -632,24 +633,24 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany)
SO_ATTACH_REUSEPORT_EBPF,
&select_by_skb_data_prog,
sizeof(select_by_skb_data_prog));
- RET_IF(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
+ RET_IF(err < 0, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
"err:%d errno:%d\n", err, errno);
}
err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen);
- RET_IF(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n",
+ RET_IF(err < 0, "bind()", "sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
if (type == SOCK_STREAM) {
err = listen(sk_fds[i], 10);
- RET_IF(err == -1, "listen()",
+ RET_IF(err < 0, "listen()",
"sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
}
err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i],
BPF_NOEXIST);
- RET_IF(err == -1, "update_elem(reuseport_array)",
+ RET_IF(err < 0, "update_elem(reuseport_array)",
"sk_fds[%d] err:%d errno:%d\n", i, err, errno);
if (i == first) {
@@ -682,7 +683,7 @@ static void setup_per_test(int type, sa_family_t family, bool inany,
prepare_sk_fds(type, family, inany);
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr,
BPF_ANY);
- RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)",
+ RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, -1)",
"err:%d errno:%d\n", err, errno);
/* Install reuseport_array to outer_map? */
@@ -691,7 +692,7 @@ static void setup_per_test(int type, sa_family_t family, bool inany,
err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array,
BPF_ANY);
- RET_IF(err == -1, "update_elem(outer_map, 0, reuseport_array)",
+ RET_IF(err < 0, "update_elem(outer_map, 0, reuseport_array)",
"err:%d errno:%d\n", err, errno);
}
@@ -720,18 +721,18 @@ static void cleanup_per_test(bool no_inner_map)
return;
err = bpf_map_delete_elem(outer_map, &index_zero);
- RET_IF(err == -1, "delete_elem(outer_map)",
+ RET_IF(err < 0, "delete_elem(outer_map)",
"err:%d errno:%d\n", err, errno);
}
static void cleanup(void)
{
- if (outer_map != -1) {
+ if (outer_map >= 0) {
close(outer_map);
outer_map = -1;
}
- if (reuseport_array != -1) {
+ if (reuseport_array >= 0) {
close(reuseport_array);
reuseport_array = -1;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 7043e6ded0e6..023cc532992d 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -2,7 +2,7 @@
#include <test_progs.h>
#include "test_send_signal_kern.skel.h"
-static volatile int sigusr1_received = 0;
+int sigusr1_received = 0;
static void sigusr1_handler(int signum)
{
@@ -91,8 +91,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
skel->links.send_signal_perf =
bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd);
- if (CHECK(IS_ERR(skel->links.send_signal_perf), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.send_signal_perf)))
+ if (!ASSERT_OK_PTR(skel->links.send_signal_perf, "attach_perf_event"))
goto disable_pmu;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 45c82db3c58c..aee41547e7f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -480,7 +480,7 @@ static struct bpf_link *attach_lookup_prog(struct bpf_program *prog)
}
link = bpf_program__attach_netns(prog, net_fd);
- if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) {
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_netns")) {
errno = -PTR_ERR(link);
log_err("failed to attach program '%s' to netns",
bpf_program__name(prog));
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index fe87b77af459..f6f130c99b8c 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -82,10 +82,8 @@ void test_skeleton(void)
CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
- CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
- bss->handler_out5.a, 5);
- CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
- bss->handler_out5.b, 6);
+ CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5);
+ CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6);
CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14);
CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
index af87118e748e..577d619fb07e 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
@@ -97,12 +97,12 @@ static void check_result(void)
err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
&egress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
&ingress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
@@ -355,14 +355,12 @@ void test_sock_fields(void)
egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields,
child_cg_fd);
- if (CHECK(IS_ERR(egress_link), "attach_cgroup(egress)", "err:%ld\n",
- PTR_ERR(egress_link)))
+ if (!ASSERT_OK_PTR(egress_link, "attach_cgroup(egress)"))
goto done;
ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields,
child_cg_fd);
- if (CHECK(IS_ERR(ingress_link), "attach_cgroup(ingress)", "err:%ld\n",
- PTR_ERR(ingress_link)))
+ if (!ASSERT_OK_PTR(ingress_link, "attach_cgroup(ingress)"))
goto done;
linum_map_fd = bpf_map__fd(skel->maps.linum_map);
@@ -375,8 +373,8 @@ done:
bpf_link__destroy(egress_link);
bpf_link__destroy(ingress_link);
test_sock_fields__destroy(skel);
- if (child_cg_fd != -1)
+ if (child_cg_fd >= 0)
close(child_cg_fd);
- if (parent_cg_fd != -1)
+ if (parent_cg_fd >= 0)
close(parent_cg_fd);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index ab77596b64e3..1352ec104149 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -88,11 +88,11 @@ static void test_sockmap_create_update_free(enum bpf_map_type map_type)
int s, map, err;
s = connected_socket_v4();
- if (CHECK_FAIL(s == -1))
+ if (CHECK_FAIL(s < 0))
return;
map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
- if (CHECK_FAIL(map == -1)) {
+ if (CHECK_FAIL(map < 0)) {
perror("bpf_create_map");
goto out;
}
@@ -245,7 +245,7 @@ static void test_sockmap_copy(enum bpf_map_type map_type)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.copy, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
@@ -304,7 +304,7 @@ static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
}
err = bpf_prog_attach(verdict, map, second, 0);
- assert(err == -1 && errno == EBUSY);
+ ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
err = bpf_prog_detach2(verdict, map, first);
if (CHECK_FAIL(err)) {
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index 06b86addc181..7a0d64fdc192 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -98,7 +98,7 @@ static void run_tests(int family, enum bpf_map_type map_type)
int map;
map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
- if (CHECK_FAIL(map == -1)) {
+ if (CHECK_FAIL(map < 0)) {
perror("bpf_map_create");
return;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 648d9ae898d2..515229f24a93 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -139,7 +139,7 @@
#define xbpf_map_delete_elem(fd, key) \
({ \
int __ret = bpf_map_delete_elem((fd), (key)); \
- if (__ret == -1) \
+ if (__ret < 0) \
FAIL_ERRNO("map_delete"); \
__ret; \
})
@@ -147,7 +147,7 @@
#define xbpf_map_lookup_elem(fd, key, val) \
({ \
int __ret = bpf_map_lookup_elem((fd), (key), (val)); \
- if (__ret == -1) \
+ if (__ret < 0) \
FAIL_ERRNO("map_lookup"); \
__ret; \
})
@@ -155,7 +155,7 @@
#define xbpf_map_update_elem(fd, key, val, flags) \
({ \
int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \
- if (__ret == -1) \
+ if (__ret < 0) \
FAIL_ERRNO("map_update"); \
__ret; \
})
@@ -164,7 +164,7 @@
({ \
int __ret = \
bpf_prog_attach((prog), (target), (type), (flags)); \
- if (__ret == -1) \
+ if (__ret < 0) \
FAIL_ERRNO("prog_attach(" #type ")"); \
__ret; \
})
@@ -172,7 +172,7 @@
#define xbpf_prog_detach2(prog, target, type) \
({ \
int __ret = bpf_prog_detach2((prog), (target), (type)); \
- if (__ret == -1) \
+ if (__ret < 0) \
FAIL_ERRNO("prog_detach2(" #type ")"); \
__ret; \
})
@@ -1610,6 +1610,7 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,
struct sockaddr_storage addr;
int c0, c1, p0, p1;
unsigned int pass;
+ int retries = 100;
socklen_t len;
int err, n;
u64 value;
@@ -1686,9 +1687,13 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
+again:
n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
- if (n < 0)
+ if (n < 0) {
+ if (errno == EAGAIN && retries--)
+ goto again;
FAIL_ERRNO("%s: read", log_prefix);
+ }
if (n == 0)
FAIL("%s: incomplete read", log_prefix);
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 11a769e18f5d..0a91d8d9954b 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -62,8 +62,7 @@ retry:
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.oncpu))) {
+ if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index 37269d23df93..04b476bd62b9 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -21,7 +21,7 @@ void test_stacktrace_map(void)
goto close_prog;
link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
- if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_tp"))
goto close_prog;
/* find map fds */
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
index 404a5498e1a3..4fd30bb651ad 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -21,7 +21,7 @@ void test_stacktrace_map_raw_tp(void)
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
/* find map fds */
@@ -59,7 +59,6 @@ void test_stacktrace_map_raw_tp(void)
goto close_prog;
close_prog:
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
+ bpf_link__destroy(link);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c
index 46556976dccc..5c4e3014e063 100644
--- a/tools/testing/selftests/bpf/prog_tests/static_linked.c
+++ b/tools/testing/selftests/bpf/prog_tests/static_linked.c
@@ -14,12 +14,7 @@ void test_static_linked(void)
return;
skel->rodata->rovar1 = 1;
- skel->bss->static_var1 = 2;
- skel->bss->static_var11 = 3;
-
skel->rodata->rovar2 = 4;
- skel->bss->static_var2 = 5;
- skel->bss->static_var22 = 6;
err = test_static_linked__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@@ -32,8 +27,8 @@ void test_static_linked(void)
/* trigger */
usleep(1);
- ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
- ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");
+ ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1");
+ ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2");
cleanup:
test_static_linked__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c
new file mode 100644
index 000000000000..81e997a69f7a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/syscall.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "syscall.skel.h"
+
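+/* Mirrors the context struct consumed by the BPF side of the test;
+ * bpf_prog_test_run_xattr() passes it in via ctx_in and the program
+ * reports the fds of the objects it created back through it.
+ */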
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
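+/* Exercises a BPF program that itself drives the bpf() syscall: one test
+ * run is expected to create a map and load a program (their fds land in
+ * ctx), emit a verifier log into log_buf, and store the value that the
+ * map lookup below checks.
+ */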
+void test_syscall(void)
+{
+ static char verifier_log[8192];
+ struct args ctx = {
+ .max_entries = 1024,
+ .log_buf = (uintptr_t) verifier_log,
+ .log_size = sizeof(verifier_log),
+ };
+ struct bpf_prog_test_run_attr tattr = {
+ .ctx_in = &ctx,
+ .ctx_size_in = sizeof(ctx),
+ };
+ struct syscall *skel = NULL;
+ __u64 key = 12, value = 0;
+ int err;
+
+ skel = syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog);
+ err = bpf_prog_test_run_xattr(&tattr);
+ ASSERT_EQ(err, 0, "err");
+ ASSERT_EQ(tattr.retval, 1, "retval");
+ ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
+ ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd");
+ ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1),
+ "verifier_log");
+
+ err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);
+ ASSERT_EQ(err, 0, "map_lookup");
+ ASSERT_EQ(value, 34, "map lookup value");
+cleanup:
+ syscall__destroy(skel);
+ if (ctx.prog_fd > 0)
+ close(ctx.prog_fd);
+ if (ctx.map_fd > 0)
+ close(ctx.map_fd);
+ if (ctx.btf_fd > 0)
+ close(ctx.btf_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
new file mode 100644
index 000000000000..4a505a5adf4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/pkt_cls.h>
+
+#include "test_tc_bpf.skel.h"
+
+#define LO_IFINDEX 1
+
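+/* Declares one bpf_tc_opts variable per field combination; the suffix
+ * spells out which fields are set: h = handle, p = priority, f = prog_fd,
+ * i = prog_id, r = BPF_TC_F_REPLACE.
+ */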
+#define TEST_DECLARE_OPTS(__fd) \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \
+ .flags = BPF_TC_F_REPLACE); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \
+ .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1);
+
+static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int ret;
+
+ ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
+ return ret;
+
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ return ret;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
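+ /* A second attach at the same handle/priority must request replacement
+ * explicitly; prog_id is cleared first since it is an output-only field.
+ */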
+ opts.prog_id = 0;
+ opts.flags = BPF_TC_F_REPLACE;
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach replace mode"))
+ goto end;
+
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_query(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_query"))
+ goto end;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
+end:
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_detach(hook, &opts);
+ ASSERT_OK(ret, "bpf_tc_detach");
+ return ret;
+}
+
+static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
+ int ret;
+
+ ret = bpf_tc_hook_create(NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL"))
+ return -EINVAL;
+
+ /* hook ifindex = 0 */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ /* hook ifindex < 0 */
+ inv_hook.ifindex = -1;
+
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ inv_hook.ifindex = LO_IFINDEX;
+
+ /* hook.attach_point invalid */
+ inv_hook.attach_point = 0xabcd;
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* hook.attach_point valid, but parent invalid */
+ inv_hook.parent = TC_H_MAKE(1UL << 16, 10);
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_CUSTOM;
+ inv_hook.parent = 0;
+ /* These return EOPNOTSUPP instead of EINVAL because the hook's parent
+ * is validated only after its attach_point.
+ */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* detach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_detach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+ }
+
+ /* query */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_query(NULL, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+ /* when chain is not present, kernel returns -EINVAL */
+ ret = bpf_tc_query(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set"))
+ return -EINVAL;
+ }
+
+ /* attach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_attach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL"))
+ return -EINVAL;
+
+ opts_hp.flags = 42;
+ ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags"))
+ return -EINVAL;
+
+ opts_hp.flags = 0;
+ ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_pf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset"))
+ return -EINVAL;
+ opts_pf.prog_fd = opts_pf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_hf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset"))
+ return -EINVAL;
+ opts_hf.prog_fd = opts_hf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_f);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset"))
+ return -EINVAL;
+ opts_f.prog_fd = opts_f.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach");
+ }
+
+ return 0;
+}
+
+void test_tc_bpf(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
+ .attach_point = BPF_TC_INGRESS);
+ struct test_tc_bpf *skel = NULL;
+ bool hook_created = false;
+ int cls_fd, ret;
+
+ skel = test_tc_bpf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
+ return;
+
+ cls_fd = bpf_program__fd(skel->progs.cls);
+
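+ /* The clsact qdisc may already exist on loopback, so -EEXIST is treated
+ * as success; the hook is only torn down at the end if we created it.
+ */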
+ ret = bpf_tc_hook_create(&hook);
+ if (ret == 0)
+ hook_created = true;
+
+ ret = ret == -EEXIST ? 0 : ret;
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)"))
+ goto end;
+
+ hook.attach_point = BPF_TC_CUSTOM;
+ hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point"))
+ goto end;
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal ingress"))
+ goto end;
+
+ ret = bpf_tc_hook_destroy(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ goto end;
+
+ hook.attach_point = BPF_TC_INGRESS;
+ hook.parent = 0;
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal ingress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ hook.attach_point = BPF_TC_EGRESS;
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_internal egress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_api(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_bpf_api"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+end:
+ if (hook_created) {
+ hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&hook);
+ }
+ test_tc_bpf__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
new file mode 100644
index 000000000000..5703c918812b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
+ * between src and dst. The netns fwd has veth links to each src and dst. The
+ * client is in src and server in dst. The test installs a TC BPF program to each
+ * host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
+ * neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
+ * switch from ingress side; it also installs a checker prog on the egress side
+ * to drop unexpected traffic.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/if_tun.h>
+#include <linux/if.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_neigh_fib.skel.h"
+#include "test_tc_neigh.skel.h"
+#include "test_tc_peer.skel.h"
+
+#define NS_SRC "ns_src"
+#define NS_FWD "ns_fwd"
+#define NS_DST "ns_dst"
+
+#define IP4_SRC "172.16.1.100"
+#define IP4_DST "172.16.2.100"
+#define IP4_TUN_SRC "172.17.1.100"
+#define IP4_TUN_FWD "172.17.1.200"
+#define IP4_PORT 9004
+
+#define IP6_SRC "0::1:dead:beef:cafe"
+#define IP6_DST "0::2:dead:beef:cafe"
+#define IP6_TUN_SRC "1::1:dead:beef:cafe"
+#define IP6_TUN_FWD "1::2:dead:beef:cafe"
+#define IP6_PORT 9006
+
+#define IP4_SLL "169.254.0.1"
+#define IP4_DLL "169.254.0.2"
+#define IP4_NET "169.254.0.0"
+
+#define MAC_DST_FWD "00:11:22:33:44:55"
+#define MAC_DST "00:22:33:44:55:66"
+
+#define IFADDR_STR_LEN 18
+#define PING_ARGS "-i 0.2 -c 3 -w 10 -q"
+
+#define SRC_PROG_PIN_FILE "/sys/fs/bpf/test_tc_src"
+#define DST_PROG_PIN_FILE "/sys/fs/bpf/test_tc_dst"
+#define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk"
+
+#define TIMEOUT_MILLIS 10000
+
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
+static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
+
+static int write_file(const char *path, const char *newval)
+{
+ FILE *f;
+
+ f = fopen(path, "r+");
+ if (!f)
+ return -1;
+ if (fwrite(newval, strlen(newval), 1, f) != 1) {
+ log_err("writing to %s failed", path);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+struct nstoken {
+ int orig_netns_fd;
+};
+
+static int setns_by_fd(int nsfd)
+{
+ int err;
+
+ err = setns(nsfd, CLONE_NEWNET);
+ close(nsfd);
+
+ if (!ASSERT_OK(err, "setns"))
+ return err;
+
+ /* Switch /sys to the new namespace so that e.g. /sys/class/net
+ * reflects the devices in the new namespace.
+ */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "unshare"))
+ return err;
+
+ err = umount2("/sys", MNT_DETACH);
+ if (!ASSERT_OK(err, "umount2 /sys"))
+ return err;
+
+ err = mount("sysfs", "/sys", "sysfs", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys"))
+ return err;
+
+ err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+ return err;
+
+ return 0;
+}
+
+/**
+ * open_netns() - Switch to specified network namespace by name.
+ *
+ * Returns token with which to restore the original namespace
+ * using close_netns().
+ */
+static struct nstoken *open_netns(const char *name)
+{
+ int nsfd;
+ char nspath[PATH_MAX];
+ int err;
+ struct nstoken *token;
+
+ token = malloc(sizeof(struct nstoken));
+ if (!ASSERT_OK_PTR(token, "malloc token"))
+ return NULL;
+
+ token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
+ goto fail;
+
+ snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+ nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+ if (!ASSERT_GE(nsfd, 0, "open netns fd"))
+ goto fail;
+
+ err = setns_by_fd(nsfd);
+ if (!ASSERT_OK(err, "setns_by_fd"))
+ goto fail;
+
+ return token;
+fail:
+ free(token);
+ return NULL;
+}
+
+static void close_netns(struct nstoken *token)
+{
+ ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
+ free(token);
+}
+
+static int netns_setup_namespaces(const char *verb)
+{
+ const char * const *ns = namespaces;
+ char cmd[128];
+
+ while (*ns) {
+ snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns);
+ if (!ASSERT_OK(system(cmd), cmd))
+ return -1;
+ ns++;
+ }
+ return 0;
+}
+
+struct netns_setup_result {
+ int ifindex_veth_src_fwd;
+ int ifindex_veth_dst_fwd;
+};
+
+static int get_ifaddr(const char *name, char *ifaddr)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name);
+ f = fopen(path, "r");
+ if (!ASSERT_OK_PTR(f, path))
+ return -1;
+
+ ret = fread(ifaddr, 1, IFADDR_STR_LEN, f);
+ if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+static int get_ifindex(const char *name)
+{
+ char path[PATH_MAX];
+ char buf[32];
+ FILE *f;
+ int ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/net/%s/ifindex", name);
+ f = fopen(path, "r");
+ if (!ASSERT_OK_PTR(f, path))
+ return -1;
+
+ ret = fread(buf, 1, sizeof(buf), f);
+ if (!ASSERT_GT(ret, 0, "fread ifindex")) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return atoi(buf);
+}
+
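+/* Formats and runs a shell command, jumping to the enclosing function's
+ * "fail" label if it exits non-zero.
+ */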
+#define SYS(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (!ASSERT_OK(system(cmd), cmd)) \
+ goto fail; \
+ })
+
+static int netns_setup_links_and_routes(struct netns_setup_result *result)
+{
+ struct nstoken *nstoken = NULL;
+ char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
+
+ SYS("ip link add veth_src type veth peer name veth_src_fwd");
+ SYS("ip link add veth_dst type veth peer name veth_dst_fwd");
+
+ SYS("ip link set veth_dst_fwd address " MAC_DST_FWD);
+ SYS("ip link set veth_dst address " MAC_DST);
+
+ if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
+ goto fail;
+
+ result->ifindex_veth_src_fwd = get_ifindex("veth_src_fwd");
+ if (result->ifindex_veth_src_fwd < 0)
+ goto fail;
+ result->ifindex_veth_dst_fwd = get_ifindex("veth_dst_fwd");
+ if (result->ifindex_veth_dst_fwd < 0)
+ goto fail;
+
+ SYS("ip link set veth_src netns " NS_SRC);
+ SYS("ip link set veth_src_fwd netns " NS_FWD);
+ SYS("ip link set veth_dst_fwd netns " NS_FWD);
+ SYS("ip link set veth_dst netns " NS_DST);
+
+ /** setup in 'src' namespace */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto fail;
+
+ SYS("ip addr add " IP4_SRC "/32 dev veth_src");
+ SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad");
+ SYS("ip link set dev veth_src up");
+
+ SYS("ip route add " IP4_DST "/32 dev veth_src scope global");
+ SYS("ip route add " IP4_NET "/16 dev veth_src scope global");
+ SYS("ip route add " IP6_DST "/128 dev veth_src scope global");
+
+ SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s",
+ veth_src_fwd_addr);
+ SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s",
+ veth_src_fwd_addr);
+
+ close_netns(nstoken);
+
+ /** setup in 'fwd' namespace */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ goto fail;
+
+ /* The fwd netns automatically gets a v6 LL address / routes, but also
+ * needs a v4 one in order to start ARP probing. The IP4_NET route is
+ * added to the endpoints so that ARP processing will reply.
+ */
+ SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd");
+ SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
+ SYS("ip link set dev veth_src_fwd up");
+ SYS("ip link set dev veth_dst_fwd up");
+
+ SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
+ SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
+ SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
+ SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
+
+ close_netns(nstoken);
+
+ /** setup in 'dst' namespace */
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ goto fail;
+
+ SYS("ip addr add " IP4_DST "/32 dev veth_dst");
+ SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad");
+ SYS("ip link set dev veth_dst up");
+
+ SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global");
+ SYS("ip route add " IP4_NET "/16 dev veth_dst scope global");
+ SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global");
+
+ SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+ close_netns(nstoken);
+
+ return 0;
+fail:
+ if (nstoken)
+ close_netns(nstoken);
+ return -1;
+}
+
+static int netns_load_bpf(void)
+{
+ SYS("tc qdisc add dev veth_src_fwd clsact");
+ SYS("tc filter add dev veth_src_fwd ingress bpf da object-pinned "
+ SRC_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_src_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ SYS("tc qdisc add dev veth_dst_fwd clsact");
+ SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+ DST_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_tcp(int family, const char *addr, __u16 port)
+{
+ int listen_fd = -1, accept_fd = -1, client_fd = -1;
+ char buf[] = "testing testing";
+ int n;
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ return;
+
+ listen_fd = start_server(family, SOCK_STREAM, addr, port, 0);
+ if (!ASSERT_GE(listen_fd, 0, "listen"))
+ goto done;
+
+ close_netns(nstoken);
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+
+ client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto done;
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto done;
+
+ if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo"))
+ goto done;
+
+ n = write(client_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+
+ n = read(accept_fd, buf, sizeof(buf));
+ ASSERT_EQ(n, sizeof(buf), "recv from server");
+
+done:
+ if (nstoken)
+ close_netns(nstoken);
+ if (listen_fd >= 0)
+ close(listen_fd);
+ if (accept_fd >= 0)
+ close(accept_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+}
+
+static int test_ping(int family, const char *addr)
+{
+ const char *ping = family == AF_INET6 ? "ping6" : "ping";
+
+ SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping, addr);
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_connectivity(void)
+{
+ test_tcp(AF_INET, IP4_DST, IP4_PORT);
+ test_ping(AF_INET, IP4_DST);
+ test_tcp(AF_INET6, IP6_DST, IP6_PORT);
+ test_ping(AF_INET6, IP6_DST);
+}
+
+static int set_forwarding(bool enable)
+{
+ int err;
+
+ err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? "1" : "0");
+ if (!ASSERT_OK(err, "set ipv4.ip_forward=0"))
+ return err;
+
+ err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0");
+ if (!ASSERT_OK(err, "set ipv6.forwarding=0"))
+ return err;
+
+ return 0;
+}
+
+static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh_fib *skel = NULL;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh_fib__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open"))
+ goto done;
+
+ if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ /* bpf_fib_lookup() checks if forwarding is enabled */
+ if (!ASSERT_OK(set_forwarding(true), "enable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh_fib__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh *skel = NULL;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_neigh__load(skel);
+ if (!ASSERT_OK(err, "test_tc_neigh__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken;
+ struct test_tc_peer *skel;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_peer__destroy(skel);
+ close_netns(nstoken);
+}
+
+static int tun_open(char *name)
+{
+ struct ifreq ifr;
+ int fd, err;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (!ASSERT_GE(fd, 0, "open /dev/net/tun"))
+ return -1;
+
+ memset(&ifr, 0, sizeof(ifr));
+
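+ /* IFF_TUN yields an L3 device without Ethernet headers; IFF_NO_PI
+ * drops the packet-information header from reads and writes on the fd.
+ */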
+ ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
+ if (*name)
+ strncpy(ifr.ifr_name, name, IFNAMSIZ);
+
+ err = ioctl(fd, TUNSETIFF, &ifr);
+ if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+ goto fail;
+
+ SYS("ip link set dev %s up", name);
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+enum {
+ SRC_TO_TARGET = 0,
+ TARGET_TO_SRC = 1,
+};
+
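+/* Userspace relay between the two tun fds: wait until either side has a
+ * packet and copy it verbatim to the other side, emulating the tunnel.
+ */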
+static int tun_relay_loop(int src_fd, int target_fd)
+{
+ fd_set rfds, wfds;
+
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+
+ for (;;) {
+ char buf[1500];
+ int direction, nread, nwrite;
+
+ FD_SET(src_fd, &rfds);
+ FD_SET(target_fd, &rfds);
+
+ if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) {
+ log_err("select failed");
+ return 1;
+ }
+
+ direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC;
+
+ nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf));
+ if (nread < 0) {
+ log_err("read failed");
+ return 1;
+ }
+
+ nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread);
+ if (nwrite != nread) {
+ log_err("write failed");
+ return 1;
+ }
+ }
+}
+
+static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+{
+ struct test_tc_peer *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ int err;
+ int tunnel_pid = -1;
+ int src_fd = -1, target_fd = -1;
+ int ifindex;
+
+ /* Start an L3 TUN/TAP tunnel between the src and dst namespaces.
+ * This test uses TUN/TAP instead of e.g. an IPIP or GRE tunnel, as those
+ * expose the L2 headers encapsulating the IP packet to BPF and hence
+ * don't leave the skb in a suitable state for this test. An alternative
+ * to TUN/TAP would be e.g. WireGuard, which appears as a pure L3 device
+ * to BPF, but that requires a much more complicated setup.
+ */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+ return;
+
+ src_fd = tun_open("tun_src");
+ if (!ASSERT_GE(src_fd, 0, "tun_open tun_src"))
+ goto fail;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+ goto fail;
+
+ target_fd = tun_open("tun_fwd");
+ if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd"))
+ goto fail;
+
+ tunnel_pid = fork();
+ if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop"))
+ goto fail;
+
+ if (tunnel_pid == 0)
+ exit(tun_relay_loop(src_fd, target_fd));
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto fail;
+
+ ifindex = get_ifindex("tun_fwd");
+ if (!ASSERT_GE(ifindex, 0, "get_ifindex tun_fwd"))
+ goto fail;
+
+ skel->rodata->IFINDEX_SRC = ifindex;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_src_l3, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_dst_l3, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto fail;
+
+ /* Load "tc_src_l3" to the tun_fwd interface to redirect packets
+ * towards dst, and "tc_dst" to redirect packets
+ * and "tc_chk" on veth_dst_fwd to drop non-redirected packets.
+ */
+ SYS("tc qdisc add dev tun_fwd clsact");
+ SYS("tc filter add dev tun_fwd ingress bpf da object-pinned "
+ SRC_PROG_PIN_FILE);
+
+ SYS("tc qdisc add dev veth_dst_fwd clsact");
+ SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+ DST_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ /* Setup route and neigh tables */
+ SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
+ SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24");
+
+ SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
+ SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
+
+ SYS("ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
+ SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
+ " dev tun_src scope global");
+ SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
+ SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
+ SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
+ " dev tun_src scope global");
+ SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
+
+ SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto fail;
+
+ test_connectivity();
+
+fail:
+ if (tunnel_pid > 0) {
+ kill(tunnel_pid, SIGTERM);
+ waitpid(tunnel_pid, NULL, 0);
+ }
+ if (src_fd >= 0)
+ close(src_fd);
+ if (target_fd >= 0)
+ close(target_fd);
+ if (skel)
+ test_tc_peer__destroy(skel);
+ if (nstoken)
+ close_netns(nstoken);
+}
+
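+/* Runs one subtest in freshly created namespaces and deletes them again
+ * afterwards so subtests cannot leak state into each other.
+ */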
+#define RUN_TEST(name) \
+ ({ \
+ struct netns_setup_result setup_result; \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
+ if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
+ "setup links and routes")) \
+ test_ ## name(&setup_result); \
+ netns_setup_namespaces("delete"); \
+ } \
+ })
+
+static void *test_tc_redirect_run_tests(void *arg)
+{
+ RUN_TEST(tc_redirect_peer);
+ RUN_TEST(tc_redirect_peer_l3);
+ RUN_TEST(tc_redirect_neigh);
+ RUN_TEST(tc_redirect_neigh_fib);
+ return NULL;
+}
+
+void test_tc_redirect(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
index 08d19cafd5e8..1fa772079967 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
@@ -353,8 +353,7 @@ static void fastopen_estab(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, true)) {
@@ -398,8 +397,7 @@ static void syncookie_estab(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -431,8 +429,7 @@ static void fin(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -471,8 +468,7 @@ static void __simple_estab(bool exprm)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -509,8 +505,7 @@ static void misc(void)
return;
link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(misc_estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(misc_estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
index 9966685866fd..123c68c1917d 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
@@ -73,7 +73,7 @@ void test_test_overhead(void)
return;
obj = bpf_object__open_file("./test_overhead.o", NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
@@ -108,7 +108,7 @@ void test_test_overhead(void)
/* attach kprobe */
link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
kprobe_func);
- if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_kprobe"))
goto cleanup;
test_run("kprobe");
bpf_link__destroy(link);
@@ -116,28 +116,28 @@ void test_test_overhead(void)
/* attach kretprobe */
link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
kprobe_func);
- if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_kretprobe"))
goto cleanup;
test_run("kretprobe");
bpf_link__destroy(link);
/* attach raw_tp */
link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
- if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
test_run("raw_tp");
bpf_link__destroy(link);
/* attach fentry */
link = bpf_program__attach_trace(fentry_prog);
- if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
test_run("fentry");
bpf_link__destroy(link);
/* attach fexit */
link = bpf_program__attach_trace(fexit_prog);
- if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
test_run("fexit");
bpf_link__destroy(link);
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index 39b0decb1bb2..d39bc00feb45 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -3,7 +3,7 @@
#include <test_progs.h>
-#include "trace_printk.skel.h"
+#include "trace_printk.lskel.h"
#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
@@ -21,6 +21,9 @@ void test_trace_printk(void)
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
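+ /* The lskel exposes rodata before load: flip the fmt's leading 'T'
+ * to 't', presumably so the printk output matches the lower-case
+ * SEARCHMSG scanned for below.
+ */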
+ ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string");
+ skel->rodata->fmt[0] = 't';
+
err = trace_printk__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index f3022d934e2d..d7f5a931d7f3 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -55,7 +55,7 @@ void test_trampoline_count(void)
/* attach 'allowed' trampoline programs */
for (i = 0; i < MAX_TRAMP_PROGS; i++) {
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ if (!ASSERT_OK_PTR(obj, "obj_open_file")) {
obj = NULL;
goto cleanup;
}
@@ -68,14 +68,14 @@ void test_trampoline_count(void)
if (rand() % 2) {
link = load(inst[i].obj, fentry_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ if (!ASSERT_OK_PTR(link, "attach_prog")) {
link = NULL;
goto cleanup;
}
inst[i].link_fentry = link;
} else {
link = load(inst[i].obj, fexit_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ if (!ASSERT_OK_PTR(link, "attach_prog")) {
link = NULL;
goto cleanup;
}
@@ -85,7 +85,7 @@ void test_trampoline_count(void)
/* and try 1 extra.. */
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ if (!ASSERT_OK_PTR(obj, "obj_open_file")) {
obj = NULL;
goto cleanup;
}
@@ -96,13 +96,15 @@ void test_trampoline_count(void)
/* ..that needs to fail */
link = load(obj, fentry_name);
- if (CHECK(!IS_ERR(link), "cannot attach over the limit", "err %ld\n", PTR_ERR(link))) {
+ err = libbpf_get_error(link);
+ if (!ASSERT_ERR_PTR(link, "cannot attach over the limit")) {
bpf_link__destroy(link);
goto cleanup_extra;
}
/* with E2BIG error */
- CHECK(PTR_ERR(link) != -E2BIG, "proper error check", "err %ld\n", PTR_ERR(link));
+ ASSERT_EQ(err, -E2BIG, "proper error check");
+ ASSERT_EQ(link, NULL, "ptr_is_null");
/* and finally execute the probe */
if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L)))
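The CHECK(IS_ERR(...)) conversions throughout this file follow libbpf's convention that attach errors travel inside the returned pointer; a sketch of what ASSERT_OK_PTR() effectively wraps (prog and the cleanup label are assumed from context; the exact macro bodies live in test_progs.h):

    struct bpf_link *link = bpf_program__attach_trace(prog);
    long err = libbpf_get_error(link);  /* 0 on success, -errno on failure */

    if (err) {
        link = NULL;  /* never pass an encoded error to bpf_link__destroy() */
        goto cleanup;
    }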
diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c
index 2aba09d4d01b..56c9d6bd38a3 100644
--- a/tools/testing/selftests/bpf/prog_tests/udp_limit.c
+++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c
@@ -22,11 +22,10 @@ void test_udp_limit(void)
goto close_cgroup_fd;
skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.sock, "cg_attach_sock"))
+ goto close_skeleton;
skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
- if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
- "cg-attach", "sock %ld sock_release %ld",
- PTR_ERR(skel->links.sock),
- PTR_ERR(skel->links.sock_release)))
+ if (!ASSERT_OK_PTR(skel->links.sock_release, "cg_attach_sock_release"))
goto close_skeleton;
/* BPF program enforces a single UDP socket per cgroup,
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 2c6c570b21f8..3bd5904b4db5 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -90,7 +90,7 @@ void test_xdp_bpf2bpf(void)
pb_opts.ctx = &passed;
pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out;
/* Run test program */
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
index 6f814999b395..46eed0a33c23 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
@@ -51,7 +51,7 @@ void test_xdp_link(void)
/* BPF link is not allowed to replace prog attachment */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
/* best-effort detach prog */
opts.old_fd = prog_fd1;
@@ -67,7 +67,7 @@ void test_xdp_link(void)
/* now BPF link should attach successfully */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel1->links.xdp_handler = link;
@@ -95,7 +95,7 @@ void test_xdp_link(void)
/* BPF link is not allowed to replace another BPF link */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
@@ -105,7 +105,7 @@ void test_xdp_link(void)
/* new link attach should succeed */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel2->links.xdp_handler = link;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 6dfce3fd68bc..0aa3cd34cbe3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
index b83b5d2e17dc..6c39e86b666f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
index d58d9f1642b5..784a610ce039 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
index 95989f4c99b5..a28e51e2dcee 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
index b7f32c160f4e..c86b93f33b32 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
index a1ddc36f13ec..bca8b889cb10 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <errno.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index b2f7c7c5f952..6e7b400888fe 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index 43c36f5f7649..f2b8167b72a8 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
index 11d1aa37cf11..4ea6a37d1345 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 54380c5e1069..2e4775c35414 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index b4fbddfa4e10..943f7bba180e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
index ee49493dc125..400fdf8d6233 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
@@ -9,8 +9,8 @@ __u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
-static volatile const __u32 print_len;
-static volatile const __u32 ret1;
+volatile const __u32 print_len;
+volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
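Dropping static here (and in several progs below) turns print_len and ret1 into global symbols that the generated skeleton exposes through its rodata view, while volatile keeps clang from constant-folding the zero initializers. A user-space sketch with assumed skeleton names and illustrative values:

    struct bpf_iter_test_kern4 *skel = bpf_iter_test_kern4__open();

    if (!skel)
        return;
    skel->rodata->print_len = 8;       /* must be set before __load() */
    skel->rodata->ret1 = 1;
    if (bpf_iter_test_kern4__load(skel))
        goto cleanup;                  /* assumed cleanup label */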
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
index f258583afbbd..cf0c485b1ed7 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
index 65f93bb03f0f..5031e21c433f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
@@ -3,7 +3,6 @@
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index a46a264ce24e..55e283050cab 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -109,10 +109,10 @@ int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
return 0;
}
-static volatile struct {
+struct {
bool fentry_test_ok;
bool fexit_test_ok;
-} result;
+} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
index 52291515cc72..00bf1ca95986 100644
--- a/tools/testing/selftests/bpf/progs/linked_maps1.c
+++ b/tools/testing/selftests/bpf/progs/linked_maps1.c
@@ -75,7 +75,7 @@ int BPF_PROG(handler_exit1)
val = bpf_map_lookup_elem(&map_weak, &key);
if (val)
output_weak1 = *val;
-
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
new file mode 100644
index 000000000000..e550f728962d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <../../../tools/include/linux/filter.h>
+#include <linux/btf.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+static int btf_load(void)
+{
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(__u32) * 8,
+ .str_off = sizeof(__u32) * 8,
+ .str_len = sizeof(__u32),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+ static union bpf_attr btf_load_attr = {
+ .btf_size = sizeof(raw_btf),
+ };
+
+ btf_load_attr.btf = (long)&raw_btf;
+ return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
+}
+
+SEC("syscall")
+int bpf_prog(struct args *ctx)
+{
+ static char license[] = "GPL";
+ static struct bpf_insn insns[] = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ static union bpf_attr map_create_attr = {
+ .map_type = BPF_MAP_TYPE_HASH,
+ .key_size = 8,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 2,
+ };
+ static union bpf_attr map_update_attr = { .map_fd = 1, };
+ static __u64 key = 12;
+ static __u64 value = 34;
+ static union bpf_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .insn_cnt = sizeof(insns) / sizeof(insns[0]),
+ };
+ int ret;
+
+ ret = btf_load();
+ if (ret <= 0)
+ return ret;
+
+ ctx->btf_fd = ret;
+ map_create_attr.max_entries = ctx->max_entries;
+ map_create_attr.btf_fd = ret;
+
+ prog_load_attr.license = (long) license;
+ prog_load_attr.insns = (long) insns;
+ prog_load_attr.log_buf = ctx->log_buf;
+ prog_load_attr.log_size = ctx->log_size;
+ prog_load_attr.log_level = 1;
+
+ ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->map_fd = ret;
+ insns[3].imm = ret;
+
+ map_update_attr.map_fd = ret;
+ map_update_attr.key = (long) &key;
+ map_update_attr.value = (long) &value;
+ ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
+ if (ret < 0)
+ return ret;
+
+ ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->prog_fd = ret;
+ return 1;
+}
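A SEC("syscall") program only runs when user space drives it through BPF_PROG_RUN with struct args as the input context; a sketch of the driving side, assuming prog_fd refers to the program loaded from this file:

    char log_buf[4096];
    struct args ctx = {
        .log_buf = (__u64)(long)log_buf,
        .log_size = sizeof(log_buf),
        .max_entries = 1,
    };
    DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
        .ctx_in = &ctx,
        .ctx_size_in = sizeof(ctx),
    );
    int err = bpf_prog_test_run_opts(prog_fd, &opts);

    /* on success, ctx.map_fd, ctx.prog_fd and ctx.btf_fd are usable fds
     * in the calling task
     */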
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 739dc2a51e74..910858fe078a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index f82075b47d7d..bd4be135c39d 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index ce5450744fd4..adf30a33064e 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 7b1c04183824..3cc4c12817b5 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -20,7 +20,7 @@ int subprog_tail(struct __sk_buff *skb)
return 1;
}
-static volatile int count;
+int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 9a1b166b7fbe..77df6d4db895 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -9,7 +9,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
__noinline
int subprog_tail_2(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
index c4a9bae96e75..71184af57749 100644
--- a/tools/testing/selftests/bpf/progs/test_check_mtu.c
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -11,8 +11,8 @@
char _license[] SEC("license") = "GPL";
/* Userspace will update these with the MTU it can see on the device */
-static volatile const int GLOBAL_USER_MTU;
-static volatile const __u32 GLOBAL_USER_IFINDEX;
+volatile const int GLOBAL_USER_MTU;
+volatile const __u32 GLOBAL_USER_IFINDEX;
/* BPF-prog will update these with MTU values it can see */
__u32 global_bpf_mtu_xdp = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 3c1e042962e6..e2a5acc4785c 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -39,8 +39,8 @@ char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
-static volatile const __be16 ENCAPSULATION_PORT;
-static volatile const __be32 ENCAPSULATION_IP;
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_args.c b/tools/testing/selftests/bpf/progs/test_global_func_args.c
index cae309538a9e..e712bf77daae 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func_args.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func_args.c
@@ -8,7 +8,7 @@ struct S {
int v;
};
-static volatile struct S global_variable;
+struct S global_variable = {};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
new file mode 100644
index 000000000000..3a193f42c7e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 set_pid = 0;
+__u64 set_key = 0;
+__u64 set_value = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 2);
+ __type(key, __u64);
+ __type(value, __u64);
+} hash_map SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int bpf_lookup_and_delete_test(const void *ctx)
+{
+ if (set_pid == bpf_get_current_pid_tgid() >> 32)
+ bpf_map_update_elem(&hash_map, &set_key, &set_value, BPF_NOEXIST);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
new file mode 100644
index 000000000000..27df571abf5b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check if we can migrate child sockets.
+ *
+ * 1. If reuse_md->migrating_sk is NULL (SYN packet),
+ * return SK_PASS without selecting a listener.
+ * 2. If reuse_md->migrating_sk is not NULL (socket migration),
+ * select a listener (reuseport_map[migrate_map[cookie]])
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 256);
+ __type(key, int);
+ __type(value, __u64);
+} reuseport_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 256);
+ __type(key, __u64);
+ __type(value, int);
+} migrate_map SEC(".maps");
+
+int migrated_at_close = 0;
+int migrated_at_close_fastopen = 0;
+int migrated_at_send_synack = 0;
+int migrated_at_recv_ack = 0;
+__be16 server_port;
+
+SEC("xdp")
+int drop_ack(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ struct tcphdr *tcp = NULL;
+
+ if (eth + 1 > data_end)
+ goto pass;
+
+ switch (bpf_ntohs(eth->h_proto)) {
+ case ETH_P_IP: {
+ struct iphdr *ip = (struct iphdr *)(eth + 1);
+
+ if (ip + 1 > data_end)
+ goto pass;
+
+ if (ip->protocol != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)((void *)ip + ip->ihl * 4);
+ break;
+ }
+ case ETH_P_IPV6: {
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(eth + 1);
+
+ if (ipv6 + 1 > data_end)
+ goto pass;
+
+ if (ipv6->nexthdr != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)(ipv6 + 1);
+ break;
+ }
+ default:
+ goto pass;
+ }
+
+ if (tcp + 1 > data_end)
+ goto pass;
+
+ if (tcp->dest != server_port)
+ goto pass;
+
+ if (!tcp->syn && tcp->ack)
+ return XDP_DROP;
+
+pass:
+ return XDP_PASS;
+}
+
+SEC("sk_reuseport/migrate")
+int migrate_reuseport(struct sk_reuseport_md *reuse_md)
+{
+ int *key, flags = 0, state, err;
+ __u64 cookie;
+
+ if (!reuse_md->migrating_sk)
+ return SK_PASS;
+
+ state = reuse_md->migrating_sk->state;
+ cookie = bpf_get_socket_cookie(reuse_md->sk);
+
+ key = bpf_map_lookup_elem(&migrate_map, &cookie);
+ if (!key)
+ return SK_DROP;
+
+ err = bpf_sk_select_reuseport(reuse_md, &reuseport_map, key, flags);
+ if (err)
+ return SK_PASS;
+
+ switch (state) {
+ case BPF_TCP_ESTABLISHED:
+ __sync_fetch_and_add(&migrated_at_close, 1);
+ break;
+ case BPF_TCP_SYN_RECV:
+ __sync_fetch_and_add(&migrated_at_close_fastopen, 1);
+ break;
+ case BPF_TCP_NEW_SYN_RECV:
+ if (!reuse_md->len)
+ __sync_fetch_and_add(&migrated_at_send_synack, 1);
+ else
+ __sync_fetch_and_add(&migrated_at_recv_ack, 1);
+ break;
+ }
+
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
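For the migration path to trigger, user space has to seed both maps up front: reuseport_map[i] holds the listener sockets and migrate_map routes a socket cookie back to an index. A sketch with assumed listener_fd and map fds (SO_COOKIE returns the value bpf_get_socket_cookie() sees):

    int zero = 0;
    __u64 fd64 = listener_fd, cookie;
    socklen_t len = sizeof(cookie);

    getsockopt(listener_fd, SOL_SOCKET, SO_COOKIE, &cookie, &len);
    bpf_map_update_elem(reuseport_map_fd, &zero, &fd64, BPF_NOEXIST);
    bpf_map_update_elem(migrate_map_fd, &cookie, &zero, BPF_NOEXIST);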
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index ecbeea2df259..fc8e8a34a3db 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-static volatile const struct {
+const struct {
unsigned a[4];
/*
* if the struct's size is a multiple of 16, the compiler will put it into
@@ -15,11 +15,11 @@ static volatile const struct {
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
-static volatile struct {
+struct {
unsigned did_run;
unsigned iters;
unsigned sum;
-} res;
+} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c
index 6b3f288b7c63..eaa7d9dba0be 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c
@@ -35,7 +35,7 @@ long prod_pos = 0;
/* inner state */
long seq = 0;
-SEC("tp/syscalls/sys_enter_getpgid")
+SEC("fentry/__x64_sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -48,7 +48,7 @@ int test_ringbuf(void *ctx)
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
- return 1;
+ return 0;
}
sample->pid = pid;
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 374ccef704e1..441fa1c552c8 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -38,11 +38,11 @@ extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
+struct s out5 = {};
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
- static volatile struct s out5;
-
out1 = in1;
out2 = in2;
out3 = in3;
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf.c b/tools/testing/selftests/bpf/progs/test_snprintf.c
index e35129bea0a0..e2ad26150f9b 100644
--- a/tools/testing/selftests/bpf/progs/test_snprintf.c
+++ b/tools/testing/selftests/bpf/progs/test_snprintf.c
@@ -3,7 +3,6 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
__u32 pid = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf_single.c b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
index 402adaf344f9..3095837334d3 100644
--- a/tools/testing/selftests/bpf/progs/test_snprintf_single.c
+++ b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
/* The format string is filled in from user space such that loading fails */
-static const char fmt[10];
+const char fmt[10];
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a39eba9f5201..a1cc58b10c7c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -28,8 +28,8 @@ struct {
__type(value, unsigned int);
} verdict_map SEC(".maps");
-static volatile bool test_sockmap; /* toggled by user-space */
-static volatile bool test_ingress; /* toggled by user-space */
+bool test_sockmap = false; /* toggled by user-space */
+bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
int prog_stream_parser(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
index ea1a6c4c7172..4f0b612e1661 100644
--- a/tools/testing/selftests/bpf/progs/test_static_linked1.c
+++ b/tools/testing/selftests/bpf/progs/test_static_linked1.c
@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-/* 8-byte aligned .bss */
-static volatile long static_var1;
-static volatile int static_var11;
-int var1 = 0;
+/* 8-byte aligned .data */
+static volatile long static_var1 = 2;
+static volatile int static_var2 = 3;
+int var1 = -1;
/* 4-byte aligned .rodata */
const volatile int rovar1;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
- var1 = subprog(rovar1) + static_var1 + static_var11;
+ var1 = subprog(rovar1) + static_var1 + static_var2;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
index 54d8d1ab577c..766ebd502a60 100644
--- a/tools/testing/selftests/bpf/progs/test_static_linked2.c
+++ b/tools/testing/selftests/bpf/progs/test_static_linked2.c
@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-/* 4-byte aligned .bss */
-static volatile int static_var2;
-static volatile int static_var22;
-int var2 = 0;
+/* 4-byte aligned .data */
+static volatile int static_var1 = 5;
+static volatile int static_var2 = 6;
+int var2 = -1;
/* 8-byte aligned .rodata */
const volatile long rovar2;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
- var2 = subprog(rovar2) + static_var2 + static_var22;
+ var2 = subprog(rovar2) + static_var1 + static_var2;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index d3c5673c0218..b7c37ca09544 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -4,8 +4,18 @@
const char LICENSE[] SEC("license") = "GPL";
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array SEC(".maps");
+
__noinline int sub1(int x)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return x + 1;
}
@@ -23,6 +33,9 @@ static __noinline int sub3(int z)
static __noinline int sub4(int w)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
new file mode 100644
index 000000000000..18a3a7ed924a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* Dummy prog to test TC-BPF API */
+
+SEC("classifier")
+int cls(struct __sk_buff *skb)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index b985ac4e7a81..0c93d326a663 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -33,17 +33,8 @@
a.s6_addr32[3] == b.s6_addr32[3])
#endif
-enum {
- dev_src,
- dev_dst,
-};
-
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
__be32 addr)
@@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
return v6_equal(ip6h->daddr, addr);
}
-static __always_inline int get_dev_ifindex(int which)
-{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
-
- return ifindex ? *ifindex : 0;
-}
-
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index d82ed3457030..f7ab69cf018e 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
return 0;
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fc84a7685aa2..fe818cd5f010 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -5,41 +5,59 @@
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
-enum {
- dev_src,
- dev_dst,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
-static __always_inline int get_dev_ifindex(int which)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+ return TC_ACT_SHOT;
+}
- return ifindex ? *ifindex : 0;
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(IFINDEX_SRC, 0);
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
- return TC_ACT_SHOT;
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress_l3")
+int tc_dst_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+ return bpf_redirect(IFINDEX_SRC, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress_l3")
+int tc_src_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+ __u16 proto = skb->protocol;
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c
index 8ca7f399b670..119582aa105a 100644
--- a/tools/testing/selftests/bpf/progs/trace_printk.c
+++ b/tools/testing/selftests/bpf/progs/trace_printk.c
@@ -10,11 +10,11 @@ char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
-SEC("tp/raw_syscalls/sys_enter")
+const char fmt[] = "Testing,testing %d\n";
+
+SEC("fentry/__x64_sys_nanosleep")
int sys_enter(void *ctx)
{
- static const char fmt[] = "testing,testing %d\n";
-
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
new file mode 100644
index 000000000000..880debcbcd65
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KBUILD_MODNAME "foo"
+#include <string.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+/* One map uses a devmap, the other uses a devmap_hash for testing */
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 1024);
+} map_all SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 128);
+} map_egress SEC(".maps");
+
+/* map to store egress interfaces' MAC addresses */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __be64);
+ __uint(max_entries, 128);
+} mac_map SEC(".maps");
+
+SEC("xdp_redirect_map_multi")
+int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ int if_index = ctx->ingress_ifindex;
+ struct ethhdr *eth = data;
+ __u16 h_proto;
+ __u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ h_proto = eth->h_proto;
+
+ /* Using IPv4 for (BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) testing */
+ if (h_proto == bpf_htons(ETH_P_IP))
+ return bpf_redirect_map(&map_all, 0,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+ /* Using IPv6 for no-flags testing */
+ else if (h_proto == bpf_htons(ETH_P_IPV6))
+ return bpf_redirect_map(&map_all, if_index, 0);
+ /* All others for BPF_F_BROADCAST testing */
+ else
+ return bpf_redirect_map(&map_all, 0, BPF_F_BROADCAST);
+}
+
+/* The following 2 progs are for 2nd devmap prog testing */
+SEC("xdp_redirect_map_ingress")
+int xdp_redirect_map_all_prog(struct xdp_md *ctx)
+{
+ return bpf_redirect_map(&map_egress, 0,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+}
+
+SEC("xdp_devmap/map_prog")
+int xdp_devmap_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 key = ctx->egress_ifindex;
+ struct ethhdr *eth = data;
+ __u64 nh_off;
+ __be64 *mac;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ mac = bpf_map_lookup_elem(&mac_map, &key);
+ if (mac)
+ __builtin_memcpy(eth->h_source, mac, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
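User space seeds map_all with plain ifindex values, while map_egress entries carry struct bpf_devmap_val so xdp_devmap_prog above can run on egress. A seeding sketch; the interface name and fd variables are assumptions:

    int key = 0;
    int ifindex = if_nametoindex("veth0");
    struct bpf_devmap_val val = {
        .ifindex = ifindex,
        .bpf_prog.fd = devmap_prog_fd,   /* fd of xdp_devmap_prog */
    };

    bpf_map_update_elem(map_all_fd, &key, &ifindex, BPF_ANY);
    bpf_map_update_elem(map_egress_fd, &ifindex, &val, BPF_ANY);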
diff --git a/tools/testing/selftests/bpf/test_doc_build.sh b/tools/testing/selftests/bpf/test_doc_build.sh
index 7eb940a7b2eb..ed12111cd2f0 100755
--- a/tools/testing/selftests/bpf/test_doc_build.sh
+++ b/tools/testing/selftests/bpf/test_doc_build.sh
@@ -1,5 +1,6 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+set -e
# Assume script is located under tools/testing/selftests/bpf/. We want to start
# build attempts from the top of kernel repository.
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 6a5349f9eb14..7e9049fa3edf 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -231,6 +231,14 @@ static void test_lru_sanity0(int map_type, int map_flags)
assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
errno == ENOENT);
+ /* lookup elem key=1 and delete it, then check it doesn't exist */
+ key = 1;
+ assert(!bpf_map_lookup_and_delete_elem(lru_map_fd, &key, &value));
+ assert(value[0] == 1234);
+
+ /* remove the same element from the expected map */
+ assert(!bpf_map_delete_elem(expected_map_fd, &key));
+
assert(map_equal(lru_map_fd, expected_map_fd));
close(expected_map_fd);
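This block exercises the extension of BPF_MAP_LOOKUP_AND_DELETE_ELEM from queue/stack maps to hash-type maps; the user-space pattern, sketched against an assumed map_fd:

    __u64 k = 1, v = 0;

    /* atomically fetch and remove k; returns -1/ENOENT when absent */
    if (!bpf_map_lookup_and_delete_elem(map_fd, &k, &v))
        printf("popped %llu\n", v);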
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 51adc42b2b40..30cbf5d98f7d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -53,23 +53,30 @@ static void test_hashmap(unsigned int task, void *data)
value = 0;
/* BPF_NOEXIST means add new element if it doesn't exist. */
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
/* key=1 already exists. */
errno == EEXIST);
/* -1 is an invalid flag. */
- assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, -1) < 0 &&
errno == EINVAL);
/* Check that key=1 can be found. */
assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
key = 2;
+ value = 1234;
+ /* Insert key=2 element. */
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
+
+ /* Check that key=2 matches the value and delete it */
+ assert(bpf_map_lookup_and_delete_elem(fd, &key, &value) == 0 && value == 1234);
+
/* Check that key=2 is not found. */
- assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
/* BPF_EXIST means update existing element. */
- assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
/* key=2 is not there. */
errno == ENOENT);
@@ -80,7 +87,7 @@ static void test_hashmap(unsigned int task, void *data)
* inserted due to max_entries limit.
*/
key = 0;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
errno == E2BIG);
/* Update existing element, though the map is full. */
@@ -89,12 +96,12 @@ static void test_hashmap(unsigned int task, void *data)
key = 2;
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
key = 3;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
errno == E2BIG);
/* Check that key = 0 doesn't exist. */
key = 0;
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
/* Iterate over two elements. */
assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
@@ -104,7 +111,7 @@ static void test_hashmap(unsigned int task, void *data)
assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
(next_key == 1 || next_key == 2) &&
(next_key != first_key));
- assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
errno == ENOENT);
/* Delete both elements. */
@@ -112,13 +119,13 @@ static void test_hashmap(unsigned int task, void *data)
assert(bpf_map_delete_elem(fd, &key) == 0);
key = 2;
assert(bpf_map_delete_elem(fd, &key) == 0);
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
key = 0;
/* Check that map is empty. */
- assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
errno == ENOENT);
- assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
errno == ENOENT);
close(fd);
@@ -166,15 +173,25 @@ static void test_hashmap_percpu(unsigned int task, void *data)
/* Insert key=1 element. */
assert(!(expected_key_mask & key));
assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
+
+ /* Lookup and delete elem key=1 and check value. */
+ assert(bpf_map_lookup_and_delete_elem(fd, &key, value) == 0 &&
+ bpf_percpu(value, 0) == 100);
+
+ for (i = 0; i < nr_cpus; i++)
+ bpf_percpu(value, i) = i + 100;
+
+ /* Insert key=1 element which should not exist. */
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
expected_key_mask |= key;
/* BPF_NOEXIST means add new element if it doesn't exist. */
- assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
/* key=1 already exists. */
errno == EEXIST);
/* -1 is an invalid flag. */
- assert(bpf_map_update_elem(fd, &key, value, -1) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, -1) < 0 &&
errno == EINVAL);
/* Check that key=1 can be found. Value could be 0 if the lookup
@@ -186,10 +203,10 @@ static void test_hashmap_percpu(unsigned int task, void *data)
key = 2;
/* Check that key=2 is not found. */
- assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, value) < 0 && errno == ENOENT);
/* BPF_EXIST means update existing element. */
- assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) < 0 &&
/* key=2 is not there. */
errno == ENOENT);
@@ -202,11 +219,11 @@ static void test_hashmap_percpu(unsigned int task, void *data)
* inserted due to max_entries limit.
*/
key = 0;
- assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
errno == E2BIG);
/* Check that key = 0 doesn't exist. */
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
/* Iterate over two elements. */
assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
@@ -237,13 +254,13 @@ static void test_hashmap_percpu(unsigned int task, void *data)
assert(bpf_map_delete_elem(fd, &key) == 0);
key = 2;
assert(bpf_map_delete_elem(fd, &key) == 0);
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
key = 0;
/* Check that map is empty. */
- assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
errno == ENOENT);
- assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
errno == ENOENT);
close(fd);
@@ -360,7 +377,7 @@ static void test_arraymap(unsigned int task, void *data)
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
value = 0;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
errno == EEXIST);
/* Check that key=1 can be found. */
@@ -374,11 +391,11 @@ static void test_arraymap(unsigned int task, void *data)
* due to max_entries limit.
*/
key = 2;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
errno == E2BIG);
/* Check that key = 2 doesn't exist. */
- assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
/* Iterate over two elements. */
assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
@@ -387,12 +404,12 @@ static void test_arraymap(unsigned int task, void *data)
next_key == 0);
assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
next_key == 1);
- assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
errno == ENOENT);
/* Delete shouldn't succeed. */
key = 1;
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
close(fd);
}
@@ -418,7 +435,7 @@ static void test_arraymap_percpu(unsigned int task, void *data)
assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
bpf_percpu(values, 0) = 0;
- assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) < 0 &&
errno == EEXIST);
/* Check that key=1 can be found. */
@@ -433,11 +450,11 @@ static void test_arraymap_percpu(unsigned int task, void *data)
/* Check that key=2 cannot be inserted due to max_entries limit. */
key = 2;
- assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) < 0 &&
errno == E2BIG);
/* Check that key = 2 doesn't exist. */
- assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, values) < 0 && errno == ENOENT);
/* Iterate over two elements. */
assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
@@ -446,12 +463,12 @@ static void test_arraymap_percpu(unsigned int task, void *data)
next_key == 0);
assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
next_key == 1);
- assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
errno == ENOENT);
/* Delete shouldn't succeed. */
key = 1;
- assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
close(fd);
}
@@ -555,7 +572,7 @@ static void test_queuemap(unsigned int task, void *data)
assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
/* Check that element cannot be pushed due to max_entries limit */
- assert(bpf_map_update_elem(fd, NULL, &val, 0) == -1 &&
+ assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
errno == E2BIG);
/* Peek element */
@@ -571,12 +588,12 @@ static void test_queuemap(unsigned int task, void *data)
val == vals[i]);
/* Check that there are no elements left */
- assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == -1 &&
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
errno == ENOENT);
/* Check that unsupported functions set errno to EINVAL */
- assert(bpf_map_delete_elem(fd, NULL) == -1 && errno == EINVAL);
- assert(bpf_map_get_next_key(fd, NULL, NULL) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
+ assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
close(fd);
}
@@ -613,7 +630,7 @@ static void test_stackmap(unsigned int task, void *data)
assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
/* Check that element cannot be pushed due to max_entries limit */
- assert(bpf_map_update_elem(fd, NULL, &val, 0) == -1 &&
+ assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
errno == E2BIG);
/* Peek element */
@@ -629,12 +646,12 @@ static void test_stackmap(unsigned int task, void *data)
val == vals[i]);
/* Check that there are no elements left */
- assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == -1 &&
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
errno == ENOENT);
/* Check that unsupported functions set errno to EINVAL */
- assert(bpf_map_delete_elem(fd, NULL) == -1 && errno == EINVAL);
- assert(bpf_map_get_next_key(fd, NULL, NULL) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
+ assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
close(fd);
}
@@ -835,7 +852,7 @@ static void test_sockmap(unsigned int tasks, void *data)
}
bpf_map_rx = bpf_object__find_map_by_name(obj, "sock_map_rx");
- if (IS_ERR(bpf_map_rx)) {
+ if (!bpf_map_rx) {
printf("Failed to load map rx from verdict prog\n");
goto out_sockmap;
}
@@ -847,7 +864,7 @@ static void test_sockmap(unsigned int tasks, void *data)
}
bpf_map_tx = bpf_object__find_map_by_name(obj, "sock_map_tx");
- if (IS_ERR(bpf_map_tx)) {
+ if (!bpf_map_tx) {
printf("Failed to load map tx from verdict prog\n");
goto out_sockmap;
}
@@ -859,7 +876,7 @@ static void test_sockmap(unsigned int tasks, void *data)
}
bpf_map_msg = bpf_object__find_map_by_name(obj, "sock_map_msg");
- if (IS_ERR(bpf_map_msg)) {
+ if (!bpf_map_msg) {
printf("Failed to load map msg from msg_verdict prog\n");
goto out_sockmap;
}
@@ -871,7 +888,7 @@ static void test_sockmap(unsigned int tasks, void *data)
}
bpf_map_break = bpf_object__find_map_by_name(obj, "sock_map_break");
- if (IS_ERR(bpf_map_break)) {
+ if (!bpf_map_break) {
printf("Failed to load map tx from verdict prog\n");
goto out_sockmap;
}
@@ -1153,7 +1170,7 @@ static void test_map_in_map(void)
}
map = bpf_object__find_map_by_name(obj, "mim_array");
- if (IS_ERR(map)) {
+ if (!map) {
printf("Failed to load array of maps from test prog\n");
goto out_map_in_map;
}
@@ -1164,7 +1181,7 @@ static void test_map_in_map(void)
}
map = bpf_object__find_map_by_name(obj, "mim_hash");
- if (IS_ERR(map)) {
+ if (!map) {
printf("Failed to load hash of maps from test prog\n");
goto out_map_in_map;
}
@@ -1177,7 +1194,7 @@ static void test_map_in_map(void)
bpf_object__load(obj);
map = bpf_object__find_map_by_name(obj, "mim_array");
- if (IS_ERR(map)) {
+ if (!map) {
printf("Failed to load array of maps from test prog\n");
goto out_map_in_map;
}
@@ -1194,7 +1211,7 @@ static void test_map_in_map(void)
}
map = bpf_object__find_map_by_name(obj, "mim_hash");
- if (IS_ERR(map)) {
+ if (!map) {
printf("Failed to load hash of maps from test prog\n");
goto out_map_in_map;
}
@@ -1246,7 +1263,7 @@ static void test_map_large(void)
}
key.c = -1;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
errno == E2BIG);
/* Iterate through all elements. */
@@ -1254,12 +1271,12 @@ static void test_map_large(void)
key.c = -1;
for (i = 0; i < MAP_SIZE; i++)
assert(bpf_map_get_next_key(fd, &key, &key) == 0);
- assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
key.c = 0;
assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
key.a = 1;
- assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
close(fd);
}
@@ -1391,7 +1408,7 @@ static void test_map_parallel(void)
run_parallel(TASKS, test_update_delete, data);
/* Check that key=0 is already there. */
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
errno == EEXIST);
/* Check that all elements were inserted. */
@@ -1399,7 +1416,7 @@ static void test_map_parallel(void)
key = -1;
for (i = 0; i < MAP_SIZE; i++)
assert(bpf_map_get_next_key(fd, &key, &key) == 0);
- assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
/* Another check for all elements */
for (i = 0; i < MAP_SIZE; i++) {
@@ -1415,8 +1432,8 @@ static void test_map_parallel(void)
/* Nothing should be left. */
key = -1;
- assert(bpf_map_get_next_key(fd, NULL, &key) == -1 && errno == ENOENT);
- assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, NULL, &key) < 0 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);
}
static void test_map_rdonly(void)
@@ -1434,12 +1451,12 @@ static void test_map_rdonly(void)
key = 1;
value = 1234;
/* Try to insert key=1 element. */
- assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) < 0 &&
errno == EPERM);
/* Check that key=1 is not found. */
- assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
- assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == ENOENT);
close(fd);
}
@@ -1462,8 +1479,8 @@ static void test_map_wronly_hash(void)
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
/* Check that reading elements and keys from the map is not allowed. */
- assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == EPERM);
- assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM);
+ assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == EPERM);
+ assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == EPERM);
close(fd);
}
@@ -1490,10 +1507,10 @@ static void test_map_wronly_stack_or_queue(enum bpf_map_type map_type)
assert(bpf_map_update_elem(fd, NULL, &value, BPF_ANY) == 0);
/* Peek element should fail */
- assert(bpf_map_lookup_elem(fd, NULL, &value) == -1 && errno == EPERM);
+ assert(bpf_map_lookup_elem(fd, NULL, &value) < 0 && errno == EPERM);
/* Pop element should fail */
- assert(bpf_map_lookup_and_delete_elem(fd, NULL, &value) == -1 &&
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &value) < 0 &&
errno == EPERM);
close(fd);
@@ -1547,7 +1564,7 @@ static void prepare_reuseport_grp(int type, int map_fd, size_t map_elem_size,
value = &fd32;
}
err = bpf_map_update_elem(map_fd, &index0, value, BPF_ANY);
- CHECK(err != -1 || errno != EINVAL,
+ CHECK(err >= 0 || errno != EINVAL,
"reuseport array update unbound sk",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1576,7 +1593,7 @@ static void prepare_reuseport_grp(int type, int map_fd, size_t map_elem_size,
*/
err = bpf_map_update_elem(map_fd, &index0, value,
BPF_ANY);
- CHECK(err != -1 || errno != EINVAL,
+ CHECK(err >= 0 || errno != EINVAL,
"reuseport array update non-listening sk",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1606,31 +1623,31 @@ static void test_reuseport_array(void)
map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
sizeof(__u32), sizeof(__u64), array_size, 0);
- CHECK(map_fd == -1, "reuseport array create",
+ CHECK(map_fd < 0, "reuseport array create",
"map_fd:%d, errno:%d\n", map_fd, errno);
/* Test lookup/update/delete with invalid index */
err = bpf_map_delete_elem(map_fd, &bad_index);
- CHECK(err != -1 || errno != E2BIG, "reuseport array del >=max_entries",
+ CHECK(err >= 0 || errno != E2BIG, "reuseport array del >=max_entries",
"err:%d errno:%d\n", err, errno);
err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY);
- CHECK(err != -1 || errno != E2BIG,
+ CHECK(err >= 0 || errno != E2BIG,
"reuseport array update >=max_entries",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie);
- CHECK(err != -1 || errno != ENOENT,
+ CHECK(err >= 0 || errno != ENOENT,
"reuseport array update >=max_entries",
"err:%d errno:%d\n", err, errno);
/* Test lookup/delete non existence elem */
err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
- CHECK(err != -1 || errno != ENOENT,
+ CHECK(err >= 0 || errno != ENOENT,
"reuseport array lookup not-exist elem",
"err:%d errno:%d\n", err, errno);
err = bpf_map_delete_elem(map_fd, &index3);
- CHECK(err != -1 || errno != ENOENT,
+ CHECK(err >= 0 || errno != ENOENT,
"reuseport array del not-exist elem",
"err:%d errno:%d\n", err, errno);
@@ -1644,7 +1661,7 @@ static void test_reuseport_array(void)
/* BPF_EXIST failure case */
err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
BPF_EXIST);
- CHECK(err != -1 || errno != ENOENT,
+ CHECK(err >= 0 || errno != ENOENT,
"reuseport array update empty elem BPF_EXIST",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1653,7 +1670,7 @@ static void test_reuseport_array(void)
/* BPF_NOEXIST success case */
err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
BPF_NOEXIST);
- CHECK(err == -1,
+ CHECK(err < 0,
"reuseport array update empty elem BPF_NOEXIST",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1662,7 +1679,7 @@ static void test_reuseport_array(void)
/* BPF_EXIST success case. */
err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
BPF_EXIST);
- CHECK(err == -1,
+ CHECK(err < 0,
"reuseport array update same elem BPF_EXIST",
"sock_type:%d err:%d errno:%d\n", type, err, errno);
fds_idx = REUSEPORT_FD_IDX(err, fds_idx);
@@ -1670,7 +1687,7 @@ static void test_reuseport_array(void)
/* BPF_NOEXIST failure case */
err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
BPF_NOEXIST);
- CHECK(err != -1 || errno != EEXIST,
+ CHECK(err >= 0 || errno != EEXIST,
"reuseport array update non-empty elem BPF_NOEXIST",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1679,7 +1696,7 @@ static void test_reuseport_array(void)
/* BPF_ANY case (always succeed) */
err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
BPF_ANY);
- CHECK(err == -1,
+ CHECK(err < 0,
"reuseport array update same sk with BPF_ANY",
"sock_type:%d err:%d errno:%d\n", type, err, errno);
@@ -1688,32 +1705,32 @@ static void test_reuseport_array(void)
/* The same sk cannot be added to reuseport_array twice */
err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY);
- CHECK(err != -1 || errno != EBUSY,
+ CHECK(err >= 0 || errno != EBUSY,
"reuseport array update same sk with same index",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY);
- CHECK(err != -1 || errno != EBUSY,
+ CHECK(err >= 0 || errno != EBUSY,
"reuseport array update same sk with different index",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
/* Test delete elem */
err = bpf_map_delete_elem(map_fd, &index3);
- CHECK(err == -1, "reuseport array delete sk",
+ CHECK(err < 0, "reuseport array delete sk",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
/* Add it back with BPF_NOEXIST */
err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
- CHECK(err == -1,
+ CHECK(err < 0,
"reuseport array re-add with BPF_NOEXIST after del",
"sock_type:%d err:%d errno:%d\n", type, err, errno);
/* Test cookie */
err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
- CHECK(err == -1 || sk_cookie != map_cookie,
+ CHECK(err < 0 || sk_cookie != map_cookie,
"reuseport array lookup re-added sk",
"sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llxn",
type, err, errno, sk_cookie, map_cookie);
@@ -1722,7 +1739,7 @@ static void test_reuseport_array(void)
for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++)
close(grpa_fds64[f]);
err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
- CHECK(err != -1 || errno != ENOENT,
+ CHECK(err >= 0 || errno != ENOENT,
"reuseport array lookup after close()",
"sock_type:%d err:%d errno:%d\n",
type, err, errno);
@@ -1733,7 +1750,7 @@ static void test_reuseport_array(void)
CHECK(fd64 == -1, "socket(SOCK_RAW)", "err:%d errno:%d\n",
err, errno);
err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
- CHECK(err != -1 || errno != ENOTSUPP, "reuseport array update SOCK_RAW",
+ CHECK(err >= 0 || errno != ENOTSUPP, "reuseport array update SOCK_RAW",
"err:%d errno:%d\n", err, errno);
close(fd64);
@@ -1743,16 +1760,16 @@ static void test_reuseport_array(void)
/* Test 32 bit fd */
map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
sizeof(__u32), sizeof(__u32), array_size, 0);
- CHECK(map_fd == -1, "reuseport array create",
+ CHECK(map_fd < 0, "reuseport array create",
"map_fd:%d, errno:%d\n", map_fd, errno);
prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64,
&sk_cookie, 1);
fd = fd64;
err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);
- CHECK(err == -1, "reuseport array update 32 bit fd",
+ CHECK(err < 0, "reuseport array update 32 bit fd",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
- CHECK(err != -1 || errno != ENOSPC,
+ CHECK(err >= 0 || errno != ENOSPC,
"reuseport array lookup 32 bit fd",
"err:%d errno:%d\n", err, errno);
close(fd);
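
[Editor's note] For the cookie checks in this test, note that a reuseport array lookup returns the socket's 64-bit cookie, so it only succeeds when the map was created with an 8-byte value size; the 32-bit-value map above accepts the fd update but fails the lookup with ENOSPC. A small sketch of the working 64-bit case, under that assumption:

```c
#include <assert.h>
#include <bpf/bpf.h>

/* Sketch: reading the socket cookie back from a reuseport array.
 * Requires value_size == sizeof(__u64); with a 4-byte value the
 * kernel has no room for the cookie and the lookup returns -ENOSPC.
 */
static __u64 reuseport_cookie(int map_fd, __u32 index)
{
	__u64 cookie = 0;

	assert(bpf_map_lookup_elem(map_fd, &index, &cookie) == 0);
	return cookie;
}
```
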
@@ -1798,6 +1815,8 @@ int main(void)
{
srand(time(NULL));
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
map_flags = 0;
run_all_tests();
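
[Editor's note] Enabling strict mode is a one-line, process-wide opt-in, as the hunk above shows; everything after the call behaves like the future libbpf 1.0 API. A minimal sketch:

```c
#include <bpf/libbpf.h>

/* Sketch: opt the whole process into libbpf 1.0 semantics.
 * LIBBPF_STRICT_ALL turns on every strict-mode flag at once and
 * must run before any other libbpf call.
 */
int main(void)
{
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* ... from here on, APIs return -Exxx codes and
	 * pointer-returning APIs return NULL on error ...
	 */
	return 0;
}
```
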
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6396932b97e2..6f103106a39b 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -737,6 +737,9 @@ int main(int argc, char **argv)
if (err)
return err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
libbpf_set_print(libbpf_print_fn);
srand(time(NULL));
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index dda52cb649dc..8ef7f334e715 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -249,16 +249,17 @@ extern int test__join_cgroup(const char *path);
#define ASSERT_OK_PTR(ptr, name) ({ \
static int duration = 0; \
const void *___res = (ptr); \
- bool ___ok = !IS_ERR_OR_NULL(___res); \
- CHECK(!___ok, (name), \
- "unexpected error: %ld\n", PTR_ERR(___res)); \
+ int ___err = libbpf_get_error(___res); \
+ bool ___ok = ___err == 0; \
+ CHECK(!___ok, (name), "unexpected error: %d\n", ___err); \
___ok; \
})
#define ASSERT_ERR_PTR(ptr, name) ({ \
static int duration = 0; \
const void *___res = (ptr); \
- bool ___ok = IS_ERR(___res); \
+ int ___err = libbpf_get_error(___res); \
+ bool ___ok = ___err != 0; \
CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \
___ok; \
})
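
[Editor's note] `libbpf_get_error()` is the convention-agnostic way to test pointer-returning APIs: it decodes an `ERR_PTR`-style pointer in legacy mode and reads `errno` behind a NULL return in strict mode, which is why the reworked macros above no longer need the kernel-style `IS_ERR_OR_NULL()`. A hedged sketch of the same pattern outside the macros:

```c
#include <bpf/libbpf.h>

/* Sketch: one check that works for both error conventions.
 * Legacy: bpf_object__open() may return an ERR_PTR-encoded error.
 * Strict: it returns NULL with errno set. libbpf_get_error()
 * normalizes both to 0 / -Exxx.
 */
static struct bpf_object *open_or_null(const char *path)
{
	struct bpf_object *obj = bpf_object__open(path);

	if (libbpf_get_error(obj))
		return NULL;	/* cause is available in errno */
	return obj;
}
```
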
diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh
deleted file mode 100755
index 8868aa1ca902..000000000000
--- a/tools/testing/selftests/bpf/test_tc_redirect.sh
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
-# between src and dst. The netns fwd has veth links to each src and dst. The
-# client is in src and server in dst. The test installs a TC BPF program to each
-# host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
-# neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
-# switch from ingress side; it also installs a checker prog on the egress side
-# to drop unexpected traffic.
-
-if [[ $EUID -ne 0 ]]; then
- echo "This script must be run as root"
- echo "FAIL"
- exit 1
-fi
-
-# check that needed tools are present
-command -v nc >/dev/null 2>&1 || \
- { echo >&2 "nc is not available"; exit 1; }
-command -v dd >/dev/null 2>&1 || \
- { echo >&2 "dd is not available"; exit 1; }
-command -v timeout >/dev/null 2>&1 || \
- { echo >&2 "timeout is not available"; exit 1; }
-command -v ping >/dev/null 2>&1 || \
- { echo >&2 "ping is not available"; exit 1; }
-if command -v ping6 >/dev/null 2>&1; then PING6=ping6; else PING6=ping; fi
-command -v perl >/dev/null 2>&1 || \
- { echo >&2 "perl is not available"; exit 1; }
-command -v jq >/dev/null 2>&1 || \
- { echo >&2 "jq is not available"; exit 1; }
-command -v bpftool >/dev/null 2>&1 || \
- { echo >&2 "bpftool is not available"; exit 1; }
-
-readonly GREEN='\033[0;92m'
-readonly RED='\033[0;31m'
-readonly NC='\033[0m' # No Color
-
-readonly PING_ARG="-c 3 -w 10 -q"
-
-readonly TIMEOUT=10
-
-readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
-readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)"
-readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
-
-readonly IP4_SRC="172.16.1.100"
-readonly IP4_DST="172.16.2.100"
-
-readonly IP6_SRC="::1:dead:beef:cafe"
-readonly IP6_DST="::2:dead:beef:cafe"
-
-readonly IP4_SLL="169.254.0.1"
-readonly IP4_DLL="169.254.0.2"
-readonly IP4_NET="169.254.0.0"
-
-netns_cleanup()
-{
- ip netns del ${NS_SRC}
- ip netns del ${NS_FWD}
- ip netns del ${NS_DST}
-}
-
-netns_setup()
-{
- ip netns add "${NS_SRC}"
- ip netns add "${NS_FWD}"
- ip netns add "${NS_DST}"
-
- ip link add veth_src type veth peer name veth_src_fwd
- ip link add veth_dst type veth peer name veth_dst_fwd
-
- ip link set veth_src netns ${NS_SRC}
- ip link set veth_src_fwd netns ${NS_FWD}
-
- ip link set veth_dst netns ${NS_DST}
- ip link set veth_dst_fwd netns ${NS_FWD}
-
- ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src
- ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst
-
- # The fwd netns automatically get a v6 LL address / routes, but also
- # needs v4 one in order to start ARP probing. IP4_NET route is added
- # to the endpoints so that the ARP processing will reply.
-
- ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd
- ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd
-
- ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad
- ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad
-
- ip -netns ${NS_SRC} link set dev veth_src up
- ip -netns ${NS_FWD} link set dev veth_src_fwd up
-
- ip -netns ${NS_DST} link set dev veth_dst up
- ip -netns ${NS_FWD} link set dev veth_dst_fwd up
-
- ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global
- ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global
- ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global
-
- ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global
- ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global
-
- ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global
- ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global
- ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global
-
- ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global
- ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global
-
- fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address)
- fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address)
-
- ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src
- ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst
-
- ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src
- ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst
-}
-
-netns_test_connectivity()
-{
- set +e
-
- ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &"
- ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &"
-
- TEST="TCPv4 connectivity test"
- ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004"
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="TCPv6 connectivity test"
- ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006"
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="ICMPv4 connectivity test"
- ip netns exec ${NS_SRC} ping $PING_ARG ${IP4_DST}
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="ICMPv6 connectivity test"
- ip netns exec ${NS_SRC} $PING6 $PING_ARG ${IP6_DST}
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- set -e
-}
-
-hex_mem_str()
-{
- perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1
-}
-
-netns_setup_bpf()
-{
- local obj=$1
- local use_forwarding=${2:-0}
-
- ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact
- ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress
- ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress bpf da obj $obj sec chk_egress
-
- ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact
- ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress
- ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress bpf da obj $obj sec chk_egress
-
- if [ "$use_forwarding" -eq "1" ]; then
- # bpf_fib_lookup() checks if forwarding is enabled
- ip netns exec ${NS_FWD} sysctl -w net.ipv4.ip_forward=1
- ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_dst_fwd.forwarding=1
- ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_src_fwd.forwarding=1
- return 0
- fi
-
- veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex)
- veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex)
-
- progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]')
- for prog in $progs; do
- map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]')
- if [ ! -z "$map" ]; then
- bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src)
- bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst)
- fi
- done
-}
-
-trap netns_cleanup EXIT
-set -e
-
-netns_setup
-netns_setup_bpf test_tc_neigh.o
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_neigh_fib.o 1
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_peer.o
-netns_test_connectivity
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 73da7fe8c152..4a39304cc5a6 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -82,6 +82,8 @@ int main(int argc, char **argv)
cpu_set_t cpuset;
__u32 key = 0;
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
@@ -116,7 +118,7 @@ int main(int argc, char **argv)
pb_opts.sample_cb = dummyfn;
pb = perf_buffer__new(bpf_map__fd(perf_map), 8, &pb_opts);
- if (IS_ERR(pb))
+ if (!pb)
goto err;
pthread_create(&tid, NULL, poller_thread, pb);
@@ -163,7 +165,6 @@ err:
bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
close(cg_fd);
cleanup_cgroup_environment();
- if (!IS_ERR_OR_NULL(pb))
- perf_buffer__free(pb);
+ perf_buffer__free(pb);
return error;
}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1512092e1e68..3a9e332c5e36 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
}
- if (test->insn_processed) {
+ if (!unpriv && test->insn_processed) {
uint32_t insn_processed;
char *proc;
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
new file mode 100755
index 000000000000..1538373157e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
@@ -0,0 +1,204 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test topology:
+# - - - - - - - - - - - - - - - - - - - - - - - - -
+# | veth1 veth2 veth3 | ... init net
+# - -| - - - - - - | - - - - - - | - -
+# --------- --------- ---------
+# | veth0 | | veth0 | | veth0 | ...
+# --------- --------- ---------
+# ns1 ns2 ns3
+#
+# Test modules:
+# XDP modes: generic, native, native + egress_prog
+#
+# Test cases:
+# ARP: Testing BPF_F_BROADCAST, the ingress interface should also receive
+# the redirects.
+# ns1 -> gw: ns1, ns2 and ns3 should receive the ARP request
+# IPv4: Testing BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS, the ingress
+# interface should not receive the redirects.
+# ns1 -> gw: ns1 should not receive the redirects, ns2 and ns3 should.
+# IPv6: Testing no flags, all the packets should be redirected back
+# ping test: ns1 -> ns2 (blocked), echo requests will be redirected back
+# egress_prog:
+# all source MACs should be the egress interface's MAC
+
+# netns numbers
+NUM=3
+IFACES=""
+DRV_MODE="xdpgeneric xdpdrv xdpegress"
+PASS=0
+FAIL=0
+
+test_pass()
+{
+ echo "Pass: $@"
+ PASS=$((PASS + 1))
+}
+
+test_fail()
+{
+ echo "fail: $@"
+ FAIL=$((FAIL + 1))
+}
+
+clean_up()
+{
+ for i in $(seq $NUM); do
+ ip link del veth$i 2> /dev/null
+ ip netns del ns$i 2> /dev/null
+ done
+}
+
+# Kselftest framework requirement - SKIP code is 4.
+check_env()
+{
+ ip link set dev lo xdpgeneric off &>/dev/null
+ if [ $? -ne 0 ];then
+ echo "selftests: [SKIP] Could not run test without the ip xdpgeneric support"
+ exit 4
+ fi
+
+ which tcpdump &>/dev/null
+ if [ $? -ne 0 ];then
+ echo "selftests: [SKIP] Could not run test without tcpdump"
+ exit 4
+ fi
+}
+
+setup_ns()
+{
+ local mode=$1
+ IFACES=""
+
+ if [ "$mode" = "xdpegress" ]; then
+ mode="xdpdrv"
+ fi
+
+ for i in $(seq $NUM); do
+ ip netns add ns$i
+ ip link add veth$i type veth peer name veth0 netns ns$i
+ ip link set veth$i up
+ ip -n ns$i link set veth0 up
+
+ ip -n ns$i addr add 192.0.2.$i/24 dev veth0
+ ip -n ns$i addr add 2001:db8::$i/64 dev veth0
+ # Add a neigh entry for IPv4 ping test
+ ip -n ns$i neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0
+ ip -n ns$i link set veth0 $mode obj \
+ xdp_dummy.o sec xdp_dummy &> /dev/null || \
+ { test_fail "Unable to load dummy xdp" && exit 1; }
+ IFACES="$IFACES veth$i"
+ veth_mac[$i]=$(ip link show veth$i | awk '/link\/ether/ {print $2}')
+ done
+}
+
+do_egress_tests()
+{
+ local mode=$1
+
+ # mac test
+ ip netns exec ns2 tcpdump -e -i veth0 -nn -l -e &> mac_ns1-2_${mode}.log &
+ ip netns exec ns3 tcpdump -e -i veth0 -nn -l -e &> mac_ns1-3_${mode}.log &
+ sleep 0.5
+ ip netns exec ns1 ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
+ sleep 0.5
+ pkill -9 tcpdump
+
+ # mac check
+ grep -q "${veth_mac[2]} > ff:ff:ff:ff:ff:ff" mac_ns1-2_${mode}.log && \
+ test_pass "$mode mac ns1-2" || test_fail "$mode mac ns1-2"
+ grep -q "${veth_mac[3]} > ff:ff:ff:ff:ff:ff" mac_ns1-3_${mode}.log && \
+ test_pass "$mode mac ns1-3" || test_fail "$mode mac ns1-3"
+}
+
+do_ping_tests()
+{
+ local mode=$1
+
+ # ping6 test: echo request should be redirected back to itself, not to others
+ ip netns exec ns1 ip neigh add 2001:db8::2 dev veth0 lladdr 00:00:00:00:00:02
+
+ ip netns exec ns1 tcpdump -i veth0 -nn -l -e &> ns1-1_${mode}.log &
+ ip netns exec ns2 tcpdump -i veth0 -nn -l -e &> ns1-2_${mode}.log &
+ ip netns exec ns3 tcpdump -i veth0 -nn -l -e &> ns1-3_${mode}.log &
+ sleep 0.5
+ # ARP test
+ ip netns exec ns1 ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
+ # IPv4 test
+ ip netns exec ns1 ping 192.0.2.253 -i 0.1 -c 4 &> /dev/null
+ # IPv6 test
+ ip netns exec ns1 ping6 2001:db8::2 -i 0.1 -c 2 &> /dev/null
+ sleep 0.5
+ pkill -9 tcpdump
+
+ # All netns should receive the redirected ARP requests
+ [ $(grep -c "who-has 192.0.2.254" ns1-1_${mode}.log) -gt 4 ] && \
+ test_pass "$mode arp(F_BROADCAST) ns1-1" || \
+ test_fail "$mode arp(F_BROADCAST) ns1-1"
+ [ $(grep -c "who-has 192.0.2.254" ns1-2_${mode}.log) -le 4 ] && \
+ test_pass "$mode arp(F_BROADCAST) ns1-2" || \
+ test_fail "$mode arp(F_BROADCAST) ns1-2"
+ [ $(grep -c "who-has 192.0.2.254" ns1-3_${mode}.log) -le 4 ] && \
+ test_pass "$mode arp(F_BROADCAST) ns1-3" || \
+ test_fail "$mode arp(F_BROADCAST) ns1-3"
+
+ # ns1 should not receive the redirected echo request, the others should
+ [ $(grep -c "ICMP echo request" ns1-1_${mode}.log) -eq 4 ] && \
+ test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1" || \
+ test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1"
+ [ $(grep -c "ICMP echo request" ns1-2_${mode}.log) -eq 4 ] && \
+ test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2" || \
+ test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2"
+ [ $(grep -c "ICMP echo request" ns1-3_${mode}.log) -eq 4 ] && \
+ test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3" || \
+ test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3"
+
+ # ns1 should receive the echo request, ns2 should not
+ [ $(grep -c "ICMP6, echo request" ns1-1_${mode}.log) -eq 4 ] && \
+ test_pass "$mode IPv6 (no flags) ns1-1" || \
+ test_fail "$mode IPv6 (no flags) ns1-1"
+ [ $(grep -c "ICMP6, echo request" ns1-2_${mode}.log) -eq 0 ] && \
+ test_pass "$mode IPv6 (no flags) ns1-2" || \
+ test_fail "$mode IPv6 (no flags) ns1-2"
+}
+
+do_tests()
+{
+ local mode=$1
+ local drv_p
+
+ case ${mode} in
+ xdpdrv) drv_p="-N";;
+ xdpegress) drv_p="-X";;
+ xdpgeneric) drv_p="-S";;
+ esac
+
+ ./xdp_redirect_multi $drv_p $IFACES &> xdp_redirect_${mode}.log &
+ xdp_pid=$!
+ sleep 1
+
+ if [ "$mode" = "xdpegress" ]; then
+ do_egress_tests $mode
+ else
+ do_ping_tests $mode
+ fi
+
+ kill $xdp_pid
+}
+
+trap clean_up 0 2 3 6 9
+
+check_env
+rm -f xdp_redirect_*.log ns*.log mac_ns*.log
+
+for mode in ${DRV_MODE}; do
+ setup_ns $mode
+ do_tests $mode
+ clean_up
+done
+
+echo "Summary: PASS $PASS, FAIL $FAIL"
+[ $FAIL -eq 0 ] && exit 0 || exit 1
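
[Editor's note] The ARP/IPv4/IPv6 cases above exercise the new `bpf_redirect_map()` broadcast flags from the kernel side. A sketch of the kind of ingress XDP program the script loads, with the map layout and section name assumed and the program/map names taken from the accompanying loader (`xdp_redirect_multi.c` below):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Sketch: BPF_F_BROADCAST clones the frame to every device in
 * map_all; adding BPF_F_EXCLUDE_INGRESS skips the receiving device,
 * which is what the ARP vs. IPv4 test cases distinguish. The real
 * program selects the flags per test case.
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(struct bpf_devmap_val));
	__uint(max_entries, 32);
} map_all SEC(".maps");

SEC("xdp")
int xdp_redirect_map_all_prog(struct xdp_md *ctx)
{
	/* key is ignored when broadcasting */
	return bpf_redirect_map(&map_all, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";
```
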
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
index ca8fdb1b3f01..7d7ebee5cc7a 100644
--- a/tools/testing/selftests/bpf/verifier/and.c
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -61,6 +61,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R1 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0
},
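
[Editor's note] The pattern repeated through the verifier-test hunks that follow is the same: programs that remain ACCEPTed when privileged now also declare an expected unprivileged outcome via `result_unpriv`/`errstr_unpriv`, matched against the verifier log. For reference, a minimal entry of this shape, modeled on the existing "unpriv: return pointer" test in `verifier/unpriv.c`:

```c
{
	"unpriv: return pointer",
	.insns = {
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),	/* leak a stack pointer */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = POINTER_VALUE,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr",
},
```
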
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 8a1caf46ffbc..e061e8799ce2 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -508,6 +508,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -528,6 +530,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -569,6 +573,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -589,6 +595,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -609,6 +617,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -674,6 +684,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -695,6 +707,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index 17fe33a75034..2c8935b3e65d 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -8,6 +8,8 @@
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index bd5cae4a7f73..1c857b2fbdf0 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -87,6 +87,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -150,6 +152,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -213,6 +217,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -280,6 +286,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -348,6 +356,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -416,6 +426,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -484,6 +496,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -552,6 +566,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -620,6 +636,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -688,6 +706,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -756,6 +776,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
index 8dcd4e0383d5..11fc68da735e 100644
--- a/tools/testing/selftests/bpf/verifier/jset.c
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -82,8 +82,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.retval = 1,
.result = ACCEPT,
},
@@ -141,7 +141,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -162,6 +163,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
index 07eaa04412ae..8ab94d65f3d5 100644
--- a/tools/testing/selftests/bpf/verifier/stack_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -295,8 +295,6 @@
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid write to stack R1 off=0 size=1",
.result = ACCEPT,
.retval = 42,
},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index bd436df5cc32..111801aea5e3 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -420,6 +420,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R7 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,
},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index e5913fd3b903..a3e593ddfafc 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -120,7 +120,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R2 pointer comparison prohibited",
.retval = 0,
},
{
@@ -159,7 +159,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
// fake-dead code; targeted from branch A to
- // prevent dead code sanitization
+ // prevent dead code sanitization; it is
+ // rejected via branch B, however
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -167,7 +168,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
.retval = 0,
},
{
@@ -300,8 +301,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -371,8 +370,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -472,8 +469,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -766,8 +761,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
new file mode 100644
index 000000000000..3696a8f32c23
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/resource.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "bpf_util.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define MAX_IFACE_NUM 32
+#define MAX_INDEX_NUM 1024
+
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int ifaces[MAX_IFACE_NUM] = {};
+
+static void int_exit(int sig)
+{
+ __u32 prog_id = 0;
+ int i;
+
+ for (i = 0; ifaces[i] > 0; i++) {
+ if (bpf_get_link_xdp_id(ifaces[i], &prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id)
+ bpf_set_link_xdp_fd(ifaces[i], -1, xdp_flags);
+ }
+
+ exit(0);
+}
+
+static int get_mac_addr(unsigned int ifindex, void *mac_addr)
+{
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr;
+ int fd, ret = -1;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return ret;
+
+ if (!if_indextoname(ifindex, ifname))
+ goto err_out;
+
+ strcpy(ifr.ifr_name, ifname);
+
+ if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
+ goto err_out;
+
+ memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));
+ ret = 0;
+
+err_out:
+ close(fd);
+ return ret;
+}
+
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "usage: %s [OPTS] <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n"
+ "OPTS:\n"
+ " -S use skb-mode\n"
+ " -N enforce native mode\n"
+ " -F force loading prog\n"
+ " -X load xdp program on egress\n",
+ prog);
+}
+
+int main(int argc, char **argv)
+{
+ int prog_fd, group_all, mac_map;
+ struct bpf_program *ingress_prog, *egress_prog;
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+ int i, ret, opt, egress_prog_fd = 0;
+ struct bpf_devmap_val devmap_val;
+ bool attach_egress_prog = false;
+ unsigned char mac_addr[6];
+ char ifname[IF_NAMESIZE];
+ struct bpf_object *obj;
+ unsigned int ifindex;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "SNFX")) != -1) {
+ switch (opt) {
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'N':
+ /* default, set below */
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ case 'X':
+ attach_egress_prog = true;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) {
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+ } else if (attach_egress_prog) {
+ printf("Load xdp program on egress with SKB mode not supported yet\n");
+ goto err_out;
+ }
+
+ if (optind == argc) {
+ printf("usage: %s <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n", argv[0]);
+ goto err_out;
+ }
+
+ printf("Get interfaces");
+ for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
+ ifaces[i] = if_nametoindex(argv[optind + i]);
+ if (!ifaces[i])
+ ifaces[i] = strtoul(argv[optind + i], NULL, 0);
+ if (!if_indextoname(ifaces[i], ifname)) {
+ perror("Invalid interface name or i");
+ goto err_out;
+ }
+ if (ifaces[i] > MAX_INDEX_NUM) {
+ printf("Interface index to large\n");
+ goto err_out;
+ }
+ printf(" %d", ifaces[i]);
+ }
+ printf("\n");
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
+
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ goto err_out;
+
+ if (attach_egress_prog)
+ group_all = bpf_object__find_map_fd_by_name(obj, "map_egress");
+ else
+ group_all = bpf_object__find_map_fd_by_name(obj, "map_all");
+ mac_map = bpf_object__find_map_fd_by_name(obj, "mac_map");
+
+ if (group_all < 0 || mac_map < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
+ goto err_out;
+ }
+
+ if (attach_egress_prog) {
+ /* Find ingress/egress prog for 2nd xdp prog */
+ ingress_prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_all_prog");
+ egress_prog = bpf_object__find_program_by_name(obj, "xdp_devmap_prog");
+ if (!ingress_prog || !egress_prog) {
+ printf("finding ingress/egress_prog in obj file failed\n");
+ goto err_out;
+ }
+ prog_fd = bpf_program__fd(ingress_prog);
+ egress_prog_fd = bpf_program__fd(egress_prog);
+ if (prog_fd < 0 || egress_prog_fd < 0) {
+ printf("find egress_prog fd failed\n");
+ goto err_out;
+ }
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ /* Init forward multicast groups and exclude group */
+ for (i = 0; ifaces[i] > 0; i++) {
+ ifindex = ifaces[i];
+
+ if (attach_egress_prog) {
+ ret = get_mac_addr(ifindex, mac_addr);
+ if (ret < 0) {
+ printf("get interface %d mac failed\n", ifindex);
+ goto err_out;
+ }
+ ret = bpf_map_update_elem(mac_map, &ifindex, mac_addr, 0);
+ if (ret) {
+ perror("bpf_update_elem mac_map failed\n");
+ goto err_out;
+ }
+ }
+
+ /* Add all the interfaces to group all */
+ devmap_val.ifindex = ifindex;
+ devmap_val.bpf_prog.fd = egress_prog_fd;
+ ret = bpf_map_update_elem(group_all, &ifindex, &devmap_val, 0);
+ if (ret) {
+ perror("bpf_map_update_elem");
+ goto err_out;
+ }
+
+ /* bind prog_fd to each interface */
+ ret = bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags);
+ if (ret) {
+ printf("Set xdp fd failed on %d\n", ifindex);
+ goto err_out;
+ }
+ }
+
+ /* sleep some time for testing */
+ sleep(999);
+
+ return 0;
+
+err_out:
+ return 1;
+}
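
[Editor's note] The loader above wires a second-level program into each devmap entry through `devmap_val.bpf_prog.fd`. A sketch of that egress program, with the section name and map layout assumed and the program/map names taken from the loader: it runs on the egress device and rewrites the source MAC from `mac_map`, which is what the `xdpegress` mode of the shell script verifies with tcpdump.

```c
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

/* Sketch: per-egress-interface MAC table filled by the loader. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, ETH_ALEN);
	__uint(max_entries, 32);
} mac_map SEC(".maps");

SEC("xdp_devmap/egress")
int xdp_devmap_prog(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	__u32 key = ctx->egress_ifindex;
	struct ethhdr *eth = data;
	unsigned char *mac;

	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	/* Rewrite the source MAC to the egress interface's address. */
	mac = bpf_map_lookup_elem(&mac_map, &key);
	if (mac)
		__builtin_memcpy(eth->h_source, mac, ETH_ALEN);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```
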
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
index 4029833f7e27..160891dcb4bc 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -109,6 +109,9 @@ router_destroy()
__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
tc qdisc del dev $rp2 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
}
setup_prepare()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
index 42d44e27802c..190c1b6b5365 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -111,6 +111,9 @@ router_destroy()
__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
tc qdisc del dev $rp2 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
}
setup_prepare()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
index 65f43a7ce9c9..1e9a4aff76a2 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
@@ -7,6 +7,8 @@
PORT_NUM_NETIFS=0
+declare -a unsplit
+
port_setup_prepare()
{
:
@@ -20,12 +22,12 @@ port_cleanup()
devlink port unsplit $port
check_err $? "Did not unsplit $netdev"
done
+ unsplit=()
}
split_all_ports()
{
local should_fail=$1; shift
- local -a unsplit
# Loop over the splittable netdevs and create tuples of netdev along
# with its width. For example:
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 5cbff8038f84..28a570006d4d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -93,7 +93,9 @@ switch_destroy()
lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
lldpad_app_wait_del
+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
index 27de3d9ed08e..f4493ef9cca1 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
@@ -29,37 +29,38 @@ cleanup()
get_prio_pg()
{
- __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
- grep buffer | sed 's/ \+/ /g' | cut -d' ' -f 2-
+ # Produces a string of numbers "<B0> <B1> ... <B7> ", where BX is number
+ # of buffer that priority X is mapped to.
+ dcb -j buffer show dev $swp |
+ jq -r '[.prio_buffer | .[] | tostring + " "] | add'
}
get_prio_pfc()
{
- __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
- grep enabled | sed 's/ \+/ /g' | cut -d' ' -f 2-
+ # Produces a string of numbers "<P0> <P1> ... <P7> ", where PX denotes
+ # whether priority X has PFC enabled (the value is 1) or disabled (0).
+ dcb -j pfc show dev $swp |
+ jq -r '[.prio_pfc | .[] | if . then "1 " else "0 " end] | add'
}
get_prio_tc()
{
- __mlnx_qos -i $swp | sed -n '/^tc/,$p' |
- awk '/^tc/ { TC = $2 }
- /priority:/ { PRIO[$2]=TC }
- END {
- for (i in PRIO)
- printf("%d ", PRIO[i])
- }'
+ # Produces a string of numbers "<T0> <T1> ... <T7> ", where TC is number
+ # of TC that priority X is mapped to.
+ dcb -j ets show dev $swp |
+ jq -r '[.prio_tc | .[] | tostring + " "] | add'
}
get_buf_size()
{
local idx=$1; shift
- __mlnx_qos -i $swp | grep Receive | sed 's/.*: //' | cut -d, -f $((idx + 1))
+ dcb -j buffer show dev $swp | jq ".buffer_size[$idx]"
}
get_tot_size()
{
- __mlnx_qos -i $swp | grep Receive | sed 's/.*total_size=//'
+ dcb -j buffer show dev $swp | jq '.total_size'
}
check_prio_pg()
@@ -121,18 +122,18 @@ test_dcb_ets()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,2,4,6,1,3,5,7 > /dev/null
+ dcb ets set dev $swp prio-tc 0:0 1:2 2:4 3:6 4:1 5:3 6:5 7:7
check_prio_pg "0 2 4 6 1 3 5 7 "
check_prio_tc "0 2 4 6 1 3 5 7 "
check_prio_pfc "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb ets set dev $swp prio-tc all:0
check_prio_pg "0 0 0 0 0 0 0 0 "
check_prio_tc "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 &> /dev/null
+ dcb buffer set dev $swp prio-buffer 0:1 1:3 2:5 3:7 4:0 5:2 6:4 7:6 2>/dev/null
check_fail $? "prio2buffer accepted in DCB mode"
log_test "Configuring headroom through ETS"
@@ -174,7 +175,7 @@ test_pfc()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,1,2,3 > /dev/null
+ dcb ets set dev $swp prio-tc all:0 5:1 6:2 7:3
local buf0size=$(get_buf_size 0)
local buf1size=$(get_buf_size 1)
@@ -193,7 +194,7 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=0 > /dev/null
+ dcb pfc set dev $swp prio-pfc all:off 5:on 6:on 7:on delay 0
check_prio_pg "0 0 0 0 0 1 2 3 "
check_prio_pfc "0 0 0 0 0 1 1 1 "
@@ -210,7 +211,7 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=1000 > /dev/null
+ dcb pfc set dev $swp delay 1000
check_buf_size 0 "== $buf0size"
check_buf_size 1 "> $buf1size"
@@ -221,8 +222,8 @@ test_pfc()
RET=0
- __mlnx_qos -i $swp --pfc=0,0,0,0,0,0,0,0 --cable_len=0 > /dev/null
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb pfc set dev $swp prio-pfc all:off delay 0
+ dcb ets set dev $swp prio-tc all:0
check_prio_pg "0 0 0 0 0 0 0 0 "
check_prio_tc "0 0 0 0 0 0 0 0 "
@@ -242,13 +243,13 @@ test_tc_priomap()
{
RET=0
- __mlnx_qos -i $swp --prio_tc=0,1,2,3,4,5,6,7 > /dev/null
+ dcb ets set dev $swp prio-tc 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
check_prio_pg "0 1 2 3 4 5 6 7 "
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
check_prio_pg "0 0 0 0 0 0 0 0 "
- __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 > /dev/null
+ dcb buffer set dev $swp prio-buffer 0:1 1:3 2:5 3:7 4:0 5:2 6:4 7:6
check_prio_pg "1 3 5 7 0 2 4 6 "
tc qdisc delete dev $swp root
@@ -256,9 +257,9 @@ test_tc_priomap()
# Clean up.
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
- __mlnx_qos -i $swp --prio2buffer=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp prio-buffer all:0
tc qdisc delete dev $swp root
- __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+ dcb ets set dev $swp prio-tc all:0
log_test "TC: priomap"
}
@@ -270,12 +271,12 @@ test_tc_sizes()
RET=0
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size 2>/dev/null
check_fail $? "buffer_size should fail before qdisc is added"
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_err $? "buffer_size should pass after qdisc is added"
check_buf_size 0 "== $size" "set size: "
@@ -283,26 +284,26 @@ test_tc_sizes()
check_buf_size 0 "== $size" "set MTU: "
mtu_restore $swp
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
# After replacing the qdisc for the same kind, buffer_size still has to
# work.
tc qdisc replace dev $swp root handle 1: bfifo limit 1M
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_buf_size 0 "== $size" "post replace, set size: "
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
# Likewise after replacing for a different kind.
tc qdisc replace dev $swp root handle 2: prio bands 8
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
check_buf_size 0 "== $size" "post replace different kind, set size: "
tc qdisc delete dev $swp root
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size 2>/dev/null
check_fail $? "buffer_size should fail after qdisc is deleted"
log_test "TC: buffer size"
@@ -363,10 +364,10 @@ test_tc_int_buf()
tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
test_int_buf "TC: "
- __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0 0:$size
test_int_buf "TC+buffsize: "
- __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ dcb buffer set dev $swp buffer-size all:0
tc qdisc delete dev $swp root
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
index 0bf76f13c030..faa51012cdac 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
@@ -82,17 +82,3 @@ bail_on_lldpad()
fi
fi
}
-
-__mlnx_qos()
-{
- local err
-
- mlnx_qos "$@" 2>/dev/null
- err=$?
-
- if ((err)); then
- echo "Error ($err) in mlnx_qos $@" >/dev/stderr
- fi
-
- return $err
-}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 5c7700212f75..5d5622fc2758 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -171,7 +171,7 @@ switch_create()
# assignment.
tc qdisc replace dev $swp1 root handle 1: \
ets bands 8 strict 8 priomap 7 6
- __mlnx_qos -i $swp1 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp1 prio-buffer all:0 1:1
# $swp2
# -----
@@ -209,8 +209,8 @@ switch_create()
# the lossless prio into a buffer of its own. Don't bother with buffer
# sizes though, there is not going to be any pressure in the "backward"
# direction.
- __mlnx_qos -i $swp3 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp3 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp3 prio-buffer all:0 1:1
+ dcb pfc set dev $swp3 prio-pfc all:off 1:on
# $swp4
# -----
@@ -226,11 +226,11 @@ switch_create()
# Configure qdisc so that we can hand-tune headroom.
tc qdisc replace dev $swp4 root handle 1: \
ets bands 8 strict 8 priomap 7 6
- __mlnx_qos -i $swp4 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 prio-buffer all:0 1:1
+ dcb pfc set dev $swp4 prio-pfc all:off 1:on
# PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
# is (-2*MTU) about 80K of delay provision.
- __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
# bridges
# -------
@@ -273,9 +273,9 @@ switch_destroy()
# $swp4
# -----
- __mlnx_qos -i $swp4 --buffer_size=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --pfc=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp4 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp4 buffer-size all:0
+ dcb pfc set dev $swp4 prio-pfc all:off
+ dcb buffer set dev $swp4 prio-buffer all:0
tc qdisc del dev $swp4 root
devlink_tc_bind_pool_th_restore $swp4 1 ingress
@@ -288,8 +288,8 @@ switch_destroy()
# $swp3
# -----
- __mlnx_qos -i $swp3 --pfc=0,0,0,0,0,0,0,0 >/dev/null
- __mlnx_qos -i $swp3 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb pfc set dev $swp3 prio-pfc all:off
+ dcb buffer set dev $swp3 prio-buffer all:0
tc qdisc del dev $swp3 root
devlink_tc_bind_pool_th_restore $swp3 1 egress
@@ -315,7 +315,7 @@ switch_destroy()
# $swp1
# -----
- __mlnx_qos -i $swp1 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ dcb buffer set dev $swp1 prio-buffer all:0
tc qdisc del dev $swp1 root
devlink_tc_bind_pool_th_restore $swp1 1 ingress
diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
index e93878d42596..683759d29199 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
@@ -68,7 +68,7 @@ wait_for_routes()
local t0=$1; shift
local route_count=$1; shift
- local t1=$(ip route | grep -o 'offload' | wc -l)
+ local t1=$(ip route | grep 'offload' | grep -v 'offload_failed' | wc -l)
local delta=$((t1 - t0))
echo $delta
[[ $delta -ge $route_count ]]
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
index 093bed088ad0..373d5f2a846e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
@@ -234,15 +234,15 @@ __tc_sample_rate_test()
psample_capture_start
- ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ ip vrf exec v$h1 $MZ $h1 -c 320000 -d 100usec -p 64 -A 192.0.2.1 \
-B $dip -t udp dp=52768,sp=42768 -q
psample_capture_stop
pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
- pct=$((100 * (pkts - 100) / 100))
+ pct=$((100 * (pkts - 10000) / 10000))
(( -25 <= pct && pct <= 25))
- check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+ check_err $? "Expected 10000 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
log_test "tc sample rate ($desc)"
@@ -587,15 +587,15 @@ __tc_sample_acl_rate_test()
psample_capture_start
- ip vrf exec v$h1 $MZ $h1 -c 3200 -d 1msec -p 64 -A 192.0.2.1 \
+ ip vrf exec v$h1 $MZ $h1 -c 320000 -d 100usec -p 64 -A 192.0.2.1 \
-B 198.51.100.1 -t udp dp=52768,sp=42768 -q
psample_capture_stop
pkts=$(grep -e "group 1 " $CAPTURE_FILE | wc -l)
- pct=$((100 * (pkts - 100) / 100))
+ pct=$((100 * (pkts - 10000) / 10000))
(( -25 <= pct && pct <= 25))
- check_err $? "Expected 100 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
+ check_err $? "Expected 10000 packets, got $pkts packets, which is $pct% off. Required accuracy is +-25%"
# Setup a filter that should not match any packet and make sure packets
# are not sampled.
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 40909c254365..9de1d123f4f5 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -5,12 +5,13 @@ lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="fw_flash_test params_test regions_test reload_test \
netns_reload_test resource_test dev_info_test \
- empty_reporter_test dummy_reporter_test"
+ empty_reporter_test dummy_reporter_test rate_test"
NUM_NETIFS=0
source $lib_dir/lib.sh
BUS_ADDR=10
PORT_COUNT=4
+VF_COUNT=4
DEV_NAME=netdevsim$BUS_ADDR
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/
DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV_NAME/
@@ -507,6 +508,170 @@ dummy_reporter_test()
log_test "dummy reporter test"
}
+rate_leafs_get()
+{
+ local handle=$1
+
+ cmd_jq "devlink port function rate show -j" \
+ '.[] | to_entries | .[] | select(.value.type == "leaf") | .key | select(contains("'$handle'"))'
+}
+
+rate_nodes_get()
+{
+ local handle=$1
+
+ cmd_jq "devlink port function rate show -j" \
+ '.[] | to_entries | .[] | select(.value.type == "node") | .key | select(contains("'$handle'"))'
+}
+
+rate_attr_set()
+{
+ local handle=$1
+ local name=$2
+ local value=$3
+ local units=$4
+
+ devlink port function rate set $handle $name $value$units
+}
+
+rate_attr_get()
+{
+ local handle=$1
+ local name=$2
+
+ cmd_jq "devlink port function rate show $handle -j" '.[][].'$name
+}
+
+rate_attr_tx_rate_check()
+{
+ local handle=$1
+ local name=$2
+ local rate=$3
+ local debug_file=$4
+
+ rate_attr_set $handle $name $rate mbit
+ check_err $? "Failed to set $name value"
+
+ local debug_value=$(cat $debug_file)
+ check_err $? "Failed to read $name value from debugfs"
+ [ "$debug_value" == "$rate" ]
+ check_err $? "Unexpected $name debug value $debug_value != $rate"
+
+ local api_value=$(( $(rate_attr_get $handle $name) * 8 / 1000000 ))
+ check_err $? "Failed to get $name attr value"
+ [ "$api_value" == "$rate" ]
+ check_err $? "Unexpected $name attr value $api_value != $rate"
+}
+
+rate_attr_parent_check()
+{
+ local handle=$1
+ local parent=$2
+ local debug_file=$3
+
+ rate_attr_set $handle parent $parent
+ check_err $? "Failed to set parent"
+
+ local debug_value=$(cat $debug_file)
+ check_err $? "Failed to get parent debugfs value"
+ [ "$debug_value" == "$parent" ]
+ check_err $? "Unexpected parent debug value $debug_value != $parent"
+
+ local api_value=$(rate_attr_get $handle parent)
+ check_err $? "Failed to get parent attr value"
+ [ "$api_value" == "$parent" ]
+ check_err $? "Unexpected parent attr value $api_value != $parent"
+}
+
+rate_node_add()
+{
+ local handle=$1
+
+ devlink port function rate add $handle
+}
+
+rate_node_del()
+{
+ local handle=$1
+
+ devlink port function rate del $handle
+}
+
+rate_test()
+{
+ RET=0
+
+ echo $VF_COUNT > /sys/bus/netdevsim/devices/$DEV_NAME/sriov_numvfs
+ devlink dev eswitch set $DL_HANDLE mode switchdev
+ local leafs=`rate_leafs_get $DL_HANDLE`
+ local num_leafs=`echo $leafs | wc -w`
+ [ "$num_leafs" == "$VF_COUNT" ]
+ check_err $? "Expected $VF_COUNT rate leafs but got $num_leafs"
+
+ rate=10
+ for r_obj in $leafs
+ do
+ rate_attr_tx_rate_check $r_obj tx_share $rate \
+ $DEBUGFS_DIR/ports/${r_obj##*/}/tx_share
+ rate=$(($rate+10))
+ done
+
+ rate=100
+ for r_obj in $leafs
+ do
+ rate_attr_tx_rate_check $r_obj tx_max $rate \
+ $DEBUGFS_DIR/ports/${r_obj##*/}/tx_max
+ rate=$(($rate+100))
+ done
+
+ local node1_name='group1'
+ local node1="$DL_HANDLE/$node1_name"
+ rate_node_add "$node1"
+ check_err $? "Failed to add node $node1"
+
+ local num_nodes=`rate_nodes_get $DL_HANDLE | wc -w`
+ [ $num_nodes == 1 ]
+ check_err $? "Expected 1 rate node in output but got $num_nodes"
+
+ local node_tx_share=10
+ rate_attr_tx_rate_check $node1 tx_share $node_tx_share \
+ $DEBUGFS_DIR/rate_nodes/${node1##*/}/tx_share
+
+ local node_tx_max=100
+ rate_attr_tx_rate_check $node1 tx_max $node_tx_max \
+ $DEBUGFS_DIR/rate_nodes/${node1##*/}/tx_max
+
+ rate_node_del "$node1"
+ check_err $? "Failed to delete node $node1"
+ local num_nodes=`rate_nodes_get $DL_HANDLE | wc -w`
+ [ $num_nodes == 0 ]
+ check_err $? "Expected 0 rate node but got $num_nodes"
+
+ local node1_name='group1'
+ local node1="$DL_HANDLE/$node1_name"
+ rate_node_add "$node1"
+ check_err $? "Failed to add node $node1"
+
+ rate_attr_parent_check $r_obj $node1_name \
+ $DEBUGFS_DIR/ports/${r_obj##*/}/rate_parent
+
+ local node2_name='group2'
+ local node2="$DL_HANDLE/$node2_name"
+ rate_node_add "$node2"
+ check_err $? "Failed to add node $node2"
+
+ rate_attr_parent_check $node2 $node1_name \
+ $DEBUGFS_DIR/rate_nodes/$node2_name/rate_parent
+ rate_node_del "$node2"
+ check_err $? "Failed to delete node $node2"
+ rate_attr_set "$r_obj" noparent
+ check_err $? "Failed to unset $r_obj parent node"
+ rate_node_del "$node1"
+ check_err $? "Failed to delete node $node1"
+
+ log_test "rate test"
+}
+
setup_prepare()
{
modprobe netdevsim
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
index da49ad2761b5..109900c817be 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
@@ -24,13 +24,15 @@ ALL_TESTS="
NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
-DEVLINK_DEV=netdevsim/${DEV}
DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV/
SLEEP_TIME=1
NETDEV=""
NUM_NETIFS=0
source $lib_dir/lib.sh
+
+DEVLINK_DEV=
source $lib_dir/devlink_lib.sh
+DEVLINK_DEV=netdevsim/${DEV}
require_command udevadm
@@ -163,6 +165,16 @@ trap_stats_test()
devlink_trap_action_set $trap_name "drop"
devlink_trap_stats_idle_test $trap_name
check_err $? "Stats of trap $trap_name not idle when action is drop"
+
+ echo "y"> $DEBUGFS_DIR/fail_trap_drop_counter_get
+ devlink -s trap show $DEVLINK_DEV trap $trap_name &> /dev/null
+ check_fail $? "Managed to read trap (hard dropped) statistics when should not"
+ echo "n"> $DEBUGFS_DIR/fail_trap_drop_counter_get
+ devlink -s trap show $DEVLINK_DEV trap $trap_name &> /dev/null
+ check_err $? "Did not manage to read trap (hard dropped) statistics when should"
+
+ devlink_trap_drop_stats_idle_test $trap_name
+ check_fail $? "Drop stats of trap $trap_name idle when should not"
else
devlink_trap_stats_idle_test $trap_name
check_fail $? "Stats of non-drop trap $trap_name idle when should not"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/fib.sh b/tools/testing/selftests/drivers/net/netdevsim/fib.sh
index 251f228ce63e..fc794cd30389 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/fib.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/fib.sh
@@ -33,13 +33,15 @@ ALL_TESTS="
NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
-DEVLINK_DEV=netdevsim/${DEV}
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
NUM_NETIFS=0
source $lib_dir/lib.sh
-source $lib_dir/devlink_lib.sh
source $lib_dir/fib_offload_lib.sh
+DEVLINK_DEV=
+source $lib_dir/devlink_lib.sh
+DEVLINK_DEV=netdevsim/${DEV}
+
ipv4_identical_routes()
{
fib_ipv4_identical_routes_test "testns1"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
index ba75c81cda91..e8e0dc088d6a 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
@@ -44,12 +44,14 @@ ALL_TESTS="
NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
-DEVLINK_DEV=netdevsim/${DEV}
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
DEBUGFS_NET_DIR=/sys/kernel/debug/netdevsim/$DEV/
NUM_NETIFS=0
source $lib_dir/lib.sh
+
+DEVLINK_DEV=
source $lib_dir/devlink_lib.sh
+DEVLINK_DEV=netdevsim/${DEV}
nexthop_check()
{
diff --git a/tools/testing/selftests/drivers/net/netdevsim/psample.sh b/tools/testing/selftests/drivers/net/netdevsim/psample.sh
index ee10b1a8933c..e689ff7a0b12 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/psample.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/psample.sh
@@ -14,13 +14,15 @@ ALL_TESTS="
NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
-DEVLINK_DEV=netdevsim/${DEV}
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
PSAMPLE_DIR=/sys/kernel/debug/netdevsim/$DEV/psample/
CAPTURE_FILE=$(mktemp)
NUM_NETIFS=0
source $lib_dir/lib.sh
+
+DEVLINK_DEV=
source $lib_dir/devlink_lib.sh
+DEVLINK_DEV=netdevsim/${DEV}
# Available at https://github.com/Mellanox/libpsample
require_command psample
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index 0efcd494daab..0e78b49d0f2f 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -6,3 +6,5 @@ futex_wait_private_mapped_file
futex_wait_timeout
futex_wait_uninitialized_heap
futex_wait_wouldblock
+futex_wait
+futex_requeue
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 23207829ec75..bd1fec59e010 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-INCLUDES := -I../include -I../../
+INCLUDES := -I../include -I../../ -I../../../../../usr/include/ \
+ -I$(KBUILD_OUTPUT)/kselftest/usr/include
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
LDLIBS := -lpthread -lrt
@@ -14,7 +15,9 @@ TEST_GEN_FILES := \
futex_requeue_pi_signal_restart \
futex_requeue_pi_mismatched_ops \
futex_wait_uninitialized_heap \
- futex_wait_private_mapped_file
+ futex_wait_private_mapped_file \
+ futex_wait \
+ futex_requeue
TEST_PROGS := run.sh
diff --git a/tools/testing/selftests/futex/functional/futex_requeue.c b/tools/testing/selftests/futex/functional/futex_requeue.c
new file mode 100644
index 000000000000..51485be6eb2f
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/futex_requeue.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright Collabora Ltd., 2021
+ *
+ * futex cmp requeue test by André Almeida <andrealmeid@collabora.com>
+ */
+
+#include <pthread.h>
+#include <limits.h>
+#include "logging.h"
+#include "futextest.h"
+
+#define TEST_NAME "futex-requeue"
+#define timeout_ns 30000000
+#define WAKE_WAIT_US 10000
+
+volatile futex_t *f1;
+
+void usage(char *prog)
+{
+ printf("Usage: %s\n", prog);
+ printf(" -c Use color\n");
+ printf(" -h Display this help message\n");
+ printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
+ VQUIET, VCRITICAL, VINFO);
+}
+
+void *waiterfn(void *arg)
+{
+ struct timespec to;
+
+ to.tv_sec = 0;
+ to.tv_nsec = timeout_ns;
+
+ if (futex_wait(f1, *f1, &to, 0))
+ printf("waiter failed errno %d\n", errno);
+
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ pthread_t waiter[10];
+ int res, ret = RET_PASS;
+ int c, i;
+ volatile futex_t _f1 = 0;
+ volatile futex_t f2 = 0;
+
+ f1 = &_f1;
+
+ while ((c = getopt(argc, argv, "chv:")) != -1) {
+ switch (c) {
+ case 'c':
+ log_color(1);
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ exit(0);
+ case 'v':
+ log_verbosity(atoi(optarg));
+ break;
+ default:
+ usage(basename(argv[0]));
+ exit(1);
+ }
+ }
+
+ ksft_print_header();
+ ksft_set_plan(2);
+ ksft_print_msg("%s: Test futex_requeue\n",
+ basename(argv[0]));
+
+ /*
+ * Requeue a waiter from f1 to f2, and wake f2.
+ */
+ if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Requeuing 1 futex from f1 to f2\n");
+ res = futex_cmp_requeue(f1, 0, &f2, 0, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ }
+
+
+ info("Waking 1 futex at f2\n");
+ res = futex_wake(&f2, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_requeue simple succeeds\n");
+ }
+
+
+ /*
+ * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
+ * At futex_wake, wake INT_MAX (should be exactly 7).
+ */
+ for (i = 0; i < 10; i++) {
+ if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+ }
+
+ usleep(WAKE_WAIT_US);
+
+ info("Waking 3 futexes at f1 and requeuing 7 futexes from f1 to f2\n");
+ res = futex_cmp_requeue(f1, 0, &f2, 3, 7, 0);
+ if (res != 10) {
+ ksft_test_result_fail("futex_requeue many returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ }
+
+ info("Waking INT_MAX futexes at f2\n");
+ res = futex_wake(&f2, INT_MAX, 0);
+ if (res != 7) {
+ ksft_test_result_fail("futex_requeue many returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_requeue many succeeds\n");
+ }
+
+ ksft_print_cnts();
+ return ret;
+}
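
The wake/requeue checks above rely on FUTEX_CMP_REQUEUE's return-value
convention: it reports the number of waiters woken plus the number requeued,
so "wake 3, requeue 7" is expected to return 10. A minimal sketch of how a
futextest.h-style wrapper presumably maps onto the raw syscall; the helper
name raw_cmp_requeue() is hypothetical:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * Illustrative only: nr_wake rides in the syscall's "val" slot,
     * nr_requeue in the "timeout" slot, and val (the expected value
     * of *uaddr) is passed as val3; a mismatch fails with EAGAIN.
     */
    static inline long raw_cmp_requeue(unsigned int *uaddr, unsigned int val,
                                       unsigned int *uaddr2, int nr_wake,
                                       int nr_requeue)
    {
            return syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE, nr_wake,
                           nr_requeue, uaddr2, val);
    }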
diff --git a/tools/testing/selftests/futex/functional/futex_wait.c b/tools/testing/selftests/futex/functional/futex_wait.c
new file mode 100644
index 000000000000..685140d9b93d
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/futex_wait.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright Collabora Ltd., 2021
+ *
+ * futex wait test by André Almeida <andrealmeid@collabora.com>
+ */
+
+#include <pthread.h>
+#include <sys/shm.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include "logging.h"
+#include "futextest.h"
+
+#define TEST_NAME "futex-wait"
+#define timeout_ns 30000000
+#define WAKE_WAIT_US 10000
+#define SHM_PATH "futex_shm_file"
+
+void *futex;
+
+void usage(char *prog)
+{
+ printf("Usage: %s\n", prog);
+ printf(" -c Use color\n");
+ printf(" -h Display this help message\n");
+ printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
+ VQUIET, VCRITICAL, VINFO);
+}
+
+static void *waiterfn(void *arg)
+{
+ struct timespec to;
+ unsigned int flags = 0;
+
+ if (arg)
+ flags = *((unsigned int *) arg);
+
+ to.tv_sec = 0;
+ to.tv_nsec = timeout_ns;
+
+ if (futex_wait(futex, 0, &to, flags))
+ printf("waiter failed errno %d\n", errno);
+
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ int res, ret = RET_PASS, fd, c, shm_id;
+ u_int32_t f_private = 0, *shared_data;
+ unsigned int flags = FUTEX_PRIVATE_FLAG;
+ pthread_t waiter;
+ void *shm;
+
+ futex = &f_private;
+
+ while ((c = getopt(argc, argv, "chv:")) != -1) {
+ switch (c) {
+ case 'c':
+ log_color(1);
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ exit(0);
+ case 'v':
+ log_verbosity(atoi(optarg));
+ break;
+ default:
+ usage(basename(argv[0]));
+ exit(1);
+ }
+ }
+
+ ksft_print_header();
+ ksft_set_plan(3);
+ ksft_print_msg("%s: Test futex_wait\n", basename(argv[0]));
+
+ /* Testing a private futex */
+ info("Calling private futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, (void *) &flags))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling private futex_wake on futex: %p\n", futex);
+ res = futex_wake(futex, 1, FUTEX_PRIVATE_FLAG);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake private returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake private succeeds\n");
+ }
+
+ /* Testing an anon page shared memory */
+ shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
+ if (shm_id < 0) {
+ perror("shmget");
+ exit(1);
+ }
+
+ shared_data = shmat(shm_id, NULL, 0);
+
+ *shared_data = 0;
+ futex = shared_data;
+
+ info("Calling shared (page anon) futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling shared (page anon) futex_wake on futex: %p\n", futex);
+ res = futex_wake(futex, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake shared (page anon) returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake shared (page anon) succeeds\n");
+ }
+
+
+ /* Testing a file backed shared memory */
+ fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ perror("open");
+ exit(1);
+ }
+
+ if (ftruncate(fd, sizeof(f_private))) {
+ perror("ftruncate");
+ exit(1);
+ }
+
+ shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (shm == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ memcpy(shm, &f_private, sizeof(f_private));
+
+ futex = shm;
+
+ info("Calling shared (file backed) futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling shared (file backed) futex_wake on futex: %p\n", futex);
+ res = futex_wake(shm, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake shared (file backed) returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake shared (file backed) succeeds\n");
+ }
+
+ /* Freeing resources */
+ shmdt(shared_data);
+ munmap(shm, sizeof(f_private));
+ remove(SHM_PATH);
+ close(fd);
+
+ ksft_print_cnts();
+ return ret;
+}
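
The three wake/wait pairs above differ only in how the kernel keys the futex:
FUTEX_PRIVATE_FLAG lets it key on (mm, address) and skip the inode/offset
lookup needed for the anon- and file-backed shared cases. A minimal sketch of
the private pair via the raw syscall, assuming the same argument order as the
futextest.h wrappers:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Sketch only: block while *uaddr == expected, until woken or timeout. */
    static long wait_private(unsigned int *uaddr, unsigned int expected,
                             const struct timespec *to)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                           expected, to, NULL, 0);
    }

    static long wake_private(unsigned int *uaddr, int nr_wake)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
                           nr_wake, NULL, NULL, 0);
    }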
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index ee55e6d389a3..1f8f6daaf1e7 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -11,21 +11,18 @@
*
* HISTORY
* 2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
+ * 2021-Apr-26: More test cases by André Almeida <andrealmeid@collabora.com>
*
*****************************************************************************/
-#include <errno.h>
-#include <getopt.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
+#include <pthread.h>
#include "futextest.h"
#include "logging.h"
#define TEST_NAME "futex-wait-timeout"
static long timeout_ns = 100000; /* 100us default timeout */
+static futex_t futex_pi;
void usage(char *prog)
{
@@ -37,11 +34,67 @@ void usage(char *prog)
VQUIET, VCRITICAL, VINFO);
}
+/*
+ * Get a PI lock and hold it forever, so the main thread lock_pi will block
+ * and we can test the timeout
+ */
+void *get_pi_lock(void *arg)
+{
+ int ret;
+ volatile futex_t lock = 0;
+
+ ret = futex_lock_pi(&futex_pi, NULL, 0, 0);
+ if (ret != 0)
+ error("futex_lock_pi failed\n", ret);
+
+ /* Blocks forever */
+ ret = futex_wait(&lock, 0, NULL, 0);
+ error("futex_wait failed\n", ret);
+
+ return NULL;
+}
+
+/*
+ * Check if the function returned the expected error
+ */
+static void test_timeout(int res, int *ret, char *test_name, int err)
+{
+ if (!res || errno != err) {
+ ksft_test_result_fail("%s returned %d\n", test_name,
+ res < 0 ? errno : res);
+ *ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("%s succeeds\n", test_name);
+ }
+}
+
+/*
+ * Calculate absolute timeout and correct overflow
+ */
+static int futex_get_abs_timeout(clockid_t clockid, struct timespec *to,
+ long timeout_ns)
+{
+ if (clock_gettime(clockid, to)) {
+ error("clock_gettime failed\n", errno);
+ return errno;
+ }
+
+ to->tv_nsec += timeout_ns;
+
+ if (to->tv_nsec >= 1000000000) {
+ to->tv_sec++;
+ to->tv_nsec -= 1000000000;
+ }
+
+ return 0;
+}
+
int main(int argc, char *argv[])
{
futex_t f1 = FUTEX_INITIALIZER;
- struct timespec to;
int res, ret = RET_PASS;
+ struct timespec to;
+ pthread_t thread;
int c;
while ((c = getopt(argc, argv, "cht:v:")) != -1) {
@@ -65,22 +118,63 @@ int main(int argc, char *argv[])
}
ksft_print_header();
- ksft_set_plan(1);
+ ksft_set_plan(7);
ksft_print_msg("%s: Block on a futex and wait for timeout\n",
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
- /* initialize timeout */
+ pthread_create(&thread, NULL, get_pi_lock, NULL);
+
+ /* initialize relative timeout */
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
- info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
- res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
- if (!res || errno != ETIMEDOUT) {
- fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
- ret = RET_FAIL;
- }
+ res = futex_wait(&f1, f1, &to, 0);
+ test_timeout(res, &ret, "futex_wait relative", ETIMEDOUT);
+
+ /* FUTEX_WAIT_BITSET with CLOCK_REALTIME */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_bitset(&f1, f1, &to, 1, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_wait_bitset realtime", ETIMEDOUT);
+
+ /* FUTEX_WAIT_BITSET with CLOCK_MONOTONIC */
+ if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_bitset(&f1, f1, &to, 1, 0);
+ test_timeout(res, &ret, "futex_wait_bitset monotonic", ETIMEDOUT);
+
+ /* FUTEX_WAIT_REQUEUE_PI with CLOCK_REALTIME */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_wait_requeue_pi realtime", ETIMEDOUT);
+
+ /* FUTEX_WAIT_REQUEUE_PI with CLOCK_MONOTONIC */
+ if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
+ test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+
+ /*
+ * FUTEX_LOCK_PI with CLOCK_REALTIME
+ * Due to historical reasons, FUTEX_LOCK_PI supports only realtime
+ * clock, but requires the caller to not set CLOCK_REALTIME flag.
+ *
+ * If you call FUTEX_LOCK_PI with a monotonic clock, it'll be
+ * interpreted as a realtime clock, and (unless you mess with your
+ * machine's time or your time machine) the monotonic clock value is
+ * always smaller than realtime and the syscall will time out immediately.
+ */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_lock_pi(&futex_pi, &to, 0, 0);
+ test_timeout(res, &ret, "futex_lock_pi realtime", ETIMEDOUT);
+
+ /* Test operations that don't support FUTEX_CLOCK_REALTIME */
+ res = futex_lock_pi(&futex_pi, NULL, 0, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_lock_pi invalid timeout flag", ENOSYS);
- print_result(TEST_NAME, ret);
+ ksft_print_cnts();
return ret;
}
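
The mix of cases above hinges on per-op timeout semantics: FUTEX_WAIT treats
the timespec as a relative interval, while FUTEX_WAIT_BITSET,
FUTEX_WAIT_REQUEUE_PI and FUTEX_LOCK_PI take an absolute deadline
(CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is set, with FUTEX_LOCK_PI
hardwired to the realtime clock as the comment explains). A sketch of
building both forms for the same deadline, mirroring futex_get_abs_timeout()
above:

    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* Relative form, as FUTEX_WAIT wants it. */
    static struct timespec rel_timeout(long timeout_ns)
    {
            struct timespec to = { .tv_sec = 0, .tv_nsec = timeout_ns };

            return to;
    }

    /* Absolute form, as FUTEX_WAIT_BITSET and FUTEX_LOCK_PI want it. */
    static int abs_timeout(clockid_t clk, long timeout_ns, struct timespec *to)
    {
            if (clock_gettime(clk, to))
                    return -1;

            to->tv_nsec += timeout_ns;
            if (to->tv_nsec >= NSEC_PER_SEC) {      /* normalize the carry */
                    to->tv_sec++;
                    to->tv_nsec -= NSEC_PER_SEC;
            }
            return 0;
    }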
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 1acb6ace1680..11a9d62290f5 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -73,3 +73,9 @@ echo
echo
./futex_wait_uninitialized_heap $COLOR
./futex_wait_private_mapped_file $COLOR
+
+echo
+./futex_wait $COLOR
+
+echo
+./futex_requeue $COLOR
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index bd83158e0e0b..06a351b4f93b 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+/aarch64/debug-exceptions
/aarch64/get-reg-list
-/aarch64/get-reg-list-sve
/aarch64/vgic_init
/s390x/memop
/s390x/resets
@@ -8,12 +8,15 @@
/x86_64/cr4_cpuid_sync_test
/x86_64/debug_regs
/x86_64/evmcs_test
+/x86_64/emulator_error_test
/x86_64/get_cpuid_test
/x86_64/get_msr_index_features
/x86_64/kvm_pv_test
/x86_64/hyperv_clock
/x86_64/hyperv_cpuid
+/x86_64/hyperv_features
/x86_64/mmio_warning_test
+/x86_64/mmu_role_test
/x86_64/platform_info_test
/x86_64/set_boot_cpu_id
/x86_64/set_sregs_test
@@ -29,6 +32,7 @@
/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
+/x86_64/vmx_nested_tsc_scaling_test
/x86_64/xapic_ipi_test
/x86_64/xen_shinfo_test
/x86_64/xen_vmcall_test
@@ -41,5 +45,7 @@
/kvm_create_max_vcpus
/kvm_page_table_test
/memslot_modification_stress_test
+/memslot_perf_test
/set_memory_region_test
/steal_time
+/kvm_binary_stats_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index e439d027939d..b853be2ae3c6 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -33,19 +33,22 @@ ifeq ($(ARCH),s390)
UNAME_M := s390x
endif
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
-LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
+LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
+LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
+TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test
TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
+TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
@@ -60,6 +63,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
@@ -74,11 +78,13 @@ TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
+TEST_GEN_PROGS_x86_64 += memslot_perf_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
+TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
+TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
-TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
@@ -87,6 +93,7 @@ TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
+TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
@@ -96,6 +103,7 @@ TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += set_memory_region_test
+TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
new file mode 100644
index 000000000000..e5e6c92b60da
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#define MDSCR_KDE (1 << 13)
+#define MDSCR_MDE (1 << 15)
+#define MDSCR_SS (1 << 0)
+
+#define DBGBCR_LEN8 (0xff << 5)
+#define DBGBCR_EXEC (0x0 << 3)
+#define DBGBCR_EL1 (0x1 << 1)
+#define DBGBCR_E (0x1 << 0)
+
+#define DBGWCR_LEN8 (0xff << 5)
+#define DBGWCR_RD (0x1 << 3)
+#define DBGWCR_WR (0x2 << 3)
+#define DBGWCR_EL1 (0x1 << 1)
+#define DBGWCR_E (0x1 << 0)
+
+#define SPSR_D (1 << 9)
+#define SPSR_SS (1 << 21)
+
+extern unsigned char sw_bp, hw_bp, bp_svc, bp_brk, hw_wp, ss_start;
+static volatile uint64_t sw_bp_addr, hw_bp_addr;
+static volatile uint64_t wp_addr, wp_data_addr;
+static volatile uint64_t svc_addr;
+static volatile uint64_t ss_addr[4], ss_idx;
+#define PC(v) ((uint64_t)&(v))
+
+static void reset_debug_state(void)
+{
+ asm volatile("msr daifset, #8");
+
+ write_sysreg(osdlr_el1, 0);
+ write_sysreg(oslar_el1, 0);
+ isb();
+
+ write_sysreg(mdscr_el1, 0);
+ /* This test only uses the first bp and wp slot. */
+ write_sysreg(dbgbvr0_el1, 0);
+ write_sysreg(dbgbcr0_el1, 0);
+ write_sysreg(dbgwcr0_el1, 0);
+ write_sysreg(dbgwvr0_el1, 0);
+ isb();
+}
+
+static void install_wp(uint64_t addr)
+{
+ uint32_t wcr;
+ uint32_t mdscr;
+
+ wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
+ write_sysreg(dbgwcr0_el1, wcr);
+ write_sysreg(dbgwvr0_el1, addr);
+ isb();
+
+ asm volatile("msr daifclr, #8");
+
+ mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
+ write_sysreg(mdscr_el1, mdscr);
+ isb();
+}
+
+static void install_hw_bp(uint64_t addr)
+{
+ uint32_t bcr;
+ uint32_t mdscr;
+
+ bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
+ write_sysreg(dbgbcr0_el1, bcr);
+ write_sysreg(dbgbvr0_el1, addr);
+ isb();
+
+ asm volatile("msr daifclr, #8");
+
+ mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
+ write_sysreg(mdscr_el1, mdscr);
+ isb();
+}
+
+static void install_ss(void)
+{
+ uint32_t mdscr;
+
+ asm volatile("msr daifclr, #8");
+
+ mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
+ write_sysreg(mdscr_el1, mdscr);
+ isb();
+}
+
+static volatile char write_data;
+
+static void guest_code(void)
+{
+ GUEST_SYNC(0);
+
+ /* Software-breakpoint */
+ asm volatile("sw_bp: brk #0");
+ GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));
+
+ GUEST_SYNC(1);
+
+ /* Hardware-breakpoint */
+ reset_debug_state();
+ install_hw_bp(PC(hw_bp));
+ asm volatile("hw_bp: nop");
+ GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));
+
+ GUEST_SYNC(2);
+
+ /* Hardware-breakpoint + svc */
+ reset_debug_state();
+ install_hw_bp(PC(bp_svc));
+ asm volatile("bp_svc: svc #0");
+ GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
+ GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);
+
+ GUEST_SYNC(3);
+
+ /* Hardware-breakpoint + software-breakpoint */
+ reset_debug_state();
+ install_hw_bp(PC(bp_brk));
+ asm volatile("bp_brk: brk #0");
+ GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
+ GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));
+
+ GUEST_SYNC(4);
+
+ /* Watchpoint */
+ reset_debug_state();
+ install_wp(PC(write_data));
+ write_data = 'x';
+ GUEST_ASSERT_EQ(write_data, 'x');
+ GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
+
+ GUEST_SYNC(5);
+
+ /* Single-step */
+ reset_debug_state();
+ install_ss();
+ ss_idx = 0;
+ asm volatile("ss_start:\n"
+ "mrs x0, esr_el1\n"
+ "add x0, x0, #1\n"
+ "msr daifset, #8\n"
+ : : : "x0");
+ GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
+ GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
+ GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);
+
+ GUEST_DONE();
+}
+
+static void guest_sw_bp_handler(struct ex_regs *regs)
+{
+ sw_bp_addr = regs->pc;
+ regs->pc += 4;
+}
+
+static void guest_hw_bp_handler(struct ex_regs *regs)
+{
+ hw_bp_addr = regs->pc;
+ regs->pstate |= SPSR_D;
+}
+
+static void guest_wp_handler(struct ex_regs *regs)
+{
+ wp_data_addr = read_sysreg(far_el1);
+ wp_addr = regs->pc;
+ regs->pstate |= SPSR_D;
+}
+
+static void guest_ss_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT_1(ss_idx < 4, ss_idx);
+ ss_addr[ss_idx++] = regs->pc;
+ regs->pstate |= SPSR_SS;
+}
+
+static void guest_svc_handler(struct ex_regs *regs)
+{
+ svc_addr = regs->pc;
+}
+
+static int debug_version(struct kvm_vm *vm)
+{
+ uint64_t id_aa64dfr0;
+
+ get_reg(vm, VCPU_ID, ARM64_SYS_REG(ID_AA64DFR0_EL1), &id_aa64dfr0);
+ return id_aa64dfr0 & 0xf;
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int stage;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ ucall_init(vm, NULL);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+ if (debug_version(vm) < 6) {
+ print_skip("Armv8 debug architecture not supported.");
+ kvm_vm_free(vm);
+ exit(KSFT_SKIP);
+ }
+
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_BRK_INS, guest_sw_bp_handler);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_WP_CURRENT, guest_wp_handler);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_SSTEP_CURRENT, guest_ss_handler);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_EC_SVC64, guest_svc_handler);
+
+ for (stage = 0; stage < 7; stage++) {
+ vcpu_run(vm, VCPU_ID);
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == stage,
+ "Stage %d: Unexpected sync ucall, got %lx",
+ stage, (ulong)uc.args[1]);
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
+ (const char *)uc.args[0],
+ __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
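
debug_version() gates the whole test on the low nibble of ID_AA64DFR0_EL1,
the DebugVer field; 6 (Armv8.0 debug) is the minimum it accepts. A small
decoding sketch, with field values taken from the Arm ARM; the helper is
illustrative and not part of the test:

    #include <stdint.h>

    static const char *debug_ver_str(uint64_t id_aa64dfr0)
    {
            switch (id_aa64dfr0 & 0xf) {    /* ID_AA64DFR0_EL1.DebugVer */
            case 6:  return "Armv8.0 debug";
            case 7:  return "Armv8.0 debug + VHE";
            case 8:  return "Armv8.2 debug";
            case 9:  return "Armv8.4 debug";
            default: return "unknown/unsupported";
            }
    }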
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c b/tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c
deleted file mode 100644
index efba76682b4b..000000000000
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c
+++ /dev/null
@@ -1,3 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define REG_LIST_SVE
-#include "get-reg-list.c"
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 486932164cf2..a16c8f05366c 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -27,17 +27,37 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
-#ifdef REG_LIST_SVE
-#define reg_list_sve() (true)
-#else
-#define reg_list_sve() (false)
-#endif
+static struct kvm_reg_list *reg_list;
+static __u64 *blessed_reg, blessed_n;
-#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
+struct reg_sublist {
+ const char *name;
+ long capability;
+ int feature;
+ bool finalize;
+ __u64 *regs;
+ __u64 regs_n;
+ __u64 *rejects_set;
+ __u64 rejects_set_n;
+};
+
+struct vcpu_config {
+ char *name;
+ struct reg_sublist sublists[];
+};
+
+static struct vcpu_config *vcpu_configs[];
+static int vcpu_configs_n;
+
+#define for_each_sublist(c, s) \
+ for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
#define for_each_reg(i) \
for ((i) = 0; (i) < reg_list->n; ++(i))
@@ -54,12 +74,41 @@
for_each_reg_filtered(i) \
if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+static const char *config_name(struct vcpu_config *c)
+{
+ struct reg_sublist *s;
+ int len = 0;
-static struct kvm_reg_list *reg_list;
+ if (c->name)
+ return c->name;
-static __u64 base_regs[], vregs[], sve_regs[], rejects_set[];
-static __u64 base_regs_n, vregs_n, sve_regs_n, rejects_set_n;
-static __u64 *blessed_reg, blessed_n;
+ for_each_sublist(c, s)
+ len += strlen(s->name) + 1;
+
+ c->name = malloc(len);
+
+ len = 0;
+ for_each_sublist(c, s) {
+ if (!strcmp(s->name, "base"))
+ continue;
+ strcpy(c->name + len, s->name);
+ len += strlen(s->name) + 1;
+ c->name[len - 1] = '+';
+ }
+ c->name[len - 1] = '\0';
+
+ return c->name;
+}
+
+static bool has_cap(struct vcpu_config *c, long capability)
+{
+ struct reg_sublist *s;
+
+ for_each_sublist(c, s)
+ if (s->capability == capability)
+ return true;
+ return false;
+}
static bool filter_reg(__u64 reg)
{
@@ -96,11 +145,13 @@ static const char *str_with_index(const char *template, __u64 index)
return (const char *)str;
}
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
+
#define CORE_REGS_XX_NR_WORDS 2
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4
-static const char *core_id_to_str(__u64 id)
+static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
{
__u64 core_off = id & ~REG_MASK, idx;
@@ -111,7 +162,7 @@ static const char *core_id_to_str(__u64 id)
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 31, "Unexpected regs.regs index: %lld", idx);
+ TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
case KVM_REG_ARM_CORE_REG(regs.sp):
return "KVM_REG_ARM_CORE_REG(regs.sp)";
@@ -126,12 +177,12 @@ static const char *core_id_to_str(__u64 id)
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
- TEST_ASSERT(idx < KVM_NR_SPSR, "Unexpected spsr index: %lld", idx);
+ TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 32, "Unexpected fp_regs.vregs index: %lld", idx);
+ TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
@@ -139,11 +190,11 @@ static const char *core_id_to_str(__u64 id)
return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
}
- TEST_FAIL("Unknown core reg id: 0x%llx", id);
+ TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
return NULL;
}
-static const char *sve_id_to_str(__u64 id)
+static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
{
__u64 sve_off, n, i;
@@ -153,37 +204,37 @@ static const char *sve_id_to_str(__u64 id)
sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
- TEST_ASSERT(i == 0, "Currently we don't expect slice > 0, reg id 0x%llx", id);
+ TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);
switch (sve_off) {
case KVM_REG_ARM64_SVE_ZREG_BASE ...
KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
- "Unexpected bits set in SVE ZREG id: 0x%llx", id);
+ "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
case KVM_REG_ARM64_SVE_PREG_BASE ...
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
- "Unexpected bits set in SVE PREG id: 0x%llx", id);
+ "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
case KVM_REG_ARM64_SVE_FFR_BASE:
TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
- "Unexpected bits set in SVE FFR id: 0x%llx", id);
+ "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
return "KVM_REG_ARM64_SVE_FFR(0)";
}
return NULL;
}
-static void print_reg(__u64 id)
+static void print_reg(struct vcpu_config *c, __u64 id)
{
unsigned op0, op1, crn, crm, op2;
const char *reg_size = NULL;
TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
- "KVM_REG_ARM64 missing in reg id: 0x%llx", id);
+ "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);
switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U8:
@@ -214,17 +265,17 @@ static void print_reg(__u64 id)
reg_size = "KVM_REG_SIZE_U2048";
break;
default:
- TEST_FAIL("Unexpected reg size: 0x%llx in reg id: 0x%llx",
- (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+ TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
+ config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
}
switch (id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE:
- printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(id));
+ printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
break;
case KVM_REG_ARM_DEMUX:
TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
- "Unexpected bits set in DEMUX reg id: 0x%llx", id);
+ "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
break;
@@ -235,23 +286,23 @@ static void print_reg(__u64 id)
crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
- "Unexpected bits set in SYSREG reg id: 0x%llx", id);
+ "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
break;
case KVM_REG_ARM_FW:
TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
- "Unexpected bits set in FW reg id: 0x%llx", id);
+ "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM64_SVE:
- if (reg_list_sve())
- printf("\t%s,\n", sve_id_to_str(id));
+ if (has_cap(c, KVM_CAP_ARM_SVE))
+ printf("\t%s,\n", sve_id_to_str(c, id));
else
- TEST_FAIL("KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", id);
+ TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
break;
default:
- TEST_FAIL("Unexpected coproc type: 0x%llx in reg id: 0x%llx",
- (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
+ TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
+ config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
}
}
@@ -312,56 +363,58 @@ static void core_reg_fixup(void)
reg_list = tmp;
}
-static void prepare_vcpu_init(struct kvm_vcpu_init *init)
+static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
{
- if (reg_list_sve())
- init->features[0] |= 1 << KVM_ARM_VCPU_SVE;
+ struct reg_sublist *s;
+
+ for_each_sublist(c, s)
+ if (s->capability)
+ init->features[s->feature / 32] |= 1 << (s->feature % 32);
}
-static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
{
+ struct reg_sublist *s;
int feature;
- if (reg_list_sve()) {
- feature = KVM_ARM_VCPU_SVE;
- vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+ for_each_sublist(c, s) {
+ if (s->finalize) {
+ feature = s->feature;
+ vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+ }
}
}
-static void check_supported(void)
+static void check_supported(struct vcpu_config *c)
{
- if (reg_list_sve() && !kvm_check_cap(KVM_CAP_ARM_SVE)) {
- fprintf(stderr, "SVE not available, skipping tests\n");
- exit(KSFT_SKIP);
+ struct reg_sublist *s;
+
+ for_each_sublist(c, s) {
+ if (s->capability && !kvm_check_cap(s->capability)) {
+ fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
+ exit(KSFT_SKIP);
+ }
}
}
-int main(int ac, char **av)
+static bool print_list;
+static bool print_filtered;
+static bool fixup_core_regs;
+
+static void run_test(struct vcpu_config *c)
{
struct kvm_vcpu_init init = { .target = -1, };
- int new_regs = 0, missing_regs = 0, i;
+ int new_regs = 0, missing_regs = 0, i, n;
int failed_get = 0, failed_set = 0, failed_reject = 0;
- bool print_list = false, print_filtered = false, fixup_core_regs = false;
struct kvm_vm *vm;
- __u64 *vec_regs;
+ struct reg_sublist *s;
- check_supported();
-
- for (i = 1; i < ac; ++i) {
- if (strcmp(av[i], "--core-reg-fixup") == 0)
- fixup_core_regs = true;
- else if (strcmp(av[i], "--list") == 0)
- print_list = true;
- else if (strcmp(av[i], "--list-filtered") == 0)
- print_filtered = true;
- else
- TEST_FAIL("Unknown option: %s\n", av[i]);
- }
+ check_supported(c);
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- prepare_vcpu_init(&init);
+ prepare_vcpu_init(c, &init);
aarch64_vcpu_add_default(vm, 0, &init, NULL);
- finalize_vcpu(vm, 0);
+ finalize_vcpu(vm, 0, c);
reg_list = vcpu_get_reg_list(vm, 0);
@@ -374,10 +427,10 @@ int main(int ac, char **av)
__u64 id = reg_list->reg[i];
if ((print_list && !filter_reg(id)) ||
(print_filtered && filter_reg(id)))
- print_reg(id);
+ print_reg(c, id);
}
putchar('\n');
- return 0;
+ return;
}
/*
@@ -396,50 +449,52 @@ int main(int ac, char **av)
.id = reg_list->reg[i],
.addr = (__u64)&addr,
};
+ bool reject_reg = false;
int ret;
ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
if (ret) {
- puts("Failed to get ");
- print_reg(reg.id);
+ printf("%s: Failed to get ", config_name(c));
+ print_reg(c, reg.id);
putchar('\n');
++failed_get;
}
/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
- if (find_reg(rejects_set, rejects_set_n, reg.id)) {
- ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
- if (ret != -1 || errno != EPERM) {
- printf("Failed to reject (ret=%d, errno=%d) ", ret, errno);
- print_reg(reg.id);
- putchar('\n');
- ++failed_reject;
+ for_each_sublist(c, s) {
+ if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
+ reject_reg = true;
+ ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+ if (ret != -1 || errno != EPERM) {
+ printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
+ print_reg(c, reg.id);
+ putchar('\n');
+ ++failed_reject;
+ }
+ break;
}
- continue;
}
- ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
- if (ret) {
- puts("Failed to set ");
- print_reg(reg.id);
- putchar('\n');
- ++failed_set;
+ if (!reject_reg) {
+ ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ printf("%s: Failed to set ", config_name(c));
+ print_reg(c, reg.id);
+ putchar('\n');
+ ++failed_set;
+ }
}
}
- if (reg_list_sve()) {
- blessed_n = base_regs_n + sve_regs_n;
- vec_regs = sve_regs;
- } else {
- blessed_n = base_regs_n + vregs_n;
- vec_regs = vregs;
- }
-
+ for_each_sublist(c, s)
+ blessed_n += s->regs_n;
blessed_reg = calloc(blessed_n, sizeof(__u64));
- for (i = 0; i < base_regs_n; ++i)
- blessed_reg[i] = base_regs[i];
- for (i = 0; i < blessed_n - base_regs_n; ++i)
- blessed_reg[base_regs_n + i] = vec_regs[i];
+
+ n = 0;
+ for_each_sublist(c, s) {
+ for (i = 0; i < s->regs_n; ++i)
+ blessed_reg[n++] = s->regs[i];
+ }
for_each_new_reg(i)
++new_regs;
@@ -448,40 +503,141 @@ int main(int ac, char **av)
++missing_regs;
if (new_regs || missing_regs) {
- printf("Number blessed registers: %5lld\n", blessed_n);
- printf("Number registers: %5lld\n", reg_list->n);
+ printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
+ printf("%s: Number registers: %5lld\n", config_name(c), reg_list->n);
}
if (new_regs) {
- printf("\nThere are %d new registers.\n"
+ printf("\n%s: There are %d new registers.\n"
"Consider adding them to the blessed reg "
- "list with the following lines:\n\n", new_regs);
+ "list with the following lines:\n\n", config_name(c), new_regs);
for_each_new_reg(i)
- print_reg(reg_list->reg[i]);
+ print_reg(c, reg_list->reg[i]);
putchar('\n');
}
if (missing_regs) {
- printf("\nThere are %d missing registers.\n"
- "The following lines are missing registers:\n\n", missing_regs);
+ printf("\n%s: There are %d missing registers.\n"
+ "The following lines are missing registers:\n\n", config_name(c), missing_regs);
for_each_missing_reg(i)
- print_reg(blessed_reg[i]);
+ print_reg(c, blessed_reg[i]);
putchar('\n');
}
TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
- "There are %d missing registers; "
+ "%s: There are %d missing registers; "
"%d registers failed get; %d registers failed set; %d registers failed reject",
- missing_regs, failed_get, failed_set, failed_reject);
+ config_name(c), missing_regs, failed_get, failed_set, failed_reject);
- return 0;
+ pr_info("%s: PASS\n", config_name(c));
+ blessed_n = 0;
+ free(blessed_reg);
+ free(reg_list);
+ kvm_vm_free(vm);
+}
+
+static void help(void)
+{
+ struct vcpu_config *c;
+ int i;
+
+ printf(
+ "\n"
+ "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
+ " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
+ " '<selection>' may be\n");
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ printf(
+ " '%s'\n", config_name(c));
+ }
+
+ printf(
+ "\n"
+ " --list Print the register list rather than test it (requires --config)\n"
+ " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
+ " --core-reg-fixup Needed when running on old kernels with broken core reg listings\n"
+ "\n"
+ );
+}
+
+static struct vcpu_config *parse_config(const char *config)
+{
+ struct vcpu_config *c;
+ int i;
+
+ if (config[8] != '=')
+ help(), exit(1);
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (strcmp(config_name(c), &config[9]) == 0)
+ break;
+ }
+
+ if (i == vcpu_configs_n)
+ help(), exit(1);
+
+ return c;
+}
+
+int main(int ac, char **av)
+{
+ struct vcpu_config *c, *sel = NULL;
+ int i, ret = 0;
+ pid_t pid;
+
+ for (i = 1; i < ac; ++i) {
+ if (strcmp(av[i], "--core-reg-fixup") == 0)
+ fixup_core_regs = true;
+ else if (strncmp(av[i], "--config", 8) == 0)
+ sel = parse_config(av[i]);
+ else if (strcmp(av[i], "--list") == 0)
+ print_list = true;
+ else if (strcmp(av[i], "--list-filtered") == 0)
+ print_filtered = true;
+ else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
+ help(), exit(0);
+ else
+ help(), exit(1);
+ }
+
+ if (print_list || print_filtered) {
+ /*
+ * We only want to print the register list of a single config.
+ */
+ if (!sel)
+ help(), exit(1);
+ }
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (sel && c != sel)
+ continue;
+
+ pid = fork();
+
+ if (!pid) {
+ run_test(c);
+ exit(0);
+ } else {
+ int wstatus;
+ pid_t wpid = wait(&wstatus);
+ TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
+ if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
+ ret = KSFT_FAIL;
+ }
+ }
+
+ return ret;
}
/*
* The current blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
*
- * The blessed list is up to date with kernel version v5.10-rc5
+ * The blessed list is up to date with kernel version v5.13-rc3
*/
static __u64 base_regs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
@@ -673,8 +829,6 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
- ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
- ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
@@ -683,6 +837,16 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
+ ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
+ ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
+ ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
+ ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
+ ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
+};
+
+static __u64 pmu_regs[] = {
+ ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
+ ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
@@ -692,8 +856,6 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
- ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
- ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
ARM64_SYS_REG(3, 3, 14, 8, 0),
ARM64_SYS_REG(3, 3, 14, 8, 1),
ARM64_SYS_REG(3, 3, 14, 8, 2),
@@ -757,11 +919,7 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 3, 14, 15, 5),
ARM64_SYS_REG(3, 3, 14, 15, 6),
ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
- ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
- ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
- ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
};
-static __u64 base_regs_n = ARRAY_SIZE(base_regs);
static __u64 vregs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
@@ -797,7 +955,6 @@ static __u64 vregs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};
-static __u64 vregs_n = ARRAY_SIZE(vregs);
static __u64 sve_regs[] = {
KVM_REG_ARM64_SVE_VLS,
@@ -852,11 +1009,57 @@ static __u64 sve_regs[] = {
KVM_REG_ARM64_SVE_FFR(0),
ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
};
-static __u64 sve_regs_n = ARRAY_SIZE(sve_regs);
-static __u64 rejects_set[] = {
-#ifdef REG_LIST_SVE
+static __u64 sve_rejects_set[] = {
KVM_REG_ARM64_SVE_VLS,
-#endif
};
-static __u64 rejects_set_n = ARRAY_SIZE(rejects_set);
+
+#define BASE_SUBLIST \
+ { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
+#define VREGS_SUBLIST \
+ { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
+#define PMU_SUBLIST \
+ { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+#define SVE_SUBLIST \
+ { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
+ .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
+ .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
+
+static struct vcpu_config vregs_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ VREGS_SUBLIST,
+ {0},
+ },
+};
+static struct vcpu_config vregs_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ VREGS_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+static struct vcpu_config sve_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ SVE_SUBLIST,
+ {0},
+ },
+};
+static struct vcpu_config sve_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ SVE_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_config *vcpu_configs[] = {
+ &vregs_config,
+ &vregs_pmu_config,
+ &sve_config,
+ &sve_pmu_config,
+};
+static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
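
The sublist scheme makes register groups additive: config_name() derives a
config's name from its non-base sublists (sve_pmu_config prints as
"sve+pmu"), and prepare_vcpu_init()/finalize_vcpu() fold in each sublist's
capability and feature. A hypothetical example of how a pointer-auth sublist
could slot in later; the register list below is a placeholder, not something
this commit adds:

    /* Hypothetical: regs for a future "pauth" sublist. */
    static __u64 pauth_regs[] = {
            ARM64_SYS_REG(3, 0, 2, 1, 0),   /* APIAKEYLO_EL1 */
            /* ... remaining key registers ... */
    };

    #define PAUTH_SUBLIST \
            { "pauth", .capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
              .feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
              .regs = pauth_regs, .regs_n = ARRAY_SIZE(pauth_regs), }

    static struct vcpu_config vregs_pauth_config = {    /* "vregs+pauth" */
            .sublists = {
            BASE_SUBLIST,
            VREGS_SUBLIST,
            PAUTH_SUBLIST,
            {0},
            },
    };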
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 5f7a229c3af1..b74704305835 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -9,6 +9,7 @@
#define _GNU_SOURCE /* for pipe2 */
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
@@ -38,6 +39,7 @@
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static size_t demand_paging_size;
static char *guest_data_prototype;
static void *vcpu_worker(void *data)
@@ -71,36 +73,51 @@ static void *vcpu_worker(void *data)
return NULL;
}
-static int handle_uffd_page_request(int uffd, uint64_t addr)
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
{
- pid_t tid;
+ pid_t tid = syscall(__NR_gettid);
struct timespec start;
struct timespec ts_diff;
- struct uffdio_copy copy;
int r;
- tid = syscall(__NR_gettid);
+ clock_gettime(CLOCK_MONOTONIC, &start);
- copy.src = (uint64_t)guest_data_prototype;
- copy.dst = addr;
- copy.len = perf_test_args.host_page_size;
- copy.mode = 0;
+ if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
+ struct uffdio_copy copy;
- clock_gettime(CLOCK_MONOTONIC, &start);
+ copy.src = (uint64_t)guest_data_prototype;
+ copy.dst = addr;
+ copy.len = demand_paging_size;
+ copy.mode = 0;
- r = ioctl(uffd, UFFDIO_COPY, &copy);
- if (r == -1) {
- pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n",
- addr, tid, errno);
- return r;
+ r = ioctl(uffd, UFFDIO_COPY, &copy);
+ if (r == -1) {
+ pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+ } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ struct uffdio_continue cont = {0};
+
+ cont.range.start = addr;
+ cont.range.len = demand_paging_size;
+
+ r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
+ if (r == -1) {
+ pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+ } else {
+ TEST_FAIL("Invalid uffd mode %d", uffd_mode);
}
ts_diff = timespec_elapsed(start);
- PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
+ PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
timespec_to_ns(ts_diff));
PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
- perf_test_args.host_page_size, addr, tid);
+ demand_paging_size, addr, tid);
return 0;
}
@@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
bool quit_uffd_thread;
struct uffd_handler_args {
+ int uffd_mode;
int uffd;
int pipefd;
useconds_t delay;
@@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg)
if (r == -1) {
if (errno == EAGAIN)
continue;
- pr_info("Read of uffd gor errno %d", errno);
+ pr_info("Read of uffd got errno %d\n", errno);
return NULL;
}
@@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg)
if (delay)
usleep(delay);
addr = msg.arg.pagefault.address;
- r = handle_uffd_page_request(uffd, addr);
+ r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
if (r < 0)
return NULL;
pages++;
@@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg)
return NULL;
}
-static int setup_demand_paging(struct kvm_vm *vm,
- pthread_t *uffd_handler_thread, int pipefd,
- useconds_t uffd_delay,
- struct uffd_handler_args *uffd_args,
- void *hva, uint64_t len)
+static void setup_demand_paging(struct kvm_vm *vm,
+ pthread_t *uffd_handler_thread, int pipefd,
+ int uffd_mode, useconds_t uffd_delay,
+ struct uffd_handler_args *uffd_args,
+ void *hva, void *alias, uint64_t len)
{
+ bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
int uffd;
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
+ uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
- uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- if (uffd == -1) {
- pr_info("uffd creation failed\n");
- return -1;
+ PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
+ is_minor ? "MINOR" : "MISSING",
+ is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
+
+ /* In order to get minor faults, prefault via the alias. */
+ if (is_minor) {
+ size_t p;
+
+ expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+
+ TEST_ASSERT(alias != NULL, "Alias required for minor faults");
+ for (p = 0; p < (len / demand_paging_size); ++p) {
+ memcpy(alias + (p * demand_paging_size),
+ guest_data_prototype, demand_paging_size);
+ }
}
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
- if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
- pr_info("ioctl uffdio_api failed\n");
- return -1;
- }
+ TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
+ "ioctl UFFDIO_API failed: %" PRIu64,
+ (uint64_t)uffdio_api.api);
uffdio_register.range.start = (uint64_t)hva;
uffdio_register.range.len = len;
- uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
- pr_info("ioctl uffdio_register failed\n");
- return -1;
- }
-
- if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
- UFFD_API_RANGE_IOCTLS) {
- pr_info("unexpected userfaultfd ioctl set\n");
- return -1;
- }
+ uffdio_register.mode = uffd_mode;
+ TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
+ "ioctl UFFDIO_REGISTER failed");
+ TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
+ expected_ioctls, "missing userfaultfd ioctls");
+ uffd_args->uffd_mode = uffd_mode;
uffd_args->uffd = uffd;
uffd_args->pipefd = pipefd;
uffd_args->delay = uffd_delay;
@@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm,
PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
hva, hva + len);
-
- return 0;
}
struct test_params {
- bool use_uffd;
+ int uffd_mode;
useconds_t uffd_delay;
+ enum vm_mem_backing_src_type src_type;
bool partition_vcpu_memory_access;
};
@@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
int r;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
- VM_MEM_SRC_ANONYMOUS);
+ p->src_type);
perf_test_args.wr_fract = 1;
- guest_data_prototype = malloc(perf_test_args.host_page_size);
+ demand_paging_size = get_backing_src_pagesz(p->src_type);
+
+ guest_data_prototype = malloc(demand_paging_size);
TEST_ASSERT(guest_data_prototype,
"Failed to allocate buffer for guest data pattern");
- memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
+ memset(guest_data_prototype, 0xAB, demand_paging_size);
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
@@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
- if (p->use_uffd) {
+ if (p->uffd_mode) {
uffd_handler_threads =
malloc(nr_vcpus * sizeof(*uffd_handler_threads));
TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
void *vcpu_hva;
+ void *vcpu_alias;
uint64_t vcpu_mem_size;
@@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
- /* Cache the HVA pointer of the region */
+ /* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+ vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
/*
* Set up user fault fd to handle demand paging
@@ -321,13 +352,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(!r, "Failed to set up pipefd");
- r = setup_demand_paging(vm,
- &uffd_handler_threads[vcpu_id],
- pipefds[vcpu_id * 2],
- p->uffd_delay, &uffd_args[vcpu_id],
- vcpu_hva, vcpu_mem_size);
- if (r < 0)
- exit(-r);
+ setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
+ pipefds[vcpu_id * 2], p->uffd_mode,
+ p->uffd_delay, &uffd_args[vcpu_id],
+ vcpu_hva, vcpu_alias,
+ vcpu_mem_size);
}
}
@@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("All vCPU threads joined\n");
- if (p->use_uffd) {
+ if (p->uffd_mode) {
char c;
/* Tell the user fault fd handler threads to quit */
@@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
free(guest_data_prototype);
free(vcpu_threads);
- if (p->use_uffd) {
+ if (p->uffd_mode) {
free(uffd_handler_threads);
free(uffd_args);
free(pipefds);
@@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
- " [-b memory] [-v vcpus] [-o]\n", name);
+ printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
+ " [-b memory] [-t type] [-v vcpus] [-o]\n", name);
guest_modes_help();
- printf(" -u: use User Fault FD to handle vCPU page\n"
- " faults.\n");
+ printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
+ " UFFD registration mode: 'MISSING' or 'MINOR'.\n");
printf(" -d: add a delay in usec to the User Fault\n"
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
printf(" -b: specify the size of the memory region which should be\n"
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
+ printf(" -t: The type of backing memory to use. Default: anonymous\n");
+ backing_src_help();
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
@@ -409,19 +440,24 @@ int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
struct test_params p = {
+ .src_type = VM_MEM_SRC_ANONYMOUS,
.partition_vcpu_memory_access = true,
};
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'u':
- p.use_uffd = true;
+ if (!strcmp("MISSING", optarg))
+ p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+ else if (!strcmp("MINOR", optarg))
+ p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
+ TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
break;
case 'd':
p.uffd_delay = strtoul(optarg, NULL, 0);
@@ -430,6 +466,9 @@ int main(int argc, char *argv[])
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
+ case 't':
+ p.src_type = parse_backing_src_type(optarg);
+ break;
case 'v':
nr_vcpus = atoi(optarg);
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
@@ -445,6 +484,11 @@ int main(int argc, char *argv[])
}
}
+ if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
+ !backing_src_is_shared(p.src_type)) {
+ TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+ }
+
for_each_guest_mode(run_test, &p);
return 0;
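
For readers unfamiliar with the two registration modes the test now takes via -u: MISSING mode delivers a fault when no page is mapped at the faulting address at all, while MINOR mode delivers a fault when the backing page exists but the faulting mapping's page tables are not populated, which is why it requires a shared (aliasable) backing source. A minimal, self-contained userspace sketch of the registration sequence (not part of the patch; assumes a uapi with UFFDIO_REGISTER_MODE_MINOR, i.e. Linux 5.13+ headers):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Returns a ready-to-poll uffd, or -1 on any failure. */
static int uffd_register_range(void *addr, uint64_t len, uint64_t mode)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (uint64_t)addr, .len = len },
		/* UFFDIO_REGISTER_MODE_MISSING or UFFDIO_REGISTER_MODE_MINOR */
		.mode = mode,
	};
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0)
		return -1;
	if (ioctl(uffd, UFFDIO_API, &api) || ioctl(uffd, UFFDIO_REGISTER, &reg)) {
		close(uffd);
		return -1;
	}
	return uffd;
}
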
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 81edbd23d371..5fe0140e407e 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -16,7 +16,6 @@
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
-#include <asm/barrier.h>
#include <linux/atomic.h>
#include "kvm_util.h"
@@ -681,7 +680,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
vm_create_irqchip(vm);
#endif
@@ -761,7 +760,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
KVM_MEM_LOG_DIRTY_PAGES);
/* Do mapping for the dirty track memory slot */
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index 5aadf84c91c0..b21c69a56daa 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -105,7 +105,7 @@ static void run_test(uint32_t run)
CPU_SET(i, &cpu_set);
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ kvm_vm_elf_load(vm, program_invocation_name);
vm_create_irqchip(vm);
pr_debug("%s: [%d] start vcpus\n", __func__, run);
@@ -132,6 +132,36 @@ static void run_test(uint32_t run)
TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
}
+void wait_for_child_setup(pid_t pid)
+{
+ /*
+ * Wait for the child to post to the semaphore, but wake up periodically
+ * to check if the child exited prematurely.
+ */
+ for (;;) {
+ const struct timespec wait_period = { .tv_sec = 1 };
+ int status;
+
+ if (!sem_timedwait(sem, &wait_period))
+ return;
+
+ /* Child is still running, keep waiting. */
+ if (pid != waitpid(pid, &status, WNOHANG))
+ continue;
+
+ /*
+ * Child is no longer running, which is not expected.
+ *
+ * If it exited with a non-zero status, we explicitly forward
+ * the child's status in case it exited with KSFT_SKIP.
+ */
+ if (WIFEXITED(status))
+ exit(WEXITSTATUS(status));
+ else
+ TEST_ASSERT(false, "Child exited unexpectedly");
+ }
+}
+
int main(int argc, char **argv)
{
uint32_t i;
@@ -148,7 +178,7 @@ int main(int argc, char **argv)
run_test(i); /* This function always exits */
pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
- sem_wait(sem);
+ wait_for_child_setup(pid);
r = (rand() % DELAY_US_MAX) + 1;
pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
usleep(r);
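
One subtlety worth calling out in wait_for_child_setup(): sem_timedwait() compares against an absolute CLOCK_REALTIME timestamp, not a relative interval. A minimal sketch of a one-second bounded wait built that way (illustrative only, not taken from the patch):

#include <semaphore.h>
#include <time.h>

/* Wait on @sem for about one second; returns 0 if posted, -1 on timeout/error. */
static int sem_wait_one_second(sem_t *sem)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;
	return sem_timedwait(sem, &deadline);
}
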
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index b7fa0c8551db..27dc5c2e56b9 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -8,16 +8,20 @@
#define SELFTEST_KVM_PROCESSOR_H
#include "kvm_util.h"
+#include <linux/stringify.h>
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
-#define CPACR_EL1 3, 0, 1, 0, 2
-#define TCR_EL1 3, 0, 2, 0, 2
-#define MAIR_EL1 3, 0, 10, 2, 0
-#define TTBR0_EL1 3, 0, 2, 0, 0
-#define SCTLR_EL1 3, 0, 1, 0, 0
+#define CPACR_EL1 3, 0, 1, 0, 2
+#define TCR_EL1 3, 0, 2, 0, 2
+#define MAIR_EL1 3, 0, 10, 2, 0
+#define TTBR0_EL1 3, 0, 2, 0, 0
+#define SCTLR_EL1 3, 0, 1, 0, 0
+#define VBAR_EL1 3, 0, 12, 0, 0
+
+#define ID_AA64DFR0_EL1 3, 0, 0, 5, 0
/*
* Default MAIR
@@ -56,4 +60,73 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_init *init, void *guest_code);
+struct ex_regs {
+ u64 regs[31];
+ u64 sp;
+ u64 pc;
+ u64 pstate;
+};
+
+#define VECTOR_NUM 16
+
+enum {
+ VECTOR_SYNC_CURRENT_SP0,
+ VECTOR_IRQ_CURRENT_SP0,
+ VECTOR_FIQ_CURRENT_SP0,
+ VECTOR_ERROR_CURRENT_SP0,
+
+ VECTOR_SYNC_CURRENT,
+ VECTOR_IRQ_CURRENT,
+ VECTOR_FIQ_CURRENT,
+ VECTOR_ERROR_CURRENT,
+
+ VECTOR_SYNC_LOWER_64,
+ VECTOR_IRQ_LOWER_64,
+ VECTOR_FIQ_LOWER_64,
+ VECTOR_ERROR_LOWER_64,
+
+ VECTOR_SYNC_LOWER_32,
+ VECTOR_IRQ_LOWER_32,
+ VECTOR_FIQ_LOWER_32,
+ VECTOR_ERROR_LOWER_32,
+};
+
+#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
+ (v) == VECTOR_SYNC_CURRENT || \
+ (v) == VECTOR_SYNC_LOWER_64 || \
+ (v) == VECTOR_SYNC_LOWER_32)
+
+#define ESR_EC_NUM 64
+#define ESR_EC_SHIFT 26
+#define ESR_EC_MASK (ESR_EC_NUM - 1)
+
+#define ESR_EC_SVC64 0x15
+#define ESR_EC_HW_BP_CURRENT 0x31
+#define ESR_EC_SSTEP_CURRENT 0x33
+#define ESR_EC_WP_CURRENT 0x35
+#define ESR_EC_BRK_INS 0x3c
+
+void vm_init_descriptor_tables(struct kvm_vm *vm);
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+
+typedef void(*handler_fn)(struct ex_regs *);
+void vm_install_exception_handler(struct kvm_vm *vm,
+ int vector, handler_fn handler);
+void vm_install_sync_handler(struct kvm_vm *vm,
+ int vector, int ec, handler_fn handler);
+
+#define write_sysreg(reg, val) \
+({ \
+ u64 __val = (u64)(val); \
+ asm volatile("msr " __stringify(reg) ", %x0" : : "rZ" (__val)); \
+})
+
+#define read_sysreg(reg) \
+({ u64 val; \
+ asm volatile("mrs %0, "__stringify(reg) : "=r"(val) : : "memory");\
+ val; \
+})
+
+#define isb() asm volatile("isb" : : : "memory")
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
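
The ESR_EC_* values above are AArch64 exception class codes: bits [31:26] of ESR_EL1, hence ESR_EC_SHIFT of 26 and the 6-bit ESR_EC_MASK. A one-line illustration of the decode, reusing the read_sysreg() helper defined in this header (a sketch, not code from the patch):

static inline unsigned int esr_ec(void)
{
	/* The exception class lives in ESR_EL1[31:26]. */
	return (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
}
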
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a8f022794ce3..615ab254899d 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -30,6 +30,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
#define DEFAULT_GUEST_PHY_PAGES 512
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
@@ -43,6 +44,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_4K,
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+ VM_MODE_P47V64_4K,
NUM_VM_MODES,
};
@@ -60,7 +62,7 @@ enum vm_guest_mode {
#elif defined(__s390x__)
-#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT VM_MODE_P47V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)
@@ -77,6 +79,7 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
+int open_kvm_dev_path_or_exit(void);
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -96,8 +99,7 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
size_t len);
-void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
- uint32_t data_memslot, uint32_t pgd_memslot);
+void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
@@ -139,13 +141,16 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot);
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
+vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
+
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- unsigned int npages, uint32_t pgd_memslot);
+ unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
/*
* Address Guest Virtual to Guest Physical
@@ -234,7 +239,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
const char *exit_reason_str(unsigned int exit_reason);
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
+void virt_pgd_alloc(struct kvm_vm *vm);
/*
* VM Virtual Page Map
@@ -252,13 +257,13 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
* Within @vm, creates a virtual translation for the page starting
* at @vaddr to the page starting at @paddr.
*/
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t memslot);
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
/*
* Create a VM with reasonable defaults
@@ -283,10 +288,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[]);
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as parameters */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[]);
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[]);
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
@@ -302,7 +308,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
-unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+uint64_t vm_get_max_gfn(struct kvm_vm *vm);
int vm_get_fd(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
@@ -347,6 +353,7 @@ enum {
UCALL_SYNC,
UCALL_ABORT,
UCALL_DONE,
+ UCALL_UNHANDLED,
};
#define UCALL_MAX_ARGS 6
@@ -365,26 +372,31 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_DONE() ucall(UCALL_DONE, 0)
-#define __GUEST_ASSERT(_condition, _nargs, _args...) do { \
- if (!(_condition)) \
- ucall(UCALL_ABORT, 2 + _nargs, \
- "Failed guest assert: " \
- #_condition, __LINE__, _args); \
+#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) do { \
+ if (!(_condition)) \
+ ucall(UCALL_ABORT, 2 + _nargs, \
+ "Failed guest assert: " \
+ _condstr, __LINE__, _args); \
} while (0)
#define GUEST_ASSERT(_condition) \
- __GUEST_ASSERT((_condition), 0, 0)
+ __GUEST_ASSERT(_condition, #_condition, 0, 0)
#define GUEST_ASSERT_1(_condition, arg1) \
- __GUEST_ASSERT((_condition), 1, (arg1))
+ __GUEST_ASSERT(_condition, #_condition, 1, (arg1))
#define GUEST_ASSERT_2(_condition, arg1, arg2) \
- __GUEST_ASSERT((_condition), 2, (arg1), (arg2))
+ __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2))
#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
- __GUEST_ASSERT((_condition), 3, (arg1), (arg2), (arg3))
+ __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3))
#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
- __GUEST_ASSERT((_condition), 4, (arg1), (arg2), (arg3), (arg4))
+ __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4))
+
+#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)
+
+int vm_get_stats_fd(struct kvm_vm *vm);
+int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);
#endif /* SELFTEST_KVM_UTIL_H */
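
The new _condstr parameter is what lets GUEST_ASSERT_EQ() report the stringified expression "a == b" rather than a pre-baked condition string. A hypothetical guest-side usage sketch (the values are illustrative):

static void guest_code(void)
{
	uint64_t expected = 0xAB, actual = 0xAB;

	/* On failure the ucall carries "actual == expected" plus both values. */
	GUEST_ASSERT_EQ(actual, expected);
	GUEST_DONE();
}
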
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index fade3130eb01..d79be15dd3d2 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -17,6 +17,7 @@
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
+#include <sys/mman.h>
#include "kselftest.h"
static inline int _no_printf(const char *format, ...) { return 0; }
@@ -84,6 +85,8 @@ enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB,
VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB,
VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+ VM_MEM_SRC_SHMEM,
+ VM_MEM_SRC_SHARED_HUGETLB,
NUM_SRC_TYPES,
};
@@ -100,4 +103,13 @@ size_t get_backing_src_pagesz(uint32_t i);
void backing_src_help(void);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+/*
+ * Whether or not the given source type is shared memory (as opposed to
+ * anonymous).
+ */
+static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
+{
+ return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
+}
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/apic.h b/tools/testing/selftests/kvm/include/x86_64/apic.h
new file mode 100644
index 000000000000..0be4757f1f20
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/apic.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/apic.h
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_APIC_H
+#define SELFTEST_KVM_APIC_H
+
+#include <stdint.h>
+
+#include "processor.h"
+
+#define APIC_DEFAULT_GPA 0xfee00000ULL
+
+/* APIC base address MSR and fields */
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_EXTD (1<<10)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define GET_APIC_BASE(x) (((x) >> 12) << 12)
+
+#define APIC_BASE_MSR 0x800
+#define X2APIC_ENABLE (1UL << 10)
+#define APIC_ID 0x20
+#define APIC_LVR 0x30
+#define GET_APIC_ID_FIELD(x) (((x) >> 24) & 0xFF)
+#define APIC_TASKPRI 0x80
+#define APIC_PROCPRI 0xA0
+#define APIC_EOI 0xB0
+#define APIC_SPIV 0xF0
+#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
+#define APIC_SPIV_APIC_ENABLED (1 << 8)
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_FIXED_MASK 0x00700
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+#define SET_APIC_DEST_FIELD(x) ((x) << 24)
+
+void apic_disable(void);
+void xapic_enable(void);
+void x2apic_enable(void);
+
+static inline uint32_t get_bsp_flag(void)
+{
+ return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
+}
+
+static inline uint32_t xapic_read_reg(unsigned int reg)
+{
+ return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+}
+
+static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+{
+ ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+}
+
+static inline uint64_t x2apic_read_reg(unsigned int reg)
+{
+ return rdmsr(APIC_BASE_MSR + (reg >> 4));
+}
+
+static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
+{
+ wrmsr(APIC_BASE_MSR + (reg >> 4), value);
+}
+
+#endif /* SELFTEST_KVM_APIC_H */
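
The split between xapic_*_reg() and x2apic_*_reg() mirrors the architecture: in xAPIC mode registers are 32-bit MMIO words at APIC_DEFAULT_GPA, while in x2APIC mode the same registers are MSRs at APIC_BASE_MSR plus the register offset divided by 16. A guest-side sketch of reading the APIC ID both ways (assumes one of the enable helpers above has run):

static inline uint32_t apic_read_id(int x2apic)
{
	if (x2apic)
		/* MSR 0x802; the x2APIC ID is the full 32-bit value. */
		return (uint32_t)x2apic_read_reg(APIC_ID);
	/* MMIO at 0xfee00020; the xAPIC ID sits in bits 31:24. */
	return GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
}
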
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
index a034438b6266..c9af97abd622 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * tools/testing/selftests/kvm/include/vmx.h
+ * tools/testing/selftests/kvm/include/x86_64/evmcs.h
*
* Copyright (C) 2018, Red Hat, Inc.
*
diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
new file mode 100644
index 000000000000..412eaee7884a
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/hyperv.h
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ */
+
+#ifndef SELFTEST_KVM_HYPERV_H
+#define SELFTEST_KVM_HYPERV_H
+
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HYPERV_CPUID_INTERFACE 0x40000001
+#define HYPERV_CPUID_VERSION 0x40000002
+#define HYPERV_CPUID_FEATURES 0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
+#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS 0x40000080
+#define HYPERV_CPUID_SYNDBG_INTERFACE 0x40000081
+#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES 0x40000082
+
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+#define HV_X64_MSR_HYPERCALL 0x40000001
+#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_X64_MSR_RESET 0x40000003
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+#define HV_X64_MSR_GUEST_IDLE 0x400000F0
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+
+#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1
+#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2
+#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3
+#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4
+#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5
+#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF
+
+/* HYPERV_CPUID_FEATURES.EAX */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/* HYPERV_CPUID_FEATURES.EBX */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ISOLATION BIT(22)
+
+/* HYPERV_CPUID_FEATURES.EDX */
+#define HV_X64_MWAIT_AVAILABLE BIT(0)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
+#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
+#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
+#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
+
+/* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
+#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
+#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
+
+/* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING BIT(1)
+
+/* Hypercalls */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS 0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
+#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
+#define HV_STATUS_OPERATION_DENIED 8
+#define HV_STATUS_INSUFFICIENT_MEMORY 11
+#define HV_STATUS_INVALID_PORT_ID 17
+#define HV_STATUS_INVALID_CONNECTION_ID 18
+#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+
+#endif /* !SELFTEST_KVM_HYPERV_H */
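
These feature bits are discovered through the Hyper-V CPUID leaves declared at the top of the header. A hypothetical guest-side probe for the hypercall MSR (the raw cpuid inline asm is an assumption for the sketch; the selftests ship their own CPUID helpers):

static inline int hv_hypercall_msr_available(void)
{
	uint32_t eax, ebx, ecx, edx;

	asm volatile("cpuid"
		     : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
		     : "a"(HYPERV_CPUID_FEATURES), "c"(0));
	return !!(eax & HV_MSR_HYPERCALL_AVAILABLE);
}
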
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 0b30b4e15c38..242ae8e09a65 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -13,6 +13,8 @@
#include <asm/msr-index.h>
+#include "../kvm_util.h"
+
#define X86_EFLAGS_FIXED (1u << 1)
#define X86_CR4_VME (1ul << 0)
@@ -53,7 +55,8 @@
#define CPUID_PKU (1ul << 3)
#define CPUID_LA57 (1ul << 16)
-#define UNEXPECTED_VECTOR_PORT 0xfff0u
+/* CPUID.0x8000_0001.EDX */
+#define CPUID_GBPAGES (1ul << 26)
/* General Registers in 64-Bit Mode */
struct gpr64_regs {
@@ -391,9 +394,13 @@ struct ex_regs {
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
-void vm_handle_exception(struct kvm_vm *vm, int vector,
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr);
+void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
+ uint64_t pte);
+
/*
* set_cpuid() - overwrites a matching cpuid entry with the provided value.
* matches based on ent->function && ent->index. returns true
@@ -410,6 +417,14 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+enum x86_page_size {
+ X86_PAGE_SIZE_4K = 0,
+ X86_PAGE_SIZE_2M,
+ X86_PAGE_SIZE_1G,
+};
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ enum x86_page_size page_size);
+
/*
* Basic CPU control in CR0
*/
@@ -425,53 +440,6 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
#define X86_CR0_CD (1UL<<30) /* Cache Disable */
#define X86_CR0_PG (1UL<<31) /* Paging */
-#define APIC_DEFAULT_GPA 0xfee00000ULL
-
-/* APIC base address MSR and fields */
-#define MSR_IA32_APICBASE 0x0000001b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_EXTD (1<<10)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-#define GET_APIC_BASE(x) (((x) >> 12) << 12)
-
-#define APIC_BASE_MSR 0x800
-#define X2APIC_ENABLE (1UL << 10)
-#define APIC_ID 0x20
-#define APIC_LVR 0x30
-#define GET_APIC_ID_FIELD(x) (((x) >> 24) & 0xFF)
-#define APIC_TASKPRI 0x80
-#define APIC_PROCPRI 0xA0
-#define APIC_EOI 0xB0
-#define APIC_SPIV 0xF0
-#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
-#define APIC_SPIV_APIC_ENABLED (1 << 8)
-#define APIC_ICR 0x300
-#define APIC_DEST_SELF 0x40000
-#define APIC_DEST_ALLINC 0x80000
-#define APIC_DEST_ALLBUT 0xC0000
-#define APIC_ICR_RR_MASK 0x30000
-#define APIC_ICR_RR_INVALID 0x00000
-#define APIC_ICR_RR_INPROG 0x10000
-#define APIC_ICR_RR_VALID 0x20000
-#define APIC_INT_LEVELTRIG 0x08000
-#define APIC_INT_ASSERT 0x04000
-#define APIC_ICR_BUSY 0x01000
-#define APIC_DEST_LOGICAL 0x00800
-#define APIC_DEST_PHYSICAL 0x00000
-#define APIC_DM_FIXED 0x00000
-#define APIC_DM_FIXED_MASK 0x00700
-#define APIC_DM_LOWEST 0x00100
-#define APIC_DM_SMI 0x00200
-#define APIC_DM_REMRD 0x00300
-#define APIC_DM_NMI 0x00400
-#define APIC_DM_INIT 0x00500
-#define APIC_DM_STARTUP 0x00600
-#define APIC_DM_EXTINT 0x00700
-#define APIC_VECTOR_MASK 0x000FF
-#define APIC_ICR2 0x310
-#define SET_APIC_DEST_FIELD(x) ((x) << 24)
-
/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21)
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 65eb1079a161..583ceb0d1457 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -10,6 +10,7 @@
#include <stdint.h>
#include "processor.h"
+#include "apic.h"
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
@@ -607,15 +608,13 @@ bool nested_vmx_supported(void);
void nested_vmx_check_supported(void);
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+ uint64_t nested_paddr, uint64_t paddr);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size,
- uint32_t eptp_memslot);
+ uint64_t nested_paddr, uint64_t paddr, uint64_t size);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t memslot, uint32_t eptp_memslot);
+ uint32_t memslot);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t eptp_memslot);
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
#endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
new file mode 100644
index 000000000000..5906bbc08483
--- /dev/null
+++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kvm_binary_stats_test
+ *
+ * Copyright (C) 2021, Google LLC.
+ *
+ * Test the fd-based interface for KVM statistics.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "asm/kvm.h"
+#include "linux/kvm.h"
+
+static void stats_test(int stats_fd)
+{
+ ssize_t ret;
+ int i;
+ size_t size_desc;
+ size_t size_data = 0;
+ struct kvm_stats_header *header;
+ char *id;
+ struct kvm_stats_desc *stats_desc;
+ u64 *stats_data;
+ struct kvm_stats_desc *pdesc;
+
+ /* Read kvm stats header */
+ header = malloc(sizeof(*header));
+ TEST_ASSERT(header, "Allocate memory for stats header");
+
+ ret = read(stats_fd, header, sizeof(*header));
+ TEST_ASSERT(ret == sizeof(*header), "Read stats header");
+ size_desc = sizeof(*stats_desc) + header->name_size;
+
+ /* Read kvm stats id string */
+ id = malloc(header->name_size);
+ TEST_ASSERT(id, "Allocate memory for id string");
+ ret = read(stats_fd, id, header->name_size);
+ TEST_ASSERT(ret == header->name_size, "Read id string");
+
+	/* Check the id string, which should start with "kvm" */
+ TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header->name_size,
+ "Invalid KVM stats type, id: %s", id);
+
+ /* Sanity check for other fields in header */
+ if (header->num_desc == 0) {
+ printf("No KVM stats defined!");
+ return;
+ }
+ /* Check overlap */
+ TEST_ASSERT(header->desc_offset > 0 && header->data_offset > 0
+ && header->desc_offset >= sizeof(*header)
+ && header->data_offset >= sizeof(*header),
+ "Invalid offset fields in header");
+ TEST_ASSERT(header->desc_offset > header->data_offset ||
+ (header->desc_offset + size_desc * header->num_desc <=
+ header->data_offset),
+ "Descriptor block is overlapped with data block");
+
+ /* Allocate memory for stats descriptors */
+ stats_desc = calloc(header->num_desc, size_desc);
+ TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
+ /* Read kvm stats descriptors */
+ ret = pread(stats_fd, stats_desc,
+ size_desc * header->num_desc, header->desc_offset);
+ TEST_ASSERT(ret == size_desc * header->num_desc,
+ "Read KVM stats descriptors");
+
+ /* Sanity check for fields in descriptors */
+ for (i = 0; i < header->num_desc; ++i) {
+ pdesc = (void *)stats_desc + i * size_desc;
+		/* Check type, unit and base boundaries */
+ TEST_ASSERT((pdesc->flags & KVM_STATS_TYPE_MASK)
+ <= KVM_STATS_TYPE_MAX, "Unknown KVM stats type");
+ TEST_ASSERT((pdesc->flags & KVM_STATS_UNIT_MASK)
+ <= KVM_STATS_UNIT_MAX, "Unknown KVM stats unit");
+ TEST_ASSERT((pdesc->flags & KVM_STATS_BASE_MASK)
+ <= KVM_STATS_BASE_MAX, "Unknown KVM stats base");
+		/*
+		 * Check the exponent for each stats unit:
+		 * counters, bytes and clock cycles must have an exponent >= 0;
+		 * seconds must have an exponent <= 0.
+		 */
+ switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
+ case KVM_STATS_UNIT_NONE:
+ case KVM_STATS_UNIT_BYTES:
+ case KVM_STATS_UNIT_CYCLES:
+ TEST_ASSERT(pdesc->exponent >= 0,
+ "Unsupported KVM stats unit");
+ break;
+ case KVM_STATS_UNIT_SECONDS:
+ TEST_ASSERT(pdesc->exponent <= 0,
+ "Unsupported KVM stats unit");
+ break;
+ }
+ /* Check name string */
+ TEST_ASSERT(strlen(pdesc->name) < header->name_size,
+ "KVM stats name(%s) too long", pdesc->name);
+ /* Check size field, which should not be zero */
+ TEST_ASSERT(pdesc->size, "KVM descriptor(%s) with size of 0",
+ pdesc->name);
+ size_data += pdesc->size * sizeof(*stats_data);
+ }
+ /* Check overlap */
+ TEST_ASSERT(header->data_offset >= header->desc_offset
+ || header->data_offset + size_data <= header->desc_offset,
+ "Data block is overlapped with Descriptor block");
+	/* Check that the total stats data size is sane */
+ TEST_ASSERT(size_data >= header->num_desc * sizeof(*stats_data),
+ "Data size is not correct");
+ /* Check stats offset */
+ for (i = 0; i < header->num_desc; ++i) {
+ pdesc = (void *)stats_desc + i * size_desc;
+ TEST_ASSERT(pdesc->offset < size_data,
+ "Invalid offset (%u) for stats: %s",
+ pdesc->offset, pdesc->name);
+ }
+
+ /* Allocate memory for stats data */
+ stats_data = malloc(size_data);
+ TEST_ASSERT(stats_data, "Allocate memory for stats data");
+ /* Read kvm stats data as a bulk */
+ ret = pread(stats_fd, stats_data, size_data, header->data_offset);
+ TEST_ASSERT(ret == size_data, "Read KVM stats data");
+ /* Read kvm stats data one by one */
+ size_data = 0;
+ for (i = 0; i < header->num_desc; ++i) {
+ pdesc = (void *)stats_desc + i * size_desc;
+ ret = pread(stats_fd, stats_data,
+ pdesc->size * sizeof(*stats_data),
+ header->data_offset + size_data);
+ TEST_ASSERT(ret == pdesc->size * sizeof(*stats_data),
+ "Read data of KVM stats: %s", pdesc->name);
+ size_data += pdesc->size * sizeof(*stats_data);
+ }
+
+ free(stats_data);
+ free(stats_desc);
+ free(id);
+ free(header);
+}
+
+
+static void vm_stats_test(struct kvm_vm *vm)
+{
+ int stats_fd;
+
+ /* Get fd for VM stats */
+ stats_fd = vm_get_stats_fd(vm);
+ TEST_ASSERT(stats_fd >= 0, "Get VM stats fd");
+
+ stats_test(stats_fd);
+ close(stats_fd);
+ TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
+}
+
+static void vcpu_stats_test(struct kvm_vm *vm, int vcpu_id)
+{
+ int stats_fd;
+
+ /* Get fd for VCPU stats */
+ stats_fd = vcpu_get_stats_fd(vm, vcpu_id);
+ TEST_ASSERT(stats_fd >= 0, "Get VCPU stats fd");
+
+ stats_test(stats_fd);
+ close(stats_fd);
+ TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
+}
+
+#define DEFAULT_NUM_VM 4
+#define DEFAULT_NUM_VCPU 4
+
+/*
+ * Usage: kvm_binary_stats_test [#vm] [#vcpu]
+ * The first parameter #vm sets the number of VMs to create.
+ * The second parameter #vcpu sets the number of vCPUs to create per VM.
+ * By default, DEFAULT_NUM_VM VMs with DEFAULT_NUM_VCPU vCPUs each are
+ * created for testing.
+ */
+
+int main(int argc, char *argv[])
+{
+ int i, j;
+ struct kvm_vm **vms;
+ int max_vm = DEFAULT_NUM_VM;
+ int max_vcpu = DEFAULT_NUM_VCPU;
+
+ /* Get the number of VMs and VCPUs that would be created for testing. */
+ if (argc > 1) {
+ max_vm = strtol(argv[1], NULL, 0);
+ if (max_vm <= 0)
+ max_vm = DEFAULT_NUM_VM;
+ }
+ if (argc > 2) {
+ max_vcpu = strtol(argv[2], NULL, 0);
+ if (max_vcpu <= 0)
+ max_vcpu = DEFAULT_NUM_VCPU;
+ }
+
+ /* Check the extension for binary stats */
+ if (kvm_check_cap(KVM_CAP_BINARY_STATS_FD) <= 0) {
+ print_skip("Binary form statistics interface is not supported");
+ exit(KSFT_SKIP);
+ }
+
+ /* Create VMs and VCPUs */
+ vms = malloc(sizeof(vms[0]) * max_vm);
+ TEST_ASSERT(vms, "Allocate memory for storing VM pointers");
+ for (i = 0; i < max_vm; ++i) {
+ vms[i] = vm_create(VM_MODE_DEFAULT,
+ DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ for (j = 0; j < max_vcpu; ++j)
+ vm_vcpu_add(vms[i], j);
+ }
+
+ /* Check stats read for every VM and VCPU */
+ for (i = 0; i < max_vm; ++i) {
+ vm_stats_test(vms[i]);
+ for (j = 0; j < max_vcpu; ++j)
+ vcpu_stats_test(vms[i], j);
+ }
+
+ for (i = 0; i < max_vm; ++i)
+ kvm_vm_free(vms[i]);
+ free(vms);
+ return 0;
+}
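
For orientation, the fd layout the test validates is: a kvm_stats_header, the id string, a descriptor block at desc_offset, and a data block at data_offset, where each descriptor's values start at data_offset + desc->offset. A sketch of pulling a single u64 stat out of such an fd (field names follow the linux/kvm.h uapi):

static int read_stat_u64(int stats_fd, const struct kvm_stats_header *hdr,
			 const struct kvm_stats_desc *desc, uint64_t *val)
{
	/* First u64 of this stat; desc->size gives the element count. */
	ssize_t r = pread(stats_fd, val, sizeof(*val),
			  hdr->data_offset + desc->offset);

	return r == sizeof(*val) ? 0 : -1;
}
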
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 1c4753fff19e..0d04a7db7f24 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
- vm = vm_create_with_vcpus(mode, nr_vcpus,
+ vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
guest_num_pages, 0, guest_code, NULL);
/* Align down GPA of the testing memslot */
@@ -303,7 +303,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
/* Do mapping(GVA->GPA) for the testing memory slot */
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/handlers.S b/tools/testing/selftests/kvm/lib/aarch64/handlers.S
new file mode 100644
index 000000000000..0e443eadfac6
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/handlers.S
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+.macro save_registers
+ add sp, sp, #-16 * 17
+
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+ stp x12, x13, [sp, #16 * 6]
+ stp x14, x15, [sp, #16 * 7]
+ stp x16, x17, [sp, #16 * 8]
+ stp x18, x19, [sp, #16 * 9]
+ stp x20, x21, [sp, #16 * 10]
+ stp x22, x23, [sp, #16 * 11]
+ stp x24, x25, [sp, #16 * 12]
+ stp x26, x27, [sp, #16 * 13]
+ stp x28, x29, [sp, #16 * 14]
+
+ /*
+ * This stores sp_el1 into ex_regs.sp so exception handlers can "look"
+ * at it. It will _not_ be used to restore the sp on return from the
+	 * exception, so handlers cannot update it.
+ */
+ add x1, sp, #16 * 17
+ stp x30, x1, [sp, #16 * 15] /* x30, SP */
+
+ mrs x1, elr_el1
+ mrs x2, spsr_el1
+ stp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
+.endm
+
+.macro restore_registers
+ ldp x1, x2, [sp, #16 * 16] /* PC, PSTATE */
+ msr elr_el1, x1
+ msr spsr_el1, x2
+
+ /* sp is not restored */
+ ldp x30, xzr, [sp, #16 * 15] /* x30, SP */
+
+ ldp x28, x29, [sp, #16 * 14]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x22, x23, [sp, #16 * 11]
+ ldp x20, x21, [sp, #16 * 10]
+ ldp x18, x19, [sp, #16 * 9]
+ ldp x16, x17, [sp, #16 * 8]
+ ldp x14, x15, [sp, #16 * 7]
+ ldp x12, x13, [sp, #16 * 6]
+ ldp x10, x11, [sp, #16 * 5]
+ ldp x8, x9, [sp, #16 * 4]
+ ldp x6, x7, [sp, #16 * 3]
+ ldp x4, x5, [sp, #16 * 2]
+ ldp x2, x3, [sp, #16 * 1]
+ ldp x0, x1, [sp, #16 * 0]
+
+ add sp, sp, #16 * 17
+
+ eret
+.endm
+
+.pushsection ".entry.text", "ax"
+.balign 0x800
+.global vectors
+vectors:
+.popsection
+
+.set vector, 0
+
+/*
+ * Build an exception handler for vector and append a jump to it into
+ * vectors (while making sure that it's 0x80 aligned).
+ */
+.macro HANDLER, label
+handler_\label:
+ save_registers
+ mov x0, sp
+ mov x1, #vector
+ bl route_exception
+ restore_registers
+
+.pushsection ".entry.text", "ax"
+.balign 0x80
+ b handler_\label
+.popsection
+
+.set vector, vector + 1
+.endm
+
+.macro HANDLER_INVALID
+.pushsection ".entry.text", "ax"
+.balign 0x80
+/* This will abort so no need to save and restore registers. */
+ mov x0, #vector
+ mov x1, #0 /* ec */
+ mov x2, #0 /* valid_ec */
+ b kvm_exit_unexpected_exception
+.popsection
+
+.set vector, vector + 1
+.endm
+
+/*
+ * Caution: be sure to not add anything between the declaration of vectors
+ * above and these macro calls that will build the vectors table below it.
+ */
+ HANDLER_INVALID // Synchronous EL1t
+ HANDLER_INVALID // IRQ EL1t
+ HANDLER_INVALID // FIQ EL1t
+ HANDLER_INVALID // Error EL1t
+
+ HANDLER el1h_sync // Synchronous EL1h
+ HANDLER el1h_irq // IRQ EL1h
+ HANDLER el1h_fiq // FIQ EL1h
+ HANDLER el1h_error // Error EL1h
+
+ HANDLER el0_sync_64 // Synchronous 64-bit EL0
+ HANDLER el0_irq_64 // IRQ 64-bit EL0
+ HANDLER el0_fiq_64 // FIQ 64-bit EL0
+ HANDLER el0_error_64 // Error 64-bit EL0
+
+ HANDLER el0_sync_32 // Synchronous 32-bit EL0
+ HANDLER el0_irq_32 // IRQ 32-bit EL0
+ HANDLER el0_fiq_32 // FIQ 32-bit EL0
+ HANDLER el0_error_32 // Error 32-bit EL0
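
The .balign directives encode the architectural layout: the table itself is 2KB (0x800) aligned and each of the 16 entries is 0x80 bytes, so the CPU enters vector v at VBAR_EL1 + v * 0x80. The equivalent address computation, as a C sketch:

/* Entry address for vector v (0..15) of a 0x800-aligned table. */
static inline uint64_t vector_entry_addr(uint64_t vbar_el1, unsigned int v)
{
	return vbar_el1 + v * 0x80;
}
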
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index cee92d477dc0..9f49f6caafe5 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -6,14 +6,16 @@
*/
#include <linux/compiler.h>
+#include <assert.h>
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
+static vm_vaddr_t exception_handlers;
+
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -72,19 +74,19 @@ static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
return 1 << (vm->page_shift - 3);
}
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
{
if (!vm->pgd_created) {
vm_paddr_t paddr = vm_phy_pages_alloc(vm,
page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
vm->pgd = paddr;
vm->pgd_created = true;
}
}
-void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot, uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint64_t flags)
{
uint8_t attr_idx = flags & 7;
uint64_t *ptep;
@@ -104,25 +106,19 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
paddr, vm->max_gfn, vm->page_size);
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
- if (!*ptep) {
- *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
- *ptep |= 3;
- }
+ if (!*ptep)
+ *ptep = vm_alloc_page_table(vm) | 3;
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
- if (!*ptep) {
- *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
- *ptep |= 3;
- }
+ if (!*ptep)
+ *ptep = vm_alloc_page_table(vm) | 3;
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
- if (!*ptep) {
- *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
- *ptep |= 3;
- }
+ if (!*ptep)
+ *ptep = vm_alloc_page_table(vm) | 3;
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
@@ -135,12 +131,11 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
- _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
+ _virt_pg_map(vm, vaddr, paddr, attr_idx);
}
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
@@ -302,7 +297,7 @@ void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
- DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
vm_vcpu_add(vm, vcpuid);
aarch64_vcpu_setup(vm, vcpuid, init);
@@ -334,6 +329,100 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
va_end(ap);
}
+void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
+{
+ ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
+ while (1)
+ ;
+}
+
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
+ struct ucall uc;
+
+ if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
+ return;
+
+ if (uc.args[2]) /* valid_ec */ {
+ assert(VECTOR_IS_SYNC(uc.args[0]));
+ TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
+ uc.args[0], uc.args[1]);
+ } else {
+ assert(!VECTOR_IS_SYNC(uc.args[0]));
+ TEST_FAIL("Unexpected exception (vector:0x%lx)",
+ uc.args[0]);
+ }
+}
+
+struct handlers {
+ handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
+};
+
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ extern char vectors;
+
+ set_reg(vm, vcpuid, ARM64_SYS_REG(VBAR_EL1), (uint64_t)&vectors);
+}
+
+void route_exception(struct ex_regs *regs, int vector)
+{
+ struct handlers *handlers = (struct handlers *)exception_handlers;
+ bool valid_ec;
+ int ec = 0;
+
+ switch (vector) {
+ case VECTOR_SYNC_CURRENT:
+ case VECTOR_SYNC_LOWER_64:
+ ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
+ valid_ec = true;
+ break;
+ case VECTOR_IRQ_CURRENT:
+ case VECTOR_IRQ_LOWER_64:
+ case VECTOR_FIQ_CURRENT:
+ case VECTOR_FIQ_LOWER_64:
+ case VECTOR_ERROR_CURRENT:
+ case VECTOR_ERROR_LOWER_64:
+ ec = 0;
+ valid_ec = false;
+ break;
+ default:
+ valid_ec = false;
+ goto unexpected_exception;
+ }
+
+ if (handlers && handlers->exception_handlers[vector][ec])
+ return handlers->exception_handlers[vector][ec](regs);
+
+unexpected_exception:
+ kvm_exit_unexpected_exception(vector, ec, valid_ec);
+}
+
+void vm_init_descriptor_tables(struct kvm_vm *vm)
+{
+	vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
+				      vm->page_size);
+
+ *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+}
+
+void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
+ void (*handler)(struct ex_regs *))
+{
+ struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
+
+ assert(VECTOR_IS_SYNC(vector));
+ assert(vector < VECTOR_NUM);
+ assert(ec < ESR_EC_NUM);
+ handlers->exception_handlers[vector][ec] = handler;
+}
+
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
+ void (*handler)(struct ex_regs *))
+{
+ struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
+
+ assert(!VECTOR_IS_SYNC(vector));
+ assert(vector < VECTOR_NUM);
+ handlers->exception_handlers[vector][0] = handler;
}
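
A hypothetical test-side sequence showing how the pieces above fit together; handle_brk and install_handlers are illustrative names, not taken from the patch:

static void handle_brk(struct ex_regs *regs)
{
	/* Skip the 4-byte brk instruction that raised the exception. */
	regs->pc += 4;
}

static void install_handlers(struct kvm_vm *vm, uint32_t vcpuid)
{
	vm_init_descriptor_tables(vm);			/* allocate the handler table */
	vcpu_init_descriptor_tables(vm, vcpuid);	/* point VBAR_EL1 at vectors */
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS,
				handle_brk);
}
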
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 2f37b90ee1a9..e0b0164e9af8 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -14,7 +14,7 @@ static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
return false;
- virt_pg_map(vm, gpa, gpa, 0);
+ virt_pg_map(vm, gpa, gpa);
ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
sync_global_to_guest(vm, ucall_exit_mmio_addr);
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index bc75a91e00a6..eac44f5d0db0 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -111,8 +111,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
* by the image and it needs to have sufficient available physical pages, to
* back the virtual pages used to load the image.
*/
-void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
- uint32_t data_memslot, uint32_t pgd_memslot)
+void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
{
off_t offset, offset_rv;
Elf64_Ehdr hdr;
@@ -164,8 +163,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
- vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart,
- data_memslot, pgd_memslot);
+ vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
"virtual memory for segment at requested min addr,\n"
" segment idx: %u\n"
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index fc83f6c5902d..5b56b57b3c20 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -32,6 +32,34 @@ static void *align(void *x, size_t size)
}
/*
+ * Open KVM_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ * flags - The flags to pass when opening KVM_DEV_PATH.
+ *
+ * Return:
+ * The opened file descriptor of /dev/kvm.
+ */
+static int _open_kvm_dev_path_or_exit(int flags)
+{
+ int fd;
+
+ fd = open(KVM_DEV_PATH, flags);
+ if (fd < 0) {
+ print_skip("%s not available, is KVM loaded? (errno: %d)",
+ KVM_DEV_PATH, errno);
+ exit(KSFT_SKIP);
+ }
+
+ return fd;
+}
+
+int open_kvm_dev_path_or_exit(void)
+{
+ return _open_kvm_dev_path_or_exit(O_RDONLY);
+}
+
+/*
* Capability
*
* Input Args:
@@ -52,12 +80,9 @@ int kvm_check_cap(long cap)
int ret;
int kvm_fd;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
-
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
- TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+ TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
" rc: %i errno: %i", ret, errno);
close(kvm_fd);
@@ -128,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
static void vm_open(struct kvm_vm *vm, int perm)
{
- vm->kvm_fd = open(KVM_DEV_PATH, perm);
- if (vm->kvm_fd < 0)
- exit(KSFT_SKIP);
+ vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
print_skip("immediate_exit not available");
@@ -152,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
+ [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@@ -169,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 40, 48, 0x1000, 12 },
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
+ { 47, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -203,7 +228,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_ASSERT(vm != NULL, "Insufficient Memory");
INIT_LIST_HEAD(&vm->vcpus);
- INIT_LIST_HEAD(&vm->userspace_mem_regions);
+ vm->regions.gpa_tree = RB_ROOT;
+ vm->regions.hva_tree = RB_ROOT;
+ hash_init(vm->regions.slot_hash);
vm->mode = mode;
vm->type = 0;
@@ -252,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
break;
+ case VM_MODE_P47V64_4K:
+ vm->pgtable_levels = 5;
+ break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
@@ -283,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ * nr_vcpus - VCPU count
+ * slot0_mem_pages - Slot0 physical memory size
+ * extra_mem_pages - Non-slot0 physical memory total size
+ * num_percpu_pages - Per-cpu physical memory pages
+ * guest_code - Guest entry point
+ * vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with a customized slot0 memory size of at least 512 pages.
+ * extra_mem_pages is only used to calculate the maximum page table size;
+ * no memory is actually allocated for non-slot0 memory in this function.
+ */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[])
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
{
+ uint64_t vcpu_pages, extra_pg_pages, pages;
+ struct kvm_vm *vm;
+ int i;
+
+	/* Force slot0 memory size to be no smaller than DEFAULT_GUEST_PHY_PAGES */
+ if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+ slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
/* The maximum page table size for a memory region will be when the
* smallest pages are used. Considering each page contains x page
* table descriptors, the total extra size for page tables (for extra
* N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
* than N/x*2.
*/
- uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
- uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
- uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
- struct kvm_vm *vm;
- int i;
+ vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
"nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -306,7 +365,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
pages = vm_adjust_num_guest_pages(mode, pages);
vm = vm_create(mode, pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
vm_create_irqchip(vm);
@@ -316,10 +375,6 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
vm_vcpu_add_default(vm, vcpuid, guest_code);
-
-#ifdef __x86_64__
- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
-#endif
}
return vm;
@@ -329,8 +384,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[])
{
- return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
- num_percpu_pages, guest_code, vcpuids);
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+ extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
}
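For callers, the visible change is the new slot0_mem_pages argument between nr_vcpus and extra_mem_pages. A minimal usage sketch under the updated signature (guest_code stands in for a test's guest entry point; the counts are illustrative):

    struct kvm_vm *vm;

    /* 2 vCPUs, a 1024-page slot0, no extra or per-vCPU memory. */
    vm = vm_create_with_vcpus(VM_MODE_DEFAULT, 2, 1024 /* slot0_mem_pages */,
                              0 /* extra_mem_pages */, 0 /* num_percpu_pages */,
                              guest_code, NULL /* vcpuids: defaults to 0..N-1 */);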
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
@@ -355,13 +410,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
*/
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
+ int ctr;
struct userspace_mem_region *region;
vm_open(vmp, perm);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
- list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
+ hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
@@ -424,14 +480,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
- struct userspace_mem_region *region;
+ struct rb_node *node;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ for (node = vm->regions.gpa_tree.rb_node; node; ) {
+ struct userspace_mem_region *region =
+ container_of(node, struct userspace_mem_region, gpa_node);
uint64_t existing_start = region->region.guest_phys_addr;
uint64_t existing_end = region->region.guest_phys_addr
+ region->region.memory_size - 1;
if (start <= existing_end && end >= existing_start)
return region;
+
+ if (start < existing_start)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
}
return NULL;
@@ -546,11 +609,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
- struct userspace_mem_region *region)
+ struct userspace_mem_region *region,
+ bool unlink)
{
int ret;
- list_del(&region->list);
+ if (unlink) {
+ rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+ rb_erase(&region->hva_node, &vm->regions.hva_tree);
+ hash_del(&region->slot_node);
+ }
region->region.memory_size = 0;
ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
@@ -569,14 +637,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
*/
void kvm_vm_free(struct kvm_vm *vmp)
{
- struct userspace_mem_region *region, *tmp;
+ int ctr;
+ struct hlist_node *node;
+ struct userspace_mem_region *region;
if (vmp == NULL)
return;
/* Free userspace_mem_regions. */
- list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
- __vm_mem_region_delete(vmp, region);
+ hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
+ __vm_mem_region_delete(vmp, region, false);
/* Free sparsebit arrays. */
sparsebit_free(&vmp->vpages_valid);
@@ -658,13 +728,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
return 0;
}
+static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
+ struct userspace_mem_region *region)
+{
+ struct rb_node **cur, *parent;
+
+ for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
+ struct userspace_mem_region *cregion;
+
+ cregion = container_of(*cur, typeof(*cregion), gpa_node);
+ parent = *cur;
+ if (region->region.guest_phys_addr <
+ cregion->region.guest_phys_addr)
+ cur = &(*cur)->rb_left;
+ else {
+ TEST_ASSERT(region->region.guest_phys_addr !=
+ cregion->region.guest_phys_addr,
+ "Duplicate GPA in region tree");
+
+ cur = &(*cur)->rb_right;
+ }
+ }
+
+ rb_link_node(&region->gpa_node, parent, cur);
+ rb_insert_color(&region->gpa_node, gpa_tree);
+}
+
+static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
+ struct userspace_mem_region *region)
+{
+ struct rb_node **cur, *parent;
+
+ for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
+ struct userspace_mem_region *cregion;
+
+ cregion = container_of(*cur, typeof(*cregion), hva_node);
+ parent = *cur;
+ if (region->host_mem < cregion->host_mem)
+ cur = &(*cur)->rb_left;
+ else {
+ TEST_ASSERT(region->host_mem !=
+ cregion->host_mem,
+ "Duplicate HVA in region tree");
+
+ cur = &(*cur)->rb_right;
+ }
+ }
+
+ rb_link_node(&region->hva_node, parent, cur);
+ rb_insert_color(&region->hva_node, hva_tree);
+}
+
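Taken together, the two trees and the hash cover the library's three lookup patterns; a sketch using helpers that all appear in this patch (userspace_mem_region_find is file-local to kvm_util.c):

    /* gpa_tree: range lookup by guest physical address. */
    struct userspace_mem_region *r = userspace_mem_region_find(vm, gpa, gpa);

    /* hva_tree: reverse lookup from a host virtual address. */
    vm_paddr_t g = addr_hva2gpa(vm, hva);

    /* slot_hash: direct lookup by memslot number. */
    struct userspace_mem_region *s = memslot2region(vm, slot);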
/*
* VM Userspace Memory Region Add
*
* Input Args:
* vm - Virtual Machine
- * backing_src - Storage source for this region.
- * NULL to use anonymous memory.
+ * src_type - Storage source for this region
+ * (e.g. VM_MEM_SRC_ANONYMOUS for anonymous memory).
* guest_paddr - Starting guest physical address
* slot - KVM region slot
* npages - Number of physical pages
@@ -722,7 +843,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
(uint64_t) region->region.memory_size);
/* Confirm no region with the requested slot already exists. */
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+ slot) {
if (region->region.slot != slot)
continue;
@@ -755,11 +877,30 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
if (alignment > 1)
region->mmap_size += alignment;
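+ /* fd == -1 selects an anonymous mapping; shared backings replace it with a memfd below. */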
+ region->fd = -1;
+ if (backing_src_is_shared(src_type)) {
+ int memfd_flags = MFD_CLOEXEC;
+
+ if (src_type == VM_MEM_SRC_SHARED_HUGETLB)
+ memfd_flags |= MFD_HUGETLB;
+
+ region->fd = memfd_create("kvm_selftest", memfd_flags);
+ TEST_ASSERT(region->fd != -1,
+ "memfd_create failed, errno: %i", errno);
+
+ ret = ftruncate(region->fd, region->mmap_size);
+ TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno);
+
+ ret = fallocate(region->fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
+ region->mmap_size);
+ TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno);
+ }
+
region->mmap_start = mmap(NULL, region->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS
- | vm_mem_backing_src_alias(src_type)->flag,
- -1, 0);
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd, 0);
TEST_ASSERT(region->mmap_start != MAP_FAILED,
"test_malloc failed, mmap_start: %p errno: %i",
region->mmap_start, errno);
@@ -793,8 +934,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
ret, errno, slot, flags,
guest_paddr, (uint64_t) region->region.memory_size);
- /* Add to linked-list of memory regions. */
- list_add(&region->list, &vm->userspace_mem_regions);
+ /* Add to quick lookup data structures */
+ vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
+ vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
+ hash_add(vm->regions.slot_hash, &region->slot_node, slot);
+
+ /* If shared memory, create an alias. */
+ if (region->fd >= 0) {
+ region->mmap_alias = mmap(NULL, region->mmap_size,
+ PROT_READ | PROT_WRITE,
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd, 0);
+ TEST_ASSERT(region->mmap_alias != MAP_FAILED,
+ "mmap of alias failed, errno: %i", errno);
+
+ /* Align host alias address */
+ region->host_alias = align(region->mmap_alias, alignment);
+ }
}
/*
@@ -817,10 +973,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+ memslot)
if (region->region.slot == memslot)
return region;
- }
fprintf(stderr, "No mem region with the requested slot found,\n"
" requested slot: %u\n", memslot);
@@ -905,7 +1061,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
- __vm_mem_region_delete(vm, memslot2region(vm, slot));
+ __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
/*
@@ -925,9 +1081,7 @@ static int vcpu_mmap_sz(void)
{
int dev_fd, ret;
- dev_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (dev_fd < 0)
- exit(KSFT_SKIP);
+ dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
@@ -1093,12 +1247,13 @@ va_found:
* a unique set of pages, with the minimum real allocation being at least
* a page.
*/
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot)
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
- virt_pgd_alloc(vm, pgd_memslot);
+ virt_pgd_alloc(vm);
+ vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
+ KVM_UTIL_MIN_PFN * vm->page_size, 0);
/*
* Find an unused range of virtual page addresses of at least
@@ -1108,13 +1263,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
/* Map the virtual pages. */
for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
- pages--, vaddr += vm->page_size) {
- vm_paddr_t paddr;
-
- paddr = vm_phy_page_alloc(vm,
- KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
+ pages--, vaddr += vm->page_size, paddr += vm->page_size) {
- virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+ virt_pg_map(vm, vaddr, paddr);
sparsebit_set(vm->vpages_mapped,
vaddr >> vm->page_shift);
@@ -1124,6 +1275,44 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
}
/*
+ * VM Virtual Address Allocate Pages
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * nr_pages - Number of system pages to allocate
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Starting guest virtual address
+ *
+ * Allocates at least N system pages worth of bytes within the virtual address
+ * space of the vm.
+ */
+vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
+{
+ return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
+}
+
+/*
+ * VM Virtual Address Allocate Page
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Starting guest virtual address
+ *
+ * Allocates at least one system page worth of bytes within the virtual address
+ * space of the vm.
+ */
+vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
+{
+ return vm_vaddr_alloc_pages(vm, 1);
+}
+
+/*
* Map a range of VM virtual address to the VM's physical address
*
* Input Args:
@@ -1141,7 +1330,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
* @npages starting at @vaddr to the page range starting at @paddr.
*/
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- unsigned int npages, uint32_t pgd_memslot)
+ unsigned int npages)
{
size_t page_size = vm->page_size;
size_t size = npages * page_size;
@@ -1150,7 +1339,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
- virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+ virt_pg_map(vm, vaddr, paddr);
vaddr += page_size;
paddr += page_size;
}
@@ -1177,16 +1366,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
struct userspace_mem_region *region;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
- if ((gpa >= region->region.guest_phys_addr)
- && (gpa <= (region->region.guest_phys_addr
- + region->region.memory_size - 1)))
- return (void *) ((uintptr_t) region->host_mem
- + (gpa - region->region.guest_phys_addr));
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ if (!region) {
+ TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+ return NULL;
}
- TEST_FAIL("No vm physical memory at 0x%lx", gpa);
- return NULL;
+ return (void *)((uintptr_t)region->host_mem
+ + (gpa - region->region.guest_phys_addr));
}
/*
@@ -1208,15 +1395,22 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
*/
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
- struct userspace_mem_region *region;
+ struct rb_node *node;
+
+ for (node = vm->regions.hva_tree.rb_node; node; ) {
+ struct userspace_mem_region *region =
+ container_of(node, struct userspace_mem_region, hva_node);
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
- if ((hva >= region->host_mem)
- && (hva <= (region->host_mem
- + region->region.memory_size - 1)))
- return (vm_paddr_t) ((uintptr_t)
- region->region.guest_phys_addr
- + (hva - (uintptr_t) region->host_mem));
+ if (hva >= region->host_mem) {
+ if (hva <= (region->host_mem
+ + region->region.memory_size - 1))
+ return (vm_paddr_t)((uintptr_t)
+ region->region.guest_phys_addr
+ + (hva - (uintptr_t)region->host_mem));
+
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
@@ -1224,6 +1418,42 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
}
/*
+ * Address VM physical to Host Virtual *alias*.
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * gpa - VM physical address
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Equivalent address within the host virtual *alias* area, or NULL
+ * (without failing the test) if the guest memory is not shared (so
+ * no alias exists).
+ *
+ * When vm_create() and related functions are called with a shared memory
+ * src_type, we also create a writable, shared alias mapping of the
+ * underlying guest memory. This allows the host to manipulate guest memory
+ * without mapping that memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ */
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ struct userspace_mem_region *region;
+ uintptr_t offset;
+
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ if (!region)
+ return NULL;
+
+ if (!region->host_alias)
+ return NULL;
+
+ offset = gpa - region->region.guest_phys_addr;
+ return (void *) ((uintptr_t) region->host_alias + offset);
+}
+
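A short usage sketch for the alias (assumes the VM's regions were added with a shared src_type such as VM_MEM_SRC_SHMEM; gpa is illustrative):

    uint64_t *p = addr_gpa2alias(vm, gpa);

    TEST_ASSERT(p, "GPA is not backed by shared memory");
    /* The write lands in guest memory without touching the primary
     * mapping, so a userfaultfd armed on that mapping stays quiet. */
    *p = 0xdeadbeef;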
+/*
* VM Create IRQ Chip
*
* Input Args:
@@ -1822,6 +2052,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
+ int ctr;
struct userspace_mem_region *region;
struct vcpu *vcpu;
@@ -1829,7 +2060,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
fprintf(stream, "%*sMem Regions:\n", indent, "");
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
"host_virt: %p\n", indent + 2, "",
(uint64_t) region->region.guest_phys_addr,
@@ -1978,6 +2209,14 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+ return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
/*
* Address Guest Virtual to Host Virtual
*
@@ -2015,10 +2254,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
if (vm == NULL) {
/* Ensure that the KVM vendor-specific module is loaded. */
- f = fopen(KVM_DEV_PATH, "r");
- TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
- errno);
- fclose(f);
+ close(open_kvm_dev_path_or_exit());
}
f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
@@ -2041,7 +2277,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
return vm->page_shift;
}
-unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+uint64_t vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
}
@@ -2090,3 +2326,15 @@ unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
return vm_adjust_num_guest_pages(mode, n);
}
+
+int vm_get_stats_fd(struct kvm_vm *vm)
+{
+ return ioctl(vm->fd, KVM_GET_STATS_FD, NULL);
+}
+
+int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+
+ return ioctl(vcpu->fd, KVM_GET_STATS_FD, NULL);
+}
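A minimal sketch of consuming the new FD, assuming the kvm_stats_header layout from the KVM binary-stats ABI that KVM_GET_STATS_FD exposes (read()/close() from <unistd.h>):

    struct kvm_stats_header hdr;	/* from <linux/kvm.h> */
    int stats_fd = vm_get_stats_fd(vm);
    ssize_t ret = read(stats_fd, &hdr, sizeof(hdr));

    TEST_ASSERT(ret == sizeof(hdr), "Failed to read stats header");
    pr_info("%u stat descriptors, data at offset 0x%x\n",
            hdr.num_desc, hdr.data_offset);
    close(stats_fd);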
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 91ce1b5d480b..a03febc24ba6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -8,6 +8,9 @@
#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
#define SELFTEST_KVM_UTIL_INTERNAL_H
+#include "linux/hashtable.h"
+#include "linux/rbtree.h"
+
#include "sparsebit.h"
struct userspace_mem_region {
@@ -16,9 +19,13 @@ struct userspace_mem_region {
int fd;
off_t offset;
void *host_mem;
+ void *host_alias;
void *mmap_start;
+ void *mmap_alias;
size_t mmap_size;
- struct list_head list;
+ struct rb_node gpa_node;
+ struct rb_node hva_node;
+ struct hlist_node slot_node;
};
struct vcpu {
@@ -31,6 +38,12 @@ struct vcpu {
uint32_t dirty_gfns_count;
};
+struct userspace_mem_regions {
+ struct rb_root gpa_tree;
+ struct rb_root hva_tree;
+ DECLARE_HASHTABLE(slot_hash, 9);
+};
+
struct kvm_vm {
int mode;
unsigned long type;
@@ -43,7 +56,7 @@ struct kvm_vm {
unsigned int va_bits;
uint64_t max_gfn;
struct list_head vcpus;
- struct list_head userspace_mem_regions;
+ struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
bool has_irqchip;
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 81490b9b4e32..b488f4aefea8 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020, Google LLC.
*/
+#include <inttypes.h>
#include "kvm_util.h"
#include "perf_test_util.h"
@@ -68,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
- vm = vm_create_with_vcpus(mode, vcpus,
+ vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
0, guest_code, NULL);
@@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
*/
TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
"Requested more guest memory than address space allows.\n"
- " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+ " guest pages: %" PRIx64 " max gfn: %" PRIx64
+ " vcpus: %d wss: %" PRIx64 "]\n",
guest_num_pages, vm_get_max_gfn(vm), vcpus,
vcpu_memory_bytes);
@@ -99,7 +101,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
guest_num_pages, 0);
/* Do mapping for the demand paging memory slot */
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
ucall_init(vm, NULL);
diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c
new file mode 100644
index 000000000000..a703f0194ea3
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/rbtree.c
@@ -0,0 +1 @@
+#include "../../../../lib/rbtree.c"
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 0152f356c099..f87c7137598e 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -9,11 +9,9 @@
#include "kvm_util.h"
#include "../kvm_util_internal.h"
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
#define PAGES_PER_REGION 4
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
@@ -24,7 +22,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
return;
paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
vm->pgd = paddr;
@@ -36,12 +34,12 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
* a page table (ri == 4). Returns a suitable region/segment table entry
* which points to the freshly allocated pages.
*/
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
+static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
uint64_t taddr;
taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
return (taddr & REGION_ENTRY_ORIGIN)
@@ -49,8 +47,7 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
- uint32_t memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
int ri, idx;
uint64_t *entry;
@@ -77,7 +74,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
for (ri = 1; ri <= 4; ri++) {
idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
if (entry[idx] & REGION_ENTRY_INVALID)
- entry[idx] = virt_alloc_region(vm, ri, memslot);
+ entry[idx] = virt_alloc_region(vm, ri);
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
@@ -170,7 +167,7 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
vm->page_size);
stack_vaddr = vm_vaddr_alloc(vm, stack_size,
- DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);
+ DEFAULT_GUEST_STACK_VADDR_MIN);
vm_vcpu_add(vm, vcpuid);
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 63d2bc7d757b..af1031fed97f 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -166,72 +166,89 @@ size_t get_def_hugetlb_pagesz(void)
return 0;
}
+#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
+#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
+
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
{
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
.name = "anonymous",
- .flag = 0,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_THP] = {
.name = "anonymous_thp",
- .flag = 0,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
.name = "anonymous_hugetlb",
- .flag = MAP_HUGETLB,
+ .flag = ANON_HUGE_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
.name = "anonymous_hugetlb_16kb",
- .flag = MAP_HUGETLB | MAP_HUGE_16KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
.name = "anonymous_hugetlb_64kb",
- .flag = MAP_HUGETLB | MAP_HUGE_64KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
.name = "anonymous_hugetlb_512kb",
- .flag = MAP_HUGETLB | MAP_HUGE_512KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
.name = "anonymous_hugetlb_1mb",
- .flag = MAP_HUGETLB | MAP_HUGE_1MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
.name = "anonymous_hugetlb_2mb",
- .flag = MAP_HUGETLB | MAP_HUGE_2MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
.name = "anonymous_hugetlb_8mb",
- .flag = MAP_HUGETLB | MAP_HUGE_8MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
.name = "anonymous_hugetlb_16mb",
- .flag = MAP_HUGETLB | MAP_HUGE_16MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
.name = "anonymous_hugetlb_32mb",
- .flag = MAP_HUGETLB | MAP_HUGE_32MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
.name = "anonymous_hugetlb_256mb",
- .flag = MAP_HUGETLB | MAP_HUGE_256MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
.name = "anonymous_hugetlb_512mb",
- .flag = MAP_HUGETLB | MAP_HUGE_512MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
.name = "anonymous_hugetlb_1gb",
- .flag = MAP_HUGETLB | MAP_HUGE_1GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
.name = "anonymous_hugetlb_2gb",
- .flag = MAP_HUGETLB | MAP_HUGE_2GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
.name = "anonymous_hugetlb_16gb",
- .flag = MAP_HUGETLB | MAP_HUGE_16GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
+ },
+ [VM_MEM_SRC_SHMEM] = {
+ .name = "shmem",
+ .flag = MAP_SHARED,
+ },
+ [VM_MEM_SRC_SHARED_HUGETLB] = {
+ .name = "shared_hugetlb",
+ /*
+ * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since
+ * we're using "file backed" memory, we need to specify
+ * this when the FD is created, not when the area is
+ * mapped.
+ */
+ .flag = MAP_SHARED,
},
};
_Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
@@ -250,10 +267,12 @@ size_t get_backing_src_pagesz(uint32_t i)
switch (i) {
case VM_MEM_SRC_ANONYMOUS:
+ case VM_MEM_SRC_SHMEM:
return getpagesize();
case VM_MEM_SRC_ANONYMOUS_THP:
return get_trans_hugepagesz();
case VM_MEM_SRC_ANONYMOUS_HUGETLB:
+ case VM_MEM_SRC_SHARED_HUGETLB:
return get_def_hugetlb_pagesz();
default:
return MAP_HUGE_PAGE_SIZE(flag);
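Selecting the new shared backings from a test is unchanged API-wise; a sketch (GPA, slot and page count are illustrative):

    /* Backed by an MFD_HUGETLB memfd created inside the library. */
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_SHARED_HUGETLB,
                                0x10000000 /* guest_paddr */, 1 /* slot */,
                                512 /* npages */, 0 /* flags */);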
diff --git a/tools/testing/selftests/kvm/lib/x86_64/apic.c b/tools/testing/selftests/kvm/lib/x86_64/apic.c
new file mode 100644
index 000000000000..7168e25c194e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/apic.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tools/testing/selftests/kvm/lib/x86_64/apic.c
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#include "apic.h"
+
+void apic_disable(void)
+{
+ wrmsr(MSR_IA32_APICBASE,
+ rdmsr(MSR_IA32_APICBASE) &
+ ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD));
+}
+
+void xapic_enable(void)
+{
+ uint64_t val = rdmsr(MSR_IA32_APICBASE);
+
+ /* Per SDM: to enable xAPIC while in x2APIC mode, first disable the APIC. */
+ if (val & MSR_IA32_APICBASE_EXTD) {
+ apic_disable();
+ wrmsr(MSR_IA32_APICBASE,
+ rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE);
+ } else if (!(val & MSR_IA32_APICBASE_ENABLE)) {
+ wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE);
+ }
+
+ /*
+ * Per SDM: the reset value of the spurious interrupt vector register
+ * has the APIC software-enable bit clear (0). It must be set in
+ * addition to the enable bit in the MSR.
+ */
+ val = xapic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED;
+ xapic_write_reg(APIC_SPIV, val);
+}
+
+void x2apic_enable(void)
+{
+ wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+ MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+ x2apic_write_reg(APIC_SPIV,
+ x2apic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED);
+}
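A sketch of typical guest-side use (xapic_read_reg and GUEST_SYNC come from the selftests headers; the flow is illustrative):

    static void guest_code(void)
    {
            uint32_t id;

            xapic_enable();
            id = xapic_read_reg(APIC_ID);	/* xAPIC MMIO is now accessible */
            GUEST_SYNC(id);
    }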
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index a8906e60a108..28cb881f440d 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -17,13 +17,10 @@
#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10
-/* Minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
vm_vaddr_t exception_handlers;
/* Virtual translation table structure declarations */
-struct pageMapL4Entry {
+struct pageUpperEntry {
uint64_t present:1;
uint64_t writable:1;
uint64_t user:1;
@@ -33,37 +30,7 @@ struct pageMapL4Entry {
uint64_t ignored_06:1;
uint64_t page_size:1;
uint64_t ignored_11_08:4;
- uint64_t address:40;
- uint64_t ignored_62_52:11;
- uint64_t execute_disable:1;
-};
-
-struct pageDirectoryPointerEntry {
- uint64_t present:1;
- uint64_t writable:1;
- uint64_t user:1;
- uint64_t write_through:1;
- uint64_t cache_disable:1;
- uint64_t accessed:1;
- uint64_t ignored_06:1;
- uint64_t page_size:1;
- uint64_t ignored_11_08:4;
- uint64_t address:40;
- uint64_t ignored_62_52:11;
- uint64_t execute_disable:1;
-};
-
-struct pageDirectoryEntry {
- uint64_t present:1;
- uint64_t writable:1;
- uint64_t user:1;
- uint64_t write_through:1;
- uint64_t cache_disable:1;
- uint64_t accessed:1;
- uint64_t ignored_06:1;
- uint64_t page_size:1;
- uint64_t ignored_11_08:4;
- uint64_t address:40;
+ uint64_t pfn:40;
uint64_t ignored_62_52:11;
uint64_t execute_disable:1;
};
@@ -79,7 +46,7 @@ struct pageTableEntry {
uint64_t reserved_07:1;
uint64_t global:1;
uint64_t ignored_11_09:3;
- uint64_t address:40;
+ uint64_t pfn:40;
uint64_t ignored_62_52:11;
uint64_t execute_disable:1;
};
@@ -207,96 +174,211 @@ void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
}
}
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
{
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
/* If needed, create page map l4 table. */
if (!vm->pgd_created) {
- vm_paddr_t paddr = vm_phy_page_alloc(vm,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
- vm->pgd = paddr;
+ vm->pgd = vm_alloc_page_table(vm);
vm->pgd_created = true;
}
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot)
+static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
+ int level)
+{
+ uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
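+ /* Each level consumes 9 vaddr bits, starting at bit 12 for level 0 (the PTE level). */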
+ int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu;
+
+ return &page_table[index];
+}
+
+static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
+ uint64_t pt_pfn,
+ uint64_t vaddr,
+ uint64_t paddr,
+ int level,
+ enum x86_page_size page_size)
+{
+ struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+
+ if (!pte->present) {
+ pte->writable = true;
+ pte->present = true;
+ pte->page_size = (level == page_size);
+ if (pte->page_size)
+ pte->pfn = paddr >> vm->page_shift;
+ else
+ pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+ } else {
+ /*
+ * Entry already present. Assert that the caller doesn't want
+ * a hugepage at this level, and that there isn't a hugepage at
+ * this level.
+ */
+ TEST_ASSERT(level != page_size,
+ "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
+ page_size, vaddr);
+ TEST_ASSERT(!pte->page_size,
+ "Cannot create page table at level: %u, vaddr: 0x%lx\n",
+ level, vaddr);
+ }
+ return pte;
+}
+
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ enum x86_page_size page_size)
+{
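+ /* X86_PAGE_SIZE_4K/2M/1G are 0/1/2, so this yields 4 KiB, 2 MiB or 1 GiB. */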
+ const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
+ struct pageUpperEntry *pml4e, *pdpe, *pde;
+ struct pageTableEntry *pte;
+
+ TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
+ "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+
+ TEST_ASSERT((vaddr % pg_size) == 0,
+ "Virtual address not aligned,\n"
+ "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
+ "Invalid virtual address, vaddr: 0x%lx", vaddr);
+ TEST_ASSERT((paddr % pg_size) == 0,
+ "Physical address not aligned,\n"
+ " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
+ TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond maximum supported,\n"
+ " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ paddr, vm->max_gfn, vm->page_size);
+
+ /*
+ * Allocate upper level page tables, if not already present. Return
+ * early if a hugepage was created.
+ */
+ pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
+ vaddr, paddr, 3, page_size);
+ if (pml4e->page_size)
+ return;
+
+ pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
+ if (pdpe->page_size)
+ return;
+
+ pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
+ if (pde->page_size)
+ return;
+
+ /* Fill in page table entry. */
+ pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
+ TEST_ASSERT(!pte->present,
+ "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
+ pte->pfn = paddr >> vm->page_shift;
+ pte->writable = true;
+ pte->present = 1;
+}
+
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+ __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+}
+
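A sketch of the new hugepage path (the address is illustrative; it must be size-aligned and valid per the asserts above):

    /* Map one 2 MiB page; the walk stops at the PDE with page_size = 1. */
    __virt_pg_map(vm, 0x40000000 /* gva */, 0x40000000 /* gpa */,
                  X86_PAGE_SIZE_2M);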
+static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
+ uint64_t vaddr)
{
uint16_t index[4];
- struct pageMapL4Entry *pml4e;
+ struct pageUpperEntry *pml4e, *pdpe, *pde;
+ struct pageTableEntry *pte;
+ struct kvm_cpuid_entry2 *entry;
+ struct kvm_sregs sregs;
+ int max_phy_addr;
+ /* Set the bottom 52 bits. */
+ uint64_t rsvd_mask = 0x000fffffffffffff;
+
+ entry = kvm_get_supported_cpuid_index(0x80000008, 0);
+ max_phy_addr = entry->eax & 0x000000ff;
+ /* Clear the bottom bits of the reserved mask. */
+ rsvd_mask = (rsvd_mask >> max_phy_addr) << max_phy_addr;
+
+ /*
+ * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
+ * with 4-Level Paging and 5-Level Paging".
+ * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
+ * the XD flag (bit 63) is reserved.
+ */
+ vcpu_sregs_get(vm, vcpuid, &sregs);
+ if ((sregs.efer & EFER_NX) == 0) {
+ rsvd_mask |= (1ull << 63);
+ }
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
-
- TEST_ASSERT((vaddr % vm->page_size) == 0,
- "Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x",
- vaddr, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx",
vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
- "Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x",
- paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
- "Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ /*
+ * Based on the mode check above there are 48 bits in the vaddr, so
+ * shift left and then right by 16 to sign-extend bit 47 (the top bit).
+ */
+ TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
+ "Canonical check failed. The virtual address is invalid.");
index[0] = (vaddr >> 12) & 0x1ffu;
index[1] = (vaddr >> 21) & 0x1ffu;
index[2] = (vaddr >> 30) & 0x1ffu;
index[3] = (vaddr >> 39) & 0x1ffu;
- /* Allocate page directory pointer table if not present. */
pml4e = addr_gpa2hva(vm, vm->pgd);
- if (!pml4e[index[3]].present) {
- pml4e[index[3]].address = vm_phy_page_alloc(vm,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
- >> vm->page_shift;
- pml4e[index[3]].writable = true;
- pml4e[index[3]].present = true;
- }
+ TEST_ASSERT(pml4e[index[3]].present,
+ "Expected pml4e to be present for gva: 0x%08lx", vaddr);
+ TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
+ (rsvd_mask | (1ull << 7))) == 0,
+ "Unexpected reserved bits set.");
+
+ pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
+ TEST_ASSERT(pdpe[index[2]].present,
+ "Expected pdpe to be present for gva: 0x%08lx", vaddr);
+ TEST_ASSERT(pdpe[index[2]].page_size == 0,
+ "Expected pdpe to map a pde not a 1-GByte page.");
+ TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
+ "Unexpected reserved bits set.");
+
+ pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
+ TEST_ASSERT(pde[index[1]].present,
+ "Expected pde to be present for gva: 0x%08lx", vaddr);
+ TEST_ASSERT(pde[index[1]].page_size == 0,
+ "Expected pde to map a pte not a 2-MByte page.");
+ TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
+ "Unexpected reserved bits set.");
+
+ pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
+ TEST_ASSERT(pte[index[0]].present,
+ "Expected pte to be present for gva: 0x%08lx", vaddr);
+
+ return &pte[index[0]];
+}
- /* Allocate page directory table if not present. */
- struct pageDirectoryPointerEntry *pdpe;
- pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
- if (!pdpe[index[2]].present) {
- pdpe[index[2]].address = vm_phy_page_alloc(vm,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
- >> vm->page_shift;
- pdpe[index[2]].writable = true;
- pdpe[index[2]].present = true;
- }
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
+{
+ struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
- /* Allocate page table if not present. */
- struct pageDirectoryEntry *pde;
- pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
- if (!pde[index[1]].present) {
- pde[index[1]].address = vm_phy_page_alloc(vm,
- KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
- >> vm->page_shift;
- pde[index[1]].writable = true;
- pde[index[1]].present = true;
- }
+ return *(uint64_t *)pte;
+}
- /* Fill in page table entry. */
- struct pageTableEntry *pte;
- pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
- pte[index[0]].address = paddr >> vm->page_shift;
- pte[index[0]].writable = true;
- pte[index[0]].present = 1;
+void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
+ uint64_t pte)
+{
+ struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
+ vaddr);
+
+ *(uint64_t *)new_pte = pte;
}
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
- struct pageMapL4Entry *pml4e, *pml4e_start;
- struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
- struct pageDirectoryEntry *pde, *pde_start;
+ struct pageUpperEntry *pml4e, *pml4e_start;
+ struct pageUpperEntry *pdpe, *pdpe_start;
+ struct pageUpperEntry *pde, *pde_start;
struct pageTableEntry *pte, *pte_start;
if (!vm->pgd_created)
@@ -307,8 +389,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*s index hvaddr gpaddr "
"addr w exec dirty\n",
indent, "");
- pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
- vm->pgd);
+ pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
pml4e = &pml4e_start[n1];
if (!pml4e->present)
@@ -317,11 +398,10 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
" %u\n",
indent, "",
pml4e - pml4e_start, pml4e,
- addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
+ addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
pml4e->writable, pml4e->execute_disable);
- pdpe_start = addr_gpa2hva(vm, pml4e->address
- * vm->page_size);
+ pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
pdpe = &pdpe_start[n2];
if (!pdpe->present)
@@ -331,11 +411,10 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
indent, "",
pdpe - pdpe_start, pdpe,
addr_hva2gpa(vm, pdpe),
- (uint64_t) pdpe->address, pdpe->writable,
+ (uint64_t) pdpe->pfn, pdpe->writable,
pdpe->execute_disable);
- pde_start = addr_gpa2hva(vm,
- pdpe->address * vm->page_size);
+ pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
pde = &pde_start[n3];
if (!pde->present)
@@ -344,11 +423,10 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
"0x%-12lx 0x%-10lx %u %u\n",
indent, "", pde - pde_start, pde,
addr_hva2gpa(vm, pde),
- (uint64_t) pde->address, pde->writable,
+ (uint64_t) pde->pfn, pde->writable,
pde->execute_disable);
- pte_start = addr_gpa2hva(vm,
- pde->address * vm->page_size);
+ pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
pte = &pte_start[n4];
if (!pte->present)
@@ -359,7 +437,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
indent, "",
pte - pte_start, pte,
addr_hva2gpa(vm, pte),
- (uint64_t) pte->address,
+ (uint64_t) pte->pfn,
pte->writable,
pte->execute_disable,
pte->dirty,
@@ -480,9 +558,7 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint16_t index[4];
- struct pageMapL4Entry *pml4e;
- struct pageDirectoryPointerEntry *pdpe;
- struct pageDirectoryEntry *pde;
+ struct pageUpperEntry *pml4e, *pdpe, *pde;
struct pageTableEntry *pte;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -499,43 +575,39 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
if (!pml4e[index[3]].present)
goto unmapped_gva;
- pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
+ pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
if (!pdpe[index[2]].present)
goto unmapped_gva;
- pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
+ pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
if (!pde[index[1]].present)
goto unmapped_gva;
- pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
+ pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
if (!pte[index[0]].present)
goto unmapped_gva;
- return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);
+ return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
exit(EXIT_FAILURE);
}
-static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
- int pgd_memslot)
+static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
{
if (!vm->gdt)
- vm->gdt = vm_vaddr_alloc(vm, getpagesize(),
- KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);
+ vm->gdt = vm_vaddr_alloc_page(vm);
dt->base = vm->gdt;
dt->limit = getpagesize();
}
static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
- int selector, int gdt_memslot,
- int pgd_memslot)
+ int selector)
{
if (!vm->tss)
- vm->tss = vm_vaddr_alloc(vm, getpagesize(),
- KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);
+ vm->tss = vm_vaddr_alloc_page(vm);
memset(segp, 0, sizeof(*segp));
segp->base = vm->tss;
@@ -546,7 +618,7 @@ static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
kvm_seg_fill_gdt_64bit(vm, segp);
}
-static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
+static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
{
struct kvm_sregs sregs;
@@ -555,7 +627,7 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m
sregs.idt.limit = 0;
- kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);
+ kvm_setup_gdt(vm, &sregs.gdt);
switch (vm->mode) {
case VM_MODE_PXXV48_4K:
@@ -567,7 +639,7 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m
kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
- kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
+ kvm_setup_tss_64bit(vm, &sregs.tr, 0x18);
break;
default:
@@ -584,11 +656,11 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
struct kvm_regs regs;
vm_vaddr_t stack_vaddr;
stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
- DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);
+ DEFAULT_GUEST_STACK_VADDR_MIN);
/* Create VCPU */
vm_vcpu_add(vm, vcpuid);
- vcpu_setup(vm, vcpuid, 0, 0);
+ vcpu_setup(vm, vcpuid);
/* Setup guest general purpose registers */
vcpu_regs_get(vm, vcpuid, &regs);
@@ -600,6 +672,9 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
/* Setup the MP state */
mp_state.mp_state = 0;
vcpu_set_mp_state(vm, vcpuid, &mp_state);
+
+ /* Setup supported CPUIDs */
+ vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
}
/*
@@ -657,9 +732,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return cpuid;
cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -691,9 +764,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
@@ -986,9 +1057,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
struct kvm_msr_list *list;
int nmsrs, r, kvm_fd;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
nmsrs = kvm_get_num_msrs_fd(kvm_fd);
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
@@ -1207,7 +1276,7 @@ static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
void kvm_exit_unexpected_vector(uint32_t value)
{
- outl(UNEXPECTED_VECTOR_PORT, value);
+ ucall(UCALL_UNHANDLED, 1, value);
}
void route_exception(struct ex_regs *regs)
@@ -1228,8 +1297,8 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
extern void *idt_handlers;
int i;
- vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0);
- vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0);
+ vm->idt = vm_vaddr_alloc_page(vm);
+ vm->handlers = vm_vaddr_alloc_page(vm);
/* Handlers have the same address in both address spaces.*/
for (i = 0; i < NUM_INTERRUPTS; i++)
set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
@@ -1250,8 +1319,8 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
-void vm_handle_exception(struct kvm_vm *vm, int vector,
- void (*handler)(struct ex_regs *))
+void vm_install_exception_handler(struct kvm_vm *vm, int vector,
+ void (*handler)(struct ex_regs *))
{
vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
@@ -1260,16 +1329,13 @@ void vm_handle_exception(struct kvm_vm *vm, int vector,
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
- if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
- && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
- && vcpu_state(vm, vcpuid)->io.size == 4) {
- /* Grab pointer to io data */
- uint32_t *data = (void *)vcpu_state(vm, vcpuid)
- + vcpu_state(vm, vcpuid)->io.data_offset;
-
- TEST_ASSERT(false,
- "Unexpected vectored event in guest (vector:0x%x)",
- *data);
+ struct ucall uc;
+
+ if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) {
+ uint64_t vector = uc.args[0];
+
+ TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
+ vector);
}
}
@@ -1312,9 +1378,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
return cpuid;
cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 827fe6028dd4..2ac98d70d02b 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -30,17 +30,14 @@ u64 rflags;
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
- vm_vaddr_t svm_gva = vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
- svm->vmcb = (void *)vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
- svm->save_area = (void *)vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ svm->save_area = (void *)vm_vaddr_alloc_page(vm);
svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 2448b30e8efa..d089d8b850b5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -77,50 +77,48 @@ int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
- vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
/* Setup of a region of guest memory for the vmxon region. */
- vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
/* Setup of a region of guest memory for a vmcs. */
- vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
/* Setup of a region of guest memory for the MSR bitmap. */
- vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->msr = (void *)vm_vaddr_alloc_page(vm);
vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
memset(vmx->msr_hva, 0, getpagesize());
/* Setup of a region of guest memory for the shadow VMCS. */
- vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
- vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
memset(vmx->vmread_hva, 0, getpagesize());
- vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
/* Setup of a region of guest memory for the VP Assist page. */
- vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
/* Setup of a region of guest memory for the enlightened VMCS. */
- vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
vmx->enlightened_vmcs_hva =
addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
vmx->enlightened_vmcs_gpa =
@@ -395,7 +393,7 @@ void nested_vmx_check_supported(void)
}
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+ uint64_t nested_paddr, uint64_t paddr)
{
uint16_t index[4];
struct eptPageTableEntry *pml4e;
@@ -428,9 +426,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
/* Allocate page directory pointer table if not present. */
pml4e = vmx->eptp_hva;
if (!pml4e[index[3]].readable) {
- pml4e[index[3]].address = vm_phy_page_alloc(vm,
- KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
- >> vm->page_shift;
+ pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
pml4e[index[3]].writable = true;
pml4e[index[3]].readable = true;
pml4e[index[3]].executable = true;
@@ -440,9 +436,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
struct eptPageTableEntry *pdpe;
pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
if (!pdpe[index[2]].readable) {
- pdpe[index[2]].address = vm_phy_page_alloc(vm,
- KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
- >> vm->page_shift;
+ pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
pdpe[index[2]].writable = true;
pdpe[index[2]].readable = true;
pdpe[index[2]].executable = true;
@@ -452,9 +446,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
struct eptPageTableEntry *pde;
pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
if (!pde[index[1]].readable) {
- pde[index[1]].address = vm_phy_page_alloc(vm,
- KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
- >> vm->page_shift;
+ pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
pde[index[1]].writable = true;
pde[index[1]].readable = true;
pde[index[1]].executable = true;
@@ -494,8 +486,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
* page range starting at nested_paddr to the page range starting at paddr.
*/
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size,
- uint32_t eptp_memslot)
+ uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
size_t page_size = vm->page_size;
size_t npages = size / page_size;
@@ -504,7 +495,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
- nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+ nested_pg_map(vmx, vm, nested_paddr, paddr);
nested_paddr += page_size;
paddr += page_size;
}
@@ -514,7 +505,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
* physical pages in VM.
*/
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t memslot, uint32_t eptp_memslot)
+ uint32_t memslot)
{
sparsebit_idx_t i, last;
struct userspace_mem_region *region =
@@ -530,24 +521,21 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
nested_map(vmx, vm,
(uint64_t)i << vm->page_shift,
(uint64_t)i << vm->page_shift,
- 1 << vm->page_shift,
- eptp_memslot);
+ 1 << vm->page_shift);
}
}
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
- vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t eptp_memslot)
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
- vmx->apic_access = (void *)vm_vaddr_alloc(vm, getpagesize(),
- 0x10000, 0, 0);
+ vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 6096bf0a5b34..98351ba0933c 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -71,14 +71,22 @@ struct memslot_antagonist_args {
};
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
- uint64_t nr_modifications, uint64_t gpa)
+ uint64_t nr_modifications)
{
+ const uint64_t pages = 1;
+ uint64_t gpa;
int i;
+ /*
+ * Add the dummy memslot just below the perf_test_util memslot, which is
+ * at the top of the guest physical address space.
+ */
+ gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
+
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
- DUMMY_MEMSLOT_INDEX, 1, 0);
+ DUMMY_MEMSLOT_INDEX, pages, 0);
vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
}
@@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Started all vCPUs\n");
add_remove_memslot(vm, p->memslot_modification_delay,
- p->nr_memslot_modifications,
- guest_test_phys_mem +
- (guest_percpu_mem_size * nr_vcpus) +
- perf_test_args.host_page_size +
- perf_test_args.guest_page_size);
+ p->nr_memslot_modifications);
run_vcpus = false;
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
new file mode 100644
index 000000000000..d6e381e01db7
--- /dev/null
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A memslot-related performance benchmark.
+ *
+ * Copyright (C) 2021 Oracle and/or its affiliates.
+ *
+ * Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#define MEM_SIZE ((512U << 20) + 4096)
+#define MEM_SIZE_PAGES (MEM_SIZE / 4096)
+#define MEM_GPA 0x10000000UL
+#define MEM_AUX_GPA MEM_GPA
+#define MEM_SYNC_GPA MEM_AUX_GPA
+#define MEM_TEST_GPA (MEM_AUX_GPA + 4096)
+#define MEM_TEST_SIZE (MEM_SIZE - 4096)
+static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
+static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+
+/*
+ * 32 MiB is the maximum size that gets well over 100 iterations on 509
+ * slots. Considering that each slot needs to have at least one page, up
+ * to 8194 slots in use can then be tested (although with slightly
+ * limited resolution).
+ */
+#define MEM_SIZE_MAP ((32U << 20) + 4096)
+#define MEM_SIZE_MAP_PAGES (MEM_SIZE_MAP / 4096)
+#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096)
+#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
+static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+
+/*
+ * 128 MiB is the minimum size that fills 32k slots with at least one page
+ * each while at the same time getting 100+ iterations in such a test.
+ */
+#define MEM_TEST_UNMAP_SIZE (128U << 20)
+#define MEM_TEST_UNMAP_SIZE_PAGES (MEM_TEST_UNMAP_SIZE / 4096)
+/* 2 MiB chunk size like a typical huge page */
+#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12))
+static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
+ "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
+ "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
+ (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
+ "invalid unmap test region size");
+
+/*
+ * For the move active test the middle of the test area is placed on
+ * a memslot boundary: half lies in the memslot being moved, half in
+ * other memslot(s).
+ *
+ * When running this test with 32k memslots (32764, really), each memslot
+ * contains 4 pages.
+ * The last one additionally contains the remaining 21 pages of memory,
+ * for a total size of 25 pages.
+ * Hence, the maximum size here is 50 pages.
+ */
+#define MEM_TEST_MOVE_SIZE_PAGES (50)
+#define MEM_TEST_MOVE_SIZE (MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE)
+static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
+ "invalid move test region size");
+
+#define MEM_TEST_VAL_1 0x1122334455667788
+#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
+
+struct vm_data {
+ struct kvm_vm *vm;
+ pthread_t vcpu_thread;
+ uint32_t nslots;
+ uint64_t npages;
+ uint64_t pages_per_slot;
+ void **hva_slots;
+ bool mmio_ok;
+ uint64_t mmio_gpa_min;
+ uint64_t mmio_gpa_max;
+};
+
+struct sync_area {
+ atomic_bool start_flag;
+ atomic_bool exit_flag;
+ atomic_bool sync_flag;
+ void *move_area_ptr;
+};
+
+/*
+ * Technically, the atomic bool also needs to be address-free, which
+ * is recommended, but not strictly required, by C11 for lockless
+ * implementations.
+ * However, in practice both GCC and Clang fulfill this requirement on
+ * all KVM-supported platforms.
+ */
+static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
+
+static sem_t vcpu_ready;
+
+static bool map_unmap_verify;
+
+static bool verbose;
+#define pr_info_v(...) \
+ do { \
+ if (verbose) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+
+static void *vcpu_worker(void *data)
+{
+ struct vm_data *vm = data;
+ struct kvm_run *run;
+ struct ucall uc;
+ uint64_t cmd;
+
+ run = vcpu_state(vm->vm, VCPU_ID);
+ while (1) {
+ vcpu_run(vm->vm, VCPU_ID);
+
+ if (run->exit_reason == KVM_EXIT_IO) {
+ cmd = get_ucall(vm->vm, VCPU_ID, &uc);
+ if (cmd != UCALL_SYNC)
+ break;
+
+ sem_post(&vcpu_ready);
+ continue;
+ }
+
+ if (run->exit_reason != KVM_EXIT_MMIO)
+ break;
+
+ TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+ TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+ TEST_ASSERT(run->mmio.len == 8,
+ "Unexpected exit mmio size = %u", run->mmio.len);
+ TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+ run->mmio.phys_addr <= vm->mmio_gpa_max,
+ "Unexpected exit mmio address = 0x%llx",
+ run->mmio.phys_addr);
+ }
+
+ if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+ TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+ __FILE__, uc.args[1], uc.args[2]);
+
+ return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+ struct timespec ts;
+
+ TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+ "clock_gettime() failed: %d\n", errno);
+
+ ts.tv_sec += 2;
+ TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+ "sem_timedwait() failed: %d\n", errno);
+}
+
+static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
+{
+ uint64_t gpage, pgoffs;
+ uint32_t slot, slotoffs;
+ void *base;
+
+ TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
+ TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+ "Too high gpa to translate");
+ gpa -= MEM_GPA;
+
+ gpage = gpa / 4096;
+ pgoffs = gpa % 4096;
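+ /* The last slot also holds the remainder pages, so clamp the index to it. */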
+ slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
+ slotoffs = gpage - (slot * data->pages_per_slot);
+
+ if (rempages) {
+ uint64_t slotpages;
+
+ if (slot == data->nslots - 1)
+ slotpages = data->npages - slot * data->pages_per_slot;
+ else
+ slotpages = data->pages_per_slot;
+
+ TEST_ASSERT(!pgoffs,
+ "Asking for remaining pages in slot but gpa not page aligned");
+ *rempages = slotpages - slotoffs;
+ }
+
+ base = data->hva_slots[slot];
+ return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+}
+
+static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
+{
+ TEST_ASSERT(slot < data->nslots, "Too high slot number");
+
+ return MEM_GPA + slot * data->pages_per_slot * 4096;
+}
+
+static struct vm_data *alloc_vm(void)
+{
+ struct vm_data *data;
+
+ data = malloc(sizeof(*data));
+ TEST_ASSERT(data, "malloc(vmdata) failed");
+
+ data->vm = NULL;
+ data->hva_slots = NULL;
+
+ return data;
+}
+
+static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
+ void *guest_code, uint64_t mempages,
+ struct timespec *slot_runtime)
+{
+ uint32_t max_mem_slots;
+ uint64_t rempages;
+ uint64_t guest_addr;
+ uint32_t slot;
+ struct timespec tstart;
+ struct sync_area *sync;
+
+ max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+ TEST_ASSERT(max_mem_slots > 1,
+ "KVM_CAP_NR_MEMSLOTS should be greater than 1");
+ TEST_ASSERT(nslots > 1 || nslots == -1,
+ "Slot count cap should be greater than 1");
+ if (nslots != -1)
+ max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
+ pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+
+ TEST_ASSERT(mempages > 1,
+ "Can't test without any memory");
+
+ data->npages = mempages;
+ data->nslots = max_mem_slots - 1;
+ data->pages_per_slot = mempages / data->nslots;
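+ /* Every slot needs at least one page; report the largest usable slot count. */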
+ if (!data->pages_per_slot) {
+ *maxslots = mempages + 1;
+ return false;
+ }
+
+ rempages = mempages % data->nslots;
+ data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
+ TEST_ASSERT(data->hva_slots, "malloc() failed");
+
+ data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+
+ pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
+ max_mem_slots - 1, data->pages_per_slot, rempages);
+
+ clock_gettime(CLOCK_MONOTONIC, &tstart);
+ for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+ uint64_t npages;
+
+ npages = data->pages_per_slot;
+ if (slot == max_mem_slots - 1)
+ npages += rempages;
+
+ vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
+ guest_addr, slot, npages,
+ 0);
+ guest_addr += npages * 4096;
+ }
+ *slot_runtime = timespec_elapsed(tstart);
+
+ for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+ uint64_t npages;
+ uint64_t gpa;
+
+ npages = data->pages_per_slot;
+ if (slot == max_mem_slots - 2)
+ npages += rempages;
+
+ gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
+ slot + 1);
+ TEST_ASSERT(gpa == guest_addr,
+ "vm_phy_pages_alloc() failed\n");
+
+ data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
+ memset(data->hva_slots[slot], 0, npages * 4096);
+
+ guest_addr += npages * 4096;
+ }
+
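+ /* Identity-map the whole test area in the guest page tables. */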
+ virt_map(data->vm, MEM_GPA, MEM_GPA, mempages);
+
+ sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+ atomic_init(&sync->start_flag, false);
+ atomic_init(&sync->exit_flag, false);
+ atomic_init(&sync->sync_flag, false);
+
+ data->mmio_ok = false;
+
+ return true;
+}
+
+static void launch_vm(struct vm_data *data)
+{
+ pr_info_v("Launching the test VM\n");
+
+ pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
+
+ /* Ensure the guest thread is spun up. */
+ wait_for_vcpu();
+}
+
+static void free_vm(struct vm_data *data)
+{
+ kvm_vm_free(data->vm);
+ free(data->hva_slots);
+ free(data);
+}
+
+static void wait_guest_exit(struct vm_data *data)
+{
+ pthread_join(data->vcpu_thread, NULL);
+}
+
+static void let_guest_run(struct sync_area *sync)
+{
+ atomic_store_explicit(&sync->start_flag, true, memory_order_release);
+}
+
+static void guest_spin_until_start(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
+ ;
+}
+
+static void make_guest_exit(struct sync_area *sync)
+{
+ atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
+}
+
+static bool _guest_should_exit(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
+}
+
+#define guest_should_exit() unlikely(_guest_should_exit())
+
+/*
+ * noinline so we can easily see how much time the host spends waiting
+ * for the guest.
+ * For the same reason use alarm() instead of polling clock_gettime()
+ * to implement a wait timeout.
+ */
+static noinline void host_perform_sync(struct sync_area *sync)
+{
+ alarm(2);
+
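+ /* Raise sync_flag and spin until the guest consumes (clears) it. */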
+ atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
+ while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
+ ;
+
+ alarm(0);
+}
+
+static bool guest_perform_sync(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ bool expected;
+
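+ /* Spin until the host raises sync_flag, then atomically clear it. */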
+ do {
+ if (guest_should_exit())
+ return false;
+
+ expected = true;
+ } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
+ &expected, false,
+ memory_order_acq_rel,
+ memory_order_relaxed));
+
+ return true;
+}
+
+static void guest_code_test_memslot_move(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (!guest_should_exit()) {
+ uintptr_t ptr;
+
+ for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
+ ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ /*
+ * No host sync here since the MMIO exits are so expensive
+ * that the host would spend most of its time waiting for
+ * the guest and so, instead of measuring memslot move
+ * performance, we would measure the performance and
+ * likelihood of MMIO exits.
+ */
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_map(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr;
+
+ for (ptr = MEM_TEST_GPA;
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
+ for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_unmap(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr = MEM_TEST_GPA;
+
+ /*
+ * We can afford to access (map) just a small number of pages
+ * per host sync; otherwise the host would spend a significant
+ * amount of its time waiting for the guest (instead of doing
+ * unmap operations), effectively turning this test into a map
+ * performance test.
+ *
+ * Just access a single page to be on the safe side.
+ */
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
+ ptr += MEM_TEST_UNMAP_SIZE / 2;
+ *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_rw(void)
+{
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr;
+
+ for (ptr = MEM_TEST_GPA;
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
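+ /* The host writes MEM_TEST_VAL_2 at offset 2048 into each page. */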
+ for (ptr = MEM_TEST_GPA + 4096 / 2;
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+ uint64_t val = *(uint64_t *)ptr;
+
+ GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+ *(uint64_t *)ptr = 0;
+ }
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static bool test_memslot_move_prepare(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots, bool isactive)
+{
+ uint64_t movesrcgpa, movetestgpa;
+
+ movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+
+ if (isactive) {
+ uint64_t lastpages;
+
+ vm_gpa2hva(data, movesrcgpa, &lastpages);
+ if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+ *maxslots = 0;
+ return false;
+ }
+ }
+
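+ /*
+ * For the active variant, center the test area on the last slot's
+ * base so half of the writes hit the memslot being moved; for the
+ * inactive variant, keep the whole area below it.
+ */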
+ movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
+ sync->move_area_ptr = (void *)movetestgpa;
+
+ if (isactive) {
+ data->mmio_ok = true;
+ data->mmio_gpa_min = movesrcgpa;
+ data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
+ }
+
+ return true;
+}
+
+static bool test_memslot_move_prepare_active(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots)
+{
+ return test_memslot_move_prepare(data, sync, maxslots, true);
+}
+
+static bool test_memslot_move_prepare_inactive(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots)
+{
+ return test_memslot_move_prepare(data, sync, maxslots, false);
+}
+
+static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
+{
+ uint64_t movesrcgpa;
+
+ movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
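+ /* Test memslots were added with IDs 1..nslots, hence the +1 over the 0-based index. */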
+ vm_mem_region_move(data->vm, data->nslots - 1 + 1,
+ MEM_TEST_MOVE_GPA_DEST);
+ vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
+}
+
+static void test_memslot_do_unmap(struct vm_data *data,
+ uint64_t offsp, uint64_t count)
+{
+ uint64_t gpa, ctr;
+
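+ /* MADV_DONTNEED zaps the backing pages, forcing KVM to unmap the affected GPAs. */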
+ for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+ uint64_t npages;
+ void *hva;
+ int ret;
+
+ hva = vm_gpa2hva(data, gpa, &npages);
+ TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
+ npages = min(npages, count - ctr);
+ ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+ TEST_ASSERT(!ret,
+ "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
+ hva, gpa);
+ ctr += npages;
+ gpa += npages * 4096;
+ }
+ TEST_ASSERT(ctr == count,
+ "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
+}
+
+static void test_memslot_map_unmap_check(struct vm_data *data,
+ uint64_t offsp, uint64_t valexp)
+{
+ uint64_t gpa;
+ uint64_t *val;
+
+ if (!map_unmap_verify)
+ return;
+
+ gpa = MEM_TEST_GPA + offsp * 4096;
+ val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
+ TEST_ASSERT(*val == valexp,
+ "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
+ *val, valexp, gpa);
+ *val = 0;
+}
+
+static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
+{
+ /*
+ * Unmap the second half of the test area while guest writes to (maps)
+ * the first half.
+ */
+ test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+ MEM_TEST_MAP_SIZE_PAGES / 2);
+
+ /*
+ * Wait for the guest to finish writing the first half of the test
+ * area, verify the written value on the first and the last page of
+ * this area and then unmap it.
+ * Meanwhile, the guest is writing to (mapping) the second half of
+ * the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+ test_memslot_map_unmap_check(data,
+ MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
+ MEM_TEST_VAL_1);
+ test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+
+ /*
+ * Wait for the guest to finish writing the second half of the test
+ * area and verify the written value on the first and the last page
+ * of this area.
+ * The area will be unmapped at the beginning of the next loop
+ * iteration.
+ * Meanwhile, the guest is writing to (mapping) the first half of
+ * the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+ MEM_TEST_VAL_2);
+ test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
+ MEM_TEST_VAL_2);
+}
+
+static void test_memslot_unmap_loop_common(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t chunk)
+{
+ uint64_t ctr;
+
+ /*
+ * Wait for the guest to finish mapping page(s) in the first half
+ * of the test area, verify the written value and then perform unmap
+ * of this area.
+ * Meanwhile, the guest is writing to (mapping) page(s) in the second
+ * half of the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+ for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+ test_memslot_do_unmap(data, ctr, chunk);
+
+ /* Likewise, but for the opposite host / guest areas */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
+ MEM_TEST_VAL_2);
+ for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
+ ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+ test_memslot_do_unmap(data, ctr, chunk);
+}
+
+static void test_memslot_unmap_loop(struct vm_data *data,
+ struct sync_area *sync)
+{
+ test_memslot_unmap_loop_common(data, sync, 1);
+}
+
+static void test_memslot_unmap_loop_chunked(struct vm_data *data,
+ struct sync_area *sync)
+{
+ test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+}
+
+static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
+{
+ uint64_t gptr;
+
+ for (gptr = MEM_TEST_GPA + 4096 / 2;
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+ *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
+
+ host_perform_sync(sync);
+
+ for (gptr = MEM_TEST_GPA;
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+ uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
+ uint64_t val = *vptr;
+
+ TEST_ASSERT(val == MEM_TEST_VAL_1,
+ "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
+ val, gptr);
+ *vptr = 0;
+ }
+
+ host_perform_sync(sync);
+}
+
+struct test_data {
+ const char *name;
+ uint64_t mem_size;
+ void (*guest_code)(void);
+ bool (*prepare)(struct vm_data *data, struct sync_area *sync,
+ uint64_t *maxslots);
+ void (*loop)(struct vm_data *data, struct sync_area *sync);
+};
+
+static bool test_execute(int nslots, uint64_t *maxslots,
+ unsigned int maxtime,
+ const struct test_data *tdata,
+ uint64_t *nloops,
+ struct timespec *slot_runtime,
+ struct timespec *guest_runtime)
+{
+ uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+ struct vm_data *data;
+ struct sync_area *sync;
+ struct timespec tstart;
+ bool ret = true;
+
+ data = alloc_vm();
+ if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
+ mem_size, slot_runtime)) {
+ ret = false;
+ goto exit_free;
+ }
+
+ sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+
+ if (tdata->prepare &&
+ !tdata->prepare(data, sync, maxslots)) {
+ ret = false;
+ goto exit_free;
+ }
+
+ launch_vm(data);
+
+ clock_gettime(CLOCK_MONOTONIC, &tstart);
+ let_guest_run(sync);
+
+ while (1) {
+ *guest_runtime = timespec_elapsed(tstart);
+ if (guest_runtime->tv_sec >= maxtime)
+ break;
+
+ tdata->loop(data, sync);
+
+ (*nloops)++;
+ }
+
+ make_guest_exit(sync);
+ wait_guest_exit(data);
+
+exit_free:
+ free_vm(data);
+
+ return ret;
+}
+
+static const struct test_data tests[] = {
+ {
+ .name = "map",
+ .mem_size = MEM_SIZE_MAP_PAGES,
+ .guest_code = guest_code_test_memslot_map,
+ .loop = test_memslot_map_loop,
+ },
+ {
+ .name = "unmap",
+ .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .guest_code = guest_code_test_memslot_unmap,
+ .loop = test_memslot_unmap_loop,
+ },
+ {
+ .name = "unmap chunked",
+ .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .guest_code = guest_code_test_memslot_unmap,
+ .loop = test_memslot_unmap_loop_chunked,
+ },
+ {
+ .name = "move active area",
+ .guest_code = guest_code_test_memslot_move,
+ .prepare = test_memslot_move_prepare_active,
+ .loop = test_memslot_move_loop,
+ },
+ {
+ .name = "move inactive area",
+ .guest_code = guest_code_test_memslot_move,
+ .prepare = test_memslot_move_prepare_inactive,
+ .loop = test_memslot_move_loop,
+ },
+ {
+ .name = "RW",
+ .guest_code = guest_code_test_memslot_rw,
+ .loop = test_memslot_rw_loop
+ },
+};
+
+#define NTESTS ARRAY_SIZE(tests)
+
+struct test_args {
+ int tfirst;
+ int tlast;
+ int nslots;
+ int seconds;
+ int runs;
+};
+
+static void help(char *name, struct test_args *targs)
+{
+ int ctr;
+
+ pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
+ name);
+ pr_info(" -h: print this help screen.\n");
+ pr_info(" -v: enable verbose mode (not for benchmarking).\n");
+ pr_info(" -d: enable extra debug checks.\n");
+ pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
+ targs->nslots);
+ pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
+ targs->tfirst, NTESTS - 1);
+ pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
+ targs->tlast, NTESTS - 1);
+ pr_info(" -l: specify the test length in seconds (currently: %i)\n",
+ targs->seconds);
+ pr_info(" -r: specify the number of runs per test (currently: %i)\n",
+ targs->runs);
+
+ pr_info("\nAvailable tests:\n");
+ for (ctr = 0; ctr < NTESTS; ctr++)
+ pr_info("%d: %s\n", ctr, tests[ctr].name);
+}
+
+static bool parse_args(int argc, char *argv[],
+ struct test_args *targs)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+ switch (opt) {
+ case 'h':
+ default:
+ help(argv[0], targs);
+ return false;
+ case 'v':
+ verbose = true;
+ break;
+ case 'd':
+ map_unmap_verify = true;
+ break;
+ case 's':
+ targs->nslots = atoi(optarg);
+ if (targs->nslots <= 0 && targs->nslots != -1) {
+ pr_info("Slot count cap has to be positive or -1 for no cap\n");
+ return false;
+ }
+ break;
+ case 'f':
+ targs->tfirst = atoi(optarg);
+ if (targs->tfirst < 0) {
+ pr_info("First test to run has to be non-negative\n");
+ return false;
+ }
+ break;
+ case 'e':
+ targs->tlast = atoi(optarg);
+ if (targs->tlast < 0 || targs->tlast >= NTESTS) {
+ pr_info("Last test to run has to be non-negative and less than %zu\n",
+ NTESTS);
+ return false;
+ }
+ break;
+ case 'l':
+ targs->seconds = atoi(optarg);
+ if (targs->seconds < 0) {
+ pr_info("Test length in seconds has to be non-negative\n");
+ return false;
+ }
+ break;
+ case 'r':
+ targs->runs = atoi(optarg);
+ if (targs->runs <= 0) {
+ pr_info("Runs per test has to be positive\n");
+ return false;
+ }
+ break;
+ }
+ }
+
+ if (optind < argc) {
+ help(argv[0], targs);
+ return false;
+ }
+
+ if (targs->tfirst > targs->tlast) {
+ pr_info("First test to run cannot be greater than the last test to run\n");
+ return false;
+ }
+
+ return true;
+}
+
+struct test_result {
+ struct timespec slot_runtime, guest_runtime, iter_runtime;
+ int64_t slottimens, runtimens;
+ uint64_t nloops;
+};
+
+static bool test_loop(const struct test_data *data,
+ const struct test_args *targs,
+ struct test_result *rbestslottime,
+ struct test_result *rbestruntime)
+{
+ uint64_t maxslots;
+ struct test_result result;
+
+ result.nloops = 0;
+ if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
+ &result.nloops,
+ &result.slot_runtime, &result.guest_runtime)) {
+ if (maxslots)
+ pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
+ maxslots);
+ else
+ pr_info("Memslot count may be too high for this test, try adjusting the cap\n");
+
+ return false;
+ }
+
+ pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
+ result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
+ result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
+ if (!result.nloops) {
+ pr_info("No full loops done - too short test time or system too loaded?\n");
+ return true;
+ }
+
+ result.iter_runtime = timespec_div(result.guest_runtime,
+ result.nloops);
+ pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
+ result.nloops,
+ result.iter_runtime.tv_sec,
+ result.iter_runtime.tv_nsec);
+ result.slottimens = timespec_to_ns(result.slot_runtime);
+ result.runtimens = timespec_to_ns(result.iter_runtime);
+
+ /*
+ * Only rank the slot setup time for tests using the whole test memory
+ * area so they are comparable
+ */
+ if (!data->mem_size &&
+ (!rbestslottime->slottimens ||
+ result.slottimens < rbestslottime->slottimens))
+ *rbestslottime = result;
+ if (!rbestruntime->runtimens ||
+ result.runtimens < rbestruntime->runtimens)
+ *rbestruntime = result;
+
+ return true;
+}
+
+int main(int argc, char *argv[])
+{
+ struct test_args targs = {
+ .tfirst = 0,
+ .tlast = NTESTS - 1,
+ .nslots = -1,
+ .seconds = 5,
+ .runs = 1,
+ };
+ struct test_result rbestslottime;
+ int tctr;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ if (!parse_args(argc, argv, &targs))
+ return -1;
+
+ rbestslottime.slottimens = 0;
+ for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
+ const struct test_data *data = &tests[tctr];
+ unsigned int runctr;
+ struct test_result rbestruntime;
+
+ if (tctr > targs.tfirst)
+ pr_info("\n");
+
+ pr_info("Testing %s performance with %i runs, %d seconds each\n",
+ data->name, targs.runs, targs.seconds);
+
+ rbestruntime.runtimens = 0;
+ for (runctr = 0; runctr < targs.runs; runctr++)
+ if (!test_loop(data, &targs,
+ &rbestslottime, &rbestruntime))
+ break;
+
+ if (rbestruntime.runtimens)
+ pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
+ rbestruntime.iter_runtime.tv_sec,
+ rbestruntime.iter_runtime.tv_nsec,
+ rbestruntime.nloops);
+ }
+
+ if (rbestslottime.slottimens)
+ pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
+ rbestslottime.slot_runtime.tv_sec,
+ rbestslottime.slot_runtime.tv_nsec);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 978f5b5f4dc0..85b18bb8f762 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -132,7 +132,7 @@ static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
- virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+ virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
/* Ditto for the host mapping so that both pages can be zeroed. */
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
@@ -376,7 +376,7 @@ static void test_add_max_memory_regions(void)
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
- mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+ mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
@@ -401,7 +401,7 @@ static void test_add_max_memory_regions(void)
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
- munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+ munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
munmap(mem_extra, MEM_REGION_SIZE);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index fcc840088c91..b0031f2d38fd 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -73,8 +73,6 @@ static void steal_time_init(struct kvm_vm *vm)
for (i = 0; i < NR_VCPUS; ++i) {
int ret;
- vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
-
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vm, st_gva[i]);
@@ -295,7 +293,7 @@ int main(int ac, char **av)
vm = vm_create_default(0, 0, guest_code);
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
- virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages, 0);
+ virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
ucall_init(vm, NULL);
/* Add the rest of the VCPUs */
diff --git a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
new file mode 100644
index 000000000000..f070ff0224fa
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ *
+ * Tests for KVM_CAP_EXIT_ON_EMULATION_FAILURE capability.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define VCPU_ID 1
+#define PAGE_SIZE 4096
+#define MAXPHYADDR 36
+
+#define MEM_REGION_GVA 0x0000123456789000
+#define MEM_REGION_GPA 0x0000000700000000
+#define MEM_REGION_SLOT 10
+#define MEM_REGION_SIZE PAGE_SIZE
+
+static void guest_code(void)
+{
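+ /*
+ * The backing PTE is poisoned in main() with a reserved bit, so this
+ * load triggers emulation; 'flds' is unsupported by KVM's emulator,
+ * forcing an emulation-failure exit to userspace.
+ */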
+ __asm__ __volatile__("flds (%[addr])"
+ :: [addr]"r"(MEM_REGION_GVA));
+
+ GUEST_DONE();
+}
+
+static void run_guest(struct kvm_vm *vm)
+{
+ int rc;
+
+ rc = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+}
+
+/*
+ * Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
+ * figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".
+ */
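+/* ModR/M byte layout: mod in bits 7:6, reg in bits 5:3, r/m in bits 2:0. */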
+#define GET_RM(insn_byte) (insn_byte & 0x7)
+#define GET_REG(insn_byte) ((insn_byte & 0x38) >> 3)
+#define GET_MOD(insn_byte) ((insn_byte & 0xc0) >> 6)
+
+/* Ensure we are dealing with a simple 2-byte flds instruction. */
+static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
+{
+ return insn_size >= 2 &&
+ insn_bytes[0] == 0xd9 &&
+ GET_REG(insn_bytes[1]) == 0x0 &&
+ GET_MOD(insn_bytes[1]) == 0x0 &&
+ /* Ensure there is no SIB byte. */
+ GET_RM(insn_bytes[1]) != 0x4 &&
+ /* Ensure there is no displacement byte. */
+ GET_RM(insn_bytes[1]) != 0x5;
+}
+
+static void process_exit_on_emulation_error(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_regs regs;
+ uint8_t *insn_bytes;
+ uint8_t insn_size;
+ uint64_t flags;
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
+ "Unexpected suberror: %u",
+ run->emulation_failure.suberror);
+
+ if (run->emulation_failure.ndata >= 1) {
+ flags = run->emulation_failure.flags;
+ if ((flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) &&
+ run->emulation_failure.ndata >= 3) {
+ insn_size = run->emulation_failure.insn_size;
+ insn_bytes = run->emulation_failure.insn_bytes;
+
+ TEST_ASSERT(insn_size <= 15 && insn_size > 0,
+ "Unexpected instruction size: %u",
+ insn_size);
+
+ TEST_ASSERT(is_flds(insn_bytes, insn_size),
+ "Unexpected instruction. Expected 'flds' (0xd9 /0)");
+
+ /*
+ * If is_flds() succeeded then the instruction bytes
+ * contained an flds instruction that is 2-bytes in
+ * length (ie: no prefix, no SIB, no displacement).
+ */
+ vcpu_regs_get(vm, VCPU_ID, &regs);
+ regs.rip += 2;
+ vcpu_regs_set(vm, VCPU_ID, &regs);
+ }
+ }
+}
+
+static void do_guest_assert(struct kvm_vm *vm, struct ucall *uc)
+{
+ TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], __FILE__,
+ uc->args[1]);
+}
+
+static void check_for_guest_assert(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ if (run->exit_reason == KVM_EXIT_IO &&
+ get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
+ do_guest_assert(vm, &uc);
+ }
+}
+
+static void process_ucall_done(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ check_for_guest_assert(vm);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
+ "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
+ uc.cmd, UCALL_DONE);
+}
+
+static uint64_t process_ucall(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ break;
+ case UCALL_ABORT:
+ do_guest_assert(vm, &uc);
+ break;
+ case UCALL_DONE:
+ process_ucall_done(vm);
+ break;
+ default:
+ TEST_ASSERT(false, "Unexpected ucall");
+ }
+
+ return uc.cmd;
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_enable_cap emul_failure_cap = {
+ .cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
+ .args[0] = 1,
+ };
+ struct kvm_cpuid_entry2 *entry;
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_vm *vm;
+ uint64_t gpa, pte;
+ uint64_t *hva;
+ int rc;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ if (!kvm_check_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
+ printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n");
+ return 0;
+ }
+
+ cpuid = kvm_get_supported_cpuid();
+
+ entry = kvm_get_supported_cpuid_index(0x80000008, 0);
+ entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
+ set_cpuid(cpuid, entry);
+
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+
+ rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
+ TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
+ vm_enable_cap(vm, &emul_failure_cap);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ MEM_REGION_GPA, MEM_REGION_SLOT,
+ MEM_REGION_SIZE / PAGE_SIZE, 0);
+ gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
+ MEM_REGION_GPA, MEM_REGION_SLOT);
+ TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+ virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
+ hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+ memset(hva, 0, PAGE_SIZE);
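+ /* Flip a bit above the 36-bit MAXPHYADDR so the PTE has a reserved bit set. */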
+ pte = vm_get_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA);
+ vm_set_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA, pte | (1ull << 36));
+
+ run_guest(vm);
+ process_exit_on_emulation_error(vm);
+ run_guest(vm);
+
+ TEST_ASSERT(process_ucall(vm) == UCALL_DONE, "Expected UCALL_DONE");
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 63096cea26c6..2b46dcca86a8 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -22,15 +22,6 @@
static int ud_count;
-void enable_x2apic(void)
-{
- uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
-
- wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
- MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
- wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
-}
-
static void guest_ud_handler(struct ex_regs *regs)
{
ud_count++;
@@ -59,7 +50,7 @@ void guest_code(struct vmx_pages *vmx_pages)
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- enable_x2apic();
+ x2apic_enable();
GUEST_SYNC(1);
GUEST_SYNC(2);
@@ -121,14 +112,38 @@ void inject_nmi(struct kvm_vm *vm)
vcpu_events_set(vm, VCPU_ID, &events);
}
+static void save_restore_vm(struct kvm_vm *vm)
+{
+ struct kvm_regs regs1, regs2;
+ struct kvm_x86_state *state;
+
+ state = vcpu_save_state(vm, VCPU_ID);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+ kvm_vm_release(vm);
+
+ /* Restore state in a new VM. */
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID);
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+ vcpu_enable_evmcs(vm, VCPU_ID);
+ vcpu_load_state(vm, VCPU_ID, state);
+ free(state);
+
+ memset(&regs2, 0, sizeof(regs2));
+ vcpu_regs_get(vm, VCPU_ID, &regs2);
+ TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
+ "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
+ (ulong) regs2.rdi, (ulong) regs2.rsi);
+}
+
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
- struct kvm_regs regs1, regs2;
struct kvm_vm *vm;
struct kvm_run *run;
- struct kvm_x86_state *state;
struct ucall uc;
int stage;
@@ -145,21 +160,18 @@ int main(int argc, char *argv[])
vcpu_set_hv_cpuid(vm, VCPU_ID);
vcpu_enable_evmcs(vm, VCPU_ID);
- run = vcpu_state(vm, VCPU_ID);
-
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
- vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+ vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+ vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
+ run = vcpu_state(vm, VCPU_ID);
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
@@ -184,32 +196,23 @@ int main(int argc, char *argv[])
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
- state = vcpu_save_state(vm, VCPU_ID);
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- kvm_vm_release(vm);
-
- /* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- vcpu_enable_evmcs(vm, VCPU_ID);
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
- free(state);
-
- memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
- TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
- "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
- (ulong) regs2.rdi, (ulong) regs2.rsi);
+ save_restore_vm(vm);
/* Force immediate L2->L1 exit before resuming */
if (stage == 8) {
pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
inject_nmi(vm);
}
+
+ /*
+ * Do KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE for a freshly
+ * restored VM (before the first KVM_RUN) to check that
+ * KVM_STATE_NESTED_EVMCS is not lost.
+ */
+ if (stage == 9) {
+ pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
+ save_restore_vm(vm);
+ }
}
done:
diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
index 9b78e8889638..a711f83749ea 100644
--- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
@@ -19,7 +19,12 @@ struct {
u32 function;
u32 index;
} mangled_cpuids[] = {
+ /*
+ * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
+ * which are not controlled for by this test.
+ */
{.function = 0xd, .index = 0},
+ {.function = 0xd, .index = 1},
};
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
@@ -140,8 +145,7 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
{
int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
- vm_vaddr_t gva = vm_vaddr_alloc(vm, size,
- getpagesize(), 0, 0);
+ vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
memcpy(guest_cpuids, cpuid, size);
diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
index cb953df4d7d0..8aed0db1331d 100644
--- a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
+++ b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
@@ -37,9 +37,7 @@ static void test_get_msr_index(void)
int old_res, res, kvm_fd, r;
struct kvm_msr_list *list;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
old_res = kvm_num_index_msrs(kvm_fd, 0);
TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
@@ -101,9 +99,7 @@ static void test_get_msr_feature(void)
int res, old_res, i, kvm_fd;
struct kvm_msr_list *feature_list;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
old_res = kvm_num_feature_msrs(kvm_fd, 0);
TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index 7f1d2765572c..bab10ae787b6 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -7,6 +7,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "hyperv.h"
struct ms_hyperv_tsc_page {
volatile u32 tsc_sequence;
@@ -15,13 +16,6 @@ struct ms_hyperv_tsc_page {
volatile s64 tsc_offset;
} __packed;
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
-#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
-#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
-#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
-
/* Simplified mul_u64_u64_shr() */
static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
{
@@ -220,7 +214,7 @@ int main(void)
vcpu_set_hv_cpuid(vm, VCPU_ID);
- tsc_page_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n");
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
new file mode 100644
index 000000000000..42bd658f52a8
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Tests for Hyper-V features enablement
+ */
+#include <asm/kvm_para.h>
+#include <linux/kvm_para.h>
+#include <stdint.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "hyperv.h"
+
+#define VCPU_ID 0
+#define LINUX_OS_ID ((u64)0x8100 << 48)
+
+extern unsigned char rdmsr_start;
+extern unsigned char rdmsr_end;
+
+static u64 do_rdmsr(u32 idx)
+{
+ u32 lo, hi;
+
+ asm volatile("rdmsr_start: rdmsr;"
+ "rdmsr_end:"
+ : "=a"(lo), "=c"(hi)
+ : "c"(idx));
+
+ return (((u64) hi) << 32) | lo;
+}
+
+extern unsigned char wrmsr_start;
+extern unsigned char wrmsr_end;
+
+static void do_wrmsr(u32 idx, u64 val)
+{
+ u32 lo, hi;
+
+ lo = val;
+ hi = val >> 32;
+
+ asm volatile("wrmsr_start: wrmsr;"
+ "wrmsr_end:"
+ : : "a"(lo), "c"(idx), "d"(hi));
+}
+
+static int nr_gp;
+
+static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
+ vm_vaddr_t output_address)
+{
+ u64 hv_status;
+
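+ /*
+ * Hyper-V x64 hypercall convention: RCX holds the hypercall control
+ * word, RDX the input parameters GPA, R8 the output parameters GPA;
+ * the status is returned in RAX.
+ */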
+ asm volatile("mov %3, %%r8\n"
+ "vmcall"
+ : "=a" (hv_status),
+ "+c" (control), "+d" (input_address)
+ : "r" (output_address)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
+
+ return hv_status;
+}
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+ unsigned char *rip = (unsigned char *)regs->rip;
+ bool r, w;
+
+ r = rip == &rdmsr_start;
+ w = rip == &wrmsr_start;
+ GUEST_ASSERT(r || w);
+
+ nr_gp++;
+
+ if (r)
+ regs->rip = (uint64_t)&rdmsr_end;
+ else
+ regs->rip = (uint64_t)&wrmsr_end;
+}
+
+struct msr_data {
+ uint32_t idx;
+ bool available;
+ bool write;
+ u64 write_val;
+};
+
+struct hcall_data {
+ uint64_t control;
+ uint64_t expect;
+};
+
+static void guest_msr(struct msr_data *msr)
+{
+ int i = 0;
+
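+ /* The host updates *msr in between GUEST_SYNC() stages to drive each test case. */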
+ while (msr->idx) {
+ WRITE_ONCE(nr_gp, 0);
+ if (!msr->write)
+ do_rdmsr(msr->idx);
+ else
+ do_wrmsr(msr->idx, msr->write_val);
+
+ if (msr->available)
+ GUEST_ASSERT(READ_ONCE(nr_gp) == 0);
+ else
+ GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
+
+ GUEST_SYNC(i++);
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
+{
+ int i = 0;
+
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
+
+ while (hcall->control) {
+ GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
+ pgs_gpa + 4096) == hcall->expect);
+ GUEST_SYNC(i++);
+ }
+
+ GUEST_DONE();
+}
+
+static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 *feat,
+ struct kvm_cpuid_entry2 *recomm,
+ struct kvm_cpuid_entry2 *dbg)
+{
+ TEST_ASSERT(set_cpuid(cpuid, feat),
+ "failed to set KVM_CPUID_FEATURES leaf");
+ TEST_ASSERT(set_cpuid(cpuid, recomm),
+ "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
+ TEST_ASSERT(set_cpuid(cpuid, dbg),
+ "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+}
+
+static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
+ struct kvm_cpuid2 *best)
+{
+ struct kvm_run *run;
+ struct ucall uc;
+ int stage = 0, r;
+ struct kvm_cpuid_entry2 feat = {
+ .function = HYPERV_CPUID_FEATURES
+ };
+ struct kvm_cpuid_entry2 recomm = {
+ .function = HYPERV_CPUID_ENLIGHTMENT_INFO
+ };
+ struct kvm_cpuid_entry2 dbg = {
+ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
+ };
+ struct kvm_enable_cap cap = {0};
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ while (true) {
+ switch (stage) {
+ case 0:
+ /*
+ * Only available when Hyper-V identification is set
+ */
+ msr->idx = HV_X64_MSR_GUEST_OS_ID;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 1:
+ msr->idx = HV_X64_MSR_HYPERCALL;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 2:
+ feat.eax |= HV_MSR_HYPERCALL_AVAILABLE;
+ /*
+ * HV_X64_MSR_GUEST_OS_ID has to be written first to make
+ * HV_X64_MSR_HYPERCALL available.
+ */
+ msr->idx = HV_X64_MSR_GUEST_OS_ID;
+ msr->write = 1;
+ msr->write_val = LINUX_OS_ID;
+ msr->available = 1;
+ break;
+ case 3:
+ msr->idx = HV_X64_MSR_GUEST_OS_ID;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 4:
+ msr->idx = HV_X64_MSR_HYPERCALL;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+
+ case 5:
+ msr->idx = HV_X64_MSR_VP_RUNTIME;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 6:
+ feat.eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 7:
+ /* Read only */
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 0;
+ break;
+
+ case 8:
+ msr->idx = HV_X64_MSR_TIME_REF_COUNT;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 9:
+ feat.eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 10:
+ /* Read only */
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 0;
+ break;
+
+ case 11:
+ msr->idx = HV_X64_MSR_VP_INDEX;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 12:
+ feat.eax |= HV_MSR_VP_INDEX_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 13:
+ /* Read only */
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 0;
+ break;
+
+ case 14:
+ msr->idx = HV_X64_MSR_RESET;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 15:
+ feat.eax |= HV_MSR_RESET_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 16:
+ msr->write = 1;
+ msr->write_val = 0;
+ msr->available = 1;
+ break;
+
+ case 17:
+ msr->idx = HV_X64_MSR_REFERENCE_TSC;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 18:
+ feat.eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 19:
+ msr->write = 1;
+ msr->write_val = 0;
+ msr->available = 1;
+ break;
+
+ case 20:
+ msr->idx = HV_X64_MSR_EOM;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 21:
+ /*
+ * HV_X64_MSR_EOM remains unavailable even with the
+ * KVM_CAP_HYPERV_SYNIC2 capability enabled and the
+ * guest-visible CPUID bit unset.
+ */
+ cap.cap = KVM_CAP_HYPERV_SYNIC2;
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+ break;
+ case 22:
+ feat.eax |= HV_MSR_SYNIC_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 23:
+ msr->write = 1;
+ msr->write_val = 0;
+ msr->available = 1;
+ break;
+
+ case 24:
+ msr->idx = HV_X64_MSR_STIMER0_CONFIG;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 25:
+ feat.eax |= HV_MSR_SYNTIMER_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 26:
+ msr->write = 1;
+ msr->write_val = 0;
+ msr->available = 1;
+ break;
+ case 27:
+ /* Direct mode test */
+ msr->write = 1;
+ msr->write_val = 1 << 12;
+ msr->available = 0;
+ break;
+ case 28:
+ feat.edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+ msr->available = 1;
+ break;
+
+ case 29:
+ msr->idx = HV_X64_MSR_EOI;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 30:
+ feat.eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 1;
+ break;
+
+ case 31:
+ msr->idx = HV_X64_MSR_TSC_FREQUENCY;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 32:
+ feat.eax |= HV_ACCESS_FREQUENCY_MSRS;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 33:
+ /* Read only */
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 0;
+ break;
+
+ case 34:
+ msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 35:
+ feat.eax |= HV_ACCESS_REENLIGHTENMENT;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 36:
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 1;
+ break;
+ case 37:
+ /* Can only write '0' */
+ msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 0;
+ break;
+
+ case 38:
+ msr->idx = HV_X64_MSR_CRASH_P0;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 39:
+ feat.edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 40:
+ msr->write = 1;
+ msr->write_val = 1;
+ msr->available = 1;
+ break;
+
+ case 41:
+ msr->idx = HV_X64_MSR_SYNDBG_STATUS;
+ msr->write = 0;
+ msr->available = 0;
+ break;
+ case 42:
+ feat.edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+ dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ msr->write = 0;
+ msr->available = 1;
+ break;
+ case 43:
+ msr->write = 1;
+ msr->write_val = 0;
+ msr->available = 1;
+ break;
+
+ case 44:
+ /* END */
+ msr->idx = 0;
+ break;
+ }
+
+ hv_set_cpuid(vm, best, &feat, &recomm, &dbg);
+
+ if (msr->idx)
+ pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
+ msr->idx, msr->write ? "write" : "read");
+ else
+ pr_debug("Stage %d: finish\n", stage);
+
+ r = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason: %u (%s)",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == stage,
+ "Unexpected stage: %ld (%d expected)\n",
+ uc.args[1], stage);
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ return;
+ case UCALL_DONE:
+ return;
+ }
+
+ stage++;
+ }
+}
+
+static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
+ void *input, void *output, struct kvm_cpuid2 *best)
+{
+ struct kvm_run *run;
+ struct ucall uc;
+ int stage = 0, r;
+ struct kvm_cpuid_entry2 feat = {
+ .function = HYPERV_CPUID_FEATURES,
+ .eax = HV_MSR_HYPERCALL_AVAILABLE
+ };
+ struct kvm_cpuid_entry2 recomm = {
+ .function = HYPERV_CPUID_ENLIGHTMENT_INFO
+ };
+ struct kvm_cpuid_entry2 dbg = {
+ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
+ };
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ while (true) {
+ switch (stage) {
+ case 0:
+ hcall->control = 0xdeadbeef;
+ hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
+ break;
+
+ case 1:
+ hcall->control = HVCALL_POST_MESSAGE;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 2:
+ feat.ebx |= HV_POST_MESSAGES;
+ hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+
+ case 3:
+ hcall->control = HVCALL_SIGNAL_EVENT;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 4:
+ feat.ebx |= HV_SIGNAL_EVENTS;
+ hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+
+ case 5:
+ hcall->control = HVCALL_RESET_DEBUG_SESSION;
+ hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
+ break;
+ case 6:
+ dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 7:
+ feat.ebx |= HV_DEBUGGING;
+ hcall->expect = HV_STATUS_OPERATION_DENIED;
+ break;
+
+ case 8:
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 9:
+ recomm.eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ hcall->expect = HV_STATUS_SUCCESS;
+ break;
+ case 10:
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 11:
+ recomm.eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ hcall->expect = HV_STATUS_SUCCESS;
+ break;
+
+ case 12:
+ hcall->control = HVCALL_SEND_IPI;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 13:
+ recomm.eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+ case 14:
+ /* Nothing in 'sparse banks' -> success */
+ hcall->control = HVCALL_SEND_IPI_EX;
+ hcall->expect = HV_STATUS_SUCCESS;
+ break;
+
+ case 15:
+ hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
+ hcall->expect = HV_STATUS_ACCESS_DENIED;
+ break;
+ case 16:
+ recomm.ebx = 0xfff;
+ hcall->expect = HV_STATUS_SUCCESS;
+ break;
+
+ case 17:
+ /* END */
+ hcall->control = 0;
+ break;
+ }
+
+ hv_set_cpuid(vm, best, &feat, &recomm, &dbg);
+
+ if (hcall->control)
+ pr_debug("Stage %d: testing hcall: 0x%lx\n", stage,
+ hcall->control);
+ else
+ pr_debug("Stage %d: finish\n", stage);
+
+ r = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason: %u (%s)",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == stage,
+ "Unexpected stage: %ld (%d expected)\n",
+ uc.args[1], stage);
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ return;
+ case UCALL_DONE:
+ return;
+ }
+
+ stage++;
+ }
+}
+
+int main(void)
+{
+ struct kvm_cpuid2 *best;
+ struct kvm_vm *vm;
+ vm_vaddr_t msr_gva, hcall_page, hcall_params;
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+ .args = {1}
+ };
+
+ /* Test MSRs */
+ vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+ msr_gva = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+ vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+
+ pr_info("Testing access to Hyper-V specific MSRs\n");
+ guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
+ best);
+ kvm_vm_free(vm);
+
+ /* Test hypercalls */
+ vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+ /* Hypercall input/output */
+ hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+ hcall_params = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+ vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ pr_info("Testing access to Hyper-V hypercalls\n");
+ guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
+ addr_gva2hva(vm, hcall_page),
+ addr_gva2hva(vm, hcall_page) + getpagesize(),
+ best);
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index 732b244d6956..04ed975662c9 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -227,7 +227,7 @@ int main(void)
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
enter_guest(vm);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
new file mode 100644
index 000000000000..523371cf8e8f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 1
+
+#define MMIO_GPA 0x100000000ull
+
+static void guest_code(void)
+{
+ (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
+ (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
+
+ GUEST_ASSERT(0);
+}
+
+static void guest_pf_handler(struct ex_regs *regs)
+{
+ /* PFEC == RSVD | PRESENT (read, kernel). */
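+	/* 0x9 == PFERR_PRESENT_MASK (bit 0) | PFERR_RSVD_MASK (bit 3). */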
+ GUEST_ASSERT(regs->error_code == 0x9);
+ GUEST_DONE();
+}
+
+static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
+{
+ u32 good_cpuid_val = *cpuid_reg;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ uint64_t cmd;
+ int r;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ run = vcpu_state(vm, VCPU_ID);
+
+	/* Map 1gb page without a backing memslot. */
+ __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);
+
+ r = _vcpu_run(vm, VCPU_ID);
+
+ /* Guest access to the 1gb page should trigger MMIO. */
+ TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
+ "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);
+
+ TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
+ "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);
+
+ /*
+ * Effect the CPUID change for the guest and re-enter the guest. Its
+ * access should now #PF due to the PAGE_SIZE bit being reserved or
+ * the resulting GPA being invalid. Note, kvm_get_supported_cpuid()
+ * returns the struct that contains the entry being modified. Eww.
+ */
+ *cpuid_reg = evil_cpuid_val;
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /*
+ * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
+ * KVM does not "officially" support mucking with CPUID after KVM_RUN,
+ * and will incorrectly reuse MMIO SPTEs. Don't delete the memslot!
+ * KVM x86 zaps all shadow pages on memslot deletion.
+ */
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ MMIO_GPA << 1, 10, 1, 0);
+
+ /* Set up a #PF handler to eat the RSVD #PF and signal all done! */
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);
+
+ r = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
+
+ cmd = get_ucall(vm, VCPU_ID, NULL);
+ TEST_ASSERT(cmd == UCALL_DONE,
+ "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
+ exit_reason_str(run->exit_reason), cmd);
+
+ /*
+ * Restore the happy CPUID value for the next test. Yes, changes are
+ * indeed persistent across VM destruction.
+ */
+ *cpuid_reg = good_cpuid_val;
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_cpuid_entry2 *entry;
+ int opt;
+
+ /*
+ * All tests are opt-in because TDP doesn't play nice with reserved #PF
+ * in the GVA->GPA translation. The hardware page walker doesn't let
+ * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
+ * the GVA on fault for performance reasons.
+ */
+ bool do_gbpages = false;
+ bool do_maxphyaddr = false;
+
+ setbuf(stdout, NULL);
+
+	while ((opt = getopt(argc, argv, "hgm")) != -1) {
+ switch (opt) {
+ case 'g':
+ do_gbpages = true;
+ break;
+ case 'm':
+ do_maxphyaddr = true;
+ break;
+ case 'h':
+ default:
+ printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
+ break;
+ }
+ }
+
+ if (!do_gbpages && !do_maxphyaddr) {
+ print_skip("No sub-tests selected");
+ return 0;
+ }
+
+ entry = kvm_get_supported_cpuid_entry(0x80000001);
+ if (!(entry->edx & CPUID_GBPAGES)) {
+ print_skip("1gb hugepages not supported");
+ return 0;
+ }
+
+ if (do_gbpages) {
+ pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
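+		/*
+		 * With GBPAGES cleared, the PS bit in the guest's 1gb PDPTE
+		 * becomes reserved, so the access should now take a RSVD #PF.
+		 */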
+ mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
+ }
+
+ if (do_maxphyaddr) {
+ pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
+ entry = kvm_get_supported_cpuid_entry(0x80000008);
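+		/*
+		 * CPUID.80000008H:EAX[7:0] is MAXPHYADDR; 0x20 (32) makes the
+		 * 4GiB MMIO_GPA exceed the addressable GPA range.
+		 */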
+ mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
index 12c558fc8074..ae76436af0cc 100644
--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
@@ -14,16 +14,12 @@
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "apic.h"
#define N_VCPU 2
#define VCPU_ID0 0
#define VCPU_ID1 1
-static uint32_t get_bsp_flag(void)
-{
- return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
-}
-
static void guest_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
@@ -94,7 +90,7 @@ static struct kvm_vm *create_vm(void)
pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, pages);
vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ kvm_vm_elf_load(vm, program_invocation_name);
vm_create_irqchip(vm);
return vm;
@@ -106,8 +102,6 @@ static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
else
vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
-
- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
}
static void run_vm_bsp(uint32_t bsp_vcpu)
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 613c42c5a9b8..c1f831803ad2 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -55,8 +55,8 @@ static inline void sync_with_host(uint64_t phase)
void self_smi(void)
{
- wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
- APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
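+	/* x2APIC registers are MSRs at 0x800 + (xAPIC MMIO offset >> 4). */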
+ x2apic_write_reg(APIC_ICR,
+ APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}
void guest_code(void *arg)
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index d672f0a473f8..fc03a150278d 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -24,6 +24,10 @@
#define UCALL_PIO_PORT ((uint16_t)0x1000)
+struct ucall uc_none = {
+ .cmd = UCALL_NONE,
+};
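+/*
+ * On x86, get_ucall() reads the ucall struct through the guest's RDI, so
+ * guest_code keeps RDI pointed at this benign UCALL_NONE ucall.
+ */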
+
/*
* ucall is embedded here to protect against compiler reshuffling registers
* before calling a function. In this test we only need to get KVM_EXIT_IO
@@ -34,7 +38,8 @@ void guest_code(void)
asm volatile("1: in %[port], %%al\n"
"add $0x1, %%rbx\n"
"jmp 1b"
- : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
+ : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
+ : "rax", "rbx");
}
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
index e357d8e222d4..5a6a662f2e59 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -18,15 +18,6 @@
#define rounded_rdmsr(x) ROUND(rdmsr(x))
#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, 0, x))
-#define GUEST_ASSERT_EQ(a, b) do { \
- __typeof(a) _a = (a); \
- __typeof(b) _b = (b); \
- if (_a != _b) \
- ucall(UCALL_ABORT, 4, \
- "Failed guest assert: " \
- #a " == " #b, __LINE__, _a, _b); \
- } while(0)
-
static void guest_code(void)
{
u64 val = 0;
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
index 72c0d0797522..e3e20e8848d0 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -574,7 +574,7 @@ static void test_msr_filter_allow(void) {
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
/* Process guest code userspace exits. */
run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
@@ -588,12 +588,12 @@ static void test_msr_filter_allow(void) {
run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
- vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+ vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
run_guest(vm);
- vm_handle_exception(vm, UD_VECTOR, NULL);
+ vm_install_exception_handler(vm, UD_VECTOR, NULL);
if (process_ucall(vm) != UCALL_DONE) {
- vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);
/* Process emulated rdmsr and wrmsr instructions. */
run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index d14888b34adb..d438c4d3228a 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -96,7 +96,7 @@ int main(int argc, char *argv[])
}
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
- prepare_virtualize_apic_accesses(vmx, vm, 0);
+ prepare_virtualize_apic_accesses(vmx, vm);
vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);
while (!done) {
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 537de1068554..06a64980a5d2 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -97,7 +97,7 @@ int main(int argc, char *argv[])
* Add an identity map for GVA range [0xc0000000, 0xc0002000). This
* affects both L1 and L2. However...
*/
- virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
+ virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
/*
* ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
@@ -107,9 +107,9 @@ int main(int argc, char *argv[])
* meaning after the last call to virt_map.
*/
prepare_eptp(vmx, vm, 0);
- nested_map_memslot(vmx, vm, 0, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+ nested_map_memslot(vmx, vm, 0);
+ nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
+ nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
bmap = bitmap_alloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
new file mode 100644
index 000000000000..280c01fd2412
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vmx_nested_tsc_scaling_test
+ *
+ * Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This test case verifies that nested TSC scaling behaves as expected when
+ * both L1 and L2 are scaled using different ratios. For this test we scale
+ * L1 down and scale L2 up.
+ */
+
+#include <time.h>
+
+#include "kvm_util.h"
+#include "vmx.h"
+#include "kselftest.h"
+
+
+#define VCPU_ID 0
+
+/* L2 is scaled up (from L1's perspective) by this factor */
+#define L2_SCALE_FACTOR 4ULL
+
+#define TSC_OFFSET_L2 ((uint64_t) -33125236320908)
+#define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48)
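+/*
+ * The VMX TSC multiplier is a fixed-point value with 48 fractional bits, so
+ * an integer factor must be shifted left by 48. The offset is an arbitrary
+ * constant; the test measures frequencies as TSC deltas, so it cancels out.
+ */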
+
+#define L2_GUEST_STACK_SIZE 64
+
+enum { USLEEP, UCHECK_L1, UCHECK_L2 };
+#define GUEST_SLEEP(sec) ucall(UCALL_SYNC, 2, USLEEP, sec)
+#define GUEST_CHECK(level, freq) ucall(UCALL_SYNC, 2, level, freq)
+
+
+/*
+ * This function checks whether the "actual" TSC frequency of a guest matches
+ * its expected frequency. In order to account for delays in taking the TSC
+ * measurements, a difference of 1% between the actual and the expected value
+ * is tolerated.
+ */
+static void compare_tsc_freq(uint64_t actual, uint64_t expected)
+{
+ uint64_t tolerance, thresh_low, thresh_high;
+
+ tolerance = expected / 100;
+ thresh_low = expected - tolerance;
+ thresh_high = expected + tolerance;
+
+ TEST_ASSERT(thresh_low < actual,
+ "TSC freq is expected to be between %"PRIu64" and %"PRIu64
+ " but it actually is %"PRIu64,
+ thresh_low, thresh_high, actual);
+ TEST_ASSERT(thresh_high > actual,
+ "TSC freq is expected to be between %"PRIu64" and %"PRIu64
+ " but it actually is %"PRIu64,
+ thresh_low, thresh_high, actual);
+}
+
+static void check_tsc_freq(int level)
+{
+ uint64_t tsc_start, tsc_end, tsc_freq;
+
+ /*
+ * Reading the TSC twice with about a second's difference should give
+ * us an approximation of the TSC frequency from the guest's
+ * perspective. Now, this won't be completely accurate, but it should
+ * be good enough for the purposes of this test.
+ */
+ tsc_start = rdmsr(MSR_IA32_TSC);
+ GUEST_SLEEP(1);
+ tsc_end = rdmsr(MSR_IA32_TSC);
+
+ tsc_freq = tsc_end - tsc_start;
+
+ GUEST_CHECK(level, tsc_freq);
+}
+
+static void l2_guest_code(void)
+{
+ check_tsc_freq(UCHECK_L2);
+
+ /* exit to L1 */
+ __asm__ __volatile__("vmcall");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ uint32_t control;
+
+ /* check that L1's frequency looks alright before launching L2 */
+ check_tsc_freq(UCHECK_L1);
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ /* prepare the VMCS for L2 execution */
+ prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* enable TSC offsetting and TSC scaling for L2 */
+ control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+ control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+
+ control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
+ control |= SECONDARY_EXEC_TSC_SCALING;
+ vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
+
+ vmwrite(TSC_OFFSET, TSC_OFFSET_L2);
+ vmwrite(TSC_MULTIPLIER, TSC_MULTIPLIER_L2);
+ vmwrite(TSC_MULTIPLIER_HIGH, TSC_MULTIPLIER_L2 >> 32);
+
+ /* launch L2 */
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+ /* check that L1's frequency still looks good */
+ check_tsc_freq(UCHECK_L1);
+
+ GUEST_DONE();
+}
+
+static void tsc_scaling_check_supported(void)
+{
+ if (!kvm_check_cap(KVM_CAP_TSC_CONTROL)) {
+ print_skip("TSC scaling not supported by the HW");
+ exit(KSFT_SKIP);
+ }
+}
+
+static void stable_tsc_check_supported(void)
+{
+ FILE *fp;
+ char buf[4];
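+	/* fgets() stores at most 3 chars + NUL, truncating "tsc\n" to "tsc". */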
+
+ fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
+ if (fp == NULL)
+ goto skip_test;
+
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto skip_test;
+
+ if (strncmp(buf, "tsc", sizeof(buf)))
+ goto skip_test;
+
+ return;
+skip_test:
+ print_skip("Kernel does not use TSC clocksource - assuming that host TSC is not stable");
+ exit(KSFT_SKIP);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ vm_vaddr_t vmx_pages_gva;
+
+ uint64_t tsc_start, tsc_end;
+ uint64_t tsc_khz;
+ uint64_t l1_scale_factor;
+ uint64_t l0_tsc_freq = 0;
+ uint64_t l1_tsc_freq = 0;
+ uint64_t l2_tsc_freq = 0;
+
+ nested_vmx_check_supported();
+ tsc_scaling_check_supported();
+ stable_tsc_check_supported();
+
+ /*
+ * We set L1's scale factor to be a random number from 2 to 10.
+ * Ideally we would do the same for L2's factor but that one is
+ * referenced by both main() and l1_guest_code() and using a global
+ * variable does not work.
+ */
+ srand(time(NULL));
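+	/* rand() % 9 yields 0..8, so the factor is in [2, 10] as stated. */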
+ l1_scale_factor = (rand() % 9) + 2;
+ printf("L1's scale down factor is: %"PRIu64"\n", l1_scale_factor);
+ printf("L2's scale up factor is: %llu\n", L2_SCALE_FACTOR);
+
+ tsc_start = rdtsc();
+ sleep(1);
+ tsc_end = rdtsc();
+
+ l0_tsc_freq = tsc_end - tsc_start;
+ printf("real TSC frequency is around: %"PRIu64"\n", l0_tsc_freq);
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ tsc_khz = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_TSC_KHZ, NULL);
+ TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
+
+ /* scale down L1's TSC frequency */
+ vcpu_ioctl(vm, VCPU_ID, KVM_SET_TSC_KHZ,
+ (void *) (tsc_khz / l1_scale_factor));
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *) uc.args[0]);
+ case UCALL_SYNC:
+ switch (uc.args[0]) {
+ case USLEEP:
+ sleep(uc.args[1]);
+ break;
+ case UCHECK_L1:
+ l1_tsc_freq = uc.args[1];
+ printf("L1's TSC frequency is around: %"PRIu64
+ "\n", l1_tsc_freq);
+
+ compare_tsc_freq(l1_tsc_freq,
+ l0_tsc_freq / l1_scale_factor);
+ break;
+ case UCHECK_L2:
+ l2_tsc_freq = uc.args[1];
+ printf("L2's TSC frequency is around: %"PRIu64
+ "\n", l2_tsc_freq);
+
+ compare_tsc_freq(l2_tsc_freq,
+ l1_tsc_freq * L2_SCALE_FACTOR);
+ break;
+ }
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
index 2f964cdc273c..afbbc40df884 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -42,8 +42,6 @@
#define HALTER_VCPU_ID 0
#define SENDER_VCPU_ID 1
-volatile uint32_t *apic_base = (volatile uint32_t *)APIC_DEFAULT_GPA;
-
/*
* Vector for IPI from sender vCPU to halting vCPU.
* Value is arbitrary and was chosen for the alternating bit pattern. Any
@@ -86,45 +84,6 @@ struct thread_params {
uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */
};
-uint32_t read_apic_reg(uint reg)
-{
- return apic_base[reg >> 2];
-}
-
-void write_apic_reg(uint reg, uint32_t val)
-{
- apic_base[reg >> 2] = val;
-}
-
-void disable_apic(void)
-{
- wrmsr(MSR_IA32_APICBASE,
- rdmsr(MSR_IA32_APICBASE) &
- ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD));
-}
-
-void enable_xapic(void)
-{
- uint64_t val = rdmsr(MSR_IA32_APICBASE);
-
- /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
- if (val & MSR_IA32_APICBASE_EXTD) {
- disable_apic();
- wrmsr(MSR_IA32_APICBASE,
- rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE);
- } else if (!(val & MSR_IA32_APICBASE_ENABLE)) {
- wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE);
- }
-
- /*
- * Per SDM: reset value of spurious interrupt vector register has the
- * APIC software enabled bit=0. It must be enabled in addition to the
- * enable bit in the MSR.
- */
- val = read_apic_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED;
- write_apic_reg(APIC_SPIV, val);
-}
-
void verify_apic_base_addr(void)
{
uint64_t msr = rdmsr(MSR_IA32_APICBASE);
@@ -136,10 +95,10 @@ void verify_apic_base_addr(void)
static void halter_guest_code(struct test_data_page *data)
{
verify_apic_base_addr();
- enable_xapic();
+ xapic_enable();
- data->halter_apic_id = GET_APIC_ID_FIELD(read_apic_reg(APIC_ID));
- data->halter_lvr = read_apic_reg(APIC_LVR);
+ data->halter_apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
+ data->halter_lvr = xapic_read_reg(APIC_LVR);
/*
* Loop forever HLTing and recording halts & wakes. Disable interrupts
@@ -150,8 +109,8 @@ static void halter_guest_code(struct test_data_page *data)
* TPR and PPR for diagnostic purposes in case the test fails.
*/
for (;;) {
- data->halter_tpr = read_apic_reg(APIC_TASKPRI);
- data->halter_ppr = read_apic_reg(APIC_PROCPRI);
+ data->halter_tpr = xapic_read_reg(APIC_TASKPRI);
+ data->halter_ppr = xapic_read_reg(APIC_PROCPRI);
data->hlt_count++;
asm volatile("sti; hlt; cli");
data->wake_count++;
@@ -166,7 +125,7 @@ static void halter_guest_code(struct test_data_page *data)
static void guest_ipi_handler(struct ex_regs *regs)
{
ipis_rcvd++;
- write_apic_reg(APIC_EOI, 77);
+ xapic_write_reg(APIC_EOI, 77);
}
static void sender_guest_code(struct test_data_page *data)
@@ -179,7 +138,7 @@ static void sender_guest_code(struct test_data_page *data)
uint64_t tsc_start;
verify_apic_base_addr();
- enable_xapic();
+ xapic_enable();
/*
* Init interrupt command register for sending IPIs
@@ -206,8 +165,8 @@ static void sender_guest_code(struct test_data_page *data)
* First IPI can be sent unconditionally because halter vCPU
* starts earlier.
*/
- write_apic_reg(APIC_ICR2, icr2_val);
- write_apic_reg(APIC_ICR, icr_val);
+ xapic_write_reg(APIC_ICR2, icr2_val);
+ xapic_write_reg(APIC_ICR, icr_val);
data->ipis_sent++;
/*
@@ -462,13 +421,13 @@ int main(int argc, char *argv[])
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
- vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler);
+ vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
- virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0);
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
- test_data_page_vaddr = vm_vaddr_alloc(vm, 0x1000, 0x1000, 0, 0);
+ test_data_page_vaddr = vm_vaddr_alloc_page(vm);
data =
(struct test_data_page *)addr_gva2hva(vm, test_data_page_vaddr);
memset(data, 0, sizeof(*data));
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 1f4a0599683c..117bf49a3d79 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -146,7 +146,7 @@ int main(int argc, char *argv[])
/* Map a region for the shared_info page */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0);
- virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2, 0);
+ virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2);
struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index 8389e0bfd711..adc94452b57c 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -103,7 +103,7 @@ int main(int argc, char *argv[])
/* Map a region for the hypercall pages */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
- virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0);
+ virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile
index a105f094676e..ee71fc99d5b5 100644
--- a/tools/testing/selftests/lib/Makefile
+++ b/tools/testing/selftests/lib/Makefile
@@ -4,6 +4,6 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh strscpy.sh
+TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh scanf.sh strscpy.sh
include ../lib.mk
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index b80ee3f6e265..645839b50b0a 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,4 +1,5 @@
CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_PRIME_NUMBERS=m
CONFIG_TEST_STRSCPY=m
diff --git a/tools/testing/selftests/lib/scanf.sh b/tools/testing/selftests/lib/scanf.sh
new file mode 100755
index 000000000000..b59b8ba561c3
--- /dev/null
+++ b/tools/testing/selftests/lib/scanf.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Tests the scanf infrastructure using test_scanf kernel module.
+$(dirname $0)/../kselftest/module.sh "scanf" test_scanf
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index 4e94e566e040..f31205f04ee0 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -136,6 +136,10 @@ struct mount_attr {
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
+#ifndef MOUNT_ATTR_NOSYMFOLLOW
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000
+#endif
+
static inline int sys_mount_setattr(int dfd, const char *path, unsigned int flags,
struct mount_attr *attr, size_t size)
{
@@ -235,6 +239,10 @@ static int prepare_unpriv_mountns(void)
return 0;
}
+#ifndef ST_NOSYMFOLLOW
+#define ST_NOSYMFOLLOW 0x2000 /* do not follow symlinks */
+#endif
+
static int read_mnt_flags(const char *path)
{
int ret;
@@ -245,9 +253,9 @@ static int read_mnt_flags(const char *path)
if (ret != 0)
return -EINVAL;
- if (stat.f_flag &
- ~(ST_RDONLY | ST_NOSUID | ST_NODEV | ST_NOEXEC | ST_NOATIME |
- ST_NODIRATIME | ST_RELATIME | ST_SYNCHRONOUS | ST_MANDLOCK))
+ if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | ST_NOEXEC |
+ ST_NOATIME | ST_NODIRATIME | ST_RELATIME |
+ ST_SYNCHRONOUS | ST_MANDLOCK | ST_NOSYMFOLLOW))
return -EINVAL;
mnt_flags = 0;
@@ -269,6 +277,8 @@ static int read_mnt_flags(const char *path)
mnt_flags |= MS_SYNCHRONOUS;
if (stat.f_flag & ST_MANDLOCK)
mnt_flags |= ST_MANDLOCK;
+ if (stat.f_flag & ST_NOSYMFOLLOW)
+ mnt_flags |= ST_NOSYMFOLLOW;
return mnt_flags;
}
@@ -368,8 +378,13 @@ static bool mount_setattr_supported(void)
FIXTURE(mount_setattr) {
};
+#define NOSYMFOLLOW_TARGET "/mnt/A/AA/data"
+#define NOSYMFOLLOW_SYMLINK "/mnt/A/AA/symlink"
+
FIXTURE_SETUP(mount_setattr)
{
+ int fd = -EBADF;
+
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
@@ -412,6 +427,11 @@ FIXTURE_SETUP(mount_setattr)
ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",
MS_RELATIME | MS_NOEXEC | MS_RDONLY, 0), 0);
+
+	fd = creat(NOSYMFOLLOW_TARGET, 0644);
+ ASSERT_GT(fd, 0);
+ ASSERT_EQ(symlink(NOSYMFOLLOW_TARGET, NOSYMFOLLOW_SYMLINK), 0);
+ ASSERT_EQ(close(fd), 0);
}
FIXTURE_TEARDOWN(mount_setattr)
@@ -1421,4 +1441,66 @@ TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/BB/b", 0, 0, 0), 0);
}
+TEST_F(mount_setattr, mount_attr_nosymfollow)
+{
+ int fd;
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_NOSYMFOLLOW,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
+ ASSERT_GT(fd, 0);
+ ASSERT_EQ(close(fd), 0);
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags = old_flags;
+ expected_flags |= ST_NOSYMFOLLOW;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
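+	/* With nosymfollow set, traversing the symlink must now fail ELOOP. */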
+ fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, ELOOP);
+
+ attr.attr_set &= ~MOUNT_ATTR_NOSYMFOLLOW;
+ attr.attr_clr |= MOUNT_ATTR_NOSYMFOLLOW;
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags &= ~ST_NOSYMFOLLOW;
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ fd = open(NOSYMFOLLOW_SYMLINK, O_RDWR | O_CLOEXEC);
+ ASSERT_GT(fd, 0);
+ ASSERT_EQ(close(fd), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/nci/.gitignore b/tools/testing/selftests/nci/.gitignore
new file mode 100644
index 000000000000..448eeb4590fc
--- /dev/null
+++ b/tools/testing/selftests/nci/.gitignore
@@ -0,0 +1 @@
+/nci_dev
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 61ae899cfc17..19deb9cdf72f 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -30,3 +30,4 @@ hwtstamp_config
rxtimestamp
timestamping
txtimestamp
+so_netns_cookie
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 3915bb7bfc39..79c9eb0034d5 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -30,7 +30,7 @@ TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
-TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
+TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr so_netns_cookie
TEST_GEN_FILES += tcp_fastopen_backup_key
TEST_GEN_FILES += fin_ack_lat
TEST_GEN_FILES += reuseaddr_ports_exhausted
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 614d5477365a..6f905b53904f 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,4 +1,5 @@
CONFIG_USER_NS=y
+CONFIG_NET_NS=y
CONFIG_BPF_SYSCALL=y
CONFIG_TEST_BPF=m
CONFIG_NUMA=y
diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
index 834066d465fc..2b5d6ff87373 100755
--- a/tools/testing/selftests/net/devlink_port_split.py
+++ b/tools/testing/selftests/net/devlink_port_split.py
@@ -18,6 +18,8 @@ import sys
#
+# Kselftest framework requirement - SKIP code is 4
+KSFT_SKIP=4
Port = collections.namedtuple('Port', 'bus_info name')
@@ -239,7 +241,11 @@ def main(cmdline=None):
assert stderr == ""
devs = json.loads(stdout)['dev']
- dev = list(devs.keys())[0]
+ if devs:
+ dev = list(devs.keys())[0]
+ else:
+ print("no devlink device was found, test skipped")
+ sys.exit(KSFT_SKIP)
cmd = "devlink dev show %s" % dev
stdout, stderr = run_command(cmd)
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 49774a8a7736..0d293391e9a4 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -925,6 +925,14 @@ ipv6_fcnal_runtime()
run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
+	# route cannot use prefsrc with nexthops
+ run_cmd "$IP ro add 2001:db8:101::2/128 nhid 86 from 2001:db8:91::1"
+	log_test $? 2 "IPv6 route cannot use src routing with external nexthop"
+
+ # check cleanup path on invalid metric
+ run_cmd "$IP ro add 2001:db8:101::2/128 nhid 86 congctl lock foo"
+ log_test $? 2 "IPv6 route with invalid metric"
+
# rpfilter and default route
$IP nexthop flush >/dev/null 2>&1
run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
@@ -1366,6 +1374,10 @@ ipv4_fcnal_runtime()
run_cmd "$IP nexthop replace id 22 via 172.16.2.2 dev veth3"
log_test $? 2 "Nexthop replace with invalid scope for existing route"
+ # check cleanup path on invalid metric
+ run_cmd "$IP ro add 172.16.101.2/32 nhid 22 congctl lock foo"
+ log_test $? 2 "IPv4 route with invalid metric"
+
#
# add route with nexthop and check traffic
#
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 76d9487fb03c..5abe92d55b69 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
ipv4_rt_replace_mpath
}
+# checks that cached input route on VRF port is deleted
+# when VRF is deleted
+ipv4_local_rt_cache()
+{
+ run_cmd "ip addr add 10.0.0.1/32 dev lo"
+ run_cmd "ip netns add test-ns"
+ run_cmd "ip link add veth-outside type veth peer name veth-inside"
+ run_cmd "ip link add vrf-100 type vrf table 1100"
+ run_cmd "ip link set veth-outside master vrf-100"
+ run_cmd "ip link set veth-inside netns test-ns"
+ run_cmd "ip link set veth-outside up"
+ run_cmd "ip link set vrf-100 up"
+ run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
+ run_cmd "ip netns exec test-ns ip link set veth-inside up"
+ run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
+ run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
+ run_cmd "ip link delete vrf-100"
+
+	# if we do not hang, the test is a success
+ log_test $? 0 "Cached route removed from VRF port device"
+}
+
ipv4_route_test()
{
route_setup
ipv4_rt_add
ipv4_rt_replace
+ ipv4_local_rt_cache
route_cleanup
}
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
new file mode 100755
index 000000000000..a15d21dc035a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -0,0 +1,364 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution between two paths when using a custom hash policy.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------------+
+# | SW1 | |
+# | $rp1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# | |
+# | $rp11 + + $rp12 |
+# | 192.0.2.1/28 | | 192.0.2.17/28 |
+# | 2001:db8:2::1/64 | | 2001:db8:3::1/64 |
+# +------------------|-------------|------------------+
+# | |
+# +------------------|-------------|------------------+
+# | SW2 | | |
+# | | | |
+# | $rp21 + + $rp22 |
+# | 192.0.2.2/28 192.0.2.18/28 |
+# | 2001:db8:2::2/64 2001:db8:3::2/64 |
+# | |
+# | |
+# | $rp2 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:4::1/64 | |
+# +-------------------------|-------------------------+
+# |
+# +-------------------------|------+
+# | H2 | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:4::{2-fd}/64 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=8
+source lib.sh
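+
+# The fib_multipath_hash_fields values used below are the field bitmasks from
+# Documentation/networking/ip-sysctl.rst: 0x0001 source IP, 0x0002 destination
+# IP, 0x0008 flowlabel, 0x0010 source port and 0x0020 destination port.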
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $rp1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $rp11 v$rp1 192.0.2.1/28 2001:db8:2::1/64
+ __simple_if_init $rp12 v$rp1 192.0.2.17/28 2001:db8:3::1/64
+
+ ip route add vrf v$rp1 203.0.113.0/24 \
+ nexthop via 192.0.2.2 dev $rp11 \
+ nexthop via 192.0.2.18 dev $rp12
+
+ ip -6 route add vrf v$rp1 2001:db8:4::/64 \
+ nexthop via 2001:db8:2::2 dev $rp11 \
+ nexthop via 2001:db8:3::2 dev $rp12
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$rp1 2001:db8:4::/64
+
+ ip route del vrf v$rp1 203.0.113.0/24
+
+ __simple_if_fini $rp12 192.0.2.17/28 2001:db8:3::1/64
+ __simple_if_fini $rp11 192.0.2.1/28 2001:db8:2::1/64
+ simple_if_fini $rp1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $rp2 203.0.113.1/24 2001:db8:4::1/64
+ __simple_if_init $rp21 v$rp2 192.0.2.2/28 2001:db8:2::2/64
+ __simple_if_init $rp22 v$rp2 192.0.2.18/28 2001:db8:3::2/64
+
+ ip route add vrf v$rp2 198.51.100.0/24 \
+ nexthop via 192.0.2.1 dev $rp21 \
+ nexthop via 192.0.2.17 dev $rp22
+
+ ip -6 route add vrf v$rp2 2001:db8:1::/64 \
+ nexthop via 2001:db8:2::1 dev $rp21 \
+ nexthop via 2001:db8:3::1 dev $rp22
+}
+
+sw2_destroy()
+{
+ ip -6 route del vrf v$rp2 2001:db8:1::/64
+
+ ip route del vrf v$rp2 198.51.100.0/24
+
+ __simple_if_fini $rp22 192.0.2.18/28 2001:db8:3::2/64
+ __simple_if_fini $rp21 192.0.2.2/28 2001:db8:2::2/64
+ simple_if_fini $rp2 203.0.113.1/24 2001:db8:4::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:4::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:4::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:4::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ rp1=${NETIFS[p2]}
+
+ rp11=${NETIFS[p3]}
+ rp21=${NETIFS[p4]}
+
+ rp12=${NETIFS[p5]}
+ rp22=${NETIFS[p6]}
+
+ rp2=${NETIFS[p7]}
+
+ h2=${NETIFS[p8]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:4::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:4::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_rp11=$(link_stats_tx_packets_get $rp11)
+ local t0_rp12=$(link_stats_tx_packets_get $rp12)
+
+ $send_flows
+
+ local t1_rp11=$(link_stats_tx_packets_get $rp11)
+ local t1_rp12=$(link_stats_tx_packets_get $rp12)
+
+ local d_rp11=$((t1_rp11 - t0_rp11))
+ local d_rp12=$((t1_rp12 - t0_rp12))
+
+ local diff=$((d_rp12 - d_rp11))
+ local sum=$((d_rp11 + d_rp12))
+
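+	# Consider the paths balanced if they differ by no more than 20% of the
+	# total traffic.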
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d_rp11 / $d_rp12"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 custom multipath hash tests"
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 3
+
+	# Prevent the neighbour table from overflowing, as different neighbour
+	# entries will be created on $rp2 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0001
+ custom_hash_test "Source IP" "balanced" send_src_ipv4
+ custom_hash_test "Source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0002
+ custom_hash_test "Destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0010
+ custom_hash_test "Source port" "balanced" send_src_udp4
+ custom_hash_test "Source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0020
+ custom_hash_test "Destination port" "balanced" send_dst_udp4
+ custom_hash_test "Destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 custom multipath hash tests"
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 3
+
+	# Prevent the neighbour table from overflowing, as different neighbour
+	# entries will be created on $rp2 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0001
+ custom_hash_test "Source IP" "balanced" send_src_ipv6
+ custom_hash_test "Source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0002
+ custom_hash_test "Destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0008
+ custom_hash_test "Flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0010
+ custom_hash_test "Source port" "balanced" send_src_udp6
+ custom_hash_test "Source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0020
+ custom_hash_test "Destination port" "balanced" send_dst_udp6
+ custom_hash_test "Destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+ # Each time set a different field and make sure traffic is only
+ # distributed when the field is changed in the packet stream.
+ custom_hash_v4
+ custom_hash_v6
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 9c12c4fd3afc..13d3d4428a32 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -18,6 +18,12 @@ if [[ ! -v DEVLINK_DEV ]]; then
DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
-n | cut -d" " -f3)
+elif [[ ! -z "$DEVLINK_DEV" ]]; then
+ devlink dev show $DEVLINK_DEV &> /dev/null
+ if [ $? -ne 0 ]; then
+ echo "SKIP: devlink device \"$DEVLINK_DEV\" not found"
+ exit 1
+ fi
fi
##############################################################################
@@ -318,6 +324,14 @@ devlink_trap_rx_bytes_get()
| jq '.[][][]["stats"]["rx"]["bytes"]'
}
+devlink_trap_drop_packets_get()
+{
+ local trap_name=$1; shift
+
+ devlink -js trap show $DEVLINK_DEV trap $trap_name \
+ | jq '.[][][]["stats"]["rx"]["dropped"]'
+}
+
devlink_trap_stats_idle_test()
{
local trap_name=$1; shift
@@ -339,6 +353,24 @@ devlink_trap_stats_idle_test()
fi
}
+devlink_trap_drop_stats_idle_test()
+{
+ local trap_name=$1; shift
+	local t0_packets t1_packets
+
+ t0_packets=$(devlink_trap_drop_packets_get $trap_name)
+
+ sleep 1
+
+ t1_packets=$(devlink_trap_drop_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
devlink_traps_enable_all()
{
local trap_name
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
new file mode 100755
index 000000000000..a73f52efcb6c
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -0,0 +1,456 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution when there are multiple paths between the
+# endpoints of an IPv4 GRE tunnel. The tunnel carries IPv4 and IPv6 traffic
+# between multiple hosts.
+# Multiple routes are in the underlay network. With the default multipath
+# policy, SW2 will only look at the outer IP addresses, hence only a single
+# route would be used.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# | + g1 (gre) |
+# | loc=192.0.2.1 |
+# | rem=192.0.2.2 --. |
+# | tos=inherit | |
+# | v |
+# | + $ul1 |
+# | | 192.0.2.17/28 |
+# +---------------------|----------------------+
+# |
+# +---------------------|----------------------+
+# | SW2 | |
+# | $ul21 + |
+# | 192.0.2.18/28 | |
+# | | |
+# |  __________________+___ |
+# | / \ |
+# | | | |
+# | + $ul22.111 (vlan) + $ul22.222 (vlan) |
+# | | 192.0.2.33/28 | 192.0.2.49/28 |
+# | | | |
+# +--|----------------------|------------------+
+# | |
+# +--|----------------------|------------------+
+# | | | |
+# | + $ul32.111 (vlan) + $ul32.222 (vlan) |
+# | | 192.0.2.34/28 | 192.0.2.50/28 |
+# | | | |
+# | \__________________+___/ |
+# | | |
+# | | |
+# | $ul31 + |
+# | 192.0.2.65/28 | SW3 |
+# +---------------------|----------------------+
+# |
+# +---------------------|----------------------+
+# | + $ul4 |
+# | ^ 192.0.2.66/28 |
+# | | |
+# | + g2 (gre) | |
+# | loc=192.0.2.2 | |
+# | rem=192.0.2.1 --' |
+# | tos=inherit |
+# | |
+# | $ol4 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:2::1/64 | SW4 |
+# +-------------------------|------------------+
+# |
+# +-------------------------|------+
+# | | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:2::{2-fd}/64 H2 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=10
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $ul1 v$ol1 192.0.2.17/28
+
+ tunnel_create g1 gre 192.0.2.1 192.0.2.2 tos inherit dev v$ol1
+ __simple_if_init g1 v$ol1 192.0.2.1/32
+ ip route add vrf v$ol1 192.0.2.2/32 via 192.0.2.18
+
+ ip route add vrf v$ol1 203.0.113.0/24 dev g1
+ ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 203.0.113.0/24
+
+ ip route del vrf v$ol1 192.0.2.2/32
+ __simple_if_fini g1 192.0.2.1/32
+ tunnel_destroy g1
+
+ __simple_if_fini $ul1 192.0.2.17/28
+ simple_if_fini $ol1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $ul21 192.0.2.18/28
+ __simple_if_init $ul22 v$ul21
+ vlan_create $ul22 111 v$ul21 192.0.2.33/28
+ vlan_create $ul22 222 v$ul21 192.0.2.49/28
+
+ ip route add vrf v$ul21 192.0.2.1/32 via 192.0.2.17
+ ip route add vrf v$ul21 192.0.2.2/32 \
+ nexthop via 192.0.2.34 \
+ nexthop via 192.0.2.50
+}
+
+sw2_destroy()
+{
+ ip route del vrf v$ul21 192.0.2.2/32
+ ip route del vrf v$ul21 192.0.2.1/32
+
+ vlan_destroy $ul22 222
+ vlan_destroy $ul22 111
+ __simple_if_fini $ul22
+ simple_if_fini $ul21 192.0.2.18/28
+}
+
+sw3_create()
+{
+ simple_if_init $ul31 192.0.2.65/28
+ __simple_if_init $ul32 v$ul31
+ vlan_create $ul32 111 v$ul31 192.0.2.34/28
+ vlan_create $ul32 222 v$ul31 192.0.2.50/28
+
+ ip route add vrf v$ul31 192.0.2.2/32 via 192.0.2.66
+ ip route add vrf v$ul31 192.0.2.1/32 \
+ nexthop via 192.0.2.33 \
+ nexthop via 192.0.2.49
+
+ tc qdisc add dev $ul32 clsact
+ tc filter add dev $ul32 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul32 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
+}
+
+sw3_destroy()
+{
+ tc qdisc del dev $ul32 clsact
+
+ ip route del vrf v$ul31 192.0.2.1/32
+ ip route del vrf v$ul31 192.0.2.2/32
+
+ vlan_destroy $ul32 222
+ vlan_destroy $ul32 111
+ __simple_if_fini $ul32
+ simple_if_fini $ul31 192.0.2.65/28
+}
+
+sw4_create()
+{
+ simple_if_init $ol4 203.0.113.1/24 2001:db8:2::1/64
+ __simple_if_init $ul4 v$ol4 192.0.2.66/28
+
+ tunnel_create g2 gre 192.0.2.2 192.0.2.1 tos inherit dev v$ol4
+ __simple_if_init g2 v$ol4 192.0.2.2/32
+ ip route add vrf v$ol4 192.0.2.1/32 via 192.0.2.65
+
+ ip route add vrf v$ol4 198.51.100.0/24 dev g2
+ ip -6 route add vrf v$ol4 2001:db8:1::/64 dev g2
+}
+
+sw4_destroy()
+{
+ ip -6 route del vrf v$ol4 2001:db8:1::/64
+ ip route del vrf v$ol4 198.51.100.0/24
+
+ ip route del vrf v$ol4 192.0.2.1/32
+ __simple_if_fini g2 192.0.2.2/32
+ tunnel_destroy g2
+
+ __simple_if_fini $ul4 192.0.2.66/28
+ simple_if_fini $ol4 203.0.113.1/24 2001:db8:2::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:2::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:2::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ ol1=${NETIFS[p2]}
+ ul1=${NETIFS[p3]}
+
+ ul21=${NETIFS[p4]}
+ ul22=${NETIFS[p5]}
+
+ ul32=${NETIFS[p6]}
+ ul31=${NETIFS[p7]}
+
+ ul4=${NETIFS[p8]}
+ ol4=${NETIFS[p9]}
+
+ h2=${NETIFS[p10]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ sw3_create
+ sw4_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw4_destroy
+ sw3_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ $send_flows
+
+ local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+
+ local diff=$((d222 - d111))
+ local sum=$((d111 + d222))
+
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d111 / $d222"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv4
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp4
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp4
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv6
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0200
+ custom_hash_test "Inner flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Inner flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp6
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv4.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp6
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+ # Each time set a different field and make sure traffic is only
+ # distributed when the field is changed in the packet stream.
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 3
+
+ custom_hash_v4
+ custom_hash_v6
+
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
new file mode 100755
index 000000000000..8fea2c2e0b25
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -0,0 +1,458 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test traffic distribution when there are multiple paths between the
+# endpoints of an IPv6 GRE tunnel. The tunnel carries IPv4 and IPv6 traffic
+# between multiple hosts.
+# Multiple routes are in the underlay network. With the default multipath
+# policy, SW2 will only look at the outer IP addresses, hence only a single
+# route would be used.
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.{2-253}/24 | |
+# | 2001:db8:1::{2-fd}/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 198.51.100.1/24 |
+# | 2001:db8:1::1/64 |
+# | |
+# |+ g1 (ip6gre) |
+# | loc=2001:db8:3::1 |
+# | rem=2001:db8:3::2 -. |
+# | tos=inherit | |
+# | v |
+# | + $ul1 |
+# | | 2001:db8:10::1/64 |
+# +---------------------|-----------------------+
+# |
+# +---------------------|-----------------------+
+# | SW2 | |
+# | $ul21 + |
+# | 2001:db8:10::2/64 | |
+# | | |
+# | __________________+___ |
+# | / \ |
+# | | | |
+# | + $ul22.111 (vlan) + $ul22.222 (vlan) |
+# | | 2001:db8:11::1/64 | 2001:db8:12::1/64 |
+# | | | |
+# +--|----------------------|-------------------+
+# | |
+# +--|----------------------|-------------------+
+# | | | |
+# | + $ul32.111 (vlan) + $ul32.222 (vlan) |
+# | | 2001:db8:11::2/64 | 2001:db8:12::2/64 |
+# | | | |
+# | \__________________+___/ |
+# | | |
+# | | |
+# | $ul31 + |
+# | 2001:db8:13::1/64 | SW3 |
+# +---------------------|-----------------------+
+# |
+# +---------------------|-----------------------+
+# | + $ul4 |
+# | ^ 2001:db8:13::2/64 |
+# | | |
+# |+ g2 (ip6gre) | |
+# | loc=2001:db8:3::2 | |
+# | rem=2001:db8:3::1 -' |
+# | tos=inherit |
+# | |
+# | $ol4 + |
+# | 203.0.113.1/24 | |
+# | 2001:db8:2::1/64 | SW4 |
+# +-------------------------|-------------------+
+# |
+# +-------------------------|------+
+# | | |
+# | $h2 + |
+# | 203.0.113.{2-253}/24 |
+# | 2001:db8:2::{2-fd}/64 H2 |
+# +--------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ custom_hash
+"
+
+NUM_NETIFS=10
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/24 2001:db8:1::2/64
+ ip route add vrf v$h1 default via 198.51.100.1 dev $h1
+ ip -6 route add vrf v$h1 default via 2001:db8:1::1 dev $h1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 default
+ ip route del vrf v$h1 default
+ simple_if_fini $h1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 198.51.100.1/24 2001:db8:1::1/64
+ __simple_if_init $ul1 v$ol1 2001:db8:10::1/64
+
+ tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ dev v$ol1
+ __simple_if_init g1 v$ol1 2001:db8:3::1/128
+ ip route add vrf v$ol1 2001:db8:3::2/128 via 2001:db8:10::2
+
+ ip route add vrf v$ol1 203.0.113.0/24 dev g1
+ ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1
+}
+
+sw1_destroy()
+{
+ ip -6 route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 203.0.113.0/24
+
+ ip route del vrf v$ol1 2001:db8:3::2/128
+ __simple_if_fini g1 2001:db8:3::1/128
+ tunnel_destroy g1
+
+ __simple_if_fini $ul1 2001:db8:10::1/64
+ simple_if_fini $ol1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+sw2_create()
+{
+ simple_if_init $ul21 2001:db8:10::2/64
+ __simple_if_init $ul22 v$ul21
+ vlan_create $ul22 111 v$ul21 2001:db8:11::1/64
+ vlan_create $ul22 222 v$ul21 2001:db8:12::1/64
+
+ ip -6 route add vrf v$ul21 2001:db8:3::1/128 via 2001:db8:10::1
+ ip -6 route add vrf v$ul21 2001:db8:3::2/128 \
+ nexthop via 2001:db8:11::2 \
+ nexthop via 2001:db8:12::2
+}
+
+sw2_destroy()
+{
+ ip -6 route del vrf v$ul21 2001:db8:3::2/128
+ ip -6 route del vrf v$ul21 2001:db8:3::1/128
+
+ vlan_destroy $ul22 222
+ vlan_destroy $ul22 111
+ __simple_if_fini $ul22
+ simple_if_fini $ul21 2001:db8:10::2/64
+}
+
+sw3_create()
+{
+ simple_if_init $ul31 2001:db8:13::1/64
+ __simple_if_init $ul32 v$ul31
+ vlan_create $ul32 111 v$ul31 2001:db8:11::2/64
+ vlan_create $ul32 222 v$ul31 2001:db8:12::2/64
+
+ ip -6 route add vrf v$ul31 2001:db8:3::2/128 via 2001:db8:13::2
+ ip -6 route add vrf v$ul31 2001:db8:3::1/128 \
+ nexthop via 2001:db8:11::1 \
+ nexthop via 2001:db8:12::1
+
+ tc qdisc add dev $ul32 clsact
+ tc filter add dev $ul32 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul32 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
+}
+
+sw3_destroy()
+{
+ tc qdisc del dev $ul32 clsact
+
+ ip -6 route del vrf v$ul31 2001:db8:3::1/128
+ ip -6 route del vrf v$ul31 2001:db8:3::2/128
+
+ vlan_destroy $ul32 222
+ vlan_destroy $ul32 111
+ __simple_if_fini $ul32
+ simple_if_fini $ul31 2001:db8:13::1/64
+}
+
+sw4_create()
+{
+ simple_if_init $ol4 203.0.113.1/24 2001:db8:2::1/64
+ __simple_if_init $ul4 v$ol4 2001:db8:13::2/64
+
+ tunnel_create g2 ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \
+ dev v$ol4
+ __simple_if_init g2 v$ol4 2001:db8:3::2/128
+ ip -6 route add vrf v$ol4 2001:db8:3::1/128 via 2001:db8:13::1
+
+ ip route add vrf v$ol4 198.51.100.0/24 dev g2
+ ip -6 route add vrf v$ol4 2001:db8:1::/64 dev g2
+}
+
+sw4_destroy()
+{
+ ip -6 route del vrf v$ol4 2001:db8:1::/64
+ ip route del vrf v$ol4 198.51.100.0/24
+
+ ip -6 route del vrf v$ol4 2001:db8:3::1/128
+ __simple_if_fini g2 2001:db8:3::2/128
+ tunnel_destroy g2
+
+ __simple_if_fini $ul4 2001:db8:13::2/64
+ simple_if_fini $ol4 203.0.113.1/24 2001:db8:2::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.2/24 2001:db8:2::2/64
+ ip route add vrf v$h2 default via 203.0.113.1 dev $h2
+ ip -6 route add vrf v$h2 default via 2001:db8:2::1 dev $h2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 default
+ ip route del vrf v$h2 default
+ simple_if_fini $h2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+
+ ol1=${NETIFS[p2]}
+ ul1=${NETIFS[p3]}
+
+ ul21=${NETIFS[p4]}
+ ul22=${NETIFS[p5]}
+
+ ul32=${NETIFS[p6]}
+ ul31=${NETIFS[p7]}
+
+ ul4=${NETIFS[p8]}
+ ol4=${NETIFS[p9]}
+
+ h2=${NETIFS[p10]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ sw3_create
+ sw4_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ sw4_destroy
+ sw3_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 203.0.113.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+send_src_ipv4()
+{
+ $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_src_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp4()
+{
+ $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+send_src_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_dst_ipv6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+}
+
+send_flowlabel()
+{
+ # Generate 16384 echo requests, each with a random flow label.
+ for _ in $(seq 1 16384); do
+ ip vrf exec v$h1 \
+ $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
+ done
+}
+
+send_src_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=0-32768,dp=30000"
+}
+
+send_dst_udp6()
+{
+ $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ -d 1msec -t udp "sp=20000,dp=0-32768"
+}
+
+custom_hash_test()
+{
+ local field="$1"; shift
+ local balanced="$1"; shift
+ local send_flows="$@"
+
+ RET=0
+
+ local t0_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ $send_flows
+
+ local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul32 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+
+ local diff=$((d222 - d111))
+ local sum=$((d111 + d222))
+
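+	# Consider the traffic balanced when the difference between the two
+	# paths stays within 20% of the total, e.g. d111=40 and d222=60 gives
+	# pct=20, which still counts as balanced.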
+ local pct=$(echo "$diff / $sum * 100" | bc -l)
+ local is_balanced=$(echo "-20 <= $pct && $pct <= 20" | bc)
+
+ [[ ( $is_balanced -eq 1 && $balanced == "balanced" ) ||
+ ( $is_balanced -eq 0 && $balanced == "unbalanced" ) ]]
+ check_err $? "Expected traffic to be $balanced, but it is not"
+
+ log_test "Multipath hash field: $field ($balanced)"
+ log_info "Packets sent on path1 / path2: $d111 / $d222"
+}
+
+custom_hash_v4()
+{
+ log_info "Running IPv4 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv4.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv4.neigh.default.gc_thresh3 1024
+
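+	# The multipath decision is made on the IPv6 underlay routes, so the
+	# IPv6 hash-fields sysctl applies even though the overlay traffic here
+	# is IPv4.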
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv4
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv4
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp4
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp4
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp4
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp4
+
+ sysctl_restore net.ipv4.neigh.default.gc_thresh3
+ sysctl_restore net.ipv4.neigh.default.gc_thresh2
+ sysctl_restore net.ipv4.neigh.default.gc_thresh1
+}
+
+custom_hash_v6()
+{
+ log_info "Running IPv6 overlay custom multipath hash tests"
+
+ # Prevent the neighbour table from overflowing, as different neighbour
+ # entries will be created on $ol4 when using different destination IPs.
+ sysctl_set net.ipv6.neigh.default.gc_thresh1 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh2 1024
+ sysctl_set net.ipv6.neigh.default.gc_thresh3 1024
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0040
+ custom_hash_test "Inner source IP" "balanced" send_src_ipv6
+ custom_hash_test "Inner source IP" "unbalanced" send_dst_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0080
+ custom_hash_test "Inner destination IP" "balanced" send_dst_ipv6
+ custom_hash_test "Inner destination IP" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0200
+ custom_hash_test "Inner flowlabel" "balanced" send_flowlabel
+ custom_hash_test "Inner flowlabel" "unbalanced" send_src_ipv6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0400
+ custom_hash_test "Inner source port" "balanced" send_src_udp6
+ custom_hash_test "Inner source port" "unbalanced" send_dst_udp6
+
+ sysctl_set net.ipv6.fib_multipath_hash_fields 0x0800
+ custom_hash_test "Inner destination port" "balanced" send_dst_udp6
+ custom_hash_test "Inner destination port" "unbalanced" send_src_udp6
+
+ sysctl_restore net.ipv6.neigh.default.gc_thresh3
+ sysctl_restore net.ipv6.neigh.default.gc_thresh2
+ sysctl_restore net.ipv6.neigh.default.gc_thresh1
+}
+
+custom_hash()
+{
+ # Test that when the hash policy is set to custom, traffic is
+ # distributed only according to the fields set in the
+ # fib_multipath_hash_fields sysctl.
+ #
+	# Each test sets a different field and verifies that traffic is
+	# distributed only when that field varies in the packet stream.
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 3
+
+ custom_hash_v4
+ custom_hash_v6
+
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
index 55eeacf59241..64fbd211d907 100755
--- a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
+++ b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
@@ -75,7 +75,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact
+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
index 5f20d289ee43..10e594c55117 100755
--- a/tools/testing/selftests/net/forwarding/pedit_l4port.sh
+++ b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
@@ -71,7 +71,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact
+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/net/forwarding/skbedit_priority.sh b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
index e3bd8a6bb8b4..bde11dc27873 100755
--- a/tools/testing/selftests/net/forwarding/skbedit_priority.sh
+++ b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
@@ -72,7 +72,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact
+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/net/icmp.sh b/tools/testing/selftests/net/icmp.sh
new file mode 100755
index 000000000000..e4b04cd1644a
--- /dev/null
+++ b/tools/testing/selftests/net/icmp.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for checking ICMP response with dummy address instead of 0.0.0.0.
+# Sets up two namespaces like:
+# +----------------------+ +--------------------+
+# | ns1 | v4-via-v6 routes: | ns2 |
+# | | ' | |
+# | +--------+ -> 172.16.1.0/24 -> +--------+ |
+# | | veth0 +--------------------------+ veth0 | |
+# | +--------+ <- 172.16.0.0/24 <- +--------+ |
+# | 172.16.0.1 | | 2001:db8:1::2/64 |
+# | 2001:db8:1::1/64 | | |
+# +----------------------+ +--------------------+
+#
+# And then tries to ping 172.16.1.1 from ns1. This results in a "net
+# unreachable" message being sent from ns2, but there is no IPv4 address set in
+# that address space, so the kernel should substitute the dummy address
+# 192.0.0.8 defined in RFC7600.
+
+NS1=ns1
+NS2=ns2
+H1_IP=172.16.0.1/32
+H1_IP6=2001:db8:1::1
+RT1=172.16.1.0/24
+PINGADDR=172.16.1.1
+RT2=172.16.0.0/24
+H2_IP6=2001:db8:1::2
+
+TMPFILE=$(mktemp)
+
+cleanup()
+{
+ rm -f "$TMPFILE"
+ ip netns del $NS1
+ ip netns del $NS2
+}
+
+trap cleanup EXIT
+
+# Namespaces
+ip netns add $NS1
+ip netns add $NS2
+
+# Connectivity
+ip -netns $NS1 link add veth0 type veth peer name veth0 netns $NS2
+ip -netns $NS1 link set dev veth0 up
+ip -netns $NS2 link set dev veth0 up
+ip -netns $NS1 addr add $H1_IP dev veth0
+ip -netns $NS1 addr add $H1_IP6/64 dev veth0 nodad
+ip -netns $NS2 addr add $H2_IP6/64 dev veth0 nodad
+ip -netns $NS1 route add $RT1 via inet6 $H2_IP6
+ip -netns $NS2 route add $RT2 via inet6 $H1_IP6
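+# (v4-via-v6 routing: IPv4 routes with IPv6 nexthops, in the spirit of RFC 5549)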
+
+# Make sure ns2 will respond with ICMP unreachable
+ip netns exec $NS2 sysctl -qw net.ipv4.icmp_ratelimit=0 net.ipv4.ip_forward=1
+
+# Run the test - a ping runs in the background, and we capture ICMP responses
+# with tcpdump; -c 1 means it should exit on the first ping, but add a timeout
+# in case something goes wrong
+ip netns exec $NS1 ping -w 3 -i 0.5 $PINGADDR >/dev/null &
+ip netns exec $NS1 timeout 10 tcpdump -tpni veth0 -c 1 'icmp and icmp[icmptype] != icmp-echo' > $TMPFILE 2>/dev/null
+
+# Parse response and check for dummy address
+# tcpdump output looks like:
+# IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92
+RESP_IP=$(awk '{print $2}' < $TMPFILE)
+if [[ "$RESP_IP" != "192.0.0.8" ]]; then
+ echo "FAIL - got ICMP response from $RESP_IP, should be 192.0.0.8"
+ exit 1
+else
+ echo "OK"
+ exit 0
+fi
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index bf361f30d6ef..c19ecc6a8614 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -63,10 +63,14 @@ log_test()
local rc=$1
local expected=$2
local msg="$3"
+ local xfail=$4
if [ ${rc} -eq ${expected} ]; then
printf "TEST: %-60s [ OK ]\n" "${msg}"
nsuccess=$((nsuccess+1))
+	elif [ -n "${xfail}" ] && [ ${rc} -eq ${xfail} ]; then
+ printf "TEST: %-60s [XFAIL]\n" "${msg}"
+ nxfail=$((nxfail+1))
else
ret=1
nfail=$((nfail+1))
@@ -322,7 +326,7 @@ check_exception()
ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
grep -v "mtu" | grep -q "${R1_LLADDR}"
fi
- log_test $? 0 "IPv6: ${desc}"
+ log_test $? 0 "IPv6: ${desc}" 1
}
run_ping()
@@ -488,6 +492,7 @@ which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
ret=0
nsuccess=0
nfail=0
+nxfail=0
while getopts :pv o
do
@@ -532,5 +537,6 @@ fi
printf "\nTests passed: %3d\n" ${nsuccess}
printf "Tests failed: %3d\n" ${nfail}
+printf "Tests xfailed: %3d\n" ${nxfail}
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index d88e1fdfb147..89c4753c2760 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -6,6 +6,7 @@
#include <limits.h>
#include <fcntl.h>
#include <string.h>
+#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@@ -25,6 +26,7 @@
#include <netinet/in.h>
#include <linux/tcp.h>
+#include <linux/time_types.h>
extern int optind;
@@ -66,6 +68,13 @@ static unsigned int cfg_do_w;
static int cfg_wait;
static uint32_t cfg_mark;
+struct cfg_cmsg_types {
+ unsigned int cmsg_enabled:1;
+ unsigned int timestampns:1;
+};
+
+static struct cfg_cmsg_types cfg_cmsg_types;
+
static void die_usage(void)
{
fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]"
@@ -80,11 +89,22 @@ static void die_usage(void)
fprintf(stderr, "\t-M mark -- set socket packet mark\n");
fprintf(stderr, "\t-u -- check mptcp ulp\n");
fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
+ fprintf(stderr, "\t-c cmsg -- test cmsg type <cmsg>\n");
fprintf(stderr,
"\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n");
exit(1);
}
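+
+/* print a formatted error message and terminate the test with exit(1) */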
+static void xerror(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(1);
+}
+
static void handle_signal(int nr)
{
quit = true;
@@ -338,6 +358,58 @@ static size_t do_write(const int fd, char *buf, const size_t len)
return offset;
}
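+
+/* Scan the received control messages for an SO_TIMESTAMPNS_NEW timestamp
+ * (delivered as a struct __kernel_timespec) and error out if one was
+ * requested via -c but is missing.
+ */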
+static void process_cmsg(struct msghdr *msgh)
+{
+ struct __kernel_timespec ts;
+ bool ts_found = false;
+ struct cmsghdr *cmsg;
+
+ for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) {
+ if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) {
+ memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
+ ts_found = true;
+ continue;
+ }
+ }
+
+ if (cfg_cmsg_types.timestampns) {
+ if (!ts_found)
+ xerror("TIMESTAMPNS not present\n");
+ }
+}
+
+static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len)
+{
+ char msg_buf[8192];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = len,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = msg_buf,
+ .msg_controllen = sizeof(msg_buf),
+ };
+ int flags = 0;
+ int ret = recvmsg(fd, &msg, flags);
+
+ if (ret <= 0)
+ return ret;
+
+ if (msg.msg_controllen && !cfg_cmsg_types.cmsg_enabled)
+ xerror("got %lu bytes of cmsg data, expected 0\n",
+ (unsigned long)msg.msg_controllen);
+
+ if (msg.msg_controllen == 0 && cfg_cmsg_types.cmsg_enabled)
+ xerror("%s\n", "got no cmsg data");
+
+ if (msg.msg_controllen)
+ process_cmsg(&msg);
+
+ return ret;
+}
+
static ssize_t do_rnd_read(const int fd, char *buf, const size_t len)
{
int ret = 0;
@@ -357,6 +429,8 @@ static ssize_t do_rnd_read(const int fd, char *buf, const size_t len)
} else if (cfg_peek == CFG_AFTER_PEEK) {
ret = recv(fd, buf, cap, MSG_PEEK);
ret = (ret < 0) ? ret : read(fd, buf, cap);
+ } else if (cfg_cmsg_types.cmsg_enabled) {
+ ret = do_recvmsg_cmsg(fd, buf, cap);
} else {
ret = read(fd, buf, cap);
}
@@ -786,6 +860,48 @@ static void init_rng(void)
srand(foo);
}
+static void xsetsockopt(int fd, int level, int optname, const void *optval, socklen_t optlen)
+{
+ int err;
+
+ err = setsockopt(fd, level, optname, optval, optlen);
+ if (err) {
+ perror("setsockopt");
+ exit(1);
+ }
+}
+
+static void apply_cmsg_types(int fd, const struct cfg_cmsg_types *cmsg)
+{
+ static const unsigned int on = 1;
+
+ if (cmsg->timestampns)
+ xsetsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &on, sizeof(on));
+}
+
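+/* Parse a comma-separated list of cmsg type names, recursing on the tail so
+ * that every element of the list is validated.
+ */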
+static void parse_cmsg_types(const char *type)
+{
+ char *next = strchr(type, ',');
+ unsigned int len = 0;
+
+ cfg_cmsg_types.cmsg_enabled = 1;
+
+ if (next) {
+ parse_cmsg_types(next + 1);
+ len = next - type;
+ } else {
+ len = strlen(type);
+ }
+
+ if (strncmp(type, "TIMESTAMPNS", len) == 0) {
+ cfg_cmsg_types.timestampns = 1;
+ return;
+ }
+
+ fprintf(stderr, "Unrecognized cmsg option %s\n", type);
+ exit(1);
+}
+
int main_loop(void)
{
int fd;
@@ -801,6 +917,8 @@ int main_loop(void)
set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
+ if (cfg_cmsg_types.cmsg_enabled)
+ apply_cmsg_types(fd, &cfg_cmsg_types);
return copyfd_io(0, fd, 1);
}
@@ -887,7 +1005,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6jr:lp:s:hut:m:S:R:w:M:P:")) != -1) {
+ while ((c = getopt(argc, argv, "6jr:lp:s:hut:m:S:R:w:M:P:c:")) != -1) {
switch (c) {
case 'j':
cfg_join = true;
@@ -943,6 +1061,9 @@ static void parse_opts(int argc, char **argv)
case 'P':
cfg_peek = parse_peek(optarg);
break;
+ case 'c':
+ parse_cmsg_types(optarg);
+ break;
}
}
@@ -976,6 +1097,8 @@ int main(int argc, char *argv[])
set_sndbuf(fd, cfg_sndbuf);
if (cfg_mark)
set_mark(fd, cfg_mark);
+ if (cfg_cmsg_types.cmsg_enabled)
+ apply_cmsg_types(fd, &cfg_cmsg_types);
return main_loop_s(fd);
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 3c4cb72ed8a4..559173a8e387 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -3,7 +3,7 @@
time_start=$(date +%s)
-optstring="S:R:d:e:l:r:h4cm:f:t"
+optstring="S:R:d:e:l:r:h4cm:f:tC"
ret=0
sin=""
sout=""
@@ -22,6 +22,7 @@ sndbuf=0
rcvbuf=0
options_log=true
do_tcp=0
+checksum=false
filesize=0
if [ $tc_loss -eq 100 ];then
@@ -47,6 +48,7 @@ usage() {
echo -e "\t-R: set rcvbuf value (default: use kernel default)"
echo -e "\t-m: test mode (poll, sendfile; default: poll)"
echo -e "\t-t: also run tests with TCP (use twice to non-fallback tcp)"
+ echo -e "\t-C: enable the MPTCP data checksum"
}
while getopts "$optstring" option;do
@@ -104,6 +106,9 @@ while getopts "$optstring" option;do
"t")
do_tcp=$((do_tcp+1))
;;
+ "C")
+ checksum=true
+ ;;
"?")
usage $0
exit 1
@@ -197,8 +202,11 @@ ip -net "$ns4" link set ns4eth3 up
ip -net "$ns4" route add default via 10.0.3.2
ip -net "$ns4" route add default via dead:beef:3::2
-# use TCP syn cookies, even if no flooding was detected.
-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+if $checksum; then
+ for i in "$ns1" "$ns2" "$ns3" "$ns4";do
+ ip netns exec $i sysctl -q net.mptcp.checksum_enabled=1
+ done
+fi
set_ethtool_flags() {
local ns="$1"
@@ -501,6 +509,7 @@ do_transfer()
local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -518,10 +527,14 @@ do_transfer()
"${stat_synrx_now_l}" "${expect_synrx}" 1>&2
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
- printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
- "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
- rets=1
+ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
+ if [ ${stat_ooo_now} -eq 0 ]; then
+ printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+ "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+ rets=1
+ else
+ printf "[ Note ] fallback due to TCP OoO"
+ fi
fi
if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
@@ -667,6 +680,25 @@ run_tests_peekmode()
run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-P ${peekmode}"
}
+display_time()
+{
+ time_end=$(date +%s)
+ time_run=$((time_end-time_start))
+
+ echo "Time: ${time_run} seconds"
+}
+
+stop_if_error()
+{
+ local msg="$1"
+
+ if [ ${ret} -ne 0 ]; then
+ echo "FAIL: ${msg}" 1>&2
+ display_time
+ exit ${ret}
+ fi
+}
+
make_file "$cin" "client"
make_file "$sin" "server"
@@ -674,6 +706,8 @@ check_mptcp_disabled
check_mptcp_ulp_setsockopt
+stop_if_error "The kernel configuration is not valid for MPTCP"
+
echo "INFO: validating network environment with pings"
for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
do_ping "$ns1" $sender 10.0.1.1
@@ -693,6 +727,8 @@ for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
do_ping "$ns4" $sender dead:beef:3::1
done
+stop_if_error "Could not even run ping tests"
+
[ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms
echo -n "INFO: Using loss of $tc_loss "
test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
@@ -720,18 +756,24 @@ echo "on ns3eth4"
tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder
+run_tests_lo "$ns1" "$ns1" 10.0.1.1 1
+stop_if_error "Could not even run loopback test"
+
+run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1
+stop_if_error "Could not even run loopback v6 test"
+
for sender in $ns1 $ns2 $ns3 $ns4;do
- run_tests_lo "$ns1" "$sender" 10.0.1.1 1
- if [ $ret -ne 0 ] ;then
- echo "FAIL: Could not even run loopback test" 1>&2
- exit $ret
- fi
- run_tests_lo "$ns1" $sender dead:beef:1::1 1
- if [ $ret -ne 0 ] ;then
- echo "FAIL: Could not even run loopback v6 test" 2>&1
- exit $ret
+ # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
+ # mptcp syncookie support.
+ if [ $sender = $ns1 ]; then
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+ else
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
fi
+ run_tests "$ns1" $sender 10.0.1.1
+ run_tests "$ns1" $sender dead:beef:1::1
+
run_tests "$ns2" $sender 10.0.1.2
run_tests "$ns2" $sender dead:beef:1::2
run_tests "$ns2" $sender 10.0.2.1
@@ -744,14 +786,13 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
run_tests "$ns4" $sender 10.0.3.1
run_tests "$ns4" $sender dead:beef:3::1
+
+ stop_if_error "Tests with $sender as a sender have failed"
done
run_tests_peekmode "saveWithPeek"
run_tests_peekmode "saveAfterPeek"
+stop_if_error "Tests with peek mode have failed"
-time_end=$(date +%s)
-time_run=$((time_end-time_start))
-
-echo "Time: ${time_run} seconds"
-
+display_time
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index fd99485cf2a4..9a191c1a5de8 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -12,6 +12,7 @@ timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
mptcp_connect=""
capture=0
+checksum=0
do_all_tests=1
TEST_COUNT=0
@@ -49,6 +50,9 @@ init()
ip netns exec $netns sysctl -q net.mptcp.enabled=1
ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+ if [ $checksum -eq 1 ]; then
+ ip netns exec $netns sysctl -q net.mptcp.checksum_enabled=1
+ fi
done
# ns1 ns2
@@ -124,6 +128,28 @@ reset_with_add_addr_timeout()
-j DROP
}
+reset_with_checksum()
+{
+ local ns1_enable=$1
+ local ns2_enable=$2
+
+ reset
+
+ ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
+ ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
+}
+
+reset_with_allow_join_id0()
+{
+ local ns1_enable=$1
+ local ns2_enable=$2
+
+ reset
+
+ ip netns exec $ns1 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns1_enable
+ ip netns exec $ns2 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns2_enable
+}
+
ip -Version > /dev/null 2>&1
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without ip tool"
@@ -476,6 +502,45 @@ run_tests()
fi
}
+chk_csum_nr()
+{
+ local msg=${1:-""}
+ local count
+ local dump_stats
+
+ if [ ! -z "$msg" ]; then
+ printf "%02u" "$TEST_COUNT"
+ else
+ echo -n " "
+ fi
+ printf " %-36s %s" "$msg" "sum"
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != 0 ]; then
+ echo "[fail] got $count data checksum error[s] expected 0"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+ echo -n " - csum "
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != 0 ]; then
+ echo "[fail] got $count data checksum error[s] expected 0"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
chk_join_nr()
{
local msg="$1"
@@ -523,6 +588,9 @@ chk_join_nr()
echo Client ns stats
ip netns exec $ns2 nstat -as | grep MPTcp
fi
+ if [ $checksum -eq 1 ]; then
+ chk_csum_nr
+ fi
}
chk_add_nr()
@@ -1374,6 +1442,94 @@ syncookies_tests()
chk_add_nr 1 1
}
+checksum_tests()
+{
+ # checksum test 0 0
+ reset_with_checksum 0 0
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_csum_nr "checksum test 0 0"
+
+ # checksum test 1 1
+ reset_with_checksum 1 1
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_csum_nr "checksum test 1 1"
+
+ # checksum test 0 1
+ reset_with_checksum 0 1
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_csum_nr "checksum test 0 1"
+
+ # checksum test 1 0
+ reset_with_checksum 1 0
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_csum_nr "checksum test 1 0"
+}
+
+deny_join_id0_tests()
+{
+ # subflow allow join id0 ns1
+ reset_with_allow_join_id0 1 0
+ ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "single subflow allow join id0 ns1" 1 1 1
+
+ # subflow allow join id0 ns2
+ reset_with_allow_join_id0 0 1
+ ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "single subflow allow join id0 ns2" 0 0 0
+
+ # signal address allow join id0 ns1
+ # ADD_ADDRs are not affected by allow_join_id0 value.
+ reset_with_allow_join_id0 1 0
+ ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "signal address allow join id0 ns1" 1 1 1
+ chk_add_nr 1 1
+
+ # signal address allow join id0 ns2
+ # ADD_ADDRs are not affected by allow_join_id0 value.
+ reset_with_allow_join_id0 0 1
+ ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "signal address allow join id0 ns2" 1 1 1
+ chk_add_nr 1 1
+
+ # subflow and address allow join id0 ns1
+ reset_with_allow_join_id0 1 0
+ ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "subflow and address allow join id0 1" 2 2 2
+
+ # subflow and address allow join id0 ns2
+ reset_with_allow_join_id0 0 1
+ ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "subflow and address allow join id0 2" 1 1 1
+}
+
all_tests()
{
subflows_tests
@@ -1387,6 +1543,8 @@ all_tests()
backup_tests
add_addr_ports_tests
syncookies_tests
+ checksum_tests
+ deny_join_id0_tests
}
usage()
@@ -1403,7 +1561,10 @@ usage()
echo " -b backup_tests"
echo " -p add_addr_ports_tests"
echo " -k syncookies_tests"
+ echo " -S checksum_tests"
+ echo " -d deny_join_id0_tests"
echo " -c capture pcap files"
+ echo " -C enable data checksum"
echo " -h help"
}
@@ -1418,13 +1579,16 @@ make_file "$sin" "server" 1
trap cleanup EXIT
for arg in "$@"; do
- # check for "capture" arg before launching tests
+ # check for "capture/checksum" args before launching tests
if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then
capture=1
fi
+ if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"C"[0-9a-zA-Z]*$ ]]; then
+ checksum=1
+ fi
- # exception for the capture option, the rest means: a part of the tests
- if [ "${arg}" != "-c" ]; then
+ # apart from the capture/checksum options, any other argument means: run only a part of the tests
+ if [ "${arg}" != "-c" ] && [ "${arg}" != "-C" ]; then
do_all_tests=0
fi
done
@@ -1434,7 +1598,7 @@ if [ $do_all_tests -eq 1 ]; then
exit $ret
fi
-while getopts 'fsltra64bpkch' opt; do
+while getopts 'fsltra64bpkdchCS' opt; do
case $opt in
f)
subflows_tests
@@ -1469,8 +1633,16 @@ while getopts 'fsltra64bpkch' opt; do
k)
syncookies_tests
;;
+ S)
+ checksum_tests
+ ;;
+ d)
+ deny_join_id0_tests
+ ;;
c)
;;
+ C)
+ ;;
h | *)
usage
;;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 2fa13946ac04..1579e471a5e7 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -178,7 +178,7 @@ do_transfer()
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
- $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} \
+ $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS \
${local_addr} < "$sin" > "$sout" &
spid=$!
@@ -186,7 +186,7 @@ do_transfer()
timeout ${timeout_test} \
ip netns exec ${connector_ns} \
- $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} \
+ $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS \
$connect_addr < "$cin" > "$cout" &
cpid=$!
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 3aeef3bcb101..fd63ebfe9a2b 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -60,6 +60,8 @@ setup()
for i in "$ns1" "$ns2" "$ns3";do
ip netns add $i || exit $ksft_skip
ip -net $i link set lo up
+ ip netns exec $i sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec $i sysctl -q net.ipv4.conf.default.rp_filter=0
done
ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
@@ -80,7 +82,6 @@ setup()
ip netns exec "$ns1" ./pm_nl_ctl limits 1 1
ip netns exec "$ns1" ./pm_nl_ctl add 10.0.2.1 dev ns1eth2 flags subflow
- ip netns exec "$ns1" sysctl -q net.ipv4.conf.all.rp_filter=0
ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
diff --git a/tools/testing/selftests/net/so_netns_cookie.c b/tools/testing/selftests/net/so_netns_cookie.c
new file mode 100644
index 000000000000..b39e87e967cd
--- /dev/null
+++ b/tools/testing/selftests/net/so_netns_cookie.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef SO_NETNS_COOKIE
+#define SO_NETNS_COOKIE 71
+#endif
+
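+/* print the error with errno appended and evaluate to 1, so callers can
+ * simply write "return pr_err(...)" from main()
+ */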
+#define pr_err(fmt, ...) \
+ ({ \
+ fprintf(stderr, "%s:%d:" fmt ": %m\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ 1; \
+ })
+
+int main(int argc, char *argvp[])
+{
+ uint64_t cookie1, cookie2;
+ socklen_t vallen;
+ int sock1, sock2;
+
+ sock1 = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock1 < 0)
+ return pr_err("Unable to create TCP socket");
+
+ vallen = sizeof(cookie1);
+ if (getsockopt(sock1, SOL_SOCKET, SO_NETNS_COOKIE, &cookie1, &vallen) != 0)
+ return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+ if (!cookie1)
+ return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+ if (unshare(CLONE_NEWNET))
+ return pr_err("unshare");
+
+ sock2 = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock2 < 0)
+ return pr_err("Unable to create TCP socket");
+
+ vallen = sizeof(cookie2);
+ if (getsockopt(sock2, SOL_SOCKET, SO_NETNS_COOKIE, &cookie2, &vallen) != 0)
+ return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+ if (!cookie2)
+ return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+ if (cookie1 == cookie2)
+ return pr_err("SO_NETNS_COOKIE returned identical cookies for distinct ns");
+
+ close(sock1);
+ close(sock2);
+ return 0;
+}
diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
new file mode 100755
index 000000000000..75ada17ac061
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
@@ -0,0 +1,573 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+# author: Paolo Lungaroni <paolo.lungaroni@uniroma2.it>
+
+# This test is designed for evaluating the new SRv6 End.DT46 Behavior used for
+# implementing IPv4/IPv6 L3 VPN use cases.
+#
+# The current SRv6 code in the Linux kernel only implements SRv6 End.DT4 and
+# End.DT6 Behaviors which can be used respectively to support IPv4-in-IPv6 and
+# IPv6-in-IPv6 VPNs. With End.DT4 and End.DT6 it is not possible to create a
+# single SRv6 VPN tunnel to carry both IPv4 and IPv6 traffic.
+# The SRv6 End.DT46 Behavior implementation is meant to support the
+# decapsulation of IPv4 and IPv6 traffic coming from a single SRv6 tunnel.
+# Therefore, the SRv6 End.DT46 Behavior in the Linux kernel greatly simplifies
+# the setup and operations of SRv6 VPNs.
+#
+# Hereafter a network diagram is shown, where two different tenants (named 100
+# and 200) offer IPv4/IPv6 L3 VPN services allowing hosts to communicate with
+# each other across an IPv6 network.
+#
+# Only hosts belonging to the same tenant (and to the same VPN) can communicate
+# with each other; communication among hosts of different tenants is
+# forbidden.
+# In other words, hosts hs-t100-1 and hs-t100-2 are connected through the
+# IPv4/IPv6 L3 VPN of tenant 100 while hs-t200-3 and hs-t200-4 are connected
+# using the IPv4/IPv6 L3 VPN of tenant 200. Cross connection between tenant 100
+# and tenant 200 is forbidden and thus, for example, hs-t100-1 cannot reach
+# hs-t200-3 and vice versa.
+#
+# Routers rt-1 and rt-2 implement IPv4/IPv6 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DT46 Behavior and c) VRF.
+#
+# To explain how an IPv4/IPv6 L3 VPN based on SRv6 works, let us briefly
+# consider an example where, within the same domain of tenant 100, the host
+# hs-t100-1 pings the host hs-t100-2.
+#
+# First of all, L2 reachability of the host hs-t100-2 is taken into account by
+# the router rt-1, which acts as an arp/ndp proxy.
+#
+# When the host hs-t100-1 sends an IPv6 or IPv4 packet destined to hs-t100-2,
+# the router rt-1 receives the packet on the internal veth-t100 interface. This
+# interface is enslaved to the VRF vrf-100, whose associated table contains the
+# SRv6 Encap route for encapsulating any IPv6 or IPv4 packet in an IPv6 packet
+# carrying a Segment Routing Header (SRH). The encapsulated packet is sent
+# through the (IPv6) core network up to the router rt-2, which receives it on
+# its veth0 interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DT46 Behavior removes the outer IPv6+SRH headers and
+# performs the lookup on the vrf-100 table using the destination address of
+# the decapsulated IPv6 or IPv4 packet. Afterwards, the packet is sent to the
+# host hs-t100-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing but this time the roles of rt-1
+# and rt-2 are swapped.
+#
+# Of course, the IPv4/IPv6 L3 VPN for tenant 200 works exactly as the IPv4/IPv6
+# L3 VPN for tenant 100. In this case, only hosts hs-t200-3 and hs-t200-4 are
+# able to connect with each other.
+#
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | hs-t100-1 netns | | hs-t100-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | cafe::1/64 | | | | cafe::2/64 | |
+# | | 10.0.0.1/24 | | | | 10.0.0.2/24 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +-----------------------------------+
+# | . | | . |
+# | +---------------+ | | +---------------+ |
+# | | veth-t100 | | | | veth-t100 | |
+# | | cafe::254/64 | | | | cafe::254/64 | |
+# | | 10.0.0.254/24 | +----------+ | | +----------+ | 10.0.0.254/24 | |
+# | +-------+-------+ | localsid | | | | localsid | +-------+-------+ |
+# | | | table | | | | table | | |
+# | +----+----+ +----------+ | | +----------+ +----+----+ |
+# | | vrf-100 | | | | vrf-100 | |
+# | +---------+ +------------+ | | +------------+ +---------+ |
+# | | veth0 | | | | veth0 | |
+# | | fd00::1/64 |.|...|.| fd00::2/64 | |
+# | +---------+ +------------+ | | +------------+ +---------+ |
+# | | vrf-200 | | | | vrf-200 | |
+# | +----+----+ | | +----+----+ |
+# | | | | | |
+# | +-------+-------+ | | +-------+-------+ |
+# | | veth-t200 | | | | veth-t200 | |
+# | | cafe::254/64 | | | | cafe::254/64 | |
+# | | 10.0.0.254/24 | | | | 10.0.0.254/24 | |
+# | +---------------+ rt-1 netns | | rt-2 netns +---------------+ |
+# | . | | . |
+# +-----------------------------------+ +-----------------------------------+
+# . .
+# . .
+# . .
+# . .
+# +-------------------+ +-------------------+
+# | . | | . |
+# | +-------------+ | | +-------------+ |
+# | | veth0 | | | | veth0 | |
+# | | cafe::3/64 | | | | cafe::4/64 | |
+# | | 10.0.0.3/24 | | | | 10.0.0.4/24 | |
+# | +-------------+ | | +-------------+ |
+# | | | |
+# | hs-t200-3 netns | | hs-t200-4 netns |
+# | | | |
+# +-------------------+ +-------------------+
+#
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table (table 90)
+# +--------------------------------------------------+
+# |SID |Action |
+# +--------------------------------------------------+
+# |fc00:21:100::6046|apply SRv6 End.DT46 vrftable 100|
+# +--------------------------------------------------+
+# |fc00:21:200::6046|apply SRv6 End.DT46 vrftable 200|
+# +--------------------------------------------------+
+#
+# rt-1: VRF tenant 100 (table 100)
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::2 |apply seg6 encap segs fc00:12:100::6046|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t100 |
+# +---------------------------------------------------+
+# |10.0.0.2 |apply seg6 encap segs fc00:12:100::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t100 |
+# +---------------------------------------------------+
+#
+# rt-1: VRF tenant 200 (table 200)
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::4 |apply seg6 encap segs fc00:12:200::6046|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t200 |
+# +---------------------------------------------------+
+# |10.0.0.4 |apply seg6 encap segs fc00:12:200::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t200 |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table (table 90)
+# +--------------------------------------------------+
+# |SID |Action |
+# +--------------------------------------------------+
+# |fc00:12:100::6046|apply SRv6 End.DT46 vrftable 100|
+# +--------------------------------------------------+
+# |fc00:12:200::6046|apply SRv6 End.DT46 vrftable 200|
+# +--------------------------------------------------+
+#
+# rt-2: VRF tenant 100 (table 100)
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::1 |apply seg6 encap segs fc00:21:100::6046|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t100 |
+# +---------------------------------------------------+
+# |10.0.0.1 |apply seg6 encap segs fc00:21:100::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t100 |
+# +---------------------------------------------------+
+#
+# rt-2: VRF tenant 200 (table 200)
+# +---------------------------------------------------+
+# |host |Action |
+# +---------------------------------------------------+
+# |cafe::3 |apply seg6 encap segs fc00:21:200::6046|
+# +---------------------------------------------------+
+# |cafe::/64 |forward to dev veth-t200 |
+# +---------------------------------------------------+
+# |10.0.0.3 |apply seg6 encap segs fc00:21:200::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t200 |
+# +---------------------------------------------------+
+#
+
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fd00
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fc00
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+cleanup()
+{
+ ip link del veth-rt-1 2>/dev/null || true
+ ip link del veth-rt-2 2>/dev/null || true
+
+ # destroy routers rt-* and hosts hs-*
+	for ns in $(ip netns show | awk '/^(rt|hs)-/ {print $1}'); do
+ ip netns del ${ns} || true
+ done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt=$1
+ local nsname=rt-${rt}
+
+ ip netns add ${nsname}
+ ip link set veth-rt-${rt} netns ${nsname}
+ ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ ip -netns ${nsname} link set veth0 up
+ ip -netns ${nsname} link set lo up
+
+ ip netns exec ${nsname} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_hs()
+{
+ local hs=$1
+ local rt=$2
+ local tid=$3
+ local hsname=hs-t${tid}-${hs}
+ local rtname=rt-${rt}
+ local rtveth=veth-t${tid}
+
+ # set the networking for the host
+ ip netns add ${hsname}
+
+ ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+ ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
+ ip -netns ${hsname} link set veth0 up
+ ip -netns ${hsname} link set lo up
+
+ # configure the VRF for the tenant X on the router which is directly
+ # connected to the source host.
+ ip -netns ${rtname} link add vrf-${tid} type vrf table ${tid}
+ ip -netns ${rtname} link set vrf-${tid} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ # enslave the veth-tX interface to the vrf-X in the access router
+ ip -netns ${rtname} link set ${rtveth} master vrf-${tid}
+ ip -netns ${rtname} addr add ${IPv6_HS_NETWORK}::254/64 dev ${rtveth} nodad
+ ip -netns ${rtname} addr add ${IPv4_HS_NETWORK}.254/24 dev ${rtveth}
+ ip -netns ${rtname} link set ${rtveth} up
+
+ ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+
+	# disable rp_filter, otherwise the kernel gets confused about how to
+	# route decapsulated ipv4 packets.
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+
+ ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
+
+setup_vpn_config()
+{
+ local hssrc=$1
+ local rtsrc=$2
+ local hsdst=$3
+ local rtdst=$4
+ local tid=$5
+
+ local hssrc_name=hs-t${tid}-${hssrc}
+ local hsdst_name=hs-t${tid}-${hsdst}
+ local rtsrc_name=rt-${rtsrc}
+ local rtdst_name=rt-${rtdst}
+ local rtveth=veth-t${tid}
+ local vpn_sid=${VPN_LOCATOR_SERVICE}:${hssrc}${hsdst}:${tid}::6046
+
+ ip -netns ${rtsrc_name} -6 neigh add proxy ${IPv6_HS_NETWORK}::${hsdst} dev ${rtveth}
+
+ # set the encap route for encapsulating packets which arrive from the
+ # host hssrc and destined to the access router rtsrc.
+ ip -netns ${rtsrc_name} -6 route add ${IPv6_HS_NETWORK}::${hsdst}/128 vrf vrf-${tid} \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -4 route add ${IPv4_HS_NETWORK}.${hsdst}/32 vrf vrf-${tid} \
+ encap seg6 mode encap segs ${vpn_sid} dev veth0
+ ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 vrf vrf-${tid} \
+ via fd00::${rtdst} dev veth0
+
+ # set the decap route for decapsulating packets which arrive from
+ # the rtdst router and destined to the hsdst host.
+ ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 table ${LOCALSID_TABLE_ID} \
+ encap seg6local action End.DT46 vrftable ${tid} dev vrf-${tid}
+
+ # all sids for VPNs start with a common locator which is fc00::/16.
+ # Routes for handling the SRv6 End.DT46 behavior instances are grouped
+ # together in the 'localsid' table.
+ #
+ # NOTE: added only once
+ if [ -z "$(ip -netns ${rtdst_name} -6 rule show | \
+ grep "to ${VPN_LOCATOR_SERVICE}::/16 lookup ${LOCALSID_TABLE_ID}")" ]; then
+ ip -netns ${rtdst_name} -6 rule add \
+ to ${VPN_LOCATOR_SERVICE}::/16 \
+ lookup ${LOCALSID_TABLE_ID} prio 999
+ fi
+
+ # set default routes to unreachable for both ipv4 and ipv6
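+	# (the metric 4278198272 is the very high value suggested in
+	# Documentation/networking/vrf.rst, so this route matches only when no
+	# more specific route exists)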
+ ip -netns ${rtsrc_name} -6 route add unreachable default metric 4278198272 \
+ vrf vrf-${tid}
+
+ ip -netns ${rtsrc_name} -4 route add unreachable default metric 4278198272 \
+ vrf vrf-${tid}
+}
+
+setup()
+{
+ ip link add veth-rt-1 type veth peer name veth-rt-2
+ # setup the networking for router rt-1 and router rt-2
+ setup_rt_networking 1
+ setup_rt_networking 2
+
+ # setup two hosts for the tenant 100.
+ # - host hs-1 is directly connected to the router rt-1;
+ # - host hs-2 is directly connected to the router rt-2.
+ setup_hs 1 1 100 #args: host router tenant
+ setup_hs 2 2 100
+
+ # setup two hosts for the tenant 200
+ # - host hs-3 is directly connected to the router rt-1;
+ # - host hs-4 is directly connected to the router rt-2.
+ setup_hs 3 1 200
+ setup_hs 4 2 200
+
+ # setup the IPv4/IPv6 L3 VPN which connects the host hs-t100-1 and host
+ # hs-t100-2 within the same tenant 100.
+ setup_vpn_config 1 1 2 2 100 #args: src_host src_router dst_host dst_router tenant
+ setup_vpn_config 2 2 1 1 100
+
+ # setup the IPv4/IPv6 L3 VPN which connects the host hs-t200-3 and host
+ # hs-t200-4 within the same tenant 200.
+ setup_vpn_config 3 1 4 2 200
+ setup_vpn_config 4 2 3 1 200
+}
+
+check_rt_connectivity()
+{
+ local rtsrc=$1
+ local rtdst=$2
+
+ ip netns exec rt-${rtsrc} ping -c 1 -W 1 ${IPv6_RT_NETWORK}::${rtdst} \
+ >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc=$1
+ local rtdst=$2
+
+ check_rt_connectivity ${rtsrc} ${rtdst}
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-t${tid}-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv6_HS_NETWORK}::${hsdst} >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ ip netns exec hs-t${tid}-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+ ${IPv4_HS_NETWORK}.${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc=$1
+ local hsdst=$2
+ local tid=$3
+
+ check_hs_ipv6_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "IPv6 Hosts connectivity: hs-t${tid}-${hssrc} -> hs-t${tid}-${hsdst} (tenant ${tid})"
+
+ check_hs_ipv4_connectivity ${hssrc} ${hsdst} ${tid}
+ log_test $? 0 "IPv4 Hosts connectivity: hs-t${tid}-${hssrc} -> hs-t${tid}-${hsdst} (tenant ${tid})"
+
+}
+
+check_and_log_hs_isolation()
+{
+ local hssrc=$1
+ local tidsrc=$2
+ local hsdst=$3
+ local tiddst=$4
+
+ check_hs_ipv6_connectivity ${hssrc} ${hsdst} ${tidsrc}
+ # NOTE: ping should fail
+ log_test $? 1 "IPv6 Hosts isolation: hs-t${tidsrc}-${hssrc} -X-> hs-t${tiddst}-${hsdst}"
+
+ check_hs_ipv4_connectivity ${hssrc} ${hsdst} ${tidsrc}
+ # NOTE: ping should fail
+ log_test $? 1 "IPv4 Hosts isolation: hs-t${tidsrc}-${hssrc} -X-> hs-t${tiddst}-${hsdst}"
+
+}
+
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc=$1
+ local tid=$2
+
+ check_hs_ipv6_connectivity ${hssrc} 254 ${tid}
+ log_test $? 0 "IPv6 Hosts connectivity: hs-t${tid}-${hssrc} -> gw (tenant ${tid})"
+
+ check_hs_ipv4_connectivity ${hssrc} 254 ${tid}
+ log_test $? 0 "IPv4 Hosts connectivity: hs-t${tid}-${hssrc} -> gw (tenant ${tid})"
+
+}
+
+router_tests()
+{
+ log_section "IPv6 routers connectivity test"
+
+ check_and_log_rt_connectivity 1 2
+ check_and_log_rt_connectivity 2 1
+}
+
+host2gateway_tests()
+{
+ log_section "IPv4/IPv6 connectivity test among hosts and gateway"
+
+ check_and_log_hs2gw_connectivity 1 100
+ check_and_log_hs2gw_connectivity 2 100
+
+ check_and_log_hs2gw_connectivity 3 200
+ check_and_log_hs2gw_connectivity 4 200
+}
+
+host_vpn_tests()
+{
+ log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+ check_and_log_hs_connectivity 1 2 100
+ check_and_log_hs_connectivity 2 1 100
+
+ check_and_log_hs_connectivity 3 4 200
+ check_and_log_hs_connectivity 4 3 200
+}
+
+host_vpn_isolation_tests()
+{
+ local i
+ local j
+ local k
+ local tmp
+ local l1="1 2"
+ local l2="3 4"
+ local t1=100
+ local t2=200
+
+ log_section "SRv6 VPN isolation test among hosts in different tentants"
+
+ for k in 0 1; do
+ for i in ${l1}; do
+ for j in ${l2}; do
+ check_and_log_hs_isolation ${i} ${t1} ${j} ${t2}
+ done
+ done
+
+ # let us test the reverse path
+ tmp="${l1}"; l1="${l2}"; l2="${tmp}"
+ tmp=${t1}; t1=${t2}; t2=${tmp}
+ done
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit 0
+fi
+
+modprobe vrf &>/dev/null
+if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+ echo "SKIP: vrf sysctl does not exist"
+ exit 0
+fi
+
+cleanup &>/dev/null
+
+setup
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+host_vpn_isolation_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 426d07875a48..112d41d01b12 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -25,6 +25,47 @@
#define TLS_PAYLOAD_MAX_LEN 16384
#define SOL_TLS 282
+struct tls_crypto_info_keys {
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes128;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20;
+ };
+ size_t len;
+};
+
+static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
+ struct tls_crypto_info_keys *tls12)
+{
+ memset(tls12, 0, sizeof(*tls12));
+
+ switch (cipher_type) {
+ case TLS_CIPHER_CHACHA20_POLY1305:
+ tls12->len = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+ tls12->chacha20.info.version = tls_version;
+ tls12->chacha20.info.cipher_type = cipher_type;
+ break;
+ case TLS_CIPHER_AES_GCM_128:
+ tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128);
+ tls12->aes128.info.version = tls_version;
+ tls12->aes128.info.cipher_type = cipher_type;
+ break;
+ default:
+ break;
+ }
+}
+
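+/* fill a buffer with pseudo-random bytes: four bytes at a time, then the
+ * remainder byte by byte
+ */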
+static void memrnd(void *s, size_t n)
+{
+ int *dword = s;
+ char *byte;
+
+ for (; n >= 4; n -= 4)
+ *dword++ = rand();
+ byte = (void *)dword;
+ while (n--)
+ *byte++ = rand();
+}
+
FIXTURE(tls_basic)
{
int fd, cfd;
@@ -133,33 +174,16 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
FIXTURE_SETUP(tls)
{
- union {
- struct tls12_crypto_info_aes_gcm_128 aes128;
- struct tls12_crypto_info_chacha20_poly1305 chacha20;
- } tls12;
+ struct tls_crypto_info_keys tls12;
struct sockaddr_in addr;
socklen_t len;
int sfd, ret;
- size_t tls12_sz;
self->notls = false;
len = sizeof(addr);
- memset(&tls12, 0, sizeof(tls12));
- switch (variant->cipher_type) {
- case TLS_CIPHER_CHACHA20_POLY1305:
- tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
- tls12.chacha20.info.version = variant->tls_version;
- tls12.chacha20.info.cipher_type = variant->cipher_type;
- break;
- case TLS_CIPHER_AES_GCM_128:
- tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
- tls12.aes128.info.version = variant->tls_version;
- tls12.aes128.info.cipher_type = variant->cipher_type;
- break;
- default:
- tls12_sz = 0;
- }
+ tls_crypto_info_init(variant->tls_version, variant->cipher_type,
+ &tls12);
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
@@ -187,7 +211,7 @@ FIXTURE_SETUP(tls)
if (!self->notls) {
ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
- tls12_sz);
+ tls12.len);
ASSERT_EQ(ret, 0);
}
@@ -200,7 +224,7 @@ FIXTURE_SETUP(tls)
ASSERT_EQ(ret, 0);
ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
- tls12_sz);
+ tls12.len);
ASSERT_EQ(ret, 0);
}
@@ -308,6 +332,8 @@ TEST_F(tls, recv_max)
char recv_mem[TLS_PAYLOAD_MAX_LEN];
char buf[TLS_PAYLOAD_MAX_LEN];
+ memrnd(buf, sizeof(buf));
+
EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
@@ -588,6 +614,8 @@ TEST_F(tls, recvmsg_single_max)
struct iovec vec;
struct msghdr hdr;
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
vec.iov_base = (char *)recv_mem;
vec.iov_len = TLS_PAYLOAD_MAX_LEN;
@@ -610,6 +638,8 @@ TEST_F(tls, recvmsg_multiple)
struct msghdr hdr;
int i;
+ memrnd(buf, sizeof(buf));
+
EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
for (i = 0; i < msg_iovlen; i++) {
iov_base[i] = (char *)malloc(iov_len);
@@ -634,6 +664,8 @@ TEST_F(tls, single_send_multiple_recv)
char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
memset(recv_mem, 0, total_len);
@@ -834,18 +866,17 @@ TEST_F(tls, bidir)
int ret;
if (!self->notls) {
- struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct tls_crypto_info_keys tls12;
- memset(&tls12, 0, sizeof(tls12));
- tls12.info.version = variant->tls_version;
- tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+ tls_crypto_info_init(variant->tls_version, variant->cipher_type,
+ &tls12);
ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
- sizeof(tls12));
+ tls12.len);
ASSERT_EQ(ret, 0);
ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
- sizeof(tls12));
+ tls12.len);
ASSERT_EQ(ret, 0);
}
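
The tls_crypto_info_keys union introduced above feeds directly into the kernel TLS setsockopt() path. A minimal sketch of that path outside the test harness, assuming a connected TCP socket fd and key material from a completed handshake (SOL_TLS is defined locally, as in the selftest, since libc headers may not provide it):

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* as in the selftest */
#endif

static int enable_ktls_tx(int fd)
{
	struct tls12_crypto_info_aes_gcm_128 info;

	memset(&info, 0, sizeof(info));
	info.info.version = TLS_1_2_VERSION;
	info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	/* key/iv/salt/rec_seq fields would come from the handshake */

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_TX, &info, sizeof(info));
}
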
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index a8fa64136282..7f26591f236b 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
readonly BASE="ns-$(mktemp -u XXXXXX)"
diff --git a/tools/testing/selftests/net/unicast_extensions.sh b/tools/testing/selftests/net/unicast_extensions.sh
index dbf0421986df..66354cdd5ce4 100755
--- a/tools/testing/selftests/net/unicast_extensions.sh
+++ b/tools/testing/selftests/net/unicast_extensions.sh
@@ -189,6 +189,15 @@ segmenttest 255.255.255.1 255.255.255.254 24 "assign and ping inside 255.255.255
route_test 240.5.6.7 240.5.6.1 255.1.2.1 255.1.2.3 24 "route between 240.5.6/24 and 255.1.2/24 (is allowed)"
route_test 0.200.6.7 0.200.38.1 245.99.101.1 245.99.200.111 16 "route between 0.200/16 and 245.99/16 (is allowed)"
#
+# Test support for lowest address ending in .0
+segmenttest 5.10.15.20 5.10.15.0 24 "assign and ping lowest address (/24)"
+#
+# Test support for lowest address not ending in .0
+segmenttest 192.168.101.192 192.168.101.193 26 "assign and ping lowest address (/26)"
+#
+# Routing using lowest address as a gateway/endpoint
+route_test 192.168.42.1 192.168.42.0 9.8.7.6 9.8.7.0 24 "routing using lowest address"
+#
# ==============================================
# ==== TESTS THAT CURRENTLY EXPECT FAILURE =====
# ==============================================
@@ -202,14 +211,6 @@ segmenttest 255.255.255.1 255.255.255.255 16 "assigning 255.255.255.255 (is forb
# Currently Linux does not allow this, so this should fail too
segmenttest 127.99.4.5 127.99.4.6 16 "assign and ping inside 127/8 (is forbidden)"
#
-# Test support for lowest address
-# Currently Linux does not allow this, so this should fail too
-segmenttest 5.10.15.20 5.10.15.0 24 "assign and ping lowest address (is forbidden)"
-#
-# Routing using lowest address as a gateway/endpoint
-# Currently Linux does not allow this, so this should fail too
-route_test 192.168.42.1 192.168.42.0 9.8.7.6 9.8.7.0 24 "routing using lowest address (is forbidden)"
-#
# Test support for unicast use of class D
# Currently Linux does not allow this, so this should fail too
segmenttest 225.1.2.3 225.1.2.200 24 "assign and ping class D address (is forbidden)"
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 2fedc0781ce8..11d7cdb898c0 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -18,7 +18,8 @@ ret=0
cleanup() {
local ns
- local -r jobs="$(jobs -p)"
+ local jobs
+ readonly jobs="$(jobs -p)"
[ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
rm -f $STATS
@@ -108,7 +109,7 @@ chk_gro() {
if [ ! -f ../bpf/xdp_dummy.o ]; then
echo "Missing xdp_dummy helper. Build bpf selftest first"
- exit -1
+ exit 1
fi
create_ns
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 3171069a6b46..cd6430b39982 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
new file mode 100755
index 000000000000..6caf6ac8c285
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# This tests the fib expression.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+timeout=4
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+cleanup()
+{
+ ip netns del ${ns1}
+ ip netns del ${ns2}
+ ip netns del ${nsrouter}
+
+ [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+dmesg | grep -q ' nft_rpfilter: '
+if [ $? -eq 0 ]; then
+ dmesg -c | grep ' nft_rpfilter: '
+ echo "WARN: a previous test run has failed" 1>&2
+fi
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+load_ruleset() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ fib saddr . iif oif missing counter log prefix "$netns nft_rpfilter: " drop
+ }
+}
+EOF
+}
+
+load_ruleset_count() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ ip daddr 1.1.1.1 fib saddr . iif oif missing counter drop
+ ip6 daddr 1c3::c01d fib saddr . iif oif missing counter drop
+ }
+}
+EOF
+}
+
+check_drops() {
+ dmesg | grep -q ' nft_rpfilter: '
+ if [ $? -eq 0 ]; then
+ dmesg | grep ' nft_rpfilter: '
+ echo "FAIL: rpfilter did drop packets"
+ return 1
+ fi
+
+ return 0
+}
+
+check_fib_counter() {
+ local want=$1
+ local ns=$2
+ local address=$3
+
+ line=$(ip netns exec ${ns} nft list table inet filter | grep 'fib saddr . iif' | grep $address | grep "packets $want" )
+ ret=$?
+
+ if [ $ret -ne 0 ];then
+ echo "Netns $ns fib counter doesn't match expected packet count of $want for $address" 1>&2
+ ip netns exec ${ns} nft list table inet filter
+ return 1
+ fi
+
+ if [ $want -gt 0 ]; then
+ echo "PASS: fib expression did drop packets for $address"
+ fi
+
+ return 0
+}
+
+load_ruleset ${nsrouter}
+load_ruleset ${ns1}
+load_ruleset ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+test_ping() {
+ local daddr4=$1
+ local daddr6=$2
+
+ ip netns exec ${ns1} ping -c 1 -q $daddr4 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr4, ret $ret" 1>&2
+ return 1
+ fi
+
+ ip netns exec ${ns1} ping -c 3 -q $daddr6 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr6, ret $ret" 1>&2
+ return 1
+ fi
+
+ return 0
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+sleep 3
+
+test_ping 10.0.2.1 dead:2::1 || exit 1
+check_drops || exit 1
+
+test_ping 10.0.2.99 dead:2::99 || exit 1
+check_drops || exit 1
+
+echo "PASS: fib expression did not cause unwanted packet drops"
+
+ip netns exec ${nsrouter} nft flush table inet filter
+
+ip -net ${ns1} route del default
+ip -net ${ns1} -6 route del default
+
+ip -net ${ns1} addr del 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr del dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr add dead:2::99/64 dev eth0
+
+ip -net ${ns1} route add default via 10.0.2.1
+ip -net ${ns1} -6 route add default via dead:2::1
+
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth0
+
+# Switch to a ruleset that doesn't log; this time
+# it is expected that the packets do get dropped.
+load_ruleset_count ${nsrouter}
+
+# ns1 has a default route, but nsrouter does not.
+# The ping to 1.1.1.1 is expected to fail, so its
+# return value must not be checked.
+check_fib_counter 0 ${nsrouter} 1.1.1.1 || exit 1
+check_fib_counter 0 ${nsrouter} 1c3::c01d || exit 1
+
+ip netns exec ${ns1} ping -c 1 -W 1 -q 1.1.1.1 > /dev/null
+check_fib_counter 1 ${nsrouter} 1.1.1.1 || exit 1
+
+sleep 2
+ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
+check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+
+exit 0
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 381d874cce99..d7ec1e7da0d0 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -155,7 +155,7 @@ struct flag_test {
int err;
};
-#define NUM_OPENAT2_FLAG_TESTS 24
+#define NUM_OPENAT2_FLAG_TESTS 25
void test_openat2_flags(void)
{
@@ -229,6 +229,11 @@ void test_openat2_flags(void)
{ .name = "invalid how.resolve and O_PATH",
.how.flags = O_PATH,
.how.resolve = 0x1337, .err = -EINVAL },
+
+ /* Currently unknown upper 32 bits are rejected. */
+ { .name = "currently unknown bit (1 << 63)",
+ .how.flags = O_RDONLY | (1ULL << 63),
+ .how.resolve = 0, .err = -EINVAL },
};
BUILD_BUG_ON(ARRAY_LEN(tests) != NUM_OPENAT2_FLAG_TESTS);
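
A standalone sketch of what the new test case exercises: openat2() must reject flag bits it does not know about, including anything in the upper 32 bits, with -EINVAL. There is no libc wrapper, so the raw syscall is used; assumes <linux/openat2.h> for struct open_how:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

static int check_unknown_flag_bit(void)
{
	struct open_how how = { .flags = O_RDONLY | (1ULL << 63) };
	long ret = syscall(__NR_openat2, AT_FDCWD, ".", &how, sizeof(how));

	/* expected: the unknown high bit makes the call fail with EINVAL */
	return (ret == -1 && errno == EINVAL) ? 0 : -1;
}
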
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index bed4b5318a86..8f3e72e626fa 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,6 +10,7 @@
/proc-self-map-files-002
/proc-self-syscall
/proc-self-wchan
+/proc-subset-pid
/proc-uptime-001
/proc-uptime-002
/read
diff --git a/tools/testing/selftests/rlimits/.gitignore b/tools/testing/selftests/rlimits/.gitignore
new file mode 100644
index 000000000000..091021f255b3
--- /dev/null
+++ b/tools/testing/selftests/rlimits/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+rlimits-per-userns
diff --git a/tools/testing/selftests/rlimits/Makefile b/tools/testing/selftests/rlimits/Makefile
new file mode 100644
index 000000000000..03aadb406212
--- /dev/null
+++ b/tools/testing/selftests/rlimits/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g
+TEST_GEN_PROGS := rlimits-per-userns
+
+include ../lib.mk
diff --git a/tools/testing/selftests/rlimits/config b/tools/testing/selftests/rlimits/config
new file mode 100644
index 000000000000..416bd53ce982
--- /dev/null
+++ b/tools/testing/selftests/rlimits/config
@@ -0,0 +1 @@
+CONFIG_USER_NS=y
diff --git a/tools/testing/selftests/rlimits/rlimits-per-userns.c b/tools/testing/selftests/rlimits/rlimits-per-userns.c
new file mode 100644
index 000000000000..26dc949e93ea
--- /dev/null
+++ b/tools/testing/selftests/rlimits/rlimits-per-userns.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Alexey Gladkov <gladkov.alexey@gmail.com>
+ */
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sched.h>
+#include <signal.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <err.h>
+
+#define NR_CHILDS 2
+
+static char *service_prog;
+static uid_t user = 60000;
+static uid_t group = 60000;
+
+static void setrlimit_nproc(rlim_t n)
+{
+ pid_t pid = getpid();
+ struct rlimit limit = {
+ .rlim_cur = n,
+ .rlim_max = n
+ };
+
+ warnx("(pid=%d): Setting RLIMIT_NPROC=%ld", pid, n);
+
+ if (setrlimit(RLIMIT_NPROC, &limit) < 0)
+ err(EXIT_FAILURE, "(pid=%d): setrlimit(RLIMIT_NPROC)", pid);
+}
+
+static pid_t fork_child(void)
+{
+ pid_t pid = fork();
+
+ if (pid < 0)
+ err(EXIT_FAILURE, "fork");
+
+ if (pid > 0)
+ return pid;
+
+ pid = getpid();
+
+ warnx("(pid=%d): New process starting ...", pid);
+
+ if (prctl(PR_SET_PDEATHSIG, SIGKILL) < 0)
+ err(EXIT_FAILURE, "(pid=%d): prctl(PR_SET_PDEATHSIG)", pid);
+
+ signal(SIGUSR1, SIG_DFL);
+
+ warnx("(pid=%d): Changing to uid=%d, gid=%d", pid, user, group);
+
+ if (setgid(group) < 0)
+ err(EXIT_FAILURE, "(pid=%d): setgid(%d)", pid, group);
+ if (setuid(user) < 0)
+ err(EXIT_FAILURE, "(pid=%d): setuid(%d)", pid, user);
+
+ warnx("(pid=%d): Service running ...", pid);
+
+ warnx("(pid=%d): Unshare user namespace", pid);
+ if (unshare(CLONE_NEWUSER) < 0)
+ err(EXIT_FAILURE, "unshare(CLONE_NEWUSER)");
+
+ char *const argv[] = { "service", NULL };
+ char *const envp[] = { "I_AM_SERVICE=1", NULL };
+
+ warnx("(pid=%d): Executing real service ...", pid);
+
+ execve(service_prog, argv, envp);
+ err(EXIT_FAILURE, "(pid=%d): execve", pid);
+}
+
+int main(int argc, char **argv)
+{
+ size_t i;
+ pid_t child[NR_CHILDS];
+ int wstatus[NR_CHILDS];
+ int childs = NR_CHILDS;
+ pid_t pid;
+
+ if (getenv("I_AM_SERVICE")) {
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ service_prog = argv[0];
+ pid = getpid();
+
+ warnx("(pid=%d) Starting testcase", pid);
+
+ /*
+ * This rlimit is not a problem for root because it can be exceeded.
+ */
+ setrlimit_nproc(1);
+
+ for (i = 0; i < NR_CHILDS; i++) {
+ child[i] = fork_child();
+ wstatus[i] = 0;
+ usleep(250000);
+ }
+
+ while (1) {
+ for (i = 0; i < NR_CHILDS; i++) {
+ if (child[i] <= 0)
+ continue;
+
+ errno = 0;
+ pid_t ret = waitpid(child[i], &wstatus[i], WNOHANG);
+
+ if (!ret || (!WIFEXITED(wstatus[i]) && !WIFSIGNALED(wstatus[i])))
+ continue;
+
+ if (ret < 0 && errno != ECHILD)
+ warn("(pid=%d): waitpid(%d)", pid, child[i]);
+
+ child[i] *= -1;
+ childs -= 1;
+ }
+
+ if (!childs)
+ break;
+
+ usleep(250000);
+
+ for (i = 0; i < NR_CHILDS; i++) {
+ if (child[i] <= 0)
+ continue;
+ kill(child[i], SIGUSR1);
+ }
+ }
+
+ for (i = 0; i < NR_CHILDS; i++) {
+ if (WIFEXITED(wstatus[i]))
+ warnx("(pid=%d): pid %d exited, status=%d",
+ pid, -child[i], WEXITSTATUS(wstatus[i]));
+ else if (WIFSIGNALED(wstatus[i]))
+ warnx("(pid=%d): pid %d killed by signal %d",
+ pid, -child[i], WTERMSIG(wstatus[i]));
+
+ if (WIFSIGNALED(wstatus[i]) && WTERMSIG(wstatus[i]) == SIGUSR1)
+ continue;
+
+ warnx("(pid=%d): Test failed", pid);
+ exit(EXIT_FAILURE);
+ }
+
+ warnx("(pid=%d): Test passed", pid);
+ exit(EXIT_SUCCESS);
+}
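
The primitive behind this test is plain RLIMIT_NPROC enforcement: once an unprivileged user is at its process limit, fork() fails with EAGAIN. A minimal sketch of just that check; the test proper additionally drops to an unprivileged uid and calls unshare(CLONE_NEWUSER) to verify the limit is accounted per user namespace:

#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/resource.h>

static int nproc_limit_enforced(void)
{
	struct rlimit lim = { .rlim_cur = 1, .rlim_max = 1 };
	pid_t pid;

	if (setrlimit(RLIMIT_NPROC, &lim) < 0)
		return -1;

	/* as an unprivileged user already running one process... */
	pid = fork();
	if (pid == 0)
		_exit(0);	/* fork unexpectedly succeeded */
	return (pid == -1 && errno == EAGAIN) ? 0 : -1;
}
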
diff --git a/tools/testing/selftests/sched/.gitignore b/tools/testing/selftests/sched/.gitignore
new file mode 100644
index 000000000000..6996d4654d92
--- /dev/null
+++ b/tools/testing/selftests/sched/.gitignore
@@ -0,0 +1 @@
+cs_prctl_test
diff --git a/tools/testing/selftests/sched/Makefile b/tools/testing/selftests/sched/Makefile
new file mode 100644
index 000000000000..10c72f14fea9
--- /dev/null
+++ b/tools/testing/selftests/sched/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+CLANG_FLAGS += -no-integrated-as
+endif
+
+CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -Wl,-rpath=./ \
+ $(CLANG_FLAGS)
+LDLIBS += -lpthread
+
+TEST_GEN_FILES := cs_prctl_test
+TEST_PROGS := cs_prctl_test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/sched/config b/tools/testing/selftests/sched/config
new file mode 100644
index 000000000000..e8b09aa7c0c4
--- /dev/null
+++ b/tools/testing/selftests/sched/config
@@ -0,0 +1 @@
+CONFIG_SCHED_DEBUG=y
diff --git a/tools/testing/selftests/sched/cs_prctl_test.c b/tools/testing/selftests/sched/cs_prctl_test.c
new file mode 100644
index 000000000000..63fe6521c56d
--- /dev/null
+++ b/tools/testing/selftests/sched/cs_prctl_test.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Use the core scheduling prctl() to test core scheduling cookies control.
+ *
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ * Author: Chris Hyser <chris.hyser@oracle.com>
+ *
+ *
+ * This library is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, see <http://www.gnu.org/licenses>.
+ */
+
+#define _GNU_SOURCE
+#include <sys/eventfd.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sched.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if __GLIBC_PREREQ(2, 30) == 0
+#include <sys/syscall.h>
+static pid_t gettid(void)
+{
+ return syscall(SYS_gettid);
+}
+#endif
+
+#ifndef PR_SCHED_CORE
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie from pid */
+# define PR_SCHED_CORE_MAX 4
+#endif
+
+#define MAX_PROCESSES 128
+#define MAX_THREADS 128
+
+static const char USAGE[] = "cs_prctl_test [options]\n"
+" options:\n"
+" -P : number of processes to create.\n"
+" -T : number of threads per process to create.\n"
+" -d : delay time to keep tasks alive.\n"
+" -k : keep tasks alive until keypress.\n";
+
+enum pid_type {PIDTYPE_PID = 0, PIDTYPE_TGID, PIDTYPE_PGID};
+
+const int THREAD_CLONE_FLAGS = CLONE_THREAD | CLONE_SIGHAND | CLONE_FS | CLONE_VM | CLONE_FILES;
+
+static int _prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4,
+ unsigned long arg5)
+{
+ int res;
+
+ res = prctl(option, arg2, arg3, arg4, arg5);
+ printf("%d = prctl(%d, %ld, %ld, %ld, %lx)\n", res, option, (long)arg2, (long)arg3,
+ (long)arg4, arg5);
+ return res;
+}
+
+#define STACK_SIZE (1024 * 1024)
+
+#define handle_error(msg) __handle_error(__FILE__, __LINE__, msg)
+static void __handle_error(char *fn, int ln, char *msg)
+{
+ printf("(%s:%d) - ", fn, ln);
+ perror(msg);
+ exit(EXIT_FAILURE);
+}
+
+static void handle_usage(int rc, char *msg)
+{
+ puts(USAGE);
+ puts(msg);
+ putchar('\n');
+ exit(rc);
+}
+
+static unsigned long get_cs_cookie(int pid)
+{
+ unsigned long long cookie;
+ int ret;
+
+ ret = prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, pid, PIDTYPE_PID,
+ (unsigned long)&cookie);
+ if (ret) {
+ printf("Not a core sched system\n");
+ return -1UL;
+ }
+
+ return cookie;
+}
+
+struct child_args {
+ int num_threads;
+ int pfd[2];
+ int cpid;
+ int thr_tids[MAX_THREADS];
+};
+
+static int child_func_thread(void __attribute__((unused))*arg)
+{
+ while (1)
+ usleep(20000);
+ return 0;
+}
+
+static void create_threads(int num_threads, int thr_tids[])
+{
+ void *child_stack;
+ pid_t tid;
+ int i;
+
+ for (i = 0; i < num_threads; ++i) {
+ child_stack = malloc(STACK_SIZE);
+ if (!child_stack)
+ handle_error("child stack allocate");
+
+ tid = clone(child_func_thread, child_stack + STACK_SIZE, THREAD_CLONE_FLAGS, NULL);
+ if (tid == -1)
+ handle_error("clone thread");
+ thr_tids[i] = tid;
+ }
+}
+
+static int child_func_process(void *arg)
+{
+ struct child_args *ca = (struct child_args *)arg;
+
+ close(ca->pfd[0]);
+
+ create_threads(ca->num_threads, ca->thr_tids);
+
+ write(ca->pfd[1], &ca->thr_tids, sizeof(int) * ca->num_threads);
+ close(ca->pfd[1]);
+
+ while (1)
+ usleep(20000);
+ return 0;
+}
+
+static unsigned char child_func_process_stack[STACK_SIZE];
+
+void create_processes(int num_processes, int num_threads, struct child_args proc[])
+{
+ pid_t cpid;
+ int i;
+
+ for (i = 0; i < num_processes; ++i) {
+ proc[i].num_threads = num_threads;
+
+ if (pipe(proc[i].pfd) == -1)
+ handle_error("pipe() failed");
+
+ cpid = clone(child_func_process, child_func_process_stack + STACK_SIZE,
+ SIGCHLD, &proc[i]);
+ proc[i].cpid = cpid;
+ close(proc[i].pfd[1]);
+ }
+
+ for (i = 0; i < num_processes; ++i) {
+ read(proc[i].pfd[0], &proc[i].thr_tids, sizeof(int) * proc[i].num_threads);
+ close(proc[i].pfd[0]);
+ }
+}
+
+void disp_processes(int num_processes, struct child_args proc[])
+{
+ int i, j;
+
+ printf("tid=%d, / tgid=%d / pgid=%d: %lx\n", gettid(), getpid(), getpgid(0),
+ get_cs_cookie(getpid()));
+
+ for (i = 0; i < num_processes; ++i) {
+ printf(" tid=%d, / tgid=%d / pgid=%d: %lx\n", proc[i].cpid, proc[i].cpid,
+ getpgid(proc[i].cpid), get_cs_cookie(proc[i].cpid));
+ for (j = 0; j < proc[i].num_threads; ++j) {
+ printf(" tid=%d, / tgid=%d / pgid=%d: %lx\n", proc[i].thr_tids[j],
+ proc[i].cpid, getpgid(0), get_cs_cookie(proc[i].thr_tids[j]));
+ }
+ }
+ puts("\n");
+}
+
+static int errors;
+
+#define validate(v) _validate(__LINE__, v, #v)
+void _validate(int line, int val, char *msg)
+{
+ if (!val) {
+ ++errors;
+ printf("(%d) FAILED: %s\n", line, msg);
+ } else {
+ printf("(%d) PASSED: %s\n", line, msg);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ struct child_args procs[MAX_PROCESSES];
+
+ int keypress = 0;
+ int num_processes = 2;
+ int num_threads = 3;
+ int delay = 0;
+ int res = 0;
+ int pidx;
+ int pid;
+ int opt;
+
+ while ((opt = getopt(argc, argv, ":hkT:P:d:")) != -1) {
+ switch (opt) {
+ case 'P':
+ num_processes = (int)strtol(optarg, NULL, 10);
+ break;
+ case 'T':
+ num_threads = (int)strtoul(optarg, NULL, 10);
+ break;
+ case 'd':
+ delay = (int)strtol(optarg, NULL, 10);
+ break;
+ case 'k':
+ keypress = 1;
+ break;
+ case 'h':
+ printf(USAGE);
+ exit(EXIT_SUCCESS);
+ default:
+ handle_usage(20, "unknown option");
+ }
+ }
+
+ if (num_processes < 1 || num_processes > MAX_PROCESSES)
+ handle_usage(1, "Bad processes value");
+
+ if (num_threads < 1 || num_threads > MAX_THREADS)
+ handle_usage(2, "Bad thread value");
+
+ if (keypress)
+ delay = -1;
+
+ srand(time(NULL));
+
+ /* put into separate process group */
+ if (setpgid(0, 0) != 0)
+ handle_error("process group");
+
+ printf("\n## Create a thread/process/process group hiearchy\n");
+ create_processes(num_processes, num_threads, procs);
+ disp_processes(num_processes, procs);
+ validate(get_cs_cookie(0) == 0);
+
+ printf("\n## Set a cookie on entire process group\n");
+ if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, PIDTYPE_PGID, 0) < 0)
+ handle_error("core_sched create failed -- PGID");
+ disp_processes(num_processes, procs);
+
+ validate(get_cs_cookie(0) != 0);
+
+ /* get a random process pid */
+ pidx = rand() % num_processes;
+ pid = procs[pidx].cpid;
+
+ validate(get_cs_cookie(0) == get_cs_cookie(pid));
+ validate(get_cs_cookie(0) == get_cs_cookie(procs[pidx].thr_tids[0]));
+
+ printf("\n## Set a new cookie on entire process/TGID [%d]\n", pid);
+ if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, pid, PIDTYPE_TGID, 0) < 0)
+ handle_error("core_sched create failed -- TGID");
+ disp_processes(num_processes, procs);
+
+ validate(get_cs_cookie(0) != get_cs_cookie(pid));
+ validate(get_cs_cookie(pid) != 0);
+ validate(get_cs_cookie(pid) == get_cs_cookie(procs[pidx].thr_tids[0]));
+
+ printf("\n## Copy the cookie of current/PGID[%d], to pid [%d] as PIDTYPE_PID\n",
+ getpid(), pid);
+ if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, pid, PIDTYPE_PID, 0) < 0)
+ handle_error("core_sched share to itself failed -- PID");
+ disp_processes(num_processes, procs);
+
+ validate(get_cs_cookie(0) == get_cs_cookie(pid));
+ validate(get_cs_cookie(pid) != 0);
+ validate(get_cs_cookie(pid) != get_cs_cookie(procs[pidx].thr_tids[0]));
+
+ printf("\n## Copy cookie from a thread [%d] to current/PGID [%d] as PIDTYPE_PID\n",
+ procs[pidx].thr_tids[0], getpid());
+ if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_FROM, procs[pidx].thr_tids[0],
+ PIDTYPE_PID, 0) < 0)
+ handle_error("core_sched share from thread failed -- PID");
+ disp_processes(num_processes, procs);
+
+ validate(get_cs_cookie(0) == get_cs_cookie(procs[pidx].thr_tids[0]));
+ validate(get_cs_cookie(pid) != get_cs_cookie(procs[pidx].thr_tids[0]));
+
+ printf("\n## Copy cookie from current [%d] to current as pidtype PGID\n", getpid());
+ if (_prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, 0, PIDTYPE_PGID, 0) < 0)
+ handle_error("core_sched share to self failed -- PGID");
+ disp_processes(num_processes, procs);
+
+ validate(get_cs_cookie(0) == get_cs_cookie(pid));
+ validate(get_cs_cookie(pid) != 0);
+ validate(get_cs_cookie(pid) == get_cs_cookie(procs[pidx].thr_tids[0]));
+
+ if (errors) {
+ printf("TESTS FAILED. errors: %d\n", errors);
+ res = 10;
+ } else {
+ printf("SUCCESS !!!\n");
+ }
+
+ if (keypress)
+ getchar();
+ else
+ sleep(delay);
+
+ for (pidx = 0; pidx < num_processes; ++pidx)
+ kill(procs[pidx].cpid, 15);
+
+ return res;
+}
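
Stripped of the process/thread scaffolding, the interface under test is two prctl() calls: create a fresh cookie for a task, then read it back. A minimal sketch using the same fallback constants the test defines:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE		62
#define PR_SCHED_CORE_GET	0
#define PR_SCHED_CORE_CREATE	1
#endif

static int demo_core_cookie(void)
{
	unsigned long long cookie = 0;

	/* pid 0 == current task, pid_type 0 == PIDTYPE_PID */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
		return -1;	/* likely a kernel without core scheduling */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, 0,
		  (unsigned long)&cookie))
		return -1;
	printf("core sched cookie: %#llx\n", cookie);
	return cookie ? 0 : -1;
}
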
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index fcc806585266..6e5102a7d7c9 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -143,9 +143,15 @@ int main(int argc, char *argv[])
unsigned long long native, filter1, filter2, bitmap1, bitmap2;
unsigned long long entry, per_filter1, per_filter2;
+ setbuf(stdout, NULL);
+
+ printf("Running on:\n");
+ system("uname -a");
+
printf("Current BPF sysctl settings:\n");
- system("sysctl net.core.bpf_jit_enable");
- system("sysctl net.core.bpf_jit_harden");
+ /* Avoid using "sysctl" which may not be installed. */
+ system("grep -H . /proc/sys/net/core/bpf_jit_enable");
+ system("grep -H . /proc/sys/net/core/bpf_jit_harden");
if (argc > 1)
samples = strtoull(argv[1], NULL, 0);
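
The same fallback works without shelling out at all; a sketch that reads the sysctl files directly (paths as used in the patch):

#include <stdio.h>

static void print_sysctl(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* value keeps its newline */
	if (f)
		fclose(f);
}

/* e.g. print_sysctl("/proc/sys/net/core/bpf_jit_enable"); */
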
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e3d5c77a8612..1d64891e6492 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -235,6 +235,10 @@ struct seccomp_notif_addfd {
};
#endif
+#ifndef SECCOMP_ADDFD_FLAG_SEND
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
+#endif
+
struct seccomp_notif_addfd_small {
__u64 id;
char weird[4];
@@ -3959,7 +3963,7 @@ TEST(user_notification_addfd)
{
pid_t pid;
long ret;
- int status, listener, memfd, fd;
+ int status, listener, memfd, fd, nextfd;
struct seccomp_notif_addfd addfd = {};
struct seccomp_notif_addfd_small small = {};
struct seccomp_notif_addfd_big big = {};
@@ -3968,25 +3972,34 @@ TEST(user_notification_addfd)
/* 100 ms */
struct timespec delay = { .tv_nsec = 100000000 };
+ /* There may be arbitrary already-open fds at test start. */
memfd = memfd_create("test", 0);
ASSERT_GE(memfd, 0);
+ nextfd = memfd + 1;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
+ /* fd: 4 */
/* Check that the basic notification machinery works */
listener = user_notif_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
- ASSERT_GE(listener, 0);
+ ASSERT_EQ(listener, nextfd++);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
+ /* fds will be added later; this first call just expects the magic value */
if (syscall(__NR_getppid) != USER_NOTIF_MAGIC)
exit(1);
+
+ /* Atomic addfd+send is received here. Check it is a valid fd */
+ if (fcntl(syscall(__NR_getppid), F_GETFD) == -1)
+ exit(1);
+
exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
}
@@ -4028,14 +4041,14 @@ TEST(user_notification_addfd)
/* Verify we can set an arbitrary remote fd */
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
- EXPECT_GE(fd, 0);
+ EXPECT_EQ(fd, nextfd++);
EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
/* Verify we can set an arbitrary remote fd with large size */
memset(&big, 0x0, sizeof(big));
big.addfd = addfd;
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
- EXPECT_GE(fd, 0);
+ EXPECT_EQ(fd, nextfd++);
/* Verify we can set a specific remote fd */
addfd.newfd = 42;
@@ -4065,6 +4078,32 @@ TEST(user_notification_addfd)
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
ASSERT_EQ(addfd.id, req.id);
+ /* Verify we can do an atomic addfd and send */
+ addfd.newfd = 0;
+ addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
+ fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
+ /*
+ * Child has earlier "low" fds and now 42, so we expect the next
+ * lowest available fd to be assigned here.
+ */
+ EXPECT_EQ(fd, nextfd++);
+ EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
+
+ /*
+ * This sets the ID of the ADD FD to the last request plus 1. The
+ * notification ID increments by 1 per notification.
+ */
+ addfd.id = req.id + 1;
+
+ /* This spins until the underlying notification is generated */
+ while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) == -1 &&
+        errno != EINPROGRESS)
+ nanosleep(&delay, NULL);
+
+ memset(&req, 0, sizeof(req));
+ ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+ ASSERT_EQ(addfd.id, req.id);
+
resp.id = req.id;
resp.error = 0;
resp.val = USER_NOTIF_MAGIC;
@@ -4125,6 +4164,10 @@ TEST(user_notification_addfd_rlimit)
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
EXPECT_EQ(errno, EMFILE);
+ addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
+ EXPECT_EQ(errno, EMFILE);
+
addfd.newfd = 100;
addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
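
The new atomic path reduces to a single ioctl on the listener: with SECCOMP_ADDFD_FLAG_SEND set, the kernel installs the fd in the target and answers the pending notification in one operation, and the target's blocked syscall returns the installed fd number. A sketch, assuming new-enough uapi headers for the flag:

#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* Answer notification `req` by installing `srcfd` in the target and
 * returning it from the intercepted syscall, all in one ioctl. */
static int addfd_and_send(int listener, struct seccomp_notif *req, int srcfd)
{
	struct seccomp_notif_addfd addfd = {
		.id = req->id,
		.srcfd = srcfd,
		.flags = SECCOMP_ADDFD_FLAG_SEND,
	};

	/* on success, the return value is the fd number in the target */
	return ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}
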
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
index 229ee185b27e..254136e3da5a 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
@@ -29,22 +29,26 @@ class SubPlugin(TdcPlugin):
return
# Check for required fields
- scapyinfo = self.args.caseinfo['scapy']
- scapy_keys = ['iface', 'count', 'packet']
- missing_keys = []
- keyfail = False
- for k in scapy_keys:
- if k not in scapyinfo:
- keyfail = True
- missing_keys.add(k)
- if keyfail:
- print('{}: Scapy block present in the test, but is missing info:'
- .format(self.sub_class))
- print('{}'.format(missing_keys))
-
- pkt = eval(scapyinfo['packet'])
- if '$' in scapyinfo['iface']:
- tpl = Template(scapyinfo['iface'])
- scapyinfo['iface'] = tpl.safe_substitute(NAMES)
- for count in range(scapyinfo['count']):
- sendp(pkt, iface=scapyinfo['iface'])
+ lscapyinfo = self.args.caseinfo['scapy']
+ if not isinstance(lscapyinfo, list):
+ lscapyinfo = [ lscapyinfo, ]
+
+ for scapyinfo in lscapyinfo:
+ scapy_keys = ['iface', 'count', 'packet']
+ missing_keys = []
+ keyfail = False
+ for k in scapy_keys:
+ if k not in scapyinfo:
+ keyfail = True
+ missing_keys.append(k)
+ if keyfail:
+ print('{}: Scapy block present in the test, but is missing info:'
+ .format(self.sub_class))
+ print('{}'.format(missing_keys))
+
+ pkt = eval(scapyinfo['packet'])
+ if '$' in scapyinfo['iface']:
+ tpl = Template(scapyinfo['iface'])
+ scapyinfo['iface'] = tpl.safe_substitute(NAMES)
+ for count in range(scapyinfo['count']):
+ sendp(pkt, iface=scapyinfo['iface'])
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
index 4202e95e27b9..bd843ab00a58 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
@@ -406,5 +406,50 @@
"teardown": [
"$TC actions flush action ct"
]
+ },
+ {
+ "id": "3992",
+ "name": "Add ct action triggering DNAT tuple conflict",
+ "category": [
+ "actions",
+ "ct",
+ "scapy"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin",
+ "scapyPlugin"
+ ]
+ },
+ "setup": [
+ [
+ "$TC qdisc del dev $DEV1 ingress",
+ 0,
+ 1,
+ 2,
+ 255
+ ],
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 1 flower ct_state -trk action ct commit nat dst addr 20.0.0.1 port 10 pipe action drop",
+ "scapy": [
+ {
+ "iface": "$DEV0",
+ "count": 1,
+ "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.10')/TCP(sport=5000,dport=10)"
+ },
+ {
+ "iface": "$DEV0",
+ "count": 1,
+ "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+ }
+ ],
+ "expExitCode": "0",
+ "verifyCmd": "cat /proc/net/nf_conntrack",
+ "matchPattern": "dst=10.0.0.20",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 41d783254b08..2aad4caa8581 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -446,6 +446,30 @@
"teardown": []
},
{
+ "id": "ba5b",
+ "name": "Add vlan modify action for protocol 802.1Q setting priority 0",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action vlan modify protocol 802.1Q id 5 priority 0 index 100",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action vlan index 100",
+ "matchPattern": "action order [0-9]+: vlan.*modify id 100 priority 0 protocol 802.1Q pipe.*index 100 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
+ },
+ {
"id": "6812",
"name": "Add vlan modify action for protocol 802.1Q",
"category": [
@@ -463,7 +487,7 @@
"cmdUnderTest": "$TC actions add action vlan modify protocol 802.1Q id 5 index 100",
"expExitCode": "0",
"verifyCmd": "$TC actions get action vlan index 100",
- "matchPattern": "action order [0-9]+: vlan.*modify id 100 protocol 802.1Q priority 0 pipe.*index 100 ref",
+ "matchPattern": "action order [0-9]+: vlan.*modify id 100 protocol 802.1Q pipe.*index 100 ref",
"matchCount": "0",
"teardown": [
"$TC actions flush action vlan"
@@ -487,7 +511,7 @@
"cmdUnderTest": "$TC actions add action vlan modify protocol 802.1ad id 500 reclassify index 12",
"expExitCode": "0",
"verifyCmd": "$TC actions get action vlan index 12",
- "matchPattern": "action order [0-9]+: vlan.*modify id 500 protocol 802.1ad priority 0 reclassify.*index 12 ref",
+ "matchPattern": "action order [0-9]+: vlan.*modify id 500 protocol 802.1ad reclassify.*index 12 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action vlan"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
index 1cda2e11b3ad..773c5027553d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
@@ -9,11 +9,11 @@
"setup": [
"$IP link add dev $DUMMY type dummy || /bin/true"
],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
- "expExitCode": "2",
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
+ "expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc",
- "matchCount": "0",
+ "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
+ "matchCount": "1",
"teardown": [
"$IP link del dev $DUMMY"
]
diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c
index 1e662d59c502..fe043f67798b 100644
--- a/tools/testing/selftests/vm/gup_test.c
+++ b/tools/testing/selftests/vm/gup_test.c
@@ -6,6 +6,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <pthread.h>
+#include <assert.h>
#include "../../../../mm/gup_test.h"
#define MB (1UL << 20)
@@ -15,6 +17,12 @@
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
+static unsigned long cmd = GUP_FAST_BENCHMARK;
+static int gup_fd, repeats = 1;
+static unsigned long size = 128 * MB;
+/* Serialize prints */
+static pthread_mutex_t print_mutex = PTHREAD_MUTEX_INITIALIZER;
+
static char *cmd_to_str(unsigned long cmd)
{
switch (cmd) {
@@ -34,17 +42,55 @@ static char *cmd_to_str(unsigned long cmd)
return "Unknown command";
}
+void *gup_thread(void *data)
+{
+ struct gup_test gup = *(struct gup_test *)data;
+ int i;
+
+ /* Only report timing information on the *_BENCHMARK commands: */
+ if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
+ (cmd == PIN_LONGTERM_BENCHMARK)) {
+ for (i = 0; i < repeats; i++) {
+ gup.size = size;
+ if (ioctl(gup_fd, cmd, &gup))
+ perror("ioctl"), exit(1);
+
+ pthread_mutex_lock(&print_mutex);
+ printf("%s: Time: get:%lld put:%lld us",
+ cmd_to_str(cmd), gup.get_delta_usec,
+ gup.put_delta_usec);
+ if (gup.size != size)
+ printf(", truncated (size: %lld)", gup.size);
+ printf("\n");
+ pthread_mutex_unlock(&print_mutex);
+ }
+ } else {
+ gup.size = size;
+ if (ioctl(gup_fd, cmd, &gup)) {
+ perror("ioctl");
+ exit(1);
+ }
+
+ pthread_mutex_lock(&print_mutex);
+ printf("%s: done\n", cmd_to_str(cmd));
+ if (gup.size != size)
+ printf("Truncated (size: %lld)\n", gup.size);
+ pthread_mutex_unlock(&print_mutex);
+ }
+
+ return NULL;
+}
+
int main(int argc, char **argv)
{
struct gup_test gup = { 0 };
- unsigned long size = 128 * MB;
- int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 1;
- unsigned long cmd = GUP_FAST_BENCHMARK;
+ int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret;
int flags = MAP_PRIVATE, touch = 0;
char *file = "/dev/zero";
+ pthread_t *tid;
char *p;
- while ((opt = getopt(argc, argv, "m:r:n:F:f:abctTLUuwWSHpz")) != -1) {
+ while ((opt = getopt(argc, argv, "m:r:n:F:f:abcj:tTLUuwWSHpz")) != -1) {
switch (opt) {
case 'a':
cmd = PIN_FAST_BENCHMARK;
@@ -74,6 +120,9 @@ int main(int argc, char **argv)
/* strtol, so you can pass flags in hex form */
gup.gup_flags = strtol(optarg, 0, 0);
break;
+ case 'j':
+ nthreads = atoi(optarg);
+ break;
case 'm':
size = atoi(optarg) * MB;
break;
@@ -154,8 +203,8 @@ int main(int argc, char **argv)
if (write)
gup.gup_flags |= FOLL_WRITE;
- fd = open("/sys/kernel/debug/gup_test", O_RDWR);
- if (fd == -1) {
+ gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
+ if (gup_fd == -1) {
perror("open");
exit(1);
}
@@ -185,32 +234,17 @@ int main(int argc, char **argv)
p[0] = 0;
}
- /* Only report timing information on the *_BENCHMARK commands: */
- if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
- (cmd == PIN_LONGTERM_BENCHMARK)) {
- for (i = 0; i < repeats; i++) {
- gup.size = size;
- if (ioctl(fd, cmd, &gup))
- perror("ioctl"), exit(1);
-
- printf("%s: Time: get:%lld put:%lld us",
- cmd_to_str(cmd), gup.get_delta_usec,
- gup.put_delta_usec);
- if (gup.size != size)
- printf(", truncated (size: %lld)", gup.size);
- printf("\n");
- }
- } else {
- gup.size = size;
- if (ioctl(fd, cmd, &gup)) {
- perror("ioctl");
- exit(1);
- }
-
- printf("%s: done\n", cmd_to_str(cmd));
- if (gup.size != size)
- printf("Truncated (size: %lld)\n", gup.size);
+ tid = malloc(sizeof(pthread_t) * nthreads);
+ assert(tid);
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_create(&tid[i], NULL, gup_thread, &gup);
+ assert(ret == 0);
+ }
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_join(tid[i], NULL);
+ assert(ret == 0);
}
+ free(tid);
return 0;
}
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 7ed7cd95e58f..ebc4ee0fe179 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
ip1 -4 route add default dev wg0 table 51820
ip1 -4 rule add not fwmark 51820 table 51820
ip1 -4 rule add table main suppress_prefixlength 0
+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
# Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
n1 ping -W 1 -c 100 -f 192.168.99.7
n1 ping -W 1 -c 100 -f abab::1111
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index 4eecb432a66c..74db83a0aedd 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
CONFIG_NETFILTER_XT_NAT=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_NAT_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
diff --git a/tools/testing/selftests/x86/syscall_numbering.c b/tools/testing/selftests/x86/syscall_numbering.c
index d6b09cb1aa2c..991591718bb0 100644
--- a/tools/testing/selftests/x86/syscall_numbering.c
+++ b/tools/testing/selftests/x86/syscall_numbering.c
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * syscall_arg_fault.c - tests faults 32-bit fast syscall stack args
+ * syscall_numbering.c - test calling the x86-64 kernel with various
+ * valid and invalid system call numbers.
+ *
* Copyright (c) 2018 Andrew Lutomirski
*/
@@ -11,79 +13,470 @@
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
-#include <syscall.h>
+#include <string.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <signal.h>
+#include <sysexits.h>
-static int nerrs;
+#include <sys/ptrace.h>
+#include <sys/user.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
-#define X32_BIT 0x40000000UL
+#include <linux/ptrace.h>
-static void check_enosys(unsigned long nr, bool *ok)
+/* Common system call numbers */
+#define SYS_READ 0
+#define SYS_WRITE 1
+#define SYS_GETPID 39
+/* x64-only system call numbers */
+#define X64_IOCTL 16
+#define X64_READV 19
+#define X64_WRITEV 20
+/* x32-only system call numbers (without X32_BIT) */
+#define X32_IOCTL 514
+#define X32_READV 515
+#define X32_WRITEV 516
+
+#define X32_BIT 0x40000000
+
+static int nullfd = -1; /* File descriptor for /dev/null */
+static bool with_x32; /* x32 supported on this kernel? */
+
+enum ptrace_pass {
+ PTP_NOTHING,
+ PTP_GETREGS,
+ PTP_WRITEBACK,
+ PTP_FUZZRET,
+ PTP_FUZZHIGH,
+ PTP_INTNUM,
+ PTP_DONE
+};
+
+static const char * const ptrace_pass_name[] =
{
- /* If this fails, a segfault is reasonably likely. */
- fflush(stdout);
+ [PTP_NOTHING] = "just stop, no data read",
+ [PTP_GETREGS] = "only getregs",
+ [PTP_WRITEBACK] = "getregs, unmodified setregs",
+ [PTP_FUZZRET] = "modifying the default return",
+ [PTP_FUZZHIGH] = "clobbering the top 32 bits",
+ [PTP_INTNUM] = "sign-extending the syscall number",
+};
- long ret = syscall(nr, 0, 0, 0, 0, 0, 0);
- if (ret == 0) {
- printf("[FAIL]\tsyscall %lu succeeded, but it should have failed\n", nr);
- *ok = false;
- } else if (errno != ENOSYS) {
- printf("[FAIL]\tsyscall %lu had error code %d, but it should have reported ENOSYS\n", nr, errno);
- *ok = false;
- }
+/*
+ * Shared memory block between tracer and test
+ */
+struct shared {
+ unsigned int nerr; /* Total error count */
+ unsigned int indent; /* Message indentation level */
+ enum ptrace_pass ptrace_pass;
+ bool probing_syscall; /* In probe_syscall() */
+};
+static volatile struct shared *sh;
+
+static inline unsigned int offset(void)
+{
+ unsigned int level = sh ? sh->indent : 0;
+
+ return 8 + level * 4;
}
-static void test_x32_without_x32_bit(void)
+#define msg(lvl, fmt, ...) printf("%-*s" fmt, offset(), "[" #lvl "]", \
+ ## __VA_ARGS__)
+
+#define run(fmt, ...) msg(RUN, fmt, ## __VA_ARGS__)
+#define info(fmt, ...) msg(INFO, fmt, ## __VA_ARGS__)
+#define ok(fmt, ...) msg(OK, fmt, ## __VA_ARGS__)
+
+#define fail(fmt, ...) \
+ do { \
+ msg(FAIL, fmt, ## __VA_ARGS__); \
+ sh->nerr++; \
+ } while (0)
+
+#define crit(fmt, ...) \
+ do { \
+ sh->indent = 0; \
+ msg(FAIL, fmt, ## __VA_ARGS__); \
+ msg(SKIP, "Unable to run test\n"); \
+ exit(EX_OSERR); \
+ } while (0)
+
+/* Sentinel for ptrace-modified return value */
+#define MODIFIED_BY_PTRACE -9999
+
+/*
+ * Directly invokes the given syscall with nullfd as the first argument
+ * and the rest zero. Avoids involving glibc wrappers in case they ever
+ * end up intercepting some system calls for some reason, or modify
+ * the system call number itself.
+ */
+static long long probe_syscall(int msb, int lsb)
{
- bool ok = true;
+ register long long arg1 asm("rdi") = nullfd;
+ register long long arg2 asm("rsi") = 0;
+ register long long arg3 asm("rdx") = 0;
+ register long long arg4 asm("r10") = 0;
+ register long long arg5 asm("r8") = 0;
+ register long long arg6 asm("r9") = 0;
+ long long nr = ((long long)msb << 32) | (unsigned int)lsb;
+ long long ret;
/*
- * Syscalls 512-547 are "x32" syscalls. They are intended to be
- * called with the x32 (0x40000000) bit set. Calling them without
- * the x32 bit set is nonsense and should not work.
+ * We pass in an extra copy of the extended system call number
+ * in %rbx, so we can examine it from the ptrace handler without
+ * worrying about it being possibly modified. This is to test
+ * the validity of struct user regs.orig_rax a.k.a.
+ * struct pt_regs.orig_ax.
*/
- printf("[RUN]\tChecking syscalls 512-547\n");
- for (int i = 512; i <= 547; i++)
- check_enosys(i, &ok);
+ sh->probing_syscall = true;
+ asm volatile("syscall"
+ : "=a" (ret)
+ : "a" (nr), "b" (nr),
+ "r" (arg1), "r" (arg2), "r" (arg3),
+ "r" (arg4), "r" (arg5), "r" (arg6)
+ : "rcx", "r11", "memory", "cc");
+ sh->probing_syscall = false;
+
+ return ret;
+}
+
+static const char *syscall_str(int msb, int start, int end)
+{
+ static char buf[64];
+ const char * const type = (start & X32_BIT) ? "x32" : "x64";
+ int lsb = start;
/*
- * Check that a handful of 64-bit-only syscalls are rejected if the x32
- * bit is set.
+ * Improve readability by stripping the x32 bit, but round
+ * toward zero so we don't display -1 as -1073741825.
*/
- printf("[RUN]\tChecking some 64-bit syscalls in x32 range\n");
- check_enosys(16 | X32_BIT, &ok); /* ioctl */
- check_enosys(19 | X32_BIT, &ok); /* readv */
- check_enosys(20 | X32_BIT, &ok); /* writev */
+ if (lsb < 0)
+ lsb |= X32_BIT;
+ else
+ lsb &= ~X32_BIT;
+
+ if (start == end)
+ snprintf(buf, sizeof buf, "%s syscall %d:%d",
+ type, msb, lsb);
+ else
+ snprintf(buf, sizeof buf, "%s syscalls %d:%d..%d",
+ type, msb, lsb, lsb + (end-start));
+
+ return buf;
+}
+
+static unsigned int _check_for(int msb, int start, int end, long long expect,
+ const char *expect_str)
+{
+ unsigned int err = 0;
+
+ sh->indent++;
+ if (start != end)
+ sh->indent++;
+
+ for (int nr = start; nr <= end; nr++) {
+ long long ret = probe_syscall(msb, nr);
+
+ if (ret != expect) {
+ fail("%s returned %lld, but it should have returned %s\n",
+ syscall_str(msb, nr, nr),
+ ret, expect_str);
+ err++;
+ }
+ }
+
+ if (start != end)
+ sh->indent--;
+
+ if (err) {
+ if (start != end)
+ fail("%s had %u failure%s\n",
+ syscall_str(msb, start, end),
+ err, err == 1 ? "" : "s");
+ } else {
+ ok("%s returned %s as expected\n",
+ syscall_str(msb, start, end), expect_str);
+ }
+
+ sh->indent--;
+
+ return err;
+}
+
+#define check_for(msb,start,end,expect) \
+ _check_for(msb,start,end,expect,#expect)
+
+static bool check_zero(int msb, int nr)
+{
+ return check_for(msb, nr, nr, 0);
+}
+
+static bool check_enosys(int msb, int nr)
+{
+ return check_for(msb, nr, nr, -ENOSYS);
+}
+
+/*
+ * Anyone diagnosing a failure will want to know whether the kernel
+ * supports x32. Tell them. This can also be used to conditionalize
+ * tests based on existence or nonexistence of x32.
+ */
+static bool test_x32(void)
+{
+ long long ret;
+ pid_t mypid = getpid();
+
+ run("Checking for x32 by calling x32 getpid()\n");
+ ret = probe_syscall(0, SYS_GETPID | X32_BIT);
+
+ sh->indent++;
+ if (ret == mypid) {
+ info("x32 is supported\n");
+ with_x32 = true;
+ } else if (ret == -ENOSYS) {
+ info("x32 is not supported\n");
+ with_x32 = false;
+ } else {
+ fail("x32 getpid() returned %lld, but it should have returned either %lld or -ENOSYS\n", ret, (long long)mypid);
+ with_x32 = false;
+ }
+ sh->indent--;
+ return with_x32;
+}
+
+static void test_syscalls_common(int msb)
+{
+ enum ptrace_pass pass = sh->ptrace_pass;
+
+ run("Checking some common syscalls as 64 bit\n");
+ check_zero(msb, SYS_READ);
+ check_zero(msb, SYS_WRITE);
+
+ run("Checking some 64-bit only syscalls as 64 bit\n");
+ check_zero(msb, X64_READV);
+ check_zero(msb, X64_WRITEV);
+
+ run("Checking out of range system calls\n");
+ check_for(msb, -64, -2, -ENOSYS);
+ if (pass >= PTP_FUZZRET)
+ check_for(msb, -1, -1, MODIFIED_BY_PTRACE);
+ else
+ check_for(msb, -1, -1, -ENOSYS);
+ check_for(msb, X32_BIT-64, X32_BIT-1, -ENOSYS);
+ check_for(msb, -64-X32_BIT, -1-X32_BIT, -ENOSYS);
+ check_for(msb, INT_MAX-64, INT_MAX-1, -ENOSYS);
+}
+static void test_syscalls_with_x32(int msb)
+{
/*
- * Check some syscalls with high bits set.
+ * Syscalls 512-547 are "x32" syscalls. They are
+ * intended to be called with the x32 (0x40000000) bit
+ * set. Calling them without the x32 bit set is
+ * nonsense and should not work.
*/
- printf("[RUN]\tChecking numbers above 2^32-1\n");
- check_enosys((1UL << 32), &ok);
- check_enosys(X32_BIT | (1UL << 32), &ok);
+ run("Checking x32 syscalls as 64 bit\n");
+ check_for(msb, 512, 547, -ENOSYS);
- if (!ok)
- nerrs++;
- else
- printf("[OK]\tThey all returned -ENOSYS\n");
+ run("Checking some common syscalls as x32\n");
+ check_zero(msb, SYS_READ | X32_BIT);
+ check_zero(msb, SYS_WRITE | X32_BIT);
+
+ run("Checking some x32 syscalls as x32\n");
+ check_zero(msb, X32_READV | X32_BIT);
+ check_zero(msb, X32_WRITEV | X32_BIT);
+
+ run("Checking some 64-bit syscalls as x32\n");
+ check_enosys(msb, X64_IOCTL | X32_BIT);
+ check_enosys(msb, X64_READV | X32_BIT);
+ check_enosys(msb, X64_WRITEV | X32_BIT);
}
-int main()
+static void test_syscalls_without_x32(int msb)
{
+ run("Checking for absence of x32 system calls\n");
+ check_for(msb, 0 | X32_BIT, 999 | X32_BIT, -ENOSYS);
+}
+
+static void test_syscall_numbering(void)
+{
+ static const int msbs[] = {
+ 0, 1, -1, X32_BIT-1, X32_BIT, X32_BIT-1, -X32_BIT, INT_MAX,
+ INT_MIN, INT_MIN+1
+ };
+
+ sh->indent++;
+
/*
- * Anyone diagnosing a failure will want to know whether the kernel
- * supports x32. Tell them.
+ * The MSB is supposed to be ignored, so we loop over a few
+ * to test that out.
*/
- printf("\tChecking for x32...");
- fflush(stdout);
- if (syscall(39 | X32_BIT, 0, 0, 0, 0, 0, 0) >= 0) {
- printf(" supported\n");
- } else if (errno == ENOSYS) {
- printf(" not supported\n");
+ for (size_t i = 0; i < sizeof(msbs)/sizeof(msbs[0]); i++) {
+ int msb = msbs[i];
+ run("Checking system calls with msb = %d (0x%x)\n",
+ msb, msb);
+
+ sh->indent++;
+
+ test_syscalls_common(msb);
+ if (with_x32)
+ test_syscalls_with_x32(msb);
+ else
+ test_syscalls_without_x32(msb);
+
+ sh->indent--;
+ }
+
+ sh->indent--;
+}
+
+static void syscall_numbering_tracee(void)
+{
+ enum ptrace_pass pass;
+
+ if (ptrace(PTRACE_TRACEME, 0, 0, 0)) {
+ crit("Failed to request tracing\n");
+ return;
+ }
+ raise(SIGSTOP);
+
+ for (sh->ptrace_pass = pass = PTP_NOTHING; pass < PTP_DONE;
+ sh->ptrace_pass = ++pass) {
+ run("Running tests under ptrace: %s\n", ptrace_pass_name[pass]);
+ test_syscall_numbering();
+ }
+}
+
+static void mess_with_syscall(pid_t testpid, enum ptrace_pass pass)
+{
+ struct user_regs_struct regs;
+
+ sh->probing_syscall = false; /* Do this on entry only */
+
+ /* For these, don't even getregs */
+ if (pass == PTP_NOTHING || pass == PTP_DONE)
+ return;
+
+ ptrace(PTRACE_GETREGS, testpid, NULL, &regs);
+
+ if (regs.orig_rax != regs.rbx) {
+ fail("orig_rax %#llx doesn't match syscall number %#llx\n",
+ (unsigned long long)regs.orig_rax,
+ (unsigned long long)regs.rbx);
+ }
+
+ switch (pass) {
+ case PTP_GETREGS:
+ /* Just read, no writeback */
+ return;
+ case PTP_WRITEBACK:
+ /* Write back the same register state verbatim */
+ break;
+ case PTP_FUZZRET:
+ regs.rax = MODIFIED_BY_PTRACE;
+ break;
+ case PTP_FUZZHIGH:
+ regs.rax = MODIFIED_BY_PTRACE;
+ regs.orig_rax = regs.orig_rax | 0xffffffff00000000ULL;
+ break;
+ case PTP_INTNUM:
+ regs.rax = MODIFIED_BY_PTRACE;
+ regs.orig_rax = (int)regs.orig_rax;
+ break;
+ default:
+ crit("invalid ptrace_pass\n");
+ break;
+ }
+
+ ptrace(PTRACE_SETREGS, testpid, NULL, &regs);
+}
+
+static void syscall_numbering_tracer(pid_t testpid)
+{
+ int wstatus;
+
+ do {
+ pid_t wpid = waitpid(testpid, &wstatus, 0);
+ if (wpid < 0 && errno != EINTR)
+ break;
+ if (wpid != testpid)
+ continue;
+ if (!WIFSTOPPED(wstatus))
+ break; /* Thread exited? */
+
+ if (sh->probing_syscall && WSTOPSIG(wstatus) == SIGTRAP)
+ mess_with_syscall(testpid, sh->ptrace_pass);
+ } while (sh->ptrace_pass != PTP_DONE &&
+ !ptrace(PTRACE_SYSCALL, testpid, NULL, NULL));
+
+ ptrace(PTRACE_DETACH, testpid, NULL, NULL);
+
+ /* Wait for the child process to terminate */
+ while (waitpid(testpid, &wstatus, 0) != testpid || !WIFEXITED(wstatus))
+ /* wait some more */;
+}
+
+static void test_traced_syscall_numbering(void)
+{
+ pid_t testpid;
+
+ /* Launch the test thread; this thread continues as the tracer thread */
+ testpid = fork();
+
+ if (testpid < 0) {
+ crit("Unable to launch tracer process\n");
+ } else if (testpid == 0) {
+ syscall_numbering_tracee();
+ _exit(0);
} else {
- printf(" confused\n");
+ syscall_numbering_tracer(testpid);
}
+}
- test_x32_without_x32_bit();
+int main(void)
+{
+ unsigned int nerr;
- return nerrs ? 1 : 0;
+ /*
+ * It is quite likely to get a segfault on a failure, so make
+ * sure the message gets out by setting stdout to nonbuffered.
+ */
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ /*
+ * Harmless file descriptor to work on...
+ */
+ nullfd = open("/dev/null", O_RDWR);
+ if (nullfd < 0) {
+ crit("Unable to open /dev/null: %s\n", strerror(errno));
+ }
+
+ /*
+ * Set up a block of shared memory...
+ */
+ sh = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_SHARED, -1, 0);
+ if (sh == MAP_FAILED) {
+ crit("Unable to allocated shared memory block: %s\n",
+ strerror(errno));
+ }
+
+ with_x32 = test_x32();
+
+ run("Running tests without ptrace...\n");
+ test_syscall_numbering();
+
+ test_traced_syscall_numbering();
+
+ nerr = sh->nerr;
+ if (!nerr) {
+ ok("All system calls succeeded or failed as expected\n");
+ return 0;
+ } else {
+ fail("A total of %u system call%s had incorrect behavior\n",
+ nerr, nerr != 1 ? "s" : "");
+ return 1;
+ }
}
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 93cbd6f603f9..2acbb7703c6a 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -84,7 +84,7 @@ void vsock_wait_remote_close(int fd)
}
/* Connect to <cid, port> and return the file descriptor. */
-int vsock_stream_connect(unsigned int cid, unsigned int port)
+static int vsock_connect(unsigned int cid, unsigned int port, int type)
{
union {
struct sockaddr sa;
@@ -101,7 +101,7 @@ int vsock_stream_connect(unsigned int cid, unsigned int port)
control_expectln("LISTENING");
- fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ fd = socket(AF_VSOCK, type, 0);
timeout_begin(TIMEOUT);
do {
@@ -120,11 +120,21 @@ int vsock_stream_connect(unsigned int cid, unsigned int port)
return fd;
}
+int vsock_stream_connect(unsigned int cid, unsigned int port)
+{
+ return vsock_connect(cid, port, SOCK_STREAM);
+}
+
+int vsock_seqpacket_connect(unsigned int cid, unsigned int port)
+{
+ return vsock_connect(cid, port, SOCK_SEQPACKET);
+}
+
/* Listen on <cid, port> and return the first incoming connection. The remote
* address is stored to clientaddrp. clientaddrp may be NULL.
*/
-int vsock_stream_accept(unsigned int cid, unsigned int port,
- struct sockaddr_vm *clientaddrp)
+static int vsock_accept(unsigned int cid, unsigned int port,
+ struct sockaddr_vm *clientaddrp, int type)
{
union {
struct sockaddr sa;
@@ -145,7 +155,7 @@ int vsock_stream_accept(unsigned int cid, unsigned int port,
int client_fd;
int old_errno;
- fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ fd = socket(AF_VSOCK, type, 0);
if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
perror("bind");
@@ -189,6 +199,18 @@ int vsock_stream_accept(unsigned int cid, unsigned int port,
return client_fd;
}
+int vsock_stream_accept(unsigned int cid, unsigned int port,
+ struct sockaddr_vm *clientaddrp)
+{
+ return vsock_accept(cid, port, clientaddrp, SOCK_STREAM);
+}
+
+int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
+ struct sockaddr_vm *clientaddrp)
+{
+ return vsock_accept(cid, port, clientaddrp, SOCK_SEQPACKET);
+}
+
/* Transmit one byte and check the return value.
*
* expected_ret:
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index e53dd09d26d9..a3375ad2fb7f 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -36,8 +36,11 @@ struct test_case {
void init_signals(void);
unsigned int parse_cid(const char *str);
int vsock_stream_connect(unsigned int cid, unsigned int port);
+int vsock_seqpacket_connect(unsigned int cid, unsigned int port);
int vsock_stream_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp);
+int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
+ struct sockaddr_vm *clientaddrp);
void vsock_wait_remote_close(int fd);
void send_byte(int fd, int expected_ret, int flags);
void recv_byte(int fd, int expected_ret, int flags);
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 5a4fb80fa832..67766bfe176f 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -14,6 +14,8 @@
#include <errno.h>
#include <unistd.h>
#include <linux/kernel.h>
+#include <sys/types.h>
+#include <sys/socket.h>
#include "timeout.h"
#include "control.h"
@@ -279,6 +281,110 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
close(fd);
}
+#define MESSAGES_CNT 7
+static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+{
+ int fd;
+
+ fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Send several one-byte messages; each send() forms its own record */
+ for (int i = 0; i < MESSAGES_CNT; i++)
+ send_byte(fd, 1, 0);
+
+ control_writeln("SENDDONE");
+ close(fd);
+}
+
+static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+{
+ int fd;
+ char buf[16];
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("SENDDONE");
+ iov.iov_base = buf;
+ iov.iov_len = sizeof(buf);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ for (int i = 0; i < MESSAGES_CNT; i++) {
+ if (recvmsg(fd, &msg, 0) != 1) {
+ perror("message bound violated");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ close(fd);
+}
+
+#define MESSAGE_TRUNC_SZ 32
+static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
+{
+ int fd;
+ char buf[MESSAGE_TRUNC_SZ];
+
+ fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ if (send(fd, buf, sizeof(buf), 0) != sizeof(buf)) {
+ perror("send failed");
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln("SENDDONE");
+ close(fd);
+}
+
+static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
+{
+ int fd;
+ char buf[MESSAGE_TRUNC_SZ / 2];
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("SENDDONE");
+ iov.iov_base = buf;
+ iov.iov_len = sizeof(buf);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC);
+
+ if (ret != MESSAGE_TRUNC_SZ) {
+ printf("%zi\n", ret);
+ perror("MSG_TRUNC doesn't work");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!(msg.msg_flags & MSG_TRUNC)) {
+ fprintf(stderr, "MSG_TRUNC expected\n");
+ exit(EXIT_FAILURE);
+ }
+
+ close(fd);
+}
+
static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM connection reset",
@@ -309,6 +415,16 @@ static struct test_case test_cases[] = {
.run_client = test_stream_msg_peek_client,
.run_server = test_stream_msg_peek_server,
},
+ {
+ .name = "SOCK_SEQPACKET msg bounds",
+ .run_client = test_seqpacket_msg_bounds_client,
+ .run_server = test_seqpacket_msg_bounds_server,
+ },
+ {
+ .name = "SOCK_SEQPACKET MSG_TRUNC flag",
+ .run_client = test_seqpacket_msg_trunc_client,
+ .run_server = test_seqpacket_msg_trunc_server,
+ },
{},
};
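The two tests above rely on generic SOCK_SEQPACKET semantics rather than anything vsock-specific: every send() produces one record, recvmsg() never returns bytes from two records at once, and MSG_TRUNC reports the true record length when the receive buffer is too small. A self-contained sketch of the same behavior over AF_UNIX (used here only because it needs no guest/host vsock pair) could be:

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	char big[32], small[16];
	struct iovec iov = { .iov_base = small, .iov_len = sizeof(small) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t ret;

	assert(socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) == 0);

	memset(big, 'x', sizeof(big));
	assert(send(sv[0], big, sizeof(big), 0) == sizeof(big));

	/*
	 * The 32-byte record does not fit in the 16-byte buffer: with
	 * MSG_TRUNC in flags the real record length is returned, and
	 * the kernel sets MSG_TRUNC in msg_flags to signal truncation.
	 */
	ret = recvmsg(sv[1], &msg, MSG_TRUNC);
	printf("ret=%zd truncated=%d\n", ret, !!(msg.msg_flags & MSG_TRUNC));
	/* prints: ret=32 truncated=1 */
	return 0;
}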
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 85eb65ea16d3..0e75f22c9475 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -132,6 +132,10 @@ int main(int argc, char **argv)
qsort(list, list_size, sizeof(list[0]), compare_txt);
list2 = malloc(sizeof(*list) * list_size);
+ if (!list2) {
+ printf("Out of memory\n");
+ exit(1);
+ }
printf("culling\n");
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 1c37ccd5d402..62b39149b8c8 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -63,3 +63,6 @@ config HAVE_KVM_NO_POLL
config KVM_XFER_TO_GUEST_WORK
bool
+
+config HAVE_KVM_PM_NOTIFIER
+ bool
diff --git a/virt/kvm/binary_stats.c b/virt/kvm/binary_stats.c
new file mode 100644
index 000000000000..e609d428811a
--- /dev/null
+++ b/virt/kvm/binary_stats.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM binary statistics interface implementation
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+
+/**
+ * kvm_stats_read() - Common function to read from the binary statistics
+ * file descriptor.
+ *
+ * @id: identification string of the stats
+ * @header: stats header for a vm or a vcpu
+ * @desc: start address of an array of stats descriptors for a vm or a vcpu
+ * @stats: start address of stats data block for a vm or a vcpu
+ * @size_stats: the size of stats data block pointed to by @stats
+ * @user_buffer: start address of userspace buffer
+ * @size: requested read size from userspace
+ * @offset: the start position from which the content will be read for the
+ * corresponding vm or vcpu file descriptor
+ *
+ * The file content of a vm/vcpu file descriptor is now defined as below:
+ * +-------------+
+ * | Header |
+ * +-------------+
+ * | id string |
+ * +-------------+
+ * | Descriptors |
+ * +-------------+
+ * | Stats Data |
+ * +-------------+
+ * Although this function allows userspace to read any amount of data (as long
+ * as it is within the limit) from any position, the typical usage would follow
+ * the steps below:
+ * 1. Read the header from offset 0. Get the offset of the descriptors and
+ *    stats data and some other necessary information. This is done once for
+ *    the lifetime of the corresponding vm/vcpu stats fd.
+ * 2. Read the id string from its offset. This is done once for the lifetime
+ *    of the corresponding vm/vcpu stats fd.
+ * 3. Read the descriptors from their offset and discover all the stats by
+ *    parsing them. This is done once for the lifetime of the corresponding
+ *    vm/vcpu stats fd.
+ * 4. Periodically read the stats data from its offset using pread().
+ *
+ * Return: the number of bytes that have been successfully read, or -EFAULT
+ * if copying to userspace fails
+ */
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset)
+{
+ ssize_t len;
+ ssize_t copylen;
+ ssize_t remain = size;
+ size_t size_desc;
+ size_t size_header;
+ void *src;
+ loff_t pos = *offset;
+ char __user *dest = user_buffer;
+
+ size_header = sizeof(*header);
+ size_desc = header->num_desc * sizeof(*desc);
+
+ len = KVM_STATS_NAME_SIZE + size_header + size_desc + size_stats - pos;
+ len = min(len, remain);
+ if (len <= 0)
+ return 0;
+ remain = len;
+
+ /*
+ * Copy kvm stats header.
+ * The header is the first block of content userspace usually reads out.
+ * On that first read pos is 0, so copylen and remain both equal the
+ * size of the header. The header copy is skipped if the offset is
+ * beyond the end of the header, which usually happens when userspace
+ * reads the stats descriptors and stats data.
+ */
+ copylen = size_header - pos;
+ copylen = min(copylen, remain);
+ if (copylen > 0) {
+ src = (void *)header + pos;
+ if (copy_to_user(dest, src, copylen))
+ return -EFAULT;
+ remain -= copylen;
+ pos += copylen;
+ dest += copylen;
+ }
+
+ /*
+ * Copy kvm stats header id string.
+ * The id string is unique for every vm/vcpu and is stored in the kvm
+ * and kvm_vcpu structures.
+ * From the perspective of userspace the id string is part of the stats
+ * header: it is usually read out together with the preceding constant
+ * header part and can be skipped when the descriptors and stats data
+ * are read later.
+ */
+ copylen = header->id_offset + KVM_STATS_NAME_SIZE - pos;
+ copylen = min(copylen, remain);
+ if (copylen > 0) {
+ src = id + pos - header->id_offset;
+ if (copy_to_user(dest, src, copylen))
+ return -EFAULT;
+ remain -= copylen;
+ pos += copylen;
+ dest += copylen;
+ }
+
+ /*
+ * Copy kvm stats descriptors.
+ * The descriptors copy is skipped in the typical case that userspace
+ * periodically reads the stats data, since pos is then greater than
+ * the end address of the descriptors
+ * (header->desc_offset + size_desc), making copylen <= 0.
+ */
+ copylen = header->desc_offset + size_desc - pos;
+ copylen = min(copylen, remain);
+ if (copylen > 0) {
+ src = (void *)desc + pos - header->desc_offset;
+ if (copy_to_user(dest, src, copylen))
+ return -EFAULT;
+ remain -= copylen;
+ pos += copylen;
+ dest += copylen;
+ }
+
+ /* Copy kvm stats values */
+ copylen = header->data_offset + size_stats - pos;
+ copylen = min(copylen, remain);
+ if (copylen > 0) {
+ src = stats + pos - header->data_offset;
+ if (copy_to_user(dest, src, copylen))
+ return -EFAULT;
+ remain -= copylen;
+ pos += copylen;
+ dest += copylen;
+ }
+
+ *offset = pos;
+ return len;
+}
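To make the four-step sequence from the comment at the top of kvm_stats_read() concrete, a userspace reader might look roughly like the sketch below. This is an illustrative sketch only: it assumes the uAPI struct kvm_stats_header and struct kvm_stats_desc layouts introduced by this series (each descriptor being the fixed struct followed by name_size bytes of stat name), skips step 2 (the id string at hdr.id_offset), elides all error handling, and assumes statsfd was obtained with the KVM_GET_STATS_FD ioctl shown further down.

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void dump_stats(int statsfd)
{
	struct kvm_stats_header hdr;
	struct kvm_stats_desc *desc, *d;
	size_t desc_sz, data_sz = 0;
	char *data;
	__u32 i;

	/* Step 1: the header sits at offset 0 (read once per fd) */
	pread(statsfd, &hdr, sizeof(hdr), 0);

	/* Step 3: descriptors; each entry is the fixed struct plus
	 * hdr.name_size bytes of stat name (read and parsed once) */
	desc_sz = sizeof(*desc) + hdr.name_size;
	desc = malloc(desc_sz * hdr.num_desc);
	pread(statsfd, desc, desc_sz * hdr.num_desc, hdr.desc_offset);

	/* The data block extends to the end of the furthest stat */
	for (i = 0; i < hdr.num_desc; i++) {
		d = (struct kvm_stats_desc *)((char *)desc + i * desc_sz);
		if (d->offset + d->size * sizeof(__u64) > data_sz)
			data_sz = d->offset + d->size * sizeof(__u64);
	}

	/* Step 4: only the data block needs to be re-read periodically */
	data = malloc(data_sz);
	pread(statsfd, data, data_sz, hdr.data_offset);

	for (i = 0; i < hdr.num_desc; i++) {
		d = (struct kvm_stats_desc *)((char *)desc + i * desc_sz);
		printf("%s: %llu\n", d->name,
		       (unsigned long long)*(__u64 *)(data + d->offset));
	}

	free(data);
	free(desc);
}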
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6b4feb92dc79..7d95126cda9e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,7 @@
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
+#include <linux/suspend.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -114,7 +115,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
-static int kvm_debugfs_num_entries;
static const struct file_operations stat_fops_per_vm;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
@@ -307,6 +307,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
+EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -330,7 +331,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
*/
if (!kvm_arch_flush_remote_tlb(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
- ++kvm->stat.remote_tlb_flush;
+ ++kvm->stat.generic.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
@@ -779,6 +780,38 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+static int kvm_pm_notifier_call(struct notifier_block *bl,
+ unsigned long state,
+ void *unused)
+{
+ struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
+
+ return kvm_arch_pm_notifier(kvm, state);
+}
+
+static void kvm_init_pm_notifier(struct kvm *kvm)
+{
+ kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
+ /* Suspend KVM before we suspend ftrace, RCU, etc. */
+ kvm->pm_notifier.priority = INT_MAX;
+ register_pm_notifier(&kvm->pm_notifier);
+}
+
+static void kvm_destroy_pm_notifier(struct kvm *kvm)
+{
+ unregister_pm_notifier(&kvm->pm_notifier);
+}
+#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
+static void kvm_init_pm_notifier(struct kvm *kvm)
+{
+}
+
+static void kvm_destroy_pm_notifier(struct kvm *kvm)
+{
+}
+#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
+
static struct kvm_memslots *kvm_alloc_memslots(void)
{
int i;
@@ -826,9 +859,24 @@ static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
kvfree(slots);
}
+static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
+{
+ switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
+ case KVM_STATS_TYPE_INSTANT:
+ return 0444;
+ case KVM_STATS_TYPE_CUMULATIVE:
+ case KVM_STATS_TYPE_PEAK:
+ default:
+ return 0644;
+ }
+}
+
static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
int i;
+ int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
+ kvm_vcpu_stats_header.num_desc;
if (!kvm->debugfs_dentry)
return;
@@ -846,7 +894,10 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
- struct kvm_stats_debugfs_item *p;
+ const struct _kvm_stats_desc *pdesc;
+ int i;
+ int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
+ kvm_vcpu_stats_header.num_desc;
if (!debugfs_initialized())
return 0;
@@ -860,15 +911,32 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
if (!kvm->debugfs_stat_data)
return -ENOMEM;
- for (p = debugfs_entries; p->name; p++) {
+ for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
+ pdesc = &kvm_vm_stats_desc[i];
stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
return -ENOMEM;
stat_data->kvm = kvm;
- stat_data->dbgfs_item = p;
- kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
- debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+ stat_data->desc = pdesc;
+ stat_data->kind = KVM_STAT_VM;
+ kvm->debugfs_stat_data[i] = stat_data;
+ debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
+ kvm->debugfs_dentry, stat_data,
+ &stat_fops_per_vm);
+ }
+
+ for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
+ pdesc = &kvm_vcpu_stats_desc[i];
+ stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
+ if (!stat_data)
+ return -ENOMEM;
+
+ stat_data->kvm = kvm;
+ stat_data->desc = pdesc;
+ stat_data->kind = KVM_STAT_VCPU;
+ kvm->debugfs_stat_data[i] = stat_data;
+ debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm->debugfs_dentry, stat_data,
&stat_fops_per_vm);
}
@@ -908,6 +976,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
+ mutex_init(&kvm->slots_arch_lock);
INIT_LIST_HEAD(&kvm->devices);
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@@ -962,6 +1031,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_unlock(&kvm_lock);
preempt_notifier_inc();
+ kvm_init_pm_notifier(kvm);
return kvm;
@@ -1009,6 +1079,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
int i;
struct mm_struct *mm = kvm->mm;
+ kvm_destroy_pm_notifier(kvm);
kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
@@ -1280,6 +1351,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
rcu_assign_pointer(kvm->memslots[as_id], slots);
+
+ /*
+ * Acquired in kvm_set_memslot. Must be released before the
+ * synchronize_srcu call below in order to avoid deadlock with another
+ * thread acquiring the slots_arch_lock in an SRCU critical section.
+ */
+ mutex_unlock(&kvm->slots_arch_lock);
+
synchronize_srcu_expedited(&kvm->srcu);
/*
@@ -1306,6 +1385,18 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
return old_memslots;
}
+static size_t kvm_memslots_size(int slots)
+{
+ return sizeof(struct kvm_memslots) +
+ (sizeof(struct kvm_memory_slot) * slots);
+}
+
+static void kvm_copy_memslots(struct kvm_memslots *to,
+ struct kvm_memslots *from)
+{
+ memcpy(to, from, kvm_memslots_size(from->used_slots));
+}
+
/*
* Note, at a minimum, the current number of used slots must be allocated, even
* when deleting a memslot, as we need a complete duplicate of the memslots for
@@ -1315,19 +1406,16 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
enum kvm_mr_change change)
{
struct kvm_memslots *slots;
- size_t old_size, new_size;
-
- old_size = sizeof(struct kvm_memslots) +
- (sizeof(struct kvm_memory_slot) * old->used_slots);
+ size_t new_size;
if (change == KVM_MR_CREATE)
- new_size = old_size + sizeof(struct kvm_memory_slot);
+ new_size = kvm_memslots_size(old->used_slots + 1);
else
- new_size = old_size;
+ new_size = kvm_memslots_size(old->used_slots);
slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
if (likely(slots))
- memcpy(slots, old, old_size);
+ kvm_copy_memslots(slots, old);
return slots;
}
@@ -1342,9 +1430,27 @@ static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memslots *slots;
int r;
+ /*
+ * Released in install_new_memslots.
+ *
+ * Must be held from before the current memslots are copied until
+ * after the new memslots are installed with rcu_assign_pointer,
+ * then released before the synchronize_srcu call in install_new_memslots.
+ *
+ * When modifying memslots outside of the slots_lock, must be held
+ * before reading the pointer to the current memslots until after all
+ * changes to those memslots are complete.
+ *
+ * These rules ensure that installing new memslots does not lose
+ * changes made to the previous memslots.
+ */
+ mutex_lock(&kvm->slots_arch_lock);
+
slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
- if (!slots)
+ if (!slots) {
+ mutex_unlock(&kvm->slots_arch_lock);
return -ENOMEM;
+ }
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
/*
@@ -1355,10 +1461,9 @@ static int kvm_set_memslot(struct kvm *kvm,
slot->flags |= KVM_MEMSLOT_INVALID;
/*
- * We can re-use the old memslots, the only difference from the
- * newly installed memslots is the invalid flag, which will get
- * dropped by update_memslots anyway. We'll also revert to the
- * old memslots if preparing the new memory region fails.
+ * We can re-use the memory from the old memslots.
+ * It will be overwritten with a copy of the new memslots
+ * after reacquiring the slots_arch_lock below.
*/
slots = install_new_memslots(kvm, as_id, slots);
@@ -1370,6 +1475,17 @@ static int kvm_set_memslot(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
+
+ /* Released in install_new_memslots. */
+ mutex_lock(&kvm->slots_arch_lock);
+
+ /*
+ * The arch-specific fields of the memslots could have changed
+ * between releasing the slots_arch_lock in
+ * install_new_memslots and here, so get a fresh copy of the
+ * slots.
+ */
+ kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
@@ -1385,8 +1501,13 @@ static int kvm_set_memslot(struct kvm *kvm,
return 0;
out_slots:
- if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+ slot = id_to_memslot(slots, old->id);
+ slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
+ } else {
+ mutex_unlock(&kvm->slots_arch_lock);
+ }
kvfree(slots);
return r;
}
@@ -2054,6 +2175,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
return true;
}
+static int kvm_try_get_pfn(kvm_pfn_t pfn)
+{
+ if (kvm_is_reserved_pfn(pfn))
+ return 1;
+ return get_page_unless_zero(pfn_to_page(pfn));
+}
+
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool *async,
bool write_fault, bool *writable,
@@ -2103,13 +2231,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
* Whoever called remap_pfn_range is also going to call e.g.
* unmap_mapping_range before the underlying pages are freed,
* causing a call to our MMU notifier.
+ *
+ * Certain IO or PFNMAP mappings can be backed with valid
+ * struct pages but be allocated without refcounting (e.g.,
+ * tail pages of non-compound higher-order allocations), which
+ * would then underflow the refcount when the caller does the
+ * required put_page. Don't allow those pages here.
*/
- kvm_get_pfn(pfn);
+ if (!kvm_try_get_pfn(pfn))
+ r = -EFAULT;
out:
pte_unmap_unlock(ptep, ptl);
*p_pfn = pfn;
- return 0;
+
+ return r;
}
/*
@@ -2154,7 +2290,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
}
retry:
- vma = find_vma_intersection(current->mm, addr, addr + 1);
+ vma = vma_lookup(current->mm, addr);
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
@@ -2929,6 +3065,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
goto out;
if (signal_pending(current))
goto out;
+ if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
+ goto out;
ret = 0;
out:
@@ -2940,9 +3078,9 @@ static inline void
update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
{
if (waited)
- vcpu->stat.halt_poll_fail_ns += poll_ns;
+ vcpu->stat.generic.halt_poll_fail_ns += poll_ns;
else
- vcpu->stat.halt_poll_success_ns += poll_ns;
+ vcpu->stat.generic.halt_poll_success_ns += poll_ns;
}
/*
@@ -2960,21 +3098,20 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
- ++vcpu->stat.halt_attempted_poll;
+ ++vcpu->stat.generic.halt_attempted_poll;
do {
/*
* This sets KVM_REQ_UNHALT if an interrupt
* arrives.
*/
if (kvm_vcpu_check_block(vcpu) < 0) {
- ++vcpu->stat.halt_successful_poll;
+ ++vcpu->stat.generic.halt_successful_poll;
if (!vcpu_valid_wakeup(vcpu))
- ++vcpu->stat.halt_poll_invalid;
+ ++vcpu->stat.generic.halt_poll_invalid;
goto out;
}
poll_end = cur = ktime_get();
- } while (single_task_running() && !need_resched() &&
- ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
}
prepare_to_rcuwait(&vcpu->wait);
@@ -3027,7 +3164,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp)) {
WRITE_ONCE(vcpu->ready, true);
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.generic.halt_wakeup;
return true;
}
@@ -3360,6 +3497,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
+ /* Fill the stats id string for the vcpu */
+ snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
+ task_pid_nr(current), id);
+
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
@@ -3409,6 +3550,44 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
return 0;
}
+static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
+ size_t size, loff_t *offset)
+{
+ struct kvm_vcpu *vcpu = file->private_data;
+
+ return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
+ &kvm_vcpu_stats_desc[0], &vcpu->stat,
+ sizeof(vcpu->stat), user_buffer, size, offset);
+}
+
+static const struct file_operations kvm_vcpu_stats_fops = {
+ .read = kvm_vcpu_stats_read,
+ .llseek = noop_llseek,
+};
+
+static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
+{
+ int fd;
+ struct file *file;
+ char name[15 + ITOA_MAX_LEN + 1];
+
+ snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
+ }
+ file->f_mode |= FMODE_PREAD;
+ fd_install(fd, file);
+
+ return fd;
+}
+
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -3606,6 +3785,10 @@ out_free1:
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
break;
}
+ case KVM_GET_STATS_FD: {
+ r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
+ break;
+ }
default:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
@@ -3864,6 +4047,8 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#else
return 0;
#endif
+ case KVM_CAP_BINARY_STATS_FD:
+ return 1;
default:
break;
}
@@ -3967,6 +4152,42 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
}
}
+static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
+ size_t size, loff_t *offset)
+{
+ struct kvm *kvm = file->private_data;
+
+ return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
+ &kvm_vm_stats_desc[0], &kvm->stat,
+ sizeof(kvm->stat), user_buffer, size, offset);
+}
+
+static const struct file_operations kvm_vm_stats_fops = {
+ .read = kvm_vm_stats_read,
+ .llseek = noop_llseek,
+};
+
+static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
+{
+ int fd;
+ struct file *file;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ file = anon_inode_getfile("kvm-vm-stats",
+ &kvm_vm_stats_fops, kvm, O_RDONLY);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
+ }
+ file->f_mode |= FMODE_PREAD;
+ fd_install(fd, file);
+
+ return fd;
+}
+
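For context, userspace reaches this through the KVM_GET_STATS_FD ioctl on a VM file descriptor (or on a vCPU fd via kvm_vcpu_ioctl_get_stats_fd() above), after checking KVM_CAP_BINARY_STATS_FD; the returned fd is read-only and, thanks to FMODE_PREAD, supports pread(). A minimal usage sketch, with error handling elided and open_vm_stats_fd() being a hypothetical helper name, could be:

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

int open_vm_stats_fd(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int statsfd = ioctl(vm, KVM_GET_STATS_FD, NULL);
	struct kvm_stats_header hdr;

	/* Read-only fd; pread() works because of FMODE_PREAD above */
	pread(statsfd, &hdr, sizeof(hdr), 0);
	return statsfd;
}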
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4149,6 +4370,9 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_RESET_DIRTY_RINGS:
r = kvm_vm_ioctl_reset_dirty_pages(kvm);
break;
+ case KVM_GET_STATS_FD:
+ r = kvm_vm_ioctl_get_stats_fd(kvm);
+ break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -4228,6 +4452,9 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
if (r < 0)
goto put_kvm;
+ snprintf(kvm->stats_id, sizeof(kvm->stats_id),
+ "kvm-%d", task_pid_nr(current));
+
file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (IS_ERR(file)) {
put_unused_fd(r);
@@ -4722,7 +4949,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
return -ENOENT;
if (simple_attr_open(inode, file, get,
- KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
+ kvm_stats_debugfs_mode(stat_data->desc) & 0222
? set : NULL,
fmt)) {
kvm_put_kvm(stat_data->kvm);
@@ -4745,14 +4972,14 @@ static int kvm_debugfs_release(struct inode *inode, struct file *file)
static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{
- *val = *(ulong *)((void *)kvm + offset);
+ *val = *(u64 *)((void *)(&kvm->stat) + offset);
return 0;
}
static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{
- *(ulong *)((void *)kvm + offset) = 0;
+ *(u64 *)((void *)(&kvm->stat) + offset) = 0;
return 0;
}
@@ -4765,7 +4992,7 @@ static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
*val = 0;
kvm_for_each_vcpu(i, vcpu, kvm)
- *val += *(u64 *)((void *)vcpu + offset);
+ *val += *(u64 *)((void *)(&vcpu->stat) + offset);
return 0;
}
@@ -4776,7 +5003,7 @@ static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
- *(u64 *)((void *)vcpu + offset) = 0;
+ *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
return 0;
}
@@ -4786,14 +5013,14 @@ static int kvm_stat_data_get(void *data, u64 *val)
int r = -EFAULT;
struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
- switch (stat_data->dbgfs_item->kind) {
+ switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_get_stat_per_vm(stat_data->kvm,
- stat_data->dbgfs_item->offset, val);
+ stat_data->desc->desc.offset, val);
break;
case KVM_STAT_VCPU:
r = kvm_get_stat_per_vcpu(stat_data->kvm,
- stat_data->dbgfs_item->offset, val);
+ stat_data->desc->desc.offset, val);
break;
}
@@ -4808,14 +5035,14 @@ static int kvm_stat_data_clear(void *data, u64 val)
if (val)
return -EINVAL;
- switch (stat_data->dbgfs_item->kind) {
+ switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_clear_stat_per_vm(stat_data->kvm,
- stat_data->dbgfs_item->offset);
+ stat_data->desc->desc.offset);
break;
case KVM_STAT_VCPU:
r = kvm_clear_stat_per_vcpu(stat_data->kvm,
- stat_data->dbgfs_item->offset);
+ stat_data->desc->desc.offset);
break;
}
@@ -4872,6 +5099,7 @@ static int vm_stat_clear(void *_offset, u64 val)
}
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
static int vcpu_stat_get(void *_offset, u64 *val)
{
@@ -4908,11 +5136,7 @@ static int vcpu_stat_clear(void *_offset, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
"%llu\n");
-
-static const struct file_operations *stat_fops[] = {
- [KVM_STAT_VCPU] = &vcpu_stat_fops,
- [KVM_STAT_VM] = &vm_stat_fops,
-};
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
@@ -4966,15 +5190,32 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
static void kvm_init_debug(void)
{
- struct kvm_stats_debugfs_item *p;
+ const struct file_operations *fops;
+ const struct _kvm_stats_desc *pdesc;
+ int i;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
- kvm_debugfs_num_entries = 0;
- for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
- debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
- kvm_debugfs_dir, (void *)(long)p->offset,
- stat_fops[p->kind]);
+ for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
+ pdesc = &kvm_vm_stats_desc[i];
+ if (kvm_stats_debugfs_mode(pdesc) & 0222)
+ fops = &vm_stat_fops;
+ else
+ fops = &vm_stat_readonly_fops;
+ debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
+ kvm_debugfs_dir,
+ (void *)(long)pdesc->desc.offset, fops);
+ }
+
+ for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
+ pdesc = &kvm_vcpu_stats_desc[i];
+ if (kvm_stats_debugfs_mode(pdesc) & 0222)
+ fops = &vcpu_stat_fops;
+ else
+ fops = &vcpu_stat_readonly_fops;
+ debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
+ kvm_debugfs_dir,
+ (void *)(long)pdesc->desc.offset, fops);
}
}
@@ -5023,7 +5264,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- if (current->state == TASK_RUNNING) {
+ if (current->on_rq) {
WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
@@ -5124,7 +5365,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
SLAB_ACCOUNT,
offsetof(struct kvm_vcpu, arch),
- sizeof_field(struct kvm_vcpu, arch),
+ offsetofend(struct kvm_vcpu, stats_id)
+ - offsetof(struct kvm_vcpu, arch),
NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index c9bb3957f58a..28fda42e471b 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod,
if (prod->add_consumer)
ret = prod->add_consumer(prod, cons);
- if (ret)
- goto err_add_consumer;
-
- ret = cons->add_producer(cons, prod);
- if (ret)
- goto err_add_producer;
+ if (!ret) {
+ ret = cons->add_producer(cons, prod);
+ if (ret && prod->del_consumer)
+ prod->del_consumer(prod, cons);
+ }
if (cons->start)
cons->start(cons);
if (prod->start)
prod->start(prod);
-err_add_producer:
- if (prod->del_consumer)
- prod->del_consumer(prod, cons);
-err_add_consumer:
+
return ret;
}